// Extraction artifact — dataset header, not part of the original source:
// CombinedText / stringlengths / 4 / 3.42M
pub type dev_t = u32; pub type mode_t = u16; pub type pthread_attr_t = *mut ::c_void; pub type rlim_t = i64; pub type pthread_mutex_t = *mut ::c_void; pub type pthread_mutexattr_t = *mut ::c_void; pub type pthread_cond_t = *mut ::c_void; pub type pthread_condattr_t = *mut ::c_void; pub type pthread_rwlock_t = *mut ::c_void; pub type pthread_rwlockattr_t = *mut ::c_void; pub type pthread_key_t = ::c_int; pub type tcflag_t = ::c_uint; pub type speed_t = ::c_uint; pub type nl_item = ::c_int; pub type id_t = i64; pub enum timezone {} s! { pub struct glob_t { pub gl_pathc: ::size_t, pub gl_matchc: ::size_t, pub gl_offs: ::size_t, pub gl_flags: ::c_int, pub gl_pathv: *mut *mut ::c_char, __unused3: *mut ::c_void, __unused4: *mut ::c_void, __unused5: *mut ::c_void, __unused6: *mut ::c_void, __unused7: *mut ::c_void, __unused8: *mut ::c_void, } pub struct kevent { pub ident: ::uintptr_t, pub filter: ::c_short, pub flags: ::c_ushort, pub fflags: ::c_uint, pub data: ::intptr_t, pub udata: *mut ::c_void, } pub struct sockaddr_storage { pub ss_len: u8, pub ss_family: ::sa_family_t, __ss_pad1: [u8; 6], __ss_align: i64, __ss_pad2: [u8; 112], } pub struct addrinfo { pub ai_flags: ::c_int, pub ai_family: ::c_int, pub ai_socktype: ::c_int, pub ai_protocol: ::c_int, pub ai_addrlen: ::socklen_t, pub ai_canonname: *mut ::c_char, pub ai_addr: *mut ::sockaddr, pub ai_next: *mut addrinfo, } pub struct sigset_t { bits: [u32; 4], } pub struct siginfo_t { pub si_signo: ::c_int, pub si_errno: ::c_int, pub si_code: ::c_int, pub si_pid: ::pid_t, pub si_uid: ::uid_t, pub si_status: ::c_int, pub si_addr: *mut ::c_void, _pad: [::c_int; 12], } pub struct sigaction { pub sa_sigaction: ::sighandler_t, pub sa_flags: ::c_int, pub sa_mask: sigset_t, } pub struct stack_t { // In FreeBSD 11 and later, ss_sp is actually a void* pub ss_sp: *mut ::c_char, pub ss_size: ::size_t, pub ss_flags: ::c_int, } pub struct sched_param { pub sched_priority: ::c_int, } pub struct Dl_info { pub dli_fname: *const 
::c_char, pub dli_fbase: *mut ::c_void, pub dli_sname: *const ::c_char, pub dli_saddr: *mut ::c_void, } pub struct sockaddr_in { pub sin_len: u8, pub sin_family: ::sa_family_t, pub sin_port: ::in_port_t, pub sin_addr: ::in_addr, pub sin_zero: [::c_char; 8], } pub struct termios { pub c_iflag: ::tcflag_t, pub c_oflag: ::tcflag_t, pub c_cflag: ::tcflag_t, pub c_lflag: ::tcflag_t, pub c_cc: [::cc_t; ::NCCS], pub c_ispeed: ::speed_t, pub c_ospeed: ::speed_t, } pub struct flock { pub l_start: ::off_t, pub l_len: ::off_t, pub l_pid: ::pid_t, pub l_type: ::c_short, pub l_whence: ::c_short, #[cfg(not(target_os = "dragonfly"))] pub l_sysid: ::c_int, } pub struct sf_hdtr { pub headers: *mut ::iovec, pub hdr_cnt: ::c_int, pub trailers: *mut ::iovec, pub trl_cnt: ::c_int, } pub struct lconv { pub decimal_point: *mut ::c_char, pub thousands_sep: *mut ::c_char, pub grouping: *mut ::c_char, pub int_curr_symbol: *mut ::c_char, pub currency_symbol: *mut ::c_char, pub mon_decimal_point: *mut ::c_char, pub mon_thousands_sep: *mut ::c_char, pub mon_grouping: *mut ::c_char, pub positive_sign: *mut ::c_char, pub negative_sign: *mut ::c_char, pub int_frac_digits: ::c_char, pub frac_digits: ::c_char, pub p_cs_precedes: ::c_char, pub p_sep_by_space: ::c_char, pub n_cs_precedes: ::c_char, pub n_sep_by_space: ::c_char, pub p_sign_posn: ::c_char, pub n_sign_posn: ::c_char, pub int_p_cs_precedes: ::c_char, pub int_n_cs_precedes: ::c_char, pub int_p_sep_by_space: ::c_char, pub int_n_sep_by_space: ::c_char, pub int_p_sign_posn: ::c_char, pub int_n_sign_posn: ::c_char, } } pub const AIO_LISTIO_MAX: ::c_int = 16; pub const AIO_CANCELED: ::c_int = 1; pub const AIO_NOTCANCELED: ::c_int = 2; pub const AIO_ALLDONE: ::c_int = 3; pub const LIO_NOP: ::c_int = 0; pub const LIO_WRITE: ::c_int = 1; pub const LIO_READ: ::c_int = 2; pub const LIO_WAIT: ::c_int = 1; pub const LIO_NOWAIT: ::c_int = 0; pub const SIGEV_NONE: ::c_int = 0; pub const SIGEV_SIGNAL: ::c_int = 1; pub const SIGEV_THREAD: ::c_int = 2; 
pub const SIGEV_KEVENT: ::c_int = 3; pub const CODESET: ::nl_item = 0; pub const D_T_FMT: ::nl_item = 1; pub const D_FMT: ::nl_item = 2; pub const T_FMT: ::nl_item = 3; pub const T_FMT_AMPM: ::nl_item = 4; pub const AM_STR: ::nl_item = 5; pub const PM_STR: ::nl_item = 6; pub const DAY_1: ::nl_item = 7; pub const DAY_2: ::nl_item = 8; pub const DAY_3: ::nl_item = 9; pub const DAY_4: ::nl_item = 10; pub const DAY_5: ::nl_item = 11; pub const DAY_6: ::nl_item = 12; pub const DAY_7: ::nl_item = 13; pub const ABDAY_1: ::nl_item = 14; pub const ABDAY_2: ::nl_item = 15; pub const ABDAY_3: ::nl_item = 16; pub const ABDAY_4: ::nl_item = 17; pub const ABDAY_5: ::nl_item = 18; pub const ABDAY_6: ::nl_item = 19; pub const ABDAY_7: ::nl_item = 20; pub const MON_1: ::nl_item = 21; pub const MON_2: ::nl_item = 22; pub const MON_3: ::nl_item = 23; pub const MON_4: ::nl_item = 24; pub const MON_5: ::nl_item = 25; pub const MON_6: ::nl_item = 26; pub const MON_7: ::nl_item = 27; pub const MON_8: ::nl_item = 28; pub const MON_9: ::nl_item = 29; pub const MON_10: ::nl_item = 30; pub const MON_11: ::nl_item = 31; pub const MON_12: ::nl_item = 32; pub const ABMON_1: ::nl_item = 33; pub const ABMON_2: ::nl_item = 34; pub const ABMON_3: ::nl_item = 35; pub const ABMON_4: ::nl_item = 36; pub const ABMON_5: ::nl_item = 37; pub const ABMON_6: ::nl_item = 38; pub const ABMON_7: ::nl_item = 39; pub const ABMON_8: ::nl_item = 40; pub const ABMON_9: ::nl_item = 41; pub const ABMON_10: ::nl_item = 42; pub const ABMON_11: ::nl_item = 43; pub const ABMON_12: ::nl_item = 44; pub const ERA: ::nl_item = 45; pub const ERA_D_FMT: ::nl_item = 46; pub const ERA_D_T_FMT: ::nl_item = 47; pub const ERA_T_FMT: ::nl_item = 48; pub const ALT_DIGITS: ::nl_item = 49; pub const RADIXCHAR: ::nl_item = 50; pub const THOUSEP: ::nl_item = 51; pub const YESEXPR: ::nl_item = 52; pub const NOEXPR: ::nl_item = 53; pub const YESSTR: ::nl_item = 54; pub const NOSTR: ::nl_item = 55; pub const CRNCYSTR: ::nl_item = 56; pub 
const D_MD_ORDER: ::nl_item = 57; pub const ALTMON_1: ::nl_item = 58; pub const ALTMON_2: ::nl_item = 59; pub const ALTMON_3: ::nl_item = 60; pub const ALTMON_4: ::nl_item = 61; pub const ALTMON_5: ::nl_item = 62; pub const ALTMON_6: ::nl_item = 63; pub const ALTMON_7: ::nl_item = 64; pub const ALTMON_8: ::nl_item = 65; pub const ALTMON_9: ::nl_item = 66; pub const ALTMON_10: ::nl_item = 67; pub const ALTMON_11: ::nl_item = 68; pub const ALTMON_12: ::nl_item = 69; pub const EXIT_FAILURE: ::c_int = 1; pub const EXIT_SUCCESS: ::c_int = 0; pub const EOF: ::c_int = -1; pub const SEEK_SET: ::c_int = 0; pub const SEEK_CUR: ::c_int = 1; pub const SEEK_END: ::c_int = 2; pub const _IOFBF: ::c_int = 0; pub const _IONBF: ::c_int = 2; pub const _IOLBF: ::c_int = 1; pub const BUFSIZ: ::c_uint = 1024; pub const FOPEN_MAX: ::c_uint = 20; pub const FILENAME_MAX: ::c_uint = 1024; pub const L_tmpnam: ::c_uint = 1024; pub const TMP_MAX: ::c_uint = 308915776; pub const O_NOCTTY: ::c_int = 32768; pub const O_DIRECT: ::c_int = 0x00010000; pub const S_IFIFO: mode_t = 4096; pub const S_IFCHR: mode_t = 8192; pub const S_IFBLK: mode_t = 24576; pub const S_IFDIR: mode_t = 16384; pub const S_IFREG: mode_t = 32768; pub const S_IFLNK: mode_t = 40960; pub const S_IFSOCK: mode_t = 49152; pub const S_IFMT: mode_t = 61440; pub const S_IEXEC: mode_t = 64; pub const S_IWRITE: mode_t = 128; pub const S_IREAD: mode_t = 256; pub const S_IRWXU: mode_t = 448; pub const S_IXUSR: mode_t = 64; pub const S_IWUSR: mode_t = 128; pub const S_IRUSR: mode_t = 256; pub const S_IRWXG: mode_t = 56; pub const S_IXGRP: mode_t = 8; pub const S_IWGRP: mode_t = 16; pub const S_IRGRP: mode_t = 32; pub const S_IRWXO: mode_t = 7; pub const S_IXOTH: mode_t = 1; pub const S_IWOTH: mode_t = 2; pub const S_IROTH: mode_t = 4; pub const F_OK: ::c_int = 0; pub const R_OK: ::c_int = 4; pub const W_OK: ::c_int = 2; pub const X_OK: ::c_int = 1; pub const STDIN_FILENO: ::c_int = 0; pub const STDOUT_FILENO: ::c_int = 1; pub const 
STDERR_FILENO: ::c_int = 2; pub const F_LOCK: ::c_int = 1; pub const F_TEST: ::c_int = 3; pub const F_TLOCK: ::c_int = 2; pub const F_ULOCK: ::c_int = 0; pub const F_DUPFD_CLOEXEC: ::c_int = 17; pub const SIGHUP: ::c_int = 1; pub const SIGINT: ::c_int = 2; pub const SIGQUIT: ::c_int = 3; pub const SIGILL: ::c_int = 4; pub const SIGABRT: ::c_int = 6; pub const SIGEMT: ::c_int = 7; pub const SIGFPE: ::c_int = 8; pub const SIGKILL: ::c_int = 9; pub const SIGSEGV: ::c_int = 11; pub const SIGPIPE: ::c_int = 13; pub const SIGALRM: ::c_int = 14; pub const SIGTERM: ::c_int = 15; pub const PROT_NONE: ::c_int = 0; pub const PROT_READ: ::c_int = 1; pub const PROT_WRITE: ::c_int = 2; pub const PROT_EXEC: ::c_int = 4; pub const MAP_FILE: ::c_int = 0x0000; pub const MAP_SHARED: ::c_int = 0x0001; pub const MAP_PRIVATE: ::c_int = 0x0002; pub const MAP_FIXED: ::c_int = 0x0010; pub const MAP_ANON: ::c_int = 0x1000; pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void; pub const MCL_CURRENT: ::c_int = 0x0001; pub const MCL_FUTURE: ::c_int = 0x0002; pub const MS_SYNC: ::c_int = 0x0000; pub const MS_ASYNC: ::c_int = 0x0001; pub const MS_INVALIDATE: ::c_int = 0x0002; pub const EPERM: ::c_int = 1; pub const ENOENT: ::c_int = 2; pub const ESRCH: ::c_int = 3; pub const EINTR: ::c_int = 4; pub const EIO: ::c_int = 5; pub const ENXIO: ::c_int = 6; pub const E2BIG: ::c_int = 7; pub const ENOEXEC: ::c_int = 8; pub const EBADF: ::c_int = 9; pub const ECHILD: ::c_int = 10; pub const EDEADLK: ::c_int = 11; pub const ENOMEM: ::c_int = 12; pub const EACCES: ::c_int = 13; pub const EFAULT: ::c_int = 14; pub const ENOTBLK: ::c_int = 15; pub const EBUSY: ::c_int = 16; pub const EEXIST: ::c_int = 17; pub const EXDEV: ::c_int = 18; pub const ENODEV: ::c_int = 19; pub const ENOTDIR: ::c_int = 20; pub const EISDIR: ::c_int = 21; pub const EINVAL: ::c_int = 22; pub const ENFILE: ::c_int = 23; pub const EMFILE: ::c_int = 24; pub const ENOTTY: ::c_int = 25; pub const ETXTBSY: ::c_int = 26; pub const 
EFBIG: ::c_int = 27; pub const ENOSPC: ::c_int = 28; pub const ESPIPE: ::c_int = 29; pub const EROFS: ::c_int = 30; pub const EMLINK: ::c_int = 31; pub const EPIPE: ::c_int = 32; pub const EDOM: ::c_int = 33; pub const ERANGE: ::c_int = 34; pub const EAGAIN: ::c_int = 35; pub const EWOULDBLOCK: ::c_int = 35; pub const EINPROGRESS: ::c_int = 36; pub const EALREADY: ::c_int = 37; pub const ENOTSOCK: ::c_int = 38; pub const EDESTADDRREQ: ::c_int = 39; pub const EMSGSIZE: ::c_int = 40; pub const EPROTOTYPE: ::c_int = 41; pub const ENOPROTOOPT: ::c_int = 42; pub const EPROTONOSUPPORT: ::c_int = 43; pub const ESOCKTNOSUPPORT: ::c_int = 44; pub const EOPNOTSUPP: ::c_int = 45; pub const ENOTSUP: ::c_int = EOPNOTSUPP; pub const EPFNOSUPPORT: ::c_int = 46; pub const EAFNOSUPPORT: ::c_int = 47; pub const EADDRINUSE: ::c_int = 48; pub const EADDRNOTAVAIL: ::c_int = 49; pub const ENETDOWN: ::c_int = 50; pub const ENETUNREACH: ::c_int = 51; pub const ENETRESET: ::c_int = 52; pub const ECONNABORTED: ::c_int = 53; pub const ECONNRESET: ::c_int = 54; pub const ENOBUFS: ::c_int = 55; pub const EISCONN: ::c_int = 56; pub const ENOTCONN: ::c_int = 57; pub const ESHUTDOWN: ::c_int = 58; pub const ETOOMANYREFS: ::c_int = 59; pub const ETIMEDOUT: ::c_int = 60; pub const ECONNREFUSED: ::c_int = 61; pub const ELOOP: ::c_int = 62; pub const ENAMETOOLONG: ::c_int = 63; pub const EHOSTDOWN: ::c_int = 64; pub const EHOSTUNREACH: ::c_int = 65; pub const ENOTEMPTY: ::c_int = 66; pub const EPROCLIM: ::c_int = 67; pub const EUSERS: ::c_int = 68; pub const EDQUOT: ::c_int = 69; pub const ESTALE: ::c_int = 70; pub const EREMOTE: ::c_int = 71; pub const EBADRPC: ::c_int = 72; pub const ERPCMISMATCH: ::c_int = 73; pub const EPROGUNAVAIL: ::c_int = 74; pub const EPROGMISMATCH: ::c_int = 75; pub const EPROCUNAVAIL: ::c_int = 76; pub const ENOLCK: ::c_int = 77; pub const ENOSYS: ::c_int = 78; pub const EFTYPE: ::c_int = 79; pub const EAUTH: ::c_int = 80; pub const ENEEDAUTH: ::c_int = 81; pub const 
EIDRM: ::c_int = 82; pub const ENOMSG: ::c_int = 83; pub const EOVERFLOW: ::c_int = 84; pub const ECANCELED: ::c_int = 85; pub const EILSEQ: ::c_int = 86; pub const ENOATTR: ::c_int = 87; pub const EDOOFUS: ::c_int = 88; pub const EBADMSG: ::c_int = 89; pub const EMULTIHOP: ::c_int = 90; pub const ENOLINK: ::c_int = 91; pub const EPROTO: ::c_int = 92; pub const POLLSTANDARD: ::c_short = ::POLLIN | ::POLLPRI | ::POLLOUT | ::POLLRDNORM | ::POLLRDBAND | ::POLLWRBAND | ::POLLERR | ::POLLHUP | ::POLLNVAL; pub const EAI_SYSTEM: ::c_int = 11; pub const F_DUPFD: ::c_int = 0; pub const F_GETFD: ::c_int = 1; pub const F_SETFD: ::c_int = 2; pub const F_GETFL: ::c_int = 3; pub const F_SETFL: ::c_int = 4; pub const SIGTRAP: ::c_int = 5; pub const GLOB_APPEND : ::c_int = 0x0001; pub const GLOB_DOOFFS : ::c_int = 0x0002; pub const GLOB_ERR : ::c_int = 0x0004; pub const GLOB_MARK : ::c_int = 0x0008; pub const GLOB_NOCHECK : ::c_int = 0x0010; pub const GLOB_NOSORT : ::c_int = 0x0020; pub const GLOB_NOESCAPE: ::c_int = 0x2000; pub const GLOB_NOSPACE : ::c_int = -1; pub const GLOB_ABORTED : ::c_int = -2; pub const GLOB_NOMATCH : ::c_int = -3; pub const POSIX_MADV_NORMAL: ::c_int = 0; pub const POSIX_MADV_RANDOM: ::c_int = 1; pub const POSIX_MADV_SEQUENTIAL: ::c_int = 2; pub const POSIX_MADV_WILLNEED: ::c_int = 3; pub const POSIX_MADV_DONTNEED: ::c_int = 4; pub const PTHREAD_PROCESS_PRIVATE: ::c_int = 0; pub const PTHREAD_PROCESS_SHARED: ::c_int = 1; pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0; pub const PTHREAD_CREATE_DETACHED: ::c_int = 1; pub const RLIMIT_CPU: ::c_int = 0; pub const RLIMIT_FSIZE: ::c_int = 1; pub const RLIMIT_DATA: ::c_int = 2; pub const RLIMIT_STACK: ::c_int = 3; pub const RLIMIT_CORE: ::c_int = 4; pub const RLIMIT_RSS: ::c_int = 5; pub const RLIMIT_MEMLOCK: ::c_int = 6; pub const RLIMIT_NPROC: ::c_int = 7; pub const RLIMIT_NOFILE: ::c_int = 8; pub const RLIMIT_SBSIZE: ::c_int = 9; pub const RLIMIT_VMEM: ::c_int = 10; pub const RLIMIT_AS: ::c_int = RLIMIT_VMEM; 
pub const RLIM_INFINITY: rlim_t = 0x7fff_ffff_ffff_ffff; pub const RUSAGE_SELF: ::c_int = 0; pub const RUSAGE_CHILDREN: ::c_int = -1; pub const MADV_NORMAL: ::c_int = 0; pub const MADV_RANDOM: ::c_int = 1; pub const MADV_SEQUENTIAL: ::c_int = 2; pub const MADV_WILLNEED: ::c_int = 3; pub const MADV_DONTNEED: ::c_int = 4; pub const MADV_FREE: ::c_int = 5; pub const MADV_NOSYNC: ::c_int = 6; pub const MADV_AUTOSYNC: ::c_int = 7; pub const MADV_NOCORE: ::c_int = 8; pub const MADV_CORE: ::c_int = 9; pub const MINCORE_INCORE: ::c_int = 0x1; pub const MINCORE_REFERENCED: ::c_int = 0x2; pub const MINCORE_MODIFIED: ::c_int = 0x4; pub const MINCORE_REFERENCED_OTHER: ::c_int = 0x8; pub const MINCORE_MODIFIED_OTHER: ::c_int = 0x10; pub const MINCORE_SUPER: ::c_int = 0x20; pub const AF_UNSPEC: ::c_int = 0; pub const AF_LOCAL: ::c_int = 1; pub const AF_UNIX: ::c_int = AF_LOCAL; pub const AF_INET: ::c_int = 2; pub const AF_IMPLINK: ::c_int = 3; pub const AF_PUP: ::c_int = 4; pub const AF_CHAOS: ::c_int = 5; pub const AF_NETBIOS: ::c_int = 6; pub const AF_ISO: ::c_int = 7; pub const AF_OSI: ::c_int = AF_ISO; pub const AF_ECMA: ::c_int = 8; pub const AF_DATAKIT: ::c_int = 9; pub const AF_CCITT: ::c_int = 10; pub const AF_SNA: ::c_int = 11; pub const AF_DECnet: ::c_int = 12; pub const AF_DLI: ::c_int = 13; pub const AF_LAT: ::c_int = 14; pub const AF_HYLINK: ::c_int = 15; pub const AF_APPLETALK: ::c_int = 16; pub const AF_ROUTE: ::c_int = 17; pub const AF_LINK: ::c_int = 18; pub const pseudo_AF_XTP: ::c_int = 19; pub const AF_COIP: ::c_int = 20; pub const AF_CNT: ::c_int = 21; pub const pseudo_AF_RTIP: ::c_int = 22; pub const AF_IPX: ::c_int = 23; pub const AF_SIP: ::c_int = 24; pub const pseudo_AF_PIP: ::c_int = 25; pub const AF_ISDN: ::c_int = 26; pub const AF_E164: ::c_int = AF_ISDN; pub const pseudo_AF_KEY: ::c_int = 27; pub const AF_INET6: ::c_int = 28; pub const AF_NATM: ::c_int = 29; pub const AF_ATM: ::c_int = 30; pub const pseudo_AF_HDRCMPLT: ::c_int = 31; pub const 
AF_NETGRAPH: ::c_int = 32; pub const PF_UNSPEC: ::c_int = AF_UNSPEC; pub const PF_LOCAL: ::c_int = AF_LOCAL; pub const PF_UNIX: ::c_int = PF_LOCAL; pub const PF_INET: ::c_int = AF_INET; pub const PF_IMPLINK: ::c_int = AF_IMPLINK; pub const PF_PUP: ::c_int = AF_PUP; pub const PF_CHAOS: ::c_int = AF_CHAOS; pub const PF_NETBIOS: ::c_int = AF_NETBIOS; pub const PF_ISO: ::c_int = AF_ISO; pub const PF_OSI: ::c_int = AF_ISO; pub const PF_ECMA: ::c_int = AF_ECMA; pub const PF_DATAKIT: ::c_int = AF_DATAKIT; pub const PF_CCITT: ::c_int = AF_CCITT; pub const PF_SNA: ::c_int = AF_SNA; pub const PF_DECnet: ::c_int = AF_DECnet; pub const PF_DLI: ::c_int = AF_DLI; pub const PF_LAT: ::c_int = AF_LAT; pub const PF_HYLINK: ::c_int = AF_HYLINK; pub const PF_APPLETALK: ::c_int = AF_APPLETALK; pub const PF_ROUTE: ::c_int = AF_ROUTE; pub const PF_LINK: ::c_int = AF_LINK; pub const PF_XTP: ::c_int = pseudo_AF_XTP; pub const PF_COIP: ::c_int = AF_COIP; pub const PF_CNT: ::c_int = AF_CNT; pub const PF_SIP: ::c_int = AF_SIP; pub const PF_IPX: ::c_int = AF_IPX; pub const PF_RTIP: ::c_int = pseudo_AF_RTIP; pub const PF_PIP: ::c_int = pseudo_AF_PIP; pub const PF_ISDN: ::c_int = AF_ISDN; pub const PF_KEY: ::c_int = pseudo_AF_KEY; pub const PF_INET6: ::c_int = AF_INET6; pub const PF_NATM: ::c_int = AF_NATM; pub const PF_ATM: ::c_int = AF_ATM; pub const PF_NETGRAPH: ::c_int = AF_NETGRAPH; pub const SOMAXCONN: ::c_int = 128; pub const MSG_OOB: ::c_int = 0x00000001; pub const MSG_PEEK: ::c_int = 0x00000002; pub const MSG_DONTROUTE: ::c_int = 0x00000004; pub const MSG_EOR: ::c_int = 0x00000008; pub const MSG_TRUNC: ::c_int = 0x00000010; pub const MSG_CTRUNC: ::c_int = 0x00000020; pub const MSG_WAITALL: ::c_int = 0x00000040; pub const MSG_DONTWAIT: ::c_int = 0x00000080; pub const MSG_EOF: ::c_int = 0x00000100; pub const SCM_TIMESTAMP: ::c_int = 0x02; pub const SOCK_STREAM: ::c_int = 1; pub const SOCK_DGRAM: ::c_int = 2; pub const SOCK_RAW: ::c_int = 3; pub const SOCK_RDM: ::c_int = 4; pub const 
SOCK_SEQPACKET: ::c_int = 5; pub const SOCK_CLOEXEC: ::c_int = 0x10000000; pub const SOCK_NONBLOCK: ::c_int = 0x20000000; pub const SOCK_MAXADDRLEN: ::c_int = 255; pub const IP_TTL: ::c_int = 4; pub const IP_HDRINCL: ::c_int = 2; pub const IP_ADD_MEMBERSHIP: ::c_int = 12; pub const IP_DROP_MEMBERSHIP: ::c_int = 13; pub const IPV6_JOIN_GROUP: ::c_int = 12; pub const IPV6_LEAVE_GROUP: ::c_int = 13; pub const TCP_NODELAY: ::c_int = 1; pub const TCP_KEEPIDLE: ::c_int = 256; pub const SOL_SOCKET: ::c_int = 0xffff; pub const SO_DEBUG: ::c_int = 0x01; pub const SO_ACCEPTCONN: ::c_int = 0x0002; pub const SO_REUSEADDR: ::c_int = 0x0004; pub const SO_KEEPALIVE: ::c_int = 0x0008; pub const SO_DONTROUTE: ::c_int = 0x0010; pub const SO_BROADCAST: ::c_int = 0x0020; pub const SO_USELOOPBACK: ::c_int = 0x0040; pub const SO_LINGER: ::c_int = 0x0080; pub const SO_OOBINLINE: ::c_int = 0x0100; pub const SO_REUSEPORT: ::c_int = 0x0200; pub const SO_TIMESTAMP: ::c_int = 0x0400; pub const SO_NOSIGPIPE: ::c_int = 0x0800; pub const SO_ACCEPTFILTER: ::c_int = 0x1000; pub const SO_SNDBUF: ::c_int = 0x1001; pub const SO_RCVBUF: ::c_int = 0x1002; pub const SO_SNDLOWAT: ::c_int = 0x1003; pub const SO_RCVLOWAT: ::c_int = 0x1004; pub const SO_SNDTIMEO: ::c_int = 0x1005; pub const SO_RCVTIMEO: ::c_int = 0x1006; pub const SO_ERROR: ::c_int = 0x1007; pub const SO_TYPE: ::c_int = 0x1008; pub const IFF_LOOPBACK: ::c_int = 0x8; pub const SHUT_RD: ::c_int = 0; pub const SHUT_WR: ::c_int = 1; pub const SHUT_RDWR: ::c_int = 2; pub const LOCK_SH: ::c_int = 1; pub const LOCK_EX: ::c_int = 2; pub const LOCK_NB: ::c_int = 4; pub const LOCK_UN: ::c_int = 8; pub const MAP_COPY: ::c_int = 0x0002; pub const MAP_RENAME: ::c_int = 0x0020; pub const MAP_NORESERVE: ::c_int = 0x0040; pub const MAP_HASSEMAPHORE: ::c_int = 0x0200; pub const MAP_STACK: ::c_int = 0x0400; pub const MAP_NOSYNC: ::c_int = 0x0800; pub const MAP_NOCORE: ::c_int = 0x020000; pub const IPPROTO_RAW: ::c_int = 255; pub const _PC_LINK_MAX: ::c_int = 
1; pub const _PC_MAX_CANON: ::c_int = 2; pub const _PC_MAX_INPUT: ::c_int = 3; pub const _PC_NAME_MAX: ::c_int = 4; pub const _PC_PATH_MAX: ::c_int = 5; pub const _PC_PIPE_BUF: ::c_int = 6; pub const _PC_CHOWN_RESTRICTED: ::c_int = 7; pub const _PC_NO_TRUNC: ::c_int = 8; pub const _PC_VDISABLE: ::c_int = 9; pub const _PC_ALLOC_SIZE_MIN: ::c_int = 10; pub const _PC_FILESIZEBITS: ::c_int = 12; pub const _PC_REC_INCR_XFER_SIZE: ::c_int = 14; pub const _PC_REC_MAX_XFER_SIZE: ::c_int = 15; pub const _PC_REC_MIN_XFER_SIZE: ::c_int = 16; pub const _PC_REC_XFER_ALIGN: ::c_int = 17; pub const _PC_SYMLINK_MAX: ::c_int = 18; pub const _PC_MIN_HOLE_SIZE: ::c_int = 21; pub const _PC_ASYNC_IO: ::c_int = 53; pub const _PC_PRIO_IO: ::c_int = 54; pub const _PC_SYNC_IO: ::c_int = 55; pub const _PC_ACL_EXTENDED: ::c_int = 59; pub const _PC_ACL_PATH_MAX: ::c_int = 60; pub const _PC_CAP_PRESENT: ::c_int = 61; pub const _PC_INF_PRESENT: ::c_int = 62; pub const _PC_MAC_PRESENT: ::c_int = 63; pub const _SC_ARG_MAX: ::c_int = 1; pub const _SC_CHILD_MAX: ::c_int = 2; pub const _SC_CLK_TCK: ::c_int = 3; pub const _SC_NGROUPS_MAX: ::c_int = 4; pub const _SC_OPEN_MAX: ::c_int = 5; pub const _SC_JOB_CONTROL: ::c_int = 6; pub const _SC_SAVED_IDS: ::c_int = 7; pub const _SC_VERSION: ::c_int = 8; pub const _SC_BC_BASE_MAX: ::c_int = 9; pub const _SC_BC_DIM_MAX: ::c_int = 10; pub const _SC_BC_SCALE_MAX: ::c_int = 11; pub const _SC_BC_STRING_MAX: ::c_int = 12; pub const _SC_COLL_WEIGHTS_MAX: ::c_int = 13; pub const _SC_EXPR_NEST_MAX: ::c_int = 14; pub const _SC_LINE_MAX: ::c_int = 15; pub const _SC_RE_DUP_MAX: ::c_int = 16; pub const _SC_2_VERSION: ::c_int = 17; pub const _SC_2_C_BIND: ::c_int = 18; pub const _SC_2_C_DEV: ::c_int = 19; pub const _SC_2_CHAR_TERM: ::c_int = 20; pub const _SC_2_FORT_DEV: ::c_int = 21; pub const _SC_2_FORT_RUN: ::c_int = 22; pub const _SC_2_LOCALEDEF: ::c_int = 23; pub const _SC_2_SW_DEV: ::c_int = 24; pub const _SC_2_UPE: ::c_int = 25; pub const _SC_STREAM_MAX: ::c_int 
= 26; pub const _SC_TZNAME_MAX: ::c_int = 27; pub const _SC_ASYNCHRONOUS_IO: ::c_int = 28; pub const _SC_MAPPED_FILES: ::c_int = 29; pub const _SC_MEMLOCK: ::c_int = 30; pub const _SC_MEMLOCK_RANGE: ::c_int = 31; pub const _SC_MEMORY_PROTECTION: ::c_int = 32; pub const _SC_MESSAGE_PASSING: ::c_int = 33; pub const _SC_PRIORITIZED_IO: ::c_int = 34; pub const _SC_PRIORITY_SCHEDULING: ::c_int = 35; pub const _SC_REALTIME_SIGNALS: ::c_int = 36; pub const _SC_SEMAPHORES: ::c_int = 37; pub const _SC_FSYNC: ::c_int = 38; pub const _SC_SHARED_MEMORY_OBJECTS: ::c_int = 39; pub const _SC_SYNCHRONIZED_IO: ::c_int = 40; pub const _SC_TIMERS: ::c_int = 41; pub const _SC_AIO_LISTIO_MAX: ::c_int = 42; pub const _SC_AIO_MAX: ::c_int = 43; pub const _SC_AIO_PRIO_DELTA_MAX: ::c_int = 44; pub const _SC_DELAYTIMER_MAX: ::c_int = 45; pub const _SC_MQ_OPEN_MAX: ::c_int = 46; pub const _SC_PAGESIZE: ::c_int = 47; pub const _SC_PAGE_SIZE: ::c_int = _SC_PAGESIZE; pub const _SC_RTSIG_MAX: ::c_int = 48; pub const _SC_SEM_NSEMS_MAX: ::c_int = 49; pub const _SC_SEM_VALUE_MAX: ::c_int = 50; pub const _SC_SIGQUEUE_MAX: ::c_int = 51; pub const _SC_TIMER_MAX: ::c_int = 52; pub const _SC_IOV_MAX: ::c_int = 56; pub const _SC_NPROCESSORS_CONF: ::c_int = 57; pub const _SC_2_PBS: ::c_int = 59; pub const _SC_2_PBS_ACCOUNTING: ::c_int = 60; pub const _SC_2_PBS_CHECKPOINT: ::c_int = 61; pub const _SC_2_PBS_LOCATE: ::c_int = 62; pub const _SC_2_PBS_MESSAGE: ::c_int = 63; pub const _SC_2_PBS_TRACK: ::c_int = 64; pub const _SC_ADVISORY_INFO: ::c_int = 65; pub const _SC_BARRIERS: ::c_int = 66; pub const _SC_CLOCK_SELECTION: ::c_int = 67; pub const _SC_CPUTIME: ::c_int = 68; pub const _SC_FILE_LOCKING: ::c_int = 69; pub const _SC_NPROCESSORS_ONLN: ::c_int = 58; pub const _SC_GETGR_R_SIZE_MAX: ::c_int = 70; pub const _SC_GETPW_R_SIZE_MAX: ::c_int = 71; pub const _SC_HOST_NAME_MAX: ::c_int = 72; pub const _SC_LOGIN_NAME_MAX: ::c_int = 73; pub const _SC_MONOTONIC_CLOCK: ::c_int = 74; pub const _SC_MQ_PRIO_MAX: 
::c_int = 75; pub const _SC_READER_WRITER_LOCKS: ::c_int = 76; pub const _SC_REGEXP: ::c_int = 77; pub const _SC_SHELL: ::c_int = 78; pub const _SC_SPAWN: ::c_int = 79; pub const _SC_SPIN_LOCKS: ::c_int = 80; pub const _SC_SPORADIC_SERVER: ::c_int = 81; pub const _SC_THREAD_ATTR_STACKADDR: ::c_int = 82; pub const _SC_THREAD_ATTR_STACKSIZE: ::c_int = 83; pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: ::c_int = 85; pub const _SC_THREAD_KEYS_MAX: ::c_int = 86; pub const _SC_THREAD_PRIO_INHERIT: ::c_int = 87; pub const _SC_THREAD_PRIO_PROTECT: ::c_int = 88; pub const _SC_THREAD_PRIORITY_SCHEDULING: ::c_int = 89; pub const _SC_THREAD_PROCESS_SHARED: ::c_int = 90; pub const _SC_THREAD_SAFE_FUNCTIONS: ::c_int = 91; pub const _SC_THREAD_SPORADIC_SERVER: ::c_int = 92; pub const _SC_THREAD_STACK_MIN: ::c_int = 93; pub const _SC_THREAD_THREADS_MAX: ::c_int = 94; pub const _SC_TIMEOUTS: ::c_int = 95; pub const _SC_THREADS: ::c_int = 96; pub const _SC_TRACE: ::c_int = 97; pub const _SC_TRACE_EVENT_FILTER: ::c_int = 98; pub const _SC_TRACE_INHERIT: ::c_int = 99; pub const _SC_TRACE_LOG: ::c_int = 100; pub const _SC_TTY_NAME_MAX: ::c_int = 101; pub const _SC_TYPED_MEMORY_OBJECTS: ::c_int = 102; pub const _SC_V6_ILP32_OFF32: ::c_int = 103; pub const _SC_V6_ILP32_OFFBIG: ::c_int = 104; pub const _SC_V6_LP64_OFF64: ::c_int = 105; pub const _SC_V6_LPBIG_OFFBIG: ::c_int = 106; pub const _SC_ATEXIT_MAX: ::c_int = 107; pub const _SC_XOPEN_CRYPT: ::c_int = 108; pub const _SC_XOPEN_ENH_I18N: ::c_int = 109; pub const _SC_XOPEN_LEGACY: ::c_int = 110; pub const _SC_XOPEN_REALTIME: ::c_int = 111; pub const _SC_XOPEN_REALTIME_THREADS: ::c_int = 112; pub const _SC_XOPEN_SHM: ::c_int = 113; pub const _SC_XOPEN_STREAMS: ::c_int = 114; pub const _SC_XOPEN_UNIX: ::c_int = 115; pub const _SC_XOPEN_VERSION: ::c_int = 116; pub const _SC_XOPEN_XCU_VERSION: ::c_int = 117; pub const _SC_IPV6: ::c_int = 118; pub const _SC_RAW_SOCKETS: ::c_int = 119; pub const _SC_SYMLOOP_MAX: ::c_int = 120; pub const 
_SC_PHYS_PAGES: ::c_int = 121; pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = 0 as *mut _; pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = 0 as *mut _; pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = 0 as *mut _; pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 1; pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 2; pub const PTHREAD_MUTEX_NORMAL: ::c_int = 3; pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_ERRORCHECK; pub const SCHED_FIFO: ::c_int = 1; pub const SCHED_OTHER: ::c_int = 2; pub const SCHED_RR: ::c_int = 3; pub const FD_SETSIZE: usize = 1024; pub const ST_NOSUID: ::c_ulong = 2; pub const NI_MAXHOST: ::size_t = 1025; pub const RTLD_LOCAL: ::c_int = 0; pub const RTLD_NODELETE: ::c_int = 0x1000; pub const RTLD_NOLOAD: ::c_int = 0x2000; pub const RTLD_GLOBAL: ::c_int = 0x100; pub const LOG_NTP: ::c_int = 12 << 3; pub const LOG_SECURITY: ::c_int = 13 << 3; pub const LOG_CONSOLE: ::c_int = 14 << 3; pub const LOG_NFACILITIES: ::c_int = 24; pub const TIOCEXCL: ::c_uint = 0x2000740d; pub const TIOCNXCL: ::c_uint = 0x2000740e; pub const TIOCFLUSH: ::c_ulong = 0x80047410; pub const TIOCGETA: ::c_uint = 0x402c7413; pub const TIOCSETA: ::c_ulong = 0x802c7414; pub const TIOCSETAW: ::c_ulong = 0x802c7415; pub const TIOCSETAF: ::c_ulong = 0x802c7416; pub const TIOCGETD: ::c_uint = 0x4004741a; pub const TIOCSETD: ::c_ulong = 0x8004741b; pub const TIOCGDRAINWAIT: ::c_uint = 0x40047456; pub const TIOCSDRAINWAIT: ::c_ulong = 0x80047457; pub const TIOCTIMESTAMP: ::c_uint = 0x40107459; pub const TIOCMGDTRWAIT: ::c_uint = 0x4004745a; pub const TIOCMSDTRWAIT: ::c_ulong = 0x8004745b; pub const TIOCDRAIN: ::c_uint = 0x2000745e; pub const TIOCEXT: ::c_ulong = 0x80047460; pub const TIOCSCTTY: ::c_uint = 0x20007461; pub const TIOCCONS: ::c_ulong = 0x80047462; pub const TIOCGSID: ::c_uint = 0x40047463; pub const TIOCSTAT: ::c_uint = 0x20007465; pub const TIOCUCNTL: ::c_ulong = 0x80047466; pub const TIOCSWINSZ: ::c_ulong = 0x80087467; pub const TIOCGWINSZ: 
::c_uint = 0x40087468; pub const TIOCMGET: ::c_uint = 0x4004746a; pub const TIOCM_LE: ::c_int = 0x1; pub const TIOCM_DTR: ::c_int = 0x2; pub const TIOCM_RTS: ::c_int = 0x4; pub const TIOCM_ST: ::c_int = 0x8; pub const TIOCM_SR: ::c_int = 0x10; pub const TIOCM_CTS: ::c_int = 0x20; pub const TIOCM_RI: ::c_int = 0x80; pub const TIOCM_DSR: ::c_int = 0x100; pub const TIOCM_CD: ::c_int = 0x40; pub const TIOCM_CAR: ::c_int = 0x40; pub const TIOCM_RNG: ::c_int = 0x80; pub const TIOCMBIC: ::c_ulong = 0x8004746b; pub const TIOCMBIS: ::c_ulong = 0x8004746c; pub const TIOCMSET: ::c_ulong = 0x8004746d; pub const TIOCSTART: ::c_uint = 0x2000746e; pub const TIOCSTOP: ::c_uint = 0x2000746f; pub const TIOCPKT: ::c_ulong = 0x80047470; pub const TIOCPKT_DATA: ::c_int = 0x0; pub const TIOCPKT_FLUSHREAD: ::c_int = 0x1; pub const TIOCPKT_FLUSHWRITE: ::c_int = 0x2; pub const TIOCPKT_STOP: ::c_int = 0x4; pub const TIOCPKT_START: ::c_int = 0x8; pub const TIOCPKT_NOSTOP: ::c_int = 0x10; pub const TIOCPKT_DOSTOP: ::c_int = 0x20; pub const TIOCPKT_IOCTL: ::c_int = 0x40; pub const TIOCNOTTY: ::c_uint = 0x20007471; pub const TIOCSTI: ::c_ulong = 0x80017472; pub const TIOCOUTQ: ::c_uint = 0x40047473; pub const TIOCSPGRP: ::c_ulong = 0x80047476; pub const TIOCGPGRP: ::c_uint = 0x40047477; pub const TIOCCDTR: ::c_uint = 0x20007478; pub const TIOCSDTR: ::c_uint = 0x20007479; pub const TIOCCBRK: ::c_uint = 0x2000747a; pub const TIOCSBRK: ::c_uint = 0x2000747b; pub const TTYDISC: ::c_int = 0x0; pub const SLIPDISC: ::c_int = 0x4; pub const PPPDISC: ::c_int = 0x5; pub const NETGRAPHDISC: ::c_int = 0x6; pub const B0: speed_t = 0; pub const B50: speed_t = 50; pub const B75: speed_t = 75; pub const B110: speed_t = 110; pub const B134: speed_t = 134; pub const B150: speed_t = 150; pub const B200: speed_t = 200; pub const B300: speed_t = 300; pub const B600: speed_t = 600; pub const B1200: speed_t = 1200; pub const B1800: speed_t = 1800; pub const B2400: speed_t = 2400; pub const B4800: speed_t = 4800; pub 
// Remaining baud-rate constants (on FreeBSD/DragonFly the Bxxx values equal
// the baud rate itself), termios hardware-flow-control flags (CRTSCTS and
// friends), the f! block of wait(2) status-decoding macros, and two extern
// blocks of libc/libutil function declarations. In the f! macros the low 7
// bits (0o177) of the status encode how the child changed state: all-ones
// means stopped, zero means normal exit, anything else is the terminating
// signal number; WSTOPSIG extracts the signal from the high byte. The second
// extern block links against libutil (#[link(name = "util")]).
const B9600: speed_t = 9600; pub const B19200: speed_t = 19200; pub const B38400: speed_t = 38400; pub const B7200: speed_t = 7200; pub const B14400: speed_t = 14400; pub const B28800: speed_t = 28800; pub const B57600: speed_t = 57600; pub const B76800: speed_t = 76800; pub const B115200: speed_t = 115200; pub const B230400: speed_t = 230400; pub const EXTA: speed_t = 19200; pub const EXTB: speed_t = 38400; pub const SEM_FAILED: *mut sem_t = 0 as *mut sem_t; pub const CRTSCTS: ::tcflag_t = 0x00030000; pub const CCTS_OFLOW: ::tcflag_t = 0x00010000; pub const CRTS_IFLOW: ::tcflag_t = 0x00020000; pub const CDTR_IFLOW: ::tcflag_t = 0x00040000; pub const CDSR_OFLOW: ::tcflag_t = 0x00080000; pub const CCAR_OFLOW: ::tcflag_t = 0x00100000; pub const VERASE2: usize = 7; pub const OCRNL: ::tcflag_t = 0x10; pub const ONOCR: ::tcflag_t = 0x20; pub const ONLRET: ::tcflag_t = 0x40; f! { pub fn WIFCONTINUED(status: ::c_int) -> bool { status == 0x13 } pub fn WSTOPSIG(status: ::c_int) -> ::c_int { status >> 8 } pub fn WIFSIGNALED(status: ::c_int) -> bool { (status & 0o177) != 0o177 && (status & 0o177) != 0 } pub fn WIFSTOPPED(status: ::c_int) -> bool { (status & 0o177) == 0o177 } } extern { pub fn lutimes(file: *const ::c_char, times: *const ::timeval) -> ::c_int; pub fn endutxent(); pub fn getutxent() -> *mut utmpx; pub fn getutxid(ut: *const utmpx) -> *mut utmpx; pub fn getutxline(ut: *const utmpx) -> *mut utmpx; pub fn pututxline(ut: *const utmpx) -> *mut utmpx; pub fn setutxent(); pub fn setresgid(rgid: ::gid_t, egid: ::gid_t, sgid: ::gid_t) -> ::c_int; pub fn setresuid(ruid: ::uid_t, euid: ::uid_t, suid: ::uid_t) -> ::c_int; } #[link(name = "util")] extern { pub fn aio_read(aiocbp: *mut aiocb) -> ::c_int; pub fn aio_write(aiocbp: *mut aiocb) -> ::c_int; pub fn aio_fsync(op: ::c_int, aiocbp: *mut aiocb) -> ::c_int; pub fn aio_error(aiocbp: *const aiocb) -> ::c_int; pub fn aio_return(aiocbp: *mut aiocb) -> ::ssize_t; pub fn aio_suspend(aiocb_list: *const *const aiocb, nitems:
// Continuation of the libutil extern block: POSIX AIO completion functions,
// directory/socket helpers (dirfd, getnameinfo, kevent), memory inspection
// (mincore), vectored positioned I/O (preadv/pwritev), the BSD sysctl(3)
// family, scheduling, sendfile(2) with the BSD sf_hdtr header/trailer form,
// signal waiting, and the start of openpty(3). All signatures mirror the
// C prototypes; pointer const-ness follows the C headers.
::c_int, timeout: *const ::timespec) -> ::c_int; pub fn aio_cancel(fd: ::c_int, aiocbp: *mut aiocb) -> ::c_int; pub fn lio_listio(mode: ::c_int, aiocb_list: *const *mut aiocb, nitems: ::c_int, sevp: *mut sigevent) -> ::c_int; pub fn dirfd(dirp: *mut ::DIR) -> ::c_int; pub fn getnameinfo(sa: *const ::sockaddr, salen: ::socklen_t, host: *mut ::c_char, hostlen: ::size_t, serv: *mut ::c_char, servlen: ::size_t, flags: ::c_int) -> ::c_int; pub fn kevent(kq: ::c_int, changelist: *const ::kevent, nchanges: ::c_int, eventlist: *mut ::kevent, nevents: ::c_int, timeout: *const ::timespec) -> ::c_int; pub fn mincore(addr: *const ::c_void, len: ::size_t, vec: *mut ::c_char) -> ::c_int; pub fn pwritev(fd: ::c_int, iov: *const ::iovec, iovcnt: ::c_int, offset: ::off_t) -> ::ssize_t; pub fn preadv(fd: ::c_int, iov: *const ::iovec, iovcnt: ::c_int, offset: ::off_t) -> ::ssize_t; pub fn sysctlnametomib(name: *const ::c_char, mibp: *mut ::c_int, sizep: *mut ::size_t) -> ::c_int; pub fn shm_open(name: *const ::c_char, oflag: ::c_int, mode: ::mode_t) -> ::c_int; pub fn sysctl(name: *const ::c_int, namelen: ::c_uint, oldp: *mut ::c_void, oldlenp: *mut ::size_t, newp: *const ::c_void, newlen: ::size_t) -> ::c_int; pub fn sysctlbyname(name: *const ::c_char, oldp: *mut ::c_void, oldlenp: *mut ::size_t, newp: *const ::c_void, newlen: ::size_t) -> ::c_int; pub fn sched_setscheduler(pid: ::pid_t, policy: ::c_int, param: *const sched_param) -> ::c_int; pub fn sched_getscheduler(pid: ::pid_t) -> ::c_int; pub fn memrchr(cx: *const ::c_void, c: ::c_int, n: ::size_t) -> *mut ::c_void; pub fn sendfile(fd: ::c_int, s: ::c_int, offset: ::off_t, nbytes: ::size_t, hdtr: *mut ::sf_hdtr, sbytes: *mut ::off_t, flags: ::c_int) -> ::c_int; pub fn sigtimedwait(set: *const sigset_t, info: *mut siginfo_t, timeout: *const ::timespec) -> ::c_int; pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> ::c_int; pub fn openpty(amaster: *mut ::c_int, aslave: *mut ::c_int, name: *mut ::c_char, termp: *mut
// More libutil externs: pseudo-terminal helpers (openpty tail, forkpty),
// the POSIX.1-2008 per-thread locale API (newlocale/uselocale/duplocale and
// the BSD-specific querylocale), accept4, the BSD non-portable pthread
// extensions (the *_np suffix marks non-portable functions), process-shared
// attribute getters/setters for cond/mutex/rwlock attributes, priority
// control, and the *at() family for dirfd-relative node creation.
termios, winp: *mut ::winsize) -> ::c_int; pub fn forkpty(amaster: *mut ::c_int, name: *mut ::c_char, termp: *mut termios, winp: *mut ::winsize) -> ::pid_t; pub fn nl_langinfo_l(item: ::nl_item, locale: ::locale_t) -> *mut ::c_char; pub fn duplocale(base: ::locale_t) -> ::locale_t; pub fn newlocale(mask: ::c_int, locale: *const ::c_char, base: ::locale_t) -> ::locale_t; pub fn uselocale(loc: ::locale_t) -> ::locale_t; pub fn querylocale(mask: ::c_int, loc: ::locale_t) -> *const ::c_char; pub fn accept4(s: ::c_int, addr: *mut ::sockaddr, addrlen: *mut ::socklen_t, flags: ::c_int) -> ::c_int; pub fn pthread_set_name_np(tid: ::pthread_t, name: *const ::c_char); pub fn pthread_attr_get_np(tid: ::pthread_t, attr: *mut ::pthread_attr_t) -> ::c_int; pub fn pthread_attr_getguardsize(attr: *const ::pthread_attr_t, guardsize: *mut ::size_t) -> ::c_int; pub fn pthread_attr_getstack(attr: *const ::pthread_attr_t, stackaddr: *mut *mut ::c_void, stacksize: *mut ::size_t) -> ::c_int; pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: ::c_int) -> ::c_int; pub fn pthread_condattr_getpshared(attr: *const pthread_condattr_t, pshared: *mut ::c_int) -> ::c_int; pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: ::c_int) -> ::c_int; pub fn pthread_mutexattr_getpshared(attr: *const pthread_mutexattr_t, pshared: *mut ::c_int) -> ::c_int; pub fn pthread_rwlockattr_getpshared(attr: *const pthread_rwlockattr_t, val: *mut ::c_int) -> ::c_int; pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, val: ::c_int) -> ::c_int; pub fn getpriority(which: ::c_int, who: ::c_int) -> ::c_int; pub fn setpriority(which: ::c_int, who: ::c_int, prio: ::c_int) -> ::c_int; pub fn fdopendir(fd: ::c_int) -> *mut ::DIR; pub fn mknodat(dirfd: ::c_int, pathname: *const ::c_char, mode: ::mode_t, dev: dev_t) -> ::c_int; pub fn mkfifoat(dirfd: ::c_int, pathname: *const ::c_char, mode: ::mode_t) -> ::c_int; pub fn pthread_condattr_getclock(attr: *const
// Tail of the extern block (condattr clock selection, sethostname — note the
// FreeBSD-specific ::c_int length parameter, timed sem/mutex waits, pipe2,
// ppoll, settimeofday), then the cfg_if! dispatch that pulls in the
// freebsd/dragonfly submodules.
// NOTE(review): the remainder of this physical line — a GitHub merge-commit
// message ("Auto merge of #723 ... SEEK_DATA and SEEK_HOLE ...") followed by
// a repeat of this file's type aliases — appears to be a concatenation
// artifact from joining two revisions of the file; in this collapsed one-line
// form it falls after the `// ...` line comment inside cfg_if!, so it is
// inert, but the duplication should be removed when the file is re-split.
pthread_condattr_t, clock_id: *mut clockid_t) -> ::c_int; pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, clock_id: clockid_t) -> ::c_int; pub fn sethostname(name: *const ::c_char, len: ::c_int) -> ::c_int; pub fn sem_timedwait(sem: *mut sem_t, abstime: *const ::timespec) -> ::c_int; pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, abstime: *const ::timespec) -> ::c_int; pub fn pipe2(fds: *mut ::c_int, flags: ::c_int) -> ::c_int; pub fn ppoll(fds: *mut ::pollfd, nfds: ::nfds_t, timeout: *const ::timespec, sigmask: *const sigset_t) -> ::c_int; pub fn settimeofday(tv: *const ::timeval, tz: *const ::timezone) -> ::c_int; } cfg_if! { if #[cfg(target_os = "freebsd")] { mod freebsd; pub use self::freebsd::*; } else if #[cfg(target_os = "dragonfly")] { mod dragonfly; pub use self::dragonfly::*; } else { // ... } } Auto merge of #723 - ncaracci:master, r=alexcrichton Add SEEK_DATA and SEEK_HOLE constants to FreeBSD and DragonFlyBSD They can be found [here](https://github.com/DragonFlyBSD/DragonFlyBSD/blob/725edadf86d63f56a584adf23265845c8590d734/sys/sys/unistd.h#L126) for DragonFlyBSD and [here](https://github.com/freebsd/freebsd/blob/f5d95e1f8d32db4ccccfd5ad9cecb21ed07a695d/sys/sys/unistd.h) for FreeBSD. pub type dev_t = u32; pub type mode_t = u16; pub type pthread_attr_t = *mut ::c_void; pub type rlim_t = i64; pub type pthread_mutex_t = *mut ::c_void; pub type pthread_mutexattr_t = *mut ::c_void; pub type pthread_cond_t = *mut ::c_void; pub type pthread_condattr_t = *mut ::c_void; pub type pthread_rwlock_t = *mut ::c_void; pub type pthread_rwlockattr_t = *mut ::c_void; pub type pthread_key_t = ::c_int; pub type tcflag_t = ::c_uint; pub type speed_t = ::c_uint; pub type nl_item = ::c_int; pub type id_t = i64; pub enum timezone {} s!
// s! macro body: C struct layouts shared by FreeBSD and DragonFly. Field
// order and padding (__unused*, __ss_pad*, _pad) must exactly mirror the C
// headers, since these types cross the FFI boundary by layout. Includes
// glob_t, kevent, sockaddr_storage (128 bytes, 8-byte aligned via
// __ss_align: i64), addrinfo, sigset_t/siginfo_t/sigaction/stack_t,
// sched_param, Dl_info, sockaddr_in, termios, and the start of flock.
{ pub struct glob_t { pub gl_pathc: ::size_t, pub gl_matchc: ::size_t, pub gl_offs: ::size_t, pub gl_flags: ::c_int, pub gl_pathv: *mut *mut ::c_char, __unused3: *mut ::c_void, __unused4: *mut ::c_void, __unused5: *mut ::c_void, __unused6: *mut ::c_void, __unused7: *mut ::c_void, __unused8: *mut ::c_void, } pub struct kevent { pub ident: ::uintptr_t, pub filter: ::c_short, pub flags: ::c_ushort, pub fflags: ::c_uint, pub data: ::intptr_t, pub udata: *mut ::c_void, } pub struct sockaddr_storage { pub ss_len: u8, pub ss_family: ::sa_family_t, __ss_pad1: [u8; 6], __ss_align: i64, __ss_pad2: [u8; 112], } pub struct addrinfo { pub ai_flags: ::c_int, pub ai_family: ::c_int, pub ai_socktype: ::c_int, pub ai_protocol: ::c_int, pub ai_addrlen: ::socklen_t, pub ai_canonname: *mut ::c_char, pub ai_addr: *mut ::sockaddr, pub ai_next: *mut addrinfo, } pub struct sigset_t { bits: [u32; 4], } pub struct siginfo_t { pub si_signo: ::c_int, pub si_errno: ::c_int, pub si_code: ::c_int, pub si_pid: ::pid_t, pub si_uid: ::uid_t, pub si_status: ::c_int, pub si_addr: *mut ::c_void, _pad: [::c_int; 12], } pub struct sigaction { pub sa_sigaction: ::sighandler_t, pub sa_flags: ::c_int, pub sa_mask: sigset_t, } pub struct stack_t { // In FreeBSD 11 and later, ss_sp is actually a void* pub ss_sp: *mut ::c_char, pub ss_size: ::size_t, pub ss_flags: ::c_int, } pub struct sched_param { pub sched_priority: ::c_int, } pub struct Dl_info { pub dli_fname: *const ::c_char, pub dli_fbase: *mut ::c_void, pub dli_sname: *const ::c_char, pub dli_saddr: *mut ::c_void, } pub struct sockaddr_in { pub sin_len: u8, pub sin_family: ::sa_family_t, pub sin_port: ::in_port_t, pub sin_addr: ::in_addr, pub sin_zero: [::c_char; 8], } pub struct termios { pub c_iflag: ::tcflag_t, pub c_oflag: ::tcflag_t, pub c_cflag: ::tcflag_t, pub c_lflag: ::tcflag_t, pub c_cc: [::cc_t; ::NCCS], pub c_ispeed: ::speed_t, pub c_ospeed: ::speed_t, } pub struct flock { pub l_start: ::off_t, pub l_len: ::off_t, pub l_pid: ::pid_t, pub
// Tail of the s! block: flock (l_sysid is cfg'd out on DragonFly, which lacks
// that field), sf_hdtr for sendfile, and the full C lconv locale layout.
// Then begins the flat constant section: POSIX AIO limits and lio_listio
// modes, sigevent notification kinds (SIGEV_KEVENT is BSD-specific), and the
// start of the nl_langinfo(3) nl_item codes.
l_type: ::c_short, pub l_whence: ::c_short, #[cfg(not(target_os = "dragonfly"))] pub l_sysid: ::c_int, } pub struct sf_hdtr { pub headers: *mut ::iovec, pub hdr_cnt: ::c_int, pub trailers: *mut ::iovec, pub trl_cnt: ::c_int, } pub struct lconv { pub decimal_point: *mut ::c_char, pub thousands_sep: *mut ::c_char, pub grouping: *mut ::c_char, pub int_curr_symbol: *mut ::c_char, pub currency_symbol: *mut ::c_char, pub mon_decimal_point: *mut ::c_char, pub mon_thousands_sep: *mut ::c_char, pub mon_grouping: *mut ::c_char, pub positive_sign: *mut ::c_char, pub negative_sign: *mut ::c_char, pub int_frac_digits: ::c_char, pub frac_digits: ::c_char, pub p_cs_precedes: ::c_char, pub p_sep_by_space: ::c_char, pub n_cs_precedes: ::c_char, pub n_sep_by_space: ::c_char, pub p_sign_posn: ::c_char, pub n_sign_posn: ::c_char, pub int_p_cs_precedes: ::c_char, pub int_n_cs_precedes: ::c_char, pub int_p_sep_by_space: ::c_char, pub int_n_sep_by_space: ::c_char, pub int_p_sign_posn: ::c_char, pub int_n_sign_posn: ::c_char, } } pub const AIO_LISTIO_MAX: ::c_int = 16; pub const AIO_CANCELED: ::c_int = 1; pub const AIO_NOTCANCELED: ::c_int = 2; pub const AIO_ALLDONE: ::c_int = 3; pub const LIO_NOP: ::c_int = 0; pub const LIO_WRITE: ::c_int = 1; pub const LIO_READ: ::c_int = 2; pub const LIO_WAIT: ::c_int = 1; pub const LIO_NOWAIT: ::c_int = 0; pub const SIGEV_NONE: ::c_int = 0; pub const SIGEV_SIGNAL: ::c_int = 1; pub const SIGEV_THREAD: ::c_int = 2; pub const SIGEV_KEVENT: ::c_int = 3; pub const CODESET: ::nl_item = 0; pub const D_T_FMT: ::nl_item = 1; pub const D_FMT: ::nl_item = 2; pub const T_FMT: ::nl_item = 3; pub const T_FMT_AMPM: ::nl_item = 4; pub const AM_STR: ::nl_item = 5; pub const PM_STR: ::nl_item = 6; pub const DAY_1: ::nl_item = 7; pub const DAY_2: ::nl_item = 8; pub const DAY_3: ::nl_item = 9; pub const DAY_4: ::nl_item = 10; pub const DAY_5: ::nl_item = 11; pub const DAY_6: ::nl_item = 12; pub const DAY_7: ::nl_item = 13; pub const ABDAY_1: ::nl_item = 14; pub const
// nl_item codes continued: abbreviated day names, full and abbreviated month
// names, era/alt-digit items, radix/thousands separators, yes/no expressions,
// and the BSD extensions D_MD_ORDER and ALTMON_* (stand-alone month names).
// Values are sequential per the FreeBSD <langinfo.h> numbering, followed by
// the standard C exit codes.
ABDAY_2: ::nl_item = 15; pub const ABDAY_3: ::nl_item = 16; pub const ABDAY_4: ::nl_item = 17; pub const ABDAY_5: ::nl_item = 18; pub const ABDAY_6: ::nl_item = 19; pub const ABDAY_7: ::nl_item = 20; pub const MON_1: ::nl_item = 21; pub const MON_2: ::nl_item = 22; pub const MON_3: ::nl_item = 23; pub const MON_4: ::nl_item = 24; pub const MON_5: ::nl_item = 25; pub const MON_6: ::nl_item = 26; pub const MON_7: ::nl_item = 27; pub const MON_8: ::nl_item = 28; pub const MON_9: ::nl_item = 29; pub const MON_10: ::nl_item = 30; pub const MON_11: ::nl_item = 31; pub const MON_12: ::nl_item = 32; pub const ABMON_1: ::nl_item = 33; pub const ABMON_2: ::nl_item = 34; pub const ABMON_3: ::nl_item = 35; pub const ABMON_4: ::nl_item = 36; pub const ABMON_5: ::nl_item = 37; pub const ABMON_6: ::nl_item = 38; pub const ABMON_7: ::nl_item = 39; pub const ABMON_8: ::nl_item = 40; pub const ABMON_9: ::nl_item = 41; pub const ABMON_10: ::nl_item = 42; pub const ABMON_11: ::nl_item = 43; pub const ABMON_12: ::nl_item = 44; pub const ERA: ::nl_item = 45; pub const ERA_D_FMT: ::nl_item = 46; pub const ERA_D_T_FMT: ::nl_item = 47; pub const ERA_T_FMT: ::nl_item = 48; pub const ALT_DIGITS: ::nl_item = 49; pub const RADIXCHAR: ::nl_item = 50; pub const THOUSEP: ::nl_item = 51; pub const YESEXPR: ::nl_item = 52; pub const NOEXPR: ::nl_item = 53; pub const YESSTR: ::nl_item = 54; pub const NOSTR: ::nl_item = 55; pub const CRNCYSTR: ::nl_item = 56; pub const D_MD_ORDER: ::nl_item = 57; pub const ALTMON_1: ::nl_item = 58; pub const ALTMON_2: ::nl_item = 59; pub const ALTMON_3: ::nl_item = 60; pub const ALTMON_4: ::nl_item = 61; pub const ALTMON_5: ::nl_item = 62; pub const ALTMON_6: ::nl_item = 63; pub const ALTMON_7: ::nl_item = 64; pub const ALTMON_8: ::nl_item = 65; pub const ALTMON_9: ::nl_item = 66; pub const ALTMON_10: ::nl_item = 67; pub const ALTMON_11: ::nl_item = 68; pub const ALTMON_12: ::nl_item = 69; pub const EXIT_FAILURE: ::c_int = 1; pub const EXIT_SUCCESS: ::c_int = 0; pub
// stdio constants (EOF, SEEK_*, buffering modes, size limits), including the
// FreeBSD/DragonFly SEEK_DATA=3 and SEEK_HOLE=4 whence values for sparse-file
// navigation; open(2) flags; stat(2) file-type (S_IF*) and permission (S_I*)
// mode bits, given in decimal (e.g. S_IFMT = 61440 = 0o170000); access(2)
// modes; standard fd numbers; lockf(2) ops; and the first signal numbers.
const EOF: ::c_int = -1; pub const SEEK_SET: ::c_int = 0; pub const SEEK_CUR: ::c_int = 1; pub const SEEK_END: ::c_int = 2; pub const SEEK_DATA: ::c_int = 3; pub const SEEK_HOLE: ::c_int = 4; pub const _IOFBF: ::c_int = 0; pub const _IONBF: ::c_int = 2; pub const _IOLBF: ::c_int = 1; pub const BUFSIZ: ::c_uint = 1024; pub const FOPEN_MAX: ::c_uint = 20; pub const FILENAME_MAX: ::c_uint = 1024; pub const L_tmpnam: ::c_uint = 1024; pub const TMP_MAX: ::c_uint = 308915776; pub const O_NOCTTY: ::c_int = 32768; pub const O_DIRECT: ::c_int = 0x00010000; pub const S_IFIFO: mode_t = 4096; pub const S_IFCHR: mode_t = 8192; pub const S_IFBLK: mode_t = 24576; pub const S_IFDIR: mode_t = 16384; pub const S_IFREG: mode_t = 32768; pub const S_IFLNK: mode_t = 40960; pub const S_IFSOCK: mode_t = 49152; pub const S_IFMT: mode_t = 61440; pub const S_IEXEC: mode_t = 64; pub const S_IWRITE: mode_t = 128; pub const S_IREAD: mode_t = 256; pub const S_IRWXU: mode_t = 448; pub const S_IXUSR: mode_t = 64; pub const S_IWUSR: mode_t = 128; pub const S_IRUSR: mode_t = 256; pub const S_IRWXG: mode_t = 56; pub const S_IXGRP: mode_t = 8; pub const S_IWGRP: mode_t = 16; pub const S_IRGRP: mode_t = 32; pub const S_IRWXO: mode_t = 7; pub const S_IXOTH: mode_t = 1; pub const S_IWOTH: mode_t = 2; pub const S_IROTH: mode_t = 4; pub const F_OK: ::c_int = 0; pub const R_OK: ::c_int = 4; pub const W_OK: ::c_int = 2; pub const X_OK: ::c_int = 1; pub const STDIN_FILENO: ::c_int = 0; pub const STDOUT_FILENO: ::c_int = 1; pub const STDERR_FILENO: ::c_int = 2; pub const F_LOCK: ::c_int = 1; pub const F_TEST: ::c_int = 3; pub const F_TLOCK: ::c_int = 2; pub const F_ULOCK: ::c_int = 0; pub const F_DUPFD_CLOEXEC: ::c_int = 17; pub const SIGHUP: ::c_int = 1; pub const SIGINT: ::c_int = 2; pub const SIGQUIT: ::c_int = 3; pub const SIGILL: ::c_int = 4; pub const SIGABRT: ::c_int = 6; pub const SIGEMT: ::c_int = 7; pub const SIGFPE: ::c_int = 8; pub const SIGKILL: ::c_int = 9; pub const SIGSEGV: ::c_int = 11; pub
// Remaining classic signal numbers, mmap(2) protection and mapping flags
// (MAP_FAILED is the all-ones pointer, i.e. C's (void *)-1), mlockall and
// msync flags, then the BSD errno table. Note EAGAIN and EWOULDBLOCK share
// the value 35, matching the BSD <errno.h> alias.
const SIGPIPE: ::c_int = 13; pub const SIGALRM: ::c_int = 14; pub const SIGTERM: ::c_int = 15; pub const PROT_NONE: ::c_int = 0; pub const PROT_READ: ::c_int = 1; pub const PROT_WRITE: ::c_int = 2; pub const PROT_EXEC: ::c_int = 4; pub const MAP_FILE: ::c_int = 0x0000; pub const MAP_SHARED: ::c_int = 0x0001; pub const MAP_PRIVATE: ::c_int = 0x0002; pub const MAP_FIXED: ::c_int = 0x0010; pub const MAP_ANON: ::c_int = 0x1000; pub const MAP_FAILED: *mut ::c_void = !0 as *mut ::c_void; pub const MCL_CURRENT: ::c_int = 0x0001; pub const MCL_FUTURE: ::c_int = 0x0002; pub const MS_SYNC: ::c_int = 0x0000; pub const MS_ASYNC: ::c_int = 0x0001; pub const MS_INVALIDATE: ::c_int = 0x0002; pub const EPERM: ::c_int = 1; pub const ENOENT: ::c_int = 2; pub const ESRCH: ::c_int = 3; pub const EINTR: ::c_int = 4; pub const EIO: ::c_int = 5; pub const ENXIO: ::c_int = 6; pub const E2BIG: ::c_int = 7; pub const ENOEXEC: ::c_int = 8; pub const EBADF: ::c_int = 9; pub const ECHILD: ::c_int = 10; pub const EDEADLK: ::c_int = 11; pub const ENOMEM: ::c_int = 12; pub const EACCES: ::c_int = 13; pub const EFAULT: ::c_int = 14; pub const ENOTBLK: ::c_int = 15; pub const EBUSY: ::c_int = 16; pub const EEXIST: ::c_int = 17; pub const EXDEV: ::c_int = 18; pub const ENODEV: ::c_int = 19; pub const ENOTDIR: ::c_int = 20; pub const EISDIR: ::c_int = 21; pub const EINVAL: ::c_int = 22; pub const ENFILE: ::c_int = 23; pub const EMFILE: ::c_int = 24; pub const ENOTTY: ::c_int = 25; pub const ETXTBSY: ::c_int = 26; pub const EFBIG: ::c_int = 27; pub const ENOSPC: ::c_int = 28; pub const ESPIPE: ::c_int = 29; pub const EROFS: ::c_int = 30; pub const EMLINK: ::c_int = 31; pub const EPIPE: ::c_int = 32; pub const EDOM: ::c_int = 33; pub const ERANGE: ::c_int = 34; pub const EAGAIN: ::c_int = 35; pub const EWOULDBLOCK: ::c_int = 35; pub const EINPROGRESS: ::c_int = 36; pub const EALREADY: ::c_int = 37; pub const ENOTSOCK: ::c_int = 38; pub const EDESTADDRREQ: ::c_int = 39; pub const EMSGSIZE: ::c_int = 40;
// errno table continued (network, RPC, and BSD-specific codes — including
// EDOOFUS). ENOTSUP is defined as an alias of EOPNOTSUPP, matching the BSD
// headers. Ends with POLLSTANDARD, the BSD catch-all mask of every standard
// poll(2) event bit, spilling onto the next line.
pub const EPROTOTYPE: ::c_int = 41; pub const ENOPROTOOPT: ::c_int = 42; pub const EPROTONOSUPPORT: ::c_int = 43; pub const ESOCKTNOSUPPORT: ::c_int = 44; pub const EOPNOTSUPP: ::c_int = 45; pub const ENOTSUP: ::c_int = EOPNOTSUPP; pub const EPFNOSUPPORT: ::c_int = 46; pub const EAFNOSUPPORT: ::c_int = 47; pub const EADDRINUSE: ::c_int = 48; pub const EADDRNOTAVAIL: ::c_int = 49; pub const ENETDOWN: ::c_int = 50; pub const ENETUNREACH: ::c_int = 51; pub const ENETRESET: ::c_int = 52; pub const ECONNABORTED: ::c_int = 53; pub const ECONNRESET: ::c_int = 54; pub const ENOBUFS: ::c_int = 55; pub const EISCONN: ::c_int = 56; pub const ENOTCONN: ::c_int = 57; pub const ESHUTDOWN: ::c_int = 58; pub const ETOOMANYREFS: ::c_int = 59; pub const ETIMEDOUT: ::c_int = 60; pub const ECONNREFUSED: ::c_int = 61; pub const ELOOP: ::c_int = 62; pub const ENAMETOOLONG: ::c_int = 63; pub const EHOSTDOWN: ::c_int = 64; pub const EHOSTUNREACH: ::c_int = 65; pub const ENOTEMPTY: ::c_int = 66; pub const EPROCLIM: ::c_int = 67; pub const EUSERS: ::c_int = 68; pub const EDQUOT: ::c_int = 69; pub const ESTALE: ::c_int = 70; pub const EREMOTE: ::c_int = 71; pub const EBADRPC: ::c_int = 72; pub const ERPCMISMATCH: ::c_int = 73; pub const EPROGUNAVAIL: ::c_int = 74; pub const EPROGMISMATCH: ::c_int = 75; pub const EPROCUNAVAIL: ::c_int = 76; pub const ENOLCK: ::c_int = 77; pub const ENOSYS: ::c_int = 78; pub const EFTYPE: ::c_int = 79; pub const EAUTH: ::c_int = 80; pub const ENEEDAUTH: ::c_int = 81; pub const EIDRM: ::c_int = 82; pub const ENOMSG: ::c_int = 83; pub const EOVERFLOW: ::c_int = 84; pub const ECANCELED: ::c_int = 85; pub const EILSEQ: ::c_int = 86; pub const ENOATTR: ::c_int = 87; pub const EDOOFUS: ::c_int = 88; pub const EBADMSG: ::c_int = 89; pub const EMULTIHOP: ::c_int = 90; pub const ENOLINK: ::c_int = 91; pub const EPROTO: ::c_int = 92; pub const POLLSTANDARD: ::c_short = ::POLLIN | ::POLLPRI | ::POLLOUT | ::POLLRDNORM | ::POLLRDBAND | ::POLLWRBAND | ::POLLERR | ::POLLHUP
// POLLSTANDARD tail, then fcntl(2) command numbers, glob(3) flags and
// negative error returns, posix_madvise/madvise advice values (the MADV_*
// set includes the BSD-specific NOSYNC/AUTOSYNC/NOCORE/CORE), pthread
// process-shared/detach-state values, and the rlimit resource numbers
// (RLIMIT_AS aliases the BSD RLIMIT_VMEM; RLIM_INFINITY is i64::MAX).
| ::POLLNVAL; pub const EAI_SYSTEM: ::c_int = 11; pub const F_DUPFD: ::c_int = 0; pub const F_GETFD: ::c_int = 1; pub const F_SETFD: ::c_int = 2; pub const F_GETFL: ::c_int = 3; pub const F_SETFL: ::c_int = 4; pub const SIGTRAP: ::c_int = 5; pub const GLOB_APPEND : ::c_int = 0x0001; pub const GLOB_DOOFFS : ::c_int = 0x0002; pub const GLOB_ERR : ::c_int = 0x0004; pub const GLOB_MARK : ::c_int = 0x0008; pub const GLOB_NOCHECK : ::c_int = 0x0010; pub const GLOB_NOSORT : ::c_int = 0x0020; pub const GLOB_NOESCAPE: ::c_int = 0x2000; pub const GLOB_NOSPACE : ::c_int = -1; pub const GLOB_ABORTED : ::c_int = -2; pub const GLOB_NOMATCH : ::c_int = -3; pub const POSIX_MADV_NORMAL: ::c_int = 0; pub const POSIX_MADV_RANDOM: ::c_int = 1; pub const POSIX_MADV_SEQUENTIAL: ::c_int = 2; pub const POSIX_MADV_WILLNEED: ::c_int = 3; pub const POSIX_MADV_DONTNEED: ::c_int = 4; pub const PTHREAD_PROCESS_PRIVATE: ::c_int = 0; pub const PTHREAD_PROCESS_SHARED: ::c_int = 1; pub const PTHREAD_CREATE_JOINABLE: ::c_int = 0; pub const PTHREAD_CREATE_DETACHED: ::c_int = 1; pub const RLIMIT_CPU: ::c_int = 0; pub const RLIMIT_FSIZE: ::c_int = 1; pub const RLIMIT_DATA: ::c_int = 2; pub const RLIMIT_STACK: ::c_int = 3; pub const RLIMIT_CORE: ::c_int = 4; pub const RLIMIT_RSS: ::c_int = 5; pub const RLIMIT_MEMLOCK: ::c_int = 6; pub const RLIMIT_NPROC: ::c_int = 7; pub const RLIMIT_NOFILE: ::c_int = 8; pub const RLIMIT_SBSIZE: ::c_int = 9; pub const RLIMIT_VMEM: ::c_int = 10; pub const RLIMIT_AS: ::c_int = RLIMIT_VMEM; pub const RLIM_INFINITY: rlim_t = 0x7fff_ffff_ffff_ffff; pub const RUSAGE_SELF: ::c_int = 0; pub const RUSAGE_CHILDREN: ::c_int = -1; pub const MADV_NORMAL: ::c_int = 0; pub const MADV_RANDOM: ::c_int = 1; pub const MADV_SEQUENTIAL: ::c_int = 2; pub const MADV_WILLNEED: ::c_int = 3; pub const MADV_DONTNEED: ::c_int = 4; pub const MADV_FREE: ::c_int = 5; pub const MADV_NOSYNC: ::c_int = 6; pub const MADV_AUTOSYNC: ::c_int = 7; pub const MADV_NOCORE: ::c_int = 8; pub const MADV_CORE:
// mincore(2) result bits, then the socket address-family (AF_*) numbering.
// The pseudo_AF_* entries occupy kernel-internal slots that are never valid
// in a sockaddr but keep the numbering aligned with <sys/socket.h>; several
// families are aliases (AF_UNIX=AF_LOCAL, AF_OSI=AF_ISO, AF_E164=AF_ISDN).
// The protocol-family PF_* constants that follow mirror AF_* one-to-one.
::c_int = 9; pub const MINCORE_INCORE: ::c_int = 0x1; pub const MINCORE_REFERENCED: ::c_int = 0x2; pub const MINCORE_MODIFIED: ::c_int = 0x4; pub const MINCORE_REFERENCED_OTHER: ::c_int = 0x8; pub const MINCORE_MODIFIED_OTHER: ::c_int = 0x10; pub const MINCORE_SUPER: ::c_int = 0x20; pub const AF_UNSPEC: ::c_int = 0; pub const AF_LOCAL: ::c_int = 1; pub const AF_UNIX: ::c_int = AF_LOCAL; pub const AF_INET: ::c_int = 2; pub const AF_IMPLINK: ::c_int = 3; pub const AF_PUP: ::c_int = 4; pub const AF_CHAOS: ::c_int = 5; pub const AF_NETBIOS: ::c_int = 6; pub const AF_ISO: ::c_int = 7; pub const AF_OSI: ::c_int = AF_ISO; pub const AF_ECMA: ::c_int = 8; pub const AF_DATAKIT: ::c_int = 9; pub const AF_CCITT: ::c_int = 10; pub const AF_SNA: ::c_int = 11; pub const AF_DECnet: ::c_int = 12; pub const AF_DLI: ::c_int = 13; pub const AF_LAT: ::c_int = 14; pub const AF_HYLINK: ::c_int = 15; pub const AF_APPLETALK: ::c_int = 16; pub const AF_ROUTE: ::c_int = 17; pub const AF_LINK: ::c_int = 18; pub const pseudo_AF_XTP: ::c_int = 19; pub const AF_COIP: ::c_int = 20; pub const AF_CNT: ::c_int = 21; pub const pseudo_AF_RTIP: ::c_int = 22; pub const AF_IPX: ::c_int = 23; pub const AF_SIP: ::c_int = 24; pub const pseudo_AF_PIP: ::c_int = 25; pub const AF_ISDN: ::c_int = 26; pub const AF_E164: ::c_int = AF_ISDN; pub const pseudo_AF_KEY: ::c_int = 27; pub const AF_INET6: ::c_int = 28; pub const AF_NATM: ::c_int = 29; pub const AF_ATM: ::c_int = 30; pub const pseudo_AF_HDRCMPLT: ::c_int = 31; pub const AF_NETGRAPH: ::c_int = 32; pub const PF_UNSPEC: ::c_int = AF_UNSPEC; pub const PF_LOCAL: ::c_int = AF_LOCAL; pub const PF_UNIX: ::c_int = PF_LOCAL; pub const PF_INET: ::c_int = AF_INET; pub const PF_IMPLINK: ::c_int = AF_IMPLINK; pub const PF_PUP: ::c_int = AF_PUP; pub const PF_CHAOS: ::c_int = AF_CHAOS; pub const PF_NETBIOS: ::c_int = AF_NETBIOS; pub const PF_ISO: ::c_int = AF_ISO; pub const PF_OSI: ::c_int = AF_ISO; pub const PF_ECMA: ::c_int = AF_ECMA; pub const PF_DATAKIT: ::c_int =
// Remaining PF_* aliases, then socket-layer constants: send/recv MSG_* flags
// (including the BSD-only MSG_EOF), SCM_TIMESTAMP, socket types, the BSD
// SOCK_CLOEXEC/SOCK_NONBLOCK type flags for socket(2)/accept4(2), and the
// first IP/IPv6/TCP socket-option numbers, ending with the start of
// SOL_SOCKET.
AF_DATAKIT; pub const PF_CCITT: ::c_int = AF_CCITT; pub const PF_SNA: ::c_int = AF_SNA; pub const PF_DECnet: ::c_int = AF_DECnet; pub const PF_DLI: ::c_int = AF_DLI; pub const PF_LAT: ::c_int = AF_LAT; pub const PF_HYLINK: ::c_int = AF_HYLINK; pub const PF_APPLETALK: ::c_int = AF_APPLETALK; pub const PF_ROUTE: ::c_int = AF_ROUTE; pub const PF_LINK: ::c_int = AF_LINK; pub const PF_XTP: ::c_int = pseudo_AF_XTP; pub const PF_COIP: ::c_int = AF_COIP; pub const PF_CNT: ::c_int = AF_CNT; pub const PF_SIP: ::c_int = AF_SIP; pub const PF_IPX: ::c_int = AF_IPX; pub const PF_RTIP: ::c_int = pseudo_AF_RTIP; pub const PF_PIP: ::c_int = pseudo_AF_PIP; pub const PF_ISDN: ::c_int = AF_ISDN; pub const PF_KEY: ::c_int = pseudo_AF_KEY; pub const PF_INET6: ::c_int = AF_INET6; pub const PF_NATM: ::c_int = AF_NATM; pub const PF_ATM: ::c_int = AF_ATM; pub const PF_NETGRAPH: ::c_int = AF_NETGRAPH; pub const SOMAXCONN: ::c_int = 128; pub const MSG_OOB: ::c_int = 0x00000001; pub const MSG_PEEK: ::c_int = 0x00000002; pub const MSG_DONTROUTE: ::c_int = 0x00000004; pub const MSG_EOR: ::c_int = 0x00000008; pub const MSG_TRUNC: ::c_int = 0x00000010; pub const MSG_CTRUNC: ::c_int = 0x00000020; pub const MSG_WAITALL: ::c_int = 0x00000040; pub const MSG_DONTWAIT: ::c_int = 0x00000080; pub const MSG_EOF: ::c_int = 0x00000100; pub const SCM_TIMESTAMP: ::c_int = 0x02; pub const SOCK_STREAM: ::c_int = 1; pub const SOCK_DGRAM: ::c_int = 2; pub const SOCK_RAW: ::c_int = 3; pub const SOCK_RDM: ::c_int = 4; pub const SOCK_SEQPACKET: ::c_int = 5; pub const SOCK_CLOEXEC: ::c_int = 0x10000000; pub const SOCK_NONBLOCK: ::c_int = 0x20000000; pub const SOCK_MAXADDRLEN: ::c_int = 255; pub const IP_TTL: ::c_int = 4; pub const IP_HDRINCL: ::c_int = 2; pub const IP_ADD_MEMBERSHIP: ::c_int = 12; pub const IP_DROP_MEMBERSHIP: ::c_int = 13; pub const IPV6_JOIN_GROUP: ::c_int = 12; pub const IPV6_LEAVE_GROUP: ::c_int = 13; pub const TCP_NODELAY: ::c_int = 1; pub const TCP_KEEPIDLE: ::c_int = 256; pub const SOL_SOCKET:
// SOL_SOCKET-level socket options (SO_*, including the BSD extensions
// SO_REUSEPORT, SO_NOSIGPIPE, SO_ACCEPTFILTER), shutdown(2) modes, flock(2)
// operation flags, additional BSD mmap flags (MAP_STACK, MAP_NOSYNC,
// MAP_NOCORE, etc.), IPPROTO_RAW, and the start of the pathconf(2) _PC_*
// name table.
::c_int = 0xffff; pub const SO_DEBUG: ::c_int = 0x01; pub const SO_ACCEPTCONN: ::c_int = 0x0002; pub const SO_REUSEADDR: ::c_int = 0x0004; pub const SO_KEEPALIVE: ::c_int = 0x0008; pub const SO_DONTROUTE: ::c_int = 0x0010; pub const SO_BROADCAST: ::c_int = 0x0020; pub const SO_USELOOPBACK: ::c_int = 0x0040; pub const SO_LINGER: ::c_int = 0x0080; pub const SO_OOBINLINE: ::c_int = 0x0100; pub const SO_REUSEPORT: ::c_int = 0x0200; pub const SO_TIMESTAMP: ::c_int = 0x0400; pub const SO_NOSIGPIPE: ::c_int = 0x0800; pub const SO_ACCEPTFILTER: ::c_int = 0x1000; pub const SO_SNDBUF: ::c_int = 0x1001; pub const SO_RCVBUF: ::c_int = 0x1002; pub const SO_SNDLOWAT: ::c_int = 0x1003; pub const SO_RCVLOWAT: ::c_int = 0x1004; pub const SO_SNDTIMEO: ::c_int = 0x1005; pub const SO_RCVTIMEO: ::c_int = 0x1006; pub const SO_ERROR: ::c_int = 0x1007; pub const SO_TYPE: ::c_int = 0x1008; pub const IFF_LOOPBACK: ::c_int = 0x8; pub const SHUT_RD: ::c_int = 0; pub const SHUT_WR: ::c_int = 1; pub const SHUT_RDWR: ::c_int = 2; pub const LOCK_SH: ::c_int = 1; pub const LOCK_EX: ::c_int = 2; pub const LOCK_NB: ::c_int = 4; pub const LOCK_UN: ::c_int = 8; pub const MAP_COPY: ::c_int = 0x0002; pub const MAP_RENAME: ::c_int = 0x0020; pub const MAP_NORESERVE: ::c_int = 0x0040; pub const MAP_HASSEMAPHORE: ::c_int = 0x0200; pub const MAP_STACK: ::c_int = 0x0400; pub const MAP_NOSYNC: ::c_int = 0x0800; pub const MAP_NOCORE: ::c_int = 0x020000; pub const IPPROTO_RAW: ::c_int = 255; pub const _PC_LINK_MAX: ::c_int = 1; pub const _PC_MAX_CANON: ::c_int = 2; pub const _PC_MAX_INPUT: ::c_int = 3; pub const _PC_NAME_MAX: ::c_int = 4; pub const _PC_PATH_MAX: ::c_int = 5; pub const _PC_PIPE_BUF: ::c_int = 6; pub const _PC_CHOWN_RESTRICTED: ::c_int = 7; pub const _PC_NO_TRUNC: ::c_int = 8; pub const _PC_VDISABLE: ::c_int = 9; pub const _PC_ALLOC_SIZE_MIN: ::c_int = 10; pub const _PC_FILESIZEBITS: ::c_int = 12; pub const _PC_REC_INCR_XFER_SIZE: ::c_int = 14; pub const _PC_REC_MAX_XFER_SIZE: ::c_int = 15; pub
const _PC_REC_MIN_XFER_SIZE: ::c_int = 16; pub const _PC_REC_XFER_ALIGN: ::c_int = 17; pub const _PC_SYMLINK_MAX: ::c_int = 18; pub const _PC_MIN_HOLE_SIZE: ::c_int = 21; pub const _PC_ASYNC_IO: ::c_int = 53; pub const _PC_PRIO_IO: ::c_int = 54; pub const _PC_SYNC_IO: ::c_int = 55; pub const _PC_ACL_EXTENDED: ::c_int = 59; pub const _PC_ACL_PATH_MAX: ::c_int = 60; pub const _PC_CAP_PRESENT: ::c_int = 61; pub const _PC_INF_PRESENT: ::c_int = 62; pub const _PC_MAC_PRESENT: ::c_int = 63; pub const _SC_ARG_MAX: ::c_int = 1; pub const _SC_CHILD_MAX: ::c_int = 2; pub const _SC_CLK_TCK: ::c_int = 3; pub const _SC_NGROUPS_MAX: ::c_int = 4; pub const _SC_OPEN_MAX: ::c_int = 5; pub const _SC_JOB_CONTROL: ::c_int = 6; pub const _SC_SAVED_IDS: ::c_int = 7; pub const _SC_VERSION: ::c_int = 8; pub const _SC_BC_BASE_MAX: ::c_int = 9; pub const _SC_BC_DIM_MAX: ::c_int = 10; pub const _SC_BC_SCALE_MAX: ::c_int = 11; pub const _SC_BC_STRING_MAX: ::c_int = 12; pub const _SC_COLL_WEIGHTS_MAX: ::c_int = 13; pub const _SC_EXPR_NEST_MAX: ::c_int = 14; pub const _SC_LINE_MAX: ::c_int = 15; pub const _SC_RE_DUP_MAX: ::c_int = 16; pub const _SC_2_VERSION: ::c_int = 17; pub const _SC_2_C_BIND: ::c_int = 18; pub const _SC_2_C_DEV: ::c_int = 19; pub const _SC_2_CHAR_TERM: ::c_int = 20; pub const _SC_2_FORT_DEV: ::c_int = 21; pub const _SC_2_FORT_RUN: ::c_int = 22; pub const _SC_2_LOCALEDEF: ::c_int = 23; pub const _SC_2_SW_DEV: ::c_int = 24; pub const _SC_2_UPE: ::c_int = 25; pub const _SC_STREAM_MAX: ::c_int = 26; pub const _SC_TZNAME_MAX: ::c_int = 27; pub const _SC_ASYNCHRONOUS_IO: ::c_int = 28; pub const _SC_MAPPED_FILES: ::c_int = 29; pub const _SC_MEMLOCK: ::c_int = 30; pub const _SC_MEMLOCK_RANGE: ::c_int = 31; pub const _SC_MEMORY_PROTECTION: ::c_int = 32; pub const _SC_MESSAGE_PASSING: ::c_int = 33; pub const _SC_PRIORITIZED_IO: ::c_int = 34; pub const _SC_PRIORITY_SCHEDULING: ::c_int = 35; pub const _SC_REALTIME_SIGNALS: ::c_int = 36; pub const _SC_SEMAPHORES: ::c_int = 37; pub const 
_SC_FSYNC: ::c_int = 38; pub const _SC_SHARED_MEMORY_OBJECTS: ::c_int = 39; pub const _SC_SYNCHRONIZED_IO: ::c_int = 40; pub const _SC_TIMERS: ::c_int = 41; pub const _SC_AIO_LISTIO_MAX: ::c_int = 42; pub const _SC_AIO_MAX: ::c_int = 43; pub const _SC_AIO_PRIO_DELTA_MAX: ::c_int = 44; pub const _SC_DELAYTIMER_MAX: ::c_int = 45; pub const _SC_MQ_OPEN_MAX: ::c_int = 46; pub const _SC_PAGESIZE: ::c_int = 47; pub const _SC_PAGE_SIZE: ::c_int = _SC_PAGESIZE; pub const _SC_RTSIG_MAX: ::c_int = 48; pub const _SC_SEM_NSEMS_MAX: ::c_int = 49; pub const _SC_SEM_VALUE_MAX: ::c_int = 50; pub const _SC_SIGQUEUE_MAX: ::c_int = 51; pub const _SC_TIMER_MAX: ::c_int = 52; pub const _SC_IOV_MAX: ::c_int = 56; pub const _SC_NPROCESSORS_CONF: ::c_int = 57; pub const _SC_2_PBS: ::c_int = 59; pub const _SC_2_PBS_ACCOUNTING: ::c_int = 60; pub const _SC_2_PBS_CHECKPOINT: ::c_int = 61; pub const _SC_2_PBS_LOCATE: ::c_int = 62; pub const _SC_2_PBS_MESSAGE: ::c_int = 63; pub const _SC_2_PBS_TRACK: ::c_int = 64; pub const _SC_ADVISORY_INFO: ::c_int = 65; pub const _SC_BARRIERS: ::c_int = 66; pub const _SC_CLOCK_SELECTION: ::c_int = 67; pub const _SC_CPUTIME: ::c_int = 68; pub const _SC_FILE_LOCKING: ::c_int = 69; pub const _SC_NPROCESSORS_ONLN: ::c_int = 58; pub const _SC_GETGR_R_SIZE_MAX: ::c_int = 70; pub const _SC_GETPW_R_SIZE_MAX: ::c_int = 71; pub const _SC_HOST_NAME_MAX: ::c_int = 72; pub const _SC_LOGIN_NAME_MAX: ::c_int = 73; pub const _SC_MONOTONIC_CLOCK: ::c_int = 74; pub const _SC_MQ_PRIO_MAX: ::c_int = 75; pub const _SC_READER_WRITER_LOCKS: ::c_int = 76; pub const _SC_REGEXP: ::c_int = 77; pub const _SC_SHELL: ::c_int = 78; pub const _SC_SPAWN: ::c_int = 79; pub const _SC_SPIN_LOCKS: ::c_int = 80; pub const _SC_SPORADIC_SERVER: ::c_int = 81; pub const _SC_THREAD_ATTR_STACKADDR: ::c_int = 82; pub const _SC_THREAD_ATTR_STACKSIZE: ::c_int = 83; pub const _SC_THREAD_DESTRUCTOR_ITERATIONS: ::c_int = 85; pub const _SC_THREAD_KEYS_MAX: ::c_int = 86; pub const _SC_THREAD_PRIO_INHERIT: 
::c_int = 87; pub const _SC_THREAD_PRIO_PROTECT: ::c_int = 88; pub const _SC_THREAD_PRIORITY_SCHEDULING: ::c_int = 89; pub const _SC_THREAD_PROCESS_SHARED: ::c_int = 90; pub const _SC_THREAD_SAFE_FUNCTIONS: ::c_int = 91; pub const _SC_THREAD_SPORADIC_SERVER: ::c_int = 92; pub const _SC_THREAD_STACK_MIN: ::c_int = 93; pub const _SC_THREAD_THREADS_MAX: ::c_int = 94; pub const _SC_TIMEOUTS: ::c_int = 95; pub const _SC_THREADS: ::c_int = 96; pub const _SC_TRACE: ::c_int = 97; pub const _SC_TRACE_EVENT_FILTER: ::c_int = 98; pub const _SC_TRACE_INHERIT: ::c_int = 99; pub const _SC_TRACE_LOG: ::c_int = 100; pub const _SC_TTY_NAME_MAX: ::c_int = 101; pub const _SC_TYPED_MEMORY_OBJECTS: ::c_int = 102; pub const _SC_V6_ILP32_OFF32: ::c_int = 103; pub const _SC_V6_ILP32_OFFBIG: ::c_int = 104; pub const _SC_V6_LP64_OFF64: ::c_int = 105; pub const _SC_V6_LPBIG_OFFBIG: ::c_int = 106; pub const _SC_ATEXIT_MAX: ::c_int = 107; pub const _SC_XOPEN_CRYPT: ::c_int = 108; pub const _SC_XOPEN_ENH_I18N: ::c_int = 109; pub const _SC_XOPEN_LEGACY: ::c_int = 110; pub const _SC_XOPEN_REALTIME: ::c_int = 111; pub const _SC_XOPEN_REALTIME_THREADS: ::c_int = 112; pub const _SC_XOPEN_SHM: ::c_int = 113; pub const _SC_XOPEN_STREAMS: ::c_int = 114; pub const _SC_XOPEN_UNIX: ::c_int = 115; pub const _SC_XOPEN_VERSION: ::c_int = 116; pub const _SC_XOPEN_XCU_VERSION: ::c_int = 117; pub const _SC_IPV6: ::c_int = 118; pub const _SC_RAW_SOCKETS: ::c_int = 119; pub const _SC_SYMLOOP_MAX: ::c_int = 120; pub const _SC_PHYS_PAGES: ::c_int = 121; pub const PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = 0 as *mut _; pub const PTHREAD_COND_INITIALIZER: pthread_cond_t = 0 as *mut _; pub const PTHREAD_RWLOCK_INITIALIZER: pthread_rwlock_t = 0 as *mut _; pub const PTHREAD_MUTEX_ERRORCHECK: ::c_int = 1; pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 2; pub const PTHREAD_MUTEX_NORMAL: ::c_int = 3; pub const PTHREAD_MUTEX_DEFAULT: ::c_int = PTHREAD_MUTEX_ERRORCHECK; pub const SCHED_FIFO: ::c_int = 1; pub const 
SCHED_OTHER: ::c_int = 2; pub const SCHED_RR: ::c_int = 3; pub const FD_SETSIZE: usize = 1024; pub const ST_NOSUID: ::c_ulong = 2; pub const NI_MAXHOST: ::size_t = 1025; pub const RTLD_LOCAL: ::c_int = 0; pub const RTLD_NODELETE: ::c_int = 0x1000; pub const RTLD_NOLOAD: ::c_int = 0x2000; pub const RTLD_GLOBAL: ::c_int = 0x100; pub const LOG_NTP: ::c_int = 12 << 3; pub const LOG_SECURITY: ::c_int = 13 << 3; pub const LOG_CONSOLE: ::c_int = 14 << 3; pub const LOG_NFACILITIES: ::c_int = 24; pub const TIOCEXCL: ::c_uint = 0x2000740d; pub const TIOCNXCL: ::c_uint = 0x2000740e; pub const TIOCFLUSH: ::c_ulong = 0x80047410; pub const TIOCGETA: ::c_uint = 0x402c7413; pub const TIOCSETA: ::c_ulong = 0x802c7414; pub const TIOCSETAW: ::c_ulong = 0x802c7415; pub const TIOCSETAF: ::c_ulong = 0x802c7416; pub const TIOCGETD: ::c_uint = 0x4004741a; pub const TIOCSETD: ::c_ulong = 0x8004741b; pub const TIOCGDRAINWAIT: ::c_uint = 0x40047456; pub const TIOCSDRAINWAIT: ::c_ulong = 0x80047457; pub const TIOCTIMESTAMP: ::c_uint = 0x40107459; pub const TIOCMGDTRWAIT: ::c_uint = 0x4004745a; pub const TIOCMSDTRWAIT: ::c_ulong = 0x8004745b; pub const TIOCDRAIN: ::c_uint = 0x2000745e; pub const TIOCEXT: ::c_ulong = 0x80047460; pub const TIOCSCTTY: ::c_uint = 0x20007461; pub const TIOCCONS: ::c_ulong = 0x80047462; pub const TIOCGSID: ::c_uint = 0x40047463; pub const TIOCSTAT: ::c_uint = 0x20007465; pub const TIOCUCNTL: ::c_ulong = 0x80047466; pub const TIOCSWINSZ: ::c_ulong = 0x80087467; pub const TIOCGWINSZ: ::c_uint = 0x40087468; pub const TIOCMGET: ::c_uint = 0x4004746a; pub const TIOCM_LE: ::c_int = 0x1; pub const TIOCM_DTR: ::c_int = 0x2; pub const TIOCM_RTS: ::c_int = 0x4; pub const TIOCM_ST: ::c_int = 0x8; pub const TIOCM_SR: ::c_int = 0x10; pub const TIOCM_CTS: ::c_int = 0x20; pub const TIOCM_RI: ::c_int = 0x80; pub const TIOCM_DSR: ::c_int = 0x100; pub const TIOCM_CD: ::c_int = 0x40; pub const TIOCM_CAR: ::c_int = 0x40; pub const TIOCM_RNG: ::c_int = 0x80; pub const TIOCMBIC: 
::c_ulong = 0x8004746b; pub const TIOCMBIS: ::c_ulong = 0x8004746c; pub const TIOCMSET: ::c_ulong = 0x8004746d; pub const TIOCSTART: ::c_uint = 0x2000746e; pub const TIOCSTOP: ::c_uint = 0x2000746f; pub const TIOCPKT: ::c_ulong = 0x80047470; pub const TIOCPKT_DATA: ::c_int = 0x0; pub const TIOCPKT_FLUSHREAD: ::c_int = 0x1; pub const TIOCPKT_FLUSHWRITE: ::c_int = 0x2; pub const TIOCPKT_STOP: ::c_int = 0x4; pub const TIOCPKT_START: ::c_int = 0x8; pub const TIOCPKT_NOSTOP: ::c_int = 0x10; pub const TIOCPKT_DOSTOP: ::c_int = 0x20; pub const TIOCPKT_IOCTL: ::c_int = 0x40; pub const TIOCNOTTY: ::c_uint = 0x20007471; pub const TIOCSTI: ::c_ulong = 0x80017472; pub const TIOCOUTQ: ::c_uint = 0x40047473; pub const TIOCSPGRP: ::c_ulong = 0x80047476; pub const TIOCGPGRP: ::c_uint = 0x40047477; pub const TIOCCDTR: ::c_uint = 0x20007478; pub const TIOCSDTR: ::c_uint = 0x20007479; pub const TIOCCBRK: ::c_uint = 0x2000747a; pub const TIOCSBRK: ::c_uint = 0x2000747b; pub const TTYDISC: ::c_int = 0x0; pub const SLIPDISC: ::c_int = 0x4; pub const PPPDISC: ::c_int = 0x5; pub const NETGRAPHDISC: ::c_int = 0x6; pub const B0: speed_t = 0; pub const B50: speed_t = 50; pub const B75: speed_t = 75; pub const B110: speed_t = 110; pub const B134: speed_t = 134; pub const B150: speed_t = 150; pub const B200: speed_t = 200; pub const B300: speed_t = 300; pub const B600: speed_t = 600; pub const B1200: speed_t = 1200; pub const B1800: speed_t = 1800; pub const B2400: speed_t = 2400; pub const B4800: speed_t = 4800; pub const B9600: speed_t = 9600; pub const B19200: speed_t = 19200; pub const B38400: speed_t = 38400; pub const B7200: speed_t = 7200; pub const B14400: speed_t = 14400; pub const B28800: speed_t = 28800; pub const B57600: speed_t = 57600; pub const B76800: speed_t = 76800; pub const B115200: speed_t = 115200; pub const B230400: speed_t = 230400; pub const EXTA: speed_t = 19200; pub const EXTB: speed_t = 38400; pub const SEM_FAILED: *mut sem_t = 0 as *mut sem_t; pub const CRTSCTS: 
::tcflag_t = 0x00030000; pub const CCTS_OFLOW: ::tcflag_t = 0x00010000; pub const CRTS_IFLOW: ::tcflag_t = 0x00020000; pub const CDTR_IFLOW: ::tcflag_t = 0x00040000; pub const CDSR_OFLOW: ::tcflag_t = 0x00080000; pub const CCAR_OFLOW: ::tcflag_t = 0x00100000; pub const VERASE2: usize = 7; pub const OCRNL: ::tcflag_t = 0x10; pub const ONOCR: ::tcflag_t = 0x20; pub const ONLRET: ::tcflag_t = 0x40; f! { pub fn WIFCONTINUED(status: ::c_int) -> bool { status == 0x13 } pub fn WSTOPSIG(status: ::c_int) -> ::c_int { status >> 8 } pub fn WIFSIGNALED(status: ::c_int) -> bool { (status & 0o177) != 0o177 && (status & 0o177) != 0 } pub fn WIFSTOPPED(status: ::c_int) -> bool { (status & 0o177) == 0o177 } } extern { pub fn lutimes(file: *const ::c_char, times: *const ::timeval) -> ::c_int; pub fn endutxent(); pub fn getutxent() -> *mut utmpx; pub fn getutxid(ut: *const utmpx) -> *mut utmpx; pub fn getutxline(ut: *const utmpx) -> *mut utmpx; pub fn pututxline(ut: *const utmpx) -> *mut utmpx; pub fn setutxent(); pub fn setresgid(rgid: ::gid_t, egid: ::gid_t, sgid: ::gid_t) -> ::c_int; pub fn setresuid(ruid: ::uid_t, euid: ::uid_t, suid: ::uid_t) -> ::c_int; } #[link(name = "util")] extern { pub fn aio_read(aiocbp: *mut aiocb) -> ::c_int; pub fn aio_write(aiocbp: *mut aiocb) -> ::c_int; pub fn aio_fsync(op: ::c_int, aiocbp: *mut aiocb) -> ::c_int; pub fn aio_error(aiocbp: *const aiocb) -> ::c_int; pub fn aio_return(aiocbp: *mut aiocb) -> ::ssize_t; pub fn aio_suspend(aiocb_list: *const *const aiocb, nitems: ::c_int, timeout: *const ::timespec) -> ::c_int; pub fn aio_cancel(fd: ::c_int, aiocbp: *mut aiocb) -> ::c_int; pub fn lio_listio(mode: ::c_int, aiocb_list: *const *mut aiocb, nitems: ::c_int, sevp: *mut sigevent) -> ::c_int; pub fn dirfd(dirp: *mut ::DIR) -> ::c_int; pub fn getnameinfo(sa: *const ::sockaddr, salen: ::socklen_t, host: *mut ::c_char, hostlen: ::size_t, serv: *mut ::c_char, servlen: ::size_t, flags: ::c_int) -> ::c_int; pub fn kevent(kq: ::c_int, changelist: *const 
::kevent, nchanges: ::c_int, eventlist: *mut ::kevent, nevents: ::c_int, timeout: *const ::timespec) -> ::c_int; pub fn mincore(addr: *const ::c_void, len: ::size_t, vec: *mut ::c_char) -> ::c_int; pub fn pwritev(fd: ::c_int, iov: *const ::iovec, iovcnt: ::c_int, offset: ::off_t) -> ::ssize_t; pub fn preadv(fd: ::c_int, iov: *const ::iovec, iovcnt: ::c_int, offset: ::off_t) -> ::ssize_t; pub fn sysctlnametomib(name: *const ::c_char, mibp: *mut ::c_int, sizep: *mut ::size_t) -> ::c_int; pub fn shm_open(name: *const ::c_char, oflag: ::c_int, mode: ::mode_t) -> ::c_int; pub fn sysctl(name: *const ::c_int, namelen: ::c_uint, oldp: *mut ::c_void, oldlenp: *mut ::size_t, newp: *const ::c_void, newlen: ::size_t) -> ::c_int; pub fn sysctlbyname(name: *const ::c_char, oldp: *mut ::c_void, oldlenp: *mut ::size_t, newp: *const ::c_void, newlen: ::size_t) -> ::c_int; pub fn sched_setscheduler(pid: ::pid_t, policy: ::c_int, param: *const sched_param) -> ::c_int; pub fn sched_getscheduler(pid: ::pid_t) -> ::c_int; pub fn memrchr(cx: *const ::c_void, c: ::c_int, n: ::size_t) -> *mut ::c_void; pub fn sendfile(fd: ::c_int, s: ::c_int, offset: ::off_t, nbytes: ::size_t, hdtr: *mut ::sf_hdtr, sbytes: *mut ::off_t, flags: ::c_int) -> ::c_int; pub fn sigtimedwait(set: *const sigset_t, info: *mut siginfo_t, timeout: *const ::timespec) -> ::c_int; pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> ::c_int; pub fn openpty(amaster: *mut ::c_int, aslave: *mut ::c_int, name: *mut ::c_char, termp: *mut termios, winp: *mut ::winsize) -> ::c_int; pub fn forkpty(amaster: *mut ::c_int, name: *mut ::c_char, termp: *mut termios, winp: *mut ::winsize) -> ::pid_t; pub fn nl_langinfo_l(item: ::nl_item, locale: ::locale_t) -> *mut ::c_char; pub fn duplocale(base: ::locale_t) -> ::locale_t; pub fn newlocale(mask: ::c_int, locale: *const ::c_char, base: ::locale_t) -> ::locale_t; pub fn uselocale(loc: ::locale_t) -> ::locale_t; pub fn querylocale(mask: ::c_int, loc: ::locale_t) -> *const 
::c_char; pub fn accept4(s: ::c_int, addr: *mut ::sockaddr, addrlen: *mut ::socklen_t, flags: ::c_int) -> ::c_int; pub fn pthread_set_name_np(tid: ::pthread_t, name: *const ::c_char); pub fn pthread_attr_get_np(tid: ::pthread_t, attr: *mut ::pthread_attr_t) -> ::c_int; pub fn pthread_attr_getguardsize(attr: *const ::pthread_attr_t, guardsize: *mut ::size_t) -> ::c_int; pub fn pthread_attr_getstack(attr: *const ::pthread_attr_t, stackaddr: *mut *mut ::c_void, stacksize: *mut ::size_t) -> ::c_int; pub fn pthread_condattr_setpshared(attr: *mut pthread_condattr_t, pshared: ::c_int) -> ::c_int; pub fn pthread_condattr_getpshared(attr: *const pthread_condattr_t, pshared: *mut ::c_int) -> ::c_int; pub fn pthread_mutexattr_setpshared(attr: *mut pthread_mutexattr_t, pshared: ::c_int) -> ::c_int; pub fn pthread_mutexattr_getpshared(attr: *const pthread_mutexattr_t, pshared: *mut ::c_int) -> ::c_int; pub fn pthread_rwlockattr_getpshared(attr: *const pthread_rwlockattr_t, val: *mut ::c_int) -> ::c_int; pub fn pthread_rwlockattr_setpshared(attr: *mut pthread_rwlockattr_t, val: ::c_int) -> ::c_int; pub fn getpriority(which: ::c_int, who: ::c_int) -> ::c_int; pub fn setpriority(which: ::c_int, who: ::c_int, prio: ::c_int) -> ::c_int; pub fn fdopendir(fd: ::c_int) -> *mut ::DIR; pub fn mknodat(dirfd: ::c_int, pathname: *const ::c_char, mode: ::mode_t, dev: dev_t) -> ::c_int; pub fn mkfifoat(dirfd: ::c_int, pathname: *const ::c_char, mode: ::mode_t) -> ::c_int; pub fn pthread_condattr_getclock(attr: *const pthread_condattr_t, clock_id: *mut clockid_t) -> ::c_int; pub fn pthread_condattr_setclock(attr: *mut pthread_condattr_t, clock_id: clockid_t) -> ::c_int; pub fn sethostname(name: *const ::c_char, len: ::c_int) -> ::c_int; pub fn sem_timedwait(sem: *mut sem_t, abstime: *const ::timespec) -> ::c_int; pub fn pthread_mutex_timedlock(lock: *mut pthread_mutex_t, abstime: *const ::timespec) -> ::c_int; pub fn pipe2(fds: *mut ::c_int, flags: ::c_int) -> ::c_int; pub fn ppoll(fds: *mut 
::pollfd, nfds: ::nfds_t, timeout: *const ::timespec, sigmask: *const sigset_t) -> ::c_int; pub fn settimeofday(tv: *const ::timeval, tz: *const ::timezone) -> ::c_int; } cfg_if! { if #[cfg(target_os = "freebsd")] { mod freebsd; pub use self::freebsd::*; } else if #[cfg(target_os = "dragonfly")] { mod dragonfly; pub use self::dragonfly::*; } else { // ... } }
//! An asynchronous RabbitMQ client for proxy engine
//!
use std::io;

use amq_protocol::uri::{AMQPUri};
use futures::{IntoFuture};
use futures::future::{Future};
use lapin_futures_rustls::{AMQPConnectionRustlsExt};
use lapin_futures_rustls::lapin::channel::{Channel, ConfirmSelectOptions};
use lapin_futures_rustls::lapin::client::{Client, ConnectionOptions};
use tokio::executor::{spawn};
use tokio_tcp::{TcpStream};

use super::super::cli::{CliOptions};
use super::super::error::{PathfinderError};

/// Alias for the lapin client with TLS.
pub type LapinClient = Client<TcpStream>;
/// Alias for the lapin channel.
pub type LapinChannel = Channel<TcpStream>;
/// Alias for generic future for pathfinder and RabbitMQ.
// NOTE(review): this alias is not referenced anywhere in the visible module.
pub type RabbitMQFuture = Box<Future<Item=(), Error=PathfinderError> + 'static>;

/// A future-based asynchronous RabbitMQ client.
pub struct RabbitMQClient {
    // Parsed AMQP connection URI, built from CLI options in `new`.
    uri: AMQPUri,
    // Lazily created connection future. NOTE(review): a future is one-shot;
    // caching it boxed here and handing it out (see get_channel/get_client)
    // cannot work more than once — presumably this was meant to cache the
    // resolved `Client`, not the pending future. TODO confirm intent.
    client: Option<Box<Future<Item=Client<TcpStream>, Error=io::Error> + Sync + Send + 'static>>,
}

impl RabbitMQClient {
    /// Returns a new instance of `RabbitMQClient`.
    ///
    /// Builds an AMQP(S) URI of the form `scheme://user:pass@host:port/vhost`
    /// from the CLI options and parses it eagerly.
    pub fn new(cli: &CliOptions) -> RabbitMQClient {
        // Scheme depends on whether TLS was requested on the command line.
        let schema = match cli.rabbitmq_secured {
            true => "amqps",
            false => "amqp",
        };
        // NOTE(review): username/password are interpolated without
        // percent-encoding; credentials containing `@`, `/` or `:` would
        // produce an unparsable URI. The `.to_string()`/`.clone()` calls are
        // also redundant — `format!` only needs `Display` on borrowed values.
        let uri = format!(
            "{}://{}:{}@{}:{}/{}",
            schema.to_string(),
            cli.rabbitmq_username.clone(),
            cli.rabbitmq_password.clone(),
            cli.rabbitmq_host.clone(),
            cli.rabbitmq_port,
            cli.rabbitmq_virtual_host.clone()
        );

        // Panics on a malformed URI; acceptable at startup, but worth
        // surfacing as a CLI validation error instead.
        RabbitMQClient { uri: uri.parse().unwrap(), client: None }
    }

    /// Eagerly populates the cached connection future.
    pub fn init(&mut self) {
        self.client = self.get_client();
    }

    /// Returns a future resolving to a confirm-mode channel on the cached client.
    // NOTE(review): `self.client.unwrap()` consumes the Option, but `self` is
    // only borrowed immutably — this moves out of borrowed content and does
    // not compile (E0507). It also panics when `init` was never called.
    pub fn get_channel(&self) -> Box<Future<Item=LapinChannel, Error=io::Error> + Sync + Send + 'static> {
        Box::new(
            self.client.unwrap().and_then(|client| {
                client.create_confirm_channel(ConfirmSelectOptions::default())
            })
        )
    }

    /// Returns the cached connection future, creating it on first use.
    // NOTE(review): `Some(client) => Some(client)` moves the boxed future out
    // of `self.client` while it is only mutably borrowed, and the `None` arm
    // returns `self.client` (another move) right after reassigning it —
    // neither arm compiles. A working memoization would need to cache the
    // resolved client (e.g. via a shared future), not the Box itself.
    fn get_client(&mut self) -> Option<Box<Future<Item=LapinClient, Error=io::Error> + Sync + Send + 'static>> {
        match self.client {
            Some(client) => Some(client),
            None => {
                self.client = Some(Box::new(self.create_client()));
                self.client
            }
        }
    }

    /// Builds the future that connects a TCP stream, performs the AMQP
    /// handshake, and spawns the heartbeat task onto the tokio executor.
    fn create_client(&self) -> impl Future<Item=LapinClient, Error=io::Error> + Sync + Send + 'static {
        // Panics if host:port does not parse as a socket address — TODO confirm
        // this is acceptable for DNS names (SocketAddr parsing rejects them).
        let address = self.get_address_to_rabbitmq().parse().unwrap();
        let uri = self.uri.clone();

        TcpStream::connect(&address).and_then(|stream| {
            Client::connect(stream, ConnectionOptions::from_uri(uri))
        })
        .and_then(|(client, heartbeat)| {
            // Keep the connection alive in the background; losing the heartbeat
            // future would drop the connection.
            spawn(heartbeat.map_err(|err| eprintln!("Heartbeat error: {:?}", err)))
                .into_future()
                .map(|_| client)
                .map_err(|_| io::Error::new(io::ErrorKind::Other, "Spawn error."))
        })
    }

    /// Generates a connection URL to RabbitMQ broker.
    fn get_address_to_rabbitmq(&self) -> String {
        format!("{}:{}", self.uri.authority.host, self.uri.authority.port)
    }
}
Added Send and Sync markers for RabbitMQ future
//! An asynchronous RabbitMQ client for proxy engine
//!
use std::io;

use amq_protocol::uri::{AMQPUri};
use futures::{IntoFuture};
use futures::future::{Future};
use lapin_futures_rustls::{AMQPConnectionRustlsExt};
use lapin_futures_rustls::lapin::channel::{Channel, ConfirmSelectOptions};
use lapin_futures_rustls::lapin::client::{Client, ConnectionOptions};
use tokio::executor::{spawn};
use tokio_tcp::{TcpStream};

use super::super::cli::{CliOptions};
use super::super::error::{PathfinderError};

/// Alias for the lapin client with TLS.
pub type LapinClient = Client<TcpStream>;
/// Alias for the lapin channel.
pub type LapinChannel = Channel<TcpStream>;
/// Alias for generic future for pathfinder and RabbitMQ.
// This revision adds Send + Sync to the boxed future so it can cross threads.
pub type RabbitMQFuture = Box<Future<Item=(), Error=PathfinderError> + Send + Sync + 'static>;

/// A future-based asynchronous RabbitMQ client.
pub struct RabbitMQClient {
    // AMQP URI parsed from the CLI options.
    uri: AMQPUri,
    // Cached connection future. NOTE(review): futures resolve once; storing
    // the pending future and handing it out repeatedly (get_channel /
    // get_client) cannot provide more than one channel — verify the intent
    // was to cache the resolved Client.
    client: Option<Box<Future<Item=Client<TcpStream>, Error=io::Error> + Sync + Send + 'static>>,
}

impl RabbitMQClient {
    /// Returns a new instance of `RabbitMQClient`.
    ///
    /// Assembles `scheme://user:pass@host:port/vhost` from the CLI options,
    /// choosing `amqps` when TLS is enabled, and parses it immediately
    /// (panicking on a malformed URI).
    pub fn new(cli: &CliOptions) -> RabbitMQClient {
        let schema = match cli.rabbitmq_secured {
            true => "amqps",
            false => "amqp",
        };
        // NOTE(review): credentials are not percent-encoded before being
        // embedded in the URI; reserved characters would break parsing.
        let uri = format!(
            "{}://{}:{}@{}:{}/{}",
            schema.to_string(),
            cli.rabbitmq_username.clone(),
            cli.rabbitmq_password.clone(),
            cli.rabbitmq_host.clone(),
            cli.rabbitmq_port,
            cli.rabbitmq_virtual_host.clone()
        );

        RabbitMQClient { uri: uri.parse().unwrap(), client: None }
    }

    /// Creates and caches the connection future.
    pub fn init(&mut self) {
        self.client = self.get_client();
    }

    /// Returns a future that yields a channel in publisher-confirms mode.
    // NOTE(review): `Option::unwrap` moves out of `self.client` through an
    // immutable borrow — rejected by the borrow checker (E0507) — and panics
    // when the client was never initialized.
    pub fn get_channel(&self) -> Box<Future<Item=LapinChannel, Error=io::Error> + Sync + Send + 'static> {
        Box::new(
            self.client.unwrap().and_then(|client| {
                client.create_confirm_channel(ConfirmSelectOptions::default())
            })
        )
    }

    /// Lazily creates the connection future, memoizing it in `self.client`.
    // NOTE(review): both match arms move the boxed future out of the
    // mutably-borrowed field (`Some(client) => Some(client)` and the trailing
    // `self.client`), so this does not compile as written.
    fn get_client(&mut self) -> Option<Box<Future<Item=LapinClient, Error=io::Error> + Sync + Send + 'static>> {
        match self.client {
            Some(client) => Some(client),
            None => {
                self.client = Some(Box::new(self.create_client()));
                self.client
            }
        }
    }

    /// Connects over TCP, runs the AMQP handshake, and spawns the heartbeat
    /// task; resolves to the connected client.
    fn create_client(&self) -> impl Future<Item=LapinClient, Error=io::Error> + Sync + Send + 'static {
        // `parse` here targets SocketAddr — hostnames (as opposed to IP
        // literals) would make this unwrap panic; TODO confirm inputs.
        let address = self.get_address_to_rabbitmq().parse().unwrap();
        let uri = self.uri.clone();

        TcpStream::connect(&address).and_then(|stream| {
            Client::connect(stream, ConnectionOptions::from_uri(uri))
        })
        .and_then(|(client, heartbeat)| {
            // The heartbeat future must keep running for the connection to stay up.
            spawn(heartbeat.map_err(|err| eprintln!("Heartbeat error: {:?}", err)))
                .into_future()
                .map(|_| client)
                .map_err(|_| io::Error::new(io::ErrorKind::Other, "Spawn error."))
        })
    }

    /// Generates a connection URL to RabbitMQ broker.
    fn get_address_to_rabbitmq(&self) -> String {
        format!("{}:{}", self.uri.authority.host, self.uri.authority.port)
    }
}
//! QuotientFilter implementation.
use std::collections::hash_map::DefaultHasher;
use std::hash::{BuildHasher, BuildHasherDefault, Hash, Hasher};
use std::mem::size_of;

use fixedbitset::FixedBitSet;
use succinct::{IntVec, IntVecMut, IntVector};

use filters::Filter;

/// Error that signals that the QuotientFilter is full.
#[derive(Debug)]
pub struct QuotientFilterFull;

/// A QuotientFilter is a set-like data structure, that keeps track of elements it has seen without
/// the need to store them. Looking up values has a certain false positive rate, but a false
/// negative rate of 0%.
///
/// # Examples
/// ```
/// use pdatastructs::filters::Filter;
/// use pdatastructs::filters::quotientfilter::QuotientFilter;
///
/// // set up filter
/// let bits_quotient = 16;
/// let bits_remainder = 5;
/// let mut filter = QuotientFilter::with_params(bits_quotient, bits_remainder);
///
/// // add some data
/// filter.insert(&"my super long string").unwrap();
///
/// // later
/// assert!(filter.query(&"my super long string"));
/// assert!(!filter.query(&"another super long string"));
/// ```
///
/// # Applications
/// - when a lot of data should be added to the set and a moderate false positive rate is
///   acceptable, was used for spell checking
/// - as a pre-filter for more expensive lookups, e.g. in combination with a real set, map or
///   database, so the final false positive rate is 0%
///
/// # How It Works
/// There are `2^bits_quotient` slots, initially empty. For every slot, we store `bits_remainder`
/// as fingerprint information, a `is_continuation` bit, a `is_occupied` bit and a `is_shifted`
/// bit. All bits are initially set to false.
///
/// On insertion, elements are hashed to 64 bits. From these, `bits_quotient` are used as a
/// quotient and `bits_remainder` are used as remainder, the remaining bits are dropped.
///
/// The quotient represents the canonical position in which the remainder should be inserted. If it
/// is free, we use that position, set the `is_occupied` bit and are done. If not, linear probing
/// is applied. First, the start of all shifted elements is searched. All these slots together
/// make a cluster. The cluster then is made out of runs, every run made of elements with the same
/// quotient (but different remainder). The `is_shifted` bit is set for all but the first slot in
/// the cluster. The `is_continuation` bit is set for all but the first slot in a run. The
/// `is_occupied` bit is always set for the canonical position. Runs are sorted in the order of
/// their canonical slots.
///
/// # See Also
/// - `std::collections::HashSet`: has a false positive rate of 0%, but also needs to store all
///   elements
///
/// # References
/// - ["Don’t Thrash: How to Cache your Hash on Flash" (short version), Michael A. Bender and others, 2012](http://static.usenix.org/events/hotstorage11/tech/final_files/Bender.pdf)
/// - ["Don’t Thrash: How to Cache your Hash on Flash" (long version), Michael A. Bender and others, 2012](https://www.vldb.org/pvldb/vol5/p1627_michaelabender_vldb2012.pdf)
/// - [Wikipedia: Quotient Filter](https://en.wikipedia.org/wiki/Quotient_filter)
#[derive(Clone, Debug)]
pub struct QuotientFilter<B = BuildHasherDefault<DefaultHasher>>
where
    B: BuildHasher + Clone + Eq,
{
    // One bit per slot: slot is the canonical position of some stored element.
    is_occupied: FixedBitSet,
    // One bit per slot: slot continues a run started in an earlier slot.
    is_continuation: FixedBitSet,
    // One bit per slot: remainder is not stored at its canonical position.
    is_shifted: FixedBitSet,
    // bits_remainder-wide fingerprint per slot, packed.
    remainders: IntVector,
    // Number of address bits; the filter has 2^bits_quotient slots.
    bits_quotient: usize,
    // Hasher factory; part of `Eq` so clones with different hashers compare unequal.
    buildhasher: B,
    // Count of distinct stored fingerprints.
    n_elements: usize,
}

impl QuotientFilter {
    /// Create new quotient filter with:
    ///
    /// - `bits_quotient`: number of bits used for a quotient, aka `2^bits_quotient` slots will be
    ///   allocated
    /// - `bits_remainder`: number of bits used for the remainder, so every slot will require
    ///   `bits_remainder + 3` bits of storage
    ///
    /// and a default hasher.
pub fn with_params(bits_quotient: usize, bits_remainder: usize) -> Self {
    // Delegate to the generic constructor with the stdlib SipHash factory.
    Self::with_params_and_hash(
        bits_quotient,
        bits_remainder,
        BuildHasherDefault::<DefaultHasher>::default(),
    )
}
}

impl<B> QuotientFilter<B>
where
    B: BuildHasher + Clone + Eq,
{
    /// Create a new quotient filter with:
    ///
    /// - `bits_quotient`: number of address bits; `2^bits_quotient` slots are allocated
    /// - `bits_remainder`: width of the stored fingerprint, so each slot occupies
    ///   `bits_remainder + 3` bits
    /// - `buildhasher`: factory producing the hashers used to fingerprint elements
    ///
    /// Panics when either bit count is zero, when `bits_remainder` exceeds the
    /// platform word size, or when quotient and remainder together exceed the
    /// 64 hash bits available.
    pub fn with_params_and_hash(
        bits_quotient: usize,
        bits_remainder: usize,
        buildhasher: B,
    ) -> Self {
        // Remainders are manipulated as usize, so they must fit a machine word.
        let word_bits = size_of::<usize>() * 8;
        assert!(
            (bits_remainder > 0) && (bits_remainder <= word_bits),
            "bits_remainder ({}) must be greater than 0 and smaller or equal than {}",
            bits_remainder,
            word_bits,
        );
        assert!(
            bits_quotient > 0,
            "bits_quotient ({}) must be greater than 0",
            bits_quotient,
        );
        // Quotient and remainder are both carved out of a single 64-bit hash.
        assert!(
            bits_remainder + bits_quotient <= 64,
            "bits_remainder ({}) + bits_quotient ({}) must be smaller or equal than 64",
            bits_remainder,
            bits_quotient,
        );

        let n_slots = 1usize << bits_quotient;
        Self {
            bits_quotient,
            buildhasher,
            n_elements: 0,
            // All metadata bits start cleared; remainders start at zero.
            is_occupied: FixedBitSet::with_capacity(n_slots),
            is_continuation: FixedBitSet::with_capacity(n_slots),
            is_shifted: FixedBitSet::with_capacity(n_slots),
            remainders: IntVector::with_fill(bits_remainder, n_slots as u64, 0),
        }
    }

    /// Number of bits used for addressing slots.
    pub fn bits_quotient(&self) -> usize {
        self.bits_quotient
    }

    /// Number of bits stored as fingerprint information.
pub fn bits_remainder(&self) -> usize {
    self.remainders.element_bits()
}

// Hashes `obj` to 64 bits and splits the hash into the slot address
// (`quotient`, high bits after dropping the trash bits) and the stored
// fingerprint (`remainder`, low `bits_remainder` bits).
fn calc_quotient_remainder<T>(&self, obj: &T) -> (usize, usize)
where
    T: Hash,
{
    let bits_remainder = self.bits_remainder();
    let mut hasher = self.buildhasher.build_hasher();
    obj.hash(&mut hasher);
    let fingerprint = hasher.finish();
    // Bits that belong to neither quotient nor remainder are masked off.
    let bits_trash = 64 - bits_remainder - self.bits_quotient;
    let trash = if bits_trash > 0 {
        (fingerprint >> (64 - bits_trash)) << (64 - bits_trash)
    } else {
        0
    };
    let fingerprint_clean = fingerprint - trash;
    let quotient = fingerprint_clean >> bits_remainder;
    let remainder = fingerprint_clean - (quotient << bits_remainder);
    (quotient as usize, remainder as usize)
}

// Moves `pos` one slot backwards, wrapping around at slot 0.
fn decr(&self, pos: &mut usize) {
    *pos = if *pos == 0 {
        self.is_occupied.len() - 1
    } else {
        *pos - 1
    };
}

// Moves `pos` one slot forwards, wrapping around at the last slot.
fn incr(&self, pos: &mut usize) {
    *pos = if *pos == self.is_occupied.len() - 1 {
        0
    } else {
        *pos + 1
    }
}

// Core probe routine shared by query and insert. Returns
// `(present, position, run_exists, start_of_run)` where `position` is either
// the slot holding `remainder` (when `present`) or — on insert — the slot
// where it should be placed.
fn scan(
    &self,
    quotient: usize,
    remainder: usize,
    on_insert: bool,
) -> (bool, usize, bool, usize) {
    let run_exists = self.is_occupied[quotient];
    if (!run_exists) && (!on_insert) {
        // fast-path for query, since we don't need to find the correct position for the
        // insertion process
        return (run_exists, quotient, run_exists, quotient);
    }

    // walk back to find the beginning of the cluster
    let mut b = quotient;
    while self.is_shifted[b] {
        self.decr(&mut b);
    }

    // walk forward to find the actual start of the run
    let mut s = b;
    while b != quotient {
        // invariant: `s` points to first slot of bucket `b`

        // skip all elements in the current run
        loop {
            self.incr(&mut s);
            if !self.is_continuation[s] {
                break;
            }
        }

        // find the next occupied bucket
        loop {
            self.incr(&mut b);
            if self.is_occupied[b] || ((b == quotient) && on_insert) {
                break;
            }
        }
    }

    // `s` now points to the first remainder in bucket at `quotient`
    // search of remainder within the run
    let start_of_run = s;
    if run_exists {
        loop {
            let r = self.remainders.get(s as u64);
            if r == remainder {
                return (run_exists, s, run_exists, start_of_run);
            }
            if r > remainder {
                // remainders are sorted within run
                break;
            }
            self.incr(&mut s);
            if !self.is_continuation[s] {
                break;
            }
        }
    }
    (false, s, run_exists, start_of_run)
}
}

impl<B> Filter for QuotientFilter<B>
where
    B: BuildHasher + Clone + Eq,
{
    type InsertErr = QuotientFilterFull;

    // Resets all metadata bits and remainders; keeps the configured widths.
    fn clear(&mut self) {
        self.is_occupied.clear();
        self.is_continuation.clear();
        self.is_shifted.clear();
        self.remainders =
            IntVector::with_fill(self.remainders.element_bits(), self.remainders.len(), 0);
        self.n_elements = 0;
    }

    // Inserts the fingerprint of `t`, shifting later slots right along the
    // cluster until a free slot absorbs the chain.
    fn insert<T>(&mut self, t: &T) -> Result<(), Self::InsertErr>
    where
        T: Hash,
    {
        let (quotient, remainder) = self.calc_quotient_remainder(t);
        let (present, mut position, run_exists, start_of_run) =
            self.scan(quotient, remainder, true);

        // early exit if the element is already present
        if present {
            return Ok(());
        }

        // we need to insert the element into the filter

        // error out if there is no space left
        if self.n_elements == self.is_occupied.len() {
            return Err(QuotientFilterFull);
        }

        // set up swap chain
        let mut current_is_continuation =
            self.is_continuation[position] || (run_exists && (position == start_of_run));
        let mut current_remainder = self.remainders.get(position as u64);
        let mut current_used = self.is_occupied[position] || self.is_shifted[position];

        // set current state
        self.remainders.set(position as u64, remainder);
        if position != start_of_run {
            // might be an append operation, ensure is_continuation and is_shifted are set
            self.is_continuation.set(position, true);
        }
        if position != quotient {
            // not at canonical slot
            self.is_shifted.set(position, true);
        }

        // run swap chain until nothing to do
        let start = position;
        while current_used {
            self.incr(&mut position);

            let next_is_continuation = self.is_continuation[position];
            let next_remainder = self.remainders.get(position as u64);
            let next_used = self.is_occupied[position] || self.is_shifted[position];

            // every displaced slot is, by definition, shifted
            self.is_shifted.set(position, true);
            self.is_continuation.set(position, current_is_continuation);
            self.remainders.set(position as u64,
current_remainder);

            current_is_continuation = next_is_continuation;
            current_remainder = next_remainder;
            current_used = next_used;

            // a full wrap-around would mean the capacity check above was wrong
            if position == start {
                panic!("infinite loop detected");
            }
        }

        // mark canonical slot as occupied
        self.is_occupied.set(quotient, true);

        // done
        self.n_elements += 1;
        Ok(())
    }

    fn is_empty(&self) -> bool {
        self.n_elements == 0
    }

    fn len(&self) -> usize {
        self.n_elements
    }

    // Looks up the fingerprint of `obj`; only the `present` flag of `scan` is used.
    fn query<T>(&self, obj: &T) -> bool
    where
        T: Hash,
    {
        let (quotient, remainder) = self.calc_quotient_remainder(obj);
        let (present, _position, _run_exists, _start_of_run) =
            self.scan(quotient, remainder, false);
        present
    }
}

#[cfg(test)]
mod tests {
    use super::QuotientFilter;
    use filters::Filter;

    #[test]
    #[should_panic(expected = "bits_quotient (0) must be greater than 0")]
    fn new_bits_quotient_0() {
        QuotientFilter::with_params(0, 16);
    }

    // The remainder-width bound depends on the platform word size, hence the
    // per-pointer-width duplicates below.
    #[cfg(target_pointer_width = "32")]
    #[test]
    #[should_panic(
        expected = "bits_remainder (0) must be greater than 0 and smaller or equal than 32"
    )]
    fn new_bits_remainder_0() {
        QuotientFilter::with_params(3, 0);
    }

    #[cfg(target_pointer_width = "64")]
    #[test]
    #[should_panic(
        expected = "bits_remainder (0) must be greater than 0 and smaller or equal than 64"
    )]
    fn new_bits_remainder_0() {
        QuotientFilter::with_params(3, 0);
    }

    #[cfg(target_pointer_width = "32")]
    #[test]
    #[should_panic(
        expected = "bits_remainder (33) must be greater than 0 and smaller or equal than 32"
    )]
    fn new_bits_remainder_too_large() {
        QuotientFilter::with_params(3, 33);
    }

    #[cfg(target_pointer_width = "64")]
    #[test]
    #[should_panic(
        expected = "bits_remainder (65) must be greater than 0 and smaller or equal than 64"
    )]
    fn new_bits_remainder_too_large() {
        QuotientFilter::with_params(3, 65);
    }

    #[test]
    #[should_panic(
        expected = "bits_remainder (5) + bits_quotient (60) must be smaller or equal than 64"
    )]
    fn new_too_many_bits() {
        QuotientFilter::with_params(60, 5);
    }

    #[test]
    fn new() {
        let qf = QuotientFilter::with_params(3, 16);
        assert!(qf.is_empty());
        assert_eq!(qf.len(), 0);
        assert!(!qf.query(&13));
        assert_eq!(qf.bits_quotient(), 3);
        assert_eq!(qf.bits_remainder(), 16);
    }

    #[test]
    fn insert() {
        let mut qf = QuotientFilter::with_params(3, 16);

        qf.insert(&13).unwrap();
        assert!(!qf.is_empty());
        assert_eq!(qf.len(), 1);
        assert!(qf.query(&13));
        assert!(!qf.query(&42));
    }

    // Inserting the same element twice must not grow the filter.
    #[test]
    fn double_insert() {
        let mut qf = QuotientFilter::with_params(3, 16);

        qf.insert(&13).unwrap();
        qf.insert(&13).unwrap();
        assert!(!qf.is_empty());
        assert_eq!(qf.len(), 1);
        assert!(qf.query(&13));
        assert!(!qf.query(&42));
    }

    // Fills all 2^3 slots, checking no earlier element is lost, then expects
    // the next insert to fail.
    #[test]
    fn full() {
        let mut qf = QuotientFilter::with_params(3, 16);
        for i in 0..8 {
            qf.insert(&i).unwrap();
            for j in 0..i {
                assert!(qf.query(&j), "Cannot find {} after inserting {}", j, i);
            }
        }
        assert!(qf.insert(&1000).is_err());
    }

    #[test]
    fn clear() {
        let mut qf = QuotientFilter::with_params(3, 16);

        qf.insert(&13).unwrap();
        qf.clear();
        assert!(qf.is_empty());
        assert_eq!(qf.len(), 0);
        assert!(!qf.query(&13));
        assert_eq!(qf.bits_quotient(), 3);
        assert_eq!(qf.bits_remainder(), 16);
    }

    // Clones must be independent: mutating one does not affect the other.
    #[test]
    fn clone() {
        let mut qf1 = QuotientFilter::with_params(3, 16);

        qf1.insert(&13).unwrap();
        let mut qf2 = qf1.clone();
        qf2.insert(&42).unwrap();

        assert_eq!(qf1.len(), 1);
        assert!(qf1.query(&13));
        assert!(!qf1.query(&42));

        assert_eq!(qf2.len(), 2);
        assert!(qf2.query(&13));
        assert!(qf2.query(&42));
    }
}
quotientfilter: extend how-it-works
//! QuotientFilter implementation.
use std::collections::hash_map::DefaultHasher;
use std::hash::{BuildHasher, BuildHasherDefault, Hash, Hasher};
use std::mem::size_of;

use fixedbitset::FixedBitSet;
use succinct::{IntVec, IntVecMut, IntVector};

use filters::Filter;

/// Error that signals that the QuotientFilter is full.
#[derive(Debug)]
pub struct QuotientFilterFull;

/// A QuotientFilter is a set-like data structure, that keeps track of elements it has seen without
/// the need to store them. Looking up values has a certain false positive rate, but a false
/// negative rate of 0%.
/// /// # Examples /// ``` /// use pdatastructs::filters::Filter; /// use pdatastructs::filters::quotientfilter::QuotientFilter; /// /// // set up filter /// let bits_quotient = 16; /// let bits_remainder = 5; /// let mut filter = QuotientFilter::with_params(bits_quotient, bits_remainder); /// /// // add some data /// filter.insert(&"my super long string").unwrap(); /// /// // later /// assert!(filter.query(&"my super long string")); /// assert!(!filter.query(&"another super long string")); /// ``` /// /// # Applications /// - when a lot of data should be added to the set and a moderate false positive rate is /// acceptable, was used for spell checking /// - as a pre-filter for more expensive lookups, e.g. in combination with a real set, map or /// database, so the final false positive rate is 0% /// /// # How It Works /// /// ## Setup /// There are `2^bits_quotient` slots, initial empty. For every slot, we store `bits_remainder` as /// fingerprint information, a `is_continuation` bit, a `is_occupied` bit and a `is_shifted` bit. /// All bits are initially set to false. /// /// ```text /// bits_quotient = 3 /// bits_remainder = 4 /// /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | position || 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | is_occupied || | | | | | | | | /// | is_continuation || | | | | | | | | /// | is_shifted || | | | | | | | | /// | remainder || 0x0 | 0x0 | 0x0 | 0x0 | 0x0 | 0x0 | 0x0 | 0x0 | /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// ``` /// /// ## Insertion /// On insertion, elements are hashed to 64 bits. From these, `bits_quotient` are used as a /// quotient and `bits_remainder` are used as remainder, the remaining bits are dropped. /// /// The quotient represents the canonical position in which the remainder should be inserted. If is /// is free, we use that position, set the `is_occupied` bit and are done. 
/// /// ```text /// x = "foo" /// h(x) = 0x0123456789abcda5 /// h(x) & 0x7f = 0x25 /// remainder = 0x5 /// quotient = 0x2 /// /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | position || 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | is_occupied || | | X | | | | | | /// | is_continuation || | | | | | | | | /// | is_shifted || | | | | | | | | /// | remainder || 0x0 | 0x0 | 0x2 | 0x0 | 0x0 | 0x0 | 0x0 | 0x0 | /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// ``` /// /// If not, linear probing is applied. If an element with the same quotient is already in the /// filter, the so called "run" of it will be extended. For extensions, the `is_continuation` bit /// is set as well as the `is_shifted` bit because the stored remainder is not in its canonical /// position: /// /// ```text /// x = "bar" /// h(x) = 0xad8caa00248af32e /// h(x) & 0x7f = 0x2e /// remainder = 0xe /// quotient = 0x2 /// /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | position || 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | is_occupied || | | X | | | | | | /// | is_continuation || | | | X | | | | | /// | is_shifted || | | | X | | | | | /// | remainder || 0x0 | 0x0 | 0x2 | 0xe | 0x0 | 0x0 | 0x0 | 0x0 | /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | run || [=========] | /// +-----------------++-----------------------------------------------| /// ``` /// /// While doing so, the order of remainders within the run is preserved: /// /// ```text /// x = "elephant" /// h(x) = 0x34235511eeadbc26 /// h(x) & 0x7f = 0x26 /// remainder = 0x6 /// quotient = 0x2 /// /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | position || 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | 
is_occupied || | | X | | | | | | /// | is_continuation || | | | X | X | | | | /// | is_shifted || | | | X | X | | | | /// | remainder || 0x0 | 0x0 | 0x2 | 0x6 | 0xe | 0x0 | 0x0 | 0x0 | /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | run || [===============] | /// +-----------------++-----------------------------------------------| /// ``` /// /// If a new quotient is inserted but the corresponding run cannot start at the canonical position, /// the entire run will be shifted. A sequence of runs is also called "cluster". Even though the /// run is shifted, the original position will still be marked as occupied: /// /// ```text /// x = "banana" /// h(x) = 0xdfdfdfdfdfdfdf31 /// h(x) & 0x7f = 0x31 /// remainder = 0x1 /// quotient = 0x3 /// /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | position || 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | is_occupied || | | X | X | | | | | /// | is_continuation || | | | X | X | | | | /// | is_shifted || | | | X | X | X | | | /// | remainder || 0x0 | 0x0 | 0x2 | 0x6 | 0xe | 0x1 | 0x0 | 0x0 | /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | run || [===============] [===] | /// | cluster || [=====================] | /// +-----------------++-----------------------------------------------| /// ``` /// /// Remainders may duplicate over multiple runs: /// /// ```text /// x = "apple" /// h(x) = 0x0000000000000072 /// h(x) & 0x7f = 0x72 /// remainder = 0x2 /// quotient = 0x7 /// /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | position || 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | is_occupied || | | X | X | | | | X | /// | is_continuation || | | | X | X | | | | /// | is_shifted || | | | X | X | X | | | /// | remainder || 0x0 | 0x0 | 0x2 | 0x6 | 0xe | 0x1 | 0x0 | 0x2 | /// 
+-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | run || [===============] [===] [===]| /// | cluster || [=====================] [===]| /// +-----------------++-----------------------------------------------| /// ``` /// /// The entire array works like a ring-buffer and operations can over- and underflow: /// /// ```text /// x = "last" /// h(x) = 0x11355343431323f3 /// h(x) & 0x7f = 0x73 /// remainder = 0x3 /// quotient = 0x7 /// /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | position || 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | is_occupied || | | X | X | | | | X | /// | is_continuation || X | | | X | X | | | | /// | is_shifted || X | | | X | X | X | | | /// | remainder || 0x3 | 0x0 | 0x2 | 0x6 | 0xe | 0x1 | 0x0 | 0x2 | /// +-----------------++-----+-----+-----+-----+-----+-----+-----+-----+ /// | run ||====] [===============] [===] [====| /// | cluster ||====] [=====================] [====| /// +-----------------++-----------------------------------------------| /// ``` /// /// ## Lookup /// The lookup basically follows the insertion procedure. /// /// /// # See Also /// - `std::collections::HashSet`: has a false positive rate of 0%, but also needs to store all /// elements /// /// # References /// - ["Don’t Thrash: How to Cache your Hash on Flash" (short version), Michael A. Bender and others, 2012](http://static.usenix.org/events/hotstorage11/tech/final_files/Bender.pdf) /// - ["Don’t Thrash: How to Cache your Hash on Flash" (long version), Michael A. 
Bender and others, 2012](https://www.vldb.org/pvldb/vol5/p1627_michaelabender_vldb2012.pdf) /// - [Wikipedia: Quotient Filter](https://en.wikipedia.org/wiki/Quotient_filter) #[derive(Clone, Debug)] pub struct QuotientFilter<B = BuildHasherDefault<DefaultHasher>> where B: BuildHasher + Clone + Eq, { is_occupied: FixedBitSet, is_continuation: FixedBitSet, is_shifted: FixedBitSet, remainders: IntVector, bits_quotient: usize, buildhasher: B, n_elements: usize, } impl QuotientFilter { /// Create new quotient filter with: /// /// - `bits_quotient`: number of bits used for a quotient, aka `2^bits_quotient` slots will be /// allocated /// - `bits_remainder`: number of bits used for the remainder, so every slot will require /// `bits_remainder + 3` bits of storage /// /// and a default hasher. pub fn with_params(bits_quotient: usize, bits_remainder: usize) -> Self { let buildhasher = BuildHasherDefault::<DefaultHasher>::default(); QuotientFilter::with_params_and_hash(bits_quotient, bits_remainder, buildhasher) } } impl<B> QuotientFilter<B> where B: BuildHasher + Clone + Eq, { /// Create new quotient filter with: /// /// - `bits_quotient`: number of bits used for a quotient, aka `2^bits_quotient` slots will be /// allocated /// - `bits_remainder`: number of bits used for the remainder, so every slot will require /// `bits_remainder + 3` bits of storage /// - `buildhasher`: hash implementation pub fn with_params_and_hash( bits_quotient: usize, bits_remainder: usize, buildhasher: B, ) -> Self { assert!( (bits_remainder > 0) && (bits_remainder <= size_of::<usize>() * 8), "bits_remainder ({}) must be greater than 0 and smaller or equal than {}", bits_remainder, size_of::<usize>() * 8, ); assert!( bits_quotient > 0, "bits_quotient ({}) must be greater than 0", bits_quotient, ); assert!( bits_remainder + bits_quotient <= 64, "bits_remainder ({}) + bits_quotient ({}) must be smaller or equal than 64", bits_remainder, bits_quotient, ); let len = 1 << bits_quotient; Self { 
is_occupied: FixedBitSet::with_capacity(len), is_continuation: FixedBitSet::with_capacity(len), is_shifted: FixedBitSet::with_capacity(len), remainders: IntVector::with_fill(bits_remainder, len as u64, 0), bits_quotient, buildhasher, n_elements: 0, } } /// Number of bits used for addressing slots. pub fn bits_quotient(&self) -> usize { self.bits_quotient } /// Number of bits stored as fingeprint information. pub fn bits_remainder(&self) -> usize { self.remainders.element_bits() } fn calc_quotient_remainder<T>(&self, obj: &T) -> (usize, usize) where T: Hash, { let bits_remainder = self.bits_remainder(); let mut hasher = self.buildhasher.build_hasher(); obj.hash(&mut hasher); let fingerprint = hasher.finish(); let bits_trash = 64 - bits_remainder - self.bits_quotient; let trash = if bits_trash > 0 { (fingerprint >> (64 - bits_trash)) << (64 - bits_trash) } else { 0 }; let fingerprint_clean = fingerprint - trash; let quotient = fingerprint_clean >> bits_remainder; let remainder = fingerprint_clean - (quotient << bits_remainder); (quotient as usize, remainder as usize) } fn decr(&self, pos: &mut usize) { *pos = if *pos == 0 { self.is_occupied.len() - 1 } else { *pos - 1 }; } fn incr(&self, pos: &mut usize) { *pos = if *pos == self.is_occupied.len() - 1 { 0 } else { *pos + 1 } } fn scan( &self, quotient: usize, remainder: usize, on_insert: bool, ) -> (bool, usize, bool, usize) { let run_exists = self.is_occupied[quotient]; if (!run_exists) && (!on_insert) { // fast-path for query, since we don't need to find the correct position for the // insertion process return (run_exists, quotient, run_exists, quotient); } // walk back to find the beginning of the cluster let mut b = quotient; while self.is_shifted[b] { self.decr(&mut b); } // walk forward to find the actual start of the run let mut s = b; while b != quotient { // invariant: `s` poins to first slot of bucket `b` // skip all elements in the current run loop { self.incr(&mut s); if !self.is_continuation[s] { break; } 
} // find the next occupied bucket loop { self.incr(&mut b); if self.is_occupied[b] || ((b == quotient) && on_insert) { break; } } } // `s` now points to the first remainder in bucket at `quotient` // search of remainder within the run let start_of_run = s; if run_exists { loop { let r = self.remainders.get(s as u64); if r == remainder { return (run_exists, s, run_exists, start_of_run); } if r > remainder { // remainders are sorted within run break; } self.incr(&mut s); if !self.is_continuation[s] { break; } } } (false, s, run_exists, start_of_run) } } impl<B> Filter for QuotientFilter<B> where B: BuildHasher + Clone + Eq, { type InsertErr = QuotientFilterFull; fn clear(&mut self) { self.is_occupied.clear(); self.is_continuation.clear(); self.is_shifted.clear(); self.remainders = IntVector::with_fill(self.remainders.element_bits(), self.remainders.len(), 0); self.n_elements = 0; } fn insert<T>(&mut self, t: &T) -> Result<(), Self::InsertErr> where T: Hash, { let (quotient, remainder) = self.calc_quotient_remainder(t); let (present, mut position, run_exists, start_of_run) = self.scan(quotient, remainder, true); // early exit if the element is already present if present { return Ok(()); } // we need to insert the element into the filter // error out if there is no space left if self.n_elements == self.is_occupied.len() { return Err(QuotientFilterFull); } // set up swap chain let mut current_is_continuation = self.is_continuation[position] || (run_exists && (position == start_of_run)); let mut current_remainder = self.remainders.get(position as u64); let mut current_used = self.is_occupied[position] || self.is_shifted[position]; // set current state self.remainders.set(position as u64, remainder); if position != start_of_run { // might be an append operation, ensure is_continuation and is_shifted are set self.is_continuation.set(position, true); } if position != quotient { // not at canonical slot self.is_shifted.set(position, true); } // run swap chain until nothing 
to do let start = position; while current_used { self.incr(&mut position); let next_is_continuation = self.is_continuation[position]; let next_remainder = self.remainders.get(position as u64); let next_used = self.is_occupied[position] || self.is_shifted[position]; self.is_shifted.set(position, true); self.is_continuation.set(position, current_is_continuation); self.remainders.set(position as u64, current_remainder); current_is_continuation = next_is_continuation; current_remainder = next_remainder; current_used = next_used; if position == start { panic!("infinite loop detected"); } } // mark canonical slot as occupied self.is_occupied.set(quotient, true); // done self.n_elements += 1; Ok(()) } fn is_empty(&self) -> bool { self.n_elements == 0 } fn len(&self) -> usize { self.n_elements } fn query<T>(&self, obj: &T) -> bool where T: Hash, { let (quotient, remainder) = self.calc_quotient_remainder(obj); let (present, _position, _run_exists, _start_of_run) = self.scan(quotient, remainder, false); present } } #[cfg(test)] mod tests { use super::QuotientFilter; use filters::Filter; #[test] #[should_panic(expected = "bits_quotient (0) must be greater than 0")] fn new_bits_quotient_0() { QuotientFilter::with_params(0, 16); } #[cfg(target_pointer_width = "32")] #[test] #[should_panic( expected = "bits_remainder (0) must be greater than 0 and smaller or equal than 32" )] fn new_bits_remainder_0() { QuotientFilter::with_params(3, 0); } #[cfg(target_pointer_width = "64")] #[test] #[should_panic( expected = "bits_remainder (0) must be greater than 0 and smaller or equal than 64" )] fn new_bits_remainder_0() { QuotientFilter::with_params(3, 0); } #[cfg(target_pointer_width = "32")] #[test] #[should_panic( expected = "bits_remainder (33) must be greater than 0 and smaller or equal than 32" )] fn new_bits_remainder_too_large() { QuotientFilter::with_params(3, 33); } #[cfg(target_pointer_width = "64")] #[test] #[should_panic( expected = "bits_remainder (65) must be greater than 0 
and smaller or equal than 64" )] fn new_bits_remainder_too_large() { QuotientFilter::with_params(3, 65); } #[test] #[should_panic( expected = "bits_remainder (5) + bits_quotient (60) must be smaller or equal than 64" )] fn new_too_many_bits() { QuotientFilter::with_params(60, 5); } #[test] fn new() { let qf = QuotientFilter::with_params(3, 16); assert!(qf.is_empty()); assert_eq!(qf.len(), 0); assert!(!qf.query(&13)); assert_eq!(qf.bits_quotient(), 3); assert_eq!(qf.bits_remainder(), 16); } #[test] fn insert() { let mut qf = QuotientFilter::with_params(3, 16); qf.insert(&13).unwrap(); assert!(!qf.is_empty()); assert_eq!(qf.len(), 1); assert!(qf.query(&13)); assert!(!qf.query(&42)); } #[test] fn double_insert() { let mut qf = QuotientFilter::with_params(3, 16); qf.insert(&13).unwrap(); qf.insert(&13).unwrap(); assert!(!qf.is_empty()); assert_eq!(qf.len(), 1); assert!(qf.query(&13)); assert!(!qf.query(&42)); } #[test] fn full() { let mut qf = QuotientFilter::with_params(3, 16); for i in 0..8 { qf.insert(&i).unwrap(); for j in 0..i { assert!(qf.query(&j), "Cannot find {} after inserting {}", j, i); } } assert!(qf.insert(&1000).is_err()); } #[test] fn clear() { let mut qf = QuotientFilter::with_params(3, 16); qf.insert(&13).unwrap(); qf.clear(); assert!(qf.is_empty()); assert_eq!(qf.len(), 0); assert!(!qf.query(&13)); assert_eq!(qf.bits_quotient(), 3); assert_eq!(qf.bits_remainder(), 16); } #[test] fn clone() { let mut qf1 = QuotientFilter::with_params(3, 16); qf1.insert(&13).unwrap(); let mut qf2 = qf1.clone(); qf2.insert(&42).unwrap(); assert_eq!(qf1.len(), 1); assert!(qf1.query(&13)); assert!(!qf1.query(&42)); assert_eq!(qf2.len(), 2); assert!(qf2.query(&13)); assert!(qf2.query(&42)); } }
extern crate structopt; use crate::exceptions::IOError; use std::path::PathBuf; use std::process::{Command, ExitStatus}; use std::fs; use structopt::StructOpt; #[derive(Clone, Debug, StructOpt)] #[structopt(name = "performance", about = "performance regression testing runner")] enum Performance { #[structopt(name = "measure")] Measure { #[structopt(parse(from_os_str))] #[structopt(short)] projects_dir: PathBuf, #[structopt(short)] branch_name: bool, }, #[structopt(name = "compare")] Compare { #[structopt(parse(from_os_str))] #[structopt(short)] results_dir: PathBuf, }, } #[derive(Debug, Clone)] struct Metric<'a> { name: &'a str, prepare: &'a str, cmd: &'a str, } impl Metric<'_> { fn outfile(&self, project: &str, branch: &str) -> String { [branch, "_", self.name, "_", project, ".json"].join("") } } // calls hyperfine via system command, and returns result of runs pub fn measure(projects_directory: &PathBuf, dbt_branch: &str) -> Result<Vec<ExitStatus>, IOError> { // to add a new metric to the test suite, simply define it in this list: // TODO read from some config file? let metrics: Vec<Metric> = vec![ Metric { name: "parse", prepare: "rm -rf target/", cmd: "dbt parse --no-version-check", }, ]; // run hyperfine on each target project in the directory fs::read_dir(projects_directory) .or_else(|e| Err(IOError::ReadErr(projects_directory.to_path_buf(), Some(e))))? .map(|entry| { metrics .clone() .into_iter() // for each entry-metric pair .map(|metric| { let ent = entry .as_ref() .or_else(|e| Err(IOError::ReadErr(projects_directory.to_path_buf(), None)))?; // TODO change to Some(e) let path: PathBuf = ent.path(); let project_name: &str = path .file_name() .ok_or_else(|| IOError::MissingFilenameErr(path.clone().to_path_buf())) .and_then(|x| x.to_str().ok_or_else(|| IOError::FilenameNotUnicodeErr(path.clone().to_path_buf())))?; Command::new("hyperfine") .current_dir(&path) // warms filesystem caches by running the command first without counting it. 
// alternatively we could clear them before each run .arg("--warmup") .arg("1") .arg("--prepare") .arg(metric.prepare) .arg([metric.cmd, " --profiles-dir ", "../../project_config/"].join("")) .arg("--export-json") .arg( ["../../results/", &metric.outfile(&project_name, &dbt_branch)].join(""), ) // this prevents capture dbt output. Noisy, but good for debugging when tests fail. .arg("--show-output") .status() // use spawn() here instead for more information .or_else(|e| Err(IOError::CommandErr(Some(e)))) }) .collect::<Vec<Result<ExitStatus, IOError>>>() }) .flatten() .collect() } refactor for simpler flow extern crate structopt; use crate::exceptions::IOError; use std::path::PathBuf; use std::process::{Command, ExitStatus}; use std::fs; use structopt::StructOpt; #[derive(Clone, Debug, StructOpt)] #[structopt(name = "performance", about = "performance regression testing runner")] enum Performance { #[structopt(name = "measure")] Measure { #[structopt(parse(from_os_str))] #[structopt(short)] projects_dir: PathBuf, #[structopt(short)] branch_name: bool, }, #[structopt(name = "compare")] Compare { #[structopt(parse(from_os_str))] #[structopt(short)] results_dir: PathBuf, }, } #[derive(Debug, Clone)] struct Metric<'a> { name: &'a str, prepare: &'a str, cmd: &'a str, } impl Metric<'_> { fn outfile(&self, project: &str, branch: &str) -> String { [branch, "_", self.name, "_", project, ".json"].join("") } } // calls hyperfine via system command, and returns result of runs pub fn measure(projects_directory: &PathBuf, dbt_branch: &str) -> Result<Vec<ExitStatus>, IOError> { // to add a new metric to the test suite, simply define it in this list: // TODO read from some config file? let metrics: Vec<Metric> = vec![ Metric { name: "parse", prepare: "rm -rf target/", cmd: "dbt parse --no-version-check", }, ]; fs::read_dir(projects_directory) .or_else(|e| Err(IOError::ReadErr(projects_directory.to_path_buf(), Some(e))))? 
.map(|entry| { let path = entry .or_else(|e| Err(IOError::ReadErr(projects_directory.to_path_buf(), Some(e))))? .path(); let project_name: String = path .file_name() .ok_or_else(|| IOError::MissingFilenameErr(path.clone().to_path_buf())) .and_then(|x| x.to_str().ok_or_else(|| IOError::FilenameNotUnicodeErr(path.clone().to_path_buf())))? .to_owned(); // each project-metric pair we will run let pairs = metrics .clone() .into_iter() .map(|metric| (path.clone(), project_name.clone(), metric)) .collect::<Vec<(PathBuf, String, Metric)>>(); Ok(pairs) }) .collect::<Result<Vec<Vec<(PathBuf, String, Metric)>>, IOError>>()? .concat() .into_iter() // run hyperfine on each pairing .map(|(path, project_name, metric)| { Command::new("hyperfine") .current_dir(&path) // warms filesystem caches by running the command first without counting it. // alternatively we could clear them before each run .arg("--warmup") .arg("1") .arg("--prepare") .arg(metric.prepare) .arg([metric.cmd, " --profiles-dir ", "../../project_config/"].join("")) .arg("--export-json") .arg( ["../../results/", &metric.outfile(&project_name, &dbt_branch)].join(""), ) // this prevents capture dbt output. Noisy, but good for debugging when tests fail. .arg("--show-output") .status() // use spawn() here instead for more information .or_else(|e| Err(IOError::CommandErr(Some(e)))) } ) .collect() }
use byteorder::*; use std::env; use std::fs::*; use std::io::{self, Read, Write, BufReader, BufWriter}; use std::path::*; use ordermap::OrderMap; use crusader::*; use formation::Formation; use super::Node; pub struct Cache { directory: PathBuf, } impl Cache { pub fn new(key: u64) -> Self { let digest = format!("{:x}", key); Cache { directory: env::temp_dir().join("cotli_helper").join(&digest), } } pub(super) fn write_to_cache(&self, formation: &Formation, node: &Node) { let path = self.path_to_cache_file(formation); self.try_write_to_cache(&path, node).unwrap(); } pub(super) fn load_from_cache<'a>( &self, formation: &Formation<'a>, crusaders: &'a [Crusader], ) -> Option<Node<'a>> { let file_path = self.path_to_cache_file(formation); if !file_path.join("search_data.toml").exists() { return None; } match self.try_load_from_cache(&file_path, crusaders) { Ok(x) => Some(x), Err(e) => { println!("Could not load from cache. Deleting directory. Error: {}", e); remove_dir_all(file_path).unwrap(); None } } } fn path_to_cache_file(&self, formation: &Formation) -> PathBuf { formation.placements().fold(self.directory.clone(), |path, (idx, crusader)| { let as_dir = format!("{}{:?}", idx, crusader.name); path.join(&as_dir) }) } fn try_write_to_cache(&self, path: &Path, node: &Node) -> io::Result<()> { create_dir_all(path)?; let mut file = BufWriter::new(File::create(path.join("search_data.toml"))?); file.write_u8(1)?; write_node(node, &mut file)?; Ok(()) } fn try_load_from_cache<'a>( &self, path: &Path, crusaders: &'a [Crusader], ) -> io::Result<Node<'a>> { let mut file = BufReader::new(File::open(path.join("search_data.toml"))?); let v = file.read_u8()?; if v == 1 { read_node(crusaders, &mut file) } else { Err(io::Error::new(io::ErrorKind::Other, "Invalid cache version")) } } } fn write_node<W: Write>(node: &Node, out: &mut W) -> io::Result<()> { out.write_f64::<NativeEndian>(node.highest_score_seen.into())?; out.write_f64::<NativeEndian>(node.total_score_seen.into())?; 
out.write_u32::<NativeEndian>(node.times_checked)?; out.write_u64::<NativeEndian>(node.children.len() as u64)?; for (&(idx, crusader), child) in &node.children { out.write_u8(idx as u8)?; out.write_u8(crusader.name as u8)?; write_node(child, out)?; } Ok(()) } fn read_node<'a, R: Read>(crusaders: &'a [Crusader], read: &mut R) -> io::Result<Node<'a>> { use std::mem::transmute; let highest_score_seen = read.read_f64::<NativeEndian>()?.into(); let total_score_seen = read.read_f64::<NativeEndian>()?.into(); let times_checked = read.read_u32::<NativeEndian>()?; let num_children = read.read_u64::<NativeEndian>()? as usize; let mut children = OrderMap::with_capacity(num_children); for _ in 0..num_children { let idx = read.read_u8()? as usize; let name = unsafe { transmute(read.read_u8()?) }; let crusader = crusaders.iter().find(|c| c.name == name).unwrap(); let node = read_node(crusaders, read)?; children.insert((idx, crusader), node); } Ok(Node { progress: Default::default(), highest_score_seen, total_score_seen, times_checked, children, }) } The cache does not use TOML use byteorder::*; use std::env; use std::fs::*; use std::io::{self, Read, Write, BufReader, BufWriter}; use std::path::*; use ordermap::OrderMap; use crusader::*; use formation::Formation; use super::Node; pub struct Cache { directory: PathBuf, } impl Cache { pub fn new(key: u64) -> Self { let digest = format!("{:x}", key); Cache { directory: env::temp_dir().join("cotli_helper").join(&digest), } } pub(super) fn write_to_cache(&self, formation: &Formation, node: &Node) { let path = self.path_to_cache_file(formation); self.try_write_to_cache(&path, node).unwrap(); } pub(super) fn load_from_cache<'a>( &self, formation: &Formation<'a>, crusaders: &'a [Crusader], ) -> Option<Node<'a>> { let file_path = self.path_to_cache_file(formation); if !file_path.join("search_data.dat").exists() { return None; } match self.try_load_from_cache(&file_path, crusaders) { Ok(x) => Some(x), Err(e) => { println!("Could not load 
from cache. Deleting directory. Error: {}", e); remove_dir_all(file_path).unwrap(); None } } } fn path_to_cache_file(&self, formation: &Formation) -> PathBuf { formation.placements().fold(self.directory.clone(), |path, (idx, crusader)| { let as_dir = format!("{}{:?}", idx, crusader.name); path.join(&as_dir) }) } fn try_write_to_cache(&self, path: &Path, node: &Node) -> io::Result<()> { create_dir_all(path)?; let mut file = BufWriter::new(File::create(path.join("search_data.dat"))?); file.write_u8(1)?; write_node(node, &mut file)?; Ok(()) } fn try_load_from_cache<'a>( &self, path: &Path, crusaders: &'a [Crusader], ) -> io::Result<Node<'a>> { let mut file = BufReader::new(File::open(path.join("search_data.dat"))?); let v = file.read_u8()?; if v == 1 { read_node(crusaders, &mut file) } else { Err(io::Error::new(io::ErrorKind::Other, "Invalid cache version")) } } } fn write_node<W: Write>(node: &Node, out: &mut W) -> io::Result<()> { out.write_f64::<NativeEndian>(node.highest_score_seen.into())?; out.write_f64::<NativeEndian>(node.total_score_seen.into())?; out.write_u32::<NativeEndian>(node.times_checked)?; out.write_u64::<NativeEndian>(node.children.len() as u64)?; for (&(idx, crusader), child) in &node.children { out.write_u8(idx as u8)?; out.write_u8(crusader.name as u8)?; write_node(child, out)?; } Ok(()) } fn read_node<'a, R: Read>(crusaders: &'a [Crusader], read: &mut R) -> io::Result<Node<'a>> { use std::mem::transmute; let highest_score_seen = read.read_f64::<NativeEndian>()?.into(); let total_score_seen = read.read_f64::<NativeEndian>()?.into(); let times_checked = read.read_u32::<NativeEndian>()?; let num_children = read.read_u64::<NativeEndian>()? as usize; let mut children = OrderMap::with_capacity(num_children); for _ in 0..num_children { let idx = read.read_u8()? as usize; let name = unsafe { transmute(read.read_u8()?) 
}; let crusader = crusaders.iter().find(|c| c.name == name).unwrap(); let node = read_node(crusaders, read)?; children.insert((idx, crusader), node); } Ok(Node { progress: Default::default(), highest_score_seen, total_score_seen, times_checked, children, }) }
/* Copyright 2014-2015 Zumero, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #![feature(box_syntax)] #![feature(associated_consts)] #![feature(vec_push_all)] #![feature(iter_arith)] extern crate bson; extern crate elmo; pub type Result<T> = elmo::Result<T>; extern crate sqlite3; struct IndexPrep { info: elmo::IndexInfo, stmt_insert: sqlite3::PreparedStatement, stmt_delete: sqlite3::PreparedStatement, } struct MyCollectionWriter { insert: sqlite3::PreparedStatement, delete: sqlite3::PreparedStatement, update: sqlite3::PreparedStatement, stmt_find_rowid: Option<sqlite3::PreparedStatement>, indexes: Vec<IndexPrep>, myconn: std::rc::Rc<MyConn>, } struct StatementBsonValueIterator { stmt: sqlite3::PreparedStatement, } impl StatementBsonValueIterator { fn iter_next(&mut self) -> Result<Option<elmo::Row>> { match try!(self.stmt.step().map_err(elmo::wrap_err)) { None => { Ok(None) }, Some(r) => { let b = r.column_blob(0).expect("NOT NULL"); let v = try!(bson::Document::from_bson(&b)); //println!("doc in row: {:?}", v); let v = bson::Value::BDocument(v); let row = elmo::Row { doc: v, pos: None, score: None, }; Ok(Some(row)) }, } } } impl Iterator for StatementBsonValueIterator { type Item = Result<elmo::Row>; fn next(&mut self) -> Option<Self::Item> { match self.iter_next() { Err(e) => { return Some(Err(e)); }, Ok(v) => { match v { None => { return None; }, Some(v) => { return Some(Ok(v)); } } }, } } } // TODO it is sad to have two completely distinct versions of // this iterator, one which owns the 
statement, and one which
// does not.

/// Iterator over rows of a prepared statement that it only BORROWS.
/// Each step decodes column 0 (a BSON blob) into an elmo::Row.
struct RefStatementBsonValueIterator<'a> {
    stmt: &'a mut sqlite3::PreparedStatement,
}

impl<'a> RefStatementBsonValueIterator<'a> {
    /// Step the statement once.  Ok(None) means the statement is exhausted;
    /// Ok(Some(row)) carries the decoded document with pos/score unset.
    fn iter_next(&mut self) -> Result<Option<elmo::Row>> {
        match try!(self.stmt.step().map_err(elmo::wrap_err)) {
            None => {
                Ok(None)
            },
            Some(r) => {
                // column 0 is the bson blob; schema declares it NOT NULL
                let b = r.column_blob(0).expect("NOT NULL");
                let v = try!(bson::Document::from_bson(&b));
                //println!("doc: {:?}", v);
                let v = bson::Value::BDocument(v);
                let row = elmo::Row {
                    doc: v,
                    pos: None,
                    score: None,
                };
                Ok(Some(row))
            },
        }
    }
}

impl<'a> Iterator for RefStatementBsonValueIterator<'a> {
    type Item = Result<elmo::Row>;

    // Adapts iter_next()'s Result<Option<_>> into Iterator's Option<Result<_>>:
    // an error becomes Some(Err(..)), exhaustion becomes None.
    fn next(&mut self) -> Option<Self::Item> {
        match self.iter_next() {
            Err(e) => {
                return Some(Err(e));
            },
            Ok(v) => {
                match v {
                    None => {
                        return None;
                    },
                    Some(v) => {
                        return Some(Ok(v));
                    }
                }
            },
        }
    }
}

// TODO std::iter::Empty?
/// Yields nothing.  Used as the reader for a collection that does not exist.
struct MyEmptyIterator;

impl Iterator for MyEmptyIterator {
    type Item = Result<elmo::Row>;
    fn next(&mut self) -> Option<Self::Item> {
        None
    }
}

/// A cursor over one collection.  If commit_on_drop is set, dropping the
/// reader commits the read transaction (see Drop impl later in this file).
struct MyCollectionReader {
    commit_on_drop: bool,
    seq: Box<Iterator<Item=Result<elmo::Row>>>,
    myconn: std::rc::Rc<MyConn>,

    // TODO need counts here
}

/// Read-only handle; in_tx records whether its transaction is still open.
struct MyReader {
    myconn: std::rc::Rc<MyConn>,
    in_tx: bool,
}

/// Read-write handle; in_tx records whether its transaction is still open.
struct MyWriter {
    myconn: std::rc::Rc<MyConn>,
    in_tx: bool,
}

/// Shared wrapper around the underlying SQLite connection.
struct MyConn {
    conn: sqlite3::DatabaseConnection,
}

/// The connection object handed to callers; shares MyConn via Rc.
struct MyPublicConn {
    myconn: std::rc::Rc<MyConn>,
}

/// Step a statement that is expected to produce NO rows (INSERT/UPDATE/DELETE).
/// Getting a row back is reported as an error.
fn step_done(stmt: &mut sqlite3::PreparedStatement) -> Result<()> {
    match try!(stmt.step().map_err(elmo::wrap_err)) {
        Some(_) => {
            Err(elmo::Error::Misc(String::from("step_done() returned a row")))
        },
        None => {
            Ok(())
        },
    }
}

/// Assert that the statement's change count equals `shouldbe`.
fn verify_changes(stmt: &sqlite3::PreparedStatement, shouldbe: u64) -> Result<()> {
    if stmt.changes() == shouldbe {
        Ok(())
    } else {
        // TODO or should this be an assert?
        Err(elmo::Error::Misc(String::from("changes() is wrong")))
    }
}

/// Pair each key value with the direction flag (true = backward) taken from
/// the normalized index spec, positionally.
fn copy_dirs_from_normspec_to_vals(normspec: &Vec<(String, elmo::IndexType)>, vals: Vec<bson::Value>) -> Vec<(bson::Value, bool)> {
    // TODO if normspec.len() < vals.len() then panic?
    let mut a = Vec::new();
    for (i,v) in vals.into_iter().enumerate() {
        let neg = normspec[i].1 == elmo::IndexType::Backward;
        a.push((v, neg));
    }
    a
}

/// SQLite table name backing a collection's documents.
fn get_table_name_for_collection(db: &str, coll: &str) -> String {
    // TODO cleanse?
    format!("docs.{}.{}", db, coll)
}

/// SQLite table name backing one index of a collection.
fn get_table_name_for_index(db: &str, coll: &str, name: &str) -> String {
    // TODO cleanse?
    format!("ndx.{}.{}.{}", db, coll, name)
}

/// Compute every index-key tuple that `new_doc` contributes to an index
/// described by (normspec, weights, options), appending them to `entries`.
/// Handles sparse indexes, multikey (array) expansion, and text indexes
/// (weights is Some for a text index).
fn get_index_entries(new_doc: &bson::Document, normspec: &Vec<(String, elmo::IndexType)>, weights: &Option<std::collections::HashMap<String,i32>>, options: &bson::Document, entries: &mut Vec<Vec<(bson::Value,bool)>>) -> Result<()> {
    // Extract the raw value for each indexed path, pairing it with the
    // backward flag.  For a sparse index, paths that are absent
    // (BUndefined) are skipped entirely.
    fn find_index_entry_vals(normspec: &Vec<(String, elmo::IndexType)>, new_doc: &bson::Document, sparse: bool) -> Vec<(bson::Value,bool)> {
        //println!("find_index_entry_vals: sparse = {:?}", sparse);
        let mut r = Vec::new();
        for t in normspec {
            let k = &t.0;
            let typ = t.1;
            // TODO convert this to use walk_path()
            let mut v = new_doc.find_path(k);

            // now we replace any BUndefined with BNull. this seems, well,
            // kinda wrong, as it effectively encodes the index entries to
            // contain information that is slightly incorrect, since BNull
            // means "it was present and explicitly null", whereas BUndefined
            // means "it was absent". Still, this appears to be the exact
            // behavior of Mongo. Note that this only affects index entries.
            // The matcher can and must still distinguish between null and
            // undefined.
            let keep =
                if sparse {
                    match v {
                        bson::Value::BUndefined => false,
                        _ => true,
                    }
                } else {
                    true
                };
            if keep {
                v.replace_undefined();
                let neg = elmo::IndexType::Backward == typ;
                r.push((v,neg));
            }
        }
        r
    }

    // TODO what should the name of this func actually be?
    // For a text index: split string `s` into (deduped) tokens and push one
    // entry per token, where the token entry is [word, weight] appended to
    // the non-text prefix `vals`.
    fn q(vals: &Vec<(bson::Value, bool)>, w: i32, s: &str, entries: &mut Vec<Vec<(bson::Value,bool)>>) {
        // TODO tokenize properly
        let a = s.split(" ");
        let a = a.into_iter().collect::<std::collections::HashSet<_>>();
        for s in a {
            let s = String::from(s);
            let v = bson::Value::BArray(bson::Array {items: vec![bson::Value::BString(s), bson::Value::BInt32(w)]});
            // TODO clone is ugly
            let mut vals = vals.clone();
            vals.push((v, false));
            entries.push(vals);
        }
    }

    // If this is a text index (weights is Some), expand the weighted string
    // fields (or, for "$**", every string in the doc) into token entries.
    // Otherwise just record `vals` as a single entry.
    fn maybe_text(vals: &Vec<(bson::Value, bool)>, new_doc: &bson::Document, weights: &Option<std::collections::HashMap<String,i32>>, entries: &mut Vec<Vec<(bson::Value,bool)>>) {
        //println!("in maybe_text: {:?}", vals);
        match weights {
            &Some(ref weights) => {
                for k in weights.keys() {
                    if k == "$**" {
                        // wildcard text index: index every string anywhere in the doc
                        let mut a = Vec::new();
                        new_doc.find_all_strings(& mut a);
                        let w = weights[k];
                        for s in a {
                            q(vals, w, s, entries);
                        };
                    } else {
                        // TODO convert this to use walk_path()
                        match new_doc.find_path(k) {
                            bson::Value::BUndefined => (),
                            v => {
                                match v {
                                    bson::Value::BString(s) => q(&vals, weights[k], &s, entries),
                                    bson::Value::BArray(ba) => {
                                        // array of strings: tokenize each distinct string element
                                        let a = ba.items.into_iter().collect::<std::collections::HashSet<_>>();
                                        for v in a {
                                            match v {
                                                bson::Value::BString(s) => q(&vals, weights[k], &s, entries),
                                                _ => (),
                                            }
                                        }
                                    },
                                    _ => (),
                                }
                            },
                        }
                    }
                }
            },
            &None => {
                // not a text index: vals itself is the entry
                // TODO clone is ugly
                //println!("in maybe_text, pushing to entries: {:?}", vals);
                entries.push(vals.clone());
            },
        }
    }

    // Return a copy of `vals` with element `i` replaced by `v`.
    fn replace_array_element<T:Clone>(vals: &Vec<T>, i: usize, v: T) -> Vec<T> {
        // TODO horrifying clone
        let mut v2 = vals.clone();
        v2[i] = v;
        v2
    }

    // Multikey expansion: emit the entry as-is, then for every array-valued
    // key, recurse with that key replaced by each distinct array element.
    fn maybe_array(vals: &Vec<(bson::Value, bool)>, new_doc: &bson::Document, weights: &Option<std::collections::HashMap<String,i32>>, entries: &mut Vec<Vec<(bson::Value,bool)>>) {
        //println!("in maybe_array: {:?}", vals);

        // first do the index entries for the document without considering arrays
        maybe_text(vals, new_doc, weights, entries);

        // now, if any of the vals in the key are an array, we need
        // to generate more index entries for this document, one
        // for each item in the array. Mongo calls this a
        // multikey index.
        for i in 0 .. vals.len() {
            let t = &vals[i];
            let v = &t.0;
            let typ = t.1;
            match v {
                &bson::Value::BArray(ref ba) => {
                    let a = ba.items.iter().collect::<std::collections::HashSet<_>>();
                    for av in a {
                        // TODO clone is ugly
                        let replaced = replace_array_element(vals, i, (av.clone(), typ));
                        //println!("replaced for index: {:?}", replaced);
                        maybe_array(&replaced, new_doc, weights, entries);
                    }
                },
                _ => ()
            }
        }
    }

    let sparse = match options.get("sparse") {
        Some(&bson::Value::BBoolean(b)) => b,
        _ => false,
    };
    let vals = find_index_entry_vals(normspec, new_doc, sparse);
    maybe_array(&vals, new_doc, weights, entries);
    //println!("entries: {:?}", entries);
    Ok(())
}

/// Decode a row of the "indexes" catalog table
/// (ndxName, spec, options, dbName, collName) into an IndexInfo.
fn get_index_info_from_row(r: &sqlite3::ResultRow) -> Result<elmo::IndexInfo> {
    let name = r.column_text(0).expect("NOT NULL");
    let spec = try!(bson::Document::from_bson(&r.column_slice(1).expect("NOT NULL")));
    let options = try!(bson::Document::from_bson(&r.column_slice(2).expect("NOT NULL")));
    let db = r.column_text(3).expect("NOT NULL");
    let coll = r.column_text(4).expect("NOT NULL");
    let info = elmo::IndexInfo {
        db: String::from(db),
        coll: String::from(coll),
        name: String::from(name),
        spec: spec,
        options: options,
    };
    Ok(info)
}

/// Decode a row of the "collections" catalog table
/// (dbName, collName, options) into a CollectionInfo.
fn get_collection_info_from_row(r: &sqlite3::ResultRow) -> Result<elmo::CollectionInfo> {
    let db = r.column_text(0).expect("NOT NULL");
    let coll = r.column_text(1).expect("NOT NULL");
    let options = try!(bson::Document::from_bson(&r.column_slice(2).expect("NOT NULL")));
    let info = elmo::CollectionInfo {
        db: String::from(db),
        coll: String::from(coll),
        options: options,
    };
    Ok(info)
}

/// Bind (key blob, doc rowid) into a prepared index INSERT, execute it,
/// verify exactly one row changed, and reset the statement for reuse.
fn index_insert_step(stmt: &mut sqlite3::PreparedStatement, k: Vec<u8>, doc_rowid: i64) -> Result<()> {
    stmt.clear_bindings();
    try!(stmt.bind_blob(1, &k).map_err(elmo::wrap_err));
    try!(stmt.bind_int64(2, doc_rowid).map_err(elmo::wrap_err));
    try!(step_done(stmt));
    try!(verify_changes(stmt, 1));
    stmt.reset();
    Ok(())
}

impl MyConn {
    /// Look up a collection's options blob in the "collections" catalog.
    /// Ok(None) means the collection does not exist.
    fn get_collection_options(&self, db: &str, coll: &str) -> Result<Option<bson::Document>> {
        let mut stmt = try!(self.conn.prepare("SELECT options FROM \"collections\" WHERE dbName=? AND collName=?").map_err(elmo::wrap_err));
        try!(stmt.bind_text(1, db).map_err(elmo::wrap_err));
        try!(stmt.bind_text(2, coll).map_err(elmo::wrap_err));
        // TODO step_row() ?
        match try!(stmt.step().map_err(elmo::wrap_err)) {
            None => Ok(None),
            Some(r) => {
                let v = try!(bson::Document::from_bson(&r.column_slice(0).expect("NOT NULL")));
                Ok(Some(v))
            },
        }
    }

    /// Build a prepared statement implementing a non-text index scan for
    /// `plan`: a SELECT DISTINCT over the docs table joined to the index
    /// table, with the WHERE operators chosen from plan.bounds and the
    /// encoded key bounds already bound.
    fn get_stmt_for_index_scan(myconn: &MyConn, plan: elmo::QueryPlan) -> Result<sqlite3::PreparedStatement> {
        //println!("using plan in index scan: {:?}", plan);
        let tbl_coll = get_table_name_for_collection(&plan.ndx.db, &plan.ndx.coll);
        let tbl_ndx = get_table_name_for_index(&plan.ndx.db, &plan.ndx.coll, &plan.ndx.name);

        // TODO the following is way too heavy. all we need is the index types
        // so we can tell if they're supposed to be backwards or not.
        let (normspec, _weights) = try!(elmo::get_normalized_spec(&plan.ndx));

        // Treat the key blob as a big-endian integer and add one, so that
        // [kmin, add_one(kmin)) covers exactly the keys with prefix kmin.
        fn add_one(ba: &Vec<u8>) -> Vec<u8> {
            let mut a = ba.clone();
            let mut i = a.len() - 1;
            loop {
                if a[i] == 255 {
                    // carry into the next more-significant byte
                    a[i] = 0;
                    if i == 0 {
                        panic!("TODO handle case where add_one to binary array overflows the first byte");
                    } else {
                        i = i - 1;
                    }
                } else {
                    a[i] = a[i] + 1;
                    break;
                }
            }
            a
        }

        // note that one of the reasons we need to do DISTINCT here is because a
        // single index in a single document can produce multiple index entries,
        // because, for example, when a value is an array, we don't just index
        // the array as a value, but we also index each of its elements.
        //
        // TODO it would be nice if the DISTINCT here was happening on the rowids, not on the blobs

        // two-bound scan with pre-encoded keys
        let f_twok = |kmin: Vec<u8>, kmax: Vec<u8>, op1: &str, op2: &str| -> Result<sqlite3::PreparedStatement> {
            let sql = format!("SELECT DISTINCT d.bson FROM \"{}\" d INNER JOIN \"{}\" i ON (d.did = i.doc_rowid) WHERE k {} ? AND k {} ?", tbl_coll, tbl_ndx, op1, op2);
            //println!("using sql: {}", sql);
            let mut stmt = try!(myconn.conn.prepare(&sql).map_err(elmo::wrap_err));
            try!(stmt.bind_blob(1, &kmin).map_err(elmo::wrap_err));
            try!(stmt.bind_blob(2, &kmax).map_err(elmo::wrap_err));
            Ok(stmt)
        };
        // two-bound scan: encode both query keys, then delegate to f_twok
        let f_two = |minvals: elmo::QueryKey, maxvals: elmo::QueryKey, op1: &str, op2: &str| -> Result<sqlite3::PreparedStatement> {
            let kmin = bson::Value::encode_multi_for_index(copy_dirs_from_normspec_to_vals(&normspec, minvals));
            let kmax = bson::Value::encode_multi_for_index(copy_dirs_from_normspec_to_vals(&normspec, maxvals));
            f_twok(kmin, kmax, op1, op2)
        };
        // one-bound scan
        let f_one = |vals: elmo::QueryKey, op: &str| -> Result<sqlite3::PreparedStatement> {
            let k = bson::Value::encode_multi_for_index(copy_dirs_from_normspec_to_vals(&normspec, vals));
            let sql = format!("SELECT DISTINCT d.bson FROM \"{}\" d INNER JOIN \"{}\" i ON (d.did = i.doc_rowid) WHERE k {} ?", tbl_coll, tbl_ndx, op);
            let mut stmt = try!(myconn.conn.prepare(&sql).map_err(elmo::wrap_err));
            try!(stmt.bind_blob(1, &k).map_err(elmo::wrap_err));
            Ok(stmt)
        };
        match plan.bounds {
            // text plans are handled by get_text_index_scan_reader, never here
            elmo::QueryBounds::Text(_,_) => unreachable!(),
            elmo::QueryBounds::GT(vals) => f_one(vals, ">"),
            elmo::QueryBounds::LT(vals) => f_one(vals, "<"),
            elmo::QueryBounds::GTE(vals) => f_one(vals, ">="),
            elmo::QueryBounds::LTE(vals) => f_one(vals, "<="),
            elmo::QueryBounds::GT_LT(minvals, maxvals) => f_two(minvals, maxvals, ">", "<"),
            elmo::QueryBounds::GTE_LT(minvals, maxvals) => f_two(minvals, maxvals, ">=", "<"),
            elmo::QueryBounds::GT_LTE(minvals, maxvals) => f_two(minvals, maxvals, ">", "<="),
            elmo::QueryBounds::GTE_LTE(minvals, maxvals) => f_two(minvals, maxvals, ">=", "<="),
            elmo::QueryBounds::EQ(vals) => {
                // EQ is implemented as the half-open prefix range [k, add_one(k))
                let kmin = bson::Value::encode_multi_for_index(copy_dirs_from_normspec_to_vals(&normspec, vals));
                let kmax = add_one(&kmin);
                f_twok(kmin, kmax, ">=", "<")
            },
        }
    }

    /// Reader that walks the entire docs table for (db, coll), no index.
    fn get_table_scan_reader(myconn: std::rc::Rc<MyConn>, commit_on_drop: bool, db: &str, coll: &str) -> Result<MyCollectionReader> {
        let tbl = get_table_name_for_collection(db, coll);
        let stmt = try!(myconn.conn.prepare(&format!("SELECT bson FROM \"{}\"", tbl)).map_err(elmo::wrap_err));
        // TODO keep track of total keys examined, etc.
        // StatementBsonValueIterator is the statement-OWNING sibling of
        // RefStatementBsonValueIterator (defined earlier in this file).
        let seq = StatementBsonValueIterator {
            stmt: stmt,
        };
        let rdr = MyCollectionReader {
            commit_on_drop: commit_on_drop,
            seq: box seq,
            myconn: myconn,
        };
        Ok(rdr)
    }

    /// Reader backed by a non-text index scan built from `plan`.
    fn get_nontext_index_scan_reader(myconn: std::rc::Rc<MyConn>, commit_on_drop: bool, plan: elmo::QueryPlan) -> Result<MyCollectionReader> {
        let stmt = try!(Self::get_stmt_for_index_scan(&myconn, plan));
        // TODO keep track of total keys examined, etc.
        let seq = StatementBsonValueIterator {
            stmt: stmt,
        };
        let rdr = MyCollectionReader {
            commit_on_drop: commit_on_drop,
            seq: box seq,
            myconn: myconn,
        };
        Ok(rdr)
    }

    /// Reader for a $text query: look up each term in the text index,
    /// combine positive/negative matches, verify phrases against the actual
    /// documents, and score each surviving doc by summing its term weights.
    /// Results are fully materialized before the reader is returned.
    fn get_text_index_scan_reader(myconn: std::rc::Rc<MyConn>, commit_on_drop: bool, ndx: &elmo::IndexInfo, eq: elmo::QueryKey, terms: Vec<elmo::TextQueryTerm>) -> Result<MyCollectionReader> {
        let tbl_coll = get_table_name_for_collection(&ndx.db, &ndx.coll);
        let tbl_ndx = get_table_name_for_index(&ndx.db, &ndx.coll, &ndx.name);
        let (normspec, weights) = try!(elmo::get_normalized_spec(&ndx));
        let weights = match weights {
            None => return Err(elmo::Error::Misc(String::from("non text index"))),
            Some(w) => w,
        };

        // Find all (doc_rowid, weight) pairs for one word: text index keys
        // end in [word, weight], so range over weight 0..100000.
        fn lookup(stmt: &mut sqlite3::PreparedStatement, vals: &Vec<(bson::Value, bool)>, word: &str) -> Result<Vec<(i64,i32)>> {
            // TODO if we just search for the word without the weight, we could
            // use the add_one trick from EQ. Probably need key encoding of an array
            // to omit the array length. See comment there.
            let vmin = bson::Value::BArray(bson::Array {items: vec![bson::Value::BString(String::from(word)), bson::Value::BInt32(0)]});
            let vmax = bson::Value::BArray(bson::Array {items: vec![bson::Value::BString(String::from(word)), bson::Value::BInt32(100000)]});
            let mut minvals = vals.clone();
            minvals.push((vmin,false));
            let mut maxvals = vals.clone();
            maxvals.push((vmax,false));
            let kmin = bson::Value::encode_multi_for_index(minvals);
            let kmax = bson::Value::encode_multi_for_index(maxvals);
            stmt.clear_bindings();
            try!(stmt.bind_blob(1, &kmin).map_err(elmo::wrap_err));
            try!(stmt.bind_blob(2, &kmax).map_err(elmo::wrap_err));
            let mut entries = Vec::new();
            loop {
                match try!(stmt.step().map_err(elmo::wrap_err)) {
                    None => break,
                    Some(row) => {
                        let k = row.column_slice(0).expect("NOT NULL");
                        // the weight is recoverable from the encoded key itself
                        let w = try!(bson::Value::get_weight_from_index_entry(k));
                        let did = row.column_int64(1);
                        entries.push((did,w));
                    },
                }
            }
            stmt.reset();
            Ok(entries)
        };

        let vals = copy_dirs_from_normspec_to_vals(&normspec, eq);
        let sql = format!("SELECT k, doc_rowid FROM \"{}\" i WHERE k > ? AND k < ?", tbl_ndx);
        let mut stmt = try!(myconn.conn.prepare(&sql).map_err(elmo::wrap_err));
        // one (term, entries) pair per query term
        let mut found = Vec::new();
        for term in &terms {
            let entries = match term {
                &elmo::TextQueryTerm::Word(neg, ref s) => {
                    let entries = try!(lookup(&mut stmt, &vals, &s));
                    entries
                },
                &elmo::TextQueryTerm::Phrase(neg, ref s) => {
                    // a phrase matches via its individual words first;
                    // exact containment is verified later by check_phrase
                    // TODO tokenize properly
                    let words = s.split(" ");
                    let mut entries = Vec::new();
                    for w in words {
                        entries.push_all(&try!(lookup(&mut stmt, &vals, w)));
                    }
                    entries
                },
            };
            let v = (term, entries);
            found.push(v);
        };

        // True if any weighted field of `doc` contains substring `p`.
        fn contains_phrase(weights: &std::collections::HashMap<String, i32>, doc: &bson::Value, p: &str) -> bool {
            for k in weights.keys() {
                let found =
                    // TODO convert this to use walk_path()
                    match doc.find_path(k) {
                        bson::Value::BUndefined => false,
                        v => match v {
                            bson::Value::BString(s) => s.find(p).is_some(),
                            _ => false,
                        },
                    };
                if found {
                    return true;
                }
            }
            return false;
        }

        // True if `doc` satisfies every phrase term (word terms always pass
        // here; they were already filtered by the index lookup).
        fn check_phrase(terms: &Vec<elmo::TextQueryTerm>, weights: &std::collections::HashMap<String, i32>, doc: &bson::Value) -> bool {
            for term in terms {
                let b = match term {
                    &elmo::TextQueryTerm::Word(neg, ref s) => true,
                    &elmo::TextQueryTerm::Phrase(neg, ref s) => {
                        let has = contains_phrase(weights, doc, s);
                        if neg {
                            !has
                        } else {
                            has
                        }
                    },
                };
                if !b {
                    return false;
                }
            }
            return true;
        }

        // split matches into positive and negated sets
        let mut pos_entries = Vec::new();
        let mut neg_entries = Vec::new();
        for e in found {
            let (term, entries) = e;
            match term {
                &elmo::TextQueryTerm::Word(neg, ref s) => {
                    if neg {
                        neg_entries.push_all(&entries);
                    } else {
                        pos_entries.push_all(&entries);
                    }
                },
                &elmo::TextQueryTerm::Phrase(neg, ref s) => {
                    if neg {
                        // TODO probably should not negate a doc just because it contains one of the words in a negated phrase
                        // neg_entries.push_all(&entries);
                    } else {
                        pos_entries.push_all(&entries);
                    }
                },
            };
        }
        // drop any positive match whose doc also matched a negated word
        let neg_docids = neg_entries.into_iter().map(|t| t.0).collect::<std::collections::HashSet<_>>();
        let mut remaining = Vec::new();
        for t in pos_entries {
            let (did, w) = t;
            if !neg_docids.contains(&did) {
                remaining.push((did, w));
            }
        }
        // gather the matched term weights per document rowid
        let mut doc_weights: std::collections::HashMap<i64, Vec<i32>> = std::collections::HashMap::new();
        for t in remaining {
            let (did, w) = t;
            if doc_weights.contains_key(&did) {
                let v = doc_weights.get_mut(&did).expect("just checked this");
                v.push(w);
            } else {
                doc_weights.insert(did, vec![w]);
            }
        }
        // fetch each candidate doc, verify phrases, compute its score
        let sql = format!("SELECT bson FROM \"{}\" WHERE did=?", tbl_coll);
        let mut stmt = try!(myconn.conn.prepare(&sql).map_err(elmo::wrap_err));
        let mut res = Vec::new();
        for (did, cur_weights) in doc_weights {
            try!(stmt.bind_int64(1, did).map_err(elmo::wrap_err));
            {
                // scope the borrow of stmt so reset() below is allowed
                let rdr = RefStatementBsonValueIterator {
                    stmt: &mut stmt,
                };
                for r in rdr {
                    let mut r = try!(r);
                    let keep = check_phrase(&terms, &weights, &r.doc);
                    if keep {
                        // TODO this is not the way mongo does this calculation
                        let score = cur_weights.iter().sum::<i32>() as f64;
                        r.score = Some(score);
                        res.push(Ok(r));
                    }
                }
            }
            stmt.reset();
        }
        let rdr = MyCollectionReader {
            commit_on_drop: commit_on_drop,
            seq: box res.into_iter(),
            myconn: myconn,
        };
        Ok(rdr)
    }

    /// Dispatch to the right reader: empty iterator if the collection does
    /// not exist; text or non-text index scan if a plan was provided;
    /// otherwise a full table scan.
    fn get_collection_reader(&self, myconn: std::rc::Rc<MyConn>, commit_on_drop: bool, db: &str, coll: &str, plan: Option<elmo::QueryPlan>) -> Result<MyCollectionReader> {
        match try!(self.get_collection_options(db, coll)) {
            None => {
                let rdr = MyCollectionReader {
                    commit_on_drop: commit_on_drop,
                    seq: box MyEmptyIterator,
                    myconn: myconn,
                };
                Ok(rdr)
            },
            Some(_) => {
                match plan {
                    Some(plan) => {
                        match plan.bounds {
                            elmo::QueryBounds::Text(eq,terms) => {
                                let rdr = try!(Self::get_text_index_scan_reader(myconn, commit_on_drop, &plan.ndx, eq, terms));
                                return Ok(rdr);
                            },
                            _ => {
                                let rdr = try!(Self::get_nontext_index_scan_reader(myconn, commit_on_drop, plan));
                                return Ok(rdr);
                            },
                        }
                    },
                    None => {
                        let rdr = try!(Self::get_table_scan_reader(myconn, commit_on_drop, db, coll));
                        return Ok(rdr);
                    },
                };
            },
        }
    }

    /// Fetch one index's metadata from the catalog; Ok(None) if absent.
    fn get_index_info(&self, db: &str, coll: &str, name: &str) -> Result<Option<elmo::IndexInfo>> {
        // TODO DRY this string, below
        let mut stmt = try!(self.conn.prepare("SELECT ndxName, spec, options, dbName, collName FROM \"indexes\" WHERE dbName=? AND collName=? AND ndxName=?").map_err(elmo::wrap_err));
        try!(stmt.bind_text(1, db).map_err(elmo::wrap_err));
        try!(stmt.bind_text(2, coll).map_err(elmo::wrap_err));
        try!(stmt.bind_text(3, name).map_err(elmo::wrap_err));
        match try!(stmt.step().map_err(elmo::wrap_err)) {
            None => Ok(None),
            Some(row) => {
                let info = try!(get_index_info_from_row(&row));
                Ok(Some(info))
            },
        }
    }

    /// List every index across all collections.
    fn base_list_indexes(&self) -> Result<Vec<elmo::IndexInfo>> {
        // TODO DRY this string, above
        let mut stmt = try!(self.conn.prepare("SELECT ndxName, spec, options, dbName, collName FROM \"indexes\"").map_err(elmo::wrap_err));
        let mut v = Vec::new();
        loop {
            match try!(stmt.step().map_err(elmo::wrap_err)) {
                None => break,
                Some(row) => {
                    let info = try!(get_index_info_from_row(&row));
                    v.push(info);
                },
            }
        }
        Ok(v)
    }

    /// List every collection, ordered by collection name.
    fn base_list_collections(&self) -> Result<Vec<elmo::CollectionInfo>> {
        let mut stmt = try!(self.conn.prepare("SELECT dbName, collName, options FROM \"collections\" ORDER BY collName ASC").map_err(elmo::wrap_err));
        let mut v = Vec::new();
        loop {
            match try!(stmt.step().map_err(elmo::wrap_err)) {
                None => break,
                Some(row) => {
                    let info = try!(get_collection_info_from_row(&row));
                    v.push(info);
                },
            }
        }
        Ok(v)
    }
}

impl MyCollectionWriter {
    /// Find the docs-table rowid for the document whose _id equals `v`,
    /// via the prepared _id_ index lookup (None if that index is absent).
    fn find_rowid(&mut self, v: &bson::Value) -> Result<Option<i64>> {
        match self.stmt_find_rowid {
            None => Ok(None),
            Some(ref mut stmt) => {
                stmt.clear_bindings();
                let ba = bson::Value::encode_one_for_index(v, false);
                try!(stmt.bind_blob(1, &ba).map_err(elmo::wrap_err));
                let r = match try!(stmt.step().map_err(elmo::wrap_err)) {
                    None => Ok(None),
                    Some(r) => {
                        let rowid = r.column_int64(0);
                        Ok(Some(rowid))
                    },
                };
                stmt.reset();
                r
            },
        }
    }

    /// Remove all index entries pointing at `rowid`, in every index.
    fn update_indexes_delete(indexes: &mut Vec<IndexPrep>, rowid: i64) -> Result<()> {
        for t in indexes {
            t.stmt_delete.clear_bindings();
            try!(t.stmt_delete.bind_int64(1, rowid).map_err(elmo::wrap_err));
            try!(step_done(&mut t.stmt_delete));
            t.stmt_delete.reset();
        }
        Ok(())
    }

    /// Insert index entries for document `v` (at `rowid`) into every index.
    fn update_indexes_insert(indexes: &mut Vec<IndexPrep>, rowid: i64, v: &bson::Document) -> Result<()> {
        for t in indexes {
            let (normspec, weights) = try!(elmo::get_normalized_spec(&t.info));
            let mut entries = Vec::new();
            try!(get_index_entries(&v, &normspec, &weights, &t.info.options, &mut entries));
            // dedupe: a multikey doc can generate the same entry twice
            let entries = entries.into_iter().collect::<std::collections::HashSet<_>>();
            for vals in entries {
                let k = bson::Value::encode_multi_for_index(vals);
                try!(index_insert_step(&mut t.stmt_insert, k, rowid));
            }
        }
        Ok(())
    }
}

impl elmo::StorageCollectionWriter for MyCollectionWriter {
    /// Replace the stored document whose _id matches v's _id, then rebuild
    /// its index entries (delete-all + reinsert).
    fn update(&mut self, v: &bson::Document) -> Result<()> {
        match v.get("_id") {
            None => Err(elmo::Error::Misc(String::from("cannot update without _id"))),
            Some(id) => {
                match try!(self.find_rowid(&id).map_err(elmo::wrap_err)) {
                    None => Err(elmo::Error::Misc(String::from("update but does not exist"))),
                    Some(rowid) => {
                        let ba = v.to_bson_array();
                        self.update.clear_bindings();
                        try!(self.update.bind_blob(1,&ba).map_err(elmo::wrap_err));
                        try!(self.update.bind_int64(2, rowid).map_err(elmo::wrap_err));
                        try!(step_done(&mut self.update));
                        try!(verify_changes(&self.update, 1));
                        self.update.reset();
                        try!(Self::update_indexes_delete(&mut self.indexes, rowid));
                        try!(Self::update_indexes_insert(&mut self.indexes, rowid, &v));
                        Ok(())
                    },
                }
            },
        }
    }

    /// Delete the document whose _id equals `v`.  Returns Ok(false) when
    /// no such document exists.
    fn delete(&mut self, v: &bson::Value) -> Result<bool> {
        // TODO is v supposed to be the id?
        match try!(self.find_rowid(&v).map_err(elmo::wrap_err)) {
            None => Ok(false),
            Some(rowid) => {
                self.delete.clear_bindings();
                try!(self.delete.bind_int64(1, rowid).map_err(elmo::wrap_err));
                try!(step_done(&mut self.delete));
                self.delete.reset();
                let count = self.myconn.conn.changes();
                if count == 1 {
                    // TODO might not need index update here. foreign key cascade?
                    try!(Self::update_indexes_delete(&mut self.indexes, rowid));
                    Ok(true)
                } else if count == 0 {
                    Ok(false)
                } else {
                    Err(elmo::Error::Misc(String::from("changes() after delete is wrong")))
                }
            },
        }
    }

    /// Insert a new document and create its index entries.
    fn insert(&mut self, v: &bson::Document) -> Result<()> {
        let ba = v.to_bson_array();
        self.insert.clear_bindings();
        try!(self.insert.bind_blob(1,&ba).map_err(elmo::wrap_err));
        try!(step_done(&mut self.insert));
        try!(verify_changes(&self.insert, 1));
        self.insert.reset();
        let rowid = self.myconn.conn.last_insert_rowid();
        // delete first is presumably defensive for a fresh rowid -- TODO confirm
        try!(Self::update_indexes_delete(&mut self.indexes, rowid));
        try!(Self::update_indexes_insert(&mut self.indexes, rowid, &v));
        Ok(())
    }
}

impl MyWriter {
    /// Prepare the (k, doc_rowid) INSERT for one index table.
    fn prepare_index_insert(&self, tbl: &str) -> Result<sqlite3::PreparedStatement> {
        let stmt = try!(self.myconn.conn.prepare(&format!("INSERT INTO \"{}\" (k,doc_rowid) VALUES (?,?)",tbl)).map_err(elmo::wrap_err));
        Ok(stmt)
    }

    /// Create an index: register it in the catalog, create its backing
    /// table (unique indexes key on k alone; others on (k,doc_rowid)),
    /// and backfill entries for all existing documents.
    /// Returns Ok(false) if an identical index already exists.
    fn create_index(&self, info: elmo::IndexInfo) -> Result<bool> {
        //println!("create_index: {:?}", info);
        let _created = try!(self.base_create_collection(&info.db, &info.coll, bson::Document::new()));
        match try!(self.myconn.get_index_info(&info.db, &info.coll, &info.name)) {
            Some(already) => {
                if already.spec != info.spec {
                    // note that we do not compare the options.
                    // I think mongo does it this way too.
                    Err(elmo::Error::Misc(String::from("index already exists with different keys")))
                } else {
                    Ok(false)
                }
            },
            None => {
                // TODO if we already have a text index (where any of its spec keys are text)
                // then fail.
                let ba_spec = info.spec.to_bson_array();
                let ba_options = info.options.to_bson_array();
                let mut stmt = try!(self.myconn.conn.prepare("INSERT INTO \"indexes\" (dbName,collName,ndxName,spec,options) VALUES (?,?,?,?,?)").map_err(elmo::wrap_err));
                try!(stmt.bind_text(1, &info.db).map_err(elmo::wrap_err));
                try!(stmt.bind_text(2, &info.coll).map_err(elmo::wrap_err));
                try!(stmt.bind_text(3, &info.name).map_err(elmo::wrap_err));
                try!(stmt.bind_blob(4, &ba_spec).map_err(elmo::wrap_err));
                try!(stmt.bind_blob(5, &ba_options).map_err(elmo::wrap_err));
                match try!(stmt.step().map_err(elmo::wrap_err)) {
                    None => {
                        let tbl_coll = get_table_name_for_collection(&info.db, &info.coll);
                        let tbl_ndx = get_table_name_for_index(&info.db, &info.coll, &info.name);
                        // entries cascade-delete when their document row goes away
                        let s = match info.options.get("unique") {
                            Some(&bson::Value::BBoolean(true)) => {
                                format!("CREATE TABLE \"{}\" (k BLOB NOT NULL, doc_rowid int NOT NULL REFERENCES \"{}\"(did) ON DELETE CASCADE, PRIMARY KEY (k))", tbl_ndx, tbl_coll)
                            },
                            _ => {
                                format!("CREATE TABLE \"{}\" (k BLOB NOT NULL, doc_rowid int NOT NULL REFERENCES \"{}\"(did) ON DELETE CASCADE, PRIMARY KEY (k,doc_rowid))", tbl_ndx, tbl_coll)
                            },
                        };
                        try!(self.myconn.conn.exec(&s).map_err(elmo::wrap_err));
                        try!(self.myconn.conn.exec(&format!("CREATE INDEX \"childndx_{}\" ON \"{}\" (doc_rowid)", tbl_ndx, tbl_ndx)).map_err(elmo::wrap_err));
                        // now insert index entries for every doc that already exists
                        let (normspec, weights) = try!(elmo::get_normalized_spec(&info));
                        let mut stmt2 = try!(self.myconn.conn.prepare(&format!("SELECT did,bson FROM \"{}\"", tbl_coll)).map_err(elmo::wrap_err));
                        let mut stmt_insert = try!(self.prepare_index_insert(&tbl_ndx));
                        loop {
                            match try!(stmt2.step().map_err(elmo::wrap_err)) {
                                None => break,
                                Some(row) => {
                                    let doc_rowid = row.column_int64(0);
                                    let new_doc = try!(bson::Document::from_bson(&row.column_slice(1).expect("NOT NULL")));
                                    let mut entries = Vec::new();
                                    try!(get_index_entries(&new_doc, &normspec, &weights, &info.options, &mut entries));
                                    let entries = entries.into_iter().collect::<std::collections::HashSet<_>>();
                                    for vals in entries {
                                        //println!("index entry: {:?}", vals);
                                        let k = bson::Value::encode_multi_for_index(vals);
                                        try!(index_insert_step(&mut stmt_insert, k, doc_rowid));
                                    }
                                },
                            }
                        }
                        Ok(true)
                    },
                    Some(_) => {
                        Err(elmo::Error::Misc(String::from("insert stmt step() returned a row")))
                    },
                }
            },
        }
    }

    /// Empty a collection.  NOTE(review): the existing-collection path only
    /// drops the docs table without recreating it or clearing the catalog
    /// row -- looks suspicious; confirm against callers.
    fn base_clear_collection(&self, db: &str, coll: &str) -> Result<bool> {
        match try!(self.myconn.get_collection_options(db, coll)) {
            None => {
                let created = try!(self.base_create_collection(db, coll, bson::Document::new()));
                Ok(created)
            },
            Some(_) => {
                let tbl = get_table_name_for_collection(db, coll);
                try!(self.myconn.conn.exec(&format!("DROP TABLE \"{}\"", tbl)).map_err(elmo::wrap_err));
                Ok(false)
            },
        }
    }

    /// Rename a collection (possibly across dbs): update the catalog row,
    /// rename the docs table, and rename every index table.  Returns the
    /// "created" flag when the source collection did not exist.
    fn base_rename_collection(&self, old_name: &str, new_name: &str, drop_target: bool) -> Result<bool> {
        let (old_db, old_coll) = try!(bson::split_name(old_name));
        let (new_db, new_coll) = try!(bson::split_name(new_name));

        // jstests/core/rename8.js seems to think that renaming to/from a system collection is illegal unless
        // that collection is system.users, which is "whitelisted".  for now, we emulate this behavior, even
        // though system.users isn't supported.
        if old_coll != "system.users" && old_coll.starts_with("system.") {
            return Err(elmo::Error::Misc(String::from("renameCollection with a system collection not allowed.")))
        }
        if new_coll != "system.users" && new_coll.starts_with("system.") {
            return Err(elmo::Error::Misc(String::from("renameCollection with a system collection not allowed.")))
        }

        if drop_target {
            let _deleted = try!(self.base_drop_collection(new_db, new_coll));
        }

        match try!(self.myconn.get_collection_options(old_db, old_coll)) {
            None => {
                let created = try!(self.base_create_collection(new_db, new_coll, bson::Document::new()));
                Ok(created)
            },
            Some(_) => {
                let old_tbl = get_table_name_for_collection(old_db, old_coll);
                let new_tbl = get_table_name_for_collection(new_db, new_coll);
                let mut stmt = try!(self.myconn.conn.prepare("UPDATE \"collections\" SET dbName=?, collName=? WHERE dbName=? AND collName=?").map_err(elmo::wrap_err));
                try!(stmt.bind_text(1, new_db).map_err(elmo::wrap_err));
                try!(stmt.bind_text(2, new_coll).map_err(elmo::wrap_err));
                try!(stmt.bind_text(3, old_db).map_err(elmo::wrap_err));
                try!(stmt.bind_text(4, old_coll).map_err(elmo::wrap_err));
                try!(step_done(&mut stmt));
                stmt.reset();
                try!(self.myconn.conn.exec(&format!("ALTER TABLE \"{}\" RENAME TO \"{}\"", old_tbl, new_tbl)).map_err(elmo::wrap_err));
                // index tables embed db/coll in their names; rename each one
                let indexes = try!(self.myconn.base_list_indexes());
                for info in indexes {
                    if info.db == old_db && info.coll == old_coll {
                        let old_ndx_tbl = get_table_name_for_index(old_db, old_coll, &info.name);
                        let new_ndx_tbl = get_table_name_for_index(new_db, new_coll, &info.name);
                        try!(self.myconn.conn.exec(&format!("ALTER TABLE \"{}\" RENAME TO \"{}\"", old_ndx_tbl, new_ndx_tbl)).map_err(elmo::wrap_err));
                    }
                }
                Ok(false)
            },
        }
    }

    /// Create a collection: catalog row + docs table, plus the implicit
    /// unique _id_ index unless options has autoIndexId:false.
    /// Returns Ok(false) if it already existed.
    fn base_create_collection(&self, db: &str, coll: &str, options: bson::Document) -> Result<bool> {
        match try!(self.myconn.get_collection_options(db, coll)) {
            Some(_) => Ok(false),
            None => {
                let v_options = options.to_bson_array();
                let mut stmt = try!(self.myconn.conn.prepare("INSERT INTO \"collections\" (dbName,collName,options) VALUES (?,?,?)").map_err(elmo::wrap_err));
                try!(stmt.bind_text(1, db).map_err(elmo::wrap_err));
                try!(stmt.bind_text(2, coll).map_err(elmo::wrap_err));
                try!(stmt.bind_blob(3, &v_options).map_err(elmo::wrap_err));
                match try!(stmt.step().map_err(elmo::wrap_err)) {
                    None => {
                        let tbl = get_table_name_for_collection(db, coll);
                        // "did" is the INTEGER PRIMARY KEY (the rowid alias)
                        try!(self.myconn.conn.exec(&format!("CREATE TABLE \"{}\" (did INTEGER PRIMARY KEY, bson BLOB NOT NULL)", tbl)).map_err(elmo::wrap_err));
                        // now create mongo index for _id
                        match options.get("autoIndexId") {
                            Some(&bson::Value::BBoolean(false)) => (),
                            _ => {
                                let info = elmo::IndexInfo {
                                    db: String::from(db),
                                    coll: String::from(coll),
                                    name: String::from("_id_"),
                                    spec: bson::Document {pairs: vec![(String::from("_id"), bson::Value::BInt32(1))]},
                                    options: bson::Document {pairs: vec![(String::from("unique"), bson::Value::BBoolean(true))]},
                                };
                                // NOTE(review): result is discarded -- a failure
                                // creating _id_ is silently ignored; confirm intended.
                                let _created = self.create_index(info);
                            },
                        }
                        Ok(true)
                    },
                    Some(_) => {
                        Err(elmo::Error::Misc(String::from("insert stmt step() returned a row")))
                    },
                }
            },
        }
    }

    /// Create each requested index; per-index "was created" flags returned
    /// in the same order.
    fn base_create_indexes(&self, what: Vec<elmo::IndexInfo>) -> Result<Vec<bool>> {
        let mut v = Vec::new();
        for info in what {
            let b = try!(self.create_index(info));
            v.push(b);
        }
        Ok(v)
    }

    /// Drop one index: remove its catalog row and its backing table.
    /// Ok(false) when the index did not exist.
    fn base_drop_index(&self, db: &str, coll: &str, name: &str) -> Result<bool> {
        match try!(self.myconn.get_index_info(db, coll, name)) {
            None => Ok(false),
            Some(_) => {
                let mut stmt = try!(self.myconn.conn.prepare("DELETE FROM \"indexes\" WHERE dbName=? AND collName=? AND ndxName=?").map_err(elmo::wrap_err));
                try!(stmt.bind_text(1, db).map_err(elmo::wrap_err));
                try!(stmt.bind_text(2, coll).map_err(elmo::wrap_err));
                try!(stmt.bind_text(3, name).map_err(elmo::wrap_err));
                try!(step_done(&mut stmt));
                try!(verify_changes(&stmt, 1));
                stmt.reset();
                let tbl = get_table_name_for_index(db, coll, name);
                try!(self.myconn.conn.exec(&format!("DROP TABLE \"{}\"", tbl)).map_err(elmo::wrap_err));
                Ok(true)
            },
        }
    }

    /// Drop every collection belonging to `db`.  Ok(true) if any existed.
    fn base_drop_database(&self, db: &str) -> Result<bool> {
        let collections = try!(self.myconn.base_list_collections());
        let mut b = false;
        for t in collections {
            if t.db == db {
                let _deleted = try!(self.base_drop_collection(&t.db, &t.coll));
                assert!(_deleted);
                b = true;
            }
        }
        Ok(b)
    }

    /// Drop a collection: drop its indexes first, then the catalog row and
    /// the docs table.  Ok(false) when it did not exist.
    fn base_drop_collection(&self, db: &str, coll: &str) -> Result<bool> {
        match try!(self.myconn.get_collection_options(db, coll)) {
            None => Ok(false),
            Some(_) => {
                let indexes = try!(self.myconn.base_list_indexes());
                for info in indexes {
                    if info.db == db && info.coll == coll {
                        try!(self.base_drop_index(&info.db, &info.coll, &info.name));
                    }
                }
                let mut stmt = try!(self.myconn.conn.prepare("DELETE FROM \"collections\" WHERE dbName=? AND collName=?").map_err(elmo::wrap_err));
                try!(stmt.bind_text(1, db).map_err(elmo::wrap_err));
                try!(stmt.bind_text(2, coll).map_err(elmo::wrap_err));
                try!(step_done(&mut stmt));
                try!(verify_changes(&stmt, 1));
                stmt.reset();
                let tbl = get_table_name_for_collection(db, coll);
                try!(self.myconn.conn.exec(&format!("DROP TABLE \"{}\"", tbl)).map_err(elmo::wrap_err));
                Ok(true)
            },
        }
    }
}

impl elmo::StorageWriter for MyWriter {
    /// Build a MyCollectionWriter: ensure the collection exists, prepare
    /// insert/delete/update statements for the docs table, the _id_ rowid
    /// lookup (if that index exists), and per-index insert/delete statements.
    fn get_collection_writer(&self, db: &str, coll: &str) -> Result<Box<elmo::StorageCollectionWriter + 'static>> {
        let _created = try!(self.base_create_collection(db, coll, bson::Document::new()));
        let tbl = get_table_name_for_collection(db, coll);
        let stmt_insert = try!(self.myconn.conn.prepare(&format!("INSERT INTO \"{}\" (bson) VALUES (?)", tbl)).map_err(elmo::wrap_err));
        let stmt_delete = try!(self.myconn.conn.prepare(&format!("DELETE FROM \"{}\" WHERE rowid=?", tbl)).map_err(elmo::wrap_err));
        let stmt_update = try!(self.myconn.conn.prepare(&format!("UPDATE \"{}\" SET bson=? WHERE rowid=?", tbl)).map_err(elmo::wrap_err));
        // only this collection's indexes
        let indexes = try!(self.myconn.base_list_indexes());
        let indexes = indexes.into_iter().filter(
            |ndx| ndx.db == db && ndx.coll == coll
            ).collect::<Vec<_>>();
        // the _id_ index, when present, provides key -> rowid lookup
        let mut find_rowid = None;
        for info in &indexes {
            if info.name == "_id_" {
                let tbl = get_table_name_for_index(db, coll, &info.name);
                find_rowid = Some(try!(self.myconn.conn.prepare(&format!("SELECT doc_rowid FROM \"{}\" WHERE k=?", tbl)).map_err(elmo::wrap_err)));
                break;
            }
        }
        let mut index_stmts = Vec::new();
        for info in indexes {
            let tbl_ndx = get_table_name_for_index(db, coll, &info.name);
            let stmt_insert = try!(self.prepare_index_insert(&tbl_ndx));
            let stmt_delete = try!(self.myconn.conn.prepare(&format!("DELETE FROM \"{}\" WHERE doc_rowid=?", tbl_ndx)).map_err(elmo::wrap_err));
            let t = IndexPrep {
                info: info,
                stmt_insert: stmt_insert,
                stmt_delete: stmt_delete
            };
            index_stmts.push(t);
        }
        let c = MyCollectionWriter {
            insert: stmt_insert,
            delete: stmt_delete,
            update: stmt_update,
            stmt_find_rowid: find_rowid,
            indexes: index_stmts,
            myconn: self.myconn.clone(),
        };
        Ok(box c)
    }

    /// Commit the write transaction; clears in_tx so Drop won't roll back.
    fn commit(mut self: Box<Self>) -> Result<()> {
        try!(self.myconn.conn.exec("COMMIT TRANSACTION").map_err(elmo::wrap_err));
        self.in_tx = false;
        Ok(())
    }

    /// Roll back the write transaction; clears in_tx for the Drop impl.
    fn rollback(mut self: Box<Self>) -> Result<()> {
        try!(self.myconn.conn.exec("ROLLBACK TRANSACTION").map_err(elmo::wrap_err));
        self.in_tx = false;
        Ok(())
    }

    // TODO maybe just move all the stuff below from the private section into here?
    // The remaining StorageWriter methods simply delegate to the private
    // base_* helpers on MyWriter.
    fn create_collection(&self, db: &str, coll: &str, options: bson::Document) -> Result<bool> {
        self.base_create_collection(db, coll, options)
    }

    fn drop_collection(&self, db: &str, coll: &str) -> Result<bool> {
        self.base_drop_collection(db, coll)
    }

    fn create_indexes(&self, what: Vec<elmo::IndexInfo>) -> Result<Vec<bool>> {
        self.base_create_indexes(what)
    }

    fn rename_collection(&self, old_name: &str, new_name: &str, drop_target: bool) -> Result<bool> {
        self.base_rename_collection(old_name, new_name, drop_target)
    }

    fn drop_index(&self, db: &str, coll: &str, name: &str) -> Result<bool> {
        self.base_drop_index(db, coll, name)
    }

    fn drop_database(&self, db: &str) -> Result<bool> {
        self.base_drop_database(db)
    }

    fn clear_collection(&self, db: &str, coll: &str) -> Result<bool> {
        self.base_clear_collection(db, coll)
    }
}

// TODO do we need to declare that StorageWriter must implement Drop ?

// Safety net: a writer dropped while its transaction is still open rolls
// the transaction back (errors are deliberately ignored in drop()).
impl Drop for MyWriter {
    fn drop(&mut self) {
        // TODO consider panic here if still in tx. force caller to
        // explicitly commit or rollback.
        if self.in_tx {
            // TODO should rollback be the default here? or commit?
            let _ignored = self.myconn.conn.exec("ROLLBACK TRANSACTION");
        }
    }
}

// TODO do we need to declare that StorageReader must implement Drop ?

// A reader's transaction is read-only, so committing on drop is just the
// cheapest way to end it.
impl Drop for MyReader {
    fn drop(&mut self) {
        // TODO consider panic here if still in tx. force caller to
        // explicitly end the tx.
        // this transaction was [supposed to be] read-only, so it doesn't
        // matter in principle whether we commit or rollback. in SQL Server,
        // if temp tables were created, commit is MUCH faster than rollback.
        // but this is sqlite. anyway...
        if self.in_tx {
            let _ignored = self.myconn.conn.exec("COMMIT TRANSACTION");
        }
    }
}

impl Drop for MyCollectionReader {
    fn drop(&mut self) {
        // this transaction was [supposed to be] read-only, so it doesn't
        // matter in principle whether we commit or rollback. in SQL Server,
        // if temp tables were created, commit is MUCH faster than rollback.
        // but this is sqlite. anyway...
        if self.commit_on_drop {
            let _ignored = self.myconn.conn.exec("COMMIT TRANSACTION");
        }
    }
}

// A collection reader just forwards to its boxed row sequence.
impl Iterator for MyCollectionReader {
    type Item = Result<elmo::Row>;
    fn next(&mut self) -> Option<Self::Item> {
        self.seq.next()
    }
}

impl elmo::StorageBase for MyReader {
    // commit_on_drop=false: the MyReader itself owns the transaction.
    fn get_collection_reader(&self, db: &str, coll: &str, plan: Option<elmo::QueryPlan>) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let rdr = try!(self.myconn.get_collection_reader(self.myconn.clone(), false, db, coll, plan));
        Ok(box rdr)
    }

    fn list_collections(&self) -> Result<Vec<elmo::CollectionInfo>> {
        self.myconn.base_list_collections()
    }

    fn list_indexes(&self) -> Result<Vec<elmo::IndexInfo>> {
        self.myconn.base_list_indexes()
    }
}

impl elmo::StorageReader for MyReader {
    // Consumes the reader; ownership of the open transaction transfers to
    // the collection reader (commit_on_drop=true), so in_tx is cleared here
    // to keep MyReader's Drop from also committing.
    fn into_collection_reader(mut self: Box<Self>, db: &str, coll: &str, plan: Option<elmo::QueryPlan>) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        self.in_tx = false;
        let rdr = try!(self.myconn.get_collection_reader(self.myconn.clone(), true, db, coll, plan));
        Ok(box rdr)
    }
}

// A writer can also read, inside its own (write) transaction.
impl elmo::StorageBase for MyWriter {
    fn get_collection_reader(&self, db: &str, coll: &str, plan: Option<elmo::QueryPlan>) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let rdr = try!(self.myconn.get_collection_reader(self.myconn.clone(), false, db, coll, plan));
        Ok(box rdr)
    }

    fn list_collections(&self) -> Result<Vec<elmo::CollectionInfo>> {
        self.myconn.base_list_collections()
    }

    fn list_indexes(&self) -> Result<Vec<elmo::IndexInfo>> {
        self.myconn.base_list_indexes()
    }
}

impl elmo::StorageConnection for MyPublicConn {
    // IMMEDIATE grabs the sqlite write lock up front.
    fn begin_write(&self) -> Result<Box<elmo::StorageWriter + 'static>> {
        try!(self.myconn.conn.exec("BEGIN IMMEDIATE TRANSACTION").map_err(elmo::wrap_err));
        let w = MyWriter {
            myconn: self.myconn.clone(),
            in_tx: true,
        };
        Ok(box w)
    }

    fn begin_read(&self) -> Result<Box<elmo::StorageReader + 'static>> {
        try!(self.myconn.conn.exec("BEGIN TRANSACTION").map_err(elmo::wrap_err));
        let r = MyReader {
            myconn: self.myconn.clone(),
            in_tx: true,
        };
        Ok(box r)
    }
}

// Opens (creating if necessary) the sqlite file and ensures the two catalog
// tables exist: "collections" (one row per collection) and "indexes" (one
// row per index, cascading on collection delete/rename).
fn base_connect(name: &str) -> sqlite3::SqliteResult<sqlite3::DatabaseConnection> {
    let access = sqlite3::access::ByFilename {
        flags: sqlite3::access::flags::OPEN_READWRITE | sqlite3::access::flags::OPEN_CREATE,
        filename: name};
    let conn = try!(sqlite3::DatabaseConnection::new(access));
    try!(conn.exec("PRAGMA journal_mode=WAL"));
    // foreign keys must be ON for the index tables' ON DELETE CASCADE to work
    try!(conn.exec("PRAGMA foreign_keys=ON"));
    try!(conn.exec("CREATE TABLE IF NOT EXISTS \"collections\" (dbName TEXT NOT NULL, collName TEXT NOT NULL, options BLOB NOT NULL, PRIMARY KEY (dbName,collName))"));
    try!(conn.exec("CREATE TABLE IF NOT EXISTS \"indexes\" (dbName TEXT NOT NULL, collName TEXT NOT NULL, ndxName TEXT NOT NULL, spec BLOB NOT NULL, options BLOB NOT NULL, PRIMARY KEY (dbName, collName, ndxName), FOREIGN KEY (dbName,collName) REFERENCES \"collections\" ON DELETE CASCADE ON UPDATE CASCADE, UNIQUE (spec,dbName,collName))"));
    Ok(conn)
}

// Public entry point: open a storage connection backed by the named sqlite file.
pub fn connect(name: &str) -> Result<Box<elmo::StorageConnection>> {
    let conn = try!(base_connect(name).map_err(elmo::wrap_err));
    let c = MyConn {
        conn: conn,
    };
    let c = MyPublicConn {
        myconn: std::rc::Rc::new(c)
    };
    Ok(box c)
}

/*
look at the non-allocating alternatives to column_text() and column_blob()
*/

fix bug in rename collection

/*
Copyright 2014-2015 Zumero, LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#![feature(box_syntax)]
#![feature(associated_consts)]
#![feature(vec_push_all)]
#![feature(iter_arith)]

extern crate bson;
extern crate elmo;

pub type Result<T> = elmo::Result<T>;

extern crate sqlite3;

// Prepared statements for maintaining one index of a collection.
struct IndexPrep {
    info: elmo::IndexInfo,
    stmt_insert: sqlite3::PreparedStatement,
    stmt_delete: sqlite3::PreparedStatement,
}

// All prepared statements needed to mutate one collection, plus one
// statement pair per index (see IndexPrep).
struct MyCollectionWriter {
    insert: sqlite3::PreparedStatement,
    delete: sqlite3::PreparedStatement,
    update: sqlite3::PreparedStatement,
    // lookup of a doc's rowid via the _id_ index; None if that index is absent
    stmt_find_rowid: Option<sqlite3::PreparedStatement>,
    indexes: Vec<IndexPrep>,
    myconn: std::rc::Rc<MyConn>,
}

// Iterator that owns a prepared statement whose column 0 is a BSON blob,
// yielding one decoded elmo::Row per sqlite row.
struct StatementBsonValueIterator {
    stmt: sqlite3::PreparedStatement,
}

impl StatementBsonValueIterator {
    // One step of the statement; decodes column 0 as a BSON document.
    fn iter_next(&mut self) -> Result<Option<elmo::Row>> {
        match try!(self.stmt.step().map_err(elmo::wrap_err)) {
            None => {
                Ok(None)
            },
            Some(r) => {
                let b = r.column_blob(0).expect("NOT NULL");
                let v = try!(bson::Document::from_bson(&b));
                //println!("doc in row: {:?}", v);
                let v = bson::Value::BDocument(v);
                let row = elmo::Row {
                    doc: v,
                    pos: None,
                    score: None,
                };
                Ok(Some(row))
            },
        }
    }
}

impl Iterator for StatementBsonValueIterator {
    type Item = Result<elmo::Row>;
    fn next(&mut self) -> Option<Self::Item> {
        // flip Result<Option<_>> into Option<Result<_>> for Iterator
        match self.iter_next() {
            Err(e) => {
                return Some(Err(e));
            },
            Ok(v) => {
                match v {
                    None => {
                        return None;
                    },
                    Some(v) => {
                        return Some(Ok(v));
                    }
                }
            },
        }
    }
}

// TODO it is sad to have two completely distinct versions of
// this iterator, one which owns the statement, and one which
// does not.
// Same as StatementBsonValueIterator but borrows the statement, so the
// caller can reset() and re-bind it between uses.
struct RefStatementBsonValueIterator<'a> {
    stmt: &'a mut sqlite3::PreparedStatement,
}

impl<'a> RefStatementBsonValueIterator<'a> {
    fn iter_next(&mut self) -> Result<Option<elmo::Row>> {
        match try!(self.stmt.step().map_err(elmo::wrap_err)) {
            None => {
                Ok(None)
            },
            Some(r) => {
                let b = r.column_blob(0).expect("NOT NULL");
                let v = try!(bson::Document::from_bson(&b));
                //println!("doc: {:?}", v);
                let v = bson::Value::BDocument(v);
                let row = elmo::Row {
                    doc: v,
                    pos: None,
                    score: None,
                };
                Ok(Some(row))
            },
        }
    }
}

impl<'a> Iterator for RefStatementBsonValueIterator<'a> {
    type Item = Result<elmo::Row>;
    fn next(&mut self) -> Option<Self::Item> {
        match self.iter_next() {
            Err(e) => {
                return Some(Err(e));
            },
            Ok(v) => {
                match v {
                    None => {
                        return None;
                    },
                    Some(v) => {
                        return Some(Ok(v));
                    }
                }
            },
        }
    }
}

// TODO std::iter::Empty?
// Yields nothing; used when reading a collection that does not exist.
struct MyEmptyIterator;

impl Iterator for MyEmptyIterator {
    type Item = Result<elmo::Row>;
    fn next(&mut self) -> Option<Self::Item> {
        None
    }
}

// A stream of rows from one collection; may own the read transaction
// (commit_on_drop) when created via into_collection_reader().
struct MyCollectionReader {
    commit_on_drop: bool,
    seq: Box<Iterator<Item=Result<elmo::Row>>>,
    myconn: std::rc::Rc<MyConn>,

    // TODO need counts here
}

struct MyReader {
    myconn: std::rc::Rc<MyConn>,
    in_tx: bool,
}

struct MyWriter {
    myconn: std::rc::Rc<MyConn>,
    in_tx: bool,
}

struct MyConn {
    conn: sqlite3::DatabaseConnection,
}

struct MyPublicConn {
    myconn: std::rc::Rc<MyConn>,
}

// Steps a statement that is expected to produce no rows (INSERT/DELETE/etc);
// a returned row is an error.
fn step_done(stmt: &mut sqlite3::PreparedStatement) -> Result<()> {
    match try!(stmt.step().map_err(elmo::wrap_err)) {
        Some(_) => {
            Err(elmo::Error::Misc(String::from("step_done() returned a row")))
        },
        None => {
            Ok(())
        },
    }
}

// Asserts (as a Result) that the last statement changed exactly `shouldbe` rows.
fn verify_changes(stmt: &sqlite3::PreparedStatement, shouldbe: u64) -> Result<()> {
    if stmt.changes() == shouldbe {
        Ok(())
    } else {
        // TODO or should this be an assert?
        Err(elmo::Error::Misc(String::from("changes() is wrong")))
    }
}

// Pairs each key value with the direction flag (true = Backward) taken from
// the corresponding position of the normalized index spec.
fn copy_dirs_from_normspec_to_vals(normspec: &Vec<(String, elmo::IndexType)>, vals: Vec<bson::Value>) -> Vec<(bson::Value, bool)> {
    // TODO if normspec.len() < vals.len() then panic?
    let mut a = Vec::new();
    for (i,v) in vals.into_iter().enumerate() {
        let neg = normspec[i].1 == elmo::IndexType::Backward;
        a.push((v, neg));
    }
    a
}

// Names the sqlite table holding a collection's documents.
fn get_table_name_for_collection(db: &str, coll: &str) -> String {
    // TODO cleanse?
    format!("docs.{}.{}", db, coll)
}

// Names the sqlite table holding one index's entries.
fn get_table_name_for_index(db: &str, coll: &str, name: &str) -> String {
    // TODO cleanse?
    format!("ndx.{}.{}.{}", db, coll, name)
}

// Computes every index key (a vec of (value, direction) pairs) that new_doc
// contributes to an index described by normspec/weights/options, appending
// them to `entries`.  Handles sparse indexes, text indexes (weights), and
// multikey expansion of array values.
fn get_index_entries(new_doc: &bson::Document, normspec: &Vec<(String, elmo::IndexType)>, weights: &Option<std::collections::HashMap<String,i32>>, options: &bson::Document, entries: &mut Vec<Vec<(bson::Value,bool)>>) -> Result<()> {
    // Pulls the indexed values out of the document, skipping absent keys
    // when the index is sparse.
    fn find_index_entry_vals(normspec: &Vec<(String, elmo::IndexType)>, new_doc: &bson::Document, sparse: bool) -> Vec<(bson::Value,bool)> {
        //println!("find_index_entry_vals: sparse = {:?}", sparse);
        let mut r = Vec::new();
        for t in normspec {
            let k = &t.0;
            let typ = t.1;
            // TODO convert this to use walk_path()
            let mut v = new_doc.find_path(k);

            // now we replace any BUndefined with BNull. this seems, well,
            // kinda wrong, as it effectively encodes the index entries to
            // contain information that is slightly incorrect, since BNull
            // means "it was present and explicitly null", whereas BUndefined
            // means "it was absent". Still, this appears to be the exact
            // behavior of Mongo. Note that this only affects index entries.
            // The matcher can and must still distinguish between null and
            // undefined.
            let keep =
                if sparse {
                    match v {
                        bson::Value::BUndefined => false,
                        _ => true,
                    }
                } else {
                    true
                };
            if keep {
                v.replace_undefined();
                let neg = elmo::IndexType::Backward == typ;
                r.push((v,neg));
            }
        }
        r
    }

    // TODO what should the name of this func actually be?
    // Emits one index entry per distinct word of `s`, tagging each with the
    // weight as a [word, weight] array appended to the key.
    fn q(vals: &Vec<(bson::Value, bool)>, w: i32, s: &str, entries: &mut Vec<Vec<(bson::Value,bool)>>) {
        // TODO tokenize properly
        let a = s.split(" ");
        let a = a.into_iter().collect::<std::collections::HashSet<_>>();
        for s in a {
            let s = String::from(s);
            let v = bson::Value::BArray(bson::Array {items: vec![bson::Value::BString(s), bson::Value::BInt32(w)]});
            // TODO clone is ugly
            let mut vals = vals.clone();
            vals.push((v, false));
            entries.push(vals);
        }
    }

    // For a text index (weights present), expands string values (or "$**",
    // every string in the doc) into per-word entries; otherwise just records
    // the plain entry.
    fn maybe_text(vals: &Vec<(bson::Value, bool)>, new_doc: &bson::Document, weights: &Option<std::collections::HashMap<String,i32>>, entries: &mut Vec<Vec<(bson::Value,bool)>>) {
        //println!("in maybe_text: {:?}", vals);
        match weights {
            &Some(ref weights) => {
                for k in weights.keys() {
                    if k == "$**" {
                        let mut a = Vec::new();
                        new_doc.find_all_strings(& mut a);
                        let w = weights[k];
                        for s in a {
                            q(vals, w, s, entries);
                        };
                    } else {
                        // TODO convert this to use walk_path()
                        match new_doc.find_path(k) {
                            bson::Value::BUndefined => (),
                            v => {
                                match v {
                                    bson::Value::BString(s) => q(&vals, weights[k], &s, entries),
                                    bson::Value::BArray(ba) => {
                                        let a = ba.items.into_iter().collect::<std::collections::HashSet<_>>();
                                        for v in a {
                                            match v {
                                                bson::Value::BString(s) => q(&vals, weights[k], &s, entries),
                                                _ => (),
                                            }
                                        }
                                    },
                                    _ => (),
                                }
                            },
                        }
                    }
                }
            },
            &None => {
                // TODO clone is ugly
                //println!("in maybe_text, pushing to entries: {:?}", vals);
                entries.push(vals.clone());
            },
        }
    }

    // Returns a copy of vals with element i replaced by v.
    fn replace_array_element<T:Clone>(vals: &Vec<T>, i: usize, v: T) -> Vec<T> {
        // TODO horrifying clone
        let mut v2 = vals.clone();
        v2[i] = v;
        v2
    }

    // Recursively expands array values into one entry per element.
    fn maybe_array(vals: &Vec<(bson::Value, bool)>, new_doc: &bson::Document, weights: &Option<std::collections::HashMap<String,i32>>, entries: &mut Vec<Vec<(bson::Value,bool)>>) {
        //println!("in maybe_array: {:?}", vals);
        // first do the index entries for the document without considering arrays
        maybe_text(vals, new_doc, weights, entries);

        // now, if any of the vals in the key are an array, we need
        // to generate more index entries for this document, one
        // for each item in the array. Mongo calls this a
        // multikey index.
        for i in 0 .. vals.len() {
            let t = &vals[i];
            let v = &t.0;
            let typ = t.1;
            match v {
                &bson::Value::BArray(ref ba) => {
                    let a = ba.items.iter().collect::<std::collections::HashSet<_>>();
                    for av in a {
                        // TODO clone is ugly
                        let replaced = replace_array_element(vals, i, (av.clone(), typ));
                        //println!("replaced for index: {:?}", replaced);
                        maybe_array(&replaced, new_doc, weights, entries);
                    }
                },
                _ => ()
            }
        }
    }

    let sparse = match options.get("sparse") {
        Some(&bson::Value::BBoolean(b)) => b,
        _ => false,
    };
    let vals = find_index_entry_vals(normspec, new_doc, sparse);
    maybe_array(&vals, new_doc, weights, entries);
    //println!("entries: {:?}", entries);
    Ok(())
}

// Decodes a row of (ndxName, spec, options, dbName, collName) into IndexInfo.
fn get_index_info_from_row(r: &sqlite3::ResultRow) -> Result<elmo::IndexInfo> {
    let name = r.column_text(0).expect("NOT NULL");
    let spec = try!(bson::Document::from_bson(&r.column_slice(1).expect("NOT NULL")));
    let options = try!(bson::Document::from_bson(&r.column_slice(2).expect("NOT NULL")));
    let db = r.column_text(3).expect("NOT NULL");
    let coll = r.column_text(4).expect("NOT NULL");
    let info = elmo::IndexInfo {
        db: String::from(db),
        coll: String::from(coll),
        name: String::from(name),
        spec: spec,
        options: options,
    };
    Ok(info)
}

// Decodes a row of (dbName, collName, options) into CollectionInfo.
fn get_collection_info_from_row(r: &sqlite3::ResultRow) -> Result<elmo::CollectionInfo> {
    let db = r.column_text(0).expect("NOT NULL");
    let coll = r.column_text(1).expect("NOT NULL");
    let options = try!(bson::Document::from_bson(&r.column_slice(2).expect("NOT NULL")));
    let info = elmo::CollectionInfo {
        db: String::from(db),
        coll: String::from(coll),
        options: options,
    };
    Ok(info)
}

// Binds (key, doc_rowid) into an index-insert statement, runs it, and
// verifies exactly one row was inserted; resets the statement for reuse.
fn index_insert_step(stmt: &mut sqlite3::PreparedStatement, k: Vec<u8>, doc_rowid: i64) -> Result<()> {
    stmt.clear_bindings();
    try!(stmt.bind_blob(1, &k).map_err(elmo::wrap_err));
    try!(stmt.bind_int64(2, doc_rowid).map_err(elmo::wrap_err));
    try!(step_done(stmt));
    try!(verify_changes(stmt, 1));
    stmt.reset();
    Ok(())
}

impl MyConn {
    // Returns the stored options blob for a collection, or None if the
    // collection does not exist in the catalog.
    fn get_collection_options(&self, db: &str, coll: &str) -> Result<Option<bson::Document>> {
        let mut stmt = try!(self.conn.prepare("SELECT options FROM \"collections\" WHERE dbName=? AND collName=?").map_err(elmo::wrap_err));
        try!(stmt.bind_text(1, db).map_err(elmo::wrap_err));
        try!(stmt.bind_text(2, coll).map_err(elmo::wrap_err));
        // TODO step_row() ?
        match try!(stmt.step().map_err(elmo::wrap_err)) {
            None => Ok(None),
            Some(r) => {
                let v = try!(bson::Document::from_bson(&r.column_slice(0).expect("NOT NULL")));
                Ok(Some(v))
            },
        }
    }

    // Builds a bound, ready-to-step statement implementing a non-text index
    // scan for the given plan, joining the index table to the docs table.
    fn get_stmt_for_index_scan(myconn: &MyConn, plan: elmo::QueryPlan) -> Result<sqlite3::PreparedStatement> {
        //println!("using plan in index scan: {:?}", plan);
        let tbl_coll = get_table_name_for_collection(&plan.ndx.db, &plan.ndx.coll);
        let tbl_ndx = get_table_name_for_index(&plan.ndx.db, &plan.ndx.coll, &plan.ndx.name);
        // TODO the following is way too heavy. all we need is the index types
        // so we can tell if they're supposed to be backwards or not.
        let (normspec, _weights) = try!(elmo::get_normalized_spec(&plan.ndx));
        // Increments an encoded key to get an exclusive upper bound for EQ.
        // NOTE(review): `a.len() - 1` underflows if the key is empty -- TODO
        // confirm encode_multi_for_index never yields an empty vec.
        fn add_one(ba: &Vec<u8>) -> Vec<u8> {
            let mut a = ba.clone();
            let mut i = a.len() - 1;
            loop {
                if a[i] == 255 {
                    a[i] = 0;
                    if i == 0 {
                        panic!("TODO handle case where add_one to binary array overflows the first byte");
                    } else {
                        i = i - 1;
                    }
                } else {
                    a[i] = a[i] + 1;
                    break;
                }
            }
            a
        }

        // note that one of the reasons we need to do DISTINCT here is because a
        // single index in a single document can produce multiple index entries,
        // because, for example, when a value is an array, we don't just index
        // the array as a value, but we also index each of its elements.
        //
        // TODO it would be nice if the DISTINCT here was happening on the rowids, not on the blobs

        // two-bound scan with pre-encoded keys
        let f_twok = |kmin: Vec<u8>, kmax: Vec<u8>, op1: &str, op2: &str| -> Result<sqlite3::PreparedStatement> {
            let sql = format!("SELECT DISTINCT d.bson FROM \"{}\" d INNER JOIN \"{}\" i ON (d.did = i.doc_rowid) WHERE k {} ? AND k {} ?", tbl_coll, tbl_ndx, op1, op2);
            //println!("using sql: {}", sql);
            let mut stmt = try!(myconn.conn.prepare(&sql).map_err(elmo::wrap_err));
            try!(stmt.bind_blob(1, &kmin).map_err(elmo::wrap_err));
            try!(stmt.bind_blob(2, &kmax).map_err(elmo::wrap_err));
            Ok(stmt)
        };
        // two-bound scan, encoding the query keys first
        let f_two = |minvals: elmo::QueryKey, maxvals: elmo::QueryKey, op1: &str, op2: &str| -> Result<sqlite3::PreparedStatement> {
            let kmin = bson::Value::encode_multi_for_index(copy_dirs_from_normspec_to_vals(&normspec, minvals));
            let kmax = bson::Value::encode_multi_for_index(copy_dirs_from_normspec_to_vals(&normspec, maxvals));
            f_twok(kmin, kmax, op1, op2)
        };
        // one-bound scan
        let f_one = |vals: elmo::QueryKey, op: &str| -> Result<sqlite3::PreparedStatement> {
            let k = bson::Value::encode_multi_for_index(copy_dirs_from_normspec_to_vals(&normspec, vals));
            let sql = format!("SELECT DISTINCT d.bson FROM \"{}\" d INNER JOIN \"{}\" i ON (d.did = i.doc_rowid) WHERE k {} ?", tbl_coll, tbl_ndx, op);
            let mut stmt = try!(myconn.conn.prepare(&sql).map_err(elmo::wrap_err));
            try!(stmt.bind_blob(1, &k).map_err(elmo::wrap_err));
            Ok(stmt)
        };
        match plan.bounds {
            // text plans are handled by get_text_index_scan_reader, never here
            elmo::QueryBounds::Text(_,_) => unreachable!(),
            elmo::QueryBounds::GT(vals) => f_one(vals, ">"),
            elmo::QueryBounds::LT(vals) => f_one(vals, "<"),
            elmo::QueryBounds::GTE(vals) => f_one(vals, ">="),
            elmo::QueryBounds::LTE(vals) => f_one(vals, "<="),
            elmo::QueryBounds::GT_LT(minvals, maxvals) => f_two(minvals, maxvals, ">", "<"),
            elmo::QueryBounds::GTE_LT(minvals, maxvals) => f_two(minvals, maxvals, ">=", "<"),
            elmo::QueryBounds::GT_LTE(minvals, maxvals) => f_two(minvals, maxvals, ">", "<="),
            elmo::QueryBounds::GTE_LTE(minvals, maxvals) => f_two(minvals, maxvals, ">=", "<="),
            elmo::QueryBounds::EQ(vals) => {
                // EQ is done as a half-open range [k, add_one(k))
                let kmin = bson::Value::encode_multi_for_index(copy_dirs_from_normspec_to_vals(&normspec, vals));
                let kmax = add_one(&kmin);
                f_twok(kmin, kmax, ">=", "<")
            },
        }
    }

    // Full scan of a collection's docs table (no index).
    fn get_table_scan_reader(myconn: std::rc::Rc<MyConn>, commit_on_drop: bool, db: &str, coll: &str) ->
Result<MyCollectionReader> {
        let tbl = get_table_name_for_collection(db, coll);
        let stmt = try!(myconn.conn.prepare(&format!("SELECT bson FROM \"{}\"", tbl)).map_err(elmo::wrap_err));
        // TODO keep track of total keys examined, etc.
        let seq = StatementBsonValueIterator {
            stmt: stmt,
        };
        let rdr = MyCollectionReader {
            commit_on_drop: commit_on_drop,
            seq: box seq,
            myconn: myconn,
        };
        Ok(rdr)
    }

    // Reader driven by a bound index-scan statement (non-text plans).
    fn get_nontext_index_scan_reader(myconn: std::rc::Rc<MyConn>, commit_on_drop: bool, plan: elmo::QueryPlan) -> Result<MyCollectionReader> {
        let stmt = try!(Self::get_stmt_for_index_scan(&myconn, plan));
        // TODO keep track of total keys examined, etc.
        let seq = StatementBsonValueIterator {
            stmt: stmt,
        };
        let rdr = MyCollectionReader {
            commit_on_drop: commit_on_drop,
            seq: box seq,
            myconn: myconn,
        };
        Ok(rdr)
    }

    // Text-index search: looks up each term's (doc_rowid, weight) entries,
    // removes docs matched by negated words, filters phrase terms against
    // the actual documents, and materializes the scored results eagerly.
    fn get_text_index_scan_reader(myconn: std::rc::Rc<MyConn>, commit_on_drop: bool, ndx: &elmo::IndexInfo, eq: elmo::QueryKey, terms: Vec<elmo::TextQueryTerm>) -> Result<MyCollectionReader> {
        let tbl_coll = get_table_name_for_collection(&ndx.db, &ndx.coll);
        let tbl_ndx = get_table_name_for_index(&ndx.db, &ndx.coll, &ndx.name);
        let (normspec, weights) = try!(elmo::get_normalized_spec(&ndx));
        let weights = match weights {
            None => return Err(elmo::Error::Misc(String::from("non text index"))),
            Some(w) => w,
        };
        // Range-scans the index for one word (any weight 0..100000),
        // returning (doc_rowid, weight) pairs.
        fn lookup(stmt: &mut sqlite3::PreparedStatement, vals: &Vec<(bson::Value, bool)>, word: &str) -> Result<Vec<(i64,i32)>> {
            // TODO if we just search for the word without the weight, we could
            // use the add_one trick from EQ. Probably need key encoding of an array
            // to omit the array length. See comment there.
            let vmin = bson::Value::BArray(bson::Array {items: vec![bson::Value::BString(String::from(word)), bson::Value::BInt32(0)]});
            let vmax = bson::Value::BArray(bson::Array {items: vec![bson::Value::BString(String::from(word)), bson::Value::BInt32(100000)]});
            let mut minvals = vals.clone();
            minvals.push((vmin,false));
            let mut maxvals = vals.clone();
            maxvals.push((vmax,false));
            let kmin = bson::Value::encode_multi_for_index(minvals);
            let kmax = bson::Value::encode_multi_for_index(maxvals);
            stmt.clear_bindings();
            try!(stmt.bind_blob(1, &kmin).map_err(elmo::wrap_err));
            try!(stmt.bind_blob(2, &kmax).map_err(elmo::wrap_err));
            let mut entries = Vec::new();
            loop {
                match try!(stmt.step().map_err(elmo::wrap_err)) {
                    None => break,
                    Some(row) => {
                        let k = row.column_slice(0).expect("NOT NULL");
                        let w = try!(bson::Value::get_weight_from_index_entry(k));
                        let did = row.column_int64(1);
                        entries.push((did,w));
                    },
                }
            }
            stmt.reset();
            Ok(entries)
        };

        let vals = copy_dirs_from_normspec_to_vals(&normspec, eq);
        let sql = format!("SELECT k, doc_rowid FROM \"{}\" i WHERE k > ? AND k < ?", tbl_ndx);
        let mut stmt = try!(myconn.conn.prepare(&sql).map_err(elmo::wrap_err));
        // collect raw index hits per term; a phrase is looked up word by word
        let mut found = Vec::new();
        for term in &terms {
            let entries = match term {
                &elmo::TextQueryTerm::Word(neg, ref s) => {
                    let entries = try!(lookup(&mut stmt, &vals, &s));
                    entries
                },
                &elmo::TextQueryTerm::Phrase(neg, ref s) => {
                    // TODO tokenize properly
                    let words = s.split(" ");
                    let mut entries = Vec::new();
                    for w in words {
                        entries.push_all(&try!(lookup(&mut stmt, &vals, w)));
                    }
                    entries
                },
            };
            let v = (term, entries);
            found.push(v);
        };

        // True if any weighted field of doc contains p as a substring.
        fn contains_phrase(weights: &std::collections::HashMap<String, i32>, doc: &bson::Value, p: &str) -> bool {
            for k in weights.keys() {
                let found =
                    // TODO convert this to use walk_path()
                    match doc.find_path(k) {
                        bson::Value::BUndefined => false,
                        v => match v {
                            bson::Value::BString(s) => s.find(p).is_some(),
                            _ => false,
                        },
                    };
                if found {
                    return true;
                }
            }
            return false;
        }

        // Re-checks phrase terms against the full document (word terms were
        // already satisfied by the index lookup).
        fn check_phrase(terms: &Vec<elmo::TextQueryTerm>, weights: &std::collections::HashMap<String, i32>, doc: &bson::Value) -> bool {
            for term in terms {
                let b = match term {
                    &elmo::TextQueryTerm::Word(neg, ref s) => true,
                    &elmo::TextQueryTerm::Phrase(neg, ref s) => {
                        let has = contains_phrase(weights, doc, s);
                        if neg {
                            !has
                        } else {
                            has
                        }
                    },
                };
                if !b {
                    return false;
                }
            }
            return true;
        }

        // split hits into positive and negated-word sets
        let mut pos_entries = Vec::new();
        let mut neg_entries = Vec::new();
        for e in found {
            let (term, entries) = e;
            match term {
                &elmo::TextQueryTerm::Word(neg, ref s) => {
                    if neg {
                        neg_entries.push_all(&entries);
                    } else {
                        pos_entries.push_all(&entries);
                    }
                },
                &elmo::TextQueryTerm::Phrase(neg, ref s) => {
                    if neg {
                        // TODO probably should not negate a doc just because it contains one of the words in a negated phrase
                        // neg_entries.push_all(&entries);
                    } else {
                        pos_entries.push_all(&entries);
                    }
                },
            };
        }
        // drop any positive hit whose doc also matched a negated word
        let neg_docids = neg_entries.into_iter().map(|t| t.0).collect::<std::collections::HashSet<_>>();
        let mut remaining = Vec::new();
        for t in pos_entries {
            let (did, w) = t;
            if !neg_docids.contains(&did) {
                remaining.push((did, w));
            }
        }
        // group the surviving weights by doc rowid
        let mut doc_weights: std::collections::HashMap<i64, Vec<i32>> = std::collections::HashMap::new();
        for t in remaining {
            let (did, w) = t;
            if doc_weights.contains_key(&did) {
                let v = doc_weights.get_mut(&did).expect("just checked this");
                v.push(w);
            } else {
                doc_weights.insert(did, vec![w]);
            }
        }
        // fetch each candidate doc, apply phrase checks, compute a score
        let sql = format!("SELECT bson FROM \"{}\" WHERE did=?", tbl_coll);
        let mut stmt = try!(myconn.conn.prepare(&sql).map_err(elmo::wrap_err));
        let mut res = Vec::new();
        for (did, cur_weights) in doc_weights {
            try!(stmt.bind_int64(1, did).map_err(elmo::wrap_err));
            {
                let rdr = RefStatementBsonValueIterator {
                    stmt: &mut stmt,
                };
                for r in rdr {
                    let mut r = try!(r);
                    let keep = check_phrase(&terms, &weights, &r.doc);
                    if keep {
                        // TODO this is not the way mongo does this calculation
                        let score = cur_weights.iter().sum::<i32>() as f64;
                        r.score = Some(score);
                        res.push(Ok(r));
                    }
                }
            }
            stmt.reset();
        }
        let rdr = MyCollectionReader {
            commit_on_drop: commit_on_drop,
            seq: box res.into_iter(),
            myconn: myconn,
        };
        Ok(rdr)
    }

    // Dispatch: missing collection -> empty reader; text plan -> text scan;
    // other plan -> index scan; no plan -> table scan.
    fn get_collection_reader(&self, myconn: std::rc::Rc<MyConn>, commit_on_drop: bool, db: &str, coll: &str, plan: Option<elmo::QueryPlan>) -> Result<MyCollectionReader> {
        match try!(self.get_collection_options(db, coll)) {
            None => {
                let rdr = MyCollectionReader {
                    commit_on_drop: commit_on_drop,
                    seq: box MyEmptyIterator,
                    myconn: myconn,
                };
                Ok(rdr)
            },
            Some(_) => {
                match plan {
                    Some(plan) => {
                        match plan.bounds {
                            elmo::QueryBounds::Text(eq,terms) => {
                                let rdr = try!(Self::get_text_index_scan_reader(myconn, commit_on_drop, &plan.ndx, eq, terms));
                                return Ok(rdr);
                            },
                            _ => {
                                let rdr = try!(Self::get_nontext_index_scan_reader(myconn, commit_on_drop, plan));
                                return Ok(rdr);
                            },
                        }
                    },
                    None => {
                        let rdr = try!(Self::get_table_scan_reader(myconn, commit_on_drop, db, coll));
                        return Ok(rdr);
                    },
                };
            },
        }
    }

    // Looks up one index's catalog row, or None if it does not exist.
    fn get_index_info(&self, db: &str, coll: &str, name: &str) -> Result<Option<elmo::IndexInfo>> {
        // TODO DRY this string, below
        let mut stmt =
try!(self.conn.prepare("SELECT ndxName, spec, options, dbName, collName FROM \"indexes\" WHERE dbName=? AND collName=? AND ndxName=?").map_err(elmo::wrap_err));
        try!(stmt.bind_text(1, db).map_err(elmo::wrap_err));
        try!(stmt.bind_text(2, coll).map_err(elmo::wrap_err));
        try!(stmt.bind_text(3, name).map_err(elmo::wrap_err));
        match try!(stmt.step().map_err(elmo::wrap_err)) {
            None => Ok(None),
            Some(row) => {
                let info = try!(get_index_info_from_row(&row));
                Ok(Some(info))
            },
        }
    }

    // All index catalog rows, across every db/collection.
    fn base_list_indexes(&self) -> Result<Vec<elmo::IndexInfo>> {
        // TODO DRY this string, above
        let mut stmt = try!(self.conn.prepare("SELECT ndxName, spec, options, dbName, collName FROM \"indexes\"").map_err(elmo::wrap_err));
        let mut v = Vec::new();
        loop {
            match try!(stmt.step().map_err(elmo::wrap_err)) {
                None => break,
                Some(row) => {
                    let info = try!(get_index_info_from_row(&row));
                    v.push(info);
                },
            }
        }
        Ok(v)
    }

    // All collection catalog rows, sorted by collection name.
    fn base_list_collections(&self) -> Result<Vec<elmo::CollectionInfo>> {
        let mut stmt = try!(self.conn.prepare("SELECT dbName, collName, options FROM \"collections\" ORDER BY collName ASC").map_err(elmo::wrap_err));
        let mut v = Vec::new();
        loop {
            match try!(stmt.step().map_err(elmo::wrap_err)) {
                None => break,
                Some(row) => {
                    let info = try!(get_collection_info_from_row(&row));
                    v.push(info);
                },
            }
        }
        Ok(v)
    }
}

impl MyCollectionWriter {
    // Finds the docs-table rowid for a value via the _id_ index statement;
    // returns None if the collection has no such index or no match.
    fn find_rowid(&mut self, v: &bson::Value) -> Result<Option<i64>> {
        match self.stmt_find_rowid {
            None => Ok(None),
            Some(ref mut stmt) => {
                stmt.clear_bindings();
                let ba = bson::Value::encode_one_for_index(v, false);
                try!(stmt.bind_blob(1, &ba).map_err(elmo::wrap_err));
                let r = match try!(stmt.step().map_err(elmo::wrap_err)) {
                    None => Ok(None),
                    Some(r) => {
                        let rowid = r.column_int64(0);
                        Ok(Some(rowid))
                    },
                };
                stmt.reset();
                r
            },
        }
    }

    // Removes every index entry pointing at the given doc rowid.
    fn update_indexes_delete(indexes: &mut Vec<IndexPrep>, rowid: i64) -> Result<()> {
        for t in indexes {
            t.stmt_delete.clear_bindings();
            try!(t.stmt_delete.bind_int64(1, rowid).map_err(elmo::wrap_err));
            try!(step_done(&mut t.stmt_delete));
            t.stmt_delete.reset();
        }
        Ok(())
    }

    // Computes and inserts the index entries of document v (at rowid) for
    // every index; entries are deduplicated through a HashSet first.
    fn update_indexes_insert(indexes: &mut Vec<IndexPrep>, rowid: i64, v: &bson::Document) -> Result<()> {
        for t in indexes {
            let (normspec, weights) = try!(elmo::get_normalized_spec(&t.info));
            let mut entries = Vec::new();
            try!(get_index_entries(&v, &normspec, &weights, &t.info.options, &mut entries));
            let entries = entries.into_iter().collect::<std::collections::HashSet<_>>();
            for vals in entries {
                let k = bson::Value::encode_multi_for_index(vals);
                try!(index_insert_step(&mut t.stmt_insert, k, rowid));
            }
        }
        Ok(())
    }
}

impl elmo::StorageCollectionWriter for MyCollectionWriter {
    // Replaces the stored document found via its _id, then refreshes all
    // index entries for that rowid (delete then re-insert).
    fn update(&mut self, v: &bson::Document) -> Result<()> {
        match v.get("_id") {
            None => Err(elmo::Error::Misc(String::from("cannot update without _id"))),
            Some(id) => {
                match try!(self.find_rowid(&id).map_err(elmo::wrap_err)) {
                    None => Err(elmo::Error::Misc(String::from("update but does not exist"))),
                    Some(rowid) => {
                        let ba = v.to_bson_array();
                        self.update.clear_bindings();
                        try!(self.update.bind_blob(1,&ba).map_err(elmo::wrap_err));
                        try!(self.update.bind_int64(2, rowid).map_err(elmo::wrap_err));
                        try!(step_done(&mut self.update));
                        try!(verify_changes(&self.update, 1));
                        self.update.reset();
                        try!(Self::update_indexes_delete(&mut self.indexes, rowid));
                        try!(Self::update_indexes_insert(&mut self.indexes, rowid, &v));
                        Ok(())
                    },
                }
            },
        }
    }

    // Deletes the document whose _id is v; returns whether a doc was removed.
    fn delete(&mut self, v: &bson::Value) -> Result<bool> {
        // TODO is v supposed to be the id?
        match try!(self.find_rowid(&v).map_err(elmo::wrap_err)) {
            None => Ok(false),
            Some(rowid) => {
                self.delete.clear_bindings();
                try!(self.delete.bind_int64(1, rowid).map_err(elmo::wrap_err));
                try!(step_done(&mut self.delete));
                self.delete.reset();
                let count = self.myconn.conn.changes();
                if count == 1 {
                    // TODO might not need index update here. foreign key cascade?
                    try!(Self::update_indexes_delete(&mut self.indexes, rowid));
                    Ok(true)
                } else if count == 0 {
                    Ok(false)
                } else {
                    Err(elmo::Error::Misc(String::from("changes() after delete is wrong")))
                }
            },
        }
    }

    // Inserts a new document and creates its index entries under the newly
    // assigned rowid.
    fn insert(&mut self, v: &bson::Document) -> Result<()> {
        let ba = v.to_bson_array();
        self.insert.clear_bindings();
        try!(self.insert.bind_blob(1,&ba).map_err(elmo::wrap_err));
        try!(step_done(&mut self.insert));
        try!(verify_changes(&self.insert, 1));
        self.insert.reset();
        let rowid = self.myconn.conn.last_insert_rowid();
        try!(Self::update_indexes_delete(&mut self.indexes, rowid));
        try!(Self::update_indexes_insert(&mut self.indexes, rowid, &v));
        Ok(())
    }
}

impl MyWriter {
    // Prepares the (k, doc_rowid) insert statement for one index table.
    fn prepare_index_insert(&self, tbl: &str) -> Result<sqlite3::PreparedStatement> {
        let stmt = try!(self.myconn.conn.prepare(&format!("INSERT INTO \"{}\" (k,doc_rowid) VALUES (?,?)",tbl)).map_err(elmo::wrap_err));
        Ok(stmt)
    }

    // Creates one index: catalog row, index table (unique key if requested),
    // and back-fills entries for existing docs.  Returns false if an index
    // with the same name and spec already exists.
    fn create_index(&self, info: elmo::IndexInfo) -> Result<bool> {
        //println!("create_index: {:?}", info);
        let _created = try!(self.base_create_collection(&info.db, &info.coll, bson::Document::new()));
        match try!(self.myconn.get_index_info(&info.db, &info.coll, &info.name)) {
            Some(already) => {
                if already.spec != info.spec {
                    // note that we do not compare the options.
                    // I think mongo does it this way too.
                    Err(elmo::Error::Misc(String::from("index already exists with different keys")))
                } else {
                    Ok(false)
                }
            },
            None => {
                // TODO if we already have a text index (where any of its spec keys are text)
                // then fail.
let ba_spec = info.spec.to_bson_array(); let ba_options = info.options.to_bson_array(); let mut stmt = try!(self.myconn.conn.prepare("INSERT INTO \"indexes\" (dbName,collName,ndxName,spec,options) VALUES (?,?,?,?,?)").map_err(elmo::wrap_err)); try!(stmt.bind_text(1, &info.db).map_err(elmo::wrap_err)); try!(stmt.bind_text(2, &info.coll).map_err(elmo::wrap_err)); try!(stmt.bind_text(3, &info.name).map_err(elmo::wrap_err)); try!(stmt.bind_blob(4, &ba_spec).map_err(elmo::wrap_err)); try!(stmt.bind_blob(5, &ba_options).map_err(elmo::wrap_err)); match try!(stmt.step().map_err(elmo::wrap_err)) { None => { let tbl_coll = get_table_name_for_collection(&info.db, &info.coll); let tbl_ndx = get_table_name_for_index(&info.db, &info.coll, &info.name); let s = match info.options.get("unique") { Some(&bson::Value::BBoolean(true)) => { format!("CREATE TABLE \"{}\" (k BLOB NOT NULL, doc_rowid int NOT NULL REFERENCES \"{}\"(did) ON DELETE CASCADE, PRIMARY KEY (k))", tbl_ndx, tbl_coll) }, _ => { format!("CREATE TABLE \"{}\" (k BLOB NOT NULL, doc_rowid int NOT NULL REFERENCES \"{}\"(did) ON DELETE CASCADE, PRIMARY KEY (k,doc_rowid))", tbl_ndx, tbl_coll) }, }; try!(self.myconn.conn.exec(&s).map_err(elmo::wrap_err)); try!(self.myconn.conn.exec(&format!("CREATE INDEX \"childndx_{}\" ON \"{}\" (doc_rowid)", tbl_ndx, tbl_ndx)).map_err(elmo::wrap_err)); // now insert index entries for every doc that already exists let (normspec, weights) = try!(elmo::get_normalized_spec(&info)); let mut stmt2 = try!(self.myconn.conn.prepare(&format!("SELECT did,bson FROM \"{}\"", tbl_coll)).map_err(elmo::wrap_err)); let mut stmt_insert = try!(self.prepare_index_insert(&tbl_ndx)); loop { match try!(stmt2.step().map_err(elmo::wrap_err)) { None => break, Some(row) => { let doc_rowid = row.column_int64(0); let new_doc = try!(bson::Document::from_bson(&row.column_slice(1).expect("NOT NULL"))); let mut entries = Vec::new(); try!(get_index_entries(&new_doc, &normspec, &weights, &info.options, &mut entries)); let 
entries = entries.into_iter().collect::<std::collections::HashSet<_>>(); for vals in entries { //println!("index entry: {:?}", vals); let k = bson::Value::encode_multi_for_index(vals); try!(index_insert_step(&mut stmt_insert, k, doc_rowid)); } }, } } Ok(true) }, Some(_) => { Err(elmo::Error::Misc(String::from("insert stmt step() returned a row"))) }, } }, } } fn base_clear_collection(&self, db: &str, coll: &str) -> Result<bool> { match try!(self.myconn.get_collection_options(db, coll)) { None => { let created = try!(self.base_create_collection(db, coll, bson::Document::new())); Ok(created) }, Some(_) => { let tbl = get_table_name_for_collection(db, coll); try!(self.myconn.conn.exec(&format!("DROP TABLE \"{}\"", tbl)).map_err(elmo::wrap_err)); Ok(false) }, } } fn base_rename_collection(&self, old_name: &str, new_name: &str, drop_target: bool) -> Result<bool> { let (old_db, old_coll) = try!(bson::split_name(old_name)); let (new_db, new_coll) = try!(bson::split_name(new_name)); // jstests/core/rename8.js seems to think that renaming to/from a system collection is illegal unless // that collection is system.users, which is "whitelisted". for now, we emulate this behavior, even // though system.users isn't supported. 
if old_coll != "system.users" && old_coll.starts_with("system.") { return Err(elmo::Error::Misc(String::from("renameCollection with a system collection not allowed."))) } if new_coll != "system.users" && new_coll.starts_with("system.") { return Err(elmo::Error::Misc(String::from("renameCollection with a system collection not allowed."))) } if drop_target { let _deleted = try!(self.base_drop_collection(new_db, new_coll)); } match try!(self.myconn.get_collection_options(old_db, old_coll)) { None => { let created = try!(self.base_create_collection(new_db, new_coll, bson::Document::new())); Ok(created) }, Some(_) => { let old_tbl = get_table_name_for_collection(old_db, old_coll); let new_tbl = get_table_name_for_collection(new_db, new_coll); let indexes = try!(self.myconn.base_list_indexes()); let indexes = indexes.into_iter().filter(|info| info.db == old_db && info.coll == old_coll ).collect::<Vec<_>>(); let mut stmt = try!(self.myconn.conn.prepare("UPDATE \"collections\" SET dbName=?, collName=? WHERE dbName=? 
AND collName=?").map_err(elmo::wrap_err));
                try!(stmt.bind_text(1, new_db).map_err(elmo::wrap_err));
                try!(stmt.bind_text(2, new_coll).map_err(elmo::wrap_err));
                try!(stmt.bind_text(3, old_db).map_err(elmo::wrap_err));
                try!(stmt.bind_text(4, old_coll).map_err(elmo::wrap_err));
                try!(step_done(&mut stmt));
                stmt.reset();
                try!(self.myconn.conn.exec(&format!("ALTER TABLE \"{}\" RENAME TO \"{}\"", old_tbl, new_tbl)).map_err(elmo::wrap_err));
                // rename each index's backing table to match the new collection name
                for info in indexes {
                    let old_ndx_tbl = get_table_name_for_index(old_db, old_coll, &info.name);
                    let new_ndx_tbl = get_table_name_for_index(new_db, new_coll, &info.name);
                    try!(self.myconn.conn.exec(&format!("ALTER TABLE \"{}\" RENAME TO \"{}\"", old_ndx_tbl, new_ndx_tbl)).map_err(elmo::wrap_err));
                }
                Ok(false)
            },
        }
    }

    /// Create a collection if it does not already exist: add its metadata
    /// row, create its data table, and (unless options say autoIndexId:false)
    /// create the unique _id_ index.  Returns Ok(true) only when the
    /// collection was newly created.
    fn base_create_collection(&self, db: &str, coll: &str, options: bson::Document) -> Result<bool> {
        match try!(self.myconn.get_collection_options(db, coll)) {
            Some(_) => Ok(false),
            None => {
                let v_options = options.to_bson_array();
                let mut stmt = try!(self.myconn.conn.prepare("INSERT INTO \"collections\" (dbName,collName,options) VALUES (?,?,?)").map_err(elmo::wrap_err));
                try!(stmt.bind_text(1, db).map_err(elmo::wrap_err));
                try!(stmt.bind_text(2, coll).map_err(elmo::wrap_err));
                try!(stmt.bind_blob(3, &v_options).map_err(elmo::wrap_err));
                match try!(stmt.step().map_err(elmo::wrap_err)) {
                    None => {
                        let tbl = get_table_name_for_collection(db, coll);
                        try!(self.myconn.conn.exec(&format!("CREATE TABLE \"{}\" (did INTEGER PRIMARY KEY, bson BLOB NOT NULL)", tbl)).map_err(elmo::wrap_err));
                        // now create mongo index for _id
                        match options.get("autoIndexId") {
                            Some(&bson::Value::BBoolean(false)) => (),
                            _ => {
                                let info = elmo::IndexInfo {
                                    db: String::from(db),
                                    coll: String::from(coll),
                                    name: String::from("_id_"),
                                    spec: bson::Document {pairs: vec![(String::from("_id"), bson::Value::BInt32(1))]},
                                    options: bson::Document {pairs: vec![(String::from("unique"), bson::Value::BBoolean(true))]},
                                };
                                // FIX: the Result was previously discarded with
                                // `let _created = self.create_index(info);`, so a
                                // failure to create the _id_ index (or its table)
                                // was silently swallowed.  Propagate it instead.
                                let _created = try!(self.create_index(info));
                            },
                        }
Ok(true)
                    },
                    Some(_) => {
                        Err(elmo::Error::Misc(String::from("insert stmt step() returned a row")))
                    },
                }
            },
        }
    }

    /// Create every index described in `what`.  Result has one bool per
    /// request: true when that index was newly created.
    fn base_create_indexes(&self, what: Vec<elmo::IndexInfo>) -> Result<Vec<bool>> {
        let mut v = Vec::new();
        for info in what {
            let b = try!(self.create_index(info));
            v.push(b);
        }
        Ok(v)
    }

    /// Drop an index: delete its metadata row and drop its backing table.
    /// Returns Ok(false) when no such index exists.
    fn base_drop_index(&self, db: &str, coll: &str, name: &str) -> Result<bool> {
        match try!(self.myconn.get_index_info(db, coll, name)) {
            None => Ok(false),
            Some(_) => {
                let mut stmt = try!(self.myconn.conn.prepare("DELETE FROM \"indexes\" WHERE dbName=? AND collName=? AND ndxName=?").map_err(elmo::wrap_err));
                try!(stmt.bind_text(1, db).map_err(elmo::wrap_err));
                try!(stmt.bind_text(2, coll).map_err(elmo::wrap_err));
                try!(stmt.bind_text(3, name).map_err(elmo::wrap_err));
                try!(step_done(&mut stmt));
                try!(verify_changes(&stmt, 1));
                stmt.reset();
                let tbl = get_table_name_for_index(db, coll, name);
                try!(self.myconn.conn.exec(&format!("DROP TABLE \"{}\"", tbl)).map_err(elmo::wrap_err));
                Ok(true)
            },
        }
    }

    /// Drop every collection belonging to `db`.  Returns Ok(true) when at
    /// least one collection was dropped.
    fn base_drop_database(&self, db: &str) -> Result<bool> {
        let collections = try!(self.myconn.base_list_collections());
        let mut b = false;
        for t in collections {
            if t.db == db {
                let _deleted = try!(self.base_drop_collection(&t.db, &t.coll));
                // it was just listed, so the drop must have found it
                assert!(_deleted);
                b = true;
            }
        }
        Ok(b)
    }

    /// Drop a collection: its indexes first, then its metadata row, then
    /// its data table.  Returns Ok(false) when the collection doesn't exist.
    fn base_drop_collection(&self, db: &str, coll: &str) -> Result<bool> {
        match try!(self.myconn.get_collection_options(db, coll)) {
            None => Ok(false),
            Some(_) => {
                let indexes = try!(self.myconn.base_list_indexes());
                for info in indexes {
                    if info.db == db && info.coll == coll {
                        try!(self.base_drop_index(&info.db, &info.coll, &info.name));
                    }
                }
                let mut stmt = try!(self.myconn.conn.prepare("DELETE FROM \"collections\" WHERE dbName=?
AND collName=?").map_err(elmo::wrap_err));
                try!(stmt.bind_text(1, db).map_err(elmo::wrap_err));
                try!(stmt.bind_text(2, coll).map_err(elmo::wrap_err));
                try!(step_done(&mut stmt));
                try!(verify_changes(&stmt, 1));
                stmt.reset();
                let tbl = get_table_name_for_collection(db, coll);
                try!(self.myconn.conn.exec(&format!("DROP TABLE \"{}\"", tbl)).map_err(elmo::wrap_err));
                Ok(true)
            },
        }
    }
}

impl elmo::StorageWriter for MyWriter {
    /// Build a per-collection writer: ensure the collection exists, then
    /// prepare the insert/delete/update statements for its data table, the
    /// _id_ lookup statement (if that index exists), and one insert/delete
    /// statement pair per index.
    fn get_collection_writer(&self, db: &str, coll: &str) -> Result<Box<elmo::StorageCollectionWriter + 'static>> {
        let _created = try!(self.base_create_collection(db, coll, bson::Document::new()));
        let tbl = get_table_name_for_collection(db, coll);
        let stmt_insert = try!(self.myconn.conn.prepare(&format!("INSERT INTO \"{}\" (bson) VALUES (?)", tbl)).map_err(elmo::wrap_err));
        let stmt_delete = try!(self.myconn.conn.prepare(&format!("DELETE FROM \"{}\" WHERE rowid=?", tbl)).map_err(elmo::wrap_err));
        let stmt_update = try!(self.myconn.conn.prepare(&format!("UPDATE \"{}\" SET bson=?
WHERE rowid=?", tbl)).map_err(elmo::wrap_err));
        let indexes = try!(self.myconn.base_list_indexes());
        let indexes = indexes.into_iter().filter(
            |ndx| ndx.db == db && ndx.coll == coll
            ).collect::<Vec<_>>();
        // the _id_ index (when present) gives rowid lookup by document id
        let mut find_rowid = None;
        for info in &indexes {
            if info.name == "_id_" {
                let tbl = get_table_name_for_index(db, coll, &info.name);
                find_rowid = Some(try!(self.myconn.conn.prepare(&format!("SELECT doc_rowid FROM \"{}\" WHERE k=?", tbl)).map_err(elmo::wrap_err)));
                break;
            }
        }
        let mut index_stmts = Vec::new();
        for info in indexes {
            let tbl_ndx = get_table_name_for_index(db, coll, &info.name);
            let stmt_insert = try!(self.prepare_index_insert(&tbl_ndx));
            let stmt_delete = try!(self.myconn.conn.prepare(&format!("DELETE FROM \"{}\" WHERE doc_rowid=?", tbl_ndx)).map_err(elmo::wrap_err));
            let t = IndexPrep {
                info: info,
                stmt_insert: stmt_insert,
                stmt_delete: stmt_delete
            };
            index_stmts.push(t);
        }
        let c = MyCollectionWriter {
            insert: stmt_insert,
            delete: stmt_delete,
            update: stmt_update,
            stmt_find_rowid: find_rowid,
            indexes: index_stmts,
            myconn: self.myconn.clone(),
        };
        Ok(box c)
    }

    /// Commit the transaction opened by begin_write(); clears in_tx so
    /// Drop won't roll back.
    fn commit(mut self: Box<Self>) -> Result<()> {
        try!(self.myconn.conn.exec("COMMIT TRANSACTION").map_err(elmo::wrap_err));
        self.in_tx = false;
        Ok(())
    }

    /// Explicitly roll back the transaction opened by begin_write().
    fn rollback(mut self: Box<Self>) -> Result<()> {
        try!(self.myconn.conn.exec("ROLLBACK TRANSACTION").map_err(elmo::wrap_err));
        self.in_tx = false;
        Ok(())
    }

    // TODO maybe just move all the stuff below from the private section into here?

    fn create_collection(&self, db: &str, coll: &str, options: bson::Document) -> Result<bool> {
        self.base_create_collection(db, coll, options)
    }

    fn drop_collection(&self, db: &str, coll: &str) -> Result<bool> {
        self.base_drop_collection(db, coll)
    }

    fn create_indexes(&self, what: Vec<elmo::IndexInfo>) -> Result<Vec<bool>> {
        self.base_create_indexes(what)
    }

    fn rename_collection(&self, old_name: &str, new_name: &str, drop_target: bool) -> Result<bool> {
        self.base_rename_collection(old_name, new_name, drop_target)
    }

    fn drop_index(&self, db: &str, coll: &str, name: &str) -> Result<bool> {
        self.base_drop_index(db, coll, name)
    }

    fn drop_database(&self, db: &str) -> Result<bool> {
        self.base_drop_database(db)
    }

    fn clear_collection(&self, db: &str, coll: &str) -> Result<bool> {
        self.base_clear_collection(db, coll)
    }
}

// TODO do we need to declare that StorageWriter must implement Drop ?
impl Drop for MyWriter {
    fn drop(&mut self) {
        // TODO consider panic here if still in tx. force caller to
        // explicitly commit or rollback.
        if self.in_tx {
            // TODO should rollback be the default here? or commit?
            let _ignored = self.myconn.conn.exec("ROLLBACK TRANSACTION");
        }
    }
}

// TODO do we need to declare that StorageReader must implement Drop ?
impl Drop for MyReader {
    fn drop(&mut self) {
        // TODO consider panic here if still in tx. force caller to
        // explicitly end the tx.

        // this transaction was [supposed to be] read-only, so it doesn't
        // matter in principle whether we commit or rollback. in SQL Server,
        // if temp tables were created, commit is MUCH faster than rollback.
        // but this is sqlite. anyway...
        if self.in_tx {
            let _ignored = self.myconn.conn.exec("COMMIT TRANSACTION");
        }
    }
}

impl Drop for MyCollectionReader {
    fn drop(&mut self) {
        // this transaction was [supposed to be] read-only, so it doesn't
        // matter in principle whether we commit or rollback. in SQL Server,
        // if temp tables were created, commit is MUCH faster than rollback.
        // but this is sqlite. anyway...
        if self.commit_on_drop {
            let _ignored = self.myconn.conn.exec("COMMIT TRANSACTION");
        }
    }
}

impl Iterator for MyCollectionReader {
    type Item = Result<elmo::Row>;
    // delegate to the underlying row sequence
    fn next(&mut self) -> Option<Self::Item> {
        self.seq.next()
    }
}

impl elmo::StorageBase for MyReader {
    fn get_collection_reader(&self, db: &str, coll: &str, plan: Option<elmo::QueryPlan>) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        // false: this reader keeps owning the transaction
        let rdr = try!(self.myconn.get_collection_reader(self.myconn.clone(), false, db, coll, plan));
        Ok(box rdr)
    }

    fn list_collections(&self) -> Result<Vec<elmo::CollectionInfo>> {
        self.myconn.base_list_collections()
    }

    fn list_indexes(&self) -> Result<Vec<elmo::IndexInfo>> {
        self.myconn.base_list_indexes()
    }
}

impl elmo::StorageReader for MyReader {
    /// Consume this reader, handing transaction ownership to the collection
    /// reader (commit_on_drop = true), so in_tx is cleared here.
    fn into_collection_reader(mut self: Box<Self>, db: &str, coll: &str, plan: Option<elmo::QueryPlan>) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        self.in_tx = false;
        let rdr = try!(self.myconn.get_collection_reader(self.myconn.clone(), true, db, coll, plan));
        Ok(box rdr)
    }
}

impl elmo::StorageBase for MyWriter {
    fn get_collection_reader(&self, db: &str, coll: &str, plan: Option<elmo::QueryPlan>) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let rdr = try!(self.myconn.get_collection_reader(self.myconn.clone(), false, db, coll, plan));
        Ok(box rdr)
    }

    fn list_collections(&self) -> Result<Vec<elmo::CollectionInfo>> {
        self.myconn.base_list_collections()
    }

    fn list_indexes(&self) -> Result<Vec<elmo::IndexInfo>> {
        self.myconn.base_list_indexes()
    }
}

impl elmo::StorageConnection for MyPublicConn {
    /// Start a write transaction (IMMEDIATE, so the write lock is taken now).
    fn begin_write(&self) -> Result<Box<elmo::StorageWriter + 'static>> {
        try!(self.myconn.conn.exec("BEGIN IMMEDIATE TRANSACTION").map_err(elmo::wrap_err));
        let w = MyWriter {
            myconn: self.myconn.clone(),
            in_tx: true,
        };
        Ok(box w)
    }

    /// Start a (deferred) read transaction.
    fn begin_read(&self) -> Result<Box<elmo::StorageReader + 'static>> {
        try!(self.myconn.conn.exec("BEGIN TRANSACTION").map_err(elmo::wrap_err));
        let r = MyReader {
            myconn: self.myconn.clone(),
            in_tx: true,
        };
        Ok(box r)
    }
}

/// Open (creating if necessary) the database file and ensure the metadata
/// schema exists: WAL journaling, foreign keys on, and the "collections"
/// and "indexes" catalog tables.
fn base_connect(name: &str) -> sqlite3::SqliteResult<sqlite3::DatabaseConnection> {
    let access = sqlite3::access::ByFilename {
        flags: sqlite3::access::flags::OPEN_READWRITE | sqlite3::access::flags::OPEN_CREATE,
        filename: name};
    let conn = try!(sqlite3::DatabaseConnection::new(access));
    try!(conn.exec("PRAGMA journal_mode=WAL"));
    try!(conn.exec("PRAGMA foreign_keys=ON"));
    try!(conn.exec("CREATE TABLE IF NOT EXISTS \"collections\" (dbName TEXT NOT NULL, collName TEXT NOT NULL, options BLOB NOT NULL, PRIMARY KEY (dbName,collName))"));
    try!(conn.exec("CREATE TABLE IF NOT EXISTS \"indexes\" (dbName TEXT NOT NULL, collName TEXT NOT NULL, ndxName TEXT NOT NULL, spec BLOB NOT NULL, options BLOB NOT NULL, PRIMARY KEY (dbName, collName, ndxName), FOREIGN KEY (dbName,collName) REFERENCES \"collections\" ON DELETE CASCADE ON UPDATE CASCADE, UNIQUE (spec,dbName,collName))"));
    Ok(conn)
}

/// Public entry point: open the named database and wrap it in the
/// StorageConnection implementation.
pub fn connect(name: &str) -> Result<Box<elmo::StorageConnection>> {
    let conn = try!(base_connect(name).map_err(elmo::wrap_err));
    let c = MyConn {
        conn: conn,
    };
    let c = MyPublicConn {
        myconn: std::rc::Rc::new(c)
    };
    Ok(box c)
}

/*
look at the non-allocating alternatives to column_text() and column_blob()
*/
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Parameterized string expansion use core::prelude::*; use core::{char, int, vec}; use core::iterator::IteratorUtil; #[deriving(Eq)] enum States { Nothing, Percent, SetVar, GetVar, PushParam, CharConstant, CharClose, IntConstant, SeekIfElse(int), SeekIfElsePercent(int), SeekIfEnd(int), SeekIfEndPercent(int) } /// Types of parameters a capability can use pub enum Param { String(~str), Number(int) } /// Container for static and dynamic variable arrays pub struct Variables { /// Static variables A-Z sta: [Param, ..26], /// Dynamic variables a-z dyn: [Param, ..26] } impl Variables { /// Return a new zero-initialized Variables pub fn new() -> Variables { Variables{ sta: [Number(0), ..26], dyn: [Number(0), ..26] } } } /** Expand a parameterized capability # Arguments * `cap` - string to expand * `params` - vector of params for %p1 etc * `vars` - Variables struct for %Pa etc To be compatible with ncurses, `vars` should be the same between calls to `expand` for multiple capabilities for the same terminal. 
*/
pub fn expand(cap: &[u8], params: &[Param], vars: &mut Variables)
    -> Result<~[u8], ~str> {
    let mut state = Nothing;
    // expanded cap will only rarely be larger than the cap itself
    let mut output = vec::with_capacity(cap.len());
    let mut stack: ~[Param] = ~[];
    // accumulates digits of a %{...} integer constant
    let mut intstate = ~[];

    // Copy parameters into a local vector for mutability
    let mut mparams = [Number(0), ..9];
    for mparams.mut_iter().zip(params.iter()).advance |(dst, &src)| {
        *dst = src;
    }

    for cap.iter().transform(|&x| x).advance |c| {
        let cur = c as char;
        // states that consume exactly one char reset to Nothing at the
        // bottom of the loop unless they changed state themselves
        let mut old_state = state;
        match state {
            Nothing => {
                if cur == '%' {
                    state = Percent;
                } else {
                    output.push(c);
                }
            },
            Percent => {
                // binary operators pop y first, then x (stack order)
                match cur {
                    '%' => { output.push(c); state = Nothing },
                    'c' => if stack.len() > 0 {
                        match stack.pop() {
                            // if c is 0, use 0200 (128) for ncurses compatibility
                            Number(c) => output.push(if c == 0 { 128 } else { c } as u8),
                            _       => return Err(~"a non-char was used with %c")
                        }
                    } else { return Err(~"stack is empty") },
                    's' => if stack.len() > 0 {
                        match stack.pop() {
                            String(s) => output.push_all(s.as_bytes()),
                            _         => return Err(~"a non-str was used with %s")
                        }
                    } else { return Err(~"stack is empty") },
                    'd' => if stack.len() > 0 {
                        match stack.pop() {
                            Number(x) => {
                                let s = x.to_str();
                                output.push_all(s.as_bytes())
                            }
                            _ => return Err(~"a non-number was used with %d")
                        }
                    } else { return Err(~"stack is empty") },
                    'p' => state = PushParam,
                    'P' => state = SetVar,
                    'g' => state = GetVar,
                    '\'' => state = CharConstant,
                    '{' => state = IntConstant,
                    'l' => if stack.len() > 0 {
                        match stack.pop() {
                            String(s) => stack.push(Number(s.len() as int)),
                            _         => return Err(~"a non-str was used with %l")
                        }
                    } else { return Err(~"stack is empty") },
                    '+' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x + y)),
                            _ => return Err(~"non-numbers on stack with +")
                        }
                    } else { return Err(~"stack is empty") },
                    '-' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x - y)),
                            _ => return Err(~"non-numbers on stack with -")
                        }
                    } else { return Err(~"stack is empty") },
                    '*' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x * y)),
                            _ => return Err(~"non-numbers on stack with *")
                        }
                    } else { return Err(~"stack is empty") },
                    '/' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x / y)),
                            _ => return Err(~"non-numbers on stack with /")
                        }
                    } else { return Err(~"stack is empty") },
                    'm' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x % y)),
                            _ => return Err(~"non-numbers on stack with %")
                        }
                    } else { return Err(~"stack is empty") },
                    '&' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x & y)),
                            _ => return Err(~"non-numbers on stack with &")
                        }
                    } else { return Err(~"stack is empty") },
                    '|' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x | y)),
                            _ => return Err(~"non-numbers on stack with |")
                        }
                    } else { return Err(~"stack is empty") },
                    '^' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x ^ y)),
                            _ => return Err(~"non-numbers on stack with ^")
                        }
                    } else { return Err(~"stack is empty") },
                    '=' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(if x == y { 1 } else { 0 })),
                            _ => return Err(~"non-numbers on stack with =")
                        }
                    } else { return Err(~"stack is empty") },
                    '>' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(if x > y { 1 } else { 0 })),
                            _ => return Err(~"non-numbers on stack with >")
                        }
                    } else { return Err(~"stack is empty") },
                    '<' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(if x < y { 1 } else { 0 })),
                            _ => return Err(~"non-numbers on stack with <")
                        }
                    } else { return Err(~"stack is empty") },
                    // logical and/or: any nonzero operand counts as true
                    'A' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(0), Number(_)) => stack.push(Number(0)),
                            (Number(_), Number(0)) => stack.push(Number(0)),
                            (Number(_), Number(_)) => stack.push(Number(1)),
                            _ => return Err(~"non-numbers on stack with logical and")
                        }
                    } else { return Err(~"stack is empty") },
                    'O' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(0), Number(0)) => stack.push(Number(0)),
                            (Number(_), Number(_)) => stack.push(Number(1)),
                            _ => return Err(~"non-numbers on stack with logical or")
                        }
                    } else { return Err(~"stack is empty") },
                    '!' => if stack.len() > 0 {
                        match stack.pop() {
                            Number(0) => stack.push(Number(1)),
                            Number(_) => stack.push(Number(0)),
                            _ => return Err(~"non-number on stack with logical not")
                        }
                    } else { return Err(~"stack is empty") },
                    '~' => if stack.len() > 0 {
                        match stack.pop() {
                            Number(x) => stack.push(Number(!x)),
                            _         => return Err(~"non-number on stack with %~")
                        }
                    } else { return Err(~"stack is empty") },
                    // %i: increment first two params (terminfo 1-based coords)
                    'i' => match (copy mparams[0], copy mparams[1]) {
                        (Number(x), Number(y)) => {
                            mparams[0] = Number(x+1);
                            mparams[1] = Number(y+1);
                        },
                        (_, _) => return Err(~"first two params not numbers with %i")
                    },

                    // conditionals
                    '?' => (),
                    't' => if stack.len() > 0 {
                        match stack.pop() {
                            // false: skip ahead to the matching %e or %;
                            Number(0) => state = SeekIfElse(0),
                            Number(_) => (),
                            _         => return Err(~"non-number on stack with conditional")
                        }
                    } else { return Err(~"stack is empty") },
                    'e' => state = SeekIfEnd(0),
                    ';' => (),

                    _ => return Err(fmt!("unrecognized format option %c", cur))
                }
            },
            PushParam => {
                // params are 1-indexed
                stack.push(copy mparams[match char::to_digit(cur, 10) {
                    Some(d) => d - 1,
                    None => return Err(~"bad param number")
                }]);
            },
            SetVar => {
                if cur >= 'A' && cur <= 'Z' {
                    if stack.len() > 0 {
                        let idx = (cur as u8) - ('A' as u8);
                        vars.sta[idx] = stack.pop();
                    } else { return Err(~"stack is empty") }
                } else if cur >= 'a' && cur <= 'z' {
                    if stack.len() > 0 {
                        let idx = (cur as u8) - ('a' as u8);
                        vars.dyn[idx] = stack.pop();
                    } else { return Err(~"stack is empty") }
                } else {
                    return Err(~"bad variable name in %P");
                }
            },
            GetVar => {
                if cur >= 'A' && cur <= 'Z' {
                    let idx = (cur as u8) - ('A' as u8);
                    stack.push(copy vars.sta[idx]);
                } else if cur >= 'a' && cur <= 'z' {
                    let idx = (cur as u8) - ('a' as u8);
                    stack.push(copy vars.dyn[idx]);
                } else {
                    return Err(~"bad variable name in %g");
                }
            },
            CharConstant => {
                stack.push(Number(c as int));
                state = CharClose;
            },
            CharClose => {
                // expect the closing quote of %'c'
                if cur != '\'' {
                    return Err(~"malformed character constant");
                }
            },
            IntConstant => {
                if cur == '}' {
                    stack.push(match int::parse_bytes(intstate, 10) {
                        Some(n) => Number(n),
                        None => return Err(~"bad int constant")
                    });
                    intstate.clear();
                    state = Nothing;
                } else {
                    intstate.push(cur as u8);
                    // stay in IntConstant to accumulate more digits
                    old_state = Nothing;
                }
            }
            // skip-ahead states for untaken %? branches; level tracks
            // nested conditionals so inner %; don't terminate the seek
            SeekIfElse(level) => {
                if cur == '%' {
                    state = SeekIfElsePercent(level);
                }
                old_state = Nothing;
            }
            SeekIfElsePercent(level) => {
                if cur == ';' {
                    if level == 0 {
                        state = Nothing;
                    } else {
                        state = SeekIfElse(level-1);
                    }
                } else if cur == 'e' && level == 0 {
                    state = Nothing;
                } else if cur == '?' {
                    state = SeekIfElse(level+1);
                } else {
                    state = SeekIfElse(level);
                }
            }
            SeekIfEnd(level) => {
                if cur == '%' {
                    state = SeekIfEndPercent(level);
                }
                old_state = Nothing;
            }
            SeekIfEndPercent(level) => {
                if cur == ';' {
                    if level == 0 {
                        state = Nothing;
                    } else {
                        state = SeekIfEnd(level-1);
                    }
                } else if cur == '?' {
                    state = SeekIfEnd(level+1);
                } else {
                    state = SeekIfEnd(level);
                }
            }
        }
        if state == old_state {
            state = Nothing;
        }
    }
    Ok(output)
}

#[cfg(test)]
mod test {
    use super::*;
    use core::result::Ok;

    #[test]
    fn test_basic_setabf() {
        let s = bytes!("\\E[48;5;%p1%dm");
        assert_eq!(expand(s, [Number(1)], &mut Variables::new()).unwrap(),
                   bytes!("\\E[48;5;1m").to_owned());
    }

    #[test]
    fn test_multiple_int_constants() {
        assert_eq!(expand(bytes!("%{1}%{2}%d%d"), [], &mut Variables::new()).unwrap(),
                   bytes!("21").to_owned());
    }

    #[test]
    fn test_op_i() {
        let mut vars = Variables::new();
        assert_eq!(expand(bytes!("%p1%d%p2%d%p3%d%i%p1%d%p2%d%p3%d"),
                          [Number(1),Number(2),Number(3)], &mut vars),
                   Ok(bytes!("123233").to_owned()));
        // with no params supplied, %i bumps the zero-initialized locals
        assert_eq!(expand(bytes!("%p1%d%p2%d%i%p1%d%p2%d"), [], &mut vars),
                   Ok(bytes!("0011").to_owned()));
    }

    #[test]
    fn test_param_stack_failure_conditions() {
        let mut varstruct = Variables::new();
        let vars = &mut varstruct;
        // unary ops: must fail on an empty stack, succeed with one entry
        let caps = ["%d", "%c", "%s", "%Pa", "%l", "%!", "%~"];
        for caps.iter().advance |cap| {
            let res = expand(cap.as_bytes(), [], vars);
            assert!(res.is_err(),
                    "Op %s succeeded incorrectly with 0 stack entries", *cap);
            let p = if *cap == "%s" || *cap == "%l" { String(~"foo") } else { Number(97) };
            let res = expand((bytes!("%p1")).to_owned() + cap.as_bytes(), [p], vars);
            assert!(res.is_ok(),
                    "Op %s failed with 1 stack entry: %s", *cap, res.unwrap_err());
        }
        // binary ops: need two stack entries
        let caps = ["%+", "%-", "%*", "%/", "%m", "%&", "%|", "%A", "%O"];
        for caps.iter().advance |cap| {
            let res = expand(cap.as_bytes(), [], vars);
            assert!(res.is_err(),
                    "Binop %s succeeded incorrectly with 0 stack entries", *cap);
            let res = expand((bytes!("%{1}")).to_owned() + cap.as_bytes(), [], vars);
            assert!(res.is_err(),
                    "Binop %s succeeded incorrectly with 1 stack entry", *cap);
            let res = expand((bytes!("%{1}%{2}")).to_owned() + cap.as_bytes(), [], vars);
            assert!(res.is_ok(),
                    "Binop %s failed with 2 stack entries: %s", *cap, res.unwrap_err());
        }
    }

    #[test]
    fn test_push_bad_param() {
        assert!(expand(bytes!("%pa"), [], &mut Variables::new()).is_err());
    }

    #[test]
    fn test_comparison_ops() {
        // each tuple: operator and expected '0'/'1' digit for <, ==, >
        let v = [('<', [1u8, 0u8, 0u8]), ('=', [0u8, 1u8, 0u8]), ('>', [0u8, 0u8, 1u8])];
        for v.iter().advance |&(op, bs)| {
            let s = fmt!("%%{1}%%{2}%%%c%%d", op);
            let res = expand(s.as_bytes(), [], &mut Variables::new());
            assert!(res.is_ok(), res.unwrap_err());
            assert_eq!(res.unwrap(), ~['0' as u8 + bs[0]]);
            let s = fmt!("%%{1}%%{1}%%%c%%d", op);
            let res = expand(s.as_bytes(), [], &mut Variables::new());
            assert!(res.is_ok(), res.unwrap_err());
            assert_eq!(res.unwrap(), ~['0' as u8 + bs[1]]);
            let s = fmt!("%%{2}%%{1}%%%c%%d", op);
            let res = expand(s.as_bytes(), [], &mut Variables::new());
            assert!(res.is_ok(), res.unwrap_err());
            assert_eq!(res.unwrap(), ~['0' as u8 + bs[2]]);
        }
    }

    #[test]
    fn test_conditionals() {
        let mut vars = Variables::new();
        // setaf-style capability: 3 ranges of color codes selected by %?
        let s = bytes!("\\E[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m");
        let res = expand(s, [Number(1)], &mut vars);
        assert!(res.is_ok(), res.unwrap_err());
        assert_eq!(res.unwrap(), bytes!("\\E[31m").to_owned());
        let res = expand(s, [Number(8)], &mut vars);
        assert!(res.is_ok(), res.unwrap_err());
        assert_eq!(res.unwrap(), bytes!("\\E[90m").to_owned());
        let res = expand(s, [Number(42)], &mut vars);
        assert!(res.is_ok(), res.unwrap_err());
        assert_eq!(res.unwrap(), bytes!("\\E[38;5;42m").to_owned());
    }
}
Support printf formats in terminfo strings

terminfo parameterized strings supports a limited subset of printf-style
formatting operations, such as %#5.3d.

// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Parameterized string expansion
//!
//! NOTE(review): this module is written in a pre-1.0 Rust dialect (`~[u8]`,
//! `~str`, `for iter.advance |x|` internal iterators, `copy`, `priv`). The
//! comments below describe behavior as visible in the code itself.

use core::prelude::*;
use core::{char, vec, util};
use core::num::strconv::{SignNone,SignNeg,SignAll,DigAll,to_str_bytes_common};
use core::iterator::IteratorUtil;

// States of the `expand` state machine. `Nothing` is the pass-through state;
// `Percent` means the previous byte was '%'; the remaining states consume the
// characters of a multi-byte %-operation. The `int` payloads carry either an
// accumulating integer constant or a %? conditional nesting level.
#[deriving(Eq)]
enum States {
    Nothing,
    Percent,
    SetVar,
    GetVar,
    PushParam,
    CharConstant,
    CharClose,
    IntConstant(int),
    FormatPattern(Flags, FormatState),
    SeekIfElse(int),
    SeekIfElsePercent(int),
    SeekIfEnd(int),
    SeekIfEndPercent(int)
}

// Sub-state while parsing a printf-style pattern such as `%#5.3d`:
// flags, then field width digits, then precision digits.
#[deriving(Eq)]
enum FormatState {
    FormatStateFlags,
    FormatStateWidth,
    FormatStatePrecision
}

/// Types of parameters a capability can use
pub enum Param {
    String(~str),
    Number(int)
}

/// Container for static and dynamic variable arrays
pub struct Variables {
    /// Static variables A-Z
    sta: [Param, ..26],
    /// Dynamic variables a-z
    dyn: [Param, ..26]
}

impl Variables {
    /// Return a new zero-initialized Variables
    pub fn new() -> Variables {
        // All 52 variables start as Number(0).
        Variables {
            sta: [Number(0), ..26],
            dyn: [Number(0), ..26]
        }
    }
}

/**
  Expand a parameterized capability

  # Arguments
  * `cap`    - string to expand
  * `params` - vector of params for %p1 etc
  * `vars`   - Variables struct for %Pa etc

  To be compatible with ncurses, `vars` should be the same between calls to
  `expand` for multiple capabilities for the same terminal.
  */
pub fn expand(cap: &[u8], params: &[Param], vars: &mut Variables)
    -> Result<~[u8], ~str> {
    // Single-pass, byte-at-a-time state machine over `cap`. Non-'%' bytes in
    // the `Nothing` state are copied straight to the output.
    let mut state = Nothing;

    // expanded cap will only rarely be larger than the cap itself
    let mut output = vec::with_capacity(cap.len());

    // Operand stack shared by all %-operations (push/pop semantics).
    let mut stack: ~[Param] = ~[];

    // Copy parameters into a local vector for mutability
    // (at most 9 params are addressable via %p1..%p9; missing ones stay 0).
    let mut mparams = [Number(0), ..9];
    for mparams.mut_iter().zip(params.iter()).advance |(dst, &src)| {
        *dst = src;
    }

    for cap.iter().transform(|&x| x).advance |c| {
        let cur = c as char;
        // `old_state` is compared against `state` at the bottom of the loop:
        // one-shot states that did not transition fall back to `Nothing`.
        let mut old_state = state;
        match state {
            Nothing => {
                if cur == '%' {
                    state = Percent;
                } else {
                    output.push(c);
                }
            },
            Percent => {
                match cur {
                    '%' => { output.push(c); state = Nothing },
                    'c' => if stack.len() > 0 {
                        match stack.pop() {
                            // if c is 0, use 0200 (128) for ncurses compatibility
                            Number(c) => output.push(
                                if c == 0 { 128 } else { c } as u8),
                            _       => return Err(~"a non-char was used with %c")
                        }
                    } else { return Err(~"stack is empty") },
                    'p' => state = PushParam,
                    'P' => state = SetVar,
                    'g' => state = GetVar,
                    '\'' => state = CharConstant,
                    '{' => state = IntConstant(0),
                    'l' => if stack.len() > 0 {
                        // %l: replace a string on the stack by its length.
                        match stack.pop() {
                            String(s) => stack.push(Number(s.len() as int)),
                            _         => return Err(~"a non-str was used with %l")
                        }
                    } else { return Err(~"stack is empty") },
                    // Binary operations pop y then x and push (x op y).
                    '+' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x + y)),
                            _ => return Err(~"non-numbers on stack with +")
                        }
                    } else { return Err(~"stack is empty") },
                    '-' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x - y)),
                            _ => return Err(~"non-numbers on stack with -")
                        }
                    } else { return Err(~"stack is empty") },
                    '*' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x * y)),
                            _ => return Err(~"non-numbers on stack with *")
                        }
                    } else { return Err(~"stack is empty") },
                    '/' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x / y)),
                            _ => return Err(~"non-numbers on stack with /")
                        }
                    } else { return Err(~"stack is empty") },
                    'm' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x % y)),
                            _ => return Err(~"non-numbers on stack with %")
                        }
                    } else { return Err(~"stack is empty") },
                    '&' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x & y)),
                            _ => return Err(~"non-numbers on stack with &")
                        }
                    } else { return Err(~"stack is empty") },
                    '|' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x | y)),
                            _ => return Err(~"non-numbers on stack with |")
                        }
                    } else { return Err(~"stack is empty") },
                    '^' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(x ^ y)),
                            _ => return Err(~"non-numbers on stack with ^")
                        }
                    } else { return Err(~"stack is empty") },
                    // Comparisons push 1 (true) or 0 (false).
                    '=' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(
                                if x == y { 1 } else { 0 })),
                            _ => return Err(~"non-numbers on stack with =")
                        }
                    } else { return Err(~"stack is empty") },
                    '>' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(
                                if x > y { 1 } else { 0 })),
                            _ => return Err(~"non-numbers on stack with >")
                        }
                    } else { return Err(~"stack is empty") },
                    '<' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(y), Number(x)) => stack.push(Number(
                                if x < y { 1 } else { 0 })),
                            _ => return Err(~"non-numbers on stack with <")
                        }
                    } else { return Err(~"stack is empty") },
                    // %A / %O: logical and / or (0 is false, nonzero is true).
                    'A' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(0), Number(_)) => stack.push(Number(0)),
                            (Number(_), Number(0)) => stack.push(Number(0)),
                            (Number(_), Number(_)) => stack.push(Number(1)),
                            _ => return Err(~"non-numbers on stack with logical and")
                        }
                    } else { return Err(~"stack is empty") },
                    'O' => if stack.len() > 1 {
                        match (stack.pop(), stack.pop()) {
                            (Number(0), Number(0)) => stack.push(Number(0)),
                            (Number(_), Number(_)) => stack.push(Number(1)),
                            _ => return Err(~"non-numbers on stack with logical or")
                        }
                    } else { return Err(~"stack is empty") },
                    '!' => if stack.len() > 0 {
                        match stack.pop() {
                            Number(0) => stack.push(Number(1)),
                            Number(_) => stack.push(Number(0)),
                            _ => return Err(~"non-number on stack with logical not")
                        }
                    } else { return Err(~"stack is empty") },
                    '~' => if stack.len() > 0 {
                        // Bitwise complement.
                        match stack.pop() {
                            Number(x) => stack.push(Number(!x)),
                            _ => return Err(~"non-number on stack with %~")
                        }
                    } else { return Err(~"stack is empty") },
                    'i' => match (copy mparams[0], copy mparams[1]) {
                        // %i: increment the first two params (for 1-based
                        // cursor addressing); applies for the rest of this call.
                        (Number(x), Number(y)) => {
                            mparams[0] = Number(x+1);
                            mparams[1] = Number(y+1);
                        },
                        (_, _) => return Err(~"first two params not numbers with %i")
                    },

                    // printf-style support for %doxXs
                    'd'|'o'|'x'|'X'|'s' => if stack.len() > 0 {
                        // Bare format op with default (empty) flags.
                        let flags = Flags::new();
                        let res = format(stack.pop(), FormatOp::from_char(cur), flags);
                        if res.is_err() { return res }
                        output.push_all(res.unwrap())
                    } else { return Err(~"stack is empty") },
                    ':'|'#'|' '|'.'|'0'..'9' => {
                        // Start of an explicit format pattern; ':' only
                        // escapes a leading '-' or '+' so it sets nothing.
                        let mut flags = Flags::new();
                        let mut fstate = FormatStateFlags;
                        match cur {
                            ':' => (),
                            '#' => flags.alternate = true,
                            ' ' => flags.space = true,
                            '.' => fstate = FormatStatePrecision,
                            '0'..'9' => {
                                flags.width = (cur - '0') as uint;
                                fstate = FormatStateWidth;
                            }
                            _ => util::unreachable()
                        }
                        state = FormatPattern(flags, fstate);
                    }

                    // conditionals
                    '?' => (),
                    't' => if stack.len() > 0 {
                        // %t: if top-of-stack is 0, skip forward to the
                        // matching %e or %; via the Seek states.
                        match stack.pop() {
                            Number(0) => state = SeekIfElse(0),
                            Number(_) => (),
                            _ => return Err(~"non-number on stack with conditional")
                        }
                    } else { return Err(~"stack is empty") },
                    // %e reached after a taken then-branch: skip to %;.
                    'e' => state = SeekIfEnd(0),
                    ';' => (),

                    _ => return Err(fmt!("unrecognized format option %c", cur))
                }
            },
            PushParam => {
                // params are 1-indexed
                stack.push(copy mparams[match char::to_digit(cur, 10) {
                    Some(d) => d - 1,
                    None => return Err(~"bad param number")
                }]);
            },
            SetVar => {
                // %P followed by A-Z (static) or a-z (dynamic) stores the
                // popped value into the corresponding variable slot.
                if cur >= 'A' && cur <= 'Z' {
                    if stack.len() > 0 {
                        let idx = (cur as u8) - ('A' as u8);
                        vars.sta[idx] = stack.pop();
                    } else { return Err(~"stack is empty") }
                } else if cur >= 'a' && cur <= 'z' {
                    if stack.len() > 0 {
                        let idx = (cur as u8) - ('a' as u8);
                        vars.dyn[idx] = stack.pop();
                    } else { return Err(~"stack is empty") }
                } else {
                    return Err(~"bad variable name in %P");
                }
            },
            GetVar => {
                // %g: push a copy of the named variable.
                if cur >= 'A' && cur <= 'Z' {
                    let idx = (cur as u8) - ('A' as u8);
                    stack.push(copy vars.sta[idx]);
                } else if cur >= 'a' && cur <= 'z' {
                    let idx = (cur as u8) - ('a' as u8);
                    stack.push(copy vars.dyn[idx]);
                } else {
                    return Err(~"bad variable name in %g");
                }
            },
            CharConstant => {
                // %'c': push the character's code; a closing quote must follow.
                stack.push(Number(c as int));
                state = CharClose;
            },
            CharClose => {
                if cur != '\'' {
                    return Err(~"malformed character constant");
                }
            },
            IntConstant(i) => {
                // %{nnn}: accumulate decimal digits until '}'.
                match cur {
                    '}' => {
                        stack.push(Number(i));
                        state = Nothing;
                    }
                    '0'..'9' => {
                        state = IntConstant(i*10 + ((cur - '0') as int));
                        old_state = Nothing;
                    }
                    _ => return Err(~"bad int constant")
                }
            }
            FormatPattern(ref mut flags, ref mut fstate) => {
                // Continue consuming flags/width/precision until the final
                // conversion character (d, o, x, X or s) arrives.
                old_state = Nothing;
                match (*fstate, cur) {
                    (_,'d')|(_,'o')|(_,'x')|(_,'X')|(_,'s') => if stack.len() > 0 {
                        let res = format(stack.pop(), FormatOp::from_char(cur), *flags);
                        if res.is_err() { return res }
                        output.push_all(res.unwrap());
                        old_state = state; // will cause state to go to Nothing
                    } else { return Err(~"stack is empty") },
                    (FormatStateFlags,'#') => {
                        flags.alternate = true;
                    }
                    (FormatStateFlags,'-') => {
                        flags.left = true;
                    }
                    (FormatStateFlags,'+') => {
                        flags.sign = true;
                    }
                    (FormatStateFlags,' ') => {
                        flags.space = true;
                    }
                    (FormatStateFlags,'0'..'9') => {
                        flags.width = (cur - '0') as uint;
                        *fstate = FormatStateWidth;
                    }
                    (FormatStateFlags,'.') => {
                        *fstate = FormatStatePrecision;
                    }
                    (FormatStateWidth,'0'..'9') => {
                        // Guard against uint wrap-around while accumulating.
                        let old = flags.width;
                        flags.width = flags.width * 10 + ((cur - '0') as uint);
                        if flags.width < old { return Err(~"format width overflow") }
                    }
                    (FormatStateWidth,'.') => {
                        *fstate = FormatStatePrecision;
                    }
                    (FormatStatePrecision,'0'..'9') => {
                        let old = flags.precision;
                        flags.precision = flags.precision * 10 + ((cur - '0') as uint);
                        if flags.precision < old { return Err(~"format precision overflow") }
                    }
                    _ => return Err(~"invalid format specifier")
                }
            }
            // The Seek* states scan past a skipped conditional branch,
            // tracking nested %? with `level`; the *Percent variants mean
            // the previous scanned byte was '%'.
            SeekIfElse(level) => {
                if cur == '%' {
                    state = SeekIfElsePercent(level);
                }
                old_state = Nothing;
            }
            SeekIfElsePercent(level) => {
                if cur == ';' {
                    if level == 0 {
                        state = Nothing;
                    } else {
                        state = SeekIfElse(level-1);
                    }
                } else if cur == 'e' && level == 0 {
                    state = Nothing;
                } else if cur == '?' {
                    state = SeekIfElse(level+1);
                } else {
                    state = SeekIfElse(level);
                }
            }
            SeekIfEnd(level) => {
                if cur == '%' {
                    state = SeekIfEndPercent(level);
                }
                old_state = Nothing;
            }
            SeekIfEndPercent(level) => {
                if cur == ';' {
                    if level == 0 {
                        state = Nothing;
                    } else {
                        state = SeekIfEnd(level-1);
                    }
                } else if cur == '?' {
                    state = SeekIfEnd(level+1);
                } else {
                    state = SeekIfEnd(level);
                }
            }
        }
        // A state that made no transition this iteration is one-shot:
        // fall back to scanning plain output bytes.
        if state == old_state {
            state = Nothing;
        }
    }
    Ok(output)
}

// Accumulated printf-style modifiers for one conversion (e.g. `%#5.3d`).
#[deriving(Eq)]
priv struct Flags {
    width: uint,        // minimum field width (pad with spaces)
    precision: uint,    // min digits for numbers / max chars for strings
    alternate: bool,    // '#': 0o / 0x / 0X prefixes
    left: bool,         // '-': left-justify within width
    sign: bool,         // '+': always print a sign for %d
    space: bool         // ' ': space in place of '+' for non-negative %d
}

impl Flags {
    /// All-defaults flags: zero width/precision, no modifiers.
    priv fn new() -> Flags {
        Flags {
            width: 0,
            precision: 0,
            alternate: false,
            left: false,
            sign: false,
            space: false
        }
    }
}

// The terminal conversion character of a format pattern.
priv enum FormatOp {
    FormatDigit,   // %d
    FormatOctal,   // %o
    FormatHex,     // %x
    FormatHEX,     // %X
    FormatString   // %s
}

impl FormatOp {
    // Map a conversion character to its op; caller guarantees validity
    // (anything else is a bug, hence fail!).
    priv fn from_char(c: char) -> FormatOp {
        match c {
            'd' => FormatDigit,
            'o' => FormatOctal,
            'x' => FormatHex,
            'X' => FormatHEX,
            's' => FormatString,
            _ => fail!("bad FormatOp char")
        }
    }
    // Inverse of from_char, used for error messages.
    priv fn to_char(self) -> char {
        match self {
            FormatDigit => 'd',
            FormatOctal => 'o',
            FormatHex => 'x',
            FormatHEX => 'X',
            FormatString => 's'
        }
    }
}

// Render one popped Param according to `op` and `flags`, printf-style.
// Errors when the value's kind does not match the conversion.
priv fn format(val: Param, op: FormatOp, flags: Flags) -> Result<~[u8],~str> {
    let mut s = match val {
        Number(d) => {
            match op {
                FormatString => {
                    return Err(~"non-number on stack with %s")
                }
                _ => {
                    let radix = match op {
                        FormatDigit => 10,
                        FormatOctal => 8,
                        FormatHex|FormatHEX => 16,
                        FormatString => util::unreachable()
                    };
                    // %d is signed (honoring the '+' flag); o/x/X render the
                    // value reinterpreted as unsigned.
                    let mut (s,_) = match op {
                        FormatDigit => {
                            let sign = if flags.sign { SignAll } else { SignNeg };
                            to_str_bytes_common(&d, radix, false, sign, DigAll)
                        }
                        _ => to_str_bytes_common(&(d as uint), radix, false,
                                                 SignNone, DigAll)
                    };
                    // Zero-pad on the left up to `precision` digits.
                    if flags.precision > s.len() {
                        let mut s_ = vec::with_capacity(flags.precision);
                        let n = flags.precision - s.len();
                        s_.grow(n, &('0' as u8));
                        s_.push_all_move(s);
                        s = s_;
                    }
                    assert!(!s.is_empty(), "string conversion produced empty result");
                    match op {
                        FormatDigit => {
                            // ' ' flag: prefix a space unless a sign is present.
                            if flags.space && !(s[0] == '-' as u8 ||
                                                s[0] == '+' as u8) {
                                s.unshift(' ' as u8);
                            }
                        }
                        FormatOctal => {
                            // '#' flag: ensure a leading 0.
                            if flags.alternate && s[0] != '0' as u8 {
                                s.unshift('0' as u8);
                            }
                        }
                        FormatHex => {
                            // '#' flag: prepend "0x".
                            if flags.alternate {
                                let s_ = util::replace(&mut s, ~['0' as u8, 'x' as u8]);
                                s.push_all_move(s_);
                            }
                        }
                        FormatHEX => {
                            // Uppercase the digits, then '#' prepends "0X".
                            s = s.into_ascii().to_upper().into_bytes();
                            if flags.alternate {
                                let s_ = util::replace(&mut s, ~['0' as u8, 'X' as u8]);
                                s.push_all_move(s_);
                            }
                        }
                        FormatString => util::unreachable()
                    }
                    s
                }
            }
        }
        String(s) => {
            match op {
                FormatString => {
                    let mut s = s.as_bytes_with_null_consume();
                    s.pop(); // remove the null
                    // For strings, precision is a maximum length.
                    if flags.precision > 0 && flags.precision < s.len() {
                        s.truncate(flags.precision);
                    }
                    s
                }
                _ => {
                    return Err(fmt!("non-string on stack with %%%c", op.to_char()))
                }
            }
        }
    };
    // Pad to the minimum field width, left- or right-justified.
    if flags.width > s.len() {
        let n = flags.width - s.len();
        if flags.left {
            s.grow(n, &(' ' as u8));
        } else {
            let mut s_ = vec::with_capacity(flags.width);
            s_.grow(n, &(' ' as u8));
            s_.push_all_move(s);
            s = s_;
        }
    }
    Ok(s)
}

#[cfg(test)]
mod test {
    use super::*;
    use core::result::Ok;

    #[test]
    fn test_basic_setabf() {
        let s = bytes!("\\E[48;5;%p1%dm");
        assert_eq!(expand(s, [Number(1)], &mut Variables::new()).unwrap(),
                   bytes!("\\E[48;5;1m").to_owned());
    }

    #[test]
    fn test_multiple_int_constants() {
        assert_eq!(expand(bytes!("%{1}%{2}%d%d"), [], &mut Variables::new()).unwrap(),
                   bytes!("21").to_owned());
    }

    #[test]
    fn test_op_i() {
        // %i bumps %p1/%p2 for the remainder of one expansion only.
        let mut vars = Variables::new();
        assert_eq!(expand(bytes!("%p1%d%p2%d%p3%d%i%p1%d%p2%d%p3%d"),
                          [Number(1),Number(2),Number(3)], &mut vars),
                   Ok(bytes!("123233").to_owned()));
        assert_eq!(expand(bytes!("%p1%d%p2%d%i%p1%d%p2%d"), [], &mut vars),
                   Ok(bytes!("0011").to_owned()));
    }

    #[test]
    fn test_param_stack_failure_conditions() {
        let mut varstruct = Variables::new();
        let vars = &mut varstruct;
        // Unary ops must fail on an empty stack and succeed with one entry.
        let caps = ["%d", "%c", "%s", "%Pa", "%l", "%!", "%~"];
        for caps.iter().advance |cap| {
            let res = expand(cap.as_bytes(), [], vars);
            assert!(res.is_err(),
                    "Op %s succeeded incorrectly with 0 stack entries", *cap);
            let p = if *cap == "%s" || *cap == "%l" { String(~"foo") } else { Number(97) };
            let res = expand((bytes!("%p1")).to_owned() + cap.as_bytes(), [p], vars);
            assert!(res.is_ok(),
                    "Op %s failed with 1 stack entry: %s", *cap, res.unwrap_err());
        }
        // Binary ops need two entries.
        let caps = ["%+", "%-", "%*", "%/", "%m", "%&", "%|", "%A", "%O"];
        for caps.iter().advance |cap| {
            let res = expand(cap.as_bytes(), [], vars);
            assert!(res.is_err(),
                    "Binop %s succeeded incorrectly with 0 stack entries", *cap);
            let res = expand((bytes!("%{1}")).to_owned() + cap.as_bytes(), [], vars);
            assert!(res.is_err(),
                    "Binop %s succeeded incorrectly with 1 stack entry", *cap);
            let res = expand((bytes!("%{1}%{2}")).to_owned() + cap.as_bytes(), [], vars);
            assert!(res.is_ok(),
                    "Binop %s failed with 2 stack entries: %s", *cap, res.unwrap_err());
        }
    }

    #[test]
    fn test_push_bad_param() {
        assert!(expand(bytes!("%pa"), [], &mut Variables::new()).is_err());
    }

    #[test]
    fn test_comparison_ops() {
        // Each op against (1,2), (1,1), (2,1) yields the listed 0/1 results.
        let v = [('<', [1u8, 0u8, 0u8]),
                 ('=', [0u8, 1u8, 0u8]),
                 ('>', [0u8, 0u8, 1u8])];
        for v.iter().advance |&(op, bs)| {
            let s = fmt!("%%{1}%%{2}%%%c%%d", op);
            let res = expand(s.as_bytes(), [], &mut Variables::new());
            assert!(res.is_ok(), res.unwrap_err());
            assert_eq!(res.unwrap(), ~['0' as u8 + bs[0]]);
            let s = fmt!("%%{1}%%{1}%%%c%%d", op);
            let res = expand(s.as_bytes(), [], &mut Variables::new());
            assert!(res.is_ok(), res.unwrap_err());
            assert_eq!(res.unwrap(), ~['0' as u8 + bs[1]]);
            let s = fmt!("%%{2}%%{1}%%%c%%d", op);
            let res = expand(s.as_bytes(), [], &mut Variables::new());
            assert!(res.is_ok(), res.unwrap_err());
            assert_eq!(res.unwrap(), ~['0' as u8 + bs[2]]);
        }
    }

    #[test]
    fn test_conditionals() {
        // A real setaf-style capability exercising %? %t %e %; nesting.
        let mut vars = Variables::new();
        let s = bytes!("\\E[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m");
        let res = expand(s, [Number(1)], &mut vars);
        assert!(res.is_ok(), res.unwrap_err());
        assert_eq!(res.unwrap(), bytes!("\\E[31m").to_owned());
        let res = expand(s, [Number(8)], &mut vars);
        assert!(res.is_ok(), res.unwrap_err());
        assert_eq!(res.unwrap(), bytes!("\\E[90m").to_owned());
        let res = expand(s, [Number(42)], &mut vars);
        assert!(res.is_ok(), res.unwrap_err());
        assert_eq!(res.unwrap(), bytes!("\\E[38;5;42m").to_owned());
    }

    #[test]
    fn test_format() {
        // NOTE(review): expected literals reproduced as found in this copy of
        // the source; runs of spaces inside them may have been collapsed by
        // whitespace-mangling upstream — verify against the original file.
        let mut varstruct = Variables::new();
        let vars = &mut varstruct;
        assert_eq!(expand(bytes!("%p1%s%p2%2s%p3%2s%p4%.2s"),
                          [String(~"foo"), String(~"foo"), String(~"f"),
                           String(~"foo")], vars),
                   Ok(bytes!("foofoo ffo").to_owned()));
        assert_eq!(expand(bytes!("%p1%:-4.2s"), [String(~"foo")], vars),
                   Ok(bytes!("fo ").to_owned()));
        assert_eq!(expand(bytes!("%p1%d%p1%.3d%p1%5d%p1%:+d"), [Number(1)], vars),
                   Ok(bytes!("1001 1+1").to_owned()));
        assert_eq!(expand(bytes!("%p1%o%p1%#o%p2%6.4x%p2%#6.4X"),
                          [Number(15), Number(27)], vars),
                   Ok(bytes!("17017 001b0X001B").to_owned()));
    }
}
// ignore-tidy-filelength

//! Candidate selection. See the [rustc guide] for more information on how this works.
//!
//! [rustc guide]: https://rust-lang.github.io/rustc-guide/traits/resolution.html#selection

use self::EvaluationResult::*;
use self::SelectionCandidate::*;

use super::coherence::{self, Conflict};
use super::project;
use super::project::{normalize_with_depth, Normalized, ProjectionCacheKey};
use super::util;
use super::DerivedObligationCause;
use super::Selection;
use super::SelectionResult;
use super::TraitNotObjectSafe;
use super::{BuiltinDerivedObligation, ImplDerivedObligation, ObligationCauseCode};
use super::{IntercrateMode, TraitQueryMode};
use super::{ObjectCastObligation, Obligation};
use super::{ObligationCause, PredicateObligation, TraitObligation};
use super::{OutputTypeParameterMismatch, Overflow, SelectionError, Unimplemented};
use super::{
    VtableAutoImpl, VtableBuiltin, VtableClosure, VtableFnPointer, VtableGenerator, VtableImpl,
    VtableObject, VtableParam, VtableTraitAlias,
};
use super::{
    VtableAutoImplData, VtableBuiltinData, VtableClosureData, VtableFnPointerData,
    VtableGeneratorData, VtableImplData, VtableObjectData, VtableTraitAliasData,
};

use crate::dep_graph::{DepKind, DepNodeIndex};
use crate::hir::def_id::DefId;
use crate::infer::{CombinedSnapshot, InferCtxt, InferOk, PlaceholderMap, TypeFreshener};
use crate::middle::lang_items;
use crate::mir::interpret::GlobalId;
use crate::ty::fast_reject;
use crate::ty::relate::TypeRelation;
use crate::ty::subst::{Subst, SubstsRef};
use crate::ty::{self, ToPolyTraitRef, ToPredicate, Ty, TyCtxt, TypeFoldable};

use crate::hir;
use rustc_data_structures::bit_set::GrowableBitSet;
use rustc_data_structures::sync::Lock;
use rustc_target::spec::abi::Abi;
use std::cell::Cell;
use std::cmp;
use std::fmt::{self, Display};
use std::iter;
use std::rc::Rc;
use crate::util::nodemap::{FxHashMap, FxHashSet};

// Central driver of trait-obligation selection: wraps an inference context
// plus the knobs (intercrate mode, negative-impl filtering, query mode) that
// control how candidates are assembled and evaluated.
pub struct SelectionContext<'cx, 'gcx: 'cx + 'tcx, 'tcx: 'cx> {
    infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,

    /// Freshener used specifically for entries on the obligation
    /// stack. This ensures that all entries on the stack at one time
    /// will have the same set of placeholder entries, which is
    /// important for checking for trait bounds that recursively
    /// require themselves.
    freshener: TypeFreshener<'cx, 'gcx, 'tcx>,

    /// If `true`, indicates that the evaluation should be conservative
    /// and consider the possibility of types outside this crate.
    /// This comes up primarily when resolving ambiguity. Imagine
    /// there is some trait reference `$0: Bar` where `$0` is an
    /// inference variable. If `intercrate` is true, then we can never
    /// say for sure that this reference is not implemented, even if
    /// there are *no impls at all for `Bar`*, because `$0` could be
    /// bound to some type that in a downstream crate that implements
    /// `Bar`. This is the suitable mode for coherence. Elsewhere,
    /// though, we set this to false, because we are only interested
    /// in types that the user could actually have written --- in
    /// other words, we consider `$0: Bar` to be unimplemented if
    /// there is no type that the user could *actually name* that
    /// would satisfy it. This avoids crippling inference, basically.
    intercrate: Option<IntercrateMode>,

    // `Some` only while coherence-error diagnostics are being tracked; see
    // `enable_tracking_intercrate_ambiguity_causes` below.
    intercrate_ambiguity_causes: Option<Vec<IntercrateAmbiguityCause>>,

    /// Controls whether or not to filter out negative impls when selecting.
    /// This is used in librustdoc to distinguish between the lack of an impl
    /// and a negative impl
    allow_negative_impls: bool,

    /// The mode that trait queries run in, which informs our error handling
    /// policy. In essence, canonicalized queries need their errors propagated
    /// rather than immediately reported because we do not have accurate spans.
    query_mode: TraitQueryMode,
}

// Why two crates' impls may overlap from coherence's point of view: either a
// downstream crate could implement the trait, or an upstream crate could add
// an impl in a future (semver-compatible) version.
#[derive(Clone, Debug)]
pub enum IntercrateAmbiguityCause {
    DownstreamCrate {
        trait_desc: String,
        self_desc: Option<String>,
    },
    UpstreamCrateUpdate {
        trait_desc: String,
        self_desc: Option<String>,
    },
}

impl IntercrateAmbiguityCause {
    /// Emits notes when the overlap is caused by complex intercrate ambiguities.
    /// See #23980 for details.
    pub fn add_intercrate_ambiguity_hint<'a, 'tcx>(
        &self,
        err: &mut errors::DiagnosticBuilder<'_>,
    ) {
        err.note(&self.intercrate_ambiguity_hint());
    }

    // Renders the human-readable note text for this cause.
    pub fn intercrate_ambiguity_hint(&self) -> String {
        match self {
            &IntercrateAmbiguityCause::DownstreamCrate {
                ref trait_desc,
                ref self_desc,
            } => {
                let self_desc = if let &Some(ref ty) = self_desc {
                    format!(" for type `{}`", ty)
                } else {
                    String::new()
                };
                format!(
                    "downstream crates may implement trait `{}`{}",
                    trait_desc, self_desc
                )
            }
            &IntercrateAmbiguityCause::UpstreamCrateUpdate {
                ref trait_desc,
                ref self_desc,
            } => {
                let self_desc = if let &Some(ref ty) = self_desc {
                    format!(" for type `{}`", ty)
                } else {
                    String::new()
                };
                format!(
                    "upstream crates may add new impl of trait `{}`{} \
                     in future versions",
                    trait_desc, self_desc
                )
            }
        }
    }
}

// A stack that walks back up the stack frame.
struct TraitObligationStack<'prev, 'tcx: 'prev> {
    obligation: &'prev TraitObligation<'tcx>,

    /// Trait ref from `obligation` but "freshened" with the
    /// selection-context's freshener. Used to check for recursion.
    fresh_trait_ref: ty::PolyTraitRef<'tcx>,

    /// Starts out equal to `depth` -- if, during evaluation, we
    /// encounter a cycle, then we will set this flag to the minimum
    /// depth of that cycle for all participants in the cycle. These
    /// participants will then forego caching their results. This is
    /// not the most efficient solution, but it addresses #60010. The
    /// problem we are trying to prevent:
    ///
    /// - If you have `A: AutoTrait` requires `B: AutoTrait` and `C: NonAutoTrait`
    /// - `B: AutoTrait` requires `A: AutoTrait` (coinductive cycle, ok)
    /// - `C: NonAutoTrait` requires `A: AutoTrait` (non-coinductive cycle, not ok)
    ///
    /// you don't want to cache that `B: AutoTrait` or `A: AutoTrait`
    /// is `EvaluatedToOk`; this is because they were only considered
    /// ok on the premise that if `A: AutoTrait` held, but we indeed
    /// encountered a problem (later on) with `A: AutoTrait. So we
    /// currently set a flag on the stack node for `B: AutoTrait` (as
    /// well as the second instance of `A: AutoTrait`) to supress
    /// caching.
    ///
    /// This is a simple, targeted fix. A more-performant fix requires
    /// deeper changes, but would permit more caching: we could
    /// basically defer caching until we have fully evaluated the
    /// tree, and then cache the entire tree at once. In any case, the
    /// performance impact here shouldn't be so horrible: every time
    /// this is hit, we do cache at least one trait, so we only
    /// evaluate each member of a cycle up to N times, where N is the
    /// length of the cycle. This means the performance impact is
    /// bounded and we shouldn't have any terrible worst-cases.
    reached_depth: Cell<usize>,

    previous: TraitObligationStackList<'prev, 'tcx>,

    /// Number of parent frames plus one -- so the topmost frame has depth 1.
    depth: usize,
}

// Per-tcx cache of selection results, keyed by trait ref; each entry also
// records the dep-graph node under which it was computed (`WithDepNode`).
#[derive(Clone, Default)]
pub struct SelectionCache<'tcx> {
    hashmap: Lock<
        FxHashMap<ty::TraitRef<'tcx>, WithDepNode<SelectionResult<'tcx, SelectionCandidate<'tcx>>>>,
    >,
}

/// The selection process begins by considering all impls, where
/// clauses, and so forth that might resolve an obligation. Sometimes
/// we'll be able to say definitively that (e.g.) an impl does not
/// apply to the obligation: perhaps it is defined for `usize` but the
/// obligation is for `int`. In that case, we drop the impl out of the
/// list. But the other cases are considered *candidates*.
///
/// For selection to succeed, there must be exactly one matching
/// candidate. If the obligation is fully known, this is guaranteed
/// by coherence. However, if the obligation contains type parameters
/// or variables, there may be multiple such impls.
///
/// It is not a real problem if multiple matching impls exist because
/// of type variables - it just means the obligation isn't sufficiently
/// elaborated. In that case we report an ambiguity, and the caller can
/// try again after more type information has been gathered or report a
/// "type annotations required" error.
///
/// However, with type parameters, this can be a real problem - type
/// parameters don't unify with regular types, but they *can* unify
/// with variables from blanket impls, and (unless we know its bounds
/// will always be satisfied) picking the blanket impl will be wrong
/// for at least *some* substitutions. To make this concrete, if we have
///
///    trait AsDebug { type Out : fmt::Debug; fn debug(self) -> Self::Out; }
///    impl<T: fmt::Debug> AsDebug for T {
///        type Out = T;
///        fn debug(self) -> fmt::Debug { self }
///    }
///    fn foo<T: AsDebug>(t: T) { println!("{:?}", <T as AsDebug>::debug(t)); }
///
/// we can't just use the impl to resolve the <T as AsDebug> obligation
/// - a type from another crate (that doesn't implement fmt::Debug) could
/// implement AsDebug.
///
/// Because where-clauses match the type exactly, multiple clauses can
/// only match if there are unresolved variables, and we can mostly just
/// report this ambiguity in that case. This is still a problem - we can't
/// *do anything* with ambiguities that involve only regions. This is issue
/// #21974.
///
/// If a single where-clause matches and there are no inference
/// variables left, then it definitely matches and we can just select
/// it.
///
/// In fact, we even select the where-clause when the obligation contains
/// inference variables. The can lead to inference making "leaps of logic",
/// for example in this situation:
///
///    pub trait Foo<T> { fn foo(&self) -> T; }
///    impl<T> Foo<()> for T { fn foo(&self) { } }
///    impl Foo<bool> for bool { fn foo(&self) -> bool { *self } }
///
///    pub fn foo<T>(t: T) where T: Foo<bool> {
///        println!("{:?}", <T as Foo<_>>::foo(&t));
///    }
///    fn main() { foo(false); }
///
/// Here the obligation <T as Foo<$0>> can be matched by both the blanket
/// impl and the where-clause. We select the where-clause and unify $0=bool,
/// so the program prints "false". However, if the where-clause is omitted,
/// the blanket impl is selected, we unify $0=(), and the program prints
/// "()".
///
/// Exactly the same issues apply to projection and object candidates, except
/// that we can have both a projection candidate and a where-clause candidate
/// for the same obligation. In that case either would do (except that
/// different "leaps of logic" would occur if inference variables are
/// present), and we just pick the where-clause. This is, for example,
/// required for associated types to work in default impls, as the bounds
/// are visible both as projection bounds and as where-clauses from the
/// parameter environment.
#[derive(PartialEq, Eq, Debug, Clone)]
enum SelectionCandidate<'tcx> {
    /// If has_nested is false, there are no *further* obligations
    BuiltinCandidate {
        has_nested: bool,
    },
    ParamCandidate(ty::PolyTraitRef<'tcx>),
    ImplCandidate(DefId),
    AutoImplCandidate(DefId),

    /// This is a trait matching with a projected type as `Self`, and
    /// we found an applicable bound in the trait definition.
    ProjectionCandidate,

    /// Implementation of a `Fn`-family trait by one of the anonymous types
    /// generated for a `||` expression.
    ClosureCandidate,

    /// Implementation of a `Generator` trait by one of the anonymous types
    /// generated for a generator.
    GeneratorCandidate,

    /// Implementation of a `Fn`-family trait by one of the anonymous
    /// types generated for a fn pointer type (e.g., `fn(int)->int`)
    FnPointerCandidate,

    TraitAliasCandidate(DefId),

    ObjectCandidate,

    BuiltinObjectCandidate,

    BuiltinUnsizeCandidate,
}

// Lifting a candidate into another tcx lifetime; only `ParamCandidate`
// actually contains interned data that needs lifting.
impl<'a, 'tcx> ty::Lift<'tcx> for SelectionCandidate<'a> {
    type Lifted = SelectionCandidate<'tcx>;
    fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
        Some(match *self {
            BuiltinCandidate { has_nested } => BuiltinCandidate { has_nested },
            ImplCandidate(def_id) => ImplCandidate(def_id),
            AutoImplCandidate(def_id) => AutoImplCandidate(def_id),
            ProjectionCandidate => ProjectionCandidate,
            ClosureCandidate => ClosureCandidate,
            GeneratorCandidate => GeneratorCandidate,
            FnPointerCandidate => FnPointerCandidate,
            TraitAliasCandidate(def_id) => TraitAliasCandidate(def_id),
            ObjectCandidate => ObjectCandidate,
            BuiltinObjectCandidate => BuiltinObjectCandidate,
            BuiltinUnsizeCandidate => BuiltinUnsizeCandidate,
            ParamCandidate(ref trait_ref) => {
                return tcx.lift(trait_ref).map(ParamCandidate);
            }
        })
    }
}

struct SelectionCandidateSet<'tcx> {
    // a list of candidates that definitely apply to the current
    // obligation (meaning: types unify).
    vec: Vec<SelectionCandidate<'tcx>>,

    // if this is true, then there were candidates that might or might
    // not have applied, but we couldn't tell. This occurs when some
    // of the input types are type variables, in which case there are
    // various "builtin" rules that might or might not trigger.
    ambiguous: bool,
}

// A candidate paired with the result of evaluating its nested obligations.
#[derive(PartialEq, Eq, Debug, Clone)]
struct EvaluatedCandidate<'tcx> {
    candidate: SelectionCandidate<'tcx>,
    evaluation: EvaluationResult,
}

/// When does the builtin impl for `T: Trait` apply?
enum BuiltinImplConditions<'tcx> {
    /// The impl is conditional on T1,T2,.. : Trait
    Where(ty::Binder<Vec<Ty<'tcx>>>),
    /// There is no built-in impl. There may be some other
    /// candidate (a where-clause or user-defined impl).
    None,
    /// It is unknown whether there is an impl.
    Ambiguous,
}

#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
/// The result of trait evaluation. The order is important
/// here as the evaluation of a list is the maximum of the
/// evaluations.
///
/// The evaluation results are ordered:
///     - `EvaluatedToOk` implies `EvaluatedToOkModuloRegions`
///       implies `EvaluatedToAmbig` implies `EvaluatedToUnknown`
///     - `EvaluatedToErr` implies `EvaluatedToRecur`
///     - the "union" of evaluation results is equal to their maximum -
///     all the "potential success" candidates can potentially succeed,
///     so they are noops when unioned with a definite error, and within
///     the categories it's easy to see that the unions are correct.
pub enum EvaluationResult {
    /// Evaluation successful
    EvaluatedToOk,
    /// Evaluation successful, but there were unevaluated region obligations
    EvaluatedToOkModuloRegions,
    /// Evaluation is known to be ambiguous - it *might* hold for some
    /// assignment of inference variables, but it might not.
    ///
    /// While this has the same meaning as `EvaluatedToUnknown` - we can't
    /// know whether this obligation holds or not - it is the result we
    /// would get with an empty stack, and therefore is cacheable.
    EvaluatedToAmbig,
    /// Evaluation failed because of recursion involving inference
    /// variables. We are somewhat imprecise there, so we don't actually
    /// know the real result.
    ///
    /// This can't be trivially cached for the same reason as `EvaluatedToRecur`.
    EvaluatedToUnknown,
    /// Evaluation failed because we encountered an obligation we are already
    /// trying to prove on this branch.
    ///
    /// We know this branch can't be a part of a minimal proof-tree for
    /// the "root" of our cycle, because then we could cut out the recursion
    /// and maintain a valid proof tree. However, this does not mean
    /// that all the obligations on this branch do not hold - it's possible
    /// that we entered this branch "speculatively", and that there
    /// might be some other way to prove this obligation that does not
    /// go through this cycle - so we can't cache this as a failure.
    ///
    /// For example, suppose we have this:
    ///
    /// ```rust,ignore (pseudo-Rust)
    ///     pub trait Trait { fn xyz(); }
    ///     // This impl is "useless", but we can still have
    ///     // an `impl Trait for SomeUnsizedType` somewhere.
    ///     impl<T: Trait + Sized> Trait for T { fn xyz() {} }
    ///
    ///     pub fn foo<T: Trait + ?Sized>() {
    ///         <T as Trait>::xyz();
    ///     }
    /// ```
    ///
    /// When checking `foo`, we have to prove `T: Trait`. This basically
    /// translates into this:
    ///
    /// ```plain,ignore
    ///     (T: Trait + Sized →_\impl T: Trait), T: Trait ⊢ T: Trait
    /// ```
    ///
    /// When we try to prove it, we first go the first option, which
    /// recurses. This shows us that the impl is "useless" -- it won't
    /// tell us that `T: Trait` unless it already implemented `Trait`
    /// by some other means. However, that does not prevent `T: Trait`
    /// does not hold, because of the bound (which can indeed be satisfied
    /// by `SomeUnsizedType` from another crate).
    //
    // FIXME: when an `EvaluatedToRecur` goes past its parent root, we
    // ought to convert it to an `EvaluatedToErr`, because we know
    // there definitely isn't a proof tree for that obligation. Not
    // doing so is still sound -- there isn't any proof tree, so the
    // branch still can't be a part of a minimal one -- but does not re-enable caching.
    EvaluatedToRecur,
    /// Evaluation failed.
    EvaluatedToErr,
}

impl EvaluationResult {
    /// Returns `true` if this evaluation result is known to apply, even
    /// considering outlives constraints.
    pub fn must_apply_considering_regions(self) -> bool {
        self == EvaluatedToOk
    }

    /// Returns `true` if this evaluation result is known to apply, ignoring
    /// outlives constraints.
    pub fn must_apply_modulo_regions(self) -> bool {
        self <= EvaluatedToOkModuloRegions
    }

    // True for every result that does not definitively rule the
    // obligation out.
    pub fn may_apply(self) -> bool {
        match self {
            EvaluatedToOk | EvaluatedToOkModuloRegions | EvaluatedToAmbig | EvaluatedToUnknown => {
                true
            }

            EvaluatedToErr | EvaluatedToRecur => false,
        }
    }

    // Results that depend on the contents of the evaluation stack (and are
    // therefore not safe to cache globally).
    fn is_stack_dependent(self) -> bool {
        match self {
            EvaluatedToUnknown | EvaluatedToRecur => true,

            EvaluatedToOk | EvaluatedToOkModuloRegions | EvaluatedToAmbig | EvaluatedToErr => false,
        }
    }
}

impl_stable_hash_for!(enum self::EvaluationResult {
    EvaluatedToOk,
    EvaluatedToOkModuloRegions,
    EvaluatedToAmbig,
    EvaluatedToUnknown,
    EvaluatedToRecur,
    EvaluatedToErr
});

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
/// Indicates that trait evaluation caused overflow.
pub struct OverflowError;

impl_stable_hash_for!(struct OverflowError {});

impl<'tcx> From<OverflowError> for SelectionError<'tcx> {
    fn from(OverflowError: OverflowError) -> SelectionError<'tcx> {
        SelectionError::Overflow
    }
}

// Cache of `evaluate_*` results keyed by (poly) trait ref, with the
// dep-graph node recorded alongside each entry.
#[derive(Clone, Default)]
pub struct EvaluationCache<'tcx> {
    hashmap: Lock<FxHashMap<ty::PolyTraitRef<'tcx>, WithDepNode<EvaluationResult>>>,
}

impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> {
    // Standard selection context: no intercrate mode, negative impls
    // filtered, standard query mode.
    pub fn new(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>) -> SelectionContext<'cx, 'gcx, 'tcx> {
        SelectionContext {
            infcx,
            freshener: infcx.freshener(),
            intercrate: None,
            intercrate_ambiguity_causes: None,
            allow_negative_impls: false,
            query_mode: TraitQueryMode::Standard,
        }
    }

    // Coherence-mode constructor: evaluation must consider types from
    // other crates (see the `intercrate` field docs).
    pub fn intercrate(
        infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
        mode: IntercrateMode,
    ) -> SelectionContext<'cx, 'gcx, 'tcx> {
        debug!("intercrate({:?})", mode);
        SelectionContext {
            infcx,
            freshener: infcx.freshener(),
            intercrate: Some(mode),
            intercrate_ambiguity_causes: None,
            allow_negative_impls: false,
            query_mode: TraitQueryMode::Standard,
        }
    }

    // Constructor used by librustdoc so negative impls are surfaced
    // rather than filtered out.
    pub fn with_negative(
        infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
        allow_negative_impls: bool,
    ) -> SelectionContext<'cx, 'gcx, 'tcx> {
        debug!("with_negative({:?})", allow_negative_impls);
        SelectionContext {
            infcx,
            freshener: infcx.freshener(),
            intercrate: None,
            intercrate_ambiguity_causes: None,
            allow_negative_impls,
            query_mode: TraitQueryMode::Standard,
        }
    }

    // Constructor selecting the error-handling policy (see `query_mode`).
    pub fn with_query_mode(
        infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
        query_mode: TraitQueryMode,
    ) -> SelectionContext<'cx, 'gcx, 'tcx> {
        debug!("with_query_mode({:?})", query_mode);
        SelectionContext {
            infcx,
            freshener: infcx.freshener(),
            intercrate: None,
            intercrate_ambiguity_causes: None,
            allow_negative_impls: false,
            query_mode,
        }
    }

    /// Enables tracking of intercrate ambiguity causes. These are
    /// used in coherence to give improved diagnostics. We don't do
    /// this until we detect a coherence error because it can lead to
    /// false overflow results (#47139) and because it costs
    /// computation time.
    pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) {
        assert!(self.intercrate.is_some());
        assert!(self.intercrate_ambiguity_causes.is_none());
        self.intercrate_ambiguity_causes = Some(vec![]);
        debug!("selcx: enable_tracking_intercrate_ambiguity_causes");
    }

    /// Gets the intercrate ambiguity causes collected since tracking
    /// was enabled and disables tracking at the same time. If
    /// tracking is not enabled, just returns an empty vector.
    pub fn take_intercrate_ambiguity_causes(&mut self) -> Vec<IntercrateAmbiguityCause> {
        assert!(self.intercrate.is_some());
        self.intercrate_ambiguity_causes.take().unwrap_or(vec![])
    }

    pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> {
        self.infcx
    }

    pub fn tcx(&self) -> TyCtxt<'cx, 'gcx, 'tcx> {
        self.infcx.tcx
    }

    pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> {
        self.infcx
    }

    ///////////////////////////////////////////////////////////////////////////
    // Selection
    //
    // The selection phase tries to identify *how* an obligation will
    // be resolved. For example, it will identify which impl or
    // parameter bound is to be used. The process can be inconclusive
    // if the self type in the obligation is not fully inferred. Selection
    // can result in an error in one of two ways:
    //
    // 1.
If no applicable impl or parameter bound can be found. // 2. If the output type parameters in the obligation do not match // those specified by the impl/bound. For example, if the obligation // is `Vec<Foo>:Iterable<Bar>`, but the impl specifies // `impl<T> Iterable<T> for Vec<T>`, than an error would result. /// Attempts to satisfy the obligation. If successful, this will affect the surrounding /// type environment by performing unification. pub fn select( &mut self, obligation: &TraitObligation<'tcx>, ) -> SelectionResult<'tcx, Selection<'tcx>> { debug!("select({:?})", obligation); debug_assert!(!obligation.predicate.has_escaping_bound_vars()); let stack = self.push_stack(TraitObligationStackList::empty(), obligation); let candidate = match self.candidate_from_obligation(&stack) { Err(SelectionError::Overflow) => { // In standard mode, overflow must have been caught and reported // earlier. assert!(self.query_mode == TraitQueryMode::Canonical); return Err(SelectionError::Overflow); } Err(e) => { return Err(e); } Ok(None) => { return Ok(None); } Ok(Some(candidate)) => candidate, }; match self.confirm_candidate(obligation, candidate) { Err(SelectionError::Overflow) => { assert!(self.query_mode == TraitQueryMode::Canonical); Err(SelectionError::Overflow) } Err(e) => Err(e), Ok(candidate) => Ok(Some(candidate)), } } /////////////////////////////////////////////////////////////////////////// // EVALUATION // // Tests whether an obligation can be selected or whether an impl // can be applied to particular types. It skips the "confirmation" // step and hence completely ignores output type parameters. // // The result is "true" if the obligation *may* hold and "false" if // we can be sure it does not. /// Evaluates whether the obligation `obligation` can be satisfied (by any means). 
pub fn predicate_may_hold_fatal(&mut self, obligation: &PredicateObligation<'tcx>) -> bool { debug!("predicate_may_hold_fatal({:?})", obligation); // This fatal query is a stopgap that should only be used in standard mode, // where we do not expect overflow to be propagated. assert!(self.query_mode == TraitQueryMode::Standard); self.evaluate_root_obligation(obligation) .expect("Overflow should be caught earlier in standard query mode") .may_apply() } /// Evaluates whether the obligation `obligation` can be satisfied /// and returns an `EvaluationResult`. This is meant for the /// *initial* call. pub fn evaluate_root_obligation( &mut self, obligation: &PredicateObligation<'tcx>, ) -> Result<EvaluationResult, OverflowError> { self.evaluation_probe(|this| { this.evaluate_predicate_recursively( TraitObligationStackList::empty(), obligation.clone(), ) }) } fn evaluation_probe( &mut self, op: impl FnOnce(&mut Self) -> Result<EvaluationResult, OverflowError>, ) -> Result<EvaluationResult, OverflowError> { self.infcx.probe(|snapshot| -> Result<EvaluationResult, OverflowError> { let result = op(self)?; match self.infcx.region_constraints_added_in_snapshot(snapshot) { None => Ok(result), Some(_) => Ok(result.max(EvaluatedToOkModuloRegions)), } }) } /// Evaluates the predicates in `predicates` recursively. Note that /// this applies projections in the predicates, and therefore /// is run within an inference probe. 
fn evaluate_predicates_recursively<'a, 'o, I>( &mut self, stack: TraitObligationStackList<'o, 'tcx>, predicates: I, ) -> Result<EvaluationResult, OverflowError> where I: IntoIterator<Item = PredicateObligation<'tcx>>, 'tcx: 'a, { let mut result = EvaluatedToOk; for obligation in predicates { let eval = self.evaluate_predicate_recursively(stack, obligation.clone())?; debug!( "evaluate_predicate_recursively({:?}) = {:?}", obligation, eval ); if let EvaluatedToErr = eval { // fast-path - EvaluatedToErr is the top of the lattice, // so we don't need to look on the other predicates. return Ok(EvaluatedToErr); } else { result = cmp::max(result, eval); } } Ok(result) } fn evaluate_predicate_recursively<'o>( &mut self, previous_stack: TraitObligationStackList<'o, 'tcx>, obligation: PredicateObligation<'tcx>, ) -> Result<EvaluationResult, OverflowError> { debug!("evaluate_predicate_recursively(previous_stack={:?}, obligation={:?})", previous_stack.head(), obligation); // Previous_stack stores a TraitObligatiom, while 'obligation' is // a PredicateObligation. These are distinct types, so we can't // use any Option combinator method that would force them to be // the same match previous_stack.head() { Some(h) => self.check_recursion_limit(&obligation, h.obligation)?, None => self.check_recursion_limit(&obligation, &obligation)? } match obligation.predicate { ty::Predicate::Trait(ref t) => { debug_assert!(!t.has_escaping_bound_vars()); let obligation = obligation.with(t.clone()); self.evaluate_trait_predicate_recursively(previous_stack, obligation) } ty::Predicate::Subtype(ref p) => { // does this code ever run? match self.infcx .subtype_predicate(&obligation.cause, obligation.param_env, p) { Some(Ok(InferOk { mut obligations, .. 
})) => { self.add_depth(obligations.iter_mut(), obligation.recursion_depth); self.evaluate_predicates_recursively(previous_stack,obligations.into_iter()) } Some(Err(_)) => Ok(EvaluatedToErr), None => Ok(EvaluatedToAmbig), } } ty::Predicate::WellFormed(ty) => match ty::wf::obligations( self.infcx, obligation.param_env, obligation.cause.body_id, ty, obligation.cause.span, ) { Some(mut obligations) => { self.add_depth(obligations.iter_mut(), obligation.recursion_depth); self.evaluate_predicates_recursively(previous_stack, obligations.into_iter()) } None => Ok(EvaluatedToAmbig), }, ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) => { // we do not consider region relationships when // evaluating trait matches Ok(EvaluatedToOkModuloRegions) } ty::Predicate::ObjectSafe(trait_def_id) => { if self.tcx().is_object_safe(trait_def_id) { Ok(EvaluatedToOk) } else { Ok(EvaluatedToErr) } } ty::Predicate::Projection(ref data) => { let project_obligation = obligation.with(data.clone()); match project::poly_project_and_unify_type(self, &project_obligation) { Ok(Some(mut subobligations)) => { self.add_depth(subobligations.iter_mut(), obligation.recursion_depth); let result = self.evaluate_predicates_recursively( previous_stack, subobligations.into_iter(), ); if let Some(key) = ProjectionCacheKey::from_poly_projection_predicate(self, data) { self.infcx.projection_cache.borrow_mut().complete(key); } result } Ok(None) => Ok(EvaluatedToAmbig), Err(_) => Ok(EvaluatedToErr), } } ty::Predicate::ClosureKind(closure_def_id, closure_substs, kind) => { match self.infcx.closure_kind(closure_def_id, closure_substs) { Some(closure_kind) => { if closure_kind.extends(kind) { Ok(EvaluatedToOk) } else { Ok(EvaluatedToErr) } } None => Ok(EvaluatedToAmbig), } } ty::Predicate::ConstEvaluatable(def_id, substs) => { let tcx = self.tcx(); match tcx.lift_to_global(&(obligation.param_env, substs)) { Some((param_env, substs)) => { let instance = ty::Instance::resolve(tcx.global_tcx(), 
param_env, def_id, substs); if let Some(instance) = instance { let cid = GlobalId { instance, promoted: None, }; match self.tcx().const_eval(param_env.and(cid)) { Ok(_) => Ok(EvaluatedToOk), Err(_) => Ok(EvaluatedToErr), } } else { Ok(EvaluatedToErr) } } None => { // Inference variables still left in param_env or substs. Ok(EvaluatedToAmbig) } } } } } fn evaluate_trait_predicate_recursively<'o>( &mut self, previous_stack: TraitObligationStackList<'o, 'tcx>, mut obligation: TraitObligation<'tcx>, ) -> Result<EvaluationResult, OverflowError> { debug!("evaluate_trait_predicate_recursively({:?})", obligation); if self.intercrate.is_none() && obligation.is_global() && obligation .param_env .caller_bounds .iter() .all(|bound| bound.needs_subst()) { // If a param env has no global bounds, global obligations do not // depend on its particular value in order to work, so we can clear // out the param env and get better caching. debug!( "evaluate_trait_predicate_recursively({:?}) - in global", obligation ); obligation.param_env = obligation.param_env.without_caller_bounds(); } let stack = self.push_stack(previous_stack, &obligation); let fresh_trait_ref = stack.fresh_trait_ref; if let Some(result) = self.check_evaluation_cache(obligation.param_env, fresh_trait_ref) { debug!("CACHE HIT: EVAL({:?})={:?}", fresh_trait_ref, result); return Ok(result); } // Check if this is a match for something already on the // stack. If so, we don't want to insert the result into the // main cache (it is cycle dependent) nor the provisional // cache (which is meant for things that have completed but // for a "backedge" -- this result *is* the backedge). 
if let Some(cycle_result) = self.check_evaluation_cycle(&stack) { return Ok(cycle_result); } let (result, dep_node) = self.in_task(|this| this.evaluate_stack(&stack)); let result = result?; let reached_depth = stack.reached_depth.get(); if reached_depth >= stack.depth { debug!("CACHE MISS: EVAL({:?})={:?}", fresh_trait_ref, result); self.insert_evaluation_cache(obligation.param_env, fresh_trait_ref, dep_node, result); } else { debug!( "evaluate_trait_predicate_recursively: skipping cache because {:?} \ is a cycle participant (at depth {}, reached depth {})", fresh_trait_ref, stack.depth, reached_depth, ); } Ok(result) } /// If there is any previous entry on the stack that precisely /// matches this obligation, then we can assume that the /// obligation is satisfied for now (still all other conditions /// must be met of course). One obvious case this comes up is /// marker traits like `Send`. Think of a linked list: /// /// struct List<T> { data: T, next: Option<Box<List<T>>> } /// /// `Box<List<T>>` will be `Send` if `T` is `Send` and /// `Option<Box<List<T>>>` is `Send`, and in turn /// `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is /// `Send`. /// /// Note that we do this comparison using the `fresh_trait_ref` /// fields. Because these have all been freshened using /// `self.freshener`, we can be sure that (a) this will not /// affect the inferencer state and (b) that if we see two /// fresh regions with the same index, they refer to the same /// unbound type variable. 
fn check_evaluation_cycle( &mut self, stack: &TraitObligationStack<'_, 'tcx>, ) -> Option<EvaluationResult> { if let Some(cycle_depth) = stack.iter() .skip(1) // skip top-most frame .find(|prev| stack.obligation.param_env == prev.obligation.param_env && stack.fresh_trait_ref == prev.fresh_trait_ref) .map(|stack| stack.depth) { debug!( "evaluate_stack({:?}) --> recursive at depth {}", stack.fresh_trait_ref, cycle_depth, ); // If we have a stack like `A B C D E A`, where the top of // the stack is the final `A`, then this will iterate over // `A, E, D, C, B` -- i.e., all the participants apart // from the cycle head. We mark them as participating in a // cycle. This suppresses caching for those nodes. See // `in_cycle` field for more details. stack.update_reached_depth(cycle_depth); // Subtle: when checking for a coinductive cycle, we do // not compare using the "freshened trait refs" (which // have erased regions) but rather the fully explicit // trait refs. This is important because it's only a cycle // if the regions match exactly. let cycle = stack.iter().skip(1).take_while(|s| s.depth >= cycle_depth); let cycle = cycle.map(|stack| ty::Predicate::Trait(stack.obligation.predicate)); if self.coinductive_match(cycle) { debug!( "evaluate_stack({:?}) --> recursive, coinductive", stack.fresh_trait_ref ); Some(EvaluatedToOk) } else { debug!( "evaluate_stack({:?}) --> recursive, inductive", stack.fresh_trait_ref ); Some(EvaluatedToRecur) } } else { None } } fn evaluate_stack<'o>( &mut self, stack: &TraitObligationStack<'o, 'tcx>, ) -> Result<EvaluationResult, OverflowError> { // In intercrate mode, whenever any of the types are unbound, // there can always be an impl. Even if there are no impls in // this crate, perhaps the type would be unified with // something from another crate that does provide an impl. // // In intra mode, we must still be conservative. The reason is // that we want to avoid cycles. 
Imagine an impl like: // // impl<T:Eq> Eq for Vec<T> // // and a trait reference like `$0 : Eq` where `$0` is an // unbound variable. When we evaluate this trait-reference, we // will unify `$0` with `Vec<$1>` (for some fresh variable // `$1`), on the condition that `$1 : Eq`. We will then wind // up with many candidates (since that are other `Eq` impls // that apply) and try to winnow things down. This results in // a recursive evaluation that `$1 : Eq` -- as you can // imagine, this is just where we started. To avoid that, we // check for unbound variables and return an ambiguous (hence possible) // match if we've seen this trait before. // // This suffices to allow chains like `FnMut` implemented in // terms of `Fn` etc, but we could probably make this more // precise still. let unbound_input_types = stack .fresh_trait_ref .skip_binder() .input_types() .any(|ty| ty.is_fresh()); // this check was an imperfect workaround for a bug n the old // intercrate mode, it should be removed when that goes away. if unbound_input_types && self.intercrate == Some(IntercrateMode::Issue43355) { debug!( "evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous", stack.fresh_trait_ref ); // Heuristics: show the diagnostics when there are no candidates in crate. 
if self.intercrate_ambiguity_causes.is_some() { debug!("evaluate_stack: intercrate_ambiguity_causes is some"); if let Ok(candidate_set) = self.assemble_candidates(stack) { if !candidate_set.ambiguous && candidate_set.vec.is_empty() { let trait_ref = stack.obligation.predicate.skip_binder().trait_ref; let self_ty = trait_ref.self_ty(); let cause = IntercrateAmbiguityCause::DownstreamCrate { trait_desc: trait_ref.to_string(), self_desc: if self_ty.has_concrete_skeleton() { Some(self_ty.to_string()) } else { None }, }; debug!("evaluate_stack: pushing cause = {:?}", cause); self.intercrate_ambiguity_causes .as_mut() .unwrap() .push(cause); } } } return Ok(EvaluatedToAmbig); } if unbound_input_types && stack.iter().skip(1).any(|prev| { stack.obligation.param_env == prev.obligation.param_env && self.match_fresh_trait_refs(&stack.fresh_trait_ref, &prev.fresh_trait_ref) }) { debug!( "evaluate_stack({:?}) --> unbound argument, recursive --> giving up", stack.fresh_trait_ref ); return Ok(EvaluatedToUnknown); } match self.candidate_from_obligation(stack) { Ok(Some(c)) => self.evaluate_candidate(stack, &c), Ok(None) => Ok(EvaluatedToAmbig), Err(Overflow) => Err(OverflowError), Err(..) => Ok(EvaluatedToErr), } } /// For defaulted traits, we use a co-inductive strategy to solve, so /// that recursion is ok. This routine returns true if the top of the /// stack (`cycle[0]`): /// /// - is a defaulted trait, /// - it also appears in the backtrace at some position `X`, /// - all the predicates at positions `X..` between `X` an the top are /// also defaulted traits. 
pub fn coinductive_match<I>(&mut self, cycle: I) -> bool where I: Iterator<Item = ty::Predicate<'tcx>>, { let mut cycle = cycle; cycle.all(|predicate| self.coinductive_predicate(predicate)) } fn coinductive_predicate(&self, predicate: ty::Predicate<'tcx>) -> bool { let result = match predicate { ty::Predicate::Trait(ref data) => self.tcx().trait_is_auto(data.def_id()), _ => false, }; debug!("coinductive_predicate({:?}) = {:?}", predicate, result); result } /// Further evaluate `candidate` to decide whether all type parameters match and whether nested /// obligations are met. Returns whether `candidate` remains viable after this further /// scrutiny. fn evaluate_candidate<'o>( &mut self, stack: &TraitObligationStack<'o, 'tcx>, candidate: &SelectionCandidate<'tcx>, ) -> Result<EvaluationResult, OverflowError> { debug!( "evaluate_candidate: depth={} candidate={:?}", stack.obligation.recursion_depth, candidate ); let result = self.evaluation_probe(|this| { let candidate = (*candidate).clone(); match this.confirm_candidate(stack.obligation, candidate) { Ok(selection) => this.evaluate_predicates_recursively( stack.list(), selection.nested_obligations().into_iter() ), Err(..) 
=> Ok(EvaluatedToErr), } })?; debug!( "evaluate_candidate: depth={} result={:?}", stack.obligation.recursion_depth, result ); Ok(result) } fn check_evaluation_cache( &self, param_env: ty::ParamEnv<'tcx>, trait_ref: ty::PolyTraitRef<'tcx>, ) -> Option<EvaluationResult> { let tcx = self.tcx(); if self.can_use_global_caches(param_env) { let cache = tcx.evaluation_cache.hashmap.borrow(); if let Some(cached) = cache.get(&trait_ref) { return Some(cached.get(tcx)); } } self.infcx .evaluation_cache .hashmap .borrow() .get(&trait_ref) .map(|v| v.get(tcx)) } fn insert_evaluation_cache( &mut self, param_env: ty::ParamEnv<'tcx>, trait_ref: ty::PolyTraitRef<'tcx>, dep_node: DepNodeIndex, result: EvaluationResult, ) { // Avoid caching results that depend on more than just the trait-ref // - the stack can create recursion. if result.is_stack_dependent() { return; } if self.can_use_global_caches(param_env) { if let Some(trait_ref) = self.tcx().lift_to_global(&trait_ref) { debug!( "insert_evaluation_cache(trait_ref={:?}, candidate={:?}) global", trait_ref, result, ); // This may overwrite the cache with the same value // FIXME: Due to #50507 this overwrites the different values // This should be changed to use HashMapExt::insert_same // when that is fixed self.tcx() .evaluation_cache .hashmap .borrow_mut() .insert(trait_ref, WithDepNode::new(dep_node, result)); return; } } debug!( "insert_evaluation_cache(trait_ref={:?}, candidate={:?})", trait_ref, result, ); self.infcx .evaluation_cache .hashmap .borrow_mut() .insert(trait_ref, WithDepNode::new(dep_node, result)); } // For various reasons, it's possible for a subobligation // to have a *lower* recursion_depth than the obligation used to create it. // Projection sub-obligations may be returned from the projection cache, // which results in obligations with an 'old' recursion_depth. 
// Additionally, methods like ty::wf::obligations and // InferCtxt.subtype_predicate produce subobligations without // taking in a 'parent' depth, causing the generated subobligations // to have a recursion_depth of 0 // // To ensure that obligation_depth never decreasees, we force all subobligations // to have at least the depth of the original obligation. fn add_depth<T: 'cx, I: Iterator<Item = &'cx mut Obligation<'tcx, T>>>(&self, it: I, min_depth: usize) { it.for_each(|o| o.recursion_depth = cmp::max(min_depth, o.recursion_depth) + 1); } // Check that the recursion limit has not been exceeded. // // The weird return type of this function allows it to be used with the 'try' (?) // operator within certain functions fn check_recursion_limit<T: Display + TypeFoldable<'tcx>, V: Display + TypeFoldable<'tcx>>( &self, obligation: &Obligation<'tcx, T>, error_obligation: &Obligation<'tcx, V> ) -> Result<(), OverflowError> { let recursion_limit = *self.infcx.tcx.sess.recursion_limit.get(); if obligation.recursion_depth >= recursion_limit { match self.query_mode { TraitQueryMode::Standard => { self.infcx().report_overflow_error(error_obligation, true); } TraitQueryMode::Canonical => { return Err(OverflowError); } } } Ok(()) } /////////////////////////////////////////////////////////////////////////// // CANDIDATE ASSEMBLY // // The selection process begins by examining all in-scope impls, // caller obligations, and so forth and assembling a list of // candidates. See the [rustc guide] for more details. // // [rustc guide]: // https://rust-lang.github.io/rustc-guide/traits/resolution.html#candidate-assembly fn candidate_from_obligation<'o>( &mut self, stack: &TraitObligationStack<'o, 'tcx>, ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { // Watch out for overflow. This intentionally bypasses (and does // not update) the cache. self.check_recursion_limit(&stack.obligation, &stack.obligation)?; // Check the cache. 
Note that we freshen the trait-ref // separately rather than using `stack.fresh_trait_ref` -- // this is because we want the unbound variables to be // replaced with fresh types starting from index 0. let cache_fresh_trait_pred = self.infcx.freshen(stack.obligation.predicate.clone()); debug!( "candidate_from_obligation(cache_fresh_trait_pred={:?}, obligation={:?})", cache_fresh_trait_pred, stack ); debug_assert!(!stack.obligation.predicate.has_escaping_bound_vars()); if let Some(c) = self.check_candidate_cache(stack.obligation.param_env, &cache_fresh_trait_pred) { debug!("CACHE HIT: SELECT({:?})={:?}", cache_fresh_trait_pred, c); return c; } // If no match, compute result and insert into cache. // // FIXME(nikomatsakis) -- this cache is not taking into // account cycles that may have occurred in forming the // candidate. I don't know of any specific problems that // result but it seems awfully suspicious. let (candidate, dep_node) = self.in_task(|this| this.candidate_from_obligation_no_cache(stack)); debug!( "CACHE MISS: SELECT({:?})={:?}", cache_fresh_trait_pred, candidate ); self.insert_candidate_cache( stack.obligation.param_env, cache_fresh_trait_pred, dep_node, candidate.clone(), ); candidate } fn in_task<OP, R>(&mut self, op: OP) -> (R, DepNodeIndex) where OP: FnOnce(&mut Self) -> R, { let (result, dep_node) = self.tcx() .dep_graph .with_anon_task(DepKind::TraitSelect, || op(self)); self.tcx().dep_graph.read_index(dep_node); (result, dep_node) } // Treat negative impls as unimplemented fn filter_negative_impls( &self, candidate: SelectionCandidate<'tcx>, ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { if let ImplCandidate(def_id) = candidate { if !self.allow_negative_impls && self.tcx().impl_polarity(def_id) == hir::ImplPolarity::Negative { return Err(Unimplemented); } } Ok(Some(candidate)) } fn candidate_from_obligation_no_cache<'o>( &mut self, stack: &TraitObligationStack<'o, 'tcx>, ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { if 
stack.obligation.predicate.references_error() { // If we encounter a `Error`, we generally prefer the // most "optimistic" result in response -- that is, the // one least likely to report downstream errors. But // because this routine is shared by coherence and by // trait selection, there isn't an obvious "right" choice // here in that respect, so we opt to just return // ambiguity and let the upstream clients sort it out. return Ok(None); } if let Some(conflict) = self.is_knowable(stack) { debug!("coherence stage: not knowable"); if self.intercrate_ambiguity_causes.is_some() { debug!("evaluate_stack: intercrate_ambiguity_causes is some"); // Heuristics: show the diagnostics when there are no candidates in crate. if let Ok(candidate_set) = self.assemble_candidates(stack) { let mut no_candidates_apply = true; { let evaluated_candidates = candidate_set .vec .iter() .map(|c| self.evaluate_candidate(stack, &c)); for ec in evaluated_candidates { match ec { Ok(c) => { if c.may_apply() { no_candidates_apply = false; break; } } Err(e) => return Err(e.into()), } } } if !candidate_set.ambiguous && no_candidates_apply { let trait_ref = stack.obligation.predicate.skip_binder().trait_ref; let self_ty = trait_ref.self_ty(); let trait_desc = trait_ref.to_string(); let self_desc = if self_ty.has_concrete_skeleton() { Some(self_ty.to_string()) } else { None }; let cause = if let Conflict::Upstream = conflict { IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc, } } else { IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc, } }; debug!("evaluate_stack: pushing cause = {:?}", cause); self.intercrate_ambiguity_causes .as_mut() .unwrap() .push(cause); } } } return Ok(None); } let candidate_set = self.assemble_candidates(stack)?; if candidate_set.ambiguous { debug!("candidate set contains ambig"); return Ok(None); } let mut candidates = candidate_set.vec; debug!( "assembled {} candidates for {:?}: {:?}", candidates.len(), stack, candidates ); // At 
this point, we know that each of the entries in the // candidate set is *individually* applicable. Now we have to // figure out if they contain mutual incompatibilities. This // frequently arises if we have an unconstrained input type -- // for example, we are looking for $0:Eq where $0 is some // unconstrained type variable. In that case, we'll get a // candidate which assumes $0 == int, one that assumes $0 == // usize, etc. This spells an ambiguity. // If there is more than one candidate, first winnow them down // by considering extra conditions (nested obligations and so // forth). We don't winnow if there is exactly one // candidate. This is a relatively minor distinction but it // can lead to better inference and error-reporting. An // example would be if there was an impl: // // impl<T:Clone> Vec<T> { fn push_clone(...) { ... } } // // and we were to see some code `foo.push_clone()` where `boo` // is a `Vec<Bar>` and `Bar` does not implement `Clone`. If // we were to winnow, we'd wind up with zero candidates. // Instead, we select the right impl now but report `Bar does // not implement Clone`. if candidates.len() == 1 { return self.filter_negative_impls(candidates.pop().unwrap()); } // Winnow, but record the exact outcome of evaluation, which // is needed for specialization. Propagate overflow if it occurs. let mut candidates = candidates .into_iter() .map(|c| match self.evaluate_candidate(stack, &c) { Ok(eval) if eval.may_apply() => Ok(Some(EvaluatedCandidate { candidate: c, evaluation: eval, })), Ok(_) => Ok(None), Err(OverflowError) => Err(Overflow), }) .flat_map(Result::transpose) .collect::<Result<Vec<_>, _>>()?; debug!( "winnowed to {} candidates for {:?}: {:?}", candidates.len(), stack, candidates ); // If there are STILL multiple candidates, we can further // reduce the list by dropping duplicates -- including // resolving specializations. 
if candidates.len() > 1 { let mut i = 0; while i < candidates.len() { let is_dup = (0..candidates.len()).filter(|&j| i != j).any(|j| { self.candidate_should_be_dropped_in_favor_of(&candidates[i], &candidates[j]) }); if is_dup { debug!( "Dropping candidate #{}/{}: {:?}", i, candidates.len(), candidates[i] ); candidates.swap_remove(i); } else { debug!( "Retaining candidate #{}/{}: {:?}", i, candidates.len(), candidates[i] ); i += 1; // If there are *STILL* multiple candidates, give up // and report ambiguity. if i > 1 { debug!("multiple matches, ambig"); return Ok(None); } } } } // If there are *NO* candidates, then there are no impls -- // that we know of, anyway. Note that in the case where there // are unbound type variables within the obligation, it might // be the case that you could still satisfy the obligation // from another crate by instantiating the type variables with // a type from another crate that does have an impl. This case // is checked for in `evaluate_stack` (and hence users // who might care about this case, like coherence, should use // that function). if candidates.is_empty() { return Err(Unimplemented); } // Just one candidate left. self.filter_negative_impls(candidates.pop().unwrap().candidate) } fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Option<Conflict> { debug!("is_knowable(intercrate={:?})", self.intercrate); if !self.intercrate.is_some() { return None; } let obligation = &stack.obligation; let predicate = self.infcx() .resolve_vars_if_possible(&obligation.predicate); // Okay to skip binder because of the nature of the // trait-ref-is-knowable check, which does not care about // bound regions. 
        let trait_ref = predicate.skip_binder().trait_ref;

        let result = coherence::trait_ref_is_knowable(self.tcx(), trait_ref);
        // Bug-compatibility shim: under `IntercrateMode::Issue43355`, a
        // downstream conflict that only exists because of the fix for the
        // old breakage is deliberately ignored (see rust-lang/rust#43355).
        if let (
            Some(Conflict::Downstream {
                used_to_be_broken: true,
            }),
            Some(IntercrateMode::Issue43355),
        ) = (result, self.intercrate)
        {
            debug!("is_knowable: IGNORING conflict to be bug-compatible with #43355");
            None
        } else {
            result
        }
    }

    /// Returns `true` if the global caches can be used.
    /// Do note that if the type itself is not in the
    /// global tcx, the local caches will be used.
    fn can_use_global_caches(&self, param_env: ty::ParamEnv<'tcx>) -> bool {
        // If there are any where-clauses in scope, then we always use
        // a cache local to this particular scope. Otherwise, we
        // switch to a global cache. We used to try and draw
        // finer-grained distinctions, but that led to a series of
        // annoying and weird bugs like #22019 and #18290. This simple
        // rule seems to be pretty clearly safe and also still retains
        // a very high hit rate (~95% when compiling rustc).
        if !param_env.caller_bounds.is_empty() {
            return false;
        }

        // Avoid using the master cache during coherence and just rely
        // on the local cache. This effectively disables caching
        // during coherence. It is really just a simplification to
        // avoid us having to fear that coherence results "pollute"
        // the master cache. Since coherence executes pretty quickly,
        // it's not worth going to more trouble to increase the
        // hit-rate I don't think.
        if self.intercrate.is_some() {
            return false;
        }

        // Otherwise, we can use the global cache.
        true
    }

    /// Looks up a previously-selected candidate for
    /// `cache_fresh_trait_pred`: the global (tcx) cache is consulted first
    /// when permitted by `can_use_global_caches`, falling back to the
    /// inference-context-local cache.
    fn check_candidate_cache(
        &mut self,
        param_env: ty::ParamEnv<'tcx>,
        cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>,
    ) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> {
        let tcx = self.tcx();
        let trait_ref = &cache_fresh_trait_pred.skip_binder().trait_ref;
        if self.can_use_global_caches(param_env) {
            let cache = tcx.selection_cache.hashmap.borrow();
            if let Some(cached) = cache.get(&trait_ref) {
                return Some(cached.get(tcx));
            }
        }
        self.infcx
            .selection_cache
            .hashmap
            .borrow()
            .get(trait_ref)
            .map(|v| v.get(tcx))
    }

    /// Determines whether we can safely cache the result
    /// of selecting an obligation. This is almost always 'true',
    /// except when dealing with certain ParamCandidates.
    ///
    /// Ordinarily, a ParamCandidate will contain no inference variables,
    /// since it was usually produced directly from a DefId. However, in
    /// certain cases (currently only librustdoc's blanket impl finder),
    /// a ParamEnv may be explicitly constructed with inference types.
    /// When this is the case, we do *not* want to cache the resulting selection
    /// candidate. This is due to the fact that it might not always be possible
    /// to equate the obligation's trait ref and the candidate's trait ref,
    /// if more constraints end up getting added to an inference variable.
    ///
    /// Because of this, we always want to re-run the full selection
    /// process for our obligation the next time we see it, since
    /// we might end up picking a different SelectionCandidate (or none at all)
    fn can_cache_candidate(&self,
        result: &SelectionResult<'tcx, SelectionCandidate<'tcx>>
     ) -> bool {
        match result {
            // Refuse to cache a ParamCandidate whose trait ref mentions any
            // inference variable (see the doc comment above).
            Ok(Some(SelectionCandidate::ParamCandidate(trait_ref))) => {
                !trait_ref.skip_binder().input_types()
                    .any(|t| t.walk().any(|t_| t_.is_ty_infer()))
            },
            _ => true
        }
    }

    /// Records a selection result in the appropriate cache: the global
    /// (tcx) cache when permitted and the value can be lifted to the
    /// global tcx, otherwise the inference-context-local cache.
    fn insert_candidate_cache(
        &mut self,
        param_env: ty::ParamEnv<'tcx>,
        cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
        dep_node: DepNodeIndex,
        candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>,
    ) {
        let tcx = self.tcx();
        let trait_ref = cache_fresh_trait_pred.skip_binder().trait_ref;

        if !self.can_cache_candidate(&candidate) {
            debug!("insert_candidate_cache(trait_ref={:?}, candidate={:?} -\
                   candidate is not cacheable", trait_ref, candidate);
            return;
        }

        if self.can_use_global_caches(param_env) {
            if let Err(Overflow) = candidate {
                // Don't cache overflow globally; we only produce this
                // in certain modes.
            } else if let Some(trait_ref) = tcx.lift_to_global(&trait_ref) {
                if let Some(candidate) = tcx.lift_to_global(&candidate) {
                    debug!(
                        "insert_candidate_cache(trait_ref={:?}, candidate={:?}) global",
                        trait_ref, candidate,
                    );
                    // This may overwrite the cache with the same value
                    tcx.selection_cache
                        .hashmap
                        .borrow_mut()
                        .insert(trait_ref, WithDepNode::new(dep_node, candidate));
                    return;
                }
            }
        }

        debug!(
            "insert_candidate_cache(trait_ref={:?}, candidate={:?}) local",
            trait_ref, candidate,
        );
        self.infcx
            .selection_cache
            .hashmap
            .borrow_mut()
            .insert(trait_ref, WithDepNode::new(dep_node, candidate));
    }

    fn assemble_candidates<'o>(
        &mut self,
        stack: &TraitObligationStack<'o, 'tcx>,
    ) -> Result<SelectionCandidateSet<'tcx>, SelectionError<'tcx>> {
        let TraitObligationStack { obligation, ..
        } = *stack;
        // Work with a copy of the obligation whose predicate has inference
        // variables resolved as far as possible.
        let ref obligation = Obligation {
            param_env: obligation.param_env,
            cause: obligation.cause.clone(),
            recursion_depth: obligation.recursion_depth,
            predicate: self.infcx()
                .resolve_vars_if_possible(&obligation.predicate),
        };

        if obligation.predicate.skip_binder().self_ty().is_ty_var() {
            // Self is a type variable (e.g., `_: AsRef<str>`).
            //
            // This is somewhat problematic, as the current scheme can't really
            // handle it turning to be a projection. This does end up as truly
            // ambiguous in most cases anyway.
            //
            // Take the fast path out - this also improves
            // performance by preventing assemble_candidates_from_impls from
            // matching every impl for this trait.
            return Ok(SelectionCandidateSet {
                vec: vec![],
                ambiguous: true,
            });
        }

        let mut candidates = SelectionCandidateSet {
            vec: Vec::new(),
            ambiguous: false,
        };

        self.assemble_candidates_for_trait_alias(obligation, &mut candidates)?;

        // Other bounds. Consider both in-scope bounds from fn decl
        // and applicable impls. There is a certain set of precedence rules here.
        let def_id = obligation.predicate.def_id();
        let lang_items = self.tcx().lang_items();

        if lang_items.copy_trait() == Some(def_id) {
            debug!(
                "obligation self ty is {:?}",
                obligation.predicate.skip_binder().self_ty()
            );

            // User-defined copy impls are permitted, but only for
            // structs and enums.
            self.assemble_candidates_from_impls(obligation, &mut candidates)?;

            // For other types, we'll use the builtin rules.
            let copy_conditions = self.copy_clone_conditions(obligation);
            self.assemble_builtin_bound_candidates(copy_conditions, &mut candidates)?;
        } else if lang_items.sized_trait() == Some(def_id) {
            // Sized is never implementable by end-users, it is
            // always automatically computed.
            let sized_conditions = self.sized_conditions(obligation);
            self.assemble_builtin_bound_candidates(sized_conditions, &mut candidates)?;
        } else if lang_items.unsize_trait() == Some(def_id) {
            self.assemble_candidates_for_unsizing(obligation, &mut candidates);
        } else {
            if lang_items.clone_trait() == Some(def_id) {
                // Same builtin conditions as `Copy`, i.e., every type which has builtin support
                // for `Copy` also has builtin support for `Clone`, + tuples and arrays of `Clone`
                // types have builtin support for `Clone`.
                let clone_conditions = self.copy_clone_conditions(obligation);
                self.assemble_builtin_bound_candidates(clone_conditions, &mut candidates)?;
            }

            self.assemble_generator_candidates(obligation, &mut candidates)?;
            self.assemble_closure_candidates(obligation, &mut candidates)?;
            self.assemble_fn_pointer_candidates(obligation, &mut candidates)?;
            self.assemble_candidates_from_impls(obligation, &mut candidates)?;
            self.assemble_candidates_from_object_ty(obligation, &mut candidates);
        }

        self.assemble_candidates_from_projected_tys(obligation, &mut candidates);
        self.assemble_candidates_from_caller_bounds(stack, &mut candidates)?;
        // Auto implementations have lower priority, so we only
        // consider triggering a default if there is no other impl that can apply.
        if candidates.vec.is_empty() {
            self.assemble_candidates_from_auto_impls(obligation, &mut candidates)?;
        }
        debug!("candidate list size: {}", candidates.vec.len());
        Ok(candidates)
    }

    fn assemble_candidates_from_projected_tys(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) {
        debug!("assemble_candidates_for_projected_tys({:?})", obligation);

        // before we go into the whole placeholder thing, just
        // quickly check if the self-type is a projection at all.
        match obligation.predicate.skip_binder().trait_ref.self_ty().sty {
            ty::Projection(_) | ty::Opaque(..)
            => {}
            ty::Infer(ty::TyVar(_)) => {
                span_bug!(
                    obligation.cause.span,
                    "Self=_ should have been handled by assemble_candidates"
                );
            }
            _ => return,
        }

        // Probe so the match leaves no lasting effect on the inference state.
        let result = self.infcx.probe(|snapshot| {
            self.match_projection_obligation_against_definition_bounds(
                obligation,
                snapshot,
            )
        });

        if result {
            candidates.vec.push(ProjectionCandidate);
        }
    }

    /// Checks whether the obligation is satisfied by one of the bounds
    /// written on the definition of the projection/opaque self type
    /// (after elaboration). Returns `true` if a matching bound was found
    /// (and, in that case, re-applies the match outside of a probe).
    fn match_projection_obligation_against_definition_bounds(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        snapshot: &CombinedSnapshot<'_, 'tcx>,
    ) -> bool {
        let poly_trait_predicate = self.infcx()
            .resolve_vars_if_possible(&obligation.predicate);
        let (placeholder_trait_predicate, placeholder_map) = self.infcx()
            .replace_bound_vars_with_placeholders(&poly_trait_predicate);
        debug!(
            "match_projection_obligation_against_definition_bounds: \
             placeholder_trait_predicate={:?}",
            placeholder_trait_predicate,
        );

        let (def_id, substs) = match placeholder_trait_predicate.trait_ref.self_ty().sty {
            ty::Projection(ref data) => (data.trait_ref(self.tcx()).def_id, data.substs),
            ty::Opaque(def_id, substs) => (def_id, substs),
            _ => {
                span_bug!(
                    obligation.cause.span,
                    "match_projection_obligation_against_definition_bounds() called \
                     but self-ty is not a projection: {:?}",
                    placeholder_trait_predicate.trait_ref.self_ty()
                );
            }
        };
        debug!(
            "match_projection_obligation_against_definition_bounds: \
             def_id={:?}, substs={:?}",
            def_id, substs
        );

        let predicates_of = self.tcx().predicates_of(def_id);
        let bounds = predicates_of.instantiate(self.tcx(), substs);
        debug!(
            "match_projection_obligation_against_definition_bounds: \
             bounds={:?}",
            bounds
        );

        let elaborated_predicates = util::elaborate_predicates(self.tcx(), bounds.predicates);
        let matching_bound = elaborated_predicates
            .filter_to_traits()
            .find(|bound| {
                self.infcx.probe(|_| {
                    self.match_projection(
                        obligation,
                        bound.clone(),
                        placeholder_trait_predicate.trait_ref.clone(),
                        &placeholder_map,
                        snapshot,
                    )
                })
            });

        debug!(
            "match_projection_obligation_against_definition_bounds: \
             matching_bound={:?}",
            matching_bound
        );
        match matching_bound {
            None => false,
            Some(bound) => {
                // Repeat the successful match, if any, this time outside of a probe.
                let result = self.match_projection(
                    obligation,
                    bound,
                    placeholder_trait_predicate.trait_ref.clone(),
                    &placeholder_map,
                    snapshot,
                );

                // The match succeeded inside the probe above, so it must
                // succeed again here.
                assert!(result);
                true
            }
        }
    }

    /// Returns `true` if `trait_bound` can be shown to hold for the
    /// placeholder trait ref (a subtyping check plus a leak check on the
    /// placeholder regions).
    fn match_projection(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        trait_bound: ty::PolyTraitRef<'tcx>,
        placeholder_trait_ref: ty::TraitRef<'tcx>,
        placeholder_map: &PlaceholderMap<'tcx>,
        snapshot: &CombinedSnapshot<'_, 'tcx>,
    ) -> bool {
        debug_assert!(!placeholder_trait_ref.has_escaping_bound_vars());
        self.infcx
            .at(&obligation.cause, obligation.param_env)
            .sup(ty::Binder::dummy(placeholder_trait_ref), trait_bound)
            .is_ok()
            &&
            self.infcx.leak_check(false, placeholder_map, snapshot).is_ok()
    }

    /// Given an obligation like `<SomeTrait for T>`, search the obligations that the caller
    /// supplied to find out whether it is listed among them.
    ///
    /// Never affects inference environment.
    fn assemble_candidates_from_caller_bounds<'o>(
        &mut self,
        stack: &TraitObligationStack<'o, 'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        debug!(
            "assemble_candidates_from_caller_bounds({:?})",
            stack.obligation
        );

        let all_bounds = stack
            .obligation
            .param_env
            .caller_bounds
            .iter()
            .filter_map(|o| o.to_opt_poly_trait_ref());

        // Micro-optimization: filter out predicates relating to different traits.
        let matching_bounds =
            all_bounds.filter(|p| p.def_id() == stack.obligation.predicate.def_id());

        // Keep only those bounds which may apply, and propagate overflow if it occurs.
        let mut param_candidates = vec![];
        for bound in matching_bounds {
            let wc = self.evaluate_where_clause(stack, bound.clone())?;
            if wc.may_apply() {
                param_candidates.push(ParamCandidate(bound));
            }
        }

        candidates.vec.extend(param_candidates);

        Ok(())
    }

    /// Evaluates whether the where-clause trait ref matches the obligation,
    /// recursively evaluating any nested obligations the match produces.
    /// Runs inside an evaluation probe, so it never affects inference state.
    fn evaluate_where_clause<'o>(
        &mut self,
        stack: &TraitObligationStack<'o, 'tcx>,
        where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
    ) -> Result<EvaluationResult, OverflowError> {
        self.evaluation_probe(|this| {
            match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
                Ok(obligations) => {
                    this.evaluate_predicates_recursively(stack.list(), obligations.into_iter())
                }
                Err(()) => Ok(EvaluatedToErr),
            }
        })
    }

    /// Pushes a `GeneratorCandidate` when the obligation is the `Generator`
    /// lang-item trait and the self type is a generator (ambiguous if the
    /// self type is still an unresolved type variable).
    fn assemble_generator_candidates(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        if self.tcx().lang_items().gen_trait() != Some(obligation.predicate.def_id()) {
            return Ok(());
        }

        // Okay to skip binder because the substs on generator types never
        // touch bound regions, they just capture the in-scope
        // type/region parameters.
        let self_ty = *obligation.self_ty().skip_binder();
        match self_ty.sty {
            ty::Generator(..) => {
                debug!(
                    "assemble_generator_candidates: self_ty={:?} obligation={:?}",
                    self_ty, obligation
                );

                candidates.vec.push(GeneratorCandidate);
            }
            ty::Infer(ty::TyVar(_)) => {
                debug!("assemble_generator_candidates: ambiguous self-type");
                candidates.ambiguous = true;
            }
            _ => {}
        }

        Ok(())
    }

    /// Checks for the artificial impl that the compiler will create for an obligation like `X :
    /// FnMut<..>` where `X` is a closure type.
    ///
    /// Note: the type parameters on a closure candidate are modeled as *output* type
    /// parameters and hence do not affect whether this trait is a match or not. They will be
    /// unified during the confirmation step.
    fn assemble_closure_candidates(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        // Only relevant for the `Fn`/`FnMut`/`FnOnce` lang-item traits.
        let kind = match self.tcx()
            .lang_items()
            .fn_trait_kind(obligation.predicate.def_id())
        {
            Some(k) => k,
            None => {
                return Ok(());
            }
        };

        // Okay to skip binder because the substs on closure types never
        // touch bound regions, they just capture the in-scope
        // type/region parameters
        match obligation.self_ty().skip_binder().sty {
            ty::Closure(closure_def_id, closure_substs) => {
                debug!(
                    "assemble_unboxed_candidates: kind={:?} obligation={:?}",
                    kind, obligation
                );
                match self.infcx.closure_kind(closure_def_id, closure_substs) {
                    Some(closure_kind) => {
                        debug!(
                            "assemble_unboxed_candidates: closure_kind = {:?}",
                            closure_kind
                        );
                        if closure_kind.extends(kind) {
                            candidates.vec.push(ClosureCandidate);
                        }
                    }
                    None => {
                        // Kind not inferred yet; optimistically add the
                        // candidate and let confirmation sort it out.
                        debug!("assemble_unboxed_candidates: closure_kind not yet known");
                        candidates.vec.push(ClosureCandidate);
                    }
                }
            }
            ty::Infer(ty::TyVar(_)) => {
                debug!("assemble_unboxed_closure_candidates: ambiguous self-type");
                candidates.ambiguous = true;
            }
            _ => {}
        }

        Ok(())
    }

    /// Implement one of the `Fn()` family for a fn pointer.
    fn assemble_fn_pointer_candidates(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        // We provide impl of all fn traits for fn pointers.
        if self.tcx()
            .lang_items()
            .fn_trait_kind(obligation.predicate.def_id())
            .is_none()
        {
            return Ok(());
        }

        // Okay to skip binder because what we are inspecting doesn't involve bound regions
        let self_ty = *obligation.self_ty().skip_binder();
        match self_ty.sty {
            ty::Infer(ty::TyVar(_)) => {
                debug!("assemble_fn_pointer_candidates: ambiguous self-type");
                candidates.ambiguous = true; // could wind up being a fn() type
            }
            // provide an impl, but only for suitable `fn` pointers
            // (safe, Rust ABI, non-variadic)
            ty::FnDef(..) | ty::FnPtr(_) => {
                if let ty::FnSig {
                    unsafety: hir::Unsafety::Normal,
                    abi: Abi::Rust,
                    c_variadic: false,
                    ..
                } = self_ty.fn_sig(self.tcx()).skip_binder()
                {
                    candidates.vec.push(FnPointerCandidate);
                }
            }
            _ => {}
        }

        Ok(())
    }

    /// Search for impls that might apply to `obligation`.
    fn assemble_candidates_from_impls(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        debug!(
            "assemble_candidates_from_impls(obligation={:?})",
            obligation
        );

        self.tcx().for_each_relevant_impl(
            obligation.predicate.def_id(),
            obligation.predicate.skip_binder().trait_ref.self_ty(),
            |impl_def_id| {
                // Probe: matching must not leave inference side effects.
                self.infcx.probe(|snapshot| {
                    if let Ok(_substs) = self.match_impl(impl_def_id, obligation, snapshot) {
                        candidates.vec.push(ImplCandidate(impl_def_id));
                    }
                });
            },
        );

        Ok(())
    }

    fn assemble_candidates_from_auto_impls(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        // Okay to skip binder here because the tests we do below do not involve bound regions.
        let self_ty = *obligation.self_ty().skip_binder();
        debug!("assemble_candidates_from_auto_impls(self_ty={:?})", self_ty);

        let def_id = obligation.predicate.def_id();

        if self.tcx().trait_is_auto(def_id) {
            match self_ty.sty {
                ty::Dynamic(..) => {
                    // For object types, we don't know what the closed
                    // over types are. This means we conservatively
                    // say nothing; a candidate may be added by
                    // `assemble_candidates_from_object_ty`.
                }
                ty::Foreign(..) => {
                    // Since the contents of foreign types is unknown,
                    // we don't add any `..` impl. Default traits could
                    // still be provided by a manual implementation for
                    // this trait and type.
                }
                ty::Param(..) | ty::Projection(..) => {
                    // In these cases, we don't know what the actual
                    // type is. Therefore, we cannot break it down
                    // into its constituent types.
                    // So we don't
                    // consider the `..` impl but instead just add no
                    // candidates: this means that typeck will only
                    // succeed if there is another reason to believe
                    // that this obligation holds. That could be a
                    // where-clause or, in the case of an object type,
                    // it could be that the object type lists the
                    // trait (e.g., `Foo+Send : Send`). See
                    // `compile-fail/typeck-default-trait-impl-send-param.rs`
                    // for an example of a test case that exercises
                    // this path.
                }
                ty::Infer(ty::TyVar(_)) => {
                    // the auto impl might apply, we don't know
                    candidates.ambiguous = true;
                }
                ty::Generator(_, _, movability)
                    if self.tcx().lang_items().unpin_trait() == Some(def_id) =>
                {
                    match movability {
                        hir::GeneratorMovability::Static => {
                            // Immovable generators are never `Unpin`, so
                            // suppress the normal auto-impl candidate for it.
                        }
                        hir::GeneratorMovability::Movable => {
                            // Movable generators are always `Unpin`, so add an
                            // unconditional builtin candidate.
                            candidates.vec.push(BuiltinCandidate {
                                has_nested: false,
                            });
                        }
                    }
                }

                _ => candidates.vec.push(AutoImplCandidate(def_id.clone())),
            }
        }

        Ok(())
    }

    /// Search for impls that might apply to `obligation`.
    fn assemble_candidates_from_object_ty(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) {
        debug!(
            "assemble_candidates_from_object_ty(self_ty={:?})",
            obligation.self_ty().skip_binder()
        );

        self.infcx.probe(|_snapshot| {
            // The code below doesn't care about regions, and the
            // self-ty here doesn't escape this probe, so just erase
            // any LBR.
            let self_ty = self.tcx().erase_late_bound_regions(&obligation.self_ty());
            let poly_trait_ref = match self_ty.sty {
                ty::Dynamic(ref data, ..) => {
                    if data.auto_traits()
                        .any(|did| did == obligation.predicate.def_id())
                    {
                        debug!(
                            "assemble_candidates_from_object_ty: matched builtin bound, \
                             pushing candidate"
                        );
                        candidates.vec.push(BuiltinObjectCandidate);
                        return;
                    }

                    if let Some(principal) = data.principal() {
                        principal.with_self_ty(self.tcx(), self_ty)
                    } else {
                        // Only auto-trait bounds exist.
                        return;
                    }
                }
                ty::Infer(ty::TyVar(_)) => {
                    debug!("assemble_candidates_from_object_ty: ambiguous");
                    candidates.ambiguous = true; // could wind up being an object type
                    return;
                }
                _ => return,
            };

            debug!(
                "assemble_candidates_from_object_ty: poly_trait_ref={:?}",
                poly_trait_ref
            );

            // Count only those upcast versions that match the trait-ref
            // we are looking for. Specifically, do not only check for the
            // correct trait, but also the correct type parameters.
            // For example, we may be trying to upcast `Foo` to `Bar<i32>`,
            // but `Foo` is declared as `trait Foo : Bar<u32>`.
            let upcast_trait_refs = util::supertraits(self.tcx(), poly_trait_ref)
                .filter(|upcast_trait_ref| {
                    self.infcx.probe(|_| {
                        let upcast_trait_ref = upcast_trait_ref.clone();
                        self.match_poly_trait_ref(obligation, upcast_trait_ref)
                            .is_ok()
                    })
                })
                .count();

            if upcast_trait_refs > 1 {
                // Can be upcast in many ways; need more type information.
                candidates.ambiguous = true;
            } else if upcast_trait_refs == 1 {
                candidates.vec.push(ObjectCandidate);
            }
        })
    }

    /// Search for unsizing that might apply to `obligation`.
    fn assemble_candidates_for_unsizing(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) {
        // We currently never consider higher-ranked obligations e.g.
        // `for<'a> &'a T: Unsize<Trait+'a>` to be implemented. This is not
        // because they are a priori invalid, and we could potentially add support
        // for them later, it's just that there isn't really a strong need for it.
        // A `T: Unsize<U>` obligation is always used as part of a `T: CoerceUnsize<U>`
        // impl, and those are generally applied to concrete types.
        //
        // That said, one might try to write a fn with a where clause like
        //     for<'a> Foo<'a, T>: Unsize<Foo<'a, Trait>>
        // where the `'a` is kind of orthogonal to the relevant part of the `Unsize`.
        // Still, you'd be more likely to write that where clause as
        //     T: Trait
        // so it seems ok if we (conservatively) fail to accept that `Unsize`
        // obligation above. Should be possible to extend this in the future.
        let source = match obligation.self_ty().no_bound_vars() {
            Some(t) => t,
            None => {
                // Don't add any candidates if there are bound regions.
                return;
            }
        };
        // The target of the unsizing is the trait's second type parameter.
        let target = obligation
            .predicate
            .skip_binder()
            .trait_ref
            .substs
            .type_at(1);

        debug!(
            "assemble_candidates_for_unsizing(source={:?}, target={:?})",
            source, target
        );

        let may_apply = match (&source.sty, &target.sty) {
            // Trait+Kx+'a -> Trait+Ky+'b (upcasts).
            (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
                // Upcasts permit two things:
                //
                // 1. Dropping builtin bounds, e.g., `Foo+Send` to `Foo`
                // 2. Tightening the region bound, e.g., `Foo+'a` to `Foo+'b` if `'a : 'b`
                //
                // Note that neither of these changes requires any
                // change at runtime. Eventually this will be
                // generalized.
                //
                // We always upcast when we can because of reason
                // #2 (region bounds).
                data_a.principal_def_id() == data_b.principal_def_id()
                    && data_b.auto_traits()
                        // All of a's auto traits need to be in b's auto traits.
                        .all(|b| data_a.auto_traits().any(|a| a == b))
            }

            // T -> Trait.
            (_, &ty::Dynamic(..)) => true,

            // Ambiguous handling is below T -> Trait, because inference
            // variables can still implement Unsize<Trait> and nested
            // obligations will have the final say (likely deferred).
            (&ty::Infer(ty::TyVar(_)), _) | (_, &ty::Infer(ty::TyVar(_))) => {
                debug!("assemble_candidates_for_unsizing: ambiguous");
                candidates.ambiguous = true;
                false
            }

            // [T; n] -> [T].
            (&ty::Array(..), &ty::Slice(_)) => true,

            // Struct<T> -> Struct<U>.
            (&ty::Adt(def_id_a, _), &ty::Adt(def_id_b, _)) if def_id_a.is_struct() => {
                def_id_a == def_id_b
            }

            // (.., T) -> (.., U).
            (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => tys_a.len() == tys_b.len(),

            _ => false,
        };

        if may_apply {
            candidates.vec.push(BuiltinUnsizeCandidate);
        }
    }

    /// Pushes a `TraitAliasCandidate` when the obligation's trait is
    /// actually a trait alias.
    fn assemble_candidates_for_trait_alias(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        // Okay to skip binder here because the tests we do below do not involve bound regions.
        let self_ty = *obligation.self_ty().skip_binder();
        debug!("assemble_candidates_for_trait_alias(self_ty={:?})", self_ty);

        let def_id = obligation.predicate.def_id();

        if self.tcx().is_trait_alias(def_id) {
            candidates.vec.push(TraitAliasCandidate(def_id.clone()));
        }

        Ok(())
    }

    ///////////////////////////////////////////////////////////////////////////
    // WINNOW
    //
    // Winnowing is the process of attempting to resolve ambiguity by
    // probing further. During the winnowing process, we unify all
    // type variables and then we also attempt to evaluate recursive
    // bounds to see if they are satisfied.

    /// Returns `true` if `victim` should be dropped in favor of
    /// `other`. Generally speaking we will drop duplicate
    /// candidates and prefer where-clause candidates.
    ///
    /// See the comment for "SelectionCandidate" for more details.
    fn candidate_should_be_dropped_in_favor_of<'o>(
        &mut self,
        victim: &EvaluatedCandidate<'tcx>,
        other: &EvaluatedCandidate<'tcx>,
    ) -> bool {
        if victim.candidate == other.candidate {
            return true;
        }

        // Check if a bound would previously have been removed when normalizing
        // the param_env so that it can be given the lowest priority. See
        // #50825 for the motivation for this.
        let is_global =
            |cand: &ty::PolyTraitRef<'_>| cand.is_global() && !cand.has_late_bound_regions();

        match other.candidate {
            // Prefer BuiltinCandidate { has_nested: false } to anything else.
// This is a fix for #53123 and prevents winnowing from accidentally extending the // lifetime of a variable. BuiltinCandidate { has_nested: false } => true, ParamCandidate(ref cand) => match victim.candidate { AutoImplCandidate(..) => { bug!( "default implementations shouldn't be recorded \ when there are other valid candidates" ); } // Prefer BuiltinCandidate { has_nested: false } to anything else. // This is a fix for #53123 and prevents winnowing from accidentally extending the // lifetime of a variable. BuiltinCandidate { has_nested: false } => false, ImplCandidate(..) | ClosureCandidate | GeneratorCandidate | FnPointerCandidate | BuiltinObjectCandidate | BuiltinUnsizeCandidate | BuiltinCandidate { .. } | TraitAliasCandidate(..) => { // Global bounds from the where clause should be ignored // here (see issue #50825). Otherwise, we have a where // clause so don't go around looking for impls. !is_global(cand) } ObjectCandidate | ProjectionCandidate => { // Arbitrarily give param candidates priority // over projection and object candidates. !is_global(cand) } ParamCandidate(..) => false, }, ObjectCandidate | ProjectionCandidate => match victim.candidate { AutoImplCandidate(..) => { bug!( "default implementations shouldn't be recorded \ when there are other valid candidates" ); } // Prefer BuiltinCandidate { has_nested: false } to anything else. // This is a fix for #53123 and prevents winnowing from accidentally extending the // lifetime of a variable. BuiltinCandidate { has_nested: false } => false, ImplCandidate(..) | ClosureCandidate | GeneratorCandidate | FnPointerCandidate | BuiltinObjectCandidate | BuiltinUnsizeCandidate | BuiltinCandidate { .. } | TraitAliasCandidate(..) => true, ObjectCandidate | ProjectionCandidate => { // Arbitrarily give param candidates priority // over projection and object candidates. true } ParamCandidate(ref cand) => is_global(cand), }, ImplCandidate(other_def) => { // See if we can toss out `victim` based on specialization. 
// This requires us to know *for sure* that the `other` impl applies // i.e., EvaluatedToOk: if other.evaluation.must_apply_modulo_regions() { match victim.candidate { ImplCandidate(victim_def) => { let tcx = self.tcx().global_tcx(); return tcx.specializes((other_def, victim_def)) || tcx.impls_are_allowed_to_overlap( other_def, victim_def).is_some(); } ParamCandidate(ref cand) => { // Prefer the impl to a global where clause candidate. return is_global(cand); } _ => (), } } false } ClosureCandidate | GeneratorCandidate | FnPointerCandidate | BuiltinObjectCandidate | BuiltinUnsizeCandidate | BuiltinCandidate { has_nested: true } => { match victim.candidate { ParamCandidate(ref cand) => { // Prefer these to a global where-clause bound // (see issue #50825) is_global(cand) && other.evaluation.must_apply_modulo_regions() } _ => false, } } _ => false, } } /////////////////////////////////////////////////////////////////////////// // BUILTIN BOUNDS // // These cover the traits that are built-in to the language // itself: `Copy`, `Clone` and `Sized`. 
/// Converts pre-computed `BuiltinImplConditions` into zero or one candidates
/// (or an ambiguity flag) in `candidates`. `Where(nested)` yields a
/// `BuiltinCandidate` whose `has_nested` records whether any nested
/// obligations will need proving; `None` adds nothing; `Ambiguous` marks the
/// whole candidate set ambiguous.
fn assemble_builtin_bound_candidates<'o>(
    &mut self,
    conditions: BuiltinImplConditions<'tcx>,
    candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
    match conditions {
        BuiltinImplConditions::Where(nested) => {
            debug!("builtin_bound: nested={:?}", nested);
            candidates.vec.push(BuiltinCandidate {
                has_nested: nested.skip_binder().len() > 0,
            });
        }
        BuiltinImplConditions::None => {}
        BuiltinImplConditions::Ambiguous => {
            debug!("assemble_builtin_bound_candidates: ambiguous builtin");
            candidates.ambiguous = true;
        }
    }

    Ok(())
}

/// Computes the conditions under which the builtin `Sized` impl applies to
/// the obligation's self type: `Where(tys)` means "Sized iff each type in
/// `tys` is Sized", `None` means no builtin impl (never Sized, or deferred to
/// user impls), `Ambiguous` means we can't tell yet (unresolved type
/// variable).
fn sized_conditions(
    &mut self,
    obligation: &TraitObligation<'tcx>,
) -> BuiltinImplConditions<'tcx> {
    use self::BuiltinImplConditions::{Ambiguous, None, Where};

    // NOTE: binder moved to (*)
    let self_ty = self.infcx
        .shallow_resolve(obligation.predicate.skip_binder().self_ty());

    match self_ty.sty {
        ty::Infer(ty::IntVar(_))
        | ty::Infer(ty::FloatVar(_))
        | ty::Uint(_)
        | ty::Int(_)
        | ty::Bool
        | ty::Float(_)
        | ty::FnDef(..)
        | ty::FnPtr(_)
        | ty::RawPtr(..)
        | ty::Char
        | ty::Ref(..)
        | ty::Generator(..)
        | ty::GeneratorWitness(..)
        | ty::Array(..)
        | ty::Closure(..)
        | ty::Never
        | ty::Error => {
            // safe for everything
            Where(ty::Binder::dummy(Vec::new()))
        }

        // Unsized types: str, slices, trait objects, extern types.
        ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => None,

        // A tuple is Sized iff its *last* element is Sized (all other
        // elements are required to be Sized by construction).
        ty::Tuple(tys) => {
            Where(ty::Binder::bind(tys.last().into_iter().map(|k| k.expect_ty()).collect()))
        }

        ty::Adt(def, substs) => {
            // `sized_constraint` is the minimal set of field types that must
            // be Sized for the ADT to be Sized.
            let sized_crit = def.sized_constraint(self.tcx());
            // (*) binder moved here
            Where(ty::Binder::bind(
                sized_crit
                    .iter()
                    .map(|ty| ty.subst(self.tcx(), substs))
                    .collect(),
            ))
        }

        ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => None,
        ty::Infer(ty::TyVar(_)) => Ambiguous,

        ty::UnnormalizedProjection(..)
        | ty::Placeholder(..)
        | ty::Bound(..)
        | ty::Infer(ty::FreshTy(_))
        | ty::Infer(ty::FreshIntTy(_))
        | ty::Infer(ty::FreshFloatTy(_)) => {
            bug!(
                "asked to assemble builtin bounds of unexpected type: {:?}",
                self_ty
            );
        }
    }
}

/// Computes the conditions under which the builtin `Copy`/`Clone` impl
/// applies to the obligation's self type (both traits share one set of
/// conditions). Same `Where`/`None`/`Ambiguous` contract as
/// `sized_conditions`.
fn copy_clone_conditions(
    &mut self,
    obligation: &TraitObligation<'tcx>,
) -> BuiltinImplConditions<'tcx> {
    // NOTE: binder moved to (*)
    let self_ty = self.infcx
        .shallow_resolve(obligation.predicate.skip_binder().self_ty());

    use self::BuiltinImplConditions::{Ambiguous, None, Where};

    match self_ty.sty {
        ty::Infer(ty::IntVar(_)) | ty::Infer(ty::FloatVar(_)) | ty::FnDef(..) | ty::FnPtr(_)
        | ty::Error => Where(ty::Binder::dummy(Vec::new())),

        ty::Uint(_)
        | ty::Int(_)
        | ty::Bool
        | ty::Float(_)
        | ty::Char
        | ty::RawPtr(..)
        | ty::Never
        | ty::Ref(_, _, hir::MutImmutable) => {
            // Implementations provided in libcore
            None
        }

        // Unsized types, generators, and `&mut` references are never
        // Copy/Clone via the builtin impl.
        ty::Dynamic(..)
        | ty::Str
        | ty::Slice(..)
        | ty::Generator(..)
        | ty::GeneratorWitness(..)
        | ty::Foreign(..)
        | ty::Ref(_, _, hir::MutMutable) => None,

        ty::Array(element_ty, _) => {
            // (*) binder moved here
            Where(ty::Binder::bind(vec![element_ty]))
        }

        ty::Tuple(tys) => {
            // (*) binder moved here
            Where(ty::Binder::bind(tys.iter().map(|k| k.expect_ty()).collect()))
        }

        ty::Closure(def_id, substs) => {
            // (*) binder moved here
            // A closure is Copy/Clone iff all of its captured upvars are.
            Where(ty::Binder::bind(
                substs.upvar_tys(def_id, self.tcx()).collect(),
            ))
        }

        ty::Adt(..) | ty::Projection(..) | ty::Param(..) | ty::Opaque(..) => {
            // Fallback to whatever user-defined impls exist in this case.
            None
        }

        ty::Infer(ty::TyVar(_)) => {
            // Unbound type variable. Might or might not have
            // applicable impls and so forth, depending on what
            // those type variables wind up being bound to.
            Ambiguous
        }

        ty::UnnormalizedProjection(..)
        | ty::Placeholder(..)
        | ty::Bound(..)
        | ty::Infer(ty::FreshTy(_))
        | ty::Infer(ty::FreshIntTy(_))
        | ty::Infer(ty::FreshFloatTy(_)) => {
            bug!(
                "asked to assemble builtin bounds of unexpected type: {:?}",
                self_ty
            );
        }
    }
}

/// For default impls, we need to break apart a type into its
/// "constituent types" -- meaning, the types that it contains.
///
/// Here are some (simple) examples:
///
/// ```
/// (i32, u32) -> [i32, u32]
/// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
/// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
/// Zed<i32> where enum Zed { A(T), B(u32) } -> [i32, u32]
/// ```
fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec<Ty<'tcx>> {
    match t.sty {
        // Scalar-like leaves have no constituents.
        ty::Uint(_)
        | ty::Int(_)
        | ty::Bool
        | ty::Float(_)
        | ty::FnDef(..)
        | ty::FnPtr(_)
        | ty::Str
        | ty::Error
        | ty::Infer(ty::IntVar(_))
        | ty::Infer(ty::FloatVar(_))
        | ty::Never
        | ty::Char => Vec::new(),

        ty::UnnormalizedProjection(..)
        | ty::Placeholder(..)
        | ty::Dynamic(..)
        | ty::Param(..)
        | ty::Foreign(..)
        | ty::Projection(..)
        | ty::Bound(..)
        | ty::Infer(ty::TyVar(_))
        | ty::Infer(ty::FreshTy(_))
        | ty::Infer(ty::FreshIntTy(_))
        | ty::Infer(ty::FreshFloatTy(_)) => {
            bug!(
                "asked to assemble constituent types of unexpected type: {:?}",
                t
            );
        }

        ty::RawPtr(ty::TypeAndMut { ty: element_ty, .. }) | ty::Ref(_, element_ty, _) => {
            vec![element_ty]
        }

        ty::Array(element_ty, _) | ty::Slice(element_ty) => vec![element_ty],

        ty::Tuple(ref tys) => {
            // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
            tys.iter().map(|k| k.expect_ty()).collect()
        }

        ty::Closure(def_id, ref substs) => substs.upvar_tys(def_id, self.tcx()).collect(),

        ty::Generator(def_id, ref substs, _) => {
            // A generator's constituents are its upvars plus its interior
            // witness type (the types live across yield points).
            let witness = substs.witness(def_id, self.tcx());
            substs
                .upvar_tys(def_id, self.tcx())
                .chain(iter::once(witness))
                .collect()
        }

        ty::GeneratorWitness(types) => {
            // This is sound because no regions in the witness can refer to
            // the binder outside the witness. So we'll effectively reuse
            // the implicit binder around the witness.
            types.skip_binder().to_vec()
        }

        // for `PhantomData<T>`, we pass `T`
        ty::Adt(def, substs) if def.is_phantom_data() => substs.types().collect(),

        ty::Adt(def, substs) => def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect(),

        ty::Opaque(def_id, substs) => {
            // We can resolve the `impl Trait` to its concrete type,
            // which enforces a DAG between the functions requiring
            // the auto trait bounds in question.
            vec![self.tcx().type_of(def_id).subst(self.tcx(), substs)]
        }
    }
}

/// Builds one `T: TraitDef` obligation (normalized, plus any obligations
/// produced by normalization itself) for every type in `types`, taking care
/// to re-handle the binder around each type as described below.
fn collect_predicates_for_types(
    &mut self,
    param_env: ty::ParamEnv<'tcx>,
    cause: ObligationCause<'tcx>,
    recursion_depth: usize,
    trait_def_id: DefId,
    types: ty::Binder<Vec<Ty<'tcx>>>,
) -> Vec<PredicateObligation<'tcx>> {
    // Because the types were potentially derived from
    // higher-ranked obligations they may reference late-bound
    // regions. For example, `for<'a> Foo<&'a int> : Copy` would
    // yield a type like `for<'a> &'a int`. In general, we
    // maintain the invariant that we never manipulate bound
    // regions, so we have to process these bound regions somehow.
    //
    // The strategy is to:
    //
    // 1. Instantiate those regions to placeholder regions (e.g.,
    //    `for<'a> &'a int` becomes `&0 int`.
    // 2. Produce something like `&'0 int : Copy`
    // 3. Re-bind the regions back to `for<'a> &'a int : Copy`

    types
        .skip_binder()
        .into_iter()
        .flat_map(|ty| {
            // binder moved -\
            let ty: ty::Binder<Ty<'tcx>> = ty::Binder::bind(ty); // <----/

            // Each type is processed in its own snapshot so the placeholder
            // instantiation does not leak into the ambient inference state.
            self.infcx.in_snapshot(|_| {
                let (skol_ty, _) = self.infcx
                    .replace_bound_vars_with_placeholders(&ty);
                let Normalized {
                    value: normalized_ty,
                    mut obligations,
                } = project::normalize_with_depth(
                    self,
                    param_env,
                    cause.clone(),
                    recursion_depth,
                    &skol_ty,
                );
                let skol_obligation = self.tcx().predicate_for_trait_def(
                    param_env,
                    cause.clone(),
                    trait_def_id,
                    recursion_depth,
                    normalized_ty,
                    &[],
                );
                obligations.push(skol_obligation);
                obligations
            })
        })
        .collect()
}

///////////////////////////////////////////////////////////////////////////
// CONFIRMATION
//
// Confirmation unifies the output type parameters of the trait
// with the values found in the obligation, possibly yielding a
// type error.  See the [rustc guide] for more details.
//
// [rustc guide]:
// https://rust-lang.github.io/rustc-guide/traits/resolution.html#confirmation

/// Dispatches the winning `candidate` to its kind-specific confirmation
/// routine, wrapping the result in the matching `Vtable*` variant.
fn confirm_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
    candidate: SelectionCandidate<'tcx>,
) -> Result<Selection<'tcx>, SelectionError<'tcx>> {
    debug!("confirm_candidate({:?}, {:?})", obligation, candidate);
    match candidate {
        BuiltinCandidate { has_nested } => {
            let data = self.confirm_builtin_candidate(obligation, has_nested);
            Ok(VtableBuiltin(data))
        }

        ParamCandidate(param) => {
            let obligations = self.confirm_param_candidate(obligation, param);
            Ok(VtableParam(obligations))
        }

        ImplCandidate(impl_def_id) => Ok(VtableImpl(self.confirm_impl_candidate(
            obligation,
            impl_def_id,
        ))),

        AutoImplCandidate(trait_def_id) => {
            let data = self.confirm_auto_impl_candidate(obligation, trait_def_id);
            Ok(VtableAutoImpl(data))
        }

        ProjectionCandidate => {
            self.confirm_projection_candidate(obligation);
            Ok(VtableParam(Vec::new()))
        }

        ClosureCandidate => {
            let vtable_closure = self.confirm_closure_candidate(obligation)?;
            Ok(VtableClosure(vtable_closure))
        }
        GeneratorCandidate => {
            let vtable_generator = self.confirm_generator_candidate(obligation)?;
            Ok(VtableGenerator(vtable_generator))
        }

        FnPointerCandidate => {
            let data = self.confirm_fn_pointer_candidate(obligation)?;
            Ok(VtableFnPointer(data))
        }

        TraitAliasCandidate(alias_def_id) => {
            let data = self.confirm_trait_alias_candidate(obligation, alias_def_id);
            Ok(VtableTraitAlias(data))
        }

        ObjectCandidate => {
            let data = self.confirm_object_candidate(obligation);
            Ok(VtableObject(data))
        }

        BuiltinObjectCandidate => {
            // This indicates something like `(Trait+Send) :
            // Send`. In this case, we know that this holds
            // because that's what the object type is telling us,
            // and there's really no additional obligations to
            // prove and no types in particular to unify etc.
            Ok(VtableParam(Vec::new()))
        }

        BuiltinUnsizeCandidate => {
            let data = self.confirm_builtin_unsize_candidate(obligation)?;
            Ok(VtableBuiltin(data))
        }
    }
}

/// Re-runs the projection-bound match that originally produced this
/// candidate; it already succeeded during assembly, so it must succeed again
/// (asserted). Produces no nested obligations.
fn confirm_projection_candidate(&mut self, obligation: &TraitObligation<'tcx>) {
    self.infcx.in_snapshot(|snapshot| {
        let result =
            self.match_projection_obligation_against_definition_bounds(obligation, snapshot);
        assert!(result);
    })
}

/// Confirms a where-clause candidate by unifying the obligation trait-ref
/// with the clause's trait-ref, returning the resulting nested obligations.
fn confirm_param_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
    param: ty::PolyTraitRef<'tcx>,
) -> Vec<PredicateObligation<'tcx>> {
    debug!("confirm_param_candidate({:?},{:?})", obligation, param);

    // During evaluation, we already checked that this
    // where-clause trait-ref could be unified with the obligation
    // trait-ref. Repeat that unification now without any
    // transactional boundary; it should not fail.
    match self.match_where_clause_trait_ref(obligation, param.clone()) {
        Ok(obligations) => obligations,
        Err(()) => {
            bug!(
                "Where clause `{:?}` was applicable to `{:?}` but now is not",
                param,
                obligation
            );
        }
    }
}

/// Confirms a builtin impl (`Sized`, `Copy`, or `Clone`). When `has_nested`
/// is set, re-derives the nested constituent-type obligations from the same
/// `*_conditions` routine that assembled the candidate.
fn confirm_builtin_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
    has_nested: bool,
) -> VtableBuiltinData<PredicateObligation<'tcx>> {
    debug!(
        "confirm_builtin_candidate({:?}, {:?})",
        obligation, has_nested
    );

    let lang_items = self.tcx().lang_items();
    let obligations = if has_nested {
        let trait_def = obligation.predicate.def_id();
        // Only these three lang-item traits have builtin impls with nested
        // conditions; anything else reaching here is a compiler bug.
        let conditions = if Some(trait_def) == lang_items.sized_trait() {
            self.sized_conditions(obligation)
        } else if Some(trait_def) == lang_items.copy_trait() {
            self.copy_clone_conditions(obligation)
        } else if Some(trait_def) == lang_items.clone_trait() {
            self.copy_clone_conditions(obligation)
        } else {
            bug!("unexpected builtin trait {:?}", trait_def)
        };
        let nested = match conditions {
            BuiltinImplConditions::Where(nested) => nested,
            _ => bug!(
                "obligation {:?} had matched a builtin impl but now doesn't",
                obligation
            ),
        };

        let cause = obligation.derived_cause(BuiltinDerivedObligation);
        self.collect_predicates_for_types(
            obligation.param_env,
            cause,
            obligation.recursion_depth + 1,
            trait_def,
            nested,
        )
    } else {
        vec![]
    };

    debug!("confirm_builtin_candidate: obligations={:?}", obligations);

    VtableBuiltinData {
        nested: obligations,
    }
}

/// This handles the case where a `auto trait Foo` impl is being used.
/// The idea is that the impl applies to `X : Foo` if the following conditions are met:
///
/// 1. For each constituent type `Y` in `X`, `Y : Foo` holds
/// 2. For each where-clause `C` declared on `Foo`, `[Self => X] C` holds.
fn confirm_auto_impl_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
    trait_def_id: DefId,
) -> VtableAutoImplData<PredicateObligation<'tcx>> {
    debug!(
        "confirm_auto_impl_candidate({:?}, {:?})",
        obligation, trait_def_id
    );

    // Condition (1): break the (binder-wrapped) self type into its
    // constituent types; each one must itself implement the auto trait.
    let types = obligation.predicate.map_bound(|inner| {
        let self_ty = self.infcx.shallow_resolve(inner.self_ty());
        self.constituent_types_for_ty(self_ty)
    });
    self.vtable_auto_impl(obligation, trait_def_id, types)
}

/// See `confirm_auto_impl_candidate`.
fn vtable_auto_impl(
    &mut self,
    obligation: &TraitObligation<'tcx>,
    trait_def_id: DefId,
    nested: ty::Binder<Vec<Ty<'tcx>>>,
) -> VtableAutoImplData<PredicateObligation<'tcx>> {
    debug!("vtable_auto_impl: nested={:?}", nested);

    // Condition (1): `Y: Foo` for each constituent type `Y`.
    let cause = obligation.derived_cause(BuiltinDerivedObligation);
    let mut obligations = self.collect_predicates_for_types(
        obligation.param_env,
        cause,
        obligation.recursion_depth + 1,
        trait_def_id,
        nested,
    );

    // Condition (2): the where-clauses declared on the trait itself,
    // instantiated with the obligation's substs.
    let trait_obligations: Vec<PredicateObligation<'_>> = self.infcx.in_snapshot(|_| {
        let poly_trait_ref = obligation.predicate.to_poly_trait_ref();
        let (trait_ref, _) = self.infcx
            .replace_bound_vars_with_placeholders(&poly_trait_ref);
        let cause = obligation.derived_cause(ImplDerivedObligation);
        self.impl_or_trait_obligations(
            cause,
            obligation.recursion_depth + 1,
            obligation.param_env,
            trait_def_id,
            &trait_ref.substs,
        )
    });

    // Adds the predicates from the trait. Note that this contains a `Self: Trait`
    // predicate as usual. It won't have any effect since auto traits are coinductive.
    obligations.extend(trait_obligations);

    debug!("vtable_auto_impl: obligations={:?}", obligations);

    VtableAutoImplData {
        trait_def_id,
        nested: obligations,
    }
}

/// Confirms selection of a user-written impl by re-matching it (this time
/// committing the inference side effects) and collecting the impl's
/// where-clause obligations.
fn confirm_impl_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
    impl_def_id: DefId,
) -> VtableImplData<'tcx, PredicateObligation<'tcx>> {
    debug!("confirm_impl_candidate({:?},{:?})", obligation, impl_def_id);

    // First, create the substitutions by matching the impl again,
    // this time not in a probe.
    self.infcx.in_snapshot(|snapshot| {
        let substs = self.rematch_impl(impl_def_id, obligation, snapshot);
        debug!("confirm_impl_candidate: substs={:?}", substs);
        let cause = obligation.derived_cause(ImplDerivedObligation);
        self.vtable_impl(
            impl_def_id,
            substs,
            cause,
            obligation.recursion_depth + 1,
            obligation.param_env,
        )
    })
}

/// Builds the `VtableImplData` for a matched impl: the impl's where-clause
/// obligations (at `recursion_depth`) plus the obligations produced while
/// normalizing the impl substs.
fn vtable_impl(
    &mut self,
    impl_def_id: DefId,
    mut substs: Normalized<'tcx, SubstsRef<'tcx>>,
    cause: ObligationCause<'tcx>,
    recursion_depth: usize,
    param_env: ty::ParamEnv<'tcx>,
) -> VtableImplData<'tcx, PredicateObligation<'tcx>> {
    debug!(
        "vtable_impl(impl_def_id={:?}, substs={:?}, recursion_depth={})",
        impl_def_id, substs, recursion_depth,
    );

    let mut impl_obligations = self.impl_or_trait_obligations(
        cause,
        recursion_depth,
        param_env,
        impl_def_id,
        &substs.value,
    );

    debug!(
        "vtable_impl: impl_def_id={:?} impl_obligations={:?}",
        impl_def_id, impl_obligations
    );

    // Because of RFC447, the impl-trait-ref and obligations
    // are sufficient to determine the impl substs, without
    // relying on projections in the impl-trait-ref.
    //
    // e.g., `impl<U: Tr, V: Iterator<Item=U>> Foo<<U as Tr>::T> for V`
    impl_obligations.append(&mut substs.obligations);

    VtableImplData {
        impl_def_id,
        substs: substs.value,
        nested: impl_obligations,
    }
}

/// Confirms selection of a trait-object (dyn Trait) candidate: finds the
/// supertrait of the object's principal that unifies with the obligation and
/// computes the vtable offset of that supertrait.
fn confirm_object_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
) -> VtableObjectData<'tcx, PredicateObligation<'tcx>> {
    debug!("confirm_object_candidate({:?})", obligation);

    // FIXME(nmatsakis) skipping binder here seems wrong -- we should
    // probably flatten the binder from the obligation and the binder
    // from the object. Have to try to make a broken test case that
    // results.
    let self_ty = self.infcx
        .shallow_resolve(*obligation.self_ty().skip_binder());
    let poly_trait_ref = match self_ty.sty {
        ty::Dynamic(ref data, ..) =>
            data.principal().unwrap_or_else(|| {
                span_bug!(obligation.cause.span, "object candidate with no principal")
            }).with_self_ty(self.tcx(), self_ty),
        _ => span_bug!(obligation.cause.span, "object candidate with non-object"),
    };

    let mut upcast_trait_ref = None;
    let mut nested = vec![];
    let vtable_base;

    {
        let tcx = self.tcx();

        // We want to find the first supertrait in the list of
        // supertraits that we can unify with, and do that
        // unification. We know that there is exactly one in the list
        // where we can unify because otherwise select would have
        // reported an ambiguity. (When we do find a match, also
        // record it for later.)
        //
        // NOTE: the closure passed to `take_while` has side effects
        // (setting `upcast_trait_ref`, extending `nested`); the iterator
        // stops *at* the matching supertrait, so `nonmatching` yields only
        // the supertraits that precede the match.
        let nonmatching = util::supertraits(tcx, poly_trait_ref).take_while(
            |&t| match self.infcx.commit_if_ok(|_| self.match_poly_trait_ref(obligation, t)) {
                Ok(obligations) => {
                    upcast_trait_ref = Some(t);
                    nested.extend(obligations);
                    false
                }
                Err(_) => true,
            },
        );

        // Additionally, for each of the nonmatching predicates that
        // we pass over, we sum up the set of number of vtable
        // entries, so that we can compute the offset for the selected
        // trait.
        vtable_base = nonmatching.map(|t| tcx.count_own_vtable_entries(t)).sum();
    }

    VtableObjectData {
        upcast_trait_ref: upcast_trait_ref.unwrap(),
        vtable_base,
        nested,
    }
}

/// Confirms a candidate for a function pointer (or fn item) implementing an
/// `Fn*` trait, by building the corresponding trait-ref from the fn
/// signature and unifying it with the obligation.
fn confirm_fn_pointer_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
) -> Result<VtableFnPointerData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
    debug!("confirm_fn_pointer_candidate({:?})", obligation);

    // Okay to skip binder; it is reintroduced below.
    let self_ty = self.infcx
        .shallow_resolve(*obligation.self_ty().skip_binder());
    let sig = self_ty.fn_sig(self.tcx());
    // Build e.g. `fn(A, B) -> R : Fn<(A, B)>` (tupled arguments), dropping
    // the return type from the pair that the helper returns.
    let trait_ref = self.tcx()
        .closure_trait_ref_and_return_type(
            obligation.predicate.def_id(),
            self_ty,
            sig,
            util::TupleArgumentsFlag::Yes,
        )
        .map_bound(|(trait_ref, _)| trait_ref);

    let Normalized {
        value: trait_ref,
        obligations,
    } = project::normalize_with_depth(
        self,
        obligation.param_env,
        obligation.cause.clone(),
        obligation.recursion_depth + 1,
        &trait_ref,
    );

    self.confirm_poly_trait_refs(
        obligation.cause.clone(),
        obligation.param_env,
        obligation.predicate.to_poly_trait_ref(),
        trait_ref,
    )?;
    Ok(VtableFnPointerData {
        fn_ty: self_ty,
        nested: obligations,
    })
}

/// Confirms a trait-alias candidate by instantiating the alias's predicate
/// and collecting the obligations implied by the aliased trait's
/// where-clauses.
fn confirm_trait_alias_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
    alias_def_id: DefId,
) -> VtableTraitAliasData<'tcx, PredicateObligation<'tcx>> {
    debug!(
        "confirm_trait_alias_candidate({:?}, {:?})",
        obligation, alias_def_id
    );

    self.infcx.in_snapshot(|_| {
        let (predicate, _) = self.infcx()
            .replace_bound_vars_with_placeholders(&obligation.predicate);
        let trait_ref = predicate.trait_ref;
        let trait_def_id = trait_ref.def_id;
        let substs = trait_ref.substs;

        let trait_obligations = self.impl_or_trait_obligations(
            obligation.cause.clone(),
            obligation.recursion_depth,
            obligation.param_env,
            trait_def_id,
            &substs,
        );

        debug!(
            "confirm_trait_alias_candidate: trait_def_id={:?} trait_obligations={:?}",
            trait_def_id, trait_obligations
        );

        VtableTraitAliasData {
            alias_def_id,
            substs: substs,
            nested: trait_obligations,
        }
    })
}

/// Confirms a candidate for a generator type implementing the `Generator`
/// trait, unifying the generator's trait-ref with the obligation's.
fn confirm_generator_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
) -> Result<VtableGeneratorData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
    // Okay to skip binder because the substs on generator types never
    // touch bound regions, they just capture the in-scope
    // type/region parameters.
    let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
    let (generator_def_id, substs) = match self_ty.sty {
        ty::Generator(id, substs, _) => (id, substs),
        _ => bug!("closure candidate for non-closure {:?}", obligation),
    };

    debug!(
        "confirm_generator_candidate({:?},{:?},{:?})",
        obligation, generator_def_id, substs
    );

    let trait_ref = self.generator_trait_ref_unnormalized(obligation, generator_def_id, substs);
    let Normalized {
        value: trait_ref,
        mut obligations,
    } = normalize_with_depth(
        self,
        obligation.param_env,
        obligation.cause.clone(),
        obligation.recursion_depth + 1,
        &trait_ref,
    );

    debug!(
        "confirm_generator_candidate(generator_def_id={:?}, \
         trait_ref={:?}, obligations={:?})",
        generator_def_id, trait_ref, obligations
    );

    obligations.extend(self.confirm_poly_trait_refs(
        obligation.cause.clone(),
        obligation.param_env,
        obligation.predicate.to_poly_trait_ref(),
        trait_ref,
    )?);

    Ok(VtableGeneratorData {
        generator_def_id: generator_def_id,
        substs: substs.clone(),
        nested: obligations,
    })
}

/// Confirms a candidate for a closure type implementing one of the `Fn*`
/// traits, additionally registering the `ClosureKind` predicate that pins the
/// closure's capture kind.
fn confirm_closure_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
) -> Result<VtableClosureData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
    debug!("confirm_closure_candidate({:?})", obligation);

    let kind = self.tcx()
        .lang_items()
        .fn_trait_kind(obligation.predicate.def_id())
        .unwrap_or_else(|| bug!("closure candidate for non-fn trait {:?}", obligation));

    // Okay to skip binder because the substs on closure types never
    // touch bound regions, they just capture the in-scope
    // type/region parameters.
    let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
    let (closure_def_id, substs) = match self_ty.sty {
        ty::Closure(id, substs) => (id, substs),
        _ => bug!("closure candidate for non-closure {:?}", obligation),
    };

    let trait_ref = self.closure_trait_ref_unnormalized(obligation, closure_def_id, substs);
    let Normalized {
        value: trait_ref,
        mut obligations,
    } = normalize_with_depth(
        self,
        obligation.param_env,
        obligation.cause.clone(),
        obligation.recursion_depth + 1,
        &trait_ref,
    );

    debug!(
        "confirm_closure_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})",
        closure_def_id, trait_ref, obligations
    );

    obligations.extend(self.confirm_poly_trait_refs(
        obligation.cause.clone(),
        obligation.param_env,
        obligation.predicate.to_poly_trait_ref(),
        trait_ref,
    )?);

    // FIXME: chalk
    if !self.tcx().sess.opts.debugging_opts.chalk {
        obligations.push(Obligation::new(
            obligation.cause.clone(),
            obligation.param_env,
            ty::Predicate::ClosureKind(closure_def_id, substs, kind),
        ));
    }

    Ok(VtableClosureData {
        closure_def_id,
        substs: substs.clone(),
        nested: obligations,
    })
}

/// In the case of closure types and fn pointers,
/// we currently treat the input type parameters on the trait as
/// outputs. This means that when we have a match we have only
/// considered the self type, so we have to go back and make sure
/// to relate the argument types too. This is kind of wrong, but
/// since we control the full set of impls, also not that wrong,
/// and it DOES yield better error messages (since we don't report
/// errors as if there is no applicable impl, but rather report
/// errors about mismatched argument types.
///
/// Here is an example. Imagine we have a closure expression
/// and we desugared it so that the type of the expression is
/// `Closure`, and `Closure` expects an int as argument. Then it
/// is "as if" the compiler generated this impl:
///
///     impl Fn(int) for Closure { ... }
///
/// Now imagine our obligation is `Fn(usize) for Closure`. So far
/// we have matched the self type `Closure`. At this point we'll
/// compare the `int` to `usize` and generate an error.
///
/// Note that this checking occurs *after* the impl has selected,
/// because these output type parameters should not affect the
/// selection of the impl. Therefore, if there is a mismatch, we
/// report an error to the user.
fn confirm_poly_trait_refs(
    &mut self,
    obligation_cause: ObligationCause<'tcx>,
    obligation_param_env: ty::ParamEnv<'tcx>,
    obligation_trait_ref: ty::PolyTraitRef<'tcx>,
    expected_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
    let obligation_trait_ref = obligation_trait_ref.clone();
    self.infcx
        .at(&obligation_cause, obligation_param_env)
        .sup(obligation_trait_ref, expected_trait_ref)
        .map(|InferOk { obligations, .. }| obligations)
        .map_err(|e| OutputTypeParameterMismatch(expected_trait_ref, obligation_trait_ref, e))
}

/// Confirms a builtin `Unsize` coercion: object upcasts, `T -> dyn Trait`,
/// `[T; n] -> [T]`, `Struct<T> -> Struct<U>` (unsized tail field), and
/// `(.., T) -> (.., U)` (unsized last tuple element).
fn confirm_builtin_unsize_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
) -> Result<VtableBuiltinData<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
    let tcx = self.tcx();

    // assemble_candidates_for_unsizing should ensure there are no late bound
    // regions here. See the comment there for more details.
    let source = self.infcx
        .shallow_resolve(obligation.self_ty().no_bound_vars().unwrap());
    let target = obligation
        .predicate
        .skip_binder()
        .trait_ref
        .substs
        .type_at(1);
    let target = self.infcx.shallow_resolve(target);

    debug!(
        "confirm_builtin_unsize_candidate(source={:?}, target={:?})",
        source, target
    );

    let mut nested = vec![];
    match (&source.sty, &target.sty) {
        // Trait+Kx+'a -> Trait+Ky+'b (upcasts).
        (&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => {
            // See assemble_candidates_for_unsizing for more info.
            // Rebuild the source's predicate list but with the *target's*
            // auto traits, then require source == rebuilt-target below; only
            // the lifetime bound is allowed to change in an upcast.
            let existential_predicates = data_a.map_bound(|data_a| {
                let iter = data_a.principal().map(|x| ty::ExistentialPredicate::Trait(x))
                    .into_iter().chain(
                        data_a
                            .projection_bounds()
                            .map(|x| ty::ExistentialPredicate::Projection(x)),
                    )
                    .chain(
                        data_b
                            .auto_traits()
                            .map(ty::ExistentialPredicate::AutoTrait),
                    );
                tcx.mk_existential_predicates(iter)
            });
            let source_trait = tcx.mk_dynamic(existential_predicates, r_b);

            // Require that the traits involved in this upcast are **equal**;
            // only the **lifetime bound** is changed.
            //
            // FIXME: This condition is arguably too strong -- it
            // would suffice for the source trait to be a
            // *subtype* of the target trait. In particular
            // changing from something like `for<'a, 'b> Foo<'a,
            // 'b>` to `for<'a> Foo<'a, 'a>` should be
            // permitted. And, indeed, in the in commit
            // 904a0bde93f0348f69914ee90b1f8b6e4e0d7cbc, this
            // condition was loosened. However, when the leak check was added
            // back, using subtype here actually guides the coercion code in
            // such a way that it accepts `old-lub-glb-object.rs`. This is probably
            // a good thing, but I've modified this to `.eq` because I want
            // to continue rejecting that test (as we have done for quite some time)
            // before we are firmly comfortable with what our behavior
            // should be there. -nikomatsakis
            let InferOk { obligations, .. } = self.infcx
                .at(&obligation.cause, obligation.param_env)
                .eq(target, source_trait) // FIXME -- see below
                .map_err(|_| Unimplemented)?;
            nested.extend(obligations);

            // Register one obligation for 'a: 'b.
            let cause = ObligationCause::new(
                obligation.cause.span,
                obligation.cause.body_id,
                ObjectCastObligation(target),
            );
            let outlives = ty::OutlivesPredicate(r_a, r_b);
            nested.push(Obligation::with_depth(
                cause,
                obligation.recursion_depth + 1,
                obligation.param_env,
                ty::Binder::bind(outlives).to_predicate(),
            ));
        }

        // T -> Trait.
        (_, &ty::Dynamic(ref data, r)) => {
            // All traits in the object type must be object safe.
            let mut object_dids = data.auto_traits()
                .chain(data.principal_def_id());
            if let Some(did) = object_dids.find(|did| !tcx.is_object_safe(*did)) {
                return Err(TraitNotObjectSafe(did));
            }

            let cause = ObligationCause::new(
                obligation.cause.span,
                obligation.cause.body_id,
                ObjectCastObligation(target),
            );

            let predicate_to_obligation = |predicate| {
                Obligation::with_depth(
                    cause.clone(),
                    obligation.recursion_depth + 1,
                    obligation.param_env,
                    predicate,
                )
            };

            // Create obligations:
            //  - Casting T to Trait
            //  - For all the various builtin bounds attached to the object cast. (In other
            //  words, if the object type is Foo+Send, this would create an obligation for the
            //  Send check.)
            //  - Projection predicates
            nested.extend(
                data.iter()
                    .map(|d| predicate_to_obligation(d.with_self_ty(tcx, source))),
            );

            // We can only make objects from sized types.
            let tr = ty::TraitRef {
                def_id: tcx.require_lang_item(lang_items::SizedTraitLangItem),
                substs: tcx.mk_substs_trait(source, &[]),
            };
            nested.push(predicate_to_obligation(tr.to_predicate()));

            // If the type is `Foo+'a`, ensures that the type
            // being cast to `Foo+'a` outlives `'a`:
            let outlives = ty::OutlivesPredicate(source, r);
            nested.push(predicate_to_obligation(
                ty::Binder::dummy(outlives).to_predicate(),
            ));
        }

        // [T; n] -> [T].
        (&ty::Array(a, _), &ty::Slice(b)) => {
            let InferOk { obligations, .. } = self.infcx
                .at(&obligation.cause, obligation.param_env)
                .eq(b, a)
                .map_err(|_| Unimplemented)?;
            nested.extend(obligations);
        }

        // Struct<T> -> Struct<U>.
        (&ty::Adt(def, substs_a), &ty::Adt(_, substs_b)) => {
            let fields = def.all_fields()
                .map(|f| tcx.type_of(f.did))
                .collect::<Vec<_>>();

            // The last field of the structure has to exist and contain type parameters.
            let field = if let Some(&field) = fields.last() {
                field
            } else {
                return Err(Unimplemented);
            };
            let mut ty_params = GrowableBitSet::new_empty();
            let mut found = false;
            for ty in field.walk() {
                if let ty::Param(p) = ty.sty {
                    ty_params.insert(p.index as usize);
                    found = true;
                }
            }
            if !found {
                return Err(Unimplemented);
            }

            // Replace type parameters used in unsizing with
            // Error and ensure they do not affect any other fields.
            // This could be checked after type collection for any struct
            // with a potentially unsized trailing field.
            let params = substs_a.iter().enumerate().map(|(i, &k)| {
                if ty_params.contains(i) {
                    tcx.types.err.into()
                } else {
                    k
                }
            });
            let substs = tcx.mk_substs(params);
            for &ty in fields.split_last().unwrap().1 {
                if ty.subst(tcx, substs).references_error() {
                    return Err(Unimplemented);
                }
            }

            // Extract Field<T> and Field<U> from Struct<T> and Struct<U>.
            let inner_source = field.subst(tcx, substs_a);
            let inner_target = field.subst(tcx, substs_b);

            // Check that the source struct with the target's
            // unsized parameters is equal to the target.
            let params = substs_a.iter().enumerate().map(|(i, &k)| {
                if ty_params.contains(i) {
                    substs_b.type_at(i).into()
                } else {
                    k
                }
            });
            let new_struct = tcx.mk_adt(def, tcx.mk_substs(params));
            let InferOk { obligations, .. } = self.infcx
                .at(&obligation.cause, obligation.param_env)
                .eq(target, new_struct)
                .map_err(|_| Unimplemented)?;
            nested.extend(obligations);

            // Construct the nested Field<T>: Unsize<Field<U>> predicate.
            nested.push(tcx.predicate_for_trait_def(
                obligation.param_env,
                obligation.cause.clone(),
                obligation.predicate.def_id(),
                obligation.recursion_depth + 1,
                inner_source,
                &[inner_target.into()],
            ));
        }

        // (.., T) -> (.., U).
        (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => {
            assert_eq!(tys_a.len(), tys_b.len());

            // The last field of the tuple has to exist.
            let (&a_last, a_mid) = if let Some(x) = tys_a.split_last() {
                x
            } else {
                return Err(Unimplemented);
            };
            let &b_last = tys_b.last().unwrap();

            // Check that the source tuple with the target's
            // last element is equal to the target.
            let new_tuple = tcx.mk_tup(
                a_mid.iter().map(|k| k.expect_ty()).chain(iter::once(b_last.expect_ty())),
            );
            let InferOk { obligations, .. } = self.infcx
                .at(&obligation.cause, obligation.param_env)
                .eq(target, new_tuple)
                .map_err(|_| Unimplemented)?;
            nested.extend(obligations);

            // Construct the nested T: Unsize<U> predicate.
            nested.push(tcx.predicate_for_trait_def(
                obligation.param_env,
                obligation.cause.clone(),
                obligation.predicate.def_id(),
                obligation.recursion_depth + 1,
                a_last.expect_ty(),
                &[b_last.into()],
            ));
        }

        _ => bug!(),
    };

    Ok(VtableBuiltinData { nested })
}

///////////////////////////////////////////////////////////////////////////
// Matching
//
// Matching is a common path used for both evaluation and
// confirmation.  It basically unifies types that appear in impls
// and traits. This does affect the surrounding environment;
// therefore, when used during evaluation, match routines must be
// run inside of a `probe()` so that their side-effects are
// contained.

/// Re-runs `match_impl` for an impl that already matched during candidate
/// assembly; failure at this point is a compiler bug.
fn rematch_impl(
    &mut self,
    impl_def_id: DefId,
    obligation: &TraitObligation<'tcx>,
    snapshot: &CombinedSnapshot<'_, 'tcx>,
) -> Normalized<'tcx, SubstsRef<'tcx>> {
    match self.match_impl(impl_def_id, obligation, snapshot) {
        Ok(substs) => substs,
        Err(()) => {
            bug!(
                "Impl {:?} was matchable against {:?} but now is not",
                impl_def_id,
                obligation
            );
        }
    }
}

/// Attempts to unify the obligation's trait-ref with `impl_def_id`'s
/// trait-ref (after fresh-substituting and normalizing the latter), returning
/// the inferred impl substs and the obligations from normalization. Fails
/// (`Err(())`) on fast rejection, unification failure, or a leak-check
/// failure against `snapshot`.
fn match_impl(
    &mut self,
    impl_def_id: DefId,
    obligation: &TraitObligation<'tcx>,
    snapshot: &CombinedSnapshot<'_, 'tcx>,
) -> Result<Normalized<'tcx, SubstsRef<'tcx>>, ()> {
    let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();

    // Before we create the substitutions and everything, first
    // consider a "quick reject". This avoids creating more types
    // and so forth that we need to.
    if self.fast_reject_trait_refs(obligation, &impl_trait_ref) {
        return Err(());
    }

    let (skol_obligation, placeholder_map) = self.infcx()
        .replace_bound_vars_with_placeholders(&obligation.predicate);
    let skol_obligation_trait_ref = skol_obligation.trait_ref;

    let impl_substs = self.infcx
        .fresh_substs_for_item(obligation.cause.span, impl_def_id);

    let impl_trait_ref = impl_trait_ref.subst(self.tcx(), impl_substs);

    let Normalized {
        value: impl_trait_ref,
        obligations: mut nested_obligations,
    } = project::normalize_with_depth(
        self,
        obligation.param_env,
        obligation.cause.clone(),
        obligation.recursion_depth + 1,
        &impl_trait_ref,
    );

    debug!(
        "match_impl(impl_def_id={:?}, obligation={:?}, \
         impl_trait_ref={:?}, skol_obligation_trait_ref={:?})",
        impl_def_id, obligation, impl_trait_ref, skol_obligation_trait_ref
    );

    let InferOk { obligations, .. } = self.infcx
        .at(&obligation.cause, obligation.param_env)
        .eq(skol_obligation_trait_ref, impl_trait_ref)
        .map_err(|e| debug!("match_impl: failed eq_trait_refs due to `{}`", e))?;
    nested_obligations.extend(obligations);

    if let Err(e) = self.infcx.leak_check(false, &placeholder_map, snapshot) {
        debug!("match_impl: failed leak check due to `{}`", e);
        return Err(());
    }

    debug!("match_impl: success impl_substs={:?}", impl_substs);
    Ok(Normalized {
        value: impl_substs,
        obligations: nested_obligations,
    })
}

// NOTE(review): `fast_reject_trait_refs` continues beyond this chunk; only
// its header is visible here.
fn fast_reject_trait_refs(
    &mut self,
    obligation: &TraitObligation<'_>,
    impl_trait_ref: &ty::TraitRef<'_>,
) -> bool {
    // We can avoid creating type variables and doing the full
    // substitution if we find that any of the input types, when
    // simplified, do not match.
obligation .predicate .skip_binder() .input_types() .zip(impl_trait_ref.input_types()) .any(|(obligation_ty, impl_ty)| { let simplified_obligation_ty = fast_reject::simplify_type(self.tcx(), obligation_ty, true); let simplified_impl_ty = fast_reject::simplify_type(self.tcx(), impl_ty, false); simplified_obligation_ty.is_some() && simplified_impl_ty.is_some() && simplified_obligation_ty != simplified_impl_ty }) } /// Normalize `where_clause_trait_ref` and try to match it against /// `obligation`. If successful, return any predicates that /// result from the normalization. Normalization is necessary /// because where-clauses are stored in the parameter environment /// unnormalized. fn match_where_clause_trait_ref( &mut self, obligation: &TraitObligation<'tcx>, where_clause_trait_ref: ty::PolyTraitRef<'tcx>, ) -> Result<Vec<PredicateObligation<'tcx>>, ()> { self.match_poly_trait_ref(obligation, where_clause_trait_ref) } /// Returns `Ok` if `poly_trait_ref` being true implies that the /// obligation is satisfied. fn match_poly_trait_ref( &mut self, obligation: &TraitObligation<'tcx>, poly_trait_ref: ty::PolyTraitRef<'tcx>, ) -> Result<Vec<PredicateObligation<'tcx>>, ()> { debug!( "match_poly_trait_ref: obligation={:?} poly_trait_ref={:?}", obligation, poly_trait_ref ); self.infcx .at(&obligation.cause, obligation.param_env) .sup(obligation.predicate.to_poly_trait_ref(), poly_trait_ref) .map(|InferOk { obligations, .. 
}| obligations) .map_err(|_| ()) } /////////////////////////////////////////////////////////////////////////// // Miscellany fn match_fresh_trait_refs( &self, previous: &ty::PolyTraitRef<'tcx>, current: &ty::PolyTraitRef<'tcx>, ) -> bool { let mut matcher = ty::_match::Match::new(self.tcx()); matcher.relate(previous, current).is_ok() } fn push_stack<'o, 's: 'o>( &mut self, previous_stack: TraitObligationStackList<'s, 'tcx>, obligation: &'o TraitObligation<'tcx>, ) -> TraitObligationStack<'o, 'tcx> { let fresh_trait_ref = obligation .predicate .to_poly_trait_ref() .fold_with(&mut self.freshener); let depth = previous_stack.depth() + 1; TraitObligationStack { obligation, fresh_trait_ref, reached_depth: Cell::new(depth), previous: previous_stack, depth, } } fn closure_trait_ref_unnormalized( &mut self, obligation: &TraitObligation<'tcx>, closure_def_id: DefId, substs: ty::ClosureSubsts<'tcx>, ) -> ty::PolyTraitRef<'tcx> { debug!( "closure_trait_ref_unnormalized(obligation={:?}, closure_def_id={:?}, substs={:?})", obligation, closure_def_id, substs, ); let closure_type = self.infcx.closure_sig(closure_def_id, substs); debug!( "closure_trait_ref_unnormalized: closure_type = {:?}", closure_type ); // (1) Feels icky to skip the binder here, but OTOH we know // that the self-type is an unboxed closure type and hence is // in fact unparameterized (or at least does not reference any // regions bound in the obligation). Still probably some // refactoring could make this nicer. 
self.tcx() .closure_trait_ref_and_return_type( obligation.predicate.def_id(), obligation.predicate.skip_binder().self_ty(), // (1) closure_type, util::TupleArgumentsFlag::No, ) .map_bound(|(trait_ref, _)| trait_ref) } fn generator_trait_ref_unnormalized( &mut self, obligation: &TraitObligation<'tcx>, closure_def_id: DefId, substs: ty::GeneratorSubsts<'tcx>, ) -> ty::PolyTraitRef<'tcx> { let gen_sig = substs.poly_sig(closure_def_id, self.tcx()); // (1) Feels icky to skip the binder here, but OTOH we know // that the self-type is an generator type and hence is // in fact unparameterized (or at least does not reference any // regions bound in the obligation). Still probably some // refactoring could make this nicer. self.tcx() .generator_trait_ref_and_outputs( obligation.predicate.def_id(), obligation.predicate.skip_binder().self_ty(), // (1) gen_sig, ) .map_bound(|(trait_ref, ..)| trait_ref) } /// Returns the obligations that are implied by instantiating an /// impl or trait. The obligations are substituted and fully /// normalized. This is used when confirming an impl or default /// impl. fn impl_or_trait_obligations( &mut self, cause: ObligationCause<'tcx>, recursion_depth: usize, param_env: ty::ParamEnv<'tcx>, def_id: DefId, // of impl or trait substs: SubstsRef<'tcx>, // for impl or trait ) -> Vec<PredicateObligation<'tcx>> { debug!("impl_or_trait_obligations(def_id={:?})", def_id); let tcx = self.tcx(); // To allow for one-pass evaluation of the nested obligation, // each predicate must be preceded by the obligations required // to normalize it. 
// for example, if we have: // impl<U: Iterator, V: Iterator<Item=U>> Foo for V where U::Item: Copy // the impl will have the following predicates: // <V as Iterator>::Item = U, // U: Iterator, U: Sized, // V: Iterator, V: Sized, // <U as Iterator>::Item: Copy // When we substitute, say, `V => IntoIter<u32>, U => $0`, the last // obligation will normalize to `<$0 as Iterator>::Item = $1` and // `$1: Copy`, so we must ensure the obligations are emitted in // that order. let predicates = tcx.predicates_of(def_id); assert_eq!(predicates.parent, None); let mut predicates: Vec<_> = predicates .predicates .iter() .flat_map(|(predicate, _)| { let predicate = normalize_with_depth( self, param_env, cause.clone(), recursion_depth, &predicate.subst(tcx, substs), ); predicate.obligations.into_iter().chain(Some(Obligation { cause: cause.clone(), recursion_depth, param_env, predicate: predicate.value, })) }) .collect(); // We are performing deduplication here to avoid exponential blowups // (#38528) from happening, but the real cause of the duplication is // unknown. What we know is that the deduplication avoids exponential // amount of predicates being propagated when processing deeply nested // types. // // This code is hot enough that it's worth avoiding the allocation // required for the FxHashSet when possible. Special-casing lengths 0, // 1 and 2 covers roughly 75--80% of the cases. if predicates.len() <= 1 { // No possibility of duplicates. } else if predicates.len() == 2 { // Only two elements. Drop the second if they are equal. if predicates[0] == predicates[1] { predicates.truncate(1); } } else { // Three or more elements. Use a general deduplication process. let mut seen = FxHashSet::default(); predicates.retain(|i| seen.insert(i.clone())); } predicates } } impl<'tcx> TraitObligation<'tcx> { #[allow(unused_comparisons)] pub fn derived_cause( &self, variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>, ) -> ObligationCause<'tcx> { /*! 
* Creates a cause for obligations that are derived from * `obligation` by a recursive search (e.g., for a builtin * bound, or eventually a `auto trait Foo`). If `obligation` * is itself a derived obligation, this is just a clone, but * otherwise we create a "derived obligation" cause so as to * keep track of the original root obligation for error * reporting. */ let obligation = self; // NOTE(flaper87): As of now, it keeps track of the whole error // chain. Ideally, we should have a way to configure this either // by using -Z verbose or just a CLI argument. if obligation.recursion_depth >= 0 { let derived_cause = DerivedObligationCause { parent_trait_ref: obligation.predicate.to_poly_trait_ref(), parent_code: Rc::new(obligation.cause.code.clone()), }; let derived_code = variant(derived_cause); ObligationCause::new( obligation.cause.span, obligation.cause.body_id, derived_code, ) } else { obligation.cause.clone() } } } impl<'tcx> SelectionCache<'tcx> { /// Actually frees the underlying memory in contrast to what stdlib containers do on `clear` pub fn clear(&self) { *self.hashmap.borrow_mut() = Default::default(); } } impl<'tcx> EvaluationCache<'tcx> { /// Actually frees the underlying memory in contrast to what stdlib containers do on `clear` pub fn clear(&self) { *self.hashmap.borrow_mut() = Default::default(); } } impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> { fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> { TraitObligationStackList::with(self) } fn iter(&'o self) -> TraitObligationStackList<'o, 'tcx> { self.list() } /// Indicates that attempting to evaluate this stack entry /// required accessing something from the stack at depth `reached_depth`. 
fn update_reached_depth(&self, reached_depth: usize) { assert!( self.depth > reached_depth, "invoked `update_reached_depth` with something under this stack: \ self.depth={} reached_depth={}", self.depth, reached_depth, ); debug!("update_reached_depth(reached_depth={})", reached_depth); let mut p = self; while reached_depth < p.depth { debug!("update_reached_depth: marking {:?} as cycle participant", p.fresh_trait_ref); p.reached_depth.set(p.reached_depth.get().min(reached_depth)); p = p.previous.head.unwrap(); } } } #[derive(Copy, Clone)] struct TraitObligationStackList<'o, 'tcx: 'o> { head: Option<&'o TraitObligationStack<'o, 'tcx>>, } impl<'o, 'tcx> TraitObligationStackList<'o, 'tcx> { fn empty() -> TraitObligationStackList<'o, 'tcx> { TraitObligationStackList { head: None } } fn with(r: &'o TraitObligationStack<'o, 'tcx>) -> TraitObligationStackList<'o, 'tcx> { TraitObligationStackList { head: Some(r) } } fn head(&self) -> Option<&'o TraitObligationStack<'o, 'tcx>> { self.head } fn depth(&self) -> usize { if let Some(head) = self.head { head.depth } else { 0 } } } impl<'o, 'tcx> Iterator for TraitObligationStackList<'o, 'tcx> { type Item = &'o TraitObligationStack<'o, 'tcx>; fn next(&mut self) -> Option<&'o TraitObligationStack<'o, 'tcx>> { match self.head { Some(o) => { *self = o.previous; Some(o) } None => None, } } } impl<'o, 'tcx> fmt::Debug for TraitObligationStack<'o, 'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "TraitObligationStack({:?})", self.obligation) } } #[derive(Clone, Eq, PartialEq)] pub struct WithDepNode<T> { dep_node: DepNodeIndex, cached_value: T, } impl<T: Clone> WithDepNode<T> { pub fn new(dep_node: DepNodeIndex, cached_value: T) -> Self { WithDepNode { dep_node, cached_value, } } pub fn get(&self, tcx: TyCtxt<'_, '_, '_>) -> T { tcx.dep_graph.read_index(self.dep_node); self.cached_value.clone() } } introduce ProvisionalEvaluationCache // ignore-tidy-filelength //! Candidate selection. 
See the [rustc guide] for more information on how this works. //! //! [rustc guide]: https://rust-lang.github.io/rustc-guide/traits/resolution.html#selection use self::EvaluationResult::*; use self::SelectionCandidate::*; use super::coherence::{self, Conflict}; use super::project; use super::project::{normalize_with_depth, Normalized, ProjectionCacheKey}; use super::util; use super::DerivedObligationCause; use super::Selection; use super::SelectionResult; use super::TraitNotObjectSafe; use super::{BuiltinDerivedObligation, ImplDerivedObligation, ObligationCauseCode}; use super::{IntercrateMode, TraitQueryMode}; use super::{ObjectCastObligation, Obligation}; use super::{ObligationCause, PredicateObligation, TraitObligation}; use super::{OutputTypeParameterMismatch, Overflow, SelectionError, Unimplemented}; use super::{ VtableAutoImpl, VtableBuiltin, VtableClosure, VtableFnPointer, VtableGenerator, VtableImpl, VtableObject, VtableParam, VtableTraitAlias, }; use super::{ VtableAutoImplData, VtableBuiltinData, VtableClosureData, VtableFnPointerData, VtableGeneratorData, VtableImplData, VtableObjectData, VtableTraitAliasData, }; use crate::dep_graph::{DepKind, DepNodeIndex}; use crate::hir::def_id::DefId; use crate::infer::{CombinedSnapshot, InferCtxt, InferOk, PlaceholderMap, TypeFreshener}; use crate::middle::lang_items; use crate::mir::interpret::GlobalId; use crate::ty::fast_reject; use crate::ty::relate::TypeRelation; use crate::ty::subst::{Subst, SubstsRef}; use crate::ty::{self, ToPolyTraitRef, ToPredicate, Ty, TyCtxt, TypeFoldable}; use crate::hir; use rustc_data_structures::bit_set::GrowableBitSet; use rustc_data_structures::sync::Lock; use rustc_target::spec::abi::Abi; use std::cell::Cell; use std::cmp; use std::fmt::{self, Display}; use std::iter; use std::rc::Rc; use crate::util::nodemap::{FxHashMap, FxHashSet}; pub struct SelectionContext<'cx, 'gcx: 'cx + 'tcx, 'tcx: 'cx> { infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, /// Freshener used specifically for entries 
on the obligation /// stack. This ensures that all entries on the stack at one time /// will have the same set of placeholder entries, which is /// important for checking for trait bounds that recursively /// require themselves. freshener: TypeFreshener<'cx, 'gcx, 'tcx>, /// If `true`, indicates that the evaluation should be conservative /// and consider the possibility of types outside this crate. /// This comes up primarily when resolving ambiguity. Imagine /// there is some trait reference `$0: Bar` where `$0` is an /// inference variable. If `intercrate` is true, then we can never /// say for sure that this reference is not implemented, even if /// there are *no impls at all for `Bar`*, because `$0` could be /// bound to some type in a downstream crate that implements /// `Bar`. This is the suitable mode for coherence. Elsewhere, /// though, we set this to false, because we are only interested /// in types that the user could actually have written --- in /// other words, we consider `$0: Bar` to be unimplemented if /// there is no type that the user could *actually name* that /// would satisfy it. This avoids crippling inference, basically. intercrate: Option<IntercrateMode>, intercrate_ambiguity_causes: Option<Vec<IntercrateAmbiguityCause>>, /// Controls whether or not to filter out negative impls when selecting. /// This is used in librustdoc to distinguish between the lack of an impl /// and a negative impl allow_negative_impls: bool, /// The mode that trait queries run in, which informs our error handling /// policy. In essence, canonicalized queries need their errors propagated /// rather than immediately reported because we do not have accurate spans.
query_mode: TraitQueryMode, } #[derive(Clone, Debug)] pub enum IntercrateAmbiguityCause { DownstreamCrate { trait_desc: String, self_desc: Option<String>, }, UpstreamCrateUpdate { trait_desc: String, self_desc: Option<String>, }, } impl IntercrateAmbiguityCause { /// Emits notes when the overlap is caused by complex intercrate ambiguities. /// See #23980 for details. pub fn add_intercrate_ambiguity_hint<'a, 'tcx>( &self, err: &mut errors::DiagnosticBuilder<'_>, ) { err.note(&self.intercrate_ambiguity_hint()); } pub fn intercrate_ambiguity_hint(&self) -> String { match self { &IntercrateAmbiguityCause::DownstreamCrate { ref trait_desc, ref self_desc, } => { let self_desc = if let &Some(ref ty) = self_desc { format!(" for type `{}`", ty) } else { String::new() }; format!( "downstream crates may implement trait `{}`{}", trait_desc, self_desc ) } &IntercrateAmbiguityCause::UpstreamCrateUpdate { ref trait_desc, ref self_desc, } => { let self_desc = if let &Some(ref ty) = self_desc { format!(" for type `{}`", ty) } else { String::new() }; format!( "upstream crates may add new impl of trait `{}`{} \ in future versions", trait_desc, self_desc ) } } } } // A stack that walks back up the stack frame. struct TraitObligationStack<'prev, 'tcx: 'prev> { obligation: &'prev TraitObligation<'tcx>, /// Trait ref from `obligation` but "freshened" with the /// selection-context's freshener. Used to check for recursion. fresh_trait_ref: ty::PolyTraitRef<'tcx>, /// Starts out equal to `depth` -- if, during evaluation, we /// encounter a cycle, then we will set this flag to the minimum /// depth of that cycle for all participants in the cycle. These /// participants will then forego caching their results. This is /// not the most efficient solution, but it addresses #60010. 
The /// problem we are trying to prevent: /// /// - If you have `A: AutoTrait` requires `B: AutoTrait` and `C: NonAutoTrait` /// - `B: AutoTrait` requires `A: AutoTrait` (coinductive cycle, ok) /// - `C: NonAutoTrait` requires `A: AutoTrait` (non-coinductive cycle, not ok) /// /// you don't want to cache that `B: AutoTrait` or `A: AutoTrait` /// is `EvaluatedToOk`; this is because they were only considered /// ok on the premise that if `A: AutoTrait` held, but we indeed /// encountered a problem (later on) with `A: AutoTrait`. So we /// currently set a flag on the stack node for `B: AutoTrait` (as /// well as the second instance of `A: AutoTrait`) to suppress /// caching. /// /// This is a simple, targeted fix. A more-performant fix requires /// deeper changes, but would permit more caching: we could /// basically defer caching until we have fully evaluated the /// tree, and then cache the entire tree at once. In any case, the /// performance impact here shouldn't be so horrible: every time /// this is hit, we do cache at least one trait, so we only /// evaluate each member of a cycle up to N times, where N is the /// length of the cycle. This means the performance impact is /// bounded and we shouldn't have any terrible worst-cases. reached_depth: Cell<usize>, previous: TraitObligationStackList<'prev, 'tcx>, /// Number of parent frames plus one -- so the topmost frame has depth 1. depth: usize, } #[derive(Clone, Default)] pub struct SelectionCache<'tcx> { hashmap: Lock< FxHashMap<ty::TraitRef<'tcx>, WithDepNode<SelectionResult<'tcx, SelectionCandidate<'tcx>>>>, >, } /// The selection process begins by considering all impls, where /// clauses, and so forth that might resolve an obligation. Sometimes /// we'll be able to say definitively that (e.g.) an impl does not /// apply to the obligation: perhaps it is defined for `usize` but the /// obligation is for `int`. In that case, we drop the impl out of the /// list. But the other cases are considered *candidates*.
/// /// For selection to succeed, there must be exactly one matching /// candidate. If the obligation is fully known, this is guaranteed /// by coherence. However, if the obligation contains type parameters /// or variables, there may be multiple such impls. /// /// It is not a real problem if multiple matching impls exist because /// of type variables - it just means the obligation isn't sufficiently /// elaborated. In that case we report an ambiguity, and the caller can /// try again after more type information has been gathered or report a /// "type annotations required" error. /// /// However, with type parameters, this can be a real problem - type /// parameters don't unify with regular types, but they *can* unify /// with variables from blanket impls, and (unless we know its bounds /// will always be satisfied) picking the blanket impl will be wrong /// for at least *some* substitutions. To make this concrete, if we have /// /// trait AsDebug { type Out : fmt::Debug; fn debug(self) -> Self::Out; } /// impl<T: fmt::Debug> AsDebug for T { /// type Out = T; /// fn debug(self) -> fmt::Debug { self } /// } /// fn foo<T: AsDebug>(t: T) { println!("{:?}", <T as AsDebug>::debug(t)); } /// /// we can't just use the impl to resolve the <T as AsDebug> obligation /// - a type from another crate (that doesn't implement fmt::Debug) could /// implement AsDebug. /// /// Because where-clauses match the type exactly, multiple clauses can /// only match if there are unresolved variables, and we can mostly just /// report this ambiguity in that case. This is still a problem - we can't /// *do anything* with ambiguities that involve only regions. This is issue /// #21974. /// /// If a single where-clause matches and there are no inference /// variables left, then it definitely matches and we can just select /// it. /// /// In fact, we even select the where-clause when the obligation contains /// inference variables. 
This can lead to inference making "leaps of logic", /// for example in this situation: /// /// pub trait Foo<T> { fn foo(&self) -> T; } /// impl<T> Foo<()> for T { fn foo(&self) { } } /// impl Foo<bool> for bool { fn foo(&self) -> bool { *self } } /// /// pub fn foo<T>(t: T) where T: Foo<bool> { /// println!("{:?}", <T as Foo<_>>::foo(&t)); /// } /// fn main() { foo(false); } /// /// Here the obligation <T as Foo<$0>> can be matched by both the blanket /// impl and the where-clause. We select the where-clause and unify $0=bool, /// so the program prints "false". However, if the where-clause is omitted, /// the blanket impl is selected, we unify $0=(), and the program prints /// "()". /// /// Exactly the same issues apply to projection and object candidates, except /// that we can have both a projection candidate and a where-clause candidate /// for the same obligation. In that case either would do (except that /// different "leaps of logic" would occur if inference variables are /// present), and we just pick the where-clause. This is, for example, /// required for associated types to work in default impls, as the bounds /// are visible both as projection bounds and as where-clauses from the /// parameter environment. #[derive(PartialEq, Eq, Debug, Clone)] enum SelectionCandidate<'tcx> { /// If has_nested is false, there are no *further* obligations BuiltinCandidate { has_nested: bool, }, ParamCandidate(ty::PolyTraitRef<'tcx>), ImplCandidate(DefId), AutoImplCandidate(DefId), /// This is a trait matching with a projected type as `Self`, and /// we found an applicable bound in the trait definition. ProjectionCandidate, /// Implementation of a `Fn`-family trait by one of the anonymous types /// generated for a `||` expression. ClosureCandidate, /// Implementation of a `Generator` trait by one of the anonymous types /// generated for a generator.
GeneratorCandidate, /// Implementation of a `Fn`-family trait by one of the anonymous /// types generated for a fn pointer type (e.g., `fn(int)->int`) FnPointerCandidate, TraitAliasCandidate(DefId), ObjectCandidate, BuiltinObjectCandidate, BuiltinUnsizeCandidate, } impl<'a, 'tcx> ty::Lift<'tcx> for SelectionCandidate<'a> { type Lifted = SelectionCandidate<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> { Some(match *self { BuiltinCandidate { has_nested } => BuiltinCandidate { has_nested }, ImplCandidate(def_id) => ImplCandidate(def_id), AutoImplCandidate(def_id) => AutoImplCandidate(def_id), ProjectionCandidate => ProjectionCandidate, ClosureCandidate => ClosureCandidate, GeneratorCandidate => GeneratorCandidate, FnPointerCandidate => FnPointerCandidate, TraitAliasCandidate(def_id) => TraitAliasCandidate(def_id), ObjectCandidate => ObjectCandidate, BuiltinObjectCandidate => BuiltinObjectCandidate, BuiltinUnsizeCandidate => BuiltinUnsizeCandidate, ParamCandidate(ref trait_ref) => { return tcx.lift(trait_ref).map(ParamCandidate); } }) } } struct SelectionCandidateSet<'tcx> { // a list of candidates that definitely apply to the current // obligation (meaning: types unify). vec: Vec<SelectionCandidate<'tcx>>, // if this is true, then there were candidates that might or might // not have applied, but we couldn't tell. This occurs when some // of the input types are type variables, in which case there are // various "builtin" rules that might or might not trigger. ambiguous: bool, } #[derive(PartialEq, Eq, Debug, Clone)] struct EvaluatedCandidate<'tcx> { candidate: SelectionCandidate<'tcx>, evaluation: EvaluationResult, } /// When does the builtin impl for `T: Trait` apply? enum BuiltinImplConditions<'tcx> { /// The impl is conditional on T1,T2,.. : Trait Where(ty::Binder<Vec<Ty<'tcx>>>), /// There is no built-in impl. There may be some other /// candidate (a where-clause or user-defined impl). 
None, /// It is unknown whether there is an impl. Ambiguous, } #[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)] /// The result of trait evaluation. The order is important /// here as the evaluation of a list is the maximum of the /// evaluations. /// /// The evaluation results are ordered: /// - `EvaluatedToOk` implies `EvaluatedToOkModuloRegions` /// implies `EvaluatedToAmbig` implies `EvaluatedToUnknown` /// - `EvaluatedToErr` implies `EvaluatedToRecur` /// - the "union" of evaluation results is equal to their maximum - /// all the "potential success" candidates can potentially succeed, /// so they are noops when unioned with a definite error, and within /// the categories it's easy to see that the unions are correct. pub enum EvaluationResult { /// Evaluation successful EvaluatedToOk, /// Evaluation successful, but there were unevaluated region obligations EvaluatedToOkModuloRegions, /// Evaluation is known to be ambiguous - it *might* hold for some /// assignment of inference variables, but it might not. /// /// While this has the same meaning as `EvaluatedToUnknown` - we can't /// know whether this obligation holds or not - it is the result we /// would get with an empty stack, and therefore is cacheable. EvaluatedToAmbig, /// Evaluation failed because of recursion involving inference /// variables. We are somewhat imprecise there, so we don't actually /// know the real result. /// /// This can't be trivially cached for the same reason as `EvaluatedToRecur`. EvaluatedToUnknown, /// Evaluation failed because we encountered an obligation we are already /// trying to prove on this branch. /// /// We know this branch can't be a part of a minimal proof-tree for /// the "root" of our cycle, because then we could cut out the recursion /// and maintain a valid proof tree. 
However, this does not mean /// that all the obligations on this branch do not hold - it's possible /// that we entered this branch "speculatively", and that there /// might be some other way to prove this obligation that does not /// go through this cycle - so we can't cache this as a failure. /// /// For example, suppose we have this: /// /// ```rust,ignore (pseudo-Rust) /// pub trait Trait { fn xyz(); } /// // This impl is "useless", but we can still have /// // an `impl Trait for SomeUnsizedType` somewhere. /// impl<T: Trait + Sized> Trait for T { fn xyz() {} } /// /// pub fn foo<T: Trait + ?Sized>() { /// <T as Trait>::xyz(); /// } /// ``` /// /// When checking `foo`, we have to prove `T: Trait`. This basically /// translates into this: /// /// ```plain,ignore /// (T: Trait + Sized →_\impl T: Trait), T: Trait ⊢ T: Trait /// ``` /// /// When we try to prove it, we first try the first option, which /// recurses. This shows us that the impl is "useless" -- it won't /// tell us that `T: Trait` unless it already implemented `Trait` /// by some other means. However, that does not mean that `T: Trait` /// does not hold, because of the bound (which can indeed be satisfied /// by `SomeUnsizedType` from another crate). // // FIXME: when an `EvaluatedToRecur` goes past its parent root, we // ought to convert it to an `EvaluatedToErr`, because we know // there definitely isn't a proof tree for that obligation. Not // doing so is still sound -- there isn't any proof tree, so the // branch still can't be a part of a minimal one -- but does not re-enable caching. EvaluatedToRecur, /// Evaluation failed. EvaluatedToErr, } impl EvaluationResult { /// Returns `true` if this evaluation result is known to apply, even /// considering outlives constraints. pub fn must_apply_considering_regions(self) -> bool { self == EvaluatedToOk } /// Returns `true` if this evaluation result is known to apply, ignoring /// outlives constraints.
pub fn must_apply_modulo_regions(self) -> bool { self <= EvaluatedToOkModuloRegions } pub fn may_apply(self) -> bool { match self { EvaluatedToOk | EvaluatedToOkModuloRegions | EvaluatedToAmbig | EvaluatedToUnknown => { true } EvaluatedToErr | EvaluatedToRecur => false, } } fn is_stack_dependent(self) -> bool { match self { EvaluatedToUnknown | EvaluatedToRecur => true, EvaluatedToOk | EvaluatedToOkModuloRegions | EvaluatedToAmbig | EvaluatedToErr => false, } } } impl_stable_hash_for!(enum self::EvaluationResult { EvaluatedToOk, EvaluatedToOkModuloRegions, EvaluatedToAmbig, EvaluatedToUnknown, EvaluatedToRecur, EvaluatedToErr }); #[derive(Copy, Clone, Debug, PartialEq, Eq)] /// Indicates that trait evaluation caused overflow. pub struct OverflowError; impl_stable_hash_for!(struct OverflowError {}); impl<'tcx> From<OverflowError> for SelectionError<'tcx> { fn from(OverflowError: OverflowError) -> SelectionError<'tcx> { SelectionError::Overflow } } #[derive(Clone, Default)] pub struct EvaluationCache<'tcx> { hashmap: Lock<FxHashMap<ty::PolyTraitRef<'tcx>, WithDepNode<EvaluationResult>>>, } impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { pub fn new(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>) -> SelectionContext<'cx, 'gcx, 'tcx> { SelectionContext { infcx, freshener: infcx.freshener(), intercrate: None, intercrate_ambiguity_causes: None, allow_negative_impls: false, query_mode: TraitQueryMode::Standard, } } pub fn intercrate( infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, mode: IntercrateMode, ) -> SelectionContext<'cx, 'gcx, 'tcx> { debug!("intercrate({:?})", mode); SelectionContext { infcx, freshener: infcx.freshener(), intercrate: Some(mode), intercrate_ambiguity_causes: None, allow_negative_impls: false, query_mode: TraitQueryMode::Standard, } } pub fn with_negative( infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, allow_negative_impls: bool, ) -> SelectionContext<'cx, 'gcx, 'tcx> { debug!("with_negative({:?})", allow_negative_impls); SelectionContext { infcx, freshener: 
infcx.freshener(), intercrate: None, intercrate_ambiguity_causes: None, allow_negative_impls, query_mode: TraitQueryMode::Standard, } } pub fn with_query_mode( infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, query_mode: TraitQueryMode, ) -> SelectionContext<'cx, 'gcx, 'tcx> { debug!("with_query_mode({:?})", query_mode); SelectionContext { infcx, freshener: infcx.freshener(), intercrate: None, intercrate_ambiguity_causes: None, allow_negative_impls: false, query_mode, } } /// Enables tracking of intercrate ambiguity causes. These are /// used in coherence to give improved diagnostics. We don't do /// this until we detect a coherence error because it can lead to /// false overflow results (#47139) and because it costs /// computation time. pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) { assert!(self.intercrate.is_some()); assert!(self.intercrate_ambiguity_causes.is_none()); self.intercrate_ambiguity_causes = Some(vec![]); debug!("selcx: enable_tracking_intercrate_ambiguity_causes"); } /// Gets the intercrate ambiguity causes collected since tracking /// was enabled and disables tracking at the same time. If /// tracking is not enabled, just returns an empty vector. pub fn take_intercrate_ambiguity_causes(&mut self) -> Vec<IntercrateAmbiguityCause> { assert!(self.intercrate.is_some()); self.intercrate_ambiguity_causes.take().unwrap_or(vec![]) } pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> { self.infcx } pub fn tcx(&self) -> TyCtxt<'cx, 'gcx, 'tcx> { self.infcx.tcx } pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> { self.infcx } /////////////////////////////////////////////////////////////////////////// // Selection // // The selection phase tries to identify *how* an obligation will // be resolved. For example, it will identify which impl or // parameter bound is to be used. The process can be inconclusive // if the self type in the obligation is not fully inferred. Selection // can result in an error in one of two ways: // // 1. 
If no applicable impl or parameter bound can be found. // 2. If the output type parameters in the obligation do not match // those specified by the impl/bound. For example, if the obligation // is `Vec<Foo>:Iterable<Bar>`, but the impl specifies // `impl<T> Iterable<T> for Vec<T>`, than an error would result. /// Attempts to satisfy the obligation. If successful, this will affect the surrounding /// type environment by performing unification. pub fn select( &mut self, obligation: &TraitObligation<'tcx>, ) -> SelectionResult<'tcx, Selection<'tcx>> { debug!("select({:?})", obligation); debug_assert!(!obligation.predicate.has_escaping_bound_vars()); let pec = &ProvisionalEvaluationCache::default(); let stack = self.push_stack(TraitObligationStackList::empty(pec), obligation); let candidate = match self.candidate_from_obligation(&stack) { Err(SelectionError::Overflow) => { // In standard mode, overflow must have been caught and reported // earlier. assert!(self.query_mode == TraitQueryMode::Canonical); return Err(SelectionError::Overflow); } Err(e) => { return Err(e); } Ok(None) => { return Ok(None); } Ok(Some(candidate)) => candidate, }; match self.confirm_candidate(obligation, candidate) { Err(SelectionError::Overflow) => { assert!(self.query_mode == TraitQueryMode::Canonical); Err(SelectionError::Overflow) } Err(e) => Err(e), Ok(candidate) => Ok(Some(candidate)), } } /////////////////////////////////////////////////////////////////////////// // EVALUATION // // Tests whether an obligation can be selected or whether an impl // can be applied to particular types. It skips the "confirmation" // step and hence completely ignores output type parameters. // // The result is "true" if the obligation *may* hold and "false" if // we can be sure it does not. /// Evaluates whether the obligation `obligation` can be satisfied (by any means). 
pub fn predicate_may_hold_fatal(&mut self, obligation: &PredicateObligation<'tcx>) -> bool {
    debug!("predicate_may_hold_fatal({:?})", obligation);

    // This fatal query is a stopgap that should only be used in standard mode,
    // where we do not expect overflow to be propagated.
    assert!(self.query_mode == TraitQueryMode::Standard);

    // In standard mode overflow is reported eagerly rather than propagated,
    // so an `Err(OverflowError)` here would indicate a bug -- hence the `expect`.
    self.evaluate_root_obligation(obligation)
        .expect("Overflow should be caught earlier in standard query mode")
        .may_apply()
}

/// Evaluates whether the obligation `obligation` can be satisfied
/// and returns an `EvaluationResult`. This is meant for the
/// *initial* call.
pub fn evaluate_root_obligation(
    &mut self,
    obligation: &PredicateObligation<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
    self.evaluation_probe(|this| {
        // A root obligation starts with an empty obligation stack and a
        // fresh provisional cache; recursion grows the stack from here.
        this.evaluate_predicate_recursively(
            TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
            obligation.clone(),
        )
    })
}

/// Runs `op` inside an inference probe (so any inference side effects
/// it causes are rolled back afterwards), then post-processes the
/// result: if the probe added region constraints, the result is
/// weakened to at least `EvaluatedToOkModuloRegions`, because those
/// constraints were discarded along with the snapshot and so were
/// never actually checked.
fn evaluation_probe(
    &mut self,
    op: impl FnOnce(&mut Self) -> Result<EvaluationResult, OverflowError>,
) -> Result<EvaluationResult, OverflowError> {
    self.infcx.probe(|snapshot| -> Result<EvaluationResult, OverflowError> {
        let result = op(self)?;

        match self.infcx.region_constraints_added_in_snapshot(snapshot) {
            // No region constraints accumulated: the result stands as-is.
            None => Ok(result),
            // Region constraints were added (and rolled back with the
            // probe), so we may only claim success modulo regions.
            Some(_) => Ok(result.max(EvaluatedToOkModuloRegions)),
        }
    })
}

/// Evaluates the predicates in `predicates` recursively. Note that
/// this applies projections in the predicates, and therefore
/// is run within an inference probe.
fn evaluate_predicates_recursively<'a, 'o, I>( &mut self, stack: TraitObligationStackList<'o, 'tcx>, predicates: I, ) -> Result<EvaluationResult, OverflowError> where I: IntoIterator<Item = PredicateObligation<'tcx>>, 'tcx: 'a, { let mut result = EvaluatedToOk; for obligation in predicates { let eval = self.evaluate_predicate_recursively(stack, obligation.clone())?; debug!( "evaluate_predicate_recursively({:?}) = {:?}", obligation, eval ); if let EvaluatedToErr = eval { // fast-path - EvaluatedToErr is the top of the lattice, // so we don't need to look on the other predicates. return Ok(EvaluatedToErr); } else { result = cmp::max(result, eval); } } Ok(result) } fn evaluate_predicate_recursively<'o>( &mut self, previous_stack: TraitObligationStackList<'o, 'tcx>, obligation: PredicateObligation<'tcx>, ) -> Result<EvaluationResult, OverflowError> { debug!("evaluate_predicate_recursively(previous_stack={:?}, obligation={:?})", previous_stack.head(), obligation); // Previous_stack stores a TraitObligatiom, while 'obligation' is // a PredicateObligation. These are distinct types, so we can't // use any Option combinator method that would force them to be // the same match previous_stack.head() { Some(h) => self.check_recursion_limit(&obligation, h.obligation)?, None => self.check_recursion_limit(&obligation, &obligation)? } match obligation.predicate { ty::Predicate::Trait(ref t) => { debug_assert!(!t.has_escaping_bound_vars()); let obligation = obligation.with(t.clone()); self.evaluate_trait_predicate_recursively(previous_stack, obligation) } ty::Predicate::Subtype(ref p) => { // does this code ever run? match self.infcx .subtype_predicate(&obligation.cause, obligation.param_env, p) { Some(Ok(InferOk { mut obligations, .. 
})) => { self.add_depth(obligations.iter_mut(), obligation.recursion_depth); self.evaluate_predicates_recursively(previous_stack,obligations.into_iter()) } Some(Err(_)) => Ok(EvaluatedToErr), None => Ok(EvaluatedToAmbig), } } ty::Predicate::WellFormed(ty) => match ty::wf::obligations( self.infcx, obligation.param_env, obligation.cause.body_id, ty, obligation.cause.span, ) { Some(mut obligations) => { self.add_depth(obligations.iter_mut(), obligation.recursion_depth); self.evaluate_predicates_recursively(previous_stack, obligations.into_iter()) } None => Ok(EvaluatedToAmbig), }, ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) => { // we do not consider region relationships when // evaluating trait matches Ok(EvaluatedToOkModuloRegions) } ty::Predicate::ObjectSafe(trait_def_id) => { if self.tcx().is_object_safe(trait_def_id) { Ok(EvaluatedToOk) } else { Ok(EvaluatedToErr) } } ty::Predicate::Projection(ref data) => { let project_obligation = obligation.with(data.clone()); match project::poly_project_and_unify_type(self, &project_obligation) { Ok(Some(mut subobligations)) => { self.add_depth(subobligations.iter_mut(), obligation.recursion_depth); let result = self.evaluate_predicates_recursively( previous_stack, subobligations.into_iter(), ); if let Some(key) = ProjectionCacheKey::from_poly_projection_predicate(self, data) { self.infcx.projection_cache.borrow_mut().complete(key); } result } Ok(None) => Ok(EvaluatedToAmbig), Err(_) => Ok(EvaluatedToErr), } } ty::Predicate::ClosureKind(closure_def_id, closure_substs, kind) => { match self.infcx.closure_kind(closure_def_id, closure_substs) { Some(closure_kind) => { if closure_kind.extends(kind) { Ok(EvaluatedToOk) } else { Ok(EvaluatedToErr) } } None => Ok(EvaluatedToAmbig), } } ty::Predicate::ConstEvaluatable(def_id, substs) => { let tcx = self.tcx(); match tcx.lift_to_global(&(obligation.param_env, substs)) { Some((param_env, substs)) => { let instance = ty::Instance::resolve(tcx.global_tcx(), 
param_env, def_id, substs); if let Some(instance) = instance { let cid = GlobalId { instance, promoted: None, }; match self.tcx().const_eval(param_env.and(cid)) { Ok(_) => Ok(EvaluatedToOk), Err(_) => Ok(EvaluatedToErr), } } else { Ok(EvaluatedToErr) } } None => { // Inference variables still left in param_env or substs. Ok(EvaluatedToAmbig) } } } } } fn evaluate_trait_predicate_recursively<'o>( &mut self, previous_stack: TraitObligationStackList<'o, 'tcx>, mut obligation: TraitObligation<'tcx>, ) -> Result<EvaluationResult, OverflowError> { debug!("evaluate_trait_predicate_recursively({:?})", obligation); if self.intercrate.is_none() && obligation.is_global() && obligation .param_env .caller_bounds .iter() .all(|bound| bound.needs_subst()) { // If a param env has no global bounds, global obligations do not // depend on its particular value in order to work, so we can clear // out the param env and get better caching. debug!( "evaluate_trait_predicate_recursively({:?}) - in global", obligation ); obligation.param_env = obligation.param_env.without_caller_bounds(); } let stack = self.push_stack(previous_stack, &obligation); let fresh_trait_ref = stack.fresh_trait_ref; if let Some(result) = self.check_evaluation_cache(obligation.param_env, fresh_trait_ref) { debug!("CACHE HIT: EVAL({:?})={:?}", fresh_trait_ref, result); return Ok(result); } // Check if this is a match for something already on the // stack. If so, we don't want to insert the result into the // main cache (it is cycle dependent) nor the provisional // cache (which is meant for things that have completed but // for a "backedge" -- this result *is* the backedge). 
if let Some(cycle_result) = self.check_evaluation_cycle(&stack) { return Ok(cycle_result); } let (result, dep_node) = self.in_task(|this| this.evaluate_stack(&stack)); let result = result?; let reached_depth = stack.reached_depth.get(); if reached_depth >= stack.depth { debug!("CACHE MISS: EVAL({:?})={:?}", fresh_trait_ref, result); self.insert_evaluation_cache(obligation.param_env, fresh_trait_ref, dep_node, result); } else { debug!( "evaluate_trait_predicate_recursively: skipping cache because {:?} \ is a cycle participant (at depth {}, reached depth {})", fresh_trait_ref, stack.depth, reached_depth, ); } Ok(result) } /// If there is any previous entry on the stack that precisely /// matches this obligation, then we can assume that the /// obligation is satisfied for now (still all other conditions /// must be met of course). One obvious case this comes up is /// marker traits like `Send`. Think of a linked list: /// /// struct List<T> { data: T, next: Option<Box<List<T>>> } /// /// `Box<List<T>>` will be `Send` if `T` is `Send` and /// `Option<Box<List<T>>>` is `Send`, and in turn /// `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is /// `Send`. /// /// Note that we do this comparison using the `fresh_trait_ref` /// fields. Because these have all been freshened using /// `self.freshener`, we can be sure that (a) this will not /// affect the inferencer state and (b) that if we see two /// fresh regions with the same index, they refer to the same /// unbound type variable. 
fn check_evaluation_cycle( &mut self, stack: &TraitObligationStack<'_, 'tcx>, ) -> Option<EvaluationResult> { if let Some(cycle_depth) = stack.iter() .skip(1) // skip top-most frame .find(|prev| stack.obligation.param_env == prev.obligation.param_env && stack.fresh_trait_ref == prev.fresh_trait_ref) .map(|stack| stack.depth) { debug!( "evaluate_stack({:?}) --> recursive at depth {}", stack.fresh_trait_ref, cycle_depth, ); // If we have a stack like `A B C D E A`, where the top of // the stack is the final `A`, then this will iterate over // `A, E, D, C, B` -- i.e., all the participants apart // from the cycle head. We mark them as participating in a // cycle. This suppresses caching for those nodes. See // `in_cycle` field for more details. stack.update_reached_depth(cycle_depth); // Subtle: when checking for a coinductive cycle, we do // not compare using the "freshened trait refs" (which // have erased regions) but rather the fully explicit // trait refs. This is important because it's only a cycle // if the regions match exactly. let cycle = stack.iter().skip(1).take_while(|s| s.depth >= cycle_depth); let cycle = cycle.map(|stack| ty::Predicate::Trait(stack.obligation.predicate)); if self.coinductive_match(cycle) { debug!( "evaluate_stack({:?}) --> recursive, coinductive", stack.fresh_trait_ref ); Some(EvaluatedToOk) } else { debug!( "evaluate_stack({:?}) --> recursive, inductive", stack.fresh_trait_ref ); Some(EvaluatedToRecur) } } else { None } } fn evaluate_stack<'o>( &mut self, stack: &TraitObligationStack<'o, 'tcx>, ) -> Result<EvaluationResult, OverflowError> { // In intercrate mode, whenever any of the types are unbound, // there can always be an impl. Even if there are no impls in // this crate, perhaps the type would be unified with // something from another crate that does provide an impl. // // In intra mode, we must still be conservative. The reason is // that we want to avoid cycles. 
Imagine an impl like: // // impl<T:Eq> Eq for Vec<T> // // and a trait reference like `$0 : Eq` where `$0` is an // unbound variable. When we evaluate this trait-reference, we // will unify `$0` with `Vec<$1>` (for some fresh variable // `$1`), on the condition that `$1 : Eq`. We will then wind // up with many candidates (since that are other `Eq` impls // that apply) and try to winnow things down. This results in // a recursive evaluation that `$1 : Eq` -- as you can // imagine, this is just where we started. To avoid that, we // check for unbound variables and return an ambiguous (hence possible) // match if we've seen this trait before. // // This suffices to allow chains like `FnMut` implemented in // terms of `Fn` etc, but we could probably make this more // precise still. let unbound_input_types = stack .fresh_trait_ref .skip_binder() .input_types() .any(|ty| ty.is_fresh()); // this check was an imperfect workaround for a bug n the old // intercrate mode, it should be removed when that goes away. if unbound_input_types && self.intercrate == Some(IntercrateMode::Issue43355) { debug!( "evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous", stack.fresh_trait_ref ); // Heuristics: show the diagnostics when there are no candidates in crate. 
if self.intercrate_ambiguity_causes.is_some() { debug!("evaluate_stack: intercrate_ambiguity_causes is some"); if let Ok(candidate_set) = self.assemble_candidates(stack) { if !candidate_set.ambiguous && candidate_set.vec.is_empty() { let trait_ref = stack.obligation.predicate.skip_binder().trait_ref; let self_ty = trait_ref.self_ty(); let cause = IntercrateAmbiguityCause::DownstreamCrate { trait_desc: trait_ref.to_string(), self_desc: if self_ty.has_concrete_skeleton() { Some(self_ty.to_string()) } else { None }, }; debug!("evaluate_stack: pushing cause = {:?}", cause); self.intercrate_ambiguity_causes .as_mut() .unwrap() .push(cause); } } } return Ok(EvaluatedToAmbig); } if unbound_input_types && stack.iter().skip(1).any(|prev| { stack.obligation.param_env == prev.obligation.param_env && self.match_fresh_trait_refs(&stack.fresh_trait_ref, &prev.fresh_trait_ref) }) { debug!( "evaluate_stack({:?}) --> unbound argument, recursive --> giving up", stack.fresh_trait_ref ); return Ok(EvaluatedToUnknown); } match self.candidate_from_obligation(stack) { Ok(Some(c)) => self.evaluate_candidate(stack, &c), Ok(None) => Ok(EvaluatedToAmbig), Err(Overflow) => Err(OverflowError), Err(..) => Ok(EvaluatedToErr), } } /// For defaulted traits, we use a co-inductive strategy to solve, so /// that recursion is ok. This routine returns true if the top of the /// stack (`cycle[0]`): /// /// - is a defaulted trait, /// - it also appears in the backtrace at some position `X`, /// - all the predicates at positions `X..` between `X` an the top are /// also defaulted traits. 
pub fn coinductive_match<I>(&mut self, cycle: I) -> bool where I: Iterator<Item = ty::Predicate<'tcx>>, { let mut cycle = cycle; cycle.all(|predicate| self.coinductive_predicate(predicate)) } fn coinductive_predicate(&self, predicate: ty::Predicate<'tcx>) -> bool { let result = match predicate { ty::Predicate::Trait(ref data) => self.tcx().trait_is_auto(data.def_id()), _ => false, }; debug!("coinductive_predicate({:?}) = {:?}", predicate, result); result } /// Further evaluate `candidate` to decide whether all type parameters match and whether nested /// obligations are met. Returns whether `candidate` remains viable after this further /// scrutiny. fn evaluate_candidate<'o>( &mut self, stack: &TraitObligationStack<'o, 'tcx>, candidate: &SelectionCandidate<'tcx>, ) -> Result<EvaluationResult, OverflowError> { debug!( "evaluate_candidate: depth={} candidate={:?}", stack.obligation.recursion_depth, candidate ); let result = self.evaluation_probe(|this| { let candidate = (*candidate).clone(); match this.confirm_candidate(stack.obligation, candidate) { Ok(selection) => this.evaluate_predicates_recursively( stack.list(), selection.nested_obligations().into_iter() ), Err(..) 
=> Ok(EvaluatedToErr), } })?; debug!( "evaluate_candidate: depth={} result={:?}", stack.obligation.recursion_depth, result ); Ok(result) } fn check_evaluation_cache( &self, param_env: ty::ParamEnv<'tcx>, trait_ref: ty::PolyTraitRef<'tcx>, ) -> Option<EvaluationResult> { let tcx = self.tcx(); if self.can_use_global_caches(param_env) { let cache = tcx.evaluation_cache.hashmap.borrow(); if let Some(cached) = cache.get(&trait_ref) { return Some(cached.get(tcx)); } } self.infcx .evaluation_cache .hashmap .borrow() .get(&trait_ref) .map(|v| v.get(tcx)) } fn insert_evaluation_cache( &mut self, param_env: ty::ParamEnv<'tcx>, trait_ref: ty::PolyTraitRef<'tcx>, dep_node: DepNodeIndex, result: EvaluationResult, ) { // Avoid caching results that depend on more than just the trait-ref // - the stack can create recursion. if result.is_stack_dependent() { return; } if self.can_use_global_caches(param_env) { if let Some(trait_ref) = self.tcx().lift_to_global(&trait_ref) { debug!( "insert_evaluation_cache(trait_ref={:?}, candidate={:?}) global", trait_ref, result, ); // This may overwrite the cache with the same value // FIXME: Due to #50507 this overwrites the different values // This should be changed to use HashMapExt::insert_same // when that is fixed self.tcx() .evaluation_cache .hashmap .borrow_mut() .insert(trait_ref, WithDepNode::new(dep_node, result)); return; } } debug!( "insert_evaluation_cache(trait_ref={:?}, candidate={:?})", trait_ref, result, ); self.infcx .evaluation_cache .hashmap .borrow_mut() .insert(trait_ref, WithDepNode::new(dep_node, result)); } // For various reasons, it's possible for a subobligation // to have a *lower* recursion_depth than the obligation used to create it. // Projection sub-obligations may be returned from the projection cache, // which results in obligations with an 'old' recursion_depth. 
// Additionally, methods like ty::wf::obligations and // InferCtxt.subtype_predicate produce subobligations without // taking in a 'parent' depth, causing the generated subobligations // to have a recursion_depth of 0 // // To ensure that obligation_depth never decreasees, we force all subobligations // to have at least the depth of the original obligation. fn add_depth<T: 'cx, I: Iterator<Item = &'cx mut Obligation<'tcx, T>>>(&self, it: I, min_depth: usize) { it.for_each(|o| o.recursion_depth = cmp::max(min_depth, o.recursion_depth) + 1); } // Check that the recursion limit has not been exceeded. // // The weird return type of this function allows it to be used with the 'try' (?) // operator within certain functions fn check_recursion_limit<T: Display + TypeFoldable<'tcx>, V: Display + TypeFoldable<'tcx>>( &self, obligation: &Obligation<'tcx, T>, error_obligation: &Obligation<'tcx, V> ) -> Result<(), OverflowError> { let recursion_limit = *self.infcx.tcx.sess.recursion_limit.get(); if obligation.recursion_depth >= recursion_limit { match self.query_mode { TraitQueryMode::Standard => { self.infcx().report_overflow_error(error_obligation, true); } TraitQueryMode::Canonical => { return Err(OverflowError); } } } Ok(()) } /////////////////////////////////////////////////////////////////////////// // CANDIDATE ASSEMBLY // // The selection process begins by examining all in-scope impls, // caller obligations, and so forth and assembling a list of // candidates. See the [rustc guide] for more details. // // [rustc guide]: // https://rust-lang.github.io/rustc-guide/traits/resolution.html#candidate-assembly fn candidate_from_obligation<'o>( &mut self, stack: &TraitObligationStack<'o, 'tcx>, ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { // Watch out for overflow. This intentionally bypasses (and does // not update) the cache. self.check_recursion_limit(&stack.obligation, &stack.obligation)?; // Check the cache. 
Note that we freshen the trait-ref // separately rather than using `stack.fresh_trait_ref` -- // this is because we want the unbound variables to be // replaced with fresh types starting from index 0. let cache_fresh_trait_pred = self.infcx.freshen(stack.obligation.predicate.clone()); debug!( "candidate_from_obligation(cache_fresh_trait_pred={:?}, obligation={:?})", cache_fresh_trait_pred, stack ); debug_assert!(!stack.obligation.predicate.has_escaping_bound_vars()); if let Some(c) = self.check_candidate_cache(stack.obligation.param_env, &cache_fresh_trait_pred) { debug!("CACHE HIT: SELECT({:?})={:?}", cache_fresh_trait_pred, c); return c; } // If no match, compute result and insert into cache. // // FIXME(nikomatsakis) -- this cache is not taking into // account cycles that may have occurred in forming the // candidate. I don't know of any specific problems that // result but it seems awfully suspicious. let (candidate, dep_node) = self.in_task(|this| this.candidate_from_obligation_no_cache(stack)); debug!( "CACHE MISS: SELECT({:?})={:?}", cache_fresh_trait_pred, candidate ); self.insert_candidate_cache( stack.obligation.param_env, cache_fresh_trait_pred, dep_node, candidate.clone(), ); candidate } fn in_task<OP, R>(&mut self, op: OP) -> (R, DepNodeIndex) where OP: FnOnce(&mut Self) -> R, { let (result, dep_node) = self.tcx() .dep_graph .with_anon_task(DepKind::TraitSelect, || op(self)); self.tcx().dep_graph.read_index(dep_node); (result, dep_node) } // Treat negative impls as unimplemented fn filter_negative_impls( &self, candidate: SelectionCandidate<'tcx>, ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { if let ImplCandidate(def_id) = candidate { if !self.allow_negative_impls && self.tcx().impl_polarity(def_id) == hir::ImplPolarity::Negative { return Err(Unimplemented); } } Ok(Some(candidate)) } fn candidate_from_obligation_no_cache<'o>( &mut self, stack: &TraitObligationStack<'o, 'tcx>, ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { if 
stack.obligation.predicate.references_error() { // If we encounter a `Error`, we generally prefer the // most "optimistic" result in response -- that is, the // one least likely to report downstream errors. But // because this routine is shared by coherence and by // trait selection, there isn't an obvious "right" choice // here in that respect, so we opt to just return // ambiguity and let the upstream clients sort it out. return Ok(None); } if let Some(conflict) = self.is_knowable(stack) { debug!("coherence stage: not knowable"); if self.intercrate_ambiguity_causes.is_some() { debug!("evaluate_stack: intercrate_ambiguity_causes is some"); // Heuristics: show the diagnostics when there are no candidates in crate. if let Ok(candidate_set) = self.assemble_candidates(stack) { let mut no_candidates_apply = true; { let evaluated_candidates = candidate_set .vec .iter() .map(|c| self.evaluate_candidate(stack, &c)); for ec in evaluated_candidates { match ec { Ok(c) => { if c.may_apply() { no_candidates_apply = false; break; } } Err(e) => return Err(e.into()), } } } if !candidate_set.ambiguous && no_candidates_apply { let trait_ref = stack.obligation.predicate.skip_binder().trait_ref; let self_ty = trait_ref.self_ty(); let trait_desc = trait_ref.to_string(); let self_desc = if self_ty.has_concrete_skeleton() { Some(self_ty.to_string()) } else { None }; let cause = if let Conflict::Upstream = conflict { IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc, } } else { IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc, } }; debug!("evaluate_stack: pushing cause = {:?}", cause); self.intercrate_ambiguity_causes .as_mut() .unwrap() .push(cause); } } } return Ok(None); } let candidate_set = self.assemble_candidates(stack)?; if candidate_set.ambiguous { debug!("candidate set contains ambig"); return Ok(None); } let mut candidates = candidate_set.vec; debug!( "assembled {} candidates for {:?}: {:?}", candidates.len(), stack, candidates ); // At 
this point, we know that each of the entries in the // candidate set is *individually* applicable. Now we have to // figure out if they contain mutual incompatibilities. This // frequently arises if we have an unconstrained input type -- // for example, we are looking for $0:Eq where $0 is some // unconstrained type variable. In that case, we'll get a // candidate which assumes $0 == int, one that assumes $0 == // usize, etc. This spells an ambiguity. // If there is more than one candidate, first winnow them down // by considering extra conditions (nested obligations and so // forth). We don't winnow if there is exactly one // candidate. This is a relatively minor distinction but it // can lead to better inference and error-reporting. An // example would be if there was an impl: // // impl<T:Clone> Vec<T> { fn push_clone(...) { ... } } // // and we were to see some code `foo.push_clone()` where `boo` // is a `Vec<Bar>` and `Bar` does not implement `Clone`. If // we were to winnow, we'd wind up with zero candidates. // Instead, we select the right impl now but report `Bar does // not implement Clone`. if candidates.len() == 1 { return self.filter_negative_impls(candidates.pop().unwrap()); } // Winnow, but record the exact outcome of evaluation, which // is needed for specialization. Propagate overflow if it occurs. let mut candidates = candidates .into_iter() .map(|c| match self.evaluate_candidate(stack, &c) { Ok(eval) if eval.may_apply() => Ok(Some(EvaluatedCandidate { candidate: c, evaluation: eval, })), Ok(_) => Ok(None), Err(OverflowError) => Err(Overflow), }) .flat_map(Result::transpose) .collect::<Result<Vec<_>, _>>()?; debug!( "winnowed to {} candidates for {:?}: {:?}", candidates.len(), stack, candidates ); // If there are STILL multiple candidates, we can further // reduce the list by dropping duplicates -- including // resolving specializations. 
if candidates.len() > 1 { let mut i = 0; while i < candidates.len() { let is_dup = (0..candidates.len()).filter(|&j| i != j).any(|j| { self.candidate_should_be_dropped_in_favor_of(&candidates[i], &candidates[j]) }); if is_dup { debug!( "Dropping candidate #{}/{}: {:?}", i, candidates.len(), candidates[i] ); candidates.swap_remove(i); } else { debug!( "Retaining candidate #{}/{}: {:?}", i, candidates.len(), candidates[i] ); i += 1; // If there are *STILL* multiple candidates, give up // and report ambiguity. if i > 1 { debug!("multiple matches, ambig"); return Ok(None); } } } } // If there are *NO* candidates, then there are no impls -- // that we know of, anyway. Note that in the case where there // are unbound type variables within the obligation, it might // be the case that you could still satisfy the obligation // from another crate by instantiating the type variables with // a type from another crate that does have an impl. This case // is checked for in `evaluate_stack` (and hence users // who might care about this case, like coherence, should use // that function). if candidates.is_empty() { return Err(Unimplemented); } // Just one candidate left. self.filter_negative_impls(candidates.pop().unwrap().candidate) } fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Option<Conflict> { debug!("is_knowable(intercrate={:?})", self.intercrate); if !self.intercrate.is_some() { return None; } let obligation = &stack.obligation; let predicate = self.infcx() .resolve_vars_if_possible(&obligation.predicate); // Okay to skip binder because of the nature of the // trait-ref-is-knowable check, which does not care about // bound regions. 
let trait_ref = predicate.skip_binder().trait_ref; let result = coherence::trait_ref_is_knowable(self.tcx(), trait_ref); if let ( Some(Conflict::Downstream { used_to_be_broken: true, }), Some(IntercrateMode::Issue43355), ) = (result, self.intercrate) { debug!("is_knowable: IGNORING conflict to be bug-compatible with #43355"); None } else { result } } /// Returns `true` if the global caches can be used. /// Do note that if the type itself is not in the /// global tcx, the local caches will be used. fn can_use_global_caches(&self, param_env: ty::ParamEnv<'tcx>) -> bool { // If there are any where-clauses in scope, then we always use // a cache local to this particular scope. Otherwise, we // switch to a global cache. We used to try and draw // finer-grained distinctions, but that led to a serious of // annoying and weird bugs like #22019 and #18290. This simple // rule seems to be pretty clearly safe and also still retains // a very high hit rate (~95% when compiling rustc). if !param_env.caller_bounds.is_empty() { return false; } // Avoid using the master cache during coherence and just rely // on the local cache. This effectively disables caching // during coherence. It is really just a simplification to // avoid us having to fear that coherence results "pollute" // the master cache. Since coherence executes pretty quickly, // it's not worth going to more trouble to increase the // hit-rate I don't think. if self.intercrate.is_some() { return false; } // Otherwise, we can use the global cache. 
true } fn check_candidate_cache( &mut self, param_env: ty::ParamEnv<'tcx>, cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>, ) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> { let tcx = self.tcx(); let trait_ref = &cache_fresh_trait_pred.skip_binder().trait_ref; if self.can_use_global_caches(param_env) { let cache = tcx.selection_cache.hashmap.borrow(); if let Some(cached) = cache.get(&trait_ref) { return Some(cached.get(tcx)); } } self.infcx .selection_cache .hashmap .borrow() .get(trait_ref) .map(|v| v.get(tcx)) } /// Determines whether can we safely cache the result /// of selecting an obligation. This is almost always 'true', /// except when dealing with certain ParamCandidates. /// /// Ordinarily, a ParamCandidate will contain no inference variables, /// since it was usually produced directly from a DefId. However, /// certain cases (currently only librustdoc's blanket impl finder), /// a ParamEnv may be explicitly constructed with inference types. /// When this is the case, we do *not* want to cache the resulting selection /// candidate. This is due to the fact that it might not always be possible /// to equate the obligation's trait ref and the candidate's trait ref, /// if more constraints end up getting added to an inference variable. 
    ///
    /// Because of this, we always want to re-run the full selection
    /// process for our obligation the next time we see it, since
    /// we might end up picking a different SelectionCandidate (or none at all)
    fn can_cache_candidate(&self,
        result: &SelectionResult<'tcx, SelectionCandidate<'tcx>>
    ) -> bool {
        // A ParamCandidate whose trait-ref mentions inference variables must
        // not be cached (see doc comment above for why).
        match result {
            Ok(Some(SelectionCandidate::ParamCandidate(trait_ref))) => {
                !trait_ref.skip_binder().input_types()
                    .any(|t| t.walk().any(|t_| t_.is_ty_infer()))
            },
            _ => true
        }
    }

    /// Records a selection result in the appropriate cache: the global cache
    /// when permitted and the values can be lifted to the global tcx,
    /// otherwise the inference-context-local cache.
    fn insert_candidate_cache(
        &mut self,
        param_env: ty::ParamEnv<'tcx>,
        cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
        dep_node: DepNodeIndex,
        candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>,
    ) {
        let tcx = self.tcx();
        let trait_ref = cache_fresh_trait_pred.skip_binder().trait_ref;

        if !self.can_cache_candidate(&candidate) {
            debug!("insert_candidate_cache(trait_ref={:?}, candidate={:?} -\
                 candidate is not cacheable", trait_ref, candidate);
            return;
        }

        if self.can_use_global_caches(param_env) {
            if let Err(Overflow) = candidate {
                // Don't cache overflow globally; we only produce this
                // in certain modes.
            } else if let Some(trait_ref) = tcx.lift_to_global(&trait_ref) {
                if let Some(candidate) = tcx.lift_to_global(&candidate) {
                    debug!(
                        "insert_candidate_cache(trait_ref={:?}, candidate={:?}) global",
                        trait_ref, candidate,
                    );
                    // This may overwrite the cache with the same value
                    tcx.selection_cache
                        .hashmap
                        .borrow_mut()
                        .insert(trait_ref, WithDepNode::new(dep_node, candidate));
                    return;
                }
            }
        }

        debug!(
            "insert_candidate_cache(trait_ref={:?}, candidate={:?}) local",
            trait_ref, candidate,
        );
        self.infcx
            .selection_cache
            .hashmap
            .borrow_mut()
            .insert(trait_ref, WithDepNode::new(dep_node, candidate));
    }

    /// Assembles the set of candidates that could possibly satisfy
    /// `stack.obligation`, dispatching to the specialized `assemble_*`
    /// helpers depending on which trait is being matched.
    fn assemble_candidates<'o>(
        &mut self,
        stack: &TraitObligationStack<'o, 'tcx>,
    ) -> Result<SelectionCandidateSet<'tcx>, SelectionError<'tcx>> {
        let TraitObligationStack {
            obligation, ..
        } = *stack;
        // Re-resolve the predicate so that the `self_ty` check below sees
        // the most up-to-date inference results.
        let ref obligation = Obligation {
            param_env: obligation.param_env,
            cause: obligation.cause.clone(),
            recursion_depth: obligation.recursion_depth,
            predicate: self.infcx()
                .resolve_vars_if_possible(&obligation.predicate),
        };

        if obligation.predicate.skip_binder().self_ty().is_ty_var() {
            // Self is a type variable (e.g., `_: AsRef<str>`).
            //
            // This is somewhat problematic, as the current scheme can't really
            // handle it turning to be a projection. This does end up as truly
            // ambiguous in most cases anyway.
            //
            // Take the fast path out - this also improves
            // performance by preventing assemble_candidates_from_impls from
            // matching every impl for this trait.
            return Ok(SelectionCandidateSet {
                vec: vec![],
                ambiguous: true,
            });
        }

        let mut candidates = SelectionCandidateSet {
            vec: Vec::new(),
            ambiguous: false,
        };

        self.assemble_candidates_for_trait_alias(obligation, &mut candidates)?;

        // Other bounds. Consider both in-scope bounds from fn decl
        // and applicable impls. There is a certain set of precedence rules here.
        let def_id = obligation.predicate.def_id();
        let lang_items = self.tcx().lang_items();

        if lang_items.copy_trait() == Some(def_id) {
            debug!(
                "obligation self ty is {:?}",
                obligation.predicate.skip_binder().self_ty()
            );

            // User-defined copy impls are permitted, but only for
            // structs and enums.
            self.assemble_candidates_from_impls(obligation, &mut candidates)?;

            // For other types, we'll use the builtin rules.
            let copy_conditions = self.copy_clone_conditions(obligation);
            self.assemble_builtin_bound_candidates(copy_conditions, &mut candidates)?;
        } else if lang_items.sized_trait() == Some(def_id) {
            // Sized is never implementable by end-users, it is
            // always automatically computed.
            let sized_conditions = self.sized_conditions(obligation);
            self.assemble_builtin_bound_candidates(sized_conditions, &mut candidates)?;
        } else if lang_items.unsize_trait() == Some(def_id) {
            self.assemble_candidates_for_unsizing(obligation, &mut candidates);
        } else {
            if lang_items.clone_trait() == Some(def_id) {
                // Same builtin conditions as `Copy`, i.e., every type which has builtin support
                // for `Copy` also has builtin support for `Clone`, + tuples and arrays of `Clone`
                // types have builtin support for `Clone`.
                let clone_conditions = self.copy_clone_conditions(obligation);
                self.assemble_builtin_bound_candidates(clone_conditions, &mut candidates)?;
            }

            self.assemble_generator_candidates(obligation, &mut candidates)?;
            self.assemble_closure_candidates(obligation, &mut candidates)?;
            self.assemble_fn_pointer_candidates(obligation, &mut candidates)?;
            self.assemble_candidates_from_impls(obligation, &mut candidates)?;
            self.assemble_candidates_from_object_ty(obligation, &mut candidates);
        }

        self.assemble_candidates_from_projected_tys(obligation, &mut candidates);
        self.assemble_candidates_from_caller_bounds(stack, &mut candidates)?;
        // Auto implementations have lower priority, so we only
        // consider triggering a default if there is no other impl that can apply.
        if candidates.vec.is_empty() {
            self.assemble_candidates_from_auto_impls(obligation, &mut candidates)?;
        }
        debug!("candidate list size: {}", candidates.vec.len());
        Ok(candidates)
    }

    /// Adds a `ProjectionCandidate` when `Self` is a projection or opaque
    /// type whose definition-site bounds can satisfy the obligation.
    fn assemble_candidates_from_projected_tys(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) {
        debug!("assemble_candidates_for_projected_tys({:?})", obligation);

        // before we go into the whole placeholder thing, just
        // quickly check if the self-type is a projection at all.
        match obligation.predicate.skip_binder().trait_ref.self_ty().sty {
            ty::Projection(_) | ty::Opaque(..) => {}
            ty::Infer(ty::TyVar(_)) => {
                span_bug!(
                    obligation.cause.span,
                    "Self=_ should have been handled by assemble_candidates"
                );
            }
            _ => return,
        }

        let result = self.infcx.probe(|snapshot| {
            self.match_projection_obligation_against_definition_bounds(
                obligation,
                snapshot,
            )
        });

        if result {
            candidates.vec.push(ProjectionCandidate);
        }
    }

    /// Checks whether the obligation can be satisfied by a bound written on
    /// the definition of its projection/opaque self type (after elaborating
    /// the bounds). Commits the successful match; returns `true` on success.
    fn match_projection_obligation_against_definition_bounds(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        snapshot: &CombinedSnapshot<'_, 'tcx>,
    ) -> bool {
        let poly_trait_predicate = self.infcx()
            .resolve_vars_if_possible(&obligation.predicate);
        let (placeholder_trait_predicate, placeholder_map) = self.infcx()
            .replace_bound_vars_with_placeholders(&poly_trait_predicate);
        debug!(
            "match_projection_obligation_against_definition_bounds: \
             placeholder_trait_predicate={:?}",
            placeholder_trait_predicate,
        );

        let (def_id, substs) = match placeholder_trait_predicate.trait_ref.self_ty().sty {
            ty::Projection(ref data) => (data.trait_ref(self.tcx()).def_id, data.substs),
            ty::Opaque(def_id, substs) => (def_id, substs),
            _ => {
                span_bug!(
                    obligation.cause.span,
                    "match_projection_obligation_against_definition_bounds() called \
                     but self-ty is not a projection: {:?}",
                    placeholder_trait_predicate.trait_ref.self_ty()
                );
            }
        };
        debug!(
            "match_projection_obligation_against_definition_bounds: \
             def_id={:?}, substs={:?}",
            def_id, substs
        );

        let predicates_of = self.tcx().predicates_of(def_id);
        let bounds = predicates_of.instantiate(self.tcx(), substs);
        debug!(
            "match_projection_obligation_against_definition_bounds: \
             bounds={:?}",
            bounds
        );

        let elaborated_predicates = util::elaborate_predicates(self.tcx(), bounds.predicates);
        let matching_bound = elaborated_predicates
            .filter_to_traits()
            .find(|bound| {
                // Probe so that failed match attempts leave no side effects.
                self.infcx.probe(|_| {
                    self.match_projection(
                        obligation,
                        bound.clone(),
                        placeholder_trait_predicate.trait_ref.clone(),
                        &placeholder_map,
                        snapshot,
                    )
                })
            });

        debug!(
            "match_projection_obligation_against_definition_bounds: \
             matching_bound={:?}",
            matching_bound
        );
        match matching_bound {
            None => false,
            Some(bound) => {
                // Repeat the successful match, if any, this time outside of a probe.
                let result = self.match_projection(
                    obligation,
                    bound,
                    placeholder_trait_predicate.trait_ref.clone(),
                    &placeholder_map,
                    snapshot,
                );

                assert!(result);
                true
            }
        }
    }

    /// Attempts to unify the placeholder-instantiated obligation trait-ref
    /// with the candidate `trait_bound`, leak-checking the placeholders.
    fn match_projection(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        trait_bound: ty::PolyTraitRef<'tcx>,
        placeholder_trait_ref: ty::TraitRef<'tcx>,
        placeholder_map: &PlaceholderMap<'tcx>,
        snapshot: &CombinedSnapshot<'_, 'tcx>,
    ) -> bool {
        debug_assert!(!placeholder_trait_ref.has_escaping_bound_vars());
        self.infcx
            .at(&obligation.cause, obligation.param_env)
            .sup(ty::Binder::dummy(placeholder_trait_ref), trait_bound)
            .is_ok()
            &&
            self.infcx.leak_check(false, placeholder_map, snapshot).is_ok()
    }

    /// Given an obligation like `<SomeTrait for T>`, search the obligations that the caller
    /// supplied to find out whether it is listed among them.
    ///
    /// Never affects inference environment.
    fn assemble_candidates_from_caller_bounds<'o>(
        &mut self,
        stack: &TraitObligationStack<'o, 'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        debug!(
            "assemble_candidates_from_caller_bounds({:?})",
            stack.obligation
        );

        let all_bounds = stack
            .obligation
            .param_env
            .caller_bounds
            .iter()
            .filter_map(|o| o.to_opt_poly_trait_ref());

        // Micro-optimization: filter out predicates relating to different traits.
        let matching_bounds =
            all_bounds.filter(|p| p.def_id() == stack.obligation.predicate.def_id());

        // Keep only those bounds which may apply, and propagate overflow if it occurs.
        let mut param_candidates = vec![];
        for bound in matching_bounds {
            let wc = self.evaluate_where_clause(stack, bound.clone())?;
            if wc.may_apply() {
                param_candidates.push(ParamCandidate(bound));
            }
        }

        candidates.vec.extend(param_candidates);

        Ok(())
    }

    /// Evaluates whether the where-clause `where_clause_trait_ref` could
    /// satisfy `stack.obligation`, inside an evaluation probe so no
    /// inference side effects are committed.
    fn evaluate_where_clause<'o>(
        &mut self,
        stack: &TraitObligationStack<'o, 'tcx>,
        where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
    ) -> Result<EvaluationResult, OverflowError> {
        self.evaluation_probe(|this| {
            match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
                Ok(obligations) => {
                    this.evaluate_predicates_recursively(stack.list(), obligations.into_iter())
                }
                Err(()) => Ok(EvaluatedToErr),
            }
        })
    }

    /// Adds a `GeneratorCandidate` when the obligation's trait is the
    /// generator lang item and `Self` is a generator type; marks ambiguity
    /// if `Self` is still an unresolved type variable.
    fn assemble_generator_candidates(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        if self.tcx().lang_items().gen_trait() != Some(obligation.predicate.def_id()) {
            return Ok(());
        }

        // Okay to skip binder because the substs on generator types never
        // touch bound regions, they just capture the in-scope
        // type/region parameters.
        let self_ty = *obligation.self_ty().skip_binder();
        match self_ty.sty {
            ty::Generator(..) => {
                debug!(
                    "assemble_generator_candidates: self_ty={:?} obligation={:?}",
                    self_ty, obligation
                );

                candidates.vec.push(GeneratorCandidate);
            }
            ty::Infer(ty::TyVar(_)) => {
                debug!("assemble_generator_candidates: ambiguous self-type");
                candidates.ambiguous = true;
            }
            _ => {}
        }

        Ok(())
    }

    /// Checks for the artificial impl that the compiler will create for an obligation like `X :
    /// FnMut<..>` where `X` is a closure type.
    ///
    /// Note: the type parameters on a closure candidate are modeled as *output* type
    /// parameters and hence do not affect whether this trait is a match or not. They will be
    /// unified during the confirmation step.
    fn assemble_closure_candidates(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        // Only relevant when the obligation's trait is one of the `Fn*` traits.
        let kind = match self.tcx()
            .lang_items()
            .fn_trait_kind(obligation.predicate.def_id())
        {
            Some(k) => k,
            None => {
                return Ok(());
            }
        };

        // Okay to skip binder because the substs on closure types never
        // touch bound regions, they just capture the in-scope
        // type/region parameters
        match obligation.self_ty().skip_binder().sty {
            ty::Closure(closure_def_id, closure_substs) => {
                debug!(
                    "assemble_unboxed_candidates: kind={:?} obligation={:?}",
                    kind, obligation
                );
                match self.infcx.closure_kind(closure_def_id, closure_substs) {
                    Some(closure_kind) => {
                        debug!(
                            "assemble_unboxed_candidates: closure_kind = {:?}",
                            closure_kind
                        );
                        if closure_kind.extends(kind) {
                            candidates.vec.push(ClosureCandidate);
                        }
                    }
                    None => {
                        // Kind not yet inferred: optimistically add the
                        // candidate; confirmation will constrain it.
                        debug!("assemble_unboxed_candidates: closure_kind not yet known");
                        candidates.vec.push(ClosureCandidate);
                    }
                }
            }
            ty::Infer(ty::TyVar(_)) => {
                debug!("assemble_unboxed_closure_candidates: ambiguous self-type");
                candidates.ambiguous = true;
            }
            _ => {}
        }

        Ok(())
    }

    /// Implement one of the `Fn()` family for a fn pointer.
    fn assemble_fn_pointer_candidates(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        // We provide impl of all fn traits for fn pointers.
        if self.tcx()
            .lang_items()
            .fn_trait_kind(obligation.predicate.def_id())
            .is_none()
        {
            return Ok(());
        }

        // Okay to skip binder because what we are inspecting doesn't involve bound regions
        let self_ty = *obligation.self_ty().skip_binder();
        match self_ty.sty {
            ty::Infer(ty::TyVar(_)) => {
                debug!("assemble_fn_pointer_candidates: ambiguous self-type");
                candidates.ambiguous = true; // could wind up being a fn() type
            }
            // provide an impl, but only for suitable `fn` pointers
            ty::FnDef(..) | ty::FnPtr(_) => {
                // Only safe, Rust-ABI, non-variadic signatures qualify.
                if let ty::FnSig {
                    unsafety: hir::Unsafety::Normal,
                    abi: Abi::Rust,
                    c_variadic: false,
                    ..
                } = self_ty.fn_sig(self.tcx()).skip_binder()
                {
                    candidates.vec.push(FnPointerCandidate);
                }
            }
            _ => {}
        }

        Ok(())
    }

    /// Search for impls that might apply to `obligation`.
    fn assemble_candidates_from_impls(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        debug!(
            "assemble_candidates_from_impls(obligation={:?})",
            obligation
        );

        self.tcx().for_each_relevant_impl(
            obligation.predicate.def_id(),
            obligation.predicate.skip_binder().trait_ref.self_ty(),
            |impl_def_id| {
                // Probe: we only want to know whether the impl *could* match;
                // no inference side effects should be committed here.
                self.infcx.probe(|snapshot| {
                    if let Ok(_substs) = self.match_impl(impl_def_id, obligation, snapshot) {
                        candidates.vec.push(ImplCandidate(impl_def_id));
                    }
                });
            },
        );

        Ok(())
    }

    /// Adds an `AutoImplCandidate` for auto traits (`auto trait Foo`),
    /// unless the self type makes that unsound or unnecessary.
    fn assemble_candidates_from_auto_impls(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        // Okay to skip binder here because the tests we do below do not involve bound regions.
        let self_ty = *obligation.self_ty().skip_binder();
        debug!("assemble_candidates_from_auto_impls(self_ty={:?})", self_ty);

        let def_id = obligation.predicate.def_id();

        if self.tcx().trait_is_auto(def_id) {
            match self_ty.sty {
                ty::Dynamic(..) => {
                    // For object types, we don't know what the closed
                    // over types are. This means we conservatively
                    // say nothing; a candidate may be added by
                    // `assemble_candidates_from_object_ty`.
                }
                ty::Foreign(..) => {
                    // Since the contents of foreign types is unknown,
                    // we don't add any `..` impl. Default traits could
                    // still be provided by a manual implementation for
                    // this trait and type.
                }
                ty::Param(..) | ty::Projection(..) => {
                    // In these cases, we don't know what the actual
                    // type is. Therefore, we cannot break it down
                    // into its constituent types. So we don't
                    // consider the `..` impl but instead just add no
                    // candidates: this means that typeck will only
                    // succeed if there is another reason to believe
                    // that this obligation holds. That could be a
                    // where-clause or, in the case of an object type,
                    // it could be that the object type lists the
                    // trait (e.g., `Foo+Send : Send`). See
                    // `compile-fail/typeck-default-trait-impl-send-param.rs`
                    // for an example of a test case that exercises
                    // this path.
                }
                ty::Infer(ty::TyVar(_)) => {
                    // the auto impl might apply, we don't know
                    candidates.ambiguous = true;
                }
                ty::Generator(_, _, movability)
                    if self.tcx().lang_items().unpin_trait() == Some(def_id) =>
                {
                    match movability {
                        hir::GeneratorMovability::Static => {
                            // Immovable generators are never `Unpin`, so
                            // suppress the normal auto-impl candidate for it.
                        }
                        hir::GeneratorMovability::Movable => {
                            // Movable generators are always `Unpin`, so add an
                            // unconditional builtin candidate.
                            candidates.vec.push(BuiltinCandidate {
                                has_nested: false,
                            });
                        }
                    }
                }

                _ => candidates.vec.push(AutoImplCandidate(def_id.clone())),
            }
        }

        Ok(())
    }

    /// Search for impls that might apply to `obligation`.
    fn assemble_candidates_from_object_ty(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) {
        debug!(
            "assemble_candidates_from_object_ty(self_ty={:?})",
            obligation.self_ty().skip_binder()
        );

        self.infcx.probe(|_snapshot| {
            // The code below doesn't care about regions, and the
            // self-ty here doesn't escape this probe, so just erase
            // any LBR.
            let self_ty = self.tcx().erase_late_bound_regions(&obligation.self_ty());
            let poly_trait_ref = match self_ty.sty {
                ty::Dynamic(ref data, ..) => {
                    if data.auto_traits()
                        .any(|did| did == obligation.predicate.def_id())
                    {
                        debug!(
                            "assemble_candidates_from_object_ty: matched builtin bound, \
                             pushing candidate"
                        );
                        candidates.vec.push(BuiltinObjectCandidate);
                        return;
                    }

                    if let Some(principal) = data.principal() {
                        principal.with_self_ty(self.tcx(), self_ty)
                    } else {
                        // Only auto-trait bounds exist.
                        return;
                    }
                }
                ty::Infer(ty::TyVar(_)) => {
                    debug!("assemble_candidates_from_object_ty: ambiguous");
                    candidates.ambiguous = true; // could wind up being an object type
                    return;
                }
                _ => return,
            };

            debug!(
                "assemble_candidates_from_object_ty: poly_trait_ref={:?}",
                poly_trait_ref
            );

            // Count only those upcast versions that match the trait-ref
            // we are looking for. Specifically, do not only check for the
            // correct trait, but also the correct type parameters.
            // For example, we may be trying to upcast `Foo` to `Bar<i32>`,
            // but `Foo` is declared as `trait Foo : Bar<u32>`.
            let upcast_trait_refs = util::supertraits(self.tcx(), poly_trait_ref)
                .filter(|upcast_trait_ref| {
                    self.infcx.probe(|_| {
                        let upcast_trait_ref = upcast_trait_ref.clone();
                        self.match_poly_trait_ref(obligation, upcast_trait_ref)
                            .is_ok()
                    })
                })
                .count();

            if upcast_trait_refs > 1 {
                // Can be upcast in many ways; need more type information.
                candidates.ambiguous = true;
            } else if upcast_trait_refs == 1 {
                candidates.vec.push(ObjectCandidate);
            }
        })
    }

    /// Search for unsizing that might apply to `obligation`.
    fn assemble_candidates_for_unsizing(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) {
        // We currently never consider higher-ranked obligations e.g.
        // `for<'a> &'a T: Unsize<Trait+'a>` to be implemented. This is not
        // because they are a priori invalid, and we could potentially add support
        // for them later, it's just that there isn't really a strong need for it.
        // A `T: Unsize<U>` obligation is always used as part of a `T: CoerceUnsize<U>`
        // impl, and those are generally applied to concrete types.
        //
        // That said, one might try to write a fn with a where clause like
        //     for<'a> Foo<'a, T>: Unsize<Foo<'a, Trait>>
        // where the `'a` is kind of orthogonal to the relevant part of the `Unsize`.
        // Still, you'd be more likely to write that where clause as
        //     T: Trait
        // so it seems ok if we (conservatively) fail to accept that `Unsize`
        // obligation above. Should be possible to extend this in the future.
        let source = match obligation.self_ty().no_bound_vars() {
            Some(t) => t,
            None => {
                // Don't add any candidates if there are bound regions.
                return;
            }
        };
        let target = obligation
            .predicate
            .skip_binder()
            .trait_ref
            .substs
            .type_at(1);

        debug!(
            "assemble_candidates_for_unsizing(source={:?}, target={:?})",
            source, target
        );

        let may_apply = match (&source.sty, &target.sty) {
            // Trait+Kx+'a -> Trait+Ky+'b (upcasts).
            (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
                // Upcasts permit two things:
                //
                // 1. Dropping builtin bounds, e.g., `Foo+Send` to `Foo`
                // 2. Tightening the region bound, e.g., `Foo+'a` to `Foo+'b` if `'a : 'b`
                //
                // Note that neither of these changes requires any
                // change at runtime. Eventually this will be
                // generalized.
                //
                // We always upcast when we can because of reason
                // #2 (region bounds).
                data_a.principal_def_id() == data_b.principal_def_id()
                    && data_b.auto_traits()
                        // All of a's auto traits need to be in b's auto traits.
                        .all(|b| data_a.auto_traits().any(|a| a == b))
            }

            // T -> Trait.
            (_, &ty::Dynamic(..)) => true,

            // Ambiguous handling is below T -> Trait, because inference
            // variables can still implement Unsize<Trait> and nested
            // obligations will have the final say (likely deferred).
            (&ty::Infer(ty::TyVar(_)), _) | (_, &ty::Infer(ty::TyVar(_))) => {
                debug!("assemble_candidates_for_unsizing: ambiguous");
                candidates.ambiguous = true;
                false
            }

            // [T; n] -> [T].
            (&ty::Array(..), &ty::Slice(_)) => true,

            // Struct<T> -> Struct<U>.
            (&ty::Adt(def_id_a, _), &ty::Adt(def_id_b, _)) if def_id_a.is_struct() => {
                def_id_a == def_id_b
            }

            // (.., T) -> (.., U).
            (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => tys_a.len() == tys_b.len(),

            _ => false,
        };

        if may_apply {
            candidates.vec.push(BuiltinUnsizeCandidate);
        }
    }

    /// Adds a `TraitAliasCandidate` when the obligation's trait is a
    /// trait alias (`trait Foo = Bar;`).
    fn assemble_candidates_for_trait_alias(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        // Okay to skip binder here because the tests we do below do not involve bound regions.
        let self_ty = *obligation.self_ty().skip_binder();
        debug!("assemble_candidates_for_trait_alias(self_ty={:?})", self_ty);

        let def_id = obligation.predicate.def_id();

        if self.tcx().is_trait_alias(def_id) {
            candidates.vec.push(TraitAliasCandidate(def_id.clone()));
        }

        Ok(())
    }

    ///////////////////////////////////////////////////////////////////////////
    // WINNOW
    //
    // Winnowing is the process of attempting to resolve ambiguity by
    // probing further. During the winnowing process, we unify all
    // type variables and then we also attempt to evaluate recursive
    // bounds to see if they are satisfied.

    /// Returns `true` if `victim` should be dropped in favor of
    /// `other`. Generally speaking we will drop duplicate
    /// candidates and prefer where-clause candidates.
    ///
    /// See the comment for "SelectionCandidate" for more details.
    fn candidate_should_be_dropped_in_favor_of<'o>(
        &mut self,
        victim: &EvaluatedCandidate<'tcx>,
        other: &EvaluatedCandidate<'tcx>,
    ) -> bool {
        // Identical candidates are duplicates; drop the victim.
        if victim.candidate == other.candidate {
            return true;
        }

        // Check if a bound would previously have been removed when normalizing
        // the param_env so that it can be given the lowest priority. See
        // #50825 for the motivation for this.
        let is_global =
            |cand: &ty::PolyTraitRef<'_>| cand.is_global() && !cand.has_late_bound_regions();

        match other.candidate {
            // Prefer BuiltinCandidate { has_nested: false } to anything else.
// This is a fix for #53123 and prevents winnowing from accidentally extending the // lifetime of a variable. BuiltinCandidate { has_nested: false } => true, ParamCandidate(ref cand) => match victim.candidate { AutoImplCandidate(..) => { bug!( "default implementations shouldn't be recorded \ when there are other valid candidates" ); } // Prefer BuiltinCandidate { has_nested: false } to anything else. // This is a fix for #53123 and prevents winnowing from accidentally extending the // lifetime of a variable. BuiltinCandidate { has_nested: false } => false, ImplCandidate(..) | ClosureCandidate | GeneratorCandidate | FnPointerCandidate | BuiltinObjectCandidate | BuiltinUnsizeCandidate | BuiltinCandidate { .. } | TraitAliasCandidate(..) => { // Global bounds from the where clause should be ignored // here (see issue #50825). Otherwise, we have a where // clause so don't go around looking for impls. !is_global(cand) } ObjectCandidate | ProjectionCandidate => { // Arbitrarily give param candidates priority // over projection and object candidates. !is_global(cand) } ParamCandidate(..) => false, }, ObjectCandidate | ProjectionCandidate => match victim.candidate { AutoImplCandidate(..) => { bug!( "default implementations shouldn't be recorded \ when there are other valid candidates" ); } // Prefer BuiltinCandidate { has_nested: false } to anything else. // This is a fix for #53123 and prevents winnowing from accidentally extending the // lifetime of a variable. BuiltinCandidate { has_nested: false } => false, ImplCandidate(..) | ClosureCandidate | GeneratorCandidate | FnPointerCandidate | BuiltinObjectCandidate | BuiltinUnsizeCandidate | BuiltinCandidate { .. } | TraitAliasCandidate(..) => true, ObjectCandidate | ProjectionCandidate => { // Arbitrarily give param candidates priority // over projection and object candidates. true } ParamCandidate(ref cand) => is_global(cand), }, ImplCandidate(other_def) => { // See if we can toss out `victim` based on specialization. 
// This requires us to know *for sure* that the `other` impl applies // i.e., EvaluatedToOk: if other.evaluation.must_apply_modulo_regions() { match victim.candidate { ImplCandidate(victim_def) => { let tcx = self.tcx().global_tcx(); return tcx.specializes((other_def, victim_def)) || tcx.impls_are_allowed_to_overlap( other_def, victim_def).is_some(); } ParamCandidate(ref cand) => { // Prefer the impl to a global where clause candidate. return is_global(cand); } _ => (), } } false } ClosureCandidate | GeneratorCandidate | FnPointerCandidate | BuiltinObjectCandidate | BuiltinUnsizeCandidate | BuiltinCandidate { has_nested: true } => { match victim.candidate { ParamCandidate(ref cand) => { // Prefer these to a global where-clause bound // (see issue #50825) is_global(cand) && other.evaluation.must_apply_modulo_regions() } _ => false, } } _ => false, } } /////////////////////////////////////////////////////////////////////////// // BUILTIN BOUNDS // // These cover the traits that are built-in to the language // itself: `Copy`, `Clone` and `Sized`. 
    /// Pushes a `BuiltinCandidate` (or records ambiguity) based on the
    /// precomputed `conditions` for a built-in trait (`Sized`/`Copy`/`Clone`).
    fn assemble_builtin_bound_candidates<'o>(
        &mut self,
        conditions: BuiltinImplConditions<'tcx>,
        candidates: &mut SelectionCandidateSet<'tcx>,
    ) -> Result<(), SelectionError<'tcx>> {
        match conditions {
            BuiltinImplConditions::Where(nested) => {
                debug!("builtin_bound: nested={:?}", nested);
                candidates.vec.push(BuiltinCandidate {
                    has_nested: nested.skip_binder().len() > 0,
                });
            }
            BuiltinImplConditions::None => {}
            BuiltinImplConditions::Ambiguous => {
                debug!("assemble_builtin_bound_candidates: ambiguous builtin");
                candidates.ambiguous = true;
            }
        }

        Ok(())
    }

    /// Computes the builtin-impl conditions for `Sized` on the obligation's
    /// self type: `Where(..)` carries the nested types that must themselves
    /// be `Sized`; `None` means no builtin impl; `Ambiguous` means the self
    /// type is an unresolved type variable.
    fn sized_conditions(
        &mut self,
        obligation: &TraitObligation<'tcx>,
    ) -> BuiltinImplConditions<'tcx> {
        use self::BuiltinImplConditions::{Ambiguous, None, Where};

        // NOTE: binder moved to (*)
        let self_ty = self.infcx
            .shallow_resolve(obligation.predicate.skip_binder().self_ty());

        match self_ty.sty {
            ty::Infer(ty::IntVar(_))
            | ty::Infer(ty::FloatVar(_))
            | ty::Uint(_)
            | ty::Int(_)
            | ty::Bool
            | ty::Float(_)
            | ty::FnDef(..)
            | ty::FnPtr(_)
            | ty::RawPtr(..)
            | ty::Char
            | ty::Ref(..)
            | ty::Generator(..)
            | ty::GeneratorWitness(..)
            | ty::Array(..)
            | ty::Closure(..)
            | ty::Never
            | ty::Error => {
                // safe for everything
                Where(ty::Binder::dummy(Vec::new()))
            }

            ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => None,

            ty::Tuple(tys) => {
                // Only the last element of a tuple needs to be `Sized` for
                // the tuple itself to be; the rest always are.
                Where(ty::Binder::bind(tys.last().into_iter().map(|k| k.expect_ty()).collect()))
            }

            ty::Adt(def, substs) => {
                let sized_crit = def.sized_constraint(self.tcx());
                // (*) binder moved here
                Where(ty::Binder::bind(
                    sized_crit
                        .iter()
                        .map(|ty| ty.subst(self.tcx(), substs))
                        .collect(),
                ))
            }

            ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => None,
            ty::Infer(ty::TyVar(_)) => Ambiguous,

            ty::UnnormalizedProjection(..)
            | ty::Placeholder(..)
            | ty::Bound(..)
            | ty::Infer(ty::FreshTy(_))
            | ty::Infer(ty::FreshIntTy(_))
            | ty::Infer(ty::FreshFloatTy(_)) => {
                bug!(
                    "asked to assemble builtin bounds of unexpected type: {:?}",
                    self_ty
                );
            }
        }
    }

    /// Computes the builtin-impl conditions shared by `Copy` and `Clone`
    /// for the obligation's self type (same convention as
    /// `sized_conditions` above).
    fn copy_clone_conditions(
        &mut self,
        obligation: &TraitObligation<'tcx>,
    ) -> BuiltinImplConditions<'tcx> {
        // NOTE: binder moved to (*)
        let self_ty = self.infcx
            .shallow_resolve(obligation.predicate.skip_binder().self_ty());

        use self::BuiltinImplConditions::{Ambiguous, None, Where};

        match self_ty.sty {
            ty::Infer(ty::IntVar(_))
            | ty::Infer(ty::FloatVar(_))
            | ty::FnDef(..)
            | ty::FnPtr(_)
            | ty::Error => Where(ty::Binder::dummy(Vec::new())),

            ty::Uint(_)
            | ty::Int(_)
            | ty::Bool
            | ty::Float(_)
            | ty::Char
            | ty::RawPtr(..)
            | ty::Never
            | ty::Ref(_, _, hir::MutImmutable) => {
                // Implementations provided in libcore
                None
            }

            ty::Dynamic(..)
            | ty::Str
            | ty::Slice(..)
            | ty::Generator(..)
            | ty::GeneratorWitness(..)
            | ty::Foreign(..)
            | ty::Ref(_, _, hir::MutMutable) => None,

            ty::Array(element_ty, _) => {
                // (*) binder moved here
                Where(ty::Binder::bind(vec![element_ty]))
            }

            ty::Tuple(tys) => {
                // (*) binder moved here
                Where(ty::Binder::bind(tys.iter().map(|k| k.expect_ty()).collect()))
            }

            ty::Closure(def_id, substs) => {
                // (*) binder moved here
                Where(ty::Binder::bind(
                    substs.upvar_tys(def_id, self.tcx()).collect(),
                ))
            }

            ty::Adt(..) | ty::Projection(..) | ty::Param(..) | ty::Opaque(..) => {
                // Fallback to whatever user-defined impls exist in this case.
                None
            }

            ty::Infer(ty::TyVar(_)) => {
                // Unbound type variable. Might or might not have
                // applicable impls and so forth, depending on what
                // those type variables wind up being bound to.
                Ambiguous
            }

            ty::UnnormalizedProjection(..)
            | ty::Placeholder(..)
            | ty::Bound(..)
| ty::Infer(ty::FreshTy(_)) | ty::Infer(ty::FreshIntTy(_)) | ty::Infer(ty::FreshFloatTy(_)) => { bug!( "asked to assemble builtin bounds of unexpected type: {:?}", self_ty ); } } } /// For default impls, we need to break apart a type into its /// "constituent types" -- meaning, the types that it contains. /// /// Here are some (simple) examples: /// /// ``` /// (i32, u32) -> [i32, u32] /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32] /// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32] /// Zed<i32> where enum Zed { A(T), B(u32) } -> [i32, u32] /// ``` fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec<Ty<'tcx>> { match t.sty { ty::Uint(_) | ty::Int(_) | ty::Bool | ty::Float(_) | ty::FnDef(..) | ty::FnPtr(_) | ty::Str | ty::Error | ty::Infer(ty::IntVar(_)) | ty::Infer(ty::FloatVar(_)) | ty::Never | ty::Char => Vec::new(), ty::UnnormalizedProjection(..) | ty::Placeholder(..) | ty::Dynamic(..) | ty::Param(..) | ty::Foreign(..) | ty::Projection(..) | ty::Bound(..) | ty::Infer(ty::TyVar(_)) | ty::Infer(ty::FreshTy(_)) | ty::Infer(ty::FreshIntTy(_)) | ty::Infer(ty::FreshFloatTy(_)) => { bug!( "asked to assemble constituent types of unexpected type: {:?}", t ); } ty::RawPtr(ty::TypeAndMut { ty: element_ty, .. }) | ty::Ref(_, element_ty, _) => { vec![element_ty] } ty::Array(element_ty, _) | ty::Slice(element_ty) => vec![element_ty], ty::Tuple(ref tys) => { // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet tys.iter().map(|k| k.expect_ty()).collect() } ty::Closure(def_id, ref substs) => substs.upvar_tys(def_id, self.tcx()).collect(), ty::Generator(def_id, ref substs, _) => { let witness = substs.witness(def_id, self.tcx()); substs .upvar_tys(def_id, self.tcx()) .chain(iter::once(witness)) .collect() } ty::GeneratorWitness(types) => { // This is sound because no regions in the witness can refer to // the binder outside the witness. So we'll effectivly reuse // the implicit binder around the witness. 
types.skip_binder().to_vec() } // for `PhantomData<T>`, we pass `T` ty::Adt(def, substs) if def.is_phantom_data() => substs.types().collect(), ty::Adt(def, substs) => def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect(), ty::Opaque(def_id, substs) => { // We can resolve the `impl Trait` to its concrete type, // which enforces a DAG between the functions requiring // the auto trait bounds in question. vec![self.tcx().type_of(def_id).subst(self.tcx(), substs)] } } } fn collect_predicates_for_types( &mut self, param_env: ty::ParamEnv<'tcx>, cause: ObligationCause<'tcx>, recursion_depth: usize, trait_def_id: DefId, types: ty::Binder<Vec<Ty<'tcx>>>, ) -> Vec<PredicateObligation<'tcx>> { // Because the types were potentially derived from // higher-ranked obligations they may reference late-bound // regions. For example, `for<'a> Foo<&'a int> : Copy` would // yield a type like `for<'a> &'a int`. In general, we // maintain the invariant that we never manipulate bound // regions, so we have to process these bound regions somehow. // // The strategy is to: // // 1. Instantiate those regions to placeholder regions (e.g., // `for<'a> &'a int` becomes `&0 int`. // 2. Produce something like `&'0 int : Copy` // 3. 
Re-bind the regions back to `for<'a> &'a int : Copy` types .skip_binder() .into_iter() .flat_map(|ty| { // binder moved -\ let ty: ty::Binder<Ty<'tcx>> = ty::Binder::bind(ty); // <----/ self.infcx.in_snapshot(|_| { let (skol_ty, _) = self.infcx .replace_bound_vars_with_placeholders(&ty); let Normalized { value: normalized_ty, mut obligations, } = project::normalize_with_depth( self, param_env, cause.clone(), recursion_depth, &skol_ty, ); let skol_obligation = self.tcx().predicate_for_trait_def( param_env, cause.clone(), trait_def_id, recursion_depth, normalized_ty, &[], ); obligations.push(skol_obligation); obligations }) }) .collect() } /////////////////////////////////////////////////////////////////////////// // CONFIRMATION // // Confirmation unifies the output type parameters of the trait // with the values found in the obligation, possibly yielding a // type error. See the [rustc guide] for more details. // // [rustc guide]: // https://rust-lang.github.io/rustc-guide/traits/resolution.html#confirmation fn confirm_candidate( &mut self, obligation: &TraitObligation<'tcx>, candidate: SelectionCandidate<'tcx>, ) -> Result<Selection<'tcx>, SelectionError<'tcx>> { debug!("confirm_candidate({:?}, {:?})", obligation, candidate); match candidate { BuiltinCandidate { has_nested } => { let data = self.confirm_builtin_candidate(obligation, has_nested); Ok(VtableBuiltin(data)) } ParamCandidate(param) => { let obligations = self.confirm_param_candidate(obligation, param); Ok(VtableParam(obligations)) } ImplCandidate(impl_def_id) => Ok(VtableImpl(self.confirm_impl_candidate( obligation, impl_def_id, ))), AutoImplCandidate(trait_def_id) => { let data = self.confirm_auto_impl_candidate(obligation, trait_def_id); Ok(VtableAutoImpl(data)) } ProjectionCandidate => { self.confirm_projection_candidate(obligation); Ok(VtableParam(Vec::new())) } ClosureCandidate => { let vtable_closure = self.confirm_closure_candidate(obligation)?; Ok(VtableClosure(vtable_closure)) } 
GeneratorCandidate => {
    let vtable_generator = self.confirm_generator_candidate(obligation)?;
    Ok(VtableGenerator(vtable_generator))
}

FnPointerCandidate => {
    let data = self.confirm_fn_pointer_candidate(obligation)?;
    Ok(VtableFnPointer(data))
}

TraitAliasCandidate(alias_def_id) => {
    let data = self.confirm_trait_alias_candidate(obligation, alias_def_id);
    Ok(VtableTraitAlias(data))
}

ObjectCandidate => {
    let data = self.confirm_object_candidate(obligation);
    Ok(VtableObject(data))
}

BuiltinObjectCandidate => {
    // This indicates something like `(Trait+Send) :
    // Send`. In this case, we know that this holds
    // because that's what the object type is telling us,
    // and there's really no additional obligations to
    // prove and no types in particular to unify etc.
    Ok(VtableParam(Vec::new()))
}

BuiltinUnsizeCandidate => {
    let data = self.confirm_builtin_unsize_candidate(obligation)?;
    Ok(VtableBuiltin(data))
}
}
}

/// Confirms a `ProjectionCandidate`: re-runs the match of the obligation
/// against the bounds declared on the definition of the projected type.
/// During candidate assembly this match already succeeded inside a probe,
/// so here we assert that it succeeds again — this time keeping the
/// inference side effects (unifications) produced by the match.
fn confirm_projection_candidate(&mut self, obligation: &TraitObligation<'tcx>) {
    self.infcx.in_snapshot(|snapshot| {
        let result =
            self.match_projection_obligation_against_definition_bounds(
                obligation,
                snapshot,
            );
        assert!(result);
    })
}

/// Confirms a `ParamCandidate`, i.e., a candidate that arose from an
/// in-scope where-clause (`param`). Returns the nested obligations that
/// result from unifying the where-clause trait-ref with the obligation's
/// trait-ref.
fn confirm_param_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
    param: ty::PolyTraitRef<'tcx>,
) -> Vec<PredicateObligation<'tcx>> {
    debug!("confirm_param_candidate({:?},{:?})", obligation, param);

    // During evaluation, we already checked that this
    // where-clause trait-ref could be unified with the obligation
    // trait-ref. Repeat that unification now without any
    // transactional boundary; it should not fail.
match self.match_where_clause_trait_ref(obligation, param.clone()) {
    Ok(obligations) => obligations,
    Err(()) => {
        // Selection previously accepted this where-clause, so a failure
        // here indicates a compiler invariant was violated.
        bug!(
            "Where clause `{:?}` was applicable to `{:?}` but now is not",
            param,
            obligation
        );
    }
}
}

/// Confirms a `BuiltinCandidate` — one of the compiler-provided impls for
/// `Sized`, `Copy`, or `Clone`. When `has_nested` is true, the builtin
/// impl carries nested obligations (e.g., `Copy` for a tuple requires
/// `Copy` for each component), which are collected here; otherwise the
/// impl holds unconditionally.
fn confirm_builtin_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
    has_nested: bool,
) -> VtableBuiltinData<PredicateObligation<'tcx>> {
    debug!(
        "confirm_builtin_candidate({:?}, {:?})",
        obligation, has_nested
    );

    let lang_items = self.tcx().lang_items();
    let obligations = if has_nested {
        let trait_def = obligation.predicate.def_id();
        // Dispatch on which builtin trait this is; `Copy` and `Clone`
        // share the same structural conditions.
        let conditions = if Some(trait_def) == lang_items.sized_trait() {
            self.sized_conditions(obligation)
        } else if Some(trait_def) == lang_items.copy_trait() {
            self.copy_clone_conditions(obligation)
        } else if Some(trait_def) == lang_items.clone_trait() {
            self.copy_clone_conditions(obligation)
        } else {
            bug!("unexpected builtin trait {:?}", trait_def)
        };
        // Candidate assembly only produced a nested builtin candidate when
        // the conditions were `Where(..)`; anything else is an invariant
        // violation.
        let nested = match conditions {
            BuiltinImplConditions::Where(nested) => nested,
            _ => bug!(
                "obligation {:?} had matched a builtin impl but now doesn't",
                obligation
            ),
        };

        let cause = obligation.derived_cause(BuiltinDerivedObligation);
        self.collect_predicates_for_types(
            obligation.param_env,
            cause,
            obligation.recursion_depth + 1,
            trait_def,
            nested,
        )
    } else {
        vec![]
    };

    debug!("confirm_builtin_candidate: obligations={:?}", obligations);

    VtableBuiltinData {
        nested: obligations,
    }
}

/// This handles the case where a `auto trait Foo` impl is being used.
/// The idea is that the impl applies to `X : Foo` if the following conditions are met:
///
/// 1. For each constituent type `Y` in `X`, `Y : Foo` holds
/// 2. For each where-clause `C` declared on `Foo`, `[Self => X] C` holds.
fn confirm_auto_impl_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
    trait_def_id: DefId,
) -> VtableAutoImplData<PredicateObligation<'tcx>> {
    debug!(
        "confirm_auto_impl_candidate({:?}, {:?})",
        obligation, trait_def_id
    );

    // Decompose the (possibly higher-ranked) self type into its
    // constituent types; each must itself implement the auto trait.
    let types = obligation.predicate.map_bound(|inner| {
        let self_ty = self.infcx.shallow_resolve(inner.self_ty());
        self.constituent_types_for_ty(self_ty)
    });
    self.vtable_auto_impl(obligation, trait_def_id, types)
}

/// See `confirm_auto_impl_candidate`.
///
/// Builds the vtable data for an auto-trait impl: the nested obligations
/// are `Y: Trait` for every constituent type `Y` in `nested`, plus the
/// (substituted) where-clauses declared on the trait itself.
fn vtable_auto_impl(
    &mut self,
    obligation: &TraitObligation<'tcx>,
    trait_def_id: DefId,
    nested: ty::Binder<Vec<Ty<'tcx>>>,
) -> VtableAutoImplData<PredicateObligation<'tcx>> {
    debug!("vtable_auto_impl: nested={:?}", nested);

    // Condition (1): each constituent type must satisfy the auto trait.
    let cause = obligation.derived_cause(BuiltinDerivedObligation);
    let mut obligations = self.collect_predicates_for_types(
        obligation.param_env,
        cause,
        obligation.recursion_depth + 1,
        trait_def_id,
        nested,
    );

    // Condition (2): the where-clauses declared on the trait, with `Self`
    // instantiated to the obligation's self type. Bound vars are replaced
    // with placeholders inside a snapshot before substituting.
    let trait_obligations: Vec<PredicateObligation<'_>> = self.infcx.in_snapshot(|_| {
        let poly_trait_ref = obligation.predicate.to_poly_trait_ref();
        let (trait_ref, _) = self.infcx
            .replace_bound_vars_with_placeholders(&poly_trait_ref);
        let cause = obligation.derived_cause(ImplDerivedObligation);
        self.impl_or_trait_obligations(
            cause,
            obligation.recursion_depth + 1,
            obligation.param_env,
            trait_def_id,
            &trait_ref.substs,
        )
    });

    // Adds the predicates from the trait. Note that this contains a `Self: Trait`
    // predicate as usual. It won't have any effect since auto traits are coinductive.
    obligations.extend(trait_obligations);

    debug!("vtable_auto_impl: obligations={:?}", obligations);

    VtableAutoImplData {
        trait_def_id,
        nested: obligations,
    }
}

/// Confirms an `ImplCandidate`: re-matches the obligation against the
/// user-written impl `impl_def_id` (this time committing the inference
/// side effects) and builds the resulting vtable data.
fn confirm_impl_candidate(
    &mut self,
    obligation: &TraitObligation<'tcx>,
    impl_def_id: DefId,
) -> VtableImplData<'tcx, PredicateObligation<'tcx>> {
    debug!("confirm_impl_candidate({:?},{:?})", obligation, impl_def_id);

    // First, create the substitutions by matching the impl again,
    // this time not in a probe.
self.infcx.in_snapshot(|snapshot| { let substs = self.rematch_impl(impl_def_id, obligation, snapshot); debug!("confirm_impl_candidate: substs={:?}", substs); let cause = obligation.derived_cause(ImplDerivedObligation); self.vtable_impl( impl_def_id, substs, cause, obligation.recursion_depth + 1, obligation.param_env, ) }) } fn vtable_impl( &mut self, impl_def_id: DefId, mut substs: Normalized<'tcx, SubstsRef<'tcx>>, cause: ObligationCause<'tcx>, recursion_depth: usize, param_env: ty::ParamEnv<'tcx>, ) -> VtableImplData<'tcx, PredicateObligation<'tcx>> { debug!( "vtable_impl(impl_def_id={:?}, substs={:?}, recursion_depth={})", impl_def_id, substs, recursion_depth, ); let mut impl_obligations = self.impl_or_trait_obligations( cause, recursion_depth, param_env, impl_def_id, &substs.value, ); debug!( "vtable_impl: impl_def_id={:?} impl_obligations={:?}", impl_def_id, impl_obligations ); // Because of RFC447, the impl-trait-ref and obligations // are sufficient to determine the impl substs, without // relying on projections in the impl-trait-ref. // // e.g., `impl<U: Tr, V: Iterator<Item=U>> Foo<<U as Tr>::T> for V` impl_obligations.append(&mut substs.obligations); VtableImplData { impl_def_id, substs: substs.value, nested: impl_obligations, } } fn confirm_object_candidate( &mut self, obligation: &TraitObligation<'tcx>, ) -> VtableObjectData<'tcx, PredicateObligation<'tcx>> { debug!("confirm_object_candidate({:?})", obligation); // FIXME(nmatsakis) skipping binder here seems wrong -- we should // probably flatten the binder from the obligation and the binder // from the object. Have to try to make a broken test case that // results. let self_ty = self.infcx .shallow_resolve(*obligation.self_ty().skip_binder()); let poly_trait_ref = match self_ty.sty { ty::Dynamic(ref data, ..) 
=> data.principal().unwrap_or_else(|| { span_bug!(obligation.cause.span, "object candidate with no principal") }).with_self_ty(self.tcx(), self_ty), _ => span_bug!(obligation.cause.span, "object candidate with non-object"), }; let mut upcast_trait_ref = None; let mut nested = vec![]; let vtable_base; { let tcx = self.tcx(); // We want to find the first supertrait in the list of // supertraits that we can unify with, and do that // unification. We know that there is exactly one in the list // where we can unify because otherwise select would have // reported an ambiguity. (When we do find a match, also // record it for later.) let nonmatching = util::supertraits(tcx, poly_trait_ref).take_while( |&t| match self.infcx.commit_if_ok(|_| self.match_poly_trait_ref(obligation, t)) { Ok(obligations) => { upcast_trait_ref = Some(t); nested.extend(obligations); false } Err(_) => true, }, ); // Additionally, for each of the nonmatching predicates that // we pass over, we sum up the set of number of vtable // entries, so that we can compute the offset for the selected // trait. vtable_base = nonmatching.map(|t| tcx.count_own_vtable_entries(t)).sum(); } VtableObjectData { upcast_trait_ref: upcast_trait_ref.unwrap(), vtable_base, nested, } } fn confirm_fn_pointer_candidate( &mut self, obligation: &TraitObligation<'tcx>, ) -> Result<VtableFnPointerData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> { debug!("confirm_fn_pointer_candidate({:?})", obligation); // Okay to skip binder; it is reintroduced below. 
let self_ty = self.infcx .shallow_resolve(*obligation.self_ty().skip_binder()); let sig = self_ty.fn_sig(self.tcx()); let trait_ref = self.tcx() .closure_trait_ref_and_return_type( obligation.predicate.def_id(), self_ty, sig, util::TupleArgumentsFlag::Yes, ) .map_bound(|(trait_ref, _)| trait_ref); let Normalized { value: trait_ref, obligations, } = project::normalize_with_depth( self, obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, &trait_ref, ); self.confirm_poly_trait_refs( obligation.cause.clone(), obligation.param_env, obligation.predicate.to_poly_trait_ref(), trait_ref, )?; Ok(VtableFnPointerData { fn_ty: self_ty, nested: obligations, }) } fn confirm_trait_alias_candidate( &mut self, obligation: &TraitObligation<'tcx>, alias_def_id: DefId, ) -> VtableTraitAliasData<'tcx, PredicateObligation<'tcx>> { debug!( "confirm_trait_alias_candidate({:?}, {:?})", obligation, alias_def_id ); self.infcx.in_snapshot(|_| { let (predicate, _) = self.infcx() .replace_bound_vars_with_placeholders(&obligation.predicate); let trait_ref = predicate.trait_ref; let trait_def_id = trait_ref.def_id; let substs = trait_ref.substs; let trait_obligations = self.impl_or_trait_obligations( obligation.cause.clone(), obligation.recursion_depth, obligation.param_env, trait_def_id, &substs, ); debug!( "confirm_trait_alias_candidate: trait_def_id={:?} trait_obligations={:?}", trait_def_id, trait_obligations ); VtableTraitAliasData { alias_def_id, substs: substs, nested: trait_obligations, } }) } fn confirm_generator_candidate( &mut self, obligation: &TraitObligation<'tcx>, ) -> Result<VtableGeneratorData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> { // Okay to skip binder because the substs on generator types never // touch bound regions, they just capture the in-scope // type/region parameters. 
let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); let (generator_def_id, substs) = match self_ty.sty { ty::Generator(id, substs, _) => (id, substs), _ => bug!("closure candidate for non-closure {:?}", obligation), }; debug!( "confirm_generator_candidate({:?},{:?},{:?})", obligation, generator_def_id, substs ); let trait_ref = self.generator_trait_ref_unnormalized(obligation, generator_def_id, substs); let Normalized { value: trait_ref, mut obligations, } = normalize_with_depth( self, obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, &trait_ref, ); debug!( "confirm_generator_candidate(generator_def_id={:?}, \ trait_ref={:?}, obligations={:?})", generator_def_id, trait_ref, obligations ); obligations.extend(self.confirm_poly_trait_refs( obligation.cause.clone(), obligation.param_env, obligation.predicate.to_poly_trait_ref(), trait_ref, )?); Ok(VtableGeneratorData { generator_def_id: generator_def_id, substs: substs.clone(), nested: obligations, }) } fn confirm_closure_candidate( &mut self, obligation: &TraitObligation<'tcx>, ) -> Result<VtableClosureData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> { debug!("confirm_closure_candidate({:?})", obligation); let kind = self.tcx() .lang_items() .fn_trait_kind(obligation.predicate.def_id()) .unwrap_or_else(|| bug!("closure candidate for non-fn trait {:?}", obligation)); // Okay to skip binder because the substs on closure types never // touch bound regions, they just capture the in-scope // type/region parameters. 
let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); let (closure_def_id, substs) = match self_ty.sty { ty::Closure(id, substs) => (id, substs), _ => bug!("closure candidate for non-closure {:?}", obligation), }; let trait_ref = self.closure_trait_ref_unnormalized(obligation, closure_def_id, substs); let Normalized { value: trait_ref, mut obligations, } = normalize_with_depth( self, obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, &trait_ref, ); debug!( "confirm_closure_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})", closure_def_id, trait_ref, obligations ); obligations.extend(self.confirm_poly_trait_refs( obligation.cause.clone(), obligation.param_env, obligation.predicate.to_poly_trait_ref(), trait_ref, )?); // FIXME: chalk if !self.tcx().sess.opts.debugging_opts.chalk { obligations.push(Obligation::new( obligation.cause.clone(), obligation.param_env, ty::Predicate::ClosureKind(closure_def_id, substs, kind), )); } Ok(VtableClosureData { closure_def_id, substs: substs.clone(), nested: obligations, }) } /// In the case of closure types and fn pointers, /// we currently treat the input type parameters on the trait as /// outputs. This means that when we have a match we have only /// considered the self type, so we have to go back and make sure /// to relate the argument types too. This is kind of wrong, but /// since we control the full set of impls, also not that wrong, /// and it DOES yield better error messages (since we don't report /// errors as if there is no applicable impl, but rather report /// errors are about mismatched argument types. /// /// Here is an example. Imagine we have a closure expression /// and we desugared it so that the type of the expression is /// `Closure`, and `Closure` expects an int as argument. Then it /// is "as if" the compiler generated this impl: /// /// impl Fn(int) for Closure { ... } /// /// Now imagine our obligation is `Fn(usize) for Closure`. 
So far /// we have matched the self type `Closure`. At this point we'll /// compare the `int` to `usize` and generate an error. /// /// Note that this checking occurs *after* the impl has selected, /// because these output type parameters should not affect the /// selection of the impl. Therefore, if there is a mismatch, we /// report an error to the user. fn confirm_poly_trait_refs( &mut self, obligation_cause: ObligationCause<'tcx>, obligation_param_env: ty::ParamEnv<'tcx>, obligation_trait_ref: ty::PolyTraitRef<'tcx>, expected_trait_ref: ty::PolyTraitRef<'tcx>, ) -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>> { let obligation_trait_ref = obligation_trait_ref.clone(); self.infcx .at(&obligation_cause, obligation_param_env) .sup(obligation_trait_ref, expected_trait_ref) .map(|InferOk { obligations, .. }| obligations) .map_err(|e| OutputTypeParameterMismatch(expected_trait_ref, obligation_trait_ref, e)) } fn confirm_builtin_unsize_candidate( &mut self, obligation: &TraitObligation<'tcx>, ) -> Result<VtableBuiltinData<PredicateObligation<'tcx>>, SelectionError<'tcx>> { let tcx = self.tcx(); // assemble_candidates_for_unsizing should ensure there are no late bound // regions here. See the comment there for more details. let source = self.infcx .shallow_resolve(obligation.self_ty().no_bound_vars().unwrap()); let target = obligation .predicate .skip_binder() .trait_ref .substs .type_at(1); let target = self.infcx.shallow_resolve(target); debug!( "confirm_builtin_unsize_candidate(source={:?}, target={:?})", source, target ); let mut nested = vec![]; match (&source.sty, &target.sty) { // Trait+Kx+'a -> Trait+Ky+'b (upcasts). (&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => { // See assemble_candidates_for_unsizing for more info. 
let existential_predicates = data_a.map_bound(|data_a| { let iter = data_a.principal().map(|x| ty::ExistentialPredicate::Trait(x)) .into_iter().chain( data_a .projection_bounds() .map(|x| ty::ExistentialPredicate::Projection(x)), ) .chain( data_b .auto_traits() .map(ty::ExistentialPredicate::AutoTrait), ); tcx.mk_existential_predicates(iter) }); let source_trait = tcx.mk_dynamic(existential_predicates, r_b); // Require that the traits involved in this upcast are **equal**; // only the **lifetime bound** is changed. // // FIXME: This condition is arguably too strong -- it // would suffice for the source trait to be a // *subtype* of the target trait. In particular // changing from something like `for<'a, 'b> Foo<'a, // 'b>` to `for<'a> Foo<'a, 'a>` should be // permitted. And, indeed, in the in commit // 904a0bde93f0348f69914ee90b1f8b6e4e0d7cbc, this // condition was loosened. However, when the leak check was added // back, using subtype here actually guies the coercion code in // such a way that it accepts `old-lub-glb-object.rs`. This is probably // a good thing, but I've modified this to `.eq` because I want // to continue rejecting that test (as we have done for quite some time) // before we are firmly comfortable with what our behavior // should be there. -nikomatsakis let InferOk { obligations, .. } = self.infcx .at(&obligation.cause, obligation.param_env) .eq(target, source_trait) // FIXME -- see below .map_err(|_| Unimplemented)?; nested.extend(obligations); // Register one obligation for 'a: 'b. let cause = ObligationCause::new( obligation.cause.span, obligation.cause.body_id, ObjectCastObligation(target), ); let outlives = ty::OutlivesPredicate(r_a, r_b); nested.push(Obligation::with_depth( cause, obligation.recursion_depth + 1, obligation.param_env, ty::Binder::bind(outlives).to_predicate(), )); } // T -> Trait. 
(_, &ty::Dynamic(ref data, r)) => { let mut object_dids = data.auto_traits() .chain(data.principal_def_id()); if let Some(did) = object_dids.find(|did| !tcx.is_object_safe(*did)) { return Err(TraitNotObjectSafe(did)); } let cause = ObligationCause::new( obligation.cause.span, obligation.cause.body_id, ObjectCastObligation(target), ); let predicate_to_obligation = |predicate| { Obligation::with_depth( cause.clone(), obligation.recursion_depth + 1, obligation.param_env, predicate, ) }; // Create obligations: // - Casting T to Trait // - For all the various builtin bounds attached to the object cast. (In other // words, if the object type is Foo+Send, this would create an obligation for the // Send check.) // - Projection predicates nested.extend( data.iter() .map(|d| predicate_to_obligation(d.with_self_ty(tcx, source))), ); // We can only make objects from sized types. let tr = ty::TraitRef { def_id: tcx.require_lang_item(lang_items::SizedTraitLangItem), substs: tcx.mk_substs_trait(source, &[]), }; nested.push(predicate_to_obligation(tr.to_predicate())); // If the type is `Foo+'a`, ensures that the type // being cast to `Foo+'a` outlives `'a`: let outlives = ty::OutlivesPredicate(source, r); nested.push(predicate_to_obligation( ty::Binder::dummy(outlives).to_predicate(), )); } // [T; n] -> [T]. (&ty::Array(a, _), &ty::Slice(b)) => { let InferOk { obligations, .. } = self.infcx .at(&obligation.cause, obligation.param_env) .eq(b, a) .map_err(|_| Unimplemented)?; nested.extend(obligations); } // Struct<T> -> Struct<U>. (&ty::Adt(def, substs_a), &ty::Adt(_, substs_b)) => { let fields = def.all_fields() .map(|f| tcx.type_of(f.did)) .collect::<Vec<_>>(); // The last field of the structure has to exist and contain type parameters. 
let field = if let Some(&field) = fields.last() { field } else { return Err(Unimplemented); }; let mut ty_params = GrowableBitSet::new_empty(); let mut found = false; for ty in field.walk() { if let ty::Param(p) = ty.sty { ty_params.insert(p.index as usize); found = true; } } if !found { return Err(Unimplemented); } // Replace type parameters used in unsizing with // Error and ensure they do not affect any other fields. // This could be checked after type collection for any struct // with a potentially unsized trailing field. let params = substs_a.iter().enumerate().map(|(i, &k)| { if ty_params.contains(i) { tcx.types.err.into() } else { k } }); let substs = tcx.mk_substs(params); for &ty in fields.split_last().unwrap().1 { if ty.subst(tcx, substs).references_error() { return Err(Unimplemented); } } // Extract Field<T> and Field<U> from Struct<T> and Struct<U>. let inner_source = field.subst(tcx, substs_a); let inner_target = field.subst(tcx, substs_b); // Check that the source struct with the target's // unsized parameters is equal to the target. let params = substs_a.iter().enumerate().map(|(i, &k)| { if ty_params.contains(i) { substs_b.type_at(i).into() } else { k } }); let new_struct = tcx.mk_adt(def, tcx.mk_substs(params)); let InferOk { obligations, .. } = self.infcx .at(&obligation.cause, obligation.param_env) .eq(target, new_struct) .map_err(|_| Unimplemented)?; nested.extend(obligations); // Construct the nested Field<T>: Unsize<Field<U>> predicate. nested.push(tcx.predicate_for_trait_def( obligation.param_env, obligation.cause.clone(), obligation.predicate.def_id(), obligation.recursion_depth + 1, inner_source, &[inner_target.into()], )); } // (.., T) -> (.., U). (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => { assert_eq!(tys_a.len(), tys_b.len()); // The last field of the tuple has to exist. 
let (&a_last, a_mid) = if let Some(x) = tys_a.split_last() { x } else { return Err(Unimplemented); }; let &b_last = tys_b.last().unwrap(); // Check that the source tuple with the target's // last element is equal to the target. let new_tuple = tcx.mk_tup( a_mid.iter().map(|k| k.expect_ty()).chain(iter::once(b_last.expect_ty())), ); let InferOk { obligations, .. } = self.infcx .at(&obligation.cause, obligation.param_env) .eq(target, new_tuple) .map_err(|_| Unimplemented)?; nested.extend(obligations); // Construct the nested T: Unsize<U> predicate. nested.push(tcx.predicate_for_trait_def( obligation.param_env, obligation.cause.clone(), obligation.predicate.def_id(), obligation.recursion_depth + 1, a_last.expect_ty(), &[b_last.into()], )); } _ => bug!(), }; Ok(VtableBuiltinData { nested }) } /////////////////////////////////////////////////////////////////////////// // Matching // // Matching is a common path used for both evaluation and // confirmation. It basically unifies types that appear in impls // and traits. This does affect the surrounding environment; // therefore, when used during evaluation, match routines must be // run inside of a `probe()` so that their side-effects are // contained. fn rematch_impl( &mut self, impl_def_id: DefId, obligation: &TraitObligation<'tcx>, snapshot: &CombinedSnapshot<'_, 'tcx>, ) -> Normalized<'tcx, SubstsRef<'tcx>> { match self.match_impl(impl_def_id, obligation, snapshot) { Ok(substs) => substs, Err(()) => { bug!( "Impl {:?} was matchable against {:?} but now is not", impl_def_id, obligation ); } } } fn match_impl( &mut self, impl_def_id: DefId, obligation: &TraitObligation<'tcx>, snapshot: &CombinedSnapshot<'_, 'tcx>, ) -> Result<Normalized<'tcx, SubstsRef<'tcx>>, ()> { let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap(); // Before we create the substitutions and everything, first // consider a "quick reject". This avoids creating more types // and so forth that we need to. 
if self.fast_reject_trait_refs(obligation, &impl_trait_ref) { return Err(()); } let (skol_obligation, placeholder_map) = self.infcx() .replace_bound_vars_with_placeholders(&obligation.predicate); let skol_obligation_trait_ref = skol_obligation.trait_ref; let impl_substs = self.infcx .fresh_substs_for_item(obligation.cause.span, impl_def_id); let impl_trait_ref = impl_trait_ref.subst(self.tcx(), impl_substs); let Normalized { value: impl_trait_ref, obligations: mut nested_obligations, } = project::normalize_with_depth( self, obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, &impl_trait_ref, ); debug!( "match_impl(impl_def_id={:?}, obligation={:?}, \ impl_trait_ref={:?}, skol_obligation_trait_ref={:?})", impl_def_id, obligation, impl_trait_ref, skol_obligation_trait_ref ); let InferOk { obligations, .. } = self.infcx .at(&obligation.cause, obligation.param_env) .eq(skol_obligation_trait_ref, impl_trait_ref) .map_err(|e| debug!("match_impl: failed eq_trait_refs due to `{}`", e))?; nested_obligations.extend(obligations); if let Err(e) = self.infcx.leak_check(false, &placeholder_map, snapshot) { debug!("match_impl: failed leak check due to `{}`", e); return Err(()); } debug!("match_impl: success impl_substs={:?}", impl_substs); Ok(Normalized { value: impl_substs, obligations: nested_obligations, }) } fn fast_reject_trait_refs( &mut self, obligation: &TraitObligation<'_>, impl_trait_ref: &ty::TraitRef<'_>, ) -> bool { // We can avoid creating type variables and doing the full // substitution if we find that any of the input types, when // simplified, do not match. 
// Reject quickly if any pair of corresponding input types simplifies to
// two distinct shapes: then no substitution can make them equal, so the
// impl cannot possibly apply. `None` from `simplify_type` means "too
// complex to judge" and never triggers a rejection.
obligation
    .predicate
    .skip_binder()
    .input_types()
    .zip(impl_trait_ref.input_types())
    .any(|(obligation_ty, impl_ty)| {
        let simplified_obligation_ty =
            fast_reject::simplify_type(self.tcx(), obligation_ty, true);
        let simplified_impl_ty =
            fast_reject::simplify_type(self.tcx(), impl_ty, false);

        simplified_obligation_ty.is_some()
            && simplified_impl_ty.is_some()
            && simplified_obligation_ty != simplified_impl_ty
    })
}

/// Normalize `where_clause_trait_ref` and try to match it against
/// `obligation`. If successful, return any predicates that
/// result from the normalization. Normalization is necessary
/// because where-clauses are stored in the parameter environment
/// unnormalized.
fn match_where_clause_trait_ref(
    &mut self,
    obligation: &TraitObligation<'tcx>,
    where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
    self.match_poly_trait_ref(obligation, where_clause_trait_ref)
}

/// Returns `Ok` if `poly_trait_ref` being true implies that the
/// obligation is satisfied.
fn match_poly_trait_ref(
    &mut self,
    obligation: &TraitObligation<'tcx>,
    poly_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
    debug!(
        "match_poly_trait_ref: obligation={:?} poly_trait_ref={:?}",
        obligation, poly_trait_ref
    );

    // `sup` rather than `eq`: the candidate trait-ref may be more general
    // (e.g., higher-ranked) than the obligation and still satisfy it.
    self.infcx
        .at(&obligation.cause, obligation.param_env)
        .sup(obligation.predicate.to_poly_trait_ref(), poly_trait_ref)
        .map(|InferOk { obligations, ..
}| obligations) .map_err(|_| ()) } /////////////////////////////////////////////////////////////////////////// // Miscellany fn match_fresh_trait_refs( &self, previous: &ty::PolyTraitRef<'tcx>, current: &ty::PolyTraitRef<'tcx>, ) -> bool { let mut matcher = ty::_match::Match::new(self.tcx()); matcher.relate(previous, current).is_ok() } fn push_stack<'o, 's: 'o>( &mut self, previous_stack: TraitObligationStackList<'s, 'tcx>, obligation: &'o TraitObligation<'tcx>, ) -> TraitObligationStack<'o, 'tcx> { let fresh_trait_ref = obligation .predicate .to_poly_trait_ref() .fold_with(&mut self.freshener); let depth = previous_stack.depth() + 1; TraitObligationStack { obligation, fresh_trait_ref, reached_depth: Cell::new(depth), previous: previous_stack, depth, } } fn closure_trait_ref_unnormalized( &mut self, obligation: &TraitObligation<'tcx>, closure_def_id: DefId, substs: ty::ClosureSubsts<'tcx>, ) -> ty::PolyTraitRef<'tcx> { debug!( "closure_trait_ref_unnormalized(obligation={:?}, closure_def_id={:?}, substs={:?})", obligation, closure_def_id, substs, ); let closure_type = self.infcx.closure_sig(closure_def_id, substs); debug!( "closure_trait_ref_unnormalized: closure_type = {:?}", closure_type ); // (1) Feels icky to skip the binder here, but OTOH we know // that the self-type is an unboxed closure type and hence is // in fact unparameterized (or at least does not reference any // regions bound in the obligation). Still probably some // refactoring could make this nicer. 
self.tcx() .closure_trait_ref_and_return_type( obligation.predicate.def_id(), obligation.predicate.skip_binder().self_ty(), // (1) closure_type, util::TupleArgumentsFlag::No, ) .map_bound(|(trait_ref, _)| trait_ref) } fn generator_trait_ref_unnormalized( &mut self, obligation: &TraitObligation<'tcx>, closure_def_id: DefId, substs: ty::GeneratorSubsts<'tcx>, ) -> ty::PolyTraitRef<'tcx> { let gen_sig = substs.poly_sig(closure_def_id, self.tcx()); // (1) Feels icky to skip the binder here, but OTOH we know // that the self-type is an generator type and hence is // in fact unparameterized (or at least does not reference any // regions bound in the obligation). Still probably some // refactoring could make this nicer. self.tcx() .generator_trait_ref_and_outputs( obligation.predicate.def_id(), obligation.predicate.skip_binder().self_ty(), // (1) gen_sig, ) .map_bound(|(trait_ref, ..)| trait_ref) } /// Returns the obligations that are implied by instantiating an /// impl or trait. The obligations are substituted and fully /// normalized. This is used when confirming an impl or default /// impl. fn impl_or_trait_obligations( &mut self, cause: ObligationCause<'tcx>, recursion_depth: usize, param_env: ty::ParamEnv<'tcx>, def_id: DefId, // of impl or trait substs: SubstsRef<'tcx>, // for impl or trait ) -> Vec<PredicateObligation<'tcx>> { debug!("impl_or_trait_obligations(def_id={:?})", def_id); let tcx = self.tcx(); // To allow for one-pass evaluation of the nested obligation, // each predicate must be preceded by the obligations required // to normalize it. 
// for example, if we have: // impl<U: Iterator, V: Iterator<Item=U>> Foo for V where U::Item: Copy // the impl will have the following predicates: // <V as Iterator>::Item = U, // U: Iterator, U: Sized, // V: Iterator, V: Sized, // <U as Iterator>::Item: Copy // When we substitute, say, `V => IntoIter<u32>, U => $0`, the last // obligation will normalize to `<$0 as Iterator>::Item = $1` and // `$1: Copy`, so we must ensure the obligations are emitted in // that order. let predicates = tcx.predicates_of(def_id); assert_eq!(predicates.parent, None); let mut predicates: Vec<_> = predicates .predicates .iter() .flat_map(|(predicate, _)| { let predicate = normalize_with_depth( self, param_env, cause.clone(), recursion_depth, &predicate.subst(tcx, substs), ); predicate.obligations.into_iter().chain(Some(Obligation { cause: cause.clone(), recursion_depth, param_env, predicate: predicate.value, })) }) .collect(); // We are performing deduplication here to avoid exponential blowups // (#38528) from happening, but the real cause of the duplication is // unknown. What we know is that the deduplication avoids exponential // amount of predicates being propagated when processing deeply nested // types. // // This code is hot enough that it's worth avoiding the allocation // required for the FxHashSet when possible. Special-casing lengths 0, // 1 and 2 covers roughly 75--80% of the cases. if predicates.len() <= 1 { // No possibility of duplicates. } else if predicates.len() == 2 { // Only two elements. Drop the second if they are equal. if predicates[0] == predicates[1] { predicates.truncate(1); } } else { // Three or more elements. Use a general deduplication process. let mut seen = FxHashSet::default(); predicates.retain(|i| seen.insert(i.clone())); } predicates } } impl<'tcx> TraitObligation<'tcx> { #[allow(unused_comparisons)] pub fn derived_cause( &self, variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>, ) -> ObligationCause<'tcx> { /*! 
 * Creates a cause for obligations that are derived from
 * `obligation` by a recursive search (e.g., for a builtin
 * bound, or eventually a `auto trait Foo`). If `obligation`
 * is itself a derived obligation, this is just a clone, but
 * otherwise we create a "derived obligation" cause so as to
 * keep track of the original root obligation for error
 * reporting.
 */

let obligation = self;

// NOTE(flaper87): As of now, it keeps track of the whole error
// chain. Ideally, we should have a way to configure this either
// by using -Z verbose or just a CLI argument.
//
// Since `recursion_depth` is unsigned, this comparison is always true
// today (hence the `#[allow(unused_comparisons)]` on this function);
// the `else` branch is effectively dead but kept for a future
// configurable depth threshold.
if obligation.recursion_depth >= 0 {
    let derived_cause = DerivedObligationCause {
        parent_trait_ref: obligation.predicate.to_poly_trait_ref(),
        parent_code: Rc::new(obligation.cause.code.clone()),
    };
    let derived_code = variant(derived_cause);
    ObligationCause::new(
        obligation.cause.span,
        obligation.cause.body_id,
        derived_code,
    )
} else {
    obligation.cause.clone()
}
}
}

impl<'tcx> SelectionCache<'tcx> {
    /// Actually frees the underlying memory in contrast to what stdlib containers do on `clear`
    pub fn clear(&self) {
        *self.hashmap.borrow_mut() = Default::default();
    }
}

impl<'tcx> EvaluationCache<'tcx> {
    /// Actually frees the underlying memory in contrast to what stdlib containers do on `clear`
    pub fn clear(&self) {
        *self.hashmap.borrow_mut() = Default::default();
    }
}

impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> {
    /// Returns a list view whose head is this stack entry.
    fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> {
        TraitObligationStackList::with(self)
    }

    /// The provisional-evaluation cache shared by the whole stack
    /// (threaded through from the previous entry).
    fn cache(&self) -> &'o ProvisionalEvaluationCache<'tcx> {
        self.previous.cache
    }

    /// Iterates from this entry toward the bottom of the stack.
    fn iter(&'o self) -> TraitObligationStackList<'o, 'tcx> {
        self.list()
    }

    /// Indicates that attempting to evaluate this stack entry
    /// required accessing something from the stack at depth `reached_depth`.
    fn update_reached_depth(&self, reached_depth: usize) {
        // `reached_depth` must refer to a frame strictly below this one;
        // reaching "under" the current frame would be a caller bug.
        assert!(
            self.depth > reached_depth,
            "invoked `update_reached_depth` with something under this stack: \
             self.depth={} reached_depth={}",
            self.depth, reached_depth,
        );
        debug!("update_reached_depth(reached_depth={})", reached_depth);
        // Walk from this frame down to (but not including) the frame at
        // `reached_depth`, marking each as a cycle participant. `min` keeps
        // the shallowest depth recorded so far on each frame.
        let mut p = self;
        while reached_depth < p.depth {
            debug!("update_reached_depth: marking {:?} as cycle participant", p.fresh_trait_ref);
            p.reached_depth.set(p.reached_depth.get().min(reached_depth));
            // `head` is `None` only for the empty-list sentinel at the very
            // bottom; the assert above guarantees we stop before reaching it.
            p = p.previous.head.unwrap();
        }
    }
}

/// Placeholder cache: stores nothing yet, but threads the `'tcx` lifetime
/// through so a real implementation can be added without churn.
#[derive(Default)]
struct ProvisionalEvaluationCache<'tcx> {
    _dummy: Vec<&'tcx ()>,
}

/// A (possibly empty) linked-list view of a `TraitObligationStack`, from some
/// head entry down to the bottom of the stack.
#[derive(Copy, Clone)]
struct TraitObligationStackList<'o, 'tcx: 'o> {
    cache: &'o ProvisionalEvaluationCache<'tcx>,
    head: Option<&'o TraitObligationStack<'o, 'tcx>>,
}

impl<'o, 'tcx> TraitObligationStackList<'o, 'tcx> {
    /// The empty list; carries only the shared cache.
    fn empty(cache: &'o ProvisionalEvaluationCache<'tcx>) -> TraitObligationStackList<'o, 'tcx> {
        TraitObligationStackList { cache, head: None }
    }

    /// A list headed at stack entry `r`.
    fn with(r: &'o TraitObligationStack<'o, 'tcx>) -> TraitObligationStackList<'o, 'tcx> {
        TraitObligationStackList { cache: r.cache(), head: Some(r) }
    }

    fn head(&self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
        self.head
    }

    /// Depth of the head entry, or 0 for the empty list.
    fn depth(&self) -> usize {
        if let Some(head) = self.head {
            head.depth
        } else {
            0
        }
    }
}

impl<'o, 'tcx> Iterator for TraitObligationStackList<'o, 'tcx> {
    type Item = &'o TraitObligationStack<'o, 'tcx>;

    fn next(&mut self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
        match self.head {
            Some(o) => {
                // Advance towards the bottom of the stack.
                *self = o.previous;
                Some(o)
            }
            None => None,
        }
    }
}

impl<'o, 'tcx> fmt::Debug for TraitObligationStack<'o, 'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "TraitObligationStack({:?})", self.obligation)
    }
}

/// A cached value tagged with the dep-graph node it was computed under, so
/// that reads of the cache are correctly recorded for incremental compilation.
#[derive(Clone, Eq, PartialEq)]
pub struct WithDepNode<T> {
    dep_node: DepNodeIndex,
    cached_value: T,
}

impl<T: Clone> WithDepNode<T> {
    pub fn new(dep_node: DepNodeIndex, cached_value: T) -> Self {
        WithDepNode {
            dep_node,
            cached_value,
        }
    }

    /// Returns the cached value, first registering a read of the dep-graph
    /// node it was computed under.
    pub fn get(&self, tcx: TyCtxt<'_, '_, '_>) -> T {
        tcx.dep_graph.read_index(self.dep_node);
        self.cached_value.clone()
    }
}
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(rustc_private)] #[macro_use] extern crate log; extern crate rustc; extern crate rustc_errors; extern crate rustc_target; extern crate syntax; extern crate syntax_pos; pub mod expand; pub static ALLOCATOR_METHODS: &[AllocatorMethod] = &[ AllocatorMethod { name: "alloc", inputs: &[AllocatorTy::Layout], output: AllocatorTy::ResultPtr, }, AllocatorMethod { name: "dealloc", inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout], output: AllocatorTy::Unit, }, AllocatorMethod { name: "realloc", inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout, AllocatorTy::Usize], output: AllocatorTy::ResultPtr, }, AllocatorMethod { name: "alloc_zeroed", inputs: &[AllocatorTy::Layout], output: AllocatorTy::ResultPtr, }, ]; pub struct AllocatorMethod { pub name: &'static str, pub inputs: &'static [AllocatorTy], pub output: AllocatorTy, } pub enum AllocatorTy { Layout, Ptr, ResultPtr, Unit, Usize, } [nll] librustc_allocator: enable feature(nll) for bootstrap // Copyright 2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. 
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// NLL is only enabled once past the stage0 bootstrap compiler.
#![cfg_attr(not(stage0), feature(nll))]
#![feature(rustc_private)]

#[macro_use]
extern crate log;
extern crate rustc;
extern crate rustc_errors;
extern crate rustc_target;
extern crate syntax;
extern crate syntax_pos;

pub mod expand;

/// The set of allocator methods the compiler generates shims for, together
/// with the abstract signature (inputs/output) each shim must have.
pub static ALLOCATOR_METHODS: &[AllocatorMethod] = &[
    AllocatorMethod {
        name: "alloc",
        inputs: &[AllocatorTy::Layout],
        output: AllocatorTy::ResultPtr,
    },
    AllocatorMethod {
        name: "dealloc",
        inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout],
        output: AllocatorTy::Unit,
    },
    AllocatorMethod {
        name: "realloc",
        inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout, AllocatorTy::Usize],
        output: AllocatorTy::ResultPtr,
    },
    AllocatorMethod {
        name: "alloc_zeroed",
        inputs: &[AllocatorTy::Layout],
        output: AllocatorTy::ResultPtr,
    },
];

/// Description of one allocator method: its name plus argument/return kinds.
pub struct AllocatorMethod {
    pub name: &'static str,
    pub inputs: &'static [AllocatorTy],
    pub output: AllocatorTy,
}

/// Abstract argument/return kinds used to describe allocator signatures.
pub enum AllocatorTy {
    Layout,
    Ptr,
    ResultPtr,
    Unit,
    Usize,
}
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The various pretty print routines. pub use self::UserIdentifiedItem::*; pub use self::PpSourceMode::*; pub use self::PpMode::*; use self::NodesMatchingUII::*; use {abort_on_err, driver}; use rustc::ty::{self, TyCtxt, GlobalArenas, Resolutions}; use rustc::cfg; use rustc::cfg::graphviz::LabelledCFG; use rustc::middle::cstore::CrateStore; use rustc::session::Session; use rustc::session::config::{Input, OutputFilenames}; use rustc_borrowck as borrowck; use rustc_borrowck::graphviz as borrowck_dot; use rustc_mir::util::{write_mir_pretty, write_mir_graphviz}; use syntax::ast::{self, BlockCheckMode}; use syntax::fold::{self, Folder}; use syntax::print::{pprust}; use syntax::print::pprust::PrintState; use syntax::ptr::P; use syntax::util::small_vector::SmallVector; use syntax_pos; use graphviz as dot; use std::cell::Cell; use std::fs::File; use std::io::{self, Write}; use std::option; use std::path::Path; use std::str::FromStr; use std::mem; use rustc::hir::map as hir_map; use rustc::hir::map::blocks; use rustc::hir; use rustc::hir::print as pprust_hir; use arena::DroplessArena; #[derive(Copy, Clone, PartialEq, Debug)] pub enum PpSourceMode { PpmNormal, PpmEveryBodyLoops, PpmExpanded, PpmIdentified, PpmExpandedIdentified, PpmExpandedHygiene, PpmTyped, } #[derive(Copy, Clone, PartialEq, Debug)] pub enum PpFlowGraphMode { Default, /// Drops the labels from the edges in the flowgraph output. 
This /// is mostly for use in the --unpretty flowgraph run-make tests, /// since the labels are largely uninteresting in those cases and /// have become a pain to maintain. UnlabelledEdges, } #[derive(Copy, Clone, PartialEq, Debug)] pub enum PpMode { PpmSource(PpSourceMode), PpmHir(PpSourceMode), PpmFlowGraph(PpFlowGraphMode), PpmMir, PpmMirCFG, } impl PpMode { pub fn needs_ast_map(&self, opt_uii: &Option<UserIdentifiedItem>) -> bool { match *self { PpmSource(PpmNormal) | PpmSource(PpmEveryBodyLoops) | PpmSource(PpmIdentified) => opt_uii.is_some(), PpmSource(PpmExpanded) | PpmSource(PpmExpandedIdentified) | PpmSource(PpmExpandedHygiene) | PpmHir(_) | PpmMir | PpmMirCFG | PpmFlowGraph(_) => true, PpmSource(PpmTyped) => panic!("invalid state"), } } pub fn needs_analysis(&self) -> bool { match *self { PpmMir | PpmMirCFG | PpmFlowGraph(_) => true, _ => false, } } } pub fn parse_pretty(sess: &Session, name: &str, extended: bool) -> (PpMode, Option<UserIdentifiedItem>) { let mut split = name.splitn(2, '='); let first = split.next().unwrap(); let opt_second = split.next(); let first = match (first, extended) { ("normal", _) => PpmSource(PpmNormal), ("identified", _) => PpmSource(PpmIdentified), ("everybody_loops", true) => PpmSource(PpmEveryBodyLoops), ("expanded", _) => PpmSource(PpmExpanded), ("expanded,identified", _) => PpmSource(PpmExpandedIdentified), ("expanded,hygiene", _) => PpmSource(PpmExpandedHygiene), ("hir", true) => PpmHir(PpmNormal), ("hir,identified", true) => PpmHir(PpmIdentified), ("hir,typed", true) => PpmHir(PpmTyped), ("mir", true) => PpmMir, ("mir-cfg", true) => PpmMirCFG, ("flowgraph", true) => PpmFlowGraph(PpFlowGraphMode::Default), ("flowgraph,unlabelled", true) => PpmFlowGraph(PpFlowGraphMode::UnlabelledEdges), _ => { if extended { sess.fatal(&format!("argument to `unpretty` must be one of `normal`, \ `expanded`, `flowgraph[,unlabelled]=<nodeid>`, \ `identified`, `expanded,identified`, `everybody_loops`, \ `hir`, `hir,identified`, `hir,typed`, 
or `mir`; got {}", name)); } else { sess.fatal(&format!("argument to `pretty` must be one of `normal`, `expanded`, \ `identified`, or `expanded,identified`; got {}", name)); } } }; let opt_second = opt_second.and_then(|s| s.parse::<UserIdentifiedItem>().ok()); (first, opt_second) } // This slightly awkward construction is to allow for each PpMode to // choose whether it needs to do analyses (which can consume the // Session) and then pass through the session (now attached to the // analysis results) on to the chosen pretty-printer, along with the // `&PpAnn` object. // // Note that since the `&PrinterSupport` is freshly constructed on each // call, it would not make sense to try to attach the lifetime of `self` // to the lifetime of the `&PrinterObject`. // // (The `use_once_payload` is working around the current lack of once // functions in the compiler.) impl PpSourceMode { /// Constructs a `PrinterSupport` object and passes it to `f`. fn call_with_pp_support<'tcx, A, F>(&self, sess: &'tcx Session, hir_map: Option<&hir_map::Map<'tcx>>, f: F) -> A where F: FnOnce(&PrinterSupport) -> A { match *self { PpmNormal | PpmEveryBodyLoops | PpmExpanded => { let annotation = NoAnn { sess, hir_map: hir_map.map(|m| m.clone()), }; f(&annotation) } PpmIdentified | PpmExpandedIdentified => { let annotation = IdentifiedAnnotation { sess, hir_map: hir_map.map(|m| m.clone()), }; f(&annotation) } PpmExpandedHygiene => { let annotation = HygieneAnnotation { sess, }; f(&annotation) } _ => panic!("Should use call_with_pp_support_hir"), } } fn call_with_pp_support_hir<'tcx, A, F>(&self, sess: &'tcx Session, cstore: &'tcx CrateStore, hir_map: &hir_map::Map<'tcx>, analysis: &ty::CrateAnalysis, resolutions: &Resolutions, arena: &'tcx DroplessArena, arenas: &'tcx GlobalArenas<'tcx>, output_filenames: &OutputFilenames, id: &str, f: F) -> A where F: FnOnce(&HirPrinterSupport, &hir::Crate) -> A { match *self { PpmNormal => { let annotation = NoAnn { sess, hir_map: Some(hir_map.clone()), }; 
f(&annotation, hir_map.forest.krate()) } PpmIdentified => { let annotation = IdentifiedAnnotation { sess, hir_map: Some(hir_map.clone()), }; f(&annotation, hir_map.forest.krate()) } PpmTyped => { abort_on_err(driver::phase_3_run_analysis_passes(sess, cstore, hir_map.clone(), analysis.clone(), resolutions.clone(), arena, arenas, id, output_filenames, |tcx, _, _, _| { let empty_tables = ty::TypeckTables::empty(None); let annotation = TypedAnnotation { tcx, tables: Cell::new(&empty_tables) }; let _ignore = tcx.dep_graph.in_ignore(); f(&annotation, hir_map.forest.krate()) }), sess) } _ => panic!("Should use call_with_pp_support"), } } } trait PrinterSupport: pprust::PpAnn { /// Provides a uniform interface for re-extracting a reference to a /// `Session` from a value that now owns it. fn sess<'a>(&'a self) -> &'a Session; /// Produces the pretty-print annotation object. /// /// (Rust does not yet support upcasting from a trait object to /// an object for one of its super-traits.) fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn; } trait HirPrinterSupport<'hir>: pprust_hir::PpAnn { /// Provides a uniform interface for re-extracting a reference to a /// `Session` from a value that now owns it. fn sess<'a>(&'a self) -> &'a Session; /// Provides a uniform interface for re-extracting a reference to an /// `hir_map::Map` from a value that now owns it. fn hir_map<'a>(&'a self) -> Option<&'a hir_map::Map<'hir>>; /// Produces the pretty-print annotation object. /// /// (Rust does not yet support upcasting from a trait object to /// an object for one of its super-traits.) fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn; /// Computes an user-readable representation of a path, if possible. 
fn node_path(&self, id: ast::NodeId) -> Option<String> { self.hir_map().and_then(|map| map.def_path_from_id(id)).map(|path| { path.data .into_iter() .map(|elem| elem.data.to_string()) .collect::<Vec<_>>() .join("::") }) } } struct NoAnn<'hir> { sess: &'hir Session, hir_map: Option<hir_map::Map<'hir>>, } impl<'hir> PrinterSupport for NoAnn<'hir> { fn sess<'a>(&'a self) -> &'a Session { self.sess } fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn { self } } impl<'hir> HirPrinterSupport<'hir> for NoAnn<'hir> { fn sess<'a>(&'a self) -> &'a Session { self.sess } fn hir_map<'a>(&'a self) -> Option<&'a hir_map::Map<'hir>> { self.hir_map.as_ref() } fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn { self } } impl<'hir> pprust::PpAnn for NoAnn<'hir> {} impl<'hir> pprust_hir::PpAnn for NoAnn<'hir> { fn nested(&self, state: &mut pprust_hir::State, nested: pprust_hir::Nested) -> io::Result<()> { if let Some(ref map) = self.hir_map { pprust_hir::PpAnn::nested(map, state, nested) } else { Ok(()) } } } struct IdentifiedAnnotation<'hir> { sess: &'hir Session, hir_map: Option<hir_map::Map<'hir>>, } impl<'hir> PrinterSupport for IdentifiedAnnotation<'hir> { fn sess<'a>(&'a self) -> &'a Session { self.sess } fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn { self } } impl<'hir> pprust::PpAnn for IdentifiedAnnotation<'hir> { fn pre(&self, s: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { match node { pprust::NodeExpr(_) => s.popen(), _ => Ok(()), } } fn post(&self, s: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { match node { pprust::NodeIdent(_) | pprust::NodeName(_) => Ok(()), pprust::NodeItem(item) => { s.s.space()?; s.synth_comment(item.id.to_string()) } pprust::NodeSubItem(id) => { s.s.space()?; s.synth_comment(id.to_string()) } pprust::NodeBlock(blk) => { s.s.space()?; s.synth_comment(format!("block {}", blk.id)) } pprust::NodeExpr(expr) => { s.s.space()?; s.synth_comment(expr.id.to_string())?; s.pclose() } pprust::NodePat(pat) => { s.s.space()?; 
s.synth_comment(format!("pat {}", pat.id)) } } } } impl<'hir> HirPrinterSupport<'hir> for IdentifiedAnnotation<'hir> { fn sess<'a>(&'a self) -> &'a Session { self.sess } fn hir_map<'a>(&'a self) -> Option<&'a hir_map::Map<'hir>> { self.hir_map.as_ref() } fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn { self } } impl<'hir> pprust_hir::PpAnn for IdentifiedAnnotation<'hir> { fn nested(&self, state: &mut pprust_hir::State, nested: pprust_hir::Nested) -> io::Result<()> { if let Some(ref map) = self.hir_map { pprust_hir::PpAnn::nested(map, state, nested) } else { Ok(()) } } fn pre(&self, s: &mut pprust_hir::State, node: pprust_hir::AnnNode) -> io::Result<()> { match node { pprust_hir::NodeExpr(_) => s.popen(), _ => Ok(()), } } fn post(&self, s: &mut pprust_hir::State, node: pprust_hir::AnnNode) -> io::Result<()> { match node { pprust_hir::NodeName(_) => Ok(()), pprust_hir::NodeItem(item) => { s.s.space()?; s.synth_comment(item.id.to_string()) } pprust_hir::NodeSubItem(id) => { s.s.space()?; s.synth_comment(id.to_string()) } pprust_hir::NodeBlock(blk) => { s.s.space()?; s.synth_comment(format!("block {}", blk.id)) } pprust_hir::NodeExpr(expr) => { s.s.space()?; s.synth_comment(expr.id.to_string())?; s.pclose() } pprust_hir::NodePat(pat) => { s.s.space()?; s.synth_comment(format!("pat {}", pat.id)) } } } } struct HygieneAnnotation<'a> { sess: &'a Session } impl<'a> PrinterSupport for HygieneAnnotation<'a> { fn sess(&self) -> &Session { self.sess } fn pp_ann(&self) -> &pprust::PpAnn { self } } impl<'a> pprust::PpAnn for HygieneAnnotation<'a> { fn post(&self, s: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { match node { pprust::NodeIdent(&ast::Ident { name, ctxt }) => { s.s.space()?; // FIXME #16420: this doesn't display the connections // between syntax contexts s.synth_comment(format!("{}{:?}", name.as_u32(), ctxt)) } pprust::NodeName(&name) => { s.s.space()?; s.synth_comment(name.as_u32().to_string()) } _ => Ok(()), } } } struct TypedAnnotation<'a, 
'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, tables: Cell<&'a ty::TypeckTables<'tcx>>, } impl<'b, 'tcx> HirPrinterSupport<'tcx> for TypedAnnotation<'b, 'tcx> { fn sess<'a>(&'a self) -> &'a Session { &self.tcx.sess } fn hir_map<'a>(&'a self) -> Option<&'a hir_map::Map<'tcx>> { Some(&self.tcx.hir) } fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn { self } fn node_path(&self, id: ast::NodeId) -> Option<String> { Some(self.tcx.node_path_str(id)) } } impl<'a, 'tcx> pprust_hir::PpAnn for TypedAnnotation<'a, 'tcx> { fn nested(&self, state: &mut pprust_hir::State, nested: pprust_hir::Nested) -> io::Result<()> { let old_tables = self.tables.get(); if let pprust_hir::Nested::Body(id) = nested { self.tables.set(self.tcx.body_tables(id)); } pprust_hir::PpAnn::nested(&self.tcx.hir, state, nested)?; self.tables.set(old_tables); Ok(()) } fn pre(&self, s: &mut pprust_hir::State, node: pprust_hir::AnnNode) -> io::Result<()> { match node { pprust_hir::NodeExpr(_) => s.popen(), _ => Ok(()), } } fn post(&self, s: &mut pprust_hir::State, node: pprust_hir::AnnNode) -> io::Result<()> { match node { pprust_hir::NodeExpr(expr) => { s.s.space()?; s.s.word("as")?; s.s.space()?; s.s.word(&self.tables.get().expr_ty(expr).to_string())?; s.pclose() } _ => Ok(()), } } } fn gather_flowgraph_variants(sess: &Session) -> Vec<borrowck_dot::Variant> { let print_loans = sess.opts.debugging_opts.flowgraph_print_loans; let print_moves = sess.opts.debugging_opts.flowgraph_print_moves; let print_assigns = sess.opts.debugging_opts.flowgraph_print_assigns; let print_all = sess.opts.debugging_opts.flowgraph_print_all; let mut variants = Vec::new(); if print_all || print_loans { variants.push(borrowck_dot::Loans); } if print_all || print_moves { variants.push(borrowck_dot::Moves); } if print_all || print_assigns { variants.push(borrowck_dot::Assigns); } variants } #[derive(Clone, Debug)] pub enum UserIdentifiedItem { ItemViaNode(ast::NodeId), ItemViaPath(Vec<String>), } impl FromStr for UserIdentifiedItem { type 
Err = (); fn from_str(s: &str) -> Result<UserIdentifiedItem, ()> { Ok(s.parse() .map(ast::NodeId::new) .map(ItemViaNode) .unwrap_or_else(|_| ItemViaPath(s.split("::").map(|s| s.to_string()).collect()))) } } enum NodesMatchingUII<'a, 'hir: 'a> { NodesMatchingDirect(option::IntoIter<ast::NodeId>), NodesMatchingSuffix(hir_map::NodesMatchingSuffix<'a, 'hir>), } impl<'a, 'hir> Iterator for NodesMatchingUII<'a, 'hir> { type Item = ast::NodeId; fn next(&mut self) -> Option<ast::NodeId> { match self { &mut NodesMatchingDirect(ref mut iter) => iter.next(), &mut NodesMatchingSuffix(ref mut iter) => iter.next(), } } } impl UserIdentifiedItem { fn reconstructed_input(&self) -> String { match *self { ItemViaNode(node_id) => node_id.to_string(), ItemViaPath(ref parts) => parts.join("::"), } } fn all_matching_node_ids<'a, 'hir>(&'a self, map: &'a hir_map::Map<'hir>) -> NodesMatchingUII<'a, 'hir> { match *self { ItemViaNode(node_id) => NodesMatchingDirect(Some(node_id).into_iter()), ItemViaPath(ref parts) => NodesMatchingSuffix(map.nodes_matching_suffix(&parts)), } } fn to_one_node_id(self, user_option: &str, sess: &Session, map: &hir_map::Map) -> ast::NodeId { let fail_because = |is_wrong_because| -> ast::NodeId { let message = format!("{} needs NodeId (int) or unique path suffix (b::c::d); got \ {}, which {}", user_option, self.reconstructed_input(), is_wrong_because); sess.fatal(&message) }; let mut saw_node = ast::DUMMY_NODE_ID; let mut seen = 0; for node in self.all_matching_node_ids(map) { saw_node = node; seen += 1; if seen > 1 { fail_because("does not resolve uniquely"); } } if seen == 0 { fail_because("does not resolve to any item"); } assert!(seen == 1); return saw_node; } } // Note: Also used by librustdoc, see PR #43348. Consider moving this struct elsewhere. // // FIXME: Currently the `everybody_loops` transformation is not applied to: // * `const fn`, due to issue #43636 that `loop` is not supported for const evaluation. We are // waiting for miri to fix that. 
// * `impl Trait`, due to issue #43869 that functions returning impl Trait cannot be diverging. // Solving this may require `!` to implement every trait, which relies on the an even more // ambitious form of the closed RFC #1637. See also [#34511]. // // [#34511]: https://github.com/rust-lang/rust/issues/34511#issuecomment-322340401 pub struct ReplaceBodyWithLoop { within_static_or_const: bool, } impl ReplaceBodyWithLoop { pub fn new() -> ReplaceBodyWithLoop { ReplaceBodyWithLoop { within_static_or_const: false } } fn run<R, F: FnOnce(&mut Self) -> R>(&mut self, is_const: bool, action: F) -> R { let old_const = mem::replace(&mut self.within_static_or_const, is_const); let ret = action(self); self.within_static_or_const = old_const; ret } fn should_ignore_fn(ret_ty: &ast::FnDecl) -> bool { if let ast::FunctionRetTy::Ty(ref ty) = ret_ty.output { fn involves_impl_trait(ty: &ast::Ty) -> bool { match ty.node { ast::TyKind::ImplTrait(_) => true, ast::TyKind::Slice(ref subty) | ast::TyKind::Array(ref subty, _) | ast::TyKind::Ptr(ast::MutTy { ty: ref subty, .. }) | ast::TyKind::Rptr(_, ast::MutTy { ty: ref subty, .. 
}) | ast::TyKind::Paren(ref subty) => involves_impl_trait(subty), ast::TyKind::Tup(ref tys) => any_involves_impl_trait(tys.iter()), ast::TyKind::Path(_, ref path) => path.segments.iter().any(|seg| { match seg.parameters.as_ref().map(|p| &**p) { None => false, Some(&ast::PathParameters::AngleBracketed(ref data)) => any_involves_impl_trait(data.types.iter()) || any_involves_impl_trait(data.bindings.iter().map(|b| &b.ty)), Some(&ast::PathParameters::Parenthesized(ref data)) => any_involves_impl_trait(data.inputs.iter()) || any_involves_impl_trait(data.output.iter()), } }), _ => false, } } fn any_involves_impl_trait<'a, I: Iterator<Item = &'a P<ast::Ty>>>(mut it: I) -> bool { it.any(|subty| involves_impl_trait(subty)) } involves_impl_trait(ty) } else { false } } } impl fold::Folder for ReplaceBodyWithLoop { fn fold_item_kind(&mut self, i: ast::ItemKind) -> ast::ItemKind { let is_const = match i { ast::ItemKind::Static(..) | ast::ItemKind::Const(..) => true, ast::ItemKind::Fn(ref decl, _, ref constness, _, _, _) => constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), _ => false, }; self.run(is_const, |s| fold::noop_fold_item_kind(i, s)) } fn fold_trait_item(&mut self, i: ast::TraitItem) -> SmallVector<ast::TraitItem> { let is_const = match i.node { ast::TraitItemKind::Const(..) => true, ast::TraitItemKind::Method(ast::MethodSig { ref decl, ref constness, .. }, _) => constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), _ => false, }; self.run(is_const, |s| fold::noop_fold_trait_item(i, s)) } fn fold_impl_item(&mut self, i: ast::ImplItem) -> SmallVector<ast::ImplItem> { let is_const = match i.node { ast::ImplItemKind::Const(..) => true, ast::ImplItemKind::Method(ast::MethodSig { ref decl, ref constness, .. 
}, _) => constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), _ => false, }; self.run(is_const, |s| fold::noop_fold_impl_item(i, s)) } fn fold_block(&mut self, b: P<ast::Block>) -> P<ast::Block> { fn expr_to_block(rules: ast::BlockCheckMode, e: Option<P<ast::Expr>>) -> P<ast::Block> { P(ast::Block { stmts: e.map(|e| { ast::Stmt { id: ast::DUMMY_NODE_ID, span: e.span, node: ast::StmtKind::Expr(e), } }) .into_iter() .collect(), rules, id: ast::DUMMY_NODE_ID, span: syntax_pos::DUMMY_SP, }) } if !self.within_static_or_const { let empty_block = expr_to_block(BlockCheckMode::Default, None); let loop_expr = P(ast::Expr { node: ast::ExprKind::Loop(empty_block, None), id: ast::DUMMY_NODE_ID, span: syntax_pos::DUMMY_SP, attrs: ast::ThinVec::new(), }); expr_to_block(b.rules, Some(loop_expr)) } else { fold::noop_fold_block(b, self) } } // in general the pretty printer processes unexpanded code, so // we override the default `fold_mac` method which panics. fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac { fold::noop_fold_mac(mac, self) } } fn print_flowgraph<'a, 'tcx, W: Write>(variants: Vec<borrowck_dot::Variant>, tcx: TyCtxt<'a, 'tcx, 'tcx>, code: blocks::Code<'tcx>, mode: PpFlowGraphMode, mut out: W) -> io::Result<()> { let body_id = match code { blocks::Code::Expr(expr) => { // Find the function this expression is from. 
let mut node_id = expr.id; loop { let node = tcx.hir.get(node_id); if let Some(n) = hir::map::blocks::FnLikeNode::from_node(node) { break n.body(); } let parent = tcx.hir.get_parent_node(node_id); assert!(node_id != parent); node_id = parent; } } blocks::Code::FnLike(fn_like) => fn_like.body(), }; let body = tcx.hir.body(body_id); let cfg = cfg::CFG::new(tcx, &body); let labelled_edges = mode != PpFlowGraphMode::UnlabelledEdges; let lcfg = LabelledCFG { tcx, cfg: &cfg, name: format!("node_{}", code.id()), labelled_edges, }; match code { _ if variants.is_empty() => { let r = dot::render(&lcfg, &mut out); return expand_err_details(r); } blocks::Code::Expr(_) => { tcx.sess.err("--pretty flowgraph with -Z flowgraph-print annotations requires \ fn-like node id."); return Ok(()); } blocks::Code::FnLike(fn_like) => { let (bccx, analysis_data) = borrowck::build_borrowck_dataflow_data_for_fn(tcx, fn_like.body(), &cfg); let lcfg = borrowck_dot::DataflowLabeller { inner: lcfg, variants, borrowck_ctxt: &bccx, analysis_data: &analysis_data, }; let r = dot::render(&lcfg, &mut out); return expand_err_details(r); } } fn expand_err_details(r: io::Result<()>) -> io::Result<()> { r.map_err(|ioerr| { io::Error::new(io::ErrorKind::Other, format!("graphviz::render failed: {}", ioerr)) }) } } pub fn fold_crate(krate: ast::Crate, ppm: PpMode) -> ast::Crate { if let PpmSource(PpmEveryBodyLoops) = ppm { let mut fold = ReplaceBodyWithLoop::new(); fold.fold_crate(krate) } else { krate } } fn get_source(input: &Input, sess: &Session) -> (Vec<u8>, String) { let src_name = driver::source_name(input); let src = sess.codemap() .get_filemap(&src_name) .unwrap() .src .as_ref() .unwrap() .as_bytes() .to_vec(); (src, src_name) } fn write_output(out: Vec<u8>, ofile: Option<&Path>) { match ofile { None => print!("{}", String::from_utf8(out).unwrap()), Some(p) => { match File::create(p) { Ok(mut w) => w.write_all(&out).unwrap(), Err(e) => panic!("print-print failed to open {} due to {}", p.display(), e), 
} } } } pub fn print_after_parsing(sess: &Session, input: &Input, krate: &ast::Crate, ppm: PpMode, ofile: Option<&Path>) { let (src, src_name) = get_source(input, sess); let mut rdr = &*src; let mut out = Vec::new(); if let PpmSource(s) = ppm { // Silently ignores an identified node. let out: &mut Write = &mut out; s.call_with_pp_support(sess, None, move |annotation| { debug!("pretty printing source code {:?}", s); let sess = annotation.sess(); pprust::print_crate(sess.codemap(), &sess.parse_sess, krate, src_name.to_string(), &mut rdr, box out, annotation.pp_ann(), false) }) .unwrap() } else { unreachable!(); }; write_output(out, ofile); } pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, cstore: &'tcx CrateStore, hir_map: &hir_map::Map<'tcx>, analysis: &ty::CrateAnalysis, resolutions: &Resolutions, input: &Input, krate: &ast::Crate, crate_name: &str, ppm: PpMode, arena: &'tcx DroplessArena, arenas: &'tcx GlobalArenas<'tcx>, output_filenames: &OutputFilenames, opt_uii: Option<UserIdentifiedItem>, ofile: Option<&Path>) { if ppm.needs_analysis() { print_with_analysis(sess, cstore, hir_map, analysis, resolutions, crate_name, arena, arenas, output_filenames, ppm, opt_uii, ofile); return; } let (src, src_name) = get_source(input, sess); let mut rdr = &src[..]; let mut out = Vec::new(); match (ppm, opt_uii) { (PpmSource(s), _) => { // Silently ignores an identified node. 
let out: &mut Write = &mut out; s.call_with_pp_support(sess, Some(hir_map), move |annotation| { debug!("pretty printing source code {:?}", s); let sess = annotation.sess(); pprust::print_crate(sess.codemap(), &sess.parse_sess, krate, src_name.to_string(), &mut rdr, box out, annotation.pp_ann(), true) }) } (PpmHir(s), None) => { let out: &mut Write = &mut out; s.call_with_pp_support_hir(sess, cstore, hir_map, analysis, resolutions, arena, arenas, output_filenames, crate_name, move |annotation, krate| { debug!("pretty printing source code {:?}", s); let sess = annotation.sess(); pprust_hir::print_crate(sess.codemap(), &sess.parse_sess, krate, src_name.to_string(), &mut rdr, box out, annotation.pp_ann(), true) }) } (PpmHir(s), Some(uii)) => { let out: &mut Write = &mut out; s.call_with_pp_support_hir(sess, cstore, hir_map, analysis, resolutions, arena, arenas, output_filenames, crate_name, move |annotation, _| { debug!("pretty printing source code {:?}", s); let sess = annotation.sess(); let hir_map = annotation.hir_map().expect("--unpretty missing HIR map"); let mut pp_state = pprust_hir::State::new_from_input(sess.codemap(), &sess.parse_sess, src_name.to_string(), &mut rdr, box out, annotation.pp_ann(), true); for node_id in uii.all_matching_node_ids(hir_map) { let node = hir_map.get(node_id); pp_state.print_node(node)?; pp_state.s.space()?; let path = annotation.node_path(node_id) .expect("--unpretty missing node paths"); pp_state.synth_comment(path)?; pp_state.s.hardbreak()?; } pp_state.s.eof() }) } _ => unreachable!(), } .unwrap(); write_output(out, ofile); } // In an ideal world, this would be a public function called by the driver after // analsysis is performed. However, we want to call `phase_3_run_analysis_passes` // with a different callback than the standard driver, so that isn't easy. // Instead, we call that function ourselves. 
/// Runs the analysis passes itself (see the comment above) and then
/// pretty-prints MIR, a MIR CFG, or a borrowck flowgraph for either the
/// whole crate or a single user-identified item, writing the result to
/// `ofile` (stdout when `None`). Only analysis-requiring `PpMode`s are
/// valid here; others are a caller bug (`unreachable!`).
fn print_with_analysis<'tcx, 'a: 'tcx>(sess: &'a Session,
                                       cstore: &'a CrateStore,
                                       hir_map: &hir_map::Map<'tcx>,
                                       analysis: &ty::CrateAnalysis,
                                       resolutions: &Resolutions,
                                       crate_name: &str,
                                       arena: &'tcx DroplessArena,
                                       arenas: &'tcx GlobalArenas<'tcx>,
                                       output_filenames: &OutputFilenames,
                                       ppm: PpMode,
                                       uii: Option<UserIdentifiedItem>,
                                       ofile: Option<&Path>) {
    // Resolve the user-identified item (if any) to a single NodeId up front;
    // `to_one_node_id` aborts the session if it is missing or ambiguous.
    let nodeid = if let Some(uii) = uii {
        debug!("pretty printing for {:?}", uii);
        Some(uii.to_one_node_id("--unpretty", sess, &hir_map))
    } else {
        debug!("pretty printing for whole crate");
        None
    };
    let mut out = Vec::new();

    abort_on_err(driver::phase_3_run_analysis_passes(sess,
                                                     cstore,
                                                     hir_map.clone(),
                                                     analysis.clone(),
                                                     resolutions.clone(),
                                                     arena,
                                                     arenas,
                                                     crate_name,
                                                     output_filenames,
                                                     |tcx, _, _, _| {
        match ppm {
            PpmMir | PpmMirCFG => {
                // With a NodeId, print just that item's MIR; otherwise the
                // whole crate's (a `None` DefId to the writer helpers).
                if let Some(nodeid) = nodeid {
                    let def_id = tcx.hir.local_def_id(nodeid);
                    match ppm {
                        PpmMir => write_mir_pretty(tcx, Some(def_id), &mut out),
                        PpmMirCFG => write_mir_graphviz(tcx, Some(def_id), &mut out),
                        _ => unreachable!(),
                    }?;
                } else {
                    match ppm {
                        PpmMir => write_mir_pretty(tcx, None, &mut out),
                        PpmMirCFG => write_mir_graphviz(tcx, None, &mut out),
                        _ => unreachable!(),
                    }?;
                }
                Ok(())
            }
            PpmFlowGraph(mode) => {
                // Flowgraph printing requires a specific node, and that node
                // must be block-, fn-, or method-like.
                let nodeid =
                    nodeid.expect("`pretty flowgraph=..` needs NodeId (int) or unique path \
                                   suffix (b::c::d)");
                let node = tcx.hir.find(nodeid).unwrap_or_else(|| {
                    tcx.sess.fatal(&format!("--pretty flowgraph couldn't find id: {}", nodeid))
                });

                match blocks::Code::from_node(&tcx.hir, nodeid) {
                    Some(code) => {
                        let variants = gather_flowgraph_variants(tcx.sess);

                        let out: &mut Write = &mut out;

                        print_flowgraph(variants, tcx, code, mode, out)
                    }
                    None => {
                        let message = format!("--pretty=flowgraph needs block, fn, or method; \
                                               got {:?}",
                                              node);

                        tcx.sess.span_fatal(tcx.hir.span(nodeid), &message)
                    }
                }
            }
            _ => unreachable!(),
        }
    }),
                 sess)
        .unwrap();

    write_output(out, ofile);
}
add -Zunpretty=hir-tree This uses the debug impls to dump the raw HIR.
Particularly useful when learning how the compiler works. // Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The various pretty print routines. pub use self::UserIdentifiedItem::*; pub use self::PpSourceMode::*; pub use self::PpMode::*; use self::NodesMatchingUII::*; use {abort_on_err, driver}; use rustc::ty::{self, TyCtxt, GlobalArenas, Resolutions}; use rustc::cfg; use rustc::cfg::graphviz::LabelledCFG; use rustc::middle::cstore::CrateStore; use rustc::session::Session; use rustc::session::config::{Input, OutputFilenames}; use rustc_borrowck as borrowck; use rustc_borrowck::graphviz as borrowck_dot; use rustc_mir::util::{write_mir_pretty, write_mir_graphviz}; use syntax::ast::{self, BlockCheckMode}; use syntax::fold::{self, Folder}; use syntax::print::{pprust}; use syntax::print::pprust::PrintState; use syntax::ptr::P; use syntax::util::small_vector::SmallVector; use syntax_pos; use graphviz as dot; use std::cell::Cell; use std::fs::File; use std::io::{self, Write}; use std::option; use std::path::Path; use std::str::FromStr; use std::mem; use rustc::hir::map as hir_map; use rustc::hir::map::blocks; use rustc::hir; use rustc::hir::print as pprust_hir; use arena::DroplessArena; #[derive(Copy, Clone, PartialEq, Debug)] pub enum PpSourceMode { PpmNormal, PpmEveryBodyLoops, PpmExpanded, PpmIdentified, PpmExpandedIdentified, PpmExpandedHygiene, PpmTyped, } #[derive(Copy, Clone, PartialEq, Debug)] pub enum PpFlowGraphMode { Default, /// Drops the labels from the edges in the flowgraph output. 
This /// is mostly for use in the --unpretty flowgraph run-make tests, /// since the labels are largely uninteresting in those cases and /// have become a pain to maintain. UnlabelledEdges, } #[derive(Copy, Clone, PartialEq, Debug)] pub enum PpMode { PpmSource(PpSourceMode), PpmHir(PpSourceMode), PpmHirTree(PpSourceMode), PpmFlowGraph(PpFlowGraphMode), PpmMir, PpmMirCFG, } impl PpMode { pub fn needs_ast_map(&self, opt_uii: &Option<UserIdentifiedItem>) -> bool { match *self { PpmSource(PpmNormal) | PpmSource(PpmEveryBodyLoops) | PpmSource(PpmIdentified) => opt_uii.is_some(), PpmSource(PpmExpanded) | PpmSource(PpmExpandedIdentified) | PpmSource(PpmExpandedHygiene) | PpmHir(_) | PpmHirTree(_) | PpmMir | PpmMirCFG | PpmFlowGraph(_) => true, PpmSource(PpmTyped) => panic!("invalid state"), } } pub fn needs_analysis(&self) -> bool { match *self { PpmMir | PpmMirCFG | PpmFlowGraph(_) => true, _ => false, } } } pub fn parse_pretty(sess: &Session, name: &str, extended: bool) -> (PpMode, Option<UserIdentifiedItem>) { let mut split = name.splitn(2, '='); let first = split.next().unwrap(); let opt_second = split.next(); let first = match (first, extended) { ("normal", _) => PpmSource(PpmNormal), ("identified", _) => PpmSource(PpmIdentified), ("everybody_loops", true) => PpmSource(PpmEveryBodyLoops), ("expanded", _) => PpmSource(PpmExpanded), ("expanded,identified", _) => PpmSource(PpmExpandedIdentified), ("expanded,hygiene", _) => PpmSource(PpmExpandedHygiene), ("hir", true) => PpmHir(PpmNormal), ("hir,identified", true) => PpmHir(PpmIdentified), ("hir,typed", true) => PpmHir(PpmTyped), ("hir-tree", true) => PpmHirTree(PpmNormal), ("mir", true) => PpmMir, ("mir-cfg", true) => PpmMirCFG, ("flowgraph", true) => PpmFlowGraph(PpFlowGraphMode::Default), ("flowgraph,unlabelled", true) => PpmFlowGraph(PpFlowGraphMode::UnlabelledEdges), _ => { if extended { sess.fatal(&format!("argument to `unpretty` must be one of `normal`, \ `expanded`, `flowgraph[,unlabelled]=<nodeid>`, \ 
`identified`, `expanded,identified`, `everybody_loops`, \ `hir`, `hir,identified`, `hir,typed`, or `mir`; got {}", name)); } else { sess.fatal(&format!("argument to `pretty` must be one of `normal`, `expanded`, \ `identified`, or `expanded,identified`; got {}", name)); } } }; let opt_second = opt_second.and_then(|s| s.parse::<UserIdentifiedItem>().ok()); (first, opt_second) } // This slightly awkward construction is to allow for each PpMode to // choose whether it needs to do analyses (which can consume the // Session) and then pass through the session (now attached to the // analysis results) on to the chosen pretty-printer, along with the // `&PpAnn` object. // // Note that since the `&PrinterSupport` is freshly constructed on each // call, it would not make sense to try to attach the lifetime of `self` // to the lifetime of the `&PrinterObject`. // // (The `use_once_payload` is working around the current lack of once // functions in the compiler.) impl PpSourceMode { /// Constructs a `PrinterSupport` object and passes it to `f`. 
fn call_with_pp_support<'tcx, A, F>(&self, sess: &'tcx Session, hir_map: Option<&hir_map::Map<'tcx>>, f: F) -> A where F: FnOnce(&PrinterSupport) -> A { match *self { PpmNormal | PpmEveryBodyLoops | PpmExpanded => { let annotation = NoAnn { sess, hir_map: hir_map.map(|m| m.clone()), }; f(&annotation) } PpmIdentified | PpmExpandedIdentified => { let annotation = IdentifiedAnnotation { sess, hir_map: hir_map.map(|m| m.clone()), }; f(&annotation) } PpmExpandedHygiene => { let annotation = HygieneAnnotation { sess, }; f(&annotation) } _ => panic!("Should use call_with_pp_support_hir"), } } fn call_with_pp_support_hir<'tcx, A, F>(&self, sess: &'tcx Session, cstore: &'tcx CrateStore, hir_map: &hir_map::Map<'tcx>, analysis: &ty::CrateAnalysis, resolutions: &Resolutions, arena: &'tcx DroplessArena, arenas: &'tcx GlobalArenas<'tcx>, output_filenames: &OutputFilenames, id: &str, f: F) -> A where F: FnOnce(&HirPrinterSupport, &hir::Crate) -> A { match *self { PpmNormal => { let annotation = NoAnn { sess, hir_map: Some(hir_map.clone()), }; f(&annotation, hir_map.forest.krate()) } PpmIdentified => { let annotation = IdentifiedAnnotation { sess, hir_map: Some(hir_map.clone()), }; f(&annotation, hir_map.forest.krate()) } PpmTyped => { abort_on_err(driver::phase_3_run_analysis_passes(sess, cstore, hir_map.clone(), analysis.clone(), resolutions.clone(), arena, arenas, id, output_filenames, |tcx, _, _, _| { let empty_tables = ty::TypeckTables::empty(None); let annotation = TypedAnnotation { tcx, tables: Cell::new(&empty_tables) }; let _ignore = tcx.dep_graph.in_ignore(); f(&annotation, hir_map.forest.krate()) }), sess) } _ => panic!("Should use call_with_pp_support"), } } } trait PrinterSupport: pprust::PpAnn { /// Provides a uniform interface for re-extracting a reference to a /// `Session` from a value that now owns it. fn sess<'a>(&'a self) -> &'a Session; /// Produces the pretty-print annotation object. 
/// /// (Rust does not yet support upcasting from a trait object to /// an object for one of its super-traits.) fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn; } trait HirPrinterSupport<'hir>: pprust_hir::PpAnn { /// Provides a uniform interface for re-extracting a reference to a /// `Session` from a value that now owns it. fn sess<'a>(&'a self) -> &'a Session; /// Provides a uniform interface for re-extracting a reference to an /// `hir_map::Map` from a value that now owns it. fn hir_map<'a>(&'a self) -> Option<&'a hir_map::Map<'hir>>; /// Produces the pretty-print annotation object. /// /// (Rust does not yet support upcasting from a trait object to /// an object for one of its super-traits.) fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn; /// Computes an user-readable representation of a path, if possible. fn node_path(&self, id: ast::NodeId) -> Option<String> { self.hir_map().and_then(|map| map.def_path_from_id(id)).map(|path| { path.data .into_iter() .map(|elem| elem.data.to_string()) .collect::<Vec<_>>() .join("::") }) } } struct NoAnn<'hir> { sess: &'hir Session, hir_map: Option<hir_map::Map<'hir>>, } impl<'hir> PrinterSupport for NoAnn<'hir> { fn sess<'a>(&'a self) -> &'a Session { self.sess } fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn { self } } impl<'hir> HirPrinterSupport<'hir> for NoAnn<'hir> { fn sess<'a>(&'a self) -> &'a Session { self.sess } fn hir_map<'a>(&'a self) -> Option<&'a hir_map::Map<'hir>> { self.hir_map.as_ref() } fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn { self } } impl<'hir> pprust::PpAnn for NoAnn<'hir> {} impl<'hir> pprust_hir::PpAnn for NoAnn<'hir> { fn nested(&self, state: &mut pprust_hir::State, nested: pprust_hir::Nested) -> io::Result<()> { if let Some(ref map) = self.hir_map { pprust_hir::PpAnn::nested(map, state, nested) } else { Ok(()) } } } struct IdentifiedAnnotation<'hir> { sess: &'hir Session, hir_map: Option<hir_map::Map<'hir>>, } impl<'hir> PrinterSupport for IdentifiedAnnotation<'hir> { fn sess<'a>(&'a self) -> 
&'a Session { self.sess } fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn { self } } impl<'hir> pprust::PpAnn for IdentifiedAnnotation<'hir> { fn pre(&self, s: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { match node { pprust::NodeExpr(_) => s.popen(), _ => Ok(()), } } fn post(&self, s: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { match node { pprust::NodeIdent(_) | pprust::NodeName(_) => Ok(()), pprust::NodeItem(item) => { s.s.space()?; s.synth_comment(item.id.to_string()) } pprust::NodeSubItem(id) => { s.s.space()?; s.synth_comment(id.to_string()) } pprust::NodeBlock(blk) => { s.s.space()?; s.synth_comment(format!("block {}", blk.id)) } pprust::NodeExpr(expr) => { s.s.space()?; s.synth_comment(expr.id.to_string())?; s.pclose() } pprust::NodePat(pat) => { s.s.space()?; s.synth_comment(format!("pat {}", pat.id)) } } } } impl<'hir> HirPrinterSupport<'hir> for IdentifiedAnnotation<'hir> { fn sess<'a>(&'a self) -> &'a Session { self.sess } fn hir_map<'a>(&'a self) -> Option<&'a hir_map::Map<'hir>> { self.hir_map.as_ref() } fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn { self } } impl<'hir> pprust_hir::PpAnn for IdentifiedAnnotation<'hir> { fn nested(&self, state: &mut pprust_hir::State, nested: pprust_hir::Nested) -> io::Result<()> { if let Some(ref map) = self.hir_map { pprust_hir::PpAnn::nested(map, state, nested) } else { Ok(()) } } fn pre(&self, s: &mut pprust_hir::State, node: pprust_hir::AnnNode) -> io::Result<()> { match node { pprust_hir::NodeExpr(_) => s.popen(), _ => Ok(()), } } fn post(&self, s: &mut pprust_hir::State, node: pprust_hir::AnnNode) -> io::Result<()> { match node { pprust_hir::NodeName(_) => Ok(()), pprust_hir::NodeItem(item) => { s.s.space()?; s.synth_comment(item.id.to_string()) } pprust_hir::NodeSubItem(id) => { s.s.space()?; s.synth_comment(id.to_string()) } pprust_hir::NodeBlock(blk) => { s.s.space()?; s.synth_comment(format!("block {}", blk.id)) } pprust_hir::NodeExpr(expr) => { s.s.space()?; 
s.synth_comment(expr.id.to_string())?; s.pclose() } pprust_hir::NodePat(pat) => { s.s.space()?; s.synth_comment(format!("pat {}", pat.id)) } } } } struct HygieneAnnotation<'a> { sess: &'a Session } impl<'a> PrinterSupport for HygieneAnnotation<'a> { fn sess(&self) -> &Session { self.sess } fn pp_ann(&self) -> &pprust::PpAnn { self } } impl<'a> pprust::PpAnn for HygieneAnnotation<'a> { fn post(&self, s: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { match node { pprust::NodeIdent(&ast::Ident { name, ctxt }) => { s.s.space()?; // FIXME #16420: this doesn't display the connections // between syntax contexts s.synth_comment(format!("{}{:?}", name.as_u32(), ctxt)) } pprust::NodeName(&name) => { s.s.space()?; s.synth_comment(name.as_u32().to_string()) } _ => Ok(()), } } } struct TypedAnnotation<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, tables: Cell<&'a ty::TypeckTables<'tcx>>, } impl<'b, 'tcx> HirPrinterSupport<'tcx> for TypedAnnotation<'b, 'tcx> { fn sess<'a>(&'a self) -> &'a Session { &self.tcx.sess } fn hir_map<'a>(&'a self) -> Option<&'a hir_map::Map<'tcx>> { Some(&self.tcx.hir) } fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn { self } fn node_path(&self, id: ast::NodeId) -> Option<String> { Some(self.tcx.node_path_str(id)) } } impl<'a, 'tcx> pprust_hir::PpAnn for TypedAnnotation<'a, 'tcx> { fn nested(&self, state: &mut pprust_hir::State, nested: pprust_hir::Nested) -> io::Result<()> { let old_tables = self.tables.get(); if let pprust_hir::Nested::Body(id) = nested { self.tables.set(self.tcx.body_tables(id)); } pprust_hir::PpAnn::nested(&self.tcx.hir, state, nested)?; self.tables.set(old_tables); Ok(()) } fn pre(&self, s: &mut pprust_hir::State, node: pprust_hir::AnnNode) -> io::Result<()> { match node { pprust_hir::NodeExpr(_) => s.popen(), _ => Ok(()), } } fn post(&self, s: &mut pprust_hir::State, node: pprust_hir::AnnNode) -> io::Result<()> { match node { pprust_hir::NodeExpr(expr) => { s.s.space()?; s.s.word("as")?; s.s.space()?; 
s.s.word(&self.tables.get().expr_ty(expr).to_string())?; s.pclose() } _ => Ok(()), } } } fn gather_flowgraph_variants(sess: &Session) -> Vec<borrowck_dot::Variant> { let print_loans = sess.opts.debugging_opts.flowgraph_print_loans; let print_moves = sess.opts.debugging_opts.flowgraph_print_moves; let print_assigns = sess.opts.debugging_opts.flowgraph_print_assigns; let print_all = sess.opts.debugging_opts.flowgraph_print_all; let mut variants = Vec::new(); if print_all || print_loans { variants.push(borrowck_dot::Loans); } if print_all || print_moves { variants.push(borrowck_dot::Moves); } if print_all || print_assigns { variants.push(borrowck_dot::Assigns); } variants } #[derive(Clone, Debug)] pub enum UserIdentifiedItem { ItemViaNode(ast::NodeId), ItemViaPath(Vec<String>), } impl FromStr for UserIdentifiedItem { type Err = (); fn from_str(s: &str) -> Result<UserIdentifiedItem, ()> { Ok(s.parse() .map(ast::NodeId::new) .map(ItemViaNode) .unwrap_or_else(|_| ItemViaPath(s.split("::").map(|s| s.to_string()).collect()))) } } enum NodesMatchingUII<'a, 'hir: 'a> { NodesMatchingDirect(option::IntoIter<ast::NodeId>), NodesMatchingSuffix(hir_map::NodesMatchingSuffix<'a, 'hir>), } impl<'a, 'hir> Iterator for NodesMatchingUII<'a, 'hir> { type Item = ast::NodeId; fn next(&mut self) -> Option<ast::NodeId> { match self { &mut NodesMatchingDirect(ref mut iter) => iter.next(), &mut NodesMatchingSuffix(ref mut iter) => iter.next(), } } } impl UserIdentifiedItem { fn reconstructed_input(&self) -> String { match *self { ItemViaNode(node_id) => node_id.to_string(), ItemViaPath(ref parts) => parts.join("::"), } } fn all_matching_node_ids<'a, 'hir>(&'a self, map: &'a hir_map::Map<'hir>) -> NodesMatchingUII<'a, 'hir> { match *self { ItemViaNode(node_id) => NodesMatchingDirect(Some(node_id).into_iter()), ItemViaPath(ref parts) => NodesMatchingSuffix(map.nodes_matching_suffix(&parts)), } } fn to_one_node_id(self, user_option: &str, sess: &Session, map: &hir_map::Map) -> ast::NodeId { let 
fail_because = |is_wrong_because| -> ast::NodeId { let message = format!("{} needs NodeId (int) or unique path suffix (b::c::d); got \ {}, which {}", user_option, self.reconstructed_input(), is_wrong_because); sess.fatal(&message) }; let mut saw_node = ast::DUMMY_NODE_ID; let mut seen = 0; for node in self.all_matching_node_ids(map) { saw_node = node; seen += 1; if seen > 1 { fail_because("does not resolve uniquely"); } } if seen == 0 { fail_because("does not resolve to any item"); } assert!(seen == 1); return saw_node; } } // Note: Also used by librustdoc, see PR #43348. Consider moving this struct elsewhere. // // FIXME: Currently the `everybody_loops` transformation is not applied to: // * `const fn`, due to issue #43636 that `loop` is not supported for const evaluation. We are // waiting for miri to fix that. // * `impl Trait`, due to issue #43869 that functions returning impl Trait cannot be diverging. // Solving this may require `!` to implement every trait, which relies on the an even more // ambitious form of the closed RFC #1637. See also [#34511]. // // [#34511]: https://github.com/rust-lang/rust/issues/34511#issuecomment-322340401 pub struct ReplaceBodyWithLoop { within_static_or_const: bool, } impl ReplaceBodyWithLoop { pub fn new() -> ReplaceBodyWithLoop { ReplaceBodyWithLoop { within_static_or_const: false } } fn run<R, F: FnOnce(&mut Self) -> R>(&mut self, is_const: bool, action: F) -> R { let old_const = mem::replace(&mut self.within_static_or_const, is_const); let ret = action(self); self.within_static_or_const = old_const; ret } fn should_ignore_fn(ret_ty: &ast::FnDecl) -> bool { if let ast::FunctionRetTy::Ty(ref ty) = ret_ty.output { fn involves_impl_trait(ty: &ast::Ty) -> bool { match ty.node { ast::TyKind::ImplTrait(_) => true, ast::TyKind::Slice(ref subty) | ast::TyKind::Array(ref subty, _) | ast::TyKind::Ptr(ast::MutTy { ty: ref subty, .. }) | ast::TyKind::Rptr(_, ast::MutTy { ty: ref subty, .. 
}) | ast::TyKind::Paren(ref subty) => involves_impl_trait(subty), ast::TyKind::Tup(ref tys) => any_involves_impl_trait(tys.iter()), ast::TyKind::Path(_, ref path) => path.segments.iter().any(|seg| { match seg.parameters.as_ref().map(|p| &**p) { None => false, Some(&ast::PathParameters::AngleBracketed(ref data)) => any_involves_impl_trait(data.types.iter()) || any_involves_impl_trait(data.bindings.iter().map(|b| &b.ty)), Some(&ast::PathParameters::Parenthesized(ref data)) => any_involves_impl_trait(data.inputs.iter()) || any_involves_impl_trait(data.output.iter()), } }), _ => false, } } fn any_involves_impl_trait<'a, I: Iterator<Item = &'a P<ast::Ty>>>(mut it: I) -> bool { it.any(|subty| involves_impl_trait(subty)) } involves_impl_trait(ty) } else { false } } } impl fold::Folder for ReplaceBodyWithLoop { fn fold_item_kind(&mut self, i: ast::ItemKind) -> ast::ItemKind { let is_const = match i { ast::ItemKind::Static(..) | ast::ItemKind::Const(..) => true, ast::ItemKind::Fn(ref decl, _, ref constness, _, _, _) => constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), _ => false, }; self.run(is_const, |s| fold::noop_fold_item_kind(i, s)) } fn fold_trait_item(&mut self, i: ast::TraitItem) -> SmallVector<ast::TraitItem> { let is_const = match i.node { ast::TraitItemKind::Const(..) => true, ast::TraitItemKind::Method(ast::MethodSig { ref decl, ref constness, .. }, _) => constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), _ => false, }; self.run(is_const, |s| fold::noop_fold_trait_item(i, s)) } fn fold_impl_item(&mut self, i: ast::ImplItem) -> SmallVector<ast::ImplItem> { let is_const = match i.node { ast::ImplItemKind::Const(..) => true, ast::ImplItemKind::Method(ast::MethodSig { ref decl, ref constness, .. 
}, _) => constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), _ => false, }; self.run(is_const, |s| fold::noop_fold_impl_item(i, s)) } fn fold_block(&mut self, b: P<ast::Block>) -> P<ast::Block> { fn expr_to_block(rules: ast::BlockCheckMode, e: Option<P<ast::Expr>>) -> P<ast::Block> { P(ast::Block { stmts: e.map(|e| { ast::Stmt { id: ast::DUMMY_NODE_ID, span: e.span, node: ast::StmtKind::Expr(e), } }) .into_iter() .collect(), rules, id: ast::DUMMY_NODE_ID, span: syntax_pos::DUMMY_SP, }) } if !self.within_static_or_const { let empty_block = expr_to_block(BlockCheckMode::Default, None); let loop_expr = P(ast::Expr { node: ast::ExprKind::Loop(empty_block, None), id: ast::DUMMY_NODE_ID, span: syntax_pos::DUMMY_SP, attrs: ast::ThinVec::new(), }); expr_to_block(b.rules, Some(loop_expr)) } else { fold::noop_fold_block(b, self) } } // in general the pretty printer processes unexpanded code, so // we override the default `fold_mac` method which panics. fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac { fold::noop_fold_mac(mac, self) } } fn print_flowgraph<'a, 'tcx, W: Write>(variants: Vec<borrowck_dot::Variant>, tcx: TyCtxt<'a, 'tcx, 'tcx>, code: blocks::Code<'tcx>, mode: PpFlowGraphMode, mut out: W) -> io::Result<()> { let body_id = match code { blocks::Code::Expr(expr) => { // Find the function this expression is from. 
let mut node_id = expr.id; loop { let node = tcx.hir.get(node_id); if let Some(n) = hir::map::blocks::FnLikeNode::from_node(node) { break n.body(); } let parent = tcx.hir.get_parent_node(node_id); assert!(node_id != parent); node_id = parent; } } blocks::Code::FnLike(fn_like) => fn_like.body(), }; let body = tcx.hir.body(body_id); let cfg = cfg::CFG::new(tcx, &body); let labelled_edges = mode != PpFlowGraphMode::UnlabelledEdges; let lcfg = LabelledCFG { tcx, cfg: &cfg, name: format!("node_{}", code.id()), labelled_edges, }; match code { _ if variants.is_empty() => { let r = dot::render(&lcfg, &mut out); return expand_err_details(r); } blocks::Code::Expr(_) => { tcx.sess.err("--pretty flowgraph with -Z flowgraph-print annotations requires \ fn-like node id."); return Ok(()); } blocks::Code::FnLike(fn_like) => { let (bccx, analysis_data) = borrowck::build_borrowck_dataflow_data_for_fn(tcx, fn_like.body(), &cfg); let lcfg = borrowck_dot::DataflowLabeller { inner: lcfg, variants, borrowck_ctxt: &bccx, analysis_data: &analysis_data, }; let r = dot::render(&lcfg, &mut out); return expand_err_details(r); } } fn expand_err_details(r: io::Result<()>) -> io::Result<()> { r.map_err(|ioerr| { io::Error::new(io::ErrorKind::Other, format!("graphviz::render failed: {}", ioerr)) }) } } pub fn fold_crate(krate: ast::Crate, ppm: PpMode) -> ast::Crate { if let PpmSource(PpmEveryBodyLoops) = ppm { let mut fold = ReplaceBodyWithLoop::new(); fold.fold_crate(krate) } else { krate } } fn get_source(input: &Input, sess: &Session) -> (Vec<u8>, String) { let src_name = driver::source_name(input); let src = sess.codemap() .get_filemap(&src_name) .unwrap() .src .as_ref() .unwrap() .as_bytes() .to_vec(); (src, src_name) } fn write_output(out: Vec<u8>, ofile: Option<&Path>) { match ofile { None => print!("{}", String::from_utf8(out).unwrap()), Some(p) => { match File::create(p) { Ok(mut w) => w.write_all(&out).unwrap(), Err(e) => panic!("print-print failed to open {} due to {}", p.display(), e), 
} } } } pub fn print_after_parsing(sess: &Session, input: &Input, krate: &ast::Crate, ppm: PpMode, ofile: Option<&Path>) { let (src, src_name) = get_source(input, sess); let mut rdr = &*src; let mut out = Vec::new(); if let PpmSource(s) = ppm { // Silently ignores an identified node. let out: &mut Write = &mut out; s.call_with_pp_support(sess, None, move |annotation| { debug!("pretty printing source code {:?}", s); let sess = annotation.sess(); pprust::print_crate(sess.codemap(), &sess.parse_sess, krate, src_name.to_string(), &mut rdr, box out, annotation.pp_ann(), false) }) .unwrap() } else { unreachable!(); }; write_output(out, ofile); } pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, cstore: &'tcx CrateStore, hir_map: &hir_map::Map<'tcx>, analysis: &ty::CrateAnalysis, resolutions: &Resolutions, input: &Input, krate: &ast::Crate, crate_name: &str, ppm: PpMode, arena: &'tcx DroplessArena, arenas: &'tcx GlobalArenas<'tcx>, output_filenames: &OutputFilenames, opt_uii: Option<UserIdentifiedItem>, ofile: Option<&Path>) { if ppm.needs_analysis() { print_with_analysis(sess, cstore, hir_map, analysis, resolutions, crate_name, arena, arenas, output_filenames, ppm, opt_uii, ofile); return; } let (src, src_name) = get_source(input, sess); let mut rdr = &src[..]; let mut out = Vec::new(); match (ppm, opt_uii) { (PpmSource(s), _) => { // Silently ignores an identified node. 
let out: &mut Write = &mut out; s.call_with_pp_support(sess, Some(hir_map), move |annotation| { debug!("pretty printing source code {:?}", s); let sess = annotation.sess(); pprust::print_crate(sess.codemap(), &sess.parse_sess, krate, src_name.to_string(), &mut rdr, box out, annotation.pp_ann(), true) }) } (PpmHir(s), None) => { let out: &mut Write = &mut out; s.call_with_pp_support_hir(sess, cstore, hir_map, analysis, resolutions, arena, arenas, output_filenames, crate_name, move |annotation, krate| { debug!("pretty printing source code {:?}", s); let sess = annotation.sess(); pprust_hir::print_crate(sess.codemap(), &sess.parse_sess, krate, src_name.to_string(), &mut rdr, box out, annotation.pp_ann(), true) }) } (PpmHirTree(s), None) => { let out: &mut Write = &mut out; s.call_with_pp_support_hir(sess, cstore, hir_map, analysis, resolutions, arena, arenas, output_filenames, crate_name, move |_annotation, krate| { debug!("pretty printing source code {:?}", s); write!(out, "{:#?}", krate) }) } (PpmHir(s), Some(uii)) => { let out: &mut Write = &mut out; s.call_with_pp_support_hir(sess, cstore, hir_map, analysis, resolutions, arena, arenas, output_filenames, crate_name, move |annotation, _| { debug!("pretty printing source code {:?}", s); let sess = annotation.sess(); let hir_map = annotation.hir_map().expect("--unpretty missing HIR map"); let mut pp_state = pprust_hir::State::new_from_input(sess.codemap(), &sess.parse_sess, src_name.to_string(), &mut rdr, box out, annotation.pp_ann(), true); for node_id in uii.all_matching_node_ids(hir_map) { let node = hir_map.get(node_id); pp_state.print_node(node)?; pp_state.s.space()?; let path = annotation.node_path(node_id) .expect("--unpretty missing node paths"); pp_state.synth_comment(path)?; pp_state.s.hardbreak()?; } pp_state.s.eof() }) } (PpmHirTree(s), Some(uii)) => { let out: &mut Write = &mut out; s.call_with_pp_support_hir(sess, cstore, hir_map, analysis, resolutions, arena, arenas, output_filenames, crate_name, move 
|_annotation, _krate| { debug!("pretty printing source code {:?}", s); for node_id in uii.all_matching_node_ids(hir_map) { let node = hir_map.get(node_id); write!(out, "{:#?}", node)?; } Ok(()) }) } _ => unreachable!(), } .unwrap(); write_output(out, ofile); } // In an ideal world, this would be a public function called by the driver after // analsysis is performed. However, we want to call `phase_3_run_analysis_passes` // with a different callback than the standard driver, so that isn't easy. // Instead, we call that function ourselves. fn print_with_analysis<'tcx, 'a: 'tcx>(sess: &'a Session, cstore: &'a CrateStore, hir_map: &hir_map::Map<'tcx>, analysis: &ty::CrateAnalysis, resolutions: &Resolutions, crate_name: &str, arena: &'tcx DroplessArena, arenas: &'tcx GlobalArenas<'tcx>, output_filenames: &OutputFilenames, ppm: PpMode, uii: Option<UserIdentifiedItem>, ofile: Option<&Path>) { let nodeid = if let Some(uii) = uii { debug!("pretty printing for {:?}", uii); Some(uii.to_one_node_id("--unpretty", sess, &hir_map)) } else { debug!("pretty printing for whole crate"); None }; let mut out = Vec::new(); abort_on_err(driver::phase_3_run_analysis_passes(sess, cstore, hir_map.clone(), analysis.clone(), resolutions.clone(), arena, arenas, crate_name, output_filenames, |tcx, _, _, _| { match ppm { PpmMir | PpmMirCFG => { if let Some(nodeid) = nodeid { let def_id = tcx.hir.local_def_id(nodeid); match ppm { PpmMir => write_mir_pretty(tcx, Some(def_id), &mut out), PpmMirCFG => write_mir_graphviz(tcx, Some(def_id), &mut out), _ => unreachable!(), }?; } else { match ppm { PpmMir => write_mir_pretty(tcx, None, &mut out), PpmMirCFG => write_mir_graphviz(tcx, None, &mut out), _ => unreachable!(), }?; } Ok(()) } PpmFlowGraph(mode) => { let nodeid = nodeid.expect("`pretty flowgraph=..` needs NodeId (int) or unique path \ suffix (b::c::d)"); let node = tcx.hir.find(nodeid).unwrap_or_else(|| { tcx.sess.fatal(&format!("--pretty flowgraph couldn't find id: {}", nodeid)) }); match 
blocks::Code::from_node(&tcx.hir, nodeid) { Some(code) => { let variants = gather_flowgraph_variants(tcx.sess); let out: &mut Write = &mut out; print_flowgraph(variants, tcx, code, mode, out) } None => { let message = format!("--pretty=flowgraph needs block, fn, or method; \ got {:?}", node); tcx.sess.span_fatal(tcx.hir.span(nodeid), &message) } } } _ => unreachable!(), } }), sess) .unwrap(); write_output(out, ofile); }
use std::cell::RefCell;
use std::default::Default;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::iter::FromIterator;
use std::lazy::SyncOnceCell as OnceCell;
use std::rc::Rc;
use std::sync::Arc;
use std::{slice, vec};

use arrayvec::ArrayVec;
use rustc_ast::attr;
use rustc_ast::util::comments::beautify_doc_string;
use rustc_ast::{self as ast, AttrStyle};
use rustc_attr::{ConstStability, Deprecation, Stability, StabilityLevel};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_feature::UnstableFeatures;
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, Res};
use rustc_hir::def_id::{CrateNum, DefId, DefIndex};
use rustc_hir::lang_items::LangItem;
use rustc_hir::Mutability;
use rustc_index::vec::IndexVec;
use rustc_middle::ty::{self, TyCtxt};
use rustc_session::Session;
use rustc_span::hygiene::MacroKind;
use rustc_span::source_map::DUMMY_SP;
use rustc_span::symbol::{kw, sym, Ident, Symbol, SymbolStr};
use rustc_span::{self, FileName, Loc};
use rustc_target::abi::VariantIdx;
use rustc_target::spec::abi::Abi;

use crate::clean::cfg::Cfg;
use crate::clean::external_path;
use crate::clean::inline;
use crate::clean::types::Type::{QPath, ResolvedPath};
use crate::clean::Clean;
use crate::core::DocContext;
use crate::formats::cache::Cache;
use crate::formats::item_type::ItemType;
use crate::html::render::cache::ExternalLocation;

// Glob-import our own variants so match arms below can be written unqualified.
use self::FnRetTy::*;
use self::ItemKind::*;
use self::SelfTy::*;
use self::Type::*;

// Per-crate high-water mark of DefIndexes; used by `Item::is_fake` to detect
// synthesized ("fake") items whose index lies above the recorded maximum.
thread_local!(crate static MAX_DEF_IDX: RefCell<FxHashMap<CrateNum, DefIndex>> = Default::default());

// Root of the cleaned documentation tree for the crate being documented.
#[derive(Clone, Debug)]
crate struct Crate {
    crate name: Symbol,
    crate version: Option<String>,
    crate src: FileName,
    crate module: Option<Item>,
    crate externs: Vec<(CrateNum, ExternalCrate)>,
    crate primitives: Vec<(DefId, PrimitiveType)>,
    // These are later on moved into `CACHEKEY`, leaving the map empty.
    // Only here so that they can be filtered through the rustdoc passes.
crate external_traits: Rc<RefCell<FxHashMap<DefId, Trait>>>,
    crate masked_crates: FxHashSet<CrateNum>,
    crate collapsed: bool,
}

// Metadata about an external (non-local) crate referenced by the one being
// documented.
#[derive(Clone, Debug)]
crate struct ExternalCrate {
    crate name: Symbol,
    crate src: FileName,
    crate attrs: Attributes,
    crate primitives: Vec<(DefId, PrimitiveType)>,
    crate keywords: Vec<(DefId, Symbol)>,
}

/// Anything with a source location and set of attributes and, optionally, a
/// name. That is, anything that can be documented. This doesn't correspond
/// directly to the AST's concept of an item; it's a strict superset.
#[derive(Clone)]
crate struct Item {
    /// Stringified span
    crate source: Span,
    /// Not everything has a name. E.g., impls
    crate name: Option<Symbol>,
    crate attrs: Box<Attributes>,
    crate visibility: Visibility,
    crate kind: Box<ItemKind>,
    crate def_id: DefId,
}

// `Item` is used a lot. Make sure it doesn't unintentionally get bigger.
#[cfg(target_arch = "x86_64")]
rustc_data_structures::static_assert_size!(Item, 48);

impl fmt::Debug for Item {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Fake items carry a DefId that doesn't resolve to anything real, so
        // print a placeholder instead of the raw id.
        let def_id: &dyn fmt::Debug = if self.is_fake() { &"**FAKE**" } else { &self.def_id };

        fmt.debug_struct("Item")
            .field("source", &self.source)
            .field("name", &self.name)
            .field("attrs", &self.attrs)
            .field("kind", &self.kind)
            .field("visibility", &self.visibility)
            .field("def_id", def_id)
            .finish()
    }
}

impl Item {
    // The three lookups below all guard against fake items, whose DefId must
    // not be fed into TyCtxt queries.
    crate fn stability<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Option<&'tcx Stability> {
        if self.is_fake() { None } else { tcx.lookup_stability(self.def_id) }
    }

    crate fn const_stability<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Option<&'tcx ConstStability> {
        if self.is_fake() { None } else { tcx.lookup_const_stability(self.def_id) }
    }

    crate fn deprecation(&self, tcx: TyCtxt<'_>) -> Option<Deprecation> {
        if self.is_fake() { None } else { tcx.lookup_deprecation(self.def_id) }
    }

    /// Finds the `doc` attribute as a NameValue and returns the corresponding
    /// value found.
crate fn doc_value(&self) -> Option<String> {
        self.attrs.doc_value()
    }

    /// Convenience wrapper around [`Self::from_def_id_and_parts`] which converts
    /// `hir_id` to a [`DefId`]
    pub fn from_hir_id_and_parts(
        hir_id: hir::HirId,
        name: Option<Symbol>,
        kind: ItemKind,
        cx: &DocContext<'_>,
    ) -> Item {
        Item::from_def_id_and_parts(cx.tcx.hir().local_def_id(hir_id).to_def_id(), name, kind, cx)
    }

    // Builds an `Item`, pulling span, attributes, and visibility from the
    // compiler via the given `DocContext`.
    pub fn from_def_id_and_parts(
        def_id: DefId,
        name: Option<Symbol>,
        kind: ItemKind,
        cx: &DocContext<'_>,
    ) -> Item {
        debug!("name={:?}, def_id={:?}", name, def_id);

        // `span_if_local()` lies about functions and only gives the span of the function signature
        let source = def_id.as_local().map_or_else(
            || cx.tcx.def_span(def_id),
            |local| {
                let hir = cx.tcx.hir();
                hir.span_with_body(hir.local_def_id_to_hir_id(local))
            },
        );

        Item {
            def_id,
            kind: box kind,
            name,
            source: source.clean(cx),
            attrs: box cx.tcx.get_attrs(def_id).clean(cx),
            visibility: cx.tcx.visibility(def_id).clean(cx),
        }
    }

    /// Finds all `doc` attributes as NameValues and returns their corresponding values, joined
    /// with newlines.
    crate fn collapsed_doc_value(&self) -> Option<String> {
        self.attrs.collapsed_doc_value()
    }

    crate fn links(&self, cache: &Cache) -> Vec<RenderedLink> {
        self.attrs.links(&self.def_id.krate, cache)
    }

    // True for the crate-root module itself, even if a pass has stripped it.
    crate fn is_crate(&self) -> bool {
        matches!(
            *self.kind,
            StrippedItem(box ModuleItem(Module { is_crate: true, .. }))
                | ModuleItem(Module {
                    is_crate: true,
                    ..
})
        )
    }

    // Thin predicates over `ItemType::from(self)`; one per documented kind.
    crate fn is_mod(&self) -> bool {
        self.type_() == ItemType::Module
    }
    crate fn is_trait(&self) -> bool {
        self.type_() == ItemType::Trait
    }
    crate fn is_struct(&self) -> bool {
        self.type_() == ItemType::Struct
    }
    crate fn is_enum(&self) -> bool {
        self.type_() == ItemType::Enum
    }
    crate fn is_variant(&self) -> bool {
        self.type_() == ItemType::Variant
    }
    crate fn is_associated_type(&self) -> bool {
        self.type_() == ItemType::AssocType
    }
    crate fn is_associated_const(&self) -> bool {
        self.type_() == ItemType::AssocConst
    }
    crate fn is_method(&self) -> bool {
        self.type_() == ItemType::Method
    }
    crate fn is_ty_method(&self) -> bool {
        self.type_() == ItemType::TyMethod
    }
    crate fn is_typedef(&self) -> bool {
        self.type_() == ItemType::Typedef
    }
    crate fn is_primitive(&self) -> bool {
        self.type_() == ItemType::Primitive
    }
    crate fn is_union(&self) -> bool {
        self.type_() == ItemType::Union
    }
    crate fn is_import(&self) -> bool {
        self.type_() == ItemType::Import
    }
    crate fn is_extern_crate(&self) -> bool {
        self.type_() == ItemType::ExternCrate
    }
    crate fn is_keyword(&self) -> bool {
        self.type_() == ItemType::Keyword
    }
    crate fn is_stripped(&self) -> bool {
        match *self.kind {
            StrippedItem(..) => true,
            // An import that shouldn't be displayed counts as stripped too.
            ImportItem(ref i) => !i.should_be_displayed,
            _ => false,
        }
    }
    // `None` for kinds that don't have fields at all.
    crate fn has_stripped_fields(&self) -> Option<bool> {
        match *self.kind {
            StructItem(ref _struct) => Some(_struct.fields_stripped),
            UnionItem(ref union) => Some(union.fields_stripped),
            VariantItem(Variant::Struct(ref vstruct)) => Some(vstruct.fields_stripped),
            _ => None,
        }
    }

    // Builds the CSS class string ("unstable", "deprecated", or both) used to
    // badge this item in rendered output; `None` when no stability info.
    crate fn stability_class(&self, tcx: TyCtxt<'_>) -> Option<String> {
        self.stability(tcx).as_ref().and_then(|ref s| {
            let mut classes = Vec::with_capacity(2);

            if s.level.is_unstable() {
                classes.push("unstable");
            }

            // FIXME: what about non-staged API items that are deprecated?
if self.deprecation(tcx).is_some() {
                classes.push("deprecated");
            }

            if !classes.is_empty() { Some(classes.join(" ")) } else { None }
        })
    }

    // Version in which this item became stable, if it is stable.
    crate fn stable_since(&self, tcx: TyCtxt<'_>) -> Option<SymbolStr> {
        match self.stability(tcx)?.level {
            StabilityLevel::Stable { since, .. } => Some(since.as_str()),
            StabilityLevel::Unstable { .. } => None,
        }
    }

    // Version in which this item became const-stable, if it did.
    crate fn const_stable_since(&self, tcx: TyCtxt<'_>) -> Option<SymbolStr> {
        match self.const_stability(tcx)?.level {
            StabilityLevel::Stable { since, .. } => Some(since.as_str()),
            StabilityLevel::Unstable { .. } => None,
        }
    }

    crate fn is_non_exhaustive(&self) -> bool {
        self.attrs.other_attrs.iter().any(|a| a.has_name(sym::non_exhaustive))
    }

    /// Returns a documentation-level item type from the item.
    crate fn type_(&self) -> ItemType {
        ItemType::from(self)
    }

    // A trait method with a provided (non-final) default body.
    crate fn is_default(&self) -> bool {
        match *self.kind {
            ItemKind::MethodItem(_, Some(defaultness)) => {
                defaultness.has_value() && !defaultness.is_final()
            }
            _ => false,
        }
    }

    /// See the documentation for [`next_def_id()`].
    ///
    /// [`next_def_id()`]: DocContext::next_def_id()
    crate fn is_fake(&self) -> bool {
        // An index at or above the recorded per-crate maximum means this item
        // was synthesized by rustdoc rather than coming from the compiler.
        MAX_DEF_IDX.with(|m| {
            m.borrow().get(&self.def_id.krate).map(|&idx| idx <= self.def_id.index).unwrap_or(false)
        })
    }
}

// The concrete kind (and kind-specific payload) of a cleaned `Item`.
#[derive(Clone, Debug)]
crate enum ItemKind {
    ExternCrateItem(Symbol, Option<Symbol>),
    ImportItem(Import),
    StructItem(Struct),
    UnionItem(Union),
    EnumItem(Enum),
    FunctionItem(Function),
    ModuleItem(Module),
    TypedefItem(Typedef, bool /* is associated type */),
    OpaqueTyItem(OpaqueTy),
    StaticItem(Static),
    ConstantItem(Constant),
    TraitItem(Trait),
    TraitAliasItem(TraitAlias),
    ImplItem(Impl),
    /// A method signature only. Used for required methods in traits (ie,
    /// non-default-methods).
    TyMethodItem(Function),
    /// A method with a body.
MethodItem(Function, Option<hir::Defaultness>),
    StructFieldItem(Type),
    VariantItem(Variant),
    /// `fn`s from an extern block
    ForeignFunctionItem(Function),
    /// `static`s from an extern block
    ForeignStaticItem(Static),
    /// `type`s from an extern block
    ForeignTypeItem,
    MacroItem(Macro),
    ProcMacroItem(ProcMacro),
    PrimitiveItem(PrimitiveType),
    AssocConstItem(Type, Option<String>),
    /// An associated item in a trait or trait impl.
    ///
    /// The bounds may be non-empty if there is a `where` clause.
    /// The `Option<Type>` is the default concrete type (e.g. `trait Trait { type Target = usize; }`)
    AssocTypeItem(Vec<GenericBound>, Option<Type>),
    /// An item that has been stripped by a rustdoc pass
    StrippedItem(Box<ItemKind>),
    KeywordItem(Symbol),
}

impl ItemKind {
    /// Some items contain others such as structs (for their fields) and Enums
    /// (for their variants). This method returns those contained items.
    crate fn inner_items(&self) -> impl Iterator<Item = &Item> {
        match self {
            StructItem(s) => s.fields.iter(),
            UnionItem(u) => u.fields.iter(),
            VariantItem(Variant::Struct(v)) => v.fields.iter(),
            EnumItem(e) => e.variants.iter(),
            TraitItem(t) => t.items.iter(),
            ImplItem(i) => i.items.iter(),
            ModuleItem(m) => m.items.iter(),
            // Everything else has no children; listed exhaustively (no `_`)
            // so adding a variant forces this match to be revisited.
            ExternCrateItem(_, _)
            | ImportItem(_)
            | FunctionItem(_)
            | TypedefItem(_, _)
            | OpaqueTyItem(_)
            | StaticItem(_)
            | ConstantItem(_)
            | TraitAliasItem(_)
            | TyMethodItem(_)
            | MethodItem(_, _)
            | StructFieldItem(_)
            | VariantItem(_)
            | ForeignFunctionItem(_)
            | ForeignStaticItem(_)
            | ForeignTypeItem
            | MacroItem(_)
            | ProcMacroItem(_)
            | PrimitiveItem(_)
            | AssocConstItem(_, _)
            | AssocTypeItem(_, _)
            | StrippedItem(_)
            | KeywordItem(_) => [].iter(),
        }
    }

    crate fn is_type_alias(&self) -> bool {
        matches!(self, ItemKind::TypedefItem(..)
| ItemKind::AssocTypeItem(..))
    }
}

#[derive(Clone, Debug)]
crate struct Module {
    crate items: Vec<Item>,
    crate is_crate: bool,
}

// Iterator over the nested meta-items of every `#[name(...)]` attribute in a
// slice of attributes; flattens multiple matching attributes into one stream.
crate struct ListAttributesIter<'a> {
    attrs: slice::Iter<'a, ast::Attribute>,
    current_list: vec::IntoIter<ast::NestedMetaItem>,
    name: Symbol,
}

impl<'a> Iterator for ListAttributesIter<'a> {
    type Item = ast::NestedMetaItem;

    fn next(&mut self) -> Option<Self::Item> {
        // Drain the list we are currently inside first.
        if let Some(nested) = self.current_list.next() {
            return Some(nested);
        }

        // Then advance to the next attribute matching `self.name` that has a
        // non-empty list.
        for attr in &mut self.attrs {
            if let Some(list) = attr.meta_item_list() {
                if attr.has_name(self.name) {
                    self.current_list = list.into_iter();
                    if let Some(nested) = self.current_list.next() {
                        return Some(nested);
                    }
                }
            }
        }

        None
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Only the remainder of the current list is known; later attributes
        // may contribute arbitrarily many more items.
        let lower = self.current_list.len();
        (lower, None)
    }
}

crate trait AttributesExt {
    /// Finds an attribute as List and returns the list of attributes nested inside.
    fn lists(&self, name: Symbol) -> ListAttributesIter<'_>;
}

impl AttributesExt for [ast::Attribute] {
    fn lists(&self, name: Symbol) -> ListAttributesIter<'_> {
        ListAttributesIter { attrs: self.iter(), current_list: Vec::new().into_iter(), name }
    }
}

crate trait NestedAttributesExt {
    /// Returns `true` if the attribute list contains a specific `Word`
    fn has_word(self, word: Symbol) -> bool;
    fn get_word_attr(self, word: Symbol) -> (Option<ast::NestedMetaItem>, bool);
}

impl<I: Iterator<Item = ast::NestedMetaItem> + IntoIterator<Item = ast::NestedMetaItem>>
    NestedAttributesExt for I
{
    fn has_word(self, word: Symbol) -> bool {
        self.into_iter().any(|attr| attr.is_word() && attr.has_name(word))
    }

    fn get_word_attr(mut self, word: Symbol) -> (Option<ast::NestedMetaItem>, bool) {
        match self.find(|attr| attr.is_word() && attr.has_name(word)) {
            Some(a) => (Some(a), true),
            None => (None, false),
        }
    }
}

/// A portion of documentation, extracted from a `#[doc]` attribute.
///
/// Each variant contains the line number within the complete doc-comment where the fragment
/// starts, as well as the Span where the corresponding doc comment or attribute is located.
///
/// Included files are kept separate from inline doc comments so that proper line-number
/// information can be given when a doctest fails. Sugared doc comments and "raw" doc comments are
/// kept separate because of issue #42760.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct DocFragment {
    crate line: usize,
    crate span: rustc_span::Span,
    /// The module this doc-comment came from.
    ///
    /// This allows distinguishing between the original documentation and a pub re-export.
    /// If it is `None`, the item was not re-exported.
    crate parent_module: Option<DefId>,
    crate doc: Symbol,
    crate kind: DocFragmentKind,
    // Whether a blank line should be emitted after this fragment when joining
    // fragments into the final markdown (see `add_doc_fragment`).
    crate need_backline: bool,
    // Number of leading whitespace columns stripped from each line.
    crate indent: usize,
}

#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
crate enum DocFragmentKind {
    /// A doc fragment created from a `///` or `//!` doc comment.
    SugaredDoc,
    /// A doc fragment created from a "raw" `#[doc=""]` attribute.
    RawDoc,
    /// A doc fragment created from a `#[doc(include="filename")]` attribute. Contains both the
    /// given filename and the file contents.
    Include { filename: Symbol },
}

// The goal of this function is to apply the `DocFragment` transformations that are required when
// transforming into the final markdown. So the transformations in here are:
//
// * Applying the computed indent to each lines in each doc fragment (a `DocFragment` can contain
//   multiple lines in case of `#[doc = ""]`).
// * Adding backlines between `DocFragment`s and adding an extra one if required (stored in the
//   `need_backline` field).
fn add_doc_fragment(out: &mut String, frag: &DocFragment) {
    let s = frag.doc.as_str();
    let mut iter = s.lines().peekable();
    while let Some(line) = iter.next() {
        if line.chars().any(|c| !c.is_whitespace()) {
            // Non-blank line: strip the shared indent computed earlier.
            assert!(line.len() >= frag.indent);
            out.push_str(&line[frag.indent..]);
        } else {
            // Blank (whitespace-only) lines are kept as-is.
            out.push_str(line);
        }
        if iter.peek().is_some() {
            out.push('\n');
        }
    }
    if frag.need_backline {
        out.push('\n');
    }
}

// Joins a sequence of fragments into one markdown string, inserting a blank
// line whenever the previous fragment was an `Include` of a different kind.
impl<'a> FromIterator<&'a DocFragment> for String {
    fn from_iter<T>(iter: T) -> Self
    where
        T: IntoIterator<Item = &'a DocFragment>,
    {
        let mut prev_kind: Option<DocFragmentKind> = None;
        iter.into_iter().fold(String::new(), |mut acc, frag| {
            if !acc.is_empty()
                && prev_kind
                    .take()
                    .map(|p| matches!(p, DocFragmentKind::Include { .. }) && p != frag.kind)
                    .unwrap_or(false)
            {
                acc.push('\n');
            }
            add_doc_fragment(&mut acc, &frag);
            prev_kind = Some(frag.kind);
            acc
        })
    }
}

// All attribute-derived information about an item: doc fragments, the raw
// non-doc attributes, the combined `#[doc(cfg(...))]` condition, and the
// intra-doc links collected from the docs.
#[derive(Clone, Debug, Default)]
crate struct Attributes {
    crate doc_strings: Vec<DocFragment>,
    crate other_attrs: Vec<ast::Attribute>,
    crate cfg: Option<Arc<Cfg>>,
    crate span: Option<rustc_span::Span>,
    /// map from Rust paths to resolved defs and potential URL fragments
    crate links: Vec<ItemLink>,
    crate inner_docs: bool,
}

#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
/// A link that has not yet been rendered.
///
/// This link will be turned into a rendered link by [`Attributes::links`]
crate struct ItemLink {
    /// The original link written in the markdown
    pub(crate) link: String,
    /// The link text displayed in the HTML.
    ///
    /// This may not be the same as `link` if there was a disambiguator
    /// in an intra-doc link (e.g. \[`fn@f`\])
    pub(crate) link_text: String,
    pub(crate) did: Option<DefId>,
    /// The url fragment to append to the link
    pub(crate) fragment: Option<String>,
}

pub struct RenderedLink {
    /// The text the link was original written as.
    ///
    /// This could potentially include disambiguators and backticks.
pub(crate) original_text: String,
    /// The text to display in the HTML
    pub(crate) new_text: String,
    /// The URL to put in the `href`
    pub(crate) href: String,
}

impl Attributes {
    /// Extracts the content from an attribute `#[doc(cfg(content))]`.
    crate fn extract_cfg(mi: &ast::MetaItem) -> Option<&ast::MetaItem> {
        use rustc_ast::NestedMetaItem::MetaItem;

        // Walk the nesting `doc(cfg(content))` one level at a time; every
        // level must contain exactly one meta-item for this to match.
        if let ast::MetaItemKind::List(ref nmis) = mi.kind {
            if nmis.len() == 1 {
                if let MetaItem(ref cfg_mi) = nmis[0] {
                    if cfg_mi.has_name(sym::cfg) {
                        if let ast::MetaItemKind::List(ref cfg_nmis) = cfg_mi.kind {
                            if cfg_nmis.len() == 1 {
                                if let MetaItem(ref content_mi) = cfg_nmis[0] {
                                    return Some(content_mi);
                                }
                            }
                        }
                    }
                }
            }
        }

        None
    }

    /// Reads a `MetaItem` from within an attribute, looks for whether it is a
    /// `#[doc(include="file")]`, and returns the filename and contents of the file as loaded from
    /// its expansion.
    crate fn extract_include(mi: &ast::MetaItem) -> Option<(Symbol, Symbol)> {
        mi.meta_item_list().and_then(|list| {
            for meta in list {
                if meta.has_name(sym::include) {
                    // the actual compiled `#[doc(include="filename")]` gets expanded to
                    // `#[doc(include(file="filename", contents="file contents")]` so we need to
                    // look for that instead
                    return meta.meta_item_list().and_then(|list| {
                        let mut filename: Option<Symbol> = None;
                        let mut contents: Option<Symbol> = None;

                        for it in list {
                            if it.has_name(sym::file) {
                                if let Some(name) = it.value_str() {
                                    filename = Some(name);
                                }
                            } else if it.has_name(sym::contents) {
                                if let Some(docs) = it.value_str() {
                                    contents = Some(docs);
                                }
                            }
                        }

                        // Both halves must be present for a valid include.
                        if let (Some(filename), Some(contents)) = (filename, contents) {
                            Some((filename, contents))
                        } else {
                            None
                        }
                    });
                }
            }

            None
        })
    }

    // Whether any `#[doc(...)]` attribute carries the given word flag.
    crate fn has_doc_flag(&self, flag: Symbol) -> bool {
        for attr in &self.other_attrs {
            if !attr.has_name(sym::doc) {
                continue;
            }

            if let Some(items) = attr.meta_item_list() {
                if items.iter().filter_map(|i| i.meta_item()).any(|it| it.has_name(flag)) {
                    return true;
                }
            }
        }

        false
    }

    // Builds an `Attributes` from raw AST attributes; `additional_attrs`
    // supplies extra attributes (with their originating module) that are
    // prepended, e.g. for documented re-exports.
    crate fn from_ast(
        diagnostic:
&::rustc_errors::Handler,
        attrs: &[ast::Attribute],
        additional_attrs: Option<(&[ast::Attribute], DefId)>,
    ) -> Attributes {
        let mut doc_strings: Vec<DocFragment> = vec![];
        let mut sp = None;
        let mut cfg = Cfg::True;
        // Running line counter across all doc fragments seen so far.
        let mut doc_line = 0;

        // Decide whether the previous fragment needs a trailing blank line
        // before `frag` is appended (segments of different kind/module get
        // extra padding).
        fn update_need_backline(doc_strings: &mut Vec<DocFragment>, frag: &DocFragment) {
            if let Some(prev) = doc_strings.last_mut() {
                if matches!(prev.kind, DocFragmentKind::Include { .. })
                    || prev.kind != frag.kind
                    || prev.parent_module != frag.parent_module
                {
                    // add a newline for extra padding between segments
                    prev.need_backline = prev.kind == DocFragmentKind::SugaredDoc
                        || prev.kind == DocFragmentKind::RawDoc
                } else {
                    prev.need_backline = true;
                }
            }
        }

        // Splits each attribute into either a doc fragment (returns `None`,
        // consumed here) or a retained non-doc attribute (returns `Some`).
        let clean_attr = |(attr, parent_module): (&ast::Attribute, _)| {
            if let Some(value) = attr.doc_str() {
                trace!("got doc_str={:?}", value);
                let value = beautify_doc_string(value);
                let kind = if attr.is_doc_comment() {
                    DocFragmentKind::SugaredDoc
                } else {
                    DocFragmentKind::RawDoc
                };

                let line = doc_line;
                doc_line += value.as_str().lines().count();
                let frag = DocFragment {
                    line,
                    span: attr.span,
                    doc: value,
                    kind,
                    parent_module,
                    need_backline: false,
                    indent: 0,
                };

                update_need_backline(&mut doc_strings, &frag);

                doc_strings.push(frag);

                // Remember the span of the first doc attribute.
                if sp.is_none() {
                    sp = Some(attr.span);
                }
                None
            } else {
                if attr.has_name(sym::doc) {
                    if let Some(mi) = attr.meta() {
                        if let Some(cfg_mi) = Attributes::extract_cfg(&mi) {
                            // Extracted #[doc(cfg(...))]
                            match Cfg::parse(cfg_mi) {
                                Ok(new_cfg) => cfg &= new_cfg,
                                Err(e) => diagnostic.span_err(e.span, e.msg),
                            }
                        } else if let Some((filename, contents)) =
                            Attributes::extract_include(&mi)
                        {
                            let line = doc_line;
                            doc_line += contents.as_str().lines().count();
                            let frag = DocFragment {
                                line,
                                span: attr.span,
                                doc: contents,
                                kind: DocFragmentKind::Include { filename },
                                parent_module,
                                need_backline: false,
                                indent: 0,
                            };
                            update_need_backline(&mut doc_strings, &frag);
                            doc_strings.push(frag);
                        }
                    }
                }
                Some(attr.clone())
            }
        };

        // Additional documentation should be shown before the original
// documentation
        let other_attrs = additional_attrs
            .into_iter()
            .map(|(attrs, id)| attrs.iter().map(move |attr| (attr, Some(id))))
            .flatten()
            .chain(attrs.iter().map(|attr| (attr, None)))
            .filter_map(clean_attr)
            .collect();

        // treat #[target_feature(enable = "feat")] attributes as if they were
        // #[doc(cfg(target_feature = "feat"))] attributes as well
        for attr in attrs.lists(sym::target_feature) {
            if attr.has_name(sym::enable) {
                if let Some(feat) = attr.value_str() {
                    let meta = attr::mk_name_value_item_str(
                        Ident::with_dummy_span(sym::target_feature),
                        feat,
                        DUMMY_SP,
                    );
                    if let Ok(feat_cfg) = Cfg::parse(&meta) {
                        cfg &= feat_cfg;
                    }
                }
            }
        }

        // Inner (`//!`) docs iff the first doc-bearing attribute is inner;
        // defaults to `true` when there are no doc attributes at all.
        let inner_docs = attrs
            .iter()
            .find(|a| a.doc_str().is_some())
            .map_or(true, |a| a.style == AttrStyle::Inner);

        Attributes {
            doc_strings,
            other_attrs,
            cfg: if cfg == Cfg::True { None } else { Some(Arc::new(cfg)) },
            span: sp,
            links: vec![],
            inner_docs,
        }
    }

    /// Finds the `doc` attribute as a NameValue and returns the corresponding
    /// value found.
    crate fn doc_value(&self) -> Option<String> {
        let mut iter = self.doc_strings.iter();

        // Join only the leading run of fragments that share the first
        // fragment's kind and originating module.
        let ori = iter.next()?;
        let mut out = String::new();
        add_doc_fragment(&mut out, &ori);
        while let Some(new_frag) = iter.next() {
            if matches!(ori.kind, DocFragmentKind::Include { .. })
                || new_frag.kind != ori.kind
                || new_frag.parent_module != ori.parent_module
            {
                break;
            }
            add_doc_fragment(&mut out, &new_frag);
        }
        if out.is_empty() { None } else { Some(out) }
    }

    /// Return the doc-comments on this item, grouped by the module they came from.
    ///
    /// The module can be different if this is a re-export with added documentation.
    crate fn collapsed_doc_value_by_module_level(&self) -> FxHashMap<Option<DefId>, String> {
        let mut ret = FxHashMap::default();

        for new_frag in self.doc_strings.iter() {
            let out = ret.entry(new_frag.parent_module).or_default();
            add_doc_fragment(out, &new_frag);
        }
        ret
    }

    /// Finds all `doc` attributes as NameValues and returns their corresponding values, joined
    /// with newlines.
crate fn collapsed_doc_value(&self) -> Option<String> {
        if self.doc_strings.is_empty() { None } else { Some(self.doc_strings.iter().collect()) }
    }

    /// Gets links as a vector
    ///
    /// Cache must be populated before call
    crate fn links(&self, krate: &CrateNum, cache: &Cache) -> Vec<RenderedLink> {
        use crate::html::format::href;
        use crate::html::render::CURRENT_DEPTH;

        self.links
            .iter()
            .filter_map(|ItemLink { link: s, link_text, did, fragment }| {
                match *did {
                    Some(did) => {
                        // Resolved link: build the href from the cache and
                        // append the fragment, if any.
                        if let Some((mut href, ..)) = href(did, cache) {
                            if let Some(ref fragment) = *fragment {
                                href.push('#');
                                href.push_str(fragment);
                            }
                            Some(RenderedLink {
                                original_text: s.clone(),
                                new_text: link_text.clone(),
                                href,
                            })
                        } else {
                            None
                        }
                    }
                    None => {
                        // No DefId: only primitives with a fragment can still
                        // be rendered, pointing into the std docs.
                        if let Some(ref fragment) = *fragment {
                            let url = match cache.extern_locations.get(krate) {
                                Some(&(_, _, ExternalLocation::Local)) => {
                                    let depth = CURRENT_DEPTH.with(|l| l.get());
                                    "../".repeat(depth)
                                }
                                Some(&(_, _, ExternalLocation::Remote(ref s))) => s.to_string(),
                                Some(&(_, _, ExternalLocation::Unknown)) | None => String::from(
                                    // NOTE: intentionally doesn't pass crate name to avoid having
                                    // different primitive links between crates
                                    if UnstableFeatures::from_environment(None).is_nightly_build() {
                                        "https://doc.rust-lang.org/nightly"
                                    } else {
                                        "https://doc.rust-lang.org"
                                    },
                                ),
                            };
                            // This is a primitive so the url is done "by hand".
                            let tail = fragment.find('#').unwrap_or_else(|| fragment.len());
                            Some(RenderedLink {
                                original_text: s.clone(),
                                new_text: link_text.clone(),
                                href: format!(
                                    "{}{}std/primitive.{}.html{}",
                                    url,
                                    if !url.ends_with('/') { "/" } else { "" },
                                    &fragment[..tail],
                                    &fragment[tail..]
),
                            })
                        } else {
                            panic!("This isn't a primitive?!");
                        }
                    }
                }
            })
            .collect()
    }

    // All non-empty `#[doc(alias = "...")]` values on this item.
    crate fn get_doc_aliases(&self) -> FxHashSet<String> {
        self.other_attrs
            .lists(sym::doc)
            .filter(|a| a.has_name(sym::alias))
            .filter_map(|a| a.value_str().map(|s| s.to_string()))
            .filter(|v| !v.is_empty())
            .collect::<FxHashSet<_>>()
    }
}

// Manual PartialEq/Hash: `ast::Attribute` is compared/hashed by its id only,
// so these impls must stay in sync with each other.
impl PartialEq for Attributes {
    fn eq(&self, rhs: &Self) -> bool {
        self.doc_strings == rhs.doc_strings
            && self.cfg == rhs.cfg
            && self.span == rhs.span
            && self.links == rhs.links
            && self
                .other_attrs
                .iter()
                .map(|attr| attr.id)
                .eq(rhs.other_attrs.iter().map(|attr| attr.id))
    }
}

impl Eq for Attributes {}

impl Hash for Attributes {
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        self.doc_strings.hash(hasher);
        self.cfg.hash(hasher);
        self.span.hash(hasher);
        self.links.hash(hasher);
        for attr in &self.other_attrs {
            attr.id.hash(hasher);
        }
    }
}

impl AttributesExt for Attributes {
    fn lists(&self, name: Symbol) -> ListAttributesIter<'_> {
        self.other_attrs.lists(name)
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum GenericBound {
    TraitBound(PolyTrait, hir::TraitBoundModifier),
    Outlives(Lifetime),
}

impl GenericBound {
    // Constructs the implicit `?Sized` bound from the `Sized` lang item.
    crate fn maybe_sized(cx: &DocContext<'_>) -> GenericBound {
        let did = cx.tcx.require_lang_item(LangItem::Sized, None);
        let empty = cx.tcx.intern_substs(&[]);
        let path = external_path(cx, cx.tcx.item_name(did), Some(did), false, vec![], empty);
        inline::record_extern_fqn(cx, did, TypeKind::Trait);
        GenericBound::TraitBound(
            PolyTrait {
                trait_: ResolvedPath { path, param_names: None, did, is_generic: false },
                generic_params: Vec::new(),
            },
            hir::TraitBoundModifier::Maybe,
        )
    }

    // True for an unmodified (`TBM::None`) bound on the `Sized` trait.
    crate fn is_sized_bound(&self, cx: &DocContext<'_>) -> bool {
        use rustc_hir::TraitBoundModifier as TBM;
        if let GenericBound::TraitBound(PolyTrait { ref trait_, ..
}, TBM::None) = *self
        {
            if trait_.def_id() == cx.tcx.lang_items().sized_trait() {
                return true;
            }
        }
        false
    }

    crate fn get_poly_trait(&self) -> Option<PolyTrait> {
        if let GenericBound::TraitBound(ref p, _) = *self {
            return Some(p.clone());
        }
        None
    }

    crate fn get_trait_type(&self) -> Option<Type> {
        if let GenericBound::TraitBound(PolyTrait { ref trait_, .. }, _) = *self {
            Some(trait_.clone())
        } else {
            None
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct Lifetime(pub Symbol);

impl Lifetime {
    crate fn get_ref(&self) -> SymbolStr {
        self.0.as_str()
    }

    // `'static` — named `statik` because `static` is a keyword.
    crate fn statik() -> Lifetime {
        Lifetime(kw::StaticLifetime)
    }

    // The elided lifetime, rendered as `'_`.
    crate fn elided() -> Lifetime {
        Lifetime(kw::UnderscoreLifetime)
    }
}

#[derive(Clone, Debug)]
crate enum WherePredicate {
    BoundPredicate { ty: Type, bounds: Vec<GenericBound> },
    RegionPredicate { lifetime: Lifetime, bounds: Vec<GenericBound> },
    EqPredicate { lhs: Type, rhs: Type },
}

impl WherePredicate {
    // `None` for `EqPredicate`, which carries no bound list.
    crate fn get_bounds(&self) -> Option<&[GenericBound]> {
        match *self {
            WherePredicate::BoundPredicate { ref bounds, .. } => Some(bounds),
            WherePredicate::RegionPredicate { ref bounds, .. } => Some(bounds),
            _ => None,
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum GenericParamDefKind {
    Lifetime,
    Type {
        did: DefId,
        bounds: Vec<GenericBound>,
        default: Option<Type>,
        synthetic: Option<hir::SyntheticTyParamKind>,
    },
    Const {
        did: DefId,
        ty: Type,
    },
}

impl GenericParamDefKind {
    crate fn is_type(&self) -> bool {
        matches!(self, GenericParamDefKind::Type { .. })
    }

    // FIXME(eddyb) this either returns the default of a type parameter, or the
    // type of a `const` parameter. It seems that the intention is to *visit*
    // any embedded types, but `get_type` seems to be the wrong name for that.
    crate fn get_type(&self) -> Option<Type> {
        match self {
            GenericParamDefKind::Type { default, .. } => default.clone(),
            GenericParamDefKind::Const { ty, ..
} => Some(ty.clone()),
            GenericParamDefKind::Lifetime => None,
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct GenericParamDef {
    crate name: Symbol,
    crate kind: GenericParamDefKind,
}

impl GenericParamDef {
    // A compiler-synthesized type parameter (e.g. from `impl Trait` in
    // argument position), as opposed to one the user wrote.
    crate fn is_synthetic_type_param(&self) -> bool {
        match self.kind {
            GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => false,
            GenericParamDefKind::Type { ref synthetic, .. } => synthetic.is_some(),
        }
    }

    crate fn is_type(&self) -> bool {
        self.kind.is_type()
    }

    crate fn get_type(&self) -> Option<Type> {
        self.kind.get_type()
    }

    crate fn get_bounds(&self) -> Option<&[GenericBound]> {
        match self.kind {
            GenericParamDefKind::Type { ref bounds, .. } => Some(bounds),
            _ => None,
        }
    }
}

// maybe use a Generic enum and use Vec<Generic>?
#[derive(Clone, Debug, Default)]
crate struct Generics {
    crate params: Vec<GenericParamDef>,
    crate where_predicates: Vec<WherePredicate>,
}

#[derive(Clone, Debug)]
crate struct Function {
    crate decl: FnDecl,
    crate generics: Generics,
    crate header: hir::FnHeader,
    crate all_types: Vec<(Type, TypeKind)>,
    crate ret_types: Vec<(Type, TypeKind)>,
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct FnDecl {
    crate inputs: Arguments,
    crate output: FnRetTy,
    crate c_variadic: bool,
    crate attrs: Attributes,
}

impl FnDecl {
    // The receiver (`self`) form of this function, if its first argument is
    // a `self` parameter.
    crate fn self_type(&self) -> Option<SelfTy> {
        self.inputs.values.get(0).and_then(|v| v.to_self())
    }

    /// Returns the sugared return type for an async function.
    ///
    /// For example, if the return type is `impl std::future::Future<Output = i32>`, this function
    /// will return `i32`.
    ///
    /// # Panics
    ///
    /// This function will panic if the return type does not match the expected sugaring for async
    /// functions.
    crate fn sugared_async_return_type(&self) -> FnRetTy {
        match &self.output {
            FnRetTy::Return(Type::ImplTrait(bounds)) => match &bounds[0] {
                GenericBound::TraitBound(PolyTrait { trait_, .. }, ..)
=> {
                    // The `Output = T` binding of the Future bound is the
                    // sugared return type.
                    let bindings = trait_.bindings().unwrap();
                    FnRetTy::Return(bindings[0].ty().clone())
                }
                _ => panic!("unexpected desugaring of async function"),
            },
            _ => panic!("unexpected desugaring of async function"),
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct Arguments {
    crate values: Vec<Argument>,
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct Argument {
    crate type_: Type,
    crate name: Symbol,
}

// The shape of a method receiver: by value, by (optionally mutable)
// reference, or an explicitly-typed `self: T`.
#[derive(Clone, PartialEq, Debug)]
crate enum SelfTy {
    SelfValue,
    SelfBorrowed(Option<Lifetime>, Mutability),
    SelfExplicit(Type),
}

impl Argument {
    // Classifies this argument as a receiver; `None` if it isn't `self`.
    crate fn to_self(&self) -> Option<SelfTy> {
        if self.name != kw::SelfLower {
            return None;
        }
        if self.type_.is_self_type() {
            return Some(SelfValue);
        }
        match self.type_ {
            BorrowedRef { ref lifetime, mutability, ref type_ } if type_.is_self_type() => {
                Some(SelfBorrowed(lifetime.clone(), mutability))
            }
            _ => Some(SelfExplicit(self.type_.clone())),
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum FnRetTy {
    Return(Type),
    DefaultReturn,
}

impl GetDefId for FnRetTy {
    fn def_id(&self) -> Option<DefId> {
        match *self {
            Return(ref ty) => ty.def_id(),
            DefaultReturn => None,
        }
    }

    fn def_id_full(&self, cache: &Cache) -> Option<DefId> {
        match *self {
            Return(ref ty) => ty.def_id_full(cache),
            DefaultReturn => None,
        }
    }
}

#[derive(Clone, Debug)]
crate struct Trait {
    crate unsafety: hir::Unsafety,
    crate items: Vec<Item>,
    crate generics: Generics,
    crate bounds: Vec<GenericBound>,
    crate is_spotlight: bool,
    crate is_auto: bool,
}

#[derive(Clone, Debug)]
crate struct TraitAlias {
    crate generics: Generics,
    crate bounds: Vec<GenericBound>,
}

/// A trait reference, which may have higher ranked lifetimes.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct PolyTrait {
    crate trait_: Type,
    crate generic_params: Vec<GenericParamDef>,
}

/// A representation of a type suitable for hyperlinking purposes. Ideally, one can get the original
/// type out of the AST/`TyCtxt` given one of these, if more information is needed.
/// Most importantly, it does not preserve mutability or boxes.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum Type {
    /// Structs/enums/traits (most that would be an `hir::TyKind::Path`).
    ResolvedPath {
        path: Path,
        param_names: Option<Vec<GenericBound>>,
        did: DefId,
        /// `true` if is a `T::Name` path for associated types.
        is_generic: bool,
    },
    /// For parameterized types, so the consumer of the JSON don't go
    /// looking for types which don't exist anywhere.
    Generic(Symbol),
    /// Primitives are the fixed-size numeric types (plus int/usize/float), char,
    /// arrays, slices, and tuples.
    Primitive(PrimitiveType),
    /// `extern "ABI" fn`
    BareFunction(Box<BareFunctionDecl>),
    Tuple(Vec<Type>),
    Slice(Box<Type>),
    /// The `String` field is about the size or the constant representing the array's length.
    Array(Box<Type>, String),
    Never,
    RawPointer(Mutability, Box<Type>),
    BorrowedRef {
        lifetime: Option<Lifetime>,
        mutability: Mutability,
        type_: Box<Type>,
    },

    // `<Type as Trait>::Name`
    QPath {
        name: Symbol,
        self_type: Box<Type>,
        trait_: Box<Type>,
    },

    // `_`
    Infer,

    // `impl TraitA + TraitB + ...`
    ImplTrait(Vec<GenericBound>),
}

#[derive(Clone, PartialEq, Eq, Hash, Copy, Debug)]
/// N.B. this has to be different from `hir::PrimTy` because it also includes types that aren't
/// paths, like `Unit`.
crate enum PrimitiveType {
    Isize,
    I8,
    I16,
    I32,
    I64,
    I128,
    Usize,
    U8,
    U16,
    U32,
    U64,
    U128,
    F32,
    F64,
    Char,
    Bool,
    Str,
    Slice,
    Array,
    Tuple,
    Unit,
    RawPointer,
    Reference,
    Fn,
    Never,
}

// Broad classification of what a DefId refers to, used when recording
// fully-qualified names for external items.
#[derive(Clone, PartialEq, Eq, Hash, Copy, Debug)]
crate enum TypeKind {
    Enum,
    Function,
    Module,
    Const,
    Static,
    Struct,
    Union,
    Trait,
    Typedef,
    Foreign,
    Macro,
    Attr,
    Derive,
    TraitAlias,
    Primitive,
}

crate trait GetDefId {
    /// Use this method to get the [`DefId`] of a [`clean`] AST node.
    /// This will return [`None`] when called on a primitive [`clean::Type`].
    /// Use [`Self::def_id_full`] if you want to include primitives.
/// /// [`clean`]: crate::clean /// [`clean::Type`]: crate::clean::Type // FIXME: get rid of this function and always use `def_id_full` fn def_id(&self) -> Option<DefId>; /// Use this method to get the [DefId] of a [clean] AST node, including [PrimitiveType]s. /// /// See [`Self::def_id`] for more. /// /// [clean]: crate::clean fn def_id_full(&self, cache: &Cache) -> Option<DefId>; } impl<T: GetDefId> GetDefId for Option<T> { fn def_id(&self) -> Option<DefId> { self.as_ref().and_then(|d| d.def_id()) } fn def_id_full(&self, cache: &Cache) -> Option<DefId> { self.as_ref().and_then(|d| d.def_id_full(cache)) } } impl Type { crate fn primitive_type(&self) -> Option<PrimitiveType> { match *self { Primitive(p) | BorrowedRef { type_: box Primitive(p), .. } => Some(p), Slice(..) | BorrowedRef { type_: box Slice(..), .. } => Some(PrimitiveType::Slice), Array(..) | BorrowedRef { type_: box Array(..), .. } => Some(PrimitiveType::Array), Tuple(ref tys) => { if tys.is_empty() { Some(PrimitiveType::Unit) } else { Some(PrimitiveType::Tuple) } } RawPointer(..) => Some(PrimitiveType::RawPointer), BorrowedRef { type_: box Generic(..), .. } => Some(PrimitiveType::Reference), BareFunction(..) => Some(PrimitiveType::Fn), Never => Some(PrimitiveType::Never), _ => None, } } crate fn is_generic(&self) -> bool { match *self { ResolvedPath { is_generic, .. } => is_generic, _ => false, } } crate fn is_self_type(&self) -> bool { match *self { Generic(name) => name == kw::SelfUpper, _ => false, } } crate fn generics(&self) -> Option<Vec<Type>> { match *self { ResolvedPath { ref path, .. } => path.segments.last().and_then(|seg| { if let GenericArgs::AngleBracketed { ref args, .. } = seg.args { Some( args.iter() .filter_map(|arg| match arg { GenericArg::Type(ty) => Some(ty.clone()), _ => None, }) .collect(), ) } else { None } }), _ => None, } } crate fn bindings(&self) -> Option<&[TypeBinding]> { match *self { ResolvedPath { ref path, .. 
} => path.segments.last().and_then(|seg| { if let GenericArgs::AngleBracketed { ref bindings, .. } = seg.args { Some(&**bindings) } else { None } }), _ => None, } } crate fn is_full_generic(&self) -> bool { matches!(self, Type::Generic(_)) } crate fn is_primitive(&self) -> bool { match self { Self::Primitive(_) => true, Self::BorrowedRef { ref type_, .. } | Self::RawPointer(_, ref type_) => { type_.is_primitive() } _ => false, } } crate fn projection(&self) -> Option<(&Type, DefId, Symbol)> { let (self_, trait_, name) = match self { QPath { self_type, trait_, name } => (self_type, trait_, name), _ => return None, }; let trait_did = match **trait_ { ResolvedPath { did, .. } => did, _ => return None, }; Some((&self_, trait_did, *name)) } } impl Type { fn inner_def_id(&self, cache: Option<&Cache>) -> Option<DefId> { let t: PrimitiveType = match *self { ResolvedPath { did, .. } => return Some(did), Primitive(p) => return cache.and_then(|c| c.primitive_locations.get(&p).cloned()), BorrowedRef { type_: box Generic(..), .. } => PrimitiveType::Reference, BorrowedRef { ref type_, .. } => return type_.inner_def_id(cache), Tuple(ref tys) => { if tys.is_empty() { PrimitiveType::Unit } else { PrimitiveType::Tuple } } BareFunction(..) => PrimitiveType::Fn, Never => PrimitiveType::Never, Slice(..) => PrimitiveType::Slice, Array(..) => PrimitiveType::Array, RawPointer(..) => PrimitiveType::RawPointer, QPath { ref self_type, .. 
} => return self_type.inner_def_id(cache), Generic(_) | Infer | ImplTrait(_) => return None, }; cache.and_then(|c| Primitive(t).def_id_full(c)) } } impl GetDefId for Type { fn def_id(&self) -> Option<DefId> { self.inner_def_id(None) } fn def_id_full(&self, cache: &Cache) -> Option<DefId> { self.inner_def_id(Some(cache)) } } impl PrimitiveType { crate fn from_hir(prim: hir::PrimTy) -> PrimitiveType { use ast::{FloatTy, IntTy, UintTy}; match prim { hir::PrimTy::Int(IntTy::Isize) => PrimitiveType::Isize, hir::PrimTy::Int(IntTy::I8) => PrimitiveType::I8, hir::PrimTy::Int(IntTy::I16) => PrimitiveType::I16, hir::PrimTy::Int(IntTy::I32) => PrimitiveType::I32, hir::PrimTy::Int(IntTy::I64) => PrimitiveType::I64, hir::PrimTy::Int(IntTy::I128) => PrimitiveType::I128, hir::PrimTy::Uint(UintTy::Usize) => PrimitiveType::Usize, hir::PrimTy::Uint(UintTy::U8) => PrimitiveType::U8, hir::PrimTy::Uint(UintTy::U16) => PrimitiveType::U16, hir::PrimTy::Uint(UintTy::U32) => PrimitiveType::U32, hir::PrimTy::Uint(UintTy::U64) => PrimitiveType::U64, hir::PrimTy::Uint(UintTy::U128) => PrimitiveType::U128, hir::PrimTy::Float(FloatTy::F32) => PrimitiveType::F32, hir::PrimTy::Float(FloatTy::F64) => PrimitiveType::F64, hir::PrimTy::Str => PrimitiveType::Str, hir::PrimTy::Bool => PrimitiveType::Bool, hir::PrimTy::Char => PrimitiveType::Char, } } crate fn from_symbol(s: Symbol) -> Option<PrimitiveType> { match s { sym::isize => Some(PrimitiveType::Isize), sym::i8 => Some(PrimitiveType::I8), sym::i16 => Some(PrimitiveType::I16), sym::i32 => Some(PrimitiveType::I32), sym::i64 => Some(PrimitiveType::I64), sym::i128 => Some(PrimitiveType::I128), sym::usize => Some(PrimitiveType::Usize), sym::u8 => Some(PrimitiveType::U8), sym::u16 => Some(PrimitiveType::U16), sym::u32 => Some(PrimitiveType::U32), sym::u64 => Some(PrimitiveType::U64), sym::u128 => Some(PrimitiveType::U128), sym::bool => Some(PrimitiveType::Bool), sym::char => Some(PrimitiveType::Char), sym::str => Some(PrimitiveType::Str), sym::f32 => 
Some(PrimitiveType::F32), sym::f64 => Some(PrimitiveType::F64), sym::array => Some(PrimitiveType::Array), sym::slice => Some(PrimitiveType::Slice), sym::tuple => Some(PrimitiveType::Tuple), sym::unit => Some(PrimitiveType::Unit), sym::pointer => Some(PrimitiveType::RawPointer), sym::reference => Some(PrimitiveType::Reference), kw::Fn => Some(PrimitiveType::Fn), sym::never => Some(PrimitiveType::Never), _ => None, } } crate fn as_str(&self) -> &'static str { use self::PrimitiveType::*; match *self { Isize => "isize", I8 => "i8", I16 => "i16", I32 => "i32", I64 => "i64", I128 => "i128", Usize => "usize", U8 => "u8", U16 => "u16", U32 => "u32", U64 => "u64", U128 => "u128", F32 => "f32", F64 => "f64", Str => "str", Bool => "bool", Char => "char", Array => "array", Slice => "slice", Tuple => "tuple", Unit => "unit", RawPointer => "pointer", Reference => "reference", Fn => "fn", Never => "never", } } crate fn impls(&self, tcx: TyCtxt<'_>) -> &'static ArrayVec<[DefId; 4]> { Self::all_impls(tcx).get(self).expect("missing impl for primitive type") } crate fn all_impls(tcx: TyCtxt<'_>) -> &'static FxHashMap<PrimitiveType, ArrayVec<[DefId; 4]>> { static CELL: OnceCell<FxHashMap<PrimitiveType, ArrayVec<[DefId; 4]>>> = OnceCell::new(); CELL.get_or_init(move || { use self::PrimitiveType::*; /// A macro to create a FxHashMap. /// /// Example: /// /// ``` /// let letters = map!{"a" => "b", "c" => "d"}; /// ``` /// /// Trailing commas are allowed. /// Commas between elements are required (even if the expression is a block). macro_rules! map { ($( $key: expr => $val: expr ),* $(,)*) => {{ let mut map = ::rustc_data_structures::fx::FxHashMap::default(); $( map.insert($key, $val); )* map }} } let single = |a: Option<DefId>| a.into_iter().collect(); let both = |a: Option<DefId>, b: Option<DefId>| -> ArrayVec<_> { a.into_iter().chain(b).collect() }; let lang_items = tcx.lang_items(); map! 
{ Isize => single(lang_items.isize_impl()), I8 => single(lang_items.i8_impl()), I16 => single(lang_items.i16_impl()), I32 => single(lang_items.i32_impl()), I64 => single(lang_items.i64_impl()), I128 => single(lang_items.i128_impl()), Usize => single(lang_items.usize_impl()), U8 => single(lang_items.u8_impl()), U16 => single(lang_items.u16_impl()), U32 => single(lang_items.u32_impl()), U64 => single(lang_items.u64_impl()), U128 => single(lang_items.u128_impl()), F32 => both(lang_items.f32_impl(), lang_items.f32_runtime_impl()), F64 => both(lang_items.f64_impl(), lang_items.f64_runtime_impl()), Char => single(lang_items.char_impl()), Bool => single(lang_items.bool_impl()), Str => both(lang_items.str_impl(), lang_items.str_alloc_impl()), Slice => { lang_items .slice_impl() .into_iter() .chain(lang_items.slice_u8_impl()) .chain(lang_items.slice_alloc_impl()) .chain(lang_items.slice_u8_alloc_impl()) .collect() }, Array => single(lang_items.array_impl()), Tuple => ArrayVec::new(), Unit => ArrayVec::new(), RawPointer => { lang_items .const_ptr_impl() .into_iter() .chain(lang_items.mut_ptr_impl()) .chain(lang_items.const_slice_ptr_impl()) .chain(lang_items.mut_slice_ptr_impl()) .collect() }, Reference => ArrayVec::new(), Fn => ArrayVec::new(), Never => ArrayVec::new(), } }) } crate fn to_url_str(&self) -> &'static str { self.as_str() } crate fn as_sym(&self) -> Symbol { use PrimitiveType::*; match self { Isize => sym::isize, I8 => sym::i8, I16 => sym::i16, I32 => sym::i32, I64 => sym::i64, I128 => sym::i128, Usize => sym::usize, U8 => sym::u8, U16 => sym::u16, U32 => sym::u32, U64 => sym::u64, U128 => sym::u128, F32 => sym::f32, F64 => sym::f64, Str => sym::str, Bool => sym::bool, Char => sym::char, Array => sym::array, Slice => sym::slice, Tuple => sym::tuple, Unit => sym::unit, RawPointer => sym::pointer, Reference => sym::reference, Fn => kw::Fn, Never => sym::never, } } } impl From<ast::IntTy> for PrimitiveType { fn from(int_ty: ast::IntTy) -> PrimitiveType { match 
int_ty { ast::IntTy::Isize => PrimitiveType::Isize, ast::IntTy::I8 => PrimitiveType::I8, ast::IntTy::I16 => PrimitiveType::I16, ast::IntTy::I32 => PrimitiveType::I32, ast::IntTy::I64 => PrimitiveType::I64, ast::IntTy::I128 => PrimitiveType::I128, } } } impl From<ast::UintTy> for PrimitiveType { fn from(uint_ty: ast::UintTy) -> PrimitiveType { match uint_ty { ast::UintTy::Usize => PrimitiveType::Usize, ast::UintTy::U8 => PrimitiveType::U8, ast::UintTy::U16 => PrimitiveType::U16, ast::UintTy::U32 => PrimitiveType::U32, ast::UintTy::U64 => PrimitiveType::U64, ast::UintTy::U128 => PrimitiveType::U128, } } } impl From<ast::FloatTy> for PrimitiveType { fn from(float_ty: ast::FloatTy) -> PrimitiveType { match float_ty { ast::FloatTy::F32 => PrimitiveType::F32, ast::FloatTy::F64 => PrimitiveType::F64, } } } impl From<ty::IntTy> for PrimitiveType { fn from(int_ty: ty::IntTy) -> PrimitiveType { match int_ty { ty::IntTy::Isize => PrimitiveType::Isize, ty::IntTy::I8 => PrimitiveType::I8, ty::IntTy::I16 => PrimitiveType::I16, ty::IntTy::I32 => PrimitiveType::I32, ty::IntTy::I64 => PrimitiveType::I64, ty::IntTy::I128 => PrimitiveType::I128, } } } impl From<ty::UintTy> for PrimitiveType { fn from(uint_ty: ty::UintTy) -> PrimitiveType { match uint_ty { ty::UintTy::Usize => PrimitiveType::Usize, ty::UintTy::U8 => PrimitiveType::U8, ty::UintTy::U16 => PrimitiveType::U16, ty::UintTy::U32 => PrimitiveType::U32, ty::UintTy::U64 => PrimitiveType::U64, ty::UintTy::U128 => PrimitiveType::U128, } } } impl From<ty::FloatTy> for PrimitiveType { fn from(float_ty: ty::FloatTy) -> PrimitiveType { match float_ty { ty::FloatTy::F32 => PrimitiveType::F32, ty::FloatTy::F64 => PrimitiveType::F64, } } } impl From<hir::PrimTy> for PrimitiveType { fn from(prim_ty: hir::PrimTy) -> PrimitiveType { match prim_ty { hir::PrimTy::Int(int_ty) => int_ty.into(), hir::PrimTy::Uint(uint_ty) => uint_ty.into(), hir::PrimTy::Float(float_ty) => float_ty.into(), hir::PrimTy::Str => PrimitiveType::Str, 
hir::PrimTy::Bool => PrimitiveType::Bool,
            hir::PrimTy::Char => PrimitiveType::Char,
        }
    }
}

/// The effective visibility of an item.
#[derive(Copy, Clone, Debug)]
crate enum Visibility {
    Public,
    /// Visibility inherited from the enclosing scope.
    Inherited,
    /// `pub(in path)`-style restricted visibility, scoped to the given module.
    Restricted(DefId),
}

impl Visibility {
    crate fn is_public(&self) -> bool {
        matches!(self, Visibility::Public)
    }
}

/// A cleaned `struct`.
#[derive(Clone, Debug)]
crate struct Struct {
    crate struct_type: CtorKind,
    crate generics: Generics,
    crate fields: Vec<Item>,
    // `true` if some fields are not shown (stripped by a rustdoc pass).
    crate fields_stripped: bool,
}

/// A cleaned `union`.
#[derive(Clone, Debug)]
crate struct Union {
    crate generics: Generics,
    crate fields: Vec<Item>,
    crate fields_stripped: bool,
}

/// This is a more limited form of the standard Struct, different in that
/// it lacks the things most items have (name, id, parameterization). Found
/// only as a variant in an enum.
#[derive(Clone, Debug)]
crate struct VariantStruct {
    crate struct_type: CtorKind,
    crate fields: Vec<Item>,
    crate fields_stripped: bool,
}

/// A cleaned `enum`.
#[derive(Clone, Debug)]
crate struct Enum {
    crate variants: IndexVec<VariantIdx, Item>,
    crate generics: Generics,
    crate variants_stripped: bool,
}

/// The shape of a single enum variant.
#[derive(Clone, Debug)]
crate enum Variant {
    CLike,
    Tuple(Vec<Type>),
    Struct(VariantStruct),
}

/// Small wrapper around `rustc_span::Span` that adds helper methods and enforces calling `source_callsite`.
#[derive(Clone, Debug)]
crate struct Span(rustc_span::Span);

impl Span {
    /// Wraps a raw span, resolving it to the macro *invocation* site when it
    /// is the result of macro expansion.
    crate fn from_rustc_span(sp: rustc_span::Span) -> Self {
        // Get the macro invocation instead of the definition,
        // in case the span is result of a macro expansion.
        // (See rust-lang/rust#39726)
        Self(sp.source_callsite())
    }

    /// A span that points nowhere; used for synthesized items.
    crate fn dummy() -> Self {
        Self(rustc_span::DUMMY_SP)
    }

    /// The underlying `rustc_span::Span`.
    crate fn span(&self) -> rustc_span::Span {
        self.0
    }

    crate fn filename(&self, sess: &Session) -> FileName {
        sess.source_map().span_to_filename(self.0)
    }

    /// Source location of the span's low byte.
    crate fn lo(&self, sess: &Session) -> Loc {
        sess.source_map().lookup_char_pos(self.0.lo())
    }

    /// Source location of the span's high byte.
    crate fn hi(&self, sess: &Session) -> Loc {
        sess.source_map().lookup_char_pos(self.0.hi())
    }

    /// The crate this span's file belongs to.
    crate fn cnum(&self, sess: &Session) -> CrateNum {
        // FIXME: is there a time when the lo and hi crate would be different?
        self.lo(sess).file.cnum
    }
}

/// A cleaned path, e.g. `std::vec::Vec`.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct Path {
    // `true` if the path was written with a leading `::`.
    crate global: bool,
    crate res: Res,
    crate segments: Vec<PathSegment>,
}

impl Path {
    /// The final segment's name; panics if the path has no segments.
    crate fn last(&self) -> Symbol {
        self.segments.last().expect("segments were empty").name
    }

    /// Like [`Self::last`], but as an interned string slice.
    crate fn last_name(&self) -> SymbolStr {
        self.segments.last().expect("segments were empty").name.as_str()
    }

    /// Renders the whole path, `::`-separated, with a leading `::` when global.
    crate fn whole_name(&self) -> String {
        String::from(if self.global { "::" } else { "" })
            + &self.segments.iter().map(|s| s.name.to_string()).collect::<Vec<_>>().join("::")
    }
}

/// A single generic argument: a lifetime, a type, or a const.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum GenericArg {
    Lifetime(Lifetime),
    Type(Type),
    Const(Constant),
}

/// Generic arguments as written: `<T, U, A = B>` or `Fn`-style `(T, U) -> V`.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum GenericArgs {
    AngleBracketed { args: Vec<GenericArg>, bindings: Vec<TypeBinding> },
    Parenthesized { inputs: Vec<Type>, output: Option<Type> },
}

/// One segment of a [`Path`], together with its generic arguments.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct PathSegment {
    crate name: Symbol,
    crate args: GenericArgs,
}

/// A cleaned `type` alias.
#[derive(Clone, Debug)]
crate struct Typedef {
    crate type_: Type,
    crate generics: Generics,
    /// `type_` can come from either the HIR or from metadata. If it comes from HIR, it may be a
    /// type alias instead of the final type. This will always have the final type, regardless of
    /// whether `type_` came from HIR or from metadata.
    ///
    /// If `item_type.is_none()`, `type_` is guaranteed to come from metadata (and therefore hold
    /// the final type).
    crate item_type: Option<Type>,
}

impl GetDefId for Typedef {
    fn def_id(&self) -> Option<DefId> {
        self.type_.def_id()
    }
    fn def_id_full(&self, cache: &Cache) -> Option<DefId> {
        self.type_.def_id_full(cache)
    }
}

/// A cleaned opaque type (`impl Trait` in type position).
#[derive(Clone, Debug)]
crate struct OpaqueTy {
    crate bounds: Vec<GenericBound>,
    crate generics: Generics,
}

/// An `extern "ABI" fn` pointer type.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct BareFunctionDecl {
    crate unsafety: hir::Unsafety,
    crate generic_params: Vec<GenericParamDef>,
    crate decl: FnDecl,
    crate abi: Abi,
}

/// A cleaned `static` item.
#[derive(Clone, Debug)]
crate struct Static {
    crate type_: Type,
    crate mutability: Mutability,
    /// It's useful to have the value of a static documented, but I have no
    /// desire to represent expressions (that'd basically be all of the AST,
    /// which is huge!). So, have a string.
    crate expr: String,
}

/// A cleaned `const` item or constant expression.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
crate struct Constant {
    crate type_: Type,
    crate expr: String,
    crate value: Option<String>,
    crate is_literal: bool,
}

/// A cleaned `impl` block, inherent or trait.
#[derive(Clone, Debug)]
crate struct Impl {
    crate unsafety: hir::Unsafety,
    crate generics: Generics,
    crate provided_trait_methods: FxHashSet<Symbol>,
    crate trait_: Option<Type>,
    crate for_: Type,
    crate items: Vec<Item>,
    crate negative_polarity: bool,
    crate synthetic: bool,
    crate blanket_impl: Option<Type>,
}

/// A cleaned `use` item.
#[derive(Clone, Debug)]
crate struct Import {
    crate kind: ImportKind,
    crate source: ImportSource,
    crate should_be_displayed: bool,
}

impl Import {
    /// Builds a `use source as name;` import.
    crate fn new_simple(name: Symbol, source: ImportSource, should_be_displayed: bool) -> Self {
        Self { kind: ImportKind::Simple(name), source, should_be_displayed }
    }

    /// Builds a `use source::*;` glob import.
    crate fn new_glob(source: ImportSource, should_be_displayed: bool) -> Self {
        Self { kind: ImportKind::Glob, source, should_be_displayed }
    }
}

#[derive(Clone, Debug)]
crate enum ImportKind {
    // use source as str;
    Simple(Symbol),
    // use source::*;
    Glob,
}

#[derive(Clone, Debug)] crate
struct ImportSource { crate path: Path, crate did: Option<DefId>, } #[derive(Clone, Debug)] crate struct Macro { crate source: String, crate imported_from: Option<Symbol>, } #[derive(Clone, Debug)] crate struct ProcMacro { crate kind: MacroKind, crate helpers: Vec<Symbol>, } /// An type binding on an associated type (e.g., `A = Bar` in `Foo<A = Bar>` or /// `A: Send + Sync` in `Foo<A: Send + Sync>`). #[derive(Clone, PartialEq, Eq, Debug, Hash)] crate struct TypeBinding { crate name: Symbol, crate kind: TypeBindingKind, } #[derive(Clone, PartialEq, Eq, Debug, Hash)] crate enum TypeBindingKind { Equality { ty: Type }, Constraint { bounds: Vec<GenericBound> }, } impl TypeBinding { crate fn ty(&self) -> &Type { match self.kind { TypeBindingKind::Equality { ref ty } => ty, _ => panic!("expected equality type binding for parenthesized generic args"), } } } Explicitly match all DefKind item in conversion to TypeKind use std::cell::RefCell; use std::default::Default; use std::fmt; use std::hash::{Hash, Hasher}; use std::iter::FromIterator; use std::lazy::SyncOnceCell as OnceCell; use std::rc::Rc; use std::sync::Arc; use std::{slice, vec}; use arrayvec::ArrayVec; use rustc_ast::attr; use rustc_ast::util::comments::beautify_doc_string; use rustc_ast::{self as ast, AttrStyle}; use rustc_attr::{ConstStability, Deprecation, Stability, StabilityLevel}; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_feature::UnstableFeatures; use rustc_hir as hir; use rustc_hir::def::{CtorKind, Res}; use rustc_hir::def_id::{CrateNum, DefId, DefIndex}; use rustc_hir::lang_items::LangItem; use rustc_hir::Mutability; use rustc_index::vec::IndexVec; use rustc_middle::ty::{self, TyCtxt}; use rustc_session::Session; use rustc_span::hygiene::MacroKind; use rustc_span::source_map::DUMMY_SP; use rustc_span::symbol::{kw, sym, Ident, Symbol, SymbolStr}; use rustc_span::{self, FileName, Loc}; use rustc_target::abi::VariantIdx; use rustc_target::spec::abi::Abi; use crate::clean::cfg::Cfg; 
use crate::clean::external_path; use crate::clean::inline; use crate::clean::types::Type::{QPath, ResolvedPath}; use crate::clean::Clean; use crate::core::DocContext; use crate::formats::cache::Cache; use crate::formats::item_type::ItemType; use crate::html::render::cache::ExternalLocation; use self::FnRetTy::*; use self::ItemKind::*; use self::SelfTy::*; use self::Type::*; thread_local!(crate static MAX_DEF_IDX: RefCell<FxHashMap<CrateNum, DefIndex>> = Default::default()); #[derive(Clone, Debug)] crate struct Crate { crate name: Symbol, crate version: Option<String>, crate src: FileName, crate module: Option<Item>, crate externs: Vec<(CrateNum, ExternalCrate)>, crate primitives: Vec<(DefId, PrimitiveType)>, // These are later on moved into `CACHEKEY`, leaving the map empty. // Only here so that they can be filtered through the rustdoc passes. crate external_traits: Rc<RefCell<FxHashMap<DefId, Trait>>>, crate masked_crates: FxHashSet<CrateNum>, crate collapsed: bool, } #[derive(Clone, Debug)] crate struct ExternalCrate { crate name: Symbol, crate src: FileName, crate attrs: Attributes, crate primitives: Vec<(DefId, PrimitiveType)>, crate keywords: Vec<(DefId, Symbol)>, } /// Anything with a source location and set of attributes and, optionally, a /// name. That is, anything that can be documented. This doesn't correspond /// directly to the AST's concept of an item; it's a strict superset. #[derive(Clone)] crate struct Item { /// Stringified span crate source: Span, /// Not everything has a name. E.g., impls crate name: Option<Symbol>, crate attrs: Box<Attributes>, crate visibility: Visibility, crate kind: Box<ItemKind>, crate def_id: DefId, } // `Item` is used a lot. Make sure it doesn't unintentionally get bigger. 
#[cfg(target_arch = "x86_64")] rustc_data_structures::static_assert_size!(Item, 48); impl fmt::Debug for Item { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let def_id: &dyn fmt::Debug = if self.is_fake() { &"**FAKE**" } else { &self.def_id }; fmt.debug_struct("Item") .field("source", &self.source) .field("name", &self.name) .field("attrs", &self.attrs) .field("kind", &self.kind) .field("visibility", &self.visibility) .field("def_id", def_id) .finish() } } impl Item { crate fn stability<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Option<&'tcx Stability> { if self.is_fake() { None } else { tcx.lookup_stability(self.def_id) } } crate fn const_stability<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Option<&'tcx ConstStability> { if self.is_fake() { None } else { tcx.lookup_const_stability(self.def_id) } } crate fn deprecation(&self, tcx: TyCtxt<'_>) -> Option<Deprecation> { if self.is_fake() { None } else { tcx.lookup_deprecation(self.def_id) } } /// Finds the `doc` attribute as a NameValue and returns the corresponding /// value found. 
crate fn doc_value(&self) -> Option<String> { self.attrs.doc_value() } /// Convenience wrapper around [`Self::from_def_id_and_parts`] which converts /// `hir_id` to a [`DefId`] pub fn from_hir_id_and_parts( hir_id: hir::HirId, name: Option<Symbol>, kind: ItemKind, cx: &DocContext<'_>, ) -> Item { Item::from_def_id_and_parts(cx.tcx.hir().local_def_id(hir_id).to_def_id(), name, kind, cx) } pub fn from_def_id_and_parts( def_id: DefId, name: Option<Symbol>, kind: ItemKind, cx: &DocContext<'_>, ) -> Item { debug!("name={:?}, def_id={:?}", name, def_id); // `span_if_local()` lies about functions and only gives the span of the function signature let source = def_id.as_local().map_or_else( || cx.tcx.def_span(def_id), |local| { let hir = cx.tcx.hir(); hir.span_with_body(hir.local_def_id_to_hir_id(local)) }, ); Item { def_id, kind: box kind, name, source: source.clean(cx), attrs: box cx.tcx.get_attrs(def_id).clean(cx), visibility: cx.tcx.visibility(def_id).clean(cx), } } /// Finds all `doc` attributes as NameValues and returns their corresponding values, joined /// with newlines. crate fn collapsed_doc_value(&self) -> Option<String> { self.attrs.collapsed_doc_value() } crate fn links(&self, cache: &Cache) -> Vec<RenderedLink> { self.attrs.links(&self.def_id.krate, cache) } crate fn is_crate(&self) -> bool { matches!( *self.kind, StrippedItem(box ModuleItem(Module { is_crate: true, .. })) | ModuleItem(Module { is_crate: true, .. 
}) ) } crate fn is_mod(&self) -> bool { self.type_() == ItemType::Module } crate fn is_trait(&self) -> bool { self.type_() == ItemType::Trait } crate fn is_struct(&self) -> bool { self.type_() == ItemType::Struct } crate fn is_enum(&self) -> bool { self.type_() == ItemType::Enum } crate fn is_variant(&self) -> bool { self.type_() == ItemType::Variant } crate fn is_associated_type(&self) -> bool { self.type_() == ItemType::AssocType } crate fn is_associated_const(&self) -> bool { self.type_() == ItemType::AssocConst } crate fn is_method(&self) -> bool { self.type_() == ItemType::Method } crate fn is_ty_method(&self) -> bool { self.type_() == ItemType::TyMethod } crate fn is_typedef(&self) -> bool { self.type_() == ItemType::Typedef } crate fn is_primitive(&self) -> bool { self.type_() == ItemType::Primitive } crate fn is_union(&self) -> bool { self.type_() == ItemType::Union } crate fn is_import(&self) -> bool { self.type_() == ItemType::Import } crate fn is_extern_crate(&self) -> bool { self.type_() == ItemType::ExternCrate } crate fn is_keyword(&self) -> bool { self.type_() == ItemType::Keyword } crate fn is_stripped(&self) -> bool { match *self.kind { StrippedItem(..) => true, ImportItem(ref i) => !i.should_be_displayed, _ => false, } } crate fn has_stripped_fields(&self) -> Option<bool> { match *self.kind { StructItem(ref _struct) => Some(_struct.fields_stripped), UnionItem(ref union) => Some(union.fields_stripped), VariantItem(Variant::Struct(ref vstruct)) => Some(vstruct.fields_stripped), _ => None, } } crate fn stability_class(&self, tcx: TyCtxt<'_>) -> Option<String> { self.stability(tcx).as_ref().and_then(|ref s| { let mut classes = Vec::with_capacity(2); if s.level.is_unstable() { classes.push("unstable"); } // FIXME: what about non-staged API items that are deprecated? 
if self.deprecation(tcx).is_some() { classes.push("deprecated"); } if !classes.is_empty() { Some(classes.join(" ")) } else { None } }) } crate fn stable_since(&self, tcx: TyCtxt<'_>) -> Option<SymbolStr> { match self.stability(tcx)?.level { StabilityLevel::Stable { since, .. } => Some(since.as_str()), StabilityLevel::Unstable { .. } => None, } } crate fn const_stable_since(&self, tcx: TyCtxt<'_>) -> Option<SymbolStr> { match self.const_stability(tcx)?.level { StabilityLevel::Stable { since, .. } => Some(since.as_str()), StabilityLevel::Unstable { .. } => None, } } crate fn is_non_exhaustive(&self) -> bool { self.attrs.other_attrs.iter().any(|a| a.has_name(sym::non_exhaustive)) } /// Returns a documentation-level item type from the item. crate fn type_(&self) -> ItemType { ItemType::from(self) } crate fn is_default(&self) -> bool { match *self.kind { ItemKind::MethodItem(_, Some(defaultness)) => { defaultness.has_value() && !defaultness.is_final() } _ => false, } } /// See the documentation for [`next_def_id()`]. /// /// [`next_def_id()`]: DocContext::next_def_id() crate fn is_fake(&self) -> bool { MAX_DEF_IDX.with(|m| { m.borrow().get(&self.def_id.krate).map(|&idx| idx <= self.def_id.index).unwrap_or(false) }) } } #[derive(Clone, Debug)] crate enum ItemKind { ExternCrateItem(Symbol, Option<Symbol>), ImportItem(Import), StructItem(Struct), UnionItem(Union), EnumItem(Enum), FunctionItem(Function), ModuleItem(Module), TypedefItem(Typedef, bool /* is associated type */), OpaqueTyItem(OpaqueTy), StaticItem(Static), ConstantItem(Constant), TraitItem(Trait), TraitAliasItem(TraitAlias), ImplItem(Impl), /// A method signature only. Used for required methods in traits (ie, /// non-default-methods). TyMethodItem(Function), /// A method with a body. 
MethodItem(Function, Option<hir::Defaultness>), StructFieldItem(Type), VariantItem(Variant), /// `fn`s from an extern block ForeignFunctionItem(Function), /// `static`s from an extern block ForeignStaticItem(Static), /// `type`s from an extern block ForeignTypeItem, MacroItem(Macro), ProcMacroItem(ProcMacro), PrimitiveItem(PrimitiveType), AssocConstItem(Type, Option<String>), /// An associated item in a trait or trait impl. /// /// The bounds may be non-empty if there is a `where` clause. /// The `Option<Type>` is the default concrete type (e.g. `trait Trait { type Target = usize; }`) AssocTypeItem(Vec<GenericBound>, Option<Type>), /// An item that has been stripped by a rustdoc pass StrippedItem(Box<ItemKind>), KeywordItem(Symbol), } impl ItemKind { /// Some items contain others such as structs (for their fields) and Enums /// (for their variants). This method returns those contained items. crate fn inner_items(&self) -> impl Iterator<Item = &Item> { match self { StructItem(s) => s.fields.iter(), UnionItem(u) => u.fields.iter(), VariantItem(Variant::Struct(v)) => v.fields.iter(), EnumItem(e) => e.variants.iter(), TraitItem(t) => t.items.iter(), ImplItem(i) => i.items.iter(), ModuleItem(m) => m.items.iter(), ExternCrateItem(_, _) | ImportItem(_) | FunctionItem(_) | TypedefItem(_, _) | OpaqueTyItem(_) | StaticItem(_) | ConstantItem(_) | TraitAliasItem(_) | TyMethodItem(_) | MethodItem(_, _) | StructFieldItem(_) | VariantItem(_) | ForeignFunctionItem(_) | ForeignStaticItem(_) | ForeignTypeItem | MacroItem(_) | ProcMacroItem(_) | PrimitiveItem(_) | AssocConstItem(_, _) | AssocTypeItem(_, _) | StrippedItem(_) | KeywordItem(_) => [].iter(), } } crate fn is_type_alias(&self) -> bool { matches!(self, ItemKind::TypedefItem(..) 
| ItemKind::AssocTypeItem(..)) } }

/// A cleaned module: its items plus whether it is the crate root.
#[derive(Clone, Debug)]
crate struct Module {
    crate items: Vec<Item>,
    crate is_crate: bool,
}

/// Iterator over the nested meta-items of every `#[name(...)]` list
/// attribute in an attribute slice.
crate struct ListAttributesIter<'a> {
    attrs: slice::Iter<'a, ast::Attribute>,
    // Nested items of the list attribute currently being drained.
    current_list: vec::IntoIter<ast::NestedMetaItem>,
    name: Symbol,
}

impl<'a> Iterator for ListAttributesIter<'a> {
    type Item = ast::NestedMetaItem;

    fn next(&mut self) -> Option<Self::Item> {
        // Drain the current list before advancing to the next attribute.
        if let Some(nested) = self.current_list.next() {
            return Some(nested);
        }

        for attr in &mut self.attrs {
            if let Some(list) = attr.meta_item_list() {
                if attr.has_name(self.name) {
                    self.current_list = list.into_iter();
                    if let Some(nested) = self.current_list.next() {
                        return Some(nested);
                    }
                }
            }
        }

        None
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Only the already-loaded list gives a guaranteed lower bound;
        // the remaining attributes may or may not match `name`.
        let lower = self.current_list.len();
        (lower, None)
    }
}

crate trait AttributesExt {
    /// Finds an attribute as List and returns the list of attributes nested inside.
    fn lists(&self, name: Symbol) -> ListAttributesIter<'_>;
}

impl AttributesExt for [ast::Attribute] {
    fn lists(&self, name: Symbol) -> ListAttributesIter<'_> {
        ListAttributesIter { attrs: self.iter(), current_list: Vec::new().into_iter(), name }
    }
}

crate trait NestedAttributesExt {
    /// Returns `true` if the attribute list contains a specific `Word`
    fn has_word(self, word: Symbol) -> bool;
    /// Returns the first matching word attribute, if any, plus whether one was found.
    fn get_word_attr(self, word: Symbol) -> (Option<ast::NestedMetaItem>, bool);
}

impl<I: Iterator<Item = ast::NestedMetaItem> + IntoIterator<Item = ast::NestedMetaItem>>
    NestedAttributesExt for I
{
    fn has_word(self, word: Symbol) -> bool {
        self.into_iter().any(|attr| attr.is_word() && attr.has_name(word))
    }

    fn get_word_attr(mut self, word: Symbol) -> (Option<ast::NestedMetaItem>, bool) {
        match self.find(|attr| attr.is_word() && attr.has_name(word)) {
            Some(a) => (Some(a), true),
            None => (None, false),
        }
    }
}

/// A portion of documentation, extracted from a `#[doc]` attribute.
///
/// Each variant contains the line number within the complete doc-comment where the fragment
/// starts, as well as the Span where the corresponding doc comment or attribute is located.
///
/// Included files are kept separate from inline doc comments so that proper line-number
/// information can be given when a doctest fails. Sugared doc comments and "raw" doc comments are
/// kept separate because of issue #42760.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct DocFragment {
    crate line: usize,
    crate span: rustc_span::Span,
    /// The module this doc-comment came from.
    ///
    /// This allows distinguishing between the original documentation and a pub re-export.
    /// If it is `None`, the item was not re-exported.
    crate parent_module: Option<DefId>,
    crate doc: Symbol,
    crate kind: DocFragmentKind,
    // Whether a trailing newline must be appended after this fragment.
    crate need_backline: bool,
    // Common leading-whitespace width stripped from each line when rendering.
    crate indent: usize,
}

#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
crate enum DocFragmentKind {
    /// A doc fragment created from a `///` or `//!` doc comment.
    SugaredDoc,
    /// A doc fragment created from a "raw" `#[doc=""]` attribute.
    RawDoc,
    /// A doc fragment created from a `#[doc(include="filename")]` attribute. Contains both the
    /// given filename and the file contents.
    Include { filename: Symbol },
}

// The goal of this function is to apply the `DocFragment` transformations that are required when
// transforming into the final markdown. So the transformations in here are:
//
// * Applying the computed indent to each line in each doc fragment (a `DocFragment` can contain
// multiple lines in case of `#[doc = ""]`).
// * Adding backlines between `DocFragment`s and adding an extra one if required (stored in the
// `need_backline` field).
fn add_doc_fragment(out: &mut String, frag: &DocFragment) { let s = frag.doc.as_str(); let mut iter = s.lines().peekable(); while let Some(line) = iter.next() { if line.chars().any(|c| !c.is_whitespace()) { assert!(line.len() >= frag.indent); out.push_str(&line[frag.indent..]); } else { out.push_str(line); } if iter.peek().is_some() { out.push('\n'); } } if frag.need_backline { out.push('\n'); } } impl<'a> FromIterator<&'a DocFragment> for String { fn from_iter<T>(iter: T) -> Self where T: IntoIterator<Item = &'a DocFragment>, { let mut prev_kind: Option<DocFragmentKind> = None; iter.into_iter().fold(String::new(), |mut acc, frag| { if !acc.is_empty() && prev_kind .take() .map(|p| matches!(p, DocFragmentKind::Include { .. }) && p != frag.kind) .unwrap_or(false) { acc.push('\n'); } add_doc_fragment(&mut acc, &frag); prev_kind = Some(frag.kind); acc }) } } #[derive(Clone, Debug, Default)] crate struct Attributes { crate doc_strings: Vec<DocFragment>, crate other_attrs: Vec<ast::Attribute>, crate cfg: Option<Arc<Cfg>>, crate span: Option<rustc_span::Span>, /// map from Rust paths to resolved defs and potential URL fragments crate links: Vec<ItemLink>, crate inner_docs: bool, } #[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] /// A link that has not yet been rendered. /// /// This link will be turned into a rendered link by [`Attributes::links`] crate struct ItemLink { /// The original link written in the markdown pub(crate) link: String, /// The link text displayed in the HTML. /// /// This may not be the same as `link` if there was a disambiguator /// in an intra-doc link (e.g. \[`fn@f`\]) pub(crate) link_text: String, pub(crate) did: Option<DefId>, /// The url fragment to append to the link pub(crate) fragment: Option<String>, } pub struct RenderedLink { /// The text the link was original written as. /// /// This could potentially include disambiguators and backticks. 
pub(crate) original_text: String, /// The text to display in the HTML pub(crate) new_text: String, /// The URL to put in the `href` pub(crate) href: String, } impl Attributes { /// Extracts the content from an attribute `#[doc(cfg(content))]`. crate fn extract_cfg(mi: &ast::MetaItem) -> Option<&ast::MetaItem> { use rustc_ast::NestedMetaItem::MetaItem; if let ast::MetaItemKind::List(ref nmis) = mi.kind { if nmis.len() == 1 { if let MetaItem(ref cfg_mi) = nmis[0] { if cfg_mi.has_name(sym::cfg) { if let ast::MetaItemKind::List(ref cfg_nmis) = cfg_mi.kind { if cfg_nmis.len() == 1 { if let MetaItem(ref content_mi) = cfg_nmis[0] { return Some(content_mi); } } } } } } } None } /// Reads a `MetaItem` from within an attribute, looks for whether it is a /// `#[doc(include="file")]`, and returns the filename and contents of the file as loaded from /// its expansion. crate fn extract_include(mi: &ast::MetaItem) -> Option<(Symbol, Symbol)> { mi.meta_item_list().and_then(|list| { for meta in list { if meta.has_name(sym::include) { // the actual compiled `#[doc(include="filename")]` gets expanded to // `#[doc(include(file="filename", contents="file contents")]` so we need to // look for that instead return meta.meta_item_list().and_then(|list| { let mut filename: Option<Symbol> = None; let mut contents: Option<Symbol> = None; for it in list { if it.has_name(sym::file) { if let Some(name) = it.value_str() { filename = Some(name); } } else if it.has_name(sym::contents) { if let Some(docs) = it.value_str() { contents = Some(docs); } } } if let (Some(filename), Some(contents)) = (filename, contents) { Some((filename, contents)) } else { None } }); } } None }) } crate fn has_doc_flag(&self, flag: Symbol) -> bool { for attr in &self.other_attrs { if !attr.has_name(sym::doc) { continue; } if let Some(items) = attr.meta_item_list() { if items.iter().filter_map(|i| i.meta_item()).any(|it| it.has_name(flag)) { return true; } } } false } crate fn from_ast( diagnostic: 
&::rustc_errors::Handler, attrs: &[ast::Attribute], additional_attrs: Option<(&[ast::Attribute], DefId)>, ) -> Attributes { let mut doc_strings: Vec<DocFragment> = vec![]; let mut sp = None; let mut cfg = Cfg::True; let mut doc_line = 0; fn update_need_backline(doc_strings: &mut Vec<DocFragment>, frag: &DocFragment) { if let Some(prev) = doc_strings.last_mut() { if matches!(prev.kind, DocFragmentKind::Include { .. }) || prev.kind != frag.kind || prev.parent_module != frag.parent_module { // add a newline for extra padding between segments prev.need_backline = prev.kind == DocFragmentKind::SugaredDoc || prev.kind == DocFragmentKind::RawDoc } else { prev.need_backline = true; } } } let clean_attr = |(attr, parent_module): (&ast::Attribute, _)| { if let Some(value) = attr.doc_str() { trace!("got doc_str={:?}", value); let value = beautify_doc_string(value); let kind = if attr.is_doc_comment() { DocFragmentKind::SugaredDoc } else { DocFragmentKind::RawDoc }; let line = doc_line; doc_line += value.as_str().lines().count(); let frag = DocFragment { line, span: attr.span, doc: value, kind, parent_module, need_backline: false, indent: 0, }; update_need_backline(&mut doc_strings, &frag); doc_strings.push(frag); if sp.is_none() { sp = Some(attr.span); } None } else { if attr.has_name(sym::doc) { if let Some(mi) = attr.meta() { if let Some(cfg_mi) = Attributes::extract_cfg(&mi) { // Extracted #[doc(cfg(...))] match Cfg::parse(cfg_mi) { Ok(new_cfg) => cfg &= new_cfg, Err(e) => diagnostic.span_err(e.span, e.msg), } } else if let Some((filename, contents)) = Attributes::extract_include(&mi) { let line = doc_line; doc_line += contents.as_str().lines().count(); let frag = DocFragment { line, span: attr.span, doc: contents, kind: DocFragmentKind::Include { filename }, parent_module, need_backline: false, indent: 0, }; update_need_backline(&mut doc_strings, &frag); doc_strings.push(frag); } } } Some(attr.clone()) } }; // Additional documentation should be shown before the original 
documentation let other_attrs = additional_attrs .into_iter() .map(|(attrs, id)| attrs.iter().map(move |attr| (attr, Some(id)))) .flatten() .chain(attrs.iter().map(|attr| (attr, None))) .filter_map(clean_attr) .collect(); // treat #[target_feature(enable = "feat")] attributes as if they were // #[doc(cfg(target_feature = "feat"))] attributes as well for attr in attrs.lists(sym::target_feature) { if attr.has_name(sym::enable) { if let Some(feat) = attr.value_str() { let meta = attr::mk_name_value_item_str( Ident::with_dummy_span(sym::target_feature), feat, DUMMY_SP, ); if let Ok(feat_cfg) = Cfg::parse(&meta) { cfg &= feat_cfg; } } } } let inner_docs = attrs .iter() .find(|a| a.doc_str().is_some()) .map_or(true, |a| a.style == AttrStyle::Inner); Attributes { doc_strings, other_attrs, cfg: if cfg == Cfg::True { None } else { Some(Arc::new(cfg)) }, span: sp, links: vec![], inner_docs, } } /// Finds the `doc` attribute as a NameValue and returns the corresponding /// value found. crate fn doc_value(&self) -> Option<String> { let mut iter = self.doc_strings.iter(); let ori = iter.next()?; let mut out = String::new(); add_doc_fragment(&mut out, &ori); while let Some(new_frag) = iter.next() { if matches!(ori.kind, DocFragmentKind::Include { .. }) || new_frag.kind != ori.kind || new_frag.parent_module != ori.parent_module { break; } add_doc_fragment(&mut out, &new_frag); } if out.is_empty() { None } else { Some(out) } } /// Return the doc-comments on this item, grouped by the module they came from. /// /// The module can be different if this is a re-export with added documentation. crate fn collapsed_doc_value_by_module_level(&self) -> FxHashMap<Option<DefId>, String> { let mut ret = FxHashMap::default(); for new_frag in self.doc_strings.iter() { let out = ret.entry(new_frag.parent_module).or_default(); add_doc_fragment(out, &new_frag); } ret } /// Finds all `doc` attributes as NameValues and returns their corresponding values, joined /// with newlines. 
crate fn collapsed_doc_value(&self) -> Option<String> {
    if self.doc_strings.is_empty() { None } else { Some(self.doc_strings.iter().collect()) }
}

/// Gets links as a vector
///
/// Cache must be populated before call
crate fn links(&self, krate: &CrateNum, cache: &Cache) -> Vec<RenderedLink> {
    use crate::html::format::href;
    use crate::html::render::CURRENT_DEPTH;

    self.links
        .iter()
        .filter_map(|ItemLink { link: s, link_text, did, fragment }| {
            match *did {
                Some(did) => {
                    // Resolved intra-doc link: build the href from the target
                    // `DefId`, appending the URL fragment if there is one.
                    if let Some((mut href, ..)) = href(did, cache) {
                        if let Some(ref fragment) = *fragment {
                            href.push('#');
                            href.push_str(fragment);
                        }
                        Some(RenderedLink {
                            original_text: s.clone(),
                            new_text: link_text.clone(),
                            href,
                        })
                    } else {
                        None
                    }
                }
                None => {
                    // No `DefId`: expected to be a primitive link whose fragment
                    // carries the primitive name (and optionally `#anchor`).
                    if let Some(ref fragment) = *fragment {
                        // Choose the docs base URL depending on where the crate's
                        // documentation lives.
                        let url = match cache.extern_locations.get(krate) {
                            Some(&(_, _, ExternalLocation::Local)) => {
                                let depth = CURRENT_DEPTH.with(|l| l.get());
                                "../".repeat(depth)
                            }
                            Some(&(_, _, ExternalLocation::Remote(ref s))) => s.to_string(),
                            Some(&(_, _, ExternalLocation::Unknown)) | None => String::from(
                                // NOTE: intentionally doesn't pass crate name to avoid having
                                // different primitive links between crates
                                if UnstableFeatures::from_environment(None).is_nightly_build() {
                                    "https://doc.rust-lang.org/nightly"
                                } else {
                                    "https://doc.rust-lang.org"
                                },
                            ),
                        };
                        // This is a primitive so the url is done "by hand".
                        let tail = fragment.find('#').unwrap_or_else(|| fragment.len());
                        Some(RenderedLink {
                            original_text: s.clone(),
                            new_text: link_text.clone(),
                            href: format!(
                                "{}{}std/primitive.{}.html{}",
                                url,
                                if !url.ends_with('/') { "/" } else { "" },
                                &fragment[..tail],
                                &fragment[tail..]
                            ),
                        })
                    } else {
                        panic!("This isn't a primitive?!");
                    }
                }
            }
        })
        .collect()
}

/// Collects every `#[doc(alias = "...")]` value on this item, excluding empty strings.
crate fn get_doc_aliases(&self) -> FxHashSet<String> {
    self.other_attrs
        .lists(sym::doc)
        .filter(|a| a.has_name(sym::alias))
        .filter_map(|a| a.value_str().map(|s| s.to_string()))
        .filter(|v| !v.is_empty())
        .collect::<FxHashSet<_>>()
}
}

// `other_attrs` is compared by attribute id, not contents; the other fields
// are compared structurally.
impl PartialEq for Attributes {
    fn eq(&self, rhs: &Self) -> bool {
        self.doc_strings == rhs.doc_strings
            && self.cfg == rhs.cfg
            && self.span == rhs.span
            && self.links == rhs.links
            && self
                .other_attrs
                .iter()
                .map(|attr| attr.id)
                .eq(rhs.other_attrs.iter().map(|attr| attr.id))
    }
}

impl Eq for Attributes {}

// Must stay consistent with `PartialEq` above: `other_attrs` participates via
// attribute ids only.
impl Hash for Attributes {
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        self.doc_strings.hash(hasher);
        self.cfg.hash(hasher);
        self.span.hash(hasher);
        self.links.hash(hasher);
        for attr in &self.other_attrs {
            attr.id.hash(hasher);
        }
    }
}

impl AttributesExt for Attributes {
    fn lists(&self, name: Symbol) -> ListAttributesIter<'_> {
        self.other_attrs.lists(name)
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum GenericBound {
    TraitBound(PolyTrait, hir::TraitBoundModifier),
    Outlives(Lifetime),
}

impl GenericBound {
    /// Builds the `?Sized` bound: a `Maybe`-modified bound on the `Sized` lang item.
    crate fn maybe_sized(cx: &DocContext<'_>) -> GenericBound {
        let did = cx.tcx.require_lang_item(LangItem::Sized, None);
        let empty = cx.tcx.intern_substs(&[]);
        let path = external_path(cx, cx.tcx.item_name(did), Some(did), false, vec![], empty);
        inline::record_extern_fqn(cx, did, TypeKind::Trait);
        GenericBound::TraitBound(
            PolyTrait {
                trait_: ResolvedPath { path, param_names: None, did, is_generic: false },
                generic_params: Vec::new(),
            },
            hir::TraitBoundModifier::Maybe,
        )
    }

    /// Returns `true` if this is an unmodified (`TBM::None`) bound on the `Sized` trait.
    crate fn is_sized_bound(&self, cx: &DocContext<'_>) -> bool {
        use rustc_hir::TraitBoundModifier as TBM;
        if let GenericBound::TraitBound(PolyTrait { ref trait_, .. }, TBM::None) = *self {
            if trait_.def_id() == cx.tcx.lang_items().sized_trait() {
                return true;
            }
        }
        false
    }

    crate fn get_poly_trait(&self) -> Option<PolyTrait> {
        if let GenericBound::TraitBound(ref p, _) = *self {
            return Some(p.clone());
        }
        None
    }

    crate fn get_trait_type(&self) -> Option<Type> {
        if let GenericBound::TraitBound(PolyTrait { ref trait_, .. }, _) = *self {
            Some(trait_.clone())
        } else {
            None
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct Lifetime(pub Symbol);

impl Lifetime {
    crate fn get_ref(&self) -> SymbolStr {
        self.0.as_str()
    }

    /// The `'static` lifetime.
    crate fn statik() -> Lifetime {
        Lifetime(kw::StaticLifetime)
    }

    /// The elided lifetime, `'_`.
    crate fn elided() -> Lifetime {
        Lifetime(kw::UnderscoreLifetime)
    }
}

#[derive(Clone, Debug)]
crate enum WherePredicate {
    BoundPredicate { ty: Type, bounds: Vec<GenericBound> },
    RegionPredicate { lifetime: Lifetime, bounds: Vec<GenericBound> },
    EqPredicate { lhs: Type, rhs: Type },
}

impl WherePredicate {
    /// The predicate's bounds; `None` for equality predicates, which have none.
    crate fn get_bounds(&self) -> Option<&[GenericBound]> {
        match *self {
            WherePredicate::BoundPredicate { ref bounds, .. } => Some(bounds),
            WherePredicate::RegionPredicate { ref bounds, .. } => Some(bounds),
            _ => None,
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum GenericParamDefKind {
    Lifetime,
    Type {
        did: DefId,
        bounds: Vec<GenericBound>,
        default: Option<Type>,
        synthetic: Option<hir::SyntheticTyParamKind>,
    },
    Const {
        did: DefId,
        ty: Type,
    },
}

impl GenericParamDefKind {
    crate fn is_type(&self) -> bool {
        matches!(self, GenericParamDefKind::Type { .. })
    }

    // FIXME(eddyb) this either returns the default of a type parameter, or the
    // type of a `const` parameter. It seems that the intention is to *visit*
    // any embedded types, but `get_type` seems to be the wrong name for that.
    crate fn get_type(&self) -> Option<Type> {
        match self {
            GenericParamDefKind::Type { default, .. } => default.clone(),
            GenericParamDefKind::Const { ty, .. } => Some(ty.clone()),
            GenericParamDefKind::Lifetime => None,
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct GenericParamDef {
    crate name: Symbol,
    crate kind: GenericParamDefKind,
}

impl GenericParamDef {
    /// `true` for type parameters carrying a `synthetic` marker (e.g. compiler-generated ones).
    crate fn is_synthetic_type_param(&self) -> bool {
        match self.kind {
            GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => false,
            GenericParamDefKind::Type { ref synthetic, .. } => synthetic.is_some(),
        }
    }

    crate fn is_type(&self) -> bool {
        self.kind.is_type()
    }

    crate fn get_type(&self) -> Option<Type> {
        self.kind.get_type()
    }

    /// Bounds of a type parameter; `None` for lifetime and const parameters.
    crate fn get_bounds(&self) -> Option<&[GenericBound]> {
        match self.kind {
            GenericParamDefKind::Type { ref bounds, .. } => Some(bounds),
            _ => None,
        }
    }
}

// maybe use a Generic enum and use Vec<Generic>?
#[derive(Clone, Debug, Default)]
crate struct Generics {
    crate params: Vec<GenericParamDef>,
    crate where_predicates: Vec<WherePredicate>,
}

#[derive(Clone, Debug)]
crate struct Function {
    crate decl: FnDecl,
    crate generics: Generics,
    crate header: hir::FnHeader,
    crate all_types: Vec<(Type, TypeKind)>,
    crate ret_types: Vec<(Type, TypeKind)>,
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct FnDecl {
    crate inputs: Arguments,
    crate output: FnRetTy,
    crate c_variadic: bool,
    crate attrs: Attributes,
}

impl FnDecl {
    /// The receiver form of the first argument, if the first argument is `self`.
    crate fn self_type(&self) -> Option<SelfTy> {
        self.inputs.values.get(0).and_then(|v| v.to_self())
    }

    /// Returns the sugared return type for an async function.
    ///
    /// For example, if the return type is `impl std::future::Future<Output = i32>`, this function
    /// will return `i32`.
    ///
    /// # Panics
    ///
    /// This function will panic if the return type does not match the expected sugaring for async
    /// functions.
    crate fn sugared_async_return_type(&self) -> FnRetTy {
        match &self.output {
            FnRetTy::Return(Type::ImplTrait(bounds)) => match &bounds[0] {
                GenericBound::TraitBound(PolyTrait { trait_, .. }, ..) => {
                    // The first associated-type binding (`Output = T`) is the sugared type.
                    let bindings = trait_.bindings().unwrap();
                    FnRetTy::Return(bindings[0].ty().clone())
                }
                _ => panic!("unexpected desugaring of async function"),
            },
            _ => panic!("unexpected desugaring of async function"),
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct Arguments {
    crate values: Vec<Argument>,
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct Argument {
    crate type_: Type,
    crate name: Symbol,
}

#[derive(Clone, PartialEq, Debug)]
crate enum SelfTy {
    SelfValue,
    SelfBorrowed(Option<Lifetime>, Mutability),
    SelfExplicit(Type),
}

impl Argument {
    /// Classifies this argument as a `self` receiver; `None` if not named `self`.
    crate fn to_self(&self) -> Option<SelfTy> {
        if self.name != kw::SelfLower {
            return None;
        }
        if self.type_.is_self_type() {
            return Some(SelfValue);
        }
        match self.type_ {
            BorrowedRef { ref lifetime, mutability, ref type_ } if type_.is_self_type() => {
                Some(SelfBorrowed(lifetime.clone(), mutability))
            }
            _ => Some(SelfExplicit(self.type_.clone())),
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum FnRetTy {
    Return(Type),
    DefaultReturn,
}

impl GetDefId for FnRetTy {
    fn def_id(&self) -> Option<DefId> {
        match *self {
            Return(ref ty) => ty.def_id(),
            DefaultReturn => None,
        }
    }

    fn def_id_full(&self, cache: &Cache) -> Option<DefId> {
        match *self {
            Return(ref ty) => ty.def_id_full(cache),
            DefaultReturn => None,
        }
    }
}

#[derive(Clone, Debug)]
crate struct Trait {
    crate unsafety: hir::Unsafety,
    crate items: Vec<Item>,
    crate generics: Generics,
    crate bounds: Vec<GenericBound>,
    crate is_spotlight: bool,
    crate is_auto: bool,
}

#[derive(Clone, Debug)]
crate struct TraitAlias {
    crate generics: Generics,
    crate bounds: Vec<GenericBound>,
}

/// A trait reference, which may have higher ranked lifetimes.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct PolyTrait {
    crate trait_: Type,
    crate generic_params: Vec<GenericParamDef>,
}

/// A representation of a type suitable for hyperlinking purposes. Ideally, one can get the
/// original type out of the AST/`TyCtxt` given one of these, if more information is needed. Most
/// importantly, it does not preserve mutability or boxes.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum Type {
    /// Structs/enums/traits (most that would be an `hir::TyKind::Path`).
    ResolvedPath {
        path: Path,
        param_names: Option<Vec<GenericBound>>,
        did: DefId,
        /// `true` if is a `T::Name` path for associated types.
        is_generic: bool,
    },
    /// For parameterized types, so the consumer of the JSON don't go
    /// looking for types which don't exist anywhere.
    Generic(Symbol),
    /// Primitives are the fixed-size numeric types (plus int/usize/float), char,
    /// arrays, slices, and tuples.
    Primitive(PrimitiveType),
    /// `extern "ABI" fn`
    BareFunction(Box<BareFunctionDecl>),
    Tuple(Vec<Type>),
    Slice(Box<Type>),
    /// The `String` field is about the size or the constant representing the array's length.
    Array(Box<Type>, String),
    Never,
    RawPointer(Mutability, Box<Type>),
    BorrowedRef {
        lifetime: Option<Lifetime>,
        mutability: Mutability,
        type_: Box<Type>,
    },

    // `<Type as Trait>::Name`
    QPath {
        name: Symbol,
        self_type: Box<Type>,
        trait_: Box<Type>,
    },

    // `_`
    Infer,

    // `impl TraitA + TraitB + ...`
    ImplTrait(Vec<GenericBound>),
}

#[derive(Clone, PartialEq, Eq, Hash, Copy, Debug)]
/// N.B. this has to be different from `hir::PrimTy` because it also includes types that aren't
/// paths, like `Unit`.
crate enum PrimitiveType { Isize, I8, I16, I32, I64, I128, Usize, U8, U16, U32, U64, U128, F32, F64, Char, Bool, Str, Slice, Array, Tuple, Unit, RawPointer, Reference, Fn, Never, } #[derive(Clone, PartialEq, Eq, Hash, Copy, Debug)] crate enum TypeKind { Enum, Function, Module, Const, Static, Struct, Union, Trait, Typedef, Foreign, Macro, Attr, Derive, TraitAlias, Primitive, } impl<'a> From<&'a hir::def::DefKind> for TypeKind { fn from(other: &hir::def::DefKind) -> Self { Self::from(*other) } } impl From<hir::def::DefKind> for TypeKind { fn from(other: hir::def::DefKind) -> Self { match other { hir::def::DefKind::Enum => Self::Enum, hir::def::DefKind::Fn => Self::Function, hir::def::DefKind::Mod => Self::Module, hir::def::DefKind::Const => Self::Const, hir::def::DefKind::Static => Self::Static, hir::def::DefKind::Struct => Self::Struct, hir::def::DefKind::Union => Self::Union, hir::def::DefKind::Trait => Self::Trait, hir::def::DefKind::TyAlias => Self::Typedef, hir::def::DefKind::ForeignTy => Self::Foreign, hir::def::DefKind::TraitAlias => Self::TraitAlias, hir::def::DefKind::Macro(_) => Self::Macro, hir::def::DefKind::Variant | hir::def::DefKind::AssocTy | hir::def::DefKind::TyParam | hir::def::DefKind::ConstParam | hir::def::DefKind::Ctor(..) | hir::def::DefKind::AssocFn | hir::def::DefKind::AssocConst | hir::def::DefKind::ExternCrate | hir::def::DefKind::Use | hir::def::DefKind::ForeignMod | hir::def::DefKind::AnonConst | hir::def::DefKind::OpaqueTy | hir::def::DefKind::Field | hir::def::DefKind::LifetimeParam | hir::def::DefKind::GlobalAsm | hir::def::DefKind::Impl | hir::def::DefKind::Closure | hir::def::DefKind::Generator => Self::Foreign, } } } crate trait GetDefId { /// Use this method to get the [`DefId`] of a [`clean`] AST node. /// This will return [`None`] when called on a primitive [`clean::Type`]. /// Use [`Self::def_id_full`] if you want to include primitives. 
/// /// [`clean`]: crate::clean /// [`clean::Type`]: crate::clean::Type // FIXME: get rid of this function and always use `def_id_full` fn def_id(&self) -> Option<DefId>; /// Use this method to get the [DefId] of a [clean] AST node, including [PrimitiveType]s. /// /// See [`Self::def_id`] for more. /// /// [clean]: crate::clean fn def_id_full(&self, cache: &Cache) -> Option<DefId>; } impl<T: GetDefId> GetDefId for Option<T> { fn def_id(&self) -> Option<DefId> { self.as_ref().and_then(|d| d.def_id()) } fn def_id_full(&self, cache: &Cache) -> Option<DefId> { self.as_ref().and_then(|d| d.def_id_full(cache)) } } impl Type { crate fn primitive_type(&self) -> Option<PrimitiveType> { match *self { Primitive(p) | BorrowedRef { type_: box Primitive(p), .. } => Some(p), Slice(..) | BorrowedRef { type_: box Slice(..), .. } => Some(PrimitiveType::Slice), Array(..) | BorrowedRef { type_: box Array(..), .. } => Some(PrimitiveType::Array), Tuple(ref tys) => { if tys.is_empty() { Some(PrimitiveType::Unit) } else { Some(PrimitiveType::Tuple) } } RawPointer(..) => Some(PrimitiveType::RawPointer), BorrowedRef { type_: box Generic(..), .. } => Some(PrimitiveType::Reference), BareFunction(..) => Some(PrimitiveType::Fn), Never => Some(PrimitiveType::Never), _ => None, } } crate fn is_generic(&self) -> bool { match *self { ResolvedPath { is_generic, .. } => is_generic, _ => false, } } crate fn is_self_type(&self) -> bool { match *self { Generic(name) => name == kw::SelfUpper, _ => false, } } crate fn generics(&self) -> Option<Vec<Type>> { match *self { ResolvedPath { ref path, .. } => path.segments.last().and_then(|seg| { if let GenericArgs::AngleBracketed { ref args, .. } = seg.args { Some( args.iter() .filter_map(|arg| match arg { GenericArg::Type(ty) => Some(ty.clone()), _ => None, }) .collect(), ) } else { None } }), _ => None, } } crate fn bindings(&self) -> Option<&[TypeBinding]> { match *self { ResolvedPath { ref path, .. 
} => path.segments.last().and_then(|seg| { if let GenericArgs::AngleBracketed { ref bindings, .. } = seg.args { Some(&**bindings) } else { None } }), _ => None, } } crate fn is_full_generic(&self) -> bool { matches!(self, Type::Generic(_)) } crate fn is_primitive(&self) -> bool { match self { Self::Primitive(_) => true, Self::BorrowedRef { ref type_, .. } | Self::RawPointer(_, ref type_) => { type_.is_primitive() } _ => false, } } crate fn projection(&self) -> Option<(&Type, DefId, Symbol)> { let (self_, trait_, name) = match self { QPath { self_type, trait_, name } => (self_type, trait_, name), _ => return None, }; let trait_did = match **trait_ { ResolvedPath { did, .. } => did, _ => return None, }; Some((&self_, trait_did, *name)) } } impl Type { fn inner_def_id(&self, cache: Option<&Cache>) -> Option<DefId> { let t: PrimitiveType = match *self { ResolvedPath { did, .. } => return Some(did), Primitive(p) => return cache.and_then(|c| c.primitive_locations.get(&p).cloned()), BorrowedRef { type_: box Generic(..), .. } => PrimitiveType::Reference, BorrowedRef { ref type_, .. } => return type_.inner_def_id(cache), Tuple(ref tys) => { if tys.is_empty() { PrimitiveType::Unit } else { PrimitiveType::Tuple } } BareFunction(..) => PrimitiveType::Fn, Never => PrimitiveType::Never, Slice(..) => PrimitiveType::Slice, Array(..) => PrimitiveType::Array, RawPointer(..) => PrimitiveType::RawPointer, QPath { ref self_type, .. 
} => return self_type.inner_def_id(cache), Generic(_) | Infer | ImplTrait(_) => return None, }; cache.and_then(|c| Primitive(t).def_id_full(c)) } } impl GetDefId for Type { fn def_id(&self) -> Option<DefId> { self.inner_def_id(None) } fn def_id_full(&self, cache: &Cache) -> Option<DefId> { self.inner_def_id(Some(cache)) } } impl PrimitiveType { crate fn from_hir(prim: hir::PrimTy) -> PrimitiveType { use ast::{FloatTy, IntTy, UintTy}; match prim { hir::PrimTy::Int(IntTy::Isize) => PrimitiveType::Isize, hir::PrimTy::Int(IntTy::I8) => PrimitiveType::I8, hir::PrimTy::Int(IntTy::I16) => PrimitiveType::I16, hir::PrimTy::Int(IntTy::I32) => PrimitiveType::I32, hir::PrimTy::Int(IntTy::I64) => PrimitiveType::I64, hir::PrimTy::Int(IntTy::I128) => PrimitiveType::I128, hir::PrimTy::Uint(UintTy::Usize) => PrimitiveType::Usize, hir::PrimTy::Uint(UintTy::U8) => PrimitiveType::U8, hir::PrimTy::Uint(UintTy::U16) => PrimitiveType::U16, hir::PrimTy::Uint(UintTy::U32) => PrimitiveType::U32, hir::PrimTy::Uint(UintTy::U64) => PrimitiveType::U64, hir::PrimTy::Uint(UintTy::U128) => PrimitiveType::U128, hir::PrimTy::Float(FloatTy::F32) => PrimitiveType::F32, hir::PrimTy::Float(FloatTy::F64) => PrimitiveType::F64, hir::PrimTy::Str => PrimitiveType::Str, hir::PrimTy::Bool => PrimitiveType::Bool, hir::PrimTy::Char => PrimitiveType::Char, } } crate fn from_symbol(s: Symbol) -> Option<PrimitiveType> { match s { sym::isize => Some(PrimitiveType::Isize), sym::i8 => Some(PrimitiveType::I8), sym::i16 => Some(PrimitiveType::I16), sym::i32 => Some(PrimitiveType::I32), sym::i64 => Some(PrimitiveType::I64), sym::i128 => Some(PrimitiveType::I128), sym::usize => Some(PrimitiveType::Usize), sym::u8 => Some(PrimitiveType::U8), sym::u16 => Some(PrimitiveType::U16), sym::u32 => Some(PrimitiveType::U32), sym::u64 => Some(PrimitiveType::U64), sym::u128 => Some(PrimitiveType::U128), sym::bool => Some(PrimitiveType::Bool), sym::char => Some(PrimitiveType::Char), sym::str => Some(PrimitiveType::Str), sym::f32 => 
Some(PrimitiveType::F32), sym::f64 => Some(PrimitiveType::F64), sym::array => Some(PrimitiveType::Array), sym::slice => Some(PrimitiveType::Slice), sym::tuple => Some(PrimitiveType::Tuple), sym::unit => Some(PrimitiveType::Unit), sym::pointer => Some(PrimitiveType::RawPointer), sym::reference => Some(PrimitiveType::Reference), kw::Fn => Some(PrimitiveType::Fn), sym::never => Some(PrimitiveType::Never), _ => None, } } crate fn as_str(&self) -> &'static str { use self::PrimitiveType::*; match *self { Isize => "isize", I8 => "i8", I16 => "i16", I32 => "i32", I64 => "i64", I128 => "i128", Usize => "usize", U8 => "u8", U16 => "u16", U32 => "u32", U64 => "u64", U128 => "u128", F32 => "f32", F64 => "f64", Str => "str", Bool => "bool", Char => "char", Array => "array", Slice => "slice", Tuple => "tuple", Unit => "unit", RawPointer => "pointer", Reference => "reference", Fn => "fn", Never => "never", } } crate fn impls(&self, tcx: TyCtxt<'_>) -> &'static ArrayVec<[DefId; 4]> { Self::all_impls(tcx).get(self).expect("missing impl for primitive type") } crate fn all_impls(tcx: TyCtxt<'_>) -> &'static FxHashMap<PrimitiveType, ArrayVec<[DefId; 4]>> { static CELL: OnceCell<FxHashMap<PrimitiveType, ArrayVec<[DefId; 4]>>> = OnceCell::new(); CELL.get_or_init(move || { use self::PrimitiveType::*; /// A macro to create a FxHashMap. /// /// Example: /// /// ``` /// let letters = map!{"a" => "b", "c" => "d"}; /// ``` /// /// Trailing commas are allowed. /// Commas between elements are required (even if the expression is a block). macro_rules! map { ($( $key: expr => $val: expr ),* $(,)*) => {{ let mut map = ::rustc_data_structures::fx::FxHashMap::default(); $( map.insert($key, $val); )* map }} } let single = |a: Option<DefId>| a.into_iter().collect(); let both = |a: Option<DefId>, b: Option<DefId>| -> ArrayVec<_> { a.into_iter().chain(b).collect() }; let lang_items = tcx.lang_items(); map! 
{ Isize => single(lang_items.isize_impl()), I8 => single(lang_items.i8_impl()), I16 => single(lang_items.i16_impl()), I32 => single(lang_items.i32_impl()), I64 => single(lang_items.i64_impl()), I128 => single(lang_items.i128_impl()), Usize => single(lang_items.usize_impl()), U8 => single(lang_items.u8_impl()), U16 => single(lang_items.u16_impl()), U32 => single(lang_items.u32_impl()), U64 => single(lang_items.u64_impl()), U128 => single(lang_items.u128_impl()), F32 => both(lang_items.f32_impl(), lang_items.f32_runtime_impl()), F64 => both(lang_items.f64_impl(), lang_items.f64_runtime_impl()), Char => single(lang_items.char_impl()), Bool => single(lang_items.bool_impl()), Str => both(lang_items.str_impl(), lang_items.str_alloc_impl()), Slice => { lang_items .slice_impl() .into_iter() .chain(lang_items.slice_u8_impl()) .chain(lang_items.slice_alloc_impl()) .chain(lang_items.slice_u8_alloc_impl()) .collect() }, Array => single(lang_items.array_impl()), Tuple => ArrayVec::new(), Unit => ArrayVec::new(), RawPointer => { lang_items .const_ptr_impl() .into_iter() .chain(lang_items.mut_ptr_impl()) .chain(lang_items.const_slice_ptr_impl()) .chain(lang_items.mut_slice_ptr_impl()) .collect() }, Reference => ArrayVec::new(), Fn => ArrayVec::new(), Never => ArrayVec::new(), } }) } crate fn to_url_str(&self) -> &'static str { self.as_str() } crate fn as_sym(&self) -> Symbol { use PrimitiveType::*; match self { Isize => sym::isize, I8 => sym::i8, I16 => sym::i16, I32 => sym::i32, I64 => sym::i64, I128 => sym::i128, Usize => sym::usize, U8 => sym::u8, U16 => sym::u16, U32 => sym::u32, U64 => sym::u64, U128 => sym::u128, F32 => sym::f32, F64 => sym::f64, Str => sym::str, Bool => sym::bool, Char => sym::char, Array => sym::array, Slice => sym::slice, Tuple => sym::tuple, Unit => sym::unit, RawPointer => sym::pointer, Reference => sym::reference, Fn => kw::Fn, Never => sym::never, } } } impl From<ast::IntTy> for PrimitiveType { fn from(int_ty: ast::IntTy) -> PrimitiveType { match 
// NOTE(review): this view starts inside `impl From<ast::IntTy> for PrimitiveType`;
// the `impl` / `fn from` / `match` header lives above this chunk. The arms map
// each AST signed-integer type onto the matching rustdoc primitive.
int_ty {
            ast::IntTy::Isize => PrimitiveType::Isize,
            ast::IntTy::I8 => PrimitiveType::I8,
            ast::IntTy::I16 => PrimitiveType::I16,
            ast::IntTy::I32 => PrimitiveType::I32,
            ast::IntTy::I64 => PrimitiveType::I64,
            ast::IntTy::I128 => PrimitiveType::I128,
        }
    }
}

// Maps each AST unsigned-integer type onto the matching rustdoc primitive.
impl From<ast::UintTy> for PrimitiveType {
    fn from(uint_ty: ast::UintTy) -> PrimitiveType {
        match uint_ty {
            ast::UintTy::Usize => PrimitiveType::Usize,
            ast::UintTy::U8 => PrimitiveType::U8,
            ast::UintTy::U16 => PrimitiveType::U16,
            ast::UintTy::U32 => PrimitiveType::U32,
            ast::UintTy::U64 => PrimitiveType::U64,
            ast::UintTy::U128 => PrimitiveType::U128,
        }
    }
}

// Maps each AST float type onto the matching rustdoc primitive.
impl From<ast::FloatTy> for PrimitiveType {
    fn from(float_ty: ast::FloatTy) -> PrimitiveType {
        match float_ty {
            ast::FloatTy::F32 => PrimitiveType::F32,
            ast::FloatTy::F64 => PrimitiveType::F64,
        }
    }
}

// Same conversions as above, but starting from the type-checked (`ty`) forms.
impl From<ty::IntTy> for PrimitiveType {
    fn from(int_ty: ty::IntTy) -> PrimitiveType {
        match int_ty {
            ty::IntTy::Isize => PrimitiveType::Isize,
            ty::IntTy::I8 => PrimitiveType::I8,
            ty::IntTy::I16 => PrimitiveType::I16,
            ty::IntTy::I32 => PrimitiveType::I32,
            ty::IntTy::I64 => PrimitiveType::I64,
            ty::IntTy::I128 => PrimitiveType::I128,
        }
    }
}

impl From<ty::UintTy> for PrimitiveType {
    fn from(uint_ty: ty::UintTy) -> PrimitiveType {
        match uint_ty {
            ty::UintTy::Usize => PrimitiveType::Usize,
            ty::UintTy::U8 => PrimitiveType::U8,
            ty::UintTy::U16 => PrimitiveType::U16,
            ty::UintTy::U32 => PrimitiveType::U32,
            ty::UintTy::U64 => PrimitiveType::U64,
            ty::UintTy::U128 => PrimitiveType::U128,
        }
    }
}

impl From<ty::FloatTy> for PrimitiveType {
    fn from(float_ty: ty::FloatTy) -> PrimitiveType {
        match float_ty {
            ty::FloatTy::F32 => PrimitiveType::F32,
            ty::FloatTy::F64 => PrimitiveType::F64,
        }
    }
}

// HIR primitives delegate the numeric cases to the `ast`-based conversions
// above via `into()`.
impl From<hir::PrimTy> for PrimitiveType {
    fn from(prim_ty: hir::PrimTy) -> PrimitiveType {
        match prim_ty {
            hir::PrimTy::Int(int_ty) => int_ty.into(),
            hir::PrimTy::Uint(uint_ty) => uint_ty.into(),
            hir::PrimTy::Float(float_ty) => float_ty.into(),
            hir::PrimTy::Str => PrimitiveType::Str,
            hir::PrimTy::Bool => PrimitiveType::Bool,
            hir::PrimTy::Char => PrimitiveType::Char,
        }
    }
}

/// The computed visibility of an item: fully public, inherited from the parent
/// scope, or restricted to the module identified by the `DefId`.
#[derive(Copy, Clone, Debug)]
crate enum Visibility {
    Public,
    Inherited,
    Restricted(DefId),
}

impl Visibility {
    crate fn is_public(&self) -> bool {
        matches!(self, Visibility::Public)
    }
}

#[derive(Clone, Debug)]
crate struct Struct {
    crate struct_type: CtorKind,
    crate generics: Generics,
    crate fields: Vec<Item>,
    // `true` when private/hidden fields were removed by a strip pass.
    crate fields_stripped: bool,
}

#[derive(Clone, Debug)]
crate struct Union {
    crate generics: Generics,
    crate fields: Vec<Item>,
    crate fields_stripped: bool,
}

/// This is a more limited form of the standard Struct, different in that
/// it lacks the things most items have (name, id, parameterization). Found
/// only as a variant in an enum.
#[derive(Clone, Debug)]
crate struct VariantStruct {
    crate struct_type: CtorKind,
    crate fields: Vec<Item>,
    crate fields_stripped: bool,
}

#[derive(Clone, Debug)]
crate struct Enum {
    crate variants: IndexVec<VariantIdx, Item>,
    crate generics: Generics,
    crate variants_stripped: bool,
}

#[derive(Clone, Debug)]
crate enum Variant {
    CLike,
    Tuple(Vec<Type>),
    Struct(VariantStruct),
}

/// Small wrapper around `rustc_span::Span` that adds helper methods
/// and enforces calling `source_callsite`.
#[derive(Clone, Debug)]
crate struct Span(rustc_span::Span);

impl Span {
    crate fn from_rustc_span(sp: rustc_span::Span) -> Self {
        // Get the macro invocation instead of the definition,
        // in case the span is result of a macro expansion.
        // (See rust-lang/rust#39726)
        Self(sp.source_callsite())
    }

    crate fn dummy() -> Self {
        Self(rustc_span::DUMMY_SP)
    }

    crate fn span(&self) -> rustc_span::Span {
        self.0
    }

    crate fn filename(&self, sess: &Session) -> FileName {
        sess.source_map().span_to_filename(self.0)
    }

    crate fn lo(&self, sess: &Session) -> Loc {
        sess.source_map().lookup_char_pos(self.0.lo())
    }

    crate fn hi(&self, sess: &Session) -> Loc {
        sess.source_map().lookup_char_pos(self.0.hi())
    }

    crate fn cnum(&self, sess: &Session) -> CrateNum {
        // FIXME: is there a time when the lo and hi crate would be different?
        self.lo(sess).file.cnum
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct Path {
    crate global: bool,
    crate res: Res,
    crate segments: Vec<PathSegment>,
}

impl Path {
    /// Name of the final segment; panics if the path has no segments.
    crate fn last(&self) -> Symbol {
        self.segments.last().expect("segments were empty").name
    }

    crate fn last_name(&self) -> SymbolStr {
        self.segments.last().expect("segments were empty").name.as_str()
    }

    /// Re-joins the segments with `::`, prefixed with a leading `::` for
    /// global paths.
    crate fn whole_name(&self) -> String {
        String::from(if self.global { "::" } else { "" })
            + &self.segments.iter().map(|s| s.name.to_string()).collect::<Vec<_>>().join("::")
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum GenericArg {
    Lifetime(Lifetime),
    Type(Type),
    Const(Constant),
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum GenericArgs {
    AngleBracketed { args: Vec<GenericArg>, bindings: Vec<TypeBinding> },
    Parenthesized { inputs: Vec<Type>, output: Option<Type> },
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct PathSegment {
    crate name: Symbol,
    crate args: GenericArgs,
}

#[derive(Clone, Debug)]
crate struct Typedef {
    crate type_: Type,
    crate generics: Generics,
    /// `type_` can come from either the HIR or from metadata. If it comes from HIR, it may be a type
    /// alias instead of the final type. This will always have the final type, regardless of whether
    /// `type_` came from HIR or from metadata.
    ///
    /// If `item_type.is_none()`, `type_` is guaranteed to come from metadata (and therefore hold the
    /// final type).
    crate item_type: Option<Type>,
}

impl GetDefId for Typedef {
    fn def_id(&self) -> Option<DefId> {
        self.type_.def_id()
    }

    fn def_id_full(&self, cache: &Cache) -> Option<DefId> {
        self.type_.def_id_full(cache)
    }
}

#[derive(Clone, Debug)]
crate struct OpaqueTy {
    crate bounds: Vec<GenericBound>,
    crate generics: Generics,
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct BareFunctionDecl {
    crate unsafety: hir::Unsafety,
    crate generic_params: Vec<GenericParamDef>,
    crate decl: FnDecl,
    crate abi: Abi,
}

#[derive(Clone, Debug)]
crate struct Static {
    crate type_: Type,
    crate mutability: Mutability,
    /// It's useful to have the value of a static documented, but I have no
    /// desire to represent expressions (that'd basically be all of the AST,
    /// which is huge!). So, have a string.
    crate expr: String,
}

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
crate struct Constant {
    crate type_: Type,
    crate expr: String,
    crate value: Option<String>,
    crate is_literal: bool,
}

#[derive(Clone, Debug)]
crate struct Impl {
    crate unsafety: hir::Unsafety,
    crate generics: Generics,
    crate provided_trait_methods: FxHashSet<Symbol>,
    // `None` for inherent impls, `Some` for trait impls.
    crate trait_: Option<Type>,
    crate for_: Type,
    crate items: Vec<Item>,
    crate negative_polarity: bool,
    crate synthetic: bool,
    crate blanket_impl: Option<Type>,
}

#[derive(Clone, Debug)]
crate struct Import {
    crate kind: ImportKind,
    crate source: ImportSource,
    crate should_be_displayed: bool,
}

impl Import {
    crate fn new_simple(name: Symbol, source: ImportSource, should_be_displayed: bool) -> Self {
        Self { kind: ImportKind::Simple(name), source, should_be_displayed }
    }

    crate fn new_glob(source: ImportSource, should_be_displayed: bool) -> Self {
        Self { kind: ImportKind::Glob, source, should_be_displayed }
    }
}

#[derive(Clone, Debug)]
crate enum ImportKind {
    // use source as str;
    Simple(Symbol),
    // use source::*;
    Glob,
}

#[derive(Clone, Debug)]
crate struct ImportSource {
    crate path: Path,
    crate did: Option<DefId>,
}

#[derive(Clone, Debug)]
crate struct Macro {
    crate source: String,
    crate imported_from: Option<Symbol>,
}

#[derive(Clone, Debug)]
crate struct ProcMacro {
    crate kind: MacroKind,
    crate helpers: Vec<Symbol>,
}

/// A type binding on an associated type (e.g., `A = Bar` in `Foo<A = Bar>` or
/// `A: Send + Sync` in `Foo<A: Send + Sync>`).
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct TypeBinding {
    crate name: Symbol,
    crate kind: TypeBindingKind,
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum TypeBindingKind {
    Equality { ty: Type },
    Constraint { bounds: Vec<GenericBound> },
}

impl TypeBinding {
    /// The bound type of an equality binding; panics on `Constraint` bindings.
    crate fn ty(&self) -> &Type {
        match self.kind {
            TypeBindingKind::Equality { ref ty } => ty,
            _ => panic!("expected equality type binding for parenthesized generic args"),
        }
    }
}
// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Rustdoc's HTML Rendering module
//!
//! This module contains the bulk of the logic necessary for rendering a
//! rustdoc `clean::Crate` instance to a set of static HTML pages. This
//! rendering process is largely driven by the `format!` syntax extension to
//! perform all I/O into files and streams.
//!
//! The rendering process is largely driven by the `Context` and `Cache`
//! structures. The cache is pre-populated by crawling the crate in question,
//! and then it is shared among the various rendering threads. The cache is meant
//! to be a fairly large structure not implementing `Clone` (because it's shared
//! among threads). The context, however, should be a lightweight structure. This
//! is cloned per-thread and contains information about what is currently being
//! rendered.
//!
//! In order to speed up rendering (mostly because of markdown rendering), the
//! rendering process has been parallelized. This parallelization is only
//! exposed through the `crate` method on the context, and then also from the
//! fact that the shared cache is stored in TLS (and must be accessed as such).
//!
//! In addition to rendering the crate itself, this module is also responsible
//! for creating the corresponding search index and source file renderings.
//! These threads are not parallelized (they haven't been a bottleneck yet), and
//! both occur before the crate is rendered.
pub use self::ExternalLocation::*;

use std::ascii::AsciiExt;
use std::cell::RefCell;
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::default::Default;
use std::error;
use std::fmt::{self, Display, Formatter, Write as FmtWrite};
use std::fs::{self, File, OpenOptions};
use std::io::prelude::*;
use std::io::{self, BufWriter, BufReader};
use std::iter::repeat;
use std::mem;
use std::path::{PathBuf, Path, Component};
use std::str;
use std::sync::Arc;

use externalfiles::ExternalHtml;

use serialize::json::{ToJson, Json, as_json};
use syntax::{abi, ast};
use syntax::feature_gate::UnstableFeatures;
use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId};
use rustc::middle::privacy::AccessLevels;
use rustc::middle::stability;
use rustc::hir;
use rustc::util::nodemap::{FxHashMap, FxHashSet};
use rustc::session::config::nightly_options::is_nightly_build;
use rustc_data_structures::flock;

use clean::{self, AttributesExt, GetDefId, SelfTy, Mutability, Span};
use doctree;
use fold::DocFolder;
use html::escape::Escape;
use html::format::{ConstnessSpace};
use html::format::{TyParamBounds, WhereClause, href, AbiSpace};
use html::format::{VisSpace, Method, UnsafetySpace, MutableSpace};
use html::format::fmt_impl_for_trait_page;
use html::item_type::ItemType;
use html::markdown::{self, Markdown, MarkdownHtml, MarkdownSummaryLine, RenderType};
use html::{highlight, layout};

use html_diff;

/// A pair of name and its optional document.
pub type NameDoc = (String, Option<String>);

/// Major driving force in all rustdoc rendering. This contains information
/// about where in the tree-like hierarchy rendering is occurring and controls
/// how the current page is being rendered.
///
/// It is intended that this context is a lightweight object which can be fairly
/// easily cloned because it is cloned per work-job (about once per item in the
/// rustdoc tree).
#[derive(Clone)]
pub struct Context {
    /// Current hierarchy of components leading down to what's currently being
    /// rendered
    pub current: Vec<String>,
    /// The current destination folder of where HTML artifacts should be placed.
    /// This changes as the context descends into the module hierarchy.
    pub dst: PathBuf,
    /// A flag, which when `true`, will render pages which redirect to the
    /// real location of an item. This is used to allow external links to
    /// publicly reused items to redirect to the right location.
    pub render_redirect_pages: bool,
    /// State shared between all rendering threads; see `SharedContext`.
    pub shared: Arc<SharedContext>,
    /// Which markdown backend drives rendering (see `html::markdown::RenderType`).
    pub render_type: RenderType,
}

/// State shared by every rendering thread; built once in `run` and then only
/// read (except for the warning accumulator, which is interior-mutable).
pub struct SharedContext {
    /// The path to the crate root source minus the file name.
    /// Used for simplifying paths to the highlighted source code files.
    pub src_root: PathBuf,
    /// This describes the layout of each page, and is not modified after
    /// creation of the context (contains info like the favicon and added html).
    pub layout: layout::Layout,
    /// This flag indicates whether [src] links should be generated or not. If
    /// the source files are present in the html rendering, then this will be
    /// `true`.
    pub include_sources: bool,
    /// The local file sources we've emitted and their respective url-paths.
    pub local_sources: FxHashMap<PathBuf, String>,
    /// All the passes that were run on this crate.
    pub passes: FxHashSet<String>,
    /// The base-URL of the issue tracker for when an item has been tagged with
    /// an issue number.
    pub issue_tracker_base_url: Option<String>,
    /// The given user css file which allow to customize the generated
    /// documentation theme.
    pub css_file_extension: Option<PathBuf>,
    /// Warnings for the user if rendering would differ using different markdown
    /// parsers.
    pub markdown_warnings: RefCell<Vec<(Span, String, Vec<html_diff::Difference>)>>,
}

/// Indicates where an external crate can be found.
pub enum ExternalLocation {
    /// Remote URL root of the external crate
    Remote(String),
    /// This external crate can be found in the local doc/ folder
    Local,
    /// The external crate could not be found.
    Unknown,
}

/// Metadata about an implementor of a trait.
pub struct Implementor {
    pub def_id: DefId,
    pub stability: Option<clean::Stability>,
    pub impl_: clean::Impl,
}

/// Metadata about implementations for a type.
#[derive(Clone)]
pub struct Impl {
    pub impl_item: clean::Item,
}

impl Impl {
    // Unwraps the `clean::ImplItem` payload; any other item kind here is a bug.
    fn inner_impl(&self) -> &clean::Impl {
        match self.impl_item.inner {
            clean::ImplItem(ref impl_) => impl_,
            _ => panic!("non-impl item found in impl")
        }
    }

    fn trait_did(&self) -> Option<DefId> {
        self.inner_impl().trait_.def_id()
    }
}

/// An I/O error tagged with the file that was being written when it occurred.
#[derive(Debug)]
pub struct Error {
    file: PathBuf,
    error: io::Error,
}

impl error::Error for Error {
    fn description(&self) -> &str {
        self.error.description()
    }
}

impl Display for Error {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "\"{}\": {}", self.file.display(), self.error)
    }
}

impl Error {
    pub fn new(e: io::Error, file: &Path) -> Error {
        Error {
            file: file.to_path_buf(),
            error: e,
        }
    }
}

// Like `try!`, but wraps the `io::Error` together with the path being
// processed so the user sees which file failed.
macro_rules! try_err {
    ($e:expr, $file:expr) => ({
        match $e {
            Ok(e) => e,
            Err(e) => return Err(Error::new(e, $file)),
        }
    })
}

/// This cache is used to store information about the `clean::Crate` being
/// rendered in order to provide more useful documentation. This contains
/// information like all implementors of a trait, all traits a type implements,
/// documentation for all known traits, etc.
///
/// This structure purposefully does not implement `Clone` because it's intended
/// to be a fairly large and expensive structure to clone. Instead this adheres
/// to `Send` so it may be stored in a `Arc` instance and shared among the various
/// rendering threads.
#[derive(Default)]
pub struct Cache {
    /// Mapping of typaram ids to the name of the type parameter. This is used
    /// when pretty-printing a type (so pretty printing doesn't have to
    /// painfully maintain a context like this)
    pub typarams: FxHashMap<DefId, String>,

    /// Maps a type id to all known implementations for that type. This is only
    /// recognized for intra-crate `ResolvedPath` types, and is used to print
    /// out extra documentation on the page of an enum/struct.
    ///
    /// The values of the map are a list of implementations and documentation
    /// found on that implementation.
    pub impls: FxHashMap<DefId, Vec<Impl>>,

    /// Maintains a mapping of local crate node ids to the fully qualified name
    /// and "short type description" of that node. This is used when generating
    /// URLs when a type is being linked to. External paths are not located in
    /// this map because the `External` type itself has all the information
    /// necessary.
    pub paths: FxHashMap<DefId, (Vec<String>, ItemType)>,

    /// Similar to `paths`, but only holds external paths. This is only used for
    /// generating explicit hyperlinks to other crates.
    pub external_paths: FxHashMap<DefId, (Vec<String>, ItemType)>,

    /// This map contains information about all known traits of this crate.
    /// Implementations of a crate should inherit the documentation of the
    /// parent trait if no extra documentation is specified, and default methods
    /// should show up in documentation about trait implementations.
    pub traits: FxHashMap<DefId, clean::Trait>,

    /// When rendering traits, it's often useful to be able to list all
    /// implementors of the trait, and this mapping is exactly that: a mapping
    /// of trait ids to the list of known implementors of the trait
    pub implementors: FxHashMap<DefId, Vec<Implementor>>,

    /// Cache of where external crate documentation can be found.
    pub extern_locations: FxHashMap<CrateNum, (String, PathBuf, ExternalLocation)>,

    /// Cache of where documentation for primitives can be found.
    pub primitive_locations: FxHashMap<clean::PrimitiveType, DefId>,

    // Note that external items for which `doc(hidden)` applies to are shown as
    // non-reachable while local items aren't. This is because we're reusing
    // the access levels from crate analysis.
    pub access_levels: Arc<AccessLevels<DefId>>,

    // Private fields only used when initially crawling a crate to build a cache

    stack: Vec<String>,
    parent_stack: Vec<DefId>,
    parent_is_trait_impl: bool,
    search_index: Vec<IndexItem>,
    stripped_mod: bool,
    deref_trait_did: Option<DefId>,
    deref_mut_trait_did: Option<DefId>,
    owned_box_did: Option<DefId>,

    // In rare case where a structure is defined in one module but implemented
    // in another, if the implementing module is parsed before defining module,
    // then the fully qualified name of the structure isn't presented in `paths`
    // yet when its implementation methods are being indexed. Caches such methods
    // and their parent id here and indexes them at the end of crate parsing.
    orphan_impl_items: Vec<(DefId, clean::Item)>,
}

/// Temporary storage for data obtained during `RustdocVisitor::clean()`.
/// Later on moved into `CACHE_KEY`.
#[derive(Default)]
pub struct RenderInfo {
    pub inlined: FxHashSet<DefId>,
    pub external_paths: ::core::ExternalPaths,
    pub external_typarams: FxHashMap<DefId, String>,
    pub deref_trait_did: Option<DefId>,
    pub deref_mut_trait_did: Option<DefId>,
    pub owned_box_did: Option<DefId>,
}

/// Helper struct to render all source code to HTML pages
struct SourceCollector<'a> {
    scx: &'a mut SharedContext,

    /// Root destination to place all HTML output into
    dst: PathBuf,
}

/// Wrapper struct to render the source code of a file. This will do things like
/// adding line numbers to the left-hand side.
struct Source<'a>(&'a str); // Helper structs for rendering items/sidebars and carrying along contextual // information #[derive(Copy, Clone)] struct Item<'a> { cx: &'a Context, item: &'a clean::Item, } struct Sidebar<'a> { cx: &'a Context, item: &'a clean::Item, } /// Struct representing one entry in the JS search index. These are all emitted /// by hand to a large JS file at the end of cache-creation. struct IndexItem { ty: ItemType, name: String, path: String, desc: String, parent: Option<DefId>, parent_idx: Option<usize>, search_type: Option<IndexItemFunctionType>, } impl ToJson for IndexItem { fn to_json(&self) -> Json { assert_eq!(self.parent.is_some(), self.parent_idx.is_some()); let mut data = Vec::with_capacity(6); data.push((self.ty as usize).to_json()); data.push(self.name.to_json()); data.push(self.path.to_json()); data.push(self.desc.to_json()); data.push(self.parent_idx.to_json()); data.push(self.search_type.to_json()); Json::Array(data) } } /// A type used for the search index. struct Type { name: Option<String>, } impl ToJson for Type { fn to_json(&self) -> Json { match self.name { Some(ref name) => { let mut data = BTreeMap::new(); data.insert("name".to_owned(), name.to_json()); Json::Object(data) }, None => Json::Null } } } /// Full type of functions/methods in the search index. struct IndexItemFunctionType { inputs: Vec<Type>, output: Option<Type> } impl ToJson for IndexItemFunctionType { fn to_json(&self) -> Json { // If we couldn't figure out a type, just write `null`. if self.inputs.iter().chain(self.output.iter()).any(|ref i| i.name.is_none()) { Json::Null } else { let mut data = BTreeMap::new(); data.insert("inputs".to_owned(), self.inputs.to_json()); data.insert("output".to_owned(), self.output.to_json()); Json::Object(data) } } } // TLS keys used to carry information around during rendering. 
// Per-thread rendering state: the frozen `Cache`, the current path components,
// and the set of HTML id attributes already handed out on the current page.
thread_local!(static CACHE_KEY: RefCell<Arc<Cache>> = Default::default());
thread_local!(pub static CURRENT_LOCATION_KEY: RefCell<Vec<String>> = RefCell::new(Vec::new()));
thread_local!(static USED_ID_MAP: RefCell<FxHashMap<String, usize>> = RefCell::new(init_ids()));

/// Seeds the used-id map with the ids the page templates always emit, so
/// `derive_id` never hands one of them out again.
fn init_ids() -> FxHashMap<String, usize> {
    [
     "main",
     "search",
     "help",
     "TOC",
     "render-detail",
     "associated-types",
     "associated-const",
     "required-methods",
     "provided-methods",
     "implementors",
     "implementors-list",
     "methods",
     "deref-methods",
     "implementations",
     ].into_iter().map(|id| (String::from(*id), 1)).collect()
}

/// This method resets the local table of used ID attributes. This is typically
/// used at the beginning of rendering an entire HTML page to reset from the
/// previous state (if any).
pub fn reset_ids(embedded: bool) {
    USED_ID_MAP.with(|s| {
        *s.borrow_mut() = if embedded {
            init_ids()
        } else {
            FxHashMap()
        };
    });
}

/// Returns `candidate` unchanged if it has not been used as an HTML id on this
/// page yet, otherwise `candidate-N` for the next free counter `N`.
pub fn derive_id(candidate: String) -> String {
    USED_ID_MAP.with(|map| {
        let id = match map.borrow_mut().get_mut(&candidate) {
            None => candidate,
            Some(a) => {
                let id = format!("{}-{}", candidate, *a);
                *a += 1;
                id
            }
        };

        // Record the id we handed out so later candidates can't collide with it.
        map.borrow_mut().insert(id.clone(), 1);
        id
    })
}

/// Generates the documentation for `crate` into the directory `dst`
pub fn run(mut krate: clean::Crate,
           external_html: &ExternalHtml,
           playground_url: Option<String>,
           dst: PathBuf,
           passes: FxHashSet<String>,
           css_file_extension: Option<PathBuf>,
           renderinfo: RenderInfo,
           render_type: RenderType) -> Result<(), Error> {
    let src_root = match krate.src.parent() {
        Some(p) => p.to_path_buf(),
        None => PathBuf::new(),
    };
    let mut scx = SharedContext {
        src_root,
        passes,
        include_sources: true,
        local_sources: FxHashMap(),
        issue_tracker_base_url: None,
        layout: layout::Layout {
            logo: "".to_string(),
            favicon: "".to_string(),
            external_html: external_html.clone(),
            krate: krate.name.clone(),
        },
        css_file_extension: css_file_extension.clone(),
        markdown_warnings: RefCell::new(vec![]),
    };

    // If user passed in `--playground-url` arg, we fill in crate name here
    if let Some(url) = playground_url {
        markdown::PLAYGROUND.with(|slot| {
            *slot.borrow_mut() = Some((Some(krate.name.clone()), url));
        });
    }

    // Crawl the crate attributes looking for attributes which control how we're
    // going to emit HTML
    if let Some(attrs) = krate.module.as_ref().map(|m| &m.attrs) {
        for attr in attrs.lists("doc") {
            let name = attr.name().map(|s| s.as_str());
            match (name.as_ref().map(|s| &s[..]), attr.value_str()) {
                (Some("html_favicon_url"), Some(s)) => {
                    scx.layout.favicon = s.to_string();
                }
                (Some("html_logo_url"), Some(s)) => {
                    scx.layout.logo = s.to_string();
                }
                (Some("html_playground_url"), Some(s)) => {
                    markdown::PLAYGROUND.with(|slot| {
                        let name = krate.name.clone();
                        *slot.borrow_mut() = Some((Some(name), s.to_string()));
                    });
                }
                (Some("issue_tracker_base_url"), Some(s)) => {
                    scx.issue_tracker_base_url = Some(s.to_string());
                }
                (Some("html_no_source"), None) if attr.is_word() => {
                    scx.include_sources = false;
                }
                _ => {}
            }
        }
    }

    try_err!(fs::create_dir_all(&dst), &dst);
    // Sources are rendered first; this may flip `include_sources` off if a
    // file cannot be read (see `SourceCollector::fold_item`).
    krate = render_sources(&dst, &mut scx, krate)?;
    let cx = Context {
        current: Vec::new(),
        dst,
        render_redirect_pages: false,
        shared: Arc::new(scx),
        render_type,
    };

    // Crawl the crate to build various caches used for the output
    let RenderInfo {
        inlined: _,
        external_paths,
        external_typarams,
        deref_trait_did,
        deref_mut_trait_did,
        owned_box_did,
    } = renderinfo;

    let external_paths = external_paths.into_iter()
        .map(|(k, (v, t))| (k, (v, ItemType::from(t))))
        .collect();

    let mut cache = Cache {
        impls: FxHashMap(),
        external_paths,
        paths: FxHashMap(),
        implementors: FxHashMap(),
        stack: Vec::new(),
        parent_stack: Vec::new(),
        search_index: Vec::new(),
        parent_is_trait_impl: false,
        extern_locations: FxHashMap(),
        primitive_locations: FxHashMap(),
        stripped_mod: false,
        access_levels: krate.access_levels.clone(),
        orphan_impl_items: Vec::new(),
        traits: mem::replace(&mut krate.external_traits, FxHashMap()),
        deref_trait_did,
        deref_mut_trait_did,
        owned_box_did,
        typarams: external_typarams,
    };

    // Cache where all our extern crates are located
    for &(n, ref e) in &krate.externs {
        let src_root = match Path::new(&e.src).parent() {
            Some(p) => p.to_path_buf(),
            None => PathBuf::new(),
        };
        cache.extern_locations.insert(n, (e.name.clone(), src_root,
                                          extern_location(e, &cx.dst)));

        let did = DefId { krate: n, index: CRATE_DEF_INDEX };
        cache.external_paths.insert(did, (vec![e.name.to_string()], ItemType::Module));
    }

    // Cache where all known primitives have their documentation located.
    //
    // Favor linking to as local extern as possible, so iterate all crates in
    // reverse topological order.
    for &(_, ref e) in krate.externs.iter().rev() {
        for &(def_id, prim, _) in &e.primitives {
            cache.primitive_locations.insert(prim, def_id);
        }
    }
    for &(def_id, prim, _) in &krate.primitives {
        cache.primitive_locations.insert(prim, def_id);
    }

    cache.stack.push(krate.name.clone());
    krate = cache.fold_crate(krate);

    // Build our search index
    let index = build_index(&krate, &mut cache);

    // Freeze the cache now that the index has been built. Put an Arc into TLS
    // for future parallelization opportunities
    let cache = Arc::new(cache);
    CACHE_KEY.with(|v| *v.borrow_mut() = cache.clone());
    CURRENT_LOCATION_KEY.with(|s| s.borrow_mut().clear());

    write_shared(&cx, &krate, &*cache, index)?;

    let scx = cx.shared.clone();

    // And finally render the whole crate's documentation
    let result = cx.krate(krate);

    // Report any hoedown-vs-pulldown markdown rendering differences that were
    // collected during rendering.
    let markdown_warnings = scx.markdown_warnings.borrow();
    if !markdown_warnings.is_empty() {
        println!("WARNING: documentation for this crate may be rendered \
                  differently using the new Pulldown renderer.");
        println!(" See https://github.com/rust-lang/rust/issues/44229 for details.");
        for &(ref span, ref text, ref diffs) in &*markdown_warnings {
            println!("WARNING: rendering difference in `{}`", concise_str(text));
            println!(" --> {}:{}:{}", span.filename, span.loline, span.locol);
            for d in diffs {
                render_difference(d);
            }
        }
    }

    result
}

// A short, single-line view of `s`.
fn concise_str(s: &str) -> String { if s.contains('\n') { return format!("{}...", s.lines().next().expect("Impossible! We just found a newline")); } if s.len() > 70 { return format!("{} ... {}", &s[..50], &s[s.len()-20..]); } s.to_owned() } // Returns short versions of s1 and s2, starting from where the strings differ. fn concise_compared_strs(s1: &str, s2: &str) -> (String, String) { let s1 = s1.trim(); let s2 = s2.trim(); if !s1.contains('\n') && !s2.contains('\n') && s1.len() <= 70 && s2.len() <= 70 { return (s1.to_owned(), s2.to_owned()); } let mut start_byte = 0; for (c1, c2) in s1.chars().zip(s2.chars()) { if c1 != c2 { break; } start_byte += c1.len_utf8(); } if start_byte == 0 { return (concise_str(s1), concise_str(s2)); } let s1 = &s1[start_byte..]; let s2 = &s2[start_byte..]; (format!("...{}", concise_str(s1)), format!("...{}", concise_str(s2))) } fn render_difference(diff: &html_diff::Difference) { match *diff { html_diff::Difference::NodeType { ref elem, ref opposite_elem } => { println!(" {} Types differ: expected: `{}`, found: `{}`", elem.path, elem.element_name, opposite_elem.element_name); } html_diff::Difference::NodeName { ref elem, ref opposite_elem } => { println!(" {} Tags differ: expected: `{}`, found: `{}`", elem.path, elem.element_name, opposite_elem.element_name); } html_diff::Difference::NodeAttributes { ref elem, ref elem_attributes, ref opposite_elem_attributes, .. } => { println!(" {} Attributes differ in `{}`: expected: `{:?}`, found: `{:?}`", elem.path, elem.element_name, elem_attributes, opposite_elem_attributes); } html_diff::Difference::NodeText { ref elem, ref elem_text, ref opposite_elem_text, .. 
} => { let (s1, s2) = concise_compared_strs(elem_text, opposite_elem_text); println!(" {} Text differs:\n expected: `{}`\n found: `{}`", elem.path, s1, s2); } html_diff::Difference::NotPresent { ref elem, ref opposite_elem } => { if let Some(ref elem) = *elem { println!(" {} One element is missing: expected: `{}`", elem.path, elem.element_name); } else if let Some(ref elem) = *opposite_elem { if elem.element_name.is_empty() { println!(" {} Unexpected element: `{}`", elem.path, concise_str(&elem.element_content)); } else { println!(" {} Unexpected element `{}`: found: `{}`", elem.path, elem.element_name, concise_str(&elem.element_content)); } } } } } /// Build the search index from the collected metadata fn build_index(krate: &clean::Crate, cache: &mut Cache) -> String { let mut nodeid_to_pathid = FxHashMap(); let mut crate_items = Vec::with_capacity(cache.search_index.len()); let mut crate_paths = Vec::<Json>::new(); let Cache { ref mut search_index, ref orphan_impl_items, ref mut paths, .. } = *cache; // Attach all orphan items to the type's definition if the type // has since been learned. for &(did, ref item) in orphan_impl_items { if let Some(&(ref fqp, _)) = paths.get(&did) { search_index.push(IndexItem { ty: item.type_(), name: item.name.clone().unwrap(), path: fqp[..fqp.len() - 1].join("::"), desc: plain_summary_line(item.doc_value()), parent: Some(did), parent_idx: None, search_type: get_index_search_type(&item), }); } } // Reduce `NodeId` in paths into smaller sequential numbers, // and prune the paths that do not appear in the index. 
    // (continuing `build_index`: remap parent `DefId`s onto small sequential
    // path ids and emit every index item as JSON)
    let mut lastpath = String::new();
    let mut lastpathid = 0usize;
    for item in search_index {
        item.parent_idx = item.parent.map(|nodeid| {
            if nodeid_to_pathid.contains_key(&nodeid) {
                *nodeid_to_pathid.get(&nodeid).unwrap()
            } else {
                let pathid = lastpathid;
                nodeid_to_pathid.insert(nodeid, pathid);
                lastpathid += 1;

                let &(ref fqp, short) = paths.get(&nodeid).unwrap();
                crate_paths.push(((short as usize), fqp.last().unwrap().clone()).to_json());
                pathid
            }
        });

        // Omit the parent path if it is same to that of the prior item.
        if lastpath == item.path {
            item.path.clear();
        } else {
            lastpath = item.path.clone();
        }
        crate_items.push(item.to_json());
    }

    let crate_doc = krate.module.as_ref().map(|module| {
        plain_summary_line(module.doc_value())
    }).unwrap_or(String::new());

    let mut crate_data = BTreeMap::new();
    crate_data.insert("doc".to_owned(), Json::String(crate_doc));
    crate_data.insert("items".to_owned(), Json::Array(crate_items));
    crate_data.insert("paths".to_owned(), Json::Array(crate_paths));

    // Collect the index into a string
    format!("searchIndex[{}] = {};", as_json(&krate.name), Json::Object(crate_data))
}

/// Emits the files shared by every crate documented into `cx.dst` (static
/// assets, the merged search index, and the per-trait implementor JS files).
fn write_shared(cx: &Context,
                krate: &clean::Crate,
                cache: &Cache,
                search_index: String) -> Result<(), Error> {
    // Write out the shared files. Note that these are shared among all rustdoc
    // docs placed in the output directory, so this needs to be a synchronized
    // operation with respect to all other rustdocs running around.
    try_err!(fs::create_dir_all(&cx.dst), &cx.dst);
    let _lock = flock::Lock::panicking_new(&cx.dst.join(".lock"), true, true, true);

    // Add all the static files. These may already exist, but we just
    // overwrite them anyway to make sure that they're fresh and up-to-date.
    write(cx.dst.join("main.js"), include_bytes!("static/main.js"))?;
    write(cx.dst.join("rustdoc.css"), include_bytes!("static/rustdoc.css"))?;
    write(cx.dst.join("main.css"), include_bytes!("static/styles/main.css"))?;
    if let Some(ref css) = cx.shared.css_file_extension {
        // Copy the user-supplied theme file into the output as `theme.css`.
        let mut content = String::new();
        let css = css.as_path();
        let mut f = try_err!(File::open(css), css);

        try_err!(f.read_to_string(&mut content), css);
        let css = cx.dst.join("theme.css");
        let css = css.as_path();
        let mut f = try_err!(File::create(css), css);
        try_err!(write!(f, "{}", &content), css);
    }
    write(cx.dst.join("normalize.css"), include_bytes!("static/normalize.css"))?;
    write(cx.dst.join("FiraSans-Regular.woff"), include_bytes!("static/FiraSans-Regular.woff"))?;
    write(cx.dst.join("FiraSans-Medium.woff"), include_bytes!("static/FiraSans-Medium.woff"))?;
    write(cx.dst.join("FiraSans-LICENSE.txt"), include_bytes!("static/FiraSans-LICENSE.txt"))?;
    write(cx.dst.join("Heuristica-Italic.woff"), include_bytes!("static/Heuristica-Italic.woff"))?;
    write(cx.dst.join("Heuristica-LICENSE.txt"), include_bytes!("static/Heuristica-LICENSE.txt"))?;
    write(cx.dst.join("SourceSerifPro-Regular.woff"),
          include_bytes!("static/SourceSerifPro-Regular.woff"))?;
    write(cx.dst.join("SourceSerifPro-Bold.woff"),
          include_bytes!("static/SourceSerifPro-Bold.woff"))?;
    write(cx.dst.join("SourceSerifPro-LICENSE.txt"),
          include_bytes!("static/SourceSerifPro-LICENSE.txt"))?;
    write(cx.dst.join("SourceCodePro-Regular.woff"),
          include_bytes!("static/SourceCodePro-Regular.woff"))?;
    write(cx.dst.join("SourceCodePro-Semibold.woff"),
          include_bytes!("static/SourceCodePro-Semibold.woff"))?;
    write(cx.dst.join("SourceCodePro-LICENSE.txt"),
          include_bytes!("static/SourceCodePro-LICENSE.txt"))?;
    write(cx.dst.join("LICENSE-MIT.txt"), include_bytes!("static/LICENSE-MIT.txt"))?;
    write(cx.dst.join("LICENSE-APACHE.txt"), include_bytes!("static/LICENSE-APACHE.txt"))?;
    write(cx.dst.join("COPYRIGHT.txt"), include_bytes!("static/COPYRIGHT.txt"))?;

    /// Reads an existing index-like JS file and keeps every line belonging to
    /// OTHER crates (lines starting with `key` but not `key["<krate>"]`), so
    /// concurrent/incremental rustdoc runs merge rather than clobber.
    fn collect(path: &Path, krate: &str, key: &str) -> io::Result<Vec<String>> {
        let mut ret = Vec::new();
        if path.exists() {
            for line in BufReader::new(File::open(path)?).lines() {
                let line = line?;
                if !line.starts_with(key) {
                    continue;
                }
                if line.starts_with(&format!(r#"{}["{}"]"#, key, krate)) {
                    continue;
                }
                ret.push(line.to_string());
            }
        }
        Ok(ret)
    }

    // Update the search index
    let dst = cx.dst.join("search-index.js");
    let mut all_indexes = try_err!(collect(&dst, &krate.name, "searchIndex"), &dst);
    all_indexes.push(search_index);
    // Sort the indexes by crate so the file will be generated identically even
    // with rustdoc running in parallel.
    all_indexes.sort();
    let mut w = try_err!(File::create(&dst), &dst);
    try_err!(writeln!(&mut w, "var searchIndex = {{}};"), &dst);
    for index in &all_indexes {
        try_err!(writeln!(&mut w, "{}", *index), &dst);
    }
    try_err!(writeln!(&mut w, "initSearch(searchIndex);"), &dst);

    // Update the list of all implementors for traits
    let dst = cx.dst.join("implementors");
    for (&did, imps) in &cache.implementors {
        // Private modules can leak through to this phase of rustdoc, which
        // could contain implementations for otherwise private types. In some
        // rare cases we could find an implementation for an item which wasn't
        // indexed, so we just skip this step in that case.
        //
        // FIXME: this is a vague explanation for why this can't be a `get`, in
        //        theory it should be...
        let &(ref remote_path, remote_item_type) = match cache.paths.get(&did) {
            Some(p) => p,
            None => match cache.external_paths.get(&did) {
                Some(p) => p,
                None => continue,
            }
        };

        let mut have_impls = false;
        let mut implementors = format!(r#"implementors["{}"] = ["#, krate.name);
        for imp in imps {
            // If the trait and implementation are in the same crate, then
            // there's no need to emit information about it (there's inlining
            // going on). If they're in different crates then the crate defining
            // the trait will be interested in our implementation.
            if imp.def_id.krate == did.krate { continue }
            // If the implementation is from another crate then that crate
            // should add it.
            if !imp.def_id.is_local() { continue }
            have_impls = true;
            write!(implementors, "{},", as_json(&imp.impl_.to_string())).unwrap();
        }
        implementors.push_str("];");

        // Only create a js file if we have impls to add to it. If the trait is
        // documented locally though we always create the file to avoid dead
        // links.
        if !have_impls && !cache.paths.contains_key(&did) {
            continue;
        }

        let mut mydst = dst.clone();
        for part in &remote_path[..remote_path.len() - 1] {
            mydst.push(part);
        }
        try_err!(fs::create_dir_all(&mydst), &mydst);
        mydst.push(&format!("{}.{}.js",
                            remote_item_type.css_class(),
                            remote_path[remote_path.len() - 1]));

        let mut all_implementors = try_err!(collect(&mydst, &krate.name, "implementors"), &mydst);
        all_implementors.push(implementors);
        // Sort the implementors by crate so the file will be generated
        // identically even with rustdoc running in parallel.
        all_implementors.sort();

        let mut f = try_err!(File::create(&mydst), &mydst);
        try_err!(writeln!(&mut f, "(function() {{var implementors = {{}};"), &mydst);
        for implementor in &all_implementors {
            try_err!(writeln!(&mut f, "{}", *implementor), &mydst);
        }
        try_err!(writeln!(&mut f, "{}", r"
            if (window.register_implementors) {
                window.register_implementors(implementors);
            } else {
                window.pending_implementors = implementors;
            }
        "), &mydst);
        try_err!(writeln!(&mut f, r"}})()"), &mydst);
    }
    Ok(())
}

/// Renders every local source file under `dst/src/<krate>` and returns the
/// (possibly updated) crate.
fn render_sources(dst: &Path, scx: &mut SharedContext,
                  krate: clean::Crate) -> Result<clean::Crate, Error> {
    info!("emitting source files");
    let dst = dst.join("src").join(&krate.name);
    try_err!(fs::create_dir_all(&dst), &dst);
    let mut folder = SourceCollector {
        dst,
        scx,
    };
    Ok(folder.fold_crate(krate))
}

/// Writes the entire contents of a string to a destination, not attempting to
/// catch any errors.
fn write(dst: PathBuf, contents: &[u8]) -> Result<(), Error> {
    // Both the `create` and the `write_all` failure paths are wrapped with
    // `try_err!` so the resulting Error carries the destination path.
    Ok(try_err!(try_err!(File::create(&dst), &dst).write_all(contents), &dst))
}

/// Takes a path to a source file and cleans the path to it. This canonicalizes
/// things like ".." to components which preserve the "top down" hierarchy of a
/// static HTML tree. Each component in the cleaned path will be passed as an
/// argument to `f`. The very last component of the path (ie the file name) will
/// be passed to `f` if `keep_filename` is true, and ignored otherwise.
// FIXME (#9639): The closure should deal with &[u8] instead of &str
// FIXME (#9639): This is too conservative, rejecting non-UTF-8 paths
fn clean_srcpath<F>(src_root: &Path, p: &Path, keep_filename: bool, mut f: F) where
    F: FnMut(&str),
{
    // make it relative, if possible
    let p = p.strip_prefix(src_root).unwrap_or(p);

    // Peekable so the final component (the file name) can be detected below.
    let mut iter = p.components().peekable();

    while let Some(c) = iter.next() {
        if !keep_filename && iter.peek().is_none() {
            // This is the file name itself; the caller asked to skip it.
            break;
        }

        match c {
            // ".." is mapped to a literal "up" directory so the emitted path
            // never escapes the generated tree.
            Component::ParentDir => f("up"),
            Component::Normal(c) => f(c.to_str().unwrap()),
            // Root/prefix components carry no useful segment here.
            _ => continue,
        }
    }
}

/// Attempts to find where an external crate is located, given that we're
/// rendering in to the specified source destination.
fn extern_location(e: &clean::ExternalCrate, dst: &Path) -> ExternalLocation {
    // See if there's documentation generated into the local directory
    let local_location = dst.join(&e.name);
    if local_location.is_dir() {
        return Local;
    }

    // Failing that, see if there's an attribute specifying where to find this
    // external crate
    e.attrs.lists("doc")
     .filter(|a| a.check_name("html_root_url"))
     .filter_map(|a| a.value_str())
     .map(|url| {
        let mut url = url.to_string();
        // Normalize to a trailing slash so path segments can be appended
        // directly later on.
        if !url.ends_with("/") {
            url.push('/')
        }
        Remote(url)
    }).next().unwrap_or(Unknown) // Well, at least we tried.
} impl<'a> DocFolder for SourceCollector<'a> { fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> { // If we're including source files, and we haven't seen this file yet, // then we need to render it out to the filesystem. if self.scx.include_sources // skip all invalid spans && item.source.filename != "" // skip non-local items && item.def_id.is_local() // Macros from other libraries get special filenames which we can // safely ignore. && !(item.source.filename.starts_with("<") && item.source.filename.ends_with("macros>")) { // If it turns out that we couldn't read this file, then we probably // can't read any of the files (generating html output from json or // something like that), so just don't include sources for the // entire crate. The other option is maintaining this mapping on a // per-file basis, but that's probably not worth it... self.scx .include_sources = match self.emit_source(&item.source.filename) { Ok(()) => true, Err(e) => { println!("warning: source code was requested to be rendered, \ but processing `{}` had an error: {}", item.source.filename, e); println!(" skipping rendering of source code"); false } }; } self.fold_item_recur(item) } } impl<'a> SourceCollector<'a> { /// Renders the given filename into its corresponding HTML source file. fn emit_source(&mut self, filename: &str) -> io::Result<()> { let p = PathBuf::from(filename); if self.scx.local_sources.contains_key(&p) { // We've already emitted this source return Ok(()); } let mut contents = Vec::new(); File::open(&p).and_then(|mut f| f.read_to_end(&mut contents))?; let contents = str::from_utf8(&contents).unwrap(); // Remove the utf-8 BOM if any let contents = if contents.starts_with("\u{feff}") { &contents[3..] 
} else { contents }; // Create the intermediate directories let mut cur = self.dst.clone(); let mut root_path = String::from("../../"); let mut href = String::new(); clean_srcpath(&self.scx.src_root, &p, false, |component| { cur.push(component); fs::create_dir_all(&cur).unwrap(); root_path.push_str("../"); href.push_str(component); href.push('/'); }); let mut fname = p.file_name().expect("source has no filename") .to_os_string(); fname.push(".html"); cur.push(&fname); href.push_str(&fname.to_string_lossy()); let mut w = BufWriter::new(File::create(&cur)?); let title = format!("{} -- source", cur.file_name().unwrap() .to_string_lossy()); let desc = format!("Source to the Rust file `{}`.", filename); let page = layout::Page { title: &title, css_class: "source", root_path: &root_path, description: &desc, keywords: BASIC_KEYWORDS, }; layout::render(&mut w, &self.scx.layout, &page, &(""), &Source(contents), self.scx.css_file_extension.is_some())?; w.flush()?; self.scx.local_sources.insert(p, href); Ok(()) } } impl DocFolder for Cache { fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> { // If this is a stripped module, // we don't want it or its children in the search index. let orig_stripped_mod = match item.inner { clean::StrippedItem(box clean::ModuleItem(..)) => { mem::replace(&mut self.stripped_mod, true) } _ => self.stripped_mod, }; // Register any generics to their corresponding string. This is used // when pretty-printing types. if let Some(generics) = item.inner.generics() { self.generics(generics); } // Propagate a trait method's documentation to all implementors of the // trait. if let clean::TraitItem(ref t) = item.inner { self.traits.entry(item.def_id).or_insert_with(|| t.clone()); } // Collect all the implementors of traits. 
if let clean::ImplItem(ref i) = item.inner { if let Some(did) = i.trait_.def_id() { self.implementors.entry(did).or_insert(vec![]).push(Implementor { def_id: item.def_id, stability: item.stability.clone(), impl_: i.clone(), }); } } // Index this method for searching later on. if let Some(ref s) = item.name { let (parent, is_inherent_impl_item) = match item.inner { clean::StrippedItem(..) => ((None, None), false), clean::AssociatedConstItem(..) | clean::TypedefItem(_, true) if self.parent_is_trait_impl => { // skip associated items in trait impls ((None, None), false) } clean::AssociatedTypeItem(..) | clean::TyMethodItem(..) | clean::StructFieldItem(..) | clean::VariantItem(..) => { ((Some(*self.parent_stack.last().unwrap()), Some(&self.stack[..self.stack.len() - 1])), false) } clean::MethodItem(..) | clean::AssociatedConstItem(..) => { if self.parent_stack.is_empty() { ((None, None), false) } else { let last = self.parent_stack.last().unwrap(); let did = *last; let path = match self.paths.get(&did) { // The current stack not necessarily has correlation // for where the type was defined. On the other // hand, `paths` always has the right // information if present. Some(&(ref fqp, ItemType::Trait)) | Some(&(ref fqp, ItemType::Struct)) | Some(&(ref fqp, ItemType::Union)) | Some(&(ref fqp, ItemType::Enum)) => Some(&fqp[..fqp.len() - 1]), Some(..) => Some(&*self.stack), None => None }; ((Some(*last), path), true) } } _ => ((None, Some(&*self.stack)), false) }; match parent { (parent, Some(path)) if is_inherent_impl_item || (!self.stripped_mod) => { debug_assert!(!item.is_stripped()); // A crate has a module at its root, containing all items, // which should not be indexed. The crate-item itself is // inserted later on when serializing the search-index. 
if item.def_id.index != CRATE_DEF_INDEX { self.search_index.push(IndexItem { ty: item.type_(), name: s.to_string(), path: path.join("::").to_string(), desc: plain_summary_line(item.doc_value()), parent, parent_idx: None, search_type: get_index_search_type(&item), }); } } (Some(parent), None) if is_inherent_impl_item => { // We have a parent, but we don't know where they're // defined yet. Wait for later to index this item. self.orphan_impl_items.push((parent, item.clone())); } _ => {} } } // Keep track of the fully qualified path for this item. let pushed = match item.name { Some(ref n) if !n.is_empty() => { self.stack.push(n.to_string()); true } _ => false, }; match item.inner { clean::StructItem(..) | clean::EnumItem(..) | clean::TypedefItem(..) | clean::TraitItem(..) | clean::FunctionItem(..) | clean::ModuleItem(..) | clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) | clean::ConstantItem(..) | clean::StaticItem(..) | clean::UnionItem(..) if !self.stripped_mod => { // Reexported items mean that the same id can show up twice // in the rustdoc ast that we're looking at. We know, // however, that a reexported item doesn't show up in the // `public_items` map, so we can skip inserting into the // paths map if there was already an entry present and we're // not a public item. if !self.paths.contains_key(&item.def_id) || self.access_levels.is_public(item.def_id) { self.paths.insert(item.def_id, (self.stack.clone(), item.type_())); } } // Link variants to their parent enum because pages aren't emitted // for each variant. clean::VariantItem(..) if !self.stripped_mod => { let mut stack = self.stack.clone(); stack.pop(); self.paths.insert(item.def_id, (stack, ItemType::Enum)); } clean::PrimitiveItem(..) 
if item.visibility.is_some() => { self.paths.insert(item.def_id, (self.stack.clone(), item.type_())); } _ => {} } // Maintain the parent stack let orig_parent_is_trait_impl = self.parent_is_trait_impl; let parent_pushed = match item.inner { clean::TraitItem(..) | clean::EnumItem(..) | clean::StructItem(..) | clean::UnionItem(..) => { self.parent_stack.push(item.def_id); self.parent_is_trait_impl = false; true } clean::ImplItem(ref i) => { self.parent_is_trait_impl = i.trait_.is_some(); match i.for_ { clean::ResolvedPath{ did, .. } => { self.parent_stack.push(did); true } ref t => { let prim_did = t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }); match prim_did { Some(did) => { self.parent_stack.push(did); true } None => false, } } } } _ => false }; // Once we've recursively found all the generics, hoard off all the // implementations elsewhere. let ret = self.fold_item_recur(item).and_then(|item| { if let clean::Item { inner: clean::ImplItem(_), .. } = item { // Figure out the id of this impl. This may map to a // primitive rather than always to a struct/enum. // Note: matching twice to restrict the lifetime of the `i` borrow. let did = if let clean::Item { inner: clean::ImplItem(ref i), .. } = item { match i.for_ { clean::ResolvedPath { did, .. } | clean::BorrowedRef { type_: box clean::ResolvedPath { did, .. }, .. 
} => { Some(did) } ref t => { t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }) } } } else { unreachable!() }; if let Some(did) = did { self.impls.entry(did).or_insert(vec![]).push(Impl { impl_item: item, }); } None } else { Some(item) } }); if pushed { self.stack.pop().unwrap(); } if parent_pushed { self.parent_stack.pop().unwrap(); } self.stripped_mod = orig_stripped_mod; self.parent_is_trait_impl = orig_parent_is_trait_impl; ret } } impl<'a> Cache { fn generics(&mut self, generics: &clean::Generics) { for typ in &generics.type_params { self.typarams.insert(typ.did, typ.name.clone()); } } } impl Context { /// String representation of how to get back to the root path of the 'doc/' /// folder in terms of a relative URL. fn root_path(&self) -> String { repeat("../").take(self.current.len()).collect::<String>() } /// Recurse in the directory structure and change the "root path" to make /// sure it always points to the top (relatively). fn recurse<T, F>(&mut self, s: String, f: F) -> T where F: FnOnce(&mut Context) -> T, { if s.is_empty() { panic!("Unexpected empty destination: {:?}", self.current); } let prev = self.dst.clone(); self.dst.push(&s); self.current.push(s); info!("Recursing into {}", self.dst.display()); let ret = f(self); info!("Recursed; leaving {}", self.dst.display()); // Go back to where we were at self.dst = prev; self.current.pop().unwrap(); ret } /// Main method for rendering a crate. /// /// This currently isn't parallelized, but it'd be pretty easy to add /// parallelization to this function. fn krate(self, mut krate: clean::Crate) -> Result<(), Error> { let mut item = match krate.module.take() { Some(i) => i, None => return Ok(()), }; item.name = Some(krate.name); // Render the crate documentation let mut work = vec![(self, item)]; while let Some((mut cx, item)) = work.pop() { cx.item(item, |cx, item| { work.push((cx.clone(), item)) })? 
} Ok(()) } fn render_item(&self, writer: &mut io::Write, it: &clean::Item, pushname: bool) -> io::Result<()> { // A little unfortunate that this is done like this, but it sure // does make formatting *a lot* nicer. CURRENT_LOCATION_KEY.with(|slot| { *slot.borrow_mut() = self.current.clone(); }); let mut title = if it.is_primitive() { // No need to include the namespace for primitive types String::new() } else { self.current.join("::") }; if pushname { if !title.is_empty() { title.push_str("::"); } title.push_str(it.name.as_ref().unwrap()); } title.push_str(" - Rust"); let tyname = it.type_().css_class(); let desc = if it.is_crate() { format!("API documentation for the Rust `{}` crate.", self.shared.layout.krate) } else { format!("API documentation for the Rust `{}` {} in crate `{}`.", it.name.as_ref().unwrap(), tyname, self.shared.layout.krate) }; let keywords = make_item_keywords(it); let page = layout::Page { css_class: tyname, root_path: &self.root_path(), title: &title, description: &desc, keywords: &keywords, }; reset_ids(true); if !self.render_redirect_pages { layout::render(writer, &self.shared.layout, &page, &Sidebar{ cx: self, item: it }, &Item{ cx: self, item: it }, self.shared.css_file_extension.is_some())?; } else { let mut url = self.root_path(); if let Some(&(ref names, ty)) = cache().paths.get(&it.def_id) { for name in &names[..names.len() - 1] { url.push_str(name); url.push_str("/"); } url.push_str(&item_path(ty, names.last().unwrap())); layout::redirect(writer, &url)?; } } Ok(()) } /// Non-parallelized version of rendering an item. This will take the input /// item, render its contents, and then invoke the specified closure with /// all sub-items which need to be rendered. /// /// The rendering driver uses this closure to queue up more work. fn item<F>(&mut self, item: clean::Item, mut f: F) -> Result<(), Error> where F: FnMut(&mut Context, clean::Item), { // Stripped modules survive the rustdoc passes (i.e. 
`strip-private`) // if they contain impls for public types. These modules can also // contain items such as publicly reexported structures. // // External crates will provide links to these structures, so // these modules are recursed into, but not rendered normally // (a flag on the context). if !self.render_redirect_pages { self.render_redirect_pages = item.is_stripped(); } if item.is_mod() { // modules are special because they add a namespace. We also need to // recurse into the items of the module as well. let name = item.name.as_ref().unwrap().to_string(); let mut item = Some(item); self.recurse(name, |this| { let item = item.take().unwrap(); let mut buf = Vec::new(); this.render_item(&mut buf, &item, false).unwrap(); // buf will be empty if the module is stripped and there is no redirect for it if !buf.is_empty() { let joint_dst = this.dst.join("index.html"); try_err!(fs::create_dir_all(&this.dst), &this.dst); let mut dst = try_err!(File::create(&joint_dst), &joint_dst); try_err!(dst.write_all(&buf), &joint_dst); } let m = match item.inner { clean::StrippedItem(box clean::ModuleItem(m)) | clean::ModuleItem(m) => m, _ => unreachable!() }; // Render sidebar-items.js used throughout this module. 
if !this.render_redirect_pages { let items = this.build_sidebar_items(&m); let js_dst = this.dst.join("sidebar-items.js"); let mut js_out = BufWriter::new(try_err!(File::create(&js_dst), &js_dst)); try_err!(write!(&mut js_out, "initSidebarItems({});", as_json(&items)), &js_dst); } for item in m.items { f(this,item); } Ok(()) })?; } else if item.name.is_some() { let mut buf = Vec::new(); self.render_item(&mut buf, &item, true).unwrap(); // buf will be empty if the item is stripped and there is no redirect for it if !buf.is_empty() { let name = item.name.as_ref().unwrap(); let item_type = item.type_(); let file_name = &item_path(item_type, name); let joint_dst = self.dst.join(file_name); try_err!(fs::create_dir_all(&self.dst), &self.dst); let mut dst = try_err!(File::create(&joint_dst), &joint_dst); try_err!(dst.write_all(&buf), &joint_dst); // Redirect from a sane URL using the namespace to Rustdoc's // URL for the page. let redir_name = format!("{}.{}.html", name, item_type.name_space()); let redir_dst = self.dst.join(redir_name); if let Ok(mut redirect_out) = OpenOptions::new().create_new(true) .write(true) .open(&redir_dst) { try_err!(layout::redirect(&mut redirect_out, file_name), &redir_dst); } // If the item is a macro, redirect from the old macro URL (with !) // to the new one (without). // FIXME(#35705) remove this redirect. 
if item_type == ItemType::Macro { let redir_name = format!("{}.{}!.html", item_type, name); let redir_dst = self.dst.join(redir_name); let mut redirect_out = try_err!(File::create(&redir_dst), &redir_dst); try_err!(layout::redirect(&mut redirect_out, file_name), &redir_dst); } } } Ok(()) } fn build_sidebar_items(&self, m: &clean::Module) -> BTreeMap<String, Vec<NameDoc>> { // BTreeMap instead of HashMap to get a sorted output let mut map = BTreeMap::new(); for item in &m.items { if item.is_stripped() { continue } let short = item.type_().css_class(); let myname = match item.name { None => continue, Some(ref s) => s.to_string(), }; let short = short.to_string(); map.entry(short).or_insert(vec![]) .push((myname, Some(plain_summary_line(item.doc_value())))); } for (_, items) in &mut map { items.sort(); } map } } impl<'a> Item<'a> { /// Generate a url appropriate for an `href` attribute back to the source of /// this item. /// /// The url generated, when clicked, will redirect the browser back to the /// original source code. /// /// If `None` is returned, then a source link couldn't be generated. This /// may happen, for example, with externally inlined items where the source /// of their crate documentation isn't known. fn src_href(&self) -> Option<String> { let mut root = self.cx.root_path(); let cache = cache(); let mut path = String::new(); let (krate, path) = if self.item.def_id.is_local() { let path = PathBuf::from(&self.item.source.filename); if let Some(path) = self.cx.shared.local_sources.get(&path) { (&self.cx.shared.layout.krate, path) } else { return None; } } else { // Macros from other libraries get special filenames which we can // safely ignore. 
if self.item.source.filename.starts_with("<") && self.item.source.filename.ends_with("macros>") { return None; } let (krate, src_root) = match cache.extern_locations.get(&self.item.def_id.krate) { Some(&(ref name, ref src, Local)) => (name, src), Some(&(ref name, ref src, Remote(ref s))) => { root = s.to_string(); (name, src) } Some(&(_, _, Unknown)) | None => return None, }; let file = Path::new(&self.item.source.filename); clean_srcpath(&src_root, file, false, |component| { path.push_str(component); path.push('/'); }); let mut fname = file.file_name().expect("source has no filename") .to_os_string(); fname.push(".html"); path.push_str(&fname.to_string_lossy()); (krate, &path) }; let lines = if self.item.source.loline == self.item.source.hiline { format!("{}", self.item.source.loline) } else { format!("{}-{}", self.item.source.loline, self.item.source.hiline) }; Some(format!("{root}src/{krate}/{path}#{lines}", root = root, krate = krate, path = path, lines = lines)) } } impl<'a> fmt::Display for Item<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { debug_assert!(!self.item.is_stripped()); // Write the breadcrumb trail header for the top write!(fmt, "\n<h1 class='fqn'><span class='in-band'>")?; match self.item.inner { clean::ModuleItem(ref m) => if m.is_crate { write!(fmt, "Crate ")?; } else { write!(fmt, "Module ")?; }, clean::FunctionItem(..) | clean::ForeignFunctionItem(..) => write!(fmt, "Function ")?, clean::TraitItem(..) => write!(fmt, "Trait ")?, clean::StructItem(..) => write!(fmt, "Struct ")?, clean::UnionItem(..) => write!(fmt, "Union ")?, clean::EnumItem(..) => write!(fmt, "Enum ")?, clean::TypedefItem(..) => write!(fmt, "Type Definition ")?, clean::MacroItem(..) => write!(fmt, "Macro ")?, clean::PrimitiveItem(..) => write!(fmt, "Primitive Type ")?, clean::StaticItem(..) | clean::ForeignStaticItem(..) => write!(fmt, "Static ")?, clean::ConstantItem(..) => write!(fmt, "Constant ")?, _ => { // We don't generate pages for any other type. 
unreachable!(); } } if !self.item.is_primitive() { let cur = &self.cx.current; let amt = if self.item.is_mod() { cur.len() - 1 } else { cur.len() }; for (i, component) in cur.iter().enumerate().take(amt) { write!(fmt, "<a href='{}index.html'>{}</a>::<wbr>", repeat("../").take(cur.len() - i - 1) .collect::<String>(), component)?; } } write!(fmt, "<a class=\"{}\" href=''>{}</a>", self.item.type_(), self.item.name.as_ref().unwrap())?; write!(fmt, "</span>")?; // in-band write!(fmt, "<span class='out-of-band'>")?; if let Some(version) = self.item.stable_since() { write!(fmt, "<span class='since' title='Stable since Rust version {0}'>{0}</span>", version)?; } write!(fmt, r##"<span id='render-detail'> <a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs"> [<span class='inner'>&#x2212;</span>] </a> </span>"##)?; // Write `src` tag // // When this item is part of a `pub use` in a downstream crate, the // [src] link in the downstream documentation will actually come back to // this page, and this link will be auto-clicked. The `id` attribute is // used to find the link to auto-click. 
if self.cx.shared.include_sources && !self.item.is_primitive() { if let Some(l) = self.src_href() { write!(fmt, "<a class='srclink' href='{}' title='{}'>[src]</a>", l, "goto source code")?; } } write!(fmt, "</span>")?; // out-of-band write!(fmt, "</h1>\n")?; match self.item.inner { clean::ModuleItem(ref m) => { item_module(fmt, self.cx, self.item, &m.items) } clean::FunctionItem(ref f) | clean::ForeignFunctionItem(ref f) => item_function(fmt, self.cx, self.item, f), clean::TraitItem(ref t) => item_trait(fmt, self.cx, self.item, t), clean::StructItem(ref s) => item_struct(fmt, self.cx, self.item, s), clean::UnionItem(ref s) => item_union(fmt, self.cx, self.item, s), clean::EnumItem(ref e) => item_enum(fmt, self.cx, self.item, e), clean::TypedefItem(ref t, _) => item_typedef(fmt, self.cx, self.item, t), clean::MacroItem(ref m) => item_macro(fmt, self.cx, self.item, m), clean::PrimitiveItem(ref p) => item_primitive(fmt, self.cx, self.item, p), clean::StaticItem(ref i) | clean::ForeignStaticItem(ref i) => item_static(fmt, self.cx, self.item, i), clean::ConstantItem(ref c) => item_constant(fmt, self.cx, self.item, c), _ => { // We don't generate pages for any other type. 
                unreachable!();
            }
        }
    }
}

/// Builds the output file name (relative URL) for an item of type `ty` named
/// `name`.
fn item_path(ty: ItemType, name: &str) -> String {
    match ty {
        // Modules are rendered as a directory containing an index page.
        ItemType::Module => format!("{}/index.html", name),
        _ => format!("{}.{}.html", ty.css_class(), name),
    }
}

/// Returns the fully qualified `a::b::Item` path of `item` relative to the
/// current rendering location.
fn full_path(cx: &Context, item: &clean::Item) -> String {
    let mut s = cx.current.join("::");
    s.push_str("::");
    s.push_str(item.name.as_ref().unwrap());
    s
}

/// Keeps the leading lines of `s` up to the first all-whitespace line, i.e.
/// the first paragraph of a doc comment; returns "" for `None`.
fn shorter<'a>(s: Option<&'a str>) -> String {
    match s {
        Some(s) => s.lines().take_while(|line|{
            // A line counts as content if it has any non-whitespace char.
            (*line).chars().any(|chr|{
                !chr.is_whitespace()
            })
        }).collect::<Vec<_>>().join("\n"),
        None => "".to_string()
    }
}

#[inline]
fn plain_summary_line(s: Option<&str>) -> String {
    // Collapse the first paragraph onto one line before stripping markup.
    let line = shorter(s).replace("\n", " ");
    markdown::plain_summary_line(&line[..])
}

/// Writes `item`'s stability notes followed by its full documentation.
fn document(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result {
    document_stability(w, cx, item)?;
    let prefix = render_assoc_const_value(item);
    document_full(w, item, cx, &prefix)?;
    Ok(())
}

/// Render md_text as markdown. Warns the user if there are difference in
/// rendering between Pulldown and Hoedown.
fn render_markdown(w: &mut fmt::Formatter,
                   md_text: &str,
                   span: Span,
                   render_type: RenderType,
                   prefix: &str,
                   scx: &SharedContext)
                   -> fmt::Result {
    // The Hoedown rendering is always produced: it is either the output
    // itself or the comparison baseline for Pulldown warnings.
    let hoedown_output = format!("{}", Markdown(md_text, RenderType::Hoedown));
    // We only emit warnings if the user has opted-in to Pulldown rendering.
    let output = if render_type == RenderType::Pulldown {
        let pulldown_output = format!("{}", Markdown(md_text, RenderType::Pulldown));
        let differences = html_diff::get_differences(&pulldown_output, &hoedown_output);
        // Whitespace-only text differences between the two renderers are
        // noise and are filtered out before deciding whether to warn.
        let differences = differences.into_iter()
            .filter(|s| {
                match *s {
                    html_diff::Difference::NodeText { ref elem_text,
                                                      ref opposite_elem_text,
                                                      ..
                    } if match_non_whitespace(elem_text, opposite_elem_text) => false,
                    _ => true,
                }
            })
            .collect::<Vec<_>>();

        if !differences.is_empty() {
            // Record the renderer discrepancy so it can be reported later.
            scx.markdown_warnings.borrow_mut().push((span, md_text.to_owned(), differences));
        }

        pulldown_output
    } else {
        hoedown_output
    };

    write!(w, "<div class='docblock'>{}{}</div>", prefix, output)
}

// Returns true iff s1 and s2 match, ignoring whitespace.
fn match_non_whitespace(s1: &str, s2: &str) -> bool {
    let s1 = s1.trim();
    let s2 = s2.trim();
    let mut cs1 = s1.chars();
    let mut cs2 = s2.chars();
    // Walk both strings in lockstep, skipping whitespace on either side; any
    // mismatch between non-whitespace characters means the strings differ.
    while let Some(c1) = cs1.next() {
        if c1.is_whitespace() {
            continue;
        }
        loop {
            if let Some(c2) = cs2.next() {
                if !c2.is_whitespace() {
                    if c1 != c2 {
                        return false;
                    }
                    break;
                }
            } else {
                // s2 exhausted while s1 still has non-whitespace left.
                return false;
            }
        }
    }
    // Whatever remains in s2 must be whitespace only.
    while let Some(c2) = cs2.next() {
        if !c2.is_whitespace() {
            return false;
        }
    }
    true
}

/// Writes the one-line summary of `item`'s docs, appending a "[Read more]"
/// link when the documentation spans more than one line.
fn document_short(w: &mut fmt::Formatter, item: &clean::Item, link: AssocItemLink,
                  cx: &Context, prefix: &str) -> fmt::Result {
    if let Some(s) = item.doc_value() {
        let markdown = if s.contains('\n') {
            format!("{} [Read more]({})",
                    &plain_summary_line(Some(s)), naive_assoc_href(item, link))
        } else {
            format!("{}", &plain_summary_line(Some(s)))
        };
        render_markdown(w, &markdown, item.source.clone(), cx.render_type, prefix, &cx.shared)?;
    } else if !prefix.is_empty() {
        // No docs, but the prefix (e.g. an assoc-const value) still renders.
        write!(w, "<div class='docblock'>{}</div>", prefix)?;
    }
    Ok(())
}

/// Returns the syntax-highlighted `name: type = value` snippet for an
/// associated const with a default value; empty string otherwise.
fn render_assoc_const_value(item: &clean::Item) -> String {
    match item.inner {
        clean::AssociatedConstItem(ref ty, Some(ref default)) => {
            highlight::render_with_highlighting(
                &format!("{}: {:#} = {}",
                         item.name.as_ref().unwrap(), ty, default),
                None,
                None,
                None,
            )
        }
        _ => String::new(),
    }
}

/// Writes `item`'s full documentation preceded by `prefix`, or just the
/// prefix block when the item has no docs.
fn document_full(w: &mut fmt::Formatter, item: &clean::Item,
                 cx: &Context, prefix: &str) -> fmt::Result {
    if let Some(s) = item.doc_value() {
        render_markdown(w, s, item.source.clone(), cx.render_type, prefix, &cx.shared)?;
    } else if !prefix.is_empty() {
        write!(w, "<div class='docblock'>{}</div>", prefix)?;
    }
    Ok(())
}

/// Writes the stability/deprecation/portability notes for `item`, if any.
fn document_stability(w: &mut
                      fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result {
    let stabilities = short_stability(item, cx, true);
    if !stabilities.is_empty() {
        // Each note is already a self-contained <div class='stab ...'> block.
        write!(w, "<div class='stability'>")?;
        for stability in stabilities {
            write!(w, "{}", stability)?;
        }
        write!(w, "</div>")?;
    }
    Ok(())
}

/// Splits `name` into (prefix, trailing number, leading-zero count) so that
/// names with numeric suffixes sort numerically rather than lexically.
fn name_key(name: &str) -> (&str, u64, usize) {
    // find number at end
    let split = name.bytes().rposition(|b| b < b'0' || b'9' < b).map_or(0, |s| s + 1);

    // count leading zeroes
    let after_zeroes =
        name[split..].bytes().position(|b| b != b'0').map_or(name.len(), |extra| split + extra);

    // sort leading zeroes last
    let num_zeroes = after_zeroes - split;

    match name[split..].parse() {
        Ok(n) => (&name[..split], n, num_zeroes),
        // No parseable numeric suffix: compare by the whole name.
        Err(_) => (name, 0, num_zeroes),
    }
}

/// Renders a module's listing page: the module docs followed by a table of
/// its items grouped and ordered by kind.
fn item_module(w: &mut fmt::Formatter, cx: &Context,
               item: &clean::Item, items: &[clean::Item]) -> fmt::Result {
    document(w, cx, item)?;

    // Indices of the items that will actually be listed; default impls and
    // stripped items are omitted.
    let mut indices = (0..items.len()).filter(|i| {
        if let clean::DefaultImplItem(..) = items[*i].inner {
            return false;
        }
        !items[*i].is_stripped()
    }).collect::<Vec<usize>>();

    // the order of item types in the listing
    fn reorder(ty: ItemType) -> u8 {
        match ty {
            ItemType::ExternCrate => 0,
            ItemType::Import => 1,
            ItemType::Primitive => 2,
            ItemType::Module => 3,
            ItemType::Macro => 4,
            ItemType::Struct => 5,
            ItemType::Enum => 6,
            ItemType::Constant => 7,
            ItemType::Static => 8,
            ItemType::Trait => 9,
            ItemType::Function => 10,
            ItemType::Typedef => 12,
            ItemType::Union => 13,
            _ => 14 + ty as u8,
        }
    }

    // Sort by kind first, then stable before unstable, then by name using the
    // numeric-aware key from `name_key`.
    fn cmp(i1: &clean::Item, i2: &clean::Item, idx1: usize, idx2: usize) -> Ordering {
        let ty1 = i1.type_();
        let ty2 = i2.type_();
        if ty1 != ty2 {
            return (reorder(ty1), idx1).cmp(&(reorder(ty2), idx2))
        }
        let s1 = i1.stability.as_ref().map(|s| s.level);
        let s2 = i2.stability.as_ref().map(|s| s.level);
        match (s1, s2) {
            (Some(stability::Unstable), Some(stability::Stable)) => return Ordering::Greater,
            (Some(stability::Stable), Some(stability::Unstable)) => return Ordering::Less,
            _ => {}
        }
        let lhs = i1.name.as_ref().map_or("", |s|
&**s); let rhs = i2.name.as_ref().map_or("", |s| &**s); name_key(lhs).cmp(&name_key(rhs)) } indices.sort_by(|&i1, &i2| cmp(&items[i1], &items[i2], i1, i2)); // This call is to remove reexport duplicates in cases such as: // // ``` // pub mod foo { // pub mod bar { // pub trait Double { fn foo(); } // } // } // // pub use foo::bar::*; // pub use foo::*; // ``` // // `Double` will appear twice in the generated docs. // // FIXME: This code is quite ugly and could be improved. Small issue: DefId // can be identical even if the elements are different (mostly in imports). // So in case this is an import, we keep everything by adding a "unique id" // (which is the position in the vector). indices.dedup_by_key(|i| (items[*i].def_id, if items[*i].name.as_ref().is_some() { Some(full_path(cx, &items[*i]).clone()) } else { None }, items[*i].type_(), if items[*i].is_import() { *i } else { 0 })); debug!("{:?}", indices); let mut curty = None; for &idx in &indices { let myitem = &items[idx]; if myitem.is_stripped() { continue; } let myty = Some(myitem.type_()); if curty == Some(ItemType::ExternCrate) && myty == Some(ItemType::Import) { // Put `extern crate` and `use` re-exports in the same section. 
curty = myty; } else if myty != curty { if curty.is_some() { write!(w, "</table>")?; } curty = myty; let (short, name) = match myty.unwrap() { ItemType::ExternCrate | ItemType::Import => ("reexports", "Reexports"), ItemType::Module => ("modules", "Modules"), ItemType::Struct => ("structs", "Structs"), ItemType::Union => ("unions", "Unions"), ItemType::Enum => ("enums", "Enums"), ItemType::Function => ("functions", "Functions"), ItemType::Typedef => ("types", "Type Definitions"), ItemType::Static => ("statics", "Statics"), ItemType::Constant => ("constants", "Constants"), ItemType::Trait => ("traits", "Traits"), ItemType::Impl => ("impls", "Implementations"), ItemType::TyMethod => ("tymethods", "Type Methods"), ItemType::Method => ("methods", "Methods"), ItemType::StructField => ("fields", "Struct Fields"), ItemType::Variant => ("variants", "Variants"), ItemType::Macro => ("macros", "Macros"), ItemType::Primitive => ("primitives", "Primitive Types"), ItemType::AssociatedType => ("associated-types", "Associated Types"), ItemType::AssociatedConst => ("associated-consts", "Associated Constants"), }; write!(w, "<h2 id='{id}' class='section-header'>\ <a href=\"#{id}\">{name}</a></h2>\n<table>", id = derive_id(short.to_owned()), name = name)?; } match myitem.inner { clean::ExternCrateItem(ref name, ref src) => { use html::format::HRef; match *src { Some(ref src) => { write!(w, "<tr><td><code>{}extern crate {} as {};", VisSpace(&myitem.visibility), HRef::new(myitem.def_id, src), name)? } None => { write!(w, "<tr><td><code>{}extern crate {};", VisSpace(&myitem.visibility), HRef::new(myitem.def_id, name))? 
            // (tail of the per-item `match` inside the module index renderer)
            }
        } // end inner rendering
        write!(w, "</code></td></tr>")?;
    }

    clean::ImportItem(ref import) => {
        write!(w, "<tr><td><code>{}{}</code></td></tr>",
               VisSpace(&myitem.visibility), *import)?;
    }

    _ => {
        // Unnamed (stripped) items cannot be linked to; skip them entirely.
        if myitem.name.is_none() { continue }

        // Short stability badges ("Deprecated", "Experimental", …) without
        // reasons — `show_reason == false` for the compact module listing.
        let stabilities = short_stability(myitem, cx, false);

        let stab_docs = if !stabilities.is_empty() {
            stabilities.iter()
                       .map(|s| format!("[{}]", s))
                       .collect::<Vec<_>>()
                       .as_slice()
                       .join(" ")
        } else {
            String::new()
        };

        // `unsafe fn`s (local or foreign) get a warning superscript marker.
        let unsafety_flag = match myitem.inner {
            clean::FunctionItem(ref func) | clean::ForeignFunctionItem(ref func)
            if func.unsafety == hir::Unsafety::Unsafe => {
                "<a title='unsafe function' href='#'><sup>⚠</sup></a>"
            }
            _ => "",
        };

        let doc_value = myitem.doc_value().unwrap_or("");
        write!(w, " <tr class='{stab} module-item'> <td><a class=\"{class}\" href=\"{href}\" title='{title_type} {title}'>{name}</a>{unsafety_flag}</td> <td class='docblock-short'> {stab_docs} {docs} </td> </tr>",
               name = *myitem.name.as_ref().unwrap(),
               stab_docs = stab_docs,
               // The two markdown backends shorten the summary differently.
               docs = if cx.render_type == RenderType::Hoedown {
                   format!("{}",
                           shorter(Some(&Markdown(doc_value,
                                                  RenderType::Hoedown).to_string())))
               } else {
                   format!("{}", MarkdownSummaryLine(doc_value))
               },
               class = myitem.type_(),
               stab = myitem.stability_class().unwrap_or("".to_string()),
               unsafety_flag = unsafety_flag,
               href = item_path(myitem.type_(), myitem.name.as_ref().unwrap()),
               title_type = myitem.type_(),
               title = full_path(cx, myitem))?;
    } // end catch-all arm
    } // end match on item kind
} // end loop over items

// Close the <table> opened for the final item category, if any was emitted.
if curty.is_some() {
    write!(w, "</table>")?;
}
Ok(())
}

/// Builds short stability / deprecation / portability badges for `item` as
/// ready-to-emit `<div class='stab …'>` HTML snippets.
///
/// `show_reason` selects the verbose form (reasons, versions, tracking-issue
/// links — used on item pages) versus the compact form (module listings).
fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Vec<String> {
    let mut stability = vec![];

    if let Some(stab) = item.stability.as_ref() {
        let deprecated_reason = if show_reason && !stab.deprecated_reason.is_empty() {
            format!(": {}", stab.deprecated_reason)
        } else {
            String::new()
        };
        if !stab.deprecated_since.is_empty() {
            let since = if show_reason {
                format!(" since {}", Escape(&stab.deprecated_since))
            } else {
                String::new()
            };
            let text = format!("Deprecated{}{}",
                               since,
                               MarkdownHtml(&deprecated_reason, cx.render_type));
            stability.push(format!("<div class='stab deprecated'>{}</div>", text))
        };

        if stab.level == stability::Unstable {
            if show_reason {
                // Build the "(feature #issue)" suffix depending on which of
                // feature name / tracker URL / issue number are available.
                let unstable_extra = match (!stab.feature.is_empty(),
                                            &cx.shared.issue_tracker_base_url,
                                            stab.issue) {
                    (true, &Some(ref tracker_url), Some(issue_no)) if issue_no > 0 =>
                        format!(" (<code>{} </code><a href=\"{}{}\">#{}</a>)",
                                Escape(&stab.feature), tracker_url, issue_no, issue_no),
                    (false, &Some(ref tracker_url), Some(issue_no)) if issue_no > 0 =>
                        format!(" (<a href=\"{}{}\">#{}</a>)",
                                Escape(&tracker_url), issue_no, issue_no),
                    (true, ..) =>
                        format!(" (<code>{}</code>)", Escape(&stab.feature)),
                    _ => String::new(),
                };
                if stab.unstable_reason.is_empty() {
                    stability.push(format!("<div class='stab unstable'>\
                                            <span class=microscope>🔬</span> \
                                            This is a nightly-only experimental API. {}\
                                            </div>",
                                           unstable_extra));
                } else {
                    // With a reason, wrap it in <details> so it is collapsible.
                    let text = format!("<summary><span class=microscope>🔬</span> \
                                        This is a nightly-only experimental API. {}\
                                        </summary>{}",
                                       unstable_extra,
                                       MarkdownHtml(&stab.unstable_reason, cx.render_type));
                    stability.push(format!("<div class='stab unstable'><details>{}</details></div>",
                                           text));
                }
            } else {
                stability.push(format!("<div class='stab unstable'>Experimental</div>"))
            }
        };
    } else if let Some(depr) = item.deprecation.as_ref() {
        // No stability attribute, but a plain deprecation attribute exists.
        let note = if show_reason && !depr.note.is_empty() {
            format!(": {}", depr.note)
        } else {
            String::new()
        };
        let since = if show_reason && !depr.since.is_empty() {
            format!(" since {}", Escape(&depr.since))
        } else {
            String::new()
        };
        let text = format!("Deprecated{}{}", since, MarkdownHtml(&note, cx.render_type));
        stability.push(format!("<div class='stab deprecated'>{}</div>", text))
    }

    // Portability badge for `#[cfg(...)]`-gated items.
    if let Some(ref cfg) = item.attrs.cfg {
        stability.push(format!("<div class='stab portability'>{}</div>", if show_reason {
            cfg.render_long_html()
        } else {
            cfg.render_short_html()
        }));
    }

    stability
}

// Display helper that renders a `= <expr>` initializer suffix for
// consts/statics; an empty expression renders nothing.
struct Initializer<'a>(&'a str);

impl<'a> fmt::Display for Initializer<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) ->
                                            fmt::Result {
        let Initializer(s) = *self;
        // Nothing to show for an empty initializer expression.
        if s.is_empty() { return Ok(()); }
        write!(f, "<code> = </code>")?;
        // HTML-escape the raw expression text before embedding it.
        write!(f, "<code>{}</code>", Escape(s))
    }
}

/// Renders the item page for a `const`: signature block, then its docs.
fn item_constant(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
                 c: &clean::Constant) -> fmt::Result {
    write!(w, "<pre class='rust const'>")?;
    render_attributes(w, it)?;
    write!(w, "{vis}const \
               {name}: {typ}{init}</pre>",
           vis = VisSpace(&it.visibility),
           name = it.name.as_ref().unwrap(),
           typ = c.type_,
           init = Initializer(&c.expr))?;
    document(w, cx, it)
}

/// Renders the item page for a `static`: signature block, then its docs.
fn item_static(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
               s: &clean::Static) -> fmt::Result {
    write!(w, "<pre class='rust static'>")?;
    render_attributes(w, it)?;
    write!(w, "{vis}static {mutability}\
               {name}: {typ}{init}</pre>",
           vis = VisSpace(&it.visibility),
           mutability = MutableSpace(s.mutability),
           name = it.name.as_ref().unwrap(),
           typ = s.type_,
           init = Initializer(&s.expr))?;
    document(w, cx, it)
}

/// Renders the item page for a free function: signature block, then its docs.
fn item_function(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
                 f: &clean::Function) -> fmt::Result {
    // FIXME(#24111): remove when `const_fn` is stabilized
    let vis_constness = match UnstableFeatures::from_environment() {
        UnstableFeatures::Allow => f.constness,
        _ => hir::Constness::NotConst
    };
    // Plain-text length of the rendered header ({:#} = alternate/plain mode);
    // used by `Method` below to decide how to wrap the argument list.
    let name_len = format!("{}{}{}{:#}fn {}{:#}",
                           VisSpace(&it.visibility),
                           ConstnessSpace(vis_constness),
                           UnsafetySpace(f.unsafety),
                           AbiSpace(f.abi),
                           it.name.as_ref().unwrap(),
                           f.generics).len();
    write!(w, "<pre class='rust fn'>")?;
    render_attributes(w, it)?;
    write!(w, "{vis}{constness}{unsafety}{abi}fn \
               {name}{generics}{decl}{where_clause}</pre>",
           vis = VisSpace(&it.visibility),
           constness = ConstnessSpace(vis_constness),
           unsafety = UnsafetySpace(f.unsafety),
           abi = AbiSpace(f.abi),
           name = it.name.as_ref().unwrap(),
           generics = f.generics,
           where_clause = WhereClause { gens: &f.generics, indent: 0, end_newline: true },
           decl = Method {
               decl: &f.decl,
               name_len,
               indent: 0,
           })?;
    document(w, cx, it)
}

/// Renders the item page for a trait: its definition, the documentation for
/// each associated item, and the list of known implementors.
fn item_trait(w: &mut fmt::Formatter, cx: &Context, it:
              &clean::Item, t: &clean::Trait) -> fmt::Result {
    // `bounds` is the HTML-formatted supertrait list, `bounds_plain` the
    // plain-text twin (built in lockstep).
    let mut bounds = String::new();
    let mut bounds_plain = String::new();
    if !t.bounds.is_empty() {
        // NOTE(review): `bounds` was created empty just above, so this check
        // can never be true — the pushes below appear to be dead code.
        if !bounds.is_empty() {
            bounds.push(' ');
            bounds_plain.push(' ');
        }
        bounds.push_str(": ");
        bounds_plain.push_str(": ");
        for (i, p) in t.bounds.iter().enumerate() {
            if i > 0 {
                bounds.push_str(" + ");
                bounds_plain.push_str(" + ");
            }
            bounds.push_str(&format!("{}", *p));
            bounds_plain.push_str(&format!("{:#}", *p));
        }
    }

    // Output the trait definition
    write!(w, "<pre class='rust trait'>")?;
    render_attributes(w, it)?;
    write!(w, "{}{}trait {}{}{}",
           VisSpace(&it.visibility),
           UnsafetySpace(t.unsafety),
           it.name.as_ref().unwrap(),
           t.generics,
           bounds)?;

    if !t.generics.where_predicates.is_empty() {
        write!(w, "{}", WhereClause { gens: &t.generics, indent: 0, end_newline: true })?;
    } else {
        write!(w, " ")?;
    }

    // Partition the trait items into the four groups rendered separately.
    let types = t.items.iter().filter(|m| m.is_associated_type()).collect::<Vec<_>>();
    let consts = t.items.iter().filter(|m| m.is_associated_const()).collect::<Vec<_>>();
    let required = t.items.iter().filter(|m| m.is_ty_method()).collect::<Vec<_>>();
    let provided = t.items.iter().filter(|m| m.is_method()).collect::<Vec<_>>();

    if t.items.is_empty() {
        write!(w, "{{ }}")?;
    } else {
        // FIXME: we should be using a derived_id for the Anchors here
        write!(w, "{{\n")?;
        for t in &types {
            write!(w, "    ")?;
            render_assoc_item(w, t, AssocItemLink::Anchor(None), ItemType::Trait)?;
            write!(w, ";\n")?;
        }
        // Blank line between groups, only when both sides are non-empty.
        if !types.is_empty() && !consts.is_empty() {
            w.write_str("\n")?;
        }
        for t in &consts {
            write!(w, "    ")?;
            render_assoc_item(w, t, AssocItemLink::Anchor(None), ItemType::Trait)?;
            write!(w, ";\n")?;
        }
        if !consts.is_empty() && !required.is_empty() {
            w.write_str("\n")?;
        }
        for (pos, m) in required.iter().enumerate() {
            write!(w, "    ")?;
            render_assoc_item(w, m, AssocItemLink::Anchor(None), ItemType::Trait)?;
            write!(w, ";\n")?;
            if pos < required.len() - 1 {
                write!(w, "<div class='item-spacer'></div>")?;
            }
        }
        if !required.is_empty() && !provided.is_empty() {
            w.write_str("\n")?;
        }
        for (pos, m) in provided.iter().enumerate() {
            write!(w, "    ")?;
            render_assoc_item(w, m, AssocItemLink::Anchor(None), ItemType::Trait)?;
            // Methods with a where clause get the `{ ... }` stub on its own line.
            match m.inner {
                clean::MethodItem(ref inner) if !inner.generics.where_predicates.is_empty() => {
                    write!(w, ",\n    {{ ... }}\n")?;
                },
                _ => {
                    write!(w, " {{ ... }}\n")?;
                },
            }
            if pos < provided.len() - 1 {
                write!(w, "<div class='item-spacer'></div>")?;
            }
        }
        write!(w, "}}")?;
    }
    write!(w, "</pre>")?;

    // Trait documentation
    document(w, cx, it)?;

    // Renders one associated item heading + its docs; `t` is the enclosing
    // trait item (used for the stability-since marker).
    fn trait_item(w: &mut fmt::Formatter, cx: &Context, m: &clean::Item, t: &clean::Item)
                  -> fmt::Result {
        let name = m.name.as_ref().unwrap();
        let item_type = m.type_();
        let id = derive_id(format!("{}.{}", item_type, name));
        let ns_id = derive_id(format!("{}.{}", name, item_type.name_space()));
        write!(w, "<h3 id='{id}' class='method'>\
                   <span id='{ns_id}' class='invisible'><code>",
               id = id,
               ns_id = ns_id)?;
        render_assoc_item(w, m, AssocItemLink::Anchor(Some(&id)), ItemType::Impl)?;
        write!(w, "</code>")?;
        render_stability_since(w, m, t)?;
        write!(w, "</span></h3>")?;
        document(w, cx, m)?;
        Ok(())
    }

    if !types.is_empty() {
        write!(w, " <h2 id='associated-types' class='small-section-header'> Associated Types<a href='#associated-types' class='anchor'></a> </h2> <div class='methods'> ")?;
        for t in &types {
            trait_item(w, cx, *t, it)?;
        }
        write!(w, "</div>")?;
    }

    if !consts.is_empty() {
        write!(w, " <h2 id='associated-const' class='small-section-header'> Associated Constants<a href='#associated-const' class='anchor'></a> </h2> <div class='methods'> ")?;
        for t in &consts {
            trait_item(w, cx, *t, it)?;
        }
        write!(w, "</div>")?;
    }

    // Output the documentation for each function individually
    if !required.is_empty() {
        write!(w, " <h2 id='required-methods' class='small-section-header'> Required Methods<a href='#required-methods' class='anchor'></a> </h2> <div class='methods'> ")?;
        for m in &required {
            trait_item(w, cx, *m, it)?;
        }
        write!(w, "</div>")?;
    }
    if !provided.is_empty() {
        write!(w, " <h2 id='provided-methods' class='small-section-header'> Provided Methods<a href='#provided-methods' class='anchor'></a> </h2> <div class='methods'> ")?;
        for m in &provided {
            trait_item(w, cx, *m, it)?;
        }
        write!(w, "</div>")?;
    }

    // If there are methods directly on this trait object, render them here.
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)?;

    let cache = cache();
    write!(w, " <h2 id='implementors' class='small-section-header'> Implementors<a href='#implementors' class='anchor'></a> </h2> <ul class='item-list' id='implementors-list'> ")?;
    if let Some(implementors) = cache.implementors.get(&it.def_id) {
        // The DefId is for the first Type found with that name.  The bool is
        // if any Types with the same name but different DefId have been found.
        let mut implementor_dups: FxHashMap<&str, (DefId, bool)> = FxHashMap();
        for implementor in implementors {
            match implementor.impl_.for_ {
                clean::ResolvedPath { ref path, did, is_generic: false, .. } |
                clean::BorrowedRef {
                    type_: box clean::ResolvedPath { ref path, did, is_generic: false, .. },
                    ..
                } => {
                    let &mut (prev_did, ref mut has_duplicates) =
                        implementor_dups.entry(path.last_name()).or_insert((did, false));
                    if prev_did != did {
                        *has_duplicates = true;
                    }
                }
                _ => {}
            }
        }

        for implementor in implementors {
            write!(w, "<li><code>")?;
            // If there's already another implementor that has the same abbridged name, use the
            // full path, for example in `std::iter::ExactSizeIterator`
            let use_absolute = match implementor.impl_.for_ {
                clean::ResolvedPath { ref path, is_generic: false, .. } |
                clean::BorrowedRef {
                    type_: box clean::ResolvedPath { ref path, is_generic: false, .. },
                    ..
                } => implementor_dups[path.last_name()].1,
                _ => false,
            };
            fmt_impl_for_trait_page(&implementor.impl_, w, use_absolute)?;
            // Also show the impl's associated-type bindings inline.
            for it in &implementor.impl_.items {
                if let clean::TypedefItem(ref tydef, _) = it.inner {
                    write!(w, "<span class=\"where fmt-newline\">    ")?;
                    assoc_type(w, it, &vec![], Some(&tydef.type_), AssocItemLink::Anchor(None))?;
                    write!(w, ";</span>")?;
                }
            }
            writeln!(w, "</code></li>")?;
        }
    }
    write!(w, "</ul>")?;
    // The rest of the implementors are loaded asynchronously from a per-trait
    // JS file generated elsewhere.
    write!(w, r#"<script type="text/javascript" async src="{root_path}/implementors/{path}/{ty}.{name}.js"> </script>"#,
           root_path = vec![".."; cx.current.len()].join("/"),
           path = if it.def_id.is_local() {
               cx.current.join("/")
           } else {
               let (ref path, _) = cache.external_paths[&it.def_id];
               path[..path.len() - 1].join("/")
           },
           ty = it.type_().css_class(),
           name = *it.name.as_ref().unwrap())?;
    Ok(())
}

/// Computes the href for an associated item without consulting the source
/// map: either a fragment on the current page or a cross-page link.
fn naive_assoc_href(it: &clean::Item, link: AssocItemLink) -> String {
    use html::item_type::ItemType::*;

    let name = it.name.as_ref().unwrap();
    // Typedefs anchor under the associated-type namespace.
    let ty = match it.type_() {
        Typedef | AssociatedType => AssociatedType,
        s@_ => s,
    };

    let anchor = format!("#{}.{}", ty, name);
    match link {
        AssocItemLink::Anchor(Some(ref id)) => format!("#{}", id),
        AssocItemLink::Anchor(None) => anchor,
        AssocItemLink::GotoSource(did, _) => {
            // Fall back to the bare anchor when the target page is unknown.
            href(did).map(|p| format!("{}{}", p.0, anchor)).unwrap_or(anchor)
        }
    }
}

/// Renders an associated `const` declaration (name, link, and type).
fn assoc_const(w: &mut fmt::Formatter, it: &clean::Item,
               ty: &clean::Type, _default: Option<&String>,
               link: AssocItemLink) -> fmt::Result {
    write!(w, "const <a href='{}' class=\"constant\"><b>{}</b></a>: {}",
           naive_assoc_href(it, link),
           it.name.as_ref().unwrap(),
           ty)?;
    Ok(())
}

/// Renders an associated `type` declaration, including any bounds and a
/// `= Default` assignment when present.
fn assoc_type(w: &mut fmt::Formatter, it: &clean::Item,
              bounds: &Vec<clean::TyParamBound>,
              default: Option<&clean::Type>,
              link: AssocItemLink) -> fmt::Result {
    write!(w, "type <a href='{}' class=\"type\">{}</a>",
           naive_assoc_href(it, link),
           it.name.as_ref().unwrap())?;
    if !bounds.is_empty() {
        write!(w, ": {}", TyParamBounds(bounds))?
    }
    if let Some(default) = default {
        write!(w, " = {}", default)?;
    }
    Ok(())
}

/// Emits a "since <version>" marker, but only when the item's stabilization
/// version differs from its container's (avoids repeating the same version
/// on every member) and the version string is non-empty.
fn render_stability_since_raw<'a>(w: &mut fmt::Formatter,
                                  ver: Option<&'a str>,
                                  containing_ver: Option<&'a str>) -> fmt::Result {
    if let Some(v) = ver {
        if containing_ver != ver && v.len() > 0 {
            write!(w, "<div class='since' title='Stable since Rust version {0}'>{0}</div>",
                   v)?
        }
    }
    Ok(())
}

/// Convenience wrapper over `render_stability_since_raw` taking items.
fn render_stability_since(w: &mut fmt::Formatter,
                          item: &clean::Item,
                          containing_item: &clean::Item) -> fmt::Result {
    render_stability_since_raw(w, item.stable_since(), containing_item.stable_since())
}

/// Renders the signature line of an associated item (method, const, or type)
/// for either a trait listing or an impl block; `parent` distinguishes the
/// two layouts.
fn render_assoc_item(w: &mut fmt::Formatter,
                     item: &clean::Item,
                     link: AssocItemLink,
                     parent: ItemType) -> fmt::Result {
    // Shared renderer for both required (`TyMethodItem`) and provided
    // (`MethodItem`) methods.
    fn method(w: &mut fmt::Formatter,
              meth: &clean::Item,
              unsafety: hir::Unsafety,
              constness: hir::Constness,
              abi: abi::Abi,
              g: &clean::Generics,
              d: &clean::FnDecl,
              link: AssocItemLink,
              parent: ItemType)
              -> fmt::Result {
        let name = meth.name.as_ref().unwrap();
        let anchor = format!("#{}.{}", meth.type_(), name);
        let href = match link {
            AssocItemLink::Anchor(Some(ref id)) => format!("#{}", id),
            AssocItemLink::Anchor(None) => anchor,
            AssocItemLink::GotoSource(did, provided_methods) => {
                // We're creating a link from an impl-item to the corresponding
                // trait-item and need to map the anchored type accordingly.
                let ty = if provided_methods.contains(name) {
                    ItemType::Method
                } else {
                    ItemType::TyMethod
                };

                href(did).map(|p| format!("{}#{}.{}", p.0, ty, name)).unwrap_or(anchor)
            }
        };
        // FIXME(#24111): remove when `const_fn` is stabilized
        let vis_constness = if is_nightly_build() {
            constness
        } else {
            hir::Constness::NotConst
        };
        // Plain-text header length, used for argument-list wrapping below.
        let mut head_len = format!("{}{}{:#}fn {}{:#}",
                                   ConstnessSpace(vis_constness),
                                   UnsafetySpace(unsafety),
                                   AbiSpace(abi),
                                   name,
                                   *g).len();
        // Trait listings indent members by four spaces; impl blocks don't.
        let (indent, end_newline) = if parent == ItemType::Trait {
            head_len += 4;
            (4, false)
        } else {
            (0, true)
        };
        write!(w, "{}{}{}fn <a href='{href}' class='fnname'>{name}</a>\
                   {generics}{decl}{where_clause}",
               ConstnessSpace(vis_constness),
               UnsafetySpace(unsafety),
               AbiSpace(abi),
               href = href,
               name = name,
               generics = *g,
               decl = Method {
                   decl: d,
                   name_len: head_len,
                   indent,
               },
               where_clause = WhereClause {
                   gens: g,
                   indent,
                   end_newline,
               })
    }
    match item.inner {
        clean::StrippedItem(..) => Ok(()),
        clean::TyMethodItem(ref m) => {
            // Required methods are never `const`.
            method(w, item, m.unsafety, hir::Constness::NotConst,
                   m.abi, &m.generics, &m.decl, link, parent)
        }
        clean::MethodItem(ref m) => {
            method(w, item, m.unsafety, m.constness,
                   m.abi, &m.generics, &m.decl, link, parent)
        }
        clean::AssociatedConstItem(ref ty, ref default) => {
            assoc_const(w, item, ty, default.as_ref(), link)
        }
        clean::AssociatedTypeItem(ref bounds, ref default) => {
            assoc_type(w, item, bounds, default.as_ref(), link)
        }
        _ => panic!("render_assoc_item called on non-associated-item")
    }
}

/// Renders the item page for a struct: definition, field docs (plain structs
/// only), and associated items.
fn item_struct(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
               s: &clean::Struct) -> fmt::Result {
    write!(w, "<pre class='rust struct'>")?;
    render_attributes(w, it)?;
    render_struct(w, it, Some(&s.generics), s.struct_type, &s.fields, "", true)?;
    write!(w, "</pre>")?;

    document(w, cx, it)?;
    let mut fields = s.fields.iter().filter_map(|f| {
        match f.inner {
            clean::StructFieldItem(ref ty) => Some((f, ty)),
            _ => None,
        }
    }).peekable();
    // Only plain (non-tuple, non-unit) structs get a "Fields" section.
    if let doctree::Plain = s.struct_type {
        if fields.peek().is_some() {
            write!(w, "<h2
id='fields' class='fields small-section-header'> Fields<a href='#fields' class='anchor'></a></h2>")?;
            for (field, ty) in fields {
                let id = derive_id(format!("{}.{}",
                                           ItemType::StructField,
                                           field.name.as_ref().unwrap()));
                let ns_id = derive_id(format!("{}.{}",
                                              field.name.as_ref().unwrap(),
                                              ItemType::StructField.name_space()));
                write!(w, "<span id='{id}' class=\"{item_type}\"> <span id='{ns_id}' class='invisible'> <code>{name}: {ty}</code> </span></span>",
                       item_type = ItemType::StructField,
                       id = id,
                       ns_id = ns_id,
                       name = field.name.as_ref().unwrap(),
                       ty = ty)?;
                if let Some(stability_class) = field.stability_class() {
                    write!(w, "<span class='stab {stab}'></span>",
                           stab = stability_class)?;
                }
                document(w, cx, field)?;
            }
        }
    }
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)
}

/// Renders the item page for a `union`: definition, field docs, and
/// associated items.
fn item_union(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
              s: &clean::Union) -> fmt::Result {
    write!(w, "<pre class='rust union'>")?;
    render_attributes(w, it)?;
    render_union(w, it, Some(&s.generics), &s.fields, "", true)?;
    write!(w, "</pre>")?;

    document(w, cx, it)?;
    let mut fields = s.fields.iter().filter_map(|f| {
        match f.inner {
            clean::StructFieldItem(ref ty) => Some((f, ty)),
            _ => None,
        }
    }).peekable();
    if fields.peek().is_some() {
        write!(w, "<h2 id='fields' class='fields small-section-header'> Fields<a href='#fields' class='anchor'></a></h2>")?;
        for (field, ty) in fields {
            write!(w, "<span id='{shortty}.{name}' class=\"{shortty}\"><code>{name}: {ty}</code> </span>",
                   shortty = ItemType::StructField,
                   name = field.name.as_ref().unwrap(),
                   ty = ty)?;
            if let Some(stability_class) = field.stability_class() {
                write!(w, "<span class='stab {stab}'></span>",
                       stab = stability_class)?;
            }
            document(w, cx, field)?;
        }
    }
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)
}

/// Renders the item page for an enum: definition (with all variants), then
/// per-variant documentation (including struct-variant field tables).
fn item_enum(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
             e: &clean::Enum) -> fmt::Result {
    write!(w, "<pre class='rust enum'>")?;
    render_attributes(w, it)?;
    write!(w, "{}enum {}{}{}",
           VisSpace(&it.visibility),
           it.name.as_ref().unwrap(),
           e.generics,
           WhereClause { gens: &e.generics, indent: 0, end_newline: true })?;
    if e.variants.is_empty() && !e.variants_stripped {
        write!(w, " {{}}")?;
    } else {
        write!(w, " {{\n")?;
        for v in &e.variants {
            write!(w, "    ")?;
            let name = v.name.as_ref().unwrap();
            match v.inner {
                clean::VariantItem(ref var) => {
                    match var.kind {
                        clean::VariantKind::CLike => write!(w, "{}", name)?,
                        clean::VariantKind::Tuple(ref tys) => {
                            write!(w, "{}(", name)?;
                            for (i, ty) in tys.iter().enumerate() {
                                if i > 0 {
                                    write!(w, ",&nbsp;")?
                                }
                                write!(w, "{}", *ty)?;
                            }
                            write!(w, ")")?;
                        }
                        clean::VariantKind::Struct(ref s) => {
                            render_struct(w, v, None, s.struct_type, &s.fields, "    ", false)?;
                        }
                    }
                }
                _ => unreachable!()
            }
            write!(w, ",\n")?;
        }

        // Private/hidden variants get an elision comment in the listing.
        if e.variants_stripped {
            write!(w, "    // some variants omitted\n")?;
        }
        write!(w, "}}")?;
    }
    write!(w, "</pre>")?;

    document(w, cx, it)?;
    if !e.variants.is_empty() {
        write!(w, "<h2 id='variants' class='variants small-section-header'> Variants<a href='#variants' class='anchor'></a></h2>\n")?;
        for variant in &e.variants {
            let id = derive_id(format!("{}.{}",
                                       ItemType::Variant,
                                       variant.name.as_ref().unwrap()));
            let ns_id = derive_id(format!("{}.{}",
                                          variant.name.as_ref().unwrap(),
                                          ItemType::Variant.name_space()));
            write!(w, "<span id='{id}' class='variant'>\
                       <span id='{ns_id}' class='invisible'><code>{name}",
                   id = id,
                   ns_id = ns_id,
                   name = variant.name.as_ref().unwrap())?;
            // Tuple variants echo their payload types in the heading.
            if let clean::VariantItem(ref var) = variant.inner {
                if let clean::VariantKind::Tuple(ref tys) = var.kind {
                    write!(w, "(")?;
                    for (i, ty) in tys.iter().enumerate() {
                        if i > 0 {
                            write!(w, ",&nbsp;")?;
                        }
                        write!(w, "{}", *ty)?;
                    }
                    write!(w, ")")?;
                }
            }
            write!(w, "</code></span></span>")?;
            document(w, cx, variant)?;

            use clean::{Variant, VariantKind};
            // Struct variants additionally get a collapsible field table.
            if let clean::VariantItem(Variant {
                kind: VariantKind::Struct(ref s)
            }) = variant.inner {
                let variant_id = derive_id(format!("{}.{}.fields",
                                                   ItemType::Variant,
                                                   variant.name.as_ref().unwrap()));
                write!(w, "<span class='docblock autohide sub-variant' id='{id}'>",
                       id = variant_id)?;
                write!(w, "<h3 class='fields'>Fields of <code>{name}</code></h3>\n <table>",
                       name = variant.name.as_ref().unwrap())?;
                for field in &s.fields {
                    use clean::StructFieldItem;
                    if let StructFieldItem(ref ty) = field.inner {
                        let id = derive_id(format!("variant.{}.field.{}",
                                                   variant.name.as_ref().unwrap(),
                                                   field.name.as_ref().unwrap()));
                        let ns_id = derive_id(format!("{}.{}.{}.{}",
                                                      variant.name.as_ref().unwrap(),
                                                      ItemType::Variant.name_space(),
                                                      field.name.as_ref().unwrap(),
                                                      ItemType::StructField.name_space()));
                        write!(w, "<tr><td \
                                   id='{id}'>\
                                   <span id='{ns_id}' class='invisible'>\
                                   <code>{f}:&nbsp;{t}</code></span></td><td>",
                               id = id,
                               ns_id = ns_id,
                               f = field.name.as_ref().unwrap(),
                               t = *ty)?;
                        document(w, cx, field)?;
                        write!(w, "</td></tr>")?;
                    }
                }
                write!(w, "</table></span>")?;
            }
            render_stability_since(w, variant, it)?;
        }
    }
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)?;
    Ok(())
}

/// Renders a single attribute back to source form; recurses into list-style
/// meta items (e.g. `#[repr(C)]`). Returns `None` for unrenderable shapes.
fn render_attribute(attr: &ast::MetaItem) -> Option<String> {
    let name = attr.name();

    if attr.is_word() {
        Some(format!("{}", name))
    } else if let Some(v) = attr.value_str() {
        Some(format!("{} = {:?}", name, v.as_str()))
    } else if let Some(values) = attr.meta_item_list() {
        let display: Vec<_> = values.iter().filter_map(|attr| {
            attr.meta_item().and_then(|mi| render_attribute(mi))
        }).collect();

        if display.len() > 0 {
            Some(format!("{}({})", name, display.join(", ")))
        } else {
            None
        }
    } else {
        None
    }
}

// Only attributes in this whitelist are surfaced in the rendered docs.
const ATTRIBUTE_WHITELIST: &'static [&'static str] = &[
    "export_name",
    "lang",
    "link_section",
    "must_use",
    "no_mangle",
    "repr",
    "unsafe_destructor_blind_to_params"
];

/// Renders the whitelisted attributes of `it` as a `docblock attributes`
/// div (one `#[...]` per line); emits nothing when none apply.
fn render_attributes(w: &mut fmt::Formatter, it: &clean::Item) -> fmt::Result {
    let mut attrs = String::new();

    for attr in &it.attrs.other_attrs {
        let name = attr.name().unwrap();
        if !ATTRIBUTE_WHITELIST.contains(&&*name.as_str()) {
            continue;
        }
        if let Some(s) = render_attribute(&attr.meta().unwrap()) {
            attrs.push_str(&format!("#[{}]\n", s));
        }
    }
    if
       attrs.len() > 0 {
        write!(w, "<div class=\"docblock attributes\">{}</div>", &attrs)?;
    }
    Ok(())
}

/// Renders a struct definition. `structhead` controls whether the `struct`
/// keyword is printed (false when embedded as an enum struct-variant);
/// `tab` is the indentation prefix for field lines.
fn render_struct(w: &mut fmt::Formatter, it: &clean::Item,
                 g: Option<&clean::Generics>,
                 ty: doctree::StructType,
                 fields: &[clean::Item],
                 tab: &str,
                 structhead: bool) -> fmt::Result {
    write!(w, "{}{}{}",
           VisSpace(&it.visibility),
           if structhead {"struct "} else {""},
           it.name.as_ref().unwrap())?;
    if let Some(g) = g {
        write!(w, "{}", g)?
    }
    match ty {
        doctree::Plain => {
            if let Some(g) = g {
                write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: true })?
            }
            let mut has_visible_fields = false;
            write!(w, " {{")?;
            for field in fields {
                if let clean::StructFieldItem(ref ty) = field.inner {
                    write!(w, "\n{}    {}{}: {},",
                           tab,
                           VisSpace(&field.visibility),
                           field.name.as_ref().unwrap(),
                           *ty)?;
                    has_visible_fields = true;
                }
            }

            if has_visible_fields {
                if it.has_stripped_fields().unwrap() {
                    write!(w, "\n{}    // some fields omitted", tab)?;
                }
                write!(w, "\n{}", tab)?;
            } else if it.has_stripped_fields().unwrap() {
                // If there are no visible fields we can just display
                // `{ /* fields omitted */ }` to save space.
                write!(w, " /* fields omitted */ ")?;
            }
            write!(w, "}}")?;
        }
        doctree::Tuple => {
            write!(w, "(")?;
            for (i, field) in fields.iter().enumerate() {
                if i > 0 {
                    write!(w, ", ")?;
                }
                match field.inner {
                    // Stripped (private/hidden) tuple fields render as `_`.
                    clean::StrippedItem(box clean::StructFieldItem(..)) => {
                        write!(w, "_")?
                    }
                    clean::StructFieldItem(ref ty) => {
                        write!(w, "{}{}", VisSpace(&field.visibility), *ty)?
                    }
                    _ => unreachable!()
                }
            }
            write!(w, ")")?;
            if let Some(g) = g {
                write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: false })?
            }
            write!(w, ";")?;
        }
        doctree::Unit => {
            // Needed for PhantomData.
            if let Some(g) = g {
                write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: false })?
            }
            write!(w, ";")?;
        }
    }
    Ok(())
}

/// Renders a union definition; same parameters as `render_struct`.
fn render_union(w: &mut fmt::Formatter, it: &clean::Item,
                g: Option<&clean::Generics>,
                fields: &[clean::Item],
                tab: &str,
                structhead: bool) -> fmt::Result {
    write!(w, "{}{}{}",
           VisSpace(&it.visibility),
           if structhead {"union "} else {""},
           it.name.as_ref().unwrap())?;
    if let Some(g) = g {
        write!(w, "{}", g)?;
        write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: true })?;
    }

    write!(w, " {{\n{}", tab)?;
    for field in fields {
        if let clean::StructFieldItem(ref ty) = field.inner {
            write!(w, "    {}{}: {},\n{}",
                   VisSpace(&field.visibility),
                   field.name.as_ref().unwrap(),
                   *ty,
                   tab)?;
        }
    }

    if it.has_stripped_fields().unwrap() {
        write!(w, "    // some fields omitted\n{}", tab)?;
    }
    write!(w, "}}")?;
    Ok(())
}

// How an associated item should be linked: a fragment on the current page,
// or a jump to its defining trait's page (with that trait's provided-method
// set, needed to pick the right anchor namespace).
#[derive(Copy, Clone)]
enum AssocItemLink<'a> {
    Anchor(Option<&'a str>),
    GotoSource(DefId, &'a FxHashSet<String>),
}

impl<'a> AssocItemLink<'a> {
    // Pin an `Anchor` link to a concrete derived id; other variants pass through.
    fn anchor(&self, id: &'a String) -> Self {
        match *self {
            AssocItemLink::Anchor(_) => {
                AssocItemLink::Anchor(Some(&id))
            },
            ref other => *other,
        }
    }
}

// What subset of associated items to render: everything, or only the methods
// reachable through a `Deref` impl (`deref_mut_` gates `&mut self` methods).
enum AssocItemRender<'a> {
    All,
    DerefFor { trait_: &'a clean::Type, type_: &'a clean::Type, deref_mut_: bool }
}

// Normal page rendering vs. the reduced "methods from Deref target" mode.
#[derive(Copy, Clone, PartialEq)]
enum RenderMode {
    Normal,
    ForDeref { mut_: bool },
}

/// Renders all impls registered for `it` (a DefId): inherent methods first,
/// then Deref-forwarded methods, then trait implementations.
fn render_assoc_items(w: &mut fmt::Formatter,
                      cx: &Context,
                      containing_item: &clean::Item,
                      it: DefId,
                      what: AssocItemRender) -> fmt::Result {
    let c = cache();
    let v = match c.impls.get(&it) {
        Some(v) => v,
        None => return Ok(()),
    };
    // Split inherent impls from trait impls.
    let (non_trait, traits): (Vec<_>, _) = v.iter().partition(|i| {
        i.inner_impl().trait_.is_none()
    });
    if !non_trait.is_empty() {
        let render_mode = match what {
            AssocItemRender::All => {
                write!(w, " <h2 id='methods' class='small-section-header'> Methods<a href='#methods' class='anchor'></a> </h2> ")?;
                RenderMode::Normal
            }
            AssocItemRender::DerefFor { trait_, type_, deref_mut_ } => {
                write!(w, " <h2 id='deref-methods' class='small-section-header'> Methods from {}&lt;Target = {}&gt;<a href='#deref-methods' class='anchor'></a> </h2>
", trait_, type_)?; RenderMode::ForDeref { mut_: deref_mut_ } } }; for i in &non_trait { render_impl(w, cx, i, AssocItemLink::Anchor(None), render_mode, containing_item.stable_since())?; } } if let AssocItemRender::DerefFor { .. } = what { return Ok(()); } if !traits.is_empty() { let deref_impl = traits.iter().find(|t| { t.inner_impl().trait_.def_id() == c.deref_trait_did }); if let Some(impl_) = deref_impl { let has_deref_mut = traits.iter().find(|t| { t.inner_impl().trait_.def_id() == c.deref_mut_trait_did }).is_some(); render_deref_methods(w, cx, impl_, containing_item, has_deref_mut)?; } write!(w, " <h2 id='implementations' class='small-section-header'> Trait Implementations<a href='#implementations' class='anchor'></a> </h2> ")?; for i in &traits { let did = i.trait_did().unwrap(); let assoc_link = AssocItemLink::GotoSource(did, &i.inner_impl().provided_trait_methods); render_impl(w, cx, i, assoc_link, RenderMode::Normal, containing_item.stable_since())?; } } Ok(()) } fn render_deref_methods(w: &mut fmt::Formatter, cx: &Context, impl_: &Impl, container_item: &clean::Item, deref_mut: bool) -> fmt::Result { let deref_type = impl_.inner_impl().trait_.as_ref().unwrap(); let target = impl_.inner_impl().items.iter().filter_map(|item| { match item.inner { clean::TypedefItem(ref t, true) => Some(&t.type_), _ => None, } }).next().expect("Expected associated type binding"); let what = AssocItemRender::DerefFor { trait_: deref_type, type_: target, deref_mut_: deref_mut }; if let Some(did) = target.def_id() { render_assoc_items(w, cx, container_item, did, what) } else { if let Some(prim) = target.primitive_type() { if let Some(&did) = cache().primitive_locations.get(&prim) { render_assoc_items(w, cx, container_item, did, what)?; } } Ok(()) } } fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLink, render_mode: RenderMode, outer_version: Option<&str>) -> fmt::Result { if render_mode == RenderMode::Normal { let id = derive_id(match 
i.inner_impl().trait_ { Some(ref t) => format!("impl-{}", Escape(&format!("{:#}", t))), None => "impl".to_string(), }); write!(w, "<h3 id='{}' class='impl'><span class='in-band'><code>{}</code>", id, i.inner_impl())?; write!(w, "<a href='#{}' class='anchor'></a>", id)?; write!(w, "</span><span class='out-of-band'>")?; let since = i.impl_item.stability.as_ref().map(|s| &s.since[..]); if let Some(l) = (Item { item: &i.impl_item, cx: cx }).src_href() { write!(w, "<div class='ghost'></div>")?; render_stability_since_raw(w, since, outer_version)?; write!(w, "<a class='srclink' href='{}' title='{}'>[src]</a>", l, "goto source code")?; } else { render_stability_since_raw(w, since, outer_version)?; } write!(w, "</span>")?; write!(w, "</h3>\n")?; if let Some(ref dox) = i.impl_item.doc_value() { write!(w, "<div class='docblock'>{}</div>", Markdown(dox, cx.render_type))?; } } fn doc_impl_item(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item, link: AssocItemLink, render_mode: RenderMode, is_default_item: bool, outer_version: Option<&str>, trait_: Option<&clean::Trait>) -> fmt::Result { let item_type = item.type_(); let name = item.name.as_ref().unwrap(); let render_method_item: bool = match render_mode { RenderMode::Normal => true, RenderMode::ForDeref { mut_: deref_mut_ } => { let self_type_opt = match item.inner { clean::MethodItem(ref method) => method.decl.self_type(), clean::TyMethodItem(ref method) => method.decl.self_type(), _ => None }; if let Some(self_ty) = self_type_opt { let (by_mut_ref, by_box) = match self_ty { SelfTy::SelfBorrowed(_, mutability) | SelfTy::SelfExplicit(clean::BorrowedRef { mutability, .. }) => { (mutability == Mutability::Mutable, false) }, SelfTy::SelfExplicit(clean::ResolvedPath { did, .. }) => { (false, Some(did) == cache().owned_box_did) }, _ => (false, false), }; (deref_mut_ || !by_mut_ref) && !by_box } else { false } }, }; match item.inner { clean::MethodItem(..) | clean::TyMethodItem(..) 
=> { // Only render when the method is not static or we allow static methods if render_method_item { let id = derive_id(format!("{}.{}", item_type, name)); let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?; write!(w, "<span id='{}' class='invisible'>", ns_id)?; write!(w, "<code>")?; render_assoc_item(w, item, link.anchor(&id), ItemType::Impl)?; write!(w, "</code>")?; if let Some(l) = (Item { cx, item }).src_href() { write!(w, "</span><span class='out-of-band'>")?; write!(w, "<div class='ghost'></div>")?; render_stability_since_raw(w, item.stable_since(), outer_version)?; write!(w, "<a class='srclink' href='{}' title='{}'>[src]</a>", l, "goto source code")?; } else { render_stability_since_raw(w, item.stable_since(), outer_version)?; } write!(w, "</span></h4>\n")?; } } clean::TypedefItem(ref tydef, _) => { let id = derive_id(format!("{}.{}", ItemType::AssociatedType, name)); let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?; write!(w, "<span id='{}' class='invisible'><code>", ns_id)?; assoc_type(w, item, &Vec::new(), Some(&tydef.type_), link.anchor(&id))?; write!(w, "</code></span></h4>\n")?; } clean::AssociatedConstItem(ref ty, ref default) => { let id = derive_id(format!("{}.{}", item_type, name)); let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?; write!(w, "<span id='{}' class='invisible'><code>", ns_id)?; assoc_const(w, item, ty, default.as_ref(), link.anchor(&id))?; write!(w, "</code></span></h4>\n")?; } clean::AssociatedTypeItem(ref bounds, ref default) => { let id = derive_id(format!("{}.{}", item_type, name)); let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?; write!(w, "<span id='{}' class='invisible'><code>", ns_id)?; assoc_type(w, item, bounds, 
default.as_ref(), link.anchor(&id))?; write!(w, "</code></span></h4>\n")?; } clean::StrippedItem(..) => return Ok(()), _ => panic!("can't make docs for trait item with name {:?}", item.name) } if render_method_item || render_mode == RenderMode::Normal { let prefix = render_assoc_const_value(item); if !is_default_item { if let Some(t) = trait_ { // The trait item may have been stripped so we might not // find any documentation or stability for it. if let Some(it) = t.items.iter().find(|i| i.name == item.name) { // We need the stability of the item from the trait // because impls can't have a stability. document_stability(w, cx, it)?; if item.doc_value().is_some() { document_full(w, item, cx, &prefix)?; } else { // In case the item isn't documented, // provide short documentation from the trait. document_short(w, it, link, cx, &prefix)?; } } } else { document_stability(w, cx, item)?; document_full(w, item, cx, &prefix)?; } } else { document_stability(w, cx, item)?; document_short(w, item, link, cx, &prefix)?; } } Ok(()) } let traits = &cache().traits; let trait_ = i.trait_did().and_then(|did| traits.get(&did)); write!(w, "<div class='impl-items'>")?; for trait_item in &i.inner_impl().items { doc_impl_item(w, cx, trait_item, link, render_mode, false, outer_version, trait_)?; } fn render_default_items(w: &mut fmt::Formatter, cx: &Context, t: &clean::Trait, i: &clean::Impl, render_mode: RenderMode, outer_version: Option<&str>) -> fmt::Result { for trait_item in &t.items { let n = trait_item.name.clone(); if i.items.iter().find(|m| m.name == n).is_some() { continue; } let did = i.trait_.as_ref().unwrap().def_id().unwrap(); let assoc_link = AssocItemLink::GotoSource(did, &i.provided_trait_methods); doc_impl_item(w, cx, trait_item, assoc_link, render_mode, true, outer_version, None)?; } Ok(()) } // If we've implemented a trait, then also emit documentation for all // default items which weren't overridden in the implementation block. 
if let Some(t) = trait_ { render_default_items(w, cx, t, &i.inner_impl(), render_mode, outer_version)?; } write!(w, "</div>")?; Ok(()) } fn item_typedef(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, t: &clean::Typedef) -> fmt::Result { write!(w, "<pre class='rust typedef'>")?; render_attributes(w, it)?; write!(w, "type {}{}{where_clause} = {type_};</pre>", it.name.as_ref().unwrap(), t.generics, where_clause = WhereClause { gens: &t.generics, indent: 0, end_newline: true }, type_ = t.type_)?; document(w, cx, it)?; // Render any items associated directly to this alias, as otherwise they // won't be visible anywhere in the docs. It would be nice to also show // associated items from the aliased type (see discussion in #32077), but // we need #14072 to make sense of the generics. render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All) } impl<'a> fmt::Display for Sidebar<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let cx = self.cx; let it = self.item; let parentlen = cx.current.len() - if it.is_mod() {1} else {0}; if it.is_struct() || it.is_trait() || it.is_primitive() || it.is_union() || it.is_enum() || it.is_mod() || it.is_typedef() { write!(fmt, "<p class='location'>")?; match it.inner { clean::StructItem(..) => write!(fmt, "Struct ")?, clean::TraitItem(..) => write!(fmt, "Trait ")?, clean::PrimitiveItem(..) => write!(fmt, "Primitive Type ")?, clean::UnionItem(..) => write!(fmt, "Union ")?, clean::EnumItem(..) => write!(fmt, "Enum ")?, clean::TypedefItem(..) => write!(fmt, "Type Definition ")?, clean::ModuleItem(..) 
=> if it.is_crate() { write!(fmt, "Crate ")?; } else { write!(fmt, "Module ")?; }, _ => (), } write!(fmt, "{}", it.name.as_ref().unwrap())?; write!(fmt, "</p>")?; match it.inner { clean::StructItem(ref s) => sidebar_struct(fmt, it, s)?, clean::TraitItem(ref t) => sidebar_trait(fmt, it, t)?, clean::PrimitiveItem(ref p) => sidebar_primitive(fmt, it, p)?, clean::UnionItem(ref u) => sidebar_union(fmt, it, u)?, clean::EnumItem(ref e) => sidebar_enum(fmt, it, e)?, clean::TypedefItem(ref t, _) => sidebar_typedef(fmt, it, t)?, clean::ModuleItem(ref m) => sidebar_module(fmt, it, &m.items)?, _ => (), } } // The sidebar is designed to display sibling functions, modules and // other miscellaneous information. since there are lots of sibling // items (and that causes quadratic growth in large modules), // we refactor common parts into a shared JavaScript file per module. // still, we don't move everything into JS because we want to preserve // as much HTML as possible in order to allow non-JS-enabled browsers // to navigate the documentation (though slightly inefficiently). write!(fmt, "<p class='location'>")?; for (i, name) in cx.current.iter().take(parentlen).enumerate() { if i > 0 { write!(fmt, "::<wbr>")?; } write!(fmt, "<a href='{}index.html'>{}</a>", &cx.root_path()[..(cx.current.len() - i - 1) * 3], *name)?; } write!(fmt, "</p>")?; // Sidebar refers to the enclosing module, not this module. 
// NOTE(review): this span resumes mid-way through `<Sidebar as fmt::Display>::fmt`;
// the method header lives in an earlier chunk of the file.
// Path back to the enclosing module's directory: a module page's shared
// sidebar data lives one directory up.
let relpath = if it.is_mod() { "../" } else { "" };
// Emit the per-page globals consumed by the shared sidebar-items.js script.
write!(fmt,
       "<script>window.sidebarCurrent = {{\
            name: '{name}', \
            ty: '{ty}', \
            relpath: '{path}'\
        }};</script>",
       name = it.name.as_ref().map(|x| &x[..]).unwrap_or(""),
       ty = it.type_().css_class(),
       path = relpath)?;
if parentlen == 0 {
    // There is no sidebar-items.js beyond the crate root path
    // FIXME maybe dynamic crate loading can be merged here
} else {
    // Sibling-item lists are loaded from a generated JS file so the
    // (potentially large) list is shared per module rather than inlined.
    write!(fmt, "<script defer src=\"{path}sidebar-items.js\"></script>",
           path = relpath)?;
}

Ok(())
}
}

/// Builds the "Methods" / "Trait Implementations" portion of a sidebar for an
/// item, based on the impls recorded for it in the rendering cache. Returns
/// raw `<li>` HTML; empty string when the item has no known impls.
fn sidebar_assoc_items(it: &clean::Item) -> String {
    let mut out = String::new();
    let c = cache();
    if let Some(v) = c.impls.get(&it.def_id) {
        // Any inherent impl (no trait) means a "Methods" section exists.
        if v.iter().any(|i| i.inner_impl().trait_.is_none()) {
            out.push_str("<li><a href=\"#methods\">Methods</a></li>");
        }
        if v.iter().any(|i| i.inner_impl().trait_.is_some()) {
            // If the type implements Deref, also link the methods reachable
            // through the deref target.
            if let Some(impl_) = v.iter()
                                  .filter(|i| i.inner_impl().trait_.is_some())
                                  .find(|i| i.inner_impl().trait_.def_id() == c.deref_trait_did) {
                // The `Target` associated type of the Deref impl.
                if let Some(target) = impl_.inner_impl().items.iter().filter_map(|item| {
                    match item.inner {
                        clean::TypedefItem(ref t, true) => Some(&t.type_),
                        _ => None,
                    }
                }).next() {
                    // Only emit the link when the target type actually has
                    // impls we can point at (resolved either by DefId or via
                    // the primitive-location table).
                    let inner_impl = target.def_id().or(target.primitive_type().and_then(|prim| {
                        c.primitive_locations.get(&prim).cloned()
                    })).and_then(|did| c.impls.get(&did));
                    if inner_impl.is_some() {
                        out.push_str("<li><a href=\"#deref-methods\">");
                        out.push_str(&format!("Methods from {:#}&lt;Target={:#}&gt;",
                                              impl_.inner_impl().trait_.as_ref().unwrap(),
                                              target));
                        out.push_str("</a></li>");
                    }
                }
            }
            out.push_str("<li><a href=\"#implementations\">Trait Implementations</a></li>");
        }
    }

    out
}

// NOTE(review): `sidebar_struct` is cut by the chunk boundary here; only its
// opening lines appear in this span and the body continues in the next one.
fn sidebar_struct(fmt: &mut fmt::Formatter, it: &clean::Item,
                  s: &clean::Struct) -> fmt::Result {
    let mut sidebar = String::new();

    if s.fields.iter()
               .any(|f| if let clean::StructFieldItem(..)
= f.inner { true } else { false }) { if let doctree::Plain = s.struct_type { sidebar.push_str("<li><a href=\"#fields\">Fields</a></li>"); } } sidebar.push_str(&sidebar_assoc_items(it)); if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?; } Ok(()) } fn sidebar_trait(fmt: &mut fmt::Formatter, it: &clean::Item, t: &clean::Trait) -> fmt::Result { let mut sidebar = String::new(); let has_types = t.items.iter().any(|m| m.is_associated_type()); let has_consts = t.items.iter().any(|m| m.is_associated_const()); let has_required = t.items.iter().any(|m| m.is_ty_method()); let has_provided = t.items.iter().any(|m| m.is_method()); if has_types { sidebar.push_str("<li><a href=\"#associated-types\">Associated Types</a></li>"); } if has_consts { sidebar.push_str("<li><a href=\"#associated-const\">Associated Constants</a></li>"); } if has_required { sidebar.push_str("<li><a href=\"#required-methods\">Required Methods</a></li>"); } if has_provided { sidebar.push_str("<li><a href=\"#provided-methods\">Provided Methods</a></li>"); } sidebar.push_str(&sidebar_assoc_items(it)); sidebar.push_str("<li><a href=\"#implementors\">Implementors</a></li>"); write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar) } fn sidebar_primitive(fmt: &mut fmt::Formatter, it: &clean::Item, _p: &clean::PrimitiveType) -> fmt::Result { let sidebar = sidebar_assoc_items(it); if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?; } Ok(()) } fn sidebar_typedef(fmt: &mut fmt::Formatter, it: &clean::Item, _t: &clean::Typedef) -> fmt::Result { let sidebar = sidebar_assoc_items(it); if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?; } Ok(()) } fn sidebar_union(fmt: &mut fmt::Formatter, it: &clean::Item, u: &clean::Union) -> fmt::Result { let mut sidebar = String::new(); if u.fields.iter() .any(|f| if let clean::StructFieldItem(..) 
= f.inner { true } else { false }) { sidebar.push_str("<li><a href=\"#fields\">Fields</a></li>"); } sidebar.push_str(&sidebar_assoc_items(it)); if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?; } Ok(()) } fn sidebar_enum(fmt: &mut fmt::Formatter, it: &clean::Item, e: &clean::Enum) -> fmt::Result { let mut sidebar = String::new(); if !e.variants.is_empty() { sidebar.push_str("<li><a href=\"#variants\">Variants</a></li>"); } sidebar.push_str(&sidebar_assoc_items(it)); if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?; } Ok(()) } fn sidebar_module(fmt: &mut fmt::Formatter, _it: &clean::Item, items: &[clean::Item]) -> fmt::Result { let mut sidebar = String::new(); if items.iter().any(|it| it.type_() == ItemType::ExternCrate || it.type_() == ItemType::Import) { sidebar.push_str(&format!("<li><a href=\"#{id}\">{name}</a></li>", id = "reexports", name = "Reexports")); } // ordering taken from item_module, reorder, where it prioritized elements in a certain order // to print its headings for &myty in &[ItemType::Primitive, ItemType::Module, ItemType::Macro, ItemType::Struct, ItemType::Enum, ItemType::Constant, ItemType::Static, ItemType::Trait, ItemType::Function, ItemType::Typedef, ItemType::Union, ItemType::Impl, ItemType::TyMethod, ItemType::Method, ItemType::StructField, ItemType::Variant, ItemType::AssociatedType, ItemType::AssociatedConst] { if items.iter().any(|it| { if let clean::DefaultImplItem(..) 
= it.inner { false } else { !it.is_stripped() && it.type_() == myty } }) { let (short, name) = match myty { ItemType::ExternCrate | ItemType::Import => ("reexports", "Reexports"), ItemType::Module => ("modules", "Modules"), ItemType::Struct => ("structs", "Structs"), ItemType::Union => ("unions", "Unions"), ItemType::Enum => ("enums", "Enums"), ItemType::Function => ("functions", "Functions"), ItemType::Typedef => ("types", "Type Definitions"), ItemType::Static => ("statics", "Statics"), ItemType::Constant => ("constants", "Constants"), ItemType::Trait => ("traits", "Traits"), ItemType::Impl => ("impls", "Implementations"), ItemType::TyMethod => ("tymethods", "Type Methods"), ItemType::Method => ("methods", "Methods"), ItemType::StructField => ("fields", "Struct Fields"), ItemType::Variant => ("variants", "Variants"), ItemType::Macro => ("macros", "Macros"), ItemType::Primitive => ("primitives", "Primitive Types"), ItemType::AssociatedType => ("associated-types", "Associated Types"), ItemType::AssociatedConst => ("associated-consts", "Associated Constants"), }; sidebar.push_str(&format!("<li><a href=\"#{id}\">{name}</a></li>", id = short, name = name)); } } if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?; } Ok(()) } impl<'a> fmt::Display for Source<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let Source(s) = *self; let lines = s.lines().count(); let mut cols = 0; let mut tmp = lines; while tmp > 0 { cols += 1; tmp /= 10; } write!(fmt, "<pre class=\"line-numbers\">")?; for i in 1..lines + 1 { write!(fmt, "<span id=\"{0}\">{0:1$}</span>\n", i, cols)?; } write!(fmt, "</pre>")?; write!(fmt, "{}", highlight::render_with_highlighting(s, None, None, None))?; Ok(()) } } fn item_macro(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, t: &clean::Macro) -> fmt::Result { w.write_str(&highlight::render_with_highlighting(&t.source, Some("macro"), None, None))?; document(w, cx, it) } fn item_primitive(w: &mut 
// NOTE(review): `item_primitive`'s parameter list resumes here; the `fn` line
// is in the previous span.
fmt::Formatter, cx: &Context,
                  it: &clean::Item,
                  _p: &clean::PrimitiveType) -> fmt::Result {
    // A primitive page is its documentation plus any impls the cache knows.
    document(w, cx, it)?;
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)
}

// Keywords placed in every generated page's <meta> keywords list.
const BASIC_KEYWORDS: &'static str = "rust, rustlang, rust-lang";

/// Appends the item's own name to the base keyword list for its page.
fn make_item_keywords(it: &clean::Item) -> String {
    format!("{}, {}", BASIC_KEYWORDS, it.name.as_ref().unwrap())
}

/// Extracts the searchable signature (input/output types) of a function-like
/// item for the JS search index. Returns `None` for non-function items.
fn get_index_search_type(item: &clean::Item) -> Option<IndexItemFunctionType> {
    let decl = match item.inner {
        clean::FunctionItem(ref f) => &f.decl,
        clean::MethodItem(ref m) => &m.decl,
        clean::TyMethodItem(ref m) => &m.decl,
        _ => return None
    };

    let inputs = decl.inputs.values.iter().map(|arg| get_index_type(&arg.type_)).collect();
    let output = match decl.output {
        // Only an explicit return type is indexed; other return kinds are
        // recorded as no output.
        clean::FunctionRetTy::Return(ref return_type) => Some(get_index_type(return_type)),
        _ => None
    };

    Some(IndexItemFunctionType { inputs: inputs, output: output })
}

/// Lower-cases a type's display name so search matching is case-insensitive.
fn get_index_type(clean_type: &clean::Type) -> Type {
    Type { name: get_index_type_name(clean_type).map(|s| s.to_ascii_lowercase()) }
}

// Maps a clean type to the single name the search index stores for it; the
// match is closed in the next span.
fn get_index_type_name(clean_type: &clean::Type) -> Option<String> {
    match *clean_type {
        clean::ResolvedPath { ref path, .. } => {
            // The last path segment is the type's unqualified name.
            let segments = &path.segments;
            Some(segments[segments.len() - 1].name.clone())
        },
        clean::Generic(ref s) => Some(s.clone()),
        clean::Primitive(ref p) => Some(format!("{:?}", p)),
        // Look through references to the referent's name.
        clean::BorrowedRef { ref type_, .. } => get_index_type_name(type_),
        // FIXME: add all from clean::Type.
_ => None } } pub fn cache() -> Arc<Cache> { CACHE_KEY.with(|c| c.borrow().clone()) } #[cfg(test)] #[test] fn test_unique_id() { let input = ["foo", "examples", "examples", "method.into_iter","examples", "method.into_iter", "foo", "main", "search", "methods", "examples", "method.into_iter", "assoc_type.Item", "assoc_type.Item"]; let expected = ["foo", "examples", "examples-1", "method.into_iter", "examples-2", "method.into_iter-1", "foo-1", "main-1", "search-1", "methods-1", "examples-3", "method.into_iter-2", "assoc_type.Item", "assoc_type.Item-1"]; let test = || { let actual: Vec<String> = input.iter().map(|s| derive_id(s.to_string())).collect(); assert_eq!(&actual[..], expected); }; test(); reset_ids(true); test(); } #[cfg(test)] #[test] fn test_name_key() { assert_eq!(name_key("0"), ("", 0, 1)); assert_eq!(name_key("123"), ("", 123, 0)); assert_eq!(name_key("Fruit"), ("Fruit", 0, 0)); assert_eq!(name_key("Fruit0"), ("Fruit", 0, 1)); assert_eq!(name_key("Fruit0000"), ("Fruit", 0, 4)); assert_eq!(name_key("Fruit01"), ("Fruit", 1, 1)); assert_eq!(name_key("Fruit10"), ("Fruit", 10, 0)); assert_eq!(name_key("Fruit123"), ("Fruit", 123, 0)); } #[cfg(test)] #[test] fn test_name_sorting() { let names = ["Apple", "Banana", "Fruit", "Fruit0", "Fruit00", "Fruit1", "Fruit01", "Fruit2", "Fruit02", "Fruit20", "Fruit100", "Pear"]; let mut sorted = names.to_owned(); sorted.sort_by_key(|&s| name_key(s)); assert_eq!(names, sorted); } #[cfg(test)] #[test] fn test_match_non_whitespace() { assert!(match_non_whitespace("", "")); assert!(match_non_whitespace(" ", "")); assert!(match_non_whitespace("", " ")); assert!(match_non_whitespace("a", "a")); assert!(match_non_whitespace(" a ", "a")); assert!(match_non_whitespace("a", " a")); assert!(match_non_whitespace("abc", "abc")); assert!(match_non_whitespace("abc", " abc ")); assert!(match_non_whitespace("abc ", "abc")); assert!(match_non_whitespace("abc xyz", "abc xyz")); assert!(match_non_whitespace("abc xyz", "abc\nxyz")); 
assert!(match_non_whitespace("abc xyz", "abcxyz")); assert!(match_non_whitespace("abcxyz", "abc xyz")); assert!(match_non_whitespace("abc xyz ", " abc xyz\n")); assert!(!match_non_whitespace("a", "b")); assert!(!match_non_whitespace(" a ", "c")); assert!(!match_non_whitespace("a", " aa")); assert!(!match_non_whitespace("abc", "ac")); assert!(!match_non_whitespace("abc", " adc ")); assert!(!match_non_whitespace("abc ", "abca")); assert!(!match_non_whitespace("abc xyz", "abc xy")); assert!(!match_non_whitespace("abc xyz", "bc\nxyz")); assert!(!match_non_whitespace("abc xyz", "abc.xyz")); assert!(!match_non_whitespace("abcxyz", "abc.xyz")); assert!(!match_non_whitespace("abc xyz ", " abc xyz w")); } rustdoc: Don't counts ids twice when using --enable-commonmark // Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Rustdoc's HTML Rendering module //! //! This modules contains the bulk of the logic necessary for rendering a //! rustdoc `clean::Crate` instance to a set of static HTML pages. This //! rendering process is largely driven by the `format!` syntax extension to //! perform all I/O into files and streams. //! //! The rendering process is largely driven by the `Context` and `Cache` //! structures. The cache is pre-populated by crawling the crate in question, //! and then it is shared among the various rendering threads. The cache is meant //! to be a fairly large structure not implementing `Clone` (because it's shared //! among threads). The context, however, should be a lightweight structure. This //! 
is cloned per-thread and contains information about what is currently being //! rendered. //! //! In order to speed up rendering (mostly because of markdown rendering), the //! rendering process has been parallelized. This parallelization is only //! exposed through the `crate` method on the context, and then also from the //! fact that the shared cache is stored in TLS (and must be accessed as such). //! //! In addition to rendering the crate itself, this module is also responsible //! for creating the corresponding search index and source file renderings. //! These threads are not parallelized (they haven't been a bottleneck yet), and //! both occur before the crate is rendered. pub use self::ExternalLocation::*; use std::ascii::AsciiExt; use std::cell::RefCell; use std::cmp::Ordering; use std::collections::BTreeMap; use std::default::Default; use std::error; use std::fmt::{self, Display, Formatter, Write as FmtWrite}; use std::fs::{self, File, OpenOptions}; use std::io::prelude::*; use std::io::{self, BufWriter, BufReader}; use std::iter::repeat; use std::mem; use std::path::{PathBuf, Path, Component}; use std::str; use std::sync::Arc; use externalfiles::ExternalHtml; use serialize::json::{ToJson, Json, as_json}; use syntax::{abi, ast}; use syntax::feature_gate::UnstableFeatures; use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId}; use rustc::middle::privacy::AccessLevels; use rustc::middle::stability; use rustc::hir; use rustc::util::nodemap::{FxHashMap, FxHashSet}; use rustc::session::config::nightly_options::is_nightly_build; use rustc_data_structures::flock; use clean::{self, AttributesExt, GetDefId, SelfTy, Mutability, Span}; use doctree; use fold::DocFolder; use html::escape::Escape; use html::format::{ConstnessSpace}; use html::format::{TyParamBounds, WhereClause, href, AbiSpace}; use html::format::{VisSpace, Method, UnsafetySpace, MutableSpace}; use html::format::fmt_impl_for_trait_page; use html::item_type::ItemType; use html::markdown::{self, 
Markdown, MarkdownHtml, MarkdownSummaryLine, RenderType}; use html::{highlight, layout}; use html_diff; /// A pair of name and its optional document. pub type NameDoc = (String, Option<String>); /// Major driving force in all rustdoc rendering. This contains information /// about where in the tree-like hierarchy rendering is occurring and controls /// how the current page is being rendered. /// /// It is intended that this context is a lightweight object which can be fairly /// easily cloned because it is cloned per work-job (about once per item in the /// rustdoc tree). #[derive(Clone)] pub struct Context { /// Current hierarchy of components leading down to what's currently being /// rendered pub current: Vec<String>, /// The current destination folder of where HTML artifacts should be placed. /// This changes as the context descends into the module hierarchy. pub dst: PathBuf, /// A flag, which when `true`, will render pages which redirect to the /// real location of an item. This is used to allow external links to /// publicly reused items to redirect to the right location. pub render_redirect_pages: bool, pub shared: Arc<SharedContext>, pub render_type: RenderType, } pub struct SharedContext { /// The path to the crate root source minus the file name. /// Used for simplifying paths to the highlighted source code files. pub src_root: PathBuf, /// This describes the layout of each page, and is not modified after /// creation of the context (contains info like the favicon and added html). pub layout: layout::Layout, /// This flag indicates whether [src] links should be generated or not. If /// the source files are present in the html rendering, then this will be /// `true`. pub include_sources: bool, /// The local file sources we've emitted and their respective url-paths. pub local_sources: FxHashMap<PathBuf, String>, /// All the passes that were run on this crate. 
pub passes: FxHashSet<String>, /// The base-URL of the issue tracker for when an item has been tagged with /// an issue number. pub issue_tracker_base_url: Option<String>, /// The given user css file which allow to customize the generated /// documentation theme. pub css_file_extension: Option<PathBuf>, /// Warnings for the user if rendering would differ using different markdown /// parsers. pub markdown_warnings: RefCell<Vec<(Span, String, Vec<html_diff::Difference>)>>, } /// Indicates where an external crate can be found. pub enum ExternalLocation { /// Remote URL root of the external crate Remote(String), /// This external crate can be found in the local doc/ folder Local, /// The external crate could not be found. Unknown, } /// Metadata about an implementor of a trait. pub struct Implementor { pub def_id: DefId, pub stability: Option<clean::Stability>, pub impl_: clean::Impl, } /// Metadata about implementations for a type. #[derive(Clone)] pub struct Impl { pub impl_item: clean::Item, } impl Impl { fn inner_impl(&self) -> &clean::Impl { match self.impl_item.inner { clean::ImplItem(ref impl_) => impl_, _ => panic!("non-impl item found in impl") } } fn trait_did(&self) -> Option<DefId> { self.inner_impl().trait_.def_id() } } #[derive(Debug)] pub struct Error { file: PathBuf, error: io::Error, } impl error::Error for Error { fn description(&self) -> &str { self.error.description() } } impl Display for Error { fn fmt(&self, f: &mut Formatter) -> fmt::Result { write!(f, "\"{}\": {}", self.file.display(), self.error) } } impl Error { pub fn new(e: io::Error, file: &Path) -> Error { Error { file: file.to_path_buf(), error: e, } } } macro_rules! try_err { ($e:expr, $file:expr) => ({ match $e { Ok(e) => e, Err(e) => return Err(Error::new(e, $file)), } }) } /// This cache is used to store information about the `clean::Crate` being /// rendered in order to provide more useful documentation. 
This contains /// information like all implementors of a trait, all traits a type implements, /// documentation for all known traits, etc. /// /// This structure purposefully does not implement `Clone` because it's intended /// to be a fairly large and expensive structure to clone. Instead this adheres /// to `Send` so it may be stored in a `Arc` instance and shared among the various /// rendering threads. #[derive(Default)] pub struct Cache { /// Mapping of typaram ids to the name of the type parameter. This is used /// when pretty-printing a type (so pretty printing doesn't have to /// painfully maintain a context like this) pub typarams: FxHashMap<DefId, String>, /// Maps a type id to all known implementations for that type. This is only /// recognized for intra-crate `ResolvedPath` types, and is used to print /// out extra documentation on the page of an enum/struct. /// /// The values of the map are a list of implementations and documentation /// found on that implementation. pub impls: FxHashMap<DefId, Vec<Impl>>, /// Maintains a mapping of local crate node ids to the fully qualified name /// and "short type description" of that node. This is used when generating /// URLs when a type is being linked to. External paths are not located in /// this map because the `External` type itself has all the information /// necessary. pub paths: FxHashMap<DefId, (Vec<String>, ItemType)>, /// Similar to `paths`, but only holds external paths. This is only used for /// generating explicit hyperlinks to other crates. pub external_paths: FxHashMap<DefId, (Vec<String>, ItemType)>, /// This map contains information about all known traits of this crate. /// Implementations of a crate should inherit the documentation of the /// parent trait if no extra documentation is specified, and default methods /// should show up in documentation about trait implementations. 
pub traits: FxHashMap<DefId, clean::Trait>, /// When rendering traits, it's often useful to be able to list all /// implementors of the trait, and this mapping is exactly, that: a mapping /// of trait ids to the list of known implementors of the trait pub implementors: FxHashMap<DefId, Vec<Implementor>>, /// Cache of where external crate documentation can be found. pub extern_locations: FxHashMap<CrateNum, (String, PathBuf, ExternalLocation)>, /// Cache of where documentation for primitives can be found. pub primitive_locations: FxHashMap<clean::PrimitiveType, DefId>, // Note that external items for which `doc(hidden)` applies to are shown as // non-reachable while local items aren't. This is because we're reusing // the access levels from crateanalysis. pub access_levels: Arc<AccessLevels<DefId>>, // Private fields only used when initially crawling a crate to build a cache stack: Vec<String>, parent_stack: Vec<DefId>, parent_is_trait_impl: bool, search_index: Vec<IndexItem>, stripped_mod: bool, deref_trait_did: Option<DefId>, deref_mut_trait_did: Option<DefId>, owned_box_did: Option<DefId>, // In rare case where a structure is defined in one module but implemented // in another, if the implementing module is parsed before defining module, // then the fully qualified name of the structure isn't presented in `paths` // yet when its implementation methods are being indexed. Caches such methods // and their parent id here and indexes them at the end of crate parsing. orphan_impl_items: Vec<(DefId, clean::Item)>, } /// Temporary storage for data obtained during `RustdocVisitor::clean()`. /// Later on moved into `CACHE_KEY`. 
#[derive(Default)] pub struct RenderInfo { pub inlined: FxHashSet<DefId>, pub external_paths: ::core::ExternalPaths, pub external_typarams: FxHashMap<DefId, String>, pub deref_trait_did: Option<DefId>, pub deref_mut_trait_did: Option<DefId>, pub owned_box_did: Option<DefId>, } /// Helper struct to render all source code to HTML pages struct SourceCollector<'a> { scx: &'a mut SharedContext, /// Root destination to place all HTML output into dst: PathBuf, } /// Wrapper struct to render the source code of a file. This will do things like /// adding line numbers to the left-hand side. struct Source<'a>(&'a str); // Helper structs for rendering items/sidebars and carrying along contextual // information #[derive(Copy, Clone)] struct Item<'a> { cx: &'a Context, item: &'a clean::Item, } struct Sidebar<'a> { cx: &'a Context, item: &'a clean::Item, } /// Struct representing one entry in the JS search index. These are all emitted /// by hand to a large JS file at the end of cache-creation. struct IndexItem { ty: ItemType, name: String, path: String, desc: String, parent: Option<DefId>, parent_idx: Option<usize>, search_type: Option<IndexItemFunctionType>, } impl ToJson for IndexItem { fn to_json(&self) -> Json { assert_eq!(self.parent.is_some(), self.parent_idx.is_some()); let mut data = Vec::with_capacity(6); data.push((self.ty as usize).to_json()); data.push(self.name.to_json()); data.push(self.path.to_json()); data.push(self.desc.to_json()); data.push(self.parent_idx.to_json()); data.push(self.search_type.to_json()); Json::Array(data) } } /// A type used for the search index. struct Type { name: Option<String>, } impl ToJson for Type { fn to_json(&self) -> Json { match self.name { Some(ref name) => { let mut data = BTreeMap::new(); data.insert("name".to_owned(), name.to_json()); Json::Object(data) }, None => Json::Null } } } /// Full type of functions/methods in the search index. 
/// The function-signature data stored for one search-index entry.
struct IndexItemFunctionType {
    inputs: Vec<Type>,
    output: Option<Type>
}

impl ToJson for IndexItemFunctionType {
    fn to_json(&self) -> Json {
        // If we couldn't figure out a type, just write `null`.
        if self.inputs.iter().chain(self.output.iter()).any(|ref i| i.name.is_none()) {
            Json::Null
        } else {
            let mut data = BTreeMap::new();
            data.insert("inputs".to_owned(), self.inputs.to_json());
            data.insert("output".to_owned(), self.output.to_json());
            Json::Object(data)
        }
    }
}

// TLS keys used to carry information around during rendering.
thread_local!(static CACHE_KEY: RefCell<Arc<Cache>> = Default::default());
thread_local!(pub static CURRENT_LOCATION_KEY: RefCell<Vec<String>> = RefCell::new(Vec::new()));
// Tracks the `id="..."` attribute values already emitted on the current page,
// mapped to the counter `derive_id` uses to keep later ids unique.
thread_local!(static USED_ID_MAP: RefCell<FxHashMap<String, usize>> = RefCell::new(init_ids()));

// Ids reserved by the static page chrome; generated content must never reuse
// them, so each is pre-registered with a use-count of 1.
fn init_ids() -> FxHashMap<String, usize> {
    [
     "main",
     "search",
     "help",
     "TOC",
     "render-detail",
     "associated-types",
     "associated-const",
     "required-methods",
     "provided-methods",
     "implementors",
     "implementors-list",
     "methods",
     "deref-methods",
     "implementations",
    ].into_iter().map(|id| (String::from(*id), 1)).collect()
}

/// This method resets the local table of used ID attributes. This is typically
/// used at the beginning of rendering an entire HTML page to reset from the
/// previous state (if any).
pub fn reset_ids(embedded: bool) { USED_ID_MAP.with(|s| { *s.borrow_mut() = if embedded { init_ids() } else { FxHashMap() }; }); } pub fn derive_id(candidate: String) -> String { USED_ID_MAP.with(|map| { let id = match map.borrow_mut().get_mut(&candidate) { None => candidate, Some(a) => { let id = format!("{}-{}", candidate, *a); *a += 1; id } }; map.borrow_mut().insert(id.clone(), 1); id }) } /// Generates the documentation for `crate` into the directory `dst` pub fn run(mut krate: clean::Crate, external_html: &ExternalHtml, playground_url: Option<String>, dst: PathBuf, passes: FxHashSet<String>, css_file_extension: Option<PathBuf>, renderinfo: RenderInfo, render_type: RenderType) -> Result<(), Error> { let src_root = match krate.src.parent() { Some(p) => p.to_path_buf(), None => PathBuf::new(), }; let mut scx = SharedContext { src_root, passes, include_sources: true, local_sources: FxHashMap(), issue_tracker_base_url: None, layout: layout::Layout { logo: "".to_string(), favicon: "".to_string(), external_html: external_html.clone(), krate: krate.name.clone(), }, css_file_extension: css_file_extension.clone(), markdown_warnings: RefCell::new(vec![]), }; // If user passed in `--playground-url` arg, we fill in crate name here if let Some(url) = playground_url { markdown::PLAYGROUND.with(|slot| { *slot.borrow_mut() = Some((Some(krate.name.clone()), url)); }); } // Crawl the crate attributes looking for attributes which control how we're // going to emit HTML if let Some(attrs) = krate.module.as_ref().map(|m| &m.attrs) { for attr in attrs.lists("doc") { let name = attr.name().map(|s| s.as_str()); match (name.as_ref().map(|s| &s[..]), attr.value_str()) { (Some("html_favicon_url"), Some(s)) => { scx.layout.favicon = s.to_string(); } (Some("html_logo_url"), Some(s)) => { scx.layout.logo = s.to_string(); } (Some("html_playground_url"), Some(s)) => { markdown::PLAYGROUND.with(|slot| { let name = krate.name.clone(); *slot.borrow_mut() = Some((Some(name), s.to_string())); }); 
} (Some("issue_tracker_base_url"), Some(s)) => { scx.issue_tracker_base_url = Some(s.to_string()); } (Some("html_no_source"), None) if attr.is_word() => { scx.include_sources = false; } _ => {} } } } try_err!(fs::create_dir_all(&dst), &dst); krate = render_sources(&dst, &mut scx, krate)?; let cx = Context { current: Vec::new(), dst, render_redirect_pages: false, shared: Arc::new(scx), render_type, }; // Crawl the crate to build various caches used for the output let RenderInfo { inlined: _, external_paths, external_typarams, deref_trait_did, deref_mut_trait_did, owned_box_did, } = renderinfo; let external_paths = external_paths.into_iter() .map(|(k, (v, t))| (k, (v, ItemType::from(t)))) .collect(); let mut cache = Cache { impls: FxHashMap(), external_paths, paths: FxHashMap(), implementors: FxHashMap(), stack: Vec::new(), parent_stack: Vec::new(), search_index: Vec::new(), parent_is_trait_impl: false, extern_locations: FxHashMap(), primitive_locations: FxHashMap(), stripped_mod: false, access_levels: krate.access_levels.clone(), orphan_impl_items: Vec::new(), traits: mem::replace(&mut krate.external_traits, FxHashMap()), deref_trait_did, deref_mut_trait_did, owned_box_did, typarams: external_typarams, }; // Cache where all our extern crates are located for &(n, ref e) in &krate.externs { let src_root = match Path::new(&e.src).parent() { Some(p) => p.to_path_buf(), None => PathBuf::new(), }; cache.extern_locations.insert(n, (e.name.clone(), src_root, extern_location(e, &cx.dst))); let did = DefId { krate: n, index: CRATE_DEF_INDEX }; cache.external_paths.insert(did, (vec![e.name.to_string()], ItemType::Module)); } // Cache where all known primitives have their documentation located. // // Favor linking to as local extern as possible, so iterate all crates in // reverse topological order. 
for &(_, ref e) in krate.externs.iter().rev() { for &(def_id, prim, _) in &e.primitives { cache.primitive_locations.insert(prim, def_id); } } for &(def_id, prim, _) in &krate.primitives { cache.primitive_locations.insert(prim, def_id); } cache.stack.push(krate.name.clone()); krate = cache.fold_crate(krate); // Build our search index let index = build_index(&krate, &mut cache); // Freeze the cache now that the index has been built. Put an Arc into TLS // for future parallelization opportunities let cache = Arc::new(cache); CACHE_KEY.with(|v| *v.borrow_mut() = cache.clone()); CURRENT_LOCATION_KEY.with(|s| s.borrow_mut().clear()); write_shared(&cx, &krate, &*cache, index)?; let scx = cx.shared.clone(); // And finally render the whole crate's documentation let result = cx.krate(krate); let markdown_warnings = scx.markdown_warnings.borrow(); if !markdown_warnings.is_empty() { println!("WARNING: documentation for this crate may be rendered \ differently using the new Pulldown renderer."); println!(" See https://github.com/rust-lang/rust/issues/44229 for details."); for &(ref span, ref text, ref diffs) in &*markdown_warnings { println!("WARNING: rendering difference in `{}`", concise_str(text)); println!(" --> {}:{}:{}", span.filename, span.loline, span.locol); for d in diffs { render_difference(d); } } } result } // A short, single-line view of `s`. fn concise_str(s: &str) -> String { if s.contains('\n') { return format!("{}...", s.lines().next().expect("Impossible! We just found a newline")); } if s.len() > 70 { return format!("{} ... {}", &s[..50], &s[s.len()-20..]); } s.to_owned() } // Returns short versions of s1 and s2, starting from where the strings differ. 
// Returns short versions of `s1` and `s2`, starting from the first byte where
// the (trimmed) strings differ. Used to keep the Pulldown/Hoedown
// rendering-difference warnings readable.
fn concise_compared_strs(s1: &str, s2: &str) -> (String, String) {
    let s1 = s1.trim();
    let s2 = s2.trim();

    // Both strings are already short single lines: show them verbatim.
    if !s1.contains('\n') && !s2.contains('\n') && s1.len() <= 70 && s2.len() <= 70 {
        return (s1.to_owned(), s2.to_owned());
    }

    // Find the byte offset of the first differing char. Advancing by
    // `len_utf8` keeps `start_byte` on a char boundary in both strings,
    // since every preceding char was identical.
    let mut start_byte = 0;
    for (c1, c2) in s1.chars().zip(s2.chars()) {
        if c1 != c2 {
            break;
        }

        start_byte += c1.len_utf8();
    }

    // No common prefix: just summarize each string on its own.
    if start_byte == 0 {
        return (concise_str(s1), concise_str(s2));
    }

    // Drop the common prefix and mark the elision with "...".
    let s1 = &s1[start_byte..];
    let s2 = &s2[start_byte..];

    (format!("...{}", concise_str(s1)), format!("...{}", concise_str(s2)))
}

// Prints a human-readable, indented description of a single difference
// reported by `html_diff` between the Hoedown and Pulldown renderings.
fn render_difference(diff: &html_diff::Difference) {
    match *diff {
        html_diff::Difference::NodeType { ref elem, ref opposite_elem } => {
            println!(" {} Types differ: expected: `{}`, found: `{}`",
                     elem.path, elem.element_name, opposite_elem.element_name);
        }
        html_diff::Difference::NodeName { ref elem, ref opposite_elem } => {
            println!(" {} Tags differ: expected: `{}`, found: `{}`",
                     elem.path, elem.element_name, opposite_elem.element_name);
        }
        html_diff::Difference::NodeAttributes { ref elem,
                                                ref elem_attributes,
                                                ref opposite_elem_attributes,
                                                .. } => {
            println!(" {} Attributes differ in `{}`: expected: `{:?}`, found: `{:?}`",
                     elem.path, elem.element_name, elem_attributes, opposite_elem_attributes);
        }
        html_diff::Difference::NodeText { ref elem, ref elem_text, ref opposite_elem_text, .. } => {
            // Only show the differing suffix of long text nodes.
            let (s1, s2) = concise_compared_strs(elem_text, opposite_elem_text);
            println!(" {} Text differs:\n expected: `{}`\n found: `{}`",
                     elem.path, s1, s2);
        }
        html_diff::Difference::NotPresent { ref elem, ref opposite_elem } => {
            if let Some(ref elem) = *elem {
                println!(" {} One element is missing: expected: `{}`",
                         elem.path, elem.element_name);
            } else if let Some(ref elem) = *opposite_elem {
                if elem.element_name.is_empty() {
                    // Text-only node (no tag name): print its content instead.
                    println!(" {} Unexpected element: `{}`",
                             elem.path, concise_str(&elem.element_content));
                } else {
                    println!(" {} Unexpected element `{}`: found: `{}`",
                             elem.path, elem.element_name,
                             concise_str(&elem.element_content));
                }
            }
        }
    }
}

/// Build the search index from the collected metadata
///
/// Consumes `cache.search_index`, resolves orphan impl items against the
/// now-complete `cache.paths`, compacts parent `NodeId`s into small sequential
/// path ids, and serializes everything into a single line of JavaScript of the
/// form `searchIndex[<crate>] = {...};`.
fn build_index(krate: &clean::Crate, cache: &mut Cache) -> String {
    let mut nodeid_to_pathid = FxHashMap();
    let mut crate_items = Vec::with_capacity(cache.search_index.len());
    let mut crate_paths = Vec::<Json>::new();

    // Destructure so the borrows of the individual fields are disjoint.
    let Cache { ref mut search_index,
                ref orphan_impl_items,
                ref mut paths, .. } = *cache;

    // Attach all orphan items to the type's definition if the type
    // has since been learned.
    for &(did, ref item) in orphan_impl_items {
        if let Some(&(ref fqp, _)) = paths.get(&did) {
            search_index.push(IndexItem {
                ty: item.type_(),
                name: item.name.clone().unwrap(),
                path: fqp[..fqp.len() - 1].join("::"),
                desc: plain_summary_line(item.doc_value()),
                parent: Some(did),
                parent_idx: None,
                search_type: get_index_search_type(&item),
            });
        }
    }

    // Reduce `NodeId` in paths into smaller sequential numbers,
    // and prune the paths that do not appear in the index.
    let mut lastpath = String::new();
    let mut lastpathid = 0usize;

    for item in search_index {
        item.parent_idx = item.parent.map(|nodeid| {
            if nodeid_to_pathid.contains_key(&nodeid) {
                *nodeid_to_pathid.get(&nodeid).unwrap()
            } else {
                // First time we see this parent: assign it the next path id
                // and record (item-type, name) in the paths table.
                let pathid = lastpathid;
                nodeid_to_pathid.insert(nodeid, pathid);
                lastpathid += 1;

                let &(ref fqp, short) = paths.get(&nodeid).unwrap();
                crate_paths.push(((short as usize), fqp.last().unwrap().clone()).to_json());
                pathid
            }
        });

        // Omit the parent path if it is same to that of the prior item.
        if lastpath == item.path {
            item.path.clear();
        } else {
            lastpath = item.path.clone();
        }
        crate_items.push(item.to_json());
    }

    // One-line summary of the crate root module's docs, if any.
    let crate_doc = krate.module.as_ref().map(|module| {
        plain_summary_line(module.doc_value())
    }).unwrap_or(String::new());

    let mut crate_data = BTreeMap::new();
    crate_data.insert("doc".to_owned(), Json::String(crate_doc));
    crate_data.insert("items".to_owned(), Json::Array(crate_items));
    crate_data.insert("paths".to_owned(), Json::Array(crate_paths));

    // Collect the index into a string
    format!("searchIndex[{}] = {};", as_json(&krate.name), Json::Object(crate_data))
}

/// Writes the files shared by every crate documented into `cx.dst`:
/// static JS/CSS/font assets, the merged `search-index.js`, and the
/// per-trait `implementors/*.js` files. Takes a directory lock since
/// multiple rustdoc processes may target the same output directory.
fn write_shared(cx: &Context,
                krate: &clean::Crate,
                cache: &Cache,
                search_index: String) -> Result<(), Error> {
    // Write out the shared files. Note that these are shared among all rustdoc
    // docs placed in the output directory, so this needs to be a synchronized
    // operation with respect to all other rustdocs running around.
    try_err!(fs::create_dir_all(&cx.dst), &cx.dst);
    let _lock = flock::Lock::panicking_new(&cx.dst.join(".lock"), true, true, true);

    // Add all the static files. These may already exist, but we just
    // overwrite them anyway to make sure that they're fresh and up-to-date.
    write(cx.dst.join("main.js"), include_bytes!("static/main.js"))?;
    write(cx.dst.join("rustdoc.css"), include_bytes!("static/rustdoc.css"))?;
    write(cx.dst.join("main.css"), include_bytes!("static/styles/main.css"))?;
    // User-supplied theme CSS is copied (not included at compile time).
    if let Some(ref css) = cx.shared.css_file_extension {
        let mut content = String::new();
        let css = css.as_path();
        let mut f = try_err!(File::open(css), css);

        try_err!(f.read_to_string(&mut content), css);
        let css = cx.dst.join("theme.css");
        let css = css.as_path();
        let mut f = try_err!(File::create(css), css);
        try_err!(write!(f, "{}", &content), css);
    }
    write(cx.dst.join("normalize.css"), include_bytes!("static/normalize.css"))?;
    write(cx.dst.join("FiraSans-Regular.woff"),
          include_bytes!("static/FiraSans-Regular.woff"))?;
    write(cx.dst.join("FiraSans-Medium.woff"),
          include_bytes!("static/FiraSans-Medium.woff"))?;
    write(cx.dst.join("FiraSans-LICENSE.txt"),
          include_bytes!("static/FiraSans-LICENSE.txt"))?;
    write(cx.dst.join("Heuristica-Italic.woff"),
          include_bytes!("static/Heuristica-Italic.woff"))?;
    write(cx.dst.join("Heuristica-LICENSE.txt"),
          include_bytes!("static/Heuristica-LICENSE.txt"))?;
    write(cx.dst.join("SourceSerifPro-Regular.woff"),
          include_bytes!("static/SourceSerifPro-Regular.woff"))?;
    write(cx.dst.join("SourceSerifPro-Bold.woff"),
          include_bytes!("static/SourceSerifPro-Bold.woff"))?;
    write(cx.dst.join("SourceSerifPro-LICENSE.txt"),
          include_bytes!("static/SourceSerifPro-LICENSE.txt"))?;
    write(cx.dst.join("SourceCodePro-Regular.woff"),
          include_bytes!("static/SourceCodePro-Regular.woff"))?;
    write(cx.dst.join("SourceCodePro-Semibold.woff"),
          include_bytes!("static/SourceCodePro-Semibold.woff"))?;
    write(cx.dst.join("SourceCodePro-LICENSE.txt"),
          include_bytes!("static/SourceCodePro-LICENSE.txt"))?;
    write(cx.dst.join("LICENSE-MIT.txt"),
          include_bytes!("static/LICENSE-MIT.txt"))?;
    write(cx.dst.join("LICENSE-APACHE.txt"),
          include_bytes!("static/LICENSE-APACHE.txt"))?;
    write(cx.dst.join("COPYRIGHT.txt"),
          include_bytes!("static/COPYRIGHT.txt"))?;

    // Reads back lines of an existing shared JS file (`search-index.js` or an
    // implementors file), keeping every crate's entry EXCEPT the one for
    // `krate` — the caller re-appends a fresh entry for it afterwards.
    fn collect(path: &Path, krate: &str, key: &str) -> io::Result<Vec<String>> {
        let mut ret = Vec::new();
        if path.exists() {
            for line in BufReader::new(File::open(path)?).lines() {
                let line = line?;
                if !line.starts_with(key) {
                    continue;
                }
                if line.starts_with(&format!(r#"{}["{}"]"#, key, krate)) {
                    continue;
                }
                ret.push(line.to_string());
            }
        }
        Ok(ret)
    }

    // Update the search index
    let dst = cx.dst.join("search-index.js");
    let mut all_indexes = try_err!(collect(&dst, &krate.name, "searchIndex"), &dst);
    all_indexes.push(search_index);
    // Sort the indexes by crate so the file will be generated identically even
    // with rustdoc running in parallel.
    all_indexes.sort();
    let mut w = try_err!(File::create(&dst), &dst);
    try_err!(writeln!(&mut w, "var searchIndex = {{}};"), &dst);
    for index in &all_indexes {
        try_err!(writeln!(&mut w, "{}", *index), &dst);
    }
    try_err!(writeln!(&mut w, "initSearch(searchIndex);"), &dst);

    // Update the list of all implementors for traits
    let dst = cx.dst.join("implementors");
    for (&did, imps) in &cache.implementors {
        // Private modules can leak through to this phase of rustdoc, which
        // could contain implementations for otherwise private types. In some
        // rare cases we could find an implementation for an item which wasn't
        // indexed, so we just skip this step in that case.
        //
        // FIXME: this is a vague explanation for why this can't be a `get`, in
        //        theory it should be...
        let &(ref remote_path, remote_item_type) = match cache.paths.get(&did) {
            Some(p) => p,
            None => match cache.external_paths.get(&did) {
                Some(p) => p,
                None => continue,
            }
        };

        let mut have_impls = false;
        let mut implementors = format!(r#"implementors["{}"] = ["#, krate.name);
        for imp in imps {
            // If the trait and implementation are in the same crate, then
            // there's no need to emit information about it (there's inlining
            // going on). If they're in different crates then the crate defining
            // the trait will be interested in our implementation.
            if imp.def_id.krate == did.krate { continue }
            // If the implementation is from another crate then that crate
            // should add it.
            if !imp.def_id.is_local() { continue }
            have_impls = true;
            write!(implementors, "{},", as_json(&imp.impl_.to_string())).unwrap();
        }
        implementors.push_str("];");

        // Only create a js file if we have impls to add to it. If the trait is
        // documented locally though we always create the file to avoid dead
        // links.
        if !have_impls && !cache.paths.contains_key(&did) {
            continue;
        }

        // Mirror the trait's module path under `implementors/`.
        let mut mydst = dst.clone();
        for part in &remote_path[..remote_path.len() - 1] {
            mydst.push(part);
        }
        try_err!(fs::create_dir_all(&mydst), &mydst);
        mydst.push(&format!("{}.{}.js",
                            remote_item_type.css_class(),
                            remote_path[remote_path.len() - 1]));

        let mut all_implementors = try_err!(collect(&mydst, &krate.name, "implementors"),
                                            &mydst);
        all_implementors.push(implementors);
        // Sort the implementors by crate so the file will be generated
        // identically even with rustdoc running in parallel.
        all_implementors.sort();

        let mut f = try_err!(File::create(&mydst), &mydst);
        try_err!(writeln!(&mut f, "(function() {{var implementors = {{}};"), &mydst);
        for implementor in &all_implementors {
            try_err!(writeln!(&mut f, "{}", *implementor), &mydst);
        }
        try_err!(writeln!(&mut f, "{}", r"
            if (window.register_implementors) {
                window.register_implementors(implementors);
            } else {
                window.pending_implementors = implementors;
            }
        "), &mydst);
        try_err!(writeln!(&mut f, r"}})()"), &mydst);
    }
    Ok(())
}

/// Renders the crate's source files into `dst/src/<krate>/` by folding the
/// crate through a `SourceCollector`; returns the (unchanged) crate so the
/// caller can keep using it.
fn render_sources(dst: &Path, scx: &mut SharedContext,
                  krate: clean::Crate) -> Result<clean::Crate, Error> {
    info!("emitting source files");
    let dst = dst.join("src").join(&krate.name);
    try_err!(fs::create_dir_all(&dst), &dst);
    let mut folder = SourceCollector {
        dst,
        scx,
    };
    Ok(folder.fold_crate(krate))
}

/// Writes the entire contents of a string to a destination, not attempting to
/// catch any errors.
fn write(dst: PathBuf, contents: &[u8]) -> Result<(), Error> { Ok(try_err!(try_err!(File::create(&dst), &dst).write_all(contents), &dst)) } /// Takes a path to a source file and cleans the path to it. This canonicalizes /// things like ".." to components which preserve the "top down" hierarchy of a /// static HTML tree. Each component in the cleaned path will be passed as an /// argument to `f`. The very last component of the path (ie the file name) will /// be passed to `f` if `keep_filename` is true, and ignored otherwise. // FIXME (#9639): The closure should deal with &[u8] instead of &str // FIXME (#9639): This is too conservative, rejecting non-UTF-8 paths fn clean_srcpath<F>(src_root: &Path, p: &Path, keep_filename: bool, mut f: F) where F: FnMut(&str), { // make it relative, if possible let p = p.strip_prefix(src_root).unwrap_or(p); let mut iter = p.components().peekable(); while let Some(c) = iter.next() { if !keep_filename && iter.peek().is_none() { break; } match c { Component::ParentDir => f("up"), Component::Normal(c) => f(c.to_str().unwrap()), _ => continue, } } } /// Attempts to find where an external crate is located, given that we're /// rendering in to the specified source destination. fn extern_location(e: &clean::ExternalCrate, dst: &Path) -> ExternalLocation { // See if there's documentation generated into the local directory let local_location = dst.join(&e.name); if local_location.is_dir() { return Local; } // Failing that, see if there's an attribute specifying where to find this // external crate e.attrs.lists("doc") .filter(|a| a.check_name("html_root_url")) .filter_map(|a| a.value_str()) .map(|url| { let mut url = url.to_string(); if !url.ends_with("/") { url.push('/') } Remote(url) }).next().unwrap_or(Unknown) // Well, at least we tried. 
} impl<'a> DocFolder for SourceCollector<'a> { fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> { // If we're including source files, and we haven't seen this file yet, // then we need to render it out to the filesystem. if self.scx.include_sources // skip all invalid spans && item.source.filename != "" // skip non-local items && item.def_id.is_local() // Macros from other libraries get special filenames which we can // safely ignore. && !(item.source.filename.starts_with("<") && item.source.filename.ends_with("macros>")) { // If it turns out that we couldn't read this file, then we probably // can't read any of the files (generating html output from json or // something like that), so just don't include sources for the // entire crate. The other option is maintaining this mapping on a // per-file basis, but that's probably not worth it... self.scx .include_sources = match self.emit_source(&item.source.filename) { Ok(()) => true, Err(e) => { println!("warning: source code was requested to be rendered, \ but processing `{}` had an error: {}", item.source.filename, e); println!(" skipping rendering of source code"); false } }; } self.fold_item_recur(item) } } impl<'a> SourceCollector<'a> { /// Renders the given filename into its corresponding HTML source file. fn emit_source(&mut self, filename: &str) -> io::Result<()> { let p = PathBuf::from(filename); if self.scx.local_sources.contains_key(&p) { // We've already emitted this source return Ok(()); } let mut contents = Vec::new(); File::open(&p).and_then(|mut f| f.read_to_end(&mut contents))?; let contents = str::from_utf8(&contents).unwrap(); // Remove the utf-8 BOM if any let contents = if contents.starts_with("\u{feff}") { &contents[3..] 
} else { contents }; // Create the intermediate directories let mut cur = self.dst.clone(); let mut root_path = String::from("../../"); let mut href = String::new(); clean_srcpath(&self.scx.src_root, &p, false, |component| { cur.push(component); fs::create_dir_all(&cur).unwrap(); root_path.push_str("../"); href.push_str(component); href.push('/'); }); let mut fname = p.file_name().expect("source has no filename") .to_os_string(); fname.push(".html"); cur.push(&fname); href.push_str(&fname.to_string_lossy()); let mut w = BufWriter::new(File::create(&cur)?); let title = format!("{} -- source", cur.file_name().unwrap() .to_string_lossy()); let desc = format!("Source to the Rust file `{}`.", filename); let page = layout::Page { title: &title, css_class: "source", root_path: &root_path, description: &desc, keywords: BASIC_KEYWORDS, }; layout::render(&mut w, &self.scx.layout, &page, &(""), &Source(contents), self.scx.css_file_extension.is_some())?; w.flush()?; self.scx.local_sources.insert(p, href); Ok(()) } } impl DocFolder for Cache { fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> { // If this is a stripped module, // we don't want it or its children in the search index. let orig_stripped_mod = match item.inner { clean::StrippedItem(box clean::ModuleItem(..)) => { mem::replace(&mut self.stripped_mod, true) } _ => self.stripped_mod, }; // Register any generics to their corresponding string. This is used // when pretty-printing types. if let Some(generics) = item.inner.generics() { self.generics(generics); } // Propagate a trait method's documentation to all implementors of the // trait. if let clean::TraitItem(ref t) = item.inner { self.traits.entry(item.def_id).or_insert_with(|| t.clone()); } // Collect all the implementors of traits. 
if let clean::ImplItem(ref i) = item.inner { if let Some(did) = i.trait_.def_id() { self.implementors.entry(did).or_insert(vec![]).push(Implementor { def_id: item.def_id, stability: item.stability.clone(), impl_: i.clone(), }); } } // Index this method for searching later on. if let Some(ref s) = item.name { let (parent, is_inherent_impl_item) = match item.inner { clean::StrippedItem(..) => ((None, None), false), clean::AssociatedConstItem(..) | clean::TypedefItem(_, true) if self.parent_is_trait_impl => { // skip associated items in trait impls ((None, None), false) } clean::AssociatedTypeItem(..) | clean::TyMethodItem(..) | clean::StructFieldItem(..) | clean::VariantItem(..) => { ((Some(*self.parent_stack.last().unwrap()), Some(&self.stack[..self.stack.len() - 1])), false) } clean::MethodItem(..) | clean::AssociatedConstItem(..) => { if self.parent_stack.is_empty() { ((None, None), false) } else { let last = self.parent_stack.last().unwrap(); let did = *last; let path = match self.paths.get(&did) { // The current stack not necessarily has correlation // for where the type was defined. On the other // hand, `paths` always has the right // information if present. Some(&(ref fqp, ItemType::Trait)) | Some(&(ref fqp, ItemType::Struct)) | Some(&(ref fqp, ItemType::Union)) | Some(&(ref fqp, ItemType::Enum)) => Some(&fqp[..fqp.len() - 1]), Some(..) => Some(&*self.stack), None => None }; ((Some(*last), path), true) } } _ => ((None, Some(&*self.stack)), false) }; match parent { (parent, Some(path)) if is_inherent_impl_item || (!self.stripped_mod) => { debug_assert!(!item.is_stripped()); // A crate has a module at its root, containing all items, // which should not be indexed. The crate-item itself is // inserted later on when serializing the search-index. 
if item.def_id.index != CRATE_DEF_INDEX { self.search_index.push(IndexItem { ty: item.type_(), name: s.to_string(), path: path.join("::").to_string(), desc: plain_summary_line(item.doc_value()), parent, parent_idx: None, search_type: get_index_search_type(&item), }); } } (Some(parent), None) if is_inherent_impl_item => { // We have a parent, but we don't know where they're // defined yet. Wait for later to index this item. self.orphan_impl_items.push((parent, item.clone())); } _ => {} } } // Keep track of the fully qualified path for this item. let pushed = match item.name { Some(ref n) if !n.is_empty() => { self.stack.push(n.to_string()); true } _ => false, }; match item.inner { clean::StructItem(..) | clean::EnumItem(..) | clean::TypedefItem(..) | clean::TraitItem(..) | clean::FunctionItem(..) | clean::ModuleItem(..) | clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) | clean::ConstantItem(..) | clean::StaticItem(..) | clean::UnionItem(..) if !self.stripped_mod => { // Reexported items mean that the same id can show up twice // in the rustdoc ast that we're looking at. We know, // however, that a reexported item doesn't show up in the // `public_items` map, so we can skip inserting into the // paths map if there was already an entry present and we're // not a public item. if !self.paths.contains_key(&item.def_id) || self.access_levels.is_public(item.def_id) { self.paths.insert(item.def_id, (self.stack.clone(), item.type_())); } } // Link variants to their parent enum because pages aren't emitted // for each variant. clean::VariantItem(..) if !self.stripped_mod => { let mut stack = self.stack.clone(); stack.pop(); self.paths.insert(item.def_id, (stack, ItemType::Enum)); } clean::PrimitiveItem(..) 
if item.visibility.is_some() => { self.paths.insert(item.def_id, (self.stack.clone(), item.type_())); } _ => {} } // Maintain the parent stack let orig_parent_is_trait_impl = self.parent_is_trait_impl; let parent_pushed = match item.inner { clean::TraitItem(..) | clean::EnumItem(..) | clean::StructItem(..) | clean::UnionItem(..) => { self.parent_stack.push(item.def_id); self.parent_is_trait_impl = false; true } clean::ImplItem(ref i) => { self.parent_is_trait_impl = i.trait_.is_some(); match i.for_ { clean::ResolvedPath{ did, .. } => { self.parent_stack.push(did); true } ref t => { let prim_did = t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }); match prim_did { Some(did) => { self.parent_stack.push(did); true } None => false, } } } } _ => false }; // Once we've recursively found all the generics, hoard off all the // implementations elsewhere. let ret = self.fold_item_recur(item).and_then(|item| { if let clean::Item { inner: clean::ImplItem(_), .. } = item { // Figure out the id of this impl. This may map to a // primitive rather than always to a struct/enum. // Note: matching twice to restrict the lifetime of the `i` borrow. let did = if let clean::Item { inner: clean::ImplItem(ref i), .. } = item { match i.for_ { clean::ResolvedPath { did, .. } | clean::BorrowedRef { type_: box clean::ResolvedPath { did, .. }, .. 
} => { Some(did) } ref t => { t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }) } } } else { unreachable!() }; if let Some(did) = did { self.impls.entry(did).or_insert(vec![]).push(Impl { impl_item: item, }); } None } else { Some(item) } }); if pushed { self.stack.pop().unwrap(); } if parent_pushed { self.parent_stack.pop().unwrap(); } self.stripped_mod = orig_stripped_mod; self.parent_is_trait_impl = orig_parent_is_trait_impl; ret } } impl<'a> Cache { fn generics(&mut self, generics: &clean::Generics) { for typ in &generics.type_params { self.typarams.insert(typ.did, typ.name.clone()); } } } impl Context { /// String representation of how to get back to the root path of the 'doc/' /// folder in terms of a relative URL. fn root_path(&self) -> String { repeat("../").take(self.current.len()).collect::<String>() } /// Recurse in the directory structure and change the "root path" to make /// sure it always points to the top (relatively). fn recurse<T, F>(&mut self, s: String, f: F) -> T where F: FnOnce(&mut Context) -> T, { if s.is_empty() { panic!("Unexpected empty destination: {:?}", self.current); } let prev = self.dst.clone(); self.dst.push(&s); self.current.push(s); info!("Recursing into {}", self.dst.display()); let ret = f(self); info!("Recursed; leaving {}", self.dst.display()); // Go back to where we were at self.dst = prev; self.current.pop().unwrap(); ret } /// Main method for rendering a crate. /// /// This currently isn't parallelized, but it'd be pretty easy to add /// parallelization to this function. fn krate(self, mut krate: clean::Crate) -> Result<(), Error> { let mut item = match krate.module.take() { Some(i) => i, None => return Ok(()), }; item.name = Some(krate.name); // Render the crate documentation let mut work = vec![(self, item)]; while let Some((mut cx, item)) = work.pop() { cx.item(item, |cx, item| { work.push((cx.clone(), item)) })? 
} Ok(()) } fn render_item(&self, writer: &mut io::Write, it: &clean::Item, pushname: bool) -> io::Result<()> { // A little unfortunate that this is done like this, but it sure // does make formatting *a lot* nicer. CURRENT_LOCATION_KEY.with(|slot| { *slot.borrow_mut() = self.current.clone(); }); let mut title = if it.is_primitive() { // No need to include the namespace for primitive types String::new() } else { self.current.join("::") }; if pushname { if !title.is_empty() { title.push_str("::"); } title.push_str(it.name.as_ref().unwrap()); } title.push_str(" - Rust"); let tyname = it.type_().css_class(); let desc = if it.is_crate() { format!("API documentation for the Rust `{}` crate.", self.shared.layout.krate) } else { format!("API documentation for the Rust `{}` {} in crate `{}`.", it.name.as_ref().unwrap(), tyname, self.shared.layout.krate) }; let keywords = make_item_keywords(it); let page = layout::Page { css_class: tyname, root_path: &self.root_path(), title: &title, description: &desc, keywords: &keywords, }; reset_ids(true); if !self.render_redirect_pages { layout::render(writer, &self.shared.layout, &page, &Sidebar{ cx: self, item: it }, &Item{ cx: self, item: it }, self.shared.css_file_extension.is_some())?; } else { let mut url = self.root_path(); if let Some(&(ref names, ty)) = cache().paths.get(&it.def_id) { for name in &names[..names.len() - 1] { url.push_str(name); url.push_str("/"); } url.push_str(&item_path(ty, names.last().unwrap())); layout::redirect(writer, &url)?; } } Ok(()) } /// Non-parallelized version of rendering an item. This will take the input /// item, render its contents, and then invoke the specified closure with /// all sub-items which need to be rendered. /// /// The rendering driver uses this closure to queue up more work. fn item<F>(&mut self, item: clean::Item, mut f: F) -> Result<(), Error> where F: FnMut(&mut Context, clean::Item), { // Stripped modules survive the rustdoc passes (i.e. 
`strip-private`) // if they contain impls for public types. These modules can also // contain items such as publicly reexported structures. // // External crates will provide links to these structures, so // these modules are recursed into, but not rendered normally // (a flag on the context). if !self.render_redirect_pages { self.render_redirect_pages = item.is_stripped(); } if item.is_mod() { // modules are special because they add a namespace. We also need to // recurse into the items of the module as well. let name = item.name.as_ref().unwrap().to_string(); let mut item = Some(item); self.recurse(name, |this| { let item = item.take().unwrap(); let mut buf = Vec::new(); this.render_item(&mut buf, &item, false).unwrap(); // buf will be empty if the module is stripped and there is no redirect for it if !buf.is_empty() { let joint_dst = this.dst.join("index.html"); try_err!(fs::create_dir_all(&this.dst), &this.dst); let mut dst = try_err!(File::create(&joint_dst), &joint_dst); try_err!(dst.write_all(&buf), &joint_dst); } let m = match item.inner { clean::StrippedItem(box clean::ModuleItem(m)) | clean::ModuleItem(m) => m, _ => unreachable!() }; // Render sidebar-items.js used throughout this module. 
if !this.render_redirect_pages { let items = this.build_sidebar_items(&m); let js_dst = this.dst.join("sidebar-items.js"); let mut js_out = BufWriter::new(try_err!(File::create(&js_dst), &js_dst)); try_err!(write!(&mut js_out, "initSidebarItems({});", as_json(&items)), &js_dst); } for item in m.items { f(this,item); } Ok(()) })?; } else if item.name.is_some() { let mut buf = Vec::new(); self.render_item(&mut buf, &item, true).unwrap(); // buf will be empty if the item is stripped and there is no redirect for it if !buf.is_empty() { let name = item.name.as_ref().unwrap(); let item_type = item.type_(); let file_name = &item_path(item_type, name); let joint_dst = self.dst.join(file_name); try_err!(fs::create_dir_all(&self.dst), &self.dst); let mut dst = try_err!(File::create(&joint_dst), &joint_dst); try_err!(dst.write_all(&buf), &joint_dst); // Redirect from a sane URL using the namespace to Rustdoc's // URL for the page. let redir_name = format!("{}.{}.html", name, item_type.name_space()); let redir_dst = self.dst.join(redir_name); if let Ok(mut redirect_out) = OpenOptions::new().create_new(true) .write(true) .open(&redir_dst) { try_err!(layout::redirect(&mut redirect_out, file_name), &redir_dst); } // If the item is a macro, redirect from the old macro URL (with !) // to the new one (without). // FIXME(#35705) remove this redirect. 
if item_type == ItemType::Macro { let redir_name = format!("{}.{}!.html", item_type, name); let redir_dst = self.dst.join(redir_name); let mut redirect_out = try_err!(File::create(&redir_dst), &redir_dst); try_err!(layout::redirect(&mut redirect_out, file_name), &redir_dst); } } } Ok(()) } fn build_sidebar_items(&self, m: &clean::Module) -> BTreeMap<String, Vec<NameDoc>> { // BTreeMap instead of HashMap to get a sorted output let mut map = BTreeMap::new(); for item in &m.items { if item.is_stripped() { continue } let short = item.type_().css_class(); let myname = match item.name { None => continue, Some(ref s) => s.to_string(), }; let short = short.to_string(); map.entry(short).or_insert(vec![]) .push((myname, Some(plain_summary_line(item.doc_value())))); } for (_, items) in &mut map { items.sort(); } map } } impl<'a> Item<'a> { /// Generate a url appropriate for an `href` attribute back to the source of /// this item. /// /// The url generated, when clicked, will redirect the browser back to the /// original source code. /// /// If `None` is returned, then a source link couldn't be generated. This /// may happen, for example, with externally inlined items where the source /// of their crate documentation isn't known. fn src_href(&self) -> Option<String> { let mut root = self.cx.root_path(); let cache = cache(); let mut path = String::new(); let (krate, path) = if self.item.def_id.is_local() { let path = PathBuf::from(&self.item.source.filename); if let Some(path) = self.cx.shared.local_sources.get(&path) { (&self.cx.shared.layout.krate, path) } else { return None; } } else { // Macros from other libraries get special filenames which we can // safely ignore. 
if self.item.source.filename.starts_with("<") && self.item.source.filename.ends_with("macros>") { return None; } let (krate, src_root) = match cache.extern_locations.get(&self.item.def_id.krate) { Some(&(ref name, ref src, Local)) => (name, src), Some(&(ref name, ref src, Remote(ref s))) => { root = s.to_string(); (name, src) } Some(&(_, _, Unknown)) | None => return None, }; let file = Path::new(&self.item.source.filename); clean_srcpath(&src_root, file, false, |component| { path.push_str(component); path.push('/'); }); let mut fname = file.file_name().expect("source has no filename") .to_os_string(); fname.push(".html"); path.push_str(&fname.to_string_lossy()); (krate, &path) }; let lines = if self.item.source.loline == self.item.source.hiline { format!("{}", self.item.source.loline) } else { format!("{}-{}", self.item.source.loline, self.item.source.hiline) }; Some(format!("{root}src/{krate}/{path}#{lines}", root = root, krate = krate, path = path, lines = lines)) } } impl<'a> fmt::Display for Item<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { debug_assert!(!self.item.is_stripped()); // Write the breadcrumb trail header for the top write!(fmt, "\n<h1 class='fqn'><span class='in-band'>")?; match self.item.inner { clean::ModuleItem(ref m) => if m.is_crate { write!(fmt, "Crate ")?; } else { write!(fmt, "Module ")?; }, clean::FunctionItem(..) | clean::ForeignFunctionItem(..) => write!(fmt, "Function ")?, clean::TraitItem(..) => write!(fmt, "Trait ")?, clean::StructItem(..) => write!(fmt, "Struct ")?, clean::UnionItem(..) => write!(fmt, "Union ")?, clean::EnumItem(..) => write!(fmt, "Enum ")?, clean::TypedefItem(..) => write!(fmt, "Type Definition ")?, clean::MacroItem(..) => write!(fmt, "Macro ")?, clean::PrimitiveItem(..) => write!(fmt, "Primitive Type ")?, clean::StaticItem(..) | clean::ForeignStaticItem(..) => write!(fmt, "Static ")?, clean::ConstantItem(..) => write!(fmt, "Constant ")?, _ => { // We don't generate pages for any other type. 
unreachable!(); } } if !self.item.is_primitive() { let cur = &self.cx.current; let amt = if self.item.is_mod() { cur.len() - 1 } else { cur.len() }; for (i, component) in cur.iter().enumerate().take(amt) { write!(fmt, "<a href='{}index.html'>{}</a>::<wbr>", repeat("../").take(cur.len() - i - 1) .collect::<String>(), component)?; } } write!(fmt, "<a class=\"{}\" href=''>{}</a>", self.item.type_(), self.item.name.as_ref().unwrap())?; write!(fmt, "</span>")?; // in-band write!(fmt, "<span class='out-of-band'>")?; if let Some(version) = self.item.stable_since() { write!(fmt, "<span class='since' title='Stable since Rust version {0}'>{0}</span>", version)?; } write!(fmt, r##"<span id='render-detail'> <a id="toggle-all-docs" href="javascript:void(0)" title="collapse all docs"> [<span class='inner'>&#x2212;</span>] </a> </span>"##)?; // Write `src` tag // // When this item is part of a `pub use` in a downstream crate, the // [src] link in the downstream documentation will actually come back to // this page, and this link will be auto-clicked. The `id` attribute is // used to find the link to auto-click. 
if self.cx.shared.include_sources && !self.item.is_primitive() { if let Some(l) = self.src_href() { write!(fmt, "<a class='srclink' href='{}' title='{}'>[src]</a>", l, "goto source code")?; } } write!(fmt, "</span>")?; // out-of-band write!(fmt, "</h1>\n")?; match self.item.inner { clean::ModuleItem(ref m) => { item_module(fmt, self.cx, self.item, &m.items) } clean::FunctionItem(ref f) | clean::ForeignFunctionItem(ref f) => item_function(fmt, self.cx, self.item, f), clean::TraitItem(ref t) => item_trait(fmt, self.cx, self.item, t), clean::StructItem(ref s) => item_struct(fmt, self.cx, self.item, s), clean::UnionItem(ref s) => item_union(fmt, self.cx, self.item, s), clean::EnumItem(ref e) => item_enum(fmt, self.cx, self.item, e), clean::TypedefItem(ref t, _) => item_typedef(fmt, self.cx, self.item, t), clean::MacroItem(ref m) => item_macro(fmt, self.cx, self.item, m), clean::PrimitiveItem(ref p) => item_primitive(fmt, self.cx, self.item, p), clean::StaticItem(ref i) | clean::ForeignStaticItem(ref i) => item_static(fmt, self.cx, self.item, i), clean::ConstantItem(ref c) => item_constant(fmt, self.cx, self.item, c), _ => { // We don't generate pages for any other type. 
unreachable!();
} } } }

/// Computes the relative file name of an item's generated page: modules get
/// `name/index.html`, everything else `<kind>.<name>.html`.
fn item_path(ty: ItemType, name: &str) -> String {
    match ty {
        ItemType::Module => format!("{}/index.html", name),
        _ => format!("{}.{}.html", ty.css_class(), name),
    }
}

/// Joins the current module path with the item's own name, e.g. `std::vec::Vec`.
fn full_path(cx: &Context, item: &clean::Item) -> String {
    let mut s = cx.current.join("::");
    s.push_str("::");
    s.push_str(item.name.as_ref().unwrap());
    s
}

/// Returns the leading "paragraph" of the docs: every line up to (but not
/// including) the first blank (all-whitespace) line, re-joined with newlines.
fn shorter<'a>(s: Option<&'a str>) -> String {
    match s {
        Some(s) => s.lines()
                    .take_while(|line| line.chars().any(|chr| !chr.is_whitespace()))
                    .collect::<Vec<_>>()
                    .join("\n"),
        None => String::new(),
    }
}

/// Flattens the summary paragraph onto one line and strips markdown markup.
#[inline]
fn plain_summary_line(s: Option<&str>) -> String {
    let line = shorter(s).replace("\n", " ");
    markdown::plain_summary_line(&line[..])
}

/// Emits the full documentation block for `item`: stability notes first, then
/// the rendered doc comment (prefixed with the associated-const value, when
/// there is one).
fn document(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result {
    document_stability(w, cx, item)?;
    let prefix = render_assoc_const_value(item);
    document_full(w, item, cx, &prefix)?;
    Ok(())
}

/// Render md_text as markdown. Warns the user if there are difference in
/// rendering between Pulldown and Hoedown.
fn render_markdown(w: &mut fmt::Formatter,
                   md_text: &str,
                   span: Span,
                   render_type: RenderType,
                   prefix: &str,
                   scx: &SharedContext)
                   -> fmt::Result {
    // We only emit warnings if the user has opted-in to Pulldown rendering.
    let output = if render_type == RenderType::Pulldown {
        // Save the state of USED_ID_MAP so it only gets updated once even
        // though we're rendering twice.
        let orig_used_id_map = USED_ID_MAP.with(|map| map.borrow().clone());
        let hoedown_output = format!("{}", Markdown(md_text, RenderType::Hoedown));
        USED_ID_MAP.with(|map| *map.borrow_mut() = orig_used_id_map);
        let pulldown_output = format!("{}", Markdown(md_text, RenderType::Pulldown));
        let differences = html_diff::get_differences(&pulldown_output, &hoedown_output);
        // Drop differences that are whitespace-only: they don't affect what
        // the reader sees, so they aren't worth a warning.
        let differences = differences.into_iter()
            .filter(|s| {
                match *s {
                    html_diff::Difference::NodeText { ref elem_text,
                                                      ref opposite_elem_text, .. }
                        if match_non_whitespace(elem_text, opposite_elem_text) => false,
                    _ => true,
                }
            })
            .collect::<Vec<_>>();

        if !differences.is_empty() {
            scx.markdown_warnings.borrow_mut().push((span, md_text.to_owned(), differences));
        }

        pulldown_output
    } else {
        format!("{}", Markdown(md_text, RenderType::Hoedown))
    };

    write!(w, "<div class='docblock'>{}{}</div>", prefix, output)
}

// Returns true iff s1 and s2 match, ignoring whitespace.
fn match_non_whitespace(s1: &str, s2: &str) -> bool {
    // The original hand-rolled two-cursor walk implemented exactly this:
    // the two strings match when their non-whitespace character sequences
    // are identical (trimming is subsumed by the filtering).
    let a = s1.chars().filter(|c| !c.is_whitespace());
    let b = s2.chars().filter(|c| !c.is_whitespace());
    a.eq(b)
}

/// Writes the abbreviated, single-paragraph form of `item`'s docs, followed
/// by a "[Read more]" link when the docs were truncated to one paragraph.
fn document_short(w: &mut fmt::Formatter, item: &clean::Item, link: AssocItemLink,
                  cx: &Context, prefix: &str) -> fmt::Result {
    if let Some(s) = item.doc_value() {
        let markdown = if s.contains('\n') {
            format!("{} [Read more]({})",
                    &plain_summary_line(Some(s)), naive_assoc_href(item, link))
        } else {
            plain_summary_line(Some(s))
        };
        render_markdown(w, &markdown, item.source.clone(), cx.render_type, prefix, &cx.shared)?;
    } else if !prefix.is_empty() {
        write!(w, "<div class='docblock'>{}</div>", prefix)?;
    }
    Ok(())
}

/// For an associated const with a default value, returns that value
/// syntax-highlighted (used as a prefix for the const's docs); otherwise
/// returns the empty string.
fn render_assoc_const_value(item: &clean::Item) -> String {
    match item.inner {
        clean::AssociatedConstItem(ref ty, Some(ref default)) => {
            highlight::render_with_highlighting(
                &format!("{}: {:#} = {}", item.name.as_ref().unwrap(), ty, default),
                None,
                None,
                None,
            )
        }
        _ => String::new(),
    }
}

/// Writes the complete (non-abbreviated) rendered docs for `item`.
fn document_full(w: &mut fmt::Formatter, item: &clean::Item,
                 cx: &Context, prefix: &str) -> fmt::Result {
    if let Some(s) = item.doc_value() {
        render_markdown(w, s, item.source.clone(), cx.render_type, prefix, &cx.shared)?;
    } else if !prefix.is_empty() {
        write!(w, "<div class='docblock'>{}</div>", prefix)?;
    }
    Ok(())
}

fn
// Writes the stability/deprecation badges for `item`, if any, wrapped in a
// single `<div class='stability'>` container.
document_stability(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result {
    let stabilities = short_stability(item, cx, true);
    if !stabilities.is_empty() {
        write!(w, "<div class='stability'>")?;
        for stability in stabilities {
            write!(w, "{}", stability)?;
        }
        write!(w, "</div>")?;
    }
    Ok(())
}

// Sort key for names with a numeric suffix: splits `name` into its textual
// prefix, the numeric suffix parsed as an integer, and the count of leading
// zeroes (so `foo2` sorts before `foo10`, and zero-padded forms sort last).
fn name_key(name: &str) -> (&str, u64, usize) {
    // find number at end
    let split = name.bytes().rposition(|b| b < b'0' || b'9' < b).map_or(0, |s| s + 1);
    // count leading zeroes
    let after_zeroes =
        name[split..].bytes().position(|b| b != b'0').map_or(name.len(), |extra| split + extra);
    // sort leading zeroes last
    let num_zeroes = after_zeroes - split;
    match name[split..].parse() {
        Ok(n) => (&name[..split], n, num_zeroes),
        // No numeric suffix at all, or one too large to parse: fall back to
        // comparing the whole name with a zero key.
        Err(_) => (name, 0, num_zeroes),
    }
}

// Renders a module's index page: the module docs followed by one table per
// item kind, listing the module's public (non-stripped) items.
fn item_module(w: &mut fmt::Formatter, cx: &Context,
               item: &clean::Item, items: &[clean::Item]) -> fmt::Result {
    document(w, cx, item)?;

    // Indices of items that actually get listed: default impls and stripped
    // (hidden) items are dropped.
    let mut indices = (0..items.len()).filter(|i| {
        if let clean::DefaultImplItem(..) = items[*i].inner {
            return false;
        }
        !items[*i].is_stripped()
    }).collect::<Vec<usize>>();

    // the order of item types in the listing
    // NOTE(review): value 11 is unused here — presumably left by a removed
    // kind; kept as-is since only the relative order matters.
    fn reorder(ty: ItemType) -> u8 {
        match ty {
            ItemType::ExternCrate => 0,
            ItemType::Import => 1,
            ItemType::Primitive => 2,
            ItemType::Module => 3,
            ItemType::Macro => 4,
            ItemType::Struct => 5,
            ItemType::Enum => 6,
            ItemType::Constant => 7,
            ItemType::Static => 8,
            ItemType::Trait => 9,
            ItemType::Function => 10,
            ItemType::Typedef => 12,
            ItemType::Union => 13,
            _ => 14 + ty as u8,
        }
    }

    // Listing order: by kind first, then stable before unstable, then a
    // human-friendly name comparison via `name_key`.
    fn cmp(i1: &clean::Item, i2: &clean::Item, idx1: usize, idx2: usize) -> Ordering {
        let ty1 = i1.type_();
        let ty2 = i2.type_();
        if ty1 != ty2 {
            return (reorder(ty1), idx1).cmp(&(reorder(ty2), idx2))
        }
        let s1 = i1.stability.as_ref().map(|s| s.level);
        let s2 = i2.stability.as_ref().map(|s| s.level);
        match (s1, s2) {
            (Some(stability::Unstable), Some(stability::Stable)) => return Ordering::Greater,
            (Some(stability::Stable), Some(stability::Unstable)) => return Ordering::Less,
            _ => {}
        }
        let lhs = i1.name.as_ref().map_or("", |s| &**s);
        let rhs = i2.name.as_ref().map_or("", |s| &**s);
        name_key(lhs).cmp(&name_key(rhs))
    }

    indices.sort_by(|&i1, &i2| cmp(&items[i1], &items[i2], i1, i2));

    // This call is to remove reexport duplicates in cases such as:
    //
    // ```
    // pub mod foo {
    //     pub mod bar {
    //         pub trait Double { fn foo(); }
    //     }
    // }
    //
    // pub use foo::bar::*;
    // pub use foo::*;
    // ```
    //
    // `Double` will appear twice in the generated docs.
    //
    // FIXME: This code is quite ugly and could be improved. Small issue: DefId
    // can be identical even if the elements are different (mostly in imports).
    // So in case this is an import, we keep everything by adding a "unique id"
    // (which is the position in the vector).
    indices.dedup_by_key(|i| (items[*i].def_id,
                              if items[*i].name.as_ref().is_some() {
                                  Some(full_path(cx, &items[*i]).clone())
                              } else {
                                  None
                              },
                              items[*i].type_(),
                              if items[*i].is_import() {
                                  *i
                              } else {
                                  0
                              }));

    debug!("{:?}", indices);
    let mut curty = None;
    for &idx in &indices {
        let myitem = &items[idx];
        if myitem.is_stripped() {
            continue;
        }

        let myty = Some(myitem.type_());
        if curty == Some(ItemType::ExternCrate) && myty == Some(ItemType::Import) {
            // Put `extern crate` and `use` re-exports in the same section.
            curty = myty;
        } else if myty != curty {
            // Kind changed: close the previous table (if any) and open a new
            // section header + table for this kind.
            if curty.is_some() {
                write!(w, "</table>")?;
            }
            curty = myty;
            let (short, name) = match myty.unwrap() {
                ItemType::ExternCrate | ItemType::Import => ("reexports", "Reexports"),
                ItemType::Module => ("modules", "Modules"),
                ItemType::Struct => ("structs", "Structs"),
                ItemType::Union => ("unions", "Unions"),
                ItemType::Enum => ("enums", "Enums"),
                ItemType::Function => ("functions", "Functions"),
                ItemType::Typedef => ("types", "Type Definitions"),
                ItemType::Static => ("statics", "Statics"),
                ItemType::Constant => ("constants", "Constants"),
                ItemType::Trait => ("traits", "Traits"),
                ItemType::Impl => ("impls", "Implementations"),
                ItemType::TyMethod => ("tymethods", "Type Methods"),
                ItemType::Method => ("methods", "Methods"),
                ItemType::StructField => ("fields", "Struct Fields"),
                ItemType::Variant => ("variants", "Variants"),
                ItemType::Macro => ("macros", "Macros"),
                ItemType::Primitive => ("primitives", "Primitive Types"),
                ItemType::AssociatedType => ("associated-types", "Associated Types"),
                ItemType::AssociatedConst => ("associated-consts", "Associated Constants"),
            };
            write!(w, "<h2 id='{id}' class='section-header'>\
                       <a href=\"#{id}\">{name}</a></h2>\n<table>",
                   id = derive_id(short.to_owned()), name = name)?;
        }

        match myitem.inner {
            clean::ExternCrateItem(ref name, ref src) => {
                use html::format::HRef;
                match *src {
                    Some(ref src) => {
                        write!(w, "<tr><td><code>{}extern crate {} as {};",
                               VisSpace(&myitem.visibility),
                               HRef::new(myitem.def_id, src),
                               name)?
                    }
                    None => {
                        write!(w, "<tr><td><code>{}extern crate {};",
                               VisSpace(&myitem.visibility),
                               HRef::new(myitem.def_id, name))?
                    }
                }
                write!(w, "</code></td></tr>")?;
            }

            clean::ImportItem(ref import) => {
                write!(w, "<tr><td><code>{}{}</code></td></tr>",
                       VisSpace(&myitem.visibility), *import)?;
            }

            _ => {
                if myitem.name.is_none() {
                    continue
                }

                let stabilities = short_stability(myitem, cx, false);

                let stab_docs = if !stabilities.is_empty() {
                    stabilities.iter()
                               .map(|s| format!("[{}]", s))
                               .collect::<Vec<_>>()
                               .as_slice()
                               .join(" ")
                } else {
                    String::new()
                };

                // Unsafe fns get a warning-sign superscript next to the link.
                let unsafety_flag = match myitem.inner {
                    clean::FunctionItem(ref func) | clean::ForeignFunctionItem(ref func)
                        if func.unsafety == hir::Unsafety::Unsafe => {
                        "<a title='unsafe function' href='#'><sup>⚠</sup></a>"
                    }
                    _ => "",
                };

                let doc_value = myitem.doc_value().unwrap_or("");
                write!(w, " <tr class='{stab} module-item'> <td><a class=\"{class}\" href=\"{href}\" title='{title_type} {title}'>{name}</a>{unsafety_flag}</td> <td class='docblock-short'> {stab_docs} {docs} </td> </tr>",
                       name = *myitem.name.as_ref().unwrap(),
                       stab_docs = stab_docs,
                       docs = if cx.render_type == RenderType::Hoedown {
                           format!("{}",
                                   shorter(Some(&Markdown(doc_value,
                                                          RenderType::Hoedown).to_string())))
                       } else {
                           format!("{}", MarkdownSummaryLine(doc_value))
                       },
                       class = myitem.type_(),
                       stab = myitem.stability_class().unwrap_or("".to_string()),
                       unsafety_flag = unsafety_flag,
                       href = item_path(myitem.type_(), myitem.name.as_ref().unwrap()),
                       title_type = myitem.type_(),
                       title = full_path(cx, myitem))?;
            }
        }
    }

    if curty.is_some() {
        write!(w, "</table>")?;
    }
    Ok(())
}

// Builds the list of stability badges (deprecation, instability, cfg
// portability) for `item` as HTML snippets. `show_reason` selects the long
// form used on item pages (with reasons and "since" versions); the short form
// is used in module listings.
fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Vec<String> {
    let mut stability = vec![];

    if let Some(stab) = item.stability.as_ref() {
        let deprecated_reason = if show_reason && !stab.deprecated_reason.is_empty() {
            format!(": {}", stab.deprecated_reason)
        } else {
            String::new()
        };
        if !stab.deprecated_since.is_empty() {
            let since = if show_reason {
                format!(" since {}", Escape(&stab.deprecated_since))
            } else {
                String::new()
            };
            let text = format!("Deprecated{}{}", since, MarkdownHtml(&deprecated_reason,
cx.render_type));
            stability.push(format!("<div class='stab deprecated'>{}</div>", text))
        };

        if stab.level == stability::Unstable {
            if show_reason {
                // Build the "(feature #issue)" suffix from whatever subset of
                // feature name / tracker URL / issue number is available.
                let unstable_extra = match (!stab.feature.is_empty(),
                                            &cx.shared.issue_tracker_base_url,
                                            stab.issue) {
                    (true, &Some(ref tracker_url), Some(issue_no)) if issue_no > 0 =>
                        format!(" (<code>{} </code><a href=\"{}{}\">#{}</a>)",
                                Escape(&stab.feature), tracker_url, issue_no, issue_no),
                    // NOTE(review): this arm escapes the tracker URL while the
                    // arm above does not — looks inconsistent; confirm intent.
                    (false, &Some(ref tracker_url), Some(issue_no)) if issue_no > 0 =>
                        format!(" (<a href=\"{}{}\">#{}</a>)",
                                Escape(&tracker_url), issue_no, issue_no),
                    (true, ..) =>
                        format!(" (<code>{}</code>)", Escape(&stab.feature)),
                    _ => String::new(),
                };
                if stab.unstable_reason.is_empty() {
                    stability.push(format!("<div class='stab unstable'>\
                                            <span class=microscope>🔬</span> \
                                            This is a nightly-only experimental API. {}\
                                            </div>",
                                           unstable_extra));
                } else {
                    // With a reason, the badge becomes an expandable
                    // `<details>` whose summary is the short message.
                    let text = format!("<summary><span class=microscope>🔬</span> \
                                        This is a nightly-only experimental API. {}\
                                        </summary>{}",
                                       unstable_extra,
                                       MarkdownHtml(&stab.unstable_reason, cx.render_type));
                    stability.push(format!("<div class='stab unstable'><details>{}</details></div>",
                                           text));
                }
            } else {
                stability.push(format!("<div class='stab unstable'>Experimental</div>"))
            }
        };
    } else if let Some(depr) = item.deprecation.as_ref() {
        // No stability attribute, but a plain `#[deprecated]` attribute.
        let note = if show_reason && !depr.note.is_empty() {
            format!(": {}", depr.note)
        } else {
            String::new()
        };
        let since = if show_reason && !depr.since.is_empty() {
            format!(" since {}", Escape(&depr.since))
        } else {
            String::new()
        };

        let text = format!("Deprecated{}{}", since, MarkdownHtml(&note, cx.render_type));
        stability.push(format!("<div class='stab deprecated'>{}</div>", text))
    }

    // `cfg(...)` portability badge, independent of stability/deprecation.
    if let Some(ref cfg) = item.attrs.cfg {
        stability.push(format!("<div class='stab portability'>{}</div>", if show_reason {
            cfg.render_long_html()
        } else {
            cfg.render_short_html()
        }));
    }

    stability
}

// Renders `<code> = value</code>` for a static/const initializer expression,
// or nothing when the expression text is empty.
struct Initializer<'a>(&'a str);

impl<'a> fmt::Display for Initializer<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let Initializer(s) = *self;
        if s.is_empty() {
            return Ok(());
        }
        write!(f, "<code> = </code>")?;
        write!(f, "<code>{}</code>", Escape(s))
    }
}

// Renders the page for a `const` item: signature followed by its docs.
fn item_constant(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
                 c: &clean::Constant) -> fmt::Result {
    write!(w, "<pre class='rust const'>")?;
    render_attributes(w, it)?;
    write!(w, "{vis}const \
               {name}: {typ}{init}</pre>",
           vis = VisSpace(&it.visibility),
           name = it.name.as_ref().unwrap(),
           typ = c.type_,
           init = Initializer(&c.expr))?;
    document(w, cx, it)
}

// Renders the page for a `static` item: signature followed by its docs.
fn item_static(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
               s: &clean::Static) -> fmt::Result {
    write!(w, "<pre class='rust static'>")?;
    render_attributes(w, it)?;
    write!(w, "{vis}static {mutability}\
               {name}: {typ}{init}</pre>",
           vis = VisSpace(&it.visibility),
           mutability = MutableSpace(s.mutability),
           name = it.name.as_ref().unwrap(),
           typ = s.type_,
           init = Initializer(&s.expr))?;
    document(w, cx, it)
}

// Renders the page for a free function: signature (with attributes) plus docs.
fn item_function(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
                 f: &clean::Function) -> fmt::Result {
    // FIXME(#24111): remove when `const_fn` is stabilized
    let vis_constness = match UnstableFeatures::from_environment() {
        UnstableFeatures::Allow => f.constness,
        _ => hir::Constness::NotConst
    };
    // Plain-text length of the rendered header; used by `Method` to decide
    // how the argument list gets wrapped.
    let name_len = format!("{}{}{}{:#}fn {}{:#}",
                           VisSpace(&it.visibility),
                           ConstnessSpace(vis_constness),
                           UnsafetySpace(f.unsafety),
                           AbiSpace(f.abi),
                           it.name.as_ref().unwrap(),
                           f.generics).len();
    write!(w, "<pre class='rust fn'>")?;
    render_attributes(w, it)?;
    write!(w, "{vis}{constness}{unsafety}{abi}fn \
               {name}{generics}{decl}{where_clause}</pre>",
           vis = VisSpace(&it.visibility),
           constness = ConstnessSpace(vis_constness),
           unsafety = UnsafetySpace(f.unsafety),
           abi = AbiSpace(f.abi),
           name = it.name.as_ref().unwrap(),
           generics = f.generics,
           where_clause = WhereClause { gens: &f.generics, indent: 0, end_newline: true },
           decl = Method {
               decl: &f.decl,
               name_len,
               indent: 0,
           })?;
    document(w, cx, it)
}

// Renders the page for a trait: its definition, each associated item with
// docs, and the list of known implementors.
fn item_trait(w: &mut fmt::Formatter, cx: &Context, it:
&clean::Item, t: &clean::Trait) -> fmt::Result {
    // Pre-render the supertrait bounds, both as HTML and as plain text.
    let mut bounds = String::new();
    let mut bounds_plain = String::new();
    if !t.bounds.is_empty() {
        if !bounds.is_empty() {
            bounds.push(' ');
            bounds_plain.push(' ');
        }
        bounds.push_str(": ");
        bounds_plain.push_str(": ");
        for (i, p) in t.bounds.iter().enumerate() {
            if i > 0 {
                bounds.push_str(" + ");
                bounds_plain.push_str(" + ");
            }
            bounds.push_str(&format!("{}", *p));
            bounds_plain.push_str(&format!("{:#}", *p));
        }
    }

    // Output the trait definition
    write!(w, "<pre class='rust trait'>")?;
    render_attributes(w, it)?;
    write!(w, "{}{}trait {}{}{}",
           VisSpace(&it.visibility),
           UnsafetySpace(t.unsafety),
           it.name.as_ref().unwrap(),
           t.generics,
           bounds)?;

    if !t.generics.where_predicates.is_empty() {
        write!(w, "{}", WhereClause { gens: &t.generics, indent: 0, end_newline: true })?;
    } else {
        write!(w, " ")?;
    }

    // Partition the trait's items into the four rendered groups.
    let types = t.items.iter().filter(|m| m.is_associated_type()).collect::<Vec<_>>();
    let consts = t.items.iter().filter(|m| m.is_associated_const()).collect::<Vec<_>>();
    let required = t.items.iter().filter(|m| m.is_ty_method()).collect::<Vec<_>>();
    let provided = t.items.iter().filter(|m| m.is_method()).collect::<Vec<_>>();

    if t.items.is_empty() {
        write!(w, "{{ }}")?;
    } else {
        // FIXME: we should be using a derived_id for the Anchors here
        write!(w, "{{\n")?;
        for t in &types {
            write!(w, " ")?;
            render_assoc_item(w, t, AssocItemLink::Anchor(None), ItemType::Trait)?;
            write!(w, ";\n")?;
        }
        if !types.is_empty() && !consts.is_empty() {
            w.write_str("\n")?;
        }
        for t in &consts {
            write!(w, " ")?;
            render_assoc_item(w, t, AssocItemLink::Anchor(None), ItemType::Trait)?;
            write!(w, ";\n")?;
        }
        if !consts.is_empty() && !required.is_empty() {
            w.write_str("\n")?;
        }
        for (pos, m) in required.iter().enumerate() {
            write!(w, " ")?;
            render_assoc_item(w, m, AssocItemLink::Anchor(None), ItemType::Trait)?;
            write!(w, ";\n")?;
            if pos < required.len() - 1 {
                write!(w, "<div class='item-spacer'></div>")?;
            }
        }
        if !required.is_empty() && !provided.is_empty() {
            w.write_str("\n")?;
        }
        for (pos, m) in provided.iter().enumerate() {
            write!(w, " ")?;
            render_assoc_item(w, m, AssocItemLink::Anchor(None), ItemType::Trait)?;
            match m.inner {
                clean::MethodItem(ref inner) if !inner.generics.where_predicates.is_empty() => {
                    // Methods with a where-clause get the `{ ... }` stub on
                    // its own line, after a comma.
                    write!(w, ",\n {{ ... }}\n")?;
                },
                _ => {
                    write!(w, " {{ ... }}\n")?;
                },
            }
            if pos < provided.len() - 1 {
                write!(w, "<div class='item-spacer'></div>")?;
            }
        }
        write!(w, "}}")?;
    }
    write!(w, "</pre>")?;

    // Trait documentation
    document(w, cx, it)?;

    // Renders one associated item of trait `t` with its anchor, stability
    // marker and docs.
    fn trait_item(w: &mut fmt::Formatter, cx: &Context, m: &clean::Item, t: &clean::Item)
                  -> fmt::Result {
        let name = m.name.as_ref().unwrap();
        let item_type = m.type_();
        let id = derive_id(format!("{}.{}", item_type, name));
        let ns_id = derive_id(format!("{}.{}", name, item_type.name_space()));
        write!(w, "<h3 id='{id}' class='method'>\
                   <span id='{ns_id}' class='invisible'><code>",
               id = id,
               ns_id = ns_id)?;
        render_assoc_item(w, m, AssocItemLink::Anchor(Some(&id)), ItemType::Impl)?;
        write!(w, "</code>")?;
        render_stability_since(w, m, t)?;
        write!(w, "</span></h3>")?;
        document(w, cx, m)?;
        Ok(())
    }

    if !types.is_empty() {
        write!(w, " <h2 id='associated-types' class='small-section-header'> Associated Types<a href='#associated-types' class='anchor'></a> </h2> <div class='methods'> ")?;
        for t in &types {
            trait_item(w, cx, *t, it)?;
        }
        write!(w, "</div>")?;
    }

    if !consts.is_empty() {
        write!(w, " <h2 id='associated-const' class='small-section-header'> Associated Constants<a href='#associated-const' class='anchor'></a> </h2> <div class='methods'> ")?;
        for t in &consts {
            trait_item(w, cx, *t, it)?;
        }
        write!(w, "</div>")?;
    }

    // Output the documentation for each function individually
    if !required.is_empty() {
        write!(w, " <h2 id='required-methods' class='small-section-header'> Required Methods<a href='#required-methods' class='anchor'></a> </h2> <div class='methods'> ")?;
        for m in &required {
            trait_item(w, cx, *m, it)?;
        }
        write!(w, "</div>")?;
    }
    if !provided.is_empty() {
        write!(w, " <h2
id='provided-methods' class='small-section-header'> Provided Methods<a href='#provided-methods' class='anchor'></a> </h2> <div class='methods'> ")?;
        for m in &provided {
            trait_item(w, cx, *m, it)?;
        }
        write!(w, "</div>")?;
    }

    // If there are methods directly on this trait object, render them here.
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)?;

    let cache = cache();
    write!(w, " <h2 id='implementors' class='small-section-header'> Implementors<a href='#implementors' class='anchor'></a> </h2> <ul class='item-list' id='implementors-list'> ")?;
    if let Some(implementors) = cache.implementors.get(&it.def_id) {
        // The DefId is for the first Type found with that name. The bool is
        // if any Types with the same name but different DefId have been found.
        let mut implementor_dups: FxHashMap<&str, (DefId, bool)> = FxHashMap();
        for implementor in implementors {
            match implementor.impl_.for_ {
                clean::ResolvedPath { ref path, did, is_generic: false, .. } |
                clean::BorrowedRef {
                    type_: box clean::ResolvedPath { ref path, did, is_generic: false, .. },
                    ..
                } => {
                    let &mut (prev_did, ref mut has_duplicates) =
                        implementor_dups.entry(path.last_name()).or_insert((did, false));
                    if prev_did != did {
                        *has_duplicates = true;
                    }
                }
                _ => {}
            }
        }

        for implementor in implementors {
            write!(w, "<li><code>")?;
            // If there's already another implementor that has the same abbridged name, use the
            // full path, for example in `std::iter::ExactSizeIterator`
            let use_absolute = match implementor.impl_.for_ {
                clean::ResolvedPath { ref path, is_generic: false, .. } |
                clean::BorrowedRef {
                    type_: box clean::ResolvedPath { ref path, is_generic: false, .. },
                    ..
                } => implementor_dups[path.last_name()].1,
                _ => false,
            };
            fmt_impl_for_trait_page(&implementor.impl_, w, use_absolute)?;
            for it in &implementor.impl_.items {
                if let clean::TypedefItem(ref tydef, _) = it.inner {
                    write!(w, "<span class=\"where fmt-newline\"> ")?;
                    assoc_type(w, it, &vec![], Some(&tydef.type_), AssocItemLink::Anchor(None))?;
                    write!(w, ";</span>")?;
                }
            }
            writeln!(w, "</code></li>")?;
        }
    }
    write!(w, "</ul>")?;
    // Hook up the per-trait implementors JS, which fills in implementors from
    // downstream crates.
    write!(w, r#"<script type="text/javascript" async src="{root_path}/implementors/{path}/{ty}.{name}.js"> </script>"#,
           root_path = vec![".."; cx.current.len()].join("/"),
           path = if it.def_id.is_local() {
               cx.current.join("/")
           } else {
               let (ref path, _) = cache.external_paths[&it.def_id];
               path[..path.len() - 1].join("/")
           },
           ty = it.type_().css_class(),
           name = *it.name.as_ref().unwrap())?;
    Ok(())
}

// Fallback href for an associated item: an in-page anchor, or the defining
// item's page (plus anchor) for a GotoSource link.
fn naive_assoc_href(it: &clean::Item, link: AssocItemLink) -> String {
    use html::item_type::ItemType::*;

    let name = it.name.as_ref().unwrap();
    // Typedefs in impls are anchored as associated types.
    let ty = match it.type_() {
        Typedef | AssociatedType => AssociatedType,
        s@_ => s,
    };

    let anchor = format!("#{}.{}", ty, name);
    match link {
        AssocItemLink::Anchor(Some(ref id)) => format!("#{}", id),
        AssocItemLink::Anchor(None) => anchor,
        AssocItemLink::GotoSource(did, _) => {
            href(did).map(|p| format!("{}{}", p.0, anchor)).unwrap_or(anchor)
        }
    }
}

// Renders the signature of an associated const.
fn assoc_const(w: &mut fmt::Formatter, it: &clean::Item,
               ty: &clean::Type, _default: Option<&String>,
               link: AssocItemLink) -> fmt::Result {
    write!(w, "const <a href='{}' class=\"constant\"><b>{}</b></a>: {}",
           naive_assoc_href(it, link),
           it.name.as_ref().unwrap(),
           ty)?;
    Ok(())
}

// Renders the signature of an associated type, with its optional bounds and
// default.
fn assoc_type(w: &mut fmt::Formatter, it: &clean::Item,
              bounds: &Vec<clean::TyParamBound>,
              default: Option<&clean::Type>,
              link: AssocItemLink) -> fmt::Result {
    write!(w, "type <a href='{}' class=\"type\">{}</a>",
           naive_assoc_href(it, link),
           it.name.as_ref().unwrap())?;
    if !bounds.is_empty() {
        write!(w, ": {}", TyParamBounds(bounds))?
}
    if let Some(default) = default {
        write!(w, " = {}", default)?;
    }
    Ok(())
}

// Writes a "stable since" marker for `ver`, unless it is empty or matches the
// containing item's version (in which case it would be redundant).
fn render_stability_since_raw<'a>(w: &mut fmt::Formatter,
                                  ver: Option<&'a str>,
                                  containing_ver: Option<&'a str>) -> fmt::Result {
    if let Some(v) = ver {
        if containing_ver != ver && v.len() > 0 {
            write!(w, "<div class='since' title='Stable since Rust version {0}'>{0}</div>",
                   v)?
        }
    }
    Ok(())
}

// Convenience wrapper over `render_stability_since_raw` for two items.
fn render_stability_since(w: &mut fmt::Formatter,
                          item: &clean::Item,
                          containing_item: &clean::Item) -> fmt::Result {
    render_stability_since_raw(w,
                               item.stable_since(),
                               containing_item.stable_since())
}

// Renders the signature of any associated item (method, associated const or
// associated type) in the context of `parent`.
fn render_assoc_item(w: &mut fmt::Formatter,
                     item: &clean::Item,
                     link: AssocItemLink,
                     parent: ItemType) -> fmt::Result {
    // Shared renderer for required and provided methods.
    fn method(w: &mut fmt::Formatter,
              meth: &clean::Item,
              unsafety: hir::Unsafety,
              constness: hir::Constness,
              abi: abi::Abi,
              g: &clean::Generics,
              d: &clean::FnDecl,
              link: AssocItemLink,
              parent: ItemType)
              -> fmt::Result {
        let name = meth.name.as_ref().unwrap();
        let anchor = format!("#{}.{}", meth.type_(), name);
        let href = match link {
            AssocItemLink::Anchor(Some(ref id)) => format!("#{}", id),
            AssocItemLink::Anchor(None) => anchor,
            AssocItemLink::GotoSource(did, provided_methods) => {
                // We're creating a link from an impl-item to the corresponding
                // trait-item and need to map the anchored type accordingly.
                let ty = if provided_methods.contains(name) {
                    ItemType::Method
                } else {
                    ItemType::TyMethod
                };

                href(did).map(|p| format!("{}#{}.{}", p.0, ty, name)).unwrap_or(anchor)
            }
        };
        // FIXME(#24111): remove when `const_fn` is stabilized
        let vis_constness = if is_nightly_build() {
            constness
        } else {
            hir::Constness::NotConst
        };
        // Plain-text header length, used to decide argument-list wrapping.
        let mut head_len = format!("{}{}{:#}fn {}{:#}",
                                   ConstnessSpace(vis_constness),
                                   UnsafetySpace(unsafety),
                                   AbiSpace(abi),
                                   name,
                                   *g).len();
        let (indent, end_newline) = if parent == ItemType::Trait {
            head_len += 4;
            (4, false)
        } else {
            (0, true)
        };
        write!(w, "{}{}{}fn <a href='{href}' class='fnname'>{name}</a>\
                   {generics}{decl}{where_clause}",
               ConstnessSpace(vis_constness),
               UnsafetySpace(unsafety),
               AbiSpace(abi),
               href = href,
               name = name,
               generics = *g,
               decl = Method {
                   decl: d,
                   name_len: head_len,
                   indent,
               },
               where_clause = WhereClause {
                   gens: g,
                   indent,
                   end_newline,
               })
    }
    match item.inner {
        clean::StrippedItem(..) => Ok(()),
        clean::TyMethodItem(ref m) => {
            method(w, item, m.unsafety, hir::Constness::NotConst,
                   m.abi, &m.generics, &m.decl, link, parent)
        }
        clean::MethodItem(ref m) => {
            method(w, item, m.unsafety, m.constness,
                   m.abi, &m.generics, &m.decl, link, parent)
        }
        clean::AssociatedConstItem(ref ty, ref default) => {
            assoc_const(w, item, ty, default.as_ref(), link)
        }
        clean::AssociatedTypeItem(ref bounds, ref default) => {
            assoc_type(w, item, bounds, default.as_ref(), link)
        }
        _ => panic!("render_assoc_item called on non-associated-item")
    }
}

// Renders the page for a struct: definition, named fields (for plain
// structs), then impls.
fn item_struct(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
               s: &clean::Struct) -> fmt::Result {
    write!(w, "<pre class='rust struct'>")?;
    render_attributes(w, it)?;
    render_struct(w, it, Some(&s.generics), s.struct_type, &s.fields, "", true)?;
    write!(w, "</pre>")?;

    document(w, cx, it)?;
    let mut fields = s.fields.iter().filter_map(|f| {
        match f.inner {
            clean::StructFieldItem(ref ty) => Some((f, ty)),
            _ => None,
        }
    }).peekable();
    if let doctree::Plain = s.struct_type {
        if fields.peek().is_some() {
            write!(w, "<h2
id='fields' class='fields small-section-header'> Fields<a href='#fields' class='anchor'></a></h2>")?;
            // One entry per named field: anchor, `name: type`, optional
            // stability badge, then the field docs.
            for (field, ty) in fields {
                let id = derive_id(format!("{}.{}",
                                           ItemType::StructField,
                                           field.name.as_ref().unwrap()));
                let ns_id = derive_id(format!("{}.{}",
                                              field.name.as_ref().unwrap(),
                                              ItemType::StructField.name_space()));
                write!(w, "<span id='{id}' class=\"{item_type}\"> <span id='{ns_id}' class='invisible'> <code>{name}: {ty}</code> </span></span>",
                       item_type = ItemType::StructField,
                       id = id,
                       ns_id = ns_id,
                       name = field.name.as_ref().unwrap(),
                       ty = ty)?;
                if let Some(stability_class) = field.stability_class() {
                    write!(w, "<span class='stab {stab}'></span>",
                           stab = stability_class)?;
                }
                document(w, cx, field)?;
            }
        }
    }
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)
}

// Renders the page for a union: definition, fields, then impls.
fn item_union(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
              s: &clean::Union) -> fmt::Result {
    write!(w, "<pre class='rust union'>")?;
    render_attributes(w, it)?;
    render_union(w, it, Some(&s.generics), &s.fields, "", true)?;
    write!(w, "</pre>")?;

    document(w, cx, it)?;
    let mut fields = s.fields.iter().filter_map(|f| {
        match f.inner {
            clean::StructFieldItem(ref ty) => Some((f, ty)),
            _ => None,
        }
    }).peekable();
    if fields.peek().is_some() {
        write!(w, "<h2 id='fields' class='fields small-section-header'> Fields<a href='#fields' class='anchor'></a></h2>")?;
        for (field, ty) in fields {
            write!(w, "<span id='{shortty}.{name}' class=\"{shortty}\"><code>{name}: {ty}</code> </span>",
                   shortty = ItemType::StructField,
                   name = field.name.as_ref().unwrap(),
                   ty = ty)?;
            if let Some(stability_class) = field.stability_class() {
                write!(w, "<span class='stab {stab}'></span>",
                       stab = stability_class)?;
            }
            document(w, cx, field)?;
        }
    }
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)
}

// Renders the page for an enum: definition, per-variant docs (including the
// fields of struct variants), then impls.
fn item_enum(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
             e: &clean::Enum) -> fmt::Result {
    write!(w, "<pre class='rust enum'>")?;
    render_attributes(w, it)?;
    write!(w, "{}enum {}{}{}",
           VisSpace(&it.visibility),
           it.name.as_ref().unwrap(),
           e.generics,
           WhereClause { gens: &e.generics, indent: 0, end_newline: true })?;
    if e.variants.is_empty() && !e.variants_stripped {
        write!(w, " {{}}")?;
    } else {
        write!(w, " {{\n")?;
        for v in &e.variants {
            write!(w, " ")?;
            let name = v.name.as_ref().unwrap();
            match v.inner {
                clean::VariantItem(ref var) => {
                    match var.kind {
                        clean::VariantKind::CLike => write!(w, "{}", name)?,
                        clean::VariantKind::Tuple(ref tys) => {
                            write!(w, "{}(", name)?;
                            for (i, ty) in tys.iter().enumerate() {
                                if i > 0 {
                                    write!(w, ",&nbsp;")?
                                }
                                write!(w, "{}", *ty)?;
                            }
                            write!(w, ")")?;
                        }
                        clean::VariantKind::Struct(ref s) => {
                            render_struct(w,
                                          v,
                                          None,
                                          s.struct_type,
                                          &s.fields,
                                          " ",
                                          false)?;
                        }
                    }
                }
                _ => unreachable!()
            }
            write!(w, ",\n")?;
        }

        if e.variants_stripped {
            write!(w, " // some variants omitted\n")?;
        }
        write!(w, "}}")?;
    }
    write!(w, "</pre>")?;

    document(w, cx, it)?;
    if !e.variants.is_empty() {
        write!(w, "<h2 id='variants' class='variants small-section-header'> Variants<a href='#variants' class='anchor'></a></h2>\n")?;
        for variant in &e.variants {
            let id = derive_id(format!("{}.{}",
                                       ItemType::Variant,
                                       variant.name.as_ref().unwrap()));
            let ns_id = derive_id(format!("{}.{}",
                                          variant.name.as_ref().unwrap(),
                                          ItemType::Variant.name_space()));
            write!(w, "<span id='{id}' class='variant'>\
                       <span id='{ns_id}' class='invisible'><code>{name}",
                   id = id,
                   ns_id = ns_id,
                   name = variant.name.as_ref().unwrap())?;
            if let clean::VariantItem(ref var) = variant.inner {
                if let clean::VariantKind::Tuple(ref tys) = var.kind {
                    write!(w, "(")?;
                    for (i, ty) in tys.iter().enumerate() {
                        if i > 0 {
                            write!(w, ",&nbsp;")?;
                        }
                        write!(w, "{}", *ty)?;
                    }
                    write!(w, ")")?;
                }
            }
            write!(w, "</code></span></span>")?;
            document(w, cx, variant)?;

            use clean::{Variant, VariantKind};
            // Struct variants additionally get a collapsible table listing
            // their fields with docs.
            if let clean::VariantItem(Variant {
                kind: VariantKind::Struct(ref s)
            }) = variant.inner {
                let variant_id = derive_id(format!("{}.{}.fields",
                                                   ItemType::Variant,
                                                   variant.name.as_ref().unwrap()));
                write!(w, "<span
class='docblock autohide sub-variant' id='{id}'>",
                       id = variant_id)?;
                write!(w, "<h3 class='fields'>Fields of <code>{name}</code></h3>\n <table>",
                       name = variant.name.as_ref().unwrap())?;
                for field in &s.fields {
                    use clean::StructFieldItem;
                    if let StructFieldItem(ref ty) = field.inner {
                        let id = derive_id(format!("variant.{}.field.{}",
                                                   variant.name.as_ref().unwrap(),
                                                   field.name.as_ref().unwrap()));
                        let ns_id = derive_id(format!("{}.{}.{}.{}",
                                                      variant.name.as_ref().unwrap(),
                                                      ItemType::Variant.name_space(),
                                                      field.name.as_ref().unwrap(),
                                                      ItemType::StructField.name_space()));
                        write!(w, "<tr><td \
                                   id='{id}'>\
                                   <span id='{ns_id}' class='invisible'>\
                                   <code>{f}:&nbsp;{t}</code></span></td><td>",
                               id = id,
                               ns_id = ns_id,
                               f = field.name.as_ref().unwrap(),
                               t = *ty)?;
                        document(w, cx, field)?;
                        write!(w, "</td></tr>")?;
                    }
                }
                write!(w, "</table></span>")?;
            }
            render_stability_since(w, variant, it)?;
        }
    }
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)?;
    Ok(())
}

// Renders a single `#[attr]` as displayable text; returns `None` for shapes
// that aren't shown (e.g. an empty meta-item list).
fn render_attribute(attr: &ast::MetaItem) -> Option<String> {
    let name = attr.name();

    if attr.is_word() {
        Some(format!("{}", name))
    } else if let Some(v) = attr.value_str() {
        Some(format!("{} = {:?}", name, v.as_str()))
    } else if let Some(values) = attr.meta_item_list() {
        let display: Vec<_> = values.iter().filter_map(|attr| {
            attr.meta_item().and_then(|mi| render_attribute(mi))
        }).collect();

        if display.len() > 0 {
            Some(format!("{}({})", name, display.join(", ")))
        } else {
            None
        }
    } else {
        None
    }
}

// Only attributes in this list are displayed on item pages.
const ATTRIBUTE_WHITELIST: &'static [&'static str] = &[
    "export_name",
    "lang",
    "link_section",
    "must_use",
    "no_mangle",
    "repr",
    "unsafe_destructor_blind_to_params"
];

// Renders the whitelisted attributes of `it` into a single
// `docblock attributes` div, or nothing when none apply.
fn render_attributes(w: &mut fmt::Formatter, it: &clean::Item) -> fmt::Result {
    let mut attrs = String::new();

    for attr in &it.attrs.other_attrs {
        let name = attr.name().unwrap();
        if !ATTRIBUTE_WHITELIST.contains(&&*name.as_str()) {
            continue;
        }
        if let Some(s) = render_attribute(&attr.meta().unwrap()) {
            attrs.push_str(&format!("#[{}]\n", s));
        }
    }
    if
attrs.len() > 0 {
        write!(w, "<div class=\"docblock attributes\">{}</div>", &attrs)?;
    }
    Ok(())
}

// Renders a struct definition (header plus fields) for plain, tuple and unit
// structs. `structhead` selects whether the `struct ` keyword itself is
// printed (false when rendering an enum's struct variant).
fn render_struct(w: &mut fmt::Formatter, it: &clean::Item,
                 g: Option<&clean::Generics>,
                 ty: doctree::StructType,
                 fields: &[clean::Item],
                 tab: &str,
                 structhead: bool) -> fmt::Result {
    write!(w, "{}{}{}",
           VisSpace(&it.visibility),
           if structhead {"struct "} else {""},
           it.name.as_ref().unwrap())?;
    if let Some(g) = g {
        write!(w, "{}", g)?
    }
    match ty {
        doctree::Plain => {
            if let Some(g) = g {
                write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: true })?
            }
            let mut has_visible_fields = false;
            write!(w, " {{")?;
            for field in fields {
                if let clean::StructFieldItem(ref ty) = field.inner {
                    write!(w, "\n{} {}{}: {},",
                           tab,
                           VisSpace(&field.visibility),
                           field.name.as_ref().unwrap(),
                           *ty)?;
                    has_visible_fields = true;
                }
            }

            if has_visible_fields {
                if it.has_stripped_fields().unwrap() {
                    write!(w, "\n{} // some fields omitted", tab)?;
                }
                write!(w, "\n{}", tab)?;
            } else if it.has_stripped_fields().unwrap() {
                // If there are no visible fields we can just display
                // `{ /* fields omitted */ }` to save space.
                write!(w, " /* fields omitted */ ")?;
            }
            write!(w, "}}")?;
        }
        doctree::Tuple => {
            write!(w, "(")?;
            for (i, field) in fields.iter().enumerate() {
                if i > 0 {
                    write!(w, ", ")?;
                }
                match field.inner {
                    clean::StrippedItem(box clean::StructFieldItem(..)) => {
                        // Hidden tuple fields render as `_` placeholders.
                        write!(w, "_")?
                    }
                    clean::StructFieldItem(ref ty) => {
                        write!(w, "{}{}", VisSpace(&field.visibility), *ty)?
                    }
                    _ => unreachable!()
                }
            }
            write!(w, ")")?;
            if let Some(g) = g {
                write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: false })?
            }
            write!(w, ";")?;
        }
        doctree::Unit => {
            // Needed for PhantomData.
            if let Some(g) = g {
                write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: false })?
            }
            write!(w, ";")?;
        }
    }
    Ok(())
}

// Renders a union definition (header plus fields).
fn render_union(w: &mut fmt::Formatter, it: &clean::Item,
                g: Option<&clean::Generics>,
                fields: &[clean::Item],
                tab: &str,
                structhead: bool) -> fmt::Result {
    write!(w, "{}{}{}",
           VisSpace(&it.visibility),
           if structhead {"union "} else {""},
           it.name.as_ref().unwrap())?;
    if let Some(g) = g {
        write!(w, "{}", g)?;
        write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: true })?;
    }

    write!(w, " {{\n{}", tab)?;
    for field in fields {
        if let clean::StructFieldItem(ref ty) = field.inner {
            write!(w, " {}{}: {},\n{}",
                   VisSpace(&field.visibility),
                   field.name.as_ref().unwrap(),
                   *ty,
                   tab)?;
        }
    }

    if it.has_stripped_fields().unwrap() {
        write!(w, " // some fields omitted\n{}", tab)?;
    }
    write!(w, "}}")?;
    Ok(())
}

// How an associated item should be linked from a rendered page: either an
// in-page anchor, or a link back to the page of the trait that defines it.
#[derive(Copy, Clone)]
enum AssocItemLink<'a> {
    Anchor(Option<&'a str>),
    GotoSource(DefId, &'a FxHashSet<String>),
}

impl<'a> AssocItemLink<'a> {
    // Replaces an Anchor's target with `id`; GotoSource links pass through.
    fn anchor(&self, id: &'a String) -> Self {
        match *self {
            AssocItemLink::Anchor(_) => {
                AssocItemLink::Anchor(Some(&id))
            },
            ref other => *other,
        }
    }
}

// Which impl blocks to render: everything, or only methods reachable through
// a `Deref` target.
enum AssocItemRender<'a> {
    All,
    DerefFor { trait_: &'a clean::Type, type_: &'a clean::Type, deref_mut_: bool }
}

#[derive(Copy, Clone, PartialEq)]
enum RenderMode {
    Normal,
    ForDeref { mut_: bool },
}

// Renders the impl blocks (inherent first, then traits) for the item with
// DefId `it`.
fn render_assoc_items(w: &mut fmt::Formatter,
                      cx: &Context,
                      containing_item: &clean::Item,
                      it: DefId,
                      what: AssocItemRender) -> fmt::Result {
    let c = cache();
    let v = match c.impls.get(&it) {
        Some(v) => v,
        None => return Ok(()),
    };
    // Split inherent impls from trait impls.
    let (non_trait, traits): (Vec<_>, _) = v.iter().partition(|i| {
        i.inner_impl().trait_.is_none()
    });
    if !non_trait.is_empty() {
        let render_mode = match what {
            AssocItemRender::All => {
                write!(w, " <h2 id='methods' class='small-section-header'> Methods<a href='#methods' class='anchor'></a> </h2> ")?;
                RenderMode::Normal
            }
            AssocItemRender::DerefFor { trait_, type_, deref_mut_ } => {
                write!(w, " <h2 id='deref-methods' class='small-section-header'> Methods from {}&lt;Target = {}&gt;<a href='#deref-methods' class='anchor'></a> </h2> 
", trait_, type_)?; RenderMode::ForDeref { mut_: deref_mut_ } } }; for i in &non_trait { render_impl(w, cx, i, AssocItemLink::Anchor(None), render_mode, containing_item.stable_since())?; } } if let AssocItemRender::DerefFor { .. } = what { return Ok(()); } if !traits.is_empty() { let deref_impl = traits.iter().find(|t| { t.inner_impl().trait_.def_id() == c.deref_trait_did }); if let Some(impl_) = deref_impl { let has_deref_mut = traits.iter().find(|t| { t.inner_impl().trait_.def_id() == c.deref_mut_trait_did }).is_some(); render_deref_methods(w, cx, impl_, containing_item, has_deref_mut)?; } write!(w, " <h2 id='implementations' class='small-section-header'> Trait Implementations<a href='#implementations' class='anchor'></a> </h2> ")?; for i in &traits { let did = i.trait_did().unwrap(); let assoc_link = AssocItemLink::GotoSource(did, &i.inner_impl().provided_trait_methods); render_impl(w, cx, i, assoc_link, RenderMode::Normal, containing_item.stable_since())?; } } Ok(()) } fn render_deref_methods(w: &mut fmt::Formatter, cx: &Context, impl_: &Impl, container_item: &clean::Item, deref_mut: bool) -> fmt::Result { let deref_type = impl_.inner_impl().trait_.as_ref().unwrap(); let target = impl_.inner_impl().items.iter().filter_map(|item| { match item.inner { clean::TypedefItem(ref t, true) => Some(&t.type_), _ => None, } }).next().expect("Expected associated type binding"); let what = AssocItemRender::DerefFor { trait_: deref_type, type_: target, deref_mut_: deref_mut }; if let Some(did) = target.def_id() { render_assoc_items(w, cx, container_item, did, what) } else { if let Some(prim) = target.primitive_type() { if let Some(&did) = cache().primitive_locations.get(&prim) { render_assoc_items(w, cx, container_item, did, what)?; } } Ok(()) } } fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLink, render_mode: RenderMode, outer_version: Option<&str>) -> fmt::Result { if render_mode == RenderMode::Normal { let id = derive_id(match 
i.inner_impl().trait_ { Some(ref t) => format!("impl-{}", Escape(&format!("{:#}", t))), None => "impl".to_string(), }); write!(w, "<h3 id='{}' class='impl'><span class='in-band'><code>{}</code>", id, i.inner_impl())?; write!(w, "<a href='#{}' class='anchor'></a>", id)?; write!(w, "</span><span class='out-of-band'>")?; let since = i.impl_item.stability.as_ref().map(|s| &s.since[..]); if let Some(l) = (Item { item: &i.impl_item, cx: cx }).src_href() { write!(w, "<div class='ghost'></div>")?; render_stability_since_raw(w, since, outer_version)?; write!(w, "<a class='srclink' href='{}' title='{}'>[src]</a>", l, "goto source code")?; } else { render_stability_since_raw(w, since, outer_version)?; } write!(w, "</span>")?; write!(w, "</h3>\n")?; if let Some(ref dox) = i.impl_item.doc_value() { write!(w, "<div class='docblock'>{}</div>", Markdown(dox, cx.render_type))?; } } fn doc_impl_item(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item, link: AssocItemLink, render_mode: RenderMode, is_default_item: bool, outer_version: Option<&str>, trait_: Option<&clean::Trait>) -> fmt::Result { let item_type = item.type_(); let name = item.name.as_ref().unwrap(); let render_method_item: bool = match render_mode { RenderMode::Normal => true, RenderMode::ForDeref { mut_: deref_mut_ } => { let self_type_opt = match item.inner { clean::MethodItem(ref method) => method.decl.self_type(), clean::TyMethodItem(ref method) => method.decl.self_type(), _ => None }; if let Some(self_ty) = self_type_opt { let (by_mut_ref, by_box) = match self_ty { SelfTy::SelfBorrowed(_, mutability) | SelfTy::SelfExplicit(clean::BorrowedRef { mutability, .. }) => { (mutability == Mutability::Mutable, false) }, SelfTy::SelfExplicit(clean::ResolvedPath { did, .. }) => { (false, Some(did) == cache().owned_box_did) }, _ => (false, false), }; (deref_mut_ || !by_mut_ref) && !by_box } else { false } }, }; match item.inner { clean::MethodItem(..) | clean::TyMethodItem(..) 
=> { // Only render when the method is not static or we allow static methods if render_method_item { let id = derive_id(format!("{}.{}", item_type, name)); let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?; write!(w, "<span id='{}' class='invisible'>", ns_id)?; write!(w, "<code>")?; render_assoc_item(w, item, link.anchor(&id), ItemType::Impl)?; write!(w, "</code>")?; if let Some(l) = (Item { cx, item }).src_href() { write!(w, "</span><span class='out-of-band'>")?; write!(w, "<div class='ghost'></div>")?; render_stability_since_raw(w, item.stable_since(), outer_version)?; write!(w, "<a class='srclink' href='{}' title='{}'>[src]</a>", l, "goto source code")?; } else { render_stability_since_raw(w, item.stable_since(), outer_version)?; } write!(w, "</span></h4>\n")?; } } clean::TypedefItem(ref tydef, _) => { let id = derive_id(format!("{}.{}", ItemType::AssociatedType, name)); let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?; write!(w, "<span id='{}' class='invisible'><code>", ns_id)?; assoc_type(w, item, &Vec::new(), Some(&tydef.type_), link.anchor(&id))?; write!(w, "</code></span></h4>\n")?; } clean::AssociatedConstItem(ref ty, ref default) => { let id = derive_id(format!("{}.{}", item_type, name)); let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?; write!(w, "<span id='{}' class='invisible'><code>", ns_id)?; assoc_const(w, item, ty, default.as_ref(), link.anchor(&id))?; write!(w, "</code></span></h4>\n")?; } clean::AssociatedTypeItem(ref bounds, ref default) => { let id = derive_id(format!("{}.{}", item_type, name)); let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?; write!(w, "<span id='{}' class='invisible'><code>", ns_id)?; assoc_type(w, item, bounds, 
default.as_ref(), link.anchor(&id))?; write!(w, "</code></span></h4>\n")?; } clean::StrippedItem(..) => return Ok(()), _ => panic!("can't make docs for trait item with name {:?}", item.name) } if render_method_item || render_mode == RenderMode::Normal { let prefix = render_assoc_const_value(item); if !is_default_item { if let Some(t) = trait_ { // The trait item may have been stripped so we might not // find any documentation or stability for it. if let Some(it) = t.items.iter().find(|i| i.name == item.name) { // We need the stability of the item from the trait // because impls can't have a stability. document_stability(w, cx, it)?; if item.doc_value().is_some() { document_full(w, item, cx, &prefix)?; } else { // In case the item isn't documented, // provide short documentation from the trait. document_short(w, it, link, cx, &prefix)?; } } } else { document_stability(w, cx, item)?; document_full(w, item, cx, &prefix)?; } } else { document_stability(w, cx, item)?; document_short(w, item, link, cx, &prefix)?; } } Ok(()) } let traits = &cache().traits; let trait_ = i.trait_did().and_then(|did| traits.get(&did)); write!(w, "<div class='impl-items'>")?; for trait_item in &i.inner_impl().items { doc_impl_item(w, cx, trait_item, link, render_mode, false, outer_version, trait_)?; } fn render_default_items(w: &mut fmt::Formatter, cx: &Context, t: &clean::Trait, i: &clean::Impl, render_mode: RenderMode, outer_version: Option<&str>) -> fmt::Result { for trait_item in &t.items { let n = trait_item.name.clone(); if i.items.iter().find(|m| m.name == n).is_some() { continue; } let did = i.trait_.as_ref().unwrap().def_id().unwrap(); let assoc_link = AssocItemLink::GotoSource(did, &i.provided_trait_methods); doc_impl_item(w, cx, trait_item, assoc_link, render_mode, true, outer_version, None)?; } Ok(()) } // If we've implemented a trait, then also emit documentation for all // default items which weren't overridden in the implementation block. 
if let Some(t) = trait_ { render_default_items(w, cx, t, &i.inner_impl(), render_mode, outer_version)?; } write!(w, "</div>")?; Ok(()) } fn item_typedef(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, t: &clean::Typedef) -> fmt::Result { write!(w, "<pre class='rust typedef'>")?; render_attributes(w, it)?; write!(w, "type {}{}{where_clause} = {type_};</pre>", it.name.as_ref().unwrap(), t.generics, where_clause = WhereClause { gens: &t.generics, indent: 0, end_newline: true }, type_ = t.type_)?; document(w, cx, it)?; // Render any items associated directly to this alias, as otherwise they // won't be visible anywhere in the docs. It would be nice to also show // associated items from the aliased type (see discussion in #32077), but // we need #14072 to make sense of the generics. render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All) } impl<'a> fmt::Display for Sidebar<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let cx = self.cx; let it = self.item; let parentlen = cx.current.len() - if it.is_mod() {1} else {0}; if it.is_struct() || it.is_trait() || it.is_primitive() || it.is_union() || it.is_enum() || it.is_mod() || it.is_typedef() { write!(fmt, "<p class='location'>")?; match it.inner { clean::StructItem(..) => write!(fmt, "Struct ")?, clean::TraitItem(..) => write!(fmt, "Trait ")?, clean::PrimitiveItem(..) => write!(fmt, "Primitive Type ")?, clean::UnionItem(..) => write!(fmt, "Union ")?, clean::EnumItem(..) => write!(fmt, "Enum ")?, clean::TypedefItem(..) => write!(fmt, "Type Definition ")?, clean::ModuleItem(..) 
=> if it.is_crate() { write!(fmt, "Crate ")?; } else { write!(fmt, "Module ")?; }, _ => (), } write!(fmt, "{}", it.name.as_ref().unwrap())?; write!(fmt, "</p>")?; match it.inner { clean::StructItem(ref s) => sidebar_struct(fmt, it, s)?, clean::TraitItem(ref t) => sidebar_trait(fmt, it, t)?, clean::PrimitiveItem(ref p) => sidebar_primitive(fmt, it, p)?, clean::UnionItem(ref u) => sidebar_union(fmt, it, u)?, clean::EnumItem(ref e) => sidebar_enum(fmt, it, e)?, clean::TypedefItem(ref t, _) => sidebar_typedef(fmt, it, t)?, clean::ModuleItem(ref m) => sidebar_module(fmt, it, &m.items)?, _ => (), } } // The sidebar is designed to display sibling functions, modules and // other miscellaneous information. since there are lots of sibling // items (and that causes quadratic growth in large modules), // we refactor common parts into a shared JavaScript file per module. // still, we don't move everything into JS because we want to preserve // as much HTML as possible in order to allow non-JS-enabled browsers // to navigate the documentation (though slightly inefficiently). write!(fmt, "<p class='location'>")?; for (i, name) in cx.current.iter().take(parentlen).enumerate() { if i > 0 { write!(fmt, "::<wbr>")?; } write!(fmt, "<a href='{}index.html'>{}</a>", &cx.root_path()[..(cx.current.len() - i - 1) * 3], *name)?; } write!(fmt, "</p>")?; // Sidebar refers to the enclosing module, not this module. 
let relpath = if it.is_mod() { "../" } else { "" }; write!(fmt, "<script>window.sidebarCurrent = {{\ name: '{name}', \ ty: '{ty}', \ relpath: '{path}'\ }};</script>", name = it.name.as_ref().map(|x| &x[..]).unwrap_or(""), ty = it.type_().css_class(), path = relpath)?; if parentlen == 0 { // There is no sidebar-items.js beyond the crate root path // FIXME maybe dynamic crate loading can be merged here } else { write!(fmt, "<script defer src=\"{path}sidebar-items.js\"></script>", path = relpath)?; } Ok(()) } } fn sidebar_assoc_items(it: &clean::Item) -> String { let mut out = String::new(); let c = cache(); if let Some(v) = c.impls.get(&it.def_id) { if v.iter().any(|i| i.inner_impl().trait_.is_none()) { out.push_str("<li><a href=\"#methods\">Methods</a></li>"); } if v.iter().any(|i| i.inner_impl().trait_.is_some()) { if let Some(impl_) = v.iter() .filter(|i| i.inner_impl().trait_.is_some()) .find(|i| i.inner_impl().trait_.def_id() == c.deref_trait_did) { if let Some(target) = impl_.inner_impl().items.iter().filter_map(|item| { match item.inner { clean::TypedefItem(ref t, true) => Some(&t.type_), _ => None, } }).next() { let inner_impl = target.def_id().or(target.primitive_type().and_then(|prim| { c.primitive_locations.get(&prim).cloned() })).and_then(|did| c.impls.get(&did)); if inner_impl.is_some() { out.push_str("<li><a href=\"#deref-methods\">"); out.push_str(&format!("Methods from {:#}&lt;Target={:#}&gt;", impl_.inner_impl().trait_.as_ref().unwrap(), target)); out.push_str("</a></li>"); } } } out.push_str("<li><a href=\"#implementations\">Trait Implementations</a></li>"); } } out } fn sidebar_struct(fmt: &mut fmt::Formatter, it: &clean::Item, s: &clean::Struct) -> fmt::Result { let mut sidebar = String::new(); if s.fields.iter() .any(|f| if let clean::StructFieldItem(..) 
= f.inner { true } else { false }) {
        if let doctree::Plain = s.struct_type {
            sidebar.push_str("<li><a href=\"#fields\">Fields</a></li>");
        }
    }

    sidebar.push_str(&sidebar_assoc_items(it));

    if !sidebar.is_empty() {
        write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?;
    }
    Ok(())
}

/// Renders the sidebar links for a trait page: associated types/consts,
/// required and provided methods, the shared associated-item links, and the
/// implementors link.
fn sidebar_trait(fmt: &mut fmt::Formatter, it: &clean::Item,
                 t: &clean::Trait) -> fmt::Result {
    let mut sidebar = String::new();

    let has_types = t.items.iter().any(|m| m.is_associated_type());
    let has_consts = t.items.iter().any(|m| m.is_associated_const());
    let has_required = t.items.iter().any(|m| m.is_ty_method());
    let has_provided = t.items.iter().any(|m| m.is_method());

    if has_types {
        sidebar.push_str("<li><a href=\"#associated-types\">Associated Types</a></li>");
    }
    if has_consts {
        sidebar.push_str("<li><a href=\"#associated-const\">Associated Constants</a></li>");
    }
    if has_required {
        sidebar.push_str("<li><a href=\"#required-methods\">Required Methods</a></li>");
    }
    if has_provided {
        sidebar.push_str("<li><a href=\"#provided-methods\">Provided Methods</a></li>");
    }

    sidebar.push_str(&sidebar_assoc_items(it));

    // Unlike the other entries, this link is pushed unconditionally, so the
    // sidebar is never empty on a trait page and is always written out.
    sidebar.push_str("<li><a href=\"#implementors\">Implementors</a></li>");

    write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)
}

/// Renders the sidebar for a primitive type page; only the shared
/// associated-item links apply here.
fn sidebar_primitive(fmt: &mut fmt::Formatter, it: &clean::Item,
                     _p: &clean::PrimitiveType) -> fmt::Result {
    let sidebar = sidebar_assoc_items(it);

    if !sidebar.is_empty() {
        write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?;
    }
    Ok(())
}

/// Renders the sidebar for a type alias page; like primitives, only the
/// shared associated-item links apply.
fn sidebar_typedef(fmt: &mut fmt::Formatter, it: &clean::Item,
                   _t: &clean::Typedef) -> fmt::Result {
    let sidebar = sidebar_assoc_items(it);

    if !sidebar.is_empty() {
        write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?;
    }
    Ok(())
}

/// Renders the sidebar links for a union page: a "Fields" link when at least
/// one non-stripped field is present, plus the shared associated-item links.
fn sidebar_union(fmt: &mut fmt::Formatter, it: &clean::Item,
                 u: &clean::Union) -> fmt::Result {
    let mut sidebar = String::new();

    if u.fields.iter()
               .any(|f| if let clean::StructFieldItem(..)
= f.inner { true } else { false }) { sidebar.push_str("<li><a href=\"#fields\">Fields</a></li>"); } sidebar.push_str(&sidebar_assoc_items(it)); if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?; } Ok(()) } fn sidebar_enum(fmt: &mut fmt::Formatter, it: &clean::Item, e: &clean::Enum) -> fmt::Result { let mut sidebar = String::new(); if !e.variants.is_empty() { sidebar.push_str("<li><a href=\"#variants\">Variants</a></li>"); } sidebar.push_str(&sidebar_assoc_items(it)); if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?; } Ok(()) } fn sidebar_module(fmt: &mut fmt::Formatter, _it: &clean::Item, items: &[clean::Item]) -> fmt::Result { let mut sidebar = String::new(); if items.iter().any(|it| it.type_() == ItemType::ExternCrate || it.type_() == ItemType::Import) { sidebar.push_str(&format!("<li><a href=\"#{id}\">{name}</a></li>", id = "reexports", name = "Reexports")); } // ordering taken from item_module, reorder, where it prioritized elements in a certain order // to print its headings for &myty in &[ItemType::Primitive, ItemType::Module, ItemType::Macro, ItemType::Struct, ItemType::Enum, ItemType::Constant, ItemType::Static, ItemType::Trait, ItemType::Function, ItemType::Typedef, ItemType::Union, ItemType::Impl, ItemType::TyMethod, ItemType::Method, ItemType::StructField, ItemType::Variant, ItemType::AssociatedType, ItemType::AssociatedConst] { if items.iter().any(|it| { if let clean::DefaultImplItem(..) 
= it.inner { false } else { !it.is_stripped() && it.type_() == myty }
    }) {
        // Map the item type to the (anchor id, human-readable heading) pair
        // used by the corresponding section on the module page.
        let (short, name) = match myty {
            ItemType::ExternCrate | ItemType::Import => ("reexports", "Reexports"),
            ItemType::Module => ("modules", "Modules"),
            ItemType::Struct => ("structs", "Structs"),
            ItemType::Union => ("unions", "Unions"),
            ItemType::Enum => ("enums", "Enums"),
            ItemType::Function => ("functions", "Functions"),
            ItemType::Typedef => ("types", "Type Definitions"),
            ItemType::Static => ("statics", "Statics"),
            ItemType::Constant => ("constants", "Constants"),
            ItemType::Trait => ("traits", "Traits"),
            ItemType::Impl => ("impls", "Implementations"),
            ItemType::TyMethod => ("tymethods", "Type Methods"),
            ItemType::Method => ("methods", "Methods"),
            ItemType::StructField => ("fields", "Struct Fields"),
            ItemType::Variant => ("variants", "Variants"),
            ItemType::Macro => ("macros", "Macros"),
            ItemType::Primitive => ("primitives", "Primitive Types"),
            ItemType::AssociatedType => ("associated-types", "Associated Types"),
            ItemType::AssociatedConst => ("associated-consts", "Associated Constants"),
        };
        sidebar.push_str(&format!("<li><a href=\"#{id}\">{name}</a></li>",
                                  id = short,
                                  name = name));
    }
}

if !sidebar.is_empty() {
    write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?;
}
Ok(())
}

/// Renders a source file as HTML: a column of right-aligned line numbers
/// followed by the syntax-highlighted source text.
impl<'a> fmt::Display for Source<'a> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let Source(s) = *self;
        let lines = s.lines().count();
        // `cols` is the digit count of the largest line number; used as the
        // pad width (`{0:1$}`) so all line numbers right-align.
        let mut cols = 0;
        let mut tmp = lines;
        while tmp > 0 {
            cols += 1;
            tmp /= 10;
        }
        write!(fmt, "<pre class=\"line-numbers\">")?;
        for i in 1..lines + 1 {
            // Each line number doubles as an anchor target (`#N`).
            write!(fmt, "<span id=\"{0}\">{0:1$}</span>\n", i, cols)?;
        }
        write!(fmt, "</pre>")?;
        write!(fmt, "{}", highlight::render_with_highlighting(s, None, None, None))?;
        Ok(())
    }
}

/// Renders a macro item page: the highlighted macro source followed by its
/// documentation.
fn item_macro(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
              t: &clean::Macro) -> fmt::Result {
    w.write_str(&highlight::render_with_highlighting(&t.source, Some("macro"), None, None))?;
    document(w, cx, it)
}

fn item_primitive(w: &mut
fmt::Formatter, cx: &Context, it: &clean::Item, _p: &clean::PrimitiveType) -> fmt::Result { document(w, cx, it)?; render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All) } const BASIC_KEYWORDS: &'static str = "rust, rustlang, rust-lang"; fn make_item_keywords(it: &clean::Item) -> String { format!("{}, {}", BASIC_KEYWORDS, it.name.as_ref().unwrap()) } fn get_index_search_type(item: &clean::Item) -> Option<IndexItemFunctionType> { let decl = match item.inner { clean::FunctionItem(ref f) => &f.decl, clean::MethodItem(ref m) => &m.decl, clean::TyMethodItem(ref m) => &m.decl, _ => return None }; let inputs = decl.inputs.values.iter().map(|arg| get_index_type(&arg.type_)).collect(); let output = match decl.output { clean::FunctionRetTy::Return(ref return_type) => Some(get_index_type(return_type)), _ => None }; Some(IndexItemFunctionType { inputs: inputs, output: output }) } fn get_index_type(clean_type: &clean::Type) -> Type { Type { name: get_index_type_name(clean_type).map(|s| s.to_ascii_lowercase()) } } fn get_index_type_name(clean_type: &clean::Type) -> Option<String> { match *clean_type { clean::ResolvedPath { ref path, .. } => { let segments = &path.segments; Some(segments[segments.len() - 1].name.clone()) }, clean::Generic(ref s) => Some(s.clone()), clean::Primitive(ref p) => Some(format!("{:?}", p)), clean::BorrowedRef { ref type_, .. } => get_index_type_name(type_), // FIXME: add all from clean::Type. 
_ => None
    }
}

/// Returns a handle to the thread-local rustdoc `Cache` (stored in TLS, see
/// the module docs). Only the `Arc` is cloned, so this handle is cheap.
pub fn cache() -> Arc<Cache> {
    CACHE_KEY.with(|c| c.borrow().clone())
}

// Verifies that `derive_id` de-duplicates repeated ids by appending `-N`
// suffixes, and that `reset_ids` clears the seen-id state so the exact same
// sequence is produced a second time.
#[cfg(test)]
#[test]
fn test_unique_id() {
    let input = ["foo", "examples", "examples", "method.into_iter", "examples",
                 "method.into_iter", "foo", "main", "search", "methods",
                 "examples", "method.into_iter", "assoc_type.Item", "assoc_type.Item"];
    let expected = ["foo", "examples", "examples-1", "method.into_iter",
                    "examples-2", "method.into_iter-1", "foo-1", "main-1",
                    "search-1", "methods-1", "examples-3", "method.into_iter-2",
                    "assoc_type.Item", "assoc_type.Item-1"];
    let test = || {
        let actual: Vec<String> = input.iter().map(|s| derive_id(s.to_string())).collect();
        assert_eq!(&actual[..], expected);
    };
    test();
    reset_ids(true);
    test();
}

// Checks how `name_key` decomposes a name into the tuple
// (alphabetic prefix, numeric suffix value, count of leading zeros)
// that drives natural sorting.
#[cfg(test)]
#[test]
fn test_name_key() {
    assert_eq!(name_key("0"), ("", 0, 1));
    assert_eq!(name_key("123"), ("", 123, 0));
    assert_eq!(name_key("Fruit"), ("Fruit", 0, 0));
    assert_eq!(name_key("Fruit0"), ("Fruit", 0, 1));
    assert_eq!(name_key("Fruit0000"), ("Fruit", 0, 4));
    assert_eq!(name_key("Fruit01"), ("Fruit", 1, 1));
    assert_eq!(name_key("Fruit10"), ("Fruit", 10, 0));
    assert_eq!(name_key("Fruit123"), ("Fruit", 123, 0));
}

// Sorting by `name_key` must yield "natural" order: numeric suffixes compare
// by value (Fruit2 < Fruit20 < Fruit100), with the leading-zero count as a
// tiebreaker (Fruit0 < Fruit00). The fixture below is already in the
// expected order, so sorting it must be a no-op.
#[cfg(test)]
#[test]
fn test_name_sorting() {
    let names = ["Apple", "Banana", "Fruit", "Fruit0", "Fruit00",
                 "Fruit1", "Fruit01", "Fruit2", "Fruit02", "Fruit20",
                 "Fruit100", "Pear"];
    let mut sorted = names.to_owned();
    sorted.sort_by_key(|&s| name_key(s));
    assert_eq!(names, sorted);
}

// `match_non_whitespace` compares two strings while ignoring whitespace
// entirely (note "abc xyz" matches "abcxyz"). Cases cover empty strings,
// leading/trailing/internal whitespace, and genuine mismatches.
#[cfg(test)]
#[test]
fn test_match_non_whitespace() {
    assert!(match_non_whitespace("", ""));
    assert!(match_non_whitespace(" ", ""));
    assert!(match_non_whitespace("", " "));
    assert!(match_non_whitespace("a", "a"));
    assert!(match_non_whitespace(" a ", "a"));
    assert!(match_non_whitespace("a", " a"));
    assert!(match_non_whitespace("abc", "abc"));
    assert!(match_non_whitespace("abc", " abc "));
    assert!(match_non_whitespace("abc ", "abc"));
    assert!(match_non_whitespace("abc xyz", "abc xyz"));
    assert!(match_non_whitespace("abc xyz", "abc\nxyz"));
    assert!(match_non_whitespace("abc xyz", "abcxyz"));
    assert!(match_non_whitespace("abcxyz", "abc xyz"));
    assert!(match_non_whitespace("abc xyz ", " abc xyz\n"));
    assert!(!match_non_whitespace("a", "b"));
    assert!(!match_non_whitespace(" a ", "c"));
    assert!(!match_non_whitespace("a", " aa"));
    assert!(!match_non_whitespace("abc", "ac"));
    assert!(!match_non_whitespace("abc", " adc "));
    assert!(!match_non_whitespace("abc ", "abca"));
    assert!(!match_non_whitespace("abc xyz", "abc xy"));
    assert!(!match_non_whitespace("abc xyz", "bc\nxyz"));
    assert!(!match_non_whitespace("abc xyz", "abc.xyz"));
    assert!(!match_non_whitespace("abcxyz", "abc.xyz"));
    assert!(!match_non_whitespace("abc xyz ", " abc xyz w"));
}
// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Rustdoc's HTML Rendering module
//!
//! This module contains the bulk of the logic necessary for rendering a
//! rustdoc `clean::Crate` instance to a set of static HTML pages. This
//! rendering process is largely driven by the `format!` syntax extension to
//! perform all I/O into files and streams.
//!
//! The rendering process is largely driven by the `Context` and `Cache`
//! structures. The cache is pre-populated by crawling the crate in question,
//! and then it is shared among the various rendering threads. The cache is meant
//! to be a fairly large structure not implementing `Clone` (because it's shared
//! among threads). The context, however, should be a lightweight structure. This
//! is cloned per-thread and contains information about what is currently being
//! rendered.
//!
//! In order to speed up rendering (mostly because of markdown rendering), the
//! rendering process has been parallelized. This parallelization is only
//! exposed through the `crate` method on the context, and then also from the
//! fact that the shared cache is stored in TLS (and must be accessed as such).
//!
//! In addition to rendering the crate itself, this module is also responsible
//! for creating the corresponding search index and source file renderings.
//! These threads are not parallelized (they haven't been a bottleneck yet), and
//! both occur before the crate is rendered.
pub use self::ExternalLocation::*; use std::borrow::Cow; use std::cell::RefCell; use std::cmp::Ordering; use std::collections::{BTreeMap, HashSet, VecDeque}; use std::default::Default; use std::error; use std::fmt::{self, Display, Formatter, Write as FmtWrite}; use std::ffi::OsStr; use std::fs::{self, File, OpenOptions}; use std::io::prelude::*; use std::io::{self, BufWriter, BufReader}; use std::iter::repeat; use std::mem; use std::path::{PathBuf, Path, Component}; use std::str; use std::sync::Arc; use externalfiles::ExternalHtml; use serialize::json::{ToJson, Json, as_json}; use syntax::ast; use syntax::codemap::FileName; use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId}; use rustc::middle::privacy::AccessLevels; use rustc::middle::stability; use rustc::hir; use rustc::util::nodemap::{FxHashMap, FxHashSet}; use rustc_data_structures::flock; use clean::{self, AttributesExt, GetDefId, SelfTy, Mutability}; use doctree; use fold::DocFolder; use html::escape::Escape; use html::format::{AsyncSpace, ConstnessSpace}; use html::format::{GenericBounds, WhereClause, href, AbiSpace}; use html::format::{VisSpace, Method, UnsafetySpace, MutableSpace}; use html::format::fmt_impl_for_trait_page; use html::item_type::ItemType; use html::markdown::{self, Markdown, MarkdownHtml, MarkdownSummaryLine}; use html::{highlight, layout}; use minifier; /// A pair of name and its optional document. pub type NameDoc = (String, Option<String>); /// Major driving force in all rustdoc rendering. This contains information /// about where in the tree-like hierarchy rendering is occurring and controls /// how the current page is being rendered. /// /// It is intended that this context is a lightweight object which can be fairly /// easily cloned because it is cloned per work-job (about once per item in the /// rustdoc tree). 
#[derive(Clone)]
pub struct Context {
    /// Current hierarchy of components leading down to what's currently being
    /// rendered
    pub current: Vec<String>,
    /// The current destination folder of where HTML artifacts should be placed.
    /// This changes as the context descends into the module hierarchy.
    pub dst: PathBuf,
    /// A flag, which when `true`, will render pages which redirect to the
    /// real location of an item. This is used to allow external links to
    /// publicly reused items to redirect to the right location.
    pub render_redirect_pages: bool,
    /// State shared between all rendering threads; each cloned `Context`
    /// holds a reference to the same `SharedContext`.
    pub shared: Arc<SharedContext>,
}

/// Configuration and state built once and shared (behind an `Arc`) by every
/// per-thread `Context` clone.
pub struct SharedContext {
    /// The path to the crate root source minus the file name.
    /// Used for simplifying paths to the highlighted source code files.
    pub src_root: PathBuf,
    /// This describes the layout of each page, and is not modified after
    /// creation of the context (contains info like the favicon and added html).
    pub layout: layout::Layout,
    /// This flag indicates whether `[src]` links should be generated or not. If
    /// the source files are present in the html rendering, then this will be
    /// `true`.
    pub include_sources: bool,
    /// The local file sources we've emitted and their respective url-paths.
    pub local_sources: FxHashMap<PathBuf, String>,
    /// All the passes that were run on this crate.
    pub passes: FxHashSet<String>,
    /// The base-URL of the issue tracker for when an item has been tagged with
    /// an issue number.
    pub issue_tracker_base_url: Option<String>,
    /// The given user css file which allows customizing the generated
    /// documentation theme.
    pub css_file_extension: Option<PathBuf>,
    /// The directories that have already been created in this doc run. Used to reduce the number
    /// of spurious `create_dir_all` calls.
    pub created_dirs: RefCell<FxHashSet<PathBuf>>,
    /// This flag indicates whether listings of modules (in the side bar and documentation itself)
    /// should be ordered alphabetically or in order of appearance (in the source code).
pub sort_modules_alphabetically: bool, /// Additional themes to be added to the generated docs. pub themes: Vec<PathBuf>, /// Suffix to be added on resource files (if suffix is "-v2" then "light.css" becomes /// "light-v2.css"). pub resource_suffix: String, } impl SharedContext { fn ensure_dir(&self, dst: &Path) -> io::Result<()> { let mut dirs = self.created_dirs.borrow_mut(); if !dirs.contains(dst) { fs::create_dir_all(dst)?; dirs.insert(dst.to_path_buf()); } Ok(()) } } impl SharedContext { /// Returns whether the `collapse-docs` pass was run on this crate. pub fn was_collapsed(&self) -> bool { self.passes.contains("collapse-docs") } /// Based on whether the `collapse-docs` pass was run, return either the `doc_value` or the /// `collapsed_doc_value` of the given item. pub fn maybe_collapsed_doc_value<'a>(&self, item: &'a clean::Item) -> Option<Cow<'a, str>> { if self.was_collapsed() { item.collapsed_doc_value().map(|s| s.into()) } else { item.doc_value().map(|s| s.into()) } } } /// Indicates where an external crate can be found. pub enum ExternalLocation { /// Remote URL root of the external crate Remote(String), /// This external crate can be found in the local doc/ folder Local, /// The external crate could not be found. Unknown, } /// Metadata about implementations for a type or trait. 
/// Wraps a cleaned impl item, giving rendering code access to both the impl
/// itself and the documentation attached to it.
#[derive(Clone)]
pub struct Impl {
    pub impl_item: clean::Item,
}

impl Impl {
    /// Returns the inner `clean::Impl`.
    ///
    /// Panics if the wrapped item is not actually an impl item; only impl
    /// items are ever stored in an `Impl`, so a panic here is a rustdoc bug.
    fn inner_impl(&self) -> &clean::Impl {
        match self.impl_item.inner {
            clean::ImplItem(ref impl_) => impl_,
            _ => panic!("non-impl item found in impl")
        }
    }

    /// The `DefId` of the implemented trait, or `None` for an inherent impl.
    fn trait_did(&self) -> Option<DefId> {
        self.inner_impl().trait_.def_id()
    }
}

/// An I/O error tagged with the path of the file being written when it occurred.
#[derive(Debug)]
pub struct Error {
    file: PathBuf,
    error: io::Error,
}

impl error::Error for Error {
    fn description(&self) -> &str {
        self.error.description()
    }
}

impl Display for Error {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "\"{}\": {}", self.file.display(), self.error)
    }
}

impl Error {
    /// Wraps `e` together with the path of the file being processed.
    pub fn new(e: io::Error, file: &Path) -> Error {
        Error {
            file: file.to_path_buf(),
            error: e,
        }
    }
}

// Unwraps an `Option`, converting `None` into a rendering `Error` pointing at
// `$file`. Usable only in functions returning `Result<_, Error>`.
macro_rules! try_none {
    ($e:expr, $file:expr) => ({
        use std::io;
        match $e {
            Some(e) => e,
            None => return Err(Error::new(io::Error::new(io::ErrorKind::Other, "not found"),
                                          $file))
        }
    })
}

// Like `?`, but attaches `$file` to the error before propagating it.
macro_rules! try_err {
    ($e:expr, $file:expr) => ({
        match $e {
            Ok(e) => e,
            Err(e) => return Err(Error::new(e, $file)),
        }
    })
}

/// This cache is used to store information about the `clean::Crate` being
/// rendered in order to provide more useful documentation. This contains
/// information like all implementors of a trait, all traits a type implements,
/// documentation for all known traits, etc.
///
/// This structure purposefully does not implement `Clone` because it's intended
/// to be a fairly large and expensive structure to clone. Instead this adheres
/// to `Send` so it may be stored in a `Arc` instance and shared among the various
/// rendering threads.
#[derive(Default)]
pub struct Cache {
    /// Mapping of typaram ids to the name of the type parameter. This is used
    /// when pretty-printing a type (so pretty printing doesn't have to
    /// painfully maintain a context like this)
    pub typarams: FxHashMap<DefId, String>,

    /// Maps a type id to all known implementations for that type. This is only
    /// recognized for intra-crate `ResolvedPath` types, and is used to print
    /// out extra documentation on the page of an enum/struct.
    ///
    /// The values of the map are a list of implementations and documentation
    /// found on that implementation.
    pub impls: FxHashMap<DefId, Vec<Impl>>,

    /// Maintains a mapping of local crate node ids to the fully qualified name
    /// and "short type description" of that node. This is used when generating
    /// URLs when a type is being linked to. External paths are not located in
    /// this map because the `External` type itself has all the information
    /// necessary.
    pub paths: FxHashMap<DefId, (Vec<String>, ItemType)>,

    /// Similar to `paths`, but only holds external paths. This is only used for
    /// generating explicit hyperlinks to other crates.
    pub external_paths: FxHashMap<DefId, (Vec<String>, ItemType)>,

    /// Maps local def ids of exported types to fully qualified paths.
    /// Unlike 'paths', this mapping ignores any renames that occur
    /// due to 'use' statements.
    ///
    /// This map is used when writing out the special 'implementors'
    /// javascript file. By using the exact path that the type
    /// is declared with, we ensure that each path will be identical
    /// to the path used if the corresponding type is inlined. By
    /// doing this, we can detect duplicate impls on a trait page, and only display
    /// the impl for the inlined type.
    pub exact_paths: FxHashMap<DefId, Vec<String>>,

    /// This map contains information about all known traits of this crate.
    /// Implementations of a crate should inherit the documentation of the
    /// parent trait if no extra documentation is specified, and default methods
    /// should show up in documentation about trait implementations.
    pub traits: FxHashMap<DefId, clean::Trait>,

    /// When rendering traits, it's often useful to be able to list all
    /// implementors of the trait, and this mapping is exactly that: a mapping
    /// of trait ids to the list of known implementors of the trait
    pub implementors: FxHashMap<DefId, Vec<Impl>>,

    /// Cache of where external crate documentation can be found.
    pub extern_locations: FxHashMap<CrateNum, (String, PathBuf, ExternalLocation)>,

    /// Cache of where documentation for primitives can be found.
    pub primitive_locations: FxHashMap<clean::PrimitiveType, DefId>,

    // Note that external items for which `doc(hidden)` applies to are shown as
    // non-reachable while local items aren't. This is because we're reusing
    // the access levels from crate analysis.
    pub access_levels: Arc<AccessLevels<DefId>>,

    /// The version of the crate being documented, if given from the `--crate-version` flag.
    pub crate_version: Option<String>,

    // Private fields only used when initially crawling a crate to build a cache

    // Stack of module names reflecting the current position in the module tree
    // while folding the crate.
    stack: Vec<String>,
    // Stack of enclosing type/trait ids, used to attribute methods to parents.
    parent_stack: Vec<DefId>,
    parent_is_trait_impl: bool,
    search_index: Vec<IndexItem>,
    stripped_mod: bool,
    deref_trait_did: Option<DefId>,
    deref_mut_trait_did: Option<DefId>,
    owned_box_did: Option<DefId>,
    masked_crates: FxHashSet<CrateNum>,

    // In rare case where a structure is defined in one module but implemented
    // in another, if the implementing module is parsed before defining module,
    // then the fully qualified name of the structure isn't presented in `paths`
    // yet when its implementation methods are being indexed. Caches such methods
    // and their parent id here and indexes them at the end of crate parsing.
    orphan_impl_items: Vec<(DefId, clean::Item)>,

    /// Aliases added through `#[doc(alias = "...")]`. Since a few items can have the same alias,
    /// we need the alias element to have an array of items.
    aliases: FxHashMap<String, Vec<IndexItem>>,
}

/// Temporary storage for data obtained during `RustdocVisitor::clean()`.
/// Later on moved into `CACHE_KEY`.
#[derive(Default)]
pub struct RenderInfo {
    pub inlined: FxHashSet<DefId>,
    pub external_paths: ::core::ExternalPaths,
    pub external_typarams: FxHashMap<DefId, String>,
    pub exact_paths: FxHashMap<DefId, Vec<String>>,
    pub deref_trait_did: Option<DefId>,
    pub deref_mut_trait_did: Option<DefId>,
    pub owned_box_did: Option<DefId>,
}

/// Helper struct to render all source code to HTML pages
struct SourceCollector<'a> {
    scx: &'a mut SharedContext,

    /// Root destination to place all HTML output into
    dst: PathBuf,
}

/// Wrapper struct to render the source code of a file. This will do things like
/// adding line numbers to the left-hand side.
struct Source<'a>(&'a str);

// Helper structs for rendering items/sidebars and carrying along contextual
// information

#[derive(Copy, Clone)]
struct Item<'a> {
    cx: &'a Context,
    item: &'a clean::Item,
}

struct Sidebar<'a> { cx: &'a Context, item: &'a clean::Item, }

/// Struct representing one entry in the JS search index. These are all emitted
/// by hand to a large JS file at the end of cache-creation.
#[derive(Debug)]
struct IndexItem {
    ty: ItemType,
    name: String,
    path: String,
    desc: String,
    parent: Option<DefId>,
    parent_idx: Option<usize>,
    search_type: Option<IndexItemFunctionType>,
}

impl ToJson for IndexItem {
    /// Serializes as a fixed-order JSON array; the search JS reads fields by
    /// position, so the push order below is part of the format.
    fn to_json(&self) -> Json {
        // An item has a parent index if and only if it has a parent.
        assert_eq!(self.parent.is_some(), self.parent_idx.is_some());

        let mut data = Vec::with_capacity(6);
        data.push((self.ty as usize).to_json());
        data.push(self.name.to_json());
        data.push(self.path.to_json());
        data.push(self.desc.to_json());
        data.push(self.parent_idx.to_json());
        data.push(self.search_type.to_json());

        Json::Array(data)
    }
}

/// A type used for the search index.
#[derive(Debug)]
struct Type {
    name: Option<String>,
    generics: Option<Vec<String>>,
}

impl ToJson for Type {
    /// Emits `{"n": name, "g": [generics...]}` ("g" only when generics are
    /// present), or `null` when the type has no name.
    fn to_json(&self) -> Json {
        match self.name {
            Some(ref name) => {
                let mut data = BTreeMap::new();
                data.insert("n".to_owned(), name.to_json());
                if let Some(ref generics) = self.generics {
                    data.insert("g".to_owned(), generics.to_json());
                }
                Json::Object(data)
            },
            None => Json::Null
        }
    }
}

/// Full type of functions/methods in the search index.
#[derive(Debug)]
struct IndexItemFunctionType {
    inputs: Vec<Type>,
    output: Option<Type>,
}

impl ToJson for IndexItemFunctionType {
    /// Emits `{"i": [inputs...], "o": output}` with empty/absent parts omitted.
    fn to_json(&self) -> Json {
        // If we couldn't figure out a type, just write `null`.
        if self.inputs.iter().chain(self.output.iter()).any(|ref i| i.name.is_none()) {
            Json::Null
        } else {
            let mut data = BTreeMap::new();
            if !self.inputs.is_empty() {
                data.insert("i".to_owned(), self.inputs.to_json());
            }
            if let Some(ref output) = self.output {
                data.insert("o".to_owned(), output.to_json());
            }
            Json::Object(data)
        }
    }
}

// Per-thread handle on the frozen `Cache` built during `run`.
thread_local!(static CACHE_KEY: RefCell<Arc<Cache>> = Default::default());
// Module path of the page currently being rendered on this thread.
thread_local!(pub static CURRENT_LOCATION_KEY: RefCell<Vec<String>> =
                    RefCell::new(Vec::new()));
// HTML `id` attributes already used on the current page; see `derive_id`.
thread_local!(pub static USED_ID_MAP: RefCell<FxHashMap<String, usize>> =
                    RefCell::new(init_ids()));

/// Seeds the used-ID map with the ids the page template itself emits, so that
/// `derive_id` never hands one of them out for an item anchor.
fn init_ids() -> FxHashMap<String, usize> {
    [
     "main",
     "search",
     "help",
     "TOC",
     "render-detail",
     "associated-types",
     "associated-const",
     "required-methods",
     "provided-methods",
     "implementors",
     "synthetic-implementors",
     "implementors-list",
     "synthetic-implementors-list",
     "methods",
     "deref-methods",
     "implementations",
     ].into_iter().map(|id| (String::from(*id), 1)).collect()
}

/// This method resets the local table of used ID attributes. This is typically
/// used at the beginning of rendering an entire HTML page to reset from the
/// previous state (if any).
pub fn reset_ids(embedded: bool) {
    USED_ID_MAP.with(|s| {
        *s.borrow_mut() = if embedded {
            // Embedded pages still reserve the template's own ids.
            init_ids()
        } else {
            FxHashMap()
        };
    });
}

/// Returns `candidate` if it has not been used on this page yet; otherwise
/// returns `candidate-N` using the stored counter. Either way the result is
/// recorded as used.
pub fn derive_id(candidate: String) -> String {
    USED_ID_MAP.with(|map| {
        let id = match map.borrow_mut().get_mut(&candidate) {
            None => candidate,
            Some(a) => {
                let id = format!("{}-{}", candidate, *a);
                *a += 1;
                id
            }
        };

        map.borrow_mut().insert(id.clone(), 1);
        id
    })
}

/// Generates the documentation for `crate` into the directory `dst`
pub fn run(mut krate: clean::Crate,
           external_html: &ExternalHtml,
           playground_url: Option<String>,
           dst: PathBuf,
           resource_suffix: String,
           passes: FxHashSet<String>,
           css_file_extension: Option<PathBuf>,
           renderinfo: RenderInfo,
           sort_modules_alphabetically: bool,
           themes: Vec<PathBuf>,
           enable_minification: bool) -> Result<(), Error> {
    // The source root is the parent of the crate root file, when it is a real
    // on-disk file; synthetic file names get an empty root.
    let src_root = match krate.src {
        FileName::Real(ref p) => match p.parent() {
            Some(p) => p.to_path_buf(),
            None => PathBuf::new(),
        },
        _ => PathBuf::new(),
    };
    let mut scx = SharedContext {
        src_root,
        passes,
        include_sources: true,
        local_sources: FxHashMap(),
        issue_tracker_base_url: None,
        layout: layout::Layout {
            logo: "".to_string(),
            favicon: "".to_string(),
            external_html: external_html.clone(),
            krate: krate.name.clone(),
        },
        css_file_extension: css_file_extension.clone(),
        created_dirs: RefCell::new(FxHashSet()),
        sort_modules_alphabetically,
        themes,
        resource_suffix,
    };

    // If user passed in `--playground-url` arg, we fill in crate name here
    if let Some(url) = playground_url {
        markdown::PLAYGROUND.with(|slot| {
            *slot.borrow_mut() = Some((Some(krate.name.clone()), url));
        });
    }

    // Crawl the crate attributes looking for attributes which control how we're
    // going to emit HTML
    if let Some(attrs) = krate.module.as_ref().map(|m| &m.attrs) {
        for attr in attrs.lists("doc") {
            let name = attr.name().map(|s| s.as_str());
            match (name.as_ref().map(|s| &s[..]), attr.value_str()) {
                (Some("html_favicon_url"), Some(s)) => {
                    scx.layout.favicon = s.to_string();
                }
                (Some("html_logo_url"), Some(s)) => {
                    scx.layout.logo = s.to_string();
                }
                (Some("html_playground_url"), Some(s)) => {
                    markdown::PLAYGROUND.with(|slot| {
                        let name = krate.name.clone();
                        *slot.borrow_mut() = Some((Some(name), s.to_string()));
                    });
                }
                (Some("issue_tracker_base_url"), Some(s)) => {
                    scx.issue_tracker_base_url = Some(s.to_string());
                }
                (Some("html_no_source"), None) if attr.is_word() => {
                    scx.include_sources = false;
                }
                _ => {}
            }
        }
    }
    try_err!(fs::create_dir_all(&dst), &dst);
    krate = render_sources(&dst, &mut scx, krate)?;
    let cx = Context {
        current: Vec::new(),
        dst,
        render_redirect_pages: false,
        shared: Arc::new(scx),
    };

    // Crawl the crate to build various caches used for the output
    let RenderInfo {
        inlined: _,
        external_paths,
        external_typarams,
        exact_paths,
        deref_trait_did,
        deref_mut_trait_did,
        owned_box_did,
    } = renderinfo;

    let external_paths = external_paths.into_iter()
        .map(|(k, (v, t))| (k, (v, ItemType::from(t))))
        .collect();

    let mut cache = Cache {
        impls: FxHashMap(),
        external_paths,
        exact_paths,
        paths: FxHashMap(),
        implementors: FxHashMap(),
        stack: Vec::new(),
        parent_stack: Vec::new(),
        search_index: Vec::new(),
        parent_is_trait_impl: false,
        extern_locations: FxHashMap(),
        primitive_locations: FxHashMap(),
        stripped_mod: false,
        access_levels: krate.access_levels.clone(),
        crate_version: krate.version.take(),
        orphan_impl_items: Vec::new(),
        traits: mem::replace(&mut krate.external_traits, FxHashMap()),
        deref_trait_did,
        deref_mut_trait_did,
        owned_box_did,
        masked_crates: mem::replace(&mut krate.masked_crates, FxHashSet()),
        typarams: external_typarams,
        aliases: FxHashMap(),
    };

    // Cache where all our extern crates are located
    for &(n, ref e) in &krate.externs {
        let src_root = match e.src {
            FileName::Real(ref p) => match p.parent() {
                Some(p) => p.to_path_buf(),
                None => PathBuf::new(),
            },
            _ => PathBuf::new(),
        };
        cache.extern_locations.insert(n, (e.name.clone(), src_root,
                                          extern_location(e, &cx.dst)));

        let did = DefId { krate: n, index: CRATE_DEF_INDEX };
        cache.external_paths.insert(did, (vec![e.name.to_string()], ItemType::Module));
    }

    // Cache where all known primitives have their documentation located.
    //
    // Favor linking to as local extern as possible, so iterate all crates in
    // reverse topological order.
    for &(_, ref e) in krate.externs.iter().rev() {
        for &(def_id, prim, _) in &e.primitives {
            cache.primitive_locations.insert(prim, def_id);
        }
    }
    // Local primitive definitions take precedence over any extern ones.
    for &(def_id, prim, _) in &krate.primitives {
        cache.primitive_locations.insert(prim, def_id);
    }

    cache.stack.push(krate.name.clone());
    krate = cache.fold_crate(krate);

    // Build our search index
    let index = build_index(&krate, &mut cache);

    // Freeze the cache now that the index has been built. Put an Arc into TLS
    // for future parallelization opportunities
    let cache = Arc::new(cache);
    CACHE_KEY.with(|v| *v.borrow_mut() = cache.clone());
    CURRENT_LOCATION_KEY.with(|s| s.borrow_mut().clear());

    write_shared(&cx, &krate, &*cache, index, enable_minification)?;

    // And finally render the whole crate's documentation
    cx.krate(krate)
}

/// Build the search index from the collected metadata
fn build_index(krate: &clean::Crate, cache: &mut Cache) -> String {
    let mut nodeid_to_pathid = FxHashMap();
    let mut crate_items = Vec::with_capacity(cache.search_index.len());
    let mut crate_paths = Vec::<Json>::new();

    let Cache { ref mut search_index,
                ref orphan_impl_items,
                ref mut paths, .. } = *cache;

    // Attach all orphan items to the type's definition if the type
    // has since been learned.
    for &(did, ref item) in orphan_impl_items {
        if let Some(&(ref fqp, _)) = paths.get(&did) {
            search_index.push(IndexItem {
                ty: item.type_(),
                name: item.name.clone().unwrap(),
                path: fqp[..fqp.len() - 1].join("::"),
                desc: plain_summary_line(item.doc_value()),
                parent: Some(did),
                parent_idx: None,
                search_type: get_index_search_type(&item),
            });
        }
    }

    // Reduce `NodeId` in paths into smaller sequential numbers,
    // and prune the paths that do not appear in the index.
    let mut lastpath = String::new();
    let mut lastpathid = 0usize;

    for item in search_index {
        // Map each parent `DefId` to a small sequential path id, emitting the
        // path entry the first time the parent is seen.
        item.parent_idx = item.parent.map(|nodeid| {
            if nodeid_to_pathid.contains_key(&nodeid) {
                *nodeid_to_pathid.get(&nodeid).unwrap()
            } else {
                let pathid = lastpathid;
                nodeid_to_pathid.insert(nodeid, pathid);
                lastpathid += 1;

                let &(ref fqp, short) = paths.get(&nodeid).unwrap();
                crate_paths.push(((short as usize), fqp.last().unwrap().clone()).to_json());
                pathid
            }
        });

        // Omit the parent path if it is same to that of the prior item.
        if lastpath == item.path {
            item.path.clear();
        } else {
            lastpath = item.path.clone();
        }
        crate_items.push(item.to_json());
    }

    let crate_doc = krate.module.as_ref().map(|module| {
        plain_summary_line(module.doc_value())
    }).unwrap_or(String::new());

    let mut crate_data = BTreeMap::new();
    crate_data.insert("doc".to_owned(), Json::String(crate_doc));
    crate_data.insert("items".to_owned(), Json::Array(crate_items));
    crate_data.insert("paths".to_owned(), Json::Array(crate_paths));

    // Collect the index into a string
    format!("searchIndex[{}] = {};",
            as_json(&krate.name),
            Json::Object(crate_data))
}

/// Writes the files shared by every crate documented into `cx.dst`: static
/// CSS/JS/fonts, theme files, the cross-crate search index, the `#[doc(alias)]`
/// table, and the per-trait implementor lists.
fn write_shared(cx: &Context,
                krate: &clean::Crate,
                cache: &Cache,
                search_index: String,
                enable_minification: bool) -> Result<(), Error> {
    // Write out the shared files. Note that these are shared among all rustdoc
    // docs placed in the output directory, so this needs to be a synchronized
    // operation with respect to all other rustdocs running around.
    let _lock = flock::Lock::panicking_new(&cx.dst.join(".lock"), true, true, true);

    // Add all the static files. These may already exist, but we just
    // overwrite them anyway to make sure that they're fresh and up-to-date.
    write_minify(cx.dst.join(&format!("rustdoc{}.css", cx.shared.resource_suffix)),
                 include_str!("static/rustdoc.css"),
                 enable_minification)?;
    write_minify(cx.dst.join(&format!("settings{}.css", cx.shared.resource_suffix)),
                 include_str!("static/settings.css"),
                 enable_minification)?;

    // To avoid "light.css" from being overwritten, we'll first run over the received themes and
    // only then we'll run over the "official" styles.
    let mut themes: HashSet<String> = HashSet::new();

    for entry in &cx.shared.themes {
        let mut content = Vec::with_capacity(100000);

        let mut f = try_err!(File::open(&entry), &entry);
        try_err!(f.read_to_end(&mut content), &entry);
        let theme = try_none!(try_none!(entry.file_stem(), &entry).to_str(), &entry);
        let extension = try_none!(try_none!(entry.extension(), &entry).to_str(), &entry);
        write(cx.dst.join(format!("{}{}.{}", theme, cx.shared.resource_suffix, extension)),
              content.as_slice())?;
        themes.insert(theme.to_owned());
    }

    write(cx.dst.join(&format!("brush{}.svg", cx.shared.resource_suffix)),
          include_bytes!("static/brush.svg"))?;
    write(cx.dst.join(&format!("wheel{}.svg", cx.shared.resource_suffix)),
          include_bytes!("static/wheel.svg"))?;
    write_minify(cx.dst.join(&format!("light{}.css", cx.shared.resource_suffix)),
                 include_str!("static/themes/light.css"),
                 enable_minification)?;
    themes.insert("light".to_owned());
    write_minify(cx.dst.join(&format!("dark{}.css", cx.shared.resource_suffix)),
                 include_str!("static/themes/dark.css"),
                 enable_minification)?;
    themes.insert("dark".to_owned());

    // Sort for deterministic output across runs.
    let mut themes: Vec<&String> = themes.iter().collect();
    themes.sort();
    // To avoid theme switch latencies as much as possible, we put everything theme related
    // at the beginning of the html files into another js file.
    write(cx.dst.join(&format!("theme{}.js", cx.shared.resource_suffix)),
          format!(
r#"var themes = document.getElementById("theme-choices");
var themePicker = document.getElementById("theme-picker");
function switchThemeButtonState() {{
  if (themes.style.display === "block") {{
    themes.style.display = "none";
    themePicker.style.borderBottomRightRadius = "3px";
    themePicker.style.borderBottomLeftRadius = "3px";
  }} else {{
    themes.style.display = "block";
    themePicker.style.borderBottomRightRadius = "0";
    themePicker.style.borderBottomLeftRadius = "0";
  }}
}};
function handleThemeButtonsBlur(e) {{
  var active = document.activeElement;
  var related = e.relatedTarget;

  if (active.id !== "themePicker" &&
      (!active.parentNode || active.parentNode.id !== "theme-choices") &&
      (!related ||
       (related.id !== "themePicker" &&
        (!related.parentNode || related.parentNode.id !== "theme-choices")))) {{
    switchThemeButtonState();
  }}
}}
themePicker.onclick = switchThemeButtonState;
themePicker.onblur = handleThemeButtonsBlur;
[{}].forEach(function(item) {{
  var but = document.createElement('button');
  but.innerHTML = item;
  but.onclick = function(el) {{
    switchTheme(currentTheme, mainTheme, item);
  }};
  but.onblur = handleThemeButtonsBlur;
  themes.appendChild(but);
}});"#, themes.iter()
             .map(|s| format!("\"{}\"", s))
             .collect::<Vec<String>>()
             .join(",")).as_bytes(),
    )?;

    write_minify(cx.dst.join(&format!("main{}.js", cx.shared.resource_suffix)),
                 include_str!("static/main.js"),
                 enable_minification)?;
    write_minify(cx.dst.join(&format!("settings{}.js", cx.shared.resource_suffix)),
                 include_str!("static/settings.js"),
                 enable_minification)?;

    {
        // Prepend the resource suffix so storage.js can locate the other files.
        let mut data = format!("var resourcesSuffix = \"{}\";\n",
                               cx.shared.resource_suffix);
        data.push_str(include_str!("static/storage.js"));
        write_minify(cx.dst.join(&format!("storage{}.js", cx.shared.resource_suffix)),
                     &data,
                     enable_minification)?;
    }

    if let Some(ref css) = cx.shared.css_file_extension {
        let out = cx.dst.join(&format!("theme{}.css", cx.shared.resource_suffix));
        if !enable_minification {
            try_err!(fs::copy(css, out), css);
        } else {
            let mut f = try_err!(File::open(css), css);
            let mut buffer = String::with_capacity(1000);

            try_err!(f.read_to_string(&mut buffer), css);
            write_minify(out, &buffer, enable_minification)?;
        }
    }
    write_minify(cx.dst.join(&format!("normalize{}.css", cx.shared.resource_suffix)),
                 include_str!("static/normalize.css"),
                 enable_minification)?;
    write(cx.dst.join("FiraSans-Regular.woff"),
          include_bytes!("static/FiraSans-Regular.woff"))?;
    write(cx.dst.join("FiraSans-Medium.woff"),
          include_bytes!("static/FiraSans-Medium.woff"))?;
    write(cx.dst.join("FiraSans-LICENSE.txt"),
          include_bytes!("static/FiraSans-LICENSE.txt"))?;
    write(cx.dst.join("Heuristica-Italic.woff"),
          include_bytes!("static/Heuristica-Italic.woff"))?;
    write(cx.dst.join("Heuristica-LICENSE.txt"),
          include_bytes!("static/Heuristica-LICENSE.txt"))?;
    write(cx.dst.join("SourceSerifPro-Regular.woff"),
          include_bytes!("static/SourceSerifPro-Regular.woff"))?;
    write(cx.dst.join("SourceSerifPro-Bold.woff"),
          include_bytes!("static/SourceSerifPro-Bold.woff"))?;
    write(cx.dst.join("SourceSerifPro-LICENSE.txt"),
          include_bytes!("static/SourceSerifPro-LICENSE.txt"))?;
    write(cx.dst.join("SourceCodePro-Regular.woff"),
          include_bytes!("static/SourceCodePro-Regular.woff"))?;
    write(cx.dst.join("SourceCodePro-Semibold.woff"),
          include_bytes!("static/SourceCodePro-Semibold.woff"))?;
    write(cx.dst.join("SourceCodePro-LICENSE.txt"),
          include_bytes!("static/SourceCodePro-LICENSE.txt"))?;
    write(cx.dst.join("LICENSE-MIT.txt"),
          include_bytes!("static/LICENSE-MIT.txt"))?;
    write(cx.dst.join("LICENSE-APACHE.txt"),
          include_bytes!("static/LICENSE-APACHE.txt"))?;
    write(cx.dst.join("COPYRIGHT.txt"),
          include_bytes!("static/COPYRIGHT.txt"))?;

    // Collects lines from an existing shared JS file (e.g. a previous
    // search-index.js), skipping lines that don't start with `key` and lines
    // previously emitted for this same crate (they are about to be replaced).
    fn collect(path: &Path, krate: &str, key: &str) -> io::Result<Vec<String>> {
        let mut ret = Vec::new();
        if path.exists() {
            for line in BufReader::new(File::open(path)?).lines() {
                let line = line?;
                if !line.starts_with(key) {
                    continue;
                }
                if line.starts_with(&format!(r#"{}["{}"]"#, key, krate)) {
                    continue;
                }
                ret.push(line.to_string());
            }
        }
        Ok(ret)
    }

    // Formats one alias entry as a JS object literal for aliases.js.
    fn show_item(item: &IndexItem, krate: &str) -> String {
        format!("{{'crate':'{}','ty':{},'name':'{}','desc':'{}','p':'{}'{}}}",
                krate, item.ty as usize, item.name, item.desc.replace("'", "\\'"),
                item.path,
                if let Some(p) = item.parent_idx {
                    format!(",'parent':{}", p)
                } else {
                    String::new()
                })
    }

    let dst = cx.dst.join("aliases.js");
    {
        let mut all_aliases = try_err!(collect(&dst, &krate.name, "ALIASES"), &dst);
        let mut w = try_err!(File::create(&dst), &dst);
        let mut output = String::with_capacity(100);
        for (alias, items) in &cache.aliases {
            if items.is_empty() {
                continue
            }
            output.push_str(&format!("\"{}\":[{}],",
                                     alias,
                                     items.iter()
                                          .map(|v| show_item(v, &krate.name))
                                          .collect::<Vec<_>>()
                                          .join(",")));
        }
        all_aliases.push(format!("ALIASES['{}'] = {{{}}};", krate.name, output));
        all_aliases.sort();
        try_err!(writeln!(&mut w, "var ALIASES = {{}};"), &dst);
        for aliases in &all_aliases {
            try_err!(writeln!(&mut w, "{}", aliases), &dst);
        }
    }

    // Update the search index
    let dst = cx.dst.join("search-index.js");
    let mut all_indexes = try_err!(collect(&dst, &krate.name, "searchIndex"), &dst);
    all_indexes.push(search_index);
    // Sort the indexes by crate so the file will be generated identically even
    // with rustdoc running in parallel.
    all_indexes.sort();
    let mut w = try_err!(File::create(&dst), &dst);
    try_err!(writeln!(&mut w, "var searchIndex = {{}};"), &dst);
    for index in &all_indexes {
        try_err!(writeln!(&mut w, "{}", *index), &dst);
    }
    try_err!(writeln!(&mut w, "initSearch(searchIndex);"), &dst);

    // Update the list of all implementors for traits
    let dst = cx.dst.join("implementors");
    for (&did, imps) in &cache.implementors {
        // Private modules can leak through to this phase of rustdoc, which
        // could contain implementations for otherwise private types. In some
        // rare cases we could find an implementation for an item which wasn't
        // indexed, so we just skip this step in that case.
        //
        // FIXME: this is a vague explanation for why this can't be a `get`, in
        // theory it should be...
        let &(ref remote_path, remote_item_type) = match cache.paths.get(&did) {
            Some(p) => p,
            None => match cache.external_paths.get(&did) {
                Some(p) => p,
                None => continue,
            }
        };

        let mut have_impls = false;
        let mut implementors = format!(r#"implementors["{}"] = ["#, krate.name);
        for imp in imps {
            // If the trait and implementation are in the same crate, then
            // there's no need to emit information about it (there's inlining
            // going on). If they're in different crates then the crate defining
            // the trait will be interested in our implementation.
            if imp.impl_item.def_id.krate == did.krate { continue }
            // If the implementation is from another crate then that crate
            // should add it.
            if !imp.impl_item.def_id.is_local() { continue }
            have_impls = true;
            write!(implementors, "{{text:{},synthetic:{},types:{}}},",
                   as_json(&imp.inner_impl().to_string()),
                   imp.inner_impl().synthetic,
                   as_json(&collect_paths_for_type(imp.inner_impl().for_.clone()))).unwrap();
        }
        implementors.push_str("];");

        // Only create a js file if we have impls to add to it. If the trait is
        // documented locally though we always create the file to avoid dead
        // links.
        if !have_impls && !cache.paths.contains_key(&did) {
            continue;
        }

        // Mirror the trait's module path under the implementors/ directory.
        let mut mydst = dst.clone();
        for part in &remote_path[..remote_path.len() - 1] {
            mydst.push(part);
        }
        try_err!(fs::create_dir_all(&mydst), &mydst);
        mydst.push(&format!("{}.{}.js",
                            remote_item_type.css_class(),
                            remote_path[remote_path.len() - 1]));

        let mut all_implementors = try_err!(collect(&mydst, &krate.name, "implementors"),
                                            &mydst);
        all_implementors.push(implementors);
        // Sort the implementors by crate so the file will be generated
        // identically even with rustdoc running in parallel.
        all_implementors.sort();

        let mut f = try_err!(File::create(&mydst), &mydst);
        try_err!(writeln!(&mut f, "(function() {{var implementors = {{}};"), &mydst);
        for implementor in &all_implementors {
            try_err!(writeln!(&mut f, "{}", *implementor), &mydst);
        }
        try_err!(writeln!(&mut f, "{}", r"
            if (window.register_implementors) {
                window.register_implementors(implementors);
            } else {
                window.pending_implementors = implementors;
            }
        "), &mydst);
        try_err!(writeln!(&mut f, r"}})()"), &mydst);
    }
    Ok(())
}

/// Emits an HTML page for every local source file under `dst/src/<krate>`,
/// returning the (possibly modified) crate from the fold.
fn render_sources(dst: &Path, scx: &mut SharedContext,
                  krate: clean::Crate) -> Result<clean::Crate, Error> {
    info!("emitting source files");
    let dst = dst.join("src").join(&krate.name);
    try_err!(fs::create_dir_all(&dst), &dst);
    let mut folder = SourceCollector {
        dst,
        scx,
    };
    Ok(folder.fold_crate(krate))
}

/// Writes the entire contents of a string to a destination, not attempting to
/// catch any errors.
fn write(dst: PathBuf, contents: &[u8]) -> Result<(), Error> {
    Ok(try_err!(fs::write(&dst, contents), &dst))
}

/// Like `write`, but optionally runs the contents through the CSS or JS
/// minifier first (chosen by the destination's file extension).
fn write_minify(dst: PathBuf, contents: &str, enable_minification: bool) -> Result<(), Error> {
    if enable_minification {
        if dst.extension() == Some(&OsStr::new("css")) {
            let res = try_none!(minifier::css::minify(contents).ok(), &dst);
            write(dst, res.as_bytes())
        } else {
            write(dst, minifier::js::minify(contents).as_bytes())
        }
    } else {
        write(dst, contents.as_bytes())
    }
}

/// Takes a path to a source file and cleans the path to it. This canonicalizes
/// things like ".." to components which preserve the "top down" hierarchy of a
/// static HTML tree. Each component in the cleaned path will be passed as an
/// argument to `f`. The very last component of the path (ie the file name) will
/// be passed to `f` if `keep_filename` is true, and ignored otherwise.
// FIXME (#9639): The closure should deal with &[u8] instead of &str
// FIXME (#9639): This is too conservative, rejecting non-UTF-8 paths
fn clean_srcpath<F>(src_root: &Path, p: &Path, keep_filename: bool, mut f: F) where
    F: FnMut(&str),
{
    // make it relative, if possible
    let p = p.strip_prefix(src_root).unwrap_or(p);

    let mut iter = p.components().peekable();

    while let Some(c) = iter.next() {
        if !keep_filename && iter.peek().is_none() {
            break;
        }

        match c {
            // ".." is mapped to a literal "up" directory in the output tree.
            Component::ParentDir => f("up"),
            Component::Normal(c) => f(c.to_str().unwrap()),
            _ => continue,
        }
    }
}

/// Attempts to find where an external crate is located, given that we're
/// rendering into the specified source destination.
fn extern_location(e: &clean::ExternalCrate, dst: &Path) -> ExternalLocation {
    // See if there's documentation generated into the local directory
    let local_location = dst.join(&e.name);
    if local_location.is_dir() {
        return Local;
    }

    // Failing that, see if there's an attribute specifying where to find this
    // external crate
    e.attrs.lists("doc")
     .filter(|a| a.check_name("html_root_url"))
     .filter_map(|a| a.value_str())
     .map(|url| {
        let mut url = url.to_string();
        if !url.ends_with("/") {
            url.push('/')
        }
        Remote(url)
    }).next().unwrap_or(Unknown) // Well, at least we tried.
}

impl<'a> DocFolder for SourceCollector<'a> {
    fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> {
        // If we're including source files, and we haven't seen this file yet,
        // then we need to render it out to the filesystem.
        if self.scx.include_sources
            // skip all invalid or macro spans
            && item.source.filename.is_real()
            // skip non-local items
            && item.def_id.is_local() {

            // If it turns out that we couldn't read this file, then we probably
            // can't read any of the files (generating html output from json or
            // something like that), so just don't include sources for the
            // entire crate. The other option is maintaining this mapping on a
            // per-file basis, but that's probably not worth it...
            self.scx
                .include_sources = match self.emit_source(&item.source.filename) {
                Ok(()) => true,
                Err(e) => {
                    println!("warning: source code was requested to be rendered, \
                              but processing `{}` had an error: {}",
                             item.source.filename, e);
                    println!("         skipping rendering of source code");
                    false
                }
            };
        }
        self.fold_item_recur(item)
    }
}

impl<'a> SourceCollector<'a> {
    /// Renders the given filename into its corresponding HTML source file.
    fn emit_source(&mut self, filename: &FileName) -> io::Result<()> {
        let p = match *filename {
            FileName::Real(ref file) => file,
            _ => return Ok(()),
        };
        if self.scx.local_sources.contains_key(&**p) {
            // We've already emitted this source
            return Ok(());
        }

        let contents = fs::read_to_string(&p)?;

        // Remove the utf-8 BOM if any
        let contents = if contents.starts_with("\u{feff}") {
            &contents[3..]
        } else {
            &contents[..]
        };

        // Create the intermediate directories
        let mut cur = self.dst.clone();
        let mut root_path = String::from("../../");
        let mut href = String::new();
        clean_srcpath(&self.scx.src_root, &p, false, |component| {
            cur.push(component);
            fs::create_dir_all(&cur).unwrap();
            root_path.push_str("../");
            href.push_str(component);
            href.push('/');
        });
        // The page for `foo.rs` is emitted as `foo.rs.html`.
        let mut fname = p.file_name().expect("source has no filename")
                         .to_os_string();
        fname.push(".html");
        cur.push(&fname);
        href.push_str(&fname.to_string_lossy());

        let mut w = BufWriter::new(File::create(&cur)?);
        let title = format!("{} -- source", cur.file_name().unwrap()
                                               .to_string_lossy());
        let desc = format!("Source to the Rust file `{}`.", filename);
        let page = layout::Page {
            title: &title,
            css_class: "source",
            root_path: &root_path,
            description: &desc,
            keywords: BASIC_KEYWORDS,
            resource_suffix: &self.scx.resource_suffix,
        };
        layout::render(&mut w, &self.scx.layout,
                       &page, &(""), &Source(contents),
                       self.scx.css_file_extension.is_some(),
                       &self.scx.themes)?;
        w.flush()?;
        // Remember the href so links to this file can be generated later.
        self.scx.local_sources.insert(p.clone(), href);
        Ok(())
    }
}

impl DocFolder for Cache {
    fn fold_item(&mut self, item: clean::Item) ->
Option<clean::Item> {
        // If this is a stripped module,
        // we don't want it or its children in the search index.
        let orig_stripped_mod = match item.inner {
            clean::StrippedItem(box clean::ModuleItem(..)) => {
                mem::replace(&mut self.stripped_mod, true)
            }
            _ => self.stripped_mod,
        };

        // If the impl is from a masked crate or references something from a
        // masked crate then remove it completely.
        if let clean::ImplItem(ref i) = item.inner {
            if self.masked_crates.contains(&item.def_id.krate) ||
               i.trait_.def_id().map_or(false, |d| self.masked_crates.contains(&d.krate)) ||
               i.for_.def_id().map_or(false, |d| self.masked_crates.contains(&d.krate)) {
                return None;
            }
        }

        // Register any generics to their corresponding string. This is used
        // when pretty-printing types.
        if let Some(generics) = item.inner.generics() {
            self.generics(generics);
        }

        // Propagate a trait method's documentation to all implementors of the
        // trait.
        if let clean::TraitItem(ref t) = item.inner {
            self.traits.entry(item.def_id).or_insert_with(|| t.clone());
        }

        // Collect all the implementors of traits.
        if let clean::ImplItem(ref i) = item.inner {
            if let Some(did) = i.trait_.def_id() {
                self.implementors.entry(did).or_insert(vec![]).push(Impl {
                    impl_item: item.clone(),
                });
            }
        }

        // Index this method for searching later on.
        if let Some(ref s) = item.name {
            let (parent, is_inherent_impl_item) = match item.inner {
                clean::StrippedItem(..) => ((None, None), false),
                clean::AssociatedConstItem(..) |
                clean::TypedefItem(_, true) if self.parent_is_trait_impl => {
                    // skip associated items in trait impls
                    ((None, None), false)
                }
                clean::AssociatedTypeItem(..) |
                clean::TyMethodItem(..) |
                clean::StructFieldItem(..) |
                clean::VariantItem(..) => {
                    ((Some(*self.parent_stack.last().unwrap()),
                      Some(&self.stack[..self.stack.len() - 1])),
                     false)
                }
                clean::MethodItem(..) | clean::AssociatedConstItem(..) => {
                    if self.parent_stack.is_empty() {
                        ((None, None), false)
                    } else {
                        let last = self.parent_stack.last().unwrap();
                        let did = *last;
                        let path = match self.paths.get(&did) {
                            // The current stack not necessarily has correlation
                            // for where the type was defined. On the other
                            // hand, `paths` always has the right
                            // information if present.
                            Some(&(ref fqp, ItemType::Trait)) |
                            Some(&(ref fqp, ItemType::Struct)) |
                            Some(&(ref fqp, ItemType::Union)) |
                            Some(&(ref fqp, ItemType::Enum)) =>
                                Some(&fqp[..fqp.len() - 1]),
                            Some(..) => Some(&*self.stack),
                            None => None
                        };
                        ((Some(*last), path), true)
                    }
                }
                _ => ((None, Some(&*self.stack)), false)
            };

            match parent {
                (parent, Some(path)) if is_inherent_impl_item || (!self.stripped_mod) => {
                    debug_assert!(!item.is_stripped());

                    // A crate has a module at its root, containing all items,
                    // which should not be indexed. The crate-item itself is
                    // inserted later on when serializing the search-index.
                    if item.def_id.index != CRATE_DEF_INDEX {
                        self.search_index.push(IndexItem {
                            ty: item.type_(),
                            name: s.to_string(),
                            path: path.join("::").to_string(),
                            desc: plain_summary_line(item.doc_value()),
                            parent,
                            parent_idx: None,
                            search_type: get_index_search_type(&item),
                        });
                    }
                }
                (Some(parent), None) if is_inherent_impl_item => {
                    // We have a parent, but we don't know where they're
                    // defined yet. Wait for later to index this item.
                    self.orphan_impl_items.push((parent, item.clone()));
                }
                _ => {}
            }
        }

        // Keep track of the fully qualified path for this item.
        let pushed = match item.name {
            Some(ref n) if !n.is_empty() => {
                self.stack.push(n.to_string());
                true
            }
            _ => false,
        };

        match item.inner {
            clean::StructItem(..) | clean::EnumItem(..) |
            clean::TypedefItem(..) | clean::TraitItem(..) |
            clean::FunctionItem(..) | clean::ModuleItem(..) |
            clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) |
            clean::ConstantItem(..) | clean::StaticItem(..) |
            clean::UnionItem(..) | clean::ForeignTypeItem |
            clean::MacroItem(..)
            if !self.stripped_mod => {
                // Re-exported items mean that the same id can show up twice
                // in the rustdoc ast that we're looking at. We know,
                // however, that a re-exported item doesn't show up in the
                // `public_items` map, so we can skip inserting into the
                // paths map if there was already an entry present and we're
                // not a public item.
                if !self.paths.contains_key(&item.def_id) ||
                   self.access_levels.is_public(item.def_id)
                {
                    self.paths.insert(item.def_id,
                                      (self.stack.clone(), item.type_()));
                }
                self.add_aliases(&item);
            }
            // Link variants to their parent enum because pages aren't emitted
            // for each variant.
            clean::VariantItem(..) if !self.stripped_mod => {
                let mut stack = self.stack.clone();
                stack.pop();
                self.paths.insert(item.def_id, (stack, ItemType::Enum));
            }

            clean::PrimitiveItem(..) if item.visibility.is_some() => {
                self.add_aliases(&item);
                self.paths.insert(item.def_id,
                                  (self.stack.clone(), item.type_()));
            }

            _ => {}
        }

        // Maintain the parent stack
        let orig_parent_is_trait_impl = self.parent_is_trait_impl;
        let parent_pushed = match item.inner {
            clean::TraitItem(..) | clean::EnumItem(..) | clean::ForeignTypeItem |
            clean::StructItem(..) | clean::UnionItem(..) => {
                self.parent_stack.push(item.def_id);
                self.parent_is_trait_impl = false;
                true
            }
            clean::ImplItem(ref i) => {
                self.parent_is_trait_impl = i.trait_.is_some();
                match i.for_ {
                    clean::ResolvedPath{ did, .. } => {
                        self.parent_stack.push(did);
                        true
                    }
                    ref t => {
                        // Impls on primitives use the primitive's location as
                        // the parent, when one is known.
                        let prim_did = t.primitive_type().and_then(|t| {
                            self.primitive_locations.get(&t).cloned()
                        });
                        match prim_did {
                            Some(did) => {
                                self.parent_stack.push(did);
                                true
                            }
                            None => false,
                        }
                    }
                }
            }
            _ => false
        };

        // Once we've recursively found all the generics, hoard off all the
        // implementations elsewhere.
        let ret = self.fold_item_recur(item).and_then(|item| {
            if let clean::Item { inner: clean::ImplItem(_), .. } = item {
                // Figure out the id of this impl. This may map to a
                // primitive rather than always to a struct/enum.
                // Note: matching twice to restrict the lifetime of the `i` borrow.
                let mut dids = FxHashSet();
                if let clean::Item { inner: clean::ImplItem(ref i), .. } = item {
                    match i.for_ {
                        clean::ResolvedPath { did, .. } |
                        clean::BorrowedRef {
                            type_: box clean::ResolvedPath { did, .. }, ..
                        } => {
                            dids.insert(did);
                        }
                        ref t => {
                            let did = t.primitive_type().and_then(|t| {
                                self.primitive_locations.get(&t).cloned()
                            });

                            if let Some(did) = did {
                                dids.insert(did);
                            }
                        }
                    }

                    if let Some(generics) = i.trait_.as_ref().and_then(|t| t.generics()) {
                        for bound in generics {
                            if let Some(did) = bound.def_id() {
                                dids.insert(did);
                            }
                        }
                    }
                } else {
                    unreachable!()
                };
                for did in dids {
                    self.impls.entry(did).or_insert(vec![]).push(Impl {
                        impl_item: item.clone(),
                    });
                }
                // Impls are stored in `self.impls`, not returned from the fold.
                None
            } else {
                Some(item)
            }
        });

        // Restore the pre-fold state before returning.
        if pushed { self.stack.pop().unwrap(); }
        if parent_pushed { self.parent_stack.pop().unwrap(); }
        self.stripped_mod = orig_stripped_mod;
        self.parent_is_trait_impl = orig_parent_is_trait_impl;
        ret
    }
}

impl<'a> Cache {
    /// Records every type parameter's name keyed by its `DefId`, for use when
    /// pretty-printing types.
    fn generics(&mut self, generics: &clean::Generics) {
        for param in &generics.params {
            match param.kind {
                clean::GenericParamDefKind::Lifetime => {}
                clean::GenericParamDefKind::Type { did, ..
} => { self.typarams.insert(did, param.name.clone()); } } } } fn add_aliases(&mut self, item: &clean::Item) { if item.def_id.index == CRATE_DEF_INDEX { return } if let Some(ref item_name) = item.name { let path = self.paths.get(&item.def_id) .map(|p| p.0[..p.0.len() - 1].join("::")) .unwrap_or("std".to_owned()); for alias in item.attrs.lists("doc") .filter(|a| a.check_name("alias")) .filter_map(|a| a.value_str() .map(|s| s.to_string().replace("\"", ""))) .filter(|v| !v.is_empty()) .collect::<FxHashSet<_>>() .into_iter() { self.aliases.entry(alias) .or_insert(Vec::with_capacity(1)) .push(IndexItem { ty: item.type_(), name: item_name.to_string(), path: path.clone(), desc: plain_summary_line(item.doc_value()), parent: None, parent_idx: None, search_type: get_index_search_type(&item), }); } } } } #[derive(Debug, Eq, PartialEq, Hash)] struct ItemEntry { url: String, name: String, } impl ItemEntry { fn new(mut url: String, name: String) -> ItemEntry { while url.starts_with('/') { url.remove(0); } ItemEntry { url, name, } } } impl fmt::Display for ItemEntry { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "<a href='{}'>{}</a>", self.url, Escape(&self.name)) } } impl PartialOrd for ItemEntry { fn partial_cmp(&self, other: &ItemEntry) -> Option<::std::cmp::Ordering> { Some(self.cmp(other)) } } impl Ord for ItemEntry { fn cmp(&self, other: &ItemEntry) -> ::std::cmp::Ordering { self.name.cmp(&other.name) } } #[derive(Debug)] struct AllTypes { structs: HashSet<ItemEntry>, enums: HashSet<ItemEntry>, unions: HashSet<ItemEntry>, primitives: HashSet<ItemEntry>, traits: HashSet<ItemEntry>, macros: HashSet<ItemEntry>, functions: HashSet<ItemEntry>, typedefs: HashSet<ItemEntry>, statics: HashSet<ItemEntry>, constants: HashSet<ItemEntry>, keywords: HashSet<ItemEntry>, } impl AllTypes { fn new() -> AllTypes { AllTypes { structs: HashSet::with_capacity(100), enums: HashSet::with_capacity(100), unions: HashSet::with_capacity(100), primitives: HashSet::with_capacity(26), 
            traits: HashSet::with_capacity(100),
            macros: HashSet::with_capacity(100),
            functions: HashSet::with_capacity(100),
            typedefs: HashSet::with_capacity(100),
            statics: HashSet::with_capacity(100),
            constants: HashSet::with_capacity(100),
            keywords: HashSet::with_capacity(100),
        }
    }

    // Files `item_name` (a fully-qualified `crate::...::name` path) into the
    // bucket matching `item_type`. The leading crate segment is dropped from
    // the generated URL. Kinds without a bucket fall through to `_` and are
    // ignored.
    fn append(&mut self, item_name: String, item_type: &ItemType) {
        let mut url: Vec<_> = item_name.split("::").skip(1).collect();
        if let Some(name) = url.pop() {
            let new_url = format!("{}/{}.{}.html", url.join("/"), item_type, name);
            url.push(name);
            let name = url.join("::");
            match *item_type {
                ItemType::Struct => self.structs.insert(ItemEntry::new(new_url, name)),
                ItemType::Enum => self.enums.insert(ItemEntry::new(new_url, name)),
                ItemType::Union => self.unions.insert(ItemEntry::new(new_url, name)),
                ItemType::Primitive => self.primitives.insert(ItemEntry::new(new_url, name)),
                ItemType::Trait => self.traits.insert(ItemEntry::new(new_url, name)),
                ItemType::Macro => self.macros.insert(ItemEntry::new(new_url, name)),
                ItemType::Function => self.functions.insert(ItemEntry::new(new_url, name)),
                ItemType::Typedef => self.typedefs.insert(ItemEntry::new(new_url, name)),
                ItemType::Static => self.statics.insert(ItemEntry::new(new_url, name)),
                ItemType::Constant => self.constants.insert(ItemEntry::new(new_url, name)),
                _ => true,
            };
        }
    }
}

// Writes one `<h3>` section listing the entries of `e`, sorted by name.
// Emits nothing when the set is empty.
fn print_entries(f: &mut fmt::Formatter, e: &HashSet<ItemEntry>, title: &str,
                 class: &str) -> fmt::Result {
    if !e.is_empty() {
        let mut e: Vec<&ItemEntry> = e.iter().collect();
        e.sort();
        write!(f, "<h3 id='{}'>{}</h3><ul class='{} docblock'>{}</ul>",
               title,
               Escape(title),
               class,
               e.iter().map(|s| format!("<li>{}</li>", s)).collect::<String>())?;
    }
    Ok(())
}

impl fmt::Display for AllTypes {
    // Renders the complete body of the "list of all items" page: a header
    // followed by one section per non-empty bucket.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f,
"<h1 class='fqn'>\
    <span class='in-band'>List of all items</span>\
    <span class='out-of-band'>\
        <span id='render-detail'>\
            <a id=\"toggle-all-docs\" href=\"javascript:void(0)\" title=\"collapse all docs\">\
                [<span class='inner'>&#x2212;</span>]\
            </a>\
        </span> </span> </h1>")?;
        print_entries(f, &self.structs, "Structs", "structs")?;
        print_entries(f, &self.enums, "Enums", "enums")?;
        print_entries(f, &self.unions, "Unions", "unions")?;
        print_entries(f, &self.primitives, "Primitives", "primitives")?;
        print_entries(f, &self.traits, "Traits", "traits")?;
        print_entries(f, &self.macros, "Macros", "macros")?;
        print_entries(f, &self.functions, "Functions", "functions")?;
        print_entries(f, &self.typedefs, "Typedefs", "typedefs")?;
        print_entries(f, &self.statics, "Statics", "statics")?;
        print_entries(f, &self.constants, "Constants", "constants")
    }
}

/// Model for the rustdoc settings page.
#[derive(Debug)]
struct Settings<'a> {
    // (id, explanation, default value)
    settings: Vec<(&'static str, &'static str, bool)>,
    root_path: &'a str,
    suffix: &'a str,
}

impl<'a> Settings<'a> {
    pub fn new(root_path: &'a str, suffix: &'a str) -> Settings<'a> {
        Settings {
            settings: vec![
                ("item-declarations", "Auto-hide item declarations.", true),
                ("item-attributes", "Auto-hide item attributes.", true),
                ("go-to-only-result", "Directly go to item in search if there is only one result",
                 false),
            ],
            root_path,
            suffix,
        }
    }
}

impl<'a> fmt::Display for Settings<'a> {
    // Renders the settings page: one checkbox row per setting, plus the
    // `settings.js` script that persists the choices.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f,
"<h1 class='fqn'>\
    <span class='in-band'>Rustdoc settings</span>\
</h1>\
<div class='settings'>{}</div>\
<script src='{}settings{}.js'></script>",
               self.settings.iter()
                            .map(|(id, text, enabled)| {
                                format!("<div class='setting-line'>\
                                         <label class='toggle'>\
                                         <input type='checkbox' id='{}' {}>\
                                         <span class='slider'></span>\
                                         </label>\
                                         <div>{}</div>\
                                         </div>",
                                        id,
                                        if *enabled { " checked" } else { "" },
                                        text)
                            })
                            .collect::<String>(),
               self.root_path,
               self.suffix)
    }
}

impl Context {
    /// String representation of how to get back to the root path of the 'doc/'
    /// folder in terms of a relative URL.
    fn root_path(&self) -> String {
        // One "../" per component of the current module path.
        repeat("../").take(self.current.len()).collect::<String>()
    }

    /// Recurse in the directory structure and change the "root path" to make
    /// sure it always points to the top (relatively).
    fn recurse<T, F>(&mut self, s: String, f: F) -> T where
        F: FnOnce(&mut Context) -> T,
    {
        if s.is_empty() {
            panic!("Unexpected empty destination: {:?}", self.current);
        }
        let prev = self.dst.clone();
        self.dst.push(&s);
        self.current.push(s);

        info!("Recursing into {}", self.dst.display());

        let ret = f(self);

        info!("Recursed; leaving {}", self.dst.display());

        // Go back to where we were at
        self.dst = prev;
        self.current.pop().unwrap();

        ret
    }

    /// Main method for rendering a crate.
    ///
    /// This currently isn't parallelized, but it'd be pretty easy to add
    /// parallelization to this function.
    fn krate(self, mut krate: clean::Crate) -> Result<(), Error> {
        let mut item = match krate.module.take() {
            Some(i) => i,
            None => return Ok(()),
        };
        let final_file = self.dst.join(&krate.name)
                                 .join("all.html");
        let settings_file = self.dst.join("settings.html");

        let crate_name = krate.name.clone();
        item.name = Some(krate.name);

        let mut all = AllTypes::new();

        {
            // Render the crate documentation: a work queue of (context, item)
            // pairs, where rendering one item may push its children.
            let mut work = vec![(self.clone(), item)];

            while let Some((mut cx, item)) = work.pop() {
                cx.item(item, &mut all, |cx, item| {
                    work.push((cx.clone(), item))
                })?
            }
        }

        let mut w = BufWriter::new(try_err!(File::create(&final_file), &final_file));
        // NOTE(review): `root_path` is computed here but the `page` below uses
        // a fixed "../" — this local looks unused after this point; confirm.
        let mut root_path = self.dst.to_str().expect("invalid path").to_owned();
        if !root_path.ends_with('/') {
            root_path.push('/');
        }
        let mut page = layout::Page {
            title: "List of all items in this crate",
            css_class: "mod",
            root_path: "../",
            description: "List of all items in this crate",
            keywords: BASIC_KEYWORDS,
            resource_suffix: &self.shared.resource_suffix,
        };
        let sidebar = if let Some(ref version) = cache().crate_version {
            format!("<p class='location'>Crate {}</p>\
                     <div class='block version'>\
                     <p>Version {}</p>\
                     </div>\
                     <a id='all-types' href='index.html'><p>Back to index</p></a>",
                    crate_name, version)
        } else {
            String::new()
        };
        try_err!(layout::render(&mut w, &self.shared.layout,
                                &page, &sidebar, &all,
                                self.shared.css_file_extension.is_some(),
                                &self.shared.themes),
                 &final_file);

        // Generating settings page.
        let settings = Settings::new("./", &self.shared.resource_suffix);
        page.title = "Rustdoc settings";
        page.description = "Settings of Rustdoc";
        page.root_path = "./";

        let mut w = BufWriter::new(try_err!(File::create(&settings_file), &settings_file));
        let mut themes = self.shared.themes.clone();
        let sidebar = "<p class='location'>Settings</p><div class='sidebar-elems'></div>";
        themes.push(PathBuf::from("settings.css"));
        // The settings page is rendered with a stripped-down layout (no crate
        // name, logo or favicon).
        let mut layout = self.shared.layout.clone();
        layout.krate = String::new();
        layout.logo = String::new();
        layout.favicon = String::new();
        try_err!(layout::render(&mut w, &layout,
                                &page, &sidebar, &settings,
                                self.shared.css_file_extension.is_some(),
                                &themes),
                 &settings_file);

        Ok(())
    }

    // Renders a single item's page into `writer`. When `pushname` is true the
    // item's own name is appended to the page title after the module path.
    fn render_item(&self,
                   writer: &mut io::Write,
                   it: &clean::Item,
                   pushname: bool)
                   -> io::Result<()> {
        // A little unfortunate that this is done like this, but it sure
        // does make formatting *a lot* nicer.
        CURRENT_LOCATION_KEY.with(|slot| {
            *slot.borrow_mut() = self.current.clone();
        });

        let mut title = if it.is_primitive() {
            // No need to include the namespace for primitive types
            String::new()
        } else {
            self.current.join("::")
        };
        if pushname {
            if !title.is_empty() {
                title.push_str("::");
            }
            title.push_str(it.name.as_ref().unwrap());
        }
        title.push_str(" - Rust");
        let tyname = it.type_().css_class();
        let desc = if it.is_crate() {
            format!("API documentation for the Rust `{}` crate.",
                    self.shared.layout.krate)
        } else {
            format!("API documentation for the Rust `{}` {} in crate `{}`.",
                    it.name.as_ref().unwrap(), tyname, self.shared.layout.krate)
        };
        let keywords = make_item_keywords(it);
        let page = layout::Page {
            css_class: tyname,
            root_path: &self.root_path(),
            title: &title,
            description: &desc,
            keywords: &keywords,
            resource_suffix: &self.shared.resource_suffix,
        };

        reset_ids(true);

        if !self.render_redirect_pages {
            // Normal case: render the full page.
            layout::render(writer, &self.shared.layout, &page,
                           &Sidebar{ cx: self, item: it },
                           &Item{ cx: self, item: it },
                           self.shared.css_file_extension.is_some(),
                           &self.shared.themes)?;
        } else {
            // Stripped/re-exported item: emit only a redirect to where the
            // item's real page lives, if we know its path.
            let mut url = self.root_path();
            if let Some(&(ref names, ty)) = cache().paths.get(&it.def_id) {
                for name in &names[..names.len() - 1] {
                    url.push_str(name);
                    url.push_str("/");
                }
                url.push_str(&item_path(ty, names.last().unwrap()));
                layout::redirect(writer, &url)?;
            }
        }
        Ok(())
    }

    /// Non-parallelized version of rendering an item. This will take the input
    /// item, render its contents, and then invoke the specified closure with
    /// all sub-items which need to be rendered.
    ///
    /// The rendering driver uses this closure to queue up more work.
    fn item<F>(&mut self, item: clean::Item, all: &mut AllTypes, mut f: F) -> Result<(), Error>
        where F: FnMut(&mut Context, clean::Item),
    {
        // Stripped modules survive the rustdoc passes (i.e. `strip-private`)
        // if they contain impls for public types. These modules can also
        // contain items such as publicly re-exported structures.
        //
        // External crates will provide links to these structures, so
        // these modules are recursed into, but not rendered normally
        // (a flag on the context).
        if !self.render_redirect_pages {
            self.render_redirect_pages = item.is_stripped();
        }

        if item.is_mod() {
            // modules are special because they add a namespace. We also need to
            // recurse into the items of the module as well.
            let name = item.name.as_ref().unwrap().to_string();
            let mut item = Some(item);
            self.recurse(name, |this| {
                let item = item.take().unwrap();

                let mut buf = Vec::new();
                this.render_item(&mut buf, &item, false).unwrap();
                // buf will be empty if the module is stripped and there is no redirect for it
                if !buf.is_empty() {
                    try_err!(this.shared.ensure_dir(&this.dst), &this.dst);
                    let joint_dst = this.dst.join("index.html");
                    let mut dst = try_err!(File::create(&joint_dst), &joint_dst);
                    try_err!(dst.write_all(&buf), &joint_dst);
                }

                let m = match item.inner {
                    clean::StrippedItem(box clean::ModuleItem(m)) |
                    clean::ModuleItem(m) => m,
                    _ => unreachable!()
                };

                // Render sidebar-items.js used throughout this module.
                if !this.render_redirect_pages {
                    let items = this.build_sidebar_items(&m);
                    let js_dst = this.dst.join("sidebar-items.js");
                    let mut js_out = BufWriter::new(try_err!(File::create(&js_dst), &js_dst));
                    try_err!(write!(&mut js_out, "initSidebarItems({});",
                                    as_json(&items)), &js_dst);
                }

                // Queue every child of the module for rendering.
                for item in m.items {
                    f(this, item);
                }

                Ok(())
            })?;
        } else if item.name.is_some() {
            let mut buf = Vec::new();
            self.render_item(&mut buf, &item, true).unwrap();
            // buf will be empty if the item is stripped and there is no redirect for it
            if !buf.is_empty() {
                let name = item.name.as_ref().unwrap();
                let item_type = item.type_();
                let file_name = &item_path(item_type, name);
                try_err!(self.shared.ensure_dir(&self.dst), &self.dst);
                let joint_dst = self.dst.join(file_name);
                let mut dst = try_err!(File::create(&joint_dst), &joint_dst);
                try_err!(dst.write_all(&buf), &joint_dst);

                if !self.render_redirect_pages {
                    all.append(full_path(self, &item), &item_type);
                }
                // Redirect from a sane URL using the namespace to Rustdoc's
                // URL for the page.
                let redir_name = format!("{}.{}.html", name, item_type.name_space());
                let redir_dst = self.dst.join(redir_name);
                // `create_new` keeps an existing redirect file intact.
                if let Ok(redirect_out) = OpenOptions::new().create_new(true)
                                                            .write(true)
                                                            .open(&redir_dst) {
                    let mut redirect_out = BufWriter::new(redirect_out);
                    try_err!(layout::redirect(&mut redirect_out, file_name), &redir_dst);
                }

                // If the item is a macro, redirect from the old macro URL (with !)
                // to the new one (without).
                // FIXME(#35705) remove this redirect.
                if item_type == ItemType::Macro {
                    let redir_name = format!("{}.{}!.html", item_type, name);
                    let redir_dst = self.dst.join(redir_name);
                    let redirect_out = try_err!(File::create(&redir_dst), &redir_dst);
                    let mut redirect_out = BufWriter::new(redirect_out);
                    try_err!(layout::redirect(&mut redirect_out, file_name), &redir_dst);
                }
            }
        }
        Ok(())
    }

    // Builds the data behind `sidebar-items.js` for module `m`: item-kind
    // CSS class -> [(name, summary)] entries.
    fn build_sidebar_items(&self, m: &clean::Module) -> BTreeMap<String, Vec<NameDoc>> {
        // BTreeMap instead of HashMap to get a sorted output
        let mut map = BTreeMap::new();
        for item in &m.items {
            if item.is_stripped() { continue }

            let short = item.type_().css_class();
            let myname = match item.name {
                None => continue,
                Some(ref s) => s.to_string(),
            };
            let short = short.to_string();
            map.entry(short).or_insert(vec![])
                .push((myname, Some(plain_summary_line(item.doc_value()))));
        }

        if self.shared.sort_modules_alphabetically {
            for (_, items) in &mut map {
                items.sort();
            }
        }
        map
    }
}

impl<'a> Item<'a> {
    /// Generate a url appropriate for an `href` attribute back to the source of
    /// this item.
    ///
    /// The url generated, when clicked, will redirect the browser back to the
    /// original source code.
    ///
    /// If `None` is returned, then a source link couldn't be generated. This
    /// may happen, for example, with externally inlined items where the source
    /// of their crate documentation isn't known.
    fn src_href(&self) -> Option<String> {
        let mut root = self.cx.root_path();

        let cache = cache();
        let mut path = String::new();

        // We can safely ignore macros from other libraries
        let file = match self.item.source.filename {
            FileName::Real(ref path) => path,
            _ => return None,
        };

        let (krate, path) = if self.item.def_id.is_local() {
            // Local item: the source file must have been copied into the
            // output tree by the source renderer.
            if let Some(path) = self.cx.shared.local_sources.get(file) {
                (&self.cx.shared.layout.krate, path)
            } else {
                return None;
            }
        } else {
            // External item: figure out where that crate's docs live.
            let (krate, src_root) = match cache.extern_locations.get(&self.item.def_id.krate) {
                Some(&(ref name, ref src, Local)) => (name, src),
                Some(&(ref name, ref src, Remote(ref s))) => {
                    root = s.to_string();
                    (name, src)
                }
                Some(&(_, _, Unknown)) | None => return None,
            };

            clean_srcpath(&src_root, file, false, |component| {
                path.push_str(component);
                path.push('/');
            });
            let mut fname = file.file_name().expect("source has no filename")
                                .to_os_string();
            fname.push(".html");
            path.push_str(&fname.to_string_lossy());
            (krate, &path)
        };

        // Line anchor: single line or a `lo-hi` range.
        let lines = if self.item.source.loline == self.item.source.hiline {
            format!("{}", self.item.source.loline)
        } else {
            format!("{}-{}", self.item.source.loline, self.item.source.hiline)
        };
        Some(format!("{root}src/{krate}/{path}#{lines}",
                     root = Escape(&root),
                     krate = krate,
                     path = path,
                     lines = lines))
    }
}

// Wraps `f`'s output in the `<div class="docblock type-decl">` container used
// for item declarations.
fn wrap_into_docblock<F>(w: &mut fmt::Formatter,
                         f: F) -> fmt::Result
where F: Fn(&mut fmt::Formatter) -> fmt::Result {
    write!(w, "<div class=\"docblock type-decl\">")?;
    f(w)?;
    write!(w, "</div>")
}

impl<'a> fmt::Display for Item<'a> {
    // Renders the full page body for one item: breadcrumb header, stability
    // version, [src] link, then dispatches to the per-kind `item_*` renderer.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        debug_assert!(!self.item.is_stripped());
        // Write the breadcrumb trail header for the top
        write!(fmt, "<h1 class='fqn'><span class='in-band'>")?;
        match self.item.inner {
            clean::ModuleItem(ref m) => if m.is_crate {
                    write!(fmt, "Crate ")?;
                } else {
                    write!(fmt, "Module ")?;
                },
            clean::FunctionItem(..) | clean::ForeignFunctionItem(..) =>
                write!(fmt, "Function ")?,
            clean::TraitItem(..) => write!(fmt, "Trait ")?,
            clean::StructItem(..) => write!(fmt, "Struct ")?,
            clean::UnionItem(..) => write!(fmt, "Union ")?,
            clean::EnumItem(..) => write!(fmt, "Enum ")?,
            clean::TypedefItem(..) => write!(fmt, "Type Definition ")?,
            clean::MacroItem(..) => write!(fmt, "Macro ")?,
            clean::PrimitiveItem(..) => write!(fmt, "Primitive Type ")?,
            clean::StaticItem(..) | clean::ForeignStaticItem(..) =>
                write!(fmt, "Static ")?,
            clean::ConstantItem(..) => write!(fmt, "Constant ")?,
            clean::ForeignTypeItem => write!(fmt, "Foreign Type ")?,
            clean::KeywordItem(..) => write!(fmt, "Keyword ")?,
            _ => {
                // We don't generate pages for any other type.
                unreachable!();
            }
        }

        // Breadcrumb links to each enclosing module (skipped for primitives
        // and keywords, which have no namespace).
        if !self.item.is_primitive() && !self.item.is_keyword() {
            let cur = &self.cx.current;
            let amt = if self.item.is_mod() { cur.len() - 1 } else { cur.len() };
            for (i, component) in cur.iter().enumerate().take(amt) {
                write!(fmt, "<a href='{}index.html'>{}</a>::<wbr>",
                       repeat("../").take(cur.len() - i - 1)
                                    .collect::<String>(),
                       component)?;
            }
        }
        write!(fmt, "<a class=\"{}\" href=''>{}</a>",
               self.item.type_(), self.item.name.as_ref().unwrap())?;

        write!(fmt, "</span>")?; // in-band
        write!(fmt, "<span class='out-of-band'>")?;
        if let Some(version) = self.item.stable_since() {
            write!(fmt, "<span class='since' title='Stable since Rust version {0}'>{0}</span>",
                   version)?;
        }
        write!(fmt,
               "<span id='render-detail'>\
                   <a id=\"toggle-all-docs\" href=\"javascript:void(0)\" \
                       title=\"collapse all docs\">\
                       [<span class='inner'>&#x2212;</span>]\
                   </a>\
               </span>")?;

        // Write `src` tag
        //
        // When this item is part of a `pub use` in a downstream crate, the
        // [src] link in the downstream documentation will actually come back to
        // this page, and this link will be auto-clicked. The `id` attribute is
        // used to find the link to auto-click.
        if self.cx.shared.include_sources && !self.item.is_primitive() {
            if let Some(l) = self.src_href() {
                write!(fmt, "<a class='srclink' href='{}' title='{}'>[src]</a>",
                       l, "goto source code")?;
            }
        }

        write!(fmt, "</span></h1>")?; // out-of-band

        // Dispatch to the renderer for this item kind.
        match self.item.inner {
            clean::ModuleItem(ref m) =>
                item_module(fmt, self.cx, self.item, &m.items),
            clean::FunctionItem(ref f) | clean::ForeignFunctionItem(ref f) =>
                item_function(fmt, self.cx, self.item, f),
            clean::TraitItem(ref t) => item_trait(fmt, self.cx, self.item, t),
            clean::StructItem(ref s) => item_struct(fmt, self.cx, self.item, s),
            clean::UnionItem(ref s) => item_union(fmt, self.cx, self.item, s),
            clean::EnumItem(ref e) => item_enum(fmt, self.cx, self.item, e),
            clean::TypedefItem(ref t, _) => item_typedef(fmt, self.cx, self.item, t),
            clean::MacroItem(ref m) => item_macro(fmt, self.cx, self.item, m),
            clean::PrimitiveItem(ref p) => item_primitive(fmt, self.cx, self.item, p),
            clean::StaticItem(ref i) | clean::ForeignStaticItem(ref i) =>
                item_static(fmt, self.cx, self.item, i),
            clean::ConstantItem(ref c) => item_constant(fmt, self.cx, self.item, c),
            clean::ForeignTypeItem => item_foreign_type(fmt, self.cx, self.item),
            clean::KeywordItem(ref k) => item_keyword(fmt, self.cx, self.item, k),
            _ => {
                // We don't generate pages for any other type.
                unreachable!();
            }
        }
    }
}

// Relative file path of the page generated for an item of kind `ty` named
// `name` (modules get a directory with an `index.html`).
fn item_path(ty: ItemType, name: &str) -> String {
    match ty {
        ItemType::Module => format!("{}/index.html", name),
        _ => format!("{}.{}.html", ty.css_class(), name),
    }
}

// `current-module-path::item-name` for `item` under the context `cx`.
fn full_path(cx: &Context, item: &clean::Item) -> String {
    let mut s = cx.current.join("::");
    s.push_str("::");
    s.push_str(item.name.as_ref().unwrap());
    s
}

// First paragraph of `s`: skips leading blank lines, then takes lines up to
// (but not including) the next blank line.
fn shorter<'a>(s: Option<&'a str>) -> String {
    match s {
        Some(s) => s.lines()
            .skip_while(|s| s.chars().all(|c| c.is_whitespace()))
            .take_while(|line|{
            (*line).chars().any(|chr|{
                !chr.is_whitespace()
            })
        }).collect::<Vec<_>>().join("\n"),
        None => "".to_string()
    }
}

// One-line plain-text summary of a doc string, for listings and tooltips.
#[inline]
fn plain_summary_line(s: Option<&str>) -> String {
    let line = shorter(s).replace("\n", " ");
    markdown::plain_summary_line(&line[..])
}

// Emits the standard documentation sections for an item: stability banner,
// non-exhaustive note, then the full doc block (prefixed with the rendered
// associated-const value when applicable).
fn document(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result {
    if let Some(ref name) = item.name {
        info!("Documenting {}", name);
    }
    document_stability(w, cx, item)?;
    document_non_exhaustive(w, item)?;
    let prefix = render_assoc_const_value(item);
    document_full(w, item, cx, &prefix)?;
    Ok(())
}

/// Render md_text as markdown.
fn render_markdown(w: &mut fmt::Formatter,
                   md_text: &str,
                   links: Vec<(String, String)>,
                   prefix: &str,)
                   -> fmt::Result {
    write!(w, "<div class='docblock'>{}{}</div>", prefix, Markdown(md_text, &links))
}

// Renders the one-line summary of `item`'s docs; multi-line docs get a
// "[Read more]" link pointing at the full documentation.
fn document_short(w: &mut fmt::Formatter, item: &clean::Item, link: AssocItemLink,
                  prefix: &str) -> fmt::Result {
    if let Some(s) = item.doc_value() {
        let markdown = if s.contains('\n') {
            format!("{} [Read more]({})",
                    &plain_summary_line(Some(s)), naive_assoc_href(item, link))
        } else {
            format!("{}", &plain_summary_line(Some(s)))
        };
        render_markdown(w, &markdown, item.links(), prefix)?;
    } else if !prefix.is_empty() {
        write!(w, "<div class='docblock'>{}</div>", prefix)?;
    }
    Ok(())
}

// For an associated const with a default, returns its highlighted
// `name: type = value` declaration; empty string for anything else.
fn render_assoc_const_value(item: &clean::Item) -> String {
    match item.inner {
        clean::AssociatedConstItem(ref ty, Some(ref default)) => {
            highlight::render_with_highlighting(
                &format!("{}: {:#} = {}", item.name.as_ref().unwrap(), ty, default),
                None,
                None,
                None,
                None,
            )
        }
        _ => String::new(),
    }
}

// Renders the complete (possibly collapsed) doc block for `item`.
fn document_full(w: &mut fmt::Formatter, item: &clean::Item,
                 cx: &Context, prefix: &str) -> fmt::Result {
    if let Some(s) = cx.shared.maybe_collapsed_doc_value(item) {
        debug!("Doc block: =====\n{}\n=====", s);
        render_markdown(w, &*s, item.links(), prefix)?;
    } else if !prefix.is_empty() {
        write!(w, "<div class='docblock'>{}</div>", prefix)?;
    }
    Ok(())
}

// Emits the stability/deprecation banners for `item`, if any.
fn document_stability(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result {
    let stabilities = short_stability(item, cx, true);
    if !stabilities.is_empty() {
        write!(w, "<div class='stability'>")?;
        for stability in stabilities {
            write!(w, "{}", stability)?;
        }
        write!(w, "</div>")?;
    }
    Ok(())
}

// Emits the collapsible "#[non_exhaustive]" notice for items marked as such.
fn document_non_exhaustive(w: &mut fmt::Formatter, item: &clean::Item) -> fmt::Result {
    if item.non_exhaustive {
        let name = item.type_();
        write!(w, r##"
<div class='non-exhaustive'>
<div class='stab non-exhaustive'>
<details>
<summary>
<span class=microscope>🔬</span>
This {} is marked as non exhaustive.
</summary>
<p>
This {} will require a wildcard arm in any match statements or constructors.
</p>
</details>
</div>
</div>
"##, name, name)?;
    }
    Ok(())
}

// Sort key for item names: splits a trailing number off so that, e.g.,
// "Tuple2" sorts before "Tuple10". Returns (prefix, number, leading-zero
// count); the zero count sorts names with leading zeroes last.
fn name_key(name: &str) -> (&str, u64, usize) {
    // find number at end
    let split = name.bytes().rposition(|b| b < b'0' || b'9' < b).map_or(0, |s| s + 1);

    // count leading zeroes
    let after_zeroes =
        name[split..].bytes().position(|b| b != b'0').map_or(name.len(), |extra| split + extra);

    // sort leading zeroes last
    let num_zeroes = after_zeroes - split;

    match name[split..].parse() {
        Ok(n) => (&name[..split], n, num_zeroes),
        Err(_) => (name, 0, num_zeroes),
    }
}

// Renders a module page: the module docs followed by a table of its items,
// grouped by kind in a fixed order.
fn item_module(w: &mut fmt::Formatter, cx: &Context,
               item: &clean::Item, items: &[clean::Item]) -> fmt::Result {
    document(w, cx, item)?;

    let mut indices = (0..items.len()).filter(|i| !items[*i].is_stripped()).collect::<Vec<usize>>();

    // the order of item types in the listing
    fn reorder(ty: ItemType) -> u8 {
        match ty {
            ItemType::ExternCrate     => 0,
            ItemType::Import          => 1,
            ItemType::Primitive       => 2,
            ItemType::Module          => 3,
            ItemType::Macro           => 4,
            ItemType::Struct          => 5,
            ItemType::Enum            => 6,
            ItemType::Constant        => 7,
            ItemType::Static          => 8,
            ItemType::Trait           => 9,
            ItemType::Function        => 10,
            ItemType::Typedef         => 12,
            ItemType::Union           => 13,
            _                         => 14 + ty as u8,
        }
    }

    // Orders items by kind, then stable-before-unstable, then by `name_key`.
    fn cmp(i1: &clean::Item, i2: &clean::Item, idx1: usize, idx2: usize) -> Ordering {
        let ty1 = i1.type_();
        let ty2 = i2.type_();
        if ty1 != ty2 {
            return (reorder(ty1), idx1).cmp(&(reorder(ty2), idx2))
        }
        let s1 = i1.stability.as_ref().map(|s| s.level);
        let s2 = i2.stability.as_ref().map(|s| s.level);
        match (s1, s2) {
            (Some(stability::Unstable), Some(stability::Stable)) => return Ordering::Greater,
            (Some(stability::Stable), Some(stability::Unstable)) => return Ordering::Less,
            _ => {}
        }
        let lhs = i1.name.as_ref().map_or("", |s| &**s);
        let rhs = i2.name.as_ref().map_or("", |s| &**s);
        name_key(lhs).cmp(&name_key(rhs))
    }

    if cx.shared.sort_modules_alphabetically {
        indices.sort_by(|&i1, &i2| cmp(&items[i1], &items[i2], i1, i2));
    }
    //
    // This call is to remove re-export duplicates in cases such as:
    //
    // ```
    // pub mod foo {
    //     pub mod bar {
    //         pub trait Double { fn foo(); }
    //     }
    // }
    //
    // pub use foo::bar::*;
    // pub use foo::*;
    // ```
    //
    // `Double` will appear twice in the generated docs.
    //
    // FIXME: This code is quite ugly and could be improved. Small issue: DefId
    // can be identical even if the elements are different (mostly in imports).
    // So in case this is an import, we keep everything by adding a "unique id"
    // (which is the position in the vector).
    indices.dedup_by_key(|i| (items[*i].def_id,
                              if items[*i].name.as_ref().is_some() {
                                  Some(full_path(cx, &items[*i]).clone())
                              } else {
                                  None
                              },
                              items[*i].type_(),
                              if items[*i].is_import() {
                                  *i
                              } else {
                                  0
                              }));

    debug!("{:?}", indices);
    let mut curty = None;
    for &idx in &indices {
        let myitem = &items[idx];
        if myitem.is_stripped() {
            continue;
        }

        // Emit a section header (and close the previous table) each time the
        // item kind changes.
        let myty = Some(myitem.type_());
        if curty == Some(ItemType::ExternCrate) && myty == Some(ItemType::Import) {
            // Put `extern crate` and `use` re-exports in the same section.
            curty = myty;
        } else if myty != curty {
            if curty.is_some() {
                write!(w, "</table>")?;
            }
            curty = myty;
            let (short, name) = item_ty_to_strs(&myty.unwrap());
            write!(w, "<h2 id='{id}' class='section-header'>\
                       <a href=\"#{id}\">{name}</a></h2>\n<table>",
                   id = derive_id(short.to_owned()), name = name)?;
        }

        match myitem.inner {
            clean::ExternCrateItem(ref name, ref src) => {
                use html::format::HRef;
                match *src {
                    Some(ref src) => {
                        write!(w, "<tr><td><code>{}extern crate {} as {};",
                               VisSpace(&myitem.visibility),
                               HRef::new(myitem.def_id, src),
                               name)?
                    }
                    None => {
                        write!(w, "<tr><td><code>{}extern crate {};",
                               VisSpace(&myitem.visibility),
                               HRef::new(myitem.def_id, name))?
                    }
                }
                write!(w, "</code></td></tr>")?;
            }

            clean::ImportItem(ref import) => {
                write!(w, "<tr><td><code>{}{}</code></td></tr>",
                       VisSpace(&myitem.visibility), *import)?;
            }

            _ => {
                if myitem.name.is_none() {
                    continue
                }

                let stabilities = short_stability(myitem, cx, false);

                let stab_docs = if !stabilities.is_empty() {
                    stabilities.iter()
                               .map(|s| format!("[{}]", s))
                               .collect::<Vec<_>>()
                               .as_slice()
                               .join(" ")
                } else {
                    String::new()
                };

                // Warning glyph shown next to unsafe functions.
                let unsafety_flag = match myitem.inner {
                    clean::FunctionItem(ref func) | clean::ForeignFunctionItem(ref func)
                    if func.header.unsafety == hir::Unsafety::Unsafe => {
                        "<a title='unsafe function' href='#'><sup>⚠</sup></a>"
                    }
                    _ => "",
                };

                let doc_value = myitem.doc_value().unwrap_or("");
                write!(w, "
                       <tr class='{stab} module-item'>
                           <td><a class=\"{class}\" href=\"{href}\"
                                  title='{title_type} {title}'>{name}</a>{unsafety_flag}</td>
                           <td class='docblock-short'>
                               {stab_docs} {docs}
                           </td>
                       </tr>",
                       name = *myitem.name.as_ref().unwrap(),
                       stab_docs = stab_docs,
                       docs = MarkdownSummaryLine(doc_value, &myitem.links()),
                       class = myitem.type_(),
                       stab = myitem.stability_class().unwrap_or("".to_string()),
                       unsafety_flag = unsafety_flag,
                       href = item_path(myitem.type_(), myitem.name.as_ref().unwrap()),
                       title_type = myitem.type_(),
                       title = full_path(cx, myitem))?;
            }
        }
    }

    if curty.is_some() {
        write!(w, "</table>")?;
    }
    Ok(())
}

// Builds the list of stability/deprecation/portability banner snippets for
// `item`. With `show_reason`, the banners include since-versions, tracking
// issue links and the full reason text.
fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Vec<String> {
    let mut stability = vec![];

    if let Some(stab) = item.stability.as_ref() {
        let deprecated_reason = if show_reason && !stab.deprecated_reason.is_empty() {
            format!(": {}", stab.deprecated_reason)
        } else {
            String::new()
        };
        if !stab.deprecated_since.is_empty() {
            let since = if show_reason {
                format!(" since {}", Escape(&stab.deprecated_since))
            } else {
                String::new()
            };
            // "Deprecated" once the deprecation version is reached,
            // "Deprecating in" before that.
            let text = if stability::deprecation_in_effect(&stab.deprecated_since) {
                format!("Deprecated{}{}", since, MarkdownHtml(&deprecated_reason))
            } else {
                format!("Deprecating in {}{}",
                    // (tail of the stability-badge renderer; the head of this
                    // function lies before this chunk)
                    Escape(&stab.deprecated_since), MarkdownHtml(&deprecated_reason))
        };
        stability.push(format!("<div class='stab deprecated'>{}</div>", text))
    };

    // Unstable items get a nightly-only "experimental" badge; with
    // `show_reason` it also names the feature gate and links the tracking
    // issue when one is configured.
    if stab.level == stability::Unstable {
        if show_reason {
            let unstable_extra = match (!stab.feature.is_empty(),
                                        &cx.shared.issue_tracker_base_url,
                                        stab.issue) {
                (true, &Some(ref tracker_url), Some(issue_no)) if issue_no > 0 =>
                    format!(" (<code>{} </code><a href=\"{}{}\">#{}</a>)",
                            Escape(&stab.feature), tracker_url, issue_no, issue_no),
                (false, &Some(ref tracker_url), Some(issue_no)) if issue_no > 0 =>
                    format!(" (<a href=\"{}{}\">#{}</a>)",
                            Escape(&tracker_url), issue_no, issue_no),
                (true, ..) =>
                    format!(" (<code>{}</code>)", Escape(&stab.feature)),
                _ => String::new(),
            };
            if stab.unstable_reason.is_empty() {
                stability.push(format!("<div class='stab unstable'>\
                                        <span class=microscope>🔬</span> \
                                        This is a nightly-only experimental API. {}\
                                        </div>",
                                       unstable_extra));
            } else {
                // A reason is present: put it behind a collapsible <details>.
                let text = format!("<summary><span class=microscope>🔬</span> \
                                    This is a nightly-only experimental API. {}\
                                    </summary>{}",
                                   unstable_extra,
                                   MarkdownHtml(&stab.unstable_reason));
                stability.push(format!("<div class='stab unstable'><details>{}</details></div>",
                                       text));
            }
        } else {
            stability.push(format!("<div class='stab unstable'>Experimental</div>"))
        }
    } else if let Some(depr) = item.deprecation.as_ref() {
        // Item has a plain `#[deprecated]` attribute (no stability attribute).
        let note = if show_reason && !depr.note.is_empty() {
            format!(": {}", depr.note)
        } else {
            String::new()
        };
        let since = if show_reason && !depr.since.is_empty() {
            format!(" since {}", Escape(&depr.since))
        } else {
            String::new()
        };

        // Distinguish "already deprecated" from "scheduled for deprecation".
        let text = if stability::deprecation_in_effect(&depr.since) {
            format!("Deprecated{}{}", since, MarkdownHtml(&note))
        } else {
            format!("Deprecating in {}{}", Escape(&depr.since), MarkdownHtml(&note))
        };
        stability.push(format!("<div class='stab deprecated'>{}</div>", text))
    }

    // `#[cfg(...)]` portability badge, long or short form per `show_reason`.
    if let Some(ref cfg) = item.attrs.cfg {
        stability.push(format!("<div class='stab portability'>{}</div>", if show_reason {
            cfg.render_long_html()
        } else {
            cfg.render_short_html()
        }));
    }

    stability
}

/// Renders `= <expr>` after a const/static declaration; renders nothing
/// when the initializer expression string is empty.
struct Initializer<'a>(&'a str);

impl<'a> fmt::Display for Initializer<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let Initializer(s) = *self;
        if s.is_empty() { return Ok(()); }
        write!(f, "<code> = </code>")?;
        write!(f, "<code>{}</code>", Escape(s))
    }
}

/// Renders the page body for a `const` item: signature block then docs.
fn item_constant(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
                 c: &clean::Constant) -> fmt::Result {
    write!(w, "<pre class='rust const'>")?;
    render_attributes(w, it)?;
    write!(w, "{vis}const \
               {name}: {typ}{init}</pre>",
           vis = VisSpace(&it.visibility),
           name = it.name.as_ref().unwrap(),
           typ = c.type_,
           init = Initializer(&c.expr))?;
    document(w, cx, it)
}

/// Renders the page body for a `static` item: signature block then docs.
fn item_static(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
               s: &clean::Static) -> fmt::Result {
    write!(w, "<pre class='rust static'>")?;
    render_attributes(w, it)?;
    write!(w, "{vis}static {mutability}\
               {name}: {typ}{init}</pre>",
           vis = VisSpace(&it.visibility),
           mutability = MutableSpace(s.mutability),
           name = it.name.as_ref().unwrap(),
           typ = s.type_,
           init = Initializer(&s.expr))?;
    document(w, cx, it)
}

/// Renders the page body for a free function: spotlight-trait tooltip,
/// attributes, full signature with wrapped argument list, then docs.
fn item_function(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
                 f: &clean::Function) -> fmt::Result {
    // Plain-text length of the rendered header ({:#} = alternate, unstyled),
    // used by `Method` to decide how to wrap/indent the argument list.
    let name_len = format!("{}{}{}{}{:#}fn {}{:#}",
                           VisSpace(&it.visibility),
                           ConstnessSpace(f.header.constness),
                           UnsafetySpace(f.header.unsafety),
                           AsyncSpace(f.header.asyncness),
                           AbiSpace(f.header.abi),
                           it.name.as_ref().unwrap(),
                           f.generics).len();
    write!(w, "{}<pre class='rust fn'>", render_spotlight_traits(it)?)?;
    render_attributes(w, it)?;
    write!(w, "{vis}{constness}{unsafety}{asyncness}{abi}fn \
               {name}{generics}{decl}{where_clause}</pre>",
           vis = VisSpace(&it.visibility),
           constness = ConstnessSpace(f.header.constness),
           unsafety = UnsafetySpace(f.header.unsafety),
           asyncness = AsyncSpace(f.header.asyncness),
           abi = AbiSpace(f.header.abi),
           name = it.name.as_ref().unwrap(),
           generics = f.generics,
           where_clause = WhereClause { gens: &f.generics, indent: 0, end_newline: true },
           decl = Method {
               decl: &f.decl,
               name_len,
               indent: 0,
           })?;
    document(w, cx, it)
}

/// Renders one `<li>` entry in a trait's implementors list.
fn render_implementor(cx: &Context, implementor: &Impl, w: &mut fmt::Formatter,
                      implementor_dups: &FxHashMap<&str, (DefId, bool)>) -> fmt::Result {
    write!(w, "<li><table class='table-display'><tbody><tr><td><code>")?;
    // If there's already another implementor that has the same abbridged name, use the
    // full path, for example in `std::iter::ExactSizeIterator`
    let use_absolute = match implementor.inner_impl().for_ {
        clean::ResolvedPath { ref path, is_generic: false, .. } |
        clean::BorrowedRef {
            type_: box clean::ResolvedPath { ref path, is_generic: false, .. },
            ..
        } => implementor_dups[path.last_name()].1,
        _ => false,
    };
    fmt_impl_for_trait_page(&implementor.inner_impl(), w, use_absolute)?;
    // Show the impl's associated-type bindings inline (e.g. `type Item = T;`).
    for it in &implementor.inner_impl().items {
        if let clean::TypedefItem(ref tydef, _) = it.inner {
            write!(w, "<span class=\"where fmt-newline\"> ")?;
            assoc_type(w, it, &vec![], Some(&tydef.type_), AssocItemLink::Anchor(None))?;
            write!(w, ";</span>")?;
        }
    }
    write!(w, "</code><td>")?;
    // Link to the impl's source, when a source span is available.
    if let Some(l) = (Item { cx, item: &implementor.impl_item }).src_href() {
        write!(w, "<div class='out-of-band'>")?;
        write!(w, "<a class='srclink' href='{}' title='{}'>[src]</a>",
               l, "goto source code")?;
        write!(w, "</div>")?;
    }
    writeln!(w, "</td></tr></tbody></table></li>")?;
    Ok(())
}

/// Renders each trait impl in `traits` in full (Normal mode, with docs).
fn render_impls(cx: &Context, w: &mut fmt::Formatter,
                traits: &[&&Impl],
                containing_item: &clean::Item) -> fmt::Result {
    for i in traits {
        let did = i.trait_did().unwrap();
        let assoc_link = AssocItemLink::GotoSource(did, &i.inner_impl().provided_trait_methods);
        render_impl(w, cx, i, assoc_link,
                    RenderMode::Normal, containing_item.stable_since(), true)?;
    }
    Ok(())
}

/// Renders the full page for a trait: the definition `<pre>`, docs, the
/// per-kind item sections, and (continued below) the implementors lists.
fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
              t: &clean::Trait) -> fmt::Result {
    // Build the supertrait-bounds suffix twice: HTML (`{}`) and plain
    // (`{:#}`). NOTE(review): `bounds` is freshly created, so the leading
    // `!bounds.is_empty()` check can never fire — harmless dead branch.
    let mut bounds = String::new();
    let mut bounds_plain = String::new();
    if !t.bounds.is_empty() {
        if !bounds.is_empty() {
            bounds.push(' ');
            bounds_plain.push(' ');
        }
        bounds.push_str(": ");
        bounds_plain.push_str(": ");
        for (i, p) in t.bounds.iter().enumerate() {
            if i > 0 {
                bounds.push_str(" + ");
                bounds_plain.push_str(" + ");
            }
            bounds.push_str(&format!("{}", *p));
            bounds_plain.push_str(&format!("{:#}", *p));
        }
    }

    // Partition the trait's items by kind for grouped rendering below.
    let types = t.items.iter().filter(|m| m.is_associated_type()).collect::<Vec<_>>();
    let consts = t.items.iter().filter(|m| m.is_associated_const()).collect::<Vec<_>>();
    let required = t.items.iter().filter(|m| m.is_ty_method()).collect::<Vec<_>>();
    let provided = t.items.iter().filter(|m| m.is_method()).collect::<Vec<_>>();

    // Output the trait definition
    wrap_into_docblock(w, |w| {
        write!(w, "<pre class='rust trait'>")?;
        render_attributes(w, it)?;
        write!(w, "{}{}{}trait {}{}{}",
               VisSpace(&it.visibility),
               UnsafetySpace(t.unsafety),
               if t.is_auto { "auto " } else { "" },
               it.name.as_ref().unwrap(),
               t.generics,
               bounds)?;

        if !t.generics.where_predicates.is_empty() {
            write!(w, "{}", WhereClause { gens: &t.generics, indent: 0, end_newline: true })?;
        } else {
            write!(w, " ")?;
        }

        if t.items.is_empty() {
            write!(w, "{{ }}")?;
        } else {
            // FIXME: we should be using a derived_id for the Anchors here
            // Items are printed in order: types, consts, required methods,
            // provided methods, with a blank line between non-empty groups.
            write!(w, "{{\n")?;
            for t in &types {
                write!(w, "    ")?;
                render_assoc_item(w, t, AssocItemLink::Anchor(None), ItemType::Trait)?;
                write!(w, ";\n")?;
            }
            if !types.is_empty() && !consts.is_empty() {
                w.write_str("\n")?;
            }
            for t in &consts {
                write!(w, "    ")?;
                render_assoc_item(w, t, AssocItemLink::Anchor(None), ItemType::Trait)?;
                write!(w, ";\n")?;
            }
            if !consts.is_empty() && !required.is_empty() {
                w.write_str("\n")?;
            }
            for (pos, m) in required.iter().enumerate() {
                write!(w, "    ")?;
                render_assoc_item(w, m, AssocItemLink::Anchor(None), ItemType::Trait)?;
                write!(w, ";\n")?;
                if pos < required.len() - 1 {
                    write!(w, "<div class='item-spacer'></div>")?;
                }
            }
            if !required.is_empty() && !provided.is_empty() {
                w.write_str("\n")?;
            }
            for (pos, m) in provided.iter().enumerate() {
                write!(w, "    ")?;
                render_assoc_item(w, m, AssocItemLink::Anchor(None), ItemType::Trait)?;
                match m.inner {
                    clean::MethodItem(ref inner) if !inner.generics.where_predicates.is_empty() => {
                        // Where-clause already ended the line; put the default
                        // body marker on its own line.
                        write!(w, ",\n    {{ ... }}\n")?;
                    },
                    _ => {
                        write!(w, " {{ ... }}\n")?;
                    },
                }
                if pos < provided.len() - 1 {
                    write!(w, "<div class='item-spacer'></div>")?;
                }
            }
            write!(w, "}}")?;
        }
        write!(w, "</pre>")
    })?;

    // Trait documentation
    document(w, cx, it)?;

    // Renders the `<h3>` + docs for one trait item; `t` is the trait item
    // itself, used for the relative stability-since marker.
    fn trait_item(w: &mut fmt::Formatter, cx: &Context, m: &clean::Item, t: &clean::Item)
                  -> fmt::Result {
        let name = m.name.as_ref().unwrap();
        let item_type = m.type_();
        let id = derive_id(format!("{}.{}", item_type, name));
        let ns_id = derive_id(format!("{}.{}", name, item_type.name_space()));
        write!(w, "{extra}<h3 id='{id}' class='method'>\
                   <span id='{ns_id}' class='invisible'><code>",
               extra = render_spotlight_traits(m)?,
               id = id,
               ns_id = ns_id)?;
        render_assoc_item(w, m, AssocItemLink::Anchor(Some(&id)), ItemType::Impl)?;
        write!(w, "</code>")?;
        render_stability_since(w, m, t)?;
        write!(w, "</span></h3>")?;
        document(w, cx, m)?;
        Ok(())
    }

    if !types.is_empty() {
        write!(w, "
            <h2 id='associated-types' class='small-section-header'>
              Associated Types<a href='#associated-types' class='anchor'></a>
            </h2>
            <div class='methods'>
        ")?;
        for t in &types {
            trait_item(w, cx, *t, it)?;
        }
        write!(w, "</div>")?;
    }

    if !consts.is_empty() {
        write!(w, "
            <h2 id='associated-const' class='small-section-header'>
              Associated Constants<a href='#associated-const' class='anchor'></a>
            </h2>
            <div class='methods'>
        ")?;
        for t in &consts {
            trait_item(w, cx, *t, it)?;
        }
        write!(w, "</div>")?;
    }

    // Output the documentation for each function individually
    if !required.is_empty() {
        write!(w, "
            <h2 id='required-methods' class='small-section-header'>
              Required Methods<a href='#required-methods' class='anchor'></a>
            </h2>
            <div class='methods'>
        ")?;
        for m in &required {
            trait_item(w, cx, *m, it)?;
        }
        write!(w, "</div>")?;
    }
    if !provided.is_empty() {
        write!(w, "
            <h2 id='provided-methods' class='small-section-header'>
              Provided Methods<a href='#provided-methods' class='anchor'></a>
            </h2>
            <div class='methods'>
        ")?;
        for m in &provided {
            trait_item(w, cx, *m, it)?;
        }
        write!(w, "</div>")?;
    }

    // If there are methods directly on this trait object,
    // render them here.
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)?;

    let cache = cache();
    let impl_header = "
        <h2 id='implementors' class='small-section-header'>
          Implementors<a href='#implementors' class='anchor'></a>
        </h2>
        <ul class='item-list' id='implementors-list'>
    ";

    let synthetic_impl_header = "
        <h2 id='synthetic-implementors' class='small-section-header'>
          Auto implementors<a href='#synthetic-implementors' class='anchor'></a>
        </h2>
        <ul class='item-list' id='synthetic-implementors-list'>
    ";

    let mut synthetic_types = Vec::new();

    if let Some(implementors) = cache.implementors.get(&it.def_id) {
        // The DefId is for the first Type found with that name.  The bool is
        // if any Types with the same name but different DefId have been found.
        let mut implementor_dups: FxHashMap<&str, (DefId, bool)> = FxHashMap();
        for implementor in implementors {
            match implementor.inner_impl().for_ {
                clean::ResolvedPath { ref path, did, is_generic: false, .. } |
                clean::BorrowedRef {
                    type_: box clean::ResolvedPath { ref path, did, is_generic: false, .. },
                    ..
                } => {
                    let &mut (prev_did, ref mut has_duplicates) =
                        implementor_dups.entry(path.last_name()).or_insert((did, false));
                    if prev_did != did {
                        *has_duplicates = true;
                    }
                }
                _ => {}
            }
        }

        // "local" = implementors whose self-type is in this crate's path map
        // (or has no DefId at all); "foreign" = impls on foreign types.
        let (local, foreign) = implementors.iter()
            .partition::<Vec<_>, _>(|i| i.inner_impl().for_.def_id()
                                         .map_or(true, |d| cache.paths.contains_key(&d)));

        let (synthetic, concrete) = local.iter()
            .partition::<Vec<_>, _>(|i| i.inner_impl().synthetic);

        if !foreign.is_empty() {
            write!(w, "
                <h2 id='foreign-impls' class='small-section-header'>
                  Implementations on Foreign Types<a href='#foreign-impls' class='anchor'></a>
                </h2>
            ")?;

            for implementor in foreign {
                let assoc_link = AssocItemLink::GotoSource(
                    implementor.impl_item.def_id,
                    &implementor.inner_impl().provided_trait_methods
                );
                render_impl(w, cx, &implementor, assoc_link,
                            RenderMode::Normal, implementor.impl_item.stable_since(), false)?;
            }
        }

        write!(w, "{}", impl_header)?;
        for implementor in concrete {
            render_implementor(cx, implementor, w, &implementor_dups)?;
        }
        write!(w, "</ul>")?;

        if t.auto {
            write!(w, "{}", synthetic_impl_header)?;
            for implementor in synthetic {
                // Remember the types involved so the implementors JS can
                // avoid re-inlining them.
                synthetic_types.extend(
                    collect_paths_for_type(implementor.inner_impl().for_.clone())
                );
                render_implementor(cx, implementor, w, &implementor_dups)?;
            }
            write!(w, "</ul>")?;
        }
    } else {
        // even without any implementations to write in, we still want the heading and list, so the
        // implementors javascript file pulled in below has somewhere to write the impls into
        write!(w, "{}", impl_header)?;
        write!(w, "</ul>")?;

        if t.auto {
            write!(w, "{}", synthetic_impl_header)?;
            write!(w, "</ul>")?;
        }
    }
    write!(w, r#"<script type="text/javascript">window.inlined_types=new Set({});</script>"#,
           as_json(&synthetic_types))?;

    write!(w, r#"<script type="text/javascript" async
                         src="{root_path}/implementors/{path}/{ty}.{name}.js">
                 </script>"#,
           root_path = vec![".."; cx.current.len()].join("/"),
           path = if it.def_id.is_local() {
               cx.current.join("/")
           } else {
               let (ref path, _) = cache.external_paths[&it.def_id];
               path[..path.len() - 1].join("/")
           },
           ty = it.type_().css_class(),
           name = *it.name.as_ref().unwrap())?;
    Ok(())
}

/// Builds the href for an associated item: an in-page anchor, or a link to
/// the source trait's page for `GotoSource` (falling back to the anchor when
/// the trait's page is unavailable).
fn naive_assoc_href(it: &clean::Item, link: AssocItemLink) -> String {
    use html::item_type::ItemType::*;

    let name = it.name.as_ref().unwrap();
    // Typedefs inside impls anchor under the associated-type namespace.
    let ty = match it.type_() {
        Typedef | AssociatedType => AssociatedType,
        s@_ => s,
    };

    let anchor = format!("#{}.{}", ty, name);
    match link {
        AssocItemLink::Anchor(Some(ref id)) => format!("#{}", id),
        AssocItemLink::Anchor(None) => anchor,
        AssocItemLink::GotoSource(did, _) => {
            href(did).map(|p| format!("{}{}", p.0, anchor)).unwrap_or(anchor)
        }
    }
}

/// Renders an associated-const signature (`const NAME: Type`), linked to its
/// anchor. The default value is intentionally unused here.
fn assoc_const(w: &mut fmt::Formatter,
               it: &clean::Item,
               ty: &clean::Type,
               _default: Option<&String>,
               link: AssocItemLink) -> fmt::Result {
    write!(w, "{}const <a href='{}' class=\"constant\"><b>{}</b></a>: {}",
           VisSpace(&it.visibility),
           naive_assoc_href(it, link),
           it.name.as_ref().unwrap(),
           ty)?;
    Ok(())
}

/// Renders an associated-type signature: `type Name[: Bounds][ = Default]`.
/// Generic over `fmt::Write` because it is also used to build `String`s.
fn assoc_type<W: fmt::Write>(w: &mut W, it: &clean::Item,
                             bounds: &Vec<clean::GenericBound>,
                             default: Option<&clean::Type>,
                             link: AssocItemLink) -> fmt::Result {
    write!(w, "type <a href='{}' class=\"type\">{}</a>",
           naive_assoc_href(it, link),
           it.name.as_ref().unwrap())?;
    if !bounds.is_empty() {
        write!(w, ": {}", GenericBounds(bounds))?
    }
    if let Some(default) = default {
        write!(w, " = {}", default)?;
    }
    Ok(())
}

/// Emits a "Stable since <ver>" marker, but only when the version is
/// non-empty and differs from the containing item's (avoids repeating the
/// parent's version on every member).
fn render_stability_since_raw<'a>(w: &mut fmt::Formatter,
                                  ver: Option<&'a str>,
                                  containing_ver: Option<&'a str>) -> fmt::Result {
    if let Some(v) = ver {
        if containing_ver != ver && v.len() > 0 {
            write!(w, "<div class='since' title='Stable since Rust version {0}'>{0}</div>",
                   v)?
        }
    }
    Ok(())
}

/// Convenience wrapper: stability-since marker for `item` relative to its
/// containing item.
fn render_stability_since(w: &mut fmt::Formatter,
                          item: &clean::Item,
                          containing_item: &clean::Item) -> fmt::Result {
    render_stability_since_raw(w, item.stable_since(), containing_item.stable_since())
}

/// Renders the signature line of any associated item (method, const, type).
/// Panics if called on a non-associated item.
fn render_assoc_item(w: &mut fmt::Formatter,
                     item: &clean::Item,
                     link: AssocItemLink,
                     parent: ItemType) -> fmt::Result {
    // Shared renderer for required and provided methods.
    fn method(w: &mut fmt::Formatter,
              meth: &clean::Item,
              header: hir::FnHeader,
              g: &clean::Generics,
              d: &clean::FnDecl,
              link: AssocItemLink,
              parent: ItemType)
              -> fmt::Result {
        let name = meth.name.as_ref().unwrap();
        let anchor = format!("#{}.{}", meth.type_(), name);
        let href = match link {
            AssocItemLink::Anchor(Some(ref id)) => format!("#{}", id),
            AssocItemLink::Anchor(None) => anchor,
            AssocItemLink::GotoSource(did, provided_methods) => {
                // We're creating a link from an impl-item to the corresponding
                // trait-item and need to map the anchored type accordingly.
                let ty = if provided_methods.contains(name) {
                    ItemType::Method
                } else {
                    ItemType::TyMethod
                };

                href(did).map(|p| format!("{}#{}.{}", p.0, ty, name)).unwrap_or(anchor)
            }
        };
        // Plain-text header length, used to wrap the argument list; trait
        // items are indented 4 columns inside the trait's `{ ... }`.
        let mut head_len = format!("{}{}{}{}{:#}fn {}{:#}",
                                   VisSpace(&meth.visibility),
                                   ConstnessSpace(header.constness),
                                   UnsafetySpace(header.unsafety),
                                   AsyncSpace(header.asyncness),
                                   AbiSpace(header.abi),
                                   name,
                                   *g).len();
        let (indent, end_newline) = if parent == ItemType::Trait {
            head_len += 4;
            (4, false)
        } else {
            (0, true)
        };
        render_attributes(w, meth)?;
        write!(w, "{}{}{}{}{}fn <a href='{href}' class='fnname'>{name}</a>\
                   {generics}{decl}{where_clause}",
               VisSpace(&meth.visibility),
               ConstnessSpace(header.constness),
               UnsafetySpace(header.unsafety),
               AsyncSpace(header.asyncness),
               AbiSpace(header.abi),
               href = href,
               name = name,
               generics = *g,
               decl = Method {
                   decl: d,
                   name_len: head_len,
                   indent,
               },
               where_clause = WhereClause {
                   gens: g,
                   indent,
                   end_newline,
               })
    }
    match item.inner {
        clean::StrippedItem(..) => Ok(()),
        clean::TyMethodItem(ref m) => {
            method(w, item, m.header, &m.generics, &m.decl, link, parent)
        }
        clean::MethodItem(ref m) => {
            method(w, item, m.header, &m.generics, &m.decl, link, parent)
        }
        clean::AssociatedConstItem(ref ty, ref default) => {
            assoc_const(w, item, ty, default.as_ref(), link)
        }
        clean::AssociatedTypeItem(ref bounds, ref default) => {
            assoc_type(w, item, bounds, default.as_ref(), link)
        }
        _ => panic!("render_assoc_item called on non-associated-item")
    }
}

/// Renders the page body for a struct: definition `<pre>`, docs, a "Fields"
/// section for plain (braced) structs, then associated items.
fn item_struct(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
               s: &clean::Struct) -> fmt::Result {
    wrap_into_docblock(w, |w| {
        write!(w, "<pre class='rust struct'>")?;
        render_attributes(w, it)?;
        render_struct(w,
                      it,
                      Some(&s.generics),
                      s.struct_type,
                      &s.fields,
                      "",
                      true)?;
        write!(w, "</pre>")
    })?;

    document(w, cx, it)?;
    let mut fields = s.fields.iter().filter_map(|f| {
        match f.inner {
            clean::StructFieldItem(ref ty) => Some((f, ty)),
            _ => None,
        }
    }).peekable();
    // Only braced structs get a per-field docs section; tuple/unit fields
    // have no documented names.
    if let doctree::Plain = s.struct_type {
        if fields.peek().is_some() {
            write!(w, "<h2 id='fields' class='fields small-section-header'>
                       Fields<a href='#fields' class='anchor'></a></h2>")?;
            for (field, ty) in fields {
                let id = derive_id(format!("{}.{}",
                                           ItemType::StructField,
                                           field.name.as_ref().unwrap()));
                let ns_id = derive_id(format!("{}.{}",
                                              field.name.as_ref().unwrap(),
                                              ItemType::StructField.name_space()));
                write!(w, "<span id=\"{id}\" class=\"{item_type} small-section-header\">
                           <a href=\"#{id}\" class=\"anchor field\"></a>
                           <span id=\"{ns_id}\" class='invisible'>
                           <code>{name}: {ty}</code>
                           </span></span>",
                       item_type = ItemType::StructField,
                       id = id,
                       ns_id = ns_id,
                       name = field.name.as_ref().unwrap(),
                       ty = ty)?;
                if let Some(stability_class) = field.stability_class() {
                    write!(w, "<span class='stab {stab}'></span>",
                           stab = stability_class)?;
                }
                document(w, cx, field)?;
            }
        }
    }
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)
}

/// Renders the page body for a union: definition, docs, fields, assoc items.
fn item_union(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
              s: &clean::Union) ->
              fmt::Result {
    wrap_into_docblock(w, |w| {
        write!(w, "<pre class='rust union'>")?;
        render_attributes(w, it)?;
        render_union(w,
                     it,
                     Some(&s.generics),
                     &s.fields,
                     "",
                     true)?;
        write!(w, "</pre>")
    })?;

    document(w, cx, it)?;
    let mut fields = s.fields.iter().filter_map(|f| {
        match f.inner {
            clean::StructFieldItem(ref ty) => Some((f, ty)),
            _ => None,
        }
    }).peekable();
    if fields.peek().is_some() {
        write!(w, "<h2 id='fields' class='fields small-section-header'>
                   Fields<a href='#fields' class='anchor'></a></h2>")?;
        for (field, ty) in fields {
            let name = field.name.as_ref().expect("union field name");
            let id = format!("{}.{}", ItemType::StructField, name);
            write!(w, "<span id=\"{id}\" class=\"{shortty} small-section-header\">\
                       <a href=\"#{id}\" class=\"anchor field\"></a>\
                       <span class='invisible'><code>{name}: {ty}</code></span>\
                       </span>",
                   id = id,
                   name = name,
                   shortty = ItemType::StructField,
                   ty = ty)?;
            if let Some(stability_class) = field.stability_class() {
                write!(w, "<span class='stab {stab}'></span>",
                       stab = stability_class)?;
            }
            document(w, cx, field)?;
        }
    }
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)
}

/// Renders the page body for an enum: definition `<pre>` with all variants,
/// docs, a "Variants" section (including struct-variant field tables), then
/// associated items.
fn item_enum(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
             e: &clean::Enum) -> fmt::Result {
    wrap_into_docblock(w, |w| {
        write!(w, "<pre class='rust enum'>")?;
        render_attributes(w, it)?;
        write!(w, "{}enum {}{}{}",
               VisSpace(&it.visibility),
               it.name.as_ref().unwrap(),
               e.generics,
               WhereClause { gens: &e.generics, indent: 0, end_newline: true })?;
        if e.variants.is_empty() && !e.variants_stripped {
            write!(w, " {{}}")?;
        } else {
            write!(w, " {{\n")?;
            for v in &e.variants {
                write!(w, "    ")?;
                let name = v.name.as_ref().unwrap();
                match v.inner {
                    clean::VariantItem(ref var) => {
                        match var.kind {
                            clean::VariantKind::CLike => write!(w, "{}", name)?,
                            clean::VariantKind::Tuple(ref tys) => {
                                write!(w, "{}(", name)?;
                                for (i, ty) in tys.iter().enumerate() {
                                    if i > 0 {
                                        write!(w, ",&nbsp;")?
                                    }
                                    write!(w, "{}", *ty)?;
                                }
                                write!(w, ")")?;
                            }
                            clean::VariantKind::Struct(ref s) => {
                                render_struct(w,
                                              v,
                                              None,
                                              s.struct_type,
                                              &s.fields,
                                              "    ",
                                              false)?;
                            }
                        }
                    }
                    _ => unreachable!()
                }
                write!(w, ",\n")?;
            }

            if e.variants_stripped {
                write!(w, "    // some variants omitted\n")?;
            }
            write!(w, "}}")?;
        }
        write!(w, "</pre>")
    })?;

    document(w, cx, it)?;
    if !e.variants.is_empty() {
        write!(w, "<h2 id='variants' class='variants small-section-header'>
                   Variants<a href='#variants' class='anchor'></a></h2>\n")?;
        for variant in &e.variants {
            let id = derive_id(format!("{}.{}",
                                       ItemType::Variant,
                                       variant.name.as_ref().unwrap()));
            let ns_id = derive_id(format!("{}.{}",
                                          variant.name.as_ref().unwrap(),
                                          ItemType::Variant.name_space()));
            write!(w, "<span id=\"{id}\" class=\"variant small-section-header\">\
                       <a href=\"#{id}\" class=\"anchor field\"></a>\
                       <span id='{ns_id}' class='invisible'><code>{name}",
                   id = id,
                   ns_id = ns_id,
                   name = variant.name.as_ref().unwrap())?;
            // Tuple variants repeat their payload types inline in the header.
            if let clean::VariantItem(ref var) = variant.inner {
                if let clean::VariantKind::Tuple(ref tys) = var.kind {
                    write!(w, "(")?;
                    for (i, ty) in tys.iter().enumerate() {
                        if i > 0 {
                            write!(w, ",&nbsp;")?;
                        }
                        write!(w, "{}", *ty)?;
                    }
                    write!(w, ")")?;
                }
            }
            write!(w, "</code></span></span>")?;
            document(w, cx, variant)?;

            // Struct variants additionally get a collapsible field table.
            use clean::{Variant, VariantKind};
            if let clean::VariantItem(Variant {
                kind: VariantKind::Struct(ref s)
            }) = variant.inner {
                let variant_id = derive_id(format!("{}.{}.fields",
                                                   ItemType::Variant,
                                                   variant.name.as_ref().unwrap()));
                write!(w, "<span class='docblock autohide sub-variant' id='{id}'>",
                       id = variant_id)?;
                write!(w, "<h3 class='fields'>Fields of <code>{name}</code></h3>\n
                           <table>", name = variant.name.as_ref().unwrap())?;
                for field in &s.fields {
                    use clean::StructFieldItem;
                    if let StructFieldItem(ref ty) = field.inner {
                        let id = derive_id(format!("variant.{}.field.{}",
                                                   variant.name.as_ref().unwrap(),
                                                   field.name.as_ref().unwrap()));
                        let ns_id = derive_id(format!("{}.{}.{}.{}",
                                                      variant.name.as_ref().unwrap(),
                                                      ItemType::Variant.name_space(),
                                                      field.name.as_ref().unwrap(),
                                                      ItemType::StructField.name_space()));
                        write!(w, "<tr><td \
                                   id='{id}'>\
                                   <span id='{ns_id}' class='invisible'>\
                                   <code>{f}:&nbsp;{t}</code></span></td><td>",
                               id = id,
                               ns_id = ns_id,
                               f = field.name.as_ref().unwrap(),
                               t = *ty)?;
                        document(w, cx, field)?;
                        write!(w, "</td></tr>")?;
                    }
                }
                write!(w, "</table></span>")?;
            }
            render_stability_since(w, variant, it)?;
        }
    }
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)?;
    Ok(())
}

/// Renders one attribute meta-item back to source-ish text:
/// `word`, `name = "value"`, or `name(inner, ...)`. Returns `None` when the
/// attribute has no renderable content.
fn render_attribute(attr: &ast::MetaItem) -> Option<String> {
    let name = attr.name();

    if attr.is_word() {
        Some(format!("{}", name))
    } else if let Some(v) = attr.value_str() {
        Some(format!("{} = {:?}", name, v.as_str()))
    } else if let Some(values) = attr.meta_item_list() {
        let display: Vec<_> = values.iter().filter_map(|attr| {
            attr.meta_item().and_then(|mi| render_attribute(mi))
        }).collect();

        if display.len() > 0 {
            Some(format!("{}({})", name, display.join(", ")))
        } else {
            None
        }
    } else {
        None
    }
}

// Only attributes in this list are shown in rendered documentation.
const ATTRIBUTE_WHITELIST: &'static [&'static str] = &[
    "export_name",
    "lang",
    "link_section",
    "must_use",
    "no_mangle",
    "repr",
    "unsafe_destructor_blind_to_params"
];

/// Renders an item's whitelisted attributes as a `#[...]` block above its
/// signature; emits nothing when no whitelisted attributes are present.
fn render_attributes(w: &mut fmt::Formatter, it: &clean::Item) -> fmt::Result {
    let mut attrs = String::new();

    for attr in &it.attrs.other_attrs {
        let name = attr.name();
        if !ATTRIBUTE_WHITELIST.contains(&&*name.as_str()) {
            continue;
        }
        if let Some(s) = render_attribute(&attr.meta().unwrap()) {
            attrs.push_str(&format!("#[{}]\n", s));
        }
    }
    if attrs.len() > 0 {
        write!(w, "<div class=\"docblock attributes\">{}</div>", &attrs)?;
    }
    Ok(())
}

/// Renders a struct definition (also reused for enum struct-variants, with
/// `structhead = false` and no generics). `tab` is the leading indent for
/// nested rendering.
fn render_struct(w: &mut fmt::Formatter, it: &clean::Item,
                 g: Option<&clean::Generics>,
                 ty: doctree::StructType,
                 fields: &[clean::Item],
                 tab: &str,
                 structhead: bool) -> fmt::Result {
    write!(w, "{}{}{}",
           VisSpace(&it.visibility),
           if structhead {"struct "} else {""},
           it.name.as_ref().unwrap())?;
    if let Some(g) = g {
        write!(w, "{}", g)?
    }
    // The three struct flavors need different layouts: braced with one field
    // per line, tuple on one line, and unit (where-clause then `;`).
    match ty {
        doctree::Plain => {
            if let Some(g) = g {
                write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: true })?
            }
            let mut has_visible_fields = false;
            write!(w, " {{")?;
            for field in fields {
                if let clean::StructFieldItem(ref ty) = field.inner {
                    write!(w, "\n{}    {}{}: {},",
                           tab,
                           VisSpace(&field.visibility),
                           field.name.as_ref().unwrap(),
                           *ty)?;
                    has_visible_fields = true;
                }
            }

            if has_visible_fields {
                if it.has_stripped_fields().unwrap() {
                    write!(w, "\n{}    // some fields omitted", tab)?;
                }
                write!(w, "\n{}", tab)?;
            } else if it.has_stripped_fields().unwrap() {
                // If there are no visible fields we can just display
                // `{ /* fields omitted */ }` to save space.
                write!(w, " /* fields omitted */ ")?;
            }
            write!(w, "}}")?;
        }
        doctree::Tuple => {
            write!(w, "(")?;
            for (i, field) in fields.iter().enumerate() {
                if i > 0 {
                    write!(w, ", ")?;
                }
                match field.inner {
                    clean::StrippedItem(box clean::StructFieldItem(..)) => {
                        // Private tuple fields render as `_` placeholders.
                        write!(w, "_")?
                    }
                    clean::StructFieldItem(ref ty) => {
                        write!(w, "{}{}", VisSpace(&field.visibility), *ty)?
                    }
                    _ => unreachable!()
                }
            }
            write!(w, ")")?;
            if let Some(g) = g {
                write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: false })?
            }
            write!(w, ";")?;
        }
        doctree::Unit => {
            // Needed for PhantomData.
            if let Some(g) = g {
                write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: false })?
            }
            write!(w, ";")?;
        }
    }
    Ok(())
}

/// Renders a union definition; mirrors the braced arm of `render_struct`.
fn render_union(w: &mut fmt::Formatter, it: &clean::Item,
                g: Option<&clean::Generics>,
                fields: &[clean::Item],
                tab: &str,
                structhead: bool) -> fmt::Result {
    write!(w, "{}{}{}",
           VisSpace(&it.visibility),
           if structhead {"union "} else {""},
           it.name.as_ref().unwrap())?;
    if let Some(g) = g {
        write!(w, "{}", g)?;
        write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: true })?;
    }

    write!(w, " {{\n{}", tab)?;
    for field in fields {
        if let clean::StructFieldItem(ref ty) = field.inner {
            write!(w, "    {}{}: {},\n{}",
                   VisSpace(&field.visibility),
                   field.name.as_ref().unwrap(),
                   *ty,
                   tab)?;
        }
    }

    if it.has_stripped_fields().unwrap() {
        write!(w, "    // some fields omitted\n{}", tab)?;
    }
    write!(w, "}}")?;
    Ok(())
}

/// How an associated item's signature should be linked: to an in-page
/// anchor, or back to the defining trait's page (with the set of provided
/// method names, used to pick the right anchor namespace).
#[derive(Copy, Clone)]
enum AssocItemLink<'a> {
    Anchor(Option<&'a str>),
    GotoSource(DefId, &'a FxHashSet<String>),
}

impl<'a> AssocItemLink<'a> {
    /// Returns a copy of `self` with the anchor id filled in (no-op for
    /// `GotoSource`).
    fn anchor(&self, id: &'a String) -> Self {
        match *self {
            AssocItemLink::Anchor(_) => { AssocItemLink::Anchor(Some(&id)) },
            ref other => *other,
        }
    }
}

/// Which associated items to render: everything, or only the methods
/// reachable through a `Deref<Target = ...>` impl.
enum AssocItemRender<'a> {
    All,
    DerefFor { trait_: &'a clean::Type, type_: &'a clean::Type, deref_mut_: bool }
}

/// Render mode for an impl block: normal, or filtered for Deref-forwarded
/// methods (where `mut_` says whether `DerefMut` is also implemented).
#[derive(Copy, Clone, PartialEq)]
enum RenderMode {
    Normal,
    ForDeref { mut_: bool },
}

/// Renders all impls recorded for `it`: inherent methods first, then
/// Deref-forwarded methods, then concrete and auto trait impls.
fn render_assoc_items(w: &mut fmt::Formatter,
                      cx: &Context,
                      containing_item: &clean::Item,
                      it: DefId,
                      what: AssocItemRender) -> fmt::Result {
    let c = cache();
    let v = match c.impls.get(&it) {
        Some(v) => v,
        None => return Ok(()),
    };
    // Inherent impls (no trait) vs. trait impls.
    let (non_trait, traits): (Vec<_>, _) = v.iter().partition(|i| {
        i.inner_impl().trait_.is_none()
    });
    if !non_trait.is_empty() {
        let render_mode = match what {
            AssocItemRender::All => {
                write!(w, "
                    <h2 id='methods' class='small-section-header'>
                      Methods<a href='#methods' class='anchor'></a>
                    </h2>
                ")?;
                RenderMode::Normal
            }
            AssocItemRender::DerefFor { trait_, type_, deref_mut_ } => {
                write!(w, "
                    <h2 id='deref-methods' class='small-section-header'>
                      Methods from {}&lt;Target = {}&gt;<a href='#deref-methods' class='anchor'></a>
                    </h2>
", trait_, type_)?; RenderMode::ForDeref { mut_: deref_mut_ } } }; for i in &non_trait { render_impl(w, cx, i, AssocItemLink::Anchor(None), render_mode, containing_item.stable_since(), true)?; } } if let AssocItemRender::DerefFor { .. } = what { return Ok(()); } if !traits.is_empty() { let deref_impl = traits.iter().find(|t| { t.inner_impl().trait_.def_id() == c.deref_trait_did }); if let Some(impl_) = deref_impl { let has_deref_mut = traits.iter().find(|t| { t.inner_impl().trait_.def_id() == c.deref_mut_trait_did }).is_some(); render_deref_methods(w, cx, impl_, containing_item, has_deref_mut)?; } let (synthetic, concrete) = traits .iter() .partition::<Vec<_>, _>(|t| t.inner_impl().synthetic); struct RendererStruct<'a, 'b, 'c>(&'a Context, Vec<&'b &'b Impl>, &'c clean::Item); impl<'a, 'b, 'c> fmt::Display for RendererStruct<'a, 'b, 'c> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { render_impls(self.0, fmt, &self.1, self.2) } } let impls = format!("{}", RendererStruct(cx, concrete, containing_item)); if !impls.is_empty() { write!(w, " <h2 id='implementations' class='small-section-header'> Trait Implementations<a href='#implementations' class='anchor'></a> </h2> <div id='implementations-list'>{}</div>", impls)?; } if !synthetic.is_empty() { write!(w, " <h2 id='synthetic-implementations' class='small-section-header'> Auto Trait Implementations<a href='#synthetic-implementations' class='anchor'></a> </h2> <div id='synthetic-implementations-list'> ")?; render_impls(cx, w, &synthetic, containing_item)?; write!(w, "</div>")?; } } Ok(()) } fn render_deref_methods(w: &mut fmt::Formatter, cx: &Context, impl_: &Impl, container_item: &clean::Item, deref_mut: bool) -> fmt::Result { let deref_type = impl_.inner_impl().trait_.as_ref().unwrap(); let target = impl_.inner_impl().items.iter().filter_map(|item| { match item.inner { clean::TypedefItem(ref t, true) => Some(&t.type_), _ => None, } }).next().expect("Expected associated type binding"); let what = 
AssocItemRender::DerefFor { trait_: deref_type, type_: target, deref_mut_: deref_mut }; if let Some(did) = target.def_id() { render_assoc_items(w, cx, container_item, did, what) } else { if let Some(prim) = target.primitive_type() { if let Some(&did) = cache().primitive_locations.get(&prim) { render_assoc_items(w, cx, container_item, did, what)?; } } Ok(()) } } fn should_render_item(item: &clean::Item, deref_mut_: bool) -> bool { let self_type_opt = match item.inner { clean::MethodItem(ref method) => method.decl.self_type(), clean::TyMethodItem(ref method) => method.decl.self_type(), _ => None }; if let Some(self_ty) = self_type_opt { let (by_mut_ref, by_box, by_value) = match self_ty { SelfTy::SelfBorrowed(_, mutability) | SelfTy::SelfExplicit(clean::BorrowedRef { mutability, .. }) => { (mutability == Mutability::Mutable, false, false) }, SelfTy::SelfExplicit(clean::ResolvedPath { did, .. }) => { (false, Some(did) == cache().owned_box_did, false) }, SelfTy::SelfValue => (false, false, true), _ => (false, false, false), }; (deref_mut_ || !by_mut_ref) && !by_box && !by_value } else { false } } fn render_spotlight_traits(item: &clean::Item) -> Result<String, fmt::Error> { let mut out = String::new(); match item.inner { clean::FunctionItem(clean::Function { ref decl, .. }) | clean::TyMethodItem(clean::TyMethod { ref decl, .. }) | clean::MethodItem(clean::Method { ref decl, .. }) | clean::ForeignFunctionItem(clean::Function { ref decl, .. 
}) => { out = spotlight_decl(decl)?; } _ => {} } Ok(out) } fn spotlight_decl(decl: &clean::FnDecl) -> Result<String, fmt::Error> { let mut out = String::new(); let mut trait_ = String::new(); if let Some(did) = decl.output.def_id() { let c = cache(); if let Some(impls) = c.impls.get(&did) { for i in impls { let impl_ = i.inner_impl(); if impl_.trait_.def_id().map_or(false, |d| c.traits[&d].is_spotlight) { if out.is_empty() { out.push_str( &format!("<h3 class=\"important\">Important traits for {}</h3>\ <code class=\"content\">", impl_.for_)); trait_.push_str(&format!("{}", impl_.for_)); } //use the "where" class here to make it small out.push_str(&format!("<span class=\"where fmt-newline\">{}</span>", impl_)); let t_did = impl_.trait_.def_id().unwrap(); for it in &impl_.items { if let clean::TypedefItem(ref tydef, _) = it.inner { out.push_str("<span class=\"where fmt-newline\"> "); assoc_type(&mut out, it, &vec![], Some(&tydef.type_), AssocItemLink::GotoSource(t_did, &FxHashSet()))?; out.push_str(";</span>"); } } } } } } if !out.is_empty() { out.insert_str(0, &format!("<div class=\"important-traits\"><div class='tooltip'>ⓘ\ <span class='tooltiptext'>Important traits for {}</span></div>\ <div class=\"content hidden\">", trait_)); out.push_str("</code></div></div>"); } Ok(out) } fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLink, render_mode: RenderMode, outer_version: Option<&str>, show_def_docs: bool) -> fmt::Result { if render_mode == RenderMode::Normal { let id = derive_id(match i.inner_impl().trait_ { Some(ref t) => format!("impl-{}", small_url_encode(&format!("{:#}", t))), None => "impl".to_string(), }); write!(w, "<h3 id='{}' class='impl'><span class='in-band'><table class='table-display'>\ <tbody><tr><td><code>{}</code>", id, i.inner_impl())?; write!(w, "<a href='#{}' class='anchor'></a>", id)?; write!(w, "</span></td><td><span class='out-of-band'>")?; let since = i.impl_item.stability.as_ref().map(|s| &s.since[..]); if let 
           Some(l) = (Item { item: &i.impl_item, cx: cx }).src_href() {
            write!(w, "<div class='ghost'></div>")?;
            render_stability_since_raw(w, since, outer_version)?;
            write!(w, "<a class='srclink' href='{}' title='{}'>[src]</a>",
                   l, "goto source code")?;
        } else {
            render_stability_since_raw(w, since, outer_version)?;
        }
        write!(w, "</span></td></tr></tbody></table></h3>")?;
        if let Some(ref dox) = cx.shared.maybe_collapsed_doc_value(&i.impl_item) {
            write!(w, "<div class='docblock'>{}</div>", Markdown(&*dox, &i.impl_item.links()))?;
        }
    }

    // Renders one item inside the impl block: heading, source link, and docs
    // (falling back to the trait's docs/stability for trait impl items).
    fn doc_impl_item(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item,
                     link: AssocItemLink, render_mode: RenderMode,
                     is_default_item: bool, outer_version: Option<&str>,
                     trait_: Option<&clean::Trait>, show_def_docs: bool) -> fmt::Result {
        let item_type = item.type_();
        let name = item.name.as_ref().unwrap();

        let render_method_item: bool = match render_mode {
            RenderMode::Normal => true,
            RenderMode::ForDeref { mut_: deref_mut_ } => should_render_item(&item, deref_mut_),
        };

        match item.inner {
            clean::MethodItem(clean::Method { ref decl, .. }) |
            clean::TyMethodItem(clean::TyMethod { ref decl, .. }) => {
                // Only render when the method is not static or we allow static methods
                if render_method_item {
                    let id = derive_id(format!("{}.{}", item_type, name));
                    let ns_id = derive_id(format!("{}.{}", name, item_type.name_space()));
                    write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?;
                    write!(w, "{}", spotlight_decl(decl)?)?;
                    write!(w, "<span id='{}' class='invisible'>", ns_id)?;
                    write!(w, "<table class='table-display'><tbody><tr><td><code>")?;
                    render_assoc_item(w, item, link.anchor(&id), ItemType::Impl)?;
                    write!(w, "</code>")?;
                    if let Some(l) = (Item { cx, item }).src_href() {
                        write!(w, "</span></td><td><span class='out-of-band'>")?;
                        write!(w, "<div class='ghost'></div>")?;
                        render_stability_since_raw(w, item.stable_since(), outer_version)?;
                        write!(w, "<a class='srclink' href='{}' title='{}'>[src]</a>",
                               l, "goto source code")?;
                    } else {
                        write!(w, "</td><td>")?;
                        render_stability_since_raw(w, item.stable_since(), outer_version)?;
                    }
                    write!(w, "</td></tr></tbody></table></span></h4>")?;
                }
            }
            clean::TypedefItem(ref tydef, _) => {
                let id = derive_id(format!("{}.{}", ItemType::AssociatedType, name));
                let ns_id = derive_id(format!("{}.{}", name, item_type.name_space()));
                write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?;
                write!(w, "<span id='{}' class='invisible'><code>", ns_id)?;
                assoc_type(w, item, &Vec::new(), Some(&tydef.type_), link.anchor(&id))?;
                write!(w, "</code></span></h4>\n")?;
            }
            clean::AssociatedConstItem(ref ty, ref default) => {
                let id = derive_id(format!("{}.{}", item_type, name));
                let ns_id = derive_id(format!("{}.{}", name, item_type.name_space()));
                write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?;
                write!(w, "<span id='{}' class='invisible'><code>", ns_id)?;
                assoc_const(w, item, ty, default.as_ref(), link.anchor(&id))?;
                write!(w, "</code></span></h4>\n")?;
            }
            clean::AssociatedTypeItem(ref bounds, ref default) => {
                let id = derive_id(format!("{}.{}", item_type, name));
                let ns_id = derive_id(format!("{}.{}", name, item_type.name_space()));
                write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?;
                write!(w, "<span id='{}' class='invisible'><code>", ns_id)?;
                assoc_type(w, item, bounds, default.as_ref(), link.anchor(&id))?;
                write!(w, "</code></span></h4>\n")?;
            }
            clean::StrippedItem(..) => return Ok(()),
            _ => panic!("can't make docs for trait item with name {:?}", item.name)
        }

        if render_method_item || render_mode == RenderMode::Normal {
            let prefix = render_assoc_const_value(item);

            if !is_default_item {
                if let Some(t) = trait_ {
                    // The trait item may have been stripped so we might not
                    // find any documentation or stability for it.
                    if let Some(it) = t.items.iter().find(|i| i.name == item.name) {
                        // We need the stability of the item from the trait
                        // because impls can't have a stability.
                        document_stability(w, cx, it)?;
                        if item.doc_value().is_some() {
                            document_full(w, item, cx, &prefix)?;
                        } else if show_def_docs {
                            // In case the item isn't documented,
                            // provide short documentation from the trait.
                            document_short(w, it, link, &prefix)?;
                        }
                    }
                } else {
                    document_stability(w, cx, item)?;
                    if show_def_docs {
                        document_full(w, item, cx, &prefix)?;
                    }
                }
            } else {
                document_stability(w, cx, item)?;
                if show_def_docs {
                    document_short(w, item, link, &prefix)?;
                }
            }
        }
        Ok(())
    }

    let traits = &cache().traits;
    let trait_ = i.trait_did().map(|did| &traits[&did]);

    if !show_def_docs {
        write!(w, "<span class='docblock autohide'>")?;
    }

    write!(w, "<div class='impl-items'>")?;
    for trait_item in &i.inner_impl().items {
        doc_impl_item(w, cx, trait_item, link, render_mode, false, outer_version,
                      trait_, show_def_docs)?;
    }

    // Renders docs for the trait's defaulted items that this impl did not
    // override, linking each back to the trait's page.
    fn render_default_items(w: &mut fmt::Formatter,
                            cx: &Context,
                            t: &clean::Trait,
                            i: &clean::Impl,
                            render_mode: RenderMode,
                            outer_version: Option<&str>,
                            show_def_docs: bool) -> fmt::Result {
        for trait_item in &t.items {
            let n = trait_item.name.clone();
            // Skip items the impl overrides.
            if i.items.iter().find(|m| m.name == n).is_some() {
                continue;
            }
            let did = i.trait_.as_ref().unwrap().def_id().unwrap();
            let assoc_link = AssocItemLink::GotoSource(did, &i.provided_trait_methods);

            doc_impl_item(w, cx, trait_item, assoc_link, render_mode, true,
                          outer_version, None, show_def_docs)?;
        }
        Ok(())
    }

    // If we've implemented a trait, then also emit documentation for all
    // default items which weren't overridden in the implementation block.
    if let Some(t) = trait_ {
        render_default_items(w, cx, t, &i.inner_impl(), render_mode, outer_version,
                             show_def_docs)?;
    }
    write!(w, "</div>")?;

    if !show_def_docs {
        write!(w, "</span>")?;
    }

    Ok(())
}

/// Renders the page body for a type alias, plus any items associated
/// directly with the alias itself.
fn item_typedef(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
                t: &clean::Typedef) -> fmt::Result {
    write!(w, "<pre class='rust typedef'>")?;
    render_attributes(w, it)?;
    write!(w, "type {}{}{where_clause} = {type_};</pre>",
           it.name.as_ref().unwrap(),
           t.generics,
           where_clause = WhereClause { gens: &t.generics, indent: 0, end_newline: true },
           type_ = t.type_)?;

    document(w, cx, it)?;

    // Render any items associated directly to this alias, as otherwise they
    // won't be visible anywhere in the docs. It would be nice to also show
    // associated items from the aliased type (see discussion in #32077), but
    // we need #14072 to make sense of the generics.
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)
}

/// Renders the page body for an `extern { type ...; }` foreign type.
fn item_foreign_type(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item) -> fmt::Result {
    writeln!(w, "<pre class='rust foreigntype'>extern {{")?;
    render_attributes(w, it)?;
    write!(
        w,
        "    {}type {};\n}}</pre>",
        VisSpace(&it.visibility),
        it.name.as_ref().unwrap(),
    )?;
    document(w, cx, it)?;
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)
}

// (head of the sidebar renderer; the remainder of this impl lies past the
// end of this chunk)
impl<'a> fmt::Display for Sidebar<'a> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let cx = self.cx;
        let it = self.item;
        let parentlen = cx.current.len() - if it.is_mod() {1} else {0};

        if it.is_struct() || it.is_trait() || it.is_primitive() || it.is_union()
            || it.is_enum() || it.is_mod() || it.is_typedef() {
            write!(fmt, "<p class='location'>{}{}</p>",
                   match it.inner {
                       clean::StructItem(..)
=> "Struct ", clean::TraitItem(..) => "Trait ", clean::PrimitiveItem(..) => "Primitive Type ", clean::UnionItem(..) => "Union ", clean::EnumItem(..) => "Enum ", clean::TypedefItem(..) => "Type Definition ", clean::ForeignTypeItem => "Foreign Type ", clean::ModuleItem(..) => if it.is_crate() { "Crate " } else { "Module " }, _ => "", }, it.name.as_ref().unwrap())?; } if it.is_crate() { if let Some(ref version) = cache().crate_version { write!(fmt, "<div class='block version'>\ <p>Version {}</p>\ </div> <a id='all-types' href='all.html'><p>See all {}'s items</p></a>", version, it.name.as_ref().unwrap())?; } } write!(fmt, "<div class=\"sidebar-elems\">")?; match it.inner { clean::StructItem(ref s) => sidebar_struct(fmt, it, s)?, clean::TraitItem(ref t) => sidebar_trait(fmt, it, t)?, clean::PrimitiveItem(ref p) => sidebar_primitive(fmt, it, p)?, clean::UnionItem(ref u) => sidebar_union(fmt, it, u)?, clean::EnumItem(ref e) => sidebar_enum(fmt, it, e)?, clean::TypedefItem(ref t, _) => sidebar_typedef(fmt, it, t)?, clean::ModuleItem(ref m) => sidebar_module(fmt, it, &m.items)?, clean::ForeignTypeItem => sidebar_foreign_type(fmt, it)?, _ => (), } // The sidebar is designed to display sibling functions, modules and // other miscellaneous information. since there are lots of sibling // items (and that causes quadratic growth in large modules), // we refactor common parts into a shared JavaScript file per module. // still, we don't move everything into JS because we want to preserve // as much HTML as possible in order to allow non-JS-enabled browsers // to navigate the documentation (though slightly inefficiently). write!(fmt, "<p class='location'>")?; for (i, name) in cx.current.iter().take(parentlen).enumerate() { if i > 0 { write!(fmt, "::<wbr>")?; } write!(fmt, "<a href='{}index.html'>{}</a>", &cx.root_path()[..(cx.current.len() - i - 1) * 3], *name)?; } write!(fmt, "</p>")?; // Sidebar refers to the enclosing module, not this module. 
let relpath = if it.is_mod() { "../" } else { "" }; write!(fmt, "<script>window.sidebarCurrent = {{\ name: '{name}', \ ty: '{ty}', \ relpath: '{path}'\ }};</script>", name = it.name.as_ref().map(|x| &x[..]).unwrap_or(""), ty = it.type_().css_class(), path = relpath)?; if parentlen == 0 { // There is no sidebar-items.js beyond the crate root path // FIXME maybe dynamic crate loading can be merged here } else { write!(fmt, "<script defer src=\"{path}sidebar-items.js\"></script>", path = relpath)?; } // Closes sidebar-elems div. write!(fmt, "</div>")?; Ok(()) } } fn get_methods(i: &clean::Impl, for_deref: bool) -> Vec<String> { i.items.iter().filter_map(|item| { match item.name { // Maybe check with clean::Visibility::Public as well? Some(ref name) if !name.is_empty() && item.visibility.is_some() && item.is_method() => { if !for_deref || should_render_item(item, false) { Some(format!("<a href=\"#method.{name}\">{name}</a>", name = name)) } else { None } } _ => None, } }).collect::<Vec<_>>() } // The point is to url encode any potential character from a type with genericity. 
fn small_url_encode(s: &str) -> String { s.replace("<", "%3C") .replace(">", "%3E") .replace(" ", "%20") .replace("?", "%3F") .replace("'", "%27") .replace("&", "%26") .replace(",", "%2C") .replace(":", "%3A") .replace(";", "%3B") .replace("[", "%5B") .replace("]", "%5D") .replace("\"", "%22") } fn sidebar_assoc_items(it: &clean::Item) -> String { let mut out = String::new(); let c = cache(); if let Some(v) = c.impls.get(&it.def_id) { let ret = v.iter() .filter(|i| i.inner_impl().trait_.is_none()) .flat_map(|i| get_methods(i.inner_impl(), false)) .collect::<String>(); if !ret.is_empty() { out.push_str(&format!("<a class=\"sidebar-title\" href=\"#methods\">Methods\ </a><div class=\"sidebar-links\">{}</div>", ret)); } if v.iter().any(|i| i.inner_impl().trait_.is_some()) { if let Some(impl_) = v.iter() .filter(|i| i.inner_impl().trait_.is_some()) .find(|i| i.inner_impl().trait_.def_id() == c.deref_trait_did) { if let Some(target) = impl_.inner_impl().items.iter().filter_map(|item| { match item.inner { clean::TypedefItem(ref t, true) => Some(&t.type_), _ => None, } }).next() { let inner_impl = target.def_id().or(target.primitive_type().and_then(|prim| { c.primitive_locations.get(&prim).cloned() })).and_then(|did| c.impls.get(&did)); if let Some(impls) = inner_impl { out.push_str("<a class=\"sidebar-title\" href=\"#deref-methods\">"); out.push_str(&format!("Methods from {}&lt;Target={}&gt;", Escape(&format!("{:#}", impl_.inner_impl().trait_.as_ref().unwrap())), Escape(&format!("{:#}", target)))); out.push_str("</a>"); let ret = impls.iter() .filter(|i| i.inner_impl().trait_.is_none()) .flat_map(|i| get_methods(i.inner_impl(), true)) .collect::<String>(); out.push_str(&format!("<div class=\"sidebar-links\">{}</div>", ret)); } } } let format_impls = |impls: Vec<&Impl>| { let mut links = HashSet::new(); impls.iter() .filter_map(|i| { let is_negative_impl = is_negative_impl(i.inner_impl()); if let Some(ref i) = i.inner_impl().trait_ { let i_display = format!("{:#}", i); let 
out = Escape(&i_display); let encoded = small_url_encode(&format!("{:#}", i)); let generated = format!("<a href=\"#impl-{}\">{}{}</a>", encoded, if is_negative_impl { "!" } else { "" }, out); if links.insert(generated.clone()) { Some(generated) } else { None } } else { None } }) .collect::<String>() }; let (synthetic, concrete) = v .iter() .partition::<Vec<_>, _>(|i| i.inner_impl().synthetic); let concrete_format = format_impls(concrete); let synthetic_format = format_impls(synthetic); if !concrete_format.is_empty() { out.push_str("<a class=\"sidebar-title\" href=\"#implementations\">\ Trait Implementations</a>"); out.push_str(&format!("<div class=\"sidebar-links\">{}</div>", concrete_format)); } if !synthetic_format.is_empty() { out.push_str("<a class=\"sidebar-title\" href=\"#synthetic-implementations\">\ Auto Trait Implementations</a>"); out.push_str(&format!("<div class=\"sidebar-links\">{}</div>", synthetic_format)); } } } out } fn sidebar_struct(fmt: &mut fmt::Formatter, it: &clean::Item, s: &clean::Struct) -> fmt::Result { let mut sidebar = String::new(); let fields = get_struct_fields_name(&s.fields); if !fields.is_empty() { if let doctree::Plain = s.struct_type { sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#fields\">Fields</a>\ <div class=\"sidebar-links\">{}</div>", fields)); } } sidebar.push_str(&sidebar_assoc_items(it)); if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\">{}</div>", sidebar)?; } Ok(()) } fn extract_for_impl_name(item: &clean::Item) -> Option<(String, String)> { match item.inner { clean::ItemEnum::ImplItem(ref i) => { if let Some(ref trait_) = i.trait_ { Some((format!("{:#}", i.for_), format!("{:#}", trait_))) } else { None } }, _ => None, } } fn is_negative_impl(i: &clean::Impl) -> bool { i.polarity == Some(clean::ImplPolarity::Negative) } fn sidebar_trait(fmt: &mut fmt::Formatter, it: &clean::Item, t: &clean::Trait) -> fmt::Result { let mut sidebar = String::new(); let types = t.items .iter() 
.filter_map(|m| { match m.name { Some(ref name) if m.is_associated_type() => { Some(format!("<a href=\"#associatedtype.{name}\">{name}</a>", name=name)) } _ => None, } }) .collect::<String>(); let consts = t.items .iter() .filter_map(|m| { match m.name { Some(ref name) if m.is_associated_const() => { Some(format!("<a href=\"#associatedconstant.{name}\">{name}</a>", name=name)) } _ => None, } }) .collect::<String>(); let required = t.items .iter() .filter_map(|m| { match m.name { Some(ref name) if m.is_ty_method() => { Some(format!("<a href=\"#tymethod.{name}\">{name}</a>", name=name)) } _ => None, } }) .collect::<String>(); let provided = t.items .iter() .filter_map(|m| { match m.name { Some(ref name) if m.is_method() => { Some(format!("<a href=\"#method.{name}\">{name}</a>", name=name)) } _ => None, } }) .collect::<String>(); if !types.is_empty() { sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#associated-types\">\ Associated Types</a><div class=\"sidebar-links\">{}</div>", types)); } if !consts.is_empty() { sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#associated-const\">\ Associated Constants</a><div class=\"sidebar-links\">{}</div>", consts)); } if !required.is_empty() { sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#required-methods\">\ Required Methods</a><div class=\"sidebar-links\">{}</div>", required)); } if !provided.is_empty() { sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#provided-methods\">\ Provided Methods</a><div class=\"sidebar-links\">{}</div>", provided)); } let c = cache(); if let Some(implementors) = c.implementors.get(&it.def_id) { let res = implementors.iter() .filter(|i| i.inner_impl().for_.def_id() .map_or(false, |d| !c.paths.contains_key(&d))) .filter_map(|i| { match extract_for_impl_name(&i.impl_item) { Some((ref name, ref url)) => { Some(format!("<a href=\"#impl-{}\">{}</a>", small_url_encode(url), Escape(name))) } _ => None, } }) .collect::<String>(); if !res.is_empty() { 
sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#foreign-impls\">\ Implementations on Foreign Types</a><div \ class=\"sidebar-links\">{}</div>", res)); } } sidebar.push_str("<a class=\"sidebar-title\" href=\"#implementors\">Implementors</a>"); if t.auto { sidebar.push_str("<a class=\"sidebar-title\" \ href=\"#synthetic-implementors\">Auto Implementors</a>"); } sidebar.push_str(&sidebar_assoc_items(it)); write!(fmt, "<div class=\"block items\">{}</div>", sidebar) } fn sidebar_primitive(fmt: &mut fmt::Formatter, it: &clean::Item, _p: &clean::PrimitiveType) -> fmt::Result { let sidebar = sidebar_assoc_items(it); if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\">{}</div>", sidebar)?; } Ok(()) } fn sidebar_typedef(fmt: &mut fmt::Formatter, it: &clean::Item, _t: &clean::Typedef) -> fmt::Result { let sidebar = sidebar_assoc_items(it); if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\">{}</div>", sidebar)?; } Ok(()) } fn get_struct_fields_name(fields: &[clean::Item]) -> String { fields.iter() .filter(|f| if let clean::StructFieldItem(..) 
= f.inner { true } else { false }) .filter_map(|f| match f.name { Some(ref name) => Some(format!("<a href=\"#structfield.{name}\">\ {name}</a>", name=name)), _ => None, }) .collect() } fn sidebar_union(fmt: &mut fmt::Formatter, it: &clean::Item, u: &clean::Union) -> fmt::Result { let mut sidebar = String::new(); let fields = get_struct_fields_name(&u.fields); if !fields.is_empty() { sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#fields\">Fields</a>\ <div class=\"sidebar-links\">{}</div>", fields)); } sidebar.push_str(&sidebar_assoc_items(it)); if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\">{}</div>", sidebar)?; } Ok(()) } fn sidebar_enum(fmt: &mut fmt::Formatter, it: &clean::Item, e: &clean::Enum) -> fmt::Result { let mut sidebar = String::new(); let variants = e.variants.iter() .filter_map(|v| match v.name { Some(ref name) => Some(format!("<a href=\"#variant.{name}\">{name}\ </a>", name = name)), _ => None, }) .collect::<String>(); if !variants.is_empty() { sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#variants\">Variants</a>\ <div class=\"sidebar-links\">{}</div>", variants)); } sidebar.push_str(&sidebar_assoc_items(it)); if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\">{}</div>", sidebar)?; } Ok(()) } fn item_ty_to_strs(ty: &ItemType) -> (&'static str, &'static str) { match *ty { ItemType::ExternCrate | ItemType::Import => ("reexports", "Re-exports"), ItemType::Module => ("modules", "Modules"), ItemType::Struct => ("structs", "Structs"), ItemType::Union => ("unions", "Unions"), ItemType::Enum => ("enums", "Enums"), ItemType::Function => ("functions", "Functions"), ItemType::Typedef => ("types", "Type Definitions"), ItemType::Static => ("statics", "Statics"), ItemType::Constant => ("constants", "Constants"), ItemType::Trait => ("traits", "Traits"), ItemType::Impl => ("impls", "Implementations"), ItemType::TyMethod => ("tymethods", "Type Methods"), ItemType::Method => ("methods", "Methods"), 
ItemType::StructField => ("fields", "Struct Fields"), ItemType::Variant => ("variants", "Variants"), ItemType::Macro => ("macros", "Macros"), ItemType::Primitive => ("primitives", "Primitive Types"), ItemType::AssociatedType => ("associated-types", "Associated Types"), ItemType::AssociatedConst => ("associated-consts", "Associated Constants"), ItemType::ForeignType => ("foreign-types", "Foreign Types"), ItemType::Keyword => ("keywords", "Keywords"), } } fn sidebar_module(fmt: &mut fmt::Formatter, _it: &clean::Item, items: &[clean::Item]) -> fmt::Result { let mut sidebar = String::new(); if items.iter().any(|it| it.type_() == ItemType::ExternCrate || it.type_() == ItemType::Import) { sidebar.push_str(&format!("<li><a href=\"#{id}\">{name}</a></li>", id = "reexports", name = "Re-exports")); } // ordering taken from item_module, reorder, where it prioritized elements in a certain order // to print its headings for &myty in &[ItemType::Primitive, ItemType::Module, ItemType::Macro, ItemType::Struct, ItemType::Enum, ItemType::Constant, ItemType::Static, ItemType::Trait, ItemType::Function, ItemType::Typedef, ItemType::Union, ItemType::Impl, ItemType::TyMethod, ItemType::Method, ItemType::StructField, ItemType::Variant, ItemType::AssociatedType, ItemType::AssociatedConst, ItemType::ForeignType] { if items.iter().any(|it| !it.is_stripped() && it.type_() == myty) { let (short, name) = item_ty_to_strs(&myty); sidebar.push_str(&format!("<li><a href=\"#{id}\">{name}</a></li>", id = short, name = name)); } } if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?; } Ok(()) } fn sidebar_foreign_type(fmt: &mut fmt::Formatter, it: &clean::Item) -> fmt::Result { let sidebar = sidebar_assoc_items(it); if !sidebar.is_empty() { write!(fmt, "<div class=\"block items\">{}</div>", sidebar)?; } Ok(()) } impl<'a> fmt::Display for Source<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let Source(s) = *self; let lines = s.lines().count(); 
let mut cols = 0; let mut tmp = lines; while tmp > 0 { cols += 1; tmp /= 10; } write!(fmt, "<pre class=\"line-numbers\">")?; for i in 1..lines + 1 { write!(fmt, "<span id=\"{0}\">{0:1$}</span>\n", i, cols)?; } write!(fmt, "</pre>")?; write!(fmt, "{}", highlight::render_with_highlighting(s, None, None, None, None))?; Ok(()) } } fn item_macro(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, t: &clean::Macro) -> fmt::Result { wrap_into_docblock(w, |w| { w.write_str(&highlight::render_with_highlighting(&t.source, Some("macro"), None, None, None)) })?; document(w, cx, it) } fn item_primitive(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, _p: &clean::PrimitiveType) -> fmt::Result { document(w, cx, it)?; render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All) } fn item_keyword(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, _p: &str) -> fmt::Result { document(w, cx, it) } const BASIC_KEYWORDS: &'static str = "rust, rustlang, rust-lang"; fn make_item_keywords(it: &clean::Item) -> String { format!("{}, {}", BASIC_KEYWORDS, it.name.as_ref().unwrap()) } fn get_index_search_type(item: &clean::Item) -> Option<IndexItemFunctionType> { let decl = match item.inner { clean::FunctionItem(ref f) => &f.decl, clean::MethodItem(ref m) => &m.decl, clean::TyMethodItem(ref m) => &m.decl, _ => return None }; let inputs = decl.inputs.values.iter().map(|arg| get_index_type(&arg.type_)).collect(); let output = match decl.output { clean::FunctionRetTy::Return(ref return_type) => Some(get_index_type(return_type)), _ => None }; Some(IndexItemFunctionType { inputs: inputs, output: output }) } fn get_index_type(clean_type: &clean::Type) -> Type { let t = Type { name: get_index_type_name(clean_type, true).map(|s| s.to_ascii_lowercase()), generics: get_generics(clean_type), }; t } /// Returns a list of all paths used in the type. /// This is used to help deduplicate imported impls /// for reexported types. 
If any of the contained /// types are re-exported, we don't use the corresponding /// entry from the js file, as inlining will have already /// picked up the impl fn collect_paths_for_type(first_ty: clean::Type) -> Vec<String> { let mut out = Vec::new(); let mut visited = FxHashSet(); let mut work = VecDeque::new(); let cache = cache(); work.push_back(first_ty); while let Some(ty) = work.pop_front() { if !visited.insert(ty.clone()) { continue; } match ty { clean::Type::ResolvedPath { did, .. } => { let get_extern = || cache.external_paths.get(&did).map(|s| s.0.clone()); let fqp = cache.exact_paths.get(&did).cloned().or_else(get_extern); match fqp { Some(path) => { out.push(path.join("::")); }, _ => {} }; }, clean::Type::Tuple(tys) => { work.extend(tys.into_iter()); }, clean::Type::Slice(ty) => { work.push_back(*ty); } clean::Type::Array(ty, _) => { work.push_back(*ty); }, clean::Type::Unique(ty) => { work.push_back(*ty); }, clean::Type::RawPointer(_, ty) => { work.push_back(*ty); }, clean::Type::BorrowedRef { type_, .. } => { work.push_back(*type_); }, clean::Type::QPath { self_type, trait_, .. } => { work.push_back(*self_type); work.push_back(*trait_); }, _ => {} } }; out } fn get_index_type_name(clean_type: &clean::Type, accept_generic: bool) -> Option<String> { match *clean_type { clean::ResolvedPath { ref path, .. } => { let segments = &path.segments; let path_segment = segments.into_iter().last().unwrap_or_else(|| panic!( "get_index_type_name(clean_type: {:?}, accept_generic: {:?}) had length zero path", clean_type, accept_generic )); Some(path_segment.name.clone()) } clean::Generic(ref s) if accept_generic => Some(s.clone()), clean::Primitive(ref p) => Some(format!("{:?}", p)), clean::BorrowedRef { ref type_, .. } => get_index_type_name(type_, accept_generic), // FIXME: add all from clean::Type. 
_ => None } } fn get_generics(clean_type: &clean::Type) -> Option<Vec<String>> { clean_type.generics() .and_then(|types| { let r = types.iter() .filter_map(|t| get_index_type_name(t, false)) .map(|s| s.to_ascii_lowercase()) .collect::<Vec<_>>(); if r.is_empty() { None } else { Some(r) } }) } pub fn cache() -> Arc<Cache> { CACHE_KEY.with(|c| c.borrow().clone()) } #[cfg(test)] #[test] fn test_unique_id() { let input = ["foo", "examples", "examples", "method.into_iter","examples", "method.into_iter", "foo", "main", "search", "methods", "examples", "method.into_iter", "assoc_type.Item", "assoc_type.Item"]; let expected = ["foo", "examples", "examples-1", "method.into_iter", "examples-2", "method.into_iter-1", "foo-1", "main-1", "search-1", "methods-1", "examples-3", "method.into_iter-2", "assoc_type.Item", "assoc_type.Item-1"]; let test = || { let actual: Vec<String> = input.iter().map(|s| derive_id(s.to_string())).collect(); assert_eq!(&actual[..], expected); }; test(); reset_ids(true); test(); } #[cfg(test)] #[test] fn test_name_key() { assert_eq!(name_key("0"), ("", 0, 1)); assert_eq!(name_key("123"), ("", 123, 0)); assert_eq!(name_key("Fruit"), ("Fruit", 0, 0)); assert_eq!(name_key("Fruit0"), ("Fruit", 0, 1)); assert_eq!(name_key("Fruit0000"), ("Fruit", 0, 4)); assert_eq!(name_key("Fruit01"), ("Fruit", 1, 1)); assert_eq!(name_key("Fruit10"), ("Fruit", 10, 0)); assert_eq!(name_key("Fruit123"), ("Fruit", 123, 0)); } #[cfg(test)] #[test] fn test_name_sorting() { let names = ["Apple", "Banana", "Fruit", "Fruit0", "Fruit00", "Fruit1", "Fruit01", "Fruit2", "Fruit02", "Fruit20", "Fruit100", "Pear"]; let mut sorted = names.to_owned(); sorted.sort_by_key(|&s| name_key(s)); assert_eq!(names, sorted); } Improved non_exhaustive message. // Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. 
// // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Rustdoc's HTML Rendering module //! //! This modules contains the bulk of the logic necessary for rendering a //! rustdoc `clean::Crate` instance to a set of static HTML pages. This //! rendering process is largely driven by the `format!` syntax extension to //! perform all I/O into files and streams. //! //! The rendering process is largely driven by the `Context` and `Cache` //! structures. The cache is pre-populated by crawling the crate in question, //! and then it is shared among the various rendering threads. The cache is meant //! to be a fairly large structure not implementing `Clone` (because it's shared //! among threads). The context, however, should be a lightweight structure. This //! is cloned per-thread and contains information about what is currently being //! rendered. //! //! In order to speed up rendering (mostly because of markdown rendering), the //! rendering process has been parallelized. This parallelization is only //! exposed through the `crate` method on the context, and then also from the //! fact that the shared cache is stored in TLS (and must be accessed as such). //! //! In addition to rendering the crate itself, this module is also responsible //! for creating the corresponding search index and source file renderings. //! These threads are not parallelized (they haven't been a bottleneck yet), and //! both occur before the crate is rendered. 
pub use self::ExternalLocation::*; use std::borrow::Cow; use std::cell::RefCell; use std::cmp::Ordering; use std::collections::{BTreeMap, HashSet, VecDeque}; use std::default::Default; use std::error; use std::fmt::{self, Display, Formatter, Write as FmtWrite}; use std::ffi::OsStr; use std::fs::{self, File, OpenOptions}; use std::io::prelude::*; use std::io::{self, BufWriter, BufReader}; use std::iter::repeat; use std::mem; use std::path::{PathBuf, Path, Component}; use std::str; use std::sync::Arc; use externalfiles::ExternalHtml; use serialize::json::{ToJson, Json, as_json}; use syntax::ast; use syntax::codemap::FileName; use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId}; use rustc::middle::privacy::AccessLevels; use rustc::middle::stability; use rustc::hir; use rustc::util::nodemap::{FxHashMap, FxHashSet}; use rustc_data_structures::flock; use clean::{self, AttributesExt, GetDefId, SelfTy, Mutability}; use doctree; use fold::DocFolder; use html::escape::Escape; use html::format::{AsyncSpace, ConstnessSpace}; use html::format::{GenericBounds, WhereClause, href, AbiSpace}; use html::format::{VisSpace, Method, UnsafetySpace, MutableSpace}; use html::format::fmt_impl_for_trait_page; use html::item_type::ItemType; use html::markdown::{self, Markdown, MarkdownHtml, MarkdownSummaryLine}; use html::{highlight, layout}; use minifier; /// A pair of name and its optional document. pub type NameDoc = (String, Option<String>); /// Major driving force in all rustdoc rendering. This contains information /// about where in the tree-like hierarchy rendering is occurring and controls /// how the current page is being rendered. /// /// It is intended that this context is a lightweight object which can be fairly /// easily cloned because it is cloned per work-job (about once per item in the /// rustdoc tree). 
#[derive(Clone)] pub struct Context { /// Current hierarchy of components leading down to what's currently being /// rendered pub current: Vec<String>, /// The current destination folder of where HTML artifacts should be placed. /// This changes as the context descends into the module hierarchy. pub dst: PathBuf, /// A flag, which when `true`, will render pages which redirect to the /// real location of an item. This is used to allow external links to /// publicly reused items to redirect to the right location. pub render_redirect_pages: bool, pub shared: Arc<SharedContext>, } pub struct SharedContext { /// The path to the crate root source minus the file name. /// Used for simplifying paths to the highlighted source code files. pub src_root: PathBuf, /// This describes the layout of each page, and is not modified after /// creation of the context (contains info like the favicon and added html). pub layout: layout::Layout, /// This flag indicates whether `[src]` links should be generated or not. If /// the source files are present in the html rendering, then this will be /// `true`. pub include_sources: bool, /// The local file sources we've emitted and their respective url-paths. pub local_sources: FxHashMap<PathBuf, String>, /// All the passes that were run on this crate. pub passes: FxHashSet<String>, /// The base-URL of the issue tracker for when an item has been tagged with /// an issue number. pub issue_tracker_base_url: Option<String>, /// The given user css file which allow to customize the generated /// documentation theme. pub css_file_extension: Option<PathBuf>, /// The directories that have already been created in this doc run. Used to reduce the number /// of spurious `create_dir_all` calls. pub created_dirs: RefCell<FxHashSet<PathBuf>>, /// This flag indicates whether listings of modules (in the side bar and documentation itself) /// should be ordered alphabetically or in order of appearance (in the source code). 
pub sort_modules_alphabetically: bool, /// Additional themes to be added to the generated docs. pub themes: Vec<PathBuf>, /// Suffix to be added on resource files (if suffix is "-v2" then "light.css" becomes /// "light-v2.css"). pub resource_suffix: String, } impl SharedContext { fn ensure_dir(&self, dst: &Path) -> io::Result<()> { let mut dirs = self.created_dirs.borrow_mut(); if !dirs.contains(dst) { fs::create_dir_all(dst)?; dirs.insert(dst.to_path_buf()); } Ok(()) } } impl SharedContext { /// Returns whether the `collapse-docs` pass was run on this crate. pub fn was_collapsed(&self) -> bool { self.passes.contains("collapse-docs") } /// Based on whether the `collapse-docs` pass was run, return either the `doc_value` or the /// `collapsed_doc_value` of the given item. pub fn maybe_collapsed_doc_value<'a>(&self, item: &'a clean::Item) -> Option<Cow<'a, str>> { if self.was_collapsed() { item.collapsed_doc_value().map(|s| s.into()) } else { item.doc_value().map(|s| s.into()) } } } /// Indicates where an external crate can be found. pub enum ExternalLocation { /// Remote URL root of the external crate Remote(String), /// This external crate can be found in the local doc/ folder Local, /// The external crate could not be found. Unknown, } /// Metadata about implementations for a type or trait. 
#[derive(Clone)]
pub struct Impl {
    pub impl_item: clean::Item,
}

impl Impl {
    /// Unwraps the inner `clean::Impl`; panics if this `Impl` was built from a
    /// non-impl item (which would indicate a bug at the construction site).
    fn inner_impl(&self) -> &clean::Impl {
        match self.impl_item.inner {
            clean::ImplItem(ref impl_) => impl_,
            _ => panic!("non-impl item found in impl")
        }
    }

    /// The `DefId` of the implemented trait, or `None` for inherent impls.
    fn trait_did(&self) -> Option<DefId> {
        self.inner_impl().trait_.def_id()
    }
}

/// An I/O error tagged with the file it occurred on, so rendering failures can
/// report which output path was being written.
#[derive(Debug)]
pub struct Error {
    file: PathBuf,
    error: io::Error,
}

impl error::Error for Error {
    fn description(&self) -> &str {
        self.error.description()
    }
}

impl Display for Error {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "\"{}\": {}", self.file.display(), self.error)
    }
}

impl Error {
    pub fn new(e: io::Error, file: &Path) -> Error {
        Error {
            file: file.to_path_buf(),
            error: e,
        }
    }
}

// Unwraps an `Option`, converting `None` into an `Error` tagged with `$file`
// and returning it from the enclosing function.
macro_rules! try_none {
    ($e:expr, $file:expr) => ({
        use std::io;
        match $e {
            Some(e) => e,
            None => return Err(Error::new(io::Error::new(io::ErrorKind::Other, "not found"),
                                          $file))
        }
    })
}

// Like `?`, but wraps the `io::Error` into this module's `Error` so the
// failing file path is carried along.
macro_rules! try_err {
    ($e:expr, $file:expr) => ({
        match $e {
            Ok(e) => e,
            Err(e) => return Err(Error::new(e, $file)),
        }
    })
}

/// This cache is used to store information about the `clean::Crate` being
/// rendered in order to provide more useful documentation. This contains
/// information like all implementors of a trait, all traits a type implements,
/// documentation for all known traits, etc.
///
/// This structure purposefully does not implement `Clone` because it's intended
/// to be a fairly large and expensive structure to clone. Instead this adheres
/// to `Send` so it may be stored in a `Arc` instance and shared among the various
/// rendering threads.
#[derive(Default)]
pub struct Cache {
    /// Mapping of typaram ids to the name of the type parameter. This is used
    /// when pretty-printing a type (so pretty printing doesn't have to
    /// painfully maintain a context like this)
    pub typarams: FxHashMap<DefId, String>,

    /// Maps a type id to all known implementations for that type. This is only
    /// recognized for intra-crate `ResolvedPath` types, and is used to print
    /// out extra documentation on the page of an enum/struct.
    ///
    /// The values of the map are a list of implementations and documentation
    /// found on that implementation.
    pub impls: FxHashMap<DefId, Vec<Impl>>,

    /// Maintains a mapping of local crate node ids to the fully qualified name
    /// and "short type description" of that node. This is used when generating
    /// URLs when a type is being linked to. External paths are not located in
    /// this map because the `External` type itself has all the information
    /// necessary.
    pub paths: FxHashMap<DefId, (Vec<String>, ItemType)>,

    /// Similar to `paths`, but only holds external paths. This is only used for
    /// generating explicit hyperlinks to other crates.
    pub external_paths: FxHashMap<DefId, (Vec<String>, ItemType)>,

    /// Maps local def ids of exported types to fully qualified paths.
    /// Unlike 'paths', this mapping ignores any renames that occur
    /// due to 'use' statements.
    ///
    /// This map is used when writing out the special 'implementors'
    /// javascript file. By using the exact path that the type
    /// is declared with, we ensure that each path will be identical
    /// to the path used if the corresponding type is inlined. By
    /// doing this, we can detect duplicate impls on a trait page, and only display
    /// the impl for the inlined type.
    pub exact_paths: FxHashMap<DefId, Vec<String>>,

    /// This map contains information about all known traits of this crate.
    /// Implementations of a crate should inherit the documentation of the
    /// parent trait if no extra documentation is specified, and default methods
    /// should show up in documentation about trait implementations.
    pub traits: FxHashMap<DefId, clean::Trait>,

    /// When rendering traits, it's often useful to be able to list all
    /// implementors of the trait, and this mapping is exactly that: a mapping
    /// of trait ids to the list of known implementors of the trait
    pub implementors: FxHashMap<DefId, Vec<Impl>>,

    /// Cache of where external crate documentation can be found.
    pub extern_locations: FxHashMap<CrateNum, (String, PathBuf, ExternalLocation)>,

    /// Cache of where documentation for primitives can be found.
    pub primitive_locations: FxHashMap<clean::PrimitiveType, DefId>,

    // Note that external items for which `doc(hidden)` applies to are shown as
    // non-reachable while local items aren't. This is because we're reusing
    // the access levels from crateanalysis.
    pub access_levels: Arc<AccessLevels<DefId>>,

    /// The version of the crate being documented, if given from the `--crate-version` flag.
    pub crate_version: Option<String>,

    // Private fields only used when initially crawling a crate to build a cache

    // Path segments (module names) of the item currently being folded.
    stack: Vec<String>,
    // `DefId`s of enclosing type-like parents (traits/structs/enums/impls).
    parent_stack: Vec<DefId>,
    parent_is_trait_impl: bool,
    search_index: Vec<IndexItem>,
    // True while inside a module that was stripped from the docs.
    stripped_mod: bool,
    deref_trait_did: Option<DefId>,
    deref_mut_trait_did: Option<DefId>,
    owned_box_did: Option<DefId>,
    masked_crates: FxHashSet<CrateNum>,

    // In rare case where a structure is defined in one module but implemented
    // in another, if the implementing module is parsed before defining module,
    // then the fully qualified name of the structure isn't presented in `paths`
    // yet when its implementation methods are being indexed. Caches such methods
    // and their parent id here and indexes them at the end of crate parsing.
    orphan_impl_items: Vec<(DefId, clean::Item)>,

    /// Aliases added through `#[doc(alias = "...")]`. Since a few items can have the same alias,
    /// we need the alias element to have an array of items.
    aliases: FxHashMap<String, Vec<IndexItem>>,
}

/// Temporary storage for data obtained during `RustdocVisitor::clean()`.
/// Later on moved into `CACHE_KEY`.
#[derive(Default)]
pub struct RenderInfo {
    pub inlined: FxHashSet<DefId>,
    pub external_paths: ::core::ExternalPaths,
    pub external_typarams: FxHashMap<DefId, String>,
    pub exact_paths: FxHashMap<DefId, Vec<String>>,
    pub deref_trait_did: Option<DefId>,
    pub deref_mut_trait_did: Option<DefId>,
    pub owned_box_did: Option<DefId>,
}

/// Helper struct to render all source code to HTML pages
struct SourceCollector<'a> {
    scx: &'a mut SharedContext,

    /// Root destination to place all HTML output into
    dst: PathBuf,
}

/// Wrapper struct to render the source code of a file. This will do things like
/// adding line numbers to the left-hand side.
struct Source<'a>(&'a str);

// Helper structs for rendering items/sidebars and carrying along contextual
// information

#[derive(Copy, Clone)]
struct Item<'a> {
    cx: &'a Context,
    item: &'a clean::Item,
}

struct Sidebar<'a> {
    cx: &'a Context,
    item: &'a clean::Item,
}

/// Struct representing one entry in the JS search index. These are all emitted
/// by hand to a large JS file at the end of cache-creation.
#[derive(Debug)]
struct IndexItem {
    ty: ItemType,
    name: String,
    path: String,
    desc: String,
    parent: Option<DefId>,
    parent_idx: Option<usize>,
    search_type: Option<IndexItemFunctionType>,
}

impl ToJson for IndexItem {
    /// Serializes this entry as the fixed six-element array the search JS
    /// expects: `[ty, name, path, desc, parent_idx, search_type]`.
    fn to_json(&self) -> Json {
        // `parent` and `parent_idx` must have been resolved in lockstep before
        // serialization (see `build_index`).
        assert_eq!(self.parent.is_some(), self.parent_idx.is_some());

        Json::Array(vec![
            (self.ty as usize).to_json(),
            self.name.to_json(),
            self.path.to_json(),
            self.desc.to_json(),
            self.parent_idx.to_json(),
            self.search_type.to_json(),
        ])
    }
}

/// A type used for the search index.
/// A (possibly generic) type name destined for the search index.
#[derive(Debug)]
struct Type {
    name: Option<String>,
    generics: Option<Vec<String>>,
}

impl ToJson for Type {
    /// Emits `{"n": name, "g": [generics...]}`, omitting `"g"` when there are
    /// no generics, or `null` when the type has no name at all.
    fn to_json(&self) -> Json {
        if let Some(ref name) = self.name {
            let mut data = BTreeMap::new();
            data.insert("n".to_owned(), name.to_json());
            if let Some(ref generics) = self.generics {
                data.insert("g".to_owned(), generics.to_json());
            }
            Json::Object(data)
        } else {
            Json::Null
        }
    }
}

/// Full type of functions/methods in the search index.
#[derive(Debug)]
struct IndexItemFunctionType {
    inputs: Vec<Type>,
    output: Option<Type>,
}

impl ToJson for IndexItemFunctionType {
    /// Emits `{"i": inputs, "o": output}` with empty/absent parts omitted.
    fn to_json(&self) -> Json {
        // If we couldn't figure out a type, just write `null`.
        let any_unknown = self.inputs.iter()
                                     .chain(self.output.iter())
                                     .any(|i| i.name.is_none());
        if any_unknown {
            Json::Null
        } else {
            let mut data = BTreeMap::new();
            if !self.inputs.is_empty() {
                data.insert("i".to_owned(), self.inputs.to_json());
            }
            if let Some(ref output) = self.output {
                data.insert("o".to_owned(), output.to_json());
            }
            Json::Object(data)
        }
    }
}

// Per-thread rendering state: the frozen crate cache, the module path of the
// page currently being rendered, and the table of HTML ids handed out so far.
thread_local!(static CACHE_KEY: RefCell<Arc<Cache>> = Default::default());
thread_local!(pub static CURRENT_LOCATION_KEY: RefCell<Vec<String>> =
                    RefCell::new(Vec::new()));
thread_local!(pub static USED_ID_MAP: RefCell<FxHashMap<String, usize>> =
                    RefCell::new(init_ids()));

/// Seeds the used-id table with every id the page template itself emits, so
/// `derive_id` never hands one of them out to item documentation.
fn init_ids() -> FxHashMap<String, usize> {
    [
     "main",
     "search",
     "help",
     "TOC",
     "render-detail",
     "associated-types",
     "associated-const",
     "required-methods",
     "provided-methods",
     "implementors",
     "synthetic-implementors",
     "implementors-list",
     "synthetic-implementors-list",
     "methods",
     "deref-methods",
     "implementations",
    ].iter().map(|&id| (String::from(id), 1)).collect()
}

/// This method resets the local table of used ID attributes. This is typically
/// used at the beginning of rendering an entire HTML page to reset from the
/// previous state (if any).
pub fn reset_ids(embedded: bool) {
    USED_ID_MAP.with(|s| {
        // Embedded pages (full HTML output) re-seed with the template's own
        // ids; standalone-markdown rendering starts from an empty table.
        *s.borrow_mut() = if embedded {
            init_ids()
        } else {
            FxHashMap()
        };
    });
}

/// Returns a page-unique HTML id derived from `candidate`: the candidate
/// itself if unused so far, otherwise `candidate-N` with a per-candidate
/// collision counter.
pub fn derive_id(candidate: String) -> String {
    USED_ID_MAP.with(|map| {
        let id = match map.borrow_mut().get_mut(&candidate) {
            None => candidate,
            Some(a) => {
                let id = format!("{}-{}", candidate, *a);
                *a += 1;
                id
            }
        };

        // Also reserve the id we just produced so a later identical candidate
        // can't collide with it.
        map.borrow_mut().insert(id.clone(), 1);
        id
    })
}

/// Generates the documentation for `crate` into the directory `dst`
pub fn run(mut krate: clean::Crate,
           external_html: &ExternalHtml,
           playground_url: Option<String>,
           dst: PathBuf,
           resource_suffix: String,
           passes: FxHashSet<String>,
           css_file_extension: Option<PathBuf>,
           renderinfo: RenderInfo,
           sort_modules_alphabetically: bool,
           themes: Vec<PathBuf>,
           enable_minification: bool) -> Result<(), Error> {
    // Sources are rendered relative to the directory containing the crate root.
    let src_root = match krate.src {
        FileName::Real(ref p) => match p.parent() {
            Some(p) => p.to_path_buf(),
            None => PathBuf::new(),
        },
        _ => PathBuf::new(),
    };
    let mut scx = SharedContext {
        src_root,
        passes,
        include_sources: true,
        local_sources: FxHashMap(),
        issue_tracker_base_url: None,
        layout: layout::Layout {
            logo: "".to_string(),
            favicon: "".to_string(),
            external_html: external_html.clone(),
            krate: krate.name.clone(),
        },
        css_file_extension: css_file_extension.clone(),
        created_dirs: RefCell::new(FxHashSet()),
        sort_modules_alphabetically,
        themes,
        resource_suffix,
    };

    // If user passed in `--playground-url` arg, we fill in crate name here
    if let Some(url) = playground_url {
        markdown::PLAYGROUND.with(|slot| {
            *slot.borrow_mut() = Some((Some(krate.name.clone()), url));
        });
    }

    // Crawl the crate attributes looking for attributes which control how we're
    // going to emit HTML
    if let Some(attrs) = krate.module.as_ref().map(|m| &m.attrs) {
        for attr in attrs.lists("doc") {
            let name = attr.name().map(|s| s.as_str());
            match (name.as_ref().map(|s| &s[..]), attr.value_str()) {
                (Some("html_favicon_url"), Some(s)) => {
                    scx.layout.favicon = s.to_string();
                }
                (Some("html_logo_url"), Some(s)) => {
                    scx.layout.logo = s.to_string();
                }
                (Some("html_playground_url"), Some(s)) => {
                    // A crate-level playground URL overrides the CLI default.
                    markdown::PLAYGROUND.with(|slot| {
                        let name = krate.name.clone();
                        *slot.borrow_mut() = Some((Some(name), s.to_string()));
                    });
                }
                (Some("issue_tracker_base_url"), Some(s)) => {
                    scx.issue_tracker_base_url = Some(s.to_string());
                }
                (Some("html_no_source"), None) if attr.is_word() => {
                    scx.include_sources = false;
                }
                _ => {}
            }
        }
    }
    try_err!(fs::create_dir_all(&dst), &dst);
    // Sources are emitted before HTML pages so page generation can link to them.
    krate = render_sources(&dst, &mut scx, krate)?;
    let cx = Context {
        current: Vec::new(),
        dst,
        render_redirect_pages: false,
        shared: Arc::new(scx),
    };

    // Crawl the crate to build various caches used for the output
    let RenderInfo {
        inlined: _,
        external_paths,
        external_typarams,
        exact_paths,
        deref_trait_did,
        deref_mut_trait_did,
        owned_box_did,
    } = renderinfo;

    let external_paths = external_paths.into_iter()
        .map(|(k, (v, t))| (k, (v, ItemType::from(t))))
        .collect();

    let mut cache = Cache {
        impls: FxHashMap(),
        external_paths,
        exact_paths,
        paths: FxHashMap(),
        implementors: FxHashMap(),
        stack: Vec::new(),
        parent_stack: Vec::new(),
        search_index: Vec::new(),
        parent_is_trait_impl: false,
        extern_locations: FxHashMap(),
        primitive_locations: FxHashMap(),
        stripped_mod: false,
        access_levels: krate.access_levels.clone(),
        crate_version: krate.version.take(),
        orphan_impl_items: Vec::new(),
        traits: mem::replace(&mut krate.external_traits, FxHashMap()),
        deref_trait_did,
        deref_mut_trait_did,
        owned_box_did,
        masked_crates: mem::replace(&mut krate.masked_crates, FxHashSet()),
        typarams: external_typarams,
        aliases: FxHashMap(),
    };

    // Cache where all our extern crates are located
    for &(n, ref e) in &krate.externs {
        let src_root = match e.src {
            FileName::Real(ref p) => match p.parent() {
                Some(p) => p.to_path_buf(),
                None => PathBuf::new(),
            },
            _ => PathBuf::new(),
        };
        cache.extern_locations.insert(n, (e.name.clone(), src_root,
                                          extern_location(e, &cx.dst)));

        // Register the external crate's root module so links to it resolve.
        let did = DefId { krate: n, index: CRATE_DEF_INDEX };
        cache.external_paths.insert(did,
                                    (vec![e.name.to_string()], ItemType::Module));
    }

    // Cache where all known primitives have their documentation located.
    //
    // Favor linking to as local extern as possible, so iterate all crates in
    // reverse topological order.
    for &(_, ref e) in krate.externs.iter().rev() {
        for &(def_id, prim, _) in &e.primitives {
            cache.primitive_locations.insert(prim, def_id);
        }
    }
    // The local crate's own primitive docs take final precedence.
    for &(def_id, prim, _) in &krate.primitives {
        cache.primitive_locations.insert(prim, def_id);
    }

    cache.stack.push(krate.name.clone());
    krate = cache.fold_crate(krate);

    // Build our search index
    let index = build_index(&krate, &mut cache);

    // Freeze the cache now that the index has been built. Put an Arc into TLS
    // for future parallelization opportunities
    let cache = Arc::new(cache);
    CACHE_KEY.with(|v| *v.borrow_mut() = cache.clone());
    CURRENT_LOCATION_KEY.with(|s| s.borrow_mut().clear());

    write_shared(&cx, &krate, &*cache, index, enable_minification)?;

    // And finally render the whole crate's documentation
    cx.krate(krate)
}

/// Build the search index from the collected metadata
fn build_index(krate: &clean::Crate, cache: &mut Cache) -> String {
    let mut nodeid_to_pathid = FxHashMap();
    let mut crate_items = Vec::with_capacity(cache.search_index.len());
    let mut crate_paths = Vec::<Json>::new();

    let Cache { ref mut search_index,
                ref orphan_impl_items,
                ref mut paths, .. } = *cache;

    // Attach all orphan items to the type's definition if the type
    // has since been learned.
    for &(did, ref item) in orphan_impl_items {
        if let Some(&(ref fqp, _)) = paths.get(&did) {
            search_index.push(IndexItem {
                ty: item.type_(),
                name: item.name.clone().unwrap(),
                path: fqp[..fqp.len() - 1].join("::"),
                desc: plain_summary_line(item.doc_value()),
                parent: Some(did),
                parent_idx: None,
                search_type: get_index_search_type(&item),
            });
        }
    }

    // Reduce `NodeId` in paths into smaller sequential numbers,
    // and prune the paths that do not appear in the index.
    let mut lastpath = String::new();
    let mut lastpathid = 0usize;

    for item in search_index {
        // Intern each parent `DefId` as a small sequential index into
        // `crate_paths`, assigning a fresh id on first sight.
        item.parent_idx = item.parent.map(|nodeid| {
            if nodeid_to_pathid.contains_key(&nodeid) {
                *nodeid_to_pathid.get(&nodeid).unwrap()
            } else {
                let pathid = lastpathid;
                nodeid_to_pathid.insert(nodeid, pathid);
                lastpathid += 1;

                let &(ref fqp, short) = paths.get(&nodeid).unwrap();
                crate_paths.push(((short as usize), fqp.last().unwrap().clone()).to_json());
                pathid
            }
        });

        // Omit the parent path if it is same to that of the prior item.
        if lastpath == item.path {
            item.path.clear();
        } else {
            lastpath = item.path.clone();
        }
        crate_items.push(item.to_json());
    }

    let crate_doc = krate.module.as_ref().map(|module| {
        plain_summary_line(module.doc_value())
    }).unwrap_or(String::new());

    let mut crate_data = BTreeMap::new();
    crate_data.insert("doc".to_owned(), Json::String(crate_doc));
    crate_data.insert("items".to_owned(), Json::Array(crate_items));
    crate_data.insert("paths".to_owned(), Json::Array(crate_paths));

    // Collect the index into a string
    format!("searchIndex[{}] = {};",
            as_json(&krate.name),
            Json::Object(crate_data))
}

/// Writes all output that is shared between crates documented into the same
/// directory: static assets, themes, the search index, and implementor lists.
fn write_shared(cx: &Context,
                krate: &clean::Crate,
                cache: &Cache,
                search_index: String,
                enable_minification: bool) -> Result<(), Error> {
    // Write out the shared files. Note that these are shared among all rustdoc
    // docs placed in the output directory, so this needs to be a synchronized
    // operation with respect to all other rustdocs running around.
    let _lock = flock::Lock::panicking_new(&cx.dst.join(".lock"), true, true, true);

    // Add all the static files. These may already exist, but we just
    // overwrite them anyway to make sure that they're fresh and up-to-date.
    write_minify(cx.dst.join(&format!("rustdoc{}.css", cx.shared.resource_suffix)),
                 include_str!("static/rustdoc.css"),
                 enable_minification)?;
    write_minify(cx.dst.join(&format!("settings{}.css", cx.shared.resource_suffix)),
                 include_str!("static/settings.css"),
                 enable_minification)?;

    // To avoid "light.css" to be overwritten, we'll first run over the received themes and only
    // then we'll run over the "official" styles.
    let mut themes: HashSet<String> = HashSet::new();

    for entry in &cx.shared.themes {
        let mut content = Vec::with_capacity(100000);

        // User-supplied themes are copied verbatim (no minification), keeping
        // their original file extension.
        let mut f = try_err!(File::open(&entry), &entry);
        try_err!(f.read_to_end(&mut content), &entry);
        let theme = try_none!(try_none!(entry.file_stem(), &entry).to_str(), &entry);
        let extension = try_none!(try_none!(entry.extension(), &entry).to_str(), &entry);
        write(cx.dst.join(format!("{}{}.{}", theme, cx.shared.resource_suffix, extension)),
              content.as_slice())?;
        themes.insert(theme.to_owned());
    }

    write(cx.dst.join(&format!("brush{}.svg", cx.shared.resource_suffix)),
          include_bytes!("static/brush.svg"))?;
    write(cx.dst.join(&format!("wheel{}.svg", cx.shared.resource_suffix)),
          include_bytes!("static/wheel.svg"))?;
    write_minify(cx.dst.join(&format!("light{}.css", cx.shared.resource_suffix)),
                 include_str!("static/themes/light.css"),
                 enable_minification)?;
    themes.insert("light".to_owned());
    write_minify(cx.dst.join(&format!("dark{}.css", cx.shared.resource_suffix)),
                 include_str!("static/themes/dark.css"),
                 enable_minification)?;
    themes.insert("dark".to_owned());

    // Sort so the generated theme.js is deterministic across runs.
    let mut themes: Vec<&String> = themes.iter().collect();
    themes.sort();

    // To avoid theme switch latencies as much as possible, we put everything theme related
    // at the beginning of the html files into another js file.
    // The JS below is a runtime template: `{{`/`}}` are literal braces for
    // `format!`, and the single `{}` is filled with the quoted theme names.
    write(cx.dst.join(&format!("theme{}.js", cx.shared.resource_suffix)),
          format!(
r#"var themes = document.getElementById("theme-choices");
var themePicker = document.getElementById("theme-picker");

function switchThemeButtonState() {{
  if (themes.style.display === "block") {{
    themes.style.display = "none";
    themePicker.style.borderBottomRightRadius = "3px";
    themePicker.style.borderBottomLeftRadius = "3px";
  }} else {{
    themes.style.display = "block";
    themePicker.style.borderBottomRightRadius = "0";
    themePicker.style.borderBottomLeftRadius = "0";
  }}
}};

function handleThemeButtonsBlur(e) {{
  var active = document.activeElement;
  var related = e.relatedTarget;

  if (active.id !== "themePicker" &&
      (!active.parentNode || active.parentNode.id !== "theme-choices") &&
      (!related ||
       (related.id !== "themePicker" &&
        (!related.parentNode || related.parentNode.id !== "theme-choices")))) {{
    switchThemeButtonState();
  }}
}}

themePicker.onclick = switchThemeButtonState;
themePicker.onblur = handleThemeButtonsBlur;
[{}].forEach(function(item) {{
  var but = document.createElement('button');
  but.innerHTML = item;
  but.onclick = function(el) {{
    switchTheme(currentTheme, mainTheme, item);
  }};
  but.onblur = handleThemeButtonsBlur;
  themes.appendChild(but);
}});"#,
                 themes.iter()
                       .map(|s| format!("\"{}\"", s))
                       .collect::<Vec<String>>()
                       .join(",")).as_bytes(),
    )?;

    write_minify(cx.dst.join(&format!("main{}.js", cx.shared.resource_suffix)),
                 include_str!("static/main.js"),
                 enable_minification)?;
    write_minify(cx.dst.join(&format!("settings{}.js", cx.shared.resource_suffix)),
                 include_str!("static/settings.js"),
                 enable_minification)?;

    {
        // storage.js needs to know the resource suffix at runtime, so prepend
        // it as a JS variable before the static contents.
        let mut data = format!("var resourcesSuffix = \"{}\";\n",
                               cx.shared.resource_suffix);
        data.push_str(include_str!("static/storage.js"));
        write_minify(cx.dst.join(&format!("storage{}.js", cx.shared.resource_suffix)),
                     &data,
                     enable_minification)?;
    }

    if let Some(ref css) = cx.shared.css_file_extension {
        let out = cx.dst.join(&format!("theme{}.css", cx.shared.resource_suffix));
        if
        !enable_minification {
            try_err!(fs::copy(css, out), css);
        } else {
            let mut f = try_err!(File::open(css), css);
            let mut buffer = String::with_capacity(1000);

            try_err!(f.read_to_string(&mut buffer), css);
            write_minify(out, &buffer, enable_minification)?;
        }
    }

    write_minify(cx.dst.join(&format!("normalize{}.css", cx.shared.resource_suffix)),
                 include_str!("static/normalize.css"),
                 enable_minification)?;
    // Fonts and license files are written without a resource suffix since
    // their contents never change between rustdoc versions in a breaking way.
    write(cx.dst.join("FiraSans-Regular.woff"),
          include_bytes!("static/FiraSans-Regular.woff"))?;
    write(cx.dst.join("FiraSans-Medium.woff"),
          include_bytes!("static/FiraSans-Medium.woff"))?;
    write(cx.dst.join("FiraSans-LICENSE.txt"),
          include_bytes!("static/FiraSans-LICENSE.txt"))?;
    write(cx.dst.join("Heuristica-Italic.woff"),
          include_bytes!("static/Heuristica-Italic.woff"))?;
    write(cx.dst.join("Heuristica-LICENSE.txt"),
          include_bytes!("static/Heuristica-LICENSE.txt"))?;
    write(cx.dst.join("SourceSerifPro-Regular.woff"),
          include_bytes!("static/SourceSerifPro-Regular.woff"))?;
    write(cx.dst.join("SourceSerifPro-Bold.woff"),
          include_bytes!("static/SourceSerifPro-Bold.woff"))?;
    write(cx.dst.join("SourceSerifPro-LICENSE.txt"),
          include_bytes!("static/SourceSerifPro-LICENSE.txt"))?;
    write(cx.dst.join("SourceCodePro-Regular.woff"),
          include_bytes!("static/SourceCodePro-Regular.woff"))?;
    write(cx.dst.join("SourceCodePro-Semibold.woff"),
          include_bytes!("static/SourceCodePro-Semibold.woff"))?;
    write(cx.dst.join("SourceCodePro-LICENSE.txt"),
          include_bytes!("static/SourceCodePro-LICENSE.txt"))?;
    write(cx.dst.join("LICENSE-MIT.txt"),
          include_bytes!("static/LICENSE-MIT.txt"))?;
    write(cx.dst.join("LICENSE-APACHE.txt"),
          include_bytes!("static/LICENSE-APACHE.txt"))?;
    write(cx.dst.join("COPYRIGHT.txt"),
          include_bytes!("static/COPYRIGHT.txt"))?;

    // Reads back an existing shared JS file (e.g. search-index.js) and keeps
    // every line for crates OTHER than `krate`, so this run can re-append its
    // own entry without clobbering sibling crates' entries.
    fn collect(path: &Path, krate: &str, key: &str) -> io::Result<Vec<String>> {
        let mut ret = Vec::new();
        if path.exists() {
            for line in BufReader::new(File::open(path)?).lines() {
                let line = line?;
                if !line.starts_with(key) {
                    continue;
                }
                if
line.starts_with(&format!(r#"{}["{}"]"#, key, krate)) {
                    continue;
                }
                ret.push(line.to_string());
            }
        }
        Ok(ret)
    }

    // Renders one alias target as the JS object literal consumed by the
    // search front-end.
    fn show_item(item: &IndexItem, krate: &str) -> String {
        format!("{{'crate':'{}','ty':{},'name':'{}','desc':'{}','p':'{}'{}}}",
                krate, item.ty as usize, item.name, item.desc.replace("'", "\\'"),
                item.path,
                if let Some(p) = item.parent_idx {
                    format!(",'parent':{}", p)
                } else {
                    String::new()
                })
    }

    let dst = cx.dst.join("aliases.js");
    {
        // Merge this crate's `#[doc(alias)]` entries with those of any other
        // crates already documented into the same directory.
        let mut all_aliases = try_err!(collect(&dst, &krate.name, "ALIASES"), &dst);
        let mut w = try_err!(File::create(&dst), &dst);
        let mut output = String::with_capacity(100);
        for (alias, items) in &cache.aliases {
            if items.is_empty() {
                continue
            }
            output.push_str(&format!("\"{}\":[{}],",
                                     alias,
                                     items.iter()
                                          .map(|v| show_item(v, &krate.name))
                                          .collect::<Vec<_>>()
                                          .join(",")));
        }
        all_aliases.push(format!("ALIASES['{}'] = {{{}}};", krate.name, output));
        all_aliases.sort();
        try_err!(writeln!(&mut w, "var ALIASES = {{}};"), &dst);
        for aliases in &all_aliases {
            try_err!(writeln!(&mut w, "{}", aliases), &dst);
        }
    }

    // Update the search index
    let dst = cx.dst.join("search-index.js");
    let mut all_indexes = try_err!(collect(&dst, &krate.name, "searchIndex"), &dst);
    all_indexes.push(search_index);
    // Sort the indexes by crate so the file will be generated identically even
    // with rustdoc running in parallel.
    all_indexes.sort();
    let mut w = try_err!(File::create(&dst), &dst);
    try_err!(writeln!(&mut w, "var searchIndex = {{}};"), &dst);
    for index in &all_indexes {
        try_err!(writeln!(&mut w, "{}", *index), &dst);
    }
    try_err!(writeln!(&mut w, "initSearch(searchIndex);"), &dst);

    // Update the list of all implementors for traits
    let dst = cx.dst.join("implementors");
    for (&did, imps) in &cache.implementors {
        // Private modules can leak through to this phase of rustdoc, which
        // could contain implementations for otherwise private types. In some
        // rare cases we could find an implementation for an item which wasn't
        // indexed, so we just skip this step in that case.
        //
        // FIXME: this is a vague explanation for why this can't be a `get`, in
        // theory it should be...
        let &(ref remote_path, remote_item_type) = match cache.paths.get(&did) {
            Some(p) => p,
            None => match cache.external_paths.get(&did) {
                Some(p) => p,
                None => continue,
            }
        };

        let mut have_impls = false;
        let mut implementors = format!(r#"implementors["{}"] = ["#, krate.name);
        for imp in imps {
            // If the trait and implementation are in the same crate, then
            // there's no need to emit information about it (there's inlining
            // going on). If they're in different crates then the crate defining
            // the trait will be interested in our implementation.
            if imp.impl_item.def_id.krate == did.krate { continue }
            // If the implementation is from another crate then that crate
            // should add it.
            if !imp.impl_item.def_id.is_local() { continue }
            have_impls = true;
            write!(implementors, "{{text:{},synthetic:{},types:{}}},",
                   as_json(&imp.inner_impl().to_string()),
                   imp.inner_impl().synthetic,
                   as_json(&collect_paths_for_type(imp.inner_impl().for_.clone()))).unwrap();
        }
        implementors.push_str("];");

        // Only create a js file if we have impls to add to it. If the trait is
        // documented locally though we always create the file to avoid dead
        // links.
        if !have_impls && !cache.paths.contains_key(&did) {
            continue;
        }

        // Build the trait's page directory from its exact declared path so the
        // implementors file lands next to the trait's own documentation.
        let mut mydst = dst.clone();
        for part in &remote_path[..remote_path.len() - 1] {
            mydst.push(part);
        }
        try_err!(fs::create_dir_all(&mydst), &mydst);
        mydst.push(&format!("{}.{}.js",
                            remote_item_type.css_class(),
                            remote_path[remote_path.len() - 1]));

        let mut all_implementors = try_err!(collect(&mydst, &krate.name, "implementors"),
                                            &mydst);
        all_implementors.push(implementors);
        // Sort the implementors by crate so the file will be generated
        // identically even with rustdoc running in parallel.
        all_implementors.sort();

        let mut f = try_err!(File::create(&mydst), &mydst);
        try_err!(writeln!(&mut f, "(function() {{var implementors = {{}};"), &mydst);
        for implementor in &all_implementors {
            try_err!(writeln!(&mut f, "{}", *implementor), &mydst);
        }
        // Hand the collected implementors to main.js, or park them on
        // `window` if main.js hasn't loaded yet.
        try_err!(writeln!(&mut f, "{}", r"
            if (window.register_implementors) {
                window.register_implementors(implementors);
            } else {
                window.pending_implementors = implementors;
            }
        "), &mydst);
        try_err!(writeln!(&mut f, r"}})()"), &mydst);
    }
    Ok(())
}

/// Renders every local source file into `dst/src/<krate>` and returns the
/// (possibly source-disabled) crate for further processing.
fn render_sources(dst: &Path, scx: &mut SharedContext,
                  krate: clean::Crate) -> Result<clean::Crate, Error> {
    info!("emitting source files");
    let dst = dst.join("src").join(&krate.name);
    try_err!(fs::create_dir_all(&dst), &dst);
    let mut folder = SourceCollector {
        dst,
        scx,
    };
    Ok(folder.fold_crate(krate))
}

/// Writes the entire contents of a string to a destination, not attempting to
/// catch any errors.
fn write(dst: PathBuf, contents: &[u8]) -> Result<(), Error> {
    Ok(try_err!(fs::write(&dst, contents), &dst))
}

/// Writes `contents` to `dst`, minifying first when enabled; `.css` files go
/// through the CSS minifier, everything else through the JS minifier.
fn write_minify(dst: PathBuf, contents: &str, enable_minification: bool) -> Result<(), Error> {
    if enable_minification {
        if dst.extension() == Some(&OsStr::new("css")) {
            let res = try_none!(minifier::css::minify(contents).ok(), &dst);
            write(dst, res.as_bytes())
        } else {
            write(dst, minifier::js::minify(contents).as_bytes())
        }
    } else {
        write(dst, contents.as_bytes())
    }
}

/// Takes a path to a source file and cleans the path to it. This canonicalizes
/// things like ".." to components which preserve the "top down" hierarchy of a
/// static HTML tree. Each component in the cleaned path will be passed as an
/// argument to `f`. The very last component of the path (ie the file name) will
/// be passed to `f` if `keep_filename` is true, and ignored otherwise.
// FIXME (#9639): The closure should deal with &[u8] instead of &str // FIXME (#9639): This is too conservative, rejecting non-UTF-8 paths fn clean_srcpath<F>(src_root: &Path, p: &Path, keep_filename: bool, mut f: F) where F: FnMut(&str), { // make it relative, if possible let p = p.strip_prefix(src_root).unwrap_or(p); let mut iter = p.components().peekable(); while let Some(c) = iter.next() { if !keep_filename && iter.peek().is_none() { break; } match c { Component::ParentDir => f("up"), Component::Normal(c) => f(c.to_str().unwrap()), _ => continue, } } } /// Attempts to find where an external crate is located, given that we're /// rendering in to the specified source destination. fn extern_location(e: &clean::ExternalCrate, dst: &Path) -> ExternalLocation { // See if there's documentation generated into the local directory let local_location = dst.join(&e.name); if local_location.is_dir() { return Local; } // Failing that, see if there's an attribute specifying where to find this // external crate e.attrs.lists("doc") .filter(|a| a.check_name("html_root_url")) .filter_map(|a| a.value_str()) .map(|url| { let mut url = url.to_string(); if !url.ends_with("/") { url.push('/') } Remote(url) }).next().unwrap_or(Unknown) // Well, at least we tried. } impl<'a> DocFolder for SourceCollector<'a> { fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> { // If we're including source files, and we haven't seen this file yet, // then we need to render it out to the filesystem. if self.scx.include_sources // skip all invalid or macro spans && item.source.filename.is_real() // skip non-local items && item.def_id.is_local() { // If it turns out that we couldn't read this file, then we probably // can't read any of the files (generating html output from json or // something like that), so just don't include sources for the // entire crate. The other option is maintaining this mapping on a // per-file basis, but that's probably not worth it... 
            self.scx
                .include_sources = match self.emit_source(&item.source.filename) {
                Ok(()) => true,
                Err(e) => {
                    println!("warning: source code was requested to be rendered, \
                              but processing `{}` had an error: {}",
                             item.source.filename, e);
                    println!("         skipping rendering of source code");
                    false
                }
            };
        }
        self.fold_item_recur(item)
    }
}

impl<'a> SourceCollector<'a> {
    /// Renders the given filename into its corresponding HTML source file.
    fn emit_source(&mut self, filename: &FileName) -> io::Result<()> {
        let p = match *filename {
            FileName::Real(ref file) => file,
            _ => return Ok(()),
        };
        if self.scx.local_sources.contains_key(&**p) {
            // We've already emitted this source
            return Ok(());
        }

        let contents = fs::read_to_string(&p)?;

        // Remove the utf-8 BOM if any
        let contents = if contents.starts_with("\u{feff}") {
            &contents[3..]
        } else {
            &contents[..]
        };

        // Create the intermediate directories
        let mut cur = self.dst.clone();
        // Start relative to doc-root from `src/<krate>/`; each path component
        // below adds one more `../` so in-page links resolve correctly.
        let mut root_path = String::from("../../");
        let mut href = String::new();
        clean_srcpath(&self.scx.src_root, &p, false, |component| {
            cur.push(component);
            fs::create_dir_all(&cur).unwrap();
            root_path.push_str("../");
            href.push_str(component);
            href.push('/');
        });
        let mut fname = p.file_name()
                         .expect("source has no filename")
                         .to_os_string();
        fname.push(".html");
        cur.push(&fname);
        href.push_str(&fname.to_string_lossy());

        let mut w = BufWriter::new(File::create(&cur)?);
        let title = format!("{} -- source", cur.file_name().unwrap()
                                               .to_string_lossy());
        let desc = format!("Source to the Rust file `{}`.", filename);
        let page = layout::Page {
            title: &title,
            css_class: "source",
            root_path: &root_path,
            description: &desc,
            keywords: BASIC_KEYWORDS,
            resource_suffix: &self.scx.resource_suffix,
        };
        layout::render(&mut w, &self.scx.layout,
                       &page, &(""), &Source(contents),
                       self.scx.css_file_extension.is_some(),
                       &self.scx.themes)?;
        w.flush()?;
        // Remember the href so later items from the same file can link to it
        // and we never render the file twice.
        self.scx.local_sources.insert(p.clone(), href);
        Ok(())
    }
}

impl DocFolder for Cache {
    fn fold_item(&mut self, item: clean::Item) ->
        Option<clean::Item> {
        // If this is a stripped module,
        // we don't want it or its children in the search index.
        let orig_stripped_mod = match item.inner {
            clean::StrippedItem(box clean::ModuleItem(..)) => {
                mem::replace(&mut self.stripped_mod, true)
            }
            _ => self.stripped_mod,
        };

        // If the impl is from a masked crate or references something from a
        // masked crate then remove it completely.
        if let clean::ImplItem(ref i) = item.inner {
            if self.masked_crates.contains(&item.def_id.krate) ||
               i.trait_.def_id().map_or(false, |d| self.masked_crates.contains(&d.krate)) ||
               i.for_.def_id().map_or(false, |d| self.masked_crates.contains(&d.krate)) {
                return None;
            }
        }

        // Register any generics to their corresponding string. This is used
        // when pretty-printing types.
        if let Some(generics) = item.inner.generics() {
            self.generics(generics);
        }

        // Propagate a trait method's documentation to all implementors of the
        // trait.
        if let clean::TraitItem(ref t) = item.inner {
            self.traits.entry(item.def_id).or_insert_with(|| t.clone());
        }

        // Collect all the implementors of traits.
        if let clean::ImplItem(ref i) = item.inner {
            if let Some(did) = i.trait_.def_id() {
                self.implementors.entry(did).or_insert(vec![]).push(Impl {
                    impl_item: item.clone(),
                });
            }
        }

        // Index this method for searching later on.
        if let Some(ref s) = item.name {
            // Determine the parent `DefId` and module path under which this
            // item should be indexed; `is_inherent_impl_item` marks entries
            // whose parent may not be in `paths` yet (see orphan handling).
            let (parent, is_inherent_impl_item) = match item.inner {
                clean::StrippedItem(..) => ((None, None), false),
                clean::AssociatedConstItem(..) |
                clean::TypedefItem(_, true) if self.parent_is_trait_impl => {
                    // skip associated items in trait impls
                    ((None, None), false)
                }
                clean::AssociatedTypeItem(..) |
                clean::TyMethodItem(..) |
                clean::StructFieldItem(..) |
                clean::VariantItem(..) => {
                    ((Some(*self.parent_stack.last().unwrap()),
                      Some(&self.stack[..self.stack.len() - 1])),
                     false)
                }
                clean::MethodItem(..) | clean::AssociatedConstItem(..)
                => {
                    if self.parent_stack.is_empty() {
                        ((None, None), false)
                    } else {
                        let last = self.parent_stack.last().unwrap();
                        let did = *last;
                        let path = match self.paths.get(&did) {
                            // The current stack not necessarily has correlation
                            // for where the type was defined. On the other
                            // hand, `paths` always has the right
                            // information if present.
                            Some(&(ref fqp, ItemType::Trait)) |
                            Some(&(ref fqp, ItemType::Struct)) |
                            Some(&(ref fqp, ItemType::Union)) |
                            Some(&(ref fqp, ItemType::Enum)) =>
                                Some(&fqp[..fqp.len() - 1]),
                            Some(..) => Some(&*self.stack),
                            None => None
                        };
                        ((Some(*last), path), true)
                    }
                }
                _ => ((None, Some(&*self.stack)), false)
            };

            match parent {
                (parent, Some(path)) if is_inherent_impl_item || (!self.stripped_mod) => {
                    debug_assert!(!item.is_stripped());

                    // A crate has a module at its root, containing all items,
                    // which should not be indexed. The crate-item itself is
                    // inserted later on when serializing the search-index.
                    if item.def_id.index != CRATE_DEF_INDEX {
                        self.search_index.push(IndexItem {
                            ty: item.type_(),
                            name: s.to_string(),
                            path: path.join("::").to_string(),
                            desc: plain_summary_line(item.doc_value()),
                            parent,
                            parent_idx: None,
                            search_type: get_index_search_type(&item),
                        });
                    }
                }
                (Some(parent), None) if is_inherent_impl_item => {
                    // We have a parent, but we don't know where they're
                    // defined yet. Wait for later to index this item.
                    self.orphan_impl_items.push((parent, item.clone()));
                }
                _ => {}
            }
        }

        // Keep track of the fully qualified path for this item.
        let pushed = match item.name {
            Some(ref n) if !n.is_empty() => {
                self.stack.push(n.to_string());
                true
            }
            _ => false,
        };

        match item.inner {
            clean::StructItem(..) | clean::EnumItem(..) |
            clean::TypedefItem(..) | clean::TraitItem(..) |
            clean::FunctionItem(..) | clean::ModuleItem(..) |
            clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) |
            clean::ConstantItem(..) | clean::StaticItem(..) |
            clean::UnionItem(..) | clean::ForeignTypeItem |
            clean::MacroItem(..)
            if !self.stripped_mod => {
                // Re-exported items mean that the same id can show up twice
                // in the rustdoc ast that we're looking at. We know,
                // however, that a re-exported item doesn't show up in the
                // `public_items` map, so we can skip inserting into the
                // paths map if there was already an entry present and we're
                // not a public item.
                if !self.paths.contains_key(&item.def_id) ||
                   self.access_levels.is_public(item.def_id)
                {
                    self.paths.insert(item.def_id,
                                      (self.stack.clone(), item.type_()));
                }
                self.add_aliases(&item);
            }
            // Link variants to their parent enum because pages aren't emitted
            // for each variant.
            clean::VariantItem(..) if !self.stripped_mod => {
                let mut stack = self.stack.clone();
                stack.pop();
                self.paths.insert(item.def_id, (stack, ItemType::Enum));
            }

            clean::PrimitiveItem(..) if item.visibility.is_some() => {
                self.add_aliases(&item);
                self.paths.insert(item.def_id,
                                  (self.stack.clone(), item.type_()));
            }

            _ => {}
        }

        // Maintain the parent stack
        let orig_parent_is_trait_impl = self.parent_is_trait_impl;
        let parent_pushed = match item.inner {
            clean::TraitItem(..) | clean::EnumItem(..) | clean::ForeignTypeItem |
            clean::StructItem(..) | clean::UnionItem(..) => {
                self.parent_stack.push(item.def_id);
                self.parent_is_trait_impl = false;
                true
            }
            clean::ImplItem(ref i) => {
                self.parent_is_trait_impl = i.trait_.is_some();
                match i.for_ {
                    clean::ResolvedPath{ did, .. } => {
                        self.parent_stack.push(did);
                        true
                    }
                    ref t => {
                        // Impls on primitives use the primitive's recorded
                        // documentation location as the parent, when known.
                        let prim_did = t.primitive_type().and_then(|t| {
                            self.primitive_locations.get(&t).cloned()
                        });
                        match prim_did {
                            Some(did) => {
                                self.parent_stack.push(did);
                                true
                            }
                            None => false,
                        }
                    }
                }
            }
            _ => false
        };

        // Once we've recursively found all the generics, hoard off all the
        // implementations elsewhere.
        let ret = self.fold_item_recur(item).and_then(|item| {
            if let clean::Item { inner: clean::ImplItem(_), .. } = item {
                // Figure out the id of this impl. This may map to a
                // primitive rather than always to a struct/enum.
// Note: matching twice to restrict the lifetime of the `i` borrow. let mut dids = FxHashSet(); if let clean::Item { inner: clean::ImplItem(ref i), .. } = item { match i.for_ { clean::ResolvedPath { did, .. } | clean::BorrowedRef { type_: box clean::ResolvedPath { did, .. }, .. } => { dids.insert(did); } ref t => { let did = t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }); if let Some(did) = did { dids.insert(did); } } } if let Some(generics) = i.trait_.as_ref().and_then(|t| t.generics()) { for bound in generics { if let Some(did) = bound.def_id() { dids.insert(did); } } } } else { unreachable!() }; for did in dids { self.impls.entry(did).or_insert(vec![]).push(Impl { impl_item: item.clone(), }); } None } else { Some(item) } }); if pushed { self.stack.pop().unwrap(); } if parent_pushed { self.parent_stack.pop().unwrap(); } self.stripped_mod = orig_stripped_mod; self.parent_is_trait_impl = orig_parent_is_trait_impl; ret } } impl<'a> Cache { fn generics(&mut self, generics: &clean::Generics) { for param in &generics.params { match param.kind { clean::GenericParamDefKind::Lifetime => {} clean::GenericParamDefKind::Type { did, .. 
                } => {
                    self.typarams.insert(did, param.name.clone());
                }
            }
        }
    }

    /// Collects the item's `#[doc(alias = "...")]` values into the alias map
    /// so the search index can find the item under those extra names.
    fn add_aliases(&mut self, item: &clean::Item) {
        if item.def_id.index == CRATE_DEF_INDEX {
            return
        }
        if let Some(ref item_name) = item.name {
            // Parent path of the item; falls back to "std" when the item has
            // no recorded path. NOTE(review): the "std" fallback looks like a
            // default for core/std primitives — confirm against callers.
            let path = self.paths.get(&item.def_id)
                                 .map(|p| p.0[..p.0.len() - 1].join("::"))
                                 .unwrap_or("std".to_owned());
            // Deduplicate aliases via FxHashSet before inserting.
            for alias in item.attrs.lists("doc")
                                   .filter(|a| a.check_name("alias"))
                                   .filter_map(|a| a.value_str()
                                                    .map(|s| s.to_string().replace("\"", "")))
                                   .filter(|v| !v.is_empty())
                                   .collect::<FxHashSet<_>>()
                                   .into_iter() {
                self.aliases.entry(alias)
                            .or_insert(Vec::with_capacity(1))
                            .push(IndexItem {
                                ty: item.type_(),
                                name: item_name.to_string(),
                                path: path.clone(),
                                desc: plain_summary_line(item.doc_value()),
                                parent: None,
                                parent_idx: None,
                                search_type: get_index_search_type(&item),
                            });
            }
        }
    }
}

/// A single link (relative URL plus display name) on the "list of all items"
/// page; rendered as an `<a>` tag by its `Display` impl.
#[derive(Debug, Eq, PartialEq, Hash)]
struct ItemEntry {
    url: String,
    name: String,
}

impl ItemEntry {
    /// Creates an entry, stripping any leading '/' from the URL so it stays
    /// relative.
    fn new(mut url: String, name: String) -> ItemEntry {
        while url.starts_with('/') {
            url.remove(0);
        }
        ItemEntry {
            url,
            name,
        }
    }
}

impl fmt::Display for ItemEntry {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "<a href='{}'>{}</a>", self.url, Escape(&self.name))
    }
}

// Entries are ordered by display name only.
impl PartialOrd for ItemEntry {
    fn partial_cmp(&self, other: &ItemEntry) -> Option<::std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for ItemEntry {
    fn cmp(&self, other: &ItemEntry) -> ::std::cmp::Ordering {
        self.name.cmp(&other.name)
    }
}

/// Accumulator for the "List of all items" page: one bucket of `ItemEntry`s
/// per item kind.
#[derive(Debug)]
struct AllTypes {
    structs: HashSet<ItemEntry>,
    enums: HashSet<ItemEntry>,
    unions: HashSet<ItemEntry>,
    primitives: HashSet<ItemEntry>,
    traits: HashSet<ItemEntry>,
    macros: HashSet<ItemEntry>,
    functions: HashSet<ItemEntry>,
    typedefs: HashSet<ItemEntry>,
    statics: HashSet<ItemEntry>,
    constants: HashSet<ItemEntry>,
    keywords: HashSet<ItemEntry>,
}

impl AllTypes {
    fn new() -> AllTypes {
        AllTypes {
            structs: HashSet::with_capacity(100),
            enums: HashSet::with_capacity(100),
            unions: HashSet::with_capacity(100),
            primitives: HashSet::with_capacity(26),
            traits: HashSet::with_capacity(100),
            macros: HashSet::with_capacity(100),
            functions: HashSet::with_capacity(100),
            typedefs: HashSet::with_capacity(100),
            statics: HashSet::with_capacity(100),
            constants: HashSet::with_capacity(100),
            keywords: HashSet::with_capacity(100),
        }
    }

    /// Files `item_name` (a `::`-separated full path, crate name first) into
    /// the bucket matching `item_type`. The crate-name segment is dropped from
    /// the generated URL and from the displayed name.
    fn append(&mut self, item_name: String, item_type: &ItemType) {
        let mut url: Vec<_> = item_name.split("::").skip(1).collect();
        if let Some(name) = url.pop() {
            let new_url = format!("{}/{}.{}.html", url.join("/"), item_type, name);
            url.push(name);
            let name = url.join("::");
            match *item_type {
                ItemType::Struct => self.structs.insert(ItemEntry::new(new_url, name)),
                ItemType::Enum => self.enums.insert(ItemEntry::new(new_url, name)),
                ItemType::Union => self.unions.insert(ItemEntry::new(new_url, name)),
                ItemType::Primitive => self.primitives.insert(ItemEntry::new(new_url, name)),
                ItemType::Trait => self.traits.insert(ItemEntry::new(new_url, name)),
                ItemType::Macro => self.macros.insert(ItemEntry::new(new_url, name)),
                ItemType::Function => self.functions.insert(ItemEntry::new(new_url, name)),
                ItemType::Typedef => self.typedefs.insert(ItemEntry::new(new_url, name)),
                ItemType::Static => self.statics.insert(ItemEntry::new(new_url, name)),
                ItemType::Constant => self.constants.insert(ItemEntry::new(new_url, name)),
                // Other kinds are not listed on the all-items page.
                _ => true,
            };
        }
    }
}

/// Writes one titled `<h3>`/`<ul>` section for the all-items page, with the
/// entries sorted by name. Empty sections are omitted entirely.
fn print_entries(f: &mut fmt::Formatter, e: &HashSet<ItemEntry>, title: &str,
                 class: &str) -> fmt::Result {
    if !e.is_empty() {
        let mut e: Vec<&ItemEntry> = e.iter().collect();
        e.sort();
        write!(f, "<h3 id='{}'>{}</h3><ul class='{} docblock'>{}</ul>",
               title,
               Escape(title),
               class,
               e.iter().map(|s| format!("<li>{}</li>", s)).collect::<String>())?;
    }
    Ok(())
}

impl fmt::Display for AllTypes {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f,
"<h1 class='fqn'>\
     <span class='in-band'>List of all items</span>\
     <span class='out-of-band'>\
         <span id='render-detail'>\
             <a id=\"toggle-all-docs\" href=\"javascript:void(0)\" title=\"collapse all docs\">\
                 [<span class='inner'>&#x2212;</span>]\
             </a>\
         </span>
     </span>
</h1>")?;
        print_entries(f, &self.structs, "Structs", "structs")?;
        print_entries(f, &self.enums, "Enums", "enums")?;
        print_entries(f, &self.unions, "Unions", "unions")?;
        print_entries(f, &self.primitives, "Primitives", "primitives")?;
        print_entries(f, &self.traits, "Traits", "traits")?;
        print_entries(f, &self.macros, "Macros", "macros")?;
        print_entries(f, &self.functions, "Functions", "functions")?;
        print_entries(f, &self.typedefs, "Typedefs", "typedefs")?;
        print_entries(f, &self.statics, "Statics", "statics")?;
        print_entries(f, &self.constants, "Constants", "constants")
    }
}

/// Model for the generated `settings.html` page.
#[derive(Debug)]
struct Settings<'a> {
    // (id, explanation, default value)
    settings: Vec<(&'static str, &'static str, bool)>,
    root_path: &'a str,
    suffix: &'a str,
}

impl<'a> Settings<'a> {
    pub fn new(root_path: &'a str, suffix: &'a str) -> Settings<'a> {
        Settings {
            settings: vec![
                ("item-declarations", "Auto-hide item declarations.", true),
                ("item-attributes", "Auto-hide item attributes.", true),
                ("go-to-only-result", "Directly go to item in search if there is only one result",
                 false),
            ],
            root_path,
            suffix,
        }
    }
}

impl<'a> fmt::Display for Settings<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Each setting becomes a checkbox row; `settings{suffix}.js` wires up
        // the toggles.
        write!(f,
"<h1 class='fqn'>\
     <span class='in-band'>Rustdoc settings</span>\
</h1>\
<div class='settings'>{}</div>\
<script src='{}settings{}.js'></script>",
               self.settings.iter()
                            .map(|(id, text, enabled)| {
                                format!("<div class='setting-line'>\
                                             <label class='toggle'>\
                                                <input type='checkbox' id='{}' {}>\
                                                <span class='slider'></span>\
                                             </label>\
                                             <div>{}</div>\
                                         </div>", id, if *enabled { " checked" } else { "" }, text)
                            })
                            .collect::<String>(),
               self.root_path,
               self.suffix)
    }
}

impl Context {
    /// String representation of how to get back to the root path of the 'doc/'
    /// folder in terms of a relative URL.
    fn root_path(&self) -> String {
        // One "../" per component of the current module path.
        repeat("../").take(self.current.len()).collect::<String>()
    }

    /// Recurse in the directory structure and change the "root path" to make
    /// sure it always points to the top (relatively).
    fn recurse<T, F>(&mut self, s: String, f: F) -> T where
        F: FnOnce(&mut Context) -> T,
    {
        if s.is_empty() {
            panic!("Unexpected empty destination: {:?}", self.current);
        }
        let prev = self.dst.clone();
        self.dst.push(&s);
        self.current.push(s);

        info!("Recursing into {}", self.dst.display());

        let ret = f(self);

        info!("Recursed; leaving {}", self.dst.display());

        // Go back to where we were at
        self.dst = prev;
        self.current.pop().unwrap();

        ret
    }

    /// Main method for rendering a crate.
    ///
    /// This currently isn't parallelized, but it'd be pretty easy to add
    /// parallelization to this function.
    fn krate(self, mut krate: clean::Crate) -> Result<(), Error> {
        let mut item = match krate.module.take() {
            Some(i) => i,
            None => return Ok(()),
        };
        let final_file = self.dst.join(&krate.name)
                                 .join("all.html");
        let settings_file = self.dst.join("settings.html");

        let crate_name = krate.name.clone();
        item.name = Some(krate.name);

        let mut all = AllTypes::new();

        {
            // Render the crate documentation
            // Work-queue driven: `item` pushes sub-items back onto `work`.
            let mut work = vec![(self.clone(), item)];

            while let Some((mut cx, item)) = work.pop() {
                cx.item(item, &mut all, |cx, item| {
                    work.push((cx.clone(), item))
                })?
            }
        }

        // Render the `all.html` page listing every item in the crate.
        let mut w = BufWriter::new(try_err!(File::create(&final_file), &final_file));
        let mut root_path = self.dst.to_str().expect("invalid path").to_owned();
        if !root_path.ends_with('/') {
            root_path.push('/');
        }
        let mut page = layout::Page {
            title: "List of all items in this crate",
            css_class: "mod",
            root_path: "../",
            description: "List of all items in this crate",
            keywords: BASIC_KEYWORDS,
            resource_suffix: &self.shared.resource_suffix,
        };
        let sidebar = if let Some(ref version) = cache().crate_version {
            format!("<p class='location'>Crate {}</p>\
                     <div class='block version'>\
                         <p>Version {}</p>\
                     </div>\
                     <a id='all-types' href='index.html'><p>Back to index</p></a>",
                    crate_name, version)
        } else {
            String::new()
        };
        try_err!(layout::render(&mut w, &self.shared.layout,
                                &page, &sidebar, &all,
                                self.shared.css_file_extension.is_some(),
                                &self.shared.themes),
                 &final_file);

        // Generating settings page.
        let settings = Settings::new("./", &self.shared.resource_suffix);
        page.title = "Rustdoc settings";
        page.description = "Settings of Rustdoc";
        page.root_path = "./";

        let mut w = BufWriter::new(try_err!(File::create(&settings_file), &settings_file));
        let mut themes = self.shared.themes.clone();
        let sidebar = "<p class='location'>Settings</p><div class='sidebar-elems'></div>";
        themes.push(PathBuf::from("settings.css"));
        // The settings page uses a stripped-down layout (no krate/logo/favicon).
        let mut layout = self.shared.layout.clone();
        layout.krate = String::new();
        layout.logo = String::new();
        layout.favicon = String::new();
        try_err!(layout::render(&mut w, &layout,
                                &page, &sidebar, &settings,
                                self.shared.css_file_extension.is_some(),
                                &themes),
                 &settings_file);

        Ok(())
    }

    /// Renders a single item's page into `writer`. When `pushname` is true the
    /// item's own name is appended to the page title after the module path.
    fn render_item(&self,
                   writer: &mut io::Write,
                   it: &clean::Item,
                   pushname: bool)
                   -> io::Result<()> {
        // A little unfortunate that this is done like this, but it sure
        // does make formatting *a lot* nicer.
        CURRENT_LOCATION_KEY.with(|slot| {
            *slot.borrow_mut() = self.current.clone();
        });

        let mut title = if it.is_primitive() {
            // No need to include the namespace for primitive types
            String::new()
        } else {
            self.current.join("::")
        };
        if pushname {
            if !title.is_empty() {
                title.push_str("::");
            }
            title.push_str(it.name.as_ref().unwrap());
        }
        title.push_str(" - Rust");
        let tyname = it.type_().css_class();
        let desc = if it.is_crate() {
            format!("API documentation for the Rust `{}` crate.",
                    self.shared.layout.krate)
        } else {
            format!("API documentation for the Rust `{}` {} in crate `{}`.",
                    it.name.as_ref().unwrap(), tyname, self.shared.layout.krate)
        };
        let keywords = make_item_keywords(it);
        let page = layout::Page {
            css_class: tyname,
            root_path: &self.root_path(),
            title: &title,
            description: &desc,
            keywords: &keywords,
            resource_suffix: &self.shared.resource_suffix,
        };

        reset_ids(true);

        if !self.render_redirect_pages {
            layout::render(writer, &self.shared.layout, &page,
                           &Sidebar{ cx: self, item: it },
                           &Item{ cx: self, item: it },
                           self.shared.css_file_extension.is_some(),
                           &self.shared.themes)?;
        } else {
            // Stripped item: emit a redirect to its canonical page (if any)
            // instead of a full page.
            let mut url = self.root_path();
            if let Some(&(ref names, ty)) = cache().paths.get(&it.def_id) {
                for name in &names[..names.len() - 1] {
                    url.push_str(name);
                    url.push_str("/");
                }
                url.push_str(&item_path(ty, names.last().unwrap()));
                layout::redirect(writer, &url)?;
            }
        }
        Ok(())
    }

    /// Non-parallelized version of rendering an item. This will take the input
    /// item, render its contents, and then invoke the specified closure with
    /// all sub-items which need to be rendered.
    ///
    /// The rendering driver uses this closure to queue up more work.
    fn item<F>(&mut self, item: clean::Item, all: &mut AllTypes, mut f: F) -> Result<(), Error>
        where F: FnMut(&mut Context, clean::Item),
    {
        // Stripped modules survive the rustdoc passes (i.e. `strip-private`)
        // if they contain impls for public types. These modules can also
        // contain items such as publicly re-exported structures.
        //
        // External crates will provide links to these structures, so
        // these modules are recursed into, but not rendered normally
        // (a flag on the context).
        if !self.render_redirect_pages {
            self.render_redirect_pages = item.is_stripped();
        }

        if item.is_mod() {
            // modules are special because they add a namespace. We also need to
            // recurse into the items of the module as well.
            let name = item.name.as_ref().unwrap().to_string();
            let mut item = Some(item);
            self.recurse(name, |this| {
                let item = item.take().unwrap();

                let mut buf = Vec::new();
                this.render_item(&mut buf, &item, false).unwrap();
                // buf will be empty if the module is stripped and there is no redirect for it
                if !buf.is_empty() {
                    try_err!(this.shared.ensure_dir(&this.dst), &this.dst);
                    let joint_dst = this.dst.join("index.html");
                    let mut dst = try_err!(File::create(&joint_dst), &joint_dst);
                    try_err!(dst.write_all(&buf), &joint_dst);
                }

                let m = match item.inner {
                    clean::StrippedItem(box clean::ModuleItem(m)) |
                    clean::ModuleItem(m) => m,
                    _ => unreachable!()
                };

                // Render sidebar-items.js used throughout this module.
                if !this.render_redirect_pages {
                    let items = this.build_sidebar_items(&m);
                    let js_dst = this.dst.join("sidebar-items.js");
                    let mut js_out = BufWriter::new(try_err!(File::create(&js_dst), &js_dst));
                    try_err!(write!(&mut js_out, "initSidebarItems({});",
                                    as_json(&items)), &js_dst);
                }

                // Queue all children of this module for rendering.
                for item in m.items {
                    f(this, item);
                }

                Ok(())
            })?;
        } else if item.name.is_some() {
            let mut buf = Vec::new();
            self.render_item(&mut buf, &item, true).unwrap();
            // buf will be empty if the item is stripped and there is no redirect for it
            if !buf.is_empty() {
                let name = item.name.as_ref().unwrap();
                let item_type = item.type_();
                let file_name = &item_path(item_type, name);
                try_err!(self.shared.ensure_dir(&self.dst), &self.dst);
                let joint_dst = self.dst.join(file_name);
                let mut dst = try_err!(File::create(&joint_dst), &joint_dst);
                try_err!(dst.write_all(&buf), &joint_dst);

                if !self.render_redirect_pages {
                    all.append(full_path(self, &item), &item_type);
                }

                // Redirect from a sane URL using the namespace to Rustdoc's
                // URL for the page.
                let redir_name = format!("{}.{}.html", name, item_type.name_space());
                let redir_dst = self.dst.join(redir_name);
                // `create_new` keeps an already-existing page from being
                // clobbered by the redirect stub.
                if let Ok(redirect_out) = OpenOptions::new().create_new(true)
                                                            .write(true)
                                                            .open(&redir_dst) {
                    let mut redirect_out = BufWriter::new(redirect_out);
                    try_err!(layout::redirect(&mut redirect_out, file_name), &redir_dst);
                }

                // If the item is a macro, redirect from the old macro URL (with !)
                // to the new one (without).
                // FIXME(#35705) remove this redirect.
                if item_type == ItemType::Macro {
                    let redir_name = format!("{}.{}!.html", item_type, name);
                    let redir_dst = self.dst.join(redir_name);
                    let redirect_out = try_err!(File::create(&redir_dst), &redir_dst);
                    let mut redirect_out = BufWriter::new(redirect_out);
                    try_err!(layout::redirect(&mut redirect_out, file_name), &redir_dst);
                }
            }
        }
        Ok(())
    }

    /// Builds the per-kind item lists used by `sidebar-items.js` for a module
    /// page: CSS class -> [(name, short doc)].
    fn build_sidebar_items(&self, m: &clean::Module) -> BTreeMap<String, Vec<NameDoc>> {
        // BTreeMap instead of HashMap to get a sorted output
        let mut map = BTreeMap::new();
        for item in &m.items {
            if item.is_stripped() { continue }

            let short = item.type_().css_class();
            let myname = match item.name {
                None => continue,
                Some(ref s) => s.to_string(),
            };
            let short = short.to_string();
            map.entry(short).or_insert(vec![])
                .push((myname, Some(plain_summary_line(item.doc_value()))));
        }

        if self.shared.sort_modules_alphabetically {
            for (_, items) in &mut map {
                items.sort();
            }
        }
        map
    }
}

impl<'a> Item<'a> {
    /// Generate a url appropriate for an `href` attribute back to the source of
    /// this item.
    ///
    /// The url generated, when clicked, will redirect the browser back to the
    /// original source code.
    ///
    /// If `None` is returned, then a source link couldn't be generated. This
    /// may happen, for example, with externally inlined items where the source
    /// of their crate documentation isn't known.
fn src_href(&self) -> Option<String> { let mut root = self.cx.root_path(); let cache = cache(); let mut path = String::new(); // We can safely ignore macros from other libraries let file = match self.item.source.filename { FileName::Real(ref path) => path, _ => return None, }; let (krate, path) = if self.item.def_id.is_local() { if let Some(path) = self.cx.shared.local_sources.get(file) { (&self.cx.shared.layout.krate, path) } else { return None; } } else { let (krate, src_root) = match cache.extern_locations.get(&self.item.def_id.krate) { Some(&(ref name, ref src, Local)) => (name, src), Some(&(ref name, ref src, Remote(ref s))) => { root = s.to_string(); (name, src) } Some(&(_, _, Unknown)) | None => return None, }; clean_srcpath(&src_root, file, false, |component| { path.push_str(component); path.push('/'); }); let mut fname = file.file_name().expect("source has no filename") .to_os_string(); fname.push(".html"); path.push_str(&fname.to_string_lossy()); (krate, &path) }; let lines = if self.item.source.loline == self.item.source.hiline { format!("{}", self.item.source.loline) } else { format!("{}-{}", self.item.source.loline, self.item.source.hiline) }; Some(format!("{root}src/{krate}/{path}#{lines}", root = Escape(&root), krate = krate, path = path, lines = lines)) } } fn wrap_into_docblock<F>(w: &mut fmt::Formatter, f: F) -> fmt::Result where F: Fn(&mut fmt::Formatter) -> fmt::Result { write!(w, "<div class=\"docblock type-decl\">")?; f(w)?; write!(w, "</div>") } impl<'a> fmt::Display for Item<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { debug_assert!(!self.item.is_stripped()); // Write the breadcrumb trail header for the top write!(fmt, "<h1 class='fqn'><span class='in-band'>")?; match self.item.inner { clean::ModuleItem(ref m) => if m.is_crate { write!(fmt, "Crate ")?; } else { write!(fmt, "Module ")?; }, clean::FunctionItem(..) | clean::ForeignFunctionItem(..) => write!(fmt, "Function ")?, clean::TraitItem(..) 
            => write!(fmt, "Trait ")?,
            clean::StructItem(..) => write!(fmt, "Struct ")?,
            clean::UnionItem(..) => write!(fmt, "Union ")?,
            clean::EnumItem(..) => write!(fmt, "Enum ")?,
            clean::TypedefItem(..) => write!(fmt, "Type Definition ")?,
            clean::MacroItem(..) => write!(fmt, "Macro ")?,
            clean::PrimitiveItem(..) => write!(fmt, "Primitive Type ")?,
            clean::StaticItem(..) | clean::ForeignStaticItem(..) => write!(fmt, "Static ")?,
            clean::ConstantItem(..) => write!(fmt, "Constant ")?,
            clean::ForeignTypeItem => write!(fmt, "Foreign Type ")?,
            clean::KeywordItem(..) => write!(fmt, "Keyword ")?,
            _ => {
                // We don't generate pages for any other type.
                unreachable!();
            }
        }
        // Breadcrumb links back up through the module path (skipped for
        // primitives and keywords, which have no namespace).
        if !self.item.is_primitive() && !self.item.is_keyword() {
            let cur = &self.cx.current;
            let amt = if self.item.is_mod() { cur.len() - 1 } else { cur.len() };
            for (i, component) in cur.iter().enumerate().take(amt) {
                write!(fmt, "<a href='{}index.html'>{}</a>::<wbr>",
                       repeat("../").take(cur.len() - i - 1)
                                    .collect::<String>(),
                       component)?;
            }
        }
        write!(fmt, "<a class=\"{}\" href=''>{}</a>",
               self.item.type_(), self.item.name.as_ref().unwrap())?;

        write!(fmt, "</span>")?; // in-band
        write!(fmt, "<span class='out-of-band'>")?;
        if let Some(version) = self.item.stable_since() {
            write!(fmt,
                   "<span class='since' title='Stable since Rust version {0}'>{0}</span>",
                   version)?;
        }
        write!(fmt,
               "<span id='render-detail'>\
                   <a id=\"toggle-all-docs\" href=\"javascript:void(0)\" \
                      title=\"collapse all docs\">\
                       [<span class='inner'>&#x2212;</span>]\
                   </a>\
               </span>")?;

        // Write `src` tag
        //
        // When this item is part of a `pub use` in a downstream crate, the
        // [src] link in the downstream documentation will actually come back to
        // this page, and this link will be auto-clicked. The `id` attribute is
        // used to find the link to auto-click.
        if self.cx.shared.include_sources && !self.item.is_primitive() {
            if let Some(l) = self.src_href() {
                write!(fmt, "<a class='srclink' href='{}' title='{}'>[src]</a>",
                       l, "goto source code")?;
            }
        }

        write!(fmt, "</span></h1>")?; // out-of-band

        // Dispatch to the kind-specific page renderer.
        match self.item.inner {
            clean::ModuleItem(ref m) =>
                item_module(fmt, self.cx, self.item, &m.items),
            clean::FunctionItem(ref f) | clean::ForeignFunctionItem(ref f) =>
                item_function(fmt, self.cx, self.item, f),
            clean::TraitItem(ref t) => item_trait(fmt, self.cx, self.item, t),
            clean::StructItem(ref s) => item_struct(fmt, self.cx, self.item, s),
            clean::UnionItem(ref s) => item_union(fmt, self.cx, self.item, s),
            clean::EnumItem(ref e) => item_enum(fmt, self.cx, self.item, e),
            clean::TypedefItem(ref t, _) => item_typedef(fmt, self.cx, self.item, t),
            clean::MacroItem(ref m) => item_macro(fmt, self.cx, self.item, m),
            clean::PrimitiveItem(ref p) => item_primitive(fmt, self.cx, self.item, p),
            clean::StaticItem(ref i) | clean::ForeignStaticItem(ref i) =>
                item_static(fmt, self.cx, self.item, i),
            clean::ConstantItem(ref c) => item_constant(fmt, self.cx, self.item, c),
            clean::ForeignTypeItem => item_foreign_type(fmt, self.cx, self.item),
            clean::KeywordItem(ref k) => item_keyword(fmt, self.cx, self.item, k),
            _ => {
                // We don't generate pages for any other type.
                unreachable!();
            }
        }
    }
}

/// Relative file name for an item's page: modules get `name/index.html`,
/// everything else `<css-class>.<name>.html`.
fn item_path(ty: ItemType, name: &str) -> String {
    match ty {
        ItemType::Module => format!("{}/index.html", name),
        _ => format!("{}.{}.html", ty.css_class(), name),
    }
}

/// `::`-joined path of the current module plus the item's own name.
fn full_path(cx: &Context, item: &clean::Item) -> String {
    let mut s = cx.current.join("::");
    s.push_str("::");
    s.push_str(item.name.as_ref().unwrap());
    s
}

/// First paragraph of `s`: skips leading blank lines, then takes lines up to
/// the next blank line.
fn shorter<'a>(s: Option<&'a str>) -> String {
    match s {
        Some(s) => s.lines().skip_while(|s| s.chars().all(|c| c.is_whitespace()))
            .take_while(|line|{
            (*line).chars().any(|chr|{
                !chr.is_whitespace()
            })
        }).collect::<Vec<_>>().join("\n"),
        None => "".to_string()
    }
}

/// One-line plain-text summary of a doc string (markdown stripped).
#[inline]
fn plain_summary_line(s: Option<&str>) -> String {
    let line = shorter(s).replace("\n", " ");
    markdown::plain_summary_line(&line[..])
}

/// Renders an item's stability notes, non-exhaustive note, and full docblock.
fn document(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result {
    if let Some(ref name) = item.name {
        info!("Documenting {}", name);
    }
    document_stability(w, cx, item)?;
    document_non_exhaustive(w, item)?;
    let prefix = render_assoc_const_value(item);
    document_full(w, item, cx, &prefix)?;
    Ok(())
}

/// Render md_text as markdown.
fn render_markdown(w: &mut fmt::Formatter,
                   md_text: &str,
                   links: Vec<(String, String)>,
                   prefix: &str,)
                   -> fmt::Result {
    write!(w, "<div class='docblock'>{}{}</div>", prefix, Markdown(md_text, &links))
}

/// Renders a shortened docblock: first line plus a "[Read more]" link when the
/// docs span multiple lines.
fn document_short(w: &mut fmt::Formatter, item: &clean::Item, link: AssocItemLink,
                  prefix: &str) -> fmt::Result {
    if let Some(s) = item.doc_value() {
        let markdown = if s.contains('\n') {
            format!("{} [Read more]({})",
                    &plain_summary_line(Some(s)), naive_assoc_href(item, link))
        } else {
            format!("{}", &plain_summary_line(Some(s)))
        };
        render_markdown(w, &markdown, item.links(), prefix)?;
    } else if !prefix.is_empty() {
        write!(w, "<div class='docblock'>{}</div>", prefix)?;
    }
    Ok(())
}

/// For an associated const with a default value, returns a highlighted
/// `name: Type = value` snippet; empty string otherwise.
fn render_assoc_const_value(item: &clean::Item) -> String {
    match item.inner {
        clean::AssociatedConstItem(ref ty, Some(ref default)) => {
            highlight::render_with_highlighting(
                &format!("{}: {:#} = {}", item.name.as_ref().unwrap(), ty, default),
                None,
                None,
                None,
                None,
            )
        }
        _ => String::new(),
    }
}

/// Renders the item's full (possibly collapsed) docblock, preceded by `prefix`.
fn document_full(w: &mut fmt::Formatter, item: &clean::Item,
                 cx: &Context, prefix: &str) -> fmt::Result {
    if let Some(s) = cx.shared.maybe_collapsed_doc_value(item) {
        debug!("Doc block: =====\n{}\n=====", s);
        render_markdown(w, &*s, item.links(), prefix)?;
    } else if !prefix.is_empty() {
        write!(w, "<div class='docblock'>{}</div>", prefix)?;
    }
    Ok(())
}

/// Renders the stability/deprecation banners (if any) for an item.
fn document_stability(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result {
    let stabilities = short_stability(item, cx, true);
    if !stabilities.is_empty() {
        write!(w, "<div class='stability'>")?;
        for stability in stabilities {
            write!(w, "{}", stability)?;
        }
        write!(w, "</div>")?;
    }
    Ok(())
}

/// Renders the collapsible `#[non_exhaustive]` explanation banner.
fn document_non_exhaustive(w: &mut fmt::Formatter, item: &clean::Item) -> fmt::Result {
    if item.non_exhaustive {
        write!(w, "<div class='non-exhaustive'><div class='stab non-exhaustive'>")?;
        write!(w, "<details><summary><span class=microscope>🔬</span>")?;
        if item.is_struct() {
            write!(w, "This struct is marked as non exhaustive.")?;
        } else if item.is_enum() {
            write!(w, "This enum is marked as non exhaustive.")?;
        } else {
            write!(w, "This type is marked as non exhaustive.")?;
        }
        write!(w, "</summary><p>")?;

        if item.is_struct() {
            write!(w, "This struct is marked as non-exhaustive as additional fields may be \
                       added in the future. This means that this struct cannot be constructed in \
                       external crates using the traditional <code>Struct {{ .. }}</code> syntax;
                       cannot be matched against without a wildcard <code>..</code>; and \
                       functional-record-updates do not work on this struct.")?;
        } else if item.is_enum() {
            write!(w, "This enum is marked as non-exhaustive, and additional variants may be \
                       added in the future. When matching over values of this type, an extra \
                       <code>_</code> arm must be added to account for future extensions.")?;
        } else {
            write!(w, "This type will require a wildcard arm in any match statements or \
                       constructors.")?;
        }

        write!(w, "</p></details></div></div>")?;
    }
    Ok(())
}

/// Sort key for "natural" name ordering: (non-numeric prefix, trailing number,
/// count of that number's leading zeroes — so zero-padded names sort last).
fn name_key(name: &str) -> (&str, u64, usize) {
    // find number at end
    let split = name.bytes().rposition(|b| b < b'0' || b'9' < b).map_or(0, |s| s + 1);

    // count leading zeroes
    let after_zeroes =
        name[split..].bytes().position(|b| b != b'0').map_or(name.len(), |extra| split + extra);

    // sort leading zeroes last
    let num_zeroes = after_zeroes - split;

    match name[split..].parse() {
        Ok(n) => (&name[..split], n, num_zeroes),
        Err(_) => (name, 0, num_zeroes),
    }
}

/// Renders a module page: the module docs followed by one table per item kind.
fn item_module(w: &mut fmt::Formatter, cx: &Context,
               item: &clean::Item, items: &[clean::Item]) -> fmt::Result {
    document(w, cx, item)?;

    let mut indices = (0..items.len()).filter(|i| !items[*i].is_stripped()).collect::<Vec<usize>>();

    // the order of item types in the listing
    fn reorder(ty: ItemType) -> u8 {
        match ty {
            ItemType::ExternCrate => 0,
            ItemType::Import => 1,
            ItemType::Primitive => 2,
            ItemType::Module => 3,
            ItemType::Macro => 4,
            ItemType::Struct => 5,
            ItemType::Enum => 6,
            ItemType::Constant => 7,
            ItemType::Static => 8,
            ItemType::Trait => 9,
            ItemType::Function => 10,
            ItemType::Typedef => 12,
            ItemType::Union => 13,
            _ => 14 + ty as u8,
        }
    }

    // Sort by kind order, then stable-before-unstable, then natural name order.
    fn cmp(i1: &clean::Item, i2: &clean::Item, idx1: usize, idx2: usize) -> Ordering {
        let ty1 = i1.type_();
        let ty2 = i2.type_();
        if ty1 != ty2 {
            return (reorder(ty1), idx1).cmp(&(reorder(ty2), idx2))
        }
        let s1 = i1.stability.as_ref().map(|s| s.level);
        let s2 = i2.stability.as_ref().map(|s| s.level);
        match (s1, s2) {
            (Some(stability::Unstable), Some(stability::Stable)) => return Ordering::Greater,
            (Some(stability::Stable), Some(stability::Unstable)) => return Ordering::Less,
            _ => {}
        }
        let lhs = i1.name.as_ref().map_or("", |s| &**s);
        let rhs = i2.name.as_ref().map_or("", |s| &**s);
        name_key(lhs).cmp(&name_key(rhs))
    }

    if cx.shared.sort_modules_alphabetically {
        indices.sort_by(|&i1, &i2| cmp(&items[i1], &items[i2], i1, i2));
    }
    // This call is to remove re-export duplicates in cases such as:
    //
    // ```
    // pub mod foo {
    //     pub mod bar {
    //         pub trait Double { fn foo(); }
    //     }
    // }
    //
    // pub use foo::bar::*;
    // pub use foo::*;
    // ```
    //
    // `Double` will appear twice in the generated docs.
    //
    // FIXME: This code is quite ugly and could be improved. Small issue: DefId
    // can be identical even if the elements are different (mostly in imports).
    // So in case this is an import, we keep everything by adding a "unique id"
    // (which is the position in the vector).
    indices.dedup_by_key(|i| (items[*i].def_id,
                              if items[*i].name.as_ref().is_some() {
                                  Some(full_path(cx, &items[*i]).clone())
                              } else {
                                  None
                              },
                              items[*i].type_(),
                              if items[*i].is_import() {
                                  *i
                              } else {
                                  0
                              }));

    debug!("{:?}", indices);
    let mut curty = None;
    for &idx in &indices {
        let myitem = &items[idx];
        if myitem.is_stripped() {
            continue;
        }

        // Start a new `<h2>`/`<table>` section whenever the item kind changes.
        let myty = Some(myitem.type_());
        if curty == Some(ItemType::ExternCrate) && myty == Some(ItemType::Import) {
            // Put `extern crate` and `use` re-exports in the same section.
            curty = myty;
        } else if myty != curty {
            if curty.is_some() {
                write!(w, "</table>")?;
            }
            curty = myty;
            let (short, name) = item_ty_to_strs(&myty.unwrap());
            write!(w, "<h2 id='{id}' class='section-header'>\
                       <a href=\"#{id}\">{name}</a></h2>\n<table>",
                   id = derive_id(short.to_owned()), name = name)?;
        }

        match myitem.inner {
            clean::ExternCrateItem(ref name, ref src) => {
                use html::format::HRef;
                match *src {
                    Some(ref src) => {
                        write!(w, "<tr><td><code>{}extern crate {} as {};",
                               VisSpace(&myitem.visibility),
                               HRef::new(myitem.def_id, src),
                               name)?
                    }
                    None => {
                        write!(w, "<tr><td><code>{}extern crate {};",
                               VisSpace(&myitem.visibility),
                               HRef::new(myitem.def_id, name))?
                    }
                }
                write!(w, "</code></td></tr>")?;
            }

            clean::ImportItem(ref import) => {
                write!(w, "<tr><td><code>{}{}</code></td></tr>",
                       VisSpace(&myitem.visibility), *import)?;
            }

            _ => {
                if myitem.name.is_none() { continue }

                let stabilities = short_stability(myitem, cx, false);

                let stab_docs = if !stabilities.is_empty() {
                    stabilities.iter()
                               .map(|s| format!("[{}]", s))
                               .collect::<Vec<_>>()
                               .as_slice()
                               .join(" ")
                } else {
                    String::new()
                };

                // Mark unsafe functions with a warning-sign superscript.
                let unsafety_flag = match myitem.inner {
                    clean::FunctionItem(ref func) | clean::ForeignFunctionItem(ref func)
                    if func.header.unsafety == hir::Unsafety::Unsafe => {
                        "<a title='unsafe function' href='#'><sup>⚠</sup></a>"
                    }
                    _ => "",
                };

                let doc_value = myitem.doc_value().unwrap_or("");
                write!(w, "
                       <tr class='{stab} module-item'>
                           <td><a class=\"{class}\" href=\"{href}\"
                                  title='{title_type} {title}'>{name}</a>{unsafety_flag}</td>
                           <td class='docblock-short'>
                               {stab_docs} {docs}
                           </td>
                       </tr>",
                       name = *myitem.name.as_ref().unwrap(),
                       stab_docs = stab_docs,
                       docs = MarkdownSummaryLine(doc_value, &myitem.links()),
                       class = myitem.type_(),
                       stab = myitem.stability_class().unwrap_or("".to_string()),
                       unsafety_flag = unsafety_flag,
                       href = item_path(myitem.type_(), myitem.name.as_ref().unwrap()),
                       title_type = myitem.type_(),
                       title = full_path(cx, myitem))?;
            }
        }
    }

    if curty.is_some() {
        write!(w, "</table>")?;
    }
    Ok(())
}

/// Builds the HTML stability/deprecation/portability badges for an item.
/// `show_reason` selects the long form (with reasons and tracking-issue links)
/// used on item pages, vs. the short form used in module listings.
fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Vec<String> {
    let mut stability = vec![];

    if let Some(stab) = item.stability.as_ref() {
        let deprecated_reason = if show_reason && !stab.deprecated_reason.is_empty() {
            format!(": {}", stab.deprecated_reason)
        } else {
            String::new()
        };
        if !stab.deprecated_since.is_empty() {
            let since = if show_reason {
                format!(" since {}", Escape(&stab.deprecated_since))
            } else {
                String::new()
            };
            // "Deprecated" vs "Deprecating in" depending on whether the
            // deprecation version has been reached yet.
            let text = if stability::deprecation_in_effect(&stab.deprecated_since) {
                format!("Deprecated{}{}", since, MarkdownHtml(&deprecated_reason))
            } else {
                format!("Deprecating in {}{}", Escape(&stab.deprecated_since),
                        MarkdownHtml(&deprecated_reason))
            };
            stability.push(format!("<div class='stab deprecated'>{}</div>", text))
        };

        if stab.level == stability::Unstable {
            if show_reason {
                // Feature name and/or tracking-issue link, when available.
                let unstable_extra = match (!stab.feature.is_empty(),
                                            &cx.shared.issue_tracker_base_url,
                                            stab.issue) {
                    (true, &Some(ref tracker_url), Some(issue_no)) if issue_no > 0 =>
                        format!(" (<code>{} </code><a href=\"{}{}\">#{}</a>)",
                                Escape(&stab.feature), tracker_url, issue_no, issue_no),
                    (false, &Some(ref tracker_url), Some(issue_no)) if issue_no > 0 =>
                        format!(" (<a href=\"{}{}\">#{}</a>)", Escape(&tracker_url), issue_no,
                                issue_no),
                    (true, ..) =>
                        format!(" (<code>{}</code>)", Escape(&stab.feature)),
                    _ => String::new(),
                };
                if stab.unstable_reason.is_empty() {
                    stability.push(format!("<div class='stab unstable'>\
                                            <span class=microscope>🔬</span> \
                                            This is a nightly-only experimental API. {}\
                                            </div>",
                                           unstable_extra));
                } else {
                    // With a reason, use a collapsible <details> block.
                    let text = format!("<summary><span class=microscope>🔬</span> \
                                        This is a nightly-only experimental API. {}\
                                        </summary>{}",
                                       unstable_extra, MarkdownHtml(&stab.unstable_reason));
                    stability.push(format!("<div class='stab unstable'><details>{}</details></div>",
                                   text));
                }
            } else {
                stability.push(format!("<div class='stab unstable'>Experimental</div>"))
            }
        };
    } else if let Some(depr) = item.deprecation.as_ref() {
        let note = if show_reason && !depr.note.is_empty() {
            format!(": {}", depr.note)
        } else {
            String::new()
        };
        let since = if show_reason && !depr.since.is_empty() {
            format!(" since {}", Escape(&depr.since))
        } else {
            String::new()
        };

        let text = if stability::deprecation_in_effect(&depr.since) {
            format!("Deprecated{}{}", since, MarkdownHtml(&note))
        } else {
            format!("Deprecating in {}{}", Escape(&depr.since), MarkdownHtml(&note))
        };
        stability.push(format!("<div class='stab deprecated'>{}</div>", text))
    }

    // Portability badge from `#[doc(cfg(...))]`, if present.
    if let Some(ref cfg) = item.attrs.cfg {
        stability.push(format!("<div class='stab portability'>{}</div>", if show_reason {
            cfg.render_long_html()
        } else {
            cfg.render_short_html()
        }));
    }

    stability
}

/// Renders a const/static initializer expression as ` = <code>expr</code>`,
/// or nothing when the expression is empty.
struct Initializer<'a>(&'a str);

impl<'a> fmt::Display for Initializer<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let Initializer(s) = *self;
        if s.is_empty() { return Ok(()); }
        write!(f, "<code> = </code>")?;
        write!(f, "<code>{}</code>", Escape(s))
    }
}

/// Renders a `const` item page: declaration followed by its docs.
fn item_constant(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
                 c: &clean::Constant) -> fmt::Result {
    write!(w, "<pre class='rust const'>")?;
    render_attributes(w, it)?;
    write!(w, "{vis}const \
               {name}: {typ}{init}</pre>",
           vis = VisSpace(&it.visibility),
           name = it.name.as_ref().unwrap(),
           typ = c.type_,
           init = Initializer(&c.expr))?;
    document(w, cx, it)
}

/// Renders a `static` item page: declaration followed by its docs.
fn item_static(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
               s: &clean::Static) -> fmt::Result {
    write!(w, "<pre class='rust static'>")?;
    render_attributes(w, it)?;
    write!(w, "{vis}static {mutability}\
               {name}: {typ}{init}</pre>",
           vis = VisSpace(&it.visibility),
           mutability = MutableSpace(s.mutability),
           name = it.name.as_ref().unwrap(),
           typ =
s.type_, init = Initializer(&s.expr))?; document(w, cx, it) } fn item_function(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, f: &clean::Function) -> fmt::Result { let name_len = format!("{}{}{}{}{:#}fn {}{:#}", VisSpace(&it.visibility), ConstnessSpace(f.header.constness), UnsafetySpace(f.header.unsafety), AsyncSpace(f.header.asyncness), AbiSpace(f.header.abi), it.name.as_ref().unwrap(), f.generics).len(); write!(w, "{}<pre class='rust fn'>", render_spotlight_traits(it)?)?; render_attributes(w, it)?; write!(w, "{vis}{constness}{unsafety}{asyncness}{abi}fn \ {name}{generics}{decl}{where_clause}</pre>", vis = VisSpace(&it.visibility), constness = ConstnessSpace(f.header.constness), unsafety = UnsafetySpace(f.header.unsafety), asyncness = AsyncSpace(f.header.asyncness), abi = AbiSpace(f.header.abi), name = it.name.as_ref().unwrap(), generics = f.generics, where_clause = WhereClause { gens: &f.generics, indent: 0, end_newline: true }, decl = Method { decl: &f.decl, name_len, indent: 0, })?; document(w, cx, it) } fn render_implementor(cx: &Context, implementor: &Impl, w: &mut fmt::Formatter, implementor_dups: &FxHashMap<&str, (DefId, bool)>) -> fmt::Result { write!(w, "<li><table class='table-display'><tbody><tr><td><code>")?; // If there's already another implementor that has the same abbridged name, use the // full path, for example in `std::iter::ExactSizeIterator` let use_absolute = match implementor.inner_impl().for_ { clean::ResolvedPath { ref path, is_generic: false, .. } | clean::BorrowedRef { type_: box clean::ResolvedPath { ref path, is_generic: false, .. }, .. 
} => implementor_dups[path.last_name()].1, _ => false, }; fmt_impl_for_trait_page(&implementor.inner_impl(), w, use_absolute)?; for it in &implementor.inner_impl().items { if let clean::TypedefItem(ref tydef, _) = it.inner { write!(w, "<span class=\"where fmt-newline\"> ")?; assoc_type(w, it, &vec![], Some(&tydef.type_), AssocItemLink::Anchor(None))?; write!(w, ";</span>")?; } } write!(w, "</code><td>")?; if let Some(l) = (Item { cx, item: &implementor.impl_item }).src_href() { write!(w, "<div class='out-of-band'>")?; write!(w, "<a class='srclink' href='{}' title='{}'>[src]</a>", l, "goto source code")?; write!(w, "</div>")?; } writeln!(w, "</td></tr></tbody></table></li>")?; Ok(()) } fn render_impls(cx: &Context, w: &mut fmt::Formatter, traits: &[&&Impl], containing_item: &clean::Item) -> fmt::Result { for i in traits { let did = i.trait_did().unwrap(); let assoc_link = AssocItemLink::GotoSource(did, &i.inner_impl().provided_trait_methods); render_impl(w, cx, i, assoc_link, RenderMode::Normal, containing_item.stable_since(), true)?; } Ok(()) } fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, t: &clean::Trait) -> fmt::Result { let mut bounds = String::new(); let mut bounds_plain = String::new(); if !t.bounds.is_empty() { if !bounds.is_empty() { bounds.push(' '); bounds_plain.push(' '); } bounds.push_str(": "); bounds_plain.push_str(": "); for (i, p) in t.bounds.iter().enumerate() { if i > 0 { bounds.push_str(" + "); bounds_plain.push_str(" + "); } bounds.push_str(&format!("{}", *p)); bounds_plain.push_str(&format!("{:#}", *p)); } } let types = t.items.iter().filter(|m| m.is_associated_type()).collect::<Vec<_>>(); let consts = t.items.iter().filter(|m| m.is_associated_const()).collect::<Vec<_>>(); let required = t.items.iter().filter(|m| m.is_ty_method()).collect::<Vec<_>>(); let provided = t.items.iter().filter(|m| m.is_method()).collect::<Vec<_>>(); // Output the trait definition wrap_into_docblock(w, |w| { write!(w, "<pre class='rust 
trait'>")?; render_attributes(w, it)?; write!(w, "{}{}{}trait {}{}{}", VisSpace(&it.visibility), UnsafetySpace(t.unsafety), if t.is_auto { "auto " } else { "" }, it.name.as_ref().unwrap(), t.generics, bounds)?; if !t.generics.where_predicates.is_empty() { write!(w, "{}", WhereClause { gens: &t.generics, indent: 0, end_newline: true })?; } else { write!(w, " ")?; } if t.items.is_empty() { write!(w, "{{ }}")?; } else { // FIXME: we should be using a derived_id for the Anchors here write!(w, "{{\n")?; for t in &types { write!(w, " ")?; render_assoc_item(w, t, AssocItemLink::Anchor(None), ItemType::Trait)?; write!(w, ";\n")?; } if !types.is_empty() && !consts.is_empty() { w.write_str("\n")?; } for t in &consts { write!(w, " ")?; render_assoc_item(w, t, AssocItemLink::Anchor(None), ItemType::Trait)?; write!(w, ";\n")?; } if !consts.is_empty() && !required.is_empty() { w.write_str("\n")?; } for (pos, m) in required.iter().enumerate() { write!(w, " ")?; render_assoc_item(w, m, AssocItemLink::Anchor(None), ItemType::Trait)?; write!(w, ";\n")?; if pos < required.len() - 1 { write!(w, "<div class='item-spacer'></div>")?; } } if !required.is_empty() && !provided.is_empty() { w.write_str("\n")?; } for (pos, m) in provided.iter().enumerate() { write!(w, " ")?; render_assoc_item(w, m, AssocItemLink::Anchor(None), ItemType::Trait)?; match m.inner { clean::MethodItem(ref inner) if !inner.generics.where_predicates.is_empty() => { write!(w, ",\n {{ ... }}\n")?; }, _ => { write!(w, " {{ ... 
}}\n")?; }, } if pos < provided.len() - 1 { write!(w, "<div class='item-spacer'></div>")?; } } write!(w, "}}")?; } write!(w, "</pre>") })?; // Trait documentation document(w, cx, it)?; fn trait_item(w: &mut fmt::Formatter, cx: &Context, m: &clean::Item, t: &clean::Item) -> fmt::Result { let name = m.name.as_ref().unwrap(); let item_type = m.type_(); let id = derive_id(format!("{}.{}", item_type, name)); let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); write!(w, "{extra}<h3 id='{id}' class='method'>\ <span id='{ns_id}' class='invisible'><code>", extra = render_spotlight_traits(m)?, id = id, ns_id = ns_id)?; render_assoc_item(w, m, AssocItemLink::Anchor(Some(&id)), ItemType::Impl)?; write!(w, "</code>")?; render_stability_since(w, m, t)?; write!(w, "</span></h3>")?; document(w, cx, m)?; Ok(()) } if !types.is_empty() { write!(w, " <h2 id='associated-types' class='small-section-header'> Associated Types<a href='#associated-types' class='anchor'></a> </h2> <div class='methods'> ")?; for t in &types { trait_item(w, cx, *t, it)?; } write!(w, "</div>")?; } if !consts.is_empty() { write!(w, " <h2 id='associated-const' class='small-section-header'> Associated Constants<a href='#associated-const' class='anchor'></a> </h2> <div class='methods'> ")?; for t in &consts { trait_item(w, cx, *t, it)?; } write!(w, "</div>")?; } // Output the documentation for each function individually if !required.is_empty() { write!(w, " <h2 id='required-methods' class='small-section-header'> Required Methods<a href='#required-methods' class='anchor'></a> </h2> <div class='methods'> ")?; for m in &required { trait_item(w, cx, *m, it)?; } write!(w, "</div>")?; } if !provided.is_empty() { write!(w, " <h2 id='provided-methods' class='small-section-header'> Provided Methods<a href='#provided-methods' class='anchor'></a> </h2> <div class='methods'> ")?; for m in &provided { trait_item(w, cx, *m, it)?; } write!(w, "</div>")?; } // If there are methods directly on this trait object, 
render them here. render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)?; let cache = cache(); let impl_header = " <h2 id='implementors' class='small-section-header'> Implementors<a href='#implementors' class='anchor'></a> </h2> <ul class='item-list' id='implementors-list'> "; let synthetic_impl_header = " <h2 id='synthetic-implementors' class='small-section-header'> Auto implementors<a href='#synthetic-implementors' class='anchor'></a> </h2> <ul class='item-list' id='synthetic-implementors-list'> "; let mut synthetic_types = Vec::new(); if let Some(implementors) = cache.implementors.get(&it.def_id) { // The DefId is for the first Type found with that name. The bool is // if any Types with the same name but different DefId have been found. let mut implementor_dups: FxHashMap<&str, (DefId, bool)> = FxHashMap(); for implementor in implementors { match implementor.inner_impl().for_ { clean::ResolvedPath { ref path, did, is_generic: false, .. } | clean::BorrowedRef { type_: box clean::ResolvedPath { ref path, did, is_generic: false, .. }, .. 
} => { let &mut (prev_did, ref mut has_duplicates) = implementor_dups.entry(path.last_name()).or_insert((did, false)); if prev_did != did { *has_duplicates = true; } } _ => {} } } let (local, foreign) = implementors.iter() .partition::<Vec<_>, _>(|i| i.inner_impl().for_.def_id() .map_or(true, |d| cache.paths.contains_key(&d))); let (synthetic, concrete) = local.iter() .partition::<Vec<_>, _>(|i| i.inner_impl().synthetic); if !foreign.is_empty() { write!(w, " <h2 id='foreign-impls' class='small-section-header'> Implementations on Foreign Types<a href='#foreign-impls' class='anchor'></a> </h2> ")?; for implementor in foreign { let assoc_link = AssocItemLink::GotoSource( implementor.impl_item.def_id, &implementor.inner_impl().provided_trait_methods ); render_impl(w, cx, &implementor, assoc_link, RenderMode::Normal, implementor.impl_item.stable_since(), false)?; } } write!(w, "{}", impl_header)?; for implementor in concrete { render_implementor(cx, implementor, w, &implementor_dups)?; } write!(w, "</ul>")?; if t.auto { write!(w, "{}", synthetic_impl_header)?; for implementor in synthetic { synthetic_types.extend( collect_paths_for_type(implementor.inner_impl().for_.clone()) ); render_implementor(cx, implementor, w, &implementor_dups)?; } write!(w, "</ul>")?; } } else { // even without any implementations to write in, we still want the heading and list, so the // implementors javascript file pulled in below has somewhere to write the impls into write!(w, "{}", impl_header)?; write!(w, "</ul>")?; if t.auto { write!(w, "{}", synthetic_impl_header)?; write!(w, "</ul>")?; } } write!(w, r#"<script type="text/javascript">window.inlined_types=new Set({});</script>"#, as_json(&synthetic_types))?; write!(w, r#"<script type="text/javascript" async src="{root_path}/implementors/{path}/{ty}.{name}.js"> </script>"#, root_path = vec![".."; cx.current.len()].join("/"), path = if it.def_id.is_local() { cx.current.join("/") } else { let (ref path, _) = cache.external_paths[&it.def_id]; 
path[..path.len() - 1].join("/") }, ty = it.type_().css_class(), name = *it.name.as_ref().unwrap())?; Ok(()) } fn naive_assoc_href(it: &clean::Item, link: AssocItemLink) -> String { use html::item_type::ItemType::*; let name = it.name.as_ref().unwrap(); let ty = match it.type_() { Typedef | AssociatedType => AssociatedType, s@_ => s, }; let anchor = format!("#{}.{}", ty, name); match link { AssocItemLink::Anchor(Some(ref id)) => format!("#{}", id), AssocItemLink::Anchor(None) => anchor, AssocItemLink::GotoSource(did, _) => { href(did).map(|p| format!("{}{}", p.0, anchor)).unwrap_or(anchor) } } } fn assoc_const(w: &mut fmt::Formatter, it: &clean::Item, ty: &clean::Type, _default: Option<&String>, link: AssocItemLink) -> fmt::Result { write!(w, "{}const <a href='{}' class=\"constant\"><b>{}</b></a>: {}", VisSpace(&it.visibility), naive_assoc_href(it, link), it.name.as_ref().unwrap(), ty)?; Ok(()) } fn assoc_type<W: fmt::Write>(w: &mut W, it: &clean::Item, bounds: &Vec<clean::GenericBound>, default: Option<&clean::Type>, link: AssocItemLink) -> fmt::Result { write!(w, "type <a href='{}' class=\"type\">{}</a>", naive_assoc_href(it, link), it.name.as_ref().unwrap())?; if !bounds.is_empty() { write!(w, ": {}", GenericBounds(bounds))? } if let Some(default) = default { write!(w, " = {}", default)?; } Ok(()) } fn render_stability_since_raw<'a>(w: &mut fmt::Formatter, ver: Option<&'a str>, containing_ver: Option<&'a str>) -> fmt::Result { if let Some(v) = ver { if containing_ver != ver && v.len() > 0 { write!(w, "<div class='since' title='Stable since Rust version {0}'>{0}</div>", v)? 
} } Ok(()) } fn render_stability_since(w: &mut fmt::Formatter, item: &clean::Item, containing_item: &clean::Item) -> fmt::Result { render_stability_since_raw(w, item.stable_since(), containing_item.stable_since()) } fn render_assoc_item(w: &mut fmt::Formatter, item: &clean::Item, link: AssocItemLink, parent: ItemType) -> fmt::Result { fn method(w: &mut fmt::Formatter, meth: &clean::Item, header: hir::FnHeader, g: &clean::Generics, d: &clean::FnDecl, link: AssocItemLink, parent: ItemType) -> fmt::Result { let name = meth.name.as_ref().unwrap(); let anchor = format!("#{}.{}", meth.type_(), name); let href = match link { AssocItemLink::Anchor(Some(ref id)) => format!("#{}", id), AssocItemLink::Anchor(None) => anchor, AssocItemLink::GotoSource(did, provided_methods) => { // We're creating a link from an impl-item to the corresponding // trait-item and need to map the anchored type accordingly. let ty = if provided_methods.contains(name) { ItemType::Method } else { ItemType::TyMethod }; href(did).map(|p| format!("{}#{}.{}", p.0, ty, name)).unwrap_or(anchor) } }; let mut head_len = format!("{}{}{}{}{:#}fn {}{:#}", VisSpace(&meth.visibility), ConstnessSpace(header.constness), UnsafetySpace(header.unsafety), AsyncSpace(header.asyncness), AbiSpace(header.abi), name, *g).len(); let (indent, end_newline) = if parent == ItemType::Trait { head_len += 4; (4, false) } else { (0, true) }; render_attributes(w, meth)?; write!(w, "{}{}{}{}{}fn <a href='{href}' class='fnname'>{name}</a>\ {generics}{decl}{where_clause}", VisSpace(&meth.visibility), ConstnessSpace(header.constness), UnsafetySpace(header.unsafety), AsyncSpace(header.asyncness), AbiSpace(header.abi), href = href, name = name, generics = *g, decl = Method { decl: d, name_len: head_len, indent, }, where_clause = WhereClause { gens: g, indent, end_newline, }) } match item.inner { clean::StrippedItem(..) 
=> Ok(()), clean::TyMethodItem(ref m) => { method(w, item, m.header, &m.generics, &m.decl, link, parent) } clean::MethodItem(ref m) => { method(w, item, m.header, &m.generics, &m.decl, link, parent) } clean::AssociatedConstItem(ref ty, ref default) => { assoc_const(w, item, ty, default.as_ref(), link) } clean::AssociatedTypeItem(ref bounds, ref default) => { assoc_type(w, item, bounds, default.as_ref(), link) } _ => panic!("render_assoc_item called on non-associated-item") } } fn item_struct(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, s: &clean::Struct) -> fmt::Result { wrap_into_docblock(w, |w| { write!(w, "<pre class='rust struct'>")?; render_attributes(w, it)?; render_struct(w, it, Some(&s.generics), s.struct_type, &s.fields, "", true)?; write!(w, "</pre>") })?; document(w, cx, it)?; let mut fields = s.fields.iter().filter_map(|f| { match f.inner { clean::StructFieldItem(ref ty) => Some((f, ty)), _ => None, } }).peekable(); if let doctree::Plain = s.struct_type { if fields.peek().is_some() { write!(w, "<h2 id='fields' class='fields small-section-header'> Fields<a href='#fields' class='anchor'></a></h2>")?; for (field, ty) in fields { let id = derive_id(format!("{}.{}", ItemType::StructField, field.name.as_ref().unwrap())); let ns_id = derive_id(format!("{}.{}", field.name.as_ref().unwrap(), ItemType::StructField.name_space())); write!(w, "<span id=\"{id}\" class=\"{item_type} small-section-header\"> <a href=\"#{id}\" class=\"anchor field\"></a> <span id=\"{ns_id}\" class='invisible'> <code>{name}: {ty}</code> </span></span>", item_type = ItemType::StructField, id = id, ns_id = ns_id, name = field.name.as_ref().unwrap(), ty = ty)?; if let Some(stability_class) = field.stability_class() { write!(w, "<span class='stab {stab}'></span>", stab = stability_class)?; } document(w, cx, field)?; } } } render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All) } fn item_union(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, s: &clean::Union) -> 
fmt::Result { wrap_into_docblock(w, |w| { write!(w, "<pre class='rust union'>")?; render_attributes(w, it)?; render_union(w, it, Some(&s.generics), &s.fields, "", true)?; write!(w, "</pre>") })?; document(w, cx, it)?; let mut fields = s.fields.iter().filter_map(|f| { match f.inner { clean::StructFieldItem(ref ty) => Some((f, ty)), _ => None, } }).peekable(); if fields.peek().is_some() { write!(w, "<h2 id='fields' class='fields small-section-header'> Fields<a href='#fields' class='anchor'></a></h2>")?; for (field, ty) in fields { let name = field.name.as_ref().expect("union field name"); let id = format!("{}.{}", ItemType::StructField, name); write!(w, "<span id=\"{id}\" class=\"{shortty} small-section-header\">\ <a href=\"#{id}\" class=\"anchor field\"></a>\ <span class='invisible'><code>{name}: {ty}</code></span>\ </span>", id = id, name = name, shortty = ItemType::StructField, ty = ty)?; if let Some(stability_class) = field.stability_class() { write!(w, "<span class='stab {stab}'></span>", stab = stability_class)?; } document(w, cx, field)?; } } render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All) } fn item_enum(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, e: &clean::Enum) -> fmt::Result { wrap_into_docblock(w, |w| { write!(w, "<pre class='rust enum'>")?; render_attributes(w, it)?; write!(w, "{}enum {}{}{}", VisSpace(&it.visibility), it.name.as_ref().unwrap(), e.generics, WhereClause { gens: &e.generics, indent: 0, end_newline: true })?; if e.variants.is_empty() && !e.variants_stripped { write!(w, " {{}}")?; } else { write!(w, " {{\n")?; for v in &e.variants { write!(w, " ")?; let name = v.name.as_ref().unwrap(); match v.inner { clean::VariantItem(ref var) => { match var.kind { clean::VariantKind::CLike => write!(w, "{}", name)?, clean::VariantKind::Tuple(ref tys) => { write!(w, "{}(", name)?; for (i, ty) in tys.iter().enumerate() { if i > 0 { write!(w, ",&nbsp;")? 
} write!(w, "{}", *ty)?; } write!(w, ")")?; } clean::VariantKind::Struct(ref s) => { render_struct(w, v, None, s.struct_type, &s.fields, " ", false)?; } } } _ => unreachable!() } write!(w, ",\n")?; } if e.variants_stripped { write!(w, " // some variants omitted\n")?; } write!(w, "}}")?; } write!(w, "</pre>") })?; document(w, cx, it)?; if !e.variants.is_empty() { write!(w, "<h2 id='variants' class='variants small-section-header'> Variants<a href='#variants' class='anchor'></a></h2>\n")?; for variant in &e.variants { let id = derive_id(format!("{}.{}", ItemType::Variant, variant.name.as_ref().unwrap())); let ns_id = derive_id(format!("{}.{}", variant.name.as_ref().unwrap(), ItemType::Variant.name_space())); write!(w, "<span id=\"{id}\" class=\"variant small-section-header\">\ <a href=\"#{id}\" class=\"anchor field\"></a>\ <span id='{ns_id}' class='invisible'><code>{name}", id = id, ns_id = ns_id, name = variant.name.as_ref().unwrap())?; if let clean::VariantItem(ref var) = variant.inner { if let clean::VariantKind::Tuple(ref tys) = var.kind { write!(w, "(")?; for (i, ty) in tys.iter().enumerate() { if i > 0 { write!(w, ",&nbsp;")?; } write!(w, "{}", *ty)?; } write!(w, ")")?; } } write!(w, "</code></span></span>")?; document(w, cx, variant)?; use clean::{Variant, VariantKind}; if let clean::VariantItem(Variant { kind: VariantKind::Struct(ref s) }) = variant.inner { let variant_id = derive_id(format!("{}.{}.fields", ItemType::Variant, variant.name.as_ref().unwrap())); write!(w, "<span class='docblock autohide sub-variant' id='{id}'>", id = variant_id)?; write!(w, "<h3 class='fields'>Fields of <code>{name}</code></h3>\n <table>", name = variant.name.as_ref().unwrap())?; for field in &s.fields { use clean::StructFieldItem; if let StructFieldItem(ref ty) = field.inner { let id = derive_id(format!("variant.{}.field.{}", variant.name.as_ref().unwrap(), field.name.as_ref().unwrap())); let ns_id = derive_id(format!("{}.{}.{}.{}", variant.name.as_ref().unwrap(), 
ItemType::Variant.name_space(), field.name.as_ref().unwrap(), ItemType::StructField.name_space())); write!(w, "<tr><td \ id='{id}'>\ <span id='{ns_id}' class='invisible'>\ <code>{f}:&nbsp;{t}</code></span></td><td>", id = id, ns_id = ns_id, f = field.name.as_ref().unwrap(), t = *ty)?; document(w, cx, field)?; write!(w, "</td></tr>")?; } } write!(w, "</table></span>")?; } render_stability_since(w, variant, it)?; } } render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)?; Ok(()) } fn render_attribute(attr: &ast::MetaItem) -> Option<String> { let name = attr.name(); if attr.is_word() { Some(format!("{}", name)) } else if let Some(v) = attr.value_str() { Some(format!("{} = {:?}", name, v.as_str())) } else if let Some(values) = attr.meta_item_list() { let display: Vec<_> = values.iter().filter_map(|attr| { attr.meta_item().and_then(|mi| render_attribute(mi)) }).collect(); if display.len() > 0 { Some(format!("{}({})", name, display.join(", "))) } else { None } } else { None } } const ATTRIBUTE_WHITELIST: &'static [&'static str] = &[ "export_name", "lang", "link_section", "must_use", "no_mangle", "repr", "unsafe_destructor_blind_to_params" ]; fn render_attributes(w: &mut fmt::Formatter, it: &clean::Item) -> fmt::Result { let mut attrs = String::new(); for attr in &it.attrs.other_attrs { let name = attr.name(); if !ATTRIBUTE_WHITELIST.contains(&&*name.as_str()) { continue; } if let Some(s) = render_attribute(&attr.meta().unwrap()) { attrs.push_str(&format!("#[{}]\n", s)); } } if attrs.len() > 0 { write!(w, "<div class=\"docblock attributes\">{}</div>", &attrs)?; } Ok(()) } fn render_struct(w: &mut fmt::Formatter, it: &clean::Item, g: Option<&clean::Generics>, ty: doctree::StructType, fields: &[clean::Item], tab: &str, structhead: bool) -> fmt::Result { write!(w, "{}{}{}", VisSpace(&it.visibility), if structhead {"struct "} else {""}, it.name.as_ref().unwrap())?; if let Some(g) = g { write!(w, "{}", g)? 
} match ty { doctree::Plain => { if let Some(g) = g { write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: true })? } let mut has_visible_fields = false; write!(w, " {{")?; for field in fields { if let clean::StructFieldItem(ref ty) = field.inner { write!(w, "\n{} {}{}: {},", tab, VisSpace(&field.visibility), field.name.as_ref().unwrap(), *ty)?; has_visible_fields = true; } } if has_visible_fields { if it.has_stripped_fields().unwrap() { write!(w, "\n{} // some fields omitted", tab)?; } write!(w, "\n{}", tab)?; } else if it.has_stripped_fields().unwrap() { // If there are no visible fields we can just display // `{ /* fields omitted */ }` to save space. write!(w, " /* fields omitted */ ")?; } write!(w, "}}")?; } doctree::Tuple => { write!(w, "(")?; for (i, field) in fields.iter().enumerate() { if i > 0 { write!(w, ", ")?; } match field.inner { clean::StrippedItem(box clean::StructFieldItem(..)) => { write!(w, "_")? } clean::StructFieldItem(ref ty) => { write!(w, "{}{}", VisSpace(&field.visibility), *ty)? } _ => unreachable!() } } write!(w, ")")?; if let Some(g) = g { write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: false })? } write!(w, ";")?; } doctree::Unit => { // Needed for PhantomData. if let Some(g) = g { write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: false })? 
} write!(w, ";")?; } } Ok(()) } fn render_union(w: &mut fmt::Formatter, it: &clean::Item, g: Option<&clean::Generics>, fields: &[clean::Item], tab: &str, structhead: bool) -> fmt::Result { write!(w, "{}{}{}", VisSpace(&it.visibility), if structhead {"union "} else {""}, it.name.as_ref().unwrap())?; if let Some(g) = g { write!(w, "{}", g)?; write!(w, "{}", WhereClause { gens: g, indent: 0, end_newline: true })?; } write!(w, " {{\n{}", tab)?; for field in fields { if let clean::StructFieldItem(ref ty) = field.inner { write!(w, " {}{}: {},\n{}", VisSpace(&field.visibility), field.name.as_ref().unwrap(), *ty, tab)?; } } if it.has_stripped_fields().unwrap() { write!(w, " // some fields omitted\n{}", tab)?; } write!(w, "}}")?; Ok(()) } #[derive(Copy, Clone)] enum AssocItemLink<'a> { Anchor(Option<&'a str>), GotoSource(DefId, &'a FxHashSet<String>), } impl<'a> AssocItemLink<'a> { fn anchor(&self, id: &'a String) -> Self { match *self { AssocItemLink::Anchor(_) => { AssocItemLink::Anchor(Some(&id)) }, ref other => *other, } } } enum AssocItemRender<'a> { All, DerefFor { trait_: &'a clean::Type, type_: &'a clean::Type, deref_mut_: bool } } #[derive(Copy, Clone, PartialEq)] enum RenderMode { Normal, ForDeref { mut_: bool }, } fn render_assoc_items(w: &mut fmt::Formatter, cx: &Context, containing_item: &clean::Item, it: DefId, what: AssocItemRender) -> fmt::Result { let c = cache(); let v = match c.impls.get(&it) { Some(v) => v, None => return Ok(()), }; let (non_trait, traits): (Vec<_>, _) = v.iter().partition(|i| { i.inner_impl().trait_.is_none() }); if !non_trait.is_empty() { let render_mode = match what { AssocItemRender::All => { write!(w, " <h2 id='methods' class='small-section-header'> Methods<a href='#methods' class='anchor'></a> </h2> ")?; RenderMode::Normal } AssocItemRender::DerefFor { trait_, type_, deref_mut_ } => { write!(w, " <h2 id='deref-methods' class='small-section-header'> Methods from {}&lt;Target = {}&gt;<a href='#deref-methods' class='anchor'></a> </h2> 
", trait_, type_)?; RenderMode::ForDeref { mut_: deref_mut_ } } }; for i in &non_trait { render_impl(w, cx, i, AssocItemLink::Anchor(None), render_mode, containing_item.stable_since(), true)?; } } if let AssocItemRender::DerefFor { .. } = what { return Ok(()); } if !traits.is_empty() { let deref_impl = traits.iter().find(|t| { t.inner_impl().trait_.def_id() == c.deref_trait_did }); if let Some(impl_) = deref_impl { let has_deref_mut = traits.iter().find(|t| { t.inner_impl().trait_.def_id() == c.deref_mut_trait_did }).is_some(); render_deref_methods(w, cx, impl_, containing_item, has_deref_mut)?; } let (synthetic, concrete) = traits .iter() .partition::<Vec<_>, _>(|t| t.inner_impl().synthetic); struct RendererStruct<'a, 'b, 'c>(&'a Context, Vec<&'b &'b Impl>, &'c clean::Item); impl<'a, 'b, 'c> fmt::Display for RendererStruct<'a, 'b, 'c> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { render_impls(self.0, fmt, &self.1, self.2) } } let impls = format!("{}", RendererStruct(cx, concrete, containing_item)); if !impls.is_empty() { write!(w, " <h2 id='implementations' class='small-section-header'> Trait Implementations<a href='#implementations' class='anchor'></a> </h2> <div id='implementations-list'>{}</div>", impls)?; } if !synthetic.is_empty() { write!(w, " <h2 id='synthetic-implementations' class='small-section-header'> Auto Trait Implementations<a href='#synthetic-implementations' class='anchor'></a> </h2> <div id='synthetic-implementations-list'> ")?; render_impls(cx, w, &synthetic, containing_item)?; write!(w, "</div>")?; } } Ok(()) } fn render_deref_methods(w: &mut fmt::Formatter, cx: &Context, impl_: &Impl, container_item: &clean::Item, deref_mut: bool) -> fmt::Result { let deref_type = impl_.inner_impl().trait_.as_ref().unwrap(); let target = impl_.inner_impl().items.iter().filter_map(|item| { match item.inner { clean::TypedefItem(ref t, true) => Some(&t.type_), _ => None, } }).next().expect("Expected associated type binding"); let what = 
AssocItemRender::DerefFor { trait_: deref_type, type_: target, deref_mut_: deref_mut }; if let Some(did) = target.def_id() { render_assoc_items(w, cx, container_item, did, what) } else { if let Some(prim) = target.primitive_type() { if let Some(&did) = cache().primitive_locations.get(&prim) { render_assoc_items(w, cx, container_item, did, what)?; } } Ok(()) } } fn should_render_item(item: &clean::Item, deref_mut_: bool) -> bool { let self_type_opt = match item.inner { clean::MethodItem(ref method) => method.decl.self_type(), clean::TyMethodItem(ref method) => method.decl.self_type(), _ => None }; if let Some(self_ty) = self_type_opt { let (by_mut_ref, by_box, by_value) = match self_ty { SelfTy::SelfBorrowed(_, mutability) | SelfTy::SelfExplicit(clean::BorrowedRef { mutability, .. }) => { (mutability == Mutability::Mutable, false, false) }, SelfTy::SelfExplicit(clean::ResolvedPath { did, .. }) => { (false, Some(did) == cache().owned_box_did, false) }, SelfTy::SelfValue => (false, false, true), _ => (false, false, false), }; (deref_mut_ || !by_mut_ref) && !by_box && !by_value } else { false } } fn render_spotlight_traits(item: &clean::Item) -> Result<String, fmt::Error> { let mut out = String::new(); match item.inner { clean::FunctionItem(clean::Function { ref decl, .. }) | clean::TyMethodItem(clean::TyMethod { ref decl, .. }) | clean::MethodItem(clean::Method { ref decl, .. }) | clean::ForeignFunctionItem(clean::Function { ref decl, .. 
}) => { out = spotlight_decl(decl)?; } _ => {} } Ok(out) } fn spotlight_decl(decl: &clean::FnDecl) -> Result<String, fmt::Error> { let mut out = String::new(); let mut trait_ = String::new(); if let Some(did) = decl.output.def_id() { let c = cache(); if let Some(impls) = c.impls.get(&did) { for i in impls { let impl_ = i.inner_impl(); if impl_.trait_.def_id().map_or(false, |d| c.traits[&d].is_spotlight) { if out.is_empty() { out.push_str( &format!("<h3 class=\"important\">Important traits for {}</h3>\ <code class=\"content\">", impl_.for_)); trait_.push_str(&format!("{}", impl_.for_)); } //use the "where" class here to make it small out.push_str(&format!("<span class=\"where fmt-newline\">{}</span>", impl_)); let t_did = impl_.trait_.def_id().unwrap(); for it in &impl_.items { if let clean::TypedefItem(ref tydef, _) = it.inner { out.push_str("<span class=\"where fmt-newline\"> "); assoc_type(&mut out, it, &vec![], Some(&tydef.type_), AssocItemLink::GotoSource(t_did, &FxHashSet()))?; out.push_str(";</span>"); } } } } } } if !out.is_empty() { out.insert_str(0, &format!("<div class=\"important-traits\"><div class='tooltip'>ⓘ\ <span class='tooltiptext'>Important traits for {}</span></div>\ <div class=\"content hidden\">", trait_)); out.push_str("</code></div></div>"); } Ok(out) } fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLink, render_mode: RenderMode, outer_version: Option<&str>, show_def_docs: bool) -> fmt::Result { if render_mode == RenderMode::Normal { let id = derive_id(match i.inner_impl().trait_ { Some(ref t) => format!("impl-{}", small_url_encode(&format!("{:#}", t))), None => "impl".to_string(), }); write!(w, "<h3 id='{}' class='impl'><span class='in-band'><table class='table-display'>\ <tbody><tr><td><code>{}</code>", id, i.inner_impl())?; write!(w, "<a href='#{}' class='anchor'></a>", id)?; write!(w, "</span></td><td><span class='out-of-band'>")?; let since = i.impl_item.stability.as_ref().map(|s| &s.since[..]); if let 
Some(l) = (Item { item: &i.impl_item, cx: cx }).src_href() { write!(w, "<div class='ghost'></div>")?; render_stability_since_raw(w, since, outer_version)?; write!(w, "<a class='srclink' href='{}' title='{}'>[src]</a>", l, "goto source code")?; } else { render_stability_since_raw(w, since, outer_version)?; } write!(w, "</span></td></tr></tbody></table></h3>")?; if let Some(ref dox) = cx.shared.maybe_collapsed_doc_value(&i.impl_item) { write!(w, "<div class='docblock'>{}</div>", Markdown(&*dox, &i.impl_item.links()))?; } } fn doc_impl_item(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item, link: AssocItemLink, render_mode: RenderMode, is_default_item: bool, outer_version: Option<&str>, trait_: Option<&clean::Trait>, show_def_docs: bool) -> fmt::Result { let item_type = item.type_(); let name = item.name.as_ref().unwrap(); let render_method_item: bool = match render_mode { RenderMode::Normal => true, RenderMode::ForDeref { mut_: deref_mut_ } => should_render_item(&item, deref_mut_), }; match item.inner { clean::MethodItem(clean::Method { ref decl, .. }) | clean::TyMethodItem(clean::TyMethod{ ref decl, .. 
}) => { // Only render when the method is not static or we allow static methods if render_method_item { let id = derive_id(format!("{}.{}", item_type, name)); let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?; write!(w, "{}", spotlight_decl(decl)?)?; write!(w, "<span id='{}' class='invisible'>", ns_id)?; write!(w, "<table class='table-display'><tbody><tr><td><code>")?; render_assoc_item(w, item, link.anchor(&id), ItemType::Impl)?; write!(w, "</code>")?; if let Some(l) = (Item { cx, item }).src_href() { write!(w, "</span></td><td><span class='out-of-band'>")?; write!(w, "<div class='ghost'></div>")?; render_stability_since_raw(w, item.stable_since(), outer_version)?; write!(w, "<a class='srclink' href='{}' title='{}'>[src]</a>", l, "goto source code")?; } else { write!(w, "</td><td>")?; render_stability_since_raw(w, item.stable_since(), outer_version)?; } write!(w, "</td></tr></tbody></table></span></h4>")?; } } clean::TypedefItem(ref tydef, _) => { let id = derive_id(format!("{}.{}", ItemType::AssociatedType, name)); let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?; write!(w, "<span id='{}' class='invisible'><code>", ns_id)?; assoc_type(w, item, &Vec::new(), Some(&tydef.type_), link.anchor(&id))?; write!(w, "</code></span></h4>\n")?; } clean::AssociatedConstItem(ref ty, ref default) => { let id = derive_id(format!("{}.{}", item_type, name)); let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); write!(w, "<h4 id='{}' class=\"{}\">", id, item_type)?; write!(w, "<span id='{}' class='invisible'><code>", ns_id)?; assoc_const(w, item, ty, default.as_ref(), link.anchor(&id))?; write!(w, "</code></span></h4>\n")?; } clean::AssociatedTypeItem(ref bounds, ref default) => { let id = derive_id(format!("{}.{}", item_type, name)); let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); write!(w, 
"<h4 id='{}' class=\"{}\">", id, item_type)?; write!(w, "<span id='{}' class='invisible'><code>", ns_id)?; assoc_type(w, item, bounds, default.as_ref(), link.anchor(&id))?; write!(w, "</code></span></h4>\n")?; } clean::StrippedItem(..) => return Ok(()), _ => panic!("can't make docs for trait item with name {:?}", item.name) } if render_method_item || render_mode == RenderMode::Normal { let prefix = render_assoc_const_value(item); if !is_default_item { if let Some(t) = trait_ { // The trait item may have been stripped so we might not // find any documentation or stability for it. if let Some(it) = t.items.iter().find(|i| i.name == item.name) { // We need the stability of the item from the trait // because impls can't have a stability. document_stability(w, cx, it)?; if item.doc_value().is_some() { document_full(w, item, cx, &prefix)?; } else if show_def_docs { // In case the item isn't documented, // provide short documentation from the trait. document_short(w, it, link, &prefix)?; } } } else { document_stability(w, cx, item)?; if show_def_docs { document_full(w, item, cx, &prefix)?; } } } else { document_stability(w, cx, item)?; if show_def_docs { document_short(w, item, link, &prefix)?; } } } Ok(()) } let traits = &cache().traits; let trait_ = i.trait_did().map(|did| &traits[&did]); if !show_def_docs { write!(w, "<span class='docblock autohide'>")?; } write!(w, "<div class='impl-items'>")?; for trait_item in &i.inner_impl().items { doc_impl_item(w, cx, trait_item, link, render_mode, false, outer_version, trait_, show_def_docs)?; } fn render_default_items(w: &mut fmt::Formatter, cx: &Context, t: &clean::Trait, i: &clean::Impl, render_mode: RenderMode, outer_version: Option<&str>, show_def_docs: bool) -> fmt::Result { for trait_item in &t.items { let n = trait_item.name.clone(); if i.items.iter().find(|m| m.name == n).is_some() { continue; } let did = i.trait_.as_ref().unwrap().def_id().unwrap(); let assoc_link = AssocItemLink::GotoSource(did, 
&i.provided_trait_methods); doc_impl_item(w, cx, trait_item, assoc_link, render_mode, true, outer_version, None, show_def_docs)?; } Ok(()) } // If we've implemented a trait, then also emit documentation for all // default items which weren't overridden in the implementation block. if let Some(t) = trait_ { render_default_items(w, cx, t, &i.inner_impl(), render_mode, outer_version, show_def_docs)?; } write!(w, "</div>")?; if !show_def_docs { write!(w, "</span>")?; } Ok(()) } fn item_typedef(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, t: &clean::Typedef) -> fmt::Result { write!(w, "<pre class='rust typedef'>")?; render_attributes(w, it)?; write!(w, "type {}{}{where_clause} = {type_};</pre>", it.name.as_ref().unwrap(), t.generics, where_clause = WhereClause { gens: &t.generics, indent: 0, end_newline: true }, type_ = t.type_)?; document(w, cx, it)?; // Render any items associated directly to this alias, as otherwise they // won't be visible anywhere in the docs. It would be nice to also show // associated items from the aliased type (see discussion in #32077), but // we need #14072 to make sense of the generics. render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All) } fn item_foreign_type(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item) -> fmt::Result { writeln!(w, "<pre class='rust foreigntype'>extern {{")?; render_attributes(w, it)?; write!( w, " {}type {};\n}}</pre>", VisSpace(&it.visibility), it.name.as_ref().unwrap(), )?; document(w, cx, it)?; render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All) } impl<'a> fmt::Display for Sidebar<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let cx = self.cx; let it = self.item; let parentlen = cx.current.len() - if it.is_mod() {1} else {0}; if it.is_struct() || it.is_trait() || it.is_primitive() || it.is_union() || it.is_enum() || it.is_mod() || it.is_typedef() { write!(fmt, "<p class='location'>{}{}</p>", match it.inner { clean::StructItem(..) 
=> "Struct ", clean::TraitItem(..) => "Trait ", clean::PrimitiveItem(..) => "Primitive Type ", clean::UnionItem(..) => "Union ", clean::EnumItem(..) => "Enum ", clean::TypedefItem(..) => "Type Definition ", clean::ForeignTypeItem => "Foreign Type ", clean::ModuleItem(..) => if it.is_crate() { "Crate " } else { "Module " }, _ => "", }, it.name.as_ref().unwrap())?; } if it.is_crate() { if let Some(ref version) = cache().crate_version { write!(fmt, "<div class='block version'>\ <p>Version {}</p>\ </div> <a id='all-types' href='all.html'><p>See all {}'s items</p></a>", version, it.name.as_ref().unwrap())?; } } write!(fmt, "<div class=\"sidebar-elems\">")?; match it.inner { clean::StructItem(ref s) => sidebar_struct(fmt, it, s)?, clean::TraitItem(ref t) => sidebar_trait(fmt, it, t)?, clean::PrimitiveItem(ref p) => sidebar_primitive(fmt, it, p)?, clean::UnionItem(ref u) => sidebar_union(fmt, it, u)?, clean::EnumItem(ref e) => sidebar_enum(fmt, it, e)?, clean::TypedefItem(ref t, _) => sidebar_typedef(fmt, it, t)?, clean::ModuleItem(ref m) => sidebar_module(fmt, it, &m.items)?, clean::ForeignTypeItem => sidebar_foreign_type(fmt, it)?, _ => (), } // The sidebar is designed to display sibling functions, modules and // other miscellaneous information. since there are lots of sibling // items (and that causes quadratic growth in large modules), // we refactor common parts into a shared JavaScript file per module. // still, we don't move everything into JS because we want to preserve // as much HTML as possible in order to allow non-JS-enabled browsers // to navigate the documentation (though slightly inefficiently). write!(fmt, "<p class='location'>")?; for (i, name) in cx.current.iter().take(parentlen).enumerate() { if i > 0 { write!(fmt, "::<wbr>")?; } write!(fmt, "<a href='{}index.html'>{}</a>", &cx.root_path()[..(cx.current.len() - i - 1) * 3], *name)?; } write!(fmt, "</p>")?; // Sidebar refers to the enclosing module, not this module. 
let relpath = if it.is_mod() { "../" } else { "" }; write!(fmt, "<script>window.sidebarCurrent = {{\ name: '{name}', \ ty: '{ty}', \ relpath: '{path}'\ }};</script>", name = it.name.as_ref().map(|x| &x[..]).unwrap_or(""), ty = it.type_().css_class(), path = relpath)?; if parentlen == 0 { // There is no sidebar-items.js beyond the crate root path // FIXME maybe dynamic crate loading can be merged here } else { write!(fmt, "<script defer src=\"{path}sidebar-items.js\"></script>", path = relpath)?; } // Closes sidebar-elems div. write!(fmt, "</div>")?; Ok(()) } } fn get_methods(i: &clean::Impl, for_deref: bool) -> Vec<String> { i.items.iter().filter_map(|item| { match item.name { // Maybe check with clean::Visibility::Public as well? Some(ref name) if !name.is_empty() && item.visibility.is_some() && item.is_method() => { if !for_deref || should_render_item(item, false) { Some(format!("<a href=\"#method.{name}\">{name}</a>", name = name)) } else { None } } _ => None, } }).collect::<Vec<_>>() } // The point is to url encode any potential character from a type with genericity. 
/// Percent-encodes the characters that can appear in a rendered generic type
/// (angle brackets, spaces, punctuation) so the result is usable inside a URL
/// fragment such as `#impl-...`. Only the characters listed here are escaped.
fn small_url_encode(s: &str) -> String {
    s.replace("<", "%3C")
     .replace(">", "%3E")
     .replace(" ", "%20")
     .replace("?", "%3F")
     .replace("'", "%27")
     .replace("&", "%26")
     .replace(",", "%2C")
     .replace(":", "%3A")
     .replace(";", "%3B")
     .replace("[", "%5B")
     .replace("]", "%5D")
     .replace("\"", "%22")
}

/// Builds the sidebar HTML fragment listing this item's associated items:
/// inherent methods, methods reachable through a `Deref` impl, and links to
/// its trait / auto-trait implementation sections. Returns an empty string
/// when the cache has no impls recorded for `it`.
fn sidebar_assoc_items(it: &clean::Item) -> String {
    let mut out = String::new();
    let c = cache();
    if let Some(v) = c.impls.get(&it.def_id) {
        // Inherent (non-trait) impls contribute a flat "Methods" link list.
        let ret = v.iter()
                   .filter(|i| i.inner_impl().trait_.is_none())
                   .flat_map(|i| get_methods(i.inner_impl(), false))
                   .collect::<String>();
        if !ret.is_empty() {
            out.push_str(&format!("<a class=\"sidebar-title\" href=\"#methods\">Methods\
                                   </a><div class=\"sidebar-links\">{}</div>", ret));
        }

        if v.iter().any(|i| i.inner_impl().trait_.is_some()) {
            // If this type implements Deref, also surface the target type's
            // inherent methods under a "Methods from ...<Target=...>" heading.
            if let Some(impl_) = v.iter()
                                  .filter(|i| i.inner_impl().trait_.is_some())
                                  .find(|i| i.inner_impl().trait_.def_id() == c.deref_trait_did) {
                // The Deref impl's `type Target = ...;` associated type gives
                // us the type whose methods we want to list.
                if let Some(target) = impl_.inner_impl().items.iter().filter_map(|item| {
                    match item.inner {
                        clean::TypedefItem(ref t, true) => Some(&t.type_),
                        _ => None,
                    }
                }).next() {
                    // Resolve the target either as a named type or, failing
                    // that, as a primitive registered in the cache.
                    let inner_impl = target.def_id().or(target.primitive_type().and_then(|prim| {
                        c.primitive_locations.get(&prim).cloned()
                    })).and_then(|did| c.impls.get(&did));
                    if let Some(impls) = inner_impl {
                        out.push_str("<a class=\"sidebar-title\" href=\"#deref-methods\">");
                        out.push_str(&format!("Methods from {}&lt;Target={}&gt;",
                                              Escape(&format!("{:#}",
                                                     impl_.inner_impl().trait_.as_ref().unwrap())),
                                              Escape(&format!("{:#}", target))));
                        out.push_str("</a>");
                        let ret = impls.iter()
                                       .filter(|i| i.inner_impl().trait_.is_none())
                                       .flat_map(|i| get_methods(i.inner_impl(), true))
                                       .collect::<String>();
                        out.push_str(&format!("<div class=\"sidebar-links\">{}</div>", ret));
                    }
                }
            }

            // Renders a list of trait-impl links, deduplicated via `links`;
            // negative impls are prefixed with "!".
            let format_impls = |impls: Vec<&Impl>| {
                let mut links = HashSet::new();
                impls.iter()
                     .filter_map(|i| {
                         let is_negative_impl = is_negative_impl(i.inner_impl());
                         if let Some(ref i) = i.inner_impl().trait_ {
                             let i_display = format!("{:#}", i);
                             let out = Escape(&i_display);
                             let encoded = small_url_encode(&format!("{:#}", i));
                             let generated = format!("<a href=\"#impl-{}\">{}{}</a>",
                                                     encoded,
                                                     if is_negative_impl { "!" } else { "" },
                                                     out);
                             if links.insert(generated.clone()) {
                                 Some(generated)
                             } else {
                                 None
                             }
                         } else {
                             None
                         }
                     })
                     .collect::<String>()
            };

            // Auto-trait ("synthetic") impls get their own sidebar section.
            let (synthetic, concrete) = v
                .iter()
                .partition::<Vec<_>, _>(|i| i.inner_impl().synthetic);
            let concrete_format = format_impls(concrete);
            let synthetic_format = format_impls(synthetic);

            if !concrete_format.is_empty() {
                out.push_str("<a class=\"sidebar-title\" href=\"#implementations\">\
                              Trait Implementations</a>");
                out.push_str(&format!("<div class=\"sidebar-links\">{}</div>", concrete_format));
            }

            if !synthetic_format.is_empty() {
                out.push_str("<a class=\"sidebar-title\" href=\"#synthetic-implementations\">\
                              Auto Trait Implementations</a>");
                out.push_str(&format!("<div class=\"sidebar-links\">{}</div>", synthetic_format));
            }
        }
    }

    out
}

/// Writes the sidebar block for a struct page: a "Fields" section (only for
/// plain braced structs) followed by the shared associated-items sections.
fn sidebar_struct(fmt: &mut fmt::Formatter, it: &clean::Item,
                  s: &clean::Struct) -> fmt::Result {
    let mut sidebar = String::new();
    let fields = get_struct_fields_name(&s.fields);

    if !fields.is_empty() {
        // Tuple/unit structs have no named fields worth listing.
        if let doctree::Plain = s.struct_type {
            sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#fields\">Fields</a>\
                                       <div class=\"sidebar-links\">{}</div>", fields));
        }
    }

    sidebar.push_str(&sidebar_assoc_items(it));

    if !sidebar.is_empty() {
        write!(fmt, "<div class=\"block items\">{}</div>", sidebar)?;
    }
    Ok(())
}

/// For an impl item, returns `(self type, trait)` rendered as plain-text
/// strings (via the `{:#}` alternate form); `None` for inherent impls or
/// non-impl items.
fn extract_for_impl_name(item: &clean::Item) -> Option<(String, String)> {
    match item.inner {
        clean::ItemEnum::ImplItem(ref i) => {
            if let Some(ref trait_) = i.trait_ {
                Some((format!("{:#}", i.for_), format!("{:#}", trait_)))
            } else {
                None
            }
        },
        _ => None,
    }
}

/// Whether `i` is a negative impl (`impl !Trait for ...`).
fn is_negative_impl(i: &clean::Impl) -> bool {
    i.polarity == Some(clean::ImplPolarity::Negative)
}

/// Writes the sidebar block for a trait page: associated types/consts,
/// required and provided methods, impls on foreign types, and links to the
/// implementors sections.
fn sidebar_trait(fmt: &mut fmt::Formatter, it: &clean::Item,
                 t: &clean::Trait) -> fmt::Result {
    let mut sidebar = String::new();

    let types = t.items
                 .iter()
                 .filter_map(|m| {
                     match m.name {
                         Some(ref name) if m.is_associated_type() => {
                             Some(format!("<a href=\"#associatedtype.{name}\">{name}</a>",
                                          name=name))
                         }
                         _ => None,
                     }
                 })
                 .collect::<String>();
    let consts = t.items
                  .iter()
                  .filter_map(|m| {
                      match m.name {
                          Some(ref name) if m.is_associated_const() => {
                              Some(format!("<a href=\"#associatedconstant.{name}\">{name}</a>",
                                           name=name))
                          }
                          _ => None,
                      }
                  })
                  .collect::<String>();
    // "Required" methods have no default body (ty-methods); "provided" ones do.
    let required = t.items
                    .iter()
                    .filter_map(|m| {
                        match m.name {
                            Some(ref name) if m.is_ty_method() => {
                                Some(format!("<a href=\"#tymethod.{name}\">{name}</a>",
                                             name=name))
                            }
                            _ => None,
                        }
                    })
                    .collect::<String>();
    let provided = t.items
                    .iter()
                    .filter_map(|m| {
                        match m.name {
                            Some(ref name) if m.is_method() => {
                                Some(format!("<a href=\"#method.{name}\">{name}</a>", name=name))
                            }
                            _ => None,
                        }
                    })
                    .collect::<String>();

    if !types.is_empty() {
        sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#associated-types\">\
                                   Associated Types</a><div class=\"sidebar-links\">{}</div>",
                                  types));
    }
    if !consts.is_empty() {
        sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#associated-const\">\
                                   Associated Constants</a><div class=\"sidebar-links\">{}</div>",
                                  consts));
    }
    if !required.is_empty() {
        sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#required-methods\">\
                                   Required Methods</a><div class=\"sidebar-links\">{}</div>",
                                  required));
    }
    if !provided.is_empty() {
        sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#provided-methods\">\
                                   Provided Methods</a><div class=\"sidebar-links\">{}</div>",
                                  provided));
    }

    let c = cache();
    if let Some(implementors) = c.implementors.get(&it.def_id) {
        // Implementors whose self type has no entry in `c.paths` live in other
        // crates and are listed under "Implementations on Foreign Types".
        let res = implementors.iter()
                              .filter(|i| i.inner_impl().for_.def_id()
                                           .map_or(false, |d| !c.paths.contains_key(&d)))
                              .filter_map(|i| {
                                  match extract_for_impl_name(&i.impl_item) {
                                      Some((ref name, ref url)) => {
                                          Some(format!("<a href=\"#impl-{}\">{}</a>",
                                                       small_url_encode(url),
                                                       Escape(name)))
                                      }
                                      _ => None,
                                  }
                              })
                              .collect::<String>();
        if !res.is_empty() {
            sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#foreign-impls\">\
                                       Implementations on Foreign Types</a><div \
                                       class=\"sidebar-links\">{}</div>",
                                      res));
        }
    }

    sidebar.push_str("<a class=\"sidebar-title\" href=\"#implementors\">Implementors</a>");
    if t.auto {
        sidebar.push_str("<a class=\"sidebar-title\" \
                          href=\"#synthetic-implementors\">Auto Implementors</a>");
    }

    sidebar.push_str(&sidebar_assoc_items(it));

    write!(fmt, "<div class=\"block items\">{}</div>", sidebar)
}

/// Writes the sidebar block for a primitive-type page (associated items only).
fn sidebar_primitive(fmt: &mut fmt::Formatter, it: &clean::Item,
                     _p: &clean::PrimitiveType) -> fmt::Result {
    let sidebar = sidebar_assoc_items(it);

    if !sidebar.is_empty() {
        write!(fmt, "<div class=\"block items\">{}</div>", sidebar)?;
    }
    Ok(())
}

/// Writes the sidebar block for a type-alias page (associated items only).
fn sidebar_typedef(fmt: &mut fmt::Formatter, it: &clean::Item,
                   _t: &clean::Typedef) -> fmt::Result {
    let sidebar = sidebar_assoc_items(it);

    if !sidebar.is_empty() {
        write!(fmt, "<div class=\"block items\">{}</div>", sidebar)?;
    }
    Ok(())
}

/// Concatenated `#structfield.NAME` anchor links for the named struct/union
/// fields in `fields`; non-field items and unnamed fields are skipped.
fn get_struct_fields_name(fields: &[clean::Item]) -> String {
    fields.iter()
          .filter(|f| if let clean::StructFieldItem(..) = f.inner {
              true
          } else {
              false
          })
          .filter_map(|f| match f.name {
              Some(ref name) => Some(format!("<a href=\"#structfield.{name}\">\
                                              {name}</a>", name=name)),
              _ => None,
          })
          .collect()
}

/// Writes the sidebar block for a union page: fields plus associated items.
fn sidebar_union(fmt: &mut fmt::Formatter, it: &clean::Item,
                 u: &clean::Union) -> fmt::Result {
    let mut sidebar = String::new();
    let fields = get_struct_fields_name(&u.fields);

    if !fields.is_empty() {
        sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#fields\">Fields</a>\
                                   <div class=\"sidebar-links\">{}</div>", fields));
    }

    sidebar.push_str(&sidebar_assoc_items(it));

    if !sidebar.is_empty() {
        write!(fmt, "<div class=\"block items\">{}</div>", sidebar)?;
    }
    Ok(())
}

/// Writes the sidebar block for an enum page: variants plus associated items.
fn sidebar_enum(fmt: &mut fmt::Formatter, it: &clean::Item,
                e: &clean::Enum) -> fmt::Result {
    let mut sidebar = String::new();

    let variants = e.variants.iter()
                             .filter_map(|v| match v.name {
                                 Some(ref name) => Some(format!("<a href=\"#variant.{name}\">{name}\
                                                                 </a>", name = name)),
                                 _ => None,
                             })
                             .collect::<String>();
    if !variants.is_empty() {
        sidebar.push_str(&format!("<a class=\"sidebar-title\" href=\"#variants\">Variants</a>\
                                   <div class=\"sidebar-links\">{}</div>", variants));
    }

    sidebar.push_str(&sidebar_assoc_items(it));

    if !sidebar.is_empty() {
        write!(fmt, "<div class=\"block items\">{}</div>", sidebar)?;
    }
    Ok(())
}

/// Maps an item type to its `(anchor id, section heading)` pair used by the
/// module sidebar and module-page section headers.
fn item_ty_to_strs(ty: &ItemType) -> (&'static str, &'static str) {
    match *ty {
        ItemType::ExternCrate |
        ItemType::Import => ("reexports", "Re-exports"),
        ItemType::Module => ("modules", "Modules"),
        ItemType::Struct => ("structs", "Structs"),
        ItemType::Union => ("unions", "Unions"),
        ItemType::Enum => ("enums", "Enums"),
        ItemType::Function => ("functions", "Functions"),
        ItemType::Typedef => ("types", "Type Definitions"),
        ItemType::Static => ("statics", "Statics"),
        ItemType::Constant => ("constants", "Constants"),
        ItemType::Trait => ("traits", "Traits"),
        ItemType::Impl => ("impls", "Implementations"),
        ItemType::TyMethod => ("tymethods", "Type Methods"),
        ItemType::Method => ("methods", "Methods"),
        ItemType::StructField => ("fields", "Struct Fields"),
        ItemType::Variant => ("variants", "Variants"),
        ItemType::Macro => ("macros", "Macros"),
        ItemType::Primitive => ("primitives", "Primitive Types"),
        ItemType::AssociatedType => ("associated-types", "Associated Types"),
        ItemType::AssociatedConst => ("associated-consts", "Associated Constants"),
        ItemType::ForeignType => ("foreign-types", "Foreign Types"),
        ItemType::Keyword => ("keywords", "Keywords"),
    }
}

/// Writes the sidebar block for a module page: one `<li>` link per section
/// that actually has (non-stripped) items, in the same order the module page
/// prints its headings.
fn sidebar_module(fmt: &mut fmt::Formatter, _it: &clean::Item,
                  items: &[clean::Item]) -> fmt::Result {
    let mut sidebar = String::new();

    if items.iter().any(|it| it.type_() == ItemType::ExternCrate ||
                             it.type_() == ItemType::Import) {
        sidebar.push_str(&format!("<li><a href=\"#{id}\">{name}</a></li>",
                                  id = "reexports",
                                  name = "Re-exports"));
    }

    // ordering taken from item_module, reorder, where it prioritized elements in a certain order
    // to print its headings
    for &myty in &[ItemType::Primitive, ItemType::Module, ItemType::Macro, ItemType::Struct,
                   ItemType::Enum, ItemType::Constant, ItemType::Static, ItemType::Trait,
                   ItemType::Function, ItemType::Typedef, ItemType::Union, ItemType::Impl,
                   ItemType::TyMethod, ItemType::Method, ItemType::StructField, ItemType::Variant,
                   ItemType::AssociatedType, ItemType::AssociatedConst, ItemType::ForeignType] {
        if items.iter().any(|it| !it.is_stripped() && it.type_() == myty) {
            let (short, name) = item_ty_to_strs(&myty);
            sidebar.push_str(&format!("<li><a href=\"#{id}\">{name}</a></li>",
                                      id = short,
                                      name = name));
        }
    }

    if !sidebar.is_empty() {
        write!(fmt, "<div class=\"block items\"><ul>{}</ul></div>", sidebar)?;
    }
    Ok(())
}

/// Writes the sidebar block for a foreign-type page (associated items only).
fn sidebar_foreign_type(fmt: &mut fmt::Formatter, it: &clean::Item) -> fmt::Result {
    let sidebar = sidebar_assoc_items(it);
    if !sidebar.is_empty() {
        write!(fmt, "<div class=\"block items\">{}</div>", sidebar)?;
    }
    Ok(())
}

/// Renders a source-code page: a right-aligned line-number gutter (`cols` is
/// the digit width of the last line number) followed by the highlighted code.
impl<'a> fmt::Display for Source<'a> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let Source(s) = *self;
        let lines = s.lines().count();
        // Count the decimal digits of `lines` so every number pads to the
        // same width.
        let mut cols = 0;
        let mut tmp = lines;
        while tmp > 0 {
            cols += 1;
            tmp /= 10;
        }
        write!(fmt, "<pre class=\"line-numbers\">")?;
        for i in 1..lines + 1 {
            write!(fmt, "<span id=\"{0}\">{0:1$}</span>\n", i, cols)?;
        }
        write!(fmt, "</pre>")?;
        write!(fmt, "{}", highlight::render_with_highlighting(s, None, None, None, None))?;
        Ok(())
    }
}

/// Renders a macro page: the highlighted `macro_rules!` source followed by
/// its documentation.
fn item_macro(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
              t: &clean::Macro) -> fmt::Result {
    wrap_into_docblock(w, |w| {
        w.write_str(&highlight::render_with_highlighting(&t.source,
                                                         Some("macro"),
                                                         None,
                                                         None,
                                                         None))
    })?;
    document(w, cx, it)
}

/// Renders a primitive-type page: docs plus all associated items.
fn item_primitive(w: &mut fmt::Formatter, cx: &Context,
                  it: &clean::Item,
                  _p: &clean::PrimitiveType) -> fmt::Result {
    document(w, cx, it)?;
    render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)
}

/// Renders a keyword page, which consists only of its documentation.
fn item_keyword(w: &mut fmt::Formatter, cx: &Context,
                it: &clean::Item,
                _p: &str) -> fmt::Result {
    document(w, cx, it)
}

// Keywords included in every page's <meta name="keywords"> tag.
const BASIC_KEYWORDS: &'static str = "rust, rustlang, rust-lang";

/// The basic keywords plus this item's name, for the page's meta keywords.
fn make_item_keywords(it: &clean::Item) -> String {
    format!("{}, {}", BASIC_KEYWORDS, it.name.as_ref().unwrap())
}

/// Builds the search-index entry describing a function-like item's signature
/// (input and output types); `None` for items without a function declaration.
fn get_index_search_type(item: &clean::Item) -> Option<IndexItemFunctionType> {
    let decl = match item.inner {
        clean::FunctionItem(ref f) => &f.decl,
        clean::MethodItem(ref m) => &m.decl,
        clean::TyMethodItem(ref m) => &m.decl,
        _ => return None
    };

    let inputs = decl.inputs.values.iter().map(|arg| get_index_type(&arg.type_)).collect();
    let output = match decl.output {
        clean::FunctionRetTy::Return(ref return_type) => Some(get_index_type(return_type)),
        _ => None
    };

    Some(IndexItemFunctionType { inputs: inputs, output: output })
}

/// Converts a clean type into its search-index representation: a lowercased
/// name plus the names of its generic arguments.
fn get_index_type(clean_type: &clean::Type) -> Type {
    let t = Type {
        name: get_index_type_name(clean_type, true).map(|s| s.to_ascii_lowercase()),
        generics: get_generics(clean_type),
    };
    t
}

/// Returns a list of all paths used in the type.
/// This is used to help deduplicate imported impls
/// for reexported types. If any of the contained
/// types are re-exported, we don't use the corresponding
/// entry from the js file, as inlining will have already
/// picked up the impl
fn collect_paths_for_type(first_ty: clean::Type) -> Vec<String> {
    let mut out = Vec::new();
    // `visited` guards against cycles; `work` is a breadth-first queue of
    // component types still to examine.
    let mut visited = FxHashSet();
    let mut work = VecDeque::new();
    let cache = cache();

    work.push_back(first_ty);

    while let Some(ty) = work.pop_front() {
        if !visited.insert(ty.clone()) {
            continue;
        }

        match ty {
            clean::Type::ResolvedPath { did, .. } => {
                // Prefer the locally-known exact path; fall back to the
                // external-crate path table.
                let get_extern = || cache.external_paths.get(&did).map(|s| s.0.clone());
                let fqp = cache.exact_paths.get(&did).cloned().or_else(get_extern);

                match fqp {
                    Some(path) => {
                        out.push(path.join("::"));
                    },
                    _ => {}
                };
            },
            clean::Type::Tuple(tys) => {
                work.extend(tys.into_iter());
            },
            clean::Type::Slice(ty) => {
                work.push_back(*ty);
            }
            clean::Type::Array(ty, _) => {
                work.push_back(*ty);
            },
            clean::Type::Unique(ty) => {
                work.push_back(*ty);
            },
            clean::Type::RawPointer(_, ty) => {
                work.push_back(*ty);
            },
            clean::Type::BorrowedRef { type_, .. } => {
                work.push_back(*type_);
            },
            clean::Type::QPath { self_type, trait_, .. } => {
                work.push_back(*self_type);
                work.push_back(*trait_);
            },
            _ => {}
        }
    };
    out
}

/// The short name a type is indexed under in the search index: the last path
/// segment for named types, the parameter name for generics (only when
/// `accept_generic`), and the Debug form for primitives. Panics on a
/// zero-length path, which would be a bug in the caller.
fn get_index_type_name(clean_type: &clean::Type, accept_generic: bool) -> Option<String> {
    match *clean_type {
        clean::ResolvedPath { ref path, .. } => {
            let segments = &path.segments;
            let path_segment = segments.into_iter().last().unwrap_or_else(|| panic!(
                "get_index_type_name(clean_type: {:?}, accept_generic: {:?}) had length zero path",
                clean_type, accept_generic
            ));
            Some(path_segment.name.clone())
        }
        clean::Generic(ref s) if accept_generic => Some(s.clone()),
        clean::Primitive(ref p) => Some(format!("{:?}", p)),
        clean::BorrowedRef { ref type_, .. } => get_index_type_name(type_, accept_generic),
        // FIXME: add all from clean::Type.
        _ => None
    }
}

/// Lowercased names of the type's generic arguments for the search index;
/// `None` when the type has no nameable generics.
fn get_generics(clean_type: &clean::Type) -> Option<Vec<String>> {
    clean_type.generics()
              .and_then(|types| {
                  let r = types.iter()
                               .filter_map(|t| get_index_type_name(t, false))
                               .map(|s| s.to_ascii_lowercase())
                               .collect::<Vec<_>>();
                  if r.is_empty() {
                      None
                  } else {
                      Some(r)
                  }
              })
}

/// Returns a clone of the thread-local render cache handle.
pub fn cache() -> Arc<Cache> {
    CACHE_KEY.with(|c| c.borrow().clone())
}

#[cfg(test)]
#[test]
fn test_unique_id() {
    // derive_id must append "-N" suffixes to repeated ids, and reset_ids must
    // make the sequence reproducible from scratch.
    let input = ["foo", "examples", "examples", "method.into_iter", "examples",
                 "method.into_iter", "foo", "main", "search", "methods", "examples",
                 "method.into_iter", "assoc_type.Item", "assoc_type.Item"];
    let expected = ["foo", "examples", "examples-1", "method.into_iter", "examples-2",
                    "method.into_iter-1", "foo-1", "main-1", "search-1", "methods-1",
                    "examples-3", "method.into_iter-2", "assoc_type.Item",
                    "assoc_type.Item-1"];
    let test = || {
        let actual: Vec<String> = input.iter().map(|s| derive_id(s.to_string())).collect();
        assert_eq!(&actual[..], expected);
    };
    test();
    reset_ids(true);
    test();
}

#[cfg(test)]
#[test]
fn test_name_key() {
    // name_key splits a name into (prefix, numeric value, leading-zero count)
    // so names with trailing numbers sort numerically.
    assert_eq!(name_key("0"), ("", 0, 1));
    assert_eq!(name_key("123"), ("", 123, 0));
    assert_eq!(name_key("Fruit"), ("Fruit", 0, 0));
    assert_eq!(name_key("Fruit0"), ("Fruit", 0, 1));
    assert_eq!(name_key("Fruit0000"), ("Fruit", 0, 4));
    assert_eq!(name_key("Fruit01"), ("Fruit", 1, 1));
    assert_eq!(name_key("Fruit10"), ("Fruit", 10, 0));
    assert_eq!(name_key("Fruit123"), ("Fruit", 123, 0));
}

#[cfg(test)]
#[test]
fn test_name_sorting() {
    // The natural ordering produced by name_key should already match this
    // hand-sorted list, i.e. sorting must leave it unchanged.
    let names = ["Apple", "Banana", "Fruit", "Fruit0", "Fruit00", "Fruit1", "Fruit01", "Fruit2",
                 "Fruit02", "Fruit20", "Fruit100", "Pear"];
    let mut sorted = names.to_owned();
    sorted.sort_by_key(|&s| name_key(s));
    assert_eq!(names, sorted);
}
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Feature gating //! //! This module implements the gating necessary for preventing certain compiler //! features from being used by default. This module will crawl a pre-expanded //! AST to ensure that there are no features which are used that are not //! enabled. //! //! Features are enabled in programs via the crate-level attributes of //! `#![feature(...)]` with a comma-separated list of features. //! //! For the purpose of future feature-tracking, once code for detection of feature //! gate usage is added, *do not remove it again* even once the feature //! becomes stable. use self::AttributeType::*; use self::AttributeGate::*; use abi::Abi; use ast::{self, NodeId, PatKind, RangeEnd}; use attr; use codemap::Spanned; use syntax_pos::Span; use errors::{DiagnosticBuilder, Handler, FatalError}; use visit::{self, FnKind, Visitor}; use parse::ParseSess; use symbol::Symbol; use std::ascii::AsciiExt; use std::env; macro_rules! set { (proc_macro) => {{ fn f(features: &mut Features, span: Span) { features.declared_lib_features.push((Symbol::intern("proc_macro"), span)); features.proc_macro = true; } f as fn(&mut Features, Span) }}; ($field: ident) => {{ fn f(features: &mut Features, _: Span) { features.$field = true; } f as fn(&mut Features, Span) }} } macro_rules! declare_features { ($((active, $feature: ident, $ver: expr, $issue: expr),)+) => { /// Represents active features that are currently being implemented or /// currently being considered for addition/removal. 
const ACTIVE_FEATURES: &'static [(&'static str, &'static str, Option<u32>, fn(&mut Features, Span))] = &[$((stringify!($feature), $ver, $issue, set!($feature))),+]; /// A set of features to be used by later passes. pub struct Features { /// #![feature] attrs for stable language features, for error reporting pub declared_stable_lang_features: Vec<(Symbol, Span)>, /// #![feature] attrs for non-language (library) features pub declared_lib_features: Vec<(Symbol, Span)>, $(pub $feature: bool),+ } impl Features { pub fn new() -> Features { Features { declared_stable_lang_features: Vec::new(), declared_lib_features: Vec::new(), $($feature: false),+ } } } }; ($((removed, $feature: ident, $ver: expr, $issue: expr),)+) => { /// Represents unstable features which have since been removed (it was once Active) const REMOVED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[ $((stringify!($feature), $ver, $issue)),+ ]; }; ($((stable_removed, $feature: ident, $ver: expr, $issue: expr),)+) => { /// Represents stable features which have since been removed (it was once Accepted) const STABLE_REMOVED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[ $((stringify!($feature), $ver, $issue)),+ ]; }; ($((accepted, $feature: ident, $ver: expr, $issue: expr),)+) => { /// Those language feature has since been Accepted (it was once Active) const ACCEPTED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[ $((stringify!($feature), $ver, $issue)),+ ]; } } // If you change this, please modify src/doc/unstable-book as well. // // Don't ever remove anything from this list; set them to 'Removed'. // // The version numbers here correspond to the version in which the current status // was set. This is most important for knowing when a particular feature became // stable (active). // // NB: The featureck.py script parses this information directly out of the source // so take care when modifying it. declare_features! 
( (active, asm, "1.0.0", Some(29722)), (active, concat_idents, "1.0.0", Some(29599)), (active, link_args, "1.0.0", Some(29596)), (active, log_syntax, "1.0.0", Some(29598)), (active, non_ascii_idents, "1.0.0", Some(28979)), (active, plugin_registrar, "1.0.0", Some(29597)), (active, thread_local, "1.0.0", Some(29594)), (active, trace_macros, "1.0.0", Some(29598)), // rustc internal, for now: (active, intrinsics, "1.0.0", None), (active, lang_items, "1.0.0", None), (active, link_llvm_intrinsics, "1.0.0", Some(29602)), (active, linkage, "1.0.0", Some(29603)), (active, quote, "1.0.0", Some(29601)), (active, simd, "1.0.0", Some(27731)), // rustc internal (active, rustc_diagnostic_macros, "1.0.0", None), (active, advanced_slice_patterns, "1.0.0", Some(23121)), (active, box_syntax, "1.0.0", Some(27779)), (active, placement_in_syntax, "1.0.0", Some(27779)), (active, unboxed_closures, "1.0.0", Some(29625)), (active, fundamental, "1.0.0", Some(29635)), (active, main, "1.0.0", Some(29634)), (active, needs_allocator, "1.4.0", Some(27389)), (active, on_unimplemented, "1.0.0", Some(29628)), (active, plugin, "1.0.0", Some(29597)), (active, simd_ffi, "1.0.0", Some(27731)), (active, start, "1.0.0", Some(29633)), (active, structural_match, "1.8.0", Some(31434)), (active, panic_runtime, "1.10.0", Some(32837)), (active, needs_panic_runtime, "1.10.0", Some(32837)), // OIBIT specific features (active, optin_builtin_traits, "1.0.0", Some(13231)), // macro reexport needs more discussion and stabilization (active, macro_reexport, "1.0.0", Some(29638)), // Allows use of #[staged_api] // rustc internal (active, staged_api, "1.0.0", None), // Allows using #![no_core] (active, no_core, "1.3.0", Some(29639)), // Allows using `box` in patterns; RFC 469 (active, box_patterns, "1.0.0", Some(29641)), // Allows using the unsafe_destructor_blind_to_params attribute; // RFC 1238 (active, dropck_parametricity, "1.3.0", Some(28498)), // Allows using the may_dangle attribute; RFC 1327 (active, 
dropck_eyepatch, "1.10.0", Some(34761)),

    // Allows the use of custom attributes; RFC 572
    (active, custom_attribute, "1.0.0", Some(29642)),

    // Allows the use of #[derive(Anything)] as sugar for
    // #[derive_Anything].
    (active, custom_derive, "1.0.0", Some(29644)),

    // Allows the use of rustc_* attributes; RFC 572
    (active, rustc_attrs, "1.0.0", Some(29642)),

    // Allows the use of #[allow_internal_unstable]. This is an
    // attribute on macro_rules! and can't use the attribute handling
    // below (it has to be checked before expansion possibly makes
    // macros disappear).
    //
    // rustc internal
    (active, allow_internal_unstable, "1.0.0", None),

    // Allows the use of #[allow_internal_unsafe]. This is an
    // attribute on macro_rules! and can't use the attribute handling
    // below (it has to be checked before expansion possibly makes
    // macros disappear).
    //
    // rustc internal
    (active, allow_internal_unsafe, "1.0.0", None),

    // #23121. Array patterns have some hazards yet.
    (active, slice_patterns, "1.0.0", Some(23121)),

    // Allows the definition of `const fn` functions.
    (active, const_fn, "1.2.0", Some(24111)),

    // Allows indexing into constant arrays.
    (active, const_indexing, "1.4.0", Some(29947)),

    // Allows using #[prelude_import] on glob `use` items.
    //
    // rustc internal
    (active, prelude_import, "1.2.0", None),

    // Allows default type parameters to influence type inference.
    (active, default_type_parameter_fallback, "1.3.0", Some(27336)),

    // Allows associated type defaults
    (active, associated_type_defaults, "1.2.0", Some(29661)),

    // allow `repr(simd)`, and importing the various simd intrinsics
    (active, repr_simd, "1.4.0", Some(27731)),

    // Allows cfg(target_feature = "...").
    (active, cfg_target_feature, "1.4.0", Some(29717)),

    // allow `extern "platform-intrinsic" { ... }`
    (active, platform_intrinsics, "1.4.0", Some(27731)),

    // allow `#[unwind]`
    // rust runtime internal
    (active, unwind_attributes, "1.4.0", None),

    // allow the use of `#[naked]` on functions.
    (active, naked_functions, "1.9.0", Some(32408)),

    // allow `#[no_debug]`
    (active, no_debug, "1.5.0", Some(29721)),

    // allow `#[omit_gdb_pretty_printer_section]`
    // rustc internal.
    (active, omit_gdb_pretty_printer_section, "1.5.0", None),

    // Allows cfg(target_vendor = "...").
    (active, cfg_target_vendor, "1.5.0", Some(29718)),

    // Allow attributes on expressions and non-item statements
    (active, stmt_expr_attributes, "1.6.0", Some(15701)),

    // allow using type ascription in expressions
    (active, type_ascription, "1.6.0", Some(23416)),

    // Allows cfg(target_thread_local)
    (active, cfg_target_thread_local, "1.7.0", Some(29594)),

    // rustc internal
    (active, abi_vectorcall, "1.7.0", None),

    // a...b and ...b
    (active, inclusive_range_syntax, "1.7.0", Some(28237)),

    // X..Y patterns
    (active, exclusive_range_pattern, "1.11.0", Some(37854)),

    // impl specialization (RFC 1210)
    (active, specialization, "1.7.0", Some(31844)),

    // Allow Drop types in statics/const functions (RFC 1440)
    (active, drop_types_in_const, "1.9.0", Some(33156)),

    // Allows cfg(target_has_atomic = "...").
    (active, cfg_target_has_atomic, "1.9.0", Some(32976)),

    // Allows `impl Trait` in function return types.
    (active, conservative_impl_trait, "1.12.0", Some(34511)),

    // The `!` type
    (active, never_type, "1.13.0", Some(35121)),

    // Allows all literals in attribute lists and values of key-value pairs.
    (active, attr_literals, "1.13.0", Some(34981)),

    // Allows the sysV64 ABI to be specified on all platforms
    // instead of just the platforms on which it is the C ABI
    (active, abi_sysv64, "1.13.0", Some(36167)),

    // Allows untagged unions `union U { ... }`
    (active, untagged_unions, "1.13.0", Some(32836)),

    // Used to identify the `compiler_builtins` crate
    // rustc internal
    (active, compiler_builtins, "1.13.0", None),

    // Allows attributes on lifetime/type formal parameters in generics (RFC 1327)
    (active, generic_param_attrs, "1.11.0", Some(34761)),

    // Allows #[link(..., cfg(..))]
    (active, link_cfg, "1.14.0", Some(37406)),

    // (no table comment; tracking issue #35896)
    (active, use_extern_macros, "1.15.0", Some(35896)),

    // Allows #[target_feature(...)]
    (active, target_feature, "1.15.0", None),

    // `extern "ptx-*" fn()`
    (active, abi_ptx, "1.15.0", None),

    // The `i128` type
    (active, i128_type, "1.16.0", Some(35118)),

    // The `unadjusted` ABI. Perma unstable.
    (active, abi_unadjusted, "1.16.0", None),

    // Procedural macros 2.0.
    (active, proc_macro, "1.16.0", Some(38356)),

    // Declarative macros 2.0 (`macro`).
    (active, decl_macro, "1.17.0", Some(39412)),

    // Allows #[link(kind="static-nobundle"...]
    (active, static_nobundle, "1.16.0", Some(37403)),

    // `extern "msp430-interrupt" fn()`
    (active, abi_msp430_interrupt, "1.16.0", Some(38487)),

    // Used to identify crates that contain sanitizer runtimes
    // rustc internal
    (active, sanitizer_runtime, "1.17.0", None),

    // Used to identify crates that contain the profiler runtime
    // rustc internal
    (active, profiler_runtime, "1.18.0", None),

    // `extern "x86-interrupt" fn()`
    (active, abi_x86_interrupt, "1.17.0", Some(40180)),

    // Allows the `catch {...}` expression
    (active, catch_expr, "1.17.0", Some(31436)),

    // Allows `repr(align(u16))` struct attribute (RFC 1358)
    (active, repr_align, "1.17.0", Some(33626)),

    // Used to preserve symbols (see llvm.used)
    (active, used, "1.18.0", Some(40289)),

    // Allows module-level inline assembly by way of global_asm!()
    (active, global_asm, "1.18.0", Some(35119)),

    // Allows overlapping impls of marker traits
    (active, overlapping_marker_traits, "1.18.0", Some(29864)),

    // Allows use of the :vis macro fragment specifier
    (active, macro_vis_matcher, "1.18.0", Some(41022)),

    // rustc internal
    (active, abi_thiscall, "1.19.0", None),

    // Allows a test to fail without failing the whole suite
    (active, allow_fail, "1.19.0", Some(42219)),

    // Allows unsized tuple coercion.
    (active, unsized_tuple_coercion, "1.20.0", Some(42877)),

    // global allocators and their internals
    (active, global_allocator, "1.20.0", None),
    (active, allocator_internals, "1.20.0", None),

    // #[doc(cfg(...))]
    (active, doc_cfg, "1.21.0", Some(43781)),

    // allow `#[must_use]` on functions (RFC 1940)
    (active, fn_must_use, "1.21.0", Some(43302)),
);

// Status: `removed` — feature gates that once existed but are no longer
// recognized by the compiler.
declare_features! (
    (removed, import_shadowing, "1.0.0", None),
    (removed, managed_boxes, "1.0.0", None),
    // Allows use of unary negate on unsigned integers, e.g. -e for e: u8
    (removed, negate_unsigned, "1.0.0", Some(29645)),
    (removed, reflect, "1.0.0", Some(27749)),
    // A way to temporarily opt out of opt in copy. This will *never* be accepted.
    (removed, opt_out_copy, "1.0.0", None),
    (removed, quad_precision_float, "1.0.0", None),
    (removed, struct_inherit, "1.0.0", None),
    (removed, test_removed_feature, "1.0.0", None),
    (removed, visible_private_types, "1.0.0", None),
    (removed, unsafe_no_drop_flag, "1.0.0", None),
    // Allows using items which are missing stability attributes
    // rustc internal
    (removed, unmarked_api, "1.0.0", None),
    (removed, pushpop_unsafe, "1.2.0", None),
    (removed, allocator, "1.0.0", None),
);

// Status: `stable_removed`.
declare_features! (
    (stable_removed, no_stack_check, "1.0.0", None),
);

// Status: `accepted` — gates whose functionality is now available without a
// feature attribute.
declare_features! (
    (accepted, associated_types, "1.0.0", None),
    // allow overloading augmented assignment operations like `a += b`
    (accepted, augmented_assignments, "1.8.0", Some(28235)),
    // allow empty structs and enum variants with braces
    (accepted, braced_empty_structs, "1.8.0", Some(29720)),
    (accepted, default_type_params, "1.0.0", None),
    (accepted, globs, "1.0.0", None),
    (accepted, if_let, "1.0.0", None),
    // A temporary feature gate used to enable parser extensions needed
    // to bootstrap fix for #5723.
    (accepted, issue_5723_bootstrap, "1.0.0", None),
    (accepted, macro_rules, "1.0.0", None),
    // Allows using #![no_std]
    (accepted, no_std, "1.6.0", None),
    (accepted, slicing_syntax, "1.0.0", None),
    (accepted, struct_variant, "1.0.0", None),
    // These are used to test this portion of the compiler, they don't actually
    // mean anything
    (accepted, test_accepted_feature, "1.0.0", None),
    (accepted, tuple_indexing, "1.0.0", None),
    // Allows macros to appear in the type position.
    (accepted, type_macros, "1.13.0", Some(27245)),
    (accepted, while_let, "1.0.0", None),
    // Allows `#[deprecated]` attribute
    (accepted, deprecated, "1.9.0", Some(29935)),
    // `expr?`
    (accepted, question_mark, "1.13.0", Some(31436)),
    // Allows `..` in tuple (struct) patterns
    (accepted, dotdot_in_tuple_patterns, "1.14.0", Some(33627)),
    // (no table comment; tracking issue #35120)
    (accepted, item_like_imports, "1.15.0", Some(35120)),
    // Allows using `Self` and associated types in struct expressions and patterns.
    (accepted, more_struct_aliases, "1.16.0", Some(37544)),
    // elide `'static` lifetimes in `static`s and `const`s
    (accepted, static_in_const, "1.17.0", Some(35897)),
    // Allows field shorthands (`x` meaning `x: x`) in struct literal expressions.
    (accepted, field_init_shorthand, "1.17.0", Some(37340)),
    // Allows the definition recursive static items.
    (accepted, static_recursion, "1.17.0", Some(29719)),
    // pub(restricted) visibilities (RFC 1422)
    (accepted, pub_restricted, "1.18.0", Some(32409)),
    // The #![windows_subsystem] attribute
    (accepted, windows_subsystem, "1.18.0", Some(37499)),
    // Allows `break {expr}` with a value inside `loop`s.
    (accepted, loop_break_value, "1.19.0", Some(37339)),
    // Permits numeric fields in struct expressions and patterns.
    (accepted, relaxed_adts, "1.19.0", Some(35626)),
    // Coerces non capturing closures to function pointers
    (accepted, closure_to_fn_coercion, "1.19.0", Some(39817)),
    // Allows attributes on struct literal fields.
    (accepted, struct_field_attributes, "1.20.0", Some(38814)),
    // Allows the definition of associated constants in `trait` or `impl`
    // blocks.
    (accepted, associated_consts, "1.20.0", Some(29646)),
    // Usage of the `compile_error!` macro
    (accepted, compile_error, "1.20.0", Some(40872)),
    // See rust-lang/rfcs#1414. Allows code like `let x: &'static u32 = &42` to work.
    (accepted, rvalue_static_promotion, "1.21.0", Some(38865)),
);

// If you change this, please modify src/doc/unstable-book as well. You must
// move that documentation into the relevant place in the other docs, and
// remove the chapter on the flag.

/// How a builtin attribute interacts with the `unused_attribute` check.
#[derive(PartialEq, Copy, Clone, Debug)]
pub enum AttributeType {
    /// Normal, builtin attribute that is consumed
    /// by the compiler before the unused_attribute check
    Normal,

    /// Builtin attribute that may not be consumed by the compiler
    /// before the unused_attribute check. These attributes
    /// will be ignored by the unused_attribute lint
    Whitelisted,

    /// Builtin attribute that is only allowed at the crate level
    CrateLevel,
}

/// Whether (and how) a builtin attribute is feature gated.
pub enum AttributeGate {
    /// Is gated by a given feature gate, reason
    /// and function to check if enabled
    Gated(Stability, &'static str, &'static str, fn(&Features) -> bool),

    /// Ungated attribute, can be used on all release channels
    Ungated,
}

impl AttributeGate {
    // True only for gated attributes marked `Stability::Deprecated`.
    fn is_deprecated(&self) -> bool {
        match *self {
            Gated(Stability::Deprecated(_), ..) => true,
            _ => false,
        }
    }
}

/// Stability of a gated attribute.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Stability {
    Unstable,
    // Argument is tracking issue link.
    Deprecated(&'static str),
}

// fn() is not Debug, so AttributeGate cannot #[derive(Debug)]; the gate-check
// function is simply omitted from the output.
impl ::std::fmt::Debug for AttributeGate {
    fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        match *self {
            Gated(ref stab, name, expl, _) =>
                write!(fmt, "Gated({:?}, {}, {})", stab, name, expl),
            Ungated => write!(fmt, "Ungated")
        }
    }
}

// Expands to a `fn(&Features) -> bool` that reads the named boolean field of
// `Features`; used to build the gate-check function stored in `Gated`.
macro_rules! cfg_fn {
    ($field: ident) => {{
        fn f(features: &Features) -> bool {
            features.$field
        }
        f as fn(&Features) -> bool
    }}
}

/// Returns every builtin attribute whose gate is marked deprecated.
pub fn deprecated_attributes() -> Vec<&'static (&'static str, AttributeType, AttributeGate)> {
    BUILTIN_ATTRIBUTES.iter().filter(|a| a.2.is_deprecated()).collect()
}

/// True if `attr`'s name matches any entry in `BUILTIN_ATTRIBUTES`.
pub fn is_builtin_attr(attr: &ast::Attribute) -> bool {
    BUILTIN_ATTRIBUTES.iter().any(|&(builtin_name, _, _)| attr.check_name(builtin_name))
}

// Attributes that have a special meaning to rustc or rustdoc
pub const BUILTIN_ATTRIBUTES: &'static [(&'static str, AttributeType, AttributeGate)] = &[
    // Normal attributes

    ("warn", Normal, Ungated),
    ("allow", Normal, Ungated),
    ("forbid", Normal, Ungated),
    ("deny", Normal, Ungated),

    ("macro_reexport", Normal, Ungated),
    ("macro_use", Normal, Ungated),
    ("macro_export", Normal, Ungated),
    ("plugin_registrar", Normal, Ungated),

    ("cfg", Normal, Ungated),
    ("cfg_attr", Normal, Ungated),
    ("main", Normal, Ungated),
    ("start", Normal, Ungated),
    ("test", Normal, Ungated),
    ("bench", Normal, Ungated),
    ("simd", Normal, Ungated),
    ("repr", Normal, Ungated),
    ("path", Normal, Ungated),
    ("abi", Normal, Ungated),
    ("automatically_derived", Normal, Ungated),
    ("no_mangle", Normal, Ungated),
    ("no_link", Normal, Ungated),
    ("derive", Normal, Ungated),
    ("should_panic", Normal, Ungated),
    ("ignore", Normal, Ungated),
    ("no_implicit_prelude", Normal, Ungated),
    ("reexport_test_harness_main", Normal, Ungated),
    ("link_args", Normal, Gated(Stability::Unstable,
                                "link_args",
                                "the `link_args` attribute is experimental and not \
                                 portable across platforms, it is recommended to \
                                 use `#[link(name = \"foo\")] instead",
                                cfg_fn!(link_args))),
    ("macro_escape", Normal, Ungated),

    // RFC #1445.
    ("structural_match", Whitelisted, Gated(Stability::Unstable,
                                            "structural_match",
                                            "the semantics of constant patterns is \
                                             not yet settled",
                                            cfg_fn!(structural_match))),

    ("plugin", CrateLevel, Gated(Stability::Unstable,
                                 "plugin",
                                 "compiler plugins are experimental \
                                  and possibly buggy",
                                 cfg_fn!(plugin))),

    ("no_std", CrateLevel, Ungated),
    ("no_core", CrateLevel, Gated(Stability::Unstable,
                                  "no_core",
                                  "no_core is experimental",
                                  cfg_fn!(no_core))),
    ("lang", Normal, Gated(Stability::Unstable,
                           "lang_items",
                           "language items are subject to change",
                           cfg_fn!(lang_items))),
    ("linkage", Whitelisted, Gated(Stability::Unstable,
                                   "linkage",
                                   "the `linkage` attribute is experimental \
                                    and not portable across platforms",
                                   cfg_fn!(linkage))),
    ("thread_local", Whitelisted, Gated(Stability::Unstable,
                                        "thread_local",
                                        "`#[thread_local]` is an experimental feature, and does \
                                         not currently handle destructors. There is no \
                                         corresponding `#[task_local]` mapping to the task \
                                         model",
                                        cfg_fn!(thread_local))),

    ("rustc_on_unimplemented", Normal, Gated(Stability::Unstable,
                                             "on_unimplemented",
                                             "the `#[rustc_on_unimplemented]` attribute \
                                              is an experimental feature",
                                             cfg_fn!(on_unimplemented))),
    ("global_allocator", Normal, Gated(Stability::Unstable,
                                       "global_allocator",
                                       "the `#[global_allocator]` attribute is \
                                        an experimental feature",
                                       cfg_fn!(global_allocator))),
    ("default_lib_allocator", Whitelisted, Gated(Stability::Unstable,
                                                 "allocator_internals",
                                                 "the `#[default_lib_allocator]` \
                                                  attribute is an experimental feature",
                                                 cfg_fn!(allocator_internals))),
    ("needs_allocator", Normal, Gated(Stability::Unstable,
                                      "allocator_internals",
                                      "the `#[needs_allocator]` \
                                       attribute is an experimental \
                                       feature",
                                      cfg_fn!(allocator_internals))),
    ("panic_runtime", Whitelisted, Gated(Stability::Unstable,
                                         "panic_runtime",
                                         "the `#[panic_runtime]` attribute is \
                                          an experimental feature",
                                         cfg_fn!(panic_runtime))),
    ("needs_panic_runtime", Whitelisted, Gated(Stability::Unstable,
                                               "needs_panic_runtime",
                                               "the `#[needs_panic_runtime]` \
                                                attribute is an experimental \
                                                feature",
                                               cfg_fn!(needs_panic_runtime))),

    // The `rustc_*` attributes below are all gated behind `rustc_attrs` and
    // exist only to support rustc's own unit tests.
    ("rustc_variance", Normal, Gated(Stability::Unstable,
                                     "rustc_attrs",
                                     "the `#[rustc_variance]` attribute \
                                      is just used for rustc unit tests \
                                      and will never be stable",
                                     cfg_fn!(rustc_attrs))),
    ("rustc_error", Whitelisted, Gated(Stability::Unstable,
                                       "rustc_attrs",
                                       "the `#[rustc_error]` attribute \
                                        is just used for rustc unit tests \
                                        and will never be stable",
                                       cfg_fn!(rustc_attrs))),
    ("rustc_if_this_changed", Whitelisted, Gated(Stability::Unstable,
                                                 "rustc_attrs",
                                                 "the `#[rustc_if_this_changed]` attribute \
                                                  is just used for rustc unit tests \
                                                  and will never be stable",
                                                 cfg_fn!(rustc_attrs))),
    ("rustc_then_this_would_need", Whitelisted, Gated(Stability::Unstable,
                                                      "rustc_attrs",
                                                      "the `#[rustc_if_this_changed]` attribute \
                                                       is just used for rustc unit tests \
                                                       and will never be stable",
                                                      cfg_fn!(rustc_attrs))),
    ("rustc_dirty", Whitelisted, Gated(Stability::Unstable,
                                       "rustc_attrs",
                                       "the `#[rustc_dirty]` attribute \
                                        is just used for rustc unit tests \
                                        and will never be stable",
                                       cfg_fn!(rustc_attrs))),
    ("rustc_clean", Whitelisted, Gated(Stability::Unstable,
                                       "rustc_attrs",
                                       "the `#[rustc_clean]` attribute \
                                        is just used for rustc unit tests \
                                        and will never be stable",
                                       cfg_fn!(rustc_attrs))),
    ("rustc_metadata_dirty", Whitelisted, Gated(Stability::Unstable,
                                                "rustc_attrs",
                                                "the `#[rustc_metadata_dirty]` attribute \
                                                 is just used for rustc unit tests \
                                                 and will never be stable",
                                                cfg_fn!(rustc_attrs))),
    ("rustc_metadata_clean", Whitelisted, Gated(Stability::Unstable,
                                                "rustc_attrs",
                                                "the `#[rustc_metadata_clean]` attribute \
                                                 is just used for rustc unit tests \
                                                 and will never be stable",
                                                cfg_fn!(rustc_attrs))),
    ("rustc_partition_reused", Whitelisted, Gated(Stability::Unstable,
                                                  "rustc_attrs",
                                                  "this attribute \
                                                   is just used for rustc unit tests \
                                                   and will never be stable",
                                                  cfg_fn!(rustc_attrs))),
    ("rustc_partition_translated", Whitelisted, Gated(Stability::Unstable,
                                                      "rustc_attrs",
                                                      "this attribute \
                                                       is just used for rustc unit tests \
                                                       and will never be stable",
                                                      cfg_fn!(rustc_attrs))),
    ("rustc_symbol_name", Whitelisted, Gated(Stability::Unstable,
                                             "rustc_attrs",
                                             "internal rustc attributes will never be stable",
                                             cfg_fn!(rustc_attrs))),
    ("rustc_item_path", Whitelisted, Gated(Stability::Unstable,
                                           "rustc_attrs",
                                           "internal rustc attributes will never be stable",
                                           cfg_fn!(rustc_attrs))),
    ("rustc_mir", Whitelisted, Gated(Stability::Unstable,
                                     "rustc_attrs",
                                     "the `#[rustc_mir]` attribute \
                                      is just used for rustc unit tests \
                                      and will never be stable",
                                     cfg_fn!(rustc_attrs))),
    ("rustc_inherit_overflow_checks", Whitelisted, Gated(Stability::Unstable,
                                                         "rustc_attrs",
                                                         "the `#[rustc_inherit_overflow_checks]` \
                                                          attribute is just used to control \
                                                          overflow checking behavior of several \
                                                          libcore functions that are inlined \
                                                          across crates and will never be stable",
                                                          cfg_fn!(rustc_attrs))),
    ("compiler_builtins", Whitelisted, Gated(Stability::Unstable,
                                             "compiler_builtins",
                                             "the `#[compiler_builtins]` attribute is used to \
                                              identify the `compiler_builtins` crate which \
                                              contains compiler-rt intrinsics and will never be \
                                              stable",
                                          cfg_fn!(compiler_builtins))),
    ("sanitizer_runtime", Whitelisted, Gated(Stability::Unstable,
                                             "sanitizer_runtime",
                                             "the `#[sanitizer_runtime]` attribute is used to \
                                              identify crates that contain the runtime of a \
                                              sanitizer and will never be stable",
                                             cfg_fn!(sanitizer_runtime))),
    ("profiler_runtime", Whitelisted, Gated(Stability::Unstable,
                                            "profiler_runtime",
                                            "the `#[profiler_runtime]` attribute is used to \
                                             identify the `profiler_builtins` crate which \
                                             contains the profiler runtime and will never be \
                                             stable",
                                            cfg_fn!(profiler_runtime))),

    ("allow_internal_unstable", Normal, Gated(Stability::Unstable,
                                              "allow_internal_unstable",
                                              EXPLAIN_ALLOW_INTERNAL_UNSTABLE,
                                              cfg_fn!(allow_internal_unstable))),

    ("allow_internal_unsafe", Normal, Gated(Stability::Unstable,
                                            "allow_internal_unsafe",
                                            EXPLAIN_ALLOW_INTERNAL_UNSAFE,
                                            cfg_fn!(allow_internal_unsafe))),
    ("fundamental", Whitelisted, Gated(Stability::Unstable,
                                       "fundamental",
                                       "the `#[fundamental]` attribute \
                                        is an experimental feature",
                                       cfg_fn!(fundamental))),

    ("proc_macro_derive", Normal, Ungated),

    ("rustc_copy_clone_marker", Whitelisted, Gated(Stability::Unstable,
                                                   "rustc_attrs",
                                                   "internal implementation detail",
                                                   cfg_fn!(rustc_attrs))),

    // FIXME: #14408 whitelist docs since rustdoc looks at them
    ("doc", Whitelisted, Ungated),

    // FIXME: #14406 these are processed in trans, which happens after the
    // lint pass
    ("cold", Whitelisted, Ungated),
    ("naked", Whitelisted, Gated(Stability::Unstable,
                                 "naked_functions",
                                 "the `#[naked]` attribute \
                                  is an experimental feature",
                                 cfg_fn!(naked_functions))),
    ("target_feature", Whitelisted, Gated(
        Stability::Unstable, "target_feature",
        "the `#[target_feature]` attribute is an experimental feature",
        cfg_fn!(target_feature))),
    ("export_name", Whitelisted, Ungated),
    ("inline", Whitelisted, Ungated),
    ("link", Whitelisted, Ungated),
    ("link_name", Whitelisted, Ungated),
    ("link_section", Whitelisted, Ungated),
    ("no_builtins", Whitelisted, Ungated),
    ("no_mangle", Whitelisted, Ungated),
    ("no_debug", Whitelisted, Gated(
        Stability::Deprecated("https://github.com/rust-lang/rust/issues/29721"),
        "no_debug",
        "the `#[no_debug]` attribute is an experimental feature",
        cfg_fn!(no_debug))),
    ("omit_gdb_pretty_printer_section", Whitelisted, Gated(Stability::Unstable,
                                                       "omit_gdb_pretty_printer_section",
                                                       "the `#[omit_gdb_pretty_printer_section]` \
                                                        attribute is just used for the Rust test \
                                                        suite",
                                                       cfg_fn!(omit_gdb_pretty_printer_section))),
    ("unsafe_destructor_blind_to_params",
     Normal,
     Gated(Stability::Deprecated("https://github.com/rust-lang/rust/issues/34761"),
           "dropck_parametricity",
           "unsafe_destructor_blind_to_params has been replaced by \
            may_dangle and will be removed in the future",
           cfg_fn!(dropck_parametricity))),
    ("may_dangle",
     Normal,
     Gated(Stability::Unstable,
           "dropck_eyepatch",
           "may_dangle has unstable semantics and may be removed in the future",
           cfg_fn!(dropck_eyepatch))),
    ("unwind", Whitelisted, Gated(Stability::Unstable,
                                  "unwind_attributes",
                                  "#[unwind] is experimental",
                                  cfg_fn!(unwind_attributes))),
    ("used", Whitelisted, Gated(
        Stability::Unstable, "used",
        "the `#[used]` attribute is an experimental feature",
        cfg_fn!(used))),

    // used in resolve
    ("prelude_import", Whitelisted, Gated(Stability::Unstable,
                                          "prelude_import",
                                          "`#[prelude_import]` is for use by rustc only",
                                          cfg_fn!(prelude_import))),

    // FIXME: #14407 these are only looked at on-demand so we can't
    // guarantee they'll have already been checked
    ("rustc_deprecated", Whitelisted, Ungated),
    ("must_use", Whitelisted, Ungated),
    ("stable", Whitelisted, Ungated),
    ("unstable", Whitelisted, Ungated),
    ("deprecated", Normal, Ungated),

    ("rustc_paren_sugar", Normal, Gated(Stability::Unstable,
                                        "unboxed_closures",
                                        "unboxed_closures are still evolving",
                                        cfg_fn!(unboxed_closures))),

    ("windows_subsystem", Whitelisted, Ungated),

    ("proc_macro_attribute", Normal, Gated(Stability::Unstable,
                                           "proc_macro",
                                           "attribute proc macros are currently unstable",
                                           cfg_fn!(proc_macro))),

    ("proc_macro", Normal, Gated(Stability::Unstable,
                                 "proc_macro",
                                 "function-like proc macros are currently unstable",
                                 cfg_fn!(proc_macro))),

    ("rustc_derive_registrar", Normal, Gated(Stability::Unstable,
                                             "rustc_derive_registrar",
                                             "used internally by rustc",
                                             cfg_fn!(rustc_attrs))),

    ("allow_fail", Normal, Gated(Stability::Unstable,
                                 "allow_fail",
                                 "allow_fail attribute is currently unstable",
                                 cfg_fn!(allow_fail))),

    // Crate level attributes
    ("crate_name", CrateLevel, Ungated),
    ("crate_type", CrateLevel, Ungated),
    ("crate_id", CrateLevel, Ungated),
    ("feature", CrateLevel, Ungated),
    ("no_start", CrateLevel, Ungated),
    ("no_main", CrateLevel, Ungated),
    ("no_builtins", CrateLevel, Ungated),
    ("recursion_limit", CrateLevel, Ungated),
    ("type_length_limit", CrateLevel, Ungated),
];

// cfg(...)'s that are feature gated
const GATED_CFGS: &[(&str, &str, fn(&Features) -> bool)] = &[
    // (name in cfg, feature, function to check if the feature is enabled)
    ("target_feature", "cfg_target_feature", cfg_fn!(cfg_target_feature)),
    ("target_vendor", "cfg_target_vendor", cfg_fn!(cfg_target_vendor)),
    ("target_thread_local", "cfg_target_thread_local", cfg_fn!(cfg_target_thread_local)),
    ("target_has_atomic", "cfg_target_has_atomic", cfg_fn!(cfg_target_has_atomic)),
];

/// A `cfg(...)` name that hit an entry in `GATED_CFGS`; `index` points back
/// into that table.
#[derive(Debug, Eq, PartialEq)]
pub struct GatedCfg {
    span: Span,
    index: usize,
}

impl GatedCfg {
    /// Returns `Some` iff `cfg`'s name appears in `GATED_CFGS`.
    pub fn gate(cfg: &ast::MetaItem) -> Option<GatedCfg> {
        let name = cfg.name().as_str();
        GATED_CFGS.iter()
                  .position(|info| info.0 == name)
                  .map(|idx| {
                      GatedCfg {
                          span: cfg.span,
                          index: idx
                      }
                  })
    }

    /// Emits a feature-gate error unless the feature is enabled or the span
    /// is allowed to use unstable features.
    pub fn check_and_emit(&self, sess: &ParseSess, features: &Features) {
        let (cfg, feature, has_feature) = GATED_CFGS[self.index];
        if !has_feature(features) && !self.span.allows_unstable() {
            let explain = format!("`cfg({})` is experimental and subject to change", cfg);
            emit_feature_err(sess, feature, self.span, GateIssue::Language, &explain);
        }
    }
}

// Shared state threaded through attribute/feature checking.
struct Context<'a> {
    features: &'a Features,
    parse_sess: &'a ParseSess,
    plugin_attributes: &'a [(String, AttributeType)],
}

// Emits a feature-gate error via `emit_feature_err` when `$has_feature`
// evaluates to false and the span does not allow unstable features.
macro_rules! gate_feature_fn {
    ($cx: expr, $has_feature: expr, $span: expr, $name: expr, $explain: expr) => {{
        let (cx, has_feature, span, name, explain) = ($cx, $has_feature, $span, $name, $explain);
        let has_feature: bool = has_feature(&$cx.features);
        debug!("gate_feature(feature = {:?}, span = {:?}); has? {}", name, span, has_feature);
        if !has_feature && !span.allows_unstable() {
            emit_feature_err(cx.parse_sess, name, span, GateIssue::Language, explain);
        }
    }}
}

macro_rules!
gate_feature {
    // Convenience wrapper around gate_feature_fn! keyed by a `Features` field.
    ($cx: expr, $feature: ident, $span: expr, $explain: expr) => {
        gate_feature_fn!($cx, |x:&Features| x.$feature, $span, stringify!($feature), $explain)
    }
}

impl<'a> Context<'a> {
    // Gates a single attribute: builtin entries use their table gate,
    // plugin-registered attributes are skipped, and remaining names fall
    // through to the `rustc_`/`derive_`/custom-attribute checks.
    fn check_attribute(&self, attr: &ast::Attribute, is_macro: bool) {
        debug!("check_attribute(attr = {:?})", attr);
        let name = unwrap_or!(attr.name(), return).as_str();
        for &(n, ty, ref gateage) in BUILTIN_ATTRIBUTES {
            if name == n {
                if let Gated(_, name, desc, ref has_feature) = *gateage {
                    gate_feature_fn!(self, has_feature, attr.span, name, desc);
                }
                debug!("check_attribute: {:?} is builtin, {:?}, {:?}", attr.path, ty, gateage);
                return;
            }
        }
        for &(ref n, ref ty) in self.plugin_attributes {
            if attr.path == &**n {
                // Plugins can't gate attributes, so we don't check for it
                // unlike the code above; we only use this loop to
                // short-circuit to avoid the checks below
                debug!("check_attribute: {:?} is registered by a plugin, {:?}", attr.path, ty);
                return;
            }
        }
        if name.starts_with("rustc_") {
            gate_feature!(self, rustc_attrs, attr.span,
                          "unless otherwise specified, attributes \
                           with the prefix `rustc_` \
                           are reserved for internal compiler diagnostics");
        } else if name.starts_with("derive_") {
            gate_feature!(self, custom_derive, attr.span, EXPLAIN_DERIVE_UNDERSCORE);
        } else if !attr::is_known(attr) {
            // Only run the custom attribute lint during regular
            // feature gate checking. Macro gating runs
            // before the plugin attributes are registered
            // so we skip this then
            if !is_macro {
                gate_feature!(self, custom_attribute, attr.span,
                              &format!("The attribute `{}` is currently \
                                        unknown to the compiler and \
                                        may have meaning \
                                        added to it in the future",
                                       attr.path));
            }
        }
    }
}

/// Entry point for gating one attribute with no plugin attributes registered
/// (`is_macro` is passed as `true`).
pub fn check_attribute(attr: &ast::Attribute, parse_sess: &ParseSess, features: &Features) {
    let cx = Context { features: features, parse_sess: parse_sess, plugin_attributes: &[] };
    cx.check_attribute(attr, true);
}

/// Looks up the version string in which an accepted feature became stable.
pub fn find_lang_feature_accepted_version(feature: &str) -> Option<&'static str> {
    ACCEPTED_FEATURES.iter().find(|t| t.0 == feature).map(|t| t.1)
}

// Returns the tracking-issue number for `feature`, searching the active list
// first; panics if the feature is not declared in any table.
fn find_lang_feature_issue(feature: &str) -> Option<u32> {
    if let Some(info) = ACTIVE_FEATURES.iter().find(|t| t.0 == feature) {
        let issue = info.2;
        // FIXME (#28244): enforce that active features have issue numbers
        // assert!(issue.is_some())
        issue
    } else {
        // search in Accepted, Removed, or Stable Removed features
        let found = ACCEPTED_FEATURES.iter().chain(REMOVED_FEATURES).chain(STABLE_REMOVED_FEATURES)
            .find(|t| t.0 == feature);
        match found {
            Some(&(_, _, issue)) => issue,
            None => panic!("Feature `{}` is not declared anywhere", feature),
        }
    }
}

/// Distinguishes language feature gates (issue looked up in the tables here)
/// from library gates (issue supplied by the caller).
pub enum GateIssue {
    Language,
    Library(Option<u32>)
}

/// Builds and immediately emits the feature-gate diagnostic.
pub fn emit_feature_err(sess: &ParseSess, feature: &str, span: Span, issue: GateIssue,
                        explain: &str) {
    feature_err(sess, feature, span, issue, explain).emit();
}

/// Builds the feature-gate diagnostic, appending the tracking-issue number
/// when one is known; the caller decides when to emit it.
pub fn feature_err<'a>(sess: &'a ParseSess, feature: &str, span: Span, issue: GateIssue,
                       explain: &str) -> DiagnosticBuilder<'a> {
    let diag = &sess.span_diagnostic;

    let issue = match issue {
        GateIssue::Language => find_lang_feature_issue(feature),
        GateIssue::Library(lib) => lib,
    };

    let mut err = if let Some(n) = issue {
        diag.struct_span_err(span, &format!("{} (see issue #{})", explain, n))
    } else {
        diag.struct_span_err(span, explain)
    };

    // #23973: do not suggest `#![feature(...)]` if we are in beta/stable
    if sess.unstable_features.is_nightly_build() {
        err.help(&format!("add #![feature({})] to the \
                           crate attributes to enable",
                          feature));
    }
    err
}

// Shared explanation strings for feature-gate diagnostics.
const EXPLAIN_BOX_SYNTAX: &'static str =
    "box expression syntax is experimental; you can call `Box::new` instead.";

pub const EXPLAIN_STMT_ATTR_SYNTAX: &'static str =
    "attributes on non-item statements and expressions are experimental.";

pub const EXPLAIN_ASM: &'static str =
    "inline assembly is not stable enough for use and is subject to change";

pub const EXPLAIN_GLOBAL_ASM: &'static str =
    "`global_asm!` is not stable enough for use and is subject to change";

pub const EXPLAIN_LOG_SYNTAX: &'static str =
    "`log_syntax!` is not stable enough for use and is subject to change";

pub const EXPLAIN_CONCAT_IDENTS: &'static str =
    "`concat_idents` is not stable enough for use and is subject to change";

pub const EXPLAIN_TRACE_MACROS: &'static str =
    "`trace_macros` is not stable enough for use and is subject to change";

pub const EXPLAIN_ALLOW_INTERNAL_UNSTABLE: &'static str =
    "allow_internal_unstable side-steps feature gating and stability checks";

pub const EXPLAIN_ALLOW_INTERNAL_UNSAFE: &'static str =
    "allow_internal_unsafe side-steps the unsafe_code lint";

pub const EXPLAIN_CUSTOM_DERIVE: &'static str =
    "`#[derive]` for custom traits is deprecated and will be removed in the future.";

pub const EXPLAIN_DEPR_CUSTOM_DERIVE: &'static str =
    "`#[derive]` for custom traits is deprecated and will be removed in the future. \
    Prefer using procedural macro custom derive.";

pub const EXPLAIN_DERIVE_UNDERSCORE: &'static str =
    "attributes of the form `#[derive_*]` are reserved for the compiler";

pub const EXPLAIN_VIS_MATCHER: &'static str =
    ":vis fragment specifier is experimental and subject to change";

pub const EXPLAIN_PLACEMENT_IN: &'static str =
    "placement-in expression syntax is experimental and subject to change.";

pub const EXPLAIN_UNSIZED_TUPLE_COERCION: &'static str =
    "Unsized tuple coercion is not stable enough for use and is subject to change";

// AST visitor that reports uses of gated features after macro expansion.
struct PostExpansionVisitor<'a> {
    context: &'a Context<'a>,
}

// Like gate_feature!, but skips the check entirely for spans that are
// allowed to use unstable features.
macro_rules! gate_feature_post {
    ($cx: expr, $feature: ident, $span: expr, $explain: expr) => {{
        let (cx, span) = ($cx, $span);
        if !span.allows_unstable() {
            gate_feature!(cx.context, $feature, span, $explain)
        }
    }}
}

impl<'a> PostExpansionVisitor<'a> {
    // Gates non-stable ABIs; the stable ones fall through to the empty arm.
    fn check_abi(&self, abi: Abi, span: Span) {
        match abi {
            Abi::RustIntrinsic => {
                gate_feature_post!(&self, intrinsics, span,
                                   "intrinsics are subject to change");
            },
            Abi::PlatformIntrinsic => {
                gate_feature_post!(&self, platform_intrinsics, span,
                                   "platform intrinsics are experimental and possibly buggy");
            },
            Abi::Vectorcall => {
                gate_feature_post!(&self, abi_vectorcall, span,
                                   "vectorcall is experimental and subject to change");
            },
            Abi::Thiscall => {
                gate_feature_post!(&self, abi_thiscall, span,
                                   "thiscall is experimental and subject to change");
            },
            Abi::RustCall => {
                gate_feature_post!(&self, unboxed_closures, span,
                                   "rust-call ABI is subject to change");
            },
            Abi::SysV64 => {
                gate_feature_post!(&self, abi_sysv64, span,
                                   "sysv64 ABI is experimental and subject to change");
            },
            Abi::PtxKernel => {
                gate_feature_post!(&self, abi_ptx, span,
                                   "PTX ABIs are experimental and subject to change");
            },
            Abi::Unadjusted => {
                gate_feature_post!(&self, abi_unadjusted, span,
                                   "unadjusted ABI is an implementation detail and perma-unstable");
            },
            Abi::Msp430Interrupt => {
                gate_feature_post!(&self, abi_msp430_interrupt, span,
                                   "msp430-interrupt ABI is experimental and subject to change");
            },
            Abi::X86Interrupt => {
                gate_feature_post!(&self, abi_x86_interrupt, span,
                                   "x86-interrupt ABI is experimental and subject to change");
            },
            // Stable
            Abi::Cdecl |
            Abi::Stdcall |
            Abi::Fastcall |
            Abi::Aapcs |
            Abi::Win64 |
            Abi::Rust |
            Abi::C |
            Abi::System => {}
        }
    }
}

// True if the meta item contains any literal other than a string in a
// name-value position (such literals are gated by `attr_literals`).
fn contains_novel_literal(item: &ast::MetaItem) -> bool {
    use ast::MetaItemKind::*;
    use ast::NestedMetaItemKind::*;

    match item.node {
        Word => false,
        NameValue(ref lit) => !lit.node.is_str(),
        List(ref list) => list.iter().any(|li| {
            match li.node {
                MetaItem(ref mi) => contains_novel_literal(mi),
                Literal(_) => true,
            }
        }),
    }
}

impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
    fn visit_attribute(&mut self, attr: &ast::Attribute) {
        if !attr.span.allows_unstable() {
            // check for gated attributes
            self.context.check_attribute(attr, false);
        }

        // `#[doc(cfg(...))]` is gated separately by `doc_cfg`.
        if attr.check_name("doc") {
            if let Some(content) = attr.meta_item_list() {
                if content.len() == 1 && content[0].check_name("cfg") {
                    gate_feature_post!(&self, doc_cfg, attr.span,
                        "#[doc(cfg(...))] is experimental"
                    );
                }
            }
        }

        if self.context.features.proc_macro && attr::is_known(attr) {
            return
        }

        let meta = panictry!(attr.parse_meta(self.context.parse_sess));
        if contains_novel_literal(&meta) {
            gate_feature_post!(&self, attr_literals, attr.span,
                               "non-string literals in attributes, or string \
                                literals in top-level positions, are experimental");
        }
    }

    fn visit_name(&mut self, sp: Span, name: ast::Name) {
        if !name.as_str().is_ascii() {
            gate_feature_post!(&self, non_ascii_idents, sp,
                               "non-ascii idents are not fully supported.");
        }
    }

    // Gates item-level features; attribute-driven gates key off `i.attrs`.
    fn visit_item(&mut self, i: &'a ast::Item) {
        match i.node {
            ast::ItemKind::ExternCrate(_) => {
                if attr::contains_name(&i.attrs[..], "macro_reexport") {
                    gate_feature_post!(&self, macro_reexport, i.span,
                                       "macros reexports are experimental \
                                        and possibly buggy");
                }
            }

            ast::ItemKind::ForeignMod(ref foreign_module) => {
                self.check_abi(foreign_module.abi, i.span);
            }

            ast::ItemKind::Fn(..)
=> {
                if attr::contains_name(&i.attrs[..], "plugin_registrar") {
                    gate_feature_post!(&self, plugin_registrar, i.span,
                                       "compiler plugins are experimental and possibly buggy");
                }
                if attr::contains_name(&i.attrs[..], "start") {
                    gate_feature_post!(&self, start, i.span,
                                      "a #[start] function is an experimental \
                                       feature whose signature may change \
                                       over time");
                }
                if attr::contains_name(&i.attrs[..], "main") {
                    gate_feature_post!(&self, main, i.span,
                                       "declaration of a nonstandard #[main] \
                                        function may change over time, for now \
                                        a top-level `fn main()` is required");
                }
                if attr::contains_name(&i.attrs[..], "must_use") {
                    gate_feature_post!(&self, fn_must_use, i.span,
                                       "`#[must_use]` on functions is experimental");
                }
            }

            ast::ItemKind::Struct(..) => {
                // `#[simd]` is gated and additionally warned as deprecated in
                // favour of `#[repr(simd)]`.
                if attr::contains_name(&i.attrs[..], "simd") {
                    gate_feature_post!(&self, simd, i.span,
                                       "SIMD types are experimental and possibly buggy");
                    self.context.parse_sess.span_diagnostic.span_warn(i.span,
                                                                      "the `#[simd]` attribute \
                                                                       is deprecated, use \
                                                                       `#[repr(simd)]` instead");
                }
                for attr in &i.attrs {
                    if attr.path == "repr" {
                        for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
                            if item.check_name("simd") {
                                gate_feature_post!(&self, repr_simd, i.span,
                                                   "SIMD types are experimental \
                                                    and possibly buggy");
                            }
                            if item.check_name("align") {
                                gate_feature_post!(&self, repr_align, i.span,
                                                   "the struct `#[repr(align(u16))]` attribute \
                                                    is experimental");
                            }
                        }
                    }
                }
            }

            ast::ItemKind::DefaultImpl(..) => {
                gate_feature_post!(&self, optin_builtin_traits,
                                   i.span,
                                   "default trait implementations are experimental \
                                    and possibly buggy");
            }

            ast::ItemKind::Impl(_, polarity, defaultness, _, _, _, _) => {
                if polarity == ast::ImplPolarity::Negative {
                    gate_feature_post!(&self, optin_builtin_traits,
                                       i.span,
                                       "negative trait bounds are not yet fully implemented; \
                                        use marker types for now");
                }

                if let ast::Defaultness::Default = defaultness {
                    gate_feature_post!(&self, specialization,
                                       i.span,
                                       "specialization is unstable");
                }
            }

            ast::ItemKind::MacroDef(ast::MacroDef { legacy: false, .. }) => {
                let msg = "`macro` is experimental";
                gate_feature_post!(&self, decl_macro, i.span, msg);
            }

            _ => {}
        }

        visit::walk_item(self, i);
    }

    fn visit_foreign_item(&mut self, i: &'a ast::ForeignItem) {
        // Foreign items whose `link_name` starts with "llvm." link directly
        // to LLVM intrinsics, which is gated.
        let links_to_llvm = match attr::first_attr_value_str_by_name(&i.attrs, "link_name") {
            Some(val) => val.as_str().starts_with("llvm."),
            _ => false
        };
        if links_to_llvm {
            gate_feature_post!(&self, link_llvm_intrinsics, i.span,
                               "linking to LLVM intrinsics is experimental");
        }

        visit::walk_foreign_item(self, i)
    }

    fn visit_ty(&mut self, ty: &'a ast::Ty) {
        match ty.node {
            ast::TyKind::BareFn(ref bare_fn_ty) => {
                self.check_abi(bare_fn_ty.abi, ty.span);
            }
            ast::TyKind::ImplTrait(..) => {
                gate_feature_post!(&self, conservative_impl_trait, ty.span,
                                   "`impl Trait` is experimental");
            }
            ast::TyKind::Never => {
                gate_feature_post!(&self, never_type, ty.span,
                                   "The `!` type is experimental");
            },
            _ => {}
        }
        visit::walk_ty(self, ty)
    }

    // A `-> !` return type is skipped here so the `never_type` gate does not
    // fire for it; other return types are visited normally.
    fn visit_fn_ret_ty(&mut self, ret_ty: &'a ast::FunctionRetTy) {
        if let ast::FunctionRetTy::Ty(ref output_ty) = *ret_ty {
            if output_ty.node != ast::TyKind::Never {
                self.visit_ty(output_ty)
            }
        }
    }

    fn visit_expr(&mut self, e: &'a ast::Expr) {
        match e.node {
            ast::ExprKind::Box(_) => {
                gate_feature_post!(&self, box_syntax, e.span, EXPLAIN_BOX_SYNTAX);
            }
            ast::ExprKind::Type(..) => {
                gate_feature_post!(&self, type_ascription, e.span,
                                   "type ascription is experimental");
            }
            ast::ExprKind::Range(_, _, ast::RangeLimits::Closed) => {
                gate_feature_post!(&self, inclusive_range_syntax,
                                   e.span,
                                   "inclusive range syntax is experimental");
            }
            ast::ExprKind::InPlace(..) => {
                gate_feature_post!(&self, placement_in_syntax, e.span, EXPLAIN_PLACEMENT_IN);
            }
            ast::ExprKind::Lit(ref lit) => {
                // Only explicitly-suffixed 128-bit integer literals are gated.
                if let ast::LitKind::Int(_, ref ty) = lit.node {
                    match *ty {
                        ast::LitIntType::Signed(ast::IntTy::I128) |
                        ast::LitIntType::Unsigned(ast::UintTy::U128) => {
                            gate_feature_post!(&self, i128_type, e.span,
                                               "128-bit integers are not stable");
                        }
                        _ => {}
                    }
                }
            }
            ast::ExprKind::Catch(_) => {
                gate_feature_post!(&self, catch_expr, e.span, "`catch` expression is experimental");
            }
            _ => {}
        }
        visit::walk_expr(self, e);
    }

    fn visit_pat(&mut self, pattern: &'a ast::Pat) {
        match pattern.node {
            PatKind::Slice(_, Some(_), ref last) if !last.is_empty() => {
                gate_feature_post!(&self, advanced_slice_patterns,
                                   pattern.span,
                                   "multiple-element slice matches anywhere \
                                    but at the end of a slice (e.g. \
                                    `[0, ..xs, 0]`) are experimental")
            }
            PatKind::Slice(..) => {
                gate_feature_post!(&self, slice_patterns,
                                   pattern.span,
                                   "slice pattern syntax is experimental");
            }
            PatKind::Box(..) => {
                gate_feature_post!(&self, box_patterns,
                                   pattern.span,
                                   "box pattern syntax is experimental");
            }
            PatKind::Range(_, _, RangeEnd::Excluded) => {
                gate_feature_post!(&self, exclusive_range_pattern, pattern.span,
                                   "exclusive range pattern syntax is experimental");
            }
            _ => {}
        }
        visit::walk_pat(self, pattern)
    }

    fn visit_fn(&mut self,
                fn_kind: FnKind<'a>,
                fn_decl: &'a ast::FnDecl,
                span: Span,
                _node_id: NodeId) {
        // check for const fn declarations
        if let FnKind::ItemFn(_, _, _, Spanned { node: ast::Constness::Const, .. }, _, _, _) =
            fn_kind {
            gate_feature_post!(&self, const_fn, span, "const fn is unstable");
        }
        // stability of const fn methods are covered in
        // visit_trait_item and visit_impl_item below; this is
        // because default methods don't pass through this
        // point.

        match fn_kind {
            FnKind::ItemFn(_, _, _, _, abi, _, _) |
            FnKind::Method(_, &ast::MethodSig { abi, .. }, _, _) => {
                self.check_abi(abi, span);
            }
            _ => {}
        }
        visit::walk_fn(self, fn_kind, fn_decl, span);
    }

    fn visit_trait_item(&mut self, ti: &'a ast::TraitItem) {
        match ti.node {
            ast::TraitItemKind::Method(ref sig, ref block) => {
                // Only methods without a default body get their ABI checked
                // here; bodied defaults go through visit_fn.
                if block.is_none() {
                    self.check_abi(sig.abi, ti.span);
                }
                if sig.constness.node == ast::Constness::Const {
                    gate_feature_post!(&self, const_fn, ti.span, "const fn is unstable");
                }
            }
            ast::TraitItemKind::Type(_, Some(_)) => {
                gate_feature_post!(&self, associated_type_defaults, ti.span,
                                   "associated type defaults are unstable");
            }
            _ => {}
        }
        visit::walk_trait_item(self, ti);
    }

    fn visit_impl_item(&mut self, ii: &'a ast::ImplItem) {
        if ii.defaultness == ast::Defaultness::Default {
            gate_feature_post!(&self, specialization,
                               ii.span,
                               "specialization is unstable");
        }

        match ii.node {
            ast::ImplItemKind::Method(ref sig, _) => {
                if sig.constness.node == ast::Constness::Const {
                    gate_feature_post!(&self, const_fn, ii.span, "const fn is unstable");
                }
            }
            _ => {}
        }
        visit::walk_impl_item(self, ii);
    }

    // Only the first attribute's span is reported for gated generic-parameter
    // attributes.
    fn visit_generics(&mut self, g: &'a ast::Generics) {
        for t in &g.ty_params {
            if !t.attrs.is_empty() {
                gate_feature_post!(&self, generic_param_attrs, t.attrs[0].span,
                                   "attributes on type parameter bindings are experimental");
            }
        }
        visit::walk_generics(self, g)
    }

    fn visit_lifetime_def(&mut self, lifetime_def: &'a ast::LifetimeDef) {
        if !lifetime_def.attrs.is_empty() {
            gate_feature_post!(&self, generic_param_attrs, lifetime_def.attrs[0].span,
                               "attributes on lifetime bindings are experimental");
        }
        visit::walk_lifetime_def(self, lifetime_def)
    }
}

pub fn get_features(span_handler: &Handler, krate_attrs: &[ast::Attribute]) ->
Features { let mut features = Features::new(); let mut feature_checker = MutexFeatureChecker::default(); for attr in krate_attrs { if !attr.check_name("feature") { continue } match attr.meta_item_list() { None => { span_err!(span_handler, attr.span, E0555, "malformed feature attribute, expected #![feature(...)]"); } Some(list) => { for mi in list { let name = if let Some(word) = mi.word() { word.name() } else { span_err!(span_handler, mi.span, E0556, "malformed feature, expected just one word"); continue }; if let Some(&(_, _, _, set)) = ACTIVE_FEATURES.iter() .find(|& &(n, _, _, _)| name == n) { set(&mut features, mi.span); feature_checker.collect(&features, mi.span); } else if let Some(&(_, _, _)) = REMOVED_FEATURES.iter() .find(|& &(n, _, _)| name == n) .or_else(|| STABLE_REMOVED_FEATURES.iter() .find(|& &(n, _, _)| name == n)) { span_err!(span_handler, mi.span, E0557, "feature has been removed"); } else if let Some(&(_, _, _)) = ACCEPTED_FEATURES.iter() .find(|& &(n, _, _)| name == n) { features.declared_stable_lang_features.push((name, mi.span)); } else { features.declared_lib_features.push((name, mi.span)); } } } } } feature_checker.check(span_handler); features } // A collector for mutually-exclusive features and their flag spans #[derive(Default)] struct MutexFeatureChecker { proc_macro: Option<Span>, custom_attribute: Option<Span>, } impl MutexFeatureChecker { // If this method turns out to be a hotspot due to branching, // the branching can be eliminated by modifying `set!()` to set these spans // only for the features that need to be checked for mutual exclusion. 
fn collect(&mut self, features: &Features, span: Span) { if features.proc_macro { // If self.proc_macro is None, set to Some(span) self.proc_macro = self.proc_macro.or(Some(span)); } if features.custom_attribute { self.custom_attribute = self.custom_attribute.or(Some(span)); } } fn check(self, handler: &Handler) { if let (Some(pm_span), Some(ca_span)) = (self.proc_macro, self.custom_attribute) { handler.struct_span_err(pm_span, "Cannot use `#![feature(proc_macro)]` and \ `#![feature(custom_attribute)] at the same time") .span_note(ca_span, "`#![feature(custom_attribute)]` declared here") .emit(); panic!(FatalError); } } } pub fn check_crate(krate: &ast::Crate, sess: &ParseSess, features: &Features, plugin_attributes: &[(String, AttributeType)], unstable: UnstableFeatures) { maybe_stage_features(&sess.span_diagnostic, krate, unstable); let ctx = Context { features, parse_sess: sess, plugin_attributes, }; visit::walk_crate(&mut PostExpansionVisitor { context: &ctx }, krate); } #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum UnstableFeatures { /// Hard errors for unstable features are active, as on /// beta/stable channels. Disallow, /// Allow features to be activated, as on nightly. Allow, /// Errors are bypassed for bootstrapping. This is required any time /// during the build that feature-related lints are set to warn or above /// because the build turns on warnings-as-errors and uses lots of unstable /// features. As a result, this is always required for building Rust itself. Cheat } impl UnstableFeatures { pub fn from_environment() -> UnstableFeatures { // Whether this is a feature-staged build, i.e. 
on the beta or stable channel let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); // Whether we should enable unstable features for bootstrapping let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok(); match (disable_unstable_features, bootstrap) { (_, true) => UnstableFeatures::Cheat, (true, _) => UnstableFeatures::Disallow, (false, _) => UnstableFeatures::Allow } } pub fn is_nightly_build(&self) -> bool { match *self { UnstableFeatures::Allow | UnstableFeatures::Cheat => true, _ => false, } } } fn maybe_stage_features(span_handler: &Handler, krate: &ast::Crate, unstable: UnstableFeatures) { let allow_features = match unstable { UnstableFeatures::Allow => true, UnstableFeatures::Disallow => false, UnstableFeatures::Cheat => true }; if !allow_features { for attr in &krate.attrs { if attr.check_name("feature") { let release_channel = option_env!("CFG_RELEASE_CHANNEL").unwrap_or("(unknown)"); span_err!(span_handler, attr.span, E0554, "#![feature] may not be used on the {} release channel", release_channel); } } } } correct comment re feature-checking tooling The featureck.py that this comment referred to was removed in 9dd3c54a (March 2016). // Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Feature gating //! //! This module implements the gating necessary for preventing certain compiler //! features from being used by default. This module will crawl a pre-expanded //! AST to ensure that there are no features which are used that are not //! enabled. //! //! 
Features are enabled in programs via the crate-level attributes of //! `#![feature(...)]` with a comma-separated list of features. //! //! For the purpose of future feature-tracking, once code for detection of feature //! gate usage is added, *do not remove it again* even once the feature //! becomes stable. use self::AttributeType::*; use self::AttributeGate::*; use abi::Abi; use ast::{self, NodeId, PatKind, RangeEnd}; use attr; use codemap::Spanned; use syntax_pos::Span; use errors::{DiagnosticBuilder, Handler, FatalError}; use visit::{self, FnKind, Visitor}; use parse::ParseSess; use symbol::Symbol; use std::ascii::AsciiExt; use std::env; macro_rules! set { (proc_macro) => {{ fn f(features: &mut Features, span: Span) { features.declared_lib_features.push((Symbol::intern("proc_macro"), span)); features.proc_macro = true; } f as fn(&mut Features, Span) }}; ($field: ident) => {{ fn f(features: &mut Features, _: Span) { features.$field = true; } f as fn(&mut Features, Span) }} } macro_rules! declare_features { ($((active, $feature: ident, $ver: expr, $issue: expr),)+) => { /// Represents active features that are currently being implemented or /// currently being considered for addition/removal. const ACTIVE_FEATURES: &'static [(&'static str, &'static str, Option<u32>, fn(&mut Features, Span))] = &[$((stringify!($feature), $ver, $issue, set!($feature))),+]; /// A set of features to be used by later passes. 
        pub struct Features {
            /// #![feature] attrs for stable language features, for error reporting
            pub declared_stable_lang_features: Vec<(Symbol, Span)>,
            /// #![feature] attrs for non-language (library) features
            pub declared_lib_features: Vec<(Symbol, Span)>,
            // One boolean flag per active feature, generated by the macro.
            $(pub $feature: bool),+
        }

        impl Features {
            /// All feature flags off; nothing declared.
            pub fn new() -> Features {
                Features {
                    declared_stable_lang_features: Vec::new(),
                    declared_lib_features: Vec::new(),
                    $($feature: false),+
                }
            }
        }
    };

    ($((removed, $feature: ident, $ver: expr, $issue: expr),)+) => {
        /// Represents unstable features which have since been removed (it was once Active)
        const REMOVED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[
            $((stringify!($feature), $ver, $issue)),+
        ];
    };

    ($((stable_removed, $feature: ident, $ver: expr, $issue: expr),)+) => {
        /// Represents stable features which have since been removed (it was once Accepted)
        const STABLE_REMOVED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[
            $((stringify!($feature), $ver, $issue)),+
        ];
    };

    ($((accepted, $feature: ident, $ver: expr, $issue: expr),)+) => {
        /// These language features have since been Accepted (each was once Active)
        const ACCEPTED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[
            $((stringify!($feature), $ver, $issue)),+
        ];
    }
}

// If you change this, please modify src/doc/unstable-book as well.
//
// Don't ever remove anything from this list; set them to 'Removed'.
//
// The version numbers here correspond to the version in which the current status
// was set. This is most important for knowing when a particular feature became
// stable (active).
//
// NB: tools/tidy/src/features.rs parses this information directly out of the
// source, so take care when modifying it.

declare_features!
// Active (unstable) features, part 1 of the list. Tuple layout per entry:
// (status, feature name, version the status was set in, tracking issue).
(
    (active, asm, "1.0.0", Some(29722)),
    (active, concat_idents, "1.0.0", Some(29599)),
    (active, link_args, "1.0.0", Some(29596)),
    (active, log_syntax, "1.0.0", Some(29598)),
    (active, non_ascii_idents, "1.0.0", Some(28979)),
    (active, plugin_registrar, "1.0.0", Some(29597)),
    (active, thread_local, "1.0.0", Some(29594)),
    (active, trace_macros, "1.0.0", Some(29598)),

    // rustc internal, for now:
    (active, intrinsics, "1.0.0", None),
    (active, lang_items, "1.0.0", None),

    (active, link_llvm_intrinsics, "1.0.0", Some(29602)),
    (active, linkage, "1.0.0", Some(29603)),
    (active, quote, "1.0.0", Some(29601)),
    (active, simd, "1.0.0", Some(27731)),

    // rustc internal
    (active, rustc_diagnostic_macros, "1.0.0", None),
    (active, advanced_slice_patterns, "1.0.0", Some(23121)),
    (active, box_syntax, "1.0.0", Some(27779)),
    (active, placement_in_syntax, "1.0.0", Some(27779)),
    (active, unboxed_closures, "1.0.0", Some(29625)),

    (active, fundamental, "1.0.0", Some(29635)),
    (active, main, "1.0.0", Some(29634)),
    (active, needs_allocator, "1.4.0", Some(27389)),
    (active, on_unimplemented, "1.0.0", Some(29628)),
    (active, plugin, "1.0.0", Some(29597)),
    (active, simd_ffi, "1.0.0", Some(27731)),
    (active, start, "1.0.0", Some(29633)),
    (active, structural_match, "1.8.0", Some(31434)),
    (active, panic_runtime, "1.10.0", Some(32837)),
    (active, needs_panic_runtime, "1.10.0", Some(32837)),

    // OIBIT specific features
    (active, optin_builtin_traits, "1.0.0", Some(13231)),

    // macro reexport needs more discussion and stabilization
    (active, macro_reexport, "1.0.0", Some(29638)),

    // Allows use of #[staged_api]
    // rustc internal
    (active, staged_api, "1.0.0", None),

    // Allows using #![no_core]
    (active, no_core, "1.3.0", Some(29639)),

    // Allows using `box` in patterns; RFC 469
    (active, box_patterns, "1.0.0", Some(29641)),

    // Allows using the unsafe_destructor_blind_to_params attribute;
    // RFC 1238
    (active, dropck_parametricity, "1.3.0", Some(28498)),

    // Allows using the may_dangle attribute; RFC 1327
    (active, dropck_eyepatch, "1.10.0", Some(34761)),

    // Allows the use of custom attributes; RFC 572
    (active, custom_attribute, "1.0.0", Some(29642)),

    // Allows the use of #[derive(Anything)] as sugar for
    // #[derive_Anything].
    (active, custom_derive, "1.0.0", Some(29644)),

    // Allows the use of rustc_* attributes; RFC 572
    (active, rustc_attrs, "1.0.0", Some(29642)),

    // Allows the use of #[allow_internal_unstable]. This is an
    // attribute on macro_rules! and can't use the attribute handling
    // below (it has to be checked before expansion possibly makes
    // macros disappear).
    //
    // rustc internal
    (active, allow_internal_unstable, "1.0.0", None),

    // Allows the use of #[allow_internal_unsafe]. This is an
    // attribute on macro_rules! and can't use the attribute handling
    // below (it has to be checked before expansion possibly makes
    // macros disappear).
    //
    // rustc internal
    (active, allow_internal_unsafe, "1.0.0", None),

    // #23121. Array patterns have some hazards yet.
    (active, slice_patterns, "1.0.0", Some(23121)),

    // Allows the definition of `const fn` functions.
    (active, const_fn, "1.2.0", Some(24111)),

    // Allows indexing into constant arrays.
    (active, const_indexing, "1.4.0", Some(29947)),

    // Allows using #[prelude_import] on glob `use` items.
    //
    // rustc internal
    (active, prelude_import, "1.2.0", None),

    // Allows default type parameters to influence type inference.
    (active, default_type_parameter_fallback, "1.3.0", Some(27336)),

    // Allows associated type defaults
    (active, associated_type_defaults, "1.2.0", Some(29661)),

    // allow `repr(simd)`, and importing the various simd intrinsics
    (active, repr_simd, "1.4.0", Some(27731)),

    // Allows cfg(target_feature = "...").
    (active, cfg_target_feature, "1.4.0", Some(29717)),

    // allow `extern "platform-intrinsic" { ... }`
    (active, platform_intrinsics, "1.4.0", Some(27731)),

    // allow `#[unwind]`
    // rust runtime internal
    (active, unwind_attributes, "1.4.0", None),

    // allow the use of `#[naked]` on functions.
    // Active (unstable) features, continued.
    (active, naked_functions, "1.9.0", Some(32408)),

    // allow `#[no_debug]`
    (active, no_debug, "1.5.0", Some(29721)),

    // allow `#[omit_gdb_pretty_printer_section]`
    // rustc internal.
    (active, omit_gdb_pretty_printer_section, "1.5.0", None),

    // Allows cfg(target_vendor = "...").
    (active, cfg_target_vendor, "1.5.0", Some(29718)),

    // Allow attributes on expressions and non-item statements
    (active, stmt_expr_attributes, "1.6.0", Some(15701)),

    // allow using type ascription in expressions
    (active, type_ascription, "1.6.0", Some(23416)),

    // Allows cfg(target_thread_local)
    (active, cfg_target_thread_local, "1.7.0", Some(29594)),

    // rustc internal
    (active, abi_vectorcall, "1.7.0", None),

    // a...b and ...b
    (active, inclusive_range_syntax, "1.7.0", Some(28237)),

    // X..Y patterns
    (active, exclusive_range_pattern, "1.11.0", Some(37854)),

    // impl specialization (RFC 1210)
    (active, specialization, "1.7.0", Some(31844)),

    // Allow Drop types in statics/const functions (RFC 1440)
    (active, drop_types_in_const, "1.9.0", Some(33156)),

    // Allows cfg(target_has_atomic = "...").
    (active, cfg_target_has_atomic, "1.9.0", Some(32976)),

    // Allows `impl Trait` in function return types.
    (active, conservative_impl_trait, "1.12.0", Some(34511)),

    // The `!` type
    (active, never_type, "1.13.0", Some(35121)),

    // Allows all literals in attribute lists and values of key-value pairs.
    (active, attr_literals, "1.13.0", Some(34981)),

    // Allows the sysV64 ABI to be specified on all platforms
    // instead of just the platforms on which it is the C ABI
    (active, abi_sysv64, "1.13.0", Some(36167)),

    // Allows untagged unions `union U { ... }`
    (active, untagged_unions, "1.13.0", Some(32836)),

    // Used to identify the `compiler_builtins` crate
    // rustc internal
    (active, compiler_builtins, "1.13.0", None),

    // Allows attributes on lifetime/type formal parameters in generics (RFC 1327)
    (active, generic_param_attrs, "1.11.0", Some(34761)),

    // Allows #[link(..., cfg(..))]
    (active, link_cfg, "1.14.0", Some(37406)),

    (active, use_extern_macros, "1.15.0", Some(35896)),

    // Allows #[target_feature(...)]
    (active, target_feature, "1.15.0", None),

    // `extern "ptx-*" fn()`
    (active, abi_ptx, "1.15.0", None),

    // The `i128` type
    (active, i128_type, "1.16.0", Some(35118)),

    // The `unadjusted` ABI. Perma unstable.
    (active, abi_unadjusted, "1.16.0", None),

    // Procedural macros 2.0.
    (active, proc_macro, "1.16.0", Some(38356)),

    // Declarative macros 2.0 (`macro`).
    (active, decl_macro, "1.17.0", Some(39412)),

    // Allows #[link(kind="static-nobundle"...]
    (active, static_nobundle, "1.16.0", Some(37403)),

    // `extern "msp430-interrupt" fn()`
    (active, abi_msp430_interrupt, "1.16.0", Some(38487)),

    // Used to identify crates that contain sanitizer runtimes
    // rustc internal
    (active, sanitizer_runtime, "1.17.0", None),

    // Used to identify crates that contain the profiler runtime
    // rustc internal
    (active, profiler_runtime, "1.18.0", None),

    // `extern "x86-interrupt" fn()`
    (active, abi_x86_interrupt, "1.17.0", Some(40180)),

    // Allows the `catch {...}` expression
    (active, catch_expr, "1.17.0", Some(31436)),

    // Allows `repr(align(u16))` struct attribute (RFC 1358)
    (active, repr_align, "1.17.0", Some(33626)),

    // Used to preserve symbols (see llvm.used)
    (active, used, "1.18.0", Some(40289)),

    // Allows module-level inline assembly by way of global_asm!()
    (active, global_asm, "1.18.0", Some(35119)),

    // Allows overlapping impls of marker traits
    (active, overlapping_marker_traits, "1.18.0", Some(29864)),

    // Allows use of the :vis macro fragment specifier
    (active, macro_vis_matcher, "1.18.0", Some(41022)),

    // rustc internal
    (active,
abi_thiscall, "1.19.0", None),

    // Allows a test to fail without failing the whole suite
    (active, allow_fail, "1.19.0", Some(42219)),

    // Allows unsized tuple coercion.
    (active, unsized_tuple_coercion, "1.20.0", Some(42877)),

    // global allocators and their internals
    (active, global_allocator, "1.20.0", None),
    (active, allocator_internals, "1.20.0", None),

    // #[doc(cfg(...))]
    (active, doc_cfg, "1.21.0", Some(43781)),

    // allow `#[must_use]` on functions (RFC 1940)
    (active, fn_must_use, "1.21.0", Some(43302)),
);

// Features that were once active and have since been removed entirely.
declare_features! (
    (removed, import_shadowing, "1.0.0", None),
    (removed, managed_boxes, "1.0.0", None),
    // Allows use of unary negate on unsigned integers, e.g. -e for e: u8
    (removed, negate_unsigned, "1.0.0", Some(29645)),
    (removed, reflect, "1.0.0", Some(27749)),
    // A way to temporarily opt out of opt in copy. This will *never* be accepted.
    (removed, opt_out_copy, "1.0.0", None),
    (removed, quad_precision_float, "1.0.0", None),
    (removed, struct_inherit, "1.0.0", None),
    (removed, test_removed_feature, "1.0.0", None),
    (removed, visible_private_types, "1.0.0", None),
    (removed, unsafe_no_drop_flag, "1.0.0", None),
    // Allows using items which are missing stability attributes
    // rustc internal
    (removed, unmarked_api, "1.0.0", None),
    (removed, pushpop_unsafe, "1.2.0", None),
    (removed, allocator, "1.0.0", None),
);

// Features that were once accepted (stable) and later removed.
declare_features! (
    (stable_removed, no_stack_check, "1.0.0", None),
);

// Features that graduated to stable.
declare_features! (
    (accepted, associated_types, "1.0.0", None),
    // allow overloading augmented assignment operations like `a += b`
    (accepted, augmented_assignments, "1.8.0", Some(28235)),
    // allow empty structs and enum variants with braces
    (accepted, braced_empty_structs, "1.8.0", Some(29720)),
    (accepted, default_type_params, "1.0.0", None),
    (accepted, globs, "1.0.0", None),
    (accepted, if_let, "1.0.0", None),
    // A temporary feature gate used to enable parser extensions needed
    // to bootstrap fix for #5723.
    (accepted, issue_5723_bootstrap, "1.0.0", None),
    (accepted, macro_rules, "1.0.0", None),
    // Allows using #![no_std]
    (accepted, no_std, "1.6.0", None),
    (accepted, slicing_syntax, "1.0.0", None),
    (accepted, struct_variant, "1.0.0", None),
    // These are used to test this portion of the compiler, they don't actually
    // mean anything
    (accepted, test_accepted_feature, "1.0.0", None),
    (accepted, tuple_indexing, "1.0.0", None),
    // Allows macros to appear in the type position.
    (accepted, type_macros, "1.13.0", Some(27245)),
    (accepted, while_let, "1.0.0", None),
    // Allows `#[deprecated]` attribute
    (accepted, deprecated, "1.9.0", Some(29935)),
    // `expr?`
    (accepted, question_mark, "1.13.0", Some(31436)),
    // Allows `..` in tuple (struct) patterns
    (accepted, dotdot_in_tuple_patterns, "1.14.0", Some(33627)),
    (accepted, item_like_imports, "1.15.0", Some(35120)),
    // Allows using `Self` and associated types in struct expressions and patterns.
    (accepted, more_struct_aliases, "1.16.0", Some(37544)),
    // elide `'static` lifetimes in `static`s and `const`s
    (accepted, static_in_const, "1.17.0", Some(35897)),
    // Allows field shorthands (`x` meaning `x: x`) in struct literal expressions.
    (accepted, field_init_shorthand, "1.17.0", Some(37340)),
    // Allows the definition recursive static items.
    (accepted, static_recursion, "1.17.0", Some(29719)),
    // pub(restricted) visibilities (RFC 1422)
    (accepted, pub_restricted, "1.18.0", Some(32409)),
    // The #![windows_subsystem] attribute
    (accepted, windows_subsystem, "1.18.0", Some(37499)),
    // Allows `break {expr}` with a value inside `loop`s.
    (accepted, loop_break_value, "1.19.0", Some(37339)),
    // Permits numeric fields in struct expressions and patterns.
    (accepted, relaxed_adts, "1.19.0", Some(35626)),
    // Coerces non capturing closures to function pointers
    (accepted, closure_to_fn_coercion, "1.19.0", Some(39817)),
    // Allows attributes on struct literal fields.
    (accepted, struct_field_attributes, "1.20.0", Some(38814)),
    // Allows the definition of associated constants in `trait` or `impl`
    // blocks.
    (accepted, associated_consts, "1.20.0", Some(29646)),
    // Usage of the `compile_error!` macro
    (accepted, compile_error, "1.20.0", Some(40872)),
    // See rust-lang/rfcs#1414. Allows code like `let x: &'static u32 = &42` to work.
    (accepted, rvalue_static_promotion, "1.21.0", Some(38865)),
);

// If you change this, please modify src/doc/unstable-book as well. You must
// move that documentation into the relevant place in the other docs, and
// remove the chapter on the flag.

/// How a builtin attribute interacts with the unused_attribute lint and
/// where it may be placed.
#[derive(PartialEq, Copy, Clone, Debug)]
pub enum AttributeType {
    /// Normal, builtin attribute that is consumed
    /// by the compiler before the unused_attribute check
    Normal,

    /// Builtin attribute that may not be consumed by the compiler
    /// before the unused_attribute check. These attributes
    /// will be ignored by the unused_attribute lint
    Whitelisted,

    /// Builtin attribute that is only allowed at the crate level
    CrateLevel,
}

/// Whether a builtin attribute is behind a feature gate.
pub enum AttributeGate {
    /// Is gated by a given feature gate, reason
    /// and function to check if enabled
    Gated(Stability, &'static str, &'static str, fn(&Features) -> bool),

    /// Ungated attribute, can be used on all release channels
    Ungated,
}

impl AttributeGate {
    // True for gates marked `Stability::Deprecated` — used by
    // `deprecated_attributes()` below.
    fn is_deprecated(&self) -> bool {
        match *self {
            Gated(Stability::Deprecated(_), ..) => true,
            _ => false,
        }
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Stability {
    Unstable,
    // Argument is tracking issue link.
    Deprecated(&'static str),
}

// fn() is not Debug
impl ::std::fmt::Debug for AttributeGate {
    fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        match *self {
            Gated(ref stab, name, expl, _) =>
                write!(fmt, "Gated({:?}, {}, {})", stab, name, expl),
            Ungated => write!(fmt, "Ungated")
        }
    }
}

// Produces the `fn(&Features) -> bool` predicate that reads one feature flag;
// stored in each Gated entry below.
macro_rules! cfg_fn {
    ($field: ident) => {{
        fn f(features: &Features) -> bool {
            features.$field
        }
        f as fn(&Features) -> bool
    }}
}

/// All builtin attributes whose gate is marked deprecated.
pub fn deprecated_attributes() -> Vec<&'static (&'static str, AttributeType, AttributeGate)> {
    BUILTIN_ATTRIBUTES.iter().filter(|a| a.2.is_deprecated()).collect()
}

/// Whether `attr` names any builtin attribute known to rustc/rustdoc.
pub fn is_builtin_attr(attr: &ast::Attribute) -> bool {
    BUILTIN_ATTRIBUTES.iter().any(|&(builtin_name, _, _)| attr.check_name(builtin_name))
}

// Attributes that have a special meaning to rustc or rustdoc
pub const BUILTIN_ATTRIBUTES: &'static [(&'static str, AttributeType, AttributeGate)] = &[
    // Normal attributes

    ("warn", Normal, Ungated),
    ("allow", Normal, Ungated),
    ("forbid", Normal, Ungated),
    ("deny", Normal, Ungated),

    ("macro_reexport", Normal, Ungated),
    ("macro_use", Normal, Ungated),
    ("macro_export", Normal, Ungated),
    ("plugin_registrar", Normal, Ungated),

    ("cfg", Normal, Ungated),
    ("cfg_attr", Normal, Ungated),
    ("main", Normal, Ungated),
    ("start", Normal, Ungated),
    ("test", Normal, Ungated),
    ("bench", Normal, Ungated),
    ("simd", Normal, Ungated),
    ("repr", Normal, Ungated),
    ("path", Normal, Ungated),
    ("abi", Normal, Ungated),
    ("automatically_derived", Normal, Ungated),
    ("no_mangle", Normal, Ungated),
    ("no_link", Normal, Ungated),
    ("derive", Normal, Ungated),
    ("should_panic", Normal, Ungated),
    ("ignore", Normal, Ungated),
    ("no_implicit_prelude", Normal, Ungated),
    ("reexport_test_harness_main", Normal, Ungated),
    ("link_args", Normal, Gated(Stability::Unstable,
                                "link_args",
                                "the `link_args` attribute is experimental and not \
                                 portable across platforms, it is recommended to \
                                 use `#[link(name = \"foo\")] instead",
                                cfg_fn!(link_args))),
    ("macro_escape", Normal, Ungated),

    // RFC #1445.
    // BUILTIN_ATTRIBUTES, continued. Each Gated entry is
    // (attribute name, placement type, Gated(stability, gating feature,
    // explanation shown to the user, enabled-check predicate)).
    ("structural_match", Whitelisted, Gated(Stability::Unstable,
                                            "structural_match",
                                            "the semantics of constant patterns is \
                                             not yet settled",
                                            cfg_fn!(structural_match))),

    ("plugin", CrateLevel, Gated(Stability::Unstable,
                                 "plugin",
                                 "compiler plugins are experimental \
                                  and possibly buggy",
                                 cfg_fn!(plugin))),

    ("no_std", CrateLevel, Ungated),
    ("no_core", CrateLevel, Gated(Stability::Unstable,
                                  "no_core",
                                  "no_core is experimental",
                                  cfg_fn!(no_core))),
    ("lang", Normal, Gated(Stability::Unstable,
                           "lang_items",
                           "language items are subject to change",
                           cfg_fn!(lang_items))),
    ("linkage", Whitelisted, Gated(Stability::Unstable,
                                   "linkage",
                                   "the `linkage` attribute is experimental \
                                    and not portable across platforms",
                                   cfg_fn!(linkage))),
    ("thread_local", Whitelisted, Gated(Stability::Unstable,
                                        "thread_local",
                                        "`#[thread_local]` is an experimental feature, and does \
                                         not currently handle destructors. There is no \
                                         corresponding `#[task_local]` mapping to the task \
                                         model",
                                        cfg_fn!(thread_local))),

    ("rustc_on_unimplemented", Normal, Gated(Stability::Unstable,
                                             "on_unimplemented",
                                             "the `#[rustc_on_unimplemented]` attribute \
                                              is an experimental feature",
                                             cfg_fn!(on_unimplemented))),
    ("global_allocator", Normal, Gated(Stability::Unstable,
                                       "global_allocator",
                                       "the `#[global_allocator]` attribute is \
                                        an experimental feature",
                                       cfg_fn!(global_allocator))),
    ("default_lib_allocator", Whitelisted, Gated(Stability::Unstable,
                                                 "allocator_internals",
                                                 "the `#[default_lib_allocator]` \
                                                  attribute is an experimental feature",
                                                 cfg_fn!(allocator_internals))),
    ("needs_allocator", Normal, Gated(Stability::Unstable,
                                      "allocator_internals",
                                      "the `#[needs_allocator]` \
                                       attribute is an experimental \
                                       feature",
                                      cfg_fn!(allocator_internals))),
    ("panic_runtime", Whitelisted, Gated(Stability::Unstable,
                                         "panic_runtime",
                                         "the `#[panic_runtime]` attribute is \
                                          an experimental feature",
                                         cfg_fn!(panic_runtime))),
    ("needs_panic_runtime", Whitelisted, Gated(Stability::Unstable,
                                               "needs_panic_runtime",
                                               "the `#[needs_panic_runtime]` \
                                                attribute is an experimental \
                                                feature",
                                               cfg_fn!(needs_panic_runtime))),
    ("rustc_variance", Normal, Gated(Stability::Unstable,
                                     "rustc_attrs",
                                     "the `#[rustc_variance]` attribute \
                                      is just used for rustc unit tests \
                                      and will never be stable",
                                     cfg_fn!(rustc_attrs))),
    ("rustc_error", Whitelisted, Gated(Stability::Unstable,
                                       "rustc_attrs",
                                       "the `#[rustc_error]` attribute \
                                        is just used for rustc unit tests \
                                        and will never be stable",
                                       cfg_fn!(rustc_attrs))),
    ("rustc_if_this_changed", Whitelisted, Gated(Stability::Unstable,
                                                 "rustc_attrs",
                                                 "the `#[rustc_if_this_changed]` attribute \
                                                  is just used for rustc unit tests \
                                                  and will never be stable",
                                                 cfg_fn!(rustc_attrs))),
    // NOTE(review): the message below says `rustc_if_this_changed` rather than
    // `rustc_then_this_would_need`; preserved byte-for-byte from the original.
    ("rustc_then_this_would_need", Whitelisted, Gated(Stability::Unstable,
                                                      "rustc_attrs",
                                                      "the `#[rustc_if_this_changed]` attribute \
                                                       is just used for rustc unit tests \
                                                       and will never be stable",
                                                      cfg_fn!(rustc_attrs))),
    ("rustc_dirty", Whitelisted, Gated(Stability::Unstable,
                                       "rustc_attrs",
                                       "the `#[rustc_dirty]` attribute \
                                        is just used for rustc unit tests \
                                        and will never be stable",
                                       cfg_fn!(rustc_attrs))),
    ("rustc_clean", Whitelisted, Gated(Stability::Unstable,
                                       "rustc_attrs",
                                       "the `#[rustc_clean]` attribute \
                                        is just used for rustc unit tests \
                                        and will never be stable",
                                       cfg_fn!(rustc_attrs))),
    ("rustc_metadata_dirty", Whitelisted, Gated(Stability::Unstable,
                                                "rustc_attrs",
                                                "the `#[rustc_metadata_dirty]` attribute \
                                                 is just used for rustc unit tests \
                                                 and will never be stable",
                                                cfg_fn!(rustc_attrs))),
    ("rustc_metadata_clean", Whitelisted, Gated(Stability::Unstable,
                                                "rustc_attrs",
                                                "the `#[rustc_metadata_clean]` attribute \
                                                 is just used for rustc unit tests \
                                                 and will never be stable",
                                                cfg_fn!(rustc_attrs))),
    ("rustc_partition_reused", Whitelisted, Gated(Stability::Unstable,
                                                  "rustc_attrs",
                                                  "this attribute \
                                                   is just used for rustc unit tests \
                                                   and will never be stable",
                                                  cfg_fn!(rustc_attrs))),
    ("rustc_partition_translated", Whitelisted, Gated(Stability::Unstable,
                                                      "rustc_attrs",
                                                      "this attribute \
                                                       is just used for rustc unit tests \
                                                       and will never be stable",
                                                      cfg_fn!(rustc_attrs))),
    ("rustc_symbol_name", Whitelisted, Gated(Stability::Unstable,
                                             "rustc_attrs",
                                             "internal rustc attributes will never be stable",
                                             cfg_fn!(rustc_attrs))),
    ("rustc_item_path", Whitelisted, Gated(Stability::Unstable,
                                           "rustc_attrs",
                                           "internal rustc attributes will never be stable",
                                           cfg_fn!(rustc_attrs))),
    ("rustc_mir", Whitelisted, Gated(Stability::Unstable,
                                     "rustc_attrs",
                                     "the `#[rustc_mir]` attribute \
                                      is just used for rustc unit tests \
                                      and will never be stable",
                                     cfg_fn!(rustc_attrs))),
    ("rustc_inherit_overflow_checks", Whitelisted, Gated(Stability::Unstable,
                                                         "rustc_attrs",
                                                         "the `#[rustc_inherit_overflow_checks]` \
                                                          attribute is just used to control \
                                                          overflow checking behavior of several \
                                                          libcore functions that are inlined \
                                                          across crates and will never be stable",
                                                          cfg_fn!(rustc_attrs))),
    ("compiler_builtins", Whitelisted, Gated(Stability::Unstable,
                                             "compiler_builtins",
                                             "the `#[compiler_builtins]` attribute is used to \
                                              identify the `compiler_builtins` crate which \
                                              contains compiler-rt intrinsics and will never be \
                                              stable",
                                          cfg_fn!(compiler_builtins))),
    ("sanitizer_runtime", Whitelisted, Gated(Stability::Unstable,
                                             "sanitizer_runtime",
                                             "the `#[sanitizer_runtime]` attribute is used to \
                                              identify crates that contain the runtime of a \
                                              sanitizer and will never be stable",
                                             cfg_fn!(sanitizer_runtime))),
    ("profiler_runtime", Whitelisted, Gated(Stability::Unstable,
                                            "profiler_runtime",
                                            "the `#[profiler_runtime]` attribute is used to \
                                             identify the `profiler_builtins` crate which \
                                             contains the profiler runtime and will never be \
                                             stable",
                                            cfg_fn!(profiler_runtime))),

    ("allow_internal_unstable", Normal, Gated(Stability::Unstable,
                                              "allow_internal_unstable",
                                              EXPLAIN_ALLOW_INTERNAL_UNSTABLE,
                                              cfg_fn!(allow_internal_unstable))),

    ("allow_internal_unsafe", Normal, Gated(Stability::Unstable,
                                            "allow_internal_unsafe",
                                            EXPLAIN_ALLOW_INTERNAL_UNSAFE,
                                            cfg_fn!(allow_internal_unsafe))),

    ("fundamental", Whitelisted, Gated(Stability::Unstable,
                                       "fundamental",
                                       "the `#[fundamental]` attribute \
                                        is an experimental feature",
                                       cfg_fn!(fundamental))),

    ("proc_macro_derive", Normal, Ungated),

    ("rustc_copy_clone_marker", Whitelisted, Gated(Stability::Unstable,
                                                   "rustc_attrs",
                                                   "internal implementation detail",
                                                   cfg_fn!(rustc_attrs))),

    // FIXME: #14408 whitelist docs since rustdoc looks at them
    ("doc", Whitelisted, Ungated),

    // FIXME: #14406 these are processed in trans, which happens after the
    // lint pass
    ("cold", Whitelisted, Ungated),
    ("naked", Whitelisted, Gated(Stability::Unstable,
                                 "naked_functions",
                                 "the `#[naked]` attribute \
                                  is an experimental feature",
                                 cfg_fn!(naked_functions))),
    ("target_feature", Whitelisted, Gated(
        Stability::Unstable, "target_feature",
        "the `#[target_feature]` attribute is an experimental feature",
        cfg_fn!(target_feature))),
    ("export_name", Whitelisted, Ungated),
    ("inline", Whitelisted, Ungated),
    ("link", Whitelisted, Ungated),
    ("link_name", Whitelisted, Ungated),
    ("link_section", Whitelisted, Ungated),
    ("no_builtins", Whitelisted, Ungated),
    ("no_mangle", Whitelisted, Ungated),
    ("no_debug", Whitelisted, Gated(
        Stability::Deprecated("https://github.com/rust-lang/rust/issues/29721"),
        "no_debug",
        "the `#[no_debug]` attribute is an experimental feature",
        cfg_fn!(no_debug))),
    ("omit_gdb_pretty_printer_section", Whitelisted, Gated(Stability::Unstable,
                                                           "omit_gdb_pretty_printer_section",
                                                           "the `#[omit_gdb_pretty_printer_section]` \
                                                            attribute is just used for the Rust test \
                                                            suite",
                                                           cfg_fn!(omit_gdb_pretty_printer_section))),
    ("unsafe_destructor_blind_to_params",
     Normal,
     Gated(Stability::Deprecated("https://github.com/rust-lang/rust/issues/34761"),
           "dropck_parametricity",
           "unsafe_destructor_blind_to_params has been replaced by \
            may_dangle and will be removed in the future",
           cfg_fn!(dropck_parametricity))),
    ("may_dangle",
     Normal,
     Gated(Stability::Unstable,
           "dropck_eyepatch",
           "may_dangle has unstable semantics and may be removed in the future",
cfg_fn!(dropck_eyepatch))), ("unwind", Whitelisted, Gated(Stability::Unstable, "unwind_attributes", "#[unwind] is experimental", cfg_fn!(unwind_attributes))), ("used", Whitelisted, Gated( Stability::Unstable, "used", "the `#[used]` attribute is an experimental feature", cfg_fn!(used))), // used in resolve ("prelude_import", Whitelisted, Gated(Stability::Unstable, "prelude_import", "`#[prelude_import]` is for use by rustc only", cfg_fn!(prelude_import))), // FIXME: #14407 these are only looked at on-demand so we can't // guarantee they'll have already been checked ("rustc_deprecated", Whitelisted, Ungated), ("must_use", Whitelisted, Ungated), ("stable", Whitelisted, Ungated), ("unstable", Whitelisted, Ungated), ("deprecated", Normal, Ungated), ("rustc_paren_sugar", Normal, Gated(Stability::Unstable, "unboxed_closures", "unboxed_closures are still evolving", cfg_fn!(unboxed_closures))), ("windows_subsystem", Whitelisted, Ungated), ("proc_macro_attribute", Normal, Gated(Stability::Unstable, "proc_macro", "attribute proc macros are currently unstable", cfg_fn!(proc_macro))), ("proc_macro", Normal, Gated(Stability::Unstable, "proc_macro", "function-like proc macros are currently unstable", cfg_fn!(proc_macro))), ("rustc_derive_registrar", Normal, Gated(Stability::Unstable, "rustc_derive_registrar", "used internally by rustc", cfg_fn!(rustc_attrs))), ("allow_fail", Normal, Gated(Stability::Unstable, "allow_fail", "allow_fail attribute is currently unstable", cfg_fn!(allow_fail))), // Crate level attributes ("crate_name", CrateLevel, Ungated), ("crate_type", CrateLevel, Ungated), ("crate_id", CrateLevel, Ungated), ("feature", CrateLevel, Ungated), ("no_start", CrateLevel, Ungated), ("no_main", CrateLevel, Ungated), ("no_builtins", CrateLevel, Ungated), ("recursion_limit", CrateLevel, Ungated), ("type_length_limit", CrateLevel, Ungated), ]; // cfg(...)'s that are feature gated const GATED_CFGS: &[(&str, &str, fn(&Features) -> bool)] = &[ // (name in cfg, feature, function 
// to check if the feature is enabled)
    ("target_feature", "cfg_target_feature", cfg_fn!(cfg_target_feature)),
    ("target_vendor", "cfg_target_vendor", cfg_fn!(cfg_target_vendor)),
    ("target_thread_local", "cfg_target_thread_local", cfg_fn!(cfg_target_thread_local)),
    ("target_has_atomic", "cfg_target_has_atomic", cfg_fn!(cfg_target_has_atomic)),
];

/// A use of a feature-gated `cfg(...)` predicate: the span where it appeared
/// and the index of the matching entry in `GATED_CFGS`.
#[derive(Debug, Eq, PartialEq)]
pub struct GatedCfg {
    span: Span,
    index: usize,
}

impl GatedCfg {
    /// Returns a `GatedCfg` if `cfg`'s name matches one of the gated cfg
    /// predicates in `GATED_CFGS`, or `None` for ungated predicates.
    pub fn gate(cfg: &ast::MetaItem) -> Option<GatedCfg> {
        let name = cfg.name().as_str();
        GATED_CFGS.iter()
                  .position(|info| info.0 == name)
                  .map(|idx| {
                      GatedCfg {
                          span: cfg.span,
                          index: idx,
                      }
                  })
    }

    /// Emits a feature-gate error unless the corresponding feature is enabled
    /// or the span is permitted to use unstable features.
    pub fn check_and_emit(&self, sess: &ParseSess, features: &Features) {
        let (cfg, feature, has_feature) = GATED_CFGS[self.index];
        if !has_feature(features) && !self.span.allows_unstable() {
            let explain = format!("`cfg({})` is experimental and subject to change", cfg);
            emit_feature_err(sess, feature, self.span, GateIssue::Language, &explain);
        }
    }
}

/// Shared state for attribute feature-gate checking.
struct Context<'a> {
    features: &'a Features,
    parse_sess: &'a ParseSess,
    plugin_attributes: &'a [(String, AttributeType)],
}

// Gates a feature with an explicit has-feature predicate; emits the standard
// feature-gate error when the feature is absent and the span is not exempt.
macro_rules! gate_feature_fn {
    ($cx: expr, $has_feature: expr, $span: expr, $name: expr, $explain: expr) => {{
        let (cx, has_feature, span, name, explain) = ($cx, $has_feature, $span, $name, $explain);
        let has_feature: bool = has_feature(&$cx.features);
        debug!("gate_feature(feature = {:?}, span = {:?}); has? {}", name, span, has_feature);
        if !has_feature && !span.allows_unstable() {
            emit_feature_err(cx.parse_sess, name, span, GateIssue::Language, explain);
        }
    }}
}

// Convenience wrapper: gate on a boolean field of `Features`.
macro_rules! gate_feature {
    ($cx: expr, $feature: ident, $span: expr, $explain: expr) => {
        gate_feature_fn!($cx, |x:&Features| x.$feature, $span, stringify!($feature), $explain)
    }
}

impl<'a> Context<'a> {
    // Checks one attribute against builtin, plugin-registered, and reserved
    // (`rustc_*`, `derive_*`) attribute namespaces, gating as appropriate.
    fn check_attribute(&self, attr: &ast::Attribute, is_macro: bool) {
        debug!("check_attribute(attr = {:?})", attr);
        let name = unwrap_or!(attr.name(), return).as_str();
        for &(n, ty, ref gateage) in BUILTIN_ATTRIBUTES {
            if name == n {
                if let Gated(_, name, desc, ref has_feature) = *gateage {
                    gate_feature_fn!(self, has_feature, attr.span, name, desc);
                }
                debug!("check_attribute: {:?} is builtin, {:?}, {:?}", attr.path, ty, gateage);
                return;
            }
        }
        for &(ref n, ref ty) in self.plugin_attributes {
            if attr.path == &**n {
                // Plugins can't gate attributes, so we don't check for it
                // unlike the code above; we only use this loop to
                // short-circuit to avoid the checks below
                debug!("check_attribute: {:?} is registered by a plugin, {:?}", attr.path, ty);
                return;
            }
        }
        if name.starts_with("rustc_") {
            gate_feature!(self, rustc_attrs, attr.span,
                          "unless otherwise specified, attributes with the prefix `rustc_` are reserved for internal compiler diagnostics");
        } else if name.starts_with("derive_") {
            gate_feature!(self, custom_derive, attr.span, EXPLAIN_DERIVE_UNDERSCORE);
        } else if !attr::is_known(attr) {
            // Only run the custom attribute lint during regular
            // feature gate checking. Macro gating runs
            // before the plugin attributes are registered
            // so we skip this then
            if !is_macro {
                gate_feature!(self, custom_attribute, attr.span,
                              &format!("The attribute `{}` is currently unknown to the compiler and may have meaning added to it in the future",
                                       attr.path));
            }
        }
    }
}

/// Entry point used during macro expansion: checks a single attribute with no
/// plugin attributes registered.
pub fn check_attribute(attr: &ast::Attribute, parse_sess: &ParseSess, features: &Features) {
    let cx = Context { features: features, parse_sess: parse_sess, plugin_attributes: &[] };
    cx.check_attribute(attr, true);
}

/// Looks up the version in which `feature` was stabilized, if it is accepted.
pub fn find_lang_feature_accepted_version(feature: &str) -> Option<&'static str> {
    ACCEPTED_FEATURES.iter().find(|t| t.0 == feature).map(|t| t.1)
}

// Finds the tracking-issue number of a language feature; panics if the feature
// is not declared in any of the feature tables.
fn find_lang_feature_issue(feature: &str) -> Option<u32> {
    if let Some(info) = ACTIVE_FEATURES.iter().find(|t| t.0 == feature) {
        let issue = info.2;
        // FIXME (#28244): enforce that active features have issue numbers
        // assert!(issue.is_some())
        issue
    } else {
        // search in Accepted, Removed, or Stable Removed features
        let found = ACCEPTED_FEATURES.iter().chain(REMOVED_FEATURES).chain(STABLE_REMOVED_FEATURES)
            .find(|t| t.0 == feature);
        match found {
            Some(&(_, _, issue)) => issue,
            None => panic!("Feature `{}` is not declared anywhere", feature),
        }
    }
}

/// Whether a gate error concerns a language feature or a library feature.
pub enum GateIssue {
    Language,
    Library(Option<u32>)
}

/// Builds and immediately emits a feature-gate error diagnostic.
pub fn emit_feature_err(sess: &ParseSess, feature: &str, span: Span, issue: GateIssue,
                        explain: &str) {
    feature_err(sess, feature, span, issue, explain).emit();
}

/// Builds (without emitting) the standard feature-gate error, appending the
/// tracking-issue number and, on nightly builds, a hint to enable the feature.
pub fn feature_err<'a>(sess: &'a ParseSess, feature: &str, span: Span, issue: GateIssue,
                       explain: &str) -> DiagnosticBuilder<'a> {
    let diag = &sess.span_diagnostic;

    let issue = match issue {
        GateIssue::Language => find_lang_feature_issue(feature),
        GateIssue::Library(lib) => lib,
    };

    let mut err = if let Some(n) = issue {
        diag.struct_span_err(span, &format!("{} (see issue #{})", explain, n))
    } else {
        diag.struct_span_err(span, explain)
    };

    // #23973: do not suggest `#![feature(...)]` if we are in beta/stable
    if sess.unstable_features.is_nightly_build() {
        err.help(&format!("add #![feature({})] to the crate attributes to enable", feature));
    }
    err
}

// Canned explanations shared between several gates below.
const EXPLAIN_BOX_SYNTAX: &'static str =
    "box expression syntax is experimental; you can call `Box::new` instead.";

pub const EXPLAIN_STMT_ATTR_SYNTAX: &'static str =
    "attributes on non-item statements and expressions are experimental.";

pub const EXPLAIN_ASM: &'static str =
    "inline assembly is not stable enough for use and is subject to change";

pub const EXPLAIN_GLOBAL_ASM: &'static str =
    "`global_asm!` is not stable enough for use and is subject to change";

pub const EXPLAIN_LOG_SYNTAX: &'static str =
    "`log_syntax!` is not stable enough for use and is subject to change";

pub const EXPLAIN_CONCAT_IDENTS: &'static str =
    "`concat_idents` is not stable enough for use and is subject to change";

pub const EXPLAIN_TRACE_MACROS: &'static str =
    "`trace_macros` is not stable enough for use and is subject to change";

pub const EXPLAIN_ALLOW_INTERNAL_UNSTABLE: &'static str =
    "allow_internal_unstable side-steps feature gating and stability checks";

pub const EXPLAIN_ALLOW_INTERNAL_UNSAFE: &'static str =
    "allow_internal_unsafe side-steps the unsafe_code lint";

pub const EXPLAIN_CUSTOM_DERIVE: &'static str =
    "`#[derive]` for custom traits is deprecated and will be removed in the future.";

pub const EXPLAIN_DEPR_CUSTOM_DERIVE: &'static str =
    "`#[derive]` for custom traits is deprecated and will be removed in the future. Prefer using procedural macro custom derive.";

pub const EXPLAIN_DERIVE_UNDERSCORE: &'static str =
    "attributes of the form `#[derive_*]` are reserved for the compiler";

pub const EXPLAIN_VIS_MATCHER: &'static str =
    ":vis fragment specifier is experimental and subject to change";

pub const EXPLAIN_PLACEMENT_IN: &'static str =
    "placement-in expression syntax is experimental and subject to change.";

pub const EXPLAIN_UNSIZED_TUPLE_COERCION: &'static str =
    "Unsized tuple coercion is not stable enough for use and is subject to change";

/// AST visitor that enforces feature gates after macro expansion.
struct PostExpansionVisitor<'a> {
    context: &'a Context<'a>,
}

// Like `gate_feature!` but additionally skips spans that are allowed to use
// unstable features (e.g. code expanded from stability-exempt macros).
macro_rules! gate_feature_post {
    ($cx: expr, $feature: ident, $span: expr, $explain: expr) => {{
        let (cx, span) = ($cx, $span);
        if !span.allows_unstable() {
            gate_feature!(cx.context, $feature, span, $explain)
        }
    }}
}

impl<'a> PostExpansionVisitor<'a> {
    // Gates unstable ABIs; stable ABIs fall through to the empty arm.
    fn check_abi(&self, abi: Abi, span: Span) {
        match abi {
            Abi::RustIntrinsic => {
                gate_feature_post!(&self, intrinsics, span,
                                   "intrinsics are subject to change");
            },
            Abi::PlatformIntrinsic => {
                gate_feature_post!(&self, platform_intrinsics, span,
                                   "platform intrinsics are experimental and possibly buggy");
            },
            Abi::Vectorcall => {
                gate_feature_post!(&self, abi_vectorcall, span,
                                   "vectorcall is experimental and subject to change");
            },
            Abi::Thiscall => {
                gate_feature_post!(&self, abi_thiscall, span,
                                   "thiscall is experimental and subject to change");
            },
            Abi::RustCall => {
                gate_feature_post!(&self, unboxed_closures, span,
                                   "rust-call ABI is subject to change");
            },
            Abi::SysV64 => {
                gate_feature_post!(&self, abi_sysv64, span,
                                   "sysv64 ABI is experimental and subject to change");
            },
            Abi::PtxKernel => {
                gate_feature_post!(&self, abi_ptx, span,
                                   "PTX ABIs are experimental and subject to change");
            },
            Abi::Unadjusted => {
                gate_feature_post!(&self, abi_unadjusted, span,
                                   "unadjusted ABI is an implementation detail and perma-unstable");
            },
            Abi::Msp430Interrupt => {
                gate_feature_post!(&self, abi_msp430_interrupt, span,
                                   "msp430-interrupt ABI is experimental and subject to change");
            },
            Abi::X86Interrupt => {
                gate_feature_post!(&self, abi_x86_interrupt, span,
                                   "x86-interrupt ABI is experimental and subject to change");
            },
            // Stable
            Abi::Cdecl |
            Abi::Stdcall |
            Abi::Fastcall |
            Abi::Aapcs |
            Abi::Win64 |
            Abi::Rust |
            Abi::C |
            Abi::System => {}
        }
    }
}

// Returns true if the meta item contains a literal that is not a string in a
// name-value position (such literals are themselves feature gated).
fn contains_novel_literal(item: &ast::MetaItem) -> bool {
    use ast::MetaItemKind::*;
    use ast::NestedMetaItemKind::*;

    match item.node {
        Word => false,
        NameValue(ref lit) => !lit.node.is_str(),
        List(ref list) => list.iter().any(|li| {
            match li.node {
                MetaItem(ref mi) => contains_novel_literal(mi),
                Literal(_) => true,
            }
        }),
    }
}

impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
    fn visit_attribute(&mut self, attr: &ast::Attribute) {
        if !attr.span.allows_unstable() {
            // check for gated attributes
            self.context.check_attribute(attr, false);
        }

        if attr.check_name("doc") {
            if let Some(content) = attr.meta_item_list() {
                if content.len() == 1 && content[0].check_name("cfg") {
                    gate_feature_post!(&self, doc_cfg, attr.span,
                        "#[doc(cfg(...))] is experimental"
                    );
                }
            }
        }

        if self.context.features.proc_macro && attr::is_known(attr) {
            return
        }

        let meta = panictry!(attr.parse_meta(self.context.parse_sess));
        if contains_novel_literal(&meta) {
            gate_feature_post!(&self, attr_literals, attr.span,
                               "non-string literals in attributes, or string literals in top-level positions, are experimental");
        }
    }

    fn visit_name(&mut self, sp: Span, name: ast::Name) {
        if !name.as_str().is_ascii() {
            gate_feature_post!(&self, non_ascii_idents, sp,
                               "non-ascii idents are not fully supported.");
        }
    }

    fn visit_item(&mut self, i: &'a ast::Item) {
        match i.node {
            ast::ItemKind::ExternCrate(_) => {
                if attr::contains_name(&i.attrs[..], "macro_reexport") {
                    gate_feature_post!(&self, macro_reexport, i.span,
                                       "macros reexports are experimental and possibly buggy");
                }
            }

            ast::ItemKind::ForeignMod(ref foreign_module) => {
                self.check_abi(foreign_module.abi, i.span);
            }

            ast::ItemKind::Fn(..)
            => {
                // Several whole-item gates are driven by attributes on fns.
                if attr::contains_name(&i.attrs[..], "plugin_registrar") {
                    gate_feature_post!(&self, plugin_registrar, i.span,
                                       "compiler plugins are experimental and possibly buggy");
                }
                if attr::contains_name(&i.attrs[..], "start") {
                    gate_feature_post!(&self, start, i.span,
                                       "a #[start] function is an experimental feature whose signature may change over time");
                }
                if attr::contains_name(&i.attrs[..], "main") {
                    gate_feature_post!(&self, main, i.span,
                                       "declaration of a nonstandard #[main] function may change over time, for now a top-level `fn main()` is required");
                }
                if attr::contains_name(&i.attrs[..], "must_use") {
                    gate_feature_post!(&self, fn_must_use, i.span,
                                       "`#[must_use]` on functions is experimental");
                }
            }

            ast::ItemKind::Struct(..) => {
                if attr::contains_name(&i.attrs[..], "simd") {
                    gate_feature_post!(&self, simd, i.span,
                                       "SIMD types are experimental and possibly buggy");
                    self.context.parse_sess.span_diagnostic.span_warn(i.span,
                                                                      "the `#[simd]` attribute is deprecated, use `#[repr(simd)]` instead");
                }
                for attr in &i.attrs {
                    if attr.path == "repr" {
                        for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
                            if item.check_name("simd") {
                                gate_feature_post!(&self, repr_simd, i.span,
                                                   "SIMD types are experimental and possibly buggy");
                            }
                            if item.check_name("align") {
                                gate_feature_post!(&self, repr_align, i.span,
                                                   "the struct `#[repr(align(u16))]` attribute is experimental");
                            }
                        }
                    }
                }
            }

            ast::ItemKind::DefaultImpl(..) => {
                gate_feature_post!(&self, optin_builtin_traits,
                                   i.span,
                                   "default trait implementations are experimental and possibly buggy");
            }

            ast::ItemKind::Impl(_, polarity, defaultness, _, _, _, _) => {
                if polarity == ast::ImplPolarity::Negative {
                    gate_feature_post!(&self, optin_builtin_traits,
                                       i.span,
                                       "negative trait bounds are not yet fully implemented; use marker types for now");
                }

                if let ast::Defaultness::Default = defaultness {
                    gate_feature_post!(&self, specialization,
                                       i.span,
                                       "specialization is unstable");
                }
            }

            ast::ItemKind::MacroDef(ast::MacroDef { legacy: false, .. }) => {
                let msg = "`macro` is experimental";
                gate_feature_post!(&self, decl_macro, i.span, msg);
            }

            _ => {}
        }

        visit::walk_item(self, i);
    }

    fn visit_foreign_item(&mut self, i: &'a ast::ForeignItem) {
        // Functions named after LLVM intrinsics link directly to them.
        let links_to_llvm = match attr::first_attr_value_str_by_name(&i.attrs, "link_name") {
            Some(val) => val.as_str().starts_with("llvm."),
            _ => false
        };
        if links_to_llvm {
            gate_feature_post!(&self, link_llvm_intrinsics, i.span,
                               "linking to LLVM intrinsics is experimental");
        }

        visit::walk_foreign_item(self, i)
    }

    fn visit_ty(&mut self, ty: &'a ast::Ty) {
        match ty.node {
            ast::TyKind::BareFn(ref bare_fn_ty) => {
                self.check_abi(bare_fn_ty.abi, ty.span);
            }
            ast::TyKind::ImplTrait(..) => {
                gate_feature_post!(&self, conservative_impl_trait, ty.span,
                                   "`impl Trait` is experimental");
            }
            ast::TyKind::Never => {
                gate_feature_post!(&self, never_type, ty.span,
                                   "The `!` type is experimental");
            },
            _ => {}
        }
        visit::walk_ty(self, ty)
    }

    fn visit_fn_ret_ty(&mut self, ret_ty: &'a ast::FunctionRetTy) {
        if let ast::FunctionRetTy::Ty(ref output_ty) = *ret_ty {
            // `-> !` is handled by the never_type gate in visit_ty via Never;
            // skip re-visiting it here.
            if output_ty.node != ast::TyKind::Never {
                self.visit_ty(output_ty)
            }
        }
    }

    fn visit_expr(&mut self, e: &'a ast::Expr) {
        match e.node {
            ast::ExprKind::Box(_) => {
                gate_feature_post!(&self, box_syntax, e.span, EXPLAIN_BOX_SYNTAX);
            }
            ast::ExprKind::Type(..)
            => {
                gate_feature_post!(&self, type_ascription, e.span,
                                   "type ascription is experimental");
            }
            ast::ExprKind::Range(_, _, ast::RangeLimits::Closed) => {
                gate_feature_post!(&self, inclusive_range_syntax,
                                   e.span,
                                   "inclusive range syntax is experimental");
            }
            ast::ExprKind::InPlace(..) => {
                gate_feature_post!(&self, placement_in_syntax, e.span, EXPLAIN_PLACEMENT_IN);
            }
            ast::ExprKind::Lit(ref lit) => {
                if let ast::LitKind::Int(_, ref ty) = lit.node {
                    match *ty {
                        ast::LitIntType::Signed(ast::IntTy::I128) |
                        ast::LitIntType::Unsigned(ast::UintTy::U128) => {
                            gate_feature_post!(&self, i128_type, e.span,
                                               "128-bit integers are not stable");
                        }
                        _ => {}
                    }
                }
            }
            ast::ExprKind::Catch(_) => {
                gate_feature_post!(&self, catch_expr, e.span,
                                   "`catch` expression is experimental");
            }
            _ => {}
        }
        visit::walk_expr(self, e);
    }

    fn visit_pat(&mut self, pattern: &'a ast::Pat) {
        match pattern.node {
            PatKind::Slice(_, Some(_), ref last) if !last.is_empty() => {
                gate_feature_post!(&self, advanced_slice_patterns,
                                   pattern.span,
                                   "multiple-element slice matches anywhere but at the end of a slice (e.g. `[0, ..xs, 0]`) are experimental")
            }
            PatKind::Slice(..) => {
                gate_feature_post!(&self, slice_patterns,
                                   pattern.span,
                                   "slice pattern syntax is experimental");
            }
            PatKind::Box(..) => {
                gate_feature_post!(&self, box_patterns,
                                   pattern.span,
                                   "box pattern syntax is experimental");
            }
            PatKind::Range(_, _, RangeEnd::Excluded) => {
                gate_feature_post!(&self, exclusive_range_pattern, pattern.span,
                                   "exclusive range pattern syntax is experimental");
            }
            _ => {}
        }
        visit::walk_pat(self, pattern)
    }

    fn visit_fn(&mut self,
                fn_kind: FnKind<'a>,
                fn_decl: &'a ast::FnDecl,
                span: Span,
                _node_id: NodeId) {
        // check for const fn declarations
        if let FnKind::ItemFn(_, _, _, Spanned { node: ast::Constness::Const, .. },
                              _, _, _) = fn_kind {
            gate_feature_post!(&self, const_fn, span, "const fn is unstable");
        }
        // stability of const fn methods are covered in
        // visit_trait_item and visit_impl_item below; this is
        // because default methods don't pass through this
        // point.

        match fn_kind {
            FnKind::ItemFn(_, _, _, _, abi, _, _) |
            FnKind::Method(_, &ast::MethodSig { abi, .. }, _, _) => {
                self.check_abi(abi, span);
            }
            _ => {}
        }
        visit::walk_fn(self, fn_kind, fn_decl, span);
    }

    fn visit_trait_item(&mut self, ti: &'a ast::TraitItem) {
        match ti.node {
            ast::TraitItemKind::Method(ref sig, ref block) => {
                // Only check the ABI of required methods (no body); methods
                // with a default body are checked via visit_fn.
                if block.is_none() {
                    self.check_abi(sig.abi, ti.span);
                }
                if sig.constness.node == ast::Constness::Const {
                    gate_feature_post!(&self, const_fn, ti.span, "const fn is unstable");
                }
            }
            ast::TraitItemKind::Type(_, Some(_)) => {
                gate_feature_post!(&self, associated_type_defaults, ti.span,
                                   "associated type defaults are unstable");
            }
            _ => {}
        }
        visit::walk_trait_item(self, ti);
    }

    fn visit_impl_item(&mut self, ii: &'a ast::ImplItem) {
        if ii.defaultness == ast::Defaultness::Default {
            gate_feature_post!(&self, specialization,
                               ii.span,
                               "specialization is unstable");
        }

        match ii.node {
            ast::ImplItemKind::Method(ref sig, _) => {
                if sig.constness.node == ast::Constness::Const {
                    gate_feature_post!(&self, const_fn, ii.span, "const fn is unstable");
                }
            }
            _ => {}
        }
        visit::walk_impl_item(self, ii);
    }

    fn visit_generics(&mut self, g: &'a ast::Generics) {
        for t in &g.ty_params {
            if !t.attrs.is_empty() {
                gate_feature_post!(&self, generic_param_attrs, t.attrs[0].span,
                                   "attributes on type parameter bindings are experimental");
            }
        }
        visit::walk_generics(self, g)
    }

    fn visit_lifetime_def(&mut self, lifetime_def: &'a ast::LifetimeDef) {
        if !lifetime_def.attrs.is_empty() {
            gate_feature_post!(&self, generic_param_attrs, lifetime_def.attrs[0].span,
                               "attributes on lifetime bindings are experimental");
        }
        visit::walk_lifetime_def(self, lifetime_def)
    }
}

pub fn get_features(span_handler: &Handler, krate_attrs: &[ast::Attribute]) ->
Features { let mut features = Features::new(); let mut feature_checker = MutexFeatureChecker::default(); for attr in krate_attrs { if !attr.check_name("feature") { continue } match attr.meta_item_list() { None => { span_err!(span_handler, attr.span, E0555, "malformed feature attribute, expected #![feature(...)]"); } Some(list) => { for mi in list { let name = if let Some(word) = mi.word() { word.name() } else { span_err!(span_handler, mi.span, E0556, "malformed feature, expected just one word"); continue }; if let Some(&(_, _, _, set)) = ACTIVE_FEATURES.iter() .find(|& &(n, _, _, _)| name == n) { set(&mut features, mi.span); feature_checker.collect(&features, mi.span); } else if let Some(&(_, _, _)) = REMOVED_FEATURES.iter() .find(|& &(n, _, _)| name == n) .or_else(|| STABLE_REMOVED_FEATURES.iter() .find(|& &(n, _, _)| name == n)) { span_err!(span_handler, mi.span, E0557, "feature has been removed"); } else if let Some(&(_, _, _)) = ACCEPTED_FEATURES.iter() .find(|& &(n, _, _)| name == n) { features.declared_stable_lang_features.push((name, mi.span)); } else { features.declared_lib_features.push((name, mi.span)); } } } } } feature_checker.check(span_handler); features } // A collector for mutually-exclusive features and their flag spans #[derive(Default)] struct MutexFeatureChecker { proc_macro: Option<Span>, custom_attribute: Option<Span>, } impl MutexFeatureChecker { // If this method turns out to be a hotspot due to branching, // the branching can be eliminated by modifying `set!()` to set these spans // only for the features that need to be checked for mutual exclusion. 
fn collect(&mut self, features: &Features, span: Span) { if features.proc_macro { // If self.proc_macro is None, set to Some(span) self.proc_macro = self.proc_macro.or(Some(span)); } if features.custom_attribute { self.custom_attribute = self.custom_attribute.or(Some(span)); } } fn check(self, handler: &Handler) { if let (Some(pm_span), Some(ca_span)) = (self.proc_macro, self.custom_attribute) { handler.struct_span_err(pm_span, "Cannot use `#![feature(proc_macro)]` and \ `#![feature(custom_attribute)] at the same time") .span_note(ca_span, "`#![feature(custom_attribute)]` declared here") .emit(); panic!(FatalError); } } } pub fn check_crate(krate: &ast::Crate, sess: &ParseSess, features: &Features, plugin_attributes: &[(String, AttributeType)], unstable: UnstableFeatures) { maybe_stage_features(&sess.span_diagnostic, krate, unstable); let ctx = Context { features, parse_sess: sess, plugin_attributes, }; visit::walk_crate(&mut PostExpansionVisitor { context: &ctx }, krate); } #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum UnstableFeatures { /// Hard errors for unstable features are active, as on /// beta/stable channels. Disallow, /// Allow features to be activated, as on nightly. Allow, /// Errors are bypassed for bootstrapping. This is required any time /// during the build that feature-related lints are set to warn or above /// because the build turns on warnings-as-errors and uses lots of unstable /// features. As a result, this is always required for building Rust itself. Cheat } impl UnstableFeatures { pub fn from_environment() -> UnstableFeatures { // Whether this is a feature-staged build, i.e. 
on the beta or stable channel let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); // Whether we should enable unstable features for bootstrapping let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok(); match (disable_unstable_features, bootstrap) { (_, true) => UnstableFeatures::Cheat, (true, _) => UnstableFeatures::Disallow, (false, _) => UnstableFeatures::Allow } } pub fn is_nightly_build(&self) -> bool { match *self { UnstableFeatures::Allow | UnstableFeatures::Cheat => true, _ => false, } } } fn maybe_stage_features(span_handler: &Handler, krate: &ast::Crate, unstable: UnstableFeatures) { let allow_features = match unstable { UnstableFeatures::Allow => true, UnstableFeatures::Disallow => false, UnstableFeatures::Cheat => true }; if !allow_features { for attr in &krate.attrs { if attr.check_name("feature") { let release_channel = option_env!("CFG_RELEASE_CHANNEL").unwrap_or("(unknown)"); span_err!(span_handler, attr.span, E0554, "#![feature] may not be used on the {} release channel", release_channel); } } } }
use std::cmp::Ordering;
use std::fmt;

use base::ast::is_operator_char;
use base::pos::{BytePos, Column, Line, Location, Span, Spanned, NO_EXPANSION};

use combine::primitives::{Consumed, Error as CombineError, Info, RangeStream};
use combine::combinator::EnvParser;
use combine::range::{take, take_while};
use combine::*;
use combine::char::{alpha_num, char, letter, spaces, string};
use combine_language::{LanguageEnv, LanguageDef, Identifier};

/// Input-stream wrapper that tracks the current source `Location`
/// (line/column/byte offset) as characters are consumed.
#[derive(Clone)]
pub struct LocatedStream<I> {
    location: Location,
    input: I,
}

impl<I> StreamOnce for LocatedStream<I>
    where I: StreamOnce<Item = char>,
{
    type Item = I::Item;
    type Range = I::Range;
    type Position = Location;

    fn uncons(&mut self) -> Result<Self::Item, CombineError<Self::Item, Self::Range>> {
        self.input
            .uncons()
            .map(|ch| {
                self.location.bump(ch);
                // HACK: The layout algorithm expects `1` indexing for columns -
                // this could be altered in the future though
                if self.location.column == Column::from(0) {
                    self.location.column = Column::from(1);
                }
                ch
            })
    }

    fn position(&self) -> Self::Position {
        self.location
    }
}

impl<'input, I> RangeStream for LocatedStream<I>
    where I: RangeStream<Item = char, Range = &'input str>,
{
    fn uncons_range(&mut self,
                    len: usize)
                    -> Result<Self::Range, CombineError<Self::Item, Self::Range>> {
        self.input
            .uncons_range(len)
            .map(|range| {
                // Keep the tracked location in sync with everything consumed.
                for ch in range.chars() {
                    self.location.bump(ch)
                }
                range
            })
    }

    fn uncons_while<F>(&mut self,
                       mut predicate: F)
                       -> Result<Self::Range, CombineError<Self::Item, Self::Range>>
        where F: FnMut(Self::Item) -> bool,
    {
        let location = &mut self.location;
        self.input.uncons_while(|t| {
            if predicate(t.clone()) {
                location.bump(t);
                true
            } else {
                false
            }
        })
    }
}

/// The three bracket kinds recognized by the lexer.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Delimiter {
    Brace,
    Bracket,
    Paren,
}

impl Delimiter {
    fn as_str(&self) -> &'static str {
        use self::Delimiter::*;
        match *self {
            Brace => "Brace",
            Bracket => "Bracket",
            Paren => "Paren",
        }
    }
}

impl fmt::Display for Delimiter {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.as_str().fmt(f)
    }
}

pub type Error<'input> = CombineError<Token<&'input str>, Token<&'input str>>;
pub type StrToken<'input> = Token<&'input str>;

/// A lexical token; identifier and operator tokens borrow from the input.
#[derive(Clone, PartialEq, Debug)]
pub enum Token<I> {
    Identifier(I),
    Operator(I),
    String(String),
    Char(char),
    Int(i64),
    Byte(u8),
    Float(f64),
    DocComment(String),
    Let,
    And,
    In,
    Type,
    Match,
    With,
    If,
    Then,
    Else,
    Open(Delimiter),
    Close(Delimiter),
    Lambda,
    RightArrow,
    Colon,
    Dot,
    Comma,
    Pipe,
    Equals,
    OpenBlock,
    CloseBlock,
    Semi,
    EOF,
}

impl<I> fmt::Display for Token<I> {
    // Displays only the token's kind name, never its payload.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Token::*;
        use self::Delimiter::*;
        let s = match *self {
            Identifier(_) => "Identifier",
            Operator(_) => "Operator",
            String(_) => "String",
            Char(_) => "Char",
            Int(_) => "Int",
            Byte(_) => "Byte",
            Float(_) => "Float",
            DocComment(_) => "DocComment",
            Let => "Let",
            And => "And",
            In => "In",
            Type => "Type",
            Match => "Match",
            With => "With",
            If => "If",
            Then => "Then",
            Else => "Else",
            Open(Brace) => "OpenBrace",
            Close(Brace) => "CloseBrace",
            Open(Paren) => "OpenParen",
            Close(Paren) => "CloseParen",
            Open(Bracket) => "OpenBracket",
            Close(Bracket) => "CloseBracket",
            Lambda => "Lambda",
            RightArrow => "RightArrow",
            Colon => "Colon",
            Dot => "Dot",
            Comma => "Comma",
            Pipe => "Pipe",
            Equals => "Equal",
            OpenBlock => "OpenBlock",
            CloseBlock => "CloseBlock",
            Semi => "Semi",
            EOF => "EOF",
        };
        s.fmt(f)
    }
}

pub type SpannedToken<'input> = Spanned<Token<&'input str>, Location>;

/// A layout context together with the location where it began.
#[derive(Copy, Clone, Debug)]
struct Offside {
    location: Location,
    context: Context,
}

#[derive(Copy, Clone, Debug, PartialEq)]
enum Context {
    /// Context which contains several expressions/declarations separated by semicolons
    Block { emit_semi: bool },
    /// A simple expression
    Expr,
    Let,
    Type,
    If,
    Delimiter(Delimiter),
    MatchClause,
    Lambda,
}

/// Parser passes the environment to each parser function
type LanguageParser<'input: 'lexer, 'lexer, I: 'lexer, T> =
    EnvParser<&'lexer Lexer<'input, I>, LocatedStream<I>, T>;

struct
Contexts { stack: Vec<Offside>, } impl Contexts { fn last(&self) -> Option<&Offside> { self.stack.last() } fn last_mut(&mut self) -> Option<&mut Offside> { self.stack.last_mut() } fn pop(&mut self) -> Option<Offside> { self.stack.pop() } fn push(&mut self, offside: Offside) -> Result<(), Error<'static>> { self.check_unindentation_limit(&offside)?; self.stack.push(offside); Ok(()) } fn check_unindentation_limit(&mut self, offside: &Offside) -> Result<(), Error<'static>> { let mut skip_block = false; for other_offside in self.stack.iter().rev() { match other_offside.context { Context::Lambda => { skip_block = true; continue; } Context::Delimiter(_) => return Ok(()), Context::Block { .. } if skip_block => continue, // New context should not be unindented past the closest enclosing block context Context::MatchClause | Context::Type | Context::Let | Context::Block { .. } if offside.location.column < other_offside.location.column => (), _ => continue, } debug!("Unindentation error: {:?} < {:?}", offside, other_offside); return Err(CombineError::Message("Line was unindented to far".into())); } Ok(()) } } pub struct Lexer<'input, I> where I: RangeStream<Item = char, Range = &'input str>, { env: LanguageEnv<'input, LocatedStream<I>>, input: Option<LocatedStream<I>>, unprocessed_tokens: Vec<SpannedToken<'input>>, indent_levels: Contexts, } impl<'input, I> Lexer<'input, I> where I: RangeStream<Item = char, Range = &'input str> + 'input, I::Range: fmt::Debug + 'input, { pub fn new(input: I) -> Lexer<'input, I> { let env = LanguageEnv::new(LanguageDef { ident: Identifier { start: letter().or(char('_')), rest: alpha_num() .or(char('_')) .or(char('\'')), // ["if", "then", "else", "let", "and", "in", "type", "case", "of"] // .iter() // .map(|x| (*x).into()) // .collect(), reserved: Vec::new(), }, op: Identifier { start: satisfy(is_operator_char), rest: satisfy(is_operator_char), reserved: Vec::new(), }, comment_start: satisfy(|_| false).map(|_| ()), comment_end: satisfy(|_| 
false).map(|_| ()), comment_line: satisfy(|_| false).map(|_| ()), }); Lexer { env: env, input: Some(LocatedStream { location: Location { line: Line::from(0), column: Column::from(1), absolute: BytePos::from(0), }, input: input, }), unprocessed_tokens: Vec::new(), indent_levels: Contexts { stack: Vec::new() }, } } fn parser<'a, T>(&'a self, parser: fn(&Lexer<'input, I>, LocatedStream<I>) -> ParseResult<T, LocatedStream<I>>) -> LanguageParser<'input, 'a, I, T> { env_parser(self, parser) } /// Parses an operator fn op<'a>(&'a self) -> LanguageParser<'input, 'a, I, &'input str> { self.parser(Lexer::parse_op) } fn parse_op(&self, input: LocatedStream<I>) -> ParseResult<&'input str, LocatedStream<I>> { let initial = input.clone(); let ((builtin, op), _) = (optional((char('#'), take_while(char::is_alphabetic))), try(self.env.op_())).parse_stream(input)?; let len = builtin.map_or(0, |(c, typ)| c.len_utf8() + typ.len()) + op.len(); take(len).parse_stream(initial) } fn ident<'a>(&'a self) -> LanguageParser<'input, 'a, I, Token<&'input str>> { self.parser(Lexer::parse_ident) } fn parse_ident(&self, input: LocatedStream<I>) -> ParseResult<Token<&'input str>, LocatedStream<I>> { self.env .range_identifier_() .map(Token::Identifier) .parse_stream(input) } fn layout_independent_token(&mut self, token: SpannedToken<'input>) -> Result<SpannedToken<'input>, Error<'input>> { layout(self, token) } fn id_to_keyword(&self, id: Token<&'input str>) -> Token<&'input str> { match id { Token::Identifier("let") => Token::Let, Token::Identifier("type") => Token::Type, Token::Identifier("and") => Token::And, Token::Identifier("in") => Token::In, Token::Identifier("match") => Token::Match, Token::Identifier("with") => Token::With, Token::Identifier("if") => Token::If, Token::Identifier("then") => Token::Then, Token::Identifier("else") => Token::Else, id => id, } } fn next_token(&mut self) -> SpannedToken<'input> { if let Some(token) = self.unprocessed_tokens.pop() { return token; } let input = 
match self.input.take() { Some(input) => input, None => { let loc = Location { line: Line::from(0), column: Column::from(1), absolute: BytePos::from(0), }; return SpannedToken { span: Span { start: loc, end: loc, expansion_id: NO_EXPANSION, }, value: Token::EOF, }; } }; let mut start = input.position(); let result = self.next_token_(&mut start, input); match result { Ok((token, input)) => { let input = input.into_inner(); let end = input.position(); let input = match self.env.white_space().parse_stream(input.clone()) { Ok(((), input)) => input.into_inner(), Err(_) => input, }; self.input = Some(input); SpannedToken { span: Span { start: start, end: end, expansion_id: NO_EXPANSION, }, value: token, } } Err(err) => { let err = err.into_inner(); debug!("Error tokenizing: {:?}", err); let span = Span { start: start, end: start, expansion_id: NO_EXPANSION, }; SpannedToken { span: span, value: Token::CloseBlock, } } } } fn next_token_(&mut self, location: &mut Location, mut input: LocatedStream<I>) -> ParseResult<Token<&'input str>, LocatedStream<I>> { loop { // Skip all whitespace before the token let parsed_spaces: Result<_, _> = spaces().parse_lazy(input).into(); let (_, new_input) = parsed_spaces?; input = new_input.into_inner(); *location = input.position(); let (first, one_char_consumed) = any().parse_stream(input.clone())?; // Decide how to tokenize depending on what the first char is // ie if its an operator then more operators will follow if is_operator_char(first) || first == '#' { let (op, new_input) = self.op().parse_stream(input)?; input = new_input.into_inner(); let tok = match op { "=" => Token::Equals, "->" => Token::RightArrow, "|" => Token::Pipe, _ => { if op.starts_with("///") { let mut comment = String::new(); let ((), new_input) = spaces().parse_stream(input)?; input = new_input.into_inner(); // Merge consecutive line comments loop { let mut line = satisfy(|c| c != '\n' && c != '\r').iter(input); comment.extend(line.by_ref()); comment.push('\n'); let 
((), new_input) = line.into_result(())?;
input = new_input.into_inner();
let mut p = spaces().with(try(string("///"))).skip(spaces());
match p.parse_stream(input.clone()) {
    Ok((_, new_input)) => input = new_input.into_inner(),
    Err(_) => break,
}
}
// Drop the trailing '\n' pushed by the last iteration
comment.pop();
return Ok((Token::DocComment(comment), Consumed::Consumed(input)));
} else if op.starts_with("/**") {
    return self.block_doc_comment(input);
} else if op.starts_with("//") {
    // Plain line comment: skip to end of line and retry tokenizing
    let result: Result<_, _> = skip_many(satisfy(|c| c != '\n' && c != '\r'))
        .parse_lazy(input)
        .into();
    let ((), new_input) = result?;
    input = new_input.into_inner();
    continue;
} else if op.starts_with("/*") {
    // Skip over normal comments and try to parse a new token
    let ((), new_input) = self.skip_block_comment(input)?;
    input = new_input.into_inner();
    continue;
} else {
    Token::Operator(op)
}
}
};
return Ok((tok, Consumed::Consumed(input)));
} else if first.is_digit(10) {
    let int_or_byte = (self.env.integer_(), optional(char('b')));
    return try(int_or_byte.skip(not_followed_by(string("."))))
        .and_then(|(i, byte)| {
            if byte.is_none() {
                Ok(Token::Int(i))
            } else {
                // A `u8` holds 0..=255; the previous `i <= 256` bound let
                // `256b` silently wrap to 0 through the `as u8` cast.
                if i >= 0 && i <= 255 {
                    Ok(Token::Byte(i as u8))
                } else {
                    Err(CombineError::Message("Byte literal out of range".into()))
                }
            }
        })
        .or(self.env.float_().map(Token::Float))
        .parse_stream(input);
} else if first.is_alphabetic() || first == '_' {
    return self.ident().map(|t| self.id_to_keyword(t)).parse_stream(input);
}
let tok = match first {
    '(' => {
        match self.ident().map(|t| self.id_to_keyword(t)).parse_stream(input) {
            Ok(x) => return Ok(x),
            Err(_) => Token::Open(Delimiter::Paren),
        }
    }
    ')' => Token::Close(Delimiter::Paren),
    '{' => Token::Open(Delimiter::Brace),
    '}' => Token::Close(Delimiter::Brace),
    '[' => Token::Open(Delimiter::Bracket),
    ']' => Token::Close(Delimiter::Bracket),
    ':' => Token::Colon,
    ',' => Token::Comma,
    '.'
=> Token::Dot, '\\' => Token::Lambda, '"' => return self.env.string_literal_().map(Token::String).parse_stream(input), '\'' => return self.env.char_literal_().map(Token::Char).parse_stream(input), _ => Token::EOF, }; return Ok((tok, one_char_consumed)); } } fn skip_block_comment(&self, input: LocatedStream<I>) -> ParseResult<(), LocatedStream<I>> { let mut block_doc_comment = parser(|input| { let mut input = Consumed::Empty(input); loop { match input.clone() .combine(|input| try(string("*/")).parse_lazy(input).into()) { Ok((_, input)) => return Ok(((), input)), Err(_) => { match input.combine(|input| any().parse_stream(input)) { Ok((_, rest)) => { input = rest; } Err(err) => return Err(err), } } } } }); block_doc_comment.parse_stream(input) } fn block_doc_comment(&self, input: LocatedStream<I>) -> ParseResult<Token<&'input str>, LocatedStream<I>> { let mut block_doc_comment = parser(|input| { let ((), mut input) = spaces().parse_stream(input)?; let mut out = String::new(); loop { match input.clone() .combine(|input| try(string("*/")).parse_lazy(input).into()) { Ok((_, input)) => return Ok((Token::DocComment(out), input)), Err(_) => { match input.combine(|input| any().parse_stream(input)) { Ok((c, rest)) => { out.push(c); input = rest } Err(err) => return Err(err), } } } } }); block_doc_comment.parse_stream(input) } fn layout_token(&mut self, token: SpannedToken<'input>, layout_token: Token<&'input str>) -> SpannedToken<'input> { let span = token.span; self.unprocessed_tokens.push(token); SpannedToken { span: span, value: layout_token, } } fn uncons_next(&mut self) -> Result<SpannedToken<'input>, Error<'input>> { let token = self.next_token(); match self.layout_independent_token(token)? { SpannedToken { value: Token::EOF, .. 
} => Err(Error::end_of_input()), token => { debug!("Lex {:?}", token); Ok(token) } } } } fn layout<'input, I>(lexer: &mut Lexer<'input, I>, mut token: SpannedToken<'input>) -> Result<SpannedToken<'input>, Error<'input>> where I: RangeStream<Item = char, Range = &'input str> + 'input, I::Range: fmt::Debug, { if token.value == Token::EOF { token.span.start.column = Column::from(0); } loop { // Retrieve the current indentation level if one exists let offside = match (&token.value, lexer.indent_levels.last().cloned()) { (_, Some(offside)) => offside, (&Token::EOF, None) => return Ok(token), (_, None) => { lexer.indent_levels .push(Offside { context: Context::Block { emit_semi: false }, location: token.span.start, })?; debug!("Default block {:?}", token); return Ok(lexer.layout_token(token, Token::OpenBlock)); } }; debug!("--------\n{:?}\n{:?}", token, offside); let ordering = token.span.start.column.cmp(&offside.location.column); // If it is closing token we remove contexts until a context for that token is found if [Token::In, Token::CloseBlock, Token::Else, Token::Close(Delimiter::Brace), Token::Close(Delimiter::Bracket), Token::Close(Delimiter::Paren), Token::Comma] .iter() .any(|t| *t == token.value) { if token.value == Token::Comma && (offside.context == Context::Delimiter(Delimiter::Brace) || offside.context == Context::Delimiter(Delimiter::Bracket)) { return Ok(token); } lexer.indent_levels.pop(); match (&token.value, offside.context) { (&Token::Else, Context::If) => (), (&Token::Close(close_delim), Context::Delimiter(context_delim)) if close_delim == context_delim => return Ok(token), (&Token::CloseBlock, Context::Block { .. }) => { if let Some(offside) = lexer.indent_levels.last_mut() { // The enclosing block should not emit a block separator for the next // expression if let Context::Block { ref mut emit_semi, .. 
} = offside.context { *emit_semi = false; } } return Ok(token); } (&Token::In, Context::Let) | (&Token::In, Context::Type) => { let location = { let offside = lexer.indent_levels.last_mut().expect("No top level block found"); // The enclosing block should not emit a block separator for the next // expression if let Context::Block { ref mut emit_semi, .. } = offside.context { *emit_semi = false; } offside.location }; // Inject a block to ensure that a sequence of expressions end up in the `let` body // ``` // let x = 1 // a // b // ``` // `let x = 1 in {{ a; b }}` and not `{{ (let x = 1 in a) ; b }}` lexer.indent_levels .push(Offside { location: location, context: Context::Block { emit_semi: false }, })?; lexer.unprocessed_tokens.push(SpannedToken { span: token.span, value: Token::OpenBlock, }); return Ok(token); } (_, Context::Block { .. }) => { return Ok(lexer.layout_token(token, Token::CloseBlock)); } (_, _) => continue, } } // Next we check offside rules for each of the contexts match (offside.context, ordering) { (Context::Block { .. }, Ordering::Less) => { lexer.unprocessed_tokens.push(token.clone()); token.value = Token::CloseBlock; continue; } (Context::Block { emit_semi }, Ordering::Equal) => { match token.value { _ if emit_semi => { if let Some(offside) = lexer.indent_levels.last_mut() { // The enclosing block should not emit a block separator for the // next expression if let Context::Block { ref mut emit_semi, .. } = offside.context { *emit_semi = false; } } return Ok(lexer.layout_token(token, Token::Semi)); } Token::DocComment(_) | Token::OpenBlock => (), _ => { // If it is the first token in a sequence we dont want to emit a // separator if let Some(offside) = lexer.indent_levels.last_mut() { if let Context::Block { ref mut emit_semi, .. 
} = offside.context { *emit_semi = true; } } } } } (Context::Expr, _) | (Context::Lambda, _) => { if ordering != Ordering::Greater { lexer.indent_levels.pop(); continue; } } (Context::MatchClause, _) => { // Must allow `|` to be on the same line if ordering == Ordering::Less || (ordering == Ordering::Equal && token.value != Token::Pipe) { lexer.indent_levels.pop(); continue; } } // `and` and `}` are allowed to be on the same line as the `let` or `type` (Context::Let, Ordering::Equal) | (Context::Type, Ordering::Equal) if token.value != Token::And && token.value != Token::Close(Delimiter::Brace) => { // Insert an `in` token lexer.indent_levels.pop(); let location = { let offside = lexer.indent_levels.last_mut().expect("No top level block found"); // The enclosing block should not emit a block separator for the next // expression if let Context::Block { ref mut emit_semi, .. } = offside.context { *emit_semi = false; } offside.location }; let span = token.span; let result = Ok(lexer.layout_token(token, Token::In)); // Inject a block to ensure that a sequence of expressions end up in the `let` body // ``` // let x = 1 // a // b // ``` // `let x = 1 in {{ a; b }}` and not `{{ (let x = 1 in a) ; b }}` lexer.indent_levels .push(Offside { location: location, context: Context::Block { emit_semi: false }, })?; lexer.unprocessed_tokens.push(SpannedToken { span: span, value: Token::OpenBlock, }); return result; } _ => (), } // Some tokens directly inserts a new context when emitted let push_context = match token.value { Token::Let => Some(Context::Let), Token::If => Some(Context::If), Token::Type => Some(Context::Type), Token::Match => Some(Context::Expr), Token::Lambda => Some(Context::Lambda), Token::Open(delim) => Some(Context::Delimiter(delim)), _ => None, }; if let Some(context) = push_context { let offside = Offside { location: token.span.start, context: context, }; return lexer.indent_levels.push(offside).map(move |()| token); } // For other tokens we need to scan for 
the next token to get its position match (&token.value, offside.context) { (&Token::In, context) => { lexer.indent_levels.pop(); if let Context::Block { .. } = context { return Ok(lexer.layout_token(token, Token::CloseBlock)); } } (&Token::Equals, Context::Let) | (&Token::RightArrow, Context::Lambda) | (&Token::RightArrow, Context::MatchClause) | (&Token::Then, _) => scan_for_next_block(lexer, Context::Block { emit_semi: false })?, (&Token::With, _) => scan_for_next_block(lexer, Context::MatchClause)?, (&Token::Else, _) => { let next = lexer.next_token(); // Need to allow "else if" expressions so avoid inserting a block for those cases // (A block would be inserted at column 5 and we would then get unindentation // errors on the branches) // if x then // 1 // else if y then // 2 // else // 3 let add_block = next.value != Token::If || next.span.start.line != token.span.start.line; lexer.unprocessed_tokens.push(next); if add_block { scan_for_next_block(lexer, Context::Block { emit_semi: false })?; } } (&Token::Comma, _) => { // Prevent a semi to be emitted before the next token if let Some(offside) = lexer.indent_levels.last_mut() { // The enclosing block should not emit a block separator for the next // expression if let Context::Block { ref mut emit_semi, .. } = offside.context { *emit_semi = false; } } } (_, _) => (), } return Ok(token); } } fn scan_for_next_block<'input, 'a, I>(lexer: &mut Lexer<'input, I>, context: Context) -> Result<(), Error<'input>> where I: RangeStream<Item = char, Range = &'input str> + 'input, I::Range: fmt::Debug + 'input, { let next = lexer.next_token(); let span = next.span; lexer.unprocessed_tokens.push(next); if let Context::Block { .. 
} = context { lexer.unprocessed_tokens.push(SpannedToken { span: span, value: Token::OpenBlock, }); } lexer.indent_levels.push(Offside { location: span.start, context: context, }) } // Converts an error into a static error by transforming any range arguments into strings fn static_error<'input>(e: CombineError<Token<&'input str>, Token<&'input str>>) -> CombineError<String, String> { let static_info = |i: Info<Token<&'input str>, Token<&'input str>>| { match i { Info::Token(t) => Info::Token(t.to_string()), Info::Range(t) => Info::Range(t.to_string()), Info::Borrowed(t) => Info::Borrowed(t), Info::Owned(t) => Info::Owned(t), } }; match e { CombineError::Unexpected(t) => CombineError::Unexpected(static_info(t)), CombineError::Expected(t) => CombineError::Expected(static_info(t)), CombineError::Message(t) => CombineError::Message(static_info(t)), CombineError::Other(t) => CombineError::Other(t), } } // Adapt lexer for use with LALRPOP impl<'input, I> Iterator for Lexer<'input, I> where I: RangeStream<Item = char, Range = &'input str> + 'input, I::Range: fmt::Debug, { type Item = Result<(BytePos, Token<&'input str>, BytePos), CombineError<String, String>>; fn next (&mut self) -> Option<Result<(BytePos, Token<&'input str>, BytePos), CombineError<String, String>>> { match self.uncons_next() { Ok(Spanned { value: Token::EOF, .. }) => None, Err(ref err) if *err == Error::end_of_input() => None, Ok(Spanned { span: Span { start, end, .. 
}, value }) => { Some(Ok((start.absolute, value, end.absolute))) } Err(error) => Some(Err(static_error(error))), } } } Collapse some methods in the lexer use std::cmp::Ordering; use std::fmt; use base::ast::is_operator_char; use base::pos::{BytePos, Column, Line, Location, Span, Spanned, NO_EXPANSION}; use combine::primitives::{Consumed, Error as CombineError, Info, RangeStream}; use combine::combinator::EnvParser; use combine::range::{take, take_while}; use combine::*; use combine::char::{alpha_num, char, letter, spaces, string}; use combine_language::{LanguageEnv, LanguageDef, Identifier}; #[derive(Clone)] pub struct LocatedStream<I> { location: Location, input: I, } impl<I> StreamOnce for LocatedStream<I> where I: StreamOnce<Item = char>, { type Item = I::Item; type Range = I::Range; type Position = Location; fn uncons(&mut self) -> Result<Self::Item, CombineError<Self::Item, Self::Range>> { self.input .uncons() .map(|ch| { self.location.bump(ch); // HACK: The layout algorithm expects `1` indexing for columns - // this could be altered in the future though if self.location.column == Column::from(0) { self.location.column = Column::from(1); } ch }) } fn position(&self) -> Self::Position { self.location } } impl<'input, I> RangeStream for LocatedStream<I> where I: RangeStream<Item = char, Range = &'input str>, { fn uncons_range(&mut self, len: usize) -> Result<Self::Range, CombineError<Self::Item, Self::Range>> { self.input .uncons_range(len) .map(|range| { for ch in range.chars() { self.location.bump(ch) } range }) } fn uncons_while<F>(&mut self, mut predicate: F) -> Result<Self::Range, CombineError<Self::Item, Self::Range>> where F: FnMut(Self::Item) -> bool, { let location = &mut self.location; self.input.uncons_while(|t| { if predicate(t.clone()) { location.bump(t); true } else { false } }) } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum Delimiter { Brace, Bracket, Paren, } impl Delimiter { fn as_str(&self) -> &'static str { use self::Delimiter::*; match 
*self { Brace => "Brace", Bracket => "Bracket", Paren => "Paren", } } } impl fmt::Display for Delimiter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.as_str().fmt(f) } } pub type Error<'input> = CombineError<Token<&'input str>, Token<&'input str>>; pub type StrToken<'input> = Token<&'input str>; #[derive(Clone, PartialEq, Debug)] pub enum Token<I> { Identifier(I), Operator(I), String(String), Char(char), Int(i64), Byte(u8), Float(f64), DocComment(String), Let, And, In, Type, Match, With, If, Then, Else, Open(Delimiter), Close(Delimiter), Lambda, RightArrow, Colon, Dot, Comma, Pipe, Equals, OpenBlock, CloseBlock, Semi, EOF, } impl<I> fmt::Display for Token<I> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::Token::*; use self::Delimiter::*; let s = match *self { Identifier(_) => "Identifier", Operator(_) => "Operator", String(_) => "String", Char(_) => "Char", Int(_) => "Int", Byte(_) => "Byte", Float(_) => "Float", DocComment(_) => "DocComment", Let => "Let", And => "And", In => "In", Type => "Type", Match => "Match", With => "With", If => "If", Then => "Then", Else => "Else", Open(Brace) => "OpenBrace", Close(Brace) => "CloseBrace", Open(Paren) => "OpenParen", Close(Paren) => "CloseParen", Open(Bracket) => "OpenBracket", Close(Bracket) => "CloseBracket", Lambda => "Lambda", RightArrow => "RightArrow", Colon => "Colon", Dot => "Dot", Comma => "Comma", Pipe => "Pipe", Equals => "Equal", OpenBlock => "OpenBlock", CloseBlock => "CloseBlock", Semi => "Semi", EOF => "EOF", }; s.fmt(f) } } pub type SpannedToken<'input> = Spanned<Token<&'input str>, Location>; #[derive(Copy, Clone, Debug)] struct Offside { location: Location, context: Context, } #[derive(Copy, Clone, Debug, PartialEq)] enum Context { /// Context which contains several expressions/declarations separated by semicolons Block { emit_semi: bool }, /// A simple expression Expr, Let, Type, If, Delimiter(Delimiter), MatchClause, Lambda, } /// Parser passes the environment to each 
parser function type LanguageParser<'input: 'lexer, 'lexer, I: 'lexer, T> = EnvParser<&'lexer Lexer<'input, I>, LocatedStream<I>, T>; struct Contexts { stack: Vec<Offside>, } impl Contexts { fn last(&self) -> Option<&Offside> { self.stack.last() } fn last_mut(&mut self) -> Option<&mut Offside> { self.stack.last_mut() } fn pop(&mut self) -> Option<Offside> { self.stack.pop() } fn push(&mut self, offside: Offside) -> Result<(), Error<'static>> { self.check_unindentation_limit(&offside)?; self.stack.push(offside); Ok(()) } fn check_unindentation_limit(&mut self, offside: &Offside) -> Result<(), Error<'static>> { let mut skip_block = false; for other_offside in self.stack.iter().rev() { match other_offside.context { Context::Lambda => { skip_block = true; continue; } Context::Delimiter(_) => return Ok(()), Context::Block { .. } if skip_block => continue, // New context should not be unindented past the closest enclosing block context Context::MatchClause | Context::Type | Context::Let | Context::Block { .. 
} if offside.location.column < other_offside.location.column => (),
_ => continue,
}
debug!("Unindentation error: {:?} < {:?}", offside, other_offside);
// Fixed typo in the user-facing message: "to far" -> "too far"
return Err(CombineError::Message("Line was unindented too far".into()));
}
Ok(())
}
}

/// Lexer for gluon source which tracks the position of each token and layout
/// (indentation) contexts.
pub struct Lexer<'input, I>
    where I: RangeStream<Item = char, Range = &'input str>,
{
    env: LanguageEnv<'input, LocatedStream<I>>,
    input: Option<LocatedStream<I>>,
    /// Tokens which have been produced (e.g. by the layout algorithm) but not
    /// yet handed out; consumed before reading more characters from `input`.
    unprocessed_tokens: Vec<SpannedToken<'input>>,
    indent_levels: Contexts,
}

impl<'input, I> Lexer<'input, I>
    where I: RangeStream<Item = char, Range = &'input str> + 'input,
          I::Range: fmt::Debug + 'input,
{
    pub fn new(input: I) -> Lexer<'input, I> {
        let env = LanguageEnv::new(LanguageDef {
            ident: Identifier {
                start: letter().or(char('_')),
                rest: alpha_num().or(char('_')).or(char('\'')),
                // ["if", "then", "else", "let", "and", "in", "type", "case", "of"]
                // .iter()
                // .map(|x| (*x).into())
                // .collect(),
                reserved: Vec::new(),
            },
            op: Identifier {
                start: satisfy(is_operator_char),
                rest: satisfy(is_operator_char),
                reserved: Vec::new(),
            },
            comment_start: satisfy(|_| false).map(|_| ()),
            comment_end: satisfy(|_| false).map(|_| ()),
            comment_line: satisfy(|_| false).map(|_| ()),
        });
        Lexer {
            env: env,
            input: Some(LocatedStream {
                location: Location {
                    line: Line::from(0),
                    column: Column::from(1),
                    absolute: BytePos::from(0),
                },
                input: input,
            }),
            unprocessed_tokens: Vec::new(),
            indent_levels: Contexts { stack: Vec::new() },
        }
    }

    /// Wraps a plain parse function into an `EnvParser` carrying `self`.
    fn parser<'a, T>(&'a self,
                     parser: fn(&Lexer<'input, I>, LocatedStream<I>)
                                -> ParseResult<T, LocatedStream<I>>)
                     -> LanguageParser<'input, 'a, I, T> {
        env_parser(self, parser)
    }

    /// Parses an operator
    fn op<'a>(&'a self) -> LanguageParser<'input, 'a, I, &'input str> {
        self.parser(Lexer::parse_op)
    }

    fn parse_op(&self, input: LocatedStream<I>) -> ParseResult<&'input str, LocatedStream<I>> {
        let initial = input.clone();
        let ((builtin, op), _) = (optional((char('#'), take_while(char::is_alphabetic))),
                                  try(self.env.op_()))
            .parse_stream(input)?;
        let len =
builtin.map_or(0, |(c, typ)| c.len_utf8() + typ.len()) + op.len(); take(len).parse_stream(initial) } fn ident<'a>(&'a self) -> LanguageParser<'input, 'a, I, Token<&'input str>> { self.parser(Lexer::parse_ident) } fn parse_ident(&self, input: LocatedStream<I>) -> ParseResult<Token<&'input str>, LocatedStream<I>> { self.env .range_identifier_() .map(Token::Identifier) .parse_stream(input) } fn id_to_keyword(&self, id: Token<&'input str>) -> Token<&'input str> { match id { Token::Identifier("let") => Token::Let, Token::Identifier("type") => Token::Type, Token::Identifier("and") => Token::And, Token::Identifier("in") => Token::In, Token::Identifier("match") => Token::Match, Token::Identifier("with") => Token::With, Token::Identifier("if") => Token::If, Token::Identifier("then") => Token::Then, Token::Identifier("else") => Token::Else, id => id, } } fn next_token(&mut self) -> SpannedToken<'input> { if let Some(token) = self.unprocessed_tokens.pop() { return token; } let input = match self.input.take() { Some(input) => input, None => { let loc = Location { line: Line::from(0), column: Column::from(1), absolute: BytePos::from(0), }; return SpannedToken { span: Span { start: loc, end: loc, expansion_id: NO_EXPANSION, }, value: Token::EOF, }; } }; let mut start = input.position(); let result = self.next_token_(&mut start, input); match result { Ok((token, input)) => { let input = input.into_inner(); let end = input.position(); let input = match self.env.white_space().parse_stream(input.clone()) { Ok(((), input)) => input.into_inner(), Err(_) => input, }; self.input = Some(input); SpannedToken { span: Span { start: start, end: end, expansion_id: NO_EXPANSION, }, value: token, } } Err(err) => { let err = err.into_inner(); debug!("Error tokenizing: {:?}", err); let span = Span { start: start, end: start, expansion_id: NO_EXPANSION, }; SpannedToken { span: span, value: Token::CloseBlock, } } } } fn next_token_(&mut self, location: &mut Location, mut input: LocatedStream<I>) -> 
ParseResult<Token<&'input str>, LocatedStream<I>> { loop { // Skip all whitespace before the token let parsed_spaces: Result<_, _> = spaces().parse_lazy(input).into(); let (_, new_input) = parsed_spaces?; input = new_input.into_inner(); *location = input.position(); let (first, one_char_consumed) = any().parse_stream(input.clone())?; // Decide how to tokenize depending on what the first char is // ie if its an operator then more operators will follow if is_operator_char(first) || first == '#' { let (op, new_input) = self.op().parse_stream(input)?; input = new_input.into_inner(); let tok = match op { "=" => Token::Equals, "->" => Token::RightArrow, "|" => Token::Pipe, _ => { if op.starts_with("///") { let mut comment = String::new(); let ((), new_input) = spaces().parse_stream(input)?; input = new_input.into_inner(); // Merge consecutive line comments loop { let mut line = satisfy(|c| c != '\n' && c != '\r').iter(input); comment.extend(line.by_ref()); comment.push('\n'); let ((), new_input) = line.into_result(())?; input = new_input.into_inner(); let mut p = spaces().with(try(string("///"))).skip(spaces()); match p.parse_stream(input.clone()) { Ok((_, new_input)) => input = new_input.into_inner(), Err(_) => break, } } comment.pop(); return Ok((Token::DocComment(comment), Consumed::Consumed(input))); } else if op.starts_with("/**") { return self.block_doc_comment(input); } else if op.starts_with("//") { let result: Result<_, _> = skip_many(satisfy(|c| c != '\n' && c != '\r')) .parse_lazy(input) .into(); let ((), new_input) = result?; input = new_input.into_inner(); continue; } else if op.starts_with("/*") { // Skip over normal comments and try to parse a new token let ((), new_input) = self.skip_block_comment(input)?; input = new_input.into_inner(); continue; } else { Token::Operator(op) } } }; return Ok((tok, Consumed::Consumed(input))); } else if first.is_digit(10) { let int_or_byte = (self.env.integer_(), optional(char('b'))); return 
try(int_or_byte.skip(not_followed_by(string("."))))
    .and_then(|(i, byte)| {
        if byte.is_none() {
            Ok(Token::Int(i))
        } else {
            // A `u8` holds 0..=255; the previous `i <= 256` bound let
            // `256b` silently wrap to 0 through the `as u8` cast.
            if i >= 0 && i <= 255 {
                Ok(Token::Byte(i as u8))
            } else {
                Err(CombineError::Message("Byte literal out of range".into()))
            }
        }
    })
    .or(self.env.float_().map(Token::Float))
    .parse_stream(input);
} else if first.is_alphabetic() || first == '_' {
    return self.ident().map(|t| self.id_to_keyword(t)).parse_stream(input);
}
let tok = match first {
    '(' => {
        match self.ident().map(|t| self.id_to_keyword(t)).parse_stream(input) {
            Ok(x) => return Ok(x),
            Err(_) => Token::Open(Delimiter::Paren),
        }
    }
    ')' => Token::Close(Delimiter::Paren),
    '{' => Token::Open(Delimiter::Brace),
    '}' => Token::Close(Delimiter::Brace),
    '[' => Token::Open(Delimiter::Bracket),
    ']' => Token::Close(Delimiter::Bracket),
    ':' => Token::Colon,
    ',' => Token::Comma,
    '.' => Token::Dot,
    '\\' => Token::Lambda,
    '"' => return self.env.string_literal_().map(Token::String).parse_stream(input),
    '\'' => return self.env.char_literal_().map(Token::Char).parse_stream(input),
    _ => Token::EOF,
};
return Ok((tok, one_char_consumed));
}
}

/// Consumes characters up to and including the closing `*/` of a block
/// comment, discarding the contents.
fn skip_block_comment(&self, input: LocatedStream<I>) -> ParseResult<(), LocatedStream<I>> {
    let mut block_doc_comment = parser(|input| {
        let mut input = Consumed::Empty(input);
        loop {
            match input.clone()
                .combine(|input| try(string("*/")).parse_lazy(input).into()) {
                Ok((_, input)) => return Ok(((), input)),
                Err(_) => {
                    match input.combine(|input| any().parse_stream(input)) {
                        Ok((_, rest)) => {
                            input = rest;
                        }
                        Err(err) => return Err(err),
                    }
                }
            }
        }
    });
    block_doc_comment.parse_stream(input)
}

/// Collects the contents of a `/** ... */` doc comment (up to the closing
/// `*/`) into a `Token::DocComment`.
fn block_doc_comment(&self,
                     input: LocatedStream<I>)
                     -> ParseResult<Token<&'input str>, LocatedStream<I>> {
    let mut block_doc_comment = parser(|input| {
        let ((), mut input) = spaces().parse_stream(input)?;
        let mut out = String::new();
        loop {
            match input.clone()
                .combine(|input| try(string("*/")).parse_lazy(input).into()) {
                Ok((_, input)) => return Ok((Token::DocComment(out), input)),
Err(_) => { match input.combine(|input| any().parse_stream(input)) { Ok((c, rest)) => { out.push(c); input = rest } Err(err) => return Err(err), } } } } }); block_doc_comment.parse_stream(input) } fn layout_token(&mut self, token: SpannedToken<'input>, layout_token: Token<&'input str>) -> SpannedToken<'input> { let span = token.span; self.unprocessed_tokens.push(token); SpannedToken { span: span, value: layout_token, } } } fn layout<'input, I>(lexer: &mut Lexer<'input, I>, mut token: SpannedToken<'input>) -> Result<SpannedToken<'input>, Error<'input>> where I: RangeStream<Item = char, Range = &'input str> + 'input, I::Range: fmt::Debug, { if token.value == Token::EOF { token.span.start.column = Column::from(0); } loop { // Retrieve the current indentation level if one exists let offside = match (&token.value, lexer.indent_levels.last().cloned()) { (_, Some(offside)) => offside, (&Token::EOF, None) => return Ok(token), (_, None) => { lexer.indent_levels .push(Offside { context: Context::Block { emit_semi: false }, location: token.span.start, })?; debug!("Default block {:?}", token); return Ok(lexer.layout_token(token, Token::OpenBlock)); } }; debug!("--------\n{:?}\n{:?}", token, offside); let ordering = token.span.start.column.cmp(&offside.location.column); // If it is closing token we remove contexts until a context for that token is found if [Token::In, Token::CloseBlock, Token::Else, Token::Close(Delimiter::Brace), Token::Close(Delimiter::Bracket), Token::Close(Delimiter::Paren), Token::Comma] .iter() .any(|t| *t == token.value) { if token.value == Token::Comma && (offside.context == Context::Delimiter(Delimiter::Brace) || offside.context == Context::Delimiter(Delimiter::Bracket)) { return Ok(token); } lexer.indent_levels.pop(); match (&token.value, offside.context) { (&Token::Else, Context::If) => (), (&Token::Close(close_delim), Context::Delimiter(context_delim)) if close_delim == context_delim => return Ok(token), (&Token::CloseBlock, Context::Block { .. 
}) => { if let Some(offside) = lexer.indent_levels.last_mut() { // The enclosing block should not emit a block separator for the next // expression if let Context::Block { ref mut emit_semi, .. } = offside.context { *emit_semi = false; } } return Ok(token); } (&Token::In, Context::Let) | (&Token::In, Context::Type) => { let location = { let offside = lexer.indent_levels.last_mut().expect("No top level block found"); // The enclosing block should not emit a block separator for the next // expression if let Context::Block { ref mut emit_semi, .. } = offside.context { *emit_semi = false; } offside.location }; // Inject a block to ensure that a sequence of expressions end up in the `let` body // ``` // let x = 1 // a // b // ``` // `let x = 1 in {{ a; b }}` and not `{{ (let x = 1 in a) ; b }}` lexer.indent_levels .push(Offside { location: location, context: Context::Block { emit_semi: false }, })?; lexer.unprocessed_tokens.push(SpannedToken { span: token.span, value: Token::OpenBlock, }); return Ok(token); } (_, Context::Block { .. }) => { return Ok(lexer.layout_token(token, Token::CloseBlock)); } (_, _) => continue, } } // Next we check offside rules for each of the contexts match (offside.context, ordering) { (Context::Block { .. }, Ordering::Less) => { lexer.unprocessed_tokens.push(token.clone()); token.value = Token::CloseBlock; continue; } (Context::Block { emit_semi }, Ordering::Equal) => { match token.value { _ if emit_semi => { if let Some(offside) = lexer.indent_levels.last_mut() { // The enclosing block should not emit a block separator for the // next expression if let Context::Block { ref mut emit_semi, .. } = offside.context { *emit_semi = false; } } return Ok(lexer.layout_token(token, Token::Semi)); } Token::DocComment(_) | Token::OpenBlock => (), _ => { // If it is the first token in a sequence we dont want to emit a // separator if let Some(offside) = lexer.indent_levels.last_mut() { if let Context::Block { ref mut emit_semi, .. 
} = offside.context { *emit_semi = true; } } } } } (Context::Expr, _) | (Context::Lambda, _) => { if ordering != Ordering::Greater { lexer.indent_levels.pop(); continue; } } (Context::MatchClause, _) => { // Must allow `|` to be on the same line if ordering == Ordering::Less || (ordering == Ordering::Equal && token.value != Token::Pipe) { lexer.indent_levels.pop(); continue; } } // `and` and `}` are allowed to be on the same line as the `let` or `type` (Context::Let, Ordering::Equal) | (Context::Type, Ordering::Equal) if token.value != Token::And && token.value != Token::Close(Delimiter::Brace) => { // Insert an `in` token lexer.indent_levels.pop(); let location = { let offside = lexer.indent_levels.last_mut().expect("No top level block found"); // The enclosing block should not emit a block separator for the next // expression if let Context::Block { ref mut emit_semi, .. } = offside.context { *emit_semi = false; } offside.location }; let span = token.span; let result = Ok(lexer.layout_token(token, Token::In)); // Inject a block to ensure that a sequence of expressions end up in the `let` body // ``` // let x = 1 // a // b // ``` // `let x = 1 in {{ a; b }}` and not `{{ (let x = 1 in a) ; b }}` lexer.indent_levels .push(Offside { location: location, context: Context::Block { emit_semi: false }, })?; lexer.unprocessed_tokens.push(SpannedToken { span: span, value: Token::OpenBlock, }); return result; } _ => (), } // Some tokens directly inserts a new context when emitted let push_context = match token.value { Token::Let => Some(Context::Let), Token::If => Some(Context::If), Token::Type => Some(Context::Type), Token::Match => Some(Context::Expr), Token::Lambda => Some(Context::Lambda), Token::Open(delim) => Some(Context::Delimiter(delim)), _ => None, }; if let Some(context) = push_context { let offside = Offside { location: token.span.start, context: context, }; return lexer.indent_levels.push(offside).map(move |()| token); } // For other tokens we need to scan for 
the next token to get its position match (&token.value, offside.context) { (&Token::In, context) => { lexer.indent_levels.pop(); if let Context::Block { .. } = context { return Ok(lexer.layout_token(token, Token::CloseBlock)); } } (&Token::Equals, Context::Let) | (&Token::RightArrow, Context::Lambda) | (&Token::RightArrow, Context::MatchClause) | (&Token::Then, _) => scan_for_next_block(lexer, Context::Block { emit_semi: false })?, (&Token::With, _) => scan_for_next_block(lexer, Context::MatchClause)?, (&Token::Else, _) => { let next = lexer.next_token(); // Need to allow "else if" expressions so avoid inserting a block for those cases // (A block would be inserted at column 5 and we would then get unindentation // errors on the branches) // if x then // 1 // else if y then // 2 // else // 3 let add_block = next.value != Token::If || next.span.start.line != token.span.start.line; lexer.unprocessed_tokens.push(next); if add_block { scan_for_next_block(lexer, Context::Block { emit_semi: false })?; } } (&Token::Comma, _) => { // Prevent a semi to be emitted before the next token if let Some(offside) = lexer.indent_levels.last_mut() { // The enclosing block should not emit a block separator for the next // expression if let Context::Block { ref mut emit_semi, .. } = offside.context { *emit_semi = false; } } } (_, _) => (), } return Ok(token); } } fn scan_for_next_block<'input, 'a, I>(lexer: &mut Lexer<'input, I>, context: Context) -> Result<(), Error<'input>> where I: RangeStream<Item = char, Range = &'input str> + 'input, I::Range: fmt::Debug + 'input, { let next = lexer.next_token(); let span = next.span; lexer.unprocessed_tokens.push(next); if let Context::Block { .. 
} = context { lexer.unprocessed_tokens.push(SpannedToken { span: span, value: Token::OpenBlock, }); } lexer.indent_levels.push(Offside { location: span.start, context: context, }) } // Converts an error into a static error by transforming any range arguments into strings fn static_error<'input>(e: CombineError<Token<&'input str>, Token<&'input str>>) -> CombineError<String, String> { let static_info = |i: Info<Token<&'input str>, Token<&'input str>>| { match i { Info::Token(t) => Info::Token(t.to_string()), Info::Range(t) => Info::Range(t.to_string()), Info::Borrowed(t) => Info::Borrowed(t), Info::Owned(t) => Info::Owned(t), } }; match e { CombineError::Unexpected(t) => CombineError::Unexpected(static_info(t)), CombineError::Expected(t) => CombineError::Expected(static_info(t)), CombineError::Message(t) => CombineError::Message(static_info(t)), CombineError::Other(t) => CombineError::Other(t), } } // Adapt lexer for use with LALRPOP impl<'input, I> Iterator for Lexer<'input, I> where I: RangeStream<Item = char, Range = &'input str> + 'input, I::Range: fmt::Debug, { type Item = Result<(BytePos, Token<&'input str>, BytePos), CombineError<String, String>>; fn next (&mut self) -> Option<Result<(BytePos, Token<&'input str>, BytePos), CombineError<String, String>>> { let token = self.next_token(); match layout(self, token) { Err(error) => Some(Err(static_error(error))), Ok(SpannedToken { value: Token::EOF, .. }) => None, Ok(token) => { debug!("Lex {:?}", token.value); let Span { start, end, .. } = token.span; Some(Ok((start.absolute, token.value, end.absolute))) } } } }
use std::rc::Rc; use std::cell::RefCell; use std::collections::HashSet; use std::convert::TryFrom; use std::sync::{Arc, atomic::AtomicBool}; use std::path::PathBuf; use std::os::unix::io::{AsRawFd, RawFd}; use calloop::{generic::Generic, InsertError, LoopHandle, Source}; use drm::{Device as BasicDevice, ClientCapability, DriverCapability}; use drm::control::{ResourceHandles, PlaneResourceHandles, Device as ControlDevice, Event, Mode, PlaneType, crtc, plane, connector, property}; use nix::libc::dev_t; use nix::sys::stat::fstat; pub(super) mod atomic; pub(super) mod legacy; use atomic::AtomicDrmDevice; use legacy::LegacyDrmDevice; use super::surface::{DrmSurface, DrmSurfaceInternal, atomic::AtomicDrmSurface, legacy::LegacyDrmSurface}; use super::error::Error; use crate::backend::allocator::{Fourcc, Format, Modifier}; pub struct DrmDevice<A: AsRawFd + 'static> { pub(super) dev_id: dev_t, pub(crate) internal: Arc<DrmDeviceInternal<A>>, handler: Rc<RefCell<Option<Box<dyn DeviceHandler>>>>, #[cfg(feature = "backend_session")] pub(super) links: RefCell<Vec<crate::signaling::SignalToken>>, has_universal_planes: bool, resources: ResourceHandles, planes: PlaneResourceHandles, pub(super) logger: ::slog::Logger, } impl<A: AsRawFd + 'static> AsRawFd for DrmDevice<A> { fn as_raw_fd(&self) -> RawFd { match &*self.internal { DrmDeviceInternal::Atomic(dev) => dev.fd.as_raw_fd(), DrmDeviceInternal::Legacy(dev) => dev.fd.as_raw_fd(), } } } impl<A: AsRawFd + 'static> BasicDevice for DrmDevice<A> {} impl<A: AsRawFd + 'static> ControlDevice for DrmDevice<A> {} pub struct FdWrapper<A: AsRawFd + 'static> { fd: A, pub(super) privileged: bool, logger: ::slog::Logger, } impl<A: AsRawFd + 'static> AsRawFd for FdWrapper<A> { fn as_raw_fd(&self) -> RawFd { self.fd.as_raw_fd() } } impl<A: AsRawFd + 'static> BasicDevice for FdWrapper<A> {} impl<A: AsRawFd + 'static> ControlDevice for FdWrapper<A> {} impl<A: AsRawFd + 'static> Drop for FdWrapper<A> { fn drop(&mut self) { info!(self.logger, 
"Dropping device: {:?}", self.dev_path()); if self.privileged { if let Err(err) = self.release_master_lock() { error!(self.logger, "Failed to drop drm master state. Error: {}", err); } } } } pub enum DrmDeviceInternal<A: AsRawFd + 'static> { Atomic(AtomicDrmDevice<A>), Legacy(LegacyDrmDevice<A>), } impl<A: AsRawFd + 'static> AsRawFd for DrmDeviceInternal<A> { fn as_raw_fd(&self) -> RawFd { match self { DrmDeviceInternal::Atomic(dev) => dev.fd.as_raw_fd(), DrmDeviceInternal::Legacy(dev) => dev.fd.as_raw_fd(), } } } impl<A: AsRawFd + 'static> BasicDevice for DrmDeviceInternal<A> {} impl<A: AsRawFd + 'static> ControlDevice for DrmDeviceInternal<A> {} impl<A: AsRawFd + 'static> DrmDevice<A> { pub fn new<L>(fd: A, disable_connectors: bool, logger: L) -> Result<Self, Error> where A: AsRawFd + Clone + 'static, L: Into<Option<::slog::Logger>>, { let log = crate::slog_or_fallback(logger).new(o!("smithay_module" => "backend_drm")); info!(log, "DrmDevice initializing"); let dev_id = fstat(fd.as_raw_fd()) .map_err(Error::UnableToGetDeviceId)? .st_rdev; let active = Arc::new(AtomicBool::new(true)); let dev = Arc::new({ let mut dev = FdWrapper { fd: fd.clone(), privileged: false, logger: log.clone(), }; // We want to modeset, so we better be the master, if we run via a tty session. // This is only needed on older kernels. Newer kernels grant this permission, // if no other process is already the *master*. So we skip over this error. 
if dev.acquire_master_lock().is_err() { warn!(log, "Unable to become drm master, assuming unprivileged mode"); } else { dev.privileged = true; } dev }); let has_universal_planes = dev.set_client_capability(ClientCapability::UniversalPlanes, true).is_ok(); let resources = dev.resource_handles().map_err(|source| Error::Access { errmsg: "Error loading resource handles", dev: dev.dev_path(), source, })?; let planes = dev.plane_handles().map_err(|source| Error::Access { errmsg: "Error loading plane handles", dev: dev.dev_path(), source, })?; let internal = Arc::new(DrmDevice::create_internal(dev, active, disable_connectors, log.clone())?); Ok(DrmDevice { dev_id, internal, handler: Rc::new(RefCell::new(None)), #[cfg(feature = "backend_session")] links: RefCell::new(Vec::new()), has_universal_planes, resources, planes, logger: log, }) } fn create_internal(dev: Arc<FdWrapper<A>>, active: Arc<AtomicBool>, disable_connectors: bool, log: ::slog::Logger) -> Result<DrmDeviceInternal<A>, Error> { let force_legacy = std::env::var("SMITHAY_USE_LEGACY") .map(|x| { x == "1" || x.to_lowercase() == "true" || x.to_lowercase() == "yes" || x.to_lowercase() == "y" }) .unwrap_or(false); if force_legacy { info!(log, "SMITHAY_USE_LEGACY is set. Forcing LegacyDrmDevice."); }; Ok(if dev.set_client_capability(ClientCapability::Atomic, true).is_ok() && !force_legacy { DrmDeviceInternal::Atomic(AtomicDrmDevice::new(dev, active, disable_connectors, log)?) } else { info!(log, "Falling back to LegacyDrmDevice"); DrmDeviceInternal::Legacy(LegacyDrmDevice::new(dev, active, disable_connectors, log)?) 
}) } pub fn process_events(&mut self) { match self.receive_events() { Ok(events) => { for event in events { if let Event::PageFlip(event) = event { trace!(self.logger, "Got a page-flip event for crtc ({:?})", event.crtc); if let Some(handler) = self.handler.borrow_mut().as_mut() { handler.vblank(event.crtc); } } else { trace!( self.logger, "Got a non-page-flip event of device '{:?}'.", self.dev_path() ); } } } Err(source) => { if let Some(handler) = self.handler.borrow_mut().as_mut() { handler.error(Error::Access { errmsg: "Error processing drm events", dev: self.dev_path(), source, }); } } } } pub fn is_atomic(&self) -> bool { match *self.internal { DrmDeviceInternal::Atomic(_) => true, DrmDeviceInternal::Legacy(_) => false, } } pub fn set_handler(&mut self, handler: impl DeviceHandler + 'static) { let handler = Some(Box::new(handler) as Box<dyn DeviceHandler + 'static>); *self.handler.borrow_mut() = handler; } pub fn clear_handler(&mut self) { self.handler.borrow_mut().take(); } pub fn crtcs(&self) -> &[crtc::Handle] { self.resources.crtcs() } pub fn planes(&self, crtc: &crtc::Handle) -> Result<Planes, Error> { let mut primary = None; let mut cursor = None; let mut overlay = Vec::new(); for plane in self.planes.planes() { let info = self.get_plane(*plane).map_err(|source| Error::Access { errmsg: "Failed to get plane information", dev: self.dev_path(), source, })?; let filter = info.possible_crtcs(); if self.resources.filter_crtcs(filter).contains(crtc) { match self.plane_type(*plane)? 
{ PlaneType::Primary => { primary = Some(*plane); }, PlaneType::Cursor => { cursor = Some(*plane); }, PlaneType::Overlay => { overlay.push(*plane); }, }; } } Ok(Planes { primary: primary.expect("Crtc has no primary plane"), cursor, overlay: if self.has_universal_planes { Some(overlay) } else { None }, }) } fn plane_type(&self, plane: plane::Handle) -> Result<PlaneType, Error> { let props = self.get_properties(plane).map_err(|source| Error::Access { errmsg: "Failed to get properties of plane", dev: self.dev_path(), source, })?; let (ids, vals) = props.as_props_and_values(); for (&id, &val) in ids.iter().zip(vals.iter()) { let info = self.get_property(id).map_err(|source| Error::Access { errmsg: "Failed to get property info", dev: self.dev_path(), source, })?; if info.name().to_str().map(|x| x == "type").unwrap_or(false) { return Ok(match val { x if x == (PlaneType::Primary as u64) => PlaneType::Primary, x if x == (PlaneType::Cursor as u64) => PlaneType::Cursor, _ => PlaneType::Overlay, }); } } unreachable!() } pub fn create_surface(&self, crtc: crtc::Handle, plane: plane::Handle, mode: Mode, connectors: &[connector::Handle]) -> Result<DrmSurface<A>, Error> { if connectors.is_empty() { return Err(Error::SurfaceWithoutConnectors(crtc)); } let info = self.get_plane(plane).map_err(|source| Error::Access { errmsg: "Failed to get plane info", dev: self.dev_path(), source })?; let filter = info.possible_crtcs(); if !self.resources.filter_crtcs(filter).contains(&crtc) { return Err(Error::PlaneNotCompatible(crtc, plane)); } let active = match &*self.internal { DrmDeviceInternal::Atomic(dev) => dev.active.clone(), DrmDeviceInternal::Legacy(dev) => dev.active.clone(), }; let internal = if self.is_atomic() { let mapping = match &*self.internal { DrmDeviceInternal::Atomic(dev) => dev.prop_mapping.clone(), _ => unreachable!(), }; DrmSurfaceInternal::Atomic(AtomicDrmSurface::new(self.internal.clone(), active, crtc, plane, mapping, mode, connectors, self.logger.clone())?) 
} else { if self.plane_type(plane)? != PlaneType::Primary { return Err(Error::NonPrimaryPlane(plane)); } DrmSurfaceInternal::Legacy(LegacyDrmSurface::new(self.internal.clone(), active, crtc, mode, connectors, self.logger.clone())?) }; // get plane formats let plane_info = self.get_plane(plane).map_err(|source| Error::Access { errmsg: "Error loading plane info", dev: self.dev_path(), source, })?; let mut formats = HashSet::new(); for code in plane_info.formats().iter().flat_map(|x| Fourcc::try_from(*x).ok()) { formats.insert(Format { code, modifier: Modifier::Invalid, }); } if let (Ok(1), &DrmSurfaceInternal::Atomic(ref surf)) = (self.get_driver_capability(DriverCapability::AddFB2Modifiers), &internal) { let set = self.get_properties(plane).map_err(|source| Error::Access { errmsg: "Failed to query properties", dev: self.dev_path(), source })?; if let Ok(prop) = surf.plane_prop_handle(plane, "IN_FORMATS") { let prop_info = self.get_property(prop).map_err(|source| Error::Access { errmsg: "Failed to query property", dev: self.dev_path(), source, })?; let (handles, raw_values) = set.as_props_and_values(); let raw_value = raw_values[handles.iter().enumerate().find_map(|(i, handle)| if *handle == prop { Some(i) } else { None }).unwrap()]; if let property::Value::Blob(blob) = prop_info.value_type().convert_value(raw_value) { let data = self.get_property_blob(blob).map_err(|source| Error::Access { errmsg: "Failed to query property blob data", dev: self.dev_path(), source, })?; // be careful here, we have no idea about the alignment inside the blob, so always copy using `read_unaligned`, // although slice::from_raw_parts would be so much nicer to iterate and to read. 
unsafe { let fmt_mod_blob_ptr = data.as_ptr() as *const drm_ffi::drm_format_modifier_blob; let fmt_mod_blob = &*fmt_mod_blob_ptr; let formats_ptr: *const u32 = fmt_mod_blob_ptr.cast::<u8>().offset(fmt_mod_blob.formats_offset as isize) as *const _; let modifiers_ptr: *const drm_ffi::drm_format_modifier = fmt_mod_blob_ptr.cast::<u8>().offset(fmt_mod_blob.modifiers_offset as isize) as *const _; let formats_ptr = formats_ptr as *const u32; let modifiers_ptr = modifiers_ptr as *const drm_ffi::drm_format_modifier; for i in 0..fmt_mod_blob.count_modifiers { let mod_info = modifiers_ptr.offset(i as isize).read_unaligned(); for j in 0..64 { if mod_info.formats & (1u64 << j) != 0 { let code = Fourcc::try_from(formats_ptr.offset((j + mod_info.offset) as isize).read_unaligned()).ok(); let modifier = Modifier::from(mod_info.modifier); if let Some(code) = code { formats.insert(Format { code, modifier, }); } } } } } } } } else if self.plane_type(plane)? == PlaneType::Cursor { // Force a LINEAR layout for the cursor if the driver doesn't support modifiers for format in formats.clone() { formats.insert(Format { code: format.code, modifier: Modifier::Linear, }); } } if formats.is_empty() { formats.insert(Format { code: Fourcc::Argb8888, modifier: Modifier::Invalid, }); } info!(self.logger, "Supported scan-out formats for plane ({:?}): {:?}", plane, formats); Ok(DrmSurface { crtc, plane, internal: Arc::new(internal), formats, }) } pub fn device_id(&self) -> dev_t { self.dev_id } } pub struct Planes { pub primary: plane::Handle, pub cursor: Option<plane::Handle>, pub overlay: Option<Vec<plane::Handle>>, } impl<A: AsRawFd + 'static> DrmDeviceInternal<A> { pub(super) fn reset_state(&self) -> Result<(), Error> { match self { DrmDeviceInternal::Atomic(dev) => dev.reset_state(), DrmDeviceInternal::Legacy(dev) => dev.reset_state(), } } } /// Trait to receive events of a bound [`DrmDevice`] /// /// See [`device_bind`] pub trait DeviceHandler { /// A vblank blank event on the provided crtc 
has happend fn vblank(&mut self, crtc: crtc::Handle); /// An error happend while processing events fn error(&mut self, error: Error); } /// Trait representing open devices that *may* return a `Path` pub trait DevPath { /// Returns the path of the open device if possible fn dev_path(&self) -> Option<PathBuf>; } impl<A: AsRawFd> DevPath for A { fn dev_path(&self) -> Option<PathBuf> { use std::fs; fs::read_link(format!("/proc/self/fd/{:?}", self.as_raw_fd())).ok() } } /// calloop source associated with a Device pub type DrmSource<A> = Generic<DrmDevice<A>>; /// Bind a `Device` to an [`EventLoop`](calloop::EventLoop), /// /// This will cause it to recieve events and feed them into a previously /// set [`DeviceHandler`](DeviceHandler). pub fn device_bind<A, Data>( handle: &LoopHandle<Data>, device: DrmDevice<A>, ) -> ::std::result::Result<Source<DrmSource<A>>, InsertError<DrmSource<A>>> where A: AsRawFd + 'static, Data: 'static, { let source = Generic::new(device, calloop::Interest::Readable, calloop::Mode::Level); handle.insert_source(source, |_, source, _| { source.process_events(); Ok(()) }) } drm: Fix unnecessary Clone requirement use std::rc::Rc; use std::cell::RefCell; use std::collections::HashSet; use std::convert::TryFrom; use std::sync::{Arc, atomic::AtomicBool}; use std::path::PathBuf; use std::os::unix::io::{AsRawFd, RawFd}; use calloop::{generic::Generic, InsertError, LoopHandle, Source}; use drm::{Device as BasicDevice, ClientCapability, DriverCapability}; use drm::control::{ResourceHandles, PlaneResourceHandles, Device as ControlDevice, Event, Mode, PlaneType, crtc, plane, connector, property}; use nix::libc::dev_t; use nix::sys::stat::fstat; pub(super) mod atomic; pub(super) mod legacy; use atomic::AtomicDrmDevice; use legacy::LegacyDrmDevice; use super::surface::{DrmSurface, DrmSurfaceInternal, atomic::AtomicDrmSurface, legacy::LegacyDrmSurface}; use super::error::Error; use crate::backend::allocator::{Fourcc, Format, Modifier}; pub struct DrmDevice<A: 
AsRawFd + 'static> { pub(super) dev_id: dev_t, pub(crate) internal: Arc<DrmDeviceInternal<A>>, handler: Rc<RefCell<Option<Box<dyn DeviceHandler>>>>, #[cfg(feature = "backend_session")] pub(super) links: RefCell<Vec<crate::signaling::SignalToken>>, has_universal_planes: bool, resources: ResourceHandles, planes: PlaneResourceHandles, pub(super) logger: ::slog::Logger, } impl<A: AsRawFd + 'static> AsRawFd for DrmDevice<A> { fn as_raw_fd(&self) -> RawFd { match &*self.internal { DrmDeviceInternal::Atomic(dev) => dev.fd.as_raw_fd(), DrmDeviceInternal::Legacy(dev) => dev.fd.as_raw_fd(), } } } impl<A: AsRawFd + 'static> BasicDevice for DrmDevice<A> {} impl<A: AsRawFd + 'static> ControlDevice for DrmDevice<A> {} pub struct FdWrapper<A: AsRawFd + 'static> { fd: A, pub(super) privileged: bool, logger: ::slog::Logger, } impl<A: AsRawFd + 'static> AsRawFd for FdWrapper<A> { fn as_raw_fd(&self) -> RawFd { self.fd.as_raw_fd() } } impl<A: AsRawFd + 'static> BasicDevice for FdWrapper<A> {} impl<A: AsRawFd + 'static> ControlDevice for FdWrapper<A> {} impl<A: AsRawFd + 'static> Drop for FdWrapper<A> { fn drop(&mut self) { info!(self.logger, "Dropping device: {:?}", self.dev_path()); if self.privileged { if let Err(err) = self.release_master_lock() { error!(self.logger, "Failed to drop drm master state. 
Error: {}", err); } } } } pub enum DrmDeviceInternal<A: AsRawFd + 'static> { Atomic(AtomicDrmDevice<A>), Legacy(LegacyDrmDevice<A>), } impl<A: AsRawFd + 'static> AsRawFd for DrmDeviceInternal<A> { fn as_raw_fd(&self) -> RawFd { match self { DrmDeviceInternal::Atomic(dev) => dev.fd.as_raw_fd(), DrmDeviceInternal::Legacy(dev) => dev.fd.as_raw_fd(), } } } impl<A: AsRawFd + 'static> BasicDevice for DrmDeviceInternal<A> {} impl<A: AsRawFd + 'static> ControlDevice for DrmDeviceInternal<A> {} impl<A: AsRawFd + 'static> DrmDevice<A> { pub fn new<L>(fd: A, disable_connectors: bool, logger: L) -> Result<Self, Error> where A: AsRawFd + 'static, L: Into<Option<::slog::Logger>>, { let log = crate::slog_or_fallback(logger).new(o!("smithay_module" => "backend_drm")); info!(log, "DrmDevice initializing"); let dev_id = fstat(fd.as_raw_fd()) .map_err(Error::UnableToGetDeviceId)? .st_rdev; let active = Arc::new(AtomicBool::new(true)); let dev = Arc::new({ let mut dev = FdWrapper { fd, privileged: false, logger: log.clone(), }; // We want to modeset, so we better be the master, if we run via a tty session. // This is only needed on older kernels. Newer kernels grant this permission, // if no other process is already the *master*. So we skip over this error. 
if dev.acquire_master_lock().is_err() { warn!(log, "Unable to become drm master, assuming unprivileged mode"); } else { dev.privileged = true; } dev }); let has_universal_planes = dev.set_client_capability(ClientCapability::UniversalPlanes, true).is_ok(); let resources = dev.resource_handles().map_err(|source| Error::Access { errmsg: "Error loading resource handles", dev: dev.dev_path(), source, })?; let planes = dev.plane_handles().map_err(|source| Error::Access { errmsg: "Error loading plane handles", dev: dev.dev_path(), source, })?; let internal = Arc::new(DrmDevice::create_internal(dev, active, disable_connectors, log.clone())?); Ok(DrmDevice { dev_id, internal, handler: Rc::new(RefCell::new(None)), #[cfg(feature = "backend_session")] links: RefCell::new(Vec::new()), has_universal_planes, resources, planes, logger: log, }) } fn create_internal(dev: Arc<FdWrapper<A>>, active: Arc<AtomicBool>, disable_connectors: bool, log: ::slog::Logger) -> Result<DrmDeviceInternal<A>, Error> { let force_legacy = std::env::var("SMITHAY_USE_LEGACY") .map(|x| { x == "1" || x.to_lowercase() == "true" || x.to_lowercase() == "yes" || x.to_lowercase() == "y" }) .unwrap_or(false); if force_legacy { info!(log, "SMITHAY_USE_LEGACY is set. Forcing LegacyDrmDevice."); }; Ok(if dev.set_client_capability(ClientCapability::Atomic, true).is_ok() && !force_legacy { DrmDeviceInternal::Atomic(AtomicDrmDevice::new(dev, active, disable_connectors, log)?) } else { info!(log, "Falling back to LegacyDrmDevice"); DrmDeviceInternal::Legacy(LegacyDrmDevice::new(dev, active, disable_connectors, log)?) 
}) } pub fn process_events(&mut self) { match self.receive_events() { Ok(events) => { for event in events { if let Event::PageFlip(event) = event { trace!(self.logger, "Got a page-flip event for crtc ({:?})", event.crtc); if let Some(handler) = self.handler.borrow_mut().as_mut() { handler.vblank(event.crtc); } } else { trace!( self.logger, "Got a non-page-flip event of device '{:?}'.", self.dev_path() ); } } } Err(source) => { if let Some(handler) = self.handler.borrow_mut().as_mut() { handler.error(Error::Access { errmsg: "Error processing drm events", dev: self.dev_path(), source, }); } } } } pub fn is_atomic(&self) -> bool { match *self.internal { DrmDeviceInternal::Atomic(_) => true, DrmDeviceInternal::Legacy(_) => false, } } pub fn set_handler(&mut self, handler: impl DeviceHandler + 'static) { let handler = Some(Box::new(handler) as Box<dyn DeviceHandler + 'static>); *self.handler.borrow_mut() = handler; } pub fn clear_handler(&mut self) { self.handler.borrow_mut().take(); } pub fn crtcs(&self) -> &[crtc::Handle] { self.resources.crtcs() } pub fn planes(&self, crtc: &crtc::Handle) -> Result<Planes, Error> { let mut primary = None; let mut cursor = None; let mut overlay = Vec::new(); for plane in self.planes.planes() { let info = self.get_plane(*plane).map_err(|source| Error::Access { errmsg: "Failed to get plane information", dev: self.dev_path(), source, })?; let filter = info.possible_crtcs(); if self.resources.filter_crtcs(filter).contains(crtc) { match self.plane_type(*plane)? 
{ PlaneType::Primary => { primary = Some(*plane); }, PlaneType::Cursor => { cursor = Some(*plane); }, PlaneType::Overlay => { overlay.push(*plane); }, }; } } Ok(Planes { primary: primary.expect("Crtc has no primary plane"), cursor, overlay: if self.has_universal_planes { Some(overlay) } else { None }, }) } fn plane_type(&self, plane: plane::Handle) -> Result<PlaneType, Error> { let props = self.get_properties(plane).map_err(|source| Error::Access { errmsg: "Failed to get properties of plane", dev: self.dev_path(), source, })?; let (ids, vals) = props.as_props_and_values(); for (&id, &val) in ids.iter().zip(vals.iter()) { let info = self.get_property(id).map_err(|source| Error::Access { errmsg: "Failed to get property info", dev: self.dev_path(), source, })?; if info.name().to_str().map(|x| x == "type").unwrap_or(false) { return Ok(match val { x if x == (PlaneType::Primary as u64) => PlaneType::Primary, x if x == (PlaneType::Cursor as u64) => PlaneType::Cursor, _ => PlaneType::Overlay, }); } } unreachable!() } pub fn create_surface(&self, crtc: crtc::Handle, plane: plane::Handle, mode: Mode, connectors: &[connector::Handle]) -> Result<DrmSurface<A>, Error> { if connectors.is_empty() { return Err(Error::SurfaceWithoutConnectors(crtc)); } let info = self.get_plane(plane).map_err(|source| Error::Access { errmsg: "Failed to get plane info", dev: self.dev_path(), source })?; let filter = info.possible_crtcs(); if !self.resources.filter_crtcs(filter).contains(&crtc) { return Err(Error::PlaneNotCompatible(crtc, plane)); } let active = match &*self.internal { DrmDeviceInternal::Atomic(dev) => dev.active.clone(), DrmDeviceInternal::Legacy(dev) => dev.active.clone(), }; let internal = if self.is_atomic() { let mapping = match &*self.internal { DrmDeviceInternal::Atomic(dev) => dev.prop_mapping.clone(), _ => unreachable!(), }; DrmSurfaceInternal::Atomic(AtomicDrmSurface::new(self.internal.clone(), active, crtc, plane, mapping, mode, connectors, self.logger.clone())?) 
} else { if self.plane_type(plane)? != PlaneType::Primary { return Err(Error::NonPrimaryPlane(plane)); } DrmSurfaceInternal::Legacy(LegacyDrmSurface::new(self.internal.clone(), active, crtc, mode, connectors, self.logger.clone())?) }; // get plane formats let plane_info = self.get_plane(plane).map_err(|source| Error::Access { errmsg: "Error loading plane info", dev: self.dev_path(), source, })?; let mut formats = HashSet::new(); for code in plane_info.formats().iter().flat_map(|x| Fourcc::try_from(*x).ok()) { formats.insert(Format { code, modifier: Modifier::Invalid, }); } if let (Ok(1), &DrmSurfaceInternal::Atomic(ref surf)) = (self.get_driver_capability(DriverCapability::AddFB2Modifiers), &internal) { let set = self.get_properties(plane).map_err(|source| Error::Access { errmsg: "Failed to query properties", dev: self.dev_path(), source })?; if let Ok(prop) = surf.plane_prop_handle(plane, "IN_FORMATS") { let prop_info = self.get_property(prop).map_err(|source| Error::Access { errmsg: "Failed to query property", dev: self.dev_path(), source, })?; let (handles, raw_values) = set.as_props_and_values(); let raw_value = raw_values[handles.iter().enumerate().find_map(|(i, handle)| if *handle == prop { Some(i) } else { None }).unwrap()]; if let property::Value::Blob(blob) = prop_info.value_type().convert_value(raw_value) { let data = self.get_property_blob(blob).map_err(|source| Error::Access { errmsg: "Failed to query property blob data", dev: self.dev_path(), source, })?; // be careful here, we have no idea about the alignment inside the blob, so always copy using `read_unaligned`, // although slice::from_raw_parts would be so much nicer to iterate and to read. 
unsafe { let fmt_mod_blob_ptr = data.as_ptr() as *const drm_ffi::drm_format_modifier_blob; let fmt_mod_blob = &*fmt_mod_blob_ptr; let formats_ptr: *const u32 = fmt_mod_blob_ptr.cast::<u8>().offset(fmt_mod_blob.formats_offset as isize) as *const _; let modifiers_ptr: *const drm_ffi::drm_format_modifier = fmt_mod_blob_ptr.cast::<u8>().offset(fmt_mod_blob.modifiers_offset as isize) as *const _; let formats_ptr = formats_ptr as *const u32; let modifiers_ptr = modifiers_ptr as *const drm_ffi::drm_format_modifier; for i in 0..fmt_mod_blob.count_modifiers { let mod_info = modifiers_ptr.offset(i as isize).read_unaligned(); for j in 0..64 { if mod_info.formats & (1u64 << j) != 0 { let code = Fourcc::try_from(formats_ptr.offset((j + mod_info.offset) as isize).read_unaligned()).ok(); let modifier = Modifier::from(mod_info.modifier); if let Some(code) = code { formats.insert(Format { code, modifier, }); } } } } } } } } else if self.plane_type(plane)? == PlaneType::Cursor { // Force a LINEAR layout for the cursor if the driver doesn't support modifiers for format in formats.clone() { formats.insert(Format { code: format.code, modifier: Modifier::Linear, }); } } if formats.is_empty() { formats.insert(Format { code: Fourcc::Argb8888, modifier: Modifier::Invalid, }); } info!(self.logger, "Supported scan-out formats for plane ({:?}): {:?}", plane, formats); Ok(DrmSurface { crtc, plane, internal: Arc::new(internal), formats, }) } pub fn device_id(&self) -> dev_t { self.dev_id } } pub struct Planes { pub primary: plane::Handle, pub cursor: Option<plane::Handle>, pub overlay: Option<Vec<plane::Handle>>, } impl<A: AsRawFd + 'static> DrmDeviceInternal<A> { pub(super) fn reset_state(&self) -> Result<(), Error> { match self { DrmDeviceInternal::Atomic(dev) => dev.reset_state(), DrmDeviceInternal::Legacy(dev) => dev.reset_state(), } } } /// Trait to receive events of a bound [`DrmDevice`] /// /// See [`device_bind`] pub trait DeviceHandler { /// A vblank blank event on the provided crtc 
has happend fn vblank(&mut self, crtc: crtc::Handle); /// An error happend while processing events fn error(&mut self, error: Error); } /// Trait representing open devices that *may* return a `Path` pub trait DevPath { /// Returns the path of the open device if possible fn dev_path(&self) -> Option<PathBuf>; } impl<A: AsRawFd> DevPath for A { fn dev_path(&self) -> Option<PathBuf> { use std::fs; fs::read_link(format!("/proc/self/fd/{:?}", self.as_raw_fd())).ok() } } /// calloop source associated with a Device pub type DrmSource<A> = Generic<DrmDevice<A>>; /// Bind a `Device` to an [`EventLoop`](calloop::EventLoop), /// /// This will cause it to recieve events and feed them into a previously /// set [`DeviceHandler`](DeviceHandler). pub fn device_bind<A, Data>( handle: &LoopHandle<Data>, device: DrmDevice<A>, ) -> ::std::result::Result<Source<DrmSource<A>>, InsertError<DrmSource<A>>> where A: AsRawFd + 'static, Data: 'static, { let source = Generic::new(device, calloop::Interest::Readable, calloop::Mode::Level); handle.insert_source(source, |_, source, _| { source.process_events(); Ok(()) }) }
//! Extension traits for `Stream` implementing various operators. //! //! A collection of functions taking typed `Stream` objects as input and producing new `Stream` //! objects as output. Many of the operators provide simple, composable functionality. Some of the //! operators are more complicated, for use with advanced timely dataflow features. //! //! The [`Unary`](./unary/index.html) and [`Binary`](./binary/index.html) operators provide general //! operators whose behavior can be supplied using closures accepting input and output handles. //! Most of the operators in this module are defined using these two general operators. pub use self::enterleave::{Enter, EnterAt, Leave}; pub use self::unary::Unary; pub use self::queue::*; pub use self::input::Input; pub use self::feedback::{LoopVariable, ConnectLoop}; pub use self::concat::{Concat, Concatenate}; pub use self::partition::Partition; pub use self::map::Map; pub use self::inspect::Inspect; pub use self::filter::Filter; pub use self::binary::Binary; pub use self::delay::Delay; pub use self::exchange::Exchange as ExchangeExtension; pub use self::broadcast::Broadcast; pub use self::probe::Probe; pub use self::to_stream::ToStream; pub use self::capture::Capture; pub mod enterleave; pub mod unary; pub mod queue; pub mod input; pub mod feedback; pub mod concat; pub mod partition; pub mod map; pub mod inspect; pub mod filter; pub mod binary; pub mod delay; pub mod exchange; pub mod broadcast; pub mod probe; pub mod to_stream; pub mod capture; pub mod aggregation; // keep the handle constructors private mod handles; pub use self::handles::{InputHandle, OutputHandle}; mod notificator; pub use self::notificator::Notificator; // keep "mint" module-private mod capability; pub use self::capability::Capability; commented out queue.rs, because it is dumb //! Extension traits for `Stream` implementing various operators. //! //! A collection of functions taking typed `Stream` objects as input and producing new `Stream` //! 
objects as output. Many of the operators provide simple, composable functionality. Some of the //! operators are more complicated, for use with advanced timely dataflow features. //! //! The [`Unary`](./unary/index.html) and [`Binary`](./binary/index.html) operators provide general //! operators whose behavior can be supplied using closures accepting input and output handles. //! Most of the operators in this module are defined using these two general operators. pub use self::enterleave::{Enter, EnterAt, Leave}; pub use self::unary::Unary; // pub use self::queue::*; pub use self::input::Input; pub use self::feedback::{LoopVariable, ConnectLoop}; pub use self::concat::{Concat, Concatenate}; pub use self::partition::Partition; pub use self::map::Map; pub use self::inspect::Inspect; pub use self::filter::Filter; pub use self::binary::Binary; pub use self::delay::Delay; pub use self::exchange::Exchange as ExchangeExtension; pub use self::broadcast::Broadcast; pub use self::probe::Probe; pub use self::to_stream::ToStream; pub use self::capture::Capture; pub mod enterleave; pub mod unary; // pub mod queue; pub mod input; pub mod feedback; pub mod concat; pub mod partition; pub mod map; pub mod inspect; pub mod filter; pub mod binary; pub mod delay; pub mod exchange; pub mod broadcast; pub mod probe; pub mod to_stream; pub mod capture; pub mod aggregation; // keep the handle constructors private mod handles; pub use self::handles::{InputHandle, OutputHandle}; mod notificator; pub use self::notificator::Notificator; // keep "mint" module-private mod capability; pub use self::capability::Capability;
//! An interpreter for the rust-installer package format. Responsible //! for installing from a directory or tarball to an installation //! prefix, represented by a `Components` instance. use std::collections::{HashMap, HashSet}; use std::fmt; use std::io::{self, ErrorKind as IOErrorKind, Read}; use std::mem; use std::path::{Path, PathBuf}; use anyhow::{anyhow, bail, Context, Result}; use tar::EntryType; use crate::diskio::{get_executor, CompletedIo, Executor, FileBuffer, Item, Kind, IO_CHUNK_SIZE}; use crate::dist::component::components::*; use crate::dist::component::transaction::*; use crate::dist::temp; use crate::errors::*; use crate::process; use crate::utils::notifications::Notification; use crate::utils::utils; /// The current metadata revision used by rust-installer pub(crate) const INSTALLER_VERSION: &str = "3"; pub(crate) const VERSION_FILE: &str = "rust-installer-version"; pub trait Package: fmt::Debug { fn contains(&self, component: &str, short_name: Option<&str>) -> bool; fn install<'a>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'a>, ) -> Result<Transaction<'a>>; fn components(&self) -> Vec<String>; } #[derive(Debug)] pub struct DirectoryPackage { path: PathBuf, components: HashSet<String>, copy: bool, } impl DirectoryPackage { pub fn new(path: PathBuf, copy: bool) -> Result<Self> { validate_installer_version(&path)?; let content = utils::read_file("package components", &path.join("components"))?; let components = content .lines() .map(std::borrow::ToOwned::to_owned) .collect(); Ok(Self { path, components, copy, }) } } fn validate_installer_version(path: &Path) -> Result<()> { let file = utils::read_file("installer version", &path.join(VERSION_FILE))?; let v = file.trim(); if v == INSTALLER_VERSION { Ok(()) } else { Err(anyhow!(format!("unsupported installer version: {}", v))) } } impl Package for DirectoryPackage { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { 
self.components.contains(component) || if let Some(n) = short_name { self.components.contains(n) } else { false } } fn install<'a>( &self, target: &Components, name: &str, short_name: Option<&str>, tx: Transaction<'a>, ) -> Result<Transaction<'a>> { let actual_name = if self.components.contains(name) { name } else if let Some(n) = short_name { n } else { name }; let root = self.path.join(actual_name); let manifest = utils::read_file("package manifest", &root.join("manifest.in"))?; let mut builder = target.add(name, tx); for l in manifest.lines() { let part = ComponentPart::decode(l) .ok_or_else(|| RustupError::CorruptComponent(name.to_owned()))?; let path = part.1; let src_path = root.join(&path); match &*part.0 { "file" => { if self.copy { builder.copy_file(path.clone(), &src_path)? } else { builder.move_file(path.clone(), &src_path)? } } "dir" => { if self.copy { builder.copy_dir(path.clone(), &src_path)? } else { builder.move_dir(path.clone(), &src_path)? } } _ => return Err(RustupError::CorruptComponent(name.to_owned()).into()), } } let tx = builder.finish()?; Ok(tx) } fn components(&self) -> Vec<String> { self.components.iter().cloned().collect() } } #[derive(Debug)] pub(crate) struct TarPackage<'a>(DirectoryPackage, temp::Dir<'a>); impl<'a> TarPackage<'a> { pub(crate) fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let temp_dir = temp_cfg.new_directory()?; let mut archive = tar::Archive::new(stream); // The rust-installer packages unpack to a directory called // $pkgname-$version-$target. Skip that directory when // unpacking. 
unpack_without_first_dir(&mut archive, &*temp_dir, notify_handler) .context("failed to extract package (perhaps you ran out of disk space?)")?; Ok(TarPackage( DirectoryPackage::new(temp_dir.to_owned(), false)?, temp_dir, )) } } // Probably this should live in diskio but ¯\_(ツ)_/¯ fn unpack_ram( io_chunk_size: usize, effective_max_ram: Option<usize>, notify_handler: Option<&dyn Fn(Notification<'_>)>, ) -> usize { const RAM_ALLOWANCE_FOR_RUSTUP_AND_BUFFERS: usize = 200 * 1024 * 1024; let minimum_ram = io_chunk_size * 2; let default_max_unpack_ram = if let Some(effective_max_ram) = effective_max_ram { if effective_max_ram > minimum_ram + RAM_ALLOWANCE_FOR_RUSTUP_AND_BUFFERS { effective_max_ram - RAM_ALLOWANCE_FOR_RUSTUP_AND_BUFFERS } else { minimum_ram } } else { // Rustup does not know how much RAM the machine has: use the minimum minimum_ram }; let unpack_ram = match process() .var("RUSTUP_UNPACK_RAM") .ok() .and_then(|budget_str| budget_str.parse::<usize>().ok()) { Some(budget) => { if budget < minimum_ram { warn!( "Ignoring RUSTUP_UNPACK_RAM ({}) less than minimum of {}.", budget, minimum_ram ); minimum_ram } else if budget > default_max_unpack_ram { warn!( "Ignoring RUSTUP_UNPACK_RAM ({}) greater than detected available RAM of {}.", budget, default_max_unpack_ram ); default_max_unpack_ram } else { budget } } None => { if let Some(h) = notify_handler { h(Notification::SetDefaultBufferSize(default_max_unpack_ram)) } default_max_unpack_ram } }; if minimum_ram > unpack_ram { panic!("RUSTUP_UNPACK_RAM must be larger than {}", minimum_ram); } else { unpack_ram } } /// Handle the async result of io operations /// Replaces op.result with Ok(()) fn filter_result(op: &mut CompletedIo) -> io::Result<()> { if let CompletedIo::Item(op) = op { let result = mem::replace(&mut op.result, Ok(())); match result { Ok(_) => Ok(()), Err(e) => match e.kind() { IOErrorKind::AlreadyExists => { // mkdir of e.g. 
~/.rustup already existing is just fine; // for others it would be better to know whether it is // expected to exist or not -so put a flag in the state. if let Kind::Directory = op.kind { Ok(()) } else { Err(e) } } _ => Err(e), }, } } else { Ok(()) } } /// Dequeue the children of directories queued up waiting for the directory to /// be created. /// /// Currently the volume of queued items does not count as backpressure against /// the main tar extraction process. /// Returns the number of triggered children fn trigger_children( io_executor: &dyn Executor, directories: &mut HashMap<PathBuf, DirStatus>, op: CompletedIo, ) -> Result<usize> { let mut result = 0; if let CompletedIo::Item(item) = op { if let Kind::Directory = item.kind { let mut pending = Vec::new(); directories .entry(item.full_path) .and_modify(|status| match status { DirStatus::Exists => unreachable!(), DirStatus::Pending(pending_inner) => { pending.append(pending_inner); *status = DirStatus::Exists; } }) .or_insert_with(|| unreachable!()); result += pending.len(); for pending_item in pending.into_iter() { for mut item in io_executor.execute(pending_item).collect::<Vec<_>>() { // TODO capture metrics filter_result(&mut item)?; result += trigger_children(io_executor, directories, item)?; } } } }; Ok(result) } /// What is the status of this directory ? 
enum DirStatus { Exists, Pending(Vec<Item>), } fn unpack_without_first_dir<'a, R: Read>( archive: &mut tar::Archive<R>, path: &Path, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<()> { let entries = archive.entries()?; let effective_max_ram = match effective_limits::memory_limit() { Ok(ram) => Some(ram as usize), Err(e) => { if let Some(h) = notify_handler { h(Notification::Error(e.to_string())) } None } }; let unpack_ram = unpack_ram(IO_CHUNK_SIZE, effective_max_ram, notify_handler); let mut io_executor: Box<dyn Executor> = get_executor(notify_handler, unpack_ram)?; let mut directories: HashMap<PathBuf, DirStatus> = HashMap::new(); // Path is presumed to exist. Call it a precondition. directories.insert(path.to_owned(), DirStatus::Exists); 'entries: for entry in entries { // drain completed results to keep memory pressure low and respond // rapidly to completed events even if we couldn't submit work (because // our unpacked item is pending dequeue) for mut item in io_executor.completed().collect::<Vec<_>>() { // TODO capture metrics filter_result(&mut item)?; trigger_children(&*io_executor, &mut directories, item)?; } let mut entry = entry?; let relpath = { let path = entry.path(); let path = path?; path.into_owned() }; // Reject path components that are not normal (..|/| etc) for part in relpath.components() { match part { // Some very early rust tarballs include a "." segment which we have to // support, despite not liking it. std::path::Component::Normal(_) | std::path::Component::CurDir => {} _ => bail!(format!("tar path '{}' is not supported", relpath.display())), } } let mut components = relpath.components(); // Throw away the first path component: our root was supplied. components.next(); let full_path = path.join(&components.as_path()); if full_path == path { // The tmp dir code makes the root dir for us. 
continue; } struct SenderEntry<'a, 'b, R: std::io::Read> { sender: Box<dyn FnMut(FileBuffer) -> bool + 'a>, entry: tar::Entry<'b, R>, } /// true if either no sender_entry was provided, or the incremental file /// has been fully dispatched. fn flush_ios<'a, R: std::io::Read, P: AsRef<Path>>( io_executor: &mut dyn Executor, mut directories: &mut HashMap<PathBuf, DirStatus>, mut sender_entry: Option<&mut SenderEntry<'a, '_, R>>, full_path: P, ) -> Result<bool> { let mut result = sender_entry.is_none(); for mut op in io_executor.completed().collect::<Vec<_>>() { // TODO capture metrics filter_result(&mut op)?; trigger_children(&*io_executor, &mut directories, op)?; } // Maybe stream a file incrementally if let Some(sender) = sender_entry.as_mut() { if io_executor.buffer_available(IO_CHUNK_SIZE) { let mut buffer = io_executor.get_buffer(IO_CHUNK_SIZE); let len = sender .entry .by_ref() .take(IO_CHUNK_SIZE as u64) .read_to_end(&mut buffer)?; buffer = buffer.finished(); if len == 0 { result = true; } if !(sender.sender)(buffer) { bail!(format!( "IO receiver for '{}' disconnected", full_path.as_ref().display() )) } } } Ok(result) } // Bail out if we get hard links, device nodes or any other unusual content // - it is most likely an attack, as rusts cross-platform nature precludes // such artifacts let kind = entry.header().entry_type(); // https://github.com/rust-lang/rustup/issues/1140 and before that // https://github.com/rust-lang/rust/issues/25479 // tl;dr: code got convoluted and we *may* have damaged tarballs out // there. // However the mandate we have is very simple: unpack as the current // user with modes matching the tar contents. No documented tars with // bad modes are in the bug tracker : the previous permission splatting // code was inherited from interactions with sudo that are best // addressed outside of rustup (run with an appropriate effective uid). 
// THAT SAID: If regressions turn up immediately post release this code - // https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=a8549057f0827bf3a068d8917256765a // is a translation of the prior helper function into an in-iterator // application. let tar_mode = entry.header().mode().ok().unwrap(); // That said, the tarballs that are shipped way back have single-user // permissions: // -rwx------ rustbuild/rustbuild ..... release/test-release.sh // so we should normalise the mode to match the previous behaviour users // may be expecting where the above file would end up with mode 0o755 let u_mode = tar_mode & 0o700; let g_mode = (u_mode & 0o0500) >> 3; let o_mode = g_mode >> 3; let mode = u_mode | g_mode | o_mode; let file_size = entry.header().size()?; let size = std::cmp::min(IO_CHUNK_SIZE as u64, file_size); while !io_executor.buffer_available(size as usize) { flush_ios::<tar::Entry<'_, R>, _>( &mut *io_executor, &mut directories, None, &full_path, )?; } let mut incremental_file_sender: Option<Box<dyn FnMut(FileBuffer) -> bool + '_>> = None; let mut item = match kind { EntryType::Directory => { directories.insert(full_path.to_owned(), DirStatus::Pending(Vec::new())); Item::make_dir(full_path.clone(), mode) } EntryType::Regular => { if file_size > IO_CHUNK_SIZE as u64 { let (item, sender) = Item::write_file_segmented( full_path.clone(), mode, io_executor.incremental_file_state(), )?; incremental_file_sender = Some(sender); item } else { let mut content = io_executor.get_buffer(size as usize); entry.read_to_end(&mut content)?; content = content.finished(); Item::write_file(full_path.clone(), mode, content) } } _ => bail!(format!("tar entry kind '{:?}' is not supported", kind)), }; let item = loop { // Create the full path to the entry if it does not exist already if let Some(parent) = item.full_path.to_owned().parent() { match directories.get_mut(parent) { None => { // Tar has item before containing directory // Complain about this so we can see 
if these exist. writeln!( process().stderr(), "Unexpected: missing parent '{}' for '{}'", parent.display(), entry.path()?.display() )?; directories.insert(parent.to_owned(), DirStatus::Pending(vec![item])); item = Item::make_dir(parent.to_owned(), 0o755); // Check the parent's parent continue; } Some(DirStatus::Exists) => { break Some(item); } Some(DirStatus::Pending(pending)) => { // Parent dir is being made pending.push(item); if incremental_file_sender.is_none() { // take next item from tar continue 'entries; } else { // don't submit a new item for processing, but do be ready to feed data to the incremental file. break None; } } } } else { // We should never see a path with no parent. panic!(); } }; if let Some(item) = item { // Submit the new item for mut item in io_executor.execute(item).collect::<Vec<_>>() { // TODO capture metrics filter_result(&mut item)?; trigger_children(&*io_executor, &mut directories, item)?; } } let mut incremental_file_sender = incremental_file_sender.map(|incremental_file_sender| SenderEntry { sender: incremental_file_sender, entry, }); // monitor io queue and feed in the content of the file (if needed) while !flush_ios( &mut *io_executor, &mut directories, incremental_file_sender.as_mut(), &full_path, )? 
{} } loop { let mut triggered = 0; for mut item in io_executor.join().collect::<Vec<_>>() { // handle final IOs // TODO capture metrics filter_result(&mut item)?; triggered += trigger_children(&*io_executor, &mut directories, item)?; } if triggered == 0 { // None of the IO submitted before the prior join triggered any new // submissions break; } } Ok(()) } impl<'a> Package for TarPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } #[derive(Debug)] pub(crate) struct TarGzPackage<'a>(TarPackage<'a>); impl<'a> TarGzPackage<'a> { pub(crate) fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let stream = flate2::read::GzDecoder::new(stream); Ok(TarGzPackage(TarPackage::new( stream, temp_cfg, notify_handler, )?)) } } impl<'a> Package for TarGzPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } #[derive(Debug)] pub(crate) struct TarXzPackage<'a>(TarPackage<'a>); impl<'a> TarXzPackage<'a> { pub(crate) fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let stream = xz2::read::XzDecoder::new(stream); Ok(TarXzPackage(TarPackage::new( stream, temp_cfg, notify_handler, )?)) } } impl<'a> Package for TarXzPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool 
{ self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } #[derive(Debug)] pub(crate) struct TarZStdPackage<'a>(TarPackage<'a>); impl<'a> TarZStdPackage<'a> { pub(crate) fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let stream = zstd::stream::read::Decoder::new(stream)?; Ok(TarZStdPackage(TarPackage::new( stream, temp_cfg, notify_handler, )?)) } } impl<'a> Package for TarZStdPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } remove unnecessary mut ref Signed-off-by: hi-rustin <c7d168847ad25898cad3ce2d480246882ded574b@gmail.com> //! An interpreter for the rust-installer package format. Responsible //! for installing from a directory or tarball to an installation //! prefix, represented by a `Components` instance. 
use std::collections::{HashMap, HashSet}; use std::fmt; use std::io::{self, ErrorKind as IOErrorKind, Read}; use std::mem; use std::path::{Path, PathBuf}; use anyhow::{anyhow, bail, Context, Result}; use tar::EntryType; use crate::diskio::{get_executor, CompletedIo, Executor, FileBuffer, Item, Kind, IO_CHUNK_SIZE}; use crate::dist::component::components::*; use crate::dist::component::transaction::*; use crate::dist::temp; use crate::errors::*; use crate::process; use crate::utils::notifications::Notification; use crate::utils::utils; /// The current metadata revision used by rust-installer pub(crate) const INSTALLER_VERSION: &str = "3"; pub(crate) const VERSION_FILE: &str = "rust-installer-version"; pub trait Package: fmt::Debug { fn contains(&self, component: &str, short_name: Option<&str>) -> bool; fn install<'a>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'a>, ) -> Result<Transaction<'a>>; fn components(&self) -> Vec<String>; } #[derive(Debug)] pub struct DirectoryPackage { path: PathBuf, components: HashSet<String>, copy: bool, } impl DirectoryPackage { pub fn new(path: PathBuf, copy: bool) -> Result<Self> { validate_installer_version(&path)?; let content = utils::read_file("package components", &path.join("components"))?; let components = content .lines() .map(std::borrow::ToOwned::to_owned) .collect(); Ok(Self { path, components, copy, }) } } fn validate_installer_version(path: &Path) -> Result<()> { let file = utils::read_file("installer version", &path.join(VERSION_FILE))?; let v = file.trim(); if v == INSTALLER_VERSION { Ok(()) } else { Err(anyhow!(format!("unsupported installer version: {}", v))) } } impl Package for DirectoryPackage { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.components.contains(component) || if let Some(n) = short_name { self.components.contains(n) } else { false } } fn install<'a>( &self, target: &Components, name: &str, short_name: Option<&str>, tx: 
Transaction<'a>, ) -> Result<Transaction<'a>> { let actual_name = if self.components.contains(name) { name } else if let Some(n) = short_name { n } else { name }; let root = self.path.join(actual_name); let manifest = utils::read_file("package manifest", &root.join("manifest.in"))?; let mut builder = target.add(name, tx); for l in manifest.lines() { let part = ComponentPart::decode(l) .ok_or_else(|| RustupError::CorruptComponent(name.to_owned()))?; let path = part.1; let src_path = root.join(&path); match &*part.0 { "file" => { if self.copy { builder.copy_file(path.clone(), &src_path)? } else { builder.move_file(path.clone(), &src_path)? } } "dir" => { if self.copy { builder.copy_dir(path.clone(), &src_path)? } else { builder.move_dir(path.clone(), &src_path)? } } _ => return Err(RustupError::CorruptComponent(name.to_owned()).into()), } } let tx = builder.finish()?; Ok(tx) } fn components(&self) -> Vec<String> { self.components.iter().cloned().collect() } } #[derive(Debug)] pub(crate) struct TarPackage<'a>(DirectoryPackage, temp::Dir<'a>); impl<'a> TarPackage<'a> { pub(crate) fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let temp_dir = temp_cfg.new_directory()?; let mut archive = tar::Archive::new(stream); // The rust-installer packages unpack to a directory called // $pkgname-$version-$target. Skip that directory when // unpacking. 
unpack_without_first_dir(&mut archive, &*temp_dir, notify_handler) .context("failed to extract package (perhaps you ran out of disk space?)")?; Ok(TarPackage( DirectoryPackage::new(temp_dir.to_owned(), false)?, temp_dir, )) } } // Probably this should live in diskio but ¯\_(ツ)_/¯ fn unpack_ram( io_chunk_size: usize, effective_max_ram: Option<usize>, notify_handler: Option<&dyn Fn(Notification<'_>)>, ) -> usize { const RAM_ALLOWANCE_FOR_RUSTUP_AND_BUFFERS: usize = 200 * 1024 * 1024; let minimum_ram = io_chunk_size * 2; let default_max_unpack_ram = if let Some(effective_max_ram) = effective_max_ram { if effective_max_ram > minimum_ram + RAM_ALLOWANCE_FOR_RUSTUP_AND_BUFFERS { effective_max_ram - RAM_ALLOWANCE_FOR_RUSTUP_AND_BUFFERS } else { minimum_ram } } else { // Rustup does not know how much RAM the machine has: use the minimum minimum_ram }; let unpack_ram = match process() .var("RUSTUP_UNPACK_RAM") .ok() .and_then(|budget_str| budget_str.parse::<usize>().ok()) { Some(budget) => { if budget < minimum_ram { warn!( "Ignoring RUSTUP_UNPACK_RAM ({}) less than minimum of {}.", budget, minimum_ram ); minimum_ram } else if budget > default_max_unpack_ram { warn!( "Ignoring RUSTUP_UNPACK_RAM ({}) greater than detected available RAM of {}.", budget, default_max_unpack_ram ); default_max_unpack_ram } else { budget } } None => { if let Some(h) = notify_handler { h(Notification::SetDefaultBufferSize(default_max_unpack_ram)) } default_max_unpack_ram } }; if minimum_ram > unpack_ram { panic!("RUSTUP_UNPACK_RAM must be larger than {}", minimum_ram); } else { unpack_ram } } /// Handle the async result of io operations /// Replaces op.result with Ok(()) fn filter_result(op: &mut CompletedIo) -> io::Result<()> { if let CompletedIo::Item(op) = op { let result = mem::replace(&mut op.result, Ok(())); match result { Ok(_) => Ok(()), Err(e) => match e.kind() { IOErrorKind::AlreadyExists => { // mkdir of e.g. 
~/.rustup already existing is just fine; // for others it would be better to know whether it is // expected to exist or not -so put a flag in the state. if let Kind::Directory = op.kind { Ok(()) } else { Err(e) } } _ => Err(e), }, } } else { Ok(()) } } /// Dequeue the children of directories queued up waiting for the directory to /// be created. /// /// Currently the volume of queued items does not count as backpressure against /// the main tar extraction process. /// Returns the number of triggered children fn trigger_children( io_executor: &dyn Executor, directories: &mut HashMap<PathBuf, DirStatus>, op: CompletedIo, ) -> Result<usize> { let mut result = 0; if let CompletedIo::Item(item) = op { if let Kind::Directory = item.kind { let mut pending = Vec::new(); directories .entry(item.full_path) .and_modify(|status| match status { DirStatus::Exists => unreachable!(), DirStatus::Pending(pending_inner) => { pending.append(pending_inner); *status = DirStatus::Exists; } }) .or_insert_with(|| unreachable!()); result += pending.len(); for pending_item in pending.into_iter() { for mut item in io_executor.execute(pending_item).collect::<Vec<_>>() { // TODO capture metrics filter_result(&mut item)?; result += trigger_children(io_executor, directories, item)?; } } } }; Ok(result) } /// What is the status of this directory ? 
enum DirStatus { Exists, Pending(Vec<Item>), } fn unpack_without_first_dir<'a, R: Read>( archive: &mut tar::Archive<R>, path: &Path, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<()> { let entries = archive.entries()?; let effective_max_ram = match effective_limits::memory_limit() { Ok(ram) => Some(ram as usize), Err(e) => { if let Some(h) = notify_handler { h(Notification::Error(e.to_string())) } None } }; let unpack_ram = unpack_ram(IO_CHUNK_SIZE, effective_max_ram, notify_handler); let mut io_executor: Box<dyn Executor> = get_executor(notify_handler, unpack_ram)?; let mut directories: HashMap<PathBuf, DirStatus> = HashMap::new(); // Path is presumed to exist. Call it a precondition. directories.insert(path.to_owned(), DirStatus::Exists); 'entries: for entry in entries { // drain completed results to keep memory pressure low and respond // rapidly to completed events even if we couldn't submit work (because // our unpacked item is pending dequeue) for mut item in io_executor.completed().collect::<Vec<_>>() { // TODO capture metrics filter_result(&mut item)?; trigger_children(&*io_executor, &mut directories, item)?; } let mut entry = entry?; let relpath = { let path = entry.path(); let path = path?; path.into_owned() }; // Reject path components that are not normal (..|/| etc) for part in relpath.components() { match part { // Some very early rust tarballs include a "." segment which we have to // support, despite not liking it. std::path::Component::Normal(_) | std::path::Component::CurDir => {} _ => bail!(format!("tar path '{}' is not supported", relpath.display())), } } let mut components = relpath.components(); // Throw away the first path component: our root was supplied. components.next(); let full_path = path.join(&components.as_path()); if full_path == path { // The tmp dir code makes the root dir for us. 
continue; } struct SenderEntry<'a, 'b, R: std::io::Read> { sender: Box<dyn FnMut(FileBuffer) -> bool + 'a>, entry: tar::Entry<'b, R>, } /// true if either no sender_entry was provided, or the incremental file /// has been fully dispatched. fn flush_ios<'a, R: std::io::Read, P: AsRef<Path>>( io_executor: &mut dyn Executor, directories: &mut HashMap<PathBuf, DirStatus>, mut sender_entry: Option<&mut SenderEntry<'a, '_, R>>, full_path: P, ) -> Result<bool> { let mut result = sender_entry.is_none(); for mut op in io_executor.completed().collect::<Vec<_>>() { // TODO capture metrics filter_result(&mut op)?; trigger_children(&*io_executor, directories, op)?; } // Maybe stream a file incrementally if let Some(sender) = sender_entry.as_mut() { if io_executor.buffer_available(IO_CHUNK_SIZE) { let mut buffer = io_executor.get_buffer(IO_CHUNK_SIZE); let len = sender .entry .by_ref() .take(IO_CHUNK_SIZE as u64) .read_to_end(&mut buffer)?; buffer = buffer.finished(); if len == 0 { result = true; } if !(sender.sender)(buffer) { bail!(format!( "IO receiver for '{}' disconnected", full_path.as_ref().display() )) } } } Ok(result) } // Bail out if we get hard links, device nodes or any other unusual content // - it is most likely an attack, as rusts cross-platform nature precludes // such artifacts let kind = entry.header().entry_type(); // https://github.com/rust-lang/rustup/issues/1140 and before that // https://github.com/rust-lang/rust/issues/25479 // tl;dr: code got convoluted and we *may* have damaged tarballs out // there. // However the mandate we have is very simple: unpack as the current // user with modes matching the tar contents. No documented tars with // bad modes are in the bug tracker : the previous permission splatting // code was inherited from interactions with sudo that are best // addressed outside of rustup (run with an appropriate effective uid). 
// THAT SAID: If regressions turn up immediately post release this code - // https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=a8549057f0827bf3a068d8917256765a // is a translation of the prior helper function into an in-iterator // application. let tar_mode = entry.header().mode().ok().unwrap(); // That said, the tarballs that are shipped way back have single-user // permissions: // -rwx------ rustbuild/rustbuild ..... release/test-release.sh // so we should normalise the mode to match the previous behaviour users // may be expecting where the above file would end up with mode 0o755 let u_mode = tar_mode & 0o700; let g_mode = (u_mode & 0o0500) >> 3; let o_mode = g_mode >> 3; let mode = u_mode | g_mode | o_mode; let file_size = entry.header().size()?; let size = std::cmp::min(IO_CHUNK_SIZE as u64, file_size); while !io_executor.buffer_available(size as usize) { flush_ios::<tar::Entry<'_, R>, _>( &mut *io_executor, &mut directories, None, &full_path, )?; } let mut incremental_file_sender: Option<Box<dyn FnMut(FileBuffer) -> bool + '_>> = None; let mut item = match kind { EntryType::Directory => { directories.insert(full_path.to_owned(), DirStatus::Pending(Vec::new())); Item::make_dir(full_path.clone(), mode) } EntryType::Regular => { if file_size > IO_CHUNK_SIZE as u64 { let (item, sender) = Item::write_file_segmented( full_path.clone(), mode, io_executor.incremental_file_state(), )?; incremental_file_sender = Some(sender); item } else { let mut content = io_executor.get_buffer(size as usize); entry.read_to_end(&mut content)?; content = content.finished(); Item::write_file(full_path.clone(), mode, content) } } _ => bail!(format!("tar entry kind '{:?}' is not supported", kind)), }; let item = loop { // Create the full path to the entry if it does not exist already if let Some(parent) = item.full_path.to_owned().parent() { match directories.get_mut(parent) { None => { // Tar has item before containing directory // Complain about this so we can see 
if these exist. writeln!( process().stderr(), "Unexpected: missing parent '{}' for '{}'", parent.display(), entry.path()?.display() )?; directories.insert(parent.to_owned(), DirStatus::Pending(vec![item])); item = Item::make_dir(parent.to_owned(), 0o755); // Check the parent's parent continue; } Some(DirStatus::Exists) => { break Some(item); } Some(DirStatus::Pending(pending)) => { // Parent dir is being made pending.push(item); if incremental_file_sender.is_none() { // take next item from tar continue 'entries; } else { // don't submit a new item for processing, but do be ready to feed data to the incremental file. break None; } } } } else { // We should never see a path with no parent. panic!(); } }; if let Some(item) = item { // Submit the new item for mut item in io_executor.execute(item).collect::<Vec<_>>() { // TODO capture metrics filter_result(&mut item)?; trigger_children(&*io_executor, &mut directories, item)?; } } let mut incremental_file_sender = incremental_file_sender.map(|incremental_file_sender| SenderEntry { sender: incremental_file_sender, entry, }); // monitor io queue and feed in the content of the file (if needed) while !flush_ios( &mut *io_executor, &mut directories, incremental_file_sender.as_mut(), &full_path, )? 
{} } loop { let mut triggered = 0; for mut item in io_executor.join().collect::<Vec<_>>() { // handle final IOs // TODO capture metrics filter_result(&mut item)?; triggered += trigger_children(&*io_executor, &mut directories, item)?; } if triggered == 0 { // None of the IO submitted before the prior join triggered any new // submissions break; } } Ok(()) } impl<'a> Package for TarPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } #[derive(Debug)] pub(crate) struct TarGzPackage<'a>(TarPackage<'a>); impl<'a> TarGzPackage<'a> { pub(crate) fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let stream = flate2::read::GzDecoder::new(stream); Ok(TarGzPackage(TarPackage::new( stream, temp_cfg, notify_handler, )?)) } } impl<'a> Package for TarGzPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } #[derive(Debug)] pub(crate) struct TarXzPackage<'a>(TarPackage<'a>); impl<'a> TarXzPackage<'a> { pub(crate) fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let stream = xz2::read::XzDecoder::new(stream); Ok(TarXzPackage(TarPackage::new( stream, temp_cfg, notify_handler, )?)) } } impl<'a> Package for TarXzPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool 
{ self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } } #[derive(Debug)] pub(crate) struct TarZStdPackage<'a>(TarPackage<'a>); impl<'a> TarZStdPackage<'a> { pub(crate) fn new<R: Read>( stream: R, temp_cfg: &'a temp::Cfg, notify_handler: Option<&'a dyn Fn(Notification<'_>)>, ) -> Result<Self> { let stream = zstd::stream::read::Decoder::new(stream)?; Ok(TarZStdPackage(TarPackage::new( stream, temp_cfg, notify_handler, )?)) } } impl<'a> Package for TarZStdPackage<'a> { fn contains(&self, component: &str, short_name: Option<&str>) -> bool { self.0.contains(component, short_name) } fn install<'b>( &self, target: &Components, component: &str, short_name: Option<&str>, tx: Transaction<'b>, ) -> Result<Transaction<'b>> { self.0.install(target, component, short_name, tx) } fn components(&self) -> Vec<String> { self.0.components() } }
// Copyright 2018 Developers of the Rand project. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Weighted index sampling //! //! The [`WeightedIndex`] distribution allows `O(log N)` sampling from a //! sequence of weights. As the name implies, the result is the index in that //! sequence, which may be used to look up an associated value. //! //! Note also that the `rand_distr` crate provides //! `rand_distr::alias_method::WeightedIndex`, which allows `O(1)` sampling; //! this distribution however has a much greater set-up cost, thus is only //! recommended where *many* samples are required. // TODO: link alias_method impl when published in rand_distr #[allow(missing_docs)] #[deprecated(since = "0.8.0", note = "moved to rand_distr crate")] pub mod alias_method { // This module exists to provide a deprecation warning which minimises // compile errors, but still fails to compile if ever used. use std::marker::PhantomData; use super::WeightedError; #[derive(Debug)] pub struct WeightedIndex<W: Weight> { _phantom: PhantomData<W>, } impl<W: Weight> WeightedIndex<W> { pub fn new(_weights: Vec<W>) -> Result<Self, WeightedError> { Err(WeightedError::NoItem) } } pub trait Weight {} macro_rules! 
impl_weight { () => {}; ($T:ident, $($more:ident,)*) => { impl Weight for $T {} impl_weight!($($more,)*); }; } impl_weight!(f64, f32,); impl_weight!(u8, u16, u32, u64, usize,); impl_weight!(i8, i16, i32, i64, isize,); #[cfg(not(target_os = "emscripten"))] impl_weight!(u128, i128,); } use crate::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler}; use crate::distributions::Distribution; use crate::Rng; use core::cmp::PartialOrd; use core::fmt; // Note that this whole module is only imported if feature="alloc" is enabled. #[cfg(not(feature = "std"))] use crate::alloc::vec::Vec; /// A distribution using weighted sampling to pick a discretely selected /// item. /// /// Sampling a `WeightedIndex` distribution returns the index of a randomly /// selected element from the iterator used when the `WeightedIndex` was /// created. The chance of a given element being picked is proportional to the /// value of the element. The weights can use any type `X` for which an /// implementation of [`Uniform<X>`] exists. /// /// # Performance /// /// A `WeightedIndex<X>` contains a `Vec<X>` and a [`Uniform<X>`] and so its /// size is the sum of the size of those objects, possibly plus some alignment. /// /// Creating a `WeightedIndex<X>` will allocate enough space to hold `N - 1` /// weights of type `X`, where `N` is the number of weights. However, since /// `Vec` doesn't guarantee a particular growth strategy, additional memory /// might be allocated but not used. Since the `WeightedIndex` object also /// contains, this might cause additional allocations, though for primitive /// types, ['Uniform<X>`] doesn't allocate any memory. /// /// Time complexity of sampling from `WeightedIndex` is `O(log N)` where /// `N` is the number of weights. 
/// /// Sampling from `WeightedIndex` will result in a single call to /// `Uniform<X>::sample` (method of the [`Distribution`] trait), which typically /// will request a single value from the underlying [`RngCore`], though the /// exact number depends on the implementaiton of `Uniform<X>::sample`. /// /// # Example /// /// ``` /// use rand::prelude::*; /// use rand::distributions::WeightedIndex; /// /// let choices = ['a', 'b', 'c']; /// let weights = [2, 1, 1]; /// let dist = WeightedIndex::new(&weights).unwrap(); /// let mut rng = thread_rng(); /// for _ in 0..100 { /// // 50% chance to print 'a', 25% chance to print 'b', 25% chance to print 'c' /// println!("{}", choices[dist.sample(&mut rng)]); /// } /// /// let items = [('a', 0), ('b', 3), ('c', 7)]; /// let dist2 = WeightedIndex::new(items.iter().map(|item| item.1)).unwrap(); /// for _ in 0..100 { /// // 0% chance to print 'a', 30% chance to print 'b', 70% chance to print 'c' /// println!("{}", items[dist2.sample(&mut rng)].0); /// } /// ``` /// /// [`Uniform<X>`]: crate::distributions::uniform::Uniform /// [`RngCore`]: crate::RngCore #[derive(Debug, Clone)] pub struct WeightedIndex<X: SampleUniform + PartialOrd> { cumulative_weights: Vec<X>, total_weight: X, weight_distribution: X::Sampler, } impl<X: SampleUniform + PartialOrd> WeightedIndex<X> { /// Creates a new a `WeightedIndex` [`Distribution`] using the values /// in `weights`. The weights can use any type `X` for which an /// implementation of [`Uniform<X>`] exists. /// /// Returns an error if the iterator is empty, if any weight is `< 0`, or /// if its total value is 0. 
/// /// [`Uniform<X>`]: crate::distributions::uniform::Uniform pub fn new<I>(weights: I) -> Result<WeightedIndex<X>, WeightedError> where I: IntoIterator, I::Item: SampleBorrow<X>, X: for<'a> ::core::ops::AddAssign<&'a X> + Clone + Default, { let mut iter = weights.into_iter(); let mut total_weight: X = iter.next().ok_or(WeightedError::NoItem)?.borrow().clone(); let zero = <X as Default>::default(); if total_weight < zero { return Err(WeightedError::InvalidWeight); } let mut weights = Vec::<X>::with_capacity(iter.size_hint().0); for w in iter { if *w.borrow() < zero { return Err(WeightedError::InvalidWeight); } weights.push(total_weight.clone()); total_weight += w.borrow(); } if total_weight == zero { return Err(WeightedError::AllWeightsZero); } let distr = X::Sampler::new(zero, total_weight.clone()); Ok(WeightedIndex { cumulative_weights: weights, total_weight, weight_distribution: distr, }) } /// Update a subset of weights, without changing the number of weights. /// /// `new_weights` must be sorted by the index. /// /// Using this method instead of `new` might be more efficient if only a small number of /// weights is modified. No allocations are performed, unless the weight type `X` uses /// allocation internally. /// /// In case of error, `self` is not modified. pub fn update_weights(&mut self, new_weights: &[(usize, &X)]) -> Result<(), WeightedError> where X: for<'a> ::core::ops::AddAssign<&'a X> + for<'a> ::core::ops::SubAssign<&'a X> + Clone + Default { if new_weights.is_empty() { return Ok(()); } let zero = <X as Default>::default(); let mut total_weight = self.total_weight.clone(); // Check for errors first, so we don't modify `self` in case something // goes wrong. 
let mut prev_i = None; for &(i, w) in new_weights { if let Some(old_i) = prev_i { if old_i >= i { return Err(WeightedError::InvalidWeight); } } if *w < zero { return Err(WeightedError::InvalidWeight); } if i >= self.cumulative_weights.len() + 1 { return Err(WeightedError::TooMany); } let mut old_w = if i < self.cumulative_weights.len() { self.cumulative_weights[i].clone() } else { self.total_weight.clone() }; if i > 0 { old_w -= &self.cumulative_weights[i - 1]; } total_weight -= &old_w; total_weight += w; prev_i = Some(i); } if total_weight == zero { return Err(WeightedError::AllWeightsZero); } // Update the weights. Because we checked all the preconditions in the // previous loop, this should never panic. let mut iter = new_weights.iter(); let mut prev_weight = zero.clone(); let mut next_new_weight = iter.next(); let &(first_new_index, _) = next_new_weight.unwrap(); let mut cumulative_weight = if first_new_index > 0 { self.cumulative_weights[first_new_index - 1].clone() } else { zero.clone() }; for i in first_new_index..self.cumulative_weights.len() { match next_new_weight { Some(&(j, w)) if i == j => { cumulative_weight += w; next_new_weight = iter.next(); } _ => { let mut tmp = self.cumulative_weights[i].clone(); tmp -= &prev_weight; // We know this is positive. cumulative_weight += &tmp; } } prev_weight = cumulative_weight.clone(); core::mem::swap(&mut prev_weight, &mut self.cumulative_weights[i]); } self.total_weight = total_weight; self.weight_distribution = X::Sampler::new(zero, self.total_weight.clone()); Ok(()) } } impl<X> Distribution<usize> for WeightedIndex<X> where X: SampleUniform + PartialOrd { fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize { use ::core::cmp::Ordering; let chosen_weight = self.weight_distribution.sample(rng); // Find the first item which has a weight *higher* than the chosen weight. 
self.cumulative_weights .binary_search_by(|w| { if *w <= chosen_weight { Ordering::Less } else { Ordering::Greater } }) .unwrap_err() } } #[cfg(test)] mod test { use super::*; #[test] #[cfg_attr(miri, ignore)] // Miri is too slow fn test_weightedindex() { let mut r = crate::test::rng(700); const N_REPS: u32 = 5000; let weights = [1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7]; let total_weight = weights.iter().sum::<u32>() as f32; let verify = |result: [i32; 14]| { for (i, count) in result.iter().enumerate() { let exp = (weights[i] * N_REPS) as f32 / total_weight; let mut err = (*count as f32 - exp).abs(); if err != 0.0 { err /= exp; } assert!(err <= 0.25); } }; // WeightedIndex from vec let mut chosen = [0i32; 14]; let distr = WeightedIndex::new(weights.to_vec()).unwrap(); for _ in 0..N_REPS { chosen[distr.sample(&mut r)] += 1; } verify(chosen); // WeightedIndex from slice chosen = [0i32; 14]; let distr = WeightedIndex::new(&weights[..]).unwrap(); for _ in 0..N_REPS { chosen[distr.sample(&mut r)] += 1; } verify(chosen); // WeightedIndex from iterator chosen = [0i32; 14]; let distr = WeightedIndex::new(weights.iter()).unwrap(); for _ in 0..N_REPS { chosen[distr.sample(&mut r)] += 1; } verify(chosen); for _ in 0..5 { assert_eq!(WeightedIndex::new(&[0, 1]).unwrap().sample(&mut r), 1); assert_eq!(WeightedIndex::new(&[1, 0]).unwrap().sample(&mut r), 0); assert_eq!( WeightedIndex::new(&[0, 0, 0, 0, 10, 0]) .unwrap() .sample(&mut r), 4 ); } assert_eq!( WeightedIndex::new(&[10][0..0]).unwrap_err(), WeightedError::NoItem ); assert_eq!( WeightedIndex::new(&[0]).unwrap_err(), WeightedError::AllWeightsZero ); assert_eq!( WeightedIndex::new(&[10, 20, -1, 30]).unwrap_err(), WeightedError::InvalidWeight ); assert_eq!( WeightedIndex::new(&[-10, 20, 1, 30]).unwrap_err(), WeightedError::InvalidWeight ); assert_eq!( WeightedIndex::new(&[-10]).unwrap_err(), WeightedError::InvalidWeight ); } #[test] fn test_update_weights() { let data = [ ( &[10u32, 2, 3, 4][..], &[(1, &100), (2, 
&4)][..], // positive change &[10, 100, 4, 4][..], ), ( &[1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7][..], &[(2, &1), (5, &1), (13, &100)][..], // negative change and last element &[1u32, 2, 1, 0, 5, 1, 7, 1, 2, 3, 4, 5, 6, 100][..], ), ]; for (weights, update, expected_weights) in data.iter() { let total_weight = weights.iter().sum::<u32>(); let mut distr = WeightedIndex::new(weights.to_vec()).unwrap(); assert_eq!(distr.total_weight, total_weight); distr.update_weights(update).unwrap(); let expected_total_weight = expected_weights.iter().sum::<u32>(); let expected_distr = WeightedIndex::new(expected_weights.to_vec()).unwrap(); assert_eq!(distr.total_weight, expected_total_weight); assert_eq!(distr.total_weight, expected_distr.total_weight); assert_eq!(distr.cumulative_weights, expected_distr.cumulative_weights); } } #[test] fn value_stability() { fn test_samples<X: SampleUniform + PartialOrd, I>( weights: I, buf: &mut [usize], expected: &[usize], ) where I: IntoIterator, I::Item: SampleBorrow<X>, X: for<'a> ::core::ops::AddAssign<&'a X> + Clone + Default, { assert_eq!(buf.len(), expected.len()); let distr = WeightedIndex::new(weights).unwrap(); let mut rng = crate::test::rng(701); for r in buf.iter_mut() { *r = rng.sample(&distr); } assert_eq!(buf, expected); } let mut buf = [0; 10]; test_samples(&[1i32, 1, 1, 1, 1, 1, 1, 1, 1], &mut buf, &[ 0, 6, 2, 6, 3, 4, 7, 8, 2, 5, ]); test_samples(&[0.7f32, 0.1, 0.1, 0.1], &mut buf, &[ 0, 0, 0, 1, 0, 0, 2, 3, 0, 0, ]); test_samples(&[1.0f64, 0.999, 0.998, 0.997], &mut buf, &[ 2, 2, 1, 3, 2, 1, 3, 3, 2, 1, ]); } } /// Error type returned from `WeightedIndex::new`. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum WeightedError { /// The provided weight collection contains no items. NoItem, /// A weight is either less than zero, greater than the supported maximum or /// otherwise invalid. InvalidWeight, /// All items in the provided weight collection are zero. 
AllWeightsZero, /// Too many weights are provided (length greater than `u32::MAX`) TooMany, } #[cfg(feature = "std")] impl ::std::error::Error for WeightedError {} impl fmt::Display for WeightedError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { WeightedError::NoItem => write!(f, "No weights provided."), WeightedError::InvalidWeight => write!(f, "A weight is invalid."), WeightedError::AllWeightsZero => write!(f, "All weights are zero."), WeightedError::TooMany => write!(f, "Too many weights (hit u32::MAX)"), } } } Fix alloc-no-std build // Copyright 2018 Developers of the Rand project. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Weighted index sampling //! //! The [`WeightedIndex`] distribution allows `O(log N)` sampling from a //! sequence of weights. As the name implies, the result is the index in that //! sequence, which may be used to look up an associated value. //! //! Note also that the `rand_distr` crate provides //! `rand_distr::alias_method::WeightedIndex`, which allows `O(1)` sampling; //! this distribution however has a much greater set-up cost, thus is only //! recommended where *many* samples are required. // TODO: link alias_method impl when published in rand_distr #[allow(missing_docs)] #[deprecated(since = "0.8.0", note = "moved to rand_distr crate")] pub mod alias_method { // This module exists to provide a deprecation warning which minimises // compile errors, but still fails to compile if ever used. 
use core::marker::PhantomData; #[cfg(not(feature = "std"))] use crate::alloc::vec::Vec; use super::WeightedError; #[derive(Debug)] pub struct WeightedIndex<W: Weight> { _phantom: PhantomData<W>, } impl<W: Weight> WeightedIndex<W> { pub fn new(_weights: Vec<W>) -> Result<Self, WeightedError> { Err(WeightedError::NoItem) } } pub trait Weight {} macro_rules! impl_weight { () => {}; ($T:ident, $($more:ident,)*) => { impl Weight for $T {} impl_weight!($($more,)*); }; } impl_weight!(f64, f32,); impl_weight!(u8, u16, u32, u64, usize,); impl_weight!(i8, i16, i32, i64, isize,); #[cfg(not(target_os = "emscripten"))] impl_weight!(u128, i128,); } use crate::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler}; use crate::distributions::Distribution; use crate::Rng; use core::cmp::PartialOrd; use core::fmt; // Note that this whole module is only imported if feature="alloc" is enabled. #[cfg(not(feature = "std"))] use crate::alloc::vec::Vec; /// A distribution using weighted sampling to pick a discretely selected /// item. /// /// Sampling a `WeightedIndex` distribution returns the index of a randomly /// selected element from the iterator used when the `WeightedIndex` was /// created. The chance of a given element being picked is proportional to the /// value of the element. The weights can use any type `X` for which an /// implementation of [`Uniform<X>`] exists. /// /// # Performance /// /// A `WeightedIndex<X>` contains a `Vec<X>` and a [`Uniform<X>`] and so its /// size is the sum of the size of those objects, possibly plus some alignment. /// /// Creating a `WeightedIndex<X>` will allocate enough space to hold `N - 1` /// weights of type `X`, where `N` is the number of weights. However, since /// `Vec` doesn't guarantee a particular growth strategy, additional memory /// might be allocated but not used. 
Since the `WeightedIndex` object also /// contains, this might cause additional allocations, though for primitive /// types, ['Uniform<X>`] doesn't allocate any memory. /// /// Time complexity of sampling from `WeightedIndex` is `O(log N)` where /// `N` is the number of weights. /// /// Sampling from `WeightedIndex` will result in a single call to /// `Uniform<X>::sample` (method of the [`Distribution`] trait), which typically /// will request a single value from the underlying [`RngCore`], though the /// exact number depends on the implementaiton of `Uniform<X>::sample`. /// /// # Example /// /// ``` /// use rand::prelude::*; /// use rand::distributions::WeightedIndex; /// /// let choices = ['a', 'b', 'c']; /// let weights = [2, 1, 1]; /// let dist = WeightedIndex::new(&weights).unwrap(); /// let mut rng = thread_rng(); /// for _ in 0..100 { /// // 50% chance to print 'a', 25% chance to print 'b', 25% chance to print 'c' /// println!("{}", choices[dist.sample(&mut rng)]); /// } /// /// let items = [('a', 0), ('b', 3), ('c', 7)]; /// let dist2 = WeightedIndex::new(items.iter().map(|item| item.1)).unwrap(); /// for _ in 0..100 { /// // 0% chance to print 'a', 30% chance to print 'b', 70% chance to print 'c' /// println!("{}", items[dist2.sample(&mut rng)].0); /// } /// ``` /// /// [`Uniform<X>`]: crate::distributions::uniform::Uniform /// [`RngCore`]: crate::RngCore #[derive(Debug, Clone)] pub struct WeightedIndex<X: SampleUniform + PartialOrd> { cumulative_weights: Vec<X>, total_weight: X, weight_distribution: X::Sampler, } impl<X: SampleUniform + PartialOrd> WeightedIndex<X> { /// Creates a new a `WeightedIndex` [`Distribution`] using the values /// in `weights`. The weights can use any type `X` for which an /// implementation of [`Uniform<X>`] exists. /// /// Returns an error if the iterator is empty, if any weight is `< 0`, or /// if its total value is 0. 
/// /// [`Uniform<X>`]: crate::distributions::uniform::Uniform pub fn new<I>(weights: I) -> Result<WeightedIndex<X>, WeightedError> where I: IntoIterator, I::Item: SampleBorrow<X>, X: for<'a> ::core::ops::AddAssign<&'a X> + Clone + Default, { let mut iter = weights.into_iter(); let mut total_weight: X = iter.next().ok_or(WeightedError::NoItem)?.borrow().clone(); let zero = <X as Default>::default(); if total_weight < zero { return Err(WeightedError::InvalidWeight); } let mut weights = Vec::<X>::with_capacity(iter.size_hint().0); for w in iter { if *w.borrow() < zero { return Err(WeightedError::InvalidWeight); } weights.push(total_weight.clone()); total_weight += w.borrow(); } if total_weight == zero { return Err(WeightedError::AllWeightsZero); } let distr = X::Sampler::new(zero, total_weight.clone()); Ok(WeightedIndex { cumulative_weights: weights, total_weight, weight_distribution: distr, }) } /// Update a subset of weights, without changing the number of weights. /// /// `new_weights` must be sorted by the index. /// /// Using this method instead of `new` might be more efficient if only a small number of /// weights is modified. No allocations are performed, unless the weight type `X` uses /// allocation internally. /// /// In case of error, `self` is not modified. pub fn update_weights(&mut self, new_weights: &[(usize, &X)]) -> Result<(), WeightedError> where X: for<'a> ::core::ops::AddAssign<&'a X> + for<'a> ::core::ops::SubAssign<&'a X> + Clone + Default { if new_weights.is_empty() { return Ok(()); } let zero = <X as Default>::default(); let mut total_weight = self.total_weight.clone(); // Check for errors first, so we don't modify `self` in case something // goes wrong. 
let mut prev_i = None; for &(i, w) in new_weights { if let Some(old_i) = prev_i { if old_i >= i { return Err(WeightedError::InvalidWeight); } } if *w < zero { return Err(WeightedError::InvalidWeight); } if i >= self.cumulative_weights.len() + 1 { return Err(WeightedError::TooMany); } let mut old_w = if i < self.cumulative_weights.len() { self.cumulative_weights[i].clone() } else { self.total_weight.clone() }; if i > 0 { old_w -= &self.cumulative_weights[i - 1]; } total_weight -= &old_w; total_weight += w; prev_i = Some(i); } if total_weight == zero { return Err(WeightedError::AllWeightsZero); } // Update the weights. Because we checked all the preconditions in the // previous loop, this should never panic. let mut iter = new_weights.iter(); let mut prev_weight = zero.clone(); let mut next_new_weight = iter.next(); let &(first_new_index, _) = next_new_weight.unwrap(); let mut cumulative_weight = if first_new_index > 0 { self.cumulative_weights[first_new_index - 1].clone() } else { zero.clone() }; for i in first_new_index..self.cumulative_weights.len() { match next_new_weight { Some(&(j, w)) if i == j => { cumulative_weight += w; next_new_weight = iter.next(); } _ => { let mut tmp = self.cumulative_weights[i].clone(); tmp -= &prev_weight; // We know this is positive. cumulative_weight += &tmp; } } prev_weight = cumulative_weight.clone(); core::mem::swap(&mut prev_weight, &mut self.cumulative_weights[i]); } self.total_weight = total_weight; self.weight_distribution = X::Sampler::new(zero, self.total_weight.clone()); Ok(()) } } impl<X> Distribution<usize> for WeightedIndex<X> where X: SampleUniform + PartialOrd { fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize { use ::core::cmp::Ordering; let chosen_weight = self.weight_distribution.sample(rng); // Find the first item which has a weight *higher* than the chosen weight. 
self.cumulative_weights .binary_search_by(|w| { if *w <= chosen_weight { Ordering::Less } else { Ordering::Greater } }) .unwrap_err() } } #[cfg(test)] mod test { use super::*; #[test] #[cfg_attr(miri, ignore)] // Miri is too slow fn test_weightedindex() { let mut r = crate::test::rng(700); const N_REPS: u32 = 5000; let weights = [1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7]; let total_weight = weights.iter().sum::<u32>() as f32; let verify = |result: [i32; 14]| { for (i, count) in result.iter().enumerate() { let exp = (weights[i] * N_REPS) as f32 / total_weight; let mut err = (*count as f32 - exp).abs(); if err != 0.0 { err /= exp; } assert!(err <= 0.25); } }; // WeightedIndex from vec let mut chosen = [0i32; 14]; let distr = WeightedIndex::new(weights.to_vec()).unwrap(); for _ in 0..N_REPS { chosen[distr.sample(&mut r)] += 1; } verify(chosen); // WeightedIndex from slice chosen = [0i32; 14]; let distr = WeightedIndex::new(&weights[..]).unwrap(); for _ in 0..N_REPS { chosen[distr.sample(&mut r)] += 1; } verify(chosen); // WeightedIndex from iterator chosen = [0i32; 14]; let distr = WeightedIndex::new(weights.iter()).unwrap(); for _ in 0..N_REPS { chosen[distr.sample(&mut r)] += 1; } verify(chosen); for _ in 0..5 { assert_eq!(WeightedIndex::new(&[0, 1]).unwrap().sample(&mut r), 1); assert_eq!(WeightedIndex::new(&[1, 0]).unwrap().sample(&mut r), 0); assert_eq!( WeightedIndex::new(&[0, 0, 0, 0, 10, 0]) .unwrap() .sample(&mut r), 4 ); } assert_eq!( WeightedIndex::new(&[10][0..0]).unwrap_err(), WeightedError::NoItem ); assert_eq!( WeightedIndex::new(&[0]).unwrap_err(), WeightedError::AllWeightsZero ); assert_eq!( WeightedIndex::new(&[10, 20, -1, 30]).unwrap_err(), WeightedError::InvalidWeight ); assert_eq!( WeightedIndex::new(&[-10, 20, 1, 30]).unwrap_err(), WeightedError::InvalidWeight ); assert_eq!( WeightedIndex::new(&[-10]).unwrap_err(), WeightedError::InvalidWeight ); } #[test] fn test_update_weights() { let data = [ ( &[10u32, 2, 3, 4][..], &[(1, &100), (2, 
&4)][..], // positive change &[10, 100, 4, 4][..], ), ( &[1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7][..], &[(2, &1), (5, &1), (13, &100)][..], // negative change and last element &[1u32, 2, 1, 0, 5, 1, 7, 1, 2, 3, 4, 5, 6, 100][..], ), ]; for (weights, update, expected_weights) in data.iter() { let total_weight = weights.iter().sum::<u32>(); let mut distr = WeightedIndex::new(weights.to_vec()).unwrap(); assert_eq!(distr.total_weight, total_weight); distr.update_weights(update).unwrap(); let expected_total_weight = expected_weights.iter().sum::<u32>(); let expected_distr = WeightedIndex::new(expected_weights.to_vec()).unwrap(); assert_eq!(distr.total_weight, expected_total_weight); assert_eq!(distr.total_weight, expected_distr.total_weight); assert_eq!(distr.cumulative_weights, expected_distr.cumulative_weights); } } #[test] fn value_stability() { fn test_samples<X: SampleUniform + PartialOrd, I>( weights: I, buf: &mut [usize], expected: &[usize], ) where I: IntoIterator, I::Item: SampleBorrow<X>, X: for<'a> ::core::ops::AddAssign<&'a X> + Clone + Default, { assert_eq!(buf.len(), expected.len()); let distr = WeightedIndex::new(weights).unwrap(); let mut rng = crate::test::rng(701); for r in buf.iter_mut() { *r = rng.sample(&distr); } assert_eq!(buf, expected); } let mut buf = [0; 10]; test_samples(&[1i32, 1, 1, 1, 1, 1, 1, 1, 1], &mut buf, &[ 0, 6, 2, 6, 3, 4, 7, 8, 2, 5, ]); test_samples(&[0.7f32, 0.1, 0.1, 0.1], &mut buf, &[ 0, 0, 0, 1, 0, 0, 2, 3, 0, 0, ]); test_samples(&[1.0f64, 0.999, 0.998, 0.997], &mut buf, &[ 2, 2, 1, 3, 2, 1, 3, 3, 2, 1, ]); } } /// Error type returned from `WeightedIndex::new`. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum WeightedError { /// The provided weight collection contains no items. NoItem, /// A weight is either less than zero, greater than the supported maximum or /// otherwise invalid. InvalidWeight, /// All items in the provided weight collection are zero. 
AllWeightsZero,
    /// Too many weights are provided (length greater than `u32::MAX`)
    TooMany,
}

// `WeightedError` carries no payload, so the `Error` trait needs no extra
// methods. The impl is gated on `std` because `std::error::Error` is not
// available in `core`-only (no_std + alloc) builds.
#[cfg(feature = "std")]
impl ::std::error::Error for WeightedError {}

impl fmt::Display for WeightedError {
    // One fixed human-readable message per variant.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            WeightedError::NoItem => write!(f, "No weights provided."),
            WeightedError::InvalidWeight => write!(f, "A weight is invalid."),
            WeightedError::AllWeightsZero => write!(f, "All weights are zero."),
            WeightedError::TooMany => write!(f, "Too many weights (hit u32::MAX)"),
        }
    }
}
// Copyright 2014 Michael Yang. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. use libc::{ c_char, c_int, }; use types::{ CLPK_complex, CLPK_doublecomplex, CLPK_doublereal, CLPK_integer, CLPK_real, }; #[link(name = "lapack")] extern "C" { pub fn ssygv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, a: *mut CLPK_real, lda: *const CLPK_integer, b: *mut CLPK_real, ldb: *const CLPK_integer, w: CLPK_real, work: CLPK_real, lwork: CLPK_integer, info: *mut CLPK_integer) -> c_int; pub fn dsygv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, a: *mut CLPK_doublereal, lda: *const CLPK_integer, b: *mut CLPK_doublereal, ldb: *const CLPK_integer, w: CLPK_doublereal, work: CLPK_doublereal, lwork: CLPK_integer, info: *mut CLPK_integer) -> c_int; pub fn chegv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, a: *mut CLPK_complex, lda: *const CLPK_integer, b: *mut CLPK_complex, ldb: *const CLPK_integer, w: CLPK_real, work: CLPK_complex, lwork: CLPK_integer, rwork: CLPK_real, info: *mut CLPK_integer) -> c_int; pub fn zhegv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, a: *mut CLPK_doublecomplex, lda: *const CLPK_integer, b: *mut CLPK_doublecomplex, ldb: *const CLPK_integer, w: CLPK_doublereal, work: CLPK_doublecomplex, lwork: CLPK_integer, rwork: CLPK_doublereal, info: *mut CLPK_integer) -> c_int; pub fn sspgv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, ap: CLPK_real, bp: CLPK_real, w: CLPK_real, z: CLPK_real, ldz: CLPK_integer, work: CLPK_real, info: *mut CLPK_integer) -> c_int; pub fn dspgv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, ap: CLPK_doublereal, bp: CLPK_doublereal, w: CLPK_doublereal, z: CLPK_doublereal, ldz: CLPK_integer, work: CLPK_doublereal, info: *mut CLPK_integer) -> c_int; pub fn chpgv_(itype: CLPK_integer, jobz: c_char, uplo: 
c_char, n: *const CLPK_integer, ap: CLPK_complex, bp: CLPK_complex, w: CLPK_real, z: CLPK_complex, ldz: CLPK_integer, work: CLPK_complex, rwork: CLPK_real, info: *mut CLPK_integer) -> c_int; pub fn zhpgv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, ap: CLPK_doublecomplex, bp: CLPK_doublecomplex, w: CLPK_doublereal, z: CLPK_doublecomplex, ldz: CLPK_integer, work: CLPK_doublecomplex, rwork: CLPK_doublereal, info: *mut CLPK_integer) -> c_int; pub fn ssbgv_(jobz: c_char, uplo: c_char, n: *const CLPK_integer, ka: *mut CLPK_integer, kb: CLPK_integer, ab: CLPK_real, ldab: CLPK_integer, bb: CLPK_real, ldbb: CLPK_integer, w: CLPK_real, z: CLPK_real, ldz: CLPK_integer, work: CLPK_real, info: *mut CLPK_integer) -> c_int; pub fn dsbgv_(jobz: c_char, uplo: c_char, n: *const CLPK_integer, ka: *mut CLPK_integer, kb: CLPK_integer, ab: CLPK_doublereal, ldab: CLPK_integer, bb: CLPK_doublereal, ldbb: CLPK_integer, w: CLPK_doublereal, z: CLPK_doublereal, ldz: CLPK_integer, work: CLPK_doublereal, info: *mut CLPK_integer) -> c_int; pub fn chbgv_(jobz: c_char, uplo: c_char, n: *const CLPK_integer, ka: *mut CLPK_integer, kb: CLPK_integer, ab: CLPK_complex, ldab: CLPK_integer, bb: CLPK_complex, ldbb: CLPK_integer, w: CLPK_real, z: CLPK_complex, ldz: CLPK_integer, work: CLPK_complex, rwork: CLPK_real, info: *mut CLPK_integer) -> c_int; pub fn zhbgv_(jobz: c_char, uplo: c_char, n: *const CLPK_integer, ka: *mut CLPK_integer, kb: CLPK_integer, ab: CLPK_doublecomplex, ldab: CLPK_integer, bb: CLPK_doublecomplex, ldbb: CLPK_integer, w: CLPK_doublereal, z: CLPK_doublecomplex, ldz: CLPK_integer, work: CLPK_doublecomplex, rwork: CLPK_doublereal, info: *mut CLPK_integer) -> c_int; } Add Ggev trait // Copyright 2014 Michael Yang. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
use libc::{ c_char, c_int, }; use types::{ CLPK_complex, CLPK_doublecomplex, CLPK_doublereal, CLPK_integer, CLPK_real, }; #[link(name = "lapack")] extern "C" { pub fn ssygv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, a: *mut CLPK_real, lda: *const CLPK_integer, b: *mut CLPK_real, ldb: *const CLPK_integer, w: CLPK_real, work: CLPK_real, lwork: CLPK_integer, info: *mut CLPK_integer) -> c_int; pub fn dsygv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, a: *mut CLPK_doublereal, lda: *const CLPK_integer, b: *mut CLPK_doublereal, ldb: *const CLPK_integer, w: CLPK_doublereal, work: CLPK_doublereal, lwork: CLPK_integer, info: *mut CLPK_integer) -> c_int; pub fn chegv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, a: *mut CLPK_complex, lda: *const CLPK_integer, b: *mut CLPK_complex, ldb: *const CLPK_integer, w: CLPK_real, work: CLPK_complex, lwork: CLPK_integer, rwork: CLPK_real, info: *mut CLPK_integer) -> c_int; pub fn zhegv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, a: *mut CLPK_doublecomplex, lda: *const CLPK_integer, b: *mut CLPK_doublecomplex, ldb: *const CLPK_integer, w: CLPK_doublereal, work: CLPK_doublecomplex, lwork: CLPK_integer, rwork: CLPK_doublereal, info: *mut CLPK_integer) -> c_int; pub fn sspgv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, ap: CLPK_real, bp: CLPK_real, w: CLPK_real, z: CLPK_real, ldz: CLPK_integer, work: CLPK_real, info: *mut CLPK_integer) -> c_int; pub fn dspgv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, ap: CLPK_doublereal, bp: CLPK_doublereal, w: CLPK_doublereal, z: CLPK_doublereal, ldz: CLPK_integer, work: CLPK_doublereal, info: *mut CLPK_integer) -> c_int; pub fn chpgv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, ap: CLPK_complex, bp: CLPK_complex, w: CLPK_real, z: CLPK_complex, ldz: CLPK_integer, work: CLPK_complex, rwork: CLPK_real, 
info: *mut CLPK_integer) -> c_int; pub fn zhpgv_(itype: CLPK_integer, jobz: c_char, uplo: c_char, n: *const CLPK_integer, ap: CLPK_doublecomplex, bp: CLPK_doublecomplex, w: CLPK_doublereal, z: CLPK_doublecomplex, ldz: CLPK_integer, work: CLPK_doublecomplex, rwork: CLPK_doublereal, info: *mut CLPK_integer) -> c_int; pub fn ssbgv_(jobz: c_char, uplo: c_char, n: *const CLPK_integer, ka: *mut CLPK_integer, kb: CLPK_integer, ab: CLPK_real, ldab: CLPK_integer, bb: CLPK_real, ldbb: CLPK_integer, w: CLPK_real, z: CLPK_real, ldz: CLPK_integer, work: CLPK_real, info: *mut CLPK_integer) -> c_int; pub fn dsbgv_(jobz: c_char, uplo: c_char, n: *const CLPK_integer, ka: *mut CLPK_integer, kb: CLPK_integer, ab: CLPK_doublereal, ldab: CLPK_integer, bb: CLPK_doublereal, ldbb: CLPK_integer, w: CLPK_doublereal, z: CLPK_doublereal, ldz: CLPK_integer, work: CLPK_doublereal, info: *mut CLPK_integer) -> c_int; pub fn chbgv_(jobz: c_char, uplo: c_char, n: *const CLPK_integer, ka: *mut CLPK_integer, kb: CLPK_integer, ab: CLPK_complex, ldab: CLPK_integer, bb: CLPK_complex, ldbb: CLPK_integer, w: CLPK_real, z: CLPK_complex, ldz: CLPK_integer, work: CLPK_complex, rwork: CLPK_real, info: *mut CLPK_integer) -> c_int; pub fn zhbgv_(jobz: c_char, uplo: c_char, n: *const CLPK_integer, ka: *mut CLPK_integer, kb: CLPK_integer, ab: CLPK_doublecomplex, ldab: CLPK_integer, bb: CLPK_doublecomplex, ldbb: CLPK_integer, w: CLPK_doublereal, z: CLPK_doublecomplex, ldz: CLPK_integer, work: CLPK_doublecomplex, rwork: CLPK_doublereal, info: *mut CLPK_integer) -> c_int; pub fn sggev_(jobvl: c_char, jobvr: c_char, n: *const CLPK_integer, a: *mut CLPK_real, lda: *const CLPK_integer, b: *mut CLPK_real, ldb: *const CLPK_integer, alphar: CLPK_real, alphai: CLPK_real, beta: *mut CLPK_real, vl: CLPK_real, ldvl: CLPK_integer, vr: CLPK_real, ldvr: CLPK_integer, work: CLPK_real, lwork: CLPK_integer, info: *mut CLPK_integer) -> c_int; pub fn dggev_(jobvl: c_char, jobvr: c_char, n: *const CLPK_integer, a: *mut 
CLPK_doublereal, lda: *const CLPK_integer, b: *mut CLPK_doublereal,
                  ldb: *const CLPK_integer, alphar: CLPK_doublereal,
                  alphai: CLPK_doublereal, beta: *mut CLPK_doublereal,
                  vl: CLPK_doublereal, ldvl: CLPK_integer,
                  vr: CLPK_doublereal, ldvr: CLPK_integer,
                  work: CLPK_doublereal, lwork: CLPK_integer,
                  info: *mut CLPK_integer) -> c_int;

    // cggev: generalized nonsymmetric eigenvalue problem A*v = lambda*B*v
    // (single-precision complex); eigenvalues come back as alpha/beta pairs.
    //
    // NOTE(review): `a`, `b`, `alpha`, `beta`, `n` and `info` are declared
    // as pointers, but `vl`, `vr`, `work`, `rwork` and the dimension
    // scalars (`ldvl`, `ldvr`, `lwork`) are declared by value. LAPACK's
    // Fortran ABI passes every argument by reference, so the by-value
    // declarations look inconsistent — verify against the LAPACK docs
    // before calling through these signatures.
    pub fn cggev_(jobvl: c_char, jobvr: c_char, n: *const CLPK_integer,
                  a: *mut CLPK_complex, lda: *const CLPK_integer,
                  b: *mut CLPK_complex, ldb: *const CLPK_integer,
                  alpha: *mut CLPK_complex, beta: *mut CLPK_complex,
                  vl: CLPK_complex, ldvl: CLPK_integer,
                  vr: CLPK_complex, ldvr: CLPK_integer,
                  work: CLPK_complex, lwork: CLPK_integer,
                  rwork: CLPK_real,
                  info: *mut CLPK_integer) -> c_int;

    // zggev: same routine in double-precision complex. The same by-value
    // argument concern as cggev_ above applies here.
    pub fn zggev_(jobvl: c_char, jobvr: c_char, n: *const CLPK_integer,
                  a: *mut CLPK_doublecomplex, lda: *const CLPK_integer,
                  b: *mut CLPK_doublecomplex, ldb: *const CLPK_integer,
                  alpha: *mut CLPK_doublecomplex,
                  beta: *mut CLPK_doublecomplex,
                  vl: CLPK_doublecomplex, ldvl: CLPK_integer,
                  vr: CLPK_doublecomplex, ldvr: CLPK_integer,
                  work: CLPK_doublecomplex, lwork: CLPK_integer,
                  rwork: CLPK_doublereal,
                  info: *mut CLPK_integer) -> c_int;
}
// Copyright 2017 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! cmake wrapper for build extern crate cmake; extern crate fs_extra; use cmake::Config; use std::env; use std::path::{PathBuf}; use fs_extra::dir::*; fn main() { let path_str = env::var("OUT_DIR").unwrap(); let mut out_path = PathBuf::from(&path_str); out_path.pop(); out_path.pop(); out_path.pop(); let mut plugin_path = PathBuf::from(&path_str); plugin_path.push("build"); plugin_path.push("plugins"); //Collect the files and directories we care about let dir_content = get_dir_content("plugins").unwrap(); for d in dir_content.directories { let file_content = get_dir_content(d).unwrap(); for f in file_content.files { println!("cargo:rerun-if-changed={}",f); } } for f in dir_content.files{ println!("cargo:rerun-if-changed={}",f); } //panic!("marp"); let dst = Config::new("plugins") //.define("FOO","BAR") //whatever flags go here //.cflag("-foo") //and here .build_target("") .build(); println!("Plugin path: {:?}", plugin_path); println!("OUT PATH: {:?}", out_path); let mut options = CopyOptions::new(); options.overwrite=true; if let Err(e) = copy(plugin_path, out_path, &options) { println!("{:?}", e); } println!("cargo:rustc-link-search=native={}", dst.display()); } fail on empty cuckoo directory // Copyright 2017 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! cmake wrapper for build extern crate cmake; extern crate fs_extra; use cmake::Config; use std::{env,fs}; use std::path::{PathBuf}; use fs_extra::dir::*; /// Tests whether source cuckoo directory exists pub fn fail_on_empty_directory(name: &str){ if fs::read_dir(name).unwrap().count()==0 { println!("The `{}` directory is empty. Did you forget to pull the submodules?", name); println!("Try `git submodule update --init --recursive`"); panic!(); } } fn main() { fail_on_empty_directory("plugins/cuckoo"); let path_str = env::var("OUT_DIR").unwrap(); let mut out_path = PathBuf::from(&path_str); out_path.pop(); out_path.pop(); out_path.pop(); let mut plugin_path = PathBuf::from(&path_str); plugin_path.push("build"); plugin_path.push("plugins"); //Collect the files and directories we care about let dir_content = get_dir_content("plugins").unwrap(); for d in dir_content.directories { let file_content = get_dir_content(d).unwrap(); for f in file_content.files { println!("cargo:rerun-if-changed={}",f); } } for f in dir_content.files{ println!("cargo:rerun-if-changed={}",f); } //panic!("marp"); let dst = Config::new("plugins") //.define("FOO","BAR") //whatever flags go here //.cflag("-foo") //and here .build_target("") .build(); println!("Plugin path: {:?}", plugin_path); println!("OUT PATH: {:?}", out_path); let mut options = CopyOptions::new(); options.overwrite=true; if let Err(e) = copy(plugin_path, out_path, &options) { println!("{:?}", e); } println!("cargo:rustc-link-search=native={}", dst.display()); }
//! Manager that is called when an output is created or destroyed.
//! Pass a struct that implements this trait to the `Compositor` during
//! initialization.

use libc;
use manager::{OutputHandler, UserOutput};
use types::OutputHandle;
use wlroots_sys::wlr_output;

use wayland_sys::server::WAYLAND_SERVER_HANDLE;
use wayland_sys::server::signal::wl_signal_add;

/// Used to ensure the output sets the mode before doing any other
/// operation on the Output.
pub struct OutputBuilder<'output> {
    output: &'output mut OutputHandle
}

/// Used to ensure that the builder is used to construct
/// the OutputHandler instance.
pub struct OutputBuilderResult<'output> {
    pub output: &'output mut OutputHandle,
    // Boxed user handler, moved into a `UserOutput` by the add listener below.
    result: Box<OutputHandler>
}

/// Wrapper around Output destruction so that you can't call
/// unsafe methods (e.g anything like setting the mode).
pub struct OutputDestruction<'output>(&'output mut OutputHandle);

/// Handles output addition and removal.
pub trait OutputManagerHandler {
    /// Called whenever an output is added.
    /// Return `None` to ignore the output, or use the builder to pick a
    /// mode and supply a per-output handler.
    fn output_added<'output>(&mut self,
                             _: OutputBuilder<'output>)
                             -> Option<OutputBuilderResult<'output>> {
        None
    }

    /// Called whenever an output is removed.
    fn output_removed(&mut self, OutputDestruction) {
        // TODO
    }

    /// Called every time the output frame is updated.
    fn output_frame(&mut self, &mut OutputHandle) {}

    /// Called every time the output resolution is updated.
    fn output_resolution(&mut self, &mut OutputHandle) {}
}

impl<'output> OutputBuilder<'output> {
    /// Picks the best mode for the output and wraps the user handler so it
    /// can be registered against this output's events.
    pub fn build_best_mode<T: OutputHandler + 'static>(self, data: T)
                                                       -> OutputBuilderResult<'output> {
        // NOTE Rationale for why this is safe:
        // * The builder is only constructed in output_added callback
        // * Can't be copied or otherwise escape (due to the lifetime constraints)
        // * Is only called once per output because this function consumes
        unsafe {
            self.output.choose_best_mode();
        }
        OutputBuilderResult {
            output: self.output,
            result: Box::new(data)
        }
    }
}

impl<'output> OutputDestruction<'output> {
    // TODO Functions which are safe to use
}

// Glue generated by `wayland_listener!`: wires the add/remove wl_signals of
// the backend to the user's `OutputManagerHandler`.
wayland_listener!(OutputManager, (Vec<Box<UserOutput>>, Box<OutputManagerHandler>), [
    add_listener => add_notify: |this: &mut OutputManager, data: *mut libc::c_void,| unsafe {
        let (ref mut outputs, ref mut manager) = this.data;
        // `data` is the wlr_output that was just announced by the backend.
        let data = data as *mut wlr_output;
        let mut output = OutputHandle::from_ptr(data as *mut wlr_output);
        let builder = OutputBuilder { output: &mut output };
        if let Some(OutputBuilderResult {result: output, ..}) = manager.output_added(builder) {
            let mut output = UserOutput::new((data, output));
            // Add the output frame event to this manager
            wl_signal_add(&mut (*data).events.frame as *mut _ as _,
                          output.frame_listener() as _);
            // Add the output resolution event to this manager
            wl_signal_add(&mut (*data).events.resolution as *mut _ as _,
                          output.resolution_listener() as _);
            // Store the user UserOutput, free later in remove listener
            outputs.push(output);
        }
    };
    remove_listener => remove_notify: |this: &mut OutputManager, data: *mut libc::c_void,| unsafe {
        let (ref mut outputs, ref mut manager) = this.data;
        let data = data as *mut wlr_output;
        let mut output = OutputHandle::from_ptr(data);
        // Notify the user first, then detach from any layout.
        manager.output_removed(OutputDestruction(&mut output));
        if let Some(layout) = output.layout() {
            layout.borrow_mut().remove(&mut output);
        }
        // Remove user output data
        if let Some(index) = outputs.iter().position(|output| output.output_ptr() == data) {
            let mut removed_output = outputs.remove(index);
            // Unhook both listeners before the UserOutput is dropped so the
            // compositor never signals into freed memory.
            ffi_dispatch!(WAYLAND_SERVER_HANDLE,
                          wl_list_remove,
                          &mut (*removed_output.frame_listener()).link as *mut _ as _);
            ffi_dispatch!(WAYLAND_SERVER_HANDLE,
                          wl_list_remove,
                          &mut (*removed_output.resolution_listener()).link as *mut _ as _);
        }
    };
]);
// NOTE(review): the next line is a stray commit-message artifact from the
// version-control history ("Removed OutputManager::output_{frame,resolution}");
// everything after it is a second, later copy of this same module in which
// the trait no longer has the output_frame/output_resolution methods.
Removed OutputManager::output_{frame,resolution}
//! Manager that is called when an output is created or destroyed.
//! Pass a struct that implements this trait to the `Compositor` during
//! initialization.

use libc;
use manager::{OutputHandler, UserOutput};
use types::OutputHandle;
use wlroots_sys::wlr_output;

use wayland_sys::server::WAYLAND_SERVER_HANDLE;
use wayland_sys::server::signal::wl_signal_add;

/// Used to ensure the output sets the mode before doing any other
/// operation on the Output.
pub struct OutputBuilder<'output> {
    output: &'output mut OutputHandle
}

/// Used to ensure that the builder is used to construct
/// the OutputHandler instance.
pub struct OutputBuilderResult<'output> {
    pub output: &'output mut OutputHandle,
    result: Box<OutputHandler>
}

/// Wrapper around Output destruction so that you can't call
/// unsafe methods (e.g anything like setting the mode).
pub struct OutputDestruction<'output>(&'output mut OutputHandle);

/// Handles output addition and removal.
pub trait OutputManagerHandler {
    /// Called whenever an output is added.
    fn output_added<'output>(&mut self,
                             _: OutputBuilder<'output>)
                             -> Option<OutputBuilderResult<'output>> {
        None
    }

    /// Called whenever an output is removed.
    fn output_removed(&mut self, OutputDestruction) {
        // TODO
    }
}

impl<'output> OutputBuilder<'output> {
    /// Picks the best mode for the output and wraps the user handler so it
    /// can be registered against this output's events.
    pub fn build_best_mode<T: OutputHandler + 'static>(self, data: T)
                                                       -> OutputBuilderResult<'output> {
        // NOTE Rationale for why this is safe:
        // * The builder is only constructed in output_added callback
        // * Can't be copied or otherwise escape (due to the lifetime constraints)
        // * Is only called once per output because this function consumes
        unsafe {
            self.output.choose_best_mode();
        }
        OutputBuilderResult {
            output: self.output,
            result: Box::new(data)
        }
    }
}

impl<'output> OutputDestruction<'output> {
    // TODO Functions which are safe to use
}

// Glue generated by `wayland_listener!`: wires the add/remove wl_signals of
// the backend to the user's `OutputManagerHandler`.
wayland_listener!(OutputManager, (Vec<Box<UserOutput>>, Box<OutputManagerHandler>), [
    add_listener => add_notify: |this: &mut OutputManager, data: *mut libc::c_void,| unsafe {
        let (ref mut outputs, ref mut manager) = this.data;
        let data = data as *mut wlr_output;
        let mut output = OutputHandle::from_ptr(data as *mut wlr_output);
        let builder = OutputBuilder { output: &mut output };
        if let Some(OutputBuilderResult {result: output, ..}) = manager.output_added(builder) {
            let mut output = UserOutput::new((data, output));
            // Add the output frame event to this manager
            wl_signal_add(&mut (*data).events.frame as *mut _ as _,
                          output.frame_listener() as _);
            // Add the output resolution event to this manager
            wl_signal_add(&mut (*data).events.resolution as *mut _ as _,
                          output.resolution_listener() as _);
            // Store the user UserOutput, free later in remove listener
            outputs.push(output);
        }
    };
    remove_listener => remove_notify: |this: &mut OutputManager, data: *mut libc::c_void,| unsafe {
        let (ref mut outputs, ref mut manager) = this.data;
        let data = data as *mut wlr_output;
        let mut output = OutputHandle::from_ptr(data);
        // Notify the user first, then detach from any layout.
        manager.output_removed(OutputDestruction(&mut output));
        if let Some(layout) = output.layout() {
            layout.borrow_mut().remove(&mut output);
        }
        // Remove user output data
        if let Some(index) = outputs.iter().position(|output| output.output_ptr() == data) {
            let mut removed_output = outputs.remove(index);
            // Unhook both listeners before the UserOutput is dropped so the
            // compositor never signals into freed memory.
            ffi_dispatch!(WAYLAND_SERVER_HANDLE,
                          wl_list_remove,
                          &mut (*removed_output.frame_listener()).link as *mut _ as _);
            ffi_dispatch!(WAYLAND_SERVER_HANDLE,
                          wl_list_remove,
                          &mut (*removed_output.resolution_listener()).link as *mut _ as _);
        }
    };
]);
use std::io::{self,ErrorKind,Read,Write};
use std::net::SocketAddr;
use mio::tcp::{TcpListener,TcpStream};
use rustls::{ServerSession, Session};
use net2::TcpBuilder;
use net2::unix::UnixTcpBuilderExt;

#[cfg(feature = "use-openssl")]
use openssl::ssl::{ErrorCode, SslStream};

/// Outcome of a non-blocking read/write attempt on a frontend socket.
#[derive(Debug,PartialEq,Copy,Clone)]
pub enum SocketResult {
    Continue,
    Closed,
    WouldBlock,
    Error
}

/// Common read/write interface over plain TCP and TLS-wrapped sockets.
/// All methods return how many bytes were handled plus a `SocketResult`.
pub trait SocketHandler {
    fn socket_read(&mut self, buf: &mut[u8]) -> (usize, SocketResult);
    fn socket_write(&mut self, buf: &[u8]) -> (usize, SocketResult);
    fn socket_ref(&self) -> &TcpStream;
}

impl SocketHandler for TcpStream {
    // Reads until the buffer is full, the peer stops sending, or the
    // socket would block / errors out.
    fn socket_read(&mut self, buf: &mut[u8]) -> (usize, SocketResult) {
        let mut size = 0usize;
        loop {
            if size == buf.len() {
                return (size, SocketResult::Continue);
            }
            match self.read(&mut buf[size..]) {
                Ok(0) => return (size, SocketResult::Continue),
                Ok(sz) => size += sz,
                Err(e) => match e.kind() {
                    ErrorKind::WouldBlock => return (size, SocketResult::WouldBlock),
                    ErrorKind::ConnectionReset | ErrorKind::ConnectionAborted | ErrorKind::BrokenPipe => {
                        return (size, SocketResult::Closed)
                    },
                    _ => {
                        error!("SOCKET\tsocket_read error={:?}", e);
                        return (size, SocketResult::Error)
                    },
                }
            }
        }
    }

    // Writes until the whole buffer is flushed, the socket would block,
    // or an error occurs.
    fn socket_write(&mut self, buf: &[u8]) -> (usize, SocketResult) {
        let mut size = 0usize;
        loop {
            if size == buf.len() {
                return (size, SocketResult::Continue);
            }
            match self.write(&buf[size..]) {
                Ok(0) => return (size, SocketResult::Continue),
                Ok(sz) => size += sz,
                Err(e) => match e.kind() {
                    ErrorKind::WouldBlock => return (size, SocketResult::WouldBlock),
                    ErrorKind::ConnectionReset | ErrorKind::ConnectionAborted | ErrorKind::BrokenPipe => {
                        return (size, SocketResult::Closed)
                    },
                    _ => {
                        //FIXME: timeout and other common errors should be sent up
                        error!("SOCKET\tsocket_write error={:?}", e);
                        return (size, SocketResult::Error)
                    },
                }
            }
        }
    }

    fn socket_ref(&self) -> &TcpStream {
        self
    }
}

#[cfg(feature = "use-openssl")]
impl SocketHandler for SslStream<TcpStream> {
    // Same contract as the TcpStream impl, but mapping OpenSSL error
    // codes onto SocketResult.
    fn socket_read(&mut self, buf: &mut[u8]) -> (usize, SocketResult) {
        let mut size = 0usize;
        loop {
            if size == buf.len() {
                return (size, SocketResult::Continue);
            }
            match self.ssl_read(&mut buf[size..]) {
                Ok(0) => return (size, SocketResult::Continue),
                Ok(sz) => size += sz,
                Err(e) => {
                    match e.code() {
                        ErrorCode::WANT_READ => return (size, SocketResult::WouldBlock),
                        ErrorCode::WANT_WRITE => return (size, SocketResult::WouldBlock),
                        ErrorCode::SSL => {
                            error!("SOCKET-TLS\treadable TLS socket SSL error: {:?}", e);
                            return (size, SocketResult::Error)
                        },
                        ErrorCode::SYSCALL => {
                            return (size, SocketResult::Error)
                        },
                        ErrorCode::ZERO_RETURN => {
                            return (size, SocketResult::Closed)
                        },
                        _ => {
                            error!("SOCKET-TLS\treadable TLS socket error={:?}", e);
                            return (size, SocketResult::Error)
                        }
                    }
                }
            }
        }
    }

    fn socket_write(&mut self, buf: &[u8]) -> (usize, SocketResult) {
        let mut size = 0usize;
        loop {
            if size == buf.len() {
                return (size, SocketResult::Continue);
            }
            match self.ssl_write(&buf[size..]) {
                Ok(0) => return (size, SocketResult::Continue),
                Ok(sz) => size += sz,
                Err(e) => {
                    match e.code() {
                        ErrorCode::WANT_READ => return (size, SocketResult::WouldBlock),
                        ErrorCode::WANT_WRITE => return (size, SocketResult::WouldBlock),
                        ErrorCode::SSL => {
                            error!("SOCKET-TLS\twritable TLS socket SSL error: {:?}", e);
                            return (size, SocketResult::Error)
                        },
                        ErrorCode::SYSCALL => {
                            error!("SOCKET-TLS\twritable TLS socket syscall error: {:?}", e);
                            return (size, SocketResult::Error)
                        },
                        ErrorCode::ZERO_RETURN => {
                            return (size, SocketResult::Closed)
                        },
                        _ => {
                            error!("SOCKET-TLS\twritable TLS socket error={:?}", e);
                            return (size, SocketResult::Error)
                        }
                    }
                }
            }
        }
    }

    fn socket_ref(&self) -> &TcpStream {
        self.get_ref()
    }
}

/// A frontend connection terminated by rustls: the raw stream plus the
/// TLS session state that encrypts/decrypts it.
pub struct FrontRustls {
    pub stream: TcpStream,
    pub session: ServerSession,
}

impl SocketHandler for FrontRustls {
    // Pumps ciphertext from the socket into the rustls session, then
    // drains decrypted plaintext into `buf`. State flags (can_read,
    // is_error, is_closed) decide the final SocketResult.
    fn socket_read(&mut self, buf: &mut[u8]) -> (usize, SocketResult) {
        let mut size = 0usize;
        let mut can_read = true;
        let mut is_error = false;
        let mut is_closed = false;
        loop {
            if size == buf.len() {
                break;
            }
            if !can_read | is_error | is_closed {
                break;
            }
            match self.session.read_tls(&mut self.stream) {
                Ok(0) => {
                    can_read = false;
                    is_closed = true;
                },
                // NOTE(review): `sz` is intentionally unused here — the bytes
                // are buffered inside the session (compiler will warn).
                Ok(sz) => {},
                Err(e) => match e.kind() {
                    ErrorKind::WouldBlock => {
                        can_read = false;
                    },
                    ErrorKind::ConnectionReset | ErrorKind::ConnectionAborted | ErrorKind::BrokenPipe => {
                        is_closed = true;
                    },
                    _ => {
                        error!("could not read TLS stream from socket: {:?}", e);
                        is_error = true;
                        break;
                    }
                }
            }
            if let Err(e) = self.session.process_new_packets() {
                error!("could not process read TLS packets: {:?}", e);
                is_error = true;
                break;
            }
            // Drain plaintext while the session does not need more
            // ciphertext from the socket.
            while !self.session.wants_read() {
                match self.session.read(&mut buf[size..]) {
                    Ok(sz) => size += sz,
                    Err(e) => match e.kind() {
                        ErrorKind::WouldBlock => {
                            break;
                        },
                        ErrorKind::ConnectionReset | ErrorKind::ConnectionAborted | ErrorKind::BrokenPipe => {
                            is_closed = true;
                            break;
                        },
                        _ => {
                            error!("could not read data from TLS stream: {:?}", e);
                            is_error = true;
                            break;
                        }
                    }
                }
            }
        }
        if is_error {
            (size, SocketResult::Error)
        } else if is_closed {
            (size, SocketResult::Closed)
        } else if !can_read {
            (size, SocketResult::WouldBlock)
        } else {
            (size, SocketResult::Continue)
        }
    }

    // Buffers plaintext into the session, then flushes the resulting
    // ciphertext to the socket. Returns the number of plaintext bytes
    // accepted (`buffered_size`), not the ciphertext bytes sent.
    fn socket_write(&mut self, buf: &[u8]) -> (usize, SocketResult) {
        let mut buffered_size = 0usize;
        let mut sent_size = 0usize;
        let mut can_write = true;
        let mut is_error = false;
        let mut is_closed = false;
        loop {
            if buffered_size == buf.len() {
                break;
            }
            if !can_write | is_error | is_closed {
                break;
            }
            match self.session.write(&buf[buffered_size..]) {
                Ok(0) => {
                    break;
                },
                Ok(sz) => {
                    buffered_size += sz;
                },
                Err(e) => match e.kind() {
                    ErrorKind::WouldBlock => {
                        // we don't need to do anything, the session will return false in wants_write?
                        //error!("rustls socket_write wouldblock");
                    },
                    ErrorKind::ConnectionReset | ErrorKind::ConnectionAborted | ErrorKind::BrokenPipe => {
                        //FIXME: this should probably not happen here
                        is_closed = true;
                        break;
                    },
                    _ => {
                        error!("could not write data to TLS stream: {:?}", e);
                        is_error = true;
                        break;
                    }
                }
            }
            // Flush ciphertext produced by the session to the socket.
            loop {
                match self.session.write_tls(&mut self.stream) {
                    Ok(0) => {
                        //can_write = false;
                        break;
                    },
                    Ok(sz) => {
                        sent_size += sz;
                    },
                    Err(e) => match e.kind() {
                        ErrorKind::WouldBlock => can_write = false,
                        ErrorKind::ConnectionReset | ErrorKind::ConnectionAborted | ErrorKind::BrokenPipe => {
                            is_closed = true;
                            break;
                        },
                        _ => {
                            error!("could not write TLS stream to socket: {:?}", e);
                            is_error = true;
                            break;
                        }
                    }
                }
            }
        }
        // NOTE(review): `sent_size` is accumulated but never reported;
        // only the plaintext count is returned to the caller.
        if is_error {
            (buffered_size, SocketResult::Error)
        } else if is_closed {
            (buffered_size, SocketResult::Closed)
        } else if !can_write {
            (buffered_size, SocketResult::WouldBlock)
        } else {
            (buffered_size, SocketResult::Continue)
        }
    }

    fn socket_ref(&self) -> &TcpStream {
        &self.stream
    }
}

/// Creates a non-blocking listener bound to `addr` with SO_REUSEADDR
/// (unix only) and SO_REUSEPORT set.
pub fn server_bind(addr: &SocketAddr) -> io::Result<TcpListener> {
    let sock = try!(match *addr {
        SocketAddr::V4(..) => TcpBuilder::new_v4(),
        SocketAddr::V6(..) => TcpBuilder::new_v6(),
    });
    // set so_reuseaddr, but only on unix (mirrors what libstd does)
    if cfg!(unix) {
        try!(sock.reuse_address(true));
    }
    try!(sock.reuse_port(true));
    // bind the socket
    try!(sock.bind(addr));
    // listen
    // FIXME: make the backlog configurable?
    let listener = try!(sock.listen(1024));
    TcpListener::from_std(listener)
}
// NOTE(review): the next line is a stray commit-message artifact from the
// version-control history; everything after it is a second, later copy of
// this module in which the TLS error logs were downgraded to debug!().
mark tls socket error messages as debug
use std::io::{self,ErrorKind,Read,Write};
use std::net::SocketAddr;
use mio::tcp::{TcpListener,TcpStream};
use rustls::{ServerSession, Session};
use net2::TcpBuilder;
use net2::unix::UnixTcpBuilderExt;

#[cfg(feature = "use-openssl")]
use openssl::ssl::{ErrorCode, SslStream};

/// Outcome of a non-blocking read/write attempt on a frontend socket.
#[derive(Debug,PartialEq,Copy,Clone)]
pub enum SocketResult {
    Continue,
    Closed,
    WouldBlock,
    Error
}

/// Common read/write interface over plain TCP and TLS-wrapped sockets.
pub trait SocketHandler {
    fn socket_read(&mut self, buf: &mut[u8]) -> (usize, SocketResult);
    fn socket_write(&mut self, buf: &[u8]) -> (usize, SocketResult);
    fn socket_ref(&self) -> &TcpStream;
}

impl SocketHandler for TcpStream {
    // Reads until the buffer is full, the peer stops sending, or the
    // socket would block / errors out.
    fn socket_read(&mut self, buf: &mut[u8]) -> (usize, SocketResult) {
        let mut size = 0usize;
        loop {
            if size == buf.len() {
                return (size, SocketResult::Continue);
            }
            match self.read(&mut buf[size..]) {
                Ok(0) => return (size, SocketResult::Continue),
                Ok(sz) => size += sz,
                Err(e) => match e.kind() {
                    ErrorKind::WouldBlock => return (size, SocketResult::WouldBlock),
                    ErrorKind::ConnectionReset | ErrorKind::ConnectionAborted | ErrorKind::BrokenPipe => {
                        return (size, SocketResult::Closed)
                    },
                    _ => {
                        error!("SOCKET\tsocket_read error={:?}", e);
                        return (size, SocketResult::Error)
                    },
                }
            }
        }
    }

    // Writes until the whole buffer is flushed, the socket would block,
    // or an error occurs.
    fn socket_write(&mut self, buf: &[u8]) -> (usize, SocketResult) {
        let mut size = 0usize;
        loop {
            if size == buf.len() {
                return (size, SocketResult::Continue);
            }
            match self.write(&buf[size..]) {
                Ok(0) => return (size, SocketResult::Continue),
                Ok(sz) => size += sz,
                Err(e) => match e.kind() {
                    ErrorKind::WouldBlock => return (size, SocketResult::WouldBlock),
                    ErrorKind::ConnectionReset | ErrorKind::ConnectionAborted | ErrorKind::BrokenPipe => {
                        return (size, SocketResult::Closed)
                    },
                    _ => {
                        //FIXME: timeout and other common errors should be sent up
                        error!("SOCKET\tsocket_write error={:?}", e);
                        return (size, SocketResult::Error)
                    },
                }
            }
        }
    }

    fn socket_ref(&self) -> &TcpStream {
        self
    }
}
#[cfg(feature = "use-openssl")]
impl SocketHandler for SslStream<TcpStream> {
    // Same contract as the TcpStream impl, but mapping OpenSSL error
    // codes onto SocketResult. TLS-level errors are logged at debug level
    // since they are common and driven by remote peers.
    fn socket_read(&mut self, buf: &mut[u8]) -> (usize, SocketResult) {
        let mut size = 0usize;
        loop {
            if size == buf.len() {
                return (size, SocketResult::Continue);
            }
            match self.ssl_read(&mut buf[size..]) {
                Ok(0) => return (size, SocketResult::Continue),
                Ok(sz) => size += sz,
                Err(e) => {
                    match e.code() {
                        ErrorCode::WANT_READ => return (size, SocketResult::WouldBlock),
                        ErrorCode::WANT_WRITE => return (size, SocketResult::WouldBlock),
                        ErrorCode::SSL => {
                            debug!("SOCKET-TLS\treadable TLS socket SSL error: {:?}", e);
                            return (size, SocketResult::Error)
                        },
                        ErrorCode::SYSCALL => {
                            return (size, SocketResult::Error)
                        },
                        ErrorCode::ZERO_RETURN => {
                            return (size, SocketResult::Closed)
                        },
                        _ => {
                            debug!("SOCKET-TLS\treadable TLS socket error={:?}", e);
                            return (size, SocketResult::Error)
                        }
                    }
                }
            }
        }
    }

    fn socket_write(&mut self, buf: &[u8]) -> (usize, SocketResult) {
        let mut size = 0usize;
        loop {
            if size == buf.len() {
                return (size, SocketResult::Continue);
            }
            match self.ssl_write(&buf[size..]) {
                Ok(0) => return (size, SocketResult::Continue),
                Ok(sz) => size += sz,
                Err(e) => {
                    match e.code() {
                        ErrorCode::WANT_READ => return (size, SocketResult::WouldBlock),
                        ErrorCode::WANT_WRITE => return (size, SocketResult::WouldBlock),
                        ErrorCode::SSL => {
                            debug!("SOCKET-TLS\twritable TLS socket SSL error: {:?}", e);
                            return (size, SocketResult::Error)
                        },
                        ErrorCode::SYSCALL => {
                            debug!("SOCKET-TLS\twritable TLS socket syscall error: {:?}", e);
                            return (size, SocketResult::Error)
                        },
                        ErrorCode::ZERO_RETURN => {
                            return (size, SocketResult::Closed)
                        },
                        _ => {
                            debug!("SOCKET-TLS\twritable TLS socket error={:?}", e);
                            return (size, SocketResult::Error)
                        }
                    }
                }
            }
        }
    }

    fn socket_ref(&self) -> &TcpStream {
        self.get_ref()
    }
}

/// A frontend connection terminated by rustls: the raw stream plus the
/// TLS session state that encrypts/decrypts it.
pub struct FrontRustls {
    pub stream: TcpStream,
    pub session: ServerSession,
}

impl SocketHandler for FrontRustls {
    // Pumps ciphertext from the socket into the rustls session, then
    // drains decrypted plaintext into `buf`. State flags (can_read,
    // is_error, is_closed) decide the final SocketResult.
    fn socket_read(&mut self, buf: &mut[u8]) -> (usize, SocketResult) {
        let mut size = 0usize;
        let mut can_read = true;
        let mut is_error = false;
        let mut is_closed = false;
        loop {
            if size == buf.len() {
                break;
            }
            if !can_read | is_error | is_closed {
                break;
            }
            match self.session.read_tls(&mut self.stream) {
                Ok(0) => {
                    can_read = false;
                    is_closed = true;
                },
                // NOTE(review): `sz` is intentionally unused here — the bytes
                // are buffered inside the session (compiler will warn).
                Ok(sz) => {},
                Err(e) => match e.kind() {
                    ErrorKind::WouldBlock => {
                        can_read = false;
                    },
                    ErrorKind::ConnectionReset | ErrorKind::ConnectionAborted | ErrorKind::BrokenPipe => {
                        is_closed = true;
                    },
                    _ => {
                        error!("could not read TLS stream from socket: {:?}", e);
                        is_error = true;
                        break;
                    }
                }
            }
            if let Err(e) = self.session.process_new_packets() {
                error!("could not process read TLS packets: {:?}", e);
                is_error = true;
                break;
            }
            // Drain plaintext while the session does not need more
            // ciphertext from the socket.
            while !self.session.wants_read() {
                match self.session.read(&mut buf[size..]) {
                    Ok(sz) => size += sz,
                    Err(e) => match e.kind() {
                        ErrorKind::WouldBlock => {
                            break;
                        },
                        ErrorKind::ConnectionReset | ErrorKind::ConnectionAborted | ErrorKind::BrokenPipe => {
                            is_closed = true;
                            break;
                        },
                        _ => {
                            error!("could not read data from TLS stream: {:?}", e);
                            is_error = true;
                            break;
                        }
                    }
                }
            }
        }
        if is_error {
            (size, SocketResult::Error)
        } else if is_closed {
            (size, SocketResult::Closed)
        } else if !can_read {
            (size, SocketResult::WouldBlock)
        } else {
            (size, SocketResult::Continue)
        }
    }

    // Buffers plaintext into the session, then flushes the resulting
    // ciphertext to the socket. Returns the number of plaintext bytes
    // accepted (`buffered_size`), not the ciphertext bytes sent.
    fn socket_write(&mut self, buf: &[u8]) -> (usize, SocketResult) {
        let mut buffered_size = 0usize;
        let mut sent_size = 0usize;
        let mut can_write = true;
        let mut is_error = false;
        let mut is_closed = false;
        loop {
            if buffered_size == buf.len() {
                break;
            }
            if !can_write | is_error | is_closed {
                break;
            }
            match self.session.write(&buf[buffered_size..]) {
                Ok(0) => {
                    break;
                },
                Ok(sz) => {
                    buffered_size += sz;
                },
                Err(e) => match e.kind() {
                    ErrorKind::WouldBlock => {
                        // we don't need to do anything, the session will return false in wants_write?
                        //error!("rustls socket_write wouldblock");
                    },
                    ErrorKind::ConnectionReset | ErrorKind::ConnectionAborted | ErrorKind::BrokenPipe => {
                        //FIXME: this should probably not happen here
                        is_closed = true;
                        break;
                    },
                    _ => {
                        error!("could not write data to TLS stream: {:?}", e);
                        is_error = true;
                        break;
                    }
                }
            }
            // Flush ciphertext produced by the session to the socket.
            loop {
                match self.session.write_tls(&mut self.stream) {
                    Ok(0) => {
                        //can_write = false;
                        break;
                    },
                    Ok(sz) => {
                        sent_size += sz;
                    },
                    Err(e) => match e.kind() {
                        ErrorKind::WouldBlock => can_write = false,
                        ErrorKind::ConnectionReset | ErrorKind::ConnectionAborted | ErrorKind::BrokenPipe => {
                            is_closed = true;
                            break;
                        },
                        _ => {
                            error!("could not write TLS stream to socket: {:?}", e);
                            is_error = true;
                            break;
                        }
                    }
                }
            }
        }
        // NOTE(review): `sent_size` is accumulated but never reported;
        // only the plaintext count is returned to the caller.
        if is_error {
            (buffered_size, SocketResult::Error)
        } else if is_closed {
            (buffered_size, SocketResult::Closed)
        } else if !can_write {
            (buffered_size, SocketResult::WouldBlock)
        } else {
            (buffered_size, SocketResult::Continue)
        }
    }

    fn socket_ref(&self) -> &TcpStream {
        &self.stream
    }
}

/// Creates a non-blocking listener bound to `addr` with SO_REUSEADDR
/// (unix only) and SO_REUSEPORT set.
pub fn server_bind(addr: &SocketAddr) -> io::Result<TcpListener> {
    let sock = try!(match *addr {
        SocketAddr::V4(..) => TcpBuilder::new_v4(),
        SocketAddr::V6(..) => TcpBuilder::new_v6(),
    });
    // set so_reuseaddr, but only on unix (mirrors what libstd does)
    if cfg!(unix) {
        try!(sock.reuse_address(true));
    }
    try!(sock.reuse_port(true));
    // bind the socket
    try!(sock.bind(addr));
    // listen
    // FIXME: make the backlog configurable?
    let listener = try!(sock.listen(1024));
    TcpListener::from_std(listener)
}
/*
    Copyright 2014-2015 Zumero, LLC

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

// NOTE(review): this module targets an old nightly toolchain (box_syntax,
// push_all, collections_drain, etc.) and will not build on modern Rust.
#![feature(core)]
#![feature(collections)]
#![feature(box_syntax)]
#![feature(convert)]
#![feature(collections_drain)]
#![feature(associated_consts)]

// TODO turn the following warnings back on later
#![allow(non_snake_case)]
#![allow(non_camel_case_types)]

use std::io;
use std::io::Seek;
use std::io::Read;
use std::io::Write;
use std::io::SeekFrom;
use std::cmp::Ordering;
use std::fs::File;
use std::fs::OpenOptions;
use std::collections::HashMap;
use std::collections::HashSet;
use std::ops::Index;
use std::error::Error;

const SIZE_32: usize = 4; // like std::mem::size_of::<u32>()
const SIZE_16: usize = 2; // like std::mem::size_of::<u16>()

pub type PageNum = u32;
// type PageSize = u32;

// TODO also perhaps the type representing size of a value, u32
// size of a value should NOT be usize, right?

// TODO there is code which assumes that PageNum is u32.
// but that's the nature of the file format. the type alias
// isn't so much so that we can change it, but rather, to make
// reading the code easier.

/// A value as stored (or about to be stored) in the database: either a
/// streamed reader, an in-memory byte array, or a deletion marker.
pub enum Blob {
    Stream(Box<Read>),
    Array(Box<[u8]>),
    Tombstone,
}

/// All error conditions surfaced by this LSM implementation.
#[derive(Debug)]
enum LsmError {
    // TODO remove Misc
    Misc(&'static str),
    // TODO more detail within CorruptFile
    CorruptFile(&'static str),
    Io(std::io::Error),
    CursorNotValid,
    InvalidPageNumber,
    InvalidPageType,
    RootPageNotInSegmentBlockList,
    Poisoned,
}

impl std::fmt::Display for LsmError {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match *self {
            LsmError::Io(ref err) => write!(f, "IO error: {}", err),
            LsmError::Misc(s) => write!(f, "Misc error: {}", s),
            LsmError::CorruptFile(s) => write!(f, "Corrupt file: {}", s),
            LsmError::Poisoned => write!(f, "Poisoned"),
            LsmError::CursorNotValid => write!(f, "Cursor not valid"),
            LsmError::InvalidPageNumber => write!(f, "Invalid page number"),
            LsmError::InvalidPageType => write!(f, "Invalid page type"),
            LsmError::RootPageNotInSegmentBlockList => write!(f, "Root page not in segment block list"),
        }
    }
}

impl std::error::Error for LsmError {
    fn description(&self) -> &str {
        match *self {
            LsmError::Io(ref err) => std::error::Error::description(err),
            LsmError::Misc(s) => s,
            LsmError::CorruptFile(s) => s,
            LsmError::Poisoned => "poisoned",
            LsmError::CursorNotValid => "cursor not valid",
            LsmError::InvalidPageNumber => "invalid page number",
            LsmError::InvalidPageType => "invalid page type",
            LsmError::RootPageNotInSegmentBlockList => "Root page not in segment block list",
        }
    }

    // TODO cause
}

impl From<io::Error> for LsmError {
    fn from(err: io::Error) -> LsmError {
        LsmError::Io(err)
    }
}

// Lock poisoning collapses to a single Poisoned variant; the inner
// PoisonError is dropped.
impl<T> From<std::sync::PoisonError<T>> for LsmError {
    fn from(_err: std::sync::PoisonError<T>) -> LsmError {
        LsmError::Poisoned
    }
}

pub type Result<T> = std::result::Result<T, LsmError>;

// kvp is the struct used to provide key-value pairs downward,
// for storage into the database.
pub struct kvp {
    Key : Box<[u8]>,
    Value : Blob,
}

// A segment being written: the blocks allocated so far and the segment
// number assigned to it.
struct PendingSegment {
    blockList: Vec<PageBlock>,
    segnum: SegmentNum,
}

// TODO this is experimental. it might not be very useful unless
// it can be used everywhere a regular slice can be used. but we
// obviously don't want to just pass around an Index<Output=u8>
// trait object if that forces us into dynamic dispatch everywhere.
struct SplitSlice<'a> {
    front: &'a [u8],
    back: &'a [u8],
}

impl<'a> SplitSlice<'a> {
    fn new(front: &'a [u8], back: &'a [u8]) -> SplitSlice<'a> {
        SplitSlice {front: front, back: back}
    }

    fn len(&self) -> usize {
        self.front.len() + self.back.len()
    }

    fn into_boxed_slice(self) -> Box<[u8]> {
        let mut k = Vec::new();
        k.push_all(&self.front);
        k.push_all(&self.back);
        k.into_boxed_slice()
    }
}

impl<'a> Index<usize> for SplitSlice<'a> {
    type Output = u8;

    // Index as if front and back were one contiguous slice.
    fn index(&self, _index: usize) -> &u8 {
        if _index >= self.front.len() {
            &self.back[_index - self.front.len()]
        } else {
            &self.front[_index]
        }
    }
}

// Splits a mutable slice into (before i, the single element at i, after i).
fn split3<T>(a: &mut [T], i: usize) -> (&mut [T], &mut [T], &mut [T]) {
    let (before, a2) = a.split_at_mut(i);
    let (islice, after) = a2.split_at_mut(1);
    (before, islice, after)
}

/// A key as read from a page, avoiding a copy when possible.
pub enum KeyRef<'a> {
    // for an overflowed key, we just punt and read it into memory
    Overflowed(Box<[u8]>),

    // the other two are references into the page
    Prefixed(&'a [u8],&'a [u8]),
    Array(&'a [u8]),
}

impl<'a> std::fmt::Debug for KeyRef<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::result::Result<(), std::fmt::Error> {
        match *self {
            KeyRef::Overflowed(ref a) => write!(f, "Overflowed, a={:?}", a),
            KeyRef::Prefixed(front,back) => write!(f, "Prefixed, front={:?}, back={:?}", front, back),
            KeyRef::Array(a) => write!(f, "Array, val={:?}", a),
        }
    }
}

impl<'a> KeyRef<'a> {
    pub fn len(&self) -> usize {
        match *self {
            KeyRef::Overflowed(ref a) => a.len(),
            KeyRef::Array(a) => a.len(),
            KeyRef::Prefixed(front,back) => front.len() + back.len(),
        }
    }

    pub fn from_boxed_slice(k: Box<[u8]>) -> KeyRef<'a> {
        KeyRef::Overflowed(k)
    }

    pub fn for_slice(k: &[u8]) -> KeyRef {
        KeyRef::Array(k)
    }

    // Materializes the key into one owned, contiguous byte array.
    pub fn into_boxed_slice(self) -> Box<[u8]> {
        match self {
            KeyRef::Overflowed(a) => {
                a
            },
            KeyRef::Array(a) => {
                let mut k = Vec::with_capacity(a.len());
                k.push_all(a);
                k.into_boxed_slice()
            },
            KeyRef::Prefixed(front,back) => {
                let mut k = Vec::with_capacity(front.len() + back.len());
                k.push_all(front);
                k.push_all(back);
                k.into_boxed_slice()
            },
        }
    }

    // TODO move this to the bcmp module?
    // Lexicographic compare of (px ++ x) vs (py ++ y) without concatenating.
    fn compare_px_py(px: &[u8], x: &[u8], py: &[u8], y: &[u8]) -> Ordering {
        let xlen = px.len() + x.len();
        let ylen = py.len() + y.len();
        let len = std::cmp::min(xlen, ylen);
        for i in 0 .. len {
            let xval =
                if i < px.len() {
                    px[i]
                } else {
                    x[i - px.len()]
                };
            let yval =
                if i < py.len() {
                    py[i]
                } else {
                    y[i - py.len()]
                };
            let c = xval.cmp(&yval);
            if c != Ordering::Equal {
                return c;
            }
        }
        return xlen.cmp(&ylen);
    }

    // TODO move this to the bcmp module?
    // Lexicographic compare of (px ++ x) vs y without concatenating.
    fn compare_px_y(px: &[u8], x: &[u8], y: &[u8]) -> Ordering {
        let xlen = px.len() + x.len();
        let ylen = y.len();
        let len = std::cmp::min(xlen, ylen);
        for i in 0 .. len {
            let xval =
                if i < px.len() {
                    px[i]
                } else {
                    x[i - px.len()]
                };
            let yval = y[i];
            let c = xval.cmp(&yval);
            if c != Ordering::Equal {
                return c;
            }
        }
        return xlen.cmp(&ylen);
    }

    // TODO move this to the bcmp module?
    // Lexicographic compare of x vs (py ++ y) without concatenating.
    fn compare_x_py(x: &[u8], py: &[u8], y: &[u8]) -> Ordering {
        let xlen = x.len();
        let ylen = py.len() + y.len();
        let len = std::cmp::min(xlen, ylen);
        for i in 0 .. len {
            let xval = x[i];
            let yval =
                if i < py.len() {
                    py[i]
                } else {
                    y[i - py.len()]
                };
            let c = xval.cmp(&yval);
            if c != Ordering::Equal {
                return c;
            }
        }
        return xlen.cmp(&ylen);
    }

    // Dispatches to the right pairwise comparison for each combination of
    // key representations, never allocating.
    pub fn cmp(x: &KeyRef, y: &KeyRef) -> Ordering {
        match (x,y) {
            (&KeyRef::Overflowed(ref x_k), &KeyRef::Overflowed(ref y_k)) => {
                bcmp::Compare(&x_k, &y_k)
            },
            (&KeyRef::Overflowed(ref x_k), &KeyRef::Prefixed(ref y_p, ref y_k)) => {
                Self::compare_x_py(&x_k, y_p, y_k)
            },
            (&KeyRef::Overflowed(ref x_k), &KeyRef::Array(ref y_k)) => {
                bcmp::Compare(&x_k, &y_k)
            },
            (&KeyRef::Prefixed(ref x_p, ref x_k), &KeyRef::Overflowed(ref y_k)) => {
                Self::compare_px_y(x_p, x_k, &y_k)
            },
            (&KeyRef::Array(ref x_k), &KeyRef::Overflowed(ref y_k)) => {
                bcmp::Compare(&x_k, &y_k)
            },
            (&KeyRef::Prefixed(ref x_p, ref x_k), &KeyRef::Prefixed(ref y_p, ref y_k)) => {
                Self::compare_px_py(x_p, x_k, y_p, y_k)
            },
            (&KeyRef::Prefixed(ref x_p, ref x_k), &KeyRef::Array(ref y_k)) => {
                Self::compare_px_y(x_p, x_k, y_k)
            },
            (&KeyRef::Array(ref x_k), &KeyRef::Prefixed(ref y_p, ref y_k)) => {
                Self::compare_x_py(x_k, y_p, y_k)
            },
            (&KeyRef::Array(ref x_k), &KeyRef::Array(ref y_k)) => {
                bcmp::Compare(&x_k, &y_k)
            },
        }
    }
}

/// A value as read from a page: in-page bytes, a streamed overflow value
/// (with its length), or a tombstone.
pub enum ValueRef<'a> {
    Array(&'a [u8]),
    Overflowed(usize, Box<Read>),
    Tombstone,
}

impl<'a> ValueRef<'a> {
    // Length in bytes; None denotes a tombstone.
    pub fn len(&self) -> Option<usize> {
        match *self {
            ValueRef::Array(a) => Some(a.len()),
            ValueRef::Overflowed(len, _) => Some(len),
            ValueRef::Tombstone => None,
        }
    }

    // Converts into an owned Blob.
    // NOTE(review): the Overflowed length is dropped here — Blob::Stream
    // carries no length (compiler will warn about the unused binding).
    pub fn into_blob(self) -> Blob {
        match self {
            ValueRef::Array(a) => {
                let mut k = Vec::new();
                k.push_all(a);
                Blob::Array(k.into_boxed_slice())
            },
            ValueRef::Overflowed(len, r) => Blob::Stream(r),
            ValueRef::Tombstone => Blob::Tombstone,
        }
    }
}

impl<'a> std::fmt::Debug for ValueRef<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::result::Result<(), std::fmt::Error> {
        match *self {
            ValueRef::Array(a) => write!(f, "Array, len={:?}", a),
            ValueRef::Overflowed(klen,_) => write!(f, "Overflowed, len={}", klen),
            ValueRef::Tombstone => write!(f, "Tombstone"),
        }
    }
}

// A contiguous, inclusive range of pages in the file.
#[derive(Hash,PartialEq,Eq,Copy,Clone,Debug)]
struct PageBlock {
    firstPage: PageNum,
    lastPage: PageNum,
}

impl PageBlock {
    fn new(first: PageNum, last: PageNum) -> PageBlock {
        PageBlock { firstPage: first, lastPage: last }
    }

    // Inclusive on both ends, hence the +1.
    fn count_pages(&self) -> PageNum {
        self.lastPage - self.firstPage + 1
    }

    fn contains_page(&self, pgnum: PageNum) -> bool {
        (pgnum >= self.firstPage) && (pgnum <= self.lastPage)
    }
}

// Linear scan over the block list; returns true if any block covers pgnum.
fn block_list_contains_page(blocks: &Vec<PageBlock>, pgnum: PageNum) -> bool {
    for blk in blocks.iter() {
        if blk.contains_page(pgnum) {
            return true;
        }
    }
    return false;
}

pub type SegmentNum = u64;

// Page allocation interface used while writing a new segment.
trait IPages {
    fn PageSize(&self) -> usize;
    fn Begin(&self) -> Result<PendingSegment>;
    fn GetBlock(&self, token: &mut PendingSegment) -> Result<PageBlock>;
    fn End(&self, token: PendingSegment, page: PageNum) -> Result<SegmentNum>;
}

// Seek semantics: exact match, or nearest key <= / >= the target.
#[derive(PartialEq,Copy,Clone)]
pub enum SeekOp {
    SEEK_EQ = 0,
    SEEK_LE = 1,
    SEEK_GE = 2,
}

// this code was ported from F# which assumes that any Stream
// that supports Seek also can give you its Length. That method
// isn't part of the Seek trait, but this implementation should
// suffice.
fn seek_len<R>(fs: &mut R) -> io::Result<u64> where R : Seek {
    // note the current position, jump to the end to learn the length,
    // then put the stream back where it was.
    let saved = try!(fs.seek(SeekFrom::Current(0)));
    let len = try!(fs.seek(SeekFrom::End(0)));
    try!(fs.seek(SeekFrom::Start(saved)));
    Ok(len)
}

/// Adapts a MultiCursor into a standard Iterator of key/value pairs.
struct CursorIterator<'a> {
    csr: MultiCursor<'a>,
}

impl<'a> CursorIterator<'a> {
    fn new(it: MultiCursor) -> CursorIterator {
        CursorIterator { csr: it }
    }
}

impl<'a> Iterator for CursorIterator<'a> {
    type Item = Result<kvp>;

    // Grab the current key and value, advance the underlying cursor,
    // and surface any error from those three steps as the item.
    fn next(&mut self) -> Option<Result<kvp>> {
        if !self.csr.IsValid() {
            return None;
        }
        let k = match self.csr.Key() {
            Ok(k) => k,
            Err(e) => return Some(Err(e)),
        };
        let v = match self.csr.Value() {
            Ok(v) => v,
            Err(e) => return Some(Err(e)),
        };
        match self.csr.Next() {
            Ok(()) => Some(Ok(kvp { Key: k, Value: v })),
            Err(e) => Some(Err(e)),
        }
    }
}

/// Outcome of a seek: not positioned at all, positioned on a key
/// unequal to the target, or positioned exactly on the target.
#[derive(Copy,Clone,Debug)]
pub enum SeekResult {
    Invalid,
    Unequal,
    Equal,
}

impl SeekResult {
    fn from_cursor<'a, T: ICursor<'a>>(csr: &T, k: &KeyRef) -> Result<SeekResult> {
        if !csr.IsValid() {
            Ok(SeekResult::Invalid)
        } else if Ordering::Equal == try!(csr.KeyCompare(k)) {
            Ok(SeekResult::Equal)
        } else {
            Ok(SeekResult::Unequal)
        }
    }

    fn is_valid(self) -> bool {
        match self {
            SeekResult::Invalid => false,
            SeekResult::Unequal | SeekResult::Equal => true,
        }
    }

    fn is_valid_and_equal(self) -> bool {
        match self {
            SeekResult::Equal => true,
            SeekResult::Invalid | SeekResult::Unequal => false,
        }
    }
}

/// The cursor interface implemented by segment cursors, the merge
/// cursor, and the tombstone-skipping cursor.
pub trait ICursor<'a> {
    fn SeekRef(&mut self, k: &KeyRef, sop: SeekOp) -> Result<SeekResult>;
    fn First(&mut self) -> Result<()>;
    fn Last(&mut self) -> Result<()>;
    fn Next(&mut self) -> Result<()>;
    fn Prev(&mut self) -> Result<()>;
    fn IsValid(&self) -> bool;
    fn KeyRef(&'a self) -> Result<KeyRef<'a>>;
    fn ValueRef(&'a self) -> Result<ValueRef<'a>>;

    // TODO we wish to remove these. but they're
    // faster for the merge iterator, which is sad.
fn Key(&self) -> Result<Box<[u8]>>; fn Value(&self) -> Result<Blob>; fn ValueLength(&self) -> Result<Option<usize>>; // tombstone is None fn KeyCompare(&self, k: &KeyRef) -> Result<Ordering>; } //#[derive(Copy,Clone)] pub struct DbSettings { pub AutoMergeEnabled : bool, pub AutoMergeMinimumPages : PageNum, pub DefaultPageSize : usize, pub PagesPerBlock : PageNum, } pub const DEFAULT_SETTINGS : DbSettings = DbSettings { AutoMergeEnabled : true, AutoMergeMinimumPages : 4, DefaultPageSize : 4096, PagesPerBlock : 256, }; #[derive(Clone)] struct SegmentInfo { root : PageNum, age : u32, // TODO does this grow? shouldn't it be a boxed array? // yes, but then derive clone complains. // ideally we could just stop cloning this struct. blocks : Vec<PageBlock> } pub mod utils { use std::io; use std::io::Seek; use std::io::Read; use std::io::SeekFrom; use super::PageNum; use super::LsmError; use super::Result; pub fn SeekPage(strm: &mut Seek, pgsz: usize, pageNumber: PageNum) -> Result<u64> { if 0==pageNumber { return Err(LsmError::InvalidPageNumber); } let pos = ((pageNumber as u64) - 1) * (pgsz as u64); let v = try!(strm.seek(SeekFrom::Start(pos))); Ok(v) } pub fn ReadFully(strm: &mut Read, buf: &mut [u8]) -> io::Result<usize> { let mut sofar = 0; let len = buf.len(); loop { let cur = &mut buf[sofar..len]; let n = try!(strm.read(cur)); if n==0 { break; } sofar += n; if sofar==len { break; } } let res : io::Result<usize> = Ok(sofar); res } } mod bcmp { use std::cmp::Ordering; use std::cmp::min; // TODO get rid of this function. regular cmp() is apparently lexicographic. #[inline(always)] pub fn Compare(x: &[u8], y: &[u8]) -> Ordering { x.cmp(y) } #[inline(always)] pub fn CompareWithPrefix(prefix: &[u8], x: &[u8], y: &[u8]) -> Ordering { assert!(prefix.len() > 0); if y.len() <= prefix.len() { prefix.cmp(y) } else { let c = prefix.cmp(&y[0 .. prefix.len()]); if c != Ordering::Equal { c } else { x.cmp(&y[prefix.len() .. 
y.len()]) } } } pub fn PrefixMatch(x: &[u8], y: &[u8], max: usize) -> usize { let len = min(x.len(), y.len()); let lim = min(len, max); let mut i = 0; while i<lim && x[i]==y[i] { i = i + 1; } i } // TODO rm fn StartsWith(x: &[u8], y: &[u8], max: usize) -> bool { if x.len() < y.len() { false } else { let len = y.len(); let mut i = 0; while i<len && x[i]==y[i] { i = i + 1; } i==len } } } mod Varint { // TODO this doesn't need to be usize. u8 is enough. pub fn SpaceNeededFor(v: u64) -> usize { if v<=240 { 1 } else if v<=2287 { 2 } else if v<=67823 { 3 } else if v<=16777215 { 4 } else if v<=4294967295 { 5 } else if v<=1099511627775 { 6 } else if v<=281474976710655 { 7 } else if v<=72057594037927935 { 8 } else { 9 } } // TODO stronger inline hint? pub fn read(buf: &[u8], cur: &mut usize) -> u64 { let c = *cur; let a0 = buf[c] as u64; if a0 <= 240u64 { *cur = *cur + 1; a0 } else if a0 <= 248u64 { let a1 = buf[c+1] as u64; let r = 240u64 + 256u64 * (a0 - 241u64) + a1; *cur = *cur + 2; r } else if a0 == 249u64 { let a1 = buf[c+1] as u64; let a2 = buf[c+2] as u64; let r = 2288u64 + 256u64 * a1 + a2; *cur = *cur + 3; r } else if a0 == 250u64 { let a1 = buf[c+1] as u64; let a2 = buf[c+2] as u64; let a3 = buf[c+3] as u64; let r = (a1<<16) | (a2<<8) | a3; *cur = *cur + 4; r } else if a0 == 251u64 { let a1 = buf[c+1] as u64; let a2 = buf[c+2] as u64; let a3 = buf[c+3] as u64; let a4 = buf[c+4] as u64; let r = (a1<<24) | (a2<<16) | (a3<<8) | a4; *cur = *cur + 5; r } else if a0 == 252u64 { let a1 = buf[c+1] as u64; let a2 = buf[c+2] as u64; let a3 = buf[c+3] as u64; let a4 = buf[c+4] as u64; let a5 = buf[c+5] as u64; let r = (a1<<32) | (a2<<24) | (a3<<16) | (a4<<8) | a5; *cur = *cur + 6; r } else if a0 == 253u64 { let a1 = buf[c+1] as u64; let a2 = buf[c+2] as u64; let a3 = buf[c+3] as u64; let a4 = buf[c+4] as u64; let a5 = buf[c+5] as u64; let a6 = buf[c+6] as u64; let r = (a1<<40) | (a2<<32) | (a3<<24) | (a4<<16) | (a5<<8) | a6; *cur = *cur + 7; r } else if a0 == 254u64 { let 
a1 = buf[c+1] as u64; let a2 = buf[c+2] as u64; let a3 = buf[c+3] as u64; let a4 = buf[c+4] as u64; let a5 = buf[c+5] as u64; let a6 = buf[c+6] as u64; let a7 = buf[c+7] as u64; let r = (a1<<48) | (a2<<40) | (a3<<32) | (a4<<24) | (a5<<16) | (a6<<8) | a7; *cur = *cur + 8; r } else { let a1 = buf[c+1] as u64; let a2 = buf[c+2] as u64; let a3 = buf[c+3] as u64; let a4 = buf[c+4] as u64; let a5 = buf[c+5] as u64; let a6 = buf[c+6] as u64; let a7 = buf[c+7] as u64; let a8 = buf[c+8] as u64; let r = (a1<<56) | (a2<<48) | (a3<<40) | (a4<<32) | (a5<<24) | (a6<<16) | (a7<<8) | a8; *cur = *cur + 9; r } } pub fn write(buf: &mut [u8], cur: &mut usize, v: u64) { let c = *cur; if v<=240u64 { buf[c] = v as u8; *cur = *cur + 1; } else if v<=2287u64 { buf[c] = ((v - 240u64) / 256u64 + 241u64) as u8; buf[c+1] = ((v - 240u64) % 256u64) as u8; *cur = *cur + 2; } else if v<=67823u64 { buf[c] = 249u8; buf[c+1] = ((v - 2288u64) / 256u64) as u8; buf[c+2] = ((v - 2288u64) % 256u64) as u8; *cur = *cur + 3; } else if v<=16777215u64 { buf[c] = 250u8; buf[c+1] = (v >> 16) as u8; buf[c+2] = (v >> 8) as u8; buf[c+3] = (v >> 0) as u8; *cur = *cur + 4; } else if v<=4294967295u64 { buf[c] = 251u8; buf[c+1] = (v >> 24) as u8; buf[c+2] = (v >> 16) as u8; buf[c+3] = (v >> 8) as u8; buf[c+4] = (v >> 0) as u8; *cur = *cur + 5; } else if v<=1099511627775u64 { buf[c] = 252u8; buf[c+1] = (v >> 32) as u8; buf[c+2] = (v >> 24) as u8; buf[c+3] = (v >> 16) as u8; buf[c+4] = (v >> 8) as u8; buf[c+5] = (v >> 0) as u8; *cur = *cur + 6; } else if v<=281474976710655u64 { buf[c] = 253u8; buf[c+1] = (v >> 40) as u8; buf[c+2] = (v >> 32) as u8; buf[c+3] = (v >> 24) as u8; buf[c+4] = (v >> 16) as u8; buf[c+5] = (v >> 8) as u8; buf[c+6] = (v >> 0) as u8; *cur = *cur + 7; } else if v<=72057594037927935u64 { buf[c] = 254u8; buf[c+1] = (v >> 48) as u8; buf[c+2] = (v >> 40) as u8; buf[c+3] = (v >> 32) as u8; buf[c+4] = (v >> 24) as u8; buf[c+5] = (v >> 16) as u8; buf[c+6] = (v >> 8) as u8; buf[c+7] = (v >> 0) as u8; *cur = 
*cur + 8; } else { buf[c] = 255u8; buf[c+1] = (v >> 56) as u8; buf[c+2] = (v >> 48) as u8; buf[c+3] = (v >> 40) as u8; buf[c+4] = (v >> 32) as u8; buf[c+5] = (v >> 24) as u8; buf[c+6] = (v >> 16) as u8; buf[c+7] = (v >> 8) as u8; buf[c+8] = (v >> 0) as u8; *cur = *cur + 9; } } } /* fn write_u32_le(v: &mut [u8], i: u32) { v[0] = ((i>> 0) & 0xff_u32) as u8; v[1] = ((i>> 8) & 0xff_u32) as u8; v[2] = ((i>>16) & 0xff_u32) as u8; v[3] = ((i>>24) & 0xff_u32) as u8; } */ fn write_u32_be(v: &mut [u8], i: u32) { v[0] = ((i>>24) & 0xff_u32) as u8; v[1] = ((i>>16) & 0xff_u32) as u8; v[2] = ((i>> 8) & 0xff_u32) as u8; v[3] = ((i>> 0) & 0xff_u32) as u8; } fn read_u32_be(v: &[u8]) -> u32 { let a0 = v[0] as u64; let a1 = v[1] as u64; let a2 = v[2] as u64; let a3 = v[3] as u64; let r = (a0 << 24) | (a1 << 16) | (a2 << 8) | (a3 << 0); // assert r fits r as u32 } fn read_u16_be(v: &[u8]) -> u16 { let a0 = v[0] as u64; let a1 = v[1] as u64; let r = (a0 << 8) | (a1 << 0); // assert r fits r as u16 } fn write_u16_be(v: &mut [u8], i: u16) { v[0] = ((i>>8) & 0xff_u16) as u8; v[1] = ((i>>0) & 0xff_u16) as u8; } struct PageBuilder { cur : usize, buf : Box<[u8]>, } // TODO bundling cur with the buf almost seems sad, because there are // cases where we want buf to be mutable but not cur. 
:-) impl PageBuilder { fn new(pgsz : usize) -> PageBuilder { let ba = vec![0;pgsz as usize].into_boxed_slice(); PageBuilder { cur: 0, buf:ba } } fn Reset(&mut self) { self.cur = 0; } fn Write(&self, strm: &mut Write) -> io::Result<()> { strm.write_all(&*self.buf) } fn PageSize(&self) -> usize { self.buf.len() } fn Buffer(&self) -> &[u8] { &self.buf } fn Position(&self) -> usize { self.cur } fn Available(&self) -> usize { self.buf.len() - self.cur } fn SetPageFlag(&mut self, x: u8) { self.buf[1] = self.buf[1] | (x); } fn PutByte(&mut self, x: u8) { self.buf[self.cur] = x; self.cur = self.cur + 1; } fn PutStream2(&mut self, s: &mut Read, len: usize) -> io::Result<usize> { let n = try!(utils::ReadFully(s, &mut self.buf[self.cur .. self.cur + len])); self.cur = self.cur + n; let res : io::Result<usize> = Ok(n); res } // TODO rm this function fn PutStream(&mut self, s: &mut Read, len: usize) -> io::Result<usize> { let n = try!(self.PutStream2(s, len)); // TODO if n != len fail, which may mean a different result type here let res : io::Result<usize> = Ok(len); res } fn PutArray(&mut self, ba: &[u8]) { self.buf[self.cur .. self.cur + ba.len()].clone_from_slice(ba); self.cur = self.cur + ba.len(); } fn PutInt32(&mut self, ov: u32) { let at = self.cur; write_u32_be(&mut self.buf[at .. at + SIZE_32], ov); self.cur = self.cur + SIZE_32; } fn SetSecondToLastInt32(&mut self, page: u32) { let len = self.buf.len(); let at = len - 2 * SIZE_32; if self.cur > at { panic!("SetSecondToLastInt32 is squashing data"); } write_u32_be(&mut self.buf[at .. at + SIZE_32], page); } fn SetLastInt32(&mut self, page: u32) { let len = self.buf.len(); let at = len - 1 * SIZE_32; if self.cur > at { panic!("SetLastInt32 is squashing data"); } write_u32_be(&mut self.buf[at .. at + SIZE_32], page); } fn PutInt16(&mut self, ov: u16) { let at = self.cur; write_u16_be(&mut self.buf[at .. 
at + SIZE_16], ov); self.cur = self.cur + SIZE_16; }

    // TODO rm
    fn PutInt16At(&mut self, at: usize, ov: u16) {
        write_u16_be(&mut self.buf[at .. at + SIZE_16], ov);
    }

    fn PutVarint(&mut self, ov: u64) {
        Varint::write(&mut *self.buf, &mut self.cur, ov);
    }
}

// TODO this struct should just go away. just use the buf.
/// A page-sized read buffer plus accessors for decoding the page format.
struct PageBuffer {
    buf : Box<[u8]>,
}

impl PageBuffer {
    fn new(pgsz: usize) -> PageBuffer {
        let ba = vec![0;pgsz as usize].into_boxed_slice();
        PageBuffer { buf:ba }
    }

    fn PageSize(&self) -> usize {
        self.buf.len()
    }

    /// Fill the whole buffer from the stream.
    fn Read(&mut self, strm: &mut Read) -> io::Result<usize> {
        utils::ReadFully(strm, &mut self.buf)
    }

    /// Read `len` bytes from the stream into the buffer starting at `off`.
    fn ReadPart(&mut self, strm: &mut Read, off: usize, len: usize) -> io::Result<usize> {
        // BUG FIX: the slice was [off .. len-off], which shrinks the
        // destination from both ends and is only correct when off==0.
        // `len` is a count (as in the F# original this was ported from),
        // so the destination is the `len` bytes starting at `off`.
        utils::ReadFully(strm, &mut self.buf[off .. off + len])
    }

    fn Compare(&self, cur: usize, len: usize, other: &[u8]) -> Ordering {
        let slice = &self.buf[cur .. cur + len];
        bcmp::Compare(slice, other)
    }

    // `len` is the total key length including the prefix.
    fn CompareWithPrefix(&self, cur: usize, prefix: &[u8], len: usize, other: &[u8]) -> Ordering {
        let slice = &self.buf[cur .. cur + len - prefix.len()];
        bcmp::CompareWithPrefix(prefix, slice, other)
    }

    fn PageType(&self) -> Result<PageType> {
        PageType::from_u8(self.buf[0])
    }

    fn GetByte(&self, cur: &mut usize) -> u8 {
        let r = self.buf[*cur];
        *cur = *cur + 1;
        r
    }

    fn GetInt32(&self, cur: &mut usize) -> u32 {
        let at = *cur;
        let r = read_u32_be(&self.buf[at .. at + SIZE_32]);
        *cur = *cur + SIZE_32;
        r
    }

    fn GetInt32At(&self, at: usize) -> u32 {
        read_u32_be(&self.buf[at .. at + SIZE_32])
    }

    // byte 1 of the page holds the flag bits.
    fn CheckPageFlag(&self, f: u8) -> bool {
        0 != (self.buf[1] & f)
    }

    fn GetSecondToLastInt32(&self) -> u32 {
        let len = self.buf.len();
        let at = len - 2 * SIZE_32;
        self.GetInt32At(at)
    }

    fn GetLastInt32(&self) -> u32 {
        let len = self.buf.len();
        let at = len - 1 * SIZE_32;
        self.GetInt32At(at)
    }

    fn GetInt16(&self, cur: &mut usize) -> u16 {
        let at = *cur;
        let r = read_u16_be(&self.buf[at ..
at + SIZE_16]); *cur = *cur + SIZE_16; r } fn get_slice(&self, start: usize, len: usize) -> &[u8] { &self.buf[start .. start + len] } fn GetIntoArray(&self, cur: &mut usize, a : &mut [u8]) { let len = a.len(); a.clone_from_slice(&self.buf[*cur .. *cur + len]); *cur = *cur + a.len(); } // TODO this function shows up a lot in the profiler // TODO inline hint? fn GetVarint(&self, cur: &mut usize) -> u64 { Varint::read(&*self.buf, cur) } } #[derive(PartialEq,Copy,Clone)] enum Direction { FORWARD = 0, BACKWARD = 1, WANDERING = 2, } struct MultiCursor<'a> { subcursors: Box<[SegmentCursor<'a>]>, sorted: Box<[(usize,Option<Ordering>)]>, cur: Option<usize>, dir: Direction, } impl<'a> MultiCursor<'a> { fn sort(&mut self, want_max: bool) -> Result<()> { if self.subcursors.is_empty() { return Ok(()) } // init the orderings to None. // the invalid cursors will stay that way. for i in 0 .. self.sorted.len() { self.sorted[i].1 = None; } for i in 1 .. self.sorted.len() { let mut j = i; while j > 0 { let nj = self.sorted[j].0; let nprev = self.sorted[j - 1].0; match (self.subcursors[nj].IsValid(), self.subcursors[nprev].IsValid()) { (true,true) => { let c = { if want_max { try!(SegmentCursor::compare_two(&self.subcursors[nprev],&self.subcursors[nj])) } else { try!(SegmentCursor::compare_two(&self.subcursors[nj],&self.subcursors[nprev])) } }; match c { Ordering::Greater => { self.sorted[j].1 = Some(Ordering::Greater); break; }, Ordering::Equal => { match nj.cmp(&nprev) { Ordering::Equal => { unreachable!(); }, Ordering::Greater => { self.sorted[j].1 = Some(Ordering::Equal); break; }, Ordering::Less => { self.sorted[j - 1].1 = Some(Ordering::Equal); // keep going }, } }, Ordering::Less => { // keep going self.sorted[j - 1].1 = Some(Ordering::Greater); }, } }, (true,false) => { // keep going }, (false,true) => { break; }, (false,false) => { match nj.cmp(&nprev) { Ordering::Equal => { unreachable!(); }, Ordering::Greater => { break; }, Ordering::Less => { // keep going }, } } }; 
self.sorted.swap(j, j - 1); j = j - 1; } } // fix the first one if self.sorted.len() > 0 { let n = self.sorted[0].0; if self.subcursors[n].IsValid() { self.sorted[0].1 = Some(Ordering::Equal); } } /* println!("{:?} : {}", self.sorted, if want_max { "backward" } else {"forward"} ); for i in 0 .. self.sorted.len() { let (n, ord) = self.sorted[i]; println!(" {:?}", ka[n]); } */ Ok(()) } fn sorted_first(&self) -> Option<usize> { let n = self.sorted[0].0; if self.sorted[0].1.is_some() { Some(n) } else { None } } fn findMin(&mut self) -> Result<Option<usize>> { if self.subcursors.is_empty() { Ok(None) } else { try!(self.sort(false)); Ok(self.sorted_first()) } } fn findMax(&mut self) -> Result<Option<usize>> { if self.subcursors.is_empty() { Ok(None) } else { try!(self.sort(true)); Ok(self.sorted_first()) } } fn Create(subs: Vec<SegmentCursor>) -> MultiCursor { let s = subs.into_boxed_slice(); let mut sorted = Vec::new(); for i in 0 .. s.len() { sorted.push((i, None)); } MultiCursor { subcursors: s, sorted: sorted.into_boxed_slice(), cur: None, dir: Direction::WANDERING, } } } impl<'a> ICursor<'a> for MultiCursor<'a> { fn IsValid(&self) -> bool { match self.cur { Some(i) => self.subcursors[i].IsValid(), None => false } } fn First(&mut self) -> Result<()> { for i in 0 .. self.subcursors.len() { try!(self.subcursors[i].First()); } self.cur = try!(self.findMin()); self.dir = Direction::FORWARD; Ok(()) } fn Last(&mut self) -> Result<()> { for i in 0 .. 
self.subcursors.len() { try!(self.subcursors[i].Last()); } self.cur = try!(self.findMax()); self.dir = Direction::BACKWARD; Ok(()) } fn Key(&self) -> Result<Box<[u8]>> { match self.cur { None => Err(LsmError::CursorNotValid), Some(icur) => self.subcursors[icur].Key(), } } fn KeyRef(&'a self) -> Result<KeyRef<'a>> { match self.cur { None => Err(LsmError::CursorNotValid), Some(icur) => self.subcursors[icur].KeyRef(), } } fn ValueRef(&'a self) -> Result<ValueRef<'a>> { match self.cur { None => Err(LsmError::CursorNotValid), Some(icur) => self.subcursors[icur].ValueRef(), } } fn KeyCompare(&self, k: &KeyRef) -> Result<Ordering> { match self.cur { None => Err(LsmError::CursorNotValid), Some(icur) => self.subcursors[icur].KeyCompare(k), } } fn Value(&self) -> Result<Blob> { match self.cur { None => Err(LsmError::CursorNotValid), Some(icur) => self.subcursors[icur].Value(), } } fn ValueLength(&self) -> Result<Option<usize>> { match self.cur { None => Err(LsmError::CursorNotValid), Some(icur) => self.subcursors[icur].ValueLength(), } } fn Next(&mut self) -> Result<()> { match self.cur { None => Err(LsmError::CursorNotValid), Some(icur) => { if (self.dir == Direction::FORWARD) { // TODO self.sorted[0] is cur. // immediately after that, there may (or may not be) some // entries which were Ordering:Equal to cur. call Next on // each of these. assert!(icur == self.sorted[0].0); for i in 1 .. self.sorted.len() { //println!("sorted[{}] : {:?}", i, self.sorted[i]); let (n,c) = self.sorted[i]; match c { None => { break; }, Some(c) => { if c == Ordering::Equal { try!(self.subcursors[n].Next()); } else { break; } }, } } } else { // TODO consider simplifying all the stuff below. // all this complexity may not be worth it. // we need to fix every cursor to point to its min // value > icur. // if perf didn't matter, this would be simple. // call Next on icur. and call Seek(GE) (and maybe Next) // on every other cursor. 
// but there are several cases where we can do a lot // less work than a Seek. And we have the information // to identify those cases. So, this function is // pretty complicated, but it's fast. // -------- // the current cursor (icur) is easy. it just needs Next(). // we'll do it last, so we can use it for comparisons. // for now we deal with all the others. // the current direction of the multicursor tells us // something about the state of all the others. fn half(dir: Direction, ki: &KeyRef, subs: &mut [SegmentCursor]) -> Result<()> { match dir { Direction::FORWARD => { // this is the happy case. each cursor is at most // one step away. // direction is FORWARD, so we know that every valid cursor // is pointing at a key which is either == to icur, or // it is already the min key > icur. for csr in subs { if csr.IsValid() { let cmp = { let k = try!(csr.KeyRef()); let cmp = KeyRef::cmp(&k, ki); cmp }; match cmp { Ordering::Less => { // should never happen, because FORWARD unreachable!(); }, Ordering::Greater => { // TODO assert that j.Prev is <= icur? // done }, Ordering::Equal => { try!(csr.Next()); }, } } } Ok(()) }, Direction::BACKWARD => { // this case isn't too bad. each cursor is either // one step away or two. // every other cursor is either == icur or it is the // max value < icur. // find the invalid cursors first. we have to call seek // on these, because we don't know if they might have // a valid value which is > icur. we save the list and // deal with them after the others. for csr in subs { if csr.IsValid() { let cmp = { let k = try!(csr.KeyRef()); let cmp = KeyRef::cmp(&k, ki); cmp }; match cmp { Ordering::Less => { try!(csr.Next()); // we moved one step. let's see if we need to move one more. if csr.IsValid() { let cmp = { let k = try!(csr.KeyRef()); let cmp = KeyRef::cmp(&k, ki); cmp }; match cmp { Ordering::Less => { // should never happen. we should not have // been more than one step away from icur. 
unreachable!(); }, Ordering::Greater => { // done }, Ordering::Equal => { // and one more step try!(csr.Next()); }, } } }, Ordering::Greater => { // should never happen, because BACKWARD unreachable!(); }, Ordering::Equal => { // one step away try!(csr.Next()); }, } } else { let sr = try!(csr.SeekRef(&ki, SeekOp::SEEK_GE)); if sr.is_valid_and_equal() { try!(csr.Next()); } } } Ok(()) }, Direction::WANDERING => { // we have no idea where all the other cursors are. // so we have to do a seek on each one. // unfortunately, we have to make a copy of the icur Key. // Seek only needs a reference to a slice for the key, // and because we don't handle the case where icur == j, // there should be no mutability conflict, in theory. // But Rust doesn't know that. It knows that both // cursors are in the same array, so we cannot have a // mutable reference (to seek) into that array while // there is any other reference (the icur key). // also, KeyRef() gives a KeyRef, which Seek can't handle. for j in 0 .. subs.len() { let csr = &mut subs[j]; let sr = try!(csr.SeekRef(&ki, SeekOp::SEEK_GE)); if sr.is_valid_and_equal() { try!(csr.Next()); } } Ok(()) }, } } { let (before, middle, after) = split3(&mut *self.subcursors, icur); let icsr = &middle[0]; let ki = try!(icsr.KeyRef()); half(self.dir, &ki, before); half(self.dir, &ki, after); } } // now the current cursor try!(self.subcursors[icur].Next()); // now find the min. // this seems kinda awful. we just walked through the entire cursor list, // and now we're doing it again. should we have just kept track along // the way? maybe, but it doesn't save any key comparisons. it just // moves those compares from a separate loop into the loops above. still // might be a good idea. TODO. 
self.cur = try!(self.findMin()); self.dir = Direction::FORWARD; Ok(()) }, } } // TODO fix Prev like Next fn Prev(&mut self) -> Result<()> { match self.cur { None => Err(LsmError::CursorNotValid), Some(icur) => { let k = { let k = try!(self.subcursors[icur].KeyRef()); let k = k.into_boxed_slice(); let k = KeyRef::from_boxed_slice(k); k }; for j in 0 .. self.subcursors.len() { let csr = &mut self.subcursors[j]; if (self.dir != Direction::BACKWARD) && (icur != j) { try!(csr.SeekRef(&k, SeekOp::SEEK_LE)); } if csr.IsValid() && (Ordering::Equal == try!(csr.KeyCompare(&k))) { try!(csr.Prev()); } } self.cur = try!(self.findMax()); self.dir = Direction::BACKWARD; Ok(()) }, } } fn SeekRef(&mut self, k: &KeyRef, sop:SeekOp) -> Result<SeekResult> { self.cur = None; self.dir = Direction::WANDERING; for j in 0 .. self.subcursors.len() { let sr = try!(self.subcursors[j].SeekRef(k, sop)); if sr.is_valid_and_equal() { self.cur = Some(j); return Ok(sr); } } match sop { SeekOp::SEEK_GE => { self.cur = try!(self.findMin()); match self.cur { Some(i) => { self.dir = Direction::FORWARD; SeekResult::from_cursor(&self.subcursors[i], k) }, None => { Ok(SeekResult::Invalid) }, } }, SeekOp::SEEK_LE => { self.cur = try!(self.findMax()); match self.cur { Some(i) => { self.dir = Direction::BACKWARD; SeekResult::from_cursor(&self.subcursors[i], k) }, None => { Ok(SeekResult::Invalid) }, } }, SeekOp::SEEK_EQ => { Ok(SeekResult::Invalid) }, } } } pub struct LivingCursor<'a> { chain : MultiCursor<'a> } impl<'a> LivingCursor<'a> { fn skipTombstonesForward(&mut self) -> Result<()> { while self.chain.IsValid() && try!(self.chain.ValueLength()).is_none() { try!(self.chain.Next()); } Ok(()) } fn skipTombstonesBackward(&mut self) -> Result<()> { while self.chain.IsValid() && try!(self.chain.ValueLength()).is_none() { try!(self.chain.Prev()); } Ok(()) } fn Create(ch : MultiCursor) -> LivingCursor { LivingCursor { chain : ch } } } impl<'a> ICursor<'a> for LivingCursor<'a> { fn First(&mut self) -> 
Result<()> { try!(self.chain.First()); try!(self.skipTombstonesForward()); Ok(()) }

    fn Last(&mut self) -> Result<()> {
        try!(self.chain.Last());
        try!(self.skipTombstonesBackward());
        Ok(())
    }

    fn Key(&self) -> Result<Box<[u8]>> {
        self.chain.Key()
    }

    fn KeyRef(&'a self) -> Result<KeyRef<'a>> {
        self.chain.KeyRef()
    }

    fn ValueRef(&'a self) -> Result<ValueRef<'a>> {
        self.chain.ValueRef()
    }

    fn Value(&self) -> Result<Blob> {
        self.chain.Value()
    }

    fn ValueLength(&self) -> Result<Option<usize>> {
        self.chain.ValueLength()
    }

    // valid only when positioned on a live (non-tombstone) pair.
    fn IsValid(&self) -> bool {
        self.chain.IsValid() && match self.chain.ValueLength() {
            Ok(v) => v.is_some(),
            Err(_) => false,
        }
    }

    fn KeyCompare(&self, k: &KeyRef) -> Result<Ordering> {
        self.chain.KeyCompare(k)
    }

    fn Next(&mut self) -> Result<()> {
        try!(self.chain.Next());
        try!(self.skipTombstonesForward());
        Ok(())
    }

    fn Prev(&mut self) -> Result<()> {
        try!(self.chain.Prev());
        try!(self.skipTombstonesBackward());
        Ok(())
    }

    fn SeekRef(&mut self, k: &KeyRef, sop:SeekOp) -> Result<SeekResult> {
        let sr = try!(self.chain.SeekRef(k, sop));
        match sop {
            SeekOp::SEEK_GE => {
                // if we landed on a tombstone, slide forward to the next
                // live pair and re-evaluate the seek result.
                // BUG FIX: was .unwrap() on ValueLength(), which panicked
                // on an I/O error; propagate the error instead.
                if sr.is_valid() && try!(self.chain.ValueLength()).is_none() {
                    try!(self.skipTombstonesForward());
                    SeekResult::from_cursor(&self.chain, k)
                } else {
                    Ok(sr)
                }
            },
            SeekOp::SEEK_LE => {
                if sr.is_valid() && try!(self.chain.ValueLength()).is_none() {
                    try!(self.skipTombstonesBackward());
                    SeekResult::from_cursor(&self.chain, k)
                } else {
                    Ok(sr)
                }
            },
            SeekOp::SEEK_EQ => Ok(sr),
        }
    }
}

/// On-disk page types; stored in byte 0 of every page.
#[derive(Hash,PartialEq,Eq,Copy,Clone,Debug)]
#[repr(u8)]
enum PageType {
    LEAF_NODE,
    PARENT_NODE,
    OVERFLOW_NODE,
}

impl PageType {
    #[inline(always)]
    fn to_u8(self) -> u8 {
        match self {
            PageType::LEAF_NODE => 1,
            PageType::PARENT_NODE => 2,
            PageType::OVERFLOW_NODE => 3,
        }
    }

    #[inline(always)]
    fn from_u8(v: u8) -> Result<PageType> {
        match v {
            1 => Ok(PageType::LEAF_NODE),
            2 => Ok(PageType::PARENT_NODE),
            3 => Ok(PageType::OVERFLOW_NODE),
            _ => Err(LsmError::InvalidPageType),
        }
    }
}

mod ValueFlag { pub const
FLAG_OVERFLOW: u8 = 1; pub const FLAG_TOMBSTONE: u8 = 2; }

// flag bits stored in byte 1 of a page; set with OR (SetPageFlag)
// and tested with AND (CheckPageFlag).
mod PageFlag {
    pub const FLAG_ROOT_NODE: u8 = 1;
    pub const FLAG_BOUNDARY_NODE: u8 = 2;
    // NOTE(review): 3 == 1|2, so under the OR/AND usage above, setting
    // FLAG_ENDS_ON_BOUNDARY also makes CheckPageFlag(FLAG_ROOT_NODE)
    // and CheckPageFlag(FLAG_BOUNDARY_NODE) report true for this page.
    // Looks like this should be 4 -- confirm against every reader
    // before changing, since this is part of the on-disk format.
    pub const FLAG_ENDS_ON_BOUNDARY: u8 = 3;
}

#[derive(Debug)]
// this struct is used to remember pages we have written.
// for each page, we need to remember a key, and it needs
// to be in a box because the original copy is gone and
// the page has been written out to disk.
struct pgitem {
    page : PageNum,
    key : Box<[u8]>,
}

// accumulator state used while writing out one level of parent nodes.
struct ParentState {
    sofar : usize,
    nextGeneration : Vec<pgitem>,
    blk : PageBlock,
}

// this enum keeps track of what happened to a key as we
// processed it. either we determined that it will fit
// inline or we wrote it as an overflow.
enum KeyLocation {
    Inline,
    Overflowed(PageNum),
}

// this enum keeps track of what happened to a value as we
// processed it. it might have already been overflowed. if
// it's going to fit in the page, we still have the data
// buffer.
enum ValueLocation {
    Tombstone,
    // when this is a Buffer, this gets ownership of kvp.Value
    Buffer(Box<[u8]>),
    Overflowed(usize,PageNum),
}

// one key/value pair staged for the leaf currently being built.
struct LeafPair {
    // key gets ownership of kvp.Key
    key : Box<[u8]>,
    kLoc : KeyLocation,
    vLoc : ValueLocation,
}

// accumulator state used while writing out the leaf level.
struct LeafState {
    sofarLeaf : usize,
    keys_in_this_leaf : Vec<LeafPair>,
    prevLeaf : PageNum,
    prefixLen : usize,
    firstLeaf : PageNum,
    leaves : Vec<pgitem>,
    blk : PageBlock,
}

// Write a segment from an already-sorted stream of key/value pairs,
// returning the segment number and a page number (presumably the
// segment's root page -- see SegmentInfo.root; confirm at the caller).
fn CreateFromSortedSequenceOfKeyValuePairs<I,SeekWrite>(fs: &mut SeekWrite,
                                                        pageManager: &IPages,
                                                        source: I,
                                                       ) -> Result<(SegmentNum,PageNum)> where I:Iterator<Item=Result<kvp>>, SeekWrite : Seek+Write {

    // write one overflowed key or value, starting in startingBlock;
    // returns (bytes written, the block where writing may continue).
    fn writeOverflow<SeekWrite>(startingBlock: PageBlock,
                                ba: &mut Read,
                                pageManager: &IPages,
                                fs: &mut SeekWrite
                               ) -> Result<(usize,PageBlock)> where SeekWrite : Seek+Write {

        // fill the first overflow page of a block -- the only overflow
        // page that carries a header (type byte + flag byte).
        fn buildFirstPage(ba: &mut Read, pbFirstOverflow : &mut PageBuilder, pgsz: usize) -> Result<(usize,bool)> {
            pbFirstOverflow.Reset();
            pbFirstOverflow.PutByte(PageType::OVERFLOW_NODE.to_u8());
            pbFirstOverflow.PutByte(0u8); // starts 0, may be changed
later let room = pgsz - (2 + SIZE_32); // something will be put in lastInt32 later let put = try!(pbFirstOverflow.PutStream2(ba, room)); Ok((put, put<room)) }; fn buildRegularPage(ba: &mut Read, pbOverflow : &mut PageBuilder, pgsz: usize) -> Result<(usize,bool)> { pbOverflow.Reset(); let room = pgsz; let put = try!(pbOverflow.PutStream2(ba, room)); Ok((put, put<room)) }; fn buildBoundaryPage(ba: &mut Read, pbOverflow : &mut PageBuilder, pgsz: usize) -> Result<(usize,bool)> { pbOverflow.Reset(); let room = pgsz - SIZE_32; // something will be put in lastInt32 before the page is written let put = try!(pbOverflow.PutStream2(ba, room)); Ok((put, put<room)) } fn writeRegularPages<SeekWrite>(max: PageNum, sofar: usize, pb: &mut PageBuilder, fs: &mut SeekWrite, ba: &mut Read, pgsz: usize ) -> Result<(PageNum,usize,bool)> where SeekWrite : Seek+Write { let mut i = 0; let mut sofar = sofar; loop { if i < max { let (put, finished) = try!(buildRegularPage(ba, pb, pgsz)); if put==0 { return Ok((i, sofar, true)); } else { sofar = sofar + put; try!(pb.Write(fs)); if finished { return Ok((i+1, sofar, true)); } else { i = i + 1; } } } else { return Ok((i, sofar, false)); } } } // TODO misnamed fn writeOneBlock<SeekWrite>(param_sofar: usize, param_firstBlk: PageBlock, fs: &mut SeekWrite, ba: &mut Read, pgsz: usize, pbOverflow: &mut PageBuilder, pbFirstOverflow: &mut PageBuilder, pageManager: &IPages, token: &mut PendingSegment ) -> Result<(usize,PageBlock)> where SeekWrite : Seek+Write { // each trip through this loop will write out one // block, starting with the overflow first page, // followed by zero-or-more "regular" overflow pages, // which have no header. we'll stop at the block boundary, // either because we land there or because the whole overflow // won't fit and we have to continue into the next block. // the boundary page will be like a regular overflow page, // headerless, but it is four bytes smaller. 
let mut loop_sofar = param_sofar; let mut loop_firstBlk = param_firstBlk; loop { let sofar = loop_sofar; let firstBlk = loop_firstBlk; let (putFirst,finished) = try!(buildFirstPage (ba, pbFirstOverflow, pgsz)); if putFirst==0 { return Ok((sofar, firstBlk)); } else { // note that we haven't written the first page yet. we may have to fix // a couple of things before it gets written out. let sofar = sofar + putFirst; if firstBlk.firstPage == firstBlk.lastPage { // the first page landed on a boundary. // we can just set the flag and write it now. pbFirstOverflow.SetPageFlag(PageFlag::FLAG_BOUNDARY_NODE); let blk = try!(pageManager.GetBlock(&mut *token)); pbFirstOverflow.SetLastInt32(blk.firstPage); try!(pbFirstOverflow.Write(fs)); try!(utils::SeekPage(fs, pgsz, blk.firstPage)); if !finished { loop_sofar = sofar; loop_firstBlk = blk; } else { return Ok((sofar, blk)); } } else { let firstRegularPageNumber = firstBlk.firstPage + 1; if finished { // the first page is also the last one pbFirstOverflow.SetLastInt32(0); // offset to last used page in this block, which is this one try!(pbFirstOverflow.Write(fs)); return Ok((sofar, PageBlock::new(firstRegularPageNumber,firstBlk.lastPage))); } else { // we need to write more pages, // until the end of the block, // or the end of the stream, // whichever comes first try!(utils::SeekPage(fs, pgsz, firstRegularPageNumber)); // availableBeforeBoundary is the number of pages until the boundary, // NOT counting the boundary page, and the first page in the block // has already been accounted for, so we're just talking about data pages. 
let availableBeforeBoundary = if firstBlk.lastPage > 0 { (firstBlk.lastPage - firstRegularPageNumber) } else { PageNum::max_value() } ; let (numRegularPages, sofar, finished) = try!(writeRegularPages(availableBeforeBoundary, sofar, pbOverflow, fs, ba, pgsz)); if finished { // go back and fix the first page pbFirstOverflow.SetLastInt32(numRegularPages); try!(utils::SeekPage(fs, pgsz, firstBlk.firstPage)); try!(pbFirstOverflow.Write(fs)); // now reset to the next page in the block let blk = PageBlock::new(firstRegularPageNumber + numRegularPages, firstBlk.lastPage); try!(utils::SeekPage(fs, pgsz, blk.firstPage)); return Ok((sofar,blk)); } else { // we need to write out a regular page except with a // boundary pointer in it. and we need to set // FLAG_ENDS_ON_BOUNDARY on the first // overflow page in this block. let (putBoundary,finished) = try!(buildBoundaryPage (ba, pbOverflow, pgsz)); if putBoundary==0 { // go back and fix the first page pbFirstOverflow.SetLastInt32(numRegularPages); try!(utils::SeekPage(fs, pgsz, firstBlk.firstPage)); try!(pbFirstOverflow.Write(fs)); // now reset to the next page in the block let blk = PageBlock::new(firstRegularPageNumber + numRegularPages, firstBlk.lastPage); try!(utils::SeekPage(fs, pgsz, firstBlk.lastPage)); return Ok((sofar,blk)); } else { // write the boundary page let sofar = sofar + putBoundary; let blk = try!(pageManager.GetBlock(&mut *token)); pbOverflow.SetLastInt32(blk.firstPage); try!(pbOverflow.Write(fs)); // go back and fix the first page pbFirstOverflow.SetPageFlag(PageFlag::FLAG_ENDS_ON_BOUNDARY); pbFirstOverflow.SetLastInt32(numRegularPages + 1); try!(utils::SeekPage(fs, pgsz, firstBlk.firstPage)); try!(pbFirstOverflow.Write(fs)); // now reset to the first page in the next block try!(utils::SeekPage(fs, pgsz, blk.firstPage)); if finished { loop_sofar = sofar; loop_firstBlk = blk; } else { return Ok((sofar,blk)); } } } } } } } } let pgsz = pageManager.PageSize(); let mut token = try!(pageManager.Begin()); let mut 
pbFirstOverflow = PageBuilder::new(pgsz); let mut pbOverflow = PageBuilder::new(pgsz); writeOneBlock(0, startingBlock, fs, ba, pgsz, &mut pbOverflow, &mut pbFirstOverflow, pageManager, &mut token) } fn writeLeaves<I,SeekWrite>(leavesBlk:PageBlock, pageManager: &IPages, source: I, vbuf: &mut [u8], fs: &mut SeekWrite, pb: &mut PageBuilder, token: &mut PendingSegment, ) -> Result<(PageBlock,Vec<pgitem>,PageNum)> where I: Iterator<Item=Result<kvp>> , SeekWrite : Seek+Write { // 2 for the page type and flags // 4 for the prev page // 2 for the stored count // 4 for lastInt32 (which isn't in pb.Available) const LEAF_PAGE_OVERHEAD: usize = 2 + 4 + 2 + 4; fn buildLeaf(st: &mut LeafState, pb: &mut PageBuilder) -> Box<[u8]> { pb.Reset(); pb.PutByte(PageType::LEAF_NODE.to_u8()); pb.PutByte(0u8); // flags pb.PutInt32 (st.prevLeaf); // prev page num. // TODO prefixLen is one byte. should it be two? pb.PutByte(st.prefixLen as u8); if st.prefixLen > 0 { pb.PutArray(&st.keys_in_this_leaf[0].key[0 .. st.prefixLen]); } let count_keys_in_this_leaf = st.keys_in_this_leaf.len(); // TODO should we support more than 64k keys in a leaf? // either way, overflow-check this cast. pb.PutInt16 (count_keys_in_this_leaf as u16); fn f(pb: &mut PageBuilder, prefixLen: usize, lp: &LeafPair) { match lp.kLoc { KeyLocation::Inline => { pb.PutByte(0u8); // flags pb.PutVarint(lp.key.len() as u64); pb.PutArray(&lp.key[prefixLen .. lp.key.len()]); }, KeyLocation::Overflowed(kpage) => { pb.PutByte(ValueFlag::FLAG_OVERFLOW); pb.PutVarint(lp.key.len() as u64); pb.PutInt32(kpage); }, } match lp.vLoc { ValueLocation::Tombstone => { pb.PutByte(ValueFlag::FLAG_TOMBSTONE); }, ValueLocation::Buffer (ref vbuf) => { pb.PutByte(0u8); pb.PutVarint(vbuf.len() as u64); pb.PutArray(&vbuf); }, ValueLocation::Overflowed (vlen,vpage) => { pb.PutByte(ValueFlag::FLAG_OVERFLOW); pb.PutVarint(vlen as u64); pb.PutInt32(vpage); }, } } // deal with all the keys except the last one for lp in st.keys_in_this_leaf.drain(0 .. 
count_keys_in_this_leaf-1) { f(pb, st.prefixLen, &lp); } assert!(st.keys_in_this_leaf.len() == 1); let lp = st.keys_in_this_leaf.remove(0); assert!(st.keys_in_this_leaf.is_empty()); f(pb, st.prefixLen, &lp); lp.key } fn writeLeaf<SeekWrite>(st: &mut LeafState, isRootPage: bool, pb: &mut PageBuilder, fs: &mut SeekWrite, pgsz: usize, pageManager: &IPages, token: &mut PendingSegment, ) -> Result<()> where SeekWrite : Seek+Write { let last_key = buildLeaf(st, pb); assert!(st.keys_in_this_leaf.is_empty()); let thisPageNumber = st.blk.firstPage; let firstLeaf = if st.leaves.is_empty() { thisPageNumber } else { st.firstLeaf }; let nextBlk = if isRootPage { PageBlock::new(thisPageNumber + 1, st.blk.lastPage) } else if thisPageNumber == st.blk.lastPage { pb.SetPageFlag(PageFlag::FLAG_BOUNDARY_NODE); let newBlk = try!(pageManager.GetBlock(&mut *token)); pb.SetLastInt32(newBlk.firstPage); newBlk } else { PageBlock::new(thisPageNumber + 1, st.blk.lastPage) }; try!(pb.Write(fs)); if nextBlk.firstPage != (thisPageNumber+1) { try!(utils::SeekPage(fs, pgsz, nextBlk.firstPage)); } let pg = pgitem {page:thisPageNumber, key:last_key}; st.leaves.push(pg); st.sofarLeaf = 0; st.prevLeaf = thisPageNumber; st.prefixLen = 0; st.firstLeaf = firstLeaf; st.blk = nextBlk; Ok(()) } // TODO can the overflow page number become a varint? const NEEDED_FOR_OVERFLOW_PAGE_NUMBER: usize = 4; // the max limit of an inline key is when that key is the only // one in the leaf, and its value is overflowed. 
let pgsz = pageManager.PageSize(); let maxKeyInline = pgsz - LEAF_PAGE_OVERHEAD - 1 // prefixLen - 1 // key flags - Varint::SpaceNeededFor(pgsz as u64) // approx worst case inline key len - 1 // value flags - 9 // worst case varint value len - NEEDED_FOR_OVERFLOW_PAGE_NUMBER; // overflowed value page fn kLocNeed(k: &[u8], kloc: &KeyLocation, prefixLen: usize) -> usize { let klen = k.len(); match *kloc { KeyLocation::Inline => { 1 + Varint::SpaceNeededFor(klen as u64) + klen - prefixLen }, KeyLocation::Overflowed(_) => { 1 + Varint::SpaceNeededFor(klen as u64) + NEEDED_FOR_OVERFLOW_PAGE_NUMBER }, } } fn vLocNeed (vloc: &ValueLocation) -> usize { match *vloc { ValueLocation::Tombstone => { 1 }, ValueLocation::Buffer(ref vbuf) => { let vlen = vbuf.len(); 1 + Varint::SpaceNeededFor(vlen as u64) + vlen }, ValueLocation::Overflowed(vlen,_) => { 1 + Varint::SpaceNeededFor(vlen as u64) + NEEDED_FOR_OVERFLOW_PAGE_NUMBER }, } } fn leafPairSize(prefixLen: usize, lp: &LeafPair) -> usize { kLocNeed(&lp.key, &lp.kLoc, prefixLen) + vLocNeed(&lp.vLoc) } fn defaultPrefixLen(k: &[u8]) -> usize { // TODO max prefix. relative to page size? currently must fit in one byte. if k.len() > 255 { 255 } else { k.len() } } // this is the body of writeLeaves let mut st = LeafState { sofarLeaf: 0, firstLeaf: 0, prevLeaf: 0, keys_in_this_leaf:Vec::new(), prefixLen: 0, leaves:Vec::new(), blk:leavesBlk, }; for result_pair in source { let mut pair = try!(result_pair); let k = pair.Key; // TODO is it possible for this to conclude that the key must be overflowed // when it would actually fit because of prefixing? let (blkAfterKey,kloc) = if k.len() <= maxKeyInline { (st.blk, KeyLocation::Inline) } else { let vPage = st.blk.firstPage; let (_,newBlk) = try!(writeOverflow(st.blk, &mut &*k, pageManager, fs)); (newBlk, KeyLocation::Overflowed(vPage)) }; // the max limit of an inline value is when the key is inline // on a new page. // TODO this is a usize, so it might cause integer underflow. 
let availableOnNewPageAfterKey = pgsz - LEAF_PAGE_OVERHEAD - 1 // prefixLen - 1 // key flags - Varint::SpaceNeededFor(k.len() as u64) - k.len() - 1 // value flags ; // availableOnNewPageAfterKey needs to accomodate the value and its length as a varint. // it might already be <=0 because of the key length let maxValueInline = if availableOnNewPageAfterKey > 0 { let neededForVarintLen = Varint::SpaceNeededFor(availableOnNewPageAfterKey as u64); let avail2 = availableOnNewPageAfterKey - neededForVarintLen; if avail2 > 0 { avail2 } else { 0 } } else { 0 }; let (blkAfterValue, vloc) = match pair.Value { Blob::Tombstone => { (blkAfterKey, ValueLocation::Tombstone) }, _ => match kloc { KeyLocation::Inline => { if maxValueInline == 0 { match pair.Value { Blob::Tombstone => { (blkAfterKey, ValueLocation::Tombstone) }, Blob::Stream(ref mut strm) => { let valuePage = blkAfterKey.firstPage; let (len,newBlk) = try!(writeOverflow(blkAfterKey, &mut *strm, pageManager, fs)); (newBlk, ValueLocation::Overflowed(len,valuePage)) }, Blob::Array(a) => { if a.is_empty() { // TODO maybe we need ValueLocation::Empty (blkAfterKey, ValueLocation::Buffer(a)) } else { let valuePage = blkAfterKey.firstPage; let strm = a; // TODO need a Read for this let (len,newBlk) = try!(writeOverflow(blkAfterKey, &mut &*strm, pageManager, fs)); (newBlk, ValueLocation::Overflowed(len,valuePage)) } }, } } else { match pair.Value { Blob::Tombstone => { (blkAfterKey, ValueLocation::Tombstone) }, Blob::Stream(ref mut strm) => { // not sure reusing vbuf is worth it. maybe we should just // alloc here. ownership will get passed into the // ValueLocation when it fits. let vread = try!(utils::ReadFully(&mut *strm, &mut vbuf[0 .. maxValueInline+1])); let vbuf = &vbuf[0 .. vread]; if vread < maxValueInline { // TODO this alloc+copy is unfortunate let mut va = Vec::new(); for i in 0 .. 
vbuf.len() { va.push(vbuf[i]); } (blkAfterKey, ValueLocation::Buffer(va.into_boxed_slice())) } else { let valuePage = blkAfterKey.firstPage; let (len,newBlk) = try!(writeOverflow(blkAfterKey, &mut (vbuf.chain(strm)), pageManager, fs)); (newBlk, ValueLocation::Overflowed (len,valuePage)) } }, Blob::Array(a) => { if a.len() < maxValueInline { (blkAfterKey, ValueLocation::Buffer(a)) } else { let valuePage = blkAfterKey.firstPage; let (len,newBlk) = try!(writeOverflow(blkAfterKey, &mut &*a, pageManager, fs)); (newBlk, ValueLocation::Overflowed(len,valuePage)) } }, } } }, KeyLocation::Overflowed(_) => { match pair.Value { Blob::Tombstone => { (blkAfterKey, ValueLocation::Tombstone) }, Blob::Stream(ref mut strm) => { let valuePage = blkAfterKey.firstPage; let (len,newBlk) = try!(writeOverflow(blkAfterKey, &mut *strm, pageManager, fs)); (newBlk, ValueLocation::Overflowed(len,valuePage)) }, Blob::Array(a) => { if a.is_empty() { // TODO maybe we need ValueLocation::Empty (blkAfterKey, ValueLocation::Buffer(a)) } else { let valuePage = blkAfterKey.firstPage; let (len,newBlk) = try!(writeOverflow(blkAfterKey, &mut &*a, pageManager, fs)); (newBlk, ValueLocation::Overflowed(len,valuePage)) } } } } } }; // whether/not the key/value are to be overflowed is now already decided. // now all we have to do is decide if this key/value are going into this leaf // or not. note that it is possible to overflow these and then have them not // fit into the current leaf and end up landing in the next leaf. st.blk = blkAfterValue; // TODO ignore prefixLen for overflowed keys? 
let newPrefixLen = if st.keys_in_this_leaf.is_empty() { defaultPrefixLen(&k) } else { bcmp::PrefixMatch(&*st.keys_in_this_leaf[0].key, &k, st.prefixLen) }; let sofar = if newPrefixLen < st.prefixLen { // the prefixLen would change with the addition of this key, // so we need to recalc sofar let sum = st.keys_in_this_leaf.iter().map(|lp| leafPairSize(newPrefixLen, lp)).sum();; sum } else { st.sofarLeaf }; let fit = { let needed = kLocNeed(&k, &kloc, newPrefixLen) + vLocNeed(&vloc); let used = sofar + LEAF_PAGE_OVERHEAD + 1 + newPrefixLen; if pgsz > used { let available = pgsz - used; (available >= needed) } else { false } }; let writeThisPage = (! st.keys_in_this_leaf.is_empty()) && (! fit); if writeThisPage { try!(writeLeaf(&mut st, false, pb, fs, pgsz, pageManager, &mut *token)); } // TODO ignore prefixLen for overflowed keys? let newPrefixLen = if st.keys_in_this_leaf.is_empty() { defaultPrefixLen(&k) } else { bcmp::PrefixMatch(&*st.keys_in_this_leaf[0].key, &k, st.prefixLen) }; let sofar = if newPrefixLen < st.prefixLen { // the prefixLen will change with the addition of this key, // so we need to recalc sofar let sum = st.keys_in_this_leaf.iter().map(|lp| leafPairSize(newPrefixLen, lp)).sum();; sum } else { st.sofarLeaf }; // note that the LeafPair struct gets ownership of the key provided // from above. 
let lp = LeafPair { key:k, kLoc:kloc, vLoc:vloc, }; st.sofarLeaf=sofar + leafPairSize(newPrefixLen, &lp); st.keys_in_this_leaf.push(lp); st.prefixLen=newPrefixLen; } if !st.keys_in_this_leaf.is_empty() { let isRootNode = st.leaves.is_empty(); try!(writeLeaf(&mut st, isRootNode, pb, fs, pgsz, pageManager, &mut *token)); } Ok((st.blk,st.leaves,st.firstLeaf)) } fn writeParentNodes<SeekWrite>(startingBlk: PageBlock, children: &mut Vec<pgitem>, pgsz: usize, fs: &mut SeekWrite, pageManager: &IPages, token: &mut PendingSegment, lastLeaf: PageNum, firstLeaf: PageNum, pb: &mut PageBuilder, ) -> Result<(PageBlock, Vec<pgitem>)> where SeekWrite : Seek+Write { // 2 for the page type and flags // 2 for the stored count // 5 for the extra ptr we will add at the end, a varint, 5 is worst case (page num < 4294967295L) // 4 for lastInt32 const PARENT_PAGE_OVERHEAD: usize = 2 + 2 + 5 + 4; fn calcAvailable(currentSize: usize, couldBeRoot: bool, pgsz: usize) -> usize { let basicSize = pgsz - currentSize; let allowanceForRootNode = if couldBeRoot { SIZE_32 } else { 0 }; // first/last Leaf, lastInt32 already // TODO can this cause integer overflow? 
basicSize - allowanceForRootNode } fn buildParentPage(items: &mut Vec<pgitem>, lastPtr: PageNum, overflows: &HashMap<usize,PageNum>, pb : &mut PageBuilder, ) { pb.Reset(); pb.PutByte(PageType::PARENT_NODE.to_u8()); pb.PutByte(0u8); pb.PutInt16(items.len() as u16); // store all the ptrs, n+1 of them for x in items.iter() { pb.PutVarint(x.page as u64); } pb.PutVarint(lastPtr as u64); // store all the keys, n of them for (i,x) in items.drain(..).enumerate() { match overflows.get(&i) { Some(pg) => { pb.PutByte(ValueFlag::FLAG_OVERFLOW); pb.PutVarint(x.key.len() as u64); pb.PutInt32(*pg as PageNum); }, None => { pb.PutByte(0u8); pb.PutVarint(x.key.len() as u64); pb.PutArray(&x.key); }, } } } fn writeParentPage<SeekWrite>(st: &mut ParentState, items: &mut Vec<pgitem>, overflows: &HashMap<usize,PageNum>, pgnum: PageNum, key: Box<[u8]>, isRootNode: bool, pb: &mut PageBuilder, lastLeaf: PageNum, fs: &mut SeekWrite, pageManager: &IPages, pgsz: usize, token: &mut PendingSegment, firstLeaf: PageNum, ) -> Result<()> where SeekWrite : Seek+Write { // assert st.sofar > 0 let thisPageNumber = st.blk.firstPage; buildParentPage(items, pgnum, &overflows, pb); let nextBlk = if isRootNode { pb.SetPageFlag(PageFlag::FLAG_ROOT_NODE); pb.SetSecondToLastInt32(firstLeaf); pb.SetLastInt32(lastLeaf); PageBlock::new(thisPageNumber+1,st.blk.lastPage) } else { if st.blk.firstPage == st.blk.lastPage { pb.SetPageFlag(PageFlag::FLAG_BOUNDARY_NODE); let newBlk = try!(pageManager.GetBlock(&mut *token)); pb.SetLastInt32(newBlk.firstPage); newBlk } else { PageBlock::new(thisPageNumber+1,st.blk.lastPage) } }; try!(pb.Write(fs)); if nextBlk.firstPage != (thisPageNumber+1) { try!(utils::SeekPage(fs, pgsz, nextBlk.firstPage)); } st.sofar = 0; st.blk = nextBlk; let pg = pgitem {page:thisPageNumber, key:key}; st.nextGeneration.push(pg); Ok(()) } // this is the body of writeParentNodes let mut st = ParentState {nextGeneration:Vec::new(),sofar: 0,blk:startingBlk,}; let mut items = Vec::new(); let mut overflows 
= HashMap::new(); let count_children = children.len(); // deal with all the children except the last one for pair in children.drain(0 .. count_children-1) { let pgnum = pair.page; let neededEitherWay = 1 + Varint::SpaceNeededFor(pair.key.len() as u64) + Varint::SpaceNeededFor(pgnum as u64); let neededForInline = neededEitherWay + pair.key.len(); let neededForOverflow = neededEitherWay + SIZE_32; let couldBeRoot = st.nextGeneration.is_empty(); let available = calcAvailable(st.sofar, couldBeRoot, pgsz); let fitsInline = available >= neededForInline; let wouldFitInlineOnNextPage = (pgsz - PARENT_PAGE_OVERHEAD) >= neededForInline; let fitsOverflow = available >= neededForOverflow; let writeThisPage = (! fitsInline) && (wouldFitInlineOnNextPage || (! fitsOverflow)); if writeThisPage { // assert sofar > 0 // we need to make a copy of this key because writeParentPage needs to own one, // but we still need to put this pair in the items (below). let mut copy_key = vec![0; pair.key.len()].into_boxed_slice(); copy_key.clone_from_slice(&pair.key); try!(writeParentPage(&mut st, &mut items, &overflows, pair.page, copy_key, false, pb, lastLeaf, fs, pageManager, pgsz, &mut *token, firstLeaf)); assert!(items.is_empty()); } if st.sofar == 0 { st.sofar = PARENT_PAGE_OVERHEAD; assert!(items.is_empty()); } if calcAvailable(st.sofar, st.nextGeneration.is_empty(), pgsz) >= neededForInline { st.sofar = st.sofar + neededForInline; } else { let keyOverflowFirstPage = st.blk.firstPage; let (_,newBlk) = try!(writeOverflow(st.blk, &mut &*pair.key, pageManager, fs)); st.sofar = st.sofar + neededForOverflow; st.blk = newBlk; // items.len() is the index that this pair is about to get, just below overflows.insert(items.len(),keyOverflowFirstPage); } items.push(pair); } assert!(children.len() == 1); let isRootNode = st.nextGeneration.is_empty(); let pgitem {page: pgnum, key: key} = children.remove(0); assert!(children.is_empty()); try!(writeParentPage(&mut st, &mut items, &overflows, pgnum, key, 
isRootNode, pb, lastLeaf, fs, pageManager, pgsz, &mut *token, firstLeaf)); Ok((st.blk,st.nextGeneration)) } // this is the body of Create let pgsz = pageManager.PageSize(); let mut pb = PageBuilder::new(pgsz); let mut token = try!(pageManager.Begin()); let startingBlk = try!(pageManager.GetBlock(&mut token)); try!(utils::SeekPage(fs, pgsz, startingBlk.firstPage)); // TODO this is a buffer just for the purpose of being reused // in cases where the blob is provided as a stream, and we need // read a bit of it to figure out if it might fit inline rather // than overflow. let mut vbuf = vec![0;pgsz].into_boxed_slice(); let (blkAfterLeaves, leaves, firstLeaf) = try!(writeLeaves(startingBlk, pageManager, source, &mut vbuf, fs, &mut pb, &mut token)); // all the leaves are written. // now write the parent pages. // maybe more than one level of them. // keep writing until we have written a level which has only one node, // which is the root node. let lastLeaf = leaves[leaves.len()-1].page; let rootPage = { let mut blk = blkAfterLeaves; let mut children = leaves; loop { let (newBlk, newChildren) = try!(writeParentNodes(blk, &mut children, pgsz, fs, pageManager, &mut token, lastLeaf, firstLeaf, &mut pb)); assert!(children.is_empty()); blk = newBlk; children = newChildren; if children.len()==1 { break; } } children[0].page }; let g = try!(pageManager.End(token, rootPage)); Ok((g,rootPage)) } struct myOverflowReadStream { fs: File, len: usize, // same type as ValueLength(), max len of a single value firstPage: PageNum, // TODO will be needed later for Seek trait buf: Box<[u8]>, currentPage: PageNum, sofarOverall: usize, sofarThisPage: usize, firstPageInBlock: PageNum, offsetToLastPageInThisBlock: PageNum, countRegularDataPagesInBlock: PageNum, boundaryPageNumber: PageNum, bytesOnThisPage: usize, offsetOnThisPage: usize, } impl myOverflowReadStream { fn new(path: &str, pgsz: usize, firstPage: PageNum, len: usize) -> Result<myOverflowReadStream> { let f = try!(OpenOptions::new() 
            .read(true)
            .open(path));
        let mut res =
            myOverflowReadStream {
                fs: f,
                len: len,
                firstPage: firstPage,
                buf: vec![0;pgsz].into_boxed_slice(),
                currentPage: firstPage,
                sofarOverall: 0,
                sofarThisPage: 0,
                firstPageInBlock: 0,
                offsetToLastPageInThisBlock: 0, // add to firstPageInBlock to get the last one
                countRegularDataPagesInBlock: 0,
                boundaryPageNumber: 0,
                bytesOnThisPage: 0,
                offsetOnThisPage: 0,
            };
        // Prime the stream: read the first page so the block-layout fields
        // above get their real values before the first Read() call.
        try!(res.ReadFirstPage());
        Ok(res)
    }

    // Total length in bytes of the overflowed value this stream yields.
    fn len(&self) -> usize {
        self.len
    }

    // TODO consider supporting Seek trait

    // Read the page at self.currentPage into self.buf and work out how many
    // payload bytes it holds and at what offset they start.  The layout
    // depends on the page's role within its block (mirrors the writer in
    // writeOverflow above).
    fn ReadPage(&mut self) -> Result<()> {
        try!(utils::SeekPage(&mut self.fs, self.buf.len(), self.currentPage));
        try!(utils::ReadFully(&mut self.fs, &mut *self.buf));
        // assert PageType is OVERFLOW
        self.sofarThisPage = 0;
        if self.currentPage == self.firstPageInBlock {
            // first page of a block: 2-byte header up front, lastInt32 at the end
            self.bytesOnThisPage = self.buf.len() - (2 + SIZE_32);
            self.offsetOnThisPage = 2;
        } else if self.currentPage == self.boundaryPageNumber {
            // boundary page: headerless, but the trailing lastInt32 is reserved
            // for the pointer to the next block
            self.bytesOnThisPage = self.buf.len() - SIZE_32;
            self.offsetOnThisPage = 0;
        } else {
            // assert currentPage > firstPageInBlock
            // assert currentPage < boundaryPageNumber OR boundaryPageNumber = 0
            // regular data page: the whole page is payload
            self.bytesOnThisPage = self.buf.len();
            self.offsetOnThisPage = 0;
        }
        Ok(())
    }

    // The u32 stored (big-endian, per read_u32_be) in the last four bytes
    // of the current page.
    fn GetLastInt32(&self) -> u32 {
        let at = self.buf.len() - SIZE_32;
        read_u32_be(&self.buf[at ..
at+4])
    }

    // Page type byte is the first byte of the page.
    fn PageType(&self) -> Result<PageType> {
        PageType::from_u8(self.buf[0])
    }

    // Test a bit in the page-flags byte (second byte of the page).
    fn CheckPageFlag(&self, f: u8) -> bool {
        0 != (self.buf[1] & f)
    }

    // Read the first page of a block and decode the block-level layout from
    // its header: where the boundary page is (if any) and how many regular
    // data pages the block contains.
    fn ReadFirstPage(&mut self) -> Result<()> {
        self.firstPageInBlock = self.currentPage;
        try!(self.ReadPage());
        if try!(self.PageType()) != (PageType::OVERFLOW_NODE) {
            return Err(LsmError::CorruptFile("first overflow page has invalid page type"));
        }
        if self.CheckPageFlag(PageFlag::FLAG_BOUNDARY_NODE) {
            // first page landed on a boundary node
            // lastInt32 is the next page number, which we'll fetch later
            self.boundaryPageNumber = self.currentPage;
            self.offsetToLastPageInThisBlock = 0;
            self.countRegularDataPagesInBlock = 0;
        } else {
            // lastInt32 of a non-boundary first page is the offset (in pages)
            // from this page to the last used page in the block
            self.offsetToLastPageInThisBlock = self.GetLastInt32();
            if self.CheckPageFlag(PageFlag::FLAG_ENDS_ON_BOUNDARY) {
                // the last page of the block is a boundary page, so it does
                // not count as a regular data page
                self.boundaryPageNumber = self.currentPage + self.offsetToLastPageInThisBlock;
                self.countRegularDataPagesInBlock = self.offsetToLastPageInThisBlock - 1;
            } else {
                self.boundaryPageNumber = 0;
                self.countRegularDataPagesInBlock = self.offsetToLastPageInThisBlock;
            }
        }
        Ok(())
    }

    // Copy up to `wanted` bytes of the overflowed value into ba[offset..],
    // returning how many bytes were copied (0 means end of value).  When a
    // whole run of regular data pages is wanted, it is read directly into
    // the caller's buffer, bypassing self.buf (the "direct" path below).
    fn Read(&mut self, ba: &mut [u8], offset: usize, wanted: usize) -> Result<usize> {
        if self.sofarOverall >= self.len {
            // the entire value has already been delivered
            Ok(0)
        } else {
            let mut direct = false;
            if self.sofarThisPage >= self.bytesOnThisPage {
                if self.currentPage == self.boundaryPageNumber {
                    // follow the boundary pointer to the first page of the next block
                    self.currentPage = self.GetLastInt32();
                    try!(self.ReadFirstPage());
                } else {
                    // we need a new page.  and if it's a full data page,
                    // and if wanted is big enough to take all of it, then
                    // we want to read (at least) it directly into the
                    // buffer provided by the caller.  we already know
                    // this candidate page cannot be the first page in a
                    // block.
let maybeDataPage = self.currentPage + 1; let isDataPage = if self.boundaryPageNumber > 0 { ((self.len - self.sofarOverall) >= self.buf.len()) && (self.countRegularDataPagesInBlock > 0) && (maybeDataPage > self.firstPageInBlock) && (maybeDataPage < self.boundaryPageNumber) } else { ((self.len - self.sofarOverall) >= self.buf.len()) && (self.countRegularDataPagesInBlock > 0) && (maybeDataPage > self.firstPageInBlock) && (maybeDataPage <= (self.firstPageInBlock + self.countRegularDataPagesInBlock)) }; if isDataPage && (wanted >= self.buf.len()) { // assert (currentPage + 1) > firstPageInBlock // // don't increment currentPage here because below, we will // calculate how many pages we actually want to do. direct = true; self.bytesOnThisPage = self.buf.len(); self.sofarThisPage = 0; self.offsetOnThisPage = 0; } else { self.currentPage = self.currentPage + 1; try!(self.ReadPage()); } } } if direct { // currentPage has not been incremented yet // // skip the buffer. note, therefore, that the contents of the // buffer are "invalid" in that they do not correspond to currentPage // let numPagesWanted = (wanted / self.buf.len()) as PageNum; // assert countRegularDataPagesInBlock > 0 let lastDataPageInThisBlock = self.firstPageInBlock + self.countRegularDataPagesInBlock; let theDataPage = self.currentPage + 1; let numPagesAvailable = if self.boundaryPageNumber>0 { self.boundaryPageNumber - theDataPage } else { lastDataPageInThisBlock - theDataPage + 1 }; let numPagesToFetch = std::cmp::min(numPagesWanted, numPagesAvailable) as PageNum; let bytesToFetch = { let bytesToFetch = (numPagesToFetch as usize) * self.buf.len(); let available = self.len - self.sofarOverall; if bytesToFetch > available { available } else { bytesToFetch } }; // assert bytesToFetch <= wanted try!(utils::SeekPage(&mut self.fs, self.buf.len(), theDataPage)); try!(utils::ReadFully(&mut self.fs, &mut ba[offset .. 
offset + bytesToFetch])); self.sofarOverall = self.sofarOverall + bytesToFetch; self.currentPage = self.currentPage + numPagesToFetch; self.sofarThisPage = self.buf.len(); Ok(bytesToFetch) } else { let available = std::cmp::min(self.bytesOnThisPage - self.sofarThisPage, self.len - self.sofarOverall); let num = std::cmp::min(available, wanted); for i in 0 .. num { ba[offset+i] = self.buf[self.offsetOnThisPage + self.sofarThisPage + i]; } self.sofarOverall = self.sofarOverall + num; self.sofarThisPage = self.sofarThisPage + num; Ok(num) } } } } impl Read for myOverflowReadStream { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { let len = buf.len(); match self.Read(buf, 0, len) { Ok(v) => Ok(v), Err(e) => { // this interface requires io::Result, so we shoehorn the others into it match e { LsmError::Io(e) => Err(e), _ => Err(std::io::Error::new(std::io::ErrorKind::Other, e.description())), } }, } } } fn readOverflow(path: &str, pgsz: usize, firstPage: PageNum, buf: &mut [u8]) -> Result<usize> { let mut ostrm = try!(myOverflowReadStream::new(path, pgsz, firstPage, buf.len())); let res = try!(utils::ReadFully(&mut ostrm, buf)); Ok(res) } struct SegmentCursor<'a> { path: String, // TODO in the f# version, these three were a closure. // it would be nice to make it work that way again. // so that this code would not have specific knowledge // of the InnerPart type. 
inner: &'a InnerPart, segnum: SegmentNum, csrnum: u64, blocks: Vec<PageBlock>, // TODO will be needed later for stray checking fs: File, len: u64, rootPage: PageNum, pr: PageBuffer, currentPage: PageNum, leafKeys: Vec<usize>, previousLeaf: PageNum, currentKey: Option<usize>, prefix: Option<Box<[u8]>>, firstLeaf: PageNum, lastLeaf: PageNum, } impl<'a> SegmentCursor<'a> { fn new(path: &str, pgsz: usize, rootPage: PageNum, blocks: Vec<PageBlock>, inner: &'a InnerPart, segnum: SegmentNum, csrnum: u64 ) -> Result<SegmentCursor<'a>> { // TODO consider not passsing in the path, and instead, // making the cursor call back to inner.OpenForReading... let mut f = try!(OpenOptions::new() .read(true) .open(path)); // TODO the len is used for checking to make sure we don't stray // to far. This should probably be done with the blocks provided // by the caller, not by looking at the full length of the file, // which this cursor shouldn't care about. let len = try!(seek_len(&mut f)); let mut res = SegmentCursor { path: String::from_str(path), fs: f, blocks: blocks, inner: inner, segnum: segnum, csrnum: csrnum, len: len, rootPage: rootPage, pr: PageBuffer::new(pgsz), currentPage: 0, leafKeys: Vec::new(), previousLeaf: 0, currentKey: None, prefix: None, firstLeaf: 0, // temporary lastLeaf: 0, // temporary }; if ! try!(res.setCurrentPage(rootPage)) { // TODO fix this error. or assert, because we previously verified // that the root page was in the block list we were given. return Err(LsmError::Misc("failed to read root page")); } let pt = try!(res.pr.PageType()); if pt == PageType::LEAF_NODE { res.firstLeaf = rootPage; res.lastLeaf = rootPage; } else if pt == PageType::PARENT_NODE { if ! 
res.pr.CheckPageFlag(PageFlag::FLAG_ROOT_NODE) { return Err(LsmError::CorruptFile("root page lacks flag")); } res.firstLeaf = res.pr.GetSecondToLastInt32() as PageNum; res.lastLeaf = res.pr.GetLastInt32() as PageNum; } else { return Err(LsmError::CorruptFile("root page has invalid page type")); } Ok(res) } fn resetLeaf(&mut self) { self.leafKeys.clear(); self.previousLeaf = 0; self.currentKey = None; self.prefix = None; } fn setCurrentPage(&mut self, pgnum: PageNum) -> Result<bool> { // TODO use self.blocks to make sure we are not straying out of bounds. // TODO so I think this function actually should be Result<()>. // it used to return Ok(false) in situations that I think should // actually have been errors. not 100% sure yet. still trying // to verify all the cases. // TODO if currentPage = pgnum already... self.currentPage = pgnum; self.resetLeaf(); if 0 == self.currentPage { Err(LsmError::InvalidPageNumber) //Ok(false) } else { // refuse to go to a page beyond the end of the stream // TODO is this the right place for this check? 
let pos = (self.currentPage - 1) as u64 * self.pr.PageSize() as u64; if pos + self.pr.PageSize() as u64 <= self.len { try!(utils::SeekPage(&mut self.fs, self.pr.PageSize(), self.currentPage)); try!(self.pr.Read(&mut self.fs)); Ok(true) } else { Err(LsmError::InvalidPageNumber) //Ok(false) } } } fn nextInLeaf(&mut self) -> bool { match self.currentKey { Some(cur) => { if (cur+1) < self.leafKeys.len() { self.currentKey = Some(cur + 1); true } else { false } }, None => { false }, } } fn prevInLeaf(&mut self) -> bool { match self.currentKey { Some(cur) => { if cur > 0 { self.currentKey = Some(cur - 1); true } else { false } }, None => { false }, } } fn skipKey(&self, cur: &mut usize) { let kflag = self.pr.GetByte(cur); let klen = self.pr.GetVarint(cur) as usize; if 0 == (kflag & ValueFlag::FLAG_OVERFLOW) { let prefixLen = match self.prefix { Some(ref a) => a.len(), None => 0 }; *cur = *cur + (klen - prefixLen); } else { *cur = *cur + SIZE_32; } } fn skipValue(&self, cur: &mut usize) { let vflag = self.pr.GetByte(cur); if 0 != (vflag & ValueFlag::FLAG_TOMBSTONE) { () } else { let vlen = self.pr.GetVarint(cur) as usize; if 0 != (vflag & ValueFlag::FLAG_OVERFLOW) { *cur = *cur + SIZE_32; } else { *cur = *cur + vlen; } } } fn readLeaf(&mut self) -> Result<()> { self.resetLeaf(); let mut cur = 0; let pt = try!(PageType::from_u8(self.pr.GetByte(&mut cur))); if pt != PageType::LEAF_NODE { return Err(LsmError::CorruptFile("leaf has invalid page type")); } self.pr.GetByte(&mut cur); self.previousLeaf = self.pr.GetInt32(&mut cur) as PageNum; let prefixLen = self.pr.GetByte(&mut cur) as usize; if prefixLen > 0 { let mut a = vec![0;prefixLen].into_boxed_slice(); self.pr.GetIntoArray(&mut cur, &mut a); self.prefix = Some(a); } else { self.prefix = None; } let countLeafKeys = self.pr.GetInt16(&mut cur) as usize; // assert countLeafKeys>0 self.leafKeys.truncate(countLeafKeys); while self.leafKeys.len() < countLeafKeys { self.leafKeys.push(0); } for i in 0 .. 
countLeafKeys { self.leafKeys[i] = cur; self.skipKey(&mut cur); self.skipValue(&mut cur); } Ok(()) } fn keyInLeaf2(&'a self, n: usize) -> Result<KeyRef<'a>> { let mut cur = self.leafKeys[n as usize]; let kflag = self.pr.GetByte(&mut cur); let klen = self.pr.GetVarint(&mut cur) as usize; if 0 == (kflag & ValueFlag::FLAG_OVERFLOW) { match self.prefix { Some(ref a) => { Ok(KeyRef::Prefixed(&a, self.pr.get_slice(cur, klen - a.len()))) }, None => { Ok(KeyRef::Array(self.pr.get_slice(cur, klen))) }, } } else { let pgnum = self.pr.GetInt32(&mut cur) as PageNum; let mut ostrm = try!(myOverflowReadStream::new(&self.path, self.pr.PageSize(), pgnum, klen)); let mut x_k = Vec::new(); try!(ostrm.read_to_end(&mut x_k)); let x_k = x_k.into_boxed_slice(); Ok(KeyRef::Overflowed(x_k)) } } fn keyInLeaf(&self, n: usize) -> Result<Box<[u8]>> { let mut cur = self.leafKeys[n as usize]; let kflag = self.pr.GetByte(&mut cur); let klen = self.pr.GetVarint(&mut cur) as usize; let mut res = vec![0;klen].into_boxed_slice(); if 0 == (kflag & ValueFlag::FLAG_OVERFLOW) { match self.prefix { Some(ref a) => { let prefixLen = a.len(); for i in 0 .. prefixLen { res[i] = a[i]; } self.pr.GetIntoArray(&mut cur, &mut res[prefixLen .. klen]); Ok(res) }, None => { self.pr.GetIntoArray(&mut cur, &mut res); Ok(res) }, } } else { let pgnum = self.pr.GetInt32(&mut cur) as PageNum; try!(readOverflow(&self.path, self.pr.PageSize(), pgnum, &mut res)); Ok(res) } } fn compareKeyInLeaf(&self, n: usize, other: &[u8]) -> Result<Ordering> { let mut cur = self.leafKeys[n as usize]; let kflag = self.pr.GetByte(&mut cur); let klen = self.pr.GetVarint(&mut cur) as usize; if 0 == (kflag & ValueFlag::FLAG_OVERFLOW) { let res = match self.prefix { Some(ref a) => { self.pr.CompareWithPrefix(cur, a, klen, other) }, None => { self.pr.Compare(cur, klen, other) }, }; Ok(res) } else { // TODO this could be more efficient. we could compare the key // in place in the overflow without fetching the entire thing. 
// TODO overflowed keys are not prefixed. should they be? let pgnum = self.pr.GetInt32(&mut cur) as PageNum; let mut k = vec![0;klen].into_boxed_slice(); try!(readOverflow(&self.path, self.pr.PageSize(), pgnum, &mut k)); let res = bcmp::Compare(&*k, other); Ok(res) } } fn compare_two(x: &SegmentCursor, y: &SegmentCursor) -> Result<Ordering> { fn get_info(c: &SegmentCursor) -> Result<(usize, bool, usize, usize)> { match c.currentKey { None => Err(LsmError::CursorNotValid), Some(n) => { let mut cur = c.leafKeys[n as usize]; let kflag = c.pr.GetByte(&mut cur); let klen = c.pr.GetVarint(&mut cur) as usize; let overflowed = 0 != (kflag & ValueFlag::FLAG_OVERFLOW); Ok((n, overflowed, cur, klen)) }, } } let (x_n, x_over, x_cur, x_klen) = try!(get_info(x)); let (y_n, y_over, y_cur, y_klen) = try!(get_info(y)); if x_over || y_over { // if either of these keys is overflowed, don't bother // trying to do anything clever. just read both keys // into memory and compare them. let x_k = try!(x.keyInLeaf(x_n)); let y_k = try!(y.keyInLeaf(y_n)); Ok(bcmp::Compare(&x_k, &y_k)) } else { match (&x.prefix, &y.prefix) { (&Some(ref x_p), &Some(ref y_p)) => { let x_k = x.pr.get_slice(x_cur, x_klen - x_p.len()); let y_k = y.pr.get_slice(y_cur, y_klen - y_p.len()); Ok(KeyRef::compare_px_py(x_p, x_k, y_p, y_k)) }, (&Some(ref x_p), &None) => { let x_k = x.pr.get_slice(x_cur, x_klen - x_p.len()); let y_k = y.pr.get_slice(y_cur, y_klen); Ok(KeyRef::compare_px_y(x_p, x_k, y_k)) }, (&None, &Some(ref y_p)) => { let x_k = x.pr.get_slice(x_cur, x_klen); let y_k = y.pr.get_slice(y_cur, y_klen - y_p.len()); Ok(KeyRef::compare_x_py(x_k, y_p, y_k)) }, (&None, &None) => { let x_k = x.pr.get_slice(x_cur, x_klen); let y_k = y.pr.get_slice(y_cur, y_klen); Ok(bcmp::Compare(&x_k, &y_k)) }, } } } fn searchLeaf(&mut self, k: &KeyRef, min:usize, max:usize, sop:SeekOp, le: Option<usize>, ge: Option<usize>) -> Result<(Option<usize>,bool)> { if max < min { match sop { SeekOp::SEEK_EQ => Ok((None, false)), 
                SeekOp::SEEK_LE => Ok((le, false)),
                SeekOp::SEEK_GE => Ok((ge, false)),
            }
        } else {
            let mid = (max + min) / 2;
            // assert mid >= 0
            let cmp = {
                let q = try!(self.keyInLeaf2(mid));
                KeyRef::cmp(&q, k)
            };
            match cmp {
                Ordering::Equal => Ok((Some(mid), true)),
                Ordering::Less => self.searchLeaf(k, (mid+1), max, sop, Some(mid), ge),
                Ordering::Greater =>
                    // we could just recurse with mid-1, but that would overflow if
                    // mid is 0, so we catch that case here.
                    if mid==0 {
                        match sop {
                            SeekOp::SEEK_EQ => Ok((None, false)),
                            SeekOp::SEEK_LE => Ok((le, false)),
                            SeekOp::SEEK_GE => Ok((Some(mid), false)),
                        }
                    } else {
                        self.searchLeaf(k, min, (mid-1), sop, le, Some(mid))
                    },
            }
        }
    }

    /// Parse the parent page currently held in self.pr into its child page
    /// pointers and separator keys.  A parent with `count` keys carries
    /// `count+1` child pointers.  Overflowed separator keys are read fully
    /// into memory.
    fn readParentPage(&mut self) -> Result<(Vec<PageNum>, Vec<KeyRef>)> {
        let mut cur = 0;
        let pt = try!(PageType::from_u8(self.pr.GetByte(&mut cur)));
        if pt != PageType::PARENT_NODE {
            return Err(LsmError::CorruptFile("parent page has invalid page type"));
        }
        cur = cur + 1; // page flags
        let count = self.pr.GetInt16(&mut cur);
        let mut ptrs = Vec::new();
        let mut keys = Vec::new();
        // one more pointer than keys
        for _ in 0 .. count+1 {
            ptrs.push(self.pr.GetVarint(&mut cur) as PageNum);
        }
        for _ in 0 .. count {
            let kflag = self.pr.GetByte(&mut cur);
            let klen = self.pr.GetVarint(&mut cur) as usize;
            if 0 == (kflag & ValueFlag::FLAG_OVERFLOW) {
                // note: parent keys are not prefix-compressed
                keys.push(KeyRef::Array(self.pr.get_slice(cur, klen)));
                cur = cur + klen;
            } else {
                let firstPage = self.pr.GetInt32(&mut cur) as PageNum;
                let pgsz = self.pr.PageSize();
                let mut ostrm = try!(myOverflowReadStream::new(&self.path, pgsz, firstPage, klen));
                let mut x_k = Vec::new();
                try!(ostrm.read_to_end(&mut x_k));
                let x_k = x_k.into_boxed_slice();
                keys.push(KeyRef::Overflowed(x_k));
            }
        }
        Ok((ptrs,keys))
    }

    // this is used when moving forward through the leaf pages.
    // we need to skip any overflows. when moving backward,
    // this is not necessary, because each leaf has a pointer to
    // the leaf before it.
    // TODO it's unfortunate that Next is the slower operation
    // when it is far more common than Prev. OTOH, the pages
    // are written as we stream through a set of kvp objects,
    // and we don't want to rewind, and we want to write each
    // page only once, and we want to keep the minimum amount
    // of stuff in memory as we go. and this code only causes
    // a perf difference if there are overflow pages between
    // the leaves.
    fn searchForwardForLeaf(&mut self) -> Result<bool> {
        let pt = try!(self.pr.PageType());
        if pt == PageType::LEAF_NODE {
            Ok(true)
        } else if pt == PageType::PARENT_NODE {
            // if we bump into a parent node, that means there are
            // no more leaves.
            Ok(false)
        } else {
            // current page is an overflow page
            let lastInt32 = self.pr.GetLastInt32() as PageNum;
            //
            // an overflow page has a value in its LastInt32 which
            // is one of two things.
            //
            // if it's a boundary node, it's the page number of the
            // next page in the segment.
            //
            // otherwise, it's the number of pages to skip ahead.
            // this skip might take us to whatever follows this
            // overflow (which could be a leaf or a parent or
            // another overflow), or it might just take us to a
            // boundary page (in the case where the overflow didn't
            // fit). it doesn't matter. we just skip ahead.
// if self.pr.CheckPageFlag(PageFlag::FLAG_BOUNDARY_NODE) { if try!(self.setCurrentPage(lastInt32)) { self.searchForwardForLeaf() } else { Ok(false) } } else { let lastPage = self.currentPage + lastInt32; let endsOnBoundary = self.pr.CheckPageFlag(PageFlag::FLAG_ENDS_ON_BOUNDARY); if endsOnBoundary { if try!(self.setCurrentPage(lastPage)) { let next = self.pr.GetLastInt32() as PageNum; if try!(self.setCurrentPage(next)) { self.searchForwardForLeaf() } else { Ok(false) } } else { Ok(false) } } else { if try!(self.setCurrentPage(lastPage + 1)) { self.searchForwardForLeaf() } else { Ok(false) } } } } } fn leafIsValid(&self) -> bool { // TODO the bounds check of self.currentKey against self.leafKeys.len() could be an assert let ok = (!self.leafKeys.is_empty()) && (self.currentKey.is_some()) && (self.currentKey.expect("just did is_some") as usize) < self.leafKeys.len(); ok } fn search(&mut self, pg: PageNum, k: &KeyRef, sop:SeekOp) -> Result<SeekResult> { if try!(self.setCurrentPage(pg)) { let pt = try!(self.pr.PageType()); if PageType::LEAF_NODE == pt { try!(self.readLeaf()); let tmp_countLeafKeys = self.leafKeys.len(); let (newCur, equal) = try!(self.searchLeaf(k, 0, (tmp_countLeafKeys - 1), sop, None, None)); self.currentKey = newCur; if SeekOp::SEEK_EQ != sop { if ! self.leafIsValid() { // if LE or GE failed on a given page, we might need // to look at the next/prev leaf. 
if SeekOp::SEEK_GE == sop { let nextPage = if self.pr.CheckPageFlag(PageFlag::FLAG_BOUNDARY_NODE) { self.pr.GetLastInt32() as PageNum } else if self.currentPage == self.rootPage { 0 } else { self.currentPage + 1 }; if try!(self.setCurrentPage(nextPage)) && try!(self.searchForwardForLeaf()) { try!(self.readLeaf()); self.currentKey = Some(0); } } else { let tmp_previousLeaf = self.previousLeaf; if 0 == self.previousLeaf { self.resetLeaf(); } else if try!(self.setCurrentPage(tmp_previousLeaf)) { try!(self.readLeaf()); self.currentKey = Some(self.leafKeys.len() - 1); } } } } if self.currentKey.is_none() { Ok(SeekResult::Invalid) } else if equal { Ok(SeekResult::Equal) } else { Ok(SeekResult::Unequal) } } else if PageType::PARENT_NODE == pt { let next = { let (ptrs, keys) = try!(self.readParentPage()); match Self::searchInParentPage(k, &ptrs, &keys, 0) { Some(found) => found, None => ptrs[ptrs.len() - 1], } }; self.search(next, k, sop) } else { unreachable!(); } } else { Ok(SeekResult::Invalid) } } fn searchInParentPage(k: &KeyRef, ptrs: &Vec<PageNum>, keys: &Vec<KeyRef>, i: usize) -> Option<PageNum> { // TODO linear search? really? 
// TODO also, this doesn't need to be recursive if i < keys.len() { let cmp = KeyRef::cmp(k, &keys[i]); if cmp==Ordering::Greater { Self::searchInParentPage(k, ptrs, keys, i+1) } else { Some(ptrs[i]) } } else { None } } } impl<'a> Drop for SegmentCursor<'a> { fn drop(&mut self) { self.inner.cursor_dropped(self.segnum, self.csrnum); } } impl<'a> ICursor<'a> for SegmentCursor<'a> { fn IsValid(&self) -> bool { self.leafIsValid() } fn SeekRef(&mut self, k: &KeyRef, sop:SeekOp) -> Result<SeekResult> { let rootPage = self.rootPage; self.search(rootPage, k, sop) } fn Key(&self) -> Result<Box<[u8]>> { match self.currentKey { None => Err(LsmError::CursorNotValid), Some(currentKey) => self.keyInLeaf(currentKey), } } fn KeyRef(&'a self) -> Result<KeyRef<'a>> { match self.currentKey { None => Err(LsmError::CursorNotValid), Some(currentKey) => self.keyInLeaf2(currentKey), } } fn Value(&self) -> Result<Blob> { match self.currentKey { None => Err(LsmError::CursorNotValid), Some(currentKey) => { let mut pos = self.leafKeys[currentKey as usize]; self.skipKey(&mut pos); let vflag = self.pr.GetByte(&mut pos); if 0 != (vflag & ValueFlag::FLAG_TOMBSTONE) { Ok(Blob::Tombstone) } else { let vlen = self.pr.GetVarint(&mut pos) as usize; if 0 != (vflag & ValueFlag::FLAG_OVERFLOW) { let pgnum = self.pr.GetInt32(&mut pos) as PageNum; let strm = try!(myOverflowReadStream::new(&self.path, self.pr.PageSize(), pgnum, vlen)); Ok(Blob::Stream(box strm)) } else { let mut a = vec![0;vlen as usize].into_boxed_slice(); self.pr.GetIntoArray(&mut pos, &mut a); Ok(Blob::Array(a)) } } } } } fn ValueRef(&'a self) -> Result<ValueRef<'a>> { match self.currentKey { None => Err(LsmError::CursorNotValid), Some(currentKey) => { let mut pos = self.leafKeys[currentKey as usize]; self.skipKey(&mut pos); let vflag = self.pr.GetByte(&mut pos); if 0 != (vflag & ValueFlag::FLAG_TOMBSTONE) { Ok(ValueRef::Tombstone) } else { let vlen = self.pr.GetVarint(&mut pos) as usize; if 0 != (vflag & ValueFlag::FLAG_OVERFLOW) { let 
pgnum = self.pr.GetInt32(&mut pos) as PageNum; let strm = try!(myOverflowReadStream::new(&self.path, self.pr.PageSize(), pgnum, vlen)); Ok(ValueRef::Overflowed(vlen, box strm)) } else { Ok(ValueRef::Array(self.pr.get_slice(pos, vlen))) } } } } } fn ValueLength(&self) -> Result<Option<usize>> { match self.currentKey { None => Err(LsmError::CursorNotValid), Some(currentKey) => { let mut cur = self.leafKeys[currentKey as usize]; self.skipKey(&mut cur); let vflag = self.pr.GetByte(&mut cur); if 0 != (vflag & ValueFlag::FLAG_TOMBSTONE) { Ok(None) } else { let vlen = self.pr.GetVarint(&mut cur) as usize; Ok(Some(vlen)) } } } } fn KeyCompare(&self, k_other: &KeyRef) -> Result<Ordering> { let k_me = try!(self.KeyRef()); let c = KeyRef::cmp(&k_me, &k_other); Ok(c) } fn First(&mut self) -> Result<()> { let firstLeaf = self.firstLeaf; if try!(self.setCurrentPage(firstLeaf)) { try!(self.readLeaf()); self.currentKey = Some(0); } Ok(()) } fn Last(&mut self) -> Result<()> { let lastLeaf = self.lastLeaf; if try!(self.setCurrentPage(lastLeaf)) { try!(self.readLeaf()); self.currentKey = Some(self.leafKeys.len() - 1); } Ok(()) } fn Next(&mut self) -> Result<()> { if ! self.nextInLeaf() { let nextPage = if self.pr.CheckPageFlag(PageFlag::FLAG_BOUNDARY_NODE) { self.pr.GetLastInt32() as PageNum } else if try!(self.pr.PageType()) == PageType::LEAF_NODE { if self.currentPage == self.rootPage { 0 } else { self.currentPage + 1 } } else { 0 } ; if try!(self.setCurrentPage(nextPage)) && try!(self.searchForwardForLeaf()) { try!(self.readLeaf()); self.currentKey = Some(0); } } Ok(()) } fn Prev(&mut self) -> Result<()> { if ! self.prevInLeaf() { let previousLeaf = self.previousLeaf; if 0 == previousLeaf { self.resetLeaf(); } else if try!(self.setCurrentPage(previousLeaf)) { try!(self.readLeaf()); self.currentKey = Some(self.leafKeys.len() - 1); } } Ok(()) } } #[derive(Clone)] struct HeaderData { // TODO currentState is an ordered copy of segments.Keys. eliminate duplication? 
// or add assertions and tests to make sure they never get out of sync? we wish // we had a form of HashMap that kept track of ordering. currentState: Vec<SegmentNum>, segments: HashMap<SegmentNum,SegmentInfo>, headerOverflow: Option<PageBlock>, changeCounter: u64, mergeCounter: u64, } const HEADER_SIZE_IN_BYTES: usize = 4096; impl PendingSegment { fn new(num: SegmentNum) -> PendingSegment { PendingSegment {blockList: Vec::new(), segnum: num} } fn AddBlock(&mut self, b: PageBlock) { //println!("seg {:?} got block {:?}", self.segnum, b); let len = self.blockList.len(); if (! (self.blockList.is_empty())) && (b.firstPage == self.blockList[len-1].lastPage+1) { // note that by consolidating blocks here, the segment info list will // not have information about the fact that the two blocks were // originally separate. that's okay, since all we care about here is // keeping track of which pages are used. but the btree code itself // is still treating the last page of the first block as a boundary // page, even though its pointer to the next block goes to the very // next page, because its page manager happened to give it a block // which immediately follows the one it had. 
self.blockList[len-1].lastPage = b.lastPage; } else { self.blockList.push(b); } } fn End(mut self, lastPage: PageNum) -> (SegmentNum, Vec<PageBlock>, Option<PageBlock>) { let len = self.blockList.len(); let leftovers = { let givenLastPage = self.blockList[len-1].lastPage; if lastPage < givenLastPage { self.blockList[len-1].lastPage = lastPage; Some (PageBlock::new(lastPage+1, givenLastPage)) } else { None } }; // consume self return blockList (self.segnum, self.blockList, leftovers) } } fn readHeader<R>(fs: &mut R) -> Result<(HeaderData,usize,PageNum,SegmentNum)> where R : Read+Seek { fn read<R>(fs: &mut R) -> Result<PageBuffer> where R : Read { let mut pr = PageBuffer::new(HEADER_SIZE_IN_BYTES); let got = try!(pr.Read(fs)); if got < HEADER_SIZE_IN_BYTES { Err(LsmError::CorruptFile("invalid header")) } else { Ok(pr) } } fn parse<R>(pr: &PageBuffer, cur: &mut usize, fs: &mut R) -> Result<(HeaderData, usize)> where R : Read+Seek { fn readSegmentList(pr: &PageBuffer, cur: &mut usize) -> Result<(Vec<SegmentNum>,HashMap<SegmentNum,SegmentInfo>)> { fn readBlockList(prBlocks: &PageBuffer, cur: &mut usize) -> Vec<PageBlock> { let count = prBlocks.GetVarint(cur) as usize; let mut a = Vec::new(); for _ in 0 .. count { let firstPage = prBlocks.GetVarint(cur) as PageNum; let countPages = prBlocks.GetVarint(cur) as PageNum; // blocks are stored as firstPage/count rather than as // firstPage/lastPage, because the count will always be // smaller as a varint a.push(PageBlock::new(firstPage,firstPage + countPages - 1)); } a } let count = pr.GetVarint(cur) as usize; let mut a = Vec::new(); // TODO capacity count let mut m = HashMap::new(); // TODO capacity count for _ in 0 .. 
count { let g = pr.GetVarint(cur) as SegmentNum; a.push(g); let root = pr.GetVarint(cur) as PageNum; let age = pr.GetVarint(cur) as u32; let blocks = readBlockList(pr, cur); if !block_list_contains_page(&blocks, root) { return Err(LsmError::RootPageNotInSegmentBlockList); } let info = SegmentInfo {root:root,age:age,blocks:blocks}; m.insert(g,info); } Ok((a,m)) } // -------- let pgsz = pr.GetInt32(cur) as usize; let changeCounter = pr.GetVarint(cur); let mergeCounter = pr.GetVarint(cur); let lenSegmentList = pr.GetVarint(cur) as usize; let overflowed = pr.GetByte(cur) != 0u8; let (state, segments, blk) = if overflowed { let lenChunk1 = pr.GetInt32(cur) as usize; let lenChunk2 = lenSegmentList - lenChunk1; let firstPageChunk2 = pr.GetInt32(cur) as PageNum; let extraPages = lenChunk2 / pgsz + if (lenChunk2 % pgsz) != 0 { 1 } else { 0 }; let extraPages = extraPages as PageNum; let lastPageChunk2 = firstPageChunk2 + extraPages - 1; let mut pr2 = PageBuffer::new(lenSegmentList); // TODO chain? 
// copy from chunk1 into pr2 try!(pr2.ReadPart(fs, 0, lenChunk1)); // now get chunk2 and copy it in as well try!(utils::SeekPage(fs, pgsz, firstPageChunk2)); try!(pr2.ReadPart(fs, lenChunk1, lenChunk2)); let mut cur2 = 0; let (state, segments) = try!(readSegmentList(&pr2, &mut cur2)); (state, segments, Some (PageBlock::new(firstPageChunk2, lastPageChunk2))) } else { let (state,segments) = try!(readSegmentList(pr, cur)); (state, segments, None) }; let hd = HeaderData { currentState: state, segments: segments, headerOverflow: blk, changeCounter: changeCounter, mergeCounter: mergeCounter, }; Ok((hd, pgsz)) } fn calcNextPage(pgsz: usize, len: usize) -> PageNum { let numPagesSoFar = (if pgsz > len { 1 } else { len / pgsz }) as PageNum; numPagesSoFar + 1 } // -------- let len = try!(seek_len(fs)); if len > 0 { try!(fs.seek(SeekFrom::Start(0 as u64))); let pr = try!(read(fs)); let mut cur = 0; let (h, pgsz) = try!(parse(&pr, &mut cur, fs)); let nextAvailablePage = calcNextPage(pgsz, len as usize); let nextAvailableSegmentNum = match h.currentState.iter().max() { Some(n) => n+1, None => 1, }; Ok((h, pgsz, nextAvailablePage, nextAvailableSegmentNum)) } else { let defaultPageSize = DEFAULT_SETTINGS.DefaultPageSize; let h = HeaderData { segments: HashMap::new(), currentState: Vec::new(), headerOverflow: None, changeCounter: 0, mergeCounter: 0, }; let nextAvailablePage = calcNextPage(defaultPageSize, HEADER_SIZE_IN_BYTES); let nextAvailableSegmentNum = 1; Ok((h, defaultPageSize, nextAvailablePage, nextAvailableSegmentNum)) } } fn consolidateBlockList(blocks: &mut Vec<PageBlock>) { blocks.sort_by(|a,b| a.firstPage.cmp(&b.firstPage)); loop { if blocks.len()==1 { break; } let mut did = false; for i in 1 .. 
blocks.len() { if blocks[i-1].lastPage+1 == blocks[i].firstPage { blocks[i-1].lastPage = blocks[i].lastPage; blocks.remove(i); did = true; break; } } if !did { break; } } } fn invertBlockList(blocks: &Vec<PageBlock>) -> Vec<PageBlock> { let len = blocks.len(); let mut result = Vec::new(); for i in 0 .. len { result.push(blocks[i]); } result.sort_by(|a,b| a.firstPage.cmp(&b.firstPage)); for i in 0 .. len-1 { result[i].firstPage = result[i].lastPage+1; result[i].lastPage = result[i+1].firstPage-1; } result.remove(len-1); result } fn listAllBlocks(h: &HeaderData, segmentsInWaiting: &HashMap<SegmentNum,SegmentInfo>, pgsz: usize) -> Vec<PageBlock> { let headerBlock = PageBlock::new(1, (HEADER_SIZE_IN_BYTES / pgsz) as PageNum); let mut blocks = Vec::new(); fn grab(blocks: &mut Vec<PageBlock>, from: &HashMap<SegmentNum,SegmentInfo>) { for info in from.values() { for b in info.blocks.iter() { blocks.push(*b); } } } grab(&mut blocks, &h.segments); grab(&mut blocks, segmentsInWaiting); blocks.push(headerBlock); match h.headerOverflow { Some(blk) => blocks.push(blk), None => () } blocks } use std::sync::Mutex; struct NextSeg { nextSeg: SegmentNum, } struct Space { nextPage: PageNum, freeBlocks: Vec<PageBlock>, } struct SafeSegmentsInWaiting { segmentsInWaiting: HashMap<SegmentNum,SegmentInfo>, } struct SafeMergeStuff { merging: HashSet<SegmentNum>, pendingMerges: HashMap<SegmentNum,Vec<SegmentNum>>, } struct SafeHeader { // TODO one level too much nesting header: HeaderData, } struct SafeCursors { nextCursorNum: u64, cursors: HashMap<u64,SegmentNum>, zombies: HashMap<SegmentNum,SegmentInfo>, } struct InnerPart { path: String, pgsz: usize, settings: DbSettings, nextSeg: Mutex<NextSeg>, space: Mutex<Space>, // TODO should the header mutex be an RWLock? 
    // each piece of shared mutable state gets its own mutex so that
    // independent operations do not contend on a single lock
    header: Mutex<SafeHeader>,
    segmentsInWaiting: Mutex<SafeSegmentsInWaiting>,
    mergeStuff: Mutex<SafeMergeStuff>,
    cursors: Mutex<SafeCursors>,
}

/// Handle proving the holder owns the database write lock.  `inner` is
/// None until GetWriteLock() fills it in (see comment in db::new below).
pub struct WriteLock<'a> {
    inner: Option<&'a InnerPart>
}

impl<'a> WriteLock<'a> {
    /// Commit the given pending segments into the header's current state.
    pub fn commitSegments(&self, newSegs: Vec<SegmentNum>) -> Result<()> {
        self.inner.unwrap().commitSegments(newSegs)
    }

    /// Commit the result of a merge as the given new segment.
    pub fn commitMerge(&self, newSegNum:SegmentNum) -> Result<()> {
        self.inner.unwrap().commitMerge(newSegNum)
    }
}

// TODO rename this
pub struct db<'a> {
    inner: InnerPart,
    write_lock: Mutex<WriteLock<'a>>,
}

impl<'a> db<'a> {
    /// Open (or create) the database file at `path`, read its header, and
    /// build the in-memory free-block list from the header's segment info.
    pub fn new(path: String, settings : DbSettings) -> Result<db<'a>> {
        // NOTE(review): create(true) without write(true) -- on current Rust
        // this combination is rejected by OpenOptions; confirm the intent
        // and whether .write(true) is needed here.
        let mut f = try!(OpenOptions::new()
                .read(true)
                .create(true)
                .open(&path));
        let (header,pgsz,firstAvailablePage,nextAvailableSegmentNum) = try!(readHeader(&mut f));
        let segmentsInWaiting = HashMap::new();
        // free space = inverse of every block referenced by the header,
        // sorted largest-first so getBlock can serve big requests cheaply
        let mut blocks = listAllBlocks(&header, &segmentsInWaiting, pgsz);
        consolidateBlockList(&mut blocks);
        let mut freeBlocks = invertBlockList(&blocks);
        freeBlocks.sort_by(|a,b| b.count_pages().cmp(&a.count_pages()));
        let nextSeg = NextSeg {
            nextSeg: nextAvailableSegmentNum,
        };
        let space = Space {
            nextPage: firstAvailablePage,
            freeBlocks: freeBlocks,
        };
        let segmentsInWaiting = SafeSegmentsInWaiting {
            segmentsInWaiting: segmentsInWaiting,
        };
        let mergeStuff = SafeMergeStuff {
            merging: HashSet::new(),
            pendingMerges: HashMap::new(),
        };
        let header = SafeHeader {
            header: header,
        };
        let cursors = SafeCursors {
            nextCursorNum: 1,
            cursors: HashMap::new(),
            zombies: HashMap::new(),
        };
        let inner = InnerPart {
            path: path,
            pgsz: pgsz,
            settings: settings,
            header: Mutex::new(header),
            nextSeg: Mutex::new(nextSeg),
            space: Mutex::new(space),
            segmentsInWaiting: Mutex::new(segmentsInWaiting),
            mergeStuff: Mutex::new(mergeStuff),
            cursors: Mutex::new(cursors),
        };
        // WriteLock contains a reference to another part of
        // the struct it is in. So we wrap it in an option,
        // and set it to null for now. We set it later when
        // somebody actually asks for the lock.
let lck = WriteLock { inner: None }; let res = db { inner: inner, write_lock: Mutex::new(lck), }; Ok(res) } // TODO func to ask for the write lock without blocking? pub fn GetWriteLock(&'a self) -> Result<std::sync::MutexGuard<WriteLock<'a>>> { let mut lck = try!(self.write_lock.lock()); // set the inner reference lck.inner = Some(&self.inner); Ok(lck) } // the following methods are passthrus, exposing inner // stuff publicly. pub fn OpenCursor(&self) -> Result<LivingCursor> { self.inner.OpenCursor() } pub fn WriteSegmentFromSortedSequence<I>(&self, source: I) -> Result<SegmentNum> where I:Iterator<Item=Result<kvp>> { self.inner.WriteSegmentFromSortedSequence(source) } pub fn WriteSegment(&self, pairs: HashMap<Box<[u8]>,Box<[u8]>>) -> Result<SegmentNum> { self.inner.WriteSegment(pairs) } pub fn WriteSegment2(&self, pairs: HashMap<Box<[u8]>,Blob>) -> Result<SegmentNum> { self.inner.WriteSegment2(pairs) } pub fn merge(&self, level: u32, min: usize, max: Option<usize>) -> Result<Option<SegmentNum>> { self.inner.merge(level, min, max) } } // TODO this could be generic fn slice_within(sub: &[SegmentNum], within: &[SegmentNum]) -> Result<usize> { match within.iter().position(|&g| g == sub[0]) { Some(ndx_first) => { let count = sub.len(); if sub == &within[ndx_first .. ndx_first + count] { Ok(ndx_first) } else { Err(LsmError::Misc("not contiguous")) } }, None => { Err(LsmError::Misc("not contiguous")) }, } } impl InnerPart { fn cursor_dropped(&self, segnum: SegmentNum, csrnum: u64) { //println!("cursor_dropped"); let mut cursors = self.cursors.lock().unwrap(); // gotta succeed let seg = cursors.cursors.remove(&csrnum).expect("gotta be there"); assert_eq!(seg, segnum); match cursors.zombies.remove(&segnum) { Some(info) => { // TODO maybe allow this lock to fail with try_lock. the // worst that can happen is that these blocks don't get // reclaimed until some other day. 
let mut space = self.space.lock().unwrap(); // gotta succeed self.addFreeBlocks(&mut space, info.blocks); }, None => { }, } } fn getBlock(&self, space: &mut Space, specificSizeInPages: PageNum) -> PageBlock { if specificSizeInPages > 0 { if space.freeBlocks.is_empty() || specificSizeInPages > space.freeBlocks[0].count_pages() { let newBlk = PageBlock::new(space.nextPage, space.nextPage+specificSizeInPages-1); space.nextPage = space.nextPage + specificSizeInPages; newBlk } else { let headBlk = space.freeBlocks[0]; if headBlk.count_pages() > specificSizeInPages { // trim the block to size let blk2 = PageBlock::new(headBlk.firstPage, headBlk.firstPage+specificSizeInPages-1); space.freeBlocks[0].firstPage = space.freeBlocks[0].firstPage + specificSizeInPages; // TODO problem: the list is probably no longer sorted. is this okay? // is a re-sort of the list really worth it? blk2 } else { space.freeBlocks.remove(0); headBlk } } } else { if space.freeBlocks.is_empty() { let size = self.settings.PagesPerBlock; let newBlk = PageBlock::new(space.nextPage, space.nextPage+size-1) ; space.nextPage = space.nextPage + size; newBlk } else { let headBlk = space.freeBlocks[0]; space.freeBlocks.remove(0); headBlk } } } fn OpenForWriting(&self) -> io::Result<File> { OpenOptions::new() .read(true) .write(true) .open(&self.path) } fn OpenForReading(&self) -> io::Result<File> { OpenOptions::new() .read(true) .open(&self.path) } // this code should not be called in a release build. it helps // finds problems by zeroing out pages in blocks that // have been freed. fn stomp(&self, blocks:Vec<PageBlock>) -> Result<()> { let bad = vec![0;self.pgsz as usize].into_boxed_slice(); let mut fs = try!(OpenOptions::new() .read(true) .write(true) .open(&self.path)); for b in blocks { for x in b.firstPage .. 
b.lastPage+1 { try!(utils::SeekPage(&mut fs, self.pgsz, x)); try!(fs.write(&bad)); } } Ok(()) } fn addFreeBlocks(&self, space: &mut Space, blocks:Vec<PageBlock>) { // all additions to the freeBlocks list should happen here // by calling this function. // // the list is kept consolidated and sorted by size descending. // unfortunately this requires two sorts, and they happen here // inside a critical section. but the benefit is considered // worth the trouble. // TODO it is important that freeBlocks contains no overlaps. // add debug-only checks to verify? // TODO is there such a thing as a block that is so small we // don't want to bother with it? what about a single-page block? // should this be a configurable setting? // TODO if the last block of the file is free, consider just // moving nextPage back. for b in blocks { space.freeBlocks.push(b); } consolidateBlockList(&mut space.freeBlocks); space.freeBlocks.sort_by(|a,b| b.count_pages().cmp(&a.count_pages())); } // a stored segmentinfo for a segment is a single blob of bytes. // root page // age // number of pairs // each pair is startBlock,countBlocks // all in varints fn writeHeader(&self, st: &mut SafeHeader, space: &mut Space, fs: &mut File, mut hdr: HeaderData ) -> Result<Option<PageBlock>> { fn spaceNeededForSegmentInfo(info: &SegmentInfo) -> usize { let mut a = 0; for t in info.blocks.iter() { a = a + Varint::SpaceNeededFor(t.firstPage as u64); a = a + Varint::SpaceNeededFor(t.count_pages() as u64); } a = a + Varint::SpaceNeededFor(info.root as u64); a = a + Varint::SpaceNeededFor(info.age as u64); a = a + Varint::SpaceNeededFor(info.blocks.len() as u64); a } fn spaceForHeader(h: &HeaderData) -> usize { let mut a = Varint::SpaceNeededFor(h.currentState.len() as u64); // TODO use currentState with a lookup into h.segments instead? // should be the same, right? 
for (g,info) in h.segments.iter() { a = a + spaceNeededForSegmentInfo(&info) + Varint::SpaceNeededFor(*g); } a } fn buildSegmentList(h: &HeaderData) -> PageBuilder { let space = spaceForHeader(h); let mut pb = PageBuilder::new(space); // TODO format version number pb.PutVarint(h.currentState.len() as u64); for g in h.currentState.iter() { pb.PutVarint(*g); match h.segments.get(&g) { Some(info) => { pb.PutVarint(info.root as u64); pb.PutVarint(info.age as u64); pb.PutVarint(info.blocks.len() as u64); // we store PageBlock as first/count instead of first/last, since the // count will always compress better as a varint. for t in info.blocks.iter() { pb.PutVarint(t.firstPage as u64); pb.PutVarint(t.count_pages() as u64); } }, None => panic!("segment num in currentState but not in segments") } } assert!(0 == pb.Available()); pb } let mut pb = PageBuilder::new(HEADER_SIZE_IN_BYTES); pb.PutInt32(self.pgsz as u32); pb.PutVarint(hdr.changeCounter); pb.PutVarint(hdr.mergeCounter); let pbSegList = buildSegmentList(&hdr); let buf = pbSegList.Buffer(); pb.PutVarint(buf.len() as u64); let headerOverflow = if pb.Available() >= (buf.len() + 1) { pb.PutByte(0u8); pb.PutArray(buf); None } else { pb.PutByte(1u8); let fits = pb.Available() - 4 - 4; let extra = buf.len() - fits; let extraPages = extra / self.pgsz + if (extra % self.pgsz) != 0 { 1 } else { 0 }; //printfn "extra pages: %d" extraPages let blk = self.getBlock(space, extraPages as PageNum); try!(utils::SeekPage(fs, self.pgsz, blk.firstPage)); try!(fs.write(&buf[fits .. buf.len()])); pb.PutInt32(fits as u32); pb.PutInt32(blk.firstPage); pb.PutArray(&buf[0 .. 
fits]); Some(blk) }; try!(fs.seek(SeekFrom::Start(0))); try!(pb.Write(fs)); try!(fs.flush()); let oldHeaderOverflow = hdr.headerOverflow; hdr.headerOverflow = headerOverflow; st.header = hdr; Ok((oldHeaderOverflow)) } // TODO this function looks for the segment in the header.segments, // which means it cannot be used to open a cursor on a pendingSegment, // which we think we might need in the future. fn getCursor(&self, st: &SafeHeader, g: SegmentNum ) -> Result<SegmentCursor> { match st.header.segments.get(&g) { None => Err(LsmError::Misc("getCursor: segment not found")), Some(seg) => { let rootPage = seg.root; let mut cursors = try!(self.cursors.lock()); let csrnum = cursors.nextCursorNum; let csr = try!(SegmentCursor::new(&self.path, self.pgsz, rootPage, seg.blocks.clone(), &self, g, csrnum)); cursors.nextCursorNum = cursors.nextCursorNum + 1; let was = cursors.cursors.insert(csrnum, g); assert!(was.is_none()); Ok(csr) } } } // TODO we also need a way to open a cursor on segments in waiting fn OpenCursor(&self) -> Result<LivingCursor> { // TODO this cursor needs to expose the changeCounter and segment list // on which it is based. for optimistic writes. caller can grab a cursor, // do their writes, then grab the writelock, and grab another cursor, then // compare the two cursors to see if anything important changed. if not, // commit their writes. if so, nevermind the written segments and start over. 
let st = try!(self.header.lock()); let mut clist = Vec::new(); for g in st.header.currentState.iter() { clist.push(try!(self.getCursor(&*st, *g))); } let mc = MultiCursor::Create(clist); let lc = LivingCursor::Create(mc); Ok(lc) } fn commitSegments(&self, newSegs: Vec<SegmentNum> ) -> Result<()> { assert_eq!(newSegs.len(), newSegs.iter().map(|g| *g).collect::<HashSet<SegmentNum>>().len()); let mut st = try!(self.header.lock()); let mut waiting = try!(self.segmentsInWaiting.lock()); let mut space = try!(self.space.lock()); assert!({ let mut ok = true; for newSegNum in newSegs.iter() { ok = st.header.currentState.iter().position(|&g| g == *newSegNum).is_none(); if !ok { break; } } ok }); // self.segmentsInWaiting must contain one seg for each segment num in newSegs. // we want those entries to move out and move into the header, currentState // and segments. This means taking ownership of those SegmentInfos. But // the others we want to leave. let mut newHeader = st.header.clone(); let mut newSegmentsInWaiting = waiting.segmentsInWaiting.clone(); for g in newSegs.iter() { match newSegmentsInWaiting.remove(&g) { Some(info) => { newHeader.segments.insert(*g,info); }, None => { return Err(LsmError::Misc("commitSegments: segment not found in segmentsInWaiting")); }, } } // TODO surely there's a better way to insert one vec into another? // like insert_all, similar to push_all? for i in 0 .. 
newSegs.len() { let g = newSegs[i]; newHeader.currentState.insert(i, g); } newHeader.changeCounter = newHeader.changeCounter + 1; let mut fs = try!(self.OpenForWriting()); let oldHeaderOverflow = try!(self.writeHeader(&mut st, &mut space, &mut fs, newHeader)); waiting.segmentsInWaiting = newSegmentsInWaiting; //printfn "after commit, currentState: %A" header.currentState //printfn "after commit, segments: %A" header.segments // all the segments we just committed can now be removed from // the segments in waiting list match oldHeaderOverflow { Some(blk) => self.addFreeBlocks(&mut space, vec![ blk ]), None => () } // note that we intentionally do not release the writeLock here. // you can change the segment list more than once while holding // the writeLock. the writeLock gets released when you Dispose() it. Ok(()) } // TODO bad fn name fn WriteSegmentFromSortedSequence<I>(&self, source: I) -> Result<SegmentNum> where I:Iterator<Item=Result<kvp>> { let mut fs = try!(self.OpenForWriting()); let (g,_) = try!(CreateFromSortedSequenceOfKeyValuePairs(&mut fs, self, source)); Ok(g) } // TODO bad fn name fn WriteSegment(&self, pairs: HashMap<Box<[u8]>,Box<[u8]>>) -> Result<SegmentNum> { let mut a : Vec<(Box<[u8]>,Box<[u8]>)> = pairs.into_iter().collect(); a.sort_by(|a,b| { let (ref ka,_) = *a; let (ref kb,_) = *b; bcmp::Compare(&ka,&kb) }); let source = a.into_iter().map(|t| { let (k,v) = t; Ok(kvp {Key:k, Value:Blob::Array(v)}) }); let mut fs = try!(self.OpenForWriting()); let (g,_) = try!(CreateFromSortedSequenceOfKeyValuePairs(&mut fs, self, source)); Ok(g) } // TODO bad fn name fn WriteSegment2(&self, pairs: HashMap<Box<[u8]>,Blob>) -> Result<SegmentNum> { let mut a : Vec<(Box<[u8]>,Blob)> = pairs.into_iter().collect(); a.sort_by(|a,b| { let (ref ka,_) = *a; let (ref kb,_) = *b; bcmp::Compare(&ka,&kb) }); let source = a.into_iter().map(|t| { let (k,v) = t; Ok(kvp {Key:k, Value:v}) }); let mut fs = try!(self.OpenForWriting()); let (g,_) = 
try!(CreateFromSortedSequenceOfKeyValuePairs(&mut fs, self, source)); Ok(g) } fn merge(&self, level: u32, min: usize, max: Option<usize>) -> Result<Option<SegmentNum>> { let mrg = { let st = try!(self.header.lock()); if st.header.currentState.len() == 0 { return Ok(None) } //println!("age for merge: {}", level); //println!("currentState: {:?}", st.header.currentState); let age_group = st.header.currentState.iter().filter(|g| { let info = st.header.segments.get(&g).unwrap(); info.age == level }).map(|g| *g).collect::<Vec<SegmentNum>>(); //println!("age_group: {:?}", age_group); if age_group.len() == 0 { return Ok(None) } // make sure this is contiguous assert!(slice_within(age_group.as_slice(), st.header.currentState.as_slice()).is_ok()); let mut segs = Vec::new(); let mut mergeStuff = try!(self.mergeStuff.lock()); // we can merge any contiguous set of not-already-being-merged // segments at the end of the group. if we merge something // that is not at the end of the group, we could end up with // age groups not being contiguous. for g in age_group.iter().rev() { if mergeStuff.merging.contains(g) { break; } else { segs.push(*g); } } if segs.len() >= min { match max { Some(max) => { segs.truncate(max); }, None => (), } segs.reverse(); let mut clist = Vec::new(); for g in segs.iter() { clist.push(try!(self.getCursor(&st, *g))); } for g in segs.iter() { mergeStuff.merging.insert(*g); } Some((segs,clist)) } else { None } }; match mrg { Some((segs,clist)) => { let mut mc = MultiCursor::Create(clist); let mut fs = try!(self.OpenForWriting()); try!(mc.First()); let (g,_) = try!(CreateFromSortedSequenceOfKeyValuePairs(&mut fs, self, CursorIterator::new(mc))); //printfn "merged %A to get %A" segs g let mut mergeStuff = try!(self.mergeStuff.lock()); mergeStuff.pendingMerges.insert(g, segs); Ok(Some(g)) }, None => { Ok(None) }, } } // TODO maybe commitSegments and commitMerge should be the same function. // just check to see if the segment being committed is a merge. 
if so, // do the extra paperwork. fn commitMerge(&self, newSegNum:SegmentNum) -> Result<()> { let mut st = try!(self.header.lock()); let mut waiting = try!(self.segmentsInWaiting.lock()); let mut space = try!(self.space.lock()); let mut mergeStuff = try!(self.mergeStuff.lock()); assert!(st.header.currentState.iter().position(|&g| g == newSegNum).is_none()); // we need the list of segments which were merged. we make a copy of // so that we're not keeping a reference that inhibits our ability to // get other references a little later in the function. let old = { let maybe = mergeStuff.pendingMerges.get(&newSegNum); if maybe.is_none() { return Err(LsmError::Misc("commitMerge: segment not found in pendingMerges")); } else { maybe.expect("just checked is_none").clone() } }; let oldAsSet : HashSet<SegmentNum> = old.iter().map(|g| *g).collect(); assert!(oldAsSet.len() == old.len()); // now we need to verify that the segments being replaced are in currentState // and contiguous. let ndxFirstOld = try!(slice_within(old.as_slice(), st.header.currentState.as_slice())); // now we construct a newHeader let mut newHeader = st.header.clone(); // first, fix the currentState for _ in &old { newHeader.currentState.remove(ndxFirstOld); } newHeader.currentState.insert(ndxFirstOld, newSegNum); // remove the old segmentinfos, keeping them for later let mut segmentsBeingReplaced = HashMap::new(); for g in &oldAsSet { let info = newHeader.segments.remove(g).expect("old seg not found in header.segments"); segmentsBeingReplaced.insert(g, info); } // now get the segment info for the new segment let mut newSegmentInfo = { let maybe = waiting.segmentsInWaiting.get(&newSegNum); if maybe.is_none() { return Err(LsmError::Misc("commitMerge: segment not found in segmentsInWaiting")); } else { maybe.expect("seg not found").clone() } }; // and fix its age to be one higher than the maximum age of the // segments it replaced. 
let age_of_new_segment = { let ages: Vec<u32> = segmentsBeingReplaced.values().map(|info| info.age).collect(); 1 + ages.iter().max().expect("this cannot be empty") }; newSegmentInfo.age = age_of_new_segment; newHeader.segments.insert(newSegNum, newSegmentInfo); newHeader.mergeCounter = newHeader.mergeCounter + 1; let mut fs = try!(self.OpenForWriting()); let oldHeaderOverflow = try!(self.writeHeader(&mut st, &mut space, &mut fs, newHeader)); // the write of the new header has succeeded. waiting.segmentsInWaiting.remove(&newSegNum); mergeStuff.pendingMerges.remove(&newSegNum); for g in old { mergeStuff.merging.remove(&g); } let mut segmentsToBeFreed = segmentsBeingReplaced; { let mut cursors = try!(self.cursors.lock()); let segmentsWithACursor : HashSet<SegmentNum> = cursors.cursors.iter().map(|t| {let (_,segnum) = t; *segnum}).collect(); for g in segmentsWithACursor { // don't free anything that has a cursor match segmentsToBeFreed.remove(&g) { Some(z) => { cursors.zombies.insert(g, z); }, None => { }, } } } let mut blocksToBeFreed = Vec::new(); for info in segmentsToBeFreed.values() { blocksToBeFreed.push_all(&info.blocks); } match oldHeaderOverflow { Some(blk) => blocksToBeFreed.push(blk), None => (), } self.addFreeBlocks(&mut space, blocksToBeFreed); // note that we intentionally do not release the writeLock here. // you can change the segment list more than once while holding // the writeLock. the writeLock gets released when you Dispose() it. 
// (the two closing braces below end commitMerge and the inherent impl of InnerPart)
Ok(())
}
}

// InnerPart acts as the page allocator for segment writers: Begin hands out
// a fresh segment number, GetBlock grants blocks of pages on demand, and End
// registers the finished segment in segmentsInWaiting (it is not yet part of
// the committed header; commitSegments does that later).
impl IPages for InnerPart {
    fn PageSize(&self) -> usize {
        self.pgsz
    }

    // Allocate the next segment number (under the nextSeg lock) and start
    // a pending segment for it.
    fn Begin(&self) -> Result<PendingSegment> {
        let mut lck = try!(self.nextSeg.lock());
        let p = PendingSegment::new(lck.nextSeg);
        lck.nextSeg = lck.nextSeg + 1;
        Ok(p)
    }

    // Grant the pending segment another block of free pages, recording the
    // block in the segment's own block list.
    fn GetBlock(&self, ps: &mut PendingSegment) -> Result<PageBlock> {
        let mut space = try!(self.space.lock());
        // specificSize=0 means we don't care how big of a block we get
        let blk = self.getBlock(&mut space, 0);
        ps.AddBlock(blk);
        Ok(blk)
    }

    // Finish the pending segment: its root is lastPage and its age starts
    // at 0.  The SegmentInfo goes into segmentsInWaiting, and any pages
    // granted but not used are returned to the free list.
    fn End(&self, ps:PendingSegment, lastPage: PageNum) -> Result<SegmentNum> {
        let (g, blocks, leftovers) = ps.End(lastPage);
        let info = SegmentInfo {age: 0,blocks:blocks,root:lastPage};
        let mut waiting = try!(self.segmentsInWaiting.lock());
        let mut space = try!(self.space.lock());
        waiting.segmentsInWaiting.insert(g,info);
        //printfn "wrote %A: %A" g blocks
        match leftovers {
            Some(b) => self.addFreeBlocks(&mut space, vec![b]),
            None => ()
        }
        Ok(g)
    }
}

// ----------------------------------------------------------------

// The block comment below (continued in the following lines) is the original
// F# source this module was ported from, kept for reference.
/*
type Database(_io:IDatabaseFile, _settings:DbSettings) =

    let doAutoMerge() =
        if settings.AutoMergeEnabled then
            for level in 0 .. 3 do // TODO max merge level immediate
                match getPossibleMerge level settings.AutoMergeMinimumPages false with
                | Some f ->
                    let g = f()
                    commitMerge g
                | None -> () // printfn "cannot merge level %d" level
            for level in 4 ..
7 do // TODO max merge level match getPossibleMerge level settings.AutoMergeMinimumPages false with | Some f -> f |> wrapMergeForLater |> startBackgroundMergeJob | None -> () // printfn "cannot merge level %d" level member this.ForgetWaitingSegments(guids:seq<Guid>) = // TODO need a test case for this let guidsAsSet = Seq.fold (fun acc g -> Set.add g acc) Set.empty guids let mySegmentsInWaiting = Map.filter (fun g _ -> Set.contains g guidsAsSet) segmentsInWaiting lock critSectionSegmentsInWaiting (fun () -> let remainingSegmentsInWaiting = Map.filter (fun g _ -> Set.contains g guidsAsSet |> not) segmentsInWaiting segmentsInWaiting <- remainingSegmentsInWaiting ) lock critSectionCursors (fun () -> let segmentsToBeFreed = Map.filter (fun g _ -> not (Map.containsKey g cursors)) mySegmentsInWaiting let blocksToBeFreed = Seq.fold (fun acc info -> info.blocks @ acc) List.empty (Map.values segmentsToBeFreed) addFreeBlocks blocksToBeFreed ) member this.OpenSegmentCursor(g:Guid) = let csr = lock critSectionCursors (fun () -> let h = header getCursor h.segments g (Some checkForGoneSegment) ) csr member this.GetFreeBlocks() = freeBlocks member this.PageSize() = pgsz member this.ListSegments() = (header.currentState, header.segments) member this.RequestWriteLock(timeout:int) = // TODO need a test case for this getWriteLock false timeout (Some doAutoMerge) member this.RequestWriteLock() = getWriteLock false (-1) (Some doAutoMerge) type PairBuffer(_db:IDatabase, _limit:int) = let db = _db let limit = _limit let d = System.Collections.Generic.Dictionary<byte[],Blob>() let mutable segs = [] let emptyByteArray:byte[] = Array.empty let emptyBlobValue = Blob.Array emptyByteArray member this.Flush() = if d.Count > 0 then let g = db.WriteSegment(d) segs <- g :: segs d.Clear() member this.AddPair(k:byte[], v:Blob) = // TODO dictionary deals with byte[] keys by reference. 
d.[k] <- v if d.Count >= limit then this.Flush() member this.AddEmptyKey(k:byte[]) = this.AddPair(k, emptyBlobValue) member this.Commit(tx:IWriteLock) = tx.CommitSegments segs segs <- [] */ #[cfg(test)] mod tests { use std; use super::Result; #[test] fn it_works() { } #[test] #[ignore] fn quick() { fn tempfile(base: &str) -> String { fn tid() -> String { // TODO use the rand crate fn bytes() -> std::io::Result<[u8;16]> { let mut f = try!(std::fs::OpenOptions::new() .read(true) .open("/dev/urandom")); let mut ba = [0;16]; try!(super::utils::ReadFully(&mut f, &mut ba)); Ok(ba) } fn to_hex_string(ba: &[u8]) -> String { let strs: Vec<String> = ba.iter() .map(|b| format!("{:02X}", b)) .collect(); strs.connect("") } let ba = bytes().unwrap(); to_hex_string(&ba) } std::fs::create_dir("tmp"); let file = "tmp/".to_string() + base + "_" + &tid(); file } fn f() -> Result<()> { //println!("running"); let db = try!(super::db::new(tempfile("quick"), super::DEFAULT_SETTINGS)); const NUM : usize = 100000; let mut a = Vec::new(); for i in 0 .. 10 { let g = try!(db.WriteSegmentFromSortedSequence(super::GenerateNumbers {cur: i * NUM, end: (i+1) * NUM, step: i+1})); a.push(g); } { let lck = try!(db.GetWriteLock()); try!(lck.commitSegments(a.clone())); } let g3 = try!(db.merge(0, 2, None)); assert!(g3.is_some()); let g3 = g3.unwrap(); { let lck = try!(db.GetWriteLock()); try!(lck.commitMerge(g3)); } Ok(()) } assert!(f().is_ok()); } } pub struct GenerateNumbers { pub cur: usize, pub end: usize, pub step: usize, } impl Iterator for GenerateNumbers { type Item = Result<kvp>; // TODO allow the number of digits to be customized? 
fn next(&mut self) -> Option<Result<kvp>> { if self.cur > self.end { None } else { let k = format!("{:08}", self.cur).into_bytes().into_boxed_slice(); let v = format!("{}", self.cur * 2).into_bytes().into_boxed_slice(); let r = kvp{Key:k, Value:Blob::Array(v)}; self.cur = self.cur + self.step; Some(Ok(r)) } } } pub struct GenerateWeirdPairs { pub cur: usize, pub end: usize, pub klen: usize, pub vlen: usize, } impl Iterator for GenerateWeirdPairs { type Item = Result<kvp>; fn next(&mut self) -> Option<Result<kvp>> { if self.cur > self.end { None } else { fn get_weird(i: usize) -> u8 { let f = i as f64; let f = f.sin() * 1000.0; let f = f.abs(); let f = f.floor() as u32; let f = f & 0xff; let f = f as u8; f } let mut k = Vec::new(); for i in 0 .. self.klen { k.push(get_weird(i + self.cur)); } let k = k.into_boxed_slice(); let mut v = Vec::new(); for i in 0 .. self.vlen { v.push(get_weird(i * 2 + self.cur)); } let v = v.into_boxed_slice(); let r = kvp{Key:k, Value:Blob::Array(v)}; self.cur = self.cur + 1; Some(Ok(r)) } } } pub struct sqlite4_num { neg: bool, approx: bool, e: i16, m: u64, } impl sqlite4_num { const SQLITE4_MX_EXP: i16 = 999; const SQLITE4_NAN_EXP: i16 = 2000; const NAN: sqlite4_num = sqlite4_num { neg: false, approx: true, e: sqlite4_num::SQLITE4_NAN_EXP, m: 0, }; const POS_INF: sqlite4_num = sqlite4_num {m: 1, .. sqlite4_num::NAN}; const NEG_INF: sqlite4_num = sqlite4_num {neg: true, .. 
sqlite4_num::POS_INF}; const ZERO: sqlite4_num = sqlite4_num { neg: false, approx: false, e: 0, m: 0, }; fn from_f64(d: f64) -> sqlite4_num { // TODO probably this function should be done by decoding the bits if d.is_nan() { sqlite4_num::NAN } else if d.is_sign_positive() && d.is_infinite() { sqlite4_num::POS_INF } else if d.is_sign_negative() && d.is_infinite() { sqlite4_num::NEG_INF } else if d==0.0 { sqlite4_num::ZERO } else { let LARGEST_UINT64 = u64::max_value(); let TENTH_MAX = LARGEST_UINT64 / 10; let large = LARGEST_UINT64 as f64; let large10 = TENTH_MAX as f64; let neg = d<0.0; let mut d = if neg { -d } else { d }; let mut e = 0; while d>large || (d>1.0 && d==((d as i64) as f64)) { d = d / 10.0; e = e + 1; } while d<large10 && d != ((d as i64) as f64) { d = d * 10.0; e = e - 1; } sqlite4_num { neg: neg, approx: true, e: e as i16, m: d as u64, } } } fn is_inf(&self) -> bool { (self.e > sqlite4_num::SQLITE4_MX_EXP) && (self.m != 0) } fn is_nan(&self) -> bool{ (self.e > sqlite4_num::SQLITE4_MX_EXP) && (self.m == 0) } fn from_i64(n: i64) -> sqlite4_num { sqlite4_num { neg: n<0, approx: false, m: if n>=0 { (n as u64) } else if n != i64::min_value() { ((-n) as u64) } else { 1 + (i64::max_value() as u64) }, e: 0, } } fn normalize(&self) -> sqlite4_num { let mut m = self.m; let mut e = self.e; while (m % 10) == 0 { e = e + 1; m = m / 10; } sqlite4_num {m: m, e: e, .. *self} } fn encode_for_index(&self, w: &mut Vec<u8>) { // TODO in sqlite4, the first byte of this encoding // is designed to mesh with the // overall type order byte. 
if self.m == 0 { if self.is_nan() { w.push(0x06u8); } else { w.push(0x15u8); } } else if self.is_inf() { if self.neg { w.push(0x07u8); } else { w.push(0x23u8); } } else { let num = self.normalize(); let mut m = num.m; let mut e = num.e; let mut iDigit; let mut aDigit = [0; 12]; if (num.e%2) != 0 { aDigit[0] = (10 * (m % 10)) as u8; m = m / 10; e = e - 1; iDigit = 1; } else { iDigit = 0; } while m != 0 { aDigit[iDigit] = (m % 100) as u8; iDigit = iDigit + 1; m = m / 100; } e = (iDigit as i16) + (e/2); fn push_u16_be(w: &mut Vec<u8>, e: u16) { w.push(((e>>8) & 0xff_u16) as u8); w.push(((e>>0) & 0xff_u16) as u8); } if e>= 11 { if ! num.neg { w.push(0x22u8); push_u16_be(w, e as u16); } else { w.push(0x08u8); push_u16_be(w, !e as u16); } } else if e>=0 { if ! num.neg { w.push(0x17u8+(e as u8)); } else { w.push(0x13u8-(e as u8)); } } else { if ! num.neg { w.push(0x16u8); push_u16_be(w, !((-e) as u16)); } else { w.push(0x14u8); push_u16_be(w, (-e) as u16); } } while iDigit>0 { iDigit = iDigit - 1; let mut d = aDigit[iDigit] * 2u8; if iDigit != 0 { d = d | 0x01u8; } if num.neg { d = !d; } w.push(d) } } } } // TODO the following can be removed at some point. it is here // now only because the test suite has not yet been adapted to use // KeyRef/ValueRef. impl<'a> LivingCursor<'a> { pub fn Seek(&mut self, k: &[u8], sop:SeekOp) -> Result<SeekResult> { let k2 = KeyRef::for_slice(k); let r = self.SeekRef(&k2, sop); println!("{:?}", r); r } } undo the previous two commits. back to KeyRef. /* Copyright 2014-2015 Zumero, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ #![feature(core)] #![feature(collections)] #![feature(box_syntax)] #![feature(convert)] #![feature(collections_drain)] #![feature(associated_consts)] // TODO turn the following warnings back on later #![allow(non_snake_case)] #![allow(non_camel_case_types)] use std::io; use std::io::Seek; use std::io::Read; use std::io::Write; use std::io::SeekFrom; use std::cmp::Ordering; use std::fs::File; use std::fs::OpenOptions; use std::collections::HashMap; use std::collections::HashSet; use std::ops::Index; use std::error::Error; const SIZE_32: usize = 4; // like std::mem::size_of::<u32>() const SIZE_16: usize = 2; // like std::mem::size_of::<u16>() pub type PageNum = u32; // type PageSize = u32; // TODO also perhaps the type representing size of a value, u32 // size of a value should NOT be usize, right? // TODO there is code which assumes that PageNum is u32. // but that's the nature of the file format. the type alias // isn't so much so that we can change it, but rather, to make // reading the code easier. 
// A value as handed to or returned from the database: either a stream to be
// read, an in-memory byte array, or a tombstone.
// NOTE(review): Tombstone appears to mark a deleted key -- ValueLength()
// documents a tombstone as None -- confirm against the cursor code.
pub enum Blob {
    Stream(Box<Read>),
    Array(Box<[u8]>),
    Tombstone,
}

// The single error type for this crate; Result<T> below is an alias over it.
#[derive(Debug)]
enum LsmError {
    // TODO remove Misc
    Misc(&'static str),
    // TODO more detail within CorruptFile
    CorruptFile(&'static str),
    Io(std::io::Error),
    CursorNotValid,
    InvalidPageNumber,
    InvalidPageType,
    RootPageNotInSegmentBlockList,
    Poisoned,
}

impl std::fmt::Display for LsmError {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match *self {
            LsmError::Io(ref err) => write!(f, "IO error: {}", err),
            LsmError::Misc(s) => write!(f, "Misc error: {}", s),
            LsmError::CorruptFile(s) => write!(f, "Corrupt file: {}", s),
            LsmError::Poisoned => write!(f, "Poisoned"),
            LsmError::CursorNotValid => write!(f, "Cursor not valid"),
            LsmError::InvalidPageNumber => write!(f, "Invalid page number"),
            LsmError::InvalidPageType => write!(f, "Invalid page type"),
            LsmError::RootPageNotInSegmentBlockList => write!(f, "Root page not in segment block list"),
        }
    }
}

impl std::error::Error for LsmError {
    fn description(&self) -> &str {
        match *self {
            LsmError::Io(ref err) => std::error::Error::description(err),
            LsmError::Misc(s) => s,
            LsmError::CorruptFile(s) => s,
            LsmError::Poisoned => "poisoned",
            LsmError::CursorNotValid => "cursor not valid",
            LsmError::InvalidPageNumber => "invalid page number",
            LsmError::InvalidPageType => "invalid page type",
            LsmError::RootPageNotInSegmentBlockList => "Root page not in segment block list",
        }
    }

    // TODO cause
}

// Let try!() convert io::Error transparently.
impl From<io::Error> for LsmError {
    fn from(err: io::Error) -> LsmError {
        LsmError::Io(err)
    }
}

// Any poisoned Mutex collapses to Poisoned, so try!(x.lock()) works.
impl<T> From<std::sync::PoisonError<T>> for LsmError {
    fn from(_err: std::sync::PoisonError<T>) -> LsmError {
        LsmError::Poisoned
    }
}

pub type Result<T> = std::result::Result<T, LsmError>;

// kvp is the struct used to provide key-value pairs downward,
// for storage into the database.
pub struct kvp {
    Key : Box<[u8]>,
    Value : Blob,
}

// Accumulates state while a new segment is written; see IPages::Begin /
// GetBlock / End for its lifecycle.
struct PendingSegment {
    blockList: Vec<PageBlock>,
    segnum: SegmentNum,
}

// TODO this is experimental.
it might not be very useful unless // it can be used everywhere a regular slice can be used. but we // obviously don't want to just pass around an Index<Output=u8> // trait object if that forces us into dynamic dispatch everywhere. struct SplitSlice<'a> { front: &'a [u8], back: &'a [u8], } impl<'a> SplitSlice<'a> { fn new(front: &'a [u8], back: &'a [u8]) -> SplitSlice<'a> { SplitSlice {front: front, back: back} } fn len(&self) -> usize { self.front.len() + self.back.len() } fn into_boxed_slice(self) -> Box<[u8]> { let mut k = Vec::new(); k.push_all(&self.front); k.push_all(&self.back); k.into_boxed_slice() } } impl<'a> Index<usize> for SplitSlice<'a> { type Output = u8; fn index(&self, _index: usize) -> &u8 { if _index >= self.front.len() { &self.back[_index - self.front.len()] } else { &self.front[_index] } } } fn split3<T>(a: &mut [T], i: usize) -> (&mut [T], &mut [T], &mut [T]) { let (before, a2) = a.split_at_mut(i); let (islice, after) = a2.split_at_mut(1); (before, islice, after) } pub enum KeyRef<'a> { // for an overflowed key, we just punt and read it into memory Overflowed(Box<[u8]>), // the other two are references into the page Prefixed(&'a [u8],&'a [u8]), Array(&'a [u8]), } impl<'a> std::fmt::Debug for KeyRef<'a> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::result::Result<(), std::fmt::Error> { match *self { KeyRef::Overflowed(ref a) => write!(f, "Overflowed, a={:?}", a), KeyRef::Prefixed(front,back) => write!(f, "Prefixed, front={:?}, back={:?}", front, back), KeyRef::Array(a) => write!(f, "Array, val={:?}", a), } } } impl<'a> KeyRef<'a> { pub fn len(&self) -> usize { match *self { KeyRef::Overflowed(ref a) => a.len(), KeyRef::Array(a) => a.len(), KeyRef::Prefixed(front,back) => front.len() + back.len(), } } pub fn from_boxed_slice(k: Box<[u8]>) -> KeyRef<'a> { KeyRef::Overflowed(k) } pub fn for_slice(k: &[u8]) -> KeyRef { KeyRef::Array(k) } pub fn into_boxed_slice(self) -> Box<[u8]> { match self { KeyRef::Overflowed(a) => { a }, 
KeyRef::Array(a) => { let mut k = Vec::with_capacity(a.len()); k.push_all(a); k.into_boxed_slice() }, KeyRef::Prefixed(front,back) => { let mut k = Vec::with_capacity(front.len() + back.len()); k.push_all(front); k.push_all(back); k.into_boxed_slice() }, } } // TODO move this to the bcmp module? fn compare_px_py(px: &[u8], x: &[u8], py: &[u8], y: &[u8]) -> Ordering { let xlen = px.len() + x.len(); let ylen = py.len() + y.len(); let len = std::cmp::min(xlen, ylen); for i in 0 .. len { let xval = if i<px.len() { px[i] } else { x[i - px.len()] }; let yval = if i<py.len() { py[i] } else { y[i - py.len()] }; let c = xval.cmp(&yval); if c != Ordering::Equal { return c; } } return xlen.cmp(&ylen); } // TODO move this to the bcmp module? fn compare_px_y(px: &[u8], x: &[u8], y: &[u8]) -> Ordering { let xlen = px.len() + x.len(); let ylen = y.len(); let len = std::cmp::min(xlen, ylen); for i in 0 .. len { let xval = if i<px.len() { px[i] } else { x[i - px.len()] }; let yval = y[i]; let c = xval.cmp(&yval); if c != Ordering::Equal { return c; } } return xlen.cmp(&ylen); } // TODO move this to the bcmp module? fn compare_x_py(x: &[u8], py: &[u8], y: &[u8]) -> Ordering { let xlen = x.len(); let ylen = py.len() + y.len(); let len = std::cmp::min(xlen, ylen); for i in 0 .. 
len { let xval = x[i]; let yval = if i<py.len() { py[i] } else { y[i - py.len()] }; let c = xval.cmp(&yval); if c != Ordering::Equal { return c; } } return xlen.cmp(&ylen); } pub fn cmp(x: &KeyRef, y: &KeyRef) -> Ordering { match (x,y) { (&KeyRef::Overflowed(ref x_k), &KeyRef::Overflowed(ref y_k)) => { bcmp::Compare(&x_k, &y_k) }, (&KeyRef::Overflowed(ref x_k), &KeyRef::Prefixed(ref y_p, ref y_k)) => { Self::compare_x_py(&x_k, y_p, y_k) }, (&KeyRef::Overflowed(ref x_k), &KeyRef::Array(ref y_k)) => { bcmp::Compare(&x_k, &y_k) }, (&KeyRef::Prefixed(ref x_p, ref x_k), &KeyRef::Overflowed(ref y_k)) => { Self::compare_px_y(x_p, x_k, &y_k) }, (&KeyRef::Array(ref x_k), &KeyRef::Overflowed(ref y_k)) => { bcmp::Compare(&x_k, &y_k) }, (&KeyRef::Prefixed(ref x_p, ref x_k), &KeyRef::Prefixed(ref y_p, ref y_k)) => { Self::compare_px_py(x_p, x_k, y_p, y_k) }, (&KeyRef::Prefixed(ref x_p, ref x_k), &KeyRef::Array(ref y_k)) => { Self::compare_px_y(x_p, x_k, y_k) }, (&KeyRef::Array(ref x_k), &KeyRef::Prefixed(ref y_p, ref y_k)) => { Self::compare_x_py(x_k, y_p, y_k) }, (&KeyRef::Array(ref x_k), &KeyRef::Array(ref y_k)) => { bcmp::Compare(&x_k, &y_k) }, } } } pub enum ValueRef<'a> { Array(&'a [u8]), Overflowed(usize, Box<Read>), Tombstone, } impl<'a> ValueRef<'a> { pub fn len(&self) -> Option<usize> { match *self { ValueRef::Array(a) => Some(a.len()), ValueRef::Overflowed(len, _) => Some(len), ValueRef::Tombstone => None, } } pub fn into_blob(self) -> Blob { match self { ValueRef::Array(a) => { let mut k = Vec::new(); k.push_all(a); Blob::Array(k.into_boxed_slice()) }, ValueRef::Overflowed(len, r) => Blob::Stream(r), ValueRef::Tombstone => Blob::Tombstone, } } } impl<'a> std::fmt::Debug for ValueRef<'a> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::result::Result<(), std::fmt::Error> { match *self { ValueRef::Array(a) => write!(f, "Array, len={:?}", a), ValueRef::Overflowed(klen,_) => write!(f, "Overflowed, len={}", klen), ValueRef::Tombstone => write!(f, "Tombstone"), } } } 
#[derive(Hash,PartialEq,Eq,Copy,Clone,Debug)] struct PageBlock { firstPage: PageNum, lastPage: PageNum, } impl PageBlock { fn new(first: PageNum, last: PageNum) -> PageBlock { PageBlock { firstPage: first, lastPage: last } } fn count_pages(&self) -> PageNum { self.lastPage - self.firstPage + 1 } fn contains_page(&self, pgnum: PageNum) -> bool { (pgnum >= self.firstPage) && (pgnum <= self.lastPage) } } fn block_list_contains_page(blocks: &Vec<PageBlock>, pgnum: PageNum) -> bool { for blk in blocks.iter() { if blk.contains_page(pgnum) { return true; } } return false; } pub type SegmentNum = u64; trait IPages { fn PageSize(&self) -> usize; fn Begin(&self) -> Result<PendingSegment>; fn GetBlock(&self, token: &mut PendingSegment) -> Result<PageBlock>; fn End(&self, token: PendingSegment, page: PageNum) -> Result<SegmentNum>; } #[derive(PartialEq,Copy,Clone)] pub enum SeekOp { SEEK_EQ = 0, SEEK_LE = 1, SEEK_GE = 2, } // this code was ported from F# which assumes that any Stream // that supports Seek also can give you its Length. That method // isn't part of the Seek trait, but this implementation should // suffice. 
fn seek_len<R>(fs: &mut R) -> io::Result<u64> where R : Seek { // remember where we are let pos = try!(fs.seek(SeekFrom::Current(0))); // seek the end let len = try!(fs.seek(SeekFrom::End(0))); // restore to where we were let _ = try!(fs.seek(SeekFrom::Start(pos))); Ok(len) } struct CursorIterator<'a> { csr: MultiCursor<'a> } impl<'a> CursorIterator<'a> { fn new(it: MultiCursor) -> CursorIterator { CursorIterator { csr: it } } } impl<'a> Iterator for CursorIterator<'a> { type Item = Result<kvp>; fn next(&mut self) -> Option<Result<kvp>> { if self.csr.IsValid() { let k = { let k = self.csr.KeyRef(); if k.is_err() { return Some(Err(k.err().unwrap())); } let k = k.unwrap().into_boxed_slice(); k }; let v = { let v = self.csr.ValueRef(); if v.is_err() { return Some(Err(v.err().unwrap())); } let v = v.unwrap().into_blob(); v }; let r = self.csr.Next(); if r.is_err() { return Some(Err(r.err().unwrap())); } Some(Ok(kvp{Key:k, Value:v})) } else { return None; } } } #[derive(Copy,Clone,Debug)] pub enum SeekResult { Invalid, Unequal, Equal, } impl SeekResult { fn from_cursor<'a, T: ICursor<'a>>(csr: &T, k: &KeyRef) -> Result<SeekResult> { if csr.IsValid() { if Ordering::Equal == try!(csr.KeyCompare(k)) { Ok(SeekResult::Equal) } else { Ok(SeekResult::Unequal) } } else { Ok(SeekResult::Invalid) } } fn is_valid(self) -> bool { match self { SeekResult::Invalid => false, SeekResult::Unequal => true, SeekResult::Equal => true, } } fn is_valid_and_equal(self) -> bool { match self { SeekResult::Invalid => false, SeekResult::Unequal => false, SeekResult::Equal => true, } } } pub trait ICursor<'a> { fn SeekRef(&mut self, k: &KeyRef, sop: SeekOp) -> Result<SeekResult>; fn First(&mut self) -> Result<()>; fn Last(&mut self) -> Result<()>; fn Next(&mut self) -> Result<()>; fn Prev(&mut self) -> Result<()>; fn IsValid(&self) -> bool; fn KeyRef(&'a self) -> Result<KeyRef<'a>>; fn ValueRef(&'a self) -> Result<ValueRef<'a>>; fn ValueLength(&self) -> Result<Option<usize>>; // tombstone is None 
fn KeyCompare(&self, k: &KeyRef) -> Result<Ordering>; } //#[derive(Copy,Clone)] pub struct DbSettings { pub AutoMergeEnabled : bool, pub AutoMergeMinimumPages : PageNum, pub DefaultPageSize : usize, pub PagesPerBlock : PageNum, } pub const DEFAULT_SETTINGS : DbSettings = DbSettings { AutoMergeEnabled : true, AutoMergeMinimumPages : 4, DefaultPageSize : 4096, PagesPerBlock : 256, }; #[derive(Clone)] struct SegmentInfo { root : PageNum, age : u32, // TODO does this grow? shouldn't it be a boxed array? // yes, but then derive clone complains. // ideally we could just stop cloning this struct. blocks : Vec<PageBlock> } pub mod utils { use std::io; use std::io::Seek; use std::io::Read; use std::io::SeekFrom; use super::PageNum; use super::LsmError; use super::Result; pub fn SeekPage(strm: &mut Seek, pgsz: usize, pageNumber: PageNum) -> Result<u64> { if 0==pageNumber { return Err(LsmError::InvalidPageNumber); } let pos = ((pageNumber as u64) - 1) * (pgsz as u64); let v = try!(strm.seek(SeekFrom::Start(pos))); Ok(v) } pub fn ReadFully(strm: &mut Read, buf: &mut [u8]) -> io::Result<usize> { let mut sofar = 0; let len = buf.len(); loop { let cur = &mut buf[sofar..len]; let n = try!(strm.read(cur)); if n==0 { break; } sofar += n; if sofar==len { break; } } let res : io::Result<usize> = Ok(sofar); res } } mod bcmp { use std::cmp::Ordering; use std::cmp::min; // TODO get rid of this function. regular cmp() is apparently lexicographic. #[inline(always)] pub fn Compare(x: &[u8], y: &[u8]) -> Ordering { x.cmp(y) } #[inline(always)] pub fn CompareWithPrefix(prefix: &[u8], x: &[u8], y: &[u8]) -> Ordering { assert!(prefix.len() > 0); if y.len() <= prefix.len() { prefix.cmp(y) } else { let c = prefix.cmp(&y[0 .. prefix.len()]); if c != Ordering::Equal { c } else { x.cmp(&y[prefix.len() .. 
y.len()]) } } } pub fn PrefixMatch(x: &[u8], y: &[u8], max: usize) -> usize { let len = min(x.len(), y.len()); let lim = min(len, max); let mut i = 0; while i<lim && x[i]==y[i] { i = i + 1; } i } // TODO rm fn StartsWith(x: &[u8], y: &[u8], max: usize) -> bool { if x.len() < y.len() { false } else { let len = y.len(); let mut i = 0; while i<len && x[i]==y[i] { i = i + 1; } i==len } } } mod Varint { // TODO this doesn't need to be usize. u8 is enough. pub fn SpaceNeededFor(v: u64) -> usize { if v<=240 { 1 } else if v<=2287 { 2 } else if v<=67823 { 3 } else if v<=16777215 { 4 } else if v<=4294967295 { 5 } else if v<=1099511627775 { 6 } else if v<=281474976710655 { 7 } else if v<=72057594037927935 { 8 } else { 9 } } // TODO stronger inline hint? pub fn read(buf: &[u8], cur: &mut usize) -> u64 { let c = *cur; let a0 = buf[c] as u64; if a0 <= 240u64 { *cur = *cur + 1; a0 } else if a0 <= 248u64 { let a1 = buf[c+1] as u64; let r = 240u64 + 256u64 * (a0 - 241u64) + a1; *cur = *cur + 2; r } else if a0 == 249u64 { let a1 = buf[c+1] as u64; let a2 = buf[c+2] as u64; let r = 2288u64 + 256u64 * a1 + a2; *cur = *cur + 3; r } else if a0 == 250u64 { let a1 = buf[c+1] as u64; let a2 = buf[c+2] as u64; let a3 = buf[c+3] as u64; let r = (a1<<16) | (a2<<8) | a3; *cur = *cur + 4; r } else if a0 == 251u64 { let a1 = buf[c+1] as u64; let a2 = buf[c+2] as u64; let a3 = buf[c+3] as u64; let a4 = buf[c+4] as u64; let r = (a1<<24) | (a2<<16) | (a3<<8) | a4; *cur = *cur + 5; r } else if a0 == 252u64 { let a1 = buf[c+1] as u64; let a2 = buf[c+2] as u64; let a3 = buf[c+3] as u64; let a4 = buf[c+4] as u64; let a5 = buf[c+5] as u64; let r = (a1<<32) | (a2<<24) | (a3<<16) | (a4<<8) | a5; *cur = *cur + 6; r } else if a0 == 253u64 { let a1 = buf[c+1] as u64; let a2 = buf[c+2] as u64; let a3 = buf[c+3] as u64; let a4 = buf[c+4] as u64; let a5 = buf[c+5] as u64; let a6 = buf[c+6] as u64; let r = (a1<<40) | (a2<<32) | (a3<<24) | (a4<<16) | (a5<<8) | a6; *cur = *cur + 7; r } else if a0 == 254u64 { let 
            a1 = buf[c+1] as u64;
            let a2 = buf[c+2] as u64;
            let a3 = buf[c+3] as u64;
            let a4 = buf[c+4] as u64;
            let a5 = buf[c+5] as u64;
            let a6 = buf[c+6] as u64;
            let a7 = buf[c+7] as u64;
            let r = (a1<<48) | (a2<<40) | (a3<<32) | (a4<<24) | (a5<<16) | (a6<<8) | a7;
            *cur = *cur + 8;
            r
        } else {
            // a0 == 255: full 8-byte big-endian payload follows
            let a1 = buf[c+1] as u64;
            let a2 = buf[c+2] as u64;
            let a3 = buf[c+3] as u64;
            let a4 = buf[c+4] as u64;
            let a5 = buf[c+5] as u64;
            let a6 = buf[c+6] as u64;
            let a7 = buf[c+7] as u64;
            let a8 = buf[c+8] as u64;
            let r = (a1<<56) | (a2<<48) | (a3<<40) | (a4<<32) | (a5<<24) | (a6<<16) | (a7<<8) | a8;
            *cur = *cur + 9;
            r
        }
    }

    // Encode v into buf at *cur, advancing *cur by SpaceNeededFor(v) bytes.
    pub fn write(buf: &mut [u8], cur: &mut usize, v: u64) {
        let c = *cur;
        if v<=240u64 {
            buf[c] = v as u8;
            *cur = *cur + 1;
        } else if v<=2287u64 {
            buf[c] = ((v - 240u64) / 256u64 + 241u64) as u8;
            buf[c+1] = ((v - 240u64) % 256u64) as u8;
            *cur = *cur + 2;
        } else if v<=67823u64 {
            buf[c] = 249u8;
            buf[c+1] = ((v - 2288u64) / 256u64) as u8;
            buf[c+2] = ((v - 2288u64) % 256u64) as u8;
            *cur = *cur + 3;
        } else if v<=16777215u64 {
            buf[c] = 250u8;
            buf[c+1] = (v >> 16) as u8;
            buf[c+2] = (v >> 8) as u8;
            buf[c+3] = (v >> 0) as u8;
            *cur = *cur + 4;
        } else if v<=4294967295u64 {
            buf[c] = 251u8;
            buf[c+1] = (v >> 24) as u8;
            buf[c+2] = (v >> 16) as u8;
            buf[c+3] = (v >> 8) as u8;
            buf[c+4] = (v >> 0) as u8;
            *cur = *cur + 5;
        } else if v<=1099511627775u64 {
            buf[c] = 252u8;
            buf[c+1] = (v >> 32) as u8;
            buf[c+2] = (v >> 24) as u8;
            buf[c+3] = (v >> 16) as u8;
            buf[c+4] = (v >> 8) as u8;
            buf[c+5] = (v >> 0) as u8;
            *cur = *cur + 6;
        } else if v<=281474976710655u64 {
            buf[c] = 253u8;
            buf[c+1] = (v >> 40) as u8;
            buf[c+2] = (v >> 32) as u8;
            buf[c+3] = (v >> 24) as u8;
            buf[c+4] = (v >> 16) as u8;
            buf[c+5] = (v >> 8) as u8;
            buf[c+6] = (v >> 0) as u8;
            *cur = *cur + 7;
        } else if v<=72057594037927935u64 {
            buf[c] = 254u8;
            buf[c+1] = (v >> 48) as u8;
            buf[c+2] = (v >> 40) as u8;
            buf[c+3] = (v >> 32) as u8;
            buf[c+4] = (v >> 24) as u8;
            buf[c+5] = (v >> 16) as u8;
            buf[c+6] = (v >> 8) as u8;
            buf[c+7] = (v >> 0) as u8;
            *cur = *cur + 8;
        } else {
            buf[c] = 255u8;
            buf[c+1] = (v >> 56) as u8;
            buf[c+2] = (v >> 48) as u8;
            buf[c+3] = (v >> 40) as u8;
            buf[c+4] = (v >> 32) as u8;
            buf[c+5] = (v >> 24) as u8;
            buf[c+6] = (v >> 16) as u8;
            buf[c+7] = (v >> 8) as u8;
            buf[c+8] = (v >> 0) as u8;
            *cur = *cur + 9;
        }
    }
}

/*
fn write_u32_le(v: &mut [u8], i: u32) {
    v[0] = ((i>> 0) & 0xff_u32) as u8;
    v[1] = ((i>> 8) & 0xff_u32) as u8;
    v[2] = ((i>>16) & 0xff_u32) as u8;
    v[3] = ((i>>24) & 0xff_u32) as u8;
}
*/

// Write a u32 into the first four bytes of v, big-endian.
fn write_u32_be(v: &mut [u8], i: u32) {
    v[0] = ((i>>24) & 0xff_u32) as u8;
    v[1] = ((i>>16) & 0xff_u32) as u8;
    v[2] = ((i>> 8) & 0xff_u32) as u8;
    v[3] = ((i>> 0) & 0xff_u32) as u8;
}

// Read a big-endian u32 from the first four bytes of v.
fn read_u32_be(v: &[u8]) -> u32 {
    let a0 = v[0] as u64;
    let a1 = v[1] as u64;
    let a2 = v[2] as u64;
    let a3 = v[3] as u64;
    let r = (a0 << 24) | (a1 << 16) | (a2 << 8) | (a3 << 0);
    // assert r fits
    r as u32
}

// Read a big-endian u16 from the first two bytes of v.
fn read_u16_be(v: &[u8]) -> u16 {
    let a0 = v[0] as u64;
    let a1 = v[1] as u64;
    let r = (a0 << 8) | (a1 << 0);
    // assert r fits
    r as u16
}

// Write a u16 into the first two bytes of v, big-endian.
fn write_u16_be(v: &mut [u8], i: u16) {
    v[0] = ((i>>8) & 0xff_u16) as u8;
    v[1] = ((i>>0) & 0xff_u16) as u8;
}

// Accumulates the bytes of one page (at a write position cur) before the
// page is flushed to disk.
struct PageBuilder {
    cur : usize,
    buf : Box<[u8]>,
}

// TODO bundling cur with the buf almost seems sad, because there are
// cases where we want buf to be mutable but not cur.
// :-)

impl PageBuilder {
    fn new(pgsz : usize) -> PageBuilder {
        let ba = vec![0;pgsz as usize].into_boxed_slice();
        PageBuilder { cur: 0, buf:ba }
    }

    // rewind the write position; buffer contents are not cleared
    fn Reset(&mut self) {
        self.cur = 0;
    }

    // write the entire page buffer to the stream
    fn Write(&self, strm: &mut Write) -> io::Result<()> {
        strm.write_all(&*self.buf)
    }

    fn PageSize(&self) -> usize {
        self.buf.len()
    }

    fn Buffer(&self) -> &[u8] {
        &self.buf
    }

    fn Position(&self) -> usize {
        self.cur
    }

    // bytes remaining between the write position and the end of the page
    fn Available(&self) -> usize {
        self.buf.len() - self.cur
    }

    // OR the given flag bit(s) into the page flags byte (offset 1)
    fn SetPageFlag(&mut self, x: u8) {
        self.buf[1] = self.buf[1] | (x);
    }

    fn PutByte(&mut self, x: u8) {
        self.buf[self.cur] = x;
        self.cur = self.cur + 1;
    }

    // copy up to len bytes from the stream into the page; advances cur by
    // the number of bytes actually read (may be short at EOF)
    fn PutStream2(&mut self, s: &mut Read, len: usize) -> io::Result<usize> {
        let n = try!(utils::ReadFully(s, &mut self.buf[self.cur .. self.cur + len]));
        self.cur = self.cur + n;
        let res : io::Result<usize> = Ok(n);
        res
    }

    // TODO rm this function
    fn PutStream(&mut self, s: &mut Read, len: usize) -> io::Result<usize> {
        let n = try!(self.PutStream2(s, len));
        // TODO if n != len fail, which may mean a different result type here
        let res : io::Result<usize> = Ok(len);
        res
    }

    fn PutArray(&mut self, ba: &[u8]) {
        self.buf[self.cur .. self.cur + ba.len()].clone_from_slice(ba);
        self.cur = self.cur + ba.len();
    }

    // append a big-endian u32 at cur
    fn PutInt32(&mut self, ov: u32) {
        let at = self.cur;
        write_u32_be(&mut self.buf[at .. at + SIZE_32], ov);
        self.cur = self.cur + SIZE_32;
    }

    // store page at a fixed slot: second u32 from the end of the page
    fn SetSecondToLastInt32(&mut self, page: u32) {
        let len = self.buf.len();
        let at = len - 2 * SIZE_32;
        if self.cur > at { panic!("SetSecondToLastInt32 is squashing data"); }
        write_u32_be(&mut self.buf[at .. at + SIZE_32], page);
    }

    // store page at a fixed slot: the last u32 of the page
    fn SetLastInt32(&mut self, page: u32) {
        let len = self.buf.len();
        let at = len - 1 * SIZE_32;
        if self.cur > at { panic!("SetLastInt32 is squashing data"); }
        write_u32_be(&mut self.buf[at .. at + SIZE_32], page);
    }

    // append a big-endian u16 at cur
    fn PutInt16(&mut self, ov: u16) {
        let at = self.cur;
        write_u16_be(&mut self.buf[at ..
            at + SIZE_16], ov);
        self.cur = self.cur + SIZE_16;
    }

    // TODO rm
    fn PutInt16At(&mut self, at: usize, ov: u16) {
        write_u16_be(&mut self.buf[at .. at + SIZE_16], ov);
    }

    // append a varint-encoded u64 at cur
    fn PutVarint(&mut self, ov: u64) {
        Varint::write(&mut *self.buf, &mut self.cur, ov);
    }

}

// TODO this struct should just go away. just use the buf.
// Read-side counterpart of PageBuilder: holds one page read from disk and
// offers typed accessors at caller-supplied offsets.
struct PageBuffer {
    buf : Box<[u8]>,
}

impl PageBuffer {
    fn new(pgsz: usize) -> PageBuffer {
        let ba = vec![0;pgsz as usize].into_boxed_slice();
        PageBuffer { buf:ba }
    }

    fn PageSize(&self) -> usize {
        self.buf.len()
    }

    // fill the whole buffer from the stream
    fn Read(&mut self, strm: &mut Read) -> io::Result<usize> {
        utils::ReadFully(strm, &mut self.buf)
    }

    // NOTE(review): the destination slice here is off..len-off, which covers
    // len-2*off bytes starting at off.  if len is meant to be a byte count,
    // this should probably be off..off+len — confirm against callers.
    fn ReadPart(&mut self, strm: &mut Read, off: usize, len: usize) -> io::Result<usize> {
        utils::ReadFully(strm, &mut self.buf[off .. len-off])
    }

    // compare len bytes of the page starting at cur against other
    fn Compare(&self, cur: usize, len: usize, other: &[u8]) -> Ordering {
        let slice = &self.buf[cur .. cur + len];
        bcmp::Compare(slice, other)
    }

    // compare a stored key (shared prefix + suffix of total length len,
    // suffix stored at cur) against other
    fn CompareWithPrefix(&self, cur: usize, prefix: &[u8], len: usize, other: &[u8]) -> Ordering {
        let slice = &self.buf[cur .. cur + len - prefix.len()];
        bcmp::CompareWithPrefix(prefix, slice, other)
    }

    // the page type tag lives in byte 0
    fn PageType(&self) -> Result<PageType> {
        PageType::from_u8(self.buf[0])
    }

    fn GetByte(&self, cur: &mut usize) -> u8 {
        let r = self.buf[*cur];
        *cur = *cur + 1;
        r
    }

    fn GetInt32(&self, cur: &mut usize) -> u32 {
        let at = *cur;
        let r = read_u32_be(&self.buf[at .. at + SIZE_32]);
        *cur = *cur + SIZE_32;
        r
    }

    fn GetInt32At(&self, at: usize) -> u32 {
        read_u32_be(&self.buf[at .. at + SIZE_32])
    }

    // the page flags byte lives at offset 1
    fn CheckPageFlag(&self, f: u8) -> bool {
        0 != (self.buf[1] & f)
    }

    // fixed slot written by PageBuilder::SetSecondToLastInt32
    fn GetSecondToLastInt32(&self) -> u32 {
        let len = self.buf.len();
        let at = len - 2 * SIZE_32;
        self.GetInt32At(at)
    }

    // fixed slot written by PageBuilder::SetLastInt32
    fn GetLastInt32(&self) -> u32 {
        let len = self.buf.len();
        let at = len - 1 * SIZE_32;
        self.GetInt32At(at)
    }

    fn GetInt16(&self, cur: &mut usize) -> u16 {
        let at = *cur;
        let r = read_u16_be(&self.buf[at ..
            at + SIZE_16]);
        *cur = *cur + SIZE_16;
        r
    }

    fn get_slice(&self, start: usize, len: usize) -> &[u8] {
        &self.buf[start .. start + len]
    }

    // copy a.len() bytes out of the page at *cur, advancing *cur
    fn GetIntoArray(&self, cur: &mut usize, a : &mut [u8]) {
        let len = a.len();
        a.clone_from_slice(&self.buf[*cur .. *cur + len]);
        *cur = *cur + a.len();
    }

    // TODO this function shows up a lot in the profiler
    // TODO inline hint?
    fn GetVarint(&self, cur: &mut usize) -> u64 {
        Varint::read(&*self.buf, cur)
    }

}

// Which way the multicursor most recently moved.  Knowing this lets
// Next/Prev avoid re-seeking subcursors that are already positioned.
#[derive(PartialEq,Copy,Clone)]
enum Direction {
    FORWARD = 0,
    BACKWARD = 1,
    WANDERING = 2,
}

// Merges several segment cursors into a single sorted view.
// sorted[] holds subcursor indexes ordered by their current keys; cur is
// the index of the subcursor whose pair is currently exposed.
struct MultiCursor<'a> {
    subcursors: Box<[SegmentCursor<'a>]>,
    sorted: Box<[(usize,Option<Ordering>)]>,
    cur: Option<usize>,
    dir: Direction,
}

impl<'a> MultiCursor<'a> {
    // Insertion-sort the subcursor indexes by their current keys
    // (ascending when want_max is false, descending when true).
    // The Ordering stored next to each index records how that cursor's key
    // relates to the one in front of it; invalid cursors keep None and sink
    // to the back.
    fn sort(&mut self, want_max: bool) -> Result<()> {
        if self.subcursors.is_empty() {
            return Ok(())
        }

        // get a KeyRef for all the cursors
        let mut ka = Vec::new();
        for c in self.subcursors.iter() {
            if c.IsValid() {
                ka.push(Some(try!(c.KeyRef())));
            } else {
                ka.push(None);
            }
        }

        // init the orderings to None.
        // the invalid cursors will stay that way.
        for i in 0 .. self.sorted.len() {
            self.sorted[i].1 = None;
        }

        for i in 1 ..
        ka.len() {
            let mut j = i;
            while j > 0 {
                let nj = self.sorted[j].0;
                let nprev = self.sorted[j - 1].0;
                match (&ka[nj], &ka[nprev]) {
                    (&Some(ref kj), &Some(ref kprev)) => {
                        // both cursors valid: compare their keys in the
                        // requested direction
                        let c = {
                            if want_max {
                                KeyRef::cmp(kprev, kj)
                            } else {
                                KeyRef::cmp(kj, kprev)
                            }
                        };
                        match c {
                            Ordering::Greater => {
                                self.sorted[j].1 = Some(Ordering::Greater);
                                break;
                            },
                            Ordering::Equal => {
                                // equal keys: break the tie by subcursor
                                // index so the ordering is deterministic
                                match nj.cmp(&nprev) {
                                    Ordering::Equal => {
                                        unreachable!();
                                    },
                                    Ordering::Greater => {
                                        self.sorted[j].1 = Some(Ordering::Equal);
                                        break;
                                    },
                                    Ordering::Less => {
                                        self.sorted[j - 1].1 = Some(Ordering::Equal);
                                        // keep going
                                    },
                                }
                            },
                            Ordering::Less => {
                                // keep going
                                self.sorted[j - 1].1 = Some(Ordering::Greater);
                            },
                        }
                    },
                    (&Some(ref kj), &None) => {
                        // valid cursor sorts in front of an invalid one:
                        // keep going
                    },
                    (&None, &Some(ref kprev)) => {
                        break;
                    },
                    (&None, &None) => {
                        // both invalid: order by index for determinism
                        match nj.cmp(&nprev) {
                            Ordering::Equal => {
                                unreachable!();
                            },
                            Ordering::Greater => {
                                break;
                            },
                            Ordering::Less => {
                                // keep going
                            },
                        }
                    }
                };
                self.sorted.swap(j, j - 1);
                j = j - 1;
            }
        }

        // fix the first one
        if self.sorted.len() > 0 {
            let n = self.sorted[0].0;
            match &ka[n] {
                &Some(_) => {
                    self.sorted[0].1 = Some(Ordering::Equal);
                },
                &None => {
                },
            }
        }

        /*
        println!("{:?} : {}", self.sorted, if want_max { "backward" } else {"forward"} );
        for i in 0 .. self.sorted.len() {
            let (n, ord) = self.sorted[i];
            println!("  {:?}", ka[n]);
        }
        */

        Ok(())
    }

    // index of the front cursor after sort(), or None if it is invalid
    fn sorted_first(&self) -> Option<usize> {
        let n = self.sorted[0].0;
        if self.sorted[0].1.is_some() {
            Some(n)
        } else {
            None
        }
    }

    // index of the subcursor holding the smallest current key
    fn findMin(&mut self) -> Result<Option<usize>> {
        if self.subcursors.is_empty() {
            Ok(None)
        } else {
            try!(self.sort(false));
            Ok(self.sorted_first())
        }
    }

    // index of the subcursor holding the largest current key
    fn findMax(&mut self) -> Result<Option<usize>> {
        if self.subcursors.is_empty() {
            Ok(None)
        } else {
            try!(self.sort(true));
            Ok(self.sorted_first())
        }
    }

    fn Create(subs: Vec<SegmentCursor>) -> MultiCursor {
        let s = subs.into_boxed_slice();
        let mut sorted = Vec::new();
        for i in 0 ..
        s.len() {
            sorted.push((i, None));
        }
        MultiCursor {
            subcursors: s,
            sorted: sorted.into_boxed_slice(),
            cur: None,
            dir: Direction::WANDERING,
        }
    }

}

impl<'a> ICursor<'a> for MultiCursor<'a> {
    fn IsValid(&self) -> bool {
        match self.cur {
            Some(i) => self.subcursors[i].IsValid(),
            None => false
        }
    }

    fn First(&mut self) -> Result<()> {
        for i in 0 .. self.subcursors.len() {
            try!(self.subcursors[i].First());
        }
        self.cur = try!(self.findMin());
        self.dir = Direction::FORWARD;
        Ok(())
    }

    fn Last(&mut self) -> Result<()> {
        for i in 0 .. self.subcursors.len() {
            try!(self.subcursors[i].Last());
        }
        self.cur = try!(self.findMax());
        self.dir = Direction::BACKWARD;
        Ok(())
    }

    // the exposed key/value always come from the subcursor at self.cur
    fn KeyRef(&'a self) -> Result<KeyRef<'a>> {
        match self.cur {
            None => Err(LsmError::CursorNotValid),
            Some(icur) => self.subcursors[icur].KeyRef(),
        }
    }

    fn ValueRef(&'a self) -> Result<ValueRef<'a>> {
        match self.cur {
            None => Err(LsmError::CursorNotValid),
            Some(icur) => self.subcursors[icur].ValueRef(),
        }
    }

    fn KeyCompare(&self, k: &KeyRef) -> Result<Ordering> {
        match self.cur {
            None => Err(LsmError::CursorNotValid),
            Some(icur) => self.subcursors[icur].KeyCompare(k),
        }
    }

    fn ValueLength(&self) -> Result<Option<usize>> {
        match self.cur {
            None => Err(LsmError::CursorNotValid),
            Some(icur) => self.subcursors[icur].ValueLength(),
        }
    }

    fn Next(&mut self) -> Result<()> {
        match self.cur {
            None => Err(LsmError::CursorNotValid),
            Some(icur) => {
                if (self.dir == Direction::FORWARD) {
                    // TODO self.sorted[0] is cur.
                    // immediately after that, there may (or may not be) some
                    // entries which were Ordering::Equal to cur.  call Next on
                    // each of these.
                    assert!(icur == self.sorted[0].0);
                    for i in 1 .. self.sorted.len() {
                        //println!("sorted[{}] : {:?}", i, self.sorted[i]);
                        let (n,c) = self.sorted[i];
                        match c {
                            None => {
                                break;
                            },
                            Some(c) => {
                                if c == Ordering::Equal {
                                    try!(self.subcursors[n].Next());
                                } else {
                                    break;
                                }
                            },
                        }
                    }
                } else {
                    // TODO consider simplifying all the stuff below.
                    // all this complexity may not be worth it.
// we need to fix every cursor to point to its min // value > icur. // if perf didn't matter, this would be simple. // call Next on icur. and call Seek(GE) (and maybe Next) // on every other cursor. // but there are several cases where we can do a lot // less work than a Seek. And we have the information // to identify those cases. So, this function is // pretty complicated, but it's fast. // -------- // the current cursor (icur) is easy. it just needs Next(). // we'll do it last, so we can use it for comparisons. // for now we deal with all the others. // the current direction of the multicursor tells us // something about the state of all the others. fn half(dir: Direction, ki: &KeyRef, subs: &mut [SegmentCursor]) -> Result<()> { match dir { Direction::FORWARD => { // this is the happy case. each cursor is at most // one step away. // direction is FORWARD, so we know that every valid cursor // is pointing at a key which is either == to icur, or // it is already the min key > icur. for csr in subs { if csr.IsValid() { let cmp = { let k = try!(csr.KeyRef()); let cmp = KeyRef::cmp(&k, ki); cmp }; match cmp { Ordering::Less => { // should never happen, because FORWARD unreachable!(); }, Ordering::Greater => { // TODO assert that j.Prev is <= icur? // done }, Ordering::Equal => { try!(csr.Next()); }, } } } Ok(()) }, Direction::BACKWARD => { // this case isn't too bad. each cursor is either // one step away or two. // every other cursor is either == icur or it is the // max value < icur. // find the invalid cursors first. we have to call seek // on these, because we don't know if they might have // a valid value which is > icur. we save the list and // deal with them after the others. for csr in subs { if csr.IsValid() { let cmp = { let k = try!(csr.KeyRef()); let cmp = KeyRef::cmp(&k, ki); cmp }; match cmp { Ordering::Less => { try!(csr.Next()); // we moved one step. let's see if we need to move one more. 
if csr.IsValid() { let cmp = { let k = try!(csr.KeyRef()); let cmp = KeyRef::cmp(&k, ki); cmp }; match cmp { Ordering::Less => { // should never happen. we should not have // been more than one step away from icur. unreachable!(); }, Ordering::Greater => { // done }, Ordering::Equal => { // and one more step try!(csr.Next()); }, } } }, Ordering::Greater => { // should never happen, because BACKWARD unreachable!(); }, Ordering::Equal => { // one step away try!(csr.Next()); }, } } else { let sr = try!(csr.SeekRef(&ki, SeekOp::SEEK_GE)); if sr.is_valid_and_equal() { try!(csr.Next()); } } } Ok(()) }, Direction::WANDERING => { // we have no idea where all the other cursors are. // so we have to do a seek on each one. // unfortunately, we have to make a copy of the icur Key. // Seek only needs a reference to a slice for the key, // and because we don't handle the case where icur == j, // there should be no mutability conflict, in theory. // But Rust doesn't know that. It knows that both // cursors are in the same array, so we cannot have a // mutable reference (to seek) into that array while // there is any other reference (the icur key). // also, KeyRef() gives a KeyRef, which Seek can't handle. for j in 0 .. subs.len() { let csr = &mut subs[j]; let sr = try!(csr.SeekRef(&ki, SeekOp::SEEK_GE)); if sr.is_valid_and_equal() { try!(csr.Next()); } } Ok(()) }, } } { let (before, middle, after) = split3(&mut *self.subcursors, icur); let icsr = &middle[0]; let ki = try!(icsr.KeyRef()); half(self.dir, &ki, before); half(self.dir, &ki, after); } } // now the current cursor try!(self.subcursors[icur].Next()); // now find the min. // this seems kinda awful. we just walked through the entire cursor list, // and now we're doing it again. should we have just kept track along // the way? maybe, but it doesn't save any key comparisons. it just // moves those compares from a separate loop into the loops above. still // might be a good idea. TODO. 
                self.cur = try!(self.findMin());
                self.dir = Direction::FORWARD;
                Ok(())
            },
        }
    }

    // TODO fix Prev like Next
    fn Prev(&mut self) -> Result<()> {
        match self.cur {
            None => Err(LsmError::CursorNotValid),
            Some(icur) => {
                // copy the current key so we can reposition every subcursor
                // against it without borrow conflicts
                let k = {
                    let k = try!(self.subcursors[icur].KeyRef());
                    let k = k.into_boxed_slice();
                    let k = KeyRef::from_boxed_slice(k);
                    k
                };
                for j in 0 .. self.subcursors.len() {
                    let csr = &mut self.subcursors[j];
                    // unless we just moved backward, the other cursors could
                    // be anywhere, so seek them to the max key <= k first
                    if (self.dir != Direction::BACKWARD) && (icur != j) {
                        try!(csr.SeekRef(&k, SeekOp::SEEK_LE));
                    }
                    // step off the current key
                    if csr.IsValid() && (Ordering::Equal == try!(csr.KeyCompare(&k))) {
                        try!(csr.Prev());
                    }
                }
                self.cur = try!(self.findMax());
                self.dir = Direction::BACKWARD;
                Ok(())
            },
        }
    }

    fn SeekRef(&mut self, k: &KeyRef, sop:SeekOp) -> Result<SeekResult> {
        self.cur = None;
        self.dir = Direction::WANDERING;
        // seek every subcursor; an exact hit wins immediately
        for j in 0 .. self.subcursors.len() {
            let sr = try!(self.subcursors[j].SeekRef(k, sop));
            if sr.is_valid_and_equal() {
                self.cur = Some(j);
                return Ok(sr);
            }
        }
        // no exact hit: pick the best inexact candidate per the seek op
        match sop {
            SeekOp::SEEK_GE => {
                self.cur = try!(self.findMin());
                match self.cur {
                    Some(i) => {
                        self.dir = Direction::FORWARD;
                        SeekResult::from_cursor(&self.subcursors[i], k)
                    },
                    None => {
                        Ok(SeekResult::Invalid)
                    },
                }
            },
            SeekOp::SEEK_LE => {
                self.cur = try!(self.findMax());
                match self.cur {
                    Some(i) => {
                        self.dir = Direction::BACKWARD;
                        SeekResult::from_cursor(&self.subcursors[i], k)
                    },
                    None => {
                        Ok(SeekResult::Invalid)
                    },
                }
            },
            SeekOp::SEEK_EQ => {
                Ok(SeekResult::Invalid)
            },
        }
    }

}

// A cursor over the merged view which hides tombstones: only live
// key/value pairs are visible through it.
pub struct LivingCursor<'a> {
    chain : MultiCursor<'a>
}

impl<'a> LivingCursor<'a> {
    // advance past any tombstones (ValueLength() == None)
    fn skipTombstonesForward(&mut self) -> Result<()> {
        while self.chain.IsValid() && try!(self.chain.ValueLength()).is_none() {
            try!(self.chain.Next());
        }
        Ok(())
    }

    // retreat past any tombstones (ValueLength() == None)
    fn skipTombstonesBackward(&mut self) -> Result<()> {
        while self.chain.IsValid() && try!(self.chain.ValueLength()).is_none() {
            try!(self.chain.Prev());
        }
        Ok(())
    }

    fn Create(ch : MultiCursor) -> LivingCursor {
        LivingCursor { chain : ch }
    }
}

impl<'a> ICursor<'a> for LivingCursor<'a> {
    fn First(&mut self) ->
Result<()> { try!(self.chain.First()); try!(self.skipTombstonesForward()); Ok(()) } fn Last(&mut self) -> Result<()> { try!(self.chain.Last()); try!(self.skipTombstonesBackward()); Ok(()) } fn KeyRef(&'a self) -> Result<KeyRef<'a>> { self.chain.KeyRef() } fn ValueRef(&'a self) -> Result<ValueRef<'a>> { self.chain.ValueRef() } fn ValueLength(&self) -> Result<Option<usize>> { self.chain.ValueLength() } fn IsValid(&self) -> bool { self.chain.IsValid() && { let r = self.chain.ValueLength(); if r.is_ok() { r.unwrap().is_some() } else { false } } } fn KeyCompare(&self, k: &KeyRef) -> Result<Ordering> { self.chain.KeyCompare(k) } fn Next(&mut self) -> Result<()> { try!(self.chain.Next()); try!(self.skipTombstonesForward()); Ok(()) } fn Prev(&mut self) -> Result<()> { try!(self.chain.Prev()); try!(self.skipTombstonesBackward()); Ok(()) } fn SeekRef(&mut self, k: &KeyRef, sop:SeekOp) -> Result<SeekResult> { let sr = try!(self.chain.SeekRef(k, sop)); match sop { SeekOp::SEEK_GE => { if sr.is_valid() && self.chain.ValueLength().unwrap().is_none() { try!(self.skipTombstonesForward()); SeekResult::from_cursor(&self.chain, k) } else { Ok(sr) } }, SeekOp::SEEK_LE => { if sr.is_valid() && self.chain.ValueLength().unwrap().is_none() { try!(self.skipTombstonesBackward()); SeekResult::from_cursor(&self.chain, k) } else { Ok(sr) } }, SeekOp::SEEK_EQ => Ok(sr), } } } #[derive(Hash,PartialEq,Eq,Copy,Clone,Debug)] #[repr(u8)] enum PageType { LEAF_NODE, PARENT_NODE, OVERFLOW_NODE, } impl PageType { #[inline(always)] fn to_u8(self) -> u8 { match self { PageType::LEAF_NODE => 1, PageType::PARENT_NODE => 2, PageType::OVERFLOW_NODE => 3, } } #[inline(always)] fn from_u8(v: u8) -> Result<PageType> { match v { 1 => Ok(PageType::LEAF_NODE), 2 => Ok(PageType::PARENT_NODE), 3 => Ok(PageType::OVERFLOW_NODE), _ => Err(LsmError::InvalidPageType), } } } mod ValueFlag { pub const FLAG_OVERFLOW: u8 = 1; pub const FLAG_TOMBSTONE: u8 = 2; } mod PageFlag { pub const FLAG_ROOT_NODE: u8 = 1; pub const 
    FLAG_BOUNDARY_NODE: u8 = 2;
    // NOTE(review): 3 is 1|2, so CheckPageFlag(FLAG_ENDS_ON_BOUNDARY) also
    // matches pages with ROOT_NODE or BOUNDARY_NODE set.  if these are meant
    // to be independent bits, this should probably be 4 — confirm usage.
    pub const FLAG_ENDS_ON_BOUNDARY: u8 = 3;
}

#[derive(Debug)]
// this struct is used to remember pages we have written.
// for each page, we need to remember a key, and it needs
// to be in a box because the original copy is gone and
// the page has been written out to disk.
struct pgitem {
    page : PageNum,
    key : Box<[u8]>,
}

// Mutable state carried while writing the parent (interior) node levels.
struct ParentState {
    sofar : usize,
    nextGeneration : Vec<pgitem>,
    blk : PageBlock,
}

// this enum keeps track of what happened to a key as we
// processed it.  either we determined that it will fit
// inline or we wrote it as an overflow.
enum KeyLocation {
    Inline,
    Overflowed(PageNum),
}

// this enum keeps track of what happened to a value as we
// processed it.  it might have already been overflowed.  if
// it's going to fit in the page, we still have the data
// buffer.
enum ValueLocation {
    Tombstone,
    // when this is a Buffer, this gets ownership of kvp.Value
    Buffer(Box<[u8]>),
    Overflowed(usize,PageNum),
}

// One key/value pair staged for the leaf page currently being built.
struct LeafPair {
    // key gets ownership of kvp.Key
    key : Box<[u8]>,
    kLoc : KeyLocation,
    vLoc : ValueLocation,
}

// Mutable state carried while streaming sorted pairs into leaf pages.
struct LeafState {
    sofarLeaf : usize,
    keys_in_this_leaf : Vec<LeafPair>,
    prevLeaf : PageNum,
    prefixLen : usize,
    firstLeaf : PageNum,
    leaves : Vec<pgitem>,
    blk : PageBlock,
}

// Build a complete segment (leaves plus parent levels) from an
// already-sorted stream of key/value pairs.
fn CreateFromSortedSequenceOfKeyValuePairs<I,SeekWrite>(fs: &mut SeekWrite,
                                                        pageManager: &IPages,
                                                        source: I,
                                                       ) -> Result<(SegmentNum,PageNum)> where I:Iterator<Item=Result<kvp>>, SeekWrite : Seek+Write {

    // Write the bytes of ba out as a chain of overflow pages, starting in
    // startingBlock; returns the byte count written and the next free block.
    fn writeOverflow<SeekWrite>(startingBlock: PageBlock,
                                ba: &mut Read,
                                pageManager: &IPages,
                                fs: &mut SeekWrite
                               ) -> Result<(usize,PageBlock)> where SeekWrite : Seek+Write {

        // Fill the first overflow page of a block (has a 2-byte header and
        // reserves the trailing u32); returns (bytes copied, hit EOF).
        fn buildFirstPage(ba: &mut Read, pbFirstOverflow : &mut PageBuilder, pgsz: usize) -> Result<(usize,bool)> {
            pbFirstOverflow.Reset();
            pbFirstOverflow.PutByte(PageType::OVERFLOW_NODE.to_u8());
            pbFirstOverflow.PutByte(0u8); // starts 0, may be changed later
            let room = pgsz - (2 + SIZE_32);
            // something will be put in lastInt32 later
            let put =
try!(pbFirstOverflow.PutStream2(ba, room)); Ok((put, put<room)) }; fn buildRegularPage(ba: &mut Read, pbOverflow : &mut PageBuilder, pgsz: usize) -> Result<(usize,bool)> { pbOverflow.Reset(); let room = pgsz; let put = try!(pbOverflow.PutStream2(ba, room)); Ok((put, put<room)) }; fn buildBoundaryPage(ba: &mut Read, pbOverflow : &mut PageBuilder, pgsz: usize) -> Result<(usize,bool)> { pbOverflow.Reset(); let room = pgsz - SIZE_32; // something will be put in lastInt32 before the page is written let put = try!(pbOverflow.PutStream2(ba, room)); Ok((put, put<room)) } fn writeRegularPages<SeekWrite>(max: PageNum, sofar: usize, pb: &mut PageBuilder, fs: &mut SeekWrite, ba: &mut Read, pgsz: usize ) -> Result<(PageNum,usize,bool)> where SeekWrite : Seek+Write { let mut i = 0; let mut sofar = sofar; loop { if i < max { let (put, finished) = try!(buildRegularPage(ba, pb, pgsz)); if put==0 { return Ok((i, sofar, true)); } else { sofar = sofar + put; try!(pb.Write(fs)); if finished { return Ok((i+1, sofar, true)); } else { i = i + 1; } } } else { return Ok((i, sofar, false)); } } } // TODO misnamed fn writeOneBlock<SeekWrite>(param_sofar: usize, param_firstBlk: PageBlock, fs: &mut SeekWrite, ba: &mut Read, pgsz: usize, pbOverflow: &mut PageBuilder, pbFirstOverflow: &mut PageBuilder, pageManager: &IPages, token: &mut PendingSegment ) -> Result<(usize,PageBlock)> where SeekWrite : Seek+Write { // each trip through this loop will write out one // block, starting with the overflow first page, // followed by zero-or-more "regular" overflow pages, // which have no header. we'll stop at the block boundary, // either because we land there or because the whole overflow // won't fit and we have to continue into the next block. // the boundary page will be like a regular overflow page, // headerless, but it is four bytes smaller. 
let mut loop_sofar = param_sofar; let mut loop_firstBlk = param_firstBlk; loop { let sofar = loop_sofar; let firstBlk = loop_firstBlk; let (putFirst,finished) = try!(buildFirstPage (ba, pbFirstOverflow, pgsz)); if putFirst==0 { return Ok((sofar, firstBlk)); } else { // note that we haven't written the first page yet. we may have to fix // a couple of things before it gets written out. let sofar = sofar + putFirst; if firstBlk.firstPage == firstBlk.lastPage { // the first page landed on a boundary. // we can just set the flag and write it now. pbFirstOverflow.SetPageFlag(PageFlag::FLAG_BOUNDARY_NODE); let blk = try!(pageManager.GetBlock(&mut *token)); pbFirstOverflow.SetLastInt32(blk.firstPage); try!(pbFirstOverflow.Write(fs)); try!(utils::SeekPage(fs, pgsz, blk.firstPage)); if !finished { loop_sofar = sofar; loop_firstBlk = blk; } else { return Ok((sofar, blk)); } } else { let firstRegularPageNumber = firstBlk.firstPage + 1; if finished { // the first page is also the last one pbFirstOverflow.SetLastInt32(0); // offset to last used page in this block, which is this one try!(pbFirstOverflow.Write(fs)); return Ok((sofar, PageBlock::new(firstRegularPageNumber,firstBlk.lastPage))); } else { // we need to write more pages, // until the end of the block, // or the end of the stream, // whichever comes first try!(utils::SeekPage(fs, pgsz, firstRegularPageNumber)); // availableBeforeBoundary is the number of pages until the boundary, // NOT counting the boundary page, and the first page in the block // has already been accounted for, so we're just talking about data pages. 
let availableBeforeBoundary = if firstBlk.lastPage > 0 { (firstBlk.lastPage - firstRegularPageNumber) } else { PageNum::max_value() } ; let (numRegularPages, sofar, finished) = try!(writeRegularPages(availableBeforeBoundary, sofar, pbOverflow, fs, ba, pgsz)); if finished { // go back and fix the first page pbFirstOverflow.SetLastInt32(numRegularPages); try!(utils::SeekPage(fs, pgsz, firstBlk.firstPage)); try!(pbFirstOverflow.Write(fs)); // now reset to the next page in the block let blk = PageBlock::new(firstRegularPageNumber + numRegularPages, firstBlk.lastPage); try!(utils::SeekPage(fs, pgsz, blk.firstPage)); return Ok((sofar,blk)); } else { // we need to write out a regular page except with a // boundary pointer in it. and we need to set // FLAG_ENDS_ON_BOUNDARY on the first // overflow page in this block. let (putBoundary,finished) = try!(buildBoundaryPage (ba, pbOverflow, pgsz)); if putBoundary==0 { // go back and fix the first page pbFirstOverflow.SetLastInt32(numRegularPages); try!(utils::SeekPage(fs, pgsz, firstBlk.firstPage)); try!(pbFirstOverflow.Write(fs)); // now reset to the next page in the block let blk = PageBlock::new(firstRegularPageNumber + numRegularPages, firstBlk.lastPage); try!(utils::SeekPage(fs, pgsz, firstBlk.lastPage)); return Ok((sofar,blk)); } else { // write the boundary page let sofar = sofar + putBoundary; let blk = try!(pageManager.GetBlock(&mut *token)); pbOverflow.SetLastInt32(blk.firstPage); try!(pbOverflow.Write(fs)); // go back and fix the first page pbFirstOverflow.SetPageFlag(PageFlag::FLAG_ENDS_ON_BOUNDARY); pbFirstOverflow.SetLastInt32(numRegularPages + 1); try!(utils::SeekPage(fs, pgsz, firstBlk.firstPage)); try!(pbFirstOverflow.Write(fs)); // now reset to the first page in the next block try!(utils::SeekPage(fs, pgsz, blk.firstPage)); if finished { loop_sofar = sofar; loop_firstBlk = blk; } else { return Ok((sofar,blk)); } } } } } } } } let pgsz = pageManager.PageSize(); let mut token = try!(pageManager.Begin()); let mut 
pbFirstOverflow = PageBuilder::new(pgsz); let mut pbOverflow = PageBuilder::new(pgsz); writeOneBlock(0, startingBlock, fs, ba, pgsz, &mut pbOverflow, &mut pbFirstOverflow, pageManager, &mut token) } fn writeLeaves<I,SeekWrite>(leavesBlk:PageBlock, pageManager: &IPages, source: I, vbuf: &mut [u8], fs: &mut SeekWrite, pb: &mut PageBuilder, token: &mut PendingSegment, ) -> Result<(PageBlock,Vec<pgitem>,PageNum)> where I: Iterator<Item=Result<kvp>> , SeekWrite : Seek+Write { // 2 for the page type and flags // 4 for the prev page // 2 for the stored count // 4 for lastInt32 (which isn't in pb.Available) const LEAF_PAGE_OVERHEAD: usize = 2 + 4 + 2 + 4; fn buildLeaf(st: &mut LeafState, pb: &mut PageBuilder) -> Box<[u8]> { pb.Reset(); pb.PutByte(PageType::LEAF_NODE.to_u8()); pb.PutByte(0u8); // flags pb.PutInt32 (st.prevLeaf); // prev page num. // TODO prefixLen is one byte. should it be two? pb.PutByte(st.prefixLen as u8); if st.prefixLen > 0 { pb.PutArray(&st.keys_in_this_leaf[0].key[0 .. st.prefixLen]); } let count_keys_in_this_leaf = st.keys_in_this_leaf.len(); // TODO should we support more than 64k keys in a leaf? // either way, overflow-check this cast. pb.PutInt16 (count_keys_in_this_leaf as u16); fn f(pb: &mut PageBuilder, prefixLen: usize, lp: &LeafPair) { match lp.kLoc { KeyLocation::Inline => { pb.PutByte(0u8); // flags pb.PutVarint(lp.key.len() as u64); pb.PutArray(&lp.key[prefixLen .. lp.key.len()]); }, KeyLocation::Overflowed(kpage) => { pb.PutByte(ValueFlag::FLAG_OVERFLOW); pb.PutVarint(lp.key.len() as u64); pb.PutInt32(kpage); }, } match lp.vLoc { ValueLocation::Tombstone => { pb.PutByte(ValueFlag::FLAG_TOMBSTONE); }, ValueLocation::Buffer (ref vbuf) => { pb.PutByte(0u8); pb.PutVarint(vbuf.len() as u64); pb.PutArray(&vbuf); }, ValueLocation::Overflowed (vlen,vpage) => { pb.PutByte(ValueFlag::FLAG_OVERFLOW); pb.PutVarint(vlen as u64); pb.PutInt32(vpage); }, } } // deal with all the keys except the last one for lp in st.keys_in_this_leaf.drain(0 .. 
count_keys_in_this_leaf-1) { f(pb, st.prefixLen, &lp); } assert!(st.keys_in_this_leaf.len() == 1); let lp = st.keys_in_this_leaf.remove(0); assert!(st.keys_in_this_leaf.is_empty()); f(pb, st.prefixLen, &lp); lp.key } fn writeLeaf<SeekWrite>(st: &mut LeafState, isRootPage: bool, pb: &mut PageBuilder, fs: &mut SeekWrite, pgsz: usize, pageManager: &IPages, token: &mut PendingSegment, ) -> Result<()> where SeekWrite : Seek+Write { let last_key = buildLeaf(st, pb); assert!(st.keys_in_this_leaf.is_empty()); let thisPageNumber = st.blk.firstPage; let firstLeaf = if st.leaves.is_empty() { thisPageNumber } else { st.firstLeaf }; let nextBlk = if isRootPage { PageBlock::new(thisPageNumber + 1, st.blk.lastPage) } else if thisPageNumber == st.blk.lastPage { pb.SetPageFlag(PageFlag::FLAG_BOUNDARY_NODE); let newBlk = try!(pageManager.GetBlock(&mut *token)); pb.SetLastInt32(newBlk.firstPage); newBlk } else { PageBlock::new(thisPageNumber + 1, st.blk.lastPage) }; try!(pb.Write(fs)); if nextBlk.firstPage != (thisPageNumber+1) { try!(utils::SeekPage(fs, pgsz, nextBlk.firstPage)); } let pg = pgitem {page:thisPageNumber, key:last_key}; st.leaves.push(pg); st.sofarLeaf = 0; st.prevLeaf = thisPageNumber; st.prefixLen = 0; st.firstLeaf = firstLeaf; st.blk = nextBlk; Ok(()) } // TODO can the overflow page number become a varint? const NEEDED_FOR_OVERFLOW_PAGE_NUMBER: usize = 4; // the max limit of an inline key is when that key is the only // one in the leaf, and its value is overflowed. 
    let pgsz = pageManager.PageSize();
    let maxKeyInline =
        pgsz
        - LEAF_PAGE_OVERHEAD
        - 1 // prefixLen
        - 1 // key flags
        - Varint::SpaceNeededFor(pgsz as u64) // approx worst case inline key len
        - 1 // value flags
        - 9 // worst case varint value len
        - NEEDED_FOR_OVERFLOW_PAGE_NUMBER; // overflowed value page

    // bytes needed to store the key entry in a leaf, given its location
    fn kLocNeed(k: &[u8], kloc: &KeyLocation, prefixLen: usize) -> usize {
        let klen = k.len();
        match *kloc {
            KeyLocation::Inline => {
                1 + Varint::SpaceNeededFor(klen as u64) + klen - prefixLen
            },
            KeyLocation::Overflowed(_) => {
                1 + Varint::SpaceNeededFor(klen as u64) + NEEDED_FOR_OVERFLOW_PAGE_NUMBER
            },
        }
    }

    // bytes needed to store the value entry in a leaf, given its location
    fn vLocNeed (vloc: &ValueLocation) -> usize {
        match *vloc {
            ValueLocation::Tombstone => {
                1
            },
            ValueLocation::Buffer(ref vbuf) => {
                let vlen = vbuf.len();
                1 + Varint::SpaceNeededFor(vlen as u64) + vlen
            },
            ValueLocation::Overflowed(vlen,_) => {
                1 + Varint::SpaceNeededFor(vlen as u64) + NEEDED_FOR_OVERFLOW_PAGE_NUMBER
            },
        }
    }

    // total bytes a pair will occupy in the leaf under the given prefix length
    fn leafPairSize(prefixLen: usize, lp: &LeafPair) -> usize {
        kLocNeed(&lp.key, &lp.kLoc, prefixLen)
        +
        vLocNeed(&lp.vLoc)
    }

    fn defaultPrefixLen(k: &[u8]) -> usize {
        // TODO max prefix. relative to page size? currently must fit in one byte.
        if k.len() > 255 { 255 } else { k.len() }
    }

    // this is the body of writeLeaves
    let mut st = LeafState {
        sofarLeaf: 0,
        firstLeaf: 0,
        prevLeaf: 0,
        keys_in_this_leaf:Vec::new(),
        prefixLen: 0,
        leaves:Vec::new(),
        blk:leavesBlk,
        };

    for result_pair in source {
        let mut pair = try!(result_pair);
        let k = pair.Key;
        // TODO is it possible for this to conclude that the key must be overflowed
        // when it would actually fit because of prefixing?
        let (blkAfterKey,kloc) =
            if k.len() <= maxKeyInline {
                (st.blk, KeyLocation::Inline)
            } else {
                // key too large: write it to overflow pages now
                let vPage = st.blk.firstPage;
                let (_,newBlk) = try!(writeOverflow(st.blk, &mut &*k, pageManager, fs));
                (newBlk, KeyLocation::Overflowed(vPage))
            };

        // the max limit of an inline value is when the key is inline
        // on a new page.

        // TODO this is a usize, so it might cause integer underflow.
        let availableOnNewPageAfterKey =
            pgsz
            - LEAF_PAGE_OVERHEAD
            - 1 // prefixLen
            - 1 // key flags
            - Varint::SpaceNeededFor(k.len() as u64)
            - k.len()
            - 1 // value flags
            ;

        // availableOnNewPageAfterKey needs to accomodate the value and its length as a varint.
        // it might already be <=0 because of the key length
        let maxValueInline =
            if availableOnNewPageAfterKey > 0 {
                let neededForVarintLen = Varint::SpaceNeededFor(availableOnNewPageAfterKey as u64);
                let avail2 = availableOnNewPageAfterKey - neededForVarintLen;
                if avail2 > 0 { avail2 } else { 0 }
            } else {
                0
            };

        let (blkAfterValue, vloc) =
            match pair.Value {
                Blob::Tombstone => {
                    (blkAfterKey, ValueLocation::Tombstone)
                },
                _ => match kloc {
                    KeyLocation::Inline => {
                        if maxValueInline == 0 {
                            // no room for any inline value; everything non-empty overflows
                            match pair.Value {
                                Blob::Tombstone => {
                                    (blkAfterKey, ValueLocation::Tombstone)
                                },
                                Blob::Stream(ref mut strm) => {
                                    let valuePage = blkAfterKey.firstPage;
                                    let (len,newBlk) = try!(writeOverflow(blkAfterKey, &mut *strm, pageManager, fs));
                                    (newBlk, ValueLocation::Overflowed(len,valuePage))
                                },
                                Blob::Array(a) => {
                                    if a.is_empty() {
                                        // TODO maybe we need ValueLocation::Empty
                                        (blkAfterKey, ValueLocation::Buffer(a))
                                    } else {
                                        let valuePage = blkAfterKey.firstPage;
                                        let strm = a; // TODO need a Read for this
                                        let (len,newBlk) = try!(writeOverflow(blkAfterKey, &mut &*strm, pageManager, fs));
                                        (newBlk, ValueLocation::Overflowed(len,valuePage))
                                    }
                                },
                            }
                        } else {
                            match pair.Value {
                                Blob::Tombstone => {
                                    (blkAfterKey, ValueLocation::Tombstone)
                                },
                                Blob::Stream(ref mut strm) => {
                                    // not sure reusing vbuf is worth it. maybe we should just
                                    // alloc here. ownership will get passed into the
                                    // ValueLocation when it fits.
                                    // read one byte more than the inline limit so we can
                                    // tell whether the stream actually fits inline
                                    let vread = try!(utils::ReadFully(&mut *strm, &mut vbuf[0 .. maxValueInline+1]));
                                    let vbuf = &vbuf[0 .. vread];
                                    // NOTE(review): a value of exactly maxValueInline bytes goes to the
                                    // overflow path here even though it appears it would fit inline
                                    // (`<` vs `<=`) -- confirm whether this is intentional conservatism.
                                    if vread < maxValueInline {
                                        // TODO this alloc+copy is unfortunate
                                        let mut va = Vec::new();
                                        for i in 0 ..
vbuf.len() {
                                            va.push(vbuf[i]);
                                        }
                                        (blkAfterKey, ValueLocation::Buffer(va.into_boxed_slice()))
                                    } else {
                                        // the first bytes are already in vbuf; chain the
                                        // rest of the stream behind them for the overflow
                                        let valuePage = blkAfterKey.firstPage;
                                        let (len,newBlk) = try!(writeOverflow(blkAfterKey, &mut (vbuf.chain(strm)), pageManager, fs));
                                        (newBlk, ValueLocation::Overflowed (len,valuePage))
                                    }
                                },
                                Blob::Array(a) => {
                                    // NOTE(review): `<` (not `<=`) sends a value of exactly
                                    // maxValueInline bytes to overflow -- confirm intent.
                                    if a.len() < maxValueInline {
                                        (blkAfterKey, ValueLocation::Buffer(a))
                                    } else {
                                        let valuePage = blkAfterKey.firstPage;
                                        let (len,newBlk) = try!(writeOverflow(blkAfterKey, &mut &*a, pageManager, fs));
                                        (newBlk, ValueLocation::Overflowed(len,valuePage))
                                    }
                                },
                            }
                        }
                    },
                    KeyLocation::Overflowed(_) => {
                        // the key is overflowed, so the value always overflows too
                        // (unless it is empty or a tombstone)
                        match pair.Value {
                            Blob::Tombstone => {
                                (blkAfterKey, ValueLocation::Tombstone)
                            },
                            Blob::Stream(ref mut strm) => {
                                let valuePage = blkAfterKey.firstPage;
                                let (len,newBlk) = try!(writeOverflow(blkAfterKey, &mut *strm, pageManager, fs));
                                (newBlk, ValueLocation::Overflowed(len,valuePage))
                            },
                            Blob::Array(a) => {
                                if a.is_empty() {
                                    // TODO maybe we need ValueLocation::Empty
                                    (blkAfterKey, ValueLocation::Buffer(a))
                                } else {
                                    let valuePage = blkAfterKey.firstPage;
                                    let (len,newBlk) = try!(writeOverflow(blkAfterKey, &mut &*a, pageManager, fs));
                                    (newBlk, ValueLocation::Overflowed(len,valuePage))
                                }
                            }
                        }
                    }
                }
            };

        // whether/not the key/value are to be overflowed is now already decided.
        // now all we have to do is decide if this key/value are going into this leaf
        // or not.  note that it is possible to overflow these and then have them not
        // fit into the current leaf and end up landing in the next leaf.

        st.blk = blkAfterValue;

        // TODO ignore prefixLen for overflowed keys?
        let newPrefixLen = if st.keys_in_this_leaf.is_empty() {
            defaultPrefixLen(&k)
        } else {
            bcmp::PrefixMatch(&*st.keys_in_this_leaf[0].key, &k, st.prefixLen)
        };
        let sofar = if newPrefixLen < st.prefixLen {
            // the prefixLen would change with the addition of this key,
            // so we need to recalc sofar
            let sum = st.keys_in_this_leaf.iter().map(|lp| leafPairSize(newPrefixLen, lp)).sum();;
            sum
        } else {
            st.sofarLeaf
        };
        let fit = {
            let needed = kLocNeed(&k, &kloc, newPrefixLen) + vLocNeed(&vloc);
            let used = sofar + LEAF_PAGE_OVERHEAD + 1 + newPrefixLen;
            if pgsz > used {
                let available = pgsz - used;
                (available >= needed)
            } else {
                false
            }
        };
        let writeThisPage = (! st.keys_in_this_leaf.is_empty()) && (! fit);

        if writeThisPage {
            try!(writeLeaf(&mut st, false, pb, fs, pgsz, pageManager, &mut *token));
        }

        // writeLeaf (above) may have emptied keys_in_this_leaf, so the prefix
        // length and sofar must be computed again before adding this pair.
        // TODO ignore prefixLen for overflowed keys?
        let newPrefixLen = if st.keys_in_this_leaf.is_empty() {
            defaultPrefixLen(&k)
        } else {
            bcmp::PrefixMatch(&*st.keys_in_this_leaf[0].key, &k, st.prefixLen)
        };
        let sofar = if newPrefixLen < st.prefixLen {
            // the prefixLen will change with the addition of this key,
            // so we need to recalc sofar
            let sum = st.keys_in_this_leaf.iter().map(|lp| leafPairSize(newPrefixLen, lp)).sum();;
            sum
        } else {
            st.sofarLeaf
        };

        // note that the LeafPair struct gets ownership of the key provided
        // from above.
        let lp = LeafPair {
                    key:k,
                    kLoc:kloc,
                    vLoc:vloc,
                    };

        st.sofarLeaf=sofar + leafPairSize(newPrefixLen, &lp);
        st.keys_in_this_leaf.push(lp);
        st.prefixLen=newPrefixLen;
    }

    if !st.keys_in_this_leaf.is_empty() {
        // this leaf is the root page only if it is the very first/only leaf
        let isRootNode = st.leaves.is_empty();
        try!(writeLeaf(&mut st, isRootNode, pb, fs, pgsz, pageManager, &mut *token));
    }
    Ok((st.blk,st.leaves,st.firstLeaf))
}

// Write one level of parent nodes over `children` (the pgitems of the level
// below, which is drained).  Returns the block remaining afterward and the
// pgitems of the level just written, which become the children for the next
// level up (a single pgitem means that page is the root).
fn writeParentNodes<SeekWrite>(startingBlk: PageBlock,
                               children: &mut Vec<pgitem>,
                               pgsz: usize,
                               fs: &mut SeekWrite,
                               pageManager: &IPages,
                               token: &mut PendingSegment,
                               lastLeaf: PageNum,
                               firstLeaf: PageNum,
                               pb: &mut PageBuilder,
                               ) -> Result<(PageBlock, Vec<pgitem>)> where SeekWrite : Seek+Write {
    // 2 for the page type and flags
    // 2 for the stored count
    // 5 for the extra ptr we will add at the end, a varint, 5 is worst case (page num < 4294967295L)
    // 4 for lastInt32
    const PARENT_PAGE_OVERHEAD: usize = 2 + 2 + 5 + 4;

    // space left on a parent page; a page which might become the root must
    // reserve one extra int32 (firstLeaf -- lastLeaf/lastInt32 are already
    // counted in the overhead)
    fn calcAvailable(currentSize: usize, couldBeRoot: bool, pgsz: usize) -> usize {
        let basicSize = pgsz - currentSize;
        let allowanceForRootNode = if couldBeRoot { SIZE_32 } else { 0 }; // first/last Leaf, lastInt32 already
        // TODO can this cause integer overflow?
        basicSize - allowanceForRootNode
    }

    // Serialize a parent page into pb: n+1 child page pointers (varints)
    // followed by n keys; an overflowed key stores its overflow page number
    // instead of the key bytes.  Drains `items`.
    fn buildParentPage(items: &mut Vec<pgitem>,
                       lastPtr: PageNum,
                       overflows: &HashMap<usize,PageNum>,
                       pb : &mut PageBuilder,
                       ) {
        pb.Reset();
        pb.PutByte(PageType::PARENT_NODE.to_u8());
        pb.PutByte(0u8);
        pb.PutInt16(items.len() as u16);
        // store all the ptrs, n+1 of them
        for x in items.iter() {
            pb.PutVarint(x.page as u64);
        }
        pb.PutVarint(lastPtr as u64);
        // store all the keys, n of them
        for (i,x) in items.drain(..).enumerate() {
            match overflows.get(&i) {
                Some(pg) => {
                    pb.PutByte(ValueFlag::FLAG_OVERFLOW);
                    pb.PutVarint(x.key.len() as u64);
                    pb.PutInt32(*pg as PageNum);
                },
                None => {
                    pb.PutByte(0u8);
                    pb.PutVarint(x.key.len() as u64);
                    pb.PutArray(&x.key);
                },
            }
        }
    }

    // Build and write one parent page, then push its pgitem onto the next
    // generation.  Handles block-boundary chaining the same way writeLeaf
    // does; the root node additionally records first/last leaf page numbers.
    fn writeParentPage<SeekWrite>(st: &mut ParentState,
                                  items: &mut Vec<pgitem>,
                                  overflows: &HashMap<usize,PageNum>,
                                  pgnum: PageNum,
                                  key: Box<[u8]>,
                                  isRootNode: bool,
                                  pb: &mut PageBuilder,
                                  lastLeaf: PageNum,
                                  fs: &mut SeekWrite,
                                  pageManager: &IPages,
                                  pgsz: usize,
                                  token: &mut PendingSegment,
                                  firstLeaf: PageNum,
                                  ) -> Result<()> where SeekWrite : Seek+Write {
        // assert st.sofar > 0
        let thisPageNumber = st.blk.firstPage;
        buildParentPage(items, pgnum, &overflows, pb);
        let nextBlk =
            if isRootNode {
                pb.SetPageFlag(PageFlag::FLAG_ROOT_NODE);
                pb.SetSecondToLastInt32(firstLeaf);
                pb.SetLastInt32(lastLeaf);
                PageBlock::new(thisPageNumber+1,st.blk.lastPage)
            } else {
                if st.blk.firstPage == st.blk.lastPage {
                    // last page of the block: chain to a freshly allocated block
                    pb.SetPageFlag(PageFlag::FLAG_BOUNDARY_NODE);
                    let newBlk = try!(pageManager.GetBlock(&mut *token));
                    pb.SetLastInt32(newBlk.firstPage);
                    newBlk
                } else {
                    PageBlock::new(thisPageNumber+1,st.blk.lastPage)
                }
            };
        try!(pb.Write(fs));
        if nextBlk.firstPage != (thisPageNumber+1) {
            try!(utils::SeekPage(fs, pgsz, nextBlk.firstPage));
        }
        st.sofar = 0;
        st.blk = nextBlk;
        let pg = pgitem {page:thisPageNumber, key:key};
        st.nextGeneration.push(pg);
        Ok(())
    }

    // this is the body of writeParentNodes
    let mut st = ParentState {nextGeneration:Vec::new(),sofar: 0,blk:startingBlk,};
    let mut items = Vec::new();
    let mut overflows
                       = HashMap::new();
    let count_children = children.len();
    // deal with all the children except the last one
    for pair in children.drain(0 .. count_children-1) {
        let pgnum = pair.page;

        let neededEitherWay = 1 + Varint::SpaceNeededFor(pair.key.len() as u64) + Varint::SpaceNeededFor(pgnum as u64);
        let neededForInline = neededEitherWay + pair.key.len();
        let neededForOverflow = neededEitherWay + SIZE_32;
        let couldBeRoot = st.nextGeneration.is_empty();

        let available = calcAvailable(st.sofar, couldBeRoot, pgsz);
        let fitsInline = available >= neededForInline;
        // prefer starting a new page (where the key fits inline) over
        // overflowing the key on this page
        let wouldFitInlineOnNextPage = (pgsz - PARENT_PAGE_OVERHEAD) >= neededForInline;
        let fitsOverflow = available >= neededForOverflow;
        let writeThisPage = (! fitsInline) && (wouldFitInlineOnNextPage || (! fitsOverflow));

        if writeThisPage {
            // assert sofar > 0
            // we need to make a copy of this key because writeParentPage needs to own one,
            // but we still need to put this pair in the items (below).
            let mut copy_key = vec![0; pair.key.len()].into_boxed_slice();
            copy_key.clone_from_slice(&pair.key);
            try!(writeParentPage(&mut st, &mut items, &overflows, pair.page, copy_key, false, pb, lastLeaf, fs, pageManager, pgsz, &mut *token, firstLeaf));
            assert!(items.is_empty());
        }

        if st.sofar == 0 {
            // starting a fresh page; account for its fixed overhead
            st.sofar = PARENT_PAGE_OVERHEAD;
            assert!(items.is_empty());
        }

        if calcAvailable(st.sofar, st.nextGeneration.is_empty(), pgsz) >= neededForInline {
            st.sofar = st.sofar + neededForInline;
        } else {
            // the key goes to its own overflow page(s)
            let keyOverflowFirstPage = st.blk.firstPage;
            let (_,newBlk) = try!(writeOverflow(st.blk, &mut &*pair.key, pageManager, fs));
            st.sofar = st.sofar + neededForOverflow;
            st.blk = newBlk;
            // items.len() is the index that this pair is about to get, just below
            overflows.insert(items.len(),keyOverflowFirstPage);
        }

        items.push(pair);
    }
    assert!(children.len() == 1);
    // the last child always lands on the current page; that page is the root
    // node if no other parent page was written at this level
    let isRootNode = st.nextGeneration.is_empty();
    let pgitem {page: pgnum, key: key} = children.remove(0);
    assert!(children.is_empty());
    try!(writeParentPage(&mut st, &mut items, &overflows, pgnum, key, isRootNode, pb, lastLeaf, fs, pageManager, pgsz, &mut *token, firstLeaf));
    Ok((st.blk,st.nextGeneration))
}

// this is the body of Create
let pgsz = pageManager.PageSize();
let mut pb = PageBuilder::new(pgsz);
let mut token = try!(pageManager.Begin());
let startingBlk = try!(pageManager.GetBlock(&mut token));
try!(utils::SeekPage(fs, pgsz, startingBlk.firstPage));

// TODO this is a buffer just for the purpose of being reused
// in cases where the blob is provided as a stream, and we need
// read a bit of it to figure out if it might fit inline rather
// than overflow.
let mut vbuf = vec![0;pgsz].into_boxed_slice();
let (blkAfterLeaves, leaves, firstLeaf) = try!(writeLeaves(startingBlk, pageManager, source, &mut vbuf, fs, &mut pb, &mut token));

// all the leaves are written.
// now write the parent pages.
// maybe more than one level of them.
// keep writing until we have written a level which has only one node,
// which is the root node.

let lastLeaf = leaves[leaves.len()-1].page;

let rootPage = {
    let mut blk = blkAfterLeaves;
    let mut children = leaves;
    loop {
        let (newBlk, newChildren) = try!(writeParentNodes(blk, &mut children, pgsz, fs, pageManager, &mut token, lastLeaf, firstLeaf, &mut pb));
        assert!(children.is_empty());
        blk = newBlk;
        children = newChildren;
        if children.len()==1 {
            break;
        }
    }
    children[0].page
};

let g = try!(pageManager.End(token, rootPage));
Ok((g,rootPage))
}

// A Read implementation that streams the bytes of one overflowed key/value
// back out of the file, following the chain of overflow pages/blocks.
struct myOverflowReadStream {
    fs: File,
    len: usize, // same type as ValueLength(), max len of a single value
    firstPage: PageNum, // TODO will be needed later for Seek trait
    buf: Box<[u8]>,                          // one-page scratch buffer
    currentPage: PageNum,                    // page currently loaded in buf
    sofarOverall: usize,                     // bytes delivered to the caller so far
    sofarThisPage: usize,                    // bytes consumed from the current page
    firstPageInBlock: PageNum,               // first page of the current overflow block
    offsetToLastPageInThisBlock: PageNum,
    countRegularDataPagesInBlock: PageNum,   // full data pages in this block
    boundaryPageNumber: PageNum,             // 0 when this block has no boundary page
    bytesOnThisPage: usize,                  // payload bytes on the current page
    offsetOnThisPage: usize,                 // where the payload starts on the current page
}

impl myOverflowReadStream {
    // Open the file and position the stream at the first overflow page.
    fn new(path: &str, pgsz: usize, firstPage: PageNum, len: usize) -> Result<myOverflowReadStream> {
        let f = try!(OpenOptions::new()
            .read(true)
            .open(path));
        let mut res =
            myOverflowReadStream {
                fs: f,
                len: len,
                firstPage: firstPage,
                buf: vec![0;pgsz].into_boxed_slice(),
                currentPage: firstPage,
                sofarOverall: 0,
                sofarThisPage: 0,
                firstPageInBlock: 0,
                offsetToLastPageInThisBlock: 0, // add to firstPageInBlock to get the last one
                countRegularDataPagesInBlock: 0,
                boundaryPageNumber: 0,
                bytesOnThisPage: 0,
                offsetOnThisPage: 0,
            };
        try!(res.ReadFirstPage());
        Ok(res)
    }

    fn len(&self) -> usize {
        self.len
    }

    // TODO consider supporting Seek trait

    // Read the page at currentPage into buf and compute how many payload
    // bytes this page holds and at what offset they start.
    fn ReadPage(&mut self) -> Result<()> {
        try!(utils::SeekPage(&mut self.fs, self.buf.len(), self.currentPage));
        try!(utils::ReadFully(&mut self.fs, &mut *self.buf));
        // assert PageType is OVERFLOW
        self.sofarThisPage = 0;
        if self.currentPage == self.firstPageInBlock {
            // the first page of a block carries a 2-byte header and a trailing int32
            self.bytesOnThisPage = self.buf.len() - (2 + SIZE_32);
            self.offsetOnThisPage = 2;
        } else if self.currentPage == self.boundaryPageNumber {
            // a boundary page gives up its trailing int32 for the next page number
            self.bytesOnThisPage = self.buf.len() - SIZE_32;
            self.offsetOnThisPage = 0;
        } else {
            // assert currentPage > firstPageInBlock
            // assert currentPage < boundaryPageNumber OR boundaryPageNumber = 0
            self.bytesOnThisPage = self.buf.len();
            self.offsetOnThisPage = 0;
        }
        Ok(())
    }

    fn GetLastInt32(&self) -> u32 {
        let at = self.buf.len() - SIZE_32;
        read_u32_be(&self.buf[at .. at+4])
    }

    fn PageType(&self) -> Result<PageType> {
        PageType::from_u8(self.buf[0])
    }

    fn CheckPageFlag(&self, f: u8) -> bool {
        0 != (self.buf[1] & f)
    }

    // Read the first page of an overflow block and decode the block layout
    // (boundary page number, count of plain data pages) from its flags and
    // trailing int32.
    fn ReadFirstPage(&mut self) -> Result<()> {
        self.firstPageInBlock = self.currentPage;
        try!(self.ReadPage());
        if try!(self.PageType()) != (PageType::OVERFLOW_NODE) {
            return Err(LsmError::CorruptFile("first overflow page has invalid page type"));
        }
        if self.CheckPageFlag(PageFlag::FLAG_BOUNDARY_NODE) {
            // first page landed on a boundary node
            // lastInt32 is the next page number, which we'll fetch later
            self.boundaryPageNumber = self.currentPage;
            self.offsetToLastPageInThisBlock = 0;
            self.countRegularDataPagesInBlock = 0;
        } else {
            self.offsetToLastPageInThisBlock = self.GetLastInt32();
            if self.CheckPageFlag(PageFlag::FLAG_ENDS_ON_BOUNDARY) {
                self.boundaryPageNumber = self.currentPage + self.offsetToLastPageInThisBlock;
                self.countRegularDataPagesInBlock = self.offsetToLastPageInThisBlock - 1;
            } else {
                self.boundaryPageNumber = 0;
                self.countRegularDataPagesInBlock = self.offsetToLastPageInThisBlock;
            }
        }
        Ok(())
    }

    // Copy up to `wanted` bytes into ba[offset..].  Full data pages may be
    // read directly into the caller's buffer, bypassing the scratch page.
    fn Read(&mut self, ba: &mut [u8], offset: usize, wanted: usize) -> Result<usize> {
        if self.sofarOverall >= self.len {
            Ok(0)
        } else {
            let mut direct = false;
            if self.sofarThisPage >= self.bytesOnThisPage {
                if self.currentPage == self.boundaryPageNumber {
                    // boundary page: its lastInt32 is the next block's first page
                    self.currentPage = self.GetLastInt32();
                    try!(self.ReadFirstPage());
                } else {
                    // we need a new page.  and if it's a full data page,
                    // and if wanted is big enough to take all of it, then
                    // we want to read (at least) it directly into the
                    // buffer provided by the caller.  we already know
                    // this candidate page cannot be the first page in a
                    // block.
                    let maybeDataPage = self.currentPage + 1;
                    let isDataPage =
                        if self.boundaryPageNumber > 0 {
                            ((self.len - self.sofarOverall) >= self.buf.len()) && (self.countRegularDataPagesInBlock > 0) && (maybeDataPage > self.firstPageInBlock) && (maybeDataPage < self.boundaryPageNumber)
                        } else {
                            ((self.len - self.sofarOverall) >= self.buf.len()) && (self.countRegularDataPagesInBlock > 0) && (maybeDataPage > self.firstPageInBlock) && (maybeDataPage <= (self.firstPageInBlock + self.countRegularDataPagesInBlock))
                        };

                    if isDataPage && (wanted >= self.buf.len()) {
                        // assert (currentPage + 1) > firstPageInBlock
                        //
                        // don't increment currentPage here because below, we will
                        // calculate how many pages we actually want to do.
                        direct = true;
                        self.bytesOnThisPage = self.buf.len();
                        self.sofarThisPage = 0;
                        self.offsetOnThisPage = 0;
                    } else {
                        self.currentPage = self.currentPage + 1;
                        try!(self.ReadPage());
                    }
                }
            }
            if direct {
                // currentPage has not been incremented yet
                //
                // skip the buffer.  note, therefore, that the contents of the
                // buffer are "invalid" in that they do not correspond to currentPage
                //
                let numPagesWanted = (wanted / self.buf.len()) as PageNum;
                // assert countRegularDataPagesInBlock > 0
                let lastDataPageInThisBlock = self.firstPageInBlock + self.countRegularDataPagesInBlock;
                let theDataPage = self.currentPage + 1;
                let numPagesAvailable =
                    if self.boundaryPageNumber>0 {
                        self.boundaryPageNumber - theDataPage
                    } else {
                        lastDataPageInThisBlock - theDataPage + 1
                    };
                let numPagesToFetch = std::cmp::min(numPagesWanted, numPagesAvailable) as PageNum;
                let bytesToFetch = {
                    // clamp the multi-page read to the remaining value length
                    let bytesToFetch = (numPagesToFetch as usize) * self.buf.len();
                    let available = self.len - self.sofarOverall;
                    if bytesToFetch > available {
                        available
                    } else {
                        bytesToFetch
                    }
                };
                // assert bytesToFetch <= wanted

                try!(utils::SeekPage(&mut self.fs, self.buf.len(), theDataPage));
                try!(utils::ReadFully(&mut self.fs, &mut ba[offset .. offset + bytesToFetch]));
                self.sofarOverall = self.sofarOverall + bytesToFetch;
                self.currentPage = self.currentPage + numPagesToFetch;
                // mark the (stale) scratch page fully consumed so the next call pages forward
                self.sofarThisPage = self.buf.len();
                Ok(bytesToFetch)
            } else {
                // copy out of the scratch page buffer
                let available = std::cmp::min(self.bytesOnThisPage - self.sofarThisPage, self.len - self.sofarOverall);
                let num = std::cmp::min(available, wanted);
                for i in 0 .. num {
                    ba[offset+i] = self.buf[self.offsetOnThisPage + self.sofarThisPage + i];
                }
                self.sofarOverall = self.sofarOverall + num;
                self.sofarThisPage = self.sofarThisPage + num;
                Ok(num)
            }
        }
    }
}

impl Read for myOverflowReadStream {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let len = buf.len();
        match self.Read(buf, 0, len) {
            Ok(v) => Ok(v),
            Err(e) => {
                // this interface requires io::Result, so we shoehorn the others into it
                match e {
                    LsmError::Io(e) => Err(e),
                    _ => Err(std::io::Error::new(std::io::ErrorKind::Other, e.description())),
                }
            },
        }
    }
}

// Read an entire overflowed key/value (of length buf.len()) into buf.
fn readOverflow(path: &str, pgsz: usize, firstPage: PageNum, buf: &mut [u8]) -> Result<usize> {
    let mut ostrm = try!(myOverflowReadStream::new(path, pgsz, firstPage, buf.len()));
    let res = try!(utils::ReadFully(&mut ostrm, buf));
    Ok(res)
}

// Cursor over the b-tree of one written segment.
struct SegmentCursor<'a> {
    path: String,

    // TODO in the f# version, these three were a closure.
    // it would be nice to make it work that way again.
    // so that this code would not have specific knowledge
    // of the InnerPart type.
    inner: &'a InnerPart,
    segnum: SegmentNum,
    csrnum: u64,

    blocks: Vec<PageBlock>, // TODO will be needed later for stray checking
    fs: File,
    len: u64,                       // file length; used to bounds-check page numbers
    rootPage: PageNum,
    pr: PageBuffer,                 // the currently loaded page
    currentPage: PageNum,
    leafKeys: Vec<usize>,           // offset of each key entry within the current leaf
    previousLeaf: PageNum,          // 0 if the current leaf has no predecessor
    currentKey: Option<usize>,      // index into leafKeys, None when invalid
    prefix: Option<Box<[u8]>>,      // shared key prefix of the current leaf
    firstLeaf: PageNum,
    lastLeaf: PageNum,
}

impl<'a> SegmentCursor<'a> {
    // Open a cursor on the segment rooted at rootPage.  Reads the root page
    // to learn the first/last leaf page numbers.
    fn new(path: &str,
           pgsz: usize,
           rootPage: PageNum,
           blocks: Vec<PageBlock>,
           inner: &'a InnerPart,
           segnum: SegmentNum,
           csrnum: u64
          ) -> Result<SegmentCursor<'a>> {

        // TODO consider not passsing in the path, and instead,
        // making the cursor call back to inner.OpenForReading...
        let mut f = try!(OpenOptions::new()
                .read(true)
                .open(path));

        // TODO the len is used for checking to make sure we don't stray
        // to far.  This should probably be done with the blocks provided
        // by the caller, not by looking at the full length of the file,
        // which this cursor shouldn't care about.
        let len = try!(seek_len(&mut f));

        let mut res = SegmentCursor {
            path: String::from_str(path),
            fs: f,
            blocks: blocks,
            inner: inner,
            segnum: segnum,
            csrnum: csrnum,
            len: len,
            rootPage: rootPage,
            pr: PageBuffer::new(pgsz),
            currentPage: 0,
            leafKeys: Vec::new(),
            previousLeaf: 0,
            currentKey: None,
            prefix: None,
            firstLeaf: 0, // temporary
            lastLeaf: 0, // temporary
        };

        if ! try!(res.setCurrentPage(rootPage)) {
            // TODO fix this error.  or assert, because we previously verified
            // that the root page was in the block list we were given.
            return Err(LsmError::Misc("failed to read root page"));
        }

        let pt = try!(res.pr.PageType());
        if pt == PageType::LEAF_NODE {
            // single-leaf segment: the root is its own first and last leaf
            res.firstLeaf = rootPage;
            res.lastLeaf = rootPage;
        } else if pt == PageType::PARENT_NODE {
            if ! res.pr.CheckPageFlag(PageFlag::FLAG_ROOT_NODE) {
                return Err(LsmError::CorruptFile("root page lacks flag"));
            }
            // the root parent page stores first/last leaf in its final two int32s
            res.firstLeaf = res.pr.GetSecondToLastInt32() as PageNum;
            res.lastLeaf = res.pr.GetLastInt32() as PageNum;
        } else {
            return Err(LsmError::CorruptFile("root page has invalid page type"));
        }

        Ok(res)
    }

    // Discard all per-leaf state (called whenever the current page changes).
    fn resetLeaf(&mut self) {
        self.leafKeys.clear();
        self.previousLeaf = 0;
        self.currentKey = None;
        self.prefix = None;
    }

    // Load the page at pgnum into pr.  Returns Ok(true) on success; errors
    // on page 0 or a page beyond the end of the file.
    fn setCurrentPage(&mut self, pgnum: PageNum) -> Result<bool> {
        // TODO use self.blocks to make sure we are not straying out of bounds.

        // TODO so I think this function actually should be Result<()>.
        // it used to return Ok(false) in situations that I think should
        // actually have been errors.  not 100% sure yet.  still trying
        // to verify all the cases.

        // TODO if currentPage = pgnum already...

        self.currentPage = pgnum;
        self.resetLeaf();
        if 0 == self.currentPage {
            Err(LsmError::InvalidPageNumber)
            //Ok(false)
        } else {
            // refuse to go to a page beyond the end of the stream
            // TODO is this the right place for this check?
            let pos = (self.currentPage - 1) as u64 * self.pr.PageSize() as u64;
            if pos + self.pr.PageSize() as u64 <= self.len {
                try!(utils::SeekPage(&mut self.fs, self.pr.PageSize(), self.currentPage));
                try!(self.pr.Read(&mut self.fs));
                Ok(true)
            } else {
                Err(LsmError::InvalidPageNumber)
                //Ok(false)
            }
        }
    }

    // Advance within the current leaf; false if already on the last key.
    fn nextInLeaf(&mut self) -> bool {
        match self.currentKey {
            Some(cur) => {
                if (cur+1) < self.leafKeys.len() {
                    self.currentKey = Some(cur + 1);
                    true
                } else {
                    false
                }
            },
            None => {
                false
            },
        }
    }

    // Step back within the current leaf; false if already on the first key.
    fn prevInLeaf(&mut self) -> bool {
        match self.currentKey {
            Some(cur) => {
                if cur > 0 {
                    self.currentKey = Some(cur - 1);
                    true
                } else {
                    false
                }
            },
            None => {
                false
            },
        }
    }

    // Advance *cur past the key entry that starts at *cur.
    fn skipKey(&self, cur: &mut usize) {
        let kflag = self.pr.GetByte(cur);
        let klen = self.pr.GetVarint(cur) as usize;
        if 0 == (kflag & ValueFlag::FLAG_OVERFLOW) {
            // an inline key stores only the bytes after the shared prefix
            let prefixLen = match self.prefix {
                Some(ref a) => a.len(),
                None => 0
            };
            *cur = *cur + (klen - prefixLen);
        } else {
            // an overflowed key stores just a page number
            *cur = *cur + SIZE_32;
        }
    }

    // Advance *cur past the value entry that starts at *cur.
    fn skipValue(&self, cur: &mut usize) {
        let vflag = self.pr.GetByte(cur);
        if 0 != (vflag & ValueFlag::FLAG_TOMBSTONE) {
            // a tombstone has no length or payload
            ()
        } else {
            let vlen = self.pr.GetVarint(cur) as usize;
            if 0 != (vflag & ValueFlag::FLAG_OVERFLOW) {
                *cur = *cur + SIZE_32;
            } else {
                *cur = *cur + vlen;
            }
        }
    }

    // Parse the current page as a leaf: load the shared key prefix and
    // record the offset of each key entry in leafKeys.
    fn readLeaf(&mut self) -> Result<()> {
        self.resetLeaf();
        let mut cur = 0;
        let pt = try!(PageType::from_u8(self.pr.GetByte(&mut cur)));
        if pt != PageType::LEAF_NODE {
            return Err(LsmError::CorruptFile("leaf has invalid page type"));
        }
        self.pr.GetByte(&mut cur);
        self.previousLeaf = self.pr.GetInt32(&mut cur) as PageNum;
        let prefixLen = self.pr.GetByte(&mut cur) as usize;
        if prefixLen > 0 {
            let mut a = vec![0;prefixLen].into_boxed_slice();
            self.pr.GetIntoArray(&mut cur, &mut a);
            self.prefix = Some(a);
        } else {
            self.prefix = None;
        }
        let countLeafKeys = self.pr.GetInt16(&mut cur) as usize;
        // assert countLeafKeys>0
        // resize leafKeys to exactly countLeafKeys, reusing its allocation
        self.leafKeys.truncate(countLeafKeys);
        while self.leafKeys.len() < countLeafKeys {
            self.leafKeys.push(0);
        }
        for i in 0 .. countLeafKeys {
            self.leafKeys[i] = cur;
            self.skipKey(&mut cur);
            self.skipValue(&mut cur);
        }
        Ok(())
    }

    // Borrow the nth key of the current leaf as a KeyRef; an overflowed key
    // is fetched into an owned buffer.
    fn keyInLeaf2(&'a self, n: usize) -> Result<KeyRef<'a>> {
        let mut cur = self.leafKeys[n as usize];
        let kflag = self.pr.GetByte(&mut cur);
        let klen = self.pr.GetVarint(&mut cur) as usize;
        if 0 == (kflag & ValueFlag::FLAG_OVERFLOW) {
            match self.prefix {
                Some(ref a) => {
                    Ok(KeyRef::Prefixed(&a, self.pr.get_slice(cur, klen - a.len())))
                },
                None => {
                    Ok(KeyRef::Array(self.pr.get_slice(cur, klen)))
                },
            }
        } else {
            let pgnum = self.pr.GetInt32(&mut cur) as PageNum;
            let mut ostrm = try!(myOverflowReadStream::new(&self.path, self.pr.PageSize(), pgnum, klen));
            let mut x_k = Vec::new();
            try!(ostrm.read_to_end(&mut x_k));
            let x_k = x_k.into_boxed_slice();
            Ok(KeyRef::Overflowed(x_k))
        }
    }

    // Copy the nth key of the current leaf into an owned buffer,
    // re-attaching the shared prefix when there is one.
    fn keyInLeaf(&self, n: usize) -> Result<Box<[u8]>> {
        let mut cur = self.leafKeys[n as usize];
        let kflag = self.pr.GetByte(&mut cur);
        let klen = self.pr.GetVarint(&mut cur) as usize;
        let mut res = vec![0;klen].into_boxed_slice();
        if 0 == (kflag & ValueFlag::FLAG_OVERFLOW) {
            match self.prefix {
                Some(ref a) => {
                    let prefixLen = a.len();
                    for i in 0 .. prefixLen {
                        res[i] = a[i];
                    }
                    self.pr.GetIntoArray(&mut cur, &mut res[prefixLen .. klen]);
                    Ok(res)
                },
                None => {
                    self.pr.GetIntoArray(&mut cur, &mut res);
                    Ok(res)
                },
            }
        } else {
            let pgnum = self.pr.GetInt32(&mut cur) as PageNum;
            try!(readOverflow(&self.path, self.pr.PageSize(), pgnum, &mut res));
            Ok(res)
        }
    }

    // Compare the nth key of the current leaf against `other`.
    fn compareKeyInLeaf(&self, n: usize, other: &[u8]) -> Result<Ordering> {
        let mut cur = self.leafKeys[n as usize];
        let kflag = self.pr.GetByte(&mut cur);
        let klen = self.pr.GetVarint(&mut cur) as usize;
        if 0 == (kflag & ValueFlag::FLAG_OVERFLOW) {
            let res =
                match self.prefix {
                    Some(ref a) => {
                        self.pr.CompareWithPrefix(cur, a, klen, other)
                    },
                    None => {
                        self.pr.Compare(cur, klen, other)
                    },
                };
            Ok(res)
        } else {
            // TODO this could be more efficient.  we could compare the key
            // in place in the overflow without fetching the entire thing.
            // TODO overflowed keys are not prefixed.  should they be?
            let pgnum = self.pr.GetInt32(&mut cur) as PageNum;
            let mut k = vec![0;klen].into_boxed_slice();
            try!(readOverflow(&self.path, self.pr.PageSize(), pgnum, &mut k));
            let res = bcmp::Compare(&*k, other);
            Ok(res)
        }
    }

    // Recursive binary search within the current leaf.  `le`/`ge` carry the
    // best candidate indexes seen so far for SEEK_LE / SEEK_GE.  Returns the
    // chosen index (per sop) and whether the match was exact.
    fn searchLeaf(&mut self, k: &KeyRef, min:usize, max:usize, sop:SeekOp, le: Option<usize>, ge: Option<usize>) -> Result<(Option<usize>,bool)> {
        if max < min {
            match sop {
                SeekOp::SEEK_EQ => Ok((None, false)),
                SeekOp::SEEK_LE => Ok((le, false)),
                SeekOp::SEEK_GE => Ok((ge, false)),
            }
        } else {
            let mid = (max + min) / 2;
            // assert mid >= 0
            let cmp = {
                let q = try!(self.keyInLeaf2(mid));
                KeyRef::cmp(&q, k)
            };
            match cmp {
                Ordering::Equal => Ok((Some(mid), true)),
                Ordering::Less => self.searchLeaf(k, (mid+1), max, sop, Some(mid), ge),
                Ordering::Greater =>
                    // we could just recurse with mid-1, but that would overflow if
                    // mod is 0, so we catch that case here.
                    if mid==0 {
                        match sop {
                            SeekOp::SEEK_EQ => Ok((None, false)),
                            SeekOp::SEEK_LE => Ok((le, false)),
                            SeekOp::SEEK_GE => Ok((Some(mid), false)),
                        }
                    } else {
                        self.searchLeaf(k, min, (mid-1), sop, le, Some(mid))
                    },
            }
        }
    }

    // Decode the current page as a parent node: n+1 child page pointers and
    // n keys (overflowed keys are fetched into owned buffers).
    fn readParentPage(&mut self) -> Result<(Vec<PageNum>, Vec<KeyRef>)> {
        let mut cur = 0;
        let pt = try!(PageType::from_u8(self.pr.GetByte(&mut cur)));
        if pt != PageType::PARENT_NODE {
            return Err(LsmError::CorruptFile("parent page has invalid page type"));
        }
        cur = cur + 1; // page flags
        let count = self.pr.GetInt16(&mut cur);
        let mut ptrs = Vec::new();
        let mut keys = Vec::new();
        for _ in 0 .. count+1 {
            ptrs.push(self.pr.GetVarint(&mut cur) as PageNum);
        }
        for _ in 0 .. count {
            let kflag = self.pr.GetByte(&mut cur);
            let klen = self.pr.GetVarint(&mut cur) as usize;
            if 0 == (kflag & ValueFlag::FLAG_OVERFLOW) {
                keys.push(KeyRef::Array(self.pr.get_slice(cur, klen)));
                cur = cur + klen;
            } else {
                let firstPage = self.pr.GetInt32(&mut cur) as PageNum;
                let pgsz = self.pr.PageSize();
                let mut ostrm = try!(myOverflowReadStream::new(&self.path, pgsz, firstPage, klen));
                let mut x_k = Vec::new();
                try!(ostrm.read_to_end(&mut x_k));
                let x_k = x_k.into_boxed_slice();
                keys.push(KeyRef::Overflowed(x_k));
            }
        }
        Ok((ptrs,keys))
    }

    // this is used when moving forward through the leaf pages.
    // we need to skip any overflows.  when moving backward,
    // this is not necessary, because each leaf has a pointer to
    // the leaf before it.
    // TODO it's unfortunate that Next is the slower operation
    // when it is far more common than Prev.  OTOH, the pages
    // are written as we stream through a set of kvp objects,
    // and we don't want to rewind, and we want to write each
    // page only once, and we want to keep the minimum amount
    // of stuff in memory as we go.  and this code only causes
    // a perf difference if there are overflow pages between
    // the leaves.
    fn searchForwardForLeaf(&mut self) -> Result<bool> {
        let pt = try!(self.pr.PageType());
        if pt == PageType::LEAF_NODE {
            Ok(true)
        } else if pt == PageType::PARENT_NODE {
            // if we bump into a parent node, that means there are
            // no more leaves.
            Ok(false)
        } else {
            let lastInt32 = self.pr.GetLastInt32() as PageNum;
            //
            // an overflow page has a value in its LastInt32 which
            // is one of two things.
            //
            // if it's a boundary node, it's the page number of the
            // next page in the segment.
            //
            // otherwise, it's the number of pages to skip ahead.
            // this skip might take us to whatever follows this
            // overflow (which could be a leaf or a parent or
            // another overflow), or it might just take us to a
            // boundary page (in the case where the overflow didn't
            // fit).  it doesn't matter.  we just skip ahead.
            //
            if self.pr.CheckPageFlag(PageFlag::FLAG_BOUNDARY_NODE) {
                if try!(self.setCurrentPage(lastInt32)) {
                    self.searchForwardForLeaf()
                } else {
                    Ok(false)
                }
            } else {
                let lastPage = self.currentPage + lastInt32;
                let endsOnBoundary = self.pr.CheckPageFlag(PageFlag::FLAG_ENDS_ON_BOUNDARY);
                if endsOnBoundary {
                    // the overflow ends on a boundary page; hop to it and
                    // follow its pointer to the next block
                    if try!(self.setCurrentPage(lastPage)) {
                        let next = self.pr.GetLastInt32() as PageNum;
                        if try!(self.setCurrentPage(next)) {
                            self.searchForwardForLeaf()
                        } else {
                            Ok(false)
                        }
                    } else {
                        Ok(false)
                    }
                } else {
                    // skip past the whole overflow and keep looking
                    if try!(self.setCurrentPage(lastPage + 1)) {
                        self.searchForwardForLeaf()
                    } else {
                        Ok(false)
                    }
                }
            }
        }
    }

    fn leafIsValid(&self) -> bool {
        // TODO the bounds check of self.currentKey against self.leafKeys.len() could be an assert
        let ok = (!self.leafKeys.is_empty()) && (self.currentKey.is_some()) && (self.currentKey.expect("just did is_some") as usize) < self.leafKeys.len();
        ok
    }

    // Recursive descent from page `pg` looking for key `k` per `sop`.
    fn search(&mut self, pg: PageNum, k: &KeyRef, sop:SeekOp) -> Result<SeekResult> {
        if try!(self.setCurrentPage(pg)) {
            let pt = try!(self.pr.PageType());
            if PageType::LEAF_NODE == pt {
                try!(self.readLeaf());
                let tmp_countLeafKeys = self.leafKeys.len();
                let (newCur, equal) = try!(self.searchLeaf(k, 0, (tmp_countLeafKeys - 1), sop, None, None));
                self.currentKey = newCur;
                if SeekOp::SEEK_EQ != sop {
                    if ! self.leafIsValid() {
                        // if LE or GE failed on a given page, we might need
                        // to look at the next/prev leaf.
                        if SeekOp::SEEK_GE == sop {
                            let nextPage =
                                if self.pr.CheckPageFlag(PageFlag::FLAG_BOUNDARY_NODE) { self.pr.GetLastInt32() as PageNum }
                                else if self.currentPage == self.rootPage { 0 }
                                else { self.currentPage + 1 }
                                ;
                            if try!(self.setCurrentPage(nextPage)) && try!(self.searchForwardForLeaf()) {
                                try!(self.readLeaf());
                                self.currentKey = Some(0);
                            }
                        } else {
                            // SEEK_LE: fall back to the previous leaf's last key
                            let tmp_previousLeaf = self.previousLeaf;
                            if 0 == self.previousLeaf {
                                self.resetLeaf();
                            } else if try!(self.setCurrentPage(tmp_previousLeaf)) {
                                try!(self.readLeaf());
                                self.currentKey = Some(self.leafKeys.len() - 1);
                            }
                        }
                    }
                }
                if self.currentKey.is_none() {
                    Ok(SeekResult::Invalid)
                } else if equal {
                    Ok(SeekResult::Equal)
                } else {
                    Ok(SeekResult::Unequal)
                }
            } else if PageType::PARENT_NODE == pt {
                let next = {
                    let (ptrs, keys) = try!(self.readParentPage());
                    match Self::searchInParentPage(k, &ptrs, &keys, 0) {
                        Some(found) => found,
                        // k is greater than every key: descend via the extra last ptr
                        None => ptrs[ptrs.len() - 1],
                    }
                };
                self.search(next, k, sop)
            } else {
                unreachable!();
            }
        } else {
            Ok(SeekResult::Invalid)
        }
    }

    // Find the first parent-page key >= k (starting at index i) and return
    // the corresponding child pointer; None if k is greater than all keys.
    fn searchInParentPage(k: &KeyRef, ptrs: &Vec<PageNum>, keys: &Vec<KeyRef>, i: usize) -> Option<PageNum> {
        // TODO linear search? really?
// TODO also, this doesn't need to be recursive if i < keys.len() { let cmp = KeyRef::cmp(k, &keys[i]); if cmp==Ordering::Greater { Self::searchInParentPage(k, ptrs, keys, i+1) } else { Some(ptrs[i]) } } else { None } } } impl<'a> Drop for SegmentCursor<'a> { fn drop(&mut self) { self.inner.cursor_dropped(self.segnum, self.csrnum); } } impl<'a> ICursor<'a> for SegmentCursor<'a> { fn IsValid(&self) -> bool { self.leafIsValid() } fn SeekRef(&mut self, k: &KeyRef, sop:SeekOp) -> Result<SeekResult> { let rootPage = self.rootPage; self.search(rootPage, k, sop) } fn KeyRef(&'a self) -> Result<KeyRef<'a>> { match self.currentKey { None => Err(LsmError::CursorNotValid), Some(currentKey) => self.keyInLeaf2(currentKey), } } fn ValueRef(&'a self) -> Result<ValueRef<'a>> { match self.currentKey { None => Err(LsmError::CursorNotValid), Some(currentKey) => { let mut pos = self.leafKeys[currentKey as usize]; self.skipKey(&mut pos); let vflag = self.pr.GetByte(&mut pos); if 0 != (vflag & ValueFlag::FLAG_TOMBSTONE) { Ok(ValueRef::Tombstone) } else { let vlen = self.pr.GetVarint(&mut pos) as usize; if 0 != (vflag & ValueFlag::FLAG_OVERFLOW) { let pgnum = self.pr.GetInt32(&mut pos) as PageNum; let strm = try!(myOverflowReadStream::new(&self.path, self.pr.PageSize(), pgnum, vlen)); Ok(ValueRef::Overflowed(vlen, box strm)) } else { Ok(ValueRef::Array(self.pr.get_slice(pos, vlen))) } } } } } fn ValueLength(&self) -> Result<Option<usize>> { match self.currentKey { None => Err(LsmError::CursorNotValid), Some(currentKey) => { let mut cur = self.leafKeys[currentKey as usize]; self.skipKey(&mut cur); let vflag = self.pr.GetByte(&mut cur); if 0 != (vflag & ValueFlag::FLAG_TOMBSTONE) { Ok(None) } else { let vlen = self.pr.GetVarint(&mut cur) as usize; Ok(Some(vlen)) } } } } fn KeyCompare(&self, k_other: &KeyRef) -> Result<Ordering> { let k_me = try!(self.KeyRef()); let c = KeyRef::cmp(&k_me, &k_other); Ok(c) } fn First(&mut self) -> Result<()> { let firstLeaf = self.firstLeaf; if 
try!(self.setCurrentPage(firstLeaf)) { try!(self.readLeaf()); self.currentKey = Some(0); } Ok(()) } fn Last(&mut self) -> Result<()> { let lastLeaf = self.lastLeaf; if try!(self.setCurrentPage(lastLeaf)) { try!(self.readLeaf()); self.currentKey = Some(self.leafKeys.len() - 1); } Ok(()) } fn Next(&mut self) -> Result<()> { if ! self.nextInLeaf() { let nextPage = if self.pr.CheckPageFlag(PageFlag::FLAG_BOUNDARY_NODE) { self.pr.GetLastInt32() as PageNum } else if try!(self.pr.PageType()) == PageType::LEAF_NODE { if self.currentPage == self.rootPage { 0 } else { self.currentPage + 1 } } else { 0 } ; if try!(self.setCurrentPage(nextPage)) && try!(self.searchForwardForLeaf()) { try!(self.readLeaf()); self.currentKey = Some(0); } } Ok(()) } fn Prev(&mut self) -> Result<()> { if ! self.prevInLeaf() { let previousLeaf = self.previousLeaf; if 0 == previousLeaf { self.resetLeaf(); } else if try!(self.setCurrentPage(previousLeaf)) { try!(self.readLeaf()); self.currentKey = Some(self.leafKeys.len() - 1); } } Ok(()) } } #[derive(Clone)] struct HeaderData { // TODO currentState is an ordered copy of segments.Keys. eliminate duplication? // or add assertions and tests to make sure they never get out of sync? we wish // we had a form of HashMap that kept track of ordering. currentState: Vec<SegmentNum>, segments: HashMap<SegmentNum,SegmentInfo>, headerOverflow: Option<PageBlock>, changeCounter: u64, mergeCounter: u64, } const HEADER_SIZE_IN_BYTES: usize = 4096; impl PendingSegment { fn new(num: SegmentNum) -> PendingSegment { PendingSegment {blockList: Vec::new(), segnum: num} } fn AddBlock(&mut self, b: PageBlock) { //println!("seg {:?} got block {:?}", self.segnum, b); let len = self.blockList.len(); if (! (self.blockList.is_empty())) && (b.firstPage == self.blockList[len-1].lastPage+1) { // note that by consolidating blocks here, the segment info list will // not have information about the fact that the two blocks were // originally separate. 
that's okay, since all we care about here is // keeping track of which pages are used. but the btree code itself // is still treating the last page of the first block as a boundary // page, even though its pointer to the next block goes to the very // next page, because its page manager happened to give it a block // which immediately follows the one it had. self.blockList[len-1].lastPage = b.lastPage; } else { self.blockList.push(b); } } fn End(mut self, lastPage: PageNum) -> (SegmentNum, Vec<PageBlock>, Option<PageBlock>) { let len = self.blockList.len(); let leftovers = { let givenLastPage = self.blockList[len-1].lastPage; if lastPage < givenLastPage { self.blockList[len-1].lastPage = lastPage; Some (PageBlock::new(lastPage+1, givenLastPage)) } else { None } }; // consume self return blockList (self.segnum, self.blockList, leftovers) } } fn readHeader<R>(fs: &mut R) -> Result<(HeaderData,usize,PageNum,SegmentNum)> where R : Read+Seek { fn read<R>(fs: &mut R) -> Result<PageBuffer> where R : Read { let mut pr = PageBuffer::new(HEADER_SIZE_IN_BYTES); let got = try!(pr.Read(fs)); if got < HEADER_SIZE_IN_BYTES { Err(LsmError::CorruptFile("invalid header")) } else { Ok(pr) } } fn parse<R>(pr: &PageBuffer, cur: &mut usize, fs: &mut R) -> Result<(HeaderData, usize)> where R : Read+Seek { fn readSegmentList(pr: &PageBuffer, cur: &mut usize) -> Result<(Vec<SegmentNum>,HashMap<SegmentNum,SegmentInfo>)> { fn readBlockList(prBlocks: &PageBuffer, cur: &mut usize) -> Vec<PageBlock> { let count = prBlocks.GetVarint(cur) as usize; let mut a = Vec::new(); for _ in 0 .. 
count { let firstPage = prBlocks.GetVarint(cur) as PageNum; let countPages = prBlocks.GetVarint(cur) as PageNum; // blocks are stored as firstPage/count rather than as // firstPage/lastPage, because the count will always be // smaller as a varint a.push(PageBlock::new(firstPage,firstPage + countPages - 1)); } a } let count = pr.GetVarint(cur) as usize; let mut a = Vec::new(); // TODO capacity count let mut m = HashMap::new(); // TODO capacity count for _ in 0 .. count { let g = pr.GetVarint(cur) as SegmentNum; a.push(g); let root = pr.GetVarint(cur) as PageNum; let age = pr.GetVarint(cur) as u32; let blocks = readBlockList(pr, cur); if !block_list_contains_page(&blocks, root) { return Err(LsmError::RootPageNotInSegmentBlockList); } let info = SegmentInfo {root:root,age:age,blocks:blocks}; m.insert(g,info); } Ok((a,m)) } // -------- let pgsz = pr.GetInt32(cur) as usize; let changeCounter = pr.GetVarint(cur); let mergeCounter = pr.GetVarint(cur); let lenSegmentList = pr.GetVarint(cur) as usize; let overflowed = pr.GetByte(cur) != 0u8; let (state, segments, blk) = if overflowed { let lenChunk1 = pr.GetInt32(cur) as usize; let lenChunk2 = lenSegmentList - lenChunk1; let firstPageChunk2 = pr.GetInt32(cur) as PageNum; let extraPages = lenChunk2 / pgsz + if (lenChunk2 % pgsz) != 0 { 1 } else { 0 }; let extraPages = extraPages as PageNum; let lastPageChunk2 = firstPageChunk2 + extraPages - 1; let mut pr2 = PageBuffer::new(lenSegmentList); // TODO chain? 
// copy from chunk1 into pr2 try!(pr2.ReadPart(fs, 0, lenChunk1)); // now get chunk2 and copy it in as well try!(utils::SeekPage(fs, pgsz, firstPageChunk2)); try!(pr2.ReadPart(fs, lenChunk1, lenChunk2)); let mut cur2 = 0; let (state, segments) = try!(readSegmentList(&pr2, &mut cur2)); (state, segments, Some (PageBlock::new(firstPageChunk2, lastPageChunk2))) } else { let (state,segments) = try!(readSegmentList(pr, cur)); (state, segments, None) }; let hd = HeaderData { currentState: state, segments: segments, headerOverflow: blk, changeCounter: changeCounter, mergeCounter: mergeCounter, }; Ok((hd, pgsz)) } fn calcNextPage(pgsz: usize, len: usize) -> PageNum { let numPagesSoFar = (if pgsz > len { 1 } else { len / pgsz }) as PageNum; numPagesSoFar + 1 } // -------- let len = try!(seek_len(fs)); if len > 0 { try!(fs.seek(SeekFrom::Start(0 as u64))); let pr = try!(read(fs)); let mut cur = 0; let (h, pgsz) = try!(parse(&pr, &mut cur, fs)); let nextAvailablePage = calcNextPage(pgsz, len as usize); let nextAvailableSegmentNum = match h.currentState.iter().max() { Some(n) => n+1, None => 1, }; Ok((h, pgsz, nextAvailablePage, nextAvailableSegmentNum)) } else { let defaultPageSize = DEFAULT_SETTINGS.DefaultPageSize; let h = HeaderData { segments: HashMap::new(), currentState: Vec::new(), headerOverflow: None, changeCounter: 0, mergeCounter: 0, }; let nextAvailablePage = calcNextPage(defaultPageSize, HEADER_SIZE_IN_BYTES); let nextAvailableSegmentNum = 1; Ok((h, defaultPageSize, nextAvailablePage, nextAvailableSegmentNum)) } } fn consolidateBlockList(blocks: &mut Vec<PageBlock>) { blocks.sort_by(|a,b| a.firstPage.cmp(&b.firstPage)); loop { if blocks.len()==1 { break; } let mut did = false; for i in 1 .. 
blocks.len() {
            if blocks[i-1].lastPage+1 == blocks[i].firstPage {
                // adjacent blocks: merge the second into the first,
                // then restart the scan from the top.
                blocks[i-1].lastPage = blocks[i].lastPage;
                blocks.remove(i);
                did = true;
                break;
            }
        }
        if !did {
            break;
        }
    }
}

/// Given the list of blocks that are in use, return the list of gaps
/// between them: entry i of the result covers the pages strictly between
/// used block i and used block i+1 (after sorting by firstPage).
///
/// The span after the last used block is intentionally NOT included;
/// free space at the end of the file is tracked separately via nextPage.
///
/// NOTE(review): assumes the input has already been consolidated, so no
/// two blocks are adjacent or overlapping — otherwise a produced "gap"
/// could have firstPage > lastPage. Callers run consolidateBlockList
/// first; confirm if new call sites appear.
fn invertBlockList(blocks: &Vec<PageBlock>) -> Vec<PageBlock> {
    let len = blocks.len();
    if len == 0 {
        // no used blocks means no gaps. the previous code panicked here:
        // `0 .. len-1` underflows usize and remove(len-1) hits an empty vec.
        return Vec::new();
    }
    // PageBlock is a plain value type (copied by value elsewhere),
    // so cloning the whole vec is cheap and replaces a manual push loop.
    let mut result = blocks.clone();
    result.sort_by(|a,b| a.firstPage.cmp(&b.firstPage));
    for i in 0 .. len-1 {
        // rewrite entry i in place to describe the gap that follows it
        result[i].firstPage = result[i].lastPage+1;
        result[i].lastPage = result[i+1].firstPage-1;
    }
    // the final entry never describes a gap; drop it
    result.truncate(len-1);
    result
}

/// Collect every block known to be in use: all blocks of all committed
/// segments, all blocks of segments-in-waiting, the header block itself,
/// and the header overflow block (if any).
fn listAllBlocks(h: &HeaderData,
                 segmentsInWaiting: &HashMap<SegmentNum,SegmentInfo>,
                 pgsz: usize) -> Vec<PageBlock> {
    // the header always occupies the first HEADER_SIZE_IN_BYTES of the file
    let headerBlock = PageBlock::new(1, (HEADER_SIZE_IN_BYTES / pgsz) as PageNum);
    let mut blocks = Vec::new();

    // push every block of every segment in the given map
    fn grab(blocks: &mut Vec<PageBlock>, from: &HashMap<SegmentNum,SegmentInfo>) {
        for info in from.values() {
            for b in info.blocks.iter() {
                blocks.push(*b);
            }
        }
    }

    grab(&mut blocks, &h.segments);
    grab(&mut blocks, segmentsInWaiting);
    blocks.push(headerBlock);
    match h.headerOverflow {
        Some(blk) => blocks.push(blk),
        None => ()
    }
    blocks
}

use std::sync::Mutex;

// Each of the following small structs wraps the state guarded by one of the
// mutexes in InnerPart, so a lock always protects a coherent unit of state.

struct NextSeg {
    nextSeg: SegmentNum,
}

struct Space {
    nextPage: PageNum,
    freeBlocks: Vec<PageBlock>,
}

struct SafeSegmentsInWaiting {
    segmentsInWaiting: HashMap<SegmentNum,SegmentInfo>,
}

struct SafeMergeStuff {
    merging: HashSet<SegmentNum>,
    pendingMerges: HashMap<SegmentNum,Vec<SegmentNum>>,
}

struct SafeHeader {
    // TODO one level too much nesting
    header: HeaderData,
}

struct SafeCursors {
    nextCursorNum: u64,
    cursors: HashMap<u64,SegmentNum>,
    zombies: HashMap<SegmentNum,SegmentInfo>,
}

struct InnerPart {
    path: String,
    pgsz: usize,
    settings: DbSettings,
    nextSeg: Mutex<NextSeg>,
    space: Mutex<Space>,
    // TODO should the header mutex be an RWLock?
header: Mutex<SafeHeader>, segmentsInWaiting: Mutex<SafeSegmentsInWaiting>, mergeStuff: Mutex<SafeMergeStuff>, cursors: Mutex<SafeCursors>, } pub struct WriteLock<'a> { inner: Option<&'a InnerPart> } impl<'a> WriteLock<'a> { pub fn commitSegments(&self, newSegs: Vec<SegmentNum>) -> Result<()> { self.inner.unwrap().commitSegments(newSegs) } pub fn commitMerge(&self, newSegNum:SegmentNum) -> Result<()> { self.inner.unwrap().commitMerge(newSegNum) } } // TODO rename this pub struct db<'a> { inner: InnerPart, write_lock: Mutex<WriteLock<'a>>, } impl<'a> db<'a> { pub fn new(path: String, settings : DbSettings) -> Result<db<'a>> { let mut f = try!(OpenOptions::new() .read(true) .create(true) .open(&path)); let (header,pgsz,firstAvailablePage,nextAvailableSegmentNum) = try!(readHeader(&mut f)); let segmentsInWaiting = HashMap::new(); let mut blocks = listAllBlocks(&header, &segmentsInWaiting, pgsz); consolidateBlockList(&mut blocks); let mut freeBlocks = invertBlockList(&blocks); freeBlocks.sort_by(|a,b| b.count_pages().cmp(&a.count_pages())); let nextSeg = NextSeg { nextSeg: nextAvailableSegmentNum, }; let space = Space { nextPage: firstAvailablePage, freeBlocks: freeBlocks, }; let segmentsInWaiting = SafeSegmentsInWaiting { segmentsInWaiting: segmentsInWaiting, }; let mergeStuff = SafeMergeStuff { merging: HashSet::new(), pendingMerges: HashMap::new(), }; let header = SafeHeader { header: header, }; let cursors = SafeCursors { nextCursorNum: 1, cursors: HashMap::new(), zombies: HashMap::new(), }; let inner = InnerPart { path: path, pgsz: pgsz, settings: settings, header: Mutex::new(header), nextSeg: Mutex::new(nextSeg), space: Mutex::new(space), segmentsInWaiting: Mutex::new(segmentsInWaiting), mergeStuff: Mutex::new(mergeStuff), cursors: Mutex::new(cursors), }; // WriteLock contains a reference to another part of // the struct it is in. So we wrap it in an option, // and set it to null for now. We set it later when // somebody actually asks for the lock. 
let lck = WriteLock { inner: None }; let res = db { inner: inner, write_lock: Mutex::new(lck), }; Ok(res) } // TODO func to ask for the write lock without blocking? pub fn GetWriteLock(&'a self) -> Result<std::sync::MutexGuard<WriteLock<'a>>> { let mut lck = try!(self.write_lock.lock()); // set the inner reference lck.inner = Some(&self.inner); Ok(lck) } // the following methods are passthrus, exposing inner // stuff publicly. pub fn OpenCursor(&self) -> Result<LivingCursor> { self.inner.OpenCursor() } pub fn WriteSegmentFromSortedSequence<I>(&self, source: I) -> Result<SegmentNum> where I:Iterator<Item=Result<kvp>> { self.inner.WriteSegmentFromSortedSequence(source) } pub fn WriteSegment(&self, pairs: HashMap<Box<[u8]>,Box<[u8]>>) -> Result<SegmentNum> { self.inner.WriteSegment(pairs) } pub fn WriteSegment2(&self, pairs: HashMap<Box<[u8]>,Blob>) -> Result<SegmentNum> { self.inner.WriteSegment2(pairs) } pub fn merge(&self, level: u32, min: usize, max: Option<usize>) -> Result<Option<SegmentNum>> { self.inner.merge(level, min, max) } } // TODO this could be generic fn slice_within(sub: &[SegmentNum], within: &[SegmentNum]) -> Result<usize> { match within.iter().position(|&g| g == sub[0]) { Some(ndx_first) => { let count = sub.len(); if sub == &within[ndx_first .. ndx_first + count] { Ok(ndx_first) } else { Err(LsmError::Misc("not contiguous")) } }, None => { Err(LsmError::Misc("not contiguous")) }, } } impl InnerPart { fn cursor_dropped(&self, segnum: SegmentNum, csrnum: u64) { //println!("cursor_dropped"); let mut cursors = self.cursors.lock().unwrap(); // gotta succeed let seg = cursors.cursors.remove(&csrnum).expect("gotta be there"); assert_eq!(seg, segnum); match cursors.zombies.remove(&segnum) { Some(info) => { // TODO maybe allow this lock to fail with try_lock. the // worst that can happen is that these blocks don't get // reclaimed until some other day. 
let mut space = self.space.lock().unwrap(); // gotta succeed self.addFreeBlocks(&mut space, info.blocks); }, None => { }, } } fn getBlock(&self, space: &mut Space, specificSizeInPages: PageNum) -> PageBlock { if specificSizeInPages > 0 { if space.freeBlocks.is_empty() || specificSizeInPages > space.freeBlocks[0].count_pages() { let newBlk = PageBlock::new(space.nextPage, space.nextPage+specificSizeInPages-1); space.nextPage = space.nextPage + specificSizeInPages; newBlk } else { let headBlk = space.freeBlocks[0]; if headBlk.count_pages() > specificSizeInPages { // trim the block to size let blk2 = PageBlock::new(headBlk.firstPage, headBlk.firstPage+specificSizeInPages-1); space.freeBlocks[0].firstPage = space.freeBlocks[0].firstPage + specificSizeInPages; // TODO problem: the list is probably no longer sorted. is this okay? // is a re-sort of the list really worth it? blk2 } else { space.freeBlocks.remove(0); headBlk } } } else { if space.freeBlocks.is_empty() { let size = self.settings.PagesPerBlock; let newBlk = PageBlock::new(space.nextPage, space.nextPage+size-1) ; space.nextPage = space.nextPage + size; newBlk } else { let headBlk = space.freeBlocks[0]; space.freeBlocks.remove(0); headBlk } } } fn OpenForWriting(&self) -> io::Result<File> { OpenOptions::new() .read(true) .write(true) .open(&self.path) } fn OpenForReading(&self) -> io::Result<File> { OpenOptions::new() .read(true) .open(&self.path) } // this code should not be called in a release build. it helps // finds problems by zeroing out pages in blocks that // have been freed. fn stomp(&self, blocks:Vec<PageBlock>) -> Result<()> { let bad = vec![0;self.pgsz as usize].into_boxed_slice(); let mut fs = try!(OpenOptions::new() .read(true) .write(true) .open(&self.path)); for b in blocks { for x in b.firstPage .. 
b.lastPage+1 {
                try!(utils::SeekPage(&mut fs, self.pgsz, x));
                // write_all, not write: File::write may perform a short
                // write, which would leave the page only partially zeroed
                // and defeat the debugging purpose of stomp.
                try!(fs.write_all(&bad));
            }
        }
        Ok(())
    }

    /// Return blocks to the free list. This is the single entry point for
    /// all additions to freeBlocks.
    fn addFreeBlocks(&self, space: &mut Space, blocks:Vec<PageBlock>) {
        // all additions to the freeBlocks list should happen here
        // by calling this function.
        //
        // the list is kept consolidated and sorted by size descending.
        // unfortunately this requires two sorts, and they happen here
        // inside a critical section. but the benefit is considered
        // worth the trouble.

        // TODO it is important that freeBlocks contains no overlaps.
        // add debug-only checks to verify?

        // TODO is there such a thing as a block that is so small we
        // don't want to bother with it? what about a single-page block?
        // should this be a configurable setting?

        // TODO if the last block of the file is free, consider just
        // moving nextPage back.
        space.freeBlocks.extend(blocks);
        consolidateBlockList(&mut space.freeBlocks);
        // largest block first, so getBlock can serve requests from the head
        space.freeBlocks.sort_by(|a,b| b.count_pages().cmp(&a.count_pages()));
    }

    // a stored segmentinfo for a segment is a single blob of bytes.
    // root page
    // age
    // number of pairs
    // each pair is startBlock,countBlocks
    // all in varints

    /// Serialize hdr and write it to the first HEADER_SIZE_IN_BYTES of the
    /// file. If the segment list does not fit, the tail spills into a
    /// freshly allocated overflow block. Returns the PREVIOUS overflow
    /// block (if any) so the caller can free it.
    fn writeHeader(&self,
                   st: &mut SafeHeader,
                   space: &mut Space,
                   fs: &mut File,
                   mut hdr: HeaderData
                  ) -> Result<Option<PageBlock>> {
        // bytes needed to encode one SegmentInfo in the header blob
        fn spaceNeededForSegmentInfo(info: &SegmentInfo) -> usize {
            let mut a = 0;
            for t in info.blocks.iter() {
                a = a + Varint::SpaceNeededFor(t.firstPage as u64);
                a = a + Varint::SpaceNeededFor(t.count_pages() as u64);
            }
            a = a + Varint::SpaceNeededFor(info.root as u64);
            a = a + Varint::SpaceNeededFor(info.age as u64);
            a = a + Varint::SpaceNeededFor(info.blocks.len() as u64);
            a
        }

        // total bytes needed for the whole serialized segment list
        fn spaceForHeader(h: &HeaderData) -> usize {
            let mut a = Varint::SpaceNeededFor(h.currentState.len() as u64);
            // TODO use currentState with a lookup into h.segments instead?
            // should be the same, right?
for (g,info) in h.segments.iter() {
                a = a + spaceNeededForSegmentInfo(&info) + Varint::SpaceNeededFor(*g);
            }
            a
        }

        // Serialize the segment list into an exactly-sized PageBuilder.
        fn buildSegmentList(h: &HeaderData) -> PageBuilder {
            let space = spaceForHeader(h);
            let mut pb = PageBuilder::new(space);
            // TODO format version number
            pb.PutVarint(h.currentState.len() as u64);
            for g in h.currentState.iter() {
                pb.PutVarint(*g);
                match h.segments.get(&g) {
                    Some(info) => {
                        pb.PutVarint(info.root as u64);
                        pb.PutVarint(info.age as u64);
                        pb.PutVarint(info.blocks.len() as u64);
                        // we store PageBlock as first/count instead of first/last, since the
                        // count will always compress better as a varint.
                        for t in info.blocks.iter() {
                            pb.PutVarint(t.firstPage as u64);
                            pb.PutVarint(t.count_pages() as u64);
                        }
                    },
                    None => panic!("segment num in currentState but not in segments")
                }
            }
            // the size estimate must have been exact
            assert!(0 == pb.Available());
            pb
        }

        let mut pb = PageBuilder::new(HEADER_SIZE_IN_BYTES);
        pb.PutInt32(self.pgsz as u32);
        pb.PutVarint(hdr.changeCounter);
        pb.PutVarint(hdr.mergeCounter);

        let pbSegList = buildSegmentList(&hdr);
        let buf = pbSegList.Buffer();
        pb.PutVarint(buf.len() as u64);

        let headerOverflow =
            if pb.Available() >= (buf.len() + 1) {
                // the whole segment list fits in the header page
                pb.PutByte(0u8);
                pb.PutArray(buf);
                None
            } else {
                // overflow: keep the first `fits` bytes inline and spill the
                // rest to a dedicated block. reserve 4+4 bytes for the two
                // int32s written below (chunk1 length, chunk2 first page).
                pb.PutByte(1u8);
                let fits = pb.Available() - 4 - 4;
                let extra = buf.len() - fits;
                // round up to whole pages
                let extraPages = extra / self.pgsz + if (extra % self.pgsz) != 0 { 1 } else { 0 };
                //printfn "extra pages: %d" extraPages
                let blk = self.getBlock(space, extraPages as PageNum);
                try!(utils::SeekPage(fs, self.pgsz, blk.firstPage));
                // write_all, not write: a short write here would silently
                // truncate the overflow chunk and corrupt the header.
                try!(fs.write_all(&buf[fits ..]));
                pb.PutInt32(fits as u32);
                pb.PutInt32(blk.firstPage);
                pb.PutArray(&buf[0 ..
fits]); Some(blk) }; try!(fs.seek(SeekFrom::Start(0))); try!(pb.Write(fs)); try!(fs.flush()); let oldHeaderOverflow = hdr.headerOverflow; hdr.headerOverflow = headerOverflow; st.header = hdr; Ok((oldHeaderOverflow)) } // TODO this function looks for the segment in the header.segments, // which means it cannot be used to open a cursor on a pendingSegment, // which we think we might need in the future. fn getCursor(&self, st: &SafeHeader, g: SegmentNum ) -> Result<SegmentCursor> { match st.header.segments.get(&g) { None => Err(LsmError::Misc("getCursor: segment not found")), Some(seg) => { let rootPage = seg.root; let mut cursors = try!(self.cursors.lock()); let csrnum = cursors.nextCursorNum; let csr = try!(SegmentCursor::new(&self.path, self.pgsz, rootPage, seg.blocks.clone(), &self, g, csrnum)); cursors.nextCursorNum = cursors.nextCursorNum + 1; let was = cursors.cursors.insert(csrnum, g); assert!(was.is_none()); Ok(csr) } } } // TODO we also need a way to open a cursor on segments in waiting fn OpenCursor(&self) -> Result<LivingCursor> { // TODO this cursor needs to expose the changeCounter and segment list // on which it is based. for optimistic writes. caller can grab a cursor, // do their writes, then grab the writelock, and grab another cursor, then // compare the two cursors to see if anything important changed. if not, // commit their writes. if so, nevermind the written segments and start over. 
let st = try!(self.header.lock()); let mut clist = Vec::new(); for g in st.header.currentState.iter() { clist.push(try!(self.getCursor(&*st, *g))); } let mc = MultiCursor::Create(clist); let lc = LivingCursor::Create(mc); Ok(lc) } fn commitSegments(&self, newSegs: Vec<SegmentNum> ) -> Result<()> { assert_eq!(newSegs.len(), newSegs.iter().map(|g| *g).collect::<HashSet<SegmentNum>>().len()); let mut st = try!(self.header.lock()); let mut waiting = try!(self.segmentsInWaiting.lock()); let mut space = try!(self.space.lock()); assert!({ let mut ok = true; for newSegNum in newSegs.iter() { ok = st.header.currentState.iter().position(|&g| g == *newSegNum).is_none(); if !ok { break; } } ok }); // self.segmentsInWaiting must contain one seg for each segment num in newSegs. // we want those entries to move out and move into the header, currentState // and segments. This means taking ownership of those SegmentInfos. But // the others we want to leave. let mut newHeader = st.header.clone(); let mut newSegmentsInWaiting = waiting.segmentsInWaiting.clone(); for g in newSegs.iter() { match newSegmentsInWaiting.remove(&g) { Some(info) => { newHeader.segments.insert(*g,info); }, None => { return Err(LsmError::Misc("commitSegments: segment not found in segmentsInWaiting")); }, } } // TODO surely there's a better way to insert one vec into another? // like insert_all, similar to push_all? for i in 0 .. 
newSegs.len() { let g = newSegs[i]; newHeader.currentState.insert(i, g); } newHeader.changeCounter = newHeader.changeCounter + 1; let mut fs = try!(self.OpenForWriting()); let oldHeaderOverflow = try!(self.writeHeader(&mut st, &mut space, &mut fs, newHeader)); waiting.segmentsInWaiting = newSegmentsInWaiting; //printfn "after commit, currentState: %A" header.currentState //printfn "after commit, segments: %A" header.segments // all the segments we just committed can now be removed from // the segments in waiting list match oldHeaderOverflow { Some(blk) => self.addFreeBlocks(&mut space, vec![ blk ]), None => () } // note that we intentionally do not release the writeLock here. // you can change the segment list more than once while holding // the writeLock. the writeLock gets released when you Dispose() it. Ok(()) } // TODO bad fn name fn WriteSegmentFromSortedSequence<I>(&self, source: I) -> Result<SegmentNum> where I:Iterator<Item=Result<kvp>> { let mut fs = try!(self.OpenForWriting()); let (g,_) = try!(CreateFromSortedSequenceOfKeyValuePairs(&mut fs, self, source)); Ok(g) } // TODO bad fn name fn WriteSegment(&self, pairs: HashMap<Box<[u8]>,Box<[u8]>>) -> Result<SegmentNum> { let mut a : Vec<(Box<[u8]>,Box<[u8]>)> = pairs.into_iter().collect(); a.sort_by(|a,b| { let (ref ka,_) = *a; let (ref kb,_) = *b; bcmp::Compare(&ka,&kb) }); let source = a.into_iter().map(|t| { let (k,v) = t; Ok(kvp {Key:k, Value:Blob::Array(v)}) }); let mut fs = try!(self.OpenForWriting()); let (g,_) = try!(CreateFromSortedSequenceOfKeyValuePairs(&mut fs, self, source)); Ok(g) } // TODO bad fn name fn WriteSegment2(&self, pairs: HashMap<Box<[u8]>,Blob>) -> Result<SegmentNum> { let mut a : Vec<(Box<[u8]>,Blob)> = pairs.into_iter().collect(); a.sort_by(|a,b| { let (ref ka,_) = *a; let (ref kb,_) = *b; bcmp::Compare(&ka,&kb) }); let source = a.into_iter().map(|t| { let (k,v) = t; Ok(kvp {Key:k, Value:v}) }); let mut fs = try!(self.OpenForWriting()); let (g,_) = 
try!(CreateFromSortedSequenceOfKeyValuePairs(&mut fs, self, source)); Ok(g) } fn merge(&self, level: u32, min: usize, max: Option<usize>) -> Result<Option<SegmentNum>> { let mrg = { let st = try!(self.header.lock()); if st.header.currentState.len() == 0 { return Ok(None) } //println!("age for merge: {}", level); //println!("currentState: {:?}", st.header.currentState); let age_group = st.header.currentState.iter().filter(|g| { let info = st.header.segments.get(&g).unwrap(); info.age == level }).map(|g| *g).collect::<Vec<SegmentNum>>(); //println!("age_group: {:?}", age_group); if age_group.len() == 0 { return Ok(None) } // make sure this is contiguous assert!(slice_within(age_group.as_slice(), st.header.currentState.as_slice()).is_ok()); let mut segs = Vec::new(); let mut mergeStuff = try!(self.mergeStuff.lock()); // we can merge any contiguous set of not-already-being-merged // segments at the end of the group. if we merge something // that is not at the end of the group, we could end up with // age groups not being contiguous. for g in age_group.iter().rev() { if mergeStuff.merging.contains(g) { break; } else { segs.push(*g); } } if segs.len() >= min { match max { Some(max) => { segs.truncate(max); }, None => (), } segs.reverse(); let mut clist = Vec::new(); for g in segs.iter() { clist.push(try!(self.getCursor(&st, *g))); } for g in segs.iter() { mergeStuff.merging.insert(*g); } Some((segs,clist)) } else { None } }; match mrg { Some((segs,clist)) => { let mut mc = MultiCursor::Create(clist); let mut fs = try!(self.OpenForWriting()); try!(mc.First()); let (g,_) = try!(CreateFromSortedSequenceOfKeyValuePairs(&mut fs, self, CursorIterator::new(mc))); //printfn "merged %A to get %A" segs g let mut mergeStuff = try!(self.mergeStuff.lock()); mergeStuff.pendingMerges.insert(g, segs); Ok(Some(g)) }, None => { Ok(None) }, } } // TODO maybe commitSegments and commitMerge should be the same function. // just check to see if the segment being committed is a merge. 
if so, // do the extra paperwork. fn commitMerge(&self, newSegNum:SegmentNum) -> Result<()> { let mut st = try!(self.header.lock()); let mut waiting = try!(self.segmentsInWaiting.lock()); let mut space = try!(self.space.lock()); let mut mergeStuff = try!(self.mergeStuff.lock()); assert!(st.header.currentState.iter().position(|&g| g == newSegNum).is_none()); // we need the list of segments which were merged. we make a copy of // so that we're not keeping a reference that inhibits our ability to // get other references a little later in the function. let old = { let maybe = mergeStuff.pendingMerges.get(&newSegNum); if maybe.is_none() { return Err(LsmError::Misc("commitMerge: segment not found in pendingMerges")); } else { maybe.expect("just checked is_none").clone() } }; let oldAsSet : HashSet<SegmentNum> = old.iter().map(|g| *g).collect(); assert!(oldAsSet.len() == old.len()); // now we need to verify that the segments being replaced are in currentState // and contiguous. let ndxFirstOld = try!(slice_within(old.as_slice(), st.header.currentState.as_slice())); // now we construct a newHeader let mut newHeader = st.header.clone(); // first, fix the currentState for _ in &old { newHeader.currentState.remove(ndxFirstOld); } newHeader.currentState.insert(ndxFirstOld, newSegNum); // remove the old segmentinfos, keeping them for later let mut segmentsBeingReplaced = HashMap::new(); for g in &oldAsSet { let info = newHeader.segments.remove(g).expect("old seg not found in header.segments"); segmentsBeingReplaced.insert(g, info); } // now get the segment info for the new segment let mut newSegmentInfo = { let maybe = waiting.segmentsInWaiting.get(&newSegNum); if maybe.is_none() { return Err(LsmError::Misc("commitMerge: segment not found in segmentsInWaiting")); } else { maybe.expect("seg not found").clone() } }; // and fix its age to be one higher than the maximum age of the // segments it replaced. 
let age_of_new_segment = { let ages: Vec<u32> = segmentsBeingReplaced.values().map(|info| info.age).collect(); 1 + ages.iter().max().expect("this cannot be empty") }; newSegmentInfo.age = age_of_new_segment; newHeader.segments.insert(newSegNum, newSegmentInfo); newHeader.mergeCounter = newHeader.mergeCounter + 1; let mut fs = try!(self.OpenForWriting()); let oldHeaderOverflow = try!(self.writeHeader(&mut st, &mut space, &mut fs, newHeader)); // the write of the new header has succeeded. waiting.segmentsInWaiting.remove(&newSegNum); mergeStuff.pendingMerges.remove(&newSegNum); for g in old { mergeStuff.merging.remove(&g); } let mut segmentsToBeFreed = segmentsBeingReplaced; { let mut cursors = try!(self.cursors.lock()); let segmentsWithACursor : HashSet<SegmentNum> = cursors.cursors.iter().map(|t| {let (_,segnum) = t; *segnum}).collect(); for g in segmentsWithACursor { // don't free anything that has a cursor match segmentsToBeFreed.remove(&g) { Some(z) => { cursors.zombies.insert(g, z); }, None => { }, } } } let mut blocksToBeFreed = Vec::new(); for info in segmentsToBeFreed.values() { blocksToBeFreed.push_all(&info.blocks); } match oldHeaderOverflow { Some(blk) => blocksToBeFreed.push(blk), None => (), } self.addFreeBlocks(&mut space, blocksToBeFreed); // note that we intentionally do not release the writeLock here. // you can change the segment list more than once while holding // the writeLock. the writeLock gets released when you Dispose() it. 
Ok(())
    }

}

// InnerPart is the page/block provider used while building new segments.
impl IPages for InnerPart {
    // page size in bytes, fixed for the lifetime of the file
    fn PageSize(&self) -> usize {
        self.pgsz
    }

    // Start a new pending segment, consuming the next segment number.
    fn Begin(&self) -> Result<PendingSegment> {
        let mut lck = try!(self.nextSeg.lock());
        let p = PendingSegment::new(lck.nextSeg);
        lck.nextSeg = lck.nextSeg + 1;
        Ok(p)
    }

    // Allocate another block of pages for the pending segment to write into.
    fn GetBlock(&self, ps: &mut PendingSegment) -> Result<PageBlock> {
        let mut space = try!(self.space.lock());
        // specificSize=0 means we don't care how big of a block we get
        let blk = self.getBlock(&mut space, 0);
        ps.AddBlock(blk);
        Ok(blk)
    }

    // Finish the pending segment: record it in segmentsInWaiting (it is not
    // visible until committed) and return any unused tail of its final
    // block to the free list.
    fn End(&self, ps:PendingSegment, lastPage: PageNum) -> Result<SegmentNum> {
        let (g, blocks, leftovers) = ps.End(lastPage);
        // the root page of a finished segment is its last written page
        let info = SegmentInfo {age: 0,blocks:blocks,root:lastPage};
        let mut waiting = try!(self.segmentsInWaiting.lock());
        let mut space = try!(self.space.lock());
        waiting.segmentsInWaiting.insert(g,info);
        //printfn "wrote %A: %A" g blocks
        match leftovers {
            Some(b) => self.addFreeBlocks(&mut space, vec![b]),
            None => ()
        }
        Ok(g)
    }

}

// ----------------------------------------------------------------

// The block comment below is F# code from the codebase this file was ported
// from, kept as a reference for functionality not yet ported (auto-merge,
// forgetting waiting segments, PairBuffer, etc.).

/*
type Database(_io:IDatabaseFile, _settings:DbSettings) =

    let doAutoMerge() =
        if settings.AutoMergeEnabled then
            for level in 0 .. 3 do // TODO max merge level immediate
                match getPossibleMerge level settings.AutoMergeMinimumPages false with
                | Some f ->
                    let g = f()
                    commitMerge g
                | None -> () // printfn "cannot merge level %d" level
            for level in 4 ..
7 do // TODO max merge level match getPossibleMerge level settings.AutoMergeMinimumPages false with | Some f -> f |> wrapMergeForLater |> startBackgroundMergeJob | None -> () // printfn "cannot merge level %d" level member this.ForgetWaitingSegments(guids:seq<Guid>) = // TODO need a test case for this let guidsAsSet = Seq.fold (fun acc g -> Set.add g acc) Set.empty guids let mySegmentsInWaiting = Map.filter (fun g _ -> Set.contains g guidsAsSet) segmentsInWaiting lock critSectionSegmentsInWaiting (fun () -> let remainingSegmentsInWaiting = Map.filter (fun g _ -> Set.contains g guidsAsSet |> not) segmentsInWaiting segmentsInWaiting <- remainingSegmentsInWaiting ) lock critSectionCursors (fun () -> let segmentsToBeFreed = Map.filter (fun g _ -> not (Map.containsKey g cursors)) mySegmentsInWaiting let blocksToBeFreed = Seq.fold (fun acc info -> info.blocks @ acc) List.empty (Map.values segmentsToBeFreed) addFreeBlocks blocksToBeFreed ) member this.OpenSegmentCursor(g:Guid) = let csr = lock critSectionCursors (fun () -> let h = header getCursor h.segments g (Some checkForGoneSegment) ) csr member this.GetFreeBlocks() = freeBlocks member this.PageSize() = pgsz member this.ListSegments() = (header.currentState, header.segments) member this.RequestWriteLock(timeout:int) = // TODO need a test case for this getWriteLock false timeout (Some doAutoMerge) member this.RequestWriteLock() = getWriteLock false (-1) (Some doAutoMerge) type PairBuffer(_db:IDatabase, _limit:int) = let db = _db let limit = _limit let d = System.Collections.Generic.Dictionary<byte[],Blob>() let mutable segs = [] let emptyByteArray:byte[] = Array.empty let emptyBlobValue = Blob.Array emptyByteArray member this.Flush() = if d.Count > 0 then let g = db.WriteSegment(d) segs <- g :: segs d.Clear() member this.AddPair(k:byte[], v:Blob) = // TODO dictionary deals with byte[] keys by reference. 
d.[k] <- v if d.Count >= limit then this.Flush() member this.AddEmptyKey(k:byte[]) = this.AddPair(k, emptyBlobValue) member this.Commit(tx:IWriteLock) = tx.CommitSegments segs segs <- [] */ #[cfg(test)] mod tests { use std; use super::Result; #[test] fn it_works() { } #[test] #[ignore] fn quick() { fn tempfile(base: &str) -> String { fn tid() -> String { // TODO use the rand crate fn bytes() -> std::io::Result<[u8;16]> { let mut f = try!(std::fs::OpenOptions::new() .read(true) .open("/dev/urandom")); let mut ba = [0;16]; try!(super::utils::ReadFully(&mut f, &mut ba)); Ok(ba) } fn to_hex_string(ba: &[u8]) -> String { let strs: Vec<String> = ba.iter() .map(|b| format!("{:02X}", b)) .collect(); strs.connect("") } let ba = bytes().unwrap(); to_hex_string(&ba) } std::fs::create_dir("tmp"); let file = "tmp/".to_string() + base + "_" + &tid(); file } fn f() -> Result<()> { //println!("running"); let db = try!(super::db::new(tempfile("quick"), super::DEFAULT_SETTINGS)); const NUM : usize = 100000; let mut a = Vec::new(); for i in 0 .. 10 { let g = try!(db.WriteSegmentFromSortedSequence(super::GenerateNumbers {cur: i * NUM, end: (i+1) * NUM, step: i+1})); a.push(g); } { let lck = try!(db.GetWriteLock()); try!(lck.commitSegments(a.clone())); } let g3 = try!(db.merge(0, 2, None)); assert!(g3.is_some()); let g3 = g3.unwrap(); { let lck = try!(db.GetWriteLock()); try!(lck.commitMerge(g3)); } Ok(()) } assert!(f().is_ok()); } } pub struct GenerateNumbers { pub cur: usize, pub end: usize, pub step: usize, } impl Iterator for GenerateNumbers { type Item = Result<kvp>; // TODO allow the number of digits to be customized? 
    /// Produce the next ascending key/value pair, or `None` once `cur`
    /// has passed `end` (the range is inclusive of `end`).
    fn next(&mut self) -> Option<Result<kvp>> {
        if self.cur > self.end {
            None
        } else {
            // Key is the number zero-padded to 8 digits so that the
            // lexicographic byte order of keys matches numeric order.
            let k = format!("{:08}", self.cur).into_bytes().into_boxed_slice();
            // Value is twice the current number, unpadded.
            let v = format!("{}", self.cur * 2).into_bytes().into_boxed_slice();
            let r = kvp{Key:k, Value:Blob::Array(v)};
            self.cur = self.cur + self.step;
            Some(Ok(r))
        }
    }
}

/// Test-data iterator producing deterministic, pseudo-random-looking
/// byte strings for both keys and values.
pub struct GenerateWeirdPairs {
    pub cur: usize,  // current position; advanced by 1 per pair
    pub end: usize,  // inclusive upper bound
    pub klen: usize, // key length in bytes
    pub vlen: usize, // value length in bytes
}

impl Iterator for GenerateWeirdPairs {
    type Item = Result<kvp>;
    fn next(&mut self) -> Option<Result<kvp>> {
        if self.cur > self.end {
            None
        } else {
            // Deterministic byte generator: the low 8 bits of
            // floor(|sin(i)| * 1000).
            fn get_weird(i: usize) -> u8 {
                let f = i as f64;
                let f = f.sin() * 1000.0;
                let f = f.abs();
                let f = f.floor() as u32;
                let f = f & 0xff;
                let f = f as u8;
                f
            }

            let mut k = Vec::new();
            for i in 0 .. self.klen {
                k.push(get_weird(i + self.cur));
            }
            let k = k.into_boxed_slice();

            let mut v = Vec::new();
            for i in 0 .. self.vlen {
                v.push(get_weird(i * 2 + self.cur));
            }
            let v = v.into_boxed_slice();

            let r = kvp{Key:k, Value:Blob::Array(v)};
            self.cur = self.cur + 1;
            Some(Ok(r))
        }
    }
}

/// Decimal floating-point number in the style of SQLite4:
/// value = (neg ? -1 : 1) * m * 10^e, with `approx` set when the
/// conversion that produced it was not exact.
pub struct sqlite4_num {
    neg: bool,    // sign; true for negative values
    approx: bool, // true when m/e approximate the source value
    e: i16,       // base-10 exponent
    m: u64,       // mantissa (significand)
}

impl sqlite4_num {
    // Exponents above SQLITE4_MX_EXP encode the special values:
    // m == 0 -> NaN, m != 0 -> infinity (see is_nan / is_inf below).
    const SQLITE4_MX_EXP: i16 = 999;
    const SQLITE4_NAN_EXP: i16 = 2000;
    const NAN: sqlite4_num = sqlite4_num {
        neg: false,
        approx: true,
        e: sqlite4_num::SQLITE4_NAN_EXP,
        m: 0,
    };
    const POS_INF: sqlite4_num = sqlite4_num {m: 1, .. sqlite4_num::NAN};
    const NEG_INF: sqlite4_num = sqlite4_num {neg: true, .. sqlite4_num::POS_INF};
    const ZERO: sqlite4_num = sqlite4_num {
        neg: false,
        approx: false,
        e: 0,
        m: 0,
    };

    /// Convert an f64 to the decimal form by repeatedly scaling by 10
    /// until the mantissa fits a u64 with maximum precision.
    fn from_f64(d: f64) -> sqlite4_num {
        // TODO probably this function should be done by decoding the bits
        if d.is_nan() {
            sqlite4_num::NAN
        } else if d.is_sign_positive() && d.is_infinite() {
            sqlite4_num::POS_INF
        } else if d.is_sign_negative() && d.is_infinite() {
            sqlite4_num::NEG_INF
        } else if d==0.0 {
            sqlite4_num::ZERO
        } else {
            let LARGEST_UINT64 = u64::max_value();
            let TENTH_MAX = LARGEST_UINT64 / 10;
            let large = LARGEST_UINT64 as f64;
            let large10 = TENTH_MAX as f64;
            let neg = d<0.0;
            let mut d = if neg { -d } else { d };
            let mut e = 0;
            // Scale down while d overflows u64, or while it is an
            // integer that can shed a factor of ten without loss.
            while d>large || (d>1.0 && d==((d as i64) as f64)) {
                d = d / 10.0;
                e = e + 1;
            }
            // Scale up to capture fractional digits while the mantissa
            // still fits below u64::MAX / 10.
            while d<large10 && d != ((d as i64) as f64) {
                d = d * 10.0;
                e = e - 1;
            }
            sqlite4_num {
                neg: neg,
                approx: true, // f64 conversions are always marked inexact
                e: e as i16,
                m: d as u64,
            }
        }
    }

    /// True for +/- infinity: out-of-range exponent, nonzero mantissa.
    fn is_inf(&self) -> bool {
        (self.e > sqlite4_num::SQLITE4_MX_EXP) && (self.m != 0)
    }

    /// True for NaN: out-of-range exponent, zero mantissa.
    fn is_nan(&self) -> bool {
        (self.e > sqlite4_num::SQLITE4_MX_EXP) && (self.m == 0)
    }

    /// Exact conversion from i64. i64::min_value() needs a special case
    /// because its magnitude cannot be negated within i64.
    fn from_i64(n: i64) -> sqlite4_num {
        sqlite4_num {
            neg: n<0,
            approx: false,
            m: if n>=0 { (n as u64) } else if n != i64::min_value() { ((-n) as u64) } else { 1 + (i64::max_value() as u64) },
            e: 0,
        }
    }

    /// Strip trailing decimal zeros from the mantissa, bumping the
    /// exponent to compensate.
    // NOTE(review): this loop never terminates when m == 0; the only
    // caller in view (encode_for_index) checks m == 0 first — confirm
    // any other callers uphold the same precondition.
    fn normalize(&self) -> sqlite4_num {
        let mut m = self.m;
        let mut e = self.e;
        while (m % 10) == 0 {
            e = e + 1;
            m = m / 10;
        }
        sqlite4_num {m: m, e: e, .. *self}
    }

    /// Append the order-preserving key encoding of this number to `w`:
    /// a tag byte that orders NaN (0x06) < -inf (0x07) < negatives
    /// (0x08..0x14) < zero (0x15) < positives (0x16..0x22) < +inf
    /// (0x23), optionally a big-endian 16-bit exponent, then base-100
    /// mantissa digits (complemented for negative values).
    fn encode_for_index(&self, w: &mut Vec<u8>) {
        // TODO in sqlite4, the first byte of this encoding
        // is designed to mesh with the
        // overall type order byte.
        if self.m == 0 {
            if self.is_nan() {
                w.push(0x06u8);
            } else {
                w.push(0x15u8); // finite with m == 0 means zero
            }
        } else if self.is_inf() {
            if self.neg {
                w.push(0x07u8);
            } else {
                w.push(0x23u8);
            }
        } else {
            let num = self.normalize();
            let mut m = num.m;
            let mut e = num.e;
            let mut iDigit;
            let mut aDigit = [0; 12];
            // If the exponent is odd, peel one decimal digit off so the
            // rest of the mantissa can be emitted as base-100 pairs.
            if (num.e%2) != 0 {
                aDigit[0] = (10 * (m % 10)) as u8;
                m = m / 10;
                e = e - 1;
                iDigit = 1;
            } else {
                iDigit = 0;
            }
            // Collect base-100 digits, least significant first.
            while m != 0 {
                aDigit[iDigit] = (m % 100) as u8;
                iDigit = iDigit + 1;
                m = m / 100;
            }
            // Centi-exponent: digit count plus half the base-10 exponent.
            e = (iDigit as i16) + (e/2);

            fn push_u16_be(w: &mut Vec<u8>, e: u16) {
                w.push(((e>>8) & 0xff_u16) as u8);
                w.push(((e>>0) & 0xff_u16) as u8);
            }

            // Tag byte; large magnitudes carry an explicit exponent.
            // Negative numbers use complemented exponents so that larger
            // magnitudes sort earlier.
            if e>= 11 {
                if ! num.neg {
                    w.push(0x22u8);
                    push_u16_be(w, e as u16);
                } else {
                    w.push(0x08u8);
                    push_u16_be(w, !e as u16);
                }
            } else if e>=0 {
                if ! num.neg {
                    w.push(0x17u8+(e as u8));
                } else {
                    w.push(0x13u8-(e as u8));
                }
            } else {
                if ! num.neg {
                    w.push(0x16u8);
                    push_u16_be(w, !((-e) as u16));
                } else {
                    w.push(0x14u8);
                    push_u16_be(w, (-e) as u16);
                }
            }

            // Mantissa digits, most significant first: each digit is
            // doubled and the low bit flags "more digits follow";
            // negatives are bitwise-complemented.
            while iDigit>0 {
                iDigit = iDigit - 1;
                let mut d = aDigit[iDigit] * 2u8;
                if iDigit != 0 {
                    d = d | 0x01u8;
                }
                if num.neg {
                    d = !d;
                }
                w.push(d)
            }
        }
    }
}

// TODO the following can be removed at some point. it is here
// now only because the test suite has not yet been adapted to use
// KeyRef/ValueRef.
impl<'a> LivingCursor<'a> {
    /// Copy the current key out of the cursor into an owned boxed slice.
    pub fn Key(&self) -> Result<Box<[u8]>> {
        let k = try!(self.KeyRef());
        let k = k.into_boxed_slice();
        Ok(k)
    }

    /// Copy the current value out of the cursor as an owned Blob.
    pub fn Value(&self) -> Result<Blob> {
        let v = try!(self.ValueRef());
        let v = v.into_blob();
        Ok(v)
    }

    /// Seek wrapper taking a plain byte slice instead of a KeyRef.
    pub fn Seek(&mut self, k: &[u8], sop:SeekOp) -> Result<SeekResult> {
        let k2 = KeyRef::for_slice(k);
        let r = self.SeekRef(&k2, sop);
        // NOTE(review): debug println! left in a library code path —
        // consider removing.
        println!("{:?}", r);
        r
    }
}
use self::private::Sealed;
use std::error::Error;
use std::panic::UnwindSafe;

/// Upcast a concrete error type, or an `Error` trait object with extra
/// auto-trait bounds, to a plain `&dyn Error`. Bounded by the private
/// `Sealed` trait so no impls can be added outside this crate.
pub trait AsDynError<'a>: Sealed {
    fn as_dyn_error(&self) -> &(dyn Error + 'a);
}

// For any sized concrete error type, the upcast is an unsized coercion.
impl<'a, T: Error + 'a> AsDynError<'a> for T {
    #[inline]
    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
        self
    }
}

// The blanket impl above does not cover trait objects (they are not
// `Sized`), so each auto-trait combination gets its own impl.
impl<'a> AsDynError<'a> for dyn Error + 'a {
    #[inline]
    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
        self
    }
}

impl<'a> AsDynError<'a> for dyn Error + Send + 'a {
    #[inline]
    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
        self
    }
}

impl<'a> AsDynError<'a> for dyn Error + Send + Sync + 'a {
    #[inline]
    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
        self
    }
}

impl<'a> AsDynError<'a> for dyn Error + Send + Sync + UnwindSafe + 'a {
    #[inline]
    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
        self
    }
}

// Private module holding the seal: `Sealed` is implemented for exactly
// the same set of types as `AsDynError`, and is not nameable outside.
mod private {
    use super::*;
    pub trait Sealed {}
    impl<'a, T: Error + 'a> Sealed for T {}
    impl<'a> Sealed for dyn Error + 'a {}
    impl<'a> Sealed for dyn Error + Send + 'a {}
    impl<'a> Sealed for dyn Error + Send + Sync + 'a {}
    impl<'a> Sealed for dyn Error + Send + Sync + UnwindSafe + 'a {}
}

Sealed trait does not need its own module The aserror module is already private to thiserror, with AsDynError being re-exported in thiserror::private, so as long as Sealed is not also re-exported there, it remains inaccessible outside the crate.
use std::error::Error;
use std::panic::UnwindSafe;

/// Upcast a concrete error type, or an `Error` trait object with extra
/// auto-trait bounds, to a plain `&dyn Error`. Bounded by `Sealed` so
/// no impls can be added outside the crate (the module is private and
/// `Sealed` is not re-exported — see the accompanying note).
pub trait AsDynError<'a>: Sealed {
    fn as_dyn_error(&self) -> &(dyn Error + 'a);
}

// For any sized concrete error type, the upcast is an unsized coercion.
impl<'a, T: Error + 'a> AsDynError<'a> for T {
    #[inline]
    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
        self
    }
}

// The blanket impl above does not cover trait objects (they are not
// `Sized`), so each auto-trait combination gets its own impl.
impl<'a> AsDynError<'a> for dyn Error + 'a {
    #[inline]
    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
        self
    }
}

impl<'a> AsDynError<'a> for dyn Error + Send + 'a {
    #[inline]
    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
        self
    }
}

impl<'a> AsDynError<'a> for dyn Error + Send + Sync + 'a {
    #[inline]
    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
        self
    }
}

impl<'a> AsDynError<'a> for dyn Error + Send + Sync + UnwindSafe + 'a {
    #[inline]
    fn as_dyn_error(&self) -> &(dyn Error + 'a) {
        self
    }
}

// The seal: implemented for exactly the same set of types as
// `AsDynError`, and deliberately kept out of the crate's public API.
pub trait Sealed {}
impl<'a, T: Error + 'a> Sealed for T {}
impl<'a> Sealed for dyn Error + 'a {}
impl<'a> Sealed for dyn Error + Send + 'a {}
impl<'a> Sealed for dyn Error + Send + Sync + 'a {}
impl<'a> Sealed for dyn Error + Send + Sync + UnwindSafe + 'a {}
use std::fmt; use span::Spanned; pub mod printer; #[derive(Debug, Clone, PartialEq)] pub struct TranslationUnit { pub imports: Vec<String>, pub declarations: Vec<Spanned<Declaration>>, } #[derive(Debug, Clone, PartialEq)] pub enum Declaration { ExternFunction { name: String, params: Vec<Spanned<ParseType>>, variadic: bool, return_ty: Spanned<ParseType>, }, Function { name: String, params: Vec<(Spanned<String>, Spanned<ParseType>)>, return_ty: Spanned<ParseType>, stmt: Spanned<CompoundStatement>, }, Struct { name: String, fields: Vec<(Spanned<String>, Spanned<ParseType>)>, } } #[derive(Debug, Clone, PartialEq)] pub struct CompoundStatement(pub Vec<Spanned<Statement>>); #[derive(Debug, Clone, PartialEq)] pub enum Statement { Compound(Spanned<CompoundStatement>), Let { name: String, ty: Option<Spanned<ParseType>>, expr: Spanned<Expression>, }, Loop { stmt: Spanned<CompoundStatement> }, While { cond: Spanned<Expression>, stmt: Spanned<CompoundStatement>, }, For { name: String, init_expr: Spanned<Expression>, cond_expr: Spanned<Expression>, step_expr: Spanned<Expression>, stmt: Spanned<CompoundStatement>, }, If { if_branch: (Spanned<Expression>, Spanned<CompoundStatement>), elseif_branches: Vec<(Spanned<Expression>, Spanned<CompoundStatement>)>, else_branch: Option<Spanned<CompoundStatement>>, }, Break, Continue, Return { expr: Option<Spanned<Expression>> }, Expression { expr: Spanned<Expression> }, } #[derive(Debug, Clone, PartialEq)] pub enum Expression { Assign(Option<BinOpCode>, Box<Spanned<Expression>>, Box<Spanned<Expression>>), // None if classic assign Subscript(Box<Spanned<Expression>>, Box<Spanned<Expression>>), BinOp(BinOpCode, Box<Spanned<Expression>>, Box<Spanned<Expression>>), UnOp(UnOpCode, Box<Spanned<Expression>>), FuncCall(Box<Spanned<Expression>>, Vec<Spanned<Expression>>), Cast(Box<Spanned<Expression>>, Spanned<ParseType>), FieldAccess(Box<Spanned<Expression>>, Spanned<String>), Paren(Box<Spanned<Expression>>), Identifier(String), Literal(Literal), 
StringLiteral(String), ArrayFullLiteral(Vec<Spanned<Expression>>), ArrayDefaultLiteral(Box<Spanned<Expression>>, i64), } #[derive(Debug, Clone, PartialEq)] pub enum Literal { Int(i64), Double(f64), Bool(bool), Char(String), Unit, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum BinOpCode { Add, Sub, Times, Divide, Mod, Less, LessEqual, Greater, GreaterEqual, Equal, NotEqual, LogicalAnd, LogicalOr, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum UnOpCode { Minus, LogicalNot, AddressOf, Deref, } impl fmt::Display for BinOpCode { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::BinOpCode::*; let op = match *self { Add => "+", Sub => "-", Times => "*", Divide => "/", Mod => "%", Less => "<", LessEqual => "<=", Greater => ">", GreaterEqual => ">=", Equal => "==", NotEqual => "!=", LogicalAnd => "&&", LogicalOr => "||", }; write!(f, "{}", op) } } impl fmt::Display for UnOpCode { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::UnOpCode::*; let op = match *self { Minus => "-", LogicalNot => "!", AddressOf => "&", Deref => "*", }; write!(f, "{}", op) } } #[derive(Debug, Clone, PartialEq, Eq)] pub enum ParseType { Unit, Lit(String), Ptr(Box<Spanned<ParseType>>), } impl fmt::Display for ParseType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ParseType::Unit => write!(f, "()"), ParseType::Lit(ref lit) => write!(f, "{}", lit), ParseType::Ptr(ref sub) => write!(f, "*{}", sub.inner), } } } Add struct literal to ast use std::fmt; use span::Spanned; pub mod printer; #[derive(Debug, Clone, PartialEq)] pub struct TranslationUnit { pub imports: Vec<String>, pub declarations: Vec<Spanned<Declaration>>, } #[derive(Debug, Clone, PartialEq)] pub enum Declaration { ExternFunction { name: String, params: Vec<Spanned<ParseType>>, variadic: bool, return_ty: Spanned<ParseType>, }, Function { name: String, params: Vec<(Spanned<String>, Spanned<ParseType>)>, return_ty: Spanned<ParseType>, stmt: Spanned<CompoundStatement>, 
},
    /// Struct type declaration: name plus typed fields.
    Struct {
        name: String,
        fields: Vec<(Spanned<String>, Spanned<ParseType>)>,
    }
}

/// A braced block of statements.
#[derive(Debug, Clone, PartialEq)]
pub struct CompoundStatement(pub Vec<Spanned<Statement>>);

/// One statement inside a function body.
#[derive(Debug, Clone, PartialEq)]
pub enum Statement {
    Compound(Spanned<CompoundStatement>),
    Let {
        name: String,
        ty: Option<Spanned<ParseType>>, // None => type inferred from expr
        expr: Spanned<Expression>,
    },
    Loop { stmt: Spanned<CompoundStatement> },
    While {
        cond: Spanned<Expression>,
        stmt: Spanned<CompoundStatement>,
    },
    For {
        name: String,
        init_expr: Spanned<Expression>,
        cond_expr: Spanned<Expression>,
        step_expr: Spanned<Expression>,
        stmt: Spanned<CompoundStatement>,
    },
    If {
        if_branch: (Spanned<Expression>, Spanned<CompoundStatement>),
        elseif_branches: Vec<(Spanned<Expression>, Spanned<CompoundStatement>)>,
        else_branch: Option<Spanned<CompoundStatement>>,
    },
    Break,
    Continue,
    Return { expr: Option<Spanned<Expression>> },
    Expression { expr: Spanned<Expression> },
}

/// An expression node.
#[derive(Debug, Clone, PartialEq)]
pub enum Expression {
    Assign(Option<BinOpCode>, Box<Spanned<Expression>>, Box<Spanned<Expression>>), // None if classic assign
    Subscript(Box<Spanned<Expression>>, Box<Spanned<Expression>>),
    BinOp(BinOpCode, Box<Spanned<Expression>>, Box<Spanned<Expression>>),
    UnOp(UnOpCode, Box<Spanned<Expression>>),
    FuncCall(Box<Spanned<Expression>>, Vec<Spanned<Expression>>),
    Cast(Box<Spanned<Expression>>, Spanned<ParseType>),
    FieldAccess(Box<Spanned<Expression>>, Spanned<String>),
    Paren(Box<Spanned<Expression>>),
    Identifier(String),
    Literal(Literal),
    StringLiteral(String),
    ArrayFullLiteral(Vec<Spanned<Expression>>),
    ArrayDefaultLiteral(Box<Spanned<Expression>>, i64),
    StructLiteral(StructLiteral),
}

/// A primitive literal value.
#[derive(Debug, Clone, PartialEq)]
pub enum Literal {
    Int(i64),
    Double(f64),
    Bool(bool),
    Char(String),
    Unit,
}

/// A struct-literal expression: `Name { field: expr, ... }`.
///
/// FIX: the `fields` type was missing a closing `>` (unbalanced
/// generics — a compile error), and the fields were private while
/// every other AST node in this module exposes `pub` fields; both are
/// corrected here so the printer and later passes can read the node.
#[derive(Debug, Clone, PartialEq)]
pub struct StructLiteral {
    pub name: String,
    pub fields: Vec<Spanned<(String, Spanned<Expression>)>>,
}

/// Binary operator codes.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BinOpCode {
    Add,
    Sub,
    Times,
    Divide,
    Mod,
    Less,
LessEqual,
    Greater,
    GreaterEqual,
    Equal,
    NotEqual,
    LogicalAnd,
    LogicalOr,
}

/// Unary operator codes.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UnOpCode {
    Minus,
    LogicalNot,
    AddressOf,
    Deref,
}

/// Renders a binary operator exactly as it is written in source.
impl fmt::Display for BinOpCode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::BinOpCode::*;
        f.write_str(match *self {
            Add => "+",
            Sub => "-",
            Times => "*",
            Divide => "/",
            Mod => "%",
            Less => "<",
            LessEqual => "<=",
            Greater => ">",
            GreaterEqual => ">=",
            Equal => "==",
            NotEqual => "!=",
            LogicalAnd => "&&",
            LogicalOr => "||",
        })
    }
}

/// Renders a unary operator exactly as it is written in source.
impl fmt::Display for UnOpCode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::UnOpCode::*;
        f.write_str(match *self {
            Minus => "-",
            LogicalNot => "!",
            AddressOf => "&",
            Deref => "*",
        })
    }
}

/// Source-level (not yet resolved) type syntax.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ParseType {
    Unit,
    Lit(String),
    Ptr(Box<Spanned<ParseType>>),
}

/// Renders a type in source syntax: `()`, a bare name, or `*pointee`.
impl fmt::Display for ParseType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            ParseType::Unit => f.write_str("()"),
            ParseType::Lit(ref name) => write!(f, "{}", name),
            ParseType::Ptr(ref pointee) => write!(f, "*{}", pointee.inner),
        }
    }
}
use std::cmp::min; use std::io::Write; use std::rc::{Rc,Weak}; use std::cell::RefCell; use std::net::{SocketAddr,IpAddr}; use mio::*; use mio::unix::UnixReady; use mio::tcp::TcpStream; use time::{Duration, precise_time_s, precise_time_ns}; use uuid::Uuid; use parser::http11::{HttpState,parse_request_until_stop, parse_response_until_stop, BufferMove, RequestState, ResponseState, Chunk, Continue, RRequestLine, RStatusLine}; use network::{ClientResult,Protocol,Readiness,SessionMetrics, LogDuration}; use network::buffer_queue::BufferQueue; use network::socket::{SocketHandler,SocketResult}; use network::protocol::ProtocolResult; use network::pool::{Pool,Checkout}; use pool_crate::Reset; use util::UnwrapLog; #[derive(Clone)] pub struct StickySession { pub sticky_id: String } impl StickySession { pub fn new(backend_id: String) -> StickySession { StickySession { sticky_id: backend_id } } } type BackendToken = Token; #[derive(Debug,Clone,PartialEq)] pub enum ClientStatus { Normal, /// status, HTTP answer, index in HTTP answer DefaultAnswer(DefaultAnswerStatus, Rc<Vec<u8>>, usize), } #[derive(Debug,Clone,Copy,PartialEq)] pub enum DefaultAnswerStatus { Answer301, Answer400, Answer404, Answer503, Answer413, } pub struct Http<Front:SocketHandler> { pub frontend: Front, pub backend: Option<TcpStream>, frontend_token: Token, backend_token: Option<Token>, pub status: ClientStatus, pub state: Option<HttpState>, pub front_buf: Option<Checkout<BufferQueue>>, pub back_buf: Option<Checkout<BufferQueue>>, pub app_id: Option<String>, pub request_id: String, pub front_readiness:Readiness, pub back_readiness: Readiness, pub log_ctx: String, pub public_address: Option<IpAddr>, pub client_address: Option<SocketAddr>, pub sticky_name: String, pub sticky_session: Option<StickySession>, pub protocol: Protocol, pool: Weak<RefCell<Pool<BufferQueue>>>, } impl<Front:SocketHandler> Http<Front> { pub fn new(sock: Front, token: Token, pool: Weak<RefCell<Pool<BufferQueue>>>, public_address: 
Option<IpAddr>, client_address: Option<SocketAddr>, sticky_name: String, protocol: Protocol) -> Option<Http<Front>> { let request_id = Uuid::new_v4().hyphenated().to_string(); let log_ctx = format!("{} unknown\t", &request_id); let mut client = Http { frontend: sock, backend: None, frontend_token: token, backend_token: None, status: ClientStatus::Normal, state: Some(HttpState::new()), front_buf: None, back_buf: None, app_id: None, request_id: request_id, front_readiness: Readiness::new(), back_readiness: Readiness::new(), log_ctx: log_ctx, public_address: public_address, client_address: client_address, sticky_name: sticky_name, sticky_session: None, protocol: protocol, pool, }; let req_header = client.added_request_header(public_address, client_address); let res_header = client.added_response_header(); client.state.as_mut().map(|ref mut state| state.added_req_header = req_header); client.state.as_mut().map(|ref mut state| state.added_res_header = res_header); Some(client) } pub fn reset(&mut self) { let request_id = Uuid::new_v4().hyphenated().to_string(); //info!("{} RESET TO {}", self.log_ctx, request_id); gauge_add!("http.active_requests", -1); self.state.as_mut().map(|state| state.reset()); let req_header = self.added_request_header(self.public_address, self.client_address); let res_header = self.added_response_header(); self.state.as_mut().map(|ref mut state| state.added_req_header = req_header); self.state.as_mut().map(|ref mut state| state.added_res_header = res_header); // if HTTP requests are pipelined, we might still have some data in the front buffer if self.front_buf.as_ref().map(|buf| !buf.empty()).unwrap_or(false) { self.front_readiness.event.insert(Ready::readable()); } else { self.front_buf = None; } self.back_buf = None; self.request_id = request_id; self.log_ctx = format!("{} {}\t", self.request_id, self.app_id.as_ref().unwrap_or(&String::from("unknown"))); } fn tokens(&self) -> Option<(Token,Token)> { if let Some(back) = self.backend_token { 
return Some((self.frontend_token, back)) } None } pub fn state(&mut self) -> &mut HttpState { unwrap_msg!(self.state.as_mut()) } pub fn set_state(&mut self, state: HttpState) { self.state = Some(state); } pub fn set_answer(&mut self, answer: DefaultAnswerStatus, buf: Rc<Vec<u8>>) { self.front_buf = None; self.back_buf = None; self.status = ClientStatus::DefaultAnswer(answer, buf, 0); self.front_readiness.interest = UnixReady::from(Ready::writable()) | UnixReady::hup() | UnixReady::error(); self.back_readiness.interest = UnixReady::hup() | UnixReady::error(); } pub fn added_request_header(&self, public_address: Option<IpAddr>, client_address: Option<SocketAddr>) -> String { let peer = client_address.or(self.front_socket().peer_addr().ok()).map(|addr| (addr.ip(), addr.port())); let front = public_address.or(self.front_socket().local_addr().map(|addr| addr.ip()).ok()); let client_port = self.front_socket().local_addr().map(|addr| addr.port()).ok(); if let (Some((peer_ip, peer_port)), Some(front), Some(client_port)) = (peer, front, client_port) { let proto = match self.protocol() { Protocol::HTTP => "http", Protocol::HTTPS => "https", _ => unreachable!() }; //FIXME: in the "for", we don't put the other values we could get from a preexisting forward header match (peer_ip, peer_port, front) { (IpAddr::V4(p), peer_port, IpAddr::V4(f)) => { format!("Forwarded: proto={};for={}:{};by={}\r\nX-Forwarded-Proto: {}\r\nX-Forwarded-For: {}\r\n\ X-Forwarded-Port: {}\r\nSozu-Id: {}\r\n", proto, peer_ip, peer_port, front, proto, peer_ip, client_port, self.request_id) }, (IpAddr::V4(p), peer_port, IpAddr::V6(f)) => { format!("Forwarded: proto={};for={}:{};by=\"{}\"\r\nX-Forwarded-Proto: {}\r\nX-Forwarded-For: {}\r\n\ X-Forwarded-Port: {}\r\nSozu-Id: {}\r\n", proto, peer_ip, peer_port, front, proto, peer_ip, client_port, self.request_id) }, (IpAddr::V6(p), peer_port, IpAddr::V4(f)) => { format!("Forwarded: proto={};for=\"{}:{}\";by={}\r\nX-Forwarded-Proto: {}\r\nX-Forwarded-For: 
{}\r\n\ X-Forwarded-Port: {}\r\nSozu-Id: {}\r\n", proto, peer_ip, peer_port, front, proto, peer_ip, client_port, self.request_id) }, (IpAddr::V6(p), peer_port, IpAddr::V6(f)) => { format!("Forwarded: proto={};for=\"{}:{}\";by=\"{}\"\r\nX-Forwarded-Proto: {}\r\nX-Forwarded-For: {}\r\n\ X-Forwarded-Port: {}\r\nSozu-Id: {}\r\n", proto, peer_ip, peer_port, front, proto, peer_ip, client_port, self.request_id) }, } } else { format!("Sozu-Id: {}\r\n", self.request_id) } } pub fn added_response_header(&self) -> String { format!("Sozu-Id: {}\r\n", self.request_id) } pub fn front_socket(&self) -> &TcpStream { self.frontend.socket_ref() } pub fn back_socket(&self) -> Option<&TcpStream> { self.backend.as_ref() } pub fn back_token(&self) -> Option<Token> { self.backend_token } pub fn close(&mut self) { } pub fn log_context(&self) -> String { if let Some(ref app_id) = self.app_id { format!("{}\t{}\t", self.request_id, app_id) } else { format!("{}\tunknown\t", self.request_id) } } pub fn set_back_socket(&mut self, socket: TcpStream) { self.backend = Some(socket); } pub fn set_app_id(&mut self, app_id: String) { self.log_ctx = format!("{} {}\t", self.request_id, &app_id); self.app_id = Some(app_id); } pub fn set_back_token(&mut self, token: Token) { self.backend_token = Some(token); } pub fn front_readiness(&mut self) -> &mut Readiness { &mut self.front_readiness } pub fn back_readiness(&mut self) -> &mut Readiness { &mut self.back_readiness } fn protocol(&self) -> Protocol { self.protocol } pub fn remove_backend(&mut self) -> (Option<String>, Option<SocketAddr>) { debug!("{}\tPROXY [{} -> {}] CLOSED BACKEND", self.log_ctx, self.frontend_token.0, self.backend_token.map(|t| format!("{}", t.0)).unwrap_or("-".to_string())); let addr:Option<SocketAddr> = self.backend.as_ref().and_then(|sock| sock.peer_addr().ok()); self.backend = None; self.backend_token = None; (self.app_id.clone(), addr) } pub fn front_hup(&mut self) -> ClientResult { ClientResult::CloseClient } pub fn back_hup(&mut 
self) -> ClientResult { if let Some(ref mut buf) = self.back_buf { //FIXME: closing the client might not be a good idea if we do keep alive on the front here? if buf.output_data_size() == 0 || buf.next_output_data().len() == 0 { if self.back_readiness.event.is_readable() { self.back_readiness.interest.insert(Ready::readable()); ClientResult::Continue } else { ClientResult::CloseClient } } else { self.front_readiness.interest.insert(Ready::writable()); if self.back_readiness.event.is_readable() { self.back_readiness.interest.insert(Ready::readable()); } ClientResult::Continue } } else { ClientResult::CloseClient } } /// Retrieve the response status from the http response state pub fn get_response_status(&self) -> Option<RStatusLine> { if let Some(state) = self.state.as_ref() { state.get_status_line() } else { None } } pub fn get_host(&self) -> Option<String> { if let Some(state) = self.state.as_ref() { state.get_host() } else { None } } pub fn get_request_line(&self) -> Option<RRequestLine> { if let Some(state) = self.state.as_ref() { state.get_request_line() } else { None } } pub fn get_client_address(&self) -> Option<SocketAddr> { self.client_address.or(self.frontend.socket_ref().peer_addr().ok()) } pub fn get_backend_address(&self) -> Option<SocketAddr> { self.backend.as_ref().and_then(|backend| backend.peer_addr().ok()) } pub fn log_request_success(&self, metrics: &SessionMetrics) { let client = match self.get_client_address() { None => String::from("-"), Some(SocketAddr::V4(addr)) => format!("{}", addr), Some(SocketAddr::V6(addr)) => format!("{}", addr), }; let backend = match self.get_backend_address() { None => String::from("-"), Some(SocketAddr::V4(addr)) => format!("{}", addr), Some(SocketAddr::V6(addr)) => format!("{}", addr), }; let host = self.get_host().unwrap_or(String::from("-")); let request_line = self.get_request_line().map(|line| format!("{} {}", line.method, line.uri)).unwrap_or(String::from("-")); let status_line = 
self.get_response_status().map(|line| format!("{} {}", line.status, line.reason)).unwrap_or(String::from("-")); let response_time = metrics.response_time(); let service_time = metrics.service_time(); let app_id = self.app_id.clone().unwrap_or(String::from("-")); time!("request_time", &app_id, response_time.num_milliseconds()); if let Some(backend_id) = metrics.backend_id.as_ref() { if let Some(backend_response_time) = metrics.backend_response_time() { record_backend_metrics!(app_id, backend_id, backend_response_time.num_milliseconds(), metrics.backend_bin, metrics.backend_bout); } } info_access!("{}{} -> {}\t{} {} {} {}\t{} {} {}", self.log_ctx, client, backend, LogDuration(response_time), LogDuration(service_time), metrics.bin, metrics.bout, status_line, host, request_line); } pub fn log_default_answer_success(&self, metrics: &SessionMetrics) { let client = match self.get_client_address() { None => String::from("-"), Some(SocketAddr::V4(addr)) => format!("{}", addr), Some(SocketAddr::V6(addr)) => format!("{}", addr), }; let status_line = match self.status { ClientStatus::Normal => "-", ClientStatus::DefaultAnswer(DefaultAnswerStatus::Answer301, _, _) => "301 Moved Permanently", ClientStatus::DefaultAnswer(DefaultAnswerStatus::Answer400, _, _) => "400 Bad Request", ClientStatus::DefaultAnswer(DefaultAnswerStatus::Answer404, _, _) => "404 Not Found", ClientStatus::DefaultAnswer(DefaultAnswerStatus::Answer503, _, _) => "503 Service Unavailable", ClientStatus::DefaultAnswer(DefaultAnswerStatus::Answer413, _, _) => "413 Payload Too Large", }; let host = self.get_host().unwrap_or(String::from("-")); let request_line = self.get_request_line().map(|line| format!("{} {}", line.method, line.uri)).unwrap_or(String::from("-")); let response_time = metrics.response_time(); let service_time = metrics.service_time(); if let Some(ref app_id) = self.app_id { time!("http.request.time", &app_id, response_time.num_milliseconds()); } incr!("http.errors"); info_access!("{}{} -> X\t{} 
{} {} {}\t{} {} {}", self.log_ctx, client, LogDuration(response_time), LogDuration(service_time), metrics.bin, metrics.bout, status_line, host, request_line); } pub fn log_request_error(&self, metrics: &SessionMetrics, message: &str) { let client = match self.get_client_address() { None => String::from("-"), Some(SocketAddr::V4(addr)) => format!("{}", addr), Some(SocketAddr::V6(addr)) => format!("{}", addr), }; let backend = match self.get_backend_address() { None => String::from("-"), Some(SocketAddr::V4(addr)) => format!("{}", addr), Some(SocketAddr::V6(addr)) => format!("{}", addr), }; let host = self.get_host().unwrap_or(String::from("-")); let request_line = self.get_request_line().map(|line| format!("{} {}", line.method, line.uri)).unwrap_or(String::from("-")); let status_line = self.get_response_status().map(|line| format!("{} {}", line.status, line.reason)).unwrap_or(String::from("-")); let response_time = metrics.response_time(); let service_time = metrics.service_time(); let app_id = self.app_id.clone().unwrap_or(String::from("-")); incr!("http.errors"); /*time!("request_time", &app_id, response_time); if let Some(backend_id) = metrics.backend_id.as_ref() { if let Some(backend_response_time) = metrics.backend_response_time() { record_backend_metrics!(app_id, backend_id, backend_response_time.num_milliseconds(), metrics.backend_bin, metrics.backend_bout); } }*/ error_access!("{}{} -> {}\t{} {} {} {}\t{} {} {} | {}", self.log_ctx, client, backend, LogDuration(response_time), LogDuration(service_time), metrics.bin, metrics.bout, status_line, host, request_line, message); } // Read content from the client pub fn readable(&mut self, metrics: &mut SessionMetrics) -> ClientResult { if let ClientStatus::DefaultAnswer(_,_,_) = self.status { self.front_readiness.interest.insert(Ready::writable()); self.back_readiness.interest.remove(Ready::readable()); self.back_readiness.interest.remove(Ready::writable()); return ClientResult::Continue; } 
assert!(!unwrap_msg!(self.state.as_ref()).is_front_error()); if self.front_buf.is_none() { if let Some(p) = self.pool.upgrade() { if let Some(buf) = p.borrow_mut().checkout() { self.front_buf = Some(buf); } else { error!("cannot get front buffer from pool, closing"); return ClientResult::CloseClient; } } } if self.front_buf.as_ref().unwrap().buffer.available_space() == 0 { if self.backend_token == None { // We don't have a backend to empty the buffer into, close the connection metrics.service_stop(); self.log_request_error(metrics, "front buffer full, no backend, closing connection"); let answer_413 = "HTTP/1.1 413 Payload Too Large\r\nContent-Length: 0\r\n\r\n"; self.set_answer(DefaultAnswerStatus::Answer413, Rc::new(Vec::from(answer_413.as_bytes()))); self.front_readiness.interest.remove(Ready::readable()); self.front_readiness.interest.insert(Ready::writable()); } else { self.front_readiness.interest.remove(Ready::readable()); self.back_readiness.interest.insert(Ready::writable()); } return ClientResult::Continue; } let (sz, res) = self.frontend.socket_read(self.front_buf.as_mut().unwrap().buffer.space()); debug!("{}\tFRONT: read {} bytes", self.log_ctx, sz); if sz > 0 { self.front_buf.as_mut().unwrap().buffer.fill(sz); self.front_buf.as_mut().unwrap().sliced_input(sz); count!("bytes_in", sz as i64); metrics.bin += sz; if self.front_buf.as_ref().unwrap().start_parsing_position > self.front_buf.as_ref().unwrap().parsed_position { let to_consume = min(self.front_buf.as_ref().unwrap().input_data_size(), self.front_buf.as_ref().unwrap().start_parsing_position - self.front_buf.as_ref().unwrap().parsed_position); self.front_buf.as_mut().unwrap().consume_parsed_data(to_consume); } if self.front_buf.as_ref().unwrap().buffer.available_space() == 0 { self.front_readiness.interest.remove(Ready::readable()); } } else { self.front_readiness.event.remove(Ready::readable()); } match res { SocketResult::Error => { self.log_request_error(metrics, &format!("front socket error, 
closing the connection. Readiness: {:?} -> {:?}", self.front_readiness, self.back_readiness)); metrics.service_stop(); self.front_readiness.reset(); self.back_readiness.reset(); return ClientResult::CloseClient; }, SocketResult::Closed => { //we were in keep alive but the peer closed the connection //FIXME: what happens if the connection was just opened but no data came? if unwrap_msg!(self.state.as_ref()).request == Some(RequestState::Initial) { metrics.service_stop(); self.front_readiness.reset(); self.back_readiness.reset(); return ClientResult::CloseClient; } else { self.log_request_error(metrics, &format!("front socket error, closing the connection. Readiness: {:?} -> {:?}", self.front_readiness, self.back_readiness)); metrics.service_stop(); self.front_readiness.reset(); self.back_readiness.reset(); return ClientResult::CloseClient; } }, SocketResult::WouldBlock => { self.front_readiness.event.remove(Ready::readable()); }, SocketResult::Continue => {} }; self.readable_parse(metrics) } pub fn readable_parse(&mut self, metrics: &mut SessionMetrics) -> ClientResult { let is_initial = unwrap_msg!(self.state.as_ref()).request == Some(RequestState::Initial); // if there's no host, continue parsing until we find it let has_host = unwrap_msg!(self.state.as_ref()).has_host(); if !has_host { self.state = Some(parse_request_until_stop(unwrap_msg!(self.state.take()), &self.request_id, &mut self.front_buf.as_mut().unwrap(), &self.sticky_name)); if unwrap_msg!(self.state.as_ref()).is_front_error() { self.log_request_error(metrics, "front parsing error, closing the connection"); metrics.service_stop(); incr!("http.front_parse_errors"); // increment active requests here because it will be decremented right away // when closing the connection. 
It's slightly easier than decrementing it // at every place we return ClientResult::CloseClient gauge_add!("http.active_requests", 1); self.front_readiness.interest.remove(Ready::readable()); return ClientResult::CloseClient; } let is_now_initial = unwrap_msg!(self.state.as_ref()).request == Some(RequestState::Initial); if is_initial && !is_now_initial { gauge_add!("http.active_requests", 1); incr!("http.requests"); } if unwrap_msg!(self.state.as_ref()).has_host() { self.back_readiness.interest.insert(Ready::writable()); return ClientResult::ConnectBackend; } else { self.front_readiness.interest.insert(Ready::readable()); return ClientResult::Continue; } } self.back_readiness.interest.insert(Ready::writable()); match unwrap_msg!(self.state.as_ref()).request { Some(RequestState::Request(_,_,_)) | Some(RequestState::RequestWithBody(_,_,_,_)) => { if ! self.front_buf.as_ref().unwrap().needs_input() { // stop reading self.front_readiness.interest.remove(Ready::readable()); } ClientResult::Continue }, Some(RequestState::RequestWithBodyChunks(_,_,_,Chunk::Ended)) => { error!("{}\tfront read should have stopped on chunk ended", self.log_ctx); self.front_readiness.interest.remove(Ready::readable()); ClientResult::Continue }, Some(RequestState::RequestWithBodyChunks(_,_,_,Chunk::Error)) => { self.log_request_error(metrics, "front read should have stopped on chunk error"); metrics.service_stop(); self.front_readiness.reset(); self.back_readiness.reset(); ClientResult::CloseClient }, Some(RequestState::RequestWithBodyChunks(_,_,_,_)) => { if ! 
self.front_buf.as_ref().unwrap().needs_input() { self.state = Some(parse_request_until_stop(unwrap_msg!(self.state.take()), &self.request_id, &mut self.front_buf.as_mut().unwrap(), &self.sticky_name)); if unwrap_msg!(self.state.as_ref()).is_front_error() { self.log_request_error(metrics, "front chunk parsing error, closing the connection"); self.front_readiness.reset(); self.back_readiness.reset(); return ClientResult::CloseClient; } if let Some(&Some(RequestState::RequestWithBodyChunks(_,_,_,Chunk::Ended))) = self.state.as_ref().map(|s| &s.request) { self.front_readiness.interest.remove(Ready::readable()); } } self.back_readiness.interest.insert(Ready::writable()); ClientResult::Continue }, _ => { self.state = Some(parse_request_until_stop(unwrap_msg!(self.state.take()), &self.request_id, &mut self.front_buf.as_mut().unwrap(), &self.sticky_name)); if unwrap_msg!(self.state.as_ref()).is_front_error() { self.log_request_error(metrics, "front parsing error, closing the connection"); self.front_readiness.reset(); self.back_readiness.reset(); return ClientResult::CloseClient; } if let Some(&Some(RequestState::Request(_,_,_))) = self.state.as_ref().map(|s| &s.request) { self.front_readiness.interest.remove(Ready::readable()); } self.back_readiness.interest.insert(Ready::writable()); ClientResult::Continue } } } fn writable_default_answer(&mut self, metrics: &mut SessionMetrics) -> ClientResult { if let ClientStatus::DefaultAnswer(answer, ref buf, mut index) = self.status { let len = buf.len(); let mut sz = 0usize; let mut res = SocketResult::Continue; while res == SocketResult::Continue && index < len { let (current_sz, current_res) = self.frontend.socket_write(&buf[index..]); res = current_res; sz += current_sz; index += current_sz; } count!("bytes_out", sz as i64); metrics.bout += sz; if res != SocketResult::Continue { self.front_readiness.event.remove(Ready::writable()); } if index == len { metrics.service_stop(); self.log_default_answer_success(&metrics); 
self.front_readiness.reset(); self.back_readiness.reset(); return ClientResult::CloseClient; } if res == SocketResult::Error { self.front_readiness.reset(); self.back_readiness.reset(); metrics.service_stop(); self.log_request_error(metrics, "error writing default answer to front socket, closing"); return ClientResult::CloseClient; } else { return ClientResult::Continue; } } else { ClientResult::CloseClient } } // Forward content to client pub fn writable(&mut self, metrics: &mut SessionMetrics) -> ClientResult { //handle default answers if let ClientStatus::DefaultAnswer(_,_,_) = self.status { return self.writable_default_answer(metrics); } if self.back_buf.is_none() { error!("no back buffer to write on the front socket"); return ClientResult::CloseClient; } let output_size = self.back_buf.as_ref().unwrap().output_data_size(); if self.back_buf.as_ref().unwrap().output_data_size() == 0 || self.back_buf.as_ref().unwrap().next_output_data().len() == 0 { self.back_readiness.interest.insert(Ready::readable()); self.front_readiness.interest.remove(Ready::writable()); return ClientResult::Continue; } let mut sz = 0usize; let mut res = SocketResult::Continue; while res == SocketResult::Continue && self.back_buf.as_ref().unwrap().output_data_size() > 0 { // no more data in buffer, stop here if self.back_buf.as_ref().unwrap().next_output_data().len() == 0 { self.back_readiness.interest.insert(Ready::readable()); self.front_readiness.interest.remove(Ready::writable()); count!("bytes_out", sz as i64); metrics.bout += sz; return ClientResult::Continue; } let (current_sz, current_res) = self.frontend.socket_write(self.back_buf.as_ref().unwrap().next_output_data()); res = current_res; self.back_buf.as_mut().unwrap().consume_output_data(current_sz); sz += current_sz; } count!("bytes_out", sz as i64); metrics.bout += sz; if let Some((front,back)) = self.tokens() { debug!("{}\tFRONT [{}<-{}]: wrote {} bytes of {}, buffer position {} restart position {}", self.log_ctx, front.0, 
back.0, sz, output_size, self.back_buf.as_ref().unwrap().buffer_position, self.back_buf.as_ref().unwrap().start_parsing_position); } match res { SocketResult::Error | SocketResult::Closed => { metrics.service_stop(); self.log_request_error(metrics, "error writing to front socket, closing"); self.front_readiness.reset(); self.back_readiness.reset(); return ClientResult::CloseClient; }, SocketResult::WouldBlock => { self.front_readiness.event.remove(Ready::writable()); }, SocketResult::Continue => {}, } if !self.back_buf.as_ref().unwrap().can_restart_parsing() { self.back_readiness.interest.insert(Ready::readable()); return ClientResult::Continue; } //handle this case separately as its cumbersome to do from the pattern match if let Some(sz) = self.state.as_ref().and_then(|st| st.must_continue()) { self.front_readiness.interest.insert(Ready::readable()); self.front_readiness.interest.remove(Ready::writable()); if self.front_buf.is_some() { // we must now copy the body from front to back trace!("100-Continue => copying {} of body from front to back", sz); self.front_buf.as_mut().unwrap().slice_output(sz); self.front_buf.as_mut().unwrap().consume_parsed_data(sz); self.state.as_mut().map(|ref mut st| { st.response = Some(ResponseState::Initial); st.res_header_end = None; st.request.as_mut().map(|r| r.get_mut_connection().map(|conn| conn.continues = Continue::None)); }); return ClientResult::Continue; } else { error!("got 100 continue but front buffer was already removed"); return ClientResult::CloseClient; } } match unwrap_msg!(self.state.as_ref()).response { // FIXME: should only restart parsing if we are using keepalive Some(ResponseState::Response(_,_)) | Some(ResponseState::ResponseWithBody(_,_,_)) | Some(ResponseState::ResponseWithBodyChunks(_,_,Chunk::Ended)) => { let front_keep_alive = self.state.as_ref().map(|st| st.request.as_ref().map(|r| r.should_keep_alive()).unwrap_or(false)).unwrap_or(false); let back_keep_alive = self.state.as_ref().map(|st| 
st.response.as_ref().map(|r| r.should_keep_alive()).unwrap_or(false)).unwrap_or(false); save_http_status_metric(self.get_response_status()); self.log_request_success(&metrics); metrics.reset(); //FIXME: we could get smarter about this // with no keepalive on backend, we could open a new backend ConnectionError // with no keepalive on front but keepalive on backend, we could have // a pool of connections if front_keep_alive && back_keep_alive { debug!("{} keep alive front/back", self.log_ctx); self.reset(); self.front_readiness.interest = UnixReady::from(Ready::readable()) | UnixReady::hup() | UnixReady::error(); self.back_readiness.interest = UnixReady::hup() | UnixReady::error(); ClientResult::Continue //FIXME: issues reusing the backend socket //self.back_readiness.interest = UnixReady::hup() | UnixReady::error(); //ClientResult::CloseBackend } else if front_keep_alive && !back_keep_alive { debug!("{} keep alive front", self.log_ctx); self.reset(); self.front_readiness.interest = UnixReady::from(Ready::readable()) | UnixReady::hup() | UnixReady::error(); self.back_readiness.interest = UnixReady::hup() | UnixReady::error(); ClientResult::CloseBackend(self.backend_token.clone()) } else { debug!("{} no keep alive", self.log_ctx); self.front_readiness.reset(); self.back_readiness.reset(); ClientResult::CloseClient } }, // restart parsing, since there will be other chunks next Some(ResponseState::ResponseWithBodyChunks(_,_,_)) => { self.back_readiness.interest.insert(Ready::readable()); ClientResult::Continue }, //we're not done parsing the headers Some(ResponseState::HasStatusLine(_,_)) | Some(ResponseState::HasUpgrade(_,_,_)) | Some(ResponseState::HasLength(_,_,_)) => { self.back_readiness.interest.insert(Ready::readable()); ClientResult::Continue }, _ => { self.front_readiness.reset(); self.back_readiness.reset(); ClientResult::CloseClient } } } // Forward content to application pub fn back_writable(&mut self, metrics: &mut SessionMetrics) -> ClientResult { if let 
ClientStatus::DefaultAnswer(_,_,_) = self.status { error!("{}\tsending default answer, should not write to back", self.log_ctx); self.back_readiness.interest.remove(Ready::writable()); self.front_readiness.interest.insert(Ready::writable()); return ClientResult::Continue; } if self.front_buf.as_ref().unwrap().output_data_size() == 0 || self.front_buf.as_ref().unwrap().next_output_data().len() == 0 { self.front_readiness.interest.insert(Ready::readable()); self.back_readiness.interest.remove(Ready::writable()); return ClientResult::Continue; } let tokens = self.tokens().clone(); let output_size = self.front_buf.as_ref().unwrap().output_data_size(); if self.backend.is_none() { metrics.service_stop(); self.log_request_error(metrics, "back socket not found, closing connection"); self.front_readiness.reset(); self.back_readiness.reset(); return ClientResult::CloseClient; } let mut sz = 0usize; let mut socket_res = SocketResult::Continue; { let sock = unwrap_msg!(self.backend.as_mut()); while socket_res == SocketResult::Continue && self.front_buf.as_ref().unwrap().output_data_size() > 0 { // no more data in buffer, stop here if self.front_buf.as_ref().unwrap().next_output_data().len() == 0 { self.front_readiness.interest.insert(Ready::readable()); self.back_readiness.interest.remove(Ready::writable()); metrics.backend_bout += sz; return ClientResult::Continue; } let (current_sz, current_res) = sock.socket_write(self.front_buf.as_ref().unwrap().next_output_data()); socket_res = current_res; self.front_buf.as_mut().unwrap().consume_output_data(current_sz); sz += current_sz; } } metrics.backend_bout += sz; if let Some((front,back)) = tokens { debug!("{}\tBACK [{}->{}]: wrote {} bytes of {}", self.log_ctx, front.0, back.0, sz, output_size); } match socket_res { SocketResult::Error | SocketResult::Closed => { metrics.service_stop(); self.log_request_error(metrics, "back socket write error, closing connection"); self.front_readiness.reset(); self.back_readiness.reset(); return 
ClientResult::CloseClient; }, SocketResult::WouldBlock => { self.back_readiness.event.remove(Ready::writable()); }, SocketResult::Continue => {} } // FIXME/ should read exactly as much data as needed if self.front_buf.as_ref().unwrap().can_restart_parsing() { match unwrap_msg!(self.state.as_ref()).request { // the entire request was transmitted Some(RequestState::Request(_,_,_)) | Some(RequestState::RequestWithBody(_,_,_,_)) | Some(RequestState::RequestWithBodyChunks(_,_,_,Chunk::Ended)) => { // return the buffer to the pool // if there's still data in there, keep it for pipelining if self.state.as_ref().map(|st| st.must_continue()).is_none() { if self.front_buf.as_ref().map(|buf| buf.empty()) == Some(true) { self.front_buf = None; } } self.front_readiness.interest.remove(Ready::readable()); self.back_readiness.interest.insert(Ready::readable()); self.back_readiness.interest.remove(Ready::writable()); ClientResult::Continue }, Some(RequestState::RequestWithBodyChunks(_,_,_,Chunk::Initial)) => { if self.state.as_ref().map(|st| st.must_continue()).is_none() { self.front_readiness.interest.insert(Ready::readable()); ClientResult::Continue } else { // wait for the 100 continue response from the backend // keep the front buffer self.front_readiness.interest.remove(Ready::readable()); self.back_readiness.interest.insert(Ready::readable()); self.back_readiness.interest.remove(Ready::writable()); ClientResult::Continue } } Some(RequestState::RequestWithBodyChunks(_,_,_,_)) => { self.front_readiness.interest.insert(Ready::readable()); ClientResult::Continue }, //we're not done parsing the headers Some(RequestState::HasRequestLine(_,_)) | Some(RequestState::HasHost(_,_,_)) | Some(RequestState::HasLength(_,_,_)) | Some(RequestState::HasHostAndLength(_,_,_,_)) => { self.front_readiness.interest.insert(Ready::readable()); ClientResult::Continue }, ref s => { metrics.service_stop(); self.log_request_error(metrics, "invalid state, closing connection"); 
self.front_readiness.reset(); self.back_readiness.reset(); ClientResult::CloseClient } } } else { self.front_readiness.interest.insert(Ready::readable()); self.back_readiness.interest.insert(Ready::writable()); ClientResult::Continue } } // Read content from application pub fn back_readable(&mut self, metrics: &mut SessionMetrics) -> (ProtocolResult, ClientResult) { if let ClientStatus::DefaultAnswer(_,_,_) = self.status { error!("{}\tsending default answer, should not read from back socket", self.log_ctx); self.back_readiness.interest.remove(Ready::readable()); return (ProtocolResult::Continue, ClientResult::Continue); } if self.back_buf.is_none() { if let Some(p) = self.pool.upgrade() { if let Some(buf) = p.borrow_mut().checkout() { self.back_buf = Some(buf); } else { error!("cannot get back buffer from pool, closing"); return (ProtocolResult::Continue, ClientResult::CloseClient); } } } if self.back_buf.as_ref().unwrap().buffer.available_space() == 0 { self.back_readiness.interest.remove(Ready::readable()); return (ProtocolResult::Continue, ClientResult::Continue); } let tokens = self.tokens().clone(); if self.backend.is_none() { metrics.service_stop(); self.log_request_error(metrics, "back socket not found, closing connection"); self.front_readiness.reset(); self.back_readiness.reset(); return (ProtocolResult::Continue, ClientResult::CloseClient); } let (sz, r) = { let sock = unwrap_msg!(self.backend.as_mut()); sock.socket_read(&mut self.back_buf.as_mut().unwrap().buffer.space()) }; self.back_buf.as_mut().unwrap().buffer.fill(sz); self.back_buf.as_mut().unwrap().sliced_input(sz); metrics.backend_bin += sz; if let Some((front,back)) = tokens { debug!("{}\tBACK [{}<-{}]: read {} bytes", self.log_ctx, front.0, back.0, sz); } if r != SocketResult::Continue || sz == 0 { self.back_readiness.event.remove(Ready::readable()); } if r == SocketResult::Error { metrics.service_stop(); self.log_request_error(metrics, "back socket read error, closing connection"); 
self.front_readiness.reset(); self.back_readiness.reset(); return (ProtocolResult::Continue, ClientResult::CloseClient); } // isolate that here because the "ref protocol" and the self.state = " make borrowing conflicts if let Some(&Some(ResponseState::ResponseUpgrade(_,_, ref protocol))) = self.state.as_ref().map(|s| &s.response) { debug!("got an upgrade state[{}]: {:?}", line!(), protocol); if protocol == "websocket" { return (ProtocolResult::Upgrade, ClientResult::Continue); } else { //FIXME: should we upgrade to a pipe or send an error? return (ProtocolResult::Continue, ClientResult::Continue); } } match unwrap_msg!(self.state.as_ref()).response { Some(ResponseState::Response(_,_)) => { metrics.service_stop(); self.log_request_error(metrics, "should not go back in back_readable if the whole response was parsed"); self.front_readiness.reset(); self.back_readiness.reset(); (ProtocolResult::Continue, ClientResult::CloseClient) }, Some(ResponseState::ResponseWithBody(_,_,_)) => { self.front_readiness.interest.insert(Ready::writable()); if ! 
self.back_buf.as_ref().unwrap().needs_input() { self.back_readiness.interest.remove(Ready::readable()); } (ProtocolResult::Continue, ClientResult::Continue) }, Some(ResponseState::ResponseWithBodyChunks(_,_,Chunk::Ended)) => { use nom::HexDisplay; metrics.service_stop(); error!("{}\tback read should have stopped on chunk ended\nstate: {:?}\ndata:{}", self.log_ctx, self.state, self.back_buf.as_ref().unwrap().unparsed_data().to_hex(16)); self.log_request_error(metrics, "back read should have stopped on chunk ended"); self.front_readiness.reset(); self.back_readiness.reset(); (ProtocolResult::Continue, ClientResult::CloseClient) }, Some(ResponseState::ResponseWithBodyChunks(_,_,Chunk::Error)) => { metrics.service_stop(); self.log_request_error(metrics, "back read should have stopped on chunk error"); self.front_readiness.reset(); self.back_readiness.reset(); (ProtocolResult::Continue, ClientResult::CloseClient) }, Some(ResponseState::ResponseWithBodyChunks(_,_,_)) => { if ! self.back_buf.as_ref().unwrap().needs_input() { self.state = Some(parse_response_until_stop(unwrap_msg!(self.state.take()), &self.request_id, &mut self.back_buf.as_mut().unwrap(), &self.sticky_name, self.sticky_session.take())); if unwrap_msg!(self.state.as_ref()).is_back_error() { metrics.service_stop(); self.log_request_error(metrics, "back socket chunk parse error, closing connection"); self.front_readiness.reset(); self.back_readiness.reset(); return (ProtocolResult::Continue, ClientResult::CloseClient); } if let Some(&Some(ResponseState::ResponseWithBodyChunks(_,_,Chunk::Ended))) = self.state.as_ref().map(|s| &s.response) { self.back_readiness.interest.remove(Ready::readable()); } } self.front_readiness.interest.insert(Ready::writable()); (ProtocolResult::Continue, ClientResult::Continue) }, Some(ResponseState::Error(_,_,_,_,_)) => panic!("{}\tback read should have stopped on responsestate error", self.log_ctx), _ => { self.state = Some(parse_response_until_stop(unwrap_msg!(self.state.take()), 
&self.request_id, &mut self.back_buf.as_mut().unwrap(), &self.sticky_name,
          self.sticky_session.take()));

        if unwrap_msg!(self.state.as_ref()).is_back_error() {
          metrics.service_stop();
          self.log_request_error(metrics, "back socket parse error, closing connection");
          self.front_readiness.reset();
          self.back_readiness.reset();
          return (ProtocolResult::Continue, ClientResult::CloseClient);
        }

        // the whole response was parsed: no need to keep reading from the backend
        if let Some(ResponseState::Response(_,_)) = unwrap_msg!(self.state.as_ref()).response {
          self.back_readiness.interest.remove(Ready::readable());
        }

        if let Some(&Some(ResponseState::ResponseUpgrade(_,_, ref protocol))) = self.state.as_ref().map(|s| &s.response) {
          debug!("got an upgrade state[{}]: {:?}", line!(), protocol);
          if protocol == "websocket" {
            return (ProtocolResult::Upgrade, ClientResult::Continue);
          } else {
            //FIXME: should we upgrade to a pipe or send an error?
            return (ProtocolResult::Continue, ClientResult::Continue);
          }
        }

        self.front_readiness.interest.insert(Ready::writable());
        (ProtocolResult::Continue, ClientResult::Continue)
      }
    }
  }
}

/// Save the backend http response status code metric
fn save_http_status_metric(rs_status_line : Option<RStatusLine>) {
  if let Some(rs_status_line) = rs_status_line {
    match rs_status_line.status {
      100...199 => { incr!("http.status.1xx"); },
      200...299 => { incr!("http.status.2xx"); },
      300...399 => { incr!("http.status.3xx"); },
      400...499 => { incr!("http.status.4xx"); },
      500...599 => { incr!("http.status.5xx"); },
      _ => { incr!("http.status.other"); }, // http responses with other codes (protocol error)
    }
  }
}

// NOTE(review): the paragraph below is a raw commit message that had been
// pasted into the source file as bare text; it is not valid Rust and broke
// the build, so it is preserved here as a comment for its context:
//
// do not show the back read chunk error when there's no data we can get
// in a case where the last chunk was read, but we still see the back socket
// as readable (because we read it entirely but did not call read again to
// get a WouldBlock error), so we get into the back_readable method again,
// but we read 0 bytes (the error message was "back read should have stopped
// on chunk ended"). We'll still leave the debug message for now, to see if
// there's a case where there was actually some lingering data

use std::cmp::min;
use std::io::Write;
use std::rc::{Rc,Weak};
use std::cell::RefCell;
use std::net::{SocketAddr,IpAddr};
use mio::*;
use mio::unix::UnixReady;
use mio::tcp::TcpStream;
use time::{Duration, precise_time_s, precise_time_ns};
use uuid::Uuid;
use parser::http11::{HttpState,parse_request_until_stop, parse_response_until_stop,
  BufferMove, RequestState, ResponseState, Chunk, Continue, RRequestLine, RStatusLine};
use network::{ClientResult,Protocol,Readiness,SessionMetrics, LogDuration};
use network::buffer_queue::BufferQueue;
use network::socket::{SocketHandler,SocketResult};
use network::protocol::ProtocolResult;
use network::pool::{Pool,Checkout};
use pool_crate::Reset;
use util::UnwrapLog;

/// Identifier of the backend a client should stick to, sent back in a cookie
/// so that subsequent requests from the same client reach the same backend.
#[derive(Clone)]
pub struct StickySession {
  pub sticky_id: String
}

impl StickySession {
  pub fn new(backend_id: String) -> StickySession {
    StickySession {
      sticky_id: backend_id
    }
  }
}

type BackendToken = Token;

#[derive(Debug,Clone,PartialEq)]
pub enum ClientStatus {
  /// normal proxying between frontend and backend
  Normal,
  /// status, HTTP answer, index in HTTP answer
  DefaultAnswer(DefaultAnswerStatus, Rc<Vec<u8>>, usize),
}

/// Which canned HTTP answer is being served instead of a backend response.
#[derive(Debug,Clone,Copy,PartialEq)]
pub enum DefaultAnswerStatus {
  Answer301,
  Answer400,
  Answer404,
  Answer503,
  Answer413,
}

/// State of one proxied HTTP session: frontend and backend sockets, their
/// buffers and readiness, the HTTP parser state machine, and the
/// logging/metrics context attached to the current request.
pub struct Http<Front:SocketHandler> {
  pub frontend:       Front,
  pub backend:        Option<TcpStream>,
  frontend_token:     Token,
  backend_token:      Option<Token>,
  pub status:         ClientStatus,
  pub state:          Option<HttpState>,
  pub front_buf:      Option<Checkout<BufferQueue>>,
  pub back_buf:       Option<Checkout<BufferQueue>>,
  pub app_id:         Option<String>,
  pub request_id:     String,
  pub front_readiness:Readiness,
  pub back_readiness: Readiness,
  pub log_ctx:        String,
  pub public_address: Option<IpAddr>,
  pub client_address: Option<SocketAddr>,
  pub sticky_name:    String,
  pub sticky_session: Option<StickySession>,
  pub protocol:       Protocol,
  pool:               Weak<RefCell<Pool<BufferQueue>>>,
}

impl<Front:SocketHandler>
Http<Front> { pub fn new(sock: Front, token: Token, pool: Weak<RefCell<Pool<BufferQueue>>>, public_address: Option<IpAddr>, client_address: Option<SocketAddr>, sticky_name: String, protocol: Protocol) -> Option<Http<Front>> { let request_id = Uuid::new_v4().hyphenated().to_string(); let log_ctx = format!("{} unknown\t", &request_id); let mut client = Http { frontend: sock, backend: None, frontend_token: token, backend_token: None, status: ClientStatus::Normal, state: Some(HttpState::new()), front_buf: None, back_buf: None, app_id: None, request_id: request_id, front_readiness: Readiness::new(), back_readiness: Readiness::new(), log_ctx: log_ctx, public_address: public_address, client_address: client_address, sticky_name: sticky_name, sticky_session: None, protocol: protocol, pool, }; let req_header = client.added_request_header(public_address, client_address); let res_header = client.added_response_header(); client.state.as_mut().map(|ref mut state| state.added_req_header = req_header); client.state.as_mut().map(|ref mut state| state.added_res_header = res_header); Some(client) } pub fn reset(&mut self) { let request_id = Uuid::new_v4().hyphenated().to_string(); //info!("{} RESET TO {}", self.log_ctx, request_id); gauge_add!("http.active_requests", -1); self.state.as_mut().map(|state| state.reset()); let req_header = self.added_request_header(self.public_address, self.client_address); let res_header = self.added_response_header(); self.state.as_mut().map(|ref mut state| state.added_req_header = req_header); self.state.as_mut().map(|ref mut state| state.added_res_header = res_header); // if HTTP requests are pipelined, we might still have some data in the front buffer if self.front_buf.as_ref().map(|buf| !buf.empty()).unwrap_or(false) { self.front_readiness.event.insert(Ready::readable()); } else { self.front_buf = None; } self.back_buf = None; self.request_id = request_id; self.log_ctx = format!("{} {}\t", self.request_id, 
self.app_id.as_ref().unwrap_or(&String::from("unknown"))); } fn tokens(&self) -> Option<(Token,Token)> { if let Some(back) = self.backend_token { return Some((self.frontend_token, back)) } None } pub fn state(&mut self) -> &mut HttpState { unwrap_msg!(self.state.as_mut()) } pub fn set_state(&mut self, state: HttpState) { self.state = Some(state); } pub fn set_answer(&mut self, answer: DefaultAnswerStatus, buf: Rc<Vec<u8>>) { self.front_buf = None; self.back_buf = None; self.status = ClientStatus::DefaultAnswer(answer, buf, 0); self.front_readiness.interest = UnixReady::from(Ready::writable()) | UnixReady::hup() | UnixReady::error(); self.back_readiness.interest = UnixReady::hup() | UnixReady::error(); } pub fn added_request_header(&self, public_address: Option<IpAddr>, client_address: Option<SocketAddr>) -> String { let peer = client_address.or(self.front_socket().peer_addr().ok()).map(|addr| (addr.ip(), addr.port())); let front = public_address.or(self.front_socket().local_addr().map(|addr| addr.ip()).ok()); let client_port = self.front_socket().local_addr().map(|addr| addr.port()).ok(); if let (Some((peer_ip, peer_port)), Some(front), Some(client_port)) = (peer, front, client_port) { let proto = match self.protocol() { Protocol::HTTP => "http", Protocol::HTTPS => "https", _ => unreachable!() }; //FIXME: in the "for", we don't put the other values we could get from a preexisting forward header match (peer_ip, peer_port, front) { (IpAddr::V4(p), peer_port, IpAddr::V4(f)) => { format!("Forwarded: proto={};for={}:{};by={}\r\nX-Forwarded-Proto: {}\r\nX-Forwarded-For: {}\r\n\ X-Forwarded-Port: {}\r\nSozu-Id: {}\r\n", proto, peer_ip, peer_port, front, proto, peer_ip, client_port, self.request_id) }, (IpAddr::V4(p), peer_port, IpAddr::V6(f)) => { format!("Forwarded: proto={};for={}:{};by=\"{}\"\r\nX-Forwarded-Proto: {}\r\nX-Forwarded-For: {}\r\n\ X-Forwarded-Port: {}\r\nSozu-Id: {}\r\n", proto, peer_ip, peer_port, front, proto, peer_ip, client_port, self.request_id) }, 
(IpAddr::V6(p), peer_port, IpAddr::V4(f)) => {
          // NOTE(review): the pattern binds p and f, but the body uses the outer
          // peer_ip and front bindings, leaving p and f unused — verify intent
          format!("Forwarded: proto={};for=\"{}:{}\";by={}\r\nX-Forwarded-Proto: {}\r\nX-Forwarded-For: {}\r\n\
            X-Forwarded-Port: {}\r\nSozu-Id: {}\r\n",
            proto, peer_ip, peer_port, front, proto, peer_ip, client_port, self.request_id)
        },
        (IpAddr::V6(p), peer_port, IpAddr::V6(f)) => {
          format!("Forwarded: proto={};for=\"{}:{}\";by=\"{}\"\r\nX-Forwarded-Proto: {}\r\nX-Forwarded-For: {}\r\n\
            X-Forwarded-Port: {}\r\nSozu-Id: {}\r\n",
            proto, peer_ip, peer_port, front, proto, peer_ip, client_port, self.request_id)
        },
      }
    } else {
      // no usable address information: only tag the request with its id
      format!("Sozu-Id: {}\r\n", self.request_id)
    }
  }

  /// Header block appended to responses sent back to the client.
  pub fn added_response_header(&self) -> String {
    format!("Sozu-Id: {}\r\n", self.request_id)
  }

  pub fn front_socket(&self) -> &TcpStream {
    self.frontend.socket_ref()
  }

  pub fn back_socket(&self) -> Option<&TcpStream> {
    self.backend.as_ref()
  }

  pub fn back_token(&self) -> Option<Token> {
    self.backend_token
  }

  // NOTE(review): intentionally a no-op — sockets are closed by their owners
  pub fn close(&mut self) {
  }

  /// Prefix for log lines: request id and application id (or "unknown").
  pub fn log_context(&self) -> String {
    if let Some(ref app_id) = self.app_id {
      format!("{}\t{}\t", self.request_id, app_id)
    } else {
      format!("{}\tunknown\t", self.request_id)
    }
  }

  pub fn set_back_socket(&mut self, socket: TcpStream) {
    self.backend = Some(socket);
  }

  /// Record which application this session was routed to and refresh the log prefix.
  pub fn set_app_id(&mut self, app_id: String) {
    self.log_ctx = format!("{} {}\t", self.request_id, &app_id);
    self.app_id = Some(app_id);
  }

  pub fn set_back_token(&mut self, token: Token) {
    self.backend_token = Some(token);
  }

  pub fn front_readiness(&mut self) -> &mut Readiness {
    &mut self.front_readiness
  }

  pub fn back_readiness(&mut self) -> &mut Readiness {
    &mut self.back_readiness
  }

  fn protocol(&self) -> Protocol {
    self.protocol
  }

  /// Detach the backend socket and token from this session, returning the
  /// application id and the backend address for the caller's bookkeeping.
  pub fn remove_backend(&mut self) -> (Option<String>, Option<SocketAddr>) {
    debug!("{}\tPROXY [{} -> {}] CLOSED BACKEND", self.log_ctx, self.frontend_token.0,
      self.backend_token.map(|t| format!("{}", t.0)).unwrap_or("-".to_string()));
    let addr:Option<SocketAddr> = self.backend.as_ref().and_then(|sock| sock.peer_addr().ok());
    self.backend = None;
self.backend_token = None;

    (self.app_id.clone(), addr)
  }

  /// The client hung up the frontend socket: nothing more can be done with
  /// this session, so close it.
  pub fn front_hup(&mut self) -> ClientResult {
    ClientResult::CloseClient
  }

  /// The backend hung up its socket. If response data is still buffered (or
  /// still readable on the socket), keep the session alive long enough to
  /// flush it to the client; otherwise close.
  pub fn back_hup(&mut self) -> ClientResult {
    if let Some(ref mut buf) = self.back_buf {
      //FIXME: closing the client might not be a good idea if we do keep alive on the front here?
      if buf.output_data_size() == 0 || buf.next_output_data().len() == 0 {
        // nothing left to forward; continue only if the socket still has data to read
        if self.back_readiness.event.is_readable() {
          self.back_readiness.interest.insert(Ready::readable());
          ClientResult::Continue
        } else {
          ClientResult::CloseClient
        }
      } else {
        // response data remains: keep writing it out to the client
        self.front_readiness.interest.insert(Ready::writable());
        if self.back_readiness.event.is_readable() {
          self.back_readiness.interest.insert(Ready::readable());
        }
        ClientResult::Continue
      }
    } else {
      ClientResult::CloseClient
    }
  }

  /// Retrieve the response status from the http response state
  pub fn get_response_status(&self) -> Option<RStatusLine> {
    if let Some(state) = self.state.as_ref() {
      state.get_status_line()
    } else {
      None
    }
  }

  /// Host header of the current request, if already parsed.
  pub fn get_host(&self) -> Option<String> {
    if let Some(state) = self.state.as_ref() {
      state.get_host()
    } else {
      None
    }
  }

  /// Request line (method and URI) of the current request, if already parsed.
  pub fn get_request_line(&self) -> Option<RRequestLine> {
    if let Some(state) = self.state.as_ref() {
      state.get_request_line()
    } else {
      None
    }
  }

  /// Client address, from configuration or from the frontend socket itself.
  pub fn get_client_address(&self) -> Option<SocketAddr> {
    self.client_address.or(self.frontend.socket_ref().peer_addr().ok())
  }

  /// Address of the backend server this session is connected to, if any.
  pub fn get_backend_address(&self) -> Option<SocketAddr> {
    self.backend.as_ref().and_then(|backend| backend.peer_addr().ok())
  }

  /// Emit the access log line and metrics for a successfully proxied request.
  pub fn log_request_success(&self, metrics: &SessionMetrics) {
    let client = match self.get_client_address() {
      None => String::from("-"),
      Some(SocketAddr::V4(addr)) => format!("{}", addr),
      Some(SocketAddr::V6(addr)) => format!("{}", addr),
    };

    let backend = match self.get_backend_address() {
      None => String::from("-"),
      Some(SocketAddr::V4(addr)) => format!("{}", addr),
      Some(SocketAddr::V6(addr)) => format!("{}", addr),
    };

    let host = self.get_host().unwrap_or(String::from("-"));
    let request_line =
self.get_request_line().map(|line| format!("{} {}", line.method, line.uri)).unwrap_or(String::from("-")); let status_line = self.get_response_status().map(|line| format!("{} {}", line.status, line.reason)).unwrap_or(String::from("-")); let response_time = metrics.response_time(); let service_time = metrics.service_time(); let app_id = self.app_id.clone().unwrap_or(String::from("-")); time!("request_time", &app_id, response_time.num_milliseconds()); if let Some(backend_id) = metrics.backend_id.as_ref() { if let Some(backend_response_time) = metrics.backend_response_time() { record_backend_metrics!(app_id, backend_id, backend_response_time.num_milliseconds(), metrics.backend_bin, metrics.backend_bout); } } info_access!("{}{} -> {}\t{} {} {} {}\t{} {} {}", self.log_ctx, client, backend, LogDuration(response_time), LogDuration(service_time), metrics.bin, metrics.bout, status_line, host, request_line); } pub fn log_default_answer_success(&self, metrics: &SessionMetrics) { let client = match self.get_client_address() { None => String::from("-"), Some(SocketAddr::V4(addr)) => format!("{}", addr), Some(SocketAddr::V6(addr)) => format!("{}", addr), }; let status_line = match self.status { ClientStatus::Normal => "-", ClientStatus::DefaultAnswer(DefaultAnswerStatus::Answer301, _, _) => "301 Moved Permanently", ClientStatus::DefaultAnswer(DefaultAnswerStatus::Answer400, _, _) => "400 Bad Request", ClientStatus::DefaultAnswer(DefaultAnswerStatus::Answer404, _, _) => "404 Not Found", ClientStatus::DefaultAnswer(DefaultAnswerStatus::Answer503, _, _) => "503 Service Unavailable", ClientStatus::DefaultAnswer(DefaultAnswerStatus::Answer413, _, _) => "413 Payload Too Large", }; let host = self.get_host().unwrap_or(String::from("-")); let request_line = self.get_request_line().map(|line| format!("{} {}", line.method, line.uri)).unwrap_or(String::from("-")); let response_time = metrics.response_time(); let service_time = metrics.service_time(); if let Some(ref app_id) = self.app_id { 
time!("http.request.time", &app_id, response_time.num_milliseconds()); } incr!("http.errors"); info_access!("{}{} -> X\t{} {} {} {}\t{} {} {}", self.log_ctx, client, LogDuration(response_time), LogDuration(service_time), metrics.bin, metrics.bout, status_line, host, request_line); } pub fn log_request_error(&self, metrics: &SessionMetrics, message: &str) { let client = match self.get_client_address() { None => String::from("-"), Some(SocketAddr::V4(addr)) => format!("{}", addr), Some(SocketAddr::V6(addr)) => format!("{}", addr), }; let backend = match self.get_backend_address() { None => String::from("-"), Some(SocketAddr::V4(addr)) => format!("{}", addr), Some(SocketAddr::V6(addr)) => format!("{}", addr), }; let host = self.get_host().unwrap_or(String::from("-")); let request_line = self.get_request_line().map(|line| format!("{} {}", line.method, line.uri)).unwrap_or(String::from("-")); let status_line = self.get_response_status().map(|line| format!("{} {}", line.status, line.reason)).unwrap_or(String::from("-")); let response_time = metrics.response_time(); let service_time = metrics.service_time(); let app_id = self.app_id.clone().unwrap_or(String::from("-")); incr!("http.errors"); /*time!("request_time", &app_id, response_time); if let Some(backend_id) = metrics.backend_id.as_ref() { if let Some(backend_response_time) = metrics.backend_response_time() { record_backend_metrics!(app_id, backend_id, backend_response_time.num_milliseconds(), metrics.backend_bin, metrics.backend_bout); } }*/ error_access!("{}{} -> {}\t{} {} {} {}\t{} {} {} | {}", self.log_ctx, client, backend, LogDuration(response_time), LogDuration(service_time), metrics.bin, metrics.bout, status_line, host, request_line, message); } // Read content from the client pub fn readable(&mut self, metrics: &mut SessionMetrics) -> ClientResult { if let ClientStatus::DefaultAnswer(_,_,_) = self.status { self.front_readiness.interest.insert(Ready::writable()); 
self.back_readiness.interest.remove(Ready::readable());
      self.back_readiness.interest.remove(Ready::writable());
      return ClientResult::Continue;
    }

    assert!(!unwrap_msg!(self.state.as_ref()).is_front_error());

    // lazily check out a front buffer from the pool on first read
    if self.front_buf.is_none() {
      if let Some(p) = self.pool.upgrade() {
        if let Some(buf) = p.borrow_mut().checkout() {
          self.front_buf = Some(buf);
        } else {
          error!("cannot get front buffer from pool, closing");
          return ClientResult::CloseClient;
        }
      }
    }

    if self.front_buf.as_ref().unwrap().buffer.available_space() == 0 {
      if self.backend_token == None {
        // We don't have a backend to empty the buffer into, close the connection
        metrics.service_stop();
        self.log_request_error(metrics, "front buffer full, no backend, closing connection");
        let answer_413 = "HTTP/1.1 413 Payload Too Large\r\nContent-Length: 0\r\n\r\n";
        self.set_answer(DefaultAnswerStatus::Answer413, Rc::new(Vec::from(answer_413.as_bytes())));
        self.front_readiness.interest.remove(Ready::readable());
        self.front_readiness.interest.insert(Ready::writable());
      } else {
        // buffer full: stop reading until the backend has drained it
        self.front_readiness.interest.remove(Ready::readable());
        self.back_readiness.interest.insert(Ready::writable());
      }
      return ClientResult::Continue;
    }

    let (sz, res) = self.frontend.socket_read(self.front_buf.as_mut().unwrap().buffer.space());
    debug!("{}\tFRONT: read {} bytes", self.log_ctx, sz);

    if sz > 0 {
      self.front_buf.as_mut().unwrap().buffer.fill(sz);
      self.front_buf.as_mut().unwrap().sliced_input(sz);
      count!("bytes_in", sz as i64);
      metrics.bin += sz;

      // skip over input the parser already decided to jump past
      if self.front_buf.as_ref().unwrap().start_parsing_position > self.front_buf.as_ref().unwrap().parsed_position {
        let to_consume = min(self.front_buf.as_ref().unwrap().input_data_size(), self.front_buf.as_ref().unwrap().start_parsing_position - self.front_buf.as_ref().unwrap().parsed_position);
        self.front_buf.as_mut().unwrap().consume_parsed_data(to_consume);
      }

      if self.front_buf.as_ref().unwrap().buffer.available_space() == 0 {
        self.front_readiness.interest.remove(Ready::readable());
      }
    } else {
      self.front_readiness.event.remove(Ready::readable());
    }

    match res {
      SocketResult::Error => {
        self.log_request_error(metrics, &format!("front socket error, closing the connection. Readiness: {:?} -> {:?}", self.front_readiness, self.back_readiness));
        metrics.service_stop();
        self.front_readiness.reset();
        self.back_readiness.reset();
        return ClientResult::CloseClient;
      },
      SocketResult::Closed => {
        //we were in keep alive but the peer closed the connection
        //FIXME: what happens if the connection was just opened but no data came?
        if unwrap_msg!(self.state.as_ref()).request == Some(RequestState::Initial) {
          metrics.service_stop();
          self.front_readiness.reset();
          self.back_readiness.reset();
          return ClientResult::CloseClient;
        } else {
          // NOTE(review): both branches are identical except that this one logs;
          // the message says "error" although the peer merely closed — confirm wording.
          self.log_request_error(metrics, &format!("front socket error, closing the connection. Readiness: {:?} -> {:?}", self.front_readiness, self.back_readiness));
          metrics.service_stop();
          self.front_readiness.reset();
          self.back_readiness.reset();
          return ClientResult::CloseClient;
        }
      },
      SocketResult::WouldBlock => {
        self.front_readiness.event.remove(Ready::readable());
      },
      SocketResult::Continue => {}
    };

    self.readable_parse(metrics)
  }

  /// Feed freshly-read front data to the request parser, update readiness
  /// interests, and decide whether a backend connection should be opened.
  pub fn readable_parse(&mut self, metrics: &mut SessionMetrics) -> ClientResult {
    let is_initial = unwrap_msg!(self.state.as_ref()).request == Some(RequestState::Initial);
    // if there's no host, continue parsing until we find it
    let has_host = unwrap_msg!(self.state.as_ref()).has_host();
    if !has_host {
      self.state = Some(parse_request_until_stop(unwrap_msg!(self.state.take()), &self.request_id,
        &mut self.front_buf.as_mut().unwrap(), &self.sticky_name));
      if unwrap_msg!(self.state.as_ref()).is_front_error() {
        self.log_request_error(metrics, "front parsing error, closing the connection");
        metrics.service_stop();
        incr!("http.front_parse_errors");
        // increment active requests here because it will be decremented right away
        // when closing the connection.
// It's slightly easier than decrementing it
        // at every place we return ClientResult::CloseClient
        gauge_add!("http.active_requests", 1);
        self.front_readiness.interest.remove(Ready::readable());
        return ClientResult::CloseClient;
      }

      let is_now_initial = unwrap_msg!(self.state.as_ref()).request == Some(RequestState::Initial);
      // the parser just left the Initial state: count a new request
      if is_initial && !is_now_initial {
        gauge_add!("http.active_requests", 1);
        incr!("http.requests");
      }

      if unwrap_msg!(self.state.as_ref()).has_host() {
        self.back_readiness.interest.insert(Ready::writable());
        return ClientResult::ConnectBackend;
      } else {
        self.front_readiness.interest.insert(Ready::readable());
        return ClientResult::Continue;
      }
    }

    self.back_readiness.interest.insert(Ready::writable());

    match unwrap_msg!(self.state.as_ref()).request {
      Some(RequestState::Request(_,_,_)) | Some(RequestState::RequestWithBody(_,_,_,_)) => {
        if ! self.front_buf.as_ref().unwrap().needs_input() {
          // stop reading
          self.front_readiness.interest.remove(Ready::readable());
        }
        ClientResult::Continue
      },
      Some(RequestState::RequestWithBodyChunks(_,_,_,Chunk::Ended)) => {
        error!("{}\tfront read should have stopped on chunk ended", self.log_ctx);
        self.front_readiness.interest.remove(Ready::readable());
        ClientResult::Continue
      },
      Some(RequestState::RequestWithBodyChunks(_,_,_,Chunk::Error)) => {
        self.log_request_error(metrics, "front read should have stopped on chunk error");
        metrics.service_stop();
        self.front_readiness.reset();
        self.back_readiness.reset();
        ClientResult::CloseClient
      },
      Some(RequestState::RequestWithBodyChunks(_,_,_,_)) => {
        // more body chunks expected: keep feeding the parser
        if ! self.front_buf.as_ref().unwrap().needs_input() {
          self.state = Some(parse_request_until_stop(unwrap_msg!(self.state.take()), &self.request_id,
            &mut self.front_buf.as_mut().unwrap(), &self.sticky_name));
          if unwrap_msg!(self.state.as_ref()).is_front_error() {
            self.log_request_error(metrics, "front chunk parsing error, closing the connection");
            self.front_readiness.reset();
            self.back_readiness.reset();
            return ClientResult::CloseClient;
          }

          if let Some(&Some(RequestState::RequestWithBodyChunks(_,_,_,Chunk::Ended))) = self.state.as_ref().map(|s| &s.request) {
            self.front_readiness.interest.remove(Ready::readable());
          }
        }
        self.back_readiness.interest.insert(Ready::writable());
        ClientResult::Continue
      },
      _ => {
        self.state = Some(parse_request_until_stop(unwrap_msg!(self.state.take()), &self.request_id,
          &mut self.front_buf.as_mut().unwrap(), &self.sticky_name));
        if unwrap_msg!(self.state.as_ref()).is_front_error() {
          self.log_request_error(metrics, "front parsing error, closing the connection");
          self.front_readiness.reset();
          self.back_readiness.reset();
          return ClientResult::CloseClient;
        }

        if let Some(&Some(RequestState::Request(_,_,_))) = self.state.as_ref().map(|s| &s.request) {
          self.front_readiness.interest.remove(Ready::readable());
        }
        self.back_readiness.interest.insert(Ready::writable());
        ClientResult::Continue
      }
    }
  }

  /// Write one of the proxy's canned default answers to the front socket.
  fn writable_default_answer(&mut self, metrics: &mut SessionMetrics) -> ClientResult {
    if let ClientStatus::DefaultAnswer(answer, ref buf, mut index) = self.status {
      let len = buf.len();

      let mut sz = 0usize;
      let mut res = SocketResult::Continue;
      // NOTE(review): `index` is a pattern-bound copy of the field inside
      // self.status — progress made here is not written back, so a partial
      // write (WouldBlock) restarts from the old position next time. Confirm.
      while res == SocketResult::Continue && index < len {
        let (current_sz, current_res) = self.frontend.socket_write(&buf[index..]);
        res = current_res;
        sz += current_sz;
        index += current_sz;
      }

      count!("bytes_out", sz as i64);
      metrics.bout += sz;

      if res != SocketResult::Continue {
        self.front_readiness.event.remove(Ready::writable());
      }

      if index == len {
        metrics.service_stop();
        self.log_default_answer_success(&metrics);
self.front_readiness.reset();
        self.back_readiness.reset();
        return ClientResult::CloseClient;
      }

      if res == SocketResult::Error {
        self.front_readiness.reset();
        self.back_readiness.reset();
        metrics.service_stop();
        self.log_request_error(metrics, "error writing default answer to front socket, closing");
        return ClientResult::CloseClient;
      } else {
        return ClientResult::Continue;
      }
    } else {
      // not in default-answer mode: nothing this method can do
      ClientResult::CloseClient
    }
  }

  // Forward content to client
  pub fn writable(&mut self, metrics: &mut SessionMetrics) -> ClientResult {
    //handle default answers
    if let ClientStatus::DefaultAnswer(_,_,_) = self.status {
      return self.writable_default_answer(metrics);
    }

    if self.back_buf.is_none() {
      error!("no back buffer to write on the front socket");
      return ClientResult::CloseClient;
    }

    let output_size = self.back_buf.as_ref().unwrap().output_data_size();
    if self.back_buf.as_ref().unwrap().output_data_size() == 0 || self.back_buf.as_ref().unwrap().next_output_data().len() == 0 {
      // nothing buffered: wait for more data from the backend
      self.back_readiness.interest.insert(Ready::readable());
      self.front_readiness.interest.remove(Ready::writable());
      return ClientResult::Continue;
    }

    let mut sz = 0usize;
    let mut res = SocketResult::Continue;
    while res == SocketResult::Continue && self.back_buf.as_ref().unwrap().output_data_size() > 0 {
      // no more data in buffer, stop here
      if self.back_buf.as_ref().unwrap().next_output_data().len() == 0 {
        self.back_readiness.interest.insert(Ready::readable());
        self.front_readiness.interest.remove(Ready::writable());
        count!("bytes_out", sz as i64);
        metrics.bout += sz;
        return ClientResult::Continue;
      }
      let (current_sz, current_res) = self.frontend.socket_write(self.back_buf.as_ref().unwrap().next_output_data());
      res = current_res;
      self.back_buf.as_mut().unwrap().consume_output_data(current_sz);
      sz += current_sz;
    }
    count!("bytes_out", sz as i64);
    metrics.bout += sz;

    if let Some((front,back)) = self.tokens() {
      debug!("{}\tFRONT [{}<-{}]: wrote {} bytes of {}, buffer position {} restart position {}",
        self.log_ctx, front.0, back.0, sz, output_size,
        self.back_buf.as_ref().unwrap().buffer_position,
        self.back_buf.as_ref().unwrap().start_parsing_position);
    }

    match res {
      SocketResult::Error | SocketResult::Closed => {
        metrics.service_stop();
        self.log_request_error(metrics, "error writing to front socket, closing");
        self.front_readiness.reset();
        self.back_readiness.reset();
        return ClientResult::CloseClient;
      },
      SocketResult::WouldBlock => {
        self.front_readiness.event.remove(Ready::writable());
      },
      SocketResult::Continue => {},
    }

    if !self.back_buf.as_ref().unwrap().can_restart_parsing() {
      self.back_readiness.interest.insert(Ready::readable());
      return ClientResult::Continue;
    }

    //handle this case separately as its cumbersome to do from the pattern match
    if let Some(sz) = self.state.as_ref().and_then(|st| st.must_continue()) {
      // "100 Continue" was relayed: switch back to reading the request body
      self.front_readiness.interest.insert(Ready::readable());
      self.front_readiness.interest.remove(Ready::writable());

      if self.front_buf.is_some() {
        // we must now copy the body from front to back
        trace!("100-Continue => copying {} of body from front to back", sz);
        self.front_buf.as_mut().unwrap().slice_output(sz);
        self.front_buf.as_mut().unwrap().consume_parsed_data(sz);

        // reset response state so the real answer after 100-Continue is parsed fresh
        self.state.as_mut().map(|ref mut st| {
          st.response = Some(ResponseState::Initial);
          st.res_header_end = None;
          st.request.as_mut().map(|r| r.get_mut_connection().map(|conn| conn.continues = Continue::None));
        });
        return ClientResult::Continue;
      } else {
        error!("got 100 continue but front buffer was already removed");
        return ClientResult::CloseClient;
      }
    }

    match unwrap_msg!(self.state.as_ref()).response {
      // FIXME: should only restart parsing if we are using keepalive
      Some(ResponseState::Response(_,_)) | Some(ResponseState::ResponseWithBody(_,_,_)) | Some(ResponseState::ResponseWithBodyChunks(_,_,Chunk::Ended)) => {
        // the whole response was sent to the client: decide keep-alive handling
        let front_keep_alive = self.state.as_ref().map(|st| st.request.as_ref().map(|r| r.should_keep_alive()).unwrap_or(false)).unwrap_or(false);
        let back_keep_alive = self.state.as_ref().map(|st| st.response.as_ref().map(|r| r.should_keep_alive()).unwrap_or(false)).unwrap_or(false);

        save_http_status_metric(self.get_response_status());
        self.log_request_success(&metrics);
        metrics.reset();

        //FIXME: we could get smarter about this
        // with no keepalive on backend, we could open a new backend ConnectionError
        // with no keepalive on front but keepalive on backend, we could have
        // a pool of connections
        if front_keep_alive && back_keep_alive {
          debug!("{} keep alive front/back", self.log_ctx);
          self.reset();
          self.front_readiness.interest = UnixReady::from(Ready::readable()) | UnixReady::hup() | UnixReady::error();
          self.back_readiness.interest  = UnixReady::hup() | UnixReady::error();
          ClientResult::Continue
          //FIXME: issues reusing the backend socket
          //self.back_readiness.interest  = UnixReady::hup() | UnixReady::error();
          //ClientResult::CloseBackend
        } else if front_keep_alive && !back_keep_alive {
          debug!("{} keep alive front", self.log_ctx);
          self.reset();
          self.front_readiness.interest = UnixReady::from(Ready::readable()) | UnixReady::hup() | UnixReady::error();
          self.back_readiness.interest  = UnixReady::hup() | UnixReady::error();
          ClientResult::CloseBackend(self.backend_token.clone())
        } else {
          debug!("{} no keep alive", self.log_ctx);
          self.front_readiness.reset();
          self.back_readiness.reset();
          ClientResult::CloseClient
        }
      },
      // restart parsing, since there will be other chunks next
      Some(ResponseState::ResponseWithBodyChunks(_,_,_)) => {
        self.back_readiness.interest.insert(Ready::readable());
        ClientResult::Continue
      },
      //we're not done parsing the headers
      Some(ResponseState::HasStatusLine(_,_)) | Some(ResponseState::HasUpgrade(_,_,_)) | Some(ResponseState::HasLength(_,_,_)) => {
        self.back_readiness.interest.insert(Ready::readable());
        ClientResult::Continue
      },
      _ => {
        self.front_readiness.reset();
        self.back_readiness.reset();
        ClientResult::CloseClient
      }
    }
  }

  // Forward content to application
  pub fn back_writable(&mut self, metrics: &mut SessionMetrics) -> ClientResult {
    if let
ClientStatus::DefaultAnswer(_,_,_) = self.status {
      // a default answer is pending: never forward anything to the backend
      error!("{}\tsending default answer, should not write to back", self.log_ctx);
      self.back_readiness.interest.remove(Ready::writable());
      self.front_readiness.interest.insert(Ready::writable());
      return ClientResult::Continue;
    }

    if self.front_buf.as_ref().unwrap().output_data_size() == 0 || self.front_buf.as_ref().unwrap().next_output_data().len() == 0 {
      // nothing to send: wait for more client data
      self.front_readiness.interest.insert(Ready::readable());
      self.back_readiness.interest.remove(Ready::writable());
      return ClientResult::Continue;
    }

    let tokens = self.tokens().clone();
    let output_size = self.front_buf.as_ref().unwrap().output_data_size();
    if self.backend.is_none() {
      metrics.service_stop();
      self.log_request_error(metrics, "back socket not found, closing connection");
      self.front_readiness.reset();
      self.back_readiness.reset();
      return ClientResult::CloseClient;
    }

    let mut sz = 0usize;
    let mut socket_res = SocketResult::Continue;

    {
      // scope the mutable borrow of the backend socket
      let sock = unwrap_msg!(self.backend.as_mut());
      while socket_res == SocketResult::Continue && self.front_buf.as_ref().unwrap().output_data_size() > 0 {
        // no more data in buffer, stop here
        if self.front_buf.as_ref().unwrap().next_output_data().len() == 0 {
          self.front_readiness.interest.insert(Ready::readable());
          self.back_readiness.interest.remove(Ready::writable());
          metrics.backend_bout += sz;
          return ClientResult::Continue;
        }

        let (current_sz, current_res) = sock.socket_write(self.front_buf.as_ref().unwrap().next_output_data());
        socket_res = current_res;
        self.front_buf.as_mut().unwrap().consume_output_data(current_sz);
        sz += current_sz;
      }
    }

    metrics.backend_bout += sz;

    if let Some((front,back)) = tokens {
      debug!("{}\tBACK [{}->{}]: wrote {} bytes of {}", self.log_ctx, front.0, back.0, sz, output_size);
    }

    match socket_res {
      SocketResult::Error | SocketResult::Closed => {
        metrics.service_stop();
        self.log_request_error(metrics, "back socket write error, closing connection");
        self.front_readiness.reset();
        self.back_readiness.reset();
        return ClientResult::CloseClient;
      },
      SocketResult::WouldBlock => {
        self.back_readiness.event.remove(Ready::writable());
      },
      SocketResult::Continue => {}
    }

    // FIXME/ should read exactly as much data as needed
    if self.front_buf.as_ref().unwrap().can_restart_parsing() {
      match unwrap_msg!(self.state.as_ref()).request {
        // the entire request was transmitted
        Some(RequestState::Request(_,_,_)) | Some(RequestState::RequestWithBody(_,_,_,_)) | Some(RequestState::RequestWithBodyChunks(_,_,_,Chunk::Ended)) => {
          // return the buffer to the pool
          // if there's still data in there, keep it for pipelining
          // NOTE(review): `map(|st| st.must_continue()).is_none()` yields
          // Option<Option<_>> — is_none() only when state is None; an
          // and_then() was possibly intended here. Confirm.
          if self.state.as_ref().map(|st| st.must_continue()).is_none() {
            if self.front_buf.as_ref().map(|buf| buf.empty()) == Some(true) {
              self.front_buf = None;
            }
          }
          self.front_readiness.interest.remove(Ready::readable());
          self.back_readiness.interest.insert(Ready::readable());
          self.back_readiness.interest.remove(Ready::writable());
          ClientResult::Continue
        },
        Some(RequestState::RequestWithBodyChunks(_,_,_,Chunk::Initial)) => {
          if self.state.as_ref().map(|st| st.must_continue()).is_none() {
            self.front_readiness.interest.insert(Ready::readable());
            ClientResult::Continue
          } else {
            // wait for the 100 continue response from the backend
            // keep the front buffer
            self.front_readiness.interest.remove(Ready::readable());
            self.back_readiness.interest.insert(Ready::readable());
            self.back_readiness.interest.remove(Ready::writable());
            ClientResult::Continue
          }
        }
        Some(RequestState::RequestWithBodyChunks(_,_,_,_)) => {
          self.front_readiness.interest.insert(Ready::readable());
          ClientResult::Continue
        },
        //we're not done parsing the headers
        Some(RequestState::HasRequestLine(_,_)) | Some(RequestState::HasHost(_,_,_)) | Some(RequestState::HasLength(_,_,_)) | Some(RequestState::HasHostAndLength(_,_,_,_)) => {
          self.front_readiness.interest.insert(Ready::readable());
          ClientResult::Continue
        },
        ref s => {
          metrics.service_stop();
          self.log_request_error(metrics, "invalid state, closing connection");
          self.front_readiness.reset();
          self.back_readiness.reset();
          ClientResult::CloseClient
        }
      }
    } else {
      // output not fully flushed yet: keep both directions active
      self.front_readiness.interest.insert(Ready::readable());
      self.back_readiness.interest.insert(Ready::writable());
      ClientResult::Continue
    }
  }

  // Read content from application
  pub fn back_readable(&mut self, metrics: &mut SessionMetrics) -> (ProtocolResult, ClientResult) {
    if let ClientStatus::DefaultAnswer(_,_,_) = self.status {
      error!("{}\tsending default answer, should not read from back socket", self.log_ctx);
      self.back_readiness.interest.remove(Ready::readable());
      return (ProtocolResult::Continue, ClientResult::Continue);
    }

    // lazily check out a back buffer from the pool
    if self.back_buf.is_none() {
      if let Some(p) = self.pool.upgrade() {
        if let Some(buf) = p.borrow_mut().checkout() {
          self.back_buf = Some(buf);
        } else {
          error!("cannot get back buffer from pool, closing");
          return (ProtocolResult::Continue, ClientResult::CloseClient);
        }
      }
    }

    if self.back_buf.as_ref().unwrap().buffer.available_space() == 0 {
      self.back_readiness.interest.remove(Ready::readable());
      return (ProtocolResult::Continue, ClientResult::Continue);
    }

    let tokens = self.tokens().clone();

    if self.backend.is_none() {
      metrics.service_stop();
      self.log_request_error(metrics, "back socket not found, closing connection");
      self.front_readiness.reset();
      self.back_readiness.reset();
      return (ProtocolResult::Continue, ClientResult::CloseClient);
    }

    let (sz, r) = {
      let sock = unwrap_msg!(self.backend.as_mut());
      sock.socket_read(&mut self.back_buf.as_mut().unwrap().buffer.space())
    };

    self.back_buf.as_mut().unwrap().buffer.fill(sz);
    self.back_buf.as_mut().unwrap().sliced_input(sz);
    metrics.backend_bin += sz;

    if let Some((front,back)) = tokens {
      debug!("{}\tBACK  [{}<-{}]: read {} bytes", self.log_ctx, front.0, back.0, sz);
    }

    if r != SocketResult::Continue || sz == 0 {
      self.back_readiness.event.remove(Ready::readable());
    }

    if r == SocketResult::Error {
      metrics.service_stop();
      self.log_request_error(metrics, "back socket read error, closing connection");
self.front_readiness.reset();
      self.back_readiness.reset();
      return (ProtocolResult::Continue, ClientResult::CloseClient);
    }

    // isolate that here because the "ref protocol" and the self.state = " make borrowing conflicts
    if let Some(&Some(ResponseState::ResponseUpgrade(_,_, ref protocol))) = self.state.as_ref().map(|s| &s.response) {
      debug!("got an upgrade state[{}]: {:?}", line!(), protocol);
      if protocol == "websocket" {
        // hand the session over to the websocket/pipe protocol
        return (ProtocolResult::Upgrade, ClientResult::Continue);
      } else {
        //FIXME: should we upgrade to a pipe or send an error?
        return (ProtocolResult::Continue, ClientResult::Continue);
      }
    }

    match unwrap_msg!(self.state.as_ref()).response {
      Some(ResponseState::Response(_,_)) => {
        metrics.service_stop();
        self.log_request_error(metrics, "should not go back in back_readable if the whole response was parsed");
        self.front_readiness.reset();
        self.back_readiness.reset();
        (ProtocolResult::Continue, ClientResult::CloseClient)
      },
      Some(ResponseState::ResponseWithBody(_,_,_)) => {
        self.front_readiness.interest.insert(Ready::writable());
        if ! self.back_buf.as_ref().unwrap().needs_input() {
          self.back_readiness.interest.remove(Ready::readable());
        }
        (ProtocolResult::Continue, ClientResult::Continue)
      },
      Some(ResponseState::ResponseWithBodyChunks(_,_,Chunk::Ended)) => {
        use nom::HexDisplay;
        self.back_readiness.interest.remove(Ready::readable());
        if sz == 0 {
          (ProtocolResult::Continue, ClientResult::Continue)
        } else {
          // data arrived after the final chunk: protocol violation
          metrics.service_stop();
          error!("{}\tback read should have stopped on chunk ended\nstate: {:?}\ndata:{}", self.log_ctx, self.state,
            self.back_buf.as_ref().unwrap().unparsed_data().to_hex(16));
          self.log_request_error(metrics, "back read should have stopped on chunk ended");
          self.front_readiness.reset();
          self.back_readiness.reset();
          (ProtocolResult::Continue, ClientResult::CloseClient)
        }
      },
      Some(ResponseState::ResponseWithBodyChunks(_,_,Chunk::Error)) => {
        metrics.service_stop();
        self.log_request_error(metrics, "back read should have stopped on chunk error");
        self.front_readiness.reset();
        self.back_readiness.reset();
        (ProtocolResult::Continue, ClientResult::CloseClient)
      },
      Some(ResponseState::ResponseWithBodyChunks(_,_,_)) => {
        // more chunks expected: keep feeding the response parser
        if ! self.back_buf.as_ref().unwrap().needs_input() {
          self.state = Some(parse_response_until_stop(unwrap_msg!(self.state.take()), &self.request_id,
            &mut self.back_buf.as_mut().unwrap(), &self.sticky_name, self.sticky_session.take()));

          if unwrap_msg!(self.state.as_ref()).is_back_error() {
            metrics.service_stop();
            self.log_request_error(metrics, "back socket chunk parse error, closing connection");
            self.front_readiness.reset();
            self.back_readiness.reset();
            return (ProtocolResult::Continue, ClientResult::CloseClient);
          }

          if let Some(&Some(ResponseState::ResponseWithBodyChunks(_,_,Chunk::Ended))) = self.state.as_ref().map(|s| &s.response) {
            self.back_readiness.interest.remove(Ready::readable());
          }
        }
        self.front_readiness.interest.insert(Ready::writable());
        (ProtocolResult::Continue, ClientResult::Continue)
      },
      Some(ResponseState::Error(_,_,_,_,_)) => panic!("{}\tback read should have stopped on responsestate error", self.log_ctx),
      _ => {
        // headers not fully parsed yet: run the response parser
        self.state = Some(parse_response_until_stop(unwrap_msg!(self.state.take()), &self.request_id,
          &mut self.back_buf.as_mut().unwrap(), &self.sticky_name, self.sticky_session.take()));

        if unwrap_msg!(self.state.as_ref()).is_back_error() {
          metrics.service_stop();
          self.log_request_error(metrics, "back socket parse error, closing connection");
          self.front_readiness.reset();
          self.back_readiness.reset();
          return (ProtocolResult::Continue, ClientResult::CloseClient);
        }

        if let Some(ResponseState::Response(_,_)) = unwrap_msg!(self.state.as_ref()).response {
          self.back_readiness.interest.remove(Ready::readable());
        }

        if let Some(&Some(ResponseState::ResponseUpgrade(_,_, ref protocol))) = self.state.as_ref().map(|s| &s.response) {
          debug!("got an upgrade state[{}]: {:?}", line!(), protocol);
          if protocol == "websocket" {
            return (ProtocolResult::Upgrade, ClientResult::Continue);
          } else {
            //FIXME: should we upgrade to a pipe or send an error?
return (ProtocolResult::Continue, ClientResult::Continue); } } self.front_readiness.interest.insert(Ready::writable()); (ProtocolResult::Continue, ClientResult::Continue) } } } } /// Save the backend http response status code metric fn save_http_status_metric(rs_status_line : Option<RStatusLine>) { if let Some(rs_status_line) = rs_status_line { match rs_status_line.status { 100...199 => { incr!("http.status.1xx"); }, 200...299 => { incr!("http.status.2xx"); }, 300...399 => { incr!("http.status.3xx"); }, 400...499 => { incr!("http.status.4xx"); }, 500...599 => { incr!("http.status.5xx"); }, _ => { incr!("http.status.other"); }, // http responses with other codes (protocol error) } } }
/* * Copyright (c) 2013-2014, David Renshaw (dwrenshaw@gmail.com) * * See the LICENSE file in the capnproto-rust root directory. */ use capability::{ClientHook}; use common::*; use endian::*; use mask::*; use arena::*; use blob::*; use std; #[repr(u8)] #[deriving(Eq)] pub enum FieldSize { VOID = 0, BIT = 1, BYTE = 2, TWO_BYTES = 3, FOUR_BYTES = 4, EIGHT_BYTES = 5, POINTER = 6, INLINE_COMPOSITE = 7 } pub fn data_bits_per_element(size : FieldSize) -> BitCount0 { match size { VOID => 0, BIT => 1, BYTE => 8, TWO_BYTES => 16, FOUR_BYTES => 32, EIGHT_BYTES => 64, POINTER => 0, INLINE_COMPOSITE => 0 } } pub fn pointers_per_element(size : FieldSize) -> WirePointerCount { match size { POINTER => 1, _ => 0 } } // Port note: here, this is only valid for T a primitive type. In // capnproto-c++, it dispatches on the 'kind' of T and can handle // structs and pointers. pub fn element_size_for_type<T>() -> FieldSize { match bits_per_element::<T>() { 0 => VOID, 1 => BIT, 8 => BYTE, 16 => TWO_BYTES, 32 => FOUR_BYTES, 64 => EIGHT_BYTES, b => fail!("don't know how to get field size with {} bits", b) } } pub enum Kind { PRIMITIVE, BLOB, ENUM, STRUCT, UNION, INTERFACE, LIST, UNKNOWN } // In the future, Rust will have an alignment attribute // and we won't need the dummy field. 
pub struct AlignedData<T> {
    // the u64 forces 8-byte alignment of `words` (see comment above)
    _dummy : u64,
    words : T
}

// Size of a struct in data words and pointer slots.
pub struct StructSize {
    data : WordCount16,
    pointers : WirePointerCount16,
    preferred_list_encoding : FieldSize
}

impl StructSize {
    // Total size in words: data section plus pointer section.
    pub fn total(&self) -> WordCount {
        (self.data as WordCount) + (self.pointers as WordCount) * WORDS_PER_POINTER
    }
}

// Two-bit pointer kind tag stored in the low bits of a wire pointer;
// the numeric values are part of the wire format.
#[repr(u8)]
#[deriving(Eq)]
pub enum WirePointerKind {
    WP_STRUCT = 0,
    WP_LIST = 1,
    WP_FAR = 2,
    WP_OTHER = 3
}

// One 64-bit wire pointer: low word encodes offset+kind, the upper 32 bits
// are reinterpreted as StructRef/ListRef/FarRef/CapRef depending on kind.
pub struct WirePointer {
    offset_and_kind : WireValue<u32>,
    upper32bits : u32,
}

pub struct StructRef {
    data_size : WireValue<WordCount16>,
    ptr_count : WireValue<WirePointerCount16>
}

pub struct ListRef {
    // low 3 bits: element size code; remaining 29 bits: element count
    element_size_and_count : WireValue<u32>
}

pub struct FarRef {
    segment_id : WireValue<u32>
}

pub struct CapRef {
    index : WireValue<u32>
}

impl StructRef {
    pub fn word_size(&self) -> WordCount {
        self.data_size.get() as WordCount +
            self.ptr_count.get() as WordCount * WORDS_PER_POINTER
    }

    #[inline]
    pub fn set_from_struct_size(&mut self, size : StructSize) {
        self.data_size.set(size.data);
        self.ptr_count.set(size.pointers);
    }

    #[inline]
    pub fn set(&mut self, ds : WordCount16, rc : WirePointerCount16) {
        self.data_size.set(ds);
        self.ptr_count.set(rc);
    }
}

impl ListRef {
    #[inline]
    pub fn element_size(&self) -> FieldSize {
        // low 3 bits map directly onto the FieldSize discriminants
        unsafe { std::cast::transmute( (self.element_size_and_count.get() & 7) as u8) }
    }

    #[inline]
    pub fn element_count(&self) -> ElementCount {
        (self.element_size_and_count.get() >> 3) as uint
    }

    #[inline]
    pub fn inline_composite_word_count(&self) -> WordCount {
        // for INLINE_COMPOSITE lists the "count" field holds a word count
        self.element_count()
    }

    #[inline]
    pub fn set(&mut self, es : FieldSize, ec : ElementCount) {
        assert!(ec < (1 << 29), "Lists are limited to 2**29 elements");
        self.element_size_and_count.set(((ec as u32) << 3 ) | (es as u32));
    }

    #[inline]
    pub fn set_inline_composite(& mut self, wc : WordCount) {
        assert!(wc < (1 << 29), "Inline composite lists are limited to 2 ** 29 words");
        self.element_size_and_count.set((( wc as u32) << 3) | (INLINE_COMPOSITE as u32));
    }
}

impl FarRef {
    #[inline]
    pub fn set(&mut self, si : SegmentId) { self.segment_id.set(si); }
}

impl CapRef {
    #[inline]
    pub fn set(&mut self, index : u32) { self.index.set(index); }
}

impl WirePointer {
    #[inline]
    pub fn kind(&self) -> WirePointerKind {
        // low 2 bits map onto the WirePointerKind discriminants
        unsafe { std::cast::transmute((self.offset_and_kind.get() & 3) as u8) }
    }

    #[inline]
    pub fn is_capability(&self) -> bool {
        self.offset_and_kind.get() == WP_OTHER as u32
    }

    #[inline]
    pub fn target(&self) -> *Word {
        // target = address of the word after this pointer, plus the signed offset
        let thisAddr : *Word = unsafe {std::cast::transmute(&*self) };
        unsafe { thisAddr.offset(1 + ((self.offset_and_kind.get() as int) >> 2)) }
    }

    #[inline]
    pub fn mut_target(&mut self) -> *mut Word {
        let thisAddr : *mut Word = unsafe {std::cast::transmute(&*self) };
        unsafe { thisAddr.offset(1 + ((self.offset_and_kind.get() as int) >> 2)) }
    }

    #[inline]
    pub fn set_kind_and_target(&mut self, kind : WirePointerKind,
                               target : *mut Word,
                               _segmentBuilder : *mut SegmentBuilder) {
        // encode the word distance from the end of this pointer to `target`
        let thisAddr : int = unsafe {std::cast::transmute(&*self)};
        let targetAddr : int = unsafe {std::cast::transmute(target)};
        self.offset_and_kind.set(
            ((((targetAddr - thisAddr)/BYTES_PER_WORD as int) as i32 - 1) << 2) as u32
                | (kind as u32))
    }

    #[inline]
    pub fn set_kind_with_zero_offset(&mut self, kind : WirePointerKind) {
        self.offset_and_kind.set( kind as u32)
    }

    #[inline]
    pub fn inline_composite_list_element_count(&self) -> ElementCount {
        (self.offset_and_kind.get() >> 2) as ElementCount
    }

    #[inline]
    pub fn set_kind_and_inline_composite_list_element_count(
        &mut self, kind : WirePointerKind, element_count : ElementCount) {
        self.offset_and_kind.set((( element_count as u32 << 2) | (kind as u32)))
    }

    #[inline]
    pub fn far_position_in_segment(&self) -> WordCount {
        (self.offset_and_kind.get() >> 3) as WordCount
    }

    #[inline]
    pub fn is_double_far(&self) -> bool {
        // bit 2 marks a double-far landing pad
        ((self.offset_and_kind.get() >> 2) & 1) != 0
    }

    #[inline]
    pub fn set_far(&mut self, is_double_far : bool, pos : WordCount) {
        self.offset_and_kind.set
            (( pos << 3) as u32 | (is_double_far as u32 << 2) | WP_FAR as u32);
    }

    #[inline]
    pub fn set_cap(&mut self, index : u32) {
        self.offset_and_kind.set(WP_OTHER as u32);
        self.mut_cap_ref().set(index);
    }

    // The *_ref accessors below reinterpret the upper 32 bits as the
    // kind-specific payload via transmute.
    #[inline]
    pub fn struct_ref(&self) -> StructRef {
        unsafe { std::cast::transmute(self.upper32bits) }
    }

    #[inline]
    pub fn mut_struct_ref<'a>(&'a mut self) -> &'a mut StructRef {
        unsafe { std::cast::transmute(& self.upper32bits) }
    }

    #[inline]
    pub fn list_ref(&self) -> ListRef {
        unsafe { std::cast::transmute(self.upper32bits) }
    }

    #[inline]
    pub fn mut_list_ref<'a>(&'a self) -> &'a mut ListRef {
        // NOTE(review): takes &self but hands out &mut — looks unsound even by
        // this era's standards; confirm against upstream (siblings take &mut self).
        unsafe { std::cast::transmute(& self.upper32bits) }
    }

    #[inline]
    pub fn far_ref(&self) -> FarRef {
        unsafe { std::cast::transmute(self.upper32bits) }
    }

    #[inline]
    pub fn mut_far_ref<'a>(&'a mut self) -> &'a mut FarRef {
        unsafe { std::cast::transmute(& self.upper32bits) }
    }

    #[inline]
    pub fn cap_ref(&self) -> CapRef {
        unsafe { std::cast::transmute(self.upper32bits) }
    }

    #[inline]
    pub fn mut_cap_ref<'a>(&'a mut self) -> &'a mut CapRef {
        unsafe { std::cast::transmute(& self.upper32bits) }
    }

    #[inline]
    pub fn is_null(&self) -> bool {
        (self.offset_and_kind.get() == 0) & (self.upper32bits == 0)
    }
}

// A value paired with the segment it was allocated in.
struct SegmentAnd<T> {
    segment : *mut SegmentBuilder,
    value : T
}

mod WireHelpers {
    use std;
    use capability::ClientHook;
    use common::*;
    use layout::*;
    use arena::*;
    use blob::*;

    #[inline]
    pub fn round_bytes_up_to_words(bytes : ByteCount) -> WordCount {
        //# This code assumes 64-bit words.
        (bytes + 7) / BYTES_PER_WORD
    }

    //# The maximum object size is 4GB - 1 byte. If measured in bits,
    //# this would overflow a 32-bit counter, so we need to accept
    //# BitCount64. However, 32 bits is enough for the returned
    //# ByteCounts and WordCounts.
    #[inline]
    pub fn round_bits_up_to_words(bits : BitCount64) -> WordCount {
        //# This code assumes 64-bit words.
((bits + 63) / (BITS_PER_WORD as u64)) as WordCount } #[allow(dead_code)] #[inline] pub fn round_bits_up_to_bytes(bits : BitCount64) -> ByteCount { ((bits + 7) / (BITS_PER_BYTE as u64)) as ByteCount } #[inline] pub unsafe fn bounds_check(segment : *SegmentReader, start : *Word, end : *Word) -> bool { //# If segment is null, this is an unchecked message, so we don't do bounds checks. return segment.is_null() || (*segment).contains_interval(start, end); } #[inline] pub unsafe fn allocate(reff : &mut *mut WirePointer, segment : &mut *mut SegmentBuilder, amount : WordCount, kind : WirePointerKind) -> *mut Word { let is_null = (**reff).is_null(); if !is_null { zero_object(*segment, *reff) } match (**segment).allocate(amount) { None => { //# Need to allocate in a new segment. We'll need to //# allocate an extra pointer worth of space to act as //# the landing pad for a far pointer. let amountPlusRef = amount + POINTER_SIZE_IN_WORDS; let allocation = (*(**segment).get_arena()).allocate(amountPlusRef); *segment = allocation.first(); let ptr = allocation.second(); //# Set up the original pointer to be a far pointer to //# the new segment. (**reff).set_far(false, (**segment).get_word_offset_to(ptr)); (**reff).mut_far_ref().segment_id.set((**segment).id); //# Initialize the landing pad to indicate that the //# data immediately follows the pad. *reff = std::cast::transmute(ptr); let ptr1 = ptr.offset(POINTER_SIZE_IN_WORDS as int); (**reff).set_kind_and_target(kind, ptr1, *segment); return ptr1; } Some(ptr) => { (**reff).set_kind_and_target(kind, ptr, *segment); return ptr; } } } #[inline] pub unsafe fn follow_builder_fars(reff : &mut * mut WirePointer, ref_target : *mut Word, segment : &mut *mut SegmentBuilder) -> *mut Word { //# If `ref` is a far pointer, follow it. On return, `ref` will //# have been updated to point at a WirePointer that contains //# the type information about the target object, and a pointer //# to the object contents is returned. 
        //# The caller must NOT use
        //# `ref->target()` as this may or may not actually return a
        //# valid pointer. `segment` is also updated to point at the
        //# segment which actually contains the object.
        //#
        //# If `ref` is not a far pointer, this simply returns
        //# `refTarget`. Usually, `refTarget` should be the same as
        //# `ref->target()`, but may not be in cases where `ref` is
        //# only a tag.

        if (**reff).kind() == WP_FAR {
            // Hop to the segment named by the far pointer and locate its
            // landing pad.
            *segment = (*(**segment).get_arena()).get_segment((**reff).far_ref().segment_id.get());
            let pad : *mut WirePointer =
                std::cast::transmute((**segment).get_ptr_unchecked((**reff).far_position_in_segment()));
            if !(**reff).is_double_far() {
                // Single far: the pad is the real pointer; object follows it.
                *reff = pad;
                return (*pad).mut_target();
            }

            //# Landing pad is another far pointer. It is followed by a
            //# tag describing the pointed-to object.
            *reff = pad.offset(1);
            *segment = (*(**segment).get_arena()).get_segment((*pad).far_ref().segment_id.get());
            return (**segment).get_ptr_unchecked((*pad).far_position_in_segment());
        } else {
            ref_target
        }
    }

    /// Reader-side counterpart of `follow_builder_fars`: resolves far
    /// pointers with bounds checking. Updates `*reff`/`*segment` to the tag
    /// pointer and owning segment, returning a pointer to object contents.
    #[inline]
    pub unsafe fn follow_fars(reff: &mut *WirePointer,
                              refTarget: *Word,
                              segment : &mut *SegmentReader) -> *Word {
        //# If the segment is null, this is an unchecked message,
        //# so there are no FAR pointers.
        if !(*segment).is_null() && (**reff).kind() == WP_FAR {
            *segment = (**segment).arena.try_get_segment((**reff).far_ref().segment_id.get());
            let ptr : *Word = (**segment).get_start_ptr().offset(
                (**reff).far_position_in_segment() as int);
            // A double-far landing pad is two words (far ptr + tag).
            let padWords : int = if (**reff).is_double_far() { 2 } else { 1 };
            assert!(bounds_check(*segment, ptr, ptr.offset(padWords)));
            let pad : *WirePointer = std::cast::transmute(ptr);
            if !(**reff).is_double_far() {
                *reff = pad;
                return (*pad).target();
            } else {
                //# Landing pad is another far pointer. It is
                //# followed by a tag describing the pointed-to
                //# object.
                *reff = pad.offset(1);
                *segment = (**segment).arena.try_get_segment((*pad).far_ref().segment_id.get());
                return (**segment).get_start_ptr().offset((*pad).far_position_in_segment() as int);
            }
        } else {
            return refTarget;
        }
    }

    /// Zeroes the object `reff` points at (recursively, via the helper),
    /// following and erasing far-pointer landing pads along the way.
    pub unsafe fn zero_object(mut segment : *mut SegmentBuilder, reff : *mut WirePointer) {
        //# Zero out the pointed-to object. Use when the pointer is
        //# about to be overwritten making the target object no longer
        //# reachable.

        match (*reff).kind() {
            WP_STRUCT | WP_LIST | WP_OTHER => {
                zero_object_helper(segment, reff, (*reff).mut_target())
            }
            WP_FAR => {
                segment = (*(*segment).get_arena()).get_segment((*reff).far_ref().segment_id.get());
                let pad : *mut WirePointer =
                    std::cast::transmute((*segment).get_ptr_unchecked((*reff).far_position_in_segment()));
                if (*reff).is_double_far() {
                    // Double far: pad[0] points at the object in yet another
                    // segment, pad[1] is the tag describing it.
                    segment = (*(*segment).get_arena()).get_segment((*pad).far_ref().segment_id.get());
                    zero_object_helper(segment,
                                       pad.offset(1),
                                       (*segment).get_ptr_unchecked((*pad).far_position_in_segment()));
                    // Erase the two-word landing pad itself.
                    std::ptr::set_memory(pad, 0u8, 2);
                } else {
                    zero_object(segment, pad);
                    // Erase the one-word landing pad.
                    std::ptr::set_memory(pad, 0u8, 1);
                }
            }
        }
    }

    /// Recursively zeroes the object described by `tag` whose body starts at
    /// `ptr` — pointers first (so nested objects are released), then the
    /// object's own words.
    /// NOTE(review): set_memory's count argument here is in `Word` units,
    /// matching the *mut Word element type, despite the 0u8 fill value.
    pub unsafe fn zero_object_helper(segment : *mut SegmentBuilder,
                                     tag : *mut WirePointer,
                                     ptr: *mut Word) {
        match (*tag).kind() {
            WP_OTHER => { fail!("Don't know how to handle OTHER") }
            WP_STRUCT => {
                // Zero each pointer field (recursing), then the whole body.
                let pointerSection : *mut WirePointer =
                    std::cast::transmute(
                    ptr.offset((*tag).struct_ref().data_size.get() as int));

                let count = (*tag).struct_ref().ptr_count.get() as int;
                for i in range::<int>(0, count) {
                    zero_object(segment, pointerSection.offset(i));
                }
                std::ptr::set_memory(ptr, 0u8, (*tag).struct_ref().word_size());
            }
            WP_LIST => {
                match (*tag).list_ref().element_size() {
                    VOID =>  { }
                    BIT | BYTE | TWO_BYTES | FOUR_BYTES | EIGHT_BYTES => {
                        // Plain data list: just zero the payload words.
                        std::ptr::set_memory(
                            ptr, 0u8,
                            round_bits_up_to_words((
                                    (*tag).list_ref().element_count() *
                                        data_bits_per_element(
                                        (*tag).list_ref().element_size())) as u64))
                    }
                    POINTER => {
                        // List of pointers: recurse into each element.
                        let count = (*tag).list_ref().element_count() as uint;
                        for i in
range::<int>(0, count as int) {
                            zero_object(segment, std::cast::transmute(ptr.offset(i)))
                        }
                        // Each pointer element is one word.
                        std::ptr::set_memory(ptr, 0u8, count);
                    }
                    INLINE_COMPOSITE => {
                        // ptr[0] is a tag word describing the struct elements.
                        let elementTag : *mut WirePointer = std::cast::transmute(ptr);

                        assert!((*elementTag).kind() == WP_STRUCT,
                                "Don't know how to handle non-STRUCT inline composite");

                        let data_size = (*elementTag).struct_ref().data_size.get();
                        let pointer_count = (*elementTag).struct_ref().ptr_count.get();
                        let mut pos : *mut Word = ptr.offset(1);
                        let count = (*elementTag).inline_composite_list_element_count();
                        // Walk each element: skip its data words, recurse
                        // into each of its pointer words.
                        for _ in range(0, count) {
                            pos = pos.offset(data_size as int);
                            for _ in range(0, pointer_count as uint) {
                                zero_object(
                                    segment,
                                    std::cast::transmute::<*mut Word, *mut WirePointer>(pos));
                                pos = pos.offset(1);
                            }
                        }
                        // +1 covers the tag word preceding the elements.
                        std::ptr::set_memory(ptr, 0u8,
                                             (*elementTag).struct_ref().word_size() * count + 1);
                    }
                }
            }
            WP_FAR => { fail!("Unexpected FAR pointer") }
        }
    }

    /// Zeroes `*reff` (and its far-pointer landing pad, if any) WITHOUT
    /// touching the object body — see the original comment below.
    #[inline]
    pub unsafe fn zero_pointer_and_fars(segment : *mut SegmentBuilder, reff : *mut WirePointer) {
        //# Zero out the pointer itself and, if it is a far pointer,
        //# zero the landing pad as well, but do not zero the object
        //# body. Used when upgrading.
if (*reff).kind() == WP_FAR { let pad = (*(*(*segment).get_arena()).get_segment((*reff).far_ref().segment_id.get())) .get_ptr_unchecked((*reff).far_position_in_segment()); let num_elements = if (*reff).is_double_far() { 2 } else { 1 }; std::ptr::zero_memory(pad, num_elements); } std::ptr::zero_memory(reff, 1); } pub unsafe fn total_size(mut segment : *SegmentReader, mut reff : *WirePointer, mut nesting_limit : int) -> MessageSize { let mut result = MessageSize { word_count : 0, cap_count : 0}; if (*reff).is_null() { return result }; nesting_limit -= 1; let ptr = follow_fars(&mut reff, (*reff).target(), &mut segment); match (*reff).kind() { WP_STRUCT => { assert!(bounds_check(segment, ptr, ptr.offset((*reff).struct_ref().word_size() as int)), "Message contains out-of-bounds struct pointer."); result.word_count += (*reff).struct_ref().word_size() as u64; let pointer_section : *WirePointer = std::cast::transmute(ptr.offset((*reff).struct_ref().data_size.get() as int)); let count : int = (*reff).struct_ref().ptr_count.get() as int; for i in range(0, count) { result.plus_eq(total_size(segment, pointer_section.offset(i), nesting_limit)); } } WP_LIST => { match (*reff).list_ref().element_size() { VOID => {} BIT | BYTE | TWO_BYTES | FOUR_BYTES | EIGHT_BYTES => { let total_words = round_bits_up_to_words( (*reff).list_ref().element_count() as u64 * data_bits_per_element((*reff).list_ref().element_size()) as u64); assert!(bounds_check(segment, ptr, ptr.offset(total_words as int)), "Message contains out-of-bounds list pointer."); result.word_count += total_words as u64; } POINTER => { let count = (*reff).list_ref().element_count(); assert!(bounds_check(segment, ptr, ptr.offset((count * WORDS_PER_POINTER) as int)), "Message contains out-of-bounds list pointer."); result.word_count += count as u64 * WORDS_PER_POINTER as u64; for i in range(0, count as int) { result.plus_eq( total_size(segment, std::cast::transmute::<*Word,*WirePointer>(ptr).offset(i), nesting_limit)); } } 
INLINE_COMPOSITE => { let word_count = (*reff).list_ref().inline_composite_word_count(); assert!(bounds_check(segment, ptr, ptr.offset(word_count as int + POINTER_SIZE_IN_WORDS as int)), "Message contains out-of-bounds list pointer."); result.word_count += word_count as u64 + POINTER_SIZE_IN_WORDS as u64; let element_tag : *WirePointer = std::cast::transmute(ptr); let count = (*element_tag).inline_composite_list_element_count(); assert!((*element_tag).kind() == WP_STRUCT, "Don't know how to handle non-STRUCT inline composite."); assert!((*element_tag).struct_ref().word_size() * count <= word_count, "INLINE_COMPOSITE list's elements overrun its word count"); let data_size = (*element_tag).struct_ref().data_size.get(); let pointer_count = (*element_tag).struct_ref().ptr_count.get(); let mut pos : *Word = ptr.offset(POINTER_SIZE_IN_WORDS as int); for _ in range(0, count) { pos = pos.offset(data_size as int); for _ in range(0, pointer_count) { result.plus_eq( total_size(segment, std::cast::transmute::<*Word,*WirePointer>(pos), nesting_limit)); pos = pos.offset(POINTER_SIZE_IN_WORDS as int); } } } } } WP_FAR => { fail!("Unexpedted FAR pointer."); } WP_OTHER => { if (*reff).is_capability() { result.cap_count += 1; } else { fail!("Unknown pointer type."); } } } result } pub unsafe fn transfer_pointer(dst_segment : *mut SegmentBuilder, dst : *mut WirePointer, src_segment : *mut SegmentBuilder, src : *mut WirePointer) { //# Make *dst point to the same object as *src. Both must //# reside in the same message, but can be in different //# segments. Not always-inline because this is rarely used. // //# Caller MUST zero out the source pointer after calling this, //# to make sure no later code mistakenly thinks the source //# location still owns the object. transferPointer() doesn't //# do this zeroing itself because many callers transfer //# several pointers in a loop then zero out the whole section. 
        assert!((*dst).is_null());
        // We expect the caller to ensure the target is already null so won't leak.

        if (*src).is_null() {
            std::ptr::zero_memory(dst, 1);
        } else if (*src).kind() == WP_FAR {
            //# Far pointers are position-independent, so we can just copy.
            std::ptr::copy_nonoverlapping_memory(dst, src as *WirePointer, 1);
        } else {
            transfer_pointer_split(dst_segment, dst, src_segment, src, (*src).mut_target());
        }
    }

    /// Like `transfer_pointer`, but the source is supplied as a separate
    /// tag (`src_tag`) and body pointer (`src_ptr`).
    pub unsafe fn transfer_pointer_split(dst_segment : *mut SegmentBuilder, dst : *mut WirePointer,
                                         src_segment : *mut SegmentBuilder, src_tag : *mut WirePointer,
                                         src_ptr : *mut Word) {
        // Like the other transfer_pointer, but splits src into a tag and a
        // target. Particularly useful for OrphanBuilder.

        if dst_segment == src_segment {
            //# Same segment, so create a direct pointer.
            (*dst).set_kind_and_target((*src_tag).kind(), src_ptr, dst_segment);

            //# We can just copy the upper 32 bits. (Use memcpy() to comply with aliasing rules.)
            // (?)
            std::ptr::copy_nonoverlapping_memory(std::ptr::to_mut_unsafe_ptr(&mut (*dst).upper32bits),
                                                 std::ptr::to_unsafe_ptr(&(*src_tag).upper32bits),
                                                 1);
        } else {
            //# Need to create a far pointer. Try to allocate it in the
            //# same segment as the source, so that it doesn't need to
            //# be a double-far.

            match (*src_segment).allocate(1) {
                None => {
                    //# Darn, need a double-far.
                    // NOTE(review): double-far allocation path not yet written.
                    fail!("unimplemented");
                }
                Some(landing_pad_word) => {
                    //# Simple landing pad is just a pointer.
                    let landing_pad : *mut WirePointer = std::cast::transmute(landing_pad_word);
                    (*landing_pad).set_kind_and_target((*src_tag).kind(), src_ptr, src_segment);
                    std::ptr::copy_nonoverlapping_memory(
                        std::ptr::to_mut_unsafe_ptr(&mut (*landing_pad).upper32bits),
                        std::ptr::to_unsafe_ptr(& (*src_tag).upper32bits), 1);

                    (*dst).set_far(false, (*src_segment).get_word_offset_to(landing_pad_word));
                    (*dst).far_ref().set((*src_segment).get_segment_id());
                }
            }
        }
    }

    /// Allocates a fresh struct of the given `size`, wires `reff` to it, and
    /// returns a builder over its data/pointer sections.
    #[inline]
    pub unsafe fn init_struct_pointer<'a>(mut reff : *mut WirePointer,
                                          mut segmentBuilder : *mut SegmentBuilder,
                                          size : StructSize) -> StructBuilder<'a> {
        let ptr : *mut Word = allocate(&mut reff, &mut segmentBuilder, size.total(), WP_STRUCT);
        (*reff).mut_struct_ref().set_from_struct_size(size);

        StructBuilder {
            segment : segmentBuilder,
            data : std::cast::transmute(ptr),
            pointers : std::cast::transmute(
                    ptr.offset((size.data as uint) as int)),
            data_size : size.data as WordCount32 * (BITS_PER_WORD as BitCount32),
            pointer_count : size.pointers,
            bit0offset : 0
        }
    }

    /// Returns a builder over an existing struct, initializing it if null
    /// and relocating (upgrading) it if it is smaller than `size` requires.
    #[inline]
    pub unsafe fn get_writable_struct_pointer<'a>(mut reff : *mut WirePointer,
                                                  mut segment : *mut SegmentBuilder,
                                                  size : StructSize,
                                                  default_value : *Word) -> StructBuilder<'a> {
        if (*reff).is_null() {
            if default_value.is_null() ||
                (*std::cast::transmute::<*Word,*WirePointer>(default_value)).is_null() {
                return init_struct_pointer(reff, segment, size);
            }
            // NOTE(review): non-null struct defaults not yet supported.
            fail!("TODO")
        }

        let ref_target = (*reff).mut_target();
        let mut old_ref = reff;
        let mut old_segment = segment;
        let old_ptr = follow_builder_fars(&mut old_ref, ref_target, &mut old_segment);
        assert!((*old_ref).kind() == WP_STRUCT,
                "Message contains non-struct pointer where struct pointer was expected.");

        let old_data_size = (*old_ref).struct_ref().data_size.get();
        let old_pointer_count = (*old_ref).struct_ref().ptr_count.get();
        let old_pointer_section : *mut WirePointer =
            std::cast::transmute(old_ptr.offset(old_data_size as int));

        if old_data_size < size.data || old_pointer_count < size.pointers {
            //# The space
            //# allocated for this struct is too small.
            //# Unlike with readers, we can't just run with it and do
            //# bounds checks at access time, because how would we
            //# handle writes? Instead, we have to copy the struct to a
            //# new space now.

            let new_data_size = std::cmp::max(old_data_size, size.data);
            let new_pointer_count = std::cmp::max(old_pointer_count, size.pointers);
            let total_size = new_data_size as WordCount +
                new_pointer_count as WordCount * WORDS_PER_POINTER;

            //# Don't let allocate() zero out the object just yet.
            zero_pointer_and_fars(segment, reff);

            let ptr = allocate(&mut reff, &mut segment, total_size, WP_STRUCT);
            (*reff).struct_ref().set(new_data_size, new_pointer_count);

            //# Copy data section.
            // Note: copy_nonoverlapping memory's third argument is an element count, not a byte count.
            std::ptr::copy_nonoverlapping_memory(ptr, old_ptr as *Word,
                                                 old_data_size as uint);

            //# Copy pointer section.
            let new_pointer_section : *mut WirePointer =
                std::cast::transmute(ptr.offset(new_data_size as int));
            for i in range::<int>(0, old_pointer_count as int) {
                transfer_pointer(segment, new_pointer_section.offset(i),
                                 old_segment, old_pointer_section.offset(i));
            }

            // Release the old location (pointer section was transferred,
            // so only raw zeroing remains).
            std::ptr::zero_memory(old_ptr, old_data_size as uint + old_pointer_count as uint);

            StructBuilder {
                segment : segment,
                data : std::cast::transmute(ptr),
                pointers : new_pointer_section,
                data_size : new_data_size as u32 * BITS_PER_WORD as u32,
                pointer_count : new_pointer_count,
                bit0offset : 0
            }
        } else {
            // Existing allocation is big enough; build in place.
            StructBuilder {
                segment : old_segment,
                data : std::cast::transmute(old_ptr),
                pointers : old_pointer_section,
                data_size : old_data_size as u32 * BITS_PER_WORD as u32,
                pointer_count : old_pointer_count,
                bit0offset : 0
            }
        }
    }

    /// Allocates a fresh list of `element_count` primitive/pointer elements.
    /// Struct lists must go through init_struct_list_pointer instead.
    #[inline]
    pub unsafe fn init_list_pointer<'a>(mut reff : *mut WirePointer,
                                        mut segmentBuilder : *mut SegmentBuilder,
                                        element_count : ElementCount,
                                        element_size : FieldSize) -> ListBuilder<'a> {
        match element_size {
            INLINE_COMPOSITE => {
                fail!("Should have called initStructListPointer() instead")
            }
            _ => { }
        }

        // step is in bits per element.
        let data_size : BitCount0 = data_bits_per_element(element_size);
        let pointer_count = pointers_per_element(element_size);
        let step = (data_size + pointer_count * BITS_PER_POINTER);
        let wordCount = round_bits_up_to_words(element_count as ElementCount64 * (step as u64));
        let ptr = allocate(&mut reff, &mut segmentBuilder, wordCount, WP_LIST);

        (*reff).mut_list_ref().set(element_size, element_count);

        ListBuilder {
            segment : segmentBuilder,
            ptr : std::cast::transmute(ptr),
            step : step,
            element_count : element_count,
            struct_data_size : data_size as u32,
            struct_pointer_count : pointer_count as u16
        }
    }

    /// Allocates a fresh struct list. Falls back to a primitive-list
    /// encoding when the struct's preferred encoding is not INLINE_COMPOSITE.
    #[inline]
    pub unsafe fn init_struct_list_pointer<'a>(mut reff : *mut WirePointer,
                                               mut segmentBuilder : *mut SegmentBuilder,
                                               element_count : ElementCount,
                                               element_size : StructSize) -> ListBuilder<'a> {
        match element_size.preferred_list_encoding {
            INLINE_COMPOSITE => { }
            otherEncoding => {
                return init_list_pointer(reff, segmentBuilder, element_count, otherEncoding);
            }
        }

        let wordsPerElement = element_size.total();

        //# Allocate the list, prefixed by a single WirePointer.
        let wordCount : WordCount = element_count * wordsPerElement;
        let ptr : *mut WirePointer =
            std::cast::transmute(allocate(&mut reff, &mut segmentBuilder,
                                          POINTER_SIZE_IN_WORDS + wordCount, WP_LIST));

        //# Initialize the pointer.
        (*reff).mut_list_ref().set_inline_composite(wordCount);
        // The first word of the list is a tag describing each element.
        (*ptr).set_kind_and_inline_composite_list_element_count(WP_STRUCT, element_count);
        (*ptr).mut_struct_ref().set_from_struct_size(element_size);

        let ptr1 = ptr.offset(POINTER_SIZE_IN_WORDS as int);

        ListBuilder {
            segment : segmentBuilder,
            ptr : std::cast::transmute(ptr1),
            step : wordsPerElement * BITS_PER_WORD,
            element_count : element_count,
            struct_data_size : element_size.data as u32 * (BITS_PER_WORD as u32),
            struct_pointer_count : element_size.pointers
        }
    }

    /// Returns a builder over an existing non-struct list, validating that
    /// the stored element size is compatible with `element_size`.
    #[inline]
    pub unsafe fn get_writable_list_pointer<'a>(orig_ref : *mut WirePointer,
                                                orig_segment : *mut SegmentBuilder,
                                                element_size : FieldSize,
                                                default_value : *Word) -> ListBuilder<'a> {
        assert!(element_size != INLINE_COMPOSITE,
                "Use get_struct_list_{element,field}() for structs");

        if (*orig_ref).is_null() {
            if default_value.is_null() ||
                (*std::cast::transmute::<*Word,*WirePointer>(default_value)).is_null() {
                return ListBuilder::new_default();
            }
            // NOTE(review): non-null list defaults not yet supported.
            fail!("TODO")
        }

        let orig_ref_target = (*orig_ref).mut_target();

        //# We must verify that the pointer has the right size. Unlike
        //# in getWritableStructListReference(), we never need to
        //# "upgrade" the data, because this method is called only for
        //# non-struct lists, and there is no allowed upgrade path *to*
        //# a non-struct list, only *from* them.

        let mut reff = orig_ref;
        let mut segment = orig_segment;
        let mut ptr = follow_builder_fars(&mut reff, orig_ref_target, &mut segment);

        assert!((*reff).kind() == WP_LIST,
                "Called get_list_{field,element}() but existing pointer is not a list");

        let old_size = (*reff).list_ref().element_size();

        if old_size == INLINE_COMPOSITE {
            //# The existing element size is INLINE_COMPOSITE, which
            //# means that it is at least two words, which makes it
            //# bigger than the expected element size. Since fields can
            //# only grow when upgraded, the existing data must have
            //# been written with a newer version of the protocol. We
            //# therefore never need to upgrade the data in this case,
            //# but we do need to validate that it is a valid upgrade
            //# from what we expected.

            //# Read the tag to get the actual element count.
            let tag : *WirePointer = std::cast::transmute(ptr);
            assert!((*tag).kind() == WP_STRUCT,
                    "INLINE_COMPOSITE list with non-STRUCT elements not supported.");
            ptr = ptr.offset(POINTER_SIZE_IN_WORDS as int);

            let data_size = (*tag).struct_ref().data_size.get();
            let pointer_count = (*tag).struct_ref().ptr_count.get();

            match element_size {
                VOID => {} //# Anything is a valid upgrade from Void.
                BIT | BYTE | TWO_BYTES | FOUR_BYTES | EIGHT_BYTES => {
                    assert!(data_size >= 1,
                            "Existing list value is incompatible with expected type.");
                }
                POINTER => {
                    assert!(pointer_count >= 1,
                            "Existing list value is incompatible with expected type.");
                    //# Adjust the pointer to point at the reference segment.
                    ptr = ptr.offset(data_size as int);
                }
                INLINE_COMPOSITE => {
                    // Excluded by the assert at function entry.
                    unreachable!()
                }
            }

            //# OK, looks valid.
            ListBuilder {
                segment : segment,
                ptr : std::cast::transmute(ptr),
                element_count : (*tag).inline_composite_list_element_count(),
                step : (*tag).struct_ref().word_size() * BITS_PER_WORD,
                struct_data_size : data_size as u32 * BITS_PER_WORD as u32,
                struct_pointer_count : pointer_count
            }
        } else {
            // Existing list uses a primitive encoding; verify it is at least
            // as wide as requested in both data bits and pointers.
            let data_size = data_bits_per_element(old_size);
            let pointer_count = pointers_per_element(old_size);

            assert!(data_size >= data_bits_per_element(element_size),
                    "Existing list value is incompatible with expected type.");
            assert!(pointer_count >= pointers_per_element(element_size),
                    "Existing list value is incompatible with expected type.");

            let step = data_size + pointer_count * BITS_PER_POINTER;

            ListBuilder {
                segment : segment,
                ptr : std::cast::transmute(ptr),
                step : step,
                element_count : (*reff).list_ref().element_count(),
                struct_data_size : data_size as u32,
                struct_pointer_count : pointer_count as u16
            }
        }
    }

    /// Returns a builder over an existing struct list, upgrading if needed.
    /// (Signature continues in the next chunk.)
    #[inline]
    pub unsafe fn get_writable_struct_list_pointer<'a>(orig_ref : *mut WirePointer,
                                                       orig_segment :
                                                       *mut SegmentBuilder,
                                                       element_size : StructSize,
                                                       default_value : *Word) -> ListBuilder<'a> {
        let orig_ref_target = (*orig_ref).mut_target();

        if (*orig_ref).is_null() {
            if default_value.is_null() ||
                (*std::cast::transmute::<*Word,*WirePointer>(default_value)).is_null() {
                return ListBuilder::new_default();
            }
            // NOTE(review): non-null struct-list defaults not yet supported.
            fail!("unimplemented");
        }

        //# We must verify that the pointer has the right size and
        //# potentially upgrade it if not.

        let mut old_ref = orig_ref;
        let mut old_segment = orig_segment;
        let mut old_ptr = follow_builder_fars(&mut old_ref, orig_ref_target, &mut old_segment);

        assert!((*old_ref).kind() == WP_LIST,
                "Called getList\\{Field,Element\\} but existing pointer is not a list.");

        let old_size = (*old_ref).list_ref().element_size();

        if old_size == INLINE_COMPOSITE {
            //# Existing list is INLINE_COMPOSITE, but we need to verify that the sizes match.

            let old_tag : *WirePointer = std::cast::transmute(old_ptr);
            old_ptr = old_ptr.offset(POINTER_SIZE_IN_WORDS as int);
            assert!((*old_tag).kind() == WP_STRUCT,
                    "INLINE_COMPOSITE list with non-STRUCT elements not supported.");

            let old_data_size = (*old_tag).struct_ref().data_size.get();
            let old_pointer_count = (*old_tag).struct_ref().ptr_count.get();
            let old_step = old_data_size as uint + old_pointer_count as uint * WORDS_PER_POINTER;
            let element_count = (*old_tag).inline_composite_list_element_count();

            if old_data_size >= element_size.data && old_pointer_count >= element_size.pointers {
                //# Old size is at least as large as we need. Ship it.
                return ListBuilder {
                    segment : old_segment,
                    ptr : std::cast::transmute(old_ptr),
                    element_count : element_count,
                    step : old_step * BITS_PER_WORD,
                    struct_data_size : old_data_size as u32 * BITS_PER_WORD as u32,
                    struct_pointer_count : old_pointer_count
                };
            }

            //# The structs in this list are smaller than expected,
            //# probably written using an older version of the
            //# protocol. We need to make a copy and expand them.
            // NOTE(review): copy-and-expand upgrade path not yet written.
            fail!("unimplemented");
        } else if old_size == element_size.preferred_list_encoding {
            //# Old size matches exactly.

            let data_size = data_bits_per_element(old_size);
            let pointer_count = pointers_per_element(old_size);
            let step = data_size + pointer_count * BITS_PER_POINTER;

            return ListBuilder {
                segment : old_segment,
                ptr : std::cast::transmute(old_ptr),
                step : step,
                element_count : (*old_ref).list_ref().element_count(),
                struct_data_size : data_size as u32,
                struct_pointer_count : pointer_count as u16
            };
        } else {
            // NOTE(review): upgrading between primitive encodings not yet
            // written.
            fail!("unimplemented");
        }
    }

    /// Allocates space for `size` text bytes plus a NUL terminator and
    /// returns a text builder over it (the terminator is excluded from the
    /// builder's length).
    #[inline]
    pub unsafe fn init_text_pointer<'a>(mut reff : *mut WirePointer,
                                        mut segment : *mut SegmentBuilder,
                                        size : ByteCount) -> super::SegmentAnd<Text::Builder<'a>> {
        //# The byte list must include a NUL terminator.
        let byte_size = size + 1;

        //# Allocate the space.
        let ptr = allocate(&mut reff, &mut segment, round_bytes_up_to_words(byte_size), WP_LIST);

        //# Initialize the pointer.
        (*reff).mut_list_ref().set(BYTE, byte_size);

        return super::SegmentAnd {segment : segment,
                                  value : Text::Builder::new(std::cast::transmute(ptr), size) }
    }

    /// Initializes a text pointer and copies `value` into it.
    #[inline]
    pub unsafe fn set_text_pointer<'a>(reff : *mut WirePointer,
                                       segment : *mut SegmentBuilder,
                                       value : &str) -> super::SegmentAnd<Text::Builder<'a>> {
        let value_bytes = value.as_bytes();
        let allocation = init_text_pointer(reff, segment, value_bytes.len());
        let builder = allocation.value;
        builder.as_mut_bytes().copy_memory(value_bytes);
        allocation
    }

    /// Returns a text builder over an existing text field, initializing it
    /// from `default_value` when null.
    #[inline]
    pub unsafe fn get_writable_text_pointer<'a>(mut reff : *mut WirePointer,
                                                mut segment : *mut SegmentBuilder,
                                                default_value : *Word,
                                                default_size : ByteCount) -> Text::Builder<'a> {
        if (*reff).is_null() {
            if default_size == 0 {
                return Text::Builder::new(std::ptr::mut_null(), 0);
            } else {
                let builder = init_text_pointer(reff, segment, default_size).value;
                std::ptr::copy_nonoverlapping_memory::<u8>(builder.as_ptr(),
                                                           std::cast::transmute(default_value),
                                                           default_size);
                return builder;
            }
        } else {
            let ref_target = (*reff).mut_target();
            let ptr =
                follow_builder_fars(&mut reff, ref_target, &mut segment);

            assert!((*reff).kind() == WP_LIST,
                    "Called getText\\{Field,Element\\}() but existing pointer is not a list.");
            assert!((*reff).list_ref().element_size() == BYTE,
                    "Called getText\\{Field,Element\\}() but existing list pointer is not byte-sized.");

            //# Subtract 1 from the size for the NUL terminator.
            return Text::Builder::new(std::cast::transmute(ptr),
                                      (*reff).list_ref().element_count() - 1);
        }
    }

    /// Allocates space for `size` data bytes (no NUL terminator, unlike
    /// text) and returns a data builder over it.
    #[inline]
    pub unsafe fn init_data_pointer<'a>(mut reff : *mut WirePointer,
                                        mut segment : *mut SegmentBuilder,
                                        size : ByteCount) -> super::SegmentAnd<Data::Builder<'a>> {
        //# Allocate the space.
        let ptr = allocate(&mut reff, &mut segment, round_bytes_up_to_words(size), WP_LIST);

        //# Initialize the pointer.
        (*reff).mut_list_ref().set(BYTE, size);

        return super::SegmentAnd { segment : segment,
                                   value : Data::new_builder(std::cast::transmute(ptr), size) };
    }

    /// Initializes a data pointer and copies `value` into it.
    #[inline]
    pub unsafe fn set_data_pointer<'a>(reff : *mut WirePointer,
                                       segment : *mut SegmentBuilder,
                                       value : &[u8]) -> super::SegmentAnd<Data::Builder<'a>> {
        let allocation = init_data_pointer(reff, segment, value.len());
        allocation.value.copy_memory(value);
        return allocation;
    }

    /// Returns a data builder over an existing data field, initializing it
    /// from `default_value` when null.
    #[inline]
    pub unsafe fn get_writable_data_pointer<'a>(mut reff : *mut WirePointer,
                                                mut segment : *mut SegmentBuilder,
                                                default_value : *Word,
                                                default_size : ByteCount) -> Data::Builder<'a> {
        if (*reff).is_null() {
            if default_size == 0 {
                return Data::new_builder(std::ptr::mut_null(), 0);
            } else {
                let builder = init_data_pointer(reff, segment, default_size).value;
                std::ptr::copy_nonoverlapping_memory::<u8>(builder.as_mut_ptr(),
                                                           std::cast::transmute(default_value),
                                                           default_size);
                return builder;
            }
        } else {
            let ref_target = (*reff).mut_target();
            let ptr = follow_builder_fars(&mut reff, ref_target, &mut segment);

            assert!((*reff).kind() == WP_LIST,
                    "Called getData\\{Field,Element\\}() but existing pointer is not a list.");
            assert!((*reff).list_ref().element_size() == BYTE,
                    "Called getData\\{Field,Element\\}() but existing list pointer is not byte-sized.");

            return Data::new_builder(std::cast::transmute(ptr),
                                     (*reff).list_ref().element_count());
        }
    }

    /// Deep-copies the struct read through `value` into a newly allocated
    /// struct at `reff`, copying the data section and recursing into the
    /// pointer section via copy_pointer.
    pub unsafe fn set_struct_pointer<'a>(mut segment : *mut SegmentBuilder,
                                         mut reff : *mut WirePointer,
                                         value : StructReader) -> super::SegmentAnd<*mut Word> {
        let data_size : WordCount = round_bits_up_to_words(value.data_size as u64);
        let total_size : WordCount = data_size + value.pointer_count as uint * WORDS_PER_POINTER;

        let ptr = allocate(&mut reff, &mut segment, total_size, WP_STRUCT);
        (*reff).struct_ref().set(data_size as u16, value.pointer_count);

        if value.data_size == 1 {
            // Single-bit struct: copy the lone bool into the first byte.
            *std::cast::transmute::<*mut Word, *mut u8>(ptr) = value.get_bool_field(0) as u8
        } else {
            std::ptr::copy_nonoverlapping_memory::<Word>(ptr,
                                                         std::cast::transmute(value.data),
                                                         value.data_size as uint / BITS_PER_WORD);
        }

        let pointer_section : *mut WirePointer =
            std::cast::transmute(ptr.offset(data_size as int));
        for i in range(0, value.pointer_count as int) {
            copy_pointer(segment, pointer_section.offset(i),
                         value.segment, value.pointers.offset(i),
                         value.nesting_limit);
        }

        super::SegmentAnd { segment : segment, value : ptr }
    }

    /// Stores a capability by injecting it into the arena's cap table and
    /// recording its index in the pointer.
    pub unsafe fn set_capability_pointer(segment : *mut SegmentBuilder,
                                         reff : *mut WirePointer,
                                         cap : ~ClientHook) {
        (*reff).set_cap((*(*segment).get_arena()).inject_cap(cap));
    }

    /// Deep-copies the list read through `value` into a newly allocated list
    /// at `reff`. Non-struct lists (step <= one word) are copied flat, with
    /// pointer elements recursed via copy_pointer; struct lists get an
    /// INLINE_COMPOSITE encoding with a leading tag word.
    pub unsafe fn set_list_pointer<'a>(mut segment : *mut SegmentBuilder,
                                       mut reff : *mut WirePointer,
                                       value : ListReader) -> super::SegmentAnd<*mut Word> {
        let total_size = round_bits_up_to_words((value.element_count * value.step) as u64);

        if value.step <= BITS_PER_WORD {
            //# List of non-structs.
            let ptr = allocate(&mut reff, &mut segment, total_size, WP_LIST);

            if value.struct_pointer_count == 1 {
                //# List of pointers.
                (*reff).list_ref().set(POINTER, value.element_count);
                for i in range(0, value.element_count as int) {
                    copy_pointer(segment,
                                 std::cast::transmute::<*mut Word,*mut WirePointer>(ptr).offset(i),
                                 value.segment,
                                 std::cast::transmute::<*u8,*WirePointer>(value.ptr).offset(i),
                                 value.nesting_limit);
                }
            } else {
                //# List of data.
                // Map the per-element bit width back to a FieldSize tag.
                let element_size = match value.step {
                    0 => VOID,
                    1 => BIT,
                    8 => BYTE,
                    16 => TWO_BYTES,
                    32 => FOUR_BYTES,
                    64 => EIGHT_BYTES,
                    _ => { fail!("invalid list step size: {}", value.step) }
                };

                (*reff).list_ref().set(element_size, value.element_count);
                std::ptr::copy_memory(ptr, std::cast::transmute::<*u8,*Word>(value.ptr),
                                      total_size);
            }

            super::SegmentAnd { segment : segment, value : ptr }
        } else {
            //# List of structs.
            let ptr = allocate(&mut reff, &mut segment,
                               total_size + POINTER_SIZE_IN_WORDS, WP_LIST);
            (*reff).list_ref().set_inline_composite(total_size);

            let data_size = round_bits_up_to_words(value.struct_data_size as u64);
            let pointer_count = value.struct_pointer_count;

            // Write the tag word describing each struct element.
            let tag : *mut WirePointer = std::cast::transmute(ptr);
            (*tag).set_kind_and_inline_composite_list_element_count(WP_STRUCT, value.element_count);
            (*tag).struct_ref().set(data_size as u16, pointer_count);
            let mut dst = ptr.offset(POINTER_SIZE_IN_WORDS as int);

            let mut src : *Word = std::cast::transmute(value.ptr);
            for _ in range(0, value.element_count) {
                // Copy the element's data words, then recurse through its
                // pointer words.
                std::ptr::copy_nonoverlapping_memory(dst, src,
                                                     value.struct_data_size as uint / BITS_PER_WORD);
                dst = dst.offset(data_size as int);
                src = src.offset(data_size as int);

                for _ in range(0, pointer_count) {
                    copy_pointer(segment, std::cast::transmute(dst),
                                 value.segment, std::cast::transmute(src),
                                 value.nesting_limit);
                    dst = dst.offset(POINTER_SIZE_IN_WORDS as int);
                    src = src.offset(POINTER_SIZE_IN_WORDS as int);
                }
            }

            super::SegmentAnd { segment : segment, value : ptr }
        }
    }

    /// Deep-copies the object `src` points at (in a reader segment) into
    /// `dst` (in a builder segment), validating bounds and nesting depth.
    pub unsafe fn copy_pointer(dst_segment : *mut SegmentBuilder, dst : *mut WirePointer,
                               mut src_segment : *SegmentReader, mut src : *WirePointer,
                               nesting_limit : int) -> super::SegmentAnd<*mut Word> {
        let src_target = (*src).target();

        if (*src).is_null() {
            std::ptr::zero_memory(dst, 1);
            return super::SegmentAnd { segment : dst_segment, value : std::ptr::mut_null() };
        }

        let mut ptr = follow_fars(&mut src, src_target, &mut src_segment);
        // TODO what if ptr is null?

        match (*src).kind() {
            WP_STRUCT => {
                assert!(nesting_limit > 0,
                        "Message is too deeply-nested or contains cycles.  See ReadOptions.");

                assert!(bounds_check(src_segment, ptr,
                                     ptr.offset((*src).struct_ref().word_size() as int)),
                        "Message contains out-of-bounds struct pointer.");
                set_struct_pointer(
                    dst_segment, dst,
                    StructReader {
                        segment : src_segment,
                        data : std::cast::transmute(ptr),
                        pointers : std::cast::transmute(ptr.offset((*src).struct_ref().data_size.get() as int)),
                        data_size : (*src).struct_ref().data_size.get() as u32 * BITS_PER_WORD as u32,
                        pointer_count : (*src).struct_ref().ptr_count.get(),
                        bit0offset : 0,
                        nesting_limit : nesting_limit - 1
                    })
            }
            WP_LIST => {
                let element_size = (*src).list_ref().element_size();
                assert!(nesting_limit > 0,
                        "Message is too deeply-nested or contains cycles.  See ReadOptions.");

                if element_size == INLINE_COMPOSITE {
                    let word_count = (*src).list_ref().inline_composite_word_count();
                    let tag : *WirePointer = std::cast::transmute(ptr);
                    ptr = ptr.offset(POINTER_SIZE_IN_WORDS as int);

                    // -1 includes the tag word in the bounds check.
                    assert!(bounds_check(src_segment, ptr.offset(-1), ptr.offset(word_count as int)),
                            "Message contains out-of-bounds list pointer.");

                    assert!((*tag).kind() == WP_STRUCT,
                            "INLINE_COMPOSITE lists of non-STRUCT type are not supported.");

                    let element_count = (*tag).inline_composite_list_element_count();
                    let words_per_element = (*tag).struct_ref().word_size();

                    assert!(words_per_element * element_count <= word_count,
                            "INLINE_COMPOSITE list's elements overrun its word count.");
                    set_list_pointer(
                        dst_segment, dst,
                        ListReader {
                            segment : src_segment,
                            ptr : std::cast::transmute(ptr),
                            element_count : element_count,
                            step : words_per_element * BITS_PER_WORD,
                            struct_data_size : (*tag).struct_ref().data_size.get() as u32 * BITS_PER_WORD as u32,
                            struct_pointer_count : (*tag).struct_ref().ptr_count.get(),
                            nesting_limit : nesting_limit - 1
                        })
                } else {
                    let data_size = data_bits_per_element(element_size);
                    let pointer_count = pointers_per_element(element_size);
                    let step = data_size + pointer_count * BITS_PER_POINTER;
                    let element_count = (*src).list_ref().element_count();
                    let word_count = round_bits_up_to_words(element_count as u64 * step as u64);

                    assert!(bounds_check(src_segment, ptr, ptr.offset(word_count as int)),
                            "Message contains out-of-bounds list pointer.");

                    set_list_pointer(
                        dst_segment, dst,
                        ListReader {
                            segment : src_segment,
                            ptr : std::cast::transmute(ptr),
                            element_count : element_count,
                            step : step,
                            struct_data_size : data_size as u32,
                            struct_pointer_count : pointer_count as u16,
                            nesting_limit : nesting_limit - 1
                        })
                }
            }
            WP_FAR => {
                fail!("Far pointer should have been handled above");
            }
            WP_OTHER => {
                assert!((*src).is_capability(), "Unknown pointer type.");
                // NOTE(review): capability copying not yet written.
                fail!("unimplemented");
            }
        }
    }

    /// Reads a struct pointer (reader side), resolving far pointers and
    /// bounds-checking. (Signature continues in the next chunk.)
    #[inline]
    pub unsafe fn read_struct_pointer<'a>(mut segment:
*SegmentReader, mut reff : *WirePointer, defaultValue : *Word, nesting_limit : int) -> StructReader<'a> { if (*reff).is_null() { if defaultValue.is_null() || (*std::cast::transmute::<*Word,*WirePointer>(defaultValue)).is_null() { return StructReader::new_default(); } //segment = std::ptr::null(); //reff = std::cast::transmute::<*Word,*WirePointer>(defaultValue); fail!("default struct values unimplemented"); } let refTarget : *Word = (*reff).target(); assert!(nesting_limit > 0, "Message is too deeply-nested or contains cycles."); let ptr = follow_fars(&mut reff, refTarget, &mut segment); let data_size_words = (*reff).struct_ref().data_size.get(); assert!((*reff).kind() == WP_STRUCT, "Message contains non-struct pointer where struct pointer was expected."); assert!(bounds_check(segment, ptr, ptr.offset((*reff).struct_ref().word_size() as int)), "Message contains out-of-bounds struct pointer."); StructReader {segment : segment, data : std::cast::transmute(ptr), pointers : std::cast::transmute(ptr.offset(data_size_words as int)), data_size : data_size_words as u32 * BITS_PER_WORD as BitCount32, pointer_count : (*reff).struct_ref().ptr_count.get(), bit0offset : 0, nesting_limit : nesting_limit - 1 } } #[inline] pub unsafe fn read_capability_pointer(segment : *SegmentReader, reff : *WirePointer, _nesting_limit : int) -> ~ClientHook { if (*reff).is_null() { fail!("broken cap factory is unimplemented"); } else if !(*reff).is_capability() { fail!("Message contains non-capability pointer where capability pointer was expected."); } else { let n = (*reff).cap_ref().index.get() as uint; match (*segment).arena.extract_cap(n) { Some(client_hook) => { client_hook } None => { fail!("Message contains invalid capability pointer: {}", n) } } } } #[inline] pub unsafe fn read_list_pointer<'a>(mut segment: *SegmentReader, mut reff : *WirePointer, defaultValue : *Word, expectedElementSize : FieldSize, nesting_limit : int ) -> ListReader<'a> { if (*reff).is_null() { if 
defaultValue.is_null() || (*std::cast::transmute::<*Word,*WirePointer>(defaultValue)).is_null() { return ListReader::new_default(); } fail!("list default values unimplemented"); } let refTarget : *Word = (*reff).target(); if nesting_limit <= 0 { fail!("nesting limit exceeded"); } let mut ptr : *Word = follow_fars(&mut reff, refTarget, &mut segment); assert!((*reff).kind() == WP_LIST, "Message contains non-list pointer where list pointer was expected {:?}", reff); let list_ref = (*reff).list_ref(); match list_ref.element_size() { INLINE_COMPOSITE => { let wordCount = list_ref.inline_composite_word_count(); let tag: *WirePointer = std::cast::transmute(ptr); ptr = ptr.offset(1); assert!(bounds_check(segment, ptr.offset(-1), ptr.offset(wordCount as int))); assert!((*tag).kind() == WP_STRUCT, "INLINE_COMPOSITE lists of non-STRUCT type are not supported"); let size = (*tag).inline_composite_list_element_count(); let struct_ref = (*tag).struct_ref(); let wordsPerElement = struct_ref.word_size(); assert!(size * wordsPerElement <= wordCount, "INLINE_COMPOSITE list's elements overrun its word count"); //# If a struct list was not expected, then presumably //# a non-struct list was upgraded to a struct list. //# We need to manipulate the pointer to point at the //# first field of the struct. Together with the //# "stepBits", this will allow the struct list to be //# accessed as if it were a primitive list without //# branching. //# Check whether the size is compatible. 
match expectedElementSize { VOID => {} BIT => fail!("Expected a bit list, but got a list of structs"), BYTE | TWO_BYTES | FOUR_BYTES | EIGHT_BYTES => { assert!(struct_ref.data_size.get() > 0, "Expected a primitive list, but got a list of pointer-only structs") } POINTER => { ptr = ptr.offset(struct_ref.data_size.get() as int); assert!(struct_ref.ptr_count.get() > 0, "Expected a pointer list, but got a list of data-only structs") } INLINE_COMPOSITE => {} } ListReader { segment : segment, ptr : std::cast::transmute(ptr), element_count : size, step : wordsPerElement * BITS_PER_WORD, struct_data_size : struct_ref.data_size.get() as u32 * (BITS_PER_WORD as u32), struct_pointer_count : struct_ref.ptr_count.get() as u16, nesting_limit : nesting_limit - 1 } } _ => { //# This is a primitive or pointer list, but all such //# lists can also be interpreted as struct lists. We //# need to compute the data size and pointer count for //# such structs. let data_size = data_bits_per_element(list_ref.element_size()); let pointer_count = pointers_per_element(list_ref.element_size()); let step = data_size + pointer_count * BITS_PER_POINTER; assert!( bounds_check( segment, ptr, ptr.offset( round_bits_up_to_words( (list_ref.element_count() * step) as u64) as int))); //# Verify that the elements are at least as large as //# the expected type. Note that if we expected //# INLINE_COMPOSITE, the expected sizes here will be //# zero, because bounds checking will be performed at //# field access time. So this check here is for the //# case where we expected a list of some primitive or //# pointer type. 
let expectedDataBitsPerElement = data_bits_per_element(expectedElementSize); let expectedPointersPerElement = pointers_per_element(expectedElementSize); assert!(expectedDataBitsPerElement <= data_size); assert!(expectedPointersPerElement <= pointer_count) ListReader { segment : segment, ptr : std::cast::transmute(ptr), element_count : list_ref.element_count(), step : step, struct_data_size : data_size as u32, struct_pointer_count : pointer_count as u16, nesting_limit : nesting_limit - 1 } } } } #[inline] pub unsafe fn read_text_pointer<'a>(mut segment : *SegmentReader, mut reff : *WirePointer, default_value : *Word, default_size : ByteCount ) -> Text::Reader<'a> { if reff.is_null() || (*reff).is_null() { return Text::new_reader(std::cast::transmute(default_value), default_size); } let refTarget = (*reff).target(); let ptr : *Word = follow_fars(&mut reff, refTarget, &mut segment); let list_ref = (*reff).list_ref(); let size : uint = list_ref.element_count(); assert!((*reff).kind() == WP_LIST, "Message contains non-list pointer where text was expected"); assert!(list_ref.element_size() == BYTE); assert!(bounds_check(segment, ptr, ptr.offset(round_bytes_up_to_words(size) as int))); assert!(size > 0, "Message contains text that is not NUL-terminated"); let str_ptr = std::cast::transmute::<*Word,*u8>(ptr); assert!((*str_ptr.offset((size - 1) as int)) == 0u8, "Message contains text that is not NUL-terminated"); Text::new_reader(str_ptr, size-1) } #[inline] pub unsafe fn read_data_pointer<'a>(mut segment : *SegmentReader, mut reff : *WirePointer, default_value : *Word, default_size : ByteCount ) -> Data::Reader<'a> { if reff.is_null() || (*reff).is_null() { return Data::new_reader(std::cast::transmute(default_value), default_size); } let refTarget = (*reff).target(); let ptr : *Word = follow_fars(&mut reff, refTarget, &mut segment); let list_ref = (*reff).list_ref(); let size : uint = list_ref.element_count(); assert!((*reff).kind() == WP_LIST, "Message contains non-list 
pointer where text was expected"); assert!(list_ref.element_size() == BYTE, "Message contains list pointer of non-bytes where data was expected"); assert!(bounds_check(segment, ptr, ptr.offset(round_bytes_up_to_words(size) as int)), "Message contains out-of-bounds data pointer."); Data::new_reader(std::cast::transmute(ptr), size) } } static zero : u64 = 0; fn zero_pointer() -> *WirePointer { unsafe {std::cast::transmute(std::ptr::to_unsafe_ptr(&zero))}} pub struct PointerReader<'a> { segment : *SegmentReader, pointer : *WirePointer, nesting_limit : int } impl <'a> PointerReader<'a> { pub fn new_default<'b>() -> PointerReader<'b> { PointerReader { segment : std::ptr::null(), pointer : std::ptr::null(), nesting_limit : 0x7fffffff } } pub fn get_root<'b>(segment : *SegmentReader, location : *Word, nesting_limit : int) -> PointerReader<'b> { unsafe { assert!(WireHelpers::bounds_check(segment, location, location.offset(POINTER_SIZE_IN_WORDS as int)), "Root location out of bounds."); PointerReader { segment : segment, pointer : std::cast::transmute(location), nesting_limit : nesting_limit } } } pub fn get_root_unchecked<'b>(location : *Word) -> PointerReader<'b> { PointerReader { segment : std::ptr::null(), pointer : unsafe { std::cast::transmute(location) }, nesting_limit : 0x7fffffff } } pub fn is_null(&self) -> bool { self.pointer.is_null() || unsafe { (*self.pointer).is_null() } } pub fn get_struct(&self, default_value: *Word) -> StructReader<'a> { let reff : *WirePointer = if self.pointer.is_null() { zero_pointer() } else { self.pointer }; unsafe { WireHelpers::read_struct_pointer(self.segment, reff, default_value, self.nesting_limit) } } pub fn get_list(&self, expected_element_size : FieldSize, default_value : *Word) -> ListReader<'a> { let reff = if self.pointer.is_null() { zero_pointer() } else { self.pointer }; unsafe { WireHelpers::read_list_pointer(self.segment, reff, default_value, expected_element_size, self.nesting_limit) } } pub fn get_text(&self, 
default_value : *Word, default_size : ByteCount) -> Text::Reader<'a> { unsafe { WireHelpers::read_text_pointer(self.segment, self.pointer, default_value, default_size) } } pub fn get_data(&self, default_value : *Word, default_size : ByteCount) -> Data::Reader<'a> { unsafe { WireHelpers::read_data_pointer(self.segment, self.pointer, default_value, default_size) } } pub fn get_capability(&self) -> ~ClientHook { let reff : *WirePointer = if self.pointer.is_null() { zero_pointer() } else { self.pointer }; unsafe { WireHelpers::read_capability_pointer(self.segment, reff, self.nesting_limit) } } pub fn total_size(&self) -> MessageSize { unsafe { WireHelpers::total_size(self.segment, self.pointer, self.nesting_limit) } } } pub struct PointerBuilder<'a> { segment : *mut SegmentBuilder, pointer : *mut WirePointer } impl <'a> PointerBuilder<'a> { #[inline] pub fn get_root(segment : *mut SegmentBuilder, location : *mut Word) -> PointerBuilder<'a> { PointerBuilder {segment : segment, pointer : unsafe { std::cast::transmute(location) }} } pub fn is_null(&self) -> bool { unsafe { (*self.pointer).is_null() } } pub fn get_struct(&self, size : StructSize, default_value : *Word) -> StructBuilder<'a> { unsafe { WireHelpers::get_writable_struct_pointer( self.pointer, self.segment, size, default_value) } } pub fn get_list(&self, element_size : FieldSize, default_value : *Word) -> ListBuilder<'a> { unsafe { WireHelpers::get_writable_list_pointer( self.pointer, self.segment, element_size, default_value) } } pub fn get_struct_list(&self, element_size : StructSize, default_value : *Word) -> ListBuilder<'a> { unsafe { WireHelpers::get_writable_struct_list_pointer( self.pointer, self.segment, element_size, default_value) } } pub fn get_text(&self, default_value : *Word, default_size : ByteCount) -> Text::Builder<'a> { unsafe { WireHelpers::get_writable_text_pointer( self.pointer, self.segment, default_value, default_size) } } pub fn get_data(&self, default_value : *Word, default_size : 
ByteCount) -> Data::Builder<'a> { unsafe { WireHelpers::get_writable_data_pointer( self.pointer, self.segment, default_value, default_size) } } pub fn get_capability(&self) -> ~ClientHook { unsafe { WireHelpers::read_capability_pointer( std::ptr::to_unsafe_ptr(&(*self.segment).reader), self.pointer as *WirePointer, std::int::MAX) } } pub fn init_struct(&self, size : StructSize) -> StructBuilder<'a> { unsafe { WireHelpers::init_struct_pointer(self.pointer, self.segment, size) } } pub fn init_list(&self, element_size : FieldSize, element_count : ElementCount) -> ListBuilder<'a> { unsafe { WireHelpers::init_list_pointer( self.pointer, self.segment, element_count, element_size) } } pub fn init_struct_list(&self, element_count : ElementCount, element_size : StructSize) -> ListBuilder<'a> { unsafe { WireHelpers::init_struct_list_pointer( self.pointer, self.segment, element_count, element_size) } } pub fn init_text(&self, size : ByteCount) -> Text::Builder<'a> { unsafe { WireHelpers::init_text_pointer(self.pointer, self.segment, size).value } } pub fn init_data(&self, size : ByteCount) -> Data::Builder<'a> { unsafe { WireHelpers::init_data_pointer(self.pointer, self.segment, size).value } } pub fn set_struct(&self, value : &StructReader) { unsafe { WireHelpers::set_struct_pointer(self.segment, self.pointer, *value); } } pub fn set_list(&self, value : &ListReader) { unsafe { WireHelpers::set_list_pointer(self.segment, self.pointer, *value); } } pub fn set_text(&self, value : &str) { unsafe { WireHelpers::set_text_pointer(self.pointer, self.segment, value); } } pub fn set_data(&self, value : &[u8]) { unsafe { WireHelpers::set_data_pointer(self.pointer, self.segment, value); } } pub fn set_capability(&self, cap : ~ClientHook) { unsafe { WireHelpers::set_capability_pointer(self.segment, self.pointer, cap); } } pub fn clear(&self) { unsafe { WireHelpers::zero_object(self.segment, self.pointer); std::ptr::zero_memory(self.pointer, 1); } } pub fn as_reader(&self) -> 
PointerReader<'a> { unsafe { let segment_reader = &(*self.segment).reader; PointerReader { segment : segment_reader, pointer : self.pointer as *WirePointer, nesting_limit : 0x7fffffff } } } } pub trait FromStructReader<'a> { fn new(reader : StructReader<'a>) -> Self; } pub struct StructReader<'a> { segment : *SegmentReader, data : *u8, pointers : *WirePointer, data_size : BitCount32, pointer_count : WirePointerCount16, bit0offset : BitCount8, nesting_limit : int } impl <'a> StructReader<'a> { pub fn new_default() -> StructReader { StructReader { segment : std::ptr::null(), data : std::ptr::null(), pointers : std::ptr::null(), data_size : 0, pointer_count : 0, bit0offset : 0, nesting_limit : 0x7fffffff} } pub fn get_data_section_size(&self) -> BitCount32 { self.data_size } pub fn get_pointer_section_size(&self) -> WirePointerCount16 { self.pointer_count } pub fn get_data_section_as_blob(&self) -> uint { fail!("unimplemented") } #[inline] pub fn get_data_field<T:Clone + std::num::Zero>(&self, offset : ElementCount) -> T { // We need to check the offset because the struct may have // been created with an old version of the protocol that did // not contain the field. 
if (offset + 1) * bits_per_element::<T>() <= self.data_size as uint { unsafe { let dwv : *WireValue<T> = std::cast::transmute(self.data); (*dwv.offset(offset as int)).get() } } else { return std::num::Zero::zero() } } #[inline] pub fn get_bool_field(&self, offset : ElementCount) -> bool { let mut boffset : BitCount32 = offset as BitCount32; if boffset < self.data_size { if offset == 0 { boffset = self.bit0offset as BitCount32; } unsafe { let b : *u8 = self.data.offset((boffset as uint / BITS_PER_BYTE) as int); ((*b) & (1 << (boffset % BITS_PER_BYTE as u32 ))) != 0 } } else { false } } #[inline] pub fn get_data_field_mask<T:Clone + std::num::Zero + Mask>(&self, offset : ElementCount, mask : T) -> T { Mask::mask(self.get_data_field(offset), mask) } #[inline] pub fn get_bool_field_mask(&self, offset : ElementCount, mask : bool) -> bool { self.get_bool_field(offset) ^ mask } #[inline] pub fn get_pointer_field(&self, ptr_index : WirePointerCount) -> PointerReader<'a> { if ptr_index < self.pointer_count as WirePointerCount { PointerReader { segment : self.segment, pointer : unsafe { self.pointers.offset(ptr_index as int) }, nesting_limit : self.nesting_limit } } else { PointerReader::new_default() } } pub fn total_size(&self) -> MessageSize { let mut result = MessageSize { word_count : WireHelpers::round_bits_up_to_words(self.data_size as u64) as u64 + self.pointer_count as u64 * WORDS_PER_POINTER as u64, cap_count : 0 }; for i in range(0, self.pointer_count as int) { unsafe { result.plus_eq(WireHelpers::total_size(self.segment, self.pointers.offset(i), self.nesting_limit)); } } // TODO when we have read limiting: segment->unread() result } } pub trait HasStructSize { fn struct_size(unused_self : Option<Self>) -> StructSize; } pub trait FromStructBuilder<'a> { fn new(structBuilder : StructBuilder<'a>) -> Self; } pub struct StructBuilder<'a> { segment : *mut SegmentBuilder, data : *mut u8, pointers : *mut WirePointer, data_size : BitCount32, pointer_count : 
WirePointerCount16, bit0offset : BitCount8 } impl <'a> StructBuilder<'a> { pub fn as_reader(&self) -> StructReader<'a> { unsafe { let segmentReader = &(*self.segment).reader; StructReader { segment : std::ptr::to_unsafe_ptr(segmentReader), data : std::cast::transmute(self.data), pointers : std::cast::transmute(self.pointers), data_size : self.data_size, pointer_count : self.pointer_count, bit0offset : self.bit0offset, nesting_limit : 0x7fffffff } } } #[inline] pub fn set_data_field<T:Clone>(&self, offset : ElementCount, value : T) { unsafe { let ptr : *mut WireValue<T> = std::cast::transmute(self.data); (*ptr.offset(offset as int)).set(value) } } #[inline] pub fn set_data_field_mask<T:Clone + std::num::Zero + Mask>(&self, offset : ElementCount, value : T, mask : T) { self.set_data_field(offset, Mask::mask(value, mask)); } #[inline] pub fn get_data_field<T:Clone>(&self, offset : ElementCount) -> T { unsafe { let ptr : *mut WireValue<T> = std::cast::transmute(self.data); (*ptr.offset(offset as int)).get() } } #[inline] pub fn get_data_field_mask<T:Clone + std::num::Zero + Mask>(&self, offset : ElementCount, mask : T) -> T { Mask::mask(self.get_data_field(offset), mask) } #[inline] pub fn set_bool_field(&self, offset : ElementCount, value : bool) { //# This branch should be compiled out whenever this is //# inlined with a constant offset. 
let boffset : BitCount0 = if offset == 0 { self.bit0offset as uint } else { offset }; let b = unsafe { self.data.offset((boffset / BITS_PER_BYTE) as int)}; let bitnum = boffset % BITS_PER_BYTE; unsafe { (*b) = (( (*b) & !(1 << bitnum)) | (value as u8 << bitnum)) } } #[inline] pub fn set_bool_field_mask(&self, offset : ElementCount, value : bool, mask : bool) { self.set_bool_field(offset , value ^ mask); } #[inline] pub fn get_bool_field(&self, offset : ElementCount) -> bool { let boffset : BitCount0 = if offset == 0 {self.bit0offset as BitCount0} else {offset}; let b = unsafe { self.data.offset((boffset / BITS_PER_BYTE) as int) }; unsafe { ((*b) & (1 << (boffset % BITS_PER_BYTE ))) != 0 } } #[inline] pub fn get_bool_field_mask(&self, offset : ElementCount, mask : bool) -> bool { self.get_bool_field(offset) ^ mask } #[inline] pub fn get_pointer_field(&self, ptr_index : WirePointerCount) -> PointerBuilder<'a> { PointerBuilder { segment : self.segment, pointer : unsafe { self.pointers.offset(ptr_index as int) } } } } pub struct ListReader<'a> { segment : *SegmentReader, ptr : *u8, element_count : ElementCount, step : BitCount0, struct_data_size : BitCount32, struct_pointer_count : WirePointerCount16, nesting_limit : int } impl <'a> ListReader<'a> { pub fn new_default() -> ListReader { ListReader { segment : std::ptr::null(), ptr : std::ptr::null(), element_count : 0, step: 0, struct_data_size : 0, struct_pointer_count : 0, nesting_limit : 0x7fffffff} } #[inline] pub fn size(&self) -> ElementCount { self.element_count } pub fn get_struct_element(&self, index : ElementCount) -> StructReader<'a> { assert!(self.nesting_limit > 0, "Message is too deeply-nested or contains cycles"); let indexBit : BitCount64 = index as ElementCount64 * (self.step as BitCount64); let structData : *u8 = unsafe { self.ptr.offset((indexBit as uint / BITS_PER_BYTE) as int) }; let structPointers : *WirePointer = unsafe { std::cast::transmute( structData.offset((self.struct_data_size as uint / 
BITS_PER_BYTE) as int)) }; /* assert!(self.struct_pointer_count == 0 || structPointers % BYTES_PER_POINTER == 0, "Pointer section of struct list element not aligned" ); */ StructReader { segment : self.segment, data : structData, pointers : structPointers, data_size : self.struct_data_size as BitCount32, pointer_count : self.struct_pointer_count, bit0offset : (indexBit % (BITS_PER_BYTE as u64)) as u8, nesting_limit : self.nesting_limit - 1 } } #[inline] pub fn get_pointer_element(&self, index : ElementCount) -> PointerReader<'a> { PointerReader { segment : self.segment, pointer : unsafe { std::cast::transmute(self.ptr.offset((index * self.step / BITS_PER_BYTE) as int)) }, nesting_limit : self.nesting_limit } } } pub struct ListBuilder<'a> { segment : *mut SegmentBuilder, ptr : *mut u8, element_count : ElementCount, step : BitCount0, struct_data_size : BitCount32, struct_pointer_count : WirePointerCount16 } impl <'a> ListBuilder<'a> { #[inline] pub fn new_default<'a>() -> ListBuilder<'a> { ListBuilder { segment : std::ptr::mut_null(), ptr : std::ptr::mut_null(), element_count : 0, step : 0, struct_data_size : 0, struct_pointer_count : 0 } } #[inline] pub fn size(&self) -> ElementCount { self.element_count } pub fn get_struct_element(&self, index : ElementCount) -> StructBuilder<'a> { let indexBit = index * self.step; let structData = unsafe{ self.ptr.offset((indexBit / BITS_PER_BYTE) as int)}; let structPointers = unsafe { std::cast::transmute( structData.offset(((self.struct_data_size as uint) / BITS_PER_BYTE) as int)) }; StructBuilder { segment : self.segment, data : structData, pointers : structPointers, data_size : self.struct_data_size, pointer_count : self.struct_pointer_count, bit0offset : (indexBit % BITS_PER_BYTE) as u8 } } #[inline] pub fn get_pointer_element(&self, index : ElementCount) -> PointerBuilder<'a> { PointerBuilder { segment : self.segment, pointer : unsafe { std::cast::transmute(self.ptr.offset((index * self.step / BITS_PER_BYTE) as int)) } } } 
}

/// Accessor trait for elements of primitive-typed lists (ints, floats, bool,
/// Void). The default methods read/write an element directly at the list's
/// raw byte pointer: `step` is the element stride in *bits*, so the byte
/// offset of element `index` is `index * step / BITS_PER_BYTE`.
/// NOTE(review): values go through `WireValue<Self>` (from the `endian`
/// module), which presumably handles on-wire byte order — confirm there.
pub trait PrimitiveElement : Clone {
    /// Reads element `index` from a list reader.
    #[inline]
    fn get(listReader : &ListReader, index : ElementCount) -> Self {
        unsafe {
            // step is in bits; convert the element's bit offset to bytes.
            let ptr : *u8 = listReader.ptr.offset(
                (index * listReader.step / BITS_PER_BYTE) as int);
            // Reinterpret the raw bytes at that position as WireValue<Self>.
            (*std::cast::transmute::<*u8,*WireValue<Self>>(ptr)).get()
        }
    }

    /// Reads element `index` from a list builder (same layout as the reader).
    #[inline]
    fn get_from_builder(listBuilder : &ListBuilder, index : ElementCount) -> Self {
        unsafe {
            let ptr : *mut WireValue<Self> =
                std::cast::transmute(
                listBuilder.ptr.offset(
                    (index * listBuilder.step / BITS_PER_BYTE) as int));
            (*ptr).get()
        }
    }

    /// Writes `value` into element `index` of a list builder.
    #[inline]
    fn set(listBuilder : &ListBuilder, index : ElementCount, value: Self) {
        unsafe {
            let ptr : *mut WireValue<Self> =
                std::cast::transmute(
                listBuilder.ptr.offset(
                    (index * listBuilder.step / BITS_PER_BYTE) as int));
            (*ptr).set(value);
        }
    }
}

// Fixed-width numeric types use the default byte-addressed accessors above.
impl PrimitiveElement for u8 { }
impl PrimitiveElement for u16 { }
impl PrimitiveElement for u32 { }
impl PrimitiveElement for u64 { }
impl PrimitiveElement for i8 { }
impl PrimitiveElement for i16 { }
impl PrimitiveElement for i32 { }
impl PrimitiveElement for i64 { }
impl PrimitiveElement for f32 { }
impl PrimitiveElement for f64 { }

// Booleans are packed one per bit, so they need bit-addressed accessors
// rather than the byte-addressed defaults.
impl PrimitiveElement for bool {
    /// Reads bit `index` of a packed bit list.
    #[inline]
    fn get(list : &ListReader, index : ElementCount) -> bool {
        //# Ignore stepBytes for bit lists because bit lists cannot be
        //# upgraded to struct lists.
        let bindex : BitCount0 = index * list.step;
        unsafe {
            // Select the containing byte, then test the bit within it.
            let b : *u8 = list.ptr.offset((bindex / BITS_PER_BYTE) as int);
            ((*b) & (1 << (bindex % BITS_PER_BYTE))) != 0
        }
    }

    /// Reads bit `index` from a bit-list builder.
    #[inline]
    fn get_from_builder(list : &ListBuilder, index : ElementCount) -> bool {
        //# Ignore stepBytes for bit lists because bit lists cannot be
        //# upgraded to struct lists.
        let bindex : BitCount0 = index * list.step;
        let b = unsafe { list.ptr.offset((bindex / BITS_PER_BYTE) as int) };
        unsafe { ((*b) & (1 << (bindex % BITS_PER_BYTE ))) != 0 }
    }

    /// Writes bit `index` of a bit-list builder (read-modify-write of the
    /// containing byte).
    #[inline]
    fn set(list : &ListBuilder, index : ElementCount, value : bool) {
        //# Ignore stepBytes for bit lists because bit lists cannot be
        //# upgraded to struct lists.
let bindex : BitCount0 = index; let b = unsafe { list.ptr.offset((bindex / BITS_PER_BYTE) as int) }; let bitnum = bindex % BITS_PER_BYTE; unsafe { (*b) = (( (*b) & !(1 << bitnum)) | (value as u8 << bitnum)) } } } impl PrimitiveElement for () { #[inline] fn get(_list : &ListReader, _index : ElementCount) -> () { () } #[inline] fn get_from_builder(_list : &ListBuilder, _index : ElementCount) -> () { () } #[inline] fn set(_list : &ListBuilder, _index : ElementCount, _value : ()) { } } fill in unimplemented section of copy_pointer /* * Copyright (c) 2013-2014, David Renshaw (dwrenshaw@gmail.com) * * See the LICENSE file in the capnproto-rust root directory. */ use capability::{ClientHook}; use common::*; use endian::*; use mask::*; use arena::*; use blob::*; use std; #[repr(u8)] #[deriving(Eq)] pub enum FieldSize { VOID = 0, BIT = 1, BYTE = 2, TWO_BYTES = 3, FOUR_BYTES = 4, EIGHT_BYTES = 5, POINTER = 6, INLINE_COMPOSITE = 7 } pub fn data_bits_per_element(size : FieldSize) -> BitCount0 { match size { VOID => 0, BIT => 1, BYTE => 8, TWO_BYTES => 16, FOUR_BYTES => 32, EIGHT_BYTES => 64, POINTER => 0, INLINE_COMPOSITE => 0 } } pub fn pointers_per_element(size : FieldSize) -> WirePointerCount { match size { POINTER => 1, _ => 0 } } // Port note: here, this is only valid for T a primitive type. In // capnproto-c++, it dispatches on the 'kind' of T and can handle // structs and pointers. pub fn element_size_for_type<T>() -> FieldSize { match bits_per_element::<T>() { 0 => VOID, 1 => BIT, 8 => BYTE, 16 => TWO_BYTES, 32 => FOUR_BYTES, 64 => EIGHT_BYTES, b => fail!("don't know how to get field size with {} bits", b) } } pub enum Kind { PRIMITIVE, BLOB, ENUM, STRUCT, UNION, INTERFACE, LIST, UNKNOWN } // In the future, Rust will have an alignment attribute // and we won't need the dummy field. 
// Wrapper forcing 8-byte (word) alignment of `words` via the u64 dummy.
pub struct AlignedData<T> {
    _dummy : u64,
    words : T
}

/// Size description of a struct type: data section size in words, pointer
/// section size in pointers, plus the preferred encoding when stored in a
/// list.
pub struct StructSize {
    data : WordCount16,
    pointers : WirePointerCount16,
    preferred_list_encoding : FieldSize
}

impl StructSize {
    /// Total footprint in words: data section plus pointer section.
    pub fn total(&self) -> WordCount {
        (self.data as WordCount) + (self.pointers as WordCount) * WORDS_PER_POINTER
    }
}

// The pointer kind, stored in the low 2 bits of a wire pointer's first
// 32-bit half (see WirePointer::kind()).
#[repr(u8)]
#[deriving(Eq)]
pub enum WirePointerKind {
    WP_STRUCT = 0,
    WP_LIST = 1,
    WP_FAR = 2,
    WP_OTHER = 3
}

/// One 64-bit wire pointer: a 32-bit offset+kind field followed by 32 bits
/// whose interpretation depends on the kind (struct ref, list ref, far ref,
/// or capability ref — see the transmuting accessors in `impl WirePointer`).
/// NOTE(review): WireValue (from the `endian` module) appears to handle
/// on-wire byte order — confirm there.
pub struct WirePointer {
    offset_and_kind : WireValue<u32>,
    upper32bits : u32,
}

/// Upper-half payload for a struct pointer: data words and pointer count.
pub struct StructRef {
    data_size : WireValue<WordCount16>,
    ptr_count : WireValue<WirePointerCount16>
}

/// Upper-half payload for a list pointer: element size in the low 3 bits,
/// element count (or word count for INLINE_COMPOSITE) in the upper 29.
pub struct ListRef {
    element_size_and_count : WireValue<u32>
}

/// Upper-half payload for a far pointer: the target segment's id.
pub struct FarRef {
    segment_id : WireValue<u32>
}

/// Upper-half payload for a capability pointer: index into the cap table.
pub struct CapRef {
    index : WireValue<u32>
}

impl StructRef {
    /// Total words occupied by the target struct (data + pointer sections).
    pub fn word_size(&self) -> WordCount {
        self.data_size.get() as WordCount +
            self.ptr_count.get() as WordCount * WORDS_PER_POINTER
    }

    /// Copies both section sizes out of a StructSize.
    #[inline]
    pub fn set_from_struct_size(&mut self, size : StructSize) {
        self.data_size.set(size.data);
        self.ptr_count.set(size.pointers);
    }

    /// Sets data-section words (`ds`) and pointer count (`rc`) directly.
    #[inline]
    pub fn set(&mut self, ds : WordCount16, rc : WirePointerCount16) {
        self.data_size.set(ds);
        self.ptr_count.set(rc);
    }
}

impl ListRef {
    /// Element size tag: low 3 bits, transmuted to the FieldSize enum.
    #[inline]
    pub fn element_size(&self) -> FieldSize {
        unsafe { std::cast::transmute( (self.element_size_and_count.get() & 7) as u8) }
    }

    /// Element count: the upper 29 bits.
    #[inline]
    pub fn element_count(&self) -> ElementCount {
        (self.element_size_and_count.get() >> 3) as uint
    }

    /// For INLINE_COMPOSITE lists the same 29-bit field holds a word count
    /// instead of an element count.
    #[inline]
    pub fn inline_composite_word_count(&self) -> WordCount {
        self.element_count()
    }

    /// Packs element size `es` and count `ec` into the 32-bit field.
    /// Panics if `ec` does not fit in 29 bits.
    #[inline]
    pub fn set(&mut self, es : FieldSize, ec : ElementCount) {
        assert!(ec < (1 << 29), "Lists are limited to 2**29 elements");
        self.element_size_and_count.set(((ec as u32) << 3 ) | (es as u32));
    }

    /// Packs an INLINE_COMPOSITE tag with word count `wc`.
    /// Panics if `wc` does not fit in 29 bits.
    #[inline]
    pub fn set_inline_composite(& mut self, wc : WordCount) {
        assert!(wc < (1 << 29), "Inline composite lists are limited to 2 ** 29 words");
        self.element_size_and_count.set((( wc as u32) << 3) | (INLINE_COMPOSITE as u32));
    }
}

impl FarRef {
    /// Records the id of the segment the far pointer lands in.
    #[inline]
    pub fn set(&mut self, si :
SegmentId) { self.segment_id.set(si); } }

impl CapRef {
    /// Records the capability-table index.
    #[inline]
    pub fn set(&mut self, index : u32) { self.index.set(index); }
}

impl WirePointer {
    /// Pointer kind, encoded in the low 2 bits of the first 32-bit half.
    #[inline]
    pub fn kind(&self) -> WirePointerKind {
        unsafe { std::cast::transmute((self.offset_and_kind.get() & 3) as u8) }
    }

    /// True only when the whole 32-bit field equals the OTHER tag, i.e. an
    /// OTHER pointer whose upper 30 bits are zero.
    #[inline]
    pub fn is_capability(&self) -> bool {
        self.offset_and_kind.get() == WP_OTHER as u32
    }

    /// Address this pointer points at: the word after the pointer itself
    /// plus the signed word offset held in bits 2..32.
    #[inline]
    pub fn target(&self) -> *Word {
        let thisAddr : *Word = unsafe {std::cast::transmute(&*self) };
        unsafe { thisAddr.offset(1 + ((self.offset_and_kind.get() as int) >> 2)) }
    }

    /// Mutable variant of `target()`; same offset arithmetic.
    #[inline]
    pub fn mut_target(&mut self) -> *mut Word {
        let thisAddr : *mut Word = unsafe {std::cast::transmute(&*self) };
        unsafe { thisAddr.offset(1 + ((self.offset_and_kind.get() as int) >> 2)) }
    }

    /// Encodes `kind` plus the word offset from this pointer to `target`
    /// (minus one, since offsets are relative to the word *after* the
    /// pointer). The segment argument is unused here.
    #[inline]
    pub fn set_kind_and_target(&mut self, kind : WirePointerKind,
                               target : *mut Word,
                               _segmentBuilder : *mut SegmentBuilder) {
        let thisAddr : int = unsafe {std::cast::transmute(&*self)};
        let targetAddr : int = unsafe {std::cast::transmute(target)};
        self.offset_and_kind.set(
            ((((targetAddr - thisAddr)/BYTES_PER_WORD as int) as i32 - 1) << 2) as u32
                | (kind as u32))
    }

    /// Encodes `kind` with a zero offset (target immediately follows).
    #[inline]
    pub fn set_kind_with_zero_offset(&mut self, kind : WirePointerKind) {
        self.offset_and_kind.set( kind as u32)
    }

    /// For the tag word of an INLINE_COMPOSITE list, the offset field holds
    /// the element count instead of an offset.
    #[inline]
    pub fn inline_composite_list_element_count(&self) -> ElementCount {
        (self.offset_and_kind.get() >> 2) as ElementCount
    }

    /// Writes an INLINE_COMPOSITE tag word: element count in bits 2..32,
    /// kind in the low 2 bits.
    #[inline]
    pub fn set_kind_and_inline_composite_list_element_count(
        &mut self, kind : WirePointerKind, element_count : ElementCount) {
        self.offset_and_kind.set((( element_count as u32 << 2) | (kind as u32)))
    }

    /// For a far pointer, the landing pad's word offset within the target
    /// segment lives in bits 3..32.
    #[inline]
    pub fn far_position_in_segment(&self) -> WordCount {
        (self.offset_and_kind.get() >> 3) as WordCount
    }

    /// Bit 2 of a far pointer: set when the landing pad is two words
    /// (another far pointer followed by a tag).
    #[inline]
    pub fn is_double_far(&self) -> bool {
        ((self.offset_and_kind.get() >> 2) & 1) != 0
    }

    /// Encodes a far pointer: pad position in bits 3..32, double-far flag
    /// in bit 2, FAR kind in the low 2 bits.
    #[inline]
    pub fn set_far(&mut self, is_double_far : bool, pos : WordCount) {
        self.offset_and_kind.set
            (( pos << 3) as u32 | (is_double_far as u32 << 2) | WP_FAR as u32);
    }

    /// Encodes a capability pointer (kind OTHER, zero upper bits) and
    /// stores the cap-table index in the upper half.
    #[inline]
    pub fn set_cap(&mut self,
index : u32) { self.offset_and_kind.set(WP_OTHER as u32); self.mut_cap_ref().set(index); } #[inline] pub fn struct_ref(&self) -> StructRef { unsafe { std::cast::transmute(self.upper32bits) } } #[inline] pub fn mut_struct_ref<'a>(&'a mut self) -> &'a mut StructRef { unsafe { std::cast::transmute(& self.upper32bits) } } #[inline] pub fn list_ref(&self) -> ListRef { unsafe { std::cast::transmute(self.upper32bits) } } #[inline] pub fn mut_list_ref<'a>(&'a self) -> &'a mut ListRef { unsafe { std::cast::transmute(& self.upper32bits) } } #[inline] pub fn far_ref(&self) -> FarRef { unsafe { std::cast::transmute(self.upper32bits) } } #[inline] pub fn mut_far_ref<'a>(&'a mut self) -> &'a mut FarRef { unsafe { std::cast::transmute(& self.upper32bits) } } #[inline] pub fn cap_ref(&self) -> CapRef { unsafe { std::cast::transmute(self.upper32bits) } } #[inline] pub fn mut_cap_ref<'a>(&'a mut self) -> &'a mut CapRef { unsafe { std::cast::transmute(& self.upper32bits) } } #[inline] pub fn is_null(&self) -> bool { (self.offset_and_kind.get() == 0) & (self.upper32bits == 0) } } struct SegmentAnd<T> { segment : *mut SegmentBuilder, value : T } mod WireHelpers { use std; use capability::ClientHook; use common::*; use layout::*; use arena::*; use blob::*; #[inline] pub fn round_bytes_up_to_words(bytes : ByteCount) -> WordCount { //# This code assumes 64-bit words. (bytes + 7) / BYTES_PER_WORD } //# The maximum object size is 4GB - 1 byte. If measured in bits, //# this would overflow a 32-bit counter, so we need to accept //# BitCount64. However, 32 bits is enough for the returned //# ByteCounts and WordCounts. #[inline] pub fn round_bits_up_to_words(bits : BitCount64) -> WordCount { //# This code assumes 64-bit words. 
((bits + 63) / (BITS_PER_WORD as u64)) as WordCount } #[allow(dead_code)] #[inline] pub fn round_bits_up_to_bytes(bits : BitCount64) -> ByteCount { ((bits + 7) / (BITS_PER_BYTE as u64)) as ByteCount } #[inline] pub unsafe fn bounds_check(segment : *SegmentReader, start : *Word, end : *Word) -> bool { //# If segment is null, this is an unchecked message, so we don't do bounds checks. return segment.is_null() || (*segment).contains_interval(start, end); } #[inline] pub unsafe fn allocate(reff : &mut *mut WirePointer, segment : &mut *mut SegmentBuilder, amount : WordCount, kind : WirePointerKind) -> *mut Word { let is_null = (**reff).is_null(); if !is_null { zero_object(*segment, *reff) } match (**segment).allocate(amount) { None => { //# Need to allocate in a new segment. We'll need to //# allocate an extra pointer worth of space to act as //# the landing pad for a far pointer. let amountPlusRef = amount + POINTER_SIZE_IN_WORDS; let allocation = (*(**segment).get_arena()).allocate(amountPlusRef); *segment = allocation.first(); let ptr = allocation.second(); //# Set up the original pointer to be a far pointer to //# the new segment. (**reff).set_far(false, (**segment).get_word_offset_to(ptr)); (**reff).mut_far_ref().segment_id.set((**segment).id); //# Initialize the landing pad to indicate that the //# data immediately follows the pad. *reff = std::cast::transmute(ptr); let ptr1 = ptr.offset(POINTER_SIZE_IN_WORDS as int); (**reff).set_kind_and_target(kind, ptr1, *segment); return ptr1; } Some(ptr) => { (**reff).set_kind_and_target(kind, ptr, *segment); return ptr; } } } #[inline] pub unsafe fn follow_builder_fars(reff : &mut * mut WirePointer, ref_target : *mut Word, segment : &mut *mut SegmentBuilder) -> *mut Word { //# If `ref` is a far pointer, follow it. On return, `ref` will //# have been updated to point at a WirePointer that contains //# the type information about the target object, and a pointer //# to the object contents is returned. 
        //# The caller must NOT use
        //# `ref->target()` as this may or may not actually return a
        //# valid pointer. `segment` is also updated to point at the
        //# segment which actually contains the object.
        //#
        //# If `ref` is not a far pointer, this simply returns
        //# `refTarget`. Usually, `refTarget` should be the same as
        //# `ref->target()`, but may not be in cases where `ref` is
        //# only a tag.

        if (**reff).kind() == WP_FAR {
            *segment = (*(**segment).get_arena()).get_segment((**reff).far_ref().segment_id.get());
            let pad : *mut WirePointer =
                std::cast::transmute((**segment).get_ptr_unchecked((**reff).far_position_in_segment()));
            if !(**reff).is_double_far() {
                *reff = pad;
                return (*pad).mut_target();
            }
            //# Landing pad is another far pointer. It is followed by a
            //# tag describing the pointed-to object.
            *reff = pad.offset(1);
            *segment = (*(**segment).get_arena()).get_segment((*pad).far_ref().segment_id.get());
            return (**segment).get_ptr_unchecked((*pad).far_position_in_segment());
        } else {
            ref_target
        }
    }

    //# Read-side counterpart of follow_builder_fars(): resolves (possibly
    //# double-) far pointers, bounds-checking each landing pad.
    #[inline]
    pub unsafe fn follow_fars(reff: &mut *WirePointer,
                              refTarget: *Word,
                              segment : &mut *SegmentReader) -> *Word {
        //# If the segment is null, this is an unchecked message,
        //# so there are no FAR pointers.
        if !(*segment).is_null() && (**reff).kind() == WP_FAR {
            *segment = (**segment).arena.try_get_segment((**reff).far_ref().segment_id.get());
            let ptr : *Word = (**segment).get_start_ptr().offset(
                (**reff).far_position_in_segment() as int);
            let padWords : int = if (**reff).is_double_far() { 2 } else { 1 };
            assert!(bounds_check(*segment, ptr, ptr.offset(padWords)));
            let pad : *WirePointer = std::cast::transmute(ptr);
            if !(**reff).is_double_far() {
                *reff = pad;
                return (*pad).target();
            } else {
                //# Landing pad is another far pointer. It is
                //# followed by a tag describing the pointed-to
                //# object.
                *reff = pad.offset(1);
                *segment = (**segment).arena.try_get_segment((*pad).far_ref().segment_id.get());
                return (**segment).get_start_ptr().offset((*pad).far_position_in_segment() as int);
            }
        } else {
            return refTarget;
        }
    }

    pub unsafe fn zero_object(mut segment : *mut SegmentBuilder, reff : *mut WirePointer) {
        //# Zero out the pointed-to object. Use when the pointer is
        //# about to be overwritten making the target object no longer
        //# reachable.
        match (*reff).kind() {
            WP_STRUCT | WP_LIST | WP_OTHER => {
                zero_object_helper(segment, reff, (*reff).mut_target())
            }
            WP_FAR => {
                // Follow the far pointer, zero the target, then zero the
                // landing pad itself (1 word, or 2 for a double-far).
                segment = (*(*segment).get_arena()).get_segment((*reff).far_ref().segment_id.get());
                let pad : *mut WirePointer =
                    std::cast::transmute((*segment).get_ptr_unchecked((*reff).far_position_in_segment()));
                if (*reff).is_double_far() {
                    segment = (*(*segment).get_arena()).get_segment((*pad).far_ref().segment_id.get());
                    zero_object_helper(segment,
                                       pad.offset(1),
                                       (*segment).get_ptr_unchecked((*pad).far_position_in_segment()));
                    std::ptr::set_memory(pad, 0u8, 2);
                } else {
                    zero_object(segment, pad);
                    std::ptr::set_memory(pad, 0u8, 1);
                }
            }
        }
    }

    //# Recursively zeroes the object described by `tag` whose body starts
    //# at `ptr`, including any sub-objects reachable through its pointers.
    pub unsafe fn zero_object_helper(segment : *mut SegmentBuilder,
                                     tag : *mut WirePointer,
                                     ptr: *mut Word) {
        match (*tag).kind() {
            WP_OTHER => { fail!("Don't know how to handle OTHER") }
            WP_STRUCT => {
                let pointerSection : *mut WirePointer =
                    std::cast::transmute(
                        ptr.offset((*tag).struct_ref().data_size.get() as int));
                let count = (*tag).struct_ref().ptr_count.get() as int;
                for i in range::<int>(0, count) {
                    zero_object(segment, pointerSection.offset(i));
                }
                std::ptr::set_memory(ptr, 0u8, (*tag).struct_ref().word_size());
            }
            WP_LIST => {
                match (*tag).list_ref().element_size() {
                    VOID => { }
                    BIT | BYTE | TWO_BYTES | FOUR_BYTES | EIGHT_BYTES => {
                        std::ptr::set_memory(
                            ptr, 0u8,
                            round_bits_up_to_words((
                                    (*tag).list_ref().element_count() *
                                        data_bits_per_element(
                                        (*tag).list_ref().element_size())) as u64))
                    }
                    POINTER => {
                        let count = (*tag).list_ref().element_count() as uint;
                        for i in
                            range::<int>(0, count as int) {
                            zero_object(segment, std::cast::transmute(ptr.offset(i)))
                        }
                        // Each pointer element occupies exactly one word.
                        std::ptr::set_memory(ptr, 0u8, count);
                    }
                    INLINE_COMPOSITE => {
                        let elementTag : *mut WirePointer = std::cast::transmute(ptr);
                        assert!((*elementTag).kind() == WP_STRUCT,
                                "Don't know how to handle non-STRUCT inline composite");
                        let data_size = (*elementTag).struct_ref().data_size.get();
                        let pointer_count = (*elementTag).struct_ref().ptr_count.get();

                        // Walk each element: skip its data section, then zero
                        // every object reachable from its pointer section.
                        let mut pos : *mut Word = ptr.offset(1);
                        let count = (*elementTag).inline_composite_list_element_count();
                        for _ in range(0, count) {
                            pos = pos.offset(data_size as int);
                            for _ in range(0, pointer_count as uint) {
                                zero_object(
                                    segment,
                                    std::cast::transmute::<*mut Word, *mut WirePointer>(pos));
                                pos = pos.offset(1);
                            }
                        }
                        // "+ 1" covers the tag word preceding the elements.
                        std::ptr::set_memory(ptr, 0u8,
                                             (*elementTag).struct_ref().word_size() * count + 1);
                    }
                }
            }
            WP_FAR => { fail!("Unexpected FAR pointer") }
        }
    }

    #[inline]
    pub unsafe fn zero_pointer_and_fars(segment : *mut SegmentBuilder, reff : *mut WirePointer) {
        //# Zero out the pointer itself and, if it is a far pointer,
        //# zero the landing pad as well, but do not zero the object
        //# body. Used when upgrading.
        if (*reff).kind() == WP_FAR {
            let pad = (*(*(*segment).get_arena()).get_segment((*reff).far_ref().segment_id.get()))
                .get_ptr_unchecked((*reff).far_position_in_segment());
            // A double-far landing pad is two words (far pointer + tag).
            let num_elements = if (*reff).is_double_far() { 2 } else { 1 };
            std::ptr::zero_memory(pad, num_elements);
        }
        std::ptr::zero_memory(reff, 1);
    }

    //# Recursively computes the total footprint (words plus capability
    //# count) of the object targeted by `reff`, bounds-checking every
    //# sub-object along the way.
    pub unsafe fn total_size(mut segment : *SegmentReader,
                             mut reff : *WirePointer,
                             mut nesting_limit : int) -> MessageSize {
        let mut result = MessageSize { word_count : 0, cap_count : 0};

        if (*reff).is_null() { return result };

        nesting_limit -= 1;

        let ptr = follow_fars(&mut reff, (*reff).target(), &mut segment);

        match (*reff).kind() {
            WP_STRUCT => {
                assert!(bounds_check(segment, ptr, ptr.offset((*reff).struct_ref().word_size() as int)),
                        "Message contains out-of-bounds struct pointer.");
                result.word_count += (*reff).struct_ref().word_size() as u64;

                let pointer_section : *WirePointer =
                    std::cast::transmute(ptr.offset((*reff).struct_ref().data_size.get() as int));
                let count : int = (*reff).struct_ref().ptr_count.get() as int;
                for i in range(0, count) {
                    result.plus_eq(total_size(segment, pointer_section.offset(i), nesting_limit));
                }
            }
            WP_LIST => {
                match (*reff).list_ref().element_size() {
                    VOID => {}
                    BIT | BYTE | TWO_BYTES | FOUR_BYTES | EIGHT_BYTES => {
                        let total_words = round_bits_up_to_words(
                            (*reff).list_ref().element_count() as u64 *
                                data_bits_per_element((*reff).list_ref().element_size()) as u64);
                        assert!(bounds_check(segment, ptr, ptr.offset(total_words as int)),
                                "Message contains out-of-bounds list pointer.");
                        result.word_count += total_words as u64;
                    }
                    POINTER => {
                        let count = (*reff).list_ref().element_count();
                        assert!(bounds_check(segment, ptr, ptr.offset((count * WORDS_PER_POINTER) as int)),
                                "Message contains out-of-bounds list pointer.");
                        result.word_count += count as u64 * WORDS_PER_POINTER as u64;
                        for i in range(0, count as int) {
                            result.plus_eq(
                                total_size(segment,
                                           std::cast::transmute::<*Word,*WirePointer>(ptr).offset(i),
                                           nesting_limit));
                        }
                    }
INLINE_COMPOSITE => { let word_count = (*reff).list_ref().inline_composite_word_count(); assert!(bounds_check(segment, ptr, ptr.offset(word_count as int + POINTER_SIZE_IN_WORDS as int)), "Message contains out-of-bounds list pointer."); result.word_count += word_count as u64 + POINTER_SIZE_IN_WORDS as u64; let element_tag : *WirePointer = std::cast::transmute(ptr); let count = (*element_tag).inline_composite_list_element_count(); assert!((*element_tag).kind() == WP_STRUCT, "Don't know how to handle non-STRUCT inline composite."); assert!((*element_tag).struct_ref().word_size() * count <= word_count, "INLINE_COMPOSITE list's elements overrun its word count"); let data_size = (*element_tag).struct_ref().data_size.get(); let pointer_count = (*element_tag).struct_ref().ptr_count.get(); let mut pos : *Word = ptr.offset(POINTER_SIZE_IN_WORDS as int); for _ in range(0, count) { pos = pos.offset(data_size as int); for _ in range(0, pointer_count) { result.plus_eq( total_size(segment, std::cast::transmute::<*Word,*WirePointer>(pos), nesting_limit)); pos = pos.offset(POINTER_SIZE_IN_WORDS as int); } } } } } WP_FAR => { fail!("Unexpedted FAR pointer."); } WP_OTHER => { if (*reff).is_capability() { result.cap_count += 1; } else { fail!("Unknown pointer type."); } } } result } pub unsafe fn transfer_pointer(dst_segment : *mut SegmentBuilder, dst : *mut WirePointer, src_segment : *mut SegmentBuilder, src : *mut WirePointer) { //# Make *dst point to the same object as *src. Both must //# reside in the same message, but can be in different //# segments. Not always-inline because this is rarely used. // //# Caller MUST zero out the source pointer after calling this, //# to make sure no later code mistakenly thinks the source //# location still owns the object. transferPointer() doesn't //# do this zeroing itself because many callers transfer //# several pointers in a loop then zero out the whole section. 
        assert!((*dst).is_null());
        // We expect the caller to ensure the target is already null so won't leak.

        if (*src).is_null() {
            std::ptr::zero_memory(dst, 1);
        } else if (*src).kind() == WP_FAR {
            std::ptr::copy_nonoverlapping_memory(dst, src as *WirePointer, 1);
        } else {
            transfer_pointer_split(dst_segment, dst, src_segment, src, (*src).mut_target());
        }
    }

    pub unsafe fn transfer_pointer_split(dst_segment : *mut SegmentBuilder, dst : *mut WirePointer,
                                         src_segment : *mut SegmentBuilder, src_tag : *mut WirePointer,
                                         src_ptr : *mut Word) {
        // Like the other transfer_pointer, but splits src into a tag and a
        // target. Particularly useful for OrphanBuilder.

        if dst_segment == src_segment {
            //# Same segment, so create a direct pointer.
            (*dst).set_kind_and_target((*src_tag).kind(), src_ptr, dst_segment);

            //# We can just copy the upper 32 bits. (Use memcpy() to comply with aliasing rules.)
            // (?)
            std::ptr::copy_nonoverlapping_memory(std::ptr::to_mut_unsafe_ptr(&mut (*dst).upper32bits),
                                                 std::ptr::to_unsafe_ptr(&(*src_tag).upper32bits),
                                                 1);
        } else {
            //# Need to create a far pointer. Try to allocate it in the
            //# same segment as the source, so that it doesn't need to
            //# be a double-far.
            match (*src_segment).allocate(1) {
                None => {
                    //# Darn, need a double-far.
                    fail!("unimplemented");
                }
                Some(landing_pad_word) => {
                    //# Simple landing pad is just a pointer.
let landing_pad : *mut WirePointer = std::cast::transmute(landing_pad_word); (*landing_pad).set_kind_and_target((*src_tag).kind(), src_ptr, src_segment); std::ptr::copy_nonoverlapping_memory( std::ptr::to_mut_unsafe_ptr(&mut (*landing_pad).upper32bits), std::ptr::to_unsafe_ptr(& (*src_tag).upper32bits), 1); (*dst).set_far(false, (*src_segment).get_word_offset_to(landing_pad_word)); (*dst).far_ref().set((*src_segment).get_segment_id()); } } } } #[inline] pub unsafe fn init_struct_pointer<'a>(mut reff : *mut WirePointer, mut segmentBuilder : *mut SegmentBuilder, size : StructSize) -> StructBuilder<'a> { let ptr : *mut Word = allocate(&mut reff, &mut segmentBuilder, size.total(), WP_STRUCT); (*reff).mut_struct_ref().set_from_struct_size(size); StructBuilder { segment : segmentBuilder, data : std::cast::transmute(ptr), pointers : std::cast::transmute( ptr.offset((size.data as uint) as int)), data_size : size.data as WordCount32 * (BITS_PER_WORD as BitCount32), pointer_count : size.pointers, bit0offset : 0 } } #[inline] pub unsafe fn get_writable_struct_pointer<'a>(mut reff : *mut WirePointer, mut segment : *mut SegmentBuilder, size : StructSize, default_value : *Word) -> StructBuilder<'a> { if (*reff).is_null() { if default_value.is_null() || (*std::cast::transmute::<*Word,*WirePointer>(default_value)).is_null() { return init_struct_pointer(reff, segment, size); } fail!("TODO") } let ref_target = (*reff).mut_target(); let mut old_ref = reff; let mut old_segment = segment; let old_ptr = follow_builder_fars(&mut old_ref, ref_target, &mut old_segment); assert!((*old_ref).kind() == WP_STRUCT, "Message contains non-struct pointer where struct pointer was expected."); let old_data_size = (*old_ref).struct_ref().data_size.get(); let old_pointer_count = (*old_ref).struct_ref().ptr_count.get(); let old_pointer_section : *mut WirePointer = std::cast::transmute(old_ptr.offset(old_data_size as int)); if old_data_size < size.data || old_pointer_count < size.pointers { //# The space 
allocated for this struct is too small. //# Unlike with readers, we can't just run with it and do //# bounds checks at access time, because how would we //# handle writes? Instead, we have to copy the struct to a //# new space now. let new_data_size = std::cmp::max(old_data_size, size.data); let new_pointer_count = std::cmp::max(old_pointer_count, size.pointers); let total_size = new_data_size as WordCount + new_pointer_count as WordCount * WORDS_PER_POINTER; //# Don't let allocate() zero out the object just yet. zero_pointer_and_fars(segment, reff); let ptr = allocate(&mut reff, &mut segment, total_size, WP_STRUCT); (*reff).struct_ref().set(new_data_size, new_pointer_count); //# Copy data section. // Note: copy_nonoverlapping memory's third argument is an element count, not a byte count. std::ptr::copy_nonoverlapping_memory(ptr, old_ptr as *Word, old_data_size as uint); //# Copy pointer section. let new_pointer_section : *mut WirePointer = std::cast::transmute(ptr.offset(new_data_size as int)); for i in range::<int>(0, old_pointer_count as int) { transfer_pointer(segment, new_pointer_section.offset(i), old_segment, old_pointer_section.offset(i)); } std::ptr::zero_memory(old_ptr, old_data_size as uint + old_pointer_count as uint); StructBuilder { segment : segment, data : std::cast::transmute(ptr), pointers : new_pointer_section, data_size : new_data_size as u32 * BITS_PER_WORD as u32, pointer_count : new_pointer_count, bit0offset : 0 } } else { StructBuilder { segment : old_segment, data : std::cast::transmute(old_ptr), pointers : old_pointer_section, data_size : old_data_size as u32 * BITS_PER_WORD as u32, pointer_count : old_pointer_count, bit0offset : 0 } } } #[inline] pub unsafe fn init_list_pointer<'a>(mut reff : *mut WirePointer, mut segmentBuilder : *mut SegmentBuilder, element_count : ElementCount, element_size : FieldSize) -> ListBuilder<'a> { match element_size { INLINE_COMPOSITE => { fail!("Should have called initStructListPointer() instead") } _ => { } 
        }

        let data_size : BitCount0 = data_bits_per_element(element_size);
        let pointer_count = pointers_per_element(element_size);
        // `step` is the distance between consecutive elements, in bits.
        let step = (data_size + pointer_count * BITS_PER_POINTER);
        let wordCount = round_bits_up_to_words(element_count as ElementCount64 * (step as u64));
        let ptr = allocate(&mut reff, &mut segmentBuilder, wordCount, WP_LIST);

        (*reff).mut_list_ref().set(element_size, element_count);

        ListBuilder {
            segment : segmentBuilder,
            ptr : std::cast::transmute(ptr),
            step : step,
            element_count : element_count,
            struct_data_size : data_size as u32,
            struct_pointer_count : pointer_count as u16
        }
    }

    //# Allocates and initializes a list of structs, returning a builder.
    //# Struct sizes that fit a primitive encoding are delegated to
    //# init_list_pointer(); otherwise an INLINE_COMPOSITE list (tag word
    //# followed by the elements) is created.
    #[inline]
    pub unsafe fn init_struct_list_pointer<'a>(mut reff : *mut WirePointer,
                                               mut segmentBuilder : *mut SegmentBuilder,
                                               element_count : ElementCount,
                                               element_size : StructSize) -> ListBuilder<'a> {
        match element_size.preferred_list_encoding {
            INLINE_COMPOSITE => { }
            otherEncoding => {
                return init_list_pointer(reff, segmentBuilder, element_count, otherEncoding);
            }
        }

        let wordsPerElement = element_size.total();

        //# Allocate the list, prefixed by a single WirePointer.
        let wordCount : WordCount = element_count * wordsPerElement;
        let ptr : *mut WirePointer =
            std::cast::transmute(allocate(&mut reff, &mut segmentBuilder,
                                          POINTER_SIZE_IN_WORDS + wordCount, WP_LIST));

        //# Initialize the pointer.
        (*reff).mut_list_ref().set_inline_composite(wordCount);
        (*ptr).set_kind_and_inline_composite_list_element_count(WP_STRUCT, element_count);
        (*ptr).mut_struct_ref().set_from_struct_size(element_size);

        // The elements begin one word past the tag.
        let ptr1 = ptr.offset(POINTER_SIZE_IN_WORDS as int);

        ListBuilder {
            segment : segmentBuilder,
            ptr : std::cast::transmute(ptr1),
            step : wordsPerElement * BITS_PER_WORD,
            element_count : element_count,
            struct_data_size : element_size.data as u32 * (BITS_PER_WORD as u32),
            struct_pointer_count : element_size.pointers
        }
    }

    //# Returns a builder for an existing non-struct list, validating that
    //# the stored element size is compatible with `element_size`.
    #[inline]
    pub unsafe fn get_writable_list_pointer<'a>(orig_ref : *mut WirePointer,
                                                orig_segment : *mut SegmentBuilder,
                                                element_size : FieldSize,
                                                default_value : *Word) -> ListBuilder<'a> {
        assert!(element_size != INLINE_COMPOSITE,
                "Use get_struct_list_{element,field}() for structs");

        if (*orig_ref).is_null() {
            if default_value.is_null() ||
                (*std::cast::transmute::<*Word,*WirePointer>(default_value)).is_null() {
                return ListBuilder::new_default();
            }
            fail!("TODO")
        }

        let orig_ref_target = (*orig_ref).mut_target();

        //# We must verify that the pointer has the right size. Unlike
        //# in getWritableStructListReference(), we never need to
        //# "upgrade" the data, because this method is called only for
        //# non-struct lists, and there is no allowed upgrade path *to*
        //# a non-struct list, only *from* them.

        let mut reff = orig_ref;
        let mut segment = orig_segment;
        let mut ptr = follow_builder_fars(&mut reff, orig_ref_target, &mut segment);

        assert!((*reff).kind() == WP_LIST,
                "Called get_list_{field,element}() but existing pointer is not a list");

        let old_size = (*reff).list_ref().element_size();

        if old_size == INLINE_COMPOSITE {
            //# The existing element size is INLINE_COMPOSITE, which
            //# means that it is at least two words, which makes it
            //# bigger than the expected element size. Since fields can
            //# only grow when upgraded, the existing data must have
            //# been written with a newer version of the protocol.
            //# We
            //# therefore never need to upgrade the data in this case,
            //# but we do need to validate that it is a valid upgrade
            //# from what we expected.

            //# Read the tag to get the actual element count.
            let tag : *WirePointer = std::cast::transmute(ptr);
            assert!((*tag).kind() == WP_STRUCT,
                    "INLINE_COMPOSITE list with non-STRUCT elements not supported.");
            ptr = ptr.offset(POINTER_SIZE_IN_WORDS as int);

            let data_size = (*tag).struct_ref().data_size.get();
            let pointer_count = (*tag).struct_ref().ptr_count.get();

            match element_size {
                VOID => {} //# Anything is a valid upgrade from Void.
                BIT | BYTE | TWO_BYTES | FOUR_BYTES | EIGHT_BYTES => {
                    assert!(data_size >= 1,
                            "Existing list value is incompatible with expected type.");
                }
                POINTER => {
                    assert!(pointer_count >= 1,
                            "Existing list value is incompatible with expected type.");
                    //# Adjust the pointer to point at the reference segment.
                    ptr = ptr.offset(data_size as int);
                }
                INLINE_COMPOSITE => {
                    unreachable!()
                }
            }
            //# OK, looks valid.

            ListBuilder {
                segment : segment,
                ptr : std::cast::transmute(ptr),
                element_count : (*tag).inline_composite_list_element_count(),
                step : (*tag).struct_ref().word_size() * BITS_PER_WORD,
                struct_data_size : data_size as u32 * BITS_PER_WORD as u32,
                struct_pointer_count : pointer_count
            }
        } else {
            let data_size = data_bits_per_element(old_size);
            let pointer_count = pointers_per_element(old_size);

            assert!(data_size >= data_bits_per_element(element_size),
                    "Existing list value is incompatible with expected type.");
            assert!(pointer_count >= pointers_per_element(element_size),
                    "Existing list value is incompatible with expected type.");

            let step = data_size + pointer_count * BITS_PER_POINTER;

            ListBuilder {
                segment : segment,
                ptr : std::cast::transmute(ptr),
                step : step,
                element_count : (*reff).list_ref().element_count(),
                struct_data_size : data_size as u32,
                struct_pointer_count : pointer_count as u16
            }
        }
    }

    //# Returns a builder for an existing struct list, upgrading the stored
    //# representation when it is smaller than `element_size` requires.
    #[inline]
    pub unsafe fn get_writable_struct_list_pointer<'a>(orig_ref : *mut WirePointer,
                                                       orig_segment :
                                                       *mut SegmentBuilder,
                                                       element_size : StructSize,
                                                       default_value : *Word) -> ListBuilder<'a> {
        let orig_ref_target = (*orig_ref).mut_target();

        if (*orig_ref).is_null() {
            if default_value.is_null() ||
                (*std::cast::transmute::<*Word,*WirePointer>(default_value)).is_null() {
                return ListBuilder::new_default();
            }
            fail!("unimplemented");
        }

        //# We must verify that the pointer has the right size and
        //# potentially upgrade it if not.

        let mut old_ref = orig_ref;
        let mut old_segment = orig_segment;

        let mut old_ptr = follow_builder_fars(&mut old_ref, orig_ref_target, &mut old_segment);

        assert!((*old_ref).kind() == WP_LIST,
                "Called getList\\{Field,Element\\} but existing pointer is not a list.");

        let old_size = (*old_ref).list_ref().element_size();

        if old_size == INLINE_COMPOSITE {
            //# Existing list is INLINE_COMPOSITE, but we need to verify that the sizes match.

            let old_tag : *WirePointer = std::cast::transmute(old_ptr);
            old_ptr = old_ptr.offset(POINTER_SIZE_IN_WORDS as int);
            assert!((*old_tag).kind() == WP_STRUCT,
                    "INLINE_COMPOSITE list with non-STRUCT elements not supported.");

            let old_data_size = (*old_tag).struct_ref().data_size.get();
            let old_pointer_count = (*old_tag).struct_ref().ptr_count.get();
            let old_step = old_data_size as uint + old_pointer_count as uint * WORDS_PER_POINTER;
            let element_count = (*old_tag).inline_composite_list_element_count();

            if old_data_size >= element_size.data && old_pointer_count >= element_size.pointers {
                //# Old size is at least as large as we need. Ship it.
                return ListBuilder {
                    segment : old_segment,
                    ptr : std::cast::transmute(old_ptr),
                    element_count : element_count,
                    step : old_step * BITS_PER_WORD,
                    struct_data_size : old_data_size as u32 * BITS_PER_WORD as u32,
                    struct_pointer_count : old_pointer_count
                };
            }

            //# The structs in this list are smaller than expected,
            //# probably written using an older version of the
            //# protocol. We need to make a copy and expand them.
            fail!("unimplemented");
        } else if old_size == element_size.preferred_list_encoding {
            //# Old size matches exactly.

            let data_size = data_bits_per_element(old_size);
            let pointer_count = pointers_per_element(old_size);
            let step = data_size + pointer_count * BITS_PER_POINTER;

            return ListBuilder {
                segment : old_segment,
                ptr : std::cast::transmute(old_ptr),
                step : step,
                element_count : (*old_ref).list_ref().element_count(),
                struct_data_size : data_size as u32,
                struct_pointer_count : pointer_count as u16
            };
        } else {
            fail!("unimplemented");
        }
    }

    //# Allocates a text blob of `size` bytes (plus NUL terminator) and
    //# returns a builder for it along with its segment.
    #[inline]
    pub unsafe fn init_text_pointer<'a>(mut reff : *mut WirePointer,
                                        mut segment : *mut SegmentBuilder,
                                        size : ByteCount) -> super::SegmentAnd<Text::Builder<'a>> {
        //# The byte list must include a NUL terminator.
        let byte_size = size + 1;

        //# Allocate the space.
        let ptr = allocate(&mut reff, &mut segment, round_bytes_up_to_words(byte_size), WP_LIST);

        //# Initialize the pointer.
        (*reff).mut_list_ref().set(BYTE, byte_size);

        return super::SegmentAnd {
            segment : segment,
            value : Text::Builder::new(std::cast::transmute(ptr), size)
        }
    }

    //# Allocates a text blob and copies `value` into it.
    #[inline]
    pub unsafe fn set_text_pointer<'a>(reff : *mut WirePointer,
                                       segment : *mut SegmentBuilder,
                                       value : &str) -> super::SegmentAnd<Text::Builder<'a>> {
        let value_bytes = value.as_bytes();
        let allocation = init_text_pointer(reff, segment, value_bytes.len());
        let builder = allocation.value;
        builder.as_mut_bytes().copy_memory(value_bytes);
        allocation
    }

    //# Returns a builder for an existing text blob, initializing it from
    //# `default_value` when the pointer is null.
    #[inline]
    pub unsafe fn get_writable_text_pointer<'a>(mut reff : *mut WirePointer,
                                                mut segment : *mut SegmentBuilder,
                                                default_value : *Word,
                                                default_size : ByteCount) -> Text::Builder<'a> {
        if (*reff).is_null() {
            if default_size == 0 {
                return Text::Builder::new(std::ptr::mut_null(), 0);
            } else {
                let builder = init_text_pointer(reff, segment, default_size).value;
                std::ptr::copy_nonoverlapping_memory::<u8>(builder.as_ptr(),
                                                           std::cast::transmute(default_value),
                                                           default_size);
                return builder;
            }
        } else {
            let ref_target = (*reff).mut_target();
            let ptr =
follow_builder_fars(&mut reff, ref_target, &mut segment); assert!((*reff).kind() == WP_LIST, "Called getText\\{Field,Element\\}() but existing pointer is not a list."); assert!((*reff).list_ref().element_size() == BYTE, "Called getText\\{Field,Element\\}() but existing list pointer is not byte-sized."); //# Subtract 1 from the size for the NUL terminator. return Text::Builder::new(std::cast::transmute(ptr), (*reff).list_ref().element_count() - 1); } } #[inline] pub unsafe fn init_data_pointer<'a>(mut reff : *mut WirePointer, mut segment : *mut SegmentBuilder, size : ByteCount) -> super::SegmentAnd<Data::Builder<'a>> { //# Allocate the space. let ptr = allocate(&mut reff, &mut segment, round_bytes_up_to_words(size), WP_LIST); //# Initialize the pointer. (*reff).mut_list_ref().set(BYTE, size); return super::SegmentAnd { segment : segment, value : Data::new_builder(std::cast::transmute(ptr), size) }; } #[inline] pub unsafe fn set_data_pointer<'a>(reff : *mut WirePointer, segment : *mut SegmentBuilder, value : &[u8]) -> super::SegmentAnd<Data::Builder<'a>> { let allocation = init_data_pointer(reff, segment, value.len()); allocation.value.copy_memory(value); return allocation; } #[inline] pub unsafe fn get_writable_data_pointer<'a>(mut reff : *mut WirePointer, mut segment : *mut SegmentBuilder, default_value : *Word, default_size : ByteCount) -> Data::Builder<'a> { if (*reff).is_null() { if default_size == 0 { return Data::new_builder(std::ptr::mut_null(), 0); } else { let builder = init_data_pointer(reff, segment, default_size).value; std::ptr::copy_nonoverlapping_memory::<u8>(builder.as_mut_ptr(), std::cast::transmute(default_value), default_size); return builder; } } else { let ref_target = (*reff).mut_target(); let ptr = follow_builder_fars(&mut reff, ref_target, &mut segment); assert!((*reff).kind() == WP_LIST, "Called getData\\{Field,Element\\}() but existing pointer is not a list."); assert!((*reff).list_ref().element_size() == BYTE, "Called 
getData\\{Field,Element\\}() but existing list pointer is not byte-sized."); return Data::new_builder(std::cast::transmute(ptr), (*reff).list_ref().element_count()); } } pub unsafe fn set_struct_pointer<'a>(mut segment : *mut SegmentBuilder, mut reff : *mut WirePointer, value : StructReader) -> super::SegmentAnd<*mut Word> { let data_size : WordCount = round_bits_up_to_words(value.data_size as u64); let total_size : WordCount = data_size + value.pointer_count as uint * WORDS_PER_POINTER; let ptr = allocate(&mut reff, &mut segment, total_size, WP_STRUCT); (*reff).struct_ref().set(data_size as u16, value.pointer_count); if value.data_size == 1 { *std::cast::transmute::<*mut Word, *mut u8>(ptr) = value.get_bool_field(0) as u8 } else { std::ptr::copy_nonoverlapping_memory::<Word>(ptr, std::cast::transmute(value.data), value.data_size as uint / BITS_PER_WORD); } let pointer_section : *mut WirePointer = std::cast::transmute(ptr.offset(data_size as int)); for i in range(0, value.pointer_count as int) { copy_pointer(segment, pointer_section.offset(i), value.segment, value.pointers.offset(i), value.nesting_limit); } super::SegmentAnd { segment : segment, value : ptr } } pub unsafe fn set_capability_pointer(segment : *mut SegmentBuilder, reff : *mut WirePointer, cap : ~ClientHook) { (*reff).set_cap((*(*segment).get_arena()).inject_cap(cap)); } pub unsafe fn set_list_pointer<'a>(mut segment : *mut SegmentBuilder, mut reff : *mut WirePointer, value : ListReader) -> super::SegmentAnd<*mut Word> { let total_size = round_bits_up_to_words((value.element_count * value.step) as u64); if value.step <= BITS_PER_WORD { //# List of non-structs. let ptr = allocate(&mut reff, &mut segment, total_size, WP_LIST); if value.struct_pointer_count == 1 { //# List of pointers. 
(*reff).list_ref().set(POINTER, value.element_count); for i in range(0, value.element_count as int) { copy_pointer(segment, std::cast::transmute::<*mut Word,*mut WirePointer>(ptr).offset(i), value.segment, std::cast::transmute::<*u8,*WirePointer>(value.ptr).offset(i), value.nesting_limit); } } else { //# List of data. let element_size = match value.step { 0 => VOID, 1 => BIT, 8 => BYTE, 16 => TWO_BYTES, 32 => FOUR_BYTES, 64 => EIGHT_BYTES, _ => { fail!("invalid list step size: {}", value.step) } }; (*reff).list_ref().set(element_size, value.element_count); std::ptr::copy_memory(ptr, std::cast::transmute::<*u8,*Word>(value.ptr), total_size); } super::SegmentAnd { segment : segment, value : ptr } } else { //# List of structs. let ptr = allocate(&mut reff, &mut segment, total_size + POINTER_SIZE_IN_WORDS, WP_LIST); (*reff).list_ref().set_inline_composite(total_size); let data_size = round_bits_up_to_words(value.struct_data_size as u64); let pointer_count = value.struct_pointer_count; let tag : *mut WirePointer = std::cast::transmute(ptr); (*tag).set_kind_and_inline_composite_list_element_count(WP_STRUCT, value.element_count); (*tag).struct_ref().set(data_size as u16, pointer_count); let mut dst = ptr.offset(POINTER_SIZE_IN_WORDS as int); let mut src : *Word = std::cast::transmute(value.ptr); for _ in range(0, value.element_count) { std::ptr::copy_nonoverlapping_memory(dst, src, value.struct_data_size as uint / BITS_PER_WORD); dst = dst.offset(data_size as int); src = src.offset(data_size as int); for _ in range(0, pointer_count) { copy_pointer(segment, std::cast::transmute(dst), value.segment, std::cast::transmute(src), value.nesting_limit); dst = dst.offset(POINTER_SIZE_IN_WORDS as int); src = src.offset(POINTER_SIZE_IN_WORDS as int); } } super::SegmentAnd { segment : segment, value : ptr } } } pub unsafe fn copy_pointer(dst_segment : *mut SegmentBuilder, dst : *mut WirePointer, mut src_segment : *SegmentReader, mut src : *WirePointer, nesting_limit : int) -> 
        super::SegmentAnd<*mut Word> {
        let src_target = (*src).target();

        if (*src).is_null() {
            std::ptr::zero_memory(dst, 1);
            return super::SegmentAnd { segment : dst_segment, value : std::ptr::mut_null() };
        }

        let mut ptr = follow_fars(&mut src, src_target, &mut src_segment);
        // TODO what if ptr is null?

        match (*src).kind() {
            WP_STRUCT => {
                assert!(nesting_limit > 0,
                        "Message is too deeply-nested or contains cycles. See ReadOptions.");

                assert!(bounds_check(src_segment, ptr,
                                     ptr.offset((*src).struct_ref().word_size() as int)),
                        "Message contains out-of-bounds struct pointer.");
                set_struct_pointer(
                    dst_segment, dst,
                    StructReader {
                        segment : src_segment,
                        data : std::cast::transmute(ptr),
                        pointers : std::cast::transmute(ptr.offset((*src).struct_ref().data_size.get() as int)),
                        data_size : (*src).struct_ref().data_size.get() as u32 * BITS_PER_WORD as u32,
                        pointer_count : (*src).struct_ref().ptr_count.get(),
                        bit0offset : 0,
                        nesting_limit : nesting_limit - 1
                    })
            }
            WP_LIST => {
                let element_size = (*src).list_ref().element_size();
                assert!(nesting_limit > 0,
                        "Message is too deeply-nested or contains cycles. See ReadOptions.");

                if element_size == INLINE_COMPOSITE {
                    let word_count = (*src).list_ref().inline_composite_word_count();
                    let tag : *WirePointer = std::cast::transmute(ptr);
                    ptr = ptr.offset(POINTER_SIZE_IN_WORDS as int);

                    // offset(-1) re-includes the tag word in the check.
                    assert!(bounds_check(src_segment, ptr.offset(-1), ptr.offset(word_count as int)),
                            "Message contains out-of-bounds list pointer.");

                    assert!((*tag).kind() == WP_STRUCT,
                            "INLINE_COMPOSITE lists of non-STRUCT type are not supported.");

                    let element_count = (*tag).inline_composite_list_element_count();
                    let words_per_element = (*tag).struct_ref().word_size();

                    assert!(words_per_element * element_count <= word_count,
                            "INLINE_COMPOSITE list's elements overrun its word count.");

                    set_list_pointer(
                        dst_segment, dst,
                        ListReader {
                            segment : src_segment,
                            ptr : std::cast::transmute(ptr),
                            element_count : element_count,
                            step : words_per_element * BITS_PER_WORD,
                            struct_data_size : (*tag).struct_ref().data_size.get() as u32 * BITS_PER_WORD as u32,
                            struct_pointer_count : (*tag).struct_ref().ptr_count.get(),
                            nesting_limit : nesting_limit - 1
                        })
                } else {
                    let data_size = data_bits_per_element(element_size);
                    let pointer_count = pointers_per_element(element_size);
                    let step = data_size + pointer_count * BITS_PER_POINTER;
                    let element_count = (*src).list_ref().element_count();
                    let word_count = round_bits_up_to_words(element_count as u64 * step as u64);

                    assert!(bounds_check(src_segment, ptr, ptr.offset(word_count as int)),
                            "Message contains out-of-bounds list pointer.");

                    set_list_pointer(
                        dst_segment, dst,
                        ListReader {
                            segment : src_segment,
                            ptr : std::cast::transmute(ptr),
                            element_count : element_count,
                            step : step,
                            struct_data_size : data_size as u32,
                            struct_pointer_count : pointer_count as u16,
                            nesting_limit : nesting_limit - 1
                        })
                }
            }
            WP_FAR => {
                fail!("Far pointer should have been handled above");
            }
            WP_OTHER => {
                assert!((*src).is_capability(), "Unknown pointer type.");
                match (*src_segment).arena.extract_cap((*src).cap_ref().index.get() as uint) {
                    Some(cap) => {
set_capability_pointer(dst_segment, dst, cap); return super::SegmentAnd { segment : dst_segment, value : std::ptr::mut_null() }; } None => { fail!("Message contained invalid capability pointer.") } } } } } #[inline] pub unsafe fn read_struct_pointer<'a>(mut segment: *SegmentReader, mut reff : *WirePointer, defaultValue : *Word, nesting_limit : int) -> StructReader<'a> { if (*reff).is_null() { if defaultValue.is_null() || (*std::cast::transmute::<*Word,*WirePointer>(defaultValue)).is_null() { return StructReader::new_default(); } //segment = std::ptr::null(); //reff = std::cast::transmute::<*Word,*WirePointer>(defaultValue); fail!("default struct values unimplemented"); } let refTarget : *Word = (*reff).target(); assert!(nesting_limit > 0, "Message is too deeply-nested or contains cycles."); let ptr = follow_fars(&mut reff, refTarget, &mut segment); let data_size_words = (*reff).struct_ref().data_size.get(); assert!((*reff).kind() == WP_STRUCT, "Message contains non-struct pointer where struct pointer was expected."); assert!(bounds_check(segment, ptr, ptr.offset((*reff).struct_ref().word_size() as int)), "Message contains out-of-bounds struct pointer."); StructReader {segment : segment, data : std::cast::transmute(ptr), pointers : std::cast::transmute(ptr.offset(data_size_words as int)), data_size : data_size_words as u32 * BITS_PER_WORD as BitCount32, pointer_count : (*reff).struct_ref().ptr_count.get(), bit0offset : 0, nesting_limit : nesting_limit - 1 } } #[inline] pub unsafe fn read_capability_pointer(segment : *SegmentReader, reff : *WirePointer, _nesting_limit : int) -> ~ClientHook { if (*reff).is_null() { fail!("broken cap factory is unimplemented"); } else if !(*reff).is_capability() { fail!("Message contains non-capability pointer where capability pointer was expected."); } else { let n = (*reff).cap_ref().index.get() as uint; match (*segment).arena.extract_cap(n) { Some(client_hook) => { client_hook } None => { fail!("Message contains invalid capability 
pointer: {}", n) } } } } #[inline] pub unsafe fn read_list_pointer<'a>(mut segment: *SegmentReader, mut reff : *WirePointer, defaultValue : *Word, expectedElementSize : FieldSize, nesting_limit : int ) -> ListReader<'a> { if (*reff).is_null() { if defaultValue.is_null() || (*std::cast::transmute::<*Word,*WirePointer>(defaultValue)).is_null() { return ListReader::new_default(); } fail!("list default values unimplemented"); } let refTarget : *Word = (*reff).target(); if nesting_limit <= 0 { fail!("nesting limit exceeded"); } let mut ptr : *Word = follow_fars(&mut reff, refTarget, &mut segment); assert!((*reff).kind() == WP_LIST, "Message contains non-list pointer where list pointer was expected {:?}", reff); let list_ref = (*reff).list_ref(); match list_ref.element_size() { INLINE_COMPOSITE => { let wordCount = list_ref.inline_composite_word_count(); let tag: *WirePointer = std::cast::transmute(ptr); ptr = ptr.offset(1); assert!(bounds_check(segment, ptr.offset(-1), ptr.offset(wordCount as int))); assert!((*tag).kind() == WP_STRUCT, "INLINE_COMPOSITE lists of non-STRUCT type are not supported"); let size = (*tag).inline_composite_list_element_count(); let struct_ref = (*tag).struct_ref(); let wordsPerElement = struct_ref.word_size(); assert!(size * wordsPerElement <= wordCount, "INLINE_COMPOSITE list's elements overrun its word count"); //# If a struct list was not expected, then presumably //# a non-struct list was upgraded to a struct list. //# We need to manipulate the pointer to point at the //# first field of the struct. Together with the //# "stepBits", this will allow the struct list to be //# accessed as if it were a primitive list without //# branching. //# Check whether the size is compatible. 
match expectedElementSize { VOID => {} BIT => fail!("Expected a bit list, but got a list of structs"), BYTE | TWO_BYTES | FOUR_BYTES | EIGHT_BYTES => { assert!(struct_ref.data_size.get() > 0, "Expected a primitive list, but got a list of pointer-only structs") } POINTER => { ptr = ptr.offset(struct_ref.data_size.get() as int); assert!(struct_ref.ptr_count.get() > 0, "Expected a pointer list, but got a list of data-only structs") } INLINE_COMPOSITE => {} } ListReader { segment : segment, ptr : std::cast::transmute(ptr), element_count : size, step : wordsPerElement * BITS_PER_WORD, struct_data_size : struct_ref.data_size.get() as u32 * (BITS_PER_WORD as u32), struct_pointer_count : struct_ref.ptr_count.get() as u16, nesting_limit : nesting_limit - 1 } } _ => { //# This is a primitive or pointer list, but all such //# lists can also be interpreted as struct lists. We //# need to compute the data size and pointer count for //# such structs. let data_size = data_bits_per_element(list_ref.element_size()); let pointer_count = pointers_per_element(list_ref.element_size()); let step = data_size + pointer_count * BITS_PER_POINTER; assert!( bounds_check( segment, ptr, ptr.offset( round_bits_up_to_words( (list_ref.element_count() * step) as u64) as int))); //# Verify that the elements are at least as large as //# the expected type. Note that if we expected //# INLINE_COMPOSITE, the expected sizes here will be //# zero, because bounds checking will be performed at //# field access time. So this check here is for the //# case where we expected a list of some primitive or //# pointer type. 
let expectedDataBitsPerElement = data_bits_per_element(expectedElementSize); let expectedPointersPerElement = pointers_per_element(expectedElementSize); assert!(expectedDataBitsPerElement <= data_size); assert!(expectedPointersPerElement <= pointer_count) ListReader { segment : segment, ptr : std::cast::transmute(ptr), element_count : list_ref.element_count(), step : step, struct_data_size : data_size as u32, struct_pointer_count : pointer_count as u16, nesting_limit : nesting_limit - 1 } } } } #[inline] pub unsafe fn read_text_pointer<'a>(mut segment : *SegmentReader, mut reff : *WirePointer, default_value : *Word, default_size : ByteCount ) -> Text::Reader<'a> { if reff.is_null() || (*reff).is_null() { return Text::new_reader(std::cast::transmute(default_value), default_size); } let refTarget = (*reff).target(); let ptr : *Word = follow_fars(&mut reff, refTarget, &mut segment); let list_ref = (*reff).list_ref(); let size : uint = list_ref.element_count(); assert!((*reff).kind() == WP_LIST, "Message contains non-list pointer where text was expected"); assert!(list_ref.element_size() == BYTE); assert!(bounds_check(segment, ptr, ptr.offset(round_bytes_up_to_words(size) as int))); assert!(size > 0, "Message contains text that is not NUL-terminated"); let str_ptr = std::cast::transmute::<*Word,*u8>(ptr); assert!((*str_ptr.offset((size - 1) as int)) == 0u8, "Message contains text that is not NUL-terminated"); Text::new_reader(str_ptr, size-1) } #[inline] pub unsafe fn read_data_pointer<'a>(mut segment : *SegmentReader, mut reff : *WirePointer, default_value : *Word, default_size : ByteCount ) -> Data::Reader<'a> { if reff.is_null() || (*reff).is_null() { return Data::new_reader(std::cast::transmute(default_value), default_size); } let refTarget = (*reff).target(); let ptr : *Word = follow_fars(&mut reff, refTarget, &mut segment); let list_ref = (*reff).list_ref(); let size : uint = list_ref.element_count(); assert!((*reff).kind() == WP_LIST, "Message contains non-list 
pointer where text was expected"); assert!(list_ref.element_size() == BYTE, "Message contains list pointer of non-bytes where data was expected"); assert!(bounds_check(segment, ptr, ptr.offset(round_bytes_up_to_words(size) as int)), "Message contains out-of-bounds data pointer."); Data::new_reader(std::cast::transmute(ptr), size) } } static zero : u64 = 0; fn zero_pointer() -> *WirePointer { unsafe {std::cast::transmute(std::ptr::to_unsafe_ptr(&zero))}} pub struct PointerReader<'a> { segment : *SegmentReader, pointer : *WirePointer, nesting_limit : int } impl <'a> PointerReader<'a> { pub fn new_default<'b>() -> PointerReader<'b> { PointerReader { segment : std::ptr::null(), pointer : std::ptr::null(), nesting_limit : 0x7fffffff } } pub fn get_root<'b>(segment : *SegmentReader, location : *Word, nesting_limit : int) -> PointerReader<'b> { unsafe { assert!(WireHelpers::bounds_check(segment, location, location.offset(POINTER_SIZE_IN_WORDS as int)), "Root location out of bounds."); PointerReader { segment : segment, pointer : std::cast::transmute(location), nesting_limit : nesting_limit } } } pub fn get_root_unchecked<'b>(location : *Word) -> PointerReader<'b> { PointerReader { segment : std::ptr::null(), pointer : unsafe { std::cast::transmute(location) }, nesting_limit : 0x7fffffff } } pub fn is_null(&self) -> bool { self.pointer.is_null() || unsafe { (*self.pointer).is_null() } } pub fn get_struct(&self, default_value: *Word) -> StructReader<'a> { let reff : *WirePointer = if self.pointer.is_null() { zero_pointer() } else { self.pointer }; unsafe { WireHelpers::read_struct_pointer(self.segment, reff, default_value, self.nesting_limit) } } pub fn get_list(&self, expected_element_size : FieldSize, default_value : *Word) -> ListReader<'a> { let reff = if self.pointer.is_null() { zero_pointer() } else { self.pointer }; unsafe { WireHelpers::read_list_pointer(self.segment, reff, default_value, expected_element_size, self.nesting_limit) } } pub fn get_text(&self, 
default_value : *Word, default_size : ByteCount) -> Text::Reader<'a> { unsafe { WireHelpers::read_text_pointer(self.segment, self.pointer, default_value, default_size) } } pub fn get_data(&self, default_value : *Word, default_size : ByteCount) -> Data::Reader<'a> { unsafe { WireHelpers::read_data_pointer(self.segment, self.pointer, default_value, default_size) } } pub fn get_capability(&self) -> ~ClientHook { let reff : *WirePointer = if self.pointer.is_null() { zero_pointer() } else { self.pointer }; unsafe { WireHelpers::read_capability_pointer(self.segment, reff, self.nesting_limit) } } pub fn total_size(&self) -> MessageSize { unsafe { WireHelpers::total_size(self.segment, self.pointer, self.nesting_limit) } } } pub struct PointerBuilder<'a> { segment : *mut SegmentBuilder, pointer : *mut WirePointer } impl <'a> PointerBuilder<'a> { #[inline] pub fn get_root(segment : *mut SegmentBuilder, location : *mut Word) -> PointerBuilder<'a> { PointerBuilder {segment : segment, pointer : unsafe { std::cast::transmute(location) }} } pub fn is_null(&self) -> bool { unsafe { (*self.pointer).is_null() } } pub fn get_struct(&self, size : StructSize, default_value : *Word) -> StructBuilder<'a> { unsafe { WireHelpers::get_writable_struct_pointer( self.pointer, self.segment, size, default_value) } } pub fn get_list(&self, element_size : FieldSize, default_value : *Word) -> ListBuilder<'a> { unsafe { WireHelpers::get_writable_list_pointer( self.pointer, self.segment, element_size, default_value) } } pub fn get_struct_list(&self, element_size : StructSize, default_value : *Word) -> ListBuilder<'a> { unsafe { WireHelpers::get_writable_struct_list_pointer( self.pointer, self.segment, element_size, default_value) } } pub fn get_text(&self, default_value : *Word, default_size : ByteCount) -> Text::Builder<'a> { unsafe { WireHelpers::get_writable_text_pointer( self.pointer, self.segment, default_value, default_size) } } pub fn get_data(&self, default_value : *Word, default_size : 
ByteCount) -> Data::Builder<'a> { unsafe { WireHelpers::get_writable_data_pointer( self.pointer, self.segment, default_value, default_size) } } pub fn get_capability(&self) -> ~ClientHook { unsafe { WireHelpers::read_capability_pointer( std::ptr::to_unsafe_ptr(&(*self.segment).reader), self.pointer as *WirePointer, std::int::MAX) } } pub fn init_struct(&self, size : StructSize) -> StructBuilder<'a> { unsafe { WireHelpers::init_struct_pointer(self.pointer, self.segment, size) } } pub fn init_list(&self, element_size : FieldSize, element_count : ElementCount) -> ListBuilder<'a> { unsafe { WireHelpers::init_list_pointer( self.pointer, self.segment, element_count, element_size) } } pub fn init_struct_list(&self, element_count : ElementCount, element_size : StructSize) -> ListBuilder<'a> { unsafe { WireHelpers::init_struct_list_pointer( self.pointer, self.segment, element_count, element_size) } } pub fn init_text(&self, size : ByteCount) -> Text::Builder<'a> { unsafe { WireHelpers::init_text_pointer(self.pointer, self.segment, size).value } } pub fn init_data(&self, size : ByteCount) -> Data::Builder<'a> { unsafe { WireHelpers::init_data_pointer(self.pointer, self.segment, size).value } } pub fn set_struct(&self, value : &StructReader) { unsafe { WireHelpers::set_struct_pointer(self.segment, self.pointer, *value); } } pub fn set_list(&self, value : &ListReader) { unsafe { WireHelpers::set_list_pointer(self.segment, self.pointer, *value); } } pub fn set_text(&self, value : &str) { unsafe { WireHelpers::set_text_pointer(self.pointer, self.segment, value); } } pub fn set_data(&self, value : &[u8]) { unsafe { WireHelpers::set_data_pointer(self.pointer, self.segment, value); } } pub fn set_capability(&self, cap : ~ClientHook) { unsafe { WireHelpers::set_capability_pointer(self.segment, self.pointer, cap); } } pub fn clear(&self) { unsafe { WireHelpers::zero_object(self.segment, self.pointer); std::ptr::zero_memory(self.pointer, 1); } } pub fn as_reader(&self) -> 
PointerReader<'a> { unsafe { let segment_reader = &(*self.segment).reader; PointerReader { segment : segment_reader, pointer : self.pointer as *WirePointer, nesting_limit : 0x7fffffff } } } } pub trait FromStructReader<'a> { fn new(reader : StructReader<'a>) -> Self; } pub struct StructReader<'a> { segment : *SegmentReader, data : *u8, pointers : *WirePointer, data_size : BitCount32, pointer_count : WirePointerCount16, bit0offset : BitCount8, nesting_limit : int } impl <'a> StructReader<'a> { pub fn new_default() -> StructReader { StructReader { segment : std::ptr::null(), data : std::ptr::null(), pointers : std::ptr::null(), data_size : 0, pointer_count : 0, bit0offset : 0, nesting_limit : 0x7fffffff} } pub fn get_data_section_size(&self) -> BitCount32 { self.data_size } pub fn get_pointer_section_size(&self) -> WirePointerCount16 { self.pointer_count } pub fn get_data_section_as_blob(&self) -> uint { fail!("unimplemented") } #[inline] pub fn get_data_field<T:Clone + std::num::Zero>(&self, offset : ElementCount) -> T { // We need to check the offset because the struct may have // been created with an old version of the protocol that did // not contain the field. 
if (offset + 1) * bits_per_element::<T>() <= self.data_size as uint { unsafe { let dwv : *WireValue<T> = std::cast::transmute(self.data); (*dwv.offset(offset as int)).get() } } else { return std::num::Zero::zero() } } #[inline] pub fn get_bool_field(&self, offset : ElementCount) -> bool { let mut boffset : BitCount32 = offset as BitCount32; if boffset < self.data_size { if offset == 0 { boffset = self.bit0offset as BitCount32; } unsafe { let b : *u8 = self.data.offset((boffset as uint / BITS_PER_BYTE) as int); ((*b) & (1 << (boffset % BITS_PER_BYTE as u32 ))) != 0 } } else { false } } #[inline] pub fn get_data_field_mask<T:Clone + std::num::Zero + Mask>(&self, offset : ElementCount, mask : T) -> T { Mask::mask(self.get_data_field(offset), mask) } #[inline] pub fn get_bool_field_mask(&self, offset : ElementCount, mask : bool) -> bool { self.get_bool_field(offset) ^ mask } #[inline] pub fn get_pointer_field(&self, ptr_index : WirePointerCount) -> PointerReader<'a> { if ptr_index < self.pointer_count as WirePointerCount { PointerReader { segment : self.segment, pointer : unsafe { self.pointers.offset(ptr_index as int) }, nesting_limit : self.nesting_limit } } else { PointerReader::new_default() } } pub fn total_size(&self) -> MessageSize { let mut result = MessageSize { word_count : WireHelpers::round_bits_up_to_words(self.data_size as u64) as u64 + self.pointer_count as u64 * WORDS_PER_POINTER as u64, cap_count : 0 }; for i in range(0, self.pointer_count as int) { unsafe { result.plus_eq(WireHelpers::total_size(self.segment, self.pointers.offset(i), self.nesting_limit)); } } // TODO when we have read limiting: segment->unread() result } } pub trait HasStructSize { fn struct_size(unused_self : Option<Self>) -> StructSize; } pub trait FromStructBuilder<'a> { fn new(structBuilder : StructBuilder<'a>) -> Self; } pub struct StructBuilder<'a> { segment : *mut SegmentBuilder, data : *mut u8, pointers : *mut WirePointer, data_size : BitCount32, pointer_count : 
WirePointerCount16, bit0offset : BitCount8 } impl <'a> StructBuilder<'a> { pub fn as_reader(&self) -> StructReader<'a> { unsafe { let segmentReader = &(*self.segment).reader; StructReader { segment : std::ptr::to_unsafe_ptr(segmentReader), data : std::cast::transmute(self.data), pointers : std::cast::transmute(self.pointers), data_size : self.data_size, pointer_count : self.pointer_count, bit0offset : self.bit0offset, nesting_limit : 0x7fffffff } } } #[inline] pub fn set_data_field<T:Clone>(&self, offset : ElementCount, value : T) { unsafe { let ptr : *mut WireValue<T> = std::cast::transmute(self.data); (*ptr.offset(offset as int)).set(value) } } #[inline] pub fn set_data_field_mask<T:Clone + std::num::Zero + Mask>(&self, offset : ElementCount, value : T, mask : T) { self.set_data_field(offset, Mask::mask(value, mask)); } #[inline] pub fn get_data_field<T:Clone>(&self, offset : ElementCount) -> T { unsafe { let ptr : *mut WireValue<T> = std::cast::transmute(self.data); (*ptr.offset(offset as int)).get() } } #[inline] pub fn get_data_field_mask<T:Clone + std::num::Zero + Mask>(&self, offset : ElementCount, mask : T) -> T { Mask::mask(self.get_data_field(offset), mask) } #[inline] pub fn set_bool_field(&self, offset : ElementCount, value : bool) { //# This branch should be compiled out whenever this is //# inlined with a constant offset. 
let boffset : BitCount0 = if offset == 0 { self.bit0offset as uint } else { offset }; let b = unsafe { self.data.offset((boffset / BITS_PER_BYTE) as int)}; let bitnum = boffset % BITS_PER_BYTE; unsafe { (*b) = (( (*b) & !(1 << bitnum)) | (value as u8 << bitnum)) } } #[inline] pub fn set_bool_field_mask(&self, offset : ElementCount, value : bool, mask : bool) { self.set_bool_field(offset , value ^ mask); } #[inline] pub fn get_bool_field(&self, offset : ElementCount) -> bool { let boffset : BitCount0 = if offset == 0 {self.bit0offset as BitCount0} else {offset}; let b = unsafe { self.data.offset((boffset / BITS_PER_BYTE) as int) }; unsafe { ((*b) & (1 << (boffset % BITS_PER_BYTE ))) != 0 } } #[inline] pub fn get_bool_field_mask(&self, offset : ElementCount, mask : bool) -> bool { self.get_bool_field(offset) ^ mask } #[inline] pub fn get_pointer_field(&self, ptr_index : WirePointerCount) -> PointerBuilder<'a> { PointerBuilder { segment : self.segment, pointer : unsafe { self.pointers.offset(ptr_index as int) } } } } pub struct ListReader<'a> { segment : *SegmentReader, ptr : *u8, element_count : ElementCount, step : BitCount0, struct_data_size : BitCount32, struct_pointer_count : WirePointerCount16, nesting_limit : int } impl <'a> ListReader<'a> { pub fn new_default() -> ListReader { ListReader { segment : std::ptr::null(), ptr : std::ptr::null(), element_count : 0, step: 0, struct_data_size : 0, struct_pointer_count : 0, nesting_limit : 0x7fffffff} } #[inline] pub fn size(&self) -> ElementCount { self.element_count } pub fn get_struct_element(&self, index : ElementCount) -> StructReader<'a> { assert!(self.nesting_limit > 0, "Message is too deeply-nested or contains cycles"); let indexBit : BitCount64 = index as ElementCount64 * (self.step as BitCount64); let structData : *u8 = unsafe { self.ptr.offset((indexBit as uint / BITS_PER_BYTE) as int) }; let structPointers : *WirePointer = unsafe { std::cast::transmute( structData.offset((self.struct_data_size as uint / 
BITS_PER_BYTE) as int)) }; /* assert!(self.struct_pointer_count == 0 || structPointers % BYTES_PER_POINTER == 0, "Pointer section of struct list element not aligned" ); */ StructReader { segment : self.segment, data : structData, pointers : structPointers, data_size : self.struct_data_size as BitCount32, pointer_count : self.struct_pointer_count, bit0offset : (indexBit % (BITS_PER_BYTE as u64)) as u8, nesting_limit : self.nesting_limit - 1 } } #[inline] pub fn get_pointer_element(&self, index : ElementCount) -> PointerReader<'a> { PointerReader { segment : self.segment, pointer : unsafe { std::cast::transmute(self.ptr.offset((index * self.step / BITS_PER_BYTE) as int)) }, nesting_limit : self.nesting_limit } } } pub struct ListBuilder<'a> { segment : *mut SegmentBuilder, ptr : *mut u8, element_count : ElementCount, step : BitCount0, struct_data_size : BitCount32, struct_pointer_count : WirePointerCount16 } impl <'a> ListBuilder<'a> { #[inline] pub fn new_default<'a>() -> ListBuilder<'a> { ListBuilder { segment : std::ptr::mut_null(), ptr : std::ptr::mut_null(), element_count : 0, step : 0, struct_data_size : 0, struct_pointer_count : 0 } } #[inline] pub fn size(&self) -> ElementCount { self.element_count } pub fn get_struct_element(&self, index : ElementCount) -> StructBuilder<'a> { let indexBit = index * self.step; let structData = unsafe{ self.ptr.offset((indexBit / BITS_PER_BYTE) as int)}; let structPointers = unsafe { std::cast::transmute( structData.offset(((self.struct_data_size as uint) / BITS_PER_BYTE) as int)) }; StructBuilder { segment : self.segment, data : structData, pointers : structPointers, data_size : self.struct_data_size, pointer_count : self.struct_pointer_count, bit0offset : (indexBit % BITS_PER_BYTE) as u8 } } #[inline] pub fn get_pointer_element(&self, index : ElementCount) -> PointerBuilder<'a> { PointerBuilder { segment : self.segment, pointer : unsafe { std::cast::transmute(self.ptr.offset((index * self.step / BITS_PER_BYTE) as int)) } } } 
} pub trait PrimitiveElement : Clone { #[inline] fn get(listReader : &ListReader, index : ElementCount) -> Self { unsafe { let ptr : *u8 = listReader.ptr.offset( (index * listReader.step / BITS_PER_BYTE) as int); (*std::cast::transmute::<*u8,*WireValue<Self>>(ptr)).get() } } #[inline] fn get_from_builder(listBuilder : &ListBuilder, index : ElementCount) -> Self { unsafe { let ptr : *mut WireValue<Self> = std::cast::transmute( listBuilder.ptr.offset( (index * listBuilder.step / BITS_PER_BYTE) as int)); (*ptr).get() } } #[inline] fn set(listBuilder : &ListBuilder, index : ElementCount, value: Self) { unsafe { let ptr : *mut WireValue<Self> = std::cast::transmute( listBuilder.ptr.offset( (index * listBuilder.step / BITS_PER_BYTE) as int)); (*ptr).set(value); } } } impl PrimitiveElement for u8 { } impl PrimitiveElement for u16 { } impl PrimitiveElement for u32 { } impl PrimitiveElement for u64 { } impl PrimitiveElement for i8 { } impl PrimitiveElement for i16 { } impl PrimitiveElement for i32 { } impl PrimitiveElement for i64 { } impl PrimitiveElement for f32 { } impl PrimitiveElement for f64 { } impl PrimitiveElement for bool { #[inline] fn get(list : &ListReader, index : ElementCount) -> bool { //# Ignore stepBytes for bit lists because bit lists cannot be //# upgraded to struct lists. let bindex : BitCount0 = index * list.step; unsafe { let b : *u8 = list.ptr.offset((bindex / BITS_PER_BYTE) as int); ((*b) & (1 << (bindex % BITS_PER_BYTE))) != 0 } } #[inline] fn get_from_builder(list : &ListBuilder, index : ElementCount) -> bool { //# Ignore stepBytes for bit lists because bit lists cannot be //# upgraded to struct lists. let bindex : BitCount0 = index * list.step; let b = unsafe { list.ptr.offset((bindex / BITS_PER_BYTE) as int) }; unsafe { ((*b) & (1 << (bindex % BITS_PER_BYTE ))) != 0 } } #[inline] fn set(list : &ListBuilder, index : ElementCount, value : bool) { //# Ignore stepBytes for bit lists because bit lists cannot be //# upgraded to struct lists. 
let bindex : BitCount0 = index; let b = unsafe { list.ptr.offset((bindex / BITS_PER_BYTE) as int) }; let bitnum = bindex % BITS_PER_BYTE; unsafe { (*b) = (( (*b) & !(1 << bitnum)) | (value as u8 << bitnum)) } } } impl PrimitiveElement for () { #[inline] fn get(_list : &ListReader, _index : ElementCount) -> () { () } #[inline] fn get_from_builder(_list : &ListBuilder, _index : ElementCount) -> () { () } #[inline] fn set(_list : &ListBuilder, _index : ElementCount, _value : ()) { } }
// // imag - the personal information management suite for the commandline // Copyright (C) 2015, 2016 Matthias Beyer <mail@beyermatthias.de> and contributors // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; version // 2.1 of the License. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA // use std::collections::BTreeMap; #[cfg(test)] use std::path::PathBuf; use libimagstore::storeid::StoreId; use libimagstore::storeid::IntoStoreId; use libimagstore::store::Entry; use libimagstore::store::Result as StoreResult; use libimagstore::toml_ext::TomlValueExt; use libimagerror::into::IntoError; use error::LinkErrorKind as LEK; use error::MapErrInto; use result::Result; use self::iter::LinkIter; use self::iter::IntoValues; use toml::Value; #[derive(Eq, PartialOrd, Ord, Hash, Debug, Clone)] pub enum Link { Id { link: StoreId }, Annotated { link: StoreId, annotation: String }, } impl Link { pub fn exists(&self) -> Result<bool> { match *self { Link::Id { ref link } => link.exists(), Link::Annotated { ref link, .. } => link.exists(), } .map_err_into(LEK::StoreIdError) } pub fn to_str(&self) -> Result<String> { match *self { Link::Id { ref link } => link.to_str(), Link::Annotated { ref link, .. } => link.to_str(), } .map_err_into(LEK::StoreReadError) } fn eq_store_id(&self, id: &StoreId) -> bool { match self { &Link::Id { link: ref s } => s.eq(id), &Link::Annotated { link: ref s, .. 
} => s.eq(id), } } /// Get the StoreId inside the Link, which is always present pub fn get_store_id(&self) -> &StoreId { match self { &Link::Id { link: ref s } => s, &Link::Annotated { link: ref s, .. } => s, } } /// Helper wrapper around Link for StoreId fn without_base(self) -> Link { match self { Link::Id { link: s } => Link::Id { link: s.without_base() }, Link::Annotated { link: s, annotation: ann } => Link::Annotated { link: s.without_base(), annotation: ann }, } } /// Helper wrapper around Link for StoreId #[cfg(test)] fn with_base(self, pb: PathBuf) -> Link { match self { Link::Id { link: s } => Link::Id { link: s.with_base(pb) }, Link::Annotated { link: s, annotation: ann } => Link::Annotated { link: s.with_base(pb), annotation: ann }, } } fn to_value(&self) -> Result<Value> { match self { &Link::Id { link: ref s } => s.to_str().map(Value::String).map_err_into(LEK::InternalConversionError), &Link::Annotated { ref link, annotation: ref anno } => { link.to_str() .map(Value::String) .map_err_into(LEK::InternalConversionError) .map(|link| { let mut tab = BTreeMap::new(); tab.insert("link".to_owned(), link); tab.insert("annotation".to_owned(), Value::String(anno.clone())); Value::Table(tab) }) } } } } impl ::std::cmp::PartialEq for Link { fn eq(&self, other: &Self) -> bool { match (self, other) { (&Link::Id { link: ref a }, &Link::Id { link: ref b }) => a.eq(&b), (&Link::Annotated { link: ref a, annotation: ref ann1 }, &Link::Annotated { link: ref b, annotation: ref ann2 }) => (a, ann1).eq(&(b, ann2)), _ => false, } } } impl From<StoreId> for Link { fn from(s: StoreId) -> Link { Link::Id { link: s } } } impl Into<StoreId> for Link { fn into(self) -> StoreId { match self { Link::Id { link } => link, Link::Annotated { link, .. } => link, } } } impl IntoStoreId for Link { fn into_storeid(self) -> StoreResult<StoreId> { match self { Link::Id { link } => Ok(link), Link::Annotated { link, .. 
} => Ok(link), } } } impl AsRef<StoreId> for Link { fn as_ref(&self) -> &StoreId { match self { &Link::Id { ref link } => &link, &Link::Annotated { ref link, .. } => &link, } } } pub trait InternalLinker { /// Get the internal links from the implementor object fn get_internal_links(&self) -> Result<LinkIter>; /// Set the internal links for the implementor object fn set_internal_links(&mut self, links: Vec<&mut Entry>) -> Result<LinkIter>; /// Add an internal link to the implementor object fn add_internal_link(&mut self, link: &mut Entry) -> Result<()>; /// Remove an internal link from the implementor object fn remove_internal_link(&mut self, link: &mut Entry) -> Result<()>; /// Add internal annotated link fn add_internal_annotated_link(&mut self, link: &mut Entry, annotation: String) -> Result<()>; } pub mod iter { use std::vec::IntoIter; use super::Link; use error::LinkErrorKind as LEK; use error::MapErrInto; use result::Result; use toml::Value; use itertools::Itertools; use libimagstore::store::Store; use libimagstore::store::FileLockEntry; pub struct LinkIter(IntoIter<Link>); impl LinkIter { pub fn new(v: Vec<Link>) -> LinkIter { LinkIter(v.into_iter()) } pub fn into_getter(self, store: &Store) -> GetIter { GetIter(self.0, store) } } impl Iterator for LinkIter { type Item = Link; fn next(&mut self) -> Option<Self::Item> { self.0.next() } } pub trait IntoValues { fn into_values(self) -> Vec<Result<Value>>; } impl<I: Iterator<Item = Link>> IntoValues for I { fn into_values(self) -> Vec<Result<Value>> { self.map(|s| s.without_base()) .unique() .sorted() .into_iter() // Cannot sort toml::Value, hence uglyness here .map(|link| link.to_value().map_err_into(LEK::InternalConversionError)) .collect() } } /// An Iterator that `Store::get()`s the Entries from the store while consumed pub struct GetIter<'a>(IntoIter<Link>, &'a Store); impl<'a> GetIter<'a> { pub fn new(i: IntoIter<Link>, store: &'a Store) -> GetIter<'a> { GetIter(i, store) } /// Turn this iterator into a 
LinkGcIter, which `Store::delete()`s entries that are not /// linked to any other entry. pub fn delete_unlinked(self) -> DeleteUnlinkedIter<'a> { DeleteUnlinkedIter(self) } /// Turn this iterator into a FilterLinksIter that removes all entries that are not linked /// to any other entry, by filtering them out the iterator. /// /// This does _not_ remove the entries from the store. pub fn without_unlinked(self) -> FilterLinksIter<'a> { FilterLinksIter::new(self, Box::new(|links: &[Link]| links.len() > 0)) } /// Turn this iterator into a FilterLinksIter that removes all entries that have less than /// `n` links to any other entries. /// /// This does _not_ remove the entries from the store. pub fn with_less_than_n_links(self, n: usize) -> FilterLinksIter<'a> { FilterLinksIter::new(self, Box::new(move |links: &[Link]| links.len() < n)) } /// Turn this iterator into a FilterLinksIter that removes all entries that have more than /// `n` links to any other entries. /// /// This does _not_ remove the entries from the store. pub fn with_more_than_n_links(self, n: usize) -> FilterLinksIter<'a> { FilterLinksIter::new(self, Box::new(move |links: &[Link]| links.len() > n)) } /// Turn this iterator into a FilterLinksIter that removes all entries where the predicate /// `F` returns false /// /// This does _not_ remove the entries from the store. pub fn filtered_for_links(self, f: Box<Fn(&[Link]) -> bool>) -> FilterLinksIter<'a> { FilterLinksIter::new(self, f) } pub fn store(&self) -> &Store { self.1 } } impl<'a> Iterator for GetIter<'a> { type Item = Result<FileLockEntry<'a>>; fn next(&mut self) -> Option<Self::Item> { self.0.next().and_then(|id| match self.1.get(id).map_err_into(LEK::StoreReadError) { Ok(None) => None, Ok(Some(x)) => Some(Ok(x)), Err(e) => Some(Err(e)), }) } } /// An iterator helper that has a function F. /// /// If the function F returns `false` for the number of links, the entry is ignored, else it is /// taken. 
pub struct FilterLinksIter<'a>(GetIter<'a>, Box<Fn(&[Link]) -> bool>); impl<'a> FilterLinksIter<'a> { pub fn new(gi: GetIter<'a>, f: Box<Fn(&[Link]) -> bool>) -> FilterLinksIter<'a> { FilterLinksIter(gi, f) } } impl<'a> Iterator for FilterLinksIter<'a> { type Item = Result<FileLockEntry<'a>>; fn next(&mut self) -> Option<Self::Item> { use internal::InternalLinker; loop { match self.0.next() { Some(Ok(fle)) => { let links = match fle.get_internal_links().map_err_into(LEK::StoreReadError) { Err(e) => return Some(Err(e)), Ok(links) => links.collect::<Vec<_>>(), }; if !(self.1)(&links) { continue; } else { return Some(Ok(fle)); } }, Some(Err(e)) => return Some(Err(e)), None => break, } } None } } /// An iterator that removes all Items from the iterator that are not linked anymore by calling /// `Store::delete()` on them. /// /// It yields only items which are somehow linked to another entry /// /// # Warning /// /// Deletes entries from the store. /// pub struct DeleteUnlinkedIter<'a>(GetIter<'a>); impl<'a> Iterator for DeleteUnlinkedIter<'a> { type Item = Result<FileLockEntry<'a>>; fn next(&mut self) -> Option<Self::Item> { use internal::InternalLinker; loop { match self.0.next() { Some(Ok(fle)) => { let links = match fle.get_internal_links().map_err_into(LEK::StoreReadError) { Err(e) => return Some(Err(e)), Ok(links) => links, }; if links.count() == 0 { match self.0 .store() .delete(fle.get_location().clone()) .map_err_into(LEK::StoreWriteError) { Ok(x) => x, Err(e) => return Some(Err(e)), } } else { return Some(Ok(fle)); } }, Some(Err(e)) => return Some(Err(e)), None => break, } } None } } } impl InternalLinker for Entry { fn get_internal_links(&self) -> Result<LinkIter> { process_rw_result(self.get_header().read("imag.links")) } /// Set the links in a header and return the old links, if any. 
    /// Replace this entry's link list with `links`.
    ///
    /// For every target entry a back-link to `self` is written first (links
    /// are always bidirectional); then the deduplicated, sorted list is
    /// stored in this entry's "imag.links" header field. The previously
    /// stored links, if any, are returned.
    fn set_internal_links(&mut self, links: Vec<&mut Entry>) -> Result<LinkIter> {
        use internal::iter::IntoValues;

        let self_location = self.get_location().clone();
        let mut new_links = vec![];

        for link in links {
            // Write the back-link B -> A before recording A -> B, aborting
            // on the first failure so we don't record half a link.
            if let Err(e) = add_foreign_link(link, self_location.clone()) {
                return Err(e);
            }
            new_links.push(link.get_location().clone().into());
        }

        // Convert the Link objects into TOML Values, short-circuiting on the
        // first conversion error.
        let new_links = try!(LinkIter::new(new_links)
            .into_values()
            .into_iter()
            .fold(Ok(vec![]), |acc, elem| {
                acc.and_then(move |mut v| {
                    elem.map_err_into(LEK::InternalConversionError)
                        .map(|e| {
                            v.push(e);
                            v
                        })
                })
            }));
        process_rw_result(self.get_header_mut().set("imag.links", Value::Array(new_links)))
    }

    /// Add a plain (unannotated) bidirectional link between `self` and `link`.
    fn add_internal_link(&mut self, link: &mut Entry) -> Result<()> {
        let location = link.get_location().clone().into();
        add_internal_link_with_instance(self, link, location)
    }

    /// Remove the bidirectional link between `self` and `link` by rewriting
    /// both entries' link lists without the respective other StoreId.
    fn remove_internal_link(&mut self, link: &mut Entry) -> Result<()> {
        // Compare ids without the store base path so they match regardless
        // of which base the stored link was created with.
        let own_loc = self.get_location().clone().without_base();
        let other_loc = link.get_location().clone().without_base();

        debug!("Removing internal link from {:?} to {:?}", own_loc, other_loc);

        link.get_internal_links()
            .and_then(|links| {
                debug!("Rewriting own links for {:?}, without {:?}", other_loc, own_loc);
                let links = links.filter(|l| !l.eq_store_id(&own_loc));
                rewrite_links(link.get_header_mut(), links)
            })
            .and_then(|_| {
                self.get_internal_links()
                    .and_then(|links| {
                        debug!("Rewriting own links for {:?}, without {:?}", own_loc, other_loc);
                        let links = links.filter(|l| !l.eq_store_id(&other_loc));
                        rewrite_links(self.get_header_mut(), links)
                    })
            })
    }

    /// Add a bidirectional link between `self` and `link`, storing
    /// `annotation` on the forward (self -> link) direction.
    fn add_internal_annotated_link(&mut self, link: &mut Entry, annotation: String) -> Result<()> {
        let new_link = Link::Annotated {
            link: link.get_location().clone(),
            annotation: annotation,
        };
        add_internal_link_with_instance(self, link, new_link)
    }
}

/// Shared implementation for the `add_internal_*link` methods: first links
/// back from `link` to `this`, then appends `instance` to `this`' links.
fn add_internal_link_with_instance(this: &mut Entry,
                                   link: &mut Entry,
                                   instance: Link)
    -> Result<()>
{
    debug!("Adding internal link from {:?} to {:?}", this.get_location(), instance);
    add_foreign_link(link, this.get_location().clone())
        .and_then(|_| {
            this.get_internal_links()
                .and_then(|links| {
                    // Append the new link to the existing ones and persist
                    // the combined list in the header.
                    let links = links.chain(LinkIter::new(vec![instance]));
                    rewrite_links(this.get_header_mut(), links)
                })
        })
}

/// Serialize `links` into TOML Values and store them in `header` under
/// "imag.links", replacing whatever was stored there before.
fn rewrite_links<I: Iterator<Item = Link>>(header: &mut Value, links: I) -> Result<()> {
    // Collect the TOML representations, short-circuiting on the first
    // conversion error.
    let links = try!(links.into_values()
        .into_iter()
        .fold(Ok(vec![]), |acc, elem| {
            acc.and_then(move |mut v| {
                elem.map_err_into(LEK::InternalConversionError)
                    .map(|e| {
                        v.push(e);
                        v
                    })
            })
        }));

    debug!("Setting new link array: {:?}", links);
    let process = header.set("imag.links", Value::Array(links));
    process_rw_result(process).map(|_| ())
}

/// When Linking A -> B, the specification wants us to link back B -> A.
/// This is a helper function which does this.
fn add_foreign_link(target: &mut Entry, from: StoreId) -> Result<()> {
    debug!("Linking back from {:?} to {:?}", target.get_location(), from);
    target.get_internal_links()
        .and_then(|links| {
            // Chain the back-link onto the existing links and convert them
            // all to TOML Values, failing on the first conversion error.
            let links = try!(links
                .chain(LinkIter::new(vec![from.into()]))
                .into_values()
                .into_iter()
                .fold(Ok(vec![]), |acc, elem| {
                    acc.and_then(move |mut v| {
                        elem.map_err_into(LEK::InternalConversionError)
                            .map(|e| {
                                v.push(e);
                                v
                            })
                    })
                }));
            debug!("Setting links in {:?}: {:?}", target.get_location(), links);
            process_rw_result(target.get_header_mut().set("imag.links", Value::Array(links)))
                .map(|_| ())
        })
}

/// Decode the raw header read/write result into a LinkIter.
///
/// A missing "imag.links" field (`Ok(None)`) yields an empty iterator; a
/// non-Array value, or Array elements that are neither Strings nor Tables,
/// are reported as errors.
fn process_rw_result(links: StoreResult<Option<Value>>) -> Result<LinkIter> {
    use std::path::PathBuf;

    let links = match links {
        Err(e) => {
            debug!("RW action on store failed.
Generating LinkError"); return Err(LEK::EntryHeaderReadError.into_error_with_cause(Box::new(e))) }, Ok(None) => { debug!("We got no value from the header!"); return Ok(LinkIter::new(vec![])) }, Ok(Some(Value::Array(l))) => l, Ok(Some(_)) => { debug!("We expected an Array for the links, but there was a non-Array!"); return Err(LEK::ExistingLinkTypeWrong.into()); } }; if !links.iter().all(|l| is_match!(*l, Value::String(_)) || is_match!(*l, Value::Table(_))) { debug!("At least one of the Values which were expected in the Array of links is not a String or a Table!"); debug!("Generating LinkError"); return Err(LEK::ExistingLinkTypeWrong.into()); } let links : Vec<Link> = try!(links.into_iter() .map(|link| { debug!("Matching the link: {:?}", link); match link { Value::String(s) => StoreId::new_baseless(PathBuf::from(s)) .map_err_into(LEK::StoreIdError) .map(|s| Link::Id { link: s }) , Value::Table(mut tab) => { debug!("Destructuring table"); if !tab.contains_key("link") || !tab.contains_key("annotation") { debug!("Things missing... 
returning Error instance"); Err(LEK::LinkParserError.into_error()) } else { let link = try!(tab.remove("link") .ok_or(LEK::LinkParserFieldMissingError.into_error())); let anno = try!(tab.remove("annotation") .ok_or(LEK::LinkParserFieldMissingError.into_error())); debug!("Ok, here we go with building a Link::Annotated"); match (link, anno) { (Value::String(link), Value::String(anno)) => { StoreId::new_baseless(PathBuf::from(link)) .map_err_into(LEK::StoreIdError) .map(|link| { Link::Annotated { link: link, annotation: anno, } }) }, _ => Err(LEK::LinkParserFieldTypeError.into_error()), } } } _ => unreachable!(), } }) .collect()); debug!("Ok, the RW action was successful, returning link vector now!"); Ok(LinkIter::new(links)) } pub mod store_check { use libimagstore::store::Store; pub mod error { generate_error_imports!(); use libimagstore::storeid::StoreId; #[derive(Debug)] pub enum StoreLinkConsistencyErrorCustomData { DeadLink { target: StoreId }, OneDirectionalLink { source: StoreId, target: StoreId }, } impl Display for StoreLinkConsistencyErrorCustomData { } generate_custom_error_types!( StoreLinkConsistencyError, StoreLinkConsistencyErrorKind, StoreLinkConsistencyErrorCustomData, StoreLinkConsistencyError => "Links in the store are not consistent", LinkHandlingError => "Error in link handling", StoreError => "Error while talking to the store" ); generate_result_helper!(StoreLinkConsistencyError, StoreLinkConsistencyErrorKind); generate_option_helper!(StoreLinkConsistencyError, StoreLinkConsistencyErrorKind); } pub use self::error::StoreLinkConsistencyError; pub use self::error::StoreLinkConsistencyErrorKind; pub use self::error::MapErrInto; pub mod result { use std::result::Result as RResult; use internal::store_check::error::StoreLinkConsistencyError as SLCE; pub type Result<T> = RResult<T, SLCE>; } pub trait StoreLinkConsistentExt { fn check_link_consistency(&self) -> Result<()>; } impl StoreLinkConsistentExt for Store { fn check_link_consistency(&self) -> 
Result<()> { use std::collections::HashMap; use self::error::StoreLinkConsistencyErrorKind as SLCEK; use self::error::StoreLinkConsistencyError as SLCE; use self::error::StoreLinkConsistencyErrorCustomData as SLCECD; use error::LinkErrorKind as LEK; use result::Result as LResult; use internal::InternalLinker; use libimagstore::store::StoreObject; use libimagstore::storeid::StoreId; use libimagerror::iter::TraceIterator; use libimagerror::into::IntoError; use libimagutil::iter::FoldResult; // Helper data structure to collect incoming and outgoing links for each StoreId #[derive(Debug, Default)] struct Linking { outgoing: Vec<StoreId>, incoming: Vec<StoreId>, } /// Helper function to aggregate the Link network /// /// This function aggregates a HashMap which maps each StoreId object in the store onto /// a Linking object, which contains a list of StoreIds which this entry links to and a /// list of StoreIds which link to the current one. /// /// The lambda returns an error if something fails let aggregate_link_network = |store: &Store| -> Result<HashMap<StoreId, Linking>> { store .walk("") // this is a hack... I know... .filter_map(|obj: StoreObject| match obj { StoreObject::Id(id) => Some(id), _ => None }) // Only ids are interesting .fold(Ok(HashMap::new()), |acc, sid| { acc.and_then(|mut state| { debug!("Checking entry: '{}'", sid); match try!(self.get(sid).map_err_into(SLCEK::StoreError)) { Some(fle) => { debug!("Found FileLockEntry"); let fle_loc = fle.get_location(); let internal_links = fle .get_internal_links() .map_err_into(SLCEK::StoreError)? 
.into_getter(self) // get the FLEs from the Store .trace_unwrap(); // trace all Err(e)s and get the Ok(fle)s for internal_link in internal_links { let il_loc = internal_link.get_location(); state .entry(il_loc.clone()) .or_insert(Linking::default()) .incoming .push(fle_loc.clone()); // Make sure an empty linking object is present for the // current StoreId object state .entry(fle_loc.clone()) .or_insert(Linking::default()) .outgoing .push(il_loc.clone()); } Ok(state) }, None => { debug!("No entry"); Ok(state) } } }) }) }; /// Helper to check whethre all StoreIds in the network actually exists /// /// Because why not? let all_collected_storeids_exist = |network: &HashMap<StoreId, Linking>| -> LResult<()> { network .iter() .fold_result(|(id, _)| { if is_match!(self.get(id.clone()), Ok(Some(_))) { debug!("Exists in store: {:?}", id); let exists = { use error::MapErrInto as MEI; try!(MEI::map_err_into(id.exists(), LEK::StoreReadError)) }; if !exists { warn!("Does exist in store but not on FS: {:?}", id); Err(LEK::LinkTargetDoesNotExist.into_error()) } else { Ok(()) } } else { warn!("Does not exist in store: {:?}", id); Err(LEK::LinkTargetDoesNotExist.into_error()) } }) }; /// Helper function to create a SLCECD::OneDirectionalLink error object #[inline] let mk_one_directional_link_err = |src: StoreId, target: StoreId| -> SLCE { // construct the error let custom = SLCECD::OneDirectionalLink { source: src, target: target, }; SLCEK::StoreLinkConsistencyError .into_error() .with_custom_data(custom) }; /// Helper lambda to check whether the _incoming_ links of each entry actually also /// appear in the _outgoing_ list of the linked entry let incoming_links_exists_as_outgoing_links = |src: &StoreId, linking: &Linking, network: &HashMap<StoreId, Linking>| -> Result<()> { linking .incoming .iter() .fold_result(|link| { // Check whether the links which are _incoming_ on _src_ are outgoing // in each of the links in the incoming list. 
let incoming_consistent = network.get(link) .map(|l| l.outgoing.contains(src)) .unwrap_or(false); if !incoming_consistent { Err(mk_one_directional_link_err(src.clone(), link.clone())) } else { Ok(()) } }) }; /// Helper lambda to check whether the _outgoing links of each entry actually also /// appear in the _incoming_ list of the linked entry let outgoing_links_exist_as_incoming_links = |src: &StoreId, linking: &Linking, network: &HashMap<StoreId, Linking>| -> Result<()> { linking .outgoing .iter() .fold_result(|link| { // Check whether the links which are _outgoing_ on _src_ are incoming // in each of the links in the outgoing list. let outgoing_consistent = network.get(link) .map(|l| l.incoming.contains(src)) .unwrap_or(false); if !outgoing_consistent { Err(mk_one_directional_link_err(link.clone(), src.clone())) } else { Ok(()) } }) }; aggregate_link_network(&self) .and_then(|nw| { all_collected_storeids_exist(&nw) .map(|_| nw) .map_err_into(SLCEK::LinkHandlingError) }) .and_then(|nw| { nw.iter().fold_result(|(id, linking)| { try!(incoming_links_exists_as_outgoing_links(id, linking, &nw)); try!(outgoing_links_exist_as_incoming_links(id, linking, &nw)); Ok(()) }) }) .map(|_| ()) } } } #[cfg(test)] mod test { use std::path::PathBuf; use libimagstore::store::Store; use super::InternalLinker; fn setup_logging() { use env_logger; let _ = env_logger::init().unwrap_or(()); } pub fn get_store() -> Store { Store::new(PathBuf::from("/"), None).unwrap() } #[test] fn test_new_entry_no_links() { setup_logging(); let store = get_store(); let entry = store.create(PathBuf::from("test_new_entry_no_links")).unwrap(); let links = entry.get_internal_links(); assert!(links.is_ok()); let links = links.unwrap(); assert_eq!(links.collect::<Vec<_>>().len(), 0); } #[test] fn test_link_two_entries() { setup_logging(); let store = get_store(); let mut e1 = store.create(PathBuf::from("test_link_two_entries1")).unwrap(); assert!(e1.get_internal_links().is_ok()); let mut e2 = 
store.create(PathBuf::from("test_link_two_entries2")).unwrap(); assert!(e2.get_internal_links().is_ok()); { assert!(e1.add_internal_link(&mut e2).is_ok()); let e1_links = e1.get_internal_links().unwrap().collect::<Vec<_>>(); let e2_links = e2.get_internal_links().unwrap().collect::<Vec<_>>(); debug!("1 has links: {:?}", e1_links); debug!("2 has links: {:?}", e2_links); assert_eq!(e1_links.len(), 1); assert_eq!(e2_links.len(), 1); assert!(e1_links.first().map(|l| l.clone().with_base(store.path().clone()).eq_store_id(e2.get_location())).unwrap_or(false)); assert!(e2_links.first().map(|l| l.clone().with_base(store.path().clone()).eq_store_id(e1.get_location())).unwrap_or(false)); } { assert!(e1.remove_internal_link(&mut e2).is_ok()); println!("{:?}", e2.to_str()); let e2_links = e2.get_internal_links().unwrap().collect::<Vec<_>>(); assert_eq!(e2_links.len(), 0, "Expected [], got: {:?}", e2_links); println!("{:?}", e1.to_str()); let e1_links = e1.get_internal_links().unwrap().collect::<Vec<_>>(); assert_eq!(e1_links.len(), 0, "Expected [], got: {:?}", e1_links); } } #[test] fn test_multiple_links() { setup_logging(); let store = get_store(); let mut e1 = store.retrieve(PathBuf::from("1")).unwrap(); let mut e2 = store.retrieve(PathBuf::from("2")).unwrap(); let mut e3 = store.retrieve(PathBuf::from("3")).unwrap(); let mut e4 = store.retrieve(PathBuf::from("4")).unwrap(); let mut e5 = store.retrieve(PathBuf::from("5")).unwrap(); assert!(e1.add_internal_link(&mut e2).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert!(e1.add_internal_link(&mut e3).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 2); 
assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert!(e1.add_internal_link(&mut e4).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 3); assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert!(e1.add_internal_link(&mut e5).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 4); assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert!(e5.remove_internal_link(&mut e1).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 3); assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert!(e4.remove_internal_link(&mut e1).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 2); assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); 
assert!(e3.remove_internal_link(&mut e1).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert!(e2.remove_internal_link(&mut e1).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); } } Impl Display for StoreLinkConsistencyErrorCustomData // // imag - the personal information management suite for the commandline // Copyright (C) 2015, 2016 Matthias Beyer <mail@beyermatthias.de> and contributors // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; version // 2.1 of the License. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. 
// // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA // use std::collections::BTreeMap; #[cfg(test)] use std::path::PathBuf; use libimagstore::storeid::StoreId; use libimagstore::storeid::IntoStoreId; use libimagstore::store::Entry; use libimagstore::store::Result as StoreResult; use libimagstore::toml_ext::TomlValueExt; use libimagerror::into::IntoError; use error::LinkErrorKind as LEK; use error::MapErrInto; use result::Result; use self::iter::LinkIter; use self::iter::IntoValues; use toml::Value; #[derive(Eq, PartialOrd, Ord, Hash, Debug, Clone)] pub enum Link { Id { link: StoreId }, Annotated { link: StoreId, annotation: String }, } impl Link { pub fn exists(&self) -> Result<bool> { match *self { Link::Id { ref link } => link.exists(), Link::Annotated { ref link, .. } => link.exists(), } .map_err_into(LEK::StoreIdError) } pub fn to_str(&self) -> Result<String> { match *self { Link::Id { ref link } => link.to_str(), Link::Annotated { ref link, .. } => link.to_str(), } .map_err_into(LEK::StoreReadError) } fn eq_store_id(&self, id: &StoreId) -> bool { match self { &Link::Id { link: ref s } => s.eq(id), &Link::Annotated { link: ref s, .. } => s.eq(id), } } /// Get the StoreId inside the Link, which is always present pub fn get_store_id(&self) -> &StoreId { match self { &Link::Id { link: ref s } => s, &Link::Annotated { link: ref s, .. 
} => s, } } /// Helper wrapper around Link for StoreId fn without_base(self) -> Link { match self { Link::Id { link: s } => Link::Id { link: s.without_base() }, Link::Annotated { link: s, annotation: ann } => Link::Annotated { link: s.without_base(), annotation: ann }, } } /// Helper wrapper around Link for StoreId #[cfg(test)] fn with_base(self, pb: PathBuf) -> Link { match self { Link::Id { link: s } => Link::Id { link: s.with_base(pb) }, Link::Annotated { link: s, annotation: ann } => Link::Annotated { link: s.with_base(pb), annotation: ann }, } } fn to_value(&self) -> Result<Value> { match self { &Link::Id { link: ref s } => s.to_str().map(Value::String).map_err_into(LEK::InternalConversionError), &Link::Annotated { ref link, annotation: ref anno } => { link.to_str() .map(Value::String) .map_err_into(LEK::InternalConversionError) .map(|link| { let mut tab = BTreeMap::new(); tab.insert("link".to_owned(), link); tab.insert("annotation".to_owned(), Value::String(anno.clone())); Value::Table(tab) }) } } } } impl ::std::cmp::PartialEq for Link { fn eq(&self, other: &Self) -> bool { match (self, other) { (&Link::Id { link: ref a }, &Link::Id { link: ref b }) => a.eq(&b), (&Link::Annotated { link: ref a, annotation: ref ann1 }, &Link::Annotated { link: ref b, annotation: ref ann2 }) => (a, ann1).eq(&(b, ann2)), _ => false, } } } impl From<StoreId> for Link { fn from(s: StoreId) -> Link { Link::Id { link: s } } } impl Into<StoreId> for Link { fn into(self) -> StoreId { match self { Link::Id { link } => link, Link::Annotated { link, .. } => link, } } } impl IntoStoreId for Link { fn into_storeid(self) -> StoreResult<StoreId> { match self { Link::Id { link } => Ok(link), Link::Annotated { link, .. } => Ok(link), } } } impl AsRef<StoreId> for Link { fn as_ref(&self) -> &StoreId { match self { &Link::Id { ref link } => &link, &Link::Annotated { ref link, .. 
} => &link, } } } pub trait InternalLinker { /// Get the internal links from the implementor object fn get_internal_links(&self) -> Result<LinkIter>; /// Set the internal links for the implementor object fn set_internal_links(&mut self, links: Vec<&mut Entry>) -> Result<LinkIter>; /// Add an internal link to the implementor object fn add_internal_link(&mut self, link: &mut Entry) -> Result<()>; /// Remove an internal link from the implementor object fn remove_internal_link(&mut self, link: &mut Entry) -> Result<()>; /// Add internal annotated link fn add_internal_annotated_link(&mut self, link: &mut Entry, annotation: String) -> Result<()>; } pub mod iter { use std::vec::IntoIter; use super::Link; use error::LinkErrorKind as LEK; use error::MapErrInto; use result::Result; use toml::Value; use itertools::Itertools; use libimagstore::store::Store; use libimagstore::store::FileLockEntry; pub struct LinkIter(IntoIter<Link>); impl LinkIter { pub fn new(v: Vec<Link>) -> LinkIter { LinkIter(v.into_iter()) } pub fn into_getter(self, store: &Store) -> GetIter { GetIter(self.0, store) } } impl Iterator for LinkIter { type Item = Link; fn next(&mut self) -> Option<Self::Item> { self.0.next() } } pub trait IntoValues { fn into_values(self) -> Vec<Result<Value>>; } impl<I: Iterator<Item = Link>> IntoValues for I { fn into_values(self) -> Vec<Result<Value>> { self.map(|s| s.without_base()) .unique() .sorted() .into_iter() // Cannot sort toml::Value, hence uglyness here .map(|link| link.to_value().map_err_into(LEK::InternalConversionError)) .collect() } } /// An Iterator that `Store::get()`s the Entries from the store while consumed pub struct GetIter<'a>(IntoIter<Link>, &'a Store); impl<'a> GetIter<'a> { pub fn new(i: IntoIter<Link>, store: &'a Store) -> GetIter<'a> { GetIter(i, store) } /// Turn this iterator into a LinkGcIter, which `Store::delete()`s entries that are not /// linked to any other entry. 
pub fn delete_unlinked(self) -> DeleteUnlinkedIter<'a> { DeleteUnlinkedIter(self) } /// Turn this iterator into a FilterLinksIter that removes all entries that are not linked /// to any other entry, by filtering them out the iterator. /// /// This does _not_ remove the entries from the store. pub fn without_unlinked(self) -> FilterLinksIter<'a> { FilterLinksIter::new(self, Box::new(|links: &[Link]| links.len() > 0)) } /// Turn this iterator into a FilterLinksIter that removes all entries that have less than /// `n` links to any other entries. /// /// This does _not_ remove the entries from the store. pub fn with_less_than_n_links(self, n: usize) -> FilterLinksIter<'a> { FilterLinksIter::new(self, Box::new(move |links: &[Link]| links.len() < n)) } /// Turn this iterator into a FilterLinksIter that removes all entries that have more than /// `n` links to any other entries. /// /// This does _not_ remove the entries from the store. pub fn with_more_than_n_links(self, n: usize) -> FilterLinksIter<'a> { FilterLinksIter::new(self, Box::new(move |links: &[Link]| links.len() > n)) } /// Turn this iterator into a FilterLinksIter that removes all entries where the predicate /// `F` returns false /// /// This does _not_ remove the entries from the store. pub fn filtered_for_links(self, f: Box<Fn(&[Link]) -> bool>) -> FilterLinksIter<'a> { FilterLinksIter::new(self, f) } pub fn store(&self) -> &Store { self.1 } } impl<'a> Iterator for GetIter<'a> { type Item = Result<FileLockEntry<'a>>; fn next(&mut self) -> Option<Self::Item> { self.0.next().and_then(|id| match self.1.get(id).map_err_into(LEK::StoreReadError) { Ok(None) => None, Ok(Some(x)) => Some(Ok(x)), Err(e) => Some(Err(e)), }) } } /// An iterator helper that has a function F. /// /// If the function F returns `false` for the number of links, the entry is ignored, else it is /// taken. 
pub struct FilterLinksIter<'a>(GetIter<'a>, Box<Fn(&[Link]) -> bool>); impl<'a> FilterLinksIter<'a> { pub fn new(gi: GetIter<'a>, f: Box<Fn(&[Link]) -> bool>) -> FilterLinksIter<'a> { FilterLinksIter(gi, f) } } impl<'a> Iterator for FilterLinksIter<'a> { type Item = Result<FileLockEntry<'a>>; fn next(&mut self) -> Option<Self::Item> { use internal::InternalLinker; loop { match self.0.next() { Some(Ok(fle)) => { let links = match fle.get_internal_links().map_err_into(LEK::StoreReadError) { Err(e) => return Some(Err(e)), Ok(links) => links.collect::<Vec<_>>(), }; if !(self.1)(&links) { continue; } else { return Some(Ok(fle)); } }, Some(Err(e)) => return Some(Err(e)), None => break, } } None } } /// An iterator that removes all Items from the iterator that are not linked anymore by calling /// `Store::delete()` on them. /// /// It yields only items which are somehow linked to another entry /// /// # Warning /// /// Deletes entries from the store. /// pub struct DeleteUnlinkedIter<'a>(GetIter<'a>); impl<'a> Iterator for DeleteUnlinkedIter<'a> { type Item = Result<FileLockEntry<'a>>; fn next(&mut self) -> Option<Self::Item> { use internal::InternalLinker; loop { match self.0.next() { Some(Ok(fle)) => { let links = match fle.get_internal_links().map_err_into(LEK::StoreReadError) { Err(e) => return Some(Err(e)), Ok(links) => links, }; if links.count() == 0 { match self.0 .store() .delete(fle.get_location().clone()) .map_err_into(LEK::StoreWriteError) { Ok(x) => x, Err(e) => return Some(Err(e)), } } else { return Some(Ok(fle)); } }, Some(Err(e)) => return Some(Err(e)), None => break, } } None } } } impl InternalLinker for Entry { fn get_internal_links(&self) -> Result<LinkIter> { process_rw_result(self.get_header().read("imag.links")) } /// Set the links in a header and return the old links, if any. 
fn set_internal_links(&mut self, links: Vec<&mut Entry>) -> Result<LinkIter> { use internal::iter::IntoValues; let self_location = self.get_location().clone(); let mut new_links = vec![]; for link in links { if let Err(e) = add_foreign_link(link, self_location.clone()) { return Err(e); } new_links.push(link.get_location().clone().into()); } let new_links = try!(LinkIter::new(new_links) .into_values() .into_iter() .fold(Ok(vec![]), |acc, elem| { acc.and_then(move |mut v| { elem.map_err_into(LEK::InternalConversionError) .map(|e| { v.push(e); v }) }) })); process_rw_result(self.get_header_mut().set("imag.links", Value::Array(new_links))) } fn add_internal_link(&mut self, link: &mut Entry) -> Result<()> { let location = link.get_location().clone().into(); add_internal_link_with_instance(self, link, location) } fn remove_internal_link(&mut self, link: &mut Entry) -> Result<()> { let own_loc = self.get_location().clone().without_base(); let other_loc = link.get_location().clone().without_base(); debug!("Removing internal link from {:?} to {:?}", own_loc, other_loc); link.get_internal_links() .and_then(|links| { debug!("Rewriting own links for {:?}, without {:?}", other_loc, own_loc); let links = links.filter(|l| !l.eq_store_id(&own_loc)); rewrite_links(link.get_header_mut(), links) }) .and_then(|_| { self.get_internal_links() .and_then(|links| { debug!("Rewriting own links for {:?}, without {:?}", own_loc, other_loc); let links = links.filter(|l| !l.eq_store_id(&other_loc)); rewrite_links(self.get_header_mut(), links) }) }) } fn add_internal_annotated_link(&mut self, link: &mut Entry, annotation: String) -> Result<()> { let new_link = Link::Annotated { link: link.get_location().clone(), annotation: annotation, }; add_internal_link_with_instance(self, link, new_link) } } fn add_internal_link_with_instance(this: &mut Entry, link: &mut Entry, instance: Link) -> Result<()> { debug!("Adding internal link from {:?} to {:?}", this.get_location(), instance); 
add_foreign_link(link, this.get_location().clone()) .and_then(|_| { this.get_internal_links() .and_then(|links| { let links = links.chain(LinkIter::new(vec![instance])); rewrite_links(this.get_header_mut(), links) }) }) } fn rewrite_links<I: Iterator<Item = Link>>(header: &mut Value, links: I) -> Result<()> { let links = try!(links.into_values() .into_iter() .fold(Ok(vec![]), |acc, elem| { acc.and_then(move |mut v| { elem.map_err_into(LEK::InternalConversionError) .map(|e| { v.push(e); v }) }) })); debug!("Setting new link array: {:?}", links); let process = header.set("imag.links", Value::Array(links)); process_rw_result(process).map(|_| ()) } /// When Linking A -> B, the specification wants us to link back B -> A. /// This is a helper function which does this. fn add_foreign_link(target: &mut Entry, from: StoreId) -> Result<()> { debug!("Linking back from {:?} to {:?}", target.get_location(), from); target.get_internal_links() .and_then(|links| { let links = try!(links .chain(LinkIter::new(vec![from.into()])) .into_values() .into_iter() .fold(Ok(vec![]), |acc, elem| { acc.and_then(move |mut v| { elem.map_err_into(LEK::InternalConversionError) .map(|e| { v.push(e); v }) }) })); debug!("Setting links in {:?}: {:?}", target.get_location(), links); process_rw_result(target.get_header_mut().set("imag.links", Value::Array(links))) .map(|_| ()) }) } fn process_rw_result(links: StoreResult<Option<Value>>) -> Result<LinkIter> { use std::path::PathBuf; let links = match links { Err(e) => { debug!("RW action on store failed. 
Generating LinkError"); return Err(LEK::EntryHeaderReadError.into_error_with_cause(Box::new(e))) }, Ok(None) => { debug!("We got no value from the header!"); return Ok(LinkIter::new(vec![])) }, Ok(Some(Value::Array(l))) => l, Ok(Some(_)) => { debug!("We expected an Array for the links, but there was a non-Array!"); return Err(LEK::ExistingLinkTypeWrong.into()); } }; if !links.iter().all(|l| is_match!(*l, Value::String(_)) || is_match!(*l, Value::Table(_))) { debug!("At least one of the Values which were expected in the Array of links is not a String or a Table!"); debug!("Generating LinkError"); return Err(LEK::ExistingLinkTypeWrong.into()); } let links : Vec<Link> = try!(links.into_iter() .map(|link| { debug!("Matching the link: {:?}", link); match link { Value::String(s) => StoreId::new_baseless(PathBuf::from(s)) .map_err_into(LEK::StoreIdError) .map(|s| Link::Id { link: s }) , Value::Table(mut tab) => { debug!("Destructuring table"); if !tab.contains_key("link") || !tab.contains_key("annotation") { debug!("Things missing... 
returning Error instance"); Err(LEK::LinkParserError.into_error()) } else { let link = try!(tab.remove("link") .ok_or(LEK::LinkParserFieldMissingError.into_error())); let anno = try!(tab.remove("annotation") .ok_or(LEK::LinkParserFieldMissingError.into_error())); debug!("Ok, here we go with building a Link::Annotated"); match (link, anno) { (Value::String(link), Value::String(anno)) => { StoreId::new_baseless(PathBuf::from(link)) .map_err_into(LEK::StoreIdError) .map(|link| { Link::Annotated { link: link, annotation: anno, } }) }, _ => Err(LEK::LinkParserFieldTypeError.into_error()), } } } _ => unreachable!(), } }) .collect()); debug!("Ok, the RW action was successful, returning link vector now!"); Ok(LinkIter::new(links)) } pub mod store_check { use libimagstore::store::Store; pub mod error { generate_error_imports!(); use libimagstore::storeid::StoreId; #[derive(Debug)] pub enum StoreLinkConsistencyErrorCustomData { DeadLink { target: StoreId }, OneDirectionalLink { source: StoreId, target: StoreId }, } impl Display for StoreLinkConsistencyErrorCustomData { fn fmt(&self, fmt: &mut Formatter) -> Result<(), FmtError> { use self::StoreLinkConsistencyErrorCustomData as SLCECD; match self { &SLCECD::DeadLink { ref target } => { try!(write!(fmt, "Dead Link to '{}'", target)) }, &SLCECD::OneDirectionalLink { ref source, ref target } => { try!(write!(fmt, "Link from '{}' to '{}' does exist, but not other way round", source, target)) } }; Ok(()) } } generate_custom_error_types!( StoreLinkConsistencyError, StoreLinkConsistencyErrorKind, StoreLinkConsistencyErrorCustomData, StoreLinkConsistencyError => "Links in the store are not consistent", LinkHandlingError => "Error in link handling", StoreError => "Error while talking to the store" ); generate_result_helper!(StoreLinkConsistencyError, StoreLinkConsistencyErrorKind); generate_option_helper!(StoreLinkConsistencyError, StoreLinkConsistencyErrorKind); } pub use self::error::StoreLinkConsistencyError; pub use 
self::error::StoreLinkConsistencyErrorKind; pub use self::error::MapErrInto; pub mod result { use std::result::Result as RResult; use internal::store_check::error::StoreLinkConsistencyError as SLCE; pub type Result<T> = RResult<T, SLCE>; } use self::result::Result; pub trait StoreLinkConsistentExt { fn check_link_consistency(&self) -> Result<()>; } impl StoreLinkConsistentExt for Store { fn check_link_consistency(&self) -> Result<()> { use std::collections::HashMap; use self::error::StoreLinkConsistencyErrorKind as SLCEK; use self::error::StoreLinkConsistencyError as SLCE; use self::error::StoreLinkConsistencyErrorCustomData as SLCECD; use error::LinkErrorKind as LEK; use result::Result as LResult; use internal::InternalLinker; use libimagstore::store::StoreObject; use libimagstore::storeid::StoreId; use libimagerror::iter::TraceIterator; use libimagerror::into::IntoError; use libimagutil::iter::FoldResult; // Helper data structure to collect incoming and outgoing links for each StoreId #[derive(Debug, Default)] struct Linking { outgoing: Vec<StoreId>, incoming: Vec<StoreId>, } /// Helper function to aggregate the Link network /// /// This function aggregates a HashMap which maps each StoreId object in the store onto /// a Linking object, which contains a list of StoreIds which this entry links to and a /// list of StoreIds which link to the current one. /// /// The lambda returns an error if something fails let aggregate_link_network = |store: &Store| -> Result<HashMap<StoreId, Linking>> { store .walk("") // this is a hack... I know... 
.filter_map(|obj: StoreObject| match obj { StoreObject::Id(id) => Some(id), _ => None }) // Only ids are interesting .fold(Ok(HashMap::new()), |acc, sid| { acc.and_then(|mut state| { debug!("Checking entry: '{}'", sid); match try!(self.get(sid).map_err_into(SLCEK::StoreError)) { Some(fle) => { debug!("Found FileLockEntry"); let fle_loc = fle.get_location(); let internal_links = fle .get_internal_links() .map_err_into(SLCEK::StoreError)? .into_getter(self) // get the FLEs from the Store .trace_unwrap(); // trace all Err(e)s and get the Ok(fle)s for internal_link in internal_links { let il_loc = internal_link.get_location(); state .entry(il_loc.clone()) .or_insert(Linking::default()) .incoming .push(fle_loc.clone()); // Make sure an empty linking object is present for the // current StoreId object state .entry(fle_loc.clone()) .or_insert(Linking::default()) .outgoing .push(il_loc.clone()); } Ok(state) }, None => { debug!("No entry"); Ok(state) } } }) }) }; /// Helper to check whethre all StoreIds in the network actually exists /// /// Because why not? 
let all_collected_storeids_exist = |network: &HashMap<StoreId, Linking>| -> LResult<()> { network .iter() .fold_result(|(id, _)| { if is_match!(self.get(id.clone()), Ok(Some(_))) { debug!("Exists in store: {:?}", id); let exists = { use error::MapErrInto as MEI; try!(MEI::map_err_into(id.exists(), LEK::StoreReadError)) }; if !exists { warn!("Does exist in store but not on FS: {:?}", id); Err(LEK::LinkTargetDoesNotExist.into_error()) } else { Ok(()) } } else { warn!("Does not exist in store: {:?}", id); Err(LEK::LinkTargetDoesNotExist.into_error()) } }) }; /// Helper function to create a SLCECD::OneDirectionalLink error object #[inline] let mk_one_directional_link_err = |src: StoreId, target: StoreId| -> SLCE { // construct the error let custom = SLCECD::OneDirectionalLink { source: src, target: target, }; SLCEK::StoreLinkConsistencyError .into_error() .with_custom_data(custom) }; /// Helper lambda to check whether the _incoming_ links of each entry actually also /// appear in the _outgoing_ list of the linked entry let incoming_links_exists_as_outgoing_links = |src: &StoreId, linking: &Linking, network: &HashMap<StoreId, Linking>| -> Result<()> { linking .incoming .iter() .fold_result(|link| { // Check whether the links which are _incoming_ on _src_ are outgoing // in each of the links in the incoming list. let incoming_consistent = network.get(link) .map(|l| l.outgoing.contains(src)) .unwrap_or(false); if !incoming_consistent { Err(mk_one_directional_link_err(src.clone(), link.clone())) } else { Ok(()) } }) }; /// Helper lambda to check whether the _outgoing links of each entry actually also /// appear in the _incoming_ list of the linked entry let outgoing_links_exist_as_incoming_links = |src: &StoreId, linking: &Linking, network: &HashMap<StoreId, Linking>| -> Result<()> { linking .outgoing .iter() .fold_result(|link| { // Check whether the links which are _outgoing_ on _src_ are incoming // in each of the links in the outgoing list. 
let outgoing_consistent = network.get(link) .map(|l| l.incoming.contains(src)) .unwrap_or(false); if !outgoing_consistent { Err(mk_one_directional_link_err(link.clone(), src.clone())) } else { Ok(()) } }) }; aggregate_link_network(&self) .and_then(|nw| { all_collected_storeids_exist(&nw) .map(|_| nw) .map_err_into(SLCEK::LinkHandlingError) }) .and_then(|nw| { nw.iter().fold_result(|(id, linking)| { try!(incoming_links_exists_as_outgoing_links(id, linking, &nw)); try!(outgoing_links_exist_as_incoming_links(id, linking, &nw)); Ok(()) }) }) .map(|_| ()) } } } #[cfg(test)] mod test { use std::path::PathBuf; use libimagstore::store::Store; use super::InternalLinker; fn setup_logging() { use env_logger; let _ = env_logger::init().unwrap_or(()); } pub fn get_store() -> Store { Store::new(PathBuf::from("/"), None).unwrap() } #[test] fn test_new_entry_no_links() { setup_logging(); let store = get_store(); let entry = store.create(PathBuf::from("test_new_entry_no_links")).unwrap(); let links = entry.get_internal_links(); assert!(links.is_ok()); let links = links.unwrap(); assert_eq!(links.collect::<Vec<_>>().len(), 0); } #[test] fn test_link_two_entries() { setup_logging(); let store = get_store(); let mut e1 = store.create(PathBuf::from("test_link_two_entries1")).unwrap(); assert!(e1.get_internal_links().is_ok()); let mut e2 = store.create(PathBuf::from("test_link_two_entries2")).unwrap(); assert!(e2.get_internal_links().is_ok()); { assert!(e1.add_internal_link(&mut e2).is_ok()); let e1_links = e1.get_internal_links().unwrap().collect::<Vec<_>>(); let e2_links = e2.get_internal_links().unwrap().collect::<Vec<_>>(); debug!("1 has links: {:?}", e1_links); debug!("2 has links: {:?}", e2_links); assert_eq!(e1_links.len(), 1); assert_eq!(e2_links.len(), 1); assert!(e1_links.first().map(|l| l.clone().with_base(store.path().clone()).eq_store_id(e2.get_location())).unwrap_or(false)); assert!(e2_links.first().map(|l| 
l.clone().with_base(store.path().clone()).eq_store_id(e1.get_location())).unwrap_or(false)); } { assert!(e1.remove_internal_link(&mut e2).is_ok()); println!("{:?}", e2.to_str()); let e2_links = e2.get_internal_links().unwrap().collect::<Vec<_>>(); assert_eq!(e2_links.len(), 0, "Expected [], got: {:?}", e2_links); println!("{:?}", e1.to_str()); let e1_links = e1.get_internal_links().unwrap().collect::<Vec<_>>(); assert_eq!(e1_links.len(), 0, "Expected [], got: {:?}", e1_links); } } #[test] fn test_multiple_links() { setup_logging(); let store = get_store(); let mut e1 = store.retrieve(PathBuf::from("1")).unwrap(); let mut e2 = store.retrieve(PathBuf::from("2")).unwrap(); let mut e3 = store.retrieve(PathBuf::from("3")).unwrap(); let mut e4 = store.retrieve(PathBuf::from("4")).unwrap(); let mut e5 = store.retrieve(PathBuf::from("5")).unwrap(); assert!(e1.add_internal_link(&mut e2).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert!(e1.add_internal_link(&mut e3).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 2); assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert!(e1.add_internal_link(&mut e4).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 3); assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); 
assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert!(e1.add_internal_link(&mut e5).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 4); assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert!(e5.remove_internal_link(&mut e1).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 3); assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert!(e4.remove_internal_link(&mut e1).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 2); assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert!(e3.remove_internal_link(&mut e1).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 1); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert!(e2.remove_internal_link(&mut e1).is_ok()); assert_eq!(e1.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); 
assert_eq!(e2.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e3.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e4.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); assert_eq!(e5.get_internal_links().unwrap().collect::<Vec<_>>().len(), 0); } }
// Copyright 2017 Serde Developers // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #[macro_use] extern crate serde_derive; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::ffi::CString; use std::net; use std::num::Wrapping; use std::path::{Path, PathBuf}; use std::rc::Rc; use std::sync::Arc; use std::time::{Duration, UNIX_EPOCH}; #[cfg(unix)] use std::str; extern crate serde_test; use self::serde_test::{assert_ser_tokens, assert_ser_tokens_error, Configure, Token}; extern crate fnv; use self::fnv::FnvHasher; #[macro_use] mod macros; ////////////////////////////////////////////////////////////////////////// #[derive(Serialize)] struct UnitStruct; #[derive(Serialize)] struct TupleStruct(i32, i32, i32); #[derive(Serialize)] struct Struct { a: i32, b: i32, c: i32, } #[derive(Serialize, PartialEq, Debug)] enum Enum { Unit, One(i32), Seq(i32, i32), Map { a: i32, b: i32, }, #[serde(skip_serializing)] SkippedUnit, #[serde(skip_serializing)] SkippedOne(i32), #[serde(skip_serializing)] SkippedSeq(i32, i32), #[serde(skip_serializing)] SkippedMap { _a: i32, _b: i32, }, } ////////////////////////////////////////////////////////////////////////// macro_rules! declare_tests { ( $readable:tt $($name:ident { $($value:expr => $tokens:expr,)+ })+ ) => { $( #[test] fn $name() { $( assert_ser_tokens(&$value.$readable(), $tokens); )+ } )+ }; ($($name:ident { $($value:expr => $tokens:expr,)+ })+) => { $( #[test] fn $name() { $( assert_ser_tokens(&$value, $tokens); )+ } )+ } } declare_tests! 
{ test_unit { () => &[Token::Unit], } test_bool { true => &[Token::Bool(true)], false => &[Token::Bool(false)], } test_isizes { 0i8 => &[Token::I8(0)], 0i16 => &[Token::I16(0)], 0i32 => &[Token::I32(0)], 0i64 => &[Token::I64(0)], } test_usizes { 0u8 => &[Token::U8(0)], 0u16 => &[Token::U16(0)], 0u32 => &[Token::U32(0)], 0u64 => &[Token::U64(0)], } test_floats { 0f32 => &[Token::F32(0.)], 0f64 => &[Token::F64(0.)], } test_char { 'a' => &[Token::Char('a')], } test_str { "abc" => &[Token::Str("abc")], "abc".to_owned() => &[Token::Str("abc")], } test_option { None::<i32> => &[Token::None], Some(1) => &[ Token::Some, Token::I32(1), ], } test_result { Ok::<i32, i32>(0) => &[ Token::NewtypeVariant { name: "Result", variant: "Ok" }, Token::I32(0), ], Err::<i32, i32>(1) => &[ Token::NewtypeVariant { name: "Result", variant: "Err" }, Token::I32(1), ], } test_slice { &[0][..0] => &[ Token::Seq { len: Some(0) }, Token::SeqEnd, ], &[1, 2, 3][..] => &[ Token::Seq { len: Some(3) }, Token::I32(1), Token::I32(2), Token::I32(3), Token::SeqEnd, ], } test_array { [0; 0] => &[ Token::Tuple { len: 0 }, Token::TupleEnd, ], [1, 2, 3] => &[ Token::Tuple { len: 3 }, Token::I32(1), Token::I32(2), Token::I32(3), Token::TupleEnd, ], } test_vec { Vec::<isize>::new() => &[ Token::Seq { len: Some(0) }, Token::SeqEnd, ], vec![vec![], vec![1], vec![2, 3]] => &[ Token::Seq { len: Some(3) }, Token::Seq { len: Some(0) }, Token::SeqEnd, Token::Seq { len: Some(1) }, Token::I32(1), Token::SeqEnd, Token::Seq { len: Some(2) }, Token::I32(2), Token::I32(3), Token::SeqEnd, Token::SeqEnd, ], } test_btreeset { BTreeSet::<isize>::new() => &[ Token::Seq { len: Some(0) }, Token::SeqEnd, ], btreeset![1] => &[ Token::Seq { len: Some(1) }, Token::I32(1), Token::SeqEnd, ], } test_hashset { HashSet::<isize>::new() => &[ Token::Seq { len: Some(0) }, Token::SeqEnd, ], hashset![1] => &[ Token::Seq { len: Some(1) }, Token::I32(1), Token::SeqEnd, ], hashset![FnvHasher @ 1] => &[ Token::Seq { len: Some(1) }, Token::I32(1), 
Token::SeqEnd, ], } test_tuple { (1,) => &[ Token::Tuple { len: 1 }, Token::I32(1), Token::TupleEnd, ], (1, 2, 3) => &[ Token::Tuple { len: 3 }, Token::I32(1), Token::I32(2), Token::I32(3), Token::TupleEnd, ], } test_btreemap { btreemap![1 => 2] => &[ Token::Map { len: Some(1) }, Token::I32(1), Token::I32(2), Token::MapEnd, ], btreemap![1 => 2, 3 => 4] => &[ Token::Map { len: Some(2) }, Token::I32(1), Token::I32(2), Token::I32(3), Token::I32(4), Token::MapEnd, ], btreemap![1 => btreemap![], 2 => btreemap![3 => 4, 5 => 6]] => &[ Token::Map { len: Some(2) }, Token::I32(1), Token::Map { len: Some(0) }, Token::MapEnd, Token::I32(2), Token::Map { len: Some(2) }, Token::I32(3), Token::I32(4), Token::I32(5), Token::I32(6), Token::MapEnd, Token::MapEnd, ], } test_hashmap { HashMap::<isize, isize>::new() => &[ Token::Map { len: Some(0) }, Token::MapEnd, ], hashmap![1 => 2] => &[ Token::Map { len: Some(1) }, Token::I32(1), Token::I32(2), Token::MapEnd, ], hashmap![FnvHasher @ 1 => 2] => &[ Token::Map { len: Some(1) }, Token::I32(1), Token::I32(2), Token::MapEnd, ], } test_unit_struct { UnitStruct => &[Token::UnitStruct { name: "UnitStruct" }], } test_tuple_struct { TupleStruct(1, 2, 3) => &[ Token::TupleStruct { name: "TupleStruct", len: 3 }, Token::I32(1), Token::I32(2), Token::I32(3), Token::TupleStructEnd, ], } test_struct { Struct { a: 1, b: 2, c: 3 } => &[ Token::Struct { name: "Struct", len: 3 }, Token::Str("a"), Token::I32(1), Token::Str("b"), Token::I32(2), Token::Str("c"), Token::I32(3), Token::StructEnd, ], } test_enum { Enum::Unit => &[Token::UnitVariant { name: "Enum", variant: "Unit" }], Enum::One(42) => &[Token::NewtypeVariant { name: "Enum", variant: "One" }, Token::I32(42)], Enum::Seq(1, 2) => &[ Token::TupleVariant { name: "Enum", variant: "Seq", len: 2 }, Token::I32(1), Token::I32(2), Token::TupleVariantEnd, ], Enum::Map { a: 1, b: 2 } => &[ Token::StructVariant { name: "Enum", variant: "Map", len: 2 }, Token::Str("a"), Token::I32(1), Token::Str("b"), 
Token::I32(2), Token::StructVariantEnd, ], } test_box { Box::new(0i32) => &[Token::I32(0)], } test_boxed_slice { Box::new([0, 1, 2]) => &[ Token::Tuple { len: 3 }, Token::I32(0), Token::I32(1), Token::I32(2), Token::TupleEnd, ], } test_duration { Duration::new(1, 2) => &[ Token::Struct { name: "Duration", len: 2 }, Token::Str("secs"), Token::U64(1), Token::Str("nanos"), Token::U32(2), Token::StructEnd, ], } test_system_time { UNIX_EPOCH + Duration::new(1, 200) => &[ Token::Struct { name: "SystemTime", len: 2 }, Token::Str("secs_since_epoch"), Token::U64(1), Token::Str("nanos_since_epoch"), Token::U32(200), Token::StructEnd, ], } test_range { 1u32..2u32 => &[ Token::Struct { name: "Range", len: 2 }, Token::Str("start"), Token::U32(1), Token::Str("end"), Token::U32(2), Token::StructEnd, ], } test_path { Path::new("/usr/local/lib") => &[ Token::Str("/usr/local/lib"), ], } test_path_buf { PathBuf::from("/usr/local/lib") => &[ Token::Str("/usr/local/lib"), ], } test_cstring { CString::new("abc").unwrap() => &[ Token::Bytes(b"abc"), ], } test_cstr { (&*CString::new("abc").unwrap()) => &[ Token::Bytes(b"abc"), ], } test_rc { Rc::new(true) => &[ Token::Bool(true), ], } test_arc { Arc::new(true) => &[ Token::Bool(true), ], } test_wrapping { Wrapping(1usize) => &[ Token::U64(1), ], } } declare_tests! { readable test_net_ipv4addr_readable { "1.2.3.4".parse::<net::Ipv4Addr>().unwrap() => &[Token::Str("1.2.3.4")], } test_net_ipv6addr_readable { "::1".parse::<net::Ipv6Addr>().unwrap() => &[Token::Str("::1")], } test_net_ipaddr_readable { "1.2.3.4".parse::<net::IpAddr>().unwrap() => &[Token::Str("1.2.3.4")], } test_net_socketaddr_readable { "1.2.3.4:1234".parse::<net::SocketAddr>().unwrap() => &[Token::Str("1.2.3.4:1234")], "1.2.3.4:1234".parse::<net::SocketAddrV4>().unwrap() => &[Token::Str("1.2.3.4:1234")], "[::1]:1234".parse::<net::SocketAddrV6>().unwrap() => &[Token::Str("[::1]:1234")], } } declare_tests! 
{ compact test_net_ipv4addr_compact { net::Ipv4Addr::from(*b"1234") => &seq![ Token::Tuple { len: 4 }, seq b"1234".iter().map(|&b| Token::U8(b)), Token::TupleEnd, ], } test_net_ipv6addr_compact { net::Ipv6Addr::from(*b"1234567890123456") => &seq![ Token::Tuple { len: 16 }, seq b"1234567890123456".iter().map(|&b| Token::U8(b)), Token::TupleEnd, ], } test_net_ipaddr_compact { net::IpAddr::from(*b"1234") => &seq![ Token::NewtypeVariant { name: "IpAddr", variant: "V4" }, Token::Tuple { len: 4 }, seq b"1234".iter().map(|&b| Token::U8(b)), Token::TupleEnd, ], } test_net_socketaddr_compact { net::SocketAddr::from((*b"1234567890123456", 1234)) => &seq![ Token::NewtypeVariant { name: "SocketAddr", variant: "V6" }, Token::Tuple { len: 2 }, Token::Tuple { len: 16 }, seq b"1234567890123456".iter().map(|&b| Token::U8(b)), Token::TupleEnd, Token::U16(1234), Token::TupleEnd, ], net::SocketAddrV4::new(net::Ipv4Addr::from(*b"1234"), 1234) => &seq![ Token::Tuple { len: 2 }, Token::Tuple { len: 4 }, seq b"1234".iter().map(|&b| Token::U8(b)), Token::TupleEnd, Token::U16(1234), Token::TupleEnd, ], net::SocketAddrV6::new(net::Ipv6Addr::from(*b"1234567890123456"), 1234, 0, 0) => &seq![ Token::Tuple { len: 2 }, Token::Tuple { len: 16 }, seq b"1234567890123456".iter().map(|&b| Token::U8(b)), Token::TupleEnd, Token::U16(1234), Token::TupleEnd, ], } } // Serde's implementation is not unstable, but the constructors are. #[cfg(feature = "unstable")] declare_tests! 
{ test_rc_dst { Rc::<str>::from("s") => &[ Token::Str("s"), ], Rc::<[bool]>::from(&[true][..]) => &[ Token::Seq { len: Some(1) }, Token::Bool(true), Token::SeqEnd, ], } test_arc_dst { Arc::<str>::from("s") => &[ Token::Str("s"), ], Arc::<[bool]>::from(&[true][..]) => &[ Token::Seq { len: Some(1) }, Token::Bool(true), Token::SeqEnd, ], } } #[test] #[cfg(unix)] fn test_cannot_serialize_paths() { let path = unsafe { str::from_utf8_unchecked(b"Hello \xF0\x90\x80World") }; assert_ser_tokens_error( &Path::new(path), &[], "path contains invalid UTF-8 characters", ); let mut path_buf = PathBuf::new(); path_buf.push(path); assert_ser_tokens_error(&path_buf, &[], "path contains invalid UTF-8 characters"); } #[test] fn test_enum_skipped() { assert_ser_tokens_error( &Enum::SkippedUnit, &[], "the enum variant Enum::SkippedUnit cannot be serialized", ); assert_ser_tokens_error( &Enum::SkippedOne(42), &[], "the enum variant Enum::SkippedOne cannot be serialized", ); assert_ser_tokens_error( &Enum::SkippedSeq(1, 2), &[], "the enum variant Enum::SkippedSeq cannot be serialized", ); assert_ser_tokens_error( &Enum::SkippedMap { _a: 1, _b: 2 }, &[], "the enum variant Enum::SkippedMap cannot be serialized", ); } // Test Weak serialize impls // Copyright 2017 Serde Developers // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms.
#[macro_use] extern crate serde_derive; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::ffi::CString; use std::mem; use std::net; use std::num::Wrapping; use std::path::{Path, PathBuf}; use std::rc::{Rc, Weak as RcWeak}; use std::sync::{Arc, Weak as ArcWeak}; use std::time::{Duration, UNIX_EPOCH}; #[cfg(unix)] use std::str; extern crate serde_test; use self::serde_test::{assert_ser_tokens, assert_ser_tokens_error, Configure, Token}; extern crate fnv; use self::fnv::FnvHasher; #[macro_use] mod macros; ////////////////////////////////////////////////////////////////////////// #[derive(Serialize)] struct UnitStruct; #[derive(Serialize)] struct TupleStruct(i32, i32, i32); #[derive(Serialize)] struct Struct { a: i32, b: i32, c: i32, } #[derive(Serialize, PartialEq, Debug)] enum Enum { Unit, One(i32), Seq(i32, i32), Map { a: i32, b: i32, }, #[serde(skip_serializing)] SkippedUnit, #[serde(skip_serializing)] SkippedOne(i32), #[serde(skip_serializing)] SkippedSeq(i32, i32), #[serde(skip_serializing)] SkippedMap { _a: i32, _b: i32, }, } ////////////////////////////////////////////////////////////////////////// macro_rules! declare_tests { ( $readable:tt $($name:ident { $($value:expr => $tokens:expr,)+ })+ ) => { $( #[test] fn $name() { $( assert_ser_tokens(&$value.$readable(), $tokens); )+ } )+ }; ($($name:ident { $($value:expr => $tokens:expr,)+ })+) => { $( #[test] fn $name() { $( assert_ser_tokens(&$value, $tokens); )+ } )+ } } declare_tests! 
{ test_unit { () => &[Token::Unit], } test_bool { true => &[Token::Bool(true)], false => &[Token::Bool(false)], } test_isizes { 0i8 => &[Token::I8(0)], 0i16 => &[Token::I16(0)], 0i32 => &[Token::I32(0)], 0i64 => &[Token::I64(0)], } test_usizes { 0u8 => &[Token::U8(0)], 0u16 => &[Token::U16(0)], 0u32 => &[Token::U32(0)], 0u64 => &[Token::U64(0)], } test_floats { 0f32 => &[Token::F32(0.)], 0f64 => &[Token::F64(0.)], } test_char { 'a' => &[Token::Char('a')], } test_str { "abc" => &[Token::Str("abc")], "abc".to_owned() => &[Token::Str("abc")], } test_option { None::<i32> => &[Token::None], Some(1) => &[ Token::Some, Token::I32(1), ], } test_result { Ok::<i32, i32>(0) => &[ Token::NewtypeVariant { name: "Result", variant: "Ok" }, Token::I32(0), ], Err::<i32, i32>(1) => &[ Token::NewtypeVariant { name: "Result", variant: "Err" }, Token::I32(1), ], } test_slice { &[0][..0] => &[ Token::Seq { len: Some(0) }, Token::SeqEnd, ], &[1, 2, 3][..] => &[ Token::Seq { len: Some(3) }, Token::I32(1), Token::I32(2), Token::I32(3), Token::SeqEnd, ], } test_array { [0; 0] => &[ Token::Tuple { len: 0 }, Token::TupleEnd, ], [1, 2, 3] => &[ Token::Tuple { len: 3 }, Token::I32(1), Token::I32(2), Token::I32(3), Token::TupleEnd, ], } test_vec { Vec::<isize>::new() => &[ Token::Seq { len: Some(0) }, Token::SeqEnd, ], vec![vec![], vec![1], vec![2, 3]] => &[ Token::Seq { len: Some(3) }, Token::Seq { len: Some(0) }, Token::SeqEnd, Token::Seq { len: Some(1) }, Token::I32(1), Token::SeqEnd, Token::Seq { len: Some(2) }, Token::I32(2), Token::I32(3), Token::SeqEnd, Token::SeqEnd, ], } test_btreeset { BTreeSet::<isize>::new() => &[ Token::Seq { len: Some(0) }, Token::SeqEnd, ], btreeset![1] => &[ Token::Seq { len: Some(1) }, Token::I32(1), Token::SeqEnd, ], } test_hashset { HashSet::<isize>::new() => &[ Token::Seq { len: Some(0) }, Token::SeqEnd, ], hashset![1] => &[ Token::Seq { len: Some(1) }, Token::I32(1), Token::SeqEnd, ], hashset![FnvHasher @ 1] => &[ Token::Seq { len: Some(1) }, Token::I32(1), 
Token::SeqEnd, ], } test_tuple { (1,) => &[ Token::Tuple { len: 1 }, Token::I32(1), Token::TupleEnd, ], (1, 2, 3) => &[ Token::Tuple { len: 3 }, Token::I32(1), Token::I32(2), Token::I32(3), Token::TupleEnd, ], } test_btreemap { btreemap![1 => 2] => &[ Token::Map { len: Some(1) }, Token::I32(1), Token::I32(2), Token::MapEnd, ], btreemap![1 => 2, 3 => 4] => &[ Token::Map { len: Some(2) }, Token::I32(1), Token::I32(2), Token::I32(3), Token::I32(4), Token::MapEnd, ], btreemap![1 => btreemap![], 2 => btreemap![3 => 4, 5 => 6]] => &[ Token::Map { len: Some(2) }, Token::I32(1), Token::Map { len: Some(0) }, Token::MapEnd, Token::I32(2), Token::Map { len: Some(2) }, Token::I32(3), Token::I32(4), Token::I32(5), Token::I32(6), Token::MapEnd, Token::MapEnd, ], } test_hashmap { HashMap::<isize, isize>::new() => &[ Token::Map { len: Some(0) }, Token::MapEnd, ], hashmap![1 => 2] => &[ Token::Map { len: Some(1) }, Token::I32(1), Token::I32(2), Token::MapEnd, ], hashmap![FnvHasher @ 1 => 2] => &[ Token::Map { len: Some(1) }, Token::I32(1), Token::I32(2), Token::MapEnd, ], } test_unit_struct { UnitStruct => &[Token::UnitStruct { name: "UnitStruct" }], } test_tuple_struct { TupleStruct(1, 2, 3) => &[ Token::TupleStruct { name: "TupleStruct", len: 3 }, Token::I32(1), Token::I32(2), Token::I32(3), Token::TupleStructEnd, ], } test_struct { Struct { a: 1, b: 2, c: 3 } => &[ Token::Struct { name: "Struct", len: 3 }, Token::Str("a"), Token::I32(1), Token::Str("b"), Token::I32(2), Token::Str("c"), Token::I32(3), Token::StructEnd, ], } test_enum { Enum::Unit => &[Token::UnitVariant { name: "Enum", variant: "Unit" }], Enum::One(42) => &[Token::NewtypeVariant { name: "Enum", variant: "One" }, Token::I32(42)], Enum::Seq(1, 2) => &[ Token::TupleVariant { name: "Enum", variant: "Seq", len: 2 }, Token::I32(1), Token::I32(2), Token::TupleVariantEnd, ], Enum::Map { a: 1, b: 2 } => &[ Token::StructVariant { name: "Enum", variant: "Map", len: 2 }, Token::Str("a"), Token::I32(1), Token::Str("b"), 
Token::I32(2), Token::StructVariantEnd, ], } test_box { Box::new(0i32) => &[Token::I32(0)], } test_boxed_slice { Box::new([0, 1, 2]) => &[ Token::Tuple { len: 3 }, Token::I32(0), Token::I32(1), Token::I32(2), Token::TupleEnd, ], } test_duration { Duration::new(1, 2) => &[ Token::Struct { name: "Duration", len: 2 }, Token::Str("secs"), Token::U64(1), Token::Str("nanos"), Token::U32(2), Token::StructEnd, ], } test_system_time { UNIX_EPOCH + Duration::new(1, 200) => &[ Token::Struct { name: "SystemTime", len: 2 }, Token::Str("secs_since_epoch"), Token::U64(1), Token::Str("nanos_since_epoch"), Token::U32(200), Token::StructEnd, ], } test_range { 1u32..2u32 => &[ Token::Struct { name: "Range", len: 2 }, Token::Str("start"), Token::U32(1), Token::Str("end"), Token::U32(2), Token::StructEnd, ], } test_path { Path::new("/usr/local/lib") => &[ Token::Str("/usr/local/lib"), ], } test_path_buf { PathBuf::from("/usr/local/lib") => &[ Token::Str("/usr/local/lib"), ], } test_cstring { CString::new("abc").unwrap() => &[ Token::Bytes(b"abc"), ], } test_cstr { (&*CString::new("abc").unwrap()) => &[ Token::Bytes(b"abc"), ], } test_rc { Rc::new(true) => &[ Token::Bool(true), ], } test_rc_weak_some { { let rc = Rc::new(true); mem::forget(rc.clone()); Rc::downgrade(&rc) } => &[ Token::Some, Token::Bool(true), ], } test_rc_weak_none { RcWeak::<bool>::new() => &[ Token::None, ], } test_arc { Arc::new(true) => &[ Token::Bool(true), ], } test_arc_weak_some { { let arc = Arc::new(true); mem::forget(arc.clone()); Arc::downgrade(&arc) } => &[ Token::Some, Token::Bool(true), ], } test_arc_weak_none { ArcWeak::<bool>::new() => &[ Token::None, ], } test_wrapping { Wrapping(1usize) => &[ Token::U64(1), ], } } declare_tests! 
{ readable test_net_ipv4addr_readable { "1.2.3.4".parse::<net::Ipv4Addr>().unwrap() => &[Token::Str("1.2.3.4")], } test_net_ipv6addr_readable { "::1".parse::<net::Ipv6Addr>().unwrap() => &[Token::Str("::1")], } test_net_ipaddr_readable { "1.2.3.4".parse::<net::IpAddr>().unwrap() => &[Token::Str("1.2.3.4")], } test_net_socketaddr_readable { "1.2.3.4:1234".parse::<net::SocketAddr>().unwrap() => &[Token::Str("1.2.3.4:1234")], "1.2.3.4:1234".parse::<net::SocketAddrV4>().unwrap() => &[Token::Str("1.2.3.4:1234")], "[::1]:1234".parse::<net::SocketAddrV6>().unwrap() => &[Token::Str("[::1]:1234")], } } declare_tests! { compact test_net_ipv4addr_compact { net::Ipv4Addr::from(*b"1234") => &seq![ Token::Tuple { len: 4 }, seq b"1234".iter().map(|&b| Token::U8(b)), Token::TupleEnd, ], } test_net_ipv6addr_compact { net::Ipv6Addr::from(*b"1234567890123456") => &seq![ Token::Tuple { len: 16 }, seq b"1234567890123456".iter().map(|&b| Token::U8(b)), Token::TupleEnd, ], } test_net_ipaddr_compact { net::IpAddr::from(*b"1234") => &seq![ Token::NewtypeVariant { name: "IpAddr", variant: "V4" }, Token::Tuple { len: 4 }, seq b"1234".iter().map(|&b| Token::U8(b)), Token::TupleEnd, ], } test_net_socketaddr_compact { net::SocketAddr::from((*b"1234567890123456", 1234)) => &seq![ Token::NewtypeVariant { name: "SocketAddr", variant: "V6" }, Token::Tuple { len: 2 }, Token::Tuple { len: 16 }, seq b"1234567890123456".iter().map(|&b| Token::U8(b)), Token::TupleEnd, Token::U16(1234), Token::TupleEnd, ], net::SocketAddrV4::new(net::Ipv4Addr::from(*b"1234"), 1234) => &seq![ Token::Tuple { len: 2 }, Token::Tuple { len: 4 }, seq b"1234".iter().map(|&b| Token::U8(b)), Token::TupleEnd, Token::U16(1234), Token::TupleEnd, ], net::SocketAddrV6::new(net::Ipv6Addr::from(*b"1234567890123456"), 1234, 0, 0) => &seq![ Token::Tuple { len: 2 }, Token::Tuple { len: 16 }, seq b"1234567890123456".iter().map(|&b| Token::U8(b)), Token::TupleEnd, Token::U16(1234), Token::TupleEnd, ], } } // Serde's implementation is not 
unstable, but the constructors are. #[cfg(feature = "unstable")] declare_tests! { test_rc_dst { Rc::<str>::from("s") => &[ Token::Str("s"), ], Rc::<[bool]>::from(&[true][..]) => &[ Token::Seq { len: Some(1) }, Token::Bool(true), Token::SeqEnd, ], } test_arc_dst { Arc::<str>::from("s") => &[ Token::Str("s"), ], Arc::<[bool]>::from(&[true][..]) => &[ Token::Seq { len: Some(1) }, Token::Bool(true), Token::SeqEnd, ], } } #[test] #[cfg(unix)] fn test_cannot_serialize_paths() { let path = unsafe { str::from_utf8_unchecked(b"Hello \xF0\x90\x80World") }; assert_ser_tokens_error( &Path::new(path), &[], "path contains invalid UTF-8 characters", ); let mut path_buf = PathBuf::new(); path_buf.push(path); assert_ser_tokens_error(&path_buf, &[], "path contains invalid UTF-8 characters"); } #[test] fn test_enum_skipped() { assert_ser_tokens_error( &Enum::SkippedUnit, &[], "the enum variant Enum::SkippedUnit cannot be serialized", ); assert_ser_tokens_error( &Enum::SkippedOne(42), &[], "the enum variant Enum::SkippedOne cannot be serialized", ); assert_ser_tokens_error( &Enum::SkippedSeq(1, 2), &[], "the enum variant Enum::SkippedSeq cannot be serialized", ); assert_ser_tokens_error( &Enum::SkippedMap { _a: 1, _b: 2 }, &[], "the enum variant Enum::SkippedMap cannot be serialized", ); }
//! Multi-producer, single-consumer FIFO queue communication primitives. //! //! This module provides message-based communication over channels, concretely //! defined among three types: //! //! * [`Sender`] //! * [`SyncSender`] //! * [`Receiver`] //! //! A [`Sender`] or [`SyncSender`] is used to send data to a [`Receiver`]. Both //! senders are clone-able (multi-producer) such that many threads can send //! simultaneously to one receiver (single-consumer). //! //! These channels come in two flavors: //! //! 1. An asynchronous, infinitely buffered channel. The [`channel`] function //! will return a `(Sender, Receiver)` tuple where all sends will be //! **asynchronous** (they never block). The channel conceptually has an //! infinite buffer. //! //! 2. A synchronous, bounded channel. The [`sync_channel`] function will //! return a `(SyncSender, Receiver)` tuple where the storage for pending //! messages is a pre-allocated buffer of a fixed size. All sends will be //! **synchronous** by blocking until there is buffer space available. Note //! that a bound of 0 is allowed, causing the channel to become a "rendezvous" //! channel where each sender atomically hands off a message to a receiver. //! //! [`send`]: Sender::send //! //! ## Disconnection //! //! The send and receive operations on channels will all return a [`Result`] //! indicating whether the operation succeeded or not. An unsuccessful operation //! is normally indicative of the other half of a channel having "hung up" by //! being dropped in its corresponding thread. //! //! Once half of a channel has been deallocated, most operations can no longer //! continue to make progress, so [`Err`] will be returned. Many applications //! will continue to [`unwrap`] the results returned from this module, //! instigating a propagation of failure among threads if one unexpectedly dies. //! //! [`unwrap`]: Result::unwrap //! //! # Examples //! //! Simple usage: //! //! ``` //! use std::thread; //! 
use std::sync::mpsc::channel; //! //! // Create a simple streaming channel //! let (tx, rx) = channel(); //! thread::spawn(move|| { //! tx.send(10).unwrap(); //! }); //! assert_eq!(rx.recv().unwrap(), 10); //! ``` //! //! Shared usage: //! //! ``` //! use std::thread; //! use std::sync::mpsc::channel; //! //! // Create a shared channel that can be sent along from many threads //! // where tx is the sending half (tx for transmission), and rx is the receiving //! // half (rx for receiving). //! let (tx, rx) = channel(); //! for i in 0..10 { //! let tx = tx.clone(); //! thread::spawn(move|| { //! tx.send(i).unwrap(); //! }); //! } //! //! for _ in 0..10 { //! let j = rx.recv().unwrap(); //! assert!(0 <= j && j < 10); //! } //! ``` //! //! Propagating panics: //! //! ``` //! use std::sync::mpsc::channel; //! //! // The call to recv() will return an error because the channel has already //! // hung up (or been deallocated) //! let (tx, rx) = channel::<i32>(); //! drop(tx); //! assert!(rx.recv().is_err()); //! ``` //! //! Synchronous channels: //! //! ``` //! use std::thread; //! use std::sync::mpsc::sync_channel; //! //! let (tx, rx) = sync_channel::<i32>(0); //! thread::spawn(move|| { //! // This will wait for the parent thread to start receiving //! tx.send(53).unwrap(); //! }); //! rx.recv().unwrap(); //! ``` #![stable(feature = "rust1", since = "1.0.0")] #[cfg(all(test, not(target_os = "emscripten")))] mod tests; #[cfg(all(test, not(target_os = "emscripten")))] mod sync_tests; // A description of how Rust's channel implementation works // // Channels are supposed to be the basic building block for all other // concurrent primitives that are used in Rust. As a result, the channel type // needs to be highly optimized, flexible, and broad enough for use everywhere. // // The choice of implementation of all channels is to be built on lock-free data // structures. The channels themselves are then consequently also lock-free data // structures. 
As always with lock-free code, this is a very "here be dragons" // territory, especially because I'm unaware of any academic papers that have // gone into great length about channels of these flavors. // // ## Flavors of channels // // From the perspective of a consumer of this library, there is only one flavor // of channel. This channel can be used as a stream and cloned to allow multiple // senders. Under the hood, however, there are actually three flavors of // channels in play. // // * Flavor::Oneshots - these channels are highly optimized for the one-send use // case. They contain as few atomics as possible and // involve one and exactly one allocation. // * Streams - these channels are optimized for the non-shared use case. They // use a different concurrent queue that is more tailored for this // use case. The initial allocation of this flavor of channel is not // optimized. // * Shared - this is the most general form of channel that this module offers, // a channel with multiple senders. This type is as optimized as it // can be, but the previous two types mentioned are much faster for // their use-cases. // // ## Concurrent queues // // The basic idea of Rust's Sender/Receiver types is that send() never blocks, // but recv() obviously blocks. This means that under the hood there must be // some shared and concurrent queue holding all of the actual data. // // With two flavors of channels, two flavors of queues are also used. We have // chosen to use queues from a well-known author that are abbreviated as SPSC // and MPSC (single producer, single consumer and multiple producer, single // consumer). SPSC queues are used for streams while MPSC queues are used for // shared channels. // // ### SPSC optimizations // // The SPSC queue found online is essentially a linked list of nodes where one // half of the nodes are the "queue of data" and the other half of nodes are a // cache of unused nodes. 
The unused nodes are used such that an allocation is // not required on every push() and a free doesn't need to happen on every // pop(). // // As found online, however, the cache of nodes is of an infinite size. This // means that if a channel at one point in its life had 50k items in the queue, // then the queue will always have the capacity for 50k items. I believed that // this was an unnecessary limitation of the implementation, so I have altered // the queue to optionally have a bound on the cache size. // // By default, streams will have an unbounded SPSC queue with a small-ish cache // size. The hope is that the cache is still large enough to have very fast // send() operations while not too large such that millions of channels can // coexist at once. // // ### MPSC optimizations // // Right now the MPSC queue has not been optimized. Like the SPSC queue, it uses // a linked list under the hood to earn its unboundedness, but I have not put // forth much effort into having a cache of nodes similar to the SPSC queue. // // For now, I believe that this is "ok" because shared channels are not the most // common type, but soon we may wish to revisit this queue choice and determine // another candidate for backend storage of shared channels. // // ## Overview of the Implementation // // Now that there's a little background on the concurrent queues used, it's // worth going into much more detail about the channels themselves. The basic // pseudocode for a send/recv are: // // // send(t) recv() // queue.push(t) return if queue.pop() // if increment() == -1 deschedule { // wakeup() if decrement() > 0 // cancel_deschedule() // } // queue.pop() // // As mentioned before, there are no locks in this implementation, only atomic // instructions are used. // // ### The internal atomic counter // // Every channel has a shared counter with each half to keep track of the size // of the queue. 
This counter is used to abort descheduling by the receiver and // to know when to wake up on the sending side. // // As seen in the pseudocode, senders will increment this count and receivers // will decrement the count. The theory behind this is that if a sender sees a // -1 count, it will wake up the receiver, and if the receiver sees a 1+ count, // then it doesn't need to block. // // The recv() method has a beginning call to pop(), and if successful, it needs // to decrement the count. It is a crucial implementation detail that this // decrement does *not* happen to the shared counter. If this were the case, // then it would be possible for the counter to be very negative when there were // no receivers waiting, in which case the senders would have to determine when // it was actually appropriate to wake up a receiver. // // Instead, the "steal count" is kept track of separately (not atomically // because it's only used by receivers), and then the decrement() call when // descheduling will lump in all of the recent steals into one large decrement. // // The implication of this is that if a sender sees a -1 count, then there's // guaranteed to be a waiter waiting! // // ## Native Implementation // // A major goal of these channels is to work seamlessly on and off the runtime. // All of the previous race conditions have been worded in terms of // scheduler-isms (which is obviously not available without the runtime). // // For now, native usage of channels (off the runtime) will fall back onto // mutexes/cond vars for descheduling/atomic decisions. The no-contention path // is still entirely lock-free, the "deschedule" blocks above are surrounded by // a mutex and the "wakeup" blocks involve grabbing a mutex and signaling on a // condition variable. // // ## Select // // Being able to support selection over channels has greatly influenced this // design, and not only does selection need to work inside the runtime, but also // outside the runtime. 
//
// The implementation is fairly straightforward. The goal of select() is not to
// return some data, but only to return which channel can receive data without
// blocking. The implementation is essentially the entire blocking procedure
// followed by an increment as soon as it's woken up. The cancellation procedure
// involves an increment and swapping out of to_wake to acquire ownership of the
// thread to unblock.
//
// Sadly this current implementation requires multiple allocations, so I have
// seen the throughput of select() be much worse than it should be. I do not
// believe that there is anything fundamental that needs to change about these
// channels, however, in order to support a more efficient select().
//
// FIXME: Select is now removed, so these factors are ready to be cleaned up!
//
// # Conclusion
//
// And now that you've seen all the races that I found and attempted to fix,
// here's the code for you to find some more!

use crate::cell::UnsafeCell;
use crate::error;
use crate::fmt;
use crate::mem;
use crate::sync::Arc;
use crate::time::{Duration, Instant};

mod blocking;
mod mpsc_queue;
mod oneshot;
mod shared;
mod spsc_queue;
mod stream;
mod sync;

mod cache_aligned;

/// The receiving half of Rust's [`channel`] (or [`sync_channel`]) type.
/// This half can only be owned by one thread.
///
/// Messages sent to the channel can be retrieved using [`recv`].
///
/// [`recv`]: Receiver::recv
///
/// # Examples
///
/// ```rust
/// use std::sync::mpsc::channel;
/// use std::thread;
/// use std::time::Duration;
///
/// let (send, recv) = channel();
///
/// thread::spawn(move || {
///     send.send("Hello world!").unwrap();
///     thread::sleep(Duration::from_secs(2)); // block for two seconds
///     send.send("Delayed for 2 seconds").unwrap();
/// });
///
/// println!("{}", recv.recv().unwrap()); // Received immediately
/// println!("Waiting...");
/// println!("{}", recv.recv().unwrap()); // Received after 2 seconds
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Receiver<T> {
    // `UnsafeCell` because channel operations may replace the flavor in
    // place through `UnsafeFlavor::inner_mut` (e.g. when a oneshot channel
    // is upgraded); `Receiver` is `!Sync`, which keeps this single-threaded.
    inner: UnsafeCell<Flavor<T>>,
}

// The receiver port can be sent from place to place, so long as it
// is not used to receive non-sendable things.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Send> Send for Receiver<T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> !Sync for Receiver<T> {}

/// An iterator over messages on a [`Receiver`], created by [`iter`].
///
/// This iterator will block whenever [`next`] is called,
/// waiting for a new message, and [`None`] will be returned
/// when the corresponding channel has hung up.
///
/// [`iter`]: Receiver::iter
/// [`next`]: Iterator::next
///
/// # Examples
///
/// ```rust
/// use std::sync::mpsc::channel;
/// use std::thread;
///
/// let (send, recv) = channel();
///
/// thread::spawn(move || {
///     send.send(1u8).unwrap();
///     send.send(2u8).unwrap();
///     send.send(3u8).unwrap();
/// });
///
/// for x in recv.iter() {
///     println!("Got: {}", x);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct Iter<'a, T: 'a> {
    // Borrows the receiver; iteration drives `recv` without consuming it.
    rx: &'a Receiver<T>,
}

/// An iterator that attempts to yield all pending values for a [`Receiver`],
/// created by [`try_iter`].
///
/// [`None`] will be returned when there are no pending values remaining or
/// if the corresponding channel has hung up.
///
/// This iterator will never block the caller in order to wait for data to
/// become available. Instead, it will return [`None`].
///
/// [`try_iter`]: Receiver::try_iter
///
/// # Examples
///
/// ```rust
/// use std::sync::mpsc::channel;
/// use std::thread;
/// use std::time::Duration;
///
/// let (sender, receiver) = channel();
///
/// // Nothing is in the buffer yet
/// assert!(receiver.try_iter().next().is_none());
/// println!("Nothing in the buffer...");
///
/// thread::spawn(move || {
///     sender.send(1).unwrap();
///     sender.send(2).unwrap();
///     sender.send(3).unwrap();
/// });
///
/// println!("Going to sleep...");
/// thread::sleep(Duration::from_secs(2)); // block for two seconds
///
/// for x in receiver.try_iter() {
///     println!("Got: {}", x);
/// }
/// ```
#[stable(feature = "receiver_try_iter", since = "1.15.0")]
#[derive(Debug)]
pub struct TryIter<'a, T: 'a> {
    // Same borrow as `Iter`, but iteration uses the non-blocking `try_recv`.
    rx: &'a Receiver<T>,
}

/// An owning iterator over messages on a [`Receiver`],
/// created by **Receiver::into_iter**.
///
/// This iterator will block whenever [`next`]
/// is called, waiting for a new message, and [`None`] will be
/// returned if the corresponding channel has hung up.
///
/// [`next`]: Iterator::next
///
/// # Examples
///
/// ```rust
/// use std::sync::mpsc::channel;
/// use std::thread;
///
/// let (send, recv) = channel();
///
/// thread::spawn(move || {
///     send.send(1u8).unwrap();
///     send.send(2u8).unwrap();
///     send.send(3u8).unwrap();
/// });
///
/// for x in recv.into_iter() {
///     println!("Got: {}", x);
/// }
/// ```
#[stable(feature = "receiver_into_iter", since = "1.1.0")]
#[derive(Debug)]
pub struct IntoIter<T> {
    // Owns the receiver; the channel hangs up when this iterator is dropped.
    rx: Receiver<T>,
}

/// The sending-half of Rust's asynchronous [`channel`] type. This half can only be
/// owned by one thread, but it can be cloned to send to other threads.
///
/// Messages can be sent through this channel with [`send`].
///
/// [`send`]: Sender::send
///
/// # Examples
///
/// ```rust
/// use std::sync::mpsc::channel;
/// use std::thread;
///
/// let (sender, receiver) = channel();
/// let sender2 = sender.clone();
///
/// // First thread owns sender
/// thread::spawn(move || {
///     sender.send(1).unwrap();
/// });
///
/// // Second thread owns sender2
/// thread::spawn(move || {
///     sender2.send(2).unwrap();
/// });
///
/// let msg = receiver.recv().unwrap();
/// let msg2 = receiver.recv().unwrap();
///
/// assert_eq!(3, msg + msg2);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Sender<T> {
    // `UnsafeCell` because `send`/`clone` may upgrade the flavor in place
    // (see `mem::swap` in those methods); `Sender` is `!Sync`, which keeps
    // access single-threaded.
    inner: UnsafeCell<Flavor<T>>,
}

// The send port can be sent from place to place, so long as it
// is not used to send non-sendable things.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Send> Send for Sender<T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> !Sync for Sender<T> {}

/// The sending-half of Rust's synchronous [`sync_channel`] type.
///
/// Messages can be sent through this channel with [`send`] or [`try_send`].
///
/// [`send`] will block if there is no space in the internal buffer.
///
/// [`send`]: SyncSender::send
/// [`try_send`]: SyncSender::try_send
///
/// # Examples
///
/// ```rust
/// use std::sync::mpsc::sync_channel;
/// use std::thread;
///
/// // Create a sync_channel with buffer size 2
/// let (sync_sender, receiver) = sync_channel(2);
/// let sync_sender2 = sync_sender.clone();
///
/// // First thread owns sync_sender
/// thread::spawn(move || {
///     sync_sender.send(1).unwrap();
///     sync_sender.send(2).unwrap();
/// });
///
/// // Second thread owns sync_sender2
/// thread::spawn(move || {
///     sync_sender2.send(3).unwrap();
///     // thread will now block since the buffer is full
///     println!("Thread unblocked!");
/// });
///
/// let mut msg;
///
/// msg = receiver.recv().unwrap();
/// println!("message {} received", msg);
///
/// // "Thread unblocked!" will be printed now
///
/// msg = receiver.recv().unwrap();
/// println!("message {} received", msg);
///
/// msg = receiver.recv().unwrap();
///
/// println!("message {} received", msg);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SyncSender<T> {
    // Sync channels never change flavor, so no `UnsafeCell` is needed here.
    inner: Arc<sync::Packet<T>>,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Send> Send for SyncSender<T> {}

/// An error returned from the [`Sender::send`] or [`SyncSender::send`]
/// function on **channel**s.
///
/// A **send** operation can only fail if the receiving end of a channel is
/// disconnected, implying that the data could never be received. The error
/// contains the data being sent as a payload so it can be recovered.
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct SendError<T>(#[stable(feature = "rust1", since = "1.0.0")] pub T);

/// An error returned from the [`recv`] function on a [`Receiver`].
///
/// The [`recv`] operation can only fail if the sending half of a
/// [`channel`] (or [`sync_channel`]) is disconnected, implying that no further
/// messages will ever be received.
///
/// [`recv`]: Receiver::recv
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RecvError;

/// This enumeration is the list of the possible reasons that [`try_recv`] could
/// not return data when called. This can occur with both a [`channel`] and
/// a [`sync_channel`].
///
/// [`try_recv`]: Receiver::try_recv
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub enum TryRecvError {
    /// This **channel** is currently empty, but the **Sender**(s) have not yet
    /// disconnected, so data may yet become available.
    #[stable(feature = "rust1", since = "1.0.0")]
    Empty,

    /// The **channel**'s sending half has become disconnected, and there will
    /// never be any more data received on it.
    #[stable(feature = "rust1", since = "1.0.0")]
    Disconnected,
}

/// This enumeration is the list of possible errors that made [`recv_timeout`]
/// unable to return data when called. This can occur with both a [`channel`] and
/// a [`sync_channel`].
///
/// [`recv_timeout`]: Receiver::recv_timeout
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
#[stable(feature = "mpsc_recv_timeout", since = "1.12.0")]
pub enum RecvTimeoutError {
    /// This **channel** is currently empty, but the **Sender**(s) have not yet
    /// disconnected, so data may yet become available.
    #[stable(feature = "mpsc_recv_timeout", since = "1.12.0")]
    Timeout,
    /// The **channel**'s sending half has become disconnected, and there will
    /// never be any more data received on it.
    #[stable(feature = "mpsc_recv_timeout", since = "1.12.0")]
    Disconnected,
}

/// This enumeration is the list of the possible error outcomes for the
/// [`try_send`] method.
///
/// [`try_send`]: SyncSender::try_send
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum TrySendError<T> {
    /// The data could not be sent on the [`sync_channel`] because it would require that
    /// the callee block to send the data.
    ///
    /// If this is a buffered channel, then the buffer is full at this time. If
    /// this is not a buffered channel, then there is no [`Receiver`] available to
    /// acquire the data.
    #[stable(feature = "rust1", since = "1.0.0")]
    Full(#[stable(feature = "rust1", since = "1.0.0")] T),

    /// This [`sync_channel`]'s receiving half has disconnected, so the data could not be
    /// sent. The data is returned back to the callee in this case.
    #[stable(feature = "rust1", since = "1.0.0")]
    Disconnected(#[stable(feature = "rust1", since = "1.0.0")] T),
}

// The runtime representation of a channel endpoint; see the "Flavors of
// channels" discussion in the module-level commentary above.
enum Flavor<T> {
    // Optimized for exactly one send (the state every `channel()` starts in).
    Oneshot(Arc<oneshot::Packet<T>>),
    // Single sender, multiple sends (upgraded from Oneshot on a second send).
    Stream(Arc<stream::Packet<T>>),
    // Multiple senders (upgraded from Oneshot/Stream on `Sender::clone`).
    Shared(Arc<shared::Packet<T>>),
    // Bounded `sync_channel`; never upgraded from or to.
    Sync(Arc<sync::Packet<T>>),
}

#[doc(hidden)]
trait UnsafeFlavor<T> {
    fn inner_unsafe(&self) -> &UnsafeCell<Flavor<T>>;
    // Safety: callers must ensure no other reference to the flavor is live.
    // `Sender`/`Receiver` are `!Sync`, so `&self` implies single-threaded
    // access — presumably the invariant relied on here; confirm at call sites.
    unsafe fn inner_mut(&self) -> &mut Flavor<T> {
        &mut *self.inner_unsafe().get()
    }
    unsafe fn inner(&self) -> &Flavor<T> {
        &*self.inner_unsafe().get()
    }
}
impl<T> UnsafeFlavor<T> for Sender<T> {
    fn inner_unsafe(&self) -> &UnsafeCell<Flavor<T>> {
        &self.inner
    }
}
impl<T> UnsafeFlavor<T> for Receiver<T> {
    fn inner_unsafe(&self) -> &UnsafeCell<Flavor<T>> {
        &self.inner
    }
}

/// Creates a new asynchronous channel, returning the sender/receiver halves.
/// All data sent on the [`Sender`] will become available on the [`Receiver`] in
/// the same order as it was sent, and no [`send`] will block the calling thread
/// (this channel has an "infinite buffer", unlike [`sync_channel`], which will
/// block after its buffer limit is reached). [`recv`] will block until a message
/// is available.
///
/// The [`Sender`] can be cloned to [`send`] to the same channel multiple times, but
/// only one [`Receiver`] is supported.
///
/// If the [`Receiver`] is disconnected while trying to [`send`] with the
/// [`Sender`], the [`send`] method will return a [`SendError`]. Similarly, if the
/// [`Sender`] is disconnected while trying to [`recv`], the [`recv`] method will
/// return a [`RecvError`].
/// /// [`send`]: Sender::send /// [`recv`]: Receiver::recv /// /// # Examples /// /// ``` /// use std::sync::mpsc::channel; /// use std::thread; /// /// let (sender, receiver) = channel(); /// /// // Spawn off an expensive computation /// thread::spawn(move|| { /// # fn expensive_computation() {} /// sender.send(expensive_computation()).unwrap(); /// }); /// /// // Do some useful work for awhile /// /// // Let's see what that answer was /// println!("{:?}", receiver.recv().unwrap()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn channel<T>() -> (Sender<T>, Receiver<T>) { let a = Arc::new(oneshot::Packet::new()); (Sender::new(Flavor::Oneshot(a.clone())), Receiver::new(Flavor::Oneshot(a))) } /// Creates a new synchronous, bounded channel. /// All data sent on the [`SyncSender`] will become available on the [`Receiver`] /// in the same order as it was sent. Like asynchronous [`channel`]s, the /// [`Receiver`] will block until a message becomes available. `sync_channel` /// differs greatly in the semantics of the sender, however. /// /// This channel has an internal buffer on which messages will be queued. /// `bound` specifies the buffer size. When the internal buffer becomes full, /// future sends will *block* waiting for the buffer to open up. Note that a /// buffer size of 0 is valid, in which case this becomes "rendezvous channel" /// where each [`send`] will not return until a [`recv`] is paired with it. /// /// The [`SyncSender`] can be cloned to [`send`] to the same channel multiple /// times, but only one [`Receiver`] is supported. /// /// Like asynchronous channels, if the [`Receiver`] is disconnected while trying /// to [`send`] with the [`SyncSender`], the [`send`] method will return a /// [`SendError`]. Similarly, If the [`SyncSender`] is disconnected while trying /// to [`recv`], the [`recv`] method will return a [`RecvError`]. 
/// /// [`send`]: SyncSender::send /// [`recv`]: Receiver::recv /// /// # Examples /// /// ``` /// use std::sync::mpsc::sync_channel; /// use std::thread; /// /// let (sender, receiver) = sync_channel(1); /// /// // this returns immediately /// sender.send(1).unwrap(); /// /// thread::spawn(move|| { /// // this will block until the previous message has been received /// sender.send(2).unwrap(); /// }); /// /// assert_eq!(receiver.recv().unwrap(), 1); /// assert_eq!(receiver.recv().unwrap(), 2); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn sync_channel<T>(bound: usize) -> (SyncSender<T>, Receiver<T>) { let a = Arc::new(sync::Packet::new(bound)); (SyncSender::new(a.clone()), Receiver::new(Flavor::Sync(a))) } //////////////////////////////////////////////////////////////////////////////// // Sender //////////////////////////////////////////////////////////////////////////////// impl<T> Sender<T> { fn new(inner: Flavor<T>) -> Sender<T> { Sender { inner: UnsafeCell::new(inner) } } /// Attempts to send a value on this channel, returning it back if it could /// not be sent. /// /// A successful send occurs when it is determined that the other end of /// the channel has not hung up already. An unsuccessful send would be one /// where the corresponding receiver has already been deallocated. Note /// that a return value of [`Err`] means that the data will never be /// received, but a return value of [`Ok`] does *not* mean that the data /// will be received. It is possible for the corresponding receiver to /// hang up immediately after this function returns [`Ok`]. /// /// This method will never block the current thread. 
/// /// # Examples /// /// ``` /// use std::sync::mpsc::channel; /// /// let (tx, rx) = channel(); /// /// // This send is always successful /// tx.send(1).unwrap(); /// /// // This send will fail because the receiver is gone /// drop(rx); /// assert_eq!(tx.send(1).unwrap_err().0, 1); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn send(&self, t: T) -> Result<(), SendError<T>> { let (new_inner, ret) = match *unsafe { self.inner() } { Flavor::Oneshot(ref p) => { if !p.sent() { return p.send(t).map_err(SendError); } else { let a = Arc::new(stream::Packet::new()); let rx = Receiver::new(Flavor::Stream(a.clone())); match p.upgrade(rx) { oneshot::UpSuccess => { let ret = a.send(t); (a, ret) } oneshot::UpDisconnected => (a, Err(t)), oneshot::UpWoke(token) => { // This send cannot panic because the thread is // asleep (we're looking at it), so the receiver // can't go away. a.send(t).ok().unwrap(); token.signal(); (a, Ok(())) } } } } Flavor::Stream(ref p) => return p.send(t).map_err(SendError), Flavor::Shared(ref p) => return p.send(t).map_err(SendError), Flavor::Sync(..) 
=> unreachable!(), }; unsafe { let tmp = Sender::new(Flavor::Stream(new_inner)); mem::swap(self.inner_mut(), tmp.inner_mut()); } ret.map_err(SendError) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> Clone for Sender<T> { fn clone(&self) -> Sender<T> { let packet = match *unsafe { self.inner() } { Flavor::Oneshot(ref p) => { let a = Arc::new(shared::Packet::new()); { let guard = a.postinit_lock(); let rx = Receiver::new(Flavor::Shared(a.clone())); let sleeper = match p.upgrade(rx) { oneshot::UpSuccess | oneshot::UpDisconnected => None, oneshot::UpWoke(task) => Some(task), }; a.inherit_blocker(sleeper, guard); } a } Flavor::Stream(ref p) => { let a = Arc::new(shared::Packet::new()); { let guard = a.postinit_lock(); let rx = Receiver::new(Flavor::Shared(a.clone())); let sleeper = match p.upgrade(rx) { stream::UpSuccess | stream::UpDisconnected => None, stream::UpWoke(task) => Some(task), }; a.inherit_blocker(sleeper, guard); } a } Flavor::Shared(ref p) => { p.clone_chan(); return Sender::new(Flavor::Shared(p.clone())); } Flavor::Sync(..) => unreachable!(), }; unsafe { let tmp = Sender::new(Flavor::Shared(packet.clone())); mem::swap(self.inner_mut(), tmp.inner_mut()); } Sender::new(Flavor::Shared(packet)) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> Drop for Sender<T> { fn drop(&mut self) { match *unsafe { self.inner() } { Flavor::Oneshot(ref p) => p.drop_chan(), Flavor::Stream(ref p) => p.drop_chan(), Flavor::Shared(ref p) => p.drop_chan(), Flavor::Sync(..) 
=> unreachable!(), } } } #[stable(feature = "mpsc_debug", since = "1.8.0")] impl<T> fmt::Debug for Sender<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Sender").finish() } } //////////////////////////////////////////////////////////////////////////////// // SyncSender //////////////////////////////////////////////////////////////////////////////// impl<T> SyncSender<T> { fn new(inner: Arc<sync::Packet<T>>) -> SyncSender<T> { SyncSender { inner } } /// Sends a value on this synchronous channel. /// /// This function will *block* until space in the internal buffer becomes /// available or a receiver is available to hand off the message to. /// /// Note that a successful send does *not* guarantee that the receiver will /// ever see the data if there is a buffer on this channel. Items may be /// enqueued in the internal buffer for the receiver to receive at a later /// time. If the buffer size is 0, however, the channel becomes a rendezvous /// channel and it guarantees that the receiver has indeed received /// the data if this function returns success. /// /// This function will never panic, but it may return [`Err`] if the /// [`Receiver`] has disconnected and is no longer able to receive /// information. /// /// # Examples /// /// ```rust /// use std::sync::mpsc::sync_channel; /// use std::thread; /// /// // Create a rendezvous sync_channel with buffer size 0 /// let (sync_sender, receiver) = sync_channel(0); /// /// thread::spawn(move || { /// println!("sending message..."); /// sync_sender.send(1).unwrap(); /// // Thread is now blocked until the message is received /// /// println!("...message received!"); /// }); /// /// let msg = receiver.recv().unwrap(); /// assert_eq!(1, msg); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn send(&self, t: T) -> Result<(), SendError<T>> { self.inner.send(t).map_err(SendError) } /// Attempts to send a value on this channel without blocking. 
    ///
    /// This method differs from [`send`] by returning immediately if the
    /// channel's buffer is full or no receiver is waiting to acquire some
    /// data. Compared with [`send`], this function has two failure cases
    /// instead of one (one for disconnection, one for a full buffer).
    ///
    /// See [`send`] for notes about guarantees of whether the
    /// receiver has received the data or not if this function is successful.
    ///
    /// [`send`]: Self::send
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::sync::mpsc::sync_channel;
    /// use std::thread;
    ///
    /// // Create a sync_channel with buffer size 1
    /// let (sync_sender, receiver) = sync_channel(1);
    /// let sync_sender2 = sync_sender.clone();
    ///
    /// // First thread owns sync_sender
    /// thread::spawn(move || {
    ///     sync_sender.send(1).unwrap();
    ///     sync_sender.send(2).unwrap();
    ///     // Thread blocked
    /// });
    ///
    /// // Second thread owns sync_sender2
    /// thread::spawn(move || {
    ///     // This will return an error and send
    ///     // no message if the buffer is full
    ///     let _ = sync_sender2.try_send(3);
    /// });
    ///
    /// let mut msg;
    /// msg = receiver.recv().unwrap();
    /// println!("message {} received", msg);
    ///
    /// msg = receiver.recv().unwrap();
    /// println!("message {} received", msg);
    ///
    /// // Third message may have never been sent
    /// match receiver.try_recv() {
    ///     Ok(msg) => println!("message {} received", msg),
    ///     Err(_) => println!("the third message was never sent"),
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
        // The sync packet already returns the public TrySendError type.
        self.inner.try_send(t)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for SyncSender<T> {
    fn clone(&self) -> SyncSender<T> {
        // Bump the packet's sender count before handing out a new handle.
        self.inner.clone_chan();
        SyncSender::new(self.inner.clone())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Drop for SyncSender<T> {
    fn drop(&mut self) {
        // Decrement the sender count; may disconnect the channel.
        self.inner.drop_chan();
    }
}

#[stable(feature = "mpsc_debug", since = "1.8.0")]
impl<T> fmt::Debug for SyncSender<T> {
    fn fmt(&self, f:
        &mut fmt::Formatter<'_>) -> fmt::Result {
        // Deliberately opaque: queued values are not exposed.
        f.debug_struct("SyncSender").finish()
    }
}

////////////////////////////////////////////////////////////////////////////////
// Receiver
////////////////////////////////////////////////////////////////////////////////

impl<T> Receiver<T> {
    // Internal constructor; the public entry points are `channel` and
    // `sync_channel`.
    fn new(inner: Flavor<T>) -> Receiver<T> {
        Receiver { inner: UnsafeCell::new(inner) }
    }

    /// Attempts to return a pending value on this receiver without blocking.
    ///
    /// This method will never block the caller in order to wait for data to
    /// become available. Instead, this will always return immediately with a
    /// possible option of pending data on the channel.
    ///
    /// This is useful for a flavor of "optimistic check" before deciding to
    /// block on a receiver.
    ///
    /// Compared with [`recv`], this function has two failure cases instead of one
    /// (one for disconnection, one for an empty buffer).
    ///
    /// [`recv`]: Self::recv
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::sync::mpsc::{Receiver, channel};
    ///
    /// let (_, receiver): (_, Receiver<i32>) = channel();
    ///
    /// assert!(receiver.try_recv().is_err());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_recv(&self) -> Result<T, TryRecvError> {
        // Loop because an `Upgraded` result means the sender side switched
        // flavors (e.g. oneshot -> shared); we swap the upgraded port into
        // our own slot and retry on the new flavor.
        loop {
            let new_port = match *unsafe { self.inner() } {
                Flavor::Oneshot(ref p) => match p.try_recv() {
                    Ok(t) => return Ok(t),
                    Err(oneshot::Empty) => return Err(TryRecvError::Empty),
                    Err(oneshot::Disconnected) => return Err(TryRecvError::Disconnected),
                    Err(oneshot::Upgraded(rx)) => rx,
                },
                Flavor::Stream(ref p) => match p.try_recv() {
                    Ok(t) => return Ok(t),
                    Err(stream::Empty) => return Err(TryRecvError::Empty),
                    Err(stream::Disconnected) => return Err(TryRecvError::Disconnected),
                    Err(stream::Upgraded(rx)) => rx,
                },
                // Shared and sync flavors are terminal: no further upgrades.
                Flavor::Shared(ref p) => match p.try_recv() {
                    Ok(t) => return Ok(t),
                    Err(shared::Empty) => return Err(TryRecvError::Empty),
                    Err(shared::Disconnected) => return Err(TryRecvError::Disconnected),
                },
                Flavor::Sync(ref p) => match p.try_recv() {
                    Ok(t) => return Ok(t),
                    Err(sync::Empty) => return Err(TryRecvError::Empty),
                    Err(sync::Disconnected) => return Err(TryRecvError::Disconnected),
                },
            };
            unsafe {
                // Adopt the upgraded flavor in place, then retry the recv.
                mem::swap(self.inner_mut(), new_port.inner_mut());
            }
        }
    }

    /// Attempts to wait for a value on this receiver, returning an error if the
    /// corresponding channel has hung up.
    ///
    /// This function will always block the current thread if there is no data
    /// available and it's possible for more data to be sent. Once a message is
    /// sent to the corresponding [`Sender`] (or [`SyncSender`]), then this
    /// receiver will wake up and return that message.
    ///
    /// If the corresponding [`Sender`] has disconnected, or it disconnects while
    /// this call is blocking, this call will wake up and return [`Err`] to
    /// indicate that no more messages can ever be received on this channel.
    /// However, since channels are buffered, messages sent before the disconnect
    /// will still be properly received.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::mpsc;
    /// use std::thread;
    ///
    /// let (send, recv) = mpsc::channel();
    /// let handle = thread::spawn(move || {
    ///     send.send(1u8).unwrap();
    /// });
    ///
    /// handle.join().unwrap();
    ///
    /// assert_eq!(Ok(1), recv.recv());
    /// ```
    ///
    /// Buffering behavior:
    ///
    /// ```
    /// use std::sync::mpsc;
    /// use std::thread;
    /// use std::sync::mpsc::RecvError;
    ///
    /// let (send, recv) = mpsc::channel();
    /// let handle = thread::spawn(move || {
    ///     send.send(1u8).unwrap();
    ///     send.send(2).unwrap();
    ///     send.send(3).unwrap();
    ///     drop(send);
    /// });
    ///
    /// // wait for the thread to join so we ensure the sender is dropped
    /// handle.join().unwrap();
    ///
    /// assert_eq!(Ok(1), recv.recv());
    /// assert_eq!(Ok(2), recv.recv());
    /// assert_eq!(Ok(3), recv.recv());
    /// assert_eq!(Err(RecvError), recv.recv());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn recv(&self) -> Result<T, RecvError> {
        // Same upgrade-and-retry loop as try_recv, but blocking
        // (recv(None) == no deadline).
        loop {
            let new_port = match *unsafe { self.inner() } {
                Flavor::Oneshot(ref p) => match p.recv(None) {
                    Ok(t) => return
                    Ok(t),
                    Err(oneshot::Disconnected) => return Err(RecvError),
                    Err(oneshot::Upgraded(rx)) => rx,
                    // A blocking recv with no deadline cannot report Empty.
                    Err(oneshot::Empty) => unreachable!(),
                },
                Flavor::Stream(ref p) => match p.recv(None) {
                    Ok(t) => return Ok(t),
                    Err(stream::Disconnected) => return Err(RecvError),
                    Err(stream::Upgraded(rx)) => rx,
                    Err(stream::Empty) => unreachable!(),
                },
                Flavor::Shared(ref p) => match p.recv(None) {
                    Ok(t) => return Ok(t),
                    Err(shared::Disconnected) => return Err(RecvError),
                    Err(shared::Empty) => unreachable!(),
                },
                // Sync flavor never upgrades, so its result maps directly.
                Flavor::Sync(ref p) => return p.recv(None).map_err(|_| RecvError),
            };
            unsafe {
                // Adopt the upgraded flavor in place, then retry the recv.
                mem::swap(self.inner_mut(), new_port.inner_mut());
            }
        }
    }

    /// Attempts to wait for a value on this receiver, returning an error if the
    /// corresponding channel has hung up, or if it waits more than `timeout`.
    ///
    /// This function will always block the current thread if there is no data
    /// available and it's possible for more data to be sent. Once a message is
    /// sent to the corresponding [`Sender`] (or [`SyncSender`]), then this
    /// receiver will wake up and return that message.
    ///
    /// If the corresponding [`Sender`] has disconnected, or it disconnects while
    /// this call is blocking, this call will wake up and return [`Err`] to
    /// indicate that no more messages can ever be received on this channel.
    /// However, since channels are buffered, messages sent before the disconnect
    /// will still be properly received.
    ///
    /// # Known Issues
    ///
    /// There is currently a known issue (see [`#39364`]) that causes `recv_timeout`
    /// to panic unexpectedly with the following example:
    ///
    /// ```no_run
    /// use std::sync::mpsc::channel;
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// let (tx, rx) = channel::<String>();
    ///
    /// thread::spawn(move || {
    ///     let d = Duration::from_millis(10);
    ///     loop {
    ///         println!("recv");
    ///         let _r = rx.recv_timeout(d);
    ///     }
    /// });
    ///
    /// thread::sleep(Duration::from_millis(100));
    /// let _c1 = tx.clone();
    ///
    /// thread::sleep(Duration::from_secs(1));
    /// ```
    ///
    /// [`#39364`]: https://github.com/rust-lang/rust/issues/39364
    ///
    /// # Examples
    ///
    /// Successfully receiving value before encountering timeout:
    ///
    /// ```no_run
    /// use std::thread;
    /// use std::time::Duration;
    /// use std::sync::mpsc;
    ///
    /// let (send, recv) = mpsc::channel();
    ///
    /// thread::spawn(move || {
    ///     send.send('a').unwrap();
    /// });
    ///
    /// assert_eq!(
    ///     recv.recv_timeout(Duration::from_millis(400)),
    ///     Ok('a')
    /// );
    /// ```
    ///
    /// Receiving an error upon reaching timeout:
    ///
    /// ```no_run
    /// use std::thread;
    /// use std::time::Duration;
    /// use std::sync::mpsc;
    ///
    /// let (send, recv) = mpsc::channel();
    ///
    /// thread::spawn(move || {
    ///     thread::sleep(Duration::from_millis(800));
    ///     send.send('a').unwrap();
    /// });
    ///
    /// assert_eq!(
    ///     recv.recv_timeout(Duration::from_millis(400)),
    ///     Err(mpsc::RecvTimeoutError::Timeout)
    /// );
    /// ```
    #[stable(feature = "mpsc_recv_timeout", since = "1.12.0")]
    pub fn recv_timeout(&self, timeout: Duration) -> Result<T, RecvTimeoutError> {
        // Do an optimistic try_recv to avoid the performance impact of
        // Instant::now() in the full-channel case.
        match self.try_recv() {
            Ok(result) => Ok(result),
            Err(TryRecvError::Disconnected) => Err(RecvTimeoutError::Disconnected),
            // Only take the clock and convert to a deadline when we would
            // actually have to wait.
            Err(TryRecvError::Empty) => match Instant::now().checked_add(timeout) {
                Some(deadline) => self.recv_deadline(deadline),
                // So far in the future that it's practically the same as waiting indefinitely.
                None => self.recv().map_err(RecvTimeoutError::from),
            },
        }
    }

    /// Attempts to wait for a value on this receiver, returning an error if the
    /// corresponding channel has hung up, or if `deadline` is reached.
    ///
    /// This function will always block the current thread if there is no data
    /// available and it's possible for more data to be sent. Once a message is
    /// sent to the corresponding [`Sender`] (or [`SyncSender`]), then this
    /// receiver will wake up and return that message.
    ///
    /// If the corresponding [`Sender`] has disconnected, or it disconnects while
    /// this call is blocking, this call will wake up and return [`Err`] to
    /// indicate that no more messages can ever be received on this channel.
    /// However, since channels are buffered, messages sent before the disconnect
    /// will still be properly received.
    ///
    /// # Examples
    ///
    /// Successfully receiving value before reaching deadline:
    ///
    /// ```no_run
    /// #![feature(deadline_api)]
    /// use std::thread;
    /// use std::time::{Duration, Instant};
    /// use std::sync::mpsc;
    ///
    /// let (send, recv) = mpsc::channel();
    ///
    /// thread::spawn(move || {
    ///     send.send('a').unwrap();
    /// });
    ///
    /// assert_eq!(
    ///     recv.recv_deadline(Instant::now() + Duration::from_millis(400)),
    ///     Ok('a')
    /// );
    /// ```
    ///
    /// Receiving an error upon reaching deadline:
    ///
    /// ```no_run
    /// #![feature(deadline_api)]
    /// use std::thread;
    /// use std::time::{Duration, Instant};
    /// use std::sync::mpsc;
    ///
    /// let (send, recv) = mpsc::channel();
    ///
    /// thread::spawn(move || {
    ///     thread::sleep(Duration::from_millis(800));
    ///     send.send('a').unwrap();
    /// });
    ///
    /// assert_eq!(
    ///     recv.recv_deadline(Instant::now() + Duration::from_millis(400)),
    ///     Err(mpsc::RecvTimeoutError::Timeout)
    /// );
    /// ```
    #[unstable(feature = "deadline_api", issue = "46316")]
    pub fn recv_deadline(&self, deadline: Instant) -> Result<T, RecvTimeoutError> {
        use self::RecvTimeoutError::*;
        // Like recv, but a flavor's recv may also time out (Empty), in which
        // case we stay on the current flavor and re-check the deadline.
        loop {
            let port_or_empty = match *unsafe { self.inner() } {
                Flavor::Oneshot(ref p) => match p.recv(Some(deadline)) {
                    Ok(t) => return Ok(t),
                    Err(oneshot::Disconnected) => return Err(Disconnected),
                    Err(oneshot::Upgraded(rx)) => Some(rx),
                    Err(oneshot::Empty) => None,
                },
                Flavor::Stream(ref p) => match p.recv(Some(deadline)) {
                    Ok(t) => return Ok(t),
                    Err(stream::Disconnected) => return Err(Disconnected),
                    Err(stream::Upgraded(rx)) => Some(rx),
                    Err(stream::Empty) => None,
                },
                Flavor::Shared(ref p) => match p.recv(Some(deadline)) {
                    Ok(t) => return Ok(t),
                    Err(shared::Disconnected) => return Err(Disconnected),
                    Err(shared::Empty) => None,
                },
                Flavor::Sync(ref p) => match p.recv(Some(deadline)) {
                    Ok(t) => return Ok(t),
                    Err(sync::Disconnected) => return Err(Disconnected),
                    Err(sync::Empty) => None,
                },
            };

            if let Some(new_port) = port_or_empty {
                unsafe {
                    // Adopt the upgraded flavor in place before retrying.
                    mem::swap(self.inner_mut(), new_port.inner_mut());
                }
            }

            // If we're already passed the deadline, and we're here without
            // data, return a timeout, else try again.
            if Instant::now() >= deadline {
                return Err(Timeout);
            }
        }
    }

    /// Returns an iterator that will block waiting for messages, but never
    /// [`panic!`]. It will return [`None`] when the channel has hung up.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::sync::mpsc::channel;
    /// use std::thread;
    ///
    /// let (send, recv) = channel();
    ///
    /// thread::spawn(move || {
    ///     send.send(1).unwrap();
    ///     send.send(2).unwrap();
    ///     send.send(3).unwrap();
    /// });
    ///
    /// let mut iter = recv.iter();
    /// assert_eq!(iter.next(), Some(1));
    /// assert_eq!(iter.next(), Some(2));
    /// assert_eq!(iter.next(), Some(3));
    /// assert_eq!(iter.next(), None);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn iter(&self) -> Iter<'_, T> {
        Iter { rx: self }
    }

    /// Returns an iterator that will attempt to yield all pending values.
    /// It will return `None` if there are no more pending values or if the
    /// channel has hung up. The iterator will never [`panic!`] or block the
    /// user by waiting for values.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::sync::mpsc::channel;
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// let (sender, receiver) = channel();
    ///
    /// // nothing is in the buffer yet
    /// assert!(receiver.try_iter().next().is_none());
    ///
    /// thread::spawn(move || {
    ///     thread::sleep(Duration::from_secs(1));
    ///     sender.send(1).unwrap();
    ///     sender.send(2).unwrap();
    ///     sender.send(3).unwrap();
    /// });
    ///
    /// // nothing is in the buffer yet
    /// assert!(receiver.try_iter().next().is_none());
    ///
    /// // block for two seconds
    /// thread::sleep(Duration::from_secs(2));
    ///
    /// let mut iter = receiver.try_iter();
    /// assert_eq!(iter.next(), Some(1));
    /// assert_eq!(iter.next(), Some(2));
    /// assert_eq!(iter.next(), Some(3));
    /// assert_eq!(iter.next(), None);
    /// ```
    #[stable(feature = "receiver_try_iter", since = "1.15.0")]
    pub fn try_iter(&self) -> TryIter<'_, T> {
        TryIter { rx: self }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Iter<'a, T> {
    type Item = T;

    // Blocking iteration: ends (returns None) only on disconnect.
    fn next(&mut self) -> Option<T> {
        self.rx.recv().ok()
    }
}

#[stable(feature = "receiver_try_iter", since = "1.15.0")]
impl<'a, T> Iterator for TryIter<'a, T> {
    type Item = T;

    // Non-blocking iteration: None on empty as well as on disconnect.
    fn next(&mut self) -> Option<T> {
        self.rx.try_recv().ok()
    }
}

#[stable(feature = "receiver_into_iter", since = "1.1.0")]
impl<'a, T> IntoIterator for &'a Receiver<T> {
    type Item = T;
    type IntoIter = Iter<'a, T>;

    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}

#[stable(feature = "receiver_into_iter", since = "1.1.0")]
impl<T> Iterator for IntoIter<T> {
    type Item = T;

    // Owning, blocking iteration over the receiver.
    fn next(&mut self) -> Option<T> {
        self.rx.recv().ok()
    }
}

#[stable(feature = "receiver_into_iter", since = "1.1.0")]
impl<T> IntoIterator for Receiver<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;

    fn into_iter(self) -> IntoIter<T> {
        IntoIter { rx: self }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Drop for Receiver<T> {
    fn drop(&mut self) {
        // Notify the flavor-specific packet that the receiving half is gone.
        match *unsafe { self.inner() } {
            Flavor::Oneshot(ref p) =>
p.drop_port(), Flavor::Stream(ref p) => p.drop_port(), Flavor::Shared(ref p) => p.drop_port(), Flavor::Sync(ref p) => p.drop_port(), } } } #[stable(feature = "mpsc_debug", since = "1.8.0")] impl<T> fmt::Debug for Receiver<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Receiver").finish() } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> fmt::Debug for SendError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "SendError(..)".fmt(f) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> fmt::Display for SendError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "sending on a closed channel".fmt(f) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: Send> error::Error for SendError<T> { #[allow(deprecated)] fn description(&self) -> &str { "sending on a closed channel" } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> fmt::Debug for TrySendError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { TrySendError::Full(..) => "Full(..)".fmt(f), TrySendError::Disconnected(..) => "Disconnected(..)".fmt(f), } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> fmt::Display for TrySendError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { TrySendError::Full(..) => "sending on a full channel".fmt(f), TrySendError::Disconnected(..) => "sending on a closed channel".fmt(f), } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: Send> error::Error for TrySendError<T> { #[allow(deprecated)] fn description(&self) -> &str { match *self { TrySendError::Full(..) => "sending on a full channel", TrySendError::Disconnected(..) => "sending on a closed channel", } } } #[stable(feature = "mpsc_error_conversions", since = "1.24.0")] impl<T> From<SendError<T>> for TrySendError<T> { /// Converts a `SendError<T>` into a `TrySendError<T>`. 
/// /// This conversion always returns a `TrySendError::Disconnected` containing the data in the `SendError<T>`. /// /// No data is allocated on the heap. fn from(err: SendError<T>) -> TrySendError<T> { match err { SendError(t) => TrySendError::Disconnected(t), } } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for RecvError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "receiving on a closed channel".fmt(f) } } #[stable(feature = "rust1", since = "1.0.0")] impl error::Error for RecvError { #[allow(deprecated)] fn description(&self) -> &str { "receiving on a closed channel" } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for TryRecvError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { TryRecvError::Empty => "receiving on an empty channel".fmt(f), TryRecvError::Disconnected => "receiving on a closed channel".fmt(f), } } } #[stable(feature = "rust1", since = "1.0.0")] impl error::Error for TryRecvError { #[allow(deprecated)] fn description(&self) -> &str { match *self { TryRecvError::Empty => "receiving on an empty channel", TryRecvError::Disconnected => "receiving on a closed channel", } } } #[stable(feature = "mpsc_error_conversions", since = "1.24.0")] impl From<RecvError> for TryRecvError { /// Converts a `RecvError` into a `TryRecvError`. /// /// This conversion always returns `TryRecvError::Disconnected`. /// /// No data is allocated on the heap. 
fn from(err: RecvError) -> TryRecvError { match err { RecvError => TryRecvError::Disconnected, } } } #[stable(feature = "mpsc_recv_timeout_error", since = "1.15.0")] impl fmt::Display for RecvTimeoutError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { RecvTimeoutError::Timeout => "timed out waiting on channel".fmt(f), RecvTimeoutError::Disconnected => "channel is empty and sending half is closed".fmt(f), } } } #[stable(feature = "mpsc_recv_timeout_error", since = "1.15.0")] impl error::Error for RecvTimeoutError { #[allow(deprecated)] fn description(&self) -> &str { match *self { RecvTimeoutError::Timeout => "timed out waiting on channel", RecvTimeoutError::Disconnected => "channel is empty and sending half is closed", } } } #[stable(feature = "mpsc_error_conversions", since = "1.24.0")] impl From<RecvError> for RecvTimeoutError { /// Converts a `RecvError` into a `RecvTimeoutError`. /// /// This conversion always returns `RecvTimeoutError::Disconnected`. /// /// No data is allocated on the heap. fn from(err: RecvError) -> RecvTimeoutError { match err { RecvError => RecvTimeoutError::Disconnected, } } } Explain non-dropped sender recv in docs Original senders that are still hanging around could cause Receiver::recv to not block since this is a potential footgun for beginners, clarify more on this in the docs for readers to be aware about it. Fix minor tidbits in sender recv doc Co-authored-by: Dylan DPC <dylan.dpc@gmail.com> Add example for unbounded receive loops in doc Show the drop(tx) pattern, based on tokio docs https://tokio-rs.github.io/tokio/doc/tokio/sync/index.html Fix example code for drop sender recv Fix wording in sender docs Co-authored-by: Josh Triplett <c028c213ed5efcf30c3f4fc7361dbde0c893c5b7@joshtriplett.org> //! Multi-producer, single-consumer FIFO queue communication primitives. //! //! This module provides message-based communication over channels, concretely //! defined among three types: //! //! * [`Sender`] //! 
* [`SyncSender`] //! * [`Receiver`] //! //! A [`Sender`] or [`SyncSender`] is used to send data to a [`Receiver`]. Both //! senders are clone-able (multi-producer) such that many threads can send //! simultaneously to one receiver (single-consumer). //! //! These channels come in two flavors: //! //! 1. An asynchronous, infinitely buffered channel. The [`channel`] function //! will return a `(Sender, Receiver)` tuple where all sends will be //! **asynchronous** (they never block). The channel conceptually has an //! infinite buffer. //! //! 2. A synchronous, bounded channel. The [`sync_channel`] function will //! return a `(SyncSender, Receiver)` tuple where the storage for pending //! messages is a pre-allocated buffer of a fixed size. All sends will be //! **synchronous** by blocking until there is buffer space available. Note //! that a bound of 0 is allowed, causing the channel to become a "rendezvous" //! channel where each sender atomically hands off a message to a receiver. //! //! [`send`]: Sender::send //! //! ## Disconnection //! //! The send and receive operations on channels will all return a [`Result`] //! indicating whether the operation succeeded or not. An unsuccessful operation //! is normally indicative of the other half of a channel having "hung up" by //! being dropped in its corresponding thread. //! //! Once half of a channel has been deallocated, most operations can no longer //! continue to make progress, so [`Err`] will be returned. Many applications //! will continue to [`unwrap`] the results returned from this module, //! instigating a propagation of failure among threads if one unexpectedly dies. //! //! [`unwrap`]: Result::unwrap //! //! # Examples //! //! Simple usage: //! //! ``` //! use std::thread; //! use std::sync::mpsc::channel; //! //! // Create a simple streaming channel //! let (tx, rx) = channel(); //! thread::spawn(move|| { //! tx.send(10).unwrap(); //! }); //! assert_eq!(rx.recv().unwrap(), 10); //! ``` //! //! 
Shared usage: //! //! ``` //! use std::thread; //! use std::sync::mpsc::channel; //! //! // Create a shared channel that can be sent along from many threads //! // where tx is the sending half (tx for transmission), and rx is the receiving //! // half (rx for receiving). //! let (tx, rx) = channel(); //! for i in 0..10 { //! let tx = tx.clone(); //! thread::spawn(move|| { //! tx.send(i).unwrap(); //! }); //! } //! //! for _ in 0..10 { //! let j = rx.recv().unwrap(); //! assert!(0 <= j && j < 10); //! } //! ``` //! //! Propagating panics: //! //! ``` //! use std::sync::mpsc::channel; //! //! // The call to recv() will return an error because the channel has already //! // hung up (or been deallocated) //! let (tx, rx) = channel::<i32>(); //! drop(tx); //! assert!(rx.recv().is_err()); //! ``` //! //! Synchronous channels: //! //! ``` //! use std::thread; //! use std::sync::mpsc::sync_channel; //! //! let (tx, rx) = sync_channel::<i32>(0); //! thread::spawn(move|| { //! // This will wait for the parent thread to start receiving //! tx.send(53).unwrap(); //! }); //! rx.recv().unwrap(); //! ``` //! //! Unbounded receive loop: //! //! ``` //! use std::sync::mpsc::sync_channel; //! use std::thread; //! //! let (tx, rx) = sync_channel(3); //! //! for _ in 0..3 { //! // It would be the same without thread and clone here //! // since there will still be one `tx` left. //! let tx = tx.clone(); //! // cloned tx dropped within thread //! thread::spawn(move || tx.send("ok").unwrap()); //! } //! //! // Drop the last sender to stop `rx` waiting for message. //! // The program will not complete if we comment this out. //! // **All** `tx` needs to be dropped for `rx` to have `Err`. //! drop(tx); //! //! // Unbounded receiver waiting for all senders to complete. //! while let Ok(msg) = rx.recv() { //! println!("{}", msg); //! } //! //! println!("completed"); //! 
``` #![stable(feature = "rust1", since = "1.0.0")] #[cfg(all(test, not(target_os = "emscripten")))] mod tests; #[cfg(all(test, not(target_os = "emscripten")))] mod sync_tests; // A description of how Rust's channel implementation works // // Channels are supposed to be the basic building block for all other // concurrent primitives that are used in Rust. As a result, the channel type // needs to be highly optimized, flexible, and broad enough for use everywhere. // // The choice of implementation of all channels is to be built on lock-free data // structures. The channels themselves are then consequently also lock-free data // structures. As always with lock-free code, this is a very "here be dragons" // territory, especially because I'm unaware of any academic papers that have // gone into great length about channels of these flavors. // // ## Flavors of channels // // From the perspective of a consumer of this library, there is only one flavor // of channel. This channel can be used as a stream and cloned to allow multiple // senders. Under the hood, however, there are actually three flavors of // channels in play. // // * Flavor::Oneshots - these channels are highly optimized for the one-send use // case. They contain as few atomics as possible and // involve one and exactly one allocation. // * Streams - these channels are optimized for the non-shared use case. They // use a different concurrent queue that is more tailored for this // use case. The initial allocation of this flavor of channel is not // optimized. // * Shared - this is the most general form of channel that this module offers, // a channel with multiple senders. This type is as optimized as it // can be, but the previous two types mentioned are much faster for // their use-cases. // // ## Concurrent queues // // The basic idea of Rust's Sender/Receiver types is that send() never blocks, // but recv() obviously blocks. 
This means that under the hood there must be // some shared and concurrent queue holding all of the actual data. // // With two flavors of channels, two flavors of queues are also used. We have // chosen to use queues from a well-known author that are abbreviated as SPSC // and MPSC (single producer, single consumer and multiple producer, single // consumer). SPSC queues are used for streams while MPSC queues are used for // shared channels. // // ### SPSC optimizations // // The SPSC queue found online is essentially a linked list of nodes where one // half of the nodes are the "queue of data" and the other half of nodes are a // cache of unused nodes. The unused nodes are used such that an allocation is // not required on every push() and a free doesn't need to happen on every // pop(). // // As found online, however, the cache of nodes is of an infinite size. This // means that if a channel at one point in its life had 50k items in the queue, // then the queue will always have the capacity for 50k items. I believed that // this was an unnecessary limitation of the implementation, so I have altered // the queue to optionally have a bound on the cache size. // // By default, streams will have an unbounded SPSC queue with a small-ish cache // size. The hope is that the cache is still large enough to have very fast // send() operations while not too large such that millions of channels can // coexist at once. // // ### MPSC optimizations // // Right now the MPSC queue has not been optimized. Like the SPSC queue, it uses // a linked list under the hood to earn its unboundedness, but I have not put // forth much effort into having a cache of nodes similar to the SPSC queue. // // For now, I believe that this is "ok" because shared channels are not the most // common type, but soon we may wish to revisit this queue choice and determine // another candidate for backend storage of shared channels. 
// // ## Overview of the Implementation // // Now that there's a little background on the concurrent queues used, it's // worth going into much more detail about the channels themselves. The basic // pseudocode for a send/recv are: // // // send(t) recv() // queue.push(t) return if queue.pop() // if increment() == -1 deschedule { // wakeup() if decrement() > 0 // cancel_deschedule() // } // queue.pop() // // As mentioned before, there are no locks in this implementation, only atomic // instructions are used. // // ### The internal atomic counter // // Every channel has a shared counter with each half to keep track of the size // of the queue. This counter is used to abort descheduling by the receiver and // to know when to wake up on the sending side. // // As seen in the pseudocode, senders will increment this count and receivers // will decrement the count. The theory behind this is that if a sender sees a // -1 count, it will wake up the receiver, and if the receiver sees a 1+ count, // then it doesn't need to block. // // The recv() method has a beginning call to pop(), and if successful, it needs // to decrement the count. It is a crucial implementation detail that this // decrement does *not* happen to the shared counter. If this were the case, // then it would be possible for the counter to be very negative when there were // no receivers waiting, in which case the senders would have to determine when // it was actually appropriate to wake up a receiver. // // Instead, the "steal count" is kept track of separately (not atomically // because it's only used by receivers), and then the decrement() call when // descheduling will lump in all of the recent steals into one large decrement. // // The implication of this is that if a sender sees a -1 count, then there's // guaranteed to be a waiter waiting! // // ## Native Implementation // // A major goal of these channels is to work seamlessly on and off the runtime. 
// All of the previous race conditions have been worded in terms of // scheduler-isms (which is obviously not available without the runtime). // // For now, native usage of channels (off the runtime) will fall back onto // mutexes/cond vars for descheduling/atomic decisions. The no-contention path // is still entirely lock-free, the "deschedule" blocks above are surrounded by // a mutex and the "wakeup" blocks involve grabbing a mutex and signaling on a // condition variable. // // ## Select // // Being able to support selection over channels has greatly influenced this // design, and not only does selection need to work inside the runtime, but also // outside the runtime. // // The implementation is fairly straightforward. The goal of select() is not to // return some data, but only to return which channel can receive data without // blocking. The implementation is essentially the entire blocking procedure // followed by an increment as soon as its woken up. The cancellation procedure // involves an increment and swapping out of to_wake to acquire ownership of the // thread to unblock. // // Sadly this current implementation requires multiple allocations, so I have // seen the throughput of select() be much worse than it should be. I do not // believe that there is anything fundamental that needs to change about these // channels, however, in order to support a more efficient select(). // // FIXME: Select is now removed, so these factors are ready to be cleaned up! // // # Conclusion // // And now that you've seen all the races that I found and attempted to fix, // here's the code for you to find some more! use crate::cell::UnsafeCell; use crate::error; use crate::fmt; use crate::mem; use crate::sync::Arc; use crate::time::{Duration, Instant}; mod blocking; mod mpsc_queue; mod oneshot; mod shared; mod spsc_queue; mod stream; mod sync; mod cache_aligned; /// The receiving half of Rust's [`channel`] (or [`sync_channel`]) type. 
/// This half can only be owned by one thread. /// /// Messages sent to the channel can be retrieved using [`recv`]. /// /// [`recv`]: Receiver::recv /// /// # Examples /// /// ```rust /// use std::sync::mpsc::channel; /// use std::thread; /// use std::time::Duration; /// /// let (send, recv) = channel(); /// /// thread::spawn(move || { /// send.send("Hello world!").unwrap(); /// thread::sleep(Duration::from_secs(2)); // block for two seconds /// send.send("Delayed for 2 seconds").unwrap(); /// }); /// /// println!("{}", recv.recv().unwrap()); // Received immediately /// println!("Waiting..."); /// println!("{}", recv.recv().unwrap()); // Received after 2 seconds /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub struct Receiver<T> { inner: UnsafeCell<Flavor<T>>, } // The receiver port can be sent from place to place, so long as it // is not used to receive non-sendable things. #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<T: Send> Send for Receiver<T> {} #[stable(feature = "rust1", since = "1.0.0")] impl<T> !Sync for Receiver<T> {} /// An iterator over messages on a [`Receiver`], created by [`iter`]. /// /// This iterator will block whenever [`next`] is called, /// waiting for a new message, and [`None`] will be returned /// when the corresponding channel has hung up. /// /// [`iter`]: Receiver::iter /// [`next`]: Iterator::next /// /// # Examples /// /// ```rust /// use std::sync::mpsc::channel; /// use std::thread; /// /// let (send, recv) = channel(); /// /// thread::spawn(move || { /// send.send(1u8).unwrap(); /// send.send(2u8).unwrap(); /// send.send(3u8).unwrap(); /// }); /// /// for x in recv.iter() { /// println!("Got: {}", x); /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[derive(Debug)] pub struct Iter<'a, T: 'a> { rx: &'a Receiver<T>, } /// An iterator that attempts to yield all pending values for a [`Receiver`], /// created by [`try_iter`]. 
/// /// [`None`] will be returned when there are no pending values remaining or /// if the corresponding channel has hung up. /// /// This iterator will never block the caller in order to wait for data to /// become available. Instead, it will return [`None`]. /// /// [`try_iter`]: Receiver::try_iter /// /// # Examples /// /// ```rust /// use std::sync::mpsc::channel; /// use std::thread; /// use std::time::Duration; /// /// let (sender, receiver) = channel(); /// /// // Nothing is in the buffer yet /// assert!(receiver.try_iter().next().is_none()); /// println!("Nothing in the buffer..."); /// /// thread::spawn(move || { /// sender.send(1).unwrap(); /// sender.send(2).unwrap(); /// sender.send(3).unwrap(); /// }); /// /// println!("Going to sleep..."); /// thread::sleep(Duration::from_secs(2)); // block for two seconds /// /// for x in receiver.try_iter() { /// println!("Got: {}", x); /// } /// ``` #[stable(feature = "receiver_try_iter", since = "1.15.0")] #[derive(Debug)] pub struct TryIter<'a, T: 'a> { rx: &'a Receiver<T>, } /// An owning iterator over messages on a [`Receiver`], /// created by **Receiver::into_iter**. /// /// This iterator will block whenever [`next`] /// is called, waiting for a new message, and [`None`] will be /// returned if the corresponding channel has hung up. /// /// [`next`]: Iterator::next /// /// # Examples /// /// ```rust /// use std::sync::mpsc::channel; /// use std::thread; /// /// let (send, recv) = channel(); /// /// thread::spawn(move || { /// send.send(1u8).unwrap(); /// send.send(2u8).unwrap(); /// send.send(3u8).unwrap(); /// }); /// /// for x in recv.into_iter() { /// println!("Got: {}", x); /// } /// ``` #[stable(feature = "receiver_into_iter", since = "1.1.0")] #[derive(Debug)] pub struct IntoIter<T> { rx: Receiver<T>, } /// The sending-half of Rust's asynchronous [`channel`] type. This half can only be /// owned by one thread, but it can be cloned to send to other threads. 
/// /// Messages can be sent through this channel with [`send`]. /// /// Note: all senders (the original and the clones) need to be dropped for the receiver /// to stop blocking to receive messages with [`Receiver::recv`]. /// /// [`send`]: Sender::send /// /// # Examples /// /// ```rust /// use std::sync::mpsc::channel; /// use std::thread; /// /// let (sender, receiver) = channel(); /// let sender2 = sender.clone(); /// /// // First thread owns sender /// thread::spawn(move || { /// sender.send(1).unwrap(); /// }); /// /// // Second thread owns sender2 /// thread::spawn(move || { /// sender2.send(2).unwrap(); /// }); /// /// let msg = receiver.recv().unwrap(); /// let msg2 = receiver.recv().unwrap(); /// /// assert_eq!(3, msg + msg2); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub struct Sender<T> { inner: UnsafeCell<Flavor<T>>, } // The send port can be sent from place to place, so long as it // is not used to send non-sendable things. #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<T: Send> Send for Sender<T> {} #[stable(feature = "rust1", since = "1.0.0")] impl<T> !Sync for Sender<T> {} /// The sending-half of Rust's synchronous [`sync_channel`] type. /// /// Messages can be sent through this channel with [`send`] or [`try_send`]. /// /// [`send`] will block if there is no space in the internal buffer. 
/// /// [`send`]: SyncSender::send /// [`try_send`]: SyncSender::try_send /// /// # Examples /// /// ```rust /// use std::sync::mpsc::sync_channel; /// use std::thread; /// /// // Create a sync_channel with buffer size 2 /// let (sync_sender, receiver) = sync_channel(2); /// let sync_sender2 = sync_sender.clone(); /// /// // First thread owns sync_sender /// thread::spawn(move || { /// sync_sender.send(1).unwrap(); /// sync_sender.send(2).unwrap(); /// }); /// /// // Second thread owns sync_sender2 /// thread::spawn(move || { /// sync_sender2.send(3).unwrap(); /// // thread will now block since the buffer is full /// println!("Thread unblocked!"); /// }); /// /// let mut msg; /// /// msg = receiver.recv().unwrap(); /// println!("message {} received", msg); /// /// // "Thread unblocked!" will be printed now /// /// msg = receiver.recv().unwrap(); /// println!("message {} received", msg); /// /// msg = receiver.recv().unwrap(); /// /// println!("message {} received", msg); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub struct SyncSender<T> { inner: Arc<sync::Packet<T>>, } #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<T: Send> Send for SyncSender<T> {} /// An error returned from the [`Sender::send`] or [`SyncSender::send`] /// function on **channel**s. /// /// A **send** operation can only fail if the receiving end of a channel is /// disconnected, implying that the data could never be received. The error /// contains the data being sent as a payload so it can be recovered. #[stable(feature = "rust1", since = "1.0.0")] #[derive(PartialEq, Eq, Clone, Copy)] pub struct SendError<T>(#[stable(feature = "rust1", since = "1.0.0")] pub T); /// An error returned from the [`recv`] function on a [`Receiver`]. /// /// The [`recv`] operation can only fail if the sending half of a /// [`channel`] (or [`sync_channel`]) is disconnected, implying that no further /// messages will ever be received. 
/// /// [`recv`]: Receiver::recv #[derive(PartialEq, Eq, Clone, Copy, Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct RecvError; /// This enumeration is the list of the possible reasons that [`try_recv`] could /// not return data when called. This can occur with both a [`channel`] and /// a [`sync_channel`]. /// /// [`try_recv`]: Receiver::try_recv #[derive(PartialEq, Eq, Clone, Copy, Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub enum TryRecvError { /// This **channel** is currently empty, but the **Sender**(s) have not yet /// disconnected, so data may yet become available. #[stable(feature = "rust1", since = "1.0.0")] Empty, /// The **channel**'s sending half has become disconnected, and there will /// never be any more data received on it. #[stable(feature = "rust1", since = "1.0.0")] Disconnected, } /// This enumeration is the list of possible errors that made [`recv_timeout`] /// unable to return data when called. This can occur with both a [`channel`] and /// a [`sync_channel`]. /// /// [`recv_timeout`]: Receiver::recv_timeout #[derive(PartialEq, Eq, Clone, Copy, Debug)] #[stable(feature = "mpsc_recv_timeout", since = "1.12.0")] pub enum RecvTimeoutError { /// This **channel** is currently empty, but the **Sender**(s) have not yet /// disconnected, so data may yet become available. #[stable(feature = "mpsc_recv_timeout", since = "1.12.0")] Timeout, /// The **channel**'s sending half has become disconnected, and there will /// never be any more data received on it. #[stable(feature = "mpsc_recv_timeout", since = "1.12.0")] Disconnected, } /// This enumeration is the list of the possible error outcomes for the /// [`try_send`] method. /// /// [`try_send`]: SyncSender::try_send #[stable(feature = "rust1", since = "1.0.0")] #[derive(PartialEq, Eq, Clone, Copy)] pub enum TrySendError<T> { /// The data could not be sent on the [`sync_channel`] because it would require that /// the callee block to send the data. 
/// /// If this is a buffered channel, then the buffer is full at this time. If /// this is not a buffered channel, then there is no [`Receiver`] available to /// acquire the data. #[stable(feature = "rust1", since = "1.0.0")] Full(#[stable(feature = "rust1", since = "1.0.0")] T), /// This [`sync_channel`]'s receiving half has disconnected, so the data could not be /// sent. The data is returned back to the callee in this case. #[stable(feature = "rust1", since = "1.0.0")] Disconnected(#[stable(feature = "rust1", since = "1.0.0")] T), } enum Flavor<T> { Oneshot(Arc<oneshot::Packet<T>>), Stream(Arc<stream::Packet<T>>), Shared(Arc<shared::Packet<T>>), Sync(Arc<sync::Packet<T>>), } #[doc(hidden)] trait UnsafeFlavor<T> { fn inner_unsafe(&self) -> &UnsafeCell<Flavor<T>>; unsafe fn inner_mut(&self) -> &mut Flavor<T> { &mut *self.inner_unsafe().get() } unsafe fn inner(&self) -> &Flavor<T> { &*self.inner_unsafe().get() } } impl<T> UnsafeFlavor<T> for Sender<T> { fn inner_unsafe(&self) -> &UnsafeCell<Flavor<T>> { &self.inner } } impl<T> UnsafeFlavor<T> for Receiver<T> { fn inner_unsafe(&self) -> &UnsafeCell<Flavor<T>> { &self.inner } } /// Creates a new asynchronous channel, returning the sender/receiver halves. /// All data sent on the [`Sender`] will become available on the [`Receiver`] in /// the same order as it was sent, and no [`send`] will block the calling thread /// (this channel has an "infinite buffer", unlike [`sync_channel`], which will /// block after its buffer limit is reached). [`recv`] will block until a message /// is available while there is at least one [`Sender`] alive (including clones). /// /// The [`Sender`] can be cloned to [`send`] to the same channel multiple times, but /// only one [`Receiver`] is supported. /// /// If the [`Receiver`] is disconnected while trying to [`send`] with the /// [`Sender`], the [`send`] method will return a [`SendError`]. 
Similarly, if the /// [`Sender`] is disconnected while trying to [`recv`], the [`recv`] method will /// return a [`RecvError`]. /// /// [`send`]: Sender::send /// [`recv`]: Receiver::recv /// /// # Examples /// /// ``` /// use std::sync::mpsc::channel; /// use std::thread; /// /// let (sender, receiver) = channel(); /// /// // Spawn off an expensive computation /// thread::spawn(move|| { /// # fn expensive_computation() {} /// sender.send(expensive_computation()).unwrap(); /// }); /// /// // Do some useful work for awhile /// /// // Let's see what that answer was /// println!("{:?}", receiver.recv().unwrap()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn channel<T>() -> (Sender<T>, Receiver<T>) { let a = Arc::new(oneshot::Packet::new()); (Sender::new(Flavor::Oneshot(a.clone())), Receiver::new(Flavor::Oneshot(a))) } /// Creates a new synchronous, bounded channel. /// All data sent on the [`SyncSender`] will become available on the [`Receiver`] /// in the same order as it was sent. Like asynchronous [`channel`]s, the /// [`Receiver`] will block until a message becomes available. `sync_channel` /// differs greatly in the semantics of the sender, however. /// /// This channel has an internal buffer on which messages will be queued. /// `bound` specifies the buffer size. When the internal buffer becomes full, /// future sends will *block* waiting for the buffer to open up. Note that a /// buffer size of 0 is valid, in which case this becomes "rendezvous channel" /// where each [`send`] will not return until a [`recv`] is paired with it. /// /// The [`SyncSender`] can be cloned to [`send`] to the same channel multiple /// times, but only one [`Receiver`] is supported. /// /// Like asynchronous channels, if the [`Receiver`] is disconnected while trying /// to [`send`] with the [`SyncSender`], the [`send`] method will return a /// [`SendError`]. 
Similarly, if the [`SyncSender`] is disconnected while trying
/// to [`recv`], the [`recv`] method will return a [`RecvError`].
///
/// [`send`]: SyncSender::send
/// [`recv`]: Receiver::recv
///
/// # Examples
///
/// ```
/// use std::sync::mpsc::sync_channel;
/// use std::thread;
///
/// let (sender, receiver) = sync_channel(1);
///
/// // this returns immediately
/// sender.send(1).unwrap();
///
/// thread::spawn(move|| {
///     // this will block until the previous message has been received
///     sender.send(2).unwrap();
/// });
///
/// assert_eq!(receiver.recv().unwrap(), 1);
/// assert_eq!(receiver.recv().unwrap(), 2);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn sync_channel<T>(bound: usize) -> (SyncSender<T>, Receiver<T>) {
    // Unlike `channel()`, which starts out `Oneshot` and upgrades flavors
    // lazily, a sync channel is `Sync`-flavored for its whole lifetime:
    // both halves share the single `sync::Packet` holding the
    // `bound`-sized buffer, and no flavor upgrade ever happens.
    let a = Arc::new(sync::Packet::new(bound));
    (SyncSender::new(a.clone()), Receiver::new(Flavor::Sync(a)))
}

////////////////////////////////////////////////////////////////////////////////
// Sender
////////////////////////////////////////////////////////////////////////////////

impl<T> Sender<T> {
    // Internal constructor: wraps the flavor in an `UnsafeCell` so it can
    // later be replaced in place (via `inner_mut`) when the channel is
    // upgraded to a different flavor.
    fn new(inner: Flavor<T>) -> Sender<T> {
        Sender { inner: UnsafeCell::new(inner) }
    }

    /// Attempts to send a value on this channel, returning it back if it could
    /// not be sent.
    ///
    /// A successful send occurs when it is determined that the other end of
    /// the channel has not hung up already. An unsuccessful send would be one
    /// where the corresponding receiver has already been deallocated. Note
    /// that a return value of [`Err`] means that the data will never be
    /// received, but a return value of [`Ok`] does *not* mean that the data
    /// will be received. It is possible for the corresponding receiver to
    /// hang up immediately after this function returns [`Ok`].
    ///
    /// This method will never block the current thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::mpsc::channel;
    ///
    /// let (tx, rx) = channel();
    ///
    /// // This send is always successful
    /// tx.send(1).unwrap();
    ///
    /// // This send will fail because the receiver is gone
    /// drop(rx);
    /// assert_eq!(tx.send(1).unwrap_err().0, 1);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn send(&self, t: T) -> Result<(), SendError<T>> {
        // A `Sender` can never hold the `Sync` flavor: `sync_channel` only
        // hands out `SyncSender`s, hence the `unreachable!()` below.
        let (new_inner, ret) = match *unsafe { self.inner() } {
            Flavor::Oneshot(ref p) => {
                if !p.sent() {
                    // First send on a oneshot channel: deliver in place,
                    // no upgrade needed.
                    return p.send(t).map_err(SendError);
                } else {
                    // Second send on a oneshot channel: upgrade to the
                    // `Stream` flavor, then deliver through the new packet.
                    let a = Arc::new(stream::Packet::new());
                    let rx = Receiver::new(Flavor::Stream(a.clone()));
                    match p.upgrade(rx) {
                        oneshot::UpSuccess => {
                            let ret = a.send(t);
                            (a, ret)
                        }
                        oneshot::UpDisconnected => (a, Err(t)),
                        oneshot::UpWoke(token) => {
                            // This send cannot panic because the thread is
                            // asleep (we're looking at it), so the receiver
                            // can't go away.
                            a.send(t).ok().unwrap();
                            token.signal();
                            (a, Ok(()))
                        }
                    }
                }
            }
            Flavor::Stream(ref p) => return p.send(t).map_err(SendError),
            Flavor::Shared(ref p) => return p.send(t).map_err(SendError),
            Flavor::Sync(..) => unreachable!(),
        };

        // Record the upgraded flavor in our own cell so that subsequent
        // sends take the fast `Flavor::Stream` path directly.
        unsafe {
            let tmp = Sender::new(Flavor::Stream(new_inner));
            mem::swap(self.inner_mut(), tmp.inner_mut());
        }
        ret.map_err(SendError)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Sender<T> {
    /// Clone a sender to send to other threads.
    ///
    /// Note, be aware of the lifetime of the sender because all senders
    /// (including the original) need to be dropped in order for
    /// [`Receiver::recv`] to stop blocking.
fn clone(&self) -> Sender<T> { let packet = match *unsafe { self.inner() } { Flavor::Oneshot(ref p) => { let a = Arc::new(shared::Packet::new()); { let guard = a.postinit_lock(); let rx = Receiver::new(Flavor::Shared(a.clone())); let sleeper = match p.upgrade(rx) { oneshot::UpSuccess | oneshot::UpDisconnected => None, oneshot::UpWoke(task) => Some(task), }; a.inherit_blocker(sleeper, guard); } a } Flavor::Stream(ref p) => { let a = Arc::new(shared::Packet::new()); { let guard = a.postinit_lock(); let rx = Receiver::new(Flavor::Shared(a.clone())); let sleeper = match p.upgrade(rx) { stream::UpSuccess | stream::UpDisconnected => None, stream::UpWoke(task) => Some(task), }; a.inherit_blocker(sleeper, guard); } a } Flavor::Shared(ref p) => { p.clone_chan(); return Sender::new(Flavor::Shared(p.clone())); } Flavor::Sync(..) => unreachable!(), }; unsafe { let tmp = Sender::new(Flavor::Shared(packet.clone())); mem::swap(self.inner_mut(), tmp.inner_mut()); } Sender::new(Flavor::Shared(packet)) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> Drop for Sender<T> { fn drop(&mut self) { match *unsafe { self.inner() } { Flavor::Oneshot(ref p) => p.drop_chan(), Flavor::Stream(ref p) => p.drop_chan(), Flavor::Shared(ref p) => p.drop_chan(), Flavor::Sync(..) => unreachable!(), } } } #[stable(feature = "mpsc_debug", since = "1.8.0")] impl<T> fmt::Debug for Sender<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Sender").finish() } } //////////////////////////////////////////////////////////////////////////////// // SyncSender //////////////////////////////////////////////////////////////////////////////// impl<T> SyncSender<T> { fn new(inner: Arc<sync::Packet<T>>) -> SyncSender<T> { SyncSender { inner } } /// Sends a value on this synchronous channel. /// /// This function will *block* until space in the internal buffer becomes /// available or a receiver is available to hand off the message to. 
/// /// Note that a successful send does *not* guarantee that the receiver will /// ever see the data if there is a buffer on this channel. Items may be /// enqueued in the internal buffer for the receiver to receive at a later /// time. If the buffer size is 0, however, the channel becomes a rendezvous /// channel and it guarantees that the receiver has indeed received /// the data if this function returns success. /// /// This function will never panic, but it may return [`Err`] if the /// [`Receiver`] has disconnected and is no longer able to receive /// information. /// /// # Examples /// /// ```rust /// use std::sync::mpsc::sync_channel; /// use std::thread; /// /// // Create a rendezvous sync_channel with buffer size 0 /// let (sync_sender, receiver) = sync_channel(0); /// /// thread::spawn(move || { /// println!("sending message..."); /// sync_sender.send(1).unwrap(); /// // Thread is now blocked until the message is received /// /// println!("...message received!"); /// }); /// /// let msg = receiver.recv().unwrap(); /// assert_eq!(1, msg); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn send(&self, t: T) -> Result<(), SendError<T>> { self.inner.send(t).map_err(SendError) } /// Attempts to send a value on this channel without blocking. /// /// This method differs from [`send`] by returning immediately if the /// channel's buffer is full or no receiver is waiting to acquire some /// data. Compared with [`send`], this function has two failure cases /// instead of one (one for disconnection, one for a full buffer). /// /// See [`send`] for notes about guarantees of whether the /// receiver has received the data or not if this function is successful. 
/// /// [`send`]: Self::send /// /// # Examples /// /// ```rust /// use std::sync::mpsc::sync_channel; /// use std::thread; /// /// // Create a sync_channel with buffer size 1 /// let (sync_sender, receiver) = sync_channel(1); /// let sync_sender2 = sync_sender.clone(); /// /// // First thread owns sync_sender /// thread::spawn(move || { /// sync_sender.send(1).unwrap(); /// sync_sender.send(2).unwrap(); /// // Thread blocked /// }); /// /// // Second thread owns sync_sender2 /// thread::spawn(move || { /// // This will return an error and send /// // no message if the buffer is full /// let _ = sync_sender2.try_send(3); /// }); /// /// let mut msg; /// msg = receiver.recv().unwrap(); /// println!("message {} received", msg); /// /// msg = receiver.recv().unwrap(); /// println!("message {} received", msg); /// /// // Third message may have never been sent /// match receiver.try_recv() { /// Ok(msg) => println!("message {} received", msg), /// Err(_) => println!("the third message was never sent"), /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> { self.inner.try_send(t) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> Clone for SyncSender<T> { fn clone(&self) -> SyncSender<T> { self.inner.clone_chan(); SyncSender::new(self.inner.clone()) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> Drop for SyncSender<T> { fn drop(&mut self) { self.inner.drop_chan(); } } #[stable(feature = "mpsc_debug", since = "1.8.0")] impl<T> fmt::Debug for SyncSender<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SyncSender").finish() } } //////////////////////////////////////////////////////////////////////////////// // Receiver //////////////////////////////////////////////////////////////////////////////// impl<T> Receiver<T> { fn new(inner: Flavor<T>) -> Receiver<T> { Receiver { inner: UnsafeCell::new(inner) } } /// Attempts to return a pending value on this receiver 
without blocking.
    ///
    /// This method will never block the caller in order to wait for data to
    /// become available. Instead, this will always return immediately with a
    /// possible option of pending data on the channel.
    ///
    /// This is useful for a flavor of "optimistic check" before deciding to
    /// block on a receiver.
    ///
    /// Compared with [`recv`], this function has two failure cases instead of one
    /// (one for disconnection, one for an empty buffer).
    ///
    /// [`recv`]: Self::recv
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::sync::mpsc::{Receiver, channel};
    ///
    /// let (_, receiver): (_, Receiver<i32>) = channel();
    ///
    /// assert!(receiver.try_recv().is_err());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn try_recv(&self) -> Result<T, TryRecvError> {
        // Each async flavor may report `Upgraded(rx)`, meaning a sender has
        // converted the channel to a richer flavor (oneshot -> stream ->
        // shared). In that case we adopt the new port and retry; the `Sync`
        // flavor never upgrades, so its arm has no such case.
        loop {
            let new_port = match *unsafe { self.inner() } {
                Flavor::Oneshot(ref p) => match p.try_recv() {
                    Ok(t) => return Ok(t),
                    Err(oneshot::Empty) => return Err(TryRecvError::Empty),
                    Err(oneshot::Disconnected) => return Err(TryRecvError::Disconnected),
                    Err(oneshot::Upgraded(rx)) => rx,
                },
                Flavor::Stream(ref p) => match p.try_recv() {
                    Ok(t) => return Ok(t),
                    Err(stream::Empty) => return Err(TryRecvError::Empty),
                    Err(stream::Disconnected) => return Err(TryRecvError::Disconnected),
                    Err(stream::Upgraded(rx)) => rx,
                },
                Flavor::Shared(ref p) => match p.try_recv() {
                    Ok(t) => return Ok(t),
                    Err(shared::Empty) => return Err(TryRecvError::Empty),
                    Err(shared::Disconnected) => return Err(TryRecvError::Disconnected),
                },
                Flavor::Sync(ref p) => match p.try_recv() {
                    Ok(t) => return Ok(t),
                    Err(sync::Empty) => return Err(TryRecvError::Empty),
                    Err(sync::Disconnected) => return Err(TryRecvError::Disconnected),
                },
            };
            // Install the upgraded port as our own flavor and loop again.
            unsafe {
                mem::swap(self.inner_mut(), new_port.inner_mut());
            }
        }
    }

    /// Attempts to wait for a value on this receiver, returning an error if the
    /// corresponding channel has hung up.
    ///
    /// This function will always block the current thread if there is no data
    /// available and it's possible for more data to be sent (at least one sender
    /// still exists). Once a message is sent to the corresponding [`Sender`]
    /// (or [`SyncSender`]), this receiver will wake up and return that
    /// message.
    ///
    /// If the corresponding [`Sender`] has disconnected, or it disconnects while
    /// this call is blocking, this call will wake up and return [`Err`] to
    /// indicate that no more messages can ever be received on this channel.
    /// However, since channels are buffered, messages sent before the disconnect
    /// will still be properly received.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::mpsc;
    /// use std::thread;
    ///
    /// let (send, recv) = mpsc::channel();
    /// let handle = thread::spawn(move || {
    ///     send.send(1u8).unwrap();
    /// });
    ///
    /// handle.join().unwrap();
    ///
    /// assert_eq!(Ok(1), recv.recv());
    /// ```
    ///
    /// Buffering behavior:
    ///
    /// ```
    /// use std::sync::mpsc;
    /// use std::thread;
    /// use std::sync::mpsc::RecvError;
    ///
    /// let (send, recv) = mpsc::channel();
    /// let handle = thread::spawn(move || {
    ///     send.send(1u8).unwrap();
    ///     send.send(2).unwrap();
    ///     send.send(3).unwrap();
    ///     drop(send);
    /// });
    ///
    /// // wait for the thread to join so we ensure the sender is dropped
    /// handle.join().unwrap();
    ///
    /// assert_eq!(Ok(1), recv.recv());
    /// assert_eq!(Ok(2), recv.recv());
    /// assert_eq!(Ok(3), recv.recv());
    /// assert_eq!(Err(RecvError), recv.recv());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn recv(&self) -> Result<T, RecvError> {
        // Same retry loop as `try_recv`: a blocked `recv` can return
        // `Upgraded(rx)` when a sender converts the channel to a new flavor
        // mid-wait, in which case we swap in the new port and block again.
        loop {
            let new_port = match *unsafe { self.inner() } {
                // `Empty` only arises when a recv deadline expires (compare
                // `recv_deadline`); `recv(None)` has no deadline, so those
                // arms are genuinely unreachable.
                Flavor::Oneshot(ref p) => match p.recv(None) {
                    Ok(t) => return Ok(t),
                    Err(oneshot::Disconnected) => return Err(RecvError),
                    Err(oneshot::Upgraded(rx)) => rx,
                    Err(oneshot::Empty) => unreachable!(),
                },
                Flavor::Stream(ref p) => match p.recv(None) {
                    Ok(t) => return Ok(t),
                    Err(stream::Disconnected) => return Err(RecvError),
                    Err(stream::Upgraded(rx)) => rx,
                    Err(stream::Empty) => unreachable!(),
                },
                Flavor::Shared(ref p) => match p.recv(None) {
                    Ok(t) => return Ok(t),
                    Err(shared::Disconnected) => return Err(RecvError),
                    Err(shared::Empty) => unreachable!(),
                },
                Flavor::Sync(ref p) => return p.recv(None).map_err(|_| RecvError),
            };
            // Adopt the upgraded port and retry the blocking receive.
            unsafe {
                mem::swap(self.inner_mut(), new_port.inner_mut());
            }
        }
    }

    /// Attempts to wait for a value on this receiver, returning an error if the
    /// corresponding channel has hung up, or if it waits more than `timeout`.
    ///
    /// This function will always block the current thread if there is no data
    /// available and it's possible for more data to be sent (at least one sender
    /// still exists). Once a message is sent to the corresponding [`Sender`]
    /// (or [`SyncSender`]), this receiver will wake up and return that
    /// message.
    ///
    /// If the corresponding [`Sender`] has disconnected, or it disconnects while
    /// this call is blocking, this call will wake up and return [`Err`] to
    /// indicate that no more messages can ever be received on this channel.
    /// However, since channels are buffered, messages sent before the disconnect
    /// will still be properly received.
/// /// # Known Issues /// /// There is currently a known issue (see [`#39364`]) that causes `recv_timeout` /// to panic unexpectedly with the following example: /// /// ```no_run /// use std::sync::mpsc::channel; /// use std::thread; /// use std::time::Duration; /// /// let (tx, rx) = channel::<String>(); /// /// thread::spawn(move || { /// let d = Duration::from_millis(10); /// loop { /// println!("recv"); /// let _r = rx.recv_timeout(d); /// } /// }); /// /// thread::sleep(Duration::from_millis(100)); /// let _c1 = tx.clone(); /// /// thread::sleep(Duration::from_secs(1)); /// ``` /// /// [`#39364`]: https://github.com/rust-lang/rust/issues/39364 /// /// # Examples /// /// Successfully receiving value before encountering timeout: /// /// ```no_run /// use std::thread; /// use std::time::Duration; /// use std::sync::mpsc; /// /// let (send, recv) = mpsc::channel(); /// /// thread::spawn(move || { /// send.send('a').unwrap(); /// }); /// /// assert_eq!( /// recv.recv_timeout(Duration::from_millis(400)), /// Ok('a') /// ); /// ``` /// /// Receiving an error upon reaching timeout: /// /// ```no_run /// use std::thread; /// use std::time::Duration; /// use std::sync::mpsc; /// /// let (send, recv) = mpsc::channel(); /// /// thread::spawn(move || { /// thread::sleep(Duration::from_millis(800)); /// send.send('a').unwrap(); /// }); /// /// assert_eq!( /// recv.recv_timeout(Duration::from_millis(400)), /// Err(mpsc::RecvTimeoutError::Timeout) /// ); /// ``` #[stable(feature = "mpsc_recv_timeout", since = "1.12.0")] pub fn recv_timeout(&self, timeout: Duration) -> Result<T, RecvTimeoutError> { // Do an optimistic try_recv to avoid the performance impact of // Instant::now() in the full-channel case. 
match self.try_recv() { Ok(result) => Ok(result), Err(TryRecvError::Disconnected) => Err(RecvTimeoutError::Disconnected), Err(TryRecvError::Empty) => match Instant::now().checked_add(timeout) { Some(deadline) => self.recv_deadline(deadline), // So far in the future that it's practically the same as waiting indefinitely. None => self.recv().map_err(RecvTimeoutError::from), }, } } /// Attempts to wait for a value on this receiver, returning an error if the /// corresponding channel has hung up, or if `deadline` is reached. /// /// This function will always block the current thread if there is no data /// available and it's possible for more data to be sent. Once a message is /// sent to the corresponding [`Sender`] (or [`SyncSender`]), then this /// receiver will wake up and return that message. /// /// If the corresponding [`Sender`] has disconnected, or it disconnects while /// this call is blocking, this call will wake up and return [`Err`] to /// indicate that no more messages can ever be received on this channel. /// However, since channels are buffered, messages sent before the disconnect /// will still be properly received. 
/// /// # Examples /// /// Successfully receiving value before reaching deadline: /// /// ```no_run /// #![feature(deadline_api)] /// use std::thread; /// use std::time::{Duration, Instant}; /// use std::sync::mpsc; /// /// let (send, recv) = mpsc::channel(); /// /// thread::spawn(move || { /// send.send('a').unwrap(); /// }); /// /// assert_eq!( /// recv.recv_deadline(Instant::now() + Duration::from_millis(400)), /// Ok('a') /// ); /// ``` /// /// Receiving an error upon reaching deadline: /// /// ```no_run /// #![feature(deadline_api)] /// use std::thread; /// use std::time::{Duration, Instant}; /// use std::sync::mpsc; /// /// let (send, recv) = mpsc::channel(); /// /// thread::spawn(move || { /// thread::sleep(Duration::from_millis(800)); /// send.send('a').unwrap(); /// }); /// /// assert_eq!( /// recv.recv_deadline(Instant::now() + Duration::from_millis(400)), /// Err(mpsc::RecvTimeoutError::Timeout) /// ); /// ``` #[unstable(feature = "deadline_api", issue = "46316")] pub fn recv_deadline(&self, deadline: Instant) -> Result<T, RecvTimeoutError> { use self::RecvTimeoutError::*; loop { let port_or_empty = match *unsafe { self.inner() } { Flavor::Oneshot(ref p) => match p.recv(Some(deadline)) { Ok(t) => return Ok(t), Err(oneshot::Disconnected) => return Err(Disconnected), Err(oneshot::Upgraded(rx)) => Some(rx), Err(oneshot::Empty) => None, }, Flavor::Stream(ref p) => match p.recv(Some(deadline)) { Ok(t) => return Ok(t), Err(stream::Disconnected) => return Err(Disconnected), Err(stream::Upgraded(rx)) => Some(rx), Err(stream::Empty) => None, }, Flavor::Shared(ref p) => match p.recv(Some(deadline)) { Ok(t) => return Ok(t), Err(shared::Disconnected) => return Err(Disconnected), Err(shared::Empty) => None, }, Flavor::Sync(ref p) => match p.recv(Some(deadline)) { Ok(t) => return Ok(t), Err(sync::Disconnected) => return Err(Disconnected), Err(sync::Empty) => None, }, }; if let Some(new_port) = port_or_empty { unsafe { mem::swap(self.inner_mut(), new_port.inner_mut()); 
} } // If we're already passed the deadline, and we're here without // data, return a timeout, else try again. if Instant::now() >= deadline { return Err(Timeout); } } } /// Returns an iterator that will block waiting for messages, but never /// [`panic!`]. It will return [`None`] when the channel has hung up. /// /// # Examples /// /// ```rust /// use std::sync::mpsc::channel; /// use std::thread; /// /// let (send, recv) = channel(); /// /// thread::spawn(move || { /// send.send(1).unwrap(); /// send.send(2).unwrap(); /// send.send(3).unwrap(); /// }); /// /// let mut iter = recv.iter(); /// assert_eq!(iter.next(), Some(1)); /// assert_eq!(iter.next(), Some(2)); /// assert_eq!(iter.next(), Some(3)); /// assert_eq!(iter.next(), None); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn iter(&self) -> Iter<'_, T> { Iter { rx: self } } /// Returns an iterator that will attempt to yield all pending values. /// It will return `None` if there are no more pending values or if the /// channel has hung up. The iterator will never [`panic!`] or block the /// user by waiting for values. 
/// /// # Examples /// /// ```no_run /// use std::sync::mpsc::channel; /// use std::thread; /// use std::time::Duration; /// /// let (sender, receiver) = channel(); /// /// // nothing is in the buffer yet /// assert!(receiver.try_iter().next().is_none()); /// /// thread::spawn(move || { /// thread::sleep(Duration::from_secs(1)); /// sender.send(1).unwrap(); /// sender.send(2).unwrap(); /// sender.send(3).unwrap(); /// }); /// /// // nothing is in the buffer yet /// assert!(receiver.try_iter().next().is_none()); /// /// // block for two seconds /// thread::sleep(Duration::from_secs(2)); /// /// let mut iter = receiver.try_iter(); /// assert_eq!(iter.next(), Some(1)); /// assert_eq!(iter.next(), Some(2)); /// assert_eq!(iter.next(), Some(3)); /// assert_eq!(iter.next(), None); /// ``` #[stable(feature = "receiver_try_iter", since = "1.15.0")] pub fn try_iter(&self) -> TryIter<'_, T> { TryIter { rx: self } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Iterator for Iter<'a, T> { type Item = T; fn next(&mut self) -> Option<T> { self.rx.recv().ok() } } #[stable(feature = "receiver_try_iter", since = "1.15.0")] impl<'a, T> Iterator for TryIter<'a, T> { type Item = T; fn next(&mut self) -> Option<T> { self.rx.try_recv().ok() } } #[stable(feature = "receiver_into_iter", since = "1.1.0")] impl<'a, T> IntoIterator for &'a Receiver<T> { type Item = T; type IntoIter = Iter<'a, T>; fn into_iter(self) -> Iter<'a, T> { self.iter() } } #[stable(feature = "receiver_into_iter", since = "1.1.0")] impl<T> Iterator for IntoIter<T> { type Item = T; fn next(&mut self) -> Option<T> { self.rx.recv().ok() } } #[stable(feature = "receiver_into_iter", since = "1.1.0")] impl<T> IntoIterator for Receiver<T> { type Item = T; type IntoIter = IntoIter<T>; fn into_iter(self) -> IntoIter<T> { IntoIter { rx: self } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> Drop for Receiver<T> { fn drop(&mut self) { match *unsafe { self.inner() } { Flavor::Oneshot(ref p) => 
p.drop_port(), Flavor::Stream(ref p) => p.drop_port(), Flavor::Shared(ref p) => p.drop_port(), Flavor::Sync(ref p) => p.drop_port(), } } } #[stable(feature = "mpsc_debug", since = "1.8.0")] impl<T> fmt::Debug for Receiver<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Receiver").finish() } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> fmt::Debug for SendError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "SendError(..)".fmt(f) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> fmt::Display for SendError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "sending on a closed channel".fmt(f) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: Send> error::Error for SendError<T> { #[allow(deprecated)] fn description(&self) -> &str { "sending on a closed channel" } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> fmt::Debug for TrySendError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { TrySendError::Full(..) => "Full(..)".fmt(f), TrySendError::Disconnected(..) => "Disconnected(..)".fmt(f), } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> fmt::Display for TrySendError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { TrySendError::Full(..) => "sending on a full channel".fmt(f), TrySendError::Disconnected(..) => "sending on a closed channel".fmt(f), } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: Send> error::Error for TrySendError<T> { #[allow(deprecated)] fn description(&self) -> &str { match *self { TrySendError::Full(..) => "sending on a full channel", TrySendError::Disconnected(..) => "sending on a closed channel", } } } #[stable(feature = "mpsc_error_conversions", since = "1.24.0")] impl<T> From<SendError<T>> for TrySendError<T> { /// Converts a `SendError<T>` into a `TrySendError<T>`. 
/// /// This conversion always returns a `TrySendError::Disconnected` containing the data in the `SendError<T>`. /// /// No data is allocated on the heap. fn from(err: SendError<T>) -> TrySendError<T> { match err { SendError(t) => TrySendError::Disconnected(t), } } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for RecvError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "receiving on a closed channel".fmt(f) } } #[stable(feature = "rust1", since = "1.0.0")] impl error::Error for RecvError { #[allow(deprecated)] fn description(&self) -> &str { "receiving on a closed channel" } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for TryRecvError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { TryRecvError::Empty => "receiving on an empty channel".fmt(f), TryRecvError::Disconnected => "receiving on a closed channel".fmt(f), } } } #[stable(feature = "rust1", since = "1.0.0")] impl error::Error for TryRecvError { #[allow(deprecated)] fn description(&self) -> &str { match *self { TryRecvError::Empty => "receiving on an empty channel", TryRecvError::Disconnected => "receiving on a closed channel", } } } #[stable(feature = "mpsc_error_conversions", since = "1.24.0")] impl From<RecvError> for TryRecvError { /// Converts a `RecvError` into a `TryRecvError`. /// /// This conversion always returns `TryRecvError::Disconnected`. /// /// No data is allocated on the heap. 
fn from(err: RecvError) -> TryRecvError { match err { RecvError => TryRecvError::Disconnected, } } } #[stable(feature = "mpsc_recv_timeout_error", since = "1.15.0")] impl fmt::Display for RecvTimeoutError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { RecvTimeoutError::Timeout => "timed out waiting on channel".fmt(f), RecvTimeoutError::Disconnected => "channel is empty and sending half is closed".fmt(f), } } } #[stable(feature = "mpsc_recv_timeout_error", since = "1.15.0")] impl error::Error for RecvTimeoutError { #[allow(deprecated)] fn description(&self) -> &str { match *self { RecvTimeoutError::Timeout => "timed out waiting on channel", RecvTimeoutError::Disconnected => "channel is empty and sending half is closed", } } } #[stable(feature = "mpsc_error_conversions", since = "1.24.0")] impl From<RecvError> for RecvTimeoutError { /// Converts a `RecvError` into a `RecvTimeoutError`. /// /// This conversion always returns `RecvTimeoutError::Disconnected`. /// /// No data is allocated on the heap. fn from(err: RecvError) -> RecvTimeoutError { match err { RecvError => RecvTimeoutError::Disconnected, } } }
use {Action, InputTranslator, InputRebind, MouseTranslationData, to_act_bt_hashmap};
use input::Button;
use window::Size;

use std::convert::Into;
use std::default::Default;

/// Convenience object for constructing an InputMap.
pub struct RebindBuilder<A: Action> {
    // (Button -> Action) associations accumulated via `with_action_mapping`;
    // applied when the translator/rebind object is built.
    input_remappings: Vec<(Button, A)>,
    // Mouse inversion / sensitivity / viewport settings shared by both
    // build targets.
    mouse_data: MouseTranslationData
}

impl<A: Action> RebindBuilder<A> {
    /// Creates a new `RebindBuilder` with the specified viewport size.
    pub fn new<S: Into<Size> + Sized>(size: S) -> Self {
        RebindBuilder {
            input_remappings: vec![],
            mouse_data: MouseTranslationData::new(size)
        }
    }

    /// Set whether the x scroll is inverted on the builder (and thus
    /// on the built object).
    pub fn x_scroll_inverted(mut self, invert: bool) -> Self {
        self.mouse_data.x_axis_scroll_inverted = invert;
        self
    }

    /// Returns true if the x scroll is inverted on the builder (and thus
    /// on the built object).
    pub fn get_x_scroll_inverted(&self) -> &bool {
        &self.mouse_data.x_axis_scroll_inverted
    }

    /// Set whether the y scroll is inverted on the builder (and thus
    /// on the built object).
    pub fn y_scroll_inverted(mut self, invert: bool) -> Self {
        self.mouse_data.y_axis_scroll_inverted = invert;
        self
    }

    /// Returns true if the y scroll is inverted on the builder (and thus
    /// on the built object).
    pub fn get_y_scroll_inverted(&self) -> &bool {
        &self.mouse_data.y_axis_scroll_inverted
    }

    /// Set whether the x axis motion is inverted on the builder (and thus
    /// on the built object).
    pub fn x_motion_inverted(mut self, invert: bool) -> Self {
        self.mouse_data.x_axis_motion_inverted = invert;
        self
    }

    /// Returns true if the x axis motion is inverted on the builder (and thus
    /// on the built object).
    pub fn get_x_motion_inverted(&self) -> &bool {
        &self.mouse_data.x_axis_motion_inverted
    }

    /// Set whether the y axis motion is inverted on the builder (and thus
    /// on the built object).
    pub fn y_motion_inverted(mut self, invert: bool) -> Self {
        self.mouse_data.y_axis_motion_inverted = invert;
        self
    }

    /// Returns true if the y axis motion is inverted on the builder (and thus
    /// on the built object).
    pub fn get_y_motion_inverted(&self) -> &bool {
        &self.mouse_data.y_axis_motion_inverted
    }

    /// Set the mouse sensitivity
    pub fn mouse_sensitivity(mut self, sensitivity: f64) -> Self {
        self.mouse_data.sensitivity = sensitivity;
        self
    }

    /// Returns the mouse sensitivity
    pub fn get_mouse_sensitivity(&self) -> &f64 {
        &self.mouse_data.sensitivity
    }

    /// Sets the viewport size used for mouse position calculations.
    pub fn viewport_size(mut self, size: Size) -> Self {
        self.mouse_data.viewport_size = size;
        self
    }

    /// Returns the currently set viewport size.
    pub fn get_viewport_size(&self) -> &Size {
        &self.mouse_data.viewport_size
    }

    /// Add an association between the Button and Action in the built object.
    pub fn with_action_mapping(mut self, button: Button, action: A) -> Self {
        self.input_remappings.push((button, action));
        self
    }

    /// Creates an `InputTranslator` from this builder object. Consumes self.
    pub fn build_translator(self) -> InputTranslator<A> {
        self.into()
    }

    /// Creates an `InputRebind` from this builder object. Consumes self.
    pub fn build_rebind(self) -> InputRebind<A> {
        self.into()
    }
}

/// Creates a new `RebindBuilder`. The viewport size is set to (800, 600).
impl<A: Action> Default for RebindBuilder<A> { fn default() -> Self { Self::new((800, 600)) } } impl<A: Action> Into<InputTranslator<A>> for RebindBuilder<A> { fn into(self) -> InputTranslator<A> { let mut input_map = InputTranslator::new(self.mouse_data.viewport_size); input_map.mouse_translator.data = self.mouse_data; input_map.keymap = self.input_remappings.iter().cloned().collect(); input_map } } impl<A: Action> Into<InputRebind<A>> for RebindBuilder<A> { fn into(self) -> InputRebind<A> { let mut input_rebind = InputRebind::new(self.mouse_data.viewport_size); input_rebind.mouse_data = self.mouse_data; input_rebind.keymap = to_act_bt_hashmap(self.input_remappings.iter().cloned()); input_rebind } } Remove trailing newlines use {Action, InputTranslator, InputRebind, MouseTranslationData, to_act_bt_hashmap}; use input::Button; use window::Size; use std::convert::Into; use std::default::Default; /// Convenience object for constructing an InputMap. pub struct RebindBuilder<A: Action> { input_remappings: Vec<(Button, A)>, mouse_data: MouseTranslationData } impl<A: Action> RebindBuilder<A> { /// Creates a new `RebindBuilder` with the specified viewport size. pub fn new<S: Into<Size> + Sized>(size: S) -> Self { RebindBuilder { input_remappings: vec![], mouse_data: MouseTranslationData::new(size) } } /// Set whether the x scroll is inverted on the builder (and thus /// on the built object). pub fn x_scroll_inverted(mut self, invert: bool) -> Self { self.mouse_data.x_axis_scroll_inverted = invert; self } /// Returns true if the x scroll is inverted on the builder (and thus /// on the built object). pub fn get_x_scroll_inverted(&self) -> &bool { &self.mouse_data.x_axis_scroll_inverted } /// Set whether the y scroll is inverted on the builder (and thus /// on the built object). 
    // NOTE(review): this entire span is part of the duplicated copy of the
    // RebindBuilder module (see artifact note above the duplicated `use`
    // lines); it should be removed once the duplication is confirmed.

    /// Set whether the y scroll is inverted on the builder (and thus
    /// on the built object).
    pub fn y_scroll_inverted(mut self, invert: bool) -> Self {
        self.mouse_data.y_axis_scroll_inverted = invert;
        self
    }

    /// Returns true if the y scroll is inverted on the builder (and thus
    /// on the built object).
    pub fn get_y_scroll_inverted(&self) -> &bool {
        &self.mouse_data.y_axis_scroll_inverted
    }

    /// Set whether the x axis motion is inverted on the builder (and thus
    /// on the built object).
    pub fn x_motion_inverted(mut self, invert: bool) -> Self {
        self.mouse_data.x_axis_motion_inverted = invert;
        self
    }

    /// Returns true if the x axis motion is inverted on the builder (and thus
    /// on the built object).
    pub fn get_x_motion_inverted(&self) -> &bool {
        &self.mouse_data.x_axis_motion_inverted
    }

    /// Set whether the y axis motion is inverted on the builder (and thus
    /// on the built object).
    pub fn y_motion_inverted(mut self, invert: bool) -> Self {
        self.mouse_data.y_axis_motion_inverted = invert;
        self
    }

    /// Returns true if the y axis motion is inverted on the builder (and thus
    /// on the built object).
    pub fn get_y_motion_inverted(&self) -> &bool {
        &self.mouse_data.y_axis_motion_inverted
    }

    /// Set the mouse sensitivity
    pub fn mouse_sensitivity(mut self, sensitivity: f64) -> Self {
        self.mouse_data.sensitivity = sensitivity;
        self
    }

    /// Returns the mouse sensitivity
    pub fn get_mouse_sensitivity(&self) -> &f64 {
        &self.mouse_data.sensitivity
    }

    /// Sets the viewport size used for mouse position calculations.
    pub fn viewport_size(mut self, size: Size) -> Self {
        self.mouse_data.viewport_size = size;
        self
    }

    /// Returns the currently set viewport size.
    pub fn get_viewport_size(&self) -> &Size {
        &self.mouse_data.viewport_size
    }

    /// Add an association between the Button and Action in the built object.
    pub fn with_action_mapping(mut self, button: Button, action: A) -> Self {
        self.input_remappings.push((button, action));
        self
    }

    /// Creates an `InputTranslator` from this builder object. Consumes self.
    pub fn build_translator(self) -> InputTranslator<A> {
        self.into()
    }

    /// Creates an `InputRebind` from this builder object. Consumes self.
    pub fn build_rebind(self) -> InputRebind<A> {
        self.into()
    }
}

/// Creates a new `RebindBuilder`. The viewport size is set to (800, 600).
impl<A: Action> Default for RebindBuilder<A> {
    fn default() -> Self {
        Self::new((800, 600))
    }
}

impl<A: Action> Into<InputTranslator<A>> for RebindBuilder<A> {
    fn into(self) -> InputTranslator<A> {
        let mut input_map = InputTranslator::new(self.mouse_data.viewport_size);

        input_map.mouse_translator.data = self.mouse_data;
        input_map.keymap = self.input_remappings.iter().cloned().collect();

        input_map
    }
}

impl<A: Action> Into<InputRebind<A>> for RebindBuilder<A> {
    fn into(self) -> InputRebind<A> {
        let mut input_rebind = InputRebind::new(self.mouse_data.viewport_size);

        input_rebind.mouse_data = self.mouse_data;
        input_rebind.keymap = to_act_bt_hashmap(self.input_remappings.iter().cloned());

        input_rebind
    }
}
use std::collections::HashMap; use std::boxed::Box; use std::io::{Read, Write}; use mio::{self, Token, EventLoop, EventSet, PollOpt, Sender}; use mio::tcp::{TcpListener, TcpStream}; use protobuf::RepeatedField; use kvproto::kvrpcpb::{CmdGetResponse, CmdScanResponse, CmdPrewriteResponse, CmdCommitResponse, CmdCleanupResponse, CmdRollbackThenGetResponse, CmdCommitThenGetResponse, Request, Response, MessageType, Item, ResultType, ResultType_Type, LockInfo, Operator}; use storage::{Storage, Key, Value, KvPair, KvOpt, Mutation, MaybeLocked, MaybeComitted, MaybeRolledback, Callback}; use storage::Result as ResultStorage; use storage::Error as StorageError; use storage::EngineError; use super::conn::Conn; use super::{Result, ServerError}; // Token(1) instead of Token(0) // See here: https://github.com/hjr3/mob/blob/multi-echo-blog-post/src/main.rs#L115 pub const SERVER_TOKEN: Token = Token(1); const FIRST_CUSTOM_TOKEN: Token = Token(1024); pub struct Server { pub listener: TcpListener, pub conns: HashMap<Token, Conn>, pub token_counter: usize, store: Storage, } impl Server { pub fn new(listener: TcpListener, conns: HashMap<Token, Conn>, store: Storage) -> Server { Server { listener: listener, conns: conns, token_counter: FIRST_CUSTOM_TOKEN.as_usize(), store: store, } } pub fn handle_get(&mut self, msg: &mut Request, msg_id: u64, token: Token, event_loop: &mut EventLoop<Server>) -> Result<()> { if !msg.has_cmd_get_req() { format_err!("Msg doesn't contain a CmdGetRequest"); } let mut cmd_get_req = msg.take_cmd_get_req(); let mut key_address = cmd_get_req.take_key_address(); let key = key_address.take_key(); let opt = KvOpt::new(key_address.get_region_id(), key_address.take_peer()); let sender = event_loop.channel(); let cb = Server::make_cb::<Option<Value>>(Server::cmd_get_done, sender, token, msg_id); self.store .async_get(Key::new(key), cmd_get_req.get_version(), opt, cb) .map_err(ServerError::Storage) } pub fn handle_scan(&mut self, msg: &mut Request, msg_id: u64, token: 
Token, event_loop: &mut EventLoop<Server>) -> Result<()> { if !msg.has_cmd_scan_req() { format_err!("Msg doesn't contain a CmdScanRequest"); } let mut cmd_scan_req = msg.take_cmd_scan_req(); let sender = event_loop.channel(); let mut start_key_addresss = cmd_scan_req.take_key_address(); let start_key = start_key_addresss.take_key(); let opt = KvOpt::new(start_key_addresss.get_region_id(), start_key_addresss.take_peer()); debug!("start_key [{:?}]", start_key); let cb = Server::make_cb::<Vec<ResultStorage<KvPair>>>(Server::cmd_scan_done, sender, token, msg_id); self.store .async_scan(Key::new(start_key), cmd_scan_req.get_limit() as usize, cmd_scan_req.get_version(), opt, cb) .map_err(ServerError::Storage) } pub fn handle_prewrite(&mut self, msg: &mut Request, msg_id: u64, token: Token, event_loop: &mut EventLoop<Server>) -> Result<()> { if !msg.has_cmd_prewrite_req() { format_err!("Msg doesn't contain a CmdPrewriteRequest"); } let mut cmd_prewrite_req = msg.take_cmd_prewrite_req(); let sender = event_loop.channel(); let mutations = cmd_prewrite_req.take_mutations() .into_iter() .map(|mut x| { match x.get_op() { Operator::OpPut => { Mutation::Put((Key::new(x.take_key()), x.take_value())) } Operator::OpDel => { Mutation::Delete(Key::new(x.take_key())) } Operator::OpLock => { Mutation::Lock(Key::new(x.take_key())) } } }) .collect(); let opt = { let mut key_address = cmd_prewrite_req.take_key_address(); KvOpt::new(key_address.get_region_id(), key_address.take_peer()) }; let cb = Server::make_cb::<Vec<ResultStorage<()>>>(Server::cmd_prewrite_done, sender, token, msg_id); self.store .async_prewrite(mutations, cmd_prewrite_req.get_primary_lock().to_vec(), cmd_prewrite_req.get_start_version(), opt, cb) .map_err(ServerError::Storage) } pub fn handle_commit(&mut self, msg: &mut Request, msg_id: u64, token: Token, event_loop: &mut EventLoop<Server>) -> Result<()> { if !msg.has_cmd_commit_req() { format_err!("Msg doesn't contain a CmdCommitRequest"); } let mut cmd_commit_req = 
            msg.take_cmd_commit_req();
        let sender = event_loop.channel();
        let cb = Server::make_cb::<()>(Server::cmd_commit_done, sender, token, msg_id);
        // Route the whole commit using the region/peer of the first key
        // address; presumably all keys in one commit share a region —
        // TODO confirm.
        let opt = {
            let mut first = cmd_commit_req.get_keys_address()[0].clone();
            KvOpt::new(first.get_region_id(), first.take_peer())
        };
        let keys = cmd_commit_req.take_keys_address()
                                 .into_iter()
                                 .map(|mut x| Key::new(x.take_key()))
                                 .collect();
        self.store
            .async_commit(keys,
                          cmd_commit_req.get_start_version(),
                          cmd_commit_req.get_commit_version(),
                          opt,
                          cb)
            .map_err(ServerError::Storage)
    }

    pub fn handle_cleanup(&mut self,
                          msg: &mut Request,
                          msg_id: u64,
                          token: Token,
                          event_loop: &mut EventLoop<Server>)
                          -> Result<()> {
        if !msg.has_cmd_cleanup_req() {
            format_err!("Msg doesn't contain a CmdCleanupRequest");
        }
        let mut cmd_cleanup_req = msg.take_cmd_cleanup_req();
        let sender = event_loop.channel();
        let cb = Server::make_cb::<()>(Server::cmd_cleanup_done, sender, token, msg_id);
        let mut key_address = cmd_cleanup_req.take_key_address();
        let key = key_address.take_key();
        let opt = KvOpt::new(key_address.get_region_id(), key_address.take_peer());
        self.store
            .async_cleanup(Key::new(key), cmd_cleanup_req.get_start_version(), opt, cb)
            .map_err(ServerError::Storage)
    }

    pub fn handle_commit_then_get(&mut self,
                                  msg: &mut Request,
                                  msg_id: u64,
                                  token: Token,
                                  event_loop: &mut EventLoop<Server>)
                                  -> Result<()> {
        if !msg.has_cmd_commit_get_req() {
            format_err!("Msg doesn't contain a CmdCommitThenGetRequest");
        }
        let mut cmd_commit_get_req = msg.take_cmd_commit_get_req();
        let sender = event_loop.channel();
        let cb = Server::make_cb::<Option<Value>>(Server::cmd_commit_get_done,
                                                  sender,
                                                  token,
                                                  msg_id);
        let mut key_address = cmd_commit_get_req.take_key_address();
        let key = key_address.take_key();
        let opt = KvOpt::new(key_address.get_region_id(), key_address.take_peer());
        self.store
            .async_commit_then_get(Key::new(key),
                                   cmd_commit_get_req.get_lock_version(),
                                   cmd_commit_get_req.get_commit_version(),
                                   cmd_commit_get_req.get_get_version(),
                                   opt,
                                   cb)
            .map_err(ServerError::Storage)
    }

    pub fn handle_rollback_then_get(&mut self,
                                    msg: &mut Request,
                                    msg_id: u64,
                                    token: Token,
                                    event_loop: &mut EventLoop<Server>)
                                    -> Result<()> {
        if !msg.has_cmd_rb_get_req() {
            format_err!("Msg doesn't contain a CmdRollbackThenGetRequest");
        }
        let mut cmd_rollback_get_req = msg.take_cmd_rb_get_req();
        let sender = event_loop.channel();
        let cb = Server::make_cb::<Option<Value>>(Server::cmd_rollback_get_done,
                                                  sender,
                                                  token,
                                                  msg_id);
        let mut key_address = cmd_rollback_get_req.take_key_address();
        let key = key_address.take_key();
        let opt = KvOpt::new(key_address.get_region_id(), key_address.take_peer());
        self.store
            .async_rollback_then_get(Key::new(key),
                                     cmd_rollback_get_req.get_lock_version(),
                                     opt,
                                     cb)
            .map_err(ServerError::Storage)
    }

    // Converts a storage get result into a protobuf Response, classifying
    // errors into NotLeader / Locked / Retryable result types.
    fn cmd_get_done(r: ResultStorage<Option<Value>>) -> Response {
        let mut resp: Response = Response::new();
        let mut cmd_get_resp: CmdGetResponse = CmdGetResponse::new();
        let mut res_type: ResultType = ResultType::new();
        match r {
            Ok(opt) => {
                res_type.set_field_type(ResultType_Type::Ok);
                match opt {
                    Some(val) => cmd_get_resp.set_value(val),
                    // Missing key is reported as an empty value.
                    None => cmd_get_resp.set_value(Vec::new()),
                }
            }
            Err(ref e) => {
                if let StorageError::Engine(EngineError::Request(ref err)) = *e {
                    if err.has_not_leader() {
                        // Tell the client which peer is the leader so it can retry.
                        res_type.set_field_type(ResultType_Type::NotLeader);
                        res_type.set_leader_info(err.get_not_leader().to_owned());
                    } else {
                        error!("{:?}", err);
                        res_type.set_field_type(ResultType_Type::Other);
                        res_type.set_msg(format!("engine error: {:?}", err));
                    }
                } else if r.is_locked() {
                    if let Some((_, primary, ts)) = r.get_lock() {
                        res_type.set_field_type(ResultType_Type::Locked);
                        let mut lock_info = LockInfo::new();
                        lock_info.set_primary_lock(primary);
                        lock_info.set_lock_version(ts);
                        res_type.set_lock_info(lock_info);
                    } else {
                        let err_str = "key is locked but primary info not found".to_owned();
                        error!("{}", err_str);
                        res_type.set_field_type(ResultType_Type::Other);
                        res_type.set_msg(err_str);
                    }
                } else {
                    let err_str = format!("storage error: {:?}", e);
                    error!("{}", err_str);
                    res_type.set_field_type(ResultType_Type::Retryable);
                    res_type.set_msg(err_str);
                }
            }
        }
        cmd_get_resp.set_res_type(res_type);
        resp.set_field_type(MessageType::CmdGet);
        resp.set_cmd_get_resp(cmd_get_resp);
        resp
    }

    // Converts a scan result into a Response; each KvPair result carries its
    // own per-item ResultType (Ok / Locked / Retryable).
    fn cmd_scan_done(kvs: ResultStorage<Vec<ResultStorage<KvPair>>>) -> Response {
        let mut resp: Response = Response::new();
        let mut cmd_scan_resp: CmdScanResponse = CmdScanResponse::new();
        cmd_scan_resp.set_ok(kvs.is_ok());
        match kvs {
            Ok(kvs) => {
                // convert storage::KvPair to kvrpcpb::Item
                let mut new_kvs: Vec<Item> = Vec::new();
                for result in kvs {
                    let mut new_kv: Item = Item::new();
                    let mut res_type: ResultType = ResultType::new();
                    match result {
                        Ok((ref key, ref value)) => {
                            res_type.set_field_type(ResultType_Type::Ok);
                            new_kv.set_key(key.clone());
                            new_kv.set_value(value.clone());
                        }
                        Err(..) => {
                            if result.is_locked() {
                                if let Some((key, primary, ts)) = result.get_lock() {
                                    res_type.set_field_type(ResultType_Type::Locked);
                                    let mut lock_info = LockInfo::new();
                                    lock_info.set_primary_lock(primary);
                                    lock_info.set_lock_version(ts);
                                    res_type.set_lock_info(lock_info);
                                    new_kv.set_key(key);
                                }
                                // NOTE(review): a locked item without lock
                                // info is silently emitted with a default
                                // ResultType — confirm this is intended.
                            } else {
                                res_type.set_field_type(ResultType_Type::Retryable);
                            }
                        }
                    }
                    new_kv.set_res_type(res_type);
                    new_kvs.push(new_kv);
                }
                cmd_scan_resp.set_results(RepeatedField::from_vec(new_kvs));
            }
            Err(e) => {
                error!("storage error: {:?}", e);
            }
        }
        resp.set_field_type(MessageType::CmdScan);
        resp.set_cmd_scan_resp(cmd_scan_resp);
        resp
    }

    // Converts per-mutation prewrite results into a Response.
    fn cmd_prewrite_done(results: ResultStorage<Vec<ResultStorage<()>>>) -> Response {
        let mut resp: Response = Response::new();
        let mut cmd_prewrite_resp: CmdPrewriteResponse = CmdPrewriteResponse::new();
        cmd_prewrite_resp.set_ok(results.is_ok());
        let mut items: Vec<Item> = Vec::new();
        match results {
            Ok(results) => {
                for result in results {
                    let mut item = Item::new();
                    let mut res_type: ResultType = ResultType::new();
                    if result.is_ok() {
                        res_type.set_field_type(ResultType_Type::Ok);
                    } else if let Some((key, primary, ts)) = result.get_lock() {
                        // Actually items only contain locked item, so `ok` is always false.
                        res_type.set_field_type(ResultType_Type::Locked);
                        let mut lock_info = LockInfo::new();
                        lock_info.set_primary_lock(primary);
                        lock_info.set_lock_version(ts);
                        res_type.set_lock_info(lock_info);
                        item.set_key(key);
                    } else {
                        res_type.set_field_type(ResultType_Type::Retryable);
                    }
                    item.set_res_type(res_type);
                    items.push(item);
                }
            }
            Err(e) => {
                error!("storage error: {:?}", e);
            }
        }
        cmd_prewrite_resp.set_results(RepeatedField::from_vec(items));
        resp.set_field_type(MessageType::CmdPrewrite);
        resp.set_cmd_prewrite_resp(cmd_prewrite_resp);
        resp
    }

    fn cmd_commit_done(r: ResultStorage<()>) -> Response {
        let mut resp: Response = Response::new();
        let mut cmd_commit_resp: CmdCommitResponse = CmdCommitResponse::new();
        cmd_commit_resp.set_ok(r.is_ok());
        resp.set_field_type(MessageType::CmdCommit);
        resp.set_cmd_commit_resp(cmd_commit_resp);
        resp
    }

    // Converts a cleanup result into Ok / Committed / Rolledback / Retryable.
    fn cmd_cleanup_done(r: ResultStorage<()>) -> Response {
        let mut resp: Response = Response::new();
        let mut cmd_cleanup_resp: CmdCleanupResponse = CmdCleanupResponse::new();
        let mut res_type: ResultType = ResultType::new();
        if r.is_ok() {
            res_type.set_field_type(ResultType_Type::Ok);
        } else if r.is_committed() {
            res_type.set_field_type(ResultType_Type::Committed);
            if let Some(commit_ts) = r.get_commit() {
                cmd_cleanup_resp.set_commit_version(commit_ts);
            } else {
                warn!("commit_ts not found when is_committed.");
            }
        } else if r.is_rolledback() {
            res_type.set_field_type(ResultType_Type::Rolledback);
        } else {
            warn!("Cleanup other error {:?}", r.err());
            res_type.set_field_type(ResultType_Type::Retryable);
        }
        cmd_cleanup_resp.set_res_type(res_type);
        resp.set_field_type(MessageType::CmdCleanup);
        resp.set_cmd_cleanup_resp(cmd_cleanup_resp);
        resp
    }

    fn cmd_commit_get_done(r: ResultStorage<Option<Value>>) -> Response {
        let mut resp: Response = Response::new();
        let mut cmd_commit_get_resp: CmdCommitThenGetResponse = CmdCommitThenGetResponse::new();
        cmd_commit_get_resp.set_ok(r.is_ok());
        if let Ok(Some(val)) = r {
            cmd_commit_get_resp.set_value(val);
        }
        resp.set_field_type(MessageType::CmdCommitThenGet);
        resp.set_cmd_commit_get_resp(cmd_commit_get_resp);
        resp
    }

    fn cmd_rollback_get_done(r: ResultStorage<Option<Value>>) -> Response {
        let mut resp: Response = Response::new();
        let mut cmd_rollback_get_resp: CmdRollbackThenGetResponse =
            CmdRollbackThenGetResponse::new();
        cmd_rollback_get_resp.set_ok(r.is_ok());
        if let Err(ref e) = r {
            error!("rb & get error: {}", e);
        }
        if let Ok(Some(val)) = r {
            cmd_rollback_get_resp.set_value(val);
        }
        resp.set_field_type(MessageType::CmdRollbackThenGet);
        resp.set_cmd_rb_get_resp(cmd_rollback_get_resp);
        resp
    }

    // Wraps a `done` function into a storage Callback that converts the
    // result to a Response and posts it back to the event loop, tagged with
    // the originating connection token and message id.
    fn make_cb<T: 'static>(f: fn(ResultStorage<T>) -> Response,
                           sender: Sender<QueueMessage>,
                           token: Token,
                           msg_id: u64)
                           -> Callback<T> {
        Box::new(move |r: ResultStorage<T>| {
            let resp: Response = f(r);
            let queue_msg: QueueMessage = QueueMessage::Response(token, msg_id, resp);
            // Send failure (event loop gone/full) is logged, not propagated —
            // the callback has no caller to return an error to.
            if let Err(e) = sender.send(queue_msg) {
                error!("{:?}", e);
            }
        })
    }

    // Registers a freshly accepted socket with the event loop under a new
    // token. Nodelay failure is tolerated (logged only).
    fn add_new_conn(&mut self,
                    event_loop: &mut EventLoop<Server>,
                    sock: TcpStream)
                    -> Result<(Token)> {
        let new_token = Token(self.token_counter);
        let _ = sock.set_nodelay(true).map_err(|e| error!("set nodelay failed {:?}", e));
        self.conns.insert(new_token, Conn::new(sock, new_token));
        self.token_counter += 1;

        // NOTE(review): register failure panics via unwrap — confirm that is
        // acceptable here rather than returning the error.
        event_loop.register(&self.conns[&new_token].sock,
                            new_token,
                            EventSet::readable() | EventSet::hup(),
                            PollOpt::edge())
                  .unwrap();
        Ok(new_token)
    }

    fn remove_conn(&mut self, event_loop: &mut EventLoop<Server>, token: Token) {
        let conn = self.conns.remove(&token);
        match conn {
            Some(conn) => {
                // Deregister errors are ignored; the connection is gone
                // either way.
                let _ = event_loop.deregister(&conn.sock);
            }
            None => {
                warn!("missing connection for token {}", token.as_usize());
            }
        }
    }

    // Accept-loop: drain all pending connections (edge-triggered listener).
    fn handle_server_readable(&mut self, event_loop: &mut EventLoop<Server>) {
        loop {
            let sock = match self.listener.accept() {
                // Some(sock, addr)
                Ok(Some((sock, _))) => sock,
                Ok(None) => {
                    debug!("no connection, accept later.");
                    return;
                }
                Err(e) => {
                    error!("accept error: {}", e);
                    return;
                }
            };
            let _ = self.add_new_conn(event_loop, sock);
        }
    }

    fn handle_conn_readable(&mut self, event_loop: &mut EventLoop<Server>, token: Token) {
        let mut conn: &mut Conn = match self.conns.get_mut(&token) {
            Some(c) => c,
            None => {
                error!("Get connection failed token[{}]", token.0);
                return;
            }
        };
        if let Err(e) = conn.read(event_loop) {
            error!("read failed with {:?}", e);
        }
    }

    fn handle_writable(&mut self, event_loop: &mut EventLoop<Server>, token: Token) {
        let mut conn: &mut Conn = match self.conns.get_mut(&token) {
            Some(c) => c,
            None => {
                error!("Get connection failed token[{}]", token.0);
                return;
            }
        };
        if let Err(e) = conn.write(event_loop) {
            error!("write failed with {:?}", e);
        }
    }

    // Routes a decoded Request to the matching handler by message type.
    fn handle_request(&mut self,
                      event_loop: &mut EventLoop<Server>,
                      token: Token,
                      msg_id: u64,
                      mut req: Request) {
        debug!("notify Request token[{}] msg_id[{}] type[{:?}]",
               token.0,
               msg_id,
               req.get_field_type());
        if let Err(e) = match req.get_field_type() {
            MessageType::CmdGet => self.handle_get(&mut req, msg_id, token, event_loop),
            MessageType::CmdScan => self.handle_scan(&mut req, msg_id, token, event_loop),
            MessageType::CmdPrewrite => self.handle_prewrite(&mut req, msg_id, token, event_loop),
            MessageType::CmdCommit => self.handle_commit(&mut req, msg_id, token, event_loop),
            MessageType::CmdCleanup => self.handle_cleanup(&mut req, msg_id, token, event_loop),
            MessageType::CmdCommitThenGet => {
                self.handle_commit_then_get(&mut req, msg_id, token, event_loop)
            }
            MessageType::CmdRollbackThenGet => {
                self.handle_rollback_then_get(&mut req, msg_id, token, event_loop)
            }
        } {
            error!("Some error occur err[{:?}]", e);
        }
    }

    // Queues a finished Response onto the owning connection's write buffer.
    fn handle_response(&mut self,
                       event_loop: &mut EventLoop<Server>,
                       token: Token,
                       msg_id: u64,
                       resp: Response) {
        debug!("notify Response token[{}] msg_id[{}] type[{:?}]",
               token.0,
               msg_id,
               resp.get_field_type());
        let mut conn: &mut Conn = match self.conns.get_mut(&token) {
            Some(c) => c,
            None => {
                error!("Get connection failed token[{}]",
token.0); return; } }; let _ = conn.append_write_buf(event_loop, msg_id, resp); } } #[allow(dead_code)] pub enum QueueMessage { // Request(token, msg_id, kvrpc_request) Request(Token, u64, Request), // Request(token, msg_id, kvrpc_response) Response(Token, u64, Response), Quit, } impl mio::Handler for Server { type Timeout = (); type Message = QueueMessage; fn ready(&mut self, event_loop: &mut EventLoop<Server>, token: Token, events: EventSet) { if events.is_hup() || events.is_error() { self.remove_conn(event_loop, token); return; } if events.is_readable() { match token { SERVER_TOKEN => { self.handle_server_readable(event_loop); } token => { self.handle_conn_readable(event_loop, token); } } } if events.is_writable() { self.handle_writable(event_loop, token); } } fn notify(&mut self, event_loop: &mut EventLoop<Server>, msg: QueueMessage) { match msg { QueueMessage::Request(token, msg_id, req) => { self.handle_request(event_loop, token, msg_id, req); } QueueMessage::Response(token, msg_id, resp) => { self.handle_response(event_loop, token, msg_id, resp); } QueueMessage::Quit => event_loop.shutdown(), } } } #[cfg(test)] mod tests { use std::thread; use mio::EventLoop; use kvproto::kvrpcpb::*; use kvproto::errorpb::NotLeaderError; use storage::{self, Value, Storage, Dsn, txn, mvcc, engine}; use storage::Error::Other; use storage::KvPair as StorageKV; use storage::Result as ResultStorage; use super::*; #[test] fn test_quit() { use std::collections::HashMap; use mio::tcp::TcpListener; let mut event_loop = EventLoop::new().unwrap(); let sender = event_loop.channel(); let h = thread::spawn(move || { let l: TcpListener = TcpListener::bind(&"127.0.0.1:64321".parse().unwrap()).unwrap(); let store: Storage = Storage::new(Dsn::Memory).unwrap(); let mut srv: Server = Server::new(l, HashMap::new(), store); event_loop.run(&mut srv).unwrap(); }); // Without this thread will be hang. 
let _ = sender.send(QueueMessage::Quit); h.join().unwrap(); } #[test] fn test_get_done_none() { let actual_resp: Response = Server::cmd_get_done(Ok(None)); let mut exp_resp: Response = Response::new(); let mut exp_cmd_resp: CmdGetResponse = CmdGetResponse::new(); exp_cmd_resp.set_res_type(make_res_type(ResultType_Type::Ok)); exp_cmd_resp.set_value(Vec::new()); exp_resp.set_field_type(MessageType::CmdGet); exp_resp.set_cmd_get_resp(exp_cmd_resp); assert_eq!(exp_resp, actual_resp); } #[test] fn test_get_done_some() { let storage_val: Vec<_> = vec![0u8; 8]; let actual_resp: Response = Server::cmd_get_done(Ok(Some(storage_val))); let mut exp_resp: Response = Response::new(); let mut exp_cmd_resp: CmdGetResponse = CmdGetResponse::new(); exp_cmd_resp.set_res_type(make_res_type(ResultType_Type::Ok)); exp_cmd_resp.set_value(vec![0u8; 8]); exp_resp.set_field_type(MessageType::CmdGet); exp_resp.set_cmd_get_resp(exp_cmd_resp); assert_eq!(exp_resp, actual_resp); } #[test] // #[should_panic] fn test_get_done_error() { let actual_resp: Response = Server::cmd_get_done(Err(Other(Box::new("error")))); let mut exp_resp: Response = Response::new(); let mut exp_cmd_resp: CmdGetResponse = CmdGetResponse::new(); let mut res_type = make_res_type(ResultType_Type::Retryable); res_type.set_msg("storage error: Other(Any)".to_owned()); exp_cmd_resp.set_res_type(res_type); exp_resp.set_field_type(MessageType::CmdGet); exp_resp.set_cmd_get_resp(exp_cmd_resp); assert_eq!(exp_resp, actual_resp); } #[test] fn test_scan_done_empty() { let actual_resp: Response = Server::cmd_scan_done(Ok(Vec::new())); let mut exp_resp: Response = Response::new(); let mut exp_cmd_resp: CmdScanResponse = CmdScanResponse::new(); exp_cmd_resp.set_ok(true); exp_resp.set_field_type(MessageType::CmdScan); exp_resp.set_cmd_scan_resp(exp_cmd_resp); assert_eq!(exp_resp, actual_resp); } #[test] fn test_scan_done_some() { let k0 = vec![0u8, 0u8]; let v0: Value = vec![255u8, 255u8]; let k1 = vec![0u8, 1u8]; let v1: Value = 
vec![255u8, 254u8]; let kvs: Vec<ResultStorage<StorageKV>> = vec![Ok((k0.clone(), v0.clone())), Ok((k1.clone(), v1.clone()))]; let actual_resp: Response = Server::cmd_scan_done(Ok(kvs)); assert_eq!(MessageType::CmdScan, actual_resp.get_field_type()); let actual_cmd_resp: &CmdScanResponse = actual_resp.get_cmd_scan_resp(); assert_eq!(true, actual_cmd_resp.get_ok()); let actual_kvs = actual_cmd_resp.get_results(); assert_eq!(2, actual_kvs.len()); assert_eq!(make_res_type(ResultType_Type::Ok), *actual_kvs[0].get_res_type()); assert_eq!(k0, actual_kvs[0].get_key()); assert_eq!(v0, actual_kvs[0].get_value()); assert_eq!(make_res_type(ResultType_Type::Ok), *actual_kvs[1].get_res_type()); assert_eq!(k1, actual_kvs[1].get_key()); assert_eq!(v1, actual_kvs[1].get_value()); } #[test] fn test_scan_done_lock() { use kvproto::kvrpcpb::LockInfo; let k0 = vec![0u8, 0u8]; let v0: Value = vec![255u8, 255u8]; let k1 = vec![0u8, 1u8]; let k1_primary = k0.clone(); let k1_ts: u64 = 10000; let kvs: Vec<ResultStorage<StorageKV>> = vec![Ok((k0.clone(), v0.clone())), make_lock_error(k1.clone(), k1_primary.clone(), k1_ts)]; let actual_resp: Response = Server::cmd_scan_done(Ok(kvs)); assert_eq!(MessageType::CmdScan, actual_resp.get_field_type()); let actual_cmd_resp: &CmdScanResponse = actual_resp.get_cmd_scan_resp(); assert_eq!(true, actual_cmd_resp.get_ok()); let actual_kvs = actual_cmd_resp.get_results(); assert_eq!(2, actual_kvs.len()); assert_eq!(make_res_type(ResultType_Type::Ok), *actual_kvs[0].get_res_type()); assert_eq!(k0, actual_kvs[0].get_key()); assert_eq!(v0, actual_kvs[0].get_value()); let mut exp_res_type1 = make_res_type(ResultType_Type::Locked); let mut lock_info1 = LockInfo::new(); lock_info1.set_primary_lock(k1_primary.clone()); lock_info1.set_lock_version(k1_ts); exp_res_type1.set_lock_info(lock_info1); assert_eq!(exp_res_type1, *actual_kvs[1].get_res_type()); assert_eq!(k1, actual_kvs[1].get_key()); assert_eq!(k1_primary.clone(), 
actual_kvs[1].get_res_type().get_lock_info().get_primary_lock()); assert_eq!(k1_ts, actual_kvs[1].get_res_type().get_lock_info().get_lock_version()); } #[test] fn test_prewrite_done_ok() { let actual_resp: Response = Server::cmd_prewrite_done(Ok(Vec::new())); assert_eq!(MessageType::CmdPrewrite, actual_resp.get_field_type()); assert_eq!(true, actual_resp.get_cmd_prewrite_resp().get_ok()); } #[test] fn test_prewrite_done_err() { let err = Other(Box::new("prewrite error")); let actual_resp: Response = Server::cmd_prewrite_done(Err(err)); assert_eq!(MessageType::CmdPrewrite, actual_resp.get_field_type()); assert_eq!(false, actual_resp.get_cmd_prewrite_resp().get_ok()); } #[test] fn test_commit_done_ok() { let actual_resp: Response = Server::cmd_commit_done(Ok(())); assert_eq!(MessageType::CmdCommit, actual_resp.get_field_type()); assert_eq!(true, actual_resp.get_cmd_commit_resp().get_ok()); } #[test] fn test_commit_done_err() { let err = Other(Box::new("commit error")); let actual_resp: Response = Server::cmd_commit_done(Err(err)); assert_eq!(MessageType::CmdCommit, actual_resp.get_field_type()); assert_eq!(false, actual_resp.get_cmd_commit_resp().get_ok()); } #[test] fn test_cleanup_done_ok() { let actual_resp: Response = Server::cmd_cleanup_done(Ok(())); assert_eq!(MessageType::CmdCleanup, actual_resp.get_field_type()); assert_eq!(make_res_type(ResultType_Type::Ok), *actual_resp.get_cmd_cleanup_resp().get_res_type()); } #[test] fn test_cleanup_done_err() { let err = Other(Box::new("cleanup error")); let actual_resp: Response = Server::cmd_cleanup_done(Err(err)); assert_eq!(MessageType::CmdCleanup, actual_resp.get_field_type()); assert_eq!(make_res_type(ResultType_Type::Retryable), *actual_resp.get_cmd_cleanup_resp().get_res_type()); } #[test] fn test_not_leader() { use kvproto::errorpb::NotLeaderError; let mut leader_info = NotLeaderError::new(); leader_info.set_region_id(1); let storage_res: ResultStorage<Option<Value>> = 
make_not_leader_error(leader_info.to_owned()); let actual_resp: Response = Server::cmd_get_done(storage_res); assert_eq!(MessageType::CmdGet, actual_resp.get_field_type()); let mut exp_res_type = make_res_type(ResultType_Type::NotLeader); exp_res_type.set_leader_info(leader_info.to_owned()); assert_eq!(exp_res_type, *actual_resp.get_cmd_get_resp().get_res_type()); } fn make_lock_error<T>(key: Vec<u8>, primary: Vec<u8>, ts: u64) -> ResultStorage<T> { Err(mvcc::Error::KeyIsLocked { key: key, primary: primary, ts: ts, }) .map_err(txn::Error::from) .map_err(storage::Error::from) } fn make_not_leader_error<T>(leader_info: NotLeaderError) -> ResultStorage<T> { use kvproto::errorpb::Error; let mut err = Error::new(); err.set_not_leader(leader_info); Err(engine::Error::Request(err)).map_err(storage::Error::from) } fn make_res_type(tp: ResultType_Type) -> ResultType { let mut res_type = ResultType::new(); res_type.set_field_type(tp); res_type } } address comment use std::collections::HashMap; use std::boxed::Box; use std::io::{Read, Write}; use mio::{self, Token, EventLoop, EventSet, PollOpt, Sender}; use mio::tcp::{TcpListener, TcpStream}; use protobuf::RepeatedField; use kvproto::kvrpcpb::{CmdGetResponse, CmdScanResponse, CmdPrewriteResponse, CmdCommitResponse, CmdCleanupResponse, CmdRollbackThenGetResponse, CmdCommitThenGetResponse, Request, Response, MessageType, Item, ResultType, ResultType_Type, LockInfo, Operator}; use storage::{Storage, Key, Value, KvPair, KvOpt, Mutation, MaybeLocked, MaybeComitted, MaybeRolledback, Callback}; use storage::Result as ResultStorage; use storage::Error as StorageError; use storage::EngineError; use super::conn::Conn; use super::{Result, ServerError}; // Token(1) instead of Token(0) // See here: https://github.com/hjr3/mob/blob/multi-echo-blog-post/src/main.rs#L115 pub const SERVER_TOKEN: Token = Token(1); const FIRST_CUSTOM_TOKEN: Token = Token(1024); pub struct Server { pub listener: TcpListener, pub conns: HashMap<Token, Conn>, pub 
token_counter: usize, store: Storage, } impl Server { pub fn new(listener: TcpListener, conns: HashMap<Token, Conn>, store: Storage) -> Server { Server { listener: listener, conns: conns, token_counter: FIRST_CUSTOM_TOKEN.as_usize(), store: store, } } pub fn handle_get(&mut self, mut msg: Request, msg_id: u64, token: Token, event_loop: &mut EventLoop<Server>) -> Result<()> { if !msg.has_cmd_get_req() { format_err!("Msg doesn't contain a CmdGetRequest"); } let mut cmd_get_req = msg.take_cmd_get_req(); let mut key_address = cmd_get_req.take_key_address(); let key = key_address.take_key(); let opt = KvOpt::new(key_address.get_region_id(), key_address.take_peer()); let sender = event_loop.channel(); let cb = Server::make_cb::<Option<Value>>(Server::cmd_get_done, sender, token, msg_id); self.store .async_get(Key::new(key), cmd_get_req.get_version(), opt, cb) .map_err(ServerError::Storage) } pub fn handle_scan(&mut self, mut msg: Request, msg_id: u64, token: Token, event_loop: &mut EventLoop<Server>) -> Result<()> { if !msg.has_cmd_scan_req() { format_err!("Msg doesn't contain a CmdScanRequest"); } let mut cmd_scan_req = msg.take_cmd_scan_req(); let sender = event_loop.channel(); let mut start_key_addresss = cmd_scan_req.take_key_address(); let start_key = start_key_addresss.take_key(); let opt = KvOpt::new(start_key_addresss.get_region_id(), start_key_addresss.take_peer()); debug!("start_key [{:?}]", start_key); let cb = Server::make_cb::<Vec<ResultStorage<KvPair>>>(Server::cmd_scan_done, sender, token, msg_id); self.store .async_scan(Key::new(start_key), cmd_scan_req.get_limit() as usize, cmd_scan_req.get_version(), opt, cb) .map_err(ServerError::Storage) } pub fn handle_prewrite(&mut self, mut msg: Request, msg_id: u64, token: Token, event_loop: &mut EventLoop<Server>) -> Result<()> { if !msg.has_cmd_prewrite_req() { format_err!("Msg doesn't contain a CmdPrewriteRequest"); } let mut cmd_prewrite_req = msg.take_cmd_prewrite_req(); let sender = event_loop.channel(); let 
mutations = cmd_prewrite_req.take_mutations() .into_iter() .map(|mut x| { match x.get_op() { Operator::OpPut => { Mutation::Put((Key::new(x.take_key()), x.take_value())) } Operator::OpDel => { Mutation::Delete(Key::new(x.take_key())) } Operator::OpLock => { Mutation::Lock(Key::new(x.take_key())) } } }) .collect(); let opt = { let mut key_address = cmd_prewrite_req.take_key_address(); KvOpt::new(key_address.get_region_id(), key_address.take_peer()) }; let cb = Server::make_cb::<Vec<ResultStorage<()>>>(Server::cmd_prewrite_done, sender, token, msg_id); self.store .async_prewrite(mutations, cmd_prewrite_req.get_primary_lock().to_vec(), cmd_prewrite_req.get_start_version(), opt, cb) .map_err(ServerError::Storage) } pub fn handle_commit(&mut self, mut msg: Request, msg_id: u64, token: Token, event_loop: &mut EventLoop<Server>) -> Result<()> { if !msg.has_cmd_commit_req() { format_err!("Msg doesn't contain a CmdCommitRequest"); } let mut cmd_commit_req = msg.take_cmd_commit_req(); let sender = event_loop.channel(); let cb = Server::make_cb::<()>(Server::cmd_commit_done, sender, token, msg_id); let opt = { let mut first = cmd_commit_req.get_keys_address()[0].clone(); KvOpt::new(first.get_region_id(), first.take_peer()) }; let keys = cmd_commit_req.take_keys_address() .into_iter() .map(|mut x| Key::new(x.take_key())) .collect(); self.store .async_commit(keys, cmd_commit_req.get_start_version(), cmd_commit_req.get_commit_version(), opt, cb) .map_err(ServerError::Storage) } pub fn handle_cleanup(&mut self, mut msg: Request, msg_id: u64, token: Token, event_loop: &mut EventLoop<Server>) -> Result<()> { if !msg.has_cmd_cleanup_req() { format_err!("Msg doesn't contain a CmdCleanupRequest"); } let mut cmd_cleanup_req = msg.take_cmd_cleanup_req(); let sender = event_loop.channel(); let cb = Server::make_cb::<()>(Server::cmd_cleanup_done, sender, token, msg_id); let mut key_address = cmd_cleanup_req.take_key_address(); let key = key_address.take_key(); let opt = 
KvOpt::new(key_address.get_region_id(), key_address.take_peer()); self.store .async_cleanup(Key::new(key), cmd_cleanup_req.get_start_version(), opt, cb) .map_err(ServerError::Storage) } pub fn handle_commit_then_get(&mut self, mut msg: Request, msg_id: u64, token: Token, event_loop: &mut EventLoop<Server>) -> Result<()> { if !msg.has_cmd_commit_get_req() { format_err!("Msg doesn't contain a CmdCommitThenGetRequest"); } let mut cmd_commit_get_req = msg.take_cmd_commit_get_req(); let sender = event_loop.channel(); let cb = Server::make_cb::<Option<Value>>(Server::cmd_commit_get_done, sender, token, msg_id); let mut key_address = cmd_commit_get_req.take_key_address(); let key = key_address.take_key(); let opt = KvOpt::new(key_address.get_region_id(), key_address.take_peer()); self.store .async_commit_then_get(Key::new(key), cmd_commit_get_req.get_lock_version(), cmd_commit_get_req.get_commit_version(), cmd_commit_get_req.get_get_version(), opt, cb) .map_err(ServerError::Storage) } pub fn handle_rollback_then_get(&mut self, mut msg: Request, msg_id: u64, token: Token, event_loop: &mut EventLoop<Server>) -> Result<()> { if !msg.has_cmd_rb_get_req() { format_err!("Msg doesn't contain a CmdRollbackThenGetRequest"); } let mut cmd_rollback_get_req = msg.take_cmd_rb_get_req(); let sender = event_loop.channel(); let cb = Server::make_cb::<Option<Value>>(Server::cmd_rollback_get_done, sender, token, msg_id); let mut key_address = cmd_rollback_get_req.take_key_address(); let key = key_address.take_key(); let opt = KvOpt::new(key_address.get_region_id(), key_address.take_peer()); self.store .async_rollback_then_get(Key::new(key), cmd_rollback_get_req.get_lock_version(), opt, cb) .map_err(ServerError::Storage) } fn cmd_get_done(r: ResultStorage<Option<Value>>) -> Response { let mut resp: Response = Response::new(); let mut cmd_get_resp: CmdGetResponse = CmdGetResponse::new(); let mut res_type: ResultType = ResultType::new(); match r { Ok(opt) => { 
res_type.set_field_type(ResultType_Type::Ok); match opt { Some(val) => cmd_get_resp.set_value(val), None => cmd_get_resp.set_value(Vec::new()), } } Err(ref e) => { if let StorageError::Engine(EngineError::Request(ref err)) = *e { if err.has_not_leader() { res_type.set_field_type(ResultType_Type::NotLeader); res_type.set_leader_info(err.get_not_leader().to_owned()); } else { error!("{:?}", err); res_type.set_field_type(ResultType_Type::Other); res_type.set_msg(format!("engine error: {:?}", err)); } } else if r.is_locked() { if let Some((_, primary, ts)) = r.get_lock() { res_type.set_field_type(ResultType_Type::Locked); let mut lock_info = LockInfo::new(); lock_info.set_primary_lock(primary); lock_info.set_lock_version(ts); res_type.set_lock_info(lock_info); } else { let err_str = "key is locked but primary info not found".to_owned(); error!("{}", err_str); res_type.set_field_type(ResultType_Type::Other); res_type.set_msg(err_str); } } else { let err_str = format!("storage error: {:?}", e); error!("{}", err_str); res_type.set_field_type(ResultType_Type::Retryable); res_type.set_msg(err_str); } } } cmd_get_resp.set_res_type(res_type); resp.set_field_type(MessageType::CmdGet); resp.set_cmd_get_resp(cmd_get_resp); resp } fn cmd_scan_done(kvs: ResultStorage<Vec<ResultStorage<KvPair>>>) -> Response { let mut resp: Response = Response::new(); let mut cmd_scan_resp: CmdScanResponse = CmdScanResponse::new(); cmd_scan_resp.set_ok(kvs.is_ok()); match kvs { Ok(kvs) => { // convert storage::KvPair to kvrpcpb::Item let mut new_kvs: Vec<Item> = Vec::new(); for result in kvs { let mut new_kv: Item = Item::new(); let mut res_type: ResultType = ResultType::new(); match result { Ok((ref key, ref value)) => { res_type.set_field_type(ResultType_Type::Ok); new_kv.set_key(key.clone()); new_kv.set_value(value.clone()); } Err(..) 
=> { if result.is_locked() { if let Some((key, primary, ts)) = result.get_lock() { res_type.set_field_type(ResultType_Type::Locked); let mut lock_info = LockInfo::new(); lock_info.set_primary_lock(primary); lock_info.set_lock_version(ts); res_type.set_lock_info(lock_info); new_kv.set_key(key); } } else { res_type.set_field_type(ResultType_Type::Retryable); } } } new_kv.set_res_type(res_type); new_kvs.push(new_kv); } cmd_scan_resp.set_results(RepeatedField::from_vec(new_kvs)); } Err(e) => { error!("storage error: {:?}", e); } } resp.set_field_type(MessageType::CmdScan); resp.set_cmd_scan_resp(cmd_scan_resp); resp } fn cmd_prewrite_done(results: ResultStorage<Vec<ResultStorage<()>>>) -> Response { let mut resp: Response = Response::new(); let mut cmd_prewrite_resp: CmdPrewriteResponse = CmdPrewriteResponse::new(); cmd_prewrite_resp.set_ok(results.is_ok()); let mut items: Vec<Item> = Vec::new(); match results { Ok(results) => { for result in results { let mut item = Item::new(); let mut res_type: ResultType = ResultType::new(); if result.is_ok() { res_type.set_field_type(ResultType_Type::Ok); } else if let Some((key, primary, ts)) = result.get_lock() { // Actually items only contain locked item, so `ok` is always false. 
res_type.set_field_type(ResultType_Type::Locked); let mut lock_info = LockInfo::new(); lock_info.set_primary_lock(primary); lock_info.set_lock_version(ts); res_type.set_lock_info(lock_info); item.set_key(key); } else { res_type.set_field_type(ResultType_Type::Retryable); } item.set_res_type(res_type); items.push(item); } } Err(e) => { error!("storage error: {:?}", e); } } cmd_prewrite_resp.set_results(RepeatedField::from_vec(items)); resp.set_field_type(MessageType::CmdPrewrite); resp.set_cmd_prewrite_resp(cmd_prewrite_resp); resp } fn cmd_commit_done(r: ResultStorage<()>) -> Response { let mut resp: Response = Response::new(); let mut cmd_commit_resp: CmdCommitResponse = CmdCommitResponse::new(); cmd_commit_resp.set_ok(r.is_ok()); resp.set_field_type(MessageType::CmdCommit); resp.set_cmd_commit_resp(cmd_commit_resp); resp } fn cmd_cleanup_done(r: ResultStorage<()>) -> Response { let mut resp: Response = Response::new(); let mut cmd_cleanup_resp: CmdCleanupResponse = CmdCleanupResponse::new(); let mut res_type: ResultType = ResultType::new(); if r.is_ok() { res_type.set_field_type(ResultType_Type::Ok); } else if r.is_committed() { res_type.set_field_type(ResultType_Type::Committed); if let Some(commit_ts) = r.get_commit() { cmd_cleanup_resp.set_commit_version(commit_ts); } else { warn!("commit_ts not found when is_committed."); } } else if r.is_rolledback() { res_type.set_field_type(ResultType_Type::Rolledback); } else { warn!("Cleanup other error {:?}", r.err()); res_type.set_field_type(ResultType_Type::Retryable); } cmd_cleanup_resp.set_res_type(res_type); resp.set_field_type(MessageType::CmdCleanup); resp.set_cmd_cleanup_resp(cmd_cleanup_resp); resp } fn cmd_commit_get_done(r: ResultStorage<Option<Value>>) -> Response { let mut resp: Response = Response::new(); let mut cmd_commit_get_resp: CmdCommitThenGetResponse = CmdCommitThenGetResponse::new(); cmd_commit_get_resp.set_ok(r.is_ok()); if let Ok(Some(val)) = r { cmd_commit_get_resp.set_value(val); } 
resp.set_field_type(MessageType::CmdCommitThenGet); resp.set_cmd_commit_get_resp(cmd_commit_get_resp); resp } fn cmd_rollback_get_done(r: ResultStorage<Option<Value>>) -> Response { let mut resp: Response = Response::new(); let mut cmd_rollback_get_resp: CmdRollbackThenGetResponse = CmdRollbackThenGetResponse::new(); cmd_rollback_get_resp.set_ok(r.is_ok()); if let Err(ref e) = r { error!("rb & get error: {}", e); } if let Ok(Some(val)) = r { cmd_rollback_get_resp.set_value(val); } resp.set_field_type(MessageType::CmdRollbackThenGet); resp.set_cmd_rb_get_resp(cmd_rollback_get_resp); resp } fn make_cb<T: 'static>(f: fn(ResultStorage<T>) -> Response, sender: Sender<QueueMessage>, token: Token, msg_id: u64) -> Callback<T> { Box::new(move |r: ResultStorage<T>| { let resp: Response = f(r); let queue_msg: QueueMessage = QueueMessage::Response(token, msg_id, resp); if let Err(e) = sender.send(queue_msg) { error!("{:?}", e); } }) } fn add_new_conn(&mut self, event_loop: &mut EventLoop<Server>, sock: TcpStream) -> Result<(Token)> { let new_token = Token(self.token_counter); let _ = sock.set_nodelay(true).map_err(|e| error!("set nodelay failed {:?}", e)); self.conns.insert(new_token, Conn::new(sock, new_token)); self.token_counter += 1; event_loop.register(&self.conns[&new_token].sock, new_token, EventSet::readable() | EventSet::hup(), PollOpt::edge()) .unwrap(); Ok(new_token) } fn remove_conn(&mut self, event_loop: &mut EventLoop<Server>, token: Token) { let conn = self.conns.remove(&token); match conn { Some(conn) => { let _ = event_loop.deregister(&conn.sock); } None => { warn!("missing connection for token {}", token.as_usize()); } } } fn handle_server_readable(&mut self, event_loop: &mut EventLoop<Server>) { loop { let sock = match self.listener.accept() { // Some(sock, addr) Ok(Some((sock, _))) => sock, Ok(None) => { debug!("no connection, accept later."); return; } Err(e) => { error!("accept error: {}", e); return; } }; let _ = self.add_new_conn(event_loop, sock); } } 
/// Dispatches a readable event for an established connection.
///
/// Looks up the `Conn` registered under `token` and delegates to
/// `Conn::read`; a missing token (e.g. the connection was already
/// removed) is logged and ignored. Read errors are logged, not fatal.
fn handle_conn_readable(&mut self, event_loop: &mut EventLoop<Server>, token: Token) {
    let mut conn: &mut Conn = match self.conns.get_mut(&token) {
        Some(c) => c,
        None => {
            error!("Get connection failed token[{}]", token.0);
            return;
        }
    };
    if let Err(e) = conn.read(event_loop) {
        error!("read failed with {:?}", e);
    }
}

/// Dispatches a writable event for an established connection.
///
/// Mirrors `handle_conn_readable` but delegates to `Conn::write`
/// (presumably flushing the connection's pending output buffer —
/// confirm against `Conn`). Errors are logged, not fatal.
fn handle_writable(&mut self, event_loop: &mut EventLoop<Server>, token: Token) {
    let mut conn: &mut Conn = match self.conns.get_mut(&token) {
        Some(c) => c,
        None => {
            error!("Get connection failed token[{}]", token.0);
            return;
        }
    };
    if let Err(e) = conn.write(event_loop) {
        error!("write failed with {:?}", e);
    }
}

/// Routes a decoded kvrpc `Request` to the matching `handle_*` method
/// based on its message type. `req` is moved into the chosen handler.
/// Any handler error is logged here; no response is sent on failure.
fn handle_request(&mut self,
                  event_loop: &mut EventLoop<Server>,
                  token: Token,
                  msg_id: u64,
                  req: Request) {
    debug!("notify Request token[{}] msg_id[{}] type[{:?}]",
           token.0,
           msg_id,
           req.get_field_type());
    // The match is exhaustive over MessageType: a new command variant
    // will fail to compile until it is routed here.
    if let Err(e) = match req.get_field_type() {
        MessageType::CmdGet => self.handle_get(req, msg_id, token, event_loop),
        MessageType::CmdScan => self.handle_scan(req, msg_id, token, event_loop),
        MessageType::CmdPrewrite => self.handle_prewrite(req, msg_id, token, event_loop),
        MessageType::CmdCommit => self.handle_commit(req, msg_id, token, event_loop),
        MessageType::CmdCleanup => self.handle_cleanup(req, msg_id, token, event_loop),
        MessageType::CmdCommitThenGet => {
            self.handle_commit_then_get(req, msg_id, token, event_loop)
        }
        MessageType::CmdRollbackThenGet => {
            self.handle_rollback_then_get(req, msg_id, token, event_loop)
        }
    } {
        error!("Some error occur err[{:?}]", e);
    }
}

/// Queues an outgoing `Response` on the connection identified by
/// `token` (via `Conn::append_write_buf`). If the connection has gone
/// away in the meantime, the response is dropped with an error log.
fn handle_response(&mut self,
                   event_loop: &mut EventLoop<Server>,
                   token: Token,
                   msg_id: u64,
                   resp: Response) {
    debug!("notify Response token[{}] msg_id[{}] type[{:?}]",
           token.0,
           msg_id,
           resp.get_field_type());
    let mut conn: &mut Conn = match self.conns.get_mut(&token) {
        Some(c) => c,
        None => {
            error!("Get connection failed token[{}]", token.0);
            return;
        }
    };
    // Best-effort: append errors are intentionally ignored here.
    let _ = conn.append_write_buf(event_loop, msg_id, resp);
}
}

/// Messages delivered to the event loop through its notification
/// channel (see `mio::Handler::notify` below).
#[allow(dead_code)]
pub enum QueueMessage {
    // Request(token, msg_id, kvrpc_request)
    Request(Token, u64, Request),
    // Response(token, msg_id, kvrpc_response)
    Response(Token, u64, Response),
    // Ask the event loop to shut down.
    Quit,
}

impl mio::Handler for Server {
    type Timeout = ();
    type Message = QueueMessage;

    /// Called by mio when a registered socket becomes ready.
    /// Hup/error events tear the connection down; readable events are
    /// routed either to the listener (SERVER_TOKEN) or to a connection.
    fn ready(&mut self, event_loop: &mut EventLoop<Server>, token: Token, events: EventSet) {
        if events.is_hup() || events.is_error() {
            self.remove_conn(event_loop, token);
            return;
        }
        if events.is_readable() {
            match token {
                SERVER_TOKEN => {
                    self.handle_server_readable(event_loop);
                }
                token => {
                    self.handle_conn_readable(event_loop, token);
                }
            }
        }
        if events.is_writable() {
            self.handle_writable(event_loop, token);
        }
    }

    /// Called by mio for messages sent through the loop's channel;
    /// dispatches queued requests/responses and handles shutdown.
    fn notify(&mut self, event_loop: &mut EventLoop<Server>, msg: QueueMessage) {
        match msg {
            QueueMessage::Request(token, msg_id, req) => {
                self.handle_request(event_loop, token, msg_id, req);
            }
            QueueMessage::Response(token, msg_id, resp) => {
                self.handle_response(event_loop, token, msg_id, resp);
            }
            QueueMessage::Quit => event_loop.shutdown(),
        }
    }
}

#[cfg(test)]
mod tests {
    use std::thread;
    use mio::EventLoop;
    use kvproto::kvrpcpb::*;
    use kvproto::errorpb::NotLeaderError;
    use storage::{self, Value, Storage, Dsn, txn, mvcc, engine};
    use storage::Error::Other;
    use storage::KvPair as StorageKV;
    use storage::Result as ResultStorage;
    use super::*;

    // Spins up a real server on a background thread and verifies that a
    // Quit message makes the event loop exit.
    #[test]
    fn test_quit() {
        use std::collections::HashMap;
        use mio::tcp::TcpListener;
        let mut event_loop = EventLoop::new().unwrap();
        let sender = event_loop.channel();
        let h = thread::spawn(move || {
            let l: TcpListener = TcpListener::bind(&"127.0.0.1:64321".parse().unwrap()).unwrap();
            let store: Storage = Storage::new(Dsn::Memory).unwrap();
            let mut srv: Server = Server::new(l, HashMap::new(), store);
            event_loop.run(&mut srv).unwrap();
        });
        // Without the Quit message the spawned thread would block in
        // `event_loop.run` forever and `join` would hang.
        let _ = sender.send(QueueMessage::Quit);
        h.join().unwrap();
    }

    // A successful get with no value yields Ok + empty value bytes.
    #[test]
    fn test_get_done_none() {
        let actual_resp: Response = Server::cmd_get_done(Ok(None));
        let mut exp_resp: Response = Response::new();
        let mut exp_cmd_resp: CmdGetResponse = CmdGetResponse::new();
        exp_cmd_resp.set_res_type(make_res_type(ResultType_Type::Ok));
        exp_cmd_resp.set_value(Vec::new());
        exp_resp.set_field_type(MessageType::CmdGet);
        exp_resp.set_cmd_get_resp(exp_cmd_resp);
        assert_eq!(exp_resp, actual_resp);
    }

    // A successful get with a value copies the bytes into the response.
    #[test]
    fn test_get_done_some() {
        let storage_val: Vec<_> = vec![0u8; 8];
        let actual_resp: Response = Server::cmd_get_done(Ok(Some(storage_val)));
        let mut exp_resp: Response = Response::new();
        let mut exp_cmd_resp: CmdGetResponse = CmdGetResponse::new();
        exp_cmd_resp.set_res_type(make_res_type(ResultType_Type::Ok));
        exp_cmd_resp.set_value(vec![0u8; 8]);
        exp_resp.set_field_type(MessageType::CmdGet);
        exp_resp.set_cmd_get_resp(exp_cmd_resp);
        assert_eq!(exp_resp, actual_resp);
    }

    // A generic storage error maps to Retryable with the debug text.
    #[test]
    // #[should_panic]
    fn test_get_done_error() {
        let actual_resp: Response = Server::cmd_get_done(Err(Other(Box::new("error"))));
        let mut exp_resp: Response = Response::new();
        let mut exp_cmd_resp: CmdGetResponse = CmdGetResponse::new();
        let mut res_type = make_res_type(ResultType_Type::Retryable);
        res_type.set_msg("storage error: Other(Any)".to_owned());
        exp_cmd_resp.set_res_type(res_type);
        exp_resp.set_field_type(MessageType::CmdGet);
        exp_resp.set_cmd_get_resp(exp_cmd_resp);
        assert_eq!(exp_resp, actual_resp);
    }

    // An empty scan result is still `ok` with no items.
    #[test]
    fn test_scan_done_empty() {
        let actual_resp: Response = Server::cmd_scan_done(Ok(Vec::new()));
        let mut exp_resp: Response = Response::new();
        let mut exp_cmd_resp: CmdScanResponse = CmdScanResponse::new();
        exp_cmd_resp.set_ok(true);
        exp_resp.set_field_type(MessageType::CmdScan);
        exp_resp.set_cmd_scan_resp(exp_cmd_resp);
        assert_eq!(exp_resp, actual_resp);
    }

    // Two Ok pairs come back as two Ok items, preserving key/value.
    #[test]
    fn test_scan_done_some() {
        let k0 = vec![0u8, 0u8];
        let v0: Value = vec![255u8, 255u8];
        let k1 = vec![0u8, 1u8];
        let v1: Value = vec![255u8, 254u8];
        let kvs: Vec<ResultStorage<StorageKV>> = vec![Ok((k0.clone(), v0.clone())),
                                                      Ok((k1.clone(), v1.clone()))];
        let actual_resp: Response = Server::cmd_scan_done(Ok(kvs));
        assert_eq!(MessageType::CmdScan, actual_resp.get_field_type());
        let actual_cmd_resp: &CmdScanResponse = actual_resp.get_cmd_scan_resp();
        assert_eq!(true, actual_cmd_resp.get_ok());
        let actual_kvs = actual_cmd_resp.get_results();
        assert_eq!(2, actual_kvs.len());
        assert_eq!(make_res_type(ResultType_Type::Ok),
                   *actual_kvs[0].get_res_type());
        assert_eq!(k0, actual_kvs[0].get_key());
        assert_eq!(v0, actual_kvs[0].get_value());
        assert_eq!(make_res_type(ResultType_Type::Ok),
                   *actual_kvs[1].get_res_type());
        assert_eq!(k1, actual_kvs[1].get_key());
        assert_eq!(v1, actual_kvs[1].get_value());
    }

    // A locked entry in the scan result surfaces as a Locked item
    // carrying the primary lock key and the lock timestamp.
    #[test]
    fn test_scan_done_lock() {
        use kvproto::kvrpcpb::LockInfo;
        let k0 = vec![0u8, 0u8];
        let v0: Value = vec![255u8, 255u8];
        let k1 = vec![0u8, 1u8];
        let k1_primary = k0.clone();
        let k1_ts: u64 = 10000;
        let kvs: Vec<ResultStorage<StorageKV>> =
            vec![Ok((k0.clone(), v0.clone())),
                 make_lock_error(k1.clone(), k1_primary.clone(), k1_ts)];
        let actual_resp: Response = Server::cmd_scan_done(Ok(kvs));
        assert_eq!(MessageType::CmdScan, actual_resp.get_field_type());
        let actual_cmd_resp: &CmdScanResponse = actual_resp.get_cmd_scan_resp();
        assert_eq!(true, actual_cmd_resp.get_ok());
        let actual_kvs = actual_cmd_resp.get_results();
        assert_eq!(2, actual_kvs.len());
        assert_eq!(make_res_type(ResultType_Type::Ok),
                   *actual_kvs[0].get_res_type());
        assert_eq!(k0, actual_kvs[0].get_key());
        assert_eq!(v0, actual_kvs[0].get_value());
        let mut exp_res_type1 = make_res_type(ResultType_Type::Locked);
        let mut lock_info1 = LockInfo::new();
        lock_info1.set_primary_lock(k1_primary.clone());
        lock_info1.set_lock_version(k1_ts);
        exp_res_type1.set_lock_info(lock_info1);
        assert_eq!(exp_res_type1, *actual_kvs[1].get_res_type());
        assert_eq!(k1, actual_kvs[1].get_key());
        assert_eq!(k1_primary.clone(),
                   actual_kvs[1].get_res_type().get_lock_info().get_primary_lock());
        assert_eq!(k1_ts,
                   actual_kvs[1].get_res_type().get_lock_info().get_lock_version());
    }

    #[test]
    fn test_prewrite_done_ok() {
        let actual_resp: Response = Server::cmd_prewrite_done(Ok(Vec::new()));
        assert_eq!(MessageType::CmdPrewrite, actual_resp.get_field_type());
        assert_eq!(true, actual_resp.get_cmd_prewrite_resp().get_ok());
    }

    #[test]
    fn test_prewrite_done_err() {
        let err = Other(Box::new("prewrite error"));
        let actual_resp: Response = Server::cmd_prewrite_done(Err(err));
        assert_eq!(MessageType::CmdPrewrite, actual_resp.get_field_type());
        assert_eq!(false, actual_resp.get_cmd_prewrite_resp().get_ok());
    }

    #[test]
    fn test_commit_done_ok() {
        let actual_resp: Response = Server::cmd_commit_done(Ok(()));
        assert_eq!(MessageType::CmdCommit, actual_resp.get_field_type());
        assert_eq!(true, actual_resp.get_cmd_commit_resp().get_ok());
    }

    #[test]
    fn test_commit_done_err() {
        let err = Other(Box::new("commit error"));
        let actual_resp: Response = Server::cmd_commit_done(Err(err));
        assert_eq!(MessageType::CmdCommit, actual_resp.get_field_type());
        assert_eq!(false, actual_resp.get_cmd_commit_resp().get_ok());
    }

    #[test]
    fn test_cleanup_done_ok() {
        let actual_resp: Response = Server::cmd_cleanup_done(Ok(()));
        assert_eq!(MessageType::CmdCleanup, actual_resp.get_field_type());
        assert_eq!(make_res_type(ResultType_Type::Ok),
                   *actual_resp.get_cmd_cleanup_resp().get_res_type());
    }

    #[test]
    fn test_cleanup_done_err() {
        let err = Other(Box::new("cleanup error"));
        let actual_resp: Response = Server::cmd_cleanup_done(Err(err));
        assert_eq!(MessageType::CmdCleanup, actual_resp.get_field_type());
        assert_eq!(make_res_type(ResultType_Type::Retryable),
                   *actual_resp.get_cmd_cleanup_resp().get_res_type());
    }

    // A NotLeader engine error is propagated with the leader info intact.
    #[test]
    fn test_not_leader() {
        use kvproto::errorpb::NotLeaderError;
        let mut leader_info = NotLeaderError::new();
        leader_info.set_region_id(1);
        let storage_res: ResultStorage<Option<Value>> =
            make_not_leader_error(leader_info.to_owned());
        let actual_resp: Response = Server::cmd_get_done(storage_res);
        assert_eq!(MessageType::CmdGet, actual_resp.get_field_type());
        let mut exp_res_type = make_res_type(ResultType_Type::NotLeader);
        exp_res_type.set_leader_info(leader_info.to_owned());
        assert_eq!(exp_res_type,
                   *actual_resp.get_cmd_get_resp().get_res_type());
    }

    // Builds a storage-level KeyIsLocked error wrapped the same way the
    // real txn/storage layers wrap it.
    fn make_lock_error<T>(key: Vec<u8>, primary: Vec<u8>, ts: u64) -> ResultStorage<T> {
        Err(mvcc::Error::KeyIsLocked {
            key: key,
            primary: primary,
            ts: ts,
        })
        .map_err(txn::Error::from)
        .map_err(storage::Error::from)
    }

    // Builds an engine-level NotLeader error wrapped as a storage error.
    fn make_not_leader_error<T>(leader_info: NotLeaderError) -> ResultStorage<T> {
        use kvproto::errorpb::Error;
        let mut err = Error::new();
        err.set_not_leader(leader_info);
        Err(engine::Error::Request(err)).map_err(storage::Error::from)
    }

    // Convenience constructor for a ResultType of the given variant.
    fn make_res_type(tp: ResultType_Type) -> ResultType {
        let mut res_type = ResultType::new();
        res_type.set_field_type(tp);
        res_type
    }
}
// Copyright 2020 Google LLC // // Use of this source code is governed by an MIT-style license that can be found // in the LICENSE file or at https://opensource.org/licenses/MIT. //! Utilities for working with the chunked format. //! //! The chunked file format is extremely simple serialization procedure where //! each chunk of raw bytes is prepended with the size of the chunk (as 64-bit //! unsigned big-endian integer). //! //! Its primary application is streaming encoding and decoding of blobs of data //! in formats that do not support or are inefficient for these purposes (such //! as serialized Protocol Buffer messages). use std::io::Cursor; use byteorder::BigEndian; /// Encodes a given iterator over binary blobs into the chunked format. /// /// This is a streaming encoder and performs the encoding in a lazy way. It /// should compose well with other streaming encoders (e.g. these offered by /// the [`flate2`] crate). /// /// [`flate2`]: https://crates.io/crates/flate2 /// /// # Examples /// /// ```no_run /// use std::fs::File; /// /// let data = [b"foo", b"bar", b"baz"]; /// /// let mut stream = rrg::chunked::encode(data.iter().map(|blob| &blob[..])); /// let mut file = File::create("output.chunked").unwrap(); /// std::io::copy(&mut stream, &mut file).unwrap(); /// ``` pub fn encode<I, M>(iter: I) -> Encode<I> where I: Iterator<Item = M>, M: prost::Message, { Encode { iter: iter, cur: Cursor::new(vec!()), } } /// Decodes a buffer in the chunked format into binary blobs. /// /// This is a streaming decoder and performs the decoding in a lazy way. It /// should compose well with other streaming decoders (e.g. these offered by the /// [`flate2`] crate). 
/// /// [`flate2`]: https://crates.io/crates/flate2 /// /// # Examples /// /// ```no_run /// use std::fs::File; /// /// let file = File::open("input.chunked").unwrap(); /// for (idx, blob) in rrg::chunked::decode(file).enumerate() { /// println!("blob #{}: {:?}", idx, blob.unwrap()); /// } /// ``` pub fn decode<R, M>(buf: R) -> Decode<R, M> where R: std::io::Read, M: prost::Message, { Decode { reader: buf, buf: vec!(), marker: std::marker::PhantomData, } } /// Streaming encoder for the chunked format. /// /// It implements the `Read` trait, lazily polling the underlying chunk iterator /// as more bytes is needed. /// /// Instances of this type can be constructed using the [`encode`] function. /// /// [`encode`]: fn.encode.html pub struct Encode<I> { iter: I, cur: Cursor<Vec<u8>>, } impl<I, M> Encode<I> where I: Iterator<Item = M>, M: prost::Message, { /// Checks whether all the data from the underlying cursor has been read. fn is_empty(&self) -> bool { self.cur.position() == self.cur.get_ref().len() as u64 } /// Pulls another blob of data from the underlying iterator. fn pull(&mut self) -> std::io::Result<()> { use byteorder::WriteBytesExt as _; let msg = match self.iter.next() { Some(msg) => msg, None => return Ok(()), }; self.cur.get_mut().clear(); self.cur.set_position(0); self.cur.write_u64::<BigEndian>(msg.encoded_len() as u64)?; msg.encode(&mut self.cur.get_mut())?; self.cur.set_position(0); Ok(()) } } impl<I, M> std::io::Read for Encode<I> where I: Iterator<Item = M>, M: prost::Message, { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { if self.is_empty() { self.pull()?; } self.cur.read(buf) } } /// Streaming decoder for the chunked format. /// /// It implements the `Iterator` trait yielding chunks of decoded blobs, lazily /// decoding data from the underlying buffer. /// /// Instances of this type can be constructed using the [`decode`] function. 
/// /// [`decode`]: fn.decode.html pub struct Decode<R, M> { reader: R, buf: Vec<u8>, marker: std::marker::PhantomData<M>, } impl<R: std::io::Read, M> Decode<R, M> { /// Reads a size tag from the underlying buffer. /// /// It will return `None` if the is no more data in the buffer. fn read_len(&mut self) -> std::io::Result<Option<usize>> { use byteorder::ReadBytesExt as _; // `read` might not always read all 8 bytes. On the other hand, we also // cannot use just `read_exact` because the stream might have ended // already. Hence, we combine the two. First we attempt to read some // bytes with `read`: it should either return 0 (indicating end of the // stream), 8 (indicating that we have filled the whole buffer fully) // or something in between. In the last case, we use `read_exact to get // the remaining bytes (which should be non-zero now). let mut buf = [0; 8]; match self.reader.read(&mut buf[..])? { 8 => (), 0 => return Ok(None), len => self.reader.read_exact(&mut buf[len..])?, } let len = (&buf[..]).read_u64::<BigEndian>()? 
as usize; Ok(Some(len)) } } impl<R, M> Iterator for Decode<R, M> where R: std::io::Read, M: prost::Message + Default, { type Item = std::io::Result<M>; fn next(&mut self) -> Option<std::io::Result<M>> { let len = match self.read_len() { Ok(Some(len)) => len, Ok(None) => return None, Err(error) => return Some(Err(error)), }; self.buf.resize(len, u8::default()); match self.reader.read_exact(&mut self.buf[..]) { Ok(()) => (), Err(error) => return Some(Err(error)), } let msg = match M::decode(&self.buf[..]) { Ok(msg) => msg, Err(error) => return Some(Err(error.into())), }; Some(Ok(msg)) } } #[cfg(test)] pub mod tests { use super::*; #[test] pub fn test_encode_empty_iter() { use std::io::Read as _; let mut stream = encode(std::iter::empty::<()>()); let mut output = vec!(); stream.read_to_end(&mut output).unwrap(); assert!(output.is_empty()); } #[test] pub fn test_decode_empty_buf() { let buf: &[u8] = b""; let mut iter = decode::<_, ()>(buf); assert!(iter.next().is_none()); } #[test] pub fn test_decode_incorrect_size_tag() { let buf: &[u8] = b"\x12\x34\x56"; let mut iter = decode::<_, ()>(buf); let error = iter.next().unwrap().unwrap_err(); assert_eq!(error.kind(), std::io::ErrorKind::UnexpectedEof); } #[test] pub fn test_decode_zero_size_tag() { let buf: &[u8] = b"\x00\x00\x00\x00\x00\x00\x00\x00"; let mut iter = decode::<_, ()>(buf); assert!(iter.next().is_none()); } #[test] pub fn test_decode_partial_size_tag() { // A simple reader that yields a 0-valued size tag byte by byte. 
struct Reader(u8); impl std::io::Read for Reader { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { if self.0 > 8 { Ok(0) } else { buf[0] = 0; self.0 += 1; Ok(1) } } } let mut iter = decode::<_, ()>(Reader(0)); assert!(iter.next().is_none()); } #[test] pub fn test_decode_short_input() { let buf: &[u8] = b"\x00\x00\x00\x00\x00\x00\x00\x42foo"; let mut iter = decode::<_, ()>(buf); let error = iter.next().unwrap().unwrap_err(); assert_eq!(error.kind(), std::io::ErrorKind::UnexpectedEof); } #[test] pub fn test_encode_and_decode_single_message() { let mut iter = decode(encode(std::iter::once(String::from("foo")))) .map(Result::unwrap); assert_eq!(iter.next(), Some(String::from("foo"))); assert_eq!(iter.next(), None); } #[test] pub fn test_encode_and_decode_single_unit_message() { let mut iter = decode(encode(std::iter::once(()))) .map(Result::unwrap); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), None); } #[test] pub fn test_encode_and_decode_multiple_messages() { let msgs = vec! { b"foo".to_vec(), b"bar".to_vec(), b"baz".to_vec(), }; let mut iter = decode(encode(msgs.into_iter())) .map(Result::unwrap); assert_eq!(iter.next(), Some(b"foo".to_vec())); assert_eq!(iter.next(), Some(b"bar".to_vec())); assert_eq!(iter.next(), Some(b"baz".to_vec())); assert_eq!(iter.next(), None); } #[test] pub fn test_encode_and_decode_multiple_unit_messages() { let msgs = vec!((), (), ()); let mut iter = decode(encode(msgs.into_iter())) .map(Result::unwrap); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), None); } } Fix tests for decoding chunked size tag. // Copyright 2020 Google LLC // // Use of this source code is governed by an MIT-style license that can be found // in the LICENSE file or at https://opensource.org/licenses/MIT. //! Utilities for working with the chunked format. //! //! The chunked file format is extremely simple serialization procedure where //! 
each chunk of raw bytes is prepended with the size of the chunk (as 64-bit //! unsigned big-endian integer). //! //! Its primary application is streaming encoding and decoding of blobs of data //! in formats that do not support or are inefficient for these purposes (such //! as serialized Protocol Buffer messages). use std::io::Cursor; use byteorder::BigEndian; /// Encodes a given iterator over binary blobs into the chunked format. /// /// This is a streaming encoder and performs the encoding in a lazy way. It /// should compose well with other streaming encoders (e.g. these offered by /// the [`flate2`] crate). /// /// [`flate2`]: https://crates.io/crates/flate2 /// /// # Examples /// /// ```no_run /// use std::fs::File; /// /// let data = [b"foo", b"bar", b"baz"]; /// /// let mut stream = rrg::chunked::encode(data.iter().map(|blob| &blob[..])); /// let mut file = File::create("output.chunked").unwrap(); /// std::io::copy(&mut stream, &mut file).unwrap(); /// ``` pub fn encode<I, M>(iter: I) -> Encode<I> where I: Iterator<Item = M>, M: prost::Message, { Encode { iter: iter, cur: Cursor::new(vec!()), } } /// Decodes a buffer in the chunked format into binary blobs. /// /// This is a streaming decoder and performs the decoding in a lazy way. It /// should compose well with other streaming decoders (e.g. these offered by the /// [`flate2`] crate). /// /// [`flate2`]: https://crates.io/crates/flate2 /// /// # Examples /// /// ```no_run /// use std::fs::File; /// /// let file = File::open("input.chunked").unwrap(); /// for (idx, blob) in rrg::chunked::decode(file).enumerate() { /// println!("blob #{}: {:?}", idx, blob.unwrap()); /// } /// ``` pub fn decode<R, M>(buf: R) -> Decode<R, M> where R: std::io::Read, M: prost::Message, { Decode { reader: buf, buf: vec!(), marker: std::marker::PhantomData, } } /// Streaming encoder for the chunked format. /// /// It implements the `Read` trait, lazily polling the underlying chunk iterator /// as more bytes is needed. 
/// /// Instances of this type can be constructed using the [`encode`] function. /// /// [`encode`]: fn.encode.html pub struct Encode<I> { iter: I, cur: Cursor<Vec<u8>>, } impl<I, M> Encode<I> where I: Iterator<Item = M>, M: prost::Message, { /// Checks whether all the data from the underlying cursor has been read. fn is_empty(&self) -> bool { self.cur.position() == self.cur.get_ref().len() as u64 } /// Pulls another blob of data from the underlying iterator. fn pull(&mut self) -> std::io::Result<()> { use byteorder::WriteBytesExt as _; let msg = match self.iter.next() { Some(msg) => msg, None => return Ok(()), }; self.cur.get_mut().clear(); self.cur.set_position(0); self.cur.write_u64::<BigEndian>(msg.encoded_len() as u64)?; msg.encode(&mut self.cur.get_mut())?; self.cur.set_position(0); Ok(()) } } impl<I, M> std::io::Read for Encode<I> where I: Iterator<Item = M>, M: prost::Message, { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { if self.is_empty() { self.pull()?; } self.cur.read(buf) } } /// Streaming decoder for the chunked format. /// /// It implements the `Iterator` trait yielding chunks of decoded blobs, lazily /// decoding data from the underlying buffer. /// /// Instances of this type can be constructed using the [`decode`] function. /// /// [`decode`]: fn.decode.html pub struct Decode<R, M> { reader: R, buf: Vec<u8>, marker: std::marker::PhantomData<M>, } impl<R: std::io::Read, M> Decode<R, M> { /// Reads a size tag from the underlying buffer. /// /// It will return `None` if the is no more data in the buffer. fn read_len(&mut self) -> std::io::Result<Option<usize>> { use byteorder::ReadBytesExt as _; // `read` might not always read all 8 bytes. On the other hand, we also // cannot use just `read_exact` because the stream might have ended // already. Hence, we combine the two. 
First we attempt to read some // bytes with `read`: it should either return 0 (indicating end of the // stream), 8 (indicating that we have filled the whole buffer fully) // or something in between. In the last case, we use `read_exact to get // the remaining bytes (which should be non-zero now). let mut buf = [0; 8]; match self.reader.read(&mut buf[..])? { 8 => (), 0 => return Ok(None), len => self.reader.read_exact(&mut buf[len..])?, } let len = (&buf[..]).read_u64::<BigEndian>()? as usize; Ok(Some(len)) } } impl<R, M> Iterator for Decode<R, M> where R: std::io::Read, M: prost::Message + Default, { type Item = std::io::Result<M>; fn next(&mut self) -> Option<std::io::Result<M>> { let len = match self.read_len() { Ok(Some(len)) => len, Ok(None) => return None, Err(error) => return Some(Err(error)), }; self.buf.resize(len, u8::default()); match self.reader.read_exact(&mut self.buf[..]) { Ok(()) => (), Err(error) => return Some(Err(error)), } let msg = match M::decode(&self.buf[..]) { Ok(msg) => msg, Err(error) => return Some(Err(error.into())), }; Some(Ok(msg)) } } #[cfg(test)] pub mod tests { use super::*; #[test] pub fn test_encode_empty_iter() { use std::io::Read as _; let mut stream = encode(std::iter::empty::<()>()); let mut output = vec!(); stream.read_to_end(&mut output).unwrap(); assert!(output.is_empty()); } #[test] pub fn test_decode_empty_buf() { let buf: &[u8] = b""; let mut iter = decode::<_, ()>(buf); assert!(iter.next().is_none()); } #[test] pub fn test_decode_incorrect_size_tag() { let buf: &[u8] = b"\x12\x34\x56"; let mut iter = decode::<_, ()>(buf); let error = iter.next().unwrap().unwrap_err(); assert_eq!(error.kind(), std::io::ErrorKind::UnexpectedEof); } #[test] pub fn test_decode_zero_size_tag() { let buf: &[u8] = b"\x00\x00\x00\x00\x00\x00\x00\x00"; let mut iter = decode(buf).map(Result::unwrap); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), None); } #[test] pub fn test_decode_partial_size_tag() { // A simple reader that yields a 
0-valued size tag byte by byte. struct Reader(u8); impl std::io::Read for Reader { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { dbg!(self.0); if self.0 == 8 { Ok(0) } else { buf[0] = 0; self.0 += 1; Ok(1) } } } let mut iter = decode(Reader(0)).map(Result::unwrap); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), None); } #[test] pub fn test_decode_short_input() { let buf: &[u8] = b"\x00\x00\x00\x00\x00\x00\x00\x42foo"; let mut iter = decode::<_, ()>(buf); let error = iter.next().unwrap().unwrap_err(); assert_eq!(error.kind(), std::io::ErrorKind::UnexpectedEof); } #[test] pub fn test_encode_and_decode_single_message() { let mut iter = decode(encode(std::iter::once(String::from("foo")))) .map(Result::unwrap); assert_eq!(iter.next(), Some(String::from("foo"))); assert_eq!(iter.next(), None); } #[test] pub fn test_encode_and_decode_single_unit_message() { let mut iter = decode(encode(std::iter::once(()))) .map(Result::unwrap); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), None); } #[test] pub fn test_encode_and_decode_multiple_messages() { let msgs = vec! { b"foo".to_vec(), b"bar".to_vec(), b"baz".to_vec(), }; let mut iter = decode(encode(msgs.into_iter())) .map(Result::unwrap); assert_eq!(iter.next(), Some(b"foo".to_vec())); assert_eq!(iter.next(), Some(b"bar".to_vec())); assert_eq!(iter.next(), Some(b"baz".to_vec())); assert_eq!(iter.next(), None); } #[test] pub fn test_encode_and_decode_multiple_unit_messages() { let msgs = vec!((), (), ()); let mut iter = decode(encode(msgs.into_iter())) .map(Result::unwrap); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), Some(())); assert_eq!(iter.next(), None); } }
extern crate semver; use std::collections::HashMap; use std::io::net::ip::IpAddr; use std::hash::Hash; use std::any::Any; #[deriving(PartialEq, Show, Clone)] pub enum Scheme { Http, Https } #[deriving(PartialEq, Show, Clone)] pub enum Host<'a> { HostName(&'a str), HostIp(IpAddr) } #[deriving(PartialEq, Hash, Eq, Show, Clone)] pub enum Method { Get, Post, Put, Delete, Head, Connect, Options, Trace, // RFC-5789 Patch, Purge, // WebDAV, Subversion, UPNP Other(&'static str) } /// A Dictionary for extensions provided by the server or middleware pub type Extensions = HashMap<&'static str, Box<Any>>; pub trait Request { /// The version of HTTP being used fn http_version(&self) -> semver::Version; /// The version of the conduit spec being used fn conduit_version(&self) -> semver::Version; /// The request method, such as GET, POST, PUT, DELETE or PATCH fn method(&self) -> Method; /// The scheme part of the request URL fn scheme(&self) -> Scheme; /// The host part of the requested URL fn host<'a>(&'a self) -> Host<'a>; /// The initial part of the request URL's path that corresponds /// to a virtual root. This allows an application to have a /// virtual location that consumes part of the path. fn virtual_root<'a>(&'a self) -> Option<&'a str>; /// The remainder of the path. fn path<'a>(&'a self) -> &'a str; /// The portion of the request URL that follows the "?" fn query_string<'a>(&'a self) -> Option<&'a str>; /// The remote IP address of the client or the last proxy that /// sent the request. fn remote_ip(&self) -> IpAddr; /// The byte-size of the body, if any fn content_length(&self) -> Option<uint>; /// The request's headers, as conduit::Headers. 
fn headers<'a>(&'a self) -> &'a Headers; /// A Reader for the body of the request fn body<'a>(&'a mut self) -> &'a mut Reader; /// A readable map of extensions fn extensions<'a>(&'a self) -> &'a Extensions; /// A mutable map of extensions fn mut_extensions<'a>(&'a mut self) -> &'a mut Extensions; } pub type HeaderEntries<'a> = Box<Iterator<(&'a str, Vec<&'a str>)>>; pub trait Headers { /// Find the value of a given header. Multi-line headers are represented /// as an array. fn find<'a>(&'a self, key: &str) -> Option<Vec<&'a str>>; /// Returns true if a particular header exists fn has(&self, key: &str) -> bool; /// Iterate over all of the available headers. fn iter<'a>(&'a self) -> HeaderEntries<'a>; } pub struct Response { /// The status code as a tuple of the return code and status string pub status: (uint, &'static str), /// A Map of the headers pub headers: HashMap<String, Vec<String>>, /// A Writer for body of the response pub body: Box<Reader + Send> } /// A Handler takes a request and returns a response or an error. /// By default, a bare function implements `Handler`. 
pub trait Handler<E> { fn call(&self, request: &mut Request) -> Result<Response, E>; } impl<E> Handler<E> for fn(&mut Request) -> Result<Response, E> { fn call(&self, request: &mut Request) -> Result<Response, E> { (*self)(request) } } Restrict errors to Showable extern crate semver; use std::collections::HashMap; use std::io::net::ip::IpAddr; use std::hash::Hash; use std::any::Any; use std::fmt::Show; #[deriving(PartialEq, Show, Clone)] pub enum Scheme { Http, Https } #[deriving(PartialEq, Show, Clone)] pub enum Host<'a> { HostName(&'a str), HostIp(IpAddr) } #[deriving(PartialEq, Hash, Eq, Show, Clone)] pub enum Method { Get, Post, Put, Delete, Head, Connect, Options, Trace, // RFC-5789 Patch, Purge, // WebDAV, Subversion, UPNP Other(&'static str) } /// A Dictionary for extensions provided by the server or middleware pub type Extensions = HashMap<&'static str, Box<Any>>; pub trait Request { /// The version of HTTP being used fn http_version(&self) -> semver::Version; /// The version of the conduit spec being used fn conduit_version(&self) -> semver::Version; /// The request method, such as GET, POST, PUT, DELETE or PATCH fn method(&self) -> Method; /// The scheme part of the request URL fn scheme(&self) -> Scheme; /// The host part of the requested URL fn host<'a>(&'a self) -> Host<'a>; /// The initial part of the request URL's path that corresponds /// to a virtual root. This allows an application to have a /// virtual location that consumes part of the path. fn virtual_root<'a>(&'a self) -> Option<&'a str>; /// The remainder of the path. fn path<'a>(&'a self) -> &'a str; /// The portion of the request URL that follows the "?" fn query_string<'a>(&'a self) -> Option<&'a str>; /// The remote IP address of the client or the last proxy that /// sent the request. fn remote_ip(&self) -> IpAddr; /// The byte-size of the body, if any fn content_length(&self) -> Option<uint>; /// The request's headers, as conduit::Headers. 
fn headers<'a>(&'a self) -> &'a Headers; /// A Reader for the body of the request fn body<'a>(&'a mut self) -> &'a mut Reader; /// A readable map of extensions fn extensions<'a>(&'a self) -> &'a Extensions; /// A mutable map of extensions fn mut_extensions<'a>(&'a mut self) -> &'a mut Extensions; } pub type HeaderEntries<'a> = Box<Iterator<(&'a str, Vec<&'a str>)>>; pub trait Headers { /// Find the value of a given header. Multi-line headers are represented /// as an array. fn find<'a>(&'a self, key: &str) -> Option<Vec<&'a str>>; /// Returns true if a particular header exists fn has(&self, key: &str) -> bool; /// Iterate over all of the available headers. fn iter<'a>(&'a self) -> HeaderEntries<'a>; } pub struct Response { /// The status code as a tuple of the return code and status string pub status: (uint, &'static str), /// A Map of the headers pub headers: HashMap<String, Vec<String>>, /// A Writer for body of the response pub body: Box<Reader + Send> } /// A Handler takes a request and returns a response or an error. /// By default, a bare function implements `Handler`. pub trait Handler { fn call(&self, request: &mut Request) -> Result<Response, Box<Show>>; } impl<T: 'static + Show> Handler for fn(&mut Request) -> Result<Response, T> { fn call(&self, request: &mut Request) -> Result<Response, Box<Show>> { { (*self)(request) }.map_err(|e| box e as Box<Show>) } }
use std::collections::HashMap; use ast::Expr; use val::Val; macro_rules! try_number { ($e:expr) => (match $e { Ok($crate::val::Val::Number(n)) => { n }, _ => { return Err(EvalError::EvalError) } }) } #[derive(Debug, PartialEq)] enum EvalError{ EvalError, IncorrectSpecialForm, IncorrectNumberOfArguments, IncorrectTypeOfArgument } type EvalResult = Result<Val, EvalError>; pub struct Context { env: HashMap<String, Val>, } impl Context { pub fn new() -> Context { let mut ctx = Context { env: HashMap::new(), }; ctx.env.insert("nil".to_string(), v_list![]); ctx.env.insert("true".to_string(), v_bool!(true)); ctx.env.insert("false".to_string(), v_bool!(false)); ctx } pub fn eval(&mut self, s: &Expr) -> EvalResult { match *s { Expr::Number(n) => { Ok(Val::Number(n)) }, Expr::Symbol(ref name) => { if let Some(v) = self.env.get(name) { Ok(v.clone()) } else { Err(EvalError::EvalError) } }, Expr::String(ref s) => { Ok(Val::String(s.clone())) }, Expr::List(ref l) => { if let Expr::Symbol(ref n) = l[0] { match &n[..] { "def" => { self.eval_def(s) }, "fn" => { self.eval_fn(s) }, "+" => { self.eval_plus(s) }, "-" => { self.eval_minus(s) }, "/" => { self.eval_div(s) }, "*" => { self.eval_mul(s) }, "<" => { self.eval_lt(s) }, ">" => { self.eval_gt(s) }, "=" => { self.eval_eq(s) }, _ => { self.eval_call(s) }, } } else { Err(EvalError::EvalError) } } } } fn eval_eq(&mut self, s: &Expr) -> EvalResult { match *s { Expr::List(ref l) => { if let Expr::Symbol(ref n) = l[0] { match &n[..] { "=" => { if l.len() > 2 { let mut a = try_number!(self.eval(&l[1])); for e in &l[2..] { let n = try_number!(self.eval(e)); if a == n { a = n } else { return Ok(Val::Bool(false)) } } Ok(Val::Bool(true)) } else { Err(EvalError::EvalError) } }, _ => { Err(EvalError::EvalError) } } } else { Err(EvalError::EvalError) } }, _ => { Err(EvalError::EvalError) } } } fn eval_gt(&mut self, s: &Expr) -> EvalResult { match *s { Expr::List(ref l) => { if let Expr::Symbol(ref n) = l[0] { match &n[..] 
{ ">" => { if l.len() > 2 { let mut a = try_number!(self.eval(&l[1])); for e in &l[2..] { let n = try_number!(self.eval(e)); if a > n { a = n } else { return Ok(Val::Bool(false)) } } Ok(Val::Bool(true)) } else { Err(EvalError::EvalError) } }, _ => { Err(EvalError::EvalError) } } } else { Err(EvalError::EvalError) } }, _ => { Err(EvalError::EvalError) } } } fn eval_lt(&mut self, s: &Expr) -> EvalResult { match *s { Expr::List(ref l) => { if let Expr::Symbol(ref n) = l[0] { match &n[..] { "<" => { if l.len() > 2 { let mut a = try_number!(self.eval(&l[1])); for e in &l[2..] { let n = try_number!(self.eval(e)); if a < n { a = n } else { return Ok(Val::Bool(false)) } } Ok(Val::Bool(true)) } else { Err(EvalError::EvalError) } }, _ => { Err(EvalError::EvalError) } } } else { Err(EvalError::EvalError) } }, _ => { Err(EvalError::EvalError) } } } fn eval_call(&mut self, s: &Expr) -> EvalResult { match *s { Expr::List(ref l) => { if let Expr::Symbol(ref n) = l[0] { let fun = match self.env.get(n) { Some(v) => { v.clone() }, _ => { return Err(EvalError::EvalError) } }; let mut e_params = vec![]; for e in &l[1..] { e_params.push(try!(self.eval(e))) } let mut ctx = Context::new(); match fun { Val::Fn { ref params, ref body } => { for (p, e) in params.iter().zip(e_params.iter()) { match p { &Expr::Symbol(ref name) => { ctx.env.insert(name.clone(), e.clone()); }, _ => { return Err(EvalError::EvalError) } } } let mut result = v_list![]; for e in body { result = try!(ctx.eval(e)) } Ok(result) }, _ => { Err(EvalError::EvalError) } } } else { Err(EvalError::EvalError) } }, _ => { Err(EvalError::EvalError) } } } fn eval_def(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if let Expr::Symbol(ref n) = l[0] { match &n[..] 
{ "def" => { if l.len() == 3 { if let Expr::Symbol(ref n) = l[1] { self.eval(&l[2]).and_then(|v| { self.env.insert(n.clone(), v.clone()); Ok(v) }) } else { Err(EvalError::EvalError) } } else { Err(EvalError::IncorrectNumberOfArguments) } }, _ => { Err(EvalError::IncorrectSpecialForm) } } } else { Err(EvalError::EvalError) } } else { Err(EvalError::EvalError) } } fn eval_fn(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if let Expr::Symbol(ref n) = l[0] { match &n[..] { "fn" => { if l.len() >= 3 { if let Expr::List(ref params) = l[1] { Ok(Val::Fn { params: params.iter().cloned().collect::<Vec<Expr>>(), body: l.iter().skip(2).cloned().collect::<Vec<Expr>>() }) } else { Err(EvalError::EvalError) } } else { Err(EvalError::IncorrectNumberOfArguments) } }, _ => { Err(EvalError::IncorrectSpecialForm) } } } else { Err(EvalError::EvalError) } } else { Err(EvalError::EvalError) } } fn eval_plus(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if let Expr::Symbol(ref n) = l[0] { match &n[..] { "+" => { if l.len() > 1 { let mut a = 0_f64; for i in &l[1..] { a += try_number!(self.eval(i)); } Ok(Val::Number(a)) } else { Err(EvalError::IncorrectNumberOfArguments) } }, _ => { Err(EvalError::IncorrectSpecialForm) } } } else { Err(EvalError::EvalError) } } else { Err(EvalError::EvalError) } } fn eval_minus(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if let Expr::Symbol(ref n) = l[0] { match &n[..] { "-" => { if l.len() > 1 { if let Expr::Number(n) = l[1] { let mut a = n; for i in &l[2..] { a -= try_number!(self.eval(i)); } Ok(Val::Number(a)) } else { Err(EvalError::IncorrectTypeOfArgument) } } else { Err(EvalError::IncorrectNumberOfArguments) } }, _ => { Err(EvalError::IncorrectSpecialForm) } } } else { Err(EvalError::EvalError) } } else { Err(EvalError::EvalError) } } fn eval_div(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if let Expr::Symbol(ref n) = l[0] { match &n[..] 
{ "/" => { if l.len() > 1 { if let Expr::Number(n) = l[1] { let mut a = n; for i in &l[2..] { a /= try_number!(self.eval(i)) } Ok(Val::Number(a)) } else { Err(EvalError::IncorrectTypeOfArgument) } } else { Err(EvalError::IncorrectNumberOfArguments) } }, _ => { Err(EvalError::IncorrectSpecialForm) } } } else { Err(EvalError::EvalError) } } else { Err(EvalError::EvalError) } } fn eval_mul(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if let Expr::Symbol(ref n) = l[0] { match &n[..] { "*" => { if l.len() > 1 { let mut a = 1_f64; for i in &l[1..] { a *= try_number!(self.eval(i)); } Ok(Val::Number(a)) } else { Err(EvalError::IncorrectNumberOfArguments) } }, _ => { Err(EvalError::IncorrectSpecialForm) } } } else { Err(EvalError::EvalError) } } else { Err(EvalError::EvalError) } } } #[cfg(test)] mod tests { use super::Context; use super::EvalError::EvalError; #[test] fn test_eval_number_to_itself() { let num = 10_f64; let mut ctx = Context::new(); let expected_result = v_number!(num); let actual_result = ctx.eval(&e_number!(num)); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_string_to_itself() { let s = "rust is awesome"; let mut ctx = Context::new(); let expected_result = v_string!(s); let actual_result = ctx.eval(&e_string!(s)); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_undefined_symbol_to_error() { let mut ctx = Context::new(); let expected_result = EvalError; let actual_result = ctx.eval(&e_symbol!("a")); assert_eq!(expected_result, actual_result.err().unwrap()); } #[test] fn test_eval_true_to_matching_bool() { let mut ctx = Context::new(); let expected_result = v_bool!(true); let actual_result = ctx.eval(&e_symbol!("true")); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_false_to_matching_bool() { let mut ctx = Context::new(); let expected_result = v_bool!(false); let actual_result = ctx.eval(&e_symbol!("false")); 
assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_nil_to_empty_list() { let mut ctx = Context::new(); let expected_result = v_list![]; let actual_result = ctx.eval(&e_symbol!("nil")); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_def_special_form() { let num = 1_f64; let mut ctx = Context::new(); let expected_result = v_number!(num); let actual_input = e_list![e_symbol!("def"), e_symbol!("a"), e_number!(num)]; let actual_result = ctx.eval(&actual_input); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_fn_special_form_and_call_define_function() { let mut ctx = Context::new(); let fun = e_list![e_symbol!("fn"), e_list![e_symbol!("a"), e_symbol!("b")], e_list![e_symbol!("+"), e_symbol!("a"), e_symbol!("b")]]; ctx.eval(&e_list![e_symbol!("def"), e_symbol!("add"), fun]).ok().unwrap(); let expected_result = v_number!(3_f64); let actual_result = ctx.eval(&e_list![e_symbol!("add"), e_number!(1_f64), e_number!(2_f64)]); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_plus_builtin_fn() { let mut ctx = Context::new(); ctx.eval(&e_list![e_symbol!("def"), e_symbol!("a"), e_number!(1_f64)]).ok().unwrap(); ctx.eval(&e_list![e_symbol!("def"), e_symbol!("b"), e_number!(2_f64)]).ok().unwrap(); let actual_input = e_list![e_symbol!("+"), e_list![e_symbol!("+"), e_symbol!("a"), e_symbol!("b")], e_number!(3_f64)]; assert_eq!(v_number!(6_f64), ctx.eval(&actual_input).ok().unwrap()); } #[test] fn test_eval_minus_builtin_fn() { let mut ctx = Context::new(); let actual_input = e_list![e_symbol!("-"), e_number!(3_f64), e_number!(2_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = v_number!(1_f64); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_div_builtin_fn() { let mut ctx = Context::new(); let actual_input = e_list![e_symbol!("/"), e_number!(3_f64), e_number!(2_f64)]; let actual_result = 
ctx.eval(&actual_input); let expected_result = v_number!(1.5); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_mul_builtin_fn() { let mut ctx = Context::new(); let actual_input = e_list![e_symbol!("*"), e_number!(3.5), e_number!(2_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = v_number!(7_f64); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_lt_builtin_fn_positive_case() { let mut ctx = Context::new(); let actual_input = e_list![e_symbol!("<"), e_number!(1_f64), e_number!(2_f64), e_number!(3_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = v_bool!(true); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_lt_builtin_fn_negative_case() { let mut ctx = Context::new(); let actual_input = e_list![e_symbol!("<"), e_number!(3.5), e_number!(20_f64), e_number!(1_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = v_bool!(false); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_gt_builtin_fn_positive_case() { let mut ctx = Context::new(); ctx.eval(&e_list![e_symbol!("def"), e_symbol!("a"), e_number!(3_f64)]).ok().unwrap(); let actual_input = e_list![e_symbol!(">"), e_symbol!("a"), e_number!(2_f64), e_number!(1_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = v_bool!(true); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_gt_builtin_fn_negative_case() { let mut ctx = Context::new(); ctx.eval(&e_list![e_symbol!("def"), e_symbol!("a"), e_number!(20_f64)]).ok().unwrap(); let actual_input = e_list![e_symbol!(">"), e_number!(3.5), e_symbol!("a"), e_number!(1_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = v_bool!(false); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_eq_builtin_fn_positive_case() { let mut ctx = Context::new(); ctx.eval(&e_list![e_symbol!("def"), 
e_symbol!("a"), e_number!(3_f64)]).ok().unwrap(); let actual_input = e_list![e_symbol!("="), e_symbol!("a"), e_number!(3_f64), e_number!(3_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = v_bool!(true); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_eq_builtin_fn_negative_case() { let mut ctx = Context::new(); ctx.eval(&e_list![e_symbol!["def"], e_symbol!["a"], e_number![1_f64]]).ok().unwrap(); let actual_input = e_list![e_symbol!["="], e_number![3.5], e_number![20_f64], e_symbol!["a"]]; let actual_result = ctx.eval(&actual_input); let expected_result = v_bool![false]; assert_eq!(expected_result, actual_result.ok().unwrap()); } } Remove redundant checks, prepare to detailed error reporting use std::collections::HashMap; use ast::Expr; use val::Val; macro_rules! try_number { ($e:expr) => (match $e { Ok($crate::val::Val::Number(n)) => { n }, _ => { return Err(EvalError(EvalErrorCode::UnknownError)) } }) } #[derive(Debug, PartialEq)] enum EvalErrorCode { UnknownError } #[derive(Debug, PartialEq)] struct EvalError(EvalErrorCode); type EvalResult = Result<Val, EvalError>; pub struct Context { env: HashMap<String, Val>, } impl Context { pub fn new() -> Context { let mut ctx = Context { env: HashMap::new(), }; ctx.insert("nil".to_string(), v_list![]); ctx.insert("true".to_string(), v_bool!(true)); ctx.insert("false".to_string(), v_bool!(false)); ctx } fn get(&self, s: &String) -> EvalResult { if let Some(v) = self.env.get(s) { Ok(v.clone()) } else { self.error(EvalErrorCode::UnknownError) } } fn insert(&mut self, s: String, v: Val) -> Option<Val> { self.env.insert(s, v) } pub fn eval(&mut self, s: &Expr) -> EvalResult { match *s { Expr::Number(n) => { Ok(Val::Number(n)) }, Expr::Symbol(ref name) => { self.get(name) }, Expr::String(ref s) => { Ok(Val::String(s.clone())) }, Expr::List(ref l) => { if let Expr::Symbol(ref n) = l[0] { match &n[..] 
{ "def" => { self.eval_def(s) }, "fn" => { self.eval_fn(s) }, "+" => { self.eval_plus(s) }, "-" => { self.eval_minus(s) }, "*" => { self.eval_mul(s) }, "/" => { self.eval_div(s) }, "<" => { self.eval_lt(s) }, ">" => { self.eval_gt(s) }, "=" => { self.eval_eq(s) }, _ => { self.eval_call(s) }, } } else { self.error(EvalErrorCode::UnknownError) } } } } fn eval_def(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if l.len() == 3 { if let Expr::Symbol(ref n) = l[1] { let v = try!(self.eval(&l[2])); self.insert(n.clone(), v.clone()); Ok(v) } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } fn eval_fn(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if l.len() >= 3 { if let Expr::List(ref params) = l[1] { Ok(Val::Fn { params: params.iter().cloned().collect::<Vec<Expr>>(), body: l.iter().skip(2).cloned().collect::<Vec<Expr>>() }) } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } fn eval_plus(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if l.len() >= 3 { let mut a = 0_f64; for i in &l[1..] { a += try_number!(self.eval(i)); } Ok(Val::Number(a)) } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } fn eval_minus(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if l.len() >= 3 { if let Expr::Number(n) = l[1] { let mut a = n; for i in &l[2..] { a -= try_number!(self.eval(i)); } Ok(Val::Number(a)) } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } fn eval_mul(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if l.len() >= 3 { let mut a = 1_f64; for i in &l[1..] 
{ a *= try_number!(self.eval(i)); } Ok(Val::Number(a)) } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } fn eval_div(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if l.len() >= 3 { if let Expr::Number(n) = l[1] { let mut a = n; for i in &l[2..] { a /= try_number!(self.eval(i)) } Ok(Val::Number(a)) } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } fn eval_lt(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if l.len() >= 3 { let mut a = try_number!(self.eval(&l[1])); for e in &l[2..] { let n = try_number!(self.eval(e)); if a < n { a = n } else { return Ok(Val::Bool(false)) } } Ok(Val::Bool(true)) } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } fn eval_gt(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if l.len() >= 3 { let mut a = try_number!(self.eval(&l[1])); for e in &l[2..] { let n = try_number!(self.eval(e)); if a > n { a = n } else { return Ok(Val::Bool(false)) } } Ok(Val::Bool(true)) } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } fn eval_eq(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if l.len() >= 3 { let mut a = try_number!(self.eval(&l[1])); for e in &l[2..] { let n = try_number!(self.eval(e)); if a == n { a = n } else { return Ok(Val::Bool(false)) } } Ok(Val::Bool(true)) } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } fn eval_call(&mut self, s: &Expr) -> EvalResult { if let Expr::List(ref l) = *s { if let Expr::Symbol(ref n) = l[0] { if let Val::Fn { ref params, ref body } = try!(self.get(n)) { let mut v_params = vec![]; for e in &l[1..] 
{ v_params.push(try!(self.eval(e))) } let mut ctx = Context::new(); for (p, e) in params.iter().zip(v_params.iter()) { if let Expr::Symbol(ref s) = *p { ctx.insert(s.clone(), e.clone()); } else { return self.error(EvalErrorCode::UnknownError) } } let mut result = v_list![]; for e in body { result = try!(ctx.eval(e)) } Ok(result) } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } else { self.error(EvalErrorCode::UnknownError) } } fn error(&self, ec: EvalErrorCode) -> EvalResult { Err(EvalError(ec)) } } #[cfg(test)] mod tests { use super::Context; use super::EvalError; use super::EvalErrorCode::UnknownError; #[test] fn test_eval_number_to_itself() { let num = 10_f64; let mut ctx = Context::new(); let expected_result = v_number!(num); let actual_result = ctx.eval(&e_number!(num)); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_string_to_itself() { let s = "rust is awesome"; let mut ctx = Context::new(); let expected_result = v_string!(s); let actual_result = ctx.eval(&e_string!(s)); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_undefined_symbol_to_error() { let mut ctx = Context::new(); let expected_result = EvalError(UnknownError); let actual_result = ctx.eval(&e_symbol!("a")); assert_eq!(expected_result, actual_result.err().unwrap()); } #[test] fn test_eval_true_to_matching_bool() { let mut ctx = Context::new(); let expected_result = v_bool!(true); let actual_result = ctx.eval(&e_symbol!("true")); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_false_to_matching_bool() { let mut ctx = Context::new(); let expected_result = v_bool!(false); let actual_result = ctx.eval(&e_symbol!("false")); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_nil_to_empty_list() { let mut ctx = Context::new(); let expected_result = v_list![]; let actual_result = ctx.eval(&e_symbol!("nil")); 
assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_def_special_form() { let num = 1_f64; let mut ctx = Context::new(); let expected_result = v_number!(num); let actual_input = e_list![e_symbol!("def"), e_symbol!("a"), e_number!(num)]; let actual_result = ctx.eval(&actual_input); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_fn_special_form_and_call_define_function() { let mut ctx = Context::new(); let fun = e_list![e_symbol!("fn"), e_list![e_symbol!("a"), e_symbol!("b")], e_list![e_symbol!("+"), e_symbol!("a"), e_symbol!("b")]]; ctx.eval(&e_list![e_symbol!("def"), e_symbol!("add"), fun]).ok().unwrap(); let expected_result = v_number!(3_f64); let actual_result = ctx.eval(&e_list![e_symbol!("add"), e_number!(1_f64), e_number!(2_f64)]); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_plus_builtin_fn() { let mut ctx = Context::new(); ctx.eval(&e_list![e_symbol!("def"), e_symbol!("a"), e_number!(1_f64)]).ok().unwrap(); ctx.eval(&e_list![e_symbol!("def"), e_symbol!("b"), e_number!(2_f64)]).ok().unwrap(); let actual_input = e_list![e_symbol!("+"), e_list![e_symbol!("+"), e_symbol!("a"), e_symbol!("b")], e_number!(3_f64)]; assert_eq!(v_number!(6_f64), ctx.eval(&actual_input).ok().unwrap()); } #[test] fn test_eval_minus_builtin_fn() { let mut ctx = Context::new(); let actual_input = e_list![e_symbol!("-"), e_number!(3_f64), e_number!(2_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = v_number!(1_f64); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_div_builtin_fn() { let mut ctx = Context::new(); let actual_input = e_list![e_symbol!("/"), e_number!(3_f64), e_number!(2_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = v_number!(1.5); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_mul_builtin_fn() { let mut ctx = Context::new(); let actual_input = 
e_list![e_symbol!("*"), e_number!(3.5), e_number!(2_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = v_number!(7_f64); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_lt_builtin_fn_positive_case() { let mut ctx = Context::new(); let actual_input = e_list![e_symbol!("<"), e_number!(1_f64), e_number!(2_f64), e_number!(3_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = v_bool!(true); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_lt_builtin_fn_negative_case() { let mut ctx = Context::new(); let actual_input = e_list![e_symbol!("<"), e_number!(3.5), e_number!(20_f64), e_number!(1_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = v_bool!(false); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_gt_builtin_fn_positive_case() { let mut ctx = Context::new(); ctx.eval(&e_list![e_symbol!("def"), e_symbol!("a"), e_number!(3_f64)]).ok().unwrap(); let actual_input = e_list![e_symbol!(">"), e_symbol!("a"), e_number!(2_f64), e_number!(1_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = v_bool!(true); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_gt_builtin_fn_negative_case() { let mut ctx = Context::new(); ctx.eval(&e_list![e_symbol!("def"), e_symbol!("a"), e_number!(20_f64)]).ok().unwrap(); let actual_input = e_list![e_symbol!(">"), e_number!(3.5), e_symbol!("a"), e_number!(1_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = v_bool!(false); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_eq_builtin_fn_positive_case() { let mut ctx = Context::new(); ctx.eval(&e_list![e_symbol!("def"), e_symbol!("a"), e_number!(3_f64)]).ok().unwrap(); let actual_input = e_list![e_symbol!("="), e_symbol!("a"), e_number!(3_f64), e_number!(3_f64)]; let actual_result = ctx.eval(&actual_input); let expected_result = 
v_bool!(true); assert_eq!(expected_result, actual_result.ok().unwrap()); } #[test] fn test_eval_eq_builtin_fn_negative_case() { let mut ctx = Context::new(); ctx.eval(&e_list![e_symbol!["def"], e_symbol!["a"], e_number![1_f64]]).ok().unwrap(); let actual_input = e_list![e_symbol!["="], e_number![3.5], e_number![20_f64], e_symbol!["a"]]; let actual_result = ctx.eval(&actual_input); let expected_result = v_bool![false]; assert_eq!(expected_result, actual_result.ok().unwrap()); } }
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! this is context class to use autograd. extern crate std; // TODO #[inline] where appropriate. pub trait Context<T: std::num::Float>: std::marker::Sized { // public functions fn new() -> Self { // TODO implement correctly. // let size: usize = (std::u32::MAX as usize) / 2; Context::<T>::new_tmp() } fn new_variable(&self, value: T) -> super::float::Float<T, Self> { super::float::float_new(value, <Self as Context<T>>::get_new_variable_index()) } fn differentiate(&self, float: super::float::Float<T, Self>) { unsafe { // let count = <Self as Context<T>>::get_recorded_variables_count(); *<Self as Context<T>>::get_result_derivatives().offset(super::float::float_get_index(&float) as isize) = std::num::Float::one(); for i in (0..(*<Self as Context<T>>::get_recorded_entries_count())).rev() { let lhs_index = *<Self as Context<T>>::get_lhs_indices().offset(i as isize); let rhs_index = *<Self as Context<T>>::get_rhs_indices().offset(i as isize); *<Self as Context<T>>::get_result_derivatives().offset(rhs_index as isize) = *<Self as Context<T>>::get_result_derivatives().offset(rhs_index as isize) + (*<Self as Context<T>>::get_result_derivatives().offset(lhs_index as isize) * *<Self as Context<T>>::get_adjoints().offset(i as isize)); let t = *<Self as Context<T>>::get_result_derivatives().offset(i as isize); } } } fn get_derivative(&self, float: super::float::Float<T, Self>) -> T { let float_index_offset = super::float::float_get_index(&float) as isize; unsafe { *<Self as Context<T>>::get_result_derivatives().offset(float_index_offset) } } // Private functions fn get_new_variable_index() -> usize { let count = <Self as Context<T>>::get_recorded_variables_count(); let index = *count; *count += 1; index } fn get_new_entry_index() -> usize { let count = <Self as 
Context<T>>::get_recorded_entries_count(); let index = *count; *count += 1; index } fn unary_operation(adjoint: T, rhs_index: usize) -> usize { let lhs_index = <Self as Context<T>>::get_new_variable_index(); let recorded_entries_count_offset = <Self as Context<T>>::get_new_entry_index() as isize; unsafe { *<Self as Context<T>>::get_adjoints().offset(recorded_entries_count_offset) = adjoint; *<Self as Context<T>>::get_lhs_indices().offset(recorded_entries_count_offset) = lhs_index; *<Self as Context<T>>::get_rhs_indices().offset(recorded_entries_count_offset) = rhs_index; } lhs_index } fn binary_operation(adjoints: &[T; 2], rhs_indices: &[usize; 2]) -> usize { let lhs_index = <Self as Context<T>>::get_new_variable_index(); let recorded_entries_count_offset_1 = <Self as Context<T>>::get_new_entry_index() as isize; let recorded_entries_count_offset_2 = <Self as Context<T>>::get_new_entry_index() as isize; unsafe { // TODO is indexing inefficient? *<Self as Context<T>>::get_adjoints().offset(recorded_entries_count_offset_1) = adjoints[0]; *<Self as Context<T>>::get_lhs_indices().offset(recorded_entries_count_offset_1) = lhs_index; *<Self as Context<T>>::get_rhs_indices().offset(recorded_entries_count_offset_1) = rhs_indices[0]; *<Self as Context<T>>::get_adjoints().offset(recorded_entries_count_offset_2) = adjoints[1]; *<Self as Context<T>>::get_lhs_indices().offset(recorded_entries_count_offset_2) = lhs_index; *<Self as Context<T>>::get_rhs_indices().offset(recorded_entries_count_offset_2) = rhs_indices[1]; } lhs_index } // TODO the Option arguments and above std::marker::Sized are // a hack due to the Rust compiler's limitation. // This should be possible ideally. http://is.gd/QelquA // TODO just return raw memory instead of vector? 
fn get_recorded_variables_count<'a>() -> &'a mut usize; fn get_recorded_entries_count<'a>() -> &'a mut usize; fn get_adjoints<'a>() -> &'a mut*mut T; fn get_lhs_indices<'a>() -> &'a mut*mut usize; fn get_rhs_indices<'a>() -> &'a mut*mut usize; fn get_result_derivatives<'a>() -> &'a mut*mut T; fn new_tmp() -> Self; } pub struct ContextImpl { // TODO this is to keep the struct construction private. Remove when we don't need it. _private: (), } impl Context<f32> for ContextImpl { fn get_recorded_variables_count<'a>() -> &'a mut usize { #[thread_local] static mut ptr : usize = 0; unsafe { &mut ptr } } fn get_recorded_entries_count<'a>() -> &'a mut usize { #[thread_local] static mut ptr : usize = 0; unsafe { &mut ptr } } fn get_adjoints<'a>() -> &'a mut*mut f32 { #[thread_local] static mut ptr : *mut f32 = 0 as *mut f32; unsafe { &mut ptr } } fn get_lhs_indices<'a>() -> &'a mut*mut usize { #[thread_local] static mut ptr : *mut usize = 0 as *mut usize; unsafe { &mut ptr } } fn get_rhs_indices<'a>() -> &'a mut*mut usize{ #[thread_local] static mut ptr : *mut usize = 0 as *mut usize; unsafe { &mut ptr } } fn get_result_derivatives<'a>() -> &'a mut*mut f32 { #[thread_local] static mut ptr : *mut f32 = 0 as *mut f32; unsafe { &mut ptr } } fn new_tmp() -> Self { // TODO implement let capacity: usize = 1000; // TODO use checked_mul? 
// example : let usize_size = capacity.checked_mul(std::mem::size_of::<usize>()).expect("capacity overflow"); let usize_size = capacity * std::mem::size_of::<usize>(); let f32_size = capacity * std::mem::size_of::<f32>(); // std::rt::heap::allocate(f32_size, mem::min_align_of::<T>()) unsafe { *<Self as Context<f32>>::get_recorded_variables_count() = 0; *<Self as Context<f32>>::get_recorded_entries_count() = 0; *<Self as Context<f32>>::get_adjoints() = std::rt::heap::allocate(f32_size, std::mem::align_of::<f32>()) as *mut f32; *<Self as Context<f32>>::get_lhs_indices() = std::rt::heap::allocate(usize_size, std::mem::align_of::<usize>()) as *mut usize; *<Self as Context<f32>>::get_rhs_indices() = std::rt::heap::allocate(usize_size, std::mem::align_of::<usize>()) as *mut usize; // TODO we don't have to allocate get_result_derivatives now, isn't it? *<Self as Context<f32>>::get_result_derivatives() = std::rt::heap::allocate(f32_size, std::mem::align_of::<f32>()) as *mut f32; } ContextImpl{_private: ()} } } // TODO implement safe lock mutex. // impl ContextImpl { // fn get_mutex() { // #[thread_local] // static MUTEX : std::sync::StaticMutex = std::sync::MUTEX_INIT; // } // } impl std::ops::Drop for ContextImpl { fn drop(&mut self) { // *Context::<f32>::get_recorded_variables_count(None::<Self>) = 0; // *Context::<f32>::get_adjoints(None::<Self>) = 0; // TODO implement. 
let capacity: usize = 1000; let usize_size = capacity * std::mem::size_of::<usize>(); let f32_size = capacity * std::mem::size_of::<f32>(); unsafe { std::rt::heap::deallocate(*<Self as Context<f32>>::get_adjoints() as *mut u8, f32_size, std::mem::align_of::<f32>()); std::rt::heap::deallocate(*<Self as Context<f32>>::get_lhs_indices() as *mut u8, usize_size, std::mem::align_of::<usize>()); std::rt::heap::deallocate(*<Self as Context<f32>>::get_rhs_indices() as *mut u8, usize_size, std::mem::align_of::<usize>()); std::rt::heap::deallocate(*<Self as Context<f32>>::get_result_derivatives() as *mut u8, f32_size, std::mem::align_of::<f32>()); } } } Remove an outdated comment and TODOs. /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! this is context class to use autograd. extern crate std; // TODO #[inline] where appropriate. pub trait Context<T: std::num::Float>: std::marker::Sized { // public functions fn new() -> Self { // TODO implement correctly. 
// let size: usize = (std::u32::MAX as usize) / 2; Context::<T>::new_tmp() } fn new_variable(&self, value: T) -> super::float::Float<T, Self> { super::float::float_new(value, <Self as Context<T>>::get_new_variable_index()) } fn differentiate(&self, float: super::float::Float<T, Self>) { unsafe { // let count = <Self as Context<T>>::get_recorded_variables_count(); *<Self as Context<T>>::get_result_derivatives().offset(super::float::float_get_index(&float) as isize) = std::num::Float::one(); for i in (0..(*<Self as Context<T>>::get_recorded_entries_count())).rev() { let lhs_index = *<Self as Context<T>>::get_lhs_indices().offset(i as isize); let rhs_index = *<Self as Context<T>>::get_rhs_indices().offset(i as isize); *<Self as Context<T>>::get_result_derivatives().offset(rhs_index as isize) = *<Self as Context<T>>::get_result_derivatives().offset(rhs_index as isize) + (*<Self as Context<T>>::get_result_derivatives().offset(lhs_index as isize) * *<Self as Context<T>>::get_adjoints().offset(i as isize)); let t = *<Self as Context<T>>::get_result_derivatives().offset(i as isize); } } } fn get_derivative(&self, float: super::float::Float<T, Self>) -> T { let float_index_offset = super::float::float_get_index(&float) as isize; unsafe { *<Self as Context<T>>::get_result_derivatives().offset(float_index_offset) } } // Private functions fn get_new_variable_index() -> usize { let count = <Self as Context<T>>::get_recorded_variables_count(); let index = *count; *count += 1; index } fn get_new_entry_index() -> usize { let count = <Self as Context<T>>::get_recorded_entries_count(); let index = *count; *count += 1; index } fn unary_operation(adjoint: T, rhs_index: usize) -> usize { let lhs_index = <Self as Context<T>>::get_new_variable_index(); let recorded_entries_count_offset = <Self as Context<T>>::get_new_entry_index() as isize; unsafe { *<Self as Context<T>>::get_adjoints().offset(recorded_entries_count_offset) = adjoint; *<Self as 
Context<T>>::get_lhs_indices().offset(recorded_entries_count_offset) = lhs_index; *<Self as Context<T>>::get_rhs_indices().offset(recorded_entries_count_offset) = rhs_index; } lhs_index } fn binary_operation(adjoints: &[T; 2], rhs_indices: &[usize; 2]) -> usize { let lhs_index = <Self as Context<T>>::get_new_variable_index(); let recorded_entries_count_offset_1 = <Self as Context<T>>::get_new_entry_index() as isize; let recorded_entries_count_offset_2 = <Self as Context<T>>::get_new_entry_index() as isize; unsafe { // TODO is indexing inefficient? *<Self as Context<T>>::get_adjoints().offset(recorded_entries_count_offset_1) = adjoints[0]; *<Self as Context<T>>::get_lhs_indices().offset(recorded_entries_count_offset_1) = lhs_index; *<Self as Context<T>>::get_rhs_indices().offset(recorded_entries_count_offset_1) = rhs_indices[0]; *<Self as Context<T>>::get_adjoints().offset(recorded_entries_count_offset_2) = adjoints[1]; *<Self as Context<T>>::get_lhs_indices().offset(recorded_entries_count_offset_2) = lhs_index; *<Self as Context<T>>::get_rhs_indices().offset(recorded_entries_count_offset_2) = rhs_indices[1]; } lhs_index } fn get_recorded_variables_count<'a>() -> &'a mut usize; fn get_recorded_entries_count<'a>() -> &'a mut usize; fn get_adjoints<'a>() -> &'a mut*mut T; fn get_lhs_indices<'a>() -> &'a mut*mut usize; fn get_rhs_indices<'a>() -> &'a mut*mut usize; fn get_result_derivatives<'a>() -> &'a mut*mut T; fn new_tmp() -> Self; } pub struct ContextImpl { // TODO this is to keep the struct construction private. Remove when we don't need it. 
_private: (), } impl Context<f32> for ContextImpl { fn get_recorded_variables_count<'a>() -> &'a mut usize { #[thread_local] static mut ptr : usize = 0; unsafe { &mut ptr } } fn get_recorded_entries_count<'a>() -> &'a mut usize { #[thread_local] static mut ptr : usize = 0; unsafe { &mut ptr } } fn get_adjoints<'a>() -> &'a mut*mut f32 { #[thread_local] static mut ptr : *mut f32 = 0 as *mut f32; unsafe { &mut ptr } } fn get_lhs_indices<'a>() -> &'a mut*mut usize { #[thread_local] static mut ptr : *mut usize = 0 as *mut usize; unsafe { &mut ptr } } fn get_rhs_indices<'a>() -> &'a mut*mut usize{ #[thread_local] static mut ptr : *mut usize = 0 as *mut usize; unsafe { &mut ptr } } fn get_result_derivatives<'a>() -> &'a mut*mut f32 { #[thread_local] static mut ptr : *mut f32 = 0 as *mut f32; unsafe { &mut ptr } } fn new_tmp() -> Self { // TODO implement let capacity: usize = 1000; // TODO use checked_mul? // example : let usize_size = capacity.checked_mul(std::mem::size_of::<usize>()).expect("capacity overflow"); let usize_size = capacity * std::mem::size_of::<usize>(); let f32_size = capacity * std::mem::size_of::<f32>(); // std::rt::heap::allocate(f32_size, mem::min_align_of::<T>()) unsafe { *<Self as Context<f32>>::get_recorded_variables_count() = 0; *<Self as Context<f32>>::get_recorded_entries_count() = 0; *<Self as Context<f32>>::get_adjoints() = std::rt::heap::allocate(f32_size, std::mem::align_of::<f32>()) as *mut f32; *<Self as Context<f32>>::get_lhs_indices() = std::rt::heap::allocate(usize_size, std::mem::align_of::<usize>()) as *mut usize; *<Self as Context<f32>>::get_rhs_indices() = std::rt::heap::allocate(usize_size, std::mem::align_of::<usize>()) as *mut usize; // TODO we don't have to allocate get_result_derivatives now, isn't it? *<Self as Context<f32>>::get_result_derivatives() = std::rt::heap::allocate(f32_size, std::mem::align_of::<f32>()) as *mut f32; } ContextImpl{_private: ()} } } // TODO implement safe lock mutex. 
// impl ContextImpl { // fn get_mutex() { // #[thread_local] // static MUTEX : std::sync::StaticMutex = std::sync::MUTEX_INIT; // } // } impl std::ops::Drop for ContextImpl { fn drop(&mut self) { // *Context::<f32>::get_recorded_variables_count(None::<Self>) = 0; // *Context::<f32>::get_adjoints(None::<Self>) = 0; // TODO implement. let capacity: usize = 1000; let usize_size = capacity * std::mem::size_of::<usize>(); let f32_size = capacity * std::mem::size_of::<f32>(); unsafe { std::rt::heap::deallocate(*<Self as Context<f32>>::get_adjoints() as *mut u8, f32_size, std::mem::align_of::<f32>()); std::rt::heap::deallocate(*<Self as Context<f32>>::get_lhs_indices() as *mut u8, usize_size, std::mem::align_of::<usize>()); std::rt::heap::deallocate(*<Self as Context<f32>>::get_rhs_indices() as *mut u8, usize_size, std::mem::align_of::<usize>()); std::rt::heap::deallocate(*<Self as Context<f32>>::get_result_derivatives() as *mut u8, f32_size, std::mem::align_of::<f32>()); } } }
use std::fmt; use byteorder::{ByteOrder, BigEndian}; use super::exception::Exception; use super::instruction::Instruction; use super::mmu; use super::machine_status::MachineStatus; use super::super::memory; const NUM_GPR: usize = 32; const NUM_SPR: usize = 1023; const NUM_SR : usize = 16; const NUM_CR : usize = 8; const XER : usize = 1; const HID0: usize = 1008; pub struct Cpu { pub memory: memory::Memory, pub mmu: mmu::Mmu, pub pc: u32, ctr: u32, gpr: [u32; NUM_GPR], spr: [u32; NUM_SPR], // ToDo phase out pub msr: MachineStatus, sr: [u32; NUM_SR], cr: [u8; NUM_CR], lr: u32 } impl Cpu { pub fn new(memory: memory::Memory) -> Cpu { let mut cpu = Cpu { memory: memory, mmu: mmu::Mmu::new(), pc: 0, ctr: 0, gpr: [0; NUM_GPR], spr: [0; NUM_SPR], msr: MachineStatus::default(), sr: [0; NUM_SR], cr: [0; NUM_CR], lr: 0 }; cpu.exception(Exception::SystemReset); // power on reset cpu } pub fn run_instruction(&mut self) { let instr = self.read_instruction(); println!("{:#x}:{:?}", instr.opcode(), self.gpr); match instr.opcode() { 10 => self.cmpli(instr), 14 => self.addi(instr), 15 => self.addis(instr), 16 => self.bcx(instr), 18 => self.bx(instr), 19 => { match instr.subopcode() { 16 => self.bclrx(instr), 150 => { // isync - instruction synchronize // don't do anything }, _ => panic!("Unrecognized instruction subopcode {} {}", instr.opcode(), instr.subopcode()) } }, 21 => self.rlwinm(instr), 24 => { // ori - OR immediate self.gpr[instr.a()] = self.gpr[instr.s()] | instr.uimm(); }, 31 => { match instr.subopcode() { 28 => self.andx(instr), 40 => self.subf(instr), 83 => self.mfmsr(instr), 146 => self.mtmsr(instr), 210 => self.mtsr(instr), 339 => self.mfspr(instr), 371 => self.mftb(instr), 467 => self.mtspr(instr), _ => panic!("Unrecognized instruction subopcode {} {}", instr.opcode(), instr.subopcode()) } }, 32 => self.lwz(instr), 36 => self.stw(instr), 44 => self.sth(instr), _ => panic!("Unrecognized instruction {:#x} {:#b}", instr.0, instr.opcode()) } self.pc += 4; } fn 
read_instruction(&mut self) -> Instruction { let mut data = [0u8; 5]; let addr = self.mmu.instr_address_translate(&self.msr, self.pc); self.memory.read(addr, &mut data); Instruction(BigEndian::read_u32(&data[0..])) } // FixMe: handle exceptions properly pub fn exception(&mut self, e: Exception) { let nia = match e { Exception::SystemReset => 0x00100 }; if self.msr.exception_prefix { self.pc = nia ^ 0xFFF00000 } else { self.pc = nia } println!("{:#x} exception occurred, nia {}", nia, self.pc); } // complare logic immediate fn cmpli(&mut self, instr: Instruction) { let a = self.gpr[instr.a()]; let b = instr.uimm(); let mut c:u8; if a < b { c = 0b1000; } else if a > b { c = 0b0100; } else { c = 0b0010; } c |= self.spr[XER] as u8 & 0b1; self.cr[instr.crfd()] = c; } // add immediate fn addi(&mut self, instr: Instruction) { let a = instr.a(); if a == 0 { self.gpr[instr.d()] = instr.uimm(); } else { self.gpr[instr.d()] = self.gpr[a] + instr.uimm(); } } // add immediate shifted fn addis(&mut self, instr: Instruction) { if instr.a() == 0 { // lis self.gpr[instr.d()] = instr.uimm() << 16; } else { // subis self.gpr[instr.d()] = self.gpr[instr.a()] + (instr.uimm() << 16); } } // ToDo: verify this is working fn bcx(&mut self, instr: Instruction) { let bo = instr.bo(); let ctr_ok = if bon(bo, 2) == 0 { self.ctr.wrapping_sub(1); if bon(bo, 3) != 0 { self.ctr == 0 } else { self.ctr != 0 } } else { true }; let cond_ok = if bon(bo, 0) == 0 { (bon(bo, 1) == (self.cr[instr.bi()])) } else { true }; if ctr_ok && cond_ok { if instr.aa() == 1 { self.pc = instr.bd() << 2; } else { self.pc = self.pc + (instr.bd() << 2); } if instr.lk() == 1 { self.lr = self.pc + 4; } } } // branch fn bx(&mut self, instr: Instruction) { if instr.aa() == 1 { self.pc = instr.li() << 2; } else { self.pc = self.pc + (instr.li() << 2); } if instr.lk() == 1 { self.lr = self.pc + 4; } } // branch conditional to link register fn bclrx(&mut self, instr: Instruction) { let bo = instr.bo(); let ctr_ok = if bon(bo, 2) 
== 0 { self.ctr.wrapping_sub(1); if bon(bo, 3) != 0 { self.ctr == 0 } else { self.ctr != 0 } } else { true }; let cond_ok = if bon(bo, 0) == 0 { (bon(bo, 1) == (self.cr[instr.bi()])) } else { true }; if ctr_ok && cond_ok { self.pc = self.lr & 0b00; if instr.lk() == 1 { self.lr = self.pc + 4; } } } // rotate word immediate then AND with mask fn rlwinm(&mut self, instr: Instruction) { let r = self.gpr[instr.s()] << instr.sh(); let m = mask(instr.mb(), instr.me()); self.gpr[instr.a()] = r & m; } fn andx(&mut self, instr: Instruction) { self.gpr[instr.a()] = self.gpr[instr.d()] & self.gpr[instr.b()]; // TODO: other registers altered } // subtract from fn subf(&mut self, instr: Instruction) { self.gpr[instr.d()] = self.gpr[instr.a()] + self.gpr[instr.b()] + 1; // TODO: other registers altered } // move from machine state register fn mfmsr(&mut self, instr: Instruction) { self.gpr[instr.d()] = self.msr.as_u32(); // TODO: check privelege level } // move to machine state register fn mtmsr(&mut self, instr: Instruction) { self.msr = self.gpr[instr.s()].into(); // TODO: check privelege level } // move to segment register fn mtsr(&mut self, instr: Instruction) { self.sr[instr.sr()] = self.gpr[instr.s()]; // TODO: check privelege level -> supervisor level instruction } // move from special purpose register fn mfspr(&mut self, instr: Instruction) { let n = ((instr.spr_upper() << 5) | (instr.spr_lower() & 0b1_1111)) as usize; match n { 8 => self.gpr[instr.s()] = self.lr, 9 => self.gpr[instr.s()] = self.ctr, _ => { println!("FIXME: spr {} not implemented", n); self.gpr[instr.s()] = self.spr[n]; } } // TODO: check privelege level } // move from time base fn mftb(&mut self, instr: Instruction) { let n = (instr.spr_upper() << 5) | (instr.spr_lower() & 0b1_1111); match n { 268 => { // TBL println!("FIXME: mftb, get time base tbl"); }, 269 => { // TBR println!("FIXME: mftb, get time base tbu"); }, _ => panic!("Unrecognized TBR {}", n) // FixMe: invoke error handler } } // move special 
purpose register fn mtspr(&mut self, instr: Instruction) { let n = ((instr.spr_upper() << 5) | (instr.spr_lower() & 0b1_1111)) as usize; match n { 8 => self.lr = self.gpr[instr.s()], 9 => self.ctr = self.gpr[instr.s()], 528 ... 543 => { // if IBAT or DBAT, write to MMU register self.mmu.write_bat_reg(n, self.gpr[instr.s()]); //panic!("FixMe: write to BAT registers"); }, _ => { println!("FIXME: mtspr {} not implemented", n); self.spr[n] = self.gpr[instr.s()]; } } // TODO: check privelege level } // load word and zero fn lwz(&mut self, instr: Instruction) { let ea = if instr.a() == 0 { instr.simm() } else { self.gpr[instr.a()] + instr.simm() }; let addr = self.mmu.data_address_translate(&self.msr, ea); self.gpr[instr.d()] = self.memory.read_u32(addr); } // store word fn stw(&mut self, instr: Instruction) { let ea = if instr.a() == 0 { instr.simm() } else { self.gpr[instr.a()] + instr.simm() }; let addr = self.mmu.data_address_translate(&self.msr, ea); self.memory.write_u32(addr, self.gpr[instr.s()]); } // store half word fn sth(&mut self, instr: Instruction) { let ea = if instr.a() == 0 { instr.simm() } else { self.gpr[instr.a()] + instr.simm() }; let addr = self.mmu.data_address_translate(&self.msr, ea); self.memory.write_u16(addr, self.gpr[instr.s()] as u16); } } //((1 << (x - y +1)) - 1) << y // FixMe: not sure if this is correct // actually I think this might be backwards fn mask(x: u8, y: u8) -> u32 { let mut mask:u32 = 0xFFFFFFFF >> y; if x >= 31 { mask ^= 0; } else { mask ^= 0xFFFFFFFF >> (x + 1); } if y > x { !mask } else { mask } } fn bon(bo: u8, n: u8) -> u8 { (bo >> (4-n)) & 1 } impl fmt::Debug for Cpu { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "MSR: {:?} gpr: {:?}, sr: {:?}, cr:{:?}, HID0: {}", self.msr, self.gpr, self.sr, self.cr, self.spr[HID0]) } } fixed EXTS use std::fmt; use byteorder::{ByteOrder, BigEndian}; use super::exception::Exception; use super::instruction::Instruction; use super::mmu; use 
super::machine_status::MachineStatus;
use super::super::memory;

const NUM_GPR: usize = 32;
// The SPR number is a 10-bit field, so there are 1024 possible registers.
// (Was 1023, which made an access to SPR 1023 an out-of-bounds panic.)
const NUM_SPR: usize = 1024;
const NUM_SR : usize = 16;
const NUM_CR : usize = 8;

const XER : usize = 1;
const HID0: usize = 1008;

/// PowerPC CPU state: program counter, register files, machine status,
/// and the MMU + memory the core fetches from.
pub struct Cpu {
    pub memory: memory::Memory,
    pub mmu: mmu::Mmu,
    pub pc: u32,
    ctr: u32,
    gpr: [u32; NUM_GPR],
    spr: [u32; NUM_SPR], // ToDo phase out
    pub msr: MachineStatus,
    sr: [u32; NUM_SR],
    cr: [u8; NUM_CR],
    lr: u32
}

impl Cpu {
    /// Create a CPU with zeroed registers and take the power-on-reset
    /// exception, which places PC at the reset vector.
    pub fn new(memory: memory::Memory) -> Cpu {
        let mut cpu = Cpu {
            memory: memory,
            mmu: mmu::Mmu::new(),
            pc: 0,
            ctr: 0,
            gpr: [0; NUM_GPR],
            spr: [0; NUM_SPR],
            msr: MachineStatus::default(),
            sr: [0; NUM_SR],
            cr: [0; NUM_CR],
            lr: 0
        };
        cpu.exception(Exception::SystemReset); // power on reset
        cpu
    }

    /// Fetch, decode, and execute one instruction, then advance PC.
    pub fn run_instruction(&mut self) {
        let instr = self.read_instruction();
        let old_pc = self.pc;
        match instr.opcode() {
            10 => self.cmpli(instr),
            14 => self.addi(instr),
            15 => self.addis(instr),
            16 => self.bcx(instr),
            18 => self.bx(instr),
            19 => {
                match instr.subopcode() {
                    16 => self.bclrx(instr),
                    150 => self.isync(instr),
                    _ => panic!("Unrecognized instruction subopcode {} {}", instr.opcode(), instr.subopcode())
                }
            },
            21 => self.rlwinm(instr),
            24 => { // ori - OR immediate
                self.gpr[instr.a()] = self.gpr[instr.s()] | instr.uimm();
            },
            31 => {
                match instr.subopcode() {
                    28 => self.andx(instr),
                    40 => self.subf(instr),
                    83 => self.mfmsr(instr),
                    146 => self.mtmsr(instr),
                    210 => self.mtsr(instr),
                    339 => self.mfspr(instr),
                    371 => self.mftb(instr),
                    467 => self.mtspr(instr),
                    _ => panic!("Unrecognized instruction subopcode {} {}", instr.opcode(), instr.subopcode())
                }
            },
            32 => self.lwz(instr),
            36 => self.stw(instr),
            44 => self.sth(instr),
            _ => panic!("Unrecognized instruction {:#x} {:#b}", instr.0, instr.opcode())
        }
        // Taken branches (and exceptions) load PC with the target
        // themselves; blindly adding 4 afterwards overshot the target.
        // Only fall through when the handler left PC untouched.
        // (Limitation: a branch to its own address still falls through;
        // a dedicated "next instruction address" field would fix that.)
        if self.pc == old_pc {
            self.pc += 4;
        }
    }

    /// Fetch the 32-bit big-endian instruction word at PC (via the MMU).
    fn read_instruction(&mut self) -> Instruction {
        // A PowerPC instruction is exactly 4 bytes (buffer was [0u8; 5]).
        let mut data = [0u8; 4];
        let addr = self.mmu.instr_address_translate(&self.msr, self.pc);
        self.memory.read(addr, &mut data);
        Instruction(BigEndian::read_u32(&data[..]))
    }
properly pub fn exception(&mut self, e: Exception) { let nia = match e { Exception::SystemReset => 0x00100 }; if self.msr.exception_prefix { self.pc = nia ^ 0xFFF00000 } else { self.pc = nia } println!("{:#x} exception occurred, nia {:#x}", nia, self.pc); } // complare logic immediate fn cmpli(&mut self, instr: Instruction) { let a = self.gpr[instr.a()]; let b = instr.uimm(); let mut c:u8; if a < b { c = 0b1000; } else if a > b { c = 0b0100; } else { c = 0b0010; } c |= self.spr[XER] as u8 & 0b1; self.cr[instr.crfd()] = c; } // add immediate fn addi(&mut self, instr: Instruction) { let a = instr.a(); if a == 0 { self.gpr[instr.d()] = instr.uimm(); } else { self.gpr[instr.d()] = self.gpr[a] + instr.uimm(); } } // add immediate shifted fn addis(&mut self, instr: Instruction) { if instr.a() == 0 { // lis self.gpr[instr.d()] = instr.uimm() << 16; } else { // subis self.gpr[instr.d()] = self.gpr[instr.a()] + (instr.uimm() << 16); } } // branch conditional fn bcx(&mut self, instr: Instruction) { let bo = instr.bo(); let ctr_ok = if bon(bo, 2) == 0 { self.ctr = self.ctr.wrapping_sub(1); if bon(bo, 3) != 0 { self.ctr == 0 } else { self.ctr != 0 } } else { true }; let cond_ok = if bon(bo, 0) == 0 { (bon(bo, 1) == (self.cr[instr.bi()])) } else { true }; if ctr_ok && cond_ok { if instr.aa() == 1 { self.pc = sign_ext_16(instr.bd() << 2) as u32; } else { self.pc = self.pc.wrapping_add(sign_ext_16(instr.bd() << 2) as u32); } if instr.lk() == 1 { self.lr = self.pc + 4; } } } // branch fn bx(&mut self, instr: Instruction) { if instr.aa() == 1 { self.pc = sign_ext_26(instr.li() << 2) as u32; } else { self.pc = self.pc.wrapping_add(sign_ext_26(instr.li() << 2) as u32); } if instr.lk() == 1 { self.lr = self.pc + 4; } } // branch conditional to link register fn bclrx(&mut self, instr: Instruction) { let bo = instr.bo(); let ctr_ok = if bon(bo, 2) == 0 { self.ctr = self.ctr.wrapping_sub(1); if bon(bo, 3) != 0 { self.ctr == 0 } else { self.ctr != 0 } } else { true }; let cond_ok = if 
bon(bo, 0) == 0 { (bon(bo, 1) == (self.cr[instr.bi()])) } else { true }; if ctr_ok && cond_ok { self.pc = self.lr & 0b00; if instr.lk() == 1 { self.lr = self.pc + 4; } } } #[allow(unused_variables)] // isync - instruction synchronize fn isync(&mut self, instr: Instruction) { // don't do anything } // rotate word immediate then AND with mask fn rlwinm(&mut self, instr: Instruction) { let r = self.gpr[instr.s()] << instr.sh(); let m = mask(instr.mb(), instr.me()); self.gpr[instr.a()] = r & m; } fn andx(&mut self, instr: Instruction) { self.gpr[instr.a()] = self.gpr[instr.d()] & self.gpr[instr.b()]; // TODO: other registers altered } // subtract from fn subf(&mut self, instr: Instruction) { self.gpr[instr.d()] = self.gpr[instr.a()] + self.gpr[instr.b()] + 1; // TODO: other registers altered } // move from machine state register fn mfmsr(&mut self, instr: Instruction) { self.gpr[instr.d()] = self.msr.as_u32(); // TODO: check privelege level } // move to machine state register fn mtmsr(&mut self, instr: Instruction) { self.msr = self.gpr[instr.s()].into(); // TODO: check privelege level } // move to segment register fn mtsr(&mut self, instr: Instruction) { self.sr[instr.sr()] = self.gpr[instr.s()]; // TODO: check privelege level -> supervisor level instruction } // move from special purpose register fn mfspr(&mut self, instr: Instruction) { let n = ((instr.spr_upper() << 5) | (instr.spr_lower() & 0b1_1111)) as usize; match n { 8 => self.gpr[instr.s()] = self.lr, 9 => self.gpr[instr.s()] = self.ctr, _ => { println!("FIXME: spr {} not implemented", n); self.gpr[instr.s()] = self.spr[n]; } } // TODO: check privelege level } // move from time base fn mftb(&mut self, instr: Instruction) { let n = (instr.spr_upper() << 5) | (instr.spr_lower() & 0b1_1111); match n { 268 => { // TBL println!("FIXME: mftb, get time base tbl"); }, 269 => { // TBR println!("FIXME: mftb, get time base tbu"); }, _ => panic!("Unrecognized TBR {}", n) // FixMe: invoke error handler } } // move special 
purpose register fn mtspr(&mut self, instr: Instruction) { let n = ((instr.spr_upper() << 5) | (instr.spr_lower() & 0b1_1111)) as usize; match n { 8 => self.lr = self.gpr[instr.s()], 9 => self.ctr = self.gpr[instr.s()], 528 ... 543 => { // if IBAT or DBAT, write to MMU register self.mmu.write_bat_reg(n, self.gpr[instr.s()]); //panic!("FixMe: write to BAT registers"); }, _ => { println!("FIXME: mtspr {} not implemented", n); self.spr[n] = self.gpr[instr.s()]; } } // TODO: check privelege level } // load word and zero fn lwz(&mut self, instr: Instruction) { let ea = if instr.a() == 0 { instr.simm() } else { self.gpr[instr.a()] + instr.simm() }; let addr = self.mmu.data_address_translate(&self.msr, ea); self.gpr[instr.d()] = self.memory.read_u32(addr); } // store word fn stw(&mut self, instr: Instruction) { let ea = if instr.a() == 0 { instr.simm() } else { self.gpr[instr.a()] + instr.simm() }; let addr = self.mmu.data_address_translate(&self.msr, ea); self.memory.write_u32(addr, self.gpr[instr.s()]); } // store half word fn sth(&mut self, instr: Instruction) { let ea = if instr.a() == 0 { instr.simm() } else { self.gpr[instr.a()] + instr.simm() }; let addr = self.mmu.data_address_translate(&self.msr, ea); self.memory.write_u16(addr, self.gpr[instr.s()] as u16); } } //((1 << (x - y +1)) - 1) << y // FixMe: not sure if this is correct // actually I think this might be backwards fn mask(x: u8, y: u8) -> u32 { let mut mask:u32 = 0xFFFFFFFF >> y; if x >= 31 { mask ^= 0; } else { mask ^= 0xFFFFFFFF >> (x + 1); } if y > x { !mask } else { mask } } // Note: A cast from a signed value widens with signed-extension // A cast from an unsigned value widens with zero-extension fn sign_ext_16(x: u16) -> i32 { (x as i16) as i32 } fn sign_ext_26(x: u32) -> i32 { if x & 0x2000000 != 0 { (x | 0xFC000000) as i32 } else { x as i32 } } fn bon(bo: u8, n: u8) -> u8 { (bo >> (4-n)) & 1 } impl fmt::Debug for Cpu { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "MSR: {:?} 
gpr: {:?}, sr: {:?}, cr:{:?}, HID0: {}", self.msr, self.gpr, self.sr, self.cr, self.spr[HID0]) } }
// type alias for exception handling use std::result; pub type Result<T> = result::Result<T, Exception>; pub type Handler = fn(&mut Core) -> Result<Cycles>; pub type InstructionSet = Vec<Handler>; use ram::{LoggingMem, AddressBus, OpsLogger, SUPERVISOR_PROGRAM, SUPERVISOR_DATA, USER_PROGRAM, USER_DATA}; pub mod ops; mod effective_address; mod operator; pub struct Core { pub pc: u32, pub inactive_ssp: u32, // when in user mode pub inactive_usp: u32, // when in supervisor mode pub ir: u16, pub dar: [u32; 16], pub ophandlers: InstructionSet, pub s_flag: u32, pub int_mask: u32, pub x_flag: u32, pub c_flag: u32, pub v_flag: u32, pub n_flag: u32, pub prefetch_addr: u32, pub prefetch_data: u32, pub not_z_flag: u32, pub processing_state: ProcessingState, pub mem: LoggingMem<OpsLogger>, } #[derive(Clone, Copy)] pub struct Cycles(i32); use std::ops::Sub; impl Sub for Cycles { type Output = Cycles; fn sub(self, _rhs: Cycles) -> Cycles { Cycles(self.0 - _rhs.0) } } impl Cycles { fn any(self) -> bool { self.0 > 0 } } #[derive(Clone, Copy, Debug)] pub enum ProcessingState { Normal, Exception } #[derive(Clone, Copy, Debug)] pub enum AccessType {Read, Write} use ram::AddressSpace; #[derive(Debug)] pub enum Exception { AddressError { address: u32, access_type: AccessType, processing_state: ProcessingState, address_space: AddressSpace}, IllegalInstruction(u16, u32) } use std::fmt; impl fmt::Display for Exception { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Exception::AddressError { address, access_type, processing_state, address_space } => write!(f, "Address Error: {:?} {:?} at {:08x} during {:?} processing", access_type, address_space, address, processing_state), Exception::IllegalInstruction(ic, pc) => write!(f, "Illegal Instruction {:04x} at {:08x}", ic, pc), } } } use std::error; impl error::Error for Exception { fn description(&self) -> &str { match *self { Exception::AddressError{..} => "Address Error", Exception::IllegalInstruction(_, _) => "Illegal 
Instruction", } } fn cause(&self) -> Option<&error::Error> { None } } use std::num::Wrapping; // these values are borrowed from Musashi // and not yet fully understood const SFLAG_SET: u32 = 0x04; const XFLAG_SET: u32 = 0x100; const NFLAG_SET: u32 = 0x80; const VFLAG_SET: u32 = 0x80; const CFLAG_SET: u32 = 0x100; const CPU_SR_MASK: u32 = 0xa71f; /* T1 -- S -- -- I2 I1 I0 -- -- -- X N Z V C */ const CPU_SR_INT_MASK: u32 = 0x0700; // Exception Vectors //const EXCEPTION_BUS_ERROR: u32 = 2; const EXCEPTION_ADDRESS_ERROR: u32 = 3; const EXCEPTION_ILLEGAL_INSTRUCTION: u32 = 4; // const EXCEPTION_ZERO_DIVIDE: u32 = 5; // const EXCEPTION_CHK: u32 = 6; // const EXCEPTION_TRAPV: u32 = 7; // const EXCEPTION_PRIVILEGE_VIOLATION: u32 = 8; // const EXCEPTION_TRACE: u32 = 9; // const EXCEPTION_1010: u32 = 10; // const EXCEPTION_1111: u32 = 11; // const EXCEPTION_FORMAT_ERROR: u32 = 14; // const EXCEPTION_UNINITIALIZED_INTERRUPT: u32 = 15; // const EXCEPTION_SPURIOUS_INTERRUPT: u32 = 24; // const EXCEPTION_INTERRUPT_AUTOVECTOR: u32 = 24; // const EXCEPTION_TRAP_BASE: u32 = 32; impl Core { pub fn new(base: u32) -> Core { Core { pc: base, prefetch_addr: 0, prefetch_data: 0, inactive_ssp: 0, inactive_usp: 0, ir: 0, processing_state: ProcessingState::Exception, dar: [0u32; 16], mem: LoggingMem::new(0xaaaaaaaa, OpsLogger::new()), ophandlers: ops::fake::instruction_set(), s_flag: SFLAG_SET, int_mask: CPU_SR_INT_MASK, x_flag: 0, v_flag: 0, c_flag: 0, n_flag: 0, not_z_flag: 0xffffffff } } pub fn new_mem(base: u32, contents: &[u8]) -> Core { let mut lm = LoggingMem::new(0xaaaaaaaa, OpsLogger::new()); for (offset, byte) in contents.iter().enumerate() { lm.write_u8(base + offset as u32, *byte as u32); } Core { pc: base, prefetch_addr: 0, prefetch_data: 0, inactive_ssp: 0, inactive_usp: 0, ir: 0, processing_state: ProcessingState::Normal, dar: [0u32; 16], mem: lm, ophandlers: ops::fake::instruction_set(), s_flag: SFLAG_SET, int_mask: CPU_SR_INT_MASK, x_flag: 0, v_flag: 0, c_flag: 0, n_flag: 
0, not_z_flag: 0xffffffff } } pub fn reset(&mut self) { self.processing_state = ProcessingState::Exception; self.s_flag = SFLAG_SET; self.int_mask = CPU_SR_INT_MASK; self.prefetch_addr = 1; // non-zero, or the prefetch won't kick in self.jump(0); // these reads cannot possibly cause AddressError, as we forced PC to 0 self.dar[15] = self.read_imm_u32().unwrap(); let new_pc = self.read_imm_u32().unwrap(); self.jump(new_pc); self.processing_state = ProcessingState::Normal; } pub fn x_flag_as_1(&self) -> u32 { (self.x_flag>>8)&1 } // admittely I've chosen to reuse Musashi's representation of flags // which I don't fully understand (they are not matching their // positions in the SR/CCR) pub fn status_register(&self) -> u32 { (self.s_flag << 11) | self.int_mask | ((self.x_flag & XFLAG_SET) >> 4) | ((self.n_flag & NFLAG_SET) >> 4) | ((not1!(self.not_z_flag)) << 2) | ((self.v_flag & VFLAG_SET) >> 6) | ((self.c_flag & CFLAG_SET) >> 8) } pub fn usp(&self) -> u32 { if self.s_flag > 0 { self.inactive_usp } else { self.dar[15] } } pub fn ssp(&self) -> u32 { if self.s_flag > 0 { self.dar[15] } else { self.inactive_ssp } } // admittely I've chosen to reuse Musashi's representation of flags // which I don't fully understand (they are not matching their // positions in the SR/CCR) pub fn sr_to_flags(&mut self, sr: u32) { let sr = sr & CPU_SR_MASK; self.int_mask = sr & CPU_SR_INT_MASK; self.s_flag = (sr >> 11) & SFLAG_SET; self.x_flag = (sr << 4) & XFLAG_SET; self.n_flag = (sr << 4) & NFLAG_SET; self.not_z_flag = not1!(sr & 0b00100); self.v_flag = (sr << 6) & VFLAG_SET; self.c_flag = (sr << 8) & CFLAG_SET; // println!("{} {:016b} {} {}", self.flags(), sr, self.not_z_flag, sr & 0b00100); } pub fn flags(&self) -> String { let sr = self.status_register(); let supervisor = (sr >> 13) & 1; let irq_mask = (0x700 & sr) >> 8; format!("-{}{}{}{}{}{}{}", if supervisor > 0 {'S'} else {'U'}, irq_mask, if 0 < (sr >> 4) & 1 {'X'} else {'-'}, if 0 < (sr >> 3) & 1 {'N'} else {'-'}, if 0 < (sr >> 
2) & 1 {'Z'} else {'-'}, if 0 < (sr >> 1) & 1 {'V'} else {'-'}, if 0 < (sr ) & 1 {'C'} else {'-'}) } fn prefetch_if_needed(&mut self) -> bool { // does current PC overlap with fetched data let fetched = if self.pc & !3 != self.prefetch_addr { self.prefetch_addr = self.pc & !3; let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM}; self.prefetch_data = self.mem.read_long(address_space, self.prefetch_addr); true } else { false }; self.pc += 2; fetched } pub fn read_imm_u32(&mut self) -> Result<u32> { if self.pc & 1 > 0 { let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM}; return Err(Exception::AddressError{address: self.pc, access_type: AccessType::Read, address_space: address_space, processing_state: self.processing_state}) } self.prefetch_if_needed(); let prev_prefetch_data = self.prefetch_data; Ok(if self.prefetch_if_needed() { ((prev_prefetch_data << 16) | (self.prefetch_data >> 16)) & 0xffffffff } else { prev_prefetch_data }) } pub fn read_imm_i16(&mut self) -> Result<i16> { Ok(try!(self.read_imm_u16()) as i16) } pub fn read_imm_u16(&mut self) -> Result<u16> { // the Musashi read_imm_16 calls cpu_read_long as part of prefetch if self.pc & 1 > 0 { let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM}; return Err(Exception::AddressError{address: self.pc, access_type: AccessType::Read, address_space: address_space, processing_state: self.processing_state}) } self.prefetch_if_needed(); Ok(((self.prefetch_data >> ((2 - ((self.pc - 2) & 2))<<3)) & 0xffff) as u16) } pub fn push_32(&mut self, value: u32) { let new_sp = (Wrapping(self.dar[15]) - Wrapping(4)).0; self.dar[15] = new_sp; self.write_data_long(new_sp, value); } pub fn push_16(&mut self, value: u16) { let new_sp = (Wrapping(self.dar[15]) - Wrapping(2)).0; self.dar[15] = new_sp; self.write_data_word(new_sp, value as u32); } pub fn read_data_byte(&mut self, address: u32) -> Result<u32> { let address_space = if self.s_flag != 0 
{SUPERVISOR_DATA} else {USER_DATA}; Ok(self.mem.read_byte(address_space, address)) } pub fn read_program_byte(&mut self, address: u32) -> Result<u32> { let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM}; Ok(self.mem.read_byte(address_space, address)) } pub fn write_data_byte(&mut self, address: u32, value: u32) { let address_space = if self.s_flag != 0 {SUPERVISOR_DATA} else {USER_DATA}; self.mem.write_byte(address_space, address, value); } pub fn write_program_byte(&mut self, address: u32, value: u32) { let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM}; self.mem.write_byte(address_space, address, value); } pub fn read_data_word(&mut self, address: u32) -> Result<u32> { let address_space = if self.s_flag != 0 {SUPERVISOR_DATA} else {USER_DATA}; if address & 1 > 0 { Err(Exception::AddressError{address: address, access_type: AccessType::Read, address_space: address_space, processing_state: self.processing_state}) } else { Ok(self.mem.read_word(address_space, address)) } } pub fn read_program_word(&mut self, address: u32) -> Result<u32> { let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM}; if address & 1 > 0 { Err(Exception::AddressError {address: address, access_type: AccessType::Read, address_space: address_space, processing_state: self.processing_state}) } else { Ok(self.mem.read_word(address_space, address)) } } pub fn write_data_word(&mut self, address: u32, value: u32) { let address_space = if self.s_flag != 0 {SUPERVISOR_DATA} else {USER_DATA}; if address & 1 > 0 { panic!("Address error, odd write address at {:08x} {:?}", address, address_space); } self.mem.write_word(address_space, address, value); } pub fn write_program_word(&mut self, address: u32, value: u32) { let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM}; if address & 1 > 0 { panic!("Address error, odd write address at {:08x} {:?}", address, address_space); } 
self.mem.write_word(address_space, address, value); } pub fn read_data_long(&mut self, address: u32) -> Result<u32> { let address_space = if self.s_flag != 0 {SUPERVISOR_DATA} else {USER_DATA}; if address & 1 > 0 { Err(Exception::AddressError{address: address, access_type: AccessType::Read, address_space: address_space, processing_state: self.processing_state}) } else { Ok(self.mem.read_long(address_space, address)) } } pub fn read_program_long(&mut self, address: u32) -> Result<u32> { let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM}; if address & 1 > 0 { Err(Exception::AddressError{address: address, access_type: AccessType::Read, address_space: address_space, processing_state: self.processing_state}) } else { Ok(self.mem.read_long(address_space, address)) } } pub fn write_data_long(&mut self, address: u32, value: u32) { let address_space = if self.s_flag != 0 {SUPERVISOR_DATA} else {USER_DATA}; if address & 1 > 0 { panic!("Address error, odd write address at {:08x} {:?}", address, address_space); } self.mem.write_long(address_space, address, value); } pub fn write_program_long(&mut self, address: u32, value: u32) { let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM}; if address & 1 > 0 { panic!("Address error, odd write address at {:08x} {:?}", address, address_space); } self.mem.write_long(address_space, address, value); } pub fn jump(&mut self, pc: u32) { self.pc = pc; } pub fn jump_vector(&mut self, vector: u32) { let vector_address = vector<<2; self.pc = self.read_data_long(vector_address).unwrap(); } pub fn handle_address_error(&mut self, bad_address: u32, access_type: AccessType, processing_state: ProcessingState, address_space: AddressSpace) -> Cycles { self.processing_state = ProcessingState::Exception; let backup_sr = self.status_register(); // enter supervisor mode self.s_flag = SFLAG_SET; // Bus error stack frame (68000 only). 
let (pc, ir) = (self.pc, self.ir); self.push_32(pc); self.push_16(backup_sr as u16); self.push_16(ir); self.push_32(bad_address); /* access address */ /* 0 0 0 0 0 0 0 0 0 0 0 R/W I/N FC * R/W 0 = write, 1 = read * I/N 0 = instruction, 1 = not * FC 3-bit function code */ let access_info = match access_type {AccessType::Read => 0b10000, _ => 0 } | match processing_state {ProcessingState::Normal => 0, _ => 0b01000 } | address_space.fc(); self.push_16(access_info); self.jump_vector(EXCEPTION_ADDRESS_ERROR); self.processing_state = ProcessingState::Normal; Cycles(50) } pub fn handle_illegal_instruction(&mut self, pc: u32) -> Cycles { self.processing_state = ProcessingState::Exception; let backup_sr = self.status_register(); // enter supervisor mode self.s_flag = SFLAG_SET; // Group 1 and 2 stack frame (68000 only). self.push_32(pc); self.push_16(backup_sr as u16); self.jump_vector(EXCEPTION_ILLEGAL_INSTRUCTION); self.processing_state = ProcessingState::Normal; Cycles(34) } pub fn execute1(&mut self) -> Cycles { self.execute(1) } pub fn execute(&mut self, cycles: i32) -> Cycles { let cycles = Cycles(cycles); let mut remaining_cycles = cycles; while remaining_cycles.any() { // Read an instruction from PC (increments PC by 2) let result = self.read_imm_u16().and_then(|opcode| { self.ir = opcode; // Call instruction handler to mutate Core accordingly self.ophandlers[opcode as usize](self) }); remaining_cycles = remaining_cycles - match result { Ok(cycles_used) => cycles_used, Err(Exception::AddressError { address, access_type, processing_state, address_space }) => self.handle_address_error(address, access_type, processing_state, address_space), Err(Exception::IllegalInstruction(_, pc)) => self.handle_illegal_instruction(pc), }; } cycles - remaining_cycles } } impl Clone for Core { fn clone(&self) -> Self { let mut lm = LoggingMem::new(0xaaaaaaaa, OpsLogger::new()); lm.copy_from(&self.mem); assert_eq!(0, lm.logger.len()); Core { pc: self.pc, prefetch_addr: 0, prefetch_data: 
0, inactive_ssp: self.inactive_ssp, inactive_usp: self.inactive_usp, ir: self.ir, processing_state: self.processing_state, dar: self.dar, mem: lm, ophandlers: ops::instruction_set(), s_flag: self.s_flag, int_mask: self.int_mask, x_flag: self.x_flag, v_flag: self.v_flag, c_flag: self.c_flag, n_flag: self.n_flag, not_z_flag: self.not_z_flag } } } #[cfg(test)] mod tests { use super::{Core, Cycles}; use super::ops; //::instruction_set; use ram::{AddressBus, Operation, SUPERVISOR_PROGRAM, USER_PROGRAM, USER_DATA}; #[test] fn new_sets_pc() { let cpu = Core::new(256); assert_eq!(256, cpu.pc); } #[test] fn new_mem_sets_pc_and_mem() { let base = 128; let cpu = Core::new_mem(base, &[1u8, 2u8, 3u8, 4u8, 5u8, 6u8]); assert_eq!(128, cpu.pc); assert_eq!(1, cpu.mem.read_byte(SUPERVISOR_PROGRAM, 128)); assert_eq!(2, cpu.mem.read_byte(SUPERVISOR_PROGRAM, 129)); } #[test] fn a_jump_changes_pc() { let mut cpu = Core::new(0); cpu.jump(128); assert_eq!(128, cpu.pc); } #[test] #[allow(unused_must_use)] fn a_read_imm_u32_changes_pc() { let base = 128; let mut cpu = Core::new(base); cpu.read_imm_u32(); assert_eq!(base+4, cpu.pc); } #[test] fn a_read_imm_u32_reads_from_pc() { let base = 128; let mut cpu = Core::new_mem(base, &[2u8, 1u8, 3u8, 4u8]); let val = cpu.read_imm_u32().unwrap(); assert_eq!((2<<24)+(1<<16)+(3<<8)+4, val); } #[test] #[allow(unused_must_use)] fn a_read_imm_u16_changes_pc() { let base = 128; let mut cpu = Core::new(base); cpu.read_imm_u16(); assert_eq!(base+2, cpu.pc); } #[test] fn a_read_imm_u16_reads_from_pc() { let base = 128; let mut cpu = Core::new_mem(base, &[2u8, 1u8, 3u8, 4u8]); assert_eq!("-S7-----", cpu.flags()); let val = cpu.read_imm_u16().unwrap(); assert_eq!((2<<8)+(1<<0), val); assert_eq!(Operation::ReadLong(SUPERVISOR_PROGRAM, base, 0x02010304), cpu.mem.logger.ops()[0]); } #[test] fn an_user_mode_read_imm_u16_is_reflected_in_mem_ops() { let base = 128; let mut cpu = Core::new_mem(base, &[2u8, 1u8, 3u8, 4u8]); cpu.s_flag = 0; assert_eq!("-U7-----", 
cpu.flags()); let val = cpu.read_imm_u16().unwrap(); assert_eq!((2<<8)+(1<<0), val); assert_eq!(Operation::ReadLong(USER_PROGRAM, base, 0x02010304), cpu.mem.logger.ops()[0]); } #[test] fn a_reset_reads_sp_and_pc_from_0() { let mut cpu = Core::new_mem(0, &[0u8,0u8,1u8,0u8, 0u8,0u8,0u8,128u8]); cpu.reset(); assert_eq!(256, cpu.dar[15]); assert_eq!(128, cpu.pc); assert_eq!("-S7-----", cpu.flags()); assert_eq!(Operation::ReadLong(SUPERVISOR_PROGRAM, 0, 0x100), cpu.mem.logger.ops()[0]); } #[test] fn execute_reads_from_pc_and_does_not_panic_on_illegal_instruction() { let mut cpu = Core::new_mem(0xba, &[0xba,0xd1,1u8,0u8, 0u8,0u8,0u8,128u8]); cpu.execute1(); } #[test] fn execute_does_not_panic_on_odd_pc() { let mut cpu = Core::new_mem(0xbd, &[0x00, 0x0a, 0x00, 0x00]); cpu.execute1(); } #[test] fn execute_can_execute_instruction_handler_0a() { let mut cpu = Core::new_mem(0xba, &[0x00, 0x0A, 1u8,0u8, 0u8,0u8,0u8,128u8]); cpu.execute1(); assert_eq!(0xabcd, cpu.dar[0]); assert_eq!(0x0000, cpu.dar[1]); } #[test] fn execute_can_execute_instruction_handler_0b() { let mut cpu = Core::new_mem(0xba, &[0x00, 0x0B, 1u8,0u8, 0u8,0u8,0u8,128u8]); cpu.execute1(); assert_eq!(0x0000, cpu.dar[0]); assert_eq!(0xbcde, cpu.dar[1]); } #[test] fn execute_can_execute_set_dx() { // first byte 40 is register D0 // 42 == D1 // 44 == D2 // 46 == D3 // 48 == D4 // 4a == D5 // 4c == D6 // 4e == D7 let mut cpu = Core::new_mem(0x40, &[0x4c, 0x00, 1u8, 0u8]); cpu.execute1(); assert_eq!(0xcdef, cpu.dar[6]); } #[test] fn array_elems() { let mut arr = [1, 2, 3, 4]; let mut marr = &mut arr; let mut elem: &mut i32 = &mut (marr[1]); // let mut elem2: &mut i32 = &mut (arr[2]); assert_eq!(2, *elem); *elem = 200; assert_eq!(200, *elem); // assert_eq!(200, &mut marr[1]); } #[test] fn cycle_counting() { // 0xc308 = abcd_8_mm taking 18 cycles let mut cpu = Core::new_mem(0x40, &[0xc3, 0x08]); cpu.ophandlers = ops::instruction_set(); let Cycles(count) = cpu.execute1(); assert_eq!(18, count); } #[test] fn 
cycle_counting_exec2() { // 0xc308 = abcd_8_mm taking 18 cycles let mut cpu = Core::new_mem(0x40, &[0xc3, 0x08, 0xc3, 0x08]); cpu.ophandlers = ops::instruction_set(); let Cycles(count) = cpu.execute(20); assert_eq!(18*2, count); } #[test] fn abcd_8_rr() { // opcodes c100 - c107, c300 - c307, etc. // or more generally c[13579bdf]0[0-7] // where [13579bdf] is DX (dest regno) and [0-7] is DY (src regno) // so c300 means D1 = D0 + D1 in BCD let mut cpu = Core::new_mem(0x40, &[0xc3, 0x00]); cpu.ophandlers = ops::instruction_set(); cpu.dar[0] = 0x16; cpu.dar[1] = 0x26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(0x42, cpu.dar[1]); } #[test] fn abcd_8_mm() { // opcodes c108 - c10f, c308 - c30f, etc. // or more generally c[13579bdf]0[8-f] // where [13579bdf] is AX (dest regno) and [8-f] is AY (src regno) // so c308 means A1 = A0 + A1 in BCD let mut cpu = Core::new_mem(0x40, &[0xc3, 0x08]); cpu.ophandlers = ops::instruction_set(); cpu.dar[8+0] = 0x160+1; cpu.dar[8+1] = 0x260+1; cpu.mem.write_byte(USER_DATA, 0x160, 0x16); cpu.mem.write_byte(USER_DATA, 0x260, 0x26); cpu.execute1(); let res = cpu.mem.read_byte(USER_DATA, 0x260); // 16 + 26 is 42 assert_eq!(0x42, res); } #[test] fn add_8_er_d() { // opcodes d000 - d007, d200 - d207, etc. // or more generally d[02468ace]0[0-7] // where [02468ace] is DX (dest regno) and [0-7] is DY (src regno) // opcodes d200 is ADD.B D0, D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x00]); cpu.ophandlers = ops::instruction_set(); cpu.dar[0] = 16; cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); } #[test] fn add_8_er_pi() { // opcodes d018 - d01f, d218 - d21f, etc. 
// or more generally d[02468ace]1[8-f] // where [02468ace] is DX (dest regno) and [8-f] is AY (src regno) // opcodes d218 is ADD.B (A0)+, D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x18]); cpu.ophandlers = ops::instruction_set(); let addr = 0x100; cpu.dar[8+0] = addr; cpu.mem.write_byte(USER_DATA, addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); assert_eq!(addr+1, cpu.dar[8+0]); } #[test] fn add_8_er_pd() { // opcodes d020 - d027, d220 - d227, etc. // or more generally d[02468ace]2[0-7] // where [02468ace] is DX (dest regno) and [0-7] is AY (src regno) // opcodes d220 is ADD.B -(A0), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x20]); cpu.ophandlers = ops::instruction_set(); let addr = 0x100; cpu.dar[8+0] = addr; cpu.mem.write_byte(USER_DATA, addr-1, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); assert_eq!(addr-1, cpu.dar[8+0]); } #[test] fn add_8_er_ai() { // opcodes d010 - d017, d210 - d217, etc. // or more generally d[02468ace]1[0-7] // where [02468ace] is DX (dest regno) and [0-7] is AY (src regno) // opcodes d210 is ADD.B (A0), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x10]); cpu.ophandlers = ops::instruction_set(); let addr = 0x100; cpu.dar[8+0] = addr; cpu.mem.write_byte(USER_DATA, addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); assert_eq!(addr, cpu.dar[8+0]); } #[test] fn add_8_er_di_with_positive_displacement() { // opcodes d028 - d02f, d228 - d22f, etc. 
// or more generally d[02468ace]2[8-f] // where [02468ace] is DX (dest regno) and [8-f] is AY (src regno) // opcodes d228,0108 is ADD.B (0x108, A0), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x28, 0x01, 0x08]); cpu.ophandlers = ops::instruction_set(); let addr = 0x100; cpu.dar[8+0] = addr; let displaced_addr = addr + 0x108; cpu.mem.write_byte(USER_DATA, displaced_addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); assert_eq!(addr, cpu.dar[8+0]); } #[test] fn add_8_er_di_with_negative_displacement() { // opcodes d028 - d02f, d228 - d22f, etc. followed by an extension word // or more generally d[02468ace]2[8-f] // where [02468ace] is DX (dest regno) and [8-f] is AY (src regno) // opcodes d228,FFFE is ADD.B (-2, A0), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x28, 0xFF, 0xFE]); cpu.ophandlers = ops::instruction_set(); let addr = 0x100; cpu.dar[8+0] = addr; let displaced_addr = addr - 2; cpu.mem.write_byte(USER_DATA, displaced_addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); assert_eq!(addr, cpu.dar[8+0]); } #[test] fn add_8_er_ix_with_positive_displacement() { // opcodes d030 - d037, d230 - d237, etc. followed by an extension word // or more generally d[02468ace]3[0-7] // where [02468ace] is DX (dest regno) and [0-7] is AY (src regno) // opcodes d230,9002 is ADD.B (2, A0, A1), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x30, 0x90, 0x02]); cpu.ophandlers = ops::instruction_set(); let addr = 0x100; let index = 0x10; let displacement = 2; cpu.dar[8+0] = addr; cpu.dar[8+1] = index; let effective_addr = addr + index + displacement; cpu.mem.write_byte(USER_DATA, effective_addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); assert_eq!(addr, cpu.dar[8+0]); } #[test] fn add_8_er_ix_with_negative_displacement() { // opcodes d030 - d037, d230 - d237, etc. 
followed by an extension word // or more generally d[02468ace]3[0-7] // where [02468ace] is DX (dest regno) and [0-7] is AY (src regno) // opcodes d230,90FE is ADD.B (-2, A0, A1), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x30, 0x90, 0xFE]); cpu.ophandlers = ops::instruction_set(); let addr = 0x100; let index = 0x10; let displacement = 2; cpu.dar[8+0] = addr; cpu.dar[8+1] = index; let effective_addr = addr + index - displacement; cpu.mem.write_byte(USER_DATA, effective_addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); assert_eq!(addr, cpu.dar[8+0]); } #[test] fn add_8_er_aw() { // opcodes d038, d238, d438, etc. followed by an extension word // or more generally d[02468ace]38 // where [02468ace] is DX (dest regno) and the extension word is // the 16-bit absolute address // opcodes d238,0108 is ADD.B $0108, D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x38, 0x01, 0x08]); cpu.ophandlers = ops::instruction_set(); cpu.mem.write_byte(USER_DATA, 0x108, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); } #[test] fn add_8_er_al() { // opcodes d039, d239, d439, etc. followed by two extension words // or more generally d[02468ace]39 // where [02468ace] is DX (dest regno) and the first extension // word is the high order word of the 32-bit absolute address, // and the second extension word is the low order word. // opcodes d239,0009,0000 is ADD.B $90000, D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x39, 0x00, 0x09, 0x00, 0x00]); cpu.ophandlers = ops::instruction_set(); cpu.mem.write_byte(USER_DATA, 0x90000, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); } #[test] fn add_8_er_pcdi() { // opcodes d03a, d23a, d43a, etc. 
followed by an extension word // or more generally d[02468ace]3a // where [02468ace] is DX (dest regno) // opcodes d23a,0108 is ADD.B ($0108, PC), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x3a, 0x01, 0x08]); cpu.ophandlers = ops::instruction_set(); let addr = 0x40+2+0x0108; cpu.mem.write_byte(USER_DATA, addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); } #[test] fn add_8_er_pcix() { // opcodes d03b, d23b, d43b, etc. followed by an extension word // or more generally d[02468ace]3b // where [02468ace] is DX (dest regno) // opcodes d23b,9002 is ADD.B (2, PC, A1), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x3b, 0x90, 0x02]); cpu.ophandlers = ops::instruction_set(); let addr = cpu.pc + 2; // will be +2 after reading instruction word let index = 0x10; let displacement = 2; cpu.dar[8+1] = index; let effective_addr = addr + index + displacement; cpu.mem.write_byte(USER_DATA, effective_addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); } #[test] fn add_8_er_imm() { // opcodes d03c, d23c, d43c, etc. 
followed by an extension word
    // or more generally d[02468ace]3c
    // where [02468ace] is DX (dest regno)
    // opcodes d23c,0010 is ADD.B #16, D1
        let mut cpu = Core::new_mem(0x40, &[0xd2, 0x3c, 0x00, 0x10]);
        cpu.ophandlers = ops::instruction_set();
        cpu.dar[1] = 26;
        cpu.execute1();

        // 16 + 26 is 42
        assert_eq!(42, cpu.dar[1]);
    }

    #[test]
    fn op_with_extension_word_moves_pc_past_extension_word() {
        let mut cpu = Core::new_mem(0x40, &[0xd2, 0x30, 0x90, 0xFE]);
        cpu.ophandlers = ops::instruction_set();
        cpu.execute1();

        // Opcode word + one extension word = 4 bytes past 0x40.
        assert_eq!(0x44, cpu.pc);
    }

    #[test]
    fn status_register_roundtrip(){
        let mut core = Core::new(0x40);
        //Status register bits are:
        // TTSM_0iii_000X_NZVC;
        let f=0b0000_1000_1110_0000; // these bits should always be zero
        let s=0b0010_0000_0000_0000;
        let i=0b0000_0111_0000_0000;
        let x=0b0000_0000_0001_0000;
        let n=0b0000_0000_0000_1000;
        let z=0b0000_0000_0000_0100;
        let v=0b0000_0000_0000_0010;
        let c=0b0000_0000_0000_0001;
        let flags = vec![x,n,z,v,c,f,s,i,0];
        for sf in flags {
            core.sr_to_flags(sf);
            let sr = core.status_register();
            // The always-zero bits must be masked away on round-trip.
            let expected = if sf == f {0} else {sf};
            assert_eq!(expected, sr);
        }
    }

    #[test]
    fn clones_have_independent_registers() {
        let mut core = Core::new(0x40);
        core.dar[1] = 0x16;

        let mut clone = core.clone();
        assert_eq!(0x16, core.dar[1]);
        assert_eq!(0x16, clone.dar[1]);

        clone.dar[1] = 0x32;
        assert_eq!(0x16, core.dar[1]);
        assert_eq!(0x32, clone.dar[1]);
    }
}
Simplify one-liner with map
// type alias for exception handling
use std::result;
pub type Result<T> = result::Result<T, Exception>;
// An instruction handler mutates the Core and reports cycles consumed.
pub type Handler = fn(&mut Core) -> Result<Cycles>;
// One handler per 16-bit opcode value (indexed by the opcode itself).
pub type InstructionSet = Vec<Handler>;

use ram::{LoggingMem, AddressBus, OpsLogger, SUPERVISOR_PROGRAM, SUPERVISOR_DATA, USER_PROGRAM, USER_DATA};

pub mod ops;
mod effective_address;
mod operator;

/// Emulated 68000 CPU core: registers, flag state and attached memory.
pub struct Core {
    pub pc: u32,
    pub inactive_ssp: u32, // when in user mode
    pub inactive_usp: u32, // when in supervisor mode
    // Instruction register: the opcode word currently being executed.
    pub ir: u16,
    // D0-D7 followed by A0-A7; dar[15] (A7) is the active stack pointer.
    pub dar: [u32; 16],
    pub ophandlers: InstructionSet,
    pub s_flag: u32,
    pub int_mask:
u32,
    // Flag registers use Musashi-style encodings (see the *FLAG_SET consts
    // below), not their literal SR/CCR bit positions.
    pub x_flag: u32,
    pub c_flag: u32,
    pub v_flag: u32,
    pub n_flag: u32,
    pub prefetch_addr: u32,
    pub prefetch_data: u32,
    // Inverted zero flag: nonzero means the last result was non-zero.
    pub not_z_flag: u32,
    pub processing_state: ProcessingState,
    pub mem: LoggingMem<OpsLogger>,
}

/// A (signed) count of CPU clock cycles.
#[derive(Clone, Copy)]
pub struct Cycles(i32);

use std::ops::Sub;

impl Sub for Cycles {
    type Output = Cycles;
    fn sub(self, _rhs: Cycles) -> Cycles {
        Cycles(self.0 - _rhs.0)
    }
}

impl Cycles {
    // True while there is at least one cycle left to spend.
    fn any(self) -> bool {
        self.0 > 0
    }
}

#[derive(Clone, Copy, Debug)]
pub enum ProcessingState { Normal, Exception }

#[derive(Clone, Copy, Debug)]
pub enum AccessType {Read, Write}

use ram::AddressSpace;

/// Faults raised while executing an instruction; turned into 68000
/// exception processing by `execute`.
#[derive(Debug)]
pub enum Exception {
    AddressError { address: u32, access_type: AccessType, processing_state: ProcessingState, address_space: AddressSpace},
    // (instruction word, PC at which it was fetched)
    IllegalInstruction(u16, u32)
}

use std::fmt;

impl fmt::Display for Exception {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Exception::AddressError { address, access_type, processing_state, address_space } =>
                write!(f, "Address Error: {:?} {:?} at {:08x} during {:?} processing", access_type, address_space, address, processing_state),
            Exception::IllegalInstruction(ic, pc) =>
                write!(f, "Illegal Instruction {:04x} at {:08x}", ic, pc),
        }
    }
}

use std::error;

impl error::Error for Exception {
    fn description(&self) -> &str {
        match *self {
            Exception::AddressError{..} => "Address Error",
            Exception::IllegalInstruction(_, _) => "Illegal Instruction",
        }
    }
    fn cause(&self) -> Option<&error::Error> {
        None
    }
}

use std::num::Wrapping;

// these values are borrowed from Musashi
// and not yet fully understood
const SFLAG_SET: u32 = 0x04;
const XFLAG_SET: u32 = 0x100;
const NFLAG_SET: u32 = 0x80;
const VFLAG_SET: u32 = 0x80;
const CFLAG_SET: u32 = 0x100;

const CPU_SR_MASK: u32 = 0xa71f; /* T1 -- S -- -- I2 I1 I0 -- -- -- X N Z V C */
const CPU_SR_INT_MASK: u32 = 0x0700;

// Exception Vectors
//const EXCEPTION_BUS_ERROR: u32 = 2;
const EXCEPTION_ADDRESS_ERROR: u32 = 3;
const EXCEPTION_ILLEGAL_INSTRUCTION: u32 =
4;
// const EXCEPTION_ZERO_DIVIDE: u32 = 5;
// const EXCEPTION_CHK: u32 = 6;
// const EXCEPTION_TRAPV: u32 = 7;
// const EXCEPTION_PRIVILEGE_VIOLATION: u32 = 8;
// const EXCEPTION_TRACE: u32 = 9;
// const EXCEPTION_1010: u32 = 10;
// const EXCEPTION_1111: u32 = 11;
// const EXCEPTION_FORMAT_ERROR: u32 = 14;
// const EXCEPTION_UNINITIALIZED_INTERRUPT: u32 = 15;
// const EXCEPTION_SPURIOUS_INTERRUPT: u32 = 24;
// const EXCEPTION_INTERRUPT_AUTOVECTOR: u32 = 24;
// const EXCEPTION_TRAP_BASE: u32 = 32;

impl Core {
    /// Creates a core with PC at `base`, filler memory (0xaaaaaaaa) and the
    /// "fake" instruction set; starts in the Exception processing state.
    pub fn new(base: u32) -> Core {
        Core {
            pc: base, prefetch_addr: 0, prefetch_data: 0, inactive_ssp: 0, inactive_usp: 0, ir: 0,
            processing_state: ProcessingState::Exception,
            dar: [0u32; 16],
            mem: LoggingMem::new(0xaaaaaaaa, OpsLogger::new()),
            ophandlers: ops::fake::instruction_set(),
            s_flag: SFLAG_SET, int_mask: CPU_SR_INT_MASK,
            x_flag: 0, v_flag: 0, c_flag: 0, n_flag: 0, not_z_flag: 0xffffffff
        }
    }

    /// Creates a core with PC at `base` and `contents` copied into memory
    /// starting at `base`; starts in the Normal processing state.
    pub fn new_mem(base: u32, contents: &[u8]) -> Core {
        let mut lm = LoggingMem::new(0xaaaaaaaa, OpsLogger::new());
        for (offset, byte) in contents.iter().enumerate() {
            lm.write_u8(base + offset as u32, *byte as u32);
        }
        Core {
            pc: base, prefetch_addr: 0, prefetch_data: 0, inactive_ssp: 0, inactive_usp: 0, ir: 0,
            processing_state: ProcessingState::Normal,
            dar: [0u32; 16],
            mem: lm,
            ophandlers: ops::fake::instruction_set(),
            s_flag: SFLAG_SET, int_mask: CPU_SR_INT_MASK,
            x_flag: 0, v_flag: 0, c_flag: 0, n_flag: 0, not_z_flag: 0xffffffff
        }
    }

    /// 68000 reset sequence: enter supervisor mode, load the initial SSP
    /// from address 0 and the initial PC from address 4.
    pub fn reset(&mut self) {
        self.processing_state = ProcessingState::Exception;
        self.s_flag = SFLAG_SET;
        self.int_mask = CPU_SR_INT_MASK;
        self.prefetch_addr = 1; // non-zero, or the prefetch won't kick in
        self.jump(0);
        // these reads cannot possibly cause AddressError, as we forced PC to 0
        self.dar[15] = self.read_imm_u32().unwrap();
        let new_pc = self.read_imm_u32().unwrap();
        self.jump(new_pc);
        self.processing_state = ProcessingState::Normal;
    }

    // X flag normalized to 0 or 1 (it is stored in bit 8, see XFLAG_SET).
    pub fn x_flag_as_1(&self) -> u32 {
        (self.x_flag>>8)&1
    }

// admittely I've chosen to reuse Musashi's
representation of flags
// which I don't fully understand (they are not matching their
// positions in the SR/CCR)
    /// Packs the internal flag registers into the 68000 status-register
    /// layout (T?S?_?iii_???X_NZVC, see CPU_SR_MASK).
    pub fn status_register(&self) -> u32 {
        (self.s_flag << 11) |
        self.int_mask |
        ((self.x_flag & XFLAG_SET) >> 4) |
        ((self.n_flag & NFLAG_SET) >> 4) |
        ((not1!(self.not_z_flag)) << 2) |
        ((self.v_flag & VFLAG_SET) >> 6) |
        ((self.c_flag & CFLAG_SET) >> 8)
    }

    // User stack pointer: A7 while in user mode, otherwise the saved copy.
    pub fn usp(&self) -> u32 {
        if self.s_flag > 0 {
            self.inactive_usp
        } else {
            self.dar[15]
        }
    }

    // Supervisor stack pointer: A7 while in supervisor mode, otherwise the
    // saved copy.
    pub fn ssp(&self) -> u32 {
        if self.s_flag > 0 {
            self.dar[15]
        } else {
            self.inactive_ssp
        }
    }

    // admittedly I've chosen to reuse Musashi's representation of flags
    // which I don't fully understand (they are not matching their
    // positions in the SR/CCR)
    /// Unpacks a 68000 status-register value into the internal flag fields.
    pub fn sr_to_flags(&mut self, sr: u32) {
        let sr = sr & CPU_SR_MASK;
        self.int_mask = sr & CPU_SR_INT_MASK;
        self.s_flag = (sr >> 11) & SFLAG_SET;
        self.x_flag = (sr << 4) & XFLAG_SET;
        self.n_flag = (sr << 4) & NFLAG_SET;
        self.not_z_flag = not1!(sr & 0b00100);
        self.v_flag = (sr << 6) & VFLAG_SET;
        self.c_flag = (sr << 8) & CFLAG_SET;
        // println!("{} {:016b} {} {}", self.flags(), sr, self.not_z_flag, sr & 0b00100);
    }

    /// Human-readable flag string, e.g. "-S7-----": mode, IRQ mask, XNZVC.
    pub fn flags(&self) -> String {
        let sr = self.status_register();
        let supervisor = (sr >> 13) & 1;
        let irq_mask = (0x700 & sr) >> 8;

        format!("-{}{}{}{}{}{}{}",
            if supervisor > 0 {'S'} else {'U'},
            irq_mask,
            if 0 < (sr >> 4) & 1 {'X'} else {'-'},
            if 0 < (sr >> 3) & 1 {'N'} else {'-'},
            if 0 < (sr >> 2) & 1 {'Z'} else {'-'},
            if 0 < (sr >> 1) & 1 {'V'} else {'-'},
            if 0 < (sr ) & 1 {'C'} else {'-'})
    }

    // Refills the prefetch buffer with the long word containing PC when PC
    // has left the currently-buffered long; always advances PC by 2.
    // Returns whether a fresh long word was actually fetched.
    fn prefetch_if_needed(&mut self) -> bool {
        // does current PC overlap with fetched data
        let fetched = if self.pc & !3 != self.prefetch_addr {
            self.prefetch_addr = self.pc & !3;
            let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM};
            self.prefetch_data = self.mem.read_long(address_space, self.prefetch_addr);
            true
        } else {
            false
        };
        self.pc += 2;
        fetched
    }

    /// Reads a 32-bit immediate at PC via the prefetch buffer;
    /// fails with AddressError if PC is odd.
    pub fn read_imm_u32(&mut self) -> Result<u32> {
        if self.pc & 1 > 0 {
            let address_space = if
self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM};
            return Err(Exception::AddressError{address: self.pc, access_type: AccessType::Read, address_space: address_space, processing_state: self.processing_state})
        }
        self.prefetch_if_needed();
        let prev_prefetch_data = self.prefetch_data;
        // A long immediate may straddle two prefetched longs; if a second
        // fetch happened, stitch the two halves together.
        Ok(if self.prefetch_if_needed() {
            ((prev_prefetch_data << 16) | (self.prefetch_data >> 16)) & 0xffffffff
        } else {
            prev_prefetch_data
        })
    }

    /// Reads a 16-bit immediate at PC as a signed value.
    pub fn read_imm_i16(&mut self) -> Result<i16> {
        self.read_imm_u16().map(|val| val as i16)
    }

    /// Reads a 16-bit immediate at PC from the prefetch buffer;
    /// fails with AddressError if PC is odd.
    pub fn read_imm_u16(&mut self) -> Result<u16> {
        // the Musashi read_imm_16 calls cpu_read_long as part of prefetch
        if self.pc & 1 > 0 {
            let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM};
            return Err(Exception::AddressError{address: self.pc, access_type: AccessType::Read, address_space: address_space, processing_state: self.processing_state})
        }
        self.prefetch_if_needed();
        // Select the upper or lower half of the buffered long by PC parity
        // (bit 1 of the pre-increment PC chooses a shift of 16 or 0).
        Ok(((self.prefetch_data >> ((2 - ((self.pc - 2) & 2))<<3)) & 0xffff) as u16)
    }

    // Pushes a long word onto the active stack (A7), pre-decrementing by 4.
    pub fn push_32(&mut self, value: u32) {
        let new_sp = (Wrapping(self.dar[15]) - Wrapping(4)).0;
        self.dar[15] = new_sp;
        self.write_data_long(new_sp, value);
    }

    // Pushes a word onto the active stack (A7), pre-decrementing by 2.
    pub fn push_16(&mut self, value: u16) {
        let new_sp = (Wrapping(self.dar[15]) - Wrapping(2)).0;
        self.dar[15] = new_sp;
        self.write_data_word(new_sp, value as u32);
    }

    // Byte accesses have no alignment requirement, hence always Ok.
    pub fn read_data_byte(&mut self, address: u32) -> Result<u32> {
        let address_space = if self.s_flag != 0 {SUPERVISOR_DATA} else {USER_DATA};
        Ok(self.mem.read_byte(address_space, address))
    }

    pub fn read_program_byte(&mut self, address: u32) -> Result<u32> {
        let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM};
        Ok(self.mem.read_byte(address_space, address))
    }

    pub fn write_data_byte(&mut self, address: u32, value: u32) {
        let address_space = if self.s_flag != 0 {SUPERVISOR_DATA} else {USER_DATA};
        self.mem.write_byte(address_space, address, value);
    }

    pub fn write_program_byte(&mut self, address: u32, value: u32) {
        let address_space = if
self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM};
        self.mem.write_byte(address_space, address, value);
    }

    // Word/long accesses must be even-aligned: odd read addresses return
    // AddressError, odd write addresses panic (see write_data_word below).
    pub fn read_data_word(&mut self, address: u32) -> Result<u32> {
        let address_space = if self.s_flag != 0 {SUPERVISOR_DATA} else {USER_DATA};
        if address & 1 > 0 {
            Err(Exception::AddressError{address: address, access_type: AccessType::Read, address_space: address_space, processing_state: self.processing_state})
        } else {
            Ok(self.mem.read_word(address_space, address))
        }
    }

    pub fn read_program_word(&mut self, address: u32) -> Result<u32> {
        let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM};
        if address & 1 > 0 {
            Err(Exception::AddressError {address: address, access_type: AccessType::Read, address_space: address_space, processing_state: self.processing_state})
        } else {
            Ok(self.mem.read_word(address_space, address))
        }
    }

    pub fn write_data_word(&mut self, address: u32, value: u32) {
        let address_space = if self.s_flag != 0 {SUPERVISOR_DATA} else {USER_DATA};
        if address & 1 > 0 {
            panic!("Address error, odd write address at {:08x} {:?}", address, address_space);
        }
        self.mem.write_word(address_space, address, value);
    }

    pub fn write_program_word(&mut self, address: u32, value: u32) {
        let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM};
        if address & 1 > 0 {
            panic!("Address error, odd write address at {:08x} {:?}", address, address_space);
        }
        self.mem.write_word(address_space, address, value);
    }

    pub fn read_data_long(&mut self, address: u32) -> Result<u32> {
        let address_space = if self.s_flag != 0 {SUPERVISOR_DATA} else {USER_DATA};
        if address & 1 > 0 {
            Err(Exception::AddressError{address: address, access_type: AccessType::Read, address_space: address_space, processing_state: self.processing_state})
        } else {
            Ok(self.mem.read_long(address_space, address))
        }
    }

    pub fn read_program_long(&mut self, address: u32) -> Result<u32> {
        let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM};
        if address &
1 > 0 {
            Err(Exception::AddressError{address: address, access_type: AccessType::Read, address_space: address_space, processing_state: self.processing_state})
        } else {
            Ok(self.mem.read_long(address_space, address))
        }
    }

    pub fn write_data_long(&mut self, address: u32, value: u32) {
        let address_space = if self.s_flag != 0 {SUPERVISOR_DATA} else {USER_DATA};
        if address & 1 > 0 {
            panic!("Address error, odd write address at {:08x} {:?}", address, address_space);
        }
        self.mem.write_long(address_space, address, value);
    }

    pub fn write_program_long(&mut self, address: u32, value: u32) {
        let address_space = if self.s_flag != 0 {SUPERVISOR_PROGRAM} else {USER_PROGRAM};
        if address & 1 > 0 {
            panic!("Address error, odd write address at {:08x} {:?}", address, address_space);
        }
        self.mem.write_long(address_space, address, value);
    }

    pub fn jump(&mut self, pc: u32) {
        self.pc = pc;
    }

    // Loads PC from the exception vector table (vector number * 4).
    pub fn jump_vector(&mut self, vector: u32) {
        let vector_address = vector<<2;
        self.pc = self.read_data_long(vector_address).unwrap();
    }

    /// 68000 address-error exception: pushes the bus-error stack frame and
    /// jumps through vector 3. Returns the cycles consumed (50).
    pub fn handle_address_error(&mut self, bad_address: u32, access_type: AccessType, processing_state: ProcessingState, address_space: AddressSpace) -> Cycles {
        self.processing_state = ProcessingState::Exception;
        let backup_sr = self.status_register();
        // enter supervisor mode
        self.s_flag = SFLAG_SET;
        // Bus error stack frame (68000 only).
let (pc, ir) = (self.pc, self.ir);
        self.push_32(pc);
        self.push_16(backup_sr as u16);
        self.push_16(ir);
        self.push_32(bad_address); /* access address */
        /* 0 0 0 0 0 0 0 0 0 0 0 R/W I/N FC
         * R/W  0 = write, 1 = read
         * I/N  0 = instruction, 1 = not
         * FC   3-bit function code
         */
        let access_info = match access_type {AccessType::Read => 0b10000, _ => 0 }
            | match processing_state {ProcessingState::Normal => 0, _ => 0b01000 }
            | address_space.fc();
        self.push_16(access_info);
        self.jump_vector(EXCEPTION_ADDRESS_ERROR);
        self.processing_state = ProcessingState::Normal;
        Cycles(50)
    }

    /// 68000 illegal-instruction exception: pushes the group 1/2 stack
    /// frame and jumps through vector 4. Returns the cycles consumed (34).
    pub fn handle_illegal_instruction(&mut self, pc: u32) -> Cycles {
        self.processing_state = ProcessingState::Exception;
        let backup_sr = self.status_register();
        // enter supervisor mode
        self.s_flag = SFLAG_SET;
        // Group 1 and 2 stack frame (68000 only).
        self.push_32(pc);
        self.push_16(backup_sr as u16);
        self.jump_vector(EXCEPTION_ILLEGAL_INSTRUCTION);
        self.processing_state = ProcessingState::Normal;
        Cycles(34)
    }

    /// Executes (at least) one instruction.
    pub fn execute1(&mut self) -> Cycles {
        self.execute(1)
    }

    /// Runs instructions until at least `cycles` cycles have been spent,
    /// turning returned Exceptions into 68000 exception processing.
    /// Returns the cycles actually consumed (may exceed `cycles`).
    pub fn execute(&mut self, cycles: i32) -> Cycles {
        let cycles = Cycles(cycles);
        let mut remaining_cycles = cycles;
        while remaining_cycles.any() {
            // Read an instruction from PC (increments PC by 2)
            let result = self.read_imm_u16().and_then(|opcode| {
                self.ir = opcode;
                // Call instruction handler to mutate Core accordingly
                self.ophandlers[opcode as usize](self)
            });
            remaining_cycles = remaining_cycles - match result {
                Ok(cycles_used) => cycles_used,
                Err(Exception::AddressError { address, access_type, processing_state, address_space }) =>
                    self.handle_address_error(address, access_type, processing_state, address_space),
                Err(Exception::IllegalInstruction(_, pc)) => self.handle_illegal_instruction(pc),
            };
        }
        cycles - remaining_cycles
    }
}

// Deep clone: copies memory into a fresh LoggingMem (with an empty op log)
// and installs the real instruction set; registers/flags are copied as-is.
impl Clone for Core {
    fn clone(&self) -> Self {
        let mut lm = LoggingMem::new(0xaaaaaaaa, OpsLogger::new());
        lm.copy_from(&self.mem);
        assert_eq!(0, lm.logger.len());
        Core {
            pc: self.pc, prefetch_addr: 0, prefetch_data:
0, inactive_ssp: self.inactive_ssp, inactive_usp: self.inactive_usp, ir: self.ir, processing_state: self.processing_state, dar: self.dar, mem: lm, ophandlers: ops::instruction_set(), s_flag: self.s_flag, int_mask: self.int_mask, x_flag: self.x_flag, v_flag: self.v_flag, c_flag: self.c_flag, n_flag: self.n_flag, not_z_flag: self.not_z_flag } } } #[cfg(test)] mod tests { use super::{Core, Cycles}; use super::ops; //::instruction_set; use ram::{AddressBus, Operation, SUPERVISOR_PROGRAM, USER_PROGRAM, USER_DATA}; #[test] fn new_sets_pc() { let cpu = Core::new(256); assert_eq!(256, cpu.pc); } #[test] fn new_mem_sets_pc_and_mem() { let base = 128; let cpu = Core::new_mem(base, &[1u8, 2u8, 3u8, 4u8, 5u8, 6u8]); assert_eq!(128, cpu.pc); assert_eq!(1, cpu.mem.read_byte(SUPERVISOR_PROGRAM, 128)); assert_eq!(2, cpu.mem.read_byte(SUPERVISOR_PROGRAM, 129)); } #[test] fn a_jump_changes_pc() { let mut cpu = Core::new(0); cpu.jump(128); assert_eq!(128, cpu.pc); } #[test] #[allow(unused_must_use)] fn a_read_imm_u32_changes_pc() { let base = 128; let mut cpu = Core::new(base); cpu.read_imm_u32(); assert_eq!(base+4, cpu.pc); } #[test] fn a_read_imm_u32_reads_from_pc() { let base = 128; let mut cpu = Core::new_mem(base, &[2u8, 1u8, 3u8, 4u8]); let val = cpu.read_imm_u32().unwrap(); assert_eq!((2<<24)+(1<<16)+(3<<8)+4, val); } #[test] #[allow(unused_must_use)] fn a_read_imm_u16_changes_pc() { let base = 128; let mut cpu = Core::new(base); cpu.read_imm_u16(); assert_eq!(base+2, cpu.pc); } #[test] fn a_read_imm_u16_reads_from_pc() { let base = 128; let mut cpu = Core::new_mem(base, &[2u8, 1u8, 3u8, 4u8]); assert_eq!("-S7-----", cpu.flags()); let val = cpu.read_imm_u16().unwrap(); assert_eq!((2<<8)+(1<<0), val); assert_eq!(Operation::ReadLong(SUPERVISOR_PROGRAM, base, 0x02010304), cpu.mem.logger.ops()[0]); } #[test] fn an_user_mode_read_imm_u16_is_reflected_in_mem_ops() { let base = 128; let mut cpu = Core::new_mem(base, &[2u8, 1u8, 3u8, 4u8]); cpu.s_flag = 0; assert_eq!("-U7-----", 
cpu.flags()); let val = cpu.read_imm_u16().unwrap(); assert_eq!((2<<8)+(1<<0), val); assert_eq!(Operation::ReadLong(USER_PROGRAM, base, 0x02010304), cpu.mem.logger.ops()[0]); } #[test] fn a_reset_reads_sp_and_pc_from_0() { let mut cpu = Core::new_mem(0, &[0u8,0u8,1u8,0u8, 0u8,0u8,0u8,128u8]); cpu.reset(); assert_eq!(256, cpu.dar[15]); assert_eq!(128, cpu.pc); assert_eq!("-S7-----", cpu.flags()); assert_eq!(Operation::ReadLong(SUPERVISOR_PROGRAM, 0, 0x100), cpu.mem.logger.ops()[0]); } #[test] fn execute_reads_from_pc_and_does_not_panic_on_illegal_instruction() { let mut cpu = Core::new_mem(0xba, &[0xba,0xd1,1u8,0u8, 0u8,0u8,0u8,128u8]); cpu.execute1(); } #[test] fn execute_does_not_panic_on_odd_pc() { let mut cpu = Core::new_mem(0xbd, &[0x00, 0x0a, 0x00, 0x00]); cpu.execute1(); } #[test] fn execute_can_execute_instruction_handler_0a() { let mut cpu = Core::new_mem(0xba, &[0x00, 0x0A, 1u8,0u8, 0u8,0u8,0u8,128u8]); cpu.execute1(); assert_eq!(0xabcd, cpu.dar[0]); assert_eq!(0x0000, cpu.dar[1]); } #[test] fn execute_can_execute_instruction_handler_0b() { let mut cpu = Core::new_mem(0xba, &[0x00, 0x0B, 1u8,0u8, 0u8,0u8,0u8,128u8]); cpu.execute1(); assert_eq!(0x0000, cpu.dar[0]); assert_eq!(0xbcde, cpu.dar[1]); } #[test] fn execute_can_execute_set_dx() { // first byte 40 is register D0 // 42 == D1 // 44 == D2 // 46 == D3 // 48 == D4 // 4a == D5 // 4c == D6 // 4e == D7 let mut cpu = Core::new_mem(0x40, &[0x4c, 0x00, 1u8, 0u8]); cpu.execute1(); assert_eq!(0xcdef, cpu.dar[6]); } #[test] fn array_elems() { let mut arr = [1, 2, 3, 4]; let mut marr = &mut arr; let mut elem: &mut i32 = &mut (marr[1]); // let mut elem2: &mut i32 = &mut (arr[2]); assert_eq!(2, *elem); *elem = 200; assert_eq!(200, *elem); // assert_eq!(200, &mut marr[1]); } #[test] fn cycle_counting() { // 0xc308 = abcd_8_mm taking 18 cycles let mut cpu = Core::new_mem(0x40, &[0xc3, 0x08]); cpu.ophandlers = ops::instruction_set(); let Cycles(count) = cpu.execute1(); assert_eq!(18, count); } #[test] fn 
cycle_counting_exec2() { // 0xc308 = abcd_8_mm taking 18 cycles let mut cpu = Core::new_mem(0x40, &[0xc3, 0x08, 0xc3, 0x08]); cpu.ophandlers = ops::instruction_set(); let Cycles(count) = cpu.execute(20); assert_eq!(18*2, count); } #[test] fn abcd_8_rr() { // opcodes c100 - c107, c300 - c307, etc. // or more generally c[13579bdf]0[0-7] // where [13579bdf] is DX (dest regno) and [0-7] is DY (src regno) // so c300 means D1 = D0 + D1 in BCD let mut cpu = Core::new_mem(0x40, &[0xc3, 0x00]); cpu.ophandlers = ops::instruction_set(); cpu.dar[0] = 0x16; cpu.dar[1] = 0x26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(0x42, cpu.dar[1]); } #[test] fn abcd_8_mm() { // opcodes c108 - c10f, c308 - c30f, etc. // or more generally c[13579bdf]0[8-f] // where [13579bdf] is AX (dest regno) and [8-f] is AY (src regno) // so c308 means A1 = A0 + A1 in BCD let mut cpu = Core::new_mem(0x40, &[0xc3, 0x08]); cpu.ophandlers = ops::instruction_set(); cpu.dar[8+0] = 0x160+1; cpu.dar[8+1] = 0x260+1; cpu.mem.write_byte(USER_DATA, 0x160, 0x16); cpu.mem.write_byte(USER_DATA, 0x260, 0x26); cpu.execute1(); let res = cpu.mem.read_byte(USER_DATA, 0x260); // 16 + 26 is 42 assert_eq!(0x42, res); } #[test] fn add_8_er_d() { // opcodes d000 - d007, d200 - d207, etc. // or more generally d[02468ace]0[0-7] // where [02468ace] is DX (dest regno) and [0-7] is DY (src regno) // opcodes d200 is ADD.B D0, D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x00]); cpu.ophandlers = ops::instruction_set(); cpu.dar[0] = 16; cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); } #[test] fn add_8_er_pi() { // opcodes d018 - d01f, d218 - d21f, etc. 
// or more generally d[02468ace]1[8-f] // where [02468ace] is DX (dest regno) and [8-f] is AY (src regno) // opcodes d218 is ADD.B (A0)+, D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x18]); cpu.ophandlers = ops::instruction_set(); let addr = 0x100; cpu.dar[8+0] = addr; cpu.mem.write_byte(USER_DATA, addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); assert_eq!(addr+1, cpu.dar[8+0]); } #[test] fn add_8_er_pd() { // opcodes d020 - d027, d220 - d227, etc. // or more generally d[02468ace]2[0-7] // where [02468ace] is DX (dest regno) and [0-7] is AY (src regno) // opcodes d220 is ADD.B -(A0), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x20]); cpu.ophandlers = ops::instruction_set(); let addr = 0x100; cpu.dar[8+0] = addr; cpu.mem.write_byte(USER_DATA, addr-1, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); assert_eq!(addr-1, cpu.dar[8+0]); } #[test] fn add_8_er_ai() { // opcodes d010 - d017, d210 - d217, etc. // or more generally d[02468ace]1[0-7] // where [02468ace] is DX (dest regno) and [0-7] is AY (src regno) // opcodes d210 is ADD.B (A0), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x10]); cpu.ophandlers = ops::instruction_set(); let addr = 0x100; cpu.dar[8+0] = addr; cpu.mem.write_byte(USER_DATA, addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); assert_eq!(addr, cpu.dar[8+0]); } #[test] fn add_8_er_di_with_positive_displacement() { // opcodes d028 - d02f, d228 - d22f, etc. 
// or more generally d[02468ace]2[8-f] // where [02468ace] is DX (dest regno) and [8-f] is AY (src regno) // opcodes d228,0108 is ADD.B (0x108, A0), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x28, 0x01, 0x08]); cpu.ophandlers = ops::instruction_set(); let addr = 0x100; cpu.dar[8+0] = addr; let displaced_addr = addr + 0x108; cpu.mem.write_byte(USER_DATA, displaced_addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); assert_eq!(addr, cpu.dar[8+0]); } #[test] fn add_8_er_di_with_negative_displacement() { // opcodes d028 - d02f, d228 - d22f, etc. followed by an extension word // or more generally d[02468ace]2[8-f] // where [02468ace] is DX (dest regno) and [8-f] is AY (src regno) // opcodes d228,FFFE is ADD.B (-2, A0), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x28, 0xFF, 0xFE]); cpu.ophandlers = ops::instruction_set(); let addr = 0x100; cpu.dar[8+0] = addr; let displaced_addr = addr - 2; cpu.mem.write_byte(USER_DATA, displaced_addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); assert_eq!(addr, cpu.dar[8+0]); } #[test] fn add_8_er_ix_with_positive_displacement() { // opcodes d030 - d037, d230 - d237, etc. followed by an extension word // or more generally d[02468ace]3[0-7] // where [02468ace] is DX (dest regno) and [0-7] is AY (src regno) // opcodes d230,9002 is ADD.B (2, A0, A1), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x30, 0x90, 0x02]); cpu.ophandlers = ops::instruction_set(); let addr = 0x100; let index = 0x10; let displacement = 2; cpu.dar[8+0] = addr; cpu.dar[8+1] = index; let effective_addr = addr + index + displacement; cpu.mem.write_byte(USER_DATA, effective_addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); assert_eq!(addr, cpu.dar[8+0]); } #[test] fn add_8_er_ix_with_negative_displacement() { // opcodes d030 - d037, d230 - d237, etc. 
followed by an extension word // or more generally d[02468ace]3[0-7] // where [02468ace] is DX (dest regno) and [0-7] is AY (src regno) // opcodes d230,90FE is ADD.B (-2, A0, A1), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x30, 0x90, 0xFE]); cpu.ophandlers = ops::instruction_set(); let addr = 0x100; let index = 0x10; let displacement = 2; cpu.dar[8+0] = addr; cpu.dar[8+1] = index; let effective_addr = addr + index - displacement; cpu.mem.write_byte(USER_DATA, effective_addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); assert_eq!(addr, cpu.dar[8+0]); } #[test] fn add_8_er_aw() { // opcodes d038, d238, d438, etc. followed by an extension word // or more generally d[02468ace]38 // where [02468ace] is DX (dest regno) and the extension word is // the 16-bit absolute address // opcodes d238,0108 is ADD.B $0108, D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x38, 0x01, 0x08]); cpu.ophandlers = ops::instruction_set(); cpu.mem.write_byte(USER_DATA, 0x108, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); } #[test] fn add_8_er_al() { // opcodes d039, d239, d439, etc. followed by two extension words // or more generally d[02468ace]39 // where [02468ace] is DX (dest regno) and the first extension // word is the high order word of the 32-bit absolute address, // and the second extension word is the low order word. // opcodes d239,0009,0000 is ADD.B $90000, D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x39, 0x00, 0x09, 0x00, 0x00]); cpu.ophandlers = ops::instruction_set(); cpu.mem.write_byte(USER_DATA, 0x90000, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); } #[test] fn add_8_er_pcdi() { // opcodes d03a, d23a, d43a, etc. 
followed by an extension word // or more generally d[02468ace]3a // where [02468ace] is DX (dest regno) // opcodes d23a,0108 is ADD.B ($0108, PC), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x3a, 0x01, 0x08]); cpu.ophandlers = ops::instruction_set(); let addr = 0x40+2+0x0108; cpu.mem.write_byte(USER_DATA, addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); } #[test] fn add_8_er_pcix() { // opcodes d03b, d23b, d43b, etc. followed by an extension word // or more generally d[02468ace]3b // where [02468ace] is DX (dest regno) // opcodes d23b,9002 is ADD.B (2, PC, A1), D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x3b, 0x90, 0x02]); cpu.ophandlers = ops::instruction_set(); let addr = cpu.pc + 2; // will be +2 after reading instruction word let index = 0x10; let displacement = 2; cpu.dar[8+1] = index; let effective_addr = addr + index + displacement; cpu.mem.write_byte(USER_DATA, effective_addr, 16); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); } #[test] fn add_8_er_imm() { // opcodes d03c, d23c, d43c, etc. 
followed by an extension word // or more generally d[02468ace]3c // where [02468ace] is DX (dest regno) // opcodes d23c,0010 is ADD.B #16, D1 let mut cpu = Core::new_mem(0x40, &[0xd2, 0x3c, 0x00, 0x10]); cpu.ophandlers = ops::instruction_set(); cpu.dar[1] = 26; cpu.execute1(); // 16 + 26 is 42 assert_eq!(42, cpu.dar[1]); } #[test] fn op_with_extension_word_moves_pc_past_extension_word() { let mut cpu = Core::new_mem(0x40, &[0xd2, 0x30, 0x90, 0xFE]); cpu.ophandlers = ops::instruction_set(); cpu.execute1(); assert_eq!(0x44, cpu.pc); } #[test] fn status_register_roundtrip(){ let mut core = Core::new(0x40); //Status register bits are: // TTSM_0iii_000X_NZVC; let f=0b0000_1000_1110_0000; // these bits should always be zero let s=0b0010_0000_0000_0000; let i=0b0000_0111_0000_0000; let x=0b0000_0000_0001_0000; let n=0b0000_0000_0000_1000; let z=0b0000_0000_0000_0100; let v=0b0000_0000_0000_0010; let c=0b0000_0000_0000_0001; let flags = vec![x,n,z,v,c,f,s,i,0]; for sf in flags { core.sr_to_flags(sf); let sr = core.status_register(); let expected = if sf == f {0} else {sf}; assert_eq!(expected, sr); } } #[test] fn clones_have_independent_registers() { let mut core = Core::new(0x40); core.dar[1] = 0x16; let mut clone = core.clone(); assert_eq!(0x16, core.dar[1]); assert_eq!(0x16, clone.dar[1]); clone.dar[1] = 0x32; assert_eq!(0x16, core.dar[1]); assert_eq!(0x32, clone.dar[1]); } }
// removed false assertion in test stub: an earlier revision of this module
// asserted `false`, making the stub fail unconditionally. The span also
// contained a duplicate `mod tests` definition; only the fixed stub remains.
#[cfg(test)]
mod tests {
    #[test]
    fn it_works() {
        // Intentionally empty: placeholder proving the test harness runs.
    }
}
mod common;

use std::convert::TryFrom;
use std::env::{self, VarError};
use std::mem::size_of;
use std::process::{exit, Child, Command, ExitStatus};
use std::sync::{Arc, Barrier};
use std::thread;
use std::time::Duration;

use rand::Rng;

use sled::Config;

use common::cleanup;

// Env var that tells a child process which crash scenario to run.
const TEST_ENV_VAR: &str = "SLED_CRASH_TEST";
const N_TESTS: usize = 100;
// Keys/values cycle through this many distinct states (see verify below).
const CYCLE: usize = 256;
const BATCH_SIZE: u32 = 8;
const SEGMENT_SIZE: usize = 1024;

// test names, also used as dir names
const RECOVERY_DIR: &str = "crash_recovery";
const BATCHES_DIR: &str = "crash_batches";
const ITER_DIR: &str = "crash_iter";
const TX_DIR: &str = "crash_tx";

const CRASH_CHANCE: u32 = 250;

/// Entry point. When TEST_ENV_VAR is unset this is the parent process and
/// drives every crash scenario; when it names a scenario dir, this process
/// is a child and runs only that scenario's workload.
fn main() {
    // Don't actually run this harness=false test under miri, as it requires
    // spawning and killing child processes.
    if cfg!(miri) {
        return;
    }

    common::setup_logger();

    match env::var(TEST_ENV_VAR) {
        Err(VarError::NotPresent) => {
            crash_recovery();
            crash_batches();
            concurrent_crash_iter();
            concurrent_crash_transactions();
        }
        Ok(ref s) if s == RECOVERY_DIR => run(),
        Ok(ref s) if s == BATCHES_DIR => run_batches(),
        Ok(ref s) if s == ITER_DIR => run_iter(),
        Ok(ref s) if s == TX_DIR => run_tx(),
        Ok(_) | Err(_) => panic!("invalid crash test case"),
    }
}

/// Verifies that the keys in the tree are correctly recovered.
/// Panics if they are incorrect.
/// Returns the key that should be resumed at, and the current cycle value.
fn verify(tree: &sled::Tree) -> (u32, u32) {
    // key 0 should always be the highest value, as that's where we increment
    // at some point, it might go down by one
    // it should never return, or go down again after that
    let mut iter = tree.iter();
    let highest = match iter.next() {
        // Values are big-endian u32 cycle counters (see u32_to_vec below).
        Some(Ok((_k, v))) => slice_to_u32(&*v),
        Some(Err(e)) => panic!("{:?}", e),
        // Empty tree: resume from key 0, cycle 0.
        None => return (0, 0),
    };
    let highest_vec = u32_to_vec(highest);

    // find how far we got
    let mut contiguous: u32 = 0;
    let mut lowest = 0;
    for res in iter {
        let (_k, v) = res.unwrap();
        // Compare only the 4-byte counter prefix; values may carry padding.
        if v[..4] == highest_vec[..4] {
            contiguous += 1;
        } else {
            // The first mismatching value must be exactly one cycle behind
            // (wrapping at the CYCLE boundary), otherwise recovery lost data.
            let expected = if highest == 0 {
                CYCLE as u32 - 1
            } else {
                (highest - 1) % CYCLE as u32
            };
            let actual = slice_to_u32(&*v);
            assert_eq!(expected, actual);
            lowest = actual;
            break;
        }
    }

    // ensure nothing changes after this point
    let low_beginning = u32_to_vec(contiguous + 1);

    for res in tree.range(&*low_beginning..) {
        let (k, v): (sled::IVec, _) = res.unwrap();
        assert_eq!(
            slice_to_u32(&*v),
            lowest,
            "expected key {} to have value {}, instead it had value {} in db: {:?}",
            slice_to_u32(&*k),
            lowest,
            slice_to_u32(&*v),
            tree
        );
    }

    tree.verify_integrity().unwrap();

    (contiguous, highest)
}

// Encodes a u32 as its 4 big-endian bytes (big-endian keeps the tree's
// lexicographic key order equal to numeric order).
fn u32_to_vec(u: u32) -> Vec<u8> {
    let buf: [u8; size_of::<u32>()] = u.to_be_bytes();
    buf.to_vec()
}

// Decodes the first 4 bytes of a value back into a big-endian u32;
// trailing padding bytes are ignored.
fn slice_to_u32(b: &[u8]) -> u32 {
    let mut buf = [0u8; size_of::<u32>()];
    buf.copy_from_slice(&b[..size_of::<u32>()]);
    u32::from_be_bytes(buf)
}

// Kills this process with exit code 9 after a random 0-59ms delay; the parent
// treats exit code 9 as the expected "crashed on purpose" outcome.
// NOTE: two-argument gen_range(low, high) is the pre-0.7 rand API.
fn spawn_killah() {
    thread::spawn(|| {
        let runtime = rand::thread_rng().gen_range(0, 60);
        thread::sleep(Duration::from_millis(runtime));
        exit(9);
    });
}

// Child workload for the basic recovery scenario: verify what survived the
// previous kill, then keep writing monotonically until killed again.
fn run_inner(config: Config) {
    // 10% of runs schedule the kill before open() to exercise crashes during
    // recovery/initialization itself.
    let crash_during_initialization = rand::thread_rng().gen_bool(0.1);

    if crash_during_initialization {
        spawn_killah();
    }

    let tree = config.open().unwrap();

    if !crash_during_initialization {
        spawn_killah();
    }

    // hu linearizes (cycle, key) into a single counter: hu = highest*CYCLE + key.
    let (key, highest) = verify(&tree);
    let mut hu = ((highest as usize) * CYCLE) + key as usize;
    assert_eq!(hu % CYCLE, key as usize);
    assert_eq!(hu / CYCLE, highest as usize);

    loop {
        hu
+= 1;
        // Wrap the linearized counter once a full CYCLE of cycles completes.
        if hu / CYCLE >= CYCLE {
            hu = 0;
        }

        let key = u32_to_vec((hu % CYCLE) as u32);

        // Value = cycle number, plus random padding so writes land at varied
        // offsets relative to the (small) segment size.
        let mut value = u32_to_vec((hu / CYCLE) as u32);
        let additional_len = rand::thread_rng().gen_range(0, SEGMENT_SIZE / 3);
        value.append(&mut vec![0u8; additional_len]);

        tree.insert(&key, value).unwrap();
    }
}

/// Verifies that the keys in the tree are correctly recovered (i.e., equal).
/// Panics if they are incorrect.
fn verify_batches(tree: &sled::Tree) -> u32 {
    let mut iter = tree.iter();
    let first_value = match iter.next() {
        Some(Ok((_k, v))) => slice_to_u32(&*v),
        Some(Err(e)) => panic!("{:?}", e),
        None => return 0,
    };
    // Batches are applied atomically, so after recovery every key in the
    // batch must carry the same value as the first one.
    for key in 0..BATCH_SIZE {
        let res = tree.get(u32_to_vec(key));
        let option = res.unwrap();
        let v = match option {
            Some(v) => v,
            None => panic!(
                "expected key {} to have a value, instead it was missing in db: {:?}",
                key, tree
            ),
        };
        let value = slice_to_u32(&*v);
        assert_eq!(
            first_value, value,
            "expected key {} to have value {}, instead it had value {} in db: {:?}",
            key, first_value, value, tree
        );
    }

    tree.verify_integrity().unwrap();

    first_value
}

// Child workload for the batch scenario: repeatedly apply atomic batches
// (usually inserts, 10% of the time remove-all) until killed.
fn run_batches_inner(db: sled::Db) {
    // Applies one atomic batch stamped with counter i across all keys.
    fn do_batch(i: u32, db: &sled::Db) {
        let mut rng = rand::thread_rng();
        let base_value = u32_to_vec(i);

        let mut batch = sled::Batch::default();
        if rng.gen_bool(0.1) {
            for key in 0..BATCH_SIZE {
                batch.remove(u32_to_vec(key));
            }
        } else {
            for key in 0..BATCH_SIZE {
                let mut value = base_value.clone();
                // Random padding, as in run_inner, to vary segment layout.
                let additional_len = rng.gen_range(0, SEGMENT_SIZE / 3);
                value.append(&mut vec![0u8; additional_len]);
                batch.insert(u32_to_vec(key), value);
            }
        }

        db.apply_batch(batch).unwrap();
    }

    let mut i = verify_batches(&db);
    i += 1;
    do_batch(i, &db);

    loop {
        i += 1;
        do_batch(i, &db);
    }
}

// Child entry point for the recovery scenario. Exit code 15 signals a worker
// panic (a real failure) as opposed to the expected kill (exit code 9).
fn run() {
    let config = Config::new()
        .cache_capacity(128 * 1024 * 1024)
        .flush_every_ms(Some(1))
        .path(RECOVERY_DIR.to_string())
        .segment_size(SEGMENT_SIZE);

    if let Err(e) = thread::spawn(|| run_inner(config)).join() {
        println!("worker thread failed: {:?}", e);
        std::process::exit(15);
    }
}

// Child entry point for the batch scenario.
fn run_batches() {
    let
crash_during_initialization = rand::thread_rng().gen_ratio(1, 10);

    // As in run_inner: sometimes schedule the kill before open() so crashes
    // during recovery itself get exercised.
    if crash_during_initialization {
        spawn_killah();
    }

    let config = Config::new()
        .cache_capacity(128 * 1024 * 1024)
        .flush_every_ms(Some(1))
        .path(BATCHES_DIR.to_string())
        .segment_size(SEGMENT_SIZE);

    let db = config.open().unwrap();
    // let db2 = db.clone();

    // NOTE(review): t2 is currently a stub — the second concurrent batch
    // writer (db2) is commented out above.
    let t1 = thread::spawn(|| run_batches_inner(db));
    let t2 = thread::spawn(|| {}); // run_batches_inner(db2));

    if !crash_during_initialization {
        spawn_killah();
    }

    // Exit code 15 distinguishes a worker panic from the expected kill (9).
    if let Err(e) = t1.join().and_then(|_| t2.join()) {
        println!("worker thread failed: {:?}", e);
        std::process::exit(15);
    }
}

// Re-executes this same test binary as a child, with TEST_ENV_VAR selecting
// the scenario the child should run.
fn run_child_process(test_name: &str) -> Child {
    let bin = env::current_exe().expect("could not get test binary path");
    // NOTE(review): this mutates the *parent's* environment and looks
    // redundant with the .env() call on the Command below — confirm whether
    // it can be removed.
    env::set_var(TEST_ENV_VAR, test_name);

    Command::new(bin)
        .env(TEST_ENV_VAR, test_name)
        .env("SLED_CRASH_CHANCE", CRASH_CHANCE.to_string())
        .spawn()
        .unwrap_or_else(|_| {
            panic!("could not spawn child process for {} test", test_name)
        })
}

// A child is only considered healthy if it died via spawn_killah's exit(9);
// any other exit (panic, signal, exit 15) fails the scenario.
fn handle_child_exit_status(dir: &str, status: ExitStatus) {
    let code = status.code();
    if code.is_none() || code.unwrap() != 9 {
        cleanup(dir);
        panic!("{} test child exited abnormally", dir);
    }
}

fn handle_child_wait_err(dir: &str, e: std::io::Error) {
    cleanup(dir);
    panic!("error waiting for {} test child: {}", dir, e);
}

// Parent driver: kill/recover the basic-write child N_TESTS times against the
// same data dir, then clean up.
fn crash_recovery() {
    let dir = RECOVERY_DIR;
    cleanup(dir);

    for _ in 0..N_TESTS {
        let mut child = run_child_process(dir);
        child
            .wait()
            .map(|status| handle_child_exit_status(dir, status))
            .map_err(|e| handle_child_wait_err(dir, e))
            .unwrap();
    }

    cleanup(dir);
}

// Parent driver: same kill/recover loop for the atomic-batch child.
fn crash_batches() {
    let dir = BATCHES_DIR;
    cleanup(dir);

    for _ in 0..N_TESTS {
        let mut child = run_child_process(dir);
        child
            .wait()
            .map(|status| handle_child_exit_status(dir, status))
            .map_err(|e| handle_child_wait_err(dir, e))
            .unwrap();
    }

    cleanup(dir);
}

// Parent driver: kill/recover loop for the concurrent-iterator child.
fn concurrent_crash_iter() {
    let dir = ITER_DIR;
    cleanup(dir);

    for _ in 0..N_TESTS {
        let mut child = run_child_process(dir);
        child
            .wait()
            .map(|status| handle_child_exit_status(dir,
status))
            .map_err(|e| handle_child_wait_err(dir, e))
            .unwrap();
    }

    cleanup(dir);
}

// Parent driver: kill/recover loop for the concurrent-transactions child.
fn concurrent_crash_transactions() {
    let dir = TX_DIR;
    cleanup(dir);

    for _ in 0..N_TESTS {
        let mut child = run_child_process(dir);
        child
            .wait()
            .map(|status| handle_child_exit_status(dir, status))
            .map_err(|e| handle_child_wait_err(dir, e))
            .unwrap();
    }

    cleanup(dir);
}

// Child workload: many forward and reverse iterators race against an inserter
// and a deleter until the process is killed. The 16 one-byte INDELIBLE keys
// are never removed (the mutators only touch two-byte keys derived from
// them), so every scan must witness each of them, in order.
fn run_iter() {
    common::setup_logger();

    const N_FORWARD: usize = 50;
    const N_REVERSE: usize = 50;

    let config = Config::new().path(ITER_DIR).flush_every_ms(Some(1));

    let t = config.open().unwrap();
    t.verify_integrity().unwrap();

    const INDELIBLE: [&[u8]; 16] = [
        &[0u8], &[1u8], &[2u8], &[3u8],
        &[4u8], &[5u8], &[6u8], &[7u8],
        &[8u8], &[9u8], &[10u8], &[11u8],
        &[12u8], &[13u8], &[14u8], &[15u8],
    ];

    for item in &INDELIBLE {
        t.insert(*item, *item).unwrap();
    }

    // +2 accounts for the inserter and deleter threads below.
    let barrier = Arc::new(Barrier::new(N_FORWARD + N_REVERSE + 2));
    let mut threads = vec![];

    for i in 0..N_FORWARD {
        let t = thread::Builder::new()
            .name(format!("forward({})", i))
            .spawn({
                let t = t.clone();
                let barrier = barrier.clone();
                move || {
                    barrier.wait();
                    loop {
                        let expected = INDELIBLE.iter();
                        let mut keys = t.iter().keys();

                        // A forward scan may see extra (mutated) keys between
                        // the indelible ones, but must never skip past one.
                        for expect in expected {
                            loop {
                                let k = keys.next().unwrap().unwrap();
                                assert!(
                                    &*k <= *expect,
                                    "witnessed key is {:?} but we expected \
                                    one <= {:?}, so we overshot due to a \
                                    concurrent modification",
                                    k,
                                    expect,
                                );
                                if &*k == *expect {
                                    break;
                                }
                            }
                        }
                    }
                }
            })
            .unwrap();
        threads.push(t);
    }

    for i in 0..N_REVERSE {
        let t = thread::Builder::new()
            .name(format!("reverse({})", i))
            .spawn({
                let t = t.clone();
                let barrier = barrier.clone();
                move || {
                    barrier.wait();
                    loop {
                        let expected = INDELIBLE.iter().rev();
                        let mut keys = t.iter().keys().rev();

                        // Mirror image of the forward check: descending scans
                        // must hit every indelible key in reverse order.
                        for expect in expected {
                            loop {
                                if let Some(Ok(k)) = keys.next() {
                                    assert!(
                                        &*k >= *expect,
                                        "witnessed key is {:?} but we expected \
                                        one >= {:?}, so we overshot due to a \
                                        concurrent modification\n{:?}",
                                        k,
                                        expect,
                                        *t,
                                    );
                                    if &*k == *expect {
                                        break;
                                    }
                                } else {
                                    panic!("undershot key on tree: \n{:?}", *t);
                                }
                            }
}
                    }
                }
            })
            .unwrap();
        threads.push(t);
    }

    // Inserter: churns the two-byte keys (indelible key + minor byte) so the
    // scans above always race against live modifications.
    let inserter = thread::Builder::new()
        .name("inserter".into())
        .spawn({
            let t = t.clone();
            let barrier = barrier.clone();
            move || {
                barrier.wait();
                loop {
                    for i in 0..(16 * 16 * 8) {
                        let major = i / (16 * 8);
                        let minor = i % 16;

                        let mut base = INDELIBLE[major].to_vec();
                        base.push(minor as u8);
                        t.insert(base.clone(), base.clone()).unwrap();
                    }
                }
            }
        })
        .unwrap();
    threads.push(inserter);

    // Deleter: removes the same derived keys; never touches the one-byte
    // INDELIBLE keys themselves. Moves the remaining t/barrier handles.
    let deleter = thread::Builder::new()
        .name("deleter".into())
        .spawn({
            move || {
                barrier.wait();
                loop {
                    for i in 0..(16 * 16 * 8) {
                        let major = i / (16 * 8);
                        let minor = i % 16;

                        let mut base = INDELIBLE[major].to_vec();
                        base.push(minor as u8);
                        t.remove(&base).unwrap();
                    }
                }
            }
        })
        .unwrap();

    spawn_killah();

    threads.push(deleter);

    for thread in threads.into_iter() {
        thread.join().expect("thread should not have crashed");
    }
}

// Child workload: writers atomically swap k1/k2 inside transactions while
// readers verify that every transactional snapshot still holds exactly
// {"cats","dogs"} and that the generated id never moves backwards.
fn run_tx() {
    common::setup_logger();

    let config = Config::new().flush_every_ms(Some(1)).path(TX_DIR);
    let db = config.open().unwrap();
    db.verify_integrity().unwrap();

    db.insert(b"k1", b"cats").unwrap();
    db.insert(b"k2", b"dogs").unwrap();
    db.insert(b"id", &0_u64.to_le_bytes()).unwrap();

    let mut threads = vec![];

    const N_WRITERS: usize = 50;
    const N_READERS: usize = 5;

    let barrier = Arc::new(Barrier::new(N_WRITERS + N_READERS));

    for _ in 0..N_WRITERS {
        let db = db.clone();
        let barrier = barrier.clone();
        let thread = std::thread::spawn(move || {
            barrier.wait();
            loop {
                // Swap the two values and stamp a fresh monotonic id, all in
                // one transaction — a crash must never expose a half-swap.
                db.transaction::<_, _, ()>(|db| {
                    let v1 = db.remove(b"k1").unwrap().unwrap();
                    let v2 = db.remove(b"k2").unwrap().unwrap();
                    db.insert(b"id", &db.generate_id().unwrap().to_le_bytes())
                        .unwrap();
                    db.insert(b"k1", v2).unwrap();
                    db.insert(b"k2", v1).unwrap();
                    Ok(())
                })
                .unwrap();
            }
        });
        threads.push(thread);
    }

    for _ in 0..N_READERS {
        let db = db.clone();
        let barrier = barrier.clone();
        let thread = std::thread::spawn(move || {
            barrier.wait();
            let mut last_id = 0;
            loop {
                let read_id = db
                    .transaction::<_, _, ()>(|db| {
                        let v1 = db.get(b"k1").unwrap().unwrap();
                        let v2 =
db.get(b"k2").unwrap().unwrap(); let id = u64::from_le_bytes( TryFrom::try_from( &*db.get(b"id").unwrap().unwrap(), ) .unwrap(), ); let mut results = vec![v1, v2]; results.sort(); assert_eq!( [&results[0], &results[1]], [b"cats", b"dogs"] ); Ok(id) }) .unwrap(); assert!(read_id >= last_id); last_id = read_id; } }); threads.push(thread); } spawn_killah(); for thread in threads.into_iter() { thread.join().expect("threads should not crash"); } let v1 = db.get(b"k1").unwrap().unwrap(); let v2 = db.get(b"k2").unwrap().unwrap(); assert_eq!([v1, v2], [b"cats", b"dogs"]); } Add filtering and stderr output to no-harness test mod common; use std::convert::TryFrom; use std::env::{self, VarError}; use std::mem::size_of; use std::process::{exit, Child, Command, ExitStatus}; use std::sync::{Arc, Barrier}; use std::thread; use std::time::Duration; use rand::Rng; use sled::Config; use common::cleanup; const TEST_ENV_VAR: &str = "SLED_CRASH_TEST"; const N_TESTS: usize = 100; const CYCLE: usize = 256; const BATCH_SIZE: u32 = 8; const SEGMENT_SIZE: usize = 1024; // test names, also used as dir names const RECOVERY_DIR: &str = "crash_recovery"; const BATCHES_DIR: &str = "crash_batches"; const ITER_DIR: &str = "crash_iter"; const TX_DIR: &str = "crash_tx"; const TESTS: [(&'static str, fn()); 4] = [ (RECOVERY_DIR, crash_recovery), (BATCHES_DIR, crash_batches), (ITER_DIR, concurrent_crash_iter), (TX_DIR, concurrent_crash_transactions), ]; const CRASH_CHANCE: u32 = 250; fn main() { // Don't actually run this harness=false test under miri, as it requires // spawning and killing child processes. 
if cfg!(miri) { return; } common::setup_logger(); match env::var(TEST_ENV_VAR) { Err(VarError::NotPresent) => { let filtered: Vec<(&'static str, fn())> = if let Some(filter) = std::env::args().skip(1).next() { TESTS .iter() .filter(|(name, _)| name.contains(&filter)) .cloned() .collect() } else { TESTS.to_vec() }; eprintln!(); eprintln!( "running {} test{}", filtered.len(), if filtered.len() == 1 { "" } else { "s" }, ); for (test_name, test_fn) in filtered.iter() { eprint!("test {} ...", test_name); test_fn(); eprintln!(" ok"); } eprintln!(); eprintln!( "test result: ok. {} passed; {} filtered out", filtered.len(), TESTS.len() - filtered.len(), ); eprintln!(); } Ok(ref s) if s == RECOVERY_DIR => run(), Ok(ref s) if s == BATCHES_DIR => run_batches(), Ok(ref s) if s == ITER_DIR => run_iter(), Ok(ref s) if s == TX_DIR => run_tx(), Ok(_) | Err(_) => panic!("invalid crash test case"), } } /// Verifies that the keys in the tree are correctly recovered. /// Panics if they are incorrect. /// Returns the key that should be resumed at, and the current cycle value. fn verify(tree: &sled::Tree) -> (u32, u32) { // key 0 should always be the highest value, as that's where we increment // at some point, it might go down by one // it should never return, or go down again after that let mut iter = tree.iter(); let highest = match iter.next() { Some(Ok((_k, v))) => slice_to_u32(&*v), Some(Err(e)) => panic!("{:?}", e), None => return (0, 0), }; let highest_vec = u32_to_vec(highest); // find how far we got let mut contiguous: u32 = 0; let mut lowest = 0; for res in iter { let (_k, v) = res.unwrap(); if v[..4] == highest_vec[..4] { contiguous += 1; } else { let expected = if highest == 0 { CYCLE as u32 - 1 } else { (highest - 1) % CYCLE as u32 }; let actual = slice_to_u32(&*v); assert_eq!(expected, actual); lowest = actual; break; } } // ensure nothing changes after this point let low_beginning = u32_to_vec(contiguous + 1); for res in tree.range(&*low_beginning..) 
{ let (k, v): (sled::IVec, _) = res.unwrap(); assert_eq!( slice_to_u32(&*v), lowest, "expected key {} to have value {}, instead it had value {} in db: {:?}", slice_to_u32(&*k), lowest, slice_to_u32(&*v), tree ); } tree.verify_integrity().unwrap(); (contiguous, highest) } fn u32_to_vec(u: u32) -> Vec<u8> { let buf: [u8; size_of::<u32>()] = u.to_be_bytes(); buf.to_vec() } fn slice_to_u32(b: &[u8]) -> u32 { let mut buf = [0u8; size_of::<u32>()]; buf.copy_from_slice(&b[..size_of::<u32>()]); u32::from_be_bytes(buf) } fn spawn_killah() { thread::spawn(|| { let runtime = rand::thread_rng().gen_range(0, 60); thread::sleep(Duration::from_millis(runtime)); exit(9); }); } fn run_inner(config: Config) { let crash_during_initialization = rand::thread_rng().gen_bool(0.1); if crash_during_initialization { spawn_killah(); } let tree = config.open().unwrap(); if !crash_during_initialization { spawn_killah(); } let (key, highest) = verify(&tree); let mut hu = ((highest as usize) * CYCLE) + key as usize; assert_eq!(hu % CYCLE, key as usize); assert_eq!(hu / CYCLE, highest as usize); loop { hu += 1; if hu / CYCLE >= CYCLE { hu = 0; } let key = u32_to_vec((hu % CYCLE) as u32); let mut value = u32_to_vec((hu / CYCLE) as u32); let additional_len = rand::thread_rng().gen_range(0, SEGMENT_SIZE / 3); value.append(&mut vec![0u8; additional_len]); tree.insert(&key, value).unwrap(); } } /// Verifies that the keys in the tree are correctly recovered (i.e., equal). /// Panics if they are incorrect. 
fn verify_batches(tree: &sled::Tree) -> u32 { let mut iter = tree.iter(); let first_value = match iter.next() { Some(Ok((_k, v))) => slice_to_u32(&*v), Some(Err(e)) => panic!("{:?}", e), None => return 0, }; for key in 0..BATCH_SIZE { let res = tree.get(u32_to_vec(key)); let option = res.unwrap(); let v = match option { Some(v) => v, None => panic!( "expected key {} to have a value, instead it was missing in db: {:?}", key, tree ), }; let value = slice_to_u32(&*v); assert_eq!( first_value, value, "expected key {} to have value {}, instead it had value {} in db: {:?}", key, first_value, value, tree ); } tree.verify_integrity().unwrap(); first_value } fn run_batches_inner(db: sled::Db) { fn do_batch(i: u32, db: &sled::Db) { let mut rng = rand::thread_rng(); let base_value = u32_to_vec(i); let mut batch = sled::Batch::default(); if rng.gen_bool(0.1) { for key in 0..BATCH_SIZE { batch.remove(u32_to_vec(key)); } } else { for key in 0..BATCH_SIZE { let mut value = base_value.clone(); let additional_len = rng.gen_range(0, SEGMENT_SIZE / 3); value.append(&mut vec![0u8; additional_len]); batch.insert(u32_to_vec(key), value); } } db.apply_batch(batch).unwrap(); } let mut i = verify_batches(&db); i += 1; do_batch(i, &db); loop { i += 1; do_batch(i, &db); } } fn run() { let config = Config::new() .cache_capacity(128 * 1024 * 1024) .flush_every_ms(Some(1)) .path(RECOVERY_DIR.to_string()) .segment_size(SEGMENT_SIZE); if let Err(e) = thread::spawn(|| run_inner(config)).join() { println!("worker thread failed: {:?}", e); std::process::exit(15); } } fn run_batches() { let crash_during_initialization = rand::thread_rng().gen_ratio(1, 10); if crash_during_initialization { spawn_killah(); } let config = Config::new() .cache_capacity(128 * 1024 * 1024) .flush_every_ms(Some(1)) .path(BATCHES_DIR.to_string()) .segment_size(SEGMENT_SIZE); let db = config.open().unwrap(); // let db2 = db.clone(); let t1 = thread::spawn(|| run_batches_inner(db)); let t2 = thread::spawn(|| {}); // 
run_batches_inner(db2)); if !crash_during_initialization { spawn_killah(); } if let Err(e) = t1.join().and_then(|_| t2.join()) { println!("worker thread failed: {:?}", e); std::process::exit(15); } } fn run_child_process(test_name: &str) -> Child { let bin = env::current_exe().expect("could not get test binary path"); env::set_var(TEST_ENV_VAR, test_name); Command::new(bin) .env(TEST_ENV_VAR, test_name) .env("SLED_CRASH_CHANCE", CRASH_CHANCE.to_string()) .spawn() .unwrap_or_else(|_| { panic!("could not spawn child process for {} test", test_name) }) } fn handle_child_exit_status(dir: &str, status: ExitStatus) { let code = status.code(); if code.is_none() || code.unwrap() != 9 { cleanup(dir); panic!("{} test child exited abnormally", dir); } } fn handle_child_wait_err(dir: &str, e: std::io::Error) { cleanup(dir); panic!("error waiting for {} test child: {}", dir, e); } fn crash_recovery() { let dir = RECOVERY_DIR; cleanup(dir); for _ in 0..N_TESTS { let mut child = run_child_process(dir); child .wait() .map(|status| handle_child_exit_status(dir, status)) .map_err(|e| handle_child_wait_err(dir, e)) .unwrap(); } cleanup(dir); } fn crash_batches() { let dir = BATCHES_DIR; cleanup(dir); for _ in 0..N_TESTS { let mut child = run_child_process(dir); child .wait() .map(|status| handle_child_exit_status(dir, status)) .map_err(|e| handle_child_wait_err(dir, e)) .unwrap(); } cleanup(dir); } fn concurrent_crash_iter() { let dir = ITER_DIR; cleanup(dir); for _ in 0..N_TESTS { let mut child = run_child_process(dir); child .wait() .map(|status| handle_child_exit_status(dir, status)) .map_err(|e| handle_child_wait_err(dir, e)) .unwrap(); } cleanup(dir); } fn concurrent_crash_transactions() { let dir = TX_DIR; cleanup(dir); for _ in 0..N_TESTS { let mut child = run_child_process(dir); child .wait() .map(|status| handle_child_exit_status(dir, status)) .map_err(|e| handle_child_wait_err(dir, e)) .unwrap(); } cleanup(dir); } fn run_iter() { common::setup_logger(); const N_FORWARD: 
usize = 50; const N_REVERSE: usize = 50; let config = Config::new().path(ITER_DIR).flush_every_ms(Some(1)); let t = config.open().unwrap(); t.verify_integrity().unwrap(); const INDELIBLE: [&[u8]; 16] = [ &[0u8], &[1u8], &[2u8], &[3u8], &[4u8], &[5u8], &[6u8], &[7u8], &[8u8], &[9u8], &[10u8], &[11u8], &[12u8], &[13u8], &[14u8], &[15u8], ]; for item in &INDELIBLE { t.insert(*item, *item).unwrap(); } let barrier = Arc::new(Barrier::new(N_FORWARD + N_REVERSE + 2)); let mut threads = vec![]; for i in 0..N_FORWARD { let t = thread::Builder::new() .name(format!("forward({})", i)) .spawn({ let t = t.clone(); let barrier = barrier.clone(); move || { barrier.wait(); loop { let expected = INDELIBLE.iter(); let mut keys = t.iter().keys(); for expect in expected { loop { let k = keys.next().unwrap().unwrap(); assert!( &*k <= *expect, "witnessed key is {:?} but we expected \ one <= {:?}, so we overshot due to a \ concurrent modification", k, expect, ); if &*k == *expect { break; } } } } } }) .unwrap(); threads.push(t); } for i in 0..N_REVERSE { let t = thread::Builder::new() .name(format!("reverse({})", i)) .spawn({ let t = t.clone(); let barrier = barrier.clone(); move || { barrier.wait(); loop { let expected = INDELIBLE.iter().rev(); let mut keys = t.iter().keys().rev(); for expect in expected { loop { if let Some(Ok(k)) = keys.next() { assert!( &*k >= *expect, "witnessed key is {:?} but we expected \ one >= {:?}, so we overshot due to a \ concurrent modification\n{:?}", k, expect, *t, ); if &*k == *expect { break; } } else { panic!("undershot key on tree: \n{:?}", *t); } } } } } }) .unwrap(); threads.push(t); } let inserter = thread::Builder::new() .name("inserter".into()) .spawn({ let t = t.clone(); let barrier = barrier.clone(); move || { barrier.wait(); loop { for i in 0..(16 * 16 * 8) { let major = i / (16 * 8); let minor = i % 16; let mut base = INDELIBLE[major].to_vec(); base.push(minor as u8); t.insert(base.clone(), base.clone()).unwrap(); } } } }) .unwrap(); 
threads.push(inserter); let deleter = thread::Builder::new() .name("deleter".into()) .spawn({ move || { barrier.wait(); loop { for i in 0..(16 * 16 * 8) { let major = i / (16 * 8); let minor = i % 16; let mut base = INDELIBLE[major].to_vec(); base.push(minor as u8); t.remove(&base).unwrap(); } } } }) .unwrap(); spawn_killah(); threads.push(deleter); for thread in threads.into_iter() { thread.join().expect("thread should not have crashed"); } } fn run_tx() { common::setup_logger(); let config = Config::new().flush_every_ms(Some(1)).path(TX_DIR); let db = config.open().unwrap(); db.verify_integrity().unwrap(); db.insert(b"k1", b"cats").unwrap(); db.insert(b"k2", b"dogs").unwrap(); db.insert(b"id", &0_u64.to_le_bytes()).unwrap(); let mut threads = vec![]; const N_WRITERS: usize = 50; const N_READERS: usize = 5; let barrier = Arc::new(Barrier::new(N_WRITERS + N_READERS)); for _ in 0..N_WRITERS { let db = db.clone(); let barrier = barrier.clone(); let thread = std::thread::spawn(move || { barrier.wait(); loop { db.transaction::<_, _, ()>(|db| { let v1 = db.remove(b"k1").unwrap().unwrap(); let v2 = db.remove(b"k2").unwrap().unwrap(); db.insert(b"id", &db.generate_id().unwrap().to_le_bytes()) .unwrap(); db.insert(b"k1", v2).unwrap(); db.insert(b"k2", v1).unwrap(); Ok(()) }) .unwrap(); } }); threads.push(thread); } for _ in 0..N_READERS { let db = db.clone(); let barrier = barrier.clone(); let thread = std::thread::spawn(move || { barrier.wait(); let mut last_id = 0; loop { let read_id = db .transaction::<_, _, ()>(|db| { let v1 = db.get(b"k1").unwrap().unwrap(); let v2 = db.get(b"k2").unwrap().unwrap(); let id = u64::from_le_bytes( TryFrom::try_from( &*db.get(b"id").unwrap().unwrap(), ) .unwrap(), ); let mut results = vec![v1, v2]; results.sort(); assert_eq!( [&results[0], &results[1]], [b"cats", b"dogs"] ); Ok(id) }) .unwrap(); assert!(read_id >= last_id); last_id = read_id; } }); threads.push(thread); } spawn_killah(); for thread in threads.into_iter() { 
thread.join().expect("threads should not crash"); } let v1 = db.get(b"k1").unwrap().unwrap(); let v2 = db.get(b"k2").unwrap().unwrap(); assert_eq!([v1, v2], [b"cats", b"dogs"]); }
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use cssparser::Parser;
use euclid::size::Size2D;
use euclid::scale_factor::ScaleFactor;
use style::media_queries::{Device, MediaType};
use style::parser::ParserContext;
use style::stylesheets::{Origin, Stylesheet, CSSRuleIteratorExt};
use style::values::specified::{Length, LengthOrPercentageOrAuto};
use style::viewport::*;
use url::Url;

// Parses a CSS string into a Stylesheet with the given cascade origin
// (UserAgent / User / Author); all test sheets share a dummy localhost URL.
macro_rules! stylesheet {
    ($css:expr, $origin:ident) => {
        Stylesheet::from_str($css,
                             Url::parse("http://localhost").unwrap(),
                             Origin::$origin);
    }
}

// Parses `css` as an Author sheet, invokes `callback` with the declarations
// of each effective @viewport rule for `device`, and asserts that at least
// one such rule was found. @viewport is experimental, hence the opt-in.
fn test_viewport_rule<F>(css: &str,
                         device: &Device,
                         callback: F)
    where F: Fn(&Vec<ViewportDescriptorDeclaration>, &str)
{
    ::util::opts::set_experimental_enabled(true);
    let stylesheet = stylesheet!(css, Author);

    let mut rule_count = 0;
    for rule in stylesheet.effective_rules(&device).viewport() {
        rule_count += 1;
        callback(&rule.declarations, css);
    }
    assert!(rule_count > 0);
}

// Asserts the declaration count with a message that dumps the actual
// declarations on failure; the `== 1` arm exists for singular grammar.
macro_rules! assert_decl_len {
    ($declarations:ident == 1) => {
        assert!($declarations.len() == 1,
                "expected 1 declaration; have {}: {:?})",
                $declarations.len(), $declarations)
    };
    ($declarations:ident == $len:expr) => {
        assert!($declarations.len() == $len,
                "expected {} declarations; have {}: {:?})",
                $len, $declarations.len(), $declarations)
    }
}

#[test]
fn empty_viewport_rule() {
    let device = Device::new(MediaType::Screen, Size2D::typed(800., 600.));

    // An empty @viewport block still counts as a rule, just with no
    // declarations.
    test_viewport_rule("@viewport {}", &device, |declarations, css| {
        println!("{}", css);
        assert_decl_len!(declarations == 0);
    });
}

macro_rules!
assert_decl_eq { ($d:expr, $origin:ident, $expected:ident: $value:expr) => {{ assert_eq!($d.origin, Origin::$origin); assert_eq!($d.descriptor, ViewportDescriptor::$expected($value)); assert!($d.important == false, "descriptor should not be !important"); }}; ($d:expr, $origin:ident, $expected:ident: $value:expr, !important) => {{ assert_eq!($d.origin, Origin::$origin); assert_eq!($d.descriptor, ViewportDescriptor::$expected($value)); assert!($d.important == true, "descriptor should be !important"); }}; } #[test] fn simple_viewport_rules() { let device = Device::new(MediaType::Screen, Size2D::typed(800., 600.)); test_viewport_rule("@viewport { width: auto; height: auto;\ zoom: auto; min-zoom: 0; max-zoom: 200%;\ user-zoom: zoom; orientation: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 9); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto); assert_decl_eq!(&declarations[1], Author, MaxWidth: LengthOrPercentageOrAuto::Auto); assert_decl_eq!(&declarations[2], Author, MinHeight: LengthOrPercentageOrAuto::Auto); assert_decl_eq!(&declarations[3], Author, MaxHeight: LengthOrPercentageOrAuto::Auto); assert_decl_eq!(&declarations[4], Author, Zoom: Zoom::Auto); assert_decl_eq!(&declarations[5], Author, MinZoom: Zoom::Number(0.)); assert_decl_eq!(&declarations[6], Author, MaxZoom: Zoom::Percentage(2.)); assert_decl_eq!(&declarations[7], Author, UserZoom: UserZoom::Zoom); assert_decl_eq!(&declarations[8], Author, Orientation: Orientation::Auto); }); test_viewport_rule("@viewport { min-width: 200px; max-width: auto;\ min-height: 200px; max-height: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 4); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Length(Length::from_px(200.))); assert_decl_eq!(&declarations[1], Author, MaxWidth: LengthOrPercentageOrAuto::Auto); assert_decl_eq!(&declarations[2], Author, MinHeight: 
LengthOrPercentageOrAuto::Length(Length::from_px(200.))); assert_decl_eq!(&declarations[3], Author, MaxHeight: LengthOrPercentageOrAuto::Auto); }); } #[test] fn cascading_within_viewport_rule() { let device = Device::new(MediaType::Screen, Size2D::typed(800., 600.)); // normal order of appearance test_viewport_rule("@viewport { min-width: 200px; min-width: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 1); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto); }); // !important order of appearance test_viewport_rule("@viewport { min-width: 200px !important; min-width: auto !important; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 1); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto, !important); }); // !important vs normal test_viewport_rule("@viewport { min-width: auto !important; min-width: 200px; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 1); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto, !important); }); // normal longhands vs normal shorthand test_viewport_rule("@viewport { min-width: 200px; max-width: 200px; width: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 2); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto); assert_decl_eq!(&declarations[1], Author, MaxWidth: LengthOrPercentageOrAuto::Auto); }); // normal shorthand vs normal longhands test_viewport_rule("@viewport { width: 200px; min-width: auto; max-width: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 2); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto); assert_decl_eq!(&declarations[1], Author, MaxWidth: LengthOrPercentageOrAuto::Auto); }); // one !important longhand vs normal shorthand 
test_viewport_rule("@viewport { min-width: auto !important; width: 200px; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 2); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto, !important); assert_decl_eq!(&declarations[1], Author, MaxWidth: LengthOrPercentageOrAuto::Length(Length::from_px(200.))); }); // both !important longhands vs normal shorthand test_viewport_rule("@viewport { min-width: auto !important; max-width: auto !important; width: 200px; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 2); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto, !important); assert_decl_eq!(&declarations[1], Author, MaxWidth: LengthOrPercentageOrAuto::Auto, !important); }); } #[test] fn multiple_stylesheets_cascading() { let device = Device::new(MediaType::Screen, Size2D::typed(800., 600.)); let stylesheets = vec![ stylesheet!("@viewport { min-width: 100px; min-height: 100px; zoom: 1; }", UserAgent), stylesheet!("@viewport { min-width: 200px; min-height: 200px; }", User), stylesheet!("@viewport { min-width: 300px; }", Author)]; let declarations = stylesheets.iter() .flat_map(|s| s.effective_rules(&device).viewport()) .cascade() .declarations; assert_decl_len!(declarations == 3); assert_decl_eq!(&declarations[0], UserAgent, Zoom: Zoom::Number(1.)); assert_decl_eq!(&declarations[1], User, MinHeight: LengthOrPercentageOrAuto::Length(Length::from_px(200.))); assert_decl_eq!(&declarations[2], Author, MinWidth: LengthOrPercentageOrAuto::Length(Length::from_px(300.))); let stylesheets = vec![ stylesheet!("@viewport { min-width: 100px !important; }", UserAgent), stylesheet!("@viewport { min-width: 200px !important; min-height: 200px !important; }", User), stylesheet!( "@viewport { min-width: 300px !important; min-height: 300px !important; zoom: 3 !important; }", Author)]; let declarations = stylesheets.iter() .flat_map(|s| 
s.effective_rules(&device).viewport()) .cascade() .declarations; assert_decl_len!(declarations == 3); assert_decl_eq!( &declarations[0], UserAgent, MinWidth: LengthOrPercentageOrAuto::Length(Length::from_px(100.)), !important); assert_decl_eq!( &declarations[1], User, MinHeight: LengthOrPercentageOrAuto::Length(Length::from_px(200.)), !important); assert_decl_eq!(&declarations[2], Author, Zoom: Zoom::Number(3.), !important); } #[test] fn constrain_viewport() { let url = Url::parse("http://localhost").unwrap(); let context = ParserContext::new(Origin::Author, &url); macro_rules! from_css { ($css:expr) => { &ViewportRule::parse(&mut Parser::new($css), &context).unwrap() } } let initial_viewport = Size2D::typed(800., 600.); assert_eq!(ViewportConstraints::maybe_new(initial_viewport, from_css!("")), None); let initial_viewport = Size2D::typed(800., 600.); assert_eq!(ViewportConstraints::maybe_new(initial_viewport, from_css!("width: 320px auto")), Some(ViewportConstraints { size: initial_viewport, initial_zoom: ScaleFactor::new(1.), min_zoom: None, max_zoom: None, user_zoom: UserZoom::Zoom, orientation: Orientation::Auto })); let initial_viewport = Size2D::typed(200., 150.); assert_eq!(ViewportConstraints::maybe_new(initial_viewport, from_css!("width: 320px auto")), Some(ViewportConstraints { size: Size2D::typed(320., 240.), initial_zoom: ScaleFactor::new(1.), min_zoom: None, max_zoom: None, user_zoom: UserZoom::Zoom, orientation: Orientation::Auto })); let initial_viewport = Size2D::typed(800., 600.); assert_eq!(ViewportConstraints::maybe_new(initial_viewport, from_css!("width: 320px auto")), Some(ViewportConstraints { size: initial_viewport, initial_zoom: ScaleFactor::new(1.), min_zoom: None, max_zoom: None, user_zoom: UserZoom::Zoom, orientation: Orientation::Auto })); let initial_viewport = Size2D::typed(800., 600.); assert_eq!(ViewportConstraints::maybe_new(initial_viewport, from_css!("width: 800px; height: 600px;\ zoom: 1;\ user-zoom: zoom;\ orientation: auto;")), 
Some(ViewportConstraints { size: initial_viewport, initial_zoom: ScaleFactor::new(1.), min_zoom: None, max_zoom: None, user_zoom: UserZoom::Zoom, orientation: Orientation::Auto })); } Auto merge of #6470 - Ms2ger:6466-viewport, r=metajack Enable experimental features in the multiple_stylesheets_cascading test (fixes #6466). <!-- Reviewable:start --> [<img src="https://reviewable.io/review_button.png" height=40 alt="Review on Reviewable"/>](https://reviewable.io/reviews/servo/servo/6470) <!-- Reviewable:end --> /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use cssparser::Parser; use euclid::size::Size2D; use euclid::scale_factor::ScaleFactor; use style::media_queries::{Device, MediaType}; use style::parser::ParserContext; use style::stylesheets::{Origin, Stylesheet, CSSRuleIteratorExt}; use style::values::specified::{Length, LengthOrPercentageOrAuto}; use style::viewport::*; use url::Url; macro_rules! stylesheet { ($css:expr, $origin:ident) => { Stylesheet::from_str($css, Url::parse("http://localhost").unwrap(), Origin::$origin); } } fn test_viewport_rule<F>(css: &str, device: &Device, callback: F) where F: Fn(&Vec<ViewportDescriptorDeclaration>, &str) { ::util::opts::set_experimental_enabled(true); let stylesheet = stylesheet!(css, Author); let mut rule_count = 0; for rule in stylesheet.effective_rules(&device).viewport() { rule_count += 1; callback(&rule.declarations, css); } assert!(rule_count > 0); } macro_rules! 
assert_decl_len { ($declarations:ident == 1) => { assert!($declarations.len() == 1, "expected 1 declaration; have {}: {:?})", $declarations.len(), $declarations) }; ($declarations:ident == $len:expr) => { assert!($declarations.len() == $len, "expected {} declarations; have {}: {:?})", $len, $declarations.len(), $declarations) } } #[test] fn empty_viewport_rule() { let device = Device::new(MediaType::Screen, Size2D::typed(800., 600.)); test_viewport_rule("@viewport {}", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 0); }); } macro_rules! assert_decl_eq { ($d:expr, $origin:ident, $expected:ident: $value:expr) => {{ assert_eq!($d.origin, Origin::$origin); assert_eq!($d.descriptor, ViewportDescriptor::$expected($value)); assert!($d.important == false, "descriptor should not be !important"); }}; ($d:expr, $origin:ident, $expected:ident: $value:expr, !important) => {{ assert_eq!($d.origin, Origin::$origin); assert_eq!($d.descriptor, ViewportDescriptor::$expected($value)); assert!($d.important == true, "descriptor should be !important"); }}; } #[test] fn simple_viewport_rules() { let device = Device::new(MediaType::Screen, Size2D::typed(800., 600.)); test_viewport_rule("@viewport { width: auto; height: auto;\ zoom: auto; min-zoom: 0; max-zoom: 200%;\ user-zoom: zoom; orientation: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 9); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto); assert_decl_eq!(&declarations[1], Author, MaxWidth: LengthOrPercentageOrAuto::Auto); assert_decl_eq!(&declarations[2], Author, MinHeight: LengthOrPercentageOrAuto::Auto); assert_decl_eq!(&declarations[3], Author, MaxHeight: LengthOrPercentageOrAuto::Auto); assert_decl_eq!(&declarations[4], Author, Zoom: Zoom::Auto); assert_decl_eq!(&declarations[5], Author, MinZoom: Zoom::Number(0.)); assert_decl_eq!(&declarations[6], Author, MaxZoom: Zoom::Percentage(2.)); 
assert_decl_eq!(&declarations[7], Author, UserZoom: UserZoom::Zoom); assert_decl_eq!(&declarations[8], Author, Orientation: Orientation::Auto); }); test_viewport_rule("@viewport { min-width: 200px; max-width: auto;\ min-height: 200px; max-height: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 4); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Length(Length::from_px(200.))); assert_decl_eq!(&declarations[1], Author, MaxWidth: LengthOrPercentageOrAuto::Auto); assert_decl_eq!(&declarations[2], Author, MinHeight: LengthOrPercentageOrAuto::Length(Length::from_px(200.))); assert_decl_eq!(&declarations[3], Author, MaxHeight: LengthOrPercentageOrAuto::Auto); }); } #[test] fn cascading_within_viewport_rule() { let device = Device::new(MediaType::Screen, Size2D::typed(800., 600.)); // normal order of appearance test_viewport_rule("@viewport { min-width: 200px; min-width: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 1); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto); }); // !important order of appearance test_viewport_rule("@viewport { min-width: 200px !important; min-width: auto !important; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 1); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto, !important); }); // !important vs normal test_viewport_rule("@viewport { min-width: auto !important; min-width: 200px; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 1); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto, !important); }); // normal longhands vs normal shorthand test_viewport_rule("@viewport { min-width: 200px; max-width: 200px; width: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 2); 
assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto); assert_decl_eq!(&declarations[1], Author, MaxWidth: LengthOrPercentageOrAuto::Auto); }); // normal shorthand vs normal longhands test_viewport_rule("@viewport { width: 200px; min-width: auto; max-width: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 2); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto); assert_decl_eq!(&declarations[1], Author, MaxWidth: LengthOrPercentageOrAuto::Auto); }); // one !important longhand vs normal shorthand test_viewport_rule("@viewport { min-width: auto !important; width: 200px; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 2); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto, !important); assert_decl_eq!(&declarations[1], Author, MaxWidth: LengthOrPercentageOrAuto::Length(Length::from_px(200.))); }); // both !important longhands vs normal shorthand test_viewport_rule("@viewport { min-width: auto !important; max-width: auto !important; width: 200px; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 2); assert_decl_eq!(&declarations[0], Author, MinWidth: LengthOrPercentageOrAuto::Auto, !important); assert_decl_eq!(&declarations[1], Author, MaxWidth: LengthOrPercentageOrAuto::Auto, !important); }); } #[test] fn multiple_stylesheets_cascading() { ::util::opts::set_experimental_enabled(true); let device = Device::new(MediaType::Screen, Size2D::typed(800., 600.)); let stylesheets = vec![ stylesheet!("@viewport { min-width: 100px; min-height: 100px; zoom: 1; }", UserAgent), stylesheet!("@viewport { min-width: 200px; min-height: 200px; }", User), stylesheet!("@viewport { min-width: 300px; }", Author)]; let declarations = stylesheets.iter() .flat_map(|s| s.effective_rules(&device).viewport()) .cascade() .declarations; assert_decl_len!(declarations == 3); 
assert_decl_eq!(&declarations[0], UserAgent, Zoom: Zoom::Number(1.)); assert_decl_eq!(&declarations[1], User, MinHeight: LengthOrPercentageOrAuto::Length(Length::from_px(200.))); assert_decl_eq!(&declarations[2], Author, MinWidth: LengthOrPercentageOrAuto::Length(Length::from_px(300.))); let stylesheets = vec![ stylesheet!("@viewport { min-width: 100px !important; }", UserAgent), stylesheet!("@viewport { min-width: 200px !important; min-height: 200px !important; }", User), stylesheet!( "@viewport { min-width: 300px !important; min-height: 300px !important; zoom: 3 !important; }", Author)]; let declarations = stylesheets.iter() .flat_map(|s| s.effective_rules(&device).viewport()) .cascade() .declarations; assert_decl_len!(declarations == 3); assert_decl_eq!( &declarations[0], UserAgent, MinWidth: LengthOrPercentageOrAuto::Length(Length::from_px(100.)), !important); assert_decl_eq!( &declarations[1], User, MinHeight: LengthOrPercentageOrAuto::Length(Length::from_px(200.)), !important); assert_decl_eq!(&declarations[2], Author, Zoom: Zoom::Number(3.), !important); } #[test] fn constrain_viewport() { let url = Url::parse("http://localhost").unwrap(); let context = ParserContext::new(Origin::Author, &url); macro_rules! 
from_css { ($css:expr) => { &ViewportRule::parse(&mut Parser::new($css), &context).unwrap() } } let initial_viewport = Size2D::typed(800., 600.); assert_eq!(ViewportConstraints::maybe_new(initial_viewport, from_css!("")), None); let initial_viewport = Size2D::typed(800., 600.); assert_eq!(ViewportConstraints::maybe_new(initial_viewport, from_css!("width: 320px auto")), Some(ViewportConstraints { size: initial_viewport, initial_zoom: ScaleFactor::new(1.), min_zoom: None, max_zoom: None, user_zoom: UserZoom::Zoom, orientation: Orientation::Auto })); let initial_viewport = Size2D::typed(200., 150.); assert_eq!(ViewportConstraints::maybe_new(initial_viewport, from_css!("width: 320px auto")), Some(ViewportConstraints { size: Size2D::typed(320., 240.), initial_zoom: ScaleFactor::new(1.), min_zoom: None, max_zoom: None, user_zoom: UserZoom::Zoom, orientation: Orientation::Auto })); let initial_viewport = Size2D::typed(800., 600.); assert_eq!(ViewportConstraints::maybe_new(initial_viewport, from_css!("width: 320px auto")), Some(ViewportConstraints { size: initial_viewport, initial_zoom: ScaleFactor::new(1.), min_zoom: None, max_zoom: None, user_zoom: UserZoom::Zoom, orientation: Orientation::Auto })); let initial_viewport = Size2D::typed(800., 600.); assert_eq!(ViewportConstraints::maybe_new(initial_viewport, from_css!("width: 800px; height: 600px;\ zoom: 1;\ user-zoom: zoom;\ orientation: auto;")), Some(ViewportConstraints { size: initial_viewport, initial_zoom: ScaleFactor::new(1.), min_zoom: None, max_zoom: None, user_zoom: UserZoom::Zoom, orientation: Orientation::Auto })); }
use std::collections::HashMap; use enum_primitive::FromPrimitive; use dbus::Path; use dbus::arg::{Dict, Variant, Iter, Array, RefArg}; use dbus_api::{DBusApi, extract, utf8_vec_u8_to_string, utf8_variant_to_string, string_to_utf8_vec_u8, path_to_string, VariantTo}; use manager::{Connectivity, NetworkManagerState}; use connection::{ConnectionSettings, ConnectionState}; use device::{DeviceType, DeviceState}; use wifi::{NM80211ApSecurityFlags, NM80211ApFlags, Security, WEP, NONE}; type SettingsMap = HashMap<String, Variant<Box<RefArg>>>; const NM_SERVICE_MANAGER: &'static str = "org.freedesktop.NetworkManager"; const NM_SERVICE_PATH: &'static str = "/org/freedesktop/NetworkManager"; const NM_SETTINGS_PATH: &'static str = "/org/freedesktop/NetworkManager/Settings"; const NM_SERVICE_INTERFACE: &'static str = "org.freedesktop.NetworkManager"; const NM_SETTINGS_INTERFACE: &'static str = "org.freedesktop.NetworkManager.Settings"; const NM_CONNECTION_INTERFACE: &'static str = "org.freedesktop.NetworkManager.Settings.\ Connection"; const NM_ACTIVE_INTERFACE: &'static str = "org.freedesktop.NetworkManager.Connection.Active"; const NM_DEVICE_INTERFACE: &'static str = "org.freedesktop.NetworkManager.Device"; const NM_WIRELESS_INTERFACE: &'static str = "org.freedesktop.NetworkManager.Device.Wireless"; const NM_ACCESS_POINT_INTERFACE: &'static str = "org.freedesktop.NetworkManager.AccessPoint"; const NM_WEP_KEY_TYPE_PASSPHRASE: u32 = 2; const UNKNOWN_CONNECTION: &'static str = "org.freedesktop.NetworkManager.UnknownConnection"; const METHOD_RETRY_ERROR_NAMES: &'static [&'static str; 1] = &[UNKNOWN_CONNECTION]; pub struct DBusNetworkManager { dbus: DBusApi, } impl DBusNetworkManager { pub fn new() -> Self { DBusNetworkManager { dbus: DBusApi::new(NM_SERVICE_MANAGER, METHOD_RETRY_ERROR_NAMES) } } pub fn method_timeout(&self) -> u64 { self.dbus.method_timeout() } pub fn set_method_timeout(&mut self, timeout: u64) { self.dbus.set_method_timeout(timeout); } pub fn get_state(&self) -> 
Result<NetworkManagerState, String> { let response = try!(self.dbus .call(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "state")); let state_u32: u32 = try!(self.dbus.extract(&response)); Ok(NetworkManagerState::from(state_u32)) } pub fn check_connectivity(&self) -> Result<Connectivity, String> { let response = try!(self.dbus .call(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "CheckConnectivity")); let connectivity_u32: u32 = try!(self.dbus.extract(&response)); Ok(Connectivity::from(connectivity_u32)) } pub fn is_wireless_enabled(&self) -> Result<bool, String> { self.dbus .property(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "WirelessEnabled") } pub fn is_networking_enabled(&self) -> Result<bool, String> { self.dbus .property(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "NetworkingEnabled") } pub fn list_connections(&self) -> Result<Vec<String>, String> { let response = try!(self.dbus .call(NM_SETTINGS_PATH, NM_SETTINGS_INTERFACE, "ListConnections")); let array: Array<Path, _> = try!(self.dbus.extract(&response)); Ok(array.map(|e| e.to_string()).collect()) } pub fn get_active_connections(&self) -> Result<Vec<String>, String> { self.dbus .property(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "ActiveConnections") } pub fn get_active_connection_path(&self, path: &str) -> Option<String> { self.dbus .property(path, NM_ACTIVE_INTERFACE, "Connection") .ok() } pub fn get_connection_state(&self, path: &str) -> Result<ConnectionState, String> { let state_i64 = match self.dbus.property(path, NM_ACTIVE_INTERFACE, "State") { Ok(state_i64) => state_i64, Err(_) => return Ok(ConnectionState::Unknown), }; ConnectionState::from_i64(state_i64) .ok_or(format!("Undefined connection state for {}", path)) } pub fn get_connection_settings(&self, path: &str) -> Result<ConnectionSettings, String> { let response = try!(self.dbus.call(path, NM_CONNECTION_INTERFACE, "GetSettings")); let dict: Dict<&str, Dict<&str, Variant<Iter>, _>, _> = try!(self.dbus.extract(&response)); let mut id = String::new(); let mut uuid = 
String::new(); let mut ssid = String::new(); for (_, v1) in dict { for (k2, v2) in v1 { match k2 { "id" => { id = try!(extract::<String>(&v2)); } "uuid" => { uuid = try!(extract::<String>(&v2)); } "ssid" => { ssid = try!(utf8_variant_to_string(&v2)); } _ => {} } } } Ok(ConnectionSettings { id: id, uuid: uuid, ssid: ssid, }) } pub fn get_active_connection_devices(&self, path: &str) -> Result<Vec<String>, String> { self.dbus.property(path, NM_ACTIVE_INTERFACE, "Devices") } pub fn delete_connection(&self, path: &str) -> Result<(), String> { try!(self.dbus.call(path, NM_CONNECTION_INTERFACE, "Delete")); Ok(()) } pub fn activate_connection(&self, path: &str) -> Result<(), String> { try!(self.dbus .call_with_args(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "ActivateConnection", &[&try!(Path::new(path)) as &RefArg, &try!(Path::new("/")) as &RefArg, &try!(Path::new("/")) as &RefArg])); Ok(()) } pub fn deactivate_connection(&self, path: &str) -> Result<(), String> { try!(self.dbus .call_with_args(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "DeactivateConnection", &[&try!(Path::new(path)) as &RefArg])); Ok(()) } pub fn add_and_activate_connection(&self, device_path: &str, ap_path: &str, ssid: &str, security: &Security, password: &str) -> Result<(String, String), String> { let mut settings: HashMap<String, SettingsMap> = HashMap::new(); let mut wireless: SettingsMap = HashMap::new(); add_val(&mut wireless, "ssid", string_to_utf8_vec_u8(&ssid.to_string())); settings.insert("802-11-wireless".to_string(), wireless); if *security != NONE { let mut security_settings: SettingsMap = HashMap::new(); if security.contains(WEP) { add_val(&mut security_settings, "wep-key-type", NM_WEP_KEY_TYPE_PASSPHRASE); add_str(&mut security_settings, "wep-key0", password); } else { add_str(&mut security_settings, "key-mgmt", "wpa-psk"); add_str(&mut security_settings, "psk", password); }; settings.insert("802-11-wireless-security".to_string(), security_settings); } let response = try!(self.dbus 
.call_with_args(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "AddAndActivateConnection", &[&settings as &RefArg, &try!(Path::new(device_path.to_string())) as &RefArg, &try!(Path::new(ap_path.to_string())) as &RefArg])); let (conn_path, active_connection): (Path, Path) = try!(self.dbus.extract_two(&response)); Ok((try!(path_to_string(&conn_path)), try!(path_to_string(&active_connection)))) } pub fn create_hotspot(&self, device_path: &str, interface: &str, ssid: &str, password: Option<&str>) -> Result<(String, String), String> { let mut wireless: SettingsMap = HashMap::new(); add_val(&mut wireless, "ssid", string_to_utf8_vec_u8(&ssid.to_string())); add_str(&mut wireless, "band", "bg"); add_val(&mut wireless, "hidden", false); add_str(&mut wireless, "mode", "ap"); let mut connection: SettingsMap = HashMap::new(); add_val(&mut connection, "autoconnect", false); add_str(&mut connection, "id", ssid); add_str(&mut connection, "interface-name", interface); add_str(&mut connection, "type", "802-11-wireless"); let mut ipv4: SettingsMap = HashMap::new(); add_str(&mut ipv4, "method", "shared"); let mut settings: HashMap<String, SettingsMap> = HashMap::new(); if let Some(password) = password { add_str(&mut wireless, "security", "802-11-wireless-security"); let mut security: SettingsMap = HashMap::new(); add_str(&mut security, "key-mgmt", "wpa-psk"); add_str(&mut security, "psk", &password); settings.insert("802-11-wireless-security".to_string(), security); } settings.insert("802-11-wireless".to_string(), wireless); settings.insert("connection".to_string(), connection); settings.insert("ipv4".to_string(), ipv4); let response = try!(self.dbus .call_with_args(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "AddAndActivateConnection", &[&settings as &RefArg, &try!(Path::new(device_path.clone())) as &RefArg, &try!(Path::new("/")) as &RefArg])); let (conn_path, active_connection): (Path, Path) = try!(self.dbus.extract_two(&response)); Ok((try!(path_to_string(&conn_path)), 
try!(path_to_string(&active_connection)))) } pub fn get_devices(&self) -> Result<Vec<String>, String> { self.dbus .property(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "Devices") } pub fn get_device_by_interface(&self, interface: &str) -> Result<String, String> { let response = try!(self.dbus .call_with_args(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "GetDeviceByIpIface", &[&interface.to_string() as &RefArg])); let path: Path = try!(self.dbus.extract(&response)); path_to_string(&path) } pub fn get_device_interface(&self, path: &str) -> Result<String, String> { self.dbus.property(path, NM_DEVICE_INTERFACE, "Interface") } pub fn get_device_type(&self, path: &str) -> Result<DeviceType, String> { self.dbus.property(path, NM_DEVICE_INTERFACE, "DeviceType") } pub fn get_device_state(&self, path: &str) -> Result<DeviceState, String> { self.dbus.property(path, NM_DEVICE_INTERFACE, "State") } pub fn connect_device(&self, path: &str) -> Result<(), String> { try!(self.dbus .call_with_args(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "ActivateConnection", &[&try!(Path::new("/")) as &RefArg, &try!(Path::new(path)) as &RefArg, &try!(Path::new("/")) as &RefArg])); Ok(()) } pub fn disconnect_device(&self, path: &str) -> Result<(), String> { try!(self.dbus.call(path, NM_DEVICE_INTERFACE, "Disconnect")); Ok(()) } pub fn get_device_access_points(&self, path: &str) -> Result<Vec<String>, String> { self.dbus .property(path, NM_WIRELESS_INTERFACE, "AccessPoints") } pub fn get_access_point_ssid(&self, path: &str) -> Option<String> { if let Ok(ssid_vec) = self.dbus.property(path, NM_ACCESS_POINT_INTERFACE, "Ssid") { utf8_vec_u8_to_string(ssid_vec).ok() } else { None } } pub fn get_access_point_strength(&self, path: &str) -> Result<u32, String> { self.dbus .property(path, NM_ACCESS_POINT_INTERFACE, "Strength") } pub fn get_access_point_flags(&self, path: &str) -> Result<NM80211ApFlags, String> { self.dbus.property(path, NM_ACCESS_POINT_INTERFACE, "Flags") } pub fn get_access_point_wpa_flags(&self, path: 
&str) -> Result<NM80211ApSecurityFlags, String> { self.dbus .property(path, NM_ACCESS_POINT_INTERFACE, "WpaFlags") } pub fn get_access_point_rsn_flags(&self, path: &str) -> Result<NM80211ApSecurityFlags, String> { self.dbus .property(path, NM_ACCESS_POINT_INTERFACE, "RsnFlags") } } impl VariantTo<DeviceType> for DBusApi { fn variant_to(value: Variant<Box<RefArg>>) -> Option<DeviceType> { variant_to_device_type(value) } } impl VariantTo<DeviceState> for DBusApi { fn variant_to(value: Variant<Box<RefArg>>) -> Option<DeviceState> { variant_to_device_state(value) } } impl VariantTo<NM80211ApFlags> for DBusApi { fn variant_to(value: Variant<Box<RefArg>>) -> Option<NM80211ApFlags> { variant_to_ap_flags(value) } } impl VariantTo<NM80211ApSecurityFlags> for DBusApi { fn variant_to(value: Variant<Box<RefArg>>) -> Option<NM80211ApSecurityFlags> { variant_to_ap_security_flags(value) } } fn variant_to_device_type(value: Variant<Box<RefArg>>) -> Option<DeviceType> { value.0.as_i64().and_then(|v| Some(DeviceType::from(v))) } fn variant_to_device_state(value: Variant<Box<RefArg>>) -> Option<DeviceState> { value.0.as_i64().and_then(|v| Some(DeviceState::from(v))) } fn variant_to_ap_flags(value: Variant<Box<RefArg>>) -> Option<NM80211ApFlags> { value .0 .as_i64() .and_then(|v| NM80211ApFlags::from_bits(v as u32)) } fn variant_to_ap_security_flags(value: Variant<Box<RefArg>>) -> Option<NM80211ApSecurityFlags> { value .0 .as_i64() .and_then(|v| NM80211ApSecurityFlags::from_bits(v as u32)) } pub fn add_val<T>(map: &mut SettingsMap, key: &str, value: T) where T: RefArg + 'static { map.insert(key.to_string(), Variant(Box::new(value))); } pub fn add_str(map: &mut SettingsMap, key: &str, value: &str) { map.insert(key.to_string(), Variant(Box::new(value.to_string()))); } Shortening variant conversions use std::collections::HashMap; use enum_primitive::FromPrimitive; use dbus::Path; use dbus::arg::{Dict, Variant, Iter, Array, RefArg}; use dbus_api::{DBusApi, extract, utf8_vec_u8_to_string, 
utf8_variant_to_string, string_to_utf8_vec_u8, path_to_string, VariantTo}; use manager::{Connectivity, NetworkManagerState}; use connection::{ConnectionSettings, ConnectionState}; use device::{DeviceType, DeviceState}; use wifi::{NM80211ApSecurityFlags, NM80211ApFlags, Security, WEP, NONE}; type SettingsMap = HashMap<String, Variant<Box<RefArg>>>; const NM_SERVICE_MANAGER: &'static str = "org.freedesktop.NetworkManager"; const NM_SERVICE_PATH: &'static str = "/org/freedesktop/NetworkManager"; const NM_SETTINGS_PATH: &'static str = "/org/freedesktop/NetworkManager/Settings"; const NM_SERVICE_INTERFACE: &'static str = "org.freedesktop.NetworkManager"; const NM_SETTINGS_INTERFACE: &'static str = "org.freedesktop.NetworkManager.Settings"; const NM_CONNECTION_INTERFACE: &'static str = "org.freedesktop.NetworkManager.Settings.\ Connection"; const NM_ACTIVE_INTERFACE: &'static str = "org.freedesktop.NetworkManager.Connection.Active"; const NM_DEVICE_INTERFACE: &'static str = "org.freedesktop.NetworkManager.Device"; const NM_WIRELESS_INTERFACE: &'static str = "org.freedesktop.NetworkManager.Device.Wireless"; const NM_ACCESS_POINT_INTERFACE: &'static str = "org.freedesktop.NetworkManager.AccessPoint"; const NM_WEP_KEY_TYPE_PASSPHRASE: u32 = 2; const UNKNOWN_CONNECTION: &'static str = "org.freedesktop.NetworkManager.UnknownConnection"; const METHOD_RETRY_ERROR_NAMES: &'static [&'static str; 1] = &[UNKNOWN_CONNECTION]; pub struct DBusNetworkManager { dbus: DBusApi, } impl DBusNetworkManager { pub fn new() -> Self { DBusNetworkManager { dbus: DBusApi::new(NM_SERVICE_MANAGER, METHOD_RETRY_ERROR_NAMES) } } pub fn method_timeout(&self) -> u64 { self.dbus.method_timeout() } pub fn set_method_timeout(&mut self, timeout: u64) { self.dbus.set_method_timeout(timeout); } pub fn get_state(&self) -> Result<NetworkManagerState, String> { let response = try!(self.dbus .call(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "state")); let state_u32: u32 = try!(self.dbus.extract(&response)); 
Ok(NetworkManagerState::from(state_u32)) } pub fn check_connectivity(&self) -> Result<Connectivity, String> { let response = try!(self.dbus .call(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "CheckConnectivity")); let connectivity_u32: u32 = try!(self.dbus.extract(&response)); Ok(Connectivity::from(connectivity_u32)) } pub fn is_wireless_enabled(&self) -> Result<bool, String> { self.dbus .property(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "WirelessEnabled") } pub fn is_networking_enabled(&self) -> Result<bool, String> { self.dbus .property(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "NetworkingEnabled") } pub fn list_connections(&self) -> Result<Vec<String>, String> { let response = try!(self.dbus .call(NM_SETTINGS_PATH, NM_SETTINGS_INTERFACE, "ListConnections")); let array: Array<Path, _> = try!(self.dbus.extract(&response)); Ok(array.map(|e| e.to_string()).collect()) } pub fn get_active_connections(&self) -> Result<Vec<String>, String> { self.dbus .property(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "ActiveConnections") } pub fn get_active_connection_path(&self, path: &str) -> Option<String> { self.dbus .property(path, NM_ACTIVE_INTERFACE, "Connection") .ok() } pub fn get_connection_state(&self, path: &str) -> Result<ConnectionState, String> { let state_i64 = match self.dbus.property(path, NM_ACTIVE_INTERFACE, "State") { Ok(state_i64) => state_i64, Err(_) => return Ok(ConnectionState::Unknown), }; ConnectionState::from_i64(state_i64) .ok_or(format!("Undefined connection state for {}", path)) } pub fn get_connection_settings(&self, path: &str) -> Result<ConnectionSettings, String> { let response = try!(self.dbus.call(path, NM_CONNECTION_INTERFACE, "GetSettings")); let dict: Dict<&str, Dict<&str, Variant<Iter>, _>, _> = try!(self.dbus.extract(&response)); let mut id = String::new(); let mut uuid = String::new(); let mut ssid = String::new(); for (_, v1) in dict { for (k2, v2) in v1 { match k2 { "id" => { id = try!(extract::<String>(&v2)); } "uuid" => { uuid = 
try!(extract::<String>(&v2)); } "ssid" => { ssid = try!(utf8_variant_to_string(&v2)); } _ => {} } } } Ok(ConnectionSettings { id: id, uuid: uuid, ssid: ssid, }) } pub fn get_active_connection_devices(&self, path: &str) -> Result<Vec<String>, String> { self.dbus.property(path, NM_ACTIVE_INTERFACE, "Devices") } pub fn delete_connection(&self, path: &str) -> Result<(), String> { try!(self.dbus.call(path, NM_CONNECTION_INTERFACE, "Delete")); Ok(()) } pub fn activate_connection(&self, path: &str) -> Result<(), String> { try!(self.dbus .call_with_args(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "ActivateConnection", &[&try!(Path::new(path)) as &RefArg, &try!(Path::new("/")) as &RefArg, &try!(Path::new("/")) as &RefArg])); Ok(()) } pub fn deactivate_connection(&self, path: &str) -> Result<(), String> { try!(self.dbus .call_with_args(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "DeactivateConnection", &[&try!(Path::new(path)) as &RefArg])); Ok(()) } pub fn add_and_activate_connection(&self, device_path: &str, ap_path: &str, ssid: &str, security: &Security, password: &str) -> Result<(String, String), String> { let mut settings: HashMap<String, SettingsMap> = HashMap::new(); let mut wireless: SettingsMap = HashMap::new(); add_val(&mut wireless, "ssid", string_to_utf8_vec_u8(&ssid.to_string())); settings.insert("802-11-wireless".to_string(), wireless); if *security != NONE { let mut security_settings: SettingsMap = HashMap::new(); if security.contains(WEP) { add_val(&mut security_settings, "wep-key-type", NM_WEP_KEY_TYPE_PASSPHRASE); add_str(&mut security_settings, "wep-key0", password); } else { add_str(&mut security_settings, "key-mgmt", "wpa-psk"); add_str(&mut security_settings, "psk", password); }; settings.insert("802-11-wireless-security".to_string(), security_settings); } let response = try!(self.dbus .call_with_args(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "AddAndActivateConnection", &[&settings as &RefArg, &try!(Path::new(device_path.to_string())) as &RefArg, 
&try!(Path::new(ap_path.to_string())) as &RefArg])); let (conn_path, active_connection): (Path, Path) = try!(self.dbus.extract_two(&response)); Ok((try!(path_to_string(&conn_path)), try!(path_to_string(&active_connection)))) } pub fn create_hotspot(&self, device_path: &str, interface: &str, ssid: &str, password: Option<&str>) -> Result<(String, String), String> { let mut wireless: SettingsMap = HashMap::new(); add_val(&mut wireless, "ssid", string_to_utf8_vec_u8(&ssid.to_string())); add_str(&mut wireless, "band", "bg"); add_val(&mut wireless, "hidden", false); add_str(&mut wireless, "mode", "ap"); let mut connection: SettingsMap = HashMap::new(); add_val(&mut connection, "autoconnect", false); add_str(&mut connection, "id", ssid); add_str(&mut connection, "interface-name", interface); add_str(&mut connection, "type", "802-11-wireless"); let mut ipv4: SettingsMap = HashMap::new(); add_str(&mut ipv4, "method", "shared"); let mut settings: HashMap<String, SettingsMap> = HashMap::new(); if let Some(password) = password { add_str(&mut wireless, "security", "802-11-wireless-security"); let mut security: SettingsMap = HashMap::new(); add_str(&mut security, "key-mgmt", "wpa-psk"); add_str(&mut security, "psk", &password); settings.insert("802-11-wireless-security".to_string(), security); } settings.insert("802-11-wireless".to_string(), wireless); settings.insert("connection".to_string(), connection); settings.insert("ipv4".to_string(), ipv4); let response = try!(self.dbus .call_with_args(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "AddAndActivateConnection", &[&settings as &RefArg, &try!(Path::new(device_path.clone())) as &RefArg, &try!(Path::new("/")) as &RefArg])); let (conn_path, active_connection): (Path, Path) = try!(self.dbus.extract_two(&response)); Ok((try!(path_to_string(&conn_path)), try!(path_to_string(&active_connection)))) } pub fn get_devices(&self) -> Result<Vec<String>, String> { self.dbus .property(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "Devices") } pub fn 
get_device_by_interface(&self, interface: &str) -> Result<String, String> { let response = try!(self.dbus .call_with_args(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "GetDeviceByIpIface", &[&interface.to_string() as &RefArg])); let path: Path = try!(self.dbus.extract(&response)); path_to_string(&path) } pub fn get_device_interface(&self, path: &str) -> Result<String, String> { self.dbus.property(path, NM_DEVICE_INTERFACE, "Interface") } pub fn get_device_type(&self, path: &str) -> Result<DeviceType, String> { self.dbus.property(path, NM_DEVICE_INTERFACE, "DeviceType") } pub fn get_device_state(&self, path: &str) -> Result<DeviceState, String> { self.dbus.property(path, NM_DEVICE_INTERFACE, "State") } pub fn connect_device(&self, path: &str) -> Result<(), String> { try!(self.dbus .call_with_args(NM_SERVICE_PATH, NM_SERVICE_INTERFACE, "ActivateConnection", &[&try!(Path::new("/")) as &RefArg, &try!(Path::new(path)) as &RefArg, &try!(Path::new("/")) as &RefArg])); Ok(()) } pub fn disconnect_device(&self, path: &str) -> Result<(), String> { try!(self.dbus.call(path, NM_DEVICE_INTERFACE, "Disconnect")); Ok(()) } pub fn get_device_access_points(&self, path: &str) -> Result<Vec<String>, String> { self.dbus .property(path, NM_WIRELESS_INTERFACE, "AccessPoints") } pub fn get_access_point_ssid(&self, path: &str) -> Option<String> { if let Ok(ssid_vec) = self.dbus.property(path, NM_ACCESS_POINT_INTERFACE, "Ssid") { utf8_vec_u8_to_string(ssid_vec).ok() } else { None } } pub fn get_access_point_strength(&self, path: &str) -> Result<u32, String> { self.dbus .property(path, NM_ACCESS_POINT_INTERFACE, "Strength") } pub fn get_access_point_flags(&self, path: &str) -> Result<NM80211ApFlags, String> { self.dbus.property(path, NM_ACCESS_POINT_INTERFACE, "Flags") } pub fn get_access_point_wpa_flags(&self, path: &str) -> Result<NM80211ApSecurityFlags, String> { self.dbus .property(path, NM_ACCESS_POINT_INTERFACE, "WpaFlags") } pub fn get_access_point_rsn_flags(&self, path: &str) -> 
Result<NM80211ApSecurityFlags, String> { self.dbus .property(path, NM_ACCESS_POINT_INTERFACE, "RsnFlags") } } impl VariantTo<DeviceType> for DBusApi { fn variant_to(value: Variant<Box<RefArg>>) -> Option<DeviceType> { variant_to_device_type(value) } } impl VariantTo<DeviceState> for DBusApi { fn variant_to(value: Variant<Box<RefArg>>) -> Option<DeviceState> { variant_to_device_state(value) } } impl VariantTo<NM80211ApFlags> for DBusApi { fn variant_to(value: Variant<Box<RefArg>>) -> Option<NM80211ApFlags> { variant_to_ap_flags(value) } } impl VariantTo<NM80211ApSecurityFlags> for DBusApi { fn variant_to(value: Variant<Box<RefArg>>) -> Option<NM80211ApSecurityFlags> { variant_to_ap_security_flags(value) } } fn variant_to_device_type(value: Variant<Box<RefArg>>) -> Option<DeviceType> { value.0.as_i64().map(DeviceType::from) } fn variant_to_device_state(value: Variant<Box<RefArg>>) -> Option<DeviceState> { value.0.as_i64().map(DeviceState::from) } fn variant_to_ap_flags(value: Variant<Box<RefArg>>) -> Option<NM80211ApFlags> { value .0 .as_i64() .and_then(|v| NM80211ApFlags::from_bits(v as u32)) } fn variant_to_ap_security_flags(value: Variant<Box<RefArg>>) -> Option<NM80211ApSecurityFlags> { value .0 .as_i64() .and_then(|v| NM80211ApSecurityFlags::from_bits(v as u32)) } pub fn add_val<T>(map: &mut SettingsMap, key: &str, value: T) where T: RefArg + 'static { map.insert(key.to_string(), Variant(Box::new(value))); } pub fn add_str(map: &mut SettingsMap, key: &str, value: &str) { map.insert(key.to_string(), Variant(Box::new(value.to_string()))); }
use crate::Error; use alloc::{string::String, vec::Vec}; use arrayvec::{ArrayString, ArrayVec}; use core::{ cmp::{Ordering::Equal, *}, fmt, hash::{Hash, Hasher}, iter::Sum, ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Rem, RemAssign, Sub, SubAssign}, str::FromStr, }; #[cfg(feature = "diesel")] use diesel::sql_types::Numeric; #[allow(unused_imports)] // It's not actually dead code below, but the compiler thinks it is. #[cfg(not(feature = "std"))] use num_traits::float::FloatCore; use num_traits::{FromPrimitive, Num, One, Signed, ToPrimitive, Zero}; // Sign mask for the flags field. A value of zero in this bit indicates a // positive Decimal value, and a value of one in this bit indicates a // negative Decimal value. const SIGN_MASK: u32 = 0x8000_0000; const UNSIGN_MASK: u32 = 0x4FFF_FFFF; // Scale mask for the flags field. This byte in the flags field contains // the power of 10 to divide the Decimal value by. The scale byte must // contain a value between 0 and 28 inclusive. const SCALE_MASK: u32 = 0x00FF_0000; const U8_MASK: u32 = 0x0000_00FF; const U32_MASK: u64 = 0xFFFF_FFFF; // Number of bits scale is shifted by. const SCALE_SHIFT: u32 = 16; // Number of bits sign is shifted by. const SIGN_SHIFT: u32 = 31; // The maximum string buffer size used for serialization purposes. 31 is optimal, however we align // to the byte boundary for simplicity. 
const MAX_STR_BUFFER_SIZE: usize = 32; // The maximum supported precision pub(crate) const MAX_PRECISION: u32 = 28; #[cfg(not(feature = "legacy-ops"))] const MAX_PRECISION_I32: i32 = 28; // 79,228,162,514,264,337,593,543,950,335 const MAX_I128_REPR: i128 = 0x0000_0000_FFFF_FFFF_FFFF_FFFF_FFFF_FFFF; const MIN: Decimal = Decimal { flags: 2_147_483_648, lo: 4_294_967_295, mid: 4_294_967_295, hi: 4_294_967_295, }; const MAX: Decimal = Decimal { flags: 0, lo: 4_294_967_295, mid: 4_294_967_295, hi: 4_294_967_295, }; // Fast access for 10^n where n is 0-9 const POWERS_10: [u32; 10] = [ 1, 10, 100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000, ]; // Fast access for 10^n where n is 10-19 #[allow(dead_code)] const BIG_POWERS_10: [u64; 10] = [ 10_000_000_000, 100_000_000_000, 1_000_000_000_000, 10_000_000_000_000, 100_000_000_000_000, 1_000_000_000_000_000, 10_000_000_000_000_000, 100_000_000_000_000_000, 1_000_000_000_000_000_000, 10_000_000_000_000_000_000, ]; /// `UnpackedDecimal` contains unpacked representation of `Decimal` where each component /// of decimal-format stored in it's own field #[derive(Clone, Copy, Debug)] pub struct UnpackedDecimal { pub is_negative: bool, pub scale: u32, pub hi: u32, pub mid: u32, pub lo: u32, } /// `Decimal` represents a 128 bit representation of a fixed-precision decimal number. /// The finite set of values of type `Decimal` are of the form m / 10<sup>e</sup>, /// where m is an integer such that -2<sup>96</sup> < m < 2<sup>96</sup>, and e is an integer /// between 0 and 28 inclusive. #[derive(Clone, Copy)] #[cfg_attr(feature = "diesel", derive(FromSqlRow, AsExpression), sql_type = "Numeric")] pub struct Decimal { // Bits 0-15: unused // Bits 16-23: Contains "e", a value between 0-28 that indicates the scale // Bits 24-30: unused // Bit 31: the sign of the Decimal value, 0 meaning positive and 1 meaning negative. 
flags: u32, // The lo, mid, hi, and flags fields contain the representation of the // Decimal value as a 96-bit integer. hi: u32, lo: u32, mid: u32, } /// `RoundingStrategy` represents the different strategies that can be used by /// `round_dp_with_strategy`. /// /// `RoundingStrategy::BankersRounding` - Rounds toward the nearest even number, e.g. 6.5 -> 6, 7.5 -> 8 /// `RoundingStrategy::RoundHalfUp` - Rounds up if the value >= 5, otherwise rounds down, e.g. 6.5 -> 7, /// `RoundingStrategy::RoundHalfDown` - Rounds down if the value =< 5, otherwise rounds up, e.g. /// 6.5 -> 6, 6.51 -> 7 /// 1.4999999 -> 1 /// `RoundingStrategy::RoundDown` - Always round down. /// `RoundingStrategy::RoundUp` - Always round up. #[derive(Clone, Copy, PartialEq, Eq)] pub enum RoundingStrategy { BankersRounding, RoundHalfUp, RoundHalfDown, RoundDown, RoundUp, } #[allow(dead_code)] impl Decimal { /// Returns a `Decimal` with a 64 bit `m` representation and corresponding `e` scale. /// /// # Arguments /// /// * `num` - An i64 that represents the `m` portion of the decimal number /// * `scale` - A u32 representing the `e` portion of the decimal number. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let pi = Decimal::new(3141, 3); /// assert_eq!(pi.to_string(), "3.141"); /// ``` pub fn new(num: i64, scale: u32) -> Decimal { if scale > MAX_PRECISION { panic!( "Scale exceeds the maximum precision allowed: {} > {}", scale, MAX_PRECISION ); } let flags: u32 = scale << SCALE_SHIFT; if num < 0 { let pos_num = num.wrapping_neg() as u64; return Decimal { flags: flags | SIGN_MASK, hi: 0, lo: (pos_num & U32_MASK) as u32, mid: ((pos_num >> 32) & U32_MASK) as u32, }; } Decimal { flags, hi: 0, lo: (num as u64 & U32_MASK) as u32, mid: ((num as u64 >> 32) & U32_MASK) as u32, } } /// Creates a `Decimal` using a 128 bit signed `m` representation and corresponding `e` scale. 
/// /// # Arguments /// /// * `num` - An i128 that represents the `m` portion of the decimal number /// * `scale` - A u32 representing the `e` portion of the decimal number. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let pi = Decimal::from_i128_with_scale(3141i128, 3); /// assert_eq!(pi.to_string(), "3.141"); /// ``` pub fn from_i128_with_scale(num: i128, scale: u32) -> Decimal { if scale > MAX_PRECISION { panic!( "Scale exceeds the maximum precision allowed: {} > {}", scale, MAX_PRECISION ); } let mut neg = false; let mut wrapped = num; if num > MAX_I128_REPR { panic!("Number exceeds maximum value that can be represented"); } else if num < -MAX_I128_REPR { panic!("Number less than minimum value that can be represented"); } else if num < 0 { neg = true; wrapped = -num; } let flags: u32 = flags(neg, scale); Decimal { flags, lo: (wrapped as u64 & U32_MASK) as u32, mid: ((wrapped as u64 >> 32) & U32_MASK) as u32, hi: ((wrapped as u128 >> 64) as u64 & U32_MASK) as u32, } } /// Returns a `Decimal` using the instances constituent parts. /// /// # Arguments /// /// * `lo` - The low 32 bits of a 96-bit integer. /// * `mid` - The middle 32 bits of a 96-bit integer. /// * `hi` - The high 32 bits of a 96-bit integer. /// * `negative` - `true` to indicate a negative number. /// * `scale` - A power of 10 ranging from 0 to 28. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let pi = Decimal::from_parts(1102470952, 185874565, 1703060790, false, 28); /// assert_eq!(pi.to_string(), "3.1415926535897932384626433832"); /// ``` pub const fn from_parts(lo: u32, mid: u32, hi: u32, negative: bool, scale: u32) -> Decimal { Decimal { lo, mid, hi, flags: flags(negative, scale), } } pub(crate) const fn from_parts_raw(lo: u32, mid: u32, hi: u32, flags: u32) -> Decimal { Decimal { lo, mid, hi, flags } } /// Returns a `Result` which if successful contains the `Decimal` constitution of /// the scientific notation provided by `value`. 
/// /// # Arguments /// /// * `value` - The scientific notation of the `Decimal`. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let value = Decimal::from_scientific("9.7e-7").unwrap(); /// assert_eq!(value.to_string(), "0.00000097"); /// ``` pub fn from_scientific(value: &str) -> Result<Decimal, Error> { let err = Error::new("Failed to parse"); let mut split = value.splitn(2, |c| c == 'e' || c == 'E'); let base = split.next().ok_or_else(|| err.clone())?; let exp = split.next().ok_or_else(|| err.clone())?; let mut ret = Decimal::from_str(base)?; let current_scale = ret.scale(); if exp.starts_with('-') { let exp: u32 = exp[1..].parse().map_err(move |_| err)?; ret.set_scale(current_scale + exp)?; } else { let exp: u32 = exp.parse().map_err(move |_| err)?; if exp <= current_scale { ret.set_scale(current_scale - exp)?; } else { ret *= Decimal::from_i64(10_i64.pow(exp)).unwrap(); ret = ret.normalize(); } } Ok(ret) } /// Returns the scale of the decimal number, otherwise known as `e`. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let num = Decimal::new(1234, 3); /// assert_eq!(num.scale(), 3u32); /// ``` #[inline] pub const fn scale(&self) -> u32 { ((self.flags & SCALE_MASK) >> SCALE_SHIFT) as u32 } /// An optimized method for changing the sign of a decimal number. /// /// # Arguments /// /// * `positive`: true if the resulting decimal should be positive. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let mut one = Decimal::new(1, 0); /// one.set_sign(false); /// assert_eq!(one.to_string(), "-1"); /// ``` #[deprecated(since = "1.4.0", note = "please use `set_sign_positive` instead")] pub fn set_sign(&mut self, positive: bool) { self.set_sign_positive(positive); } /// An optimized method for changing the sign of a decimal number. /// /// # Arguments /// /// * `positive`: true if the resulting decimal should be positive. 
/// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let mut one = Decimal::new(1, 0); /// one.set_sign_positive(false); /// assert_eq!(one.to_string(), "-1"); /// ``` #[inline(always)] pub fn set_sign_positive(&mut self, positive: bool) { if positive { self.flags &= UNSIGN_MASK; } else { self.flags |= SIGN_MASK; } } /// An optimized method for changing the sign of a decimal number. /// /// # Arguments /// /// * `negative`: true if the resulting decimal should be negative. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let mut one = Decimal::new(1, 0); /// one.set_sign_negative(true); /// assert_eq!(one.to_string(), "-1"); /// ``` #[inline(always)] pub fn set_sign_negative(&mut self, negative: bool) { self.set_sign_positive(!negative); } /// An optimized method for changing the scale of a decimal number. /// /// # Arguments /// /// * `scale`: the new scale of the number /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let mut one = Decimal::new(1, 0); /// one.set_scale(5); /// assert_eq!(one.to_string(), "0.00001"); /// ``` pub fn set_scale(&mut self, scale: u32) -> Result<(), Error> { if scale > MAX_PRECISION { return Err(Error::new("Scale exceeds maximum precision")); } self.flags = (scale << SCALE_SHIFT) | (self.flags & SIGN_MASK); Ok(()) } /// Modifies the `Decimal` to the given scale, attempting to do so without changing the /// underlying number itself. /// /// Note that setting the scale to something less then the current `Decimal`s scale will /// cause the newly created `Decimal` to have some rounding. /// Scales greater than the maximum precision supported by `Decimal` will be automatically /// rounded to `Decimal::MAX_PRECISION`. /// Rounding leverages the half up strategy. /// /// # Arguments /// * `scale`: The scale to use for the new `Decimal` number. 
/// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let mut number = Decimal::new(1_123, 3); /// number.rescale(6); /// assert_eq!(number, Decimal::new(1_123_000, 6)); /// let mut round = Decimal::new(145, 2); /// round.rescale(1); /// assert_eq!(round, Decimal::new(15, 1)); /// ``` pub fn rescale(&mut self, scale: u32) { let mut array = [self.lo, self.mid, self.hi]; let mut value_scale = self.scale(); rescale_internal(&mut array, &mut value_scale, scale); self.lo = array[0]; self.mid = array[1]; self.hi = array[2]; self.flags = flags(self.is_sign_negative(), value_scale); } /// Returns a serialized version of the decimal number. /// The resulting byte array will have the following representation: /// /// * Bytes 1-4: flags /// * Bytes 5-8: lo portion of `m` /// * Bytes 9-12: mid portion of `m` /// * Bytes 13-16: high portion of `m` pub const fn serialize(&self) -> [u8; 16] { [ (self.flags & U8_MASK) as u8, ((self.flags >> 8) & U8_MASK) as u8, ((self.flags >> 16) & U8_MASK) as u8, ((self.flags >> 24) & U8_MASK) as u8, (self.lo & U8_MASK) as u8, ((self.lo >> 8) & U8_MASK) as u8, ((self.lo >> 16) & U8_MASK) as u8, ((self.lo >> 24) & U8_MASK) as u8, (self.mid & U8_MASK) as u8, ((self.mid >> 8) & U8_MASK) as u8, ((self.mid >> 16) & U8_MASK) as u8, ((self.mid >> 24) & U8_MASK) as u8, (self.hi & U8_MASK) as u8, ((self.hi >> 8) & U8_MASK) as u8, ((self.hi >> 16) & U8_MASK) as u8, ((self.hi >> 24) & U8_MASK) as u8, ] } /// Deserializes the given bytes into a decimal number. 
/// The deserialized byte representation must be 16 bytes and adhere to the followign convention: /// /// * Bytes 1-4: flags /// * Bytes 5-8: lo portion of `m` /// * Bytes 9-12: mid portion of `m` /// * Bytes 13-16: high portion of `m` pub const fn deserialize(bytes: [u8; 16]) -> Decimal { Decimal { flags: (bytes[0] as u32) | (bytes[1] as u32) << 8 | (bytes[2] as u32) << 16 | (bytes[3] as u32) << 24, lo: (bytes[4] as u32) | (bytes[5] as u32) << 8 | (bytes[6] as u32) << 16 | (bytes[7] as u32) << 24, mid: (bytes[8] as u32) | (bytes[9] as u32) << 8 | (bytes[10] as u32) << 16 | (bytes[11] as u32) << 24, hi: (bytes[12] as u32) | (bytes[13] as u32) << 8 | (bytes[14] as u32) << 16 | (bytes[15] as u32) << 24, } } /// Returns `true` if the decimal is negative. #[deprecated(since = "0.6.3", note = "please use `is_sign_negative` instead")] pub fn is_negative(&self) -> bool { self.is_sign_negative() } /// Returns `true` if the decimal is positive. #[deprecated(since = "0.6.3", note = "please use `is_sign_positive` instead")] pub fn is_positive(&self) -> bool { self.is_sign_positive() } /// Returns `true` if the sign bit of the decimal is negative. #[inline(always)] pub const fn is_sign_negative(&self) -> bool { self.flags & SIGN_MASK > 0 } /// Returns `true` if the sign bit of the decimal is positive. #[inline(always)] pub const fn is_sign_positive(&self) -> bool { self.flags & SIGN_MASK == 0 } /// Returns the minimum possible number that `Decimal` can represent. pub const fn min_value() -> Decimal { MIN } /// Returns the maximum possible number that `Decimal` can represent. pub const fn max_value() -> Decimal { MAX } /// Returns a new `Decimal` integral with no fractional portion. /// This is a true truncation whereby no rounding is performed. 
/// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let pi = Decimal::new(3141, 3); /// let trunc = Decimal::new(3, 0); /// // note that it returns a decimal /// assert_eq!(pi.trunc(), trunc); /// ``` pub fn trunc(&self) -> Decimal { let mut scale = self.scale(); if scale == 0 { // Nothing to do return *self; } let mut working = [self.lo, self.mid, self.hi]; while scale > 0 { // We're removing precision, so we don't care about overflow if scale < 10 { div_by_u32(&mut working, POWERS_10[scale as usize]); break; } else { div_by_u32(&mut working, POWERS_10[9]); // Only 9 as this array starts with 1 scale -= 9; } } Decimal { lo: working[0], mid: working[1], hi: working[2], flags: flags(self.is_sign_negative(), 0), } } /// Returns a new `Decimal` representing the fractional portion of the number. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let pi = Decimal::new(3141, 3); /// let fract = Decimal::new(141, 3); /// // note that it returns a decimal /// assert_eq!(pi.fract(), fract); /// ``` pub fn fract(&self) -> Decimal { // This is essentially the original number minus the integral. // Could possibly be optimized in the future *self - self.trunc() } /// Computes the absolute value of `self`. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let num = Decimal::new(-3141, 3); /// assert_eq!(num.abs().to_string(), "3.141"); /// ``` pub fn abs(&self) -> Decimal { let mut me = *self; me.set_sign_positive(true); me } /// Returns the largest integer less than or equal to a number. 
/// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let num = Decimal::new(3641, 3); /// assert_eq!(num.floor().to_string(), "3"); /// ``` pub fn floor(&self) -> Decimal { let scale = self.scale(); if scale == 0 { // Nothing to do return *self; } // Opportunity for optimization here let floored = self.trunc(); if self.is_sign_negative() && !self.fract().is_zero() { floored - Decimal::one() } else { floored } } /// Returns the smallest integer greater than or equal to a number. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let num = Decimal::new(3141, 3); /// assert_eq!(num.ceil().to_string(), "4"); /// let num = Decimal::new(3, 0); /// assert_eq!(num.ceil().to_string(), "3"); /// ``` pub fn ceil(&self) -> Decimal { let scale = self.scale(); if scale == 0 { // Nothing to do return *self; } // Opportunity for optimization here if self.is_sign_positive() && !self.fract().is_zero() { self.trunc() + Decimal::one() } else { self.trunc() } } /// Returns the maximum of the two numbers. /// /// ``` /// use rust_decimal::Decimal; /// /// let x = Decimal::new(1, 0); /// let y = Decimal::new(2, 0); /// assert_eq!(y, x.max(y)); /// ``` pub fn max(self, other: Decimal) -> Decimal { if self < other { other } else { self } } /// Returns the minimum of the two numbers. /// /// ``` /// use rust_decimal::Decimal; /// /// let x = Decimal::new(1, 0); /// let y = Decimal::new(2, 0); /// assert_eq!(x, x.min(y)); /// ``` pub fn min(self, other: Decimal) -> Decimal { if self > other { other } else { self } } /// Strips any trailing zero's from a `Decimal` and converts -0 to 0. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let number = Decimal::new(3100, 3); /// // note that it returns a decimal, without the extra scale /// assert_eq!(number.normalize().to_string(), "3.1"); /// ``` pub fn normalize(&self) -> Decimal { if self.is_zero() { // Convert -0, -0.0*, or 0.0* to 0. 
return Decimal::zero(); } let mut scale = self.scale(); if scale == 0 { // Nothing to do return *self; } let mut result = [self.lo, self.mid, self.hi]; let mut working = [self.lo, self.mid, self.hi]; while scale > 0 { if div_by_u32(&mut working, 10) > 0 { break; } scale -= 1; result.copy_from_slice(&working); } Decimal { lo: result[0], mid: result[1], hi: result[2], flags: flags(self.is_sign_negative(), scale), } } /// Returns a new `Decimal` number with no fractional portion (i.e. an integer). /// Rounding currently follows "Bankers Rounding" rules. e.g. 6.5 -> 6, 7.5 -> 8 /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// // Demonstrating bankers rounding... /// let number_down = Decimal::new(65, 1); /// let number_up = Decimal::new(75, 1); /// assert_eq!(number_down.round().to_string(), "6"); /// assert_eq!(number_up.round().to_string(), "8"); /// ``` pub fn round(&self) -> Decimal { self.round_dp(0) } /// Returns a new `Decimal` number with the specified number of decimal points for fractional /// portion. /// Rounding is performed using the provided [`RoundingStrategy`] /// /// # Arguments /// * `dp`: the number of decimal points to round to. /// * `strategy`: the [`RoundingStrategy`] to use. /// /// # Example /// /// ``` /// use rust_decimal::{Decimal, RoundingStrategy}; /// use core::str::FromStr; /// /// let tax = Decimal::from_str("3.4395").unwrap(); /// assert_eq!(tax.round_dp_with_strategy(2, RoundingStrategy::RoundHalfUp).to_string(), "3.44"); /// ``` pub fn round_dp_with_strategy(&self, dp: u32, strategy: RoundingStrategy) -> Decimal { // Short circuit for zero if self.is_zero() { return Decimal { lo: 0, mid: 0, hi: 0, flags: flags(self.is_sign_negative(), dp), }; } let old_scale = self.scale(); // return early if decimal has a smaller number of fractional places than dp // e.g. 
2.51 rounded to 3 decimal places is 2.51 if old_scale <= dp { return *self; } let mut value = [self.lo, self.mid, self.hi]; let mut value_scale = self.scale(); let negative = self.is_sign_negative(); value_scale -= dp; // Rescale to zero so it's easier to work with while value_scale > 0 { if value_scale < 10 { div_by_u32(&mut value, POWERS_10[value_scale as usize]); value_scale = 0; } else { div_by_u32(&mut value, POWERS_10[9]); value_scale -= 9; } } // Do some midpoint rounding checks // We're actually doing two things here. // 1. Figuring out midpoint rounding when we're right on the boundary. e.g. 2.50000 // 2. Figuring out whether to add one or not e.g. 2.51 // For this, we need to figure out the fractional portion that is additional to // the rounded number. e.g. for 0.12345 rounding to 2dp we'd want 345. // We're doing the equivalent of losing precision (e.g. to get 0.12) // then increasing the precision back up to 0.12000 let mut offset = [self.lo, self.mid, self.hi]; let mut diff = old_scale - dp; while diff > 0 { if diff < 10 { div_by_u32(&mut offset, POWERS_10[diff as usize]); break; } else { div_by_u32(&mut offset, POWERS_10[9]); // Only 9 as this array starts with 1 diff -= 9; } } let mut diff = old_scale - dp; while diff > 0 { if diff < 10 { mul_by_u32(&mut offset, POWERS_10[diff as usize]); break; } else { mul_by_u32(&mut offset, POWERS_10[9]); // Only 9 as this array starts with 1 diff -= 9; } } let mut decimal_portion = [self.lo, self.mid, self.hi]; sub_by_internal(&mut decimal_portion, &offset); // If the decimal_portion is zero then we round based on the other data let mut cap = [5, 0, 0]; for _ in 0..(old_scale - dp - 1) { mul_by_u32(&mut cap, 10); } let order = cmp_internal(&decimal_portion, &cap); match strategy { RoundingStrategy::BankersRounding => { match order { Ordering::Equal => { if (value[0] & 1) == 1 { add_one_internal(&mut value); } } Ordering::Greater => { // Doesn't matter about the decimal portion add_one_internal(&mut value); } _ 
=> {} } } RoundingStrategy::RoundHalfDown => { if let Ordering::Greater = order { add_one_internal(&mut value); } } RoundingStrategy::RoundHalfUp => { // when Ordering::Equal, decimal_portion is 0.5 exactly // when Ordering::Greater, decimal_portion is > 0.5 match order { Ordering::Equal => { add_one_internal(&mut value); } Ordering::Greater => { // Doesn't matter about the decimal portion add_one_internal(&mut value); } _ => {} } } RoundingStrategy::RoundUp => { if !is_all_zero(&decimal_portion) { add_one_internal(&mut value); } } RoundingStrategy::RoundDown => (), } Decimal { lo: value[0], mid: value[1], hi: value[2], flags: flags(negative, dp), } } /// Returns a new `Decimal` number with the specified number of decimal points for fractional portion. /// Rounding currently follows "Bankers Rounding" rules. e.g. 6.5 -> 6, 7.5 -> 8 /// /// # Arguments /// * `dp`: the number of decimal points to round to. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// use core::str::FromStr; /// /// let pi = Decimal::from_str("3.1415926535897932384626433832").unwrap(); /// assert_eq!(pi.round_dp(2).to_string(), "3.14"); /// ``` pub fn round_dp(&self, dp: u32) -> Decimal { self.round_dp_with_strategy(dp, RoundingStrategy::BankersRounding) } /// Convert `Decimal` to an internal representation of the underlying struct. This is useful /// for debugging the internal state of the object. /// /// # Important Disclaimer /// This is primarily intended for library maintainers. The internal representation of a /// `Decimal` is considered "unstable" for public use. 
/// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// use core::str::FromStr; /// /// let pi = Decimal::from_str("3.1415926535897932384626433832").unwrap(); /// assert_eq!(format!("{:?}", pi), "3.1415926535897932384626433832"); /// assert_eq!(format!("{:?}", pi.unpack()), "UnpackedDecimal { \ /// is_negative: false, scale: 28, hi: 1703060790, mid: 185874565, lo: 1102470952 \ /// }"); /// ``` pub const fn unpack(&self) -> UnpackedDecimal { UnpackedDecimal { is_negative: self.is_sign_negative(), scale: self.scale(), hi: self.hi, lo: self.lo, mid: self.mid, } } /// Convert `Decimal` to an internal representation of the underlying struct. This is useful /// for debugging the internal state of the object. /// /// # Important Disclaimer /// This is primarily intended for library maintainers. The internal representation of a /// `Decimal` is considered "unstable" for public use. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// use core::str::FromStr; /// /// let pi = Decimal::from_str("3.1415926535897932384626433832").unwrap(); /// assert_eq!(format!("{:?}", pi), "3.1415926535897932384626433832"); /// assert_eq!(format!("{:?}", pi.unpack()), "UnpackedDecimal { \ /// is_negative: false, scale: 28, hi: 1703060790, mid: 185874565, lo: 1102470952 \ /// }"); /// ``` #[inline(always)] pub(crate) const fn mantissa_array3(&self) -> [u32; 3] { [self.lo, self.mid, self.hi] } #[inline(always)] pub(crate) const fn mantissa_array4(&self) -> [u32; 4] { [self.lo, self.mid, self.hi, 0] } fn base2_to_decimal(bits: &mut [u32; 3], exponent2: i32, positive: bool, is64: bool) -> Option<Self> { // 2^exponent2 = (10^exponent2)/(5^exponent2) // = (5^-exponent2)*(10^exponent2) let mut exponent5 = -exponent2; let mut exponent10 = exponent2; // Ultimately, we want this for the scale while exponent5 > 0 { // Check to see if the mantissa is divisible by 2 if bits[0] & 0x1 == 0 { exponent10 += 1; exponent5 -= 1; // We can divide by 2 without losing precision let hi_carry = 
bits[2] & 0x1 == 1; bits[2] >>= 1; let mid_carry = bits[1] & 0x1 == 1; bits[1] = (bits[1] >> 1) | if hi_carry { SIGN_MASK } else { 0 }; bits[0] = (bits[0] >> 1) | if mid_carry { SIGN_MASK } else { 0 }; } else { // The mantissa is NOT divisible by 2. Therefore the mantissa should // be multiplied by 5, unless the multiplication overflows. exponent5 -= 1; let mut temp = [bits[0], bits[1], bits[2]]; if mul_by_u32(&mut temp, 5) == 0 { // Multiplication succeeded without overflow, so copy result back bits[0] = temp[0]; bits[1] = temp[1]; bits[2] = temp[2]; } else { // Multiplication by 5 overflows. The mantissa should be divided // by 2, and therefore will lose significant digits. exponent10 += 1; // Shift right let hi_carry = bits[2] & 0x1 == 1; bits[2] >>= 1; let mid_carry = bits[1] & 0x1 == 1; bits[1] = (bits[1] >> 1) | if hi_carry { SIGN_MASK } else { 0 }; bits[0] = (bits[0] >> 1) | if mid_carry { SIGN_MASK } else { 0 }; } } } // In order to divide the value by 5, it is best to multiply by 2/10. // Therefore, exponent10 is decremented, and the mantissa should be multiplied by 2 while exponent5 < 0 { if bits[2] & SIGN_MASK == 0 { // No far left bit, the mantissa can withstand a shift-left without overflowing exponent10 -= 1; exponent5 += 1; shl1_internal(bits, 0); } else { // The mantissa would overflow if shifted. Therefore it should be // directly divided by 5. This will lose significant digits, unless // by chance the mantissa happens to be divisible by 5. exponent5 += 1; div_by_u32(bits, 5); } } // At this point, the mantissa has assimilated the exponent5, but // exponent10 might not be suitable for assignment. exponent10 must be // in the range [-MAX_PRECISION..0], so the mantissa must be scaled up or // down appropriately. while exponent10 > 0 { // In order to bring exponent10 down to 0, the mantissa should be // multiplied by 10 to compensate. If the exponent10 is too big, this // will cause the mantissa to overflow. 
if mul_by_u32(bits, 10) == 0 { exponent10 -= 1; } else { // Overflowed - return? return None; } } // In order to bring exponent up to -MAX_PRECISION, the mantissa should // be divided by 10 to compensate. If the exponent10 is too small, this // will cause the mantissa to underflow and become 0. while exponent10 < -(MAX_PRECISION as i32) { let rem10 = div_by_u32(bits, 10); exponent10 += 1; if is_all_zero(bits) { // Underflow, unable to keep dividing exponent10 = 0; } else if rem10 >= 5 { add_one_internal(bits); } } // This step is required in order to remove excess bits of precision from the // end of the bit representation, down to the precision guaranteed by the // floating point number if is64 { // Guaranteed to about 16 dp while exponent10 < 0 && (bits[2] != 0 || (bits[1] & 0xFFF0_0000) != 0) { let rem10 = div_by_u32(bits, 10); exponent10 += 1; if rem10 >= 5 { add_one_internal(bits); } } } else { // Guaranteed to about 7 dp while exponent10 < 0 && (bits[2] != 0 || bits[1] != 0 || (bits[2] == 0 && bits[1] == 0 && (bits[0] & 0xFF00_0000) != 0)) { let rem10 = div_by_u32(bits, 10); exponent10 += 1; if rem10 >= 5 { add_one_internal(bits); } } } // Remove multiples of 10 from the representation while exponent10 < 0 { let mut temp = [bits[0], bits[1], bits[2]]; let remainder = div_by_u32(&mut temp, 10); if remainder == 0 { exponent10 += 1; bits[0] = temp[0]; bits[1] = temp[1]; bits[2] = temp[2]; } else { break; } } Some(Decimal { lo: bits[0], mid: bits[1], hi: bits[2], flags: flags(!positive, -exponent10 as u32), }) } /// Checked addition. Computes `self + other`, returning `None` if overflow occurred. 
#[inline(always)] pub fn checked_add(self, other: Decimal) -> Option<Decimal> { // Convert to the same scale let mut my = [self.lo, self.mid, self.hi]; let mut my_scale = self.scale(); let mut ot = [other.lo, other.mid, other.hi]; let mut other_scale = other.scale(); rescale_to_maximum_scale(&mut my, &mut my_scale, &mut ot, &mut other_scale); let mut final_scale = my_scale.max(other_scale); // Add the items together let my_negative = self.is_sign_negative(); let other_negative = other.is_sign_negative(); let mut negative = false; let carry; if !(my_negative ^ other_negative) { negative = my_negative; carry = add_by_internal3(&mut my, &ot); } else { let cmp = cmp_internal(&my, &ot); // -x + y // if x > y then it's negative (i.e. -2 + 1) match cmp { Ordering::Less => { negative = other_negative; sub_by_internal3(&mut ot, &my); my[0] = ot[0]; my[1] = ot[1]; my[2] = ot[2]; } Ordering::Greater => { negative = my_negative; sub_by_internal3(&mut my, &ot); } Ordering::Equal => { // -2 + 2 my[0] = 0; my[1] = 0; my[2] = 0; } } carry = 0; } // If we have a carry we underflowed. // We need to lose some significant digits (if possible) if carry > 0 { if final_scale == 0 { return None; } // Copy it over to a temp array for modification let mut temp = [my[0], my[1], my[2], carry]; while final_scale > 0 && temp[3] != 0 { div_by_u32(&mut temp, 10); final_scale -= 1; } // If we still have a carry bit then we overflowed if temp[3] > 0 { return None; } // Copy it back - we're done my[0] = temp[0]; my[1] = temp[1]; my[2] = temp[2]; } Some(Decimal { lo: my[0], mid: my[1], hi: my[2], flags: flags(negative, final_scale), }) } /// Checked subtraction. Computes `self - other`, returning `None` if overflow occurred. #[inline(always)] pub fn checked_sub(self, other: Decimal) -> Option<Decimal> { let negated_other = Decimal { lo: other.lo, mid: other.mid, hi: other.hi, flags: other.flags ^ SIGN_MASK, }; self.checked_add(negated_other) } /// Checked multiplication. 
Computes `self * other`, returning `None` if overflow occurred.
    #[inline]
    pub fn checked_mul(self, other: Decimal) -> Option<Decimal> {
        // Early exit if either is zero
        if self.is_zero() || other.is_zero() {
            return Some(Decimal::zero());
        }

        // We are only resulting in a negative if we have mismatched signs
        let negative = self.is_sign_negative() ^ other.is_sign_negative();

        // We get the scale of the result by adding the operands. This may be too big, however
        // we'll correct later
        let mut final_scale = self.scale() + other.scale();

        // First of all, if ONLY the lo parts of both numbers is filled
        // then we can simply do a standard 64 bit calculation. It's a minor
        // optimization however prevents the need for long form multiplication
        if self.mid == 0 && self.hi == 0 && other.mid == 0 && other.hi == 0 {
            // Simply multiplication
            let mut u64_result = u64_to_array(u64::from(self.lo) * u64::from(other.lo));

            // If we're above max precision then this is a very small number
            if final_scale > MAX_PRECISION {
                final_scale -= MAX_PRECISION;

                // If the number is above 19 then this will equate to zero.
                // This is because the max value in 64 bits is 1.84E19
                if final_scale > 19 {
                    return Some(Decimal::zero());
                }

                let mut rem_lo = 0;
                let mut power;
                if final_scale > 9 {
                    // Since 10^10 doesn't fit into u32, we divide by 10^10/4
                    // and multiply the next divisor by 4.
                    rem_lo = div_by_u32(&mut u64_result, 2_500_000_000);
                    power = POWERS_10[final_scale as usize - 10] << 2;
                } else {
                    power = POWERS_10[final_scale as usize];
                }

                // Divide fits in 32 bits
                let rem_hi = div_by_u32(&mut u64_result, power);

                // Round the result. Since the divisor is a power of 10
                // we check to see if the remainder is >= 1/2 divisor
                power >>= 1;
                if rem_hi >= power && (rem_hi > power || (rem_lo | (u64_result[0] & 0x1)) != 0) {
                    u64_result[0] += 1;
                }

                final_scale = MAX_PRECISION;
            }
            return Some(Decimal {
                lo: u64_result[0],
                mid: u64_result[1],
                hi: 0,
                flags: flags(negative, final_scale),
            });
        }

        // We're using some of the high bits, so we essentially perform
        // long form multiplication. We compute the 9 partial products
        // into a 192 bit result array.
        //
        //                     [my-h][my-m][my-l]
        //                  x  [ot-h][ot-m][ot-l]
        // --------------------------------------
        // 1.                        [r-hi][r-lo] my-l * ot-l [0, 0]
        // 2.                  [r-hi][r-lo]       my-l * ot-m [0, 1]
        // 3.                  [r-hi][r-lo]       my-m * ot-l [1, 0]
        // 4.            [r-hi][r-lo]             my-m * ot-m [1, 1]
        // 5.            [r-hi][r-lo]             my-l * ot-h [0, 2]
        // 6.            [r-hi][r-lo]             my-h * ot-l [2, 0]
        // 7.      [r-hi][r-lo]                   my-m * ot-h [1, 2]
        // 8.      [r-hi][r-lo]                   my-h * ot-m [2, 1]
        // 9.[r-hi][r-lo]                         my-h * ot-h [2, 2]
        let my = [self.lo, self.mid, self.hi];
        let ot = [other.lo, other.mid, other.hi];
        let mut product = [0u32, 0u32, 0u32, 0u32, 0u32, 0u32];

        // We can perform a minor short circuit here. If the
        // high portions are both 0 then we can skip portions 5-9
        let to = if my[2] == 0 && ot[2] == 0 { 2 } else { 3 };

        for my_index in 0..to {
            for ot_index in 0..to {
                let (mut rlo, mut rhi) = mul_part(my[my_index], ot[ot_index], 0);

                // Get the index for the lo portion of the product
                for prod in product.iter_mut().skip(my_index + ot_index) {
                    let (res, overflow) = add_part(rlo, *prod);
                    *prod = res;

                    // If we have something in rhi from before then promote that
                    if rhi > 0 {
                        // If we overflowed in the last add, add that with rhi
                        if overflow > 0 {
                            let (nlo, nhi) = add_part(rhi, overflow);
                            rlo = nlo;
                            rhi = nhi;
                        } else {
                            rlo = rhi;
                            rhi = 0;
                        }
                    } else if overflow > 0 {
                        rlo = overflow;
                        rhi = 0;
                    } else {
                        break;
                    }

                    // If nothing to do next round then break out
                    if rlo == 0 {
                        break;
                    }
                }
            }
        }

        // If our result has used up the high portion of the product
        // then we either have an overflow or an underflow situation
        // Overflow will occur if we can't scale it back, whereas underflow
        // will kick in rounding
        let mut remainder = 0;
        while final_scale > 0 && (product[3] != 0 || product[4] != 0 || product[5] != 0) {
            remainder = div_by_u32(&mut product, 10u32);
            final_scale -= 1;
        }

        // Round up the carry if we need to
        if remainder >= 5 {
            for part in product.iter_mut() {
                if remainder == 0 {
                    break;
                }
                let digit: u64 = u64::from(*part) + 1;
                remainder = if digit > 0xFFFF_FFFF { 1 } else { 0 };
                *part = (digit & 0xFFFF_FFFF) as u32;
            }
        }

        // If we're still above max precision then we'll try again to
        // reduce precision - we may be dealing with a limit of "0"
        if final_scale > MAX_PRECISION {
            // We're in an underflow situation
            // The easiest way to remove precision is to divide off the result
            while final_scale > MAX_PRECISION && !is_all_zero(&product) {
                div_by_u32(&mut product, 10);
                final_scale -= 1;
            }
            // If we're still at limit then we can't represent any
            // significant decimal digits and will return an integer only
            // Can also be invoked while representing 0.
            if final_scale > MAX_PRECISION {
                final_scale = 0;
            }
        } else if !(product[3] == 0 && product[4] == 0 && product[5] == 0) {
            // We're in an overflow situation - we're within our precision bounds
            // but still have bits in overflow
            return None;
        }

        Some(Decimal {
            lo: product[0],
            mid: product[1],
            hi: product[2],
            flags: flags(negative, final_scale),
        })
    }

    /// Checked division. Computes `self / other`, returning `None` if `other == 0.0` or the
    /// division results in overflow.
    pub fn checked_div(self, other: Decimal) -> Option<Decimal> {
        // Delegates to whichever `ops` module is compiled in (legacy or default).
        match ops::div_impl(&self, &other) {
            DivResult::Ok(quot) => Some(quot),
            DivResult::Overflow => None,
            DivResult::DivByZero => None,
        }
    }

    /// Checked remainder. Computes `self % other`, returning `None` if `other == 0.0`.
    pub fn checked_rem(self, other: Decimal) -> Option<Decimal> {
        if other.is_zero() {
            return None;
        }
        if self.is_zero() {
            return Some(Decimal::zero());
        }

        // Rescale so comparable
        let initial_scale = self.scale();
        let mut quotient = [self.lo, self.mid, self.hi];
        let mut quotient_scale = initial_scale;
        let mut divisor = [other.lo, other.mid, other.hi];
        let mut divisor_scale = other.scale();
        rescale_to_maximum_scale(&mut quotient, &mut quotient_scale, &mut divisor, &mut divisor_scale);

        // Working is the remainder + the quotient
        // We use an aligned array since we'll be using it a lot.
        let mut working_quotient = [quotient[0], quotient[1], quotient[2], 0u32];
        let mut working_remainder = [0u32, 0u32, 0u32, 0u32];
        div_internal(&mut working_quotient, &mut working_remainder, &divisor);

        // Round if necessary. This is for semantic correctness, but could feasibly be removed for
        // performance improvements.
        if quotient_scale > initial_scale {
            let mut working = [
                working_remainder[0],
                working_remainder[1],
                working_remainder[2],
                working_remainder[3],
            ];
            // Scale the remainder back down towards the original scale, stopping if
            // a division would lose a non-zero digit.
            while quotient_scale > initial_scale {
                if div_by_u32(&mut working, 10) > 0 {
                    break;
                }
                quotient_scale -= 1;
                working_remainder.copy_from_slice(&working);
            }
        }

        // The remainder takes the sign of the dividend (truncated-division semantics).
        Some(Decimal {
            lo: working_remainder[0],
            mid: working_remainder[1],
            hi: working_remainder[2],
            flags: flags(self.is_sign_negative(), quotient_scale),
        })
    }

    /// Parses a decimal from a string slice in the given radix.
    /// Radix 10 takes a dedicated fast path.
    pub fn from_str_radix(str: &str, radix: u32) -> Result<Self, crate::Error> {
        if radix == 10 {
            parse_str_radix_10(str)
        } else {
            parse_str_radix_n(str, radix)
        }
    }
}

impl Default for Decimal {
    /// Returns the default value for a `Decimal` (equivalent to `Decimal::zero()`).
    fn default() -> Self {
        Self::zero()
    }
}

// Tri-state outcome of the internal division routines.
pub(crate) enum DivResult {
    Ok(Decimal),
    Overflow,
    DivByZero,
}

// Packs the sign and scale into the `flags` word layout used by `Decimal`.
#[inline]
const fn flags(neg: bool, scale: u32) -> u32 {
    (scale << SCALE_SHIFT) | ((neg as u32) << SIGN_SHIFT)
}

/// Rescales the given decimals to equivalent scales.
/// It will firstly try to scale both the left and the right side to
/// the maximum scale of left/right. If it is unable to do that it
/// will try to reduce the accuracy of the other argument.
/// e.g. with 1.23 and 2.345 it'll rescale the first arg to 1.230
#[inline(always)]
fn rescale_to_maximum_scale(left: &mut [u32; 3], left_scale: &mut u32, right: &mut [u32; 3], right_scale: &mut u32) {
    // NOTE: these compare the pointed-to values - `&mut u32` forwards PartialEq/PartialOrd
    // to the underlying u32.
    if left_scale == right_scale {
        // Nothing to do
        return;
    }

    // A zero mantissa can adopt any scale for free.
    if is_all_zero(left) {
        *left_scale = *right_scale;
        return;
    } else if is_all_zero(right) {
        *right_scale = *left_scale;
        return;
    }

    if left_scale > right_scale {
        rescale_internal(right, right_scale, *left_scale);
        if right_scale != left_scale {
            // Scaling up overflowed; reduce the accuracy of the left side instead.
            rescale_internal(left, left_scale, *right_scale);
        }
    } else {
        rescale_internal(left, left_scale, *right_scale);
        if right_scale != left_scale {
            rescale_internal(right, right_scale, *left_scale);
        }
    }
}

/// Rescales the given decimal to new scale.
/// e.g.
with 1.23 and new scale 3 rescale the value to 1.230
#[inline(always)]
fn rescale_internal(value: &mut [u32; 3], value_scale: &mut u32, new_scale: u32) {
    if *value_scale == new_scale {
        // Nothing to do
        return;
    }

    if is_all_zero(value) {
        // A zero mantissa can take any scale without changing the value.
        *value_scale = new_scale;
        return;
    }

    if *value_scale > new_scale {
        let mut diff = *value_scale - new_scale;
        // Scaling further isn't possible since we got an overflow
        // In this case we need to reduce the accuracy of the "side to keep"

        // Now do the necessary rounding
        let mut remainder = 0;
        while diff > 0 {
            if is_all_zero(value) {
                *value_scale = new_scale;
                return;
            }

            diff -= 1;

            // Any remainder is discarded if diff > 0 still (i.e. lost precision)
            remainder = div_by_10(value);
        }
        if remainder >= 5 {
            // Round half-up: propagate the +1 through the 32-bit words.
            for part in value.iter_mut() {
                let digit = u64::from(*part) + 1u64;
                remainder = if digit > 0xFFFF_FFFF { 1 } else { 0 };
                *part = (digit & 0xFFFF_FFFF) as u32;
                if remainder == 0 {
                    break;
                }
            }
        }
        *value_scale = new_scale;
    } else {
        // Scaling up (adding trailing zeros): multiply by 10 while it still fits in 96 bits.
        let mut diff = new_scale - *value_scale;
        let mut working = [value[0], value[1], value[2]];
        while diff > 0 && mul_by_10(&mut working) == 0 {
            value.copy_from_slice(&working);
            diff -= 1;
        }
        // If we stopped early, `diff` holds how far short of `new_scale` we got.
        *value_scale = new_scale - diff;
    }
}

// Splits a u64 into its [lo, hi] 32-bit words.
#[inline]
const fn u64_to_array(value: u64) -> [u32; 2] {
    [(value & U32_MASK) as u32, (value >> 32 & U32_MASK) as u32]
}

// In-place `value += by` over little-endian 32-bit words; returns the final carry.
// Panics if the array lengths are incompatible (by may be at most one word longer).
fn add_by_internal(value: &mut [u32], by: &[u32]) -> u32 {
    let mut carry: u64 = 0;
    let vl = value.len();
    let bl = by.len();
    if vl >= bl {
        let mut sum: u64;
        for i in 0..bl {
            sum = u64::from(value[i]) + u64::from(by[i]) + carry;
            value[i] = (sum & U32_MASK) as u32;
            carry = sum >> 32;
        }
        if vl > bl && carry > 0 {
            // Propagate the carry through the remaining words of `value`.
            for i in value.iter_mut().skip(bl) {
                sum = u64::from(*i) + carry;
                *i = (sum & U32_MASK) as u32;
                carry = sum >> 32;
                if carry == 0 {
                    break;
                }
            }
        }
    } else if vl + 1 == bl {
        // Overflow, by default, is anything in the high portion of by
        let mut sum: u64;
        for i in 0..vl {
            sum = u64::from(value[i]) + u64::from(by[i]) + carry;
            value[i] = (sum & U32_MASK) as u32;
            carry = sum >> 32;
        }
        if by[vl] > 0 {
            carry += u64::from(by[vl]);
        }
    } else {
        panic!("Internal error: add using incompatible length arrays. {} <- {}", vl, bl);
    }
    carry as u32
}

// In-place `value += 1`; returns the final carry.
#[inline]
fn add_one_internal(value: &mut [u32]) -> u32 {
    let mut carry: u64 = 1; // Start with one, since adding one
    let mut sum: u64;
    for i in value.iter_mut() {
        sum = (*i as u64) + carry;
        *i = (sum & U32_MASK) as u32;
        carry = sum >> 32;
    }

    carry as u32
}

// Fixed-size variant of add_one_internal for 128-bit (4 word) arrays.
#[inline]
fn add_one_internal4(value: &mut [u32; 4]) -> u32 {
    let mut carry: u64 = 1; // Start with one, since adding one
    let mut sum: u64;
    for i in value.iter_mut() {
        sum = (*i as u64) + carry;
        *i = (sum & U32_MASK) as u32;
        carry = sum >> 32;
    }

    carry as u32
}

// Fixed-size 96-bit addition: `value += by`; returns the carry out of the top word.
#[inline]
fn add_by_internal3(value: &mut [u32; 3], by: &[u32; 3]) -> u32 {
    let mut carry: u32 = 0;
    let bl = by.len();
    for i in 0..bl {
        let res1 = value[i].overflowing_add(by[i]);
        let res2 = res1.0.overflowing_add(carry);
        value[i] = res2.0;
        // At most one of the two adds can overflow, so OR-ing the flags is safe.
        carry = (res1.1 | res2.1) as u32;
    }
    carry
}

// Adds two 32-bit words, returning (lo, carry).
#[inline]
fn add_part(left: u32, right: u32) -> (u32, u32) {
    let added = u64::from(left) + u64::from(right);
    ((added & U32_MASK) as u32, (added >> 32 & U32_MASK) as u32)
}

// Fixed-size 96-bit subtraction: `value -= by`. Callers must ensure value >= by
// (any final borrow is discarded).
#[inline(always)]
fn sub_by_internal3(value: &mut [u32; 3], by: &[u32; 3]) {
    let mut overflow = 0;
    let vl = value.len();
    for i in 0..vl {
        // Borrow-safe subtraction: add 2^32 up front, then inspect bit 32 for the borrow.
        let part = (0x1_0000_0000u64 + u64::from(value[i])) - (u64::from(by[i]) + overflow);
        value[i] = part as u32;
        overflow = 1 - (part >> 32);
    }
}

// In-place `value -= by` over 32-bit words; returns the final borrow.
fn sub_by_internal(value: &mut [u32], by: &[u32]) -> u32 {
    // The way this works is similar to long subtraction
    // Let's assume we're working with bytes for simplicity in an example:
    //   257 - 8 = 249
    //   0000_0001 0000_0001 - 0000_0000 0000_1000 = 0000_0000 1111_1001
    // We start by doing the first byte...
    //   Overflow = 0
    //   Left = 0000_0001 (1)
    //   Right = 0000_1000 (8)
    // Firstly, we make sure the left and right are scaled up to twice the size
    //   Left = 0000_0000 0000_0001
    //   Right = 0000_0000 0000_1000
    // We then subtract right from left
    //   Result = Left - Right = 1111_1111 1111_1001
    // We subtract the overflow, which in this case is 0.
    // Because left < right (1 < 8) we invert the high part.
    //   Lo = 1111_1001
    //   Hi = 1111_1111 -> 0000_0001
    // Lo is the field, hi is the overflow.
    // We do the same for the second byte...
    //   Overflow = 1
    //   Left = 0000_0001
    //   Right = 0000_0000
    //   Result = Left - Right = 0000_0000 0000_0001
    // We subtract the overflow...
    //   Result = 0000_0000 0000_0001 - 1 = 0
    // And we invert the high, just because (invert 0 = 0).
    // So our result is:
    //   0000_0000 1111_1001
    let mut overflow = 0;
    let vl = value.len();
    let bl = by.len();
    for i in 0..vl {
        if i >= bl {
            break;
        }
        let (lo, hi) = sub_part(value[i], by[i], overflow);
        value[i] = lo;
        overflow = hi;
    }
    overflow
}

// Subtracts one word plus an incoming borrow from another, returning (lo, borrow).
fn sub_part(left: u32, right: u32, overflow: u32) -> (u32, u32) {
    // Add 2^32 up front so the subtraction can't underflow; bit 32 reveals the borrow.
    let part = 0x1_0000_0000u64 + u64::from(left) - (u64::from(right) + u64::from(overflow));
    let lo = part as u32;
    let hi = 1 - ((part >> 32) as u32);
    (lo, hi)
}

// Multiplies a 96-bit value by 10 in place. Returns overflow
#[inline]
fn mul_by_10(bits: &mut [u32; 3]) -> u32 {
    let mut overflow = 0u64;
    for b in bits.iter_mut() {
        let result = u64::from(*b) * 10u64 + overflow;
        let hi = (result >> 32) & U32_MASK;
        let lo = (result & U32_MASK) as u32;
        *b = lo;
        overflow = hi;
    }

    overflow as u32
}

// Multiplies an arbitrary-width value by `m` in place. Returns overflow
pub(crate) fn mul_by_u32(bits: &mut [u32], m: u32) -> u32 {
    let mut overflow = 0;
    for b in bits.iter_mut() {
        let (lo, hi) = mul_part(*b, m, overflow);
        *b = lo;
        overflow = hi;
    }
    overflow
}

// 32x32 multiply-accumulate: left * right + high, returned as (lo, hi).
fn mul_part(left: u32, right: u32, high: u32) -> (u32, u32) {
    let result = u64::from(left) * u64::from(right) + u64::from(high);
    let hi = ((result >> 32) & U32_MASK) as u32;
    let lo = (result & U32_MASK) as u32;
    (lo, hi)
}

// 128-bit / 96-bit long division: on return `quotient` holds the quotient and
// `remainder` holds the remainder.
fn div_internal(quotient: &mut [u32; 4], remainder: &mut [u32; 4], divisor: &[u32; 3]) {
    // There are a couple of ways to do division on binary numbers:
    //   1. Using long division
    //   2. Using the complement method
    //  ref: http://paulmason.me/dividing-binary-numbers-part-2/
    // The complement method basically keeps trying to subtract the
    // divisor until it can't anymore and placing the rest in remainder.
    let mut complement = [
        divisor[0] ^ 0xFFFF_FFFF,
        divisor[1] ^ 0xFFFF_FFFF,
        divisor[2] ^ 0xFFFF_FFFF,
        0xFFFF_FFFF,
    ];

    // Add one onto the complement (two's complement of the divisor)
    add_one_internal4(&mut complement);

    // Make sure the remainder is 0
    remainder.iter_mut().for_each(|x| *x = 0);

    // If we have nothing in our hi+ block then shift over till we do
    let mut blocks_to_process = 0;
    while blocks_to_process < 4 && quotient[3] == 0 {
        // memcpy would be useful here
        quotient[3] = quotient[2];
        quotient[2] = quotient[1];
        quotient[1] = quotient[0];
        quotient[0] = 0;

        // Increment the counter
        blocks_to_process += 1;
    }

    // Let's try and do the addition...
    // Each skipped 32-bit block means 32 fewer bit iterations below.
    let mut block = blocks_to_process << 5;
    let mut working = [0u32, 0u32, 0u32, 0u32];
    while block < 128 {
        // << 1 for quotient AND remainder. Moving the carry from the quotient to the bottom of the
        // remainder.
        // shl1_internal presumably shifts left one bit with carry-in, returning the bit
        // shifted out (defined elsewhere in this file) - TODO confirm.
        let carry = shl1_internal(quotient, 0);
        shl1_internal(remainder, carry);

        // Copy the remainder of working into sub
        working.copy_from_slice(remainder);

        // Add the remainder with the complement (i.e. trial-subtract the divisor)
        add_by_internal(&mut working, &complement);

        // Check for the significant bit - move over to the quotient
        // as necessary
        if (working[3] & 0x8000_0000) == 0 {
            // Subtraction didn't go negative: keep it and set the quotient bit.
            remainder.copy_from_slice(&working);
            quotient[0] |= 1;
        }

        // Increment our pointer
        block += 1;
    }
}

#[cfg(feature = "legacy-ops")]
mod ops {
    use super::*;

    // Legacy long-division implementation of Decimal division.
    pub(crate) fn div_impl(d1: &Decimal, d2: &Decimal) -> DivResult {
        if d2.is_zero() {
            return DivResult::DivByZero;
        }
        if d1.is_zero() {
            return DivResult::Ok(Decimal::zero());
        }

        let dividend = [d1.lo, d1.mid, d1.hi];
        let divisor = [d2.lo, d2.mid, d2.hi];
        let mut quotient = [0u32, 0u32, 0u32];
        let mut quotient_scale: i32 = d1.scale() as i32 - d2.scale() as i32;

        // We supply an extra overflow word for each of the dividend and the remainder
        let mut working_quotient = [dividend[0], dividend[1], dividend[2], 0u32];
        let mut working_remainder = [0u32, 0u32, 0u32, 0u32];
        let mut working_scale = quotient_scale;
        let mut remainder_scale = quotient_scale;
        let mut underflow;

        // Repeatedly divide, accumulating partial quotients and carrying the
        // remainder forward at ten times the scale, until it is exhausted.
        loop {
            div_internal(&mut working_quotient, &mut working_remainder, &divisor);
            underflow = add_with_scale_internal(
                &mut quotient,
                &mut quotient_scale,
                &mut working_quotient,
                &mut working_scale,
            );

            // Multiply the remainder by 10
            let mut overflow = 0;
            for part in working_remainder.iter_mut() {
                let (lo, hi) = mul_part(*part, 10, overflow);
                *part = lo;
                overflow = hi;
            }
            // Copy temp remainder into the temp quotient section
            working_quotient.copy_from_slice(&working_remainder);

            remainder_scale += 1;
            working_scale = remainder_scale;

            if underflow || is_all_zero(&working_remainder) {
                break;
            }
        }

        // If we have a really big number try to adjust the scale to 0
        while quotient_scale < 0 {
            copy_array_diff_lengths(&mut working_quotient, &quotient);
            working_quotient[3] = 0;
            working_remainder.iter_mut().for_each(|x| *x = 0);

            // Mul 10
            let mut overflow = 0;
            for part in &mut working_quotient {
                let (lo, hi) = mul_part(*part, 10, overflow);
                *part = lo;
                overflow = hi;
            }
            for part in &mut working_remainder {
                let (lo, hi) = mul_part(*part, 10, overflow);
                *part = lo;
                overflow = hi;
            }
            if working_quotient[3] == 0 && is_all_zero(&working_remainder) {
                quotient_scale += 1;
                quotient[0] = working_quotient[0];
                quotient[1] = working_quotient[1];
                quotient[2] = working_quotient[2];
            } else {
                // Overflow
                return DivResult::Overflow;
            }
        }

        // Scales above 255 don't fit the flags byte; collapse to zero.
        if quotient_scale > 255 {
            quotient[0] = 0;
            quotient[1] = 0;
            quotient[2] = 0;
            quotient_scale = 0;
        }

        let mut quotient_negative = d1.is_sign_negative() ^ d2.is_sign_negative();

        // Check for underflow
        let mut final_scale: u32 = quotient_scale as u32;
        if final_scale > MAX_PRECISION {
            let mut remainder = 0;

            // Division underflowed. We must remove some significant digits over using
            // an invalid scale.
            while final_scale > MAX_PRECISION && !is_all_zero(&quotient) {
                remainder = div_by_u32(&mut quotient, 10);
                final_scale -= 1;
            }
            if final_scale > MAX_PRECISION {
                // Result underflowed so set to zero
                final_scale = 0;
                quotient_negative = false;
            } else if remainder >= 5 {
                // Round half-up, propagating through the 32-bit words.
                for part in &mut quotient {
                    if remainder == 0 {
                        break;
                    }
                    let digit: u64 = u64::from(*part) + 1;
                    remainder = if digit > 0xFFFF_FFFF { 1 } else { 0 };
                    *part = (digit & 0xFFFF_FFFF) as u32;
                }
            }
        }

        DivResult::Ok(Decimal {
            lo: quotient[0],
            mid: quotient[1],
            hi: quotient[2],
            flags: flags(quotient_negative, final_scale),
        })
    }

    // Copies as many words as fit from `from` into `into` (lengths may differ).
    #[inline]
    fn copy_array_diff_lengths(into: &mut [u32], from: &[u32]) {
        for i in 0..into.len() {
            if i >= from.len() {
                break;
            }
            into[i] = from[i];
        }
    }

    // Accumulates `working_quotient` into `quotient`, reconciling their scales first.
    // Returns true if an underflow occurred (some significance was lost entirely).
    fn add_with_scale_internal(
        quotient: &mut [u32; 3],
        quotient_scale: &mut i32,
        working_quotient: &mut [u32; 4],
        working_scale: &mut i32,
    ) -> bool {
        // Add quotient and the working (i.e. quotient = quotient + working)
        if is_all_zero(quotient) {
            // Quotient is zero so we can just copy the working quotient in directly
            // First, make sure they are both 96 bit.
            while working_quotient[3] != 0 {
                div_by_u32(working_quotient, 10);
                *working_scale -= 1;
            }
            copy_array_diff_lengths(quotient, working_quotient);
            *quotient_scale = *working_scale;
            return false;
        }

        if is_all_zero(working_quotient) {
            return false;
        }

        // We have ensured that our working is not zero so we should do the addition

        // If our two quotients are different then
        // try to scale down the one with the bigger scale
        let mut temp3 = [0u32, 0u32, 0u32];
        let mut temp4 = [0u32, 0u32, 0u32, 0u32];
        if *quotient_scale != *working_scale {
            // TODO: Remove necessity for temp (without performance impact)
            // Lossless scale-down: only divides while the remainder stays zero.
            fn div_by_10(target: &mut [u32], temp: &mut [u32], scale: &mut i32, target_scale: i32) {
                // Copy to the temp array
                temp.copy_from_slice(target);
                // divide by 10 until target scale is reached
                while *scale > target_scale {
                    let remainder = div_by_u32(temp, 10);
                    if remainder == 0 {
                        *scale -= 1;
                        target.copy_from_slice(&temp);
                    } else {
                        break;
                    }
                }
            }

            if *quotient_scale < *working_scale {
                div_by_10(working_quotient, &mut temp4, working_scale, *quotient_scale);
            } else {
                div_by_10(quotient, &mut temp3, quotient_scale, *working_scale);
            }
        }

        // If our two quotients are still different then
        // try to scale up the smaller scale
        if *quotient_scale != *working_scale {
            // TODO: Remove necessity for temp (without performance impact)
            // Scale-up: multiplies by 10 while the value still fits without overflow.
            fn mul_by_10(target: &mut [u32], temp: &mut [u32], scale: &mut i32, target_scale: i32) {
                temp.copy_from_slice(target);
                let mut overflow = 0;
                // Multiply by 10 until target scale reached or overflow
                while *scale < target_scale && overflow == 0 {
                    overflow = mul_by_u32(temp, 10);
                    if overflow == 0 {
                        // Still no overflow
                        *scale += 1;
                        target.copy_from_slice(&temp);
                    }
                }
            }

            if *quotient_scale > *working_scale {
                mul_by_10(working_quotient, &mut temp4, working_scale, *quotient_scale);
            } else {
                mul_by_10(quotient, &mut temp3, quotient_scale, *working_scale);
            }
        }

        // If our two quotients are still different then
        // try to scale down the one with the bigger scale
        // (ultimately losing significant digits)
        if *quotient_scale != *working_scale {
            // TODO: Remove necessity for temp (without performance impact)
            // Lossy scale-down: divides unconditionally, discarding remainders.
            fn div_by_10_lossy(target: &mut [u32], temp: &mut [u32], scale: &mut i32, target_scale: i32) {
                temp.copy_from_slice(target);
                // divide by 10 until target scale is reached
                while *scale > target_scale {
                    div_by_u32(temp, 10);
                    *scale -= 1;
                    target.copy_from_slice(&temp);
                }
            }
            if *quotient_scale < *working_scale {
                div_by_10_lossy(working_quotient, &mut temp4, working_scale, *quotient_scale);
            } else {
                div_by_10_lossy(quotient, &mut temp3, quotient_scale, *working_scale);
            }
        }

        // If quotient or working are zero we have an underflow condition
        if is_all_zero(quotient) || is_all_zero(working_quotient) {
            // Underflow
            return true;
        } else {
            // Both numbers have the same scale and can be added.
            // We just need to know whether we can fit them in
            let mut underflow = false;
            let mut temp = [0u32, 0u32, 0u32];
            while !underflow {
                temp.copy_from_slice(quotient);

                // Add the working quotient
                let overflow = add_by_internal(&mut temp, working_quotient);
                if overflow == 0 {
                    // addition was successful
                    quotient.copy_from_slice(&temp);
                    break;
                } else {
                    // addition overflowed - remove significant digits and try again
                    div_by_u32(quotient, 10);
                    *quotient_scale -= 1;
                    div_by_u32(working_quotient, 10);
                    *working_scale -= 1;
                    // Check for underflow
                    underflow = is_all_zero(quotient) || is_all_zero(working_quotient);
                }
            }
            if underflow {
                return true;
            }
        }
        false
    }
}

// This code (in fact, this library) is heavily inspired by the dotnet Decimal number library
// implementation. Consequently, a huge thank you to all the contributors to that project
// which has also found its way into here.
#[cfg(not(feature = "legacy-ops"))]
mod ops {
    use super::*;
    use core::ops::BitXor;

    // This is a table of the largest values that will not overflow when multiplied
    // by a given power as represented by the index.
    static POWER_OVERFLOW_VALUES: [Dec12; 8] = [
        Dec12 {
            hi: 429496729,
            mid: 2576980377,
            lo: 2576980377,
        },
        Dec12 {
            hi: 42949672,
            mid: 4123168604,
            lo: 687194767,
        },
        Dec12 {
            hi: 4294967,
            mid: 1271310319,
            lo: 2645699854,
        },
        Dec12 {
            hi: 429496,
            mid: 3133608139,
            lo: 694066715,
        },
        Dec12 {
            hi: 42949,
            mid: 2890341191,
            lo: 2216890319,
        },
        Dec12 {
            hi: 4294,
            mid: 4154504685,
            lo: 2369172679,
        },
        Dec12 {
            hi: 429,
            mid: 2133437386,
            lo: 4102387834,
        },
        Dec12 {
            hi: 42,
            mid: 4078814305,
            lo: 410238783,
        },
    ];

    // A structure that is used for faking a union of the decimal type. This allows setting mid/hi
    // with a u64, for example
    struct Dec12 {
        lo: u32,
        mid: u32,
        hi: u32,
    }

    impl Dec12 {
        // Builds a Dec12 from a Decimal's 96-bit mantissa (sign/scale flags are dropped).
        const fn new(value: &Decimal) -> Self {
            Dec12 {
                lo: value.lo,
                mid: value.mid,
                hi: value.hi,
            }
        }

        // lo + mid combined
        const fn low64(&self) -> u64 {
            ((self.mid as u64) << 32) | (self.lo as u64)
        }

        fn set_low64(&mut self, value: u64) {
            self.mid = (value >> 32) as u32;
            self.lo = value as u32;
        }

        // mid + hi combined
        const fn high64(&self) -> u64 {
            ((self.hi as u64) << 32) | (self.mid as u64)
        }

        fn set_high64(&mut self, value: u64) {
            self.hi = (value >> 32) as u32;
            self.mid = value as u32;
        }

        // Adds a 32 bit value into the 96 bit mantissa.
        // Returns Ok(()) if successful, or Err(DivError::Overflow) if the 96 bits overflowed.
        fn add32(&mut self, value: u32) -> Result<(), DivError> {
            let value = value as u64;
            let new = self.low64().wrapping_add(value);
            self.set_low64(new);
            if new < value {
                // Low 64 bits wrapped - carry into hi.
                self.hi = self.hi.wrapping_add(1);
                if self.hi == 0 {
                    return Err(DivError::Overflow);
                }
            }
            Ok(())
        }

        // Divide a Decimal union by a 32 bit divisor.
        // Self is overwritten with the quotient.
        // Return value is a 32 bit remainder.
        fn div32(&mut self, divisor: u32) -> u32 {
            let divisor64 = divisor as u64;
            // See if we can get by using a simple u64 division
            if self.hi != 0 {
                // Divide the upper 64 bits first, then fold the remainder into lo.
                let mut temp = self.high64();
                let q64 = temp / divisor64;
                self.set_high64(q64);

                // Calculate the "remainder"
                temp = ((temp - q64 * divisor64) << 32) | (self.lo as u64);
                if temp == 0 {
                    return 0;
                }
                let q32 = (temp / divisor64) as u32;
                self.lo = q32;
                ((temp as u32).wrapping_sub(q32.wrapping_mul(divisor))) as u32
            } else {
                // Super easy divisor
                let low64 = self.low64();
                if low64 == 0 {
                    // Nothing to do
                    return 0;
                }
                // Do the calc
                let quotient = low64 / divisor64;
                self.set_low64(quotient);
                // Remainder is the leftover that wasn't used
                (low64.wrapping_sub(quotient.wrapping_mul(divisor64))) as u32
            }
        }

        // Divide the number by a power constant
        // Returns true if division was successful (i.e. the value was exactly divisible)
        fn div32_const(&mut self, pow: u32) -> bool {
            let pow64 = pow as u64;
            let high64 = self.high64();
            let lo = self.lo as u64;
            let div64: u64 = high64 / pow64;
            let div = ((((high64 - div64 * pow64) << 32) + lo) / pow64) as u32;
            if self.lo == div.wrapping_mul(pow) {
                // No remainder - commit the quotient.
                self.set_high64(div64);
                self.lo = div;
                true
            } else {
                false
            }
        }
    }

    // A structure that is used for faking a union of the decimal type with an overflow word.
    struct Dec16 {
        lo: u32,
        mid: u32,
        hi: u32,
        overflow: u32,
    }

    impl Dec16 {
        const fn zero() -> Self {
            Dec16 {
                lo: 0,
                mid: 0,
                hi: 0,
                overflow: 0,
            }
        }

        // lo + mid combined
        const fn low64(&self) -> u64 {
            ((self.mid as u64) << 32) | (self.lo as u64)
        }

        fn set_low64(&mut self, value: u64) {
            self.mid = (value >> 32) as u32;
            self.lo = value as u32;
        }

        // Equivalent to Dec12 high64 (i.e.
mid + hi)
        const fn mid64(&self) -> u64 {
            ((self.hi as u64) << 32) | (self.mid as u64)
        }

        fn set_mid64(&mut self, value: u64) {
            self.hi = (value >> 32) as u32;
            self.mid = value as u32;
        }

        // hi + overflow combined
        const fn high64(&self) -> u64 {
            ((self.overflow as u64) << 32) | (self.hi as u64)
        }

        fn set_high64(&mut self, value: u64) {
            self.overflow = (value >> 32) as u32;
            self.hi = value as u32;
        }

        // Does a partial divide with a 64 bit divisor. The divisor in this case must require 64 bits
        // otherwise various assumptions fail (e.g. 32 bit quotient).
        // To assist, the upper 64 bits must be greater than the divisor for this to succeed.
        // Consequently, it will return the quotient as a 32 bit number and overwrite self with the
        // 64 bit remainder.
        fn partial_divide_64(&mut self, divisor: u64) -> u32 {
            // We make this assertion here, however below we pivot based on the data
            debug_assert!(divisor > self.mid64());

            // If we have an empty high bit, then divisor must be greater than the dividend due to
            // the assumption that the divisor REQUIRES 64 bits.
            if self.hi == 0 {
                let low64 = self.low64();
                if low64 < divisor {
                    // We can't divide at all so result is 0. The dividend remains untouched since
                    // the full amount is the remainder.
                    return 0;
                }

                let quotient = low64 / divisor;
                self.set_low64(low64 - (quotient * divisor));
                return quotient as u32;
            }

            // Do a simple check to see if the hi portion of the dividend is greater than the hi
            // portion of the divisor.
            let divisor_hi32 = (divisor >> 32) as u32;
            if self.hi >= divisor_hi32 {
                // We know that the divisor goes into this at MOST u32::max times.
                // So we kick things off, with that assumption
                let mut low64 = self.low64();
                low64 = low64 - (divisor << 32) + divisor;
                let mut quotient = u32::max_value();

                // If we went negative then keep adding it back in
                loop {
                    if low64 < divisor {
                        break;
                    }
                    quotient -= 1;
                    low64 += divisor;
                }
                self.set_low64(low64);
                return quotient;
            }

            let mid64 = self.mid64();
            let divisor_hi32_64 = divisor_hi32 as u64;
            if mid64 < divisor_hi32_64 as u64 {
                // similar situation as above where we've got nothing left to divide
                return 0;
            }

            // Estimate the quotient from the high words, then correct below.
            let mut quotient = mid64 / divisor_hi32_64;
            let mut remainder = self.lo as u64 | ((mid64 - quotient * divisor_hi32_64) << 32);

            // Do quotient * lo divisor
            let product = quotient * (divisor & 0xFFFF_FFFF);
            remainder = remainder.wrapping_sub(product);

            // Check if we've gone negative. If so, add it back
            if remainder > product.bitxor(u64::max_value()) {
                loop {
                    quotient = quotient.wrapping_sub(1);
                    remainder = remainder.wrapping_add(divisor);
                    if remainder < divisor {
                        break;
                    }
                }
            }

            self.set_low64(remainder);
            quotient as u32
        }

        // Does a partial divide with a 96 bit divisor. The divisor in this case must require 96 bits
        // otherwise various assumptions fail (e.g. 32 bit quotient).
        fn partial_divide_96(&mut self, divisor: &Dec12) -> u32 {
            let dividend = self.high64();
            let divisor_hi = divisor.hi;
            if dividend < divisor_hi as u64 {
                // Dividend is too small - entire number is remainder
                return 0;
            }

            // Estimate the quotient from the high words; corrected below if too large.
            let mut quo = (dividend / divisor_hi as u64) as u32;
            let mut remainder = (dividend as u32).wrapping_sub(quo.wrapping_mul(divisor_hi));

            // Compute full remainder
            let mut prod1 = quo as u64 * divisor.lo as u64;
            let mut prod2 = quo as u64 * divisor.mid as u64;
            prod2 += prod1 >> 32;
            prod1 = (prod1 & 0xFFFF_FFFF) | (prod2 << 32);
            prod2 >>= 32;

            let mut num = self.low64();
            num = num.wrapping_sub(prod1);
            remainder = remainder.wrapping_sub(prod2 as u32);

            // If there are carries make sure they are propagated
            if num > prod1.bitxor(u64::max_value()) {
                remainder = remainder.wrapping_sub(1);
                if remainder < (prod2 as u32).bitxor(u32::max_value()) {
                    self.set_low64(num);
                    self.hi = remainder;
                    return quo;
                }
            } else if remainder <= (prod2 as u32).bitxor(u32::max_value()) {
                self.set_low64(num);
                self.hi = remainder;
                return quo;
            }

            // Remainder went negative, add divisor back until it's positive
            prod1 = divisor.low64();
            loop {
                quo = quo.wrapping_sub(1);
                num = num.wrapping_add(prod1);
                remainder = remainder.wrapping_add(divisor_hi);

                if num < prod1 {
                    // Detected carry.
                    let tmp = remainder;
                    remainder += 1;
                    if tmp < divisor_hi {
                        break;
                    }
                }
                if remainder < divisor_hi {
                    break; // detected carry
                }
            }

            self.set_low64(num);
            self.hi = remainder;
            quo
        }
    }

    // Internal error marker for the union helpers above.
    enum DivError {
        Overflow,
    }

    // Default (non-legacy) Decimal division, closely following the dotnet implementation.
    pub(crate) fn div_impl(dividend: &Decimal, divisor: &Decimal) -> DivResult {
        if divisor.is_zero() {
            return DivResult::DivByZero;
        }
        if dividend.is_zero() {
            return DivResult::Ok(Decimal::zero());
        }

        // Pre calculate the scale and the sign
        let mut scale = (dividend.scale() as i32) - (divisor.scale() as i32);
        let sign_negative = dividend.is_sign_negative() ^ divisor.is_sign_negative();

        // Set up some variables for modification throughout
        let mut require_unscale = false;
        let mut quotient = Dec12::new(&dividend);
        let divisor = Dec12::new(&divisor);

        // Branch depending on the complexity of the divisor
        if divisor.hi | divisor.mid == 0 {
            // We have a simple(r) divisor (32 bit)
            let divisor32 = divisor.lo;

            // Remainder can only be 32 bits since the divisor is 32 bits.
            let mut remainder = quotient.div32(divisor32);
            let mut power_scale = 0;

            // Figure out how to apply the remainder (i.e. we may have performed something like 10/3 or 8/5)
            loop {
                // Remainder is 0 so we have a simple situation
                if remainder == 0 {
                    // If the scale is positive then we're actually done
                    if scale >= 0 {
                        break;
                    }
                    power_scale = 9usize.min((-scale) as usize);
                } else {
                    // We may need to normalize later, so set the flag appropriately
                    require_unscale = true;

                    // We have a remainder so we effectively want to try to adjust the quotient and add
                    // the remainder into the quotient. We do this below, however first of all we want
                    // to try to avoid overflowing so we do that check first.
                    let will_overflow = if scale == MAX_PRECISION_I32 {
                        true
                    } else {
                        // Figure out how much we can scale by
                        if let Ok(s) = find_scale(&quotient, scale) {
                            power_scale = s;
                        } else {
                            return DivResult::Overflow;
                        }

                        // If it comes back as 0 (i.e. 10^0 = 1) then we're going to overflow since
                        // we're doing nothing.
                        power_scale == 0
                    };
                    if will_overflow {
                        // No more scaling can be done, but remainder is non-zero so we round if necessary.
                        let tmp = remainder << 1;
                        let round = if tmp < remainder {
                            // We round if we wrapped around
                            true
                        } else {
                            if tmp >= divisor32 {
                                // If we're greater than the divisor (i.e. underflow)
                                // or if there is a lo bit set, we round
                                tmp > divisor32 || (quotient.lo & 0x1) > 0
                            } else {
                                false
                            }
                        };

                        // If we need to round, try to do so.
                        if round {
                            if let Ok(new_scale) = round_up(&mut quotient, scale) {
                                scale = new_scale;
                            } else {
                                // Overflowed
                                return DivResult::Overflow;
                            }
                        }
                        break;
                    }
                }

                // Do some scaling
                let power = POWERS_10[power_scale];
                scale += power_scale as i32;

                // Increase the quotient by the power that was looked up
                let overflow = increase_scale(&mut quotient, power as u64);
                if overflow > 0 {
                    return DivResult::Overflow;
                }

                // Scale the remainder by the same power and fold its quotient back in.
                let remainder_scaled = (remainder as u64) * (power as u64);
                let remainder_quotient = (remainder_scaled / (divisor32 as u64)) as u32;
                remainder = (remainder_scaled - remainder_quotient as u64 * divisor32 as u64) as u32;
                if let Err(DivError::Overflow) = quotient.add32(remainder_quotient) {
                    if let Ok(adj) = unscale_from_overflow(&mut quotient, scale, remainder != 0) {
                        scale = adj;
                    } else {
                        // Still overflowing
                        return DivResult::Overflow;
                    }
                    break;
                }
            }
        } else {
            // We have a divisor greater than 32 bits. Both of these share some quick calculation wins
            // so we'll do those before branching into separate logic.
            // The win we can do is shifting the bits to the left as much as possible. We do this to both
            // the dividend and the divisor to ensure the quotient is not changed.
            // As a simple contrived example: if we have 4 / 2 then we could bit shift all the way to the
            // left meaning that the lo portion would have nothing inside of it. Of course, shifting these
            // left one has the same result (8/4) etc.
            // The advantage is that we may be able to write off lower portions of the number making things
            // easier.
            let mut power_scale = if divisor.hi == 0 {
                divisor.mid.leading_zeros()
            } else {
                divisor.hi.leading_zeros()
            } as usize;
            let mut remainder = Dec16::zero();
            remainder.set_low64(quotient.low64() << power_scale);

            let tmp_high = ((quotient.mid as u64) + ((quotient.hi as u64) << 32)) >> (32 - power_scale);
            remainder.set_high64(tmp_high);

            // Work out the divisor after it's shifted
            let divisor64 = divisor.low64() << power_scale;
            // Check if the divisor is 64 bit or the full 96 bits
            if divisor.hi == 0 {
                // It's 64 bits
                quotient.hi = 0;

                // Calc mid/lo by shifting accordingly
                let rem_lo = remainder.lo;
                remainder.lo = remainder.mid;
                remainder.mid = remainder.hi;
                remainder.hi = remainder.overflow;
                quotient.mid = remainder.partial_divide_64(divisor64);

                remainder.hi = remainder.mid;
                remainder.mid = remainder.lo;
                remainder.lo = rem_lo;
                quotient.lo = remainder.partial_divide_64(divisor64);

                loop {
                    let rem_low64 = remainder.low64();
                    if rem_low64 == 0 {
                        // If the scale is positive then we're actually done
                        if scale >= 0 {
                            break;
                        }
                        power_scale = 9usize.min((-scale) as usize);
                    } else {
                        // We may need to normalize later, so set the flag appropriately
                        require_unscale = true;

                        // We have a remainder so we effectively want to try to adjust the quotient and add
                        // the remainder into the quotient. We do this below, however first of all we want
                        // to try to avoid overflowing so we do that check first.
                        let will_overflow = if scale == MAX_PRECISION_I32 {
                            true
                        } else {
                            // Figure out how much we can scale by
                            if let Ok(s) = find_scale(&quotient, scale) {
                                power_scale = s;
                            } else {
                                return DivResult::Overflow;
                            }

                            // If it comes back as 0 (i.e. 10^0 = 1) then we're going to overflow since
                            // we're doing nothing.
                            power_scale == 0
                        };
                        if will_overflow {
                            // No more scaling can be done, but remainder is non-zero so we round if necessary.
                            let mut tmp = remainder.low64();
                            let round = if (tmp as i64) < 0 {
                                // We round if we wrapped around
                                true
                            } else {
                                tmp <<= 1;
                                if tmp > divisor64 {
                                    true
                                } else {
                                    tmp == divisor64 && quotient.lo & 0x1 != 0
                                }
                            };

                            // If we need to round, try to do so.
                            if round {
                                if let Ok(new_scale) = round_up(&mut quotient, scale) {
                                    scale = new_scale;
                                } else {
                                    // Overflowed
                                    return DivResult::Overflow;
                                }
                            }
                            break;
                        }
                    }

                    // Do some scaling
                    let power = POWERS_10[power_scale];
                    scale += power_scale as i32;

                    // Increase the quotient by the power that was looked up
                    let overflow = increase_scale(&mut quotient, power as u64);
                    if overflow > 0 {
                        return DivResult::Overflow;
                    }
                    increase_scale64(&mut remainder, power as u64);

                    let tmp = remainder.partial_divide_64(divisor64);
                    if let Err(DivError::Overflow) = quotient.add32(tmp) {
                        if let Ok(adj) = unscale_from_overflow(&mut quotient, scale, remainder.low64() != 0) {
                            scale = adj;
                        } else {
                            // Still overflowing
                            return DivResult::Overflow;
                        }
                        break;
                    }
                }
            } else {
                // It's 96 bits
                // Start by finishing the shift left
                let divisor_mid = divisor.mid;
                let divisor_hi = divisor.hi;
                let mut divisor = divisor;
                divisor.set_low64(divisor64);
                divisor.hi = ((divisor_mid as u64 + ((divisor_hi as u64) << 32)) >> (32 - power_scale)) as u32;

                let quo = remainder.partial_divide_96(&divisor);
                quotient.set_low64(quo as u64);
                quotient.hi = 0;

                loop {
                    let mut rem_low64 = remainder.low64();
                    if rem_low64 == 0 && remainder.hi == 0 {
                        // If the scale is positive then we're actually done
                        if scale >= 0 {
                            break;
                        }
                        power_scale = 9usize.min((-scale) as usize);
                    } else {
                        // We may need to normalize later, so set the flag appropriately
                        require_unscale = true;

                        // We have a remainder so we effectively want to try to adjust the quotient and add
                        // the remainder into the quotient. We do this below, however first of all we want
                        // to try to avoid overflowing so we do that check first.
let will_overflow = if scale == MAX_PRECISION_I32 { true } else { // Figure out how much we can scale by if let Ok(s) = find_scale(&quotient, scale) { power_scale = s; } else { return DivResult::Overflow; } // If it comes back as 0 (i.e. 10^0 = 1) then we're going to overflow since // we're doing nothing. power_scale == 0 }; if will_overflow { // No more scaling can be done, but remainder is non-zero so we round if necessary. let round = if (remainder.hi as i32) < 0 { // We round if we wrapped around true } else { let tmp = remainder.mid >> 31; rem_low64 <<= 1; remainder.set_low64(rem_low64); remainder.hi = (remainder.hi << 1) + tmp; if remainder.hi > divisor.hi { true } else if remainder.hi == divisor.hi { let divisor_low64 = divisor.low64(); if rem_low64 > divisor_low64 { true } else { rem_low64 == divisor_low64 && (quotient.lo & 1) != 0 } } else { false } }; // If we need to round, try to do so. if round { if let Ok(new_scale) = round_up(&mut quotient, scale) { scale = new_scale; } else { // Overflowed return DivResult::Overflow; } } break; } } // Do some scaling let power = POWERS_10[power_scale]; scale += power_scale as i32; // Increase the quotient by the power that was looked up let overflow = increase_scale(&mut quotient, power as u64); if overflow > 0 { return DivResult::Overflow; } let mut tmp_remainder = Dec12 { lo: remainder.lo, mid: remainder.mid, hi: remainder.hi, }; let overflow = increase_scale(&mut tmp_remainder, power as u64); remainder.lo = tmp_remainder.lo; remainder.mid = tmp_remainder.mid; remainder.hi = tmp_remainder.hi; remainder.overflow = overflow; let tmp = remainder.partial_divide_96(&divisor); if let Err(DivError::Overflow) = quotient.add32(tmp) { if let Ok(adj) = unscale_from_overflow(&mut quotient, scale, (remainder.low64() | remainder.high64()) != 0) { scale = adj; } else { // Still overflowing return DivResult::Overflow; } break; } } } } if require_unscale { scale = unscale(&mut quotient, scale); } DivResult::Ok(Decimal { lo: 
quotient.lo, mid: quotient.mid, hi: quotient.hi, flags: flags(sign_negative, scale as u32), }) } // Multiply num by power (multiple of 10). Power must be 32 bits. // Returns the overflow, if any fn increase_scale(num: &mut Dec12, power: u64) -> u32 { let mut tmp = (num.lo as u64) * power; num.lo = tmp as u32; tmp >>= 32; tmp += (num.mid as u64) * power; num.mid = tmp as u32; tmp >>= 32; tmp += (num.hi as u64) * power; num.hi = tmp as u32; (tmp >> 32) as u32 } // Multiply num by power (multiple of 10). Power must be 32 bits. fn increase_scale64(num: &mut Dec16, power: u64) { let mut tmp = (num.lo as u64) * power; num.lo = tmp as u32; tmp >>= 32; tmp += (num.mid as u64) * power; num.set_mid64(tmp) } // Adjust the number to deal with an overflow. This function follows being scaled up (i.e. multiplied // by 10, so this effectively tries to reverse that by dividing by 10 then feeding in the high bit // to undo the overflow and rounding instead. // Returns the updated scale. fn unscale_from_overflow(num: &mut Dec12, scale: i32, sticky: bool) -> Result<i32, DivError> { let scale = scale - 1; if scale < 0 { return Err(DivError::Overflow); } // This function is called when the hi portion has "overflowed" upon adding one and has wrapped // back around to 0. Consequently, we need to "feed" that back in, but also rescaling down // to reverse out the overflow. 
    const HIGH_BIT: u64 = 0x1_0000_0000;
    // hi = 2^32 / 10, i.e. the wrapped carry divided back down.
    num.hi = (HIGH_BIT / 10) as u32;

    // Calc the mid
    let mut tmp = ((HIGH_BIT % 10) << 32) + (num.mid as u64);
    let mut val = (tmp / 10) as u32;
    num.mid = val;

    // Calc the lo using a similar method
    tmp = ((tmp - (val as u64) * 10) << 32) + (num.lo as u64);
    val = (tmp / 10) as u32;
    num.lo = val;

    // Work out the remainder, and round if we have one (since it doesn't fit)
    let remainder = (tmp - (val as u64) * 10) as u32;
    // Round half to even: ties go up only if the sticky bit or the low bit is set.
    if remainder > 5 || (remainder == 5 && (sticky || num.lo & 0x1 > 0)) {
        let _ = num.add32(1);
    }
    Ok(scale)
}

// Determine the maximum value of x that ensures that the quotient when scaled up by 10^x
// still fits in 96 bits. Ultimately, we want to make scale positive - if we can't then
// we're going to overflow. Because x is ultimately used to lookup inside the POWERS array, it
// must be a valid value 0 <= x <= 9
fn find_scale(num: &Dec12, scale: i32) -> Result<usize, DivError> {
    // OVERFLOW_MAX_N_HI is the largest hi word such that the value can still be
    // safely multiplied by 10^N (they are u96::MAX / 10^N truncated to the hi word).
    const OVERFLOW_MAX_9_HI: u32 = 4;
    const OVERFLOW_MAX_8_HI: u32 = 42;
    const OVERFLOW_MAX_7_HI: u32 = 429;
    const OVERFLOW_MAX_6_HI: u32 = 4294;
    const OVERFLOW_MAX_5_HI: u32 = 42949;
    const OVERFLOW_MAX_4_HI: u32 = 429496;
    const OVERFLOW_MAX_3_HI: u32 = 4294967;
    const OVERFLOW_MAX_2_HI: u32 = 42949672;
    const OVERFLOW_MAX_1_HI: u32 = 429496729;
    const OVERFLOW_MAX_9_LOW64: u64 = 5441186219426131129;

    let hi = num.hi;
    let low64 = num.low64();
    let mut x = 0usize;

    // Quick check to stop us from trying to scale any more.
    //
    if hi > OVERFLOW_MAX_1_HI {
        // If it's less than 0, which it probably is - overflow. We can't do anything.
        if scale < 0 {
            return Err(DivError::Overflow);
        }
        return Ok(x);
    }

    if scale > MAX_PRECISION_I32 - 9 {
        // We can't scale by 10^9 without exceeding the max scale factor.
        // Instead, we'll try to scale by the most that we can and see if that works.
        // This is safe to do due to the check above. e.g. scale > 19 in the above, so it will
        // evaluate to 9 or less below.
        x = (MAX_PRECISION_I32 - scale) as usize;
        if hi < POWER_OVERFLOW_VALUES[x - 1].hi {
            if x as i32 + scale < 0 {
                // We still overflow
                return Err(DivError::Overflow);
            }
            return Ok(x);
        }
    } else if hi < OVERFLOW_MAX_9_HI || hi == OVERFLOW_MAX_9_HI && low64 <= OVERFLOW_MAX_9_LOW64 {
        return Ok(9);
    }

    // Do a binary search to find a power to scale by that is less than 9
    x = if hi > OVERFLOW_MAX_5_HI {
        if hi > OVERFLOW_MAX_3_HI {
            if hi > OVERFLOW_MAX_2_HI {
                1
            } else {
                2
            }
        } else {
            if hi > OVERFLOW_MAX_4_HI {
                3
            } else {
                4
            }
        }
    } else {
        if hi > OVERFLOW_MAX_7_HI {
            if hi > OVERFLOW_MAX_6_HI {
                5
            } else {
                6
            }
        } else {
            if hi > OVERFLOW_MAX_8_HI {
                7
            } else {
                8
            }
        }
    };

    // Double check what we've found won't overflow. Otherwise, we go one below.
    if hi == POWER_OVERFLOW_VALUES[x - 1].hi && low64 > POWER_OVERFLOW_VALUES[x - 1].low64() {
        x -= 1;
    }

    // Confirm we've actually resolved things
    if x as i32 + scale < 0 {
        Err(DivError::Overflow)
    } else {
        Ok(x)
    }
}

// Add one to the 96-bit magnitude, falling back to unscale_from_overflow if
// the increment wraps all the way through hi. Returns the (possibly reduced) scale.
#[inline]
fn round_up(num: &mut Dec12, scale: i32) -> Result<i32, DivError> {
    let low64 = num.low64().wrapping_add(1);
    num.set_low64(low64);
    if low64 != 0 {
        return Ok(scale);
    }
    let hi = num.hi.wrapping_add(1);
    num.hi = hi;
    if hi != 0 {
        return Ok(scale);
    }
    unscale_from_overflow(num, scale, true)
}

// Strip trailing zero digits from the magnitude, reducing the scale accordingly.
fn unscale(num: &mut Dec12, scale: i32) -> i32 {
    // Since 10 = 2 * 5, there must be a factor of 2 for every power of 10 we can extract.
    // We use this as a quick test on whether to try a given power.
    let mut scale = scale;
    while num.lo == 0 && scale >= 8 && num.div32_const(100000000) {
        scale -= 8;
    }

    if (num.lo & 0xF) == 0 && scale >= 4 && num.div32_const(10000) {
        scale -= 4;
    }

    if (num.lo & 0x3) == 0 && scale >= 2 && num.div32_const(100) {
        scale -= 2;
    }

    if (num.lo & 0x1) == 0 && scale >= 1 && num.div32_const(10) {
        scale -= 1;
    }

    scale
}
} // closes a scope opened before this chunk (see earlier in the file)

// Divide the little-endian u32 words in `bits` in place by `divisor`.
// Returns remainder
pub(crate) fn div_by_u32(bits: &mut [u32], divisor: u32) -> u32 {
    if divisor == 0 {
        // Divide by zero
        panic!("Internal error: divide by zero");
    } else if divisor == 1 {
        // dividend remains unchanged
        0
    } else {
        // Classic schoolbook division, most significant word first.
        let mut remainder = 0u32;
        let divisor = u64::from(divisor);
        for part in bits.iter_mut().rev() {
            let temp = (u64::from(remainder) << 32) + u64::from(*part);
            remainder = (temp % divisor) as u32;
            *part = (temp / divisor) as u32;
        }

        remainder
    }
}

// Specialized divide-by-ten for a 96-bit magnitude; same scheme as div_by_u32.
// Returns the remainder (0..=9).
fn div_by_10(bits: &mut [u32; 3]) -> u32 {
    let mut remainder = 0u32;
    let divisor = 10u64;
    for part in bits.iter_mut().rev() {
        let temp = (u64::from(remainder) << 32) + u64::from(*part);
        remainder = (temp % divisor) as u32;
        *part = (temp / divisor) as u32;
    }

    remainder
}

// Shift the little-endian words left by one bit, feeding `carry` into the
// lowest bit. Returns the bit shifted out of the top word.
#[inline]
fn shl1_internal(bits: &mut [u32], carry: u32) -> u32 {
    let mut carry = carry;
    for part in bits.iter_mut() {
        let b = *part >> 31;
        *part = (*part << 1) | carry;
        carry = b;
    }
    carry
}

// Compare two 96-bit magnitudes (little-endian words).
// NOTE(review): the `left_hi <= right_hi` in the first disjunct is only ever
// evaluated with `left_hi >= right_hi` (the `<` case already returned Less),
// so it behaves as `==`; `==` would read more clearly — confirm before changing.
#[inline]
fn cmp_internal(left: &[u32; 3], right: &[u32; 3]) -> Ordering {
    let left_hi: u32 = left[2];
    let right_hi: u32 = right[2];
    let left_lo: u64 = u64::from(left[1]) << 32 | u64::from(left[0]);
    let right_lo: u64 = u64::from(right[1]) << 32 | u64::from(right[0]);
    if left_hi < right_hi || (left_hi <= right_hi && left_lo < right_lo) {
        Ordering::Less
    } else if left_hi == right_hi && left_lo == right_lo {
        Ordering::Equal
    } else {
        Ordering::Greater
    }
}

// True when every word of the magnitude is zero.
#[inline]
pub(crate) fn is_all_zero(bits: &[u32]) -> bool {
    bits.iter().all(|b| *b == 0)
}

macro_rules!
impl_from {
    // Generates an infallible `From<$T> for Decimal` that delegates to the
    // given FromPrimitive constructor. The unwrap is safe for the integer
    // types used below because they always fit in 96 bits (i128/u128 values
    // that do not fit will panic here).
    ($T:ty, $from_ty:path) => {
        impl core::convert::From<$T> for Decimal {
            #[inline]
            fn from(t: $T) -> Self {
                $from_ty(t).unwrap()
            }
        }
    };
}

impl_from!(isize, FromPrimitive::from_isize);
impl_from!(i8, FromPrimitive::from_i8);
impl_from!(i16, FromPrimitive::from_i16);
impl_from!(i32, FromPrimitive::from_i32);
impl_from!(i64, FromPrimitive::from_i64);
impl_from!(usize, FromPrimitive::from_usize);
impl_from!(u8, FromPrimitive::from_u8);
impl_from!(u16, FromPrimitive::from_u16);
impl_from!(u32, FromPrimitive::from_u32);
impl_from!(u64, FromPrimitive::from_u64);
impl_from!(i128, FromPrimitive::from_i128);
impl_from!(u128, FromPrimitive::from_u128);

// val op val: forwards to the &val op &val implementation.
macro_rules! forward_val_val_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl $imp<$res> for $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: $res) -> $res {
                (&self).$method(&other)
            }
        }
    };
}

// &val op val: forwards to the &val op &val implementation.
macro_rules! forward_ref_val_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl<'a> $imp<$res> for &'a $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: $res) -> $res {
                self.$method(&other)
            }
        }
    };
}

// val op &val: forwards to the &val op &val implementation.
macro_rules! forward_val_ref_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl<'a> $imp<&'a $res> for $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: &$res) -> $res {
                (&self).$method(other)
            }
        }
    };
}

// Expands to all three ownership permutations; only &val op &val needs a
// hand-written impl.
macro_rules! forward_all_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        forward_val_val_binop!(impl $imp for $res, $method);
        forward_ref_val_binop!(impl $imp for $res, $method);
        forward_val_ref_binop!(impl $imp for $res, $method);
    };
}

impl Zero for Decimal {
    fn zero() -> Decimal {
        Decimal {
            flags: 0,
            hi: 0,
            lo: 0,
            mid: 0,
        }
    }

    // Zero iff the 96-bit magnitude is zero; sign and scale flags are ignored.
    fn is_zero(&self) -> bool {
        self.lo.is_zero() && self.mid.is_zero() && self.hi.is_zero()
    }
}

impl One for Decimal {
    fn one() -> Decimal {
        Decimal {
            flags: 0,
            hi: 0,
            lo: 1,
            mid: 0,
        }
    }
}

impl Signed for Decimal {
    fn abs(&self) -> Self {
        self.abs()
    }

    // max(self - other, 0), per the num-traits contract.
    fn abs_sub(&self, other: &Self) -> Self {
        if self <= other {
            Decimal::zero()
        } else {
            self.abs()
        }
    }

    // Returns -1, 0 or +1 with the sign of self.
    fn signum(&self) -> Self {
        if self.is_zero() {
            Decimal::zero()
        } else {
            let mut value = Decimal::one();
            if self.is_sign_negative() {
                value.set_sign_negative(true);
            }
            value
        }
    }

    fn is_positive(&self) -> bool {
        self.is_sign_positive()
    }

    fn is_negative(&self) -> bool {
        self.is_sign_negative()
    }
}

// dedicated implementation for the most common case.
fn parse_str_radix_10(str: &str) -> Result<Decimal, crate::Error> {
    if str.is_empty() {
        return Err(Error::new("Invalid decimal: empty"));
    }

    let mut offset = 0;
    let mut len = str.len();
    let bytes = str.as_bytes();
    let mut negative = false; // assume positive

    // handle the sign
    if bytes[offset] == b'-' {
        negative = true; // leading minus means negative
        offset += 1;
        len -= 1;
    } else if bytes[offset] == b'+' {
        // leading + allowed
        offset += 1;
        len -= 1;
    }

    // should now be at numeric part of the significand
    let mut digits_before_dot: i32 = -1; // digits before '.', -1 if no '.'
    let mut coeff = ArrayVec::<[_; MAX_STR_BUFFER_SIZE]>::new(); // integer significand array

    let mut maybe_round = false;
    while len > 0 {
        let b = bytes[offset];
        match b {
            b'0'..=b'9' => {
                coeff.push(u32::from(b - b'0'));
                offset += 1;
                len -= 1;

                // If the coefficient is longer than the max, exit early
                if coeff.len() as u32 > 28 {
                    maybe_round = true;
                    break;
                }
            }
            b'.'
            => {
                if digits_before_dot >= 0 {
                    return Err(Error::new("Invalid decimal: two decimal points"));
                }
                digits_before_dot = coeff.len() as i32;
                offset += 1;
                len -= 1;
            }
            b'_' => {
                // Must start with a number...
                if coeff.is_empty() {
                    return Err(Error::new("Invalid decimal: must start lead with a number"));
                }
                offset += 1;
                len -= 1;
            }
            _ => return Err(Error::new("Invalid decimal: unknown character")),
        }
    }

    // If we exited before the end of the string then do some rounding if necessary
    if maybe_round && offset < bytes.len() {
        let next_byte = bytes[offset];
        let digit = match next_byte {
            b'0'..=b'9' => u32::from(next_byte - b'0'),
            b'_' => 0,
            b'.' => {
                // Still an error if we have a second dp
                if digits_before_dot >= 0 {
                    return Err(Error::new("Invalid decimal: two decimal points"));
                }
                0
            }
            _ => return Err(Error::new("Invalid decimal: unknown character")),
        };

        // Round at midpoint
        if digit >= 5 {
            // Propagate the carry from the least significant digit upward.
            let mut index = coeff.len() - 1;
            loop {
                let new_digit = coeff[index] + 1;
                if new_digit <= 9 {
                    coeff[index] = new_digit;
                    break;
                } else {
                    coeff[index] = 0;
                    if index == 0 {
                        // Carry out of the most significant digit: prepend a 1 and
                        // drop the last (now zero) digit so the length is unchanged.
                        coeff.insert(0, 1u32);
                        digits_before_dot += 1;
                        coeff.pop();
                        break;
                    }
                }
                index -= 1;
            }
        }
    }

    // here when no characters left
    if coeff.is_empty() {
        return Err(Error::new("Invalid decimal: no digits found"));
    }

    let mut scale = if digits_before_dot >= 0 {
        // we had a decimal place so set the scale
        (coeff.len() as u32) - (digits_before_dot as u32)
    } else {
        0
    };

    // Accumulate the digits into the 96-bit magnitude, multiplying by 10 each step.
    let mut data = [0u32, 0u32, 0u32];
    let mut tmp = [0u32, 0u32, 0u32];
    let len = coeff.len();
    for (i, digit) in coeff.iter().enumerate() {
        // If the data is going to overflow then we should go into recovery mode
        tmp[0] = data[0];
        tmp[1] = data[1];
        tmp[2] = data[2];
        let overflow = mul_by_10(&mut tmp);
        if overflow > 0 {
            // This means that we have more data to process, that we're not sure what to do with.
            // This may or may not be an issue - depending on whether we're past a decimal point
            // or not.
            if (i as i32) < digits_before_dot && i + 1 < len {
                return Err(Error::new("Invalid decimal: overflow from too many digits"));
            }

            if *digit >= 5 {
                let carry = add_one_internal(&mut data);
                if carry > 0 {
                    // Highly unlikely scenario which is more indicative of a bug
                    return Err(Error::new("Invalid decimal: overflow when rounding"));
                }
            }
            // We're also one less digit so reduce the scale
            let diff = (len - i) as u32;
            if diff > scale {
                return Err(Error::new("Invalid decimal: overflow from scale mismatch"));
            }
            scale -= diff;
            break;
        } else {
            data[0] = tmp[0];
            data[1] = tmp[1];
            data[2] = tmp[2];
            let carry = add_by_internal(&mut data, &[*digit]);
            if carry > 0 {
                // Highly unlikely scenario which is more indicative of a bug
                return Err(Error::new("Invalid decimal: overflow from carry"));
            }
        }
    }

    Ok(Decimal {
        lo: data[0],
        mid: data[1],
        hi: data[2],
        flags: flags(negative, scale),
    })
}

// General-radix string parser (radix 2..=36); same overall shape as
// parse_str_radix_10 but with per-radix digit validation and precision limits.
pub fn parse_str_radix_n(str: &str, radix: u32) -> Result<Decimal, crate::Error> {
    if str.is_empty() {
        return Err(Error::new("Invalid decimal: empty"));
    }
    if radix < 2 {
        return Err(Error::new("Unsupported radix < 2"));
    }
    if radix > 36 {
        // As per trait documentation
        return Err(Error::new("Unsupported radix > 36"));
    }

    let mut offset = 0;
    let mut len = str.len();
    let bytes = str.as_bytes();
    let mut negative = false; // assume positive

    // handle the sign
    if bytes[offset] == b'-' {
        negative = true; // leading minus means negative
        offset += 1;
        len -= 1;
    } else if bytes[offset] == b'+' {
        // leading + allowed
        offset += 1;
        len -= 1;
    }

    // should now be at numeric part of the significand
    let mut digits_before_dot: i32 = -1; // digits before '.', -1 if no '.'
    let mut coeff = ArrayVec::<[_; 96]>::new(); // integer significand array

    // Supporting different radix
    // max_n is the highest valid '0'..'9' byte; max_alpha_* the highest valid letter.
    let (max_n, max_alpha_lower, max_alpha_upper) = if radix <= 10 {
        (b'0' + (radix - 1) as u8, 0, 0)
    } else {
        let adj = (radix - 11) as u8;
        (b'9', adj + b'a', adj + b'A')
    };

    // Estimate the max precision. All in all, it needs to fit into 96 bits.
    // Rather than try to estimate, I've included the constants directly in here. We could,
    // perhaps, replace this with a formula if it's faster - though it does appear to be log2.
    let estimated_max_precision = match radix {
        2 => 96,
        3 => 61,
        4 => 48,
        5 => 42,
        6 => 38,
        7 => 35,
        8 => 32,
        9 => 31,
        10 => 28,
        11 => 28,
        12 => 27,
        13 => 26,
        14 => 26,
        15 => 25,
        16 => 24,
        17 => 24,
        18 => 24,
        19 => 23,
        20 => 23,
        21 => 22,
        22 => 22,
        23 => 22,
        24 => 21,
        25 => 21,
        26 => 21,
        27 => 21,
        28 => 20,
        29 => 20,
        30 => 20,
        31 => 20,
        32 => 20,
        33 => 20,
        34 => 19,
        35 => 19,
        36 => 19,
        _ => return Err(Error::new("Unsupported radix")),
    };

    let mut maybe_round = false;
    while len > 0 {
        let b = bytes[offset];
        match b {
            b'0'..=b'9' => {
                if b > max_n {
                    return Err(Error::new("Invalid decimal: invalid character"));
                }
                coeff.push(u32::from(b - b'0'));
                offset += 1;
                len -= 1;

                // If the coefficient is longer than the max, exit early
                if coeff.len() as u32 > estimated_max_precision {
                    maybe_round = true;
                    break;
                }
            }
            b'a'..=b'z' => {
                if b > max_alpha_lower {
                    return Err(Error::new("Invalid decimal: invalid character"));
                }
                coeff.push(u32::from(b - b'a') + 10);
                offset += 1;
                len -= 1;

                if coeff.len() as u32 > estimated_max_precision {
                    maybe_round = true;
                    break;
                }
            }
            b'A'..=b'Z' => {
                if b > max_alpha_upper {
                    return Err(Error::new("Invalid decimal: invalid character"));
                }
                coeff.push(u32::from(b - b'A') + 10);
                offset += 1;
                len -= 1;

                if coeff.len() as u32 > estimated_max_precision {
                    maybe_round = true;
                    break;
                }
            }
            b'.' => {
                if digits_before_dot >= 0 {
                    return Err(Error::new("Invalid decimal: two decimal points"));
                }
                digits_before_dot = coeff.len() as i32;
                offset += 1;
                len -= 1;
            }
            b'_' => {
                // Must start with a number...
                if coeff.is_empty() {
                    return Err(Error::new("Invalid decimal: must start lead with a number"));
                }
                offset += 1;
                len -= 1;
            }
            _ => return Err(Error::new("Invalid decimal: unknown character")),
        }
    }

    // If we exited before the end of the string then do some rounding if necessary
    if maybe_round && offset < bytes.len() {
        let next_byte = bytes[offset];
        let digit = match next_byte {
            b'0'..=b'9' => {
                if next_byte > max_n {
                    return Err(Error::new("Invalid decimal: invalid character"));
                }
                u32::from(next_byte - b'0')
            }
            b'a'..=b'z' => {
                if next_byte > max_alpha_lower {
                    return Err(Error::new("Invalid decimal: invalid character"));
                }
                u32::from(next_byte - b'a') + 10
            }
            b'A'..=b'Z' => {
                if next_byte > max_alpha_upper {
                    return Err(Error::new("Invalid decimal: invalid character"));
                }
                u32::from(next_byte - b'A') + 10
            }
            b'_' => 0,
            b'.' => {
                // Still an error if we have a second dp
                if digits_before_dot >= 0 {
                    return Err(Error::new("Invalid decimal: two decimal points"));
                }
                0
            }
            _ => return Err(Error::new("Invalid decimal: unknown character")),
        };

        // Round at midpoint
        // NOTE(review): for even radix (radix+1)/2 == radix/2, so both arms of this
        // `if` produce the same midpoint for even radix and floor(radix/2) for odd —
        // confirm the intended tie behavior for odd radices.
        let midpoint = if radix & 0x1 == 1 { radix / 2 } else { (radix + 1) / 2 };
        if digit >= midpoint {
            let mut index = coeff.len() - 1;
            loop {
                let new_digit = coeff[index] + 1;
                // NOTE(review): this carry test uses the decimal bound 9 even for
                // non-decimal radices; for radix > 10 a digit of e.g. 10..=radix-1
                // would not carry here — verify against the crate's radix tests.
                if new_digit <= 9 {
                    coeff[index] = new_digit;
                    break;
                } else {
                    coeff[index] = 0;
                    if index == 0 {
                        coeff.insert(0, 1u32);
                        digits_before_dot += 1;
                        coeff.pop();
                        break;
                    }
                }
                index -= 1;
            }
        }
    }

    // here when no characters left
    if coeff.is_empty() {
        return Err(Error::new("Invalid decimal: no digits found"));
    }

    let mut scale = if digits_before_dot >= 0 {
        // we had a decimal place so set the scale
        (coeff.len() as u32) - (digits_before_dot as u32)
    } else {
        0
    };

    // Parse this using specified radix
    let mut data = [0u32, 0u32, 0u32];
    let mut tmp = [0u32, 0u32, 0u32];
    let len = coeff.len();
    for (i, digit) in coeff.iter().enumerate() {
        // If the data is going to overflow then we should go into recovery mode
        tmp[0] = data[0];
        tmp[1] = data[1];
        tmp[2] = data[2];
        let overflow = mul_by_u32(&mut tmp, radix);
        if overflow > 0 {
            // This means that we have more data to process, that we're not sure what to do with.
            // This may or may not be an issue - depending on whether we're past a decimal point
            // or not.
            if (i as i32) < digits_before_dot && i + 1 < len {
                return Err(Error::new("Invalid decimal: overflow from too many digits"));
            }

            if *digit >= 5 {
                let carry = add_one_internal(&mut data);
                if carry > 0 {
                    // Highly unlikely scenario which is more indicative of a bug
                    return Err(Error::new("Invalid decimal: overflow when rounding"));
                }
            }
            // We're also one less digit so reduce the scale
            let diff = (len - i) as u32;
            if diff > scale {
                return Err(Error::new("Invalid decimal: overflow from scale mismatch"));
            }
            scale -= diff;
            break;
        } else {
            data[0] = tmp[0];
            data[1] = tmp[1];
            data[2] = tmp[2];
            let carry = add_by_internal(&mut data, &[*digit]);
            if carry > 0 {
                // Highly unlikely scenario which is more indicative of a bug
                return Err(Error::new("Invalid decimal: overflow from carry"));
            }
        }
    }

    Ok(Decimal {
        lo: data[0],
        mid: data[1],
        hi: data[2],
        flags: flags(negative, scale),
    })
}

impl Num for Decimal {
    type FromStrRadixErr = Error;

    fn from_str_radix(str: &str, radix: u32) -> Result<Self, Self::FromStrRadixErr> {
        Decimal::from_str_radix(str, radix)
    }
}

impl FromStr for Decimal {
    type Err = Error;

    fn from_str(value: &str) -> Result<Decimal, Self::Err> {
        parse_str_radix_10(value)
    }
}

impl FromPrimitive for Decimal {
    // i32 always fits: store the magnitude in lo, widening to i64 so that
    // negating i32::MIN cannot overflow.
    fn from_i32(n: i32) -> Option<Decimal> {
        let flags: u32;
        let value_copy: i64;
        if n >= 0 {
            flags = 0;
            value_copy = n as i64;
        } else {
            flags = SIGN_MASK;
            value_copy = -(n as i64);
        }
        Some(Decimal {
            flags,
            lo: value_copy as u32,
            mid: 0,
            hi: 0,
        })
    }

    // i64 always fits: magnitude goes into lo/mid, widening to i128 for the
    // same i64::MIN negation reason.
    fn from_i64(n: i64) -> Option<Decimal> {
        let flags: u32;
        let value_copy: i128;
        if n >= 0 {
            flags = 0;
            value_copy = n as i128;
        } else {
            flags = SIGN_MASK;
            value_copy = -(n as i128);
        }
        Some(Decimal {
            flags,
            lo: value_copy as u32,
            mid: (value_copy >> 32) as u32,
            hi: 0,
        })
    }

    fn from_i128(n: i128) ->
    Option<Decimal> {
        let flags;
        let unsigned;
        if n >= 0 {
            unsigned = n as u128;
            flags = 0;
        } else {
            unsigned = -n as u128;
            flags = SIGN_MASK;
        };
        // Check if we overflow — only 96 bits of magnitude are representable.
        if unsigned >> 96 != 0 {
            return None;
        }
        Some(Decimal {
            flags,
            lo: unsigned as u32,
            mid: (unsigned >> 32) as u32,
            hi: (unsigned >> 64) as u32,
        })
    }

    fn from_u32(n: u32) -> Option<Decimal> {
        Some(Decimal {
            flags: 0,
            lo: n,
            mid: 0,
            hi: 0,
        })
    }

    fn from_u64(n: u64) -> Option<Decimal> {
        Some(Decimal {
            flags: 0,
            lo: n as u32,
            mid: (n >> 32) as u32,
            hi: 0,
        })
    }

    fn from_u128(n: u128) -> Option<Decimal> {
        // Check if we overflow
        if n >> 96 != 0 {
            return None;
        }
        Some(Decimal {
            flags: 0,
            lo: n as u32,
            mid: (n >> 32) as u32,
            hi: (n >> 64) as u32,
        })
    }

    fn from_f32(n: f32) -> Option<Decimal> {
        // Handle the case if it is NaN, Infinity or -Infinity
        if !n.is_finite() {
            return None;
        }

        // It's a shame we can't use a union for this due to it being broken up by bits
        // i.e. 1/8/23 (sign, exponent, mantissa)
        // See https://en.wikipedia.org/wiki/IEEE_754-1985
        // n = (sign*-1) * 2^exp * mantissa
        // Decimal of course stores this differently... 10^-exp * significand
        let raw = n.to_bits();
        let positive = (raw >> 31) == 0;
        let biased_exponent = ((raw >> 23) & 0xFF) as i32;
        let mantissa = raw & 0x007F_FFFF;

        // Handle the special zero case
        if biased_exponent == 0 && mantissa == 0 {
            let mut zero = Decimal::zero();
            if !positive {
                zero.set_sign_negative(true);
            }
            return Some(zero);
        }

        // Get the bits and exponent2
        let mut exponent2 = biased_exponent - 127;
        let mut bits = [mantissa, 0u32, 0u32];
        if biased_exponent == 0 {
            // Denormalized number - correct the exponent
            exponent2 += 1;
        } else {
            // Add extra hidden bit to mantissa
            bits[0] |= 0x0080_0000;
        }

        // The act of copying a mantissa as integer bits is equivalent to shifting
        // left the mantissa 23 bits. The exponent is reduced to compensate.
        exponent2 -= 23;

        // Convert to decimal
        Decimal::base2_to_decimal(&mut bits, exponent2, positive, false)
    }

    fn from_f64(n: f64) -> Option<Decimal> {
        // Handle the case if it is NaN, Infinity or -Infinity
        if !n.is_finite() {
            return None;
        }

        // It's a shame we can't use a union for this due to it being broken up by bits
        // i.e. 1/11/52 (sign, exponent, mantissa)
        // See https://en.wikipedia.org/wiki/IEEE_754-1985
        // n = (sign*-1) * 2^exp * mantissa
        // Decimal of course stores this differently... 10^-exp * significand
        let raw = n.to_bits();
        let positive = (raw >> 63) == 0;
        let biased_exponent = ((raw >> 52) & 0x7FF) as i32;
        let mantissa = raw & 0x000F_FFFF_FFFF_FFFF;

        // Handle the special zero case
        if biased_exponent == 0 && mantissa == 0 {
            let mut zero = Decimal::zero();
            if !positive {
                zero.set_sign_negative(true);
            }
            return Some(zero);
        }

        // Get the bits and exponent2
        let mut exponent2 = biased_exponent - 1023;
        let mut bits = [
            (mantissa & 0xFFFF_FFFF) as u32,
            ((mantissa >> 32) & 0xFFFF_FFFF) as u32,
            0u32,
        ];
        if biased_exponent == 0 {
            // Denormalized number - correct the exponent
            exponent2 += 1;
        } else {
            // Add extra hidden bit to mantissa
            bits[1] |= 0x0010_0000;
        }

        // The act of copying a mantissa as integer bits is equivalent to shifting
        // left the mantissa 52 bits. The exponent is reduced to compensate.
        exponent2 -= 52;

        // Convert to decimal
        Decimal::base2_to_decimal(&mut bits, exponent2, positive, true)
    }
}

impl ToPrimitive for Decimal {
    // Truncates toward zero, then converts; None on overflow.
    fn to_i64(&self) -> Option<i64> {
        let d = self.trunc();
        // Quick overflow check
        if d.hi != 0 || (d.mid & 0x8000_0000) > 0 {
            // Overflow
            return None;
        }

        let raw: i64 = (i64::from(d.mid) << 32) | i64::from(d.lo);
        if self.is_sign_negative() {
            Some(-raw)
        } else {
            Some(raw)
        }
    }

    // The full 96-bit magnitude always fits in i128, so this never overflows.
    fn to_i128(&self) -> Option<i128> {
        let d = self.trunc();
        let raw: i128 = ((i128::from(d.hi) << 64) | i128::from(d.mid) << 32) | i128::from(d.lo);
        if self.is_sign_negative() {
            Some(-raw)
        } else {
            Some(raw)
        }
    }

    fn to_u64(&self) -> Option<u64> {
        if self.is_sign_negative() {
            return None;
        }

        let d = self.trunc();
        if d.hi != 0 {
            // Overflow
            return None;
        }

        Some((u64::from(d.mid) << 32) | u64::from(d.lo))
    }

    fn to_u128(&self) -> Option<u128> {
        if self.is_sign_negative() {
            return None;
        }

        let d = self.trunc();
        Some((u128::from(d.hi) << 64) | (u128::from(d.mid) << 32) | u128::from(d.lo))
    }

    fn to_f64(&self) -> Option<f64> {
        if self.scale() == 0 {
            // Integer-valued: go via i64 when possible for an exact conversion.
            let integer = self.to_i64();
            match integer {
                Some(i) => Some(i as f64),
                None => None,
            }
        } else {
            let sign: f64 = if self.is_sign_negative() { -1.0 } else { 1.0 };
            let mut mantissa: u128 = self.lo.into();
            mantissa |= (self.mid as u128) << 32;
            mantissa |= (self.hi as u128) << 64;
            // scale is at most 28, so this fits comfortably into a u128.
            let scale = self.scale();
            let precision: u128 = 10_u128.pow(scale);
            let integral_part = mantissa / precision;
            let frac_part = mantissa % precision;
            let frac_f64 = (frac_part as f64) / (precision as f64);
            let value = sign * ((integral_part as f64) + frac_f64);
            // NOTE(review): `value * round_to / round_to` is mathematically a no-op
            // and does not round — presumably a leftover from an earlier rounding
            // attempt; confirm intent upstream before simplifying.
            let round_to = 10f64.powi(self.scale() as i32);
            Some(value * round_to / round_to)
        }
    }
}

impl core::convert::TryFrom<f32> for Decimal {
    type Error = crate::Error;

    fn try_from(value: f32) -> Result<Self, Error> {
        Self::from_f32(value).ok_or_else(|| Error::new("Failed to convert to Decimal"))
    }
}

impl core::convert::TryFrom<f64> for Decimal {
    type Error = crate::Error;

    fn try_from(value: f64) -> Result<Self, Error> {
        Self::from_f64(value).ok_or_else(|| Error::new("Failed to convert to Decimal"))
    }
}

impl core::convert::TryFrom<Decimal> for f32 {
    type Error = crate::Error;

    fn try_from(value: Decimal) -> Result<Self, Self::Error> {
        Decimal::to_f32(&value).ok_or_else(|| Error::new("Failed to convert to f32"))
    }
}

impl core::convert::TryFrom<Decimal> for f64 {
    type Error = crate::Error;

    fn try_from(value: Decimal) -> Result<Self, Self::Error> {
        Decimal::to_f64(&value).ok_or_else(|| Error::new("Failed to convert to f64"))
    }
}

// impl that doesn't allocate for serialization purposes.
pub(crate) fn to_str_internal( value: &Decimal, append_sign: bool, precision: Option<usize>, ) -> ArrayString<[u8; MAX_STR_BUFFER_SIZE]> { // Get the scale - where we need to put the decimal point let scale = value.scale() as usize; // Convert to a string and manipulate that (neg at front, inject decimal) let mut chars = ArrayVec::<[_; MAX_STR_BUFFER_SIZE]>::new(); let mut working = [value.lo, value.mid, value.hi]; while !is_all_zero(&working) { let remainder = div_by_u32(&mut working, 10u32); chars.push(char::from(b'0' + remainder as u8)); } while scale > chars.len() { chars.push('0'); } let prec = match precision { Some(prec) => prec, None => scale, }; let len = chars.len(); let whole_len = len - scale; let mut rep = ArrayString::new(); if append_sign && value.is_sign_negative() { rep.push('-'); } for i in 0..whole_len + prec { if i == len - scale { if i == 0 { rep.push('0'); } rep.push('.'); } if i >= len { rep.push('0'); } else { let c = chars[len - i - 1]; rep.push(c); } } // corner case for when we truncated everything in a low fractional if rep.is_empty() { rep.push('0'); } rep } impl fmt::Display for Decimal { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { let rep = to_str_internal(self, false, f.precision()); f.pad_integral(self.is_sign_positive(), "", rep.as_str()) } } impl fmt::Debug for Decimal { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { fmt::Display::fmt(self, f) } } fn fmt_scientific_notation(value: &Decimal, exponent_symbol: &str, f: &mut fmt::Formatter<'_>) -> fmt::Result { #[cfg(not(feature = "std"))] use alloc::string::ToString; // Get the scale - this is the e value. With multiples of 10 this may get bigger. 
    let mut exponent = -(value.scale() as isize);
    // Convert the integral to a string
    let mut chars = Vec::new();
    let mut working = [value.lo, value.mid, value.hi];
    // Digits are collected least-significant first.
    while !is_all_zero(&working) {
        let remainder = div_by_u32(&mut working, 10u32);
        chars.push(char::from(b'0' + remainder as u8));
    }
    // First of all, apply scientific notation rules. That is:
    // 1. If non-zero digit comes first, move decimal point left so that e is a positive integer
    // 2. If decimal point comes first, move decimal point right until after the first non-zero digit
    // Since decimal notation naturally lends itself this way, we just need to inject the decimal
    // point in the right place and adjust the exponent accordingly.
    let len = chars.len();
    let mut rep;
    if len > 1 {
        if chars.iter().take(len - 1).all(|c| *c == '0') {
            // Chomp off the zero's.
            rep = chars.iter().skip(len - 1).collect::<String>();
        } else {
            // Insert before the most significant digit (chars are reversed),
            // then reverse into display order.
            chars.insert(len - 1, '.');
            rep = chars.iter().rev().collect::<String>();
        }
        exponent += (len - 1) as isize;
    } else {
        rep = chars.iter().collect::<String>();
    }
    rep.push_str(exponent_symbol);
    rep.push_str(&exponent.to_string());
    f.pad_integral(value.is_sign_positive(), "", &rep)
}

impl fmt::LowerExp for Decimal {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt_scientific_notation(self, "e", f)
    }
}

impl fmt::UpperExp for Decimal {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt_scientific_notation(self, "E", f)
    }
}

impl Neg for Decimal {
    type Output = Decimal;

    // Negation flips only the sign bit; magnitude and scale are untouched.
    fn neg(self) -> Decimal {
        let mut copy = self;
        copy.set_sign_negative(self.is_sign_positive());
        copy
    }
}

impl<'a> Neg for &'a Decimal {
    type Output = Decimal;

    fn neg(self) -> Decimal {
        Decimal {
            flags: flags(!self.is_sign_negative(), self.scale()),
            hi: self.hi,
            lo: self.lo,
            mid: self.mid,
        }
    }
}

// The forward_all_binop! macro generates the owned/borrowed operand
// combinations from the reference-reference impl below.
forward_all_binop!(impl Add for Decimal, add);

impl<'a, 'b> Add<&'b Decimal> for &'a Decimal {
    type Output = Decimal;

    #[inline(always)]
    fn add(self, other: &Decimal) -> Decimal {
        // Panicking wrapper over the checked operation, matching primitive
        // integer semantics.
        match self.checked_add(*other) {
            Some(sum) => sum,
            None => panic!("Addition overflowed"),
        }
    }
}

impl AddAssign for Decimal {
    fn add_assign(&mut self, other: Decimal) {
        // Delegate to Add, then copy every field of the result back.
        let result = self.add(other);
        self.lo = result.lo;
        self.mid = result.mid;
        self.hi = result.hi;
        self.flags = result.flags;
    }
}

impl<'a> AddAssign<&'a Decimal> for Decimal {
    fn add_assign(&mut self, other: &'a Decimal) {
        Decimal::add_assign(self, *other)
    }
}

impl<'a> AddAssign<Decimal> for &'a mut Decimal {
    fn add_assign(&mut self, other: Decimal) {
        Decimal::add_assign(*self, other)
    }
}

impl<'a> AddAssign<&'a Decimal> for &'a mut Decimal {
    fn add_assign(&mut self, other: &'a Decimal) {
        Decimal::add_assign(*self, *other)
    }
}

forward_all_binop!(impl Sub for Decimal, sub);

impl<'a, 'b> Sub<&'b Decimal> for &'a Decimal {
    type Output = Decimal;

    #[inline(always)]
    fn sub(self, other: &Decimal) -> Decimal {
        match self.checked_sub(*other) {
            Some(diff) => diff,
            None => panic!("Subtraction overflowed"),
        }
    }
}

impl SubAssign for Decimal {
    fn sub_assign(&mut self, other: Decimal) {
        let result = self.sub(other);
        self.lo = result.lo;
        self.mid = result.mid;
        self.hi = result.hi;
        self.flags = result.flags;
    }
}

impl<'a> SubAssign<&'a Decimal> for Decimal {
    fn sub_assign(&mut self, other: &'a Decimal) {
        Decimal::sub_assign(self, *other)
    }
}

impl<'a> SubAssign<Decimal> for &'a mut Decimal {
    fn sub_assign(&mut self, other: Decimal) {
        Decimal::sub_assign(*self, other)
    }
}

impl<'a> SubAssign<&'a Decimal> for &'a mut Decimal {
    fn sub_assign(&mut self, other: &'a Decimal) {
        Decimal::sub_assign(*self, *other)
    }
}

forward_all_binop!(impl Mul for Decimal, mul);

impl<'a, 'b> Mul<&'b Decimal> for &'a Decimal {
    type Output = Decimal;

    #[inline]
    fn mul(self, other: &Decimal) -> Decimal {
        match self.checked_mul(*other) {
            Some(prod) => prod,
            None => panic!("Multiplication overflowed"),
        }
    }
}

impl MulAssign for Decimal {
    fn mul_assign(&mut self, other: Decimal) {
        let result = self.mul(other);
        self.lo = result.lo;
        self.mid = result.mid;
        self.hi = result.hi;
        self.flags = result.flags;
    }
}

impl<'a> MulAssign<&'a Decimal> for Decimal {
    fn mul_assign(&mut self, other: &'a Decimal) {
        Decimal::mul_assign(self, *other)
    }
}

impl<'a> MulAssign<Decimal> for &'a mut Decimal {
    fn mul_assign(&mut self, other: Decimal) {
        Decimal::mul_assign(*self, other)
    }
}

impl<'a> MulAssign<&'a Decimal> for &'a mut Decimal {
    fn mul_assign(&mut self, other: &'a Decimal) {
        Decimal::mul_assign(*self, *other)
    }
}

forward_all_binop!(impl Div for Decimal, div);

impl<'a, 'b> Div<&'b Decimal> for &'a Decimal {
    type Output = Decimal;

    fn div(self, other: &Decimal) -> Decimal {
        // Division distinguishes overflow from divide-by-zero, hence the
        // dedicated DivResult rather than Option.
        match ops::div_impl(&self, other) {
            DivResult::Ok(quot) => quot,
            DivResult::Overflow => panic!("Division overflowed"),
            DivResult::DivByZero => panic!("Division by zero"),
        }
    }
}

impl DivAssign for Decimal {
    fn div_assign(&mut self, other: Decimal) {
        let result = self.div(other);
        self.lo = result.lo;
        self.mid = result.mid;
        self.hi = result.hi;
        self.flags = result.flags;
    }
}

impl<'a> DivAssign<&'a Decimal> for Decimal {
    fn div_assign(&mut self, other: &'a Decimal) {
        Decimal::div_assign(self, *other)
    }
}

impl<'a> DivAssign<Decimal> for &'a mut Decimal {
    fn div_assign(&mut self, other: Decimal) {
        Decimal::div_assign(*self, other)
    }
}

impl<'a> DivAssign<&'a Decimal> for &'a mut Decimal {
    fn div_assign(&mut self, other: &'a Decimal) {
        Decimal::div_assign(*self, *other)
    }
}

forward_all_binop!(impl Rem for Decimal, rem);

impl<'a, 'b> Rem<&'b Decimal> for &'a Decimal {
    type Output = Decimal;

    #[inline]
    fn rem(self, other: &Decimal) -> Decimal {
        match self.checked_rem(*other) {
            Some(rem) => rem,
            None => panic!("Division by zero"),
        }
    }
}

impl RemAssign for Decimal {
    fn rem_assign(&mut self, other: Decimal) {
        let result = self.rem(other);
        self.lo = result.lo;
        self.mid = result.mid;
        self.hi = result.hi;
        self.flags = result.flags;
    }
}

impl<'a> RemAssign<&'a Decimal> for Decimal {
    fn rem_assign(&mut self, other: &'a Decimal) {
        Decimal::rem_assign(self, *other)
    }
}

impl<'a> RemAssign<Decimal> for &'a mut Decimal {
    fn rem_assign(&mut self, other: Decimal) {
        Decimal::rem_assign(*self, other)
    }
}

impl<'a> RemAssign<&'a Decimal> for &'a mut Decimal {
    fn rem_assign(&mut self, other: &'a Decimal) {
        Decimal::rem_assign(*self, *other)
    }
}

impl PartialEq for Decimal {
    #[inline]
    // Equality is defined via cmp so that e.g. 1.0 == 1.00 despite differing
    // internal representations.
    fn eq(&self, other: &Decimal) -> bool {
        self.cmp(other) == Equal
    }
}

impl Eq for Decimal {}

impl Hash for Decimal {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Normalize first so equal values (1.0 vs 1.00) hash identically,
        // keeping Hash consistent with PartialEq.
        let n = self.normalize();
        n.lo.hash(state);
        n.mid.hash(state);
        n.hi.hash(state);
        n.flags.hash(state);
    }
}

impl PartialOrd for Decimal {
    #[inline]
    fn partial_cmp(&self, other: &Decimal) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Decimal {
    fn cmp(&self, other: &Decimal) -> Ordering {
        // Quick exit if major differences
        // Zero compares equal regardless of sign bit or scale (-0 == 0).
        if self.is_zero() && other.is_zero() {
            return Ordering::Equal;
        }
        let self_negative = self.is_sign_negative();
        let other_negative = other.is_sign_negative();
        if self_negative && !other_negative {
            return Ordering::Less;
        } else if !self_negative && other_negative {
            return Ordering::Greater;
        }
        // If we have 1.23 and 1.2345 then we have
        // 123 scale 2 and 12345 scale 4
        // We need to convert the first to
        // 12300 scale 4 so we can compare equally
        let left: &Decimal;
        let right: &Decimal;
        if self_negative && other_negative {
            // Both are negative, so reverse cmp
            left = other;
            right = self;
        } else {
            left = self;
            right = other;
        }
        let mut left_scale = left.scale();
        let mut right_scale = right.scale();
        if left_scale == right_scale {
            // Fast path for same scale
            // Compare from the most significant word down.
            if left.hi != right.hi {
                return left.hi.cmp(&right.hi);
            }
            if left.mid != right.mid {
                return left.mid.cmp(&right.mid);
            }
            return left.lo.cmp(&right.lo);
        }
        // Rescale and compare
        let mut left_raw = [left.lo, left.mid, left.hi];
        let mut right_raw = [right.lo, right.mid, right.hi];
        rescale_to_maximum_scale(&mut left_raw, &mut left_scale, &mut right_raw, &mut right_scale);
        cmp_internal(&left_raw, &right_raw)
    }
}

impl Sum for Decimal {
    fn sum<I: Iterator<Item = Decimal>>(iter: I) ->
Self {
        // Simple accumulating sum; panics on overflow like `+` does.
        let mut sum = Decimal::zero();
        for i in iter {
            sum += i;
        }
        sum
    }
}

impl<'a> Sum<&'a Decimal> for Decimal {
    fn sum<I: Iterator<Item = &'a Decimal>>(iter: I) -> Self {
        let mut sum = Decimal::zero();
        for i in iter {
            sum += i;
        }
        sum
    }
}

#[cfg(test)]
mod test {
    // Tests on private methods.
    //
    // All public tests should go under `tests/`.
    use super::*;

    #[test]
    fn it_can_rescale_to_maximum_scale() {
        // Helper: parse a decimal string into its raw 96-bit words and scale.
        fn extract(value: &str) -> ([u32; 3], u32) {
            let v = Decimal::from_str(value).unwrap();
            ([v.lo, v.mid, v.hi], v.scale())
        }

        // (left input, right input, expected left, expected right)
        let tests = &[
            ("1", "1", "1", "1"),
            ("1", "1.0", "1.0", "1.0"),
            ("1", "1.00000", "1.00000", "1.00000"),
            ("1", "1.0000000000", "1.0000000000", "1.0000000000"),
            (
                "1",
                "1.00000000000000000000",
                "1.00000000000000000000",
                "1.00000000000000000000",
            ),
            ("1.1", "1.1", "1.1", "1.1"),
            ("1.1", "1.10000", "1.10000", "1.10000"),
            ("1.1", "1.1000000000", "1.1000000000", "1.1000000000"),
            (
                "1.1",
                "1.10000000000000000000",
                "1.10000000000000000000",
                "1.10000000000000000000",
            ),
            (
                "0.6386554621848739495798319328",
                "11.815126050420168067226890757",
                "0.638655462184873949579831933",
                "11.815126050420168067226890757",
            ),
            (
                "0.0872727272727272727272727272", // Scale 28
                "843.65000000",                   // Scale 8
                "0.0872727272727272727272727",    // 25
                "843.6500000000000000000000000",  // 25
            ),
        ];
        for &(left_raw, right_raw, expected_left, expected_right) in tests {
            // Left = the value to rescale
            // Right = the new scale we're scaling to
            // Expected = the expected left value after rescale
            let (expected_left, expected_lscale) = extract(expected_left);
            let (expected_right, expected_rscale) = extract(expected_right);
            let (mut left, mut left_scale) = extract(left_raw);
            let (mut right, mut right_scale) = extract(right_raw);
            rescale_to_maximum_scale(&mut left, &mut left_scale, &mut right, &mut right_scale);
            assert_eq!(left, expected_left);
            assert_eq!(left_scale, expected_lscale);
            assert_eq!(right, expected_right);
            assert_eq!(right_scale, expected_rscale);

            // Also test the transitive case
            let (mut left, mut left_scale) = extract(left_raw);
            let (mut right, mut right_scale) = extract(right_raw);
            rescale_to_maximum_scale(&mut right, &mut right_scale, &mut left, &mut left_scale);
            assert_eq!(left, expected_left);
            assert_eq!(left_scale, expected_lscale);
            assert_eq!(right, expected_right);
            assert_eq!(right_scale, expected_rscale);
        }
    }

    #[test]
    fn it_can_rescale_internal() {
        // Helper: parse a decimal string into its raw 96-bit words and scale.
        fn extract(value: &str) -> ([u32; 3], u32) {
            let v = Decimal::from_str(value).unwrap();
            ([v.lo, v.mid, v.hi], v.scale())
        }

        // (input, requested scale, expected value)
        let tests = &[
            ("1", 0, "1"),
            ("1", 1, "1.0"),
            ("1", 5, "1.00000"),
            ("1", 10, "1.0000000000"),
            ("1", 20, "1.00000000000000000000"),
            ("0.6386554621848739495798319328", 27, "0.638655462184873949579831933"),
            (
                "843.65000000",                  // Scale 8
                25,                              // 25
                "843.6500000000000000000000000", // 25
            ),
            (
                "843.65000000",                     // Scale 8
                30,                                 // 30
                "843.6500000000000000000000000000", // 28
            ),
        ];
        for &(value_raw, new_scale, expected_value) in tests {
            let (expected_value, _) = extract(expected_value);
            let (mut value, mut value_scale) = extract(value_raw);
            rescale_internal(&mut value, &mut value_scale, new_scale);
            assert_eq!(value, expected_value);
        }
    }

    #[test]
    fn test_shl1_internal() {
        struct TestCase {
            // One thing to be cautious of is that the structure of a number here for shifting left is
            // the reverse of how you may conceive this mentally. i.e. a[2] contains the higher order
            // bits: a[2] a[1] a[0]
            given: [u32; 3],
            given_carry: u32,
            expected: [u32; 3],
            expected_carry: u32,
        }
        let tests = [
            TestCase {
                given: [1, 0, 0],
                given_carry: 0,
                expected: [2, 0, 0],
                expected_carry: 0,
            },
            TestCase {
                // Top bit of the high word shifts out into the carry; the
                // incoming carry shifts into the low bit.
                given: [1, 0, 2147483648],
                given_carry: 1,
                expected: [3, 0, 0],
                expected_carry: 1,
            },
        ];
        for case in &tests {
            let mut test = [case.given[0], case.given[1], case.given[2]];
            let carry = shl1_internal(&mut test, case.given_carry);
            assert_eq!(
                test, case.expected,
                "Bits: {:?} << 1 | {}",
                case.given, case.given_carry
            );
            assert_eq!(
                carry, case.expected_carry,
                "Carry: {:?} << 1 | {}",
                case.given, case.given_carry
            )
        }
    }
}

// implement num checked traits

use crate::Error;
use alloc::{string::String, vec::Vec};
use arrayvec::{ArrayString, ArrayVec};
use core::{
    cmp::{Ordering::Equal, *},
    fmt,
    hash::{Hash, Hasher},
    iter::Sum,
    ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Rem, RemAssign, Sub, SubAssign},
    str::FromStr,
};
#[cfg(feature = "diesel")]
use diesel::sql_types::Numeric;
#[allow(unused_imports)] // It's not actually dead code below, but the compiler thinks it is.
#[cfg(not(feature = "std"))]
use num_traits::float::FloatCore;
use num_traits::{
    CheckedAdd, CheckedDiv, CheckedMul, CheckedRem, CheckedSub, FromPrimitive, Num, One, Signed, ToPrimitive, Zero,
};

// Sign mask for the flags field. A value of zero in this bit indicates a
// positive Decimal value, and a value of one in this bit indicates a
// negative Decimal value.
const SIGN_MASK: u32 = 0x8000_0000;
const UNSIGN_MASK: u32 = 0x4FFF_FFFF;

// Scale mask for the flags field. This byte in the flags field contains
// the power of 10 to divide the Decimal value by. The scale byte must
// contain a value between 0 and 28 inclusive.
const SCALE_MASK: u32 = 0x00FF_0000;
const U8_MASK: u32 = 0x0000_00FF;
const U32_MASK: u64 = 0xFFFF_FFFF;

// Number of bits scale is shifted by.
const SCALE_SHIFT: u32 = 16;
// Number of bits sign is shifted by.
const SIGN_SHIFT: u32 = 31;

// The maximum string buffer size used for serialization purposes. 31 is optimal, however we align
// to the byte boundary for simplicity.
const MAX_STR_BUFFER_SIZE: usize = 32;

// The maximum supported precision
pub(crate) const MAX_PRECISION: u32 = 28;
#[cfg(not(feature = "legacy-ops"))]
const MAX_PRECISION_I32: i32 = 28;
// 79,228,162,514,264,337,593,543,950,335
// The largest 96-bit integer (2^96 - 1) expressed as an i128.
const MAX_I128_REPR: i128 = 0x0000_0000_FFFF_FFFF_FFFF_FFFF_FFFF_FFFF;

// MIN: sign bit set, all 96 mantissa bits set, scale 0.
const MIN: Decimal = Decimal {
    flags: 2_147_483_648,
    lo: 4_294_967_295,
    mid: 4_294_967_295,
    hi: 4_294_967_295,
};

// MAX: no flags, all 96 mantissa bits set, scale 0.
const MAX: Decimal = Decimal {
    flags: 0,
    lo: 4_294_967_295,
    mid: 4_294_967_295,
    hi: 4_294_967_295,
};

// Fast access for 10^n where n is 0-9
const POWERS_10: [u32; 10] = [
    1,
    10,
    100,
    1_000,
    10_000,
    100_000,
    1_000_000,
    10_000_000,
    100_000_000,
    1_000_000_000,
];
// Fast access for 10^n where n is 10-19
#[allow(dead_code)]
const BIG_POWERS_10: [u64; 10] = [
    10_000_000_000,
    100_000_000_000,
    1_000_000_000_000,
    10_000_000_000_000,
    100_000_000_000_000,
    1_000_000_000_000_000,
    10_000_000_000_000_000,
    100_000_000_000_000_000,
    1_000_000_000_000_000_000,
    10_000_000_000_000_000_000,
];

/// `UnpackedDecimal` contains unpacked representation of `Decimal` where each component
/// of decimal-format stored in it's own field
#[derive(Clone, Copy, Debug)]
pub struct UnpackedDecimal {
    pub is_negative: bool,
    pub scale: u32,
    pub hi: u32,
    pub mid: u32,
    pub lo: u32,
}

/// `Decimal` represents a 128 bit representation of a fixed-precision decimal number.
/// The finite set of values of type `Decimal` are of the form m / 10<sup>e</sup>,
/// where m is an integer such that -2<sup>96</sup> < m < 2<sup>96</sup>, and e is an integer
/// between 0 and 28 inclusive.
#[derive(Clone, Copy)]
#[cfg_attr(feature = "diesel", derive(FromSqlRow, AsExpression), sql_type = "Numeric")]
pub struct Decimal {
    // Bits 0-15: unused
    // Bits 16-23: Contains "e", a value between 0-28 that indicates the scale
    // Bits 24-30: unused
    // Bit 31: the sign of the Decimal value, 0 meaning positive and 1 meaning negative.
    flags: u32,
    // The lo, mid, hi, and flags fields contain the representation of the
    // Decimal value as a 96-bit integer.
    hi: u32,
    lo: u32,
    mid: u32,
}

/// `RoundingStrategy` represents the different strategies that can be used by
/// `round_dp_with_strategy`.
///
/// `RoundingStrategy::BankersRounding` - Rounds toward the nearest even number, e.g. 6.5 -> 6, 7.5 -> 8
/// `RoundingStrategy::RoundHalfUp` - Rounds up if the value >= 5, otherwise rounds down, e.g. 6.5 -> 7,
/// `RoundingStrategy::RoundHalfDown` - Rounds down if the value =< 5, otherwise rounds up, e.g.
/// 6.5 -> 6, 6.51 -> 7
/// 1.4999999 -> 1
/// `RoundingStrategy::RoundDown` - Always round down.
/// `RoundingStrategy::RoundUp` - Always round up.
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum RoundingStrategy {
    BankersRounding,
    RoundHalfUp,
    RoundHalfDown,
    RoundDown,
    RoundUp,
}

#[allow(dead_code)]
impl Decimal {
    /// Returns a `Decimal` with a 64 bit `m` representation and corresponding `e` scale.
    ///
    /// # Arguments
    ///
    /// * `num` - An i64 that represents the `m` portion of the decimal number
    /// * `scale` - A u32 representing the `e` portion of the decimal number.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    ///
    /// let pi = Decimal::new(3141, 3);
    /// assert_eq!(pi.to_string(), "3.141");
    /// ```
    pub fn new(num: i64, scale: u32) -> Decimal {
        // Panic early on invalid scale rather than producing a malformed flags field.
        if scale > MAX_PRECISION {
            panic!(
                "Scale exceeds the maximum precision allowed: {} > {}",
                scale, MAX_PRECISION
            );
        }
        let flags: u32 = scale << SCALE_SHIFT;
        if num < 0 {
            // wrapping_neg handles i64::MIN, whose magnitude still fits the
            // unsigned 96-bit mantissa.
            let pos_num = num.wrapping_neg() as u64;
            return Decimal {
                flags: flags | SIGN_MASK,
                hi: 0,
                lo: (pos_num & U32_MASK) as u32,
                mid: ((pos_num >> 32) & U32_MASK) as u32,
            };
        }
        Decimal {
            flags,
            hi: 0,
            lo: (num as u64 & U32_MASK) as u32,
            mid: ((num as u64 >> 32) & U32_MASK) as u32,
        }
    }

    /// Creates a `Decimal` using a 128 bit signed `m` representation and corresponding `e` scale.
    ///
    /// # Arguments
    ///
    /// * `num` - An i128 that represents the `m` portion of the decimal number
    /// * `scale` - A u32 representing the `e` portion of the decimal number.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    ///
    /// let pi = Decimal::from_i128_with_scale(3141i128, 3);
    /// assert_eq!(pi.to_string(), "3.141");
    /// ```
    pub fn from_i128_with_scale(num: i128, scale: u32) -> Decimal {
        if scale > MAX_PRECISION {
            panic!(
                "Scale exceeds the maximum precision allowed: {} > {}",
                scale, MAX_PRECISION
            );
        }
        let mut neg = false;
        let mut wrapped = num;
        // The mantissa must fit in 96 bits; reject anything outside +/- (2^96 - 1).
        if num > MAX_I128_REPR {
            panic!("Number exceeds maximum value that can be represented");
        } else if num < -MAX_I128_REPR {
            panic!("Number less than minimum value that can be represented");
        } else if num < 0 {
            neg = true;
            wrapped = -num;
        }
        let flags: u32 = flags(neg, scale);
        Decimal {
            flags,
            lo: (wrapped as u64 & U32_MASK) as u32,
            mid: ((wrapped as u64 >> 32) & U32_MASK) as u32,
            hi: ((wrapped as u128 >> 64) as u64 & U32_MASK) as u32,
        }
    }

    /// Returns a `Decimal` using the instances constituent parts.
    ///
    /// # Arguments
    ///
    /// * `lo` - The low 32 bits of a 96-bit integer.
    /// * `mid` - The middle 32 bits of a 96-bit integer.
    /// * `hi` - The high 32 bits of a 96-bit integer.
    /// * `negative` - `true` to indicate a negative number.
    /// * `scale` - A power of 10 ranging from 0 to 28.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    ///
    /// let pi = Decimal::from_parts(1102470952, 185874565, 1703060790, false, 28);
    /// assert_eq!(pi.to_string(), "3.1415926535897932384626433832");
    /// ```
    pub const fn from_parts(lo: u32, mid: u32, hi: u32, negative: bool, scale: u32) -> Decimal {
        Decimal {
            lo,
            mid,
            hi,
            flags: flags(negative, scale),
        }
    }

    // Crate-internal constructor that takes a pre-built flags word verbatim.
    pub(crate) const fn from_parts_raw(lo: u32, mid: u32, hi: u32, flags: u32) -> Decimal {
        Decimal { lo, mid, hi, flags }
    }

    /// Returns a `Result` which if successful contains the `Decimal` constitution of
    /// the scientific notation provided by `value`.
    ///
    /// # Arguments
    ///
    /// * `value` - The scientific notation of the `Decimal`.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    ///
    /// let value = Decimal::from_scientific("9.7e-7").unwrap();
    /// assert_eq!(value.to_string(), "0.00000097");
    /// ```
    pub fn from_scientific(value: &str) -> Result<Decimal, Error> {
        let err = Error::new("Failed to parse");
        // Split into mantissa and exponent on the first 'e'/'E'.
        let mut split = value.splitn(2, |c| c == 'e' || c == 'E');

        let base = split.next().ok_or_else(|| err.clone())?;
        let exp = split.next().ok_or_else(|| err.clone())?;

        let mut ret = Decimal::from_str(base)?;
        let current_scale = ret.scale();

        if exp.starts_with('-') {
            // Negative exponent: increase the scale (shift the point left).
            let exp: u32 = exp[1..].parse().map_err(move |_| err)?;
            ret.set_scale(current_scale + exp)?;
        } else {
            let exp: u32 = exp.parse().map_err(move |_| err)?;
            if exp <= current_scale {
                // The exponent can be absorbed by reducing the scale.
                ret.set_scale(current_scale - exp)?;
            } else {
                // Otherwise multiply out the remaining power of ten.
                ret *= Decimal::from_i64(10_i64.pow(exp)).unwrap();
                ret = ret.normalize();
            }
        }
        Ok(ret)
    }

    /// Returns the scale of the decimal number, otherwise known as `e`.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    ///
    /// let num = Decimal::new(1234, 3);
    /// assert_eq!(num.scale(), 3u32);
    /// ```
    #[inline]
    pub const fn scale(&self) -> u32 {
        ((self.flags & SCALE_MASK) >> SCALE_SHIFT) as u32
    }

    /// An optimized method for changing the sign of a decimal number.
    ///
    /// # Arguments
    ///
    /// * `positive`: true if the resulting decimal should be positive.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    ///
    /// let mut one = Decimal::new(1, 0);
    /// one.set_sign(false);
    /// assert_eq!(one.to_string(), "-1");
    /// ```
    #[deprecated(since = "1.4.0", note = "please use `set_sign_positive` instead")]
    pub fn set_sign(&mut self, positive: bool) {
        self.set_sign_positive(positive);
    }

    /// An optimized method for changing the sign of a decimal number.
    ///
    /// # Arguments
    ///
    /// * `positive`: true if the resulting decimal should be positive.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    ///
    /// let mut one = Decimal::new(1, 0);
    /// one.set_sign_positive(false);
    /// assert_eq!(one.to_string(), "-1");
    /// ```
    #[inline(always)]
    pub fn set_sign_positive(&mut self, positive: bool) {
        if positive {
            self.flags &= UNSIGN_MASK;
        } else {
            self.flags |= SIGN_MASK;
        }
    }

    /// An optimized method for changing the sign of a decimal number.
    ///
    /// # Arguments
    ///
    /// * `negative`: true if the resulting decimal should be negative.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    ///
    /// let mut one = Decimal::new(1, 0);
    /// one.set_sign_negative(true);
    /// assert_eq!(one.to_string(), "-1");
    /// ```
    #[inline(always)]
    pub fn set_sign_negative(&mut self, negative: bool) {
        self.set_sign_positive(!negative);
    }

    /// An optimized method for changing the scale of a decimal number.
    ///
    /// # Arguments
    ///
    /// * `scale`: the new scale of the number
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    ///
    /// let mut one = Decimal::new(1, 0);
    /// one.set_scale(5);
    /// assert_eq!(one.to_string(), "0.00001");
    /// ```
    pub fn set_scale(&mut self, scale: u32) -> Result<(), Error> {
        if scale > MAX_PRECISION {
            return Err(Error::new("Scale exceeds maximum precision"));
        }
        // Replace the scale byte while preserving the sign bit; the mantissa
        // is untouched, so the numeric value changes by a power of ten.
        self.flags = (scale << SCALE_SHIFT) | (self.flags & SIGN_MASK);
        Ok(())
    }

    /// Modifies the `Decimal` to the given scale, attempting to do so without changing the
    /// underlying number itself.
    ///
    /// Note that setting the scale to something less then the current `Decimal`s scale will
    /// cause the newly created `Decimal` to have some rounding.
    /// Scales greater than the maximum precision supported by `Decimal` will be automatically
    /// rounded to `Decimal::MAX_PRECISION`.
    /// Rounding leverages the half up strategy.
    ///
    /// # Arguments
    /// * `scale`: The scale to use for the new `Decimal` number.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    ///
    /// let mut number = Decimal::new(1_123, 3);
    /// number.rescale(6);
    /// assert_eq!(number, Decimal::new(1_123_000, 6));
    /// let mut round = Decimal::new(145, 2);
    /// round.rescale(1);
    /// assert_eq!(round, Decimal::new(15, 1));
    /// ```
    pub fn rescale(&mut self, scale: u32) {
        // Delegate the mantissa adjustment to the shared helper; it may clamp
        // the scale, so the final scale comes back via `value_scale`.
        let mut array = [self.lo, self.mid, self.hi];
        let mut value_scale = self.scale();
        rescale_internal(&mut array, &mut value_scale, scale);
        self.lo = array[0];
        self.mid = array[1];
        self.hi = array[2];
        self.flags = flags(self.is_sign_negative(), value_scale);
    }

    /// Returns a serialized version of the decimal number.
    /// The resulting byte array will have the following representation:
    ///
    /// * Bytes 1-4: flags
    /// * Bytes 5-8: lo portion of `m`
    /// * Bytes 9-12: mid portion of `m`
    /// * Bytes 13-16: high portion of `m`
    pub const fn serialize(&self) -> [u8; 16] {
        // Each u32 field is emitted little-endian, least significant byte first.
        [
            (self.flags & U8_MASK) as u8,
            ((self.flags >> 8) & U8_MASK) as u8,
            ((self.flags >> 16) & U8_MASK) as u8,
            ((self.flags >> 24) & U8_MASK) as u8,
            (self.lo & U8_MASK) as u8,
            ((self.lo >> 8) & U8_MASK) as u8,
            ((self.lo >> 16) & U8_MASK) as u8,
            ((self.lo >> 24) & U8_MASK) as u8,
            (self.mid & U8_MASK) as u8,
            ((self.mid >> 8) & U8_MASK) as u8,
            ((self.mid >> 16) & U8_MASK) as u8,
            ((self.mid >> 24) & U8_MASK) as u8,
            (self.hi & U8_MASK) as u8,
            ((self.hi >> 8) & U8_MASK) as u8,
            ((self.hi >> 16) & U8_MASK) as u8,
            ((self.hi >> 24) & U8_MASK) as u8,
        ]
    }

    /// Deserializes the given bytes into a decimal number.
    /// The deserialized byte representation must be 16 bytes and adhere to the followign convention:
    ///
    /// * Bytes 1-4: flags
    /// * Bytes 5-8: lo portion of `m`
    /// * Bytes 9-12: mid portion of `m`
    /// * Bytes 13-16: high portion of `m`
    pub const fn deserialize(bytes: [u8; 16]) -> Decimal {
        // Inverse of `serialize`: rebuild each u32 from little-endian bytes.
        Decimal {
            flags: (bytes[0] as u32) | (bytes[1] as u32) << 8 | (bytes[2] as u32) << 16 | (bytes[3] as u32) << 24,
            lo: (bytes[4] as u32) | (bytes[5] as u32) << 8 | (bytes[6] as u32) << 16 | (bytes[7] as u32) << 24,
            mid: (bytes[8] as u32) | (bytes[9] as u32) << 8 | (bytes[10] as u32) << 16 | (bytes[11] as u32) << 24,
            hi: (bytes[12] as u32) | (bytes[13] as u32) << 8 | (bytes[14] as u32) << 16 | (bytes[15] as u32) << 24,
        }
    }

    /// Returns `true` if the decimal is negative.
    #[deprecated(since = "0.6.3", note = "please use `is_sign_negative` instead")]
    pub fn is_negative(&self) -> bool {
        self.is_sign_negative()
    }

    /// Returns `true` if the decimal is positive.
    #[deprecated(since = "0.6.3", note = "please use `is_sign_positive` instead")]
    pub fn is_positive(&self) -> bool {
        self.is_sign_positive()
    }

    /// Returns `true` if the sign bit of the decimal is negative.
    #[inline(always)]
    pub const fn is_sign_negative(&self) -> bool {
        self.flags & SIGN_MASK > 0
    }

    /// Returns `true` if the sign bit of the decimal is positive.
    #[inline(always)]
    pub const fn is_sign_positive(&self) -> bool {
        self.flags & SIGN_MASK == 0
    }

    /// Returns the minimum possible number that `Decimal` can represent.
    pub const fn min_value() -> Decimal {
        MIN
    }

    /// Returns the maximum possible number that `Decimal` can represent.
    pub const fn max_value() -> Decimal {
        MAX
    }

    /// Returns a new `Decimal` integral with no fractional portion.
    /// This is a true truncation whereby no rounding is performed.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    ///
    /// let pi = Decimal::new(3141, 3);
    /// let trunc = Decimal::new(3, 0);
    /// // note that it returns a decimal
    /// assert_eq!(pi.trunc(), trunc);
    /// ```
    pub fn trunc(&self) -> Decimal {
        let mut scale = self.scale();
        if scale == 0 {
            // Nothing to do
            return *self;
        }
        let mut working = [self.lo, self.mid, self.hi];
        // Divide away the fractional digits, up to 9 at a time.
        while scale > 0 {
            // We're removing precision, so we don't care about overflow
            if scale < 10 {
                div_by_u32(&mut working, POWERS_10[scale as usize]);
                break;
            } else {
                div_by_u32(&mut working, POWERS_10[9]);
                // Only 9 as this array starts with 1
                scale -= 9;
            }
        }
        Decimal {
            lo: working[0],
            mid: working[1],
            hi: working[2],
            flags: flags(self.is_sign_negative(), 0),
        }
    }

    /// Returns a new `Decimal` representing the fractional portion of the number.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    ///
    /// let pi = Decimal::new(3141, 3);
    /// let fract = Decimal::new(141, 3);
    /// // note that it returns a decimal
    /// assert_eq!(pi.fract(), fract);
    /// ```
    pub fn fract(&self) -> Decimal {
        // This is essentially the original number minus the integral.
// Could possibly be optimized in the future *self - self.trunc() } /// Computes the absolute value of `self`. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let num = Decimal::new(-3141, 3); /// assert_eq!(num.abs().to_string(), "3.141"); /// ``` pub fn abs(&self) -> Decimal { let mut me = *self; me.set_sign_positive(true); me } /// Returns the largest integer less than or equal to a number. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let num = Decimal::new(3641, 3); /// assert_eq!(num.floor().to_string(), "3"); /// ``` pub fn floor(&self) -> Decimal { let scale = self.scale(); if scale == 0 { // Nothing to do return *self; } // Opportunity for optimization here let floored = self.trunc(); if self.is_sign_negative() && !self.fract().is_zero() { floored - Decimal::one() } else { floored } } /// Returns the smallest integer greater than or equal to a number. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let num = Decimal::new(3141, 3); /// assert_eq!(num.ceil().to_string(), "4"); /// let num = Decimal::new(3, 0); /// assert_eq!(num.ceil().to_string(), "3"); /// ``` pub fn ceil(&self) -> Decimal { let scale = self.scale(); if scale == 0 { // Nothing to do return *self; } // Opportunity for optimization here if self.is_sign_positive() && !self.fract().is_zero() { self.trunc() + Decimal::one() } else { self.trunc() } } /// Returns the maximum of the two numbers. /// /// ``` /// use rust_decimal::Decimal; /// /// let x = Decimal::new(1, 0); /// let y = Decimal::new(2, 0); /// assert_eq!(y, x.max(y)); /// ``` pub fn max(self, other: Decimal) -> Decimal { if self < other { other } else { self } } /// Returns the minimum of the two numbers. 
/// /// ``` /// use rust_decimal::Decimal; /// /// let x = Decimal::new(1, 0); /// let y = Decimal::new(2, 0); /// assert_eq!(x, x.min(y)); /// ``` pub fn min(self, other: Decimal) -> Decimal { if self > other { other } else { self } } /// Strips any trailing zero's from a `Decimal` and converts -0 to 0. /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// let number = Decimal::new(3100, 3); /// // note that it returns a decimal, without the extra scale /// assert_eq!(number.normalize().to_string(), "3.1"); /// ``` pub fn normalize(&self) -> Decimal { if self.is_zero() { // Convert -0, -0.0*, or 0.0* to 0. return Decimal::zero(); } let mut scale = self.scale(); if scale == 0 { // Nothing to do return *self; } let mut result = [self.lo, self.mid, self.hi]; let mut working = [self.lo, self.mid, self.hi]; while scale > 0 { if div_by_u32(&mut working, 10) > 0 { break; } scale -= 1; result.copy_from_slice(&working); } Decimal { lo: result[0], mid: result[1], hi: result[2], flags: flags(self.is_sign_negative(), scale), } } /// Returns a new `Decimal` number with no fractional portion (i.e. an integer). /// Rounding currently follows "Bankers Rounding" rules. e.g. 6.5 -> 6, 7.5 -> 8 /// /// # Example /// /// ``` /// use rust_decimal::Decimal; /// /// // Demonstrating bankers rounding... /// let number_down = Decimal::new(65, 1); /// let number_up = Decimal::new(75, 1); /// assert_eq!(number_down.round().to_string(), "6"); /// assert_eq!(number_up.round().to_string(), "8"); /// ``` pub fn round(&self) -> Decimal { self.round_dp(0) } /// Returns a new `Decimal` number with the specified number of decimal points for fractional /// portion. /// Rounding is performed using the provided [`RoundingStrategy`] /// /// # Arguments /// * `dp`: the number of decimal points to round to. /// * `strategy`: the [`RoundingStrategy`] to use. 
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::{Decimal, RoundingStrategy};
    /// use core::str::FromStr;
    ///
    /// let tax = Decimal::from_str("3.4395").unwrap();
    /// assert_eq!(tax.round_dp_with_strategy(2, RoundingStrategy::RoundHalfUp).to_string(), "3.44");
    /// ```
    pub fn round_dp_with_strategy(&self, dp: u32, strategy: RoundingStrategy) -> Decimal {
        // Short circuit for zero
        if self.is_zero() {
            return Decimal {
                lo: 0,
                mid: 0,
                hi: 0,
                flags: flags(self.is_sign_negative(), dp),
            };
        }

        let old_scale = self.scale();

        // return early if decimal has a smaller number of fractional places than dp
        // e.g. 2.51 rounded to 3 decimal places is 2.51
        if old_scale <= dp {
            return *self;
        }

        let mut value = [self.lo, self.mid, self.hi];
        let mut value_scale = self.scale();
        let negative = self.is_sign_negative();

        value_scale -= dp;

        // Rescale to zero so it's easier to work with: divide off the digits
        // that will be discarded, in chunks of at most 10^9 since each divisor
        // must fit in a u32.
        while value_scale > 0 {
            if value_scale < 10 {
                div_by_u32(&mut value, POWERS_10[value_scale as usize]);
                value_scale = 0;
            } else {
                div_by_u32(&mut value, POWERS_10[9]);
                value_scale -= 9;
            }
        }

        // Do some midpoint rounding checks
        // We're actually doing two things here.
        //  1. Figuring out midpoint rounding when we're right on the boundary. e.g. 2.50000
        //  2. Figuring out whether to add one or not e.g. 2.51
        // For this, we need to figure out the fractional portion that is additional to
        // the rounded number. e.g. for 0.12345 rounding to 2dp we'd want 345.
        // We're doing the equivalent of losing precision (e.g. to get 0.12)
        // then increasing the precision back up to 0.12000
        let mut offset = [self.lo, self.mid, self.hi];
        let mut diff = old_scale - dp;

        while diff > 0 {
            if diff < 10 {
                div_by_u32(&mut offset, POWERS_10[diff as usize]);
                break;
            } else {
                div_by_u32(&mut offset, POWERS_10[9]); // Only 9 as this array starts with 1
                diff -= 9;
            }
        }

        let mut diff = old_scale - dp;

        while diff > 0 {
            if diff < 10 {
                mul_by_u32(&mut offset, POWERS_10[diff as usize]);
                break;
            } else {
                mul_by_u32(&mut offset, POWERS_10[9]); // Only 9 as this array starts with 1
                diff -= 9;
            }
        }

        // decimal_portion = original - offset = the digits being discarded.
        let mut decimal_portion = [self.lo, self.mid, self.hi];
        sub_by_internal(&mut decimal_portion, &offset);

        // If the decimal_portion is zero then we round based on the other data
        // `cap` is one half expressed at the discarded scale (5 * 10^(digits-1)),
        // so comparing against it classifies the remainder as below / exactly at /
        // above the midpoint.
        let mut cap = [5, 0, 0];
        for _ in 0..(old_scale - dp - 1) {
            mul_by_u32(&mut cap, 10);
        }
        let order = cmp_internal(&decimal_portion, &cap);

        match strategy {
            RoundingStrategy::BankersRounding => {
                match order {
                    Ordering::Equal => {
                        // Exactly on the midpoint: round to even, i.e. only bump
                        // when the kept value is currently odd.
                        if (value[0] & 1) == 1 {
                            add_one_internal(&mut value);
                        }
                    }
                    Ordering::Greater => {
                        // Doesn't matter about the decimal portion
                        add_one_internal(&mut value);
                    }
                    _ => {}
                }
            }
            RoundingStrategy::RoundHalfDown => {
                // Midpoint stays down; only strictly-greater remainders bump.
                if let Ordering::Greater = order {
                    add_one_internal(&mut value);
                }
            }
            RoundingStrategy::RoundHalfUp => {
                // when Ordering::Equal, decimal_portion is 0.5 exactly
                // when Ordering::Greater, decimal_portion is > 0.5
                match order {
                    Ordering::Equal => {
                        add_one_internal(&mut value);
                    }
                    Ordering::Greater => {
                        // Doesn't matter about the decimal portion
                        add_one_internal(&mut value);
                    }
                    _ => {}
                }
            }
            RoundingStrategy::RoundUp => {
                // Any non-zero remainder rounds away from zero.
                if !is_all_zero(&decimal_portion) {
                    add_one_internal(&mut value);
                }
            }
            RoundingStrategy::RoundDown => (),
        }

        Decimal {
            lo: value[0],
            mid: value[1],
            hi: value[2],
            flags: flags(negative, dp),
        }
    }

    /// Returns a new `Decimal` number with the specified number of decimal points for fractional portion.
    /// Rounding currently follows "Bankers Rounding" rules. e.g.
    /// 6.5 -> 6, 7.5 -> 8
    ///
    /// # Arguments
    /// * `dp`: the number of decimal points to round to.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    /// use core::str::FromStr;
    ///
    /// let pi = Decimal::from_str("3.1415926535897932384626433832").unwrap();
    /// assert_eq!(pi.round_dp(2).to_string(), "3.14");
    /// ```
    pub fn round_dp(&self, dp: u32) -> Decimal {
        // Bankers rounding is the library default strategy.
        self.round_dp_with_strategy(dp, RoundingStrategy::BankersRounding)
    }

    /// Convert `Decimal` to an internal representation of the underlying struct. This is useful
    /// for debugging the internal state of the object.
    ///
    /// # Important Disclaimer
    /// This is primarily intended for library maintainers. The internal representation of a
    /// `Decimal` is considered "unstable" for public use.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    /// use core::str::FromStr;
    ///
    /// let pi = Decimal::from_str("3.1415926535897932384626433832").unwrap();
    /// assert_eq!(format!("{:?}", pi), "3.1415926535897932384626433832");
    /// assert_eq!(format!("{:?}", pi.unpack()), "UnpackedDecimal { \
    ///     is_negative: false, scale: 28, hi: 1703060790, mid: 185874565, lo: 1102470952 \
    /// }");
    /// ```
    pub const fn unpack(&self) -> UnpackedDecimal {
        UnpackedDecimal {
            is_negative: self.is_sign_negative(),
            scale: self.scale(),
            hi: self.hi,
            lo: self.lo,
            mid: self.mid,
        }
    }

    /// Convert `Decimal` to an internal representation of the underlying struct. This is useful
    /// for debugging the internal state of the object.
    ///
    /// # Important Disclaimer
    /// This is primarily intended for library maintainers. The internal representation of a
    /// `Decimal` is considered "unstable" for public use.
    ///
    /// # Example
    ///
    /// ```
    /// use rust_decimal::Decimal;
    /// use core::str::FromStr;
    ///
    /// let pi = Decimal::from_str("3.1415926535897932384626433832").unwrap();
    /// assert_eq!(format!("{:?}", pi), "3.1415926535897932384626433832");
    /// assert_eq!(format!("{:?}", pi.unpack()), "UnpackedDecimal { \
    ///     is_negative: false, scale: 28, hi: 1703060790, mid: 185874565, lo: 1102470952 \
    /// }");
    /// ```
    #[inline(always)]
    pub(crate) const fn mantissa_array3(&self) -> [u32; 3] {
        [self.lo, self.mid, self.hi]
    }

    // Same mantissa with a fourth zero overflow word appended, for routines
    // that need scratch headroom.
    #[inline(always)]
    pub(crate) const fn mantissa_array4(&self) -> [u32; 4] {
        [self.lo, self.mid, self.hi, 0]
    }

    // Converts a binary mantissa + base-2 exponent (as extracted from a float)
    // into a Decimal. `is64` selects how many decimal digits of precision to
    // retain (f64 vs f32 guarantees). Returns None on overflow.
    fn base2_to_decimal(bits: &mut [u32; 3], exponent2: i32, positive: bool, is64: bool) -> Option<Self> {
        // 2^exponent2 = (10^exponent2)/(5^exponent2)
        //             = (5^-exponent2)*(10^exponent2)
        let mut exponent5 = -exponent2;
        let mut exponent10 = exponent2; // Ultimately, we want this for the scale

        while exponent5 > 0 {
            // Check to see if the mantissa is divisible by 2
            if bits[0] & 0x1 == 0 {
                exponent10 += 1;
                exponent5 -= 1;

                // We can divide by 2 without losing precision
                let hi_carry = bits[2] & 0x1 == 1;
                bits[2] >>= 1;
                let mid_carry = bits[1] & 0x1 == 1;
                bits[1] = (bits[1] >> 1) | if hi_carry { SIGN_MASK } else { 0 };
                bits[0] = (bits[0] >> 1) | if mid_carry { SIGN_MASK } else { 0 };
            } else {
                // The mantissa is NOT divisible by 2. Therefore the mantissa should
                // be multiplied by 5, unless the multiplication overflows.
                exponent5 -= 1;

                let mut temp = [bits[0], bits[1], bits[2]];
                if mul_by_u32(&mut temp, 5) == 0 {
                    // Multiplication succeeded without overflow, so copy result back
                    bits[0] = temp[0];
                    bits[1] = temp[1];
                    bits[2] = temp[2];
                } else {
                    // Multiplication by 5 overflows. The mantissa should be divided
                    // by 2, and therefore will lose significant digits.
                    exponent10 += 1;

                    // Shift right
                    let hi_carry = bits[2] & 0x1 == 1;
                    bits[2] >>= 1;
                    let mid_carry = bits[1] & 0x1 == 1;
                    bits[1] = (bits[1] >> 1) | if hi_carry { SIGN_MASK } else { 0 };
                    bits[0] = (bits[0] >> 1) | if mid_carry { SIGN_MASK } else { 0 };
                }
            }
        }

        // In order to divide the value by 5, it is best to multiply by 2/10.
        // Therefore, exponent10 is decremented, and the mantissa should be multiplied by 2
        while exponent5 < 0 {
            if bits[2] & SIGN_MASK == 0 {
                // No far left bit, the mantissa can withstand a shift-left without overflowing
                exponent10 -= 1;
                exponent5 += 1;
                shl1_internal(bits, 0);
            } else {
                // The mantissa would overflow if shifted. Therefore it should be
                // directly divided by 5. This will lose significant digits, unless
                // by chance the mantissa happens to be divisible by 5.
                exponent5 += 1;
                div_by_u32(bits, 5);
            }
        }

        // At this point, the mantissa has assimilated the exponent5, but
        // exponent10 might not be suitable for assignment. exponent10 must be
        // in the range [-MAX_PRECISION..0], so the mantissa must be scaled up or
        // down appropriately.
        while exponent10 > 0 {
            // In order to bring exponent10 down to 0, the mantissa should be
            // multiplied by 10 to compensate. If the exponent10 is too big, this
            // will cause the mantissa to overflow.
            if mul_by_u32(bits, 10) == 0 {
                exponent10 -= 1;
            } else {
                // Overflowed - return?
                return None;
            }
        }

        // In order to bring exponent up to -MAX_PRECISION, the mantissa should
        // be divided by 10 to compensate. If the exponent10 is too small, this
        // will cause the mantissa to underflow and become 0.
        while exponent10 < -(MAX_PRECISION as i32) {
            let rem10 = div_by_u32(bits, 10);
            exponent10 += 1;
            if is_all_zero(bits) {
                // Underflow, unable to keep dividing
                exponent10 = 0;
            } else if rem10 >= 5 {
                // Round half up on the discarded digit
                add_one_internal(bits);
            }
        }

        // This step is required in order to remove excess bits of precision from the
        // end of the bit representation, down to the precision guaranteed by the
        // floating point number
        if is64 {
            // Guaranteed to about 16 dp
            while exponent10 < 0 && (bits[2] != 0 || (bits[1] & 0xFFF0_0000) != 0) {
                let rem10 = div_by_u32(bits, 10);
                exponent10 += 1;
                if rem10 >= 5 {
                    add_one_internal(bits);
                }
            }
        } else {
            // Guaranteed to about 7 dp
            while exponent10 < 0
                && (bits[2] != 0 || bits[1] != 0 || (bits[2] == 0 && bits[1] == 0 && (bits[0] & 0xFF00_0000) != 0))
            {
                let rem10 = div_by_u32(bits, 10);
                exponent10 += 1;
                if rem10 >= 5 {
                    add_one_internal(bits);
                }
            }
        }

        // Remove multiples of 10 from the representation
        while exponent10 < 0 {
            let mut temp = [bits[0], bits[1], bits[2]];
            let remainder = div_by_u32(&mut temp, 10);
            if remainder == 0 {
                exponent10 += 1;
                bits[0] = temp[0];
                bits[1] = temp[1];
                bits[2] = temp[2];
            } else {
                break;
            }
        }

        Some(Decimal {
            lo: bits[0],
            mid: bits[1],
            hi: bits[2],
            flags: flags(!positive, -exponent10 as u32),
        })
    }

    /// Checked addition. Computes `self + other`, returning `None` if overflow occurred.
    #[inline(always)]
    pub fn checked_add(self, other: Decimal) -> Option<Decimal> {
        // Convert to the same scale
        let mut my = [self.lo, self.mid, self.hi];
        let mut my_scale = self.scale();
        let mut ot = [other.lo, other.mid, other.hi];
        let mut other_scale = other.scale();
        rescale_to_maximum_scale(&mut my, &mut my_scale, &mut ot, &mut other_scale);
        let mut final_scale = my_scale.max(other_scale);

        // Add the items together
        let my_negative = self.is_sign_negative();
        let other_negative = other.is_sign_negative();
        let mut negative = false;
        let carry;
        if !(my_negative ^ other_negative) {
            // Same sign: plain magnitude addition; sign carries through.
            negative = my_negative;
            carry = add_by_internal3(&mut my, &ot);
        } else {
            // Opposite signs: subtract the smaller magnitude from the larger;
            // the result takes the sign of the larger-magnitude operand.
            let cmp = cmp_internal(&my, &ot);
            // -x + y
            // if x > y then it's negative (i.e. -2 + 1)
            match cmp {
                Ordering::Less => {
                    negative = other_negative;
                    sub_by_internal3(&mut ot, &my);
                    my[0] = ot[0];
                    my[1] = ot[1];
                    my[2] = ot[2];
                }
                Ordering::Greater => {
                    negative = my_negative;
                    sub_by_internal3(&mut my, &ot);
                }
                Ordering::Equal => {
                    // -2 + 2
                    my[0] = 0;
                    my[1] = 0;
                    my[2] = 0;
                }
            }
            carry = 0;
        }

        // If we have a carry we underflowed.
        // We need to lose some significant digits (if possible)
        if carry > 0 {
            if final_scale == 0 {
                return None;
            }

            // Copy it over to a temp array for modification
            let mut temp = [my[0], my[1], my[2], carry];
            while final_scale > 0 && temp[3] != 0 {
                div_by_u32(&mut temp, 10);
                final_scale -= 1;
            }

            // If we still have a carry bit then we overflowed
            if temp[3] > 0 {
                return None;
            }

            // Copy it back - we're done
            my[0] = temp[0];
            my[1] = temp[1];
            my[2] = temp[2];
        }
        Some(Decimal {
            lo: my[0],
            mid: my[1],
            hi: my[2],
            flags: flags(negative, final_scale),
        })
    }

    /// Checked subtraction. Computes `self - other`, returning `None` if overflow occurred.
    #[inline(always)]
    pub fn checked_sub(self, other: Decimal) -> Option<Decimal> {
        // Negation is just a flip of the sign flag, so delegate to checked_add.
        let negated_other = Decimal {
            lo: other.lo,
            mid: other.mid,
            hi: other.hi,
            flags: other.flags ^ SIGN_MASK,
        };
        self.checked_add(negated_other)
    }

    /// Checked multiplication.
Computes `self * other`, returning `None` if overflow occurred. #[inline] pub fn checked_mul(self, other: Decimal) -> Option<Decimal> { // Early exit if either is zero if self.is_zero() || other.is_zero() { return Some(Decimal::zero()); } // We are only resulting in a negative if we have mismatched signs let negative = self.is_sign_negative() ^ other.is_sign_negative(); // We get the scale of the result by adding the operands. This may be too big, however // we'll correct later let mut final_scale = self.scale() + other.scale(); // First of all, if ONLY the lo parts of both numbers is filled // then we can simply do a standard 64 bit calculation. It's a minor // optimization however prevents the need for long form multiplication if self.mid == 0 && self.hi == 0 && other.mid == 0 && other.hi == 0 { // Simply multiplication let mut u64_result = u64_to_array(u64::from(self.lo) * u64::from(other.lo)); // If we're above max precision then this is a very small number if final_scale > MAX_PRECISION { final_scale -= MAX_PRECISION; // If the number is above 19 then this will equate to zero. // This is because the max value in 64 bits is 1.84E19 if final_scale > 19 { return Some(Decimal::zero()); } let mut rem_lo = 0; let mut power; if final_scale > 9 { // Since 10^10 doesn't fit into u32, we divide by 10^10/4 // and multiply the next divisor by 4. rem_lo = div_by_u32(&mut u64_result, 2_500_000_000); power = POWERS_10[final_scale as usize - 10] << 2; } else { power = POWERS_10[final_scale as usize]; } // Divide fits in 32 bits let rem_hi = div_by_u32(&mut u64_result, power); // Round the result. 
Since the divisor is a power of 10 // we check to see if the remainder is >= 1/2 divisor power >>= 1; if rem_hi >= power && (rem_hi > power || (rem_lo | (u64_result[0] & 0x1)) != 0) { u64_result[0] += 1; } final_scale = MAX_PRECISION; } return Some(Decimal { lo: u64_result[0], mid: u64_result[1], hi: 0, flags: flags(negative, final_scale), }); } // We're using some of the high bits, so we essentially perform // long form multiplication. We compute the 9 partial products // into a 192 bit result array. // // [my-h][my-m][my-l] // x [ot-h][ot-m][ot-l] // -------------------------------------- // 1. [r-hi][r-lo] my-l * ot-l [0, 0] // 2. [r-hi][r-lo] my-l * ot-m [0, 1] // 3. [r-hi][r-lo] my-m * ot-l [1, 0] // 4. [r-hi][r-lo] my-m * ot-m [1, 1] // 5. [r-hi][r-lo] my-l * ot-h [0, 2] // 6. [r-hi][r-lo] my-h * ot-l [2, 0] // 7. [r-hi][r-lo] my-m * ot-h [1, 2] // 8. [r-hi][r-lo] my-h * ot-m [2, 1] // 9.[r-hi][r-lo] my-h * ot-h [2, 2] let my = [self.lo, self.mid, self.hi]; let ot = [other.lo, other.mid, other.hi]; let mut product = [0u32, 0u32, 0u32, 0u32, 0u32, 0u32]; // We can perform a minor short circuit here. 
If the // high portions are both 0 then we can skip portions 5-9 let to = if my[2] == 0 && ot[2] == 0 { 2 } else { 3 }; for my_index in 0..to { for ot_index in 0..to { let (mut rlo, mut rhi) = mul_part(my[my_index], ot[ot_index], 0); // Get the index for the lo portion of the product for prod in product.iter_mut().skip(my_index + ot_index) { let (res, overflow) = add_part(rlo, *prod); *prod = res; // If we have something in rhi from before then promote that if rhi > 0 { // If we overflowed in the last add, add that with rhi if overflow > 0 { let (nlo, nhi) = add_part(rhi, overflow); rlo = nlo; rhi = nhi; } else { rlo = rhi; rhi = 0; } } else if overflow > 0 { rlo = overflow; rhi = 0; } else { break; } // If nothing to do next round then break out if rlo == 0 { break; } } } } // If our result has used up the high portion of the product // then we either have an overflow or an underflow situation // Overflow will occur if we can't scale it back, whereas underflow // with kick in rounding let mut remainder = 0; while final_scale > 0 && (product[3] != 0 || product[4] != 0 || product[5] != 0) { remainder = div_by_u32(&mut product, 10u32); final_scale -= 1; } // Round up the carry if we need to if remainder >= 5 { for part in product.iter_mut() { if remainder == 0 { break; } let digit: u64 = u64::from(*part) + 1; remainder = if digit > 0xFFFF_FFFF { 1 } else { 0 }; *part = (digit & 0xFFFF_FFFF) as u32; } } // If we're still above max precision then we'll try again to // reduce precision - we may be dealing with a limit of "0" if final_scale > MAX_PRECISION { // We're in an underflow situation // The easiest way to remove precision is to divide off the result while final_scale > MAX_PRECISION && !is_all_zero(&product) { div_by_u32(&mut product, 10); final_scale -= 1; } // If we're still at limit then we can't represent any // siginificant decimal digits and will return an integer only // Can also be invoked while representing 0. 
if final_scale > MAX_PRECISION { final_scale = 0; } } else if !(product[3] == 0 && product[4] == 0 && product[5] == 0) { // We're in an overflow situation - we're within our precision bounds // but still have bits in overflow return None; } Some(Decimal { lo: product[0], mid: product[1], hi: product[2], flags: flags(negative, final_scale), }) } /// Checked division. Computes `self / other`, returning `None` if `other == 0.0` or the /// division results in overflow. pub fn checked_div(self, other: Decimal) -> Option<Decimal> { match ops::div_impl(&self, &other) { DivResult::Ok(quot) => Some(quot), DivResult::Overflow => None, DivResult::DivByZero => None, } } /// Checked remainder. Computes `self % other`, returning `None` if `other == 0.0`. pub fn checked_rem(self, other: Decimal) -> Option<Decimal> { if other.is_zero() { return None; } if self.is_zero() { return Some(Decimal::zero()); } // Rescale so comparable let initial_scale = self.scale(); let mut quotient = [self.lo, self.mid, self.hi]; let mut quotient_scale = initial_scale; let mut divisor = [other.lo, other.mid, other.hi]; let mut divisor_scale = other.scale(); rescale_to_maximum_scale(&mut quotient, &mut quotient_scale, &mut divisor, &mut divisor_scale); // Working is the remainder + the quotient // We use an aligned array since we'll be using it a lot. let mut working_quotient = [quotient[0], quotient[1], quotient[2], 0u32]; let mut working_remainder = [0u32, 0u32, 0u32, 0u32]; div_internal(&mut working_quotient, &mut working_remainder, &divisor); // Round if necessary. This is for semantic correctness, but could feasibly be removed for // performance improvements. 
        if quotient_scale > initial_scale {
            // Strip exact trailing zeros from the remainder so the result
            // carries no more scale than the original dividend.
            let mut working = [
                working_remainder[0],
                working_remainder[1],
                working_remainder[2],
                working_remainder[3],
            ];
            while quotient_scale > initial_scale {
                if div_by_u32(&mut working, 10) > 0 {
                    break;
                }
                quotient_scale -= 1;
                working_remainder.copy_from_slice(&working);
            }
        }

        Some(Decimal {
            lo: working_remainder[0],
            mid: working_remainder[1],
            hi: working_remainder[2],
            flags: flags(self.is_sign_negative(), quotient_scale),
        })
    }

    // Parses a string in the given radix; radix 10 takes the fast path.
    pub fn from_str_radix(str: &str, radix: u32) -> Result<Self, crate::Error> {
        if radix == 10 {
            parse_str_radix_10(str)
        } else {
            parse_str_radix_n(str, radix)
        }
    }
}

impl Default for Decimal {
    /// The default `Decimal` is zero.
    fn default() -> Self {
        Self::zero()
    }
}

// Internal tri-state result for division, distinguishing overflow from
// division by zero so callers can map each to the right public error.
pub(crate) enum DivResult {
    Ok(Decimal),
    Overflow,
    DivByZero,
}

// Packs the sign bit and scale into the `flags` word layout.
#[inline]
const fn flags(neg: bool, scale: u32) -> u32 {
    (scale << SCALE_SHIFT) | ((neg as u32) << SIGN_SHIFT)
}

/// Rescales the given decimals to equivalent scales.
/// It will firstly try to scale both the left and the right side to
/// the maximum scale of left/right. If it is unable to do that it
/// will try to reduce the accuracy of the other argument.
/// e.g. with 1.23 and 2.345 it'll rescale the first arg to 1.230
#[inline(always)]
fn rescale_to_maximum_scale(left: &mut [u32; 3], left_scale: &mut u32, right: &mut [u32; 3], right_scale: &mut u32) {
    if left_scale == right_scale {
        // Nothing to do
        return;
    }

    // Zero can adopt any scale for free.
    if is_all_zero(left) {
        *left_scale = *right_scale;
        return;
    } else if is_all_zero(right) {
        *right_scale = *left_scale;
        return;
    }

    if left_scale > right_scale {
        rescale_internal(right, right_scale, *left_scale);
        if right_scale != left_scale {
            // Scaling up overflowed; scale the other side down instead.
            rescale_internal(left, left_scale, *right_scale);
        }
    } else {
        rescale_internal(left, left_scale, *right_scale);
        if right_scale != left_scale {
            // Scaling up overflowed; scale the other side down instead.
            rescale_internal(right, right_scale, *left_scale);
        }
    }
}

/// Rescales the given decimal to new scale.
/// e.g.
/// with 1.23 and new scale 3 rescale the value to 1.230
#[inline(always)]
fn rescale_internal(value: &mut [u32; 3], value_scale: &mut u32, new_scale: u32) {
    if *value_scale == new_scale {
        // Nothing to do
        return;
    }

    if is_all_zero(value) {
        // Zero can take any scale for free.
        *value_scale = new_scale;
        return;
    }

    if *value_scale > new_scale {
        // Scaling down: divide digits off, rounding half-up on the final
        // discarded remainder.
        let mut diff = *value_scale - new_scale;
        // Scaling further isn't possible since we got an overflow
        // In this case we need to reduce the accuracy of the "side to keep"

        // Now do the necessary rounding
        let mut remainder = 0;
        while diff > 0 {
            if is_all_zero(value) {
                *value_scale = new_scale;
                return;
            }

            diff -= 1;

            // Any remainder is discarded if diff > 0 still (i.e. lost precision)
            remainder = div_by_10(value);
        }
        if remainder >= 5 {
            // Round up, rippling any carry through the three limbs.
            for part in value.iter_mut() {
                let digit = u64::from(*part) + 1u64;
                remainder = if digit > 0xFFFF_FFFF { 1 } else { 0 };
                *part = (digit & 0xFFFF_FFFF) as u32;
                if remainder == 0 {
                    break;
                }
            }
        }
        *value_scale = new_scale;
    } else {
        // Scaling up: multiply by 10 while it still fits; stop early on
        // overflow and record how far we actually got via the leftover diff.
        let mut diff = new_scale - *value_scale;
        let mut working = [value[0], value[1], value[2]];
        while diff > 0 && mul_by_10(&mut working) == 0 {
            value.copy_from_slice(&working);
            diff -= 1;
        }
        *value_scale = new_scale - diff;
    }
}

// Splits a u64 into little-endian [lo, hi] u32 words.
#[inline]
const fn u64_to_array(value: u64) -> [u32; 2] {
    [(value & U32_MASK) as u32, (value >> 32 & U32_MASK) as u32]
}

// Adds `by` into `value` in place (little-endian u32 limbs) and returns the
// carry out of the top limb.
fn add_by_internal(value: &mut [u32], by: &[u32]) -> u32 {
    let mut carry: u64 = 0;
    let vl = value.len();
    let bl = by.len();
    if vl >= bl {
        let mut sum: u64;
        for i in 0..bl {
            sum = u64::from(value[i]) + u64::from(by[i]) + carry;
            value[i] = (sum & U32_MASK) as u32;
            carry = sum >> 32;
        }
        if vl > bl && carry > 0 {
            // Ripple the carry through the remaining upper limbs of `value`.
            for i in value.iter_mut().skip(bl) {
                sum = u64::from(*i) + carry;
                *i = (sum & U32_MASK) as u32;
                carry = sum >> 32;
                if carry == 0 {
                    break;
                }
            }
        }
    } else if vl + 1 == bl {
        // Overflow, by default, is anything in the high portion of by
        let mut sum: u64;
        for i in 0..vl {
            sum = u64::from(value[i]) + u64::from(by[i]) + carry;
            value[i] = (sum & U32_MASK) as u32;
            carry = sum >> 32;
        }
        if
        by[vl] > 0 {
            carry += u64::from(by[vl]);
        }
    } else {
        panic!("Internal error: add using incompatible length arrays. {} <- {}", vl, bl);
    }
    carry as u32
}

// Adds one to `value` in place; returns the carry out of the top limb.
#[inline]
fn add_one_internal(value: &mut [u32]) -> u32 {
    let mut carry: u64 = 1; // Start with one, since adding one
    let mut sum: u64;
    for i in value.iter_mut() {
        sum = (*i as u64) + carry;
        *i = (sum & U32_MASK) as u32;
        carry = sum >> 32;
    }

    carry as u32
}

// Fixed-size (four-limb) variant of `add_one_internal`.
#[inline]
fn add_one_internal4(value: &mut [u32; 4]) -> u32 {
    let mut carry: u64 = 1; // Start with one, since adding one
    let mut sum: u64;
    for i in value.iter_mut() {
        sum = (*i as u64) + carry;
        *i = (sum & U32_MASK) as u32;
        carry = sum >> 32;
    }

    carry as u32
}

// Three-limb addition using overflowing_add; returns the carry out.
#[inline]
fn add_by_internal3(value: &mut [u32; 3], by: &[u32; 3]) -> u32 {
    let mut carry: u32 = 0;
    let bl = by.len();
    for i in 0..bl {
        let res1 = value[i].overflowing_add(by[i]);
        let res2 = res1.0.overflowing_add(carry);
        value[i] = res2.0;
        carry = (res1.1 | res2.1) as u32;
    }
    carry
}

// Adds two u32s, returning (low word, carry word) of the 64-bit sum.
#[inline]
fn add_part(left: u32, right: u32) -> (u32, u32) {
    let added = u64::from(left) + u64::from(right);
    ((added & U32_MASK) as u32, (added >> 32 & U32_MASK) as u32)
}

// Three-limb in-place subtraction (value -= by); the final borrow, if any,
// is discarded.
#[inline(always)]
fn sub_by_internal3(value: &mut [u32; 3], by: &[u32; 3]) {
    let mut overflow = 0;
    let vl = value.len();
    for i in 0..vl {
        // Borrow-aware subtract: bias by 2^32 so the expression cannot go negative.
        let part = (0x1_0000_0000u64 + u64::from(value[i])) - (u64::from(by[i]) + overflow);
        value[i] = part as u32;
        overflow = 1 - (part >> 32);
    }
}

fn sub_by_internal(value: &mut [u32], by: &[u32]) -> u32 {
    // The way this works is similar to long subtraction
    // Let's assume we're working with bytes for simplicity in an example:
    //   257 - 8 = 249
    //   0000_0001 0000_0001 - 0000_0000 0000_1000 = 0000_0000 1111_1001
    // We start by doing the first byte...
    //   Overflow = 0
    //   Left = 0000_0001 (1)
    //   Right = 0000_1000 (8)
    // Firstly, we make sure the left and right are scaled up to twice the size
    //   Left = 0000_0000 0000_0001
    //   Right = 0000_0000 0000_1000
    // We then subtract right from left
    //   Result = Left - Right = 1111_1111 1111_1001
    // We subtract the overflow, which in this case is 0.
    // Because left < right (1 < 8) we invert the high part.
    //   Lo = 1111_1001
    //   Hi = 1111_1111 -> 0000_0001
    // Lo is the field, hi is the overflow.
    // We do the same for the second byte...
    //   Overflow = 1
    //   Left = 0000_0001
    //   Right = 0000_0000
    //   Result = Left - Right = 0000_0000 0000_0001
    // We subtract the overflow...
    //   Result = 0000_0000 0000_0001 - 1 = 0
    // And we invert the high, just because (invert 0 = 0).
    // So our result is:
    //   0000_0000 1111_1001
    let mut overflow = 0;
    let vl = value.len();
    let bl = by.len();
    for i in 0..vl {
        if i >= bl {
            break;
        }
        let (lo, hi) = sub_part(value[i], by[i], overflow);
        value[i] = lo;
        overflow = hi;
    }
    overflow
}

// Single-limb subtract-with-borrow; returns (result, borrow out).
fn sub_part(left: u32, right: u32, overflow: u32) -> (u32, u32) {
    let part = 0x1_0000_0000u64 + u64::from(left) - (u64::from(right) + u64::from(overflow));
    let lo = part as u32;
    let hi = 1 - ((part >> 32) as u32);
    (lo, hi)
}

// Returns overflow
#[inline]
fn mul_by_10(bits: &mut [u32; 3]) -> u32 {
    let mut overflow = 0u64;

    for b in bits.iter_mut() {
        let result = u64::from(*b) * 10u64 + overflow;
        let hi = (result >> 32) & U32_MASK;
        let lo = (result & U32_MASK) as u32;
        *b = lo;
        overflow = hi;
    }

    overflow as u32
}

// Returns overflow
pub(crate) fn mul_by_u32(bits: &mut [u32], m: u32) -> u32 {
    let mut overflow = 0;
    for b in bits.iter_mut() {
        let (lo, hi) = mul_part(*b, m, overflow);
        *b = lo;
        overflow = hi;
    }
    overflow
}

// Single-limb multiply-accumulate: left * right + high -> (lo, hi).
fn mul_part(left: u32, right: u32, high: u32) -> (u32, u32) {
    let result = u64::from(left) * u64::from(right) + u64::from(high);
    let hi = ((result >> 32) & U32_MASK) as u32;
    let lo = (result & U32_MASK) as u32;
    (lo, hi)
}

fn div_internal(quotient: &mut [u32; 4], remainder: &mut
[u32; 4], divisor: &[u32; 3]) { // There are a couple of ways to do division on binary numbers: // 1. Using long division // 2. Using the complement method // ref: http://paulmason.me/dividing-binary-numbers-part-2/ // The complement method basically keeps trying to subtract the // divisor until it can't anymore and placing the rest in remainder. let mut complement = [ divisor[0] ^ 0xFFFF_FFFF, divisor[1] ^ 0xFFFF_FFFF, divisor[2] ^ 0xFFFF_FFFF, 0xFFFF_FFFF, ]; // Add one onto the complement add_one_internal4(&mut complement); // Make sure the remainder is 0 remainder.iter_mut().for_each(|x| *x = 0); // If we have nothing in our hi+ block then shift over till we do let mut blocks_to_process = 0; while blocks_to_process < 4 && quotient[3] == 0 { // memcpy would be useful here quotient[3] = quotient[2]; quotient[2] = quotient[1]; quotient[1] = quotient[0]; quotient[0] = 0; // Incremember the counter blocks_to_process += 1; } // Let's try and do the addition... let mut block = blocks_to_process << 5; let mut working = [0u32, 0u32, 0u32, 0u32]; while block < 128 { // << 1 for quotient AND remainder. Moving the carry from the quotient to the bottom of the // remainder. 
let carry = shl1_internal(quotient, 0); shl1_internal(remainder, carry); // Copy the remainder of working into sub working.copy_from_slice(remainder); // Add the remainder with the complement add_by_internal(&mut working, &complement); // Check for the significant bit - move over to the quotient // as necessary if (working[3] & 0x8000_0000) == 0 { remainder.copy_from_slice(&working); quotient[0] |= 1; } // Increment our pointer block += 1; } } #[cfg(feature = "legacy-ops")] mod ops { use super::*; pub(crate) fn div_impl(d1: &Decimal, d2: &Decimal) -> DivResult { if d2.is_zero() { return DivResult::DivByZero; } if d1.is_zero() { return DivResult::Ok(Decimal::zero()); } let dividend = [d1.lo, d1.mid, d1.hi]; let divisor = [d2.lo, d2.mid, d2.hi]; let mut quotient = [0u32, 0u32, 0u32]; let mut quotient_scale: i32 = d1.scale() as i32 - d2.scale() as i32; // We supply an extra overflow word for each of the dividend and the remainder let mut working_quotient = [dividend[0], dividend[1], dividend[2], 0u32]; let mut working_remainder = [0u32, 0u32, 0u32, 0u32]; let mut working_scale = quotient_scale; let mut remainder_scale = quotient_scale; let mut underflow; loop { div_internal(&mut working_quotient, &mut working_remainder, &divisor); underflow = add_with_scale_internal( &mut quotient, &mut quotient_scale, &mut working_quotient, &mut working_scale, ); // Multiply the remainder by 10 let mut overflow = 0; for part in working_remainder.iter_mut() { let (lo, hi) = mul_part(*part, 10, overflow); *part = lo; overflow = hi; } // Copy temp remainder into the temp quotient section working_quotient.copy_from_slice(&working_remainder); remainder_scale += 1; working_scale = remainder_scale; if underflow || is_all_zero(&working_remainder) { break; } } // If we have a really big number try to adjust the scale to 0 while quotient_scale < 0 { copy_array_diff_lengths(&mut working_quotient, &quotient); working_quotient[3] = 0; working_remainder.iter_mut().for_each(|x| *x = 0); // Mul 10 
let mut overflow = 0; for part in &mut working_quotient { let (lo, hi) = mul_part(*part, 10, overflow); *part = lo; overflow = hi; } for part in &mut working_remainder { let (lo, hi) = mul_part(*part, 10, overflow); *part = lo; overflow = hi; } if working_quotient[3] == 0 && is_all_zero(&working_remainder) { quotient_scale += 1; quotient[0] = working_quotient[0]; quotient[1] = working_quotient[1]; quotient[2] = working_quotient[2]; } else { // Overflow return DivResult::Overflow; } } if quotient_scale > 255 { quotient[0] = 0; quotient[1] = 0; quotient[2] = 0; quotient_scale = 0; } let mut quotient_negative = d1.is_sign_negative() ^ d2.is_sign_negative(); // Check for underflow let mut final_scale: u32 = quotient_scale as u32; if final_scale > MAX_PRECISION { let mut remainder = 0; // Division underflowed. We must remove some significant digits over using // an invalid scale. while final_scale > MAX_PRECISION && !is_all_zero(&quotient) { remainder = div_by_u32(&mut quotient, 10); final_scale -= 1; } if final_scale > MAX_PRECISION { // Result underflowed so set to zero final_scale = 0; quotient_negative = false; } else if remainder >= 5 { for part in &mut quotient { if remainder == 0 { break; } let digit: u64 = u64::from(*part) + 1; remainder = if digit > 0xFFFF_FFFF { 1 } else { 0 }; *part = (digit & 0xFFFF_FFFF) as u32; } } } DivResult::Ok(Decimal { lo: quotient[0], mid: quotient[1], hi: quotient[2], flags: flags(quotient_negative, final_scale), }) } #[inline] fn copy_array_diff_lengths(into: &mut [u32], from: &[u32]) { for i in 0..into.len() { if i >= from.len() { break; } into[i] = from[i]; } } fn add_with_scale_internal( quotient: &mut [u32; 3], quotient_scale: &mut i32, working_quotient: &mut [u32; 4], working_scale: &mut i32, ) -> bool { // Add quotient and the working (i.e. quotient = quotient + working) if is_all_zero(quotient) { // Quotient is zero so we can just copy the working quotient in directly // First, make sure they are both 96 bit. 
while working_quotient[3] != 0 { div_by_u32(working_quotient, 10); *working_scale -= 1; } copy_array_diff_lengths(quotient, working_quotient); *quotient_scale = *working_scale; return false; } if is_all_zero(working_quotient) { return false; } // We have ensured that our working is not zero so we should do the addition // If our two quotients are different then // try to scale down the one with the bigger scale let mut temp3 = [0u32, 0u32, 0u32]; let mut temp4 = [0u32, 0u32, 0u32, 0u32]; if *quotient_scale != *working_scale { // TODO: Remove necessity for temp (without performance impact) fn div_by_10(target: &mut [u32], temp: &mut [u32], scale: &mut i32, target_scale: i32) { // Copy to the temp array temp.copy_from_slice(target); // divide by 10 until target scale is reached while *scale > target_scale { let remainder = div_by_u32(temp, 10); if remainder == 0 { *scale -= 1; target.copy_from_slice(&temp); } else { break; } } } if *quotient_scale < *working_scale { div_by_10(working_quotient, &mut temp4, working_scale, *quotient_scale); } else { div_by_10(quotient, &mut temp3, quotient_scale, *working_scale); } } // If our two quotients are still different then // try to scale up the smaller scale if *quotient_scale != *working_scale { // TODO: Remove necessity for temp (without performance impact) fn mul_by_10(target: &mut [u32], temp: &mut [u32], scale: &mut i32, target_scale: i32) { temp.copy_from_slice(target); let mut overflow = 0; // Multiply by 10 until target scale reached or overflow while *scale < target_scale && overflow == 0 { overflow = mul_by_u32(temp, 10); if overflow == 0 { // Still no overflow *scale += 1; target.copy_from_slice(&temp); } } } if *quotient_scale > *working_scale { mul_by_10(working_quotient, &mut temp4, working_scale, *quotient_scale); } else { mul_by_10(quotient, &mut temp3, quotient_scale, *working_scale); } } // If our two quotients are still different then // try to scale down the one with the bigger scale // (ultimately losing 
significant digits) if *quotient_scale != *working_scale { // TODO: Remove necessity for temp (without performance impact) fn div_by_10_lossy(target: &mut [u32], temp: &mut [u32], scale: &mut i32, target_scale: i32) { temp.copy_from_slice(target); // divide by 10 until target scale is reached while *scale > target_scale { div_by_u32(temp, 10); *scale -= 1; target.copy_from_slice(&temp); } } if *quotient_scale < *working_scale { div_by_10_lossy(working_quotient, &mut temp4, working_scale, *quotient_scale); } else { div_by_10_lossy(quotient, &mut temp3, quotient_scale, *working_scale); } } // If quotient or working are zero we have an underflow condition if is_all_zero(quotient) || is_all_zero(working_quotient) { // Underflow return true; } else { // Both numbers have the same scale and can be added. // We just need to know whether we can fit them in let mut underflow = false; let mut temp = [0u32, 0u32, 0u32]; while !underflow { temp.copy_from_slice(quotient); // Add the working quotient let overflow = add_by_internal(&mut temp, working_quotient); if overflow == 0 { // addition was successful quotient.copy_from_slice(&temp); break; } else { // addition overflowed - remove significant digits and try again div_by_u32(quotient, 10); *quotient_scale -= 1; div_by_u32(working_quotient, 10); *working_scale -= 1; // Check for underflow underflow = is_all_zero(quotient) || is_all_zero(working_quotient); } } if underflow { return true; } } false } } // This code (in fact, this library) is heavily inspired by the dotnet Decimal number library // implementation. Consequently, a huge thank you for to all the contributors to that project // which has also found it's way into here. #[cfg(not(feature = "legacy-ops"))] mod ops { use super::*; use core::ops::BitXor; // This is a table of the largest values that will not overflow when multiplied // by a given power as represented by the index. 
// Entry i is the largest 96-bit mantissa that can be multiplied by the power of
// ten at index i + 1 (10^1 for entry 0 through 10^8 for entry 7) without
// overflowing 96 bits.
static POWER_OVERFLOW_VALUES: [Dec12; 8] = [
    Dec12 {
        hi: 429496729,
        mid: 2576980377,
        lo: 2576980377,
    },
    Dec12 {
        hi: 42949672,
        mid: 4123168604,
        lo: 687194767,
    },
    Dec12 {
        hi: 4294967,
        mid: 1271310319,
        lo: 2645699854,
    },
    Dec12 {
        hi: 429496,
        mid: 3133608139,
        lo: 694066715,
    },
    Dec12 {
        hi: 42949,
        mid: 2890341191,
        lo: 2216890319,
    },
    Dec12 {
        hi: 4294,
        mid: 4154504685,
        lo: 2369172679,
    },
    Dec12 {
        hi: 429,
        mid: 2133437386,
        lo: 4102387834,
    },
    Dec12 {
        hi: 42,
        mid: 4078814305,
        lo: 410238783,
    },
];

// A structure that is used for faking a union of the decimal type. This allows setting mid/hi
// with a u64, for example. Only the 96-bit mantissa is held; sign/scale flags are not.
struct Dec12 {
    lo: u32,
    mid: u32,
    hi: u32,
}

impl Dec12 {
    // Copy the three mantissa words out of a Decimal (flags are ignored).
    const fn new(value: &Decimal) -> Self {
        Dec12 {
            lo: value.lo,
            mid: value.mid,
            hi: value.hi,
        }
    }

    // lo + mid combined into a single u64 (lo is the least significant half)
    const fn low64(&self) -> u64 {
        ((self.mid as u64) << 32) | (self.lo as u64)
    }

    // Split a u64 back into the lo/mid words.
    fn set_low64(&mut self, value: u64) {
        self.mid = (value >> 32) as u32;
        self.lo = value as u32;
    }

    // mid + hi combined into a single u64 (mid is the least significant half)
    const fn high64(&self) -> u64 {
        ((self.hi as u64) << 32) | (self.mid as u64)
    }

    // Split a u64 back into the mid/hi words.
    fn set_high64(&mut self, value: u64) {
        self.hi = (value >> 32) as u32;
        self.mid = value as u32;
    }

    // Add a 32 bit value into the 96 bit integer, propagating the carry upward.
    // Returns Ok(()) on success, or Err(DivError::Overflow) if the result no
    // longer fits in 96 bits.
    fn add32(&mut self, value: u32) -> Result<(), DivError> {
        let value = value as u64;
        let new = self.low64().wrapping_add(value);
        self.set_low64(new);
        // Wrap-around of the low 64 bits means a carry into hi.
        if new < value {
            self.hi = self.hi.wrapping_add(1);
            if self.hi == 0 {
                return Err(DivError::Overflow);
            }
        }
        Ok(())
    }

    // Divide a Decimal union by a 32 bit divisor.
    // Self is overwritten with the quotient.
    // Return value is a 32 bit remainder.
fn div32(&mut self, divisor: u32) -> u32 { let divisor64 = divisor as u64; // See if we can get by using a simple u64 division if self.hi != 0 { let mut temp = self.high64(); let q64 = temp / divisor64; self.set_high64(q64); // Calculate the "remainder" temp = ((temp - q64 * divisor64) << 32) | (self.lo as u64); if temp == 0 { return 0; } let q32 = (temp / divisor64) as u32; self.lo = q32; ((temp as u32).wrapping_sub(q32.wrapping_mul(divisor))) as u32 } else { // Super easy divisor let low64 = self.low64(); if low64 == 0 { // Nothing to do return 0; } // Do the calc let quotient = low64 / divisor64; self.set_low64(quotient); // Remainder is the leftover that wasn't used (low64.wrapping_sub(quotient.wrapping_mul(divisor64))) as u32 } } // Divide the number by a power constant // Returns true if division was successful fn div32_const(&mut self, pow: u32) -> bool { let pow64 = pow as u64; let high64 = self.high64(); let lo = self.lo as u64; let div64: u64 = high64 / pow64; let div = ((((high64 - div64 * pow64) << 32) + lo) / pow64) as u32; if self.lo == div.wrapping_mul(pow) { self.set_high64(div64); self.lo = div; true } else { false } } } // A structure that is used for faking a union of the decimal type with an overflow word. struct Dec16 { lo: u32, mid: u32, hi: u32, overflow: u32, } impl Dec16 { const fn zero() -> Self { Dec16 { lo: 0, mid: 0, hi: 0, overflow: 0, } } // lo + mid combined const fn low64(&self) -> u64 { ((self.mid as u64) << 32) | (self.lo as u64) } fn set_low64(&mut self, value: u64) { self.mid = (value >> 32) as u32; self.lo = value as u32; } // Equivalent to Dec12 high64 (i.e. 
mid + hi) const fn mid64(&self) -> u64 { ((self.hi as u64) << 32) | (self.mid as u64) } fn set_mid64(&mut self, value: u64) { self.hi = (value >> 32) as u32; self.mid = value as u32; } // hi + overflow combined const fn high64(&self) -> u64 { ((self.overflow as u64) << 32) | (self.hi as u64) } fn set_high64(&mut self, value: u64) { self.overflow = (value >> 32) as u32; self.hi = value as u32; } // Does a partial divide with a 64 bit divisor. The divisor in this case must require 64 bits // otherwise various assumptions fail (e.g. 32 bit quotient). // To assist, the upper 64 bits must be greater than the divisor for this to succeed. // Consequently, it will return the quotient as a 32 bit number and overwrite self with the // 64 bit remainder. fn partial_divide_64(&mut self, divisor: u64) -> u32 { // We make this assertion here, however below we pivot based on the data debug_assert!(divisor > self.mid64()); // If we have an empty high bit, then divisor must be greater than the dividend due to // the assumption that the divisor REQUIRES 64 bits. if self.hi == 0 { let low64 = self.low64(); if low64 < divisor { // We can't divide at at all so result is 0. The dividend remains untouched since // the full amount is the remainder. return 0; } let quotient = low64 / divisor; self.set_low64(low64 - (quotient * divisor)); return quotient as u32; } // Do a simple check to see if the hi portion of the dividend is greater than the hi // portion of the divisor. let divisor_hi32 = (divisor >> 32) as u32; if self.hi >= divisor_hi32 { // We know that the divisor goes into this at MOST u32::max times. 
// So we kick things off, with that assumption let mut low64 = self.low64(); low64 = low64 - (divisor << 32) + divisor; let mut quotient = u32::max_value(); // If we went negative then keep adding it back in loop { if low64 < divisor { break; } quotient -= 1; low64 += divisor; } self.set_low64(low64); return quotient; } let mid64 = self.mid64(); let divisor_hi32_64 = divisor_hi32 as u64; if mid64 < divisor_hi32_64 as u64 { // similar situation as above where we've got nothing left to divide return 0; } let mut quotient = mid64 / divisor_hi32_64; let mut remainder = self.lo as u64 | ((mid64 - quotient * divisor_hi32_64) << 32); // Do quotient * lo divisor let product = quotient * (divisor & 0xFFFF_FFFF); remainder = remainder.wrapping_sub(product); // Check if we've gone negative. If so, add it back if remainder > product.bitxor(u64::max_value()) { loop { quotient = quotient.wrapping_sub(1); remainder = remainder.wrapping_add(divisor); if remainder < divisor { break; } } } self.set_low64(remainder); quotient as u32 } // Does a partial divide with a 96 bit divisor. The divisor in this case must require 96 bits // otherwise various assumptions fail (e.g. 32 bit quotient). 
fn partial_divide_96(&mut self, divisor: &Dec12) -> u32 {
    // Estimate the quotient from the divisor's high word only, then correct
    // the estimate by computing the full remainder below.
    let dividend = self.high64();
    let divisor_hi = divisor.hi;
    if dividend < divisor_hi as u64 {
        // Dividend is too small - entire number is remainder
        return 0;
    }
    let mut quo = (dividend / divisor_hi as u64) as u32;
    let mut remainder = (dividend as u32).wrapping_sub(quo.wrapping_mul(divisor_hi));

    // Compute full remainder: remainder = dividend - quo * divisor, where
    // prod1/prod2 accumulate the 96-bit product quo * (divisor.lo, divisor.mid).
    let mut prod1 = quo as u64 * divisor.lo as u64;
    let mut prod2 = quo as u64 * divisor.mid as u64;
    prod2 += prod1 >> 32;
    prod1 = (prod1 & 0xFFFF_FFFF) | (prod2 << 32);
    prod2 >>= 32;

    let mut num = self.low64();
    num = num.wrapping_sub(prod1);
    remainder = remainder.wrapping_sub(prod2 as u32);

    // If there are carries make sure they are propagated.
    // `num > !prod1` detects that the wrapping subtraction above borrowed.
    if num > prod1.bitxor(u64::max_value()) {
        remainder = remainder.wrapping_sub(1);
        if remainder < (prod2 as u32).bitxor(u32::max_value()) {
            self.set_low64(num);
            self.hi = remainder;
            return quo;
        }
    } else if remainder <= (prod2 as u32).bitxor(u32::max_value()) {
        self.set_low64(num);
        self.hi = remainder;
        return quo;
    }

    // Remainder went negative, add divisor back (and decrement the quotient
    // estimate) until it's positive.
    prod1 = divisor.low64();
    loop {
        quo = quo.wrapping_sub(1);
        num = num.wrapping_add(prod1);
        remainder = remainder.wrapping_add(divisor_hi);
        if num < prod1 {
            // Detected carry.
            let tmp = remainder;
            remainder += 1;
            if tmp < divisor_hi {
                break;
            }
        }
        if remainder < divisor_hi {
            break; // detected carry
        }
    }
    self.set_low64(num);
    self.hi = remainder;
    quo
}
}

// Failure conditions raised internally by the division helpers.
enum DivError {
    Overflow,
}

// Core 96-bit division routine (modelled on the dotnet Decimal implementation
// acknowledged above). Returns DivResult rather than panicking so callers can
// surface divide-by-zero and overflow distinctly.
pub(crate) fn div_impl(dividend: &Decimal, divisor: &Decimal) -> DivResult {
    if divisor.is_zero() {
        return DivResult::DivByZero;
    }
    if dividend.is_zero() {
        return DivResult::Ok(Decimal::zero());
    }

    // Pre calculate the scale and the sign
    let mut scale = (dividend.scale() as i32) - (divisor.scale() as i32);
    let sign_negative = dividend.is_sign_negative() ^ divisor.is_sign_negative();

    // Set up some variables for modification throughout
    let mut require_unscale = false;
    let mut quotient = Dec12::new(&dividend);
    let divisor = Dec12::new(&divisor);

    // Branch depending on the complexity of the divisor.
    // Note: `|` binds tighter than `==` in Rust, so this tests (hi | mid) == 0.
    if divisor.hi | divisor.mid == 0 {
        // We have a simple(r) divisor (32 bit)
        let divisor32 = divisor.lo;

        // Remainder can only be 32 bits since the divisor is 32 bits.
        let mut remainder = quotient.div32(divisor32);
        let mut power_scale = 0;

        // Figure out how to apply the remainder (i.e. we may have performed something like 10/3 or 8/5)
        loop {
            // Remainder is 0 so we have a simple situation
            if remainder == 0 {
                // If the scale is positive then we're actually done
                if scale >= 0 {
                    break;
                }
                power_scale = 9usize.min((-scale) as usize);
            } else {
                // We may need to normalize later, so set the flag appropriately
                require_unscale = true;

                // We have a remainder so we effectively want to try to adjust the quotient and add
                // the remainder into the quotient. We do this below, however first of all we want
                // to try to avoid overflowing so we do that check first.
                let will_overflow = if scale == MAX_PRECISION_I32 {
                    true
                } else {
                    // Figure out how much we can scale by
                    if let Ok(s) = find_scale(&quotient, scale) {
                        power_scale = s;
                    } else {
                        return DivResult::Overflow;
                    }
                    // If it comes back as 0 (i.e. 10^0 = 1) then we're going to overflow since
                    // we're doing nothing.
power_scale == 0 }; if will_overflow { // No more scaling can be done, but remainder is non-zero so we round if necessary. let tmp = remainder << 1; let round = if tmp < remainder { // We round if we wrapped around true } else { if tmp >= divisor32 { // If we're greater than the divisor (i.e. underflow) // or if there is a lo bit set, we round tmp > divisor32 || (quotient.lo & 0x1) > 0 } else { false } }; // If we need to round, try to do so. if round { if let Ok(new_scale) = round_up(&mut quotient, scale) { scale = new_scale; } else { // Overflowed return DivResult::Overflow; } } break; } } // Do some scaling let power = POWERS_10[power_scale]; scale += power_scale as i32; // Increase the quotient by the power that was looked up let overflow = increase_scale(&mut quotient, power as u64); if overflow > 0 { return DivResult::Overflow; } let remainder_scaled = (remainder as u64) * (power as u64); let remainder_quotient = (remainder_scaled / (divisor32 as u64)) as u32; remainder = (remainder_scaled - remainder_quotient as u64 * divisor32 as u64) as u32; if let Err(DivError::Overflow) = quotient.add32(remainder_quotient) { if let Ok(adj) = unscale_from_overflow(&mut quotient, scale, remainder != 0) { scale = adj; } else { // Still overflowing return DivResult::Overflow; } break; } } } else { // We have a divisor greater than 32 bits. Both of these share some quick calculation wins // so we'll do those before branching into separate logic. // The win we can do is shifting the bits to the left as much as possible. We do this to both // the dividend and the divisor to ensure the quotient is not changed. // As a simple contrived example: if we have 4 / 2 then we could bit shift all the way to the // left meaning that the lo portion would have nothing inside of it. Of course, shifting these // left one has the same result (8/4) etc. // The advantage is that we may be able to write off lower portions of the number making things // easier. 
let mut power_scale = if divisor.hi == 0 { divisor.mid.leading_zeros() } else { divisor.hi.leading_zeros() } as usize; let mut remainder = Dec16::zero(); remainder.set_low64(quotient.low64() << power_scale); let tmp_high = ((quotient.mid as u64) + ((quotient.hi as u64) << 32)) >> (32 - power_scale); remainder.set_high64(tmp_high); // Work out the divisor after it's shifted let divisor64 = divisor.low64() << power_scale; // Check if the divisor is 64 bit or the full 96 bits if divisor.hi == 0 { // It's 64 bits quotient.hi = 0; // Calc mid/lo by shifting accordingly let rem_lo = remainder.lo; remainder.lo = remainder.mid; remainder.mid = remainder.hi; remainder.hi = remainder.overflow; quotient.mid = remainder.partial_divide_64(divisor64); remainder.hi = remainder.mid; remainder.mid = remainder.lo; remainder.lo = rem_lo; quotient.lo = remainder.partial_divide_64(divisor64); loop { let rem_low64 = remainder.low64(); if rem_low64 == 0 { // If the scale is positive then we're actually done if scale >= 0 { break; } power_scale = 9usize.min((-scale) as usize); } else { // We may need to normalize later, so set the flag appropriately require_unscale = true; // We have a remainder so we effectively want to try to adjust the quotient and add // the remainder into the quotient. We do this below, however first of all we want // to try to avoid overflowing so we do that check first. let will_overflow = if scale == MAX_PRECISION_I32 { true } else { // Figure out how much we can scale by if let Ok(s) = find_scale(&quotient, scale) { power_scale = s; } else { return DivResult::Overflow; } // If it comes back as 0 (i.e. 10^0 = 1) then we're going to overflow since // we're doing nothing. power_scale == 0 }; if will_overflow { // No more scaling can be done, but remainder is non-zero so we round if necessary. 
let mut tmp = remainder.low64(); let round = if (tmp as i64) < 0 { // We round if we wrapped around true } else { tmp <<= 1; if tmp > divisor64 { true } else { tmp == divisor64 && quotient.lo & 0x1 != 0 } }; // If we need to round, try to do so. if round { if let Ok(new_scale) = round_up(&mut quotient, scale) { scale = new_scale; } else { // Overflowed return DivResult::Overflow; } } break; } } // Do some scaling let power = POWERS_10[power_scale]; scale += power_scale as i32; // Increase the quotient by the power that was looked up let overflow = increase_scale(&mut quotient, power as u64); if overflow > 0 { return DivResult::Overflow; } increase_scale64(&mut remainder, power as u64); let tmp = remainder.partial_divide_64(divisor64); if let Err(DivError::Overflow) = quotient.add32(tmp) { if let Ok(adj) = unscale_from_overflow(&mut quotient, scale, remainder.low64() != 0) { scale = adj; } else { // Still overflowing return DivResult::Overflow; } break; } } } else { // It's 96 bits // Start by finishing the shift left let divisor_mid = divisor.mid; let divisor_hi = divisor.hi; let mut divisor = divisor; divisor.set_low64(divisor64); divisor.hi = ((divisor_mid as u64 + ((divisor_hi as u64) << 32)) >> (32 - power_scale)) as u32; let quo = remainder.partial_divide_96(&divisor); quotient.set_low64(quo as u64); quotient.hi = 0; loop { let mut rem_low64 = remainder.low64(); if rem_low64 == 0 && remainder.hi == 0 { // If the scale is positive then we're actually done if scale >= 0 { break; } power_scale = 9usize.min((-scale) as usize); } else { // We may need to normalize later, so set the flag appropriately require_unscale = true; // We have a remainder so we effectively want to try to adjust the quotient and add // the remainder into the quotient. We do this below, however first of all we want // to try to avoid overflowing so we do that check first. 
let will_overflow = if scale == MAX_PRECISION_I32 { true } else { // Figure out how much we can scale by if let Ok(s) = find_scale(&quotient, scale) { power_scale = s; } else { return DivResult::Overflow; } // If it comes back as 0 (i.e. 10^0 = 1) then we're going to overflow since // we're doing nothing. power_scale == 0 }; if will_overflow { // No more scaling can be done, but remainder is non-zero so we round if necessary. let round = if (remainder.hi as i32) < 0 { // We round if we wrapped around true } else { let tmp = remainder.mid >> 31; rem_low64 <<= 1; remainder.set_low64(rem_low64); remainder.hi = (remainder.hi << 1) + tmp; if remainder.hi > divisor.hi { true } else if remainder.hi == divisor.hi { let divisor_low64 = divisor.low64(); if rem_low64 > divisor_low64 { true } else { rem_low64 == divisor_low64 && (quotient.lo & 1) != 0 } } else { false } }; // If we need to round, try to do so. if round { if let Ok(new_scale) = round_up(&mut quotient, scale) { scale = new_scale; } else { // Overflowed return DivResult::Overflow; } } break; } } // Do some scaling let power = POWERS_10[power_scale]; scale += power_scale as i32; // Increase the quotient by the power that was looked up let overflow = increase_scale(&mut quotient, power as u64); if overflow > 0 { return DivResult::Overflow; } let mut tmp_remainder = Dec12 { lo: remainder.lo, mid: remainder.mid, hi: remainder.hi, }; let overflow = increase_scale(&mut tmp_remainder, power as u64); remainder.lo = tmp_remainder.lo; remainder.mid = tmp_remainder.mid; remainder.hi = tmp_remainder.hi; remainder.overflow = overflow; let tmp = remainder.partial_divide_96(&divisor); if let Err(DivError::Overflow) = quotient.add32(tmp) { if let Ok(adj) = unscale_from_overflow(&mut quotient, scale, (remainder.low64() | remainder.high64()) != 0) { scale = adj; } else { // Still overflowing return DivResult::Overflow; } break; } } } } if require_unscale { scale = unscale(&mut quotient, scale); } DivResult::Ok(Decimal { lo: 
quotient.lo, mid: quotient.mid, hi: quotient.hi, flags: flags(sign_negative, scale as u32), }) } // Multiply num by power (multiple of 10). Power must be 32 bits. // Returns the overflow, if any fn increase_scale(num: &mut Dec12, power: u64) -> u32 { let mut tmp = (num.lo as u64) * power; num.lo = tmp as u32; tmp >>= 32; tmp += (num.mid as u64) * power; num.mid = tmp as u32; tmp >>= 32; tmp += (num.hi as u64) * power; num.hi = tmp as u32; (tmp >> 32) as u32 } // Multiply num by power (multiple of 10). Power must be 32 bits. fn increase_scale64(num: &mut Dec16, power: u64) { let mut tmp = (num.lo as u64) * power; num.lo = tmp as u32; tmp >>= 32; tmp += (num.mid as u64) * power; num.set_mid64(tmp) } // Adjust the number to deal with an overflow. This function follows being scaled up (i.e. multiplied // by 10, so this effectively tries to reverse that by dividing by 10 then feeding in the high bit // to undo the overflow and rounding instead. // Returns the updated scale. fn unscale_from_overflow(num: &mut Dec12, scale: i32, sticky: bool) -> Result<i32, DivError> { let scale = scale - 1; if scale < 0 { return Err(DivError::Overflow); } // This function is called when the hi portion has "overflowed" upon adding one and has wrapped // back around to 0. Consequently, we need to "feed" that back in, but also rescaling down // to reverse out the overflow. 
    // Divide the (conceptually 97-bit) value back down by 10, feeding the
    // overflowed bit (2^32 in the hi word's terms) into the long division.
    const HIGH_BIT: u64 = 0x1_0000_0000;
    num.hi = (HIGH_BIT / 10) as u32;

    // Calc the mid
    let mut tmp = ((HIGH_BIT % 10) << 32) + (num.mid as u64);
    let mut val = (tmp / 10) as u32;
    num.mid = val;

    // Calc the lo using a similar method
    tmp = ((tmp - (val as u64) * 10) << 32) + (num.lo as u64);
    val = (tmp / 10) as u32;
    num.lo = val;

    // Work out the remainder, and round if we have one (since it doesn't fit).
    // Midpoint rule: round up on > 5, or on == 5 when digits were lost
    // (sticky) or the current lo bit is odd.
    let remainder = (tmp - (val as u64) * 10) as u32;
    if remainder > 5 || (remainder == 5 && (sticky || num.lo & 0x1 > 0)) {
        // Result is deliberately ignored: having just divided by 10, the value
        // is far below the 96-bit limit, so add32 cannot overflow here.
        let _ = num.add32(1);
    }
    Ok(scale)
}

// Determine the maximum value of x that ensures that the quotient when scaled up by 10^x
// still fits in 96 bits. Ultimately, we want to make scale positive - if we can't then
// we're going to overflow. Because x is ultimately used to lookup inside the POWERS array, it
// must be a valid value 0 <= x <= 9
fn find_scale(num: &Dec12, scale: i32) -> Result<usize, DivError> {
    // OVERFLOW_MAX_n_HI is the largest hi word that may still permit scaling
    // by 10^n (truncated leading digits of 2^96 / 10^n).
    const OVERFLOW_MAX_9_HI: u32 = 4;
    const OVERFLOW_MAX_8_HI: u32 = 42;
    const OVERFLOW_MAX_7_HI: u32 = 429;
    const OVERFLOW_MAX_6_HI: u32 = 4294;
    const OVERFLOW_MAX_5_HI: u32 = 42949;
    const OVERFLOW_MAX_4_HI: u32 = 429496;
    const OVERFLOW_MAX_3_HI: u32 = 4294967;
    const OVERFLOW_MAX_2_HI: u32 = 42949672;
    const OVERFLOW_MAX_1_HI: u32 = 429496729;
    const OVERFLOW_MAX_9_LOW64: u64 = 5441186219426131129;

    let hi = num.hi;
    let low64 = num.low64();
    let mut x = 0usize;

    // Quick check to stop us from trying to scale any more.
    if hi > OVERFLOW_MAX_1_HI {
        // If it's less than 0, which it probably is - overflow. We can't do anything.
        if scale < 0 {
            return Err(DivError::Overflow);
        }
        return Ok(x);
    }

    if scale > MAX_PRECISION_I32 - 9 {
        // We can't scale by 10^9 without exceeding the max scale factor.
        // Instead, we'll try to scale by the most that we can and see if that works.
        // This is safe to do due to the check above. e.g. scale > 19 in the above, so it will
        // evaluate to 9 or less below.
        x = (MAX_PRECISION_I32 - scale) as usize;
        if hi < POWER_OVERFLOW_VALUES[x - 1].hi {
            if x as i32 + scale < 0 {
                // We still overflow
                return Err(DivError::Overflow);
            }
            return Ok(x);
        }
    } else if hi < OVERFLOW_MAX_9_HI || hi == OVERFLOW_MAX_9_HI && low64 <= OVERFLOW_MAX_9_LOW64 {
        return Ok(9);
    }

    // Do a binary search to find a power to scale by that is less than 9
    x = if hi > OVERFLOW_MAX_5_HI {
        if hi > OVERFLOW_MAX_3_HI {
            if hi > OVERFLOW_MAX_2_HI {
                1
            } else {
                2
            }
        } else {
            if hi > OVERFLOW_MAX_4_HI {
                3
            } else {
                4
            }
        }
    } else {
        if hi > OVERFLOW_MAX_7_HI {
            if hi > OVERFLOW_MAX_6_HI {
                5
            } else {
                6
            }
        } else {
            if hi > OVERFLOW_MAX_8_HI {
                7
            } else {
                8
            }
        }
    };

    // Double check what we've found won't overflow. Otherwise, we go one below.
    if hi == POWER_OVERFLOW_VALUES[x - 1].hi && low64 > POWER_OVERFLOW_VALUES[x - 1].low64() {
        x -= 1;
    }

    // Confirm we've actually resolved things
    if x as i32 + scale < 0 {
        Err(DivError::Overflow)
    } else {
        Ok(x)
    }
}

// Add one ulp to the 96-bit mantissa. If all 96 bits wrap to zero, rescale
// back down by 10 via unscale_from_overflow (which also rounds), returning
// the adjusted scale.
#[inline]
fn round_up(num: &mut Dec12, scale: i32) -> Result<i32, DivError> {
    let low64 = num.low64().wrapping_add(1);
    num.set_low64(low64);
    if low64 != 0 {
        return Ok(scale);
    }
    let hi = num.hi.wrapping_add(1);
    num.hi = hi;
    if hi != 0 {
        return Ok(scale);
    }
    unscale_from_overflow(num, scale, true)
}

// Strip trailing zeros from the mantissa, reducing the scale to match.
fn unscale(num: &mut Dec12, scale: i32) -> i32 {
    // Since 10 = 2 * 5, there must be a factor of 2 for every power of 10 we can extract.
    // We use this as a quick test on whether to try a given power.
    let mut scale = scale;
    // Try factors of 10^8, then 10^4, 10^2 and 10^1. The cheap low-bit checks
    // skip divisions that cannot possibly succeed (no factor of 2 present).
    while num.lo == 0 && scale >= 8 && num.div32_const(100000000) {
        scale -= 8;
    }
    if (num.lo & 0xF) == 0 && scale >= 4 && num.div32_const(10000) {
        scale -= 4;
    }
    if (num.lo & 0x3) == 0 && scale >= 2 && num.div32_const(100) {
        scale -= 2;
    }
    if (num.lo & 0x1) == 0 && scale >= 1 && num.div32_const(10) {
        scale -= 1;
    }
    scale
}
}

// Long division of a little-endian multi-word integer by a u32 divisor, in
// place, processing from the most significant word down.
// Returns remainder. Panics on a zero divisor (internal invariant violation).
pub(crate) fn div_by_u32(bits: &mut [u32], divisor: u32) -> u32 {
    if divisor == 0 {
        // Divide by zero
        panic!("Internal error: divide by zero");
    } else if divisor == 1 {
        // dividend remains unchanged
        0
    } else {
        let mut remainder = 0u32;
        let divisor = u64::from(divisor);
        for part in bits.iter_mut().rev() {
            // Carry the running remainder into the next (lower) word.
            let temp = (u64::from(remainder) << 32) + u64::from(*part);
            remainder = (temp % divisor) as u32;
            *part = (temp / divisor) as u32;
        }
        remainder
    }
}

// Specialised copy of div_by_u32 for dividing a 96-bit mantissa by 10;
// returns the remainder (0-9).
// NOTE(review): duplicates div_by_u32's loop body - could delegate.
fn div_by_10(bits: &mut [u32; 3]) -> u32 {
    let mut remainder = 0u32;
    let divisor = 10u64;
    for part in bits.iter_mut().rev() {
        let temp = (u64::from(remainder) << 32) + u64::from(*part);
        remainder = (temp % divisor) as u32;
        *part = (temp / divisor) as u32;
    }
    remainder
}

// Shift the multi-word integer left by one bit, shifting `carry` into the low
// bit. Returns the bit shifted out of the most significant word.
#[inline]
fn shl1_internal(bits: &mut [u32], carry: u32) -> u32 {
    let mut carry = carry;
    for part in bits.iter_mut() {
        let b = *part >> 31;
        *part = (*part << 1) | carry;
        carry = b;
    }
    carry
}

// Compare two 96-bit unsigned integers stored in little-endian word order.
#[inline]
fn cmp_internal(left: &[u32; 3], right: &[u32; 3]) -> Ordering {
    let left_hi: u32 = left[2];
    let right_hi: u32 = right[2];
    let left_lo: u64 = u64::from(left[1]) << 32 | u64::from(left[0]);
    let right_lo: u64 = u64::from(right[1]) << 32 | u64::from(right[0]);
    // Less when the high word is smaller, or the high words tie (<= here can
    // only mean == since the < case is already covered) and the low 64 bits
    // are smaller.
    if left_hi < right_hi || (left_hi <= right_hi && left_lo < right_lo) {
        Ordering::Less
    } else if left_hi == right_hi && left_lo == right_lo {
        Ordering::Equal
    } else {
        Ordering::Greater
    }
}

// True when every word of the mantissa is zero.
#[inline]
pub(crate) fn is_all_zero(bits: &[u32]) -> bool {
    bits.iter().all(|b| *b == 0)
}

// Implements From<$T> for Decimal by delegating to a FromPrimitive
// constructor.
// NOTE(review): the unwrap assumes $from_ty always returns Some; for
// i128/u128 values outside Decimal's representable range this would panic -
// verify that is the intended behaviour.
macro_rules! impl_from {
    ($T:ty, $from_ty:path) => {
        impl core::convert::From<$T> for Decimal {
            #[inline]
            fn from(t: $T) -> Self {
                $from_ty(t).unwrap()
            }
        }
    };
}

impl_from!(isize, FromPrimitive::from_isize);
impl_from!(i8, FromPrimitive::from_i8);
impl_from!(i16, FromPrimitive::from_i16);
impl_from!(i32, FromPrimitive::from_i32);
impl_from!(i64, FromPrimitive::from_i64);
impl_from!(usize, FromPrimitive::from_usize);
impl_from!(u8, FromPrimitive::from_u8);
impl_from!(u16, FromPrimitive::from_u16);
impl_from!(u32, FromPrimitive::from_u32);
impl_from!(u64, FromPrimitive::from_u64);
impl_from!(i128, FromPrimitive::from_i128);
impl_from!(u128, FromPrimitive::from_u128);

// Forward `val op val` to the `&val op &val` implementation.
macro_rules! forward_val_val_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl $imp<$res> for $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: $res) -> $res {
                (&self).$method(&other)
            }
        }
    };
}

// Forward `&val op val` to the `&val op &val` implementation.
macro_rules! forward_ref_val_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl<'a> $imp<$res> for &'a $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: $res) -> $res {
                self.$method(&other)
            }
        }
    };
}

// Forward `val op &val` to the `&val op &val` implementation.
macro_rules! forward_val_ref_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        impl<'a> $imp<&'a $res> for $res {
            type Output = $res;

            #[inline]
            fn $method(self, other: &$res) -> $res {
                (&self).$method(other)
            }
        }
    };
}

// Expands to the three owned/borrowed operand combinations above; the
// remaining `&val op &val` impl supplies the actual arithmetic.
macro_rules! forward_all_binop {
    (impl $imp:ident for $res:ty, $method:ident) => {
        forward_val_val_binop!(impl $imp for $res, $method);
        forward_ref_val_binop!(impl $imp for $res, $method);
        forward_val_ref_binop!(impl $imp for $res, $method);
    };
}

impl Zero for Decimal {
    fn zero() -> Decimal {
        Decimal {
            flags: 0,
            hi: 0,
            lo: 0,
            mid: 0,
        }
    }

    // A Decimal is zero whenever the whole 96-bit mantissa is zero,
    // irrespective of the sign/scale flags (so e.g. -0.00 is zero).
    fn is_zero(&self) -> bool {
        self.lo.is_zero() && self.mid.is_zero() && self.hi.is_zero()
    }
}

impl One for Decimal {
    fn one() -> Decimal {
        Decimal {
            flags: 0,
            hi: 0,
            lo: 1,
            mid: 0,
        }
    }
}

impl Signed for Decimal {
    fn abs(&self) -> Self {
        self.abs()
    }

    // NOTE(review): num-traits documents abs_sub as `self - other` when the
    // difference is positive; this returns `self.abs()` instead - confirm
    // this deviation is intentional.
    fn abs_sub(&self, other: &Self) -> Self {
        if self <= other {
            Decimal::zero()
        } else {
            self.abs()
        }
    }

    // +/-1 carrying the sign flag of self; zero stays zero.
    fn signum(&self) -> Self {
        if self.is_zero() {
            Decimal::zero()
        } else {
            let mut value = Decimal::one();
            if self.is_sign_negative() {
                value.set_sign_negative(true);
            }
            value
        }
    }

    fn is_positive(&self) -> bool {
        self.is_sign_positive()
    }

    fn is_negative(&self) -> bool {
        self.is_sign_negative()
    }
}

// Checked arithmetic traits: forward to the inherent Decimal methods, which
// return None on failure (e.g. overflow).
impl CheckedAdd for Decimal {
    #[inline]
    fn checked_add(&self, v: &Decimal) -> Option<Decimal> {
        Decimal::checked_add(*self, *v)
    }
}

impl CheckedSub for Decimal {
    #[inline]
    fn checked_sub(&self, v: &Decimal) -> Option<Decimal> {
        Decimal::checked_sub(*self, *v)
    }
}

impl CheckedMul for Decimal {
    #[inline]
    fn checked_mul(&self, v: &Decimal) -> Option<Decimal> {
        Decimal::checked_mul(*self, *v)
    }
}

impl CheckedDiv for Decimal {
    #[inline]
    fn checked_div(&self, v: &Decimal) -> Option<Decimal> {
        Decimal::checked_div(*self, *v)
    }
}

impl CheckedRem for Decimal {
    #[inline]
    fn checked_rem(&self, v: &Decimal) -> Option<Decimal> {
        Decimal::checked_rem(*self, *v)
    }
}

// dedicated implementation for the most common case.
fn parse_str_radix_10(str: &str) -> Result<Decimal, crate::Error> { if str.is_empty() { return Err(Error::new("Invalid decimal: empty")); } let mut offset = 0; let mut len = str.len(); let bytes = str.as_bytes(); let mut negative = false; // assume positive // handle the sign if bytes[offset] == b'-' { negative = true; // leading minus means negative offset += 1; len -= 1; } else if bytes[offset] == b'+' { // leading + allowed offset += 1; len -= 1; } // should now be at numeric part of the significand let mut digits_before_dot: i32 = -1; // digits before '.', -1 if no '.' let mut coeff = ArrayVec::<[_; MAX_STR_BUFFER_SIZE]>::new(); // integer significand array let mut maybe_round = false; while len > 0 { let b = bytes[offset]; match b { b'0'..=b'9' => { coeff.push(u32::from(b - b'0')); offset += 1; len -= 1; // If the coefficient is longer than the max, exit early if coeff.len() as u32 > 28 { maybe_round = true; break; } } b'.' => { if digits_before_dot >= 0 { return Err(Error::new("Invalid decimal: two decimal points")); } digits_before_dot = coeff.len() as i32; offset += 1; len -= 1; } b'_' => { // Must start with a number... if coeff.is_empty() { return Err(Error::new("Invalid decimal: must start lead with a number")); } offset += 1; len -= 1; } _ => return Err(Error::new("Invalid decimal: unknown character")), } } // If we exited before the end of the string then do some rounding if necessary if maybe_round && offset < bytes.len() { let next_byte = bytes[offset]; let digit = match next_byte { b'0'..=b'9' => u32::from(next_byte - b'0'), b'_' => 0, b'.' 
// --- tail of parse_str_radix_10: classification of the first unparsed byte after the
// --- 96-bit coefficient filled up, used to decide whether to round up. The match head
// --- (on the byte value) is above this chunk.
=> {
    // Still an error if we have a second dp
    if digits_before_dot >= 0 {
        return Err(Error::new("Invalid decimal: two decimal points"));
    }
    0
}
_ => return Err(Error::new("Invalid decimal: unknown character")),
};
// Round at midpoint (base 10: next digit >= 5 rounds the truncated coefficient up).
if digit >= 5 {
    // Propagate the +1 carry from the least significant digit upwards.
    let mut index = coeff.len() - 1;
    loop {
        let new_digit = coeff[index] + 1;
        if new_digit <= 9 {
            coeff[index] = new_digit;
            break;
        } else {
            // Digit rolled over to 10: zero it and carry into the next position.
            coeff[index] = 0;
            if index == 0 {
                // All nines: 999... -> 1000..., keeping the same length by prepending
                // a 1 and dropping the trailing zero; one more digit now sits before
                // the decimal point.
                coeff.insert(0, 1u32);
                // NOTE(review): if no '.' was seen, digits_before_dot is -1 and this
                // bump makes it 0, which flips the `digits_before_dot >= 0` scale
                // branch below — confirm this is intended for dot-less overlong input.
                digits_before_dot += 1;
                coeff.pop();
                break;
            }
        }
        index -= 1;
    }
}
}
// here when no characters left
if coeff.is_empty() {
    return Err(Error::new("Invalid decimal: no digits found"));
}
let mut scale = if digits_before_dot >= 0 {
    // we had a decimal place so set the scale
    (coeff.len() as u32) - (digits_before_dot as u32)
} else {
    0
};
// Accumulate the base-10 digits into the 96-bit (3 x u32) little-endian value.
let mut data = [0u32, 0u32, 0u32];
let mut tmp = [0u32, 0u32, 0u32];
let len = coeff.len();
for (i, digit) in coeff.iter().enumerate() {
    // If the data is going to overflow then we should go into recovery mode:
    // do the multiply on a scratch copy first so `data` survives an overflow.
    tmp[0] = data[0];
    tmp[1] = data[1];
    tmp[2] = data[2];
    let overflow = mul_by_10(&mut tmp);
    if overflow > 0 {
        // This means that we have more data to process, that we're not sure what to do with.
        // This may or may not be an issue - depending on whether we're past a decimal point
        // or not.
        if (i as i32) < digits_before_dot && i + 1 < len {
            return Err(Error::new("Invalid decimal: overflow from too many digits"));
        }
        if *digit >= 5 {
            // The dropped digit was >= 5, so round the kept value up by one.
            let carry = add_one_internal(&mut data);
            if carry > 0 {
                // Highly unlikely scenario which is more indicative of a bug
                return Err(Error::new("Invalid decimal: overflow when rounding"));
            }
        }
        // We're also one less digit so reduce the scale
        let diff = (len - i) as u32;
        if diff > scale {
            return Err(Error::new("Invalid decimal: overflow from scale mismatch"));
        }
        scale -= diff;
        break;
    } else {
        // No overflow: commit the scratch multiply and add the new digit.
        data[0] = tmp[0];
        data[1] = tmp[1];
        data[2] = tmp[2];
        let carry = add_by_internal(&mut data, &[*digit]);
        if carry > 0 {
            // Highly unlikely scenario which is more indicative of a bug
            return Err(Error::new("Invalid decimal: overflow from carry"));
        }
    }
}
Ok(Decimal {
    lo: data[0],
    mid: data[1],
    hi: data[2],
    flags: flags(negative, scale),
})
}

/// Parses a string slice as a `Decimal` in the given radix (2..=36).
///
/// Accepts an optional leading `+`/`-`, a single `.` separator, and `_`
/// digit-group separators (after at least one digit). Digits beyond what
/// fits in the 96-bit coefficient are dropped with round-half-up on the
/// first dropped digit. Returns `Err` for empty input, an unsupported
/// radix, invalid characters for the radix, or overflow.
pub fn parse_str_radix_n(str: &str, radix: u32) -> Result<Decimal, crate::Error> {
    if str.is_empty() {
        return Err(Error::new("Invalid decimal: empty"));
    }
    if radix < 2 {
        return Err(Error::new("Unsupported radix < 2"));
    }
    if radix > 36 {
        // As per trait documentation
        return Err(Error::new("Unsupported radix > 36"));
    }
    let mut offset = 0;
    let mut len = str.len();
    let bytes = str.as_bytes();
    let mut negative = false; // assume positive
    // handle the sign
    if bytes[offset] == b'-' {
        negative = true; // leading minus means negative
        offset += 1;
        len -= 1;
    } else if bytes[offset] == b'+' {
        // leading + allowed
        offset += 1;
        len -= 1;
    }
    // should now be at numeric part of the significand
    let mut digits_before_dot: i32 = -1; // digits before '.', -1 if no '.'
    let mut coeff = ArrayVec::<[_; 96]>::new(); // integer significand array
    // Supporting different radix: compute the highest valid digit byte in each of
    // the '0'-'9', 'a'-'z' and 'A'-'Z' ranges for this radix.
    let (max_n, max_alpha_lower, max_alpha_upper) = if radix <= 10 {
        (b'0' + (radix - 1) as u8, 0, 0)
    } else {
        let adj = (radix - 11) as u8;
        (b'9', adj + b'a', adj + b'A')
    };
    // Estimate the max precision. All in all, it needs to fit into 96 bits.
    // Rather than try to estimate, I've included the constants directly in here. We could,
    // perhaps, replace this with a formula if it's faster - though it does appear to be log2.
    let estimated_max_precision = match radix {
        2 => 96,
        3 => 61,
        4 => 48,
        5 => 42,
        6 => 38,
        7 => 35,
        8 => 32,
        9 => 31,
        10 => 28,
        11 => 28,
        12 => 27,
        13 => 26,
        14 => 26,
        15 => 25,
        16 => 24,
        17 => 24,
        18 => 24,
        19 => 23,
        20 => 23,
        21 => 22,
        22 => 22,
        23 => 22,
        24 => 21,
        25 => 21,
        26 => 21,
        27 => 21,
        28 => 20,
        29 => 20,
        30 => 20,
        31 => 20,
        32 => 20,
        33 => 20,
        34 => 19,
        35 => 19,
        36 => 19,
        _ => return Err(Error::new("Unsupported radix")),
    };
    // Collect digit values (0..radix) into `coeff`, remembering where the '.' fell.
    let mut maybe_round = false;
    while len > 0 {
        let b = bytes[offset];
        match b {
            b'0'..=b'9' => {
                if b > max_n {
                    return Err(Error::new("Invalid decimal: invalid character"));
                }
                coeff.push(u32::from(b - b'0'));
                offset += 1;
                len -= 1;
                // If the coefficient is longer than the max, exit early
                if coeff.len() as u32 > estimated_max_precision {
                    maybe_round = true;
                    break;
                }
            }
            b'a'..=b'z' => {
                if b > max_alpha_lower {
                    return Err(Error::new("Invalid decimal: invalid character"));
                }
                coeff.push(u32::from(b - b'a') + 10);
                offset += 1;
                len -= 1;
                if coeff.len() as u32 > estimated_max_precision {
                    maybe_round = true;
                    break;
                }
            }
            b'A'..=b'Z' => {
                if b > max_alpha_upper {
                    return Err(Error::new("Invalid decimal: invalid character"));
                }
                coeff.push(u32::from(b - b'A') + 10);
                offset += 1;
                len -= 1;
                if coeff.len() as u32 > estimated_max_precision {
                    maybe_round = true;
                    break;
                }
            }
            b'.' => {
                if digits_before_dot >= 0 {
                    return Err(Error::new("Invalid decimal: two decimal points"));
                }
                digits_before_dot = coeff.len() as i32;
                offset += 1;
                len -= 1;
            }
            b'_' => {
                // Must start with a number...
                // NOTE(review): the message below says "start lead" — likely a typo for
                // "start with" upstream; it is a runtime string so left untouched here.
                if coeff.is_empty() {
                    return Err(Error::new("Invalid decimal: must start lead with a number"));
                }
                offset += 1;
                len -= 1;
            }
            _ => return Err(Error::new("Invalid decimal: unknown character")),
        }
    }
    // If we exited before the end of the string then do some rounding if necessary
    if maybe_round && offset < bytes.len() {
        let next_byte = bytes[offset];
        let digit = match next_byte {
            b'0'..=b'9' => {
                if next_byte > max_n {
                    return Err(Error::new("Invalid decimal: invalid character"));
                }
                u32::from(next_byte - b'0')
            }
            b'a'..=b'z' => {
                if next_byte > max_alpha_lower {
                    return Err(Error::new("Invalid decimal: invalid character"));
                }
                u32::from(next_byte - b'a') + 10
            }
            b'A'..=b'Z' => {
                if next_byte > max_alpha_upper {
                    return Err(Error::new("Invalid decimal: invalid character"));
                }
                u32::from(next_byte - b'A') + 10
            }
            b'_' => 0,
            b'.' => {
                // Still an error if we have a second dp
                if digits_before_dot >= 0 {
                    return Err(Error::new("Invalid decimal: two decimal points"));
                }
                0
            }
            _ => return Err(Error::new("Invalid decimal: unknown character")),
        };
        // Round at midpoint.
        // NOTE(review): both arms compute floor(radix / 2) — for odd radix,
        // radix / 2 truncates; for even radix, (radix + 1) / 2 == radix / 2.
        // The branch is redundant; confirm whether round-half-up at ceil(radix/2)
        // was intended for odd radices instead.
        let midpoint = if radix & 0x1 == 1 { radix / 2 } else { (radix + 1) / 2 };
        if digit >= midpoint {
            let mut index = coeff.len() - 1;
            loop {
                let new_digit = coeff[index] + 1;
                // NOTE(review): this carry test is base-10 (`<= 9`) even though digits
                // here range up to radix - 1. For radix > 10 a digit of 9 incremented
                // to 10 carries prematurely (e.g. hex ...9 rounds to ...(+1)0 instead
                // of ...A) — looks like a bug inherited from the base-10 path; confirm.
                if new_digit <= 9 {
                    coeff[index] = new_digit;
                    break;
                } else {
                    coeff[index] = 0;
                    if index == 0 {
                        // All digits rolled over: prepend a 1, drop the trailing zero.
                        coeff.insert(0, 1u32);
                        digits_before_dot += 1;
                        coeff.pop();
                        break;
                    }
                }
                index -= 1;
            }
        }
    }
    // here when no characters left
    if coeff.is_empty() {
        return Err(Error::new("Invalid decimal: no digits found"));
    }
    let mut scale = if digits_before_dot >= 0 {
        // we had a decimal place so set the scale
        (coeff.len() as u32) - (digits_before_dot as u32)
    } else {
        0
    };
    // Parse this using specified radix: accumulate digit values into the 96-bit
    // coefficient, multiplying by `radix` per position (scratch copy guards overflow).
    let mut data = [0u32, 0u32, 0u32];
    let mut tmp = [0u32, 0u32, 0u32];
    let len = coeff.len();
    for (i, digit) in coeff.iter().enumerate() {
        // If the data is going to overflow then we should go into recovery mode
        tmp[0] = data[0];
        tmp[1] = data[1];
        tmp[2] = data[2];
        let overflow = mul_by_u32(&mut tmp, radix);
        if overflow > 0 {
            // This means that we have more data to process, that we're not sure what to do with.
            // This may or may not be an issue - depending on whether we're past a decimal point
            // or not.
            if (i as i32) < digits_before_dot && i + 1 < len {
                return Err(Error::new("Invalid decimal: overflow from too many digits"));
            }
            if *digit >= 5 {
                // NOTE(review): midpoint here is hard-coded to the base-10 value 5
                // rather than the radix-dependent `midpoint` above — confirm.
                let carry = add_one_internal(&mut data);
                if carry > 0 {
                    // Highly unlikely scenario which is more indicative of a bug
                    return Err(Error::new("Invalid decimal: overflow when rounding"));
                }
            }
            // We're also one less digit so reduce the scale
            let diff = (len - i) as u32;
            if diff > scale {
                return Err(Error::new("Invalid decimal: overflow from scale mismatch"));
            }
            scale -= diff;
            break;
        } else {
            data[0] = tmp[0];
            data[1] = tmp[1];
            data[2] = tmp[2];
            let carry = add_by_internal(&mut data, &[*digit]);
            if carry > 0 {
                // Highly unlikely scenario which is more indicative of a bug
                return Err(Error::new("Invalid decimal: overflow from carry"));
            }
        }
    }
    Ok(Decimal {
        lo: data[0],
        mid: data[1],
        hi: data[2],
        flags: flags(negative, scale),
    })
}

// `num_traits::Num`: delegate radix parsing to the inherent implementation.
impl Num for Decimal {
    type FromStrRadixErr = Error;
    fn from_str_radix(str: &str, radix: u32) -> Result<Self, Self::FromStrRadixErr> {
        Decimal::from_str_radix(str, radix)
    }
}

// `"1.23".parse::<Decimal>()` goes through the base-10 fast path.
impl FromStr for Decimal {
    type Err = Error;
    fn from_str(value: &str) -> Result<Decimal, Self::Err> {
        parse_str_radix_10(value)
    }
}

impl FromPrimitive for Decimal {
    // Integer conversions always succeed when the magnitude fits in 96 bits;
    // the sign is carried in the flags word (SIGN_MASK), magnitude in lo/mid/hi.
    fn from_i32(n: i32) -> Option<Decimal> {
        let flags: u32;
        let value_copy: i64;
        if n >= 0 {
            flags = 0;
            value_copy = n as i64;
        } else {
            flags = SIGN_MASK;
            // Widen before negating so i32::MIN negates without overflow.
            value_copy = -(n as i64);
        }
        Some(Decimal {
            flags,
            lo: value_copy as u32,
            mid: 0,
            hi: 0,
        })
    }
    fn from_i64(n: i64) -> Option<Decimal> {
        let flags: u32;
        let value_copy: i128;
        if n >= 0 {
            flags = 0;
            value_copy = n as i128;
        } else {
            flags = SIGN_MASK;
            // Widen before negating so i64::MIN negates without overflow.
            value_copy = -(n as i128);
        }
        Some(Decimal {
            flags,
            lo: value_copy as u32,
            mid: (value_copy >> 32) as u32,
            hi: 0,
        })
    }
    fn from_i128(n: i128) ->
Option<Decimal> {
        let flags;
        let unsigned;
        if n >= 0 {
            unsigned = n as u128;
            flags = 0;
        } else {
            // NOTE(review): `-n` is evaluated before the cast, so n == i128::MIN
            // overflows the negation (panic in debug builds). `n.unsigned_abs()`
            // would be safe — confirm upstream intent.
            unsigned = -n as u128;
            flags = SIGN_MASK;
        };
        // Check if we overflow the 96-bit magnitude
        if unsigned >> 96 != 0 {
            return None;
        }
        Some(Decimal {
            flags,
            lo: unsigned as u32,
            mid: (unsigned >> 32) as u32,
            hi: (unsigned >> 64) as u32,
        })
    }
    fn from_u32(n: u32) -> Option<Decimal> {
        Some(Decimal {
            flags: 0,
            lo: n,
            mid: 0,
            hi: 0,
        })
    }
    fn from_u64(n: u64) -> Option<Decimal> {
        Some(Decimal {
            flags: 0,
            lo: n as u32,
            mid: (n >> 32) as u32,
            hi: 0,
        })
    }
    fn from_u128(n: u128) -> Option<Decimal> {
        // Check if we overflow the 96-bit magnitude
        if n >> 96 != 0 {
            return None;
        }
        Some(Decimal {
            flags: 0,
            lo: n as u32,
            mid: (n >> 32) as u32,
            hi: (n >> 64) as u32,
        })
    }
    fn from_f32(n: f32) -> Option<Decimal> {
        // Handle the case if it is NaN, Infinity or -Infinity
        if !n.is_finite() {
            return None;
        }
        // It's a shame we can't use a union for this due to it being broken up by bits
        // i.e. 1/8/23 (sign, exponent, mantissa)
        // See https://en.wikipedia.org/wiki/IEEE_754-1985
        // n = (sign*-1) * 2^exp * mantissa
        // Decimal of course stores this differently... 10^-exp * significand
        let raw = n.to_bits();
        let positive = (raw >> 31) == 0;
        let biased_exponent = ((raw >> 23) & 0xFF) as i32;
        let mantissa = raw & 0x007F_FFFF;
        // Handle the special zero case (preserving the IEEE sign bit as -0)
        if biased_exponent == 0 && mantissa == 0 {
            let mut zero = Decimal::zero();
            if !positive {
                zero.set_sign_negative(true);
            }
            return Some(zero);
        }
        // Get the bits and exponent2 (binary32 exponent bias is 127)
        let mut exponent2 = biased_exponent - 127;
        let mut bits = [mantissa, 0u32, 0u32];
        if biased_exponent == 0 {
            // Denormalized number - correct the exponent
            exponent2 += 1;
        } else {
            // Add extra hidden bit to mantissa
            bits[0] |= 0x0080_0000;
        }
        // The act of copying a mantissa as integer bits is equivalent to shifting
        // left the mantissa 23 bits. The exponent is reduced to compensate.
        exponent2 -= 23;
        // Convert to decimal
        Decimal::base2_to_decimal(&mut bits, exponent2, positive, false)
    }
    fn from_f64(n: f64) -> Option<Decimal> {
        // Handle the case if it is NaN, Infinity or -Infinity
        if !n.is_finite() {
            return None;
        }
        // It's a shame we can't use a union for this due to it being broken up by bits
        // i.e. 1/11/52 (sign, exponent, mantissa)
        // See https://en.wikipedia.org/wiki/IEEE_754-1985
        // n = (sign*-1) * 2^exp * mantissa
        // Decimal of course stores this differently... 10^-exp * significand
        let raw = n.to_bits();
        let positive = (raw >> 63) == 0;
        let biased_exponent = ((raw >> 52) & 0x7FF) as i32;
        let mantissa = raw & 0x000F_FFFF_FFFF_FFFF;
        // Handle the special zero case (preserving the IEEE sign bit as -0)
        if biased_exponent == 0 && mantissa == 0 {
            let mut zero = Decimal::zero();
            if !positive {
                zero.set_sign_negative(true);
            }
            return Some(zero);
        }
        // Get the bits and exponent2 (binary64 exponent bias is 1023)
        let mut exponent2 = biased_exponent - 1023;
        let mut bits = [
            (mantissa & 0xFFFF_FFFF) as u32,
            ((mantissa >> 32) & 0xFFFF_FFFF) as u32,
            0u32,
        ];
        if biased_exponent == 0 {
            // Denormalized number - correct the exponent
            exponent2 += 1;
        } else {
            // Add extra hidden bit to mantissa (bit 52 lands in word 1, bit 20)
            bits[1] |= 0x0010_0000;
        }
        // The act of copying a mantissa as integer bits is equivalent to shifting
        // left the mantissa 52 bits. The exponent is reduced to compensate.
        exponent2 -= 52;
        // Convert to decimal
        Decimal::base2_to_decimal(&mut bits, exponent2, positive, true)
    }
}

impl ToPrimitive for Decimal {
    // Conversions truncate the fractional part first, then check that the
    // integral magnitude fits the target type.
    fn to_i64(&self) -> Option<i64> {
        let d = self.trunc();
        // Quick overflow check: anything using hi, or bit 63 of mid|lo, won't fit
        // as a positive i64.
        // NOTE(review): this also rejects i64::MIN (magnitude 2^63 with negative
        // sign), which *is* representable — confirm whether that edge is intended.
        if d.hi != 0 || (d.mid & 0x8000_0000) > 0 {
            // Overflow
            return None;
        }
        let raw: i64 = (i64::from(d.mid) << 32) | i64::from(d.lo);
        if self.is_sign_negative() {
            Some(-raw)
        } else {
            Some(raw)
        }
    }
    fn to_i128(&self) -> Option<i128> {
        // 96 bits of magnitude always fit in i128, so no overflow check needed.
        let d = self.trunc();
        let raw: i128 = ((i128::from(d.hi) << 64) | i128::from(d.mid) << 32) | i128::from(d.lo);
        if self.is_sign_negative() {
            Some(-raw)
        } else {
            Some(raw)
        }
    }
    fn to_u64(&self) -> Option<u64> {
        if self.is_sign_negative() {
            return None;
        }
        let d = self.trunc();
        if d.hi != 0 {
            // Overflow
            return None;
        }
        Some((u64::from(d.mid) << 32) | u64::from(d.lo))
    }
    fn to_u128(&self) -> Option<u128> {
        if self.is_sign_negative() {
            return None;
        }
        let d = self.trunc();
        Some((u128::from(d.hi) << 64) | (u128::from(d.mid) << 32) | u128::from(d.lo))
    }
    fn to_f64(&self) -> Option<f64> {
        if self.scale() == 0 {
            // Integral values round-trip through i64 (None if they don't fit).
            let integer = self.to_i64();
            match integer {
                Some(i) => Some(i as f64),
                None => None,
            }
        } else {
            // Split mantissa / 10^scale into integral and fractional f64 parts.
            let sign: f64 = if self.is_sign_negative() { -1.0 } else { 1.0 };
            let mut mantissa: u128 = self.lo.into();
            mantissa |= (self.mid as u128) << 32;
            mantissa |= (self.hi as u128) << 64;
            // scale is at most 28, so this fits comfortably into a u128.
            let scale = self.scale();
            let precision: u128 = 10_u128.pow(scale);
            let integral_part = mantissa / precision;
            let frac_part = mantissa % precision;
            let frac_f64 = (frac_part as f64) / (precision as f64);
            let value = sign * ((integral_part as f64) + frac_f64);
            // NOTE(review): `value * round_to / round_to` evaluates left-to-right and
            // is a numeric no-op as written; presumably `(value * round_to).round()
            // / round_to` was intended to snap to `scale` decimal places — confirm.
            let round_to = 10f64.powi(self.scale() as i32);
            Some(value * round_to / round_to)
        }
    }
}

// Fallible float -> Decimal conversions, delegating to FromPrimitive.
impl core::convert::TryFrom<f32> for Decimal {
    type Error = crate::Error;
    fn try_from(value: f32) -> Result<Self, Error> {
        Self::from_f32(value).ok_or_else(|| Error::new("Failed to convert to Decimal"))
    }
}

impl core::convert::TryFrom<f64> for Decimal {
    type Error = crate::Error;
    fn try_from(value: f64) -> Result<Self, Error> {
        Self::from_f64(value).ok_or_else(|| Error::new("Failed to convert to Decimal"))
    }
}

// Fallible Decimal -> float conversions, delegating to ToPrimitive.
impl core::convert::TryFrom<Decimal> for f32 {
    type Error = crate::Error;
    fn try_from(value: Decimal) -> Result<Self, Self::Error> {
        Decimal::to_f32(&value).ok_or_else(|| Error::new("Failed to convert to f32"))
    }
}

impl core::convert::TryFrom<Decimal> for f64 {
    type Error = crate::Error;
    fn try_from(value: Decimal) -> Result<Self, Self::Error> {
        Decimal::to_f64(&value).ok_or_else(|| Error::new("Failed to convert to f64"))
    }
}

// impl that doesn't allocate for serialization purposes.
/// Renders `value` as plain decimal text into a fixed-capacity buffer
/// (no heap allocation, for serialization purposes).
///
/// * `append_sign` — prepend '-' for negative values.
/// * `precision` — number of fractional digits to emit; `None` uses the
///   value's own scale. Extra digits are zero-filled; fewer digits truncate
///   (no rounding here).
pub(crate) fn to_str_internal(
    value: &Decimal,
    append_sign: bool,
    precision: Option<usize>,
) -> ArrayString<[u8; MAX_STR_BUFFER_SIZE]> {
    // Get the scale - where we need to put the decimal point
    let scale = value.scale() as usize;
    // Convert to a string and manipulate that (neg at front, inject decimal).
    // Digits are produced least-significant first via repeated div-by-10.
    let mut chars = ArrayVec::<[_; MAX_STR_BUFFER_SIZE]>::new();
    let mut working = [value.lo, value.mid, value.hi];
    while !is_all_zero(&working) {
        let remainder = div_by_u32(&mut working, 10u32);
        chars.push(char::from(b'0' + remainder as u8));
    }
    // Zero-pad so there is at least one digit per fractional place.
    while scale > chars.len() {
        chars.push('0');
    }
    let prec = match precision {
        Some(prec) => prec,
        None => scale,
    };
    let len = chars.len();
    let whole_len = len - scale;
    let mut rep = ArrayString::new();
    if append_sign && value.is_sign_negative() {
        rep.push('-');
    }
    // Emit digits most-significant first, injecting '.' at the scale boundary.
    for i in 0..whole_len + prec {
        if i == len - scale {
            if i == 0 {
                // No integral digits: lead with "0."
                rep.push('0');
            }
            rep.push('.');
        }
        if i >= len {
            // Requested precision exceeds available digits: zero-fill.
            rep.push('0');
        } else {
            let c = chars[len - i - 1];
            rep.push(c);
        }
    }
    // corner case for when we truncated everything in a low fractional
    if rep.is_empty() {
        rep.push('0');
    }
    rep
}

impl fmt::Display for Decimal {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // pad_integral handles the '-' sign, width and fill from the formatter.
        let rep = to_str_internal(self, false, f.precision());
        f.pad_integral(self.is_sign_positive(), "", rep.as_str())
    }
}

impl fmt::Debug for Decimal {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Debug output intentionally matches Display.
        fmt::Display::fmt(self, f)
    }
}

/// Shared implementation for LowerExp/UpperExp: renders `value` in
/// scientific notation using `exponent_symbol` ("e" or "E").
fn fmt_scientific_notation(value: &Decimal, exponent_symbol: &str, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    #[cfg(not(feature = "std"))]
    use alloc::string::ToString;

    // Get the scale - this is the e value. With multiples of 10 this may get bigger.
    let mut exponent = -(value.scale() as isize);

    // Convert the integral to a string (least-significant digit first).
    let mut chars = Vec::new();
    let mut working = [value.lo, value.mid, value.hi];
    while !is_all_zero(&working) {
        let remainder = div_by_u32(&mut working, 10u32);
        chars.push(char::from(b'0' + remainder as u8));
    }

    // First of all, apply scientific notation rules. That is:
    // 1. If non-zero digit comes first, move decimal point left so that e is a positive integer
    // 2. If decimal point comes first, move decimal point right until after the first non-zero digit
    // Since decimal notation naturally lends itself this way, we just need to inject the decimal
    // point in the right place and adjust the exponent accordingly.

    let len = chars.len();
    let mut rep;
    if len > 1 {
        if chars.iter().take(len - 1).all(|c| *c == '0') {
            // Chomp off the zero's (value is d * 10^k: emit just the leading digit).
            rep = chars.iter().skip(len - 1).collect::<String>();
        } else {
            // Insert '.' after the most significant digit (remember chars is reversed).
            chars.insert(len - 1, '.');
            rep = chars.iter().rev().collect::<String>();
        }
        exponent += (len - 1) as isize;
    } else {
        rep = chars.iter().collect::<String>();
    }

    rep.push_str(exponent_symbol);
    rep.push_str(&exponent.to_string());
    f.pad_integral(value.is_sign_positive(), "", &rep)
}

impl fmt::LowerExp for Decimal {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt_scientific_notation(self, "e", f)
    }
}

impl fmt::UpperExp for Decimal {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt_scientific_notation(self, "E", f)
    }
}

impl Neg for Decimal {
    type Output = Decimal;

    // Flips the sign flag only; magnitude and scale are untouched.
    fn neg(self) -> Decimal {
        let mut copy = self;
        copy.set_sign_negative(self.is_sign_positive());
        copy
    }
}

impl<'a> Neg for &'a Decimal {
    type Output = Decimal;

    fn neg(self) -> Decimal {
        Decimal {
            flags: flags(!self.is_sign_negative(), self.scale()),
            hi: self.hi,
            lo: self.lo,
            mid: self.mid,
        }
    }
}

// The forward_all_binop! macro derives the owned/borrowed operand permutations
// from the &Decimal op &Decimal implementation below. Same pattern for Sub,
// Mul, Div and Rem.
forward_all_binop!(impl Add for Decimal, add);

impl<'a, 'b> Add<&'b Decimal> for &'a Decimal {
    type Output = Decimal;

    #[inline(always)]
    fn add(self, other: &Decimal) -> Decimal {
        // Panicking wrapper over the checked operation, like primitive ints.
        match self.checked_add(other) {
            Some(sum) => sum,
            None => panic!("Addition overflowed"),
        }
    }
}

impl AddAssign for Decimal {
    fn add_assign(&mut self, other: Decimal) {
        let result = self.add(other);
        self.lo = result.lo;
        self.mid = result.mid;
        self.hi = result.hi;
        self.flags = result.flags;
    }
}

impl<'a> AddAssign<&'a Decimal> for Decimal {
    fn add_assign(&mut self, other: &'a Decimal) {
        Decimal::add_assign(self, *other)
    }
}

impl<'a> AddAssign<Decimal> for &'a mut Decimal {
    fn add_assign(&mut self, other: Decimal) {
        Decimal::add_assign(*self, other)
    }
}

impl<'a> AddAssign<&'a Decimal> for &'a mut Decimal {
    fn add_assign(&mut self, other: &'a Decimal) {
        Decimal::add_assign(*self, *other)
    }
}

forward_all_binop!(impl Sub for Decimal, sub);

impl<'a, 'b> Sub<&'b Decimal> for &'a Decimal {
    type Output = Decimal;

    #[inline(always)]
    fn sub(self, other: &Decimal) -> Decimal {
        match self.checked_sub(other) {
            Some(diff) => diff,
            None => panic!("Subtraction overflowed"),
        }
    }
}

impl SubAssign for Decimal {
    fn sub_assign(&mut self, other: Decimal) {
        let result = self.sub(other);
        self.lo = result.lo;
        self.mid = result.mid;
        self.hi = result.hi;
        self.flags = result.flags;
    }
}

impl<'a> SubAssign<&'a Decimal> for Decimal {
    fn sub_assign(&mut self, other: &'a Decimal) {
        Decimal::sub_assign(self, *other)
    }
}

impl<'a> SubAssign<Decimal> for &'a mut Decimal {
    fn sub_assign(&mut self, other: Decimal) {
        Decimal::sub_assign(*self, other)
    }
}

impl<'a> SubAssign<&'a Decimal> for &'a mut Decimal {
    fn sub_assign(&mut self, other: &'a Decimal) {
        Decimal::sub_assign(*self, *other)
    }
}

forward_all_binop!(impl Mul for Decimal, mul);

impl<'a, 'b> Mul<&'b Decimal> for &'a Decimal {
    type Output = Decimal;

    #[inline]
    fn mul(self, other: &Decimal) -> Decimal {
        match self.checked_mul(other) {
            Some(prod) => prod,
            None => panic!("Multiplication overflowed"),
        }
    }
}

impl MulAssign for Decimal {
    fn mul_assign(&mut self, other: Decimal) {
        let result = self.mul(other);
        self.lo = result.lo;
        self.mid = result.mid;
        self.hi = result.hi;
        self.flags = result.flags;
    }
}

impl<'a> MulAssign<&'a Decimal> for Decimal {
    fn mul_assign(&mut self, other: &'a Decimal) {
        Decimal::mul_assign(self, *other)
    }
}

impl<'a> MulAssign<Decimal> for &'a mut Decimal {
    fn mul_assign(&mut self, other: Decimal) {
        Decimal::mul_assign(*self, other)
    }
}

impl<'a> MulAssign<&'a Decimal> for &'a mut Decimal {
    fn mul_assign(&mut self, other: &'a Decimal) {
        Decimal::mul_assign(*self, *other)
    }
}

forward_all_binop!(impl Div for Decimal, div);

impl<'a, 'b> Div<&'b Decimal> for &'a Decimal {
    type Output = Decimal;

    fn div(self, other: &Decimal) -> Decimal {
        // Division distinguishes overflow from divide-by-zero in its panic message.
        match ops::div_impl(&self, other) {
            DivResult::Ok(quot) => quot,
            DivResult::Overflow => panic!("Division overflowed"),
            DivResult::DivByZero => panic!("Division by zero"),
        }
    }
}

impl DivAssign for Decimal {
    fn div_assign(&mut self, other: Decimal) {
        let result = self.div(other);
        self.lo = result.lo;
        self.mid = result.mid;
        self.hi = result.hi;
        self.flags = result.flags;
    }
}

impl<'a> DivAssign<&'a Decimal> for Decimal {
    fn div_assign(&mut self, other: &'a Decimal) {
        Decimal::div_assign(self, *other)
    }
}

impl<'a> DivAssign<Decimal> for &'a mut Decimal {
    fn div_assign(&mut self, other: Decimal) {
        Decimal::div_assign(*self, other)
    }
}

impl<'a> DivAssign<&'a Decimal> for &'a mut Decimal {
    fn div_assign(&mut self, other: &'a Decimal) {
        Decimal::div_assign(*self, *other)
    }
}

forward_all_binop!(impl Rem for Decimal, rem);

impl<'a, 'b> Rem<&'b Decimal> for &'a Decimal {
    type Output = Decimal;

    #[inline]
    fn rem(self, other: &Decimal) -> Decimal {
        match self.checked_rem(other) {
            Some(rem) => rem,
            None => panic!("Division by zero"),
        }
    }
}

impl RemAssign for Decimal {
    fn rem_assign(&mut self, other: Decimal) {
        let result = self.rem(other);
        self.lo = result.lo;
        self.mid = result.mid;
        self.hi = result.hi;
        self.flags = result.flags;
    }
}

impl<'a> RemAssign<&'a Decimal> for Decimal {
    fn rem_assign(&mut self, other: &'a Decimal) {
        Decimal::rem_assign(self, *other)
    }
}

impl<'a> RemAssign<Decimal> for &'a mut Decimal {
    fn rem_assign(&mut self, other: Decimal) {
        Decimal::rem_assign(*self, other)
    }
}

impl<'a> RemAssign<&'a Decimal> for &'a mut Decimal {
    fn rem_assign(&mut self, other: &'a Decimal) {
        Decimal::rem_assign(*self, *other)
    }
}

impl PartialEq for Decimal {
    #[inline]
    // Equality is scale-aware via Ord: 1.0 == 1.00.
    fn eq(&self, other: &Decimal) -> bool {
        self.cmp(other) == Equal
    }
}

impl Eq for Decimal {}

impl Hash for Decimal {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash the normalized form so that values comparing equal (e.g. 1.0 and
        // 1.00) also hash equally, as required by the Eq/Hash contract.
        let n = self.normalize();
        n.lo.hash(state);
        n.mid.hash(state);
        n.hi.hash(state);
        n.flags.hash(state);
    }
}

impl PartialOrd for Decimal {
    #[inline]
    fn partial_cmp(&self, other: &Decimal) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Decimal {
    fn cmp(&self, other: &Decimal) -> Ordering {
        // Quick exit if major differences (0 == -0 == 0.00).
        if self.is_zero() && other.is_zero() {
            return Ordering::Equal;
        }
        let self_negative = self.is_sign_negative();
        let other_negative = other.is_sign_negative();
        if self_negative && !other_negative {
            return Ordering::Less;
        } else if !self_negative && other_negative {
            return Ordering::Greater;
        }

        // If we have 1.23 and 1.2345 then we have
        // 123 scale 2 and 12345 scale 4
        // We need to convert the first to
        // 12300 scale 4 so we can compare equally
        let left: &Decimal;
        let right: &Decimal;
        if self_negative && other_negative {
            // Both are negative, so reverse cmp
            left = other;
            right = self;
        } else {
            left = self;
            right = other;
        }
        let mut left_scale = left.scale();
        let mut right_scale = right.scale();

        if left_scale == right_scale {
            // Fast path for same scale: compare magnitudes word by word, high first.
            if left.hi != right.hi {
                return left.hi.cmp(&right.hi);
            }
            if left.mid != right.mid {
                return left.mid.cmp(&right.mid);
            }
            return left.lo.cmp(&right.lo);
        }

        // Rescale and compare
        let mut left_raw = [left.lo, left.mid, left.hi];
        let mut right_raw = [right.lo, right.mid, right.hi];
        rescale_to_maximum_scale(&mut left_raw, &mut left_scale, &mut right_raw, &mut right_scale);
        cmp_internal(&left_raw, &right_raw)
    }
}

impl Sum for Decimal {
    // NOTE: panics on overflow, because += delegates to the panicking Add.
    fn sum<I: Iterator<Item = Decimal>>(iter: I) -> Self {
        let mut sum = Decimal::zero();
        for i in iter {
            sum += i;
        }
        sum
    }
}

impl<'a> Sum<&'a Decimal> for Decimal {
    fn sum<I: Iterator<Item = &'a Decimal>>(iter: I) -> Self {
        let mut sum = Decimal::zero();
        for i in iter {
            sum += i;
        }
        sum
    }
}

#[cfg(test)]
mod test {
    // Tests on private methods.
    //
    // All public tests should go under `tests/`.

    use super::*;

    #[test]
    fn it_can_rescale_to_maximum_scale() {
        // Parses a decimal string into its raw 96-bit magnitude and scale.
        fn extract(value: &str) -> ([u32; 3], u32) {
            let v = Decimal::from_str(value).unwrap();
            ([v.lo, v.mid, v.hi], v.scale())
        }

        // (left input, right input, expected left after rescale, expected right)
        let tests = &[
            ("1", "1", "1", "1"),
            ("1", "1.0", "1.0", "1.0"),
            ("1", "1.00000", "1.00000", "1.00000"),
            ("1", "1.0000000000", "1.0000000000", "1.0000000000"),
            (
                "1",
                "1.00000000000000000000",
                "1.00000000000000000000",
                "1.00000000000000000000",
            ),
            ("1.1", "1.1", "1.1", "1.1"),
            ("1.1", "1.10000", "1.10000", "1.10000"),
            ("1.1", "1.1000000000", "1.1000000000", "1.1000000000"),
            (
                "1.1",
                "1.10000000000000000000",
                "1.10000000000000000000",
                "1.10000000000000000000",
            ),
            (
                "0.6386554621848739495798319328",
                "11.815126050420168067226890757",
                "0.638655462184873949579831933",
                "11.815126050420168067226890757",
            ),
            (
                "0.0872727272727272727272727272", // Scale 28
                "843.65000000",                   // Scale 8
                "0.0872727272727272727272727",    // 25
                "843.6500000000000000000000000",  // 25
            ),
        ];
        for &(left_raw, right_raw, expected_left, expected_right) in tests {
            // Left = the value to rescale
            // Right = the new scale we're scaling to
            // Expected = the expected left value after rescale
            let (expected_left, expected_lscale) = extract(expected_left);
            let (expected_right, expected_rscale) = extract(expected_right);
            let (mut left, mut left_scale) = extract(left_raw);
            let (mut right, mut right_scale) = extract(right_raw);
            rescale_to_maximum_scale(&mut left, &mut left_scale, &mut right, &mut right_scale);
            assert_eq!(left, expected_left);
            assert_eq!(left_scale, expected_lscale);
            assert_eq!(right, expected_right);
            assert_eq!(right_scale, expected_rscale);

            // Also test the transitive case (argument order must not matter)
            let (mut left, mut left_scale) = extract(left_raw);
            let (mut right, mut right_scale) = extract(right_raw);
            rescale_to_maximum_scale(&mut right, &mut right_scale, &mut left, &mut left_scale);
            assert_eq!(left, expected_left);
            assert_eq!(left_scale, expected_lscale);
            assert_eq!(right, expected_right);
            assert_eq!(right_scale, expected_rscale);
        }
    }

    #[test]
    fn it_can_rescale_internal() {
        fn extract(value: &str) -> ([u32; 3], u32) {
            let v = Decimal::from_str(value).unwrap();
            ([v.lo, v.mid, v.hi], v.scale())
        }

        // (input, requested scale, expected value) — requests above the max
        // scale (28) are clamped.
        let tests = &[
            ("1", 0, "1"),
            ("1", 1, "1.0"),
            ("1", 5, "1.00000"),
            ("1", 10, "1.0000000000"),
            ("1", 20, "1.00000000000000000000"),
            ("0.6386554621848739495798319328", 27, "0.638655462184873949579831933"),
            (
                "843.65000000",                  // Scale 8
                25,                              // 25
                "843.6500000000000000000000000", // 25
            ),
            (
                "843.65000000",                     // Scale 8
                30,                                 // 30
                "843.6500000000000000000000000000", // 28
            ),
        ];

        for &(value_raw, new_scale, expected_value) in tests {
            let (expected_value, _) = extract(expected_value);
            let (mut value, mut value_scale) = extract(value_raw);
            rescale_internal(&mut value, &mut value_scale, new_scale);
            assert_eq!(value, expected_value);
        }
    }

    #[test]
    fn test_shl1_internal() {
        struct TestCase {
            // One thing to be cautious of is that the structure of a number here for shifting left is
            // the reverse of how you may conceive this mentally. i.e. a[2] contains the higher order
            // bits: a[2] a[1] a[0]
            given: [u32; 3],
            given_carry: u32,
            expected: [u32; 3],
            expected_carry: u32,
        }
        let tests = [
            TestCase {
                given: [1, 0, 0],
                given_carry: 0,
                expected: [2, 0, 0],
                expected_carry: 0,
            },
            TestCase {
                given: [1, 0, 2147483648],
                given_carry: 1,
                expected: [3, 0, 0],
                expected_carry: 1,
            },
        ];
        for case in &tests {
            let mut test = [case.given[0], case.given[1], case.given[2]];
            let carry = shl1_internal(&mut test, case.given_carry);
            assert_eq!(
                test, case.expected,
                "Bits: {:?} << 1 | {}",
                case.given, case.given_carry
            );
            assert_eq!(
                carry, case.expected_carry,
                "Carry: {:?} << 1 | {}",
                case.given, case.given_carry
            )
        }
    }
}
use std::marker::PhantomData; use std::thread::sleep; use std::time::Duration; // TODO replace by configurable value use super::FIRST_LINE_ADDRESS; use super::{DisplayControlBuilder, EntryModeBuilder}; // TODO make configurable // TODO add optional implementation using the busy flag static E_DELAY: u32 = 5; const LCD_WIDTH: usize = 16; bitflags! { struct Instructions: u8 { const CLEAR_DISPLAY = 0b00000001; const RETURN_HOME = 0b00000010; const SHIFT = 0b00010000; const SET_CGRAM = 0b01000000; const SET_DDRAM = 0b10000000; } } bitflags! { struct ShiftTarget: u8 { const CURSOR = 0b00000000; const DISPLAY = 0b00001000; } } bitflags! { struct ShiftDirection: u8 { const RIGHT = 0b00000100; const LEFT = 0b00000000; } } enum WriteMode { Command, Data, } enum ReadMode { Data, // TODO: use busy flag BusyFlag, } /// Enumeration of possible methods to shift a cursor or display. pub enum ShiftTo { /// Shifts to the right by the given offset. Right(u8), /// Shifts to the left by the given offset. Left(u8), } impl ShiftTo { fn as_offset_and_raw_direction(&self) -> (u8, ShiftDirection) { match *self { ShiftTo::Right(offset) => (offset, RIGHT), ShiftTo::Left(offset) => (offset, LEFT), } } } /// Enumeration of possible methods to seek within a `Display` object. pub enum SeekFrom<T: Into<u8>> { /// Sets the cursor position to `Home` plus the provided number of bytes. Home(u8), /// Sets the cursor to the current position plus the specified number of bytes. Current(u8), /// Sets the cursor position to the provides line plus the specified number of bytes. Line { line: T, bytes: u8 }, } /// Enumeration of possible data directions of a pin. pub enum Direction { In, Out, } /// The `DisplayHardwareLayer` trait is intended to be implemented by the library user as a thin /// wrapper around the hardware specific system calls. pub trait DisplayHardwareLayer { /// Initializes an I/O pin. fn init(&self) {} /// Cleanup an I/O pin. 
fn cleanup(&self) {} fn set_direction(&self, Direction); /// Sets a value on an I/O pin. // TODO need a way to let the user set up how levels are interpreted by the hardware fn set_value(&self, u8) -> Result<(), ()>; fn get_value(&self) -> u8; } pub struct DisplayPins { pub register_select: u64, pub read: u64, pub enable: u64, pub data4: u64, pub data5: u64, pub data6: u64, pub data7: u64, } /// A HD44780 compliant display. /// /// It provides a high-level and hardware agnostic interface to controll a HD44780 compliant /// liquid crystal display (LCD). pub struct Display<T, U> where T: From<u64> + DisplayHardwareLayer, U: Into<u8>, { register_select: T, read: T, enable: T, data4: T, data5: T, data6: T, data7: T, cursor_address: u8, _marker: PhantomData<U>, } impl<T, U> Display<T, U> where T: From<u64> + DisplayHardwareLayer, U: Into<u8>, { /// Makes a new `Display` from a numeric pins configuration, given via `DisplayPins`. pub fn from_pins(pins: DisplayPins) -> Display<T, U> { let lcd = Display { register_select: T::from(pins.register_select), enable: T::from(pins.enable), read: T::from(pins.read), data4: T::from(pins.data4), data5: T::from(pins.data5), data6: T::from(pins.data6), data7: T::from(pins.data7), cursor_address: 0, _marker: PhantomData, }; lcd.register_select.init(); lcd.read.init(); lcd.enable.init(); lcd.data4.init(); lcd.data5.init(); lcd.data6.init(); lcd.data7.init(); lcd.read.set_value(0).unwrap(); // Initializing by Instruction lcd.write_byte(0x33, WriteMode::Command); lcd.write_byte(0x32, WriteMode::Command); // FuctionSet: Data length 4bit + 2 lines lcd.write_byte(0x28, WriteMode::Command); // DisplayControl: Display on, Cursor off + cursor blinking off lcd.write_byte(0x0C, WriteMode::Command); // EntryModeSet: Cursor move direction inc + no display shift lcd.write_byte(0x06, WriteMode::Command); lcd.clear(); // ClearDisplay lcd } /// Sets the entry mode of the display using the builder given in the closure. 
pub fn set_entry_mode<F>(&self, f: F)
where
    F: Fn(&mut EntryModeBuilder),
{
    // Let the caller customise a default builder, then send the resulting
    // "Entry Mode Set" command to the controller.
    let mut builder = EntryModeBuilder::default();
    f(&mut builder);
    self.write_byte(builder.build_command(), WriteMode::Command);
}

/// Sets the display control settings using the builder given in the closure.
pub fn set_display_control<F>(&self, f: F)
where
    F: Fn(&mut DisplayControlBuilder),
{
    let mut builder = DisplayControlBuilder::default();
    f(&mut builder);
    self.write_byte(builder.build_command(), WriteMode::Command);
}

/// Shifts the cursor to the left or the right by the given offset.
///
/// **Note:** Consider to use [seek()](struct.Display.html#method.seek) for longer distances.
pub fn shift_cursor(&mut self, direction: ShiftTo) {
    let (offset, raw_direction) = direction.as_offset_and_raw_direction();
    // Keep the software shadow of the cursor address in sync with the
    // hardware. `wrapping_*` removes the debug-build overflow panic the
    // previous bare `+=`/`-=` on `u8` could hit (release builds already
    // wrapped), and the offset extracted above is reused instead of being
    // re-matched out of `direction` a second time.
    self.cursor_address = match direction {
        ShiftTo::Right(_) => self.cursor_address.wrapping_add(offset),
        ShiftTo::Left(_) => self.cursor_address.wrapping_sub(offset),
    };
    self.raw_shift(CURSOR, offset, raw_direction);
}

/// Shifts the display to the right or the left by the given offset.
///
/// Note that the first and second line will shift at the same time.
///
/// When the displayed data is shifted repeatedly each line moves only horizontally.
/// The second line display does not shift into the first line position.
pub fn shift(&self, direction: ShiftTo) {
    let (offset, raw_direction) = direction.as_offset_and_raw_direction();
    self.raw_shift(DISPLAY, offset, raw_direction);
}

// Issues one SHIFT command per step: the controller moves the cursor or
// display by a single position per instruction.
fn raw_shift(&self, shift_type: ShiftTarget, offset: u8, raw_direction: ShiftDirection) {
    let mut cmd = SHIFT.bits();
    cmd |= shift_type.bits();
    cmd |= raw_direction.bits();
    for _ in 0..offset {
        self.write_byte(cmd, WriteMode::Command);
    }
}

/// Clears the entire display, sets the cursor to the home position and undo all display
/// shifts.
///
/// It also sets the cursor's move direction to `Increment`.
pub fn clear(&self) {
    // NOTE(review): the `cursor_address` shadow is not reset here even though
    // the hardware cursor returns home (`&self` prevents it) — confirm
    // callers re-seek after clearing.
    self.write_byte(CLEAR_DISPLAY.bits(), WriteMode::Command);
}

/// Seeks to an offset in display data RAM.
pub fn seek(&mut self, pos: SeekFrom<U>) {
    // "Set DDRAM address" instruction; the low bits carry the address.
    let mut cmd = SET_DDRAM.bits();
    let (start, bytes) = match pos {
        SeekFrom::Home(bytes) => (FIRST_LINE_ADDRESS, bytes),
        SeekFrom::Current(bytes) => (self.cursor_address, bytes),
        SeekFrom::Line { line, bytes } => (line.into(), bytes),
    };
    // Track the new cursor position in software as well.
    self.cursor_address = start + bytes;
    cmd |= self.cursor_address;
    self.write_byte(cmd, WriteMode::Command);
}

/// Seeks to an offset in display character generator RAM.
pub fn seek_cgram(&mut self, pos: SeekFrom<U>) {
    // Same flow as `seek`, but issues the "Set CGRAM address" instruction.
    let mut cmd = SET_CGRAM.bits();
    let (start, bytes) = match pos {
        SeekFrom::Home(bytes) => (FIRST_LINE_ADDRESS, bytes),
        SeekFrom::Current(bytes) => (self.cursor_address, bytes),
        SeekFrom::Line { line, bytes } => (line.into(), bytes),
    };
    // NOTE(review): `FIRST_LINE_ADDRESS` and `cursor_address` look like DDRAM
    // notions — confirm they are the intended base for CGRAM seeks.
    self.cursor_address = start + bytes;
    cmd |= self.cursor_address;
    self.write_byte(cmd, WriteMode::Command);
}

// Writes one byte over the 4-bit bus: high nibble first, then low nibble,
// latching each nibble with a pulse on the enable line.
fn write_byte(&self, value: u8, mode: WriteMode) {
    let wait_time = Duration::new(0, E_DELAY);
    // R/W line low selects a write cycle.
    self.read.set_value(0).unwrap();
    self.data4.set_direction(Direction::Out);
    self.data5.set_direction(Direction::Out);
    self.data6.set_direction(Direction::Out);
    self.data7.set_direction(Direction::Out);
    // Register select: high for character data, low for instructions.
    match mode {
        WriteMode::Data => self.register_select.set_value(1),
        WriteMode::Command => self.register_select.set_value(0),
    }.unwrap();
    // Put the high nibble (bits 4-7) on data4..data7.
    self.data4.set_value(0).unwrap();
    self.data5.set_value(0).unwrap();
    self.data6.set_value(0).unwrap();
    self.data7.set_value(0).unwrap();
    if value & 0x10 == 0x10 {
        self.data4.set_value(1).unwrap();
    }
    if value & 0x20 == 0x20 {
        self.data5.set_value(1).unwrap();
    }
    if value & 0x40 == 0x40 {
        self.data6.set_value(1).unwrap();
    }
    if value & 0x80 == 0x80 {
        self.data7.set_value(1).unwrap();
    }
    // Enable pulse latches the nibble into the controller.
    sleep(wait_time);
    self.enable.set_value(1).unwrap();
    sleep(wait_time);
    self.enable.set_value(0).unwrap();
    sleep(wait_time);
    // Put the low nibble (bits 0-3) on data4..data7.
    self.data4.set_value(0).unwrap();
    self.data5.set_value(0).unwrap();
    self.data6.set_value(0).unwrap();
    self.data7.set_value(0).unwrap();
    if value & 0x01 == 0x01 {
        self.data4.set_value(1).unwrap();
    }
    if value & 0x02 == 0x02 {
        self.data5.set_value(1).unwrap();
    }
    if value & 0x04 == 0x04 {
        self.data6.set_value(1).unwrap();
    }
    if value & 0x08 == 0x08 {
        self.data7.set_value(1).unwrap();
    }
    sleep(wait_time);
    self.enable.set_value(1).unwrap();
    sleep(wait_time);
    self.enable.set_value(0).unwrap();
    sleep(wait_time);
}

// Reads one byte over the 4-bit bus, assembling the high nibble first and
// then the low nibble, one enable pulse each.
fn read_raw_byte(&self, mode: ReadMode) -> u8 {
    let mut result = 0u8;
    let wait_time = Duration::new(0, 10);
    self.data4.set_direction(Direction::In);
    self.data5.set_direction(Direction::In);
    self.data6.set_direction(Direction::In);
    self.data7.set_direction(Direction::In);
    match mode {
        ReadMode::Data => self.register_select.set_value(1),
        ReadMode::BusyFlag => self.register_select.set_value(0),
    }.unwrap();
    // R/W line high selects a read cycle.
    self.read.set_value(1).unwrap();
    sleep(Duration::new(0, 45));
    self.enable.set_value(1).unwrap();
    sleep(Duration::new(0, 165));
    // High nibble.
    result |= self.data7.get_value() << 7;
    result |= self.data6.get_value() << 6;
    result |= self.data5.get_value() << 5;
    result |= self.data4.get_value() << 4;
    self.enable.set_value(0).unwrap();
    sleep(wait_time);
    self.enable.set_value(1).unwrap();
    sleep(Duration::new(0, 165));
    // Low nibble.
    result |= self.data7.get_value() << 3;
    result |= self.data6.get_value() << 2;
    result |= self.data5.get_value() << 1;
    result |= self.data4.get_value();
    self.enable.set_value(0).unwrap();
    sleep(wait_time);
    result
}

/// Reads a single byte from data RAM.
pub fn read_byte(&mut self) -> u8 {
    // Advance the software cursor shadow — presumably the controller
    // auto-increments its address counter on read; TODO confirm.
    self.cursor_address += 1;
    self.read_raw_byte(ReadMode::Data)
}

/// Reads busy flag and the cursor's current address.
pub fn read_busy_flag(&self) -> (bool, u8) {
    let byte = self.read_raw_byte(ReadMode::BusyFlag);
    // Bit 7 is the busy flag, bits 0-6 the address counter.
    let busy_flag = (byte & 0b10000000) != 0;
    let address = byte & 0b01111111;
    (busy_flag, address)
}

/// Writes the given message to data or character generator RAM, depending on the previous
/// seek operation.
pub fn write_message(&mut self, msg: &str) { for c in msg.as_bytes().iter().take(LCD_WIDTH) { self.cursor_address += 1; self.write_byte(*c, WriteMode::Data); } } } impl<T, U> Drop for Display<T, U> where T: From<u64> + DisplayHardwareLayer, U: Into<u8>, { fn drop(&mut self) { self.register_select.cleanup(); self.enable.cleanup(); self.data4.cleanup(); self.data5.cleanup(); self.data6.cleanup(); self.data7.cleanup(); } } Refactor duplicate code use std::marker::PhantomData; use std::thread::sleep; use std::time::Duration; // TODO replace by configurable value use super::FIRST_LINE_ADDRESS; use super::{DisplayControlBuilder, EntryModeBuilder}; // TODO make configurable // TODO add optional implementation using the busy flag static E_DELAY: u32 = 5; const LCD_WIDTH: usize = 16; bitflags! { struct Instructions: u8 { const CLEAR_DISPLAY = 0b00000001; const RETURN_HOME = 0b00000010; const SHIFT = 0b00010000; } } bitflags! { struct ShiftTarget: u8 { const CURSOR = 0b00000000; const DISPLAY = 0b00001000; } } bitflags! { struct ShiftDirection: u8 { const RIGHT = 0b00000100; const LEFT = 0b00000000; } } enum WriteMode { Command, Data, } enum ReadMode { Data, // TODO: use busy flag BusyFlag, } enum RamType { DisplayData, CharacterGenerator, } impl From<RamType> for u8 { fn from(ram_type: RamType) -> Self { match ram_type { RamType::DisplayData => 0b10000000, RamType::CharacterGenerator => 0b01000000, } } } /// Enumeration of possible methods to shift a cursor or display. pub enum ShiftTo { /// Shifts to the right by the given offset. Right(u8), /// Shifts to the left by the given offset. Left(u8), } impl ShiftTo { fn as_offset_and_raw_direction(&self) -> (u8, ShiftDirection) { match *self { ShiftTo::Right(offset) => (offset, RIGHT), ShiftTo::Left(offset) => (offset, LEFT), } } } /// Enumeration of possible methods to seek within a `Display` object. pub enum SeekFrom<T: Into<u8>> { /// Sets the cursor position to `Home` plus the provided number of bytes. 
Home(u8), /// Sets the cursor to the current position plus the specified number of bytes. Current(u8), /// Sets the cursor position to the provides line plus the specified number of bytes. Line { line: T, bytes: u8 }, } /// Enumeration of possible data directions of a pin. pub enum Direction { In, Out, } /// The `DisplayHardwareLayer` trait is intended to be implemented by the library user as a thin /// wrapper around the hardware specific system calls. pub trait DisplayHardwareLayer { /// Initializes an I/O pin. fn init(&self) {} /// Cleanup an I/O pin. fn cleanup(&self) {} fn set_direction(&self, Direction); /// Sets a value on an I/O pin. // TODO need a way to let the user set up how levels are interpreted by the hardware fn set_value(&self, u8) -> Result<(), ()>; fn get_value(&self) -> u8; } pub struct DisplayPins { pub register_select: u64, pub read: u64, pub enable: u64, pub data4: u64, pub data5: u64, pub data6: u64, pub data7: u64, } /// A HD44780 compliant display. /// /// It provides a high-level and hardware agnostic interface to controll a HD44780 compliant /// liquid crystal display (LCD). pub struct Display<T, U> where T: From<u64> + DisplayHardwareLayer, U: Into<u8>, { register_select: T, read: T, enable: T, data4: T, data5: T, data6: T, data7: T, cursor_address: u8, _marker: PhantomData<U>, } impl<T, U> Display<T, U> where T: From<u64> + DisplayHardwareLayer, U: Into<u8>, { /// Makes a new `Display` from a numeric pins configuration, given via `DisplayPins`. 
pub fn from_pins(pins: DisplayPins) -> Display<T, U> { let lcd = Display { register_select: T::from(pins.register_select), enable: T::from(pins.enable), read: T::from(pins.read), data4: T::from(pins.data4), data5: T::from(pins.data5), data6: T::from(pins.data6), data7: T::from(pins.data7), cursor_address: 0, _marker: PhantomData, }; lcd.register_select.init(); lcd.read.init(); lcd.enable.init(); lcd.data4.init(); lcd.data5.init(); lcd.data6.init(); lcd.data7.init(); lcd.read.set_value(0).unwrap(); // Initializing by Instruction lcd.write_byte(0x33, WriteMode::Command); lcd.write_byte(0x32, WriteMode::Command); // FuctionSet: Data length 4bit + 2 lines lcd.write_byte(0x28, WriteMode::Command); // DisplayControl: Display on, Cursor off + cursor blinking off lcd.write_byte(0x0C, WriteMode::Command); // EntryModeSet: Cursor move direction inc + no display shift lcd.write_byte(0x06, WriteMode::Command); lcd.clear(); // ClearDisplay lcd } /// Sets the entry mode of the display using the builder given in the closure. pub fn set_entry_mode<F>(&self, f: F) where F: Fn(&mut EntryModeBuilder), { let mut builder = EntryModeBuilder::default(); f(&mut builder); self.write_byte(builder.build_command(), WriteMode::Command); } /// Sets the display control settings using the builder given in the closure. pub fn set_display_control<F>(&self, f: F) where F: Fn(&mut DisplayControlBuilder), { let mut builder = DisplayControlBuilder::default(); f(&mut builder); self.write_byte(builder.build_command(), WriteMode::Command); } /// Shifts the cursor to the left or the right by the given offset. /// /// **Note:** Consider to use [seek()](struct.Display.html#method.seek) for longer distances. 
pub fn shift_cursor(&mut self, direction: ShiftTo) {
    let (offset, raw_direction) = direction.as_offset_and_raw_direction();
    // Keep the software shadow of the cursor address in sync with the
    // hardware. `wrapping_*` removes the debug-build overflow panic the
    // previous bare `+=`/`-=` on `u8` could hit (release builds already
    // wrapped), and the offset extracted above is reused instead of being
    // re-matched out of `direction` a second time.
    self.cursor_address = match direction {
        ShiftTo::Right(_) => self.cursor_address.wrapping_add(offset),
        ShiftTo::Left(_) => self.cursor_address.wrapping_sub(offset),
    };
    self.raw_shift(CURSOR, offset, raw_direction);
}

/// Shifts the display to the right or the left by the given offset.
///
/// Note that the first and second line will shift at the same time.
///
/// When the displayed data is shifted repeatedly each line moves only horizontally.
/// The second line display does not shift into the first line position.
pub fn shift(&self, direction: ShiftTo) {
    let (offset, raw_direction) = direction.as_offset_and_raw_direction();
    self.raw_shift(DISPLAY, offset, raw_direction);
}

// Issues one SHIFT command per step: the controller moves the cursor or
// display by a single position per instruction.
fn raw_shift(&self, shift_type: ShiftTarget, offset: u8, raw_direction: ShiftDirection) {
    let mut cmd = SHIFT.bits();
    cmd |= shift_type.bits();
    cmd |= raw_direction.bits();
    for _ in 0..offset {
        self.write_byte(cmd, WriteMode::Command);
    }
}

/// Clears the entire display, sets the cursor to the home position and undo all display
/// shifts.
///
/// It also sets the cursor's move direction to `Increment`.
pub fn clear(&self) {
    // NOTE(review): the `cursor_address` shadow is not reset here even though
    // the hardware cursor returns home (`&self` prevents it) — confirm
    // callers re-seek after clearing.
    self.write_byte(CLEAR_DISPLAY.bits(), WriteMode::Command);
}

// Shared implementation of `seek`/`seek_cgram`: computes the target address
// and issues the matching "set RAM address" command for `ram_type`.
fn generic_seek(&mut self, ram_type: RamType, pos: SeekFrom<U>) {
    let mut cmd = ram_type.into();
    let (start, bytes) = match pos {
        SeekFrom::Home(bytes) => (FIRST_LINE_ADDRESS, bytes),
        SeekFrom::Current(bytes) => (self.cursor_address, bytes),
        SeekFrom::Line { line, bytes } => (line.into(), bytes),
    };
    // Track the new cursor position in software as well; wrapping_add avoids
    // a debug-build overflow panic on u8 (release behavior unchanged).
    self.cursor_address = start.wrapping_add(bytes);
    cmd |= self.cursor_address;
    self.write_byte(cmd, WriteMode::Command);
}

/// Seeks to an offset in display data RAM.
pub fn seek(&mut self, pos: SeekFrom<U>) {
    self.generic_seek(RamType::DisplayData, pos);
}

/// Seeks to an offset in display character generator RAM.
pub fn seek_cgram(&mut self, pos: SeekFrom<U>) { self.generic_seek(RamType::CharacterGenerator, pos); } fn write_byte(&self, value: u8, mode: WriteMode) { let wait_time = Duration::new(0, E_DELAY); self.read.set_value(0).unwrap(); self.data4.set_direction(Direction::Out); self.data5.set_direction(Direction::Out); self.data6.set_direction(Direction::Out); self.data7.set_direction(Direction::Out); match mode { WriteMode::Data => self.register_select.set_value(1), WriteMode::Command => self.register_select.set_value(0), }.unwrap(); self.data4.set_value(0).unwrap(); self.data5.set_value(0).unwrap(); self.data6.set_value(0).unwrap(); self.data7.set_value(0).unwrap(); if value & 0x10 == 0x10 { self.data4.set_value(1).unwrap(); } if value & 0x20 == 0x20 { self.data5.set_value(1).unwrap(); } if value & 0x40 == 0x40 { self.data6.set_value(1).unwrap(); } if value & 0x80 == 0x80 { self.data7.set_value(1).unwrap(); } sleep(wait_time); self.enable.set_value(1).unwrap(); sleep(wait_time); self.enable.set_value(0).unwrap(); sleep(wait_time); self.data4.set_value(0).unwrap(); self.data5.set_value(0).unwrap(); self.data6.set_value(0).unwrap(); self.data7.set_value(0).unwrap(); if value & 0x01 == 0x01 { self.data4.set_value(1).unwrap(); } if value & 0x02 == 0x02 { self.data5.set_value(1).unwrap(); } if value & 0x04 == 0x04 { self.data6.set_value(1).unwrap(); } if value & 0x08 == 0x08 { self.data7.set_value(1).unwrap(); } sleep(wait_time); self.enable.set_value(1).unwrap(); sleep(wait_time); self.enable.set_value(0).unwrap(); sleep(wait_time); } fn read_raw_byte(&self, mode: ReadMode) -> u8 { let mut result = 0u8; let wait_time = Duration::new(0, 10); self.data4.set_direction(Direction::In); self.data5.set_direction(Direction::In); self.data6.set_direction(Direction::In); self.data7.set_direction(Direction::In); match mode { ReadMode::Data => self.register_select.set_value(1), ReadMode::BusyFlag => self.register_select.set_value(0), }.unwrap(); self.read.set_value(1).unwrap(); 
sleep(Duration::new(0, 45)); self.enable.set_value(1).unwrap(); sleep(Duration::new(0, 165)); result |= self.data7.get_value() << 7; result |= self.data6.get_value() << 6; result |= self.data5.get_value() << 5; result |= self.data4.get_value() << 4; self.enable.set_value(0).unwrap(); sleep(wait_time); self.enable.set_value(1).unwrap(); sleep(Duration::new(0, 165)); result |= self.data7.get_value() << 3; result |= self.data6.get_value() << 2; result |= self.data5.get_value() << 1; result |= self.data4.get_value(); self.enable.set_value(0).unwrap(); sleep(wait_time); result } /// Reads a single byte from data RAM. pub fn read_byte(&mut self) -> u8 { self.cursor_address += 1; self.read_raw_byte(ReadMode::Data) } /// Reads busy flag and the cursor's current address. pub fn read_busy_flag(&self) -> (bool, u8) { let byte = self.read_raw_byte(ReadMode::BusyFlag); let busy_flag = (byte & 0b10000000) != 0; let address = byte & 0b01111111; (busy_flag, address) } /// Writes the given message to data or character generator RAM, depending on the previous /// seek operation. pub fn write_message(&mut self, msg: &str) { for c in msg.as_bytes().iter().take(LCD_WIDTH) { self.cursor_address += 1; self.write_byte(*c, WriteMode::Data); } } } impl<T, U> Drop for Display<T, U> where T: From<u64> + DisplayHardwareLayer, U: Into<u8>, { fn drop(&mut self) { self.register_select.cleanup(); self.enable.cleanup(); self.data4.cleanup(); self.data5.cleanup(); self.data6.cleanup(); self.data7.cleanup(); } }
/* # Minimal gif encoder ``` use std::fs::File; // Get pixel data from some source let mut pixels = unimplemented!(); // Create frame from data let frame = Frame::from_rgb(100, 100, &mut *pixels); // Create encoder let encoder = gif::Encoder::new(File::create("some.gif").unwrap(), frame.width, frame.height); // Write header to file let mut encoder = encoder.write_global_palette(&[]).unwrap(); // Write frame to file encoder.write_frame(&frame).unwrap(); ``` */ use std::cmp::min; use std::io; use std::io::prelude::*; use std::slice::bytes; use lzw; use traits::WriteBytesExt; use common::{Block, Frame, Extension, DisposalMethod}; pub enum ExtensionData { Control { flags: u8, delay: u16, trns: u8 } } impl ExtensionData { pub fn new_control_ext(delay: u16, dispose: DisposalMethod, needs_user_input: bool, trns: Option<u8>) -> ExtensionData { let mut flags = 0; let trns = match trns { Some(trns) => { flags |= 1; trns as u8 }, None => 0 }; flags |= (needs_user_input as u8) << 1; flags |= (dispose as u8) << 2; ExtensionData::Control { flags: flags, delay: delay, trns: trns } } } struct BlockWriter<'a, W: Write + 'a> { w: &'a mut W, bytes: usize, buf: [u8; 0xFF] } impl<'a, W: Write + 'a> BlockWriter<'a, W> { fn new(w: &'a mut W) -> BlockWriter<'a, W> { BlockWriter { w: w, bytes: 0, buf: [0; 0xFF] } } } impl<'a, W: Write + 'a> Write for BlockWriter<'a, W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let to_copy = min(buf.len(), 0xFF - self.bytes); bytes::copy_memory(&buf[..to_copy], &mut self.buf[self.bytes..]); self.bytes += to_copy; if self.bytes == 0xFF { self.bytes = 0; try!(self.w.write_le(0xFFu8)); try!(self.w.write_all(&self.buf)); } Ok(to_copy) } fn flush(&mut self) -> io::Result<()> { return Err(io::Error::new( io::ErrorKind::Other, "Cannot flush a BlockWriter, use `drop` instead." 
)) } } impl<'a, W: Write + 'a> Drop for BlockWriter<'a, W> { #[cfg(feature = "raii_no_panic")] fn drop(&mut self) { if self.bytes > 0 { let _ = self.w.write_le(self.bytes as u8); let _ = self.w.write_all(&self.buf[..self.bytes]); } } #[cfg(not(feature = "raii_no_panic"))] fn drop(&mut self) { if self.bytes > 0 { self.w.write_le(self.bytes as u8).unwrap(); self.w.write_all(&self.buf[..self.bytes]).unwrap(); } } } pub struct Encoder<'a, W: Write + 'a> { w: &'a mut W, global_palette: bool, width: u16, height: u16 } pub struct HeaderWritten<'a, W: Write + 'a> { enc: Encoder<'a, W> } impl<'a, W: Write + 'a> HeaderWritten<'a, W> { /// Writes a complete frame to the image /// /// Note: This function also writes a control extention if necessary. pub fn write_frame(&mut self, frame: &Frame) -> io::Result<()> { self.enc.write_frame(frame) } /// Writes an extension to the image pub fn write_extension(&mut self, extension: ExtensionData) -> io::Result<()> { self.enc.write_extension(extension) } /// Writes a raw extension to the image pub fn write_raw_extension(&mut self, func: u8, data: &[u8]) -> io::Result<()> { self.enc.write_raw_extension(func, data) } } impl<'a, W: Write + 'a> Drop for HeaderWritten<'a, W> { #[cfg(feature = "raii_no_panic")] fn drop(&mut self) { let _ = self.enc.w.write_le(Block::Trailer as u8); } #[cfg(not(feature = "raii_no_panic"))] fn drop(&mut self) { self.enc.w.write_le(Block::Trailer as u8).unwrap() } } /* pub struct Frame { pub delay: u16, pub dispose: DisposalMethod, pub transparent: Option<usize>, pub needs_user_input: bool, pub top: u16, pub left: u16, pub width: u16, pub height: u16, pub interlaced: bool, pub palette: Option<Vec<u8>>, pub buffer: Vec<u8> } */ impl<'a, W: Write + 'a> Encoder<'a, W> { pub fn new(w: &'a mut W, width: u16, height: u16) -> Self { Encoder { w: w, global_palette: false, width: width, height: height } } /// Writes the global color palette pub fn write_global_palette(mut self, palette: &[u8]) -> 
io::Result<HeaderWritten<'a, W>> { self.global_palette = true; let mut flags = 0; flags |= 0b1000_0000; let num_colors = palette.len() / 3; flags |= flag_size(num_colors); flags |= flag_size(num_colors) << 4; // wtf flag try!(self.write_screen_desc(flags)); try!(self.write_color_table(palette)); Ok(HeaderWritten { enc: self }) } /// Writes a complete frame to the image /// /// Note: This function also writes a control extension if necessary. fn write_frame(&mut self, frame: &Frame) -> io::Result<()> { // TODO commented off to pass test in lib.rs //if frame.delay > 0 || frame.transparent.is_some() { try!(self.write_extension(ExtensionData::new_control_ext( frame.delay, frame.dispose, frame.needs_user_input, frame.transparent ))); //} try!(self.w.write_le(Block::Image as u8)); try!(self.w.write_le(frame.left)); try!(self.w.write_le(frame.top)); try!(self.w.write_le(frame.width)); try!(self.w.write_le(frame.height)); let mut flags = 0; try!(match frame.palette { Some(ref palette) => { flags |= 0b1000_0000; let num_colors = palette.len() / 3; flags |= flag_size(num_colors); try!(self.w.write_le(flags)); self.write_color_table(palette) }, None => if !self.global_palette { return Err(io::Error::new( io::ErrorKind::InvalidInput, "The GIF format requires a color palette but none was given." 
)) } else { self.w.write_le(flags) } }); self.write_image_block(&frame.buffer) } fn write_image_block(&mut self, data: &[u8]) -> io::Result<()> { { let min_code_size: u8 = flag_size((*data.iter().max().unwrap_or(&0) as usize + 1)) + 1; try!(self.w.write_le(min_code_size)); let mut bw = BlockWriter::new(&mut self.w); let mut enc = try!(lzw::Encoder::new(lzw::LsbWriter::new(&mut bw), min_code_size)); try!(enc.encode_bytes(data)); } self.w.write_le(0u8) } fn write_color_table(&mut self, table: &[u8]) -> io::Result<()> { let num_colors = table.len() / 3; let size = flag_size(num_colors); try!(self.w.write_all(&table[..num_colors * 3])); // Waste some space as of gif spec for _ in 0..((2 << size) - num_colors) { try!(self.w.write_all(&[0, 0, 0])) } Ok(()) } /// Writes an extension to the image fn write_extension(&mut self, extension: ExtensionData) -> io::Result<()> { use self::ExtensionData::*; try!(self.w.write_le(Block::Extension as u8)); match extension { Control { flags, delay, trns } => { try!(self.w.write_le(Extension::Control as u8)); try!(self.w.write_le(4u8)); try!(self.w.write_le(flags)); try!(self.w.write_le(delay)); try!(self.w.write_le(trns)); } } self.w.write_le(0u8) } /// Writes a raw extension to the image fn write_raw_extension(&mut self, func: u8, data: &[u8]) -> io::Result<()> { try!(self.w.write_le(Block::Extension as u8)); try!(self.w.write_le(func as u8)); for chunk in data.chunks(0xFF) { try!(self.w.write_le(chunk.len() as u8)); try!(self.w.write_all(chunk)); } self.w.write_le(0u8) } /// Writes the logical screen desriptor fn write_screen_desc(&mut self, flags: u8) -> io::Result<()> { try!(self.w.write_all(b"GIF89a")); try!(self.w.write_le(self.width)); try!(self.w.write_le(self.height)); try!(self.w.write_le(flags)); // packed field try!(self.w.write_le(0u8)); // bg index self.w.write_le(0u8) // aspect ratio } } // Color table size converted to flag bits fn flag_size(size: usize) -> u8 { match size { 0 ...2 => 0, 3 ...4 => 1, 5 ...8 => 2, 7 
...16 => 3, 17 ...32 => 4, 33 ...64 => 5, 65 ...128 => 6, 129...256 => 7, _ => 7 } } take writer by value /* # Minimal gif encoder ``` use std::fs::File; // Get pixel data from some source let mut pixels = unimplemented!(); // Create frame from data let frame = Frame::from_rgb(100, 100, &mut *pixels); // Create encoder let encoder = gif::Encoder::new(File::create("some.gif").unwrap(), frame.width, frame.height); // Write header to file let mut encoder = encoder.write_global_palette(&[]).unwrap(); // Write frame to file encoder.write_frame(&frame).unwrap(); ``` */ use std::cmp::min; use std::io; use std::io::prelude::*; use std::slice::bytes; use lzw; use traits::WriteBytesExt; use common::{Block, Frame, Extension, DisposalMethod}; pub enum ExtensionData { Control { flags: u8, delay: u16, trns: u8 } } impl ExtensionData { pub fn new_control_ext(delay: u16, dispose: DisposalMethod, needs_user_input: bool, trns: Option<u8>) -> ExtensionData { let mut flags = 0; let trns = match trns { Some(trns) => { flags |= 1; trns as u8 }, None => 0 }; flags |= (needs_user_input as u8) << 1; flags |= (dispose as u8) << 2; ExtensionData::Control { flags: flags, delay: delay, trns: trns } } } struct BlockWriter<'a, W: Write + 'a> { w: &'a mut W, bytes: usize, buf: [u8; 0xFF] } impl<'a, W: Write + 'a> BlockWriter<'a, W> { fn new(w: &'a mut W) -> BlockWriter<'a, W> { BlockWriter { w: w, bytes: 0, buf: [0; 0xFF] } } } impl<'a, W: Write + 'a> Write for BlockWriter<'a, W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let to_copy = min(buf.len(), 0xFF - self.bytes); bytes::copy_memory(&buf[..to_copy], &mut self.buf[self.bytes..]); self.bytes += to_copy; if self.bytes == 0xFF { self.bytes = 0; try!(self.w.write_le(0xFFu8)); try!(self.w.write_all(&self.buf)); } Ok(to_copy) } fn flush(&mut self) -> io::Result<()> { return Err(io::Error::new( io::ErrorKind::Other, "Cannot flush a BlockWriter, use `drop` instead." 
)) } } impl<'a, W: Write + 'a> Drop for BlockWriter<'a, W> { #[cfg(feature = "raii_no_panic")] fn drop(&mut self) { if self.bytes > 0 { let _ = self.w.write_le(self.bytes as u8); let _ = self.w.write_all(&self.buf[..self.bytes]); } } #[cfg(not(feature = "raii_no_panic"))] fn drop(&mut self) { if self.bytes > 0 { self.w.write_le(self.bytes as u8).unwrap(); self.w.write_all(&self.buf[..self.bytes]).unwrap(); } } } /// Wrapper for `Encoder` that indicates that the file headers have been written. pub struct HeaderWritten<W: Write> { enc: Encoder<W> } impl<W: Write> HeaderWritten<W> { /// Writes a complete frame to the image /// /// Note: This function also writes a control extention if necessary. pub fn write_frame(&mut self, frame: &Frame) -> io::Result<()> { self.enc.write_frame(frame) } /// Writes an extension to the image pub fn write_extension(&mut self, extension: ExtensionData) -> io::Result<()> { self.enc.write_extension(extension) } /// Writes a raw extension to the image pub fn write_raw_extension(&mut self, func: u8, data: &[u8]) -> io::Result<()> { self.enc.write_raw_extension(func, data) } } impl<W: Write> Drop for HeaderWritten<W> { #[cfg(feature = "raii_no_panic")] fn drop(&mut self) { let _ = self.enc.w.write_le(Block::Trailer as u8); } #[cfg(not(feature = "raii_no_panic"))] fn drop(&mut self) { self.enc.w.write_le(Block::Trailer as u8).unwrap() } } /* pub struct Frame { pub delay: u16, pub dispose: DisposalMethod, pub transparent: Option<usize>, pub needs_user_input: bool, pub top: u16, pub left: u16, pub width: u16, pub height: u16, pub interlaced: bool, pub palette: Option<Vec<u8>>, pub buffer: Vec<u8> } */ /// Minimal GIF encoder. pub struct Encoder<W: Write> { w: W, global_palette: bool, width: u16, height: u16 } impl<W: Write> Encoder<W> { /// Creates a new encoder. 
pub fn new(w: W, width: u16, height: u16) -> Self { Encoder { w: w, global_palette: false, width: width, height: height } } /// Writes the global color palette pub fn write_global_palette(mut self, palette: &[u8]) -> io::Result<HeaderWritten<W>> { self.global_palette = true; let mut flags = 0; flags |= 0b1000_0000; let num_colors = palette.len() / 3; flags |= flag_size(num_colors); flags |= flag_size(num_colors) << 4; // wtf flag try!(self.write_screen_desc(flags)); try!(self.write_color_table(palette)); Ok(HeaderWritten { enc: self }) } /// Writes a complete frame to the image /// /// Note: This function also writes a control extension if necessary. fn write_frame(&mut self, frame: &Frame) -> io::Result<()> { // TODO commented off to pass test in lib.rs //if frame.delay > 0 || frame.transparent.is_some() { try!(self.write_extension(ExtensionData::new_control_ext( frame.delay, frame.dispose, frame.needs_user_input, frame.transparent ))); //} try!(self.w.write_le(Block::Image as u8)); try!(self.w.write_le(frame.left)); try!(self.w.write_le(frame.top)); try!(self.w.write_le(frame.width)); try!(self.w.write_le(frame.height)); let mut flags = 0; try!(match frame.palette { Some(ref palette) => { flags |= 0b1000_0000; let num_colors = palette.len() / 3; flags |= flag_size(num_colors); try!(self.w.write_le(flags)); self.write_color_table(palette) }, None => if !self.global_palette { return Err(io::Error::new( io::ErrorKind::InvalidInput, "The GIF format requires a color palette but none was given." 
)) } else { self.w.write_le(flags) } }); self.write_image_block(&frame.buffer) } fn write_image_block(&mut self, data: &[u8]) -> io::Result<()> { { let min_code_size: u8 = flag_size((*data.iter().max().unwrap_or(&0) as usize + 1)) + 1; try!(self.w.write_le(min_code_size)); let mut bw = BlockWriter::new(&mut self.w); let mut enc = try!(lzw::Encoder::new(lzw::LsbWriter::new(&mut bw), min_code_size)); try!(enc.encode_bytes(data)); } self.w.write_le(0u8) } fn write_color_table(&mut self, table: &[u8]) -> io::Result<()> { let num_colors = table.len() / 3; let size = flag_size(num_colors); try!(self.w.write_all(&table[..num_colors * 3])); // Waste some space as of gif spec for _ in 0..((2 << size) - num_colors) { try!(self.w.write_all(&[0, 0, 0])) } Ok(()) } /// Writes an extension to the image fn write_extension(&mut self, extension: ExtensionData) -> io::Result<()> { use self::ExtensionData::*; try!(self.w.write_le(Block::Extension as u8)); match extension { Control { flags, delay, trns } => { try!(self.w.write_le(Extension::Control as u8)); try!(self.w.write_le(4u8)); try!(self.w.write_le(flags)); try!(self.w.write_le(delay)); try!(self.w.write_le(trns)); } } self.w.write_le(0u8) } /// Writes a raw extension to the image fn write_raw_extension(&mut self, func: u8, data: &[u8]) -> io::Result<()> { try!(self.w.write_le(Block::Extension as u8)); try!(self.w.write_le(func as u8)); for chunk in data.chunks(0xFF) { try!(self.w.write_le(chunk.len() as u8)); try!(self.w.write_all(chunk)); } self.w.write_le(0u8) } /// Writes the logical screen desriptor fn write_screen_desc(&mut self, flags: u8) -> io::Result<()> { try!(self.w.write_all(b"GIF89a")); try!(self.w.write_le(self.width)); try!(self.w.write_le(self.height)); try!(self.w.write_le(flags)); // packed field try!(self.w.write_le(0u8)); // bg index self.w.write_le(0u8) // aspect ratio } } // Color table size converted to flag bits fn flag_size(size: usize) -> u8 { match size { 0 ...2 => 0, 3 ...4 => 1, 5 ...8 => 2, 7 
...16 => 3, 17 ...32 => 4, 33 ...64 => 5, 65 ...128 => 6, 129...256 => 7, _ => 7 } }
// Copyright (c) 2018, The rav1e contributors. All rights reserved
//
// This source code is subject to the terms of the BSD 2 Clause License and
// the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
// was not distributed with this source code in the LICENSE file, you can
// obtain it at www.aomedia.org/license/software. If the Alliance for Open
// Media Patent License 1.0 was not distributed with this source code in the
// PATENTS file, you can obtain it at www.aomedia.org/license/patent.

use crate::api::*;
use crate::cdef::*;
use crate::context::*;
use crate::deblock::*;
use crate::ec::*;
use crate::lrf::*;
use crate::mc::*;
use crate::me::*;
use crate::partition::*;
use crate::plane::*;
use crate::quantize::*;
use crate::rate::QuantizerParameters;
use crate::rate::FRAME_SUBTYPE_I;
use crate::rate::FRAME_SUBTYPE_P;
use crate::rdo::*;
use crate::segmentation::*;
use crate::tiling::*;
use crate::transform::*;
use crate::util::*;
use crate::partition::PartitionType::*;
use crate::partition::RefType::*;
use crate::header::*;

use arg_enum_proc_macro::ArgEnum;
use bitstream_io::{BitWriter, BigEndian};
use bincode::{serialize, deserialize};

use std;
use std::{fmt, io, mem};
use std::io::Write;
use std::io::Read;
use std::sync::Arc;
use std::fs::File;

/// One full video frame: Y, U and V planes (in that order).
#[derive(Debug, Clone)]
pub struct Frame<T: Pixel> {
  pub planes: [Plane<T>; 3]
}

/// Temporal-delimiter OBU bytes emitted between temporal units.
pub static TEMPORAL_DELIMITER: [u8; 2] = [0x12, 0x00];

// Extra padding around each plane so that motion search / subpel filters
// can read past the visible frame edge without bounds checks.
const FRAME_MARGIN: usize = 16 + SUBPEL_FILTER_SIZE;

impl<T: Pixel> Frame<T> {
  /// Allocates a new frame for the given dimensions and chroma subsampling.
  ///
  /// Luma dimensions are rounded up to a multiple of 8 (the 8x8 block grid);
  /// chroma plane dimensions and padding are derived from the luma values by
  /// the chroma sampling period.
  pub fn new(width: usize, height: usize,
             chroma_sampling: ChromaSampling) -> Self {
    // Round luma up to the 8-pixel block grid.
    let luma_width = width.align_power_of_two(3);
    let luma_height = height.align_power_of_two(3);
    // Padding must cover a superblock plus the motion-search/filter margin.
    let luma_padding = MAX_SB_SIZE + FRAME_MARGIN;
    // Sampling period: e.g. (2, 2) for 4:2:0, (2, 1) for 4:2:2, (1, 1) for 4:4:4.
    let (chroma_sampling_period_x, chroma_sampling_period_y) =
      chroma_sampling.sampling_period();
    let chroma_width = luma_width / chroma_sampling_period_x;
    let chroma_height = luma_height / chroma_sampling_period_y;
    let chroma_padding_x = luma_padding / chroma_sampling_period_x;
    let chroma_padding_y = luma_padding / chroma_sampling_period_y;
    // Decimation = log2(sampling period), given periods are 1 or 2.
    let chroma_decimation_x = chroma_sampling_period_x - 1;
    let chroma_decimation_y = chroma_sampling_period_y - 1;
    Frame {
      planes: [
        Plane::new(
          luma_width, luma_height,
          0, 0,
          luma_padding, luma_padding
        ),
        Plane::new(
          chroma_width, chroma_height,
          chroma_decimation_x, chroma_decimation_y,
          chroma_padding_x, chroma_padding_y
        ),
        Plane::new(
          chroma_width, chroma_height,
          chroma_decimation_x, chroma_decimation_y,
          chroma_padding_x, chroma_padding_y
        )
      ]
    }
  }

  /// Pads every plane out to width `w` / height `h` by extending edge pixels.
  pub fn pad(&mut self, w: usize, h: usize) {
    for p in self.planes.iter_mut() {
      p.pad(w, h);
    }
  }

  /// Returns an immutable view over the whole frame as a single tile.
  #[inline(always)]
  pub fn as_tile(&self) -> Tile<'_, T> {
    let PlaneConfig { width, height, .. } = self.planes[0].cfg;
    Tile::new(self, TileRect { x: 0, y: 0, width, height })
  }

  /// Returns a mutable view over the whole frame as a single tile.
  #[inline(always)]
  pub fn as_tile_mut(&mut self) -> TileMut<'_, T> {
    let PlaneConfig { width, height, .. } = self.planes[0].cfg;
    TileMut::new(self, TileRect { x: 0, y: 0, width, height })
  }

  /// Returns a `PixelIter` containing the data of this frame's planes in YUV format.
  /// Each point in the `PixelIter` is a triple consisting of a Y, U, and V component.
  /// The `PixelIter` is laid out as contiguous rows, e.g. to get a given 0-indexed row
  /// you could use `data.skip(width * row_idx).take(width)`.
  ///
  /// This data retains any padding, e.g. it uses the width and height specified in
  /// the Y-plane's `cfg` struct, and not the display width and height specified in
  /// `FrameInvariants`.
pub fn iter(&self) -> PixelIter<'_, T> {
  PixelIter::new(&self.planes)
}
}

/// Iterator yielding one (Y, U, V) triple per luma pixel, in row-major order.
/// Chroma samples are looked up with the plane's own decimation, so subsampled
/// chroma values are repeated across the luma positions they cover.
#[derive(Debug)]
pub struct PixelIter<'a, T: Pixel> {
  planes: &'a [Plane<T>; 3],
  // Current luma position; `y == height()` marks exhaustion.
  y: usize,
  x: usize,
}

impl<'a, T: Pixel> PixelIter<'a, T> {
  pub fn new(planes: &'a [Plane<T>; 3]) -> Self {
    PixelIter {
      planes,
      y: 0,
      x: 0,
    }
  }

  // Luma plane width/height (includes any alignment padding).
  fn width(&self) -> usize {
    self.planes[0].cfg.width
  }

  fn height(&self) -> usize {
    self.planes[0].cfg.height
  }
}

impl<'a, T: Pixel> Iterator for PixelIter<'a, T> {
  type Item = (T, T, T);

  fn next(&mut self) -> Option<<Self as Iterator>::Item> {
    // FIX: the previous condition (`y == height - 1 && x == width - 1`)
    // returned `None` *before* yielding the bottom-right pixel, so the last
    // pixel of the frame was silently dropped. The advance logic below sets
    // `x = 0, y += 1` after the final column, so `y == height` is the correct
    // (and only) exhaustion state.
    if self.y == self.height() {
      return None;
    }
    let pixel = (
      self.planes[0].p(self.x, self.y),
      self.planes[1].p(self.x >> self.planes[1].cfg.xdec, self.y >> self.planes[1].cfg.ydec),
      self.planes[2].p(self.x >> self.planes[2].cfg.xdec, self.y >> self.planes[2].cfg.ydec),
    );
    // Advance in row-major order.
    if self.x == self.width() - 1 {
      self.x = 0;
      self.y += 1;
    } else {
      self.x += 1;
    }
    Some(pixel)
  }
}

/// A reconstructed reference frame plus the side data needed to predict from it.
#[derive(Debug, Clone)]
pub struct ReferenceFrame<T: Pixel> {
  pub order_hint: u32,
  pub frame: Frame<T>,
  // Downscaled copies of the input luma, used by coarse motion estimation.
  pub input_hres: Plane<T>,
  pub input_qres: Plane<T>,
  pub cdfs: CDFContext,
  pub frame_mvs: Vec<FrameMotionVectors>,
}

/// The decoder-model reference slots (one entry per slot, `None` if empty).
#[derive(Debug, Clone, Default)]
pub struct ReferenceFramesSet<T: Pixel> {
  pub frames: [Option<Arc<ReferenceFrame<T>>>; (REF_FRAMES as usize)],
  pub deblock: [DeblockState; (REF_FRAMES as usize)]
}

impl<T: Pixel> ReferenceFramesSet<T> {
  pub fn new() -> Self {
    Self {
      frames: Default::default(),
      deblock: Default::default()
    }
  }
}

const MAX_NUM_TEMPORAL_LAYERS: usize = 8;
const MAX_NUM_SPATIAL_LAYERS: usize = 4;
const MAX_NUM_OPERATING_POINTS: usize =
  MAX_NUM_TEMPORAL_LAYERS * MAX_NUM_SPATIAL_LAYERS;

#[derive(ArgEnum, Copy, Clone, Debug, PartialEq)]
#[repr(C)]
pub enum Tune {
  Psnr,
  Psychovisual
}

impl Default for Tune {
  fn default() -> Self {
    Tune::Psychovisual
  }
}

#[derive(Copy, Clone, Debug)]
pub struct Sequence { // OBU Sequence header of AV1
  pub profile: u8,
  pub num_bits_width: u32,
  pub num_bits_height: u32,
  pub bit_depth:
usize, pub chroma_sampling: ChromaSampling, pub chroma_sample_position: ChromaSamplePosition, pub pixel_range: PixelRange, pub color_description: Option<ColorDescription>, pub mastering_display: Option<MasteringDisplay>, pub content_light: Option<ContentLight>, pub max_frame_width: u32, pub max_frame_height: u32, pub frame_id_numbers_present_flag: bool, pub frame_id_length: u32, pub delta_frame_id_length: u32, pub use_128x128_superblock: bool, pub order_hint_bits_minus_1: u32, pub force_screen_content_tools: u32, // 0 - force off // 1 - force on // 2 - adaptive pub force_integer_mv: u32, // 0 - Not to force. MV can be in 1/4 or 1/8 // 1 - force to integer // 2 - adaptive pub still_picture: bool, // Video is a single frame still picture pub reduced_still_picture_hdr: bool, // Use reduced header for still picture pub enable_intra_edge_filter: bool, // enables/disables corner/edge/upsampling pub enable_interintra_compound: bool, // enables/disables interintra_compound pub enable_masked_compound: bool, // enables/disables masked compound pub enable_dual_filter: bool, // 0 - disable dual interpolation filter // 1 - enable vert/horiz filter selection pub enable_order_hint: bool, // 0 - disable order hint, and related tools // jnt_comp, ref_frame_mvs, frame_sign_bias // if 0, enable_jnt_comp and // enable_ref_frame_mvs must be set zs 0. pub enable_jnt_comp: bool, // 0 - disable joint compound modes // 1 - enable it pub enable_ref_frame_mvs: bool, // 0 - disable ref frame mvs // 1 - enable it pub enable_warped_motion: bool, // 0 - disable warped motion for sequence // 1 - enable it for the sequence pub enable_superres: bool,// 0 - Disable superres for the sequence, and disable // transmitting per-frame superres enabled flag. // 1 - Enable superres for the sequence, and also // enable per-frame flag to denote if superres is // enabled for that frame. 
pub enable_cdef: bool, // To turn on/off CDEF pub enable_restoration: bool, // To turn on/off loop restoration pub operating_points_cnt_minus_1: usize, pub operating_point_idc: [u16; MAX_NUM_OPERATING_POINTS], pub display_model_info_present_flag: bool, pub decoder_model_info_present_flag: bool, pub level: [[usize; 2]; MAX_NUM_OPERATING_POINTS], // minor, major pub tier: [usize; MAX_NUM_OPERATING_POINTS], // seq_tier in the spec. One bit: 0 // or 1. pub film_grain_params_present: bool, pub separate_uv_delta_q: bool, } impl Sequence { pub fn new(config: &EncoderConfig) -> Sequence { let width_bits = 32 - (config.width as u32).leading_zeros(); let height_bits = 32 - (config.height as u32).leading_zeros(); assert!(width_bits <= 16); assert!(height_bits <= 16); let profile = if config.bit_depth == 12 || config.chroma_sampling == ChromaSampling::Cs422 { 2 } else if config.chroma_sampling == ChromaSampling::Cs444 { 1 } else { 0 }; let mut operating_point_idc = [0 as u16; MAX_NUM_OPERATING_POINTS]; let mut level = [[1, 2 as usize]; MAX_NUM_OPERATING_POINTS]; let mut tier = [0 as usize; MAX_NUM_OPERATING_POINTS]; for i in 0..MAX_NUM_OPERATING_POINTS { operating_point_idc[i] = 0; level[i][0] = 1; // minor level[i][1] = 2; // major tier[i] = 0; } Sequence { profile, num_bits_width: width_bits, num_bits_height: height_bits, bit_depth: config.bit_depth, chroma_sampling: config.chroma_sampling, chroma_sample_position: config.chroma_sample_position, pixel_range: config.pixel_range, color_description: config.color_description, mastering_display: config.mastering_display, content_light: config.content_light, max_frame_width: config.width as u32, max_frame_height: config.height as u32, frame_id_numbers_present_flag: false, frame_id_length: 0, delta_frame_id_length: 0, use_128x128_superblock: false, order_hint_bits_minus_1: 5, force_screen_content_tools: 0, force_integer_mv: 2, still_picture: false, reduced_still_picture_hdr: false, enable_intra_edge_filter: false, 
enable_interintra_compound: false, enable_masked_compound: false, enable_dual_filter: false, enable_order_hint: true, enable_jnt_comp: false, enable_ref_frame_mvs: false, enable_warped_motion: false, enable_superres: false, enable_cdef: config.speed_settings.cdef, enable_restoration: config.chroma_sampling != ChromaSampling::Cs422 && config.chroma_sampling != ChromaSampling::Cs444, // FIXME: not working yet operating_points_cnt_minus_1: 0, operating_point_idc, display_model_info_present_flag: false, decoder_model_info_present_flag: false, level, tier, film_grain_params_present: false, separate_uv_delta_q: true, } } pub fn get_relative_dist(&self, a: u32, b: u32) -> i32 { let diff = a as i32 - b as i32; let m = 1 << self.order_hint_bits_minus_1; (diff & (m - 1)) - (diff & m) } pub fn get_skip_mode_allowed<T: Pixel>(&self, fi: &FrameInvariants<T>, reference_select: bool) -> bool { if fi.intra_only || !reference_select || !self.enable_order_hint { return false; } let mut forward_idx: isize = -1; let mut backward_idx: isize = -1; let mut forward_hint = 0; let mut backward_hint = 0; for i in 0..INTER_REFS_PER_FRAME { if let Some(ref rec) = fi.rec_buffer.frames[fi.ref_frames[i] as usize] { let ref_hint = rec.order_hint; if self.get_relative_dist(ref_hint, fi.order_hint) < 0 { if forward_idx < 0 || self.get_relative_dist(ref_hint, forward_hint) > 0 { forward_idx = i as isize; forward_hint = ref_hint; } } else if self.get_relative_dist(ref_hint, fi.order_hint) > 0 && (backward_idx < 0 || self.get_relative_dist(ref_hint, backward_hint) > 0) { backward_idx = i as isize; backward_hint = ref_hint; } } } if forward_idx < 0 { false } else if backward_idx >= 0 { // set skip_mode_frame true } else { let mut second_forward_idx: isize = -1; let mut second_forward_hint = 0; for i in 0..INTER_REFS_PER_FRAME { if let Some(ref rec) = fi.rec_buffer.frames[fi.ref_frames[i] as usize] { let ref_hint = rec.order_hint; if self.get_relative_dist(ref_hint, forward_hint) < 0 && 
(second_forward_idx < 0 || self.get_relative_dist(ref_hint, second_forward_hint) > 0) { second_forward_idx = i as isize; second_forward_hint = ref_hint; } } } // TODO: Set skip_mode_frame, when second_forward_idx is not less than 0. second_forward_idx >= 0 } } #[inline(always)] pub fn sb_size_log2(&self) -> usize { if self.use_128x128_superblock { 7 } else { 6 } } #[inline(always)] pub fn sb_size(&self) -> usize { 1 << self.sb_size_log2() } } #[derive(Debug)] pub struct FrameState<T: Pixel> { pub sb_size_log2: usize, pub input: Arc<Frame<T>>, pub input_hres: Plane<T>, // half-resolution version of input luma pub input_qres: Plane<T>, // quarter-resolution version of input luma pub rec: Frame<T>, pub cdfs: CDFContext, pub deblock: DeblockState, pub segmentation: SegmentationState, pub restoration: RestorationState, pub frame_mvs: Vec<FrameMotionVectors>, pub t: RDOTracker, } impl<T: Pixel> FrameState<T> { pub fn new(fi: &FrameInvariants<T>) -> Self { // TODO(negge): Use fi.cfg.chroma_sampling when we store VideoDetails in FrameInvariants FrameState::new_with_frame(fi, Arc::new(Frame::new( fi.width, fi.height, fi.sequence.chroma_sampling))) } pub fn new_with_frame(fi: &FrameInvariants<T>, frame: Arc<Frame<T>>) -> Self { let rs = RestorationState::new(fi, &frame); let luma_width = frame.planes[0].cfg.width; let luma_height = frame.planes[0].cfg.height; let luma_padding_x = frame.planes[0].cfg.xpad; let luma_padding_y = frame.planes[0].cfg.ypad; Self { sb_size_log2: fi.sb_size_log2(), input: frame, input_hres: Plane::new(luma_width / 2, luma_height / 2, 1, 1, luma_padding_x / 2, luma_padding_y / 2), input_qres: Plane::new(luma_width / 4, luma_height / 4, 2, 2, luma_padding_x / 4, luma_padding_y / 4), rec: Frame::new(luma_width, luma_height, fi.sequence.chroma_sampling), cdfs: CDFContext::new(0), deblock: Default::default(), segmentation: Default::default(), restoration: rs, frame_mvs: { let mut vec = Vec::with_capacity(REF_FRAMES); for _ in 0..REF_FRAMES { 
vec.push(FrameMotionVectors::new(fi.w_in_b, fi.h_in_b)); } vec }, t: RDOTracker::new() } } #[inline(always)] pub fn as_tile_state_mut(&mut self) -> TileStateMut<'_, T> { let PlaneConfig { width, height, .. } = self.rec.planes[0].cfg; let sbo_0 = SuperBlockOffset { x: 0, y: 0 }; TileStateMut::new(self, sbo_0, self.sb_size_log2, width, height) } } #[derive(Copy, Clone, Debug)] pub struct DeblockState { pub levels: [u8; PLANES+1], // Y vertical edges, Y horizontal, U, V pub sharpness: u8, pub deltas_enabled: bool, pub delta_updates_enabled: bool, pub ref_deltas: [i8; REF_FRAMES], pub mode_deltas: [i8; 2], pub block_deltas_enabled: bool, pub block_delta_shift: u8, pub block_delta_multi: bool, } impl Default for DeblockState { fn default() -> Self { DeblockState { levels: [8,8,4,4], sharpness: 0, deltas_enabled: false, // requires delta_q_enabled delta_updates_enabled: false, ref_deltas: [1, 0, 0, 0, 0, -1, -1, -1], mode_deltas: [0, 0], block_deltas_enabled: false, block_delta_shift: 0, block_delta_multi: false } } } #[derive(Copy, Clone, Debug)] pub struct SegmentationState { pub enabled: bool, pub update_data: bool, pub update_map: bool, pub preskip: bool, pub last_active_segid: u8, pub features: [[bool; SegLvl::SEG_LVL_MAX as usize]; 8], pub data: [[i16; SegLvl::SEG_LVL_MAX as usize]; 8], } impl Default for SegmentationState { fn default() -> Self { SegmentationState { enabled: false, update_data: false, update_map: false, preskip: true, last_active_segid: 0, features: [[false; SegLvl::SEG_LVL_MAX as usize]; 8], data: [[0; SegLvl::SEG_LVL_MAX as usize]; 8], } } } // Frame Invariants are invariant inside a frame #[allow(dead_code)] #[derive(Debug, Clone)] pub struct FrameInvariants<T: Pixel> { pub sequence: Sequence, pub width: usize, pub height: usize, pub sb_width: usize, pub sb_height: usize, pub w_in_b: usize, pub h_in_b: usize, pub number: u64, pub order_hint: u32, pub show_frame: bool, pub showable_frame: bool, pub error_resilient: bool, pub intra_only: bool, 
pub allow_high_precision_mv: bool,
  pub frame_type: FrameType,
  pub show_existing_frame: bool,
  pub frame_to_show_map_idx: u32,
  pub use_reduced_tx_set: bool,
  pub reference_mode: ReferenceMode,
  pub use_prev_frame_mvs: bool,
  pub min_partition_size: BlockSize,
  pub globalmv_transformation_type: [GlobalMVMode; INTER_REFS_PER_FRAME],
  pub num_tg: usize,
  pub large_scale_tile: bool,
  pub disable_cdf_update: bool,
  pub allow_screen_content_tools: u32,
  pub force_integer_mv: u32,
  pub primary_ref_frame: u32,
  pub refresh_frame_flags: u32,  // a bitmask that specifies which
  // reference frame slots will be updated with the current frame
  // after it is decoded.
  pub allow_intrabc: bool,
  pub use_ref_frame_mvs: bool,
  pub is_filter_switchable: bool,
  pub is_motion_mode_switchable: bool,
  pub disable_frame_end_update_cdf: bool,
  pub allow_warped_motion: bool,
  // CDEF filter parameters written into the frame header.
  pub cdef_damping: u8,
  pub cdef_bits: u8,
  pub cdef_y_strengths: [u8; 8],
  pub cdef_uv_strengths: [u8; 8],
  pub delta_q_present: bool,
  pub config: EncoderConfig,
  // Reference slot index used for each of the INTER_REFS_PER_FRAME references.
  pub ref_frames: [u8; INTER_REFS_PER_FRAME],
  pub ref_frame_sign_bias: [bool; INTER_REFS_PER_FRAME],
  pub rec_buffer: ReferenceFramesSet<T>,
  // Quantizer indices: base plus per-plane DC/AC deltas.
  pub base_q_idx: u8,
  pub dc_delta_q: [i8; 3],
  pub ac_delta_q: [i8; 3],
  // RDO lambdas (me_lambda is derived as sqrt(lambda) in set_quantizers).
  pub lambda: f64,
  pub me_lambda: f64,
  pub me_range_scale: u8,
  pub use_tx_domain_distortion: bool,
  pub use_tx_domain_rate: bool,
  // `None` for intra-only frames; filled by apply_inter_props_cfg for inter frames.
  pub inter_cfg: Option<InterPropsConfig>,
  pub enable_early_exit: bool,
  pub tx_mode_select: bool,
}

// Derive level within pyramid for a frame with a given coding order position
// For example, with a pyramid of depth 2, the 2 least significant bits of the
// position determine the level:
// 00 -> 0
// 01 -> 2
// 10 -> 1
// 11 -> 2
pub(crate) fn pos_to_lvl(pos: u64, pyramid_depth: u64) -> u64 {
  // The OR with (1 << pyramid_depth) caps trailing_zeros at pyramid_depth,
  // so position 0 maps to level 0 rather than underflowing.
  pyramid_depth - (pos | (1 << pyramid_depth)).trailing_zeros() as u64
}

impl<T: Pixel> FrameInvariants<T> {
  #[allow(clippy::erasing_op, clippy::identity_op)]
  pub fn new(config: EncoderConfig, sequence: Sequence) -> Self {
    assert!(sequence.bit_depth <=
mem::size_of::<T>() * 8, "bit depth cannot fit into u8"); // Speed level decides the minimum partition size, i.e. higher speed --> larger min partition size, // with exception that SBs on right or bottom frame borders split down to BLOCK_4X4. // At speed = 0, RDO search is exhaustive. let min_partition_size = config.speed_settings.min_block_size; let use_reduced_tx_set = config.speed_settings.reduced_tx_set; let use_tx_domain_distortion = config.tune == Tune::Psnr && config.speed_settings.tx_domain_distortion; let use_tx_domain_rate = config.speed_settings.tx_domain_rate; let w_in_b = 2 * config.width.align_power_of_two_and_shift(3); // MiCols, ((width+7)/8)<<3 >> MI_SIZE_LOG2 let h_in_b = 2 * config.height.align_power_of_two_and_shift(3); // MiRows, ((height+7)/8)<<3 >> MI_SIZE_LOG2 Self { sequence, width: config.width, height: config.height, sb_width: config.width.align_power_of_two_and_shift(6), sb_height: config.height.align_power_of_two_and_shift(6), w_in_b, h_in_b, number: 0, order_hint: 0, show_frame: true, showable_frame: true, error_resilient: false, intra_only: false, allow_high_precision_mv: false, frame_type: FrameType::KEY, show_existing_frame: false, frame_to_show_map_idx: 0, use_reduced_tx_set, reference_mode: ReferenceMode::SINGLE, use_prev_frame_mvs: false, min_partition_size, globalmv_transformation_type: [GlobalMVMode::IDENTITY; INTER_REFS_PER_FRAME], num_tg: 1, large_scale_tile: false, disable_cdf_update: false, allow_screen_content_tools: 0, force_integer_mv: 0, primary_ref_frame: PRIMARY_REF_NONE, refresh_frame_flags: 0, allow_intrabc: false, use_ref_frame_mvs: false, is_filter_switchable: false, is_motion_mode_switchable: false, // 0: only the SIMPLE motion mode will be used. 
disable_frame_end_update_cdf: false, allow_warped_motion: false, cdef_damping: 3, cdef_bits: 3, cdef_y_strengths: [0*4+0, 1*4+0, 2*4+1, 3*4+1, 5*4+2, 7*4+3, 10*4+3, 13*4+3], cdef_uv_strengths: [0*4+0, 1*4+0, 2*4+1, 3*4+1, 5*4+2, 7*4+3, 10*4+3, 13*4+3], delta_q_present: false, ref_frames: [0; INTER_REFS_PER_FRAME], ref_frame_sign_bias: [false; INTER_REFS_PER_FRAME], rec_buffer: ReferenceFramesSet::new(), base_q_idx: config.quantizer as u8, dc_delta_q: [0; 3], ac_delta_q: [0; 3], lambda: 0.0, me_lambda: 0.0, me_range_scale: 1, use_tx_domain_distortion, use_tx_domain_rate, inter_cfg: None, enable_early_exit: true, config, tx_mode_select : false, } } pub fn new_key_frame(previous_fi: &Self, segment_start_frame: u64) -> Self { let mut fi = previous_fi.clone(); fi.frame_type = FrameType::KEY; fi.intra_only = true; fi.inter_cfg = None; fi.order_hint = 0; fi.refresh_frame_flags = ALL_REF_FRAMES_MASK; fi.show_frame = true; fi.show_existing_frame = false; fi.frame_to_show_map_idx = 0; fi.primary_ref_frame = PRIMARY_REF_NONE; fi.number = segment_start_frame; for i in 0..INTER_REFS_PER_FRAME { fi.ref_frames[i] = 0; } fi.tx_mode_select = fi.config.speed_settings.rdo_tx_decision; // FIXME: tx partition for intra not supported for chroma 422 if fi.tx_mode_select && fi.sequence.chroma_sampling == ChromaSampling::Cs422 { fi.tx_mode_select = false; } fi } fn apply_inter_props_cfg(&mut self, idx_in_segment: u64) { let reorder = !self.config.low_latency; let multiref = reorder || self.config.speed_settings.multiref; let pyramid_depth = if reorder { 2 } else { 0 }; let group_src_len = 1 << pyramid_depth; let group_len = group_src_len + if reorder { pyramid_depth } else { 0 }; let idx_in_group = (idx_in_segment - 1) % group_len; let group_idx = (idx_in_segment - 1) / group_len; self.inter_cfg = Some(InterPropsConfig { reorder, multiref, pyramid_depth, group_src_len, group_len, idx_in_group, group_idx, }) } /// Returns the created FrameInvariants along with a bool indicating success. 
/// This interface provides simpler usage, because we always need the produced /// FrameInvariants regardless of success or failure. pub fn new_inter_frame( previous_fi: &Self, segment_start_frame: u64, idx_in_segment: u64, next_keyframe: u64 ) -> (Self, bool) { let mut fi = previous_fi.clone(); fi.frame_type = FrameType::INTER; fi.intra_only = false; fi.apply_inter_props_cfg(idx_in_segment); fi.tx_mode_select = false; let inter_cfg = fi.inter_cfg.unwrap(); fi.order_hint = (inter_cfg.group_src_len * inter_cfg.group_idx + if inter_cfg.reorder && inter_cfg.idx_in_group < inter_cfg.pyramid_depth { inter_cfg.group_src_len >> inter_cfg.idx_in_group } else { inter_cfg.idx_in_group - inter_cfg.pyramid_depth + 1 }) as u32; let number = segment_start_frame + fi.order_hint as u64; if number >= next_keyframe { fi.show_existing_frame = false; fi.show_frame = false; return (fi, false); } let lvl = if !inter_cfg.reorder { 0 } else if inter_cfg.idx_in_group < inter_cfg.pyramid_depth { inter_cfg.idx_in_group } else { pos_to_lvl(inter_cfg.idx_in_group - inter_cfg.pyramid_depth + 1, inter_cfg.pyramid_depth) }; // Frames with lvl == 0 are stored in slots 0..4 and frames with higher values // of lvl in slots 4..8 let slot_idx = if lvl == 0 { (fi.order_hint >> inter_cfg.pyramid_depth) % 4 as u32 } else { 3 + lvl as u32 }; fi.show_frame = !inter_cfg.reorder || inter_cfg.idx_in_group >= inter_cfg.pyramid_depth; fi.show_existing_frame = fi.show_frame && inter_cfg.reorder && (inter_cfg.idx_in_group - inter_cfg.pyramid_depth + 1).count_ones() == 1 && inter_cfg.idx_in_group != inter_cfg.pyramid_depth; fi.frame_to_show_map_idx = slot_idx; fi.refresh_frame_flags = if fi.show_existing_frame { 0 } else { 1 << slot_idx }; let second_ref_frame = if !inter_cfg.multiref { LAST_FRAME // make second_ref_frame match first } else if !inter_cfg.reorder || inter_cfg.idx_in_group == 0 { LAST2_FRAME } else { ALTREF_FRAME }; let ref_in_previous_group = LAST3_FRAME; // reuse probability estimates from 
previous frames only in top level frames fi.primary_ref_frame = if lvl > 0 { PRIMARY_REF_NONE } else { (ref_in_previous_group.to_index()) as u32 }; for i in 0..INTER_REFS_PER_FRAME { fi.ref_frames[i] = if lvl == 0 { if i == second_ref_frame.to_index() { (slot_idx + 4 - 2) as u8 % 4 } else { (slot_idx + 4 - 1) as u8 % 4 } } else if i == second_ref_frame.to_index() { let oh = fi.order_hint + (inter_cfg.group_src_len as u32 >> lvl); let lvl2 = pos_to_lvl(oh as u64, inter_cfg.pyramid_depth); if lvl2 == 0 { ((oh >> inter_cfg.pyramid_depth) % 4) as u8 } else { 3 + lvl2 as u8 } } else if i == ref_in_previous_group.to_index() { if lvl == 0 { (slot_idx + 4 - 1) as u8 % 4 } else { slot_idx as u8 } } else { let oh = fi.order_hint - (inter_cfg.group_src_len as u32 >> lvl); let lvl1 = pos_to_lvl(oh as u64, inter_cfg.pyramid_depth); if lvl1 == 0 { ((oh >> inter_cfg.pyramid_depth) % 4) as u8 } else { 3 + lvl1 as u8 } } } fi.reference_mode = if inter_cfg.multiref && inter_cfg.reorder && inter_cfg.idx_in_group != 0 { ReferenceMode::SELECT } else { ReferenceMode::SINGLE }; fi.number = number; fi.me_range_scale = (inter_cfg.group_src_len >> lvl) as u8; (fi, true) } pub fn get_frame_subtype(&self) -> usize { if self.frame_type == FrameType::KEY { FRAME_SUBTYPE_I } else { let inter_cfg = self.inter_cfg.unwrap(); let lvl = if !inter_cfg.reorder { 0 } else if inter_cfg.idx_in_group < inter_cfg.pyramid_depth { inter_cfg.idx_in_group } else { pos_to_lvl( inter_cfg.idx_in_group - inter_cfg.pyramid_depth + 1, inter_cfg.pyramid_depth ) }; FRAME_SUBTYPE_P + (lvl as usize) } } pub fn set_quantizers(&mut self, qps: &QuantizerParameters) { self.base_q_idx = qps.ac_qi[0]; if self.frame_type != FrameType::KEY { self.cdef_bits = 3 - ((self.base_q_idx.max(128) - 128) >> 5); } else { self.cdef_bits = 3; } let base_q_idx = self.base_q_idx as i32; for pi in 0..3 { debug_assert!(qps.dc_qi[pi] as i32 - base_q_idx >= -128); debug_assert!((qps.dc_qi[pi] as i32 - base_q_idx) < 128); 
debug_assert!(qps.ac_qi[pi] as i32 - base_q_idx >= -128); debug_assert!((qps.ac_qi[pi] as i32 - base_q_idx) < 128); self.dc_delta_q[pi] = (qps.dc_qi[pi] as i32 - base_q_idx) as i8; self.ac_delta_q[pi] = (qps.ac_qi[pi] as i32 - base_q_idx) as i8; } self.lambda = qps.lambda * ((1 << (2 * (self.sequence.bit_depth - 8))) as f64); self.me_lambda = self.lambda.sqrt(); } #[inline(always)] pub fn sb_size_log2(&self) -> usize { self.sequence.sb_size_log2() } #[inline(always)] pub fn sb_size(&self) -> usize { self.sequence.sb_size() } } impl<T: Pixel> fmt::Display for FrameInvariants<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Frame {} - {}", self.number, self.frame_type) } } #[derive(Debug, Clone, Copy)] pub struct InterPropsConfig { pub reorder: bool, pub multiref: bool, pub pyramid_depth: u64, pub group_src_len: u64, pub group_len: u64, pub idx_in_group: u64, pub group_idx: u64, } pub fn write_temporal_delimiter( packet: &mut dyn io::Write ) -> io::Result<()> { packet.write_all(&TEMPORAL_DELIMITER)?; Ok(()) } fn write_obus<T: Pixel>( packet: &mut dyn io::Write, fi: &mut FrameInvariants<T>, fs: &FrameState<T> ) -> io::Result<()> { let obu_extension = 0 as u32; let mut buf1 = Vec::new(); // write sequence header obu if KEY_FRAME, preceded by 4-byte size if fi.frame_type == FrameType::KEY { let mut buf2 = Vec::new(); { let mut bw2 = BitWriter::endian(&mut buf2, BigEndian); bw2.write_sequence_header_obu(fi)?; bw2.write_bit(true)?; // trailing bit bw2.byte_align()?; } { let mut bw1 = BitWriter::endian(&mut buf1, BigEndian); bw1.write_obu_header(ObuType::OBU_SEQUENCE_HEADER, obu_extension)?; } packet.write_all(&buf1).unwrap(); buf1.clear(); { let mut bw1 = BitWriter::endian(&mut buf1, BigEndian); bw1.write_uleb128(buf2.len() as u64)?; } packet.write_all(&buf1).unwrap(); buf1.clear(); packet.write_all(&buf2).unwrap(); buf2.clear(); if fi.sequence.content_light.is_some() { let mut bw1 = BitWriter::endian(&mut buf1, BigEndian); 
bw1.write_metadata_obu(ObuMetaType::OBU_META_HDR_CLL, fi.sequence)?;
      packet.write_all(&buf1).unwrap();
      buf1.clear();
    }
    // Mastering-display metadata OBU, if configured.
    if fi.sequence.mastering_display.is_some() {
      let mut bw1 = BitWriter::endian(&mut buf1, BigEndian);
      bw1.write_metadata_obu(ObuMetaType::OBU_META_HDR_MDCV, fi.sequence)?;
      packet.write_all(&buf1).unwrap();
      buf1.clear();
    }
  }

  // Frame header OBU: the payload is built first (buf2) so its length can be
  // emitted as a uleb128 size field between the OBU header and the payload.
  let mut buf2 = Vec::new();
  {
    let mut bw2 = BitWriter::endian(&mut buf2, BigEndian);
    bw2.write_frame_header_obu(fi, fs)?;
  }
  {
    let mut bw1 = BitWriter::endian(&mut buf1, BigEndian);
    bw1.write_obu_header(ObuType::OBU_FRAME_HEADER, obu_extension)?;
  }
  packet.write_all(&buf1).unwrap();
  buf1.clear();
  {
    let mut bw1 = BitWriter::endian(&mut buf1, BigEndian);
    bw1.write_uleb128(buf2.len() as u64)?;
  }
  packet.write_all(&buf1).unwrap();
  buf1.clear();
  packet.write_all(&buf2).unwrap();
  buf2.clear();
  Ok(())
}

/// Write into `dst` the difference between the blocks at `src1` and `src2`
fn diff<T: Pixel>(
  dst: &mut [i16], src1: &PlaneRegion<'_, T>, src2: &PlaneRegion<'_, T>,
  width: usize, height: usize,
) {
  // Row-by-row subtraction; `take(height)` bounds the walk to the block height
  // even if `dst` has room for more rows.
  for ((l, s1), s2) in dst.chunks_mut(width).take(height)
    .zip(src1.rows_iter())
    .zip(src2.rows_iter()) {
    for ((r, v1), v2) in l.iter_mut().zip(s1).zip(s2) {
      *r = i16::cast_from(*v1) - i16::cast_from(*v2);
    }
  }
}

// Effective quantizer index for a block: the frame base qidx adjusted by the
// block's segment ALT_Q delta (if that segmentation feature is enabled),
// clamped to the valid 0..=255 range.
fn get_qidx<T: Pixel>(fi: &FrameInvariants<T>, ts: &TileStateMut<'_, T>,
                      cw: &ContextWriter, tile_bo: BlockOffset) -> u8 {
  let mut qidx = fi.base_q_idx;
  let sidx = cw.bc.blocks[tile_bo].segmentation_idx as usize;
  if ts.segmentation.features[sidx][SegLvl::SEG_LVL_ALT_Q as usize] {
    let delta = ts.segmentation.data[sidx][SegLvl::SEG_LVL_ALT_Q as usize];
    qidx = clamp((qidx as i16) + delta, 0, 255) as u8;
  }
  qidx
}

// For a transform block,
// predict, transform, quantize, write coefficients to a bitstream,
// dequantize, inverse-transform.
pub fn encode_tx_block<T: Pixel>( fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>, cw: &mut ContextWriter, w: &mut dyn Writer, p: usize, tile_bo: BlockOffset, mode: PredictionMode, tx_size: TxSize, tx_type: TxType, plane_bsize: BlockSize, po: PlaneOffset, skip: bool, ac: &[i16], alpha: i16, rdo_type: RDOType, for_rdo_use: bool ) -> (bool, i64) { let qidx = get_qidx(fi, ts, cw, tile_bo); let PlaneConfig { xdec, ydec, .. } = ts.input.planes[p].cfg; let tile_rect = ts.tile_rect().decimated(xdec, ydec); let rec = &mut ts.rec.planes[p]; let area = Area::BlockStartingAt { bo: tile_bo }; assert!(tx_size.sqr() <= TxSize::TX_32X32 || tx_type == TxType::DCT_DCT); if mode.is_intra() { let bit_depth = fi.sequence.bit_depth; let edge_buf = get_intra_edges(&rec.as_const(), po, tx_size, bit_depth, Some(mode)); mode.predict_intra(tile_rect, &mut rec.subregion_mut(area), tx_size, bit_depth, &ac, alpha, &edge_buf); } if skip { return (false, -1); } let mut residual_storage: AlignedArray<[i16; 64 * 64]> = UninitializedAlignedArray(); let mut coeffs_storage: AlignedArray<[i32; 64 * 64]> = UninitializedAlignedArray(); let mut qcoeffs_storage: AlignedArray<[i32; 64 * 64]> = UninitializedAlignedArray(); let mut rcoeffs_storage: AlignedArray<[i32; 64 * 64]> = UninitializedAlignedArray(); let residual = &mut residual_storage.array[..tx_size.area()]; let coeffs = &mut coeffs_storage.array[..tx_size.area()]; let qcoeffs = &mut qcoeffs_storage.array[..tx_size.area()]; let rcoeffs = &mut rcoeffs_storage.array[..tx_size.area()]; diff( residual, &ts.input_tile.planes[p].subregion(area), &rec.subregion(area), tx_size.width(), tx_size.height()); forward_transform(residual, coeffs, tx_size.width(), tx_size, tx_type, fi.sequence.bit_depth); let coded_tx_size = av1_get_coded_tx_size(tx_size).area(); ts.qc.quantize(coeffs, qcoeffs, coded_tx_size); let tell_coeffs = w.tell_frac(); let has_coeff = if !for_rdo_use || rdo_type.needs_coeff_rate() { cw.write_coeffs_lv_map(w, p, tile_bo, &qcoeffs, 
mode, tx_size, tx_type, plane_bsize, xdec, ydec, fi.use_reduced_tx_set) } else { true }; let cost_coeffs = w.tell_frac() - tell_coeffs; // Reconstruct dequantize(qidx, qcoeffs, rcoeffs, tx_size, fi.sequence.bit_depth, fi.dc_delta_q[p], fi.ac_delta_q[p]); let mut tx_dist: i64 = -1; if !fi.use_tx_domain_distortion || !for_rdo_use { inverse_transform_add(rcoeffs, &mut rec.subregion_mut(area), tx_size, tx_type, fi.sequence.bit_depth); } if rdo_type.needs_tx_dist() { // Store tx-domain distortion of this block tx_dist = coeffs .iter() .zip(rcoeffs) .map(|(a, b)| { let c = *a as i32 - *b as i32; (c * c) as u64 }).sum::<u64>() as i64; let tx_dist_scale_bits = 2*(3 - get_log_tx_scale(tx_size)); let tx_dist_scale_rounding_offset = 1 << (tx_dist_scale_bits - 1); tx_dist = (tx_dist + tx_dist_scale_rounding_offset) >> tx_dist_scale_bits; } if fi.config.train_rdo { ts.rdo.add_rate(fi.base_q_idx, tx_size, tx_dist as u64, cost_coeffs as u64); } if rdo_type == RDOType::TxDistEstRate { // look up rate and distortion in table let estimated_rate = estimate_rate(fi.base_q_idx, tx_size, tx_dist as u64); w.add_bits_frac(estimated_rate as u32); } (has_coeff, tx_dist) } pub fn motion_compensate<T: Pixel>( fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>, cw: &mut ContextWriter, luma_mode: PredictionMode, ref_frames: [RefType; 2], mvs: [MotionVector; 2], bsize: BlockSize, tile_bo: BlockOffset, luma_only: bool ) { debug_assert!(!luma_mode.is_intra()); let PlaneConfig { xdec: u_xdec, ydec: u_ydec, .. } = ts.input.planes[1].cfg; // Inter mode prediction can take place once for a whole partition, // instead of each tx-block. let num_planes = 1 + if !luma_only && has_chroma(tile_bo, bsize, u_xdec, u_ydec) { 2 } else { 0 }; let luma_tile_rect = ts.tile_rect(); for p in 0..num_planes { let plane_bsize = if p == 0 { bsize } else { get_plane_block_size(bsize, u_xdec, u_ydec) }; let rec = &mut ts.rec.planes[p]; let po = tile_bo.plane_offset(&rec.plane_cfg); let &PlaneConfig { xdec, ydec, .. 
    } = rec.plane_cfg;
    let tile_rect = luma_tile_rect.decimated(xdec, ydec);
    let area = Area::BlockStartingAt { bo: tile_bo };
    if p > 0 && bsize < BlockSize::BLOCK_8X8 {
      // Chroma of a sub-8x8 luma partition: one chroma block covers several
      // small luma blocks, so check whether any covered neighbour was coded
      // intra before borrowing its motion.
      let mut some_use_intra = false;
      if bsize == BlockSize::BLOCK_4X4 || bsize == BlockSize::BLOCK_4X8 {
        some_use_intra |= cw.bc.blocks[tile_bo.with_offset(-1,0)].mode.is_intra(); };
      if !some_use_intra && bsize == BlockSize::BLOCK_4X4 || bsize == BlockSize::BLOCK_8X4 {
        some_use_intra |= cw.bc.blocks[tile_bo.with_offset(0,-1)].mode.is_intra(); };
      if !some_use_intra && bsize == BlockSize::BLOCK_4X4 {
        some_use_intra |= cw.bc.blocks[tile_bo.with_offset(-1,-1)].mode.is_intra(); };

      if some_use_intra {
        // A neighbour is intra: predict the whole chroma block with this
        // block's own motion instead of per-sub-block motion.
        luma_mode.predict_inter(fi, tile_rect, p, po, &mut rec.subregion_mut(area),
          plane_bsize.width(), plane_bsize.height(), ref_frames, mvs);
      } else {
        assert!(u_xdec == 1 && u_ydec == 1);
        // TODO: these are absolutely only valid for 4:2:0
        if bsize == BlockSize::BLOCK_4X4 {
          // Predict each 2x2 chroma quadrant with the motion of the luma
          // block covering it (top-left / above / left neighbours; the
          // bottom-right quadrant is this block itself).
          let mv0 = cw.bc.blocks[tile_bo.with_offset(-1,-1)].mv;
          let rf0 = cw.bc.blocks[tile_bo.with_offset(-1,-1)].ref_frames;
          let mv1 = cw.bc.blocks[tile_bo.with_offset(0,-1)].mv;
          let rf1 = cw.bc.blocks[tile_bo.with_offset(0,-1)].ref_frames;
          let po1 = PlaneOffset { x: po.x+2, y: po.y };
          let area1 = Area::StartingAt { x: po1.x, y: po1.y };
          let mv2 = cw.bc.blocks[tile_bo.with_offset(-1,0)].mv;
          let rf2 = cw.bc.blocks[tile_bo.with_offset(-1,0)].ref_frames;
          let po2 = PlaneOffset { x: po.x, y: po.y+2 };
          let area2 = Area::StartingAt { x: po2.x, y: po2.y };
          let po3 = PlaneOffset { x: po.x+2, y: po.y+2 };
          let area3 = Area::StartingAt { x: po3.x, y: po3.y };
          luma_mode.predict_inter(fi, tile_rect, p, po, &mut rec.subregion_mut(area), 2, 2, rf0, mv0);
          luma_mode.predict_inter(fi, tile_rect, p, po1, &mut rec.subregion_mut(area1), 2, 2, rf1, mv1);
          luma_mode.predict_inter(fi, tile_rect, p, po2, &mut rec.subregion_mut(area2), 2, 2, rf2, mv2);
          luma_mode.predict_inter(fi, tile_rect, p, po3, &mut rec.subregion_mut(area3), 2, 2, ref_frames, mvs);
        }
        if bsize == BlockSize::BLOCK_8X4 {
          // Top 4x2 half uses the above neighbour's motion.
          let mv1 = cw.bc.blocks[tile_bo.with_offset(0,-1)].mv;
          let rf1 = cw.bc.blocks[tile_bo.with_offset(0,-1)].ref_frames;
          luma_mode.predict_inter(fi, tile_rect, p, po, &mut rec.subregion_mut(area), 4, 2, rf1, mv1);
          let po3 = PlaneOffset { x: po.x, y: po.y+2 };
          let area3 = Area::StartingAt { x: po3.x, y: po3.y };
          luma_mode.predict_inter(fi, tile_rect, p, po3, &mut rec.subregion_mut(area3), 4, 2, ref_frames, mvs);
        }
        if bsize == BlockSize::BLOCK_4X8 {
          // Left 2x4 half uses the left neighbour's motion.
          let mv2 = cw.bc.blocks[tile_bo.with_offset(-1,0)].mv;
          let rf2 = cw.bc.blocks[tile_bo.with_offset(-1,0)].ref_frames;
          luma_mode.predict_inter(fi, tile_rect, p, po, &mut rec.subregion_mut(area), 2, 4, rf2, mv2);
          let po3 = PlaneOffset { x: po.x+2, y: po.y };
          let area3 = Area::StartingAt { x: po3.x, y: po3.y };
          luma_mode.predict_inter(fi, tile_rect, p, po3, &mut rec.subregion_mut(area3), 2, 4, ref_frames, mvs);
        }
      }
    } else {
      luma_mode.predict_inter(fi, tile_rect, p, po, &mut rec.subregion_mut(area),
        plane_bsize.width(), plane_bsize.height(), ref_frames, mvs);
    }
  }
}

// Record `mv` for every 4x4 unit covered by (`tile_bo`, `bsize`) in the
// tile's per-reference motion grid, clipped to the tile edge.
pub fn save_block_motion<T: Pixel>(
  ts: &mut TileStateMut<'_, T>,
  bsize: BlockSize, tile_bo: BlockOffset,
  ref_frame: usize, mv: MotionVector,
) {
  let tile_mvs = &mut ts.mvs[ref_frame];
  let tile_bo_x_end = (tile_bo.x + bsize.width_mi()).min(ts.mi_width);
  let tile_bo_y_end = (tile_bo.y + bsize.height_mi()).min(ts.mi_height);
  for mi_y in tile_bo.y..tile_bo_y_end {
    for mi_x in tile_bo.x..tile_bo_x_end {
      tile_mvs[mi_y][mi_x] = mv;
    }
  }
}

// Write the leading part of a block header: segmentation (before or after
// the skip flag, per segmentation config) and the skip flag itself.
// Returns whether CDEF coding is now active for the current superblock.
pub fn encode_block_a<T: Pixel>(
  seq: &Sequence, ts: &TileStateMut<'_, T>,
  cw: &mut ContextWriter, w: &mut dyn Writer,
  bsize: BlockSize, tile_bo: BlockOffset, skip: bool
) -> bool {
  cw.bc.blocks.set_skip(tile_bo, bsize, skip);
  if ts.segmentation.enabled && ts.segmentation.update_map && ts.segmentation.preskip {
    cw.write_segmentation(w, tile_bo, bsize, false, ts.segmentation.last_active_segid);
  }
  cw.write_skip(w, tile_bo, skip);
  if ts.segmentation.enabled && ts.segmentation.update_map && !ts.segmentation.preskip {
    cw.write_segmentation(w,
      tile_bo, bsize, skip, ts.segmentation.last_active_segid);
  }
  if !skip && seq.enable_cdef {
    cw.bc.cdef_coded = true;
  }
  cw.bc.cdef_coded
}

// Write the remainder of a block: mode info (inter or intra), reference
// frames and motion vectors, transform size, then run prediction and
// transform coding via `write_tx_tree` (inter) or `write_tx_blocks`
// (intra). Returns the accumulated tx-domain distortion (meaningful only
// when tx-domain distortion is used during RDO).
pub fn encode_block_b<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, w: &mut dyn Writer,
  luma_mode: PredictionMode, chroma_mode: PredictionMode,
  ref_frames: [RefType; 2], mvs: [MotionVector; 2],
  bsize: BlockSize, tile_bo: BlockOffset, skip: bool, cfl: CFLParams,
  tx_size: TxSize, tx_type: TxType, mode_context: usize,
  mv_stack: &[CandidateMV], rdo_type: RDOType, for_rdo_use: bool
) -> i64 {
  let is_inter = !luma_mode.is_intra();
  if is_inter { assert!(luma_mode == chroma_mode); };
  let sb_size = if fi.sequence.use_128x128_superblock {
    BlockSize::BLOCK_128X128
  } else {
    BlockSize::BLOCK_64X64
  };
  let PlaneConfig { xdec, ydec, .. } = ts.input.planes[1].cfg;
  if skip {
    cw.bc.reset_skip_context(tile_bo, bsize, xdec, ydec);
  }
  // Record this block's final mode info in the block map before coding, so
  // subsequent context lookups see it.
  cw.bc.blocks.set_block_size(tile_bo, bsize);
  cw.bc.blocks.set_mode(tile_bo, bsize, luma_mode);
  cw.bc.blocks.set_tx_size(tile_bo, bsize, tx_size);
  cw.bc.blocks.set_ref_frames(tile_bo, bsize, ref_frames);
  cw.bc.blocks.set_motion_vectors(tile_bo, bsize, mvs);

  //write_q_deltas();
  if cw.bc.code_deltas && ts.deblock.block_deltas_enabled && (bsize < sb_size || !skip) {
    cw.write_block_deblock_deltas(w, tile_bo, ts.deblock.block_delta_multi);
  }
  cw.bc.code_deltas = false;

  if fi.frame_type == FrameType::INTER {
    cw.write_is_inter(w, tile_bo, is_inter);
    if is_inter {
      cw.fill_neighbours_ref_counts(tile_bo);
      cw.write_ref_frames(w, fi, tile_bo);
      // Compound modes order after NEAREST_NEARESTMV in the enum.
      if luma_mode >= PredictionMode::NEAREST_NEARESTMV {
        cw.write_compound_mode(w, luma_mode, mode_context);
      } else {
        cw.write_inter_mode(w, luma_mode, mode_context);
      }
      let ref_mv_idx = 0;
      let num_mv_found = mv_stack.len();
      if luma_mode == PredictionMode::NEWMV || luma_mode == PredictionMode::NEW_NEWMV {
        if luma_mode == PredictionMode::NEW_NEWMV { assert!(num_mv_found >= 2); }
        // Code the DRL (dynamic reference list) index for NEWMV modes.
        for idx in 0..2 {
          if num_mv_found > idx + 1 {
            let drl_mode = ref_mv_idx > idx;
            let ctx: usize = (mv_stack[idx].weight < REF_CAT_LEVEL) as usize
              + (mv_stack[idx + 1].weight < REF_CAT_LEVEL) as usize;
            cw.write_drl_mode(w, drl_mode, ctx);
            if !drl_mode { break; }
          }
        }
      }
      let ref_mvs = if num_mv_found > 0 {
        [mv_stack[ref_mv_idx].this_mv, mv_stack[ref_mv_idx].comp_mv]
      } else {
        [MotionVector::default(); 2]
      };
      let mv_precision = if fi.force_integer_mv != 0 {
        MvSubpelPrecision::MV_SUBPEL_NONE
      } else if fi.allow_high_precision_mv {
        MvSubpelPrecision::MV_SUBPEL_HIGH_PRECISION
      } else {
        MvSubpelPrecision::MV_SUBPEL_LOW_PRECISION
      };
      // MVs are coded as differences against the selected reference MV.
      if luma_mode == PredictionMode::NEWMV ||
        luma_mode == PredictionMode::NEW_NEWMV ||
        luma_mode == PredictionMode::NEW_NEARESTMV {
        cw.write_mv(w, mvs[0], ref_mvs[0], mv_precision);
      }
      if luma_mode == PredictionMode::NEW_NEWMV ||
        luma_mode == PredictionMode::NEAREST_NEWMV {
        cw.write_mv(w, mvs[1], ref_mvs[1], mv_precision);
      }
      if luma_mode >= PredictionMode::NEAR0MV && luma_mode <= PredictionMode::NEAR2MV {
        // NEARn modes select DRL index n + 1.
        let ref_mv_idx = luma_mode as usize - PredictionMode::NEAR0MV as usize + 1;
        if luma_mode != PredictionMode::NEAR0MV { assert!(num_mv_found > ref_mv_idx); }
        for idx in 1..3 {
          if num_mv_found > idx + 1 {
            let drl_mode = ref_mv_idx > idx;
            let ctx: usize = (mv_stack[idx].weight < REF_CAT_LEVEL) as usize
              + (mv_stack[idx + 1].weight < REF_CAT_LEVEL) as usize;
            cw.write_drl_mode(w, drl_mode, ctx);
            if !drl_mode { break; }
          }
        }
        // Sanity: the MV being coded must match the selected candidate.
        if mv_stack.len() > 1 {
          assert!(mv_stack[ref_mv_idx].this_mv.row == mvs[0].row);
          assert!(mv_stack[ref_mv_idx].this_mv.col == mvs[0].col);
        } else {
          assert!(0 == mvs[0].row);
          assert!(0 == mvs[0].col);
        }
      } else if luma_mode == PredictionMode::NEARESTMV {
        if mv_stack.is_empty() {
          assert_eq!(mvs[0].row, 0);
          assert_eq!(mvs[0].col, 0);
        } else {
          assert_eq!(mvs[0].row, mv_stack[0].this_mv.row);
          assert_eq!(mvs[0].col, mv_stack[0].this_mv.col);
        }
      }
    } else {
      cw.write_intra_mode(w, bsize, luma_mode);
    }
  } else {
    cw.write_intra_mode_kf(w, tile_bo, luma_mode);
  }

  if !is_inter {
    if luma_mode.is_directional() && bsize >= BlockSize::BLOCK_8X8 {
      cw.write_angle_delta(w, 0, luma_mode);
    }
    if has_chroma(tile_bo, bsize, xdec, ydec) {
      cw.write_intra_uv_mode(w, chroma_mode, luma_mode, bsize);
      if chroma_mode.is_cfl() {
        assert!(bsize.cfl_allowed());
        cw.write_cfl_alphas(w, cfl);
      }
      if chroma_mode.is_directional() && bsize >= BlockSize::BLOCK_8X8 {
        cw.write_angle_delta(w, 0, chroma_mode);
      }
    }
    // TODO: Extra condition related to palette mode, see `read_filter_intra_mode_info` in decodemv.c
    if luma_mode == PredictionMode::DC_PRED && bsize.width() <= 32 && bsize.height() <= 32 {
      cw.write_use_filter_intra(w,false, bsize); // Always turn off FILTER_INTRA
    }
  }

  // write tx_size here (for now, intra frame only)
  // TODO: Add new field tx_mode to fi, then Use the condition, fi.tx_mode == TX_MODE_SELECT
  if fi.tx_mode_select {
    if bsize.greater_than(BlockSize::BLOCK_4X4) && !(is_inter && skip) {
      if !is_inter {
        cw.write_tx_size_intra(w, tile_bo, bsize, tx_size);
        cw.bc.update_tx_size_context(tile_bo, bsize, tx_size, false);
      } /*else {
        // TODO (yushin): write_tx_size_inter(), i.e. var-tx
      }*/
    } else {
      cw.bc.update_tx_size_context(tile_bo, bsize, tx_size, is_inter && skip);
    }
  }

  if is_inter {
    motion_compensate(fi, ts, cw, luma_mode, ref_frames, mvs, bsize, tile_bo, false);
    write_tx_tree(fi, ts, cw, w, luma_mode, tile_bo, bsize, tx_size, tx_type,
      skip, false, rdo_type, for_rdo_use)
  } else {
    write_tx_blocks(fi, ts, cw, w, luma_mode, chroma_mode, tile_bo, bsize,
      tx_size, tx_type, skip, cfl, false, rdo_type, for_rdo_use)
  }
}

// Compute the CfL "AC" input for chroma prediction: subsampled, shifted
// luma reconstruction samples with the block average removed.
pub fn luma_ac<T: Pixel>(
  ac: &mut [i16], ts: &mut TileStateMut<'_, T>, tile_bo: BlockOffset,
  bsize: BlockSize
) {
  let PlaneConfig { xdec, ydec, ..
  } = ts.input.planes[1].cfg;
  let plane_bsize = get_plane_block_size(bsize, xdec, ydec);
  // For sub-8x8 luma blocks chroma covers several luma blocks; shift to the
  // covering block position.
  let bo = if bsize.is_sub8x8(xdec, ydec) {
    let offset = bsize.sub8x8_offset(xdec, ydec);
    tile_bo.with_offset(offset.0, offset.1)
  } else {
    tile_bo
  };
  let rec = &ts.rec.planes[0];
  let luma = &rec.subregion(Area::BlockStartingAt { bo });

  let mut sum: i32 = 0;
  for sub_y in 0..plane_bsize.height() {
    for sub_x in 0..plane_bsize.width() {
      let y = sub_y << ydec;
      let x = sub_x << xdec;
      // Sum the 1, 2 or 4 luma samples covering this chroma position, then
      // shift so every sampling layout carries the same total weight.
      let mut sample: i16 = i16::cast_from(luma[y][x]);
      if xdec != 0 {
        sample += i16::cast_from(luma[y][x + 1]);
      }
      if ydec != 0 {
        debug_assert!(xdec != 0);
        sample += i16::cast_from(luma[y + 1][x]) + i16::cast_from(luma[y + 1][x + 1]);
      }
      sample <<= 3 - xdec - ydec;
      ac[sub_y * plane_bsize.width() + sub_x] = sample;
      sum += sample as i32;
    }
  }
  // Subtract the (rounded) block average, leaving only the AC part.
  let shift = plane_bsize.width_log2() + plane_bsize.height_log2();
  let average = ((sum + (1 << (shift - 1))) >> shift) as i16;
  for sub_y in 0..plane_bsize.height() {
    for sub_x in 0..plane_bsize.width() {
      ac[sub_y * plane_bsize.width() + sub_x] -= average;
    }
  }
}

// Predict and code all transform blocks of an (intra) partition: luma
// tx-blocks first, then — unless `luma_only` — chroma with its own tx size
// and, for CfL modes, the luma AC buffer. Returns accumulated tx-domain
// distortion.
pub fn write_tx_blocks<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, w: &mut dyn Writer,
  luma_mode: PredictionMode, chroma_mode: PredictionMode,
  tile_bo: BlockOffset, bsize: BlockSize, tx_size: TxSize, tx_type: TxType,
  skip: bool, cfl: CFLParams, luma_only: bool,
  rdo_type: RDOType, for_rdo_use: bool
) -> i64 {
  let bw = bsize.width_mi() / tx_size.width_mi();
  let bh = bsize.height_mi() / tx_size.height_mi();
  let qidx = get_qidx(fi, ts, cw, tile_bo);

  let PlaneConfig { xdec, ydec, ..
  } = ts.input.planes[1].cfg;
  let mut ac: AlignedArray<[i16; 32 * 32]> = UninitializedAlignedArray();
  let mut tx_dist: i64 = 0;
  let do_chroma = has_chroma(tile_bo, bsize, xdec, ydec);

  ts.qc.update(qidx, tx_size, luma_mode.is_intra(), fi.sequence.bit_depth, fi.dc_delta_q[0], 0);

  // Luma tx-blocks.
  for by in 0..bh {
    for bx in 0..bw {
      let tx_bo = BlockOffset {
        x: tile_bo.x + bx * tx_size.width_mi(),
        y: tile_bo.y + by * tx_size.height_mi()
      };

      let po = tx_bo.plane_offset(&ts.input.planes[0].cfg);
      let (_, dist) =
        encode_tx_block(
          fi, ts, cw, w, 0, tx_bo, luma_mode, tx_size, tx_type, bsize, po,
          skip, &ac.array, 0, rdo_type, for_rdo_use
        );
      assert!(!fi.use_tx_domain_distortion || !for_rdo_use || skip || dist >= 0);
      tx_dist += dist;
    }
  }

  if luma_only { return tx_dist };

  let uv_tx_size = bsize.largest_uv_tx_size(xdec, ydec);

  let mut bw_uv = (bw * tx_size.width_mi()) >> xdec;
  let mut bh_uv = (bh * tx_size.height_mi()) >> ydec;

  // Sub-8x8 luma still codes one chroma block when chroma is present.
  if (bw_uv == 0 || bh_uv == 0) && do_chroma {
    bw_uv = 1;
    bh_uv = 1;
  }

  bw_uv /= uv_tx_size.width_mi();
  bh_uv /= uv_tx_size.height_mi();

  let plane_bsize = get_plane_block_size(bsize, xdec, ydec);

  if chroma_mode.is_cfl() {
    luma_ac(&mut ac.array, ts, tile_bo, bsize);
  }

  if bw_uv > 0 && bh_uv > 0 {
    // TODO: Disable these asserts temporarilly, since chroma_sampling_422_aom and chroma_sampling_444_aom
    // tests seems trigerring them as well, which should not
    // TODO: Not valid if partition > 64x64 && chroma != 420
    /*if xdec == 1 && ydec == 1 {
      assert!(bw_uv == 1, "bw_uv = {}, bh_uv = {}", bw_uv, bh_uv);
      assert!(bh_uv == 1, "bw_uv = {}, bh_uv = {}", bw_uv, bh_uv);
    }*/
    let uv_tx_type = if uv_tx_size.width() >= 32 || uv_tx_size.height() >= 32 {
      TxType::DCT_DCT
    } else {
      uv_intra_mode_to_tx_type_context(chroma_mode)
    };

    for p in 1..3 {
      ts.qc.update(fi.base_q_idx, uv_tx_size, true, fi.sequence.bit_depth, fi.dc_delta_q[p], fi.ac_delta_q[p]);
      let alpha = cfl.alpha(p - 1);
      for by in 0..bh_uv {
        for bx in 0..bw_uv {
          // For sub-8x8 partitions the chroma block is anchored one luma
          // block up/left; compensate in the block offset.
          let tx_bo = BlockOffset {
            x: tile_bo.x + ((bx * uv_tx_size.width_mi()) << xdec) -
              ((bw * tx_size.width_mi() == 1) as usize) * xdec,
            y: tile_bo.y + ((by * uv_tx_size.height_mi()) << ydec) -
              ((bh * tx_size.height_mi() == 1) as usize) * ydec
          };

          let mut po = tile_bo.plane_offset(&ts.input.planes[p].cfg);
          po.x += (bx * uv_tx_size.width()) as isize;
          po.y += (by * uv_tx_size.height()) as isize;
          let (_, dist) =
            encode_tx_block(fi, ts, cw, w, p, tx_bo, chroma_mode, uv_tx_size, uv_tx_type,
              plane_bsize, po, skip, &ac.array, alpha, rdo_type, for_rdo_use);
          assert!(!fi.use_tx_domain_distortion || !for_rdo_use || skip || dist >= 0);
          tx_dist += dist;
        }
      }
    }
  }

  tx_dist
}

// FIXME: For now, assume tx_mode is LARGEST_TX, so var-tx is not implemented yet,
// which means only one tx block exist for a inter mode partition.
pub fn write_tx_tree<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, w: &mut dyn Writer, luma_mode: PredictionMode,
  tile_bo: BlockOffset, bsize: BlockSize, tx_size: TxSize, tx_type: TxType,
  skip: bool, luma_only: bool, rdo_type: RDOType, for_rdo_use: bool
) -> i64 {
  let bw = bsize.width_mi() / tx_size.width_mi();
  let bh = bsize.height_mi() / tx_size.height_mi();
  let qidx = get_qidx(fi, ts, cw, tile_bo);

  let PlaneConfig { xdec, ydec, ..
  } = ts.input.planes[1].cfg;
  let ac = &[0i16; 0];
  let mut tx_dist: i64 = 0;

  ts.qc.update(qidx, tx_size, luma_mode.is_intra(), fi.sequence.bit_depth, fi.dc_delta_q[0], 0);

  // Single luma tx block (LARGEST_TX: no var-tx split).
  let po = tile_bo.plane_offset(&ts.input.planes[0].cfg);
  let (has_coeff, dist) = encode_tx_block(
    fi, ts, cw, w, 0, tile_bo, luma_mode, tx_size, tx_type, bsize, po, skip,
    ac, 0, rdo_type, for_rdo_use
  );
  assert!(!fi.use_tx_domain_distortion || !for_rdo_use || skip || dist >= 0);
  tx_dist += dist;

  if luma_only { return tx_dist };

  let uv_tx_size = bsize.largest_uv_tx_size(xdec, ydec);

  let mut bw_uv = (bw * tx_size.width_mi()) >> xdec;
  let mut bh_uv = (bh * tx_size.height_mi()) >> ydec;

  if (bw_uv == 0 || bh_uv == 0) && has_chroma(tile_bo, bsize, xdec, ydec) {
    bw_uv = 1;
    bh_uv = 1;
  }

  bw_uv /= uv_tx_size.width_mi();
  bh_uv /= uv_tx_size.height_mi();

  let plane_bsize = get_plane_block_size(bsize, xdec, ydec);

  if bw_uv > 0 && bh_uv > 0 {
    // TODO: Disable these asserts temporarilly, since chroma_sampling_422_aom and chroma_sampling_444_aom
    // tests seems trigerring them as well, which should not
    // TODO: Not valid if partition > 64x64 && chroma != 420
    /*if xdec == 1 && ydec == 1 {
      debug_assert!(bw_uv == 1, "bw_uv = {}, bh_uv = {}", bw_uv, bh_uv);
      debug_assert!(bh_uv == 1, "bw_uv = {}, bh_uv = {}", bw_uv, bh_uv);
    }*/
    let uv_tx_type = if has_coeff {tx_type} else {TxType::DCT_DCT}; // if inter mode, uv_tx_type == tx_type

    for p in 1..3 {
      ts.qc.update(qidx, uv_tx_size, false, fi.sequence.bit_depth, fi.dc_delta_q[p], fi.ac_delta_q[p]);
      // For sub-8x8 partitions, anchor chroma one luma block up/left.
      let tx_bo = BlockOffset {
        x: tile_bo.x - ((bw * tx_size.width_mi() == 1) as usize),
        y: tile_bo.y - ((bh * tx_size.height_mi() == 1) as usize)
      };

      let po = tile_bo.plane_offset(&ts.input.planes[p].cfg);
      let (_, dist) =
        encode_tx_block(fi, ts, cw, w, p, tx_bo, luma_mode, uv_tx_size, uv_tx_type,
          plane_bsize, po, skip, ac, 0, rdo_type, for_rdo_use);
      assert!(!fi.use_tx_domain_distortion || !for_rdo_use || skip || dist >= 0);
      tx_dist += dist;
    }
  }

  tx_dist
}

// Encode a block whose mode decision was already made by RDO, routing
// header bits to the pre- or post-CDEF writer as appropriate.
pub fn encode_block_with_modes<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, w_pre_cdef: &mut dyn Writer,
  w_post_cdef: &mut dyn Writer, bsize: BlockSize, tile_bo: BlockOffset,
  mode_decision: &RDOPartitionOutput, rdo_type: RDOType
) {
  let (mode_luma, mode_chroma) =
    (mode_decision.pred_mode_luma, mode_decision.pred_mode_chroma);
  let cfl = mode_decision.pred_cfl_params;
  let ref_frames = mode_decision.ref_frames;
  let mvs = mode_decision.mvs;
  let skip = mode_decision.skip;
  let mut cdef_coded = cw.bc.cdef_coded;
  let (tx_size, tx_type) = (mode_decision.tx_size, mode_decision.tx_type);

  // The decision must agree with what rdo_tx_size_type would pick here.
  debug_assert!((tx_size, tx_type) ==
    rdo_tx_size_type(fi, ts, cw, bsize, tile_bo, mode_luma, ref_frames, mvs, skip));

  let mut mv_stack = Vec::new();
  let is_compound = ref_frames[1] != NONE_FRAME;
  let mode_context = cw.find_mvrefs(tile_bo, ref_frames, &mut mv_stack, bsize, fi, is_compound);

  cdef_coded = encode_block_a(&fi.sequence, ts, cw,
    if cdef_coded {w_post_cdef} else {w_pre_cdef},
    bsize, tile_bo, skip);
  encode_block_b(fi, ts, cw,
    if cdef_coded {w_post_cdef} else {w_pre_cdef},
    mode_luma, mode_chroma, ref_frames, mvs, bsize, tile_bo, skip, cfl,
    tx_size, tx_type, mode_context, &mv_stack, rdo_type, false);
}

// Recursive bottom-up partition search: RDO-code the whole block, then try
// the other partition types, keeping whichever has the lowest RD cost.
// Returns the chosen partitioning and, for non-SPLIT results, its modes.
fn encode_partition_bottomup<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, w_pre_cdef: &mut dyn Writer,
  w_post_cdef: &mut dyn Writer, bsize: BlockSize, tile_bo: BlockOffset,
  pmvs: &mut [[Option<MotionVector>; REF_FRAMES]; 5],
  ref_rd_cost: f64
) -> (RDOOutput) {
  let rdo_type = RDOType::PixelDistRealRate;
  let mut rd_cost = std::f64::MAX;
  let mut best_rd = std::f64::MAX;
  let mut rdo_output = RDOOutput {
    rd_cost,
    part_type: PartitionType::PARTITION_INVALID,
    part_modes: Vec::new()
  };

  // Offset beyond the tile: nothing to code.
  if tile_bo.x >= cw.bc.blocks.cols() || tile_bo.y >= cw.bc.blocks.rows() {
    return rdo_output
  }

  let bsw = bsize.width_mi();
  let bsh = bsize.height_mi();
  let is_square = bsize.is_sqr();

  // Always split if the current partition is too large
  let must_split =
    (tile_bo.x + bsw as usize > ts.mi_width ||
     tile_bo.y + bsh as usize > ts.mi_height ||
     bsize.greater_than(BlockSize::BLOCK_64X64)) && is_square;

  // must_split overrides the minimum partition size when applicable
  let can_split = (bsize > fi.min_partition_size && is_square) || must_split;

  let mut best_partition = PartitionType::PARTITION_INVALID;

  // Checkpoints so each candidate partitioning can be rolled back.
  let cw_checkpoint = cw.checkpoint();
  let w_pre_checkpoint = w_pre_cdef.checkpoint();
  let w_post_checkpoint = w_post_cdef.checkpoint();

  // Code the whole block
  if !must_split {
    let cost = if bsize.gte(BlockSize::BLOCK_8X8) && is_square {
      let w: &mut dyn Writer = if cw.bc.cdef_coded {w_post_cdef} else {w_pre_cdef};
      let tell = w.tell_frac();
      cw.write_partition(w, tile_bo, PartitionType::PARTITION_NONE, bsize);
      (w.tell_frac() - tell) as f64 * fi.lambda / ((1 << OD_BITRES) as f64)
    } else {
      0.0
    };

    // Select the coarse-ME predictor set covering this block position.
    let pmv_idx = if bsize.greater_than(BlockSize::BLOCK_32X32) {
      0
    } else {
      ((tile_bo.x & 32) >> 5) + ((tile_bo.y & 32) >> 4) + 1
    };
    let spmvs = &mut pmvs[pmv_idx];

    let mode_decision = rdo_mode_decision(fi, ts, cw, bsize, tile_bo, spmvs);

    if !mode_decision.pred_mode_luma.is_intra() {
      // Fill the saved motion structure
      save_block_motion(
        ts, mode_decision.bsize, mode_decision.bo,
        mode_decision.ref_frames[0].to_index(), mode_decision.mvs[0]
      );
    }

    rd_cost = mode_decision.rd_cost + cost;

    best_partition = PartitionType::PARTITION_NONE;
    best_rd = rd_cost;
    rdo_output.part_modes.push(mode_decision.clone());

    // Leaf block: code it now, there is nothing further to compare against.
    if !can_split {
      encode_block_with_modes(fi, ts, cw, w_pre_cdef, w_post_cdef, bsize,
        tile_bo, &mode_decision, rdo_type);
    }
  }

  // Test all partition types other than PARTITION_NONE by comparing their RD costs
  if can_split {
    debug_assert!(is_square);
    for &partition in RAV1E_PARTITION_TYPES {
      if partition == PartitionType::PARTITION_NONE { continue; }
      if fi.sequence.chroma_sampling == ChromaSampling::Cs422 &&
        partition == PartitionType::PARTITION_VERT { continue; }

      if must_split {
        let cbw = (ts.mi_width - tile_bo.x).min(bsw); // clipped block width, i.e. having effective pixels
        let cbh = (ts.mi_height - tile_bo.y).min(bsh);
        let mut split_vert = false;
        let mut split_horz = false;
        if cbw == bsw/2 && cbh == bsh { split_vert = true; }
        if cbh == bsh/2 && cbw == bsw { split_horz = true; }
        if !split_horz && partition == PartitionType::PARTITION_HORZ { continue; };
        if !split_vert && partition == PartitionType::PARTITION_VERT { continue; };
      }
      // Undo whatever the previous candidate wrote before trying this one.
      cw.rollback(&cw_checkpoint);
      w_pre_cdef.rollback(&w_pre_checkpoint);
      w_post_cdef.rollback(&w_post_checkpoint);

      let subsize = bsize.subsize(partition);
      let hbsw = subsize.width_mi(); // Half the block size width in blocks
      let hbsh = subsize.height_mi(); // Half the block size height in blocks
      let mut child_modes: Vec<RDOPartitionOutput> = Vec::new();
      rd_cost = 0.0;

      if bsize.gte(BlockSize::BLOCK_8X8) {
        let w: &mut dyn Writer = if cw.bc.cdef_coded {w_post_cdef} else {w_pre_cdef};
        let tell = w.tell_frac();
        cw.write_partition(w, tile_bo, partition, bsize);
        rd_cost = (w.tell_frac() - tell) as f64 * fi.lambda / ((1 << OD_BITRES) as f64);
      }

      let four_partitions = [
        tile_bo,
        BlockOffset{ x: tile_bo.x + hbsw as usize, y: tile_bo.y },
        BlockOffset{ x: tile_bo.x, y: tile_bo.y + hbsh as usize },
        BlockOffset{ x: tile_bo.x + hbsw as usize, y: tile_bo.y + hbsh as usize }
      ];
      let partitions = get_sub_partitions(&four_partitions, partition);

      let mut early_exit = false;

      // If either of horz or vert partition types is being tested,
      // two partitioned rectangles, defined in 'partitions', of the current block
      // is passed to encode_partition_bottomup()
      for offset in partitions {
        let child_rdo_output = encode_partition_bottomup(
          fi, ts, cw, w_pre_cdef, w_post_cdef, subsize, offset,
          pmvs,//&best_decision.mvs[0]
          best_rd
        );
        let cost = child_rdo_output.rd_cost;
        assert!(cost >= 0.0);

        if cost != std::f64::MAX {
          rd_cost += cost;
          // Abandon this partitioning as soon as it can no longer win.
          if fi.enable_early_exit && (rd_cost >= best_rd || rd_cost >= ref_rd_cost) {
            assert!(cost != std::f64::MAX);
            early_exit = true;
            break;
          } else if partition != PartitionType::PARTITION_SPLIT {
            child_modes.push(child_rdo_output.part_modes[0].clone());
          }
        }
      };

      if !early_exit && rd_cost < best_rd {
        best_rd = rd_cost;
        best_partition = partition;
        if partition != PartitionType::PARTITION_SPLIT {
          assert!(!child_modes.is_empty());
          rdo_output.part_modes = child_modes;
        }
      }
    }

    debug_assert!(best_partition != PartitionType::PARTITION_INVALID);

    // If the best partition is not PARTITION_SPLIT, recode it
    if best_partition != PartitionType::PARTITION_SPLIT {
      assert!(!rdo_output.part_modes.is_empty());
      cw.rollback(&cw_checkpoint);
      w_pre_cdef.rollback(&w_pre_checkpoint);
      w_post_cdef.rollback(&w_post_checkpoint);
      assert!(best_partition != PartitionType::PARTITION_NONE || !must_split);
      let subsize = bsize.subsize(best_partition);

      if bsize.gte(BlockSize::BLOCK_8X8) {
        let w: &mut dyn Writer = if cw.bc.cdef_coded {w_post_cdef} else {w_pre_cdef};
        cw.write_partition(w, tile_bo, best_partition, bsize);
      }
      for mode in rdo_output.part_modes.clone() {
        assert!(subsize == mode.bsize);
        if !mode.pred_mode_luma.is_intra() {
          save_block_motion(
            ts, mode.bsize, mode.bo,
            mode.ref_frames[0].to_index(), mode.mvs[0]
          );
        }
        // FIXME: redundant block re-encode
        encode_block_with_modes(fi, ts, cw, w_pre_cdef, w_post_cdef,
          mode.bsize, mode.bo, &mode, rdo_type);
      }
    }
  }

  assert!(best_partition != PartitionType::PARTITION_INVALID);

  if is_square && bsize.gte(BlockSize::BLOCK_8X8) &&
    (bsize == BlockSize::BLOCK_8X8 || best_partition != PartitionType::PARTITION_SPLIT) {
    cw.bc.update_partition_context(tile_bo, bsize.subsize(best_partition), bsize);
  }

  rdo_output.rd_cost = best_rd;
  rdo_output.part_type = best_partition;

  // The caller only consumes per-mode output for PARTITION_NONE.
  if best_partition != PartitionType::PARTITION_NONE {
    rdo_output.part_modes.clear();
  }
  rdo_output
}

// Recursive top-down partition coding: decide (or reuse) a partition type
// for this block, code the partition symbol, then either code the block or
// recurse into the sub-partitions.
fn encode_partition_topdown<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, w_pre_cdef: &mut dyn Writer,
  w_post_cdef: &mut dyn Writer, bsize: BlockSize, tile_bo: BlockOffset,
  block_output: &Option<RDOOutput>,
  pmvs: &mut [[Option<MotionVector>; REF_FRAMES]; 5]
) {
  if tile_bo.x >= cw.bc.blocks.cols() || tile_bo.y >= cw.bc.blocks.rows() {
    return;
  }

  let bsw = bsize.width_mi();
  let bsh = bsize.height_mi();
  let is_square = bsize.is_sqr();
  let rdo_type = RDOType::PixelDistRealRate;

  // Always split if the current partition is too large
  let must_split = (tile_bo.x + bsw as usize > ts.mi_width ||
    tile_bo.y + bsh as usize > ts.mi_height ||
    bsize.greater_than(BlockSize::BLOCK_64X64)) && is_square;

  let mut rdo_output = block_output.clone().unwrap_or(RDOOutput {
    part_type: PartitionType::PARTITION_INVALID,
    rd_cost: std::f64::MAX,
    part_modes: Vec::new()
  });
  let partition: PartitionType;
  let mut split_vert = false;
  let mut split_horz = false;
  if must_split {
    // Determine which rectangular splits fit the clipped frame edge.
    let cbw = (ts.mi_width - tile_bo.x).min(bsw); // clipped block width, i.e. having effective pixels
    let cbh = (ts.mi_height - tile_bo.y).min(bsh);

    if cbw == bsw/2 && cbh == bsh &&
      fi.sequence.chroma_sampling != ChromaSampling::Cs422 { split_vert = true; }
    if cbh == bsh/2 && cbw == bsw { split_horz = true; }
  }

  if must_split && (!split_vert && !split_horz) {
    // Oversized blocks are split automatically
    partition = PartitionType::PARTITION_SPLIT;
  } else if must_split || (bsize > fi.min_partition_size && is_square) {
    // Blocks of sizes within the supported range are subjected to a partitioning decision
    let mut partition_types: Vec<PartitionType> = Vec::new();
    if must_split {
      partition_types.push(PartitionType::PARTITION_SPLIT);
      if split_horz { partition_types.push(PartitionType::PARTITION_HORZ); };
      if split_vert { partition_types.push(PartitionType::PARTITION_VERT); };
    } else {
      //partition_types.append(&mut RAV1E_PARTITION_TYPES.to_vec());
      partition_types.push(PartitionType::PARTITION_NONE);
      partition_types.push(PartitionType::PARTITION_SPLIT);
    }
    rdo_output = rdo_partition_decision(fi, ts, cw, w_pre_cdef, w_post_cdef,
      bsize, tile_bo, &rdo_output, pmvs, &partition_types, rdo_type);
    partition = rdo_output.part_type;
  } else {
    // Blocks of sizes below the supported range are encoded directly
    partition = PartitionType::PARTITION_NONE;
  }

  assert!(PartitionType::PARTITION_NONE <= partition &&
    partition < PartitionType::PARTITION_INVALID);

  let subsize = bsize.subsize(partition);

  if bsize.gte(BlockSize::BLOCK_8X8) && is_square {
    let w: &mut dyn Writer = if cw.bc.cdef_coded {w_post_cdef} else {w_pre_cdef};
    cw.write_partition(w, tile_bo, partition, bsize);
  }

  match partition {
    PartitionType::PARTITION_NONE => {
      let part_decision = if !rdo_output.part_modes.is_empty() {
        // The optimal prediction mode is known from a previous iteration
        rdo_output.part_modes[0].clone()
      } else {
        let pmv_idx = if bsize.greater_than(BlockSize::BLOCK_32X32) {
          0
        } else {
          ((tile_bo.x & 32) >> 5) + ((tile_bo.y & 32) >> 4) + 1
        };
        let spmvs = &mut pmvs[pmv_idx];

        // Make a prediction mode decision for blocks encoded with no rdo_partition_decision call (e.g. edges)
        rdo_mode_decision(fi, ts, cw, bsize, tile_bo, spmvs)
      };

      let mut mode_luma = part_decision.pred_mode_luma;
      let mut mode_chroma = part_decision.pred_mode_chroma;

      let cfl = part_decision.pred_cfl_params;
      let skip = part_decision.skip;
      let ref_frames = part_decision.ref_frames;
      let mvs = part_decision.mvs;
      let mut cdef_coded = cw.bc.cdef_coded;

      // NOTE: Cannot avoid calling rdo_tx_size_type() here again,
      // because, with top-down partition RDO, the neighnoring contexts
      // of current partition can change, i.e. neighboring partitions can split down more.
      let (tx_size, tx_type) =
        rdo_tx_size_type(fi, ts, cw, bsize, tile_bo, mode_luma, ref_frames, mvs, skip);

      let mut mv_stack = Vec::new();
      let is_compound = ref_frames[1] != NONE_FRAME;
      let mode_context = cw.find_mvrefs(tile_bo, ref_frames, &mut mv_stack, bsize, fi, is_compound);

      // TODO: proper remap when is_compound is true
      if !mode_luma.is_intra() {
        // Remap the chosen MV onto the NEAREST/NEAR/NEW/GLOBAL mode that
        // codes it, now that the final MV candidate stack is known.
        if is_compound && mode_luma != PredictionMode::GLOBAL_GLOBALMV {
          let match0 = mv_stack[0].this_mv.row == mvs[0].row &&
            mv_stack[0].this_mv.col == mvs[0].col;
          let match1 = mv_stack[0].comp_mv.row == mvs[1].row &&
            mv_stack[0].comp_mv.col == mvs[1].col;

          mode_luma = if match0 && match1 {
            PredictionMode::NEAREST_NEARESTMV
          } else if match0 {
            PredictionMode::NEAREST_NEWMV
          } else if match1 {
            PredictionMode::NEW_NEARESTMV
          } else {
            PredictionMode::NEW_NEWMV
          };
          if mode_luma != PredictionMode::NEAREST_NEARESTMV &&
            mvs[0].row == 0 && mvs[0].col == 0 &&
            mvs[1].row == 0 && mvs[1].col == 0 {
            mode_luma = PredictionMode::GLOBAL_GLOBALMV;
          }
          mode_chroma = mode_luma;
        } else if !is_compound && mode_luma != PredictionMode::GLOBALMV {
          mode_luma = PredictionMode::NEWMV;
          for (c, m) in mv_stack.iter().take(4)
            .zip([PredictionMode::NEARESTMV, PredictionMode::NEAR0MV,
                  PredictionMode::NEAR1MV, PredictionMode::NEAR2MV].iter()) {
            if c.this_mv.row == mvs[0].row && c.this_mv.col == mvs[0].col {
              mode_luma = *m;
            }
          }
          if mode_luma == PredictionMode::NEWMV && mvs[0].row == 0 && mvs[0].col == 0 {
            mode_luma = if mv_stack.is_empty() {
              PredictionMode::NEARESTMV
            } else if mv_stack.len() == 1 {
              PredictionMode::NEAR0MV
            } else {
              PredictionMode::GLOBALMV
            };
          }
          mode_chroma = mode_luma;
        }

        save_block_motion(
          ts, part_decision.bsize, part_decision.bo,
          part_decision.ref_frames[0].to_index(), part_decision.mvs[0]
        );
      }

      // FIXME: every final block that has gone through the RDO decision process is encoded twice
      cdef_coded = encode_block_a(&fi.sequence, ts, cw,
        if cdef_coded {w_post_cdef} else {w_pre_cdef},
        bsize, tile_bo, skip);
      encode_block_b(fi, ts, cw,
        if cdef_coded {w_post_cdef} else {w_pre_cdef},
        mode_luma, mode_chroma, ref_frames, mvs, bsize, tile_bo, skip, cfl,
        tx_size, tx_type, mode_context, &mv_stack, RDOType::PixelDistRealRate, false);
    },
    // NOTE(review): these unqualified variants presumably come from a
    // `use`-import of the PartitionType variants earlier in the file —
    // confirm against the file's import block.
    PARTITION_SPLIT |
    PARTITION_HORZ |
    PARTITION_VERT => {
      if !rdo_output.part_modes.is_empty() {
        // The optimal prediction modes for each split block is known from an rdo_partition_decision() call
        assert!(subsize != BlockSize::BLOCK_INVALID);

        for mode in rdo_output.part_modes {
          // Each block is subjected to a new splitting decision
          encode_partition_topdown(fi, ts, cw, w_pre_cdef, w_post_cdef,
            subsize, mode.bo,
            &Some(RDOOutput {
              rd_cost: mode.rd_cost,
              part_type: PartitionType::PARTITION_NONE,
              part_modes: vec![mode]
            }), pmvs);
        }
      } else {
        let hbsw = subsize.width_mi(); // Half the block size width in blocks
        let hbsh = subsize.height_mi(); // Half the block size height in blocks
        let four_partitions = [
          tile_bo,
          BlockOffset{ x: tile_bo.x + hbsw as usize, y: tile_bo.y },
          BlockOffset{ x: tile_bo.x, y: tile_bo.y + hbsh as usize },
          BlockOffset{ x: tile_bo.x + hbsw as usize, y: tile_bo.y + hbsh as usize }
        ];
        let partitions = get_sub_partitions(&four_partitions, partition);

        partitions.iter().for_each(|&offset| {
          encode_partition_topdown(
            fi, ts, cw, w_pre_cdef, w_post_cdef, subsize, offset, &None, pmvs
          );
        });
      }
    },
    _ => unreachable!(),
  }

  if is_square && bsize.gte(BlockSize::BLOCK_8X8) &&
    (bsize == BlockSize::BLOCK_8X8 || partition != PartitionType::PARTITION_SPLIT) {
    cw.bc.update_partition_context(tile_bo, subsize, bsize);
  }
}

// Coarse (quarter-subsampled) motion estimation over whole 64x64
// superblocks, one candidate MV per reference slot, used to seed the
// finer per-block searches.
#[inline(always)]
fn build_coarse_pmvs<T: Pixel>(fi: &FrameInvariants<T>, ts: &TileStateMut<'_, T>) -> Vec<[Option<MotionVector>; REF_FRAMES]> {
  assert!(!fi.sequence.use_128x128_superblock);
  if ts.mi_width >= 16 && ts.mi_height >= 16 {
    let mut frame_pmvs = Vec::with_capacity(ts.sb_width * ts.sb_height);
    for sby in 0..ts.sb_height {
      for sbx in 0..ts.sb_width {
        let sbo = SuperBlockOffset { x: sbx, y: sby };
        let bo = sbo.block_offset(0, 0);
        let mut pmvs: [Option<MotionVector>;
REF_FRAMES] = [None; REF_FRAMES]; for i in 0..INTER_REFS_PER_FRAME { let r = fi.ref_frames[i] as usize; if pmvs[r].is_none() { pmvs[r] = estimate_motion_ss4(fi, ts, BlockSize::BLOCK_64X64, r, bo); } } frame_pmvs.push(pmvs); } } frame_pmvs } else { // the block use for motion estimation would be smaller than the whole image vec![[None; REF_FRAMES]; ts.sb_width * ts.sb_height] } } fn get_initial_cdfcontext<T: Pixel>(fi: &FrameInvariants<T>) -> CDFContext { let cdf = if fi.primary_ref_frame == PRIMARY_REF_NONE { None } else { let ref_frame_idx = fi.ref_frames[fi.primary_ref_frame as usize] as usize; let ref_frame = fi.rec_buffer.frames[ref_frame_idx].as_ref(); ref_frame.map(|rec| rec.cdfs) }; // return the retrieved instance if any, a new one otherwise cdf.unwrap_or_else(|| CDFContext::new(fi.base_q_idx)) } fn encode_tile_group<T: Pixel>(fi: &FrameInvariants<T>, fs: &mut FrameState<T>) -> Vec<u8> { let mut fc = get_initial_cdfcontext(fi); let mut blocks = FrameBlocks::new(fi.w_in_b, fi.h_in_b); let mut ts = fs.as_tile_state_mut(); let mut tb = blocks.as_tile_blocks_mut(); let data = encode_tile(fi, &mut ts, &mut fc, &mut tb); /* TODO: Don't apply if lossless */ deblock_filter_optimize(fi, fs, &blocks); if fs.deblock.levels[0] != 0 || fs.deblock.levels[1] != 0 { deblock_filter_frame(fs, &blocks, fi.sequence.bit_depth); } // Until the loop filters are pipelined, we'll need to keep // around a copy of both the pre- and post-cdef frame. 
let pre_cdef_frame = fs.rec.clone(); /* TODO: Don't apply if lossless */ if fi.sequence.enable_cdef { cdef_filter_frame(fi, &mut fs.rec, &blocks); } /* TODO: Don't apply if lossless */ if fi.sequence.enable_restoration { fs.restoration.lrf_filter_frame(&mut fs.rec, &pre_cdef_frame, &fi); } if fi.config.train_rdo { eprintln!("train rdo"); if let Ok(mut file) = File::open("rdo.dat") { let mut data = vec![]; file.read_to_end(&mut data).unwrap(); fs.t.merge_in(&deserialize(data.as_slice()).unwrap()); } let mut rdo_file = File::create("rdo.dat").unwrap(); rdo_file.write_all(&serialize(&fs.t).unwrap()).unwrap(); fs.t.print_code(); } fs.cdfs = fc; fs.cdfs.reset_counts(); data } fn encode_tile<'a, T: Pixel>( fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>, fc: &'a mut CDFContext, blocks: &'a mut TileBlocksMut<'a>, ) -> Vec<u8> { let mut w = WriterEncoder::new(); let estimate_motion_ss2 = if fi.config.speed_settings.diamond_me { crate::me::DiamondSearch::estimate_motion_ss2 } else { crate::me::FullSearch::estimate_motion_ss2 }; let bc = BlockContext::new(blocks); // For now, restoration unit size is locked to superblock size. 
let mut cw = ContextWriter::new(fc, bc); let tile_pmvs = build_coarse_pmvs(fi, ts); // main loop for sby in 0..ts.sb_height { cw.bc.reset_left_contexts(); for sbx in 0..ts.sb_width { let mut w_pre_cdef = WriterRecorder::new(); let mut w_post_cdef = WriterRecorder::new(); let tile_sbo = SuperBlockOffset { x: sbx, y: sby }; let tile_bo = tile_sbo.block_offset(0, 0); cw.bc.cdef_coded = false; cw.bc.code_deltas = fi.delta_q_present; // Do subsampled ME let mut pmvs: [[Option<MotionVector>; REF_FRAMES]; 5] = [[None; REF_FRAMES]; 5]; if ts.mi_width >= 8 && ts.mi_height >= 8 { for i in 0..INTER_REFS_PER_FRAME { let r = fi.ref_frames[i] as usize; if pmvs[0][r].is_none() { pmvs[0][r] = tile_pmvs[sby * ts.sb_width + sbx][r]; if let Some(pmv) = pmvs[0][r] { let pmv_w = if sbx > 0 { tile_pmvs[sby * ts.sb_width + sbx - 1][r] } else { None }; let pmv_e = if sbx < ts.sb_width - 1 { tile_pmvs[sby * ts.sb_width + sbx + 1][r] } else { None }; let pmv_n = if sby > 0 { tile_pmvs[sby * ts.sb_width + sbx - ts.sb_width][r] } else { None }; let pmv_s = if sby < ts.sb_height - 1 { tile_pmvs[sby * ts.sb_width + sbx + ts.sb_width][r] } else { None }; assert!(!fi.sequence.use_128x128_superblock); pmvs[1][r] = estimate_motion_ss2( fi, ts, BlockSize::BLOCK_32X32, r, tile_sbo.block_offset(0, 0), &[Some(pmv), pmv_w, pmv_n], i ); pmvs[2][r] = estimate_motion_ss2( fi, ts, BlockSize::BLOCK_32X32, r, tile_sbo.block_offset(8, 0), &[Some(pmv), pmv_e, pmv_n], i ); pmvs[3][r] = estimate_motion_ss2( fi, ts, BlockSize::BLOCK_32X32, r, tile_sbo.block_offset(0, 8), &[Some(pmv), pmv_w, pmv_s], i ); pmvs[4][r] = estimate_motion_ss2( fi, ts, BlockSize::BLOCK_32X32, r, tile_sbo.block_offset(8, 8), &[Some(pmv), pmv_e, pmv_s], i ); if let Some(mv) = pmvs[1][r] { save_block_motion(ts, BlockSize::BLOCK_32X32, tile_sbo.block_offset(0, 0), i, mv); } if let Some(mv) = pmvs[2][r] { save_block_motion(ts, BlockSize::BLOCK_32X32, tile_sbo.block_offset(8, 0), i, mv); } if let Some(mv) = pmvs[3][r] { save_block_motion(ts, 
BlockSize::BLOCK_32X32, tile_sbo.block_offset(0, 8), i, mv); } if let Some(mv) = pmvs[4][r] { save_block_motion(ts, BlockSize::BLOCK_32X32, tile_sbo.block_offset(8, 8), i, mv); } } } } } // Encode SuperBlock if fi.config.speed_settings.encode_bottomup { encode_partition_bottomup(fi, ts, &mut cw, &mut w_pre_cdef, &mut w_post_cdef, BlockSize::BLOCK_64X64, tile_bo, &mut pmvs, std::f64::MAX); } else { encode_partition_topdown(fi, ts, &mut cw, &mut w_pre_cdef, &mut w_post_cdef, BlockSize::BLOCK_64X64, tile_bo, &None, &mut pmvs); } // CDEF has to be decided before loop restoration, but coded after. // loop restoration must be decided last but coded before anything else. if cw.bc.cdef_coded || fi.sequence.enable_restoration { rdo_loop_decision(tile_sbo, fi, ts, &mut cw, &mut w); } if fi.sequence.enable_restoration { cw.write_lrf(&mut w, fi, &mut ts.restoration, tile_sbo); } // Once loop restoration is coded, we can replay the initial block bits w_pre_cdef.replay(&mut w); if cw.bc.cdef_coded { // CDEF index must be written in the middle, we can code it now let cdef_index = cw.bc.blocks.get_cdef(tile_sbo); cw.write_cdef(&mut w, cdef_index, fi.cdef_bits); // ...and then finally code what comes after the CDEF index w_post_cdef.replay(&mut w); } } } w.done() } #[allow(unused)] fn write_tile_group_header(tile_start_and_end_present_flag: bool) -> Vec<u8> { let mut buf = Vec::new(); { let mut bw = BitWriter::endian(&mut buf, BigEndian); bw.write_bit(tile_start_and_end_present_flag).unwrap(); bw.byte_align().unwrap(); } buf.clone() } // Write a packet containing only the placeholder that tells the decoder // to present the already decoded frame present at `frame_to_show_map_idx` // // See `av1-spec` Section 6.8.2 and 7.18. 
/// Emits the packet for a "show existing frame": only the OBU headers that
/// tell the decoder to present the already-decoded frame stored at
/// `fi.frame_to_show_map_idx` — no tile data is coded.
///
/// As a side effect, copies the referenced reconstruction's plane data into
/// `fs.rec` so the encoder-side output frame matches what a decoder shows.
pub fn encode_show_existing_frame<T: Pixel>(
  fi: &mut FrameInvariants<T>, fs: &mut FrameState<T>
) -> Vec<u8> {
  debug_assert!(fi.show_existing_frame);
  let mut packet = Vec::new();
  write_obus(&mut packet, fi, fs).unwrap();
  let map_idx = fi.frame_to_show_map_idx as usize;
  // The slot may be empty; in that case `fs.rec` is left untouched.
  if let Some(ref rec) = fi.rec_buffer.frames[map_idx] {
    for p in 0..3 {
      fs.rec.planes[p].data.copy_from_slice(&rec.frame.planes[p].data);
    }
  }
  packet
}
/// A single video frame's pixel data.
///
/// `planes[0]` is luma; `planes[1]` and `planes[2]` are the two chroma
/// planes (constructed with the chroma sampling's decimation — see
/// `Frame::new`).
#[derive(Debug, Clone)]
pub struct Frame<T: Pixel> {
  pub planes: [Plane<T>; 3]
}
/// Pads every plane out from the visible `w` x `h` area into its border
/// region (each plane applies its own decimation internally).
pub fn pad(&mut self, w: usize, h: usize) {
  for p in self.planes.iter_mut() {
    p.pad(w, h);
  }
}

/// Views the whole frame as a single immutable tile covering the full
/// luma-plane dimensions.
#[inline(always)]
pub fn as_tile(&self) -> Tile<'_, T> {
  let PlaneConfig { width, height, .. } = self.planes[0].cfg;
  Tile::new(self, TileRect { x: 0, y: 0, width, height })
}

/// Mutable counterpart of [`as_tile`]: the whole frame as one tile.
#[inline(always)]
pub fn as_tile_mut(&mut self) -> TileMut<'_, T> {
  let PlaneConfig { width, height, .. } = self.planes[0].cfg;
  TileMut::new(self, TileRect { x: 0, y: 0, width, height })
}
pub fn iter(&self) -> PixelIter<'_, T> { PixelIter::new(&self.planes) } } #[derive(Debug)] pub struct PixelIter<'a, T: Pixel> { planes: &'a [Plane<T>; 3], y: usize, x: usize, } impl<'a, T: Pixel> PixelIter<'a, T> { pub fn new(planes: &'a [Plane<T>; 3]) -> Self { PixelIter { planes, y: 0, x: 0, } } fn width(&self) -> usize { self.planes[0].cfg.width } fn height(&self) -> usize { self.planes[0].cfg.height } } impl<'a, T: Pixel> Iterator for PixelIter<'a, T> { type Item = (T, T, T); fn next(&mut self) -> Option<<Self as Iterator>::Item> { if self.y == self.height() - 1 && self.x == self.width() - 1 { return None; } let pixel = ( self.planes[0].p(self.x, self.y), self.planes[1].p(self.x >> self.planes[1].cfg.xdec, self.y >> self.planes[1].cfg.ydec), self.planes[2].p(self.x >> self.planes[2].cfg.xdec, self.y >> self.planes[2].cfg.ydec), ); if self.x == self.width() - 1 { self.x = 0; self.y += 1; } else { self.x += 1; } Some(pixel) } } #[derive(Debug, Clone)] pub struct ReferenceFrame<T: Pixel> { pub order_hint: u32, pub frame: Frame<T>, pub input_hres: Plane<T>, pub input_qres: Plane<T>, pub cdfs: CDFContext, pub frame_mvs: Vec<FrameMotionVectors>, } #[derive(Debug, Clone, Default)] pub struct ReferenceFramesSet<T: Pixel> { pub frames: [Option<Arc<ReferenceFrame<T>>>; (REF_FRAMES as usize)], pub deblock: [DeblockState; (REF_FRAMES as usize)] } impl<T: Pixel> ReferenceFramesSet<T> { pub fn new() -> Self { Self { frames: Default::default(), deblock: Default::default() } } } const MAX_NUM_TEMPORAL_LAYERS: usize = 8; const MAX_NUM_SPATIAL_LAYERS: usize = 4; const MAX_NUM_OPERATING_POINTS: usize = MAX_NUM_TEMPORAL_LAYERS * MAX_NUM_SPATIAL_LAYERS; #[derive(ArgEnum, Copy, Clone, Debug, PartialEq)] #[repr(C)] pub enum Tune { Psnr, Psychovisual } impl Default for Tune { fn default() -> Self { Tune::Psychovisual } } #[derive(Copy, Clone, Debug)] pub struct Sequence { // OBU Sequence header of AV1 pub profile: u8, pub num_bits_width: u32, pub num_bits_height: u32, pub bit_depth: 
usize, pub chroma_sampling: ChromaSampling, pub chroma_sample_position: ChromaSamplePosition, pub pixel_range: PixelRange, pub color_description: Option<ColorDescription>, pub mastering_display: Option<MasteringDisplay>, pub content_light: Option<ContentLight>, pub max_frame_width: u32, pub max_frame_height: u32, pub frame_id_numbers_present_flag: bool, pub frame_id_length: u32, pub delta_frame_id_length: u32, pub use_128x128_superblock: bool, pub order_hint_bits_minus_1: u32, pub force_screen_content_tools: u32, // 0 - force off // 1 - force on // 2 - adaptive pub force_integer_mv: u32, // 0 - Not to force. MV can be in 1/4 or 1/8 // 1 - force to integer // 2 - adaptive pub still_picture: bool, // Video is a single frame still picture pub reduced_still_picture_hdr: bool, // Use reduced header for still picture pub enable_intra_edge_filter: bool, // enables/disables corner/edge/upsampling pub enable_interintra_compound: bool, // enables/disables interintra_compound pub enable_masked_compound: bool, // enables/disables masked compound pub enable_dual_filter: bool, // 0 - disable dual interpolation filter // 1 - enable vert/horiz filter selection pub enable_order_hint: bool, // 0 - disable order hint, and related tools // jnt_comp, ref_frame_mvs, frame_sign_bias // if 0, enable_jnt_comp and // enable_ref_frame_mvs must be set zs 0. pub enable_jnt_comp: bool, // 0 - disable joint compound modes // 1 - enable it pub enable_ref_frame_mvs: bool, // 0 - disable ref frame mvs // 1 - enable it pub enable_warped_motion: bool, // 0 - disable warped motion for sequence // 1 - enable it for the sequence pub enable_superres: bool,// 0 - Disable superres for the sequence, and disable // transmitting per-frame superres enabled flag. // 1 - Enable superres for the sequence, and also // enable per-frame flag to denote if superres is // enabled for that frame. 
pub enable_cdef: bool, // To turn on/off CDEF pub enable_restoration: bool, // To turn on/off loop restoration pub operating_points_cnt_minus_1: usize, pub operating_point_idc: [u16; MAX_NUM_OPERATING_POINTS], pub display_model_info_present_flag: bool, pub decoder_model_info_present_flag: bool, pub level: [[usize; 2]; MAX_NUM_OPERATING_POINTS], // minor, major pub tier: [usize; MAX_NUM_OPERATING_POINTS], // seq_tier in the spec. One bit: 0 // or 1. pub film_grain_params_present: bool, pub separate_uv_delta_q: bool, } impl Sequence { pub fn new(config: &EncoderConfig) -> Sequence { let width_bits = 32 - (config.width as u32).leading_zeros(); let height_bits = 32 - (config.height as u32).leading_zeros(); assert!(width_bits <= 16); assert!(height_bits <= 16); let profile = if config.bit_depth == 12 || config.chroma_sampling == ChromaSampling::Cs422 { 2 } else if config.chroma_sampling == ChromaSampling::Cs444 { 1 } else { 0 }; let mut operating_point_idc = [0 as u16; MAX_NUM_OPERATING_POINTS]; let mut level = [[1, 2 as usize]; MAX_NUM_OPERATING_POINTS]; let mut tier = [0 as usize; MAX_NUM_OPERATING_POINTS]; for i in 0..MAX_NUM_OPERATING_POINTS { operating_point_idc[i] = 0; level[i][0] = 1; // minor level[i][1] = 2; // major tier[i] = 0; } Sequence { profile, num_bits_width: width_bits, num_bits_height: height_bits, bit_depth: config.bit_depth, chroma_sampling: config.chroma_sampling, chroma_sample_position: config.chroma_sample_position, pixel_range: config.pixel_range, color_description: config.color_description, mastering_display: config.mastering_display, content_light: config.content_light, max_frame_width: config.width as u32, max_frame_height: config.height as u32, frame_id_numbers_present_flag: false, frame_id_length: 0, delta_frame_id_length: 0, use_128x128_superblock: false, order_hint_bits_minus_1: 5, force_screen_content_tools: 0, force_integer_mv: 2, still_picture: false, reduced_still_picture_hdr: false, enable_intra_edge_filter: false, 
/// Signed distance `a - b` between two order hints, with wrap-around
/// handled as in the AV1 spec's `get_relative_dist()`: the difference is
/// sign-extended from `order_hint_bits_minus_1 + 1` bits, so the result
/// lies in `[-m, m - 1]` where `m = 1 << order_hint_bits_minus_1`.
pub fn get_relative_dist(&self, a: u32, b: u32) -> i32 {
  let diff = a as i32 - b as i32;
  let m = 1 << self.order_hint_bits_minus_1;
  // Keep the low bits, then subtract the sign bit's weight: this is a
  // branch-free sign extension of `diff` to the order-hint bit width.
  (diff & (m - 1)) - (diff & m)
}
(second_forward_idx < 0 || self.get_relative_dist(ref_hint, second_forward_hint) > 0) { second_forward_idx = i as isize; second_forward_hint = ref_hint; } } } // TODO: Set skip_mode_frame, when second_forward_idx is not less than 0. second_forward_idx >= 0 } } #[inline(always)] pub fn sb_size_log2(&self) -> usize { if self.use_128x128_superblock { 7 } else { 6 } } #[inline(always)] pub fn sb_size(&self) -> usize { 1 << self.sb_size_log2() } } #[derive(Debug)] pub struct FrameState<T: Pixel> { pub sb_size_log2: usize, pub input: Arc<Frame<T>>, pub input_hres: Plane<T>, // half-resolution version of input luma pub input_qres: Plane<T>, // quarter-resolution version of input luma pub rec: Frame<T>, pub cdfs: CDFContext, pub deblock: DeblockState, pub segmentation: SegmentationState, pub restoration: RestorationState, pub frame_mvs: Vec<FrameMotionVectors>, pub t: RDOTracker, } impl<T: Pixel> FrameState<T> { pub fn new(fi: &FrameInvariants<T>) -> Self { // TODO(negge): Use fi.cfg.chroma_sampling when we store VideoDetails in FrameInvariants FrameState::new_with_frame(fi, Arc::new(Frame::new( fi.width, fi.height, fi.sequence.chroma_sampling))) } pub fn new_with_frame(fi: &FrameInvariants<T>, frame: Arc<Frame<T>>) -> Self { let rs = RestorationState::new(fi, &frame); let luma_width = frame.planes[0].cfg.width; let luma_height = frame.planes[0].cfg.height; let luma_padding_x = frame.planes[0].cfg.xpad; let luma_padding_y = frame.planes[0].cfg.ypad; Self { sb_size_log2: fi.sb_size_log2(), input: frame, input_hres: Plane::new(luma_width / 2, luma_height / 2, 1, 1, luma_padding_x / 2, luma_padding_y / 2), input_qres: Plane::new(luma_width / 4, luma_height / 4, 2, 2, luma_padding_x / 4, luma_padding_y / 4), rec: Frame::new(luma_width, luma_height, fi.sequence.chroma_sampling), cdfs: CDFContext::new(0), deblock: Default::default(), segmentation: Default::default(), restoration: rs, frame_mvs: { let mut vec = Vec::with_capacity(REF_FRAMES); for _ in 0..REF_FRAMES { 
/// Per-frame deblocking (loop filter) parameters, as signalled in the
/// frame header.
#[derive(Copy, Clone, Debug)]
pub struct DeblockState {
  // Filter strength per edge class.
  pub levels: [u8; PLANES+1], // Y vertical edges, Y horizontal, U, V
  pub sharpness: u8,
  // Whether per-reference / per-mode level deltas are in use.
  pub deltas_enabled: bool,
  pub delta_updates_enabled: bool,
  // Level adjustments indexed by reference frame slot.
  pub ref_deltas: [i8; REF_FRAMES],
  // Level adjustments for the two prediction-mode classes.
  pub mode_deltas: [i8; 2],
  // Block-level delta signalling (NOTE(review): not exercised in the
  // visible code — defaults below leave it disabled).
  pub block_deltas_enabled: bool,
  pub block_delta_shift: u8,
  pub block_delta_multi: bool,
}

impl Default for DeblockState {
  fn default() -> Self {
    DeblockState {
      levels: [8,8,4,4],
      sharpness: 0,
      deltas_enabled: false, // requires delta_q_enabled
      delta_updates_enabled: false,
      ref_deltas: [1, 0, 0, 0, 0, -1, -1, -1],
      mode_deltas: [0, 0],
      block_deltas_enabled: false,
      block_delta_shift: 0,
      block_delta_multi: false
    }
  }
}
/// Derives the pyramid level of a frame from its coding-order position.
///
/// With a pyramid of depth `d`, the level is `d` minus the number of
/// trailing zero bits of the position — e.g. for depth 2:
/// position 00 -> level 0, 01 -> 2, 10 -> 1, 11 -> 2.
pub(crate) fn pos_to_lvl(pos: u64, pyramid_depth: u64) -> u64 {
  // Setting bit `pyramid_depth` caps trailing_zeros at `pyramid_depth`,
  // so the subtraction below can never underflow.
  let capped = pos | (1u64 << pyramid_depth);
  pyramid_depth - u64::from(capped.trailing_zeros())
}
assert!(sequence.bit_depth <= mem::size_of::<T>() * 8, "bit depth cannot fit into u8"); // Speed level decides the minimum partition size, i.e. higher speed --> larger min partition size, // with exception that SBs on right or bottom frame borders split down to BLOCK_4X4. // At speed = 0, RDO search is exhaustive. let min_partition_size = config.speed_settings.min_block_size; let use_reduced_tx_set = config.speed_settings.reduced_tx_set; let use_tx_domain_distortion = config.tune == Tune::Psnr && config.speed_settings.tx_domain_distortion; let use_tx_domain_rate = config.speed_settings.tx_domain_rate; let w_in_b = 2 * config.width.align_power_of_two_and_shift(3); // MiCols, ((width+7)/8)<<3 >> MI_SIZE_LOG2 let h_in_b = 2 * config.height.align_power_of_two_and_shift(3); // MiRows, ((height+7)/8)<<3 >> MI_SIZE_LOG2 let tiling = TilingInfo::new( sequence.sb_size_log2(), config.width, config.height, config.tile_cols_log2, config.tile_rows_log2 ); Self { sequence, width: config.width, height: config.height, sb_width: config.width.align_power_of_two_and_shift(6), sb_height: config.height.align_power_of_two_and_shift(6), w_in_b, h_in_b, tiling, number: 0, order_hint: 0, show_frame: true, showable_frame: true, error_resilient: false, intra_only: false, allow_high_precision_mv: false, frame_type: FrameType::KEY, show_existing_frame: false, frame_to_show_map_idx: 0, use_reduced_tx_set, reference_mode: ReferenceMode::SINGLE, use_prev_frame_mvs: false, min_partition_size, globalmv_transformation_type: [GlobalMVMode::IDENTITY; INTER_REFS_PER_FRAME], num_tg: 1, large_scale_tile: false, disable_cdf_update: false, allow_screen_content_tools: 0, force_integer_mv: 0, primary_ref_frame: PRIMARY_REF_NONE, refresh_frame_flags: 0, allow_intrabc: false, use_ref_frame_mvs: false, is_filter_switchable: false, is_motion_mode_switchable: false, // 0: only the SIMPLE motion mode will be used. 
/// Fills in `self.inter_cfg` with the pyramid/group parameters for an
/// inter frame at the given 1-based index within its segment.
///
/// NOTE(review): `idx_in_segment - 1` assumes `idx_in_segment >= 1`
/// (index 0 is the keyframe) — would underflow otherwise; confirm at
/// call sites.
fn apply_inter_props_cfg(&mut self, idx_in_segment: u64) {
  // Low-latency mode disables reordering (and hence the pyramid).
  let reorder = !self.config.low_latency;
  let multiref = reorder || self.config.speed_settings.multiref;
  let pyramid_depth = if reorder { 2 } else { 0 };
  // Number of source frames per group, plus extra coded frames for the
  // pyramid's out-of-order entries when reordering.
  let group_src_len = 1 << pyramid_depth;
  let group_len = group_src_len + if reorder { pyramid_depth } else { 0 };
  let idx_in_group = (idx_in_segment - 1) % group_len;
  let group_idx = (idx_in_segment - 1) / group_len;
  self.inter_cfg = Some(InterPropsConfig {
    reorder,
    multiref,
    pyramid_depth,
    group_src_len,
    group_len,
    idx_in_group,
    group_idx,
  })
}
  /// This interface provides simpler usage, because we always need the produced
  /// FrameInvariants regardless of success or failure.
  pub fn new_inter_frame(
    previous_fi: &Self, segment_start_frame: u64, idx_in_segment: u64,
    next_keyframe: u64
  ) -> (Self, bool) {
    let mut fi = previous_fi.clone();
    fi.frame_type = FrameType::INTER;
    fi.intra_only = false;
    fi.apply_inter_props_cfg(idx_in_segment);
    fi.tx_mode_select = false;
    let inter_cfg = fi.inter_cfg.unwrap();
    // Display-order position of this frame. The first `pyramid_depth` coded
    // frames of a group are the reordered pyramid frames (spaced
    // group_src_len >> idx apart); the rest follow in coding order.
    fi.order_hint = (inter_cfg.group_src_len * inter_cfg.group_idx +
      if inter_cfg.reorder && inter_cfg.idx_in_group < inter_cfg.pyramid_depth {
        inter_cfg.group_src_len >> inter_cfg.idx_in_group
      } else {
        inter_cfg.idx_in_group - inter_cfg.pyramid_depth + 1
      }) as u32;
    let number = segment_start_frame + fi.order_hint as u64;
    if number >= next_keyframe {
      // This frame would land past the next keyframe: report failure, but
      // still return the partially-built FrameInvariants (see doc above).
      fi.show_existing_frame = false;
      fi.show_frame = false;
      return (fi, false);
    }

    // Pyramid level of this frame (0 = top level when not reordering).
    let lvl = if !inter_cfg.reorder {
      0
    } else if inter_cfg.idx_in_group < inter_cfg.pyramid_depth {
      inter_cfg.idx_in_group
    } else {
      pos_to_lvl(inter_cfg.idx_in_group - inter_cfg.pyramid_depth + 1,
                 inter_cfg.pyramid_depth)
    };

    // Frames with lvl == 0 are stored in slots 0..4 and frames with higher values
    // of lvl in slots 4..8
    let slot_idx = if lvl == 0 {
      (fi.order_hint >> inter_cfg.pyramid_depth) % 4 as u32
    } else {
      3 + lvl as u32
    };
    fi.show_frame = !inter_cfg.reorder ||
      inter_cfg.idx_in_group >= inter_cfg.pyramid_depth;
    fi.show_existing_frame = fi.show_frame && inter_cfg.reorder &&
      (inter_cfg.idx_in_group - inter_cfg.pyramid_depth + 1).count_ones() == 1 &&
      inter_cfg.idx_in_group != inter_cfg.pyramid_depth;
    fi.frame_to_show_map_idx = slot_idx;
    // A show-existing frame must not refresh any slot; otherwise refresh the
    // slot this frame is stored in.
    fi.refresh_frame_flags = if fi.show_existing_frame {
      0
    } else {
      1 << slot_idx
    };

    let second_ref_frame = if !inter_cfg.multiref {
      LAST_FRAME // make second_ref_frame match first
    } else if !inter_cfg.reorder || inter_cfg.idx_in_group == 0 {
      LAST2_FRAME
    } else {
      ALTREF_FRAME
    };
    let ref_in_previous_group = LAST3_FRAME; // reuse probability estimates from
// previous frames only in top level frames
    fi.primary_ref_frame = if lvl > 0 {
      PRIMARY_REF_NONE
    } else {
      (ref_in_previous_group.to_index()) as u32
    };

    // Map each of the INTER_REFS_PER_FRAME reference indices to the reference
    // slot that holds the frame this reference should point at, based on the
    // pyramid level of the referenced frame.
    for i in 0..INTER_REFS_PER_FRAME {
      fi.ref_frames[i] = if lvl == 0 {
        if i == second_ref_frame.to_index() {
          (slot_idx + 4 - 2) as u8 % 4
        } else {
          (slot_idx + 4 - 1) as u8 % 4
        }
      } else if i == second_ref_frame.to_index() {
        // Second (forward) reference: the next frame at a shallower level.
        let oh = fi.order_hint + (inter_cfg.group_src_len as u32 >> lvl);
        let lvl2 = pos_to_lvl(oh as u64, inter_cfg.pyramid_depth);
        if lvl2 == 0 {
          ((oh >> inter_cfg.pyramid_depth) % 4) as u8
        } else {
          3 + lvl2 as u8
        }
      } else if i == ref_in_previous_group.to_index() {
        if lvl == 0 {
          (slot_idx + 4 - 1) as u8 % 4
        } else {
          slot_idx as u8
        }
      } else {
        // Default (backward) reference: the previous frame at a shallower level.
        let oh = fi.order_hint - (inter_cfg.group_src_len as u32 >> lvl);
        let lvl1 = pos_to_lvl(oh as u64, inter_cfg.pyramid_depth);
        if lvl1 == 0 {
          ((oh >> inter_cfg.pyramid_depth) % 4) as u8
        } else {
          3 + lvl1 as u8
        }
      }
    }

    fi.reference_mode = if inter_cfg.multiref && inter_cfg.reorder &&
      inter_cfg.idx_in_group != 0 {
      ReferenceMode::SELECT
    } else {
      ReferenceMode::SINGLE
    };
    fi.number = number;
    // Motion-estimation search range scales with the temporal distance to
    // the references at this pyramid level.
    fi.me_range_scale = (inter_cfg.group_src_len >> lvl) as u8;
    (fi, true)
  }

  /// Maps this frame to its rate-control subtype: `FRAME_SUBTYPE_I` for key
  /// frames, otherwise `FRAME_SUBTYPE_P` offset by the pyramid level.
  pub fn get_frame_subtype(&self) -> usize {
    if self.frame_type == FrameType::KEY {
      FRAME_SUBTYPE_I
    } else {
      let inter_cfg = self.inter_cfg.unwrap();
      // Same level computation as in new_inter_frame().
      let lvl = if !inter_cfg.reorder {
        0
      } else if inter_cfg.idx_in_group < inter_cfg.pyramid_depth {
        inter_cfg.idx_in_group
      } else {
        pos_to_lvl(
          inter_cfg.idx_in_group - inter_cfg.pyramid_depth + 1,
          inter_cfg.pyramid_depth
        )
      };
      FRAME_SUBTYPE_P + (lvl as usize)
    }
  }

  /// Applies the quantizer parameters chosen by rate control: sets
  /// `base_q_idx`, the per-plane DC/AC delta-q values, the CDEF bit budget,
  /// and the derived RD lambdas.
  pub fn set_quantizers(&mut self, qps: &QuantizerParameters) {
    self.base_q_idx = qps.ac_qi[0];
    if self.frame_type != FrameType::KEY {
      // Fewer CDEF strength bits at higher quantizers (base_q_idx >= 128).
      self.cdef_bits = 3 - ((self.base_q_idx.max(128) - 128) >> 5);
    } else {
      self.cdef_bits = 3;
    }
    let base_q_idx = self.base_q_idx as i32;
    for pi in 0..3 {
      // delta-q values must fit in a signed 8-bit field.
      debug_assert!(qps.dc_qi[pi] as i32 - base_q_idx >= -128);
      debug_assert!((qps.dc_qi[pi] as i32 - base_q_idx) < 128);
      debug_assert!(qps.ac_qi[pi] as i32 - base_q_idx >= -128);
      debug_assert!((qps.ac_qi[pi] as i32 - base_q_idx) < 128);
      self.dc_delta_q[pi] = (qps.dc_qi[pi] as i32 - base_q_idx) as i8;
      self.ac_delta_q[pi] = (qps.ac_qi[pi] as i32 - base_q_idx) as i8;
    }
    // Scale lambda for bit depths above 8; me_lambda is its square root.
    self.lambda =
      qps.lambda * ((1 << (2 * (self.sequence.bit_depth - 8))) as f64);
    self.me_lambda = self.lambda.sqrt();
  }

  /// Log2 of the superblock size, forwarded from the sequence header.
  #[inline(always)]
  pub fn sb_size_log2(&self) -> usize {
    self.sequence.sb_size_log2()
  }

  /// Superblock size in pixels, forwarded from the sequence header.
  #[inline(always)]
  pub fn sb_size(&self) -> usize {
    self.sequence.sb_size()
  }
}

impl<T: Pixel> fmt::Display for FrameInvariants<T> {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    write!(f, "Frame {} - {}", self.number, self.frame_type)
  }
}

/// Per-inter-frame reordering-group parameters, computed by
/// `FrameInvariants::apply_inter_props_cfg`.
#[derive(Debug, Clone, Copy)]
pub struct InterPropsConfig {
  pub reorder: bool,
  pub multiref: bool,
  pub pyramid_depth: u64,
  pub group_src_len: u64,
  pub group_len: u64,
  pub idx_in_group: u64,
  pub group_idx: u64,
}

/// Writes the temporal delimiter OBU bytes into `packet`.
pub fn write_temporal_delimiter(
  packet: &mut dyn io::Write
) -> io::Result<()> {
  packet.write_all(&TEMPORAL_DELIMITER)?;
  Ok(())
}

/// Writes this frame's OBUs into `packet`: for key frames, the sequence
/// header OBU (plus optional HDR metadata OBUs), then the frame header OBU.
/// Each header/frame OBU is emitted as OBU header, uleb128-coded payload
/// size, then payload.
fn write_obus<T: Pixel>(
  packet: &mut dyn io::Write, fi: &mut FrameInvariants<T>, fs: &FrameState<T>
) -> io::Result<()> {
  let obu_extension = 0 as u32;

  let mut buf1 = Vec::new();

  // write sequence header obu if KEY_FRAME, preceded by its uleb128-coded size
  if fi.frame_type == FrameType::KEY {
    // Payload goes into buf2 first so its length is known before the size
    // field is written.
    let mut buf2 = Vec::new();
    {
      let mut bw2 = BitWriter::endian(&mut buf2, BigEndian);
      bw2.write_sequence_header_obu(fi)?;
      bw2.write_bit(true)?; // trailing bit
      bw2.byte_align()?;
    }

    {
      let mut bw1 = BitWriter::endian(&mut buf1, BigEndian);
      bw1.write_obu_header(ObuType::OBU_SEQUENCE_HEADER, obu_extension)?;
    }
    packet.write_all(&buf1).unwrap();
    buf1.clear();

    {
      let mut bw1 = BitWriter::endian(&mut buf1, BigEndian);
      bw1.write_uleb128(buf2.len() as u64)?;
    }
    packet.write_all(&buf1).unwrap();
    buf1.clear();

    packet.write_all(&buf2).unwrap();
    buf2.clear();

    // Optional HDR content-light-level metadata OBU.
    if fi.sequence.content_light.is_some() {
      let mut bw1 = BitWriter::endian(&mut buf1, BigEndian);
      bw1.write_metadata_obu(ObuMetaType::OBU_META_HDR_CLL, fi.sequence)?;
      packet.write_all(&buf1).unwrap();
      buf1.clear();
    }

    // Optional HDR mastering-display metadata OBU.
    if fi.sequence.mastering_display.is_some() {
      let mut bw1 = BitWriter::endian(&mut buf1, BigEndian);
      bw1.write_metadata_obu(ObuMetaType::OBU_META_HDR_MDCV, fi.sequence)?;
      packet.write_all(&buf1).unwrap();
      buf1.clear();
    }
  }

  // Frame header payload, buffered so the size field can precede it.
  let mut buf2 = Vec::new();
  {
    let mut bw2 = BitWriter::endian(&mut buf2, BigEndian);
    bw2.write_frame_header_obu(fi, fs)?;
  }

  {
    let mut bw1 = BitWriter::endian(&mut buf1, BigEndian);
    bw1.write_obu_header(ObuType::OBU_FRAME_HEADER, obu_extension)?;
  }
  packet.write_all(&buf1).unwrap();
  buf1.clear();

  {
    let mut bw1 = BitWriter::endian(&mut buf1, BigEndian);
    bw1.write_uleb128(buf2.len() as u64)?;
  }
  packet.write_all(&buf1).unwrap();
  buf1.clear();

  packet.write_all(&buf2).unwrap();
  buf2.clear();

  Ok(())
}

/// Write into `dst` the difference between the blocks at `src1` and `src2`
/// (`dst[r][c] = src1[r][c] - src2[r][c]`), row by row for `height` rows of
/// `width` samples.
fn diff<T: Pixel>(
  dst: &mut [i16], src1: &PlaneRegion<'_, T>, src2: &PlaneRegion<'_, T>,
  width: usize, height: usize,
) {
  for ((l, s1), s2) in dst.chunks_mut(width).take(height)
    .zip(src1.rows_iter())
    .zip(src2.rows_iter()) {
    for ((r, v1), v2) in l.iter_mut().zip(s1).zip(s2) {
      *r = i16::cast_from(*v1) - i16::cast_from(*v2);
    }
  }
}

/// Returns the effective quantizer index for the block at `tile_bo`:
/// the frame's base q index, adjusted by the block's segment ALT_Q delta
/// when segmentation enables that feature, clamped to 0..=255.
fn get_qidx<T: Pixel>(fi: &FrameInvariants<T>, ts: &TileStateMut<'_, T>,
                      cw: &ContextWriter, tile_bo: BlockOffset) -> u8 {
  let mut qidx = fi.base_q_idx;
  let sidx = cw.bc.blocks[tile_bo].segmentation_idx as usize;
  if ts.segmentation.features[sidx][SegLvl::SEG_LVL_ALT_Q as usize] {
    let delta = ts.segmentation.data[sidx][SegLvl::SEG_LVL_ALT_Q as usize];
    qidx = clamp((qidx as i16) + delta, 0, 255) as u8;
  }
  qidx
}

// For a transform block,
// predict, transform, quantize, write coefficients to a bitstream,
// dequantize, inverse-transform.
/// Codes one transform block: intra-predicts if needed, computes the
/// residual, forward-transforms, quantizes, optionally writes the
/// coefficients, then dequantizes for reconstruction. Returns
/// `(has_coeff, tx_dist)` where `tx_dist` is the tx-domain SSE (or -1 when
/// not computed, e.g. for skipped blocks).
pub fn encode_tx_block<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>, cw: &mut ContextWriter,
  w: &mut dyn Writer, p: usize, tile_bo: BlockOffset, mode: PredictionMode,
  tx_size: TxSize, tx_type: TxType, plane_bsize: BlockSize, po: PlaneOffset,
  skip: bool, ac: &[i16], alpha: i16, rdo_type: RDOType, for_rdo_use: bool
) -> (bool, i64) {
  let qidx = get_qidx(fi, ts, cw, tile_bo);
  let PlaneConfig { xdec, ydec, .. } = ts.input.planes[p].cfg;
  let tile_rect = ts.tile_rect().decimated(xdec, ydec);
  let rec = &mut ts.rec.planes[p];
  let area = Area::BlockStartingAt { bo: tile_bo };

  // Only DCT_DCT is allowed for transforms larger than 32x32.
  assert!(tx_size.sqr() <= TxSize::TX_32X32 || tx_type == TxType::DCT_DCT);

  if mode.is_intra() {
    let bit_depth = fi.sequence.bit_depth;
    // Intra prediction reads edge pixels from the reconstructed plane;
    // `ac`/`alpha` carry the CfL inputs for chroma.
    let edge_buf = get_intra_edges(&rec.as_const(), po, tx_size, bit_depth, Some(mode));
    mode.predict_intra(tile_rect, &mut rec.subregion_mut(area), tx_size,
                       bit_depth, &ac, alpha, &edge_buf);
  }

  // Skipped blocks code no residual; prediction alone is the reconstruction.
  if skip {
    return (false, -1);
  }

  // Scratch buffers sized for the largest (64x64) transform; only the first
  // tx_size.area() entries are used.
  let mut residual_storage: AlignedArray<[i16; 64 * 64]> =
    UninitializedAlignedArray();
  let mut coeffs_storage: AlignedArray<[i32; 64 * 64]> =
    UninitializedAlignedArray();
  let mut qcoeffs_storage: AlignedArray<[i32; 64 * 64]> =
    UninitializedAlignedArray();
  let mut rcoeffs_storage: AlignedArray<[i32; 64 * 64]> =
    UninitializedAlignedArray();
  let residual = &mut residual_storage.array[..tx_size.area()];
  let coeffs = &mut coeffs_storage.array[..tx_size.area()];
  let qcoeffs = &mut qcoeffs_storage.array[..tx_size.area()];
  let rcoeffs = &mut rcoeffs_storage.array[..tx_size.area()];

  // residual = input - prediction
  diff(
    residual,
    &ts.input_tile.planes[p].subregion(area),
    &rec.subregion(area),
    tx_size.width(),
    tx_size.height());

  forward_transform(residual, coeffs, tx_size.width(), tx_size, tx_type,
                    fi.sequence.bit_depth);

  let coded_tx_size = av1_get_coded_tx_size(tx_size).area();
  ts.qc.quantize(coeffs, qcoeffs, coded_tx_size);

  // Coefficient rate is measured in fractional bits around the write.
  let tell_coeffs = w.tell_frac();
  let has_coeff = if !for_rdo_use || rdo_type.needs_coeff_rate() {
    cw.write_coeffs_lv_map(w, p, tile_bo, &qcoeffs,
                           mode, tx_size, tx_type, plane_bsize, xdec, ydec,
                           fi.use_reduced_tx_set)
  } else {
    true
  };
  let cost_coeffs = w.tell_frac() - tell_coeffs;
  // Reconstruct
  dequantize(qidx, qcoeffs, rcoeffs, tx_size, fi.sequence.bit_depth,
             fi.dc_delta_q[p], fi.ac_delta_q[p]);

  let mut tx_dist: i64 = -1;

  // When distortion is measured in the tx domain during RDO, the pixel-domain
  // reconstruction can be skipped.
  if !fi.use_tx_domain_distortion || !for_rdo_use {
    inverse_transform_add(rcoeffs, &mut rec.subregion_mut(area), tx_size,
                          tx_type, fi.sequence.bit_depth);
  }
  if rdo_type.needs_tx_dist() {
    // Store tx-domain distortion of this block
    tx_dist = coeffs
      .iter()
      .zip(rcoeffs)
      .map(|(a, b)| {
        let c = *a as i32 - *b as i32;
        (c * c) as u64
      }).sum::<u64>() as i64;

    // Rescale to pixel-domain units with rounding.
    let tx_dist_scale_bits = 2*(3 - get_log_tx_scale(tx_size));
    let tx_dist_scale_rounding_offset = 1 << (tx_dist_scale_bits - 1);
    tx_dist = (tx_dist + tx_dist_scale_rounding_offset) >> tx_dist_scale_bits;
  }
  if fi.config.train_rdo {
    ts.rdo.add_rate(fi.base_q_idx, tx_size, tx_dist as u64, cost_coeffs as u64);
  }

  if rdo_type == RDOType::TxDistEstRate {
    // look up rate and distortion in table
    let estimated_rate = estimate_rate(fi.base_q_idx, tx_size, tx_dist as u64);
    w.add_bits_frac(estimated_rate as u32);
  }
  (has_coeff, tx_dist)
}

/// Runs inter prediction for a whole partition at `tile_bo`, writing into
/// the reconstruction planes. Luma always; chroma too unless `luma_only` is
/// set or the block has no chroma at this subsampling.
pub fn motion_compensate<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>, cw: &mut ContextWriter,
  luma_mode: PredictionMode, ref_frames: [RefType; 2], mvs: [MotionVector; 2],
  bsize: BlockSize, tile_bo: BlockOffset, luma_only: bool
) {
  debug_assert!(!luma_mode.is_intra());

  let PlaneConfig { xdec: u_xdec, ydec: u_ydec, .. } = ts.input.planes[1].cfg;

  // Inter mode prediction can take place once for a whole partition,
  // instead of each tx-block.
  let num_planes = 1 + if !luma_only && has_chroma(tile_bo, bsize, u_xdec, u_ydec) { 2 } else { 0 };

  let luma_tile_rect = ts.tile_rect();
  for p in 0..num_planes {
    let plane_bsize = if p == 0 {
      bsize
    } else {
      get_plane_block_size(bsize, u_xdec, u_ydec)
    };

    let rec = &mut ts.rec.planes[p];
    let po = tile_bo.plane_offset(&rec.plane_cfg);
    let &PlaneConfig { xdec, ydec, ..
    } = rec.plane_cfg;
    let tile_rect = luma_tile_rect.decimated(xdec, ydec);
    let area = Area::BlockStartingAt { bo: tile_bo };

    // Chroma of sub-8x8 luma blocks is special: a chroma block covers
    // several luma blocks, whose modes/MVs may differ.
    if p > 0 && bsize < BlockSize::BLOCK_8X8 {
      let mut some_use_intra = false;
      // Check the luma neighbours this chroma block spans for intra modes.
      if bsize == BlockSize::BLOCK_4X4 || bsize == BlockSize::BLOCK_4X8 {
        some_use_intra |= cw.bc.blocks[tile_bo.with_offset(-1,0)].mode.is_intra(); };
      if !some_use_intra && bsize == BlockSize::BLOCK_4X4 || bsize == BlockSize::BLOCK_8X4 {
        some_use_intra |= cw.bc.blocks[tile_bo.with_offset(0,-1)].mode.is_intra(); };
      if !some_use_intra && bsize == BlockSize::BLOCK_4X4 {
        some_use_intra |= cw.bc.blocks[tile_bo.with_offset(-1,-1)].mode.is_intra(); };

      if some_use_intra {
        // Fall back to a single prediction with this block's own MVs.
        luma_mode.predict_inter(fi, tile_rect, p, po, &mut rec.subregion_mut(area),
                                plane_bsize.width(), plane_bsize.height(),
                                ref_frames, mvs);
      } else {
        assert!(u_xdec == 1 && u_ydec == 1);
        // TODO: these are absolutely only valid for 4:2:0
        // Predict each 2x2 chroma quadrant with the MVs of the luma block
        // it is co-located with.
        if bsize == BlockSize::BLOCK_4X4 {
          let mv0 = cw.bc.blocks[tile_bo.with_offset(-1,-1)].mv;
          let rf0 = cw.bc.blocks[tile_bo.with_offset(-1,-1)].ref_frames;
          let mv1 = cw.bc.blocks[tile_bo.with_offset(0,-1)].mv;
          let rf1 = cw.bc.blocks[tile_bo.with_offset(0,-1)].ref_frames;
          let po1 = PlaneOffset { x: po.x+2, y: po.y };
          let area1 = Area::StartingAt { x: po1.x, y: po1.y };
          let mv2 = cw.bc.blocks[tile_bo.with_offset(-1,0)].mv;
          let rf2 = cw.bc.blocks[tile_bo.with_offset(-1,0)].ref_frames;
          let po2 = PlaneOffset { x: po.x, y: po.y+2 };
          let area2 = Area::StartingAt { x: po2.x, y: po2.y };
          let po3 = PlaneOffset { x: po.x+2, y: po.y+2 };
          let area3 = Area::StartingAt { x: po3.x, y: po3.y };
          luma_mode.predict_inter(fi, tile_rect, p, po, &mut rec.subregion_mut(area), 2, 2, rf0, mv0);
          luma_mode.predict_inter(fi, tile_rect, p, po1, &mut rec.subregion_mut(area1), 2, 2, rf1, mv1);
          luma_mode.predict_inter(fi, tile_rect, p, po2, &mut rec.subregion_mut(area2), 2, 2, rf2, mv2);
          luma_mode.predict_inter(fi, tile_rect, p, po3, &mut rec.subregion_mut(area3), 2, 2, ref_frames, mvs);
        }
        if bsize == BlockSize::BLOCK_8X4
        {
          // Top half uses the above luma block's MVs; bottom half this block's.
          let mv1 = cw.bc.blocks[tile_bo.with_offset(0,-1)].mv;
          let rf1 = cw.bc.blocks[tile_bo.with_offset(0,-1)].ref_frames;
          luma_mode.predict_inter(fi, tile_rect, p, po, &mut rec.subregion_mut(area), 4, 2, rf1, mv1);
          let po3 = PlaneOffset { x: po.x, y: po.y+2 };
          let area3 = Area::StartingAt { x: po3.x, y: po3.y };
          luma_mode.predict_inter(fi, tile_rect, p, po3, &mut rec.subregion_mut(area3), 4, 2, ref_frames, mvs);
        }
        if bsize == BlockSize::BLOCK_4X8 {
          // Left half uses the left luma block's MVs; right half this block's.
          let mv2 = cw.bc.blocks[tile_bo.with_offset(-1,0)].mv;
          let rf2 = cw.bc.blocks[tile_bo.with_offset(-1,0)].ref_frames;
          luma_mode.predict_inter(fi, tile_rect, p, po, &mut rec.subregion_mut(area), 2, 4, rf2, mv2);
          let po3 = PlaneOffset { x: po.x+2, y: po.y };
          let area3 = Area::StartingAt { x: po3.x, y: po3.y };
          luma_mode.predict_inter(fi, tile_rect, p, po3, &mut rec.subregion_mut(area3), 2, 4, ref_frames, mvs);
        }
      }
    } else {
      luma_mode.predict_inter(fi, tile_rect, p, po, &mut rec.subregion_mut(area),
                              plane_bsize.width(), plane_bsize.height(),
                              ref_frames, mvs);
    }
  }
}

/// Records `mv` for every 4x4 (mode-info) unit covered by the block at
/// `tile_bo` into the tile's per-reference motion-vector grid, clipped to
/// the tile bounds.
pub fn save_block_motion<T: Pixel>(
  ts: &mut TileStateMut<'_, T>,
  bsize: BlockSize, tile_bo: BlockOffset,
  ref_frame: usize, mv: MotionVector,
) {
  let tile_mvs = &mut ts.mvs[ref_frame];
  let tile_bo_x_end = (tile_bo.x + bsize.width_mi()).min(ts.mi_width);
  let tile_bo_y_end = (tile_bo.y + bsize.height_mi()).min(ts.mi_height);
  for mi_y in tile_bo.y..tile_bo_y_end {
    for mi_x in tile_bo.x..tile_bo_x_end {
      tile_mvs[mi_y][mi_x] = mv;
    }
  }
}

/// First stage of block coding: records the skip flag, writes the
/// (pre-skip or post-skip) segmentation symbol and the skip symbol, and
/// returns whether CDEF coding is now active for this superblock.
pub fn encode_block_a<T: Pixel>(
  seq: &Sequence, ts: &TileStateMut<'_, T>,
  cw: &mut ContextWriter, w: &mut dyn Writer,
  bsize: BlockSize, tile_bo: BlockOffset, skip: bool
) -> bool {
  cw.bc.blocks.set_skip(tile_bo, bsize, skip);
  if ts.segmentation.enabled && ts.segmentation.update_map && ts.segmentation.preskip {
    cw.write_segmentation(w, tile_bo, bsize, false, ts.segmentation.last_active_segid);
  }
  cw.write_skip(w, tile_bo, skip);
  if ts.segmentation.enabled && ts.segmentation.update_map && !ts.segmentation.preskip {
    cw.write_segmentation(w,
                          tile_bo, bsize, skip, ts.segmentation.last_active_segid);
  }
  // A non-skip block enables CDEF coding for the superblock.
  if !skip && seq.enable_cdef {
    cw.bc.cdef_coded = true;
  }
  cw.bc.cdef_coded
}

/// Second stage of block coding: updates the block context (size, mode, tx
/// size, references, MVs), writes the mode/MV/intra symbols in bitstream
/// order, then runs prediction and codes the transform blocks. Returns the
/// accumulated tx-domain distortion from the transform coding.
pub fn encode_block_b<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, w: &mut dyn Writer,
  luma_mode: PredictionMode, chroma_mode: PredictionMode,
  ref_frames: [RefType; 2], mvs: [MotionVector; 2],
  bsize: BlockSize, tile_bo: BlockOffset, skip: bool,
  cfl: CFLParams, tx_size: TxSize, tx_type: TxType,
  mode_context: usize, mv_stack: &[CandidateMV],
  rdo_type: RDOType, for_rdo_use: bool
) -> i64 {
  let is_inter = !luma_mode.is_intra();
  // Inter blocks use the same mode for luma and chroma.
  if is_inter { assert!(luma_mode == chroma_mode); };
  let sb_size = if fi.sequence.use_128x128_superblock {
    BlockSize::BLOCK_128X128
  } else {
    BlockSize::BLOCK_64X64
  };
  let PlaneConfig { xdec, ydec, .. } = ts.input.planes[1].cfg;
  if skip {
    cw.bc.reset_skip_context(tile_bo, bsize, xdec, ydec);
  }
  cw.bc.blocks.set_block_size(tile_bo, bsize);
  cw.bc.blocks.set_mode(tile_bo, bsize, luma_mode);
  cw.bc.blocks.set_tx_size(tile_bo, bsize, tx_size);
  cw.bc.blocks.set_ref_frames(tile_bo, bsize, ref_frames);
  cw.bc.blocks.set_motion_vectors(tile_bo, bsize, mvs);

  //write_q_deltas();
  if cw.bc.code_deltas && ts.deblock.block_deltas_enabled && (bsize < sb_size || !skip) {
    cw.write_block_deblock_deltas(w, tile_bo, ts.deblock.block_delta_multi);
  }
  cw.bc.code_deltas = false;

  if fi.frame_type == FrameType::INTER {
    cw.write_is_inter(w, tile_bo, is_inter);
    if is_inter {
      cw.fill_neighbours_ref_counts(tile_bo);
      cw.write_ref_frames(w, fi, tile_bo);

      // Compound modes are signalled differently from single-reference ones.
      if luma_mode >= PredictionMode::NEAREST_NEARESTMV {
        cw.write_compound_mode(w, luma_mode, mode_context);
      } else {
        cw.write_inter_mode(w, luma_mode, mode_context);
      }

      let ref_mv_idx = 0;
      let num_mv_found = mv_stack.len();

      if luma_mode == PredictionMode::NEWMV || luma_mode == PredictionMode::NEW_NEWMV {
        if luma_mode == PredictionMode::NEW_NEWMV { assert!(num_mv_found >= 2); }
        // Signal which candidate of the MV stack is the reference (DRL index).
        for idx in 0..2 {
          if num_mv_found > idx + 1 {
            let drl_mode = ref_mv_idx > idx;
            let
              ctx: usize = (mv_stack[idx].weight < REF_CAT_LEVEL) as usize
              + (mv_stack[idx + 1].weight < REF_CAT_LEVEL) as usize;
            cw.write_drl_mode(w, drl_mode, ctx);
            if !drl_mode { break; }
          }
        }
      }

      // Predicted MVs the coded MV residuals are relative to.
      let ref_mvs = if num_mv_found > 0 {
        [mv_stack[ref_mv_idx].this_mv, mv_stack[ref_mv_idx].comp_mv]
      } else {
        [MotionVector::default(); 2]
      };

      let mv_precision = if fi.force_integer_mv != 0 {
        MvSubpelPrecision::MV_SUBPEL_NONE
      } else if fi.allow_high_precision_mv {
        MvSubpelPrecision::MV_SUBPEL_HIGH_PRECISION
      } else {
        MvSubpelPrecision::MV_SUBPEL_LOW_PRECISION
      };

      // Write the new MVs required by the chosen mode.
      if luma_mode == PredictionMode::NEWMV ||
        luma_mode == PredictionMode::NEW_NEWMV ||
        luma_mode == PredictionMode::NEW_NEARESTMV {
        cw.write_mv(w, mvs[0], ref_mvs[0], mv_precision);
      }
      if luma_mode == PredictionMode::NEW_NEWMV ||
        luma_mode == PredictionMode::NEAREST_NEWMV {
        cw.write_mv(w, mvs[1], ref_mvs[1], mv_precision);
      }

      if luma_mode >= PredictionMode::NEAR0MV && luma_mode <= PredictionMode::NEAR2MV {
        // NEARnMV selects candidate n+1 from the MV stack via DRL bits.
        let ref_mv_idx = luma_mode as usize - PredictionMode::NEAR0MV as usize + 1;
        if luma_mode != PredictionMode::NEAR0MV { assert!(num_mv_found > ref_mv_idx); }

        for idx in 1..3 {
          if num_mv_found > idx + 1 {
            let drl_mode = ref_mv_idx > idx;
            let ctx: usize = (mv_stack[idx].weight < REF_CAT_LEVEL) as usize
              + (mv_stack[idx + 1].weight < REF_CAT_LEVEL) as usize;

            cw.write_drl_mode(w, drl_mode, ctx);
            if !drl_mode { break; }
          }
        }
        // Sanity-check that the chosen MVs match the candidate list.
        if mv_stack.len() > 1 {
          assert!(mv_stack[ref_mv_idx].this_mv.row == mvs[0].row);
          assert!(mv_stack[ref_mv_idx].this_mv.col == mvs[0].col);
        } else {
          assert!(0 == mvs[0].row);
          assert!(0 == mvs[0].col);
        }
      } else if luma_mode == PredictionMode::NEARESTMV {
        if mv_stack.is_empty() {
          assert_eq!(mvs[0].row, 0);
          assert_eq!(mvs[0].col, 0);
        } else {
          assert_eq!(mvs[0].row, mv_stack[0].this_mv.row);
          assert_eq!(mvs[0].col, mv_stack[0].this_mv.col);
        }
      }
    } else {
      cw.write_intra_mode(w, bsize, luma_mode);
    }
  } else {
    // Key-frame intra modes use a dedicated symbol coder.
    cw.write_intra_mode_kf(w, tile_bo, luma_mode);
  }

  if !is_inter {
    if luma_mode.is_directional() && bsize >= BlockSize::BLOCK_8X8 {
      // Angle delta is always coded as 0 here.
      cw.write_angle_delta(w, 0, luma_mode);
    }
    if has_chroma(tile_bo, bsize, xdec, ydec) {
      cw.write_intra_uv_mode(w, chroma_mode, luma_mode, bsize);
      if chroma_mode.is_cfl() {
        assert!(bsize.cfl_allowed());
        cw.write_cfl_alphas(w, cfl);
      }
      if chroma_mode.is_directional() && bsize >= BlockSize::BLOCK_8X8 {
        cw.write_angle_delta(w, 0, chroma_mode);
      }
    }
    // TODO: Extra condition related to palette mode, see `read_filter_intra_mode_info` in decodemv.c
    if luma_mode == PredictionMode::DC_PRED && bsize.width() <= 32 && bsize.height() <= 32 {
      cw.write_use_filter_intra(w,false, bsize); // Always turn off FILTER_INTRA
    }
  }

  // write tx_size here (for now, intra frame only)
  // TODO: Add new field tx_mode to fi, then Use the condition, fi.tx_mode == TX_MODE_SELECT
  if fi.tx_mode_select {
    if bsize.greater_than(BlockSize::BLOCK_4X4) && !(is_inter && skip) {
      if !is_inter {
        cw.write_tx_size_intra(w, tile_bo, bsize, tx_size);
        cw.bc.update_tx_size_context(tile_bo, bsize, tx_size, false);
      } /*else {  // TODO (yushin): write_tx_size_inter(), i.e. var-tx
      }*/
    } else {
      cw.bc.update_tx_size_context(tile_bo, bsize, tx_size, is_inter && skip);
    }
  }

  if is_inter {
    motion_compensate(fi, ts, cw, luma_mode, ref_frames, mvs, bsize, tile_bo, false);
    write_tx_tree(fi, ts, cw, w, luma_mode, tile_bo, bsize, tx_size, tx_type,
                  skip, false, rdo_type, for_rdo_use)
  } else {
    write_tx_blocks(fi, ts, cw, w, luma_mode, chroma_mode, tile_bo, bsize,
                    tx_size, tx_type, skip, cfl, false, rdo_type, for_rdo_use)
  }
}

/// Fills `ac` with the (subsampled, DC-removed) reconstructed luma samples
/// used as the chroma-from-luma (CfL) prediction input for the block at
/// `tile_bo`.
pub fn luma_ac<T: Pixel>(
  ac: &mut [i16], ts: &mut TileStateMut<'_, T>, tile_bo: BlockOffset,
  bsize: BlockSize
) {
  let PlaneConfig { xdec, ydec, ..
  } = ts.input.planes[1].cfg;
  let plane_bsize = get_plane_block_size(bsize, xdec, ydec);
  // For sub-8x8 blocks, shift to the luma block co-located with the chroma.
  let bo = if bsize.is_sub8x8(xdec, ydec) {
    let offset = bsize.sub8x8_offset(xdec, ydec);
    tile_bo.with_offset(offset.0, offset.1)
  } else {
    tile_bo
  };
  let rec = &ts.rec.planes[0];
  let luma = &rec.subregion(Area::BlockStartingAt { bo });

  let mut sum: i32 = 0;
  for sub_y in 0..plane_bsize.height() {
    for sub_x in 0..plane_bsize.width() {
      let y = sub_y << ydec;
      let x = sub_x << xdec;
      // Average the luma samples covered by one chroma sample.
      let mut sample: i16 = i16::cast_from(luma[y][x]);
      if xdec != 0 {
        sample += i16::cast_from(luma[y][x + 1]);
      }
      if ydec != 0 {
        debug_assert!(xdec != 0);
        sample += i16::cast_from(luma[y + 1][x])
          + i16::cast_from(luma[y + 1][x + 1]);
      }
      // Scale so all subsamplings produce values in the same fixed-point range.
      sample <<= 3 - xdec - ydec;
      ac[sub_y * plane_bsize.width() + sub_x] = sample;
      sum += sample as i32;
    }
  }
  // Subtract the (rounded) block average to leave only the AC contribution.
  let shift = plane_bsize.width_log2() + plane_bsize.height_log2();
  let average = ((sum + (1 << (shift - 1))) >> shift) as i16;
  for sub_y in 0..plane_bsize.height() {
    for sub_x in 0..plane_bsize.width() {
      ac[sub_y * plane_bsize.width() + sub_x] -= average;
    }
  }
}

/// Codes all transform blocks of an intra partition: luma tx blocks first,
/// then (unless `luma_only`) both chroma planes, computing the CfL AC
/// buffer when the chroma mode needs it. Returns the accumulated tx-domain
/// distortion.
pub fn write_tx_blocks<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, w: &mut dyn Writer,
  luma_mode: PredictionMode, chroma_mode: PredictionMode, tile_bo: BlockOffset,
  bsize: BlockSize, tx_size: TxSize, tx_type: TxType, skip: bool,
  cfl: CFLParams, luma_only: bool, rdo_type: RDOType, for_rdo_use: bool
) -> i64 {
  // Number of tx blocks per row/column within the partition.
  let bw = bsize.width_mi() / tx_size.width_mi();
  let bh = bsize.height_mi() / tx_size.height_mi();
  let qidx = get_qidx(fi, ts, cw, tile_bo);

  let PlaneConfig { xdec, ydec, ..
  } = ts.input.planes[1].cfg;
  let mut ac: AlignedArray<[i16; 32 * 32]> = UninitializedAlignedArray();
  let mut tx_dist: i64 = 0;
  let do_chroma = has_chroma(tile_bo, bsize, xdec, ydec);

  ts.qc.update(qidx, tx_size, luma_mode.is_intra(), fi.sequence.bit_depth,
               fi.dc_delta_q[0], 0);

  // Luma: one encode_tx_block call per tx block of the partition.
  for by in 0..bh {
    for bx in 0..bw {
      let tx_bo = BlockOffset {
        x: tile_bo.x + bx * tx_size.width_mi(),
        y: tile_bo.y + by * tx_size.height_mi()
      };

      let po = tx_bo.plane_offset(&ts.input.planes[0].cfg);
      let (_, dist) =
        encode_tx_block(
          fi, ts, cw, w, 0, tx_bo, luma_mode, tx_size, tx_type, bsize, po,
          skip, &ac.array, 0, rdo_type, for_rdo_use
        );
      assert!(!fi.use_tx_domain_distortion || !for_rdo_use || skip || dist >= 0);
      tx_dist += dist;
    }
  }

  if luma_only { return tx_dist };

  let uv_tx_size = bsize.largest_uv_tx_size(xdec, ydec);
  let mut bw_uv = (bw * tx_size.width_mi()) >> xdec;
  let mut bh_uv = (bh * tx_size.height_mi()) >> ydec;

  // Sub-8x8 partitions that still carry chroma get one chroma tx block.
  if (bw_uv == 0 || bh_uv == 0) && do_chroma {
    bw_uv = 1;
    bh_uv = 1;
  }

  bw_uv /= uv_tx_size.width_mi();
  bh_uv /= uv_tx_size.height_mi();

  let plane_bsize = get_plane_block_size(bsize, xdec, ydec);

  if chroma_mode.is_cfl() {
    luma_ac(&mut ac.array, ts, tile_bo, bsize);
  }

  if bw_uv > 0 && bh_uv > 0 {
    // TODO: Disable these asserts temporarilly, since chroma_sampling_422_aom and chroma_sampling_444_aom
    // tests seems trigerring them as well, which should not
    // TODO: Not valid if partition > 64x64 && chroma != 420
    /*if xdec == 1 && ydec == 1 {
      assert!(bw_uv == 1, "bw_uv = {}, bh_uv = {}", bw_uv, bh_uv);
      assert!(bh_uv == 1, "bw_uv = {}, bh_uv = {}", bw_uv, bh_uv);
    }*/
    // Large chroma transforms are restricted to DCT_DCT.
    let uv_tx_type = if uv_tx_size.width() >= 32 || uv_tx_size.height() >= 32 {
      TxType::DCT_DCT
    } else {
      uv_intra_mode_to_tx_type_context(chroma_mode)
    };

    for p in 1..3 {
      ts.qc.update(fi.base_q_idx, uv_tx_size, true, fi.sequence.bit_depth,
                   fi.dc_delta_q[p], fi.ac_delta_q[p]);
      let alpha = cfl.alpha(p - 1);
      for by in 0..bh_uv {
        for bx in 0..bw_uv {
          // Sub-8x8: shift the block offset so chroma aligns with the luma
          // block it is predicted from.
          let tx_bo = BlockOffset {
            x: tile_bo.x + ((bx * uv_tx_size.width_mi()) << xdec) -
              ((bw * tx_size.width_mi() == 1) as usize) * xdec,
            y: tile_bo.y + ((by * uv_tx_size.height_mi()) << ydec) -
              ((bh * tx_size.height_mi() == 1) as usize) * ydec
          };

          let mut po = tile_bo.plane_offset(&ts.input.planes[p].cfg);
          po.x += (bx * uv_tx_size.width()) as isize;
          po.y += (by * uv_tx_size.height()) as isize;

          let (_, dist) =
            encode_tx_block(fi, ts, cw, w, p, tx_bo, chroma_mode, uv_tx_size,
                            uv_tx_type, plane_bsize, po, skip, &ac.array, alpha,
                            rdo_type, for_rdo_use);
          assert!(!fi.use_tx_domain_distortion || !for_rdo_use || skip || dist >= 0);
          tx_dist += dist;
        }
      }
    }
  }

  tx_dist
}

// FIXME: For now, assume tx_mode is LARGEST_TX, so var-tx is not implemented yet,
// which means only one tx block exist for a inter mode partition.
/// Codes the transform tree of an inter partition: a single luma tx block
/// at the partition size, then (unless `luma_only`) one tx block per
/// chroma plane. Returns the accumulated tx-domain distortion.
pub fn write_tx_tree<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>, cw: &mut ContextWriter, w: &mut dyn Writer,
  luma_mode: PredictionMode, tile_bo: BlockOffset,
  bsize: BlockSize, tx_size: TxSize, tx_type: TxType, skip: bool,
  luma_only: bool, rdo_type: RDOType, for_rdo_use: bool
) -> i64 {
  let bw = bsize.width_mi() / tx_size.width_mi();
  let bh = bsize.height_mi() / tx_size.height_mi();
  let qidx = get_qidx(fi, ts, cw, tile_bo);

  let PlaneConfig { xdec, ydec, ..
  } = ts.input.planes[1].cfg;
  // Inter blocks carry no CfL input; use an empty AC buffer.
  let ac = &[0i16; 0];
  let mut tx_dist: i64 = 0;

  ts.qc.update(qidx, tx_size, luma_mode.is_intra(), fi.sequence.bit_depth,
               fi.dc_delta_q[0], 0);

  let po = tile_bo.plane_offset(&ts.input.planes[0].cfg);
  let (has_coeff, dist) = encode_tx_block(
    fi, ts, cw, w, 0, tile_bo, luma_mode, tx_size, tx_type, bsize, po, skip,
    ac, 0, rdo_type, for_rdo_use
  );
  assert!(!fi.use_tx_domain_distortion || !for_rdo_use || skip || dist >= 0);
  tx_dist += dist;

  if luma_only { return tx_dist };

  let uv_tx_size = bsize.largest_uv_tx_size(xdec, ydec);
  let mut bw_uv = (bw * tx_size.width_mi()) >> xdec;
  let mut bh_uv = (bh * tx_size.height_mi()) >> ydec;

  // Sub-8x8 partitions that still carry chroma get one chroma tx block.
  if (bw_uv == 0 || bh_uv == 0) && has_chroma(tile_bo, bsize, xdec, ydec) {
    bw_uv = 1;
    bh_uv = 1;
  }

  bw_uv /= uv_tx_size.width_mi();
  bh_uv /= uv_tx_size.height_mi();

  let plane_bsize = get_plane_block_size(bsize, xdec, ydec);

  if bw_uv > 0 && bh_uv > 0 {
    // TODO: Disable these asserts temporarilly, since chroma_sampling_422_aom and chroma_sampling_444_aom
    // tests seems trigerring them as well, which should not
    // TODO: Not valid if partition > 64x64 && chroma != 420
    /*if xdec == 1 && ydec == 1 {
      debug_assert!(bw_uv == 1, "bw_uv = {}, bh_uv = {}", bw_uv, bh_uv);
      debug_assert!(bh_uv == 1, "bw_uv = {}, bh_uv = {}", bw_uv, bh_uv);
    }*/
    let uv_tx_type = if has_coeff {tx_type} else {TxType::DCT_DCT}; // if inter mode, uv_tx_type == tx_type

    for p in 1..3 {
      ts.qc.update(qidx, uv_tx_size, false, fi.sequence.bit_depth,
                   fi.dc_delta_q[p], fi.ac_delta_q[p]);
      // Sub-8x8: shift the block offset so chroma aligns with its luma block.
      let tx_bo = BlockOffset {
        x: tile_bo.x - ((bw * tx_size.width_mi() == 1) as usize),
        y: tile_bo.y - ((bh * tx_size.height_mi() == 1) as usize)
      };

      let po = tile_bo.plane_offset(&ts.input.planes[p].cfg);
      let (_, dist) =
        encode_tx_block(fi, ts, cw, w, p, tx_bo, luma_mode, uv_tx_size,
                        uv_tx_type, plane_bsize, po, skip, ac, 0, rdo_type,
                        for_rdo_use);
      assert!(!fi.use_tx_domain_distortion || !for_rdo_use || skip || dist >= 0);
      tx_dist += dist;
    }
  }

  tx_dist
}

/// Encodes a block using an already-made RDO mode decision: re-derives the
/// MV candidate list, then runs encode_block_a/encode_block_b, routing
/// symbols to the pre- or post-CDEF writer depending on whether CDEF coding
/// is active.
pub fn encode_block_with_modes<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, w_pre_cdef: &mut dyn Writer,
  w_post_cdef: &mut dyn Writer, bsize: BlockSize, tile_bo: BlockOffset,
  mode_decision: &RDOPartitionOutput, rdo_type: RDOType
) {
  let (mode_luma, mode_chroma) =
    (mode_decision.pred_mode_luma, mode_decision.pred_mode_chroma);
  let cfl = mode_decision.pred_cfl_params;
  let ref_frames = mode_decision.ref_frames;
  let mvs = mode_decision.mvs;
  let skip = mode_decision.skip;
  let mut cdef_coded = cw.bc.cdef_coded;
  let (tx_size, tx_type) = (mode_decision.tx_size, mode_decision.tx_type);

  // The stored decision must match what rdo_tx_size_type would pick now.
  debug_assert!((tx_size, tx_type) ==
                rdo_tx_size_type(fi, ts, cw, bsize, tile_bo, mode_luma, ref_frames, mvs, skip));

  let mut mv_stack = Vec::new();
  let is_compound = ref_frames[1] != NONE_FRAME;
  let mode_context = cw.find_mvrefs(tile_bo, ref_frames, &mut mv_stack, bsize, fi, is_compound);

  cdef_coded = encode_block_a(&fi.sequence, ts, cw, if cdef_coded {w_post_cdef} else {w_pre_cdef},
                              bsize, tile_bo, skip);
  encode_block_b(fi, ts, cw, if cdef_coded {w_post_cdef} else {w_pre_cdef},
                 mode_luma, mode_chroma, ref_frames, mvs, bsize, tile_bo, skip, cfl,
                 tx_size, tx_type, mode_context, &mv_stack, rdo_type, false);
}

fn encode_partition_bottomup<T: Pixel>(
  fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>,
  cw: &mut ContextWriter, w_pre_cdef: &mut dyn Writer,
  w_post_cdef: &mut dyn Writer, bsize: BlockSize, tile_bo: BlockOffset,
  pmvs: &mut [[Option<MotionVector>; REF_FRAMES]; 5],
  ref_rd_cost: f64
) -> (RDOOutput) {
  let rdo_type = RDOType::PixelDistRealRate;
  let mut rd_cost = std::f64::MAX;
  let mut best_rd = std::f64::MAX;
  let mut rdo_output = RDOOutput {
    rd_cost,
    part_type: PartitionType::PARTITION_INVALID,
    part_modes: Vec::new()
  };

  // Blocks starting outside the tile's block grid code nothing.
  if tile_bo.x >= cw.bc.blocks.cols() || tile_bo.y >= cw.bc.blocks.rows() {
    return rdo_output
  }

  let bsw = bsize.width_mi();
  let bsh = bsize.height_mi();
  let is_square = bsize.is_sqr();

  // Always split if the current partition is too large
  let must_split =
(tile_bo.x + bsw as usize > ts.mi_width || tile_bo.y + bsh as usize > ts.mi_height || bsize.greater_than(BlockSize::BLOCK_64X64)) && is_square; // must_split overrides the minimum partition size when applicable let can_split = (bsize > fi.min_partition_size && is_square) || must_split; let mut best_partition = PartitionType::PARTITION_INVALID; let cw_checkpoint = cw.checkpoint(); let w_pre_checkpoint = w_pre_cdef.checkpoint(); let w_post_checkpoint = w_post_cdef.checkpoint(); // Code the whole block if !must_split { let cost = if bsize.gte(BlockSize::BLOCK_8X8) && is_square { let w: &mut dyn Writer = if cw.bc.cdef_coded {w_post_cdef} else {w_pre_cdef}; let tell = w.tell_frac(); cw.write_partition(w, tile_bo, PartitionType::PARTITION_NONE, bsize); (w.tell_frac() - tell) as f64 * fi.lambda / ((1 << OD_BITRES) as f64) } else { 0.0 }; let pmv_idx = if bsize.greater_than(BlockSize::BLOCK_32X32) { 0 } else { ((tile_bo.x & 32) >> 5) + ((tile_bo.y & 32) >> 4) + 1 }; let spmvs = &mut pmvs[pmv_idx]; let mode_decision = rdo_mode_decision(fi, ts, cw, bsize, tile_bo, spmvs); if !mode_decision.pred_mode_luma.is_intra() { // Fill the saved motion structure save_block_motion( ts, mode_decision.bsize, mode_decision.bo, mode_decision.ref_frames[0].to_index(), mode_decision.mvs[0] ); } rd_cost = mode_decision.rd_cost + cost; best_partition = PartitionType::PARTITION_NONE; best_rd = rd_cost; rdo_output.part_modes.push(mode_decision.clone()); if !can_split { encode_block_with_modes(fi, ts, cw, w_pre_cdef, w_post_cdef, bsize, tile_bo, &mode_decision, rdo_type); } } // Test all partition types other than PARTITION_NONE by comparing their RD costs if can_split { debug_assert!(is_square); for &partition in RAV1E_PARTITION_TYPES { if partition == PartitionType::PARTITION_NONE { continue; } if fi.sequence.chroma_sampling == ChromaSampling::Cs422 && partition == PartitionType::PARTITION_VERT { continue; } if must_split { let cbw = (ts.mi_width - tile_bo.x).min(bsw); // clipped block width, 
i.e. having effective pixels let cbh = (ts.mi_height - tile_bo.y).min(bsh); let mut split_vert = false; let mut split_horz = false; if cbw == bsw/2 && cbh == bsh { split_vert = true; } if cbh == bsh/2 && cbw == bsw { split_horz = true; } if !split_horz && partition == PartitionType::PARTITION_HORZ { continue; }; if !split_vert && partition == PartitionType::PARTITION_VERT { continue; }; } cw.rollback(&cw_checkpoint); w_pre_cdef.rollback(&w_pre_checkpoint); w_post_cdef.rollback(&w_post_checkpoint); let subsize = bsize.subsize(partition); let hbsw = subsize.width_mi(); // Half the block size width in blocks let hbsh = subsize.height_mi(); // Half the block size height in blocks let mut child_modes: Vec<RDOPartitionOutput> = Vec::new(); rd_cost = 0.0; if bsize.gte(BlockSize::BLOCK_8X8) { let w: &mut dyn Writer = if cw.bc.cdef_coded {w_post_cdef} else {w_pre_cdef}; let tell = w.tell_frac(); cw.write_partition(w, tile_bo, partition, bsize); rd_cost = (w.tell_frac() - tell) as f64 * fi.lambda / ((1 << OD_BITRES) as f64); } let four_partitions = [ tile_bo, BlockOffset{ x: tile_bo.x + hbsw as usize, y: tile_bo.y }, BlockOffset{ x: tile_bo.x, y: tile_bo.y + hbsh as usize }, BlockOffset{ x: tile_bo.x + hbsw as usize, y: tile_bo.y + hbsh as usize } ]; let partitions = get_sub_partitions(&four_partitions, partition); let mut early_exit = false; // If either of horz or vert partition types is being tested, // two partitioned rectangles, defined in 'partitions', of the current block // is passed to encode_partition_bottomup() for offset in partitions { let child_rdo_output = encode_partition_bottomup( fi, ts, cw, w_pre_cdef, w_post_cdef, subsize, offset, pmvs,//&best_decision.mvs[0] best_rd ); let cost = child_rdo_output.rd_cost; assert!(cost >= 0.0); if cost != std::f64::MAX { rd_cost += cost; if fi.enable_early_exit && (rd_cost >= best_rd || rd_cost >= ref_rd_cost) { assert!(cost != std::f64::MAX); early_exit = true; break; } else if partition != PartitionType::PARTITION_SPLIT 
{ child_modes.push(child_rdo_output.part_modes[0].clone()); } } }; if !early_exit && rd_cost < best_rd { best_rd = rd_cost; best_partition = partition; if partition != PartitionType::PARTITION_SPLIT { assert!(!child_modes.is_empty()); rdo_output.part_modes = child_modes; } } } debug_assert!(best_partition != PartitionType::PARTITION_INVALID); // If the best partition is not PARTITION_SPLIT, recode it if best_partition != PartitionType::PARTITION_SPLIT { assert!(!rdo_output.part_modes.is_empty()); cw.rollback(&cw_checkpoint); w_pre_cdef.rollback(&w_pre_checkpoint); w_post_cdef.rollback(&w_post_checkpoint); assert!(best_partition != PartitionType::PARTITION_NONE || !must_split); let subsize = bsize.subsize(best_partition); if bsize.gte(BlockSize::BLOCK_8X8) { let w: &mut dyn Writer = if cw.bc.cdef_coded {w_post_cdef} else {w_pre_cdef}; cw.write_partition(w, tile_bo, best_partition, bsize); } for mode in rdo_output.part_modes.clone() { assert!(subsize == mode.bsize); if !mode.pred_mode_luma.is_intra() { save_block_motion( ts, mode.bsize, mode.bo, mode.ref_frames[0].to_index(), mode.mvs[0] ); } // FIXME: redundant block re-encode encode_block_with_modes(fi, ts, cw, w_pre_cdef, w_post_cdef, mode.bsize, mode.bo, &mode, rdo_type); } } } assert!(best_partition != PartitionType::PARTITION_INVALID); if is_square && bsize.gte(BlockSize::BLOCK_8X8) && (bsize == BlockSize::BLOCK_8X8 || best_partition != PartitionType::PARTITION_SPLIT) { cw.bc.update_partition_context(tile_bo, bsize.subsize(best_partition), bsize); } rdo_output.rd_cost = best_rd; rdo_output.part_type = best_partition; if best_partition != PartitionType::PARTITION_NONE { rdo_output.part_modes.clear(); } rdo_output } fn encode_partition_topdown<T: Pixel>( fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>, cw: &mut ContextWriter, w_pre_cdef: &mut dyn Writer, w_post_cdef: &mut dyn Writer, bsize: BlockSize, tile_bo: BlockOffset, block_output: &Option<RDOOutput>, pmvs: &mut [[Option<MotionVector>; REF_FRAMES]; 5] 
) { if tile_bo.x >= cw.bc.blocks.cols() || tile_bo.y >= cw.bc.blocks.rows() { return; } let bsw = bsize.width_mi(); let bsh = bsize.height_mi(); let is_square = bsize.is_sqr(); let rdo_type = RDOType::PixelDistRealRate; // Always split if the current partition is too large let must_split = (tile_bo.x + bsw as usize > ts.mi_width || tile_bo.y + bsh as usize > ts.mi_height || bsize.greater_than(BlockSize::BLOCK_64X64)) && is_square; let mut rdo_output = block_output.clone().unwrap_or(RDOOutput { part_type: PartitionType::PARTITION_INVALID, rd_cost: std::f64::MAX, part_modes: Vec::new() }); let partition: PartitionType; let mut split_vert = false; let mut split_horz = false; if must_split { let cbw = (ts.mi_width - tile_bo.x).min(bsw); // clipped block width, i.e. having effective pixels let cbh = (ts.mi_height - tile_bo.y).min(bsh); if cbw == bsw/2 && cbh == bsh && fi.sequence.chroma_sampling != ChromaSampling::Cs422 { split_vert = true; } if cbh == bsh/2 && cbw == bsw { split_horz = true; } } if must_split && (!split_vert && !split_horz) { // Oversized blocks are split automatically partition = PartitionType::PARTITION_SPLIT; } else if must_split || (bsize > fi.min_partition_size && is_square) { // Blocks of sizes within the supported range are subjected to a partitioning decision let mut partition_types: Vec<PartitionType> = Vec::new(); if must_split { partition_types.push(PartitionType::PARTITION_SPLIT); if split_horz { partition_types.push(PartitionType::PARTITION_HORZ); }; if split_vert { partition_types.push(PartitionType::PARTITION_VERT); }; } else { //partition_types.append(&mut RAV1E_PARTITION_TYPES.to_vec()); partition_types.push(PartitionType::PARTITION_NONE); partition_types.push(PartitionType::PARTITION_SPLIT); } rdo_output = rdo_partition_decision(fi, ts, cw, w_pre_cdef, w_post_cdef, bsize, tile_bo, &rdo_output, pmvs, &partition_types, rdo_type); partition = rdo_output.part_type; } else { // Blocks of sizes below the supported range are encoded directly 
partition = PartitionType::PARTITION_NONE; } assert!(PartitionType::PARTITION_NONE <= partition && partition < PartitionType::PARTITION_INVALID); let subsize = bsize.subsize(partition); if bsize.gte(BlockSize::BLOCK_8X8) && is_square { let w: &mut dyn Writer = if cw.bc.cdef_coded {w_post_cdef} else {w_pre_cdef}; cw.write_partition(w, tile_bo, partition, bsize); } match partition { PartitionType::PARTITION_NONE => { let part_decision = if !rdo_output.part_modes.is_empty() { // The optimal prediction mode is known from a previous iteration rdo_output.part_modes[0].clone() } else { let pmv_idx = if bsize.greater_than(BlockSize::BLOCK_32X32) { 0 } else { ((tile_bo.x & 32) >> 5) + ((tile_bo.y & 32) >> 4) + 1 }; let spmvs = &mut pmvs[pmv_idx]; // Make a prediction mode decision for blocks encoded with no rdo_partition_decision call (e.g. edges) rdo_mode_decision(fi, ts, cw, bsize, tile_bo, spmvs) }; let mut mode_luma = part_decision.pred_mode_luma; let mut mode_chroma = part_decision.pred_mode_chroma; let cfl = part_decision.pred_cfl_params; let skip = part_decision.skip; let ref_frames = part_decision.ref_frames; let mvs = part_decision.mvs; let mut cdef_coded = cw.bc.cdef_coded; // NOTE: Cannot avoid calling rdo_tx_size_type() here again, // because, with top-down partition RDO, the neighnoring contexts // of current partition can change, i.e. neighboring partitions can split down more. 
let (tx_size, tx_type) = rdo_tx_size_type(fi, ts, cw, bsize, tile_bo, mode_luma, ref_frames, mvs, skip); let mut mv_stack = Vec::new(); let is_compound = ref_frames[1] != NONE_FRAME; let mode_context = cw.find_mvrefs(tile_bo, ref_frames, &mut mv_stack, bsize, fi, is_compound); // TODO: proper remap when is_compound is true if !mode_luma.is_intra() { if is_compound && mode_luma != PredictionMode::GLOBAL_GLOBALMV { let match0 = mv_stack[0].this_mv.row == mvs[0].row && mv_stack[0].this_mv.col == mvs[0].col; let match1 = mv_stack[0].comp_mv.row == mvs[1].row && mv_stack[0].comp_mv.col == mvs[1].col; mode_luma = if match0 && match1 { PredictionMode::NEAREST_NEARESTMV } else if match0 { PredictionMode::NEAREST_NEWMV } else if match1 { PredictionMode::NEW_NEARESTMV } else { PredictionMode::NEW_NEWMV }; if mode_luma != PredictionMode::NEAREST_NEARESTMV && mvs[0].row == 0 && mvs[0].col == 0 && mvs[1].row == 0 && mvs[1].col == 0 { mode_luma = PredictionMode::GLOBAL_GLOBALMV; } mode_chroma = mode_luma; } else if !is_compound && mode_luma != PredictionMode::GLOBALMV { mode_luma = PredictionMode::NEWMV; for (c, m) in mv_stack.iter().take(4) .zip([PredictionMode::NEARESTMV, PredictionMode::NEAR0MV, PredictionMode::NEAR1MV, PredictionMode::NEAR2MV].iter()) { if c.this_mv.row == mvs[0].row && c.this_mv.col == mvs[0].col { mode_luma = *m; } } if mode_luma == PredictionMode::NEWMV && mvs[0].row == 0 && mvs[0].col == 0 { mode_luma = if mv_stack.is_empty() { PredictionMode::NEARESTMV } else if mv_stack.len() == 1 { PredictionMode::NEAR0MV } else { PredictionMode::GLOBALMV }; } mode_chroma = mode_luma; } save_block_motion( ts, part_decision.bsize, part_decision.bo, part_decision.ref_frames[0].to_index(), part_decision.mvs[0] ); } // FIXME: every final block that has gone through the RDO decision process is encoded twice cdef_coded = encode_block_a(&fi.sequence, ts, cw, if cdef_coded {w_post_cdef} else {w_pre_cdef}, bsize, tile_bo, skip); encode_block_b(fi, ts, cw, if cdef_coded 
{w_post_cdef} else {w_pre_cdef}, mode_luma, mode_chroma, ref_frames, mvs, bsize, tile_bo, skip, cfl, tx_size, tx_type, mode_context, &mv_stack, RDOType::PixelDistRealRate, false); }, PARTITION_SPLIT | PARTITION_HORZ | PARTITION_VERT => { if !rdo_output.part_modes.is_empty() { // The optimal prediction modes for each split block is known from an rdo_partition_decision() call assert!(subsize != BlockSize::BLOCK_INVALID); for mode in rdo_output.part_modes { // Each block is subjected to a new splitting decision encode_partition_topdown(fi, ts, cw, w_pre_cdef, w_post_cdef, subsize, mode.bo, &Some(RDOOutput { rd_cost: mode.rd_cost, part_type: PartitionType::PARTITION_NONE, part_modes: vec![mode] }), pmvs); } } else { let hbsw = subsize.width_mi(); // Half the block size width in blocks let hbsh = subsize.height_mi(); // Half the block size height in blocks let four_partitions = [ tile_bo, BlockOffset{ x: tile_bo.x + hbsw as usize, y: tile_bo.y }, BlockOffset{ x: tile_bo.x, y: tile_bo.y + hbsh as usize }, BlockOffset{ x: tile_bo.x + hbsw as usize, y: tile_bo.y + hbsh as usize } ]; let partitions = get_sub_partitions(&four_partitions, partition); partitions.iter().for_each(|&offset| { encode_partition_topdown( fi, ts, cw, w_pre_cdef, w_post_cdef, subsize, offset, &None, pmvs ); }); } }, _ => unreachable!(), } if is_square && bsize.gte(BlockSize::BLOCK_8X8) && (bsize == BlockSize::BLOCK_8X8 || partition != PartitionType::PARTITION_SPLIT) { cw.bc.update_partition_context(tile_bo, subsize, bsize); } } #[inline(always)] fn build_coarse_pmvs<T: Pixel>(fi: &FrameInvariants<T>, ts: &TileStateMut<'_, T>) -> Vec<[Option<MotionVector>; REF_FRAMES]> { assert!(!fi.sequence.use_128x128_superblock); if ts.mi_width >= 16 && ts.mi_height >= 16 { let mut frame_pmvs = Vec::with_capacity(ts.sb_width * ts.sb_height); for sby in 0..ts.sb_height { for sbx in 0..ts.sb_width { let sbo = SuperBlockOffset { x: sbx, y: sby }; let bo = sbo.block_offset(0, 0); let mut pmvs: [Option<MotionVector>; 
REF_FRAMES] = [None; REF_FRAMES]; for i in 0..INTER_REFS_PER_FRAME { let r = fi.ref_frames[i] as usize; if pmvs[r].is_none() { pmvs[r] = estimate_motion_ss4(fi, ts, BlockSize::BLOCK_64X64, r, bo); } } frame_pmvs.push(pmvs); } } frame_pmvs } else { // the block use for motion estimation would be smaller than the whole image vec![[None; REF_FRAMES]; ts.sb_width * ts.sb_height] } } fn get_initial_cdfcontext<T: Pixel>(fi: &FrameInvariants<T>) -> CDFContext { let cdf = if fi.primary_ref_frame == PRIMARY_REF_NONE { None } else { let ref_frame_idx = fi.ref_frames[fi.primary_ref_frame as usize] as usize; let ref_frame = fi.rec_buffer.frames[ref_frame_idx].as_ref(); ref_frame.map(|rec| rec.cdfs) }; // return the retrieved instance if any, a new one otherwise cdf.unwrap_or_else(|| CDFContext::new(fi.base_q_idx)) } fn encode_tile_group<T: Pixel>(fi: &FrameInvariants<T>, fs: &mut FrameState<T>) -> Vec<u8> { let mut fc = get_initial_cdfcontext(fi); let mut blocks = FrameBlocks::new(fi.w_in_b, fi.h_in_b); let mut ts = fs.as_tile_state_mut(); let mut tb = blocks.as_tile_blocks_mut(); let data = encode_tile(fi, &mut ts, &mut fc, &mut tb); /* TODO: Don't apply if lossless */ deblock_filter_optimize(fi, fs, &blocks); if fs.deblock.levels[0] != 0 || fs.deblock.levels[1] != 0 { deblock_filter_frame(fs, &blocks, fi.sequence.bit_depth); } // Until the loop filters are pipelined, we'll need to keep // around a copy of both the pre- and post-cdef frame. 
let pre_cdef_frame = fs.rec.clone(); /* TODO: Don't apply if lossless */ if fi.sequence.enable_cdef { cdef_filter_frame(fi, &mut fs.rec, &blocks); } /* TODO: Don't apply if lossless */ if fi.sequence.enable_restoration { fs.restoration.lrf_filter_frame(&mut fs.rec, &pre_cdef_frame, &fi); } if fi.config.train_rdo { eprintln!("train rdo"); if let Ok(mut file) = File::open("rdo.dat") { let mut data = vec![]; file.read_to_end(&mut data).unwrap(); fs.t.merge_in(&deserialize(data.as_slice()).unwrap()); } let mut rdo_file = File::create("rdo.dat").unwrap(); rdo_file.write_all(&serialize(&fs.t).unwrap()).unwrap(); fs.t.print_code(); } fs.cdfs = fc; fs.cdfs.reset_counts(); data } fn encode_tile<'a, T: Pixel>( fi: &FrameInvariants<T>, ts: &mut TileStateMut<'_, T>, fc: &'a mut CDFContext, blocks: &'a mut TileBlocksMut<'a>, ) -> Vec<u8> { let mut w = WriterEncoder::new(); let estimate_motion_ss2 = if fi.config.speed_settings.diamond_me { crate::me::DiamondSearch::estimate_motion_ss2 } else { crate::me::FullSearch::estimate_motion_ss2 }; let bc = BlockContext::new(blocks); // For now, restoration unit size is locked to superblock size. 
let mut cw = ContextWriter::new(fc, bc); let tile_pmvs = build_coarse_pmvs(fi, ts); // main loop for sby in 0..ts.sb_height { cw.bc.reset_left_contexts(); for sbx in 0..ts.sb_width { let mut w_pre_cdef = WriterRecorder::new(); let mut w_post_cdef = WriterRecorder::new(); let tile_sbo = SuperBlockOffset { x: sbx, y: sby }; let tile_bo = tile_sbo.block_offset(0, 0); cw.bc.cdef_coded = false; cw.bc.code_deltas = fi.delta_q_present; // Do subsampled ME let mut pmvs: [[Option<MotionVector>; REF_FRAMES]; 5] = [[None; REF_FRAMES]; 5]; if ts.mi_width >= 8 && ts.mi_height >= 8 { for i in 0..INTER_REFS_PER_FRAME { let r = fi.ref_frames[i] as usize; if pmvs[0][r].is_none() { pmvs[0][r] = tile_pmvs[sby * ts.sb_width + sbx][r]; if let Some(pmv) = pmvs[0][r] { let pmv_w = if sbx > 0 { tile_pmvs[sby * ts.sb_width + sbx - 1][r] } else { None }; let pmv_e = if sbx < ts.sb_width - 1 { tile_pmvs[sby * ts.sb_width + sbx + 1][r] } else { None }; let pmv_n = if sby > 0 { tile_pmvs[sby * ts.sb_width + sbx - ts.sb_width][r] } else { None }; let pmv_s = if sby < ts.sb_height - 1 { tile_pmvs[sby * ts.sb_width + sbx + ts.sb_width][r] } else { None }; assert!(!fi.sequence.use_128x128_superblock); pmvs[1][r] = estimate_motion_ss2( fi, ts, BlockSize::BLOCK_32X32, r, tile_sbo.block_offset(0, 0), &[Some(pmv), pmv_w, pmv_n], i ); pmvs[2][r] = estimate_motion_ss2( fi, ts, BlockSize::BLOCK_32X32, r, tile_sbo.block_offset(8, 0), &[Some(pmv), pmv_e, pmv_n], i ); pmvs[3][r] = estimate_motion_ss2( fi, ts, BlockSize::BLOCK_32X32, r, tile_sbo.block_offset(0, 8), &[Some(pmv), pmv_w, pmv_s], i ); pmvs[4][r] = estimate_motion_ss2( fi, ts, BlockSize::BLOCK_32X32, r, tile_sbo.block_offset(8, 8), &[Some(pmv), pmv_e, pmv_s], i ); if let Some(mv) = pmvs[1][r] { save_block_motion(ts, BlockSize::BLOCK_32X32, tile_sbo.block_offset(0, 0), i, mv); } if let Some(mv) = pmvs[2][r] { save_block_motion(ts, BlockSize::BLOCK_32X32, tile_sbo.block_offset(8, 0), i, mv); } if let Some(mv) = pmvs[3][r] { save_block_motion(ts, 
BlockSize::BLOCK_32X32, tile_sbo.block_offset(0, 8), i, mv); } if let Some(mv) = pmvs[4][r] { save_block_motion(ts, BlockSize::BLOCK_32X32, tile_sbo.block_offset(8, 8), i, mv); } } } } } // Encode SuperBlock if fi.config.speed_settings.encode_bottomup { encode_partition_bottomup(fi, ts, &mut cw, &mut w_pre_cdef, &mut w_post_cdef, BlockSize::BLOCK_64X64, tile_bo, &mut pmvs, std::f64::MAX); } else { encode_partition_topdown(fi, ts, &mut cw, &mut w_pre_cdef, &mut w_post_cdef, BlockSize::BLOCK_64X64, tile_bo, &None, &mut pmvs); } // CDEF has to be decided before loop restoration, but coded after. // loop restoration must be decided last but coded before anything else. if cw.bc.cdef_coded || fi.sequence.enable_restoration { rdo_loop_decision(tile_sbo, fi, ts, &mut cw, &mut w); } if fi.sequence.enable_restoration { cw.write_lrf(&mut w, fi, &mut ts.restoration, tile_sbo); } // Once loop restoration is coded, we can replay the initial block bits w_pre_cdef.replay(&mut w); if cw.bc.cdef_coded { // CDEF index must be written in the middle, we can code it now let cdef_index = cw.bc.blocks.get_cdef(tile_sbo); cw.write_cdef(&mut w, cdef_index, fi.cdef_bits); // ...and then finally code what comes after the CDEF index w_post_cdef.replay(&mut w); } } } w.done() } #[allow(unused)] fn write_tile_group_header(tile_start_and_end_present_flag: bool) -> Vec<u8> { let mut buf = Vec::new(); { let mut bw = BitWriter::endian(&mut buf, BigEndian); bw.write_bit(tile_start_and_end_present_flag).unwrap(); bw.byte_align().unwrap(); } buf.clone() } // Write a packet containing only the placeholder that tells the decoder // to present the already decoded frame present at `frame_to_show_map_idx` // // See `av1-spec` Section 6.8.2 and 7.18. 
/// Writes a packet containing only the frame headers for an already-decoded
/// frame (slot `fi.frame_to_show_map_idx` of the reference buffer), and
/// mirrors that reference frame's three reconstructed planes into `fs.rec`
/// so the encoder-side reconstruction matches what the decoder will present.
pub fn encode_show_existing_frame<T: Pixel>(
  fi: &mut FrameInvariants<T>, fs: &mut FrameState<T>
) -> Vec<u8> {
  debug_assert!(fi.show_existing_frame);
  let mut packet = Vec::new();

  // Only the OBU headers are emitted: a show-existing frame carries no
  // coded tile data.
  write_obus(&mut packet, fi, fs).unwrap();

  let map_idx = fi.frame_to_show_map_idx as usize;
  if let Some(ref rec) = fi.rec_buffer.frames[map_idx] {
    // Copy all three planes of the referenced reconstruction into this
    // frame state.
    for p in 0..3 {
      fs.rec.planes[p].data.copy_from_slice(&rec.frame.planes[p].data);
    }
  }
  packet
}

/// Encodes one frame and returns the complete packet: the frame OBUs
/// followed by a tile-group OBU (header, uleb128 payload size, tile data).
pub fn encode_frame<T: Pixel>(
  fi: &mut FrameInvariants<T>, fs: &mut FrameState<T>
) -> Vec<u8> {
  debug_assert!(!fi.show_existing_frame);
  let mut packet = Vec::new();

  if !fi.intra_only {
    // Derive each inter reference's sign bias from its order hint relative
    // to the current frame; stays false when order hints are disabled or
    // the reference slot is empty.
    for i in 0..INTER_REFS_PER_FRAME {
      fi.ref_frame_sign_bias[i] = if !fi.sequence.enable_order_hint {
        false
      } else if let Some(ref rec) =
        fi.rec_buffer.frames[fi.ref_frames[i] as usize]
      {
        let hint = rec.order_hint;
        fi.sequence.get_relative_dist(hint, fi.order_hint) > 0
      } else {
        false
      };
    }
  }

  // Build the half- and quarter-resolution copies of the luma input used by
  // subsampled motion estimation.
  fs.input_hres.downsample_from(&fs.input.planes[0]);
  fs.input_hres.pad(fi.width, fi.height);
  fs.input_qres.downsample_from(&fs.input_hres);
  fs.input_qres.pad(fi.width, fi.height);

  segmentation_optimize(fi, fs);

  // Encode the tile data first; encode_tile_group() also updates frame
  // state (e.g. fs.cdfs, deblock levels) before the headers are written.
  let tile_group = encode_tile_group(fi, fs);

  write_obus(&mut packet, fi, fs).unwrap();

  // Tile-group OBU header.
  let mut buf1 = Vec::new();
  {
    let mut bw1 = BitWriter::endian(&mut buf1, BigEndian);
    bw1.write_obu_header(ObuType::OBU_TILE_GROUP, 0).unwrap();
  }
  packet.write_all(&buf1).unwrap();
  buf1.clear();

  // Payload size as a uleb128, then the tile data itself.
  {
    let mut bw1 = BitWriter::endian(&mut buf1, BigEndian);
    bw1.write_uleb128(tile_group.len() as u64).unwrap();
  }
  packet.write_all(&buf1).unwrap();
  buf1.clear();

  packet.write_all(&tile_group).unwrap();
  packet
}

/// Moves the finished frame state into the reference frame buffer.
///
/// The reconstruction, downsampled inputs, CDFs and per-frame motion
/// vectors are wrapped in one ref-counted `ReferenceFrame`; every slot
/// whose bit is set in `fi.refresh_frame_flags` receives a cheap `Arc`
/// clone of it plus a copy of this frame's deblock parameters.
pub fn update_rec_buffer<T: Pixel>(fi: &mut FrameInvariants<T>, fs: FrameState<T>) {
  let rfs = Arc::new(
    ReferenceFrame {
      order_hint: fi.order_hint,
      frame: fs.rec,
      input_hres: fs.input_hres,
      input_qres: fs.input_qres,
      cdfs: fs.cdfs,
      frame_mvs: fs.frame_mvs,
    }
  );
  for i in 0..(REF_FRAMES as usize) {
    if (fi.refresh_frame_flags & (1 << i)) != 0 {
      fi.rec_buffer.frames[i] = Some(Arc::clone(&rfs));
      fi.rec_buffer.deblock[i] = fs.deblock;
    }
  }
}

#[cfg(test)]
mod test {
  use super::*;

  // Pin the invariant that PARTITION_SPLIT is the last entry of
  // RAV1E_PARTITION_TYPES; the partition search iterates this table and
  // appears to assume the ordering (NOTE(review): confirm the assumption).
  #[test]
  fn check_partition_types_order() {
    assert_eq!(RAV1E_PARTITION_TYPES[RAV1E_PARTITION_TYPES.len() - 1],
               PartitionType::PARTITION_SPLIT);
  }
}
// Copyright 2015 The Gfx-rs Developers. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use libc; use log::LogLevel; use std::rc::Rc; use std::slice; use {gl, tex}; use gfx::device as d; use gfx::device::handle; use gfx::device::handle::Producer; use gfx::device::mapping::Builder; use Buffer; use Resources as R; pub fn update_sub_buffer(gl: &gl::Gl, buffer: Buffer, address: *const u8, size: usize, offset: usize, role: d::BufferRole) { let target = match role { d::BufferRole::Vertex => gl::ARRAY_BUFFER, d::BufferRole::Index => gl::ELEMENT_ARRAY_BUFFER, }; unsafe { gl.BindBuffer(target, buffer) }; unsafe { gl.BufferSubData(target, offset as gl::types::GLintptr, size as gl::types::GLsizeiptr, address as *const gl::types::GLvoid ); } } /// GL resource factory. pub struct Factory { caps: d::Capabilities, gl: Rc<gl::Gl>, main_fbo: handle::FrameBuffer<R>, handles: handle::Manager<R>, frame_handles: handle::Manager<R>, } /// Create a new `Factory`. 
pub fn create(caps: d::Capabilities, gl: Rc<gl::Gl>) -> Factory { let mut handles = handle::Manager::new(); Factory { caps: caps, gl: gl, main_fbo: handles.make_frame_buffer(0), handles: handles, frame_handles: handle::Manager::new(), } } impl Factory { fn create_buffer_internal(&mut self) -> Buffer { let mut name = 0 as Buffer; unsafe { self.gl.GenBuffers(1, &mut name); } info!("\tCreated buffer {}", name); name } fn init_buffer(&mut self, buffer: Buffer, info: &d::BufferInfo) { let target = match info.role { d::BufferRole::Vertex => gl::ARRAY_BUFFER, d::BufferRole::Index => gl::ELEMENT_ARRAY_BUFFER, }; unsafe { self.gl.BindBuffer(target, buffer) }; let usage = match info.usage { d::BufferUsage::Static => gl::STATIC_DRAW, d::BufferUsage::Dynamic => gl::DYNAMIC_DRAW, d::BufferUsage::Stream => gl::STREAM_DRAW, }; unsafe { self.gl.BufferData(target, info.size as gl::types::GLsizeiptr, 0 as *const gl::types::GLvoid, usage ); } } } #[allow(raw_pointer_derive)] #[derive(Copy, Clone)] pub struct RawMapping { pub pointer: *mut libc::c_void, target: gl::types::GLenum, } impl d::mapping::Raw for RawMapping { unsafe fn set<T>(&self, index: usize, val: T) { *(self.pointer as *mut T).offset(index as isize) = val; } unsafe fn to_slice<T>(&self, len: usize) -> &[T] { slice::from_raw_parts(self.pointer as *const T, len) } unsafe fn to_mut_slice<T>(&self, len: usize) -> &mut [T] { slice::from_raw_parts_mut(self.pointer as *mut T, len) } } impl d::Factory<R> for Factory { type Mapper = RawMapping; fn create_buffer_raw(&mut self, size: usize, usage: d::BufferUsage) -> handle::RawBuffer<R> { let name = self.create_buffer_internal(); let info = d::BufferInfo { role: d::BufferRole::Vertex, usage: usage, size: size, }; self.init_buffer(name, &info); self.handles.make_buffer(name, info) } fn create_buffer_static_raw(&mut self, data: &[u8], role: d::BufferRole) -> handle::RawBuffer<R> { let name = self.create_buffer_internal(); let info = d::BufferInfo { role: role, usage: 
d::BufferUsage::Static, size: data.len(), }; self.init_buffer(name, &info); update_sub_buffer(&self.gl, name, data.as_ptr(), data.len(), 0, role); self.handles.make_buffer(name, info) } fn create_array_buffer(&mut self) -> Result<handle::ArrayBuffer<R>, ()> { if self.caps.array_buffer_supported { let mut name = 0 as ::ArrayBuffer; unsafe { self.gl.GenVertexArrays(1, &mut name); } info!("\tCreated array buffer {}", name); Ok(self.handles.make_array_buffer(name)) } else { error!("\tarray buffer creation unsupported, ignored"); Err(()) } } fn create_shader(&mut self, stage: d::shade::Stage, code: &[u8]) -> Result<handle::Shader<R>, d::shade::CreateShaderError> { let (name, info) = ::shade::create_shader(&self.gl, stage, code); info.map(|info| { let level = if name.is_err() { LogLevel::Error } else { LogLevel::Warn }; log!(level, "\tShader compile log: {}", info); }); name.map(|sh| self.handles.make_shader(sh, stage)) } fn create_program(&mut self, shaders: &[handle::Shader<R>], targets: Option<&[&str]>) -> Result<handle::Program<R>, ()> { let objects: Vec<::Shader> = shaders.iter() .map(|h| self.frame_handles.ref_shader(h)) .collect(); let (prog, log) = ::shade::create_program(&self.gl, &self.caps, objects.into_iter(), targets); log.map(|log| { let level = if prog.is_err() { LogLevel::Error } else { LogLevel::Warn }; log!(level, "\tProgram link log: {}", log); }); prog.map(|(name, info)| self.handles.make_program(name, info)) } fn create_frame_buffer(&mut self) -> handle::FrameBuffer<R> { if !self.caps.render_targets_supported { panic!("No framebuffer objects, can't make a new one!"); } let mut name = 0 as ::FrameBuffer; unsafe { self.gl.GenFramebuffers(1, &mut name); } info!("\tCreated frame buffer {}", name); self.handles.make_frame_buffer(name) } fn create_surface(&mut self, info: d::tex::SurfaceInfo) -> Result<handle::Surface<R>, d::tex::SurfaceError> { if info.format.does_convert_gamma() && !self.caps.srgb_color_supported { return 
Err(d::tex::SurfaceError::UnsupportedGamma) } tex::make_surface(&self.gl, &info) .map(|suf| self.handles.make_surface(suf, info)) } fn create_texture(&mut self, info: d::tex::TextureInfo) -> Result<handle::Texture<R>, d::tex::TextureError> { if info.width == 0 || info.height == 0 || info.levels == 0 { return Err(d::tex::TextureError::InvalidInfo(info)) } if info.format.does_convert_gamma() && !self.caps.srgb_color_supported { return Err(d::tex::TextureError::UnsupportedGamma) } let name = if self.caps.immutable_storage_supported { tex::make_with_storage(&self.gl, &info) } else { tex::make_without_storage(&self.gl, &info) }; name.map(|tex| self.handles.make_texture(tex, info)) } fn create_sampler(&mut self, info: d::tex::SamplerInfo) -> handle::Sampler<R> { let sam = if self.caps.sampler_objects_supported { tex::make_sampler(&self.gl, &info) } else { 0 }; self.handles.make_sampler(sam, info) } fn get_main_frame_buffer(&self) -> handle::FrameBuffer<R> { self.main_fbo.clone() } fn update_buffer_raw(&mut self, buffer: &handle::RawBuffer<R>, data: &[u8], offset_bytes: usize) { debug_assert!(offset_bytes + data.len() <= buffer.get_info().size); let raw_handle = self.frame_handles.ref_buffer(buffer); update_sub_buffer(&self.gl, raw_handle, data.as_ptr(), data.len(), offset_bytes, buffer.get_info().role) } fn update_texture_raw(&mut self, texture: &handle::Texture<R>, img: &d::tex::ImageInfo, data: &[u8], optkind: Option<d::tex::TextureKind>) -> Result<(), d::tex::TextureError> { // use the specified texture kind if set for this update, otherwise // fall back on the kind that was set when the texture was created. 
let kind = optkind.unwrap_or(texture.get_info().kind); tex::update_texture(&self.gl, kind, self.frame_handles.ref_texture(texture), img, data.as_ptr(), data.len()) } fn generate_mipmap(&mut self, texture: &handle::Texture<R>) { tex::generate_mipmap(&self.gl, texture.get_info().kind, self.frame_handles.ref_texture(texture)); } fn map_buffer_raw(&mut self, buf: &handle::RawBuffer<R>, access: d::MapAccess) -> RawMapping { let raw_handle = self.frame_handles.ref_buffer(buf); unsafe { self.gl.BindBuffer(gl::ARRAY_BUFFER, raw_handle) }; let ptr = unsafe { self.gl.MapBuffer(gl::ARRAY_BUFFER, match access { d::MapAccess::Readable => gl::READ_ONLY, d::MapAccess::Writable => gl::WRITE_ONLY, d::MapAccess::RW => gl::READ_WRITE }) } as *mut libc::c_void; RawMapping { pointer: ptr, target: gl::ARRAY_BUFFER } } fn unmap_buffer_raw(&mut self, map: RawMapping) { unsafe { self.gl.UnmapBuffer(map.target) }; } fn map_buffer_readable<T: Copy>(&mut self, buf: &handle::Buffer<R, T>) -> d::mapping::Readable<T, R, Factory> { let map = self.map_buffer_raw(buf.raw(), d::MapAccess::Readable); self.map_readable(map, buf.len()) } fn map_buffer_writable<T: Copy>(&mut self, buf: &handle::Buffer<R, T>) -> d::mapping::Writable<T, R, Factory> { let map = self.map_buffer_raw(buf.raw(), d::MapAccess::Writable); self.map_writable(map, buf.len()) } fn map_buffer_rw<T: Copy>(&mut self, buf: &handle::Buffer<R, T>) -> d::mapping::RW<T, R, Factory> { let map = self.map_buffer_raw(buf.raw(), d::MapAccess::RW); self.map_read_write(map, buf.len()) } fn cleanup(&mut self) { self.handles.clean_with(&mut self.gl, |gl, v| unsafe { gl.DeleteBuffers(1, v) }, |gl, v| unsafe { gl.DeleteVertexArrays(1, v) }, |gl, v| unsafe { gl.DeleteShader(*v) }, |gl, v| unsafe { gl.DeleteProgram(*v) }, |gl, v| unsafe { gl.DeleteFramebuffers(1, v) }, |gl, v| unsafe { gl.DeleteRenderbuffers(1, v) }, |gl, v| unsafe { gl.DeleteTextures(1, v) }, |gl, v| unsafe { gl.DeleteSamplers(1, v) }); self.frame_handles.clear(); } } Moved 
get_main_frame_buffer into the Factory methods // Copyright 2015 The Gfx-rs Developers. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use libc; use log::LogLevel; use std::rc::Rc; use std::slice; use {gl, tex}; use gfx::device as d; use gfx::device::handle; use gfx::device::handle::Producer; use gfx::device::mapping::Builder; use Buffer; use Resources as R; pub fn update_sub_buffer(gl: &gl::Gl, buffer: Buffer, address: *const u8, size: usize, offset: usize, role: d::BufferRole) { let target = match role { d::BufferRole::Vertex => gl::ARRAY_BUFFER, d::BufferRole::Index => gl::ELEMENT_ARRAY_BUFFER, }; unsafe { gl.BindBuffer(target, buffer) }; unsafe { gl.BufferSubData(target, offset as gl::types::GLintptr, size as gl::types::GLsizeiptr, address as *const gl::types::GLvoid ); } } /// GL resource factory. pub struct Factory { caps: d::Capabilities, gl: Rc<gl::Gl>, main_fbo: handle::FrameBuffer<R>, handles: handle::Manager<R>, frame_handles: handle::Manager<R>, } /// Create a new `Factory`. 
pub fn create(caps: d::Capabilities, gl: Rc<gl::Gl>) -> Factory { let mut handles = handle::Manager::new(); Factory { caps: caps, gl: gl, main_fbo: handles.make_frame_buffer(0), handles: handles, frame_handles: handle::Manager::new(), } } impl Factory { fn create_buffer_internal(&mut self) -> Buffer { let mut name = 0 as Buffer; unsafe { self.gl.GenBuffers(1, &mut name); } info!("\tCreated buffer {}", name); name } fn init_buffer(&mut self, buffer: Buffer, info: &d::BufferInfo) { let target = match info.role { d::BufferRole::Vertex => gl::ARRAY_BUFFER, d::BufferRole::Index => gl::ELEMENT_ARRAY_BUFFER, }; unsafe { self.gl.BindBuffer(target, buffer) }; let usage = match info.usage { d::BufferUsage::Static => gl::STATIC_DRAW, d::BufferUsage::Dynamic => gl::DYNAMIC_DRAW, d::BufferUsage::Stream => gl::STREAM_DRAW, }; unsafe { self.gl.BufferData(target, info.size as gl::types::GLsizeiptr, 0 as *const gl::types::GLvoid, usage ); } } pub fn get_main_frame_buffer(&self) -> handle::FrameBuffer<R> { self.main_fbo.clone() } } #[allow(raw_pointer_derive)] #[derive(Copy, Clone)] pub struct RawMapping { pub pointer: *mut libc::c_void, target: gl::types::GLenum, } impl d::mapping::Raw for RawMapping { unsafe fn set<T>(&self, index: usize, val: T) { *(self.pointer as *mut T).offset(index as isize) = val; } unsafe fn to_slice<T>(&self, len: usize) -> &[T] { slice::from_raw_parts(self.pointer as *const T, len) } unsafe fn to_mut_slice<T>(&self, len: usize) -> &mut [T] { slice::from_raw_parts_mut(self.pointer as *mut T, len) } } impl d::Factory<R> for Factory { type Mapper = RawMapping; fn create_buffer_raw(&mut self, size: usize, usage: d::BufferUsage) -> handle::RawBuffer<R> { let name = self.create_buffer_internal(); let info = d::BufferInfo { role: d::BufferRole::Vertex, usage: usage, size: size, }; self.init_buffer(name, &info); self.handles.make_buffer(name, info) } fn create_buffer_static_raw(&mut self, data: &[u8], role: d::BufferRole) -> handle::RawBuffer<R> { let name = 
self.create_buffer_internal();
        let info = d::BufferInfo {
            role: role,
            usage: d::BufferUsage::Static,
            size: data.len(),
        };
        self.init_buffer(name, &info);
        // Allocate storage, then upload the initial contents in one shot.
        update_sub_buffer(&self.gl, name, data.as_ptr(), data.len(), 0, role);
        self.handles.make_buffer(name, info)
    }

    /// Create a vertex array object, or Err(()) if VAOs are unsupported.
    fn create_array_buffer(&mut self) -> Result<handle::ArrayBuffer<R>, ()> {
        if self.caps.array_buffer_supported {
            let mut name = 0 as ::ArrayBuffer;
            unsafe { self.gl.GenVertexArrays(1, &mut name); }
            info!("\tCreated array buffer {}", name);
            Ok(self.handles.make_array_buffer(name))
        } else {
            error!("\tarray buffer creation unsupported, ignored");
            Err(())
        }
    }

    /// Compile a shader of the given `stage` from GLSL source bytes.
    /// The compile log, if any, is emitted at Warn (success) or Error level.
    fn create_shader(&mut self, stage: d::shade::Stage, code: &[u8])
                     -> Result<handle::Shader<R>, d::shade::CreateShaderError> {
        let (name, info) = ::shade::create_shader(&self.gl, stage, code);
        info.map(|info| {
            let level = if name.is_err() { LogLevel::Error } else { LogLevel::Warn };
            log!(level, "\tShader compile log: {}", info);
        });
        name.map(|sh| self.handles.make_shader(sh, stage))
    }

    /// Link the given shaders into a program, optionally binding the named
    /// fragment output `targets`. The link log is emitted like the compile log.
    fn create_program(&mut self, shaders: &[handle::Shader<R>], targets: Option<&[&str]>)
                      -> Result<handle::Program<R>, ()> {
        // Resolve handles to raw GL shader names before linking.
        let objects: Vec<::Shader> = shaders.iter()
            .map(|h| self.frame_handles.ref_shader(h))
            .collect();
        let (prog, log) = ::shade::create_program(&self.gl, &self.caps,
                                                  objects.into_iter(), targets);
        log.map(|log| {
            let level = if prog.is_err() { LogLevel::Error } else { LogLevel::Warn };
            log!(level, "\tProgram link log: {}", log);
        });
        prog.map(|(name, info)| self.handles.make_program(name, info))
    }

    /// Create a framebuffer object.
    /// Panics if the context does not support render targets at all.
    fn create_frame_buffer(&mut self) -> handle::FrameBuffer<R> {
        if !self.caps.render_targets_supported {
            panic!("No framebuffer objects, can't make a new one!");
        }
        let mut name = 0 as ::FrameBuffer;
        unsafe { self.gl.GenFramebuffers(1, &mut name); }
        info!("\tCreated frame buffer {}", name);
        self.handles.make_frame_buffer(name)
    }

    /// Create a renderbuffer surface; rejects gamma-converting formats when
    /// the context lacks sRGB support.
    fn create_surface(&mut self, info: d::tex::SurfaceInfo)
                      -> Result<handle::Surface<R>, d::tex::SurfaceError> {
        if info.format.does_convert_gamma()
&& !self.caps.srgb_color_supported {
            return Err(d::tex::SurfaceError::UnsupportedGamma)
        }
        tex::make_surface(&self.gl, &info)
            .map(|suf| self.handles.make_surface(suf, info))
    }

    /// Create a texture. Validates dimensions/mip levels and sRGB support
    /// up front, then picks immutable (glTexStorage) or mutable storage
    /// depending on driver capabilities.
    fn create_texture(&mut self, info: d::tex::TextureInfo)
                      -> Result<handle::Texture<R>, d::tex::TextureError> {
        if info.width == 0 || info.height == 0 || info.levels == 0 {
            return Err(d::tex::TextureError::InvalidInfo(info))
        }
        if info.format.does_convert_gamma() && !self.caps.srgb_color_supported {
            return Err(d::tex::TextureError::UnsupportedGamma)
        }
        let name = if self.caps.immutable_storage_supported {
            tex::make_with_storage(&self.gl, &info)
        } else {
            tex::make_without_storage(&self.gl, &info)
        };
        name.map(|tex| self.handles.make_texture(tex, info))
    }

    /// Create a sampler object. When sampler objects are unsupported, a
    /// dummy name 0 is stored and sampling state presumably falls back to
    /// per-texture parameters — confirm in the command-buffer code.
    fn create_sampler(&mut self, info: d::tex::SamplerInfo) -> handle::Sampler<R> {
        let sam = if self.caps.sampler_objects_supported {
            tex::make_sampler(&self.gl, &info)
        } else {
            0
        };
        self.handles.make_sampler(sam, info)
    }

    /// Overwrite part of `buffer` with `data` starting at `offset_bytes`.
    /// Debug-asserts that the write stays within the allocated size.
    fn update_buffer_raw(&mut self, buffer: &handle::RawBuffer<R>, data: &[u8],
                         offset_bytes: usize) {
        debug_assert!(offset_bytes + data.len() <= buffer.get_info().size);
        let raw_handle = self.frame_handles.ref_buffer(buffer);
        update_sub_buffer(&self.gl, raw_handle, data.as_ptr(), data.len(),
                          offset_bytes, buffer.get_info().role)
    }

    /// Upload pixel data described by `img` into `texture`.
    fn update_texture_raw(&mut self, texture: &handle::Texture<R>,
                          img: &d::tex::ImageInfo, data: &[u8],
                          optkind: Option<d::tex::TextureKind>)
                          -> Result<(), d::tex::TextureError> {
        // use the specified texture kind if set for this update, otherwise
        // fall back on the kind that was set when the texture was created.
let kind = optkind.unwrap_or(texture.get_info().kind);
        tex::update_texture(&self.gl, kind, self.frame_handles.ref_texture(texture),
                            img, data.as_ptr(), data.len())
    }

    /// Ask the driver to regenerate the full mip chain for `texture`.
    fn generate_mipmap(&mut self, texture: &handle::Texture<R>) {
        tex::generate_mipmap(&self.gl, texture.get_info().kind,
                             self.frame_handles.ref_texture(texture));
    }

    /// Map a buffer into CPU memory via glMapBuffer with the requested
    /// access mode. The buffer is always bound through GL_ARRAY_BUFFER,
    /// regardless of its role.
    fn map_buffer_raw(&mut self, buf: &handle::RawBuffer<R>, access: d::MapAccess) -> RawMapping {
        let raw_handle = self.frame_handles.ref_buffer(buf);
        unsafe { self.gl.BindBuffer(gl::ARRAY_BUFFER, raw_handle) };
        let ptr = unsafe {
            self.gl.MapBuffer(gl::ARRAY_BUFFER, match access {
                d::MapAccess::Readable => gl::READ_ONLY,
                d::MapAccess::Writable => gl::WRITE_ONLY,
                d::MapAccess::RW => gl::READ_WRITE
            })
        } as *mut libc::c_void;
        // NOTE(review): glMapBuffer may return null on failure; not checked here.
        RawMapping {
            pointer: ptr,
            target: gl::ARRAY_BUFFER
        }
    }

    /// Release a mapping created by `map_buffer_raw`.
    fn unmap_buffer_raw(&mut self, map: RawMapping) {
        unsafe { self.gl.UnmapBuffer(map.target) };
    }

    /// Typed read-only mapping of a buffer (wraps `map_buffer_raw`).
    fn map_buffer_readable<T: Copy>(&mut self, buf: &handle::Buffer<R, T>)
                           -> d::mapping::Readable<T, R, Factory> {
        let map = self.map_buffer_raw(buf.raw(), d::MapAccess::Readable);
        self.map_readable(map, buf.len())
    }

    /// Typed write-only mapping of a buffer (wraps `map_buffer_raw`).
    fn map_buffer_writable<T: Copy>(&mut self, buf: &handle::Buffer<R, T>)
                                    -> d::mapping::Writable<T, R, Factory> {
        let map = self.map_buffer_raw(buf.raw(), d::MapAccess::Writable);
        self.map_writable(map, buf.len())
    }

    /// Typed read-write mapping of a buffer (wraps `map_buffer_raw`).
    fn map_buffer_rw<T: Copy>(&mut self, buf: &handle::Buffer<R, T>)
                              -> d::mapping::RW<T, R, Factory> {
        let map = self.map_buffer_raw(buf.raw(), d::MapAccess::RW);
        self.map_read_write(map, buf.len())
    }

    /// Destroy all GL objects whose handles are no longer referenced.
    /// One deleter closure is supplied per resource kind.
    fn cleanup(&mut self) {
        self.handles.clean_with(&mut self.gl,
            |gl, v| unsafe { gl.DeleteBuffers(1, v) },
            |gl, v| unsafe { gl.DeleteVertexArrays(1, v) },
            |gl, v| unsafe { gl.DeleteShader(*v) },
            |gl, v| unsafe { gl.DeleteProgram(*v) },
            |gl, v| unsafe { gl.DeleteFramebuffers(1, v) },
            |gl, v| unsafe { gl.DeleteRenderbuffers(1, v) },
            |gl, v| unsafe { gl.DeleteTextures(1, v) },
            |gl, v| unsafe { gl.DeleteSamplers(1, v) });
        // Drop the per-call raw-name references accumulated this frame.
        self.frame_handles.clear();
    }
}
use errno::{errno, Errno};
use glfs::*;
use libc::{c_uchar, c_void, dev_t, dirent, ENOENT, flock, LOCK_SH, LOCK_EX, LOCK_UN,
           ino_t, mode_t, stat, timespec};
use libffi::high::Closure3;
use std::error::Error as err;
use std::mem::zeroed;
use std::ffi::{CStr, CString, IntoStringError, NulError};
use std::fmt;
use std::io::Error;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::ptr;
use std::string::FromUtf8Error;

/// Custom error handling for the library.
/// Wraps the conversion/IO errors that can occur while marshalling data
/// across the libgfapi FFI boundary, plus a free-form `Error(String)`
/// variant used for errno-derived messages from Gluster itself.
#[derive(Debug)]
pub enum GlusterError {
    FromUtf8Error(FromUtf8Error),
    NulError(NulError),
    Error(String),
    IoError(Error),
    IntoStringError(IntoStringError),
}

impl fmt::Display for GlusterError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Reuse the Error::description text for user-facing display.
        f.write_str(self.description())
    }
}

impl err for GlusterError {
    fn description(&self) -> &str {
        match *self {
            GlusterError::FromUtf8Error(ref e) => e.description(),
            GlusterError::NulError(ref e) => e.description(),
            GlusterError::Error(ref e) => &e,
            GlusterError::IoError(ref e) => e.description(),
            GlusterError::IntoStringError(ref e) => e.description(),
        }
    }

    fn cause(&self) -> Option<&err> {
        match *self {
            GlusterError::FromUtf8Error(ref e) => e.cause(),
            GlusterError::NulError(ref e) => e.cause(),
            // A plain message has no underlying cause.
            GlusterError::Error(_) => None,
            GlusterError::IoError(ref e) => e.cause(),
            GlusterError::IntoStringError(ref e) => e.cause(),
        }
    }
}

impl GlusterError {
    /// Create a new GlusterError with a String message
    fn new(err: String) -> GlusterError {
        GlusterError::Error(err)
    }

    /// Convert a GlusterError into a String representation.
pub fn to_string(&self) -> String {
        match *self {
            // Surface the inner utf8 error rather than the wrapper's text.
            GlusterError::FromUtf8Error(ref err) => err.utf8_error().to_string(),
            GlusterError::NulError(ref err) => err.description().to_string(),
            GlusterError::Error(ref err) => err.to_string(),
            GlusterError::IoError(ref err) => err.description().to_string(),
            GlusterError::IntoStringError(ref err) => err.description().to_string(),
        }
    }
}

// `From` impls below let `try!`/`?` auto-convert the common conversion and
// IO failures into GlusterError.

impl From<NulError> for GlusterError {
    fn from(err: NulError) -> GlusterError {
        GlusterError::NulError(err)
    }
}

impl From<FromUtf8Error> for GlusterError {
    fn from(err: FromUtf8Error) -> GlusterError {
        GlusterError::FromUtf8Error(err)
    }
}

impl From<IntoStringError> for GlusterError {
    fn from(err: IntoStringError) -> GlusterError {
        GlusterError::IntoStringError(err)
    }
}

impl From<Error> for GlusterError {
    fn from(err: Error) -> GlusterError {
        GlusterError::IoError(err)
    }
}

/// Render the current thread's errno as a human-readable message.
/// Must be called immediately after a failing libgfapi call, before any
/// other call can clobber errno.
fn get_error() -> String {
    let error = errno();
    format!("{}", error)
}

/// Apply or remove an advisory lock on the open file.
pub enum PosixLockCmd {
    /// Place an exclusive lock. Only one process may hold an
    /// exclusive lock for a given file at a given time.
    Exclusive,
    /// Place a shared lock. More than one process may hold a shared
    /// lock for a given file at a given time.
    Shared,
    /// Remove an existing lock held by this process.
    Unlock,
}

impl Into<i32> for PosixLockCmd {
    // Map to the corresponding flock(2) operation constant.
    fn into(self) -> i32 {
        match self {
            PosixLockCmd::Shared => LOCK_SH,
            PosixLockCmd::Exclusive => LOCK_EX,
            PosixLockCmd::Unlock => LOCK_UN,
        }
    }
}

// pub type glfs_io_cbk = ::std::option::Option<extern "C" fn(fd: *mut glfs_fd_t,
//                                                            ret: ssize_t,
//                                                            data: *mut c_void)
//                                                            -> ()>;
// pub type glfs_io_cbk = ::std::option::Option<extern "C" fn(fd: *mut glfs_fd_t,
//                                                            ret: ssize_t,
//                                                            data: *mut c_void)
//                                                            -> ()>;
//
/// Owned connection to a Gluster volume; wraps the libgfapi `glfs_t` handle.
#[derive(Debug)]
pub struct Gluster {
    cluster_handle: *mut Struct_glfs,
}

impl Drop for Gluster {
    // Tear down the libgfapi connection when the wrapper goes out of scope.
    fn drop(&mut self) {
        if self.cluster_handle.is_null() {
            // No cleanup needed
            return;
        }
        unsafe {
            glfs_fini(self.cluster_handle);
        }
    }
}

/// An open directory handle, iterable to yield `DirEntry` items.
#[derive(Debug)]
pub struct GlusterDirectory {
    pub dir_handle: *mut Struct_glfs_fd,
}

/// One directory entry: name (as a path), inode number, and the raw
/// dirent d_type byte (DT_REG, DT_DIR, ...).
#[derive(Debug)]
pub struct DirEntry {
    pub path: PathBuf,
    pub inode: ino_t,
    pub file_type: c_uchar,
}

impl Iterator for GlusterDirectory {
    type Item = DirEntry;

    /// Read the next entry via the reentrant glfs_readdir_r.
    /// Returns None on error or end of stream.
    fn next(&mut self) -> Option<DirEntry> {
        let mut dirent: dirent = unsafe { zeroed() };
        let mut next_entry: *mut dirent = ptr::null_mut();
        unsafe {
            let ret_code = glfs_readdir_r(self.dir_handle, &mut dirent, &mut next_entry);
            if ret_code < 0 {
                // Error path closes the handle; the normal end-of-stream
                // path below does not. NOTE(review): confirm who closes the
                // handle after exhaustion — possible descriptor leak.
                glfs_closedir(self.dir_handle);
                return None;
            }
            // NOTE(review): end-of-stream is detected by d_ino staying 0 in
            // the zeroed buffer; the readdir_r convention is to check
            // `next_entry.is_null()` instead — verify against libgfapi docs.
            if dirent.d_ino == 0 {
                // End of stream reached
                return None;
            }
            let telldir_retcode = glfs_telldir(self.dir_handle);
            if telldir_retcode < 0 {
                return None;
            }
            let file_name = CStr::from_ptr(dirent.d_name.as_ptr());
            return Some(DirEntry {
                path: PathBuf::from(file_name.to_string_lossy().into_owned()),
                inode: dirent.d_ino,
                file_type: dirent.d_type,
            });
        }
    }
}

impl Gluster {
    /// Connect to a Gluster volume and return a connection handle (glfs_t).
    /// The port is usually 24007 but may differ depending on how the
    /// service was configured. Transport is hard-coded to "tcp".
    pub fn connect(volume_name: &str, server: &str, port: u16) -> Result<Gluster, GlusterError> {
        let vol_name = try!(CString::new(volume_name));
        let vol_transport = try!(CString::new("tcp"));
        let vol_host = try!(CString::new(server));
        unsafe {
            let
cluster_handle = glfs_new(vol_name.as_ptr()); if cluster_handle.is_null() { return Err(GlusterError::new("glfs_new failed".to_string())); } let ret_code = glfs_set_volfile_server(cluster_handle, vol_transport.as_ptr(), vol_host.as_ptr(), port as ::libc::c_int); if ret_code < 0 { return Err(GlusterError::new(get_error())); } let ret_code = glfs_init(cluster_handle); if ret_code < 0 { return Err(GlusterError::new(get_error())); } Ok(Gluster { cluster_handle: cluster_handle }) } } /// Disconnect from a Gluster cluster and destroy the connection handle /// For clean up, this is only necessary after connect() has succeeded. /// Normally there is no need to call this function. When Rust cleans /// up the Gluster struct it will automatically call disconnect pub fn disconnect(self) { if self.cluster_handle.is_null() { // No cleanup needed return; } unsafe { glfs_fini(self.cluster_handle); } } pub fn open(&self, path: &Path, flags: i32) -> Result<*mut Struct_glfs_fd, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let file_handle = glfs_open(self.cluster_handle, path.as_ptr(), flags); Ok(file_handle) } } pub fn create(&self, path: &Path, flags: i32, mode: mode_t) -> Result<*mut Struct_glfs_fd, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let file_handle = glfs_creat(self.cluster_handle, path.as_ptr(), flags, mode); if file_handle.is_null() { return Err(GlusterError::new(get_error())); } Ok(file_handle) } } pub fn close(&self, file_handle: *mut Struct_glfs_fd) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_close(file_handle); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn read(&self, file_handle: *mut Struct_glfs_fd, fill_buffer: &mut Vec<u8>, count: usize, flags: i32) -> Result<isize, GlusterError> { unsafe { let read_size = glfs_read(file_handle, fill_buffer.as_mut_ptr() as *mut c_void, count, flags); if read_size < 0 { return 
Err(GlusterError::new(get_error())); } fill_buffer.set_len(read_size as usize); Ok(read_size) } } pub fn write(&self, file_handle: *mut Struct_glfs_fd, buffer: &[u8], flags: i32) -> Result<isize, GlusterError> { unsafe { let write_size = glfs_write(file_handle, buffer.as_ptr() as *const c_void, buffer.len(), flags); if write_size < 0 { return Err(GlusterError::new(get_error())); } Ok(write_size) } } pub fn write_async<F>(&self, file_handle: *mut Struct_glfs_fd, buffer: &[u8], flags: i32, callback: F, data: &mut ::libc::c_void) -> Result<(), GlusterError> where F: Fn(*mut Struct_glfs_fd, isize, *mut ::libc::c_void) { let closure = Closure3::new(&callback); let callback_ptr = closure.code_ptr(); unsafe { let ret_code = glfs_write_async(file_handle, buffer.as_ptr() as *const c_void, buffer.len(), flags, Some(*callback_ptr), data); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn readv(&self, file_handle: *mut Struct_glfs_fd, iov: &mut [&mut [u8]], flags: i32) -> Result<isize, GlusterError> { unsafe { let read_size = glfs_readv(file_handle, iov.as_ptr() as *const iovec, iov.len() as i32, flags); if read_size < 0 { return Err(GlusterError::new(get_error())); } Ok(read_size) } } pub fn writev(&self, file_handle: *mut Struct_glfs_fd, iov: &[&[u8]], flags: i32) -> Result<isize, GlusterError> { unsafe { let write_size = glfs_writev(file_handle, iov.as_ptr() as *const iovec, iov.len() as i32, flags); if write_size < 0 { return Err(GlusterError::new(get_error())); } Ok(write_size) } } pub fn pread(&self, file_handle: *mut Struct_glfs_fd, fill_buffer: &mut Vec<u8>, count: usize, offset: i64, flags: i32) -> Result<isize, GlusterError> { unsafe { let read_size = glfs_pread(file_handle, fill_buffer.as_mut_ptr() as *mut c_void, count, offset, flags); if read_size < 0 { return Err(GlusterError::new(get_error())); } fill_buffer.set_len(read_size as usize); Ok(read_size) } } pub fn pwrite(&self, file_handle: *mut Struct_glfs_fd, buffer: &[u8], count: 
usize, offset: i64, flags: i32) -> Result<isize, GlusterError> { unsafe { let write_size = glfs_pwrite(file_handle, buffer.as_ptr() as *mut c_void, count, offset, flags); if write_size < 0 { return Err(GlusterError::new(get_error())); } Ok(write_size) } } pub fn preadv(&self, file_handle: *mut Struct_glfs_fd, iov: &mut [&mut [u8]], offset: i64, flags: i32) -> Result<isize, GlusterError> { unsafe { let read_size = glfs_preadv(file_handle, iov.as_ptr() as *const iovec, iov.len() as i32, offset, flags); if read_size < 0 { return Err(GlusterError::new(get_error())); } Ok(read_size) } } // TODO: Use C IoVec pub fn pwritev(&self, file_handle: *mut Struct_glfs_fd, iov: &[&[u8]], offset: i64, flags: i32) -> Result<isize, GlusterError> { unsafe { let write_size = glfs_pwritev(file_handle, iov.as_ptr() as *const iovec, iov.len() as i32, offset, flags); if write_size < 0 { return Err(GlusterError::new(get_error())); } Ok(write_size) } } pub fn lseek(&self, file_handle: *mut Struct_glfs_fd, offset: i64, whence: i32) -> Result<i64, GlusterError> { unsafe { let file_offset = glfs_lseek(file_handle, offset, whence); if file_offset < 0 { return Err(GlusterError::new(get_error())); } Ok(file_offset) } } pub fn truncate(&self, path: &Path, length: i64) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_truncate(self.cluster_handle, path.as_ptr(), length); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn ftruncate(&self, file_handle: *mut Struct_glfs_fd, length: i64) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_ftruncate(file_handle, length); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn lsstat(&self, path: &Path) -> Result<stat, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let mut stat_buf: stat = zeroed(); let ret_code = glfs_lstat(self.cluster_handle, path.as_ptr(), &mut stat_buf); if ret_code 
< 0 {
                return Err(GlusterError::new(get_error()));
            }
            Ok(stat_buf)
        }
    }

    /// Tests for the existence of a file. Returns true/false respectively.
    /// ENOENT maps to Ok(false); any other stat failure is an Err.
    pub fn exists(&self, path: &Path) -> Result<bool, GlusterError> {
        let path = try!(CString::new(path.as_os_str().as_bytes()));
        unsafe {
            let mut stat_buf: stat = zeroed();
            let ret_code = glfs_stat(self.cluster_handle, path.as_ptr(), &mut stat_buf);
            if ret_code < 0 {
                let error = errno();
                if error == Errno(ENOENT) {
                    // Path genuinely absent.
                    return Ok(false);
                }
                // Any other errno (EACCES, EIO, ...) is a real failure.
                return Err(GlusterError::new(get_error()));
            }
            // BUG FIX: stat succeeded, so the path exists. This previously
            // returned Ok(false) unconditionally, making exists() always
            // report "missing" for files that are present.
            Ok(true)
        }
    }

    /// Stat a path, following symlinks (cf. lsstat for the no-follow variant).
    pub fn stat(&self, path: &Path) -> Result<stat, GlusterError> {
        let path = try!(CString::new(path.as_os_str().as_bytes()));
        unsafe {
            let mut stat_buf: stat = zeroed();
            let ret_code = glfs_stat(self.cluster_handle, path.as_ptr(), &mut stat_buf);
            if ret_code < 0 {
                return Err(GlusterError::new(get_error()));
            }
            Ok(stat_buf)
        }
    }

    /// Stat an open file handle.
    pub fn fstat(&self, file_handle: *mut Struct_glfs_fd) -> Result<stat, GlusterError> {
        unsafe {
            let mut stat_buf: stat = zeroed();
            let ret_code = glfs_fstat(file_handle, &mut stat_buf);
            if ret_code < 0 {
                return Err(GlusterError::new(get_error()));
            }
            Ok(stat_buf)
        }
    }

    /// Flush file data and metadata to stable storage (fsync semantics).
    pub fn fsync(&self, file_handle: *mut Struct_glfs_fd) -> Result<(), GlusterError> {
        unsafe {
            let ret_code = glfs_fsync(file_handle);
            if ret_code < 0 {
                return Err(GlusterError::new(get_error()));
            }
        }
        Ok(())
    }

    /// Flush file data, without necessarily flushing metadata (fdatasync).
    pub fn fdatasync(&self, file_handle: *mut Struct_glfs_fd) -> Result<(), GlusterError> {
        unsafe {
            let ret_code = glfs_fdatasync(file_handle);
            if ret_code < 0 {
                return Err(GlusterError::new(get_error()));
            }
        }
        Ok(())
    }

    /// Check accessibility of a path for the given `mode`, as access(2).
    pub fn access(&self, path: &Path, mode: i32) -> Result<(), GlusterError> {
        let path = try!(CString::new(path.as_os_str().as_bytes()));
        unsafe {
            let ret_code = glfs_access(self.cluster_handle, path.as_ptr(), mode);
            if ret_code < 0 {
                return Err(GlusterError::new(get_error()));
            }
        }
        Ok(())
    }

    /// Create a symbolic link at `newpath` pointing to `oldpath`.
    pub fn symlink(&self, oldpath: &Path, newpath: &Path) -> Result<(), GlusterError> {
        let old_path = try!(CString::new(oldpath.as_os_str().as_bytes()));
        let
new_path = try!(CString::new(newpath.as_os_str().as_bytes())); unsafe { let ret_code = glfs_symlink(self.cluster_handle, old_path.as_ptr(), new_path.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn readlink(&self, path: &Path, buf: &mut [u8]) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_readlink(self.cluster_handle, path.as_ptr(), buf.as_mut_ptr() as *mut i8, buf.len()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn mknod(&self, path: &Path, mode: mode_t, dev: dev_t) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_mknod(self.cluster_handle, path.as_ptr(), mode, dev); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn mkdir(&self, path: &Path, mode: mode_t) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_mkdir(self.cluster_handle, path.as_ptr(), mode); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn unlink(&self, path: &Path) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_unlink(self.cluster_handle, path.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn rmdir(&self, path: &Path) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_rmdir(self.cluster_handle, path.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn rename(&self, oldpath: &Path, newpath: &Path) -> Result<(), GlusterError> { let old_path = try!(CString::new(oldpath.as_os_str().as_bytes())); let new_path = try!(CString::new(newpath.as_os_str().as_bytes())); unsafe { let ret_code = glfs_rename(self.cluster_handle, old_path.as_ptr(), 
new_path.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn link(&self, oldpath: &Path, newpath: &Path) -> Result<(), GlusterError> { let old_path = try!(CString::new(oldpath.as_os_str().as_bytes())); let new_path = try!(CString::new(newpath.as_os_str().as_bytes())); unsafe { let ret_code = glfs_link(self.cluster_handle, old_path.as_ptr(), new_path.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn opendir(&self, path: &Path) -> Result<*mut Struct_glfs_fd, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let file_handle = glfs_opendir(self.cluster_handle, path.as_ptr()); Ok(file_handle) } } pub fn getxattr(&self, path: &Path, name: &str) -> Result<String, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); let name = try!(CString::new(name)); let mut xattr_val_buff: Vec<u8> = Vec::with_capacity(1024); unsafe { let ret_code = glfs_getxattr(self.cluster_handle, path.as_ptr(), name.as_ptr(), xattr_val_buff.as_mut_ptr() as *mut c_void, xattr_val_buff.len()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } // Set the buffer to the size of bytes read into it xattr_val_buff.set_len(ret_code as usize); Ok(String::from_utf8_lossy(&xattr_val_buff).into_owned()) } } pub fn lgetxattr(&self, path: &Path, name: &str) -> Result<String, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); let name = try!(CString::new(name)); let mut xattr_val_buff: Vec<u8> = Vec::with_capacity(1024); unsafe { let ret_code = glfs_lgetxattr(self.cluster_handle, path.as_ptr(), name.as_ptr(), xattr_val_buff.as_mut_ptr() as *mut c_void, xattr_val_buff.len()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } // Set the buffer to the size of bytes read into it xattr_val_buff.set_len(ret_code as usize); Ok(String::from_utf8_lossy(&xattr_val_buff).into_owned()) } } pub fn fgetxattr(&self, file_handle: *mut 
Struct_glfs_fd, name: &str) -> Result<String, GlusterError> { let name = try!(CString::new(name)); let mut xattr_val_buff: Vec<u8> = Vec::with_capacity(1024); unsafe { let ret_code = glfs_fgetxattr(file_handle, name.as_ptr(), xattr_val_buff.as_mut_ptr() as *mut c_void, xattr_val_buff.len()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } // Set the buffer to the size of bytes read into it xattr_val_buff.set_len(ret_code as usize); Ok(String::from_utf8_lossy(&xattr_val_buff).into_owned()) } } pub fn listxattr(&self, path: &Path) -> Result<String, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); let mut xattr_val_buff: Vec<u8> = Vec::with_capacity(1024); unsafe { let ret_code = glfs_listxattr(self.cluster_handle, path.as_ptr(), xattr_val_buff.as_mut_ptr() as *mut c_void, xattr_val_buff.len()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } // Set the buffer to the size of bytes read into it xattr_val_buff.set_len(ret_code as usize); Ok(String::from_utf8_lossy(&xattr_val_buff).into_owned()) } } pub fn llistxattr(&self, path: &Path) -> Result<String, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); let mut xattr_val_buff: Vec<u8> = Vec::with_capacity(1024); unsafe { let ret_code = glfs_llistxattr(self.cluster_handle, path.as_ptr(), xattr_val_buff.as_mut_ptr() as *mut c_void, xattr_val_buff.len()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } // Set the buffer to the size of bytes read into it xattr_val_buff.set_len(ret_code as usize); Ok(String::from_utf8_lossy(&xattr_val_buff).into_owned()) } } pub fn flistxattr(&self, file_handle: *mut Struct_glfs_fd) -> Result<String, GlusterError> { let mut xattr_val_buff: Vec<u8> = Vec::with_capacity(1024); unsafe { let ret_code = glfs_flistxattr(file_handle, xattr_val_buff.as_mut_ptr() as *mut c_void, xattr_val_buff.len()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } // Set the buffer to the size of bytes 
read into it xattr_val_buff.set_len(ret_code as usize); Ok(String::from_utf8_lossy(&xattr_val_buff).into_owned()) } } pub fn setxattr(&self, path: &Path, name: &str, value: &[u8], flags: i32) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); let name = try!(CString::new(name)); unsafe { let ret_code = glfs_setxattr(self.cluster_handle, path.as_ptr(), name.as_ptr(), value.as_ptr() as *const c_void, value.len(), flags); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn lsetxattr(&self, name: &str, value: &[u8], path: &Path, flags: i32) -> Result<(), GlusterError> { let name = try!(CString::new(name)); let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_lsetxattr(self.cluster_handle, path.as_ptr(), name.as_ptr(), value.as_ptr() as *const c_void, value.len(), flags); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn fsetxattr(&self, file_handle: *mut Struct_glfs_fd, name: &str, value: &[u8], flags: i32) -> Result<(), GlusterError> { let name = try!(CString::new(name)); unsafe { let ret_code = glfs_fsetxattr(file_handle, name.as_ptr(), value.as_ptr() as *const c_void, value.len(), flags); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn removexattr(&self, path: &Path, name: &str) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); let name = try!(CString::new(name)); unsafe { let ret_code = glfs_removexattr(self.cluster_handle, path.as_ptr(), name.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn lremovexattr(&self, path: &Path, name: &str) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); let name = try!(CString::new(name)); unsafe { let ret_code = glfs_lremovexattr(self.cluster_handle, path.as_ptr(), name.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } 
} Ok(()) } pub fn fremovexattr(&self, file_handle: *mut Struct_glfs_fd, name: &str) -> Result<(), GlusterError> { let name = try!(CString::new(name)); unsafe { let ret_code = glfs_fremovexattr(file_handle, name.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn fallocate(&self, file_handle: *mut Struct_glfs_fd, offset: i64, keep_size: i32, len: usize) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_fallocate(file_handle, keep_size, offset, len); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn discard(&self, file_handle: *mut Struct_glfs_fd, offset: i64, len: usize) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_discard(file_handle, offset, len); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn zerofill(&self, file_handle: *mut Struct_glfs_fd, offset: i64, len: i64) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_zerofill(file_handle, offset, len); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn getcwd(&self) -> Result<String, GlusterError> { let mut cwd_val_buff: Vec<u8> = Vec::with_capacity(1024); unsafe { let cwd = glfs_getcwd(self.cluster_handle, cwd_val_buff.as_mut_ptr() as *mut i8, cwd_val_buff.len()); Ok(CStr::from_ptr(cwd).to_string_lossy().into_owned()) } } pub fn chdir(&self, path: &Path) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_chdir(self.cluster_handle, path.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn fchdir(&self, file_handle: *mut Struct_glfs_fd) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_fchdir(file_handle); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } /// times[0] specifies the new "last access time" (atime); /// times[1] specifies the new "last modification time" (mtime). 
pub fn utimens(&self, path: &Path, times: &[timespec; 2]) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_utimens(self.cluster_handle, path.as_ptr(), times.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } /// times[0] specifies the new "last access time" (atime); /// times[1] specifies the new "last modification time" (mtime). pub fn lutimens(&self, path: &Path, times: &[timespec; 2]) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_lutimens(self.cluster_handle, path.as_ptr(), times.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } /// times[0] specifies the new "last access time" (atime); /// times[1] specifies the new "last modification time" (mtime). pub fn futimens(&self, file_handle: *mut Struct_glfs_fd, times: &[timespec; 2]) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_futimens(file_handle, times.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn posixlock(&self, file_handle: *mut Struct_glfs_fd, command: PosixLockCmd, flock: &mut flock) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_posix_lock(file_handle, command.into(), flock); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn chmod(&self, path: &Path, mode: mode_t) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_chmod(self.cluster_handle, path.as_ptr(), mode); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn fchmod(&self, file_handle: *mut Struct_glfs_fd, mode: mode_t) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_fchmod(file_handle, mode); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn chown(&self, path: &Path, uid: u32, gid: u32) -> Result<(), 
GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_chown(self.cluster_handle, path.as_ptr(), uid, gid); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn lchown(&self, path: &Path, uid: u32, gid: u32) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_lchown(self.cluster_handle, path.as_ptr(), uid, gid); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn fchown(&self, file_handle: *mut Struct_glfs_fd, uid: u32, gid: u32) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_fchown(file_handle, uid, gid); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } // pub fn realpath(&self, path: &str) -> Result<String, GlusterError> { // let path = try!(CString::new(path)); // let resolved_path_buf: Vec<u8> = Vec::with_capacity(512); // unsafe { // let real_path = glfs_realpath(self.cluster_handle, // path.as_ptr(), // resolved_path: *mut c_char); // Ok(CStr::from_ptr(real_path).to_string_lossy().into_owned()) // } // } // pub fn dup(&self, file_handle: *mut Struct_glfs_fd) -> Result<*mut Struct_glfs_fd, GlusterError> { unsafe { let file_handle = glfs_dup(file_handle); Ok(file_handle) } } } Add directory iterator that uses readdirplus use errno::{errno, Errno}; use glfs::*; use libc::{c_uchar, c_void, dev_t, dirent, ENOENT, flock, LOCK_SH, LOCK_EX, LOCK_UN, ino_t, mode_t, stat, timespec}; use libffi::high::Closure3; use std::error::Error as err; use std::mem::zeroed; use std::ffi::{CStr, CString, IntoStringError, NulError}; use std::fmt; use std::io::Error; use std::os::unix::ffi::OsStrExt; use std::path::{Path, PathBuf}; use std::ptr; use std::string::FromUtf8Error; /// Custom error handling for the library #[derive(Debug)] pub enum GlusterError { FromUtf8Error(FromUtf8Error), NulError(NulError), Error(String), IoError(Error), IntoStringError(IntoStringError), } 
// Display delegates to Error::description so user-facing text matches the
// description() strings below.
impl fmt::Display for GlusterError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(self.description())
    }
}
// `err` is `std::error::Error` (renamed in the imports above).
impl err for GlusterError {
    fn description(&self) -> &str {
        match *self {
            GlusterError::FromUtf8Error(ref e) => e.description(),
            GlusterError::NulError(ref e) => e.description(),
            GlusterError::Error(ref e) => &e,
            GlusterError::IoError(ref e) => e.description(),
            GlusterError::IntoStringError(ref e) => e.description(),
        }
    }
    fn cause(&self) -> Option<&err> {
        match *self {
            GlusterError::FromUtf8Error(ref e) => e.cause(),
            GlusterError::NulError(ref e) => e.cause(),
            // A plain string message has no underlying cause.
            GlusterError::Error(_) => None,
            GlusterError::IoError(ref e) => e.cause(),
            GlusterError::IntoStringError(ref e) => e.cause(),
        }
    }
}
impl GlusterError {
    /// Create a new GlusterError with a String message
    fn new(err: String) -> GlusterError {
        GlusterError::Error(err)
    }
    /// Convert a GlusterError into a String representation.
    // NOTE(review): this inherent method shadows the blanket
    // ToString::to_string provided via the Display impl above.
    pub fn to_string(&self) -> String {
        match *self {
            GlusterError::FromUtf8Error(ref err) => err.utf8_error().to_string(),
            GlusterError::NulError(ref err) => err.description().to_string(),
            GlusterError::Error(ref err) => err.to_string(),
            GlusterError::IoError(ref err) => err.description().to_string(),
            GlusterError::IntoStringError(ref err) => err.description().to_string(),
        }
    }
}
// From impls so `try!`/`?` can auto-convert the common failure types.
impl From<NulError> for GlusterError {
    fn from(err: NulError) -> GlusterError {
        GlusterError::NulError(err)
    }
}
impl From<FromUtf8Error> for GlusterError {
    fn from(err: FromUtf8Error) -> GlusterError {
        GlusterError::FromUtf8Error(err)
    }
}
impl From<IntoStringError> for GlusterError {
    fn from(err: IntoStringError) -> GlusterError {
        GlusterError::IntoStringError(err)
    }
}
impl From<Error> for GlusterError {
    fn from(err: Error) -> GlusterError {
        GlusterError::IoError(err)
    }
}
// Render the current thread's errno as a String; used after every failed
// glfs_* call to produce the error message.
fn get_error() -> String {
    let error = errno();
    format!("{}", error)
}
/// Apply or remove an advisory lock on the open file.
pub enum PosixLockCmd {
    /// Place an exclusive lock. Only one process may hold an
    /// exclusive lock for a given file at a given time.
    Exclusive,
    /// Place a shared lock. More than one process may hold a shared
    /// lock for a given file at a given time.
    Shared,
    /// Remove an existing lock held by this process.
    Unlock,
}
// Map the enum onto the libc flock() constants expected by glfs_posix_lock.
impl Into<i32> for PosixLockCmd {
    fn into(self) -> i32 {
        match self {
            PosixLockCmd::Shared => LOCK_SH,
            PosixLockCmd::Exclusive => LOCK_EX,
            PosixLockCmd::Unlock => LOCK_UN,
        }
    }
}
// pub type glfs_io_cbk = ::std::option::Option<extern "C" fn(fd: *mut glfs_fd_t,
//                                                            ret: ssize_t,
//                                                            data: *mut c_void)
//                                                            -> ()>;pub type glfs_io_cbk = ::std::option::Option<extern "C" fn(fd: *mut glfs_fd_t,
//                                                            ret: ssize_t,
//                                                            data: *mut c_void)
//                                                            -> ()>;
//
#[derive(Debug)]
pub struct Gluster {
    // Raw handle returned by glfs_new/glfs_init; owned by this struct and
    // released in Drop via glfs_fini.
    cluster_handle: *mut Struct_glfs,
}
impl Drop for Gluster {
    fn drop(&mut self) {
        if self.cluster_handle.is_null() {
            // No cleanup needed
            return;
        }
        unsafe {
            // Return value of glfs_fini is ignored; Drop cannot report errors.
            glfs_fini(self.cluster_handle);
        }
    }
}
/// This uses readdirplus which is very efficient in Gluster. In addition
/// to returning directory entries this also stats each file.
#[derive(Debug)] pub struct GlusterDirectoryPlus { pub dir_handle: *mut Struct_glfs_fd, } pub struct DirEntryPlus { pub path: PathBuf, pub inode: ino_t, pub file_type: c_uchar, pub stat: stat, } impl Iterator for GlusterDirectoryPlus { type Item = DirEntryPlus; fn next(&mut self) -> Option<DirEntryPlus> { let mut dirent: dirent = unsafe { zeroed() }; let mut next_entry: *mut dirent = ptr::null_mut(); unsafe { let mut stat_buf: stat = zeroed(); let ret_code = glfs_readdirplus_r(self.dir_handle, &mut stat_buf, &mut dirent, &mut next_entry); if ret_code < 0 { glfs_closedir(self.dir_handle); return None; } if dirent.d_ino == 0 { // End of stream reached return None; } let telldir_retcode = glfs_telldir(self.dir_handle); if telldir_retcode < 0 { return None; } let file_name = CStr::from_ptr(dirent.d_name.as_ptr()); return Some(DirEntryPlus { path: PathBuf::from(file_name.to_string_lossy().into_owned()), inode: dirent.d_ino, file_type: dirent.d_type, stat: stat_buf, }); } } } #[derive(Debug)] pub struct GlusterDirectory { pub dir_handle: *mut Struct_glfs_fd, } #[derive(Debug)] pub struct DirEntry { pub path: PathBuf, pub inode: ino_t, pub file_type: c_uchar, } impl Iterator for GlusterDirectory { type Item = DirEntry; fn next(&mut self) -> Option<DirEntry> { let mut dirent: dirent = unsafe { zeroed() }; let mut next_entry: *mut dirent = ptr::null_mut(); unsafe { let ret_code = glfs_readdir_r(self.dir_handle, &mut dirent, &mut next_entry); if ret_code < 0 { glfs_closedir(self.dir_handle); return None; } if dirent.d_ino == 0 { // End of stream reached return None; } let telldir_retcode = glfs_telldir(self.dir_handle); if telldir_retcode < 0 { return None; } let file_name = CStr::from_ptr(dirent.d_name.as_ptr()); return Some(DirEntry { path: PathBuf::from(file_name.to_string_lossy().into_owned()), inode: dirent.d_ino, file_type: dirent.d_type, }); } } } impl Gluster { /// Connect to a Ceph cluster and return a connection handle glfs_t /// port is usually 24007 but may 
differ depending on how the service was configured pub fn connect(volume_name: &str, server: &str, port: u16) -> Result<Gluster, GlusterError> { let vol_name = try!(CString::new(volume_name)); let vol_transport = try!(CString::new("tcp")); let vol_host = try!(CString::new(server)); unsafe { let cluster_handle = glfs_new(vol_name.as_ptr()); if cluster_handle.is_null() { return Err(GlusterError::new("glfs_new failed".to_string())); } let ret_code = glfs_set_volfile_server(cluster_handle, vol_transport.as_ptr(), vol_host.as_ptr(), port as ::libc::c_int); if ret_code < 0 { return Err(GlusterError::new(get_error())); } let ret_code = glfs_init(cluster_handle); if ret_code < 0 { return Err(GlusterError::new(get_error())); } Ok(Gluster { cluster_handle: cluster_handle }) } } /// Disconnect from a Gluster cluster and destroy the connection handle /// For clean up, this is only necessary after connect() has succeeded. /// Normally there is no need to call this function. When Rust cleans /// up the Gluster struct it will automatically call disconnect pub fn disconnect(self) { if self.cluster_handle.is_null() { // No cleanup needed return; } unsafe { glfs_fini(self.cluster_handle); } } pub fn open(&self, path: &Path, flags: i32) -> Result<*mut Struct_glfs_fd, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let file_handle = glfs_open(self.cluster_handle, path.as_ptr(), flags); Ok(file_handle) } } pub fn create(&self, path: &Path, flags: i32, mode: mode_t) -> Result<*mut Struct_glfs_fd, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let file_handle = glfs_creat(self.cluster_handle, path.as_ptr(), flags, mode); if file_handle.is_null() { return Err(GlusterError::new(get_error())); } Ok(file_handle) } } pub fn close(&self, file_handle: *mut Struct_glfs_fd) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_close(file_handle); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) 
} pub fn read(&self, file_handle: *mut Struct_glfs_fd, fill_buffer: &mut Vec<u8>, count: usize, flags: i32) -> Result<isize, GlusterError> { self.pread(file_handle, fill_buffer, count, 0, flags) } pub fn write(&self, file_handle: *mut Struct_glfs_fd, buffer: &[u8], flags: i32) -> Result<isize, GlusterError> { unsafe { let write_size = glfs_write(file_handle, buffer.as_ptr() as *const c_void, buffer.len(), flags); if write_size < 0 { return Err(GlusterError::new(get_error())); } Ok(write_size) } } pub fn write_async<F>(&self, file_handle: *mut Struct_glfs_fd, buffer: &[u8], flags: i32, callback: F, data: &mut ::libc::c_void) -> Result<(), GlusterError> where F: Fn(*mut Struct_glfs_fd, isize, *mut ::libc::c_void) { let closure = Closure3::new(&callback); let callback_ptr = closure.code_ptr(); unsafe { let ret_code = glfs_write_async(file_handle, buffer.as_ptr() as *const c_void, buffer.len(), flags, Some(*callback_ptr), data); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn readv(&self, file_handle: *mut Struct_glfs_fd, iov: &mut [&mut [u8]], flags: i32) -> Result<isize, GlusterError> { unsafe { let read_size = glfs_readv(file_handle, iov.as_ptr() as *const iovec, iov.len() as i32, flags); if read_size < 0 { return Err(GlusterError::new(get_error())); } Ok(read_size) } } pub fn writev(&self, file_handle: *mut Struct_glfs_fd, iov: &[&[u8]], flags: i32) -> Result<isize, GlusterError> { unsafe { let write_size = glfs_writev(file_handle, iov.as_ptr() as *const iovec, iov.len() as i32, flags); if write_size < 0 { return Err(GlusterError::new(get_error())); } Ok(write_size) } } pub fn pread(&self, file_handle: *mut Struct_glfs_fd, fill_buffer: &mut [u8], count: usize, offset: i64, flags: i32) -> Result<isize, GlusterError> { unsafe { let read_size = glfs_pread(file_handle, fill_buffer.as_mut_ptr() as *mut c_void, count, offset, flags); if read_size < 0 { return Err(GlusterError::new(get_error())); } // fill_buffer.set_len(read_size as 
usize); Ok(read_size) } } pub fn pwrite(&self, file_handle: *mut Struct_glfs_fd, buffer: &[u8], count: usize, offset: i64, flags: i32) -> Result<isize, GlusterError> { unsafe { let write_size = glfs_pwrite(file_handle, buffer.as_ptr() as *mut c_void, count, offset, flags); if write_size < 0 { return Err(GlusterError::new(get_error())); } Ok(write_size) } } pub fn preadv(&self, file_handle: *mut Struct_glfs_fd, iov: &mut [&mut [u8]], offset: i64, flags: i32) -> Result<isize, GlusterError> { unsafe { let read_size = glfs_preadv(file_handle, iov.as_ptr() as *const iovec, iov.len() as i32, offset, flags); if read_size < 0 { return Err(GlusterError::new(get_error())); } Ok(read_size) } } // TODO: Use C IoVec pub fn pwritev(&self, file_handle: *mut Struct_glfs_fd, iov: &[&[u8]], offset: i64, flags: i32) -> Result<isize, GlusterError> { unsafe { let write_size = glfs_pwritev(file_handle, iov.as_ptr() as *const iovec, iov.len() as i32, offset, flags); if write_size < 0 { return Err(GlusterError::new(get_error())); } Ok(write_size) } } pub fn lseek(&self, file_handle: *mut Struct_glfs_fd, offset: i64, whence: i32) -> Result<i64, GlusterError> { unsafe { let file_offset = glfs_lseek(file_handle, offset, whence); if file_offset < 0 { return Err(GlusterError::new(get_error())); } Ok(file_offset) } } pub fn truncate(&self, path: &Path, length: i64) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_truncate(self.cluster_handle, path.as_ptr(), length); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn ftruncate(&self, file_handle: *mut Struct_glfs_fd, length: i64) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_ftruncate(file_handle, length); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn lsstat(&self, path: &Path) -> Result<stat, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let mut stat_buf: 
stat = zeroed(); let ret_code = glfs_lstat(self.cluster_handle, path.as_ptr(), &mut stat_buf); if ret_code < 0 { return Err(GlusterError::new(get_error())); } Ok(stat_buf) } } /// Tests for the existance of a file. Returns true/false respectively. pub fn exists(&self, path: &Path) -> Result<bool, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let mut stat_buf: stat = zeroed(); let ret_code = glfs_stat(self.cluster_handle, path.as_ptr(), &mut stat_buf); if ret_code < 0 { let error = errno(); if error == Errno(ENOENT) { return Ok(false); } return Err(GlusterError::new(get_error())); } Ok(false) } } pub fn stat(&self, path: &Path) -> Result<stat, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let mut stat_buf: stat = zeroed(); let ret_code = glfs_stat(self.cluster_handle, path.as_ptr(), &mut stat_buf); if ret_code < 0 { return Err(GlusterError::new(get_error())); } Ok(stat_buf) } } pub fn fstat(&self, file_handle: *mut Struct_glfs_fd) -> Result<stat, GlusterError> { unsafe { let mut stat_buf: stat = zeroed(); let ret_code = glfs_fstat(file_handle, &mut stat_buf); if ret_code < 0 { return Err(GlusterError::new(get_error())); } Ok(stat_buf) } } pub fn fsync(&self, file_handle: *mut Struct_glfs_fd) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_fsync(file_handle); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn fdatasync(&self, file_handle: *mut Struct_glfs_fd) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_fdatasync(file_handle); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn access(&self, path: &Path, mode: i32) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_access(self.cluster_handle, path.as_ptr(), mode); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn symlink(&self, oldpath: &Path, newpath: 
&Path) -> Result<(), GlusterError> { let old_path = try!(CString::new(oldpath.as_os_str().as_bytes())); let new_path = try!(CString::new(newpath.as_os_str().as_bytes())); unsafe { let ret_code = glfs_symlink(self.cluster_handle, old_path.as_ptr(), new_path.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn readlink(&self, path: &Path, buf: &mut [u8]) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_readlink(self.cluster_handle, path.as_ptr(), buf.as_mut_ptr() as *mut i8, buf.len()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn mknod(&self, path: &Path, mode: mode_t, dev: dev_t) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_mknod(self.cluster_handle, path.as_ptr(), mode, dev); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn mkdir(&self, path: &Path, mode: mode_t) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_mkdir(self.cluster_handle, path.as_ptr(), mode); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn unlink(&self, path: &Path) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_unlink(self.cluster_handle, path.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn rmdir(&self, path: &Path) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_rmdir(self.cluster_handle, path.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn rename(&self, oldpath: &Path, newpath: &Path) -> Result<(), GlusterError> { let old_path = try!(CString::new(oldpath.as_os_str().as_bytes())); let new_path = 
try!(CString::new(newpath.as_os_str().as_bytes())); unsafe { let ret_code = glfs_rename(self.cluster_handle, old_path.as_ptr(), new_path.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn link(&self, oldpath: &Path, newpath: &Path) -> Result<(), GlusterError> { let old_path = try!(CString::new(oldpath.as_os_str().as_bytes())); let new_path = try!(CString::new(newpath.as_os_str().as_bytes())); unsafe { let ret_code = glfs_link(self.cluster_handle, old_path.as_ptr(), new_path.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn opendir(&self, path: &Path) -> Result<*mut Struct_glfs_fd, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let file_handle = glfs_opendir(self.cluster_handle, path.as_ptr()); Ok(file_handle) } } pub fn getxattr(&self, path: &Path, name: &str) -> Result<String, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); let name = try!(CString::new(name)); let mut xattr_val_buff: Vec<u8> = Vec::with_capacity(1024); unsafe { let ret_code = glfs_getxattr(self.cluster_handle, path.as_ptr(), name.as_ptr(), xattr_val_buff.as_mut_ptr() as *mut c_void, xattr_val_buff.len()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } // Set the buffer to the size of bytes read into it xattr_val_buff.set_len(ret_code as usize); Ok(String::from_utf8_lossy(&xattr_val_buff).into_owned()) } } pub fn lgetxattr(&self, path: &Path, name: &str) -> Result<String, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); let name = try!(CString::new(name)); let mut xattr_val_buff: Vec<u8> = Vec::with_capacity(1024); unsafe { let ret_code = glfs_lgetxattr(self.cluster_handle, path.as_ptr(), name.as_ptr(), xattr_val_buff.as_mut_ptr() as *mut c_void, xattr_val_buff.len()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } // Set the buffer to the size of bytes read into it 
xattr_val_buff.set_len(ret_code as usize); Ok(String::from_utf8_lossy(&xattr_val_buff).into_owned()) } } pub fn fgetxattr(&self, file_handle: *mut Struct_glfs_fd, name: &str) -> Result<String, GlusterError> { let name = try!(CString::new(name)); let mut xattr_val_buff: Vec<u8> = Vec::with_capacity(1024); unsafe { let ret_code = glfs_fgetxattr(file_handle, name.as_ptr(), xattr_val_buff.as_mut_ptr() as *mut c_void, xattr_val_buff.len()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } // Set the buffer to the size of bytes read into it xattr_val_buff.set_len(ret_code as usize); Ok(String::from_utf8_lossy(&xattr_val_buff).into_owned()) } } pub fn listxattr(&self, path: &Path) -> Result<String, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); let mut xattr_val_buff: Vec<u8> = Vec::with_capacity(1024); unsafe { let ret_code = glfs_listxattr(self.cluster_handle, path.as_ptr(), xattr_val_buff.as_mut_ptr() as *mut c_void, xattr_val_buff.len()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } // Set the buffer to the size of bytes read into it xattr_val_buff.set_len(ret_code as usize); Ok(String::from_utf8_lossy(&xattr_val_buff).into_owned()) } } pub fn llistxattr(&self, path: &Path) -> Result<String, GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); let mut xattr_val_buff: Vec<u8> = Vec::with_capacity(1024); unsafe { let ret_code = glfs_llistxattr(self.cluster_handle, path.as_ptr(), xattr_val_buff.as_mut_ptr() as *mut c_void, xattr_val_buff.len()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } // Set the buffer to the size of bytes read into it xattr_val_buff.set_len(ret_code as usize); Ok(String::from_utf8_lossy(&xattr_val_buff).into_owned()) } } pub fn flistxattr(&self, file_handle: *mut Struct_glfs_fd) -> Result<String, GlusterError> { let mut xattr_val_buff: Vec<u8> = Vec::with_capacity(1024); unsafe { let ret_code = glfs_flistxattr(file_handle, 
xattr_val_buff.as_mut_ptr() as *mut c_void, xattr_val_buff.len()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } // Set the buffer to the size of bytes read into it xattr_val_buff.set_len(ret_code as usize); Ok(String::from_utf8_lossy(&xattr_val_buff).into_owned()) } } pub fn setxattr(&self, path: &Path, name: &str, value: &[u8], flags: i32) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); let name = try!(CString::new(name)); unsafe { let ret_code = glfs_setxattr(self.cluster_handle, path.as_ptr(), name.as_ptr(), value.as_ptr() as *const c_void, value.len(), flags); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn lsetxattr(&self, name: &str, value: &[u8], path: &Path, flags: i32) -> Result<(), GlusterError> { let name = try!(CString::new(name)); let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_lsetxattr(self.cluster_handle, path.as_ptr(), name.as_ptr(), value.as_ptr() as *const c_void, value.len(), flags); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn fsetxattr(&self, file_handle: *mut Struct_glfs_fd, name: &str, value: &[u8], flags: i32) -> Result<(), GlusterError> { let name = try!(CString::new(name)); unsafe { let ret_code = glfs_fsetxattr(file_handle, name.as_ptr(), value.as_ptr() as *const c_void, value.len(), flags); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn removexattr(&self, path: &Path, name: &str) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); let name = try!(CString::new(name)); unsafe { let ret_code = glfs_removexattr(self.cluster_handle, path.as_ptr(), name.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn lremovexattr(&self, path: &Path, name: &str) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); let name = 
try!(CString::new(name)); unsafe { let ret_code = glfs_lremovexattr(self.cluster_handle, path.as_ptr(), name.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn fremovexattr(&self, file_handle: *mut Struct_glfs_fd, name: &str) -> Result<(), GlusterError> { let name = try!(CString::new(name)); unsafe { let ret_code = glfs_fremovexattr(file_handle, name.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn fallocate(&self, file_handle: *mut Struct_glfs_fd, offset: i64, keep_size: i32, len: usize) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_fallocate(file_handle, keep_size, offset, len); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn discard(&self, file_handle: *mut Struct_glfs_fd, offset: i64, len: usize) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_discard(file_handle, offset, len); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn zerofill(&self, file_handle: *mut Struct_glfs_fd, offset: i64, len: i64) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_zerofill(file_handle, offset, len); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn getcwd(&self) -> Result<String, GlusterError> { let mut cwd_val_buff: Vec<u8> = Vec::with_capacity(1024); unsafe { let cwd = glfs_getcwd(self.cluster_handle, cwd_val_buff.as_mut_ptr() as *mut i8, cwd_val_buff.len()); Ok(CStr::from_ptr(cwd).to_string_lossy().into_owned()) } } pub fn chdir(&self, path: &Path) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_chdir(self.cluster_handle, path.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn fchdir(&self, file_handle: *mut Struct_glfs_fd) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_fchdir(file_handle); if ret_code < 0 { return 
Err(GlusterError::new(get_error())); } } Ok(()) } /// times[0] specifies the new "last access time" (atime); /// times[1] specifies the new "last modification time" (mtime). pub fn utimens(&self, path: &Path, times: &[timespec; 2]) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_utimens(self.cluster_handle, path.as_ptr(), times.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } /// times[0] specifies the new "last access time" (atime); /// times[1] specifies the new "last modification time" (mtime). pub fn lutimens(&self, path: &Path, times: &[timespec; 2]) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_lutimens(self.cluster_handle, path.as_ptr(), times.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } /// times[0] specifies the new "last access time" (atime); /// times[1] specifies the new "last modification time" (mtime). 
pub fn futimens(&self, file_handle: *mut Struct_glfs_fd, times: &[timespec; 2]) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_futimens(file_handle, times.as_ptr()); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn posixlock(&self, file_handle: *mut Struct_glfs_fd, command: PosixLockCmd, flock: &mut flock) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_posix_lock(file_handle, command.into(), flock); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn chmod(&self, path: &Path, mode: mode_t) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_chmod(self.cluster_handle, path.as_ptr(), mode); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn fchmod(&self, file_handle: *mut Struct_glfs_fd, mode: mode_t) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_fchmod(file_handle, mode); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn chown(&self, path: &Path, uid: u32, gid: u32) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_chown(self.cluster_handle, path.as_ptr(), uid, gid); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn lchown(&self, path: &Path, uid: u32, gid: u32) -> Result<(), GlusterError> { let path = try!(CString::new(path.as_os_str().as_bytes())); unsafe { let ret_code = glfs_lchown(self.cluster_handle, path.as_ptr(), uid, gid); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } pub fn fchown(&self, file_handle: *mut Struct_glfs_fd, uid: u32, gid: u32) -> Result<(), GlusterError> { unsafe { let ret_code = glfs_fchown(file_handle, uid, gid); if ret_code < 0 { return Err(GlusterError::new(get_error())); } } Ok(()) } // pub fn realpath(&self, path: &str) -> Result<String, GlusterError> { // let path = 
try!(CString::new(path)); // let resolved_path_buf: Vec<u8> = Vec::with_capacity(512); // unsafe { // let real_path = glfs_realpath(self.cluster_handle, // path.as_ptr(), // resolved_path: *mut c_char); // Ok(CStr::from_ptr(real_path).to_string_lossy().into_owned()) // } // } // pub fn dup(&self, file_handle: *mut Struct_glfs_fd) -> Result<*mut Struct_glfs_fd, GlusterError> { unsafe { let file_handle = glfs_dup(file_handle); Ok(file_handle) } } }
#![crate_name = "hiredis"] #![crate_type = "lib"] #![feature(globs)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] #![allow(dead_code)] extern crate libc; use std::mem::transmute; pub mod api; pub struct Reply { reply: *const api::Reply } impl Reply { unsafe fn new(reply: *const api::Reply) -> Reply { Reply { reply: reply } } } pub struct Redis { context: *const api::Context } impl Redis { pub fn new(ip: &str, port: i32) -> Redis { unsafe { Redis { context: api::redisConnect( ip.to_c_str().as_ptr(), port ) } } } /* TODO: Move to IoResult using context.err */ pub fn exec(&self, command: &str) -> Option<Reply> { command.with_c_str(|v| { unsafe { let result = api::redisCommand(self.context, v); /* Fail if the command errored for some reason. */ if result == 0 as *const ::libc::c_void { None } /* Otherwise transmute the void pointer memory into a pointer * to a reply structure and return it. */ Some(Reply::new(transmute(result))) } } } } impl Drop for Redis { fn drop(&mut self) { unsafe { api::redisFree(self.context) } } } Fix Syntax #![crate_name = "hiredis"] #![crate_type = "lib"] #![feature(globs)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] #![allow(dead_code)] extern crate libc; use std::mem::transmute; pub mod api; pub struct Reply { reply: *const api::Reply } impl Reply { unsafe fn new(reply: *const api::Reply) -> Reply { Reply { reply: reply } } } pub struct Redis { context: *const api::Context } impl Redis { pub fn new(ip: &str, port: i32) -> Redis { unsafe { Redis { context: api::redisConnect( ip.to_c_str().as_ptr(), port ) } } } /* TODO: Move to IoResult using context.err */ pub fn exec(&self, command: &str) -> Option<Reply> { command.with_c_str(|v| { unsafe { let result = api::redisCommand(self.context, v); /* Fail if the command errored for some reason. */ if result == 0 as *const ::libc::c_void { return None; } /* Otherwise transmute the void pointer memory into a pointer * to a reply structure and return it. 
*/ Some(Reply::new(transmute(result))) } }) } } impl Drop for Redis { fn drop(&mut self) { unsafe { api::redisFree(self.context) } } }
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::cmp::Ordering; use config::lists::*; use syntax::ast::{self, UseTreeKind}; use syntax::codemap::{self, BytePos, Span, DUMMY_SP}; use codemap::SpanUtils; use config::IndentStyle; use lists::{definitive_tactic, itemize_list, write_list, ListFormatting, ListItem, Separator}; use rewrite::{Rewrite, RewriteContext}; use shape::Shape; use spanned::Spanned; use utils::mk_sp; use visitor::FmtVisitor; use std::borrow::Cow; use std::fmt; /// Returns a name imported by a `use` declaration. e.g. returns `Ordering` /// for `std::cmp::Ordering` and `self` for `std::cmp::self`. pub fn path_to_imported_ident(path: &ast::Path) -> ast::Ident { path.segments.last().unwrap().ident } impl<'a> FmtVisitor<'a> { pub fn format_import(&mut self, item: &ast::Item, tree: &ast::UseTree) { let span = item.span; let shape = self.shape(); let rw = UseTree::from_ast( &self.get_context(), tree, None, Some(item.vis.clone()), Some(item.span.lo()), Some(item.attrs.clone()), ).rewrite_top_level(&self.get_context(), shape); match rw { Some(ref s) if s.is_empty() => { // Format up to last newline let prev_span = mk_sp(self.last_pos, source!(self, span).lo()); let trimmed_snippet = self.snippet(prev_span).trim_right(); let span_end = self.last_pos + BytePos(trimmed_snippet.len() as u32); self.format_missing(span_end); // We have an excessive newline from the removed import. 
if self.buffer.ends_with('\n') { self.buffer.pop(); self.line_number -= 1; } self.last_pos = source!(self, span).hi(); } Some(ref s) => { self.format_missing_with_indent(source!(self, span).lo()); self.push_str(s); self.last_pos = source!(self, span).hi(); } None => { self.format_missing_with_indent(source!(self, span).lo()); self.format_missing(source!(self, span).hi()); } } } } // Ordering of imports // We order imports by translating to our own representation and then sorting. // The Rust AST data structures are really bad for this. Rustfmt applies a bunch // of normalisations to imports and since we want to sort based on the result // of these (and to maintain idempotence) we must apply the same normalisations // to the data structures for sorting. // // We sort `self` and `super` before other imports, then identifier imports, // then glob imports, then lists of imports. We do not take aliases into account // when ordering unless the imports are identical except for the alias (rare in // practice). // FIXME(#2531) - we should unify the comparison code here with the formatting // code elsewhere since we are essentially string-ifying twice. Furthermore, by // parsing to our own format on comparison, we repeat a lot of work when // sorting. // FIXME we do a lot of allocation to make our own representation. #[derive(Clone, Eq, PartialEq)] pub enum UseSegment { Ident(String, Option<String>), Slf(Option<String>), Super(Option<String>), Glob, List(Vec<UseTree>), } #[derive(Clone)] pub struct UseTree { pub path: Vec<UseSegment>, pub span: Span, // Comment information within nested use tree. pub list_item: Option<ListItem>, // Additional fields for top level use items. // Should we have another struct for top-level use items rather than reusing this? 
visibility: Option<ast::Visibility>, attrs: Option<Vec<ast::Attribute>>, } impl PartialEq for UseTree { fn eq(&self, other: &UseTree) -> bool { self.path == other.path } } impl Eq for UseTree {} impl UseSegment { // Clone a version of self with any top-level alias removed. fn remove_alias(&self) -> UseSegment { match *self { UseSegment::Ident(ref s, _) => UseSegment::Ident(s.clone(), None), UseSegment::Slf(_) => UseSegment::Slf(None), UseSegment::Super(_) => UseSegment::Super(None), _ => self.clone(), } } fn from_path_segment(path_seg: &ast::PathSegment) -> Option<UseSegment> { let name = path_seg.ident.name.as_str(); if name == "{{root}}" { return None; } Some(if name == "self" { UseSegment::Slf(None) } else if name == "super" { UseSegment::Super(None) } else { UseSegment::Ident((*name).to_owned(), None) }) } } pub fn merge_use_trees(use_trees: Vec<UseTree>) -> Vec<UseTree> { let mut result = Vec::with_capacity(use_trees.len()); for use_tree in use_trees { if use_tree.has_comment() || use_tree.attrs.is_some() { result.push(use_tree); continue; } for flattened in use_tree.flatten() { merge_use_trees_inner(&mut result, flattened); } } result } fn merge_use_trees_inner(trees: &mut Vec<UseTree>, use_tree: UseTree) { for tree in trees.iter_mut() { if tree.share_prefix(&use_tree) { tree.merge(use_tree); return; } } trees.push(use_tree); } impl fmt::Debug for UseTree { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self) } } impl fmt::Debug for UseSegment { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self) } } impl fmt::Display for UseSegment { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { UseSegment::Glob => write!(f, "*"), UseSegment::Ident(ref s, _) => write!(f, "{}", s), UseSegment::Slf(..) => write!(f, "self"), UseSegment::Super(..) 
=> write!(f, "super"), UseSegment::List(ref list) => { write!(f, "{{")?; for (i, item) in list.iter().enumerate() { let is_last = i == list.len() - 1; write!(f, "{}", item)?; if !is_last { write!(f, ", ")?; } } write!(f, "}}") } } } } impl fmt::Display for UseTree { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { for (i, segment) in self.path.iter().enumerate() { let is_last = i == self.path.len() - 1; write!(f, "{}", segment)?; if !is_last { write!(f, "::")?; } } write!(f, "") } } impl UseTree { // Rewrite use tree with `use ` and a trailing `;`. pub fn rewrite_top_level(&self, context: &RewriteContext, shape: Shape) -> Option<String> { let mut result = String::with_capacity(256); if let Some(ref attrs) = self.attrs { result.push_str(&attrs.rewrite(context, shape).expect("rewrite attr")); if !result.is_empty() { result.push_str(&shape.indent.to_string_with_newline(context.config)); } } let vis = self.visibility .as_ref() .map_or(Cow::from(""), |vis| ::utils::format_visibility(&vis)); result.push_str(&self.rewrite(context, shape.offset_left(vis.len())?) .map(|s| { if s.is_empty() { s.to_owned() } else { format!("{}use {};", vis, s) } })?); Some(result) } // FIXME: Use correct span? 
fn from_path(path: Vec<UseSegment>, span: Span) -> UseTree { UseTree { path, span, list_item: None, visibility: None, attrs: None, } } pub fn from_ast_with_normalization( context: &RewriteContext, item: &ast::Item, ) -> Option<UseTree> { match item.node { ast::ItemKind::Use(ref use_tree) => Some( UseTree::from_ast( context, use_tree, None, Some(item.vis.clone()), Some(item.span().lo()), if item.attrs.is_empty() { None } else { Some(item.attrs.clone()) }, ).normalize(context.config.reorder_imported_names()), ), _ => None, } } fn from_ast( context: &RewriteContext, a: &ast::UseTree, list_item: Option<ListItem>, visibility: Option<ast::Visibility>, opt_lo: Option<BytePos>, attrs: Option<Vec<ast::Attribute>>, ) -> UseTree { let span = if let Some(lo) = opt_lo { mk_sp(lo, a.span.hi()) } else { a.span }; let mut result = UseTree { path: vec![], span, list_item, visibility, attrs, }; for p in &a.prefix.segments { if let Some(use_segment) = UseSegment::from_path_segment(p) { result.path.push(use_segment); } } match a.kind { UseTreeKind::Glob => { result.path.push(UseSegment::Glob); } UseTreeKind::Nested(ref list) => { // Extract comments between nested use items. // This needs to be done before sorting use items. let items: Vec<_> = itemize_list( context.snippet_provider, list.iter().map(|(tree, _)| tree), "}", ",", |tree| tree.span.lo(), |tree| tree.span.hi(), |_| Some("".to_owned()), // We only need comments for now. 
context.snippet_provider.span_after(a.span, "{"), a.span.hi(), false, ).collect(); result.path.push(UseSegment::List( list.iter() .zip(items.into_iter()) .map(|(t, list_item)| { Self::from_ast(context, &t.0, Some(list_item), None, None, None) }) .collect(), )); } UseTreeKind::Simple(ref rename) => { let mut name = (*path_to_imported_ident(&a.prefix).name.as_str()).to_owned(); let alias = rename.and_then(|ident| { if ident == path_to_imported_ident(&a.prefix) { None } else { Some(ident.to_string()) } }); let segment = if &name == "self" { UseSegment::Slf(alias) } else if &name == "super" { UseSegment::Super(alias) } else { UseSegment::Ident(name, alias) }; // `name` is already in result. result.path.pop(); result.path.push(segment); } } result } // Do the adjustments that rustfmt does elsewhere to use paths. pub fn normalize(mut self, do_sort: bool) -> UseTree { let mut last = self.path.pop().expect("Empty use tree?"); // Hack around borrow checker. let mut normalize_sole_list = false; let mut aliased_self = false; // Remove foo::{} or self without attributes. match last { _ if self.attrs.is_some() => (), UseSegment::List(ref list) if list.is_empty() => { self.path = vec![]; return self; } UseSegment::Slf(None) if self.path.is_empty() && self.visibility.is_some() => { self.path = vec![]; return self; } _ => (), } // Normalise foo::self -> foo. if let UseSegment::Slf(None) = last { if self.path.len() > 0 { return self; } } // Normalise foo::self as bar -> foo as bar. 
if let UseSegment::Slf(_) = last { match self.path.last() { None => {} Some(UseSegment::Ident(_, None)) => { aliased_self = true; } _ => unreachable!(), } } let mut done = false; if aliased_self { match self.path.last_mut() { Some(UseSegment::Ident(_, ref mut old_rename)) => { assert!(old_rename.is_none()); if let UseSegment::Slf(Some(rename)) = last.clone() { *old_rename = Some(rename); done = true; } } _ => unreachable!(), } } if done { return self; } // Normalise foo::{bar} -> foo::bar if let UseSegment::List(ref list) = last { if list.len() == 1 { normalize_sole_list = true; } } if normalize_sole_list { match last { UseSegment::List(list) => { for seg in &list[0].path { self.path.push(seg.clone()); } return self.normalize(do_sort); } _ => unreachable!(), } } // Recursively normalize elements of a list use (including sorting the list). if let UseSegment::List(list) = last { let mut list = list.into_iter() .map(|ut| ut.normalize(do_sort)) .collect::<Vec<_>>(); if do_sort { list.sort(); } last = UseSegment::List(list); } self.path.push(last); self } fn has_comment(&self) -> bool { self.list_item.as_ref().map_or(false, ListItem::has_comment) } fn same_visibility(&self, other: &UseTree) -> bool { match (&self.visibility, &other.visibility) { ( Some(codemap::Spanned { node: ast::VisibilityKind::Inherited, .. }), None, ) | ( None, Some(codemap::Spanned { node: ast::VisibilityKind::Inherited, .. }), ) | (None, None) => true, ( Some(codemap::Spanned { node: lnode, .. }), Some(codemap::Spanned { node: rnode, .. 
}), ) => lnode == rnode, _ => false, } } fn share_prefix(&self, other: &UseTree) -> bool { if self.path.is_empty() || other.path.is_empty() || self.attrs.is_some() || !self.same_visibility(other) { false } else { self.path[0] == other.path[0] } } fn flatten(self) -> Vec<UseTree> { if self.path.is_empty() { return vec![self]; } match self.path.clone().last().unwrap() { UseSegment::List(list) => { let prefix = &self.path[..self.path.len() - 1]; let mut result = vec![]; for nested_use_tree in list.into_iter() { for mut flattend in nested_use_tree.clone().flatten().iter_mut() { let mut new_path = prefix.to_vec(); new_path.append(&mut flattend.path); result.push(UseTree { path: new_path, span: self.span, list_item: None, visibility: self.visibility.clone(), attrs: None, }); } } result } _ => vec![self], } } fn merge(&mut self, other: UseTree) { let mut new_path = vec![]; let mut len = 0; for (i, (mut a, b)) in self.path .clone() .iter_mut() .zip(other.path.clone().into_iter()) .enumerate() { if *a == b { len = i + 1; new_path.push(b); } else { len = i; break; } } if let Some(merged) = merge_rest(&self.path, &other.path, len) { new_path.push(merged); self.span = self.span.to(other.span); } self.path = new_path; } } fn merge_rest(a: &[UseSegment], b: &[UseSegment], len: usize) -> Option<UseSegment> { let a_rest = &a[len..]; let b_rest = &b[len..]; if a_rest.is_empty() && b_rest.is_empty() { return None; } if a_rest.is_empty() { return Some(UseSegment::List(vec![ UseTree::from_path(vec![UseSegment::Slf(None)], DUMMY_SP), UseTree::from_path(b_rest.to_vec(), DUMMY_SP), ])); } if b_rest.is_empty() { return Some(UseSegment::List(vec![ UseTree::from_path(vec![UseSegment::Slf(None)], DUMMY_SP), UseTree::from_path(a_rest.to_vec(), DUMMY_SP), ])); } if let UseSegment::List(mut list) = a_rest[0].clone() { merge_use_trees_inner(&mut list, UseTree::from_path(b_rest.to_vec(), DUMMY_SP)); list.sort(); return Some(UseSegment::List(list.clone())); } let mut list = vec![ 
UseTree::from_path(a_rest.to_vec(), DUMMY_SP), UseTree::from_path(b_rest.to_vec(), DUMMY_SP), ]; list.sort(); Some(UseSegment::List(list)) } impl PartialOrd for UseSegment { fn partial_cmp(&self, other: &UseSegment) -> Option<Ordering> { Some(self.cmp(other)) } } impl PartialOrd for UseTree { fn partial_cmp(&self, other: &UseTree) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for UseSegment { fn cmp(&self, other: &UseSegment) -> Ordering { use self::UseSegment::*; fn is_upper_snake_case(s: &str) -> bool { s.chars().all(|c| c.is_uppercase() || c == '_') } match (self, other) { (&Slf(ref a), &Slf(ref b)) | (&Super(ref a), &Super(ref b)) => a.cmp(b), (&Glob, &Glob) => Ordering::Equal, (&Ident(ref ia, ref aa), &Ident(ref ib, ref ab)) => { // snake_case < CamelCase < UPPER_SNAKE_CASE if ia.starts_with(char::is_uppercase) && ib.starts_with(char::is_lowercase) { return Ordering::Greater; } if ia.starts_with(char::is_lowercase) && ib.starts_with(char::is_uppercase) { return Ordering::Less; } if is_upper_snake_case(ia) && !is_upper_snake_case(ib) { return Ordering::Greater; } if !is_upper_snake_case(ia) && is_upper_snake_case(ib) { return Ordering::Less; } let ident_ord = ia.cmp(ib); if ident_ord != Ordering::Equal { return ident_ord; } if aa.is_none() && ab.is_some() { return Ordering::Less; } if aa.is_some() && ab.is_none() { return Ordering::Greater; } aa.cmp(ab) } (&List(ref a), &List(ref b)) => { for (a, b) in a.iter().zip(b.iter()) { let ord = a.cmp(b); if ord != Ordering::Equal { return ord; } } a.len().cmp(&b.len()) } (&Slf(_), _) => Ordering::Less, (_, &Slf(_)) => Ordering::Greater, (&Super(_), _) => Ordering::Less, (_, &Super(_)) => Ordering::Greater, (&Ident(..), _) => Ordering::Less, (_, &Ident(..)) => Ordering::Greater, (&Glob, _) => Ordering::Less, (_, &Glob) => Ordering::Greater, } } } impl Ord for UseTree { fn cmp(&self, other: &UseTree) -> Ordering { for (a, b) in self.path.iter().zip(other.path.iter()) { let ord = a.cmp(b); // The comparison 
without aliases is a hack to avoid situations like // comparing `a::b` to `a as c` - where the latter should be ordered // first since it is shorter. if ord != Ordering::Equal && a.remove_alias().cmp(&b.remove_alias()) != Ordering::Equal { return ord; } } self.path.len().cmp(&other.path.len()) } } fn rewrite_nested_use_tree( context: &RewriteContext, use_tree_list: &[UseTree], shape: Shape, ) -> Option<String> { let mut list_items = Vec::with_capacity(use_tree_list.len()); let nested_shape = match context.config.imports_indent() { IndentStyle::Block => shape .block_indent(context.config.tab_spaces()) .with_max_width(context.config) .sub_width(1)?, IndentStyle::Visual => shape.visual_indent(0), }; for use_tree in use_tree_list { if let Some(mut list_item) = use_tree.list_item.clone() { list_item.item = use_tree.rewrite(context, nested_shape); list_items.push(list_item); } else { list_items.push(ListItem::from_str(use_tree.rewrite(context, nested_shape)?)); } } let (tactic, remaining_width) = if use_tree_list.iter().any(|use_segment| { use_segment .path .last() .map_or(false, |last_segment| match last_segment { UseSegment::List(..) 
=> true, _ => false, }) }) { (DefinitiveListTactic::Vertical, 0) } else { let remaining_width = shape.width.checked_sub(2).unwrap_or(0); let tactic = definitive_tactic( &list_items, context.config.imports_layout(), Separator::Comma, remaining_width, ); (tactic, remaining_width) }; let ends_with_newline = context.config.imports_indent() == IndentStyle::Block && tactic != DefinitiveListTactic::Horizontal; let fmt = ListFormatting { tactic, separator: ",", trailing_separator: if ends_with_newline { context.config.trailing_comma() } else { SeparatorTactic::Never }, separator_place: SeparatorPlace::Back, shape: nested_shape, ends_with_newline, preserve_newline: true, config: context.config, }; let list_str = write_list(&list_items, &fmt)?; let result = if (list_str.contains('\n') || list_str.len() > remaining_width) && context.config.imports_indent() == IndentStyle::Block { format!( "{{\n{}{}\n{}}}", nested_shape.indent.to_string(context.config), list_str, shape.indent.to_string(context.config) ) } else { format!("{{{}}}", list_str) }; Some(result) } impl Rewrite for UseSegment { fn rewrite(&self, context: &RewriteContext, shape: Shape) -> Option<String> { Some(match *self { UseSegment::Ident(ref ident, Some(ref rename)) => format!("{} as {}", ident, rename), UseSegment::Ident(ref ident, None) => ident.clone(), UseSegment::Slf(Some(ref rename)) => format!("self as {}", rename), UseSegment::Slf(None) => "self".to_owned(), UseSegment::Super(Some(ref rename)) => format!("super as {}", rename), UseSegment::Super(None) => "super".to_owned(), UseSegment::Glob => "*".to_owned(), UseSegment::List(ref use_tree_list) => rewrite_nested_use_tree( context, use_tree_list, // 1 = "{" and "}" shape.offset_left(1)?.sub_width(1)?, )?, }) } } impl Rewrite for UseTree { // This does NOT format attributes and visibility or add a trailing `;`. 
fn rewrite(&self, context: &RewriteContext, mut shape: Shape) -> Option<String> { let mut result = String::with_capacity(256); let mut iter = self.path.iter().peekable(); while let Some(ref segment) = iter.next() { let segment_str = segment.rewrite(context, shape)?; result.push_str(&segment_str); if iter.peek().is_some() { result.push_str("::"); // 2 = "::" shape = shape.offset_left(2 + segment_str.len())?; } } Some(result) } } #[cfg(test)] mod test { use super::*; use syntax::codemap::DUMMY_SP; // Parse the path part of an import. This parser is not robust and is only // suitable for use in a test harness. fn parse_use_tree(s: &str) -> UseTree { use std::iter::Peekable; use std::mem::swap; use std::str::Chars; struct Parser<'a> { input: Peekable<Chars<'a>>, } impl<'a> Parser<'a> { fn bump(&mut self) { self.input.next().unwrap(); } fn eat(&mut self, c: char) { assert!(self.input.next().unwrap() == c); } fn push_segment( result: &mut Vec<UseSegment>, buf: &mut String, alias_buf: &mut Option<String>, ) { if !buf.is_empty() { let mut alias = None; swap(alias_buf, &mut alias); if buf == "self" { result.push(UseSegment::Slf(alias)); *buf = String::new(); *alias_buf = None; } else if buf == "super" { result.push(UseSegment::Super(alias)); *buf = String::new(); *alias_buf = None; } else { let mut name = String::new(); swap(buf, &mut name); result.push(UseSegment::Ident(name, alias)); } } } fn parse_in_list(&mut self) -> UseTree { let mut result = vec![]; let mut buf = String::new(); let mut alias_buf = None; while let Some(&c) = self.input.peek() { match c { '{' => { assert!(buf.is_empty()); self.bump(); result.push(UseSegment::List(self.parse_list())); self.eat('}'); } '*' => { assert!(buf.is_empty()); self.bump(); result.push(UseSegment::Glob); } ':' => { self.bump(); self.eat(':'); Self::push_segment(&mut result, &mut buf, &mut alias_buf); } '}' | ',' => { Self::push_segment(&mut result, &mut buf, &mut alias_buf); return UseTree { path: result, span: DUMMY_SP, 
list_item: None, visibility: None, attrs: None, }; } ' ' => { self.bump(); self.eat('a'); self.eat('s'); self.eat(' '); alias_buf = Some(String::new()); } c => { self.bump(); if let Some(ref mut buf) = alias_buf { buf.push(c); } else { buf.push(c); } } } } Self::push_segment(&mut result, &mut buf, &mut alias_buf); UseTree { path: result, span: DUMMY_SP, list_item: None, visibility: None, attrs: None, } } fn parse_list(&mut self) -> Vec<UseTree> { let mut result = vec![]; loop { match self.input.peek().unwrap() { ',' | ' ' => self.bump(), '}' => { return result; } _ => result.push(self.parse_in_list()), } } } } let mut parser = Parser { input: s.chars().peekable(), }; parser.parse_in_list() } macro parse_use_trees($($s:expr),* $(,)*) { vec![ $(parse_use_tree($s),)* ] } #[test] fn test_use_tree_merge() { macro test_merge([$($input:expr),* $(,)*], [$($output:expr),* $(,)*]) { assert_eq!( merge_use_trees(parse_use_trees!($($input,)*)), parse_use_trees!($($output,)*), ); } test_merge!(["a::b::{c, d}", "a::b::{e, f}"], ["a::b::{c, d, e, f}"]); test_merge!(["a::b::c", "a::b"], ["a::b::{self, c}"]); test_merge!(["a::b", "a::b"], ["a::b"]); test_merge!(["a", "a::b", "a::b::c"], ["a::{self, b::{self, c}}"]); test_merge!( ["a::{b::{self, c}, d::e}", "a::d::f"], ["a::{b::{self, c}, d::{e, f}}"] ); test_merge!( ["a::d::f", "a::{b::{self, c}, d::e}"], ["a::{b::{self, c}, d::{e, f}}"] ); test_merge!( ["a::{c, d, b}", "a::{d, e, b, a, f}", "a::{f, g, c}"], ["a::{a, b, c, d, e, f, g}"] ); } #[test] fn test_use_tree_flatten() { assert_eq!( parse_use_tree("a::b::{c, d, e, f}").flatten(), parse_use_trees!("a::b::c", "a::b::d", "a::b::e", "a::b::f",) ); assert_eq!( parse_use_tree("a::b::{c::{d, e, f}, g, h::{i, j, k}}").flatten(), parse_use_trees![ "a::b::c::d", "a::b::c::e", "a::b::c::f", "a::b::g", "a::b::h::i", "a::b::h::j", "a::b::h::k", ] ); } #[test] fn test_use_tree_normalize() { assert_eq!( parse_use_tree("a::self").normalize(true), parse_use_tree("a") ); assert_eq!( 
parse_use_tree("a::self as foo").normalize(true), parse_use_tree("a as foo") ); assert_eq!( parse_use_tree("a::{self}").normalize(true), parse_use_tree("a") ); assert_eq!( parse_use_tree("a::{b}").normalize(true), parse_use_tree("a::b") ); assert_eq!( parse_use_tree("a::{b, c::self}").normalize(true), parse_use_tree("a::{b, c}") ); assert_eq!( parse_use_tree("a::{b as bar, c::self}").normalize(true), parse_use_tree("a::{b as bar, c}") ); } #[test] fn test_use_tree_ord() { assert!(parse_use_tree("a").normalize(true) < parse_use_tree("aa").normalize(true)); assert!(parse_use_tree("a").normalize(true) < parse_use_tree("a::a").normalize(true)); assert!(parse_use_tree("a").normalize(true) < parse_use_tree("*").normalize(true)); assert!(parse_use_tree("a").normalize(true) < parse_use_tree("{a, b}").normalize(true)); assert!(parse_use_tree("*").normalize(true) < parse_use_tree("{a, b}").normalize(true)); assert!( parse_use_tree("aaaaaaaaaaaaaaa::{bb, cc, dddddddd}").normalize(true) < parse_use_tree("aaaaaaaaaaaaaaa::{bb, cc, ddddddddd}").normalize(true) ); assert!( parse_use_tree("serde::de::{Deserialize}").normalize(true) < parse_use_tree("serde_json").normalize(true) ); assert!( parse_use_tree("a::b::c").normalize(true) < parse_use_tree("a::b::*").normalize(true) ); assert!( parse_use_tree("foo::{Bar, Baz}").normalize(true) < parse_use_tree("{Bar, Baz}").normalize(true) ); assert!( parse_use_tree("foo::{self as bar}").normalize(true) < parse_use_tree("foo::{qux as bar}").normalize(true) ); assert!( parse_use_tree("foo::{qux as bar}").normalize(true) < parse_use_tree("foo::{baz, qux as bar}").normalize(true) ); assert!( parse_use_tree("foo::{self as bar, baz}").normalize(true) < parse_use_tree("foo::{baz, qux as bar}").normalize(true) ); assert!(parse_use_tree("foo").normalize(true) < parse_use_tree("Foo").normalize(true)); assert!(parse_use_tree("foo").normalize(true) < parse_use_tree("foo::Bar").normalize(true)); assert!( parse_use_tree("std::cmp::{d, c, b, 
a}").normalize(true) < parse_use_tree("std::cmp::{b, e, g, f}").normalize(true) ); } } Resolve review comments // Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::cmp::Ordering; use config::lists::*; use syntax::ast::{self, UseTreeKind}; use syntax::codemap::{self, BytePos, Span, DUMMY_SP}; use codemap::SpanUtils; use config::IndentStyle; use lists::{definitive_tactic, itemize_list, write_list, ListFormatting, ListItem, Separator}; use rewrite::{Rewrite, RewriteContext}; use shape::Shape; use spanned::Spanned; use utils::mk_sp; use visitor::FmtVisitor; use std::borrow::Cow; use std::fmt; /// Returns a name imported by a `use` declaration. e.g. returns `Ordering` /// for `std::cmp::Ordering` and `self` for `std::cmp::self`. pub fn path_to_imported_ident(path: &ast::Path) -> ast::Ident { path.segments.last().unwrap().ident } impl<'a> FmtVisitor<'a> { pub fn format_import(&mut self, item: &ast::Item, tree: &ast::UseTree) { let span = item.span; let shape = self.shape(); let rw = UseTree::from_ast( &self.get_context(), tree, None, Some(item.vis.clone()), Some(item.span.lo()), Some(item.attrs.clone()), ).rewrite_top_level(&self.get_context(), shape); match rw { Some(ref s) if s.is_empty() => { // Format up to last newline let prev_span = mk_sp(self.last_pos, source!(self, span).lo()); let trimmed_snippet = self.snippet(prev_span).trim_right(); let span_end = self.last_pos + BytePos(trimmed_snippet.len() as u32); self.format_missing(span_end); // We have an excessive newline from the removed import. 
if self.buffer.ends_with('\n') { self.buffer.pop(); self.line_number -= 1; } self.last_pos = source!(self, span).hi(); } Some(ref s) => { self.format_missing_with_indent(source!(self, span).lo()); self.push_str(s); self.last_pos = source!(self, span).hi(); } None => { self.format_missing_with_indent(source!(self, span).lo()); self.format_missing(source!(self, span).hi()); } } } } // Ordering of imports // We order imports by translating to our own representation and then sorting. // The Rust AST data structures are really bad for this. Rustfmt applies a bunch // of normalisations to imports and since we want to sort based on the result // of these (and to maintain idempotence) we must apply the same normalisations // to the data structures for sorting. // // We sort `self` and `super` before other imports, then identifier imports, // then glob imports, then lists of imports. We do not take aliases into account // when ordering unless the imports are identical except for the alias (rare in // practice). // FIXME(#2531) - we should unify the comparison code here with the formatting // code elsewhere since we are essentially string-ifying twice. Furthermore, by // parsing to our own format on comparison, we repeat a lot of work when // sorting. // FIXME we do a lot of allocation to make our own representation. #[derive(Clone, Eq, PartialEq)] pub enum UseSegment { Ident(String, Option<String>), Slf(Option<String>), Super(Option<String>), Glob, List(Vec<UseTree>), } #[derive(Clone)] pub struct UseTree { pub path: Vec<UseSegment>, pub span: Span, // Comment information within nested use tree. pub list_item: Option<ListItem>, // Additional fields for top level use items. // Should we have another struct for top-level use items rather than reusing this? 
visibility: Option<ast::Visibility>, attrs: Option<Vec<ast::Attribute>>, } impl PartialEq for UseTree { fn eq(&self, other: &UseTree) -> bool { self.path == other.path } } impl Eq for UseTree {} impl UseSegment { // Clone a version of self with any top-level alias removed. fn remove_alias(&self) -> UseSegment { match *self { UseSegment::Ident(ref s, _) => UseSegment::Ident(s.clone(), None), UseSegment::Slf(_) => UseSegment::Slf(None), UseSegment::Super(_) => UseSegment::Super(None), _ => self.clone(), } } fn from_path_segment(path_seg: &ast::PathSegment) -> Option<UseSegment> { let name = path_seg.ident.name.as_str(); if name == "{{root}}" { return None; } Some(if name == "self" { UseSegment::Slf(None) } else if name == "super" { UseSegment::Super(None) } else { UseSegment::Ident((*name).to_owned(), None) }) } } pub fn merge_use_trees(use_trees: Vec<UseTree>) -> Vec<UseTree> { let mut result = Vec::with_capacity(use_trees.len()); for use_tree in use_trees { if use_tree.has_comment() || use_tree.attrs.is_some() { result.push(use_tree); continue; } for flattened in use_tree.flatten() { merge_use_trees_inner(&mut result, flattened); } } result } fn merge_use_trees_inner(trees: &mut Vec<UseTree>, use_tree: UseTree) { for tree in trees.iter_mut() { if tree.share_prefix(&use_tree) { tree.merge(use_tree); return; } } trees.push(use_tree); } impl fmt::Debug for UseTree { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(self, f) } } impl fmt::Debug for UseSegment { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(self, f) } } impl fmt::Display for UseSegment { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { UseSegment::Glob => write!(f, "*"), UseSegment::Ident(ref s, _) => write!(f, "{}", s), UseSegment::Slf(..) => write!(f, "self"), UseSegment::Super(..) 
=> write!(f, "super"), UseSegment::List(ref list) => { write!(f, "{{")?; for (i, item) in list.iter().enumerate() { let is_last = i == list.len() - 1; write!(f, "{}", item)?; if !is_last { write!(f, ", ")?; } } write!(f, "}}") } } } } impl fmt::Display for UseTree { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { for (i, segment) in self.path.iter().enumerate() { let is_last = i == self.path.len() - 1; write!(f, "{}", segment)?; if !is_last { write!(f, "::")?; } } write!(f, "") } } impl UseTree { // Rewrite use tree with `use ` and a trailing `;`. pub fn rewrite_top_level(&self, context: &RewriteContext, shape: Shape) -> Option<String> { let mut result = String::with_capacity(256); if let Some(ref attrs) = self.attrs { result.push_str(&attrs.rewrite(context, shape)?); if !result.is_empty() { result.push_str(&shape.indent.to_string_with_newline(context.config)); } } let vis = self.visibility .as_ref() .map_or(Cow::from(""), |vis| ::utils::format_visibility(&vis)); result.push_str(&self.rewrite(context, shape.offset_left(vis.len())?) .map(|s| { if s.is_empty() { s.to_owned() } else { format!("{}use {};", vis, s) } })?); Some(result) } // FIXME: Use correct span? // The given span is essentially incorrect, since we are reconstructing // use statements. This should not be a problem, though, since we have // already tried to extract comment and observed that there are no comment // around the given use item, and the span will not be used afterward. 
fn from_path(path: Vec<UseSegment>, span: Span) -> UseTree { UseTree { path, span, list_item: None, visibility: None, attrs: None, } } pub fn from_ast_with_normalization( context: &RewriteContext, item: &ast::Item, ) -> Option<UseTree> { match item.node { ast::ItemKind::Use(ref use_tree) => Some( UseTree::from_ast( context, use_tree, None, Some(item.vis.clone()), Some(item.span().lo()), if item.attrs.is_empty() { None } else { Some(item.attrs.clone()) }, ).normalize(context.config.reorder_imported_names()), ), _ => None, } } fn from_ast( context: &RewriteContext, a: &ast::UseTree, list_item: Option<ListItem>, visibility: Option<ast::Visibility>, opt_lo: Option<BytePos>, attrs: Option<Vec<ast::Attribute>>, ) -> UseTree { let span = if let Some(lo) = opt_lo { mk_sp(lo, a.span.hi()) } else { a.span }; let mut result = UseTree { path: vec![], span, list_item, visibility, attrs, }; for p in &a.prefix.segments { if let Some(use_segment) = UseSegment::from_path_segment(p) { result.path.push(use_segment); } } match a.kind { UseTreeKind::Glob => { result.path.push(UseSegment::Glob); } UseTreeKind::Nested(ref list) => { // Extract comments between nested use items. // This needs to be done before sorting use items. let items: Vec<_> = itemize_list( context.snippet_provider, list.iter().map(|(tree, _)| tree), "}", ",", |tree| tree.span.lo(), |tree| tree.span.hi(), |_| Some("".to_owned()), // We only need comments for now. 
context.snippet_provider.span_after(a.span, "{"), a.span.hi(), false, ).collect(); result.path.push(UseSegment::List( list.iter() .zip(items.into_iter()) .map(|(t, list_item)| { Self::from_ast(context, &t.0, Some(list_item), None, None, None) }) .collect(), )); } UseTreeKind::Simple(ref rename) => { let mut name = (*path_to_imported_ident(&a.prefix).name.as_str()).to_owned(); let alias = rename.and_then(|ident| { if ident == path_to_imported_ident(&a.prefix) { None } else { Some(ident.to_string()) } }); let segment = if &name == "self" { UseSegment::Slf(alias) } else if &name == "super" { UseSegment::Super(alias) } else { UseSegment::Ident(name, alias) }; // `name` is already in result. result.path.pop(); result.path.push(segment); } } result } // Do the adjustments that rustfmt does elsewhere to use paths. pub fn normalize(mut self, do_sort: bool) -> UseTree { let mut last = self.path.pop().expect("Empty use tree?"); // Hack around borrow checker. let mut normalize_sole_list = false; let mut aliased_self = false; // Remove foo::{} or self without attributes. match last { _ if self.attrs.is_some() => (), UseSegment::List(ref list) if list.is_empty() => { self.path = vec![]; return self; } UseSegment::Slf(None) if self.path.is_empty() && self.visibility.is_some() => { self.path = vec![]; return self; } _ => (), } // Normalise foo::self -> foo. if let UseSegment::Slf(None) = last { if self.path.len() > 0 { return self; } } // Normalise foo::self as bar -> foo as bar. 
if let UseSegment::Slf(_) = last { match self.path.last() { None => {} Some(UseSegment::Ident(_, None)) => { aliased_self = true; } _ => unreachable!(), } } let mut done = false; if aliased_self { match self.path.last_mut() { Some(UseSegment::Ident(_, ref mut old_rename)) => { assert!(old_rename.is_none()); if let UseSegment::Slf(Some(rename)) = last.clone() { *old_rename = Some(rename); done = true; } } _ => unreachable!(), } } if done { return self; } // Normalise foo::{bar} -> foo::bar if let UseSegment::List(ref list) = last { if list.len() == 1 { normalize_sole_list = true; } } if normalize_sole_list { match last { UseSegment::List(list) => { for seg in &list[0].path { self.path.push(seg.clone()); } return self.normalize(do_sort); } _ => unreachable!(), } } // Recursively normalize elements of a list use (including sorting the list). if let UseSegment::List(list) = last { let mut list = list.into_iter() .map(|ut| ut.normalize(do_sort)) .collect::<Vec<_>>(); if do_sort { list.sort(); } last = UseSegment::List(list); } self.path.push(last); self } fn has_comment(&self) -> bool { self.list_item.as_ref().map_or(false, ListItem::has_comment) } fn same_visibility(&self, other: &UseTree) -> bool { match (&self.visibility, &other.visibility) { ( Some(codemap::Spanned { node: ast::VisibilityKind::Inherited, .. }), None, ) | ( None, Some(codemap::Spanned { node: ast::VisibilityKind::Inherited, .. }), ) | (None, None) => true, ( Some(codemap::Spanned { node: lnode, .. }), Some(codemap::Spanned { node: rnode, .. 
}), ) => lnode == rnode, _ => false, } } fn share_prefix(&self, other: &UseTree) -> bool { if self.path.is_empty() || other.path.is_empty() || self.attrs.is_some() || !self.same_visibility(other) { false } else { self.path[0] == other.path[0] } } fn flatten(self) -> Vec<UseTree> { if self.path.is_empty() { return vec![self]; } match self.path.clone().last().unwrap() { UseSegment::List(list) => { let prefix = &self.path[..self.path.len() - 1]; let mut result = vec![]; for nested_use_tree in list.into_iter() { for mut flattend in nested_use_tree.clone().flatten().iter_mut() { let mut new_path = prefix.to_vec(); new_path.append(&mut flattend.path); result.push(UseTree { path: new_path, span: self.span, list_item: None, visibility: self.visibility.clone(), attrs: None, }); } } result } _ => vec![self], } } fn merge(&mut self, other: UseTree) { let mut new_path = vec![]; for (mut a, b) in self.path .clone() .iter_mut() .zip(other.path.clone().into_iter()) { if *a == b { new_path.push(b); } else { break; } } if let Some(merged) = merge_rest(&self.path, &other.path, new_path.len()) { new_path.push(merged); self.span = self.span.to(other.span); } self.path = new_path; } } fn merge_rest(a: &[UseSegment], b: &[UseSegment], len: usize) -> Option<UseSegment> { let a_rest = &a[len..]; let b_rest = &b[len..]; if a_rest.is_empty() && b_rest.is_empty() { return None; } if a_rest.is_empty() { return Some(UseSegment::List(vec![ UseTree::from_path(vec![UseSegment::Slf(None)], DUMMY_SP), UseTree::from_path(b_rest.to_vec(), DUMMY_SP), ])); } if b_rest.is_empty() { return Some(UseSegment::List(vec![ UseTree::from_path(vec![UseSegment::Slf(None)], DUMMY_SP), UseTree::from_path(a_rest.to_vec(), DUMMY_SP), ])); } if let UseSegment::List(mut list) = a_rest[0].clone() { merge_use_trees_inner(&mut list, UseTree::from_path(b_rest.to_vec(), DUMMY_SP)); list.sort(); return Some(UseSegment::List(list.clone())); } let mut list = vec![ UseTree::from_path(a_rest.to_vec(), DUMMY_SP), 
UseTree::from_path(b_rest.to_vec(), DUMMY_SP), ]; list.sort(); Some(UseSegment::List(list)) } impl PartialOrd for UseSegment { fn partial_cmp(&self, other: &UseSegment) -> Option<Ordering> { Some(self.cmp(other)) } } impl PartialOrd for UseTree { fn partial_cmp(&self, other: &UseTree) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for UseSegment { fn cmp(&self, other: &UseSegment) -> Ordering { use self::UseSegment::*; fn is_upper_snake_case(s: &str) -> bool { s.chars().all(|c| c.is_uppercase() || c == '_') } match (self, other) { (&Slf(ref a), &Slf(ref b)) | (&Super(ref a), &Super(ref b)) => a.cmp(b), (&Glob, &Glob) => Ordering::Equal, (&Ident(ref ia, ref aa), &Ident(ref ib, ref ab)) => { // snake_case < CamelCase < UPPER_SNAKE_CASE if ia.starts_with(char::is_uppercase) && ib.starts_with(char::is_lowercase) { return Ordering::Greater; } if ia.starts_with(char::is_lowercase) && ib.starts_with(char::is_uppercase) { return Ordering::Less; } if is_upper_snake_case(ia) && !is_upper_snake_case(ib) { return Ordering::Greater; } if !is_upper_snake_case(ia) && is_upper_snake_case(ib) { return Ordering::Less; } let ident_ord = ia.cmp(ib); if ident_ord != Ordering::Equal { return ident_ord; } if aa.is_none() && ab.is_some() { return Ordering::Less; } if aa.is_some() && ab.is_none() { return Ordering::Greater; } aa.cmp(ab) } (&List(ref a), &List(ref b)) => { for (a, b) in a.iter().zip(b.iter()) { let ord = a.cmp(b); if ord != Ordering::Equal { return ord; } } a.len().cmp(&b.len()) } (&Slf(_), _) => Ordering::Less, (_, &Slf(_)) => Ordering::Greater, (&Super(_), _) => Ordering::Less, (_, &Super(_)) => Ordering::Greater, (&Ident(..), _) => Ordering::Less, (_, &Ident(..)) => Ordering::Greater, (&Glob, _) => Ordering::Less, (_, &Glob) => Ordering::Greater, } } } impl Ord for UseTree { fn cmp(&self, other: &UseTree) -> Ordering { for (a, b) in self.path.iter().zip(other.path.iter()) { let ord = a.cmp(b); // The comparison without aliases is a hack to avoid situations like 
// comparing `a::b` to `a as c` - where the latter should be ordered // first since it is shorter. if ord != Ordering::Equal && a.remove_alias().cmp(&b.remove_alias()) != Ordering::Equal { return ord; } } self.path.len().cmp(&other.path.len()) } } fn rewrite_nested_use_tree( context: &RewriteContext, use_tree_list: &[UseTree], shape: Shape, ) -> Option<String> { let mut list_items = Vec::with_capacity(use_tree_list.len()); let nested_shape = match context.config.imports_indent() { IndentStyle::Block => shape .block_indent(context.config.tab_spaces()) .with_max_width(context.config) .sub_width(1)?, IndentStyle::Visual => shape.visual_indent(0), }; for use_tree in use_tree_list { if let Some(mut list_item) = use_tree.list_item.clone() { list_item.item = use_tree.rewrite(context, nested_shape); list_items.push(list_item); } else { list_items.push(ListItem::from_str(use_tree.rewrite(context, nested_shape)?)); } } let (tactic, remaining_width) = if use_tree_list.iter().any(|use_segment| { use_segment .path .last() .map_or(false, |last_segment| match last_segment { UseSegment::List(..) 
=> true, _ => false, }) }) { (DefinitiveListTactic::Vertical, 0) } else { let remaining_width = shape.width.checked_sub(2).unwrap_or(0); let tactic = definitive_tactic( &list_items, context.config.imports_layout(), Separator::Comma, remaining_width, ); (tactic, remaining_width) }; let ends_with_newline = context.config.imports_indent() == IndentStyle::Block && tactic != DefinitiveListTactic::Horizontal; let fmt = ListFormatting { tactic, separator: ",", trailing_separator: if ends_with_newline { context.config.trailing_comma() } else { SeparatorTactic::Never }, separator_place: SeparatorPlace::Back, shape: nested_shape, ends_with_newline, preserve_newline: true, config: context.config, }; let list_str = write_list(&list_items, &fmt)?; let result = if (list_str.contains('\n') || list_str.len() > remaining_width) && context.config.imports_indent() == IndentStyle::Block { format!( "{{\n{}{}\n{}}}", nested_shape.indent.to_string(context.config), list_str, shape.indent.to_string(context.config) ) } else { format!("{{{}}}", list_str) }; Some(result) } impl Rewrite for UseSegment { fn rewrite(&self, context: &RewriteContext, shape: Shape) -> Option<String> { Some(match *self { UseSegment::Ident(ref ident, Some(ref rename)) => format!("{} as {}", ident, rename), UseSegment::Ident(ref ident, None) => ident.clone(), UseSegment::Slf(Some(ref rename)) => format!("self as {}", rename), UseSegment::Slf(None) => "self".to_owned(), UseSegment::Super(Some(ref rename)) => format!("super as {}", rename), UseSegment::Super(None) => "super".to_owned(), UseSegment::Glob => "*".to_owned(), UseSegment::List(ref use_tree_list) => rewrite_nested_use_tree( context, use_tree_list, // 1 = "{" and "}" shape.offset_left(1)?.sub_width(1)?, )?, }) } } impl Rewrite for UseTree { // This does NOT format attributes and visibility or add a trailing `;`. 
fn rewrite(&self, context: &RewriteContext, mut shape: Shape) -> Option<String> { let mut result = String::with_capacity(256); let mut iter = self.path.iter().peekable(); while let Some(ref segment) = iter.next() { let segment_str = segment.rewrite(context, shape)?; result.push_str(&segment_str); if iter.peek().is_some() { result.push_str("::"); // 2 = "::" shape = shape.offset_left(2 + segment_str.len())?; } } Some(result) } } #[cfg(test)] mod test { use super::*; use syntax::codemap::DUMMY_SP; // Parse the path part of an import. This parser is not robust and is only // suitable for use in a test harness. fn parse_use_tree(s: &str) -> UseTree { use std::iter::Peekable; use std::mem::swap; use std::str::Chars; struct Parser<'a> { input: Peekable<Chars<'a>>, } impl<'a> Parser<'a> { fn bump(&mut self) { self.input.next().unwrap(); } fn eat(&mut self, c: char) { assert!(self.input.next().unwrap() == c); } fn push_segment( result: &mut Vec<UseSegment>, buf: &mut String, alias_buf: &mut Option<String>, ) { if !buf.is_empty() { let mut alias = None; swap(alias_buf, &mut alias); if buf == "self" { result.push(UseSegment::Slf(alias)); *buf = String::new(); *alias_buf = None; } else if buf == "super" { result.push(UseSegment::Super(alias)); *buf = String::new(); *alias_buf = None; } else { let mut name = String::new(); swap(buf, &mut name); result.push(UseSegment::Ident(name, alias)); } } } fn parse_in_list(&mut self) -> UseTree { let mut result = vec![]; let mut buf = String::new(); let mut alias_buf = None; while let Some(&c) = self.input.peek() { match c { '{' => { assert!(buf.is_empty()); self.bump(); result.push(UseSegment::List(self.parse_list())); self.eat('}'); } '*' => { assert!(buf.is_empty()); self.bump(); result.push(UseSegment::Glob); } ':' => { self.bump(); self.eat(':'); Self::push_segment(&mut result, &mut buf, &mut alias_buf); } '}' | ',' => { Self::push_segment(&mut result, &mut buf, &mut alias_buf); return UseTree { path: result, span: DUMMY_SP, 
list_item: None, visibility: None, attrs: None, }; } ' ' => { self.bump(); self.eat('a'); self.eat('s'); self.eat(' '); alias_buf = Some(String::new()); } c => { self.bump(); if let Some(ref mut buf) = alias_buf { buf.push(c); } else { buf.push(c); } } } } Self::push_segment(&mut result, &mut buf, &mut alias_buf); UseTree { path: result, span: DUMMY_SP, list_item: None, visibility: None, attrs: None, } } fn parse_list(&mut self) -> Vec<UseTree> { let mut result = vec![]; loop { match self.input.peek().unwrap() { ',' | ' ' => self.bump(), '}' => { return result; } _ => result.push(self.parse_in_list()), } } } } let mut parser = Parser { input: s.chars().peekable(), }; parser.parse_in_list() } macro parse_use_trees($($s:expr),* $(,)*) { vec![ $(parse_use_tree($s),)* ] } #[test] fn test_use_tree_merge() { macro test_merge([$($input:expr),* $(,)*], [$($output:expr),* $(,)*]) { assert_eq!( merge_use_trees(parse_use_trees!($($input,)*)), parse_use_trees!($($output,)*), ); } test_merge!(["a::b::{c, d}", "a::b::{e, f}"], ["a::b::{c, d, e, f}"]); test_merge!(["a::b::c", "a::b"], ["a::b::{self, c}"]); test_merge!(["a::b", "a::b"], ["a::b"]); test_merge!(["a", "a::b", "a::b::c"], ["a::{self, b::{self, c}}"]); test_merge!( ["a::{b::{self, c}, d::e}", "a::d::f"], ["a::{b::{self, c}, d::{e, f}}"] ); test_merge!( ["a::d::f", "a::{b::{self, c}, d::e}"], ["a::{b::{self, c}, d::{e, f}}"] ); test_merge!( ["a::{c, d, b}", "a::{d, e, b, a, f}", "a::{f, g, c}"], ["a::{a, b, c, d, e, f, g}"] ); } #[test] fn test_use_tree_flatten() { assert_eq!( parse_use_tree("a::b::{c, d, e, f}").flatten(), parse_use_trees!("a::b::c", "a::b::d", "a::b::e", "a::b::f",) ); assert_eq!( parse_use_tree("a::b::{c::{d, e, f}, g, h::{i, j, k}}").flatten(), parse_use_trees![ "a::b::c::d", "a::b::c::e", "a::b::c::f", "a::b::g", "a::b::h::i", "a::b::h::j", "a::b::h::k", ] ); } #[test] fn test_use_tree_normalize() { assert_eq!( parse_use_tree("a::self").normalize(true), parse_use_tree("a") ); assert_eq!( 
parse_use_tree("a::self as foo").normalize(true), parse_use_tree("a as foo") ); assert_eq!( parse_use_tree("a::{self}").normalize(true), parse_use_tree("a") ); assert_eq!( parse_use_tree("a::{b}").normalize(true), parse_use_tree("a::b") ); assert_eq!( parse_use_tree("a::{b, c::self}").normalize(true), parse_use_tree("a::{b, c}") ); assert_eq!( parse_use_tree("a::{b as bar, c::self}").normalize(true), parse_use_tree("a::{b as bar, c}") ); } #[test] fn test_use_tree_ord() { assert!(parse_use_tree("a").normalize(true) < parse_use_tree("aa").normalize(true)); assert!(parse_use_tree("a").normalize(true) < parse_use_tree("a::a").normalize(true)); assert!(parse_use_tree("a").normalize(true) < parse_use_tree("*").normalize(true)); assert!(parse_use_tree("a").normalize(true) < parse_use_tree("{a, b}").normalize(true)); assert!(parse_use_tree("*").normalize(true) < parse_use_tree("{a, b}").normalize(true)); assert!( parse_use_tree("aaaaaaaaaaaaaaa::{bb, cc, dddddddd}").normalize(true) < parse_use_tree("aaaaaaaaaaaaaaa::{bb, cc, ddddddddd}").normalize(true) ); assert!( parse_use_tree("serde::de::{Deserialize}").normalize(true) < parse_use_tree("serde_json").normalize(true) ); assert!( parse_use_tree("a::b::c").normalize(true) < parse_use_tree("a::b::*").normalize(true) ); assert!( parse_use_tree("foo::{Bar, Baz}").normalize(true) < parse_use_tree("{Bar, Baz}").normalize(true) ); assert!( parse_use_tree("foo::{self as bar}").normalize(true) < parse_use_tree("foo::{qux as bar}").normalize(true) ); assert!( parse_use_tree("foo::{qux as bar}").normalize(true) < parse_use_tree("foo::{baz, qux as bar}").normalize(true) ); assert!( parse_use_tree("foo::{self as bar, baz}").normalize(true) < parse_use_tree("foo::{baz, qux as bar}").normalize(true) ); assert!(parse_use_tree("foo").normalize(true) < parse_use_tree("Foo").normalize(true)); assert!(parse_use_tree("foo").normalize(true) < parse_use_tree("foo::Bar").normalize(true)); assert!( parse_use_tree("std::cmp::{d, c, b, 
a}").normalize(true) < parse_use_tree("std::cmp::{b, e, g, f}").normalize(true) ); } }
use edit::buffer::{SplitBuffer, TextBuffer}; use state::editor::{Buffer, Editor}; use std::fs::File; use std::io::{Read, Write}; /// The status of a file IO operation. pub enum FileStatus { /// Oll fino. Ok, /// File not found. NotFound, /// Other error. Other, } impl Editor { /// Open a file. pub fn open(&mut self, path: &str) -> FileStatus { if let Some(mut file) = File::open(path).ok() { let mut con = String::new(); let _ = file.read_to_string(&mut con); let mut new_buffer: Buffer = SplitBuffer::from_str(&con).into(); new_buffer.title = Some(path.into()); let new_buffer_index = self.buffers.new_buffer(new_buffer); self.buffers.switch_to(new_buffer_index); self.hint(); FileStatus::Ok } else { FileStatus::NotFound } } /// Write the file. pub fn write(&mut self, path: &str) -> FileStatus { self.buffers.current_buffer_info_mut().title = Some(path.into()); if let Some(mut file) = File::create(path).ok() { if file.write(self.buffers.current_buffer().to_string().as_bytes()) .is_ok() { FileStatus::Ok } else { FileStatus::Other } } else { FileStatus::NotFound } } } When opening an empty file, always have at least one line of text use edit::buffer::{SplitBuffer, TextBuffer}; use state::editor::{Buffer, Editor}; use std::fs::File; use std::io::{Read, Write}; /// The status of a file IO operation. pub enum FileStatus { /// Oll fino. Ok, /// File not found. NotFound, /// Other error. Other, } impl Editor { /// Open a file. pub fn open(&mut self, path: &str) -> FileStatus { if let Some(mut file) = File::open(path).ok() { let mut con = String::new(); let _ = file.read_to_string(&mut con); if con.is_empty() { con.push('\n'); } let mut new_buffer: Buffer = SplitBuffer::from_str(&con).into(); new_buffer.title = Some(path.into()); let new_buffer_index = self.buffers.new_buffer(new_buffer); self.buffers.switch_to(new_buffer_index); self.hint(); FileStatus::Ok } else { FileStatus::NotFound } } /// Write the file. 
pub fn write(&mut self, path: &str) -> FileStatus { self.buffers.current_buffer_info_mut().title = Some(path.into()); if let Some(mut file) = File::create(path).ok() { if file.write(self.buffers.current_buffer().to_string().as_bytes()) .is_ok() { FileStatus::Ok } else { FileStatus::Other } } else { FileStatus::NotFound } } }
use crate::{ buffer::Buffer, connection::Connection, connection_status::ConnectionState, Error, Result, }; use amq_protocol::frame::{gen_frame, parse_frame, AMQPFrame, GenError, Offset}; use log::{error, trace}; use mio::{event::Source, Events, Interest, Poll, Token, Waker}; use std::{ io::{Read, Write}, sync::{ atomic::{AtomicBool, Ordering}, Arc, }, thread::{self, Builder as ThreadBuilder, JoinHandle}, time::{Duration, Instant}, }; pub(crate) const SOCKET: Token = Token(1); const WAKER: Token = Token(2); const FRAMES_STORAGE: usize = 32; #[derive(Debug, PartialEq)] enum Status { Initial, Setup, Stop, } pub struct IoLoop<T> { connection: Connection, socket: T, status: Status, poll: Poll, waker: Arc<Waker>, hb_handle: Option<JoinHandle<()>>, frame_size: usize, receive_buffer: Buffer, send_buffer: Buffer, can_write: bool, can_read: bool, send_heartbeat: Arc<AtomicBool>, poll_timeout: Option<Duration>, } impl<T: Source + Read + Write + Send + 'static> IoLoop<T> { pub(crate) fn new( connection: Connection, socket: T, poll: Option<(Poll, Token)>, ) -> Result<Self> { let (poll, registered) = poll .map(|t| Ok((t.0, true))) .unwrap_or_else(|| Poll::new().map(|poll| (poll, false)))?; let frame_size = std::cmp::max(8192, connection.configuration().frame_max() as usize); let waker = Arc::new(Waker::new(poll.registry(), WAKER)?); let mut inner = Self { connection, socket, status: Status::Initial, poll, waker, hb_handle: None, frame_size, receive_buffer: Buffer::with_capacity(FRAMES_STORAGE * frame_size), send_buffer: Buffer::with_capacity(FRAMES_STORAGE * frame_size), can_write: false, can_read: false, send_heartbeat: Arc::new(AtomicBool::new(false)), poll_timeout: None, }; if registered { inner.poll.registry().reregister( &mut inner.socket, SOCKET, Interest::READABLE | Interest::WRITABLE, )?; } else { inner.poll.registry().register( &mut inner.socket, SOCKET, Interest::READABLE | Interest::WRITABLE, )?; } Ok(inner) } fn start_heartbeat(&mut self, interval: Duration) -> 
Result<()> { let connection = self.connection.clone(); let send_hartbeat = self.send_heartbeat.clone(); let hb_handle = ThreadBuilder::new() .name("heartbeat".to_owned()) .spawn(move || { while connection.status().connected() { let start = Instant::now(); let mut remaining = interval; loop { thread::park_timeout(remaining); let elapsed = start.elapsed(); if elapsed >= remaining { break; } remaining -= interval - elapsed; } send_hartbeat.store(true, Ordering::Relaxed); } })?; self.hb_handle = Some(hb_handle); Ok(()) } fn heartbeat(&mut self) -> Result<()> { if self.send_heartbeat.load(Ordering::Relaxed) { trace!("send heartbeat"); self.connection.send_heartbeat()?; self.send_heartbeat.store(false, Ordering::Relaxed); } Ok(()) } fn ensure_setup(&mut self) -> Result<()> { if self.status != Status::Setup && self.connection.status().connected() { let frame_max = self.connection.configuration().frame_max() as usize; self.frame_size = std::cmp::max(self.frame_size, frame_max); self.receive_buffer.grow(FRAMES_STORAGE * self.frame_size); self.send_buffer.grow(FRAMES_STORAGE * self.frame_size); let heartbeat = self.connection.configuration().heartbeat(); if heartbeat != 0 { trace!("io_loop: start heartbeat"); let heartbeat = Duration::from_secs(u64::from(heartbeat)); self.start_heartbeat(heartbeat)?; self.poll_timeout = Some(heartbeat); trace!("io_loop: heartbeat started"); } self.status = Status::Setup; } Ok(()) } fn has_data(&self) -> bool { self.connection.has_pending_frames() || self.send_buffer.available_data() > 0 } fn can_write(&self) -> bool { self.can_write && self.has_data() && !self.connection.status().blocked() } fn can_read(&self) -> bool { self.can_read } fn can_parse(&self) -> bool { self.receive_buffer.available_data() > 0 } fn should_continue(&self) -> bool { let connection_status = self.connection.status(); (self.status == Status::Initial || connection_status.connected() || connection_status.closing()) && self.status != Status::Stop && 
!connection_status.errored() } pub fn start(mut self) -> Result<()> { let waker = self.waker.clone(); self.connection.clone().set_io_loop( ThreadBuilder::new() .name("io_loop".to_owned()) .spawn(move || { let mut events = Events::with_capacity(1024); while self.should_continue() { self.run(&mut events)?; } if let Some(hb_handle) = self.hb_handle.take() { hb_handle.thread().unpark(); hb_handle.join().expect("heartbeat loop failed"); } Ok(()) })?, waker, ) } fn poll(&mut self, events: &mut Events) -> Result<()> { trace!("io_loop poll"); self.poll.poll(events, self.poll_timeout)?; trace!("io_loop poll done"); for event in events.iter() { if event.token() == SOCKET { if event.is_readable() { self.can_read = true; } if event.is_writable() { self.can_write = true; } } } Ok(()) } fn run(&mut self, events: &mut Events) -> Result<()> { trace!("io_loop run"); self.ensure_setup()?; self.poll(events)?; self.do_run() } fn do_run(&mut self) -> Result<()> { trace!( "io_loop do_run; can_read={}, can_write={}, has_data={}", self.can_read, self.can_write, self.has_data() ); loop { self.heartbeat()?; self.write()?; if self.connection.status().closed() { self.status = Status::Stop; } if self.should_continue() { self.read()?; } self.parse()?; self.connection.poll_internal_promises()?; if self.stop_looping() { self.maybe_continue()?; break; } } trace!( "io_loop do_run done; can_read={}, can_write={}, has_data={}, status={:?}", self.can_read, self.can_write, self.has_data(), self.status ); Ok(()) } fn stop_looping(&self) -> bool { !self.can_read() || !self.can_write() || self.status == Status::Stop || self.connection.status().errored() } fn has_pending_operations(&self) -> bool { self.status != Status::Stop && (self.can_read() || self.can_parse() || self.can_write()) } fn maybe_continue(&mut self) -> Result<()> { if self.has_pending_operations() { trace!( "io_loop send continue; can_read={}, can_write={}, has_data={}", self.can_read, self.can_write, self.has_data() ); 
self.send_continue()?; } Ok(()) } fn critical_error(&mut self, error: Error) -> Result<()> { if let ConnectionState::SentProtocolHeader(resolver, ..) = self.connection.status().state() { resolver.swear(Err(error.clone())); self.status = Status::Stop; } self.connection.set_error(error.clone())?; Err(error) } fn write(&mut self) -> Result<()> { if self.can_write() { if let Err(e) = self.write_to_stream() { if e.wouldblock() { self.can_write = false } else { error!("error writing: {:?}", e); self.critical_error(e)?; } } self.send_buffer.shift_unless_available(self.frame_size); } Ok(()) } fn read(&mut self) -> Result<()> { if self.can_read() { if let Err(e) = self.read_from_stream() { if e.wouldblock() { self.can_read = false } else { error!("error reading: {:?}", e); self.critical_error(e)?; } } self.receive_buffer.shift_unless_available(self.frame_size); } Ok(()) } fn send_continue(&mut self) -> Result<()> { self.waker.wake()?; Ok(()) } fn write_to_stream(&mut self) -> Result<()> { self.serialize()?; let sz = self.socket.write(&self.send_buffer.data())?; trace!("wrote {} bytes", sz); self.send_buffer.consume(sz); if sz > 0 && self.send_buffer.available_data() > 0 { // We didn't write all the data yet self.send_continue()?; } Ok(()) } fn read_from_stream(&mut self) -> Result<()> { match self.connection.status().state() { ConnectionState::Closed => Ok(()), ConnectionState::Error => Err(Error::InvalidConnectionState(ConnectionState::Error)), _ => { self.socket .read(&mut self.receive_buffer.space()) .map(|sz| { trace!("read {} bytes", sz); self.receive_buffer.fill(sz); })?; Ok(()) } } } fn serialize(&mut self) -> Result<()> { if let Some((send_id, next_msg, resolver)) = self.connection.next_frame() { trace!("will write to buffer: {:?}", next_msg); let checkpoint = self.send_buffer.checkpoint(); let res = gen_frame(&next_msg)((&mut self.send_buffer).into()); match res.map(|w| w.into_inner().1) { Ok(_) => { if let Some(resolver) = resolver { resolver.swear(Ok(())); // 
FIXME: do that only once written all } Ok(()) } Err(e) => { self.send_buffer.rollback(checkpoint); match e { GenError::BufferTooSmall(_) => { // Requeue msg self.connection .requeue_frame((send_id, next_msg, resolver))?; self.send_buffer.shift(); Ok(()) } e => { error!("error generating frame: {:?}", e); let error = Error::SerialisationError(Arc::new(e)); self.connection.set_error(error.clone())?; Err(error) } } } } } else { Ok(()) } } fn parse(&mut self) -> Result<()> { if self.can_parse() { if let Some(frame) = self.do_parse()? { self.connection.handle_frame(frame)?; } } Ok(()) } fn do_parse(&mut self) -> Result<Option<AMQPFrame>> { match parse_frame(self.receive_buffer.data()) { Ok((i, f)) => { let consumed = self.receive_buffer.data().offset(i); self.receive_buffer.consume(consumed); Ok(Some(f)) } Err(e) => { if e.is_incomplete() { self.receive_buffer.shift(); Ok(None) } else { error!("parse error: {:?}", e); let error = Error::ParsingError(e); self.connection.set_error(error.clone())?; Err(error) } } } } } IoLoop: wait for frame to be 100% sent before notifying as such Fixes #255 Signed-off-by: Marc-Antoine Perennou <07f76cf0511c79b361712839686f3cee8c75791c@Perennou.com> use crate::{ buffer::Buffer, connection::Connection, connection_status::ConnectionState, Error, PromiseResolver, Result, }; use amq_protocol::frame::{gen_frame, parse_frame, AMQPFrame, GenError, Offset}; use log::{error, trace}; use mio::{event::Source, Events, Interest, Poll, Token, Waker}; use std::{ collections::VecDeque, io::{Read, Write}, sync::{ atomic::{AtomicBool, Ordering}, Arc, }, thread::{self, Builder as ThreadBuilder, JoinHandle}, time::{Duration, Instant}, }; pub(crate) const SOCKET: Token = Token(1); const WAKER: Token = Token(2); const FRAMES_STORAGE: usize = 32; #[derive(Debug, PartialEq)] enum Status { Initial, Setup, Stop, } pub struct IoLoop<T> { connection: Connection, socket: T, status: Status, poll: Poll, waker: Arc<Waker>, hb_handle: Option<JoinHandle<()>>, frame_size: 
usize, receive_buffer: Buffer, send_buffer: Buffer, can_write: bool, can_read: bool, send_heartbeat: Arc<AtomicBool>, poll_timeout: Option<Duration>, serialized_frames: VecDeque<(u64, Option<PromiseResolver<()>>)>, } impl<T: Source + Read + Write + Send + 'static> IoLoop<T> { pub(crate) fn new( connection: Connection, socket: T, poll: Option<(Poll, Token)>, ) -> Result<Self> { let (poll, registered) = poll .map(|t| Ok((t.0, true))) .unwrap_or_else(|| Poll::new().map(|poll| (poll, false)))?; let frame_size = std::cmp::max(8192, connection.configuration().frame_max() as usize); let waker = Arc::new(Waker::new(poll.registry(), WAKER)?); let mut inner = Self { connection, socket, status: Status::Initial, poll, waker, hb_handle: None, frame_size, receive_buffer: Buffer::with_capacity(FRAMES_STORAGE * frame_size), send_buffer: Buffer::with_capacity(FRAMES_STORAGE * frame_size), can_write: false, can_read: false, send_heartbeat: Arc::new(AtomicBool::new(false)), poll_timeout: None, serialized_frames: VecDeque::default(), }; if registered { inner.poll.registry().reregister( &mut inner.socket, SOCKET, Interest::READABLE | Interest::WRITABLE, )?; } else { inner.poll.registry().register( &mut inner.socket, SOCKET, Interest::READABLE | Interest::WRITABLE, )?; } Ok(inner) } fn start_heartbeat(&mut self, interval: Duration) -> Result<()> { let connection = self.connection.clone(); let send_hartbeat = self.send_heartbeat.clone(); let hb_handle = ThreadBuilder::new() .name("heartbeat".to_owned()) .spawn(move || { while connection.status().connected() { let start = Instant::now(); let mut remaining = interval; loop { thread::park_timeout(remaining); let elapsed = start.elapsed(); if elapsed >= remaining { break; } remaining -= interval - elapsed; } send_hartbeat.store(true, Ordering::Relaxed); } })?; self.hb_handle = Some(hb_handle); Ok(()) } fn heartbeat(&mut self) -> Result<()> { if self.send_heartbeat.load(Ordering::Relaxed) { trace!("send heartbeat"); 
self.connection.send_heartbeat()?; self.send_heartbeat.store(false, Ordering::Relaxed); } Ok(()) } fn ensure_setup(&mut self) -> Result<()> { if self.status != Status::Setup && self.connection.status().connected() { let frame_max = self.connection.configuration().frame_max() as usize; self.frame_size = std::cmp::max(self.frame_size, frame_max); self.receive_buffer.grow(FRAMES_STORAGE * self.frame_size); self.send_buffer.grow(FRAMES_STORAGE * self.frame_size); let heartbeat = self.connection.configuration().heartbeat(); if heartbeat != 0 { trace!("io_loop: start heartbeat"); let heartbeat = Duration::from_secs(u64::from(heartbeat)); self.start_heartbeat(heartbeat)?; self.poll_timeout = Some(heartbeat); trace!("io_loop: heartbeat started"); } self.status = Status::Setup; } Ok(()) } fn has_data(&self) -> bool { self.connection.has_pending_frames() || self.send_buffer.available_data() > 0 } fn can_write(&self) -> bool { self.can_write && self.has_data() && !self.connection.status().blocked() } fn can_read(&self) -> bool { self.can_read } fn can_parse(&self) -> bool { self.receive_buffer.available_data() > 0 } fn should_continue(&self) -> bool { let connection_status = self.connection.status(); (self.status == Status::Initial || connection_status.connected() || connection_status.closing()) && self.status != Status::Stop && !connection_status.errored() } pub fn start(mut self) -> Result<()> { let waker = self.waker.clone(); self.connection.clone().set_io_loop( ThreadBuilder::new() .name("io_loop".to_owned()) .spawn(move || { let mut events = Events::with_capacity(1024); while self.should_continue() { self.run(&mut events)?; } if let Some(hb_handle) = self.hb_handle.take() { hb_handle.thread().unpark(); hb_handle.join().expect("heartbeat loop failed"); } Ok(()) })?, waker, ) } fn poll(&mut self, events: &mut Events) -> Result<()> { trace!("io_loop poll"); self.poll.poll(events, self.poll_timeout)?; trace!("io_loop poll done"); for event in events.iter() { if event.token() 
== SOCKET { if event.is_readable() { self.can_read = true; } if event.is_writable() { self.can_write = true; } } } Ok(()) } fn run(&mut self, events: &mut Events) -> Result<()> { trace!("io_loop run"); self.ensure_setup()?; self.poll(events)?; self.do_run() } fn do_run(&mut self) -> Result<()> { trace!( "io_loop do_run; can_read={}, can_write={}, has_data={}", self.can_read, self.can_write, self.has_data() ); loop { self.heartbeat()?; self.write()?; if self.connection.status().closed() { self.status = Status::Stop; } if self.should_continue() { self.read()?; } self.parse()?; self.connection.poll_internal_promises()?; if self.stop_looping() { self.maybe_continue()?; break; } } trace!( "io_loop do_run done; can_read={}, can_write={}, has_data={}, status={:?}", self.can_read, self.can_write, self.has_data(), self.status ); Ok(()) } fn stop_looping(&self) -> bool { !self.can_read() || !self.can_write() || self.status == Status::Stop || self.connection.status().errored() } fn has_pending_operations(&self) -> bool { self.status != Status::Stop && (self.can_read() || self.can_parse() || self.can_write()) } fn maybe_continue(&mut self) -> Result<()> { if self.has_pending_operations() { trace!( "io_loop send continue; can_read={}, can_write={}, has_data={}", self.can_read, self.can_write, self.has_data() ); self.send_continue()?; } Ok(()) } fn critical_error(&mut self, error: Error) -> Result<()> { if let ConnectionState::SentProtocolHeader(resolver, ..) 
= self.connection.status().state() { resolver.swear(Err(error.clone())); self.status = Status::Stop; } self.connection.set_error(error.clone())?; Err(error) } fn write(&mut self) -> Result<()> { if self.can_write() { if let Err(e) = self.write_to_stream() { if e.wouldblock() { self.can_write = false } else { error!("error writing: {:?}", e); self.critical_error(e)?; } } self.send_buffer.shift_unless_available(self.frame_size); } Ok(()) } fn read(&mut self) -> Result<()> { if self.can_read() { if let Err(e) = self.read_from_stream() { if e.wouldblock() { self.can_read = false } else { error!("error reading: {:?}", e); self.critical_error(e)?; } } self.receive_buffer.shift_unless_available(self.frame_size); } Ok(()) } fn send_continue(&mut self) -> Result<()> { self.waker.wake()?; Ok(()) } fn write_to_stream(&mut self) -> Result<()> { self.serialize()?; let sz = self.socket.write(&self.send_buffer.data())?; trace!("wrote {} bytes", sz); self.send_buffer.consume(sz); let mut written = sz as u64; while written > 0 { if let Some((to_write, resolver)) = self.serialized_frames.pop_front() { if written < to_write { self.serialized_frames .push_front((to_write - written, resolver)); written = 0; } else { if let Some(resolver) = resolver { resolver.swear(Ok(())); } written -= to_write; } } else { break; } } if sz > 0 && self.send_buffer.available_data() > 0 { // We didn't write all the data yet self.send_continue()?; } Ok(()) } fn read_from_stream(&mut self) -> Result<()> { match self.connection.status().state() { ConnectionState::Closed => Ok(()), ConnectionState::Error => Err(Error::InvalidConnectionState(ConnectionState::Error)), _ => { self.socket .read(&mut self.receive_buffer.space()) .map(|sz| { trace!("read {} bytes", sz); self.receive_buffer.fill(sz); })?; Ok(()) } } } fn serialize(&mut self) -> Result<()> { if let Some((send_id, next_msg, resolver)) = self.connection.next_frame() { trace!("will write to buffer: {:?}", next_msg); let checkpoint = 
self.send_buffer.checkpoint(); let res = gen_frame(&next_msg)((&mut self.send_buffer).into()); match res.map(|w| w.into_inner().1) { Ok(sz) => { self.serialized_frames.push_back((sz, resolver)); Ok(()) } Err(e) => { self.send_buffer.rollback(checkpoint); match e { GenError::BufferTooSmall(_) => { // Requeue msg self.connection .requeue_frame((send_id, next_msg, resolver))?; self.send_buffer.shift(); Ok(()) } e => { error!("error generating frame: {:?}", e); let error = Error::SerialisationError(Arc::new(e)); self.connection.set_error(error.clone())?; Err(error) } } } } } else { Ok(()) } } fn parse(&mut self) -> Result<()> { if self.can_parse() { if let Some(frame) = self.do_parse()? { self.connection.handle_frame(frame)?; } } Ok(()) } fn do_parse(&mut self) -> Result<Option<AMQPFrame>> { match parse_frame(self.receive_buffer.data()) { Ok((i, f)) => { let consumed = self.receive_buffer.data().offset(i); self.receive_buffer.consume(consumed); Ok(Some(f)) } Err(e) => { if e.is_incomplete() { self.receive_buffer.shift(); Ok(None) } else { error!("parse error: {:?}", e); let error = Error::ParsingError(e); self.connection.set_error(error.clone())?; Err(error) } } } } }
//! # cli //! //! The cargo-make cli //! #[cfg(test)] #[path = "cli_test.rs"] mod cli_test; use crate::cli_commands; use crate::config; use crate::descriptor; use crate::environment; use crate::logger; use crate::logger::LoggerOptions; use crate::profile; use crate::recursion_level; use crate::runner; use crate::types::{CliArgs, GlobalConfig}; use crate::version; use clap::{App, Arg, ArgMatches, SubCommand}; static VERSION: &str = env!("CARGO_PKG_VERSION"); static AUTHOR: &str = env!("CARGO_PKG_AUTHORS"); static DESCRIPTION: &str = env!("CARGO_PKG_DESCRIPTION"); static DEFAULT_TOML: &str = "Makefile.toml"; static DEFAULT_LOG_LEVEL: &str = "info"; static DEFAULT_TASK_NAME: &str = "default"; static DEFAULT_OUTPUT_FORMAT: &str = "default"; fn run(cli_args: CliArgs, global_config: &GlobalConfig) { recursion_level::increment(); logger::init(&LoggerOptions { level: cli_args.log_level.clone(), color: !cli_args.disable_color, }); if recursion_level::is_top() { info!("{} {}", &cli_args.command, &VERSION); debug!("Written By {}", &AUTHOR); } debug!("Cli Args {:#?}", &cli_args); debug!("Global Configuration {:#?}", &global_config); // only run check for updates if we are not in a CI env and user didn't ask to skip the check if !cli_args.disable_check_for_updates && !ci_info::is_ci() && version::should_check(&global_config) { version::check(); } let cwd_string_option = match cli_args.cwd.clone() { Some(value) => Some(value), None => match global_config.search_project_root { Some(search) => { if search { match environment::get_project_root() { Some(value) => Some(value.clone()), None => None, } } else { None } } None => None, }, }; let cwd = match cwd_string_option { Some(ref value) => Some(value.as_ref()), None => None, }; let home = environment::setup_cwd(cwd); let force_makefile = cli_args.build_file.is_some(); let build_file = &cli_args .build_file .clone() .unwrap_or(DEFAULT_TOML.to_string()); let task = &cli_args.task; let profile_name = &cli_args .profile .clone() 
.unwrap_or(profile::DEFAULT_PROFILE.to_string()); let normalized_profile_name = profile::set(&profile_name); environment::load_env_file(cli_args.env_file.clone()); let env = cli_args.env.clone(); let experimental = cli_args.experimental; let descriptor_load_result = descriptor::load(&build_file, force_makefile, env, experimental); let config = match descriptor_load_result { Ok(config) => config, Err(ref min_version) => { error!( "{} version: {} does not meet minimum required version: {}", &cli_args.command, &VERSION, &min_version ); panic!( "{} version: {} does not meet minimum required version: {}", &cli_args.command, &VERSION, &min_version ); } }; match config.config.additional_profiles { Some(ref profiles) => profile::set_additional(profiles), None => profile::set_additional(&vec![]), }; let env_info = environment::setup_env(&cli_args, &config, &task, home); let crate_name = envmnt::get_or("CARGO_MAKE_CRATE_NAME", ""); if crate_name.len() > 0 { info!("Project: {}", &crate_name); } info!("Build File: {}", &build_file); info!("Task: {}", &task); info!("Profile: {}", &normalized_profile_name); // ensure profile env was not overridden profile::set(&normalized_profile_name); if cli_args.list_all_steps { cli_commands::list_steps::run(&config, &cli_args.output_format, &cli_args.output_file); } else if cli_args.diff_execution_plan { let default_config = descriptor::load_internal_descriptors(true, experimental, None); cli_commands::diff_steps::run(&default_config, &config, &task, &cli_args); } else if cli_args.print_only { cli_commands::print_steps::print( &config, &task, &cli_args.output_format, cli_args.disable_workspace, ); } else { runner::run(config, &task, env_info, &cli_args); } } /// Handles the command line arguments and executes the runner. 
fn run_for_args( matches: ArgMatches, global_config: &GlobalConfig, command_name: &String, sub_command: bool, ) { let cmd_matches = if sub_command { match matches.subcommand_matches(command_name) { Some(value) => value, None => panic!("cargo-{} not invoked via cargo command.", &command_name), } } else { &matches }; let mut cli_args = CliArgs::new(); cli_args.command = if sub_command { let mut binary = "cargo ".to_string(); binary.push_str(&command_name); binary } else { command_name.clone() }; cli_args.env = cmd_matches.values_of_lossy("env"); cli_args.build_file = if cmd_matches.occurrences_of("makefile") == 0 { None } else { let makefile = cmd_matches .value_of("makefile") .unwrap_or(&DEFAULT_TOML) .to_string(); Some(makefile) }; cli_args.cwd = match cmd_matches.value_of("cwd") { Some(value) => Some(value.to_string()), None => None, }; let default_log_level = match global_config.log_level { Some(ref value) => value.as_str(), None => &DEFAULT_LOG_LEVEL, }; cli_args.log_level = if cmd_matches.is_present("v") { "verbose".to_string() } else { cmd_matches .value_of("loglevel") .unwrap_or(default_log_level) .to_string() }; let default_disable_color = match global_config.disable_color { Some(value) => value, None => false, }; cli_args.disable_color = cmd_matches.is_present("no-color") || envmnt::is("CARGO_MAKE_DISABLE_COLOR") || default_disable_color; cli_args.print_time_summary = cmd_matches.is_present("time-summary") || envmnt::is("CARGO_MAKE_PRINT_TIME_SUMMARY"); cli_args.env_file = match cmd_matches.value_of("envfile") { Some(value) => Some(value.to_string()), None => None, }; cli_args.output_format = cmd_matches .value_of("output-format") .unwrap_or(DEFAULT_OUTPUT_FORMAT) .to_string(); cli_args.output_file = match cmd_matches.value_of("output_file") { Some(value) => Some(value.to_string()), None => None, }; let profile_name = cmd_matches .value_of("profile".to_string()) .unwrap_or(profile::DEFAULT_PROFILE); cli_args.profile = Some(profile_name.to_string()); 
cli_args.disable_check_for_updates = cmd_matches.is_present("disable-check-for-updates"); cli_args.experimental = cmd_matches.is_present("experimental"); cli_args.print_only = cmd_matches.is_present("print-steps"); cli_args.disable_workspace = cmd_matches.is_present("no-workspace"); cli_args.disable_on_error = cmd_matches.is_present("no-on-error"); cli_args.allow_private = cmd_matches.is_present("allow-private"); cli_args.skip_init_end_tasks = cmd_matches.is_present("skip-init-end-tasks"); cli_args.list_all_steps = cmd_matches.is_present("list-steps"); cli_args.diff_execution_plan = cmd_matches.is_present("diff-steps"); let default_task_name = match global_config.default_task_name { Some(ref value) => value.as_str(), None => &DEFAULT_TASK_NAME, }; let task = cmd_matches.value_of("task").unwrap_or(default_task_name); cli_args.task = cmd_matches.value_of("TASK").unwrap_or(task).to_string(); cli_args.arguments = match cmd_matches.values_of("TASK_ARGS") { Some(values) => { let args_str: Vec<&str> = values.collect(); let args_strings = args_str.iter().map(|item| item.to_string()).collect(); Some(args_strings) } None => None, }; run(cli_args, global_config); } fn create_cli<'a, 'b>( global_config: &'a GlobalConfig, command_name: &String, sub_command: bool, ) -> App<'a, 'b> { let default_task_name = match global_config.default_task_name { Some(ref value) => value.as_str(), None => &DEFAULT_TASK_NAME, }; let default_log_level = match global_config.log_level { Some(ref value) => value.as_str(), None => &DEFAULT_LOG_LEVEL, }; let mut cli_app = if sub_command { SubCommand::with_name(&command_name) } else { let name = command_name.as_str(); App::new(name).bin_name(name) }; cli_app = cli_app .version(VERSION) .author(AUTHOR) .about(DESCRIPTION) .arg( Arg::with_name("makefile") .long("--makefile") .value_name("FILE") .help("The optional toml file containing the tasks definitions") .default_value(&DEFAULT_TOML), ) .arg( Arg::with_name("task") .short("-t") .long("--task") 
.value_name("TASK") .help( "The task name to execute \ (can omit the flag if the task name is the last argument)", ) .default_value(default_task_name), ) .arg( Arg::with_name("profile") .short("-p") .long("--profile") .value_name("PROFILE") .help( "The profile name (will be converted to lower case)", ) .default_value(&profile::DEFAULT_PROFILE), ) .arg( Arg::with_name("cwd") .long("--cwd") .value_name("DIRECTORY") .help( "Will set the current working directory. \ The search for the makefile will be from this directory if defined.", ), ) .arg(Arg::with_name("no-workspace").long("--no-workspace").help( "Disable workspace support (tasks are triggered on workspace and not on members)", )) .arg( Arg::with_name("no-on-error") .long("--no-on-error") .help("Disable on error flow even if defined in config sections"), ) .arg( Arg::with_name("allow-private") .long("--allow-private") .help("Allow invocation of private tasks"), ) .arg( Arg::with_name("skip-init-end-tasks") .long("--skip-init-end-tasks") .help("If set, init and end tasks are skipped"), ) .arg( Arg::with_name("envfile") .long("--env-file") .value_name("FILE") .help("Set environment variables from provided file"), ) .arg( Arg::with_name("env") .long("--env") .short("-e") .value_name("ENV") .multiple(true) .takes_value(true) .number_of_values(1) .help("Set environment variables"), ) .arg( Arg::from_usage("-l, --loglevel=[LOG LEVEL] 'The log level'") .possible_values(&["verbose", "info", "error"]) .default_value(default_log_level), ) .arg( Arg::with_name("v") .short("-v") .long("--verbose") .help("Sets the log level to verbose (shorthand for --loglevel verbose)"), ) .arg( Arg::with_name("no-color") .long("--no-color") .help("Disables colorful output"), ) .arg( Arg::with_name("time-summary") .long("--time-summary") .help("Print task level time summary at end of flow"), ) .arg( Arg::with_name("experimental") .long("--experimental") .help("Allows access unsupported experimental predefined tasks."), ) .arg( 
Arg::with_name("disable-check-for-updates") .long("--disable-check-for-updates") .help("Disables the update check during startup"), ) .arg( Arg::from_usage("--output-format=[OUTPUT FORMAT] 'The print/list steps format (some operations do not support all formats)'") .possible_values(&["default", "short-description", "markdown", "markdown-single-page", "markdown-sub-section"]) .default_value(DEFAULT_OUTPUT_FORMAT), ) .arg( Arg::with_name("output_file") .long("--output-file") .value_name("OUTPUT_FILE") .help("The list steps output file name"), ) .arg(Arg::with_name("print-steps").long("--print-steps").help( "Only prints the steps of the build in the order they will \ be invoked but without invoking them", )) .arg( Arg::with_name("list-steps") .long("--list-all-steps") .help("Lists all known steps"), ) .arg( Arg::with_name("diff-steps") .long("--diff-steps") .help("Runs diff between custom flow and prebuilt flow (requires git)"), ) .arg(Arg::with_name("TASK").help("The task name to execute")) .arg( Arg::with_name("TASK_ARGS") .multiple(true) .help("Task arguments which can be accessed in the task itself."), ); if sub_command { App::new("cargo").bin_name("cargo").subcommand(cli_app) } else { cli_app } } /// Handles the command line arguments and executes the runner. pub(crate) fn run_cli(command_name: String, sub_command: bool) { let global_config = config::load(); let app = create_cli(&global_config, &command_name, sub_command); let matches = app.get_matches(); run_for_args(matches, &global_config, &command_name, sub_command); } feat: Allow leading hyphen for arguments //! # cli //! //! The cargo-make cli //! 
#[cfg(test)] #[path = "cli_test.rs"] mod cli_test; use crate::cli_commands; use crate::config; use crate::descriptor; use crate::environment; use crate::logger; use crate::logger::LoggerOptions; use crate::profile; use crate::recursion_level; use crate::runner; use crate::types::{CliArgs, GlobalConfig}; use crate::version; use clap::{App, AppSettings, Arg, ArgMatches, SubCommand}; static VERSION: &str = env!("CARGO_PKG_VERSION"); static AUTHOR: &str = env!("CARGO_PKG_AUTHORS"); static DESCRIPTION: &str = env!("CARGO_PKG_DESCRIPTION"); static DEFAULT_TOML: &str = "Makefile.toml"; static DEFAULT_LOG_LEVEL: &str = "info"; static DEFAULT_TASK_NAME: &str = "default"; static DEFAULT_OUTPUT_FORMAT: &str = "default"; fn run(cli_args: CliArgs, global_config: &GlobalConfig) { recursion_level::increment(); logger::init(&LoggerOptions { level: cli_args.log_level.clone(), color: !cli_args.disable_color, }); if recursion_level::is_top() { info!("{} {}", &cli_args.command, &VERSION); debug!("Written By {}", &AUTHOR); } debug!("Cli Args {:#?}", &cli_args); debug!("Global Configuration {:#?}", &global_config); // only run check for updates if we are not in a CI env and user didn't ask to skip the check if !cli_args.disable_check_for_updates && !ci_info::is_ci() && version::should_check(&global_config) { version::check(); } let cwd_string_option = match cli_args.cwd.clone() { Some(value) => Some(value), None => match global_config.search_project_root { Some(search) => { if search { match environment::get_project_root() { Some(value) => Some(value.clone()), None => None, } } else { None } } None => None, }, }; let cwd = match cwd_string_option { Some(ref value) => Some(value.as_ref()), None => None, }; let home = environment::setup_cwd(cwd); let force_makefile = cli_args.build_file.is_some(); let build_file = &cli_args .build_file .clone() .unwrap_or(DEFAULT_TOML.to_string()); let task = &cli_args.task; let profile_name = &cli_args .profile .clone() 
.unwrap_or(profile::DEFAULT_PROFILE.to_string()); let normalized_profile_name = profile::set(&profile_name); environment::load_env_file(cli_args.env_file.clone()); let env = cli_args.env.clone(); let experimental = cli_args.experimental; let descriptor_load_result = descriptor::load(&build_file, force_makefile, env, experimental); let config = match descriptor_load_result { Ok(config) => config, Err(ref min_version) => { error!( "{} version: {} does not meet minimum required version: {}", &cli_args.command, &VERSION, &min_version ); panic!( "{} version: {} does not meet minimum required version: {}", &cli_args.command, &VERSION, &min_version ); } }; match config.config.additional_profiles { Some(ref profiles) => profile::set_additional(profiles), None => profile::set_additional(&vec![]), }; let env_info = environment::setup_env(&cli_args, &config, &task, home); let crate_name = envmnt::get_or("CARGO_MAKE_CRATE_NAME", ""); if crate_name.len() > 0 { info!("Project: {}", &crate_name); } info!("Build File: {}", &build_file); info!("Task: {}", &task); info!("Profile: {}", &normalized_profile_name); // ensure profile env was not overridden profile::set(&normalized_profile_name); if cli_args.list_all_steps { cli_commands::list_steps::run(&config, &cli_args.output_format, &cli_args.output_file); } else if cli_args.diff_execution_plan { let default_config = descriptor::load_internal_descriptors(true, experimental, None); cli_commands::diff_steps::run(&default_config, &config, &task, &cli_args); } else if cli_args.print_only { cli_commands::print_steps::print( &config, &task, &cli_args.output_format, cli_args.disable_workspace, ); } else { runner::run(config, &task, env_info, &cli_args); } } /// Handles the command line arguments and executes the runner. 
fn run_for_args( matches: ArgMatches, global_config: &GlobalConfig, command_name: &String, sub_command: bool, ) { let cmd_matches = if sub_command { match matches.subcommand_matches(command_name) { Some(value) => value, None => panic!("cargo-{} not invoked via cargo command.", &command_name), } } else { &matches }; let mut cli_args = CliArgs::new(); cli_args.command = if sub_command { let mut binary = "cargo ".to_string(); binary.push_str(&command_name); binary } else { command_name.clone() }; cli_args.env = cmd_matches.values_of_lossy("env"); cli_args.build_file = if cmd_matches.occurrences_of("makefile") == 0 { None } else { let makefile = cmd_matches .value_of("makefile") .unwrap_or(&DEFAULT_TOML) .to_string(); Some(makefile) }; cli_args.cwd = match cmd_matches.value_of("cwd") { Some(value) => Some(value.to_string()), None => None, }; let default_log_level = match global_config.log_level { Some(ref value) => value.as_str(), None => &DEFAULT_LOG_LEVEL, }; cli_args.log_level = if cmd_matches.is_present("v") { "verbose".to_string() } else { cmd_matches .value_of("loglevel") .unwrap_or(default_log_level) .to_string() }; let default_disable_color = match global_config.disable_color { Some(value) => value, None => false, }; cli_args.disable_color = cmd_matches.is_present("no-color") || envmnt::is("CARGO_MAKE_DISABLE_COLOR") || default_disable_color; cli_args.print_time_summary = cmd_matches.is_present("time-summary") || envmnt::is("CARGO_MAKE_PRINT_TIME_SUMMARY"); cli_args.env_file = match cmd_matches.value_of("envfile") { Some(value) => Some(value.to_string()), None => None, }; cli_args.output_format = cmd_matches .value_of("output-format") .unwrap_or(DEFAULT_OUTPUT_FORMAT) .to_string(); cli_args.output_file = match cmd_matches.value_of("output_file") { Some(value) => Some(value.to_string()), None => None, }; let profile_name = cmd_matches .value_of("profile".to_string()) .unwrap_or(profile::DEFAULT_PROFILE); cli_args.profile = Some(profile_name.to_string()); 
cli_args.disable_check_for_updates = cmd_matches.is_present("disable-check-for-updates"); cli_args.experimental = cmd_matches.is_present("experimental"); cli_args.print_only = cmd_matches.is_present("print-steps"); cli_args.disable_workspace = cmd_matches.is_present("no-workspace"); cli_args.disable_on_error = cmd_matches.is_present("no-on-error"); cli_args.allow_private = cmd_matches.is_present("allow-private"); cli_args.skip_init_end_tasks = cmd_matches.is_present("skip-init-end-tasks"); cli_args.list_all_steps = cmd_matches.is_present("list-steps"); cli_args.diff_execution_plan = cmd_matches.is_present("diff-steps"); let default_task_name = match global_config.default_task_name { Some(ref value) => value.as_str(), None => &DEFAULT_TASK_NAME, }; let task = cmd_matches.value_of("task").unwrap_or(default_task_name); cli_args.task = cmd_matches.value_of("TASK").unwrap_or(task).to_string(); cli_args.arguments = match cmd_matches.values_of("TASK_ARGS") { Some(values) => { let args_str: Vec<&str> = values.collect(); let args_strings = args_str.iter().map(|item| item.to_string()).collect(); Some(args_strings) } None => None, }; run(cli_args, global_config); } fn create_cli<'a, 'b>( global_config: &'a GlobalConfig, command_name: &String, sub_command: bool, ) -> App<'a, 'b> { let default_task_name = match global_config.default_task_name { Some(ref value) => value.as_str(), None => &DEFAULT_TASK_NAME, }; let default_log_level = match global_config.log_level { Some(ref value) => value.as_str(), None => &DEFAULT_LOG_LEVEL, }; let mut cli_app = if sub_command { SubCommand::with_name(&command_name) } else { let name = command_name.as_str(); App::new(name).bin_name(name) }; cli_app = cli_app .version(VERSION) .author(AUTHOR) .about(DESCRIPTION) .setting(AppSettings::AllowLeadingHyphen) .arg( Arg::with_name("makefile") .long("--makefile") .value_name("FILE") .help("The optional toml file containing the tasks definitions") .default_value(&DEFAULT_TOML), ) .arg( 
Arg::with_name("task") .short("-t") .long("--task") .value_name("TASK") .help( "The task name to execute \ (can omit the flag if the task name is the last argument)", ) .default_value(default_task_name), ) .arg( Arg::with_name("profile") .short("-p") .long("--profile") .value_name("PROFILE") .help( "The profile name (will be converted to lower case)", ) .default_value(&profile::DEFAULT_PROFILE), ) .arg( Arg::with_name("cwd") .long("--cwd") .value_name("DIRECTORY") .help( "Will set the current working directory. \ The search for the makefile will be from this directory if defined.", ), ) .arg(Arg::with_name("no-workspace").long("--no-workspace").help( "Disable workspace support (tasks are triggered on workspace and not on members)", )) .arg( Arg::with_name("no-on-error") .long("--no-on-error") .help("Disable on error flow even if defined in config sections"), ) .arg( Arg::with_name("allow-private") .long("--allow-private") .help("Allow invocation of private tasks"), ) .arg( Arg::with_name("skip-init-end-tasks") .long("--skip-init-end-tasks") .help("If set, init and end tasks are skipped"), ) .arg( Arg::with_name("envfile") .long("--env-file") .value_name("FILE") .help("Set environment variables from provided file"), ) .arg( Arg::with_name("env") .long("--env") .short("-e") .value_name("ENV") .multiple(true) .takes_value(true) .number_of_values(1) .help("Set environment variables"), ) .arg( Arg::from_usage("-l, --loglevel=[LOG LEVEL] 'The log level'") .possible_values(&["verbose", "info", "error"]) .default_value(default_log_level), ) .arg( Arg::with_name("v") .short("-v") .long("--verbose") .help("Sets the log level to verbose (shorthand for --loglevel verbose)"), ) .arg( Arg::with_name("no-color") .long("--no-color") .help("Disables colorful output"), ) .arg( Arg::with_name("time-summary") .long("--time-summary") .help("Print task level time summary at end of flow"), ) .arg( Arg::with_name("experimental") .long("--experimental") .help("Allows access unsupported 
experimental predefined tasks."), ) .arg( Arg::with_name("disable-check-for-updates") .long("--disable-check-for-updates") .help("Disables the update check during startup"), ) .arg( Arg::from_usage("--output-format=[OUTPUT FORMAT] 'The print/list steps format (some operations do not support all formats)'") .possible_values(&["default", "short-description", "markdown", "markdown-single-page", "markdown-sub-section"]) .default_value(DEFAULT_OUTPUT_FORMAT), ) .arg( Arg::with_name("output_file") .long("--output-file") .value_name("OUTPUT_FILE") .help("The list steps output file name"), ) .arg(Arg::with_name("print-steps").long("--print-steps").help( "Only prints the steps of the build in the order they will \ be invoked but without invoking them", )) .arg( Arg::with_name("list-steps") .long("--list-all-steps") .help("Lists all known steps"), ) .arg( Arg::with_name("diff-steps") .long("--diff-steps") .help("Runs diff between custom flow and prebuilt flow (requires git)"), ) .arg(Arg::with_name("TASK").help("The task name to execute")) .arg( Arg::with_name("TASK_ARGS") .multiple(true) .help("Task arguments which can be accessed in the task itself."), ); if sub_command { App::new("cargo").bin_name("cargo").subcommand(cli_app) } else { cli_app } } /// Handles the command line arguments and executes the runner. pub(crate) fn run_cli(command_name: String, sub_command: bool) { let global_config = config::load(); let app = create_cli(&global_config, &command_name, sub_command); let matches = app.get_matches(); run_for_args(matches, &global_config, &command_name, sub_command); }
//! Lua functionality use hlua; use hlua::{Lua, LuaError}; use hlua::any::AnyLuaValue; use std::thread; use std::fs::{File}; use std::path::Path; use std::io::Write; use std::sync::{Mutex, RwLock}; use std::sync::mpsc::{channel, Sender, Receiver}; #[macro_use] mod funcs; #[cfg(test)] mod tests; lazy_static! { /// Sends requests to the lua thread static ref SENDER: Mutex<Option<Sender<LuaQuery>>> = Mutex::new(None); /// Receives data back from the lua thread /// This should only be accessed by the lua thread itself. static ref RECEIVER: Mutex<Option<Receiver<LuaResponse>>> = Mutex::new(None); /// Whether the lua thread is currently running pub static ref RUNNING: RwLock<bool> = RwLock::new(false); } /// Messages sent to the lua thread pub enum LuaQuery { /// Halt the lua thread Terminate, // Restart the lua thread Restart, /// Execute a string Execute(String), /// Execute a file ExecuteFile(String), /// Get a variable GetVariable(String), /// Set a value SetValue { name: Box<::std::borrow::Borrow<str> + Sized>, val: Box<hlua::Push<&'static mut Lua<'static>> + Sized> }, /// Create a new array EmptyArray(String), /// Message to ping the lua thread Ping, /// Unused send type Unused, } /// Messages received from lua thread pub enum LuaResponse { /// Lua variable obtained Variable(Option<AnyLuaValue>), /// Lua error Error(hlua::LuaError), /// A function is returned Function(hlua::functions_read::LuaFunction<String>), /// Pong response from lua ping Pong, /// Unused response type Unused, } unsafe impl Send for LuaQuery { } unsafe impl Send for LuaResponse { } unsafe impl Sync for LuaQuery { } unsafe impl Sync for LuaResponse { } /// Whether the lua thread is currently available pub fn thread_running() -> bool { *RUNNING.read().unwrap() } /// Errors which may arise from attempting /// to sending a message to the lua thread. #[derive(Debug)] pub enum LuaSendError { /// The thread crashed, was shut down, or rebooted. 
ThreadClosed, /// The thread has not been initialized yet (maybe not used) ThreadUninitialized, /// The sender had an issue, most likey because the thread panicked Sender } /// Attemps to send a LuaQuery to the lua thread. pub fn try_send(query: LuaQuery) -> Result<(), LuaSendError> { if !thread_running() { Err(LuaSendError::ThreadClosed) } else if let Some(ref sender) = *SENDER.lock().unwrap() { match sender.send(query) { Ok(_) => Ok(()), Err(_) => Err(LuaSendError::Sender) } } else { Err(LuaSendError::ThreadUninitialized) } } /// Initialize the lua thread pub fn init() { trace!("Initializing..."); let (query_tx, query_rx) = channel::<LuaQuery>(); let (answer_tx, answer_rx) = channel::<LuaResponse>(); { let mut sender = SENDER.lock().unwrap(); let mut receiver = RECEIVER.lock().unwrap(); *sender = Some(query_tx); *receiver = Some(answer_rx); } thread::spawn(move || { thread_init(answer_tx, query_rx); }); trace!("Created thread. Init finished."); } fn thread_init(sender: Sender<LuaResponse>, receiver: Receiver<LuaQuery>) { trace!("thread: initializing."); let mut lua = Lua::new(); //unsafe { // hlua_ffi::lua_atpanic(&mut lua.as_mut_lua().0, thread_on_panic); //} debug!("thread: Loading Lua libraries..."); lua.openlibs(); trace!("thread: Loading way-cooler lua extensions..."); // We should have some good file handling, read files from /usr by default, // but for now we're reading directly from the source. 
lua.execute_from_reader::<(), File>( File::open("lib/lua/init.lua").unwrap() ).unwrap(); trace!("thread: loading way-cooler libraries..."); funcs::register_libraries(&mut lua); // Only ready after loading libs *RUNNING.write().unwrap() = true; debug!("thread: entering main loop..."); thread_main_loop(sender, receiver, &mut lua); } fn thread_main_loop(sender: Sender<LuaResponse>, receiver: Receiver<LuaQuery>, lua: &mut Lua) { loop { let request = receiver.recv(); match request { Err(e) => { error!("Lua thread: unable to receive message: {}", e); error!("Lua thread: now panicking!"); *RUNNING.write().unwrap() = false; panic!("Lua thread: lost contact with host, exiting!"); } Ok(message) => { trace!("Handling a request"); thread_handle_message(&sender, message, lua); } } } } fn thread_handle_message(sender: &Sender<LuaResponse>, request: LuaQuery, lua: &mut Lua) { match request { LuaQuery::Terminate => { trace!("thread: Received terminate signal"); *RUNNING.write().unwrap() = false; info!("thread: Lua thread terminating!"); return; }, LuaQuery::Restart => { trace!("thread: Received restart signal!"); error!("thread: Lua thread restart not supported!"); *RUNNING.write().unwrap() = false; panic!("Lua thread: Restart not supported!"); }, LuaQuery::Execute(code) => { trace!("thread: Received request to execute code"); trace!("thread: Executing {:?}", code); match lua.execute::<()>(&code) { Err(error) => { warn!("thread: Error executing code: {:?}", error); let response = LuaResponse::Error(error); thread_send(&sender, response); } Ok(_) => { // This is gonna be really spammy one day trace!("thread: Code executed okay."); } } }, LuaQuery::ExecuteFile(name) => { trace!("thread: Received request to execute file {}", name); info!("thread: Executing {}", name); let path = Path::new(&name); let try_file = File::open(path); if let Ok(file) = try_file { let result = lua.execute_from_reader::<(), File>(file); if let Err(err) = result { warn!("thread: Error executing {}!", name); 
thread_send(&sender, LuaResponse::Error(err)); } else { trace!("thread: Execution of {} successful.", name); } } else { // Could not open file // Unwrap_err is used because we're in the else of let let read_error = LuaError::ReadError(try_file.unwrap_err()); thread_send(&sender, LuaResponse::Error(read_error)); } }, LuaQuery::GetVariable(varname) => { trace!("thread: Received request to get variable {}", varname); let var_result = lua.get(varname.as_str()); match var_result { Some(var) => { thread_send(&sender, LuaResponse::Variable(Some(var))); } None => { warn!("thread: Unable to get variable {}", varname); thread_send(&sender, LuaResponse::Variable(None)); } } }, LuaQuery::SetValue { name: _name, val: _val } => { panic!("thread: unimplemented LuaQuery::SetValue!"); }, LuaQuery::EmptyArray(_name) => { panic!("thread: unimplemented LuaQuery::EmptyArray!"); }, _ => { panic!("Unimplemented send type for lua thread!"); } } } fn thread_send(sender: &Sender<LuaResponse>, response: LuaResponse) { trace!("Called thread_send"); match sender.send(response) { Err(_) => { error!("thread: Unable to broadcast response!"); error!("thread: Shutting down in response to inability \ to continue!"); panic!("Lua thread unable to communicate with main thread, \ shutting down!"); } Ok(_) => {} } } Improve LuaQuery enum //! Lua functionality use hlua; use hlua::{Lua, LuaError}; use hlua::any::AnyLuaValue; use rustc_serialize::json::Json; use std::thread; use std::fs::{File}; use std::path::Path; use std::io::Write; use std::sync::{Mutex, RwLock}; use std::sync::mpsc::{channel, Sender, Receiver}; #[macro_use] mod funcs; #[cfg(test)] mod tests; lazy_static! { /// Sends requests to the lua thread static ref SENDER: Mutex<Option<Sender<LuaQuery>>> = Mutex::new(None); /// Receives data back from the lua thread /// This should only be accessed by the lua thread itself. 
static ref RECEIVER: Mutex<Option<Receiver<LuaResponse>>> = Mutex::new(None); /// Whether the lua thread is currently running pub static ref RUNNING: RwLock<bool> = RwLock::new(false); } /// Represents an identifier for dealing with nested tables. /// /// To access foo.bar.baz, use vec!["foo", "bar", "baz"]. /// /// To access foo[2], use vec!["foo", 2]. pub type LuaIdentifier = Vec<AnyLuaValue>; /// Messages sent to the lua thread #[derive(Debug)] pub enum LuaQuery { /// Pings the lua thread Ping, /// Halt the lua thread Terminate, // Restart the lua thread Restart, /// Execute a string Execute(String), /// Execute a file ExecFile(String), /// Get a variable, expecting an AnyLuaValue GetValue(LuaIdentifier), /// Invoke a function found at the position, /// with the specified arguments. Invoke(LuaIdentifier, Vec<AnyLuaValue>), /// Set a value SetValue { /// The name of the thing to stuff name: LuaIdentifier, /// The value to store. val: Json }, /// Create a new table NewTable(LuaIdentifier), } /// Messages received from lua thread pub enum LuaResponse { /// Lua variable obtained Variable(Option<AnyLuaValue>), /// Lua error Error(hlua::LuaError), /// A function is returned Function(hlua::functions_read::LuaFunction<String>), /// Pong response from lua ping Pong, } unsafe impl Send for LuaQuery { } unsafe impl Send for LuaResponse { } unsafe impl Sync for LuaQuery { } unsafe impl Sync for LuaResponse { } /// Whether the lua thread is currently available pub fn thread_running() -> bool { *RUNNING.read().unwrap() } /// Errors which may arise from attempting /// to sending a message to the lua thread. #[derive(Debug)] pub enum LuaSendError { /// The thread crashed, was shut down, or rebooted. ThreadClosed, /// The thread has not been initialized yet (maybe not used) ThreadUninitialized, /// The sender had an issue, most likey because the thread panicked Sender } /// Attemps to send a LuaQuery to the lua thread. 
pub fn try_send(query: LuaQuery) -> Result<(), LuaSendError> {
    // Fail fast if the thread has terminated (RUNNING is cleared on every
    // shutdown/panic path in the thread itself).
    if !thread_running() {
        Err(LuaSendError::ThreadClosed)
    } else if let Some(ref sender) = *SENDER.lock().unwrap() {
        // Sender is present: forward the query; a send failure means the
        // receiving end was dropped (thread panicked between checks).
        match sender.send(query) {
            Ok(_) => Ok(()),
            Err(_) => Err(LuaSendError::Sender)
        }
    } else {
        // RUNNING was true but SENDER was never set: init() has not run.
        Err(LuaSendError::ThreadUninitialized)
    }
}

/// Initialize the lua thread
///
/// Creates the two mpsc channels (queries in, responses out), stores the
/// host-side endpoints in the SENDER/RECEIVER globals, and spawns the
/// worker thread. Returns immediately; the thread flips RUNNING to true
/// only after its libraries finish loading.
pub fn init() {
    trace!("Initializing...");
    let (query_tx, query_rx) = channel::<LuaQuery>();
    let (answer_tx, answer_rx) = channel::<LuaResponse>();
    // Scope the lock guards so both mutexes are released before spawning.
    {
        let mut sender = SENDER.lock().unwrap();
        let mut receiver = RECEIVER.lock().unwrap();
        *sender = Some(query_tx);
        *receiver = Some(answer_rx);
    }
    thread::spawn(move || {
        thread_init(answer_tx, query_rx);
    });
    trace!("Created thread. Init finished.");
}

/// Worker-thread entry point: builds the Lua state, loads the standard and
/// way-cooler libraries, marks the thread RUNNING, then blocks in the main
/// loop. Panics (taking the thread down) if `lib/lua/init.lua` is missing
/// or fails to execute.
fn thread_init(sender: Sender<LuaResponse>, receiver: Receiver<LuaQuery>) {
    trace!("thread: initializing.");
    let mut lua = Lua::new();
    //unsafe {
    //    hlua_ffi::lua_atpanic(&mut lua.as_mut_lua().0, thread_on_panic);
    //}
    debug!("thread: Loading Lua libraries...");
    lua.openlibs();
    trace!("thread: Loading way-cooler lua extensions...");
    // We should have some good file handling, read files from /usr by default,
    // but for now we're reading directly from the source.
    lua.execute_from_reader::<(), File>(
        File::open("lib/lua/init.lua").unwrap()
    ).unwrap();
    trace!("thread: loading way-cooler libraries...");
    funcs::register_libraries(&mut lua);
    // Only ready after loading libs
    *RUNNING.write().unwrap() = true;
    debug!("thread: entering main loop...");
    thread_main_loop(sender, receiver, &mut lua);
}

/// Blocking dispatch loop: receives queries until the channel breaks.
/// A recv error means the host dropped its Sender, so the thread clears
/// RUNNING and panics to tear itself down.
fn thread_main_loop(sender: Sender<LuaResponse>, receiver: Receiver<LuaQuery>,
                    lua: &mut Lua) {
    loop {
        let request = receiver.recv();
        match request {
            Err(e) => {
                error!("Lua thread: unable to receive message: {}", e);
                error!("Lua thread: now panicking!");
                *RUNNING.write().unwrap() = false;
                panic!("Lua thread: lost contact with host, exiting!");
            }
            Ok(message) => {
                trace!("Handling a request");
                thread_handle_message(&sender, message, lua);
            }
        }
    }
}

/// Handles a single LuaQuery. Terminate returns normally after clearing
/// RUNNING; Restart and all unimplemented variants (SetValue, NewTable,
/// Ping, Invoke via the catch-all) currently panic the thread.
fn thread_handle_message(sender: &Sender<LuaResponse>,
                         request: LuaQuery, lua: &mut Lua) {
    match request {
        LuaQuery::Terminate => {
            trace!("thread: Received terminate signal");
            *RUNNING.write().unwrap() = false;
            info!("thread: Lua thread terminating!");
            return;
        },
        LuaQuery::Restart => {
            trace!("thread: Received restart signal!");
            error!("thread: Lua thread restart not supported!");
            *RUNNING.write().unwrap() = false;
            panic!("Lua thread: Restart not supported!");
        },
        LuaQuery::Execute(code) => {
            trace!("thread: Received request to execute code");
            trace!("thread: Executing {:?}", code);
            match lua.execute::<()>(&code) {
                Err(error) => {
                    warn!("thread: Error executing code: {:?}", error);
                    let response = LuaResponse::Error(error);
                    thread_send(&sender, response);
                }
                Ok(_) => {
                    // This is gonna be really spammy one day
                    trace!("thread: Code executed okay.");
                }
            }
        },
        LuaQuery::ExecFile(name) => {
            trace!("thread: Received request to execute file {}", name);
            info!("thread: Executing {}", name);
            let path = Path::new(&name);
            let try_file = File::open(path);
            if let Ok(file) = try_file {
                let result = lua.execute_from_reader::<(), File>(file);
                if let Err(err) = result {
                    warn!("thread: Error executing {}!", name);
                    thread_send(&sender, LuaResponse::Error(err));
                } else {
                    trace!("thread: Execution of {} successful.", name);
                }
            } else {
                // Could not open file
                // Unwrap_err is used because we're in the else of let
                let read_error = LuaError::ReadError(try_file.unwrap_err());
                thread_send(&sender, LuaResponse::Error(read_error));
            }
        },
        LuaQuery::GetValue(varname) => {
            trace!("thread: Received request to get variable {:?}", varname);
            // NOTE(review): this Debug-formats the LuaIdentifier
            // (a Vec<AnyLuaValue>) and passes the result — e.g.
            // `[String("foo")]` — to lua.get() as the variable name.
            // That looks unlikely to match a real Lua global; presumably
            // nested-path resolution is still TODO — confirm intent.
            let var_result = lua.get(format!("{:?}", varname));
            match var_result {
                Some(var) => {
                    thread_send(&sender, LuaResponse::Variable(Some(var)));
                }
                None => {
                    warn!("thread: Unable to get variable {:?}", varname);
                    thread_send(&sender, LuaResponse::Variable(None));
                }
            }
        },
        LuaQuery::SetValue { name: _name, val: _val } => {
            panic!("thread: unimplemented LuaQuery::SetValue!");
        },
        LuaQuery::NewTable(_name) => {
            panic!("thread: unimplemented LuaQuery::NewTable!");
        },
        LuaQuery::Ping => {
            panic!("thread: unimplemented LuaQuery::Ping!");
        },
        _ => {
            panic!("Unimplemented send type for lua thread!");
        }
    }
}

/// Sends a response back to the host; if the host has dropped its
/// Receiver the thread cannot usefully continue, so it panics.
fn thread_send(sender: &Sender<LuaResponse>, response: LuaResponse) {
    trace!("Called thread_send");
    match sender.send(response) {
        Err(_) => {
            error!("thread: Unable to broadcast response!");
            error!("thread: Shutting down in response to inability \
                    to continue!");
            panic!("Lua thread unable to communicate with main thread, \
                    shutting down!");
        }
        Ok(_) => {}
    }
}
// Copyright (c) 2016 Fedor Gogolev <knsd@knsd.net> // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::io::{Read, BufRead, Error as IoError, ErrorKind}; use std::string::{FromUtf8Error}; use std::cell::{RefCell}; use std::rc::{Rc}; use num::{Zero}; use num::bigint::{BigInt, ToBigInt, Sign}; use byteorder::{ReadBytesExt, LittleEndian, BigEndian, Error as ByteorderError}; use from_ascii::{FromAscii, ParseIntError, ParseFloatError}; use string::{unescape, Error as UnescapeError}; use value::{Value}; use opcodes::*; quick_error! { #[derive(Debug)] pub enum Error { EmptyMarker StackTooSmall EmptyStack InvalidValueOnStack InvalidGetValue InvalidPutValue NotImplemented Read(err: ByteorderError) { from() } Io(err: IoError) { from() } UnknownOpcode(opcode: u8) {} InvalidInt { from(ParseIntError) } InvalidLong InvalidFloat { from(ParseFloatError) } InvalidString { from(FromUtf8Error) } UnescapeError(err: UnescapeError) { from() } InvalidProto(proto: u8) NegativeLength {} #[doc(hidden)] __Nonexhaustive } } #[derive(Debug, PartialEq)] pub enum BooleanOrInt { Boolean(bool), Int(i64), } macro_rules! 
rc { ($term: expr) => (Rc::new(RefCell::new($term))) } fn read_exact<R>(rd: &mut R, mut buf: &mut [u8]) -> Result<(), IoError> where R: Read { while !buf.is_empty() { match rd.read(buf) { Ok(0) => break, Ok(n) => { let tmp = buf; buf = &mut tmp[n..]; } Err(ref e) if e.kind() == ErrorKind::Interrupted => {} Err(e) => return Err(e), } } if !buf.is_empty() { Err(IoError::new(ErrorKind::Other, "failed to fill whole buffer")) } else { Ok(()) } } fn read_until_newline<R>(rd: &mut R) -> Result<Vec<u8>, Error> where R: Read + BufRead { let mut buf = Vec::new(); try!(rd.read_until('\n' as u8, &mut buf)); // Skip last symbol — \n match buf.split_last() { Some((&b'\n', init)) => Ok(init.to_vec()), _ => return Err(Error::InvalidString), } } fn read_decimal_int<R>(rd: &mut R) -> Result<BooleanOrInt, Error> where R: Read + BufRead { let s = try!(read_until_newline(rd)); let val = match &s[..] { b"00" => BooleanOrInt::Boolean(false), b"01" => BooleanOrInt::Boolean(true), _ => BooleanOrInt::Int(try!(i64::from_ascii(&s))) }; Ok(val) } fn read_decimal_long<R>(rd: &mut R) -> Result<BigInt, Error> where R: Read + BufRead { let s = try!(read_until_newline(rd)); let init = match s.split_last() { None => return Err(Error::InvalidString), Some((&b'L', init)) => init, Some(_) => &s[..], }; match BigInt::parse_bytes(&init, 10) { Some(i) => Ok(i), None => Err(Error::InvalidLong) } } fn read_long<R>(rd: &mut R, length: usize) -> Result<BigInt, Error> where R: Read + BufRead { let mut buf = vec![0; length]; try!(read_exact(rd, buf.as_mut())); let mut n = BigInt::from_bytes_le(Sign::Plus, &buf); let last = match buf.last_mut() { None => return Err(Error::InvalidLong), Some(last) => last, }; if *last > 127 { n = n - (1.to_bigint().unwrap() << (length * 8)) } Ok(n) } pub struct Machine { stack: Vec<Value>, memo: Vec<Value>, marker: Option<usize>, } impl Machine { pub fn new() -> Self { Machine { stack: Vec::new(), memo: vec![Value::None], marker: None, } } fn split_off(&mut self) -> 
Result<Vec<Value>, Error> { let at = match self.marker { None => return Err(Error::EmptyMarker), Some(marker) => marker, }; if at > self.stack.len() { return Err(Error::StackTooSmall); } Ok(self.stack.split_off(at)) } pub fn pop(&mut self) -> Result<Value, Error> { match self.stack.pop() { None => return Err(Error::EmptyStack), Some(value) => Ok(value), } } fn handle_get(&mut self, i: usize) -> Result<(), Error> { let value = match self.memo.get(i) { None => return Err(Error::InvalidGetValue), Some(ref v) => (*v).clone(), }; self.stack.push(value); Ok(()) } fn handle_put(&mut self, i: usize) -> Result<(), Error> { let value = match self.stack.last() { None => return Err(Error::EmptyStack), Some(ref v) => (*v).clone(), }; let len = self.memo.len(); if len != i { return Err(Error::InvalidPutValue) } self.memo.push(value); Ok(()) } pub fn execute<R>(&mut self, rd: &mut R) -> Result<bool, Error> where R: Read + BufRead { macro_rules! ensure_not_negative { ($n: expr) => ({ if $n < Zero::zero() { return Err(Error::NegativeLength) } }) } let marker = try!(rd.read_u8()); match marker { PROTO => { let version = try!(rd.read_u8()); if version < 2 { return Err(Error::InvalidProto(version)) } }, STOP => return Ok(true), INT => { self.stack.push(match try!(read_decimal_int(rd)) { BooleanOrInt::Boolean(v) => Value::Bool(v), BooleanOrInt::Int(v) => Value::Long(BigInt::from(v)), }) }, BININT => self.stack.push(Value::Int(try!(rd.read_i32::<LittleEndian>()) as usize)), BININT1 => self.stack.push(Value::Int(try!(rd.read_u8()) as usize)), BININT2 => self.stack.push(Value::Int(try!(rd.read_u16::<LittleEndian>()) as usize)), LONG => self.stack.push(Value::Long(BigInt::from(try!(read_decimal_long(rd))))), LONG1 => { let length = try!(rd.read_u8()); self.stack.push(Value::Long(BigInt::from(try!(read_long(rd, length as usize))))) } LONG4 => { let length = try!(rd.read_i32::<LittleEndian>()); self.stack.push(Value::Long(BigInt::from(try!(read_long(rd, length as usize))))) } STRING => 
self.stack.push(Value::String(try!(unescape(&try!(read_until_newline(rd)), false)))), BINSTRING => { let length = try!(rd.read_i32::<LittleEndian>()); ensure_not_negative!(length); let mut buf = vec![0; length as usize]; try!(read_exact(rd, &mut buf)); self.stack.push(Value::String(buf)) }, SHORT_BINSTRING => { let length = try!(rd.read_u8()); let mut buf = vec![0; length as usize]; try!(read_exact(rd, &mut buf)); self.stack.push(Value::String(buf)) }, NONE => self.stack.push(Value::None), NEWTRUE => self.stack.push(Value::Bool(true)), NEWFALSE => self.stack.push(Value::Bool(false)), UNICODE => { let buf = try!(unescape(&try!(read_until_newline(rd)), true)); self.stack.push(Value::Unicode(try!(String::from_utf8(buf)))) }, BINUNICODE => { let length = try!(rd.read_i32::<LittleEndian>()); ensure_not_negative!(length); let mut buf = vec![0; length as usize]; try!(read_exact(rd, buf.as_mut())); self.stack.push(Value::Unicode(try!(String::from_utf8(buf)))) }, FLOAT => { let s = try!(read_until_newline(rd)); self.stack.push(Value::Float(try!(f64::from_ascii(&s)))) }, BINFLOAT => { self.stack.push(Value::Float(try!(rd.read_f64::<BigEndian>()))) }, EMPTY_LIST => { self.stack.push(Value::List(rc!(Vec::new()))) }, APPEND => { let v = try!(self.pop()); match self.stack.last_mut() { None => return Err(Error::EmptyStack), Some(&mut Value::List(ref mut list)) => (*list.borrow_mut()).push(v), _ => return Err(Error::InvalidValueOnStack), } }, APPENDS => { let values = try!(self.split_off()); match self.stack.last_mut() { None => return Err(Error::EmptyStack), Some(&mut Value::List(ref mut list)) => (*list.borrow_mut()).extend(values), _ => return Err(Error::InvalidValueOnStack), } }, LIST => { let values = try!(self.split_off()); self.stack.push(Value::List(rc!(values))); }, EMPTY_TUPLE => self.stack.push(Value::Tuple(rc!(Vec::new()))), TUPLE => { let values = try!(self.split_off()); self.stack.push(Value::Tuple(rc!(values))); }, TUPLE1 => { let v1 = try!(self.pop()); 
self.stack.push(Value::Tuple(rc!(vec![v1]))) }, TUPLE2 => { let v1 = try!(self.pop()); let v2 = try!(self.pop()); self.stack.push(Value::Tuple(rc!(vec![v1, v2]))) }, TUPLE3 => { let v1 = try!(self.pop()); let v2 = try!(self.pop()); let v3 = try!(self.pop()); self.stack.push(Value::Tuple(rc!(vec![v1, v2, v3]))) } EMPTY_DICT => self.stack.push(Value::Dict(rc!(Vec::new()))), DICT => { let mut values = try!(self.split_off()); let mut dict = Vec::new(); for i in 0 .. values.len() / 2 { // TODO: Check panic let key = values.remove(2 * i); let value = values.remove(2 * i + 1); dict.push((key, value)); } self.stack.push(Value::Dict(rc!(dict))); }, // SETITEM => { // let value = try!(self.pop()); // let key = try!(self.pop()); // match self.stack.last_mut() { // None => return Err(Error::EmptyStack), // Some(ref mut rc) => match *rc.borrow_mut() { // Value::Dict(ref mut dict) => dict.push((key, value)), // _ => return Err(Error::InvalidValueOnStack), // }, // } // }, // SETITEMS => { // let mut values = try!(self.split_off()); // match self.stack.last_mut() { // None => return Err(Error::EmptyStack), // Some(ref mut rc) => match *rc.borrow_mut() { // Value::Dict(ref mut dict) => { // for i in 0 .. 
values.len() / 2 { // TODO: Check panic // let key = values.remove(2 * i); // let value = values.remove(2 * i + 1); // dict.push((key, value)); // } // }, // _ => return Err(Error::InvalidValueOnStack), // }, // } // }, POP => { try!(self.pop()); }, DUP => { let value = match self.stack.last() { None => return Err(Error::EmptyStack), Some(ref v) => (*v).clone(), }; self.stack.push(value) }, MARK => { self.marker = Some(self.stack.len()) }, POP_MARK => { try!(self.split_off()); }, GET => { let n = match try!(read_decimal_int(rd)) { BooleanOrInt::Int(n) => n, BooleanOrInt::Boolean(false) => 0, BooleanOrInt::Boolean(true) => 1, }; ensure_not_negative!(n); try!(self.handle_get(n as usize)) } BINGET => { try!(self.handle_get(try!(rd.read_u8()) as usize)) } LONG_BINGET => { let n = try!(rd.read_i32::<LittleEndian>()); ensure_not_negative!(n); try!(self.handle_get(n as usize)) } PUT => { let n = match try!(read_decimal_int(rd)) { BooleanOrInt::Int(n) => n, BooleanOrInt::Boolean(false) => 0, BooleanOrInt::Boolean(true) => 1, }; ensure_not_negative!(n); try!(self.handle_put(n as usize)) } BINPUT => { try!(self.handle_put(try!(rd.read_u8()) as usize)) } LONG_BINPUT => { let n = try!(rd.read_i32::<LittleEndian>()); ensure_not_negative!(n); try!(self.handle_put(n as usize)) } c => return Err(Error::UnknownOpcode(c)), } Ok(false) } } Update machine // Copyright (c) 2016 Fedor Gogolev <knsd@knsd.net> // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. 
use std::io::{Read, BufRead, Error as IoError, ErrorKind}; use std::string::{FromUtf8Error}; use std::cell::{RefCell}; use std::rc::{Rc}; use num::{Zero}; use num::bigint::{BigInt, ToBigInt, Sign}; use byteorder::{ReadBytesExt, LittleEndian, BigEndian, Error as ByteorderError}; use from_ascii::{FromAscii, ParseIntError, ParseFloatError}; use string::{unescape, Error as UnescapeError}; use value::{Value}; use opcodes::*; quick_error! { #[derive(Debug)] pub enum Error { EmptyMarker StackTooSmall EmptyStack InvalidValueOnStack InvalidGetValue InvalidPutValue NotImplemented Read(err: ByteorderError) { from() } Io(err: IoError) { from() } UnknownOpcode(opcode: u8) {} InvalidInt { from(ParseIntError) } InvalidLong InvalidFloat { from(ParseFloatError) } InvalidString { from(FromUtf8Error) } UnescapeError(err: UnescapeError) { from() } InvalidProto(proto: u8) NegativeLength {} #[doc(hidden)] __Nonexhaustive } } #[derive(Debug, PartialEq)] pub enum BooleanOrInt { Boolean(bool), Int(i64), } macro_rules! rc { ($term: expr) => (Rc::new(RefCell::new($term))) } fn read_exact<R>(rd: &mut R, mut buf: &mut [u8]) -> Result<(), IoError> where R: Read { while !buf.is_empty() { match rd.read(buf) { Ok(0) => break, Ok(n) => { let tmp = buf; buf = &mut tmp[n..]; } Err(ref e) if e.kind() == ErrorKind::Interrupted => {} Err(e) => return Err(e), } } if !buf.is_empty() { Err(IoError::new(ErrorKind::Other, "failed to fill whole buffer")) } else { Ok(()) } } fn read_until_newline<R>(rd: &mut R) -> Result<Vec<u8>, Error> where R: Read + BufRead { let mut buf = Vec::new(); try!(rd.read_until('\n' as u8, &mut buf)); // Skip last symbol — \n match buf.split_last() { Some((&b'\n', init)) => Ok(init.to_vec()), _ => return Err(Error::InvalidString), } } fn read_decimal_int<R>(rd: &mut R) -> Result<BooleanOrInt, Error> where R: Read + BufRead { let s = try!(read_until_newline(rd)); let val = match &s[..] 
{ b"00" => BooleanOrInt::Boolean(false), b"01" => BooleanOrInt::Boolean(true), _ => BooleanOrInt::Int(try!(i64::from_ascii(&s))) }; Ok(val) } fn read_decimal_long<R>(rd: &mut R) -> Result<BigInt, Error> where R: Read + BufRead { let s = try!(read_until_newline(rd)); let init = match s.split_last() { None => return Err(Error::InvalidString), Some((&b'L', init)) => init, Some(_) => &s[..], }; match BigInt::parse_bytes(&init, 10) { Some(i) => Ok(i), None => Err(Error::InvalidLong) } } fn read_long<R>(rd: &mut R, length: usize) -> Result<BigInt, Error> where R: Read + BufRead { let mut buf = vec![0; length]; try!(read_exact(rd, buf.as_mut())); let mut n = BigInt::from_bytes_le(Sign::Plus, &buf); let last = match buf.last_mut() { None => return Err(Error::InvalidLong), Some(last) => last, }; if *last > 127 { n = n - (1.to_bigint().unwrap() << (length * 8)) } Ok(n) } pub struct Machine { stack: Vec<Value>, memo: Vec<Value>, marker: Option<usize>, } impl Machine { pub fn new() -> Self { Machine { stack: Vec::new(), memo: vec![Value::None], marker: None, } } fn split_off(&mut self) -> Result<Vec<Value>, Error> { let at = match self.marker { None => return Err(Error::EmptyMarker), Some(marker) => marker, }; if at > self.stack.len() { return Err(Error::StackTooSmall); } Ok(self.stack.split_off(at)) } pub fn pop(&mut self) -> Result<Value, Error> { match self.stack.pop() { None => return Err(Error::EmptyStack), Some(value) => Ok(value), } } fn handle_get(&mut self, i: usize) -> Result<(), Error> { let value = match self.memo.get(i) { None => return Err(Error::InvalidGetValue), Some(ref v) => (*v).clone(), }; self.stack.push(value); Ok(()) } fn handle_put(&mut self, i: usize) -> Result<(), Error> { let value = match self.stack.last() { None => return Err(Error::EmptyStack), Some(ref v) => (*v).clone(), }; let len = self.memo.len(); if len != i { return Err(Error::InvalidPutValue) } self.memo.push(value); Ok(()) } pub fn execute<R>(&mut self, rd: &mut R) -> Result<bool, Error> 
where R: Read + BufRead { macro_rules! ensure_not_negative { ($n: expr) => ({ if $n < Zero::zero() { return Err(Error::NegativeLength) } }) } let marker = try!(rd.read_u8()); match marker { PROTO => { let version = try!(rd.read_u8()); if version < 2 { return Err(Error::InvalidProto(version)) } }, STOP => return Ok(true), INT => { self.stack.push(match try!(read_decimal_int(rd)) { BooleanOrInt::Boolean(v) => Value::Bool(v), BooleanOrInt::Int(v) => Value::Long(BigInt::from(v)), }) }, BININT => self.stack.push(Value::Int(try!(rd.read_i32::<LittleEndian>()) as usize)), BININT1 => self.stack.push(Value::Int(try!(rd.read_u8()) as usize)), BININT2 => self.stack.push(Value::Int(try!(rd.read_u16::<LittleEndian>()) as usize)), LONG => self.stack.push(Value::Long(BigInt::from(try!(read_decimal_long(rd))))), LONG1 => { let length = try!(rd.read_u8()); self.stack.push(Value::Long(BigInt::from(try!(read_long(rd, length as usize))))) } LONG4 => { let length = try!(rd.read_i32::<LittleEndian>()); self.stack.push(Value::Long(BigInt::from(try!(read_long(rd, length as usize))))) } STRING => self.stack.push(Value::String(try!(unescape(&try!(read_until_newline(rd)), false)))), BINSTRING => { let length = try!(rd.read_i32::<LittleEndian>()); ensure_not_negative!(length); let mut buf = vec![0; length as usize]; try!(read_exact(rd, &mut buf)); self.stack.push(Value::String(buf)) }, SHORT_BINSTRING => { let length = try!(rd.read_u8()); let mut buf = vec![0; length as usize]; try!(read_exact(rd, &mut buf)); self.stack.push(Value::String(buf)) }, NONE => self.stack.push(Value::None), NEWTRUE => self.stack.push(Value::Bool(true)), NEWFALSE => self.stack.push(Value::Bool(false)), UNICODE => { let buf = try!(unescape(&try!(read_until_newline(rd)), true)); self.stack.push(Value::Unicode(try!(String::from_utf8(buf)))) }, BINUNICODE => { let length = try!(rd.read_i32::<LittleEndian>()); ensure_not_negative!(length); let mut buf = vec![0; length as usize]; try!(read_exact(rd, buf.as_mut())); 
self.stack.push(Value::Unicode(try!(String::from_utf8(buf)))) }, FLOAT => { let s = try!(read_until_newline(rd)); self.stack.push(Value::Float(try!(f64::from_ascii(&s)))) }, BINFLOAT => { self.stack.push(Value::Float(try!(rd.read_f64::<BigEndian>()))) }, EMPTY_LIST => { self.stack.push(Value::List(rc!(Vec::new()))) }, APPEND => { let v = try!(self.pop()); match self.stack.last_mut() { None => return Err(Error::EmptyStack), Some(&mut Value::List(ref mut list)) => (*list.borrow_mut()).push(v), _ => return Err(Error::InvalidValueOnStack), } }, APPENDS => { let values = try!(self.split_off()); match self.stack.last_mut() { None => return Err(Error::EmptyStack), Some(&mut Value::List(ref mut list)) => (*list.borrow_mut()).extend(values), _ => return Err(Error::InvalidValueOnStack), } }, LIST => { let values = try!(self.split_off()); self.stack.push(Value::List(rc!(values))); }, EMPTY_TUPLE => self.stack.push(Value::Tuple(rc!(Vec::new()))), TUPLE => { let values = try!(self.split_off()); self.stack.push(Value::Tuple(rc!(values))); }, TUPLE1 => { let v1 = try!(self.pop()); self.stack.push(Value::Tuple(rc!(vec![v1]))) }, TUPLE2 => { let v1 = try!(self.pop()); let v2 = try!(self.pop()); self.stack.push(Value::Tuple(rc!(vec![v1, v2]))) }, TUPLE3 => { let v1 = try!(self.pop()); let v2 = try!(self.pop()); let v3 = try!(self.pop()); self.stack.push(Value::Tuple(rc!(vec![v1, v2, v3]))) } EMPTY_DICT => self.stack.push(Value::Dict(rc!(Vec::new()))), DICT => { let mut values = try!(self.split_off()); let mut dict = Vec::new(); for i in 0 .. 
values.len() / 2 { // TODO: Check panic let key = values.remove(2 * i); let value = values.remove(2 * i + 1); dict.push((key, value)); } self.stack.push(Value::Dict(rc!(dict))); }, SETITEM => { let value = try!(self.pop()); let key = try!(self.pop()); match self.stack.last_mut() { None => return Err(Error::EmptyStack), Some(&mut Value::Dict(ref mut dict)) => (*dict.borrow_mut()).push((key, value)), _ => return Err(Error::InvalidValueOnStack), } }, SETITEMS => { let mut values = try!(self.split_off()); match self.stack.last_mut() { None => return Err(Error::EmptyStack), Some(&mut Value::Dict(ref mut dict_ref)) => { for i in 0 .. values.len() / 2 { // TODO: Check panic let key = values.remove(2 * i); let value = values.remove(2 * i + 1); (*dict_ref.borrow_mut()).push((key, value)); } }, _ => return Err(Error::InvalidValueOnStack), } }, POP => { try!(self.pop()); }, DUP => { let value = match self.stack.last() { None => return Err(Error::EmptyStack), Some(ref v) => (*v).clone(), }; self.stack.push(value) }, MARK => { self.marker = Some(self.stack.len()) }, POP_MARK => { try!(self.split_off()); }, GET => { let n = match try!(read_decimal_int(rd)) { BooleanOrInt::Int(n) => n, BooleanOrInt::Boolean(false) => 0, BooleanOrInt::Boolean(true) => 1, }; ensure_not_negative!(n); try!(self.handle_get(n as usize)) } BINGET => { try!(self.handle_get(try!(rd.read_u8()) as usize)) } LONG_BINGET => { let n = try!(rd.read_i32::<LittleEndian>()); ensure_not_negative!(n); try!(self.handle_get(n as usize)) } PUT => { let n = match try!(read_decimal_int(rd)) { BooleanOrInt::Int(n) => n, BooleanOrInt::Boolean(false) => 0, BooleanOrInt::Boolean(true) => 1, }; ensure_not_negative!(n); try!(self.handle_put(n as usize)) } BINPUT => { try!(self.handle_put(try!(rd.read_u8()) as usize)) } LONG_BINPUT => { let n = try!(rd.read_i32::<LittleEndian>()); ensure_not_negative!(n); try!(self.handle_put(n as usize)) } c => return Err(Error::UnknownOpcode(c)), } Ok(false) } }
// NOTE(review): this module is written in pre-1.0 Rust (`#[deriving(Show)]`,
// `as_slice()`, `HashMap::find`, `Vec::remove` returning Option, bare enum
// variants in scope) and will not compile on a modern toolchain.
use super::header::{FromHeader,HeaderMap};
use super::rfc5322::Rfc5322Parser;
use super::rfc2045::Rfc2045Parser;

use std::collections::HashMap;

/// Content-Type string, major/minor as the first and second elements
/// respectively.
pub type MimeContentType = (String, String);

/// Special header type for the Content-Type header.
pub struct MimeContentTypeHeader {
    /// The parsed (major, minor) MIME type pair.
    pub content_type: MimeContentType,
    /// Header parameters such as `boundary=...`.
    pub params: HashMap<String, String>,
}

impl FromHeader for MimeContentTypeHeader {
    /// Parses a raw Content-Type header value via the RFC 2045 parser.
    /// Returns None unless the value splits into exactly "major/minor".
    fn from_header(value: String) -> Option<MimeContentTypeHeader> {
        let mut parser = Rfc2045Parser::new(value.as_slice());
        // `value` here shadows the raw header string with the parsed
        // media-type token; `params` holds the attribute list.
        let (value, params) = parser.consume_all();

        // splitn(2, '/') keeps any further '/' inside the minor part.
        let mime_parts: Vec<&str> = value.as_slice().splitn(2, '/').collect();

        if mime_parts.len() == 2 {
            Some(MimeContentTypeHeader {
                content_type: (mime_parts[0].to_string(), mime_parts[1].to_string()),
                params: params
            })
        } else {
            None
        }
    }
}

/// Represents the common data of a MIME message
#[deriving(Show)]
pub struct MimeMessageData {
    /// The headers for this message
    pub headers: HeaderMap,
    /// The content of this message
    pub body: String
}

/// Enum type over the different types of multipart message.
#[deriving(Show)]
pub enum MimeMessage {
    /// This message is made of multiple sub parts.
    ///
    /// The `body` of MimeMessageData is the content of the message between
    /// the final header and the first boundary.
    MimeMultipart(MimeMessageData, Vec<MimeMessage>),
    /// A simple non-multipart message.
    MimeNonMultipart(MimeMessageData),
}

impl MimeMessage {
    /// Get a reference to the headers for this message.
    pub fn headers(&self) -> &HeaderMap {
        match *self {
            MimeMultipart(ref data, _) => &data.headers,
            MimeNonMultipart(ref data) => &data.headers,
        }
    }

    // Make a message from a header map and body, parsing out any multi-part
    // messages that are discovered by looking at the Content-Type header.
    // Returns None only when a Content-Type header is present but fails to
    // parse; a missing header falls back to text/plain.
    fn from_headers(headers: HeaderMap, body: String) -> Option<MimeMessage> {
        let content_type = {
            let header = headers.get("Content-Type".to_string());
            match header {
                Some(h) => h.get_value(),
                // No Content-Type header: default to text/plain with no
                // parameters, per the usual MIME convention.
                None => Some(MimeContentTypeHeader{
                    content_type: ("text".to_string(), "plain".to_string()),
                    params: HashMap::new(),
                })
            }
        };

        if content_type.is_none() {
            // If we failed to parse the Content-Type header, something went wrong, so bail.
            None
        } else {
            let content_type = content_type.unwrap();
            // Pull out the major mime type and the boundary (if it exists)
            let (mime_type, _) = content_type.content_type;
            let boundary = content_type.params.find(&"boundary".to_string());

            let message = match mime_type.as_slice() {
                // Only consider a multipart message if we have a boundary, otherwise don't
                // bother and just assume it's a single message.
                "multipart" if boundary.is_some() => {
                    let boundary = boundary.unwrap();
                    // Pull apart the message on the boundary.
                    let mut parts = MimeMessage::split_boundary(body, boundary.clone());
                    // Pop off the first message, as it's part of the parent.
                    // (Old-Rust Vec::remove returns Option, hence unwrap_or.)
                    let body = parts.remove(0).unwrap_or("".to_string());
                    // Parse out each of the child parts, recursively downwards.
                    // Filtering out and unwrapping None as we go.
                    let message_parts: Vec<MimeMessage> = parts.iter()
                        .map(|part| { MimeMessage::parse(part.as_slice()) })
                        .filter(|part| { part.is_some() })
                        .map(|part| { part.unwrap() })
                        .collect();
                    let data = MimeMessageData {
                        headers: headers,
                        body: body,
                    };
                    MimeMultipart(data, message_parts)
                },
                _ => {
                    // Boring message, bung the headers & body together and return.
                    let data = MimeMessageData {
                        headers: headers,
                        body: body,
                    };
                    MimeNonMultipart(data)
                },
            };
            Some(message)
        }
    }

    // Split `body` up on the `boundary` string.
fn split_boundary(body: String, boundary: String) -> Vec<String> { let mut lines = body.as_slice().split('\n'); let mut parts = Vec::new(); let mut current_part = String::new(); for line in lines { let mut is_boundary = false; if line.starts_with("--") { let boundary_value = line.slice_from(2).trim_right(); is_boundary = boundary_value.to_string() == boundary; } if is_boundary { // Clip off the final \r\n parts.push(current_part); current_part = String::new() } else { current_part.push_str(line); current_part.push_str("\n"); } } if current_part.len() > 0 { // Push what remains as the last message part current_part.pop(); // Clear the final \n that we put in parts.push(current_part); } parts } /// Parse `s` into a MimeMessage. /// /// Recurses down into each message, supporting an unlimited depth of messages. /// /// Be warned that each sub-message that fails to be parsed will be thrown away. pub fn parse(s: &str) -> Option<MimeMessage> { let mut parser = Rfc5322Parser::new(s); match parser.consume_message() { Some((headers, body)) => MimeMessage::from_headers(headers, body), None => None, } } } #[cfg(test)] mod tests { use super::*; use super::super::header::{Header,HeaderMap}; #[deriving(Show)] struct MessageTestResult<'s> { headers: Vec<(&'s str, &'s str)>, body: &'s str, children: Option<Vec<MessageTestResult<'s>>>, } impl<'s> Equiv<MimeMessage> for MessageTestResult<'s> { fn equiv(&self, other: &MimeMessage) -> bool { let mut headers = HeaderMap::new(); for &(name, value) in self.headers.iter() { let header = Header::new(name.to_string(), value.to_string()); headers.insert(header); } match (&self.children, other) { (&Some(ref our_messages), &MimeMultipart(ref data, ref other_messages)) => { let mut children_match = true; for (index, child) in our_messages.iter().enumerate() { if !child.equiv(&other_messages[index]) { children_match = false; break; } } let header_match = headers == data.headers; let body_match = self.body.to_string() == data.body; if 
!children_match { println!("Children do not match!"); } if !header_match { println!("Headers do not match!"); } if !body_match { println!("Body does not match! ({} != {})", self.body, data.body); } header_match && body_match && children_match }, (&None, &MimeNonMultipart(ref data)) => { let header_match = headers == data.headers; let body_match = self.body.to_string() == data.body; if !header_match { println!("Headers do not match!"); } if !body_match { println!("Body does not match! ({} != {})", self.body, data.body); } header_match && body_match }, (_, _) => { println!("Expected different message type than what was given"); false }, } } } struct ParseTest<'s> { input: &'s str, output: Option<MessageTestResult<'s>>, name: &'s str, } #[test] fn test_message_parse() { let tests = vec![ ParseTest { input: "From: joe@example.org\r\nTo: john@example.org\r\n\r\nHello!", output: Some(MessageTestResult { headers: vec![ ("From", "joe@example.org"), ("To", "john@example.org"), ], body: "Hello!", children: None, }), name: "Simple single part message parse", }, ParseTest { input: "From: joe@example.org\r\n\ To: john@example.org\r\n\ Content-Type: multipart/alternate; boundary=foo\r\n\ \r\n\ Parent\r\n\ --foo\r\n\ Hello!\r\n\ --foo\r\n\ Other\r\n", output: Some(MessageTestResult { headers: vec![ ("From", "joe@example.org"), ("To", "john@example.org"), ("Content-Type", "multipart/alternate; boundary=foo"), ], body: "Parent\r\n", children: Some(vec![ MessageTestResult { headers: vec![ ], body: "Hello!\r\n", children: None, }, MessageTestResult { headers: vec![ ], body: "Other\r\n", children: None, }, ]), }), name: "Simple multipart message parse", }, ]; for test in tests.into_iter() { println!("--- Next test: {}", test.name); let message = MimeMessage::parse(test.input); let result = match (test.output, message) { (Some(ref expected), Some(ref given)) => expected.equiv(given), (None, None) => true, (_, _) => false, }; assert!(result, test.name); } } } Add detection of different 
use super::header::{FromHeader,HeaderMap};
use super::rfc5322::Rfc5322Parser;
use super::rfc2045::Rfc2045Parser;

use std::collections::HashMap;

/// Content-Type string, major/minor as the first and second elements
/// respectively.
pub type MimeContentType = (String, String);

/// Special header type for the Content-Type header.
pub struct MimeContentTypeHeader {
    /// The MIME type (major/minor pair) presented by this header.
    pub content_type: MimeContentType,
    /// Any parameters attached to this header (e.g. `boundary`).
    pub params: HashMap<String, String>,
}

impl FromHeader for MimeContentTypeHeader {
    /// Parse a raw Content-Type header value into the major/minor pair
    /// and the parameter map. Returns `None` when the value contains no
    /// `major/minor` separator.
    fn from_header(value: String) -> Option<MimeContentTypeHeader> {
        let mut parser = Rfc2045Parser::new(value.as_slice());
        let (value, params) = parser.consume_all();

        // Split only on the first '/': the minor type keeps any further
        // '/' characters intact.
        let mime_parts: Vec<&str> = value.as_slice().splitn(2, '/').collect();

        if mime_parts.len() == 2 {
            Some(MimeContentTypeHeader {
                content_type: (mime_parts[0].to_string(), mime_parts[1].to_string()),
                params: params
            })
        } else {
            None
        }
    }
}

/// Represents the common data of a MIME message
#[deriving(Show)]
pub struct MimeMessageData {
    /// The headers for this message
    pub headers: HeaderMap,
    /// The content of this message
    pub body: String
}

/// Marks the type of a multipart message
#[deriving(Eq,PartialEq,Show)]
pub enum MimeMultipartType {
    /// Entries which are independent.
    ///
    /// This value is the default.
    ///
    /// As defined by Section 5.1.3 of RFC 2046
    MimeMultipartMixed,
    /// Entries which are interchangeable, such that the system can choose
    /// whichever is "best" for its use.
    ///
    /// As defined by Section 5.1.4 of RFC 2046
    MimeMultipartAlternate,
    /// Entries are (typically) a collection of messages.
    ///
    /// As defined by Section 5.1.5 of RFC 2046
    MimeMultipartDigest,
    /// Entry order does not matter, and could be displayed simultaneously.
    ///
    /// As defined by Section 5.1.6 of RFC 2046
    MimeMultipartParallel,
}

impl MimeMultipartType {
    /// Returns the appropriate `MimeMultipartType` for the given MimeContentType.
    ///
    /// RFC 2046 spells the interchangeable subtype "multipart/alternative";
    /// this module historically used "alternate", so both spellings are
    /// accepted here. Unrecognised multipart subtypes fall back to
    /// `MimeMultipartMixed`, as required by Section 5.1.3 of RFC 2046.
    ///
    /// Fails if the major type is not "multipart".
    pub fn from_content_type(ct: MimeContentType) -> MimeMultipartType {
        let (major, minor) = ct;
        match (major.as_slice(), minor.as_slice()) {
            // Accept the RFC 2046 spelling as well as the legacy one used
            // elsewhere in this module.
            ("multipart", "alternative") |
            ("multipart", "alternate") => MimeMultipartAlternate,
            ("multipart", "digest") => MimeMultipartDigest,
            ("multipart", "parallel") => MimeMultipartParallel,
            ("multipart", "mixed") | ("multipart", _) => MimeMultipartMixed,
            _ => fail!("ContentType is not multipart"),
        }
    }

    /// Returns a MimeContentType that represents this multipart type.
    ///
    /// Note: `MimeMultipartAlternate` still renders as the legacy
    /// "alternate" spelling for backwards compatibility with existing
    /// callers and tests.
    pub fn to_content_type(&self) -> MimeContentType {
        let multipart = "multipart".to_string();
        match *self {
            MimeMultipartMixed => (multipart, "mixed".to_string()),
            MimeMultipartAlternate => (multipart, "alternate".to_string()),
            MimeMultipartDigest => (multipart, "digest".to_string()),
            MimeMultipartParallel => (multipart, "parallel".to_string()),
        }
    }
}

/// Enum type over the different types of multipart message.
#[deriving(Show)]
pub enum MimeMessage {
    /// This message is made of multiple sub parts.
    ///
    /// The `body` of MimeMessageData is the content of the message between
    /// the final header and the first boundary.
    MimeMultipart(MimeMessageData, MimeMultipartType, Vec<MimeMessage>),
    /// A simple non-multipart message.
    MimeNonMultipart(MimeMessageData),
}

impl MimeMessage {
    /// Get a reference to the headers for this message.
    pub fn headers(&self) -> &HeaderMap {
        match *self {
            MimeMultipart(ref data, _, _) => &data.headers,
            MimeNonMultipart(ref data) => &data.headers,
        }
    }

    // Make a message from a header map and body, parsing out any multi-part
    // messages that are discovered by looking at the Content-Type header.
    fn from_headers(headers: HeaderMap, body: String) -> Option<MimeMessage> {
        // A missing Content-Type header defaults to text/plain with no
        // parameters; a present-but-unparseable one yields None below.
        let content_type = {
            let header = headers.get("Content-Type".to_string());
            match header {
                Some(h) => h.get_value(),
                None => Some(MimeContentTypeHeader{
                    content_type: ("text".to_string(), "plain".to_string()),
                    params: HashMap::new(),
                })
            }
        };

        if content_type.is_none() {
            // If we failed to parse the Content-Type header, something went wrong, so bail.
            None
        } else {
            let content_type = content_type.unwrap();

            // Pull out the major mime type and the boundary (if it exists)
            let (mime_type, sub_mime_type) = content_type.content_type;
            let boundary = content_type.params.find(&"boundary".to_string());

            let message = match mime_type.as_slice() {
                // Only consider a multipart message if we have a boundary, otherwise don't
                // bother and just assume it's a single message.
                "multipart" if boundary.is_some() => {
                    let boundary = boundary.unwrap();
                    // Pull apart the message on the boundary.
                    let mut parts = MimeMessage::split_boundary(body, boundary.clone());
                    // Pop off the first message, as it's part of the parent.
                    let body = parts.remove(0).unwrap_or("".to_string());
                    // Parse out each of the child parts, recursively downwards.
                    // Filtering out and unwrapping None as we go.
                    let message_parts: Vec<MimeMessage> = parts.iter()
                        .map(|part| { MimeMessage::parse(part.as_slice()) })
                        .filter(|part| { part.is_some() })
                        .map(|part| { part.unwrap() })
                        .collect();
                    let data = MimeMessageData {
                        headers: headers,
                        body: body,
                    };
                    let multipart_type =
                        MimeMultipartType::from_content_type((mime_type, sub_mime_type));
                    MimeMultipart(data, multipart_type, message_parts)
                },
                _ => {
                    // Boring message, bung the headers & body together and return.
                    let data = MimeMessageData {
                        headers: headers,
                        body: body,
                    };
                    MimeNonMultipart(data)
                },
            };

            Some(message)
        }
    }

    // Split `body` up on the `boundary` string.
// Split `body` into parts on lines consisting of "--" followed by `boundary`.
// Text before the first boundary line becomes the first returned element;
// boundary lines themselves are discarded.
fn split_boundary(body: String, boundary: String) -> Vec<String> {
    let mut lines = body.as_slice().split('\n');
    let mut parts = Vec::new();
    let mut current_part = String::new();
    for line in lines {
        // A boundary line is "--" + boundary, ignoring trailing whitespace
        // (which swallows the '\r' of a CRLF line ending).
        let mut is_boundary = false;
        if line.starts_with("--") {
            let boundary_value = line.slice_from(2).trim_right();
            is_boundary = boundary_value.to_string() == boundary;
        }

        if is_boundary {
            // Finish the current part as-is (any trailing "\r\n" from the
            // preceding line is kept) and start accumulating the next one.
            parts.push(current_part);
            current_part = String::new()
        } else {
            current_part.push_str(line);
            current_part.push_str("\n");
        }
    }

    if current_part.len() > 0 {
        // Push what remains as the last message part
        current_part.pop(); // Clear the final \n that we put in
        parts.push(current_part);
    }

    parts
}

/// Parse `s` into a MimeMessage.
///
/// Recurses down into each message, supporting an unlimited depth of messages.
///
/// Be warned that each sub-message that fails to be parsed will be thrown away.
pub fn parse(s: &str) -> Option<MimeMessage> {
    let mut parser = Rfc5322Parser::new(s);
    match parser.consume_message() {
        Some((headers, body)) => MimeMessage::from_headers(headers, body),
        None => None,
    }
}
}

#[cfg(test)]
mod tests {
    use super::*;
    use super::super::header::{Header,HeaderMap};

    // Expected shape of a parsed message: header pairs, body text, and
    // (for multipart) the expected children in order.
    #[deriving(Show)]
    struct MessageTestResult<'s> {
        headers: Vec<(&'s str, &'s str)>,
        body: &'s str,
        children: Option<Vec<MessageTestResult<'s>>>,
    }

    impl<'s> Equiv<MimeMessage> for MessageTestResult<'s> {
        // Structural comparison against a parsed MimeMessage, printing a
        // diagnostic for each mismatching component.
        fn equiv(&self, other: &MimeMessage) -> bool {
            let mut headers = HeaderMap::new();
            for &(name, value) in self.headers.iter() {
                let header = Header::new(name.to_string(), value.to_string());
                headers.insert(header);
            }

            match (&self.children, other) {
                (&Some(ref our_messages), &MimeMultipart(ref data, _, ref other_messages)) => {
                    let mut children_match = true;
                    for (index, child) in our_messages.iter().enumerate() {
                        if !child.equiv(&other_messages[index]) {
                            children_match = false;
                            break;
                        }
                    }
                    let header_match = headers == data.headers;
                    let body_match = self.body.to_string() == data.body;
                    if !children_match {
                        println!("Children do not match!");
                    }
                    if !header_match {
                        println!("Headers do not match!");
                    }
                    if !body_match {
                        println!("Body does not match! ({} != {})", self.body, data.body);
                    }
                    header_match && body_match && children_match
                },
                (&None, &MimeNonMultipart(ref data)) => {
                    let header_match = headers == data.headers;
                    let body_match = self.body.to_string() == data.body;
                    if !header_match {
                        println!("Headers do not match!");
                    }
                    if !body_match {
                        println!("Body does not match! ({} != {})", self.body, data.body);
                    }
                    header_match && body_match
                },
                (_, _) => {
                    println!("Expected different message type than what was given");
                    false
                },
            }
        }
    }

    // One parser test case: raw input, expected parse (None = parse failure
    // expected), and a name printed before the case runs.
    struct ParseTest<'s> {
        input: &'s str,
        output: Option<MessageTestResult<'s>>,
        name: &'s str,
    }

    #[test]
    fn test_message_parse() {
        let tests = vec![
            ParseTest {
                input: "From: joe@example.org\r\nTo: john@example.org\r\n\r\nHello!",
                output: Some(MessageTestResult {
                    headers: vec![
                        ("From", "joe@example.org"),
                        ("To", "john@example.org"),
                    ],
                    body: "Hello!",
                    children: None,
                }),
                name: "Simple single part message parse",
            },
            ParseTest {
                input: "From: joe@example.org\r\n\
                        To: john@example.org\r\n\
                        Content-Type: multipart/alternate; boundary=foo\r\n\
                        \r\n\
                        Parent\r\n\
                        --foo\r\n\
                        Hello!\r\n\
                        --foo\r\n\
                        Other\r\n",
                output: Some(MessageTestResult {
                    headers: vec![
                        ("From", "joe@example.org"),
                        ("To", "john@example.org"),
                        ("Content-Type", "multipart/alternate; boundary=foo"),
                    ],
                    body: "Parent\r\n",
                    children: Some(vec![
                        MessageTestResult {
                            headers: vec![ ],
                            body: "Hello!\r\n",
                            children: None,
                        },
                        MessageTestResult {
                            headers: vec![ ],
                            body: "Other\r\n",
                            children: None,
                        },
                    ]),
                }),
                name: "Simple multipart message parse",
            },
        ];

        for test in tests.into_iter() {
            println!("--- Next test: {}", test.name);
            let message = MimeMessage::parse(test.input);
            let result = match (test.output, message) {
                (Some(ref expected), Some(ref given)) => expected.equiv(given),
                (None, None) => true,
                (_, _) => false,
            };
            assert!(result, test.name);
        }
    }

    #[test]
    fn test_multipart_type_type_parsing() {
        let multipart = "multipart".to_string();
        assert_eq!(MimeMultipartType::from_content_type((multipart.clone(), "mixed".to_string())), MimeMultipartMixed);
        assert_eq!(MimeMultipartType::from_content_type((multipart.clone(), "alternate".to_string())), MimeMultipartAlternate);
        assert_eq!(MimeMultipartType::from_content_type((multipart.clone(), "digest".to_string())), MimeMultipartDigest);
        assert_eq!(MimeMultipartType::from_content_type((multipart.clone(), "parallel".to_string())), MimeMultipartParallel);

        // Test failback onto multipart/mixed
        assert_eq!(MimeMultipartType::from_content_type((multipart.clone(), "potato".to_string())), MimeMultipartMixed);
    }

    #[test]
    fn test_multipart_type_to_content_type() {
        let multipart = "multipart".to_string();
        assert_eq!(MimeMultipartMixed.to_content_type(), (multipart.clone(), "mixed".to_string()));
        assert_eq!(MimeMultipartAlternate.to_content_type(), (multipart.clone(), "alternate".to_string()));
        assert_eq!(MimeMultipartDigest.to_content_type(), (multipart.clone(), "digest".to_string()));
        assert_eq!(MimeMultipartParallel.to_content_type(), (multipart.clone(), "parallel".to_string()));
    }
}
// NOTE(review): this span contains two concatenated revisions of the same
// JSON-RPC message module, separated by the stray commit-message line
// "RPC message deserialization" below. The earlier revision (this one)
// relies on derived Deserialize; the later revision hand-writes it.
use serde::ser::{Serialize, Serializer, SerializeStruct};
use serde_json::Value;

// Zero-sized marker that always serializes as the string "2.0"
// (the JSON-RPC protocol version).
#[derive(Debug, Deserialize)]
struct Version;

impl Serialize for Version {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.serialize_str("2.0")
    }
}

// A JSON-RPC request: method name, optional params, and an id the server
// echoes back in its response.
#[derive(Debug, Serialize, Deserialize)]
pub struct Request {
    jsonrpc: Version,
    pub method: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub params: Option<Value>,
    // TODO: Make private?
    pub id: Value,
}

// A JSON-RPC error object (code/message/optional data).
#[derive(Debug, Serialize, Deserialize)]
pub struct RPCError {
    pub code: i64,
    pub message: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data: Option<Value>,
}

#[derive(Debug, Deserialize)]
pub struct Response {
    pub result: Result<Value, RPCError>,
    pub id: Value,
}

impl Serialize for Response {
    // Serialize the Ok branch under "result" and the Err branch under
    // "error", as the JSON-RPC wire format requires.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let mut sub = serializer.serialize_struct("Response", 2)?;
        sub.serialize_field("id", &self.id)?;
        match self.result {
            Ok(ref value) => sub.serialize_field("result", value),
            Err(ref err) => sub.serialize_field("error", err),
        }?;
        sub.end()
    }
}

// A notification is a request without an id (no response expected).
#[derive(Debug, Serialize, Deserialize)]
pub struct Notification {
    jsonrpc: Version,
    pub method: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub params: Option<Value>,
}

// Any message that can appear on the wire, including batches and
// anything that failed to match a known shape (Unmatched).
#[derive(Debug, Deserialize)]
pub enum Message {
    Request(Request),
    Response(Response),
    Notification(Notification),
    Batch(Vec<Message>),
    Unmatched(Value),
}

impl Message {
    // Convenience constructor for a request; the id is left Null for now.
    pub fn request(method: String, params: Option<Value>) -> Self {
        Message::Request(Request {
            jsonrpc: Version,
            method: method,
            params: params,
            // TODO!
            id: Value::Null,
        })
    }

    // Convenience constructor for a notification.
    pub fn notification(method: String, params: Option<Value>) -> Self {
        Message::Notification(Notification {
            jsonrpc: Version,
            method: method,
            params: params,
        })
    }

    // TODO: Other constructors
}

impl Serialize for Message {
    // Delegate to the matched variant so no enum tag leaks onto the wire.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        match *self {
            Message::Request(ref req) => req.serialize(serializer),
            Message::Response(ref resp) => resp.serialize(serializer),
            Message::Notification(ref notif) => notif.serialize(serializer),
            Message::Batch(ref batch) => batch.serialize(serializer),
            Message::Unmatched(ref val) => val.serialize(serializer),
        }
    }
}
RPC message deserialization
use serde::ser::{Serialize, Serializer, SerializeStruct};
use serde::de::{Deserialize, Deserializer, Unexpected, Error};
use serde_json::{Value, from_value};

// Zero-sized marker for the JSON-RPC protocol version string "2.0";
// this revision also validates it on deserialization below.
#[derive(Debug)]
struct Version;

impl Serialize for Version {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.serialize_str("2.0")
    }
}

impl Deserialize for Version {
    // Accept only the exact string "2.0"; any other value is an error.
    fn deserialize<D: Deserializer>(deserializer: D) -> Result<Self, D::Error> {
        // The version is actually a string
        let parsed: String = Deserialize::deserialize(deserializer)?;
        if parsed == "2.0" {
            Ok(Version)
        } else {
            Err(D::Error::invalid_value(Unexpected::Str(&parsed), &"value 2.0"))
        }
    }
}

// A JSON-RPC request: method name, optional params, and an id the server
// echoes back in its response.
#[derive(Debug, Serialize, Deserialize)]
pub struct Request {
    jsonrpc: Version,
    pub method: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub params: Option<Value>,
    // TODO: Make private?
    pub id: Value,
}

// A JSON-RPC error object (code/message/optional data).
#[derive(Debug, Serialize, Deserialize)]
pub struct RPCError {
    pub code: i64,
    pub message: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data: Option<Value>,
}

// NOTE(review): the derived Deserialize expects the `result` field as an
// externally tagged Result ({"Ok": ...}/{"Err": ...}), while JSON-RPC uses
// separate "result"/"error" keys — presumably handled by the Unmatched
// fallback in Message::deserialize; verify against real responses.
#[derive(Debug, Deserialize)]
pub struct Response {
    pub result: Result<Value, RPCError>,
    pub id: Value,
}

impl Serialize for Response {
    // Serialize the Ok branch under "result" and the Err branch under
    // "error", as the JSON-RPC wire format requires.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let mut sub = serializer.serialize_struct("Response", 2)?;
        sub.serialize_field("id", &self.id)?;
        match self.result {
            Ok(ref value) => sub.serialize_field("result", value),
            Err(ref err) => sub.serialize_field("error", err),
        }?;
        sub.end()
    }
}

// A notification is a request without an id (no response expected).
#[derive(Debug, Serialize, Deserialize)]
pub struct Notification {
    jsonrpc: Version,
    pub method: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub params: Option<Value>,
}

// Any message that can appear on the wire, including batches and
// anything that failed to match a known shape (Unmatched).
#[derive(Debug)]
pub enum Message {
    Request(Request),
    Response(Response),
    Notification(Notification),
    Batch(Vec<Message>),
    Unmatched(Value),
}

impl Message {
    // Convenience constructor for a request; the id is left Null for now.
    pub fn request(method: String, params: Option<Value>) -> Self {
        Message::Request(Request {
            jsonrpc: Version,
            method: method,
            params: params,
            // TODO!
            id: Value::Null,
        })
    }

    // Convenience constructor for a notification.
    pub fn notification(method: String, params: Option<Value>) -> Self {
        Message::Notification(Notification {
            jsonrpc: Version,
            method: method,
            params: params,
        })
    }

    // TODO: Other constructors
}

impl Serialize for Message {
    // Delegate to the matched variant so no enum tag leaks onto the wire.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        match *self {
            Message::Request(ref req) => req.serialize(serializer),
            Message::Response(ref resp) => resp.serialize(serializer),
            Message::Notification(ref notif) => notif.serialize(serializer),
            Message::Batch(ref batch) => batch.serialize(serializer),
            Message::Unmatched(ref val) => val.serialize(serializer),
        }
    }
}

// Try to decode `$src` as the payload of Message::$branch; on success
// return early from the enclosing function, otherwise fall through.
macro_rules! deser_branch {
    ($src:expr, $branch:ident) => {
        match from_value($src.clone()) {
            Ok(parsed) => return Ok(Message::$branch(parsed)),
            Err(_) => (),
        }
    };
}

impl Deserialize for Message {
    // First-match-wins: probe each concrete shape in order; anything that
    // matches none of them is preserved verbatim as Unmatched.
    fn deserialize<D: Deserializer>(deserializer: D) -> Result<Self, D::Error> {
        // Read it as a JSON (delegate the deserialization)
        let preparsed: Value = Deserialize::deserialize(deserializer)?;
        // And try decoding it as a concrete message type, one by one (and get a first match)
        deser_branch!(preparsed, Request);
        deser_branch!(preparsed, Response);
        deser_branch!(preparsed, Notification);
        deser_branch!(preparsed, Batch);
        Ok(Message::Unmatched(preparsed))
    }
}
// NOTE(review): this span contains two concatenated revisions of the
// PostgreSQL wire-protocol message module, separated by the stray
// commit-message line "More cleanup" further below. The second revision
// differs only in iterating `for &x in slice` instead of `slice.iter()`.
use std::io;
use std::io::prelude::*;
use std::mem;

use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};

use types::Oid;
use util;

use self::BackendMessage::*;
use self::FrontendMessage::*;

pub const PROTOCOL_VERSION: u32 = 0x0003_0000;
pub const CANCEL_CODE: u32 = 80877102;
pub const SSL_CODE: u32 = 80877103;

// Messages sent from the server to the client, tagged by their one-byte
// identifier in read_message below.
pub enum BackendMessage {
    AuthenticationCleartextPassword,
    AuthenticationGSS,
    AuthenticationKerberosV5,
    AuthenticationMD5Password {
        salt: [u8; 4]
    },
    AuthenticationOk,
    AuthenticationSCMCredential,
    AuthenticationSSPI,
    BackendKeyData {
        process_id: u32,
        secret_key: u32
    },
    BindComplete,
    CloseComplete,
    CommandComplete {
        tag: String,
    },
    CopyInResponse {
        format: u8,
        column_formats: Vec<u16>,
    },
    DataRow {
        row: Vec<Option<Vec<u8>>>
    },
    EmptyQueryResponse,
    ErrorResponse {
        fields: Vec<(u8, String)>
    },
    NoData,
    NoticeResponse {
        fields: Vec<(u8, String)>
    },
    NotificationResponse {
        pid: u32,
        channel: String,
        payload: String,
    },
    ParameterDescription {
        types: Vec<Oid>
    },
    ParameterStatus {
        parameter: String,
        value: String,
    },
    ParseComplete,
    PortalSuspended,
    ReadyForQuery {
        _state: u8
    },
    RowDescription {
        descriptions: Vec<RowDescriptionEntry>
    }
}

// One column descriptor inside a RowDescription message.
pub struct RowDescriptionEntry {
    pub name: String,
    pub table_oid: Oid,
    pub column_id: i16,
    pub type_oid: Oid,
    pub type_size: i16,
    pub type_modifier: i32,
    pub format: i16
}

// Messages sent from the client to the server; borrowed fields avoid
// copies since the message is encoded immediately by write_message.
pub enum FrontendMessage<'a> {
    Bind {
        portal: &'a str,
        statement: &'a str,
        formats: &'a [i16],
        values: &'a [Option<Vec<u8>>],
        result_formats: &'a [i16]
    },
    CancelRequest {
        code: u32,
        process_id: u32,
        secret_key: u32,
    },
    Close {
        variant: u8,
        name: &'a str
    },
    CopyData {
        data: &'a [u8],
    },
    CopyDone,
    CopyFail {
        message: &'a str
    },
    Describe {
        variant: u8,
        name: &'a str
    },
    Execute {
        portal: &'a str,
        max_rows: i32
    },
    Parse {
        name: &'a str,
        query: &'a str,
        param_types: &'a [Oid]
    },
    PasswordMessage {
        password: &'a str
    },
    Query {
        query: &'a str
    },
    SslRequest {
        code: u32
    },
    StartupMessage {
        version: u32,
        parameters: &'a [(String, String)]
    },
    Sync,
    Terminate
}

#[doc(hidden)]
trait WriteCStr {
    fn write_cstr(&mut self, s: &str) -> io::Result<()>;
}

impl<W: Write> WriteCStr for W {
    // Write `s` as a NUL-terminated string, the protocol's string encoding.
    fn write_cstr(&mut self, s: &str) -> io::Result<()> {
        try!(self.write_all(s.as_bytes()));
        Ok(try!(self.write_u8(0)))
    }
}

#[doc(hidden)]
pub trait WriteMessage {
    fn write_message(&mut self, &FrontendMessage) -> io::Result<()> ;
}

impl<W: Write> WriteMessage for W {
    // Encode one frontend message: body is buffered first so the length
    // prefix (which includes itself) can be computed, then the optional
    // one-byte tag, the length, and the body are written out.
    fn write_message(&mut self, message: &FrontendMessage) -> io::Result<()> {
        let mut buf = vec![];
        let mut ident = None;

        match *message {
            Bind { portal, statement, formats, values, result_formats } => {
                ident = Some(b'B');
                try!(buf.write_cstr(portal));
                try!(buf.write_cstr(statement));
                try!(buf.write_i16::<BigEndian>(formats.len() as i16));
                for format in formats.iter() {
                    try!(buf.write_i16::<BigEndian>(*format));
                }
                try!(buf.write_i16::<BigEndian>(values.len() as i16));
                for value in values.iter() {
                    match *value {
                        None => try!(buf.write_i32::<BigEndian>(-1)),
                        Some(ref value) => {
                            try!(buf.write_i32::<BigEndian>(value.len() as i32));
                            try!(buf.write_all(&**value));
                        }
                    }
                }
                try!(buf.write_i16::<BigEndian>(result_formats.len() as i16));
                for format in result_formats.iter() {
                    try!(buf.write_i16::<BigEndian>(*format));
                }
            }
            CancelRequest { code, process_id, secret_key } => {
                try!(buf.write_u32::<BigEndian>(code));
                try!(buf.write_u32::<BigEndian>(process_id));
                try!(buf.write_u32::<BigEndian>(secret_key));
            }
            Close { variant, name } => {
                ident = Some(b'C');
                try!(buf.write_u8(variant));
                try!(buf.write_cstr(name));
            }
            CopyData { data } => {
                ident = Some(b'd');
                try!(buf.write_all(data));
            }
            CopyDone => ident = Some(b'c'),
            CopyFail { message } => {
                ident = Some(b'f');
                try!(buf.write_cstr(message));
            }
            Describe { variant, name } => {
                ident = Some(b'D');
                try!(buf.write_u8(variant));
                try!(buf.write_cstr(name));
            }
            Execute { portal, max_rows } => {
                ident = Some(b'E');
                try!(buf.write_cstr(portal));
                try!(buf.write_i32::<BigEndian>(max_rows));
            }
            Parse { name, query, param_types } => {
                ident = Some(b'P');
                try!(buf.write_cstr(name));
                try!(buf.write_cstr(query));
                try!(buf.write_i16::<BigEndian>(param_types.len() as i16));
                for ty in param_types.iter() {
                    try!(buf.write_u32::<BigEndian>(*ty));
                }
            }
            PasswordMessage { password } => {
                ident = Some(b'p');
                try!(buf.write_cstr(password));
            }
            Query { query } => {
                ident = Some(b'Q');
                try!(buf.write_cstr(query));
            }
            StartupMessage { version, parameters } => {
                try!(buf.write_u32::<BigEndian>(version));
                for &(ref k, ref v) in parameters {
                    try!(buf.write_cstr(&**k));
                    try!(buf.write_cstr(&**v));
                }
                try!(buf.write_u8(0));
            }
            SslRequest { code } => try!(buf.write_u32::<BigEndian>(code)),
            Sync => ident = Some(b'S'),
            Terminate => ident = Some(b'X'),
        }

        if let Some(ident) = ident {
            try!(self.write_u8(ident));
        }

        // add size of length value
        try!(self.write_i32::<BigEndian>((buf.len() + mem::size_of::<i32>()) as i32));
        try!(self.write_all(&*buf));

        Ok(())
    }
}

#[doc(hidden)]
trait ReadCStr {
    fn read_cstr(&mut self) -> io::Result<String>;
}

impl<R: BufRead> ReadCStr for R {
    // Read up to (and discard) the NUL terminator, validating UTF-8.
    fn read_cstr(&mut self) -> io::Result<String> {
        let mut buf = vec![];
        try!(self.read_until(0, &mut buf));
        buf.pop();
        String::from_utf8(buf).map_err(|err| io::Error::new(io::ErrorKind::Other, err))
    }
}

#[doc(hidden)]
pub trait ReadMessage {
    fn read_message(&mut self) -> io::Result<BackendMessage>;
}

impl<R: BufRead> ReadMessage for R {
    // Decode one backend message: one-byte tag, then a length-prefixed
    // body read through a Take so over/under-reads are detected.
    fn read_message(&mut self) -> io::Result<BackendMessage> {
        let ident = try!(self.read_u8());
        // subtract size of length value
        let len = try!(self.read_u32::<BigEndian>()) as usize - mem::size_of::<i32>();
        let mut rdr = self.by_ref().take(len as u64);

        let ret = match ident {
            b'1' => ParseComplete,
            b'2' => BindComplete,
            b'3' => CloseComplete,
            b'A' => NotificationResponse {
                pid: try!(rdr.read_u32::<BigEndian>()),
                channel: try!(rdr.read_cstr()),
                payload: try!(rdr.read_cstr())
            },
            b'C' => CommandComplete { tag: try!(rdr.read_cstr()) },
            b'D' => try!(read_data_row(&mut rdr)),
            b'E' => ErrorResponse { fields: try!(read_fields(&mut rdr)) },
            b'G' => {
                let format = try!(rdr.read_u8());
                let mut column_formats = vec![];
                for _ in 0..try!(rdr.read_u16::<BigEndian>()) {
                    column_formats.push(try!(rdr.read_u16::<BigEndian>()));
                }
                CopyInResponse {
                    format: format,
                    column_formats: column_formats,
                }
            }
            b'I' => EmptyQueryResponse,
            b'K' => BackendKeyData {
                process_id: try!(rdr.read_u32::<BigEndian>()),
                secret_key: try!(rdr.read_u32::<BigEndian>())
            },
            b'n' => NoData,
            b'N' => NoticeResponse { fields: try!(read_fields(&mut rdr)) },
            b'R' => try!(read_auth_message(&mut rdr)),
            b's' => PortalSuspended,
            b'S' => ParameterStatus {
                parameter: try!(rdr.read_cstr()),
                value: try!(rdr.read_cstr())
            },
            b't' => try!(read_parameter_description(&mut rdr)),
            b'T' => try!(read_row_description(&mut rdr)),
            b'Z' => ReadyForQuery { _state: try!(rdr.read_u8()) },
            _ => return Err(io::Error::new(io::ErrorKind::Other, "unexpected message tag")),
        };
        if rdr.limit() != 0 {
            return Err(io::Error::new(io::ErrorKind::Other, "didn't read entire message"));
        }
        Ok(ret)
    }
}

// Read (type, string) pairs until a zero type byte (error/notice fields).
fn read_fields<R: BufRead>(buf: &mut R) -> io::Result<Vec<(u8, String)>> {
    let mut fields = vec![];
    loop {
        let ty = try!(buf.read_u8());
        if ty == 0 {
            break;
        }

        fields.push((ty, try!(buf.read_cstr())));
    }

    Ok(fields)
}

// Read one DataRow: a count followed by length-prefixed values, where a
// length of -1 marks SQL NULL.
fn read_data_row<R: BufRead>(buf: &mut R) -> io::Result<BackendMessage> {
    let len = try!(buf.read_u16::<BigEndian>()) as usize;
    let mut values = Vec::with_capacity(len);

    for _ in 0..len {
        let val = match try!(buf.read_i32::<BigEndian>()) {
            -1 => None,
            len => {
                let mut data = Vec::with_capacity(len as usize);
                data.extend((0..len).map(|_| 0));
                try!(util::read_all(buf, &mut data));
                Some(data)
            }
        };
        values.push(val);
    }

    Ok(DataRow { row: values })
}

// Dispatch on the authentication sub-code of an 'R' message.
fn read_auth_message<R: Read>(buf: &mut R) -> io::Result<BackendMessage> {
    Ok(match try!(buf.read_i32::<BigEndian>()) {
        0 => AuthenticationOk,
        2 => AuthenticationKerberosV5,
        3 => AuthenticationCleartextPassword,
        5 => {
            let mut salt = [0; 4];
            try!(util::read_all(buf, &mut salt));
            AuthenticationMD5Password { salt: salt }
        },
        6 => AuthenticationSCMCredential,
        7 => AuthenticationGSS,
        9 => AuthenticationSSPI,
        _ => return Err(io::Error::new(io::ErrorKind::Other, "unexpected authentication tag")),
    })
}

// Read a ParameterDescription: a count followed by that many OIDs.
fn read_parameter_description<R: Read>(buf: &mut R) -> io::Result<BackendMessage> {
    let len = try!(buf.read_i16::<BigEndian>()) as usize;
    let mut types = Vec::with_capacity(len);

    for _ in 0..len {
        types.push(try!(buf.read_u32::<BigEndian>()));
    }

    Ok(ParameterDescription { types: types })
}

// Read a RowDescription: a count followed by per-column descriptors.
fn read_row_description<R: BufRead>(buf: &mut R) -> io::Result<BackendMessage> {
    let len = try!(buf.read_i16::<BigEndian>()) as usize;
    let mut types = Vec::with_capacity(len);

    for _ in 0..len {
        types.push(RowDescriptionEntry {
            name: try!(buf.read_cstr()),
            table_oid: try!(buf.read_u32::<BigEndian>()),
            column_id: try!(buf.read_i16::<BigEndian>()),
            type_oid: try!(buf.read_u32::<BigEndian>()),
            type_size: try!(buf.read_i16::<BigEndian>()),
            type_modifier: try!(buf.read_i32::<BigEndian>()),
            format: try!(buf.read_i16::<BigEndian>())
        })
    }

    Ok(RowDescription { descriptions: types })
}
More cleanup
use std::io;
use std::io::prelude::*;
use std::mem;

use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};

use types::Oid;
use util;

use self::BackendMessage::*;
use self::FrontendMessage::*;

pub const PROTOCOL_VERSION: u32 = 0x0003_0000;
pub const CANCEL_CODE: u32 = 80877102;
pub const SSL_CODE: u32 = 80877103;

// Messages sent from the server to the client, tagged by their one-byte
// identifier in read_message below.
pub enum BackendMessage {
    AuthenticationCleartextPassword,
    AuthenticationGSS,
    AuthenticationKerberosV5,
    AuthenticationMD5Password {
        salt: [u8; 4]
    },
    AuthenticationOk,
    AuthenticationSCMCredential,
    AuthenticationSSPI,
    BackendKeyData {
        process_id: u32,
        secret_key: u32
    },
    BindComplete,
    CloseComplete,
    CommandComplete {
        tag: String,
    },
    CopyInResponse {
        format: u8,
        column_formats: Vec<u16>,
    },
    DataRow {
        row: Vec<Option<Vec<u8>>>
    },
    EmptyQueryResponse,
    ErrorResponse {
        fields: Vec<(u8, String)>
    },
    NoData,
    NoticeResponse {
        fields: Vec<(u8, String)>
    },
    NotificationResponse {
        pid: u32,
        channel: String,
        payload: String,
    },
    ParameterDescription {
        types: Vec<Oid>
    },
    ParameterStatus {
        parameter: String,
        value: String,
    },
    ParseComplete,
    PortalSuspended,
    ReadyForQuery {
        _state: u8
    },
    RowDescription {
        descriptions: Vec<RowDescriptionEntry>
    }
}

// One column descriptor inside a RowDescription message.
pub struct RowDescriptionEntry {
    pub name: String,
    pub table_oid: Oid,
    pub column_id: i16,
    pub type_oid: Oid,
    pub type_size: i16,
    pub type_modifier: i32,
    pub format: i16
}

// Messages sent from the client to the server; borrowed fields avoid
// copies since the message is encoded immediately by write_message.
pub enum FrontendMessage<'a> {
    Bind {
        portal: &'a str,
        statement: &'a str,
        formats: &'a [i16],
        values: &'a [Option<Vec<u8>>],
        result_formats: &'a [i16]
    },
    CancelRequest {
        code: u32,
        process_id: u32,
        secret_key: u32,
    },
    Close {
        variant: u8,
        name: &'a str
    },
    CopyData {
        data: &'a [u8],
    },
    CopyDone,
    CopyFail {
        message: &'a str
    },
    Describe {
        variant: u8,
        name: &'a str
    },
    Execute {
        portal: &'a str,
        max_rows: i32
    },
    Parse {
        name: &'a str,
        query: &'a str,
        param_types: &'a [Oid]
    },
    PasswordMessage {
        password: &'a str
    },
    Query {
        query: &'a str
    },
    SslRequest {
        code: u32
    },
    StartupMessage {
        version: u32,
        parameters: &'a [(String, String)]
    },
    Sync,
    Terminate
}

#[doc(hidden)]
trait WriteCStr {
    fn write_cstr(&mut self, s: &str) -> io::Result<()>;
}

impl<W: Write> WriteCStr for W {
    // Write `s` as a NUL-terminated string, the protocol's string encoding.
    fn write_cstr(&mut self, s: &str) -> io::Result<()> {
        try!(self.write_all(s.as_bytes()));
        Ok(try!(self.write_u8(0)))
    }
}

#[doc(hidden)]
pub trait WriteMessage {
    fn write_message(&mut self, &FrontendMessage) -> io::Result<()> ;
}

impl<W: Write> WriteMessage for W {
    // Encode one frontend message: body is buffered first so the length
    // prefix (which includes itself) can be computed, then the optional
    // one-byte tag, the length, and the body are written out.
    fn write_message(&mut self, message: &FrontendMessage) -> io::Result<()> {
        let mut buf = vec![];
        let mut ident = None;

        match *message {
            Bind { portal, statement, formats, values, result_formats } => {
                ident = Some(b'B');
                try!(buf.write_cstr(portal));
                try!(buf.write_cstr(statement));
                try!(buf.write_i16::<BigEndian>(formats.len() as i16));
                for &format in formats {
                    try!(buf.write_i16::<BigEndian>(format));
                }
                try!(buf.write_i16::<BigEndian>(values.len() as i16));
                for value in values {
                    match *value {
                        None => try!(buf.write_i32::<BigEndian>(-1)),
                        Some(ref value) => {
                            try!(buf.write_i32::<BigEndian>(value.len() as i32));
                            try!(buf.write_all(&**value));
                        }
                    }
                }
                try!(buf.write_i16::<BigEndian>(result_formats.len() as i16));
                for &format in result_formats {
                    try!(buf.write_i16::<BigEndian>(format));
                }
            }
            CancelRequest { code, process_id, secret_key } => {
                try!(buf.write_u32::<BigEndian>(code));
                try!(buf.write_u32::<BigEndian>(process_id));
                try!(buf.write_u32::<BigEndian>(secret_key));
            }
            Close { variant, name } => {
                ident = Some(b'C');
                try!(buf.write_u8(variant));
                try!(buf.write_cstr(name));
            }
            CopyData { data } => {
                ident = Some(b'd');
                try!(buf.write_all(data));
            }
            CopyDone => ident = Some(b'c'),
            CopyFail { message } => {
                ident = Some(b'f');
                try!(buf.write_cstr(message));
            }
            Describe { variant, name } => {
                ident = Some(b'D');
                try!(buf.write_u8(variant));
                try!(buf.write_cstr(name));
            }
            Execute { portal, max_rows } => {
                ident = Some(b'E');
                try!(buf.write_cstr(portal));
                try!(buf.write_i32::<BigEndian>(max_rows));
            }
            Parse { name, query, param_types } => {
                ident = Some(b'P');
                try!(buf.write_cstr(name));
                try!(buf.write_cstr(query));
                try!(buf.write_i16::<BigEndian>(param_types.len() as i16));
                for &ty in param_types {
                    try!(buf.write_u32::<BigEndian>(ty));
                }
            }
            PasswordMessage { password } => {
                ident = Some(b'p');
                try!(buf.write_cstr(password));
            }
            Query { query } => {
                ident = Some(b'Q');
                try!(buf.write_cstr(query));
            }
            StartupMessage { version, parameters } => {
                try!(buf.write_u32::<BigEndian>(version));
                for &(ref k, ref v) in parameters {
                    try!(buf.write_cstr(&**k));
                    try!(buf.write_cstr(&**v));
                }
                try!(buf.write_u8(0));
            }
            SslRequest { code } => try!(buf.write_u32::<BigEndian>(code)),
            Sync => ident = Some(b'S'),
            Terminate => ident = Some(b'X'),
        }

        if let Some(ident) = ident {
            try!(self.write_u8(ident));
        }

        // add size of length value
        try!(self.write_i32::<BigEndian>((buf.len() + mem::size_of::<i32>()) as i32));
        try!(self.write_all(&*buf));

        Ok(())
    }
}

#[doc(hidden)]
trait ReadCStr {
    fn read_cstr(&mut self) -> io::Result<String>;
}

impl<R: BufRead> ReadCStr for R {
    // Read up to (and discard) the NUL terminator, validating UTF-8.
    fn read_cstr(&mut self) -> io::Result<String> {
        let mut buf = vec![];
        try!(self.read_until(0, &mut buf));
        buf.pop();
        String::from_utf8(buf).map_err(|err| io::Error::new(io::ErrorKind::Other, err))
    }
}

#[doc(hidden)]
pub trait ReadMessage {
    fn read_message(&mut self) -> io::Result<BackendMessage>;
}

impl<R: BufRead> ReadMessage for R {
    // Decode one backend message: one-byte tag, then a length-prefixed
    // body read through a Take so over/under-reads are detected.
    fn read_message(&mut self) -> io::Result<BackendMessage> {
        let ident = try!(self.read_u8());
        // subtract size of length value
        let len = try!(self.read_u32::<BigEndian>()) as usize - mem::size_of::<i32>();
        let mut rdr = self.by_ref().take(len as u64);

        let ret = match ident {
            b'1' => ParseComplete,
            b'2' => BindComplete,
            b'3' => CloseComplete,
            b'A' => NotificationResponse {
                pid: try!(rdr.read_u32::<BigEndian>()),
                channel: try!(rdr.read_cstr()),
                payload: try!(rdr.read_cstr())
            },
            b'C' => CommandComplete { tag: try!(rdr.read_cstr()) },
            b'D' => try!(read_data_row(&mut rdr)),
            b'E' => ErrorResponse { fields: try!(read_fields(&mut rdr)) },
            b'G' => {
                let format = try!(rdr.read_u8());
                let mut column_formats = vec![];
                for _ in 0..try!(rdr.read_u16::<BigEndian>()) {
                    column_formats.push(try!(rdr.read_u16::<BigEndian>()));
                }
                CopyInResponse {
                    format: format,
                    column_formats: column_formats,
                }
            }
            b'I' => EmptyQueryResponse,
            b'K' => BackendKeyData {
                process_id: try!(rdr.read_u32::<BigEndian>()),
                secret_key: try!(rdr.read_u32::<BigEndian>())
            },
            b'n' => NoData,
            b'N' => NoticeResponse { fields: try!(read_fields(&mut rdr)) },
            b'R' => try!(read_auth_message(&mut rdr)),
            b's' => PortalSuspended,
            b'S' => ParameterStatus {
                parameter: try!(rdr.read_cstr()),
                value: try!(rdr.read_cstr())
            },
            b't' => try!(read_parameter_description(&mut rdr)),
            b'T' => try!(read_row_description(&mut rdr)),
            b'Z' => ReadyForQuery { _state: try!(rdr.read_u8()) },
            _ => return Err(io::Error::new(io::ErrorKind::Other, "unexpected message tag")),
        };
        if rdr.limit() != 0 {
            return Err(io::Error::new(io::ErrorKind::Other, "didn't read entire message"));
        }
        Ok(ret)
    }
}

// Read (type, string) pairs until a zero type byte (error/notice fields).
fn read_fields<R: BufRead>(buf: &mut R) -> io::Result<Vec<(u8, String)>> {
    let mut fields = vec![];
    loop {
        let ty = try!(buf.read_u8());
        if ty == 0 {
            break;
        }

        fields.push((ty, try!(buf.read_cstr())));
    }

    Ok(fields)
}

// Read one DataRow: a count followed by length-prefixed values, where a
// length of -1 marks SQL NULL.
fn read_data_row<R: BufRead>(buf: &mut R) -> io::Result<BackendMessage> {
    let len = try!(buf.read_u16::<BigEndian>()) as usize;
    let mut values = Vec::with_capacity(len);

    for _ in 0..len {
        let val = match try!(buf.read_i32::<BigEndian>()) {
            -1 => None,
            len => {
                let mut data = Vec::with_capacity(len as usize);
                data.extend((0..len).map(|_| 0));
                try!(util::read_all(buf, &mut data));
                Some(data)
            }
        };
        values.push(val);
    }

    Ok(DataRow { row: values })
}

// Dispatch on the authentication sub-code of an 'R' message.
fn read_auth_message<R: Read>(buf: &mut R) -> io::Result<BackendMessage> {
    Ok(match try!(buf.read_i32::<BigEndian>()) {
        0 => AuthenticationOk,
        2 => AuthenticationKerberosV5,
        3 => AuthenticationCleartextPassword,
        5 => {
            let mut salt = [0; 4];
            try!(util::read_all(buf, &mut salt));
            AuthenticationMD5Password { salt: salt }
        },
        6 => AuthenticationSCMCredential,
        7 => AuthenticationGSS,
        9 => AuthenticationSSPI,
        _ => return Err(io::Error::new(io::ErrorKind::Other, "unexpected authentication tag")),
    })
}

// Read a ParameterDescription: a count followed by that many OIDs.
fn read_parameter_description<R: Read>(buf: &mut R) -> io::Result<BackendMessage> {
    let len = try!(buf.read_i16::<BigEndian>()) as usize;
    let mut types = Vec::with_capacity(len);

    for _ in 0..len {
        types.push(try!(buf.read_u32::<BigEndian>()));
    }

    Ok(ParameterDescription { types: types })
}

// Read a RowDescription: a count followed by per-column descriptors.
fn read_row_description<R: BufRead>(buf: &mut R) -> io::Result<BackendMessage> {
    let len = try!(buf.read_i16::<BigEndian>()) as usize;
    let mut types = Vec::with_capacity(len);

    for _ in 0..len {
        types.push(RowDescriptionEntry {
            name: try!(buf.read_cstr()),
            table_oid: try!(buf.read_u32::<BigEndian>()),
            column_id: try!(buf.read_i16::<BigEndian>()),
            type_oid: try!(buf.read_u32::<BigEndian>()),
            type_size: try!(buf.read_i16::<BigEndian>()),
            type_modifier: try!(buf.read_i32::<BigEndian>()),
            format: try!(buf.read_i16::<BigEndian>())
        })
    }

    Ok(RowDescription { descriptions: types })
}
//! Method macro combinators
//!
//! These macros make parsers into methods of structs,
//! and allow methods of structs to be called
//! as parsers.
//!
//! There is a trick to make them easier to assemble;
//! combinators are defined like this:
//!
//! ```ignore
//! macro_rules! tag (
//!   ($i:expr, $inp: expr) => (
//!     {
//!       ...
//!     }
//!   );
//! );
//! ```
//!
//! But when used as methods in other combinators, they are used
//! like this:
//!
//! ```ignore
//! method!(my_function<Parser<'a> >, self, tag!("abcd"));
//! ```
//!
//! Internally, other combinators will rewrite
//! that call to pass the input as second argument:
//!
//! ```ignore
//! macro_rules! method (
//!   ($name:ident<$a:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => (
//!     fn $name( $self_: $a, i: &[u8] ) -> $crate::IResult<&[u8], &[u8]> {
//!       $submac!(i, $($args)*)
//!     }
//!   );
//! );
//! ```
//!
//! The `method!` macro is similar to the `named!` macro in the macros module.
//! While `named!` will create a parser function, `method!` will create a parser
//! method on the struct it is defined in.
//!
//! Compared to the `named!` macro there are a few differences in how they are
//! invoked. A `method!` invocation always has to have the type of `self`
//! declared and it can't be a reference due to Rust's borrow lifetime
//! restrictions:
//! ```ignore
//! //       -`self`'s type-
//! method!(method_name< Parser<'a> >, ...);
//! ```
//! `self`'s type always comes first.
//! The next difference is you have to input the self struct. Due to Rust's
//! macro hygiene the macro can't declare it on its own.
//! ```ignore
//! //                                                 -self-
//! method!(method_name<Parser<'a>, &'a str, &'a str>, self, ...);
//! ```
//! When making a parsing struct with parsing methods, due to the static borrow
//! checker, calling any parsing methods on self (or any other parsing struct)
//! will cause self to be moved for the rest of the method. To get around this
//!
restriction all of self is moved into the called method and then the called
//! method will return self to the caller.
//!
//! To call a method on self you need to use the `call_m!` macro. For example:
//! ```ignore
//! struct<'a> Parser<'a> {
//!   parsed: &'a str,
//! }
//! impl<'a> Parser<'a> {
//!   // Constructor omitted for brevity
//!   method!(take4<Parser<'a>, &'a str, &'a str>, self, take!(4));
//!   method!(caller<Parser<'a>, &'a str, &'a str>, self, call_m!(self.take4));
//! }
//! ```
//! More complicated combinations still mostly look the same as their `named!`
//! counterparts:
//! ```ignore
//! method!(pub simple_chain<&mut Parser<'a>, &'a str, &'a str>, self,
//!   chain!(
//!     call_m!(self.tag_abc) ~
//!     call_m!(self.tag_def) ~
//!     call_m!(self.tag_ghi) ~
//!     last: call_m!(self.simple_peek) ,
//!     ||{sb.parsed = last; last}
//!   )
//! );
//! ```
//! The three additions to method definitions to remember are:
//! 1. Specify `self`'s type
//! 2. Pass `self` to the macro
//! 3. Call parser methods using the `call_m!` macro.

/// Makes a method from a parser combination
///
/// The type must be set up because the compiler needs
/// the information
///
/// ```ignore
/// method!(my_function<Parser<'a> >( &[u8] ) -> &[u8], tag!("abcd"));
/// // first type parameter is `self`'s type, second is input, third is output
/// method!(my_function<Parser<'a>, &[u8], &[u8]>, tag!("abcd"));
/// //prefix them with 'pub' to make the methods public
/// method!(pub my_function<Parser<'a>,&[u8], &[u8]>, tag!("abcd"));
/// ```
#[macro_export]
macro_rules!
method ( // Non-public immutable self ($name:ident<$a:ty>( $i:ty ) -> $o:ty, $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty,$i:ty,$o:ty,$e:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( $self_: $a, i: $i ) -> ($a, $crate::IResult<$i, $o, $e>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty,$i:ty,$o:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty,$o:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name<'a>( $self_: $a, i: &'a[u8] ) -> ($a, $crate::IResult<&'a [u8], $o, u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], &[u8], u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); // Public immutable self (pub $name:ident<$a:ty>( $i:ty ) -> $o:ty, $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn $name( $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty,$i:ty,$o:ty,$e:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( $self_: $a, i: $i ) -> ($a, $crate::IResult<$i, $o, $e>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty,$i:ty,$o:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn $name( $self_: $a,i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty,$o:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn $name<'a>( $self_: $a, i: &'a[u8] ) -> ($a, $crate::IResult<&'a [u8], $o, u32>) { let result = 
$submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn $name( $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], &[u8], u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); // Non-public mutable self ($name:ident<$a:ty>( $i:ty ) -> $o:ty, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty,$i:ty,$o:ty,$e:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i, $o, $e>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty,$i:ty,$o:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty,$o:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name<'a>( mut $self_: $a, i: &'a[u8] ) -> ($a, $crate::IResult<&'a [u8], $o, u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( mut $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], &[u8], u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); // Public mutable self (pub $name:ident<$a:ty>( $i:ty ) -> $o:ty, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty,$i:ty,$o:ty,$e:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i, $o, $e>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty,$i:ty,$o:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn 
$name( mut $self_: $a,i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty,$o:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn $name<'a>( mut $self_: $a, i: &'a[u8] ) -> ($a, $crate::IResult<&'a [u8], $o, u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn $name( mut $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], &[u8], u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ); /// Used to called methods then move self back into self #[macro_export] macro_rules! call_m ( ($i:expr, $self_:ident.$method:ident) => ( { let (tmp, res) = $self_.$method($i); $self_ = tmp; res } ); ($i:expr, $self_:ident.$method:ident, $($args:expr),* ) => ( { let (tmp, res) = $self_.$method($i, $($args),*); $self_ = tmp; res } ); ); /// emulate function currying for method calls on structs /// `apply!(self.my_function, arg1, arg2, ...)` becomes `self.my_function(input, arg1, arg2, ...)` /// /// Supports up to 6 arguments #[macro_export] macro_rules! apply_m ( ($i:expr, $self_:ident.$method:ident, $($args:expr),* ) => ( { let (tmp, res) = $self_.$method( $i, $($args),* ); $self_ = tmp; res } ); ); #[cfg(test)] mod tests { use internal::IResult::*; // reproduce the tag_s and take_s macros, because of module import order macro_rules! tag_s ( ($i:expr, $tag: expr) => ( { let res: $crate::IResult<_,_> = if $tag.len() > $i.len() { $crate::IResult::Incomplete($crate::Needed::Size($tag.len())) //} else if &$i[0..$tag.len()] == $tag { } else if ($i).starts_with($tag) { $crate::IResult::Done(&$i[$tag.len()..], &$i[0..$tag.len()]) } else { $crate::IResult::Error(error_position!($crate::ErrorKind::TagStr, $i)) }; res } ); ); macro_rules! 
take_s ( ($i:expr, $count:expr) => ( { let cnt = $count as usize; let res: $crate::IResult<_,_> = if $i.chars().count() < cnt { $crate::IResult::Incomplete($crate::Needed::Size(cnt)) } else { let mut offset = $i.len(); let mut count = 0; for (o, _) in $i.char_indices() { if count == cnt { offset = o; break; } count += 1; } $crate::IResult::Done(&$i[offset..], &$i[..offset]) }; res } ); ); struct Parser<'a> { bcd: &'a str, } impl<'a> Parser<'a> { pub fn new() -> Parser<'a> { Parser{bcd: ""} } method!(tag_abc<Parser<'a>, &'a str, &'a str>, self, tag_s!("áβç")); method!(tag_bcd<Parser<'a> >(&'a str) -> &'a str, self, tag_s!("βçδ")); method!(pub tag_hij<Parser<'a> >(&'a str) -> &'a str, self, tag_s!("λïJ")); method!(pub tag_ijk<Parser<'a>, &'a str, &'a str>, self, tag_s!("ïJƙ")); method!(take3<Parser<'a>, &'a str, &'a str>, self, take_s!(3)); method!(pub simple_call<Parser<'a>, &'a str, &'a str>, mut self, call_m!(self.tag_abc) ); method!(pub simple_peek<Parser<'a>, &'a str, &'a str>, mut self, peek!(call_m!(self.take3)) ); method!(pub simple_chain<Parser<'a>, &'a str, &'a str>, mut self, chain!( bcd: call_m!(self.tag_bcd) ~ last: call_m!(self.simple_peek) , ||{self.bcd = bcd; last} ) ); fn tag_stuff(mut self: Parser<'a>, input: &'a str, something: &'a str) -> (Parser<'a>, ::IResult<&'a str, &'a str>) { self.bcd = something; let(tmp, res) = self.tag_abc(input); self = tmp; (self, res) } method!(use_apply<Parser<'a>, &'a str, &'a str>, mut self, apply_m!(self.tag_stuff, "βçδ")); } #[test] fn test_method_call_abc() { let p = Parser::new(); let input: &str = "áβçδèƒϱλïJƙ"; let consumed: &str = "áβç"; let leftover: &str = "δèƒϱλïJƙ"; let(_, res) = p.tag_abc(input); match res { Done(extra, output) => { assert!(extra == leftover, "`Parser.tag_abc` consumed leftover input. leftover: {}", extra); assert!(output == consumed, "`Parser.tag_abc` doesnt return the string it consumed \ on success. 
Expected `{}`, got `{}`.", consumed, output); }, other => panic!("`Parser.tag_abc` didn't succeed when it should have. \ Got `{:?}`.", other), } } #[test] fn test_method_call_bcd() { let p = Parser::new(); let input: &str = "βçδèƒϱλïJƙ"; let consumed: &str = "βçδ"; let leftover: &str = "èƒϱλïJƙ"; let(_, res) = p.tag_bcd(input); match res { Done(extra, output) => { assert!(extra == leftover, "`Parser.tag_bcd` consumed leftover input. leftover: {}", extra); assert!(output == consumed, "`Parser.tag_bcd` doesn't return the string it consumed \ on success. Expected `{}`, got `{}`.", consumed, output); }, other => panic!("`Parser.tag_bcd` didn't succeed when it should have. \ Got `{:?}`.", other), } } #[test] fn test_method_call_hij() { let p = Parser::new(); let input: &str = "λïJƙℓ₥ñôƥ9řƨ"; let consumed: &str = "λïJ"; let leftover: &str = "ƙℓ₥ñôƥ9řƨ"; let(_, res) = p.tag_hij(input); match res { Done(extra, output) => { assert!(extra == leftover, "`Parser.tag_hij` consumed leftover input. leftover: {}", extra); assert!(output == consumed, "`Parser.tag_hij` doesn't return the string it consumed \ on success. Expected `{}`, got `{}`.", consumed, output); }, other => panic!("`Parser.tag_hij` didn't succeed when it should have. \ Got `{:?}`.", other), } } #[test] fn test_method_call_ijk() { let p = Parser::new(); let input: &str = "ïJƙℓ₥ñôƥ9řƨ"; let consumed: &str = "ïJƙ"; let leftover: &str = "ℓ₥ñôƥ9řƨ"; let(_, res) = p.tag_ijk(input); match res { Done(extra, output) => { assert!(extra == leftover, "`Parser.tag_ijk` consumed leftover input. leftover: {}", extra); assert!(output == consumed, "`Parser.tag_ijk` doesn't return the string it consumed \ on success. Expected `{}`, got `{}`.", consumed, output); }, other => panic!("`Parser.tag_ijk` didn't succeed when it should have. 
\ Got `{:?}`.", other), } } #[test] fn test_method_simple_call() { let p = Parser::new(); let input: &str = "áβçδèƒϱλïJƙ"; let consumed: &str = "áβç"; let leftover: &str = "δèƒϱλïJƙ"; let(_, res) = p.simple_call(input); match res { Done(extra, output) => { assert!(extra == leftover, "`Parser.simple_call` consumed leftover input. leftover: {}", extra); assert!(output == consumed, "`Parser.simple_call` doesn't return the string it consumed \ on success. Expected `{}`, got `{}`.", consumed, output); }, other => panic!("`Parser.simple_call` didn't succeed when it should have. \ Got `{:?}`.", other), } } #[test] fn test_apply_m() { let mut p = Parser::new(); let input: &str = "áβçδèƒϱλïJƙ"; let consumed: &str = "áβç"; let leftover: &str = "δèƒϱλïJƙ"; let(tmp, res) = p.use_apply(input); p = tmp; match res { Done(extra, output) => { assert!(extra == leftover, "`Parser.use_apply` consumed leftover input. leftover: {}", extra); assert!(output == consumed, "`Parser.use_apply` doesn't return the string it was supposed to \ on success. Expected `{}`, got `{}`.", leftover, output); assert!(p.bcd == "βçδ", "Parser.use_apply didn't modify the parser field correctly: {}", p.bcd); }, other => panic!("`Parser.use_apply` didn't succeed when it should have. \ Got `{:?}`.", other), } } #[test] fn test_method_call_peek() { let p = Parser::new(); let input: &str = "ж¥ƺáβçδèƒϱλïJƙ"; let consumed: &str = "ж¥ƺ"; let(_, res) = p.simple_peek(input); match res { Done(extra, output) => { assert!(extra == input, "`Parser.simple_peek` consumed leftover input. leftover: {}", extra); assert!(output == consumed, "`Parser.simple_peek` doesn't return the string it consumed \ on success. Expected `{}`, got `{}`.", consumed, output); }, other => panic!("`Parser.simple_peek` didn't succeed when it should have. 
\ Got `{:?}`.", other), } } #[test] fn test_method_call_chain() { let mut p = Parser::new(); let input : &str = "βçδδèƒϱλïJƙℓ"; let leftover : &str = "δèƒϱλïJƙℓ"; let output : &str = "δèƒ"; let(tmp, res) = p.simple_chain(input); p = tmp; match res { Done(extra, out) => { assert!(extra == leftover, "`Parser.simple_chain` consumed leftover input. leftover: {}", extra); assert!(out == output, "`Parser.simple_chain` doesn't return the string it was supposed to \ on success. Expected `{}`, got `{}`.", output, out); assert!(p.bcd == "βçδ", "Parser.simple_chain didn't modify the parser field correctly: {}", p.bcd); }, other => panic!("`Parser.simple_chain` didn't succeed when it should have. \ Got `{:?}`.", other), } } } methods: switch from chain! to do_parse! Signed-off-by: Marc-Antoine Perennou <07f76cf0511c79b361712839686f3cee8c75791c@Perennou.com> //! Method macro combinators //! //! These macros make parsers as methods of structs //! and that can take methods of structs to call //! as parsers. //! //! There is a trick to make them easier to assemble, //! combinators are defined like this: //! //! ```ignore //! macro_rules! tag ( //! ($i:expr, $inp: expr) => ( //! { //! ... //! } //! ); //! ); //! ``` //! //! But when used as methods in other combinators, are used //! like this: //! //! ```ignore //! method!(my_function<Parser<'a> >, self, tag!("abcd")); //! ``` //! //! Internally, other combinators will rewrite //! that call to pass the input as second argument: //! //! ```ignore //! macro_rules! method ( //! ($name:ident<$a:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( //! fn $name( $self_: $a, i: &[u8] ) -> $crate::IResult<&[u8], &[u8]> { //! $submac!(i, $($args)*) //! } //! ); //! ); //! ``` //! //! The `method!` macro is similar to the `named!` macro in the macros module. //! While `named!` will create a parser function, `method!` will create a parser //! method on the struct it is defined in. //! //! 
Compared to the `named!` macro there are a few differences in how they are
//! invoked. A `method!` invocation always has to have the type of `self`
//! declared and it can't be a reference due to Rust's borrow lifetime
//! restrictions:
//! ```ignore
//! //       -`self`'s type-
//! method!(method_name< Parser<'a> >, ...);
//! ```
//! `self`'s type always comes first.
//! The next difference is you have to input the self struct. Due to Rust's
//! macro hygiene the macro can't declare it on its own.
//! ```ignore
//! //                                                 -self-
//! method!(method_name<Parser<'a>, &'a str, &'a str>, self, ...);
//! ```
//! When making a parsing struct with parsing methods, due to the static borrow
//! checker, calling any parsing methods on self (or any other parsing struct)
//! will cause self to be moved for the rest of the method. To get around this
//! restriction all of self is moved into the called method and then the called
//! method will return self to the caller.
//!
//! To call a method on self you need to use the `call_m!` macro. For example:
//! ```ignore
//! struct<'a> Parser<'a> {
//!   parsed: &'a str,
//! }
//! impl<'a> Parser<'a> {
//!   // Constructor omitted for brevity
//!   method!(take4<Parser<'a>, &'a str, &'a str>, self, take!(4));
//!   method!(caller<Parser<'a>, &'a str, &'a str>, self, call_m!(self.take4));
//! }
//! ```
//! More complicated combinations still mostly look the same as their `named!`
//! counterparts:
//! ```ignore
//! method!(pub simple_chain<&mut Parser<'a>, &'a str, &'a str>, self,
//!   do_parse!(
//!     call_m!(self.tag_abc) >>
//!     call_m!(self.tag_def) >>
//!     call_m!(self.tag_ghi) >>
//!     last: map!(call_m!(self.simple_peek), |parsed| sb.parsed = parsed) >>
//!     (last)
//!   )
//! );
//! ```
//! The three additions to method definitions to remember are:
//! 1. Specify `self`'s type
//! 2. Pass `self` to the macro
//! 3. Call parser methods using the `call_m!` macro.
/// Makes a method from a parser combination /// /// The must be set up because the compiler needs /// the information /// /// ```ignore /// method!(my_function<Parser<'a> >( &[u8] ) -> &[u8], tag!("abcd")); /// // first type parameter is `self`'s type, second is input, third is output /// method!(my_function<Parser<'a>, &[u8], &[u8]>, tag!("abcd")); /// //prefix them with 'pub' to make the methods public /// method!(pub my_function<Parser<'a>,&[u8], &[u8]>, tag!("abcd")); /// ``` #[macro_export] macro_rules! method ( // Non-public immutable self ($name:ident<$a:ty>( $i:ty ) -> $o:ty, $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty,$i:ty,$o:ty,$e:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( $self_: $a, i: $i ) -> ($a, $crate::IResult<$i, $o, $e>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty,$i:ty,$o:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty,$o:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name<'a>( $self_: $a, i: &'a[u8] ) -> ($a, $crate::IResult<&'a [u8], $o, u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], &[u8], u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); // Public immutable self (pub $name:ident<$a:ty>( $i:ty ) -> $o:ty, $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn $name( $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty,$i:ty,$o:ty,$e:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( $self_: $a, i: 
$i ) -> ($a, $crate::IResult<$i, $o, $e>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty,$i:ty,$o:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn $name( $self_: $a,i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty,$o:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn $name<'a>( $self_: $a, i: &'a[u8] ) -> ($a, $crate::IResult<&'a [u8], $o, u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn $name( $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], &[u8], u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); // Non-public mutable self ($name:ident<$a:ty>( $i:ty ) -> $o:ty, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty,$i:ty,$o:ty,$e:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i, $o, $e>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty,$i:ty,$o:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty,$o:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name<'a>( mut $self_: $a, i: &'a[u8] ) -> ($a, $crate::IResult<&'a [u8], $o, u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ($name:ident<$a:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( mut $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], &[u8], u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); // Public mutable self (pub $name:ident<$a:ty>( $i:ty ) -> $o:ty, mut $self_:ident, 
$submac:ident!( $($args:tt)* )) => ( pub fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty,$i:ty,$o:ty,$e:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i, $o, $e>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty,$i:ty,$o:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn $name( mut $self_: $a,i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty,$o:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn $name<'a>( mut $self_: $a, i: &'a[u8] ) -> ($a, $crate::IResult<&'a [u8], $o, u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); (pub $name:ident<$a:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( pub fn $name( mut $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], &[u8], u32>) { let result = $submac!(i, $($args)*); ($self_, result) } ); ); /// Used to called methods then move self back into self #[macro_export] macro_rules! call_m ( ($i:expr, $self_:ident.$method:ident) => ( { let (tmp, res) = $self_.$method($i); $self_ = tmp; res } ); ($i:expr, $self_:ident.$method:ident, $($args:expr),* ) => ( { let (tmp, res) = $self_.$method($i, $($args),*); $self_ = tmp; res } ); ); /// emulate function currying for method calls on structs /// `apply!(self.my_function, arg1, arg2, ...)` becomes `self.my_function(input, arg1, arg2, ...)` /// /// Supports up to 6 arguments #[macro_export] macro_rules! apply_m ( ($i:expr, $self_:ident.$method:ident, $($args:expr),* ) => ( { let (tmp, res) = $self_.$method( $i, $($args),* ); $self_ = tmp; res } ); ); #[cfg(test)] mod tests { use internal::IResult::*; // reproduce the tag_s and take_s macros, because of module import order macro_rules! 
tag_s ( ($i:expr, $tag: expr) => ( { let res: $crate::IResult<_,_> = if $tag.len() > $i.len() { $crate::IResult::Incomplete($crate::Needed::Size($tag.len())) //} else if &$i[0..$tag.len()] == $tag { } else if ($i).starts_with($tag) { $crate::IResult::Done(&$i[$tag.len()..], &$i[0..$tag.len()]) } else { $crate::IResult::Error(error_position!($crate::ErrorKind::TagStr, $i)) }; res } ); ); macro_rules! take_s ( ($i:expr, $count:expr) => ( { let cnt = $count as usize; let res: $crate::IResult<_,_> = if $i.chars().count() < cnt { $crate::IResult::Incomplete($crate::Needed::Size(cnt)) } else { let mut offset = $i.len(); let mut count = 0; for (o, _) in $i.char_indices() { if count == cnt { offset = o; break; } count += 1; } $crate::IResult::Done(&$i[offset..], &$i[..offset]) }; res } ); ); struct Parser<'a> { bcd: &'a str, } impl<'a> Parser<'a> { pub fn new() -> Parser<'a> { Parser{bcd: ""} } method!(tag_abc<Parser<'a>, &'a str, &'a str>, self, tag_s!("áβç")); method!(tag_bcd<Parser<'a> >(&'a str) -> &'a str, self, tag_s!("βçδ")); method!(pub tag_hij<Parser<'a> >(&'a str) -> &'a str, self, tag_s!("λïJ")); method!(pub tag_ijk<Parser<'a>, &'a str, &'a str>, self, tag_s!("ïJƙ")); method!(take3<Parser<'a>, &'a str, &'a str>, self, take_s!(3)); method!(pub simple_call<Parser<'a>, &'a str, &'a str>, mut self, call_m!(self.tag_abc) ); method!(pub simple_peek<Parser<'a>, &'a str, &'a str>, mut self, peek!(call_m!(self.take3)) ); method!(pub simple_chain<Parser<'a>, &'a str, &'a str>, mut self, do_parse!( map!(call_m!(self.tag_bcd), |bcd| self.bcd = bcd) >> last: call_m!(self.simple_peek) >> (last) ) ); fn tag_stuff(mut self: Parser<'a>, input: &'a str, something: &'a str) -> (Parser<'a>, ::IResult<&'a str, &'a str>) { self.bcd = something; let(tmp, res) = self.tag_abc(input); self = tmp; (self, res) } method!(use_apply<Parser<'a>, &'a str, &'a str>, mut self, apply_m!(self.tag_stuff, "βçδ")); } #[test] fn test_method_call_abc() { let p = Parser::new(); let input: &str = 
"áβçδèƒϱλïJƙ"; let consumed: &str = "áβç"; let leftover: &str = "δèƒϱλïJƙ"; let(_, res) = p.tag_abc(input); match res { Done(extra, output) => { assert!(extra == leftover, "`Parser.tag_abc` consumed leftover input. leftover: {}", extra); assert!(output == consumed, "`Parser.tag_abc` doesnt return the string it consumed \ on success. Expected `{}`, got `{}`.", consumed, output); }, other => panic!("`Parser.tag_abc` didn't succeed when it should have. \ Got `{:?}`.", other), } } #[test] fn test_method_call_bcd() { let p = Parser::new(); let input: &str = "βçδèƒϱλïJƙ"; let consumed: &str = "βçδ"; let leftover: &str = "èƒϱλïJƙ"; let(_, res) = p.tag_bcd(input); match res { Done(extra, output) => { assert!(extra == leftover, "`Parser.tag_bcd` consumed leftover input. leftover: {}", extra); assert!(output == consumed, "`Parser.tag_bcd` doesn't return the string it consumed \ on success. Expected `{}`, got `{}`.", consumed, output); }, other => panic!("`Parser.tag_bcd` didn't succeed when it should have. \ Got `{:?}`.", other), } } #[test] fn test_method_call_hij() { let p = Parser::new(); let input: &str = "λïJƙℓ₥ñôƥ9řƨ"; let consumed: &str = "λïJ"; let leftover: &str = "ƙℓ₥ñôƥ9řƨ"; let(_, res) = p.tag_hij(input); match res { Done(extra, output) => { assert!(extra == leftover, "`Parser.tag_hij` consumed leftover input. leftover: {}", extra); assert!(output == consumed, "`Parser.tag_hij` doesn't return the string it consumed \ on success. Expected `{}`, got `{}`.", consumed, output); }, other => panic!("`Parser.tag_hij` didn't succeed when it should have. \ Got `{:?}`.", other), } } #[test] fn test_method_call_ijk() { let p = Parser::new(); let input: &str = "ïJƙℓ₥ñôƥ9řƨ"; let consumed: &str = "ïJƙ"; let leftover: &str = "ℓ₥ñôƥ9řƨ"; let(_, res) = p.tag_ijk(input); match res { Done(extra, output) => { assert!(extra == leftover, "`Parser.tag_ijk` consumed leftover input. 
leftover: {}", extra); assert!(output == consumed, "`Parser.tag_ijk` doesn't return the string it consumed \ on success. Expected `{}`, got `{}`.", consumed, output); }, other => panic!("`Parser.tag_ijk` didn't succeed when it should have. \ Got `{:?}`.", other), } } #[test] fn test_method_simple_call() { let p = Parser::new(); let input: &str = "áβçδèƒϱλïJƙ"; let consumed: &str = "áβç"; let leftover: &str = "δèƒϱλïJƙ"; let(_, res) = p.simple_call(input); match res { Done(extra, output) => { assert!(extra == leftover, "`Parser.simple_call` consumed leftover input. leftover: {}", extra); assert!(output == consumed, "`Parser.simple_call` doesn't return the string it consumed \ on success. Expected `{}`, got `{}`.", consumed, output); }, other => panic!("`Parser.simple_call` didn't succeed when it should have. \ Got `{:?}`.", other), } } #[test] fn test_apply_m() { let mut p = Parser::new(); let input: &str = "áβçδèƒϱλïJƙ"; let consumed: &str = "áβç"; let leftover: &str = "δèƒϱλïJƙ"; let(tmp, res) = p.use_apply(input); p = tmp; match res { Done(extra, output) => { assert!(extra == leftover, "`Parser.use_apply` consumed leftover input. leftover: {}", extra); assert!(output == consumed, "`Parser.use_apply` doesn't return the string it was supposed to \ on success. Expected `{}`, got `{}`.", leftover, output); assert!(p.bcd == "βçδ", "Parser.use_apply didn't modify the parser field correctly: {}", p.bcd); }, other => panic!("`Parser.use_apply` didn't succeed when it should have. \ Got `{:?}`.", other), } } #[test] fn test_method_call_peek() { let p = Parser::new(); let input: &str = "ж¥ƺáβçδèƒϱλïJƙ"; let consumed: &str = "ж¥ƺ"; let(_, res) = p.simple_peek(input); match res { Done(extra, output) => { assert!(extra == input, "`Parser.simple_peek` consumed leftover input. leftover: {}", extra); assert!(output == consumed, "`Parser.simple_peek` doesn't return the string it consumed \ on success. 
Expected `{}`, got `{}`.", consumed, output); }, other => panic!("`Parser.simple_peek` didn't succeed when it should have. \ Got `{:?}`.", other), } } #[test] fn test_method_call_chain() { let mut p = Parser::new(); let input : &str = "βçδδèƒϱλïJƙℓ"; let leftover : &str = "δèƒϱλïJƙℓ"; let output : &str = "δèƒ"; let(tmp, res) = p.simple_chain(input); p = tmp; match res { Done(extra, out) => { assert!(extra == leftover, "`Parser.simple_chain` consumed leftover input. leftover: {}", extra); assert!(out == output, "`Parser.simple_chain` doesn't return the string it was supposed to \ on success. Expected `{}`, got `{}`.", output, out); assert!(p.bcd == "βçδ", "Parser.simple_chain didn't modify the parser field correctly: {}", p.bcd); }, other => panic!("`Parser.simple_chain` didn't succeed when it should have. \ Got `{:?}`.", other), } } }
use std::fmt; use std::io::{self, Read, Write}; use std::mem; use std::net::{self, SocketAddr, Shutdown}; use futures::stream::Stream; use futures::sync::oneshot; use futures::{Future, Poll, Async}; use mio; use io::{Io, IoFuture}; use reactor::{Handle, PollEvented}; /// An I/O object representing a TCP socket listening for incoming connections. /// /// This object can be converted into a stream of incoming connections for /// various forms of processing. pub struct TcpListener { io: PollEvented<mio::tcp::TcpListener>, pending_accept: Option<oneshot::Receiver<io::Result<(TcpStream, SocketAddr)>>>, } /// Stream returned by the `TcpListener::incoming` function representing the /// stream of sockets received from a listener. pub struct Incoming { inner: TcpListener, } impl TcpListener { /// Create a new TCP listener associated with this event loop. /// /// The TCP listener will bind to the provided `addr` address, if available. /// If the result is `Ok`, the socket has successfully bound. pub fn bind(addr: &SocketAddr, handle: &Handle) -> io::Result<TcpListener> { let l = try!(mio::tcp::TcpListener::bind(addr)); TcpListener::new(l, handle) } /// Attempt to accept a connection and create a new connected `TcpStream` if /// successful. /// /// This function will attempt an accept operation, but will not block /// waiting for it to complete. If the operation would block then a "would /// block" error is returned. Additionally, if this method would block, it /// registers the current task to receive a notification when it would /// otherwise not block. /// /// Note that typically for simple usage it's easier to treat incoming /// connections as a `Stream` of `TcpStream`s with the `incoming` method /// below. /// /// # Panics /// /// This function will panic if it is called outside the context of a /// future's task. It's recommended to only call this from the /// implementation of a `Future::poll`, if necessary. 
pub fn accept(&mut self) -> io::Result<(TcpStream, SocketAddr)> { loop { if let Some(mut pending) = self.pending_accept.take() { match pending.poll().expect("shouldn't be canceled") { Async::NotReady => { self.pending_accept = Some(pending); return Err(mio::would_block()) }, Async::Ready(r) => return r, } } if let Async::NotReady = self.io.poll_read() { return Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready")) } match self.io.get_ref().accept() { Err(e) => { if e.kind() == io::ErrorKind::WouldBlock { self.io.need_read(); } return Err(e) }, Ok((sock, addr)) => { let (tx, rx) = oneshot::channel(); let remote = self.io.remote().clone(); remote.spawn(move |handle| { let res = PollEvented::new(sock, handle) .map(move |io| { (TcpStream { io: io }, addr) }); tx.complete(res); Ok(()) }); self.pending_accept = Some(rx); // continue to polling the `rx` at the beginning of the loop } } } } /// Create a new TCP listener from the standard library's TCP listener. /// /// This method can be used when the `Handle::tcp_listen` method isn't /// sufficient because perhaps some more configuration is needed in terms of /// before the calls to `bind` and `listen`. /// /// This API is typically paired with the `net2` crate and the `TcpBuilder` /// type to build up and customize a listener before it's shipped off to the /// backing event loop. This allows configuration of options like /// `SO_REUSEPORT`, binding to multiple addresses, etc. /// /// The `addr` argument here is one of the addresses that `listener` is /// bound to and the listener will only be guaranteed to accept connections /// of the same address type currently. /// /// Finally, the `handle` argument is the event loop that this listener will /// be bound to. 
/// /// The platform specific behavior of this function looks like: /// /// * On Unix, the socket is placed into nonblocking mode and connections /// can be accepted as normal /// /// * On Windows, the address is stored internally and all future accepts /// will only be for the same IP version as `addr` specified. That is, if /// `addr` is an IPv4 address then all sockets accepted will be IPv4 as /// well (same for IPv6). pub fn from_listener(listener: net::TcpListener, addr: &SocketAddr, handle: &Handle) -> io::Result<TcpListener> { let l = try!(mio::tcp::TcpListener::from_listener(listener, addr)); TcpListener::new(l, handle) } fn new(listener: mio::tcp::TcpListener, handle: &Handle) -> io::Result<TcpListener> { let io = try!(PollEvented::new(listener, handle)); Ok(TcpListener { io: io, pending_accept: None }) } /// Test whether this socket is ready to be read or not. pub fn poll_read(&self) -> Async<()> { self.io.poll_read() } /// Returns the local address that this listener is bound to. /// /// This can be useful, for example, when binding to port 0 to figure out /// which port was actually bound. pub fn local_addr(&self) -> io::Result<SocketAddr> { self.io.get_ref().local_addr() } /// Consumes this listener, returning a stream of the sockets this listener /// accepts. /// /// This method returns an implementation of the `Stream` trait which /// resolves to the sockets the are accepted on this listener. pub fn incoming(self) -> Incoming { Incoming { inner: self } } /// Sets the value for the `IP_TTL` option on this socket. /// /// This value sets the time-to-live field that is used in every packet sent /// from this socket. pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { self.io.get_ref().set_ttl(ttl) } /// Gets the value of the `IP_TTL` option for this socket. /// /// For more information about this option, see [`set_ttl`][link]. 
/// /// [link]: #method.set_ttl pub fn ttl(&self) -> io::Result<u32> { self.io.get_ref().ttl() } /// Sets the value for the `IPV6_V6ONLY` option on this socket. /// /// If this is set to `true` then the socket is restricted to sending and /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications /// can bind the same port at the same time. /// /// If this is set to `false` then the socket can be used to send and /// receive packets from an IPv4-mapped IPv6 address. pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { self.io.get_ref().set_only_v6(only_v6) } /// Gets the value of the `IPV6_V6ONLY` option for this socket. /// /// For more information about this option, see [`set_only_v6`][link]. /// /// [link]: #method.set_only_v6 pub fn only_v6(&self) -> io::Result<bool> { self.io.get_ref().only_v6() } } impl fmt::Debug for TcpListener { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.io.get_ref().fmt(f) } } impl Stream for Incoming { type Item = (TcpStream, SocketAddr); type Error = io::Error; fn poll(&mut self) -> Poll<Option<Self::Item>, io::Error> { Ok(Async::Ready(Some(try_nb!(self.inner.accept())))) } } /// An I/O object representing a TCP stream connected to a remote endpoint. /// /// A TCP stream can either be created by connecting to an endpoint or by /// accepting a connection from a listener. Inside the stream is access to the /// raw underlying I/O object as well as streams for the read/write /// notifications on the stream itself. pub struct TcpStream { io: PollEvented<mio::tcp::TcpStream>, } /// Future returned by `TcpStream::connect` which will resolve to a `TcpStream` /// when the stream is connected. pub struct TcpStreamNew { inner: TcpStreamNewState, } enum TcpStreamNewState { Waiting(TcpStream), Error(io::Error), Empty, } impl TcpStream { /// Create a new TCP stream connected to the specified address. /// /// This function will create a new TCP socket and attempt to connect it to /// the `addr` provided. 
The returned future will be resolved once the /// stream has successfully connected. If an error happens during the /// connection or during the socket creation, that error will be returned to /// the future instead. pub fn connect(addr: &SocketAddr, handle: &Handle) -> TcpStreamNew { let inner = match mio::tcp::TcpStream::connect(addr) { Ok(tcp) => TcpStream::new(tcp, handle), Err(e) => TcpStreamNewState::Error(e), }; TcpStreamNew { inner: inner } } fn new(connected_stream: mio::tcp::TcpStream, handle: &Handle) -> TcpStreamNewState { match PollEvented::new(connected_stream, handle) { Ok(io) => TcpStreamNewState::Waiting(TcpStream { io: io }), Err(e) => TcpStreamNewState::Error(e), } } /// Creates a new `TcpStream` from the pending socket inside the given /// `std::net::TcpStream`, connecting it to the address specified. /// /// This constructor allows configuring the socket before it's actually /// connected, and this function will transfer ownership to the returned /// `TcpStream` if successful. An unconnected `TcpStream` can be created /// with the `net2::TcpBuilder` type (and also configured via that route). /// /// The platform specific behavior of this function looks like: /// /// * On Unix, the socket is placed into nonblocking mode and then a /// `connect` call is issued. /// /// * On Windows, the address is stored internally and the connect operation /// is issued when the returned `TcpStream` is registered with an event /// loop. Note that on Windows you must `bind` a socket before it can be /// connected, so if a custom `TcpBuilder` is used it should be bound /// (perhaps to `INADDR_ANY`) before this method is called. pub fn connect_stream(stream: net::TcpStream, addr: &SocketAddr, handle: &Handle) -> IoFuture<TcpStream> { let state = match mio::tcp::TcpStream::connect_stream(stream, addr) { Ok(tcp) => TcpStream::new(tcp, handle), Err(e) => TcpStreamNewState::Error(e), }; state.boxed() } /// Test whether this socket is ready to be read or not. 
/// /// If the socket is *not* readable then the current task is scheduled to /// get a notification when the socket does become readable. That is, this /// is only suitable for calling in a `Future::poll` method and will /// automatically handle ensuring a retry once the socket is readable again. pub fn poll_read(&self) -> Async<()> { self.io.poll_read() } /// Test whether this socket is ready to be written to or not. /// /// If the socket is *not* writable then the current task is scheduled to /// get a notification when the socket does become writable. That is, this /// is only suitable for calling in a `Future::poll` method and will /// automatically handle ensuring a retry once the socket is writable again. pub fn poll_write(&self) -> Async<()> { self.io.poll_write() } /// Returns the local address that this stream is bound to. pub fn local_addr(&self) -> io::Result<SocketAddr> { self.io.get_ref().local_addr() } /// Returns the remote address that this stream is connected to. pub fn peer_addr(&self) -> io::Result<SocketAddr> { self.io.get_ref().peer_addr() } /// Shuts down the read, write, or both halves of this connection. /// /// This function will cause all pending and future I/O on the specified /// portions to return immediately with an appropriate value (see the /// documentation of `Shutdown`). pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { self.io.get_ref().shutdown(how) } /// Sets the value of the `TCP_NODELAY` option on this socket. /// /// If set, this option disables the Nagle algorithm. This means that /// segments are always sent as soon as possible, even if there is only a /// small amount of data. When not set, data is buffered until there is a /// sufficient amount to send out, thereby avoiding the frequent sending of /// small packets. pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { self.io.get_ref().set_nodelay(nodelay) } /// Gets the value of the `TCP_NODELAY` option on this socket. 
    ///
    /// For more information about this option, see [`set_nodelay`][link].
    ///
    /// [link]: #method.set_nodelay
    pub fn nodelay(&self) -> io::Result<bool> {
        // Pure read-side accessor: delegates to the underlying mio socket.
        self.io.get_ref().nodelay()
    }

    /// Sets whether keepalive messages are enabled to be sent on this socket.
    ///
    /// On Unix, this option will set the `SO_KEEPALIVE` as well as the
    /// `TCP_KEEPALIVE` or `TCP_KEEPIDLE` option (depending on your platform).
    /// On Windows, this will set the `SIO_KEEPALIVE_VALS` option.
    ///
    /// If `None` is specified then keepalive messages are disabled, otherwise
    /// the number of milliseconds specified will be the time to remain idle
    /// before sending a TCP keepalive probe.
    ///
    /// Some platforms specify this value in seconds, so sub-second millisecond
    /// specifications may be omitted.
    pub fn set_keepalive_ms(&self, keepalive: Option<u32>) -> io::Result<()> {
        // Delegates to the mio-level socket; option is applied immediately.
        self.io.get_ref().set_keepalive_ms(keepalive)
    }

    /// Returns whether keepalive messages are enabled on this socket, and if so
    /// the amount of milliseconds between them.
    ///
    /// For more information about this option, see [`set_keepalive_ms`][link].
    ///
    /// [link]: #method.set_keepalive_ms
    pub fn keepalive_ms(&self) -> io::Result<Option<u32>> {
        self.io.get_ref().keepalive_ms()
    }

    /// Sets the value for the `IP_TTL` option on this socket.
    ///
    /// This value sets the time-to-live field that is used in every packet sent
    /// from this socket.
    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
        self.io.get_ref().set_ttl(ttl)
    }

    /// Gets the value of the `IP_TTL` option for this socket.
    ///
    /// For more information about this option, see [`set_ttl`][link].
/// /// [link]: #method.set_ttl pub fn ttl(&self) -> io::Result<u32> { self.io.get_ref().ttl() } } impl Read for TcpStream { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.io.read(buf) } } impl Write for TcpStream { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.io.write(buf) } fn flush(&mut self) -> io::Result<()> { self.io.flush() } } impl Io for TcpStream { fn poll_read(&mut self) -> Async<()> { <TcpStream>::poll_read(self) } fn poll_write(&mut self) -> Async<()> { <TcpStream>::poll_write(self) } } impl<'a> Read for &'a TcpStream { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { (&self.io).read(buf) } } impl<'a> Write for &'a TcpStream { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { (&self.io).write(buf) } fn flush(&mut self) -> io::Result<()> { (&self.io).flush() } } impl<'a> Io for &'a TcpStream { fn poll_read(&mut self) -> Async<()> { <TcpStream>::poll_read(self) } fn poll_write(&mut self) -> Async<()> { <TcpStream>::poll_write(self) } } impl fmt::Debug for TcpStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.io.get_ref().fmt(f) } } impl Future for TcpStreamNew { type Item = TcpStream; type Error = io::Error; fn poll(&mut self) -> Poll<TcpStream, io::Error> { self.inner.poll() } } impl Future for TcpStreamNewState { type Item = TcpStream; type Error = io::Error; fn poll(&mut self) -> Poll<TcpStream, io::Error> { { let stream = match *self { TcpStreamNewState::Waiting(ref s) => s, TcpStreamNewState::Error(_) => { let e = match mem::replace(self, TcpStreamNewState::Empty) { TcpStreamNewState::Error(e) => e, _ => panic!(), }; return Err(e) } TcpStreamNewState::Empty => panic!("can't poll TCP stream twice"), }; // Once we've connected, wait for the stream to be writable as // that's when the actual connection has been initiated. Once we're // writable we check for `take_socket_error` to see if the connect // actually hit an error or not. // // If all that succeeded then we ship everything on up. 
if let Async::NotReady = stream.io.poll_write() { return Ok(Async::NotReady) } if let Some(e) = try!(stream.io.get_ref().take_error()) { return Err(e) } } match mem::replace(self, TcpStreamNewState::Empty) { TcpStreamNewState::Waiting(stream) => Ok(Async::Ready(stream)), _ => panic!(), } } } #[cfg(unix)] mod sys { use std::os::unix::prelude::*; use super::{TcpStream, TcpListener}; impl AsRawFd for TcpStream { fn as_raw_fd(&self) -> RawFd { self.io.get_ref().as_raw_fd() } } impl AsRawFd for TcpListener { fn as_raw_fd(&self) -> RawFd { self.io.get_ref().as_raw_fd() } } } #[cfg(windows)] mod sys { // TODO: let's land these upstream with mio and then we can add them here. // // use std::os::windows::prelude::*; // use super::{TcpStream, TcpListener}; // // impl AsRawHandle for TcpStream { // fn as_raw_handle(&self) -> RawHandle { // self.io.get_ref().as_raw_handle() // } // } // // impl AsRawHandle for TcpListener { // fn as_raw_handle(&self) -> RawHandle { // self.listener.io().as_raw_handle() // } // } } Add a fast path to TcpListener::accept use std::fmt; use std::io::{self, Read, Write}; use std::mem; use std::net::{self, SocketAddr, Shutdown}; use futures::stream::Stream; use futures::sync::oneshot; use futures::{Future, Poll, Async}; use mio; use io::{Io, IoFuture}; use reactor::{Handle, PollEvented}; /// An I/O object representing a TCP socket listening for incoming connections. /// /// This object can be converted into a stream of incoming connections for /// various forms of processing. pub struct TcpListener { io: PollEvented<mio::tcp::TcpListener>, pending_accept: Option<oneshot::Receiver<io::Result<(TcpStream, SocketAddr)>>>, } /// Stream returned by the `TcpListener::incoming` function representing the /// stream of sockets received from a listener. pub struct Incoming { inner: TcpListener, } impl TcpListener { /// Create a new TCP listener associated with this event loop. /// /// The TCP listener will bind to the provided `addr` address, if available. 
/// If the result is `Ok`, the socket has successfully bound. pub fn bind(addr: &SocketAddr, handle: &Handle) -> io::Result<TcpListener> { let l = try!(mio::tcp::TcpListener::bind(addr)); TcpListener::new(l, handle) } /// Attempt to accept a connection and create a new connected `TcpStream` if /// successful. /// /// This function will attempt an accept operation, but will not block /// waiting for it to complete. If the operation would block then a "would /// block" error is returned. Additionally, if this method would block, it /// registers the current task to receive a notification when it would /// otherwise not block. /// /// Note that typically for simple usage it's easier to treat incoming /// connections as a `Stream` of `TcpStream`s with the `incoming` method /// below. /// /// # Panics /// /// This function will panic if it is called outside the context of a /// future's task. It's recommended to only call this from the /// implementation of a `Future::poll`, if necessary. pub fn accept(&mut self) -> io::Result<(TcpStream, SocketAddr)> { loop { if let Some(mut pending) = self.pending_accept.take() { match pending.poll().expect("shouldn't be canceled") { Async::NotReady => { self.pending_accept = Some(pending); return Err(mio::would_block()) }, Async::Ready(r) => return r, } } if let Async::NotReady = self.io.poll_read() { return Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready")) } match self.io.get_ref().accept() { Err(e) => { if e.kind() == io::ErrorKind::WouldBlock { self.io.need_read(); } return Err(e) }, Ok((sock, addr)) => { // Fast path if we haven't left the event loop if let Some(handle) = self.io.remote().handle() { let io = try!(PollEvented::new(sock, &handle)); return Ok((TcpStream { io: io }, addr)) } // If we're off the event loop then send the socket back // over there to get registered and then we'll get it back // eventually. 
let (tx, rx) = oneshot::channel(); let remote = self.io.remote().clone(); remote.spawn(move |handle| { let res = PollEvented::new(sock, handle) .map(move |io| { (TcpStream { io: io }, addr) }); tx.complete(res); Ok(()) }); self.pending_accept = Some(rx); // continue to polling the `rx` at the beginning of the loop } } } } /// Create a new TCP listener from the standard library's TCP listener. /// /// This method can be used when the `Handle::tcp_listen` method isn't /// sufficient because perhaps some more configuration is needed in terms of /// before the calls to `bind` and `listen`. /// /// This API is typically paired with the `net2` crate and the `TcpBuilder` /// type to build up and customize a listener before it's shipped off to the /// backing event loop. This allows configuration of options like /// `SO_REUSEPORT`, binding to multiple addresses, etc. /// /// The `addr` argument here is one of the addresses that `listener` is /// bound to and the listener will only be guaranteed to accept connections /// of the same address type currently. /// /// Finally, the `handle` argument is the event loop that this listener will /// be bound to. /// /// The platform specific behavior of this function looks like: /// /// * On Unix, the socket is placed into nonblocking mode and connections /// can be accepted as normal /// /// * On Windows, the address is stored internally and all future accepts /// will only be for the same IP version as `addr` specified. That is, if /// `addr` is an IPv4 address then all sockets accepted will be IPv4 as /// well (same for IPv6). 
pub fn from_listener(listener: net::TcpListener, addr: &SocketAddr, handle: &Handle) -> io::Result<TcpListener> { let l = try!(mio::tcp::TcpListener::from_listener(listener, addr)); TcpListener::new(l, handle) } fn new(listener: mio::tcp::TcpListener, handle: &Handle) -> io::Result<TcpListener> { let io = try!(PollEvented::new(listener, handle)); Ok(TcpListener { io: io, pending_accept: None }) } /// Test whether this socket is ready to be read or not. pub fn poll_read(&self) -> Async<()> { self.io.poll_read() } /// Returns the local address that this listener is bound to. /// /// This can be useful, for example, when binding to port 0 to figure out /// which port was actually bound. pub fn local_addr(&self) -> io::Result<SocketAddr> { self.io.get_ref().local_addr() } /// Consumes this listener, returning a stream of the sockets this listener /// accepts. /// /// This method returns an implementation of the `Stream` trait which /// resolves to the sockets the are accepted on this listener. pub fn incoming(self) -> Incoming { Incoming { inner: self } } /// Sets the value for the `IP_TTL` option on this socket. /// /// This value sets the time-to-live field that is used in every packet sent /// from this socket. pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { self.io.get_ref().set_ttl(ttl) } /// Gets the value of the `IP_TTL` option for this socket. /// /// For more information about this option, see [`set_ttl`][link]. /// /// [link]: #method.set_ttl pub fn ttl(&self) -> io::Result<u32> { self.io.get_ref().ttl() } /// Sets the value for the `IPV6_V6ONLY` option on this socket. /// /// If this is set to `true` then the socket is restricted to sending and /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications /// can bind the same port at the same time. /// /// If this is set to `false` then the socket can be used to send and /// receive packets from an IPv4-mapped IPv6 address. 
pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { self.io.get_ref().set_only_v6(only_v6) } /// Gets the value of the `IPV6_V6ONLY` option for this socket. /// /// For more information about this option, see [`set_only_v6`][link]. /// /// [link]: #method.set_only_v6 pub fn only_v6(&self) -> io::Result<bool> { self.io.get_ref().only_v6() } } impl fmt::Debug for TcpListener { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.io.get_ref().fmt(f) } } impl Stream for Incoming { type Item = (TcpStream, SocketAddr); type Error = io::Error; fn poll(&mut self) -> Poll<Option<Self::Item>, io::Error> { Ok(Async::Ready(Some(try_nb!(self.inner.accept())))) } } /// An I/O object representing a TCP stream connected to a remote endpoint. /// /// A TCP stream can either be created by connecting to an endpoint or by /// accepting a connection from a listener. Inside the stream is access to the /// raw underlying I/O object as well as streams for the read/write /// notifications on the stream itself. pub struct TcpStream { io: PollEvented<mio::tcp::TcpStream>, } /// Future returned by `TcpStream::connect` which will resolve to a `TcpStream` /// when the stream is connected. pub struct TcpStreamNew { inner: TcpStreamNewState, } enum TcpStreamNewState { Waiting(TcpStream), Error(io::Error), Empty, } impl TcpStream { /// Create a new TCP stream connected to the specified address. /// /// This function will create a new TCP socket and attempt to connect it to /// the `addr` provided. The returned future will be resolved once the /// stream has successfully connected. If an error happens during the /// connection or during the socket creation, that error will be returned to /// the future instead. 
pub fn connect(addr: &SocketAddr, handle: &Handle) -> TcpStreamNew { let inner = match mio::tcp::TcpStream::connect(addr) { Ok(tcp) => TcpStream::new(tcp, handle), Err(e) => TcpStreamNewState::Error(e), }; TcpStreamNew { inner: inner } } fn new(connected_stream: mio::tcp::TcpStream, handle: &Handle) -> TcpStreamNewState { match PollEvented::new(connected_stream, handle) { Ok(io) => TcpStreamNewState::Waiting(TcpStream { io: io }), Err(e) => TcpStreamNewState::Error(e), } } /// Creates a new `TcpStream` from the pending socket inside the given /// `std::net::TcpStream`, connecting it to the address specified. /// /// This constructor allows configuring the socket before it's actually /// connected, and this function will transfer ownership to the returned /// `TcpStream` if successful. An unconnected `TcpStream` can be created /// with the `net2::TcpBuilder` type (and also configured via that route). /// /// The platform specific behavior of this function looks like: /// /// * On Unix, the socket is placed into nonblocking mode and then a /// `connect` call is issued. /// /// * On Windows, the address is stored internally and the connect operation /// is issued when the returned `TcpStream` is registered with an event /// loop. Note that on Windows you must `bind` a socket before it can be /// connected, so if a custom `TcpBuilder` is used it should be bound /// (perhaps to `INADDR_ANY`) before this method is called. pub fn connect_stream(stream: net::TcpStream, addr: &SocketAddr, handle: &Handle) -> IoFuture<TcpStream> { let state = match mio::tcp::TcpStream::connect_stream(stream, addr) { Ok(tcp) => TcpStream::new(tcp, handle), Err(e) => TcpStreamNewState::Error(e), }; state.boxed() } /// Test whether this socket is ready to be read or not. /// /// If the socket is *not* readable then the current task is scheduled to /// get a notification when the socket does become readable. 
That is, this /// is only suitable for calling in a `Future::poll` method and will /// automatically handle ensuring a retry once the socket is readable again. pub fn poll_read(&self) -> Async<()> { self.io.poll_read() } /// Test whether this socket is ready to be written to or not. /// /// If the socket is *not* writable then the current task is scheduled to /// get a notification when the socket does become writable. That is, this /// is only suitable for calling in a `Future::poll` method and will /// automatically handle ensuring a retry once the socket is writable again. pub fn poll_write(&self) -> Async<()> { self.io.poll_write() } /// Returns the local address that this stream is bound to. pub fn local_addr(&self) -> io::Result<SocketAddr> { self.io.get_ref().local_addr() } /// Returns the remote address that this stream is connected to. pub fn peer_addr(&self) -> io::Result<SocketAddr> { self.io.get_ref().peer_addr() } /// Shuts down the read, write, or both halves of this connection. /// /// This function will cause all pending and future I/O on the specified /// portions to return immediately with an appropriate value (see the /// documentation of `Shutdown`). pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { self.io.get_ref().shutdown(how) } /// Sets the value of the `TCP_NODELAY` option on this socket. /// /// If set, this option disables the Nagle algorithm. This means that /// segments are always sent as soon as possible, even if there is only a /// small amount of data. When not set, data is buffered until there is a /// sufficient amount to send out, thereby avoiding the frequent sending of /// small packets. pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { self.io.get_ref().set_nodelay(nodelay) } /// Gets the value of the `TCP_NODELAY` option on this socket. /// /// For more information about this option, see [`set_nodelay`][link]. 
    ///
    /// [link]: #method.set_nodelay
    pub fn nodelay(&self) -> io::Result<bool> {
        // Pure read-side accessor: delegates to the underlying mio socket.
        self.io.get_ref().nodelay()
    }

    /// Sets whether keepalive messages are enabled to be sent on this socket.
    ///
    /// On Unix, this option will set the `SO_KEEPALIVE` as well as the
    /// `TCP_KEEPALIVE` or `TCP_KEEPIDLE` option (depending on your platform).
    /// On Windows, this will set the `SIO_KEEPALIVE_VALS` option.
    ///
    /// If `None` is specified then keepalive messages are disabled, otherwise
    /// the number of milliseconds specified will be the time to remain idle
    /// before sending a TCP keepalive probe.
    ///
    /// Some platforms specify this value in seconds, so sub-second millisecond
    /// specifications may be omitted.
    pub fn set_keepalive_ms(&self, keepalive: Option<u32>) -> io::Result<()> {
        // Delegates to the mio-level socket; option is applied immediately.
        self.io.get_ref().set_keepalive_ms(keepalive)
    }

    /// Returns whether keepalive messages are enabled on this socket, and if so
    /// the amount of milliseconds between them.
    ///
    /// For more information about this option, see [`set_keepalive_ms`][link].
    ///
    /// [link]: #method.set_keepalive_ms
    pub fn keepalive_ms(&self) -> io::Result<Option<u32>> {
        self.io.get_ref().keepalive_ms()
    }

    /// Sets the value for the `IP_TTL` option on this socket.
    ///
    /// This value sets the time-to-live field that is used in every packet sent
    /// from this socket.
    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
        self.io.get_ref().set_ttl(ttl)
    }

    /// Gets the value of the `IP_TTL` option for this socket.
    ///
    /// For more information about this option, see [`set_ttl`][link].
/// /// [link]: #method.set_ttl pub fn ttl(&self) -> io::Result<u32> { self.io.get_ref().ttl() } } impl Read for TcpStream { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.io.read(buf) } } impl Write for TcpStream { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.io.write(buf) } fn flush(&mut self) -> io::Result<()> { self.io.flush() } } impl Io for TcpStream { fn poll_read(&mut self) -> Async<()> { <TcpStream>::poll_read(self) } fn poll_write(&mut self) -> Async<()> { <TcpStream>::poll_write(self) } } impl<'a> Read for &'a TcpStream { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { (&self.io).read(buf) } } impl<'a> Write for &'a TcpStream { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { (&self.io).write(buf) } fn flush(&mut self) -> io::Result<()> { (&self.io).flush() } } impl<'a> Io for &'a TcpStream { fn poll_read(&mut self) -> Async<()> { <TcpStream>::poll_read(self) } fn poll_write(&mut self) -> Async<()> { <TcpStream>::poll_write(self) } } impl fmt::Debug for TcpStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.io.get_ref().fmt(f) } } impl Future for TcpStreamNew { type Item = TcpStream; type Error = io::Error; fn poll(&mut self) -> Poll<TcpStream, io::Error> { self.inner.poll() } } impl Future for TcpStreamNewState { type Item = TcpStream; type Error = io::Error; fn poll(&mut self) -> Poll<TcpStream, io::Error> { { let stream = match *self { TcpStreamNewState::Waiting(ref s) => s, TcpStreamNewState::Error(_) => { let e = match mem::replace(self, TcpStreamNewState::Empty) { TcpStreamNewState::Error(e) => e, _ => panic!(), }; return Err(e) } TcpStreamNewState::Empty => panic!("can't poll TCP stream twice"), }; // Once we've connected, wait for the stream to be writable as // that's when the actual connection has been initiated. Once we're // writable we check for `take_socket_error` to see if the connect // actually hit an error or not. // // If all that succeeded then we ship everything on up. 
if let Async::NotReady = stream.io.poll_write() { return Ok(Async::NotReady) } if let Some(e) = try!(stream.io.get_ref().take_error()) { return Err(e) } } match mem::replace(self, TcpStreamNewState::Empty) { TcpStreamNewState::Waiting(stream) => Ok(Async::Ready(stream)), _ => panic!(), } } } #[cfg(unix)] mod sys { use std::os::unix::prelude::*; use super::{TcpStream, TcpListener}; impl AsRawFd for TcpStream { fn as_raw_fd(&self) -> RawFd { self.io.get_ref().as_raw_fd() } } impl AsRawFd for TcpListener { fn as_raw_fd(&self) -> RawFd { self.io.get_ref().as_raw_fd() } } } #[cfg(windows)] mod sys { // TODO: let's land these upstream with mio and then we can add them here. // // use std::os::windows::prelude::*; // use super::{TcpStream, TcpListener}; // // impl AsRawHandle for TcpStream { // fn as_raw_handle(&self) -> RawHandle { // self.io.get_ref().as_raw_handle() // } // } // // impl AsRawHandle for TcpListener { // fn as_raw_handle(&self) -> RawHandle { // self.listener.io().as_raw_handle() // } // } }
use chrono::{DateTime, UTC};
use bson::{Array,Bson,Document};
use super::oid::ObjectId;
use std::collections::BTreeMap;
use std::fmt::{Display, Error, Formatter};
use std::iter::{FromIterator, Map};
use std::vec::IntoIter;
use std::slice;

/// Error to indicate that either a value was empty or it contained an unexpected
/// type, for use with the direct getters.
#[derive(Debug,PartialEq)]
pub enum ValueAccessError {
    NotPresent,
    UnexpectedType
}

/// Result alias returned by the typed getters (`get_f64`, `get_str`, ...).
pub type ValueAccessResult<T> = Result<T, ValueAccessError>;

/// A BSON document represented as an associative BTree Map with insertion ordering.
#[derive(Debug,Clone,PartialEq)]
pub struct OrderedDocument {
    // Insertion order of the keys; the map below holds the actual values.
    pub keys: Vec<String>,
    document: BTreeMap<String, Bson>,
}

impl Display for OrderedDocument {
    // Renders the document as `{ key: value, ... }` in insertion order.
    fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> {
        let mut string = "{ ".to_owned();
        for (key, value) in self.iter() {
            // Only prepend a separator after the first entry.
            if !string.eq("{ ") {
                string.push_str(", ");
            }
            string.push_str(&format!("{}: {}", key, value));
        }
        string.push_str(" }");
        fmt.write_str(&string)
    }
}

/// An owning iterator over OrderedDocument entries.
pub struct OrderedDocumentIntoIterator {
    vec_iter: IntoIter<String>,
    document: BTreeMap<String, Bson>,
}

/// A borrowing iterator over OrderedDocument entries.
pub struct OrderedDocumentIterator<'a> {
    vec_iter: slice::Iter<'a, String>,
    document: &'a BTreeMap<String, Bson>,
}

/// An iterator over an OrderedDocument's keys.
pub struct Keys<'a> {
    inner: Map<OrderedDocumentIterator<'a>, fn((&'a String, &'a Bson)) -> &'a String>
}

/// An iterator over an OrderedDocument's values.
pub struct Values<'a> {
    inner: Map<OrderedDocumentIterator<'a>, fn((&'a String, &'a Bson)) -> &'a Bson>
}

impl<'a> Iterator for Keys<'a> {
    type Item = &'a String;

    fn next(&mut self) -> Option<(&'a String)> {
        self.inner.next()
    }
}

impl<'a> Iterator for Values<'a> {
    type Item = &'a Bson;

    fn next(&mut self) -> Option<(&'a Bson)> {
        self.inner.next()
    }
}

impl IntoIterator for OrderedDocument {
    type Item = (String, Bson);
    type IntoIter = OrderedDocumentIntoIterator;

    fn into_iter(self) -> Self::IntoIter {
        OrderedDocumentIntoIterator {
            document: self.document,
            vec_iter: self.keys.into_iter()
        }
    }
}

impl<'a> IntoIterator for &'a OrderedDocument {
    type Item = (&'a String, &'a Bson);
    type IntoIter = OrderedDocumentIterator<'a>;

    fn into_iter(self) -> Self::IntoIter {
        let ref keys = self.keys;
        OrderedDocumentIterator {
            vec_iter: keys.into_iter(),
            document: &self.document,
        }
    }
}

impl FromIterator<(String, Bson)> for OrderedDocument {
    fn from_iter<T: IntoIterator<Item=(String, Bson)>>(iter: T) -> Self {
        let mut doc = OrderedDocument::new();
        for (k, v) in iter {
            doc.insert(k, v.to_owned());
        }
        doc
    }
}

impl<'a> Iterator for OrderedDocumentIntoIterator {
    type Item = (String, Bson);

    fn next(&mut self) -> Option<(String, Bson)> {
        // Walk keys in insertion order, moving each value out of the map.
        match self.vec_iter.next() {
            Some(key) => {
                let val = self.document.remove(&key[..]).unwrap();
                Some((key, val))
            },
            None => None,
        }
    }
}

impl<'a> Iterator for OrderedDocumentIterator<'a> {
    type Item = (&'a String, &'a Bson);

    fn next(&mut self) -> Option<(&'a String, &'a Bson)> {
        match self.vec_iter.next() {
            Some(key) => {
                // `keys` and `document` are kept in sync, so the lookup
                // cannot fail.
                let val = self.document.get(&key[..]).unwrap();
                Some((&key, val))
            },
            None => None,
        }
    }
}

impl OrderedDocument {
    /// Creates a new empty OrderedDocument.
    pub fn new() -> OrderedDocument {
        OrderedDocument {
            keys: Vec::new(),
            document: BTreeMap::new(),
        }
    }

    /// Gets an iterator over the entries of the map.
    pub fn iter<'a>(&'a self) -> OrderedDocumentIterator<'a> {
        self.into_iter()
    }

    /// Clears the document, removing all values.
    pub fn clear(&mut self) {
        self.keys.clear();
        self.document.clear();
    }

    /// Returns a reference to the Bson corresponding to the key.
    pub fn get(&self, key: &str) -> Option<&Bson> {
        self.document.get(key)
    }

    /// Gets a mutable reference to the Bson corresponding to the key
    pub fn get_mut(&mut self, key: &str) -> Option<&mut Bson> {
        self.document.get_mut(key)
    }

    /// Get a floating point value for this key if it exists and has
    /// the correct type.
    pub fn get_f64(&self, key: &str) -> ValueAccessResult<f64> {
        match self.get(key) {
            Some(&Bson::FloatingPoint(v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get a string slice for this key if it exists and has the correct type.
    pub fn get_str(&self, key: &str) -> ValueAccessResult<&str> {
        match self.get(key) {
            Some(&Bson::String(ref v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get a reference to an array for this key if it exists and has
    /// the correct type.
    pub fn get_array(&self, key: &str) -> ValueAccessResult<&Array> {
        match self.get(key) {
            Some(&Bson::Array(ref v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get a reference to a document for this key if it exists and has
    /// the correct type.
    pub fn get_document(&self, key: &str) -> ValueAccessResult<&Document> {
        match self.get(key) {
            Some(&Bson::Document(ref v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get a bool value for this key if it exists and has the correct type.
    pub fn get_bool(&self, key: &str) -> ValueAccessResult<bool> {
        match self.get(key) {
            Some(&Bson::Boolean(v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Returns whether this key has a null value
    pub fn is_null(&self, key: &str) -> bool {
        self.get(key) == Some(&Bson::Null)
    }

    /// Get an i32 value for this key if it exists and has the correct type.
    pub fn get_i32(&self, key: &str) -> ValueAccessResult<i32> {
        match self.get(key) {
            Some(&Bson::I32(v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get an i64 value for this key if it exists and has the correct type.
    pub fn get_i64(&self, key: &str) -> ValueAccessResult<i64> {
        match self.get(key) {
            Some(&Bson::I64(v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get a time stamp value for this key if it exists and has the correct type.
    pub fn get_time_stamp(&self, key: &str) -> ValueAccessResult<i64> {
        match self.get(key) {
            Some(&Bson::TimeStamp(v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get an object id value for this key if it exists and has the correct type.
    pub fn get_object_id(&self, key: &str) -> ValueAccessResult<&ObjectId> {
        match self.get(key) {
            Some(&Bson::ObjectId(ref v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get a UTC datetime value for this key if it exists and has the correct type.
    pub fn get_utc_datetime(&self, key: &str) -> ValueAccessResult<&DateTime<UTC>> {
        match self.get(key) {
            Some(&Bson::UtcDatetime(ref v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Returns true if the map contains a value for the specified key.
    pub fn contains_key(&self, key: &str) -> bool {
        self.document.contains_key(key)
    }

    /// Returns the position of the key in the ordered vector, if it exists.
    pub fn position(&self, key: &str) -> Option<usize> {
        self.keys.iter().position(|x| x == key)
    }

    /// Gets a collection of all keys in the document.
    pub fn keys<'a>(&'a self) -> Keys<'a> {
        // Named fn items coerced to fn pointers so `Keys` can hold a
        // non-generic `Map` adaptor.
        fn first<A, B>((a, _): (A, B)) -> A {
            a
        }
        let first: fn((&'a String, &'a Bson)) -> &'a String = first;

        Keys { inner: self.iter().map(first) }
    }

    /// Gets a collection of all values in the document.
    pub fn values<'a>(&'a self) -> Values<'a> {
        fn second<A, B>((_, b): (A, B)) -> B {
            b
        }
        let second: fn((&'a String, &'a Bson)) -> &'a Bson = second;

        Values { inner: self.iter().map(second) }
    }

    /// Returns the number of elements in the document.
    pub fn len(&self) -> usize {
        self.keys.len()
    }

    /// Returns true if the document contains no elements
    pub fn is_empty(&self) -> bool {
        self.document.is_empty()
    }

    /// Sets the value of the entry with the OccupiedEntry's key,
    /// and returns the entry's old value. Accepts any type that
    /// can be converted into Bson.
    pub fn insert<KT: Into<String>, BT: Into<Bson>>(&mut self, key: KT, val: BT) -> Option<Bson> {
        self.insert_bson(key.into(), val.into())
    }

    /// Sets the value of the entry with the OccupiedEntry's key,
    /// and returns the entry's old value.
    pub fn insert_bson(&mut self, key: String, val: Bson) -> Option<Bson> {
        {
            // Re-inserting an existing key moves it to the back of the
            // insertion order; scoped so the borrow of `key` ends here.
            let key_slice = &key[..];

            if self.contains_key(key_slice) {
                let position = self.position(key_slice).unwrap();
                self.keys.remove(position);
            }
        }

        self.keys.push(key.to_owned());
        self.document.insert(key, val)
    }

    /// Takes the value of the entry out of the document, and returns it.
    pub fn remove(&mut self, key: &str) -> Option<Bson> {
        // Drop the key from the insertion-order vector first, then from the map.
        let position = self.position(key);
        if position.is_some() {
            self.keys.remove(position.unwrap());
        }
        self.document.remove(key)
    }
}

Implement Debug, Display and Error for ValueAccessError

use chrono::{DateTime, UTC};
use bson::{Array,Bson,Document};
use super::oid::ObjectId;
use std::collections::BTreeMap;
use std::error;
use std::fmt;
use std::fmt::{Debug, Display, Error, Formatter};
use std::iter::{FromIterator, Map};
use std::vec::IntoIter;
use std::slice;

/// Error to indicate that either a value was empty or it contained an unexpected
/// type, for use with the direct getters.
#[derive(PartialEq)]
pub enum ValueAccessError {
    NotPresent,
    UnexpectedType
}

/// Result alias returned by the typed getters (`get_f64`, `get_str`, ...).
pub type ValueAccessResult<T> = Result<T, ValueAccessError>;

// Hand-written `Debug` so the output carries the type name as a prefix.
impl Debug for ValueAccessError {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match *self {
            ValueAccessError::NotPresent =>
                write!(f, "ValueAccessError: field is not present"),
            ValueAccessError::UnexpectedType =>
                write!(f, "ValueAccessError: field does not have the expected type")
        }
    }
}

// User-facing rendering: same messages without the type-name prefix.
impl Display for ValueAccessError {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match *self {
            ValueAccessError::NotPresent =>
                write!(f, "field is not present"),
            ValueAccessError::UnexpectedType =>
                write!(f, "field does not have the expected type")
        }
    }
}

impl error::Error for ValueAccessError {
    fn description(&self) -> &str {
        "Error to indicate that either a value was empty or it contained an unexpected type"
    }
}

/// A BSON document represented as an associative BTree Map with insertion ordering.
#[derive(Debug,Clone,PartialEq)]
pub struct OrderedDocument {
    // Insertion order of the keys; the map below holds the actual values.
    pub keys: Vec<String>,
    document: BTreeMap<String, Bson>,
}

impl Display for OrderedDocument {
    // Renders the document as `{ key: value, ... }` in insertion order.
    fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> {
        let mut string = "{ ".to_owned();
        for (key, value) in self.iter() {
            // Only prepend a separator after the first entry.
            if !string.eq("{ ") {
                string.push_str(", ");
            }
            string.push_str(&format!("{}: {}", key, value));
        }
        string.push_str(" }");
        fmt.write_str(&string)
    }
}

/// An owning iterator over OrderedDocument entries.
pub struct OrderedDocumentIntoIterator {
    vec_iter: IntoIter<String>,
    document: BTreeMap<String, Bson>,
}

/// A borrowing iterator over OrderedDocument entries.
pub struct OrderedDocumentIterator<'a> {
    vec_iter: slice::Iter<'a, String>,
    document: &'a BTreeMap<String, Bson>,
}

/// An iterator over an OrderedDocument's keys.
pub struct Keys<'a> {
    inner: Map<OrderedDocumentIterator<'a>, fn((&'a String, &'a Bson)) -> &'a String>
}

/// An iterator over an OrderedDocument's values.
pub struct Values<'a> {
    inner: Map<OrderedDocumentIterator<'a>, fn((&'a String, &'a Bson)) -> &'a Bson>
}

impl<'a> Iterator for Keys<'a> {
    type Item = &'a String;

    fn next(&mut self) -> Option<(&'a String)> {
        self.inner.next()
    }
}

impl<'a> Iterator for Values<'a> {
    type Item = &'a Bson;

    fn next(&mut self) -> Option<(&'a Bson)> {
        self.inner.next()
    }
}

impl IntoIterator for OrderedDocument {
    type Item = (String, Bson);
    type IntoIter = OrderedDocumentIntoIterator;

    fn into_iter(self) -> Self::IntoIter {
        OrderedDocumentIntoIterator {
            document: self.document,
            vec_iter: self.keys.into_iter()
        }
    }
}

impl<'a> IntoIterator for &'a OrderedDocument {
    type Item = (&'a String, &'a Bson);
    type IntoIter = OrderedDocumentIterator<'a>;

    fn into_iter(self) -> Self::IntoIter {
        let ref keys = self.keys;
        OrderedDocumentIterator {
            vec_iter: keys.into_iter(),
            document: &self.document,
        }
    }
}

impl FromIterator<(String, Bson)> for OrderedDocument {
    fn from_iter<T: IntoIterator<Item=(String, Bson)>>(iter: T) -> Self {
        let mut doc = OrderedDocument::new();
        for (k, v) in iter {
            doc.insert(k, v.to_owned());
        }
        doc
    }
}

impl<'a> Iterator for OrderedDocumentIntoIterator {
    type Item = (String, Bson);

    fn next(&mut self) -> Option<(String, Bson)> {
        // Walk keys in insertion order, moving each value out of the map.
        match self.vec_iter.next() {
            Some(key) => {
                let val = self.document.remove(&key[..]).unwrap();
                Some((key, val))
            },
            None => None,
        }
    }
}

impl<'a> Iterator for OrderedDocumentIterator<'a> {
    type Item = (&'a String, &'a Bson);

    fn next(&mut self) -> Option<(&'a String, &'a Bson)> {
        match self.vec_iter.next() {
            Some(key) => {
                // `keys` and `document` are kept in sync, so the lookup
                // cannot fail.
                let val = self.document.get(&key[..]).unwrap();
                Some((&key, val))
            },
            None => None,
        }
    }
}

impl OrderedDocument {
    /// Creates a new empty OrderedDocument.
    pub fn new() -> OrderedDocument {
        OrderedDocument {
            keys: Vec::new(),
            document: BTreeMap::new(),
        }
    }

    /// Gets an iterator over the entries of the map.
    pub fn iter<'a>(&'a self) -> OrderedDocumentIterator<'a> {
        self.into_iter()
    }

    /// Clears the document, removing all values.
    pub fn clear(&mut self) {
        self.keys.clear();
        self.document.clear();
    }

    /// Returns a reference to the Bson corresponding to the key.
    pub fn get(&self, key: &str) -> Option<&Bson> {
        self.document.get(key)
    }

    /// Gets a mutable reference to the Bson corresponding to the key
    pub fn get_mut(&mut self, key: &str) -> Option<&mut Bson> {
        self.document.get_mut(key)
    }

    /// Get a floating point value for this key if it exists and has
    /// the correct type.
    pub fn get_f64(&self, key: &str) -> ValueAccessResult<f64> {
        match self.get(key) {
            Some(&Bson::FloatingPoint(v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get a string slice for this key if it exists and has the correct type.
    pub fn get_str(&self, key: &str) -> ValueAccessResult<&str> {
        match self.get(key) {
            Some(&Bson::String(ref v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get a reference to an array for this key if it exists and has
    /// the correct type.
    pub fn get_array(&self, key: &str) -> ValueAccessResult<&Array> {
        match self.get(key) {
            Some(&Bson::Array(ref v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get a reference to a document for this key if it exists and has
    /// the correct type.
    pub fn get_document(&self, key: &str) -> ValueAccessResult<&Document> {
        match self.get(key) {
            Some(&Bson::Document(ref v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get a bool value for this key if it exists and has the correct type.
    pub fn get_bool(&self, key: &str) -> ValueAccessResult<bool> {
        match self.get(key) {
            Some(&Bson::Boolean(v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Returns whether this key has a null value
    pub fn is_null(&self, key: &str) -> bool {
        self.get(key) == Some(&Bson::Null)
    }

    /// Get an i32 value for this key if it exists and has the correct type.
    pub fn get_i32(&self, key: &str) -> ValueAccessResult<i32> {
        match self.get(key) {
            Some(&Bson::I32(v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get an i64 value for this key if it exists and has the correct type.
    pub fn get_i64(&self, key: &str) -> ValueAccessResult<i64> {
        match self.get(key) {
            Some(&Bson::I64(v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get a time stamp value for this key if it exists and has the correct type.
    pub fn get_time_stamp(&self, key: &str) -> ValueAccessResult<i64> {
        match self.get(key) {
            Some(&Bson::TimeStamp(v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get an object id value for this key if it exists and has the correct type.
    pub fn get_object_id(&self, key: &str) -> ValueAccessResult<&ObjectId> {
        match self.get(key) {
            Some(&Bson::ObjectId(ref v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Get a UTC datetime value for this key if it exists and has the correct type.
    pub fn get_utc_datetime(&self, key: &str) -> ValueAccessResult<&DateTime<UTC>> {
        match self.get(key) {
            Some(&Bson::UtcDatetime(ref v)) => Ok(v),
            Some(_) => Err(ValueAccessError::UnexpectedType),
            None => Err(ValueAccessError::NotPresent)
        }
    }

    /// Returns true if the map contains a value for the specified key.
    pub fn contains_key(&self, key: &str) -> bool {
        self.document.contains_key(key)
    }

    /// Returns the position of the key in the ordered vector, if it exists.
    pub fn position(&self, key: &str) -> Option<usize> {
        self.keys.iter().position(|x| x == key)
    }

    /// Gets a collection of all keys in the document.
    pub fn keys<'a>(&'a self) -> Keys<'a> {
        // Named fn items coerced to fn pointers so `Keys` can hold a
        // non-generic `Map` adaptor.
        fn first<A, B>((a, _): (A, B)) -> A {
            a
        }
        let first: fn((&'a String, &'a Bson)) -> &'a String = first;

        Keys { inner: self.iter().map(first) }
    }

    /// Gets a collection of all values in the document.
    pub fn values<'a>(&'a self) -> Values<'a> {
        fn second<A, B>((_, b): (A, B)) -> B {
            b
        }
        let second: fn((&'a String, &'a Bson)) -> &'a Bson = second;

        Values { inner: self.iter().map(second) }
    }

    /// Returns the number of elements in the document.
    pub fn len(&self) -> usize {
        self.keys.len()
    }

    /// Returns true if the document contains no elements
    pub fn is_empty(&self) -> bool {
        self.document.is_empty()
    }

    /// Sets the value of the entry with the OccupiedEntry's key,
    /// and returns the entry's old value. Accepts any type that
    /// can be converted into Bson.
    pub fn insert<KT: Into<String>, BT: Into<Bson>>(&mut self, key: KT, val: BT) -> Option<Bson> {
        self.insert_bson(key.into(), val.into())
    }

    /// Sets the value of the entry with the OccupiedEntry's key,
    /// and returns the entry's old value.
    pub fn insert_bson(&mut self, key: String, val: Bson) -> Option<Bson> {
        {
            // Re-inserting an existing key moves it to the back of the
            // insertion order; scoped so the borrow of `key` ends here.
            let key_slice = &key[..];

            if self.contains_key(key_slice) {
                let position = self.position(key_slice).unwrap();
                self.keys.remove(position);
            }
        }

        self.keys.push(key.to_owned());
        self.document.insert(key, val)
    }

    /// Takes the value of the entry out of the document, and returns it.
    pub fn remove(&mut self, key: &str) -> Option<Bson> {
        // Drop the key from the insertion-order vector first, then from the map.
        let position = self.position(key);
        if position.is_some() {
            self.keys.remove(position.unwrap());
        }
        self.document.remove(key)
    }
}
//! This module implements encoding and decoding of the (63, 16, 23) BCH code used to
//! protect P25's NID field.
//!
//! It uses an optimized "matrix multiplication" for encoding and
//! the Berlekamp-Massey algorithm followed by Chien search for decoding, and both use
//! only stack memory.
//!
//! Most Galois field information as well as the Berlekamp-Massey implementation are
//! derived from \[1] and the Chien search was derived from \[2].
//!
//! \[1]: "Coding Theory and Cryptography: The Essentials", 2nd ed, Hankerson, Hoffman, et
//! al, 2000
//!
//! \[2]: https://en.wikipedia.org/wiki/Chien_search

use std;

/// Encode the given word into a P25 BCH codeword.
pub fn encode(word: u16) -> u64 {
    GEN.iter().fold(0, |accum, row| {
        // Continually shift in bits created by "multiplying" the word with the generator
        // row. The AND+popcount-parity is a GF(2) dot product of `word` with `row`.
        accum << 1 | ((word & row).count_ones() % 2) as u64
    })
}

/// Decode the given codeword into data bits, correcting up to 11 errors. Return
/// `Some((data, err))`, where `data` is the data bits and `err` is the number of errors,
/// if the codeword could be corrected and `None` if it couldn't.
pub fn decode(word: u64) -> Option<(u16, usize)> {
    // The BCH code is only over the first 63 bits, so strip off the P25 parity bit.
    let word = word >> 1;

    // Get the error location polynomial.
    let poly = BCHDecoder::new(Syndromes::new(word)).decode();

    // The degree indicates the number of errors that need to be corrected.
    let errors = match poly.degree() {
        Some(deg) => deg,
        None => panic!("invalid polynomial"),
    };

    // Even if there are more errors, the BM algorithm produces a polynomial with degree
    // no greater than ERRORS.
    assert!(errors <= ERRORS);

    // Get the bit locations from the polynomial.
    let locs = ErrorLocations::new(poly.coefs().iter().cloned());

    // Correct the codeword and count the number of corrected errors. Stop the
    // `ErrorLocations` iteration after `errors` iterations since it won't yield any more
    // locations after that anyway.
    let (word, count) = locs.take(errors).fold((word, 0), |(w, s), loc| {
        (w ^ 1 << loc, s + 1)
    });

    if count == errors {
        // Strip off the (corrected) parity-check bits.
        Some(((word >> 47) as u16, errors))
    } else {
        // Fewer roots than the degree of Λ(x): uncorrectable word.
        None
    }
}

/// The n in (n,k,d).
const WORD_SIZE: usize = 63;
/// The d in (n,k,d).
const DISTANCE: usize = 23;
/// 2t+1 = 23 => t = 11
const ERRORS: usize = 11;
/// Required syndrome codewords.
const SYNDROMES: usize = 2 * ERRORS;

/// Maps α^i to its codeword.
const CODEWORDS: &'static [u8] = &[
    0b100000, 0b010000, 0b001000, 0b000100, 0b000010, 0b000001, 0b110000, 0b011000,
    0b001100, 0b000110, 0b000011, 0b110001, 0b101000, 0b010100, 0b001010, 0b000101,
    0b110010, 0b011001, 0b111100, 0b011110, 0b001111, 0b110111, 0b101011, 0b100101,
    0b100010, 0b010001, 0b111000, 0b011100, 0b001110, 0b000111, 0b110011, 0b101001,
    0b100100, 0b010010, 0b001001, 0b110100, 0b011010, 0b001101, 0b110110, 0b011011,
    0b111101, 0b101110, 0b010111, 0b111011, 0b101101, 0b100110, 0b010011, 0b111001,
    0b101100, 0b010110, 0b001011, 0b110101, 0b101010, 0b010101, 0b111010, 0b011101,
    0b111110, 0b011111, 0b111111, 0b101111, 0b100111, 0b100011, 0b100001,
];

/// Maps a codeword to i in α^i.
const POWERS: &'static [usize] = &[
    5, 4, 10, 3, 15, 9, 29, 2, 34, 14, 50, 8, 37, 28, 20, 1, 25, 33, 46, 13, 53, 49,
    42, 7, 17, 36, 39, 27, 55, 19, 57, 0, 62, 24, 61, 32, 23, 45, 60, 12, 31, 52, 22,
    48, 44, 41, 59, 6, 11, 16, 30, 35, 51, 38, 21, 26, 47, 54, 43, 18, 40, 56, 58,
];

/// Generator matrix from P25, transformed for more efficient codeword generation.
const GEN: &'static [u16] = &[
    0b1000000000000000, 0b0100000000000000, 0b0010000000000000, 0b0001000000000000,
    0b0000100000000000, 0b0000010000000000, 0b0000001000000000, 0b0000000100000000,
    0b0000000010000000, 0b0000000001000000, 0b0000000000100000, 0b0000000000010000,
    0b0000000000001000, 0b0000000000000100, 0b0000000000000010, 0b0000000000000001,
    0b1110110001000111, 0b1001101001100100, 0b0100110100110010, 0b0010011010011001,
    0b1111111100001011, 0b1001001111000010, 0b0100100111100001, 0b1100100010110111,
    0b1000100000011100, 0b0100010000001110, 0b0010001000000111, 0b1111110101000100,
    0b0111111010100010, 0b0011111101010001, 0b1111001111101111, 0b1001010110110000,
    0b0100101011011000, 0b0010010101101100, 0b0001001010110110, 0b0000100101011011,
    0b1110100011101010, 0b0111010001110101, 0b1101011001111101, 0b1000011101111001,
    0b1010111111111011, 0b1011101110111010, 0b0101110111011101, 0b1100001010101001,
    0b1000110100010011, 0b1010101011001110, 0b0101010101100111, 0b1100011011110100,
    0b0110001101111010, 0b0011000110111101, 0b1111010010011001, 0b1001011000001011,
    0b1010011101000010, 0b0101001110100001, 0b1100010110010111, 0b1000111010001100,
    0b0100011101000110, 0b0010001110100011, 0b1111110110010110, 0b0111111011001011,
    0b1101001100100010, 0b0110100110010001, 0b1101100010001111, 0b0000000000000011,
];

#[derive(Copy, Clone)]
/// Codeword in GF(2^6) defined by α^6+α+1.
struct Codeword(u8);

impl Codeword {
    /// Construct a new `Codeword` with the given (valid) codeword in the field.
    fn new(codeword: u8) -> Codeword {
        Codeword(codeword)
    }

    /// Check if the codeword is zero.
    pub fn zero(&self) -> bool {
        self.0 == 0
    }

    /// Return `Some(i)` if the codeword is equal to α^i and `None` if it's equal to zero.
    pub fn power(&self) -> Option<usize> {
        if self.zero() {
            None
        } else {
            // Convert to zero-based index.
            Some(POWERS[self.0 as usize - 1])
        }
    }

    /// Return the codeword for the given power, which is cyclic in the field.
    pub fn for_power(power: usize) -> Codeword {
        // α^63 = α^0, so reduce mod 63.
        Codeword::new(CODEWORDS[power % POWERS.len()])
    }

    /// Find 1/a^i for the codeword equal to a^i. Panic if the codeword is zero.
    pub fn invert(self) -> Codeword {
        match self.power() {
            // α^p * α^(63-p) = α^63 = 1.
            Some(p) => Codeword::for_power(POWERS.len() - p),
            None => panic!("divide by zero"),
        }
    }
}

impl Default for Codeword {
    /// Get the additive identity codeword.
    fn default() -> Self {
        Codeword::new(0)
    }
}

impl std::ops::Mul for Codeword {
    type Output = Codeword;

    fn mul(self, rhs: Codeword) -> Self::Output {
        // Multiplication is addition of exponents; anything times zero is zero.
        match (self.power(), rhs.power()) {
            (Some(p), Some(q)) => Codeword::for_power(p + q),
            _ => Codeword::default(),
        }
    }
}

impl std::ops::Div for Codeword {
    type Output = Codeword;

    fn div(self, rhs: Codeword) -> Self::Output {
        match (self.power(), rhs.power()) {
            // max(q) = 62 => 63-max(power) > 0
            (Some(p), Some(q)) => Codeword::for_power(p + POWERS.len() - q),
            (None, Some(_)) => Codeword::default(),
            (_, None) => panic!("divide by zero"),
        }
    }
}

impl std::ops::Add for Codeword {
    type Output = Codeword;

    fn add(self, rhs: Codeword) -> Self::Output {
        // Addition in GF(2^6) is bitwise XOR.
        Codeword::new(self.0 ^ rhs.0)
    }
}

impl std::ops::Sub for Codeword {
    type Output = Codeword;

    fn sub(self, rhs: Codeword) -> Self::Output {
        // Subtraction equals addition in a characteristic-2 field.
        self + rhs
    }
}

impl std::cmp::PartialEq for Codeword {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}

impl std::cmp::Eq for Codeword {}

impl std::cmp::PartialOrd for Codeword {
    fn partial_cmp(&self, rhs: &Self) -> Option<std::cmp::Ordering> {
        use std::cmp::Ordering::*;

        // Order by exponent; zero is less than any power of α.
        match (self.power(), rhs.power()) {
            (Some(p), Some(q)) => Some(p.cmp(&q)),
            (Some(_), None) => Some(Greater),
            (None, Some(_)) => Some(Less),
            (None, None) => Some(Equal),
        }
    }
}

impl std::cmp::Ord for Codeword {
    fn cmp(&self, rhs: &Self) -> std::cmp::Ordering {
        self.partial_cmp(rhs).unwrap()
    }
}

#[derive(Copy, Clone)]
/// A syndrome polynomial with GF(2^6) codewords as coefficients.
struct Polynomial {
    /// Coefficients of the polynomial. The maximum degree span in the algorithm is [0,
    /// 2t+1], or 2t+2 coefficients.
    coefs: [Codeword; SYNDROMES + 2],
    /// Index into `coefs` of the degree-0 coefficient. Coefficients with a lesser index
    /// will be zero.
    start: usize,
}

impl Polynomial {
    /// Construct a new `Polynomial` from the given coefficients, so
    /// p(x) = coefs[0] + coefs[1]*x + ... + coefs[n]*x^n. Only `SYNDROMES+2` coefficients
    /// will be used from the iterator.
    pub fn new<T: Iterator<Item = Codeword>>(coefs: T) -> Polynomial {
        // Start with all zero coefficients and add in the given ones.
        let mut poly = [Codeword::default(); SYNDROMES + 2];

        for (cur, coef) in poly.iter_mut().zip(coefs) {
            *cur = *cur + coef;
        }

        Polynomial {
            coefs: poly,
            start: 0,
        }
    }

    /// Get the degree-0 coefficient.
    pub fn constant(&self) -> Codeword {
        self.coefs[self.start]
    }

    /// Get the coefficients starting from degree-0.
    pub fn coefs(&self) -> &[Codeword] {
        &self.coefs[self.start..]
    }

    /// Return `Some(deg)`, where `deg` is the highest degree term in the polynomial, if
    /// the polynomial is nonzero and `None` if it's zero.
    pub fn degree(&self) -> Option<usize> {
        // Scan from the highest index down for the first nonzero coefficient.
        for (deg, coef) in self.coefs.iter().enumerate().rev() {
            if !coef.zero() {
                // Any coefficients before `start` aren't part of the polynomial.
                return Some(deg - self.start);
            }
        }

        None
    }

    /// Divide the polynomial by x -- shift all coefficients to a lower degree -- and
    /// replace the shifted coefficient with the zero codeword. There must be no constant
    /// term.
    pub fn shift(mut self) -> Polynomial {
        self.coefs[self.start] = Codeword::default();
        self.start += 1;
        self
    }

    /// Get the coefficient of the given absolute degree if it exists in the polynomial
    /// or the zero codeword if it doesn't.
    fn get(&self, idx: usize) -> Codeword {
        match self.coefs.get(idx) {
            Some(&c) => c,
            None => Codeword::default(),
        }
    }

    /// Get the coefficient of the given degree or the zero codeword if the degree doesn't
    /// exist in the polynomial.
    pub fn coef(&self, deg: usize) -> Codeword {
        self.get(self.start + deg)
    }
}

impl std::ops::Add for Polynomial {
    type Output = Polynomial;

    fn add(mut self, rhs: Polynomial) -> Self::Output {
        // Sum the coefficients and reset the degree-0 term back to index 0. Since start >
        // 0 => start+i >= i, so there's no overwriting.
        for i in 0..self.coefs.len() {
            self.coefs[i] = self.coef(i) + rhs.coef(i);
        }

        self.start = 0;
        self
    }
}

impl std::ops::Mul<Codeword> for Polynomial {
    type Output = Polynomial;

    fn mul(mut self, rhs: Codeword) -> Self::Output {
        // Scalar multiplication: scale every coefficient.
        for coef in self.coefs.iter_mut() {
            *coef = *coef * rhs;
        }

        self
    }
}

/// Iterator over the syndromes of a received codeword. Each syndrome is a codeword in
/// GF(2^6).
struct Syndromes {
    /// Exponent power of the current syndrome.
    pow: std::ops::Range<usize>,
    /// Received codeword itself.
    word: u64,
}

impl Syndromes {
    /// Construct a new `Syndromes` for the given received codeword.
    pub fn new(word: u64) -> Syndromes {
        Syndromes {
            pow: 1..DISTANCE,
            word: word,
        }
    }
}

impl Iterator for Syndromes {
    type Item = Codeword;

    fn next(&mut self) -> Option<Self::Item> {
        // s_pow = Σ over set bits b of α^(b*pow) -- evaluate the received
        // polynomial at α^pow.
        match self.pow.next() {
            Some(pow) => Some((0..WORD_SIZE).fold(Codeword::default(), |s, b| {
                if self.word >> b & 1 == 0 {
                    s
                } else {
                    s + Codeword::for_power(b * pow)
                }
            })),
            None => None,
        }
    }
}

/// Implements the iterative part of the Berlekamp-Massey algorithm.
struct BCHDecoder {
    /// Saved p polynomial: p_{z_i-1}.
    p_saved: Polynomial,
    /// Previous iteration's p polynomial: p_{i-1}.
    p_cur: Polynomial,
    /// Saved q polynomial: q_{z_i-1}.
    q_saved: Polynomial,
    /// Previous iteration's q polynomial: q_{i-1}.
    q_cur: Polynomial,
    /// Degree-related term of saved p polynomial: D_{z_i-1}.
    deg_saved: usize,
    /// Degree-related term of previous p polynomial: D_{i-1}.
    deg_cur: usize,
}

impl BCHDecoder {
    /// Construct a new `BCHDecoder` from the given syndrome codeword iterator.
    pub fn new<T: Iterator<Item = Codeword>>(syndromes: T) -> BCHDecoder {
        // A zero followed by the syndromes.
        let q = Polynomial::new(std::iter::once(Codeword::for_power(0))
                                    .chain(syndromes.into_iter()));
        // 2t zeroes followed by a one.
        let p = Polynomial::new((0..SYNDROMES+1).map(|_| Codeword::default())
                                    .chain(std::iter::once(Codeword::for_power(0))));

        BCHDecoder {
            q_saved: q,
            q_cur: q.shift(),
            p_saved: p,
            p_cur: p.shift(),
            deg_saved: 0,
            deg_cur: 1,
        }
    }

    /// Perform the iterative steps to get the error-location polynomial Λ(x) with deg(Λ)
    /// <= t.
    pub fn decode(mut self) -> Polynomial {
        for _ in 0..SYNDROMES {
            self.step();
        }

        self.p_cur
    }

    /// Perform one iterative step of the algorithm, updating the state polynomials and
    /// degrees.
    fn step(&mut self) {
        let (save, q, p, d) = if self.q_cur.constant().zero() {
            self.reduce()
        } else {
            self.transform()
        };

        if save {
            self.q_saved = self.q_cur;
            self.p_saved = self.p_cur;
            self.deg_saved = self.deg_cur;
        }

        self.q_cur = q;
        self.p_cur = p;
        self.deg_cur = d;
    }

    /// Simply shift the polynomials since they have no degree-0 term.
    fn reduce(&mut self) -> (bool, Polynomial, Polynomial, usize) {
        (
            false,
            self.q_cur.shift(),
            self.p_cur.shift(),
            2 + self.deg_cur,
        )
    }

    /// Remove the degree-0 terms and shift the polynomials.
    fn transform(&mut self) -> (bool, Polynomial, Polynomial, usize) {
        // Cancel the constant terms so the sum has no degree-0 term.
        let mult = self.q_cur.constant() / self.q_saved.constant();

        (
            self.deg_cur >= self.deg_saved,
            (self.q_cur + self.q_saved * mult).shift(),
            (self.p_cur + self.p_saved * mult).shift(),
            2 + std::cmp::min(self.deg_cur, self.deg_saved),
        )
    }
}

/// Uses Chien search to find the roots in GF(2^6) of an error-locator polynomial and
/// produce an iterator of error bit positions.
struct ErrorLocations {
    /// Coefficients of the polynomial.
    terms: [Codeword; ERRORS + 1],
    /// Current exponent power of the iteration.
    pow: std::ops::Range<usize>,
}

impl ErrorLocations {
    /// Construct a new `ErrorLocations` from the given coefficients, where Λ(x) =
    /// coefs[0] + coefs[1]*x + ... + coefs[e]*x^e.
    pub fn new<T: Iterator<Item = Codeword>>(coefs: T) -> ErrorLocations {
        // The maximum degree is t error locations (t+1 coefficients.)
        let mut poly = [Codeword::default(); ERRORS + 1];

        for (pow, (cur, coef)) in poly.iter_mut().zip(coefs).enumerate() {
            // Since the first call to `update_terms()` multiplies by `pow` and the
            // coefficients should equal themselves on the first iteration, divide by
            // `pow` here.
            *cur = *cur + coef / Codeword::for_power(pow)
        }

        ErrorLocations {
            terms: poly,
            pow: 0..POWERS.len(),
        }
    }

    /// Perform the term-updating step of the algorithm: x_{j,i} = x_{j,i-1} * α^j.
    fn update_terms(&mut self) {
        for (pow, term) in self.terms.iter_mut().enumerate() {
            *term = *term * Codeword::for_power(pow);
        }
    }

    /// Calculate the sum of the terms: x_{0,i} + x_{1,i} + ... + x_{t,i} -- evaluate the
    /// error-locator polynomial at Λ(α^i).
    fn sum_terms(&self) -> Codeword {
        self.terms.iter().fold(Codeword::default(), |s, &x| s + x)
    }
}

impl Iterator for ErrorLocations {
    type Item = usize;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let pow = match self.pow.next() {
                Some(pow) => pow,
                None => return None,
            };

            self.update_terms();

            if self.sum_terms().zero() {
                // α^pow is a root, so its inverse exponent is the error location.
                return Some(Codeword::for_power(pow).invert().power().unwrap());
            }
        }
    }
}

#[cfg(test)]
mod test {
    use super::{encode, Syndromes, Codeword, Polynomial, decode};

    #[test]
    fn test_for_power() {
        assert_eq!(Codeword::for_power(0).0, 0b100000);
        assert_eq!(Codeword::for_power(62).0, 0b100001);
        assert_eq!(Codeword::for_power(63).0, 0b100000);
    }

    #[test]
    fn test_add_sub() {
        assert_eq!((Codeword::new(0b100000) + Codeword::new(0b010000)).0, 0b110000);
        assert_eq!((Codeword::new(0b100000) - Codeword::new(0b010000)).0, 0b110000);
        assert_eq!((Codeword::new(0b100001) + Codeword::new(0b100001)).0, 0b000000);
        assert_eq!((Codeword::new(0b100001) - Codeword::new(0b100001)).0, 0b000000);
        assert_eq!((Codeword::new(0b100001) + Codeword::new(0b110100)).0, 0b010101);
        assert_eq!((Codeword::new(0b100001) - Codeword::new(0b110100)).0,
0b010101); } #[test] fn test_mul() { assert_eq!((Codeword::new(0b011000) * Codeword::new(0b101000)).0, 0b011110); assert_eq!((Codeword::new(0b000000) * Codeword::new(0b101000)).0, 0b000000); assert_eq!((Codeword::new(0b011000) * Codeword::new(0b000000)).0, 0b000000); assert_eq!((Codeword::new(0b000000) * Codeword::new(0b000000)).0, 0b000000); assert_eq!((Codeword::new(0b100001) * Codeword::new(0b100000)).0, 0b100001); assert_eq!((Codeword::new(0b100001) * Codeword::new(0b010000)).0, 0b100000); assert_eq!((Codeword::new(0b110011) * Codeword::new(0b110011)).0, 0b100111); assert_eq!((Codeword::new(0b111101) * Codeword::new(0b111101)).0, 0b011001); } #[test] fn test_div() { assert_eq!((Codeword::new(0b000100) / Codeword::new(0b101000)).0, 0b111010); assert_eq!((Codeword::new(0b000000) / Codeword::new(0b101000)).0, 0b000000); assert_eq!((Codeword::new(0b011110) / Codeword::new(0b100000)).0, 0b011110); assert_eq!((Codeword::new(0b011110) / Codeword::new(0b011110)).0, 0b100000); } #[test] fn test_cmp() { assert!(Codeword::new(0b100000) > Codeword::new(0b000000)); assert!(Codeword::new(0b000000) == Codeword::new(0b000000)); assert!(Codeword::new(0b010000) > Codeword::new(0b100000)); assert!(Codeword::new(0b100001) > Codeword::new(0b100000)); } #[test] fn test_encode() { assert_eq!(encode(0b1111111100000000), 0b1111111100000000100100110001000011000010001100000110100001101000); assert_eq!(encode(0b0011)&1, 0); assert_eq!(encode(0b0101)&1, 1); assert_eq!(encode(0b1010)&1, 1); assert_eq!(encode(0b1100)&1, 0); assert_eq!(encode(0b1111)&1, 0); } #[test] fn test_syndromes() { let w = encode(0b1111111100000000)>>1; assert!(Syndromes::new(w).all(|s| s.zero())); assert!(!Syndromes::new(w ^ 1<<60).all(|s| s.zero())); } #[test] fn test_polynomial() { let p = Polynomial::new((0..23).map(|i| { Codeword::for_power(i) })); assert!(p.degree().unwrap() == 22); assert!(p.constant() == Codeword::for_power(0)); let p = p.shift(); assert!(p.degree().unwrap() == 21); assert!(p.constant() == 
Codeword::for_power(1)); let q = p.clone() * Codeword::for_power(0); assert!(q.degree().unwrap() == 21); assert!(q.constant() == Codeword::for_power(1)); let q = p.clone() * Codeword::for_power(2); assert!(q.degree().unwrap() == 21); assert!(q.constant() == Codeword::for_power(3)); let q = p.clone() + p.clone(); assert!(q.constant().zero()); for coef in q.coefs() { assert!(coef.zero()); } let p = Polynomial::new((4..27).map(|i| { Codeword::for_power(i) })); let q = Polynomial::new((3..26).map(|i| { Codeword::for_power(i) })); let r = p + q.shift(); assert!(r.coefs[0].zero()); assert!(r.coefs[1].zero()); assert!(r.coefs[2].zero()); assert!(r.coefs[3].zero()); assert!(r.coefs[4].zero()); assert!(!r.coefs[22].zero()); let p = Polynomial::new((0..2).map(|_| { Codeword::for_power(0) })); let q = Polynomial::new((0..4).map(|_| { Codeword::for_power(1) })); let r = p + q; assert!(r.coef(0) == Codeword::for_power(6)); } #[test] fn test_decode() { assert!(decode(encode(0b0000111100001111) ^ 1<<63).unwrap() == (0b0000111100001111, 1)); assert!(decode(encode(0b1100011111111111) ^ 1).unwrap() == (0b1100011111111111, 0)); assert!(decode(encode(0b1111111100000000) ^ 0b11010011<<30).unwrap() == (0b1111111100000000, 5)); assert!(decode(encode(0b1101101101010001) ^ (1<<63 | 1)).unwrap() == (0b1101101101010001, 1)); assert!(decode(encode(0b1111111111111111) ^ 0b11111111111).unwrap() == (0b1111111111111111, 10)); assert!(decode(encode(0b0000000000000000) ^ 0b11111111111).unwrap() == (0b0000000000000000, 10)); assert!(decode(encode(0b0000111110000000) ^ 0b111111111110).unwrap() == (0b0000111110000000, 11)); assert!(decode(encode(0b0000111110000000) ^ 0b111111111110).unwrap() == (0b0000111110000000, 11)); assert!(decode(encode(0b0000111110001010) ^ 0b1111111111110).is_none()); assert!(decode(encode(0b0000001111111111) ^ 0b11111111111111111111110).is_none()); assert!(decode(encode(0b0000001111111111) ^ 0b00100101010101000010001100100010011111111110).is_none()); } } Add some more tests 
//! This module implements encoding and decoding of the (63, 16, 23) BCH code used to
//! protect P25's NID field.
//!
//! It uses an optimized "matrix multiplication" for encoding and
//! the Berlekamp-Massey algorithm followed by Chien search for decoding, and both use
//! only stack memory.
//!
//! Most Galois field information as well as the Berlekamp-Massey implementation are
//! derived from \[1] and the Chien search was derived from \[2].
//!
//! \[1]: "Coding Theory and Cryptography: The Essentials", 2nd ed, Hankerson, Hoffman, et
//! al, 2000
//!
//! \[2]: https://en.wikipedia.org/wiki/Chien_search

use std;

/// Encode the given word into a P25 BCH codeword.
pub fn encode(word: u16) -> u64 {
    GEN.iter().fold(0, |accum, row| {
        // Continually shift in bits created by "multiplying" the word with the generator
        // row.
        accum << 1 | ((word & row).count_ones() % 2) as u64
    })
}

/// Decode the given codeword into data bits, correcting up to 11 errors. Return
/// `Some((data, err))`, where `data` is the data bits and `err` is the number of errors,
/// if the codeword could be corrected and `None` if it couldn't.
pub fn decode(word: u64) -> Option<(u16, usize)> {
    // The BCH code is only over the first 63 bits, so strip off the P25 parity bit.
    let word = word >> 1;

    // Get the error location polynomial.
    let poly = BCHDecoder::new(Syndromes::new(word)).decode();

    // The degree indicates the number of errors that need to be corrected.
    let errors = match poly.degree() {
        Some(deg) => deg,
        None => panic!("invalid polynomial"),
    };

    // Even if there are more errors, the BM algorithm produces a polynomial with degree
    // no greater than ERRORS.
    assert!(errors <= ERRORS);

    // Get the bit locations from the polynomial.
    let locs = ErrorLocations::new(poly.coefs().iter().cloned());

    // Correct the codeword and count the number of corrected errors. Stop the
    // `ErrorLocations` iteration after `errors` iterations since it won't yield any more
    // locations after that anyway.
    let (word, count) = locs.take(errors).fold((word, 0), |(w, s), loc| {
        (w ^ 1 << loc, s + 1)
    });

    if count == errors {
        // Strip off the (corrected) parity-check bits.
        Some(((word >> 47) as u16, errors))
    } else {
        None
    }
}

/// The n in (n,k,d).
const WORD_SIZE: usize = 63;
/// The d in (n,k,d).
const DISTANCE: usize = 23;
/// 2t+1 = 23 => t = 11
const ERRORS: usize = 11;
/// Required syndrome codewords.
const SYNDROMES: usize = 2 * ERRORS;

/// Maps α^i to its codeword.
const CODEWORDS: &'static [u8] = &[
    0b100000, 0b010000, 0b001000, 0b000100, 0b000010, 0b000001, 0b110000, 0b011000,
    0b001100, 0b000110, 0b000011, 0b110001, 0b101000, 0b010100, 0b001010, 0b000101,
    0b110010, 0b011001, 0b111100, 0b011110, 0b001111, 0b110111, 0b101011, 0b100101,
    0b100010, 0b010001, 0b111000, 0b011100, 0b001110, 0b000111, 0b110011, 0b101001,
    0b100100, 0b010010, 0b001001, 0b110100, 0b011010, 0b001101, 0b110110, 0b011011,
    0b111101, 0b101110, 0b010111, 0b111011, 0b101101, 0b100110, 0b010011, 0b111001,
    0b101100, 0b010110, 0b001011, 0b110101, 0b101010, 0b010101, 0b111010, 0b011101,
    0b111110, 0b011111, 0b111111, 0b101111, 0b100111, 0b100011, 0b100001,
];

/// Maps a codeword to i in α^i.
const POWERS: &'static [usize] = &[
    5, 4, 10, 3, 15, 9, 29, 2, 34, 14, 50, 8, 37, 28, 20, 1, 25, 33, 46, 13, 53, 49,
    42, 7, 17, 36, 39, 27, 55, 19, 57, 0, 62, 24, 61, 32, 23, 45, 60, 12, 31, 52, 22,
    48, 44, 41, 59, 6, 11, 16, 30, 35, 51, 38, 21, 26, 47, 54, 43, 18, 40, 56, 58,
];

/// Generator matrix from P25, transformed for more efficient codeword generation.
const GEN: &'static [u16] = &[
    0b1000000000000000,
    0b0100000000000000,
    0b0010000000000000,
    0b0001000000000000,
    0b0000100000000000,
    0b0000010000000000,
    0b0000001000000000,
    0b0000000100000000,
    0b0000000010000000,
    0b0000000001000000,
    0b0000000000100000,
    0b0000000000010000,
    0b0000000000001000,
    0b0000000000000100,
    0b0000000000000010,
    0b0000000000000001,
    0b1110110001000111,
    0b1001101001100100,
    0b0100110100110010,
    0b0010011010011001,
    0b1111111100001011,
    0b1001001111000010,
    0b0100100111100001,
    0b1100100010110111,
    0b1000100000011100,
    0b0100010000001110,
    0b0010001000000111,
    0b1111110101000100,
    0b0111111010100010,
    0b0011111101010001,
    0b1111001111101111,
    0b1001010110110000,
    0b0100101011011000,
    0b0010010101101100,
    0b0001001010110110,
    0b0000100101011011,
    0b1110100011101010,
    0b0111010001110101,
    0b1101011001111101,
    0b1000011101111001,
    0b1010111111111011,
    0b1011101110111010,
    0b0101110111011101,
    0b1100001010101001,
    0b1000110100010011,
    0b1010101011001110,
    0b0101010101100111,
    0b1100011011110100,
    0b0110001101111010,
    0b0011000110111101,
    0b1111010010011001,
    0b1001011000001011,
    0b1010011101000010,
    0b0101001110100001,
    0b1100010110010111,
    0b1000111010001100,
    0b0100011101000110,
    0b0010001110100011,
    0b1111110110010110,
    0b0111111011001011,
    0b1101001100100010,
    0b0110100110010001,
    0b1101100010001111,
    0b0000000000000011,
];

#[derive(Copy, Clone)]
/// Codeword in GF(2^6) defined by α^6+α+1.
struct Codeword(u8);

impl Codeword {
    /// Construct a new `Codeword` with the given (valid) codeword in the field.
    fn new(codeword: u8) -> Codeword {
        Codeword(codeword)
    }

    /// Check if the codeword is zero.
    pub fn zero(&self) -> bool {
        self.0 == 0
    }

    /// Return `Some(i)` if the codeword is equal to α^i and `None` if it's equal to zero.
    pub fn power(&self) -> Option<usize> {
        if self.zero() {
            None
        } else {
            // Convert to zero-based index.
            Some(POWERS[self.0 as usize - 1])
        }
    }

    /// Return the codeword for the given power, which is cyclic in the field.
    pub fn for_power(power: usize) -> Codeword {
        Codeword::new(CODEWORDS[power % POWERS.len()])
    }

    /// Find 1/a^i for the codeword equal to a^i. Panic if the codeword is zero.
    pub fn invert(self) -> Codeword {
        match self.power() {
            Some(p) => Codeword::for_power(POWERS.len() - p),
            None => panic!("divide by zero"),
        }
    }
}

impl Default for Codeword {
    /// Get the additive identity codeword.
    fn default() -> Self {
        Codeword::new(0)
    }
}

impl std::ops::Mul for Codeword {
    type Output = Codeword;

    fn mul(self, rhs: Codeword) -> Self::Output {
        match (self.power(), rhs.power()) {
            (Some(p), Some(q)) => Codeword::for_power(p + q),
            _ => Codeword::default(),
        }
    }
}

impl std::ops::Div for Codeword {
    type Output = Codeword;

    fn div(self, rhs: Codeword) -> Self::Output {
        match (self.power(), rhs.power()) {
            // max(q) = 62 => 63-max(power) > 0
            (Some(p), Some(q)) => Codeword::for_power(p + POWERS.len() - q),
            (None, Some(_)) => Codeword::default(),
            (_, None) => panic!("divide by zero"),
        }
    }
}

impl std::ops::Add for Codeword {
    type Output = Codeword;

    fn add(self, rhs: Codeword) -> Self::Output {
        Codeword::new(self.0 ^ rhs.0)
    }
}

impl std::ops::Sub for Codeword {
    type Output = Codeword;

    fn sub(self, rhs: Codeword) -> Self::Output {
        self + rhs
    }
}

impl std::cmp::PartialEq for Codeword {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}

impl std::cmp::Eq for Codeword {}

impl std::cmp::PartialOrd for Codeword {
    fn partial_cmp(&self, rhs: &Self) -> Option<std::cmp::Ordering> {
        use std::cmp::Ordering::*;

        match (self.power(), rhs.power()) {
            (Some(p), Some(q)) => Some(p.cmp(&q)),
            (Some(_), None) => Some(Greater),
            (None, Some(_)) => Some(Less),
            (None, None) => Some(Equal),
        }
    }
}

impl std::cmp::Ord for Codeword {
    fn cmp(&self, rhs: &Self) -> std::cmp::Ordering {
        self.partial_cmp(rhs).unwrap()
    }
}

#[derive(Copy, Clone)]
/// A syndrome polynomial with GF(2^6) codewords as coefficients.
struct Polynomial {
    /// Coefficients of the polynomial. The maximum degree span in the algorithm is [0,
    /// 2t+1], or 2t+2 coefficients.
    coefs: [Codeword; SYNDROMES + 2],
    /// Index into `coefs` of the degree-0 coefficient. Coefficients with a lesser index
    /// will be zero.
    start: usize,
}

impl Polynomial {
    /// Construct a new `Polynomial` from the given coefficients, so
    /// p(x) = coefs[0] + coefs[1]*x + ... + coefs[n]*x^n. Only `SYNDROMES+2` coefficients
    /// will be used from the iterator.
    pub fn new<T: Iterator<Item = Codeword>>(coefs: T) -> Polynomial {
        // Start with all zero coefficients and add in the given ones.
        let mut poly = [Codeword::default(); SYNDROMES + 2];

        for (cur, coef) in poly.iter_mut().zip(coefs) {
            *cur = *cur + coef;
        }

        Polynomial {
            coefs: poly,
            start: 0,
        }
    }

    /// Get the degree-0 coefficient.
    pub fn constant(&self) -> Codeword {
        self.coefs[self.start]
    }

    /// Get the coefficients starting from degree-0.
    pub fn coefs(&self) -> &[Codeword] {
        &self.coefs[self.start..]
    }

    /// Return `Some(deg)`, where `deg` is the highest degree term in the polynomial, if
    /// the polynomial is nonzero and `None` if it's zero.
    pub fn degree(&self) -> Option<usize> {
        for (deg, coef) in self.coefs.iter().enumerate().rev() {
            if !coef.zero() {
                // Any coefficients before `start` aren't part of the polynomial.
                return Some(deg - self.start);
            }
        }

        None
    }

    /// Divide the polynomial by x -- shift all coefficients to a lower degree -- and
    /// replace the shifted coefficient with the zero codeword. There must be no constant
    /// term.
    pub fn shift(mut self) -> Polynomial {
        self.coefs[self.start] = Codeword::default();
        self.start += 1;
        self
    }

    /// Get the coefficient of the given absolute degree if it exists in the polynomial
    /// or the zero codeword if it doesn't.
    fn get(&self, idx: usize) -> Codeword {
        match self.coefs.get(idx) {
            Some(&c) => c,
            None => Codeword::default(),
        }
    }

    /// Get the coefficient of the given degree or the zero codeword if the degree doesn't
    /// exist in the polynomial.
    pub fn coef(&self, deg: usize) -> Codeword {
        self.get(self.start + deg)
    }
}

impl std::ops::Add for Polynomial {
    type Output = Polynomial;

    fn add(mut self, rhs: Polynomial) -> Self::Output {
        // Sum the coefficients and reset the degree-0 term back to index 0. Since start >
        // 0 => start+i >= i, so there's no overwriting.
        for i in 0..self.coefs.len() {
            self.coefs[i] = self.coef(i) + rhs.coef(i);
        }

        self.start = 0;
        self
    }
}

impl std::ops::Mul<Codeword> for Polynomial {
    type Output = Polynomial;

    fn mul(mut self, rhs: Codeword) -> Self::Output {
        for coef in self.coefs.iter_mut() {
            *coef = *coef * rhs;
        }

        self
    }
}

/// Iterator over the syndromes of a received codeword. Each syndrome is a codeword in
/// GF(2^6).
struct Syndromes {
    /// Exponent power of the current syndrome.
    pow: std::ops::Range<usize>,
    /// Received codeword itself.
    word: u64,
}

impl Syndromes {
    /// Construct a new `Syndromes` for the given received codeword.
    pub fn new(word: u64) -> Syndromes {
        Syndromes {
            pow: 1..DISTANCE,
            word: word,
        }
    }
}

impl Iterator for Syndromes {
    type Item = Codeword;

    fn next(&mut self) -> Option<Self::Item> {
        match self.pow.next() {
            Some(pow) => Some((0..WORD_SIZE).fold(Codeword::default(), |s, b| {
                if self.word >> b & 1 == 0 {
                    s
                } else {
                    s + Codeword::for_power(b * pow)
                }
            })),
            None => None,
        }
    }
}

/// Implements the iterative part of the Berlekamp-Massey algorithm.
struct BCHDecoder {
    /// Saved p polynomial: p_{z_i-1}.
    p_saved: Polynomial,
    /// Previous iteration's p polynomial: p_{i-1}.
    p_cur: Polynomial,
    /// Saved q polynomial: q_{z_i-1}.
    q_saved: Polynomial,
    /// Previous iteration's q polynomial: q_{i-1}.
    q_cur: Polynomial,
    /// Degree-related term of saved p polynomial: D_{z_i-1}.
    deg_saved: usize,
    /// Degree-related term of previous p polynomial: D_{i-1}.
    deg_cur: usize,
}

impl BCHDecoder {
    /// Construct a new `BCHDecoder` from the given syndrome codeword iterator.
    pub fn new<T: Iterator<Item = Codeword>>(syndromes: T) -> BCHDecoder {
        // A zero followed by the syndromes.
        let q = Polynomial::new(std::iter::once(Codeword::for_power(0))
            .chain(syndromes.into_iter()));
        // 2t zeroes followed by a one.
        let p = Polynomial::new((0..SYNDROMES+1).map(|_| Codeword::default())
            .chain(std::iter::once(Codeword::for_power(0))));

        BCHDecoder {
            q_saved: q,
            q_cur: q.shift(),
            p_saved: p,
            p_cur: p.shift(),
            deg_saved: 0,
            deg_cur: 1,
        }
    }

    /// Perform the iterative steps to get the error-location polynomial Λ(x) with deg(Λ)
    /// <= t.
    pub fn decode(mut self) -> Polynomial {
        for _ in 0..SYNDROMES {
            self.step();
        }

        self.p_cur
    }

    /// Perform one iterative step of the algorithm, updating the state polynomials and
    /// degrees.
    fn step(&mut self) {
        let (save, q, p, d) = if self.q_cur.constant().zero() {
            self.reduce()
        } else {
            self.transform()
        };

        if save {
            self.q_saved = self.q_cur;
            self.p_saved = self.p_cur;
            self.deg_saved = self.deg_cur;
        }

        self.q_cur = q;
        self.p_cur = p;
        self.deg_cur = d;
    }

    /// Simply shift the polynomials since they have no degree-0 term.
    fn reduce(&mut self) -> (bool, Polynomial, Polynomial, usize) {
        (
            false,
            self.q_cur.shift(),
            self.p_cur.shift(),
            2 + self.deg_cur,
        )
    }

    /// Remove the degree-0 terms and shift the polynomials.
    fn transform(&mut self) -> (bool, Polynomial, Polynomial, usize) {
        let mult = self.q_cur.constant() / self.q_saved.constant();

        (
            self.deg_cur >= self.deg_saved,
            (self.q_cur + self.q_saved * mult).shift(),
            (self.p_cur + self.p_saved * mult).shift(),
            2 + std::cmp::min(self.deg_cur, self.deg_saved),
        )
    }
}

/// Uses Chien search to find the roots in GF(2^6) of an error-locator polynomial and
/// produce an iterator of error bit positions.
struct ErrorLocations {
    /// Coefficients of the polynomial.
    terms: [Codeword; ERRORS + 1],
    /// Current exponent power of the iteration.
    pow: std::ops::Range<usize>,
}

impl ErrorLocations {
    /// Construct a new `ErrorLocations` from the given coefficients, where Λ(x) =
    /// coefs[0] + coefs[1]*x + ... + coefs[e]*x^e.
    pub fn new<T: Iterator<Item = Codeword>>(coefs: T) -> ErrorLocations {
        // The maximum degree is t error locations (t+1 coefficients.)
        let mut poly = [Codeword::default(); ERRORS + 1];

        for (pow, (cur, coef)) in poly.iter_mut().zip(coefs).enumerate() {
            // Since the first call to `update_terms()` multiplies by `pow` and the
            // coefficients should equal themselves on the first iteration, divide by
            // `pow` here.
            *cur = *cur + coef / Codeword::for_power(pow)
        }

        ErrorLocations {
            terms: poly,
            pow: 0..POWERS.len(),
        }
    }

    /// Perform the term-updating step of the algorithm: x_{j,i} = x_{j,i-1} * α^j.
    fn update_terms(&mut self) {
        for (pow, term) in self.terms.iter_mut().enumerate() {
            *term = *term * Codeword::for_power(pow);
        }
    }

    /// Calculate the sum of the terms: x_{0,i} + x_{1,i} + ... + x_{t,i} -- evaluate the
    /// error-locator polynomial at Λ(α^i).
    fn sum_terms(&self) -> Codeword {
        self.terms.iter().fold(Codeword::default(), |s, &x| s + x)
    }
}

impl Iterator for ErrorLocations {
    type Item = usize;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let pow = match self.pow.next() {
                Some(pow) => pow,
                None => return None,
            };

            self.update_terms();

            if self.sum_terms().zero() {
                return Some(Codeword::for_power(pow).invert().power().unwrap());
            }
        }
    }
}

#[cfg(test)]
mod test {
    use super::{encode, Syndromes, Codeword, Polynomial, decode, BCHDecoder, ErrorLocations};

    #[test]
    fn test_for_power() {
        assert_eq!(Codeword::for_power(0).0, 0b100000);
        assert_eq!(Codeword::for_power(62).0, 0b100001);
        assert_eq!(Codeword::for_power(63).0, 0b100000);
    }

    #[test]
    fn test_add_sub() {
        assert_eq!((Codeword::new(0b100000) + Codeword::new(0b010000)).0, 0b110000);
        assert_eq!((Codeword::new(0b100000) - Codeword::new(0b010000)).0, 0b110000);
        assert_eq!((Codeword::new(0b100001) + Codeword::new(0b100001)).0, 0b000000);
        assert_eq!((Codeword::new(0b100001) - Codeword::new(0b100001)).0, 0b000000);
        assert_eq!((Codeword::new(0b100001) + Codeword::new(0b110100)).0, 0b010101);
        assert_eq!((Codeword::new(0b100001) - Codeword::new(0b110100)).0, 0b010101);
    }

    #[test]
    fn test_mul() {
        assert_eq!((Codeword::new(0b011000) * Codeword::new(0b101000)).0, 0b011110);
        assert_eq!((Codeword::new(0b000000) * Codeword::new(0b101000)).0, 0b000000);
        assert_eq!((Codeword::new(0b011000) * Codeword::new(0b000000)).0, 0b000000);
        assert_eq!((Codeword::new(0b000000) * Codeword::new(0b000000)).0, 0b000000);
        assert_eq!((Codeword::new(0b100001) * Codeword::new(0b100000)).0, 0b100001);
        assert_eq!((Codeword::new(0b100001) * Codeword::new(0b010000)).0, 0b100000);
        assert_eq!((Codeword::new(0b110011) * Codeword::new(0b110011)).0, 0b100111);
        assert_eq!((Codeword::new(0b111101) * Codeword::new(0b111101)).0, 0b011001);
    }

    #[test]
    fn test_div() {
        assert_eq!((Codeword::new(0b000100) / Codeword::new(0b101000)).0, 0b111010);
        assert_eq!((Codeword::new(0b000000) / Codeword::new(0b101000)).0, 0b000000);
        assert_eq!((Codeword::new(0b011110) / Codeword::new(0b100000)).0, 0b011110);
        assert_eq!((Codeword::new(0b011110) / Codeword::new(0b011110)).0, 0b100000);
    }

    #[test]
    fn test_cmp() {
        assert!(Codeword::new(0b100000) > Codeword::new(0b000000));
        assert!(Codeword::new(0b000000) == Codeword::new(0b000000));
        assert!(Codeword::new(0b010000) > Codeword::new(0b100000));
        assert!(Codeword::new(0b100001) > Codeword::new(0b100000));
    }

    #[test]
    fn test_encode() {
        assert_eq!(encode(0b1111111100000000),
                   0b1111111100000000100100110001000011000010001100000110100001101000);
        assert_eq!(encode(0b0011)&1, 0);
        assert_eq!(encode(0b0101)&1, 1);
        assert_eq!(encode(0b1010)&1, 1);
        assert_eq!(encode(0b1100)&1, 0);
        assert_eq!(encode(0b1111)&1, 0);
    }

    #[test]
    fn test_syndromes() {
        let w = encode(0b1111111100000000)>>1;

        assert!(Syndromes::new(w).all(|s| s.zero()));
        assert!(!Syndromes::new(w ^ 1<<60).all(|s| s.zero()));
    }

    #[test]
    fn test_polynomial() {
        let p = Polynomial::new((0..23).map(|i| {
            Codeword::for_power(i)
        }));

        assert!(p.degree().unwrap() == 22);
        assert!(p.constant() == Codeword::for_power(0));

        let p = p.shift();
        assert!(p.degree().unwrap() == 21);
        assert!(p.constant() == Codeword::for_power(1));

        let q = p.clone() * Codeword::for_power(0);
        assert!(q.degree().unwrap() == 21);
        assert!(q.constant() == Codeword::for_power(1));

        let q = p.clone() * Codeword::for_power(2);
        assert!(q.degree().unwrap() == 21);
        assert!(q.constant() == Codeword::for_power(3));

        let q = p.clone() + p.clone();
        assert!(q.constant().zero());

        for coef in q.coefs() {
            assert!(coef.zero());
        }

        let p = Polynomial::new((4..27).map(|i| {
            Codeword::for_power(i)
        }));
        let q = Polynomial::new((3..26).map(|i| {
            Codeword::for_power(i)
        }));
        let r = p + q.shift();

        assert!(r.coefs[0].zero());
        assert!(r.coefs[1].zero());
        assert!(r.coefs[2].zero());
        assert!(r.coefs[3].zero());
        assert!(r.coefs[4].zero());
        assert!(!r.coefs[22].zero());

        let p = Polynomial::new((0..2).map(|_| {
            Codeword::for_power(0)
        }));
        let q = Polynomial::new((0..4).map(|_| {
            Codeword::for_power(1)
        }));
        let r = p + q;

        assert!(r.coef(0) == Codeword::for_power(6));
    }

    #[test]
    fn test_decoder() {
        let w = encode(0b1111111100000000)^0b11<<61;
        let poly = BCHDecoder::new(Syndromes::new(w >> 1)).decode();

        assert!(poly.coef(0).power().unwrap() == 0);
        assert!(poly.coef(1).power().unwrap() == 3);
        assert!(poly.coef(2).power().unwrap() == 58);
    }

    #[test]
    fn test_locs() {
        let coefs = [Codeword::for_power(0), Codeword::for_power(3), Codeword::for_power(58)];
        let mut locs = ErrorLocations::new(coefs.iter().cloned());

        assert!(locs.next().unwrap() == 61);
        assert!(locs.next().unwrap() == 60);
        assert!(locs.next().is_none());
    }

    #[test]
    fn test_decode() {
        assert!(decode(encode(0b0000111100001111) ^ 1<<63).unwrap() ==
                (0b0000111100001111, 1));
        assert!(decode(encode(0b1100011111111111) ^ 1).unwrap() ==
                (0b1100011111111111, 0));
        assert!(decode(encode(0b1111111100000000) ^ 0b11010011<<30).unwrap() ==
                (0b1111111100000000, 5));
        assert!(decode(encode(0b1101101101010001) ^ (1<<63 | 1)).unwrap() ==
                (0b1101101101010001, 1));
        assert!(decode(encode(0b1111111111111111) ^ 0b11111111111).unwrap() ==
                (0b1111111111111111, 10));
        assert!(decode(encode(0b0000000000000000) ^ 0b11111111111).unwrap() ==
                (0b0000000000000000, 10));
        assert!(decode(encode(0b0000111110000000) ^ 0b111111111110).unwrap() ==
                (0b0000111110000000, 11));
        assert!(decode(encode(0b0000111110000000) ^ 0b111111111110).unwrap() ==
                (0b0000111110000000, 11));
        assert!(decode(encode(0b0000111110001010) ^ 0b1111111111110).is_none());
        assert!(decode(encode(0b0000001111111111) ^ 0b11111111111111111111110).is_none());
        assert!(decode(encode(0b0000001111111111) ^
                       0b00100101010101000010001100100010011111111110).is_none());
    }
}
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

//! Asynchronous client for the PD (Placement Driver) leader: maintains a gRPC
//! connection to the current leader, a region-heartbeat duplex stream, and retry /
//! reconnect machinery built on futures 0.1.

use std::result;
use std::sync::Arc;
use std::sync::RwLock;
use std::time::Duration;
use std::time::Instant;
use util::collections::HashSet;

use futures::future::{loop_fn, ok, Loop};
use futures::sync::mpsc::UnboundedSender;
use futures::task::Task;
use futures::{task, Async, Future, Poll, Stream};
use grpc::{
    CallOption, ChannelBuilder, ClientDuplexReceiver, ClientDuplexSender, Environment,
    Result as GrpcResult,
};
use kvproto::pdpb::{
    ErrorType, GetMembersRequest, GetMembersResponse, Member, RegionHeartbeatRequest,
    RegionHeartbeatResponse, ResponseHeader,
};
use kvproto::pdpb_grpc::PdClient;
use tokio_timer::timer::Handle;

use super::{Config, Error, PdFuture, Result, REQUEST_TIMEOUT};
use util::security::SecurityManager;
use util::timer::GLOBAL_TIMER_HANDLE;
use util::{Either, HandyRwLock};

/// Shared, lock-protected state of the leader connection. Replaced wholesale (client,
/// members, heartbeat stream halves) when `LeaderClient::reconnect` succeeds.
pub struct Inner {
    // gRPC environment used to build new channels on reconnect.
    env: Arc<Environment>,
    // Heartbeat sender: either the raw duplex-stream sender (by type; Left) or an
    // unbounded channel sender (Right). NOTE(review): the code that installs the Right
    // variant is outside this chunk — confirm its role against the rest of the file.
    pub hb_sender: Either<
        Option<ClientDuplexSender<RegionHeartbeatRequest>>,
        UnboundedSender<RegionHeartbeatRequest>,
    >,
    // Heartbeat receiver: Left holds the duplex receiver until `HeartbeatReceiver`
    // takes it; Right parks the receiver's task so `reconnect` can wake it when a
    // fresh stream is installed.
    pub hb_receiver: Either<Option<ClientDuplexReceiver<RegionHeartbeatResponse>>, Task>,
    pub client: PdClient,
    members: GetMembersResponse,

    security_mgr: Arc<SecurityManager>,
    // Optional callback invoked (under the write lock) after a successful reconnect.
    on_reconnect: Option<Box<Fn() + Sync + Send + 'static>>,

    // When the connection was last (re)established; used to rate-limit reconnects.
    last_update: Instant,
}

/// Stream of heartbeat responses that transparently survives reconnects by re-taking
/// the receiver half from `Inner` whenever the current one ends or errors.
pub struct HeartbeatReceiver {
    // Currently-active duplex receiver, if one has been taken from `Inner`.
    receiver: Option<ClientDuplexReceiver<RegionHeartbeatResponse>>,
    inner: Arc<RwLock<Inner>>,
}

impl Stream for HeartbeatReceiver {
    type Item = RegionHeartbeatResponse;
    type Error = Error;

    fn poll(&mut self) -> Poll<Option<Self::Item>, Error> {
        loop {
            // Drain the active receiver while it's healthy.
            if let Some(ref mut receiver) = self.receiver {
                match receiver.poll() {
                    Ok(Async::Ready(Some(item))) => return Ok(Async::Ready(Some(item))),
                    Ok(Async::NotReady) => return Ok(Async::NotReady),
                    // If it's None or there's error, we need to update receiver.
                    _ => {}
                }
            }

            // Drop the dead receiver and try to grab a fresh one from `Inner`.
            self.receiver.take();

            let mut inner = self.inner.wl();
            let mut receiver = None;
            if let Either::Left(ref mut recv) = inner.hb_receiver {
                receiver = recv.take();
            }
            if receiver.is_some() {
                info!("heartbeat receiver is refreshed.");
                self.receiver = receiver;
            } else {
                // No new receiver yet: park this task so `reconnect` can notify it
                // once a fresh stream is installed.
                inner.hb_receiver = Either::Right(task::current());
                return Ok(Async::NotReady);
            }
        }
    }
}

/// A leader client doing requests asynchronous.
pub struct LeaderClient {
    // Timer handle used to delay retries after a failed reconnect.
    timer: Handle,
    inner: Arc<RwLock<Inner>>,
}

impl LeaderClient {
    /// Wrap an already-connected `PdClient` (and the member list it reported) and open
    /// the region-heartbeat duplex stream on it.
    pub fn new(
        env: Arc<Environment>,
        security_mgr: Arc<SecurityManager>,
        client: PdClient,
        members: GetMembersResponse,
    ) -> LeaderClient {
        let (tx, rx) = client.region_heartbeat().unwrap();
        LeaderClient {
            timer: GLOBAL_TIMER_HANDLE.clone(),
            inner: Arc::new(RwLock::new(Inner {
                env,
                hb_sender: Either::Left(Some(tx)),
                hb_receiver: Either::Left(Some(rx)),
                client,
                members,
                security_mgr,
                on_reconnect: None,

                last_update: Instant::now(),
            })),
        }
    }

    /// Spawnable future that feeds every heartbeat response to `f`; panics on stream
    /// error (the `HeartbeatReceiver` itself never yields one).
    pub fn handle_region_heartbeat_response<F>(&self, f: F) -> PdFuture<()>
    where
        F: Fn(RegionHeartbeatResponse) + Send + 'static,
    {
        let recv = HeartbeatReceiver {
            receiver: None,
            inner: Arc::clone(&self.inner),
        };
        Box::new(
            recv.for_each(move |resp| {
                f(resp);
                Ok(())
            }).map_err(|e| panic!("unexpected error: {:?}", e)),
        )
    }

    /// Register the callback run after each successful `reconnect`.
    pub fn on_reconnect(&self, f: Box<Fn() + Sync + Send + 'static>) {
        let mut inner = self.inner.wl();
        inner.on_reconnect = Some(f);
    }

    /// Build a retryable request context; `func` issues one attempt against the
    /// current `Inner`, and `retry` bounds the number of reconnect cycles.
    pub fn request<Req, Resp, F>(&self, req: Req, func: F, retry: usize) -> Request<Req, Resp, F>
    where
        Req: Clone + 'static,
        F: FnMut(&RwLock<Inner>, Req) -> PdFuture<Resp> + Send + 'static,
    {
        Request {
            reconnect_count: retry,
            request_sent: 0,
            client: LeaderClient {
                timer: self.timer.clone(),
                inner: Arc::clone(&self.inner),
            },
            req,
            resp: None,
            func,
        }
    }

    /// Clone of the currently-known PD leader member.
    pub fn get_leader(&self) -> Member {
        self.inner.rl().members.get_leader().clone()
    }

    // Re-establish connection with PD leader in synchronized fashion.
    pub fn reconnect(&self) -> Result<()> {
        let ((client, members), start) = {
            // Connect under the read lock only; the write lock below is taken
            // separately so the slow network step doesn't block readers.
            let inner = self.inner.rl();
            if inner.last_update.elapsed() < Duration::from_secs(RECONNECT_INTERVAL_SEC) {
                // Avoid unnecessary updating.
                return Ok(());
            }

            let start = Instant::now();
            (
                try_connect_leader(Arc::clone(&inner.env), &inner.security_mgr, &inner.members)?,
                start,
            )
        };

        {
            // Swap in the new client, member list, and heartbeat stream halves.
            let mut inner = self.inner.wl();
            let (tx, rx) = client.region_heartbeat().unwrap();
            warn!("heartbeat sender and receiver are stale, refreshing..");

            // Try to cancel an unused heartbeat sender.
            if let Either::Left(Some(ref mut r)) = inner.hb_sender {
                info!("cancel region heartbeat sender");
                r.cancel();
            }
            inner.hb_sender = Either::Left(Some(tx));
            if let Either::Right(ref mut task) = inner.hb_receiver {
                // Wake a parked `HeartbeatReceiver` so it picks up the new stream.
                task.notify();
            }
            inner.hb_receiver = Either::Left(Some(rx));
            inner.client = client;
            inner.members = members;
            inner.last_update = Instant::now();
            if let Some(ref on_reconnect) = inner.on_reconnect {
                on_reconnect();
            }
        }
        warn!("updating PD client done, spent {:?}", start.elapsed());
        Ok(())
    }
}

pub const RECONNECT_INTERVAL_SEC: u64 = 1; // 1s

/// The context of sending requets.
pub struct Request<Req, Resp, F> {
    // Remaining reconnect attempts before giving up.
    reconnect_count: usize,
    // Attempts made on the current connection; reset to 0 after a reconnect.
    request_sent: usize,
    client: LeaderClient,
    req: Req,
    // Successful response, once one attempt completes.
    resp: Option<Result<Resp>>,
    func: F,
}

const MAX_REQUEST_COUNT: usize = 3;

impl<Req, Resp, F> Request<Req, Resp, F>
where
    Req: Clone + Send + 'static,
    Resp: Send + 'static,
    F: FnMut(&RwLock<Inner>, Req) -> PdFuture<Resp> + Send + 'static,
{
    // Reconnect once `MAX_REQUEST_COUNT` attempts have failed on this connection;
    // on reconnect failure, delay `RECONNECT_INTERVAL_SEC` before the loop retries.
    fn reconnect_if_needed(mut self) -> Box<Future<Item = Self, Error = Self> + Send> {
        debug!("reconnect remains: {}", self.reconnect_count);

        if self.request_sent < MAX_REQUEST_COUNT {
            return Box::new(ok(self));
        }

        // Updating client.
        self.reconnect_count -= 1;

        // FIXME: should not block the core.
        warn!("updating PD client, block the tokio core");
        match self.client.reconnect() {
            Ok(_) => {
                self.request_sent = 0;
                Box::new(ok(self))
            }
            Err(_) => Box::new(
                self.client
                    .timer
                    .delay(Instant::now() + Duration::from_secs(RECONNECT_INTERVAL_SEC))
                    .then(|_| Err(self)),
            ),
        }
    }

    // One attempt: run `func` and stash a success in `resp`; a failure returns the
    // context through the error position so the loop can decide to retry.
    fn send_and_receive(mut self) -> Box<Future<Item = Self, Error = Self> + Send> {
        self.request_sent += 1;
        debug!("request sent: {}", self.request_sent);
        let r = self.req.clone();

        Box::new(ok(self).and_then(|mut ctx| {
            let req = (ctx.func)(&ctx.client.inner, r);
            req.then(|resp| match resp {
                Ok(resp) => {
                    ctx.resp = Some(Ok(resp));
                    Ok(ctx)
                }
                Err(err) => {
                    error!("request failed: {:?}", err);
                    Err(ctx)
                }
            })
        }))
    }

    // Stop looping when a response was recorded or the retry budget is exhausted.
    fn break_or_continue(ctx: result::Result<Self, Self>) -> Result<Loop<Self, Self>> {
        let ctx = match ctx {
            Ok(ctx) | Err(ctx) => ctx,
        };
        let done = ctx.reconnect_count == 0 || ctx.resp.is_some();
        if done {
            Ok(Loop::Break(ctx))
        } else {
            Ok(Loop::Continue(ctx))
        }
    }

    // Unwrap the loop result into the final response (or a generic failure).
    fn post_loop(ctx: Result<Self>) -> Result<Resp> {
        let ctx = ctx.expect("end loop with Ok(_)");
        ctx.resp.unwrap_or_else(|| Err(box_err!("fail to request")))
    }

    /// Returns a Future, it is resolves once a future returned by the closure
    /// is resolved successfully, otherwise it repeats `retry` times.
    pub fn execute(self) -> PdFuture<Resp> {
        let ctx = self;
        Box::new(
            loop_fn(ctx, |ctx| {
                ctx.reconnect_if_needed()
                    .and_then(Self::send_and_receive)
                    .then(Self::break_or_continue)
            }).then(Self::post_loop),
        )
    }
}

/// Do a request in synchronized fashion.
///
/// Calls `func` against the current client up to `retry` times, reconnecting after
/// each failed attempt.
pub fn sync_request<F, R>(client: &LeaderClient, retry: usize, func: F) -> Result<R>
where
    F: Fn(&PdClient) -> GrpcResult<R>,
{
    for _ in 0..retry {
        // DO NOT put any lock operation in match statement, or it will cause dead lock!
        let ret = { func(&client.inner.rl().client).map_err(Error::Grpc) };
        match ret {
            Ok(r) => {
                return Ok(r);
            }
            Err(e) => {
                error!("fail to request: {:?}", e);
                if let Err(e) = client.reconnect() {
                    error!("fail to reconnect: {:?}", e);
                }
            }
        }
    }

    Err(box_err!("fail to request"))
}

/// Probe every configured PD endpoint: reject duplicates, require a consistent
/// cluster ID across responders, then connect to the leader reported by the first
/// responder. Unreachable endpoints are skipped (not fatal) as long as one responds.
pub fn validate_endpoints(
    env: Arc<Environment>,
    cfg: &Config,
    security_mgr: &SecurityManager,
) -> Result<(PdClient, GetMembersResponse)> {
    let len = cfg.endpoints.len();
    let mut endpoints_set = HashSet::with_capacity_and_hasher(len, Default::default());

    let mut members = None;
    let mut cluster_id = None;
    for ep in &cfg.endpoints {
        if !endpoints_set.insert(ep) {
            return Err(box_err!("duplicate PD endpoint {}", ep));
        }

        let (_, resp) = match connect(Arc::clone(&env), security_mgr, ep) {
            Ok(resp) => resp,
            // Ignore failed PD node.
            Err(e) => {
                error!("PD endpoint {} failed to respond: {:?}", ep, e);
                continue;
            }
        };

        // Check cluster ID.
        let cid = resp.get_header().get_cluster_id();
        if let Some(sample) = cluster_id {
            if sample != cid {
                return Err(box_err!(
                    "PD response cluster_id mismatch, want {}, got {}",
                    sample,
                    cid
                ));
            }
        } else {
            cluster_id = Some(cid);
        }
        // TODO: check all fields later?

        if members.is_none() {
            members = Some(resp);
        }
    }

    match members {
        Some(members) => {
            let (client, members) = try_connect_leader(Arc::clone(&env), security_mgr, &members)?;
            info!("All PD endpoints are consistent: {:?}", cfg.endpoints);
            Ok((client, members))
        }
        _ => Err(box_err!("PD cluster failed to respond")),
    }
}

/// Open a keepalive-enabled channel to `addr` (scheme prefix stripped) and fetch the
/// member list with a `REQUEST_TIMEOUT`-bounded `GetMembers` call.
fn connect(
    env: Arc<Environment>,
    security_mgr: &SecurityManager,
    addr: &str,
) -> Result<(PdClient, GetMembersResponse)> {
    info!("connect to PD endpoint: {:?}", addr);
    let addr = addr
        .trim_left_matches("http://")
        .trim_left_matches("https://");
    let cb = ChannelBuilder::new(env)
        .keepalive_time(Duration::from_secs(10))
        .keepalive_timeout(Duration::from_secs(3));
    let channel = security_mgr.connect(cb, addr);
    let client = PdClient::new(channel);
    let option = CallOption::default().timeout(Duration::from_secs(REQUEST_TIMEOUT));
    match client.get_members_opt(&GetMembersRequest::new(), option) {
        Ok(resp) => Ok((client, resp)),
        Err(e) => Err(Error::Grpc(e)),
    }
}

/// Find the current PD leader starting from a previous `GetMembers` response: ask the
/// other members first, then the previous leader; panics if a member now reports a
/// different cluster ID. (Function continues past this chunk.)
pub fn try_connect_leader(
    env: Arc<Environment>,
    security_mgr: &SecurityManager,
    previous: &GetMembersResponse,
) -> Result<(PdClient, GetMembersResponse)> {
    let previous_leader = previous.get_leader();
    let members = previous.get_members();
    let cluster_id = previous.get_header().get_cluster_id();
    let mut resp = None;
    // Try to connect to other members, then the previous leader.
    'outer: for m in members
        .into_iter()
        .filter(|m| *m != previous_leader)
        .chain(&[previous_leader.clone()])
    {
        for ep in m.get_client_urls() {
            match connect(Arc::clone(&env), security_mgr, ep.as_str()) {
                Ok((_, r)) => {
                    let new_cluster_id = r.get_header().get_cluster_id();
                    if new_cluster_id == cluster_id {
                        resp = Some(r);
                        break 'outer;
                    } else {
                        panic!(
                            "{} no longer belongs to cluster {}, it is in {}",
                            ep, cluster_id, new_cluster_id
                        );
                    }
                }
                Err(e) => {
                    error!("failed to connect to {}, {:?}", ep, e);
                    continue;
                }
            }
        }
    }

    // Then try to connect the PD cluster leader.
if let Some(resp) = resp { let leader = resp.get_leader().clone(); for ep in leader.get_client_urls() { if let Ok((client, _)) = connect(Arc::clone(&env), security_mgr, ep.as_str()) { info!("connect to PD leader {:?}", ep); return Ok((client, resp)); } } } Err(box_err!("failed to connect to {:?}", members)) } pub fn check_resp_header(header: &ResponseHeader) -> Result<()> { if !header.has_error() { return Ok(()); } // TODO: translate more error types let err = header.get_error(); match err.get_field_type() { ErrorType::ALREADY_BOOTSTRAPPED => Err(Error::ClusterBootstrapped(header.get_cluster_id())), ErrorType::NOT_BOOTSTRAPPED => Err(Error::ClusterNotBootstrapped(header.get_cluster_id())), _ => Err(box_err!(err.get_message())), } } use verb tense to indicate connection state (#3416) Signed-off-by: Greg Weber <79e2475f81a6317276bf7cbb3958b20d289b78df@gregweber.info> // Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
// Client-side plumbing for talking to the PD (Placement Driver) cluster:
// leader discovery, periodic reconnection, retried requests, and the
// bidirectional region-heartbeat stream.

use std::result;
use std::sync::Arc;
use std::sync::RwLock;
use std::time::Duration;
use std::time::Instant;
use util::collections::HashSet;

use futures::future::{loop_fn, ok, Loop};
use futures::sync::mpsc::UnboundedSender;
use futures::task::Task;
use futures::{task, Async, Future, Poll, Stream};
use grpc::{
    CallOption, ChannelBuilder, ClientDuplexReceiver, ClientDuplexSender, Environment,
    Result as GrpcResult,
};
use kvproto::pdpb::{
    ErrorType, GetMembersRequest, GetMembersResponse, Member, RegionHeartbeatRequest,
    RegionHeartbeatResponse, ResponseHeader,
};
use kvproto::pdpb_grpc::PdClient;
use tokio_timer::timer::Handle;

use super::{Config, Error, PdFuture, Result, REQUEST_TIMEOUT};
use util::security::SecurityManager;
use util::timer::GLOBAL_TIMER_HANDLE;
use util::{Either, HandyRwLock};

/// Shared, lock-protected state of a `LeaderClient`.
///
/// The heartbeat sender/receiver fields use `Either` to model a handoff:
/// `Left(Some(..))` holds a fresh gRPC duplex half waiting to be claimed,
/// `Left(None)` means it has been taken, and for the receiver `Right(Task)`
/// parks the consumer task so `reconnect` can wake it when a new stream
/// becomes available.
pub struct Inner {
    env: Arc<Environment>,
    pub hb_sender: Either<
        Option<ClientDuplexSender<RegionHeartbeatRequest>>,
        UnboundedSender<RegionHeartbeatRequest>,
    >,
    pub hb_receiver: Either<Option<ClientDuplexReceiver<RegionHeartbeatResponse>>, Task>,
    pub client: PdClient,
    members: GetMembersResponse,
    security_mgr: Arc<SecurityManager>,
    // Callback invoked after every successful reconnect.
    on_reconnect: Option<Box<Fn() + Sync + Send + 'static>>,
    // Timestamp of the last successful reconnect; throttles `reconnect`.
    last_update: Instant,
}

/// A `Stream` of region heartbeat responses that transparently swaps in a
/// fresh gRPC receiver after a reconnect.
pub struct HeartbeatReceiver {
    receiver: Option<ClientDuplexReceiver<RegionHeartbeatResponse>>,
    inner: Arc<RwLock<Inner>>,
}

impl Stream for HeartbeatReceiver {
    type Item = RegionHeartbeatResponse;
    type Error = Error;

    fn poll(&mut self) -> Poll<Option<Self::Item>, Error> {
        loop {
            // Drain the current receiver while it is healthy.
            if let Some(ref mut receiver) = self.receiver {
                match receiver.poll() {
                    Ok(Async::Ready(Some(item))) => return Ok(Async::Ready(Some(item))),
                    Ok(Async::NotReady) => return Ok(Async::NotReady),
                    // If it's None or there's error, we need to update receiver.
                    _ => {}
                }
            }

            // Current receiver is exhausted or broken; drop it and try to
            // claim the replacement that `reconnect` may have stashed.
            self.receiver.take();

            let mut inner = self.inner.wl();
            let mut receiver = None;
            if let Either::Left(ref mut recv) = inner.hb_receiver {
                receiver = recv.take();
            }
            if receiver.is_some() {
                info!("heartbeat receiver is refreshed.");
                self.receiver = receiver;
                // Loop again to poll the fresh receiver.
            } else {
                // No replacement yet: park this task so `reconnect` can
                // notify us once a new receiver is installed.
                inner.hb_receiver = Either::Right(task::current());
                return Ok(Async::NotReady);
            }
        }
    }
}

/// A leader client doing requests asynchronous.
pub struct LeaderClient {
    timer: Handle,
    inner: Arc<RwLock<Inner>>,
}

impl LeaderClient {
    /// Builds a client from an already-connected `PdClient` and the member
    /// list obtained during endpoint validation. Opens the region heartbeat
    /// duplex stream eagerly.
    pub fn new(
        env: Arc<Environment>,
        security_mgr: Arc<SecurityManager>,
        client: PdClient,
        members: GetMembersResponse,
    ) -> LeaderClient {
        let (tx, rx) = client.region_heartbeat().unwrap();
        LeaderClient {
            timer: GLOBAL_TIMER_HANDLE.clone(),
            inner: Arc::new(RwLock::new(Inner {
                env,
                hb_sender: Either::Left(Some(tx)),
                hb_receiver: Either::Left(Some(rx)),
                client,
                members,
                security_mgr,
                on_reconnect: None,
                last_update: Instant::now(),
            })),
        }
    }

    /// Returns a future that feeds every heartbeat response to `f`,
    /// surviving reconnects via `HeartbeatReceiver`. Stream errors are
    /// treated as fatal (panic) since the receiver refreshes itself.
    pub fn handle_region_heartbeat_response<F>(&self, f: F) -> PdFuture<()>
    where
        F: Fn(RegionHeartbeatResponse) + Send + 'static,
    {
        let recv = HeartbeatReceiver {
            receiver: None,
            inner: Arc::clone(&self.inner),
        };
        Box::new(
            recv.for_each(move |resp| {
                f(resp);
                Ok(())
            }).map_err(|e| panic!("unexpected error: {:?}", e)),
        )
    }

    /// Registers a callback run after each successful reconnect.
    pub fn on_reconnect(&self, f: Box<Fn() + Sync + Send + 'static>) {
        let mut inner = self.inner.wl();
        inner.on_reconnect = Some(f);
    }

    /// Wraps `req` and `func` into a retryable `Request`; `retry` bounds the
    /// number of reconnect attempts the retry loop may perform.
    pub fn request<Req, Resp, F>(&self, req: Req, func: F, retry: usize) -> Request<Req, Resp, F>
    where
        Req: Clone + 'static,
        F: FnMut(&RwLock<Inner>, Req) -> PdFuture<Resp> + Send + 'static,
    {
        Request {
            reconnect_count: retry,
            request_sent: 0,
            client: LeaderClient {
                timer: self.timer.clone(),
                inner: Arc::clone(&self.inner),
            },
            req,
            resp: None,
            func,
        }
    }

    /// Returns a clone of the currently-known PD leader.
    pub fn get_leader(&self) -> Member {
        self.inner.rl().members.get_leader().clone()
    }

    // Re-establish connection with PD leader in synchronized fashion.
    pub fn reconnect(&self) -> Result<()> {
        // Phase 1: connect under the read lock only, so concurrent readers
        // are not blocked during the (slow) network round-trips.
        let ((client, members), start) = {
            let inner = self.inner.rl();
            if inner.last_update.elapsed() < Duration::from_secs(RECONNECT_INTERVAL_SEC) {
                // Avoid unnecessary updating.
                return Ok(());
            }

            let start = Instant::now();
            (
                try_connect_leader(Arc::clone(&inner.env), &inner.security_mgr, &inner.members)?,
                start,
            )
        };

        // Phase 2: swap in the new client/streams under the write lock.
        {
            let mut inner = self.inner.wl();
            let (tx, rx) = client.region_heartbeat().unwrap();
            warn!("heartbeat sender and receiver are stale, refreshing..");

            // Try to cancel an unused heartbeat sender.
            if let Either::Left(Some(ref mut r)) = inner.hb_sender {
                info!("cancel region heartbeat sender");
                r.cancel();
            }
            inner.hb_sender = Either::Left(Some(tx));
            if let Either::Right(ref mut task) = inner.hb_receiver {
                // Wake the parked HeartbeatReceiver so it picks up `rx`.
                task.notify();
            }
            inner.hb_receiver = Either::Left(Some(rx));
            inner.client = client;
            inner.members = members;
            inner.last_update = Instant::now();
            if let Some(ref on_reconnect) = inner.on_reconnect {
                on_reconnect();
            }
        }
        warn!("updating PD client done, spent {:?}", start.elapsed());
        Ok(())
    }
}

pub const RECONNECT_INTERVAL_SEC: u64 = 1; // 1s

/// The context of sending requests.
pub struct Request<Req, Resp, F> {
    reconnect_count: usize,
    request_sent: usize,
    client: LeaderClient,
    req: Req,
    resp: Option<Result<Resp>>,
    func: F,
}

// After this many consecutive failed sends, force a reconnect.
const MAX_REQUEST_COUNT: usize = 3;

impl<Req, Resp, F> Request<Req, Resp, F>
where
    Req: Clone + Send + 'static,
    Resp: Send + 'static,
    F: FnMut(&RwLock<Inner>, Req) -> PdFuture<Resp> + Send + 'static,
{
    // Reconnects (consuming one retry budget unit) once MAX_REQUEST_COUNT
    // sends have failed; on reconnect failure, delays before erroring so the
    // retry loop backs off.
    fn reconnect_if_needed(mut self) -> Box<Future<Item = Self, Error = Self> + Send> {
        debug!("reconnect remains: {}", self.reconnect_count);

        if self.request_sent < MAX_REQUEST_COUNT {
            return Box::new(ok(self));
        }

        // Updating client.
        self.reconnect_count -= 1;

        // FIXME: should not block the core.
        warn!("updating PD client, block the tokio core");
        match self.client.reconnect() {
            Ok(_) => {
                self.request_sent = 0;
                Box::new(ok(self))
            }
            Err(_) => Box::new(
                self.client
                    .timer
                    .delay(Instant::now() + Duration::from_secs(RECONNECT_INTERVAL_SEC))
                    .then(|_| Err(self)),
            ),
        }
    }

    // Performs one attempt; success stores the response in `resp`, failure
    // propagates the context through the error arm for the retry loop.
    fn send_and_receive(mut self) -> Box<Future<Item = Self, Error = Self> + Send> {
        self.request_sent += 1;
        debug!("request sent: {}", self.request_sent);
        let r = self.req.clone();

        Box::new(ok(self).and_then(|mut ctx| {
            let req = (ctx.func)(&ctx.client.inner, r);
            req.then(|resp| match resp {
                Ok(resp) => {
                    ctx.resp = Some(Ok(resp));
                    Ok(ctx)
                }
                Err(err) => {
                    error!("request failed: {:?}", err);
                    Err(ctx)
                }
            })
        }))
    }

    // Ends the loop when the retry budget is exhausted or a response landed.
    fn break_or_continue(ctx: result::Result<Self, Self>) -> Result<Loop<Self, Self>> {
        let ctx = match ctx {
            Ok(ctx) | Err(ctx) => ctx,
        };
        let done = ctx.reconnect_count == 0 || ctx.resp.is_some();
        if done {
            Ok(Loop::Break(ctx))
        } else {
            Ok(Loop::Continue(ctx))
        }
    }

    // Extracts the final response, or a generic error if none was captured.
    fn post_loop(ctx: Result<Self>) -> Result<Resp> {
        let ctx = ctx.expect("end loop with Ok(_)");
        ctx.resp.unwrap_or_else(|| Err(box_err!("fail to request")))
    }

    /// Returns a Future that resolves once a future returned by the closure
    /// resolves successfully; otherwise the request is repeated up to
    /// `retry` times.
    pub fn execute(self) -> PdFuture<Resp> {
        let ctx = self;
        Box::new(
            loop_fn(ctx, |ctx| {
                ctx.reconnect_if_needed()
                    .and_then(Self::send_and_receive)
                    .then(Self::break_or_continue)
            }).then(Self::post_loop),
        )
    }
}

/// Do a request in synchronized fashion.
pub fn sync_request<F, R>(client: &LeaderClient, retry: usize, func: F) -> Result<R>
where
    F: Fn(&PdClient) -> GrpcResult<R>,
{
    for _ in 0..retry {
        // DO NOT put any lock operation in match statement, or it will cause dead lock!
        let ret = { func(&client.inner.rl().client).map_err(Error::Grpc) };
        match ret {
            Ok(r) => {
                return Ok(r);
            }
            Err(e) => {
                error!("fail to request: {:?}", e);
                if let Err(e) = client.reconnect() {
                    error!("fail to reconnect: {:?}", e);
                }
            }
        }
    }
    Err(box_err!("fail to request"))
}

/// Probes every configured endpoint, verifies they agree on a cluster id,
/// and returns a client connected to the PD leader. Unreachable endpoints
/// are skipped; duplicate endpoints and mismatched cluster ids are errors.
pub fn validate_endpoints(
    env: Arc<Environment>,
    cfg: &Config,
    security_mgr: &SecurityManager,
) -> Result<(PdClient, GetMembersResponse)> {
    let len = cfg.endpoints.len();
    let mut endpoints_set = HashSet::with_capacity_and_hasher(len, Default::default());

    let mut members = None;
    let mut cluster_id = None;
    for ep in &cfg.endpoints {
        if !endpoints_set.insert(ep) {
            return Err(box_err!("duplicate PD endpoint {}", ep));
        }

        let (_, resp) = match connect(Arc::clone(&env), security_mgr, ep) {
            Ok(resp) => resp,
            // Ignore failed PD node.
            Err(e) => {
                error!("PD endpoint {} failed to respond: {:?}", ep, e);
                continue;
            }
        };

        // Check cluster ID.
        let cid = resp.get_header().get_cluster_id();
        if let Some(sample) = cluster_id {
            if sample != cid {
                return Err(box_err!(
                    "PD response cluster_id mismatch, want {}, got {}",
                    sample,
                    cid
                ));
            }
        } else {
            cluster_id = Some(cid);
        }
        // TODO: check all fields later?

        if members.is_none() {
            members = Some(resp);
        }
    }

    match members {
        Some(members) => {
            let (client, members) = try_connect_leader(Arc::clone(&env), security_mgr, &members)?;
            info!("All PD endpoints are consistent: {:?}", cfg.endpoints);
            Ok((client, members))
        }
        _ => Err(box_err!("PD cluster failed to respond")),
    }
}

// Opens a gRPC channel to a single PD endpoint and fetches its member list.
fn connect(
    env: Arc<Environment>,
    security_mgr: &SecurityManager,
    addr: &str,
) -> Result<(PdClient, GetMembersResponse)> {
    info!("connecting to PD endpoint: {:?}", addr);
    let addr = addr
        .trim_left_matches("http://")
        .trim_left_matches("https://");
    let cb = ChannelBuilder::new(env)
        .keepalive_time(Duration::from_secs(10))
        .keepalive_timeout(Duration::from_secs(3));
    let channel = security_mgr.connect(cb, addr);
    let client = PdClient::new(channel);
    let option = CallOption::default().timeout(Duration::from_secs(REQUEST_TIMEOUT));
    match client.get_members_opt(&GetMembersRequest::new(), option) {
        Ok(resp) => Ok((client, resp)),
        Err(e) => Err(Error::Grpc(e)),
    }
}

/// Discovers and connects to the current PD leader, starting from a previous
/// `GetMembersResponse`. Panics if a reachable member reports a different
/// cluster id (the node was moved to another cluster).
pub fn try_connect_leader(
    env: Arc<Environment>,
    security_mgr: &SecurityManager,
    previous: &GetMembersResponse,
) -> Result<(PdClient, GetMembersResponse)> {
    let previous_leader = previous.get_leader();
    let members = previous.get_members();
    let cluster_id = previous.get_header().get_cluster_id();
    let mut resp = None;
    // Try to connect to other members, then the previous leader.
    'outer: for m in members
        .into_iter()
        .filter(|m| *m != previous_leader)
        .chain(&[previous_leader.clone()])
    {
        for ep in m.get_client_urls() {
            match connect(Arc::clone(&env), security_mgr, ep.as_str()) {
                Ok((_, r)) => {
                    let new_cluster_id = r.get_header().get_cluster_id();
                    if new_cluster_id == cluster_id {
                        resp = Some(r);
                        break 'outer;
                    } else {
                        panic!(
                            "{} no longer belongs to cluster {}, it is in {}",
                            ep, cluster_id, new_cluster_id
                        );
                    }
                }
                Err(e) => {
                    error!("failed to connect to {}, {:?}", ep, e);
                    continue;
                }
            }
        }
    }

    // Then try to connect the PD cluster leader.
    if let Some(resp) = resp {
        let leader = resp.get_leader().clone();
        for ep in leader.get_client_urls() {
            if let Ok((client, _)) = connect(Arc::clone(&env), security_mgr, ep.as_str()) {
                info!("connected to PD leader {:?}", ep);
                return Ok((client, resp));
            }
        }
    }

    Err(box_err!("failed to connect to {:?}", members))
}

/// Translates a PD `ResponseHeader` error into this crate's `Error` type.
pub fn check_resp_header(header: &ResponseHeader) -> Result<()> {
    if !header.has_error() {
        return Ok(());
    }
    // TODO: translate more error types
    let err = header.get_error();
    match err.get_field_type() {
        ErrorType::ALREADY_BOOTSTRAPPED => Err(Error::ClusterBootstrapped(header.get_cluster_id())),
        ErrorType::NOT_BOOTSTRAPPED => Err(Error::ClusterNotBootstrapped(header.get_cluster_id())),
        _ => Err(box_err!(err.get_message())),
    }
}
use rustc::mir::repr as mir; use error::{EvalError, EvalResult}; use memory::Pointer; #[derive(Clone, Copy, Debug, PartialEq)] pub enum PrimVal { Bool(bool), I8(i8), I16(i16), I32(i32), I64(i64), U8(u8), U16(u16), U32(u32), U64(u64), AbstractPtr(Pointer), FnPtr(Pointer), IntegerPtr(u64), Char(char), F32(f32), F64(f64), } /// returns the result of the operation and whether the operation overflowed pub fn binary_op<'tcx>(bin_op: mir::BinOp, left: PrimVal, right: PrimVal) -> EvalResult<'tcx, (PrimVal, bool)> { use rustc::mir::repr::BinOp::*; use self::PrimVal::*; macro_rules! overflow { ($v:ident, $v2:ident, $l:ident, $op:ident, $r:ident) => ({ let (val, of) = $l.$op($r); if of { return Ok(($v(val), true)); } else { $v(val) } }) } macro_rules! int_binops { ($v:ident, $l:ident, $r:ident) => ({ match bin_op { Add => overflow!($v, $v, $l, overflowing_add, $r), Sub => overflow!($v, $v, $l, overflowing_sub, $r), Mul => overflow!($v, $v, $l, overflowing_mul, $r), Div => overflow!($v, $v, $l, overflowing_div, $r), Rem => overflow!($v, $v, $l, overflowing_rem, $r), BitXor => $v($l ^ $r), BitAnd => $v($l & $r), BitOr => $v($l | $r), // these have already been handled Shl => unreachable!(), Shr => unreachable!(), Eq => Bool($l == $r), Ne => Bool($l != $r), Lt => Bool($l < $r), Le => Bool($l <= $r), Gt => Bool($l > $r), Ge => Bool($l >= $r), } }) } macro_rules! 
float_binops { ($v:ident, $l:ident, $r:ident) => ({ match bin_op { Add => $v($l + $r), Sub => $v($l - $r), Mul => $v($l * $r), Div => $v($l / $r), Rem => $v($l % $r), // invalid float ops BitXor => unreachable!(), BitAnd => unreachable!(), BitOr => unreachable!(), Shl => unreachable!(), Shr => unreachable!(), // directly comparing floats is questionable // miri could forbid it, or at least miri as rust const eval should forbid it Eq => Bool($l == $r), Ne => Bool($l != $r), Lt => Bool($l < $r), Le => Bool($l <= $r), Gt => Bool($l > $r), Ge => Bool($l >= $r), } }) } fn unrelated_ptr_ops<'tcx>(bin_op: mir::BinOp) -> EvalResult<'tcx, PrimVal> { use rustc::mir::repr::BinOp::*; match bin_op { Eq => Ok(Bool(false)), Ne => Ok(Bool(true)), Lt | Le | Gt | Ge => Err(EvalError::InvalidPointerMath), _ => unimplemented!(), } } match bin_op { // can have rhs with a different numeric type Shl | Shr => { // these numbers are the maximum number a bitshift rhs could possibly have // e.g. u16 can be bitshifted by 0..16, so masking with 0b1111 (16 - 1) will ensure we are in that range let type_bits: u32 = match left { I8(_) | U8(_) => 8, I16(_) | U16(_) => 16, I32(_) | U32(_) => 32, I64(_) | U64(_) => 64, _ => unreachable!(), }; assert!(type_bits.is_power_of_two()); // turn into `u32` because `overflowing_sh{l,r}` only take `u32` let r = match right { I8(i) => i as u32, I16(i) => i as u32, I32(i) => i as u32, I64(i) => i as u32, U8(i) => i as u32, U16(i) => i as u32, U32(i) => i as u32, U64(i) => i as u32, _ => panic!("bad MIR: bitshift rhs is not integral"), }; // apply mask let r = r & (type_bits - 1); macro_rules! 
shift { ($v:ident, $l:ident, $r:ident) => ({ match bin_op { Shl => overflow!($v, U32, $l, overflowing_shl, $r), Shr => overflow!($v, U32, $l, overflowing_shr, $r), _ => unreachable!(), } }) } let val = match left { I8(l) => shift!(I8, l, r), I16(l) => shift!(I16, l, r), I32(l) => shift!(I32, l, r), I64(l) => shift!(I64, l, r), U8(l) => shift!(U8, l, r), U16(l) => shift!(U16, l, r), U32(l) => shift!(U32, l, r), U64(l) => shift!(U64, l, r), _ => unreachable!(), }; return Ok((val, false)); }, _ => {}, } let val = match (left, right) { (I8(l), I8(r)) => int_binops!(I8, l, r), (I16(l), I16(r)) => int_binops!(I16, l, r), (I32(l), I32(r)) => int_binops!(I32, l, r), (I64(l), I64(r)) => int_binops!(I64, l, r), (U8(l), U8(r)) => int_binops!(U8, l, r), (U16(l), U16(r)) => int_binops!(U16, l, r), (U32(l), U32(r)) => int_binops!(U32, l, r), (U64(l), U64(r)) => int_binops!(U64, l, r), (F32(l), F32(r)) => float_binops!(F32, l, r), (F64(l), F64(r)) => float_binops!(F64, l, r), (Char(l), Char(r)) => match bin_op { Eq => Bool(l == r), Ne => Bool(l != r), Lt => Bool(l < r), Le => Bool(l <= r), Gt => Bool(l > r), Ge => Bool(l >= r), _ => panic!("invalid char op: {:?}", bin_op), }, (Bool(l), Bool(r)) => { Bool(match bin_op { Eq => l == r, Ne => l != r, Lt => l < r, Le => l <= r, Gt => l > r, Ge => l >= r, BitOr => l | r, BitXor => l ^ r, BitAnd => l & r, Add | Sub | Mul | Div | Rem | Shl | Shr => return Err(EvalError::InvalidBoolOp(bin_op)), }) } (IntegerPtr(l), IntegerPtr(r)) => int_binops!(IntegerPtr, l, r), (AbstractPtr(_), IntegerPtr(_)) | (IntegerPtr(_), AbstractPtr(_)) | (FnPtr(_), AbstractPtr(_)) | (AbstractPtr(_), FnPtr(_)) | (FnPtr(_), IntegerPtr(_)) | (IntegerPtr(_), FnPtr(_)) => unrelated_ptr_ops(bin_op)?, (FnPtr(l_ptr), FnPtr(r_ptr)) => match bin_op { Eq => Bool(l_ptr == r_ptr), Ne => Bool(l_ptr != r_ptr), _ => return Err(EvalError::Unimplemented(format!("unimplemented fn ptr comparison: {:?}", bin_op))), }, (AbstractPtr(l_ptr), AbstractPtr(r_ptr)) => { if l_ptr.alloc_id != 
r_ptr.alloc_id { return Ok((unrelated_ptr_ops(bin_op)?, false)); } let l = l_ptr.offset; let r = r_ptr.offset; match bin_op { Eq => Bool(l == r), Ne => Bool(l != r), Lt => Bool(l < r), Le => Bool(l <= r), Gt => Bool(l > r), Ge => Bool(l >= r), _ => return Err(EvalError::Unimplemented(format!("unimplemented ptr op: {:?}", bin_op))), } } (l, r) => return Err(EvalError::Unimplemented(format!("unimplemented binary op: {:?}, {:?}, {:?}", l, r, bin_op))), }; Ok((val, false)) } pub fn unary_op<'tcx>(un_op: mir::UnOp, val: PrimVal) -> EvalResult<'tcx, PrimVal> { use rustc::mir::repr::UnOp::*; use self::PrimVal::*; match (un_op, val) { (Not, Bool(b)) => Ok(Bool(!b)), (Not, I8(n)) => Ok(I8(!n)), (Neg, I8(n)) => Ok(I8(-n)), (Not, I16(n)) => Ok(I16(!n)), (Neg, I16(n)) => Ok(I16(-n)), (Not, I32(n)) => Ok(I32(!n)), (Neg, I32(n)) => Ok(I32(-n)), (Not, I64(n)) => Ok(I64(!n)), (Neg, I64(n)) => Ok(I64(-n)), (Not, U8(n)) => Ok(U8(!n)), (Not, U16(n)) => Ok(U16(!n)), (Not, U32(n)) => Ok(U32(!n)), (Not, U64(n)) => Ok(U64(!n)), (Neg, F64(n)) => Ok(F64(-n)), (Neg, F32(n)) => Ok(F32(-n)), _ => Err(EvalError::Unimplemented(format!("unimplemented unary op: {:?}, {:?}", un_op, val))), } } comparing floats is necessary in rare cases use rustc::mir::repr as mir; use error::{EvalError, EvalResult}; use memory::Pointer; #[derive(Clone, Copy, Debug, PartialEq)] pub enum PrimVal { Bool(bool), I8(i8), I16(i16), I32(i32), I64(i64), U8(u8), U16(u16), U32(u32), U64(u64), AbstractPtr(Pointer), FnPtr(Pointer), IntegerPtr(u64), Char(char), F32(f32), F64(f64), } /// returns the result of the operation and whether the operation overflowed pub fn binary_op<'tcx>(bin_op: mir::BinOp, left: PrimVal, right: PrimVal) -> EvalResult<'tcx, (PrimVal, bool)> { use rustc::mir::repr::BinOp::*; use self::PrimVal::*; macro_rules! overflow { ($v:ident, $v2:ident, $l:ident, $op:ident, $r:ident) => ({ let (val, of) = $l.$op($r); if of { return Ok(($v(val), true)); } else { $v(val) } }) } macro_rules! 
int_binops { ($v:ident, $l:ident, $r:ident) => ({ match bin_op { Add => overflow!($v, $v, $l, overflowing_add, $r), Sub => overflow!($v, $v, $l, overflowing_sub, $r), Mul => overflow!($v, $v, $l, overflowing_mul, $r), Div => overflow!($v, $v, $l, overflowing_div, $r), Rem => overflow!($v, $v, $l, overflowing_rem, $r), BitXor => $v($l ^ $r), BitAnd => $v($l & $r), BitOr => $v($l | $r), // these have already been handled Shl => unreachable!(), Shr => unreachable!(), Eq => Bool($l == $r), Ne => Bool($l != $r), Lt => Bool($l < $r), Le => Bool($l <= $r), Gt => Bool($l > $r), Ge => Bool($l >= $r), } }) } macro_rules! float_binops { ($v:ident, $l:ident, $r:ident) => ({ match bin_op { Add => $v($l + $r), Sub => $v($l - $r), Mul => $v($l * $r), Div => $v($l / $r), Rem => $v($l % $r), // invalid float ops BitXor => unreachable!(), BitAnd => unreachable!(), BitOr => unreachable!(), Shl => unreachable!(), Shr => unreachable!(), Eq => Bool($l == $r), Ne => Bool($l != $r), Lt => Bool($l < $r), Le => Bool($l <= $r), Gt => Bool($l > $r), Ge => Bool($l >= $r), } }) } fn unrelated_ptr_ops<'tcx>(bin_op: mir::BinOp) -> EvalResult<'tcx, PrimVal> { use rustc::mir::repr::BinOp::*; match bin_op { Eq => Ok(Bool(false)), Ne => Ok(Bool(true)), Lt | Le | Gt | Ge => Err(EvalError::InvalidPointerMath), _ => unimplemented!(), } } match bin_op { // can have rhs with a different numeric type Shl | Shr => { // these numbers are the maximum number a bitshift rhs could possibly have // e.g. 
u16 can be bitshifted by 0..16, so masking with 0b1111 (16 - 1) will ensure we are in that range let type_bits: u32 = match left { I8(_) | U8(_) => 8, I16(_) | U16(_) => 16, I32(_) | U32(_) => 32, I64(_) | U64(_) => 64, _ => unreachable!(), }; assert!(type_bits.is_power_of_two()); // turn into `u32` because `overflowing_sh{l,r}` only take `u32` let r = match right { I8(i) => i as u32, I16(i) => i as u32, I32(i) => i as u32, I64(i) => i as u32, U8(i) => i as u32, U16(i) => i as u32, U32(i) => i as u32, U64(i) => i as u32, _ => panic!("bad MIR: bitshift rhs is not integral"), }; // apply mask let r = r & (type_bits - 1); macro_rules! shift { ($v:ident, $l:ident, $r:ident) => ({ match bin_op { Shl => overflow!($v, U32, $l, overflowing_shl, $r), Shr => overflow!($v, U32, $l, overflowing_shr, $r), _ => unreachable!(), } }) } let val = match left { I8(l) => shift!(I8, l, r), I16(l) => shift!(I16, l, r), I32(l) => shift!(I32, l, r), I64(l) => shift!(I64, l, r), U8(l) => shift!(U8, l, r), U16(l) => shift!(U16, l, r), U32(l) => shift!(U32, l, r), U64(l) => shift!(U64, l, r), _ => unreachable!(), }; return Ok((val, false)); }, _ => {}, } let val = match (left, right) { (I8(l), I8(r)) => int_binops!(I8, l, r), (I16(l), I16(r)) => int_binops!(I16, l, r), (I32(l), I32(r)) => int_binops!(I32, l, r), (I64(l), I64(r)) => int_binops!(I64, l, r), (U8(l), U8(r)) => int_binops!(U8, l, r), (U16(l), U16(r)) => int_binops!(U16, l, r), (U32(l), U32(r)) => int_binops!(U32, l, r), (U64(l), U64(r)) => int_binops!(U64, l, r), (F32(l), F32(r)) => float_binops!(F32, l, r), (F64(l), F64(r)) => float_binops!(F64, l, r), (Char(l), Char(r)) => match bin_op { Eq => Bool(l == r), Ne => Bool(l != r), Lt => Bool(l < r), Le => Bool(l <= r), Gt => Bool(l > r), Ge => Bool(l >= r), _ => panic!("invalid char op: {:?}", bin_op), }, (Bool(l), Bool(r)) => { Bool(match bin_op { Eq => l == r, Ne => l != r, Lt => l < r, Le => l <= r, Gt => l > r, Ge => l >= r, BitOr => l | r, BitXor => l ^ r, BitAnd => l & r, Add 
| Sub | Mul | Div | Rem | Shl | Shr => return Err(EvalError::InvalidBoolOp(bin_op)), }) } (IntegerPtr(l), IntegerPtr(r)) => int_binops!(IntegerPtr, l, r), (AbstractPtr(_), IntegerPtr(_)) | (IntegerPtr(_), AbstractPtr(_)) | (FnPtr(_), AbstractPtr(_)) | (AbstractPtr(_), FnPtr(_)) | (FnPtr(_), IntegerPtr(_)) | (IntegerPtr(_), FnPtr(_)) => unrelated_ptr_ops(bin_op)?, (FnPtr(l_ptr), FnPtr(r_ptr)) => match bin_op { Eq => Bool(l_ptr == r_ptr), Ne => Bool(l_ptr != r_ptr), _ => return Err(EvalError::Unimplemented(format!("unimplemented fn ptr comparison: {:?}", bin_op))), }, (AbstractPtr(l_ptr), AbstractPtr(r_ptr)) => { if l_ptr.alloc_id != r_ptr.alloc_id { return Ok((unrelated_ptr_ops(bin_op)?, false)); } let l = l_ptr.offset; let r = r_ptr.offset; match bin_op { Eq => Bool(l == r), Ne => Bool(l != r), Lt => Bool(l < r), Le => Bool(l <= r), Gt => Bool(l > r), Ge => Bool(l >= r), _ => return Err(EvalError::Unimplemented(format!("unimplemented ptr op: {:?}", bin_op))), } } (l, r) => return Err(EvalError::Unimplemented(format!("unimplemented binary op: {:?}, {:?}, {:?}", l, r, bin_op))), }; Ok((val, false)) } pub fn unary_op<'tcx>(un_op: mir::UnOp, val: PrimVal) -> EvalResult<'tcx, PrimVal> { use rustc::mir::repr::UnOp::*; use self::PrimVal::*; match (un_op, val) { (Not, Bool(b)) => Ok(Bool(!b)), (Not, I8(n)) => Ok(I8(!n)), (Neg, I8(n)) => Ok(I8(-n)), (Not, I16(n)) => Ok(I16(!n)), (Neg, I16(n)) => Ok(I16(-n)), (Not, I32(n)) => Ok(I32(!n)), (Neg, I32(n)) => Ok(I32(-n)), (Not, I64(n)) => Ok(I64(!n)), (Neg, I64(n)) => Ok(I64(-n)), (Not, U8(n)) => Ok(U8(!n)), (Not, U16(n)) => Ok(U16(!n)), (Not, U32(n)) => Ok(U32(!n)), (Not, U64(n)) => Ok(U64(!n)), (Neg, F64(n)) => Ok(F64(-n)), (Neg, F32(n)) => Ok(F32(-n)), _ => Err(EvalError::Unimplemented(format!("unimplemented unary op: {:?}, {:?}", un_op, val))), } }
pub mod info; pub mod cartdata; pub mod emscripten; pub mod noise; pub mod math; use std::collections::HashMap; use std::io::BufReader; use std::io::Cursor; use std::sync::{Arc, Mutex}; use time; use nalgebra::clamp; use image; use gif; use gif::SetParameter; use std::io::prelude::*; use std::path::Path; use std::fs::File; use plugins::lua_plugin::plugin::LuaPlugin; use plugins::python_plugin::plugin::PythonPlugin; use config::Players; use self::info::Info; use self::noise::Noise; use gfx; use cartridge::{Cartridge, CartridgeFormat}; use sound::sound::Sound; include!(concat!(env!("OUT_DIR"), "/parameters.rs")); pub const SCREEN_PIXELS: usize = SCREEN_WIDTH * SCREEN_HEIGHT; pub const SCREEN_PIXELS_RGB: usize = SCREEN_PIXELS * 3; pub type ScreenBuffer = [u32; SCREEN_PIXELS]; pub type ScreenBufferRGB = [u8; SCREEN_PIXELS_RGB]; pub const SCREEN_EMPTY: ScreenBuffer = [0; SCREEN_PIXELS]; pub struct Palette { colors: HashMap<u32, RGB>, rcolors: HashMap<u32, u32>, cached_colors: [u32; 16], idx: u32, } impl Palette { pub fn new() -> Palette { Palette { colors: HashMap::new(), rcolors: HashMap::new(), cached_colors: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], idx: 16, } } pub fn get_rgb(&mut self, value: u32) -> RGB { if value < 16 { let v = self.cached_colors[value as usize]; let r = ((v & 0xff0000) >> 16) as u8; let g = ((v & 0x00ff00) >> 8) as u8; let b = (v & 0x0000ff) as u8; return RGB::new(r, g, b); } match self.colors.get(&value) { Some(rgb_value) => RGB::new(rgb_value.r, rgb_value.g, rgb_value.b), _ => RGB::new(0, 0, 0), } } pub fn reset(&mut self) { self.colors.clear(); } pub fn set_color(&mut self, color: u32, r: u8, g: u8, b: u8) { let u32_color = (r as u32) << 16 | (g as u32) << 8 | (b as u32); self.colors.insert(color, RGB::new(r, g, b)); self.rcolors.insert(u32_color, color); if color < 16 { self.cached_colors[color as usize] = u32_color; } } pub fn get_color(&mut self, color: u32) -> u32 { match self.colors.get(&color) { Some(rgb_value) => { return 
(rgb_value.r as u32) << 16 | (rgb_value.g as u32) << 8 | (rgb_value.b as u32) } _ => return 0, } } pub fn add_color(&mut self, r: u8, g: u8, b: u8) -> u32 { let value = self.idx; let v = (r as u32) << 16 | (g as u32) << 8 | (b as u32); match self.rcolors.get(&v) { Some(color) => return *color, _ => (), } self.set_color(value, r, g, b); self.idx += 1; value } } lazy_static! { pub static ref PALETTE: Mutex<Palette> = { let m = Mutex::new(Palette::new()); m }; } #[derive(Clone)] pub struct RGB { pub r: u8, pub g: u8, pub b: u8, } impl RGB { pub fn new(r: u8, g: u8, b: u8) -> RGB { RGB { r: r, g: g, b: b } } pub fn new_hexa(v: u32) -> RGB { RGB { r: ((v & 0xff0000) >> 16) as u8, g: ((v & 0x00ff00) >> 8) as u8, b: (v & 0x0000ff) as u8, } } } pub trait RustPlugin { fn init(&mut self, screen: Arc<Mutex<gfx::Screen>>) -> f64; fn update(&mut self, players: Arc<Mutex<Players>>) -> f64; fn draw(&mut self, screen: Arc<Mutex<gfx::Screen>>) -> f64; } #[derive(PartialEq)] pub enum PX8Mode { PX8, PICO8, } pub enum PX8State { RUN, PAUSE, } pub enum Code { UNKNOWN = 0, LUA = 1, PYTHON = 2, RUST = 3, } pub struct Menu { idx: u32, selected_idx: i32, items: Vec<String>, } impl Menu { pub fn new() -> Menu { let mut items = Vec::new(); items.push("Continue".to_string()); items.push("Config".to_string()); items.push("Quit".to_string()); Menu { idx: 0, selected_idx: -1, items: items.clone(), } } pub fn reset(&mut self) { self.selected_idx = -1; self.idx = 0; } pub fn stop(&mut self) -> bool { // Continue is clicked self.selected_idx == 0 } pub fn update(&mut self, players: Arc<Mutex<Players>>) -> bool { if players.lock().unwrap().btnp(0, 6) { self.selected_idx = self.idx as i32; if self.selected_idx == self.items.len() as i32 { return false; } } else { if players.lock().unwrap().btnp(0, 2) { self.idx = clamp(self.idx - 1, 0, (self.items.len() as u32) - 1); } if players.lock().unwrap().btnp(0, 3) { self.idx = clamp(self.idx + 1, 0, (self.items.len() as u32) - 1); } } return true; } pub fn 
draw(&mut self, screen: Arc<Mutex<gfx::Screen>>) { if self.selected_idx == -1 { let idx_x = (SCREEN_WIDTH / 2 - 20) as i32; let idx_y = (SCREEN_WIDTH / 2 - 10) as i32; screen .lock() .unwrap() .rectfill(idx_x, idx_y - 5, idx_x + 40, idx_y + 10 * self.items.len() as i32, 11); screen .lock() .unwrap() .pset(idx_x, idx_y + (self.idx as i32) * 10, 7); self.draw_logo(screen.clone()); let mut pos = 0; for item in &self.items { screen .lock() .unwrap() .print(item.to_string(), idx_x + 5, idx_y + pos * 10, 7); pos += 1; } } if self.selected_idx == 1 { screen.lock().unwrap().cls(); } } #[cfg_attr(rustfmt, rustfmt_skip)] pub fn draw_logo(&mut self, screen: Arc<Mutex<gfx::Screen>>) { let logo = vec![ 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 8, 0, 8, 8, 8, 8, 8, 8, 0, 8, 8, 8, 9, 8, 8, 9, 8, 0, 8, 8, 8, 8, 8, 8, 0, 8, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]; screen .lock() .unwrap() .print("Powered by PX8".to_string(), 64, 112, 7); let idx_x = 114; let idx_y = 120; let mut x = 0; let mut y = 0; for c in logo { if x > 0 && x % 8 == 0 { x = 0; y += 1; } if c != 0 { screen.lock().unwrap().pset(idx_x + x, idx_y + y, c); } x += 1; } } } pub struct Record { pub recording: bool, pub images: Vec<u8>, pub filename: String, pub nb: i32, } impl Record { pub fn new() -> Record { let images = Vec::new(); Record { recording: false, images: images, filename: "".to_string(), nb: 0, } } } pub struct Palettes { pub palette_idx: u32, pub palettes: HashMap<String, Vec<RGB>>, pub palettes_list: Vec<String>, pub name: String, } impl Palettes { pub fn new() -> Palettes { Palettes { palette_idx: 0, palettes: HashMap::new(), palettes_list: Vec::new(), name: "".to_string(), } } pub fn init(&mut self) { // load palettes statically for emscripten self.load("a64".to_string(), include_str!("../../sys/assets/palettes/a64.gpl").to_string()); self.load("apple-ii".to_string(), include_str!("../../sys/assets/palettes/apple-ii.gpl").to_string()); 
self.load("arne-paldac".to_string(),
                  include_str!("../../sys/assets/palettes/arne-paldac.gpl").to_string());
        self.load("arne16".to_string(),
                  include_str!("../../sys/assets/palettes/arne16.gpl").to_string());
        self.load("arne32".to_string(),
                  include_str!("../../sys/assets/palettes/arne32.gpl").to_string());
        self.load("atari2600-ntsc".to_string(),
                  include_str!("../../sys/assets/palettes/atari2600-ntsc.gpl").to_string());
        self.load("atari2600-pal".to_string(),
                  include_str!("../../sys/assets/palettes/atari2600-pal.gpl").to_string());
        self.load("cg-arne".to_string(),
                  include_str!("../../sys/assets/palettes/cg-arne.gpl").to_string());
        self.load("cga".to_string(),
                  include_str!("../../sys/assets/palettes/cga.gpl").to_string());
        self.load("commodore-plus4".to_string(),
                  include_str!("../../sys/assets/palettes/commodore-plus4.gpl").to_string());
        self.load("commodore-vic20".to_string(),
                  include_str!("../../sys/assets/palettes/commodore-vic20.gpl").to_string());
        self.load("commodore64".to_string(),
                  include_str!("../../sys/assets/palettes/commodore64.gpl").to_string());
        self.load("copper-tech".to_string(),
                  include_str!("../../sys/assets/palettes/copper-tech.gpl").to_string());
        self.load("cpc-boy".to_string(),
                  include_str!("../../sys/assets/palettes/cpc-boy.gpl").to_string());
        self.load("db16".to_string(),
                  include_str!("../../sys/assets/palettes/db16.gpl").to_string());
        self.load("db32".to_string(),
                  include_str!("../../sys/assets/palettes/db32.gpl").to_string());
        self.load("edg16".to_string(),
                  include_str!("../../sys/assets/palettes/edg16.gpl").to_string());
        self.load("edg32".to_string(),
                  include_str!("../../sys/assets/palettes/edg32.gpl").to_string());
        self.load("eroge-copper".to_string(),
                  include_str!("../../sys/assets/palettes/eroge-copper.gpl").to_string());
        self.load("gameboy-color-type1".to_string(),
                  include_str!("../../sys/assets/palettes/gameboy-color-type1.gpl").to_string());
        self.load("gameboy".to_string(),
                  include_str!("../../sys/assets/palettes/gameboy.gpl").to_string());
        self.load("google-ui".to_string(),
                  include_str!("../../sys/assets/palettes/google-ui.gpl").to_string());
        self.load("jmp".to_string(),
                  include_str!("../../sys/assets/palettes/jmp.gpl").to_string());
        self.load("mail24".to_string(),
                  include_str!("../../sys/assets/palettes/mail24.gpl").to_string());
        self.load("master-system".to_string(),
                  include_str!("../../sys/assets/palettes/master-system.gpl").to_string());
        self.load("monokai".to_string(),
                  include_str!("../../sys/assets/palettes/monokai.gpl").to_string());
        self.load("nes-ntsc".to_string(),
                  include_str!("../../sys/assets/palettes/nes-ntsc.gpl").to_string());
        self.load("nes".to_string(),
                  include_str!("../../sys/assets/palettes/nes.gpl").to_string());
        self.load("pico-8".to_string(),
                  include_str!("../../sys/assets/palettes/pico-8.gpl").to_string());
        self.load("psygnork".to_string(),
                  include_str!("../../sys/assets/palettes/psygnork.gpl").to_string());
        self.load("smile-basic".to_string(),
                  include_str!("../../sys/assets/palettes/smile-basic.gpl").to_string());
        self.load("solarized".to_string(),
                  include_str!("../../sys/assets/palettes/solarized.gpl").to_string());
        self.load("teletext".to_string(),
                  include_str!("../../sys/assets/palettes/teletext.gpl").to_string());
        self.load("vga-13h".to_string(),
                  include_str!("../../sys/assets/palettes/vga-13h.gpl").to_string());
        self.load("web-safe-colors".to_string(),
                  include_str!("../../sys/assets/palettes/web-safe-colors.gpl").to_string());
        self.load("win16".to_string(),
                  include_str!("../../sys/assets/palettes/win16.gpl").to_string());
        self.load("x11".to_string(),
                  include_str!("../../sys/assets/palettes/x11.gpl").to_string());
        self.load("zx-spectrum".to_string(),
                  include_str!("../../sys/assets/palettes/zx-spectrum.gpl").to_string());
    }

    // Parse a GIMP palette (.gpl) document and register it under `name`.
    pub fn load(&mut self, name: String, data: String) {
        let buf_reader = Cursor::new(data);
        let mut values = Vec::new();
        for line in buf_reader.lines() {
            let line = line.unwrap();
            let l =
line.trim_left().to_string();
            // Skip blank lines, "#" comments and any header line that does
            // not start with a digit (e.g. "GIMP Palette", "Name: ...").
            if l.len() == 0 {
                continue;
            }
            if l.starts_with("#") {
                continue;
            }
            let l_b = l.as_bytes();
            if !(l_b[0] as char).is_digit(10) {
                continue;
            }
            // Data lines are "R G B [name]"; only the three color
            // components are read.
            let mut iter = l.split_whitespace();
            let r = iter.next().unwrap().parse::<u8>().unwrap();
            let g = iter.next().unwrap().parse::<u8>().unwrap();
            let b = iter.next().unwrap().parse::<u8>().unwrap();
            values.push(RGB::new(r, g, b));
        }
        self.palettes.insert(name.clone(), values);
        self.palettes_list.push(name.clone());
    }

    // Cycle to the next registered palette (wraps around) and activate it.
    pub fn next(&mut self) {
        self.palette_idx = (self.palette_idx + 1) % self.palettes_list.len() as u32;
        let ref mut p_value = self.palettes_list[self.palette_idx as usize].clone();
        self.switch_to(p_value.clone());
    }

    // Copy the named palette's colors into the global PALETTE.
    // NOTE(review): panics if `name` was never loaded — confirm callers
    // only pass names coming from `palettes_list`.
    pub fn switch_to(&mut self, name: String) {
        let ref values = *self.palettes.get(&name).unwrap();
        let mut idx = 0;
        for rgb_value in values {
            PALETTE
                .lock()
                .unwrap()
                .set_color(idx, rgb_value.r, rgb_value.g, rgb_value.b);
            idx += 1;
        }
        self.name = name.clone();
    }

    // Thin forwarders to the global PALETTE.
    pub fn set_color(&mut self, color: u32, r: u8, g: u8, b: u8) {
        PALETTE.lock().unwrap().set_color(color, r, g, b);
    }

    pub fn get_color(&mut self, color: u32) -> u32 {
        PALETTE.lock().unwrap().get_color(color)
    }

    pub fn reset(&mut self) {
        PALETTE.lock().unwrap().reset();
    }
}

// Main console state: the screen, the palettes, the loaded cartridges and
// the language plugin that runs the current cartridge.
pub struct Px8New {
    pub screen: Arc<Mutex<gfx::Screen>>,
    pub palettes: Arc<Mutex<Palettes>>,
    pub noise: Arc<Mutex<Noise>>,
    pub cartridges: Vec<Cartridge>,
    pub current_cartridge: usize,
    pub lua_plugin: LuaPlugin,
    pub python_plugin: PythonPlugin,
    pub rust_plugin: Vec<Box<RustPlugin>>,
    pub code_type: Code,
    pub state: PX8State,
    pub menu: Menu,
    pub show_info_overlay: bool,
    pub fps: f64,
    pub draw_time: f64,   // duration of the last draw call, in ms
    pub init_time: f64,
    pub update_time: f64, // duration of the last update call, in ms
    pub record: Record,
    pub draw_return: bool,
    pub update_return: bool,
}

impl Px8New {
    pub fn new() -> Px8New {
        Px8New {
            screen: Arc::new(Mutex::new(gfx::Screen::new())),
            palettes: Arc::new(Mutex::new(Palettes::new())),
            noise: Arc::new(Mutex::new(Noise::new())),
            cartridges: Vec::new(),
            current_cartridge: 0,
            lua_plugin:
LuaPlugin::new(),
            python_plugin: PythonPlugin::new(),
            rust_plugin: Vec::new(),
            code_type: Code::UNKNOWN,
            state: PX8State::RUN,
            menu: Menu::new(),
            show_info_overlay: false,
            fps: 0.0,
            draw_time: 0.0,
            init_time: 0.0,
            update_time: 0.0,
            record: Record::new(),
            draw_return: true,
            update_return: true,
        }
    }

    // Load the embedded palettes, activate "pico-8" and reset the screen.
    pub fn init(&mut self) {
        self.palettes.lock().unwrap().init();
        self.palettes
            .lock()
            .unwrap()
            .switch_to("pico-8".to_string());
        self.screen.lock().unwrap().init();
        self.update_return = true;
        self.draw_return = true;
    }

    pub fn next_palette(&mut self) {
        self.palettes.lock().unwrap().next();
    }

    pub fn toggle_info_overlay(&mut self) {
        self.show_info_overlay = !self.show_info_overlay;
    }

    // Draw the FPS / draw-time / update-time / palette-name overlay on the
    // top row of the screen, when enabled.
    pub fn debug_update(&mut self) {
        if self.show_info_overlay {
            self.screen
                .lock()
                .unwrap()
                .rectfill(0, 0, SCREEN_WIDTH as i32, 8, 0);
            self.screen
                .lock()
                .unwrap()
                .force_print(format!("{:.0}FPS {:.2} {:.2} {:?}",
                                     self.fps,
                                     self.draw_time,
                                     self.update_time,
                                     &self.palettes.lock().unwrap().name)
                                     .to_string(),
                             0,
                             0,
                             7);
        }
    }

    // One logic step. Returns `false` when the console should exit
    // (cartridge finished, or "Quit" chosen in the pause menu).
    pub fn update(&mut self, players: Arc<Mutex<Players>>) -> bool {
        match self.state {
            PX8State::PAUSE => {
                if self.menu.stop() {
                    self.state = PX8State::RUN;
                }
                return self.menu.update(players);
            }
            PX8State::RUN => {
                if self.is_end() {
                    return false;
                }
                self.update_time = self.call_update(players) * 1000.0;
            }
        }
        return true;
    }

    // One render step: either the pause menu or the cartridge's draw
    // callback (plus frame capture while recording).
    pub fn draw(&mut self) {
        match self.state {
            PX8State::PAUSE => {
                self.menu.draw(self.screen.clone());
            }
            PX8State::RUN => {
                self.draw_time = self.call_draw() * 1000.0;
                if self.is_recording() {
                    self.record();
                }
            }
        }
    }

    pub fn is_end(&mut self) -> bool {
        return !self.update_return;
    }

    pub fn is_recording(&mut self) -> bool {
        return self.record.recording;
    }

    pub fn start_record(&mut self, filename: String) {
        info!("[PX8] Start to record the frame");
        self.record.recording = true;
        self.record.images.clear();
        self.record.filename = filename;
    }

    // Capture the current frame as raw RGB bytes; only every 4th call is
    // kept to limit the GIF size.
    pub fn record(&mut self) {
        info!("[PX8] Recording the frame");
        if self.record.nb % 4 == 0 {
            let mut buffer: Vec<u8> = Vec::new();
for x in 0..self::SCREEN_WIDTH {
                for y in 0..self::SCREEN_HEIGHT {
                    // Column-major capture: pixels are pushed x-first, which
                    // is why stop_record rotates each decoded frame.
                    // NOTE(review): locks screen + palette once per pixel;
                    // hoisting the guards out of the loop would be cheaper.
                    let value = self.screen.lock().unwrap().pget(x as u32, y as u32);
                    let rgb_value = PALETTE.lock().unwrap().get_rgb(value);
                    buffer.push(rgb_value.r);
                    buffer.push(rgb_value.g);
                    buffer.push(rgb_value.b);
                }
            }
            self.record.images.append(&mut buffer);
        }
        self.record.nb += 1;
    }

    // Encode every captured frame into an animated, infinitely looping GIF
    // at `scale`x resolution, then write it to `record.filename`.
    pub fn stop_record(&mut self, scale: usize) {
        info!("[PX8] Stop to record the frame {:?}",
              self.record.images.len());
        self.record.recording = false;
        let mut filedata = File::create(self.record.filename.clone()).unwrap();
        let mut encoder = gif::Encoder::new(&mut filedata,
                                            SCREEN_WIDTH as u16,
                                            SCREEN_HEIGHT as u16,
                                            &[])
                .unwrap();
        encoder.set(gif::Repeat::Infinite).unwrap();
        let mut idx = 0;
        // `images` is a flat run of frames, each SCREEN_WIDTH*SCREEN_HEIGHT*3
        // bytes long; `idx` walks it across iterations.
        for i in 0..self.record.images.len() / (SCREEN_WIDTH * SCREEN_HEIGHT * 3) {
            info!("[PX8] Generate frame {:?} {:?}/{:?}",
                  i,
                  self.record.images.len(),
                  idx);
            let mut buffer: Vec<u8> = Vec::new();
            for _ in 0..SCREEN_WIDTH {
                for _ in 0..SCREEN_HEIGHT {
                    buffer.push(*self.record.images.get(idx).unwrap());
                    buffer.push(*self.record.images.get(idx + 1).unwrap());
                    buffer.push(*self.record.images.get(idx + 2).unwrap());
                    idx += 3;
                }
            }
            info!("[PX8] Creating ImageBuffer {:?}", buffer.len());
            let image = image::ImageBuffer::from_raw(SCREEN_WIDTH as u32,
                                                     SCREEN_HEIGHT as u32,
                                                     buffer)
                    .unwrap();
            info!("[PX8] Rotating image");
            // Undo the column-major capture order (rotate + flip), then
            // upscale with nearest-neighbour to keep the pixel-art look.
            let image = image::DynamicImage::ImageRgb8(image)
                .rotate90()
                .resize((SCREEN_WIDTH * scale) as u32,
                        (SCREEN_HEIGHT * scale) as u32,
                        image::FilterType::Nearest)
                .fliph();
            info!("[PX8] Creating gif Frame");
            let mut frame = gif::Frame::from_rgb((SCREEN_WIDTH * scale) as u16,
                                                 (SCREEN_HEIGHT * scale) as u16,
                                                 &mut *image.raw_pixels());
            // GIF delay is in units of 10 ms, so 1 ~= 10 ms per frame.
            frame.delay = 1;
            encoder.write_frame(&frame).unwrap();
        }
        info!("[PX8] GIF created in {:?}", self.record.filename.clone());
    }

    // Save the current frame as a 4x-scaled PNG.
    pub fn screenshot(&mut self, filename: String) {
        info!("[PX8] Taking screenshot in {:?}", filename);
        let mut buffer: Vec<u8> = Vec::new();
        for x in 0..SCREEN_WIDTH {
            for y in 0..SCREEN_HEIGHT {
let value = self.screen.lock().unwrap().pget(x as u32, y as u32);
                let rgb_value = PALETTE.lock().unwrap().get_rgb(value);
                buffer.push(rgb_value.r);
                buffer.push(rgb_value.g);
                buffer.push(rgb_value.b);
            }
        }
        let image = image::ImageBuffer::from_raw(SCREEN_WIDTH as u32,
                                                 SCREEN_HEIGHT as u32,
                                                 buffer)
                .unwrap();
        // NOTE(review): the second resize bound is SCREEN_WIDTH * 4 as well —
        // probably meant SCREEN_HEIGHT * 4. `resize` preserves the aspect
        // ratio and fits within the bounds, so the output is still correct
        // for a square screen; confirm before changing.
        let image = image::DynamicImage::ImageRgb8(image)
            .rotate270()
            .resize((SCREEN_WIDTH * 4) as u32,
                    (SCREEN_WIDTH * 4) as u32,
                    image::FilterType::Nearest)
            .flipv();
        let mut output = File::create(&Path::new(&filename)).unwrap();
        image.save(&mut output, image::ImageFormat::PNG).unwrap();
    }

    // Write the in-memory sprites/map back into the current cartridge and
    // save it in its own format.
    pub fn save_current_cartridge(&mut self) {
        let ref mut cartridge = self.cartridges[self.current_cartridge];
        let output_filename = cartridge.filename.clone();
        info!("[PX8] Saving the current cartridge in {:?}", output_filename);
        cartridge
            .gfx
            .set_sprites(self.screen.lock().unwrap().sprites.clone());
        cartridge.map.set_map(self.screen.lock().unwrap().map);
        match cartridge.format {
            CartridgeFormat::P8Format => {
                cartridge.save_in_p8(output_filename);
            }
            // NOTE(review): PNG cartridges are also written back as .p8 —
            // presumably because re-encoding the PNG container is not
            // supported; confirm this is intentional.
            CartridgeFormat::PngFormat => {
                cartridge.save_in_p8(output_filename);
            }
            CartridgeFormat::Px8Format => {
                cartridge.save_data();
            }
        }
    }

    // Toggle between RUN and PAUSE, saving/restoring the screen around the
    // pause menu so the game frame survives the overlay.
    pub fn switch_pause(&mut self) {
        match self.state {
            PX8State::PAUSE => {
                self.state = PX8State::RUN;
                self.screen.lock().unwrap().restore();
            }
            PX8State::RUN => {
                self.menu.reset();
                self.state = PX8State::PAUSE;
                self.screen.lock().unwrap().save();
            }
        }
    }

    // Register a native Rust cartridge.
    #[allow(dead_code)]
    pub fn register<F: RustPlugin + 'static>(&mut self, callback: F) {
        self.rust_plugin.push(Box::new(callback));
    }

    // Load a cartridge from disk (format chosen from the file name), push
    // it as the current cartridge and boot the matching language plugin.
    // Returns `false` when the cartridge code failed to load.
    pub fn load_cartridge(&mut self,
                          filename: String,
                          players: Arc<Mutex<Players>>,
                          info: Arc<Mutex<Info>>,
                          sound: Arc<Mutex<Sound>>,
                          editor: bool,
                          mode: PX8Mode)
                          -> bool {
        let idx = self.cartridges.len();
        if filename.contains(".png") {
            match Cartridge::from_png_file(filename.clone()) {
                Ok(c) => self.cartridges.push(c),
                Err(e) => panic!("Impossible to load the png cartridge {:?}", e),
            }
        } else if filename.contains(".p8") {
            match
Cartridge::from_p8_file(filename.clone()) {
                Ok(c) => self.cartridges.push(c),
                Err(e) => panic!("Impossible to load the p8 cartridge {:?}", e),
            }
        } else if filename.contains(".py") {
            // NOTE(review): .py files go through the p8 loader — presumably
            // Python code is carried in a p8-style container; confirm.
            match Cartridge::from_p8_file(filename.clone()) {
                Ok(c) => self.cartridges.push(c),
                Err(e) => panic!("Impossible to load the p8 cartridge {:?}", e),
            }
        } else if filename.contains(".px8") {
            match Cartridge::from_px8_file(filename.clone()) {
                Ok(c) => self.cartridges.push(c),
                Err(e) => panic!("Impossible to load the px8 cartridge {:?}", e),
            }
        } else {
            panic!("[PX8] Unknown file format !");
        }
        self.current_cartridge = idx;
        self.cartridges[idx].set_mode(mode == PX8Mode::PICO8);
        // Copy the cartridge's graphics data into the live screen.
        self.screen
            .lock()
            .unwrap()
            .set_sprites(self.cartridges[idx].gfx.sprites.clone());
        self.screen
            .lock()
            .unwrap()
            .set_sprites_flags(self.cartridges[idx].gff.flags.clone());
        self.screen
            .lock()
            .unwrap()
            .set_map(self.cartridges[idx].map.map);
        self.load_plugin(idx, players, info, sound, editor)
    }

    // Same as load_cartridge but from an in-memory byte buffer (used by the
    // emscripten build where there is no filesystem).
    #[allow(dead_code)]
    pub fn load_cartridge_raw(&mut self,
                              filename: String,
                              data: Vec<u8>,
                              players: Arc<Mutex<Players>>,
                              info: Arc<Mutex<Info>>,
                              sound: Arc<Mutex<Sound>>,
                              editor: bool,
                              mode: PX8Mode)
                              -> bool {
        let idx = self.cartridges.len();
        if filename.contains(".png") {
            match Cartridge::from_png_raw(filename.clone(), data) {
                Ok(c) => self.cartridges.push(c),
                Err(e) => panic!("Impossible to load the png cartridge {:?}", e),
            }
        } else if filename.contains(".p8") {
            match Cartridge::from_p8_raw(filename.clone(), data) {
                Ok(c) => self.cartridges.push(c),
                Err(e) => panic!("Impossible to load the p8 cartridge {:?}", e),
            }
        } else if filename.contains(".py") {
            match Cartridge::from_p8_raw(filename.clone(), data) {
                Ok(c) => self.cartridges.push(c),
                Err(e) => panic!("Impossible to load the p8 cartridge {:?}", e),
            }
        } else {
            panic!("[PX8] Unknown file");
        }
        self.current_cartridge = idx;
        self.cartridges[idx].set_mode(mode == PX8Mode::PICO8);
        // NOTE(review): unlike load_cartridge, the raw path does not copy
        // the sprite flags (set_sprites_flags) — confirm whether that is
        // deliberate or an omission.
        self.screen
            .lock()
            .unwrap()
            .set_sprites(self.cartridges[idx].gfx.sprites.clone());
        self.screen
.lock()
            .unwrap()
            .set_map(self.cartridges[idx].map.map);
        self.load_plugin(idx, players, info, sound, editor)
    }

    // Map the cartridge's declared code language to a Code variant.
    pub fn _get_code_type(&mut self, idx: usize) -> Code {
        if self.cartridges[idx].code.get_name() == "lua" {
            return Code::LUA;
        }
        if self.cartridges[idx].code.get_name() == "python" {
            return Code::PYTHON;
        }
        return Code::UNKNOWN;
    }

    // Toggle between the cartridge's own code and the built-in Python
    // editor, reloading the relevant plugin and re-running init().
    pub fn switch_code(&mut self) {
        let idx = self.current_cartridge;
        let data;
        if self.cartridges[idx].edit {
            // Reload the code for the px8 format
            match self.cartridges[idx].format {
                CartridgeFormat::Px8Format => {
                    info!("[PX8] Reloading code section for the cartridge");
                    self.cartridges[idx].code.reload();
                }
                _ => (),
            }
            data = self.cartridges[idx].code.get_data().clone();
            self.cartridges[idx].edit = false;
            self.code_type = self._get_code_type(idx);
        } else {
            // Entering edit mode: run the editor cartridge instead.
            data = self.load_editor("./sys/editor/editor.py".to_string())
                .clone();
            self.cartridges[idx].edit = true;
            self.code_type = Code::PYTHON;
        }
        match self.code_type {
            Code::LUA => {
                self.lua_plugin.load_code(data);
            }
            Code::PYTHON => {
                self.python_plugin.load_code(data);
            }
            _ => (),
        }
        self.init();
    }

    #[allow(dead_code)]
    pub fn is_editing_current_cartridge(&mut self) -> bool {
        let idx = self.current_cartridge;
        return self.cartridges[idx].edit;
    }

    // Boot the language plugin for cartridge `idx` and feed it the code to
    // run (the cartridge's own code, or the editor when `editor` is set).
    pub fn load_plugin(&mut self,
                       idx: usize,
                       players: Arc<Mutex<Players>>,
                       info: Arc<Mutex<Info>>,
                       sound: Arc<Mutex<Sound>>,
                       editor: bool)
                       -> bool {
        let data;
        info!("[PX8] Load the plugin");
        self.code_type = self._get_code_type(idx);
        if editor {
            // Editor mode and original code type is different from Python
            match self.code_type {
                Code::LUA => {
                    info!("[PX8] Loading LUA Plugin");
                    // load the lua plugin
                    self.lua_plugin
                        .load(players.clone(),
                              info.clone(),
                              self.screen.clone(),
                              self.noise.clone());
                }
                _ => (),
            }
            data = self.load_editor("./sys/editor/editor.py".to_string())
                .clone();
            self.cartridges[idx].edit = true;
            self.code_type = Code::PYTHON;
        } else {
            data = self.cartridges[idx].code.get_data().clone();
        }
        match self.code_type {
            Code::LUA => {
info!("[PX8] Loading LUA Plugin");
                self.lua_plugin
                    .load(players.clone(),
                          info.clone(),
                          self.screen.clone(),
                          self.noise.clone());
                return self.lua_plugin.load_code(data);
            }
            Code::PYTHON => {
                info!("[PX8] Loading PYTHON Plugin");
                self.python_plugin
                    .load(self.palettes.clone(),
                          players.clone(),
                          info.clone(),
                          self.screen.clone(),
                          sound.clone(),
                          self.noise.clone());
                return self.python_plugin.load_code(data);
            }
            _ => (),
        }
        // Unknown code type: nothing was loaded.
        false
    }

    // Read the editor source file into a single newline-joined String.
    pub fn load_editor(&mut self, filename: String) -> String {
        let mut data = "".to_string();
        let f = File::open(filename.clone()).unwrap();
        let buf_reader = BufReader::new(f);
        for line in buf_reader.lines() {
            let l = line.unwrap();
            data = data + "\n" + &l;
        }
        return data;
    }

    // Run the cartridge's init callback; returns the elapsed time in
    // seconds (num_seconds + leftover nanoseconds).
    pub fn call_init(&mut self) -> f64 {
        let current_time = time::now();
        match self.code_type {
            Code::LUA => self.lua_plugin.init(),
            Code::PYTHON => self.python_plugin.init(),
            Code::RUST => {
                self.draw_return = true;
                for callback in self.rust_plugin.iter_mut() {
                    callback.init(self.screen.clone());
                }
            }
            _ => (),
        }
        let diff_time = time::now() - current_time;
        let nanoseconds = (diff_time.num_nanoseconds().unwrap() as f64) -
                          (diff_time.num_seconds() * 1000000000) as f64;
        let elapsed_time = diff_time.num_seconds() as f64 + nanoseconds / 1000000000.0;
        return elapsed_time;
    }

    // Run the cartridge's draw callback and record whether it wants to
    // keep running; returns the elapsed time in seconds.
    pub fn call_draw(&mut self) -> f64 {
        let current_time = time::now();
        match self.code_type {
            Code::LUA => self.draw_return = self.lua_plugin.draw(),
            Code::PYTHON => self.draw_return = self.python_plugin.draw(),
            Code::RUST => {
                self.draw_return = true;
                for callback in self.rust_plugin.iter_mut() {
                    callback.draw(self.screen.clone());
                }
            }
            _ => (),
        }
        let diff_time = time::now() - current_time;
        let nanoseconds = (diff_time.num_nanoseconds().unwrap() as f64) -
                          (diff_time.num_seconds() * 1000000000) as f64;
        let elapsed_time = diff_time.num_seconds() as f64 + nanoseconds / 1000000000.0;
        return elapsed_time;
    }

    // Run the cartridge's update callback; returns the elapsed time in
    // seconds. `update_return == false` later ends the main loop.
    pub fn call_update(&mut self, players: Arc<Mutex<Players>>) -> f64 {
        let current_time = time::now();
        match self.code_type
{ Code::LUA => self.update_return = self.lua_plugin.update(), Code::PYTHON => self.update_return = self.python_plugin.update(), Code::RUST => { self.update_return = true; for callback in self.rust_plugin.iter_mut() { callback.update(players.clone()); } } _ => (), } let diff_time = time::now() - current_time; let nanoseconds = (diff_time.num_nanoseconds().unwrap() as f64) - (diff_time.num_seconds() * 1000000000) as f64; let elapsed_time = diff_time.num_seconds() as f64 + nanoseconds / 1000000000.0; return elapsed_time; } } Tweak blank lines pub mod info; pub mod cartdata; pub mod emscripten; pub mod noise; pub mod math; use std::collections::HashMap; use std::io::BufReader; use std::io::Cursor; use std::sync::{Arc, Mutex}; use time; use nalgebra::clamp; use image; use gif; use gif::SetParameter; use std::io::prelude::*; use std::path::Path; use std::fs::File; use plugins::lua_plugin::plugin::LuaPlugin; use plugins::python_plugin::plugin::PythonPlugin; use config::Players; use self::info::Info; use self::noise::Noise; use gfx; use cartridge::{Cartridge, CartridgeFormat}; use sound::sound::Sound; include!(concat!(env!("OUT_DIR"), "/parameters.rs")); pub const SCREEN_PIXELS: usize = SCREEN_WIDTH * SCREEN_HEIGHT; pub const SCREEN_PIXELS_RGB: usize = SCREEN_PIXELS * 3; pub type ScreenBuffer = [u32; SCREEN_PIXELS]; pub type ScreenBufferRGB = [u8; SCREEN_PIXELS_RGB]; pub const SCREEN_EMPTY: ScreenBuffer = [0; SCREEN_PIXELS]; pub struct Palette { colors: HashMap<u32, RGB>, rcolors: HashMap<u32, u32>, cached_colors: [u32; 16], idx: u32, } impl Palette { pub fn new() -> Palette { Palette { colors: HashMap::new(), rcolors: HashMap::new(), cached_colors: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], idx: 16, } } pub fn get_rgb(&mut self, value: u32) -> RGB { if value < 16 { let v = self.cached_colors[value as usize]; let r = ((v & 0xff0000) >> 16) as u8; let g = ((v & 0x00ff00) >> 8) as u8; let b = (v & 0x0000ff) as u8; return RGB::new(r, g, b); } match 
self.colors.get(&value) { Some(rgb_value) => RGB::new(rgb_value.r, rgb_value.g, rgb_value.b), _ => RGB::new(0, 0, 0), } } pub fn reset(&mut self) { self.colors.clear(); } pub fn set_color(&mut self, color: u32, r: u8, g: u8, b: u8) { let u32_color = (r as u32) << 16 | (g as u32) << 8 | (b as u32); self.colors.insert(color, RGB::new(r, g, b)); self.rcolors.insert(u32_color, color); if color < 16 { self.cached_colors[color as usize] = u32_color; } } pub fn get_color(&mut self, color: u32) -> u32 { match self.colors.get(&color) { Some(rgb_value) => { return (rgb_value.r as u32) << 16 | (rgb_value.g as u32) << 8 | (rgb_value.b as u32) } _ => return 0, } } pub fn add_color(&mut self, r: u8, g: u8, b: u8) -> u32 { let value = self.idx; let v = (r as u32) << 16 | (g as u32) << 8 | (b as u32); match self.rcolors.get(&v) { Some(color) => return *color, _ => (), } self.set_color(value, r, g, b); self.idx += 1; value } } lazy_static! { pub static ref PALETTE: Mutex<Palette> = { let m = Mutex::new(Palette::new()); m }; } #[derive(Clone)] pub struct RGB { pub r: u8, pub g: u8, pub b: u8, } impl RGB { pub fn new(r: u8, g: u8, b: u8) -> RGB { RGB { r: r, g: g, b: b } } pub fn new_hexa(v: u32) -> RGB { RGB { r: ((v & 0xff0000) >> 16) as u8, g: ((v & 0x00ff00) >> 8) as u8, b: (v & 0x0000ff) as u8, } } } pub trait RustPlugin { fn init(&mut self, screen: Arc<Mutex<gfx::Screen>>) -> f64; fn update(&mut self, players: Arc<Mutex<Players>>) -> f64; fn draw(&mut self, screen: Arc<Mutex<gfx::Screen>>) -> f64; } #[derive(PartialEq)] pub enum PX8Mode { PX8, PICO8, } pub enum PX8State { RUN, PAUSE, } pub enum Code { UNKNOWN = 0, LUA = 1, PYTHON = 2, RUST = 3, } pub struct Menu { idx: u32, selected_idx: i32, items: Vec<String>, } impl Menu { pub fn new() -> Menu { let mut items = Vec::new(); items.push("Continue".to_string()); items.push("Config".to_string()); items.push("Quit".to_string()); Menu { idx: 0, selected_idx: -1, items: items.clone(), } } pub fn reset(&mut self) { self.selected_idx 
= -1;
        self.idx = 0;
    }

    pub fn stop(&mut self) -> bool {
        // Continue is clicked
        self.selected_idx == 0
    }

    /// Handle pause-menu input. Returns `false` when "Quit" is validated,
    /// `true` otherwise.
    pub fn update(&mut self, players: Arc<Mutex<Players>>) -> bool {
        if players.lock().unwrap().btnp(0, 6) {
            self.selected_idx = self.idx as i32;
            // BUG FIX: "Quit" is the *last* entry, i.e. index len - 1.
            // The old comparison against `len` could never match (idx is
            // always < len), so selecting "Quit" silently did nothing.
            if self.selected_idx == self.items.len() as i32 - 1 {
                return false;
            }
        } else {
            if players.lock().unwrap().btnp(0, 2) {
                // BUG FIX: saturating_sub avoids the u32 underflow (a panic
                // in debug builds) when pressing up on the first entry.
                self.idx = clamp(self.idx.saturating_sub(1), 0, (self.items.len() as u32) - 1);
            }
            if players.lock().unwrap().btnp(0, 3) {
                self.idx = clamp(self.idx + 1, 0, (self.items.len() as u32) - 1);
            }
        }
        return true;
    }

    // Render the pause menu: item list + cursor + logo while nothing is
    // selected; "Config" (index 1) clears the screen.
    pub fn draw(&mut self, screen: Arc<Mutex<gfx::Screen>>) {
        if self.selected_idx == -1 {
            let idx_x = (SCREEN_WIDTH / 2 - 20) as i32;
            let idx_y = (SCREEN_WIDTH / 2 - 10) as i32;
            screen
                .lock()
                .unwrap()
                .rectfill(idx_x, idx_y - 5, idx_x + 40,
                          idx_y + 10 * self.items.len() as i32, 11);
            screen
                .lock()
                .unwrap()
                .pset(idx_x, idx_y + (self.idx as i32) * 10, 7);
            self.draw_logo(screen.clone());
            let mut pos = 0;
            for item in &self.items {
                screen
                    .lock()
                    .unwrap()
                    .print(item.to_string(), idx_x + 5, idx_y + pos * 10, 7);
                pos += 1;
            }
        }
        if self.selected_idx == 1 {
            screen.lock().unwrap().cls();
        }
    }

    // Draw the 8x8 PX8 logo (one palette index per pixel, 0 = transparent).
    #[cfg_attr(rustfmt, rustfmt_skip)]
    pub fn draw_logo(&mut self, screen: Arc<Mutex<gfx::Screen>>) {
        let logo = vec![
            0, 0, 0, 0, 0, 0, 0, 0,
            8, 0, 0, 0, 0, 0, 0, 8,
            0, 8, 8, 8, 8, 8, 8, 0,
            8, 8, 8, 9, 8, 8, 9, 8,
            0, 8, 8, 8, 8, 8, 8, 0,
            8, 0, 0, 0, 0, 0, 0, 8,
            0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0
        ];
        screen
            .lock()
            .unwrap()
            .print("Powered by PX8".to_string(), 64, 112, 7);
        let idx_x = 114;
        let idx_y = 120;
        let mut x = 0;
        let mut y = 0;
        for c in logo {
            if x > 0 && x % 8 == 0 {
                x = 0;
                y += 1;
            }
            if c != 0 {
                screen.lock().unwrap().pset(idx_x + x, idx_y + y, c);
            }
            x += 1;
        }
    }
}

// State of the GIF frame recorder: raw RGB bytes of the captured frames.
pub struct Record {
    pub recording: bool,
    pub images: Vec<u8>,
    pub filename: String,
    pub nb: i32,
}

impl Record {
    pub fn new() -> Record {
        let images = Vec::new();
        Record {
            recording: false,
            images: images,
            filename: "".to_string(),
            nb: 0,
        }
    }
}

pub struct Palettes {
pub palette_idx: u32, pub palettes: HashMap<String, Vec<RGB>>, pub palettes_list: Vec<String>, pub name: String, } impl Palettes { pub fn new() -> Palettes { Palettes { palette_idx: 0, palettes: HashMap::new(), palettes_list: Vec::new(), name: "".to_string(), } } pub fn init(&mut self) { // load palettes statically for emscripten self.load("a64".to_string(), include_str!("../../sys/assets/palettes/a64.gpl").to_string()); self.load("apple-ii".to_string(), include_str!("../../sys/assets/palettes/apple-ii.gpl").to_string()); self.load("arne-paldac".to_string(), include_str!("../../sys/assets/palettes/arne-paldac.gpl").to_string()); self.load("arne16".to_string(), include_str!("../../sys/assets/palettes/arne16.gpl").to_string()); self.load("arne32".to_string(), include_str!("../../sys/assets/palettes/arne32.gpl").to_string()); self.load("atari2600-ntsc".to_string(), include_str!("../../sys/assets/palettes/atari2600-ntsc.gpl").to_string()); self.load("atari2600-pal".to_string(), include_str!("../../sys/assets/palettes/atari2600-pal.gpl").to_string()); self.load("cg-arne".to_string(), include_str!("../../sys/assets/palettes/cg-arne.gpl").to_string()); self.load("cga".to_string(), include_str!("../../sys/assets/palettes/cga.gpl").to_string()); self.load("commodore-plus4".to_string(), include_str!("../../sys/assets/palettes/commodore-plus4.gpl").to_string()); self.load("commodore-vic20".to_string(), include_str!("../../sys/assets/palettes/commodore-vic20.gpl").to_string()); self.load("commodore64".to_string(), include_str!("../../sys/assets/palettes/commodore64.gpl").to_string()); self.load("copper-tech".to_string(), include_str!("../../sys/assets/palettes/copper-tech.gpl").to_string()); self.load("cpc-boy".to_string(), include_str!("../../sys/assets/palettes/cpc-boy.gpl").to_string()); self.load("db16".to_string(), include_str!("../../sys/assets/palettes/db16.gpl").to_string()); self.load("db32".to_string(), include_str!("../../sys/assets/palettes/db32.gpl").to_string()); 
self.load("edg16".to_string(), include_str!("../../sys/assets/palettes/edg16.gpl").to_string()); self.load("edg32".to_string(), include_str!("../../sys/assets/palettes/edg32.gpl").to_string()); self.load("eroge-copper".to_string(), include_str!("../../sys/assets/palettes/eroge-copper.gpl").to_string()); self.load("gameboy-color-type1".to_string(), include_str!("../../sys/assets/palettes/gameboy-color-type1.gpl").to_string()); self.load("gameboy".to_string(), include_str!("../../sys/assets/palettes/gameboy.gpl").to_string()); self.load("google-ui".to_string(), include_str!("../../sys/assets/palettes/google-ui.gpl").to_string()); self.load("jmp".to_string(), include_str!("../../sys/assets/palettes/jmp.gpl").to_string()); self.load("mail24".to_string(), include_str!("../../sys/assets/palettes/mail24.gpl").to_string()); self.load("master-system".to_string(), include_str!("../../sys/assets/palettes/master-system.gpl").to_string()); self.load("monokai".to_string(), include_str!("../../sys/assets/palettes/monokai.gpl").to_string()); self.load("nes-ntsc".to_string(), include_str!("../../sys/assets/palettes/nes-ntsc.gpl").to_string()); self.load("nes".to_string(), include_str!("../../sys/assets/palettes/nes.gpl").to_string()); self.load("pico-8".to_string(), include_str!("../../sys/assets/palettes/pico-8.gpl").to_string()); self.load("psygnork".to_string(), include_str!("../../sys/assets/palettes/psygnork.gpl").to_string()); self.load("smile-basic".to_string(), include_str!("../../sys/assets/palettes/smile-basic.gpl").to_string()); self.load("solarized".to_string(), include_str!("../../sys/assets/palettes/solarized.gpl").to_string()); self.load("teletext".to_string(), include_str!("../../sys/assets/palettes/teletext.gpl").to_string()); self.load("vga-13h".to_string(), include_str!("../../sys/assets/palettes/vga-13h.gpl").to_string()); self.load("web-safe-colors".to_string(), include_str!("../../sys/assets/palettes/web-safe-colors.gpl").to_string()); 
self.load("win16".to_string(), include_str!("../../sys/assets/palettes/win16.gpl").to_string()); self.load("x11".to_string(), include_str!("../../sys/assets/palettes/x11.gpl").to_string()); self.load("zx-spectrum".to_string(), include_str!("../../sys/assets/palettes/zx-spectrum.gpl").to_string()); } pub fn load(&mut self, name: String, data: String) { let buf_reader = Cursor::new(data); let mut values = Vec::new(); for line in buf_reader.lines() { let line = line.unwrap(); let l = line.trim_left().to_string(); if l.len() == 0 { continue; } if l.starts_with("#") { continue; } let l_b = l.as_bytes(); if !(l_b[0] as char).is_digit(10) { continue; } let mut iter = l.split_whitespace(); let r = iter.next().unwrap().parse::<u8>().unwrap(); let g = iter.next().unwrap().parse::<u8>().unwrap(); let b = iter.next().unwrap().parse::<u8>().unwrap(); values.push(RGB::new(r, g, b)); } self.palettes.insert(name.clone(), values); self.palettes_list.push(name.clone()); } pub fn next(&mut self) { self.palette_idx = (self.palette_idx + 1) % self.palettes_list.len() as u32; let ref mut p_value = self.palettes_list[self.palette_idx as usize].clone(); self.switch_to(p_value.clone()); } pub fn switch_to(&mut self, name: String) { let ref values = *self.palettes.get(&name).unwrap(); let mut idx = 0; for rgb_value in values { PALETTE .lock() .unwrap() .set_color(idx, rgb_value.r, rgb_value.g, rgb_value.b); idx += 1; } self.name = name.clone(); } pub fn set_color(&mut self, color: u32, r: u8, g: u8, b: u8) { PALETTE.lock().unwrap().set_color(color, r, g, b); } pub fn get_color(&mut self, color: u32) -> u32 { PALETTE.lock().unwrap().get_color(color) } pub fn reset(&mut self) { PALETTE.lock().unwrap().reset(); } } pub struct Px8New { pub screen: Arc<Mutex<gfx::Screen>>, pub palettes: Arc<Mutex<Palettes>>, pub noise: Arc<Mutex<Noise>>, pub cartridges: Vec<Cartridge>, pub current_cartridge: usize, pub lua_plugin: LuaPlugin, pub python_plugin: PythonPlugin, pub rust_plugin: Vec<Box<RustPlugin>>, 
pub code_type: Code,
pub state: PX8State,
pub menu: Menu,
pub show_info_overlay: bool,
pub fps: f64,
pub draw_time: f64,
pub init_time: f64,
pub update_time: f64,
pub record: Record,
pub draw_return: bool,
pub update_return: bool,
}

/// Convert a `time::Duration` into fractional seconds.
///
/// Shared by `call_init`, `call_draw` and `call_update`, which previously
/// repeated this computation inline three times.
fn duration_to_seconds(diff_time: time::Duration) -> f64 {
    let nanoseconds = (diff_time.num_nanoseconds().unwrap() as f64) -
                      (diff_time.num_seconds() * 1000000000) as f64;
    diff_time.num_seconds() as f64 + nanoseconds / 1000000000.0
}

impl Px8New {
    /// Create a console instance with an empty cartridge list, default
    /// sub-systems and both plugin callbacks flagged as successful.
    pub fn new() -> Px8New {
        Px8New {
            screen: Arc::new(Mutex::new(gfx::Screen::new())),
            palettes: Arc::new(Mutex::new(Palettes::new())),
            noise: Arc::new(Mutex::new(Noise::new())),
            cartridges: Vec::new(),
            current_cartridge: 0,
            lua_plugin: LuaPlugin::new(),
            python_plugin: PythonPlugin::new(),
            rust_plugin: Vec::new(),
            code_type: Code::UNKNOWN,
            state: PX8State::RUN,
            menu: Menu::new(),
            show_info_overlay: false,
            fps: 0.0,
            draw_time: 0.0,
            init_time: 0.0,
            update_time: 0.0,
            record: Record::new(),
            draw_return: true,
            update_return: true,
        }
    }

    /// (Re)initialise palettes and screen, and reset the plugin return
    /// flags so a previously-ended cartridge can run again.
    pub fn init(&mut self) {
        self.palettes.lock().unwrap().init();
        self.palettes.lock().unwrap().switch_to("pico-8".to_string());
        self.screen.lock().unwrap().init();
        self.update_return = true;
        self.draw_return = true;
    }

    /// Cycle to the next available palette.
    pub fn next_palette(&mut self) {
        self.palettes.lock().unwrap().next();
    }

    /// Toggle the FPS/timing overlay drawn by `debug_update`.
    pub fn toggle_info_overlay(&mut self) {
        self.show_info_overlay = !self.show_info_overlay;
    }

    /// Draw the debug overlay (FPS, draw/update times, palette name) in the
    /// top screen row when enabled.
    pub fn debug_update(&mut self) {
        if self.show_info_overlay {
            self.screen.lock().unwrap().rectfill(0, 0, SCREEN_WIDTH as i32, 8, 0);
            self.screen
                .lock()
                .unwrap()
                .force_print(format!("{:.0}FPS {:.2} {:.2} {:?}",
                                     self.fps,
                                     self.draw_time,
                                     self.update_time,
                                     &self.palettes.lock().unwrap().name)
                                 .to_string(),
                             0,
                             0,
                             7);
        }
    }

    /// Advance one frame. Returns `false` when the cartridge signalled the
    /// end of execution, `true` otherwise.
    pub fn update(&mut self, players: Arc<Mutex<Players>>) -> bool {
        match self.state {
            PX8State::PAUSE => {
                if self.menu.stop() {
                    self.state = PX8State::RUN;
                }
                return self.menu.update(players);
            }
            PX8State::RUN => {
                if self.is_end() {
                    return false;
                }
                // Store the update duration in milliseconds for the overlay.
                self.update_time = self.call_update(players) * 1000.0;
            }
        }
        true
    }

    /// Draw one frame: either the pause menu or the cartridge's draw
    /// callback (recording a GIF frame when a recording is active).
    pub fn draw(&mut self) {
        match self.state {
            PX8State::PAUSE => {
                self.menu.draw(self.screen.clone());
            }
            PX8State::RUN => {
                self.draw_time = self.call_draw() * 1000.0;
                if self.is_recording() {
                    self.record();
                }
            }
        }
    }

    /// `true` once the cartridge's update callback has returned `false`.
    pub fn is_end(&mut self) -> bool {
        !self.update_return
    }

    /// `true` while GIF frame recording is active.
    pub fn is_recording(&mut self) -> bool {
        self.record.recording
    }

    /// Begin capturing frames into an in-memory buffer; the GIF is written
    /// by `stop_record`.
    pub fn start_record(&mut self, filename: String) {
        info!("[PX8] Start to record the frame");
        self.record.recording = true;
        self.record.images.clear();
        self.record.filename = filename;
    }

    /// Capture the current screen as raw RGB bytes. Only every 4th frame is
    /// stored to keep the GIF small.
    pub fn record(&mut self) {
        info!("[PX8] Recording the frame");
        if self.record.nb % 4 == 0 {
            let mut buffer: Vec<u8> = Vec::new();
            // Pixels are pushed column by column (x outer loop); the
            // rotate90/fliph in stop_record() restores the orientation.
            for x in 0..SCREEN_WIDTH {
                for y in 0..SCREEN_HEIGHT {
                    let value = self.screen.lock().unwrap().pget(x as u32, y as u32);
                    let rgb_value = PALETTE.lock().unwrap().get_rgb(value);
                    buffer.push(rgb_value.r);
                    buffer.push(rgb_value.g);
                    buffer.push(rgb_value.b);
                }
            }
            self.record.images.append(&mut buffer);
        }
        self.record.nb += 1;
    }

    /// Stop recording and encode the captured frames as an animated GIF,
    /// upscaled by `scale`.
    pub fn stop_record(&mut self, scale: usize) {
        info!("[PX8] Stop to record the frame {:?}", self.record.images.len());
        self.record.recording = false;

        let mut filedata = File::create(self.record.filename.clone()).unwrap();
        let mut encoder = gif::Encoder::new(&mut filedata,
                                            SCREEN_WIDTH as u16,
                                            SCREEN_HEIGHT as u16,
                                            &[])
            .unwrap();
        encoder.set(gif::Repeat::Infinite).unwrap();

        let mut idx = 0;
        for i in 0..self.record.images.len() / (SCREEN_WIDTH * SCREEN_HEIGHT * 3) {
            info!("[PX8] Generate frame {:?} {:?}/{:?}",
                  i,
                  self.record.images.len(),
                  idx);

            let mut buffer: Vec<u8> = Vec::new();
            for _ in 0..SCREEN_WIDTH {
                for _ in 0..SCREEN_HEIGHT {
                    buffer.push(*self.record.images.get(idx).unwrap());
                    buffer.push(*self.record.images.get(idx + 1).unwrap());
                    buffer.push(*self.record.images.get(idx + 2).unwrap());
                    idx += 3;
                }
            }

            info!("[PX8] Creating ImageBuffer {:?}", buffer.len());
            let image =
                image::ImageBuffer::from_raw(SCREEN_WIDTH as u32, SCREEN_HEIGHT as u32, buffer)
                    .unwrap();

            info!("[PX8] Rotating image");
            // Undo the column-major capture order of record().
            let image = image::DynamicImage::ImageRgb8(image)
                .rotate90()
                .resize((SCREEN_WIDTH * scale) as u32,
                        (SCREEN_HEIGHT * scale) as u32,
                        image::FilterType::Nearest)
                .fliph();

            info!("[PX8] Creating gif Frame");
            let mut frame = gif::Frame::from_rgb((SCREEN_WIDTH * scale) as u16,
                                                 (SCREEN_HEIGHT * scale) as u16,
                                                 &mut *image.raw_pixels());
            frame.delay = 1;
            encoder.write_frame(&frame).unwrap();
        }

        info!("[PX8] GIF created in {:?}", self.record.filename.clone());
    }

    /// Save the current screen as a PNG file, upscaled 4x.
    pub fn screenshot(&mut self, filename: String) {
        info!("[PX8] Taking screenshot in {:?}", filename);

        let mut buffer: Vec<u8> = Vec::new();
        // Same column-major capture order as record(); the rotate270/flipv
        // below restores the orientation.
        for x in 0..SCREEN_WIDTH {
            for y in 0..SCREEN_HEIGHT {
                let value = self.screen.lock().unwrap().pget(x as u32, y as u32);
                let rgb_value = PALETTE.lock().unwrap().get_rgb(value);
                buffer.push(rgb_value.r);
                buffer.push(rgb_value.g);
                buffer.push(rgb_value.b);
            }
        }

        let image =
            image::ImageBuffer::from_raw(SCREEN_WIDTH as u32, SCREEN_HEIGHT as u32, buffer)
                .unwrap();

        // BUG FIX: the resize height bound used SCREEN_WIDTH * 4; use
        // SCREEN_HEIGHT * 4 so non-square screens keep their aspect ratio
        // (matching the scaling in stop_record()). No visible change when
        // SCREEN_WIDTH == SCREEN_HEIGHT.
        let image = image::DynamicImage::ImageRgb8(image)
            .rotate270()
            .resize((SCREEN_WIDTH * 4) as u32,
                    (SCREEN_HEIGHT * 4) as u32,
                    image::FilterType::Nearest)
            .flipv();

        let mut output = File::create(&Path::new(&filename)).unwrap();
        image.save(&mut output, image::ImageFormat::PNG).unwrap();
    }

    /// Write the in-memory sprite sheet and map back into the current
    /// cartridge and save it in its own format.
    pub fn save_current_cartridge(&mut self) {
        let ref mut cartridge = self.cartridges[self.current_cartridge];
        let output_filename = cartridge.filename.clone();
        info!("[PX8] Saving the current cartridge in {:?}", output_filename);

        cartridge.gfx.set_sprites(self.screen.lock().unwrap().sprites.clone());
        cartridge.map.set_map(self.screen.lock().unwrap().map);

        match cartridge.format {
            CartridgeFormat::P8Format => {
                cartridge.save_in_p8(output_filename);
            }
            CartridgeFormat::PngFormat => {
                // NOTE(review): PNG cartridges are saved back in p8 format —
                // looks intentional (no PNG writer), but confirm.
                cartridge.save_in_p8(output_filename);
            }
            CartridgeFormat::Px8Format => {
                cartridge.save_data();
            }
        }
    }

    /// Toggle between RUN and PAUSE, saving/restoring the screen so the
    /// pause menu can draw over it.
    pub fn switch_pause(&mut self) {
        match self.state {
            PX8State::PAUSE => {
                self.state = PX8State::RUN;
                self.screen.lock().unwrap().restore();
            }
            PX8State::RUN => {
                self.menu.reset();
                self.state = PX8State::PAUSE;
                self.screen.lock().unwrap().save();
            }
        }
    }

    /// Register a native Rust plugin whose callbacks run when
    /// `code_type == Code::RUST`.
    #[allow(dead_code)]
    pub fn register<F: RustPlugin + 'static>(&mut self, callback: F) {
        self.rust_plugin.push(Box::new(callback));
    }

    /// Load a cartridge from disk (format chosen by file extension), install
    /// its gfx/flags/map into the screen, then load the matching plugin.
    /// Returns `true` when the plugin loaded the code successfully.
    pub fn load_cartridge(&mut self,
                          filename: String,
                          players: Arc<Mutex<Players>>,
                          info: Arc<Mutex<Info>>,
                          sound: Arc<Mutex<Sound>>,
                          editor: bool,
                          mode: PX8Mode)
                          -> bool {
        let idx = self.cartridges.len();

        if filename.contains(".png") {
            match Cartridge::from_png_file(filename.clone()) {
                Ok(c) => self.cartridges.push(c),
                Err(e) => panic!("Impossible to load the png cartridge {:?}", e),
            }
        } else if filename.contains(".p8") {
            match Cartridge::from_p8_file(filename.clone()) {
                Ok(c) => self.cartridges.push(c),
                Err(e) => panic!("Impossible to load the p8 cartridge {:?}", e),
            }
        } else if filename.contains(".py") {
            // NOTE(review): ".py" files go through the p8 loader; confirm
            // this is intentional.
            match Cartridge::from_p8_file(filename.clone()) {
                Ok(c) => self.cartridges.push(c),
                Err(e) => panic!("Impossible to load the p8 cartridge {:?}", e),
            }
        } else if filename.contains(".px8") {
            match Cartridge::from_px8_file(filename.clone()) {
                Ok(c) => self.cartridges.push(c),
                Err(e) => panic!("Impossible to load the px8 cartridge {:?}", e),
            }
        } else {
            panic!("[PX8] Unknown file format !");
        }

        self.current_cartridge = idx;
        self.cartridges[idx].set_mode(mode == PX8Mode::PICO8);

        self.screen
            .lock()
            .unwrap()
            .set_sprites(self.cartridges[idx].gfx.sprites.clone());
        self.screen
            .lock()
            .unwrap()
            .set_sprites_flags(self.cartridges[idx].gff.flags.clone());
        self.screen
            .lock()
            .unwrap()
            .set_map(self.cartridges[idx].map.map);

        self.load_plugin(idx, players, info, sound, editor)
    }

    /// Like `load_cartridge` but parses the cartridge from an in-memory
    /// byte buffer (px8 format and sprite flags are not supported here).
    #[allow(dead_code)]
    pub fn load_cartridge_raw(&mut self,
                              filename: String,
                              data: Vec<u8>,
                              players: Arc<Mutex<Players>>,
                              info: Arc<Mutex<Info>>,
                              sound: Arc<Mutex<Sound>>,
                              editor: bool,
                              mode: PX8Mode)
                              -> bool {
        let idx = self.cartridges.len();

        if filename.contains(".png") {
            match Cartridge::from_png_raw(filename.clone(), data) {
                Ok(c) => self.cartridges.push(c),
                Err(e) => panic!("Impossible to load the png cartridge {:?}", e),
            }
        } else if filename.contains(".p8") {
            match Cartridge::from_p8_raw(filename.clone(), data) {
                Ok(c) => self.cartridges.push(c),
                Err(e) => panic!("Impossible to load the p8 cartridge {:?}", e),
            }
        } else if filename.contains(".py") {
            match Cartridge::from_p8_raw(filename.clone(), data) {
                Ok(c) => self.cartridges.push(c),
                Err(e) => panic!("Impossible to load the p8 cartridge {:?}", e),
            }
        } else {
            panic!("[PX8] Unknown file");
        }

        self.current_cartridge = idx;
        self.cartridges[idx].set_mode(mode == PX8Mode::PICO8);

        self.screen
            .lock()
            .unwrap()
            .set_sprites(self.cartridges[idx].gfx.sprites.clone());
        self.screen
            .lock()
            .unwrap()
            .set_map(self.cartridges[idx].map.map);

        self.load_plugin(idx, players, info, sound, editor)
    }

    /// Map the cartridge's declared code language to a `Code` variant.
    pub fn _get_code_type(&mut self, idx: usize) -> Code {
        if self.cartridges[idx].code.get_name() == "lua" {
            return Code::LUA;
        }
        if self.cartridges[idx].code.get_name() == "python" {
            return Code::PYTHON;
        }
        Code::UNKNOWN
    }

    /// Toggle between running the cartridge's own code and the built-in
    /// Python editor, reloading the relevant plugin and re-initialising.
    pub fn switch_code(&mut self) {
        let idx = self.current_cartridge;
        let data;

        if self.cartridges[idx].edit {
            // Reload the code for the px8 format
            match self.cartridges[idx].format {
                CartridgeFormat::Px8Format => {
                    info!("[PX8] Reloading code section for the cartridge");
                    self.cartridges[idx].code.reload();
                }
                _ => (),
            }

            data = self.cartridges[idx].code.get_data().clone();
            self.cartridges[idx].edit = false;
            self.code_type = self._get_code_type(idx);
        } else {
            data = self.load_editor("./sys/editor/editor.py".to_string()).clone();
            self.cartridges[idx].edit = true;
            self.code_type = Code::PYTHON;
        }

        match self.code_type {
            Code::LUA => {
                self.lua_plugin.load_code(data);
            }
            Code::PYTHON => {
                self.python_plugin.load_code(data);
            }
            _ => (),
        }

        self.init();
    }

    /// `true` while the built-in editor is active on the current cartridge.
    #[allow(dead_code)]
    pub fn is_editing_current_cartridge(&mut self) -> bool {
        let idx = self.current_cartridge;
        self.cartridges[idx].edit
    }

    /// Load the scripting plugin for cartridge `idx` (or the Python editor
    /// when `editor` is set) and feed it the code. Returns `true` when the
    /// code loaded successfully; `false` for `Code::UNKNOWN`/`Code::RUST`.
    pub fn load_plugin(&mut self,
                       idx: usize,
                       players: Arc<Mutex<Players>>,
                       info: Arc<Mutex<Info>>,
                       sound: Arc<Mutex<Sound>>,
                       editor: bool)
                       -> bool {
        let data;

        info!("[PX8] Load the plugin");

        self.code_type = self._get_code_type(idx);

        if editor {
            // Editor mode and original code type is different from Python
            match self.code_type {
                Code::LUA => {
                    info!("[PX8] Loading LUA Plugin");
                    // load the lua plugin
                    self.lua_plugin.load(players.clone(),
                                         info.clone(),
                                         self.screen.clone(),
                                         self.noise.clone());
                }
                _ => (),
            }

            data = self.load_editor("./sys/editor/editor.py".to_string()).clone();
            self.cartridges[idx].edit = true;
            self.code_type = Code::PYTHON;
        } else {
            data = self.cartridges[idx].code.get_data().clone();
        }

        match self.code_type {
            Code::LUA => {
                info!("[PX8] Loading LUA Plugin");
                self.lua_plugin.load(players.clone(),
                                     info.clone(),
                                     self.screen.clone(),
                                     self.noise.clone());
                return self.lua_plugin.load_code(data);
            }
            Code::PYTHON => {
                info!("[PX8] Loading PYTHON Plugin");
                self.python_plugin.load(self.palettes.clone(),
                                        players.clone(),
                                        info.clone(),
                                        self.screen.clone(),
                                        sound.clone(),
                                        self.noise.clone());
                return self.python_plugin.load_code(data);
            }
            _ => (),
        }

        false
    }

    /// Read the editor script from disk, joining lines with '\n'.
    /// Panics if the file cannot be opened.
    pub fn load_editor(&mut self, filename: String) -> String {
        let mut data = "".to_string();

        let f = File::open(filename.clone()).unwrap();
        let buf_reader = BufReader::new(f);
        for line in buf_reader.lines() {
            let l = line.unwrap();
            data = data + "\n" + &l;
        }

        data
    }

    /// Run the cartridge's init callback; returns its duration in seconds.
    pub fn call_init(&mut self) -> f64 {
        let current_time = time::now();

        match self.code_type {
            Code::LUA => self.lua_plugin.init(),
            Code::PYTHON => self.python_plugin.init(),
            Code::RUST => {
                self.draw_return = true;
                for callback in self.rust_plugin.iter_mut() {
                    callback.init(self.screen.clone());
                }
            }
            _ => (),
        }

        duration_to_seconds(time::now() - current_time)
    }

    /// Run the cartridge's draw callback; returns its duration in seconds.
    pub fn call_draw(&mut self) -> f64 {
        let current_time = time::now();

        match self.code_type {
            Code::LUA => self.draw_return = self.lua_plugin.draw(),
            Code::PYTHON => self.draw_return = self.python_plugin.draw(),
            Code::RUST => {
                self.draw_return = true;
                for callback in self.rust_plugin.iter_mut() {
                    callback.draw(self.screen.clone());
                }
            }
            _ => (),
        }

        duration_to_seconds(time::now() - current_time)
    }

    /// Run the cartridge's update callback; returns its duration in seconds.
    pub fn call_update(&mut self, players: Arc<Mutex<Players>>) -> f64 {
        let current_time = time::now();

        match self.code_type {
            Code::LUA => self.update_return = self.lua_plugin.update(),
            Code::PYTHON => self.update_return = self.python_plugin.update(),
            Code::RUST => {
                self.update_return = true;
                for callback in self.rust_plugin.iter_mut() {
                    callback.update(players.clone());
                }
            }
            _ => (),
        }

        duration_to_seconds(time::now() - current_time)
    }
}
//! Raw access to the `Termios` structure and its flags #[allow(dead_code, missing_docs, non_camel_case_types)] mod ffi; use std::fmt; pub use self::ffi::cc_t; pub use self::ffi::speed_t; pub use self::ffi::tcflag_t; pub use self::ffi::Struct_termios as Termios; pub use self::ffi::{ cfgetispeed, cfgetospeed, cfmakeraw, cfsetispeed, cfsetospeed, cfsetspeed, tcdrain, tcflow, tcflush, tcgetattr, tcsendbreak, tcsetattr, }; #[cfg(target_os = "freebsd")] pub use self::ffi::{ cfmakesane, tcgetsid, tcsetsid, }; #[cfg(target_os = "linux")] pub use self::ffi::{ tcgetsid, }; // XXX (Debug) Formatting may change impl fmt::Debug for Termios { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { try!(writeln!(f, "iflag:\t{:?}", self.c_iflag)); try!(writeln!(f, "oflag:\t{:?}", self.c_oflag)); try!(writeln!(f, "cflag:\t{:?}", self.c_cflag)); try!(writeln!(f, "lflag:\t{:?}", self.c_lflag)); try!(writeln!(f, "cc:\t{:?}", self.c_cc)); try!(writeln!(f, "ispeed:\t{:?}", self.c_ispeed)); try!(write!(f, "ospeed:\t{:?}", self.c_ospeed)); Ok(()) } } // FIXME (concat_idents!) All these macros should use only one input macro_rules! cc { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } cc! { NCCS_ = NCCS, VDISCARD_ = VDISCARD, VEOF_ = VEOF, VEOL2_ = VEOL2, VEOL_ = VEOL, VERASE_ = VERASE, VINTR_ = VINTR, VKILL_ = VKILL, VLNEXT_ = VLNEXT, VMIN_ = VMIN, VQUIT_ = VQUIT, VREPRINT_ = VREPRINT, VSTART_ = VSTART, VSTOP_ = VSTOP, VSUSP_ = VSUSP, VTIME_ = VTIME, VWERASE_ = VWERASE, } #[cfg(target_os = "freebsd")] cc! { VDSUSP_ = VDSUSP, VERASE2_ = VERASE2, VSTATUS_ = VSTATUS, } #[cfg(target_os = "linux")] cc! { VSWTC_ = VSWTC, } #[cfg(target_os = "macos")] cc! { VDSUSP_ = VDSUSP, VSTATUS_ = VSTATUS, } macro_rules! iflag { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } iflag! 
{ BRKINT_ = BRKINT, ICRNL_ = ICRNL, IGNBRK_ = IGNBRK, IGNCR_ = IGNCR, IGNPAR_ = IGNPAR, IMAXBEL_ = IMAXBEL, INLCR_ = INLCR, INPCK_ = INPCK, ISTRIP_ = ISTRIP, IXANY_ = IXANY, IXOFF_ = IXOFF, IXON_ = IXON, PARMRK_ = PARMRK, } #[cfg(target_os = "linux")] iflag! { IUCLC_ = IUCLC, IUTF8_ = IUTF8, } #[cfg(target_os = "macos")] iflag! { IUTF8_ = IUTF8, } macro_rules! oflag { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } oflag! { OCRNL_ = OCRNL, ONLCR_ = ONLCR, ONLRET_ = ONLRET, ONOCR_ = ONOCR, OPOST_ = OPOST, } #[cfg(target_os = "freebsd")] oflag! { ONOEOT_ = ONOEOT, TAB0_ = TAB0, TAB3_ = TAB3, TABDLY_ = TABDLY, } #[cfg(target_os = "linux")] oflag! { OFDEL_ = OFDEL, OFILL_ = OFILL, OLCUC_ = OLCUC, } #[cfg(target_os = "macos")] oflag! { BSDLY_ = BSDLY, CRDLY_ = CRDLY, FFDLY_ = FFDLY, NLDLY_ = NLDLY, OFDEL_ = OFDEL, OFILL_ = OFILL, ONOEOT_ = ONOEOT, OXTABS_ = OXTABS, TABDLY_ = TABDLY, VTDLY_ = VTDLY, } macro_rules! cflag { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } cflag! { CLOCAL_ = CLOCAL, CREAD_ = CREAD, CRTSCTS_ = CRTSCTS, CS5_ = CS5, CS6_ = CS6, CS7_ = CS7, CS8_ = CS8, CSIZE_ = CSIZE, CSTOPB_ = CSTOPB, HUPCL_ = HUPCL, PARENB_ = PARENB, PARODD_ = PARODD, } #[cfg(target_os = "freebsd")] cflag! { CCAR_OFLOW_ = CCAR_OFLOW, CCTS_OFLOW_ = CCTS_OFLOW, CDSR_OFLOW_ = CDSR_OFLOW, CDTR_IFLOW_ = CDTR_IFLOW, CIGNORE_ = CIGNORE, CRTS_IFLOW_ = CRTS_IFLOW, } #[cfg(target_os = "linux")] cflag! { CIBAUD_ = CIBAUD, CMSPAR_ = CMSPAR, } #[cfg(target_os = "macos")] cflag! { CCAR_OFLOW_ = CCAR_OFLOW, CCTS_OFLOW_ = CCTS_OFLOW, CDSR_OFLOW_ = CDSR_OFLOW, CDTR_IFLOW_ = CDTR_IFLOW, CIGNORE_ = CIGNORE, CRTS_IFLOW_ = CRTS_IFLOW, MDMBUF_ = MDMBUF, } macro_rules! lflag { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } lflag! 
{ ECHOCTL_ = ECHOCTL, ECHOE_ = ECHOE, ECHOKE_ = ECHOKE, ECHOK_ = ECHOK, ECHONL_ = ECHONL, ECHOPRT_ = ECHOPRT, ECHO_ = ECHO, EXTPROC_ = EXTPROC, FLUSHO_ = FLUSHO, ICANON_ = ICANON, IEXTEN_ = IEXTEN, ISIG_ = ISIG, NOFLSH_ = NOFLSH, PENDIN_ = PENDIN, TOSTOP_ = TOSTOP, } #[cfg(target_os = "freebsd")] lflag! { ALTWERASE_ = ALTWERASE, NOKERNINFO_ = NOKERNINFO, } #[cfg(target_os = "linux")] lflag! { XCASE_ = XCASE, } #[cfg(target_os = "macos")] lflag! { ALTWERASE_ = ALTWERASE, NOKERNINFO_ = NOKERNINFO, } macro_rules! tcflow { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } tcflow! { TCIOFF_ = TCIOFF, TCION_ = TCION, TCOOFF_ = TCOOFF, TCOON_ = TCOON, } macro_rules! tcflush { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } tcflush! { TCIFLUSH_ = TCIFLUSH, TCIOFLUSH_ = TCIOFLUSH, TCOFLUSH_ = TCOFLUSH, } macro_rules! tcsetattr { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } tcsetattr! { TCSADRAIN_ = TCSADRAIN, TCSAFLUSH_ = TCSAFLUSH, TCSANOW_ = TCSANOW, } #[cfg(target_os = "freebsd")] tcsetattr! { TCSASOFT_ = TCSASOFT, } #[cfg(target_os = "macos")] tcsetattr! { TCSASOFT_ = TCSASOFT, } macro_rules! baud { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } baud! { B0_ = B0, B110_ = B110, B115200_ = B115200, B1200_ = B1200, B134_ = B134, B150_ = B150, B1800_ = B1800, B19200_ = B19200, B200_ = B200, B230400_ = B230400, B2400_ = B2400, B300_ = B300, B38400_ = B38400, B4800_ = B4800, B50_ = B50, B57600_ = B57600, B600_ = B600, B75_ = B75, B9600_ = B9600, EXTA_ = EXTA, EXTB_ = EXTB, } #[cfg(target_os = "freebsd")] baud! { B14400_ = B14400, B28800_ = B28800, B460800_ = B460800, B7200_ = B7200, B76800_ = B76800, B921600_ = B921600, } #[cfg(target_os = "linux")] baud! 
{ B1000000_ = B1000000, B1152000_ = B1152000, B1500000_ = B1500000, B2000000_ = B2000000, B2500000_ = B2500000, B3000000_ = B3000000, B3500000_ = B3500000, B4000000_ = B4000000, B460800_ = B460800, B500000_ = B500000, B576000_ = B576000, B921600_ = B921600, } #[cfg(target_os = "macos")] baud! { B14400_ = B14400, B28800_ = B28800, B7200_ = B7200, B76800_ = B76800, } fix: tcgetsid doesn't seem to be available in glibc-2.15 //! Raw access to the `Termios` structure and its flags #[allow(dead_code, missing_docs, non_camel_case_types)] mod ffi; use std::fmt; pub use self::ffi::cc_t; pub use self::ffi::speed_t; pub use self::ffi::tcflag_t; pub use self::ffi::Struct_termios as Termios; pub use self::ffi::{ cfgetispeed, cfgetospeed, cfmakeraw, cfsetispeed, cfsetospeed, cfsetspeed, tcdrain, tcflow, tcflush, tcgetattr, tcsendbreak, tcsetattr, }; #[cfg(target_os = "freebsd")] pub use self::ffi::{ cfmakesane, tcgetsid, tcsetsid, }; // XXX (Debug) Formatting may change impl fmt::Debug for Termios { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { try!(writeln!(f, "iflag:\t{:?}", self.c_iflag)); try!(writeln!(f, "oflag:\t{:?}", self.c_oflag)); try!(writeln!(f, "cflag:\t{:?}", self.c_cflag)); try!(writeln!(f, "lflag:\t{:?}", self.c_lflag)); try!(writeln!(f, "cc:\t{:?}", self.c_cc)); try!(writeln!(f, "ispeed:\t{:?}", self.c_ispeed)); try!(write!(f, "ospeed:\t{:?}", self.c_ospeed)); Ok(()) } } // FIXME (concat_idents!) All these macros should use only one input macro_rules! cc { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } cc! { NCCS_ = NCCS, VDISCARD_ = VDISCARD, VEOF_ = VEOF, VEOL2_ = VEOL2, VEOL_ = VEOL, VERASE_ = VERASE, VINTR_ = VINTR, VKILL_ = VKILL, VLNEXT_ = VLNEXT, VMIN_ = VMIN, VQUIT_ = VQUIT, VREPRINT_ = VREPRINT, VSTART_ = VSTART, VSTOP_ = VSTOP, VSUSP_ = VSUSP, VTIME_ = VTIME, VWERASE_ = VWERASE, } #[cfg(target_os = "freebsd")] cc! { VDSUSP_ = VDSUSP, VERASE2_ = VERASE2, VSTATUS_ = VSTATUS, } #[cfg(target_os = "linux")] cc! 
{ VSWTC_ = VSWTC, } #[cfg(target_os = "macos")] cc! { VDSUSP_ = VDSUSP, VSTATUS_ = VSTATUS, } macro_rules! iflag { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } iflag! { BRKINT_ = BRKINT, ICRNL_ = ICRNL, IGNBRK_ = IGNBRK, IGNCR_ = IGNCR, IGNPAR_ = IGNPAR, IMAXBEL_ = IMAXBEL, INLCR_ = INLCR, INPCK_ = INPCK, ISTRIP_ = ISTRIP, IXANY_ = IXANY, IXOFF_ = IXOFF, IXON_ = IXON, PARMRK_ = PARMRK, } #[cfg(target_os = "linux")] iflag! { IUCLC_ = IUCLC, IUTF8_ = IUTF8, } #[cfg(target_os = "macos")] iflag! { IUTF8_ = IUTF8, } macro_rules! oflag { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } oflag! { OCRNL_ = OCRNL, ONLCR_ = ONLCR, ONLRET_ = ONLRET, ONOCR_ = ONOCR, OPOST_ = OPOST, } #[cfg(target_os = "freebsd")] oflag! { ONOEOT_ = ONOEOT, TAB0_ = TAB0, TAB3_ = TAB3, TABDLY_ = TABDLY, } #[cfg(target_os = "linux")] oflag! { OFDEL_ = OFDEL, OFILL_ = OFILL, OLCUC_ = OLCUC, } #[cfg(target_os = "macos")] oflag! { BSDLY_ = BSDLY, CRDLY_ = CRDLY, FFDLY_ = FFDLY, NLDLY_ = NLDLY, OFDEL_ = OFDEL, OFILL_ = OFILL, ONOEOT_ = ONOEOT, OXTABS_ = OXTABS, TABDLY_ = TABDLY, VTDLY_ = VTDLY, } macro_rules! cflag { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } cflag! { CLOCAL_ = CLOCAL, CREAD_ = CREAD, CRTSCTS_ = CRTSCTS, CS5_ = CS5, CS6_ = CS6, CS7_ = CS7, CS8_ = CS8, CSIZE_ = CSIZE, CSTOPB_ = CSTOPB, HUPCL_ = HUPCL, PARENB_ = PARENB, PARODD_ = PARODD, } #[cfg(target_os = "freebsd")] cflag! { CCAR_OFLOW_ = CCAR_OFLOW, CCTS_OFLOW_ = CCTS_OFLOW, CDSR_OFLOW_ = CDSR_OFLOW, CDTR_IFLOW_ = CDTR_IFLOW, CIGNORE_ = CIGNORE, CRTS_IFLOW_ = CRTS_IFLOW, } #[cfg(target_os = "linux")] cflag! { CIBAUD_ = CIBAUD, CMSPAR_ = CMSPAR, } #[cfg(target_os = "macos")] cflag! { CCAR_OFLOW_ = CCAR_OFLOW, CCTS_OFLOW_ = CCTS_OFLOW, CDSR_OFLOW_ = CDSR_OFLOW, CDTR_IFLOW_ = CDTR_IFLOW, CIGNORE_ = CIGNORE, CRTS_IFLOW_ = CRTS_IFLOW, MDMBUF_ = MDMBUF, } macro_rules! 
lflag { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } lflag! { ECHOCTL_ = ECHOCTL, ECHOE_ = ECHOE, ECHOKE_ = ECHOKE, ECHOK_ = ECHOK, ECHONL_ = ECHONL, ECHOPRT_ = ECHOPRT, ECHO_ = ECHO, EXTPROC_ = EXTPROC, FLUSHO_ = FLUSHO, ICANON_ = ICANON, IEXTEN_ = IEXTEN, ISIG_ = ISIG, NOFLSH_ = NOFLSH, PENDIN_ = PENDIN, TOSTOP_ = TOSTOP, } #[cfg(target_os = "freebsd")] lflag! { ALTWERASE_ = ALTWERASE, NOKERNINFO_ = NOKERNINFO, } #[cfg(target_os = "linux")] lflag! { XCASE_ = XCASE, } #[cfg(target_os = "macos")] lflag! { ALTWERASE_ = ALTWERASE, NOKERNINFO_ = NOKERNINFO, } macro_rules! tcflow { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } tcflow! { TCIOFF_ = TCIOFF, TCION_ = TCION, TCOOFF_ = TCOOFF, TCOON_ = TCOON, } macro_rules! tcflush { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } tcflush! { TCIFLUSH_ = TCIFLUSH, TCIOFLUSH_ = TCIOFLUSH, TCOFLUSH_ = TCOFLUSH, } macro_rules! tcsetattr { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } tcsetattr! { TCSADRAIN_ = TCSADRAIN, TCSAFLUSH_ = TCSAFLUSH, TCSANOW_ = TCSANOW, } #[cfg(target_os = "freebsd")] tcsetattr! { TCSASOFT_ = TCSASOFT, } #[cfg(target_os = "macos")] tcsetattr! { TCSASOFT_ = TCSASOFT, } macro_rules! baud { ($($ffi:ident = $ident:ident,)+) => { $( pub use self::ffi::$ffi as $ident; )+ } } baud! { B0_ = B0, B110_ = B110, B115200_ = B115200, B1200_ = B1200, B134_ = B134, B150_ = B150, B1800_ = B1800, B19200_ = B19200, B200_ = B200, B230400_ = B230400, B2400_ = B2400, B300_ = B300, B38400_ = B38400, B4800_ = B4800, B50_ = B50, B57600_ = B57600, B600_ = B600, B75_ = B75, B9600_ = B9600, EXTA_ = EXTA, EXTB_ = EXTB, } #[cfg(target_os = "freebsd")] baud! { B14400_ = B14400, B28800_ = B28800, B460800_ = B460800, B7200_ = B7200, B76800_ = B76800, B921600_ = B921600, } #[cfg(target_os = "linux")] baud! 
{ B1000000_ = B1000000, B1152000_ = B1152000, B1500000_ = B1500000, B2000000_ = B2000000, B2500000_ = B2500000, B3000000_ = B3000000, B3500000_ = B3500000, B4000000_ = B4000000, B460800_ = B460800, B500000_ = B500000, B576000_ = B576000, B921600_ = B921600, } #[cfg(target_os = "macos")] baud! { B14400_ = B14400, B28800_ = B28800, B7200_ = B7200, B76800_ = B76800, }
use std::rc::Rc;
use std::collections::BTreeMap;
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::usize;

use regex::Regex;

use std::collections::{HashMap, HashSet};

use crate::db::*;
use crate::types::*;
use crate::data_types::*;
use crate::web::data::*;
use crate::web::config::*;
use crate::web::util::cmp_str_dates;
use crate::bio::util::rev_comp;

use pombase_rc_string::RcString;

use crate::interpro::UniprotResult;
use crate::pfam::PfamProteinDetails;

/// Build a `ConfigOrganism` from a Chado organism row, taking the taxon ID
/// from the organism's "taxon_id" organismprop.
///
/// Panics if no "taxon_id" prop exists or its value doesn't parse as u32.
/// If several "taxon_id" props exist, the last one wins.
fn make_organism(rc_organism: &Rc<Organism>) -> ConfigOrganism {
    let mut maybe_taxonid: Option<u32> = None;
    for prop in rc_organism.organismprops.borrow().iter() {
        if prop.prop_type.name == "taxon_id" {
            maybe_taxonid = Some(prop.value.parse().unwrap());
        }
    }
    ConfigOrganism {
        taxonid: maybe_taxonid.unwrap(),
        genus: rc_organism.genus.clone(),
        species: rc_organism.species.clone(),
        alternative_names: vec![],
        assembly_version: None,
    }
}

type TermShortOptionMap = HashMap<TermId, Option<TermShort>>;

type UniprotIdentifier = RcString;

/// Accumulates the intermediate state needed to turn a raw Chado dump
/// (`raw`) plus external datasets (InterPro, Pfam, RNAcentral) into the
/// web data maps.
pub struct WebDataBuild<'a> {
    raw: &'a Raw,
    domain_data: &'a HashMap<UniprotIdentifier, UniprotResult>,
    pfam_data: &'a Option<HashMap<UniprotIdentifier, PfamProteinDetails>>,
    rnacentral_data: &'a Option<RNAcentralAnnotations>,
    config: &'a Config,

    genes: UniquenameGeneMap,
    genotypes: UniquenameGenotypeMap,
    genotype_backgrounds: HashMap<GenotypeUniquename, RcString>,
    alleles: UniquenameAlleleMap,
    transcripts: UniquenameTranscriptMap,
    other_features: UniquenameFeatureShortMap,
    terms: TermIdDetailsMap,
    chromosomes: ChrNameDetailsMap,
    references: UniquenameReferenceMap,
    all_ont_annotations: HashMap<TermId, Vec<OntAnnotationId>>,
    all_not_ont_annotations: HashMap<TermId, Vec<OntAnnotationId>>,

    // map from term name to term ID (ie "nucleus" -> "GO:0005634")
    term_ids_by_name: HashMap<RcString, TermId>,

    genes_of_transcripts: HashMap<RcString, RcString>,
    transcripts_of_polypeptides: HashMap<RcString, RcString>,
    parts_of_transcripts: HashMap<RcString, Vec<FeatureShort>>,
    genes_of_alleles: HashMap<RcString, RcString>,
    loci_of_genotypes: HashMap<RcString, HashMap<String, GenotypeLocus>>,

    // a map from IDs of terms from the "PomBase annotation extension terms" cv
    // to a Vec of the details of each of the extension
    parts_of_extensions: HashMap<TermId, Vec<ExtPart>>,

    base_term_of_extensions: HashMap<TermId, TermId>,

    // a set of child terms for each term from the cvtermpath table
    children_by_termid: HashMap<TermId, HashSet<TermId>>,
    dbxrefs_of_features: HashMap<RcString, HashSet<RcString>>,

    possible_interesting_parents: HashSet<InterestingParent>,

    recent_references: RecentReferences,
    all_community_curated: Vec<ReferenceShort>,
    all_admin_curated: Vec<ReferenceShort>,

    gene_expression_measurements: GeneExDataSetMeasurements,

    term_subsets: IdTermSubsetMap,
    gene_subsets: IdGeneSubsetMap,

    annotation_details: IdOntAnnotationDetailMap,

    ont_annotations: Vec<OntAnnotation>,
}

/// Return six freshly-created empty lookup maps (references, genes,
/// genotypes, alleles, transcripts and terms keyed by gene uniquename).
fn get_maps() ->
    (HashMap<RcString, ReferenceShortOptionMap>,
     HashMap<RcString, GeneShortOptionMap>,
     HashMap<RcString, GenotypeShortMap>,
     HashMap<RcString, AlleleShortMap>,
     HashMap<RcString, TranscriptDetailsOptionMap>,
     HashMap<GeneUniquename, TermShortOptionMap>)
{
    (HashMap::new(), HashMap::new(), HashMap::new(), HashMap::new(),
     HashMap::new(), HashMap::new())
}

/// Determine the expression level for a feature_relationship.
///
/// Deletion alleles always report "Null"; otherwise the value of the
/// relationship's "expression" prop is returned (or None when absent).
fn get_feat_rel_expression(feature: &Feature,
                           feature_relationship: &FeatureRelationship) -> Option<RcString> {
    // deletion alleles (per the feature's "allele_type" prop) are always Null
    for feature_prop in feature.featureprops.borrow().iter() {
        if feature_prop.prop_type.name == "allele_type" {
            if let Some(ref value) = feature_prop.value {
                if value == "deletion" {
                    return Some("Null".into());
                }
            }
        }
    }

    for rel_prop in feature_relationship.feature_relationshipprops.borrow().iter() {
        if rel_prop.prop_type.name == "expression" {
            return rel_prop.value.clone();
        }
    }

    None
}

/// Return the value of the first feature_relationshipprop named
/// `prop_name`, or None when there is no such prop.
fn get_feat_rel_prop_value(prop_name: &str,
                           feature_relationship: &FeatureRelationship) -> Option<RcString> {
    for rel_prop in feature_relationship.feature_relationshipprops.borrow().iter() {
        if rel_prop.prop_type.name == prop_name {
            return rel_prop.value.clone();
        }
    }

    None
}

/// True when the reference carries at least one kind of annotation
/// (ontology, interaction, ortholog or paralog).
fn reference_has_annotation(reference_details: &ReferenceDetails) -> bool {
    !reference_details.cv_annotations.is_empty() ||
    !reference_details.physical_interactions.is_empty() ||
    !reference_details.genetic_interactions.is_empty() ||
    !reference_details.ortholog_annotations.is_empty() ||
    !reference_details.paralog_annotations.is_empty()
}

/// True for feature types that represent genes ("gene" or "pseudogene").
fn is_gene_type(feature_type_name: &str) -> bool {
    feature_type_name == "gene" || feature_type_name == "pseudogene"
}

/// Order two extension parts using the configured relation ordering.
///
/// Priority: position in `relation_order` first, then position in
/// `always_last`, then alphabetical by relation name; ties between equal
/// names put gene ranges before non-gene ranges.
pub fn compare_ext_part_with_config(extension_relation_order: &RelationOrder,
                                    ep1: &ExtPart, ep2: &ExtPart) -> Ordering {
    let rel_order_conf = extension_relation_order;
    let order_conf = &rel_order_conf.relation_order;
    let always_last_conf = &rel_order_conf.always_last;

    let maybe_ep1_index = order_conf.iter().position(|r| *r == ep1.rel_type_name);
    let maybe_ep2_index = order_conf.iter().position(|r| *r == ep2.rel_type_name);

    if let Some(ep1_index) = maybe_ep1_index {
        if let Some(ep2_index) = maybe_ep2_index {
            ep1_index.cmp(&ep2_index)
        } else {
            // configured relations sort before unconfigured ones
            Ordering::Less
        }
    } else {
        if maybe_ep2_index.is_some() {
            Ordering::Greater
        } else {
            let maybe_ep1_last_index =
                always_last_conf.iter().position(|r| *r == ep1.rel_type_name);
            let maybe_ep2_last_index =
                always_last_conf.iter().position(|r| *r == ep2.rel_type_name);

            if let Some(ep1_last_index) = maybe_ep1_last_index {
                if let Some(ep2_last_index) = maybe_ep2_last_index {
                    ep1_last_index.cmp(&ep2_last_index)
                } else {
                    // "always last" relations sort after everything else
                    Ordering::Greater
                }
            } else {
                if maybe_ep2_last_index.is_some() {
                    Ordering::Less
                } else {
                    let name_cmp = ep1.rel_type_name.cmp(&ep2.rel_type_name);
                    if name_cmp == Ordering::Equal {
                        // same relation name: gene ranges come first
                        if ep1.ext_range.is_gene() && !ep2.ext_range.is_gene() {
                            Ordering::Less
                        } else {
                            if !ep1.ext_range.is_gene() && ep2.ext_range.is_gene() {
                                Ordering::Greater
                            } else {
                                Ordering::Equal
                            }
                        }
                    } else {
                        name_cmp
                    }
                }
            }
        }
    }
}

lazy_static! {
    // characters not allowed in a genotype display name; replaced by '_'
    static ref BAD_GENOTYPE_NAME_CHARS_RE: Regex =
        Regex::new(r"[% /&;]").unwrap();
}

/// Build a display name for a genotype from its loci.
///
/// Alleles within a locus are joined with "/", loci with " " (both sorted),
/// and characters matching BAD_GENOTYPE_NAME_CHARS_RE become "_".
/// Panics if an allele uniquename is missing from `allele_map`.
pub fn make_genotype_display_name(loci: &[GenotypeLocus],
                                  allele_map: &UniquenameAlleleMap) -> RcString {
    let mut locus_display_names: Vec<String> =
        loci.iter().map(|locus| {
            let mut allele_display_names: Vec<String> =
                locus.expressed_alleles.iter().map(|expressed_allele| {
                    let allele_short =
                        allele_map.get(&expressed_allele.allele_uniquename).unwrap();
                    let mut encoded_name_and_type =
                        String::from(&allele_short.encoded_name_and_type);
                    // deletion alleles keep their plain encoded name
                    if allele_short.allele_type != "deletion" {
                        if encoded_name_and_type == "unnamed-unrecorded-unrecorded" {
                            // prefix fully-unrecorded alleles with the gene name
                            encoded_name_and_type =
                                format!("{}-{}", allele_short.gene_uniquename,
                                        encoded_name_and_type);
                        }
                        if let Some(ref expression) = expressed_allele.expression {
                            encoded_name_and_type +=
                                &format!("-expression-{}", expression.to_lowercase());
                        }
                    }
                    encoded_name_and_type
                }).collect();
            allele_display_names.sort();
            allele_display_names.join("/")
        }).collect();

    locus_display_names.sort();

    let joined_alleles = locus_display_names.join(" ");

    let clean_display_name =
        BAD_GENOTYPE_NAME_CHARS_RE.replace_all(&joined_alleles, "_");
    RcString::from(clean_display_name.as_ref())
}

/// Map a featureloc's numeric phase (0/1/2) to the `Phase` enum.
/// Panics on any other value.
fn make_phase(feature_loc: &Featureloc) -> Option<Phase> {
    if let Some(phase) = feature_loc.phase {
        match phase {
            0 => Some(Phase::Zero),
            1 => Some(Phase::One),
            2 => Some(Phase::Two),
            _ => panic!(),
        }
    } else {
        None
    }
}

/// Build a 1-based `ChromosomeLocation` from a feature's first featureloc
/// (Chado fmin is 0-based half-open, hence the +1). Returns None for
/// features with no featureloc; panics on non-positive coordinates or a
/// strand other than 1/-1.
fn make_location(chromosome_map: &ChrNameDetailsMap,
                 feat: &Feature) -> Option<ChromosomeLocation> {
    let feature_locs = feat.featurelocs.borrow();
    match feature_locs.get(0) {
        Some(feature_loc) => {
            let start_pos =
                if feature_loc.fmin + 1 >= 1 {
                    (feature_loc.fmin + 1) as usize
                } else {
                    panic!("start_pos less than 1");
                };
            let end_pos =
                if feature_loc.fmax >= 1 {
                    feature_loc.fmax as usize
                } else {
                    panic!("start_end less than 1");
                };
            let feature_uniquename = &feature_loc.srcfeature.uniquename;
            let chr_short = make_chromosome_short(chromosome_map, feature_uniquename);
            Some(ChromosomeLocation {
                chromosome_name: chr_short.name,
                start_pos,
                end_pos,
                strand: match feature_loc.strand {
                    1 => Strand::Forward,
                    -1 => Strand::Reverse,
                    _ => panic!(),
                },
                phase: make_phase(feature_loc),
            })
        },
        None => None,
    }
}

/// Extract the residues covered by `loc` from the chromosome sequence,
/// reverse-complemented for reverse-strand locations.
/// Panics if the 1-based range is out of bounds for `chr.residues`.
fn get_loc_residues(chr: &ChromosomeDetails,
                    loc: &ChromosomeLocation) -> Residues {
    let start = (loc.start_pos - 1) as usize;
    let end = loc.end_pos as usize;
    let residues: Residues = chr.residues[start..end].into();
    if loc.strand == Strand::Forward {
        residues
    } else {
        rev_comp(&residues)
    }
}

/// Build a `FeatureShort` (type, names, location, residues) for a located
/// feature. Panics when the feature has no featureloc, its chromosome is
/// unknown, or its Chado type name isn't handled below.
fn make_feature_short(chromosome_map: &ChrNameDetailsMap, feat: &Feature)
                      -> FeatureShort {
    let maybe_loc = make_location(chromosome_map, feat);
    if let Some(loc) = maybe_loc {
        if let Some(chr) = chromosome_map.get(&loc.chromosome_name) {
            let residues = get_loc_residues(chr, &loc);
            // translate the Chado/SO feature type name to the enum
            let feature_type = match &feat.feat_type.name as &str {
                "five_prime_UTR" => FeatureType::FivePrimeUtr,
                "pseudogenic_exon" | "exon" => FeatureType::Exon,
                "three_prime_UTR" => FeatureType::ThreePrimeUtr,
                "dg_repeat" => FeatureType::DGRepeat,
                "dh_repeat" => FeatureType::DHRepeat,
                "gap" => FeatureType::Gap,
                "gene_group" => FeatureType::GeneGroup,
                "long_terminal_repeat" => FeatureType::LongTerminalRepeat,
                "low_complexity_region" => FeatureType::LowComplexityRegion,
                "LTR_retrotransposon" => FeatureType::LTRRetrotransposon,
                "mating_type_region" => FeatureType::MatingTypeRegion,
                "nuclear_mt_pseudogene" => FeatureType::NuclearMtPseudogene,
                "origin_of_replication" => FeatureType::OriginOfReplication,
                "polyA_signal_sequence" => FeatureType::PolyASignalSequence,
                "polyA_site" => FeatureType::PolyASite,
                "promoter" => FeatureType::Promoter,
                "region" => FeatureType::Region,
                "regional_centromere" => FeatureType::RegionalCentromere,
                "regional_centromere_central_core" => FeatureType::RegionalCentromereCentralCore,
                "regional_centromere_inner_repeat_region" => FeatureType::RegionalCentromereInnerRepeatRegion,
                "repeat_region" => FeatureType::RepeatRegion,
                "TR_box" => FeatureType::TRBox,
                "SNP" => FeatureType::SNP,
                _ => panic!("can't handle feature type: {}", feat.feat_type.name),
            };
            FeatureShort {
                feature_type,
                uniquename: feat.uniquename.clone(),
                name: feat.name.clone(),
                location: loc,
                residues,
            }
        } else {
            panic!("can't find chromosome {}", loc.chromosome_name);
        }
    } else {
        panic!("{} has no featureloc", feat.uniquename);
    }
}

/// Look up a chromosome by name and return its `ChromosomeShort` summary.
/// Panics when the chromosome isn't in the map.
pub fn make_chromosome_short<'a>(chromosome_map: &'a ChrNameDetailsMap,
                                 chromosome_name: &'a str) -> ChromosomeShort {
    if let Some(chr) = chromosome_map.get(chromosome_name) {
        chr.make_chromosome_short()
    } else {
        panic!("can't find chromosome: {}", chromosome_name);
    }
}

/// Summarise a reference, returning None for the placeholder uniquename
/// "null". Panics when a non-"null" uniquename is missing from the map.
fn make_reference_short(reference_map: &UniquenameReferenceMap,
                        reference_uniquename: &str) -> Option<ReferenceShort> {
    if reference_uniquename == "null" {
        None
    } else {
        let reference_details = reference_map.get(reference_uniquename)
            .unwrap_or_else(|| panic!("missing reference in make_reference_short(): {}",
                                      reference_uniquename));

        let reference_short = ReferenceShort::from_reference_details(reference_details);

        Some(reference_short)
    }
}

lazy_static! {
    // "<gene>-promoter" style uniquenames
    static ref PROMOTER_RE: Regex = Regex::new(r"^(?P<gene>.*)-promoter$").unwrap();
    // "<db>:<accession>" style identifiers
    static ref PREFIX_AND_ID_RE: Regex = Regex::new(r"^(?P<prefix>\S+):(?P<id>\S+)$").unwrap();
    // "<gene>.<n>" style transcript uniquenames
    static ref TRANSCRIPT_ID_RE: Regex = Regex::new(r"^(?P<gene>.*)\.(?P<suffix>\d+)$").unwrap();
}

// Some ancestor terms are useful in the web code. This function uses the Config and returns
// the terms that might be useful.
// Collect every (termid, rel_name) pair that the web front end may need as
// an "interesting parent": explicitly configured parents, extension display
// conditions, slim terms, gene-results visualisation terms, viability terms
// and CV filter/split ancestors.
fn get_possible_interesting_parents(config: &Config) -> HashSet<InterestingParent> {
    let mut ret = HashSet::new();

    // parents listed directly in the configuration
    for parent_conf in &config.interesting_parents {
        ret.insert(parent_conf.clone());
    }

    // terms used as "if_descendant_of" conditions for extension display names
    for ext_conf in &config.extension_display_names {
        if let Some(ref conf_termid) = ext_conf.if_descendant_of {
            ret.insert(InterestingParent {
                termid: conf_termid.clone(),
                rel_name: RcString::from("is_a"),
            });
        }
    }

    // helper: add the termid once per descendant relation name
    let add_to_set = |set: &mut HashSet<_>, termid: RcString| {
        for rel_name in &DESCENDANT_REL_NAMES {
            set.insert(InterestingParent {
                termid: termid.to_owned(),
                rel_name: RcString::from(*rel_name),
            });
        }
    };

    for slim_config in config.slims.values() {
        for go_slim_conf in &slim_config.terms {
            add_to_set(&mut ret, go_slim_conf.termid.clone());
        }
    }

    for field_name in &config.gene_results.visualisation_field_names {
        if let Some(column_conf) = config.gene_results.field_config.get(field_name) {
            for attr_value_conf in &column_conf.attr_values {
                if let Some(ref termid) = attr_value_conf.termid {
                    add_to_set(&mut ret, termid.clone());
                }
            }
        } else {
            panic!["can't find field configuration for {}", field_name];
        }
    }

    ret.insert(InterestingParent {
        termid: config.viability_terms.viable.clone(),
        rel_name: RcString::from("is_a"),
    });
    ret.insert(InterestingParent {
        termid: config.viability_terms.inviable.clone(),
        rel_name: RcString::from("is_a"),
    });

    // helper: ancestors from a filter category, skipping has_part for CVs
    // that don't use it
    let add_filter_ancestor = |set: &mut HashSet<_>, category: &AncestorFilterCategory, cv_name: &str| {
        for ancestor in &category.ancestors {
            for config_rel_name in &DESCENDANT_REL_NAMES {
                if *config_rel_name == "has_part" && !HAS_PART_CV_NAMES.contains(&cv_name) {
                    continue;
                }
                set.insert(InterestingParent {
                    termid: ancestor.clone(),
                    rel_name: RcString::from(*config_rel_name),
                });
            }
        }
    };

    for (cv_name, conf) in &config.cv_config {
        for filter in &conf.filters {
            for category in &filter.term_categories {
                add_filter_ancestor(&mut ret, category, cv_name);
            }
            for category in &filter.extension_categories {
                add_filter_ancestor(&mut ret, category, cv_name);
            }
        }
        for split_by_parent_config in &conf.split_by_parents {
            for ancestor in &split_by_parent_config.termids {
                // a "NOT " prefix marks an excluded parent in the config;
                // strip it before storing
                let ancestor_termid =
                    if let Some(without_prefix) = ancestor.strip_prefix("NOT ") {
                        RcString::from(without_prefix)
                    } else {
                        ancestor.clone()
                    };
                ret.insert(InterestingParent {
                    termid: ancestor_termid,
                    rel_name: "is_a".into(),
                });
            }
        }
    }

    ret
}

// maximum length of the "recently added/curated" publication lists
const MAX_RECENT_REFS: usize = 20;

// Return up to MAX_RECENT_REFS references sorted newest-first by the date
// they were added to Canto; references without a date sort last.
fn make_recently_added(references_map: &UniquenameReferenceMap,
                       all_ref_uniquenames: &[RcString]) -> Vec<ReferenceShort> {
    let mut date_sorted_pub_uniquenames = all_ref_uniquenames.to_owned();
    {
        let ref_added_date_cmp =
            |ref_uniquename1: &ReferenceUniquename, ref_uniquename2: &ReferenceUniquename| {
                let ref1 = references_map.get(ref_uniquename1).unwrap();
                let ref2 = references_map.get(ref_uniquename2).unwrap();
                if let Some(ref ref1_added_date) = ref1.canto_added_date {
                    if let Some(ref ref2_added_date) = ref2.canto_added_date {
                        // .reverse() => newest first
                        cmp_str_dates(ref1_added_date, ref2_added_date).reverse()
                    } else {
                        Ordering::Less
                    }
                } else {
                    if ref2.canto_added_date.is_some() {
                        Ordering::Greater
                    } else {
                        Ordering::Equal
                    }
                }
            };
        date_sorted_pub_uniquenames.sort_by(ref_added_date_cmp);
    }
    let recently_added_iter = date_sorted_pub_uniquenames.iter().take(MAX_RECENT_REFS);
    let mut recently_added: Vec<ReferenceShort> = vec![];
    for ref_uniquename in recently_added_iter {
        let ref_short_maybe = make_reference_short(references_map, ref_uniquename);
        if let Some(ref_short) = ref_short_maybe {
            recently_added.push(ref_short);
        }
    }
    recently_added
}

// Partition approved, curated publications into (recent admin, recent
// community, all community, all admin) lists, sorted newest-first by
// approval/submission date.  (Continues on the following lines.)
fn make_canto_curated(references_map: &UniquenameReferenceMap,
                      all_ref_uniquenames: &[RcString])
                      -> (Vec<ReferenceShort>, Vec<ReferenceShort>,
                          Vec<ReferenceShort>, Vec<ReferenceShort>) {
    // only keep references that were approved and have a curator role
    let mut sorted_pub_uniquenames: Vec<ReferenceUniquename> =
        all_ref_uniquenames.iter()
        .filter(|ref_uniquename| {
            let reference = references_map.get(*ref_uniquename).unwrap();
            reference.canto_approved_date.is_some() &&
                reference.canto_curator_role.is_some()
        })
        .cloned()
        .collect();
    {
        let pub_date_cmp
= |ref_uniquename1: &ReferenceUniquename, ref_uniquename2: &ReferenceUniquename| { let ref1 = references_map.get(ref_uniquename1).unwrap(); let ref2 = references_map.get(ref_uniquename2).unwrap(); // use first approval date, but fall back to the most recent approval date let ref1_date = ref1.canto_first_approved_date.as_ref() .unwrap_or_else(|| ref1.canto_session_submitted_date.as_ref().unwrap()); let ref2_date = ref2.canto_first_approved_date.as_ref() .unwrap_or_else(|| ref2.canto_session_submitted_date.as_ref().unwrap()); cmp_str_dates(ref2_date, ref1_date) }; sorted_pub_uniquenames.sort_by(pub_date_cmp); } let mut recent_admin_curated = vec![]; let mut recent_community_curated = vec![]; let mut all_community_curated = vec![]; let mut all_admin_curated = vec![]; let ref_uniquename_iter = sorted_pub_uniquenames.iter(); for ref_uniquename in ref_uniquename_iter { let reference = references_map.get(ref_uniquename).unwrap(); let ref_short = make_reference_short(references_map, ref_uniquename).unwrap(); if reference.canto_curator_role == Some("community".into()) { all_community_curated.push(ref_short.clone()); if recent_community_curated.len() <= MAX_RECENT_REFS { recent_community_curated.push(ref_short); } } else { all_admin_curated.push(ref_short.clone()); if recent_admin_curated.len() <= MAX_RECENT_REFS { recent_admin_curated.push(ref_short); } } } (recent_admin_curated, recent_community_curated, all_community_curated, all_admin_curated) } fn add_introns_to_transcript(chromosome: &ChromosomeDetails, transcript_uniquename: &str, parts: &mut Vec<FeatureShort>) { let mut new_parts: Vec<FeatureShort> = vec![]; let mut intron_count = 0; for part in parts.drain(0..) 
{ let mut maybe_new_intron = None; if let Some(prev_part) = new_parts.last() { let intron_start = prev_part.location.end_pos + 1; let intron_end = part.location.start_pos - 1; if intron_start > intron_end { if intron_start > intron_end + 1 { println!("no gap between exons at {}..{} in {}", intron_start, intron_end, transcript_uniquename); } // if intron_start == intron_end-1 then it is a one base overlap that // represents a frameshift in the reference See: // https://github.com/pombase/curation/issues/1453#issuecomment-303214177 } else { intron_count += 1; let new_intron_loc = ChromosomeLocation { chromosome_name: prev_part.location.chromosome_name.clone(), start_pos: intron_start, end_pos: intron_end, strand: prev_part.location.strand, phase: None, }; let intron_uniquename = format!("{}:intron:{}", transcript_uniquename, intron_count); let intron_residues = get_loc_residues(chromosome, &new_intron_loc); let intron_type = if prev_part.feature_type == FeatureType::Exon && part.feature_type == FeatureType::Exon { FeatureType::CdsIntron } else { if prev_part.feature_type == FeatureType::FivePrimeUtr { FeatureType::FivePrimeUtrIntron } else { FeatureType::ThreePrimeUtrIntron } }; maybe_new_intron = Some(FeatureShort { feature_type: intron_type, uniquename: RcString::from(&intron_uniquename), name: None, location: new_intron_loc, residues: intron_residues, }); } } if let Some(new_intron) = maybe_new_intron { new_parts.push(new_intron); } new_parts.push(part); } *parts = new_parts; } fn validate_transcript_parts(transcript_uniquename: &str, parts: &[FeatureShort]) { let mut seen_exon = false; for part in parts { if part.feature_type == FeatureType::Exon { seen_exon = true; break; } } if !seen_exon { panic!("transcript has no exons: {}", transcript_uniquename); } if parts[0].feature_type != FeatureType::Exon { for i in 1..parts.len() { let part = &parts[i]; if part.feature_type == FeatureType::Exon { let last_utr_before_exons = &parts[i-1]; let first_exon = &parts[i]; if 
last_utr_before_exons.location.end_pos + 1 != first_exon.location.start_pos {
                    // warn (don't panic) when the UTR and first exon don't abut
                    println!("{} and exon don't meet up: {} at pos {}",
                             last_utr_before_exons.feature_type, transcript_uniquename,
                             last_utr_before_exons.location.end_pos);
                }
                break;
            } else {
                // parts before the exons: 5' UTR on the forward strand,
                // 3' UTR on the reverse strand
                if part.location.strand == Strand::Forward {
                    if part.feature_type != FeatureType::FivePrimeUtr {
                        println!("{:?}", parts);
                        panic!("wrong feature type '{}' before exons in {}",
                               part.feature_type, transcript_uniquename);
                    }
                } else {
                    if part.feature_type != FeatureType::ThreePrimeUtr {
                        println!("{:?}", parts);
                        panic!("wrong feature type '{}' after exons in {}",
                               part.feature_type, transcript_uniquename);
                    }
                }
            }
        }
    }

    let last_part = parts.last().unwrap();

    if last_part.feature_type != FeatureType::Exon {
        // scan backwards to find the last exon and check the trailing UTRs
        for i in (0..parts.len()-1).rev() {
            let part = &parts[i];
            if part.feature_type == FeatureType::Exon {
                let first_utr_after_exons = &parts[i+1];
                let last_exon = &parts[i];
                if last_exon.location.end_pos + 1 != first_utr_after_exons.location.start_pos {
                    println!("{} and exon don't meet up: {} at pos {}",
                             first_utr_after_exons.feature_type, transcript_uniquename,
                             first_utr_after_exons.location.end_pos);
                }
                break;
            } else {
                // parts after the exons: 3' UTR on the forward strand,
                // 5' UTR on the reverse strand
                if part.location.strand == Strand::Forward {
                    if part.feature_type != FeatureType::ThreePrimeUtr {
                        panic!("wrong feature type '{}' before exons in {}",
                               part.feature_type, transcript_uniquename);
                    }
                } else {
                    if part.feature_type != FeatureType::FivePrimeUtr {
                        panic!("wrong feature type '{}' after exons in {}",
                               part.feature_type, transcript_uniquename);
                    }
                }
            }
        }
    }
}

impl <'a> WebDataBuild<'a> {
    /// Create an empty builder over the raw Chado dump plus the external
    /// (UniProt domain, Pfam, RNAcentral) data sets; all derived maps start
    /// out empty and are filled in by the process_*/store_* methods.
    pub fn new(raw: &'a Raw,
               domain_data: &'a HashMap<UniprotIdentifier, UniprotResult>,
               pfam_data: &'a Option<HashMap<UniprotIdentifier, PfamProteinDetails>>,
               rnacentral_data: &'a Option<RNAcentralAnnotations>,
               config: &'a Config) -> WebDataBuild<'a> {
        WebDataBuild {
            raw,
            domain_data,
            pfam_data,
            rnacentral_data,
            config,

            genes: BTreeMap::new(),
            genotypes: HashMap::new(),
            genotype_backgrounds: HashMap::new(),
            alleles: HashMap::new(),
            transcripts: HashMap::new(),
            other_features: HashMap::new(),
            terms: HashMap::new(),
            chromosomes: BTreeMap::new(),
            references: HashMap::new(),
            all_ont_annotations: HashMap::new(),
            all_not_ont_annotations: HashMap::new(),
            recent_references: RecentReferences {
                admin_curated: vec![],
                community_curated: vec![],
                pubmed: vec![],
            },
            all_community_curated: vec![],
            all_admin_curated: vec![],
            term_ids_by_name: HashMap::new(),
            genes_of_transcripts: HashMap::new(),
            transcripts_of_polypeptides: HashMap::new(),
            parts_of_transcripts: HashMap::new(),
            genes_of_alleles: HashMap::new(),
            loci_of_genotypes: HashMap::new(),
            parts_of_extensions: HashMap::new(),
            base_term_of_extensions: HashMap::new(),
            children_by_termid: HashMap::new(),
            dbxrefs_of_features: HashMap::new(),
            possible_interesting_parents: get_possible_interesting_parents(config),
            term_subsets: HashMap::new(),
            gene_subsets: HashMap::new(),
            annotation_details: HashMap::new(),
            ont_annotations: vec![],
            gene_expression_measurements: HashMap::new(),
        }
    }

    /// Record that `identifier` references the given publication (skipping
    /// the "null" placeholder); the ReferenceShort value is filled in later.
    fn add_ref_to_hash(&self,
                       seen_references: &mut HashMap<RcString, ReferenceShortOptionMap>,
                       identifier: &str,
                       maybe_reference_uniquename: &Option<ReferenceUniquename>) {
        if let Some(reference_uniquename) = maybe_reference_uniquename {
            if reference_uniquename != "null" {
                seen_references
                    .entry(identifier.into())
                    .or_insert_with(HashMap::new)
                    .insert(reference_uniquename.clone(), None);
            }
        }
    }

    /// Record that `identifier` references the given gene; panics if the
    /// gene is unknown (an internal consistency error).
    fn add_gene_to_hash(&self,
                        seen_genes: &mut HashMap<RcString, GeneShortOptionMap>,
                        identifier: &RcString,
                        other_gene_uniquename: &GeneUniquename) {
        if !self.genes.contains_key(other_gene_uniquename) {
            panic!("{}", other_gene_uniquename);
        }
        seen_genes
            .entry(identifier.clone())
            .or_insert_with(HashMap::new)
            .insert(other_gene_uniquename.clone(), None);
    }

    // Record a genotype reference, also registering the alleles (and their
    // genes) that the genotype expresses.  (Continues below.)
    fn add_genotype_to_hash(&self,
                            seen_genotypes: &mut HashMap<RcString, GenotypeShortMap>,
                            seen_alleles: &mut HashMap<RcString, AlleleShortMap>,
                            seen_genes: &mut HashMap<RcString, GeneShortOptionMap>,
                            identifier: &RcString,
                            genotype_uniquename:
&RcString) {
        let genotype_short = self.make_genotype_short(genotype_uniquename);
        // register every allele expressed by every locus of the genotype
        for locus in &genotype_short.loci {
            for expressed_allele in &locus.expressed_alleles {
                self.add_allele_to_hash(seen_alleles, seen_genes, identifier,
                                        &expressed_allele.allele_uniquename);
            }
        }

        seen_genotypes
            .entry(identifier.clone())
            .or_insert_with(HashMap::new)
            .insert(genotype_uniquename.clone(), genotype_short);
    }

    /// Record an allele reference (and the allele's gene) for `identifier`
    /// and return the AlleleShort that was stored.
    fn add_allele_to_hash(&self,
                          seen_alleles: &mut HashMap<RcString, AlleleShortMap>,
                          seen_genes: &mut HashMap<RcString, GeneShortOptionMap>,
                          identifier: &RcString,
                          allele_uniquename: &AlleleUniquename) -> AlleleShort {
        let allele_short = self.make_allele_short(allele_uniquename);
        {
            let allele_gene_uniquename = &allele_short.gene_uniquename;
            self.add_gene_to_hash(seen_genes, identifier, allele_gene_uniquename);
            seen_alleles
                .entry(identifier.clone())
                .or_insert_with(HashMap::new)
                .insert(allele_uniquename.clone(), allele_short.clone());
        }
        allele_short
    }

    /// Record a transcript reference (and the transcript's gene) for
    /// `identifier`; panics if the transcript is unknown.
    fn add_transcript_to_hashes(&self,
                                seen_transcripts: &mut HashMap<RcString, TranscriptDetailsOptionMap>,
                                seen_genes: &mut HashMap<RcString, GeneShortOptionMap>,
                                identifier: &RcString,
                                transcript_uniquename: &TranscriptUniquename) {
        if let Some(transcript_details) = self.transcripts.get(transcript_uniquename) {
            seen_transcripts
                .entry(identifier.clone())
                .or_insert_with(HashMap::new)
                .insert(transcript_uniquename.clone(), None);
            self.add_gene_to_hash(seen_genes, identifier,
                                  &transcript_details.gene_uniquename);
        } else {
            panic!("internal error, can't find transcript {}",
                   transcript_uniquename);
        }
    }

    /// Record a term reference for `identifier`; the TermShort value is
    /// filled in later.
    fn add_term_to_hash(&self,
                        seen_terms: &mut HashMap<TermId, TermShortOptionMap>,
                        identifier: &RcString,
                        other_termid: &TermId) {
        seen_terms
            .entry(identifier.clone())
            .or_insert_with(HashMap::new)
            .insert(other_termid.clone(), None);
    }

    /// Look up a gene, panicking if it is unknown.
    fn get_gene<'b>(&'b self, gene_uniquename: &'b str) -> &'b GeneDetails {
        if let Some(gene_details) = self.genes.get(gene_uniquename) {
            gene_details
        } else {
            panic!("can't find GeneDetails for gene uniquename {}", gene_uniquename)
        }
    }

    /// Mutable variant of get_gene(); panics if the gene is unknown.
    fn get_gene_mut<'b>(&'b mut self, gene_uniquename: &'b str) -> &'b mut GeneDetails {
        if let Some(gene_details) = self.genes.get_mut(gene_uniquename) {
            gene_details
        } else {
            panic!("can't find GeneDetails for gene uniquename {}", gene_uniquename)
        }
    }

    /// Make the summary form of a gene used in lists.
    fn make_gene_short(&self, gene_uniquename: &str) -> GeneShort {
        let gene_details = self.get_gene(gene_uniquename);
        GeneShort {
            uniquename: gene_details.uniquename.clone(),
            name: gene_details.name.clone(),
            product: gene_details.product.clone(),
            transcript_count: gene_details.transcripts.len(),
        }
    }

    /// Make the gene summary used by the gene results pages, including the
    /// exact synonyms and ortholog identifiers.
    fn make_gene_summary(&self, gene_uniquename: &str) -> GeneSummary {
        let gene_details = self.get_gene(gene_uniquename);
        let synonyms =
            gene_details.synonyms.iter()
            .filter(|synonym| synonym.synonym_type == "exact")
            .map(|synonym| synonym.name.clone())
            .collect::<Vec<RcString>>();
        let ortholog_ids =
            gene_details.ortholog_annotations.iter()
            .map(|ortholog_annotation| {
                let ortholog_uniquename = ortholog_annotation.ortholog_uniquename.clone();
                let ortholog_gene_summary = &self.genes.get(&ortholog_uniquename).unwrap();
                let maybe_secondary_identifier =
                    ortholog_gene_summary.secondary_identifier.clone();
                let maybe_ortholog_name = ortholog_gene_summary.name.clone();
                IdNameAndOrganism {
                    identifier: ortholog_uniquename,
                    secondary_identifier: maybe_secondary_identifier,
                    name: maybe_ortholog_name,
                    taxonid: ortholog_annotation.ortholog_taxonid,
                }
            })
            .collect::<Vec<IdNameAndOrganism>>();
        GeneSummary {
            uniquename: gene_details.uniquename.clone(),
            name: gene_details.name.clone(),
            product: gene_details.product.clone(),
            uniprot_identifier: gene_details.uniprot_identifier.clone(),
            secondary_identifier: gene_details.secondary_identifier.clone(),
            synonyms,
            orthologs: ortholog_ids,
            feature_type: gene_details.feature_type.clone(),
            taxonid: gene_details.taxonid,
            transcript_count: gene_details.transcripts.len(),
            location: gene_details.location.clone(),
        }
    }

    // Make the gene summary for the API server.  (Continues below.)
    fn make_api_gene_summary(&self, gene_uniquename: &str) ->
APIGeneSummary { let gene_details = self.get_gene(gene_uniquename); let synonyms = gene_details.synonyms.iter() .filter(|synonym| synonym.synonym_type == "exact") .map(|synonym| synonym.name.clone()) .collect::<Vec<RcString>>(); let exon_count = if let Some(transcript_uniquename) = gene_details.transcripts.get(0) { let transcript = self.transcripts .get(transcript_uniquename) .expect(&format!("internal error, can't find transcript details for {}", transcript_uniquename)); let mut count = 0; for part in &transcript.parts { if part.feature_type == FeatureType::Exon { count += 1; } } count } else { 0 }; let mut ortholog_taxonids = HashSet::new(); for ortholog_annotation in &gene_details.ortholog_annotations { ortholog_taxonids.insert(ortholog_annotation.ortholog_taxonid); } let transcript_details = gene_details.transcripts .iter() .map(|transcript_uniquename| { self.transcripts.get(transcript_uniquename) .expect(&format!("internal error, failed to find transcript: {}", transcript_uniquename)) .clone() }).collect::<Vec<_>>(); APIGeneSummary { uniquename: gene_details.uniquename.clone(), name: gene_details.name.clone(), product: gene_details.product.clone(), uniprot_identifier: gene_details.uniprot_identifier.clone(), exact_synonyms: synonyms, dbxrefs: gene_details.dbxrefs.clone(), location: gene_details.location.clone(), transcripts: transcript_details, tm_domain_count: gene_details.tm_domain_coords.len(), coiled_coil_count: gene_details.coiled_coil_coords.len(), disordered_regions_count: gene_details.disordered_region_coords.len(), low_complexity_regions_count: gene_details.low_complexity_region_coords.len(), exon_count, transcript_count: gene_details.transcripts.len(), ortholog_taxonids, } } fn make_term_short(&self, termid: &str) -> TermShort { if let Some(term_details) = self.terms.get(termid) { TermShort::from_term_details(term_details) } else { panic!("can't find TermDetails for termid: {}", termid) } } fn add_characterisation_status(&mut self, gene_uniquename: 
&str, cvterm_name: &RcString) { let gene_details = self.genes.get_mut(gene_uniquename).unwrap(); gene_details.characterisation_status = Some(cvterm_name.clone()); } fn add_gene_product(&mut self, gene_uniquename: &str, product: &RcString) { let gene_details = self.get_gene_mut(gene_uniquename); gene_details.product = Some(product.clone()); } fn add_name_description(&mut self, gene_uniquename: &str, name_description: &str) { let gene_details = self.get_gene_mut(gene_uniquename); gene_details.name_descriptions.push(name_description.into()); } fn add_annotation(&mut self, extension_relation_order: &RelationOrder, cvterm: &Cvterm, is_not: bool, annotation_template: OntAnnotationDetail) { let termid = match self.base_term_of_extensions.get(&cvterm.termid()) { Some(base_termid) => base_termid.clone(), None => cvterm.termid(), }; let extension_parts = match self.parts_of_extensions.get(&cvterm.termid()) { Some(parts) => parts.clone(), None => vec![], }; let mut new_extension = extension_parts; let mut existing_extensions = annotation_template.extension.clone(); new_extension.append(&mut existing_extensions); let compare_ext_part_func = |e1: &ExtPart, e2: &ExtPart| { compare_ext_part_with_config(extension_relation_order, e1, e2) }; new_extension.sort_by(compare_ext_part_func); let ont_annotation_detail = OntAnnotationDetail { extension: new_extension, .. 
annotation_template };

        // NOT-qualified annotations are kept in a separate map
        let annotation_map = if is_not {
            &mut self.all_not_ont_annotations
        } else {
            &mut self.all_ont_annotations
        };

        let entry = annotation_map.entry(termid);
        entry.or_insert_with(Vec::new).push(ont_annotation_detail.id);
        self.annotation_details.insert(ont_annotation_detail.id,
                                       ont_annotation_detail);
    }

    /// Build the feature uniquename -> dbxref identifier set map from the
    /// raw feature_dbxref rows.
    fn process_dbxrefs(&mut self) {
        let mut map = HashMap::new();

        for feature_dbxref in &self.raw.feature_dbxrefs {
            let feature = &feature_dbxref.feature;
            let dbxref = &feature_dbxref.dbxref;

            map.entry(feature.uniquename.clone())
                .or_insert_with(HashSet::new)
                .insert(dbxref.identifier());
        }

        self.dbxrefs_of_features = map;
    }

    /// Build ReferenceDetails for every publication, pulling the PubMed and
    /// Canto metadata out of the publication props.  Publications whose
    /// triage status is configured to be ignored are skipped.
    fn process_references(&mut self) {
        for rc_publication in &self.raw.publications {
            let reference_uniquename = &rc_publication.uniquename;

            if reference_uniquename.to_lowercase() == "null" {
                continue;
            }

            let mut pubmed_authors: Option<RcString> = None;
            let mut pubmed_publication_date: Option<RcString> = None;
            let mut pubmed_abstract: Option<RcString> = None;
            let mut pubmed_doi: Option<RcString> = None;
            let mut canto_annotation_status: Option<RcString> = None;
            let mut canto_triage_status: Option<RcString> = None;
            let mut canto_curator_role: Option<RcString> = None;
            let mut canto_curator_name: Option<RcString> = None;
            let mut canto_first_approved_date: Option<RcString> = None;
            let mut canto_approved_date: Option<RcString> = None;
            let mut canto_added_date: Option<RcString> = None;
            let mut canto_session_submitted_date: Option<RcString> = None;

            for prop in rc_publication.publicationprops.borrow().iter() {
                match &prop.prop_type.name as &str {
                    "pubmed_publication_date" =>
                        pubmed_publication_date = Some(prop.value.clone()),
                    "pubmed_authors" =>
                        pubmed_authors = Some(prop.value.clone()),
                    "pubmed_abstract" =>
                        pubmed_abstract = Some(prop.value.clone()),
                    "pubmed_doi" =>
                        pubmed_doi = Some(prop.value.clone()),
                    "canto_annotation_status" =>
                        canto_annotation_status = Some(prop.value.clone()),
                    "canto_triage_status" =>
                        canto_triage_status = Some(prop.value.clone()),
                    "canto_curator_role" =>
                        canto_curator_role = Some(prop.value.clone()),
                    "canto_curator_name" =>
                        canto_curator_name = Some(prop.value.clone()),
                    "canto_first_approved_date" =>
                        canto_first_approved_date = Some(prop.value.clone()),
                    "canto_approved_date" =>
                        canto_approved_date = Some(prop.value.clone()),
                    "canto_added_date" =>
                        canto_added_date = Some(prop.value.clone()),
                    "canto_session_submitted_date" =>
                        canto_session_submitted_date = Some(prop.value.clone()),
                    _ => ()
                }
            }

            if let Some(ref canto_triage_status) = canto_triage_status {
                let triage_status_to_ignore =
                    &self.config.reference_page_config.triage_status_to_ignore;
                if triage_status_to_ignore.contains(canto_triage_status) {
                    continue;
                }
            }

            let mut authors_abbrev = None;
            let mut publication_year = None;

            // NOTE(review): these three Regex::new() calls run once per
            // publication; they could be hoisted into lazy_static statics
            // like PROMOTER_RE above.
            if let Some(authors) = pubmed_authors.clone() {
                if authors.contains(',') {
                    // "Smith J, Jones K" -> "Smith J et al."
                    let author_re = Regex::new(r"^(?P<f>[^,]+),.*$").unwrap();
                    let replaced: String =
                        author_re.replace_all(&authors, "$f et al.").into();
                    authors_abbrev = Some(RcString::from(&replaced));
                } else {
                    authors_abbrev = Some(authors.clone());
                }
            }

            if let Some(publication_date) = pubmed_publication_date.clone() {
                // keep just the trailing 4 digit year
                let date_re = Regex::new(r"^(.* )?(?P<y>\d\d\d\d)$").unwrap();
                publication_year =
                    Some(RcString::from(date_re.replace_all(&publication_date, "$y").as_ref()));
            }

            // prefer the first approval date, falling back to the session
            // submission date
            let mut approved_date = canto_first_approved_date.clone();

            if approved_date.is_none() {
                approved_date = canto_session_submitted_date.clone();
            }

            // truncate to the YYYY-MM-DD prefix
            approved_date =
                if let Some(date) = approved_date {
                    let re = Regex::new(r"^(?P<date>\d\d\d\d-\d\d-\d\d).*").unwrap();
                    Some(RcString::from(re.replace_all(&date, "$date").as_ref()))
                } else {
                    None
                };

            // only approved sessions keep their approval date
            if let Some(ref canto_annotation_status) = canto_annotation_status {
                if canto_annotation_status != "APPROVED" {
                    approved_date = None;
                }
            }

            self.references.insert(reference_uniquename.clone(),
                                   ReferenceDetails {
                                       uniquename: reference_uniquename.clone(),
                                       title: rc_publication.title.clone(),
                                       citation: rc_publication.miniref.clone(),
                                       pubmed_abstract,
                                       pubmed_doi,
                                       authors: pubmed_authors.clone(),
                                       authors_abbrev,
                                       pubmed_publication_date: pubmed_publication_date.clone(),
                                       canto_annotation_status,
                                       canto_triage_status,
                                       canto_curator_role,
                                       canto_curator_name,
                                       canto_first_approved_date,
                                       canto_approved_date,
                                       canto_session_submitted_date,
                                       canto_added_date,
                                       approved_date,
                                       publication_year,
                                       cv_annotations: HashMap::new(),
                                       physical_interactions: vec![],
                                       genetic_interactions: vec![],
                                       ortholog_annotations: vec![],
                                       paralog_annotations: vec![],
                                       genes_by_uniquename: HashMap::new(),
                                       genotypes_by_uniquename: HashMap::new(),
                                       alleles_by_uniquename: HashMap::new(),
                                       transcripts_by_uniquename: HashMap::new(),
                                       terms_by_termid: HashMap::new(),
                                       annotation_details: HashMap::new(),
                                       gene_count: 0,
                                   });
        }
    }

    // make maps from genes to transcript, transcripts to polypeptide,
    // exon, intron, UTRs
    fn make_feature_rel_maps(&mut self) {
        for feature_rel in &self.raw.feature_relationships {
            let subject_type_name = &feature_rel.subject.feat_type.name;
            let rel_name = &feature_rel.rel_type.name;
            let object_type_name = &feature_rel.object.feat_type.name;
            let subject_uniquename = &feature_rel.subject.uniquename;
            let object_uniquename = &feature_rel.object.uniquename;

            // transcript part_of gene
            if TRANSCRIPT_FEATURE_TYPES.contains(&subject_type_name.as_str()) &&
                rel_name == "part_of" &&
                is_gene_type(object_type_name) {
                    self.genes_of_transcripts.insert(subject_uniquename.clone(),
                                                     object_uniquename.clone());
                    continue;
                }

            // polypeptide derives_from mRNA
            if subject_type_name == "polypeptide" &&
                rel_name == "derives_from" &&
                object_type_name == "mRNA" {
                    self.transcripts_of_polypeptides.insert(subject_uniquename.clone(),
                                                            object_uniquename.clone());
                    continue;
                }

            if subject_type_name == "allele" {
                // allele instance_of gene/pseudogene
                if feature_rel.rel_type.name == "instance_of" &&
                    (object_type_name == "gene" || object_type_name == "pseudogene") {
                        self.genes_of_alleles.insert(subject_uniquename.clone(),
                                                     object_uniquename.clone());
                        continue;
                    }
                // allele part_of genotype, grouped into loci
                if feature_rel.rel_type.name == "part_of" &&
                    object_type_name == "genotype" {
                        let expression =
get_feat_rel_expression(&feature_rel.subject, feature_rel);
                        // alleles without an explicit locus prop get a
                        // synthetic per-relationship locus identifier
                        let genotype_locus_identifier =
                            get_feat_rel_prop_value("genotype_locus", feature_rel)
                            .unwrap_or_else(|| {
                                RcString::from(&format!("{}-{}",
                                                        feature_rel.object.uniquename,
                                                        feature_rel.feature_relationship_id))
                            });

                        let allele_and_expression =
                            ExpressedAllele {
                                allele_uniquename: subject_uniquename.clone(),
                                expression,
                            };

                        let genotype_entry =
                            self.loci_of_genotypes.entry(object_uniquename.clone());
                        let locus_map = genotype_entry.or_insert_with(HashMap::new);

                        let genotype_locus =
                            locus_map.entry(String::from(&genotype_locus_identifier))
                            .or_insert_with(|| GenotypeLocus {
                                expressed_alleles: vec![]
                            });

                        genotype_locus.expressed_alleles.push(allele_and_expression);
                        continue;
                    }
            }

            // exon/intron/UTR part of a transcript
            if TRANSCRIPT_PART_TYPES.contains(&subject_type_name.as_str()) {
                let entry = self.parts_of_transcripts.entry(object_uniquename.clone());
                let part = make_feature_short(&self.chromosomes, &feature_rel.subject);
                entry.or_insert_with(Vec::new).push(part);
            }
        }
    }

    /// Return the dbxref identifiers of a feature, empty when it has none.
    fn get_feature_dbxrefs(&self, feature: &Feature) -> HashSet<RcString> {
        if let Some(dbxrefs) = self.dbxrefs_of_features.get(&feature.uniquename) {
            dbxrefs.clone()
        } else {
            HashSet::new()
        }
    }

    // Create and store a GeneDetails from a gene Feature, pulling in the
    // dbxrefs, feature props, UniProt domain data, Pfam motifs and
    // RNAcentral/Rfam annotations.  (Continues below.)
    fn store_gene_details(&mut self, feat: &Feature) {
        let maybe_location = make_location(&self.chromosomes, feat);

        if let Some(ref location) = maybe_location {
            if let Some(ref mut chr) =
                self.chromosomes.get_mut(&location.chromosome_name) {
                    chr.gene_uniquenames.push(feat.uniquename.clone());
                }
        }

        let organism = make_organism(&feat.organism);
        let dbxrefs = self.get_feature_dbxrefs(feat);

        let mut orfeome_identifier = None;
        for dbxref in &dbxrefs {
            // an "SPD:" dbxref is the ORFeome clone identifier
            if let Some(without_prefix) = dbxref.strip_prefix("SPD:") {
                orfeome_identifier = Some(RcString::from(without_prefix));
            }
        }

        let mut uniprot_identifier = None;
        let mut secondary_identifier = None;
        let mut biogrid_interactor_id: Option<u32> = None;
        let mut rnacentral_urs_identifier = None;

        for prop in feat.featureprops.borrow().iter() {
            match prop.prop_type.name.as_str() {
                "uniprot_identifier" => uniprot_identifier = prop.value.clone(),
                "sgd_identifier" => secondary_identifier = prop.value.clone(),
                "biogrid_interactor_id" => {
                    if let Some(ref chado_biogrid_id) = prop.value {
                        biogrid_interactor_id =
                            match chado_biogrid_id.parse::<u32>() {
                                Ok(val) => Some(val),
                                Err(err) =>
                                    panic!("error parsing BioGRID interactor ID from Chado: {}",
                                           err),
                            }
                    }
                },
                "rnacentral_identifier" => rnacentral_urs_identifier = prop.value.clone(),
                _ => (),
            }
        }

        // InterPro matches and TMHMM transmembrane domains come from the
        // UniProt-keyed domain data set
        let (interpro_matches, tm_domain_coords) =
            if let Some(ref uniprot_identifier) = uniprot_identifier {
                if let Some(result) = self.domain_data.get(uniprot_identifier as &str) {
                    let tm_domain_matches =
                        result.tmhmm_matches.iter()
                        .map(|tm_match| (tm_match.start, tm_match.end))
                        .collect::<Vec<_>>();
                    (result.interpro_matches.clone(), tm_domain_matches)
                } else {
                    (vec![], vec![])
                }
            } else {
                (vec![], vec![])
            };

        // disorder / low-complexity / coiled-coil regions come from Pfam
        let (disordered_region_coords, low_complexity_region_coords,
             coiled_coil_coords) =
            if let Some(pfam_data) = self.pfam_data {
                if let Some(ref uniprot_identifier) = uniprot_identifier {
                    if let Some(result) = pfam_data.get(uniprot_identifier as &str) {
                        let mut disordered_region_coords = vec![];
                        let mut low_complexity_region_coords = vec![];
                        let mut coiled_coil_coords = vec![];
                        for motif in &result.motifs {
                            match &motif.motif_type as &str {
                                "disorder" =>
                                    disordered_region_coords.push((motif.start, motif.end)),
                                "low_complexity" =>
                                    low_complexity_region_coords.push((motif.start, motif.end)),
                                "coiled_coil" =>
                                    coiled_coil_coords.push((motif.start, motif.end)),
                                _ => (),
                            }
                        }
                        (disordered_region_coords, low_complexity_region_coords,
                         coiled_coil_coords)
                    } else {
                        (vec![], vec![], vec![])
                    }
                } else {
                    (vec![], vec![], vec![])
                }
            } else {
                (vec![], vec![], vec![])
            };

        // Rfam annotations come from the RNAcentral data keyed by URS id
        let rfam_annotations =
            if let Some(rnacentral_data) = self.rnacentral_data {
                if let Some(ref rnacentral_urs_identifier) = rnacentral_urs_identifier {
                    if let Some(result) =
                        rnacentral_data.get(rnacentral_urs_identifier.as_str()) {
result.clone()
                    } else {
                        vec![]
                    }
                } else {
                    vec![]
                }
            } else {
                vec![]
            };

        let gene_feature = GeneDetails {
            uniquename: feat.uniquename.clone(),
            name: feat.name.clone(),
            taxonid: organism.taxonid,
            product: None,
            deletion_viability: DeletionViability::Unknown,
            uniprot_identifier,
            secondary_identifier,
            biogrid_interactor_id,
            rnacentral_urs_identifier,
            interpro_matches,
            tm_domain_coords,
            disordered_region_coords,
            low_complexity_region_coords,
            coiled_coil_coords,
            rfam_annotations,
            orfeome_identifier,
            name_descriptions: vec![],
            synonyms: vec![],
            dbxrefs,
            feature_type: feat.feat_type.name.clone(),
            feature_so_termid: feat.feat_type.termid(),
            // updated when the transcripts are stored
            transcript_so_termid: feat.feat_type.termid(),
            characterisation_status: None,
            taxonomic_distribution: None,
            location: maybe_location,
            gene_neighbourhood: vec![],
            cv_annotations: HashMap::new(),
            physical_interactions: vec![],
            genetic_interactions: vec![],
            ortholog_annotations: vec![],
            paralog_annotations: vec![],
            target_of_annotations: vec![],
            transcripts: vec![],
            transcripts_by_uniquename: HashMap::new(),
            genes_by_uniquename: HashMap::new(),
            genotypes_by_uniquename: HashMap::new(),
            alleles_by_uniquename: HashMap::new(),
            references_by_uniquename: HashMap::new(),
            terms_by_termid: HashMap::new(),
            annotation_details: HashMap::new(),
            feature_publications: HashSet::new(),
            subset_termids: HashSet::new(),
        };

        self.genes.insert(feat.uniquename.clone(), gene_feature);
    }

    /// Remove and return the parts of a transcript sorted 5' to 3', with
    /// introns inserted between them; empty when no parts were collected.
    /// Panics on an empty parts list or an unknown chromosome.
    fn get_transcript_parts(&mut self, transcript_uniquename: &str) -> Vec<FeatureShort> {
        if let Some(mut parts) = self.parts_of_transcripts.remove(transcript_uniquename) {
            if parts.is_empty() {
                panic!("transcript has no parts: {}", transcript_uniquename);
            }

            let part_cmp = |a: &FeatureShort, b: &FeatureShort| {
                a.location.start_pos.cmp(&b.location.start_pos)
            };

            parts.sort_by(&part_cmp);

            validate_transcript_parts(transcript_uniquename, &parts);

            let chr_name = &parts[0].location.chromosome_name.clone();
            if let Some(chromosome) = self.chromosomes.get(chr_name) {
                add_introns_to_transcript(chromosome, transcript_uniquename, &mut parts);
            } else {
                panic!("can't find chromosome details for: {}", chr_name);
            }

            // reverse strand transcripts are returned 5' to 3'
            if parts[0].location.strand == Strand::Reverse {
                parts.reverse();
            }

            parts
        } else {
            vec![]
        }
    }

    // Create and store a TranscriptDetails from a transcript Feature,
    // computing the overall location and (for mRNAs) the CDS location from
    // the parts.  (Continues below.)
    fn store_transcript_details(&mut self, feat: &Feature) {
        let transcript_uniquename = feat.uniquename.clone();

        let parts = self.get_transcript_parts(&transcript_uniquename);

        if parts.is_empty() {
            return;
        }

        // overall extent = min start .. max end of the parts
        let mut transcript_start = usize::MAX;
        let mut transcript_end = 0;

        for part in &parts {
            if part.location.start_pos < transcript_start {
                transcript_start = part.location.start_pos;
            }
            if part.location.end_pos > transcript_end {
                transcript_end = part.location.end_pos;
            }
        }

        // use the first part as a template to get the chromosome details
        let transcript_location =
            ChromosomeLocation {
                start_pos: transcript_start,
                end_pos: transcript_end,
                phase: None,
                .. parts[0].location.clone()
            };

        // for mRNAs, the CDS extent is the min..max over the exon parts
        let maybe_cds_location =
            if feat.feat_type.name == "mRNA" {
                let mut cds_start = usize::MAX;
                let mut cds_end = 0;

                for part in &parts {
                    if part.feature_type == FeatureType::Exon {
                        if part.location.start_pos < cds_start {
                            cds_start = part.location.start_pos;
                        }
                        if part.location.end_pos > cds_end {
                            cds_end = part.location.end_pos;
                        }
                    }
                }

                if cds_end == 0 {
                    None
                } else {
                    if let Some(mrna_location) = feat.featurelocs.borrow().get(0) {
                        let first_part_loc = &parts[0].location;

                        Some(ChromosomeLocation {
                            chromosome_name: first_part_loc.chromosome_name.clone(),
                            start_pos: cds_start,
                            end_pos: cds_end,
                            strand: first_part_loc.strand,
                            phase: make_phase(mrna_location),
                        })
                    } else {
                        None
                    }
                }
            } else {
                None
            };

        if let Some(gene_uniquename) =
            self.genes_of_transcripts.get(&transcript_uniquename) {
                let gene_details = self.genes.get_mut(gene_uniquename).unwrap();
                let transcript_type = feat.feat_type.name.clone();
                // refine the generic "gene" feature type with the
                // transcript type, eg. "mRNA gene"
                if gene_details.feature_type == "gene" {
                    let feature_type =
                        format!("{} {}", transcript_type,
                                gene_details.feature_type);
                    gene_details.feature_type =
RcString::from(&feature_type);
            }

            let transcript = TranscriptDetails {
                uniquename: transcript_uniquename.clone(),
                location: transcript_location,
                transcript_type,
                parts,
                protein: None,
                cds_location: maybe_cds_location,
                gene_uniquename: gene_uniquename.to_owned(),
            };

            self.transcripts.insert(transcript_uniquename.clone(),
                                    transcript.clone());

            gene_details.transcripts.push(transcript_uniquename);
            gene_details.transcript_so_termid = feat.feat_type.termid();
        } else {
            panic!("can't find gene for transcript: {}", transcript_uniquename);
        }
}

// Build a ProteinDetails from a polypeptide feature, reading the physical
// properties (molecular weight, isoelectric point, ...) from its
// featureprops, then attach it to the owning transcript.
//
// Panics if the feature has no residues, no molecular_weight prop or no
// recorded transcript.
fn store_protein_details(&mut self, feat: &Feature) {
    if let Some(residues) = feat.residues.clone() {
        let protein_uniquename = feat.uniquename.clone();

        let mut molecular_weight = None;
        let mut average_residue_weight = None;
        let mut charge_at_ph7 = None;
        let mut isoelectric_point = None;
        let mut codon_adaptation_index = None;

        // parse a featureprop value as f32; log and return None on failure
        let parse_prop_as_f32 = |p: &Option<RcString>| {
            if let Some(ref prop_value) = p {
                let maybe_value = prop_value.parse();
                if let Ok(parsed_prop) = maybe_value {
                    Some(parsed_prop)
                } else {
                    println!("{}: couldn't parse {} as f32",
                             feat.uniquename, &prop_value);
                    None
                }
            } else {
                None
            }
        };

        for prop in feat.featureprops.borrow().iter() {
            if prop.prop_type.name == "molecular_weight" {
                if let Some(value) = parse_prop_as_f32(&prop.value) {
                    // divided by 1000 — presumably Da -> kDa; TODO confirm
                    molecular_weight = Some(value / 1000.0);
                }
            }
            if prop.prop_type.name == "average_residue_weight" {
                if let Some(value) = parse_prop_as_f32(&prop.value) {
                    average_residue_weight = Some(value / 1000.0);
                }
            }
            if prop.prop_type.name == "charge_at_ph7" {
                charge_at_ph7 = parse_prop_as_f32(&prop.value);
            }
            if prop.prop_type.name == "isoelectric_point" {
                isoelectric_point = parse_prop_as_f32(&prop.value);
            }
            if prop.prop_type.name == "codon_adaptation_index" {
                codon_adaptation_index = parse_prop_as_f32(&prop.value);
            }
        }

        if molecular_weight.is_none() {
            panic!("{} has no molecular_weight", feat.uniquename)
        }

        // NOTE(review): the unwrap()s below assume the other four props are
        // always present whenever molecular_weight is — they panic with no
        // message otherwise
        let protein = ProteinDetails {
            uniquename: feat.uniquename.clone(),
            sequence: RcString::from(&residues),
            product: None,
            molecular_weight: molecular_weight.unwrap(),
            average_residue_weight: average_residue_weight.unwrap(),
            charge_at_ph7: charge_at_ph7.unwrap(),
            isoelectric_point: isoelectric_point.unwrap(),
            codon_adaptation_index: codon_adaptation_index.unwrap(),
        };

        if let Some(transcript_uniquename) =
            self.transcripts_of_polypeptides.get(&protein_uniquename) {
                self.transcripts.get_mut(transcript_uniquename)
                    .expect(&format!("internal error, failed to find transcript: {}",
                                     transcript_uniquename))
                    .protein = Some(protein);
            } else {
                panic!("can't find transcript of polypeptide: {}",
                       protein_uniquename)
            }
    } else {
        panic!("no residues for protein: {}", feat.uniquename);
    }
}

// Store a ChromosomeDetails for a chromosome feature, keeping its sequence
// and ENA identifier.  The gene counts are filled in later, after genes
// are processed.
//
// Panics if the feature has no residues, the sequence contains non-ASCII
// characters, or there is no "ena_id" featureprop (the unwrap below).
fn store_chromosome_details(&mut self, feat: &Feature) {
    let mut ena_identifier = None;

    for prop in feat.featureprops.borrow().iter() {
        if prop.prop_type.name == "ena_id" {
            ena_identifier = prop.value.clone()
        }
    }

    if feat.residues.is_none() {
        panic!("{:?}", feat.uniquename);
    }

    let org = make_organism(&feat.organism);

    let residues = feat.residues.clone().unwrap();

    if !residues.is_ascii() {
        panic!("sequence for chromosome {} contains non-ascii characters",
               feat.uniquename);
    }

    let chr = ChromosomeDetails {
        name: feat.uniquename.clone(),
        residues: RcString::from(&residues),
        ena_identifier: RcString::from(&ena_identifier.unwrap()),
        gene_uniquenames: vec![],
        taxonid: org.taxonid,
        gene_count: 0,  // we'll update the counts once the genes are processed
        coding_gene_count: 0,
    };

    self.chromosomes.insert(feat.uniquename.clone(), chr);
}

// Build a GenotypeDetails from a genotype feature: collect its loci,
// decide haploid vs. diploid, sort the loci by allele display name and
// record any "genotype_background" featureprop.
fn store_genotype_details(&mut self, feat: &Feature) {
    let mut loci: Vec<_> =
        self.loci_of_genotypes[&feat.uniquename]
        .values().cloned().collect();

    let genotype_display_uniquename =
        make_genotype_display_name(&loci, &self.alleles);

    // diploid if any locus has more than one expressed allele
    let mut ploidiness = Ploidiness::Haploid;

    for locus in &loci {
        if locus.expressed_alleles.len() > 1 {
            ploidiness = Ploidiness::Diploid;
            break;
        }
    }

    {
        // sort loci by the encoded display name of their first expressed allele
        let loci_cmp = |locus1: &GenotypeLocus, locus2: &GenotypeLocus| {
            let
locus1_display_name =
                &self.alleles[&locus1.expressed_alleles[0].allele_uniquename]
                .encoded_name_and_type;
            let locus2_display_name =
                &self.alleles[&locus2.expressed_alleles[0].allele_uniquename]
                .encoded_name_and_type;
            locus1_display_name.cmp(locus2_display_name)
        };
        loci.sort_by(&loci_cmp);
    }

    for prop in feat.featureprops.borrow().iter() {
        if prop.prop_type.name == "genotype_background" {
            if let Some(ref background) = prop.value {
                self.genotype_backgrounds.insert(feat.uniquename.clone(),
                                                 background.clone());
            }
        }
    }

    let rc_display_name = RcString::from(&genotype_display_uniquename);

    self.genotypes.insert(rc_display_name.clone(),
                          GenotypeDetails {
                              display_uniquename: rc_display_name,
                              name: feat.name.as_ref().map(|s| RcString::from(s)),
                              loci,
                              ploidiness,
                              cv_annotations: HashMap::new(),
                              genes_by_uniquename: HashMap::new(),
                              alleles_by_uniquename: HashMap::new(),
                              references_by_uniquename: HashMap::new(),
                              transcripts_by_uniquename: HashMap::new(),
                              terms_by_termid: HashMap::new(),
                              annotation_details: HashMap::new(),
                          });
}

// Store an AlleleShort for an allele feature, reading the allele type and
// description from its featureprops.
//
// Panics if there is no "allele_type" featureprop.
fn store_allele_details(&mut self, feat: &Feature) {
    let mut allele_type = None;
    let mut description = None;

    for prop in feat.featureprops.borrow().iter() {
        match &prop.prop_type.name as &str {
            "allele_type" => allele_type = prop.value.clone(),
            "description" => description = prop.value.clone(),
            _ => ()
        }
    }

    if let Some(allele_type) = allele_type {
        let gene_uniquename = &self.genes_of_alleles[&feat.uniquename];
        let allele_details = AlleleShort::new(&feat.uniquename, &feat.name,
                                              &allele_type, &description,
                                              gene_uniquename);
        self.alleles.insert(feat.uniquename.clone(), allele_details);
    } else {
        panic!("no allele_type cvtermprop for {}", &feat.uniquename);
    }
}

fn process_chromosome_features(&mut self) {
    // we need to process all chromosomes before other features
    for feat in &self.raw.features {
        if feat.feat_type.name == "chromosome" {
            self.store_chromosome_details(feat);
        }
    }
}

// Process the remaining features in dependency order: genes first, then
// transcripts, then polypeptides, then any other located feature that is
// not otherwise handled.
fn process_features(&mut self) {
    // we need to process all genes before transcripts
    for feat in &self.raw.features {
        if feat.feat_type.name == "gene" || feat.feat_type.name == "pseudogene" {
            self.store_gene_details(feat);
        }
    }

    for feat in &self.raw.features {
        if TRANSCRIPT_FEATURE_TYPES.contains(&feat.feat_type.name.as_str()) {
            self.store_transcript_details(feat)
        }
    }

    for feat in &self.raw.features {
        if feat.feat_type.name == "polypeptide" {
            self.store_protein_details(feat);
        }
    }

    // everything not already covered above becomes an "other feature"
    for feat in &self.raw.features {
        if !TRANSCRIPT_FEATURE_TYPES.contains(&feat.feat_type.name.as_str()) &&
            !TRANSCRIPT_PART_TYPES.contains(&feat.feat_type.name.as_str()) &&
            !HANDLED_FEATURE_TYPES.contains(&feat.feat_type.name.as_str())
        {
            // for now, ignore features without locations
            if feat.featurelocs.borrow().len() > 0 {
                let feature_short = make_feature_short(&self.chromosomes, feat);
                self.other_features.insert(feat.uniquename.clone(), feature_short);
            }
        }
    }
}

// For every term, collect its "interesting" ancestor terms (as decided by
// is_interesting_parent()) from the cvtermpath closure and store them on
// the TermDetails.
//
// Panics if a cvtermpath row has no relation type.
fn add_interesting_parents(&mut self) {
    let mut interesting_parents_by_termid: HashMap<RcString, HashSet<InterestingParent>> =
        HashMap::new();

    for cvtermpath in &self.raw.cvtermpaths {
        let subject_term = &cvtermpath.subject;
        let subject_termid = subject_term.termid();
        let object_term = &cvtermpath.object;
        let object_termid = object_term.termid();

        let rel_termid =
            match cvtermpath.rel_type {
                Some(ref rel_type) => {
                    rel_type.termid()
                },
                None => panic!("no relation type for {} <-> {}\n",
                               &subject_term.name, &object_term.name)
            };
        let rel_term_name =
            self.make_term_short(&rel_termid).name;

        if self.is_interesting_parent(&object_termid, &rel_term_name) {
            interesting_parents_by_termid
                .entry(subject_termid.clone())
                .or_insert_with(HashSet::new)
                .insert(InterestingParent {
                    termid: object_termid,
                    rel_name: rel_term_name,
                });
        };
    }

    for (termid, interesting_parents) in interesting_parents_by_termid {
        let term_details = self.terms.get_mut(&termid).unwrap();

        let interesting_parent_ids =
            interesting_parents.iter()
            .map(|p| p.termid.clone())
            .collect::<HashSet<_>>();
        term_details.interesting_parent_ids = interesting_parent_ids;
term_details.interesting_parent_details = interesting_parents;
    }
}

fn process_allele_features(&mut self) {
    for feat in &self.raw.features {
        if feat.feat_type.name == "allele" {
            self.store_allele_details(feat);
        }
    }
}

fn process_genotype_features(&mut self) {
    for feat in &self.raw.features {
        if feat.feat_type.name == "genotype" {
            self.store_genotype_details(feat);
        }
    }
}

// For each located gene, store a window of up to GENE_NEIGHBOURHOOD_DISTANCE
// genes on either side (same chromosome only), ordered by position, into
// gene_details.gene_neighbourhood.  The gene itself is included in the
// middle of its own neighbourhood.
fn add_gene_neighbourhoods(&mut self) {
    // local pairing of a gene with its location, for sorting
    struct GeneAndLoc {
        gene_uniquename: RcString,
        loc: ChromosomeLocation,
    }
    let mut genes_and_locs: Vec<GeneAndLoc> = vec![];

    for gene_details in self.genes.values() {
        if let Some(ref location) = gene_details.location {
            genes_and_locs.push(GeneAndLoc {
                gene_uniquename: gene_details.uniquename.clone(),
                loc: location.clone(),
            });
        }
    }

    // sort by chromosome name, then by start position
    let cmp = |a: &GeneAndLoc, b: &GeneAndLoc| {
        let order = a.loc.chromosome_name.cmp(&b.loc.chromosome_name);
        if order == Ordering::Equal {
            a.loc.start_pos.cmp(&b.loc.start_pos)
        } else {
            order
        }
    };

    genes_and_locs.sort_by(cmp);

    for (i, this_gene_and_loc) in genes_and_locs.iter().enumerate() {
        let mut nearby_genes: Vec<GeneShort> = vec![];

        // walk backwards, stopping at the chromosome boundary
        if i > 0 {
            let start_index =
                if i > GENE_NEIGHBOURHOOD_DISTANCE {
                    i - GENE_NEIGHBOURHOOD_DISTANCE
                } else {
                    0
                };

            for back_index in (start_index..i).rev() {
                let back_gene_and_loc = &genes_and_locs[back_index];

                if back_gene_and_loc.loc.chromosome_name !=
                    this_gene_and_loc.loc.chromosome_name {
                        break;
                    }
                let back_gene_short =
                    self.make_gene_short(&back_gene_and_loc.gene_uniquename);
                nearby_genes.insert(0, back_gene_short);
            }
        }

        let gene_short = self.make_gene_short(&this_gene_and_loc.gene_uniquename);
        nearby_genes.push(gene_short);

        // then walk forwards, again stopping at the chromosome boundary
        if i < genes_and_locs.len() - 1 {
            let end_index =
                if i + GENE_NEIGHBOURHOOD_DISTANCE >= genes_and_locs.len() {
                    genes_and_locs.len()
                } else {
                    i + GENE_NEIGHBOURHOOD_DISTANCE + 1
                };

            for forward_index in i+1..end_index {
                let forward_gene_and_loc = &genes_and_locs[forward_index];

                if forward_gene_and_loc.loc.chromosome_name !=
                    this_gene_and_loc.loc.chromosome_name {
                        break;
                    }
                let forward_gene_short =
                    self.make_gene_short(&forward_gene_and_loc.gene_uniquename);
                nearby_genes.push(forward_gene_short);
            }
        }

        let this_gene_details =
            self.genes.get_mut(&this_gene_and_loc.gene_uniquename).unwrap();
        this_gene_details.gene_neighbourhood.append(&mut nearby_genes);
    }
}

// add interaction, ortholog and paralog annotations
//
// Walks all feature_relationships between gene-type features, matching
// them against FEATURE_REL_CONFIGS, and records the resulting annotations
// on both genes and on the annotating reference.
fn process_annotation_feature_rels(&mut self) {
    for feature_rel in &self.raw.feature_relationships {
        let rel_name = &feature_rel.rel_type.name;
        let subject_uniquename = &feature_rel.subject.uniquename;
        let object_uniquename = &feature_rel.object.uniquename;

        for rel_config in &FEATURE_REL_CONFIGS {
            if rel_name == rel_config.rel_type_name &&
                is_gene_type(&feature_rel.subject.feat_type.name) &&
                is_gene_type(&feature_rel.object.feat_type.name) {
                    let mut evidence: Option<Evidence> = None;
                    let mut throughput: Option<Throughput> = None;
                    let mut is_inferred_interaction: bool = false;
                    let mut interaction_note: Option<RcString> = None;

                    let borrowed_publications = feature_rel.publications.borrow();
                    let maybe_publication = borrowed_publications.get(0);
                    // "null" is a placeholder publication — treat as no reference
                    let maybe_reference_uniquename =
                        match maybe_publication {
                            Some(publication) =>
                                if publication.uniquename == "null" {
                                    None
                                } else {
                                    Some(publication.uniquename.clone())
                                },
                            None => None,
                        };

                    for prop in feature_rel.feature_relationshipprops.borrow().iter() {
                        if prop.prop_type.name == "evidence" {
                            if let Some(ref evidence_long) = prop.value {
                                // map the long evidence description back to its
                                // configured code; fall back to the long form
                                for (evidence_code, ev_details) in
                                    &self.config.evidence_types {
                                        if &ev_details.long == evidence_long {
                                            evidence = Some(evidence_code.clone());
                                        }
                                    }
                                if evidence.is_none() {
                                    evidence = Some(evidence_long.clone());
                                }
                            }
                        }
                        if prop.prop_type.name == "is_inferred" {
                            if let Some(is_inferred_value) = prop.value.clone() {
                                if is_inferred_value == "yes" {
                                    is_inferred_interaction = true;
                                }
                            }
                        }
                        if prop.prop_type.name == "annotation_throughput_type" {
                            if let Some(throughput_type) = prop.value.clone() {
                                throughput = Some(match throughput_type.as_ref() {
                                    "low throughput" =>
Throughput::LowThroughput,
                                    "high throughput" =>
                                        Throughput::HighThroughput,
                                    "non-experimental" =>
                                        Throughput::NonExperimental,
                                    _ => {
                                        panic!("unknown throughput type: {}",
                                               throughput_type);
                                    }
                                });
                            }
                        }
                        if prop.prop_type.name == "interaction_note" {
                            if let Some(interaction_note_value) = prop.value.clone() {
                                interaction_note = Some(interaction_note_value);
                            }
                        }
                    }

                    // keep a copy for the reverse (object -> subject) annotation
                    let evidence_clone = evidence.clone();

                    let gene_uniquename = subject_uniquename;
                    let gene_organism_taxonid = {
                        self.genes[subject_uniquename].taxonid
                    };
                    let other_gene_uniquename = object_uniquename;
                    let other_gene_organism_taxonid = {
                        self.genes[object_uniquename].taxonid
                    };

                    match rel_config.annotation_type {
                        FeatureRelAnnotationType::Interaction =>
                            // inferred interactions are skipped entirely
                            if !is_inferred_interaction {
                                let interaction_annotation =
                                    InteractionAnnotation {
                                        gene_uniquename: gene_uniquename.clone(),
                                        interactor_uniquename:
                                            other_gene_uniquename.clone(),
                                        evidence,
                                        reference_uniquename:
                                            maybe_reference_uniquename.clone(),
                                        throughput,
                                        interaction_note,
                                    };
                                {
                                    let gene_details =
                                        self.genes.get_mut(subject_uniquename).unwrap();
                                    if rel_name == "interacts_physically" {
                                        gene_details.physical_interactions
                                            .push(interaction_annotation.clone());
                                    } else {
                                        if rel_name == "interacts_genetically" {
                                            gene_details.genetic_interactions
                                                .push(interaction_annotation.clone());
                                        } else {
                                            panic!("unknown interaction type: {}",
                                                   rel_name);
                                        }
                                    };
                                }

                                // also record on the partner gene (unless it's a
                                // self-interaction)
                                if gene_uniquename != other_gene_uniquename {
                                    let other_gene_details =
                                        self.genes.get_mut(object_uniquename).unwrap();
                                    if rel_name == "interacts_physically" {
                                        other_gene_details.physical_interactions
                                            .push(interaction_annotation.clone());
                                    } else {
                                        if rel_name == "interacts_genetically" {
                                            other_gene_details.genetic_interactions
                                                .push(interaction_annotation.clone());
                                        } else {
                                            panic!("unknown interaction type: {}",
                                                   rel_name);
                                        }
                                    };
                                }

                                // and on the reference, when there is one
                                if let Some(ref_details) =
                                    if let Some(ref reference_uniquename) =
                                        maybe_reference_uniquename {
                                            self.references.get_mut(reference_uniquename)
                                        } else {
                                            None
                                        }
                                {
                                    if rel_name == "interacts_physically" {
                                        ref_details.physical_interactions
                                            .push(interaction_annotation.clone());
                                    } else {
                                        if rel_name == "interacts_genetically" {
                                            ref_details.genetic_interactions
                                                .push(interaction_annotation.clone());
                                        } else {
                                            panic!("unknown interaction type: {}",
                                                   rel_name);
                                        }
                                    };
                                }
                            },
                        FeatureRelAnnotationType::Ortholog => {
                            let ortholog_annotation =
                                OrthologAnnotation {
                                    gene_uniquename: gene_uniquename.clone(),
                                    ortholog_uniquename:
                                        other_gene_uniquename.clone(),
                                    ortholog_taxonid:
                                        other_gene_organism_taxonid,
                                    evidence,
                                    reference_uniquename:
                                        maybe_reference_uniquename.clone(),
                                };
                            let gene_details =
                                self.genes.get_mut(subject_uniquename).unwrap();
                            gene_details.ortholog_annotations
                                .push(ortholog_annotation.clone());
                            if let Some(ref_details) =
                                if let Some(ref reference_uniquename) =
                                    maybe_reference_uniquename {
                                        self.references.get_mut(reference_uniquename)
                                    } else {
                                        None
                                    }
                            {
                                ref_details.ortholog_annotations
                                    .push(ortholog_annotation);
                            }
                        },
                        FeatureRelAnnotationType::Paralog => {
                            let paralog_annotation =
                                ParalogAnnotation {
                                    gene_uniquename: gene_uniquename.clone(),
                                    paralog_uniquename:
                                        other_gene_uniquename.clone(),
                                    evidence,
                                    reference_uniquename:
                                        maybe_reference_uniquename.clone(),
                                };
                            let gene_details =
                                self.genes.get_mut(subject_uniquename).unwrap();
                            gene_details.paralog_annotations
                                .push(paralog_annotation.clone());
                            if let Some(ref_details) =
                                if let Some(ref reference_uniquename) =
                                    maybe_reference_uniquename {
                                        self.references.get_mut(reference_uniquename)
                                    } else {
                                        None
                                    }
                            {
                                // NOTE(review): precedence here is
                                // (is_some && taxonid match) || (taxonid order) —
                                // confirm that is the intended grouping
                                if self.config.load_organism_taxonid.is_some() &&
                                    self.config.load_organism_taxonid.unwrap() ==
                                    gene_details.taxonid ||
                                    gene_organism_taxonid < other_gene_organism_taxonid
                                {
                                    ref_details.paralog_annotations
                                        .push(paralog_annotation);
                                }
                            }
                        }
                    }

                    // for orthologs and paralogs, store the reverse annotation too
                    let other_gene_details =
                        self.genes.get_mut(object_uniquename).unwrap();
                    match rel_config.annotation_type {
                        FeatureRelAnnotationType::Interaction => {},
                        FeatureRelAnnotationType::Ortholog => {
                            let ortholog_annotation =
                                OrthologAnnotation {
                                    gene_uniquename: other_gene_uniquename.clone(),
                                    ortholog_uniquename: gene_uniquename.clone(),
                                    ortholog_taxonid: gene_organism_taxonid,
                                    evidence: evidence_clone,
                                    reference_uniquename:
                                        maybe_reference_uniquename.clone(),
                                };
                            other_gene_details.ortholog_annotations
                                .push(ortholog_annotation);
                        },
                        FeatureRelAnnotationType::Paralog => {
                            let paralog_annotation =
                                ParalogAnnotation {
                                    gene_uniquename: other_gene_uniquename.clone(),
                                    paralog_uniquename: gene_uniquename.clone(),
                                    evidence: evidence_clone,
                                    reference_uniquename:
                                        maybe_reference_uniquename.clone(),
                                };
                            other_gene_details.paralog_annotations
                                .push(paralog_annotation.clone());
                            if let Some(ref_details) =
                                if let Some(ref reference_uniquename) =
                                    maybe_reference_uniquename {
                                        self.references.get_mut(reference_uniquename)
                                    } else {
                                        None
                                    }
                            {
                                if self.config.load_organism_taxonid.is_some() &&
                                    self.config.load_organism_taxonid.unwrap() ==
                                    other_gene_details.taxonid ||
                                    gene_organism_taxonid > other_gene_organism_taxonid
                                {
                                    ref_details.paralog_annotations
                                        .push(paralog_annotation);
                                }
                            }
                        },
                    }
                }
        }
    }

    // keep all annotation lists in a stable, sorted order
    for ref_details in self.references.values_mut() {
        ref_details.physical_interactions.sort();
        ref_details.genetic_interactions.sort();
        ref_details.ortholog_annotations.sort();
        ref_details.paralog_annotations.sort();
    }

    for gene_details in self.genes.values_mut() {
        gene_details.physical_interactions.sort();
        gene_details.genetic_interactions.sort();
        gene_details.ortholog_annotations.sort();
        gene_details.paralog_annotations.sort();
    }
}

// find the extension_display_names config for the given termid and relation type name
fn matching_ext_config(&self, annotation_termid: &str,
                       rel_type_name: &str) -> Option<ExtensionDisplayNames> {
    let ext_configs = &self.config.extension_display_names;

    if let Some(annotation_term_details) = self.terms.get(annotation_termid) {
        for ext_config in ext_configs {
            if ext_config.rel_name == rel_type_name {
                // a config may be restricted to descendants of a given term
                if let Some(ref if_descendant_of) = ext_config.if_descendant_of {
                    if annotation_termid ==
if_descendant_of.as_str() ||
                        annotation_term_details.interesting_parent_ids
                        .contains(if_descendant_of) {
                            return Some((*ext_config).clone());
                        }
                } else {
                    return Some((*ext_config).clone());
                }
            }
        }
    }

    None
}

// create and returns any TargetOfAnnotations implied by the extension
//
// For each Gene range in the extension whose relation has a configured
// reciprocal display name, emit a (target gene, TargetOfAnnotation) pair.
// Panics unless `genes` contains exactly one gene.
fn make_target_of_for_ext(&self, cv_name: &str,
                          genes: &[RcString],
                          maybe_genotype_uniquename: &Option<RcString>,
                          reference_uniquename: &Option<RcString>,
                          annotation_termid: &str,
                          extension: &[ExtPart]) -> Vec<(GeneUniquename, TargetOfAnnotation)> {
    if genes.len() != 1 {
        panic!("expected an annotation with one gene for {}, got: {:?}",
               annotation_termid, genes);
    }

    let gene = &genes[0];

    let mut ret_vec = vec![];

    for ext_part in extension {
        let maybe_ext_config =
            self.matching_ext_config(annotation_termid, &ext_part.rel_type_name);
        if let ExtRange::Gene(ref target_gene_uniquename) = ext_part.ext_range {
            if let Some(ext_config) = maybe_ext_config {
                if let Some(reciprocal_display_name) =
                    ext_config.reciprocal_display {
                        let (annotation_gene_uniquename,
                             annotation_genotype_uniquename) =
                            if maybe_genotype_uniquename.is_some() {
                                (gene.clone(), maybe_genotype_uniquename.clone())
                            } else {
                                (gene.clone(), None)
                            };
                        ret_vec.push(((*target_gene_uniquename).clone(),
                                      TargetOfAnnotation {
                                          show_in_summary: true,  // set this later
                                          ontology_name: cv_name.into(),
                                          ext_rel_display_name:
                                              reciprocal_display_name,
                                          gene: annotation_gene_uniquename,
                                          genotype_uniquename:
                                              annotation_genotype_uniquename,
                                          reference_uniquename:
                                              reference_uniquename.clone(),
                                      }));
                    }
            }
        }
    }

    ret_vec
}

// return an ordered vector of annotations, setting the show_in_summary flag
// see: https://github.com/pombase/website/issues/299
fn process_target_of_annotations(&self, gene_details: &GeneDetails,
                                 annotations: &mut HashSet<TargetOfAnnotation>)
                                 -> Vec<TargetOfAnnotation> {
    let mut processed_annotations = annotations.drain().collect::<Vec<_>>();

    let target_of_config = &self.config.target_of_config;
    let priority_config = &target_of_config.relation_priority;

    // warn (but carry on) about relations with no configured priority
    for annotation in &processed_annotations {
        if priority_config.get(annotation.ext_rel_display_name.as_str()).is_none() {
            eprintln!(r#"No priority configured for "{}" (from {})"#,
                      annotation.ext_rel_display_name, gene_details.uniquename);
        }
    }

    // order: priority (descending), then relation name, then gene name
    // (falling back to uniquename)
    let cmp_fn = |a: &TargetOfAnnotation, b: &TargetOfAnnotation| {
        let a_rel_name = a.ext_rel_display_name.as_str();
        let a_pri = priority_config.get(a_rel_name).unwrap_or(&0);
        let b_rel_name = b.ext_rel_display_name.as_str();
        let b_pri = priority_config.get(b_rel_name).unwrap_or(&0);

        let pri_order = b_pri.cmp(a_pri);

        if pri_order == Ordering::Equal {
            let rel_name_order = a_rel_name.cmp(b_rel_name);

            if rel_name_order == Ordering::Equal {
                let a_gene_details = self.genes.get(&a.gene).unwrap();
                let b_gene_details = self.genes.get(&b.gene).unwrap();

                if let (Some(a_name), Some(b_name)) =
                    (&a_gene_details.name, &b_gene_details.name) {
                        a_name.cmp(b_name)
                    } else {
                        a_gene_details.uniquename.cmp(&b_gene_details.uniquename)
                    }
            } else {
                rel_name_order
            }
        } else {
            pri_order
        }
    };

    processed_annotations.sort_by(cmp_fn);

    // only the highest-priority relation per annotating gene is shown in
    // the summary; later (lower priority) entries are hidden
    let mut seen_gene_rels = HashMap::new();

    for annotation in processed_annotations.iter_mut() {
        let rel_priority =
            priority_config.get(annotation.ext_rel_display_name.as_str())
            .unwrap_or(&0);

        let existing_rel = seen_gene_rels.get(&annotation.gene);

        if let Some(existing_rel) = existing_rel {
            if *existing_rel > rel_priority {
                annotation.show_in_summary = false;
                continue;
            }
        }
        seen_gene_rels.insert(annotation.gene.clone(), rel_priority);
    }

    processed_annotations
}

// Collect TargetOfAnnotations implied by annotation extensions (via
// make_target_of_for_ext()) and store the processed, ordered result on each
// target gene.  Annotations on multi-locus or diploid genotypes are skipped.
fn add_target_of_annotations(&mut self) {
    let mut target_of_annotations: HashMap<GeneUniquename, HashSet<TargetOfAnnotation>> =
        HashMap::new();

    for term_details in self.terms.values() {
        for term_annotations in term_details.cv_annotations.values() {
            for term_annotation in term_annotations {
                'ANNOTATION: for annotation_id in &term_annotation.annotations {
                    let annotation = self.annotation_details
                        .get(annotation_id).expect("can't find OntAnnotationDetail");

                    if let Some(ref genotype_uniquename) = annotation.genotype {
                        let genotype = &self.genotypes[genotype_uniquename];

                        // only single-locus, haploid genotypes qualify
                        if genotype.loci.len() > 1 ||
                            genotype.loci[0].expressed_alleles.len() > 1 {
                                break 'ANNOTATION;
                            }
                    }

                    let new_annotations =
                        self.make_target_of_for_ext(&term_details.cv_name,
                                                    &annotation.genes,
                                                    &annotation.genotype,
                                                    &annotation.reference,
                                                    &term_details.termid,
                                                    &annotation.extension);

                    for (target_gene_uniquename, new_annotation) in new_annotations {
                        if self.genes.get(&target_gene_uniquename).is_some() {
                            target_of_annotations
                                .entry(target_gene_uniquename.clone())
                                .or_insert_with(HashSet::new)
                                .insert(new_annotation);
                        } else {
                            // extension points at an unknown gene: warn, don't fail
                            eprintln!("can't find gene {} in extension for {}",
                                      target_gene_uniquename, term_details.termid);
                            for annotation_gene in &annotation.genes {
                                eprintln!("  in annotation of {}", annotation_gene);
                            }
                        }
                    }
                }
            }
        }
    }

    for (gene_uniquename, mut target_of_annotations) in target_of_annotations {
        let gene_details = self.genes.get(&gene_uniquename).unwrap();
        let processed_target_of_annotations =
            self.process_target_of_annotations(gene_details,
                                               &mut target_of_annotations);
        let gene_details = self.genes.get_mut(&gene_uniquename).unwrap();
        gene_details.target_of_annotations = processed_target_of_annotations;
    }
}

// From the single-locus phenotype annotations of deletion (or null-expression)
// alleles, classify each gene as Viable, Inviable, DependsOnConditions or
// Unknown, and report genes that are both viable and inviable under the same
// conditions.
fn set_deletion_viability(&mut self) {
    let some_null = Some(RcString::from("Null"));
    let mut gene_statuses = HashMap::new();

    // canonical string key for a set of condition term IDs
    let condition_string =
        |condition_ids: HashSet<RcString>| {
            let mut ids_vec: Vec<RcString> = condition_ids.iter().cloned().collect();
            ids_vec.sort();
            RcString::from(&ids_vec.join(" "))
        };

    let viable_termid = &self.config.viability_terms.viable;
    let inviable_termid = &self.config.viability_terms.inviable;

    for (gene_uniquename, gene_details) in &mut self.genes {
        let mut new_status = DeletionViability::Unknown;

        if let Some(single_locus_term_annotations) =
            gene_details.cv_annotations.get("single_locus_phenotype") {
                let mut viable_conditions: HashMap<RcString, TermId> =
                    HashMap::new();
                let mut
inviable_conditions: HashMap<RcString, TermId> =
                    HashMap::new();

                for term_annotation in single_locus_term_annotations {
                    'ANNOTATION: for annotation_id in &term_annotation.annotations {
                        let annotation = self.annotation_details
                            .get(annotation_id).expect("can't find OntAnnotationDetail");

                        let genotype_uniquename =
                            annotation.genotype.as_ref().unwrap();
                        let genotype = &self.genotypes[genotype_uniquename];
                        if genotype.loci[0].expressed_alleles.len() > 1 {
                            // diploid locus
                            continue 'ANNOTATION;
                        }
                        let expressed_allele = &genotype.loci[0].expressed_alleles[0];
                        let allele = &self.alleles[&expressed_allele.allele_uniquename];

                        // only deletions and Null-expression alleles count
                        if allele.allele_type != "deletion" &&
                            expressed_allele.expression != some_null {
                                continue 'ANNOTATION;
                            }

                        let term = &self.terms[&term_annotation.term];
                        let interesting_parent_ids = &term.interesting_parent_ids;
                        let conditions_as_string =
                            condition_string(annotation.conditions.clone());
                        // classify the term (or an ancestor) as viable/inviable
                        if interesting_parent_ids.contains(viable_termid) ||
                            *viable_termid == term_annotation.term {
                                viable_conditions.insert(conditions_as_string,
                                                         term_annotation.term.clone());
                            } else {
                                if interesting_parent_ids.contains(inviable_termid) ||
                                    *inviable_termid == term_annotation.term {
                                        inviable_conditions.insert(conditions_as_string,
                                                                   term_annotation.term.clone());
                                    }
                            }
                    }
                }

                if viable_conditions.is_empty() {
                    if !inviable_conditions.is_empty() {
                        new_status = DeletionViability::Inviable;
                    }
                } else {
                    if inviable_conditions.is_empty() {
                        new_status = DeletionViability::Viable;
                    } else {
                        new_status = DeletionViability::DependsOnConditions;

                        // report contradictory data: viable AND inviable
                        // under the same condition set
                        let viable_conditions_set: HashSet<RcString> =
                            viable_conditions.keys().cloned().collect();
                        let inviable_conditions_set: HashSet<RcString> =
                            inviable_conditions.keys().cloned().collect();

                        let intersecting_conditions =
                            viable_conditions_set.intersection(&inviable_conditions_set);
                        if intersecting_conditions.clone().count() > 0 {
                            println!("{} is viable and inviable with",
                                     gene_uniquename);
                            for cond in intersecting_conditions {
                                if cond.is_empty() {
                                    println!("  no conditions");
                                } else {
                                    println!("  conditions: {}", cond);
                                }
                                println!("   viable term: {}",
                                         viable_conditions[cond]);
                                println!("   inviable term: {}",
                                         inviable_conditions[cond]);
                            }
                        }
                    }
                }
            }
        gene_statuses.insert(gene_uniquename.clone(), new_status);
    }

    for (gene_uniquename, status) in &gene_statuses {
        if let Some(ref mut gene_details) =
            self.genes.get_mut(gene_uniquename) {
                gene_details.deletion_viability = status.clone();
            }
    }
}

// For each term, record which configured slim subsets it belongs to.
fn set_term_details_subsets(&mut self) {
    let mut subsets_by_termid = HashMap::new();
    for (slim_name, slim_config) in self.config.slims.iter() {
        for term_and_name in &slim_config.terms {
            subsets_by_termid
                .entry(term_and_name.termid.clone())
                .or_insert_with(HashSet::new)
                .insert(slim_name.clone());
        }
    }

    for term_details in self.terms.values_mut() {
        if let Some(subsets) = subsets_by_termid.remove(&term_details.termid) {
            term_details.in_subsets = subsets;
        }
    }
}

// On each GeneDetails, add a set of the term IDs of subsets for
// this gene. Any useful subset that contains any term for any
// annotation in the gene is included. "useful" means that the
// front end might need it, eg. slim term IDs
fn set_gene_details_subset_termids(&mut self) {
    // true when test_termid is subset_termid itself or a descendant of it
    let is_subset_member =
        |subset_termid: &str, test_termid: &str| {
            if subset_termid == test_termid {
                return true;
            }

            if let Some(children) = self.children_by_termid.get(subset_termid) {
                children.contains(test_termid)
            } else {
                false
            }
        };

    let mut subsets_by_gene = HashMap::new();

    for slim_config in self.config.slims.values() {
        for term_and_name in &slim_config.terms {
            for gene_details in self.genes.values() {
                for term_annotations in gene_details.cv_annotations.values() {
                    for term_annotation in term_annotations {
                        let gene_termid = &term_annotation.term;
                        if is_subset_member(&term_and_name.termid, gene_termid) {
                            subsets_by_gene
                                .entry(gene_details.uniquename.clone())
                                .or_insert_with(HashSet::new)
                                .insert(term_and_name.termid.clone());
                        }
                    }
                }
            }
        }
    }

    for gene_details in self.genes.values_mut() {
        if let Some(subset_termids) =
            subsets_by_gene.remove(&gene_details.uniquename) {
                gene_details.subset_termids = subset_termids;
            }
    }
}

// Derive a human-readable taxonomic distribution for each gene from its
// "species_dist" annotations (conserved in archaea/bacteria/metazoa etc.).
// Bails out early (with a message) if any of the required terms is missing
// from term_ids_by_name.
fn set_taxonomic_distributions(&mut self) {
    let mut term_name_map = HashMap::new();

    let in_archaea = "conserved in archaea";
    let in_bacteria = "conserved in bacteria";
    let in_fungi_only = "conserved in fungi only";
    let in_metazoa = "conserved in metazoa";
    let pombe_specific = "Schizosaccharomyces pombe specific";
    let schizo_specific = "Schizosaccharomyces specific";

    let names = vec![in_archaea, in_bacteria, in_fungi_only, in_metazoa,
                     pombe_specific, schizo_specific];

    for name in names {
        if let Some(termid) = self.term_ids_by_name.get(name) {
            term_name_map.insert(termid.clone(), name.to_owned());
        } else {
            eprintln!("configuration error: can't find {} in term_ids_by_name map",
                      name);
            eprintln!("skipping taxonomic distribution");
            return;
        }
    }

    'GENE: for gene_details in self.genes.values_mut() {
        let mut dist_names = HashSet::new();

        if let Some(species_dists) =
            gene_details.cv_annotations.get("species_dist") {
                for ont_term_annotations in species_dists {
                    let term = &ont_term_annotations.term;
                    if let
Some(term_name) = term_name_map.get(term) {
                        dist_names.insert(term_name.to_owned());
                    }
                }
            }

        // first matching rule wins, checked in this order
        if (dist_names.contains(in_archaea) || dist_names.contains(in_bacteria)) &&
            !dist_names.contains(in_metazoa) {
                gene_details.taxonomic_distribution =
                    Some(RcString::from("fungi and prokaryotes"));
                continue 'GENE;
            }
        if dist_names.contains(in_metazoa) &&
            !((dist_names.contains(in_archaea) ||
               dist_names.contains(in_bacteria)) &&
              dist_names.contains(in_metazoa)) {
                gene_details.taxonomic_distribution =
                    Some(RcString::from("eukaryotes only, fungi and metazoa"));
                continue 'GENE;
            }
        if (dist_names.contains(in_archaea) || dist_names.contains(in_bacteria)) &&
            dist_names.contains(in_metazoa) {
                gene_details.taxonomic_distribution =
                    Some(RcString::from("eukaryotes and prokaryotes"));
                continue 'GENE;
            }
        if dist_names.contains(in_fungi_only) {
            gene_details.taxonomic_distribution =
                Some(RcString::from("fungi only"));
            continue 'GENE;
        }
        if dist_names.contains(pombe_specific) {
            gene_details.taxonomic_distribution =
                Some(RcString::from("S. pombe specific"));
            continue 'GENE;
        }
        if dist_names.contains(schizo_specific) {
            gene_details.taxonomic_distribution =
                Some(RcString::from("Schizos. specific"));
            continue 'GENE;
        }

        // fall-backs when no species_dist annotation matched
        if let Some(ref characterisation_status) =
            gene_details.characterisation_status {
                if characterisation_status == "dubious" {
                    gene_details.taxonomic_distribution =
                        Some(RcString::from("dubious"));
                    continue 'GENE;
                }
            }

        if gene_details.feature_type != "mRNA gene" {
            gene_details.taxonomic_distribution =
                Some(RcString::from("not curated"));
            continue 'GENE;
        }

        gene_details.taxonomic_distribution = Some(RcString::from("other"));
    }
}

// Build a TermDetails for every non-extension cvterm, collecting xrefs
// (driven by the per-CV source config), synonyms and secondary identifiers.
// Extension terms (POMBASE_ANN_EXT_TERM_CV_NAME) are handled separately in
// process_extension_cvterms().
fn process_cvterms(&mut self) {
    for cvterm in &self.raw.cvterms {
        if cvterm.cv.name != POMBASE_ANN_EXT_TERM_CV_NAME {
            let cv_config = self.config.cv_config_by_name(&cvterm.cv.name);
            let annotation_feature_type = cv_config.feature_type.clone();

            let mut xrefs = HashMap::new();

            for (source_name, source_config) in cv_config.source_config {
                let mut maybe_xref_id = None;
                if let Some(ref term_xref_id_prop) = source_config.id_source {
                    // "prop_name:<name>" reads the id from a cvtermprop;
                    // "ACCESSION" uses the term's own dbxref accession
                    if let Some(term_xref_id_prop) =
                        term_xref_id_prop.strip_prefix("prop_name:") {
                            for cvtermprop in cvterm.cvtermprops.borrow().iter() {
                                if cvtermprop.prop_type.name == *term_xref_id_prop {
                                    maybe_xref_id = Some(cvtermprop.value.clone());
                                    break;
                                }
                            }
                        } else {
                            if term_xref_id_prop == "ACCESSION" {
                                let dbxref: &Dbxref = cvterm.dbxref.borrow();
                                maybe_xref_id = Some(dbxref.accession.clone());
                            }
                        }
                }
                let mut maybe_xref_display_name = None;
                if let Some(ref xref_display_name_prop) =
                    source_config.display_name_prop {
                        for cvtermprop in cvterm.cvtermprops.borrow().iter() {
                            if cvtermprop.prop_type.name == *xref_display_name_prop {
                                maybe_xref_display_name =
                                    Some(cvtermprop.value.clone());
                            }
                        }
                    }
                if let Some(xref_id) = maybe_xref_id {
                    let term_xref = TermXref {
                        xref_id,
                        xref_display_name: maybe_xref_display_name,
                    };

                    xrefs.insert(source_name.clone(), term_xref);
                }
            }

            let synonyms =
                cvterm.cvtermsynonyms.borrow().iter().map(|syn| {
                    SynonymDetails {
                        synonym_type: (*syn).synonym_type.name.clone(),
                        name: syn.name.clone(),
                    }
                }).collect::<Vec<_>>();

            let definition_xrefs =
                cvterm.definition_xrefs.borrow().iter()
.map(|dbxref| { dbxref.identifier() }).collect::<HashSet<_>>(); let secondary_identifiers = cvterm.other_dbxrefs.borrow().iter() .map(|dbxref| { dbxref.identifier() }).collect::<HashSet<_>>(); self.terms.insert(cvterm.termid(), TermDetails { name: cvterm.name.clone(), cv_name: cvterm.cv.name.clone(), annotation_feature_type, interesting_parent_ids: HashSet::new(), interesting_parent_details: HashSet::new(), in_subsets: HashSet::new(), termid: cvterm.termid(), synonyms, definition: cvterm.definition.clone(), direct_ancestors: vec![], definition_xrefs, secondary_identifiers, genes_annotated_with: HashSet::new(), is_obsolete: cvterm.is_obsolete, single_locus_genotype_uniquenames: HashSet::new(), cv_annotations: HashMap::new(), genes_by_uniquename: HashMap::new(), genotypes_by_uniquename: HashMap::new(), alleles_by_uniquename: HashMap::new(), transcripts_by_uniquename: HashMap::new(), references_by_uniquename: HashMap::new(), terms_by_termid: HashMap::new(), annotation_details: HashMap::new(), gene_count: 0, genotype_count: 0, xrefs, }); self.term_ids_by_name.insert(cvterm.name.clone(), cvterm.termid()); } } } fn get_ext_rel_display_name(&self, annotation_termid: &str, ext_rel_name: &str) -> RcString { if let Some(ext_conf) = self.matching_ext_config(annotation_termid, ext_rel_name) { ext_conf.display_name } else { RcString::from(&str::replace(ext_rel_name, "_", " ")) } } fn process_extension_cvterms(&mut self) { let db_prefix = format!("{}:", self.config.database_name); for cvterm in &self.raw.cvterms { if cvterm.cv.name == POMBASE_ANN_EXT_TERM_CV_NAME { for cvtermprop in cvterm.cvtermprops.borrow().iter() { if (*cvtermprop).prop_type.name.starts_with(ANNOTATION_EXT_REL_PREFIX) { let ext_rel_name_str = &(*cvtermprop).prop_type.name[ANNOTATION_EXT_REL_PREFIX.len()..]; let ext_rel_name = RcString::from(ext_rel_name_str); let ext_range = (*cvtermprop).value.clone(); let range: ExtRange = if ext_range.starts_with(&db_prefix) { let db_feature_uniquename = 
&ext_range[db_prefix.len()..];
                            // a local-database identifier: resolve it to a
                            // promoter, gene or transcript feature
                            if let Some(captures) = PROMOTER_RE.captures(db_feature_uniquename) {
                                let gene_uniquename = RcString::from(&captures["gene"]);
                                if self.genes.contains_key(&gene_uniquename) {
                                    ExtRange::Promoter(gene_uniquename)
                                } else {
                                    panic!("unknown gene in promoter: {}", db_feature_uniquename);
                                }
                            } else {
                                if self.genes.contains_key(db_feature_uniquename) {
                                    ExtRange::Gene(RcString::from(db_feature_uniquename))
                                } else {
                                    if let Some(captures) =
                                        TRANSCRIPT_ID_RE.captures(db_feature_uniquename) {
                                        if self.genes.contains_key(&captures["gene"]) {
                                            ExtRange::Transcript(RcString::from(db_feature_uniquename))
                                        } else {
                                            panic!("unknown gene for transcript: {}",
                                                   db_feature_uniquename);
                                        }
                                    } else {
                                        panic!("can't find gene or transcript for: {}",
                                               db_feature_uniquename);
                                    }
                                }
                            }
                        } else {
                            ExtRange::Misc(ext_range)
                        };

                        if let Some(base_termid) =
                            self.base_term_of_extensions.get(&cvterm.termid()) {
                            let rel_type_display_name =
                                self.get_ext_rel_display_name(base_termid, &ext_rel_name);
                            let rel_type_id =
                                self.term_ids_by_name.get(&ext_rel_name).cloned();
                            self.parts_of_extensions.entry(cvterm.termid())
                                .or_insert_with(Vec::new).push(ExtPart {
                                    rel_type_id,
                                    rel_type_name: ext_rel_name,
                                    rel_type_display_name,
                                    ext_range: range,
                                });
                        } else {
                            panic!("can't find details for term: {}\n", cvterm.termid());
                        }
                    }
                }
            }
        }
    }

    /// Process cvterm relationships in two passes.  Pass 1: for extension
    /// terms, record the base ("is_a" parent) term; for ordinary terms, add
    /// the relation to the subject term's direct_ancestors.  Pass 2: turn the
    /// non-is_a relations of extension terms into ExtPart structs.
    fn process_cvterm_rels(&mut self) {
        for cvterm_rel in &self.raw.cvterm_relationships {
            let subject_term = &cvterm_rel.subject;
            let object_term = &cvterm_rel.object;
            let rel_type = &cvterm_rel.rel_type;

            if subject_term.cv.name == POMBASE_ANN_EXT_TERM_CV_NAME {
                let subject_termid = subject_term.termid();
                if rel_type.name == "is_a" {
                    self.base_term_of_extensions.insert(subject_termid.clone(),
                                                        object_term.termid().clone());
                }
            } else {
                let object_term_short = self.make_term_short(&object_term.termid());
                if let Some(ref mut subject_term_details) =
                    self.terms.get_mut(&subject_term.termid()) {
                    subject_term_details.direct_ancestors.push(TermAndRelation {
                        termid: object_term_short.termid.clone(),
                        term_name: object_term_short.name.clone(),
                        relation_name: rel_type.name.clone(),
                    });
                }
            }
        }

        for cvterm_rel in &self.raw.cvterm_relationships {
            let subject_term = &cvterm_rel.subject;
            let object_term = &cvterm_rel.object;
            let rel_type = &cvterm_rel.rel_type;

            if subject_term.cv.name == POMBASE_ANN_EXT_TERM_CV_NAME {
                let subject_termid = subject_term.termid();
                if rel_type.name != "is_a" {
                    let object_termid = object_term.termid();
                    if let Some(base_termid) =
                        self.base_term_of_extensions.get(&subject_term.termid()) {
                        let rel_type_display_name =
                            self.get_ext_rel_display_name(base_termid, &rel_type.name);

                        // "PR:" accessions are gene product identifiers
                        let ext_range =
                            if object_termid.starts_with("PR:") {
                                ExtRange::GeneProduct(object_termid)
                            } else {
                                ExtRange::Term(object_termid)
                            };

                        self.parts_of_extensions.entry(subject_termid)
                            .or_insert_with(Vec::new).push(ExtPart {
                                rel_type_id: Some(rel_type.termid()),
                                rel_type_name: rel_type.name.clone(),
                                rel_type_display_name,
                                ext_range,
                            });
                    } else {
                        // FIX: the failed lookup is the base term of the
                        // *subject* extension term, so report that termid
                        // (the old message named the unrelated object term)
                        panic!("can't find base term for extension term: {}\n",
                               subject_termid);
                    }
                }
            }
        }
    }

    /// Attach feature synonyms to the matching gene or allele details.
    fn process_feature_synonyms(&mut self) {
        for feature_synonym in &self.raw.feature_synonyms {
            let feature = &feature_synonym.feature;
            let synonym = &feature_synonym.synonym;

            let make_synonym = || {
                SynonymDetails {
                    name: synonym.name.clone(),
                    synonym_type: synonym.synonym_type.name.clone()
                }
            };

            if let Some(ref mut gene_details) = self.genes.get_mut(&feature.uniquename) {
                gene_details.synonyms.push(make_synonym());
            } else {
                if let Some(ref mut allele) = self.alleles.get_mut(&feature.uniquename) {
                    allele.synonyms.push(make_synonym())
                }
            }
        }
    }

    /// Record PubMed ("PMID:") publications attached directly to gene features.
    fn process_feature_publications(&mut self) {
        for feature_pub in &self.raw.feature_pubs {
            let feature = &feature_pub.feature;
            let publication = &feature_pub.publication;

            if publication.uniquename.starts_with("PMID:") {
                if let Some(ref mut gene_details) = self.genes.get_mut(&feature.uniquename) {
                    gene_details.feature_publications.insert(publication.uniquename.clone());
                }
            }
        }
    }

    /// Make a GenotypeShort for a genotype display name; panics when the
    /// genotype is unknown.
    fn make_genotype_short(&self,
genotype_display_name: &str) -> GenotypeShort {
        if let Some(details) = self.genotypes.get(genotype_display_name) {
            GenotypeShort {
                display_uniquename: details.display_uniquename.clone(),
                name: details.name.clone(),
                loci: details.loci.clone(),
            }
        } else {
            panic!("can't find genotype {}", genotype_display_name);
        }
    }

    /// Clone the AlleleShort for an allele uniquename; panics (via indexing)
    /// when the allele is unknown.
    fn make_allele_short(&self, allele_uniquename: &str) -> AlleleShort {
        self.alleles[allele_uniquename].clone()
    }

    /// Set the product of the protein of the given transcript, when both the
    /// transcript and its protein exist.
    fn add_product_to_protein(&mut self, transcript_uniquename: &str,
                              product: RcString) {
        if let Some(transcript_details) =
            self.transcripts.get_mut(transcript_uniquename) {
            if let Some(ref mut protein) = transcript_details.protein {
                protein.product = Some(product);
            }
        }
    }

    // process feature properties stored as cvterms,
    // eg. characterisation_status and product
    fn process_props_from_feature_cvterms(&mut self) {
        for feature_cvterm in &self.raw.feature_cvterms {
            let feature = &feature_cvterm.feature;
            let cvterm = &feature_cvterm.cvterm;

            // work out which gene (and possibly which transcript) this
            // product annotation belongs to, depending on the feature type
            let (maybe_gene_uniquename, maybe_transcript_uniquename) =
                if cvterm.cv.name == "PomBase gene products" {
                    if feature.feat_type.name == "polypeptide" {
                        if let Some(transcript_uniquename) =
                            self.transcripts_of_polypeptides.get(&feature.uniquename) {
                            if let Some(gene_uniquename) =
                                self.genes_of_transcripts.get(transcript_uniquename) {
                                (Some(gene_uniquename.clone()),
                                 Some(transcript_uniquename.clone()))
                            } else {
                                (None, None)
                            }
                        } else {
                            (None, None)
                        }
                    } else {
                        if TRANSCRIPT_FEATURE_TYPES.contains(&feature.feat_type.name.as_str()) {
                            if let Some(gene_uniquename) =
                                self.genes_of_transcripts.get(&feature.uniquename) {
                                (Some(gene_uniquename.clone()),
                                 Some(feature.uniquename.clone()))
                            } else {
                                (None, None)
                            }
                        } else {
                            if feature.feat_type.name == "gene" {
                                (Some(feature.uniquename.clone()), None)
                            } else {
                                (None, None)
                            }
                        }
                    }
                } else {
                    (None, None)
                };

            if let Some(gene_uniquename) = maybe_gene_uniquename {
                if let Some(transcript_uniquename) = maybe_transcript_uniquename {
                    if transcript_uniquename.ends_with(".1") {
                        // for multi-transcript genes, use the product
                        // from the first transcript
                        self.add_gene_product(&gene_uniquename, &cvterm.name);
                    }
                    self.add_product_to_protein(&transcript_uniquename,
                                                cvterm.name.clone());
                }
            }

            if feature.feat_type.name == "gene" || feature.feat_type.name == "pseudogene" {
                if cvterm.cv.name == "PomBase gene characterisation status" {
                    self.add_characterisation_status(&feature.uniquename, &cvterm.name);
                } else {
                    if cvterm.cv.name == "name_description" {
                        self.add_name_description(&feature.uniquename, &cvterm.name);
                    }
                }
            }
        }
    }

    /// Interpret a with/from annotation value: resolve "<prefix>:<id>" or a
    /// bare ID to a gene, transcript or term where possible, otherwise keep
    /// it as a plain identifier.
    fn make_with_or_from_value(&self, with_or_from_value: &RcString) -> WithFromValue {
        if let Some(captures) = PREFIX_AND_ID_RE.captures(with_or_from_value) {
            let prefix = &captures["prefix"];
            let id = &captures["id"];

            if self.genes.contains_key(id) {
                let gene_short = self.make_gene_short(id);
                if self.config.database_name == prefix {
                    // a gene from the main organism
                    return WithFromValue::Gene(gene_short);
                } else {
                    if let Some(name) = &gene_short.name {
                        return WithFromValue::IdentifierAndName({
                            IdentifierAndName {
                                identifier: with_or_from_value.clone(),
                                name: RcString::from(name),
                            }
                        });
                    }
                }
            } else {
                if self.transcripts.contains_key(id) {
                    if self.config.database_name == prefix {
                        return WithFromValue::Transcript(RcString::from(id));
                    }
                }
            }
        } else {
            if self.genes.contains_key(with_or_from_value) {
                let gene_short = self.make_gene_short(with_or_from_value);
                // a gene from the main organism
                return WithFromValue::Gene(gene_short);
            } else {
                if self.transcripts.contains_key(with_or_from_value) {
                    return WithFromValue::Transcript(RcString::from(with_or_from_value));
                }
            }
        }

        if self.terms.get(with_or_from_value).is_some() {
            return WithFromValue::Term(self.make_term_short(with_or_from_value))
        }

        WithFromValue::Identifier(with_or_from_value.clone())
    }

    // process annotation
    fn process_feature_cvterms(&mut self) {
        let rel_order = self.config.extension_relation_order.clone();

        'FEATURE_CVTERM:
        for feature_cvterm in &self.raw.feature_cvterms {
            let feature =
&feature_cvterm.feature;
            let cvterm = &feature_cvterm.cvterm;
            let termid = cvterm.termid();

            let mut transcript_uniquenames = vec![];

            let mut extension = vec![];

            // these CVs hold feature properties (handled by
            // process_props_from_feature_cvterms()), not real annotations
            if cvterm.cv.name == "PomBase gene characterisation status" ||
                cvterm.cv.name == "PomBase gene products" ||
                cvterm.cv.name == "name_description" {
                continue;
            }

            let publication = &feature_cvterm.publication;
            let mut extra_props: HashMap<RcString, RcString> = HashMap::new();
            let mut conditions: HashSet<TermId> = HashSet::new();
            let mut withs: HashSet<WithFromValue> = HashSet::new();
            let mut froms: HashSet<WithFromValue> = HashSet::new();
            let mut qualifiers: Vec<Qualifier> = vec![];
            let mut date: Option<RcString> = None;
            let mut assigned_by: Option<RcString> = None;
            let mut evidence: Option<RcString> = None;
            let mut genotype_background: Option<RcString> = None;
            let mut throughput: Option<Throughput> = None;

            // need to get evidence first as it's used later
            // See: https://github.com/pombase/website/issues/455
            for prop in feature_cvterm.feature_cvtermprops.borrow().iter() {
                if &prop.type_name() == "evidence" {
                    if let Some(ref evidence_long) = prop.value {
                        // map a long evidence description back to its code;
                        // fall back to the long form when no code matches
                        for (evidence_code, ev_details) in &self.config.evidence_types {
                            if &ev_details.long == evidence_long {
                                evidence = Some(evidence_code.clone());
                            }
                        }
                        if evidence.is_none() {
                            evidence = Some(evidence_long.clone());
                        }
                    }
                }
            }

            for prop in feature_cvterm.feature_cvtermprops.borrow().iter() {
                match &prop.type_name() as &str {
                    "residue" | "scale" |
                    "gene_product_form_id" |
                    "quant_gene_ex_copies_per_cell" |
                    "quant_gene_ex_avg_copies_per_cell" => {
                        if let Some(value) = prop.value.clone() {
                            // outside the sequence CV a "residue" prop is
                            // also shown as a "modified residue" extension
                            // part, placed first in the extension
                            if prop.type_name() == "residue" &&
                                &cvterm.cv.name != "sequence" {
                                let residue = value.clone();
                                let display_name =
                                    self.get_ext_rel_display_name(&termid, "modified residue");
                                let residue_range_part = ExtPart {
                                    rel_type_id: None,
                                    rel_type_name: display_name.clone(),
                                    rel_type_display_name: display_name,
                                    ext_range: ExtRange::SummaryModifiedResidues(vec![residue]),
                                };
                                extension.insert(0, residue_range_part);
                            }
                            extra_props.insert(prop.type_name().clone(), value);
                        }
                    },
                    "condition" =>
                        if let Some(value) = prop.value.clone() {
                            // conditions must be term IDs ("CV:accession")
                            if value.contains(':') {
                                conditions.insert(value.clone());
                            } else {
                                eprintln!(r#"ignoring condition that isn't a term ID "{}" (from annotation of {} with {})"#,
                                          value, feature.uniquename, termid);
                            }
                        },
                    "qualifier" =>
                        if let Some(value) = prop.value.clone() {
                            qualifiers.push(value);
                        },
                    "assigned_by" =>
                        if let Some(value) = prop.value.clone() {
                            assigned_by = Some(value);
                        },
                    "date" => {
                        if let Some(value) = prop.value.clone() {
                            date = Some(value);
                        }
                    },
                    "with" => {
                        if let Some(value) = prop.value.clone() {
                            withs.insert(self.make_with_or_from_value(&value));
                        }
                    },
                    "from" => {
                        if let Some(value) = prop.value.clone() {
                            froms.insert(self.make_with_or_from_value(&value));
                        }
                    },
                    "annotation_throughput_type" => {
                        if let Some(throughput_type) = prop.value.clone() {
                            throughput = Some(match throughput_type.as_ref() {
                                "low throughput" => Throughput::LowThroughput,
                                "high throughput" => Throughput::HighThroughput,
                                "non-experimental" => Throughput::NonExperimental,
                                _ => {
                                    panic!("unknown throughput type: {}",
                                           throughput_type);
                                }
                            });
                        }
                    },
                    _ => ()
                }
            }

            let mut maybe_genotype_uniquename = None;

            // find the gene(s) this annotation applies to, depending on the
            // type of the annotated feature
            let mut gene_uniquenames_vec: Vec<GeneUniquename> =
                match &feature.feat_type.name as &str {
                    "polypeptide" => {
                        if let Some(transcript_uniquename) =
                            self.transcripts_of_polypeptides.get(&feature.uniquename) {
                            if let Some(gene_uniquename) =
                                self.genes_of_transcripts.get(transcript_uniquename) {
                                vec![gene_uniquename.clone()]
                            } else {
                                vec![]
                            }
                        } else {
                            vec![]
                        }
                    },
                    "genotype" => {
                        let loci: Vec<_> =
                            self.loci_of_genotypes[&feature.uniquename]
                            .values().cloned().collect();
                        let genotype_display_name =
                            make_genotype_display_name(&loci, &self.alleles);
                        maybe_genotype_uniquename = Some(genotype_display_name);
                        genotype_background =
                            self.genotype_backgrounds.get(&feature.uniquename)
                            .cloned();
                        // a genotype annotation applies to the genes of all
                        // of its expressed alleles
                        loci.iter()
                            .map(|locus| {
                                locus.expressed_alleles.iter()
                                    .map(|expressed_allele| {
                                        let allele_short =
                                            self.make_allele_short(&expressed_allele.allele_uniquename);
                                        allele_short.gene_uniquename
                                    })
                                    .collect()
                            })
                            .collect::<Vec<Vec<_>>>()
                            .concat()
                    },
                    "gene" | "pseudogene" => {
                        vec![feature.uniquename.clone()]
                    },
                    _ =>
                        if TRANSCRIPT_FEATURE_TYPES.contains(&feature.feat_type.name.as_str()) {
                            if let Some(gene_uniquename) =
                                self.genes_of_transcripts.get(&feature.uniquename) {
                                if let Some(gene_details) = self.genes.get(gene_uniquename) {
                                    if gene_details.transcripts.len() > 1 {
                                        // only bother to record the specific transcript if
                                        // there is more than one
                                        transcript_uniquenames.push(feature.uniquename.clone());
                                    }
                                }
                                vec![gene_uniquename.clone()]
                            } else {
                                vec![]
                            }
                        } else {
                            eprintln!("can't handle annotation on {} {}",
                                      &feature.feat_type.name, &feature.uniquename);
                            continue 'FEATURE_CVTERM;
                        }
                };

            gene_uniquenames_vec.dedup();

            gene_uniquenames_vec =
                gene_uniquenames_vec.iter().map(|gene_uniquename: &RcString| {
                    self.make_gene_short(gene_uniquename).uniquename
                }).collect();

            // "null" is the placeholder for annotations with no reference
            let reference_uniquename =
                if publication.uniquename == "null" {
                    None
                } else {
                    Some(publication.uniquename.clone())
                };

            let mut extra_props_clone = extra_props.clone();
            let copies_per_cell = extra_props_clone.remove("quant_gene_ex_copies_per_cell");
            let avg_copies_per_cell = extra_props_clone.remove("quant_gene_ex_avg_copies_per_cell");
            let gene_ex_props =
                if copies_per_cell.is_some() || avg_copies_per_cell.is_some() {
                    let scale = extra_props_clone.remove("scale")
                        .expect("gene ex scale missing");
                    Some(GeneExProps {
                        copies_per_cell,
                        avg_copies_per_cell,
                        scale,
                    })
                } else {
                    None
                };

            if gene_uniquenames_vec.len() > 1 && maybe_genotype_uniquename.is_none() {
                panic!("non-genotype annotation has more than one gene");
            }

            let annotation_detail = OntAnnotationDetail {
                id: feature_cvterm.feature_cvterm_id,
                genes: gene_uniquenames_vec,
                transcript_uniquenames,
                reference: reference_uniquename,
                genotype: maybe_genotype_uniquename,
                genotype_background,
                withs,
                froms,
                residue:
extra_props_clone.remove("residue"),
                gene_product_form_id: extra_props_clone.remove("gene_product_form_id"),
                gene_ex_props,
                qualifiers,
                evidence,
                conditions,
                extension,
                date,
                assigned_by,
                throughput,
            };

            self.add_annotation(&rel_order, cvterm.borrow(),
                                feature_cvterm.is_not, annotation_detail);
        }
    }

    /// Group annotation detail IDs for one term into OntTermAnnotations,
    /// keyed by the CV name they should be displayed under.  Gene expression
    /// terms are split into qualitative/quantitative groups; phenotype terms
    /// are split into single-locus/multi-locus groups.
    fn make_term_annotations(&self, termid: &RcString, detail_ids: &[OntAnnotationId],
                             is_not: bool)
                             -> Vec<(CvName, OntTermAnnotations)> {
        let term_details = &self.terms[termid];

        let cv_name = term_details.cv_name.clone();

        match cv_name.as_ref() {
            "gene_ex" | "PomGeneExRNA" | "PomGeneExProt" | "PomGeneExRD" => {
                if is_not {
                    panic!("gene_ex annotations can't be NOT annotations");
                }
                let mut qual_annotations =
                    OntTermAnnotations {
                        term: termid.clone(),
                        is_not: false,
                        rel_names: HashSet::new(),
                        annotations: vec![],
                        summary: None,
                    };
                let mut quant_annotations =
                    OntTermAnnotations {
                        term: termid.clone(),
                        is_not: false,
                        rel_names: HashSet::new(),
                        annotations: vec![],
                        summary: None,
                    };
                for annotation_id in detail_ids {
                    let annotation = self.annotation_details.
                        get(annotation_id).expect("can't find OntAnnotationDetail");

                    // quantitative annotations are those carrying GeneExProps
                    if annotation.gene_ex_props.is_some() {
                        quant_annotations.annotations.push(*annotation_id)
                    } else {
                        qual_annotations.annotations.push(*annotation_id)
                    }
                }

                let mut return_vec = vec![];

                if !qual_annotations.annotations.is_empty() {
                    return_vec.push((RcString::from("qualitative_gene_expression"),
                                     qual_annotations));
                }

                if !quant_annotations.annotations.is_empty() {
                    return_vec.push((RcString::from("quantitative_gene_expression"),
                                     quant_annotations));
                }

                return_vec
            },
            "fission_yeast_phenotype" => {
                let mut single_locus =
                    OntTermAnnotations {
                        term: termid.clone(),
                        is_not,
                        rel_names: HashSet::new(),
                        annotations: vec![],
                        summary: None,
                    };
                let mut multi_locus =
                    OntTermAnnotations {
                        term: termid.clone(),
                        is_not,
                        rel_names: HashSet::new(),
                        annotations: vec![],
                        summary: None,
                    };

                for annotation_id in detail_ids {
                    let annotation = self.annotation_details.
                        get(annotation_id).expect("can't find OntAnnotationDetail");

                    // phenotype annotations always carry a genotype
                    let genotype_uniquename = annotation.genotype.as_ref().unwrap();
                    if let Some(genotype_details) = self.genotypes.get(genotype_uniquename) {
                        if genotype_details.loci.len() == 1 {
                            single_locus.annotations.push(*annotation_id);
                        } else {
                            if !multi_locus.annotations.contains(annotation_id) {
                                multi_locus.annotations.push(*annotation_id);
                            }
                        }
                    } else {
                        panic!("can't find genotype details for {}\n", genotype_uniquename);
                    }
                }

                let mut return_vec = vec![];

                if !single_locus.annotations.is_empty() {
                    return_vec.push((RcString::from("single_locus_phenotype"),
                                     single_locus));
                }

                if !multi_locus.annotations.is_empty() {
                    return_vec.push((RcString::from("multi_locus_phenotype"),
                                     multi_locus));
                }

                return_vec
            },
            _ => {
                vec![(cv_name,
                      OntTermAnnotations {
                          term: termid.clone(),
                          is_not,
                          rel_names: HashSet::new(),
                          annotations: detail_ids.to_owned(),
                          summary: None,
                      })]
            }
        }
    }

    /// Merge annotations that differ only by their transcript ID, so each
    /// surviving annotation lists all of its transcripts.
    fn remove_duplicate_transcript_annotation(&mut self) {
        let ont_annotation_map = &mut self.all_ont_annotations;

        for (_, annotations) in ont_annotation_map {
            let (no_transcript_annotations, mut has_transcript_annotations): (Vec<i32>, Vec<i32>) =
                annotations
                .iter()
                .partition(|&annotation_id| {
                    if let Some(ont_annotation_detail) =
                        self.annotation_details.get(annotation_id) {
                        // FIX: idiomatic is_empty() instead of len() == 0
                        ont_annotation_detail.transcript_uniquenames.is_empty()
                    } else {
                        panic!("can't find annotation details for {}", annotation_id);
                    }
                });

            *annotations = no_transcript_annotations;

            if has_transcript_annotations.len() >= 2 {
                // merge annotations that differ only by transcript ID
                has_transcript_annotations.sort();

                let mut prev_annotation_id = has_transcript_annotations.remove(0);

                for current_annotation_id in has_transcript_annotations.drain(0..)
{
                    // compare ignoring transcript: equal annotations are
                    // merged by appending the transcript ID to the first one
                    let (annotations_equal, current_transcript_uniquename) = {
                        let prev_annotation =
                            self.annotation_details.get(&prev_annotation_id).unwrap();
                        let current_annotation =
                            self.annotation_details.get(&current_annotation_id).unwrap();
                        (prev_annotation == current_annotation,
                         current_annotation.transcript_uniquenames[0].clone())
                    };
                    if annotations_equal {
                        if let Some(ref annotation_details) =
                            self.annotation_details.get(&prev_annotation_id) {
                            if !annotation_details.transcript_uniquenames
                                .contains(&current_transcript_uniquename) {
                                self.annotation_details.get_mut(&prev_annotation_id).unwrap()
                                    .transcript_uniquenames.push(current_transcript_uniquename);
                            }
                        }
                    } else {
                        annotations.push(prev_annotation_id);
                        prev_annotation_id = current_annotation_id;
                    }
                }
                annotations.push(prev_annotation_id);
            } else {
                annotations.extend(has_transcript_annotations.iter());
            }
        }
    }

    // store the OntTermAnnotations in the TermDetails, GeneDetails,
    // GenotypeDetails and ReferenceDetails
    fn store_ont_annotations(&mut self, is_not: bool) {
        let ont_annotation_map = if is_not {
            &self.all_not_ont_annotations
        } else {
            &self.all_ont_annotations
        };

        // per-entity indexes built in the first pass, applied below
        let mut gene_annotation_by_term: HashMap<GeneUniquename, HashMap<TermId, Vec<OntAnnotationId>>> =
            HashMap::new();
        let mut genotype_annotation_by_term: HashMap<GenotypeUniquename, HashMap<TermId, Vec<OntAnnotationId>>> =
            HashMap::new();
        let mut ref_annotation_by_term: HashMap<RcString, HashMap<TermId, Vec<OntAnnotationId>>> =
            HashMap::new();

        let mut ont_annotations = vec![];

        for (termid, annotations) in ont_annotation_map {
            // NOT annotations aren't added to the TermDetails
            if !is_not {
                let new_annotations =
                    self.make_term_annotations(termid, &annotations, is_not);

                if let Some(ref mut term_details) = self.terms.get_mut(termid) {
                    for (cv_name, new_annotation) in new_annotations {
                        term_details.cv_annotations.entry(cv_name.clone())
                            .or_insert_with(Vec::new)
                            .push(new_annotation);
                    }
                } else {
                    panic!("missing termid: {}\n", termid);
                }
            }

            for annotation_id in annotations {
                let annotation = self.annotation_details.
                    get(&annotation_id).expect("can't find OntAnnotationDetail");

                for gene_uniquename in &annotation.genes {
                    gene_annotation_by_term.entry(gene_uniquename.clone())
                        .or_insert_with(HashMap::new)
                        .entry(termid.clone())
                        .or_insert_with(Vec::new)
                        .push(*annotation_id);
                }

                if let Some(ref genotype_uniquename) = annotation.genotype {
                    let existing =
                        genotype_annotation_by_term.entry(genotype_uniquename.clone())
                        .or_insert_with(HashMap::new)
                        .entry(termid.clone())
                        .or_insert_with(Vec::new);
                    if !existing.contains(&annotation_id) {
                        existing.push(*annotation_id);
                    }
                }

                if let Some(reference_uniquename) = annotation.reference.clone() {
                    ref_annotation_by_term.entry(reference_uniquename)
                        .or_insert_with(HashMap::new)
                        .entry(termid.clone())
                        .or_insert_with(Vec::new)
                        .push(*annotation_id);
                }

                // also attach the annotation to each of its condition terms
                for condition_termid in &annotation.conditions {
                    let cv_name =
                        if let Some(term_details) = self.terms.get(condition_termid) {
                            term_details.cv_name.clone()
                        } else {
                            panic!("can't find term details for {}", condition_termid);
                        };

                    if let Some(ref mut condition_term_details) =
                        self.terms.get_mut(&condition_termid.clone()) {
                        condition_term_details.cv_annotations
                            .entry(cv_name.clone())
                            .or_insert({
                                let mut new_vec = Vec::new();
                                let new_term_annotation =
                                    OntTermAnnotations {
                                        term: condition_termid.clone(),
                                        is_not,
                                        rel_names: HashSet::new(),
                                        annotations: vec![],
                                        summary: None,
                                    };
                                new_vec.push(new_term_annotation);
                                new_vec
                            });
                        condition_term_details.cv_annotations.get_mut(&cv_name)
                            .unwrap()[0]
                            .annotations.push(*annotation_id);
                    }
                }

                /*
                Remove for now because it's messing with the gene counts.
                See: https://github.com/pombase/website/issues/1705

                // Add annotations to terms referred to in extensions.  They
                // are added to fake CV that have a name starting with
                // "extension:".  The CV name will end with ":genotype" if the
                // annotation is a phentoype/genotype, and will end with ":end"
                // otherwise.  The middle of the fake CV name is the display
                // name for the extension relation.
                // eg. "extension:directly activates:gene"
                for ext_part in &annotation.extension {
                    if let ExtRange::Term(ref part_termid) = ext_part.ext_range {
                        let cv_name = "extension:".to_owned() + &ext_part.rel_type_display_name;
                        if let Some(ref mut part_term_details) =
                            self.terms.get_mut(part_termid) {
                            let extension_cv_name =
                                if annotation.genotype.is_some() {
                                    cv_name.clone() + ":genotype"
                                } else {
                                    cv_name.clone() + ":gene"
                                };
                            part_term_details.cv_annotations
                                .entry(RcString::from(&extension_cv_name))
                                .or_insert({
                                    let mut new_vec = Vec::new();
                                    let new_term_annotation =
                                        OntTermAnnotations {
                                            term: part_termid.to_owned(),
                                            is_not,
                                            rel_names: HashSet::new(),
                                            annotations: vec![],
                                            summary: None,
                                        };
                                    new_vec.push(new_term_annotation);
                                    new_vec
                                });
                            part_term_details.cv_annotations.get_mut(&extension_cv_name)
                                .unwrap()[0]
                                .annotations.push(annotation_id);
                        }
                    }
                }
                */

                let gene_short_list =
                    annotation.genes.iter().map(|uniquename: &RcString| {
                        self.make_gene_short(uniquename)
                    }).collect::<HashSet<_>>();

                let reference_short =
                    annotation.reference.as_ref().and_then(|uniquename: &RcString| {
                        make_reference_short(&self.references, uniquename)
                    });

                let genotype_short =
                    annotation.genotype.as_ref().map(|uniquename: &RcString| {
                        self.make_genotype_short(uniquename)
                    });

                let conditions =
                    annotation.conditions.iter().map(|termid| {
                        self.make_term_short(termid)
                    }).collect::<HashSet<_>>();

                if gene_short_list.is_empty() {
                    panic!("no genes for {:?}", &annotation);
                }

                let ont_annotation = OntAnnotation {
                    term_short: self.make_term_short(termid),
                    id: annotation.id,
                    genes: gene_short_list,
                    reference_short,
                    genotype_short,
                    genotype_background: annotation.genotype_background.clone(),
                    withs: annotation.withs.clone(),
                    froms: annotation.froms.clone(),
                    residue: annotation.residue.clone(),
                    gene_ex_props: annotation.gene_ex_props.clone(),
                    qualifiers: annotation.qualifiers.clone(),
                    evidence: annotation.evidence.clone(),
                    conditions,
                    extension: annotation.extension.clone(),
                    assigned_by: annotation.assigned_by.clone(),
                };

                ont_annotations.push(ont_annotation);
            }
        }

        let mut term_names = HashMap::new();
        for (termid, term_details) in &self.terms {
            term_names.insert(termid.clone(), term_details.name.to_lowercase());
        }

        // sort order: non-NOT before NOT, then case-insensitive term name
        let ont_term_cmp = |ont_term_1: &OntTermAnnotations, ont_term_2: &OntTermAnnotations| {
            if !ont_term_1.is_not && ont_term_2.is_not {
                return Ordering::Less;
            }
            if ont_term_1.is_not && !ont_term_2.is_not {
                return Ordering::Greater;
            }
            let term1 = &term_names[&ont_term_1.term];
            let term2 = &term_names[&ont_term_2.term];
            term1.cmp(term2)
        };

        for (gene_uniquename, term_annotation_map) in &gene_annotation_by_term {
            for (termid, details) in term_annotation_map {
                let new_annotations =
                    self.make_term_annotations(termid, details, is_not);

                let gene_details = self.genes.get_mut(gene_uniquename).unwrap();

                for (cv_name, new_annotation) in new_annotations {
                    gene_details.cv_annotations.entry(cv_name.clone())
                        .or_insert_with(Vec::new)
                        .push(new_annotation);
                }
            }

            let gene_details = self.genes.get_mut(gene_uniquename).unwrap();
            for cv_annotations in gene_details.cv_annotations.values_mut() {
                cv_annotations.sort_by(&ont_term_cmp)
            }
        }

        for (genotype_uniquename, term_annotation_map) in &genotype_annotation_by_term {
            for (termid, details) in term_annotation_map {
                let new_annotations =
                    self.make_term_annotations(termid, details, is_not);

                let details = self.genotypes.get_mut(genotype_uniquename).unwrap();

                for (cv_name, new_annotation) in new_annotations {
                    details.cv_annotations.entry(cv_name.clone())
                        .or_insert_with(Vec::new)
                        .push(new_annotation);
                }
            }

            let details = self.genotypes.get_mut(genotype_uniquename).unwrap();
            for cv_annotations in details.cv_annotations.values_mut() {
                cv_annotations.sort_by(&ont_term_cmp)
            }
        }

        for (reference_uniquename, ref_annotation_map) in &ref_annotation_by_term {
            for (termid, details) in ref_annotation_map {
                let new_annotations =
                    self.make_term_annotations(termid, details, is_not);

                let ref_details = self.references.get_mut(reference_uniquename).unwrap();

                for (cv_name,
new_annotation) in new_annotations {
                    ref_details.cv_annotations.entry(cv_name).or_insert_with(Vec::new)
                        .push(new_annotation.clone());
                }
            }

            let ref_details = self.references.get_mut(reference_uniquename).unwrap();
            for cv_annotations in ref_details.cv_annotations.values_mut() {
                cv_annotations.sort_by(&ont_term_cmp)
            }
        }

        for ont_annotation in ont_annotations.drain(0..) {
            self.ont_annotations.push(ont_annotation);
        }
    }

    // return true if the term could or should appear in the interesting_parent_details
    // field of the TermDetails and TermShort structs
    fn is_interesting_parent(&self, termid: &str, rel_name: &str) -> bool {
        self.possible_interesting_parents.contains(&InterestingParent {
            termid: termid.into(),
            rel_name: rel_name.into(),
        })
    }

    /// Walk the cvtermpath closure: propagate annotations from child terms to
    /// their ancestor terms (for the configured relation types) and build the
    /// `children_by_termid` index used by slims and term pages.
    fn process_cvtermpath(&mut self) {
        let mut slim_termids = HashSet::new();
        for slim_config in self.config.slims.values() {
            for term_and_name in &slim_config.terms {
                slim_termids.insert(term_and_name.termid.clone());
            }
        }

        // (CV name, ancestor termid) -> source termid -> annotation id -> rel names
        let mut new_annotations: HashMap<(CvName, TermId), HashMap<TermId, HashMap<i32, HashSet<RelName>>>> =
            HashMap::new();

        let mut children_by_termid: HashMap<TermId, HashSet<TermId>> = HashMap::new();

        for cvtermpath in &self.raw.cvtermpaths {
            let subject_term = &cvtermpath.subject;
            let subject_termid = subject_term.termid();
            let object_term = &cvtermpath.object;
            let object_termid = object_term.termid();

            if let Some(subject_term_details) = self.terms.get(&subject_termid) {
                let rel_termid =
                    match cvtermpath.rel_type {
                        Some(ref rel_type) => {
                            rel_type.termid()
                        },
                        None => panic!("no relation type for {} <-> {}\n",
                                       &subject_term.name, &object_term.name)
                    };

                let rel_term_name =
                    self.make_term_short(&rel_termid).name;

                // has_part only propagates within the configured CVs
                if rel_term_name == "has_part" &&
                    !HAS_PART_CV_NAMES.contains(&subject_term_details.cv_name.as_str()) {
                    continue;
                }

                if !DESCENDANT_REL_NAMES.contains(&rel_term_name.as_str()) {
                    continue;
                }

                // only index children that carry annotation or that a slim needs
                if subject_term_details.cv_annotations.keys().len() > 0 ||
                    slim_termids.contains(&object_termid) {
                    children_by_termid
                        .entry(object_termid.clone())
                        .or_insert_with(HashSet::new)
                        .insert(subject_termid.clone());
                }

                for (cv_name, term_annotations) in &subject_term_details.cv_annotations {
                    for term_annotation in term_annotations {
                        for annotation_id in &term_annotation.annotations {
                            let dest_termid = object_termid.clone();
                            let source_termid = subject_termid.clone();

                            // NOT annotations are not propagated to ancestors
                            if !term_annotation.is_not {
                                new_annotations.entry((cv_name.clone(), dest_termid))
                                    .or_insert_with(HashMap::new)
                                    .entry(source_termid)
                                    .or_insert_with(HashMap::new)
                                    .entry(*annotation_id)
                                    .or_insert_with(HashSet::new)
                                    .insert(rel_term_name.clone());
                            }
                        }
                    }
                }
            } else {
                panic!("TermDetails not found for {}", &subject_termid);
            }
        }

        for ((dest_cv_name, dest_termid), dest_annotations_map) in new_annotations.drain() {
            for (source_termid, source_annotations_map) in dest_annotations_map {
                let mut new_annotations: Vec<OntAnnotationId> = vec![];
                let mut all_rel_names: HashSet<RcString> = HashSet::new();
                for (annotation_id, rel_names) in source_annotations_map {
                    new_annotations.push(annotation_id);
                    for rel_name in rel_names {
                        all_rel_names.insert(rel_name);
                    }
                }

                let new_annotations =
                    self.make_term_annotations(&source_termid, &new_annotations, false);

                let dest_term_details = {
                    self.terms.get_mut(&dest_termid).unwrap()
                };

                for (_, new_annotation) in new_annotations {
                    let mut new_annotation_clone = new_annotation.clone();
                    new_annotation_clone.rel_names.extend(all_rel_names.clone());
                    dest_term_details.cv_annotations
                        .entry(dest_cv_name.clone())
                        .or_insert_with(Vec::new)
                        .push(new_annotation_clone);
                }
            }
        }

        let mut term_names = HashMap::new();
        for (termid, term_details) in &self.terms {
            term_names.insert(termid.clone(), term_details.name.to_lowercase());
        }

        for term_details in self.terms.values_mut() {
            let term_details_termid = &term_details.termid;
            for term_annotations in term_details.cv_annotations.values_mut() {
                let ont_term_cmp = |ont_term_1: &OntTermAnnotations, ont_term_2: &OntTermAnnotations| {
                    if ont_term_1.term == ont_term_2.term {
                        return Ordering::Equal;
                    }
                    // put direct annotation first on page
                    if ont_term_1.term == *term_details_termid {
                        return Ordering::Less;
                    }
                    if ont_term_2.term == *term_details_termid {
                        return Ordering::Greater;
                    }
                    if !ont_term_1.is_not && ont_term_2.is_not {
                        return Ordering::Less;
                    }
                    if ont_term_1.is_not && !ont_term_2.is_not {
                        return Ordering::Greater;
                    }
                    let term1 = &term_names[&ont_term_1.term];
                    let term2 = &term_names[&ont_term_2.term];
                    term1.cmp(term2)
                };
                term_annotations.sort_by(&ont_term_cmp);
            }
        }

        self.children_by_termid = children_by_termid;
    }

    /// Collect export metadata: database creation timestamp, CV versions and
    /// the exporter crate's name/version (from Cargo environment variables).
    fn make_metadata(&mut self) -> Metadata {
        let mut db_creation_datetime = None;

        for chadoprop in &self.raw.chadoprops {
            if chadoprop.prop_type.name == "db_creation_datetime" {
                db_creation_datetime = chadoprop.value.clone();
            }
        }

        let mut cv_versions = HashMap::new();

        for cvprop in &self.raw.cvprops {
            if cvprop.prop_type.name == "cv_version" {
                cv_versions.insert(cvprop.cv.name.clone(), cvprop.value.clone());
            }
        }

        const PKG_NAME: &str = env!("CARGO_PKG_NAME");
        const VERSION: &str = env!("CARGO_PKG_VERSION");

        Metadata {
            export_prog_name: RcString::from(PKG_NAME),
            export_prog_version: RcString::from(VERSION),
            db_creation_datetime: db_creation_datetime.unwrap(),
            gene_count: self.genes.len(),
            term_count: self.terms.len(),
            cv_versions,
        }
    }

    /// For each term, collect its genotype annotations in the shape the API
    /// server needs (one APIGenotypeAnnotation per annotation detail).
    pub fn get_api_genotype_annotation(&self)
                                       -> HashMap<TermId, Vec<APIGenotypeAnnotation>>
    {
        let mut app_genotype_annotation = HashMap::new();

        for term_details in self.terms.values() {
            for annotations_vec in term_details.cv_annotations.values() {
                for ont_term_annotations in annotations_vec {
                    'DETAILS: for annotation_id in &ont_term_annotations.annotations {
                        let annotation_details = self.annotation_details.
get(annotation_id).expect("can't find OntAnnotationDetail");
                    if annotation_details.genotype.is_none() {
                        continue 'DETAILS;
                    }
                    let genotype_uniquename = annotation_details.genotype.clone().unwrap();
                    let genotype = &term_details.genotypes_by_uniquename[&genotype_uniquename];
                    // resolve condition term IDs to (termid, name) pairs
                    let conditions = annotation_details.conditions.iter()
                        .map(|cond_termid| {
                            let cond_term = self.terms.get(cond_termid).unwrap();
                            TermAndName {
                                termid: cond_term.termid.clone(),
                                name: cond_term.name.clone(),
                            }
                        })
                        .collect::<HashSet<_>>();
                    let mut api_annotation = APIGenotypeAnnotation {
                        // more than one locus means a multi-locus genotype
                        is_multi: genotype.loci.len() > 1,
                        ploidiness: genotype.ploidiness(),
                        conditions,
                        alleles: vec![],
                    };
                    // flatten every expressed allele of every locus into the annotation
                    for locus in &genotype.loci {
                        for allele in &locus.expressed_alleles {
                            let allele_uniquename = &allele.allele_uniquename;
                            let allele_short =
                                self.alleles.get(allele_uniquename).expect("Can't find allele");
                            let allele_gene_uniquename =
                                allele_short.gene_uniquename.clone();
                            let allele_details = APIAlleleDetails {
                                gene: allele_gene_uniquename,
                                allele_type: allele_short.allele_type.clone(),
                                expression: allele.expression.clone(),
                            };
                            api_annotation.alleles.push(allele_details);
                        }
                    }
                    app_genotype_annotation
                        .entry(term_details.termid.clone())
                        .or_insert_with(Vec::new)
                        .push(api_annotation);
                }
            }
        }
    }
    app_genotype_annotation
}

/// Protein summary for the gene-results visualisation: molecular weight
/// (rounded to 2 decimal places), protein length, and the configured length
/// bin that length falls into.  Uses the first transcript that has a protein.
fn make_protein_data(&self, gene_details: &GeneDetails)
    -> (Option<f32>, Option<usize>, Option<GeneQueryAttrName>)
{
    let mut molecular_weight = None;
    let mut protein_length = None;

    for transcript_uniquename in &gene_details.transcripts {
        if let Some(transcript) = self.transcripts.get(transcript_uniquename) {
            if let Some(ref protein) = transcript.protein {
                // round to 2 decimal places
                molecular_weight = Some((100.0 * protein.molecular_weight).round() / 100.0);
                // don't count a trailing stop codon ('*') in the length
                if protein.sequence.ends_with('*') {
                    protein_length = Some(protein.sequence.len() - 1);
                } else {
                    protein_length = Some(protein.sequence.len());
                }
                break;
            }
        }
    }

    // find the configured bin (if any) that the protein length falls into
    for field_name in &self.config.gene_results.visualisation_field_names {
        let column_conf =
&self.config.gene_results.field_config[field_name];

        for attr_value_conf in &column_conf.attr_values {
            if let (Some(ref bin_start), Some(ref bin_end)) =
                (attr_value_conf.bin_start, attr_value_conf.bin_end) {
                if let Some(prot_len) = protein_length {
                    // inclusive bin range check
                    if *bin_start <= prot_len && *bin_end >= prot_len {
                        return (molecular_weight, Some(prot_len),
                                Some(attr_value_conf.name.clone()));
                    }
                }
            }
        }
    }

    // no protein found, or length matched no configured bin
    (None, None, None)
}

/// Classify a gene for the query/results page against a list of "interesting"
/// terms from the config: returns the first configured term the gene is
/// annotated with (directly or via an interesting parent), Other if annotated
/// in the CV but matching none of them, or None if the gene has no
/// annotations in `cv_name` at all.
fn make_gene_query_go_data(&self, gene_details: &GeneDetails, term_config: &[TermId],
                           cv_name: &str) -> Option<GeneQueryTermData>
{
    // `?`: bail out with None when the gene has no annotation in this CV
    let component_term_annotations = gene_details.cv_annotations.get(cv_name)?;

    // true iff some non-NOT annotation is to check_termid or to a term that
    // has check_termid among its interesting parents
    let in_component = |check_termid: &str| {
        for term_annotation in component_term_annotations {
            let maybe_term_details = self.terms.get(&term_annotation.term);
            let term_details = maybe_term_details
                .unwrap_or_else(|| {
                    panic!("can't find TermDetails for {}", &term_annotation.term)
                });
            let interesting_parent_ids = &term_details.interesting_parent_ids;
            if !term_annotation.is_not &&
                (term_annotation.term == check_termid ||
                 interesting_parent_ids.contains(check_termid)) {
                return true;
            }
        }
        false
    };

    // first match in config order wins
    for go_component_termid in term_config {
        if in_component(go_component_termid) {
            return Some(GeneQueryTermData::Term(TermAndName {
                termid: go_component_termid.to_owned(),
                name: self.terms.get(go_component_termid).unwrap().name.clone(),
            }));
        }
    }

    Some(GeneQueryTermData::Other)
}

/// The set of taxon IDs this gene has orthologs in.
fn get_ortholog_taxonids(&self, gene_details: &GeneDetails) -> HashSet<u32> {
    let mut return_set = HashSet::new();
    for ortholog_annotation in &gene_details.ortholog_annotations {
        return_set.insert(ortholog_annotation.ortholog_taxonid);
    }
    return_set
}

/// The set of genes this gene physically interacts with, taking whichever
/// side of each interaction record is not this gene.
fn get_physical_interactors(&self, gene_details: &GeneDetails) -> HashSet<GeneUniquename> {
    let mut return_set = HashSet::new();
    for physical_interaction in &gene_details.physical_interactions {
        if gene_details.uniquename == physical_interaction.gene_uniquename {
            return_set.insert(physical_interaction.interactor_uniquename.clone());
        } else {
            // gene is the prey for this interaction
            return_set.insert(physical_interaction.gene_uniquename.clone());
        }
    }
    return_set
}

/// Build the per-gene GeneQueryData map used by the advanced search /
/// query builder: GO classifications, interactors, ortholog taxa, protein
/// stats, transmembrane-domain presence etc.
fn make_gene_query_data_map(&self) -> HashMap<GeneUniquename, GeneQueryData> {
    let mut gene_query_data_map = HashMap::new();

    for gene_details in self.genes.values() {
        let ortholog_taxonids = self.get_ortholog_taxonids(gene_details);
        let physical_interactors = self.get_physical_interactors(gene_details);

        // gather the configured "interesting" term IDs per visualisation column
        let mut cc_terms = vec![];
        let mut process_terms = vec![];
        let mut function_terms = vec![];
        for field_name in &self.config.gene_results.visualisation_field_names {
            let column_conf = &self.config.gene_results.field_config[field_name];
            for attr_value_conf in &column_conf.attr_values {
                if let Some(ref termid) = attr_value_conf.termid {
                    match field_name.as_ref() {
                        "go_component" => cc_terms.push(termid.clone()),
                        "go_process_superslim" => process_terms.push(termid.clone()),
                        "go_function" => function_terms.push(termid.clone()),
                        _ => (),
                    }
                }
            }
        }

        let go_component =
            self.make_gene_query_go_data(gene_details, &cc_terms, "cellular_component");
        let go_process_superslim =
            self.make_gene_query_go_data(gene_details, &process_terms, "biological_process");
        let go_function =
            self.make_gene_query_go_data(gene_details, &function_terms, "molecular_function");

        // transmembrane-domain presence only applies to protein-coding genes
        let tmm = if gene_details.feature_type == "mRNA gene" {
            if gene_details.tm_domain_coords.is_empty() {
                Some(PresentAbsent::Absent)
            } else {
                Some(PresentAbsent::Present)
            }
        } else {
            Some(PresentAbsent::NotApplicable)
        };

        let (molecular_weight, protein_length, protein_length_bin) =
            self.make_protein_data(gene_details);

        let gene_query_data = GeneQueryData {
            gene_uniquename: gene_details.uniquename.clone(),
            deletion_viability: gene_details.deletion_viability.clone(),
            go_component,
            go_process_superslim,
            go_function,
            characterisation_status: gene_details.characterisation_status.clone(),
            taxonomic_distribution: gene_details.taxonomic_distribution.clone(),
            tmm,
            ortholog_taxonids,
            physical_interactors,
            molecular_weight,
protein_length,
            protein_length_bin,
            subset_termids: gene_details.subset_termids.clone(),
        };
        gene_query_data_map.insert(gene_details.uniquename.clone(), gene_query_data);
    }

    gene_query_data_map
}

/// Consume `self` and assemble the APIMaps structure served by the API
/// process: gene/term/allele/genotype lookup maps, per-gene interactor
/// lists, term subsets etc.  Takes `self` by value so owned maps can be
/// moved into the result without cloning.
pub fn make_api_maps(mut self) -> APIMaps {
    let mut gene_summaries: HashMap<GeneUniquename, APIGeneSummary> = HashMap::new();
    let mut gene_name_gene_map = HashMap::new();
    let mut interactors_of_genes = HashMap::new();

    for (gene_uniquename, gene_details) in &self.genes {
        // only include genes of the configured load organism (or all genes
        // when no load organism is configured)
        if self.config.load_organism_taxonid.is_none() ||
            self.config.load_organism_taxonid.unwrap() == gene_details.taxonid {
            let gene_summary = self.make_api_gene_summary(gene_uniquename);
            if let Some(ref gene_name) = gene_summary.name {
                gene_name_gene_map.insert(gene_name.clone(), gene_uniquename.clone());
            }
            gene_summaries.insert(gene_uniquename.clone(), gene_summary);

            let mut interactors = vec![];

            // each interaction record names two genes; keep the one that
            // isn't this gene
            for interaction_annotation in &gene_details.physical_interactions {
                let interactor_uniquename =
                    if gene_uniquename == &interaction_annotation.gene_uniquename {
                        interaction_annotation.interactor_uniquename.clone()
                    } else {
                        interaction_annotation.gene_uniquename.clone()
                    };
                let interactor = APIInteractor {
                    interaction_type: InteractionType::Physical,
                    interactor_uniquename,
                };
                // linear de-dup keeps first-seen order
                if !interactors.contains(&interactor) {
                    interactors.push(interactor);
                }
            }
            for interaction_annotation in &gene_details.genetic_interactions {
                let interactor_uniquename =
                    if gene_uniquename == &interaction_annotation.gene_uniquename {
                        interaction_annotation.interactor_uniquename.clone()
                    } else {
                        interaction_annotation.gene_uniquename.clone()
                    };
                let interactor = APIInteractor {
                    interaction_type: InteractionType::Genetic,
                    interactor_uniquename,
                };
                if !interactors.contains(&interactor) {
                    interactors.push(interactor);
                }
            }
            interactors_of_genes.insert(gene_uniquename.clone(), interactors);
        }
    }

    let gene_query_data_map = self.make_gene_query_data_map();

    let mut term_summaries: HashSet<TermShort> = HashSet::new();
    let mut termid_genes: HashMap<TermId, HashSet<GeneUniquename>> = HashMap::new();
    let mut terms_for_api: HashMap<TermId, TermDetails> = HashMap::new();

    for termid in self.terms.keys() {
        term_summaries.insert(self.make_term_short(termid));
    }

    let termid_genotype_annotation: HashMap<TermId, Vec<APIGenotypeAnnotation>> =
        self.get_api_genotype_annotation();

    // drain() moves the TermDetails out of self.terms so they can be stored
    // in terms_for_api without cloning
    for (termid, term_details) in self.terms.drain() {
        let cv_config = &self.config.cv_config;
        if let Some(term_config) = cv_config.get(&term_details.cv_name) {
            // only gene-feature CVs contribute to the termid -> genes map
            if term_config.feature_type == "gene" {
                termid_genes.insert(termid.clone(),
                                    term_details.genes_annotated_with.clone());
            }
        }
        terms_for_api.insert(termid.clone(), term_details);
    }

    // features shown on the sequence feature page, filtered by configured
    // SO types, with residues stripped to keep the payload small
    let seq_feature_page_features: Vec<FeatureShort> =
        self.other_features.values()
        .filter(|feature_short| {
            let so_types_to_show =
                &self.config.sequence_feature_page.so_types_to_show;
            let feature_type_string = feature_short.feature_type.to_string();
            so_types_to_show.contains(&feature_type_string)
        })
        .map(|feature_short| {
            let mut new_feature = feature_short.clone();
            // we don't need the residues for the seq feature page
            new_feature.residues = RcString::new();
            new_feature
        }).collect();

    // avoid clone(): swap the owned maps out of self
    let mut term_subsets = HashMap::new();
    std::mem::swap(&mut term_subsets, &mut self.term_subsets);
    let mut gene_subsets = HashMap::new();
    std::mem::swap(&mut gene_subsets, &mut self.gene_subsets);
    let mut children_by_termid = HashMap::new();
    std::mem::swap(&mut children_by_termid, &mut self.children_by_termid);
    let mut gene_expression_measurements = HashMap::new();
    std::mem::swap(&mut gene_expression_measurements,
                   &mut self.gene_expression_measurements);

    APIMaps {
        gene_summaries,
        gene_query_data_map,
        termid_genes,
        termid_genotype_annotation,
        term_summaries,
        genes: self.genes,
        gene_name_gene_map,
        transcripts: self.transcripts,
        alleles: self.alleles,
        genotypes: self.genotypes,
        terms: terms_for_api,
        interactors_of_genes,
        references: self.references,
        other_features: self.other_features,
        seq_feature_page_features,
        annotation_details:
self.annotation_details,
        chromosomes: self.chromosomes,
        term_subsets,
        gene_subsets,
        children_by_termid,
        gene_expression_measurements,
    }
}

/// Walk one feature's CV annotation map and register every entity it touches
/// (terms, genes, transcripts, genotypes, alleles, references) into the
/// caller's "seen" maps under `identifier`, so detail pages can resolve the
/// short forms of everything they mention.
fn add_cv_annotations_to_maps(&self,
                              identifier: &RcString,
                              cv_annotations: &OntAnnotationMap,
                              seen_references: &mut HashMap<RcString, ReferenceShortOptionMap>,
                              seen_genes: &mut HashMap<RcString, GeneShortOptionMap>,
                              seen_genotypes: &mut HashMap<RcString, GenotypeShortMap>,
                              seen_alleles: &mut HashMap<RcString, AlleleShortMap>,
                              seen_transcripts: &mut HashMap<RcString, TranscriptDetailsOptionMap>,
                              seen_terms: &mut HashMap<RcString, TermShortOptionMap>) {
    for feat_annotations in cv_annotations.values() {
        for feat_annotation in feat_annotations.iter() {
            self.add_term_to_hash(seen_terms, identifier, &feat_annotation.term);

            for annotation_detail_id in &feat_annotation.annotations {
                let annotation_detail = self.annotation_details.
                    get(annotation_detail_id).expect("can't find OntAnnotationDetail");

                for transcript_uniquename in &annotation_detail.transcript_uniquenames {
                    self.add_transcript_to_hashes(seen_transcripts, seen_genes,
                                                  identifier, transcript_uniquename);
                }

                self.add_ref_to_hash(seen_references, identifier,
                                     &annotation_detail.reference);
                for condition_termid in &annotation_detail.conditions {
                    self.add_term_to_hash(seen_terms, identifier, condition_termid);
                }

                // gene product form IDs that are PRO terms are shown as terms
                if let Some(ref gene_product_form_id) =
                    annotation_detail.gene_product_form_id {
                    if gene_product_form_id.starts_with("PR:") {
                        self.add_term_to_hash(seen_terms, identifier,
                                              gene_product_form_id);
                    }
                }

                // extension ranges can reference terms, genes or transcripts
                for ext_part in &annotation_detail.extension {
                    match ext_part.ext_range {
                        ExtRange::Term(ref range_termid) |
                        ExtRange::GeneProduct(ref range_termid) =>
                            self.add_term_to_hash(seen_terms, identifier,
                                                  range_termid),
                        ExtRange::Gene(ref gene_uniquename) |
                        ExtRange::Promoter(ref gene_uniquename) =>
                            self.add_gene_to_hash(seen_genes, identifier,
                                                  gene_uniquename),
                        ExtRange::Transcript(ref transcript_uniquename) =>
                            self.add_transcript_to_hashes(seen_transcripts,
                                                          seen_genes,
                                                          identifier,
transcript_uniquename),
                        _ => {},
                    }
                }

                if let Some(ref genotype_uniquename) = annotation_detail.genotype {
                    self.add_genotype_to_hash(seen_genotypes, seen_alleles, seen_genes,
                                              identifier, genotype_uniquename);
                }

                // with/from values can reference genes or transcripts
                let with_from_iter = annotation_detail.withs
                    .iter()
                    .chain(annotation_detail.froms.iter());
                for with_from_value in with_from_iter {
                    match with_from_value {
                        WithFromValue::Gene(ref gene_short) => {
                            self.add_gene_to_hash(seen_genes, identifier,
                                                  &gene_short.uniquename)
                        },
                        &WithFromValue::Transcript(ref transcript_uniquename) => {
                            self.add_transcript_to_hashes(seen_transcripts, seen_genes,
                                                          identifier,
                                                          transcript_uniquename);
                        },
                        _ => (),
                    }
                }
            }
        }
    }
}

/// For every term, collect all entities referenced by its annotations into
/// the term's *_by_uniquename lookup maps, and record which genes are
/// annotated with the term (excluding extension CVs).
fn set_term_details_maps(&mut self) {
    let (mut seen_references, mut seen_genes, mut seen_genotypes,
         mut seen_alleles, mut seen_transcripts, mut seen_terms) = get_maps();

    let mut genes_annotated_with_map: HashMap<TermId, HashSet<GeneUniquename>> =
        HashMap::new();

    for (termid, term_details) in &self.terms {
        // PMID definition xrefs that match a known reference become links
        for xref in &term_details.definition_xrefs {
            if xref.starts_with("PMID:") && self.references.contains_key(xref) {
                self.add_ref_to_hash(&mut seen_references, termid,
                                     &Some(xref.clone()));
            }
        }

        for (cv_name, term_annotations) in &term_details.cv_annotations {
            for term_annotation in term_annotations {
                self.add_term_to_hash(&mut seen_terms, termid,
                                      &term_annotation.term);

                for annotation_detail_id in &term_annotation.annotations {
                    let annotation_detail = self.annotation_details
                        .get(annotation_detail_id).expect("can't find OntAnnotationDetail");

                    for gene_uniquename in &annotation_detail.genes {
                        self.add_gene_to_hash(&mut seen_genes, termid,
                                              gene_uniquename);
                        if !cv_name.starts_with("extension:") {
                            // prevent extension annotations from appearing
                            // in the normal query builder searches
                            genes_annotated_with_map
                                .entry(termid.clone()).or_insert_with(HashSet::new)
                                .insert(gene_uniquename.clone());
                        }
                    }

                    for transcript_uniquename in &annotation_detail.transcript_uniquenames {
                        self.add_transcript_to_hashes(&mut seen_transcripts,
                                                      &mut seen_genes,
                                                      termid, transcript_uniquename);
                    }

                    self.add_ref_to_hash(&mut seen_references, termid,
                                         &annotation_detail.reference);
                    for condition_termid in &annotation_detail.conditions {
                        self.add_term_to_hash(&mut seen_terms, termid,
                                              condition_termid);
                    }

                    // PRO-term gene product form IDs are shown as terms
                    if let Some(ref gene_product_form_id) =
                        annotation_detail.gene_product_form_id {
                        if gene_product_form_id.starts_with("PR:") {
                            self.add_term_to_hash(&mut seen_terms, termid,
                                                  gene_product_form_id);
                        }
                    }

                    for ext_part in &annotation_detail.extension {
                        match ext_part.ext_range {
                            ExtRange::Term(ref range_termid) |
                            ExtRange::GeneProduct(ref range_termid) =>
                                self.add_term_to_hash(&mut seen_terms, termid,
                                                      range_termid),
                            ExtRange::Gene(ref gene_uniquename) |
                            ExtRange::Promoter(ref gene_uniquename) =>
                                self.add_gene_to_hash(&mut seen_genes, termid,
                                                      gene_uniquename),
                            ExtRange::Transcript(ref transcript_uniquename) =>
                                self.add_transcript_to_hashes(&mut seen_transcripts,
                                                              &mut seen_genes, termid,
                                                              transcript_uniquename),
                            _ => {},
                        }
                    }

                    if let Some(ref genotype_uniquename) = annotation_detail.genotype {
                        self.add_genotype_to_hash(&mut seen_genotypes,
                                                  &mut seen_alleles,
                                                  &mut seen_genes,
                                                  termid, genotype_uniquename);
                    }

                    let with_from_iter = annotation_detail.withs
                        .iter()
                        .chain(annotation_detail.froms.iter());
                    for with_from_value in with_from_iter {
                        match with_from_value {
                            WithFromValue::Gene(ref gene_short) => {
                                self.add_gene_to_hash(&mut seen_genes, termid,
                                                      &gene_short.uniquename)
                            },
                            &WithFromValue::Transcript(ref transcript_uniquename) => {
                                self.add_transcript_to_hashes(&mut seen_transcripts,
                                                              &mut seen_genes, termid,
                                                              transcript_uniquename);
                            },
                            _ => (),
                        }
                    }
                }
            }
        }
    }

    // move the collected maps into each TermDetails
    for (termid, term_details) in &mut self.terms {
        if let Some(genes) = seen_genes.remove(termid) {
            term_details.genes_by_uniquename = genes;
        }
        if let Some(genotypes) = seen_genotypes.remove(termid) {
            term_details.genotypes_by_uniquename = genotypes;
        }
        if let Some(alleles) = seen_alleles.remove(termid) {
            term_details.alleles_by_uniquename = alleles;
        }
        if let Some(references) =
seen_references.remove(termid) {
            term_details.references_by_uniquename = references;
        }
        if let Some(transcripts) = seen_transcripts.remove(termid) {
            term_details.transcripts_by_uniquename = transcripts;
        }
        if let Some(terms) = seen_terms.remove(termid) {
            term_details.terms_by_termid = terms;
        }
        if let Some(gene_uniquename_set) = genes_annotated_with_map.remove(termid) {
            term_details.genes_annotated_with = gene_uniquename_set;
        }
    }
}

/// For every gene, collect all entities referenced by its annotations,
/// interactions, orthologs/paralogs, target-of annotations and publications
/// into the gene's *_by_uniquename lookup maps.
fn set_gene_details_maps(&mut self) {
    let (mut seen_references, mut seen_genes, mut seen_genotypes,
         mut seen_alleles, mut seen_transcripts, mut seen_terms) = get_maps();

    {
        for (gene_uniquename, gene_details) in &self.genes {
            self.add_cv_annotations_to_maps(gene_uniquename,
                                            &gene_details.cv_annotations,
                                            &mut seen_references,
                                            &mut seen_genes,
                                            &mut seen_genotypes,
                                            &mut seen_alleles,
                                            &mut seen_transcripts,
                                            &mut seen_terms);

            for transcript_uniquename in &gene_details.transcripts {
                self.add_transcript_to_hashes(&mut seen_transcripts,
                                              &mut seen_genes,
                                              gene_uniquename, transcript_uniquename);
            }

            // both interaction partners and the supporting reference
            let interaction_iter =
                gene_details.physical_interactions.iter().chain(&gene_details.genetic_interactions);
            for interaction in interaction_iter {
                self.add_ref_to_hash(&mut seen_references, gene_uniquename,
                                     &interaction.reference_uniquename);
                self.add_gene_to_hash(&mut seen_genes, gene_uniquename,
                                      &interaction.gene_uniquename);
                self.add_gene_to_hash(&mut seen_genes, gene_uniquename,
                                      &interaction.interactor_uniquename);
            }

            for ortholog_annotation in &gene_details.ortholog_annotations {
                self.add_ref_to_hash(&mut seen_references, gene_uniquename,
                                     &ortholog_annotation.reference_uniquename);
                self.add_gene_to_hash(&mut seen_genes, gene_uniquename,
                                      &ortholog_annotation.gene_uniquename);
                self.add_gene_to_hash(&mut seen_genes, gene_uniquename,
                                      &ortholog_annotation.ortholog_uniquename);
            }
            for paralog_annotation in &gene_details.paralog_annotations {
                self.add_ref_to_hash(&mut seen_references, gene_uniquename,
                                     &paralog_annotation.reference_uniquename);
                self.add_gene_to_hash(&mut seen_genes, gene_uniquename,
                                      &paralog_annotation.gene_uniquename);
                self.add_gene_to_hash(&mut seen_genes, gene_uniquename,
                                      &paralog_annotation.paralog_uniquename);
            }
            for target_of_annotation in &gene_details.target_of_annotations {
                let target_of_gene = &target_of_annotation.gene;
                self.add_gene_to_hash(&mut seen_genes, gene_uniquename,
                                      target_of_gene);
                if let Some(ref annotation_genotype_uniquename) =
                    target_of_annotation.genotype_uniquename {
                    self.add_genotype_to_hash(&mut seen_genotypes,
                                              &mut seen_alleles,
                                              &mut seen_genes,
                                              gene_uniquename,
                                              annotation_genotype_uniquename)
                }
                self.add_ref_to_hash(&mut seen_references, gene_uniquename,
                                     &target_of_annotation.reference_uniquename);
            }

            for publication in &gene_details.feature_publications {
                self.add_ref_to_hash(&mut seen_references, gene_uniquename,
                                     &Some(publication.clone()));
            }
        }
    }

    // move the collected maps into each GeneDetails
    for (gene_uniquename, gene_details) in &mut self.genes {
        if let Some(references) = seen_references.remove(gene_uniquename) {
            gene_details.references_by_uniquename = references;
        }
        if let Some(alleles) = seen_alleles.remove(gene_uniquename) {
            gene_details.alleles_by_uniquename = alleles;
        }
        if let Some(genes) = seen_genes.remove(gene_uniquename) {
            gene_details.genes_by_uniquename = genes;
        }
        if let Some(genotypes) = seen_genotypes.remove(gene_uniquename) {
            gene_details.genotypes_by_uniquename = genotypes;
        }
        if let Some(transcripts) = seen_transcripts.remove(gene_uniquename) {
            gene_details.transcripts_by_uniquename = transcripts;
        }
        if let Some(terms) = seen_terms.remove(gene_uniquename) {
            gene_details.terms_by_termid = terms;
        }
    }
}

/// For every genotype, collect all entities referenced by its annotations
/// into the genotype's lookup maps.
fn set_genotype_details_maps(&mut self) {
    let (mut seen_references, mut seen_genes, mut seen_genotypes,
         mut seen_alleles, mut seen_transcripts, mut seen_terms) = get_maps();

    for (genotype_uniquename, genotype_details) in &self.genotypes {
        self.add_cv_annotations_to_maps(genotype_uniquename,
                                        &genotype_details.cv_annotations,
                                        &mut seen_references,
                                        &mut seen_genes,
                                        &mut seen_genotypes,
                                        &mut
seen_alleles,
                                        &mut seen_transcripts,
                                        &mut seen_terms);
    }

    // move the collected maps into each GenotypeDetails
    for (genotype_uniquename, genotype_details) in &mut self.genotypes {
        if let Some(references) = seen_references.remove(genotype_uniquename) {
            genotype_details.references_by_uniquename = references;
        }
        if let Some(alleles) = seen_alleles.remove(genotype_uniquename) {
            genotype_details.alleles_by_uniquename = alleles;
        }
        // NOTE(review): the binding is named `genotypes` but it comes from
        // seen_genes and is stored in genes_by_uniquename — the assignment
        // itself looks correct, only the local name is misleading
        if let Some(genotypes) = seen_genes.remove(genotype_uniquename) {
            genotype_details.genes_by_uniquename = genotypes;
        }
        if let Some(transcripts) = seen_transcripts.remove(genotype_uniquename) {
            genotype_details.transcripts_by_uniquename = transcripts;
        }
        if let Some(terms) = seen_terms.remove(genotype_uniquename) {
            genotype_details.terms_by_termid = terms;
        }
    }
}

/// For every reference, collect all entities its annotations and
/// interactions mention into the reference's lookup maps, and compute the
/// reference's gene_count.
fn set_reference_details_maps(&mut self) {
    // for calculating the gene_count field, we don't include non-pombe genes
    let mut gene_count_hash: HashMap<RcString, GeneShortOptionMap> =
        HashMap::new();

    // only counts a gene when a load organism is configured and the gene
    // belongs to it
    let mut maybe_add_to_gene_count_hash =
        |reference_uniquename: &RcString, gene_uniquename: &GeneUniquename| {
            if let Some(load_org_taxonid) = self.config.load_organism_taxonid {
                if let Some(gene_details) = self.genes.get(gene_uniquename) {
                    if gene_details.taxonid == load_org_taxonid {
                        self.add_gene_to_hash(&mut gene_count_hash,
                                              reference_uniquename,
                                              gene_uniquename);
                    }
                }
            }
        };

    // references don't need a seen_references map, hence the `_`
    let (_, mut seen_genes, mut seen_genotypes,
         mut seen_alleles, mut seen_transcripts, mut seen_terms) = get_maps();

    {
        for (reference_uniquename, reference_details) in &self.references {
            for feat_annotations in reference_details.cv_annotations.values() {
                for feat_annotation in feat_annotations.iter() {
                    self.add_term_to_hash(&mut seen_terms, reference_uniquename,
                                          &feat_annotation.term);

                    for annotation_detail_id in &feat_annotation.annotations {
                        let annotation_detail = self.annotation_details
                            .get(annotation_detail_id).expect("can't find OntAnnotationDetail");

                        for transcript_uniquename in
                            &annotation_detail.transcript_uniquenames {
                                self.add_transcript_to_hashes(&mut seen_transcripts,
                                                              &mut seen_genes,
                                                              reference_uniquename,
                                                              transcript_uniquename);
                            }

                        for gene_uniquename in &annotation_detail.genes {
                            self.add_gene_to_hash(&mut seen_genes,
                                                  reference_uniquename,
                                                  gene_uniquename);
                            maybe_add_to_gene_count_hash(reference_uniquename,
                                                         gene_uniquename);
                        }
                        for condition_termid in &annotation_detail.conditions {
                            self.add_term_to_hash(&mut seen_terms,
                                                  reference_uniquename,
                                                  condition_termid);
                        }

                        // PRO-term gene product form IDs are shown as terms
                        if let Some(ref gene_product_form_id) =
                            annotation_detail.gene_product_form_id {
                                if gene_product_form_id.starts_with("PR:") {
                                    self.add_term_to_hash(&mut seen_terms,
                                                          reference_uniquename,
                                                          gene_product_form_id);
                                }
                            }

                        for ext_part in &annotation_detail.extension {
                            match ext_part.ext_range {
                                ExtRange::Term(ref range_termid) |
                                ExtRange::GeneProduct(ref range_termid) =>
                                    self.add_term_to_hash(&mut seen_terms,
                                                          reference_uniquename,
                                                          range_termid),
                                ExtRange::Gene(ref gene_uniquename) |
                                ExtRange::Promoter(ref gene_uniquename) => {
                                    self.add_gene_to_hash(&mut seen_genes,
                                                          reference_uniquename,
                                                          gene_uniquename);
                                    maybe_add_to_gene_count_hash(reference_uniquename,
                                                                 gene_uniquename);
                                },
                                ExtRange::Transcript(ref transcript_uniquename) =>
                                    self.add_transcript_to_hashes(&mut seen_transcripts,
                                                                  &mut seen_genes,
                                                                  reference_uniquename,
                                                                  transcript_uniquename),
                                _ => {},
                            }
                        }

                        let with_from_iter = annotation_detail.withs
                            .iter()
                            .chain(annotation_detail.froms.iter());
                        for with_from_value in with_from_iter {
                            match with_from_value {
                                WithFromValue::Gene(ref gene_short) => {
                                    self.add_gene_to_hash(&mut seen_genes,
                                                          reference_uniquename,
                                                          &gene_short.uniquename);
                                    maybe_add_to_gene_count_hash(reference_uniquename,
                                                                 &gene_short.uniquename);
                                },
                                WithFromValue::Transcript(ref transcript_uniquename) => {
                                    self.add_transcript_to_hashes(&mut seen_transcripts,
                                                                  &mut seen_genes,
                                                                  reference_uniquename,
                                                                  transcript_uniquename);
                                },
                                _ => (),
                            }
                        }

                        if let Some(ref genotype_uniquename) = annotation_detail.genotype {
                            let genotype = self.make_genotype_short(genotype_uniquename);
                            self.add_genotype_to_hash(&mut seen_genotypes,
                                                      &mut seen_alleles,
                                                      &mut seen_genes,
                                                      reference_uniquename,
                                                      &genotype.display_uniquename);
                        }
                    }
                }
            }

            // interaction partners also count towards gene_count
            let interaction_iter =
                reference_details.physical_interactions.iter()
                .chain(&reference_details.genetic_interactions);
            for interaction in interaction_iter {
                self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                      &interaction.gene_uniquename);
                maybe_add_to_gene_count_hash(reference_uniquename,
                                             &interaction.gene_uniquename);
                self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                      &interaction.interactor_uniquename);
                maybe_add_to_gene_count_hash(reference_uniquename,
                                             &interaction.interactor_uniquename);
            }

            for ortholog_annotation in &reference_details.ortholog_annotations {
                self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                      &ortholog_annotation.gene_uniquename);
                maybe_add_to_gene_count_hash(reference_uniquename,
                                             &ortholog_annotation.gene_uniquename);
                self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                      &ortholog_annotation.ortholog_uniquename);
                maybe_add_to_gene_count_hash(reference_uniquename,
                                             &ortholog_annotation.ortholog_uniquename);
            }
            for paralog_annotation in &reference_details.paralog_annotations {
                self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                      &paralog_annotation.gene_uniquename);
                maybe_add_to_gene_count_hash(reference_uniquename,
                                             &paralog_annotation.gene_uniquename);
                self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                      &paralog_annotation.paralog_uniquename);
                maybe_add_to_gene_count_hash(reference_uniquename,
                                             &paralog_annotation.paralog_uniquename);
            }
        }
    }

    // move the collected maps into each ReferenceDetails
    for (reference_uniquename, reference_details) in &mut self.references {
        if let Some(genes) = seen_genes.remove(reference_uniquename) {
            reference_details.genes_by_uniquename = genes;
        }
        if let Some(genotypes) = seen_genotypes.remove(reference_uniquename) {
            reference_details.genotypes_by_uniquename = genotypes;
        }
        if let Some(alleles) = seen_alleles.remove(reference_uniquename) {
            reference_details.alleles_by_uniquename = alleles;
        }
        if let Some(terms) =
seen_terms.remove(reference_uniquename) {
            reference_details.terms_by_termid = terms;
        }
        if let Some(transcripts) = seen_transcripts.remove(reference_uniquename) {
            reference_details.transcripts_by_uniquename = transcripts;
        }
        if let Some(gene_count_genes) = gene_count_hash.remove(reference_uniquename) {
            reference_details.gene_count = gene_count_genes.len();
        }
    }
}

/// Compute per-term gene/genotype counts and the recent/curated reference
/// lists (stored on self).
pub fn set_counts(&mut self) {
    let mut term_seen_genes: HashMap<TermId, HashSet<GeneUniquename>> = HashMap::new();
    let mut term_seen_genotypes: HashMap<TermId, HashSet<GenotypeUniquename>> = HashMap::new();
    let mut term_seen_single_locus_genotypes: HashMap<TermId, HashSet<GenotypeUniquename>> =
        HashMap::new();
    let mut ref_seen_genes: HashMap<ReferenceUniquename, HashSet<GeneUniquename>> =
        HashMap::new();

    for (termid, term_details) in &self.terms {
        let mut seen_genes: HashSet<GeneUniquename> = HashSet::new();
        let mut seen_genotypes: HashSet<GenotypeUniquename> = HashSet::new();
        let mut seen_single_locus_genotypes: HashSet<GenotypeUniquename> = HashSet::new();
        for term_annotations in term_details.cv_annotations.values() {
            for term_annotation in term_annotations {
                for annotation_detail_id in &term_annotation.annotations {
                    let annotation_detail = self.annotation_details
                        .get(annotation_detail_id).expect("can't find OntAnnotationDetail");
                    for gene_uniquename in &annotation_detail.genes {
                        seen_genes.insert(gene_uniquename.clone());
                    }
                    if let Some(ref genotype_uniquename) = annotation_detail.genotype {
                        seen_genotypes.insert(genotype_uniquename.clone());
                        // single-locus = one locus with one expressed allele
                        let genotype = &self.genotypes[genotype_uniquename];
                        if genotype.loci.len() == 1 &&
                            genotype.loci[0].expressed_alleles.len() == 1 {
                                seen_single_locus_genotypes.insert(genotype_uniquename.clone());
                            }
                    }
                }
            }
        }
        term_seen_genes.insert(termid.clone(), seen_genes);
        term_seen_genotypes.insert(termid.clone(), seen_genotypes);
        term_seen_single_locus_genotypes.insert(termid.clone(),
                                                seen_single_locus_genotypes);
    }

    let mut all_published_uniquenames = vec![];

    for (reference_uniquename, reference_details) in &self.references {
        let mut seen_genes: HashSet<GeneUniquename> = HashSet::new();
        for rel_annotations in reference_details.cv_annotations.values() {
            for rel_annotation in rel_annotations {
                for annotation_detail_id in &rel_annotation.annotations {
                    let annotation_detail = self.annotation_details
                        .get(annotation_detail_id).expect("can't find OntAnnotationDetail");
                    // NOT-annotations don't count towards per-reference genes
                    if !rel_annotation.is_not {
                        for gene_uniquename in &annotation_detail.genes {
                            seen_genes.insert(gene_uniquename.clone());
                        }
                    }
                }
            }
        }
        let interaction_iter =
            reference_details.physical_interactions.iter().chain(&reference_details.genetic_interactions);
        for interaction in interaction_iter {
            seen_genes.insert(interaction.gene_uniquename.clone());
            seen_genes.insert(interaction.interactor_uniquename.clone());
        }
        for ortholog_annotation in &reference_details.ortholog_annotations {
            seen_genes.insert(ortholog_annotation.gene_uniquename.clone());
        }
        ref_seen_genes.insert(reference_uniquename.clone(), seen_genes);
        if reference_details.pubmed_publication_date.is_some() {
            all_published_uniquenames.push(reference_uniquename.clone());
        }
    }

    let (recent_admin_curated, recent_community_curated,
         all_community_curated, all_admin_curated) =
        make_canto_curated(&self.references, &all_published_uniquenames);

    let recent_references = RecentReferences {
        pubmed: make_recently_added(&self.references, &all_published_uniquenames),
        admin_curated: recent_admin_curated,
        community_curated: recent_community_curated,
    };

    self.recent_references = recent_references;
    self.all_community_curated = all_community_curated;
    self.all_admin_curated = all_admin_curated;

    for term_details in self.terms.values_mut() {
        term_details.single_locus_genotype_uniquenames =
            term_seen_single_locus_genotypes.remove(&term_details.termid).unwrap();
        term_details.gene_count =
            term_seen_genes[&term_details.termid].len();
        term_details.genotype_count =
            term_seen_genotypes[&term_details.termid].len();
    }
}

// make gene subsets for genes that are not in a slim category
fn make_non_slim_subset(&self, cv_name: &str, slim_subset: &TermSubsetDetails)
                        -> IdGeneSubsetMap
{
    let slim_termid_set: HashSet<RcString> =
        slim_subset.elements.keys().cloned().collect();

    let mut non_slim_with_bp_annotation = HashSet::new();
    let mut non_slim_without_bp_annotation = HashSet::new();

    // true iff some non-NOT annotation is to a slim term or to a term
    // whose interesting parents intersect the slim
    let has_parent_in_slim = |term_annotations: &Vec<OntTermAnnotations>| {
        for term_annotation in term_annotations {
            let interesting_parent_ids =
                &self.terms[&term_annotation.term].interesting_parent_ids;
            if !term_annotation.is_not &&
                (slim_termid_set.contains(&term_annotation.term) ||
                 interesting_parent_ids.intersection(&slim_termid_set).count() > 0)
            {
                return true;
            }
        }
        false
    };

    for gene_details in self.genes.values() {
        if let Some(load_organism_taxonid) = self.config.load_organism_taxonid {
            if load_organism_taxonid != gene_details.taxonid {
                continue;
            }
        }

        // only protein-coding genes are considered
        if gene_details.feature_type != "mRNA gene" {
            continue;
        }

        // transposon and dubious genes are excluded from both subsets
        if gene_details.characterisation_status == Some(RcString::from("transposon")) ||
            gene_details.characterisation_status == Some(RcString::from("dubious")) {
                continue;
            }

        let mut bp_count = 0;

        if let Some(annotations) = gene_details.cv_annotations.get(cv_name) {
            if has_parent_in_slim(annotations) {
                continue
            }
            bp_count = annotations.len();
        }

        if bp_count == 0 {
            non_slim_without_bp_annotation.insert(gene_details.uniquename.clone());
        } else {
            non_slim_with_bp_annotation.insert(gene_details.uniquename.clone());
        }
    }

    let mut return_map = HashMap::new();

    let cv_display_name = str::replace(cv_name, "_", " ");

    let with_annotation_display_name =
        String::from("Gene products with ") + &cv_display_name +
        " annotation that are not in a slim category";
    let name = RcString::from(&format!("non_slim_with_{}_annotation", cv_name));
    return_map.insert(name.clone(),
                      GeneSubsetDetails {
                          name,
                          display_name: RcString::from(&with_annotation_display_name),
                          elements: non_slim_with_bp_annotation,
                      });
    let without_annotation_display_name =
        String::from("Gene products with no ") + &cv_display_name +
        " annotation and are not in a slim category";
    let name = RcString::from(&format!("non_slim_without_{}_annotation", cv_name));
    return_map.insert(name.clone(),
                      GeneSubsetDetails {
                          name,
                          display_name: RcString::from(&without_annotation_display_name),
                          elements: non_slim_without_bp_annotation,
                      });
    return_map
}

/// Build a TermSubsetDetails for the named slim from the config, with a
/// per-term and total annotated-gene count.  Panics if the slim or any of
/// its terms is unknown.
fn make_slim_subset(&self, slim_name: &str) -> TermSubsetDetails {
    let mut all_genes = HashSet::new();
    let mut slim_subset: HashMap<TermId, TermSubsetElement> = HashMap::new();
    let slim_config = self.config.slims.get(slim_name)
        .unwrap_or_else(|| panic!("no slim config for {}", slim_name));
    for slim_conf in &slim_config.terms {
        let slim_termid = &slim_conf.termid;
        let term_details = self.terms.get(slim_termid)
            .unwrap_or_else(|| panic!("can't find TermDetails for {}", slim_termid));
        let subset_element = TermSubsetElement {
            name: term_details.name.clone(),
            gene_count: term_details.genes_annotated_with.len(),
        };

        for gene in &term_details.genes_annotated_with {
            all_genes.insert(gene);
        }
        slim_subset.insert(slim_termid.clone(), subset_element);
    }

    TermSubsetDetails {
        name: RcString::from(slim_name),
        total_gene_count: all_genes.len(),
        elements: slim_subset,
    }
}

/// Group genes of the load organism into subsets by feature type, e.g.
/// "feature_type:mRNA_gene".
fn make_feature_type_subsets(&self, subsets: &mut IdGeneSubsetMap) {
    for gene_details in self.genes.values() {
        if let Some(load_organism_taxonid) = self.config.load_organism_taxonid {
            if load_organism_taxonid != gene_details.taxonid {
                continue;
            }
        }
        let subset_name =
            RcString::from("feature_type:") + &gene_details.feature_type;
        // normalise whitespace/commas/colons in the name to underscores
        let re = Regex::new(r"[\s,:]+").unwrap();
        let subset_name_no_spaces =
            RcString::from(re.replace_all(&subset_name, "_").as_ref());
        subsets.entry(subset_name_no_spaces.clone())
            .or_insert(GeneSubsetDetails {
                name: subset_name_no_spaces,
                display_name: RcString::from(&subset_name),
                elements: HashSet::new()
            })
            .elements.insert(gene_details.uniquename.clone());
    }
}

// make subsets using the characterisation_status field of GeneDetails
fn
make_characterisation_status_subsets(&self, subsets: &mut IdGeneSubsetMap) { for gene_details in self.genes.values() { if let Some(load_organism_taxonid) = self.config.load_organism_taxonid { if load_organism_taxonid != gene_details.taxonid { continue; } } if gene_details.feature_type != "mRNA gene" { continue; } if let Some(ref characterisation_status) = gene_details.characterisation_status { let subset_name = RcString::from("characterisation_status:") + characterisation_status; let re = Regex::new(r"[\s,:]+").unwrap(); let subset_name_no_spaces = RcString::from(re.replace_all(&subset_name, "_").as_ref()); subsets.entry(subset_name_no_spaces.clone()) .or_insert(GeneSubsetDetails { name: subset_name_no_spaces, display_name: RcString::from(&subset_name), elements: HashSet::new() }) .elements.insert(gene_details.uniquename.clone()); } } } // make InterPro subsets using the interpro_matches field of GeneDetails fn make_interpro_subsets(&mut self, subsets: &mut IdGeneSubsetMap) { for (gene_uniquename, gene_details) in &self.genes { if self.config.load_organism_taxonid.is_none() || self.config.load_organism_taxonid.unwrap() != gene_details.taxonid { continue; } for interpro_match in &gene_details.interpro_matches { let mut new_subset_names = vec![]; if !interpro_match.interpro_id.is_empty() { let subset_name = String::from("interpro:") + &interpro_match.interpro_id; new_subset_names.push((RcString::from(&subset_name), interpro_match.interpro_name.clone())); } let subset_name = String::from("interpro:") + &interpro_match.dbname.clone() + ":" + &interpro_match.id; new_subset_names.push((RcString::from(&subset_name), interpro_match.name.clone())); for (subset_name, display_name) in new_subset_names { subsets.entry(subset_name.clone()) .or_insert(GeneSubsetDetails { name: subset_name, display_name, elements: HashSet::new(), }) .elements.insert(gene_uniquename.clone()); } } } } // populated the subsets HashMap fn make_subsets(&mut self) { let mut gene_subsets: 
IdGeneSubsetMap = HashMap::new(); for (slim_name, slim_config) in &self.config.slims { let slim_subset = self.make_slim_subset(slim_name); let gene_subset = self.make_non_slim_subset(&slim_config.cv_name, &slim_subset); gene_subsets.extend(gene_subset); self.term_subsets.insert(slim_name.clone(), slim_subset); } self.make_feature_type_subsets(&mut gene_subsets); self.make_characterisation_status_subsets(&mut gene_subsets); self.make_interpro_subsets(&mut gene_subsets); self.gene_subsets = gene_subsets; } // sort the list of genes in the ChromosomeDetails by start_pos pub fn sort_chromosome_genes(&mut self) { let mut genes_to_sort: HashMap<ChromosomeName, Vec<GeneUniquename>> = HashMap::new(); { let sorter = |uniquename1: &GeneUniquename, uniquename2: &GeneUniquename| { let gene1 = &self.genes[uniquename1]; let gene2 = &self.genes[uniquename2]; if let Some(ref gene1_loc) = gene1.location { if let Some(ref gene2_loc) = gene2.location { let cmp = gene1_loc.start_pos.cmp(&gene2_loc.start_pos); if cmp != Ordering::Equal { return cmp; } } } if gene1.name.is_some() { if gene2.name.is_some() { gene1.name.cmp(&gene2.name) } else { Ordering::Less } } else { if gene2.name.is_some() { Ordering::Greater } else { gene1.uniquename.cmp(&gene2.uniquename) } } }; for (chr_uniquename, chr_details) in &self.chromosomes { genes_to_sort.insert(chr_uniquename.clone(), chr_details.gene_uniquenames.clone()); } for gene_uniquenames in genes_to_sort.values_mut() { gene_uniquenames.sort_by(&sorter); } } for (chr_uniquename, gene_uniquenames) in genes_to_sort { self.chromosomes.get_mut(&chr_uniquename).unwrap().gene_uniquenames = gene_uniquenames; } } fn get_dataset_name_for_measurement(&self, reference_uniquename: &str, level_type_termid: &str, during_termid: &str, scale: &str) -> Option<RcString> { for conf in &self.config.gene_expression.datasets { if conf.pubmed_id == reference_uniquename && conf.level_type_termid == level_type_termid && conf.during_termid == during_termid && conf.scale == 
scale { return Some(conf.name.clone()); } } None } fn set_gene_expression_measurements(&mut self) { let mut measurements = HashMap::new(); for annotation in &self.ont_annotations { if &annotation.term_short.cv_name != "gene_ex" { continue; } let gene_uniquename = if let Some(gene_short) = annotation.genes.iter().next() { gene_short.uniquename.clone() } else { continue; }; let level_type_termid = annotation.term_short.termid.clone(); let reference_uniquename = if let Some(ref_short) = &annotation.reference_short { ref_short.uniquename.clone() } else { continue; }; let mut during_ext = None; for extpart in &annotation.extension { if extpart.rel_type_name == "during" { during_ext = Some(&extpart.ext_range); } } let during_termid = if let Some(ExtRange::Term(termid)) = during_ext { termid.clone() } else { continue; }; let gene_ex_props = if let Some(ref props) = annotation.gene_ex_props { props } else { continue; }; let scale = gene_ex_props.scale.clone(); let copies_per_cell = gene_ex_props.copies_per_cell.as_ref().cloned(); let avg_copies_per_cell = gene_ex_props.avg_copies_per_cell.as_ref().cloned(); if let Some(dataset_name) = self.get_dataset_name_for_measurement(&reference_uniquename, &level_type_termid, &during_termid, &scale) { measurements .entry(gene_uniquename) .or_insert_with(HashMap::new) .insert(dataset_name, GeneExMeasurement { reference_uniquename, level_type_termid, during_termid, copies_per_cell, avg_copies_per_cell, scale }); } } self.gene_expression_measurements = measurements; } fn set_chromosome_gene_counts(&mut self) { let mut counts = HashMap::new(); let mut coding_counts = HashMap::new(); for gene_details in self.genes.values() { if let Some(ref loc) = gene_details.location { *counts .entry(&loc.chromosome_name) .or_insert(0) += 1; } if gene_details.feature_type == "mRNA gene" { if let Some(ref loc) = gene_details.location { *coding_counts .entry(&loc.chromosome_name) .or_insert(0) += 1; } } } for chromosome_detail in 
self.chromosomes.values_mut() {
            if let Some(count) = counts.get(&chromosome_detail.name) {
                chromosome_detail.gene_count = *count;
            }
            if let Some(count) = coding_counts.get(&chromosome_detail.name) {
                chromosome_detail.coding_gene_count = *count;
            }
        }
    }

    // Remove some of the refs that have no annotations.
    // See: https://github.com/pombase/website/issues/628
    fn remove_non_curatable_refs(&mut self) {
        let filtered_refs = self.references.drain()
            .filter(|&(_, ref reference_details)| {
                // keep any reference that carries annotation of some kind
                if reference_has_annotation(reference_details) {
                    return true;
                }
                // FIX: the triage status was previously tested in two
                // separate `if let`s; one check covers all the statuses
                // that mean "drop this un-annotated reference"
                if let Some(ref triage_status) = reference_details.canto_triage_status {
                    triage_status != "New" &&
                        triage_status != "Wrong organism" &&
                        triage_status != "Loaded in error"
                } else {
                    if reference_details.uniquename.starts_with("PMID:") {
                        println!("reference {} has no canto_triage_status",
                                 reference_details.uniquename);
                    }
                    // default to true because there are references that
                    // haven't or shouldn't be triaged, eg. GO_REF:...
                    true
                }
            })
            .collect();
        self.references = filtered_refs;
    }

    // Build the summaries of non-obsolete terms that get indexed by Solr,
    // splitting the term name and synonyms into searchable word lists
    fn make_solr_term_summaries(&mut self) -> Vec<SolrTermSummary> {
        let mut return_summaries = vec![];

        let term_name_split_re = Regex::new(r"\W+").unwrap();

        for (termid, term_details) in &self.terms {
            if term_details.is_obsolete {
                continue;
            }

            // characters to strip from the start/end of each indexed word
            let trimmable_p = |c: char| {
                c.is_whitespace() || c == ',' || c == ':' ||
                    c == ';' || c == '.'
|| c == '\'' }; let term_name_words = term_name_split_re.split(&term_details.name) .map(|s: &str| { s.trim_matches(&trimmable_p).to_owned() }).collect::<Vec<String>>(); let mut close_synonyms = vec![]; let mut close_synonym_words_vec: Vec<RcString> = vec![]; let mut distant_synonyms = vec![]; let mut distant_synonym_words_vec: Vec<RcString> = vec![]; let add_to_words_vec = |synonym: &str, words_vec: &mut Vec<RcString>| { let synonym_words = term_name_split_re.split(synonym); for word in synonym_words { let word_string = RcString::from(word.trim_matches(&trimmable_p)); if !words_vec.contains(&word_string) && !term_name_words.contains(&word_string) { words_vec.push(word_string); } } }; for synonym in &term_details.synonyms { if synonym.synonym_type == "exact" || synonym.synonym_type == "narrow" { add_to_words_vec(&synonym.name, &mut close_synonym_words_vec); close_synonyms.push(synonym.name.clone()); } else { add_to_words_vec(&synonym.name, &mut distant_synonym_words_vec); distant_synonyms.push(synonym.name.clone()); } } distant_synonyms = distant_synonyms.into_iter() .filter(|synonym| { !close_synonyms.contains(synonym) }) .collect::<Vec<_>>(); let annotation_count = term_details.annotation_count(); let interesting_parent_ids_for_solr = term_details.interesting_parent_ids.clone(); let term_summ = SolrTermSummary { id: termid.clone(), cv_name: term_details.cv_name.clone(), name: term_details.name.clone(), definition: term_details.definition.clone(), close_synonyms, close_synonym_words: RcString::from(&close_synonym_words_vec.join(" ")), distant_synonyms, distant_synonym_words: RcString::from(&distant_synonym_words_vec.join(" ")), interesting_parent_ids: interesting_parent_ids_for_solr, secondary_identifiers: term_details.secondary_identifiers.clone(), annotation_count, gene_count: term_details.gene_count, genotype_count: term_details.genotype_count, highlighting: HashMap::new(), }; return_summaries.push(term_summ); } return_summaries } fn 
make_solr_reference_summaries(&mut self) -> Vec<SolrReferenceSummary> { let mut return_summaries = vec![]; for reference_details in self.references.values() { return_summaries.push(SolrReferenceSummary::from_reference_details(reference_details)); } return_summaries } fn get_stats(&self) -> Stats { let mut by_taxon = HashMap::new(); for gene_details in self.genes.values() { let taxonid = gene_details.taxonid; by_taxon .entry(taxonid) .or_insert_with(StatCountsByTaxon::empty) .genes += 1; let mut annotation_count = 0; for term_annotations in gene_details.cv_annotations.values() { for term_annotation in term_annotations { annotation_count += term_annotation.annotations.len(); } } by_taxon .entry(taxonid) .or_insert_with(StatCountsByTaxon::empty) .annotations += annotation_count; } Stats { by_taxon, community_pubs_count: self.all_community_curated.len(), non_community_pubs_count: self.all_admin_curated.len(), } } pub fn get_web_data(mut self) -> WebData { self.process_dbxrefs(); self.process_references(); self.process_chromosome_features(); self.make_feature_rel_maps(); self.process_features(); self.add_gene_neighbourhoods(); self.process_props_from_feature_cvterms(); self.process_allele_features(); self.process_genotype_features(); self.process_cvterms(); self.add_interesting_parents(); self.process_cvterm_rels(); self.process_extension_cvterms(); self.process_feature_synonyms(); self.process_feature_publications(); self.process_feature_cvterms(); self.remove_duplicate_transcript_annotation(); self.store_ont_annotations(false); self.store_ont_annotations(true); self.process_cvtermpath(); self.process_annotation_feature_rels(); self.add_target_of_annotations(); self.set_deletion_viability(); self.set_term_details_subsets(); self.set_taxonomic_distributions(); self.remove_non_curatable_refs(); self.set_term_details_maps(); self.set_gene_details_maps(); self.set_gene_details_subset_termids(); self.set_genotype_details_maps(); self.set_reference_details_maps(); 
self.set_chromosome_gene_counts(); self.set_counts(); self.make_subsets(); self.sort_chromosome_genes(); self.set_gene_expression_measurements(); let stats = self.get_stats(); let metadata = self.make_metadata(); let mut gene_summaries: Vec<GeneSummary> = vec![]; let mut solr_gene_summaries: Vec<SolrGeneSummary> = vec![]; for (gene_uniquename, gene_details) in &self.genes { if self.config.load_organism_taxonid.is_none() || self.config.load_organism_taxonid.unwrap() == gene_details.taxonid { let gene_summary = self.make_gene_summary(gene_uniquename); let solr_gene_summary = SolrGeneSummary { id: gene_summary.uniquename.clone(), name: gene_summary.name.clone(), taxonid: gene_summary.taxonid, product: gene_summary.product.clone(), uniprot_identifier: gene_summary.uniprot_identifier.clone(), synonyms: gene_summary.synonyms.clone(), feature_type: gene_summary.feature_type.clone(), }; gene_summaries.push(gene_summary); solr_gene_summaries.push(solr_gene_summary); } } let solr_term_summaries = self.make_solr_term_summaries(); let solr_reference_summaries = self.make_solr_reference_summaries(); let solr_data = SolrData { term_summaries: solr_term_summaries, gene_summaries: solr_gene_summaries, reference_summaries: solr_reference_summaries, }; let chromosomes = self.chromosomes.clone(); let mut chromosome_summaries = vec![]; for chr_details in self.chromosomes.values() { chromosome_summaries.push(chr_details.make_chromosome_short()); } let recent_references = self.recent_references.clone(); let all_community_curated = self.all_community_curated.clone(); let all_admin_curated = self.all_admin_curated.clone(); let ont_annotations = self.ont_annotations.clone(); WebData { metadata, chromosomes, chromosome_summaries, recent_references, all_community_curated, all_admin_curated, api_maps: self.make_api_maps(), search_gene_summaries: gene_summaries, solr_data, ont_annotations, stats, } } } Simplify code for removing un-annotated references Refs pombase/curation#3156 use 
std::rc::Rc; use std::collections::BTreeMap; use std::borrow::Borrow; use std::cmp::Ordering; use std::usize; use regex::Regex; use std::collections::{HashMap, HashSet}; use crate::db::*; use crate::types::*; use crate::data_types::*; use crate::web::data::*; use crate::web::config::*; use crate::web::util::cmp_str_dates; use crate::bio::util::rev_comp; use pombase_rc_string::RcString; use crate::interpro::UniprotResult; use crate::pfam::PfamProteinDetails; fn make_organism(rc_organism: &Rc<Organism>) -> ConfigOrganism { let mut maybe_taxonid: Option<u32> = None; for prop in rc_organism.organismprops.borrow().iter() { if prop.prop_type.name == "taxon_id" { maybe_taxonid = Some(prop.value.parse().unwrap()); } } ConfigOrganism { taxonid: maybe_taxonid.unwrap(), genus: rc_organism.genus.clone(), species: rc_organism.species.clone(), alternative_names: vec![], assembly_version: None, } } type TermShortOptionMap = HashMap<TermId, Option<TermShort>>; type UniprotIdentifier = RcString; pub struct WebDataBuild<'a> { raw: &'a Raw, domain_data: &'a HashMap<UniprotIdentifier, UniprotResult>, pfam_data: &'a Option<HashMap<UniprotIdentifier, PfamProteinDetails>>, rnacentral_data: &'a Option<RNAcentralAnnotations>, config: &'a Config, genes: UniquenameGeneMap, genotypes: UniquenameGenotypeMap, genotype_backgrounds: HashMap<GenotypeUniquename, RcString>, alleles: UniquenameAlleleMap, transcripts: UniquenameTranscriptMap, other_features: UniquenameFeatureShortMap, terms: TermIdDetailsMap, chromosomes: ChrNameDetailsMap, references: UniquenameReferenceMap, all_ont_annotations: HashMap<TermId, Vec<OntAnnotationId>>, all_not_ont_annotations: HashMap<TermId, Vec<OntAnnotationId>>, // map from term name to term ID (ie "nucleus" -> "GO:0005634") term_ids_by_name: HashMap<RcString, TermId>, genes_of_transcripts: HashMap<RcString, RcString>, transcripts_of_polypeptides: HashMap<RcString, RcString>, parts_of_transcripts: HashMap<RcString, Vec<FeatureShort>>, genes_of_alleles: 
HashMap<RcString, RcString>, loci_of_genotypes: HashMap<RcString, HashMap<String, GenotypeLocus>>, // a map from IDs of terms from the "PomBase annotation extension terms" cv // to a Vec of the details of each of the extension parts_of_extensions: HashMap<TermId, Vec<ExtPart>>, base_term_of_extensions: HashMap<TermId, TermId>, // a set of child terms for each term from the cvtermpath table children_by_termid: HashMap<TermId, HashSet<TermId>>, dbxrefs_of_features: HashMap<RcString, HashSet<RcString>>, possible_interesting_parents: HashSet<InterestingParent>, recent_references: RecentReferences, all_community_curated: Vec<ReferenceShort>, all_admin_curated: Vec<ReferenceShort>, gene_expression_measurements: GeneExDataSetMeasurements, term_subsets: IdTermSubsetMap, gene_subsets: IdGeneSubsetMap, annotation_details: IdOntAnnotationDetailMap, ont_annotations: Vec<OntAnnotation>, } fn get_maps() -> (HashMap<RcString, ReferenceShortOptionMap>, HashMap<RcString, GeneShortOptionMap>, HashMap<RcString, GenotypeShortMap>, HashMap<RcString, AlleleShortMap>, HashMap<RcString, TranscriptDetailsOptionMap>, HashMap<GeneUniquename, TermShortOptionMap>) { (HashMap::new(), HashMap::new(), HashMap::new(), HashMap::new(), HashMap::new(), HashMap::new()) } fn get_feat_rel_expression(feature: &Feature, feature_relationship: &FeatureRelationship) -> Option<RcString> { for feature_prop in feature.featureprops.borrow().iter() { if feature_prop.prop_type.name == "allele_type" { if let Some(ref value) = feature_prop.value { if value == "deletion" { return Some("Null".into()); } } } } for rel_prop in feature_relationship.feature_relationshipprops.borrow().iter() { if rel_prop.prop_type.name == "expression" { return rel_prop.value.clone(); } } None } fn get_feat_rel_prop_value(prop_name: &str, feature_relationship: &FeatureRelationship) -> Option<RcString> { for rel_prop in feature_relationship.feature_relationshipprops.borrow().iter() { if rel_prop.prop_type.name == prop_name { return 
rel_prop.value.clone();
        }
    }
    None
}

// A reference is only worth keeping if it carries at least one kind of
// annotation: ontology, interaction, ortholog or paralog.
fn reference_has_annotation(reference_details: &ReferenceDetails) -> bool {
    let details = reference_details;
    !(details.cv_annotations.is_empty() &&
      details.physical_interactions.is_empty() &&
      details.genetic_interactions.is_empty() &&
      details.ortholog_annotations.is_empty() &&
      details.paralog_annotations.is_empty())
}

fn is_gene_type(feature_type_name: &str) -> bool {
    matches!(feature_type_name, "gene" | "pseudogene")
}

// Order two extension parts for display.  Relations named in the configured
// relation_order come first, in that order; relations in always_last come
// after everything else; remaining relations sort by name, with gene ranges
// before non-gene ranges when the names tie.
pub fn compare_ext_part_with_config(extension_relation_order: &RelationOrder,
                                    ep1: &ExtPart, ep2: &ExtPart) -> Ordering {
    let order_conf = &extension_relation_order.relation_order;
    let always_last_conf = &extension_relation_order.always_last;

    let ep1_pos = order_conf.iter().position(|r| *r == ep1.rel_type_name);
    let ep2_pos = order_conf.iter().position(|r| *r == ep2.rel_type_name);

    // anything named in relation_order sorts before anything that isn't
    match (ep1_pos, ep2_pos) {
        (Some(idx1), Some(idx2)) => return idx1.cmp(&idx2),
        (Some(_), None) => return Ordering::Less,
        (None, Some(_)) => return Ordering::Greater,
        (None, None) => {},
    }

    let ep1_last = always_last_conf.iter().position(|r| *r == ep1.rel_type_name);
    let ep2_last = always_last_conf.iter().position(|r| *r == ep2.rel_type_name);

    match (ep1_last, ep2_last) {
        (Some(idx1), Some(idx2)) => idx1.cmp(&idx2),
        // always_last relations sort after un-configured relations
        (Some(_), None) => Ordering::Greater,
        (None, Some(_)) => Ordering::Less,
        (None, None) => {
            match ep1.rel_type_name.cmp(&ep2.rel_type_name) {
                Ordering::Equal => {
                    let ep1_is_gene = ep1.ext_range.is_gene();
                    let ep2_is_gene = ep2.ext_range.is_gene();
                    if ep1_is_gene == ep2_is_gene {
                        Ordering::Equal
                    } else if ep1_is_gene {
                        Ordering::Less
                    } else {
                        Ordering::Greater
                    }
                },
                name_ordering => name_ordering,
            }
        },
    }
}

lazy_static!
{ static ref BAD_GENOTYPE_NAME_CHARS_RE: Regex = Regex::new(r"[% /&;]").unwrap(); } pub fn make_genotype_display_name(loci: &[GenotypeLocus], allele_map: &UniquenameAlleleMap) -> RcString { let mut locus_display_names: Vec<String> = loci.iter().map(|locus| { let mut allele_display_names: Vec<String> = locus.expressed_alleles.iter().map(|expressed_allele| { let allele_short = allele_map.get(&expressed_allele.allele_uniquename).unwrap(); let mut encoded_name_and_type = String::from(&allele_short.encoded_name_and_type); if allele_short.allele_type != "deletion" { if encoded_name_and_type == "unnamed-unrecorded-unrecorded" { encoded_name_and_type = format!("{}-{}", allele_short.gene_uniquename, encoded_name_and_type); } if let Some(ref expression) = expressed_allele.expression { encoded_name_and_type += &format!("-expression-{}", expression.to_lowercase()); } } encoded_name_and_type }).collect(); allele_display_names.sort(); allele_display_names.join("/") }).collect(); locus_display_names.sort(); let joined_alleles = locus_display_names.join(" "); let clean_display_name = BAD_GENOTYPE_NAME_CHARS_RE.replace_all(&joined_alleles, "_"); RcString::from(clean_display_name.as_ref()) } fn make_phase(feature_loc: &Featureloc) -> Option<Phase> { if let Some(phase) = feature_loc.phase { match phase { 0 => Some(Phase::Zero), 1 => Some(Phase::One), 2 => Some(Phase::Two), _ => panic!(), } } else { None } } fn make_location(chromosome_map: &ChrNameDetailsMap, feat: &Feature) -> Option<ChromosomeLocation> { let feature_locs = feat.featurelocs.borrow(); match feature_locs.get(0) { Some(feature_loc) => { let start_pos = if feature_loc.fmin + 1 >= 1 { (feature_loc.fmin + 1) as usize } else { panic!("start_pos less than 1"); }; let end_pos = if feature_loc.fmax >= 1 { feature_loc.fmax as usize } else { panic!("start_end less than 1"); }; let feature_uniquename = &feature_loc.srcfeature.uniquename; let chr_short = make_chromosome_short(chromosome_map, feature_uniquename); 
Some(ChromosomeLocation { chromosome_name: chr_short.name, start_pos, end_pos, strand: match feature_loc.strand { 1 => Strand::Forward, -1 => Strand::Reverse, _ => panic!(), }, phase: make_phase(feature_loc), }) }, None => None, } } fn get_loc_residues(chr: &ChromosomeDetails, loc: &ChromosomeLocation) -> Residues { let start = (loc.start_pos - 1) as usize; let end = loc.end_pos as usize; let residues: Residues = chr.residues[start..end].into(); if loc.strand == Strand::Forward { residues } else { rev_comp(&residues) } } fn make_feature_short(chromosome_map: &ChrNameDetailsMap, feat: &Feature) -> FeatureShort { let maybe_loc = make_location(chromosome_map, feat); if let Some(loc) = maybe_loc { if let Some(chr) = chromosome_map.get(&loc.chromosome_name) { let residues = get_loc_residues(chr, &loc); let feature_type = match &feat.feat_type.name as &str { "five_prime_UTR" => FeatureType::FivePrimeUtr, "pseudogenic_exon" | "exon" => FeatureType::Exon, "three_prime_UTR" => FeatureType::ThreePrimeUtr, "dg_repeat" => FeatureType::DGRepeat, "dh_repeat" => FeatureType::DHRepeat, "gap" => FeatureType::Gap, "gene_group" => FeatureType::GeneGroup, "long_terminal_repeat" => FeatureType::LongTerminalRepeat, "low_complexity_region" => FeatureType::LowComplexityRegion, "LTR_retrotransposon" => FeatureType::LTRRetrotransposon, "mating_type_region" => FeatureType::MatingTypeRegion, "nuclear_mt_pseudogene" => FeatureType::NuclearMtPseudogene, "origin_of_replication" => FeatureType::OriginOfReplication, "polyA_signal_sequence" => FeatureType::PolyASignalSequence, "polyA_site" => FeatureType::PolyASite, "promoter" => FeatureType::Promoter, "region" => FeatureType::Region, "regional_centromere" => FeatureType::RegionalCentromere, "regional_centromere_central_core" => FeatureType::RegionalCentromereCentralCore, "regional_centromere_inner_repeat_region" => FeatureType::RegionalCentromereInnerRepeatRegion, "repeat_region" => FeatureType::RepeatRegion, "TR_box" => FeatureType::TRBox, "SNP" 
=> FeatureType::SNP, _ => panic!("can't handle feature type: {}", feat.feat_type.name), }; FeatureShort { feature_type, uniquename: feat.uniquename.clone(), name: feat.name.clone(), location: loc, residues, } } else { panic!("can't find chromosome {}", loc.chromosome_name); } } else { panic!("{} has no featureloc", feat.uniquename); } } pub fn make_chromosome_short<'a>(chromosome_map: &'a ChrNameDetailsMap, chromosome_name: &'a str) -> ChromosomeShort { if let Some(chr) = chromosome_map.get(chromosome_name) { chr.make_chromosome_short() } else { panic!("can't find chromosome: {}", chromosome_name); } } fn make_reference_short(reference_map: &UniquenameReferenceMap, reference_uniquename: &str) -> Option<ReferenceShort> { if reference_uniquename == "null" { None } else { let reference_details = reference_map.get(reference_uniquename) .unwrap_or_else(|| panic!("missing reference in make_reference_short(): {}", reference_uniquename)); let reference_short = ReferenceShort::from_reference_details(reference_details); Some(reference_short) } } lazy_static! { static ref PROMOTER_RE: Regex = Regex::new(r"^(?P<gene>.*)-promoter$").unwrap(); static ref PREFIX_AND_ID_RE: Regex = Regex::new(r"^(?P<prefix>\S+):(?P<id>\S+)$").unwrap(); static ref TRANSCRIPT_ID_RE: Regex = Regex::new(r"^(?P<gene>.*)\.(?P<suffix>\d+)$").unwrap(); } // Some ancestor terms are useful in the web code. This function uses the Config and returns // the terms that might be useful. 
fn get_possible_interesting_parents(config: &Config) -> HashSet<InterestingParent> { let mut ret = HashSet::new(); for parent_conf in &config.interesting_parents { ret.insert(parent_conf.clone()); } for ext_conf in &config.extension_display_names { if let Some(ref conf_termid) = ext_conf.if_descendant_of { ret.insert(InterestingParent { termid: conf_termid.clone(), rel_name: RcString::from("is_a"), }); } } let add_to_set = |set: &mut HashSet<_>, termid: RcString| { for rel_name in &DESCENDANT_REL_NAMES { set.insert(InterestingParent { termid: termid.to_owned(), rel_name: RcString::from(*rel_name), }); } }; for slim_config in config.slims.values() { for go_slim_conf in &slim_config.terms { add_to_set(&mut ret, go_slim_conf.termid.clone()); } } for field_name in &config.gene_results.visualisation_field_names { if let Some(column_conf) = config.gene_results.field_config.get(field_name) { for attr_value_conf in &column_conf.attr_values { if let Some(ref termid) = attr_value_conf.termid { add_to_set(&mut ret, termid.clone()); } } } else { panic!["can't find field configuration for {}", field_name]; } } ret.insert(InterestingParent { termid: config.viability_terms.viable.clone(), rel_name: RcString::from("is_a"), }); ret.insert(InterestingParent { termid: config.viability_terms.inviable.clone(), rel_name: RcString::from("is_a"), }); let add_filter_ancestor = |set: &mut HashSet<_>, category: &AncestorFilterCategory, cv_name: &str| { for ancestor in &category.ancestors { for config_rel_name in &DESCENDANT_REL_NAMES { if *config_rel_name == "has_part" && !HAS_PART_CV_NAMES.contains(&cv_name) { continue; } set.insert(InterestingParent { termid: ancestor.clone(), rel_name: RcString::from(*config_rel_name), }); } } }; for (cv_name, conf) in &config.cv_config { for filter in &conf.filters { for category in &filter.term_categories { add_filter_ancestor(&mut ret, category, cv_name); } for category in &filter.extension_categories { add_filter_ancestor(&mut ret, category, cv_name); 
}
        }

        for split_by_parent_config in &conf.split_by_parents {
            for ancestor in &split_by_parent_config.termids {
                // a "NOT " prefix marks a negated filter; the parent term
                // itself is still interesting
                let ancestor_termid =
                    if let Some(without_prefix) = ancestor.strip_prefix("NOT ") {
                        RcString::from(without_prefix)
                    } else {
                        ancestor.clone()
                    };
                ret.insert(InterestingParent {
                    termid: ancestor_termid,
                    rel_name: "is_a".into(),
                });
            }
        }
    }

    ret
}

const MAX_RECENT_REFS: usize = 20;

// Return up to MAX_RECENT_REFS references, most recently added (by
// canto_added_date) first.  References without an added date sort last.
fn make_recently_added(references_map: &UniquenameReferenceMap,
                       all_ref_uniquenames: &[RcString]) -> Vec<ReferenceShort> {
    let mut date_sorted_pub_uniquenames = all_ref_uniquenames.to_owned();

    {
        let ref_added_date_cmp =
            |ref_uniquename1: &ReferenceUniquename, ref_uniquename2: &ReferenceUniquename| {
                let ref1 = references_map.get(ref_uniquename1).unwrap();
                let ref2 = references_map.get(ref_uniquename2).unwrap();

                if let Some(ref ref1_added_date) = ref1.canto_added_date {
                    if let Some(ref ref2_added_date) = ref2.canto_added_date {
                        // reverse() so newest dates come first
                        cmp_str_dates(ref1_added_date, ref2_added_date).reverse()
                    } else {
                        Ordering::Less
                    }
                } else {
                    if ref2.canto_added_date.is_some() {
                        Ordering::Greater
                    } else {
                        Ordering::Equal
                    }
                }
            };

        date_sorted_pub_uniquenames.sort_by(ref_added_date_cmp);
    }

    let recently_added_iter = date_sorted_pub_uniquenames.iter().take(MAX_RECENT_REFS);

    let mut recently_added: Vec<ReferenceShort> = vec![];

    for ref_uniquename in recently_added_iter {
        let ref_short_maybe = make_reference_short(references_map, ref_uniquename);
        if let Some(ref_short) = ref_short_maybe {
            recently_added.push(ref_short);
        }
    }

    recently_added
}

// Split the approved, curated references into recent (capped at
// MAX_RECENT_REFS) and complete lists of community-curated vs admin-curated
// publications, newest first.
fn make_canto_curated(references_map: &UniquenameReferenceMap,
                      all_ref_uniquenames: &[RcString])
                      -> (Vec<ReferenceShort>, Vec<ReferenceShort>,
                          Vec<ReferenceShort>, Vec<ReferenceShort>) {
    let mut sorted_pub_uniquenames: Vec<ReferenceUniquename> =
        all_ref_uniquenames.iter()
        .filter(|ref_uniquename| {
            let reference = references_map.get(*ref_uniquename).unwrap();
            reference.canto_approved_date.is_some() &&
                reference.canto_curator_role.is_some()
        })
        .cloned()
        .collect();

    {
        let pub_date_cmp =
            |ref_uniquename1: &ReferenceUniquename, ref_uniquename2: &ReferenceUniquename| {
                let ref1 = references_map.get(ref_uniquename1).unwrap();
                let ref2 = references_map.get(ref_uniquename2).unwrap();

                // use first approval date, but fall back to the most recent
                // approval date
                let ref1_date = ref1.canto_first_approved_date.as_ref()
                    .unwrap_or_else(|| ref1.canto_session_submitted_date.as_ref().unwrap());
                let ref2_date = ref2.canto_first_approved_date.as_ref()
                    .unwrap_or_else(|| ref2.canto_session_submitted_date.as_ref().unwrap());

                cmp_str_dates(ref2_date, ref1_date)
            };

        sorted_pub_uniquenames.sort_by(pub_date_cmp);
    }

    let mut recent_admin_curated = vec![];
    let mut recent_community_curated = vec![];
    let mut all_community_curated = vec![];
    let mut all_admin_curated = vec![];

    for ref_uniquename in sorted_pub_uniquenames.iter() {
        let reference = references_map.get(ref_uniquename).unwrap();
        let ref_short = make_reference_short(references_map, ref_uniquename).unwrap();

        if reference.canto_curator_role == Some("community".into()) {
            all_community_curated.push(ref_short.clone());
            // FIX: was `<=`, which let the "recent" lists grow to
            // MAX_RECENT_REFS + 1 entries — inconsistent with
            // make_recently_added()'s take(MAX_RECENT_REFS)
            if recent_community_curated.len() < MAX_RECENT_REFS {
                recent_community_curated.push(ref_short);
            }
        } else {
            all_admin_curated.push(ref_short.clone());
            if recent_admin_curated.len() < MAX_RECENT_REFS {
                recent_admin_curated.push(ref_short);
            }
        }
    }

    (recent_admin_curated, recent_community_curated,
     all_community_curated, all_admin_curated)
}

// Insert intron FeatureShorts between consecutive exon/UTR parts of a
// transcript, using the gap between neighbouring parts on the chromosome
fn add_introns_to_transcript(chromosome: &ChromosomeDetails,
                             transcript_uniquename: &str,
                             parts: &mut Vec<FeatureShort>) {
    let mut new_parts: Vec<FeatureShort> = vec![];

    let mut intron_count = 0;

    for part in parts.drain(0..)
{
    // Tail of add_introns_to_transcript() (the function header is above this
    // chunk): for each consecutive pair of parts, synthesise a FeatureShort
    // for the gap between them and classify it by the flanking part types.
    let mut maybe_new_intron = None;

    if let Some(prev_part) = new_parts.last() {
        // the candidate intron spans the gap between the previous part and
        // this one (positions are inclusive)
        let intron_start = prev_part.location.end_pos + 1;
        let intron_end = part.location.start_pos - 1;

        if intron_start > intron_end {
            if intron_start > intron_end + 1 {
                // parts overlap by more than one base - report but continue
                println!("no gap between exons at {}..{} in {}",
                         intron_start, intron_end, transcript_uniquename);
            }
            // if intron_start == intron_end-1 then it is a one base overlap that
            // represents a frameshift in the reference See:
            // https://github.com/pombase/curation/issues/1453#issuecomment-303214177
        } else {
            intron_count += 1;

            let new_intron_loc = ChromosomeLocation {
                chromosome_name: prev_part.location.chromosome_name.clone(),
                start_pos: intron_start,
                end_pos: intron_end,
                strand: prev_part.location.strand,
                phase: None,
            };

            let intron_uniquename =
                format!("{}:intron:{}", transcript_uniquename, intron_count);
            let intron_residues = get_loc_residues(chromosome, &new_intron_loc);

            // between two exons it's a CDS intron, otherwise classify by the
            // UTR type of the preceding part
            let intron_type =
                if prev_part.feature_type == FeatureType::Exon &&
                   part.feature_type == FeatureType::Exon {
                    FeatureType::CdsIntron
                } else {
                    if prev_part.feature_type == FeatureType::FivePrimeUtr {
                        FeatureType::FivePrimeUtrIntron
                    } else {
                        FeatureType::ThreePrimeUtrIntron
                    }
                };
            maybe_new_intron = Some(FeatureShort {
                feature_type: intron_type,
                uniquename: RcString::from(&intron_uniquename),
                name: None,
                location: new_intron_loc,
                residues: intron_residues,
            });
        }
    }

    if let Some(new_intron) = maybe_new_intron {
        new_parts.push(new_intron);
    }

    new_parts.push(part);
}

*parts = new_parts;
}

// Sanity check the parts of a transcript: there must be at least one exon,
// any UTR parts before/after the exon block must abut the first/last exon,
// and their UTR type must match the strand.  Inconsistencies are reported
// with println!() or panic!().  Assumes `parts` is sorted by start_pos
// (chromosome coordinate order) - see get_transcript_parts().
fn validate_transcript_parts(transcript_uniquename: &str, parts: &[FeatureShort]) {
    let mut seen_exon = false;
    for part in parts {
        if part.feature_type == FeatureType::Exon {
            seen_exon = true;
            break;
        }
    }
    if !seen_exon {
        panic!("transcript has no exons: {}", transcript_uniquename);
    }

    if parts[0].feature_type != FeatureType::Exon {
        // scan forward to the first exon, checking the UTR parts before it
        for i in 1..parts.len() {
            let part = &parts[i];
            if part.feature_type == FeatureType::Exon {
                let last_utr_before_exons = &parts[i-1];
                let first_exon = &parts[i];
                if last_utr_before_exons.location.end_pos + 1 != first_exon.location.start_pos {
                    println!("{} and exon don't meet up: {} at pos {}",
                             last_utr_before_exons.feature_type,
                             transcript_uniquename,
                             last_utr_before_exons.location.end_pos);
                }
                break;
            } else {
                // parts are in chromosome order, so on the reverse strand the
                // leading parts are biologically the 3' end
                if part.location.strand == Strand::Forward {
                    if part.feature_type != FeatureType::FivePrimeUtr {
                        println!("{:?}", parts);
                        panic!("wrong feature type '{}' before exons in {}",
                               part.feature_type, transcript_uniquename);
                    }
                } else {
                    if part.feature_type != FeatureType::ThreePrimeUtr {
                        println!("{:?}", parts);
                        panic!("wrong feature type '{}' after exons in {}",
                               part.feature_type, transcript_uniquename);
                    }
                }
            }
        }
    }

    let last_part = parts.last().unwrap();

    if last_part.feature_type != FeatureType::Exon {
        // scan backward to the last exon, checking the UTR parts after it
        for i in (0..parts.len()-1).rev() {
            let part = &parts[i];
            if part.feature_type == FeatureType::Exon {
                let first_utr_after_exons = &parts[i+1];
                let last_exon = &parts[i];
                if last_exon.location.end_pos + 1 != first_utr_after_exons.location.start_pos {
                    println!("{} and exon don't meet up: {} at pos {}",
                             first_utr_after_exons.feature_type,
                             transcript_uniquename,
                             first_utr_after_exons.location.end_pos);
                }
                break;
            } else {
                if part.location.strand == Strand::Forward {
                    if part.feature_type != FeatureType::ThreePrimeUtr {
                        panic!("wrong feature type '{}' before exons in {}",
                               part.feature_type, transcript_uniquename);
                    }
                } else {
                    if part.feature_type != FeatureType::FivePrimeUtr {
                        panic!("wrong feature type '{}' after exons in {}",
                               part.feature_type, transcript_uniquename);
                    }
                }
            }
        }
    }
}

impl <'a> WebDataBuild<'a> {
    /// Create a builder over the raw Chado dump plus the external datasets
    /// (UniProt domains, Pfam motifs, RNAcentral annotations).  All the
    /// lookup and accumulator maps start empty; they are filled in by the
    /// process_*() methods.
    pub fn new(raw: &'a Raw,
               domain_data: &'a HashMap<UniprotIdentifier, UniprotResult>,
               pfam_data: &'a Option<HashMap<UniprotIdentifier, PfamProteinDetails>>,
               rnacentral_data: &'a Option<RNAcentralAnnotations>,
               config: &'a Config) -> WebDataBuild<'a>
    {
        WebDataBuild {
            raw,
            domain_data,
            pfam_data,
            rnacentral_data,
            config,

            genes: BTreeMap::new(),
            genotypes: HashMap::new(),
            genotype_backgrounds: HashMap::new(),
            alleles: HashMap::new(),
transcripts: HashMap::new(), other_features: HashMap::new(), terms: HashMap::new(), chromosomes: BTreeMap::new(), references: HashMap::new(), all_ont_annotations: HashMap::new(), all_not_ont_annotations: HashMap::new(), recent_references: RecentReferences { admin_curated: vec![], community_curated: vec![], pubmed: vec![], }, all_community_curated: vec![], all_admin_curated: vec![], term_ids_by_name: HashMap::new(), genes_of_transcripts: HashMap::new(), transcripts_of_polypeptides: HashMap::new(), parts_of_transcripts: HashMap::new(), genes_of_alleles: HashMap::new(), loci_of_genotypes: HashMap::new(), parts_of_extensions: HashMap::new(), base_term_of_extensions: HashMap::new(), children_by_termid: HashMap::new(), dbxrefs_of_features: HashMap::new(), possible_interesting_parents: get_possible_interesting_parents(config), term_subsets: HashMap::new(), gene_subsets: HashMap::new(), annotation_details: HashMap::new(), ont_annotations: vec![], gene_expression_measurements: HashMap::new(), } } fn add_ref_to_hash(&self, seen_references: &mut HashMap<RcString, ReferenceShortOptionMap>, identifier: &str, maybe_reference_uniquename: &Option<ReferenceUniquename>) { if let Some(reference_uniquename) = maybe_reference_uniquename { if reference_uniquename != "null" { seen_references .entry(identifier.into()) .or_insert_with(HashMap::new) .insert(reference_uniquename.clone(), None); } } } fn add_gene_to_hash(&self, seen_genes: &mut HashMap<RcString, GeneShortOptionMap>, identifier: &RcString, other_gene_uniquename: &GeneUniquename) { if !self.genes.contains_key(other_gene_uniquename) { panic!("{}", other_gene_uniquename); } seen_genes .entry(identifier.clone()) .or_insert_with(HashMap::new) .insert(other_gene_uniquename.clone(), None); } fn add_genotype_to_hash(&self, seen_genotypes: &mut HashMap<RcString, GenotypeShortMap>, seen_alleles: &mut HashMap<RcString, AlleleShortMap>, seen_genes: &mut HashMap<RcString, GeneShortOptionMap>, identifier: &RcString, genotype_uniquename: 
                            &RcString) {
        let genotype_short = self.make_genotype_short(genotype_uniquename);

        // also index every allele (and via it, every gene) in the genotype
        for locus in &genotype_short.loci {
            for expressed_allele in &locus.expressed_alleles {
                self.add_allele_to_hash(seen_alleles, seen_genes, identifier,
                                        &expressed_allele.allele_uniquename);
            }
        }

        seen_genotypes
            .entry(identifier.clone())
            .or_insert_with(HashMap::new)
            .insert(genotype_uniquename.clone(), genotype_short);
    }

    /// Record that the page for `identifier` references the given allele,
    /// also recording the allele's gene.  Returns the stored AlleleShort.
    fn add_allele_to_hash(&self,
                          seen_alleles: &mut HashMap<RcString, AlleleShortMap>,
                          seen_genes: &mut HashMap<RcString, GeneShortOptionMap>,
                          identifier: &RcString,
                          allele_uniquename: &AlleleUniquename) -> AlleleShort {
        let allele_short = self.make_allele_short(allele_uniquename);
        {
            let allele_gene_uniquename = &allele_short.gene_uniquename;
            self.add_gene_to_hash(seen_genes, identifier, allele_gene_uniquename);
            seen_alleles
                .entry(identifier.clone())
                .or_insert_with(HashMap::new)
                .insert(allele_uniquename.clone(), allele_short.clone());
        }
        allele_short
    }

    /// Record that the page for `identifier` references the given
    /// transcript, also recording the transcript's gene.  Panics if the
    /// transcript is unknown.
    fn add_transcript_to_hashes(&self,
                                seen_transcripts: &mut HashMap<RcString, TranscriptDetailsOptionMap>,
                                seen_genes: &mut HashMap<RcString, GeneShortOptionMap>,
                                identifier: &RcString,
                                transcript_uniquename: &TranscriptUniquename) {
        if let Some(transcript_details) = self.transcripts.get(transcript_uniquename) {
            seen_transcripts
                .entry(identifier.clone())
                .or_insert_with(HashMap::new)
                .insert(transcript_uniquename.clone(), None);
            self.add_gene_to_hash(seen_genes, identifier,
                                  &transcript_details.gene_uniquename);
        } else {
            panic!("internal error, can't find transcript {}",
                   transcript_uniquename);
        }
    }

    /// Record that the page for `identifier` references the given term.
    fn add_term_to_hash(&self,
                        seen_terms: &mut HashMap<TermId, TermShortOptionMap>,
                        identifier: &RcString,
                        other_termid: &TermId) {
        seen_terms
            .entry(identifier.clone())
            .or_insert_with(HashMap::new)
            .insert(other_termid.clone(), None);
    }

    /// Look up a gene's details, panicking if the gene is unknown.
    fn get_gene<'b>(&'b self, gene_uniquename: &'b str) -> &'b GeneDetails {
        if let Some(gene_details) = self.genes.get(gene_uniquename) {
            gene_details
        } else {
            panic!("can't find GeneDetails for gene uniquename {}",
                   gene_uniquename)
        }
    }

    /// Mutable variant of get_gene(); panics if the gene is unknown.
    fn get_gene_mut<'b>(&'b mut self, gene_uniquename: &'b str) -> &'b mut GeneDetails {
        if let Some(gene_details) = self.genes.get_mut(gene_uniquename) {
            gene_details
        } else {
            panic!("can't find GeneDetails for gene uniquename {}",
                   gene_uniquename)
        }
    }

    /// Make the minimal gene summary used in gene links and lists.
    fn make_gene_short(&self, gene_uniquename: &str) -> GeneShort {
        let gene_details = self.get_gene(gene_uniquename);
        GeneShort {
            uniquename: gene_details.uniquename.clone(),
            name: gene_details.name.clone(),
            product: gene_details.product.clone(),
            transcript_count: gene_details.transcripts.len(),
        }
    }

    /// Make the gene summary used by search and listing pages: exact
    /// synonyms plus ortholog identifiers with their organisms.
    fn make_gene_summary(&self, gene_uniquename: &str) -> GeneSummary {
        let gene_details = self.get_gene(gene_uniquename);
        let synonyms = gene_details.synonyms.iter()
            .filter(|synonym| synonym.synonym_type == "exact")
            .map(|synonym| synonym.name.clone())
            .collect::<Vec<RcString>>();
        let ortholog_ids = gene_details.ortholog_annotations.iter()
            .map(|ortholog_annotation| {
                let ortholog_uniquename = ortholog_annotation.ortholog_uniquename.clone();
                // orthologs must already be in self.genes - unwrap()
                // intentionally fails fast on a missing gene
                let ortholog_gene_summary = &self.genes.get(&ortholog_uniquename).unwrap();
                let maybe_secondary_identifier =
                    ortholog_gene_summary.secondary_identifier.clone();
                let maybe_ortholog_name = ortholog_gene_summary.name.clone();

                IdNameAndOrganism {
                    identifier: ortholog_uniquename,
                    secondary_identifier: maybe_secondary_identifier,
                    name: maybe_ortholog_name,
                    taxonid: ortholog_annotation.ortholog_taxonid,
                }
            })
            .collect::<Vec<IdNameAndOrganism>>();

        GeneSummary {
            uniquename: gene_details.uniquename.clone(),
            name: gene_details.name.clone(),
            product: gene_details.product.clone(),
            uniprot_identifier: gene_details.uniprot_identifier.clone(),
            secondary_identifier: gene_details.secondary_identifier.clone(),
            synonyms,
            orthologs: ortholog_ids,
            feature_type: gene_details.feature_type.clone(),
            taxonid: gene_details.taxonid,
            transcript_count: gene_details.transcripts.len(),
            location: gene_details.location.clone(),
        }
    }

    /// Make the gene summary served by the JSON API.
    fn make_api_gene_summary(&self, gene_uniquename: &str) ->
APIGeneSummary {
        // continuation of make_api_gene_summary(): "APIGeneSummary" above is
        // the return type, the body starts here
        let gene_details = self.get_gene(gene_uniquename);
        let synonyms = gene_details.synonyms.iter()
            .filter(|synonym| synonym.synonym_type == "exact")
            .map(|synonym| synonym.name.clone())
            .collect::<Vec<RcString>>();
        // exon count is taken from the first transcript only - assumes that
        // is representative for the gene; TODO confirm
        let exon_count =
            if let Some(transcript_uniquename) = gene_details.transcripts.get(0) {
                let transcript = self.transcripts
                    .get(transcript_uniquename)
                    .expect(&format!("internal error, can't find transcript details for {}",
                                     transcript_uniquename));
                let mut count = 0;
                for part in &transcript.parts {
                    if part.feature_type == FeatureType::Exon {
                        count += 1;
                    }
                }
                count
            } else {
                0
            };
        let mut ortholog_taxonids = HashSet::new();
        for ortholog_annotation in &gene_details.ortholog_annotations {
            ortholog_taxonids.insert(ortholog_annotation.ortholog_taxonid);
        }
        let transcript_details = gene_details.transcripts
            .iter()
            .map(|transcript_uniquename| {
                self.transcripts.get(transcript_uniquename)
                    .expect(&format!("internal error, failed to find transcript: {}",
                                     transcript_uniquename))
                    .clone()
            }).collect::<Vec<_>>();

        APIGeneSummary {
            uniquename: gene_details.uniquename.clone(),
            name: gene_details.name.clone(),
            product: gene_details.product.clone(),
            uniprot_identifier: gene_details.uniprot_identifier.clone(),
            exact_synonyms: synonyms,
            dbxrefs: gene_details.dbxrefs.clone(),
            location: gene_details.location.clone(),
            transcripts: transcript_details,
            tm_domain_count: gene_details.tm_domain_coords.len(),
            coiled_coil_count: gene_details.coiled_coil_coords.len(),
            disordered_regions_count: gene_details.disordered_region_coords.len(),
            low_complexity_regions_count: gene_details.low_complexity_region_coords.len(),
            exon_count,
            transcript_count: gene_details.transcripts.len(),
            ortholog_taxonids,
        }
    }

    /// Make a TermShort from the stored details, panicking if the term is
    /// unknown.
    fn make_term_short(&self, termid: &str) -> TermShort {
        if let Some(term_details) = self.terms.get(termid) {
            TermShort::from_term_details(term_details)
        } else {
            panic!("can't find TermDetails for termid: {}", termid)
        }
    }

    /// Set a gene's characterisation status from a cvterm name.
    fn add_characterisation_status(&mut self, gene_uniquename:
&str, cvterm_name: &RcString) {
        let gene_details = self.genes.get_mut(gene_uniquename).unwrap();
        gene_details.characterisation_status = Some(cvterm_name.clone());
    }

    /// Set a gene's product description.
    fn add_gene_product(&mut self, gene_uniquename: &str, product: &RcString) {
        let gene_details = self.get_gene_mut(gene_uniquename);
        gene_details.product = Some(product.clone());
    }

    /// Append a name description to a gene.
    fn add_name_description(&mut self, gene_uniquename: &str, name_description: &str) {
        let gene_details = self.get_gene_mut(gene_uniquename);
        gene_details.name_descriptions.push(name_description.into());
    }

    /// Store one ontology annotation detail.  If the cvterm is an annotation
    /// extension term, the annotation is re-based on the underlying base
    /// term and the extension parts are merged into the detail's extension.
    fn add_annotation(&mut self, extension_relation_order: &RelationOrder,
                      cvterm: &Cvterm, is_not: bool,
                      annotation_template: OntAnnotationDetail) {
        // annotate the base term when the cvterm is an extension term
        let termid =
            match self.base_term_of_extensions.get(&cvterm.termid()) {
                Some(base_termid) => base_termid.clone(),
                None => cvterm.termid(),
            };

        let extension_parts =
            match self.parts_of_extensions.get(&cvterm.termid()) {
                Some(parts) => parts.clone(),
                None => vec![],
            };

        // extension parts from the term come first, then the parts already
        // on the annotation; the combined list is sorted by the configured
        // relation order
        let mut new_extension = extension_parts;
        let mut existing_extensions = annotation_template.extension.clone();
        new_extension.append(&mut existing_extensions);

        let compare_ext_part_func = |e1: &ExtPart, e2: &ExtPart| {
            compare_ext_part_with_config(extension_relation_order, e1, e2)
        };

        new_extension.sort_by(compare_ext_part_func);

        let ont_annotation_detail = OntAnnotationDetail {
            extension: new_extension,
            ..
annotation_template };

        // "NOT" annotations are kept separate from positive annotations
        let annotation_map = if is_not {
            &mut self.all_not_ont_annotations
        } else {
            &mut self.all_ont_annotations
        };

        let entry = annotation_map.entry(termid);
        entry.or_insert_with(Vec::new).push(ont_annotation_detail.id);

        self.annotation_details.insert(ont_annotation_detail.id,
                                       ont_annotation_detail);
    }

    /// Collect every feature's dbxref identifiers into
    /// self.dbxrefs_of_features, keyed by feature uniquename.
    fn process_dbxrefs(&mut self) {
        let mut map = HashMap::new();

        for feature_dbxref in &self.raw.feature_dbxrefs {
            let feature = &feature_dbxref.feature;
            let dbxref = &feature_dbxref.dbxref;

            map.entry(feature.uniquename.clone())
                .or_insert_with(HashSet::new)
                .insert(dbxref.identifier());
        }

        self.dbxrefs_of_features = map;
    }

    /// Build a ReferenceDetails for every publication, reading PubMed and
    /// Canto metadata from the publication props.  The "null" placeholder
    /// publication and publications whose Canto triage status is configured
    /// as ignorable are skipped.
    fn process_references(&mut self) {
        // compile the regexes once, before the loop - they were previously
        // re-compiled for every publication
        let author_re = Regex::new(r"^(?P<f>[^,]+),.*$").unwrap();
        let date_re = Regex::new(r"^(.* )?(?P<y>\d\d\d\d)$").unwrap();
        let approved_date_re = Regex::new(r"^(?P<date>\d\d\d\d-\d\d-\d\d).*").unwrap();

        for rc_publication in &self.raw.publications {
            let reference_uniquename = &rc_publication.uniquename;

            if reference_uniquename.to_lowercase() == "null" {
                continue;
            }

            let mut pubmed_authors: Option<RcString> = None;
            let mut pubmed_publication_date: Option<RcString> = None;
            let mut pubmed_abstract: Option<RcString> = None;
            let mut pubmed_doi: Option<RcString> = None;
            let mut canto_annotation_status: Option<RcString> = None;
            let mut canto_triage_status: Option<RcString> = None;
            let mut canto_curator_role: Option<RcString> = None;
            let mut canto_curator_name: Option<RcString> = None;
            let mut canto_first_approved_date: Option<RcString> = None;
            let mut canto_approved_date: Option<RcString> = None;
            let mut canto_added_date: Option<RcString> = None;
            let mut canto_session_submitted_date: Option<RcString> = None;

            for prop in rc_publication.publicationprops.borrow().iter() {
                match &prop.prop_type.name as &str {
                    "pubmed_publication_date" =>
                        pubmed_publication_date = Some(prop.value.clone()),
                    "pubmed_authors" =>
                        pubmed_authors = Some(prop.value.clone()),
                    "pubmed_abstract" =>
                        pubmed_abstract = Some(prop.value.clone()),
                    "pubmed_doi" =>
                        pubmed_doi = Some(prop.value.clone()),
                    "canto_annotation_status" =>
                        canto_annotation_status = Some(prop.value.clone()),
                    "canto_triage_status" =>
                        canto_triage_status = Some(prop.value.clone()),
                    "canto_curator_role" =>
                        canto_curator_role = Some(prop.value.clone()),
                    "canto_curator_name" =>
                        canto_curator_name = Some(prop.value.clone()),
                    "canto_first_approved_date" =>
                        canto_first_approved_date = Some(prop.value.clone()),
                    "canto_approved_date" =>
                        canto_approved_date = Some(prop.value.clone()),
                    "canto_added_date" =>
                        canto_added_date = Some(prop.value.clone()),
                    "canto_session_submitted_date" =>
                        canto_session_submitted_date = Some(prop.value.clone()),
                    _ => ()
                }
            }

            if let Some(ref canto_triage_status) = canto_triage_status {
                let triage_status_to_ignore =
                    &self.config.reference_page_config.triage_status_to_ignore;
                if triage_status_to_ignore.contains(canto_triage_status) {
                    continue;
                }
            }

            let mut authors_abbrev = None;
            let mut publication_year = None;

            if let Some(authors) = pubmed_authors.clone() {
                if authors.contains(',') {
                    // abbreviate an author list to "First-Author et al."
                    let replaced: String =
                        author_re.replace_all(&authors, "$f et al.").into();
                    authors_abbrev = Some(RcString::from(&replaced));
                } else {
                    authors_abbrev = Some(authors.clone());
                }
            }

            if let Some(publication_date) = pubmed_publication_date.clone() {
                // keep only the trailing four-digit year
                publication_year =
                    Some(RcString::from(date_re.replace_all(&publication_date, "$y")
                                        .as_ref()));
            }

            // prefer the first-approval date, falling back to the session
            // submission date
            let mut approved_date = canto_first_approved_date.clone();

            if approved_date.is_none() {
                approved_date = canto_session_submitted_date.clone();
            }

            // trim any timestamp down to a plain YYYY-MM-DD date
            approved_date =
                if let Some(date) = approved_date {
                    Some(RcString::from(approved_date_re.replace_all(&date, "$date")
                                        .as_ref()))
                } else {
                    None
                };

            // only approved sessions get an approved date
            if let Some(ref canto_annotation_status) = canto_annotation_status {
                if canto_annotation_status != "APPROVED" {
                    approved_date = None;
                }
            }

            self.references.insert(reference_uniquename.clone(),
                                   ReferenceDetails {
                                       uniquename: reference_uniquename.clone(),
                                       title: rc_publication.title.clone(),
                                       citation: rc_publication.miniref.clone(),
                                       pubmed_abstract,
                                       pubmed_doi,
                                       authors: pubmed_authors.clone(),
                                       authors_abbrev,
                                       pubmed_publication_date: pubmed_publication_date.clone(),
                                       canto_annotation_status,
                                       canto_triage_status,
                                       canto_curator_role,
                                       canto_curator_name,
                                       canto_first_approved_date,
                                       canto_approved_date,
                                       canto_session_submitted_date,
                                       canto_added_date,
                                       approved_date,
                                       publication_year,
                                       cv_annotations: HashMap::new(),
                                       physical_interactions: vec![],
                                       genetic_interactions: vec![],
                                       ortholog_annotations: vec![],
                                       paralog_annotations: vec![],
                                       genes_by_uniquename: HashMap::new(),
                                       genotypes_by_uniquename: HashMap::new(),
                                       alleles_by_uniquename: HashMap::new(),
                                       transcripts_by_uniquename: HashMap::new(),
                                       terms_by_termid: HashMap::new(),
                                       annotation_details: HashMap::new(),
                                       gene_count: 0,
                                   });
        }
    }

    // make maps from genes to transcript, transcripts to polypeptide,
    // exon, intron, UTRs
    fn make_feature_rel_maps(&mut self) {
        for feature_rel in &self.raw.feature_relationships {
            let subject_type_name = &feature_rel.subject.feat_type.name;
            let rel_name = &feature_rel.rel_type.name;
            let object_type_name = &feature_rel.object.feat_type.name;
            let subject_uniquename = &feature_rel.subject.uniquename;
            let object_uniquename = &feature_rel.object.uniquename;

            // transcript part_of gene
            if TRANSCRIPT_FEATURE_TYPES.contains(&subject_type_name.as_str()) &&
                rel_name == "part_of" &&
                is_gene_type(object_type_name) {
                    self.genes_of_transcripts.insert(subject_uniquename.clone(),
                                                     object_uniquename.clone());
                    continue;
                }
            // polypeptide derives_from mRNA
            if subject_type_name == "polypeptide" &&
                rel_name == "derives_from" &&
                object_type_name == "mRNA" {
                    self.transcripts_of_polypeptides.insert(subject_uniquename.clone(),
                                                            object_uniquename.clone());
                    continue;
                }
            if subject_type_name == "allele" {
                // allele instance_of gene/pseudogene
                if feature_rel.rel_type.name == "instance_of" &&
                    (object_type_name == "gene" || object_type_name == "pseudogene") {
                        self.genes_of_alleles.insert(subject_uniquename.clone(),
                                                     object_uniquename.clone());
                        continue;
                    }
                // allele part_of genotype, grouped into loci
                if feature_rel.rel_type.name == "part_of" &&
                    object_type_name == "genotype" {
                        let expression =
get_feat_rel_expression(&feature_rel.subject, feature_rel);
                        // fall back to a synthetic locus id when the
                        // genotype_locus prop is missing
                        let genotype_locus_identifier =
                            get_feat_rel_prop_value("genotype_locus", feature_rel)
                            .unwrap_or_else(|| {
                                RcString::from(&format!("{}-{}",
                                                        feature_rel.object.uniquename,
                                                        feature_rel.feature_relationship_id))
                            });

                        let allele_and_expression =
                            ExpressedAllele {
                                allele_uniquename: subject_uniquename.clone(),
                                expression,
                            };

                        let genotype_entry =
                            self.loci_of_genotypes.entry(object_uniquename.clone());
                        let locus_map = genotype_entry.or_insert_with(HashMap::new);

                        let genotype_locus =
                            locus_map.entry(String::from(&genotype_locus_identifier))
                            .or_insert_with(|| GenotypeLocus {
                                expressed_alleles: vec![]
                            });

                        genotype_locus.expressed_alleles.push(allele_and_expression);

                        continue;
                    }
            }
            // exons, introns, UTRs etc.: collect as parts of their transcript
            if TRANSCRIPT_PART_TYPES.contains(&subject_type_name.as_str()) {
                let entry = self.parts_of_transcripts.entry(object_uniquename.clone());
                let part = make_feature_short(&self.chromosomes, &feature_rel.subject);
                entry.or_insert_with(Vec::new).push(part);
            }
        }
    }

    /// Return a feature's dbxref identifiers, or an empty set if none were
    /// collected by process_dbxrefs().
    fn get_feature_dbxrefs(&self, feature: &Feature) -> HashSet<RcString> {
        if let Some(dbxrefs) = self.dbxrefs_of_features.get(&feature.uniquename) {
            dbxrefs.clone()
        } else {
            HashSet::new()
        }
    }

    /// Build and store a GeneDetails for a gene or pseudogene feature,
    /// merging in UniProt domain data, Pfam motifs and RNAcentral/Rfam
    /// annotations where available.
    fn store_gene_details(&mut self, feat: &Feature) {
        let maybe_location = make_location(&self.chromosomes, feat);

        // register the gene on its chromosome
        if let Some(ref location) = maybe_location {
            if let Some(ref mut chr) =
                self.chromosomes.get_mut(&location.chromosome_name) {
                    chr.gene_uniquenames.push(feat.uniquename.clone());
                }
        }

        let organism = make_organism(&feat.organism);
        let dbxrefs = self.get_feature_dbxrefs(feat);

        // ORFeome clone ids are stored as "SPD:..." dbxrefs
        let mut orfeome_identifier = None;
        for dbxref in &dbxrefs {
            if let Some(without_prefix) = dbxref.strip_prefix("SPD:") {
                orfeome_identifier = Some(RcString::from(without_prefix));
            }
        }

        let mut uniprot_identifier = None;
        let mut secondary_identifier = None;
        let mut biogrid_interactor_id: Option<u32> = None;
        let mut rnacentral_urs_identifier = None;
        for prop in feat.featureprops.borrow().iter() {
            match prop.prop_type.name.as_str() {
                "uniprot_identifier" => uniprot_identifier = prop.value.clone(),
                "sgd_identifier" => secondary_identifier = prop.value.clone(),
                "biogrid_interactor_id" => {
                    if let Some(ref chado_biogrid_id) = prop.value {
                        biogrid_interactor_id =
                            match chado_biogrid_id.parse::<u32>() {
                                Ok(val) => Some(val),
                                Err(err) =>
                                    panic!("error parsing BioGRID interactor ID from Chado: {}",
                                           err),
                            }
                    }
                },
                "rnacentral_identifier" =>
                    rnacentral_urs_identifier = prop.value.clone(),
                _ => (),
            }
        }

        // InterPro matches and TM domain coordinates come from the
        // UniProt-keyed domain data
        let (interpro_matches, tm_domain_coords) =
            if let Some(ref uniprot_identifier) = uniprot_identifier {
                if let Some(result) = self.domain_data.get(uniprot_identifier as &str) {
                    let tm_domain_matches = result.tmhmm_matches.iter()
                        .map(|tm_match| (tm_match.start, tm_match.end))
                        .collect::<Vec<_>>();
                    (result.interpro_matches.clone(), tm_domain_matches)
                } else {
                    (vec![], vec![])
                }
            } else {
                (vec![], vec![])
            };

        // disorder / low-complexity / coiled-coil regions come from the
        // Pfam motif data (when provided)
        let (disordered_region_coords, low_complexity_region_coords,
             coiled_coil_coords) =
            if let Some(pfam_data) = self.pfam_data {
                if let Some(ref uniprot_identifier) = uniprot_identifier {
                    if let Some(result) = pfam_data.get(uniprot_identifier as &str) {
                        let mut disordered_region_coords = vec![];
                        let mut low_complexity_region_coords = vec![];
                        let mut coiled_coil_coords = vec![];
                        for motif in &result.motifs {
                            match &motif.motif_type as &str {
                                "disorder" =>
                                    disordered_region_coords.push((motif.start, motif.end)),
                                "low_complexity" =>
                                    low_complexity_region_coords.push((motif.start, motif.end)),
                                "coiled_coil" =>
                                    coiled_coil_coords.push((motif.start, motif.end)),
                                _ => (),
                            }
                        }
                        (disordered_region_coords, low_complexity_region_coords,
                         coiled_coil_coords)
                    } else {
                        (vec![], vec![], vec![])
                    }
                } else {
                    (vec![], vec![], vec![])
                }
            } else {
                (vec![], vec![], vec![])
            };

        // Rfam annotations are keyed by the RNAcentral URS id (when provided)
        let rfam_annotations =
            if let Some(rnacentral_data) = self.rnacentral_data {
                if let Some(ref rnacentral_urs_identifier) = rnacentral_urs_identifier {
                    if let Some(result) =
                        rnacentral_data.get(rnacentral_urs_identifier.as_str()) {
                            result.clone()
                        } else {
                            vec![]
                        }
                } else {
                    vec![]
                }
            } else {
                vec![]
            };

        let gene_feature = GeneDetails {
            uniquename: feat.uniquename.clone(),
            name: feat.name.clone(),
            taxonid: organism.taxonid,
            product: None,
            deletion_viability: DeletionViability::Unknown,
            uniprot_identifier,
            secondary_identifier,
            biogrid_interactor_id,
            rnacentral_urs_identifier,
            interpro_matches,
            tm_domain_coords,
            disordered_region_coords,
            low_complexity_region_coords,
            coiled_coil_coords,
            rfam_annotations,
            orfeome_identifier,
            name_descriptions: vec![],
            synonyms: vec![],
            dbxrefs,
            feature_type: feat.feat_type.name.clone(),
            feature_so_termid: feat.feat_type.termid(),
            transcript_so_termid: feat.feat_type.termid(),
            characterisation_status: None,
            taxonomic_distribution: None,
            location: maybe_location,
            gene_neighbourhood: vec![],
            cv_annotations: HashMap::new(),
            physical_interactions: vec![],
            genetic_interactions: vec![],
            ortholog_annotations: vec![],
            paralog_annotations: vec![],
            target_of_annotations: vec![],
            transcripts: vec![],
            transcripts_by_uniquename: HashMap::new(),
            genes_by_uniquename: HashMap::new(),
            genotypes_by_uniquename: HashMap::new(),
            alleles_by_uniquename: HashMap::new(),
            references_by_uniquename: HashMap::new(),
            terms_by_termid: HashMap::new(),
            annotation_details: HashMap::new(),
            feature_publications: HashSet::new(),
            subset_termids: HashSet::new(),
        };

        self.genes.insert(feat.uniquename.clone(), gene_feature);
    }

    /// Remove and return the sorted, validated parts (exons, UTRs and
    /// synthesised introns) of a transcript.  Returns an empty Vec when no
    /// parts were recorded.  Reverse strand parts come back reversed, ie. in
    /// biological order.
    fn get_transcript_parts(&mut self, transcript_uniquename: &str) -> Vec<FeatureShort> {
        if let Some(mut parts) = self.parts_of_transcripts.remove(transcript_uniquename) {
            if parts.is_empty() {
                panic!("transcript has no parts: {}", transcript_uniquename);
            }

            let part_cmp = |a: &FeatureShort, b: &FeatureShort| {
                a.location.start_pos.cmp(&b.location.start_pos)
            };

            parts.sort_by(&part_cmp);

            validate_transcript_parts(transcript_uniquename, &parts);

            let chr_name = &parts[0].location.chromosome_name.clone();
            if let Some(chromosome) = self.chromosomes.get(chr_name) {
add_introns_to_transcript(chromosome, transcript_uniquename, &mut parts);
            } else {
                panic!("can't find chromosome details for: {}", chr_name);
            }

            // reverse strand: return the parts in biological (5'->3') order
            if parts[0].location.strand == Strand::Reverse {
                parts.reverse();
            }

            parts
        } else {
            vec![]
        }
    }

    /// Build and store a TranscriptDetails from a transcript feature and its
    /// collected parts, attaching the transcript to its gene.  Transcripts
    /// with no parts are skipped.
    fn store_transcript_details(&mut self, feat: &Feature) {
        let transcript_uniquename = feat.uniquename.clone();

        let parts = self.get_transcript_parts(&transcript_uniquename);

        if parts.is_empty() {
            return;
        }

        // transcript extent is the min start / max end over all parts
        let mut transcript_start = usize::MAX;
        let mut transcript_end = 0;

        for part in &parts {
            if part.location.start_pos < transcript_start {
                transcript_start = part.location.start_pos;
            }
            if part.location.end_pos > transcript_end {
                transcript_end = part.location.end_pos;
            }
        }

        // use the first part as a template to get the chromosome details
        let transcript_location =
            ChromosomeLocation {
                start_pos: transcript_start,
                end_pos: transcript_end,
                phase: None,
                .. parts[0].location.clone()
            };

        // for mRNAs, the CDS is the min/max extent of the exon parts
        let maybe_cds_location =
            if feat.feat_type.name == "mRNA" {
                let mut cds_start = usize::MAX;
                let mut cds_end = 0;

                for part in &parts {
                    if part.feature_type == FeatureType::Exon {
                        if part.location.start_pos < cds_start {
                            cds_start = part.location.start_pos;
                        }
                        if part.location.end_pos > cds_end {
                            cds_end = part.location.end_pos;
                        }
                    }
                }

                if cds_end == 0 {
                    None
                } else {
                    // take the phase from the mRNA's own featureloc
                    if let Some(mrna_location) = feat.featurelocs.borrow().get(0) {
                        let first_part_loc = &parts[0].location;

                        Some(ChromosomeLocation {
                            chromosome_name: first_part_loc.chromosome_name.clone(),
                            start_pos: cds_start,
                            end_pos: cds_end,
                            strand: first_part_loc.strand,
                            phase: make_phase(mrna_location),
                        })
                    } else {
                        None
                    }
                }
            } else {
                None
            };

        if let Some(gene_uniquename) =
            self.genes_of_transcripts.get(&transcript_uniquename) {
                let gene_details = self.genes.get_mut(gene_uniquename).unwrap();
                let transcript_type = feat.feat_type.name.clone();
                // specialise a plain "gene" feature type to eg. "mRNA gene"
                if gene_details.feature_type == "gene" {
                    let feature_type =
                        format!("{} {}", transcript_type, gene_details.feature_type);
                    gene_details.feature_type =
RcString::from(&feature_type);
                }
                let transcript = TranscriptDetails {
                    uniquename: transcript_uniquename.clone(),
                    location: transcript_location,
                    transcript_type,
                    parts,
                    protein: None,
                    cds_location: maybe_cds_location,
                    gene_uniquename: gene_uniquename.to_owned(),
                };

                self.transcripts.insert(transcript_uniquename.clone(),
                                        transcript.clone());

                gene_details.transcripts.push(transcript_uniquename);
                gene_details.transcript_so_termid = feat.feat_type.termid();
            } else {
                panic!("can't find gene for transcript: {}", transcript_uniquename);
            }
    }

    /// Build a ProteinDetails from a polypeptide feature and attach it to
    /// its transcript.  Panics when the residues or the molecular weight
    /// prop are missing, or when no owning transcript is known.
    fn store_protein_details(&mut self, feat: &Feature) {
        if let Some(residues) = feat.residues.clone() {
            let protein_uniquename = feat.uniquename.clone();

            let mut molecular_weight = None;
            let mut average_residue_weight = None;
            let mut charge_at_ph7 = None;
            let mut isoelectric_point = None;
            let mut codon_adaptation_index = None;

            // parse a prop value as f32, logging (not failing) on bad input
            let parse_prop_as_f32 = |p: &Option<RcString>| {
                if let Some(ref prop_value) = p {
                    let maybe_value = prop_value.parse();
                    if let Ok(parsed_prop) = maybe_value {
                        Some(parsed_prop)
                    } else {
                        println!("{}: couldn't parse {} as f32",
                                 feat.uniquename, &prop_value);
                        None
                    }
                } else {
                    None
                }
            };

            for prop in feat.featureprops.borrow().iter() {
                // weights are stored in Daltons; divide to get kDa
                if prop.prop_type.name == "molecular_weight" {
                    if let Some(value) = parse_prop_as_f32(&prop.value) {
                        molecular_weight = Some(value / 1000.0);
                    }
                }
                if prop.prop_type.name == "average_residue_weight" {
                    if let Some(value) = parse_prop_as_f32(&prop.value) {
                        average_residue_weight = Some(value / 1000.0);
                    }
                }
                if prop.prop_type.name == "charge_at_ph7" {
                    charge_at_ph7 = parse_prop_as_f32(&prop.value);
                }
                if prop.prop_type.name == "isoelectric_point" {
                    isoelectric_point = parse_prop_as_f32(&prop.value);
                }
                if prop.prop_type.name == "codon_adaptation_index" {
                    codon_adaptation_index = parse_prop_as_f32(&prop.value);
                }
            }

            if molecular_weight.is_none() {
                panic!("{} has no molecular_weight", feat.uniquename)
            }

            let protein = ProteinDetails {
                uniquename: feat.uniquename.clone(),
                sequence:
RcString::from(&residues),
                product: None,
                molecular_weight: molecular_weight.unwrap(),
                average_residue_weight: average_residue_weight.unwrap(),
                charge_at_ph7: charge_at_ph7.unwrap(),
                isoelectric_point: isoelectric_point.unwrap(),
                codon_adaptation_index: codon_adaptation_index.unwrap(),
            };

            if let Some(transcript_uniquename) =
                self.transcripts_of_polypeptides.get(&protein_uniquename) {
                    self.transcripts.get_mut(transcript_uniquename)
                        .expect(&format!("internal error, failed to find transcript: {}",
                                         transcript_uniquename))
                        .protein = Some(protein);
                } else {
                    panic!("can't find transcript of polypeptide: {}",
                           protein_uniquename)
                }
        } else {
            panic!("no residues for protein: {}", feat.uniquename);
        }
    }

    /// Build and store a ChromosomeDetails from a chromosome feature.
    /// Panics if the feature has no residues, the sequence isn't ASCII or
    /// the ena_id prop is missing.
    fn store_chromosome_details(&mut self, feat: &Feature) {
        let mut ena_identifier = None;

        for prop in feat.featureprops.borrow().iter() {
            if prop.prop_type.name == "ena_id" {
                ena_identifier = prop.value.clone()
            }
        }

        if feat.residues.is_none() {
            panic!("{:?}", feat.uniquename);
        }

        let org = make_organism(&feat.organism);

        let residues = feat.residues.clone().unwrap();

        if !residues.is_ascii() {
            panic!("sequence for chromosome {} contains non-ascii characters",
                   feat.uniquename);
        }

        let chr = ChromosomeDetails {
            name: feat.uniquename.clone(),
            residues: RcString::from(&residues),
            ena_identifier: RcString::from(&ena_identifier.unwrap()),
            gene_uniquenames: vec![],
            taxonid: org.taxonid,
            gene_count: 0,    // we'll update the counts once the genes are processed
            coding_gene_count: 0,
        };

        self.chromosomes.insert(feat.uniquename.clone(), chr);
    }

    /// Build and store a GenotypeDetails from a genotype feature: collect
    /// its loci (sorted by allele display name), detect diploidy and record
    /// any genotype_background prop.
    fn store_genotype_details(&mut self, feat: &Feature) {
        let mut loci: Vec<_> =
            self.loci_of_genotypes[&feat.uniquename]
            .values().cloned().collect();

        let genotype_display_uniquename =
            make_genotype_display_name(&loci, &self.alleles);

        // a locus with more than one expressed allele makes the genotype
        // diploid
        let mut ploidiness = Ploidiness::Haploid;

        for locus in &loci {
            if locus.expressed_alleles.len() > 1 {
                ploidiness = Ploidiness::Diploid;
                break;
            }
        }

        {
            // sort loci by the display name of their first expressed allele
            let loci_cmp = |locus1: &GenotypeLocus, locus2: &GenotypeLocus| {
                let locus1_display_name =
                    &self.alleles[&locus1.expressed_alleles[0].allele_uniquename]
                    .encoded_name_and_type;
                let locus2_display_name =
                    &self.alleles[&locus2.expressed_alleles[0].allele_uniquename]
                    .encoded_name_and_type;
                locus1_display_name.cmp(locus2_display_name)
            };

            loci.sort_by(&loci_cmp);
        }

        for prop in feat.featureprops.borrow().iter() {
            if prop.prop_type.name == "genotype_background" {
                if let Some(ref background) = prop.value {
                    self.genotype_backgrounds.insert(feat.uniquename.clone(),
                                                     background.clone());
                }
            }
        }

        let rc_display_name = RcString::from(&genotype_display_uniquename);

        self.genotypes.insert(rc_display_name.clone(),
                              GenotypeDetails {
                                  display_uniquename: rc_display_name,
                                  name: feat.name.as_ref().map(|s| RcString::from(s)),
                                  loci,
                                  ploidiness,
                                  cv_annotations: HashMap::new(),
                                  genes_by_uniquename: HashMap::new(),
                                  alleles_by_uniquename: HashMap::new(),
                                  references_by_uniquename: HashMap::new(),
                                  transcripts_by_uniquename: HashMap::new(),
                                  terms_by_termid: HashMap::new(),
                                  annotation_details: HashMap::new(),
                              });
    }

    /// Build and store an AlleleShort from an allele feature; panics when
    /// the required allele_type prop is missing.
    fn store_allele_details(&mut self, feat: &Feature) {
        let mut allele_type = None;
        let mut description = None;

        for prop in feat.featureprops.borrow().iter() {
            match &prop.prop_type.name as &str {
                "allele_type" => allele_type = prop.value.clone(),
                "description" => description = prop.value.clone(),
                _ => ()
            }
        }

        if let Some(allele_type) = allele_type {
            let gene_uniquename = &self.genes_of_alleles[&feat.uniquename];
            let allele_details = AlleleShort::new(&feat.uniquename,
                                                  &feat.name,
                                                  &allele_type,
                                                  &description,
                                                  gene_uniquename);
            self.alleles.insert(feat.uniquename.clone(), allele_details);
        } else {
            panic!("no allele_type cvtermprop for {}", &feat.uniquename);
        }
    }

    /// Store every chromosome feature.
    fn process_chromosome_features(&mut self) {
        // we need to process all chromosomes before other features

        for feat in &self.raw.features {
            if feat.feat_type.name == "chromosome" {
                self.store_chromosome_details(feat);
            }
        }
    }

    /// Store genes, then transcripts, then proteins, then any other located
    /// feature types - in that order, because each pass depends on the last.
    fn process_features(&mut self) {
        // we need to process all genes before transcripts
for feat in &self.raw.features { if feat.feat_type.name == "gene" || feat.feat_type.name == "pseudogene" { self.store_gene_details(feat); } } for feat in &self.raw.features { if TRANSCRIPT_FEATURE_TYPES.contains(&feat.feat_type.name.as_str()) { self.store_transcript_details(feat) } } for feat in &self.raw.features { if feat.feat_type.name == "polypeptide"{ self.store_protein_details(feat); } } for feat in &self.raw.features { if !TRANSCRIPT_FEATURE_TYPES.contains(&feat.feat_type.name.as_str()) && !TRANSCRIPT_PART_TYPES.contains(&feat.feat_type.name.as_str()) && !HANDLED_FEATURE_TYPES.contains(&feat.feat_type.name.as_str()) { // for now, ignore features without locations if feat.featurelocs.borrow().len() > 0 { let feature_short = make_feature_short(&self.chromosomes, feat); self.other_features.insert(feat.uniquename.clone(), feature_short); } } } } fn add_interesting_parents(&mut self) { let mut interesting_parents_by_termid: HashMap<RcString, HashSet<InterestingParent>> = HashMap::new(); for cvtermpath in &self.raw.cvtermpaths { let subject_term = &cvtermpath.subject; let subject_termid = subject_term.termid(); let object_term = &cvtermpath.object; let object_termid = object_term.termid(); let rel_termid = match cvtermpath.rel_type { Some(ref rel_type) => { rel_type.termid() }, None => panic!("no relation type for {} <-> {}\n", &subject_term.name, &object_term.name) }; let rel_term_name = self.make_term_short(&rel_termid).name; if self.is_interesting_parent(&object_termid, &rel_term_name) { interesting_parents_by_termid .entry(subject_termid.clone()) .or_insert_with(HashSet::new) .insert(InterestingParent { termid: object_termid, rel_name: rel_term_name, }); }; } for (termid, interesting_parents) in interesting_parents_by_termid { let term_details = self.terms.get_mut(&termid).unwrap(); let interesting_parent_ids = interesting_parents.iter() .map(|p| p.termid.clone()) .collect::<HashSet<_>>(); term_details.interesting_parent_ids = interesting_parent_ids; 
term_details.interesting_parent_details = interesting_parents; } } fn process_allele_features(&mut self) { for feat in &self.raw.features { if feat.feat_type.name == "allele" { self.store_allele_details(feat); } } } fn process_genotype_features(&mut self) { for feat in &self.raw.features { if feat.feat_type.name == "genotype" { self.store_genotype_details(feat); } } } fn add_gene_neighbourhoods(&mut self) { struct GeneAndLoc { gene_uniquename: RcString, loc: ChromosomeLocation, } let mut genes_and_locs: Vec<GeneAndLoc> = vec![]; for gene_details in self.genes.values() { if let Some(ref location) = gene_details.location { genes_and_locs.push(GeneAndLoc { gene_uniquename: gene_details.uniquename.clone(), loc: location.clone(), }); } } let cmp = |a: &GeneAndLoc, b: &GeneAndLoc| { let order = a.loc.chromosome_name.cmp(&b.loc.chromosome_name); if order == Ordering::Equal { a.loc.start_pos.cmp(&b.loc.start_pos) } else { order } }; genes_and_locs.sort_by(cmp); for (i, this_gene_and_loc) in genes_and_locs.iter().enumerate() { let mut nearby_genes: Vec<GeneShort> = vec![]; if i > 0 { let start_index = if i > GENE_NEIGHBOURHOOD_DISTANCE { i - GENE_NEIGHBOURHOOD_DISTANCE } else { 0 }; for back_index in (start_index..i).rev() { let back_gene_and_loc = &genes_and_locs[back_index]; if back_gene_and_loc.loc.chromosome_name != this_gene_and_loc.loc.chromosome_name { break; } let back_gene_short = self.make_gene_short(&back_gene_and_loc.gene_uniquename); nearby_genes.insert(0, back_gene_short); } } let gene_short = self.make_gene_short(&this_gene_and_loc.gene_uniquename); nearby_genes.push(gene_short); if i < genes_and_locs.len() - 1 { let end_index = if i + GENE_NEIGHBOURHOOD_DISTANCE >= genes_and_locs.len() { genes_and_locs.len() } else { i + GENE_NEIGHBOURHOOD_DISTANCE + 1 }; for forward_index in i+1..end_index { let forward_gene_and_loc = &genes_and_locs[forward_index]; if forward_gene_and_loc.loc.chromosome_name != this_gene_and_loc.loc.chromosome_name { break; } let 
forward_gene_short =
                        self.make_gene_short(&forward_gene_and_loc.gene_uniquename);
                    nearby_genes.push(forward_gene_short);
                }
            }

            let this_gene_details =
                self.genes.get_mut(&this_gene_and_loc.gene_uniquename).unwrap();
            this_gene_details.gene_neighbourhood.append(&mut nearby_genes);
        }
    }

    // add interaction, ortholog and paralog annotations
    // Reads feature_relationship rows whose subject and object are both
    // genes, matches them against FEATURE_REL_CONFIGS and pushes
    // Interaction/Ortholog/Paralog annotations onto both genes and
    // (where there is one) the reference.
    fn process_annotation_feature_rels(&mut self) {
        for feature_rel in &self.raw.feature_relationships {
            let rel_name = &feature_rel.rel_type.name;
            let subject_uniquename = &feature_rel.subject.uniquename;
            let object_uniquename = &feature_rel.object.uniquename;

            for rel_config in &FEATURE_REL_CONFIGS {
                if rel_name == rel_config.rel_type_name &&
                    is_gene_type(&feature_rel.subject.feat_type.name) &&
                    is_gene_type(&feature_rel.object.feat_type.name)
                {
                    let mut evidence: Option<Evidence> = None;
                    let mut throughput: Option<Throughput> = None;
                    let mut is_inferred_interaction: bool = false;
                    let mut interaction_note: Option<RcString> = None;

                    let borrowed_publications = feature_rel.publications.borrow();
                    let maybe_publication = borrowed_publications.get(0);
                    // "null" is a placeholder publication, treated as no reference
                    let maybe_reference_uniquename =
                        match maybe_publication {
                            Some(publication) =>
                                if publication.uniquename == "null" {
                                    None
                                } else {
                                    Some(publication.uniquename.clone())
                                },
                            None => None,
                        };

                    for prop in feature_rel.feature_relationshipprops.borrow().iter() {
                        if prop.prop_type.name == "evidence" {
                            if let Some(ref evidence_long) = prop.value {
                                // map the long evidence description to its code;
                                // fall back to storing the long form verbatim
                                for (evidence_code, ev_details) in &self.config.evidence_types {
                                    if &ev_details.long == evidence_long {
                                        evidence = Some(evidence_code.clone());
                                    }
                                }
                                if evidence.is_none() {
                                    evidence = Some(evidence_long.clone());
                                }
                            }
                        }
                        if prop.prop_type.name == "is_inferred" {
                            if let Some(is_inferred_value) = prop.value.clone() {
                                if is_inferred_value == "yes" {
                                    is_inferred_interaction = true;
                                }
                            }
                        }
                        if prop.prop_type.name == "annotation_throughput_type" {
                            if let Some(throughput_type) = prop.value.clone() {
                                throughput = Some(match throughput_type.as_ref() {
                                    "low throughput" => Throughput::LowThroughput,
                                    "high throughput" => Throughput::HighThroughput,
                                    "non-experimental" => Throughput::NonExperimental,
                                    _ => {
                                        panic!("unknown throughput type: {}",
                                               throughput_type);
                                    }
                                });
                            }
                        }
                        if prop.prop_type.name == "interaction_note" {
                            if let Some(interaction_note_value) = prop.value.clone() {
                                interaction_note = Some(interaction_note_value);
                            }
                        }
                    }

                    // saved for the reverse ortholog/paralog annotation below,
                    // since `evidence` is moved into the forward annotation
                    let evidence_clone = evidence.clone();

                    let gene_uniquename = subject_uniquename;
                    let gene_organism_taxonid = {
                        self.genes[subject_uniquename].taxonid
                    };
                    let other_gene_uniquename = object_uniquename;
                    let other_gene_organism_taxonid = {
                        self.genes[object_uniquename].taxonid
                    };

                    match rel_config.annotation_type {
                        FeatureRelAnnotationType::Interaction =>
                            // inferred interactions are skipped entirely
                            if !is_inferred_interaction {
                                let interaction_annotation =
                                    InteractionAnnotation {
                                        gene_uniquename: gene_uniquename.clone(),
                                        interactor_uniquename: other_gene_uniquename.clone(),
                                        evidence,
                                        reference_uniquename: maybe_reference_uniquename.clone(),
                                        throughput,
                                        interaction_note,
                                    };
                                {
                                    let gene_details =
                                        self.genes.get_mut(subject_uniquename).unwrap();
                                    if rel_name == "interacts_physically" {
                                        gene_details.physical_interactions
                                            .push(interaction_annotation.clone());
                                    } else {
                                        if rel_name == "interacts_genetically" {
                                            gene_details.genetic_interactions
                                                .push(interaction_annotation.clone());
                                        } else {
                                            panic!("unknown interaction type: {}", rel_name);
                                        }
                                    };
                                }

                                // also store on the other gene, unless self-interaction
                                if gene_uniquename != other_gene_uniquename {
                                    let other_gene_details =
                                        self.genes.get_mut(object_uniquename).unwrap();
                                    if rel_name == "interacts_physically" {
                                        other_gene_details.physical_interactions
                                            .push(interaction_annotation.clone());
                                    } else {
                                        if rel_name == "interacts_genetically" {
                                            other_gene_details.genetic_interactions
                                                .push(interaction_annotation.clone());
                                        } else {
                                            panic!("unknown interaction type: {}", rel_name);
                                        }
                                    };
                                }

                                if let Some(ref_details) =
                                    if let Some(ref reference_uniquename) =
                                        maybe_reference_uniquename {
                                            self.references.get_mut(reference_uniquename)
                                        } else {
                                            None
                                        }
                                {
                                    if rel_name == "interacts_physically" {
                                        ref_details.physical_interactions
                                            .push(interaction_annotation.clone());
                                    } else {
                                        if rel_name == "interacts_genetically" {
                                            ref_details.genetic_interactions
                                                .push(interaction_annotation.clone());
                                        } else {
                                            panic!("unknown interaction type: {}", rel_name);
                                        }
                                    };
                                }
                            },
                        FeatureRelAnnotationType::Ortholog => {
                            let ortholog_annotation =
                                OrthologAnnotation {
                                    gene_uniquename: gene_uniquename.clone(),
                                    ortholog_uniquename: other_gene_uniquename.clone(),
                                    ortholog_taxonid: other_gene_organism_taxonid,
                                    evidence,
                                    reference_uniquename: maybe_reference_uniquename.clone(),
                                };
                            let gene_details =
                                self.genes.get_mut(subject_uniquename).unwrap();
                            gene_details.ortholog_annotations
                                .push(ortholog_annotation.clone());
                            if let Some(ref_details) =
                                if let Some(ref reference_uniquename) =
                                    maybe_reference_uniquename {
                                        self.references.get_mut(reference_uniquename)
                                    } else {
                                        None
                                    }
                            {
                                ref_details.ortholog_annotations.push(ortholog_annotation);
                            }
                        },
                        FeatureRelAnnotationType::Paralog => {
                            let paralog_annotation =
                                ParalogAnnotation {
                                    gene_uniquename: gene_uniquename.clone(),
                                    paralog_uniquename: other_gene_uniquename.clone(),
                                    evidence,
                                    reference_uniquename: maybe_reference_uniquename.clone(),
                                };
                            let gene_details =
                                self.genes.get_mut(subject_uniquename).unwrap();
                            gene_details.paralog_annotations
                                .push(paralog_annotation.clone());
                            if let Some(ref_details) =
                                if let Some(ref reference_uniquename) =
                                    maybe_reference_uniquename {
                                        self.references.get_mut(reference_uniquename)
                                    } else {
                                        None
                                    }
                            {
                                // only store on the reference for the loaded organism's
                                // gene, or once per pair using taxonid ordering
                                if self.config.load_organism_taxonid.is_some() &&
                                    self.config.load_organism_taxonid.unwrap() == gene_details.taxonid ||
                                    gene_organism_taxonid < other_gene_organism_taxonid
                                {
                                    ref_details.paralog_annotations.push(paralog_annotation);
                                }
                            }
                        }
                    }

                    // for orthologs and paralogs, store the reverse annotation too
                    let other_gene_details =
                        self.genes.get_mut(object_uniquename).unwrap();
                    match rel_config.annotation_type {
                        FeatureRelAnnotationType::Interaction => {},
                        FeatureRelAnnotationType::Ortholog => {
                            let ortholog_annotation =
                                OrthologAnnotation {
                                    gene_uniquename: other_gene_uniquename.clone(),
                                    ortholog_uniquename: gene_uniquename.clone(),
                                    ortholog_taxonid: gene_organism_taxonid,
                                    evidence: evidence_clone,
                                    reference_uniquename: maybe_reference_uniquename.clone(),
                                };
                            other_gene_details.ortholog_annotations.push(ortholog_annotation);
                        },
                        FeatureRelAnnotationType::Paralog => {
                            let paralog_annotation =
                                ParalogAnnotation {
                                    gene_uniquename: other_gene_uniquename.clone(),
                                    paralog_uniquename: gene_uniquename.clone(),
                                    evidence: evidence_clone,
                                    reference_uniquename: maybe_reference_uniquename.clone(),
                                };
                            other_gene_details.paralog_annotations
                                .push(paralog_annotation.clone());
                            if let Some(ref_details) =
                                if let Some(ref reference_uniquename) =
                                    maybe_reference_uniquename {
                                        self.references.get_mut(reference_uniquename)
                                    } else {
                                        None
                                    }
                            {
                                // mirror of the forward-direction condition above
                                if self.config.load_organism_taxonid.is_some() &&
                                    self.config.load_organism_taxonid.unwrap() == other_gene_details.taxonid ||
                                    gene_organism_taxonid > other_gene_organism_taxonid
                                {
                                    ref_details.paralog_annotations.push(paralog_annotation);
                                }
                            }
                        },
                    }
                }
            }
        }

        // sort for stable output
        for ref_details in self.references.values_mut() {
            ref_details.physical_interactions.sort();
            ref_details.genetic_interactions.sort();
            ref_details.ortholog_annotations.sort();
            ref_details.paralog_annotations.sort();
        }

        for gene_details in self.genes.values_mut() {
            gene_details.physical_interactions.sort();
            gene_details.genetic_interactions.sort();
            gene_details.ortholog_annotations.sort();
            gene_details.paralog_annotations.sort();
        }
    }

    // find the extension_display_names config for the given termid and relation type name
    fn matching_ext_config(&self, annotation_termid: &str,
                           rel_type_name: &str) -> Option<ExtensionDisplayNames> {
        let ext_configs = &self.config.extension_display_names;

        if let Some(annotation_term_details) = self.terms.get(annotation_termid) {
            for ext_config in ext_configs {
                if ext_config.rel_name == rel_type_name {
                    // if the config is restricted to descendants of a term,
                    // check the annotation term against it
                    if let Some(ref if_descendant_of) = ext_config.if_descendant_of {
                        if annotation_termid ==
if_descendant_of.as_str() ||
                            annotation_term_details.interesting_parent_ids.contains(if_descendant_of) {
                                return Some((*ext_config).clone());
                            }
                    } else {
                        return Some((*ext_config).clone());
                    }
                }
            }
        }

        None
    }

    // create and returns any TargetOfAnnotations implied by the extension
    // Each extension part with an ExtRange::Gene range and a configured
    // reciprocal display name yields one (target gene, TargetOfAnnotation).
    // Panics unless the annotation has exactly one gene.
    fn make_target_of_for_ext(&self, cv_name: &str,
                              genes: &[RcString],
                              maybe_genotype_uniquename: &Option<RcString>,
                              reference_uniquename: &Option<RcString>,
                              annotation_termid: &str,
                              extension: &[ExtPart])
                              -> Vec<(GeneUniquename, TargetOfAnnotation)> {
        if genes.len() != 1 {
            panic!("expected an annotation with one gene for {}, got: {:?}",
                   annotation_termid, genes);
        }

        let gene = &genes[0];

        let mut ret_vec = vec![];

        for ext_part in extension {
            let maybe_ext_config =
                self.matching_ext_config(annotation_termid, &ext_part.rel_type_name);
            if let ExtRange::Gene(ref target_gene_uniquename) = ext_part.ext_range {
                if let Some(ext_config) = maybe_ext_config {
                    if let Some(reciprocal_display_name) = ext_config.reciprocal_display {
                        let (annotation_gene_uniquename, annotation_genotype_uniquename) =
                            if maybe_genotype_uniquename.is_some() {
                                (gene.clone(), maybe_genotype_uniquename.clone())
                            } else {
                                (gene.clone(), None)
                            };
                        ret_vec.push(((*target_gene_uniquename).clone(),
                                      TargetOfAnnotation {
                                          show_in_summary: true, // set this later
                                          ontology_name: cv_name.into(),
                                          ext_rel_display_name: reciprocal_display_name,
                                          gene: annotation_gene_uniquename,
                                          genotype_uniquename: annotation_genotype_uniquename,
                                          reference_uniquename: reference_uniquename.clone(),
                                      }));
                    }
                }
            }
        }

        ret_vec
    }

    // return an ordered vector of annotations, setting the show_in_summary flag
    // see: https://github.com/pombase/website/issues/299
    fn process_target_of_annotations(&self, gene_details: &GeneDetails,
                                     annotations: &mut HashSet<TargetOfAnnotation>)
                                     -> Vec<TargetOfAnnotation> {
        let mut processed_annotations = annotations.drain().collect::<Vec<_>>();

        let target_of_config = &self.config.target_of_config;
        let priority_config = &target_of_config.relation_priority;

        // warn about relation names missing from the priority config;
        // they fall back to priority 0 below
        for annotation in &processed_annotations {
            if priority_config.get(annotation.ext_rel_display_name.as_str()).is_none() {
                eprintln!(r#"No priority configured for "{}" (from {})"#,
                          annotation.ext_rel_display_name, gene_details.uniquename);
            }
        }

        // sort by descending priority, then relation name, then gene
        // name (falling back to uniquename)
        let cmp_fn = |a: &TargetOfAnnotation, b: &TargetOfAnnotation| {
            let a_rel_name = a.ext_rel_display_name.as_str();
            let a_pri = priority_config.get(a_rel_name).unwrap_or(&0);
            let b_rel_name = b.ext_rel_display_name.as_str();
            let b_pri = priority_config.get(b_rel_name).unwrap_or(&0);

            let pri_order = b_pri.cmp(a_pri);

            if pri_order == Ordering::Equal {
                let rel_name_order = a_rel_name.cmp(b_rel_name);

                if rel_name_order == Ordering::Equal {
                    let a_gene_details = self.genes.get(&a.gene).unwrap();
                    let b_gene_details = self.genes.get(&b.gene).unwrap();

                    if let (Some(a_name), Some(b_name)) =
                        (&a_gene_details.name, &b_gene_details.name)
                    {
                        a_name.cmp(b_name)
                    } else {
                        a_gene_details.uniquename.cmp(&b_gene_details.uniquename)
                    }
                } else {
                    rel_name_order
                }
            } else {
                pri_order
            }
        };

        processed_annotations.sort_by(cmp_fn);

        // for each annotating gene, only the highest-priority relation
        // keeps show_in_summary == true
        let mut seen_gene_rels = HashMap::new();

        for annotation in processed_annotations.iter_mut() {
            let rel_priority = priority_config.get(annotation.ext_rel_display_name.as_str())
                .unwrap_or(&0);

            let existing_rel = seen_gene_rels.get(&annotation.gene);

            if let Some(existing_rel) = existing_rel {
                if *existing_rel > rel_priority {
                    annotation.show_in_summary = false;
                    continue;
                }
            }
            seen_gene_rels.insert(annotation.gene.clone(), rel_priority);
        }

        processed_annotations
    }

    // Collect TargetOfAnnotations implied by annotation extensions and
    // store them (ordered, with show_in_summary set) on the target genes.
    fn add_target_of_annotations(&mut self) {
        let mut target_of_annotations: HashMap<GeneUniquename, HashSet<TargetOfAnnotation>> =
            HashMap::new();

        for term_details in self.terms.values() {
            for term_annotations in term_details.cv_annotations.values() {
                for term_annotation in term_annotations {
                    'ANNOTATION: for annotation_id in &term_annotation.annotations {
                        let annotation = self.annotation_details
                            .get(annotation_id).expect("can't find OntAnnotationDetail");

                        // skip multi-locus or diploid genotypes
                        if let Some(ref genotype_uniquename) = annotation.genotype {
                            let genotype = &self.genotypes[genotype_uniquename];

                            if genotype.loci.len() > 1 ||
                                genotype.loci[0].expressed_alleles.len() > 1 {
                                    break 'ANNOTATION;
                                }
                        }

                        let new_annotations =
                            self.make_target_of_for_ext(&term_details.cv_name,
                                                        &annotation.genes,
                                                        &annotation.genotype,
                                                        &annotation.reference,
                                                        &term_details.termid,
                                                        &annotation.extension);

                        for (target_gene_uniquename, new_annotation) in new_annotations {
                            if self.genes.get(&target_gene_uniquename).is_some() {
                                target_of_annotations
                                    .entry(target_gene_uniquename.clone())
                                    .or_insert_with(HashSet::new)
                                    .insert(new_annotation);
                            } else {
                                eprintln!("can't find gene {} in extension for {}",
                                          target_gene_uniquename, term_details.termid);
                                for annotation_gene in &annotation.genes {
                                    eprintln!(" in annotation of {}", annotation_gene);
                                }
                            }
                        }
                    }
                }
            }
        }

        for (gene_uniquename, mut target_of_annotations) in target_of_annotations {
            let gene_details = self.genes.get(&gene_uniquename).unwrap();
            let processed_target_of_annotations =
                self.process_target_of_annotations(gene_details,
                                                   &mut target_of_annotations);
            let gene_details = self.genes.get_mut(&gene_uniquename).unwrap();
            gene_details.target_of_annotations = processed_target_of_annotations;
        }
    }

    // Set deletion_viability on each gene from its single-locus
    // phenotype annotations of deletion (or Null-expression) alleles,
    // using the configured viable/inviable parent terms.
    fn set_deletion_viability(&mut self) {
        let some_null = Some(RcString::from("Null"));

        let mut gene_statuses = HashMap::new();

        // canonical string for a set of condition term IDs: sorted,
        // space-separated
        let condition_string =
            |condition_ids: HashSet<RcString>| {
                let mut ids_vec: Vec<RcString> = condition_ids.iter().cloned().collect();
                ids_vec.sort();
                RcString::from(&ids_vec.join(" "))
            };

        let viable_termid = &self.config.viability_terms.viable;
        let inviable_termid = &self.config.viability_terms.inviable;

        for (gene_uniquename, gene_details) in &mut self.genes {
            let mut new_status = DeletionViability::Unknown;

            if let Some(single_locus_term_annotations) =
                gene_details.cv_annotations.get("single_locus_phenotype") {
                    // condition-string -> phenotype term, for each of
                    // the viable and inviable cases
                    let mut viable_conditions: HashMap<RcString, TermId> = HashMap::new();
                    let mut
inviable_conditions: HashMap<RcString, TermId> = HashMap::new();

                    for term_annotation in single_locus_term_annotations {
                        'ANNOTATION: for annotation_id in &term_annotation.annotations {
                            let annotation = self.annotation_details
                                .get(annotation_id).expect("can't find OntAnnotationDetail");

                            let genotype_uniquename = annotation.genotype.as_ref().unwrap();
                            let genotype = &self.genotypes[genotype_uniquename];

                            if genotype.loci[0].expressed_alleles.len() > 1 {
                                // diploid locus
                                continue 'ANNOTATION;
                            }
                            let expressed_allele = &genotype.loci[0].expressed_alleles[0];
                            let allele = &self.alleles[&expressed_allele.allele_uniquename];

                            // only deletion alleles or Null expression count
                            if allele.allele_type != "deletion" &&
                                expressed_allele.expression != some_null {
                                    continue 'ANNOTATION;
                                }

                            let term = &self.terms[&term_annotation.term];
                            let interesting_parent_ids = &term.interesting_parent_ids;
                            let conditions_as_string =
                                condition_string(annotation.conditions.clone());
                            // classify by the term itself or its interesting parents
                            if interesting_parent_ids.contains(viable_termid) ||
                                *viable_termid == term_annotation.term {
                                    viable_conditions.insert(conditions_as_string,
                                                             term_annotation.term.clone());
                                } else {
                                    if interesting_parent_ids.contains(inviable_termid) ||
                                        *inviable_termid == term_annotation.term {
                                            inviable_conditions.insert(conditions_as_string,
                                                                       term_annotation.term.clone());
                                        }
                                }
                        }
                    }

                    if viable_conditions.is_empty() {
                        if !inviable_conditions.is_empty() {
                            new_status = DeletionViability::Inviable;
                        }
                    } else {
                        if inviable_conditions.is_empty() {
                            new_status = DeletionViability::Viable;
                        } else {
                            new_status = DeletionViability::DependsOnConditions;

                            // report contradictory data: the same condition set
                            // annotated as both viable and inviable
                            let viable_conditions_set: HashSet<RcString> =
                                viable_conditions.keys().cloned().collect();
                            let inviable_conditions_set: HashSet<RcString> =
                                inviable_conditions.keys().cloned().collect();

                            let intersecting_conditions =
                                viable_conditions_set.intersection(&inviable_conditions_set);
                            if intersecting_conditions.clone().count() > 0 {
                                println!("{} is viable and inviable with", gene_uniquename);
                                for cond in intersecting_conditions {
                                    if cond.is_empty() {
                                        println!(" no conditions");
                                    } else {
                                        println!(" conditions: {}", cond);
                                    }
                                    println!(" viable term: {}", viable_conditions[cond]);
                                    println!(" inviable term: {}", inviable_conditions[cond]);
                                }
                            }
                        }
                    }
                }
            gene_statuses.insert(gene_uniquename.clone(), new_status);
        }

        for (gene_uniquename, status) in &gene_statuses {
            if let Some(ref mut gene_details) = self.genes.get_mut(gene_uniquename) {
                gene_details.deletion_viability = status.clone();
            }
        }
    }

    // Record, on each TermDetails, the names of the configured slims
    // that include that term.
    fn set_term_details_subsets(&mut self) {
        let mut subsets_by_termid = HashMap::new();
        for (slim_name, slim_config) in self.config.slims.iter() {
            for term_and_name in &slim_config.terms {
                subsets_by_termid
                    .entry(term_and_name.termid.clone())
                    .or_insert_with(HashSet::new)
                    .insert(slim_name.clone());
            }
        }

        for term_details in self.terms.values_mut() {
            if let Some(subsets) = subsets_by_termid.remove(&term_details.termid) {
                term_details.in_subsets = subsets;
            }
        }
    }

    // On each GeneDetails, add a set of the term IDs of subsets for
    // this gene.  Any useful subset that contains any term for any
    // annotation in the gene is included.  "useful" means that the
    // front end might need it, eg. slim term IDs
    fn set_gene_details_subset_termids(&mut self) {
        // true when test_termid is the subset term or one of its children
        let is_subset_member =
            |subset_termid: &str, test_termid: &str| {
                if subset_termid == test_termid {
                    return true;
                }
                if let Some(children) = self.children_by_termid.get(subset_termid) {
                    children.contains(test_termid)
                } else {
                    false
                }
            };

        let mut subsets_by_gene = HashMap::new();
        for slim_config in self.config.slims.values() {
            for term_and_name in &slim_config.terms {
                for gene_details in self.genes.values() {
                    for term_annotations in gene_details.cv_annotations.values() {
                        for term_annotation in term_annotations {
                            let gene_termid = &term_annotation.term;
                            if is_subset_member(&term_and_name.termid, gene_termid) {
                                subsets_by_gene
                                    .entry(gene_details.uniquename.clone())
                                    .or_insert_with(HashSet::new)
                                    .insert(term_and_name.termid.clone());
                            }
                        }
                    }
                }
            }
        }

        for gene_details in self.genes.values_mut() {
            if let Some(subset_termids) = subsets_by_gene.remove(&gene_details.uniquename) {
                gene_details.subset_termids = subset_termids;
            }
        }
    }

    // Derive a human-readable taxonomic_distribution for every gene
    // from its "species_dist" annotations (first matching rule wins).
    fn set_taxonomic_distributions(&mut self) {
        let mut term_name_map = HashMap::new();

        let in_archaea = "conserved in archaea";
        let in_bacteria = "conserved in bacteria";
        let in_fungi_only = "conserved in fungi only";
        let in_metazoa = "conserved in metazoa";
        let pombe_specific = "Schizosaccharomyces pombe specific";
        let schizo_specific = "Schizosaccharomyces specific";

        let names = vec![in_archaea, in_bacteria, in_fungi_only, in_metazoa,
                         pombe_specific, schizo_specific];

        // map termid -> conservation name; bail out if any term is missing
        for name in names {
            if let Some(termid) = self.term_ids_by_name.get(name) {
                term_name_map.insert(termid.clone(), name.to_owned());
            } else {
                eprintln!("configuration error: can't find {} in term_ids_by_name map",
                          name);
                eprintln!("skipping taxonomic distribution");
                return;
            }
        }

        'GENE: for gene_details in self.genes.values_mut() {
            let mut dist_names = HashSet::new();

            if let Some(species_dists) = gene_details.cv_annotations.get("species_dist") {
                for ont_term_annotations in species_dists {
                    let term = &ont_term_annotations.term;
                    if let Some(term_name) = term_name_map.get(term) {
                        dist_names.insert(term_name.to_owned());
                    }
                }
            }

            if (dist_names.contains(in_archaea) || dist_names.contains(in_bacteria)) &&
                !dist_names.contains(in_metazoa) {
                    gene_details.taxonomic_distribution =
                        Some(RcString::from("fungi and prokaryotes"));
                    continue 'GENE;
                }
            // NOTE(review): the "&& contains(in_metazoa)" inside the negation looks
            // redundant given the outer contains(in_metazoa) — confirm intent
            if dist_names.contains(in_metazoa) &&
                !((dist_names.contains(in_archaea) || dist_names.contains(in_bacteria)) &&
                  dist_names.contains(in_metazoa)) {
                    gene_details.taxonomic_distribution =
                        Some(RcString::from("eukaryotes only, fungi and metazoa"));
                    continue 'GENE;
                }
            if (dist_names.contains(in_archaea) || dist_names.contains(in_bacteria)) &&
                dist_names.contains(in_metazoa) {
                    gene_details.taxonomic_distribution =
                        Some(RcString::from("eukaryotes and prokaryotes"));
                    continue 'GENE;
                }
            if dist_names.contains(in_fungi_only) {
                gene_details.taxonomic_distribution = Some(RcString::from("fungi only"));
                continue 'GENE;
            }
            if dist_names.contains(pombe_specific) {
                gene_details.taxonomic_distribution = Some(RcString::from("S. pombe specific"));
                continue 'GENE;
            }
            if dist_names.contains(schizo_specific) {
                gene_details.taxonomic_distribution = Some(RcString::from("Schizos. specific"));
                continue 'GENE;
            }
            if let Some(ref characterisation_status) = gene_details.characterisation_status {
                if characterisation_status == "dubious" {
                    gene_details.taxonomic_distribution = Some(RcString::from("dubious"));
                    continue 'GENE;
                }
            }
            if gene_details.feature_type != "mRNA gene" {
                gene_details.taxonomic_distribution = Some(RcString::from("not curated"));
                continue 'GENE;
            }
            gene_details.taxonomic_distribution = Some(RcString::from("other"));
        }
    }

    // Build a TermDetails for every cvterm that isn't an annotation
    // extension term, including xrefs, synonyms and dbxref identifiers.
    fn process_cvterms(&mut self) {
        for cvterm in &self.raw.cvterms {
            if cvterm.cv.name != POMBASE_ANN_EXT_TERM_CV_NAME {
                let cv_config = self.config.cv_config_by_name(&cvterm.cv.name);
                let annotation_feature_type = cv_config.feature_type.clone();

                let mut xrefs = HashMap::new();

                for (source_name, source_config) in cv_config.source_config {
                    let mut maybe_xref_id = None;
                    if let Some(ref term_xref_id_prop) = source_config.id_source {
                        // "prop_name:<name>" reads the id from a cvtermprop;
                        // "ACCESSION" reads it from the term's dbxref
                        if let Some(term_xref_id_prop) =
                            term_xref_id_prop.strip_prefix("prop_name:") {
                                for cvtermprop in cvterm.cvtermprops.borrow().iter() {
                                    if cvtermprop.prop_type.name == *term_xref_id_prop {
                                        maybe_xref_id = Some(cvtermprop.value.clone());
                                        break;
                                    }
                                }
                            } else {
                                if term_xref_id_prop == "ACCESSION" {
                                    let dbxref: &Dbxref = cvterm.dbxref.borrow();
                                    maybe_xref_id = Some(dbxref.accession.clone());
                                }
                            }
                    }
                    let mut maybe_xref_display_name = None;
                    if let Some(ref xref_display_name_prop) = source_config.display_name_prop {
                        for cvtermprop in cvterm.cvtermprops.borrow().iter() {
                            if cvtermprop.prop_type.name == *xref_display_name_prop {
                                maybe_xref_display_name = Some(cvtermprop.value.clone());
                            }
                        }
                    }
                    if let Some(xref_id) = maybe_xref_id {
                        let term_xref = TermXref {
                            xref_id,
                            xref_display_name: maybe_xref_display_name,
                        };
                        xrefs.insert(source_name.clone(), term_xref);
                    }
                }

                let synonyms =
                    cvterm.cvtermsynonyms.borrow().iter().map(|syn| {
                        SynonymDetails {
                            synonym_type: (*syn).synonym_type.name.clone(),
                            name: syn.name.clone(),
                        }
                    }).collect::<Vec<_>>();

                let definition_xrefs =
                    cvterm.definition_xrefs.borrow().iter()
.map(|dbxref| {
                        dbxref.identifier()
                    }).collect::<HashSet<_>>();

                let secondary_identifiers =
                    cvterm.other_dbxrefs.borrow().iter()
                    .map(|dbxref| {
                        dbxref.identifier()
                    }).collect::<HashSet<_>>();

                // most collections start empty and are filled by later passes
                self.terms.insert(cvterm.termid(),
                                  TermDetails {
                                      name: cvterm.name.clone(),
                                      cv_name: cvterm.cv.name.clone(),
                                      annotation_feature_type,
                                      interesting_parent_ids: HashSet::new(),
                                      interesting_parent_details: HashSet::new(),
                                      in_subsets: HashSet::new(),
                                      termid: cvterm.termid(),
                                      synonyms,
                                      definition: cvterm.definition.clone(),
                                      direct_ancestors: vec![],
                                      definition_xrefs,
                                      secondary_identifiers,
                                      genes_annotated_with: HashSet::new(),
                                      is_obsolete: cvterm.is_obsolete,
                                      single_locus_genotype_uniquenames: HashSet::new(),
                                      cv_annotations: HashMap::new(),
                                      genes_by_uniquename: HashMap::new(),
                                      genotypes_by_uniquename: HashMap::new(),
                                      alleles_by_uniquename: HashMap::new(),
                                      transcripts_by_uniquename: HashMap::new(),
                                      references_by_uniquename: HashMap::new(),
                                      terms_by_termid: HashMap::new(),
                                      annotation_details: HashMap::new(),
                                      gene_count: 0,
                                      genotype_count: 0,
                                      xrefs,
                                  });
                self.term_ids_by_name.insert(cvterm.name.clone(), cvterm.termid());
            }
        }
    }

    // Display name for an extension relation: the configured display
    // name if there is one, otherwise the relation name with
    // underscores replaced by spaces.
    fn get_ext_rel_display_name(&self, annotation_termid: &str,
                                ext_rel_name: &str) -> RcString {
        if let Some(ext_conf) = self.matching_ext_config(annotation_termid, ext_rel_name) {
            ext_conf.display_name
        } else {
            RcString::from(&str::replace(ext_rel_name, "_", " "))
        }
    }

    // Turn the annotation-extension cvterms' props into ExtParts stored
    // in parts_of_extensions, resolving gene/promoter/transcript ranges.
    fn process_extension_cvterms(&mut self) {
        let db_prefix = format!("{}:", self.config.database_name);
        for cvterm in &self.raw.cvterms {
            if cvterm.cv.name == POMBASE_ANN_EXT_TERM_CV_NAME {
                for cvtermprop in cvterm.cvtermprops.borrow().iter() {
                    if (*cvtermprop).prop_type.name.starts_with(ANNOTATION_EXT_REL_PREFIX) {
                        let ext_rel_name_str =
                            &(*cvtermprop).prop_type.name[ANNOTATION_EXT_REL_PREFIX.len()..];
                        let ext_rel_name = RcString::from(ext_rel_name_str);
                        let ext_range = (*cvtermprop).value.clone();
                        // a value prefixed with "<database_name>:" names a local
                        // feature: promoter, gene or transcript
                        let range: ExtRange = if ext_range.starts_with(&db_prefix) {
                            let db_feature_uniquename = &ext_range[db_prefix.len()..];
                            if let Some(captures) = PROMOTER_RE.captures(db_feature_uniquename) {
                                let gene_uniquename = RcString::from(&captures["gene"]);
                                if self.genes.contains_key(&gene_uniquename) {
                                    ExtRange::Promoter(gene_uniquename)
                                } else {
                                    panic!("unknown gene in promoter: {}",
                                           db_feature_uniquename);
                                }
                            } else {
                                if self.genes.contains_key(db_feature_uniquename) {
                                    ExtRange::Gene(RcString::from(db_feature_uniquename))
                                } else {
                                    if let Some(captures) =
                                        TRANSCRIPT_ID_RE.captures(db_feature_uniquename) {
                                            if self.genes.contains_key(&captures["gene"]) {
                                                ExtRange::Transcript(RcString::from(db_feature_uniquename))
                                            } else {
                                                panic!("unknown gene for transcript: {}",
                                                       db_feature_uniquename);
                                            }
                                        } else {
                                            panic!("can't find gene or transcript for: {}",
                                                   db_feature_uniquename);
                                        }
                                }
                            }
                        } else {
                            ExtRange::Misc(ext_range)
                        };

                        if let Some(base_termid) =
                            self.base_term_of_extensions.get(&cvterm.termid()) {
                                let rel_type_display_name =
                                    self.get_ext_rel_display_name(base_termid, &ext_rel_name);

                                let rel_type_id =
                                    self.term_ids_by_name.get(&ext_rel_name).cloned();

                                self.parts_of_extensions.entry(cvterm.termid())
                                    .or_insert_with(Vec::new).push(ExtPart {
                                        rel_type_id,
                                        rel_type_name: ext_rel_name,
                                        rel_type_display_name,
                                        ext_range: range,
                                    });
                            } else {
                                panic!("can't find details for term: {}\n", cvterm.termid());
                            }
                    }
                }
            }
        }
    }

    // Two passes over cvterm_relationships: first record is_a links of
    // extension terms (base_term_of_extensions) and direct ancestors of
    // ordinary terms; then turn non-is_a relations of extension terms
    // into ExtParts.
    fn process_cvterm_rels(&mut self) {
        for cvterm_rel in &self.raw.cvterm_relationships {
            let subject_term = &cvterm_rel.subject;
            let object_term = &cvterm_rel.object;
            let rel_type = &cvterm_rel.rel_type;

            if subject_term.cv.name == POMBASE_ANN_EXT_TERM_CV_NAME {
                let subject_termid = subject_term.termid();
                if rel_type.name == "is_a" {
                    self.base_term_of_extensions.insert(subject_termid.clone(),
                                                        object_term.termid().clone());
                }
            } else {
                let object_term_short =
                    self.make_term_short(&object_term.termid());
                if let Some(ref mut subject_term_details) =
                    self.terms.get_mut(&subject_term.termid()) {
                        subject_term_details.direct_ancestors.push(TermAndRelation {
                            termid: object_term_short.termid.clone(),
                            term_name: object_term_short.name.clone(),
                            relation_name: rel_type.name.clone(),
                        });
                    }
            }
        }

        for cvterm_rel in &self.raw.cvterm_relationships {
            let subject_term = &cvterm_rel.subject;
            let object_term = &cvterm_rel.object;
            let rel_type = &cvterm_rel.rel_type;

            if subject_term.cv.name == POMBASE_ANN_EXT_TERM_CV_NAME {
                let subject_termid = subject_term.termid();
                if rel_type.name != "is_a" {
                    let object_termid = object_term.termid();
                    if let Some(base_termid) =
                        self.base_term_of_extensions.get(&subject_term.termid()) {
                            let rel_type_display_name =
                                self.get_ext_rel_display_name(base_termid, &rel_type.name);

                            // "PR:" terms are protein (gene product) ranges
                            let ext_range =
                                if object_termid.starts_with("PR:") {
                                    ExtRange::GeneProduct(object_termid)
                                } else {
                                    ExtRange::Term(object_termid)
                                };

                            self.parts_of_extensions.entry(subject_termid)
                                .or_insert_with(Vec::new).push(ExtPart {
                                    rel_type_id: Some(rel_type.termid()),
                                    rel_type_name: rel_type.name.clone(),
                                    rel_type_display_name,
                                    ext_range,
                                });
                        } else {
                            panic!("can't find details for {}\n", object_termid);
                        }
                }
            }
        }
    }

    // Attach synonyms to their gene or (failing that) allele.
    fn process_feature_synonyms(&mut self) {
        for feature_synonym in &self.raw.feature_synonyms {
            let feature = &feature_synonym.feature;
            let synonym = &feature_synonym.synonym;

            let make_synonym = || {
                SynonymDetails {
                    name: synonym.name.clone(),
                    synonym_type: synonym.synonym_type.name.clone()
                }
            };

            if let Some(ref mut gene_details) = self.genes.get_mut(&feature.uniquename) {
                gene_details.synonyms.push(make_synonym());
            } else {
                if let Some(ref mut allele) = self.alleles.get_mut(&feature.uniquename) {
                    allele.synonyms.push(make_synonym())
                }
            }
        }
    }

    // Record PMID publications directly attached to gene features.
    fn process_feature_publications(&mut self) {
        for feature_pub in &self.raw.feature_pubs {
            let feature = &feature_pub.feature;
            let publication = &feature_pub.publication;

            if publication.uniquename.starts_with("PMID:") {
                if let Some(ref mut gene_details) = self.genes.get_mut(&feature.uniquename) {
                    gene_details.feature_publications.insert(publication.uniquename.clone());
                }
            }
        }
    }

    // Look up a stored genotype and return its summary; panics if the
    // genotype is unknown.
    fn make_genotype_short(&self,
genotype_display_name: &str) -> GenotypeShort { if let Some(details) = self.genotypes.get(genotype_display_name) { GenotypeShort { display_uniquename: details.display_uniquename.clone(), name: details.name.clone(), loci: details.loci.clone(), } } else { panic!("can't find genotype {}", genotype_display_name); } } fn make_allele_short(&self, allele_uniquename: &str) -> AlleleShort { self.alleles[allele_uniquename].clone() } fn add_product_to_protein(&mut self, transcript_uniquename: &str, product: RcString) { if let Some(transcript_details) = self.transcripts.get_mut(transcript_uniquename) { if let Some(ref mut protein) = transcript_details .protein { protein.product = Some(product); } } } // process feature properties stored as cvterms, // eg. characterisation_status and product fn process_props_from_feature_cvterms(&mut self) { for feature_cvterm in &self.raw.feature_cvterms { let feature = &feature_cvterm.feature; let cvterm = &feature_cvterm.cvterm; let (maybe_gene_uniquename, maybe_transcript_uniquename) = if cvterm.cv.name == "PomBase gene products" { if feature.feat_type.name == "polypeptide" { if let Some(transcript_uniquename) = self.transcripts_of_polypeptides.get(&feature.uniquename) { if let Some(gene_uniquename) = self.genes_of_transcripts.get(transcript_uniquename) { (Some(gene_uniquename.clone()), Some(transcript_uniquename.clone()) ) } else { (None, None) } } else { (None, None) } } else { if TRANSCRIPT_FEATURE_TYPES.contains(&feature.feat_type.name.as_str()) { if let Some(gene_uniquename) = self.genes_of_transcripts.get(&feature.uniquename) { (Some(gene_uniquename.clone()), Some(feature.uniquename.clone())) } else { (None, None) } } else { if feature.feat_type.name == "gene" { (Some(feature.uniquename.clone()), None) } else { (None, None) } } } } else { (None, None) }; if let Some(gene_uniquename) = maybe_gene_uniquename { if let Some(transcript_uniquename) = maybe_transcript_uniquename { if transcript_uniquename.ends_with(".1") { // for 
self.add_gene_product(&gene_uniquename, &cvterm.name);
                    }
                    self.add_product_to_protein(&transcript_uniquename, cvterm.name.clone());
                }
            }

            if feature.feat_type.name == "gene" || feature.feat_type.name == "pseudogene" {
                if cvterm.cv.name == "PomBase gene characterisation status" {
                    self.add_characterisation_status(&feature.uniquename, &cvterm.name);
                } else {
                    if cvterm.cv.name == "name_description" {
                        self.add_name_description(&feature.uniquename, &cvterm.name);
                    }
                }
            }
        }
    }

    // Classify the value of a with/from annotation property: it may be a
    // "PREFIX:ID" style identifier, a bare gene or transcript uniquename,
    // a term ID, or an arbitrary identifier (the fallback).
    fn make_with_or_from_value(&self, with_or_from_value: &RcString) -> WithFromValue {
        if let Some(captures) = PREFIX_AND_ID_RE.captures(with_or_from_value) {
            let prefix = &captures["prefix"];
            let id = &captures["id"];

            if self.genes.contains_key(id) {
                let gene_short = self.make_gene_short(id);
                if self.config.database_name == prefix {
                    // a gene from the main organism
                    return WithFromValue::Gene(gene_short);
                } else {
                    if let Some(name) = &gene_short.name {
                        return WithFromValue::IdentifierAndName({
                            IdentifierAndName {
                                identifier: with_or_from_value.clone(),
                                name: RcString::from(name),
                            }
                        });
                    }
                }
            } else {
                if self.transcripts.contains_key(id) {
                    if self.config.database_name == prefix {
                        return WithFromValue::Transcript(RcString::from(id));
                    }
                }
            }
        } else {
            if self.genes.contains_key(with_or_from_value) {
                let gene_short = self.make_gene_short(with_or_from_value);
                // a gene from the main organism
                return WithFromValue::Gene(gene_short);
            } else {
                if self.transcripts.contains_key(with_or_from_value) {
                    return WithFromValue::Transcript(RcString::from(with_or_from_value));
                }
            }
        }

        if self.terms.get(with_or_from_value).is_some() {
            return WithFromValue::Term(self.make_term_short(with_or_from_value))
        }

        // fallback: keep the raw value as an opaque identifier
        WithFromValue::Identifier(with_or_from_value.clone())
    }

    // process annotation
    fn process_feature_cvterms(&mut self) {
        let rel_order = self.config.extension_relation_order.clone();

        'FEATURE_CVTERM: for feature_cvterm in &self.raw.feature_cvterms {
            let feature =
&feature_cvterm.feature;
            let cvterm = &feature_cvterm.cvterm;
            let termid = cvterm.termid();

            let mut transcript_uniquenames = vec![];
            let mut extension = vec![];

            // these CVs are handled by process_props_from_feature_cvterms()
            if cvterm.cv.name == "PomBase gene characterisation status" ||
                cvterm.cv.name == "PomBase gene products" ||
                cvterm.cv.name == "name_description" {
                continue;
            }

            let publication = &feature_cvterm.publication;
            let mut extra_props: HashMap<RcString, RcString> = HashMap::new();
            let mut conditions: HashSet<TermId> = HashSet::new();
            let mut withs: HashSet<WithFromValue> = HashSet::new();
            let mut froms: HashSet<WithFromValue> = HashSet::new();
            let mut qualifiers: Vec<Qualifier> = vec![];
            let mut date: Option<RcString> = None;
            let mut assigned_by: Option<RcString> = None;
            let mut evidence: Option<RcString> = None;
            let mut genotype_background: Option<RcString> = None;
            let mut throughput: Option<Throughput> = None;

            // need to get evidence first as it's used later
            // See: https://github.com/pombase/website/issues/455
            for prop in feature_cvterm.feature_cvtermprops.borrow().iter() {
                if &prop.type_name() == "evidence" {
                    if let Some(ref evidence_long) = prop.value {
                        // map the long evidence description back to its code;
                        // fall back to the long form when no code matches
                        for (evidence_code, ev_details) in &self.config.evidence_types {
                            if &ev_details.long == evidence_long {
                                evidence = Some(evidence_code.clone());
                            }
                        }
                        if evidence.is_none() {
                            evidence = Some(evidence_long.clone());
                        }
                    }
                }
            }

            for prop in feature_cvterm.feature_cvtermprops.borrow().iter() {
                match &prop.type_name() as &str {
                    "residue" | "scale" | "gene_product_form_id" |
                    "quant_gene_ex_copies_per_cell" |
                    "quant_gene_ex_avg_copies_per_cell" => {
                        if let Some(value) = prop.value.clone() {
                            // modified residues (outside the sequence CV) become
                            // a synthetic extension part at the front of the list
                            if prop.type_name() == "residue" &&
                                &cvterm.cv.name != "sequence" {
                                let residue = value.clone();
                                let display_name =
                                    self.get_ext_rel_display_name(&termid, "modified residue");
                                let residue_range_part = ExtPart {
                                    rel_type_id: None,
                                    rel_type_name: display_name.clone(),
                                    rel_type_display_name: display_name,
                                    ext_range: ExtRange::SummaryModifiedResidues(vec![residue]),
                                };
                                extension.insert(0, residue_range_part);
                            }
                            extra_props.insert(prop.type_name().clone(), value);
                        }
                    },
                    "condition" =>
                        if let Some(value) = prop.value.clone() {
                            // conditions must be term IDs ("CV:accession")
                            if value.contains(':') {
                                conditions.insert(value.clone());
                            } else {
                                eprintln!(r#"ignoring condition that isn't a term ID "{}" (from annotation of {} with {})"#,
                                          value, feature.uniquename, termid);
                            }
                        },
                    "qualifier" =>
                        if let Some(value) = prop.value.clone() {
                            qualifiers.push(value);
                        },
                    "assigned_by" =>
                        if let Some(value) = prop.value.clone() {
                            assigned_by = Some(value);
                        },
                    "date" => {
                        if let Some(value) = prop.value.clone() {
                            date = Some(value);
                        }
                    },
                    "with" => {
                        if let Some(value) = prop.value.clone() {
                            withs.insert(self.make_with_or_from_value(&value));
                        }
                    },
                    "from" => {
                        if let Some(value) = prop.value.clone() {
                            froms.insert(self.make_with_or_from_value(&value));
                        }
                    },
                    "annotation_throughput_type" => {
                        if let Some(throughput_type) = prop.value.clone() {
                            throughput = Some(match throughput_type.as_ref() {
                                "low throughput" => Throughput::LowThroughput,
                                "high throughput" => Throughput::HighThroughput,
                                "non-experimental" => Throughput::NonExperimental,
                                _ => {
                                    panic!("unknown throughput type: {}",
                                           throughput_type);
                                }
                            });
                        }
                    },
                    _ => ()
                }
            }

            let mut maybe_genotype_uniquename = None;

            // Resolve the annotated feature to the gene(s) it concerns;
            // genotype features expand to the genes of all expressed alleles.
            let mut gene_uniquenames_vec: Vec<GeneUniquename> =
                match &feature.feat_type.name as &str {
                    "polypeptide" => {
                        if let Some(transcript_uniquename) =
                            self.transcripts_of_polypeptides.get(&feature.uniquename) {
                            if let Some(gene_uniquename) =
                                self.genes_of_transcripts.get(transcript_uniquename) {
                                vec![gene_uniquename.clone()]
                            } else {
                                vec![]
                            }
                        } else {
                            vec![]
                        }
                    },
                    "genotype" => {
                        let loci: Vec<_> =
                            self.loci_of_genotypes[&feature.uniquename]
                            .values().cloned().collect();
                        let genotype_display_name =
                            make_genotype_display_name(&loci, &self.alleles);
                        maybe_genotype_uniquename = Some(genotype_display_name);
                        genotype_background =
                            self.genotype_backgrounds.get(&feature.uniquename)
                            .cloned();
                        loci.iter()
                            .map(|locus| {
                                locus.expressed_alleles.iter()
                                    .map(|expressed_allele| {
                                        let allele_short =
                                            self.make_allele_short(&expressed_allele.allele_uniquename);
                                        allele_short.gene_uniquename
                                    })
                                    .collect()
                            })
                            .collect::<Vec<Vec<_>>>()
                            .concat()
                    },
                    "gene" | "pseudogene" => {
                        vec![feature.uniquename.clone()]
                    },
                    _ =>
                        if TRANSCRIPT_FEATURE_TYPES.contains(&feature.feat_type.name.as_str()) {
                            if let Some(gene_uniquename) =
                                self.genes_of_transcripts.get(&feature.uniquename) {
                                if let Some(gene_details) = self.genes.get(gene_uniquename) {
                                    if gene_details.transcripts.len() > 1 {
                                        // only bother to record the specific transcript if
                                        // there is more than one
                                        transcript_uniquenames.push(feature.uniquename.clone());
                                    }
                                }
                                vec![gene_uniquename.clone()]
                            } else {
                                vec![]
                            }
                        } else {
                            eprintln!("can't handle annotation on {} {}",
                                      &feature.feat_type.name, &feature.uniquename);
                            continue 'FEATURE_CVTERM;
                        }
                };

            gene_uniquenames_vec.dedup();

            gene_uniquenames_vec =
                gene_uniquenames_vec.iter().map(|gene_uniquename: &RcString| {
                    self.make_gene_short(gene_uniquename).uniquename
                }).collect();

            // "null" is the placeholder publication in chado
            let reference_uniquename =
                if publication.uniquename == "null" {
                    None
                } else {
                    Some(publication.uniquename.clone())
                };

            let mut extra_props_clone = extra_props.clone();
            let copies_per_cell = extra_props_clone.remove("quant_gene_ex_copies_per_cell");
            let avg_copies_per_cell = extra_props_clone.remove("quant_gene_ex_avg_copies_per_cell");

            // quantitative gene expression props travel together with their scale
            let gene_ex_props =
                if copies_per_cell.is_some() || avg_copies_per_cell.is_some() {
                    let scale = extra_props_clone.remove("scale")
                        .expect("gene ex scale missing");
                    Some(GeneExProps {
                        copies_per_cell,
                        avg_copies_per_cell,
                        scale,
                    })
                } else {
                    None
                };

            if gene_uniquenames_vec.len() > 1 && maybe_genotype_uniquename.is_none() {
                panic!("non-genotype annotation has more than one gene");
            }

            let annotation_detail = OntAnnotationDetail {
                id: feature_cvterm.feature_cvterm_id,
                genes: gene_uniquenames_vec,
                transcript_uniquenames,
                reference: reference_uniquename,
                genotype: maybe_genotype_uniquename,
                genotype_background,
                withs,
                froms,
                residue:
extra_props_clone.remove("residue"),
                gene_product_form_id: extra_props_clone.remove("gene_product_form_id"),
                gene_ex_props,
                qualifiers,
                evidence,
                conditions,
                extension,
                date,
                assigned_by,
                throughput,
            };

            self.add_annotation(&rel_order, cvterm.borrow(),
                                feature_cvterm.is_not, annotation_detail);
        }
    }

    // Group the annotation detail IDs for one term into named
    // (CvName, OntTermAnnotations) pairs.  Gene expression and phenotype
    // CVs are split into sub-categories; everything else is passed
    // through under its own CV name.
    fn make_term_annotations(&self, termid: &RcString, detail_ids: &[OntAnnotationId],
                             is_not: bool)
                             -> Vec<(CvName, OntTermAnnotations)> {
        let term_details = &self.terms[termid];

        let cv_name = term_details.cv_name.clone();

        match cv_name.as_ref() {
            "gene_ex" | "PomGeneExRNA" | "PomGeneExProt" | "PomGeneExRD" => {
                if is_not {
                    panic!("gene_ex annotations can't be NOT annotations");
                }
                let mut qual_annotations =
                    OntTermAnnotations {
                        term: termid.clone(),
                        is_not: false,
                        rel_names: HashSet::new(),
                        annotations: vec![],
                        summary: None,
                    };
                let mut quant_annotations =
                    OntTermAnnotations {
                        term: termid.clone(),
                        is_not: false,
                        rel_names: HashSet::new(),
                        annotations: vec![],
                        summary: None,
                    };
                // annotations with gene_ex_props are quantitative, the rest
                // qualitative
                for annotation_id in detail_ids {
                    let annotation = self.annotation_details.
                        get(annotation_id).expect("can't find OntAnnotationDetail");

                    if annotation.gene_ex_props.is_some() {
                        quant_annotations.annotations.push(*annotation_id)
                    } else {
                        qual_annotations.annotations.push(*annotation_id)
                    }
                }

                let mut return_vec = vec![];

                if !qual_annotations.annotations.is_empty() {
                    return_vec.push((RcString::from("qualitative_gene_expression"),
                                     qual_annotations));
                }

                if !quant_annotations.annotations.is_empty() {
                    return_vec.push((RcString::from("quantitative_gene_expression"),
                                     quant_annotations));
                }

                return_vec
            },
            "fission_yeast_phenotype" => {
                // split phenotypes by the number of loci in the genotype
                let mut single_locus =
                    OntTermAnnotations {
                        term: termid.clone(),
                        is_not,
                        rel_names: HashSet::new(),
                        annotations: vec![],
                        summary: None,
                    };
                let mut multi_locus =
                    OntTermAnnotations {
                        term: termid.clone(),
                        is_not,
                        rel_names: HashSet::new(),
                        annotations: vec![],
                        summary: None,
                    };

                for annotation_id in detail_ids {
                    let annotation = self.annotation_details.
                        get(annotation_id).expect("can't find OntAnnotationDetail");

                    // phenotype annotations always have a genotype
                    let genotype_uniquename = annotation.genotype.as_ref().unwrap();

                    if let Some(genotype_details) = self.genotypes.get(genotype_uniquename) {
                        if genotype_details.loci.len() == 1 {
                            single_locus.annotations.push(*annotation_id);
                        } else {
                            if !multi_locus.annotations.contains(annotation_id) {
                                multi_locus.annotations.push(*annotation_id);
                            }
                        }
                    } else {
                        panic!("can't find genotype details for {}\n", genotype_uniquename);
                    }
                }

                let mut return_vec = vec![];

                if !single_locus.annotations.is_empty() {
                    return_vec.push((RcString::from("single_locus_phenotype"),
                                     single_locus));
                }

                if !multi_locus.annotations.is_empty() {
                    return_vec.push((RcString::from("multi_locus_phenotype"),
                                     multi_locus));
                }

                return_vec
            },
            _ => {
                vec![(cv_name,
                      OntTermAnnotations {
                          term: termid.clone(),
                          is_not,
                          rel_names: HashSet::new(),
                          annotations: detail_ids.to_owned(),
                          summary: None,
                      })]
            }
        }
    }

    // Merge annotations that are identical apart from their transcript ID:
    // the surviving annotation accumulates all transcript uniquenames.
    fn remove_duplicate_transcript_annotation(&mut self) {
        let ont_annotation_map = &mut self.all_ont_annotations;

        for (_, annotations) in ont_annotation_map {
            // split into annotations without / with transcript IDs
            let (no_transcript_annotations, mut has_transcript_annotations): (Vec<i32>, Vec<i32>) =
                annotations.iter().partition(|&annotation_id| {
                    if let Some(ont_annotation_detail) =
                        self.annotation_details.get(annotation_id) {
                        ont_annotation_detail.transcript_uniquenames.len() == 0
                    } else {
                        panic!("can't find annotation details for {}", annotation_id);
                    }
                });

            *annotations = no_transcript_annotations;

            if has_transcript_annotations.len() >= 2 {
                // merge annotations that differ only by transcript ID
                has_transcript_annotations.sort();
                let mut prev_annotation_id = has_transcript_annotations.remove(0);
                for current_annotation_id in has_transcript_annotations.drain(0..)
{
                    // compare in an inner scope so the immutable borrows of
                    // annotation_details end before the mutation below
                    let (annotations_equal, current_transcript_uniquename) = {
                        let prev_annotation =
                            self.annotation_details.get(&prev_annotation_id).unwrap();
                        let current_annotation =
                            self.annotation_details.get(&current_annotation_id).unwrap();
                        (prev_annotation == current_annotation,
                         current_annotation.transcript_uniquenames[0].clone())
                    };

                    if annotations_equal {
                        // fold the current transcript ID into the previous annotation
                        if let Some(ref annotation_details) =
                            self.annotation_details.get(&prev_annotation_id) {
                            if !annotation_details.transcript_uniquenames
                                .contains(&current_transcript_uniquename) {
                                self.annotation_details.get_mut(&prev_annotation_id).unwrap()
                                    .transcript_uniquenames.push(current_transcript_uniquename);
                            }
                        }
                    } else {
                        annotations.push(prev_annotation_id);
                        prev_annotation_id = current_annotation_id;
                    }
                }
                annotations.push(prev_annotation_id);
            } else {
                annotations.extend(has_transcript_annotations.iter());
            }
        }
    }

    // store the OntTermAnnotations in the TermDetails, GeneDetails,
    // GenotypeDetails and ReferenceDetails
    fn store_ont_annotations(&mut self, is_not: bool) {
        let ont_annotation_map = if is_not {
            &self.all_not_ont_annotations
        } else {
            &self.all_ont_annotations
        };

        let mut gene_annotation_by_term: HashMap<GeneUniquename, HashMap<TermId, Vec<OntAnnotationId>>> =
            HashMap::new();
        let mut genotype_annotation_by_term: HashMap<GenotypeUniquename, HashMap<TermId, Vec<OntAnnotationId>>> =
            HashMap::new();
        let mut ref_annotation_by_term: HashMap<RcString, HashMap<TermId, Vec<OntAnnotationId>>> =
            HashMap::new();

        let mut ont_annotations = vec![];

        for (termid, annotations) in ont_annotation_map {
            if !is_not {
                // NOT annotations aren't attached to the term pages
                let new_annotations =
                    self.make_term_annotations(termid, &annotations, is_not);

                if let Some(ref mut term_details) = self.terms.get_mut(termid) {
                    for (cv_name, new_annotation) in new_annotations {
                        term_details.cv_annotations.entry(cv_name.clone())
                            .or_insert_with(Vec::new)
                            .push(new_annotation);
                    }
                } else {
                    panic!("missing termid: {}\n", termid);
                }
            }

            for annotation_id in annotations {
                let annotation = self.annotation_details.
get(&annotation_id).expect("can't find OntAnnotationDetail");

                // index this annotation by gene, genotype and reference for
                // the per-detail maps filled in below
                for gene_uniquename in &annotation.genes {
                    gene_annotation_by_term.entry(gene_uniquename.clone())
                        .or_insert_with(HashMap::new)
                        .entry(termid.clone())
                        .or_insert_with(Vec::new)
                        .push(*annotation_id);
                }

                if let Some(ref genotype_uniquename) = annotation.genotype {
                    let existing =
                        genotype_annotation_by_term.entry(genotype_uniquename.clone())
                        .or_insert_with(HashMap::new)
                        .entry(termid.clone())
                        .or_insert_with(Vec::new);
                    if !existing.contains(&annotation_id) {
                        existing.push(*annotation_id);
                    }
                }

                if let Some(reference_uniquename) = annotation.reference.clone() {
                    ref_annotation_by_term.entry(reference_uniquename)
                        .or_insert_with(HashMap::new)
                        .entry(termid.clone())
                        .or_insert_with(Vec::new)
                        .push(*annotation_id);
                }

                // also attach the annotation to each of its condition terms
                for condition_termid in &annotation.conditions {
                    let cv_name =
                        if let Some(term_details) = self.terms.get(condition_termid) {
                            term_details.cv_name.clone()
                        } else {
                            panic!("can't find term details for {}", condition_termid);
                        };

                    if let Some(ref mut condition_term_details) =
                        self.terms.get_mut(&condition_termid.clone()) {
                        condition_term_details.cv_annotations
                            .entry(cv_name.clone())
                            .or_insert({
                                let mut new_vec = Vec::new();
                                let new_term_annotation =
                                    OntTermAnnotations {
                                        term: condition_termid.clone(),
                                        is_not,
                                        rel_names: HashSet::new(),
                                        annotations: vec![],
                                        summary: None,
                                    };
                                new_vec.push(new_term_annotation);
                                new_vec
                            });
                        condition_term_details.cv_annotations.get_mut(&cv_name)
                            .unwrap()[0]
                            .annotations.push(*annotation_id);
                    }
                }

                /*
                Remove for now because it's messing with the gene counts.
                See: https://github.com/pombase/website/issues/1705

                // Add annotations to terms referred to in extensions.  They
                // are added to fake CV that have a name starting with
                // "extension:".  The CV name will end with ":genotype" if the
                // annotation is a phentoype/genotype, and will end with ":end"
                // otherwise.  The middle of the fake CV name is the display
                // name for the extension relation.
                // eg. "extension:directly activates:gene"
                for ext_part in &annotation.extension {
                    if let ExtRange::Term(ref part_termid) = ext_part.ext_range {
                        let cv_name = "extension:".to_owned() + &ext_part.rel_type_display_name;

                        if let Some(ref mut part_term_details) =
                            self.terms.get_mut(part_termid) {
                            let extension_cv_name =
                                if annotation.genotype.is_some() {
                                    cv_name.clone() + ":genotype"
                                } else {
                                    cv_name.clone() + ":gene"
                                };

                            part_term_details.cv_annotations
                                .entry(RcString::from(&extension_cv_name))
                                .or_insert({
                                    let mut new_vec = Vec::new();
                                    let new_term_annotation =
                                        OntTermAnnotations {
                                            term: part_termid.to_owned(),
                                            is_not,
                                            rel_names: HashSet::new(),
                                            annotations: vec![],
                                            summary: None,
                                        };
                                    new_vec.push(new_term_annotation);
                                    new_vec
                                });
                            part_term_details.cv_annotations.get_mut(&extension_cv_name)
                                .unwrap()[0]
                                .annotations.push(annotation_id);
                        }
                    }
                }
                */

                // build the denormalised OntAnnotation for this detail
                let gene_short_list =
                    annotation.genes.iter().map(|uniquename: &RcString| {
                        self.make_gene_short(uniquename)
                    }).collect::<HashSet<_>>();

                let reference_short =
                    annotation.reference.as_ref().and_then(|uniquename: &RcString| {
                        make_reference_short(&self.references, uniquename)
                    });

                let genotype_short =
                    annotation.genotype.as_ref().map(|uniquename: &RcString| {
                        self.make_genotype_short(uniquename)
                    });

                let conditions =
                    annotation.conditions.iter().map(|termid| {
                        self.make_term_short(termid)
                    }).collect::<HashSet<_>>();

                if gene_short_list.is_empty() {
                    panic!("no genes for {:?}", &annotation);
                }

                let ont_annotation = OntAnnotation {
                    term_short: self.make_term_short(termid),
                    id: annotation.id,
                    genes: gene_short_list,
                    reference_short,
                    genotype_short,
                    genotype_background: annotation.genotype_background.clone(),
                    withs: annotation.withs.clone(),
                    froms: annotation.froms.clone(),
                    residue: annotation.residue.clone(),
                    gene_ex_props: annotation.gene_ex_props.clone(),
                    qualifiers: annotation.qualifiers.clone(),
                    evidence: annotation.evidence.clone(),
                    conditions,
                    extension: annotation.extension.clone(),
                    assigned_by: annotation.assigned_by.clone(),
                };

                ont_annotations.push(ont_annotation);
            }
        }

        let mut term_names = HashMap::new();
        for (termid, term_details) in &self.terms {
            term_names.insert(termid.clone(), term_details.name.to_lowercase());
        }

        // sort order: positive annotations before NOT annotations, then by
        // lower-cased term name
        let ont_term_cmp = |ont_term_1: &OntTermAnnotations, ont_term_2: &OntTermAnnotations| {
            if !ont_term_1.is_not && ont_term_2.is_not {
                return Ordering::Less;
            }
            if ont_term_1.is_not && !ont_term_2.is_not {
                return Ordering::Greater;
            }
            let term1 = &term_names[&ont_term_1.term];
            let term2 = &term_names[&ont_term_2.term];
            term1.cmp(term2)
        };

        for (gene_uniquename, term_annotation_map) in &gene_annotation_by_term {
            for (termid, details) in term_annotation_map {
                let new_annotations =
                    self.make_term_annotations(termid, details, is_not);

                let gene_details = self.genes.get_mut(gene_uniquename).unwrap();

                for (cv_name, new_annotation) in new_annotations {
                    gene_details.cv_annotations.entry(cv_name.clone())
                        .or_insert_with(Vec::new)
                        .push(new_annotation);
                }
            }

            let gene_details = self.genes.get_mut(gene_uniquename).unwrap();
            for cv_annotations in gene_details.cv_annotations.values_mut() {
                cv_annotations.sort_by(&ont_term_cmp)
            }
        }

        for (genotype_uniquename, term_annotation_map) in &genotype_annotation_by_term {
            for (termid, details) in term_annotation_map {
                let new_annotations =
                    self.make_term_annotations(termid, details, is_not);

                let details = self.genotypes.get_mut(genotype_uniquename).unwrap();

                for (cv_name, new_annotation) in new_annotations {
                    details.cv_annotations.entry(cv_name.clone())
                        .or_insert_with(Vec::new)
                        .push(new_annotation);
                }
            }

            let details = self.genotypes.get_mut(genotype_uniquename).unwrap();
            for cv_annotations in details.cv_annotations.values_mut() {
                cv_annotations.sort_by(&ont_term_cmp)
            }
        }

        for (reference_uniquename, ref_annotation_map) in &ref_annotation_by_term {
            for (termid, details) in ref_annotation_map {
                let new_annotations =
                    self.make_term_annotations(termid, details, is_not);

                let ref_details = self.references.get_mut(reference_uniquename).unwrap();

                for (cv_name,
new_annotation) in new_annotations {
                    ref_details.cv_annotations.entry(cv_name).or_insert_with(Vec::new)
                        .push(new_annotation.clone());
                }
            }

            let ref_details = self.references.get_mut(reference_uniquename).unwrap();
            for cv_annotations in ref_details.cv_annotations.values_mut() {
                cv_annotations.sort_by(&ont_term_cmp)
            }
        }

        for ont_annotation in ont_annotations.drain(0..) {
            self.ont_annotations.push(ont_annotation);
        }
    }

    // return true if the term could or should appear in the interesting_parent_details
    // field of the TermDetails and TermShort structs
    fn is_interesting_parent(&self, termid: &str, rel_name: &str) -> bool {
        self.possible_interesting_parents.contains(&InterestingParent {
            termid: termid.into(),
            rel_name: rel_name.into(),
        })
    }

    // Propagate annotations up the ontology via the cvtermpath closure
    // table and record the parent->child term relationships.
    fn process_cvtermpath(&mut self) {
        // terms used by any configured slim are always recorded as parents
        let mut slim_termids = HashSet::new();
        for slim_config in self.config.slims.values() {
            for term_and_name in &slim_config.terms {
                slim_termids.insert(term_and_name.termid.clone());
            }
        }

        // (CV name, destination term) -> source term -> annotation ID -> relation names
        let mut new_annotations: HashMap<(CvName, TermId), HashMap<TermId, HashMap<i32, HashSet<RelName>>>> =
            HashMap::new();

        let mut children_by_termid: HashMap<TermId, HashSet<TermId>> = HashMap::new();

        for cvtermpath in &self.raw.cvtermpaths {
            let subject_term = &cvtermpath.subject;
            let subject_termid = subject_term.termid();
            let object_term = &cvtermpath.object;
            let object_termid = object_term.termid();

            if let Some(subject_term_details) = self.terms.get(&subject_termid) {
                let rel_termid =
                    match cvtermpath.rel_type {
                        Some(ref rel_type) => {
                            rel_type.termid()
                        },
                        None => panic!("no relation type for {} <-> {}\n",
                                       &subject_term.name, &object_term.name)
                    };

                let rel_term_name =
                    self.make_term_short(&rel_termid).name;

                // has_part only propagates for the configured CVs
                if rel_term_name == "has_part" &&
                    !HAS_PART_CV_NAMES.contains(&subject_term_details.cv_name.as_str()) {
                    continue;
                }

                if !DESCENDANT_REL_NAMES.contains(&rel_term_name.as_str()) {
                    continue;
                }

                // only record children that have annotation, or that are slim terms
                if subject_term_details.cv_annotations.keys().len() > 0 ||
                    slim_termids.contains(&object_termid) {
                    children_by_termid
.entry(object_termid.clone())
                        .or_insert_with(HashSet::new)
                        .insert(subject_termid.clone());
                }

                // queue each of the subject's annotations for copying to the
                // ancestor (object) term, remembering the relation name
                for (cv_name, term_annotations) in &subject_term_details.cv_annotations {
                    for term_annotation in term_annotations {
                        for annotation_id in &term_annotation.annotations {
                            let dest_termid = object_termid.clone();
                            let source_termid = subject_termid.clone();

                            if !term_annotation.is_not {
                                new_annotations.entry((cv_name.clone(), dest_termid))
                                    .or_insert_with(HashMap::new)
                                    .entry(source_termid)
                                    .or_insert_with(HashMap::new)
                                    .entry(*annotation_id)
                                    .or_insert_with(HashSet::new)
                                    .insert(rel_term_name.clone());
                            }
                        }
                    }
                }
            } else {
                panic!("TermDetails not found for {}", &subject_termid);
            }
        }

        // now materialise the queued annotations on the destination terms
        for ((dest_cv_name, dest_termid), dest_annotations_map) in new_annotations.drain() {
            for (source_termid, source_annotations_map) in dest_annotations_map {
                let mut new_annotations: Vec<OntAnnotationId> = vec![];
                let mut all_rel_names: HashSet<RcString> = HashSet::new();
                for (annotation_id, rel_names) in source_annotations_map {
                    new_annotations.push(annotation_id);
                    for rel_name in rel_names {
                        all_rel_names.insert(rel_name);
                    }
                }

                let new_annotations =
                    self.make_term_annotations(&source_termid, &new_annotations, false);

                let dest_term_details = {
                    self.terms.get_mut(&dest_termid).unwrap()
                };

                for (_, new_annotation) in new_annotations {
                    let mut new_annotation_clone = new_annotation.clone();
                    new_annotation_clone.rel_names.extend(all_rel_names.clone());

                    dest_term_details.cv_annotations
                        .entry(dest_cv_name.clone())
                        .or_insert_with(Vec::new)
                        .push(new_annotation_clone);
                }
            }
        }

        let mut term_names = HashMap::new();
        for (termid, term_details) in &self.terms {
            term_names.insert(termid.clone(), term_details.name.to_lowercase());
        }

        for term_details in self.terms.values_mut() {
            let term_details_termid = &term_details.termid;
            for term_annotations in term_details.cv_annotations.values_mut() {
                let ont_term_cmp = |ont_term_1: &OntTermAnnotations, ont_term_2: &OntTermAnnotations| {
                    if ont_term_1.term == ont_term_2.term {
                        return Ordering::Equal;
                    }
                    // put direct annotation first on page
                    if ont_term_1.term == *term_details_termid {
                        return Ordering::Less;
                    }
                    if ont_term_2.term == *term_details_termid {
                        return Ordering::Greater;
                    }
                    // positive annotations sort before NOT annotations
                    if !ont_term_1.is_not && ont_term_2.is_not {
                        return Ordering::Less;
                    }
                    if ont_term_1.is_not && !ont_term_2.is_not {
                        return Ordering::Greater;
                    }
                    let term1 = &term_names[&ont_term_1.term];
                    let term2 = &term_names[&ont_term_2.term];
                    term1.cmp(term2)
                };
                term_annotations.sort_by(&ont_term_cmp);
            }
        }

        self.children_by_termid = children_by_termid;
    }

    // Collect export metadata: database creation time, per-CV versions and
    // gene/term counts, plus this exporter's own name and version.
    fn make_metadata(&mut self) -> Metadata {
        let mut db_creation_datetime = None;

        for chadoprop in &self.raw.chadoprops {
            if chadoprop.prop_type.name == "db_creation_datetime" {
                db_creation_datetime = chadoprop.value.clone();
            }
        }

        let mut cv_versions = HashMap::new();

        for cvprop in &self.raw.cvprops {
            if cvprop.prop_type.name == "cv_version" {
                cv_versions.insert(cvprop.cv.name.clone(), cvprop.value.clone());
            }
        }

        // taken from Cargo.toml at compile time
        const PKG_NAME: &str = env!("CARGO_PKG_NAME");
        const VERSION: &str = env!("CARGO_PKG_VERSION");

        Metadata {
            export_prog_name: RcString::from(PKG_NAME),
            export_prog_version: RcString::from(VERSION),
            db_creation_datetime: db_creation_datetime.unwrap(),
            gene_count: self.genes.len(),
            term_count: self.terms.len(),
            cv_versions,
        }
    }

    // Build the term -> genotype annotation map used by the API server;
    // annotations without a genotype are skipped.
    pub fn get_api_genotype_annotation(&self) -> HashMap<TermId, Vec<APIGenotypeAnnotation>> {
        let mut app_genotype_annotation = HashMap::new();

        for term_details in self.terms.values() {
            for annotations_vec in term_details.cv_annotations.values() {
                for ont_term_annotations in annotations_vec {
                    'DETAILS: for annotation_id in &ont_term_annotations.annotations {
                        let annotation_details = self.annotation_details.
get(annotation_id).expect("can't find OntAnnotationDetail");

                        if annotation_details.genotype.is_none() {
                            continue 'DETAILS;
                        }
                        let genotype_uniquename = annotation_details.genotype.clone().unwrap();
                        let genotype = &term_details.genotypes_by_uniquename[&genotype_uniquename];
                        let conditions =
                            annotation_details.conditions.iter()
                            .map(|cond_termid| {
                                let cond_term = self.terms.get(cond_termid).unwrap();
                                TermAndName {
                                    termid: cond_term.termid.clone(),
                                    name: cond_term.name.clone(),
                                }
                            })
                            .collect::<HashSet<_>>();
                        let mut api_annotation = APIGenotypeAnnotation {
                            is_multi: genotype.loci.len() > 1,
                            ploidiness: genotype.ploidiness(),
                            conditions,
                            alleles: vec![],
                        };
                        // flatten every expressed allele of every locus
                        for locus in &genotype.loci {
                            for allele in &locus.expressed_alleles {
                                let allele_uniquename = &allele.allele_uniquename;
                                let allele_short =
                                    self.alleles.get(allele_uniquename).expect("Can't find allele");
                                let allele_gene_uniquename =
                                    allele_short.gene_uniquename.clone();
                                let allele_details = APIAlleleDetails {
                                    gene: allele_gene_uniquename,
                                    allele_type: allele_short.allele_type.clone(),
                                    expression: allele.expression.clone(),
                                };
                                api_annotation.alleles.push(allele_details);
                            }
                        }
                        app_genotype_annotation
                            .entry(term_details.termid.clone())
                            .or_insert_with(Vec::new)
                            .push(api_annotation);
                    }
                }
            }
        }

        app_genotype_annotation
    }

    // Return (molecular weight, protein length, length-bin name) for the
    // first transcript of the gene that has a protein.  The bin name comes
    // from the gene results visualisation config; all None when no protein
    // or no matching bin is found.
    fn make_protein_data(&self, gene_details: &GeneDetails)
                         -> (Option<f32>, Option<usize>, Option<GeneQueryAttrName>) {
        let mut molecular_weight = None;
        let mut protein_length = None;

        for transcript_uniquename in &gene_details.transcripts {
            if let Some(transcript) = self.transcripts.get(transcript_uniquename) {
                if let Some(ref protein) = transcript.protein {
                    // round the weight to two decimal places
                    molecular_weight = Some((100.0 * protein.molecular_weight).round() / 100.0);
                    // ignore a trailing stop codon ('*') in the length
                    if protein.sequence.ends_with('*') {
                        protein_length = Some(protein.sequence.len() - 1);
                    } else {
                        protein_length = Some(protein.sequence.len());
                    }
                    break;
                }
            }
        }

        for field_name in &self.config.gene_results.visualisation_field_names {
            let column_conf = &self.config.gene_results.field_config[field_name];

            for attr_value_conf in &column_conf.attr_values {
                if let (Some(ref bin_start), Some(ref bin_end)) =
                    (attr_value_conf.bin_start, attr_value_conf.bin_end) {
                    if let Some(prot_len) = protein_length {
                        if *bin_start <= prot_len && *bin_end >= prot_len {
                            return (molecular_weight, Some(prot_len),
                                    Some(attr_value_conf.name.clone()));
                        }
                    }
                }
            }
        }

        (None, None, None)
    }

    // For the gene query API: find the first term from term_config that this
    // gene is (directly or via an interesting parent) annotated with in the
    // given CV.  Returns None when the gene has no annotation in the CV at
    // all, and GeneQueryTermData::Other when none of the terms match.
    fn make_gene_query_go_data(&self, gene_details: &GeneDetails, term_config: &[TermId],
                               cv_name: &str) -> Option<GeneQueryTermData> {
        let component_term_annotations =
            gene_details.cv_annotations.get(cv_name)?;

        let in_component = |check_termid: &str| {
            for term_annotation in component_term_annotations {
                let maybe_term_details =
                    self.terms.get(&term_annotation.term);

                let term_details =
                    maybe_term_details.unwrap_or_else(|| {
                        panic!("can't find TermDetails for {}", &term_annotation.term)
                    });

                let interesting_parent_ids = &term_details.interesting_parent_ids;

                if !term_annotation.is_not &&
                    (term_annotation.term == check_termid ||
                     interesting_parent_ids.contains(check_termid)) {
                    return true;
                }
            }
            false
        };

        for go_component_termid in term_config {
            if in_component(go_component_termid) {
                return Some(GeneQueryTermData::Term(TermAndName {
                    termid: go_component_termid.to_owned(),
                    name: self.terms.get(go_component_termid).unwrap().name.clone(),
                }));
            }
        }

        Some(GeneQueryTermData::Other)
    }

    // Collect the taxon IDs of all orthologs of this gene.
    fn get_ortholog_taxonids(&self, gene_details: &GeneDetails) -> HashSet<u32> {
        let mut return_set = HashSet::new();

        for ortholog_annotation in &gene_details.ortholog_annotations {
            return_set.insert(ortholog_annotation.ortholog_taxonid);
        }

        return_set
    }

    // Collect the uniquenames of all physical interaction partners of this
    // gene, whichever side of the interaction the gene is on.
    fn get_physical_interactors(&self, gene_details: &GeneDetails) -> HashSet<GeneUniquename> {
        let mut return_set = HashSet::new();

        for physical_interaction in &gene_details.physical_interactions {
            if gene_details.uniquename == physical_interaction.gene_uniquename {
                return_set.insert(physical_interaction.interactor_uniquename.clone());
            } else {
                // gene is the prey for this interaction
return_set.insert(physical_interaction.gene_uniquename.clone());
            }
        }

        return_set
    }

    // Build the per-gene GeneQueryData map consumed by the advanced search:
    // GO summaries, interactors, protein data and misc gene attributes.
    fn make_gene_query_data_map(&self) -> HashMap<GeneUniquename, GeneQueryData> {
        let mut gene_query_data_map = HashMap::new();

        for gene_details in self.genes.values() {
            let ortholog_taxonids = self.get_ortholog_taxonids(gene_details);
            let physical_interactors = self.get_physical_interactors(gene_details);

            // term IDs come from the gene results visualisation config
            let mut cc_terms = vec![];
            let mut process_terms = vec![];
            let mut function_terms = vec![];

            for field_name in &self.config.gene_results.visualisation_field_names {
                let column_conf = &self.config.gene_results.field_config[field_name];

                for attr_value_conf in &column_conf.attr_values {
                    if let Some(ref termid) = attr_value_conf.termid {
                        match field_name.as_ref() {
                            "go_component" => cc_terms.push(termid.clone()),
                            "go_process_superslim" => process_terms.push(termid.clone()),
                            "go_function" => function_terms.push(termid.clone()),
                            _ => (),
                        }
                    }
                }
            }

            let go_component =
                self.make_gene_query_go_data(gene_details, &cc_terms, "cellular_component");
            let go_process_superslim =
                self.make_gene_query_go_data(gene_details, &process_terms, "biological_process");
            let go_function =
                self.make_gene_query_go_data(gene_details, &function_terms, "molecular_function");

            // transmembrane domain flag only makes sense for mRNA genes
            let tmm =
                if gene_details.feature_type == "mRNA gene" {
                    if gene_details.tm_domain_coords.is_empty() {
                        Some(PresentAbsent::Absent)
                    } else {
                        Some(PresentAbsent::Present)
                    }
                } else {
                    Some(PresentAbsent::NotApplicable)
                };

            let (molecular_weight, protein_length, protein_length_bin) =
                self.make_protein_data(gene_details);

            let gene_query_data = GeneQueryData {
                gene_uniquename: gene_details.uniquename.clone(),
                deletion_viability: gene_details.deletion_viability.clone(),
                go_component,
                go_process_superslim,
                go_function,
                characterisation_status: gene_details.characterisation_status.clone(),
                taxonomic_distribution: gene_details.taxonomic_distribution.clone(),
                tmm,
                ortholog_taxonids,
                physical_interactors,
                molecular_weight,
                protein_length,
                protein_length_bin,
                subset_termids: gene_details.subset_termids.clone(),
            };

            gene_query_data_map.insert(gene_details.uniquename.clone(), gene_query_data);
        }

        gene_query_data_map
    }

    // Consume self and assemble the APIMaps served by the web API.
    pub fn make_api_maps(mut self) -> APIMaps {
        let mut gene_summaries: HashMap<GeneUniquename, APIGeneSummary> = HashMap::new();
        let mut gene_name_gene_map = HashMap::new();

        let mut interactors_of_genes = HashMap::new();

        for (gene_uniquename, gene_details) in &self.genes {
            // restrict to the configured load organism, if one is set
            if self.config.load_organism_taxonid.is_none() ||
                self.config.load_organism_taxonid.unwrap() == gene_details.taxonid {
                let gene_summary = self.make_api_gene_summary(gene_uniquename);
                if let Some(ref gene_name) = gene_summary.name {
                    gene_name_gene_map.insert(gene_name.clone(), gene_uniquename.clone());
                }
                gene_summaries.insert(gene_uniquename.clone(), gene_summary);

                let mut interactors = vec![];

                // the partner is whichever side of the interaction isn't this gene
                for interaction_annotation in &gene_details.physical_interactions {
                    let interactor_uniquename =
                        if gene_uniquename == &interaction_annotation.gene_uniquename {
                            interaction_annotation.interactor_uniquename.clone()
                        } else {
                            interaction_annotation.gene_uniquename.clone()
                        };
                    let interactor = APIInteractor {
                        interaction_type: InteractionType::Physical,
                        interactor_uniquename,
                    };
                    if !interactors.contains(&interactor) {
                        interactors.push(interactor);
                    }
                }
                for interaction_annotation in &gene_details.genetic_interactions {
                    let interactor_uniquename =
                        if gene_uniquename == &interaction_annotation.gene_uniquename {
                            interaction_annotation.interactor_uniquename.clone()
                        } else {
                            interaction_annotation.gene_uniquename.clone()
                        };
                    let interactor = APIInteractor {
                        interaction_type: InteractionType::Genetic,
                        interactor_uniquename,
                    };
                    if !interactors.contains(&interactor) {
                        interactors.push(interactor);
                    }
                }

                interactors_of_genes.insert(gene_uniquename.clone(), interactors);
            }
        }

        let gene_query_data_map = self.make_gene_query_data_map();

        let mut term_summaries: HashSet<TermShort> = HashSet::new();
        let mut termid_genes: HashMap<TermId,
HashSet<GeneUniquename>> = HashMap::new(); let mut terms_for_api: HashMap<TermId, TermDetails> = HashMap::new(); for termid in self.terms.keys() { term_summaries.insert(self.make_term_short(termid)); } let termid_genotype_annotation: HashMap<TermId, Vec<APIGenotypeAnnotation>> = self.get_api_genotype_annotation(); for (termid, term_details) in self.terms.drain() { let cv_config = &self.config.cv_config; if let Some(term_config) = cv_config.get(&term_details.cv_name) { if term_config.feature_type == "gene" { termid_genes.insert(termid.clone(), term_details.genes_annotated_with.clone()); } } terms_for_api.insert(termid.clone(), term_details); } let seq_feature_page_features: Vec<FeatureShort> = self.other_features.values() .filter(|feature_short| { let so_types_to_show = &self.config.sequence_feature_page.so_types_to_show; let feature_type_string = feature_short.feature_type.to_string(); so_types_to_show.contains(&feature_type_string) }) .map(|feature_short| { let mut new_feature = feature_short.clone(); // we don't need the residues for the seq feature page new_feature.residues = RcString::new(); new_feature }).collect(); // avoid clone() let mut term_subsets = HashMap::new(); std::mem::swap(&mut term_subsets, &mut self.term_subsets); let mut gene_subsets = HashMap::new(); std::mem::swap(&mut gene_subsets, &mut self.gene_subsets); let mut children_by_termid = HashMap::new(); std::mem::swap(&mut children_by_termid, &mut self.children_by_termid); let mut gene_expression_measurements = HashMap::new(); std::mem::swap(&mut gene_expression_measurements, &mut self.gene_expression_measurements); APIMaps { gene_summaries, gene_query_data_map, termid_genes, termid_genotype_annotation, term_summaries, genes: self.genes, gene_name_gene_map, transcripts: self.transcripts, alleles: self.alleles, genotypes: self.genotypes, terms: terms_for_api, interactors_of_genes, references: self.references, other_features: self.other_features, seq_feature_page_features, annotation_details: 
// Walk all ontology annotations of one feature (`identifier` is a gene
// or genotype uniquename) and record every entity those annotations
// mention — terms, genes, transcripts, genotypes/alleles and references
// — into the corresponding `seen_*` map under `identifier`.  The maps
// are later used to populate the *_by_uniquename / terms_by_termid
// lookup fields of the details structs.
fn add_cv_annotations_to_maps(&self,
                              identifier: &RcString,
                              cv_annotations: &OntAnnotationMap,
                              seen_references: &mut HashMap<RcString, ReferenceShortOptionMap>,
                              seen_genes: &mut HashMap<RcString, GeneShortOptionMap>,
                              seen_genotypes: &mut HashMap<RcString, GenotypeShortMap>,
                              seen_alleles: &mut HashMap<RcString, AlleleShortMap>,
                              seen_transcripts: &mut HashMap<RcString, TranscriptDetailsOptionMap>,
                              seen_terms: &mut HashMap<RcString, TermShortOptionMap>) {
    for feat_annotations in cv_annotations.values() {
        for feat_annotation in feat_annotations.iter() {
            // the annotated term itself
            self.add_term_to_hash(seen_terms, identifier, &feat_annotation.term);

            for annotation_detail_id in &feat_annotation.annotations {
                let annotation_detail = self.annotation_details.
                    get(annotation_detail_id).expect("can't find OntAnnotationDetail");

                for transcript_uniquename in &annotation_detail.transcript_uniquenames {
                    self.add_transcript_to_hashes(seen_transcripts, seen_genes,
                                                  identifier, transcript_uniquename);
                }

                self.add_ref_to_hash(seen_references, identifier, &annotation_detail.reference);

                // experimental conditions are terms too
                for condition_termid in &annotation_detail.conditions {
                    self.add_term_to_hash(seen_terms, identifier, condition_termid);
                }

                // gene product form IDs are only treated as terms when they
                // come from the PRO ontology ("PR:" prefix)
                if let Some(ref gene_product_form_id) = annotation_detail.gene_product_form_id {
                    if gene_product_form_id.starts_with("PR:") {
                        self.add_term_to_hash(seen_terms, identifier, gene_product_form_id);
                    }
                }

                // annotation extensions can reference terms, genes/promoters
                // or transcripts; other range kinds carry no linkable entity
                for ext_part in &annotation_detail.extension {
                    match ext_part.ext_range {
                        ExtRange::Term(ref range_termid) |
                        ExtRange::GeneProduct(ref range_termid) =>
                            self.add_term_to_hash(seen_terms, identifier, range_termid),
                        ExtRange::Gene(ref gene_uniquename) |
                        ExtRange::Promoter(ref gene_uniquename) =>
                            self.add_gene_to_hash(seen_genes, identifier, gene_uniquename),
                        ExtRange::Transcript(ref transcript_uniquename) =>
                            self.add_transcript_to_hashes(seen_transcripts, seen_genes,
                                                          identifier, transcript_uniquename),
                        _ => {},
                    }
                }

                if let Some(ref genotype_uniquename) = annotation_detail.genotype {
                    self.add_genotype_to_hash(seen_genotypes, seen_alleles, seen_genes,
                                              identifier, genotype_uniquename);
                }

                // with/from values are handled identically: both can name
                // genes or transcripts worth linking from this page
                let with_from_iter = annotation_detail.withs
                    .iter()
                    .chain(annotation_detail.froms.iter());
                for with_from_value in with_from_iter {
                    match with_from_value {
                        WithFromValue::Gene(ref gene_short) => {
                            self.add_gene_to_hash(seen_genes, identifier, &gene_short.uniquename)
                        },
                        &WithFromValue::Transcript(ref transcript_uniquename) => {
                            self.add_transcript_to_hashes(seen_transcripts, seen_genes,
                                                          identifier, transcript_uniquename);
                        },
                        _ => (),
                    }
                }
            }
        }
    }
}
&mut seen_genes, termid, transcript_uniquename); } self.add_ref_to_hash(&mut seen_references, termid, &annotation_detail.reference); for condition_termid in &annotation_detail.conditions { self.add_term_to_hash(&mut seen_terms, termid, condition_termid); } if let Some(ref gene_product_form_id) = annotation_detail.gene_product_form_id { if gene_product_form_id.starts_with("PR:") { self.add_term_to_hash(&mut seen_terms, termid, gene_product_form_id); } } for ext_part in &annotation_detail.extension { match ext_part.ext_range { ExtRange::Term(ref range_termid) | ExtRange::GeneProduct(ref range_termid) => self.add_term_to_hash(&mut seen_terms, termid, range_termid), ExtRange::Gene(ref gene_uniquename) | ExtRange::Promoter(ref gene_uniquename) => self.add_gene_to_hash(&mut seen_genes, termid, gene_uniquename), ExtRange::Transcript(ref transcript_uniquename) => self.add_transcript_to_hashes(&mut seen_transcripts, &mut seen_genes, termid, transcript_uniquename), _ => {}, } } if let Some(ref genotype_uniquename) = annotation_detail.genotype { self.add_genotype_to_hash(&mut seen_genotypes, &mut seen_alleles, &mut seen_genes, termid, genotype_uniquename); } let with_from_iter = annotation_detail.withs .iter() .chain(annotation_detail.froms.iter()); for with_from_value in with_from_iter { match with_from_value { WithFromValue::Gene(ref gene_short) => { self.add_gene_to_hash(&mut seen_genes, termid, &gene_short.uniquename) }, &WithFromValue::Transcript(ref transcript_uniquename) => { self.add_transcript_to_hashes(&mut seen_transcripts, &mut seen_genes, termid, transcript_uniquename); }, _ => (), } } } } } } for (termid, term_details) in &mut self.terms { if let Some(genes) = seen_genes.remove(termid) { term_details.genes_by_uniquename = genes; } if let Some(genotypes) = seen_genotypes.remove(termid) { term_details.genotypes_by_uniquename = genotypes; } if let Some(alleles) = seen_alleles.remove(termid) { term_details.alleles_by_uniquename = alleles; } if let Some(references) = 
// Fill the lookup maps (references/genes/genotypes/alleles/transcripts
// by uniquename, terms by termid) of every GeneDetails, from the
// entities referenced by the gene's ontology annotations, transcripts,
// interactions, ortholog/paralog annotations, target-of annotations and
// feature publications.  Two passes: accumulate into local `seen_*`
// maps while `self.genes` is only borrowed shared, then move the
// results into the (mutably borrowed) GeneDetails.
fn set_gene_details_maps(&mut self) {
    let (mut seen_references, mut seen_genes, mut seen_genotypes,
         mut seen_alleles, mut seen_transcripts, mut seen_terms) = get_maps();

    {
        for (gene_uniquename, gene_details) in &self.genes {
            // everything mentioned by the gene's ontology annotations
            self.add_cv_annotations_to_maps(gene_uniquename,
                                            &gene_details.cv_annotations,
                                            &mut seen_references,
                                            &mut seen_genes,
                                            &mut seen_genotypes,
                                            &mut seen_alleles,
                                            &mut seen_transcripts,
                                            &mut seen_terms);

            for transcript_uniquename in &gene_details.transcripts {
                self.add_transcript_to_hashes(&mut seen_transcripts,
                                              &mut seen_genes,
                                              gene_uniquename, transcript_uniquename);
            }

            // both physical and genetic interactions link this gene to a
            // partner gene and (optionally) a reference
            let interaction_iter =
                gene_details.physical_interactions.iter().chain(&gene_details.genetic_interactions);
            for interaction in interaction_iter {
                self.add_ref_to_hash(&mut seen_references, gene_uniquename,
                                     &interaction.reference_uniquename);
                self.add_gene_to_hash(&mut seen_genes, gene_uniquename,
                                      &interaction.gene_uniquename);
                self.add_gene_to_hash(&mut seen_genes, gene_uniquename,
                                      &interaction.interactor_uniquename);
            }

            for ortholog_annotation in &gene_details.ortholog_annotations {
                self.add_ref_to_hash(&mut seen_references, gene_uniquename,
                                     &ortholog_annotation.reference_uniquename);
                self.add_gene_to_hash(&mut seen_genes, gene_uniquename,
                                      &ortholog_annotation.gene_uniquename);
                self.add_gene_to_hash(&mut seen_genes, gene_uniquename,
                                      &ortholog_annotation.ortholog_uniquename);
            }
            for paralog_annotation in &gene_details.paralog_annotations {
                self.add_ref_to_hash(&mut seen_references, gene_uniquename,
                                     &paralog_annotation.reference_uniquename);
                self.add_gene_to_hash(&mut seen_genes, gene_uniquename,
                                      &paralog_annotation.gene_uniquename);
                self.add_gene_to_hash(&mut seen_genes, gene_uniquename,
                                      &paralog_annotation.paralog_uniquename);
            }

            for target_of_annotation in &gene_details.target_of_annotations {
                let target_of_gene = &target_of_annotation.gene;
                self.add_gene_to_hash(&mut seen_genes, gene_uniquename, target_of_gene);
                if let Some(ref annotation_genotype_uniquename) = target_of_annotation.genotype_uniquename {
                    self.add_genotype_to_hash(&mut seen_genotypes, &mut seen_alleles,
                                              &mut seen_genes, gene_uniquename,
                                              annotation_genotype_uniquename)
                }
                self.add_ref_to_hash(&mut seen_references, gene_uniquename,
                                     &target_of_annotation.reference_uniquename);
            }

            for publication in &gene_details.feature_publications {
                self.add_ref_to_hash(&mut seen_references, gene_uniquename,
                                     &Some(publication.clone()));
            }
        }
    }

    // second pass: move the accumulated maps into each GeneDetails;
    // remove() avoids cloning and leaves untouched genes with their
    // existing (empty) maps
    for (gene_uniquename, gene_details) in &mut self.genes {
        if let Some(references) = seen_references.remove(gene_uniquename) {
            gene_details.references_by_uniquename = references;
        }
        if let Some(alleles) = seen_alleles.remove(gene_uniquename) {
            gene_details.alleles_by_uniquename = alleles;
        }
        if let Some(genes) = seen_genes.remove(gene_uniquename) {
            gene_details.genes_by_uniquename = genes;
        }
        if let Some(genotypes) = seen_genotypes.remove(gene_uniquename) {
            gene_details.genotypes_by_uniquename = genotypes;
        }
        if let Some(transcripts) = seen_transcripts.remove(gene_uniquename) {
            gene_details.transcripts_by_uniquename = transcripts;
        }
        if let Some(terms) = seen_terms.remove(gene_uniquename) {
            gene_details.terms_by_termid = terms;
        }
    }
}
// Fill the lookup maps of every ReferenceDetails (genes, genotypes,
// alleles, terms and transcripts mentioned by the publication's
// annotations and interactions) and compute its gene_count.  Same
// two-pass accumulate-then-assign scheme as set_gene_details_maps().
fn set_reference_details_maps(&mut self) {
    // for calculating the gene_count field, we don't include non-pombe genes
    let mut gene_count_hash: HashMap<RcString, GeneShortOptionMap> =
        HashMap::new();

    // only count a gene when it belongs to the configured load organism;
    // with no load_organism_taxonid configured, nothing is counted
    let mut maybe_add_to_gene_count_hash =
        |reference_uniquename: &RcString, gene_uniquename: &GeneUniquename| {
            if let Some(load_org_taxonid) = self.config.load_organism_taxonid {
                if let Some(gene_details) = self.genes.get(gene_uniquename) {
                    if gene_details.taxonid == load_org_taxonid {
                        self.add_gene_to_hash(&mut gene_count_hash,
                                              reference_uniquename,
                                              gene_uniquename);
                    }
                }
            }
        };

    // references_by_uniquename isn't needed here, hence the `_`
    let (_, mut seen_genes, mut seen_genotypes,
         mut seen_alleles, mut seen_transcripts, mut seen_terms) = get_maps();

    {
        for (reference_uniquename, reference_details) in &self.references {
            for feat_annotations in reference_details.cv_annotations.values() {
                for feat_annotation in feat_annotations.iter() {
                    self.add_term_to_hash(&mut seen_terms, reference_uniquename,
                                          &feat_annotation.term);

                    for annotation_detail_id in &feat_annotation.annotations {
                        let annotation_detail = self.annotation_details
                            .get(annotation_detail_id).expect("can't find OntAnnotationDetail");

                        for transcript_uniquename in &annotation_detail.transcript_uniquenames {
                            self.add_transcript_to_hashes(&mut seen_transcripts,
                                                          &mut seen_genes,
                                                          reference_uniquename,
                                                          transcript_uniquename);
                        }

                        for gene_uniquename in &annotation_detail.genes {
                            self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                                  gene_uniquename);
                            maybe_add_to_gene_count_hash(reference_uniquename, gene_uniquename);
                        }
                        for condition_termid in &annotation_detail.conditions {
                            self.add_term_to_hash(&mut seen_terms, reference_uniquename,
                                                  condition_termid);
                        }

                        // PRO gene product form IDs are linkable terms
                        if let Some(ref gene_product_form_id) = annotation_detail.gene_product_form_id {
                            if gene_product_form_id.starts_with("PR:") {
                                self.add_term_to_hash(&mut seen_terms, reference_uniquename,
                                                      gene_product_form_id);
                            }
                        }

                        // extension ranges: genes also contribute to gene_count
                        for ext_part in &annotation_detail.extension {
                            match ext_part.ext_range {
                                ExtRange::Term(ref range_termid) |
                                ExtRange::GeneProduct(ref range_termid) =>
                                    self.add_term_to_hash(&mut seen_terms, reference_uniquename,
                                                          range_termid),
                                ExtRange::Gene(ref gene_uniquename) |
                                ExtRange::Promoter(ref gene_uniquename) => {
                                    self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                                          gene_uniquename);
                                    maybe_add_to_gene_count_hash(reference_uniquename,
                                                                 gene_uniquename);
                                },
                                ExtRange::Transcript(ref transcript_uniquename) =>
                                    self.add_transcript_to_hashes(&mut seen_transcripts,
                                                                  &mut seen_genes,
                                                                  reference_uniquename,
                                                                  transcript_uniquename),
                                _ => {},
                            }
                        }

                        let with_from_iter = annotation_detail.withs
                            .iter()
                            .chain(annotation_detail.froms.iter());
                        for with_from_value in with_from_iter {
                            match with_from_value {
                                WithFromValue::Gene(ref gene_short) => {
                                    self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                                          &gene_short.uniquename);
                                    maybe_add_to_gene_count_hash(reference_uniquename,
                                                                 &gene_short.uniquename);
                                },
                                WithFromValue::Transcript(ref transcript_uniquename) => {
                                    self.add_transcript_to_hashes(&mut seen_transcripts,
                                                                  &mut seen_genes,
                                                                  reference_uniquename,
                                                                  transcript_uniquename);
                                },
                                _ => (),
                            }
                        }

                        // NOTE(review): unlike the gene/genotype variants, the
                        // genotype is keyed by its display_uniquename here —
                        // presumably to match the keys used on reference pages;
                        // confirm against add_genotype_to_hash's callers
                        if let Some(ref genotype_uniquename) = annotation_detail.genotype {
                            let genotype = self.make_genotype_short(genotype_uniquename);
                            self.add_genotype_to_hash(&mut seen_genotypes, &mut seen_alleles,
                                                      &mut seen_genes, reference_uniquename,
                                                      &genotype.display_uniquename);
                        }
                    }
                }
            }

            // interacting genes count towards gene_count too
            let interaction_iter = reference_details.physical_interactions.iter()
                .chain(&reference_details.genetic_interactions);
            for interaction in interaction_iter {
                self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                      &interaction.gene_uniquename);
                maybe_add_to_gene_count_hash(reference_uniquename,
                                             &interaction.gene_uniquename);
                self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                      &interaction.interactor_uniquename);
                maybe_add_to_gene_count_hash(reference_uniquename,
                                             &interaction.interactor_uniquename);
            }

            for ortholog_annotation in &reference_details.ortholog_annotations {
                self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                      &ortholog_annotation.gene_uniquename);
                maybe_add_to_gene_count_hash(reference_uniquename,
                                             &ortholog_annotation.gene_uniquename);
                self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                      &ortholog_annotation.ortholog_uniquename);
                maybe_add_to_gene_count_hash(reference_uniquename,
                                             &ortholog_annotation.ortholog_uniquename);
            }
            for paralog_annotation in &reference_details.paralog_annotations {
                self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                      &paralog_annotation.gene_uniquename);
                maybe_add_to_gene_count_hash(reference_uniquename,
                                             &paralog_annotation.gene_uniquename);
                self.add_gene_to_hash(&mut seen_genes, reference_uniquename,
                                      &paralog_annotation.paralog_uniquename);
                maybe_add_to_gene_count_hash(reference_uniquename,
                                             &paralog_annotation.paralog_uniquename);
            }
        }
    }

    // second pass: move the accumulated maps into each ReferenceDetails
    for (reference_uniquename, reference_details) in &mut self.references {
        if let Some(genes) = seen_genes.remove(reference_uniquename) {
            reference_details.genes_by_uniquename = genes;
        }
        if let Some(genotypes) = seen_genotypes.remove(reference_uniquename) {
            reference_details.genotypes_by_uniquename = genotypes;
        }
        if let Some(alleles) = seen_alleles.remove(reference_uniquename) {
            reference_details.alleles_by_uniquename = alleles;
        }
        if let Some(terms) = seen_terms.remove(reference_uniquename) {
            reference_details.terms_by_termid = terms;
        }
        if let Some(transcripts) = seen_transcripts.remove(reference_uniquename) {
            reference_details.transcripts_by_uniquename = transcripts;
        }
        // gene_count only counts distinct load-organism genes
        if let Some(gene_count_genes) = gene_count_hash.remove(reference_uniquename) {
            reference_details.gene_count = gene_count_genes.len();
        }
    }
}
(reference_uniquename, reference_details) in &self.references { let mut seen_genes: HashSet<GeneUniquename> = HashSet::new(); for rel_annotations in reference_details.cv_annotations.values() { for rel_annotation in rel_annotations { for annotation_detail_id in &rel_annotation.annotations { let annotation_detail = self.annotation_details .get(annotation_detail_id).expect("can't find OntAnnotationDetail"); if !rel_annotation.is_not { for gene_uniquename in &annotation_detail.genes { seen_genes.insert(gene_uniquename.clone()); } } } } } let interaction_iter = reference_details.physical_interactions.iter().chain(&reference_details.genetic_interactions); for interaction in interaction_iter { seen_genes.insert(interaction.gene_uniquename.clone()); seen_genes.insert(interaction.interactor_uniquename.clone()); } for ortholog_annotation in &reference_details.ortholog_annotations { seen_genes.insert(ortholog_annotation.gene_uniquename.clone()); } ref_seen_genes.insert(reference_uniquename.clone(), seen_genes); if reference_details.pubmed_publication_date.is_some() { all_published_uniquenames.push(reference_uniquename.clone()); } } let (recent_admin_curated, recent_community_curated, all_community_curated, all_admin_curated) = make_canto_curated(&self.references, &all_published_uniquenames); let recent_references = RecentReferences { pubmed: make_recently_added(&self.references, &all_published_uniquenames), admin_curated: recent_admin_curated, community_curated: recent_community_curated, }; self.recent_references = recent_references; self.all_community_curated = all_community_curated; self.all_admin_curated = all_admin_curated; for term_details in self.terms.values_mut() { term_details.single_locus_genotype_uniquenames = term_seen_single_locus_genotypes.remove(&term_details.termid).unwrap(); term_details.gene_count = term_seen_genes[&term_details.termid].len(); term_details.genotype_count = term_seen_genotypes[&term_details.termid].len(); } } // make gene subsets for genes 
the are not in a slim category fn make_non_slim_subset(&self, cv_name: &str, slim_subset: &TermSubsetDetails) -> IdGeneSubsetMap { let slim_termid_set: HashSet<RcString> = slim_subset.elements.keys().cloned().collect(); let mut non_slim_with_bp_annotation = HashSet::new(); let mut non_slim_without_bp_annotation = HashSet::new(); let has_parent_in_slim = |term_annotations: &Vec<OntTermAnnotations>| { for term_annotation in term_annotations { let interesting_parent_ids = &self.terms[&term_annotation.term].interesting_parent_ids; if !term_annotation.is_not && (slim_termid_set.contains(&term_annotation.term) || interesting_parent_ids.intersection(&slim_termid_set).count() > 0) { return true; } } false }; for gene_details in self.genes.values() { if let Some(load_organism_taxonid) = self.config.load_organism_taxonid { if load_organism_taxonid != gene_details.taxonid { continue; } } if gene_details.feature_type != "mRNA gene" { continue; } if gene_details.characterisation_status == Some(RcString::from("transposon")) || gene_details.characterisation_status == Some(RcString::from("dubious")) { continue; } let mut bp_count = 0; if let Some(annotations) = gene_details.cv_annotations.get(cv_name) { if has_parent_in_slim(annotations) { continue } bp_count = annotations.len(); } if bp_count == 0 { non_slim_without_bp_annotation.insert(gene_details.uniquename.clone()); } else { non_slim_with_bp_annotation.insert(gene_details.uniquename.clone()); } } let mut return_map = HashMap::new(); let cv_display_name = str::replace(cv_name, "_", " "); let with_annotation_display_name = String::from("Gene products with ") + &cv_display_name + " annotation that are not in a slim category"; let name = RcString::from(&format!("non_slim_with_{}_annotation", cv_name)); return_map.insert(name.clone(), GeneSubsetDetails { name, display_name: RcString::from(&with_annotation_display_name), elements: non_slim_with_bp_annotation, }); let without_annotation_display_name = String::from("Gene products 
with no ") + &cv_display_name + " annotation and are not in a slim category"; let name = RcString::from(&format!("non_slim_without_{}_annotation", cv_name)); return_map.insert(name.clone(), GeneSubsetDetails { name, display_name: RcString::from(&without_annotation_display_name), elements: non_slim_without_bp_annotation, }); return_map } fn make_slim_subset(&self, slim_name: &str) -> TermSubsetDetails { let mut all_genes = HashSet::new(); let mut slim_subset: HashMap<TermId, TermSubsetElement> = HashMap::new(); let slim_config = self.config.slims.get(slim_name) .unwrap_or_else(|| panic!("no slim config for {}", slim_name)); for slim_conf in &slim_config.terms { let slim_termid = &slim_conf.termid; let term_details = self.terms.get(slim_termid) .unwrap_or_else(|| panic!("can't find TermDetails for {}", slim_termid)); let subset_element = TermSubsetElement { name: term_details.name.clone(), gene_count: term_details.genes_annotated_with.len(), }; for gene in &term_details.genes_annotated_with { all_genes.insert(gene); } slim_subset.insert(slim_termid.clone(), subset_element); } TermSubsetDetails { name: RcString::from(slim_name), total_gene_count: all_genes.len(), elements: slim_subset, } } fn make_feature_type_subsets(&self, subsets: &mut IdGeneSubsetMap) { for gene_details in self.genes.values() { if let Some(load_organism_taxonid) = self.config.load_organism_taxonid { if load_organism_taxonid != gene_details.taxonid { continue; } } let subset_name = RcString::from("feature_type:") + &gene_details.feature_type; let re = Regex::new(r"[\s,:]+").unwrap(); let subset_name_no_spaces = RcString::from(re.replace_all(&subset_name, "_").as_ref()); subsets.entry(subset_name_no_spaces.clone()) .or_insert(GeneSubsetDetails { name: subset_name_no_spaces, display_name: RcString::from(&subset_name), elements: HashSet::new() }) .elements.insert(gene_details.uniquename.clone()); } } // make subsets using the characterisation_status field of GeneDetails fn 
make_characterisation_status_subsets(&self, subsets: &mut IdGeneSubsetMap) { for gene_details in self.genes.values() { if let Some(load_organism_taxonid) = self.config.load_organism_taxonid { if load_organism_taxonid != gene_details.taxonid { continue; } } if gene_details.feature_type != "mRNA gene" { continue; } if let Some(ref characterisation_status) = gene_details.characterisation_status { let subset_name = RcString::from("characterisation_status:") + characterisation_status; let re = Regex::new(r"[\s,:]+").unwrap(); let subset_name_no_spaces = RcString::from(re.replace_all(&subset_name, "_").as_ref()); subsets.entry(subset_name_no_spaces.clone()) .or_insert(GeneSubsetDetails { name: subset_name_no_spaces, display_name: RcString::from(&subset_name), elements: HashSet::new() }) .elements.insert(gene_details.uniquename.clone()); } } } // make InterPro subsets using the interpro_matches field of GeneDetails fn make_interpro_subsets(&mut self, subsets: &mut IdGeneSubsetMap) { for (gene_uniquename, gene_details) in &self.genes { if self.config.load_organism_taxonid.is_none() || self.config.load_organism_taxonid.unwrap() != gene_details.taxonid { continue; } for interpro_match in &gene_details.interpro_matches { let mut new_subset_names = vec![]; if !interpro_match.interpro_id.is_empty() { let subset_name = String::from("interpro:") + &interpro_match.interpro_id; new_subset_names.push((RcString::from(&subset_name), interpro_match.interpro_name.clone())); } let subset_name = String::from("interpro:") + &interpro_match.dbname.clone() + ":" + &interpro_match.id; new_subset_names.push((RcString::from(&subset_name), interpro_match.name.clone())); for (subset_name, display_name) in new_subset_names { subsets.entry(subset_name.clone()) .or_insert(GeneSubsetDetails { name: subset_name, display_name, elements: HashSet::new(), }) .elements.insert(gene_uniquename.clone()); } } } } // populated the subsets HashMap fn make_subsets(&mut self) { let mut gene_subsets: 
IdGeneSubsetMap = HashMap::new(); for (slim_name, slim_config) in &self.config.slims { let slim_subset = self.make_slim_subset(slim_name); let gene_subset = self.make_non_slim_subset(&slim_config.cv_name, &slim_subset); gene_subsets.extend(gene_subset); self.term_subsets.insert(slim_name.clone(), slim_subset); } self.make_feature_type_subsets(&mut gene_subsets); self.make_characterisation_status_subsets(&mut gene_subsets); self.make_interpro_subsets(&mut gene_subsets); self.gene_subsets = gene_subsets; } // sort the list of genes in the ChromosomeDetails by start_pos pub fn sort_chromosome_genes(&mut self) { let mut genes_to_sort: HashMap<ChromosomeName, Vec<GeneUniquename>> = HashMap::new(); { let sorter = |uniquename1: &GeneUniquename, uniquename2: &GeneUniquename| { let gene1 = &self.genes[uniquename1]; let gene2 = &self.genes[uniquename2]; if let Some(ref gene1_loc) = gene1.location { if let Some(ref gene2_loc) = gene2.location { let cmp = gene1_loc.start_pos.cmp(&gene2_loc.start_pos); if cmp != Ordering::Equal { return cmp; } } } if gene1.name.is_some() { if gene2.name.is_some() { gene1.name.cmp(&gene2.name) } else { Ordering::Less } } else { if gene2.name.is_some() { Ordering::Greater } else { gene1.uniquename.cmp(&gene2.uniquename) } } }; for (chr_uniquename, chr_details) in &self.chromosomes { genes_to_sort.insert(chr_uniquename.clone(), chr_details.gene_uniquenames.clone()); } for gene_uniquenames in genes_to_sort.values_mut() { gene_uniquenames.sort_by(&sorter); } } for (chr_uniquename, gene_uniquenames) in genes_to_sort { self.chromosomes.get_mut(&chr_uniquename).unwrap().gene_uniquenames = gene_uniquenames; } } fn get_dataset_name_for_measurement(&self, reference_uniquename: &str, level_type_termid: &str, during_termid: &str, scale: &str) -> Option<RcString> { for conf in &self.config.gene_expression.datasets { if conf.pubmed_id == reference_uniquename && conf.level_type_termid == level_type_termid && conf.during_termid == during_termid && conf.scale == 
scale { return Some(conf.name.clone()); } } None } fn set_gene_expression_measurements(&mut self) { let mut measurements = HashMap::new(); for annotation in &self.ont_annotations { if &annotation.term_short.cv_name != "gene_ex" { continue; } let gene_uniquename = if let Some(gene_short) = annotation.genes.iter().next() { gene_short.uniquename.clone() } else { continue; }; let level_type_termid = annotation.term_short.termid.clone(); let reference_uniquename = if let Some(ref_short) = &annotation.reference_short { ref_short.uniquename.clone() } else { continue; }; let mut during_ext = None; for extpart in &annotation.extension { if extpart.rel_type_name == "during" { during_ext = Some(&extpart.ext_range); } } let during_termid = if let Some(ExtRange::Term(termid)) = during_ext { termid.clone() } else { continue; }; let gene_ex_props = if let Some(ref props) = annotation.gene_ex_props { props } else { continue; }; let scale = gene_ex_props.scale.clone(); let copies_per_cell = gene_ex_props.copies_per_cell.as_ref().cloned(); let avg_copies_per_cell = gene_ex_props.avg_copies_per_cell.as_ref().cloned(); if let Some(dataset_name) = self.get_dataset_name_for_measurement(&reference_uniquename, &level_type_termid, &during_termid, &scale) { measurements .entry(gene_uniquename) .or_insert_with(HashMap::new) .insert(dataset_name, GeneExMeasurement { reference_uniquename, level_type_termid, during_termid, copies_per_cell, avg_copies_per_cell, scale }); } } self.gene_expression_measurements = measurements; } fn set_chromosome_gene_counts(&mut self) { let mut counts = HashMap::new(); let mut coding_counts = HashMap::new(); for gene_details in self.genes.values() { if let Some(ref loc) = gene_details.location { *counts .entry(&loc.chromosome_name) .or_insert(0) += 1; } if gene_details.feature_type == "mRNA gene" { if let Some(ref loc) = gene_details.location { *coding_counts .entry(&loc.chromosome_name) .or_insert(0) += 1; } } } for chromosome_detail in 
self.chromosomes.values_mut() { if let Some(count) = counts.get(&chromosome_detail.name) { chromosome_detail.gene_count = *count; } if let Some(count) = coding_counts.get(&chromosome_detail.name) { chromosome_detail.coding_gene_count = *count; } } } // remove some of the refs that have no annotations. // See: https://github.com/pombase/website/issues/628 fn remove_non_curatable_refs(&mut self) { let filtered_refs = self.references.drain() .filter(|&(_, ref reference_details)| { if reference_has_annotation(reference_details) { return true; } if let Some(ref triage_status) = reference_details.canto_triage_status { if triage_status == "New" || triage_status == "Wrong organism" && triage_status == "Loaded in error"{ return false; } } // default to true because there are references that // haven't or shouldn't be triaged, eg. GO_REF:... true }) .collect(); self.references = filtered_refs; } fn make_solr_term_summaries(&mut self) -> Vec<SolrTermSummary> { let mut return_summaries = vec![]; let term_name_split_re = Regex::new(r"\W+").unwrap(); for (termid, term_details) in &self.terms { if term_details.is_obsolete { continue; } let trimmable_p = |c: char| { c.is_whitespace() || c == ',' || c == ':' || c == ';' || c == '.' 
// NOTE(review): collapsed multi-line source.  These lines hold: the rest of
// `make_solr_term_summaries` (close vs. distant synonym word collection, then the
// SolrTermSummary construction), `make_solr_reference_summaries`, `get_stats`
// (per-taxon gene/annotation counts plus community/admin publication totals), and
// `get_web_data` — the top-level build pipeline that runs every processing pass in
// a fixed order and assembles the final WebData value.  Statement order in
// `get_web_data` matters (e.g. `remove_non_curatable_refs` runs before the
// *_maps passes), so the code is left byte-identical.
|| c == '\'' }; let term_name_words = term_name_split_re.split(&term_details.name) .map(|s: &str| { s.trim_matches(&trimmable_p).to_owned() }).collect::<Vec<String>>(); let mut close_synonyms = vec![]; let mut close_synonym_words_vec: Vec<RcString> = vec![]; let mut distant_synonyms = vec![]; let mut distant_synonym_words_vec: Vec<RcString> = vec![]; let add_to_words_vec = |synonym: &str, words_vec: &mut Vec<RcString>| { let synonym_words = term_name_split_re.split(synonym); for word in synonym_words { let word_string = RcString::from(word.trim_matches(&trimmable_p)); if !words_vec.contains(&word_string) && !term_name_words.contains(&word_string) { words_vec.push(word_string); } } }; for synonym in &term_details.synonyms { if synonym.synonym_type == "exact" || synonym.synonym_type == "narrow" { add_to_words_vec(&synonym.name, &mut close_synonym_words_vec); close_synonyms.push(synonym.name.clone()); } else { add_to_words_vec(&synonym.name, &mut distant_synonym_words_vec); distant_synonyms.push(synonym.name.clone()); } } distant_synonyms = distant_synonyms.into_iter() .filter(|synonym| { !close_synonyms.contains(synonym) }) .collect::<Vec<_>>(); let annotation_count = term_details.annotation_count(); let interesting_parent_ids_for_solr = term_details.interesting_parent_ids.clone(); let term_summ = SolrTermSummary { id: termid.clone(), cv_name: term_details.cv_name.clone(), name: term_details.name.clone(), definition: term_details.definition.clone(), close_synonyms, close_synonym_words: RcString::from(&close_synonym_words_vec.join(" ")), distant_synonyms, distant_synonym_words: RcString::from(&distant_synonym_words_vec.join(" ")), interesting_parent_ids: interesting_parent_ids_for_solr, secondary_identifiers: term_details.secondary_identifiers.clone(), annotation_count, gene_count: term_details.gene_count, genotype_count: term_details.genotype_count, highlighting: HashMap::new(), }; return_summaries.push(term_summ); } return_summaries } fn
make_solr_reference_summaries(&mut self) -> Vec<SolrReferenceSummary> { let mut return_summaries = vec![]; for reference_details in self.references.values() { return_summaries.push(SolrReferenceSummary::from_reference_details(reference_details)); } return_summaries } fn get_stats(&self) -> Stats { let mut by_taxon = HashMap::new(); for gene_details in self.genes.values() { let taxonid = gene_details.taxonid; by_taxon .entry(taxonid) .or_insert_with(StatCountsByTaxon::empty) .genes += 1; let mut annotation_count = 0; for term_annotations in gene_details.cv_annotations.values() { for term_annotation in term_annotations { annotation_count += term_annotation.annotations.len(); } } by_taxon .entry(taxonid) .or_insert_with(StatCountsByTaxon::empty) .annotations += annotation_count; } Stats { by_taxon, community_pubs_count: self.all_community_curated.len(), non_community_pubs_count: self.all_admin_curated.len(), } } pub fn get_web_data(mut self) -> WebData { self.process_dbxrefs(); self.process_references(); self.process_chromosome_features(); self.make_feature_rel_maps(); self.process_features(); self.add_gene_neighbourhoods(); self.process_props_from_feature_cvterms(); self.process_allele_features(); self.process_genotype_features(); self.process_cvterms(); self.add_interesting_parents(); self.process_cvterm_rels(); self.process_extension_cvterms(); self.process_feature_synonyms(); self.process_feature_publications(); self.process_feature_cvterms(); self.remove_duplicate_transcript_annotation(); self.store_ont_annotations(false); self.store_ont_annotations(true); self.process_cvtermpath(); self.process_annotation_feature_rels(); self.add_target_of_annotations(); self.set_deletion_viability(); self.set_term_details_subsets(); self.set_taxonomic_distributions(); self.remove_non_curatable_refs(); self.set_term_details_maps(); self.set_gene_details_maps(); self.set_gene_details_subset_termids(); self.set_genotype_details_maps(); self.set_reference_details_maps();
self.set_chromosome_gene_counts(); self.set_counts(); self.make_subsets(); self.sort_chromosome_genes(); self.set_gene_expression_measurements(); let stats = self.get_stats(); let metadata = self.make_metadata(); let mut gene_summaries: Vec<GeneSummary> = vec![]; let mut solr_gene_summaries: Vec<SolrGeneSummary> = vec![]; for (gene_uniquename, gene_details) in &self.genes { if self.config.load_organism_taxonid.is_none() || self.config.load_organism_taxonid.unwrap() == gene_details.taxonid { let gene_summary = self.make_gene_summary(gene_uniquename); let solr_gene_summary = SolrGeneSummary { id: gene_summary.uniquename.clone(), name: gene_summary.name.clone(), taxonid: gene_summary.taxonid, product: gene_summary.product.clone(), uniprot_identifier: gene_summary.uniprot_identifier.clone(), synonyms: gene_summary.synonyms.clone(), feature_type: gene_summary.feature_type.clone(), }; gene_summaries.push(gene_summary); solr_gene_summaries.push(solr_gene_summary); } } let solr_term_summaries = self.make_solr_term_summaries(); let solr_reference_summaries = self.make_solr_reference_summaries(); let solr_data = SolrData { term_summaries: solr_term_summaries, gene_summaries: solr_gene_summaries, reference_summaries: solr_reference_summaries, }; let chromosomes = self.chromosomes.clone(); let mut chromosome_summaries = vec![]; for chr_details in self.chromosomes.values() { chromosome_summaries.push(chr_details.make_chromosome_short()); } let recent_references = self.recent_references.clone(); let all_community_curated = self.all_community_curated.clone(); let all_admin_curated = self.all_admin_curated.clone(); let ont_annotations = self.ont_annotations.clone(); WebData { metadata, chromosomes, chromosome_summaries, recent_references, all_community_curated, all_admin_curated, api_maps: self.make_api_maps(), search_gene_summaries: gene_summaries, solr_data, ont_annotations, stats, } } }
// NOTE(review): this line is a before/after commit pair for the `names` CLI, with the
// commit message "Update usage output." between the two versions.  The "after" version
// only changes the clap metadata and usage text: author/about strings are reworded and
// the positional arg is renamed "AMOUNT" -> "amount" (both in `Arg::with_name` and in
// the matching `value_of` lookup, so behaviour is consistent).  Left byte-identical —
// editing either half would desynchronise the pair.
#[macro_use] extern crate clap; extern crate names; use clap::{App, Arg}; use names::{Generator, Name}; fn main() { let matches = App::new("names") .version(&crate_version!()[..]) .author("Fletcher Nichol <fnichol@nichol.ca>") .about("Random name generator") .arg(Arg::with_name("AMOUNT") .help("Number of names to generate (default: 1)") .index(1) ) .arg(Arg::with_name("number") .short("n") .long("number") .help("Adds a random number to the name(s)") ) .get_matches(); let amount = value_t!(matches.value_of("AMOUNT"), usize).unwrap_or(1); let naming = if matches.is_present("number") { Name::Numbered } else { Name::Plain }; let mut generator = Generator::default(naming); for _ in 0..amount { println!("{}", generator.next().unwrap()); } } Update usage output. #[macro_use] extern crate clap; extern crate names; use clap::{App, Arg}; use names::{Generator, Name}; fn main() { let matches = App::new("names") .version(&crate_version!()[..]) .author("\nAuthor: Fletcher Nichol <fnichol@nichol.ca>\n") .about("A random name generator with results like `delirious-pail'.") .arg(Arg::with_name("amount") .help("Number of names to generate (default: 1)") .index(1) ) .arg(Arg::with_name("number") .short("n") .long("number") .help("Adds a random number to the name(s)") ) .get_matches(); let amount = value_t!(matches.value_of("amount"), usize).unwrap_or(1); let naming = if matches.is_present("number") { Name::Numbered } else { Name::Plain }; let mut generator = Generator::default(naming); for _ in 0..amount { println!("{}", generator.next().unwrap()); } }
// NOTE(review): collapsed source of an MRU (most-recently-used) list, "before" half of a
// commit pair (the refactored "after" version follows further down).  `push` is
// move-to-front: it first `remove`s any equal element (hence the `T: Eq` bound), inserts
// at index 0, then truncates to `max_items` — so a zero-capacity list stays empty.
use std::ops::{Index, IndexMut}; use std::slice::{Iter, IterMut}; /// A simple MRU-list data structure. Create a list of the appropriate /// maximum size (which can be changed later) then use `push` to add new /// items. New items are always added at the front of the list. Adding /// an item which is already in the list is ok - it is moved to the beginning /// of the list. pub struct MRUList<T> { max_items: usize, data: Vec<T> } impl<T> MRUList<T> where T: Eq { pub fn new(max_items: usize) -> MRUList<T> { MRUList { max_items: max_items, data: Vec::<T>::new() } } pub fn push(&mut self, value: T) { self.remove(&value); self.data.insert(0, value); self.data.truncate(self.max_items); } pub fn remove(&mut self, value: &T) { let pos = self.data.iter().position(|v| v == value); if let Some(idx) = pos { self.data.remove(idx); } } pub fn set_max_items(&mut self, max_items: usize) { self.max_items = max_items; self.data.truncate(self.max_items); } pub fn clear(&mut self) { self.data.clear(); } pub fn len(&self) -> usize { self.data.len() } pub fn is_empty(&self) -> bool { self.data.is_empty() } pub fn iter(&self) -> Iter<T> { self.data.iter() } pub fn iter_mut(&mut self) -> IterMut<T> { self.data.iter_mut() } } impl<T> Index<usize> for MRUList<T> { type Output = T; fn index(&self, index: usize) -> &T { &self.data[index] } } impl<T> IndexMut<usize> for MRUList<T> { fn index_mut<'a>(&'a mut self, index: usize) -> &'a mut T { &mut self.data[index] } } pub fn new_string_mru(max_items: usize) -> MRUList<String> { MRUList::new(max_items) } // Run the tests using String since that is what we are likely to be using this class for. // This makes them a little more verbose than using int or str but is worth it.
// NOTE(review): collapsed source.  First the `#[cfg(test)] mod tests` for the "before"
// MRUList (built via the `new_string_mru` helper), then the commit message
// "Remove the new_string_mru helper function. Can use type inference in most places.",
// then the complete "after" version: same MRUList implementation without the helper,
// with tests rewritten to call `MRUList::new` directly (turbofishing
// `MRUList::<String>::new` only where no push fixes the element type).  Left
// byte-identical so the before/after pair stays in sync.
#[cfg(test)] mod tests { use super::*; #[test] fn new_for_zero_size_creates_empty_list() { let mut mru = new_string_mru(0); assert_eq!(mru.len(), 0); assert!(mru.is_empty()); mru.push("a".to_owned()); assert_eq!(mru.len(), 0, "Since max_items is zero, pushing a new element should not increase the length"); assert!(mru.is_empty()); } #[test] fn new_for_size_of_one_creates_list() { let mut mru = new_string_mru(1); assert_eq!(mru.len(), 0); assert!(mru.is_empty()); mru.push("a".to_owned()); assert_eq!(mru.len(), 1); assert!(!mru.is_empty()); mru.push("b".to_owned()); assert_eq!(mru.len(), 1, "Since max_items is 1, pushing a 2nd element should not increase the length"); } #[test] fn is_empty_for_empty_list_returns_true() { let mut mru = new_string_mru(0); assert!(mru.is_empty()); let mut mru = new_string_mru(1); assert!(mru.is_empty()); } #[test] fn clear_for_empty_list_does_not_panic() { let mut mru = new_string_mru(20); mru.clear(); assert!(mru.is_empty()); } #[test] fn clear_for_non_empty_list_clears_list() { let mut mru = new_string_mru(20); mru.push("a".to_owned()); mru.clear(); assert!(mru.is_empty()); } #[test] fn push_adds_items_in_push_down_stack_order() { let mut mru = new_string_mru(20); mru.push("a".to_owned()); mru.push("b".to_owned()); assert_eq!(mru[0], "b", "b was pushed last, so should be at the head of the list"); assert_eq!(mru[1], "a", "a was pushed before b, so should be the second item"); } #[test] fn push_for_item_already_in_list_moves_item_to_front() { let mut mru = new_string_mru(20); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.push("c".to_owned()); mru.push("a".to_owned()); assert_eq!(mru[0], "a"); assert_eq!(mru[1], "c"); assert_eq!(mru[2], "b"); assert_eq!(mru.len(), 3); } #[test] fn push_for_list_at_capacity_drops_items_off_end() { let mut mru = new_string_mru(2); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.push("c".to_owned()); assert_eq!(mru[0], "c"); assert_eq!(mru[1], "b"); assert_eq!(mru.len(), 2); } #[test] fn
remove_for_item_not_in_list_does_nothing() { let mut mru = new_string_mru(20); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.remove(&"c".to_owned()); assert_eq!(mru[0], "b"); assert_eq!(mru[1], "a"); assert_eq!(mru.len(), 2); } #[test] fn remove_for_list_of_one_item_removes_item() { let mut mru = new_string_mru(20); mru.push("a".to_owned()); mru.remove(&"a".to_owned()); assert!(mru.is_empty()); } #[test] fn remove_for_list_of_several_items_with_item_at_end_removes_item() { let mut mru = new_string_mru(20); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.push("c".to_owned()); mru.remove(&"a".to_owned()); assert_eq!(mru.len(), 2); assert_eq!(mru[0], "c"); assert_eq!(mru[1], "b"); } #[test] fn remove_for_list_of_several_items_with_item_at_beginning_removes_item() { let mut mru = new_string_mru(20); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.push("c".to_owned()); mru.remove(&"c".to_owned()); assert_eq!(mru.len(), 2); assert_eq!(mru[0], "b"); assert_eq!(mru[1], "a"); } #[test] fn set_max_items_for_new_size_smaller_than_current_trims_list_to_size() { let mut mru = new_string_mru(20); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.push("c".to_owned()); mru.set_max_items(2); assert_eq!(mru[0], "c"); assert_eq!(mru[1], "b"); assert_eq!(mru.len(), 2); } #[test] fn set_max_items_for_new_size_greater_than_current_leaves_list_untouched() { let mut mru = new_string_mru(3); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.push("c".to_owned()); mru.set_max_items(20); assert_eq!(mru[0], "c"); assert_eq!(mru[1], "b"); assert_eq!(mru[2], "a"); assert_eq!(mru.len(), 3); } #[test] fn index_mut_changes_item() { let mut mru = new_string_mru(20); mru.push("a".to_owned()); mru[0] = "b".to_owned(); assert_eq!(mru.len(), 1); assert_eq!(mru[0], "b"); } #[test] fn iter_for_empty_list_returns_zero_items() { let mut mru = new_string_mru(0); let mut iter = mru.iter(); assert_eq!(iter.next(), None); let mut mru = new_string_mru(1); let mut iter = 
mru.iter(); assert_eq!(iter.next(), None); } #[test] fn iter_for_list_with_items_returns_items_in_correct_order() { let mut mru = new_string_mru(20); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.push("c".to_owned()); let mut iter = mru.iter(); assert_eq!(iter.next(), Some(&"c".to_owned())); assert_eq!(iter.next(), Some(&"b".to_owned())); assert_eq!(iter.next(), Some(&"a".to_owned())); assert_eq!(iter.next(), None); } } Remove the new_string_mru helper function. Can use type inference in most places. use std::ops::{Index, IndexMut}; use std::slice::{Iter, IterMut}; /// A simple MRU-list data structure. Create a list of the appropriate /// maximum size (which can be changed later) then use `push` to add new /// items. New items are always added at the front of the list. Adding /// an item which is already in the list is ok - it is moved to the beginning /// of the list. The MRUList is not intended to be a high-performance data /// structure, it is intended for managing small numbers of items such as /// might appear in an editor's MRU menu. pub struct MRUList<T> { max_items: usize, data: Vec<T> } impl<T> MRUList<T> // This constaint is required by the `remove` method. 
where T: Eq { pub fn new(max_items: usize) -> MRUList<T> { MRUList { max_items: max_items, data: Vec::<T>::new() } } pub fn push(&mut self, value: T) { self.remove(&value); self.data.insert(0, value); self.data.truncate(self.max_items); } pub fn remove(&mut self, value: &T) { let pos = self.data.iter().position(|v| v == value); if let Some(idx) = pos { self.data.remove(idx); } } pub fn set_max_items(&mut self, max_items: usize) { self.max_items = max_items; self.data.truncate(self.max_items); } pub fn clear(&mut self) { self.data.clear(); } pub fn len(&self) -> usize { self.data.len() } pub fn is_empty(&self) -> bool { self.data.is_empty() } pub fn iter(&self) -> Iter<T> { self.data.iter() } pub fn iter_mut(&mut self) -> IterMut<T> { self.data.iter_mut() } } impl<T> Index<usize> for MRUList<T> { type Output = T; fn index(&self, index: usize) -> &T { &self.data[index] } } impl<T> IndexMut<usize> for MRUList<T> { fn index_mut<'a>(&'a mut self, index: usize) -> &'a mut T { &mut self.data[index] } } // Run the tests using String since that is what we are likely to be using this class for. // This makes them a little more verbose than using int or str but is worth it. 
#[cfg(test)] mod tests { use super::*; #[test] fn new_for_zero_size_creates_empty_list() { let mut mru = MRUList::new(0); assert_eq!(mru.len(), 0); assert!(mru.is_empty()); mru.push("a".to_owned()); assert_eq!(mru.len(), 0, "Since max_items is zero, pushing a new element should not increase the length"); assert!(mru.is_empty()); } #[test] fn new_for_size_of_one_creates_list() { let mut mru = MRUList::new(1); assert_eq!(mru.len(), 0); assert!(mru.is_empty()); mru.push("a".to_owned()); assert_eq!(mru.len(), 1); assert!(!mru.is_empty()); mru.push("b".to_owned()); assert_eq!(mru.len(), 1, "Since max_items is 1, pushing a 2nd element should not increase the length"); } #[test] fn is_empty_for_empty_list_returns_true() { let mut mru = MRUList::<String>::new(0); assert!(mru.is_empty()); let mut mru = MRUList::<String>::new(1); assert!(mru.is_empty()); } #[test] fn clear_for_empty_list_does_not_panic() { let mut mru = MRUList::<String>::new(20); mru.clear(); assert!(mru.is_empty()); } #[test] fn clear_for_non_empty_list_clears_list() { let mut mru = MRUList::new(20); mru.push("a".to_owned()); mru.clear(); assert!(mru.is_empty()); } #[test] fn push_adds_items_in_push_down_stack_order() { let mut mru = MRUList::new(20); mru.push("a".to_owned()); mru.push("b".to_owned()); assert_eq!(mru[0], "b", "b was pushed last, so should be at the head of the list"); assert_eq!(mru[1], "a", "a was pushed before b, so should be the second item"); } #[test] fn push_for_item_already_in_list_moves_item_to_front() { let mut mru = MRUList::new(20); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.push("c".to_owned()); mru.push("a".to_owned()); assert_eq!(mru[0], "a"); assert_eq!(mru[1], "c"); assert_eq!(mru[2], "b"); assert_eq!(mru.len(), 3); } #[test] fn push_for_list_at_capacity_drops_items_off_end() { let mut mru = MRUList::new(2); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.push("c".to_owned()); assert_eq!(mru[0], "c"); assert_eq!(mru[1], "b"); assert_eq!(mru.len(), 2); } 
#[test] fn remove_for_item_not_in_list_does_nothing() { let mut mru = MRUList::new(20); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.remove(&"c".to_owned()); assert_eq!(mru[0], "b"); assert_eq!(mru[1], "a"); assert_eq!(mru.len(), 2); } #[test] fn remove_for_list_of_one_item_removes_item() { let mut mru = MRUList::new(20); mru.push("a".to_owned()); mru.remove(&"a".to_owned()); assert!(mru.is_empty()); } #[test] fn remove_for_list_of_several_items_with_item_at_end_removes_item() { let mut mru = MRUList::new(20); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.push("c".to_owned()); mru.remove(&"a".to_owned()); assert_eq!(mru.len(), 2); assert_eq!(mru[0], "c"); assert_eq!(mru[1], "b"); } #[test] fn remove_for_list_of_several_items_with_item_at_beginning_removes_item() { let mut mru = MRUList::new(20); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.push("c".to_owned()); mru.remove(&"c".to_owned()); assert_eq!(mru.len(), 2); assert_eq!(mru[0], "b"); assert_eq!(mru[1], "a"); } #[test] fn set_max_items_for_new_size_smaller_than_current_trims_list_to_size() { let mut mru = MRUList::new(20); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.push("c".to_owned()); mru.set_max_items(2); assert_eq!(mru[0], "c"); assert_eq!(mru[1], "b"); assert_eq!(mru.len(), 2); } #[test] fn set_max_items_for_new_size_greater_than_current_leaves_list_untouched() { let mut mru = MRUList::new(3); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.push("c".to_owned()); mru.set_max_items(20); assert_eq!(mru[0], "c"); assert_eq!(mru[1], "b"); assert_eq!(mru[2], "a"); assert_eq!(mru.len(), 3); } #[test] fn index_mut_changes_item() { let mut mru = MRUList::new(20); mru.push("a".to_owned()); mru[0] = "b".to_owned(); assert_eq!(mru.len(), 1); assert_eq!(mru[0], "b"); } #[test] fn iter_for_empty_list_returns_zero_items() { let mut mru = MRUList::<String>::new(0); let mut iter = mru.iter(); assert_eq!(iter.next(), None); let mut mru = MRUList::<String>::new(1); let 
mut iter = mru.iter(); assert_eq!(iter.next(), None); } #[test] fn iter_for_list_with_items_returns_items_in_correct_order() { let mut mru = MRUList::new(20); mru.push("a".to_owned()); mru.push("b".to_owned()); mru.push("c".to_owned()); let mut iter = mru.iter(); assert_eq!(iter.next(), Some(&"c".to_owned())); assert_eq!(iter.next(), Some(&"b".to_owned())); assert_eq!(iter.next(), Some(&"a".to_owned())); assert_eq!(iter.next(), None); } }
// NOTE(review): collapsed source of rustc's `rustc_middle`-era `dep_graph` module, as a
// before/after commit pair around the message "Move import.".  The only difference in
// the "after" half: `use crate::ty::query::try_load_from_on_disk_cache;` is hoisted to
// the top-of-file import block, and the function-local
// `use crate::ty::query::try_load_from_on_disk_cache;` inside
// `DepContext::try_load_from_on_disk_cache` is removed accordingly.  Everything else
// (the `DepKind` trait impl with TLS-based task-deps plumbing, the `DepContext` impl
// including the `try_force_from_dep_node` incremental-bug workaround, and the
// `HashStableContext` impls) is unchanged.  Left byte-identical: this is
// compiler-internal, order-sensitive code and half of a dataset pair.
use crate::ich::StableHashingContext; use crate::ty::{self, TyCtxt}; use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::sync::Lock; use rustc_data_structures::thin_vec::ThinVec; use rustc_errors::Diagnostic; use rustc_hir::def_id::DefId; mod dep_node; mod safe; pub(crate) use rustc_query_system::dep_graph::DepNodeParams; pub use rustc_query_system::dep_graph::{ debug, hash_result, DepContext, DepNodeColor, DepNodeIndex, SerializedDepNodeIndex, WorkProduct, WorkProductFileKind, WorkProductId, }; pub use dep_node::{label_strs, DepConstructor, DepKind, DepNode, DepNodeExt}; pub use safe::AssertDepGraphSafe; pub use safe::DepGraphSafe; pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepKind>; pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>; pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery<DepKind>; pub type PreviousDepGraph = rustc_query_system::dep_graph::PreviousDepGraph<DepKind>; pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>; impl rustc_query_system::dep_graph::DepKind for DepKind { fn is_eval_always(&self) -> bool { DepKind::is_eval_always(self) } fn has_params(&self) -> bool { DepKind::has_params(self) } fn debug_node(node: &DepNode, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", node.kind)?; if !node.kind.has_params() && !node.kind.is_anon() { return Ok(()); } write!(f, "(")?; ty::tls::with_opt(|opt_tcx| { if let Some(tcx) = opt_tcx { if let Some(def_id) = node.extract_def_id(tcx) { write!(f, "{}", tcx.def_path_debug_str(def_id))?; } else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(*node) { write!(f, "{}", s)?; } else { write!(f, "{}", node.hash)?; } } else { write!(f, "{}", node.hash)?; } Ok(()) })?; write!(f, ")") } fn with_deps<OP, R>(task_deps: Option<&Lock<TaskDeps>>, op: OP) -> R where OP: FnOnce() -> R, { ty::tls::with_context(|icx| { let icx = ty::tls::ImplicitCtxt { task_deps, ..icx.clone() }; 
ty::tls::enter_context(&icx, |_| op()) }) } fn read_deps<OP>(op: OP) -> () where OP: for<'a> FnOnce(Option<&'a Lock<TaskDeps>>) -> (), { ty::tls::with_context_opt(|icx| { let icx = if let Some(icx) = icx { icx } else { return }; op(icx.task_deps) }) } } impl<'tcx> DepContext for TyCtxt<'tcx> { type DepKind = DepKind; type StableHashingContext = StableHashingContext<'tcx>; fn create_stable_hashing_context(&self) -> Self::StableHashingContext { TyCtxt::create_stable_hashing_context(*self) } fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool { // FIXME: This match is just a workaround for incremental bugs and should // be removed. https://github.com/rust-lang/rust/issues/62649 is one such // bug that must be fixed before removing this. match dep_node.kind { DepKind::hir_owner | DepKind::hir_owner_nodes | DepKind::CrateMetadata => { if let Some(def_id) = dep_node.extract_def_id(*self) { if def_id_corresponds_to_hir_dep_node(*self, def_id) { if dep_node.kind == DepKind::CrateMetadata { // The `DefPath` has corresponding node, // and that node should have been marked // either red or green in `data.colors`. bug!( "DepNode {:?} should have been \ pre-marked as red or green but wasn't.", dep_node ); } } else { // This `DefPath` does not have a // corresponding `DepNode` (e.g. a // struct field), and the ` DefPath` // collided with the `DefPath` of a // proper item that existed in the // previous compilation session. // // Since the given `DefPath` does not // denote the item that previously // existed, we just fail to mark green. return false; } } else { // If the node does not exist anymore, we // just fail to mark green. return false; } } _ => { // For other kinds of nodes it's OK to be // forced. 
} } debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node); ty::query::force_from_dep_node(*self, dep_node) } fn has_errors_or_delayed_span_bugs(&self) -> bool { self.sess.has_errors_or_delayed_span_bugs() } fn diagnostic(&self) -> &rustc_errors::Handler { self.sess.diagnostic() } // Interactions with on_disk_cache fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) { use crate::ty::query::try_load_from_on_disk_cache; try_load_from_on_disk_cache(*self, dep_node) } fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> { self.queries.on_disk_cache.load_diagnostics(*self, prev_dep_node_index) } fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) { self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics) } fn profiler(&self) -> &SelfProfilerRef { &self.prof } } fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: DefId) -> bool { let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); def_id.index == hir_id.owner.local_def_index } impl rustc_query_system::HashStableContext for StableHashingContext<'_> { fn debug_dep_tasks(&self) -> bool { self.sess().opts.debugging_opts.dep_tasks } } impl rustc_query_system::HashStableContextProvider<StableHashingContext<'tcx>> for TyCtxt<'tcx> { fn get_stable_hashing_context(&self) -> StableHashingContext<'tcx> { self.create_stable_hashing_context() } } impl rustc_query_system::HashStableContextProvider<StableHashingContext<'a>> for StableHashingContext<'a> { fn get_stable_hashing_context(&self) -> Self { self.clone() } } Move import. 
use crate::ich::StableHashingContext; use crate::ty::query::try_load_from_on_disk_cache; use crate::ty::{self, TyCtxt}; use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::sync::Lock; use rustc_data_structures::thin_vec::ThinVec; use rustc_errors::Diagnostic; use rustc_hir::def_id::DefId; mod dep_node; mod safe; pub(crate) use rustc_query_system::dep_graph::DepNodeParams; pub use rustc_query_system::dep_graph::{ debug, hash_result, DepContext, DepNodeColor, DepNodeIndex, SerializedDepNodeIndex, WorkProduct, WorkProductFileKind, WorkProductId, }; pub use dep_node::{label_strs, DepConstructor, DepKind, DepNode, DepNodeExt}; pub use safe::AssertDepGraphSafe; pub use safe::DepGraphSafe; pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepKind>; pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>; pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery<DepKind>; pub type PreviousDepGraph = rustc_query_system::dep_graph::PreviousDepGraph<DepKind>; pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>; impl rustc_query_system::dep_graph::DepKind for DepKind { fn is_eval_always(&self) -> bool { DepKind::is_eval_always(self) } fn has_params(&self) -> bool { DepKind::has_params(self) } fn debug_node(node: &DepNode, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", node.kind)?; if !node.kind.has_params() && !node.kind.is_anon() { return Ok(()); } write!(f, "(")?; ty::tls::with_opt(|opt_tcx| { if let Some(tcx) = opt_tcx { if let Some(def_id) = node.extract_def_id(tcx) { write!(f, "{}", tcx.def_path_debug_str(def_id))?; } else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(*node) { write!(f, "{}", s)?; } else { write!(f, "{}", node.hash)?; } } else { write!(f, "{}", node.hash)?; } Ok(()) })?; write!(f, ")") } fn with_deps<OP, R>(task_deps: Option<&Lock<TaskDeps>>, op: OP) -> R where OP: FnOnce() -> R, { ty::tls::with_context(|icx| { let icx = 
ty::tls::ImplicitCtxt { task_deps, ..icx.clone() }; ty::tls::enter_context(&icx, |_| op()) }) } fn read_deps<OP>(op: OP) -> () where OP: for<'a> FnOnce(Option<&'a Lock<TaskDeps>>) -> (), { ty::tls::with_context_opt(|icx| { let icx = if let Some(icx) = icx { icx } else { return }; op(icx.task_deps) }) } } impl<'tcx> DepContext for TyCtxt<'tcx> { type DepKind = DepKind; type StableHashingContext = StableHashingContext<'tcx>; fn create_stable_hashing_context(&self) -> Self::StableHashingContext { TyCtxt::create_stable_hashing_context(*self) } fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool { // FIXME: This match is just a workaround for incremental bugs and should // be removed. https://github.com/rust-lang/rust/issues/62649 is one such // bug that must be fixed before removing this. match dep_node.kind { DepKind::hir_owner | DepKind::hir_owner_nodes | DepKind::CrateMetadata => { if let Some(def_id) = dep_node.extract_def_id(*self) { if def_id_corresponds_to_hir_dep_node(*self, def_id) { if dep_node.kind == DepKind::CrateMetadata { // The `DefPath` has corresponding node, // and that node should have been marked // either red or green in `data.colors`. bug!( "DepNode {:?} should have been \ pre-marked as red or green but wasn't.", dep_node ); } } else { // This `DefPath` does not have a // corresponding `DepNode` (e.g. a // struct field), and the ` DefPath` // collided with the `DefPath` of a // proper item that existed in the // previous compilation session. // // Since the given `DefPath` does not // denote the item that previously // existed, we just fail to mark green. return false; } } else { // If the node does not exist anymore, we // just fail to mark green. return false; } } _ => { // For other kinds of nodes it's OK to be // forced. 
} } debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node); ty::query::force_from_dep_node(*self, dep_node) } fn has_errors_or_delayed_span_bugs(&self) -> bool { self.sess.has_errors_or_delayed_span_bugs() } fn diagnostic(&self) -> &rustc_errors::Handler { self.sess.diagnostic() } // Interactions with on_disk_cache fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) { try_load_from_on_disk_cache(*self, dep_node) } fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> { self.queries.on_disk_cache.load_diagnostics(*self, prev_dep_node_index) } fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) { self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics) } fn profiler(&self) -> &SelfProfilerRef { &self.prof } } fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: DefId) -> bool { let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); def_id.index == hir_id.owner.local_def_index } impl rustc_query_system::HashStableContext for StableHashingContext<'_> { fn debug_dep_tasks(&self) -> bool { self.sess().opts.debugging_opts.dep_tasks } } impl rustc_query_system::HashStableContextProvider<StableHashingContext<'tcx>> for TyCtxt<'tcx> { fn get_stable_hashing_context(&self) -> StableHashingContext<'tcx> { self.create_stable_hashing_context() } } impl rustc_query_system::HashStableContextProvider<StableHashingContext<'a>> for StableHashingContext<'a> { fn get_stable_hashing_context(&self) -> Self { self.clone() } }
//! This module contains `HashStable` implementations for various HIR data
//! types in no particular order.

use crate::hir;
use crate::hir::map::DefPathHash;
use crate::hir::def_id::{DefId, LocalDefId, CrateNum, CRATE_DEF_INDEX};
use crate::ich::{StableHashingContext, NodeIdHashingMode, Fingerprint};
use rustc_data_structures::stable_hasher::{HashStable, ToStableHashKey,
                                           StableHasher, StableHasherResult};
use std::mem;
use syntax::ast;
use syntax::attr;

// `DefId`s are hashed and keyed via their `DefPathHash`, so results do not
// depend on the numeric indices assigned in this compilation session.
impl<'a> HashStable<StableHashingContext<'a>> for DefId {
    #[inline]
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        hcx.def_path_hash(*self).hash_stable(hcx, hasher);
    }
}

impl<'a> ToStableHashKey<StableHashingContext<'a>> for DefId {
    type KeyType = DefPathHash;

    #[inline]
    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> DefPathHash {
        hcx.def_path_hash(*self)
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for LocalDefId {
    #[inline]
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        hcx.def_path_hash(self.to_def_id()).hash_stable(hcx, hasher);
    }
}

impl<'a> ToStableHashKey<StableHashingContext<'a>> for LocalDefId {
    type KeyType = DefPathHash;

    #[inline]
    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> DefPathHash {
        hcx.def_path_hash(self.to_def_id())
    }
}

// A `CrateNum` is hashed as the `DefPathHash` of the crate root.
impl<'a> HashStable<StableHashingContext<'a>> for CrateNum {
    #[inline]
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        hcx.def_path_hash(DefId { krate: *self, index: CRATE_DEF_INDEX }).hash_stable(hcx, hasher);
    }
}

impl<'a> ToStableHashKey<StableHashingContext<'a>> for CrateNum {
    type KeyType = DefPathHash;

    #[inline]
    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> DefPathHash {
        let def_id = DefId { krate: *self, index: CRATE_DEF_INDEX };
        def_id.to_stable_hash_key(hcx)
    }
}

// An `ItemLocalId` is already stable on its own; it is its own key.
impl<'a> ToStableHashKey<StableHashingContext<'a>> for hir::ItemLocalId {
    type KeyType = hir::ItemLocalId;

    #[inline]
    fn to_stable_hash_key(&self, _: &StableHashingContext<'a>) -> hir::ItemLocalId {
        *self
    }
}

// The following implementations of HashStable for ItemId, TraitItemId, and
// ImplItemId deserve special attention. Normally we do not hash NodeIds within
// the HIR, since they just signify a HIR node's own path. But ItemId et al
// are used when another item in the HIR is *referenced* and we certainly
// want to pick up on a reference changing its target, so we hash the NodeIds
// in "DefPath Mode".

impl<'a> HashStable<StableHashingContext<'a>> for hir::ItemId {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::ItemId { id } = *self;

        hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
            id.hash_stable(hcx, hasher);
        })
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for hir::TraitItemId {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::TraitItemId { hir_id } = *self;

        hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
            hir_id.hash_stable(hcx, hasher);
        })
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for hir::ImplItemId {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::ImplItemId { hir_id } = *self;

        hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
            hir_id.hash_stable(hcx, hasher);
        })
    }
}

impl_stable_hash_for!(struct ast::Label {
    ident
});

impl<'a> HashStable<StableHashingContext<'a>> for hir::Ty {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        hcx.while_hashing_hir_bodies(true, |hcx| {
            let hir::Ty {
                hir_id: _,
                ref node,
                ref span,
            } = *self;

            node.hash_stable(hcx, hasher);
            span.hash_stable(hcx, hasher);
        })
    }
}

impl_stable_hash_for_spanned!(hir::FieldPat);

impl_stable_hash_for_spanned!(hir::BinOpKind);

impl_stable_hash_for!(struct hir::Stmt {
    hir_id,
    node,
    span,
});

impl_stable_hash_for_spanned!(ast::Name);

impl<'a> HashStable<StableHashingContext<'a>> for hir::Expr {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        hcx.while_hashing_hir_bodies(true, |hcx| {
            let hir::Expr {
                hir_id: _,
                ref span,
                ref node,
                ref attrs
            } = *self;

            span.hash_stable(hcx, hasher);
            node.hash_stable(hcx, hasher);
            attrs.hash_stable(hcx, hasher);
        })
    }
}

impl_stable_hash_for_spanned!(usize);

impl_stable_hash_for_spanned!(ast::Ident);

impl_stable_hash_for!(struct ast::Ident {
    name,
    span,
});

impl<'a> HashStable<StableHashingContext<'a>> for hir::TraitItem {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::TraitItem {
            hir_id: _,
            ident,
            ref attrs,
            ref generics,
            ref node,
            span
        } = *self;

        hcx.hash_hir_item_like(|hcx| {
            ident.name.hash_stable(hcx, hasher);
            attrs.hash_stable(hcx, hasher);
            generics.hash_stable(hcx, hasher);
            node.hash_stable(hcx, hasher);
            span.hash_stable(hcx, hasher);
        });
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for hir::ImplItem {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::ImplItem {
            hir_id: _,
            ident,
            ref vis,
            defaultness,
            ref attrs,
            ref generics,
            ref node,
            span
        } = *self;

        hcx.hash_hir_item_like(|hcx| {
            ident.name.hash_stable(hcx, hasher);
            vis.hash_stable(hcx, hasher);
            defaultness.hash_stable(hcx, hasher);
            attrs.hash_stable(hcx, hasher);
            generics.hash_stable(hcx, hasher);
            node.hash_stable(hcx, hasher);
            span.hash_stable(hcx, hasher);
        });
    }
}

impl_stable_hash_for!(enum ::syntax::ast::CrateSugar {
    JustCrate,
    PubCrate,
});

impl<'a> HashStable<StableHashingContext<'a>> for hir::VisibilityKind {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        mem::discriminant(self).hash_stable(hcx, hasher);
        match *self {
            hir::VisibilityKind::Public |
            hir::VisibilityKind::Inherited => {
                // No fields to hash.
            }
            hir::VisibilityKind::Crate(sugar) => {
                sugar.hash_stable(hcx, hasher);
            }
            hir::VisibilityKind::Restricted { ref path, hir_id } => {
                hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
                    hir_id.hash_stable(hcx, hasher);
                });
                path.hash_stable(hcx, hasher);
            }
        }
    }
}

impl_stable_hash_for_spanned!(hir::VisibilityKind);

impl<'a> HashStable<StableHashingContext<'a>> for hir::Mod {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::Mod {
            inner: ref inner_span,
            ref item_ids,
        } = *self;

        inner_span.hash_stable(hcx, hasher);

        // Combining the DefPathHashes directly is faster than feeding them
        // into the hasher. Because we use a commutative combine, we also don't
        // have to sort the array.
        let item_ids_hash = item_ids
            .iter()
            .map(|id| {
                let (def_path_hash, local_id) = id.id.to_stable_hash_key(hcx);
                debug_assert_eq!(local_id, hir::ItemLocalId::from_u32(0));
                def_path_hash.0
            }).fold(Fingerprint::ZERO, |a, b| {
                a.combine_commutative(b)
            });

        item_ids.len().hash_stable(hcx, hasher);
        item_ids_hash.hash_stable(hcx, hasher);
    }
}

impl_stable_hash_for_spanned!(hir::VariantKind);

impl<'a> HashStable<StableHashingContext<'a>> for hir::Item {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::Item {
            ident,
            ref attrs,
            hir_id: _,
            ref node,
            ref vis,
            span
        } = *self;

        hcx.hash_hir_item_like(|hcx| {
            ident.name.hash_stable(hcx, hasher);
            attrs.hash_stable(hcx, hasher);
            node.hash_stable(hcx, hasher);
            vis.hash_stable(hcx, hasher);
            span.hash_stable(hcx, hasher);
        });
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for hir::Body {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::Body {
            ref arguments,
            ref value,
            is_generator,
        } = *self;

        // Bodies are hashed with NodeIds ignored entirely.
        hcx.with_node_id_hashing_mode(NodeIdHashingMode::Ignore, |hcx| {
            arguments.hash_stable(hcx, hasher);
            value.hash_stable(hcx, hasher);
            is_generator.hash_stable(hcx, hasher);
        });
    }
}

impl<'a> ToStableHashKey<StableHashingContext<'a>> for hir::BodyId {
    type KeyType = (DefPathHash, hir::ItemLocalId);

    #[inline]
    fn to_stable_hash_key(&self,
                          hcx: &StableHashingContext<'a>)
                          -> (DefPathHash, hir::ItemLocalId) {
        let hir::BodyId { hir_id } = *self;
        hir_id.to_stable_hash_key(hcx)
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for hir::def_id::DefIndex {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        hcx.local_def_path_hash(*self).hash_stable(hcx, hasher);
    }
}

impl<'a> ToStableHashKey<StableHashingContext<'a>> for hir::def_id::DefIndex {
    type KeyType = DefPathHash;

    #[inline]
    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> DefPathHash {
        hcx.local_def_path_hash(*self)
    }
}

// Lang items are a fixed enum, so plain `Hash` is already stable.
impl<'a> HashStable<StableHashingContext<'a>> for crate::middle::lang_items::LangItem {
    fn hash_stable<W: StableHasherResult>(&self,
                                          _: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        ::std::hash::Hash::hash(self, hasher);
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for hir::TraitCandidate {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
            let &hir::TraitCandidate {
                def_id,
                import_ids,
            } = &self;

            def_id.hash_stable(hcx, hasher);
            // We only use the outermost import NodeId as key
            import_ids.first().hash_stable(hcx, hasher);
        });
    }
}

impl<'a> ToStableHashKey<StableHashingContext<'a>> for hir::TraitCandidate {
    type KeyType = (DefPathHash, Option<(DefPathHash, hir::ItemLocalId)>);

    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> Self::KeyType {
        let hir::TraitCandidate {
            def_id,
            import_ids,
        } = self;

        // We only key on the outermost import, mirroring `hash_stable` above.
        let first_import_id = import_ids.first().map(|node_id| hcx.node_to_hir_id(*node_id))
            .map(|hir_id| (hcx.local_def_path_hash(hir_id.owner), hir_id.local_id));
        (hcx.def_path_hash(*def_id), first_import_id)
    }
}

impl<'hir> HashStable<StableHashingContext<'hir>> for attr::InlineAttr {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'hir>,
                                          hasher: &mut StableHasher<W>) {
        mem::discriminant(self).hash_stable(hcx, hasher);
    }
}

impl<'hir> HashStable<StableHashingContext<'hir>> for attr::OptimizeAttr {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'hir>,
                                          hasher: &mut StableHasher<W>) {
        mem::discriminant(self).hash_stable(hcx, hasher);
    }
}

// NOTE(review): the next line is a commit message embedded in this dump; it
// separates the pre-change revision of this module (above) from the
// post-change revision (below). It is not Rust source.
Use binding autoref, because we can.

//! This module contains `HashStable` implementations for various HIR data
//! types in no particular order.

use crate::hir;
use crate::hir::map::DefPathHash;
use crate::hir::def_id::{DefId, LocalDefId, CrateNum, CRATE_DEF_INDEX};
use crate::ich::{StableHashingContext, NodeIdHashingMode, Fingerprint};
use rustc_data_structures::stable_hasher::{HashStable, ToStableHashKey,
                                           StableHasher, StableHasherResult};
use std::mem;
use syntax::ast;
use syntax::attr;

impl<'a> HashStable<StableHashingContext<'a>> for DefId {
    #[inline]
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        hcx.def_path_hash(*self).hash_stable(hcx, hasher);
    }
}

impl<'a> ToStableHashKey<StableHashingContext<'a>> for DefId {
    type KeyType = DefPathHash;

    #[inline]
    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> DefPathHash {
        hcx.def_path_hash(*self)
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for LocalDefId {
    #[inline]
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        hcx.def_path_hash(self.to_def_id()).hash_stable(hcx, hasher);
    }
}

impl<'a> ToStableHashKey<StableHashingContext<'a>> for LocalDefId {
    type KeyType = DefPathHash;

    #[inline]
    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>)
-> DefPathHash {
        hcx.def_path_hash(self.to_def_id())
    }
}

// A `CrateNum` is hashed as the `DefPathHash` of the crate root.
impl<'a> HashStable<StableHashingContext<'a>> for CrateNum {
    #[inline]
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        hcx.def_path_hash(DefId { krate: *self, index: CRATE_DEF_INDEX }).hash_stable(hcx, hasher);
    }
}

impl<'a> ToStableHashKey<StableHashingContext<'a>> for CrateNum {
    type KeyType = DefPathHash;

    #[inline]
    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> DefPathHash {
        let def_id = DefId { krate: *self, index: CRATE_DEF_INDEX };
        def_id.to_stable_hash_key(hcx)
    }
}

// An `ItemLocalId` is already stable on its own; it is its own key.
impl<'a> ToStableHashKey<StableHashingContext<'a>> for hir::ItemLocalId {
    type KeyType = hir::ItemLocalId;

    #[inline]
    fn to_stable_hash_key(&self, _: &StableHashingContext<'a>) -> hir::ItemLocalId {
        *self
    }
}

// The following implementations of HashStable for ItemId, TraitItemId, and
// ImplItemId deserve special attention. Normally we do not hash NodeIds within
// the HIR, since they just signify a HIR node's own path. But ItemId et al
// are used when another item in the HIR is *referenced* and we certainly
// want to pick up on a reference changing its target, so we hash the NodeIds
// in "DefPath Mode".

impl<'a> HashStable<StableHashingContext<'a>> for hir::ItemId {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::ItemId { id } = *self;

        hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
            id.hash_stable(hcx, hasher);
        })
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for hir::TraitItemId {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::TraitItemId { hir_id } = *self;

        hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
            hir_id.hash_stable(hcx, hasher);
        })
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for hir::ImplItemId {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::ImplItemId { hir_id } = *self;

        hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
            hir_id.hash_stable(hcx, hasher);
        })
    }
}

impl_stable_hash_for!(struct ast::Label {
    ident
});

impl<'a> HashStable<StableHashingContext<'a>> for hir::Ty {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        hcx.while_hashing_hir_bodies(true, |hcx| {
            let hir::Ty {
                hir_id: _,
                ref node,
                ref span,
            } = *self;

            node.hash_stable(hcx, hasher);
            span.hash_stable(hcx, hasher);
        })
    }
}

impl_stable_hash_for_spanned!(hir::FieldPat);

impl_stable_hash_for_spanned!(hir::BinOpKind);

impl_stable_hash_for!(struct hir::Stmt {
    hir_id,
    node,
    span,
});

impl_stable_hash_for_spanned!(ast::Name);

impl<'a> HashStable<StableHashingContext<'a>> for hir::Expr {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        hcx.while_hashing_hir_bodies(true, |hcx| {
            let hir::Expr {
                hir_id: _,
                ref span,
                ref node,
                ref attrs
            } = *self;

            span.hash_stable(hcx, hasher);
            node.hash_stable(hcx, hasher);
            attrs.hash_stable(hcx, hasher);
        })
    }
}

impl_stable_hash_for_spanned!(usize);

impl_stable_hash_for_spanned!(ast::Ident);

impl_stable_hash_for!(struct ast::Ident {
    name,
    span,
});

impl<'a> HashStable<StableHashingContext<'a>> for hir::TraitItem {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::TraitItem {
            hir_id: _,
            ident,
            ref attrs,
            ref generics,
            ref node,
            span
        } = *self;

        hcx.hash_hir_item_like(|hcx| {
            ident.name.hash_stable(hcx, hasher);
            attrs.hash_stable(hcx, hasher);
            generics.hash_stable(hcx, hasher);
            node.hash_stable(hcx, hasher);
            span.hash_stable(hcx, hasher);
        });
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for hir::ImplItem {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::ImplItem {
            hir_id: _,
            ident,
            ref vis,
            defaultness,
            ref attrs,
            ref generics,
            ref node,
            span
        } = *self;

        hcx.hash_hir_item_like(|hcx| {
            ident.name.hash_stable(hcx, hasher);
            vis.hash_stable(hcx, hasher);
            defaultness.hash_stable(hcx, hasher);
            attrs.hash_stable(hcx, hasher);
            generics.hash_stable(hcx, hasher);
            node.hash_stable(hcx, hasher);
            span.hash_stable(hcx, hasher);
        });
    }
}

impl_stable_hash_for!(enum ::syntax::ast::CrateSugar {
    JustCrate,
    PubCrate,
});

impl<'a> HashStable<StableHashingContext<'a>> for hir::VisibilityKind {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        mem::discriminant(self).hash_stable(hcx, hasher);
        match *self {
            hir::VisibilityKind::Public |
            hir::VisibilityKind::Inherited => {
                // No fields to hash.
            }
            hir::VisibilityKind::Crate(sugar) => {
                sugar.hash_stable(hcx, hasher);
            }
            hir::VisibilityKind::Restricted { ref path, hir_id } => {
                hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
                    hir_id.hash_stable(hcx, hasher);
                });
                path.hash_stable(hcx, hasher);
            }
        }
    }
}

impl_stable_hash_for_spanned!(hir::VisibilityKind);

impl<'a> HashStable<StableHashingContext<'a>> for hir::Mod {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::Mod {
            inner: ref inner_span,
            ref item_ids,
        } = *self;

        inner_span.hash_stable(hcx, hasher);

        // Combining the DefPathHashes directly is faster than feeding them
        // into the hasher. Because we use a commutative combine, we also don't
        // have to sort the array.
        let item_ids_hash = item_ids
            .iter()
            .map(|id| {
                let (def_path_hash, local_id) = id.id.to_stable_hash_key(hcx);
                debug_assert_eq!(local_id, hir::ItemLocalId::from_u32(0));
                def_path_hash.0
            }).fold(Fingerprint::ZERO, |a, b| {
                a.combine_commutative(b)
            });

        item_ids.len().hash_stable(hcx, hasher);
        item_ids_hash.hash_stable(hcx, hasher);
    }
}

impl_stable_hash_for_spanned!(hir::VariantKind);

impl<'a> HashStable<StableHashingContext<'a>> for hir::Item {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::Item {
            ident,
            ref attrs,
            hir_id: _,
            ref node,
            ref vis,
            span
        } = *self;

        hcx.hash_hir_item_like(|hcx| {
            ident.name.hash_stable(hcx, hasher);
            attrs.hash_stable(hcx, hasher);
            node.hash_stable(hcx, hasher);
            vis.hash_stable(hcx, hasher);
            span.hash_stable(hcx, hasher);
        });
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for hir::Body {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let hir::Body {
            ref arguments,
            ref value,
            is_generator,
        } = *self;

        // Bodies are hashed with NodeIds ignored entirely.
        hcx.with_node_id_hashing_mode(NodeIdHashingMode::Ignore, |hcx| {
            arguments.hash_stable(hcx, hasher);
            value.hash_stable(hcx, hasher);
            is_generator.hash_stable(hcx, hasher);
        });
    }
}

impl<'a> ToStableHashKey<StableHashingContext<'a>> for hir::BodyId {
    type KeyType = (DefPathHash, hir::ItemLocalId);

    #[inline]
    fn to_stable_hash_key(&self,
                          hcx: &StableHashingContext<'a>)
                          -> (DefPathHash, hir::ItemLocalId) {
        let hir::BodyId { hir_id } = *self;
        hir_id.to_stable_hash_key(hcx)
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for hir::def_id::DefIndex {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        hcx.local_def_path_hash(*self).hash_stable(hcx, hasher);
    }
}

impl<'a> ToStableHashKey<StableHashingContext<'a>> for hir::def_id::DefIndex {
    type KeyType = DefPathHash;

    #[inline]
    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> DefPathHash {
        hcx.local_def_path_hash(*self)
    }
}

// Lang items are a fixed enum, so plain `Hash` is already stable.
impl<'a> HashStable<StableHashingContext<'a>> for crate::middle::lang_items::LangItem {
    fn hash_stable<W: StableHasherResult>(&self,
                                          _: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        ::std::hash::Hash::hash(self, hasher);
    }
}

// Note: this revision destructures `self` directly (binding autoref /
// default binding modes) instead of the `let &... = &self` form used before.
impl<'a> HashStable<StableHashingContext<'a>> for hir::TraitCandidate {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
            let hir::TraitCandidate {
                def_id,
                import_ids,
            } = self;

            def_id.hash_stable(hcx, hasher);
            // We only use the outermost import NodeId as key
            import_ids.first().hash_stable(hcx, hasher);
        });
    }
}

impl<'a> ToStableHashKey<StableHashingContext<'a>> for hir::TraitCandidate {
    type KeyType = (DefPathHash, Option<(DefPathHash, hir::ItemLocalId)>);

    fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> Self::KeyType {
        let hir::TraitCandidate {
            def_id,
            import_ids,
        } = self;

        // We only key on the outermost import, mirroring `hash_stable` above.
        let first_import_id = import_ids.first().map(|node_id| hcx.node_to_hir_id(*node_id))
            .map(|hir_id| (hcx.local_def_path_hash(hir_id.owner), hir_id.local_id));
        (hcx.def_path_hash(*def_id), first_import_id)
    }
}

impl<'hir> HashStable<StableHashingContext<'hir>> for attr::InlineAttr {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'hir>,
                                          hasher: &mut StableHasher<W>) {
        mem::discriminant(self).hash_stable(hcx, hasher);
    }
}

impl<'hir> HashStable<StableHashingContext<'hir>> for attr::OptimizeAttr {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'hir>,
                                          hasher: &mut StableHasher<W>) {
        mem::discriminant(self).hash_stable(hcx, hasher);
    }
}
//! Defines the set of legal keys that can be used in queries.

use crate::infer::canonical::Canonical;
use crate::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, DefIndex};
use crate::traits;
use crate::ty::{self, Ty, TyCtxt};
use crate::ty::subst::SubstsRef;
use crate::ty::fast_reject::SimplifiedType;
use crate::mir;

use std::fmt::Debug;
use std::hash::Hash;

use syntax_pos::{Span, DUMMY_SP};
use syntax_pos::symbol::InternedString;

/// The `Key` trait controls what types can legally be used as the key
/// for a query.
pub(super) trait Key: Clone + Hash + Eq + Debug {
    /// Given an instance of this key, what crate is it referring to?
    /// This is used to find the provider.
    fn query_crate(&self) -> CrateNum;

    /// In the event that a cycle occurs, if no explicit span has been
    /// given for a query with key `self`, what span should we use?
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span;
}

impl<'tcx> Key for ty::InstanceDef<'tcx> {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }

    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        tcx.def_span(self.def_id())
    }
}

impl<'tcx> Key for ty::Instance<'tcx> {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }

    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        tcx.def_span(self.def_id())
    }
}

impl<'tcx> Key for mir::interpret::GlobalId<'tcx> {
    fn query_crate(&self) -> CrateNum {
        self.instance.query_crate()
    }

    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        self.instance.default_span(tcx)
    }
}

impl Key for CrateNum {
    fn query_crate(&self) -> CrateNum {
        *self
    }
    fn default_span(&self, _: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

impl Key for DefIndex {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }
    fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

impl Key for DefId {
    fn query_crate(&self) -> CrateNum {
        self.krate
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        tcx.def_span(*self)
    }
}

// For key pairs, the second component supplies the span and the first the
// crate (matching how the tuple-keyed queries use them).
impl Key for (DefId, DefId) {
    fn query_crate(&self) -> CrateNum {
        self.0.krate
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        self.1.default_span(tcx)
    }
}

impl Key for (CrateNum, DefId) {
    fn query_crate(&self) -> CrateNum {
        self.0
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        self.1.default_span(tcx)
    }
}

impl Key for (DefId, SimplifiedType) {
    fn query_crate(&self) -> CrateNum {
        self.0.krate
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        self.0.default_span(tcx)
    }
}

impl<'tcx> Key for (DefId, SubstsRef<'tcx>) {
    fn query_crate(&self) -> CrateNum {
        self.0.krate
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        self.0.default_span(tcx)
    }
}

impl<'tcx> Key for (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>) {
    fn query_crate(&self) -> CrateNum {
        self.1.def_id().krate
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        tcx.def_span(self.1.def_id())
    }
}

impl<'tcx> Key for (&'tcx ty::Const<'tcx>, mir::Field) {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }
    fn default_span(&self, _: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

impl<'tcx> Key for ty::PolyTraitRef<'tcx> {
    fn query_crate(&self) -> CrateNum {
        self.def_id().krate
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        tcx.def_span(self.def_id())
    }
}

impl<'tcx> Key for ty::Const<'tcx> {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }
    fn default_span(&self, _: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

impl<'tcx> Key for Ty<'tcx> {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }
    fn default_span(&self, _: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

impl<'tcx> Key for ty::ParamEnv<'tcx> {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }
    fn default_span(&self, _: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

impl<'tcx, T: Key> Key for ty::ParamEnvAnd<'tcx, T> {
    fn query_crate(&self) -> CrateNum {
        self.value.query_crate()
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        self.value.default_span(tcx)
    }
}

impl<'tcx> Key for traits::Environment<'tcx> {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }
    fn default_span(&self, _: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

impl Key for InternedString {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }
    fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

/// Canonical query goals correspond to abstract trait operations that
/// are not tied to any crate in particular.
impl<'tcx, T> Key for Canonical<'tcx, T>
where
    T: Debug + Hash + Clone + Eq,
{
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }

    fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

// NOTE(review): the next line is a commit message embedded in this dump; it
// separates the pre-change revision of this module (above) from the
// post-change revision (below). It is not Rust source.
Remove unnecessary trait bounds from `keys::Keys`.

//! Defines the set of legal keys that can be used in queries.

use crate::infer::canonical::Canonical;
use crate::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, DefIndex};
use crate::traits;
use crate::ty::{self, Ty, TyCtxt};
use crate::ty::subst::SubstsRef;
use crate::ty::fast_reject::SimplifiedType;
use crate::mir;

use syntax_pos::{Span, DUMMY_SP};
use syntax_pos::symbol::InternedString;

/// The `Key` trait controls what types can legally be used as the key
/// for a query.
pub(super) trait Key {
    /// Given an instance of this key, what crate is it referring to?
    /// This is used to find the provider.
    fn query_crate(&self) -> CrateNum;

    /// In the event that a cycle occurs, if no explicit span has been
    /// given for a query with key `self`, what span should we use?
fn default_span(&self, tcx: TyCtxt<'_>) -> Span;
}

impl<'tcx> Key for ty::InstanceDef<'tcx> {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }

    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        tcx.def_span(self.def_id())
    }
}

impl<'tcx> Key for ty::Instance<'tcx> {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }

    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        tcx.def_span(self.def_id())
    }
}

impl<'tcx> Key for mir::interpret::GlobalId<'tcx> {
    fn query_crate(&self) -> CrateNum {
        self.instance.query_crate()
    }

    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        self.instance.default_span(tcx)
    }
}

impl Key for CrateNum {
    fn query_crate(&self) -> CrateNum {
        *self
    }
    fn default_span(&self, _: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

impl Key for DefIndex {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }
    fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

impl Key for DefId {
    fn query_crate(&self) -> CrateNum {
        self.krate
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        tcx.def_span(*self)
    }
}

// For key pairs, the second component supplies the span and the first the
// crate (matching how the tuple-keyed queries use them).
impl Key for (DefId, DefId) {
    fn query_crate(&self) -> CrateNum {
        self.0.krate
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        self.1.default_span(tcx)
    }
}

impl Key for (CrateNum, DefId) {
    fn query_crate(&self) -> CrateNum {
        self.0
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        self.1.default_span(tcx)
    }
}

impl Key for (DefId, SimplifiedType) {
    fn query_crate(&self) -> CrateNum {
        self.0.krate
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        self.0.default_span(tcx)
    }
}

impl<'tcx> Key for (DefId, SubstsRef<'tcx>) {
    fn query_crate(&self) -> CrateNum {
        self.0.krate
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        self.0.default_span(tcx)
    }
}

impl<'tcx> Key for (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>) {
    fn query_crate(&self) -> CrateNum {
        self.1.def_id().krate
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        tcx.def_span(self.1.def_id())
    }
}

impl<'tcx> Key for (&'tcx ty::Const<'tcx>, mir::Field) {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }
    fn default_span(&self, _: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

impl<'tcx> Key for ty::PolyTraitRef<'tcx> {
    fn query_crate(&self) -> CrateNum {
        self.def_id().krate
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        tcx.def_span(self.def_id())
    }
}

impl<'tcx> Key for ty::Const<'tcx> {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }
    fn default_span(&self, _: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

impl<'tcx> Key for Ty<'tcx> {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }
    fn default_span(&self, _: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

impl<'tcx> Key for ty::ParamEnv<'tcx> {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }
    fn default_span(&self, _: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

impl<'tcx, T: Key> Key for ty::ParamEnvAnd<'tcx, T> {
    fn query_crate(&self) -> CrateNum {
        self.value.query_crate()
    }
    fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
        self.value.default_span(tcx)
    }
}

impl<'tcx> Key for traits::Environment<'tcx> {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }
    fn default_span(&self, _: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

impl Key for InternedString {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }
    fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}

/// Canonical query goals correspond to abstract trait operations that
/// are not tied to any crate in particular.
impl<'tcx, T> Key for Canonical<'tcx, T> {
    fn query_crate(&self) -> CrateNum {
        LOCAL_CRATE
    }

    fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
        DUMMY_SP
    }
}
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Rustdoc's HTML Rendering module //! //! This modules contains the bulk of the logic necessary for rendering a //! rustdoc `clean::Crate` instance to a set of static HTML pages. This //! rendering process is largely driven by the `format!` syntax extension to //! perform all I/O into files and streams. //! //! The rendering process is largely driven by the `Context` and `Cache` //! structures. The cache is pre-populated by crawling the crate in question, //! and then it is shared among the various rendering tasks. The cache is meant //! to be a fairly large structure not implementing `Clone` (because it's shared //! among tasks). The context, however, should be a lightweight structure. This //! is cloned per-task and contains information about what is currently being //! rendered. //! //! In order to speed up rendering (mostly because of markdown rendering), the //! rendering process has been parallelized. This parallelization is only //! exposed through the `crate` method on the context, and then also from the //! fact that the shared cache is stored in TLS (and must be accessed as such). //! //! In addition to rendering the crate itself, this module is also responsible //! for creating the corresponding search index and source file renderings. //! These tasks are not parallelized (they haven't been a bottleneck yet), and //! both occur before the crate is rendered. 
use collections::{HashMap, HashSet};
use std::fmt;
use std::io::{fs, File, BufferedWriter, MemWriter, BufferedReader};
use std::io;
use std::local_data;
use std::str;
use std::strbuf::StrBuf;
use sync::Arc;

use serialize::json::ToJson;
use syntax::ast;
use syntax::attr;
use syntax::parse::token::InternedString;
use rustc::util::nodemap::NodeSet;

use clean;
use doctree;
use fold::DocFolder;
use html::item_type;
use html::item_type::{ItemType, shortty};
use html::format::{VisSpace, Method, FnStyleSpace};
use html::layout;
use html::markdown;
use html::markdown::Markdown;
use html::highlight;

/// Major driving force in all rustdoc rendering. This contains information
/// about where in the tree-like hierarchy rendering is occurring and controls
/// how the current page is being rendered.
///
/// It is intended that this context is a lightweight object which can be fairly
/// easily cloned because it is cloned per work-job (about once per item in the
/// rustdoc tree).
#[deriving(Clone)]
pub struct Context {
    /// Current hierarchy of components leading down to what's currently being
    /// rendered
    pub current: Vec<~str> ,
    /// String representation of how to get back to the root path of the 'doc/'
    /// folder in terms of a relative URL.
    pub root_path: StrBuf,
    /// The current destination folder of where HTML artifacts should be placed.
    /// This changes as the context descends into the module hierarchy.
    pub dst: Path,
    /// This describes the layout of each page, and is not modified after
    /// creation of the context (contains info like the favicon)
    pub layout: layout::Layout,
    /// This map is a list of what should be displayed on the sidebar of the
    /// current page. The key is the section header (traits, modules,
    /// functions), and the value is the list of containers belonging to this
    /// header. This map will change depending on the surrounding context of the
    /// page.
    pub sidebar: HashMap<~str, Vec<~str> >,
    /// This flag indicates whether [src] links should be generated or not. If
    /// the source files are present in the html rendering, then this will be
    /// `true`.
    pub include_sources: bool,
}

/// Indicates where an external crate can be found.
pub enum ExternalLocation {
    /// Remote URL root of the external crate
    Remote(~str),
    /// This external crate can be found in the local doc/ folder
    Local,
    /// The external crate could not be found.
    Unknown,
}

/// Different ways an implementor of a trait can be rendered.
pub enum Implementor {
    /// Paths are displayed specially by omitting the `impl XX for` cruft
    PathType(clean::Type),
    /// This is the generic representation of a trait implementor, used for
    /// primitive types and otherwise non-path types.
    OtherType(clean::Generics, /* trait */ clean::Type, /* for */ clean::Type),
}

/// This cache is used to store information about the `clean::Crate` being
/// rendered in order to provide more useful documentation. This contains
/// information like all implementors of a trait, all traits a type implements,
/// documentation for all known traits, etc.
///
/// This structure purposefully does not implement `Clone` because it's intended
/// to be a fairly large and expensive structure to clone. Instead this adheres
/// to `Send` so it may be stored in a `Arc` instance and shared among the various
/// rendering tasks.
pub struct Cache {
    /// Mapping of typaram ids to the name of the type parameter. This is used
    /// when pretty-printing a type (so pretty printing doesn't have to
    /// painfully maintain a context like this)
    pub typarams: HashMap<ast::NodeId, ~str>,

    /// Maps a type id to all known implementations for that type. This is only
    /// recognized for intra-crate `ResolvedPath` types, and is used to print
    /// out extra documentation on the page of an enum/struct.
    ///
    /// The values of the map are a list of implementations and documentation
    /// found on that implementation.
    pub impls: HashMap<ast::NodeId, Vec<(clean::Impl, Option<~str>)> >,

    /// Maintains a mapping of local crate node ids to the fully qualified name
    /// and "short type description" of that node. This is used when generating
    /// URLs when a type is being linked to. External paths are not located in
    /// this map because the `External` type itself has all the information
    /// necessary.
    pub paths: HashMap<ast::NodeId, (Vec<~str> , ItemType)>,

    /// This map contains information about all known traits of this crate.
    /// Implementations of a crate should inherit the documentation of the
    /// parent trait if no extra documentation is specified, and default methods
    /// should show up in documentation about trait implementations.
    pub traits: HashMap<ast::NodeId, clean::Trait>,

    /// When rendering traits, it's often useful to be able to list all
    /// implementors of the trait, and this mapping is exactly, that: a mapping
    /// of trait ids to the list of known implementors of the trait
    pub implementors: HashMap<ast::NodeId, Vec<Implementor> >,

    /// Cache of where external crate documentation can be found.
    pub extern_locations: HashMap<ast::CrateNum, ExternalLocation>,

    // Private fields only used when initially crawling a crate to build a cache

    stack: Vec<~str> ,
    parent_stack: Vec<ast::NodeId> ,
    search_index: Vec<IndexItem> ,
    privmod: bool,
    public_items: NodeSet,

    // In rare case where a structure is defined in one module but implemented
    // in another, if the implementing module is parsed before defining module,
    // then the fully qualified name of the structure isn't presented in `paths`
    // yet when its implementation methods are being indexed. Caches such methods
    // and their parent id here and indexes them at the end of crate parsing.
    orphan_methods: Vec<(ast::NodeId, clean::Item)>,
}

/// Helper struct to render all source code to HTML pages
struct SourceCollector<'a> {
    cx: &'a mut Context,

    /// Processed source-file paths
    seen: HashSet<~str>,
    /// Root destination to place all HTML output into
    dst: Path,
}

/// Wrapper struct to render the source code of a file. This will do things like
/// adding line numbers to the left-hand side.
struct Source<'a>(&'a str);

// Helper structs for rendering items/sidebars and carrying along contextual
// information
struct Item<'a> { cx: &'a Context, item: &'a clean::Item, }
struct Sidebar<'a> { cx: &'a Context, item: &'a clean::Item, }

/// Struct representing one entry in the JS search index. These are all emitted
/// by hand to a large JS file at the end of cache-creation.
struct IndexItem {
    ty: ItemType,
    name: ~str,
    path: ~str,
    desc: ~str,
    parent: Option<ast::NodeId>,
}

// TLS keys used to carry information around during rendering.
local_data_key!(pub cache_key: Arc<Cache>)
local_data_key!(pub current_location_key: Vec<~str> )

/// Generates the documentation for `crate` into the directory `dst`
pub fn run(mut krate: clean::Crate, dst: Path) -> io::IoResult<()> {
    let mut cx = Context {
        dst: dst,
        current: Vec::new(),
        root_path: StrBuf::new(),
        sidebar: HashMap::new(),
        layout: layout::Layout {
            logo: "".to_owned(),
            favicon: "".to_owned(),
            krate: krate.name.clone(),
        },
        include_sources: true,
    };
    try!(mkdir(&cx.dst));

    // Honor crate-level doc attributes that configure the rendered output
    // (favicon, logo, and whether to emit source pages).
    match krate.module.as_ref().map(|m| m.doc_list().unwrap_or(&[])) {
        Some(attrs) => {
            for attr in attrs.iter() {
                match *attr {
                    clean::NameValue(ref x, ref s) if "html_favicon_url" == *x => {
                        cx.layout.favicon = s.to_owned();
                    }
                    clean::NameValue(ref x, ref s) if "html_logo_url" == *x => {
                        cx.layout.logo = s.to_owned();
                    }
                    clean::Word(ref x) if "html_no_source" == *x => {
                        cx.include_sources = false;
                    }
                    _ => {}
                }
            }
        }
        None => {}
    }

    // Crawl the crate to build various caches used for the output
    let mut cache = local_data::get(::analysiskey, |analysis| {
        let
public_items = analysis.map(|a| a.public_items.clone());
        let public_items = public_items.unwrap_or(NodeSet::new());
        Cache {
            impls: HashMap::new(),
            typarams: HashMap::new(),
            paths: HashMap::new(),
            traits: HashMap::new(),
            implementors: HashMap::new(),
            stack: Vec::new(),
            parent_stack: Vec::new(),
            search_index: Vec::new(),
            extern_locations: HashMap::new(),
            privmod: false,
            public_items: public_items,
            orphan_methods: Vec::new(),
        }
    });
    cache.stack.push(krate.name.clone());
    krate = cache.fold_crate(krate);

    let mut nodeid_to_pathid = HashMap::new();
    let mut pathid_to_nodeid = Vec::new();
    {
        let Cache { search_index: ref mut index,
                    orphan_methods: ref meths, paths: ref mut paths, ..} = cache;

        // Attach all orphan methods to the type's definition if the type
        // has since been learned.
        for &(ref pid, ref item) in meths.iter() {
            match paths.find(pid) {
                Some(&(ref fqp, _)) => {
                    index.push(IndexItem {
                        ty: shortty(item),
                        name: item.name.clone().unwrap(),
                        path: fqp.slice_to(fqp.len() - 1).connect("::"),
                        desc: shorter(item.doc_value()).to_owned(),
                        parent: Some(*pid),
                    });
                },
                None => {}
            }
        };

        // Reduce `NodeId` in paths into smaller sequential numbers,
        // and prune the paths that do not appear in the index.
        for item in index.iter() {
            match item.parent {
                Some(nodeid) => {
                    if !nodeid_to_pathid.contains_key(&nodeid) {
                        let pathid = pathid_to_nodeid.len();
                        nodeid_to_pathid.insert(nodeid, pathid);
                        pathid_to_nodeid.push(nodeid);
                    }
                }
                None => {}
            }
        }
        assert_eq!(nodeid_to_pathid.len(), pathid_to_nodeid.len());
    }

    // Publish the search index
    let index = {
        let mut w = MemWriter::new();
        try!(write!(&mut w, r#"searchIndex['{}'] = \{"items":["#, krate.name));

        let mut lastpath = "".to_owned();
        for (i, item) in cache.search_index.iter().enumerate() {
            // Omit the path if it is same to that of the prior item.
            let path;
            if lastpath == item.path {
                path = "";
            } else {
                lastpath = item.path.clone();
                path = item.path.as_slice();
            };

            if i > 0 {
                try!(write!(&mut w, ","));
            }
            try!(write!(&mut w, r#"[{:u},"{}","{}",{}"#,
                        item.ty, item.name, path,
                        item.desc.to_json().to_str()));
            match item.parent {
                Some(nodeid) => {
                    let pathid = *nodeid_to_pathid.find(&nodeid).unwrap();
                    try!(write!(&mut w, ",{}", pathid));
                }
                None => {}
            }
            try!(write!(&mut w, "]"));
        }

        try!(write!(&mut w, r#"],"paths":["#));

        for (i, &nodeid) in pathid_to_nodeid.iter().enumerate() {
            let &(ref fqp, short) = cache.paths.find(&nodeid).unwrap();
            if i > 0 {
                try!(write!(&mut w, ","));
            }
            try!(write!(&mut w, r#"[{:u},"{}"]"#,
                        short, *fqp.last().unwrap()));
        }

        try!(write!(&mut w, r"]\};"));

        str::from_utf8(w.unwrap().as_slice()).unwrap().to_owned()
    };

    // Write out the shared files. Note that these are shared among all rustdoc
    // docs placed in the output directory, so this needs to be a synchronized
    // operation with respect to all other rustdocs running around.
    {
        try!(mkdir(&cx.dst));
        let _lock = ::flock::Lock::new(&cx.dst.join(".lock"));

        // Add all the static files. These may already exist, but we just
        // overwrite them anyway to make sure that they're fresh and up-to-date.
        try!(write(cx.dst.join("jquery.js"),
                   include_bin!("static/jquery-2.1.0.min.js")));
        try!(write(cx.dst.join("main.js"), include_bin!("static/main.js")));
        try!(write(cx.dst.join("main.css"), include_bin!("static/main.css")));
        try!(write(cx.dst.join("normalize.css"),
                   include_bin!("static/normalize.css")));
        try!(write(cx.dst.join("FiraSans-Regular.woff"),
                   include_bin!("static/FiraSans-Regular.woff")));
        try!(write(cx.dst.join("FiraSans-Medium.woff"),
                   include_bin!("static/FiraSans-Medium.woff")));
        try!(write(cx.dst.join("Heuristica-Regular.woff"),
                   include_bin!("static/Heuristica-Regular.woff")));
        try!(write(cx.dst.join("Heuristica-Italic.woff"),
                   include_bin!("static/Heuristica-Italic.woff")));
        try!(write(cx.dst.join("Heuristica-Bold.woff"),
                   include_bin!("static/Heuristica-Bold.woff")));

        // Update the search index: keep other crates' entries, dropping the
        // stale entry for the crate currently being re-rendered.
        let dst = cx.dst.join("search-index.js");
        let mut all_indexes = Vec::new();
        all_indexes.push(index);
        if dst.exists() {
            for line in BufferedReader::new(File::open(&dst)).lines() {
                let line = try!(line);
                if !line.starts_with("searchIndex") { continue }
                if line.starts_with(format!("searchIndex['{}']", krate.name)) {
                    continue
                }
                all_indexes.push(line);
            }
        }
        let mut w = try!(File::create(&dst));
        try!(writeln!(&mut w, r"var searchIndex = \{\};"));
        for index in all_indexes.iter() {
            try!(writeln!(&mut w, "{}", *index));
        }
        try!(writeln!(&mut w, "initSearch(searchIndex);"));
    }

    // Render all source files (this may turn into a giant no-op)
    {
        info!("emitting source files");
        let dst = cx.dst.join("src");
        try!(mkdir(&dst));
        let dst = dst.join(krate.name.as_slice());
        try!(mkdir(&dst));
        let mut folder = SourceCollector {
            dst: dst,
            seen: HashSet::new(),
            cx: &mut cx,
        };
        krate = folder.fold_crate(krate);
    }

    for &(n, ref e) in krate.externs.iter() {
        cache.extern_locations.insert(n, extern_location(e, &cx.dst));
    }

    // And finally render the whole crate's documentation
    cx.krate(krate, cache)
}

/// Writes the entire contents of a string to a destination, not attempting to
/// catch any
/// errors.
fn write(dst: Path, contents: &[u8]) -> io::IoResult<()> {
    File::create(&dst).write(contents)
}

/// Makes a directory on the filesystem, failing the task if an error occurs and
/// skipping if the directory already exists.
fn mkdir(path: &Path) -> io::IoResult<()> {
    if !path.exists() {
        fs::mkdir(path, io::UserRWX)
    } else {
        Ok(())
    }
}

/// Takes a path to a source file and cleans the path to it. This canonicalizes
/// things like ".." to components which preserve the "top down" hierarchy of a
/// static HTML tree.
// FIXME (#9639): The closure should deal with &[u8] instead of &str
fn clean_srcpath(src: &[u8], f: |&str|) {
    let p = Path::new(src);
    if p.as_vec() != bytes!(".") {
        for c in p.str_components().map(|x|x.unwrap()) {
            if ".." == c {
                // ".." is rewritten to a literal "up" component so the output
                // tree never escapes its root.
                f("up");
            } else {
                f(c.as_slice())
            }
        }
    }
}

/// Attempts to find where an external crate is located, given that we're
/// rendering in to the specified source destination.
fn extern_location(e: &clean::ExternalCrate, dst: &Path) -> ExternalLocation {
    // See if there's documentation generated into the local directory
    let local_location = dst.join(e.name.as_slice());
    if local_location.is_dir() {
        return Local;
    }

    // Failing that, see if there's an attribute specifying where to find this
    // external crate
    for attr in e.attrs.iter() {
        match *attr {
            clean::List(ref x, ref list) if "doc" == *x => {
                for attr in list.iter() {
                    match *attr {
                        clean::NameValue(ref x, ref s) if "html_root_url" == *x => {
                            // Normalize the root URL to always end in a slash.
                            if s.ends_with("/") {
                                return Remote(s.to_owned());
                            }
                            return Remote(*s + "/");
                        }
                        _ => {}
                    }
                }
            }
            _ => {}
        }
    }

    // Well, at least we tried.
    return Unknown;
}

impl<'a> DocFolder for SourceCollector<'a> {
    fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> {
        // If we're including source files, and we haven't seen this file yet,
        // then we need to render it out to the filesystem
        if self.cx.include_sources && !self.seen.contains(&item.source.filename) {

            // If it turns out that we couldn't read this file, then we probably
            // can't read any of the files (generating html output from json or
            // something like that), so just don't include sources for the
            // entire crate. The other option is maintaining this mapping on a
            // per-file basis, but that's probably not worth it...
            self.cx.include_sources = match self.emit_source(item.source.filename) {
                Ok(()) => true,
                Err(e) => {
                    println!("warning: source code was requested to be rendered, \
                              but processing `{}` had an error: {}",
                             item.source.filename, e);
                    println!(" skipping rendering of source code");
                    false
                }
            };
            self.seen.insert(item.source.filename.clone());
        }

        self.fold_item_recur(item)
    }
}

impl<'a> SourceCollector<'a> {
    /// Renders the given filename into its corresponding HTML source file.
    fn emit_source(&mut self, filename: &str) -> io::IoResult<()> {
        let p = Path::new(filename);

        // If we couldn't open this file, then just returns because it
        // probably means that it's some standard library macro thing and we
        // can't have the source to it anyway.
        let contents = match File::open(&p).read_to_end() {
            Ok(r) => r,
            // macros from other libraries get special filenames which we can
            // safely ignore
            Err(..)
                if filename.starts_with("<") &&
                   filename.ends_with("macros>") => return Ok(()),
            Err(e) => return Err(e)
        };
        let contents = str::from_utf8(contents.as_slice()).unwrap();

        // Remove the utf-8 BOM if any
        let contents = if contents.starts_with("\ufeff") {
            contents.as_slice().slice_from(3)
        } else {
            contents.as_slice()
        };

        // Create the intermediate directories, extending root_path by one
        // "../" for every directory level so relative links stay valid.
        let mut cur = self.dst.clone();
        let mut root_path = StrBuf::from_str("../../");
        clean_srcpath(p.dirname(), |component| {
            cur.push(component);
            mkdir(&cur).unwrap();
            root_path.push_str("../");
        });

        cur.push(p.filename().expect("source has no filename") + bytes!(".html"));
        let mut w = BufferedWriter::new(try!(File::create(&cur)));

        let title = format!("{} -- source", cur.filename_display());
        let page = layout::Page {
            title: title,
            ty: "source",
            root_path: root_path.as_slice(),
        };
        try!(layout::render(&mut w as &mut Writer, &self.cx.layout,
                            &page, &(""), &Source(contents)));
        try!(w.flush());
        return Ok(());
    }
}

impl DocFolder for Cache {
    fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> {
        // If this is a private module, we don't want it in the search index.
        let orig_privmod = match item.inner {
            clean::ModuleItem(..) => {
                let prev = self.privmod;
                self.privmod = prev || item.visibility != Some(ast::Public);
                prev
            }
            _ => self.privmod,
        };

        // Register any generics to their corresponding string.
        // This is used when pretty-printing types
        match item.inner {
            clean::StructItem(ref s) => self.generics(&s.generics),
            clean::EnumItem(ref e) => self.generics(&e.generics),
            clean::FunctionItem(ref f) => self.generics(&f.generics),
            clean::TypedefItem(ref t) => self.generics(&t.generics),
            clean::TraitItem(ref t) => self.generics(&t.generics),
            clean::ImplItem(ref i) => self.generics(&i.generics),
            clean::TyMethodItem(ref i) => self.generics(&i.generics),
            clean::MethodItem(ref i) => self.generics(&i.generics),
            clean::ForeignFunctionItem(ref f) => self.generics(&f.generics),
            _ => {}
        }

        // Propagate a trait methods' documentation to all implementors of the
        // trait
        match item.inner {
            clean::TraitItem(ref t) => {
                self.traits.insert(item.id, t.clone());
            }
            _ => {}
        }

        // Collect all the implementors of traits.
        match item.inner {
            clean::ImplItem(ref i) => {
                match i.trait_ {
                    Some(clean::ResolvedPath{ id, .. }) => {
                        let v = self.implementors.find_or_insert_with(id, |_|{
                            Vec::new()
                        });
                        match i.for_ {
                            clean::ResolvedPath{..} => {
                                // path implementors are listed first
                                v.unshift(PathType(i.for_.clone()));
                            }
                            _ => {
                                v.push(OtherType(i.generics.clone(),
                                                 i.trait_.get_ref().clone(),
                                                 i.for_.clone()));
                            }
                        }
                    }
                    Some(..) | None => {}
                }
            }
            _ => {}
        }

        // Index this method for searching later on
        match item.name {
            Some(ref s) => {
                let parent = match item.inner {
                    clean::TyMethodItem(..) |
                    clean::StructFieldItem(..) |
                    clean::VariantItem(..) => {
                        (Some(*self.parent_stack.last().unwrap()),
                         Some(self.stack.slice_to(self.stack.len() - 1)))
                    }
                    clean::MethodItem(..) => {
                        if self.parent_stack.len() == 0 {
                            (None, None)
                        } else {
                            let last = self.parent_stack.last().unwrap();
                            let path = match self.paths.find(last) {
                                Some(&(_, item_type::Trait)) =>
                                    Some(self.stack.slice_to(self.stack.len() - 1)),
                                // The current stack not necessarily has correlation for
                                // where the type was defined. On the other hand,
                                // `paths` always has the right information if present.
                                Some(&(ref fqp, item_type::Struct)) |
                                Some(&(ref fqp, item_type::Enum)) =>
                                    Some(fqp.slice_to(fqp.len() - 1)),
                                Some(..) => Some(self.stack.as_slice()),
                                None => None
                            };
                            (Some(*last), path)
                        }
                    }
                    _ => (None, Some(self.stack.as_slice()))
                };
                match parent {
                    (parent, Some(path)) if !self.privmod => {
                        self.search_index.push(IndexItem {
                            ty: shortty(&item),
                            name: s.to_owned(),
                            path: path.connect("::"),
                            desc: shorter(item.doc_value()).to_owned(),
                            parent: parent,
                        });
                    }
                    (Some(parent), None) if !self.privmod => {
                        // We have a parent, but we don't know where they're
                        // defined yet. Wait for later to index this item.
                        self.orphan_methods.push((parent, item.clone()))
                    }
                    _ => {}
                }
            }
            None => {}
        }

        // Keep track of the fully qualified path for this item.
        let pushed = if item.name.is_some() {
            let n = item.name.get_ref();
            if n.len() > 0 {
                self.stack.push(n.to_owned());
                true
            } else { false }
        } else { false };
        match item.inner {
            clean::StructItem(..) | clean::EnumItem(..) |
            clean::TypedefItem(..) | clean::TraitItem(..) |
            clean::FunctionItem(..) | clean::ModuleItem(..) |
            clean::ForeignFunctionItem(..) => {
                // Reexported items mean that the same id can show up twice in
                // the rustdoc ast that we're looking at. We know, however, that
                // a reexported item doesn't show up in the `public_items` map,
                // so we can skip inserting into the paths map if there was
                // already an entry present and we're not a public item.
                if !self.paths.contains_key(&item.id) ||
                   self.public_items.contains(&item.id) {
                    self.paths.insert(item.id,
                                      (self.stack.clone(), shortty(&item)));
                }
            }
            // link variants to their parent enum because pages aren't emitted
            // for each variant
            clean::VariantItem(..) => {
                let mut stack = self.stack.clone();
                stack.pop();
                self.paths.insert(item.id, (stack, item_type::Enum));
            }
            _ => {}
        }

        // Maintain the parent stack
        let parent_pushed = match item.inner {
            clean::TraitItem(..) | clean::EnumItem(..) | clean::StructItem(..)
            => {
                self.parent_stack.push(item.id);
                true
            }
            clean::ImplItem(ref i) => {
                match i.for_ {
                    clean::ResolvedPath{ id, .. } => {
                        self.parent_stack.push(id);
                        true
                    }
                    _ => false
                }
            }
            _ => false
        };

        // Once we've recursively found all the generics, then hoard off all the
        // implementations elsewhere
        let ret = match self.fold_item_recur(item) {
            Some(item) => {
                match item {
                    clean::Item{ attrs, inner: clean::ImplItem(i), .. } => {
                        match i.for_ {
                            clean::ResolvedPath { id, .. } => {
                                let v = self.impls.find_or_insert_with(id, |_| {
                                    Vec::new()
                                });
                                // extract relevant documentation for this impl
                                match attrs.move_iter().find(|a| {
                                    match *a {
                                        clean::NameValue(ref x, _)
                                                if "doc" == *x => true,
                                        _ => false
                                    }
                                }) {
                                    Some(clean::NameValue(_, dox)) => {
                                        v.push((i, Some(dox)));
                                    }
                                    Some(..) | None => {
                                        v.push((i, None));
                                    }
                                }
                            }
                            _ => {}
                        }
                        None
                    }
                    // Private modules may survive the strip-private pass if
                    // they contain impls for public types, but those will get
                    // stripped here
                    clean::Item { inner: clean::ModuleItem(ref m),
                                  visibility, ..
                    } if (m.items.len() == 0 && item.doc_value().is_none()) ||
                         visibility != Some(ast::Public) => None,
                    i => Some(i),
                }
            }
            i => i,
        };

        if pushed { self.stack.pop().unwrap(); }
        if parent_pushed { self.parent_stack.pop().unwrap(); }
        self.privmod = orig_privmod;
        return ret;
    }
}

impl<'a> Cache {
    /// Records the display names of a set of generic type parameters.
    fn generics(&mut self, generics: &clean::Generics) {
        for typ in generics.type_params.iter() {
            self.typarams.insert(typ.id, typ.name.clone());
        }
    }
}

impl Context {
    /// Recurse in the directory structure and change the "root path" to make
    /// sure it always points to the top (relatively)
    fn recurse<T>(&mut self,
                  s: ~str,
                  f: |&mut Context| -> T) -> T {
        if s.len() == 0 {
            fail!("what {:?}", self);
        }
        let prev = self.dst.clone();
        self.dst.push(s.as_slice());
        self.root_path.push_str("../");
        self.current.push(s);

        info!("Recursing into {}", self.dst.display());

        mkdir(&self.dst).unwrap();
        let ret = f(self);

        info!("Recursed; leaving {}", self.dst.display());

        // Go back to where we were at
        self.dst = prev;
        let len = self.root_path.len();
        self.root_path.truncate(len - 3);
        self.current.pop().unwrap();

        return ret;
    }

    /// Main method for rendering a crate.
    ///
    /// This currently isn't parallelized, but it'd be pretty easy to add
    /// parallelization to this function.
    fn krate(self, mut krate: clean::Crate, cache: Cache) -> io::IoResult<()> {
        let mut item = match krate.module.take() {
            Some(i) => i,
            None => return Ok(())
        };
        item.name = Some(krate.name);

        // using a rwarc makes this parallelizable in the future
        local_data::set(cache_key, Arc::new(cache));

        // Work queue of items still to render; `item` pushes sub-items back
        // onto the queue via the closure it is given.
        let mut work = vec!((self, item));
        loop {
            match work.pop() {
                Some((mut cx, item)) => try!(cx.item(item, |cx, item| {
                    work.push((cx.clone(), item));
                })),
                None => break,
            }
        }
        Ok(())
    }

    /// Non-parellelized version of rendering an item. This will take the input
    /// item, render its contents, and then invoke the specified closure with
    /// all sub-items which need to be rendered.
    ///
    /// The rendering driver uses this closure to queue up more work.
    fn item(&mut self, item: clean::Item,
            f: |&mut Context, clean::Item|) -> io::IoResult<()> {
        // Renders one item to the given file, optionally appending its name to
        // the page title.
        fn render(w: io::File, cx: &mut Context, it: &clean::Item,
                  pushname: bool) -> io::IoResult<()> {
            info!("Rendering an item to {}", w.path().display());
            // A little unfortunate that this is done like this, but it sure
            // does make formatting *a lot* nicer.
            local_data::set(current_location_key, cx.current.clone());

            let mut title = StrBuf::from_str(cx.current.connect("::"));
            if pushname {
                if title.len() > 0 {
                    title.push_str("::");
                }
                title.push_str(*it.name.get_ref());
            }
            title.push_str(" - Rust");
            let page = layout::Page {
                ty: shortty(it).to_static_str(),
                root_path: cx.root_path.as_slice(),
                title: title.as_slice(),
            };

            markdown::reset_headers();

            // We have a huge number of calls to write, so try to alleviate some
            // of the pain by using a buffered writer instead of invoking the
            // write sycall all the time.
            let mut writer = BufferedWriter::new(w);
            try!(layout::render(&mut writer as &mut Writer, &cx.layout, &page,
                                &Sidebar{ cx: cx, item: it },
                                &Item{ cx: cx, item: it }));
            writer.flush()
        }

        match item.inner {
            // modules are special because they add a namespace. We also need to
            // recurse into the items of the module as well.
            clean::ModuleItem(..) => {
                let name = item.name.get_ref().to_owned();
                let mut item = Some(item);
                self.recurse(name, |this| {
                    let item = item.take_unwrap();
                    let dst = this.dst.join("index.html");
                    let dst = try!(File::create(&dst));
                    try!(render(dst, this, &item, false));

                    let m = match item.inner {
                        clean::ModuleItem(m) => m,
                        _ => unreachable!()
                    };
                    this.sidebar = build_sidebar(&m);
                    for item in m.items.move_iter() {
                        f(this,item);
                    }
                    Ok(())
                })
            }

            // Things which don't have names (like impls) don't get special
            // pages dedicated to them.
            _ if item.name.is_some() => {
                let dst = self.dst.join(item_path(&item));
                let dst = try!(File::create(&dst));
                render(dst, self, &item, true)
            }

            _ => Ok(())
        }
    }
}

impl<'a> Item<'a> {
    /// Returns true when the wrapped item is a module.
    fn ismodule(&self) -> bool {
        match self.item.inner {
            clean::ModuleItem(..) => true, _ => false
        }
    }
}

impl<'a> fmt::Show for Item<'a> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Write the breadcrumb trail header for the top
        try!(write!(fmt.buf, "\n<h1 class='fqn'>"));
        match self.item.inner {
            clean::ModuleItem(ref m) => if m.is_crate {
                    try!(write!(fmt.buf, "Crate "));
                } else {
                    try!(write!(fmt.buf, "Module "));
                },
            clean::FunctionItem(..) => try!(write!(fmt.buf, "Function ")),
            clean::TraitItem(..) => try!(write!(fmt.buf, "Trait ")),
            clean::StructItem(..) => try!(write!(fmt.buf, "Struct ")),
            clean::EnumItem(..) => try!(write!(fmt.buf, "Enum ")),
            _ => {}
        }
        // Each breadcrumb component links back up the module hierarchy.
        let cur = self.cx.current.as_slice();
        let amt = if self.ismodule() { cur.len() - 1 } else { cur.len() };
        for (i, component) in cur.iter().enumerate().take(amt) {
            let mut trail = StrBuf::new();
            for _ in range(0, cur.len() - i - 1) {
                trail.push_str("../");
            }
            try!(write!(fmt.buf, "<a href='{}index.html'>{}</a>::",
                        trail, component.as_slice()));
        }
        try!(write!(fmt.buf, "<a class='{}' href=''>{}</a>",
                    shortty(self.item), self.item.name.get_ref().as_slice()));

        // Write stability attributes
        match attr::find_stability(self.item.attrs.iter()) {
            Some(ref stability) => {
                try!(write!(fmt.buf,
                       "<a class='stability {lvl}' title='{reason}'>{lvl}</a>",
                       lvl = stability.level.to_str(),
                       reason = match stability.text {
                           Some(ref s) => (*s).clone(),
                           None => InternedString::new(""),
                       }));
            }
            None => {}
        }

        // Write `src` tag
        if self.cx.include_sources {
            let mut path = Vec::new();
            clean_srcpath(self.item.source.filename.as_bytes(), |component| {
                path.push(component.to_owned());
            });
            let href = if self.item.source.loline == self.item.source.hiline {
                format!("{}", self.item.source.loline)
            } else {
                format!("{}-{}", self.item.source.loline,
self.item.source.hiline)
            };
            try!(write!(fmt.buf,
                        "<a class='source' \
                         href='{root}src/{krate}/{path}.html\\#{href}'>\
                         [src]</a>",
                        root = self.cx.root_path,
                        krate = self.cx.layout.krate,
                        path = path.connect("/"),
                        href = href));
        }

        try!(write!(fmt.buf, "</h1>\n"));

        match self.item.inner {
            clean::ModuleItem(ref m) => {
                item_module(fmt.buf, self.cx, self.item, m.items.as_slice())
            }
            clean::FunctionItem(ref f) | clean::ForeignFunctionItem(ref f) =>
                item_function(fmt.buf, self.item, f),
            clean::TraitItem(ref t) => item_trait(fmt.buf, self.item, t),
            clean::StructItem(ref s) => item_struct(fmt.buf, self.item, s),
            clean::EnumItem(ref e) => item_enum(fmt.buf, self.item, e),
            clean::TypedefItem(ref t) => item_typedef(fmt.buf, self.item, t),
            clean::MacroItem(ref m) => item_macro(fmt.buf, self.item, m),
            _ => Ok(())
        }
    }
}

/// Computes the relative output file path for an item's dedicated page.
fn item_path(item: &clean::Item) -> ~str {
    match item.inner {
        clean::ModuleItem(..) => *item.name.get_ref() + "/index.html",
        _ => shortty(item).to_static_str() + "." + *item.name.get_ref() + ".html"
    }
}

/// Renders the fully qualified path of an item (used for link titles).
fn full_path(cx: &Context, item: &clean::Item) -> ~str {
    let mut s = StrBuf::from_str(cx.current.connect("::"));
    s.push_str("::");
    s.push_str(item.name.get_ref().as_slice());
    return s.into_owned();
}

/// Maps a missing doc string to the empty string.
fn blank<'a>(s: Option<&'a str>) -> &'a str {
    match s {
        Some(s) => s,
        None => ""
    }
}

/// Truncates documentation to its first paragraph (up to the first blank line).
fn shorter<'a>(s: Option<&'a str>) -> &'a str {
    match s {
        Some(s) => match s.find_str("\n\n") {
            Some(pos) => s.slice_to(pos),
            None => s,
        },
        None => ""
    }
}

/// Renders an item's markdown documentation block, if it has any.
fn document(w: &mut Writer, item: &clean::Item) -> fmt::Result {
    match item.doc_value() {
        Some(s) => {
            try!(write!(w, "<div class='docblock'>{}</div>", Markdown(s)));
        }
        None => {}
    }
    Ok(())
}

/// Renders a module index page: the module's docs followed by a table of its
/// children, grouped by item kind.
fn item_module(w: &mut Writer, cx: &Context,
               item: &clean::Item, items: &[clean::Item]) -> fmt::Result {
    try!(document(w, item));
    debug!("{:?}", items);
    let mut indices = Vec::from_fn(items.len(), |i| i);

    // Ordering used to group the module's children into sections; within one
    // kind items keep their source order (compared by original index).
    fn cmp(i1: &clean::Item, i2: &clean::Item, idx1: uint, idx2: uint) -> Ordering {
        if shortty(i1) == shortty(i2) {
            return i1.name.cmp(&i2.name);
        }
        match (&i1.inner, &i2.inner) {
            (&clean::ViewItemItem(ref a), &clean::ViewItemItem(ref b)) => {
                match (&a.inner, &b.inner) {
                    (&clean::ExternCrate(..), _) => Less,
                    (_, &clean::ExternCrate(..)) => Greater,
                    _ => idx1.cmp(&idx2),
                }
            }
            (&clean::ViewItemItem(..), _) => Less,
            (_, &clean::ViewItemItem(..)) => Greater,
            (&clean::ModuleItem(..), _) => Less,
            (_, &clean::ModuleItem(..)) => Greater,
            (&clean::MacroItem(..), _) => Less,
            (_, &clean::MacroItem(..)) => Greater,
            (&clean::StructItem(..), _) => Less,
            (_, &clean::StructItem(..)) => Greater,
            (&clean::EnumItem(..), _) => Less,
            (_, &clean::EnumItem(..)) => Greater,
            (&clean::StaticItem(..), _) => Less,
            (_, &clean::StaticItem(..)) => Greater,
            (&clean::ForeignFunctionItem(..), _) => Less,
            (_, &clean::ForeignFunctionItem(..)) => Greater,
            (&clean::ForeignStaticItem(..), _) => Less,
            (_, &clean::ForeignStaticItem(..)) => Greater,
            (&clean::TraitItem(..), _) => Less,
            (_, &clean::TraitItem(..)) => Greater,
            (&clean::FunctionItem(..), _) => Less,
            (_, &clean::FunctionItem(..)) => Greater,
            (&clean::TypedefItem(..), _) => Less,
            (_, &clean::TypedefItem(..)) => Greater,
            _ => idx1.cmp(&idx2),
        }
    }

    debug!("{:?}", indices);
    indices.sort_by(|&i1, &i2| cmp(&items[i1], &items[i2], i1, i2));
    debug!("{:?}", indices);

    let mut curty = None;
    for &idx in indices.iter() {
        let myitem = &items[idx];

        // Emit a new section header whenever the item kind changes.
        let myty = Some(shortty(myitem));
        if myty != curty {
            if curty.is_some() {
                try!(write!(w, "</table>"));
            }
            curty = myty;
            let (short, name) = match myitem.inner {
                clean::ModuleItem(..)          => ("modules", "Modules"),
                clean::StructItem(..)          => ("structs", "Structs"),
                clean::EnumItem(..)            => ("enums", "Enums"),
                clean::FunctionItem(..)        => ("functions", "Functions"),
                clean::TypedefItem(..)         => ("types", "Type Definitions"),
                clean::StaticItem(..)          => ("statics", "Statics"),
                clean::TraitItem(..)           => ("traits", "Traits"),
                clean::ImplItem(..)            => ("impls", "Implementations"),
                clean::ViewItemItem(..)        => ("reexports", "Reexports"),
                clean::TyMethodItem(..)        => ("tymethods", "Type Methods"),
                clean::MethodItem(..)          => ("methods", "Methods"),
                clean::StructFieldItem(..)     => ("fields", "Struct Fields"),
                clean::VariantItem(..)         => ("variants", "Variants"),
                clean::ForeignFunctionItem(..) => ("ffi-fns", "Foreign Functions"),
                clean::ForeignStaticItem(..)   => ("ffi-statics", "Foreign Statics"),
                clean::MacroItem(..)           => ("macros", "Macros"),
            };
            try!(write!(w,
                        "<h2 id='{id}' class='section-header'>\
                         <a href=\"\\#{id}\">{name}</a></h2>\n<table>",
                        id = short, name = name));
        }

        match myitem.inner {
            clean::StaticItem(ref s) | clean::ForeignStaticItem(ref s) => {
                // Helper that renders a static's initializer expression, using
                // <pre> when the expression spans multiple lines.
                struct Initializer<'a>(&'a str);
                impl<'a> fmt::Show for Initializer<'a> {
                    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                        let Initializer(s) = *self;
                        if s.len() == 0 { return Ok(()); }
                        try!(write!(f.buf, "<code> = </code>"));
                        let tag = if s.contains("\n") { "pre" } else { "code" };
                        try!(write!(f.buf, "<{tag}>{}</{tag}>",
                                    s.as_slice(), tag=tag));
                        Ok(())
                    }
                }

                try!(write!(w, " <tr> <td><code>{}static {}: {}</code>{}</td> <td class='docblock'>{}&nbsp;</td> </tr> ",
                            VisSpace(myitem.visibility),
                            *myitem.name.get_ref(),
                            s.type_,
                            Initializer(s.expr),
                            Markdown(blank(myitem.doc_value()))));
            }

            clean::ViewItemItem(ref item) => {
                match item.inner {
                    clean::ExternCrate(ref name, ref src, _) => {
                        try!(write!(w, "<tr><td><code>extern crate {}",
                                    name.as_slice()));
                        match *src {
                            Some(ref src) => try!(write!(w, " = \"{}\"",
                                                         src.as_slice())),
                            None => {}
                        }
                        try!(write!(w, ";</code></td></tr>"));
                    }

                    clean::Import(ref import) => {
                        try!(write!(w, "<tr><td><code>{}{}</code></td></tr>",
                                    VisSpace(myitem.visibility),
                                    *import));
                    }
                }
            }

            _ => {
                if myitem.name.is_none() { continue }
                try!(write!(w, " <tr> <td><a class='{class}' href='{href}' title='{title}'>{}</a></td> <td class='docblock short'>{}</td> </tr> ",
                            *myitem.name.get_ref(),
                            Markdown(shorter(myitem.doc_value())),
                            class = shortty(myitem),
                            href = item_path(myitem),
                            title = full_path(cx, myitem)));
            }
        }
    }
    write!(w, "</table>")
}

fn item_function(w: &mut
Writer, it: &clean::Item, f: &clean::Function) -> fmt::Result { try!(write!(w, "<pre class='rust fn'>{vis}{fn_style}fn \ {name}{generics}{decl}</pre>", vis = VisSpace(it.visibility), fn_style = FnStyleSpace(f.fn_style), name = it.name.get_ref().as_slice(), generics = f.generics, decl = f.decl)); document(w, it) } fn item_trait(w: &mut Writer, it: &clean::Item, t: &clean::Trait) -> fmt::Result { let mut parents = StrBuf::new(); if t.parents.len() > 0 { parents.push_str(": "); for (i, p) in t.parents.iter().enumerate() { if i > 0 { parents.push_str(" + "); } parents.push_str(format!("{}", *p)); } } // Output the trait definition try!(write!(w, "<pre class='rust trait'>{}trait {}{}{} ", VisSpace(it.visibility), it.name.get_ref().as_slice(), t.generics, parents)); let required = t.methods.iter().filter(|m| m.is_req()).collect::<Vec<&clean::TraitMethod>>(); let provided = t.methods.iter().filter(|m| !m.is_req()).collect::<Vec<&clean::TraitMethod>>(); if t.methods.len() == 0 { try!(write!(w, "\\{ \\}")); } else { try!(write!(w, "\\{\n")); for m in required.iter() { try!(write!(w, " ")); try!(render_method(w, m.item())); try!(write!(w, ";\n")); } if required.len() > 0 && provided.len() > 0 { try!(w.write("\n".as_bytes())); } for m in provided.iter() { try!(write!(w, " ")); try!(render_method(w, m.item())); try!(write!(w, " \\{ ... 
\\}\n")); } try!(write!(w, "\\}")); } try!(write!(w, "</pre>")); // Trait documentation try!(document(w, it)); fn meth(w: &mut Writer, m: &clean::TraitMethod) -> fmt::Result { try!(write!(w, "<h3 id='{}.{}' class='method'><code>", shortty(m.item()), *m.item().name.get_ref())); try!(render_method(w, m.item())); try!(write!(w, "</code></h3>")); try!(document(w, m.item())); Ok(()) } // Output the documentation for each function individually if required.len() > 0 { try!(write!(w, " <h2 id='required-methods'>Required Methods</h2> <div class='methods'> ")); for m in required.iter() { try!(meth(w, *m)); } try!(write!(w, "</div>")); } if provided.len() > 0 { try!(write!(w, " <h2 id='provided-methods'>Provided Methods</h2> <div class='methods'> ")); for m in provided.iter() { try!(meth(w, *m)); } try!(write!(w, "</div>")); } local_data::get(cache_key, |cache| { let cache = cache.unwrap(); match cache.implementors.find(&it.id) { Some(implementors) => { try!(write!(w, " <h2 id='implementors'>Implementors</h2> <ul class='item-list'> ")); for i in implementors.iter() { match *i { PathType(ref ty) => { try!(write!(w, "<li><code>{}</code></li>", *ty)); } OtherType(ref generics, ref trait_, ref for_) => { try!(write!(w, "<li><code>impl{} {} for {}</code></li>", *generics, *trait_, *for_)); } } } try!(write!(w, "</ul>")); } None => {} } Ok(()) }) } fn render_method(w: &mut Writer, meth: &clean::Item) -> fmt::Result { fn fun(w: &mut Writer, it: &clean::Item, fn_style: ast::FnStyle, g: &clean::Generics, selfty: &clean::SelfTy, d: &clean::FnDecl) -> fmt::Result { write!(w, "{}fn <a href='\\#{ty}.{name}' class='fnname'>{name}</a>\ {generics}{decl}", match fn_style { ast::UnsafeFn => "unsafe ", _ => "", }, ty = shortty(it), name = it.name.get_ref().as_slice(), generics = *g, decl = Method(selfty, d)) } match meth.inner { clean::TyMethodItem(ref m) => { fun(w, meth, m.fn_style, &m.generics, &m.self_, &m.decl) } clean::MethodItem(ref m) => { fun(w, meth, m.fn_style, &m.generics, &m.self_, 
&m.decl) } _ => unreachable!() } } fn item_struct(w: &mut Writer, it: &clean::Item, s: &clean::Struct) -> fmt::Result { try!(write!(w, "<pre class='rust struct'>")); try!(render_struct(w, it, Some(&s.generics), s.struct_type, s.fields.as_slice(), "", true)); try!(write!(w, "</pre>")); try!(document(w, it)); let mut fields = s.fields.iter().filter(|f| { match f.inner { clean::StructFieldItem(clean::HiddenStructField) => false, clean::StructFieldItem(clean::TypedStructField(..)) => true, _ => false, } }).peekable(); match s.struct_type { doctree::Plain if fields.peek().is_some() => { try!(write!(w, "<h2 class='fields'>Fields</h2>\n<table>")); for field in fields { try!(write!(w, "<tr><td id='structfield.{name}'>\ <code>{name}</code></td><td>", name = field.name.get_ref().as_slice())); try!(document(w, field)); try!(write!(w, "</td></tr>")); } try!(write!(w, "</table>")); } _ => {} } render_methods(w, it) } fn item_enum(w: &mut Writer, it: &clean::Item, e: &clean::Enum) -> fmt::Result { try!(write!(w, "<pre class='rust enum'>{}enum {}{}", VisSpace(it.visibility), it.name.get_ref().as_slice(), e.generics)); if e.variants.len() == 0 && !e.variants_stripped { try!(write!(w, " \\{\\}")); } else { try!(write!(w, " \\{\n")); for v in e.variants.iter() { try!(write!(w, " ")); let name = v.name.get_ref().as_slice(); match v.inner { clean::VariantItem(ref var) => { match var.kind { clean::CLikeVariant => try!(write!(w, "{}", name)), clean::TupleVariant(ref tys) => { try!(write!(w, "{}(", name)); for (i, ty) in tys.iter().enumerate() { if i > 0 { try!(write!(w, ", ")) } try!(write!(w, "{}", *ty)); } try!(write!(w, ")")); } clean::StructVariant(ref s) => { try!(render_struct(w, v, None, s.struct_type, s.fields.as_slice(), " ", false)); } } } _ => unreachable!() } try!(write!(w, ",\n")); } if e.variants_stripped { try!(write!(w, " // some variants omitted\n")); } try!(write!(w, "\\}")); } try!(write!(w, "</pre>")); try!(document(w, it)); if e.variants.len() > 0 { try!(write!(w, 
"<h2 class='variants'>Variants</h2>\n<table>")); for variant in e.variants.iter() { try!(write!(w, "<tr><td id='variant.{name}'><code>{name}</code></td><td>", name = variant.name.get_ref().as_slice())); try!(document(w, variant)); match variant.inner { clean::VariantItem(ref var) => { match var.kind { clean::StructVariant(ref s) => { let mut fields = s.fields.iter().filter(|f| { match f.inner { clean::StructFieldItem(ref t) => match *t { clean::HiddenStructField => false, clean::TypedStructField(..) => true, }, _ => false, } }); try!(write!(w, "<h3 class='fields'>Fields</h3>\n <table>")); for field in fields { try!(write!(w, "<tr><td \ id='variant.{v}.field.{f}'>\ <code>{f}</code></td><td>", v = variant.name.get_ref().as_slice(), f = field.name.get_ref().as_slice())); try!(document(w, field)); try!(write!(w, "</td></tr>")); } try!(write!(w, "</table>")); } _ => () } } _ => () } try!(write!(w, "</td></tr>")); } try!(write!(w, "</table>")); } try!(render_methods(w, it)); Ok(()) } fn render_struct(w: &mut Writer, it: &clean::Item, g: Option<&clean::Generics>, ty: doctree::StructType, fields: &[clean::Item], tab: &str, structhead: bool) -> fmt::Result { try!(write!(w, "{}{}{}", VisSpace(it.visibility), if structhead {"struct "} else {""}, it.name.get_ref().as_slice())); match g { Some(g) => try!(write!(w, "{}", *g)), None => {} } match ty { doctree::Plain => { try!(write!(w, " \\{\n{}", tab)); let mut fields_stripped = false; for field in fields.iter() { match field.inner { clean::StructFieldItem(clean::HiddenStructField) => { fields_stripped = true; } clean::StructFieldItem(clean::TypedStructField(ref ty)) => { try!(write!(w, " {}{}: {},\n{}", VisSpace(field.visibility), field.name.get_ref().as_slice(), *ty, tab)); } _ => unreachable!(), }; } if fields_stripped { try!(write!(w, " // some fields omitted\n{}", tab)); } try!(write!(w, "\\}")); } doctree::Tuple | doctree::Newtype => { try!(write!(w, "(")); for (i, field) in fields.iter().enumerate() { if i > 0 { 
try!(write!(w, ", ")); } match field.inner { clean::StructFieldItem(clean::HiddenStructField) => { try!(write!(w, "_")) } clean::StructFieldItem(clean::TypedStructField(ref ty)) => { try!(write!(w, "{}{}", VisSpace(field.visibility), *ty)) } _ => unreachable!() } } try!(write!(w, ");")); } doctree::Unit => { try!(write!(w, ";")); } } Ok(()) } fn render_methods(w: &mut Writer, it: &clean::Item) -> fmt::Result { local_data::get(cache_key, |cache| { let c = cache.unwrap(); match c.impls.find(&it.id) { Some(v) => { let mut non_trait = v.iter().filter(|p| { p.ref0().trait_.is_none() }); let non_trait = non_trait.collect::<Vec<&(clean::Impl, Option<~str>)>>(); let mut traits = v.iter().filter(|p| { p.ref0().trait_.is_some() }); let traits = traits.collect::<Vec<&(clean::Impl, Option<~str>)>>(); if non_trait.len() > 0 { try!(write!(w, "<h2 id='methods'>Methods</h2>")); for &(ref i, ref dox) in non_trait.move_iter() { try!(render_impl(w, i, dox)); } } if traits.len() > 0 { try!(write!(w, "<h2 id='implementations'>Trait \ Implementations</h2>")); let mut any_derived = false; for & &(ref i, ref dox) in traits.iter() { if !i.derived { try!(render_impl(w, i, dox)); } else { any_derived = true; } } if any_derived { try!(write!(w, "<h3 id='derived_implementations'>Derived Implementations \ </h3>")); for &(ref i, ref dox) in traits.move_iter() { if i.derived { try!(render_impl(w, i, dox)); } } } } } None => {} } Ok(()) }) } fn render_impl(w: &mut Writer, i: &clean::Impl, dox: &Option<~str>) -> fmt::Result { try!(write!(w, "<h3 class='impl'><code>impl{} ", i.generics)); let trait_id = match i.trait_ { Some(ref ty) => { try!(write!(w, "{} for ", *ty)); match *ty { clean::ResolvedPath { id, .. 
} => Some(id), _ => None, } } None => None }; try!(write!(w, "{}</code></h3>", i.for_)); match *dox { Some(ref dox) => { try!(write!(w, "<div class='docblock'>{}</div>", Markdown(dox.as_slice()))); } None => {} } fn docmeth(w: &mut Writer, item: &clean::Item) -> io::IoResult<bool> { try!(write!(w, "<h4 id='method.{}' class='method'><code>", *item.name.get_ref())); try!(render_method(w, item)); try!(write!(w, "</code></h4>\n")); match item.doc_value() { Some(s) => { try!(write!(w, "<div class='docblock'>{}</div>", Markdown(s))); Ok(true) } None => Ok(false) } } try!(write!(w, "<div class='methods'>")); for meth in i.methods.iter() { if try!(docmeth(w, meth)) { continue } // No documentation? Attempt to slurp in the trait's documentation let trait_id = match trait_id { None => continue, Some(id) => id, }; try!(local_data::get(cache_key, |cache| { let cache = cache.unwrap(); match cache.traits.find(&trait_id) { Some(t) => { let name = meth.name.clone(); match t.methods.iter().find(|t| t.item().name == name) { Some(method) => { match method.item().doc_value() { Some(s) => { try!(write!(w, "<div class='docblock'>{}</div>", Markdown(s))); } None => {} } } None => {} } } None => {} } Ok(()) })) } // If we've implemented a trait, then also emit documentation for all // default methods which weren't overridden in the implementation block. match trait_id { None => {} Some(id) => { try!(local_data::get(cache_key, |cache| { let cache = cache.unwrap(); match cache.traits.find(&id) { Some(t) => { for method in t.methods.iter() { let n = method.item().name.clone(); match i.methods.iter().find(|m| m.name == n) { Some(..) 
=> continue, None => {} } try!(docmeth(w, method.item())); } } None => {} } Ok(()) })) } } try!(write!(w, "</div>")); Ok(()) } fn item_typedef(w: &mut Writer, it: &clean::Item, t: &clean::Typedef) -> fmt::Result { try!(write!(w, "<pre class='rust typedef'>type {}{} = {};</pre>", it.name.get_ref().as_slice(), t.generics, t.type_)); document(w, it) } impl<'a> fmt::Show for Sidebar<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let cx = self.cx; let it = self.item; try!(write!(fmt.buf, "<p class='location'>")); let len = cx.current.len() - if it.is_mod() {1} else {0}; for (i, name) in cx.current.iter().take(len).enumerate() { if i > 0 { try!(write!(fmt.buf, "&\\#8203;::")); } try!(write!(fmt.buf, "<a href='{}index.html'>{}</a>", cx.root_path .as_slice() .slice_to((cx.current.len() - i - 1) * 3), *name)); } try!(write!(fmt.buf, "</p>")); fn block(w: &mut Writer, short: &str, longty: &str, cur: &clean::Item, cx: &Context) -> fmt::Result { let items = match cx.sidebar.find_equiv(&short) { Some(items) => items.as_slice(), None => return Ok(()) }; try!(write!(w, "<div class='block {}'><h2>{}</h2>", short, longty)); for item in items.iter() { let curty = shortty(cur).to_static_str(); let class = if cur.name.get_ref() == item && short == curty { "current" } else { "" }; try!(write!(w, "<a class='{ty} {class}' href='{curty, select, mod{../} other{} }{tysel, select, mod{{name}/index.html} other{#.{name}.html} }'>{name}</a><br/>", ty = short, tysel = short, class = class, curty = curty, name = item.as_slice())); } try!(write!(w, "</div>")); Ok(()) } try!(block(fmt.buf, "mod", "Modules", it, cx)); try!(block(fmt.buf, "struct", "Structs", it, cx)); try!(block(fmt.buf, "enum", "Enums", it, cx)); try!(block(fmt.buf, "trait", "Traits", it, cx)); try!(block(fmt.buf, "fn", "Functions", it, cx)); Ok(()) } } fn build_sidebar(m: &clean::Module) -> HashMap<~str, Vec<~str> > { let mut map = HashMap::new(); for item in m.items.iter() { let short = 
shortty(item).to_static_str();
        let myname = match item.name {
            None => continue,
            Some(ref s) => s.to_owned(),
        };
        let v = map.find_or_insert_with(short.to_owned(), |_| Vec::new());
        v.push(myname);
    }

    for (_, items) in map.mut_iter() {
        items.as_mut_slice().sort();
    }
    return map;
}

impl<'a> fmt::Show for Source<'a> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let Source(s) = *self;
        let lines = s.lines().len();
        let mut cols = 0;
        let mut tmp = lines;
        while tmp > 0 {
            cols += 1;
            tmp /= 10;
        }
        try!(write!(fmt.buf, "<pre class='line-numbers'>"));
        for i in range(1, lines + 1) {
            try!(write!(fmt.buf, "<span id='{0:u}'>{0:1$u}</span>\n", i, cols));
        }
        try!(write!(fmt.buf, "</pre>"));
        try!(write!(fmt.buf, "{}", highlight::highlight(s.as_slice(), None)));
        Ok(())
    }
}

fn item_macro(w: &mut Writer, it: &clean::Item,
              t: &clean::Macro) -> fmt::Result {
    try!(w.write_str(highlight::highlight(t.source, Some("macro"))));
    document(w, it)
}

rustdoc: Make static initializers prettier

Previously, if an initializer took multiple lines or was just large in
general, it was pretty poorly rendered [1] [2]. This alters the logic to just
link back to the source for any multi-line static, with a placeholder of
"[definition]". This should make reading statics a little easier on the eyes.
All single-line statics are still inlined in the documentation.

Closes #13198

[1] - http://static.rust-lang.org/doc/master/sync/mutex/index.html#statics
[2] - http://static.rust-lang.org/doc/master/std/sync/atomics/index.html#statics

// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//!
//! Rustdoc's HTML Rendering module
//!
//! This module contains the bulk of the logic necessary for rendering a
//! rustdoc `clean::Crate` instance to a set of static HTML pages. This
//! rendering process is largely driven by the `format!` syntax extension to
//! perform all I/O into files and streams.
//!
//! The rendering process is largely driven by the `Context` and `Cache`
//! structures. The cache is pre-populated by crawling the crate in question,
//! and then it is shared among the various rendering tasks. The cache is meant
//! to be a fairly large structure not implementing `Clone` (because it's shared
//! among tasks). The context, however, should be a lightweight structure. This
//! is cloned per-task and contains information about what is currently being
//! rendered.
//!
//! In order to speed up rendering (mostly because of markdown rendering), the
//! rendering process has been parallelized. This parallelization is only
//! exposed through the `crate` method on the context, and then also from the
//! fact that the shared cache is stored in TLS (and must be accessed as such).
//!
//! In addition to rendering the crate itself, this module is also responsible
//! for creating the corresponding search index and source file renderings.
//! These tasks are not parallelized (they haven't been a bottleneck yet), and
//! both occur before the crate is rendered.
use collections::{HashMap, HashSet}; use std::fmt; use std::io::{fs, File, BufferedWriter, MemWriter, BufferedReader}; use std::io; use std::local_data; use std::str; use std::strbuf::StrBuf; use sync::Arc; use serialize::json::ToJson; use syntax::ast; use syntax::attr; use syntax::parse::token::InternedString; use rustc::util::nodemap::NodeSet; use clean; use doctree; use fold::DocFolder; use html::item_type; use html::item_type::{ItemType, shortty}; use html::format::{VisSpace, Method, FnStyleSpace}; use html::layout; use html::markdown; use html::markdown::Markdown; use html::highlight; /// Major driving force in all rustdoc rendering. This contains information /// about where in the tree-like hierarchy rendering is occurring and controls /// how the current page is being rendered. /// /// It is intended that this context is a lightweight object which can be fairly /// easily cloned because it is cloned per work-job (about once per item in the /// rustdoc tree). #[deriving(Clone)] pub struct Context { /// Current hierarchy of components leading down to what's currently being /// rendered pub current: Vec<~str> , /// String representation of how to get back to the root path of the 'doc/' /// folder in terms of a relative URL. pub root_path: StrBuf, /// The current destination folder of where HTML artifacts should be placed. /// This changes as the context descends into the module hierarchy. pub dst: Path, /// This describes the layout of each page, and is not modified after /// creation of the context (contains info like the favicon) pub layout: layout::Layout, /// This map is a list of what should be displayed on the sidebar of the /// current page. The key is the section header (traits, modules, /// functions), and the value is the list of containers belonging to this /// header. This map will change depending on the surrounding context of the /// page. 
pub sidebar: HashMap<~str, Vec<~str> >, /// This flag indicates whether [src] links should be generated or not. If /// the source files are present in the html rendering, then this will be /// `true`. pub include_sources: bool, } /// Indicates where an external crate can be found. pub enum ExternalLocation { /// Remote URL root of the external crate Remote(~str), /// This external crate can be found in the local doc/ folder Local, /// The external crate could not be found. Unknown, } /// Different ways an implementor of a trait can be rendered. pub enum Implementor { /// Paths are displayed specially by omitting the `impl XX for` cruft PathType(clean::Type), /// This is the generic representation of a trait implementor, used for /// primitive types and otherwise non-path types. OtherType(clean::Generics, /* trait */ clean::Type, /* for */ clean::Type), } /// This cache is used to store information about the `clean::Crate` being /// rendered in order to provide more useful documentation. This contains /// information like all implementors of a trait, all traits a type implements, /// documentation for all known traits, etc. /// /// This structure purposefully does not implement `Clone` because it's intended /// to be a fairly large and expensive structure to clone. Instead this adheres /// to `Send` so it may be stored in a `Arc` instance and shared among the various /// rendering tasks. pub struct Cache { /// Mapping of typaram ids to the name of the type parameter. This is used /// when pretty-printing a type (so pretty printing doesn't have to /// painfully maintain a context like this) pub typarams: HashMap<ast::NodeId, ~str>, /// Maps a type id to all known implementations for that type. This is only /// recognized for intra-crate `ResolvedPath` types, and is used to print /// out extra documentation on the page of an enum/struct. /// /// The values of the map are a list of implementations and documentation /// found on that implementation. 
    pub impls: HashMap<ast::NodeId, Vec<(clean::Impl, Option<~str>)> >,

    /// Maintains a mapping of local crate node ids to the fully qualified name
    /// and "short type description" of that node. This is used when generating
    /// URLs when a type is being linked to. External paths are not located in
    /// this map because the `External` type itself has all the information
    /// necessary.
    pub paths: HashMap<ast::NodeId, (Vec<~str> , ItemType)>,

    /// This map contains information about all known traits of this crate.
    /// Implementations of a crate should inherit the documentation of the
    /// parent trait if no extra documentation is specified, and default methods
    /// should show up in documentation about trait implementations.
    pub traits: HashMap<ast::NodeId, clean::Trait>,

    /// When rendering traits, it's often useful to be able to list all
    /// implementors of the trait, and this mapping is exactly that: a mapping
    /// of trait ids to the list of known implementors of the trait
    pub implementors: HashMap<ast::NodeId, Vec<Implementor> >,

    /// Cache of where external crate documentation can be found.
    pub extern_locations: HashMap<ast::CrateNum, ExternalLocation>,

    // Private fields only used when initially crawling a crate to build a cache

    stack: Vec<~str> ,
    parent_stack: Vec<ast::NodeId> ,
    search_index: Vec<IndexItem> ,
    privmod: bool,
    public_items: NodeSet,

    // In the rare case where a structure is defined in one module but
    // implemented in another, if the implementing module is parsed before the
    // defining module, then the fully qualified name of the structure isn't
    // presented in `paths` yet when its implementation methods are being
    // indexed. Caches such methods and their parent id here and indexes them
    // at the end of crate parsing.
orphan_methods: Vec<(ast::NodeId, clean::Item)>, } /// Helper struct to render all source code to HTML pages struct SourceCollector<'a> { cx: &'a mut Context, /// Processed source-file paths seen: HashSet<~str>, /// Root destination to place all HTML output into dst: Path, } /// Wrapper struct to render the source code of a file. This will do things like /// adding line numbers to the left-hand side. struct Source<'a>(&'a str); // Helper structs for rendering items/sidebars and carrying along contextual // information struct Item<'a> { cx: &'a Context, item: &'a clean::Item, } struct Sidebar<'a> { cx: &'a Context, item: &'a clean::Item, } /// Struct representing one entry in the JS search index. These are all emitted /// by hand to a large JS file at the end of cache-creation. struct IndexItem { ty: ItemType, name: ~str, path: ~str, desc: ~str, parent: Option<ast::NodeId>, } // TLS keys used to carry information around during rendering. local_data_key!(pub cache_key: Arc<Cache>) local_data_key!(pub current_location_key: Vec<~str> ) /// Generates the documentation for `crate` into the directory `dst` pub fn run(mut krate: clean::Crate, dst: Path) -> io::IoResult<()> { let mut cx = Context { dst: dst, current: Vec::new(), root_path: StrBuf::new(), sidebar: HashMap::new(), layout: layout::Layout { logo: "".to_owned(), favicon: "".to_owned(), krate: krate.name.clone(), }, include_sources: true, }; try!(mkdir(&cx.dst)); match krate.module.as_ref().map(|m| m.doc_list().unwrap_or(&[])) { Some(attrs) => { for attr in attrs.iter() { match *attr { clean::NameValue(ref x, ref s) if "html_favicon_url" == *x => { cx.layout.favicon = s.to_owned(); } clean::NameValue(ref x, ref s) if "html_logo_url" == *x => { cx.layout.logo = s.to_owned(); } clean::Word(ref x) if "html_no_source" == *x => { cx.include_sources = false; } _ => {} } } } None => {} } // Crawl the crate to build various caches used for the output let mut cache = local_data::get(::analysiskey, |analysis| { let 
public_items = analysis.map(|a| a.public_items.clone()); let public_items = public_items.unwrap_or(NodeSet::new()); Cache { impls: HashMap::new(), typarams: HashMap::new(), paths: HashMap::new(), traits: HashMap::new(), implementors: HashMap::new(), stack: Vec::new(), parent_stack: Vec::new(), search_index: Vec::new(), extern_locations: HashMap::new(), privmod: false, public_items: public_items, orphan_methods: Vec::new(), } }); cache.stack.push(krate.name.clone()); krate = cache.fold_crate(krate); let mut nodeid_to_pathid = HashMap::new(); let mut pathid_to_nodeid = Vec::new(); { let Cache { search_index: ref mut index, orphan_methods: ref meths, paths: ref mut paths, ..} = cache; // Attach all orphan methods to the type's definition if the type // has since been learned. for &(ref pid, ref item) in meths.iter() { match paths.find(pid) { Some(&(ref fqp, _)) => { index.push(IndexItem { ty: shortty(item), name: item.name.clone().unwrap(), path: fqp.slice_to(fqp.len() - 1).connect("::"), desc: shorter(item.doc_value()).to_owned(), parent: Some(*pid), }); }, None => {} } }; // Reduce `NodeId` in paths into smaller sequential numbers, // and prune the paths that do not appear in the index. for item in index.iter() { match item.parent { Some(nodeid) => { if !nodeid_to_pathid.contains_key(&nodeid) { let pathid = pathid_to_nodeid.len(); nodeid_to_pathid.insert(nodeid, pathid); pathid_to_nodeid.push(nodeid); } } None => {} } } assert_eq!(nodeid_to_pathid.len(), pathid_to_nodeid.len()); } // Publish the search index let index = { let mut w = MemWriter::new(); try!(write!(&mut w, r#"searchIndex['{}'] = \{"items":["#, krate.name)); let mut lastpath = "".to_owned(); for (i, item) in cache.search_index.iter().enumerate() { // Omit the path if it is same to that of the prior item. 
let path; if lastpath == item.path { path = ""; } else { lastpath = item.path.clone(); path = item.path.as_slice(); }; if i > 0 { try!(write!(&mut w, ",")); } try!(write!(&mut w, r#"[{:u},"{}","{}",{}"#, item.ty, item.name, path, item.desc.to_json().to_str())); match item.parent { Some(nodeid) => { let pathid = *nodeid_to_pathid.find(&nodeid).unwrap(); try!(write!(&mut w, ",{}", pathid)); } None => {} } try!(write!(&mut w, "]")); } try!(write!(&mut w, r#"],"paths":["#)); for (i, &nodeid) in pathid_to_nodeid.iter().enumerate() { let &(ref fqp, short) = cache.paths.find(&nodeid).unwrap(); if i > 0 { try!(write!(&mut w, ",")); } try!(write!(&mut w, r#"[{:u},"{}"]"#, short, *fqp.last().unwrap())); } try!(write!(&mut w, r"]\};")); str::from_utf8(w.unwrap().as_slice()).unwrap().to_owned() }; // Write out the shared files. Note that these are shared among all rustdoc // docs placed in the output directory, so this needs to be a synchronized // operation with respect to all other rustdocs running around. { try!(mkdir(&cx.dst)); let _lock = ::flock::Lock::new(&cx.dst.join(".lock")); // Add all the static files. These may already exist, but we just // overwrite them anyway to make sure that they're fresh and up-to-date. 
try!(write(cx.dst.join("jquery.js"), include_bin!("static/jquery-2.1.0.min.js"))); try!(write(cx.dst.join("main.js"), include_bin!("static/main.js"))); try!(write(cx.dst.join("main.css"), include_bin!("static/main.css"))); try!(write(cx.dst.join("normalize.css"), include_bin!("static/normalize.css"))); try!(write(cx.dst.join("FiraSans-Regular.woff"), include_bin!("static/FiraSans-Regular.woff"))); try!(write(cx.dst.join("FiraSans-Medium.woff"), include_bin!("static/FiraSans-Medium.woff"))); try!(write(cx.dst.join("Heuristica-Regular.woff"), include_bin!("static/Heuristica-Regular.woff"))); try!(write(cx.dst.join("Heuristica-Italic.woff"), include_bin!("static/Heuristica-Italic.woff"))); try!(write(cx.dst.join("Heuristica-Bold.woff"), include_bin!("static/Heuristica-Bold.woff"))); // Update the search index let dst = cx.dst.join("search-index.js"); let mut all_indexes = Vec::new(); all_indexes.push(index); if dst.exists() { for line in BufferedReader::new(File::open(&dst)).lines() { let line = try!(line); if !line.starts_with("searchIndex") { continue } if line.starts_with(format!("searchIndex['{}']", krate.name)) { continue } all_indexes.push(line); } } let mut w = try!(File::create(&dst)); try!(writeln!(&mut w, r"var searchIndex = \{\};")); for index in all_indexes.iter() { try!(writeln!(&mut w, "{}", *index)); } try!(writeln!(&mut w, "initSearch(searchIndex);")); } // Render all source files (this may turn into a giant no-op) { info!("emitting source files"); let dst = cx.dst.join("src"); try!(mkdir(&dst)); let dst = dst.join(krate.name.as_slice()); try!(mkdir(&dst)); let mut folder = SourceCollector { dst: dst, seen: HashSet::new(), cx: &mut cx, }; krate = folder.fold_crate(krate); } for &(n, ref e) in krate.externs.iter() { cache.extern_locations.insert(n, extern_location(e, &cx.dst)); } // And finally render the whole crate's documentation cx.krate(krate, cache) } /// Writes the entire contents of a string to a destination, not attempting to /// catch any 
errors. fn write(dst: Path, contents: &[u8]) -> io::IoResult<()> { File::create(&dst).write(contents) } /// Makes a directory on the filesystem, failing the task if an error occurs and /// skipping if the directory already exists. fn mkdir(path: &Path) -> io::IoResult<()> { if !path.exists() { fs::mkdir(path, io::UserRWX) } else { Ok(()) } } /// Takes a path to a source file and cleans the path to it. This canonicalizes /// things like ".." to components which preserve the "top down" hierarchy of a /// static HTML tree. // FIXME (#9639): The closure should deal with &[u8] instead of &str fn clean_srcpath(src: &[u8], f: |&str|) { let p = Path::new(src); if p.as_vec() != bytes!(".") { for c in p.str_components().map(|x|x.unwrap()) { if ".." == c { f("up"); } else { f(c.as_slice()) } } } } /// Attempts to find where an external crate is located, given that we're /// rendering in to the specified source destination. fn extern_location(e: &clean::ExternalCrate, dst: &Path) -> ExternalLocation { // See if there's documentation generated into the local directory let local_location = dst.join(e.name.as_slice()); if local_location.is_dir() { return Local; } // Failing that, see if there's an attribute specifying where to find this // external crate for attr in e.attrs.iter() { match *attr { clean::List(ref x, ref list) if "doc" == *x => { for attr in list.iter() { match *attr { clean::NameValue(ref x, ref s) if "html_root_url" == *x => { if s.ends_with("/") { return Remote(s.to_owned()); } return Remote(*s + "/"); } _ => {} } } } _ => {} } } // Well, at least we tried. 
return Unknown; } impl<'a> DocFolder for SourceCollector<'a> { fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> { // If we're including source files, and we haven't seen this file yet, // then we need to render it out to the filesystem if self.cx.include_sources && !self.seen.contains(&item.source.filename) { // If it turns out that we couldn't read this file, then we probably // can't read any of the files (generating html output from json or // something like that), so just don't include sources for the // entire crate. The other option is maintaining this mapping on a // per-file basis, but that's probably not worth it... self.cx.include_sources = match self.emit_source(item.source.filename) { Ok(()) => true, Err(e) => { println!("warning: source code was requested to be rendered, \ but processing `{}` had an error: {}", item.source.filename, e); println!(" skipping rendering of source code"); false } }; self.seen.insert(item.source.filename.clone()); } self.fold_item_recur(item) } } impl<'a> SourceCollector<'a> { /// Renders the given filename into its corresponding HTML source file. fn emit_source(&mut self, filename: &str) -> io::IoResult<()> { let p = Path::new(filename); // If we couldn't open this file, then just returns because it // probably means that it's some standard library macro thing and we // can't have the source to it anyway. let contents = match File::open(&p).read_to_end() { Ok(r) => r, // macros from other libraries get special filenames which we can // safely ignore Err(..) 
if filename.starts_with("<") && filename.ends_with("macros>") => return Ok(()), Err(e) => return Err(e) }; let contents = str::from_utf8(contents.as_slice()).unwrap(); // Remove the utf-8 BOM if any let contents = if contents.starts_with("\ufeff") { contents.as_slice().slice_from(3) } else { contents.as_slice() }; // Create the intermediate directories let mut cur = self.dst.clone(); let mut root_path = StrBuf::from_str("../../"); clean_srcpath(p.dirname(), |component| { cur.push(component); mkdir(&cur).unwrap(); root_path.push_str("../"); }); cur.push(p.filename().expect("source has no filename") + bytes!(".html")); let mut w = BufferedWriter::new(try!(File::create(&cur))); let title = format!("{} -- source", cur.filename_display()); let page = layout::Page { title: title, ty: "source", root_path: root_path.as_slice(), }; try!(layout::render(&mut w as &mut Writer, &self.cx.layout, &page, &(""), &Source(contents))); try!(w.flush()); return Ok(()); } } impl DocFolder for Cache { fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> { // If this is a private module, we don't want it in the search index. let orig_privmod = match item.inner { clean::ModuleItem(..) => { let prev = self.privmod; self.privmod = prev || item.visibility != Some(ast::Public); prev } _ => self.privmod, }; // Register any generics to their corresponding string. 
This is used // when pretty-printing types match item.inner { clean::StructItem(ref s) => self.generics(&s.generics), clean::EnumItem(ref e) => self.generics(&e.generics), clean::FunctionItem(ref f) => self.generics(&f.generics), clean::TypedefItem(ref t) => self.generics(&t.generics), clean::TraitItem(ref t) => self.generics(&t.generics), clean::ImplItem(ref i) => self.generics(&i.generics), clean::TyMethodItem(ref i) => self.generics(&i.generics), clean::MethodItem(ref i) => self.generics(&i.generics), clean::ForeignFunctionItem(ref f) => self.generics(&f.generics), _ => {} } // Propagate a trait methods' documentation to all implementors of the // trait match item.inner { clean::TraitItem(ref t) => { self.traits.insert(item.id, t.clone()); } _ => {} } // Collect all the implementors of traits. match item.inner { clean::ImplItem(ref i) => { match i.trait_ { Some(clean::ResolvedPath{ id, .. }) => { let v = self.implementors.find_or_insert_with(id, |_|{ Vec::new() }); match i.for_ { clean::ResolvedPath{..} => { v.unshift(PathType(i.for_.clone())); } _ => { v.push(OtherType(i.generics.clone(), i.trait_.get_ref().clone(), i.for_.clone())); } } } Some(..) | None => {} } } _ => {} } // Index this method for searching later on match item.name { Some(ref s) => { let parent = match item.inner { clean::TyMethodItem(..) | clean::StructFieldItem(..) | clean::VariantItem(..) => { (Some(*self.parent_stack.last().unwrap()), Some(self.stack.slice_to(self.stack.len() - 1))) } clean::MethodItem(..) => { if self.parent_stack.len() == 0 { (None, None) } else { let last = self.parent_stack.last().unwrap(); let path = match self.paths.find(last) { Some(&(_, item_type::Trait)) => Some(self.stack.slice_to(self.stack.len() - 1)), // The current stack not necessarily has correlation for // where the type was defined. On the other hand, // `paths` always has the right information if present. 
Some(&(ref fqp, item_type::Struct)) | Some(&(ref fqp, item_type::Enum)) => Some(fqp.slice_to(fqp.len() - 1)), Some(..) => Some(self.stack.as_slice()), None => None }; (Some(*last), path) } } _ => (None, Some(self.stack.as_slice())) }; match parent { (parent, Some(path)) if !self.privmod => { self.search_index.push(IndexItem { ty: shortty(&item), name: s.to_owned(), path: path.connect("::"), desc: shorter(item.doc_value()).to_owned(), parent: parent, }); } (Some(parent), None) if !self.privmod => { // We have a parent, but we don't know where they're // defined yet. Wait for later to index this item. self.orphan_methods.push((parent, item.clone())) } _ => {} } } None => {} } // Keep track of the fully qualified path for this item. let pushed = if item.name.is_some() { let n = item.name.get_ref(); if n.len() > 0 { self.stack.push(n.to_owned()); true } else { false } } else { false }; match item.inner { clean::StructItem(..) | clean::EnumItem(..) | clean::TypedefItem(..) | clean::TraitItem(..) | clean::FunctionItem(..) | clean::ModuleItem(..) | clean::ForeignFunctionItem(..) => { // Reexported items mean that the same id can show up twice in // the rustdoc ast that we're looking at. We know, however, that // a reexported item doesn't show up in the `public_items` map, // so we can skip inserting into the paths map if there was // already an entry present and we're not a public item. if !self.paths.contains_key(&item.id) || self.public_items.contains(&item.id) { self.paths.insert(item.id, (self.stack.clone(), shortty(&item))); } } // link variants to their parent enum because pages aren't emitted // for each variant clean::VariantItem(..) => { let mut stack = self.stack.clone(); stack.pop(); self.paths.insert(item.id, (stack, item_type::Enum)); } _ => {} } // Maintain the parent stack let parent_pushed = match item.inner { clean::TraitItem(..) | clean::EnumItem(..) | clean::StructItem(..) 
=> { self.parent_stack.push(item.id); true } clean::ImplItem(ref i) => { match i.for_ { clean::ResolvedPath{ id, .. } => { self.parent_stack.push(id); true } _ => false } } _ => false }; // Once we've recursively found all the generics, then hoard off all the // implementations elsewhere let ret = match self.fold_item_recur(item) { Some(item) => { match item { clean::Item{ attrs, inner: clean::ImplItem(i), .. } => { match i.for_ { clean::ResolvedPath { id, .. } => { let v = self.impls.find_or_insert_with(id, |_| { Vec::new() }); // extract relevant documentation for this impl match attrs.move_iter().find(|a| { match *a { clean::NameValue(ref x, _) if "doc" == *x => true, _ => false } }) { Some(clean::NameValue(_, dox)) => { v.push((i, Some(dox))); } Some(..) | None => { v.push((i, None)); } } } _ => {} } None } // Private modules may survive the strip-private pass if // they contain impls for public types, but those will get // stripped here clean::Item { inner: clean::ModuleItem(ref m), visibility, .. 
                }
                if (m.items.len() == 0 && item.doc_value().is_none()) ||
                   visibility != Some(ast::Public) => None,

                    i => Some(i),
                }
            }
            i => i,
        };

        // Unwind the per-item bookkeeping pushed earlier in fold_item.
        if pushed { self.stack.pop().unwrap(); }
        if parent_pushed { self.parent_stack.pop().unwrap(); }
        self.privmod = orig_privmod;
        return ret;
    }
}

impl<'a> Cache {
    // Records the names of a definition's type parameters so that
    // pretty-printing can later refer to generics by name.
    fn generics(&mut self, generics: &clean::Generics) {
        for typ in generics.type_params.iter() {
            self.typarams.insert(typ.id, typ.name.clone());
        }
    }
}

impl Context {
    /// Recurse in the directory structure and change the "root path" to make
    /// sure it always points to the top (relatively)
    fn recurse<T>(&mut self, s: ~str, f: |&mut Context| -> T) -> T {
        if s.len() == 0 {
            fail!("what {:?}", self);
        }
        // Save current state, descend into the sub-directory `s`.
        let prev = self.dst.clone();
        self.dst.push(s.as_slice());
        self.root_path.push_str("../");
        self.current.push(s);

        info!("Recursing into {}", self.dst.display());
        mkdir(&self.dst).unwrap();
        let ret = f(self);
        info!("Recursed; leaving {}", self.dst.display());

        // Go back to where we were at
        self.dst = prev;
        // "../" is three bytes; drop exactly one path component's worth.
        let len = self.root_path.len();
        self.root_path.truncate(len - 3);
        self.current.pop().unwrap();

        return ret;
    }

    /// Main method for rendering a crate.
    ///
    /// This currently isn't parallelized, but it'd be pretty easy to add
    /// parallelization to this function.
    fn krate(self, mut krate: clean::Crate, cache: Cache) -> io::IoResult<()> {
        let mut item = match krate.module.take() {
            Some(i) => i,
            None => return Ok(())
        };
        item.name = Some(krate.name);

        // using a rwarc makes this parallelizable in the future
        local_data::set(cache_key, Arc::new(cache));

        // Explicit work list of (renderer, item) pairs; rendering an item may
        // push its sub-items back onto the list via the closure.
        let mut work = vec!((self, item));
        loop {
            match work.pop() {
                Some((mut cx, item)) => try!(cx.item(item, |cx, item| {
                    work.push((cx.clone(), item));
                })),
                None => break,
            }
        }
        Ok(())
    }

    /// Non-parallelized version of rendering an item. This will take the input
    /// item, render its contents, and then invoke the specified closure with
    /// all sub-items which need to be rendered.
    ///
    /// The rendering driver uses this closure to queue up more work.
fn item(&mut self, item: clean::Item, f: |&mut Context, clean::Item|) -> io::IoResult<()> { fn render(w: io::File, cx: &mut Context, it: &clean::Item, pushname: bool) -> io::IoResult<()> { info!("Rendering an item to {}", w.path().display()); // A little unfortunate that this is done like this, but it sure // does make formatting *a lot* nicer. local_data::set(current_location_key, cx.current.clone()); let mut title = StrBuf::from_str(cx.current.connect("::")); if pushname { if title.len() > 0 { title.push_str("::"); } title.push_str(*it.name.get_ref()); } title.push_str(" - Rust"); let page = layout::Page { ty: shortty(it).to_static_str(), root_path: cx.root_path.as_slice(), title: title.as_slice(), }; markdown::reset_headers(); // We have a huge number of calls to write, so try to alleviate some // of the pain by using a buffered writer instead of invoking the // write sycall all the time. let mut writer = BufferedWriter::new(w); try!(layout::render(&mut writer as &mut Writer, &cx.layout, &page, &Sidebar{ cx: cx, item: it }, &Item{ cx: cx, item: it })); writer.flush() } match item.inner { // modules are special because they add a namespace. We also need to // recurse into the items of the module as well. clean::ModuleItem(..) => { let name = item.name.get_ref().to_owned(); let mut item = Some(item); self.recurse(name, |this| { let item = item.take_unwrap(); let dst = this.dst.join("index.html"); let dst = try!(File::create(&dst)); try!(render(dst, this, &item, false)); let m = match item.inner { clean::ModuleItem(m) => m, _ => unreachable!() }; this.sidebar = build_sidebar(&m); for item in m.items.move_iter() { f(this,item); } Ok(()) }) } // Things which don't have names (like impls) don't get special // pages dedicated to them. 
_ if item.name.is_some() => { let dst = self.dst.join(item_path(&item)); let dst = try!(File::create(&dst)); render(dst, self, &item, true) } _ => Ok(()) } } } impl<'a> Item<'a> { fn ismodule(&self) -> bool { match self.item.inner { clean::ModuleItem(..) => true, _ => false } } fn link(&self) -> ~str { let mut path = Vec::new(); clean_srcpath(self.item.source.filename.as_bytes(), |component| { path.push(component.to_owned()); }); let href = if self.item.source.loline == self.item.source.hiline { format!("{}", self.item.source.loline) } else { format!("{}-{}", self.item.source.loline, self.item.source.hiline) }; format!("{root}src/{krate}/{path}.html\\#{href}", root = self.cx.root_path, krate = self.cx.layout.krate, path = path.connect("/"), href = href) } } impl<'a> fmt::Show for Item<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { // Write the breadcrumb trail header for the top try!(write!(fmt.buf, "\n<h1 class='fqn'>")); match self.item.inner { clean::ModuleItem(ref m) => if m.is_crate { try!(write!(fmt.buf, "Crate ")); } else { try!(write!(fmt.buf, "Module ")); }, clean::FunctionItem(..) => try!(write!(fmt.buf, "Function ")), clean::TraitItem(..) => try!(write!(fmt.buf, "Trait ")), clean::StructItem(..) => try!(write!(fmt.buf, "Struct ")), clean::EnumItem(..) 
=> try!(write!(fmt.buf, "Enum ")), _ => {} } let cur = self.cx.current.as_slice(); let amt = if self.ismodule() { cur.len() - 1 } else { cur.len() }; for (i, component) in cur.iter().enumerate().take(amt) { let mut trail = StrBuf::new(); for _ in range(0, cur.len() - i - 1) { trail.push_str("../"); } try!(write!(fmt.buf, "<a href='{}index.html'>{}</a>::", trail, component.as_slice())); } try!(write!(fmt.buf, "<a class='{}' href=''>{}</a>", shortty(self.item), self.item.name.get_ref().as_slice())); // Write stability attributes match attr::find_stability(self.item.attrs.iter()) { Some(ref stability) => { try!(write!(fmt.buf, "<a class='stability {lvl}' title='{reason}'>{lvl}</a>", lvl = stability.level.to_str(), reason = match stability.text { Some(ref s) => (*s).clone(), None => InternedString::new(""), })); } None => {} } // Write `src` tag if self.cx.include_sources { try!(write!(fmt.buf, "<a class='source' href='{}'>[src]</a>", self.link())); } try!(write!(fmt.buf, "</h1>\n")); match self.item.inner { clean::ModuleItem(ref m) => { item_module(fmt.buf, self.cx, self.item, m.items.as_slice()) } clean::FunctionItem(ref f) | clean::ForeignFunctionItem(ref f) => item_function(fmt.buf, self.item, f), clean::TraitItem(ref t) => item_trait(fmt.buf, self.item, t), clean::StructItem(ref s) => item_struct(fmt.buf, self.item, s), clean::EnumItem(ref e) => item_enum(fmt.buf, self.item, e), clean::TypedefItem(ref t) => item_typedef(fmt.buf, self.item, t), clean::MacroItem(ref m) => item_macro(fmt.buf, self.item, m), _ => Ok(()) } } } fn item_path(item: &clean::Item) -> ~str { match item.inner { clean::ModuleItem(..) => *item.name.get_ref() + "/index.html", _ => shortty(item).to_static_str() + "." 
+ *item.name.get_ref() + ".html"
    }
}

// Joins the current module path with the item's own name into a
// fully-qualified `a::b::c` string (used for link titles).
fn full_path(cx: &Context, item: &clean::Item) -> ~str {
    let mut s = StrBuf::from_str(cx.current.connect("::"));
    s.push_str("::");
    s.push_str(item.name.get_ref().as_slice());
    return s.into_owned();
}

// Unwraps an optional doc string, defaulting to the empty string.
fn blank<'a>(s: Option<&'a str>) -> &'a str {
    match s {
        Some(s) => s,
        None => ""
    }
}

// Returns only the first paragraph (text before the first blank line) of an
// optional doc string; used for short descriptions in module listings.
fn shorter<'a>(s: Option<&'a str>) -> &'a str {
    match s {
        Some(s) => match s.find_str("\n\n") {
            Some(pos) => s.slice_to(pos),
            None => s,
        },
        None => ""
    }
}

// Renders an item's documentation (if any) as a markdown doc block.
fn document(w: &mut Writer, item: &clean::Item) -> fmt::Result {
    match item.doc_value() {
        Some(s) => {
            try!(write!(w, "<div class='docblock'>{}</div>", Markdown(s)));
        }
        None => {}
    }
    Ok(())
}

// Renders a module's index page: the module docs followed by kind-grouped
// tables of its child items.
fn item_module(w: &mut Writer, cx: &Context,
               item: &clean::Item, items: &[clean::Item]) -> fmt::Result {
    try!(document(w, item));
    debug!("{:?}", items);

    // Sort indices rather than the items themselves, so the original source
    // order is still available as a tie-breaker.
    let mut indices = Vec::from_fn(items.len(), |i| i);

    // Orders two items for the module listing: items of the same kind sort by
    // name; otherwise a fixed kind precedence applies (extern crates first
    // among view items, then view items, modules, macros, structs, enums,
    // statics, foreign items, traits, functions, typedefs), with source order
    // breaking any remaining ties.
    fn cmp(i1: &clean::Item, i2: &clean::Item, idx1: uint, idx2: uint) -> Ordering {
        if shortty(i1) == shortty(i2) {
            return i1.name.cmp(&i2.name);
        }
        match (&i1.inner, &i2.inner) {
            (&clean::ViewItemItem(ref a), &clean::ViewItemItem(ref b)) => {
                match (&a.inner, &b.inner) {
                    (&clean::ExternCrate(..), _) => Less,
                    (_, &clean::ExternCrate(..)) => Greater,
                    _ => idx1.cmp(&idx2),
                }
            }
            (&clean::ViewItemItem(..), _) => Less,
            (_, &clean::ViewItemItem(..)) => Greater,
            (&clean::ModuleItem(..), _) => Less,
            (_, &clean::ModuleItem(..)) => Greater,
            (&clean::MacroItem(..), _) => Less,
            (_, &clean::MacroItem(..)) => Greater,
            (&clean::StructItem(..), _) => Less,
            (_, &clean::StructItem(..)) => Greater,
            (&clean::EnumItem(..), _) => Less,
            (_, &clean::EnumItem(..)) => Greater,
            (&clean::StaticItem(..), _) => Less,
            (_, &clean::StaticItem(..)) => Greater,
            (&clean::ForeignFunctionItem(..), _) => Less,
            (_, &clean::ForeignFunctionItem(..)) => Greater,
            (&clean::ForeignStaticItem(..), _) => Less,
            (_, &clean::ForeignStaticItem(..)) => Greater,
            (&clean::TraitItem(..), _) => Less,
            (_, &clean::TraitItem(..)) => Greater,
            (&clean::FunctionItem(..), _)
=> Less, (_, &clean::FunctionItem(..)) => Greater, (&clean::TypedefItem(..), _) => Less, (_, &clean::TypedefItem(..)) => Greater, _ => idx1.cmp(&idx2), } } debug!("{:?}", indices); indices.sort_by(|&i1, &i2| cmp(&items[i1], &items[i2], i1, i2)); debug!("{:?}", indices); let mut curty = None; for &idx in indices.iter() { let myitem = &items[idx]; let myty = Some(shortty(myitem)); if myty != curty { if curty.is_some() { try!(write!(w, "</table>")); } curty = myty; let (short, name) = match myitem.inner { clean::ModuleItem(..) => ("modules", "Modules"), clean::StructItem(..) => ("structs", "Structs"), clean::EnumItem(..) => ("enums", "Enums"), clean::FunctionItem(..) => ("functions", "Functions"), clean::TypedefItem(..) => ("types", "Type Definitions"), clean::StaticItem(..) => ("statics", "Statics"), clean::TraitItem(..) => ("traits", "Traits"), clean::ImplItem(..) => ("impls", "Implementations"), clean::ViewItemItem(..) => ("reexports", "Reexports"), clean::TyMethodItem(..) => ("tymethods", "Type Methods"), clean::MethodItem(..) => ("methods", "Methods"), clean::StructFieldItem(..) => ("fields", "Struct Fields"), clean::VariantItem(..) => ("variants", "Variants"), clean::ForeignFunctionItem(..) => ("ffi-fns", "Foreign Functions"), clean::ForeignStaticItem(..) => ("ffi-statics", "Foreign Statics"), clean::MacroItem(..) 
=> ("macros", "Macros"), }; try!(write!(w, "<h2 id='{id}' class='section-header'>\ <a href=\"\\#{id}\">{name}</a></h2>\n<table>", id = short, name = name)); } match myitem.inner { clean::StaticItem(ref s) | clean::ForeignStaticItem(ref s) => { struct Initializer<'a>(&'a str, Item<'a>); impl<'a> fmt::Show for Initializer<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let Initializer(s, item) = *self; if s.len() == 0 { return Ok(()); } try!(write!(f.buf, "<code> = </code>")); if s.contains("\n") { write!(f.buf, "<a href='{}'>[definition]</a>", item.link()) } else { write!(f.buf, "<code>{}</code>", s.as_slice()) } } } try!(write!(w, " <tr> <td><code>{}static {}: {}</code>{}</td> <td class='docblock'>{}&nbsp;</td> </tr> ", VisSpace(myitem.visibility), *myitem.name.get_ref(), s.type_, Initializer(s.expr, Item { cx: cx, item: myitem }), Markdown(blank(myitem.doc_value())))); } clean::ViewItemItem(ref item) => { match item.inner { clean::ExternCrate(ref name, ref src, _) => { try!(write!(w, "<tr><td><code>extern crate {}", name.as_slice())); match *src { Some(ref src) => try!(write!(w, " = \"{}\"", src.as_slice())), None => {} } try!(write!(w, ";</code></td></tr>")); } clean::Import(ref import) => { try!(write!(w, "<tr><td><code>{}{}</code></td></tr>", VisSpace(myitem.visibility), *import)); } } } _ => { if myitem.name.is_none() { continue } try!(write!(w, " <tr> <td><a class='{class}' href='{href}' title='{title}'>{}</a></td> <td class='docblock short'>{}</td> </tr> ", *myitem.name.get_ref(), Markdown(shorter(myitem.doc_value())), class = shortty(myitem), href = item_path(myitem), title = full_path(cx, myitem))); } } } write!(w, "</table>") } fn item_function(w: &mut Writer, it: &clean::Item, f: &clean::Function) -> fmt::Result { try!(write!(w, "<pre class='rust fn'>{vis}{fn_style}fn \ {name}{generics}{decl}</pre>", vis = VisSpace(it.visibility), fn_style = FnStyleSpace(f.fn_style), name = it.name.get_ref().as_slice(), generics = f.generics, decl = 
f.decl)); document(w, it) } fn item_trait(w: &mut Writer, it: &clean::Item, t: &clean::Trait) -> fmt::Result { let mut parents = StrBuf::new(); if t.parents.len() > 0 { parents.push_str(": "); for (i, p) in t.parents.iter().enumerate() { if i > 0 { parents.push_str(" + "); } parents.push_str(format!("{}", *p)); } } // Output the trait definition try!(write!(w, "<pre class='rust trait'>{}trait {}{}{} ", VisSpace(it.visibility), it.name.get_ref().as_slice(), t.generics, parents)); let required = t.methods.iter().filter(|m| m.is_req()).collect::<Vec<&clean::TraitMethod>>(); let provided = t.methods.iter().filter(|m| !m.is_req()).collect::<Vec<&clean::TraitMethod>>(); if t.methods.len() == 0 { try!(write!(w, "\\{ \\}")); } else { try!(write!(w, "\\{\n")); for m in required.iter() { try!(write!(w, " ")); try!(render_method(w, m.item())); try!(write!(w, ";\n")); } if required.len() > 0 && provided.len() > 0 { try!(w.write("\n".as_bytes())); } for m in provided.iter() { try!(write!(w, " ")); try!(render_method(w, m.item())); try!(write!(w, " \\{ ... 
\\}\n")); } try!(write!(w, "\\}")); } try!(write!(w, "</pre>")); // Trait documentation try!(document(w, it)); fn meth(w: &mut Writer, m: &clean::TraitMethod) -> fmt::Result { try!(write!(w, "<h3 id='{}.{}' class='method'><code>", shortty(m.item()), *m.item().name.get_ref())); try!(render_method(w, m.item())); try!(write!(w, "</code></h3>")); try!(document(w, m.item())); Ok(()) } // Output the documentation for each function individually if required.len() > 0 { try!(write!(w, " <h2 id='required-methods'>Required Methods</h2> <div class='methods'> ")); for m in required.iter() { try!(meth(w, *m)); } try!(write!(w, "</div>")); } if provided.len() > 0 { try!(write!(w, " <h2 id='provided-methods'>Provided Methods</h2> <div class='methods'> ")); for m in provided.iter() { try!(meth(w, *m)); } try!(write!(w, "</div>")); } local_data::get(cache_key, |cache| { let cache = cache.unwrap(); match cache.implementors.find(&it.id) { Some(implementors) => { try!(write!(w, " <h2 id='implementors'>Implementors</h2> <ul class='item-list'> ")); for i in implementors.iter() { match *i { PathType(ref ty) => { try!(write!(w, "<li><code>{}</code></li>", *ty)); } OtherType(ref generics, ref trait_, ref for_) => { try!(write!(w, "<li><code>impl{} {} for {}</code></li>", *generics, *trait_, *for_)); } } } try!(write!(w, "</ul>")); } None => {} } Ok(()) }) } fn render_method(w: &mut Writer, meth: &clean::Item) -> fmt::Result { fn fun(w: &mut Writer, it: &clean::Item, fn_style: ast::FnStyle, g: &clean::Generics, selfty: &clean::SelfTy, d: &clean::FnDecl) -> fmt::Result { write!(w, "{}fn <a href='\\#{ty}.{name}' class='fnname'>{name}</a>\ {generics}{decl}", match fn_style { ast::UnsafeFn => "unsafe ", _ => "", }, ty = shortty(it), name = it.name.get_ref().as_slice(), generics = *g, decl = Method(selfty, d)) } match meth.inner { clean::TyMethodItem(ref m) => { fun(w, meth, m.fn_style, &m.generics, &m.self_, &m.decl) } clean::MethodItem(ref m) => { fun(w, meth, m.fn_style, &m.generics, &m.self_, 
&m.decl) } _ => unreachable!() } } fn item_struct(w: &mut Writer, it: &clean::Item, s: &clean::Struct) -> fmt::Result { try!(write!(w, "<pre class='rust struct'>")); try!(render_struct(w, it, Some(&s.generics), s.struct_type, s.fields.as_slice(), "", true)); try!(write!(w, "</pre>")); try!(document(w, it)); let mut fields = s.fields.iter().filter(|f| { match f.inner { clean::StructFieldItem(clean::HiddenStructField) => false, clean::StructFieldItem(clean::TypedStructField(..)) => true, _ => false, } }).peekable(); match s.struct_type { doctree::Plain if fields.peek().is_some() => { try!(write!(w, "<h2 class='fields'>Fields</h2>\n<table>")); for field in fields { try!(write!(w, "<tr><td id='structfield.{name}'>\ <code>{name}</code></td><td>", name = field.name.get_ref().as_slice())); try!(document(w, field)); try!(write!(w, "</td></tr>")); } try!(write!(w, "</table>")); } _ => {} } render_methods(w, it) } fn item_enum(w: &mut Writer, it: &clean::Item, e: &clean::Enum) -> fmt::Result { try!(write!(w, "<pre class='rust enum'>{}enum {}{}", VisSpace(it.visibility), it.name.get_ref().as_slice(), e.generics)); if e.variants.len() == 0 && !e.variants_stripped { try!(write!(w, " \\{\\}")); } else { try!(write!(w, " \\{\n")); for v in e.variants.iter() { try!(write!(w, " ")); let name = v.name.get_ref().as_slice(); match v.inner { clean::VariantItem(ref var) => { match var.kind { clean::CLikeVariant => try!(write!(w, "{}", name)), clean::TupleVariant(ref tys) => { try!(write!(w, "{}(", name)); for (i, ty) in tys.iter().enumerate() { if i > 0 { try!(write!(w, ", ")) } try!(write!(w, "{}", *ty)); } try!(write!(w, ")")); } clean::StructVariant(ref s) => { try!(render_struct(w, v, None, s.struct_type, s.fields.as_slice(), " ", false)); } } } _ => unreachable!() } try!(write!(w, ",\n")); } if e.variants_stripped { try!(write!(w, " // some variants omitted\n")); } try!(write!(w, "\\}")); } try!(write!(w, "</pre>")); try!(document(w, it)); if e.variants.len() > 0 { try!(write!(w, 
"<h2 class='variants'>Variants</h2>\n<table>")); for variant in e.variants.iter() { try!(write!(w, "<tr><td id='variant.{name}'><code>{name}</code></td><td>", name = variant.name.get_ref().as_slice())); try!(document(w, variant)); match variant.inner { clean::VariantItem(ref var) => { match var.kind { clean::StructVariant(ref s) => { let mut fields = s.fields.iter().filter(|f| { match f.inner { clean::StructFieldItem(ref t) => match *t { clean::HiddenStructField => false, clean::TypedStructField(..) => true, }, _ => false, } }); try!(write!(w, "<h3 class='fields'>Fields</h3>\n <table>")); for field in fields { try!(write!(w, "<tr><td \ id='variant.{v}.field.{f}'>\ <code>{f}</code></td><td>", v = variant.name.get_ref().as_slice(), f = field.name.get_ref().as_slice())); try!(document(w, field)); try!(write!(w, "</td></tr>")); } try!(write!(w, "</table>")); } _ => () } } _ => () } try!(write!(w, "</td></tr>")); } try!(write!(w, "</table>")); } try!(render_methods(w, it)); Ok(()) } fn render_struct(w: &mut Writer, it: &clean::Item, g: Option<&clean::Generics>, ty: doctree::StructType, fields: &[clean::Item], tab: &str, structhead: bool) -> fmt::Result { try!(write!(w, "{}{}{}", VisSpace(it.visibility), if structhead {"struct "} else {""}, it.name.get_ref().as_slice())); match g { Some(g) => try!(write!(w, "{}", *g)), None => {} } match ty { doctree::Plain => { try!(write!(w, " \\{\n{}", tab)); let mut fields_stripped = false; for field in fields.iter() { match field.inner { clean::StructFieldItem(clean::HiddenStructField) => { fields_stripped = true; } clean::StructFieldItem(clean::TypedStructField(ref ty)) => { try!(write!(w, " {}{}: {},\n{}", VisSpace(field.visibility), field.name.get_ref().as_slice(), *ty, tab)); } _ => unreachable!(), }; } if fields_stripped { try!(write!(w, " // some fields omitted\n{}", tab)); } try!(write!(w, "\\}")); } doctree::Tuple | doctree::Newtype => { try!(write!(w, "(")); for (i, field) in fields.iter().enumerate() { if i > 0 { 
try!(write!(w, ", ")); } match field.inner { clean::StructFieldItem(clean::HiddenStructField) => { try!(write!(w, "_")) } clean::StructFieldItem(clean::TypedStructField(ref ty)) => { try!(write!(w, "{}{}", VisSpace(field.visibility), *ty)) } _ => unreachable!() } } try!(write!(w, ");")); } doctree::Unit => { try!(write!(w, ";")); } } Ok(()) } fn render_methods(w: &mut Writer, it: &clean::Item) -> fmt::Result { local_data::get(cache_key, |cache| { let c = cache.unwrap(); match c.impls.find(&it.id) { Some(v) => { let mut non_trait = v.iter().filter(|p| { p.ref0().trait_.is_none() }); let non_trait = non_trait.collect::<Vec<&(clean::Impl, Option<~str>)>>(); let mut traits = v.iter().filter(|p| { p.ref0().trait_.is_some() }); let traits = traits.collect::<Vec<&(clean::Impl, Option<~str>)>>(); if non_trait.len() > 0 { try!(write!(w, "<h2 id='methods'>Methods</h2>")); for &(ref i, ref dox) in non_trait.move_iter() { try!(render_impl(w, i, dox)); } } if traits.len() > 0 { try!(write!(w, "<h2 id='implementations'>Trait \ Implementations</h2>")); let mut any_derived = false; for & &(ref i, ref dox) in traits.iter() { if !i.derived { try!(render_impl(w, i, dox)); } else { any_derived = true; } } if any_derived { try!(write!(w, "<h3 id='derived_implementations'>Derived Implementations \ </h3>")); for &(ref i, ref dox) in traits.move_iter() { if i.derived { try!(render_impl(w, i, dox)); } } } } } None => {} } Ok(()) }) } fn render_impl(w: &mut Writer, i: &clean::Impl, dox: &Option<~str>) -> fmt::Result { try!(write!(w, "<h3 class='impl'><code>impl{} ", i.generics)); let trait_id = match i.trait_ { Some(ref ty) => { try!(write!(w, "{} for ", *ty)); match *ty { clean::ResolvedPath { id, .. 
} => Some(id), _ => None, } } None => None }; try!(write!(w, "{}</code></h3>", i.for_)); match *dox { Some(ref dox) => { try!(write!(w, "<div class='docblock'>{}</div>", Markdown(dox.as_slice()))); } None => {} } fn docmeth(w: &mut Writer, item: &clean::Item) -> io::IoResult<bool> { try!(write!(w, "<h4 id='method.{}' class='method'><code>", *item.name.get_ref())); try!(render_method(w, item)); try!(write!(w, "</code></h4>\n")); match item.doc_value() { Some(s) => { try!(write!(w, "<div class='docblock'>{}</div>", Markdown(s))); Ok(true) } None => Ok(false) } } try!(write!(w, "<div class='methods'>")); for meth in i.methods.iter() { if try!(docmeth(w, meth)) { continue } // No documentation? Attempt to slurp in the trait's documentation let trait_id = match trait_id { None => continue, Some(id) => id, }; try!(local_data::get(cache_key, |cache| { let cache = cache.unwrap(); match cache.traits.find(&trait_id) { Some(t) => { let name = meth.name.clone(); match t.methods.iter().find(|t| t.item().name == name) { Some(method) => { match method.item().doc_value() { Some(s) => { try!(write!(w, "<div class='docblock'>{}</div>", Markdown(s))); } None => {} } } None => {} } } None => {} } Ok(()) })) } // If we've implemented a trait, then also emit documentation for all // default methods which weren't overridden in the implementation block. match trait_id { None => {} Some(id) => { try!(local_data::get(cache_key, |cache| { let cache = cache.unwrap(); match cache.traits.find(&id) { Some(t) => { for method in t.methods.iter() { let n = method.item().name.clone(); match i.methods.iter().find(|m| m.name == n) { Some(..) 
=> continue, None => {} } try!(docmeth(w, method.item())); } } None => {} } Ok(()) })) } } try!(write!(w, "</div>")); Ok(()) } fn item_typedef(w: &mut Writer, it: &clean::Item, t: &clean::Typedef) -> fmt::Result { try!(write!(w, "<pre class='rust typedef'>type {}{} = {};</pre>", it.name.get_ref().as_slice(), t.generics, t.type_)); document(w, it) } impl<'a> fmt::Show for Sidebar<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let cx = self.cx; let it = self.item; try!(write!(fmt.buf, "<p class='location'>")); let len = cx.current.len() - if it.is_mod() {1} else {0}; for (i, name) in cx.current.iter().take(len).enumerate() { if i > 0 { try!(write!(fmt.buf, "&\\#8203;::")); } try!(write!(fmt.buf, "<a href='{}index.html'>{}</a>", cx.root_path .as_slice() .slice_to((cx.current.len() - i - 1) * 3), *name)); } try!(write!(fmt.buf, "</p>")); fn block(w: &mut Writer, short: &str, longty: &str, cur: &clean::Item, cx: &Context) -> fmt::Result { let items = match cx.sidebar.find_equiv(&short) { Some(items) => items.as_slice(), None => return Ok(()) }; try!(write!(w, "<div class='block {}'><h2>{}</h2>", short, longty)); for item in items.iter() { let curty = shortty(cur).to_static_str(); let class = if cur.name.get_ref() == item && short == curty { "current" } else { "" }; try!(write!(w, "<a class='{ty} {class}' href='{curty, select, mod{../} other{} }{tysel, select, mod{{name}/index.html} other{#.{name}.html} }'>{name}</a><br/>", ty = short, tysel = short, class = class, curty = curty, name = item.as_slice())); } try!(write!(w, "</div>")); Ok(()) } try!(block(fmt.buf, "mod", "Modules", it, cx)); try!(block(fmt.buf, "struct", "Structs", it, cx)); try!(block(fmt.buf, "enum", "Enums", it, cx)); try!(block(fmt.buf, "trait", "Traits", it, cx)); try!(block(fmt.buf, "fn", "Functions", it, cx)); Ok(()) } } fn build_sidebar(m: &clean::Module) -> HashMap<~str, Vec<~str> > { let mut map = HashMap::new(); for item in m.items.iter() { let short = 
shortty(item).to_static_str();
        let myname = match item.name {
            None => continue,
            Some(ref s) => s.to_owned(),
        };
        // Group item names by their kind's short name ("mod", "fn", ...).
        let v = map.find_or_insert_with(short.to_owned(), |_| Vec::new());
        v.push(myname);
    }

    // Alphabetize each sidebar section.
    for (_, items) in map.mut_iter() { items.as_mut_slice().sort(); }
    return map;
}

impl<'a> fmt::Show for Source<'a> {
    // Emits a source listing: a <pre> gutter of right-aligned, anchored line
    // numbers followed by the syntax-highlighted source text itself.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let Source(s) = *self;
        let lines = s.lines().len();
        // Count the decimal digits of the largest line number so every
        // number in the gutter can be right-aligned to the same width.
        let mut cols = 0;
        let mut tmp = lines;
        while tmp > 0 {
            cols += 1;
            tmp /= 10;
        }
        try!(write!(fmt.buf, "<pre class='line-numbers'>"));
        for i in range(1, lines + 1) {
            // Each number doubles as an anchor target (id = line number).
            try!(write!(fmt.buf, "<span id='{0:u}'>{0:1$u}</span>\n", i, cols));
        }
        try!(write!(fmt.buf, "</pre>"));
        try!(write!(fmt.buf, "{}", highlight::highlight(s.as_slice(), None)));
        Ok(())
    }
}

// Renders a macro's page: the highlighted macro source followed by its docs.
fn item_macro(w: &mut Writer, it: &clean::Item,
              t: &clean::Macro) -> fmt::Result {
    try!(w.write_str(highlight::highlight(t.source, Some("macro"))));
    document(w, it)
}
import print::pprust::expr_to_str; import result::result; import either::{Either, Left, Right}; import std::map::{hashmap, str_hash}; import token::{can_begin_expr, is_ident, is_ident_or_path, is_plain_ident, INTERPOLATED}; import codemap::{span,fss_none}; import util::interner; import ast_util::{spanned, respan, mk_sp, ident_to_path, operator_prec}; import lexer::reader; import prec::{as_prec, token_to_binop}; import attr::parser_attr; import common::{seq_sep_trailing_disallowed, seq_sep_trailing_allowed, seq_sep_none, token_to_str}; import dvec::dvec; import vec::{push}; import ast::{_mod, add, alt_check, alt_exhaustive, arg, arm, attribute, bind_by_ref, bind_by_implicit_ref, bind_by_value, bitand, bitor, bitxor, blk, blk_check_mode, bound_const, bound_copy, bound_send, bound_trait, bound_owned, box, by_copy, by_move, by_mutbl_ref, by_ref, by_val, capture_clause, capture_item, cdir_dir_mod, cdir_src_mod, cdir_view_item, class_immutable, class_mutable, crate, crate_cfg, crate_directive, decl, decl_item, decl_local, default_blk, deref, div, enum_def, enum_variant_kind, expl, expr, expr_, expr_addr_of, expr_match, expr_again, expr_assert, expr_assign, expr_assign_op, expr_binary, expr_block, expr_break, expr_call, expr_cast, expr_copy, expr_do_body, expr_fail, expr_field, expr_fn, expr_fn_block, expr_if, expr_index, expr_lit, expr_log, expr_loop, expr_loop_body, expr_mac, expr_move, expr_path, expr_rec, expr_repeat, expr_ret, expr_swap, expr_struct, expr_tup, expr_unary, expr_unary_move, expr_vec, expr_vstore, expr_while, extern_fn, field, fn_decl, foreign_item, foreign_item_fn, foreign_mod, ident, impure_fn, infer, inherited, init_assign, init_move, initializer, item, item_, item_class, item_const, item_enum, item_fn, item_foreign_mod, item_impl, item_mac, item_mod, item_trait, item_ty, lit, lit_, lit_bool, lit_float, lit_int, lit_int_unsuffixed, lit_nil, lit_str, lit_uint, local, m_const, m_imm, m_mutbl, mac_, mac_aq, mac_ellipsis, mac_invoc, mac_invoc_tt, 
// NOTE(review): this line begins inside the giant `import ast::{...}` list
// started on a previous line; the identifier list is reproduced unchanged.
mac_var, matcher, match_nonterminal, match_seq, match_tok, method,
mode, mt, mul, mutability, named_field, neg, noreturn, not, pat,
pat_box, pat_enum, pat_ident, pat_lit, pat_range, pat_rec, pat_struct,
pat_tup, pat_uniq, pat_wild, path, private, proto, proto_bare,
proto_block, proto_box, proto_uniq, provided, public, pure_fn, purity,
re_anon, re_named, region, rem, required, ret_style, return_val,
self_ty, shl, shr, stmt, stmt_decl, stmt_expr, stmt_semi, struct_def,
struct_field, struct_variant_kind, subtract, sty_box, sty_by_ref,
sty_region, sty_static, sty_uniq, sty_value, token_tree, trait_method,
trait_ref, tt_delim, tt_seq, tt_tok, tt_nonterminal, ty, ty_, ty_bot,
ty_box, ty_field, ty_fn, ty_infer, ty_mac, ty_method, ty_nil, ty_param,
ty_param_bound, ty_path, ty_ptr, ty_rec, ty_rptr, ty_tup, ty_u32,
ty_uniq, ty_vec, ty_fixed_length, tuple_variant_kind, unchecked_blk,
uniq, unnamed_field, unsafe_blk, unsafe_fn, variant, view_item,
view_item_, view_item_export, view_item_import, view_item_use,
view_path, view_path_glob, view_path_list, view_path_simple,
visibility, vstore, vstore_box, vstore_fixed, vstore_slice,
vstore_uniq};

export file_type;
export parser;
export CRATE_FILE;
export SOURCE_FILE;

// FIXME (#1893): #ast expects to find this here but it's actually
// defined in `parse` Fixing this will be easier when we have export
// decls on individual items -- then parse can export this publicly, and
// everything else crate-visibly.
import parse_from_source_str;
export parse_from_source_str;

export item_or_view_item, iovi_none, iovi_view_item, iovi_item;

// Restricts which expression forms the parser currently accepts, so that
// ambiguous constructs (statement-position exprs, calls, `|` closures)
// can be suppressed in particular grammatical contexts.
enum restriction {
    UNRESTRICTED,
    RESTRICT_STMT_EXPR,
    RESTRICT_NO_CALL_EXPRS,
    RESTRICT_NO_BAR_OP,
    RESTRICT_NO_BAR_OR_DOUBLEBAR_OP,
}

// Whether the input being parsed is a crate file or an ordinary source file.
enum file_type {
    CRATE_FILE,
    SOURCE_FILE,
}

// We don't allow single-entry tuples in the true AST; that indicates a
// parenthesized expression. However, we preserve them temporarily while
// parsing because `(while{...})+3` parses differently from `while{...}+3`.
// // To reflect the fact that the @expr is not a true expr that should be // part of the AST, we wrap such expressions in the pexpr enum. They // can then be converted to true expressions by a call to `to_expr()`. enum pexpr { pexpr(@expr), } enum class_member { field_member(@struct_field), method_member(@method) } /* So that we can distinguish a class ctor or dtor from other class members */ enum class_contents { ctor_decl(fn_decl, ~[attribute], blk, codemap::span), dtor_decl(blk, ~[attribute], codemap::span), members(~[@class_member]) } type arg_or_capture_item = Either<arg, capture_item>; type item_info = (ident, item_, option<~[attribute]>); enum item_or_view_item { iovi_none, iovi_item(@item), iovi_view_item(@view_item) } enum view_item_parse_mode { VIEW_ITEMS_AND_ITEMS_ALLOWED, VIEW_ITEMS_ALLOWED, IMPORTS_AND_ITEMS_ALLOWED } /* The expr situation is not as complex as I thought it would be. The important thing is to make sure that lookahead doesn't balk at INTERPOLATED tokens */ macro_rules! maybe_whole_expr { {$p:expr} => { match copy $p.token { INTERPOLATED(token::nt_expr(e)) => { $p.bump(); return pexpr(e); } INTERPOLATED(token::nt_path(pt)) => { $p.bump(); return $p.mk_pexpr($p.span.lo, $p.span.lo, expr_path(pt)); } _ => () }} } macro_rules! 
// (continuation of `macro_rules!` from the previous line)
// Early-return helper for interpolated nonterminals: if the current token is
// INTERPOLATED with the given constructor, consume it and return its payload.
// Variants wrap the payload differently: plain, dereferenced, `some(..)`,
// `iovi_item(..)`, or paired with an empty attribute list.
maybe_whole {
    {$p:expr, $constructor:ident} => { match copy $p.token {
        INTERPOLATED(token::$constructor(x)) => { $p.bump(); return x; }
        _ => ()
    }} ;
    {deref $p:expr, $constructor:ident} => { match copy $p.token {
        INTERPOLATED(token::$constructor(x)) => { $p.bump(); return *x; }
        _ => ()
    }} ;
    {some $p:expr, $constructor:ident} => { match copy $p.token {
        INTERPOLATED(token::$constructor(x)) => { $p.bump(); return some(x); }
        _ => ()
    }} ;
    {iovi $p:expr, $constructor:ident} => { match copy $p.token {
        INTERPOLATED(token::$constructor(x)) => { $p.bump(); return iovi_item(x); }
        _ => ()
    }} ;
    {pair_empty $p:expr, $constructor:ident} => { match copy $p.token {
        INTERPOLATED(token::$constructor(x)) => { $p.bump(); return (~[], x); }
        _ => ()
    }}
}

// Appends `rhs` attributes (if any) onto `lhs`.
pure fn maybe_append(+lhs: ~[attribute], rhs: option<~[attribute]>)
    -> ~[attribute] {
    match rhs {
        none => lhs,
        some(attrs) => vec::append(lhs, attrs)
    }
}

/* ident is handled by common.rs */

// The parser state: current/lookahead tokens, a 4-slot ring buffer for
// lookahead, the active expression restriction, and keyword tables.
// NOTE(review): this struct's methods continue far past this line; only the
// fields and (part of) the constructor are visible here.
struct parser {
    let sess: parse_sess;
    let cfg: crate_cfg;
    let file_type: file_type;
    let mut token: token::token;
    let mut span: span;
    let mut last_span: span;
    // Fixed 4-element ring buffer of buffered lookahead tokens.
    let mut buffer: [mut {tok: token::token, sp: span}]/4;
    let mut buffer_start: int;
    let mut buffer_end: int;
    let mut restriction: restriction;
    let mut quote_depth: uint; // not (yet) related to the quasiquoter
    let reader: reader;
    let keywords: hashmap<~str, ()>;
    let restricted_keywords: hashmap<~str, ()>;

    new(sess: parse_sess, cfg: ast::crate_cfg, +rdr: reader,
        ftype: file_type) {
        self.reader <- rdr;
        // Prime the parser with the first token from the reader.
        let tok0 = self.reader.next_token();
        let span0 = tok0.sp;
        self.sess = sess;
        self.cfg = cfg;
        self.file_type = ftype;
        self.token = tok0.tok;
        self.span = span0;
        self.last_span = span0;
        // All four ring-buffer slots start as copies of the first token;
        // start == end means the buffer is empty.
        self.buffer = [mut
            {tok: tok0.tok, sp: span0},
            {tok: tok0.tok, sp: span0},
            {tok: tok0.tok, sp: span0},
            {tok: tok0.tok, sp: span0}
        ]/4;
        self.buffer_start = 0;
        self.buffer_end = 0;
        self.restriction = UNRESTRICTED;
        self.quote_depth = 0u;
        self.keywords = token::keyword_table();
        // NOTE(review): this assignment's right-hand side continues on the
        // next line of the file.
        self.restricted_keywords =
token::restricted_keyword_table(); } drop {} /* do not copy the parser; its state is tied to outside state */ fn bump() { self.last_span = self.span; let next = if self.buffer_start == self.buffer_end { self.reader.next_token() } else { let next = self.buffer[self.buffer_start]; self.buffer_start = (self.buffer_start + 1) & 3; next }; self.token = next.tok; self.span = next.sp; } fn swap(next: token::token, lo: uint, hi: uint) { self.token = next; self.span = mk_sp(lo, hi); } fn buffer_length() -> int { if self.buffer_start <= self.buffer_end { return self.buffer_end - self.buffer_start; } return (4 - self.buffer_start) + self.buffer_end; } fn look_ahead(distance: uint) -> token::token { let dist = distance as int; while self.buffer_length() < dist { self.buffer[self.buffer_end] = self.reader.next_token(); self.buffer_end = (self.buffer_end + 1) & 3; } return copy self.buffer[(self.buffer_start + dist - 1) & 3].tok; } fn fatal(m: ~str) -> ! { self.sess.span_diagnostic.span_fatal(copy self.span, m) } fn span_fatal(sp: span, m: ~str) -> ! { self.sess.span_diagnostic.span_fatal(sp, m) } fn span_note(sp: span, m: ~str) { self.sess.span_diagnostic.span_note(sp, m) } fn bug(m: ~str) -> ! 
{ self.sess.span_diagnostic.span_bug(copy self.span, m) } fn warn(m: ~str) { self.sess.span_diagnostic.span_warn(copy self.span, m) } pure fn get_str(i: token::str_num) -> @~str { self.reader.interner().get(i) } fn get_id() -> node_id { next_node_id(self.sess) } fn parse_ty_fn(purity: ast::purity) -> ty_ { let proto, bounds; if self.eat_keyword(~"extern") { self.expect_keyword(~"fn"); proto = ast::proto_bare; bounds = @~[]; } else { self.expect_keyword(~"fn"); proto = self.parse_fn_ty_proto(); bounds = self.parse_optional_ty_param_bounds(); }; ty_fn(proto, bounds, self.parse_ty_fn_decl(purity)) } fn parse_ty_fn_decl(purity: ast::purity) -> fn_decl { let inputs = do self.parse_unspanned_seq( token::LPAREN, token::RPAREN, seq_sep_trailing_disallowed(token::COMMA)) |p| { let mode = p.parse_arg_mode(); let name = if is_plain_ident(p.token) && p.look_ahead(1u) == token::COLON { let name = self.parse_value_ident(); p.bump(); name } else { @~"" }; {mode: mode, ty: p.parse_ty(false), ident: name, id: p.get_id()} }; let (ret_style, ret_ty) = self.parse_ret_ty(); return {inputs: inputs, output: ret_ty, purity: purity, cf: ret_style}; } fn parse_trait_methods() -> ~[trait_method] { do self.parse_unspanned_seq(token::LBRACE, token::RBRACE, seq_sep_none()) |p| { let attrs = p.parse_outer_attributes(); let lo = p.span.lo; let is_static = p.parse_staticness(); let static_sty = spanned(lo, p.span.hi, sty_static); let pur = p.parse_fn_purity(); // NB: at the moment, trait methods are public by default; this // could change. let vis = p.parse_visibility(); let ident = p.parse_method_name(); let tps = p.parse_ty_params(); let d = p.parse_ty_fn_decl(pur); let hi = p.last_span.hi; let self_ty = if is_static { static_sty } else { spanned(lo, hi, sty_by_ref) }; // XXX: Wrong. 
debug!{"parse_trait_methods(): trait method signature ends in \ `%s`", token_to_str(p.reader, p.token)}; match p.token { token::SEMI => { p.bump(); debug!{"parse_trait_methods(): parsing required method"}; // NB: at the moment, visibility annotations on required // methods are ignored; this could change. required({ident: ident, attrs: attrs, decl: {purity: pur with d}, tps: tps, self_ty: self_ty, id: p.get_id(), span: mk_sp(lo, hi)}) } token::LBRACE => { debug!{"parse_trait_methods(): parsing provided method"}; let (inner_attrs, body) = p.parse_inner_attrs_and_block(true); let attrs = vec::append(attrs, inner_attrs); provided(@{ident: ident, attrs: attrs, tps: tps, self_ty: self_ty, decl: d, body: body, id: p.get_id(), span: mk_sp(lo, hi), self_id: p.get_id(), vis: vis}) } _ => { p.fatal(~"expected `;` or `}` but found `" + token_to_str(p.reader, p.token) + ~"`"); } } } } fn parse_mt() -> mt { let mutbl = self.parse_mutability(); let t = self.parse_ty(false); return {ty: t, mutbl: mutbl}; } fn parse_ty_field() -> ty_field { let lo = self.span.lo; let mutbl = self.parse_mutability(); let id = self.parse_ident(); self.expect(token::COLON); let ty = self.parse_ty(false); return spanned(lo, ty.span.hi, { ident: id, mt: {ty: ty, mutbl: mutbl} }); } fn parse_ret_ty() -> (ret_style, @ty) { return if self.eat(token::RARROW) { let lo = self.span.lo; if self.eat(token::NOT) { (noreturn, @{id: self.get_id(), node: ty_bot, span: mk_sp(lo, self.last_span.hi)}) } else { (return_val, self.parse_ty(false)) } } else { let pos = self.span.lo; (return_val, @{id: self.get_id(), node: ty_nil, span: mk_sp(pos, pos)}) } } fn region_from_name(s: option<@~str>) -> @region { let r = match s { some (string) => re_named(string), none => re_anon }; @{id: self.get_id(), node: r} } // Parses something like "&x" fn parse_region() -> @region { self.expect(token::BINOP(token::AND)); match copy self.token { token::IDENT(sid, _) => { self.bump(); let n = self.get_str(sid); 
self.region_from_name(some(n)) } _ => { self.region_from_name(none) } } } // Parses something like "&x/" (note the trailing slash) fn parse_region_with_sep() -> @region { let name = match copy self.token { token::IDENT(sid, _) => { if self.look_ahead(1u) == token::BINOP(token::SLASH) { self.bump(); self.bump(); some(self.get_str(sid)) } else { none } } _ => { none } }; self.region_from_name(name) } fn parse_ty(colons_before_params: bool) -> @ty { maybe_whole!{self, nt_ty}; let lo = self.span.lo; match self.maybe_parse_dollar_mac() { some(e) => { return @{id: self.get_id(), node: ty_mac(spanned(lo, self.span.hi, e)), span: mk_sp(lo, self.span.hi)}; } none => () } let t = if self.token == token::LPAREN { self.bump(); if self.token == token::RPAREN { self.bump(); ty_nil } else { let mut ts = ~[self.parse_ty(false)]; while self.token == token::COMMA { self.bump(); vec::push(ts, self.parse_ty(false)); } let t = if vec::len(ts) == 1u { ts[0].node } else { ty_tup(ts) }; self.expect(token::RPAREN); t } } else if self.token == token::AT { self.bump(); ty_box(self.parse_mt()) } else if self.token == token::TILDE { self.bump(); ty_uniq(self.parse_mt()) } else if self.token == token::BINOP(token::STAR) { self.bump(); ty_ptr(self.parse_mt()) } else if self.token == token::LBRACE { let elems = self.parse_unspanned_seq( token::LBRACE, token::RBRACE, seq_sep_trailing_allowed(token::COMMA), |p| p.parse_ty_field()); if vec::len(elems) == 0u { self.unexpected_last(token::RBRACE); } ty_rec(elems) } else if self.token == token::LBRACKET { self.expect(token::LBRACKET); let mut t = ty_vec(self.parse_mt()); // Parse the `* 3` in `[ int * 3 ]` match self.maybe_parse_fixed_vstore_with_star() { none => {} some(suffix) => { t = ty_fixed_length(@{ id: self.get_id(), node: t, span: mk_sp(lo, self.last_span.hi) }, suffix) } } self.expect(token::RBRACKET); t } else if self.token == token::BINOP(token::AND) { self.bump(); let region = self.parse_region_with_sep(); let mt = self.parse_mt(); 
ty_rptr(region, mt) } else if self.eat_keyword(~"pure") { self.parse_ty_fn(ast::pure_fn) } else if self.eat_keyword(~"unsafe") { self.parse_ty_fn(ast::unsafe_fn) } else if self.is_keyword(~"fn") { self.parse_ty_fn(ast::impure_fn) } else if self.eat_keyword(~"extern") { self.expect_keyword(~"fn"); ty_fn(proto_bare, @~[], self.parse_ty_fn_decl(ast::impure_fn)) } else if self.token == token::MOD_SEP || is_ident(self.token) { let path = self.parse_path_with_tps(colons_before_params); ty_path(path, self.get_id()) } else { self.fatal(~"expected type"); }; let sp = mk_sp(lo, self.last_span.hi); return @{id: self.get_id(), node: match self.maybe_parse_fixed_vstore() { // Consider a fixed vstore suffix (/N or /_) none => t, some(v) => { ty_fixed_length(@{id: self.get_id(), node:t, span: sp}, v) } }, span: sp} } fn parse_arg_mode() -> mode { if self.eat(token::BINOP(token::AND)) { expl(by_mutbl_ref) } else if self.eat(token::BINOP(token::MINUS)) { expl(by_move) } else if self.eat(token::ANDAND) { expl(by_ref) } else if self.eat(token::BINOP(token::PLUS)) { if self.eat(token::BINOP(token::PLUS)) { expl(by_val) } else { expl(by_copy) } } else { infer(self.get_id()) } } fn parse_capture_item_or(parse_arg_fn: fn(parser) -> arg_or_capture_item) -> arg_or_capture_item { fn parse_capture_item(p:parser, is_move: bool) -> capture_item { let sp = mk_sp(p.span.lo, p.span.hi); let ident = p.parse_ident(); @{id: p.get_id(), is_move: is_move, name: ident, span: sp} } if self.eat_keyword(~"move") { either::Right(parse_capture_item(self, true)) } else if self.eat_keyword(~"copy") { either::Right(parse_capture_item(self, false)) } else { parse_arg_fn(self) } } fn parse_arg() -> arg_or_capture_item { let m = self.parse_arg_mode(); let i = self.parse_value_ident(); self.expect(token::COLON); let t = self.parse_ty(false); either::Left({mode: m, ty: t, ident: i, id: self.get_id()}) } fn parse_arg_or_capture_item() -> arg_or_capture_item { self.parse_capture_item_or(|p| p.parse_arg()) } fn 
parse_fn_block_arg() -> arg_or_capture_item { do self.parse_capture_item_or |p| { let m = p.parse_arg_mode(); let i = p.parse_value_ident(); let t = if p.eat(token::COLON) { p.parse_ty(false) } else { @{id: p.get_id(), node: ty_infer, span: mk_sp(p.span.lo, p.span.hi)} }; either::Left({mode: m, ty: t, ident: i, id: p.get_id()}) } } fn maybe_parse_dollar_mac() -> option<mac_> { match copy self.token { token::DOLLAR => { let lo = self.span.lo; self.bump(); match copy self.token { token::LIT_INT_UNSUFFIXED(num) => { self.bump(); some(mac_var(num as uint)) } token::LPAREN => { self.bump(); let e = self.parse_expr(); self.expect(token::RPAREN); let hi = self.last_span.hi; some(mac_aq(mk_sp(lo,hi), e)) } _ => { self.fatal(~"expected `(` or unsuffixed integer literal"); } } } _ => none } } fn maybe_parse_fixed_vstore() -> option<option<uint>> { if self.token == token::BINOP(token::SLASH) { self.bump(); match copy self.token { token::UNDERSCORE => { self.bump(); some(none) } token::LIT_INT_UNSUFFIXED(i) if i >= 0i64 => { self.bump(); some(some(i as uint)) } _ => none } } else { none } } fn maybe_parse_fixed_vstore_with_star() -> option<option<uint>> { if self.eat(token::BINOP(token::STAR)) { match copy self.token { token::UNDERSCORE => { self.bump(); some(none) } token::LIT_INT_UNSUFFIXED(i) if i >= 0i64 => { self.bump(); some(some(i as uint)) } _ => none } } else { none } } fn lit_from_token(tok: token::token) -> lit_ { match tok { token::LIT_INT(i, it) => lit_int(i, it), token::LIT_UINT(u, ut) => lit_uint(u, ut), token::LIT_INT_UNSUFFIXED(i) => lit_int_unsuffixed(i), token::LIT_FLOAT(s, ft) => lit_float(self.get_str(s), ft), token::LIT_STR(s) => lit_str(self.get_str(s)), token::LPAREN => { self.expect(token::RPAREN); lit_nil } _ => self.unexpected_last(tok) } } fn parse_lit() -> lit { let lo = self.span.lo; let lit = if self.eat_keyword(~"true") { lit_bool(true) } else if self.eat_keyword(~"false") { lit_bool(false) } else { let tok = self.token; self.bump(); 
self.lit_from_token(tok) }; return {node: lit, span: mk_sp(lo, self.last_span.hi)}; } fn parse_path_without_tps() -> @path { self.parse_path_without_tps_(|p| p.parse_ident(), |p| p.parse_ident()) } fn parse_path_without_tps_( parse_ident: fn(parser) -> ident, parse_last_ident: fn(parser) -> ident) -> @path { maybe_whole!{self, nt_path}; let lo = self.span.lo; let global = self.eat(token::MOD_SEP); let mut ids = ~[]; loop { let is_not_last = self.look_ahead(2u) != token::LT && self.look_ahead(1u) == token::MOD_SEP; if is_not_last { vec::push(ids, parse_ident(self)); self.expect(token::MOD_SEP); } else { vec::push(ids, parse_last_ident(self)); break; } } @{span: mk_sp(lo, self.last_span.hi), global: global, idents: ids, rp: none, types: ~[]} } fn parse_value_path() -> @path { self.parse_path_without_tps_(|p| p.parse_ident(), |p| p.parse_value_ident()) } fn parse_path_with_tps(colons: bool) -> @path { debug!{"parse_path_with_tps(colons=%b)", colons}; maybe_whole!{self, nt_path}; let lo = self.span.lo; let path = self.parse_path_without_tps(); if colons && !self.eat(token::MOD_SEP) { return path; } // Parse the region parameter, if any, which will // be written "foo/&x" let rp = { // Hack: avoid parsing vstores like /@ and /~. This is painful // because the notation for region bounds and the notation for // vstores is... um... the same. I guess that's my fault. This // is still not ideal as for &str we end up parsing more than we // ought to and have to sort it out later. 
if self.token == token::BINOP(token::SLASH) && self.look_ahead(1u) == token::BINOP(token::AND) { self.expect(token::BINOP(token::SLASH)); some(self.parse_region()) } else { none } }; // Parse any type parameters which may appear: let tps = { if self.token == token::LT { self.parse_seq_lt_gt(some(token::COMMA), |p| p.parse_ty(false)) } else { {node: ~[], span: path.span} } }; return @{span: mk_sp(lo, tps.span.hi), rp: rp, types: tps.node with *path}; } fn parse_mutability() -> mutability { if self.eat_keyword(~"mut") { m_mutbl } else if self.eat_keyword(~"const") { m_const } else { m_imm } } fn parse_field(sep: token::token) -> field { let lo = self.span.lo; let m = self.parse_mutability(); let i = self.parse_ident(); self.expect(sep); let e = self.parse_expr(); return spanned(lo, e.span.hi, {mutbl: m, ident: i, expr: e}); } fn mk_expr(lo: uint, hi: uint, +node: expr_) -> @expr { return @{id: self.get_id(), callee_id: self.get_id(), node: node, span: mk_sp(lo, hi)}; } fn mk_mac_expr(lo: uint, hi: uint, m: mac_) -> @expr { return @{id: self.get_id(), callee_id: self.get_id(), node: expr_mac({node: m, span: mk_sp(lo, hi)}), span: mk_sp(lo, hi)}; } fn mk_lit_u32(i: u32) -> @expr { let span = self.span; let lv_lit = @{node: lit_uint(i as u64, ty_u32), span: span}; return @{id: self.get_id(), callee_id: self.get_id(), node: expr_lit(lv_lit), span: span}; } fn mk_pexpr(lo: uint, hi: uint, node: expr_) -> pexpr { return pexpr(self.mk_expr(lo, hi, node)); } fn to_expr(e: pexpr) -> @expr { match e.node { expr_tup(es) if vec::len(es) == 1u => es[0u], _ => *e } } fn parse_bottom_expr() -> pexpr { maybe_whole_expr!{self}; let lo = self.span.lo; let mut hi = self.span.hi; let mut ex: expr_; match self.maybe_parse_dollar_mac() { some(x) => return pexpr(self.mk_mac_expr(lo, self.span.hi, x)), _ => () } if self.token == token::LPAREN { self.bump(); if self.token == token::RPAREN { hi = self.span.hi; self.bump(); let lit = @spanned(lo, hi, lit_nil); return self.mk_pexpr(lo, hi, 
expr_lit(lit)); } let mut es = ~[self.parse_expr()]; while self.token == token::COMMA { self.bump(); vec::push(es, self.parse_expr()); } hi = self.span.hi; self.expect(token::RPAREN); // Note: we retain the expr_tup() even for simple // parenthesized expressions, but only for a "little while". // This is so that wrappers around parse_bottom_expr() // can tell whether the expression was parenthesized or not, // which affects expr_is_complete(). return self.mk_pexpr(lo, hi, expr_tup(es)); } else if self.token == token::LBRACE { if self.looking_at_record_literal() { ex = self.parse_record_literal(); hi = self.span.hi; } else { self.bump(); let blk = self.parse_block_tail(lo, default_blk); return self.mk_pexpr(blk.span.lo, blk.span.hi, expr_block(blk)); } } else if token::is_bar(self.token) { return pexpr(self.parse_lambda_expr()); } else if self.eat_keyword(~"if") { return pexpr(self.parse_if_expr()); } else if self.eat_keyword(~"for") { return pexpr(self.parse_sugary_call_expr(~"for", expr_loop_body)); } else if self.eat_keyword(~"do") { return pexpr(self.parse_sugary_call_expr(~"do", expr_do_body)); } else if self.eat_keyword(~"while") { return pexpr(self.parse_while_expr()); } else if self.eat_keyword(~"loop") { return pexpr(self.parse_loop_expr()); } else if self.eat_keyword(~"match") { return pexpr(self.parse_alt_expr()); } else if self.eat_keyword(~"fn") { let proto = self.parse_fn_ty_proto(); match proto { proto_bare => self.fatal(~"fn expr are deprecated, use fn@"), _ => { /* fallthrough */ } } return pexpr(self.parse_fn_expr(proto)); } else if self.eat_keyword(~"unchecked") { return pexpr(self.parse_block_expr(lo, unchecked_blk)); } else if self.eat_keyword(~"unsafe") { return pexpr(self.parse_block_expr(lo, unsafe_blk)); } else if self.token == token::LBRACKET { self.bump(); let mutbl = self.parse_mutability(); if self.token == token::RBRACKET { // Empty vector. self.bump(); ex = expr_vec(~[], mutbl); } else { // Nonempty vector. 
let first_expr = self.parse_expr(); if self.token == token::COMMA && self.look_ahead(1) == token::DOTDOT { // Repeating vector syntax: [ 0, ..512 ] self.bump(); self.bump(); let count = self.parse_expr(); self.expect(token::RBRACKET); ex = expr_repeat(first_expr, count, mutbl); } else if self.token == token::COMMA { // Vector with two or more elements. self.bump(); let remaining_exprs = self.parse_seq_to_end(token::RBRACKET, seq_sep_trailing_allowed(token::COMMA), |p| p.parse_expr()); ex = expr_vec(~[first_expr] + remaining_exprs, mutbl); } else { // Vector with one element. self.expect(token::RBRACKET); ex = expr_vec(~[first_expr], mutbl); } } hi = self.span.hi; } else if self.token == token::ELLIPSIS { self.bump(); return pexpr(self.mk_mac_expr(lo, self.span.hi, mac_ellipsis)); } else if self.token == token::POUND { let ex_ext = self.parse_syntax_ext(); hi = ex_ext.span.hi; ex = ex_ext.node; } else if self.eat_keyword(~"fail") { if can_begin_expr(self.token) { let e = self.parse_expr(); hi = e.span.hi; ex = expr_fail(some(e)); } else { ex = expr_fail(none); } } else if self.eat_keyword(~"log") { self.expect(token::LPAREN); let lvl = self.parse_expr(); self.expect(token::COMMA); let e = self.parse_expr(); ex = expr_log(2, lvl, e); hi = self.span.hi; self.expect(token::RPAREN); } else if self.eat_keyword(~"assert") { let e = self.parse_expr(); ex = expr_assert(e); hi = e.span.hi; } else if self.eat_keyword(~"return") { if can_begin_expr(self.token) { let e = self.parse_expr(); hi = e.span.hi; ex = expr_ret(some(e)); } else { ex = expr_ret(none); } } else if self.eat_keyword(~"break") { if is_ident(self.token) { ex = expr_break(some(self.parse_ident())); } else { ex = expr_break(none); } hi = self.span.hi; } else if self.eat_keyword(~"again") { if is_ident(self.token) { ex = expr_again(some(self.parse_ident())); } else { ex = expr_again(none); } hi = self.span.hi; } else if self.eat_keyword(~"copy") { let e = self.parse_expr(); ex = expr_copy(e); hi = e.span.hi; } 
else if self.eat_keyword(~"move") { let e = self.parse_expr(); ex = expr_unary_move(e); hi = e.span.hi; } else if self.token == token::MOD_SEP || is_ident(self.token) && !self.is_keyword(~"true") && !self.is_keyword(~"false") { let pth = self.parse_path_with_tps(true); /* `!`, as an operator, is prefix, so we know this isn't that */ if self.token == token::NOT { self.bump(); let tts = match self.token { token::LPAREN | token::LBRACE | token::LBRACKET => { let ket = token::flip_delimiter(self.token); self.parse_unspanned_seq(copy self.token, ket, seq_sep_none(), |p| p.parse_token_tree()) } _ => self.fatal(~"expected open delimiter") }; let hi = self.span.hi; return pexpr(self.mk_mac_expr( lo, hi, mac_invoc_tt(pth, tts))); } else if self.token == token::LBRACE { // This might be a struct literal. if self.looking_at_record_literal() { // It's a struct literal. self.bump(); let mut fields = ~[]; vec::push(fields, self.parse_field(token::COLON)); while self.token != token::RBRACE && !self.is_keyword(~"with") { self.expect(token::COMMA); if self.token == token::RBRACE || self.is_keyword(~"with") || self.token == token::DOTDOT { // Accept an optional trailing comma. break; } vec::push(fields, self.parse_field(token::COLON)); } let base; if self.eat_keyword(~"with") || self.eat(token::DOTDOT) { base = some(self.parse_expr()); } else { base = none; } hi = pth.span.hi; self.expect(token::RBRACE); ex = expr_struct(pth, fields, base); return self.mk_pexpr(lo, hi, ex); } } hi = pth.span.hi; ex = expr_path(pth); } else { let lit = self.parse_lit(); hi = lit.span.hi; ex = expr_lit(@lit); } // Vstore is legal following expr_lit(lit_str(...)) and expr_vec(...) // only. 
match ex { expr_lit(@{node: lit_str(_), span: _}) | expr_vec(_, _) => match self.maybe_parse_fixed_vstore() { none => (), some(v) => { hi = self.span.hi; ex = expr_vstore(self.mk_expr(lo, hi, ex), vstore_fixed(v)); } }, _ => () } return self.mk_pexpr(lo, hi, ex); } fn parse_block_expr(lo: uint, blk_mode: blk_check_mode) -> @expr { self.expect(token::LBRACE); let blk = self.parse_block_tail(lo, blk_mode); return self.mk_expr(blk.span.lo, blk.span.hi, expr_block(blk)); } fn parse_syntax_ext() -> @expr { let lo = self.span.lo; self.expect(token::POUND); return self.parse_syntax_ext_naked(lo); } fn parse_syntax_ext_naked(lo: uint) -> @expr { match self.token { token::IDENT(_, _) => (), _ => self.fatal(~"expected a syntax expander name") } let pth = self.parse_path_without_tps(); //temporary for a backwards-compatible cycle: let sep = seq_sep_trailing_disallowed(token::COMMA); let mut e = none; if (self.token == token::LPAREN || self.token == token::LBRACKET) { let lo = self.span.lo; let es = if self.token == token::LPAREN { self.parse_unspanned_seq(token::LPAREN, token::RPAREN, sep, |p| p.parse_expr()) } else { self.parse_unspanned_seq(token::LBRACKET, token::RBRACKET, sep, |p| p.parse_expr()) }; let hi = self.span.hi; e = some(self.mk_expr(lo, hi, expr_vec(es, m_imm))); } let mut b = none; if self.token == token::LBRACE { self.bump(); let lo = self.span.lo; let mut depth = 1u; while (depth > 0u) { match (self.token) { token::LBRACE => depth += 1u, token::RBRACE => depth -= 1u, token::EOF => self.fatal(~"unexpected EOF in macro body"), _ => () } self.bump(); } let hi = self.last_span.lo; b = some({span: mk_sp(lo,hi)}); } return self.mk_mac_expr(lo, self.span.hi, mac_invoc(pth, e, b)); } fn parse_dot_or_call_expr() -> pexpr { let b = self.parse_bottom_expr(); self.parse_dot_or_call_expr_with(b) } fn permits_call() -> bool { return self.restriction != RESTRICT_NO_CALL_EXPRS; } fn parse_dot_or_call_expr_with(e0: pexpr) -> pexpr { let mut e = e0; let lo = e.span.lo; let 
mut hi; loop { // expr.f if self.eat(token::DOT) { match copy self.token { token::IDENT(i, _) => { hi = self.span.hi; self.bump(); let tys = if self.eat(token::MOD_SEP) { self.expect(token::LT); self.parse_seq_to_gt(some(token::COMMA), |p| p.parse_ty(false)) } else { ~[] }; e = self.mk_pexpr(lo, hi, expr_field(self.to_expr(e), self.get_str(i), tys)); } _ => self.unexpected() } again; } if self.expr_is_complete(e) { break; } match copy self.token { // expr(...) token::LPAREN if self.permits_call() => { let es = self.parse_unspanned_seq( token::LPAREN, token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), |p| p.parse_expr()); hi = self.span.hi; let nd = expr_call(self.to_expr(e), es, false); e = self.mk_pexpr(lo, hi, nd); } // expr[...] token::LBRACKET => { self.bump(); let ix = self.parse_expr(); hi = ix.span.hi; self.expect(token::RBRACKET); e = self.mk_pexpr(lo, hi, expr_index(self.to_expr(e), ix)); } _ => return e } } return e; } fn parse_sep_and_zerok() -> (option<token::token>, bool) { if self.token == token::BINOP(token::STAR) || self.token == token::BINOP(token::PLUS) { let zerok = self.token == token::BINOP(token::STAR); self.bump(); return (none, zerok); } else { let sep = self.token; self.bump(); if self.token == token::BINOP(token::STAR) || self.token == token::BINOP(token::PLUS) { let zerok = self.token == token::BINOP(token::STAR); self.bump(); return (some(sep), zerok); } else { self.fatal(~"expected `*` or `+`"); } } } fn parse_token_tree() -> token_tree { maybe_whole!{deref self, nt_tt}; fn parse_tt_tok(p: parser, delim_ok: bool) -> token_tree { match p.token { token::RPAREN | token::RBRACE | token::RBRACKET if !delim_ok => { p.fatal(~"incorrect close delimiter: `" + token_to_str(p.reader, p.token) + ~"`"); } token::EOF => { p.fatal(~"file ended in the middle of a macro invocation"); } /* we ought to allow different depths of unquotation */ token::DOLLAR if p.quote_depth > 0u => { p.bump(); let sp = p.span; if p.token == token::LPAREN { let seq = 
p.parse_seq(token::LPAREN, token::RPAREN, seq_sep_none(), |p| p.parse_token_tree()); let (s, z) = p.parse_sep_and_zerok(); return tt_seq(mk_sp(sp.lo ,p.span.hi), seq.node, s, z); } else { return tt_nonterminal(sp, p.parse_ident()); } } _ => { /* ok */ } } let res = tt_tok(p.span, p.token); p.bump(); return res; } return match self.token { token::LPAREN | token::LBRACE | token::LBRACKET => { let ket = token::flip_delimiter(self.token); tt_delim(vec::append( ~[parse_tt_tok(self, true)], vec::append( self.parse_seq_to_before_end( ket, seq_sep_none(), |p| p.parse_token_tree()), ~[parse_tt_tok(self, true)]))) } _ => parse_tt_tok(self, false) }; } fn parse_matchers() -> ~[matcher] { // unification of matchers and token_trees would vastly improve // the interpolation of matchers maybe_whole!{self, nt_matchers}; let name_idx = @mut 0u; return match self.token { token::LBRACE | token::LPAREN | token::LBRACKET => { self.parse_matcher_subseq(name_idx, copy self.token, token::flip_delimiter(self.token)) } _ => self.fatal(~"expected open delimiter") } } // This goofy function is necessary to correctly match parens in matchers. // Otherwise, `$( ( )` would be a valid matcher, and `$( () )` would be // invalid. It's similar to common::parse_seq. 
// Parses a delimited subsequence of matchers, tracking raw paren depth
// (`lparens`) so that a stray `(` or `)` cannot prematurely terminate the
// sequence — see the "goofy function" comment preceding this definition.
fn parse_matcher_subseq(name_idx: @mut uint, bra: token::token,
                        ket: token::token) -> ~[matcher] {
    let mut ret_val = ~[];
    let mut lparens = 0u;

    self.expect(bra);

    // keep parsing matchers until the close delimiter at paren-depth 0
    while self.token != ket || lparens > 0u {
        if self.token == token::LPAREN { lparens += 1u; }
        if self.token == token::RPAREN { lparens -= 1u; }
        vec::push(ret_val, self.parse_matcher(name_idx));
    }

    self.bump(); // consume the close delimiter

    return ret_val;
}

// Parses a single matcher: a `$(...)sep*` repetition, a `$name:ntkind`
// nonterminal binding, or a literal token. `name_idx` is advanced for
// each nonterminal binding encountered.
fn parse_matcher(name_idx: @mut uint) -> matcher {
    let lo = self.span.lo;

    let m = if self.token == token::DOLLAR {
        self.bump();
        if self.token == token::LPAREN {
            let name_idx_lo = *name_idx;
            let ms = self.parse_matcher_subseq(name_idx, token::LPAREN,
                                               token::RPAREN);
            if ms.len() == 0u {
                self.fatal(~"repetition body must be nonempty");
            }
            let (sep, zerok) = self.parse_sep_and_zerok();
            // records the half-open range of binding indices it covers
            match_seq(ms, sep, zerok, name_idx_lo, *name_idx)
        } else {
            let bound_to = self.parse_ident();
            self.expect(token::COLON);
            let nt_name = self.parse_ident();
            let m = match_nonterminal(bound_to, nt_name, *name_idx);
            *name_idx += 1u;
            m
        }
    } else {
        let m = match_tok(self.token);
        self.bump();
        m
    };

    return spanned(lo, self.span.hi, m);
}

// Parses a prefix (unary) expression: `!e`, `-e`, `*e`, `&e`, `@e`, `~e`;
// anything else falls through to parse_dot_or_call_expr().
fn parse_prefix_expr() -> pexpr {
    let lo = self.span.lo;
    let mut hi;

    let mut ex;
    match copy self.token {
      token::NOT => {
        self.bump();
        let e = self.to_expr(self.parse_prefix_expr());
        hi = e.span.hi;
        self.get_id(); // see ast_util::op_expr_callee_id
        ex = expr_unary(not, e);
      }
      token::BINOP(b) => {
        match b {
          token::MINUS => {
            self.bump();
            let e = self.to_expr(self.parse_prefix_expr());
            hi = e.span.hi;
            self.get_id(); // see ast_util::op_expr_callee_id
            ex = expr_unary(neg, e);
          }
          token::STAR => {
            self.bump();
            let e = self.to_expr(self.parse_prefix_expr());
            hi = e.span.hi;
            ex = expr_unary(deref, e);
          }
          token::AND => {
            self.bump();
            let m = self.parse_mutability();
            let e = self.to_expr(self.parse_prefix_expr());
            hi = e.span.hi;
            // HACK: turn &[...] into a &-evec
            ex = match e.node {
              expr_vec(*) | expr_lit(@{node: lit_str(_), span: _})
              if m == m_imm => {
                expr_vstore(e, vstore_slice(self.region_from_name(none)))
              }
              _ => expr_addr_of(m, e)
            };
          }
          _ => return self.parse_dot_or_call_expr()
        }
      }
      token::AT => {
        self.bump();
        let m = self.parse_mutability();
        let e = self.to_expr(self.parse_prefix_expr());
        hi = e.span.hi;
        // HACK: turn @[...] into a @-evec
        ex = match e.node {
          expr_vec(*) | expr_lit(@{node: lit_str(_), span: _})
          if m == m_imm => expr_vstore(e, vstore_box),
          _ => expr_unary(box(m), e)
        };
      }
      token::TILDE => {
        self.bump();
        let m = self.parse_mutability();
        let e = self.to_expr(self.parse_prefix_expr());
        hi = e.span.hi;
        // HACK: turn ~[...] into a ~-evec
        ex = match e.node {
          expr_vec(*) | expr_lit(@{node: lit_str(_), span: _})
          if m == m_imm => expr_vstore(e, vstore_uniq),
          _ => expr_unary(uniq(m), e)
        };
      }
      _ => return self.parse_dot_or_call_expr()
    }
    return self.mk_pexpr(lo, hi, ex);
}

// Entry point for binary-operator parsing, starting at precedence 0.
fn parse_binops() -> @expr {
    return self.parse_more_binops(self.parse_prefix_expr(), 0u);
}

// Precedence-climbing parser for binary operators; also handles the
// `expr as ty` cast, which participates at precedence `as_prec`.
fn parse_more_binops(plhs: pexpr, min_prec: uint) -> @expr {
    let lhs = self.to_expr(plhs);
    if self.expr_is_complete(plhs) { return lhs; }
    let peeked = self.token;
    // `|` / `||` may begin a lambda argument list; under the relevant
    // restrictions, stop here instead of treating them as operators.
    if peeked == token::BINOP(token::OR) &&
        (self.restriction == RESTRICT_NO_BAR_OP ||
         self.restriction == RESTRICT_NO_BAR_OR_DOUBLEBAR_OP) {
        return lhs;
    }
    if peeked == token::OROR &&
        self.restriction == RESTRICT_NO_BAR_OR_DOUBLEBAR_OP {
        return lhs;
    }
    let cur_opt = token_to_binop(peeked);
    match cur_opt {
      some(cur_op) => {
        let cur_prec = operator_prec(cur_op);
        if cur_prec > min_prec {
            self.bump();
            let expr = self.parse_prefix_expr();
            // right side binds tighter operators first
            let rhs = self.parse_more_binops(expr, cur_prec);
            self.get_id(); // see ast_util::op_expr_callee_id
            let bin = self.mk_pexpr(lhs.span.lo, rhs.span.hi,
                                    expr_binary(cur_op, lhs, rhs));
            return self.parse_more_binops(bin, min_prec);
        }
      }
      _ => ()
    }
    if as_prec > min_prec && self.eat_keyword(~"as") {
        let rhs = self.parse_ty(true);
        let _as = self.mk_pexpr(lhs.span.lo, rhs.span.hi,
                                expr_cast(lhs, rhs));
        return self.parse_more_binops(_as, min_prec);
    }
    return lhs;
}

// Parses assignment forms: `=`, compound `op=`, `<-` (move) and
// `<->` (swap); otherwise just returns the parsed binop expression.
fn parse_assign_expr() -> @expr {
    let lo = self.span.lo;
    let lhs = self.parse_binops();
    match copy self.token {
      token::EQ => {
        self.bump();
        let rhs = self.parse_expr();
        return self.mk_expr(lo, rhs.span.hi, expr_assign(lhs, rhs));
      }
      token::BINOPEQ(op) => {
        self.bump();
        let rhs = self.parse_expr();
        let mut aop;
        match op {
          token::PLUS => aop = add,
          token::MINUS => aop = subtract,
          token::STAR => aop = mul,
          token::SLASH => aop = div,
          token::PERCENT => aop = rem,
          token::CARET => aop = bitxor,
          token::AND => aop = bitand,
          token::OR => aop = bitor,
          token::SHL => aop = shl,
          token::SHR => aop = shr
        }
        self.get_id(); // see ast_util::op_expr_callee_id
        return self.mk_expr(lo, rhs.span.hi,
                            expr_assign_op(aop, lhs, rhs));
      }
      token::LARROW => {
        self.bump();
        let rhs = self.parse_expr();
        return self.mk_expr(lo, rhs.span.hi, expr_move(lhs, rhs));
      }
      token::DARROW => {
        self.bump();
        let rhs = self.parse_expr();
        return self.mk_expr(lo, rhs.span.hi, expr_swap(lhs, rhs));
      }
      _ => {/* fall through */ }
    }
    return lhs;
}

// Parses an `if` expression; the `if` keyword has already been consumed
// by the caller (hence last_span for `lo`). Handles optional `else`.
fn parse_if_expr() -> @expr {
    let lo = self.last_span.lo;
    let cond = self.parse_expr();
    let thn = self.parse_block();
    let mut els: option<@expr> = none;
    let mut hi = thn.span.hi;
    if self.eat_keyword(~"else") {
        let elexpr = self.parse_else_expr();
        els = some(elexpr);
        hi = elexpr.span.hi;
    }
    let q = {cond: cond, then: thn, els: els, lo: lo, hi: hi};
    return self.mk_expr(q.lo, q.hi, expr_if(q.cond, q.then, q.els));
}

// Parses a `fn` expression body for the given closure proto.
fn parse_fn_expr(proto: proto) -> @expr {
    let lo = self.last_span.lo;

    // if we want to allow fn expression argument types to be inferred in
    // the future, just have to change parse_arg to parse_fn_block_arg.
    let (decl, capture_clause) =
        self.parse_fn_decl(impure_fn,
                           |p| p.parse_arg_or_capture_item());

    let body = self.parse_block();
    return self.mk_expr(lo, body.span.hi,
                        expr_fn(proto, decl, body, capture_clause));
}

// `|args| { ...
// }` like in `do` expressions
fn parse_lambda_block_expr() -> @expr {
    self.parse_lambda_expr_(
        || {
            match self.token {
              token::BINOP(token::OR) | token::OROR => {
                self.parse_fn_block_decl()
              }
              _ => {
                // No argument list - `do foo {`
                ({
                    {
                        inputs: ~[],
                        output: @{
                            id: self.get_id(),
                            node: ty_infer,
                            span: self.span
                        },
                        purity: impure_fn,
                        cf: return_val
                    }
                },
                @~[])
              }
            }
        },
        || {
            let blk = self.parse_block();
            self.mk_expr(blk.span.lo, blk.span.hi, expr_block(blk))
        })
}

// `|args| expr`
fn parse_lambda_expr() -> @expr {
    self.parse_lambda_expr_(|| self.parse_fn_block_decl(),
                            || self.parse_expr())
}

// Shared worker for the two lambda forms above: parses a declaration
// then a body, and wraps the body expression in a synthetic block so a
// lambda is always represented as expr_fn_block.
fn parse_lambda_expr_(parse_decl: fn&() -> (fn_decl, capture_clause),
                      parse_body: fn&() -> @expr) -> @expr {
    let lo = self.last_span.lo;
    let (decl, captures) = parse_decl();
    let body = parse_body();
    let fakeblock = {view_items: ~[], stmts: ~[], expr: some(body),
                     id: self.get_id(), rules: default_blk};
    let fakeblock = spanned(body.span.lo, body.span.hi,
                            fakeblock);
    return self.mk_expr(lo, body.span.hi,
                        expr_fn_block(decl, fakeblock, captures));
}

// Parses what follows `else`: either `else if ...` or an `else { ... }`
// block (wrapped as a block expression).
fn parse_else_expr() -> @expr {
    if self.eat_keyword(~"if") {
        return self.parse_if_expr();
    } else {
        let blk = self.parse_block();
        return self.mk_expr(blk.span.lo, blk.span.hi, expr_block(blk));
    }
}

// Parses sugared call forms such as `for`/`do`: the callee expression
// followed by a trailing lambda block, which becomes the last argument.
fn parse_sugary_call_expr(keyword: ~str,
                          ctor: fn(+@expr) -> expr_) -> @expr {
    let lo = self.last_span;
    // Parse the callee `foo` in
    // for foo || {
    // for foo.bar || {
    // etc, or the portion of the call expression before the lambda in
    // for foo() || {
    // or
    // for foo.bar(a) || {
    // Turn on the restriction to stop at | or || so we can parse
    // them as the lambda arguments
    let e = self.parse_expr_res(RESTRICT_NO_BAR_OR_DOUBLEBAR_OP);
    match e.node {
      expr_call(f, args, false) => {
        // existing call: append the lambda as an extra, "sugared" arg
        let block = self.parse_lambda_block_expr();
        let last_arg = self.mk_expr(block.span.lo, block.span.hi,
                                    ctor(block));
        let args = vec::append(args, ~[last_arg]);
        @{node: expr_call(f, args, true) with *e}
      }
      expr_path(*) | expr_field(*) | expr_call(*) => {
        // bare callee: the lambda becomes the sole argument
        let block = self.parse_lambda_block_expr();
        let last_arg = self.mk_expr(block.span.lo, block.span.hi,
                                    ctor(block));
        self.mk_expr(lo.lo, last_arg.span.hi,
                     expr_call(e, ~[last_arg], true))
      }
      _ => {
        // There may be other types of expressions that can
        // represent the callee in `for` and `do` expressions
        // but they aren't represented by tests
        debug!{"sugary call on %?", e.node};
        self.span_fatal(
            lo, fmt!{"`%s` must be followed by a block call", keyword});
      }
    }
}

// Parses a `while` expression (keyword already consumed by caller).
fn parse_while_expr() -> @expr {
    let lo = self.last_span.lo;
    let cond = self.parse_expr();
    let body = self.parse_block_no_value();
    let mut hi = body.span.hi;
    return self.mk_expr(lo, hi, expr_while(cond, body));
}

// Parses a `loop` expression, with an optional leading `label:`.
fn parse_loop_expr() -> @expr {
    let opt_ident;
    if is_ident(self.token) && !self.is_any_keyword(copy self.token) {
        opt_ident = some(self.parse_ident());
        self.expect(token::COLON);
    } else {
        opt_ident = none;
    }

    let lo = self.last_span.lo;
    let body = self.parse_block_no_value();
    let mut hi = body.span.hi;
    return self.mk_expr(lo, hi, expr_loop(body, opt_ident));
}

// For distinguishing between record literals and blocks
fn looking_at_record_literal() -> bool {
    let lookahead = self.look_ahead(1);
    self.token == token::LBRACE &&
        (self.token_is_keyword(~"mut", lookahead) ||
         (is_plain_ident(lookahead) &&
          self.look_ahead(2) == token::COLON))
}

// Parses a record literal `{f: e, ...}`, with an optional functional-
// update base introduced by `..` (or, transitionally, `with`).
fn parse_record_literal() -> expr_ {
    self.expect(token::LBRACE);
    let mut fields = ~[self.parse_field(token::COLON)];
    let mut base = none;
    while self.token != token::RBRACE {
        if self.token == token::COMMA
            && self.look_ahead(1) == token::DOTDOT {
            self.bump();
            self.bump();
            base = some(self.parse_expr());
            break;
        }

        // XXX: Remove "with" after all code is converted over and there's
        // a snapshot.
        // optional comma before "with"
        if self.token == token::COMMA
            && self.token_is_keyword(~"with",
                                     self.look_ahead(1u)) {
            self.bump();
        }
        if self.eat_keyword(~"with") {
            base = some(self.parse_expr());
            break;
        }

        self.expect(token::COMMA);
        if self.token == token::RBRACE {
            // record ends by an optional trailing comma
            break;
        }
        vec::push(fields, self.parse_field(token::COLON));
    }
    self.expect(token::RBRACE);
    return expr_rec(fields, base);
}

// Parses an `alt` (match) expression; `alt check` selects alt_check mode.
fn parse_alt_expr() -> @expr {
    let lo = self.last_span.lo;
    let mode = if self.eat_keyword(~"check") { alt_check }
               else { alt_exhaustive };
    let discriminant = self.parse_expr();
    self.expect(token::LBRACE);
    let mut arms: ~[arm] = ~[];
    while self.token != token::RBRACE {
        let pats = self.parse_pats();
        let mut guard = none;
        if self.eat_keyword(~"if") { guard = some(self.parse_expr()); }
        self.expect(token::FAT_ARROW);
        let expr = self.parse_expr_res(RESTRICT_STMT_EXPR);

        // a comma is required after a non-block arm body, except before
        // the closing brace
        let require_comma =
            !classify::expr_is_simple_block(expr)
            && self.token != token::RBRACE;

        if require_comma {
            self.expect(token::COMMA);
        } else {
            self.eat(token::COMMA);
        }

        // wrap the arm body in a synthetic block
        let blk = {node: {view_items: ~[],
                          stmts: ~[],
                          expr: some(expr),
                          id: self.get_id(),
                          rules: default_blk},
                   span: expr.span};

        vec::push(arms, {pats: pats, guard: guard, body: blk});
    }
    let mut hi = self.span.hi;
    self.bump();
    return self.mk_expr(lo, hi, expr_match(discriminant, arms, mode));
}

// Parses an expression with no restriction in effect.
fn parse_expr() -> @expr {
    return self.parse_expr_res(UNRESTRICTED);
}

// Parses an expression under restriction `r`, restoring the previous
// restriction afterwards.
fn parse_expr_res(r: restriction) -> @expr {
    let old = self.restriction;
    self.restriction = r;
    let e = self.parse_assign_expr();
    self.restriction = old;
    return e;
}

// Parses an optional `let` initializer: `= expr` (assign) or
// `<- expr` (move); returns none when neither is present.
fn parse_initializer() -> option<initializer> {
    match self.token {
      token::EQ => {
        self.bump();
        return some({op: init_assign, expr: self.parse_expr()});
      }
      token::LARROW => {
        self.bump();
        return some({op: init_move, expr: self.parse_expr()});
      }
      // Now that the channel is the first argument to receive,
      // combining it with an initializer doesn't really make sense.
      // case (token::RECV) {
      //     self.bump();
      //     return some(rec(op = init_recv,
      //                     expr = self.parse_expr()));
      // }
      _ => {
        return none;
      }
    }
}

// Parses one or more `|`-separated patterns (as in an alt arm).
fn parse_pats() -> ~[@pat] {
    let mut pats = ~[];
    loop {
        vec::push(pats, self.parse_pat(true));
        if self.token == token::BINOP(token::OR) { self.bump(); }
        else { return pats; }
    };
}

// Parses the fields of a record/struct pattern, up to (not consuming)
// the closing `}`. A trailing `_` sets the "etc" flag. Fields without
// an explicit subpattern bind an identifier of the same name.
fn parse_pat_fields(refutable: bool) -> (~[ast::field_pat], bool) {
    let mut fields = ~[];
    let mut etc = false;
    let mut first = true;
    while self.token != token::RBRACE {
        if first { first = false; }
        else { self.expect(token::COMMA); }

        if self.token == token::UNDERSCORE {
            self.bump();
            if self.token != token::RBRACE {
                self.fatal(~"expected `}`, found `" +
                           token_to_str(self.reader, self.token) +
                           ~"`");
            }
            etc = true;
            break;
        }

        let lo1 = self.last_span.lo;
        // `name:` may be followed by a subpattern; a bare name must be
        // a valid value identifier
        let fieldname = if self.look_ahead(1u) == token::COLON {
            self.parse_ident()
        } else {
            self.parse_value_ident()
        };
        let hi1 = self.last_span.lo;
        let fieldpath = ast_util::ident_to_path(mk_sp(lo1, hi1),
                                                fieldname);
        let mut subpat;
        if self.token == token::COLON {
            self.bump();
            subpat = self.parse_pat(refutable);
        } else {
            // shorthand `{x}`: bind field to an ident pattern `x`
            subpat = @{
                id: self.get_id(),
                node: pat_ident(bind_by_implicit_ref, fieldpath, none),
                span: self.last_span
            };
        }
        vec::push(fields, {ident: fieldname, pat: subpat});
    }
    return (fields, etc);
}

// Parses a single pattern. `refutable` controls whether forms that can
// fail to match (literals, ranges, enum variants) are permitted in
// sub-positions.
fn parse_pat(refutable: bool) -> @pat {
    maybe_whole!{self, nt_pat};

    let lo = self.span.lo;
    let mut hi = self.span.hi;
    let mut pat;
    match self.token {
      token::UNDERSCORE => { self.bump(); pat = pat_wild; }
      token::AT => {
        self.bump();
        let sub = self.parse_pat(refutable);
        hi = sub.span.hi;
        // HACK: parse @"..." as a literal of a vstore @str
        pat = match sub.node {
          pat_lit(e@@{
            node: expr_lit(@{node: lit_str(_), span: _}), _
          }) => {
            let vst = @{id: self.get_id(),
                        callee_id: self.get_id(),
                        node: expr_vstore(e, vstore_box),
                        span: mk_sp(lo, hi)};
            pat_lit(vst)
          }
          _ => pat_box(sub)
        };
      }
      token::TILDE => {
        self.bump();
        let sub = self.parse_pat(refutable);
        hi = sub.span.hi;
        // HACK: parse ~"..." as a literal of a vstore ~str
        pat = match sub.node {
          pat_lit(e@@{
            node: expr_lit(@{node: lit_str(_), span: _}), _
          }) => {
            let vst = @{id: self.get_id(),
                        callee_id: self.get_id(),
                        node: expr_vstore(e, vstore_uniq),
                        span: mk_sp(lo, hi)};
            pat_lit(vst)
          }
          _ => pat_uniq(sub)
        };
      }
      token::LBRACE => {
        self.bump();
        let (fields, etc) = self.parse_pat_fields(refutable);
        hi = self.span.hi;
        self.bump();
        pat = pat_rec(fields, etc);
      }
      token::LPAREN => {
        self.bump();
        if self.token == token::RPAREN {
            // `()` — the nil literal pattern
            hi = self.span.hi;
            self.bump();
            let lit = @{node: lit_nil, span: mk_sp(lo, hi)};
            let expr = self.mk_expr(lo, hi, expr_lit(lit));
            pat = pat_lit(expr);
        } else {
            let mut fields = ~[self.parse_pat(refutable)];
            while self.token == token::COMMA {
                self.bump();
                vec::push(fields, self.parse_pat(refutable));
            }
            // one-element tuples need the trailing comma
            if vec::len(fields) == 1u { self.expect(token::COMMA); }
            hi = self.span.hi;
            self.expect(token::RPAREN);
            pat = pat_tup(fields);
        }
      }
      tok => {
        if !is_ident_or_path(tok)
            || self.is_keyword(~"true")
            || self.is_keyword(~"false") {
            // literal, or a `lo to hi` / `lo .. hi` range pattern
            let val = self.parse_expr_res(RESTRICT_NO_BAR_OP);
            if self.eat_keyword(~"to") || self.eat(token::DOTDOT) {
                let end = self.parse_expr_res(RESTRICT_NO_BAR_OP);
                pat = pat_range(val, end);
            } else {
                pat = pat_lit(val);
            }
        } else if self.eat_keyword(~"ref") {
            let mutbl = self.parse_mutability();
            pat = self.parse_pat_ident(refutable, bind_by_ref(mutbl));
        } else if self.eat_keyword(~"copy") {
            pat = self.parse_pat_ident(refutable, bind_by_value);
        } else if !is_plain_ident(self.token) {
            pat = self.parse_enum_variant(refutable);
        } else {
            let binding_mode;
            // NOTE(review): `copy` appears already consumed by the
            // branch above, so this eat_keyword looks unreachable here
            // — confirm before relying on it.
            if self.eat_keyword(~"copy") {
                binding_mode = bind_by_value;
            } else if refutable {
                // XXX: Should be bind_by_value, but that's not
                // backward compatible.
                binding_mode = bind_by_implicit_ref;
            } else {
                binding_mode = bind_by_value;
            }

            // a plain ident followed by (, [, < or { may instead begin
            // an enum-variant or struct pattern
            let cannot_be_enum_or_struct;
            match self.look_ahead(1) {
              token::LPAREN | token::LBRACKET | token::LT |
              token::LBRACE =>
                cannot_be_enum_or_struct = false,
              _ =>
                cannot_be_enum_or_struct = true
            }

            if is_plain_ident(self.token) && cannot_be_enum_or_struct {
                let name = self.parse_value_path();
                let sub;
                if self.eat(token::AT) {
                    // `name @ subpattern`
                    sub = some(self.parse_pat(refutable));
                } else {
                    sub = none;
                };
                pat = pat_ident(binding_mode, name, sub);
            } else {
                let enum_path = self.parse_path_with_tps(true);
                match self.token {
                  token::LBRACE => {
                    self.bump();
                    let (fields, etc) =
                        self.parse_pat_fields(refutable);
                    self.bump();
                    pat = pat_struct(enum_path, fields, etc);
                  }
                  _ => {
                    let mut args: ~[@pat] = ~[];
                    let mut star_pat = false;
                    match self.token {
                      token::LPAREN => match self.look_ahead(1u) {
                        token::BINOP(token::STAR) => {
                            // This is a "top constructor only" pat
                            self.bump(); self.bump();
                            star_pat = true;
                            self.expect(token::RPAREN);
                        }
                        _ => {
                            args = self.parse_unspanned_seq(
                                token::LPAREN, token::RPAREN,
                                seq_sep_trailing_disallowed
                                    (token::COMMA),
                                |p| p.parse_pat(refutable));
                        }
                      },
                      _ => ()
                    }
                    // at this point, we're not sure whether it's a
                    // enum or a bind
                    if star_pat {
                        pat = pat_enum(enum_path, none);
                    }
                    else if vec::is_empty(args) &&
                        vec::len(enum_path.idents) == 1u {
                        pat = pat_ident(binding_mode, enum_path, none);
                    }
                    else {
                        pat = pat_enum(enum_path, some(args));
                    }
                  }
                }
            }
        }
        hi = self.span.hi;
      }
    }
    return @{id: self.get_id(), node: pat, span: mk_sp(lo, hi)};
}

// Parses an identifier pattern with the given binding mode, plus an
// optional `@ subpattern`.
fn parse_pat_ident(refutable: bool,
                   binding_mode: ast::binding_mode) -> ast::pat_ {
    if !is_plain_ident(self.token) {
        self.span_fatal(
            copy self.last_span,
            ~"expected identifier, found path");
    }
    let name = self.parse_value_path();
    let sub = if self.eat(token::AT) {
        some(self.parse_pat(refutable))
    } else { none };

    // just to be friendly, if they write something like
    //   ref some(i)
    // we end up here with ( as the current token. This shortly
    // leads to a parse error.
    // Note that if there is no explicit
    // binding mode then we do not end up here, because the lookahead
    // will direct us over to parse_enum_variant()
    if self.token == token::LPAREN {
        self.span_fatal(
            copy self.last_span,
            ~"expected identifier, found enum pattern");
    }

    pat_ident(binding_mode, name, sub)
}

// Parses an enum-variant pattern: `foo(*)` (top constructor only),
// `foo(a, ..., z)`, or a bare path like `option::none`.
fn parse_enum_variant(refutable: bool) -> ast::pat_ {
    let enum_path = self.parse_path_with_tps(true);
    match self.token {
      token::LPAREN => {
        match self.look_ahead(1u) {
          token::BINOP(token::STAR) => {
            // foo(*)
            self.expect(token::LPAREN);
            self.expect(token::BINOP(token::STAR));
            self.expect(token::RPAREN);
            pat_enum(enum_path, none)
          }
          _ => {
            // foo(a, ..., z)
            let args = self.parse_unspanned_seq(
                token::LPAREN, token::RPAREN,
                seq_sep_trailing_disallowed(token::COMMA),
                |p| p.parse_pat(refutable));
            pat_enum(enum_path, some(args))
          }
        }
      }
      _ => {
        // option::none
        pat_enum(enum_path, some(~[]))
      }
    }
}

// Parses one local declaration: irrefutable pattern, optional `: ty`
// (otherwise inferred), and an optional initializer.
fn parse_local(is_mutbl: bool,
               allow_init: bool) -> @local {
    let lo = self.span.lo;
    let pat = self.parse_pat(false);
    let mut ty = @{id: self.get_id(),
                   node: ty_infer,
                   span: mk_sp(lo, lo)};
    if self.eat(token::COLON) { ty = self.parse_ty(false); }
    let init = if allow_init { self.parse_initializer() } else { none };
    return @spanned(lo, self.last_span.hi,
                    {is_mutbl: is_mutbl,
                     ty: ty,
                     pat: pat,
                     init: init,
                     id: self.get_id()});
}

// Parses a `let` declaration body (possibly several comma-separated
// locals, all sharing the leading `mut`-ness).
fn parse_let() -> @decl {
    let is_mutbl = self.eat_keyword(~"mut");
    let lo = self.span.lo;
    let mut locals = ~[self.parse_local(is_mutbl, true)];
    while self.eat(token::COMMA) {
        vec::push(locals, self.parse_local(is_mutbl, true));
    }
    return @spanned(lo, self.last_span.hi, decl_local(locals));
}

/* assumes "let" token has already been consumed */
// Parses a class instance variable: optional `mut`, name, `: ty`.
fn parse_instance_var(pr: visibility) -> @class_member {
    let mut is_mutbl = class_immutable;
    let lo = self.span.lo;
    if self.eat_keyword(~"mut") {
        is_mutbl = class_mutable;
    }
    if !is_plain_ident(self.token) {
        self.fatal(~"expected ident");
    }
    let name = self.parse_ident();
    self.expect(token::COLON);
    let ty = self.parse_ty(false);
    return @field_member(@spanned(lo, self.last_span.hi, {
        kind: named_field(name, is_mutbl, pr),
        id: self.get_id(),
        ty: ty
    }));
}

// Parses one statement: a `let` declaration, a macro/extension
// expression, an item, or (in the continuation past this chunk) a plain
// expression. `first_item_attrs` are outer attributes already consumed
// by the caller.
fn parse_stmt(+first_item_attrs: ~[attribute]) -> @stmt {
    maybe_whole!{self, nt_stmt};

    // attributes must be attached to an item; anything else is an error
    fn check_expected_item(p: parser, current_attrs: ~[attribute]) {
        // If we have attributes then we should have an item
        if vec::is_not_empty(current_attrs) {
            p.fatal(~"expected item");
        }
    }

    let lo = self.span.lo;
    if self.is_keyword(~"let") {
        check_expected_item(self, first_item_attrs);
        self.expect_keyword(~"let");
        let decl = self.parse_let();
        return @spanned(lo, decl.span.hi, stmt_decl(decl, self.get_id()));
    } else {
        let mut item_attrs;
        match self.parse_outer_attrs_or_ext(first_item_attrs) {
          none => item_attrs = ~[],
          some(Left(attrs)) => item_attrs = attrs,
          some(Right(ext)) => {
            // a syntax-extension expression is a whole statement
            return @spanned(lo, ext.span.hi,
                            stmt_expr(ext, self.get_id()));
          }
        }

        let item_attrs = vec::append(first_item_attrs, item_attrs);

        match self.parse_item_or_view_item(item_attrs, true) {
          iovi_item(i) => {
            let mut hi = i.span.hi;
            let decl = @spanned(lo, hi, decl_item(i));
            return @spanned(lo, hi, stmt_decl(decl, self.get_id()));
          }
          iovi_view_item(vi) => {
            self.span_fatal(vi.span, ~"view items must be declared at \
                                       the top of the block");
          }
          iovi_none() => { /* fallthrough */ }
        }

        check_expected_item(self, item_attrs);

        // Remainder are line-expr stmts.
        // (tail of parse_stmt) plain expression statement
        let e = self.parse_expr_res(RESTRICT_STMT_EXPR);
        return @spanned(lo, e.span.hi, stmt_expr(e, self.get_id()));
    }
}

// True when, under RESTRICT_STMT_EXPR, the expression already forms a
// complete statement (i.e. it does not require a trailing semicolon).
fn expr_is_complete(e: pexpr) -> bool {
    log(debug, (~"expr_is_complete", self.restriction,
                print::pprust::expr_to_str(*e),
                classify::expr_requires_semi_to_be_stmt(*e)));
    return self.restriction == RESTRICT_STMT_EXPR &&
        !classify::expr_requires_semi_to_be_stmt(*e);
}

// Parses a block that may not carry inner attributes.
fn parse_block() -> blk {
    let (attrs, blk) = self.parse_inner_attrs_and_block(false);
    assert vec::is_empty(attrs);
    return blk;
}

// Parses `{ ... }`, `unchecked { ... }` or `unsafe { ... }`, optionally
// collecting inner attributes just after the `{`.
fn parse_inner_attrs_and_block(parse_attrs: bool)
    -> (~[attribute], blk) {

    maybe_whole!{pair_empty self, nt_block};

    fn maybe_parse_inner_attrs_and_next(p: parser, parse_attrs: bool) ->
        {inner: ~[attribute], next: ~[attribute]} {
        if parse_attrs {
            p.parse_inner_attrs_and_next()
        } else {
            {inner: ~[], next: ~[]}
        }
    }

    let lo = self.span.lo;
    if self.eat_keyword(~"unchecked") {
        self.expect(token::LBRACE);
        let {inner, next} = maybe_parse_inner_attrs_and_next(self,
                                                             parse_attrs);
        return (inner, self.parse_block_tail_(lo, unchecked_blk, next));
    } else if self.eat_keyword(~"unsafe") {
        self.expect(token::LBRACE);
        let {inner, next} = maybe_parse_inner_attrs_and_next(self,
                                                             parse_attrs);
        return (inner, self.parse_block_tail_(lo, unsafe_blk, next));
    } else {
        self.expect(token::LBRACE);
        let {inner, next} = maybe_parse_inner_attrs_and_next(self,
                                                             parse_attrs);
        return (inner, self.parse_block_tail_(lo, default_blk, next));
    }
}

fn parse_block_no_value() -> blk {
    // We parse blocks that cannot have a value the same as any other
    // block; the type checker will make sure that the tail expression (if
    // any) has unit type.
    return self.parse_block();
}

// Precondition: already parsed the '{' or '#{'
// I guess that also means "already parsed the 'impure'" if
// necessary, and this should take a qualifier.
// some blocks start with "#{"...
fn parse_block_tail(lo: uint, s: blk_check_mode) -> blk {
    self.parse_block_tail_(lo, s, ~[])
}

// Parses the statements/items of a block body through the closing `}`,
// classifying trailing expressions vs. semicolon-terminated statements.
fn parse_block_tail_(lo: uint, s: blk_check_mode,
                     +first_item_attrs: ~[attribute]) -> blk {
    let mut stmts = ~[];
    let mut expr = none;

    // leading items/view-items become decl statements
    let {attrs_remaining, view_items, items: items} =
        self.parse_items_and_view_items(first_item_attrs,
                                        IMPORTS_AND_ITEMS_ALLOWED);

    for items.each |item| {
        let decl = @spanned(item.span.lo, item.span.hi, decl_item(item));
        push(stmts, @spanned(item.span.lo, item.span.hi,
                             stmt_decl(decl, self.get_id())));
    }

    let mut initial_attrs = attrs_remaining;

    if self.token == token::RBRACE && !vec::is_empty(initial_attrs) {
        self.fatal(~"expected item");
    }

    while self.token != token::RBRACE {
        match self.token {
          token::SEMI => {
            self.bump(); // empty
          }
          _ => {
            let stmt = self.parse_stmt(initial_attrs);
            initial_attrs = ~[];
            match stmt.node {
              stmt_expr(e, stmt_id) => {
                // Expression without semicolon:
                match self.token {
                  token::SEMI => {
                    self.bump();
                    push(stmts,
                         @{node: stmt_semi(e, stmt_id) with *stmt});
                  }
                  token::RBRACE => {
                    // block tail expression (the block's value)
                    expr = some(e);
                  }
                  t => {
                    if classify::stmt_ends_with_semi(*stmt) {
                        self.fatal(~"expected `;` or `}` after \
                                     expression but found `"
                                   + token_to_str(self.reader, t)
                                   + ~"`");
                    }
                    vec::push(stmts, stmt);
                  }
                }
              }

              _ => { // All other kinds of statements:
                vec::push(stmts, stmt);

                if classify::stmt_ends_with_semi(*stmt) {
                    self.expect(token::SEMI);
                }
              }
            }
          }
        }
    }
    let mut hi = self.span.hi;
    self.bump();
    let bloc = {view_items: view_items, stmts: stmts, expr: expr,
                id: self.get_id(), rules: s};
    return spanned(lo, hi, bloc);
}

// Parses an optional `: bound + bound + ...` list on a type parameter;
// recognizes the built-in bounds send/copy/const/owned, anything else
// is a trait bound.
fn parse_optional_ty_param_bounds() -> @~[ty_param_bound] {
    let mut bounds = ~[];
    if self.eat(token::COLON) {
        while is_ident(self.token) {
            if self.eat_keyword(~"send") { push(bounds, bound_send); }
            else if self.eat_keyword(~"copy") { push(bounds, bound_copy) }
            else if self.eat_keyword(~"const") {
                push(bounds, bound_const);
            }
            else if self.eat_keyword(~"owned") {
                push(bounds, bound_owned);
            }
            else { push(bounds, bound_trait(self.parse_ty(false))); }
        }
    }
    return @move bounds;
}

// Parses one type parameter: identifier plus optional bounds.
fn parse_ty_param() -> ty_param {
    let ident = self.parse_ident();
    let bounds = self.parse_optional_ty_param_bounds();
    return {ident: ident, id: self.get_id(), bounds: bounds};
}

// Parses an optional `<T, U, ...>` type-parameter list.
fn parse_ty_params() -> ~[ty_param] {
    if self.eat(token::LT) {
        self.parse_seq_to_gt(some(token::COMMA), |p| p.parse_ty_param())
    } else { ~[] }
}

// Parses a parenthesized argument/capture list plus return type into a
// fn_decl and the separated capture clause.
fn parse_fn_decl(purity: purity,
                 parse_arg_fn: fn(parser) -> arg_or_capture_item)
    -> (fn_decl, capture_clause) {

    let args_or_capture_items: ~[arg_or_capture_item] =
        self.parse_unspanned_seq(
            token::LPAREN, token::RPAREN,
            seq_sep_trailing_disallowed(token::COMMA), parse_arg_fn);

    let inputs = either::lefts(args_or_capture_items);
    let capture_clause = @either::rights(args_or_capture_items);

    let (ret_style, ret_ty) = self.parse_ret_ty();
    return ({inputs: inputs,
             output: ret_ty,
             purity: purity,
             cf: ret_style}, capture_clause);
}

// True when the current token is the identifier `self`.
fn is_self_ident() -> bool {
    match self.token {
      token::IDENT(sid, false) if ~"self" == *self.get_str(sid) => true,
      _ => false
    }
}

// Consumes a mandatory `self` identifier or dies with a parse error.
fn expect_self_ident() {
    if !self.is_self_ident() {
        self.fatal(#fmt("expected `self` but found `%s`",
                        token_to_str(self.reader, self.token)));
    }
    self.bump();
}

// Parses a method declaration whose argument list may begin with an
// explicit self type (`&self`, `@self`, `~self`, `self`, with optional
// mutability), followed by the ordinary arguments.
fn parse_fn_decl_with_self(purity: purity,
                           parse_arg_fn:
                           fn(parser) -> arg_or_capture_item)
    -> (self_ty, fn_decl, capture_clause) {

    self.expect(token::LPAREN);

    // A bit of complexity and lookahead is needed here in order to to be
    // backwards compatible.
    let lo = self.span.lo;
    let self_ty;
    match copy self.token {
      token::BINOP(token::AND) => {
        // We need to make sure it isn't a mode.
        // (continued) `&self` / `&const self` / `&mut self` — lookahead
        // distinguishes a region-self from an old-style `&` mode marker
        if self.token_is_keyword(~"self", self.look_ahead(1)) ||
            ((self.token_is_keyword(~"const", self.look_ahead(1)) ||
              self.token_is_keyword(~"mut", self.look_ahead(1))) &&
             self.token_is_keyword(~"self", self.look_ahead(2))) {

            self.bump();
            let mutability = self.parse_mutability();
            self.expect_self_ident();
            self_ty = sty_region(mutability);
        } else {
            self_ty = sty_by_ref;
        }
      }
      token::AT => {
        self.bump();
        let mutability = self.parse_mutability();
        self.expect_self_ident();
        self_ty = sty_box(mutability);
      }
      token::TILDE => {
        self.bump();
        let mutability = self.parse_mutability();
        self.expect_self_ident();
        self_ty = sty_uniq(mutability);
      }
      token::IDENT(*) if self.is_self_ident() => {
        self.bump();
        self_ty = sty_value;
      }
      _ => {
        self_ty = sty_by_ref;
      }
    }

    // If we parsed a self type, expect a comma before the argument list.
    let args_or_capture_items;
    if self_ty != sty_by_ref {
        match copy self.token {
          token::COMMA => {
            self.bump();
            let sep = seq_sep_trailing_disallowed(token::COMMA);
            args_or_capture_items =
                self.parse_seq_to_before_end(token::RPAREN,
                                             sep,
                                             parse_arg_fn);
          }
          token::RPAREN => {
            args_or_capture_items = ~[];
          }
          _ => {
            self.fatal(~"expected `,` or `)`, found `" +
                       token_to_str(self.reader, self.token) + ~"`");
          }
        }
    } else {
        let sep = seq_sep_trailing_disallowed(token::COMMA);
        args_or_capture_items =
            self.parse_seq_to_before_end(token::RPAREN,
                                         sep,
                                         parse_arg_fn);
    }

    self.expect(token::RPAREN);

    let hi = self.span.hi;

    let inputs = either::lefts(args_or_capture_items);
    let capture_clause = @either::rights(args_or_capture_items);
    let (ret_style, ret_ty) = self.parse_ret_ty();

    let fn_decl = {
        inputs: inputs,
        output: ret_ty,
        purity: purity,
        cf: ret_style
    };

    (spanned(lo, hi, self_ty), fn_decl, capture_clause)
}

// Parses a lambda's `|arg, ...|` (or `||`) declaration and an optional
// `-> ty` return type (otherwise inferred).
fn parse_fn_block_decl() -> (fn_decl, capture_clause) {
    let inputs_captures = {
        if self.eat(token::OROR) {
            ~[]
        } else {
            self.parse_unspanned_seq(
                token::BINOP(token::OR), token::BINOP(token::OR),
                seq_sep_trailing_disallowed(token::COMMA),
                |p| p.parse_fn_block_arg())
        }
    };
    let output = if self.eat(token::RARROW) {
        self.parse_ty(false)
    } else {
        @{id: self.get_id(), node: ty_infer, span: self.span}
    };
    return ({inputs: either::lefts(inputs_captures),
             output: output,
             purity: impure_fn,
             cf: return_val},
            @either::rights(inputs_captures));
}

// Parses a function's name and type parameters.
fn parse_fn_header() -> {ident: ident, tps: ~[ty_param]} {
    let id = self.parse_value_ident();
    let ty_params = self.parse_ty_params();
    return {ident: id, tps: ty_params};
}

// Builds an @item node with a fresh node id and the given span bounds.
fn mk_item(lo: uint, hi: uint, +ident: ident,
           +node: item_, vis: visibility,
           +attrs: ~[attribute]) -> @item {
    return @{ident: ident,
             attrs: attrs,
             id: self.get_id(),
             node: node,
             vis: vis,
             span: mk_sp(lo, hi)};
}

// Parses a free function item (header, decl, body) of the given purity.
fn parse_item_fn(purity: purity) -> item_info {
    let t = self.parse_fn_header();
    let (decl, _) = self.parse_fn_decl(purity, |p| p.parse_arg());
    let (inner_attrs, body) = self.parse_inner_attrs_and_block(true);
    (t.ident, item_fn(decl, t.tps, body), some(inner_attrs))
}

fn parse_method_name() -> ident {
    self.parse_value_ident()
}

// Parses a method: outer attributes, optional `static`, purity, name,
// type params, decl with explicit self, and the body block.
fn parse_method(pr: visibility) -> @method {
    let attrs = self.parse_outer_attributes();
    let lo = self.span.lo;

    let is_static = self.parse_staticness();
    let static_sty = spanned(lo, self.span.hi, sty_static);

    let pur = self.parse_fn_purity();
    let ident = self.parse_method_name();
    let tps = self.parse_ty_params();
    let (self_ty, decl, _) = do self.parse_fn_decl_with_self(pur) |p| {
        p.parse_arg()
    };
    // XXX: interaction between staticness, self_ty is broken now
    let self_ty = if is_static { static_sty} else { self_ty };

    let (inner_attrs, body) = self.parse_inner_attrs_and_block(true);
    let attrs = vec::append(attrs, inner_attrs);
    @{ident: ident, attrs: attrs,
      tps: tps, self_ty: self_ty, decl: decl, body: body,
      id: self.get_id(), span: mk_sp(lo, body.span.hi),
      self_id: self.get_id(), vis: pr}
}

// Parses a trait item: name, region/type params, optional supertraits.
fn parse_item_trait() -> item_info {
    let ident = self.parse_ident();
    self.parse_region_param();
    let tps = self.parse_ty_params();

    // Parse traits, if necessary.
let traits; if self.token == token::COLON { self.bump(); traits = self.parse_trait_ref_list(token::LBRACE); } else { traits = ~[]; } let meths = self.parse_trait_methods(); (ident, item_trait(tps, traits, meths), none) } // Parses four variants (with the region/type params always optional): // impl<T> ~[T] : to_str { ... } fn parse_item_impl() -> item_info { fn wrap_path(p: parser, pt: @path) -> @ty { @{id: p.get_id(), node: ty_path(pt, p.get_id()), span: pt.span} } // We do two separate paths here: old-style impls and new-style impls. // First, parse type parameters if necessary. let mut tps; if self.token == token::LT { tps = self.parse_ty_params(); } else { tps = ~[]; } // This is a new-style impl declaration. let ident = @~"__extensions__"; // XXX: clownshoes // Parse the type. let ty = self.parse_ty(false); // Parse traits, if necessary. let traits = if self.token == token::COLON { self.bump(); self.parse_trait_ref_list(token::LBRACE) } else { ~[] }; let mut meths = ~[]; self.expect(token::LBRACE); while !self.eat(token::RBRACE) { vec::push(meths, self.parse_method(public)); } (ident, item_impl(tps, traits, ty, meths), none) } // Instantiates ident <i> with references to <typarams> as arguments. // Used to create a path that refers to a class which will be defined as // the return type of the ctor function. 
// Builds the path `i<typarams...>`, with each type parameter turned
// into a ty_path argument; all spans use the last token's span.
fn ident_to_path_tys(i: ident, typarams: ~[ty_param]) -> @path {
    let s = self.last_span;

    @{span: s, global: false, idents: ~[i],
      rp: none,
      types: vec::map(typarams, |tp| {
          @{id: self.get_id(),
            node: ty_path(ident_to_path(s, tp.ident), self.get_id()),
            span: s}})
    }
}

// Parses a trait reference (path, possibly with type parameters).
fn parse_trait_ref() -> @trait_ref {
    @{path: self.parse_path_with_tps(false),
      ref_id: self.get_id(), impl_id: self.get_id()}
}

// Parses a comma-separated list of trait references, stopping just
// before `ket` (trailing comma disallowed).
fn parse_trait_ref_list(ket: token::token) -> ~[@trait_ref] {
    self.parse_seq_to_before_end(
        ket, seq_sep_trailing_disallowed(token::COMMA),
        |p| p.parse_trait_ref())
}

// Parses a class/struct item in one of three shapes: record-like
// (`{...}`), tuple-like (`(...)`), or unit-like (`;`).
fn parse_item_class() -> item_info {
    let class_name = self.parse_value_ident();
    self.parse_region_param();
    let ty_params = self.parse_ty_params();
    let class_path = self.ident_to_path_tys(class_name, ty_params);
    let traits : ~[@trait_ref] = if self.eat(token::COLON) {
        self.parse_trait_ref_list(token::LBRACE)
    } else {
        ~[]
    };
    let mut fields: ~[@struct_field];
    let mut methods: ~[@method] = ~[];
    // At most one explicit ctor and one dtor are permitted; duplicates
    // are reported with both spans below.
    let mut the_ctor: option<(fn_decl, ~[attribute], blk, codemap::span)>
        = none;
    let mut the_dtor: option<(blk, ~[attribute], codemap::span)> = none;
    let ctor_id = self.get_id();

    if self.eat(token::LBRACE) {
        // It's a record-like struct.
        fields = ~[];
        while self.token != token::RBRACE {
            match self.parse_class_item(class_path) {
              ctor_decl(a_fn_decl, attrs, blk, s) => {
                match the_ctor {
                  some((_, _, _, s_first)) => {
                    self.span_note(s, #fmt("Duplicate constructor \
                                            declaration for class %s",
                                           *class_name));
                    self.span_fatal(copy s_first, ~"First constructor \
                                                    declared here");
                  }
                  none => {
                    the_ctor = some((a_fn_decl, attrs, blk, s));
                  }
                }
              }
              dtor_decl(blk, attrs, s) => {
                match the_dtor {
                  some((_, _, s_first)) => {
                    self.span_note(s, #fmt("Duplicate destructor \
                                            declaration for class %s",
                                           *class_name));
                    self.span_fatal(copy s_first, ~"First destructor \
                                                    declared here");
                  }
                  none => {
                    the_dtor = some((blk, attrs, s));
                  }
                }
              }
              members(mms) => {
                for mms.each |mm| {
                    match mm {
                      @field_member(struct_field) =>
                        vec::push(fields, struct_field),
                      @method_member(the_method_member) =>
                        vec::push(methods, the_method_member)
                    }
                }
              }
            }
        }
        self.bump();
    } else if self.token == token::LPAREN {
        // It's a tuple-like struct.
        fields = do self.parse_unspanned_seq(token::LPAREN, token::RPAREN,
                                             seq_sep_trailing_allowed
                                                 (token::COMMA)) |p| {
            let lo = p.span.lo;
            let struct_field_ = {
                kind: unnamed_field,
                id: self.get_id(),
                ty: p.parse_ty(false)
            };
            @spanned(lo, p.span.hi, struct_field_)
        };
        self.expect(token::SEMI);
    } else if self.eat(token::SEMI) {
        // It's a unit-like struct.
        fields = ~[];
    } else {
        self.fatal(fmt!("expected `{`, `(`, or `;` after struct name \
                         but found `%s`",
                        token_to_str(self.reader, self.token)));
    }

    // Wrap the dtor block (if any) into its AST node form.
    let actual_dtor = do option::map(the_dtor) |dtor| {
        let (d_body, d_attrs, d_s) = dtor;
        {node: {id: self.get_id(),
                attrs: d_attrs,
                self_id: self.get_id(),
                body: d_body},
         span: d_s}};

    match the_ctor {
      some((ct_d, ct_attrs, ct_b, ct_s)) => {
        (class_name,
         item_class(@{
            traits: traits,
            fields: move fields,
            methods: move methods,
            ctor: some({
                node: {id: ctor_id,
                       attrs: ct_attrs,
                       self_id: self.get_id(),
                       dec: ct_d,
                       body: ct_b},
                span: ct_s}),
            dtor: actual_dtor
         }, ty_params),
         none)
      }
      none => {
        (class_name,
         item_class(@{
            traits: traits,
            fields: move fields,
            methods: move methods,
            ctor: none,
            dtor: actual_dtor
         }, ty_params),
         none)
      }
    }
}

// True for tokens that begin an attribute (`#`) or a doc comment.
fn token_is_pound_or_doc_comment(++tok: token::token) -> bool {
    match tok {
      token::POUND | token::DOC_COMMENT(_) => true,
      _ => false
    }
}

// Parses a single class member at visibility `vis`: a field
// (introduced by `let`, `mut`, or a plain identifier) or a method.
fn parse_single_class_item(vis: visibility) -> @class_member {
    if (self.eat_keyword(~"let") ||
        self.token_is_keyword(~"mut", copy self.token) ||
        !self.is_any_keyword(copy self.token)) &&
        !self.token_is_pound_or_doc_comment(self.token) {
        let a_var = self.parse_instance_var(vis);
        self.expect(token::SEMI);
        return a_var;
    } else {
        let m = self.parse_method(vis);
        return @method_member(m);
    }
}

// Parses an explicit constructor; the declared output type is
// overridden with `result_ty` (the class's own type).
fn parse_ctor(attrs: ~[attribute], result_ty: ast::ty_)
    -> class_contents {
    let lo = self.last_span.lo;
    let (decl_, _) = self.parse_fn_decl(impure_fn, |p| p.parse_arg());
    let decl = {output: @{id: self.get_id(),
                          node: result_ty,
                          span: decl_.output.span}
                with decl_};
    let body = self.parse_block();
    ctor_decl(decl, attrs, body, mk_sp(lo, self.last_span.hi))
}

// Parses a destructor body (no argument list).
fn parse_dtor(attrs: ~[attribute]) -> class_contents {
    let lo = self.last_span.lo;
    let body = self.parse_block();
    dtor_decl(body, attrs, mk_sp(lo, self.last_span.hi))
}

// Parses one class item, handling `priv`/`pub` visibility prefixes
// and the special `new` (ctor) and `drop` (dtor) members.
fn parse_class_item(class_name_with_tps: @path) -> class_contents {
    if self.eat_keyword(~"priv") {
        // XXX: Remove after snapshot.
        // `priv { ... }` groups several private members; a bare `priv`
        // applies to the single following member.
        match self.token {
          token::LBRACE => {
            self.bump();
            let mut results = ~[];
            while self.token != token::RBRACE {
                vec::push(results,
                          self.parse_single_class_item(private));
            }
            self.bump();
            return members(results);
          }
          _ =>
            return members(~[self.parse_single_class_item(private)])
        }
    }
    if self.eat_keyword(~"pub") {
        return members(~[self.parse_single_class_item(public)]);
    }
    let attrs = self.parse_outer_attributes();
    if self.eat_keyword(~"new") {
        // result type is always the type of the class
        return self.parse_ctor(attrs, ty_path(class_name_with_tps,
                                              self.get_id()));
    } else if self.eat_keyword(~"drop") {
        return self.parse_dtor(attrs);
    } else {
        return members(~[self.parse_single_class_item(inherited)]);
    }
}

// Parses an optional `pub`/`priv`; absence means inherited visibility.
fn parse_visibility() -> visibility {
    if self.eat_keyword(~"pub") { public }
    else if self.eat_keyword(~"priv") { private }
    else { inherited }
}

// Eats an optional `static` keyword; true if it was present.
fn parse_staticness() -> bool { self.eat_keyword(~"static") }

// Parses the items of a module body up to the terminator `term`,
// folding `first_item_attrs` (attributes already consumed by the
// caller) into the first item's attributes.
fn parse_mod_items(term: token::token,
                   +first_item_attrs: ~[attribute]) -> _mod {
    // Shouldn't be any view items since we've already parsed an item attr
    let {attrs_remaining, view_items, items: starting_items} =
        self.parse_items_and_view_items(first_item_attrs,
                                        VIEW_ITEMS_AND_ITEMS_ALLOWED);
    let mut items: ~[@item] = move starting_items;

    let mut first = true;
    while self.token != term {
        let mut attrs = self.parse_outer_attributes();
        if first {
            attrs = vec::append(attrs_remaining, attrs);
            first = false;
        }
        debug!("parse_mod_items: parse_item_or_view_item(attrs=%?)",
               attrs);
        match self.parse_item_or_view_item(attrs, true) {
          iovi_item(item) => vec::push(items, item),
          iovi_view_item(view_item) => {
            self.span_fatal(view_item.span, ~"view items must be \
                                              declared at the top of the \
                                              module");
          }
          _ => {
            self.fatal(~"expected item but found `" +
                       token_to_str(self.reader, self.token) + ~"`");
          }
        }
        debug!{"parse_mod_items: attrs=%?", attrs};
    }

    if first && attrs_remaining.len() > 0u {
        // We parsed attributes for the first item but didn't find it
        self.fatal(~"expected item");
    }

    return {view_items: view_items, items: items};
}

// Parses a `const` item body: `NAME : TY = EXPR ;`.
fn parse_item_const() -> item_info {
    let id = self.parse_value_ident();
    self.expect(token::COLON);
    let ty = self.parse_ty(false);
    self.expect(token::EQ);
    let e = self.parse_expr();
    self.expect(token::SEMI);
    (id, item_const(ty, e), none)
}

// Parses a `mod` item body: name followed by a braced item list;
// the module's inner attributes are returned as extra attributes.
fn parse_item_mod() -> item_info {
    let id = self.parse_ident();
    self.expect(token::LBRACE);
    let inner_attrs = self.parse_inner_attrs_and_next();
    let m = self.parse_mod_items(token::RBRACE, inner_attrs.next);
    self.expect(token::RBRACE);
    (id, item_mod(m), some(inner_attrs.inner))
}

// Parses a foreign fn declaration: header + decl + `;`, no body.
fn parse_item_foreign_fn(+attrs: ~[attribute],
                         purity: purity) -> @foreign_item {
    let lo = self.last_span.lo;
    let t = self.parse_fn_header();
    let (decl, _) = self.parse_fn_decl(purity, |p| p.parse_arg());
    let mut hi = self.span.hi;
    self.expect(token::SEMI);
    return @{ident: t.ident,
             attrs: attrs,
             node: foreign_item_fn(decl, t.tps),
             id: self.get_id(),
             span: mk_sp(lo, hi)};
}

// Parses `fn` / `pure fn` / `unsafe fn` into the matching purity;
// anything else is a parse error via unexpected().
fn parse_fn_purity() -> purity {
    if self.eat_keyword(~"fn") { impure_fn }
    else if self.eat_keyword(~"pure") {
        self.expect_keyword(~"fn");
        pure_fn
    } else if self.eat_keyword(~"unsafe") {
        self.expect_keyword(~"fn");
        unsafe_fn
    }
    else { self.unexpected(); }
}

// A foreign item is always a fn at some purity.
fn parse_foreign_item(+attrs: ~[attribute]) -> @foreign_item {
    self.parse_item_foreign_fn(attrs, self.parse_fn_purity())
}

// Parses the contents of an `extern mod { ... }` block up to `}`.
fn parse_foreign_mod_items(+first_item_attrs: ~[attribute])
    -> foreign_mod {
    // Shouldn't be any view items since we've already parsed an item attr
    let {attrs_remaining, view_items, items: _} =
        self.parse_items_and_view_items(first_item_attrs,
                                        VIEW_ITEMS_ALLOWED);
    let mut items: ~[@foreign_item] = ~[];
    // attrs_remaining only applies to the first foreign item parsed.
    let mut initial_attrs = attrs_remaining;
    while self.token != token::RBRACE {
        let attrs = vec::append(initial_attrs,
                                self.parse_outer_attributes());
        initial_attrs = ~[];
        vec::push(items, self.parse_foreign_item(attrs));
    }
    return {view_items: view_items, items: items};
}

// Parses `extern mod NAME { ... }` (an item) or `extern mod NAME;`
// (a `use`-style view item); `extern` was already consumed.
fn parse_item_foreign_mod(lo: uint,
                          visibility: visibility,
                          attrs: ~[attribute],
                          items_allowed: bool) ->
    item_or_view_item {
    // Accept `mod` or its deprecated spelling `module`.
    if self.is_keyword(~"mod") {
        self.expect_keyword(~"mod");
    } else {
        self.expect_keyword(~"module");
    }
    let ident = self.parse_ident();

    // extern mod { ... }
    if items_allowed && self.eat(token::LBRACE) {
        let extra_attrs = self.parse_inner_attrs_and_next();
        let m = self.parse_foreign_mod_items(extra_attrs.next);
        self.expect(token::RBRACE);
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident,
                                      item_foreign_mod(m), visibility,
                                      maybe_append(attrs,
                                                   some(extra_attrs.
                                                        inner))));
    }

    // extern mod foo;
    let metadata = self.parse_optional_meta();
    self.expect(token::SEMI);
    return iovi_view_item(@{
        node: view_item_use(ident, metadata, self.get_id()),
        attrs: attrs,
        vis: visibility,
        span: mk_sp(lo, self.last_span.hi)
    });
}

// Parses the `NAME` part of a type declaration, recording its start.
fn parse_type_decl() -> {lo: uint, ident: ident} {
    let lo = self.last_span.lo;
    let id = self.parse_ident();
    return {lo: lo, ident: id};
}

// Parses a `type` item: name, optional region param, type params,
// `= TY ;`.
fn parse_item_type() -> item_info {
    let t = self.parse_type_decl();
    self.parse_region_param();
    let tps = self.parse_ty_params();
    self.expect(token::EQ);
    let ty = self.parse_ty(false);
    self.expect(token::SEMI);
    (t.ident, item_ty(ty, tps), none)
}

// Parses the optional `/&` region-parameter marker on an item.
fn parse_region_param() {
    if self.eat(token::BINOP(token::SLASH)) {
        self.expect(token::BINOP(token::AND));
    }
}

// Parses a struct body (fields, methods, optional dtor) up to and
// including the closing `}`; explicit ctors are rejected here.
fn parse_struct_def(path: @path) -> @struct_def {
    let mut the_dtor: option<(blk, ~[attribute], codemap::span)> = none;
    let mut fields: ~[@struct_field] = ~[];
    let mut methods: ~[@method] = ~[];
    while self.token != token::RBRACE {
        match self.parse_class_item(path) {
          ctor_decl(*) => {
            self.span_fatal(copy self.span,
                            ~"deprecated explicit \
                              constructors are not allowed \
                              here");
          }
          dtor_decl(blk, attrs, s) => {
            match the_dtor {
              some((_, _, s_first)) => {
                self.span_note(s, ~"duplicate destructor \
                                    declaration");
                self.span_fatal(copy s_first,
                                ~"first destructor \
                                  declared here");
              }
              none => {
                the_dtor = some((blk, attrs, s));
              }
            }
          }
          members(mms) => {
            for mms.each |mm| {
                match mm {
                  @field_member(struct_field) =>
                    vec::push(fields, struct_field),
                  @method_member(the_method_member) =>
                    vec::push(methods, the_method_member)
                }
            }
          }
        }
    }
    self.bump();

    // Wrap the dtor block (if any) into its AST node form.
    let mut actual_dtor = do option::map(the_dtor) |dtor| {
        let (d_body, d_attrs, d_s) = dtor;
        {node: {id: self.get_id(),
                attrs: d_attrs,
                self_id: self.get_id(),
                body: d_body},
         span: d_s}
    };

    return @{
        traits: ~[],
        fields: move fields,
        methods: move methods,
        ctor: none,
        dtor: actual_dtor
    };
}

// Parses the variants of an enum body (terminated by `}`): nested
// enums, struct variants, tuple variants, `= EXPR` discriminators,
// and an optional shared `struct { ... }` common-field block.
fn parse_enum_def(ident: ast::ident, ty_params: ~[ast::ty_param])
    -> enum_def {
    let mut variants: ~[variant] = ~[];
    // Discriminator values are only legal on all-nullary (c-like)
    // enums; track both facts and check after the loop.
    let mut all_nullary = true, have_disr = false;
    let mut common_fields = none;

    while self.token != token::RBRACE {
        let variant_attrs = self.parse_outer_attributes();
        let vlo = self.span.lo;

        // Is this a common field declaration?
        if self.eat_keyword(~"struct") {
            if common_fields.is_some() {
                self.fatal(~"duplicate declaration of shared fields");
            }
            self.expect(token::LBRACE);
            let path = self.ident_to_path_tys(ident, ty_params);
            common_fields = some(self.parse_struct_def(path));
            again;
        }

        let vis = self.parse_visibility();

        // Is this a nested enum declaration?
        let ident, needs_comma, kind;
        let mut args = ~[], disr_expr = none;
        if self.eat_keyword(~"enum") {
            ident = self.parse_ident();
            self.expect(token::LBRACE);
            let nested_enum_def = self.parse_enum_def(ident, ty_params);
            kind = enum_variant_kind(move nested_enum_def);
            needs_comma = false;
        } else {
            ident = self.parse_value_ident();
            if self.eat(token::LBRACE) {
                // Parse a struct variant.
                all_nullary = false;
                let path = self.ident_to_path_tys(ident, ty_params);
                kind = struct_variant_kind(self.parse_struct_def(path));
            } else if self.token == token::LPAREN {
                // Tuple variant with argument types.
                all_nullary = false;
                let arg_tys = self.parse_unspanned_seq(
                    token::LPAREN, token::RPAREN,
                    seq_sep_trailing_disallowed(token::COMMA),
                    |p| p.parse_ty(false));
                for arg_tys.each |ty| {
                    vec::push(args, {ty: ty, id: self.get_id()});
                }
                kind = tuple_variant_kind(args);
            } else if self.eat(token::EQ) {
                // Explicit discriminator value.
                have_disr = true;
                disr_expr = some(self.parse_expr());
                kind = tuple_variant_kind(args);
            } else {
                kind = tuple_variant_kind(~[]);
            }
            needs_comma = true;
        }

        let vr = {name: ident, attrs: variant_attrs,
                  kind: kind, id: self.get_id(),
                  disr_expr: disr_expr, vis: vis};
        vec::push(variants, spanned(vlo, self.last_span.hi, vr));

        if needs_comma && !self.eat(token::COMMA) { break; }
    }
    self.expect(token::RBRACE);

    if (have_disr && !all_nullary) {
        self.fatal(~"discriminator values can only be used with a c-like \
                     enum");
    }

    return enum_def({ variants: variants, common: common_fields });
}

// Parses an `enum` item, including the `enum x = ty;` newtype form.
fn parse_item_enum() -> item_info {
    let id = self.parse_ident();
    self.parse_region_param();
    let ty_params = self.parse_ty_params();
    // Newtype syntax
    if self.token == token::EQ {
        self.check_restricted_keywords_(*id);
        self.bump();
        let ty = self.parse_ty(false);
        self.expect(token::SEMI);
        // A newtype desugars to a single one-argument tuple variant
        // named after the enum itself.
        let variant =
            spanned(ty.span.lo, ty.span.hi,
                    {name: id,
                     attrs: ~[],
                     kind: tuple_variant_kind
                         (~[{ty: ty, id: self.get_id()}]),
                     id: self.get_id(),
                     disr_expr: none,
                     vis: public});
        return (id, item_enum(enum_def({ variants: ~[variant],
                                         common: none }),
                              ty_params), none);
    }
    self.expect(token::LBRACE);

    let enum_definition = self.parse_enum_def(id, ty_params);
    (id, item_enum(enum_definition, ty_params), none)
}

// Maps a leading sigil token to the closure protocol it denotes;
// no sigil defaults to proto_block.
fn parse_fn_ty_proto() -> proto {
    match self.token {
      token::AT => {
        self.bump();
        proto_box
      }
      token::TILDE => {
        self.bump();
        proto_uniq
      }
      token::BINOP(token::AND) => {
        self.bump();
        proto_block
      }
      _ => {
        proto_block
      }
    }
}

// True if the token after `fn` indicates a fn *expression* rather
// than a fn item (used by the item dispatcher's lookahead).
fn fn_expr_lookahead(tok:
    token::token) -> bool {
    match tok {
      token::LPAREN | token::AT | token::TILDE | token::BINOP(_) => true,
      _ => false
    }
}

// Top-level item dispatcher: tries each item keyword in turn and
// returns the parsed item, a view item, or iovi_none if the tokens
// do not begin an item. `items_allowed` gates non-view items.
fn parse_item_or_view_item(+attrs: ~[attribute], items_allowed: bool)
    -> item_or_view_item {
    // Short-circuit for an already-parsed (interpolated) item token.
    maybe_whole!{iovi self,nt_item};
    let lo = self.span.lo;

    let visibility;
    if self.eat_keyword(~"pub") {
        visibility = public;
    } else if self.eat_keyword(~"priv") {
        visibility = private;
    } else {
        visibility = inherited;
    }

    if items_allowed && self.eat_keyword(~"const") {
        let (ident, item_, extra_attrs) = self.parse_item_const();
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.is_keyword(~"fn") &&
        !self.fn_expr_lookahead(self.look_ahead(1u)) {
        self.bump();
        let (ident, item_, extra_attrs) = self.parse_item_fn(impure_fn);
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.eat_keyword(~"pure") {
        self.expect_keyword(~"fn");
        let (ident, item_, extra_attrs) = self.parse_item_fn(pure_fn);
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.is_keyword(~"unsafe")
        && self.look_ahead(1u) != token::LBRACE {
        // `unsafe {` would be an unsafe block, not an item.
        self.bump();
        self.expect_keyword(~"fn");
        let (ident, item_, extra_attrs) = self.parse_item_fn(unsafe_fn);
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if self.eat_keyword(~"extern") {
        if items_allowed && self.eat_keyword(~"fn") {
            let (ident, item_, extra_attrs) =
                self.parse_item_fn(extern_fn);
            return iovi_item(self.mk_item(lo, self.last_span.hi, ident,
                                          item_, visibility,
                                          maybe_append(attrs,
                                                       extra_attrs)));
        }
        return self.parse_item_foreign_mod(lo, visibility, attrs,
                                           items_allowed);
    } else if items_allowed && (self.eat_keyword(~"mod") ||
                                self.eat_keyword(~"module")) {
        let (ident, item_, extra_attrs) = self.parse_item_mod();
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.eat_keyword(~"type") {
        let (ident, item_, extra_attrs) = self.parse_item_type();
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.eat_keyword(~"enum") {
        let (ident, item_, extra_attrs) = self.parse_item_enum();
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.eat_keyword(~"trait") {
        let (ident, item_, extra_attrs) = self.parse_item_trait();
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.eat_keyword(~"impl") {
        let (ident, item_, extra_attrs) = self.parse_item_impl();
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.eat_keyword(~"struct") {
        let (ident, item_, extra_attrs) = self.parse_item_class();
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if self.eat_keyword(~"use") {
        let view_item = self.parse_use();
        self.expect(token::SEMI);
        return iovi_view_item(@{
            node: view_item,
            attrs: attrs,
            vis: visibility,
            span: mk_sp(lo, self.last_span.hi)
        });
    } else if self.eat_keyword(~"import") {
        let view_paths = self.parse_view_paths();
        self.expect(token::SEMI);
        return iovi_view_item(@{
            node: view_item_import(view_paths),
            attrs: attrs,
            vis: visibility,
            span: mk_sp(lo, self.last_span.hi)
        });
    } else if self.eat_keyword(~"export") {
        let view_paths = self.parse_view_paths();
        self.expect(token::SEMI);
        return iovi_view_item(@{
            node: view_item_export(view_paths),
            attrs: attrs,
            vis: visibility,
            span: mk_sp(lo, self.last_span.hi)
        });
    } else if items_allowed &&
        (!self.is_any_keyword(copy self.token)
         && self.look_ahead(1) == token::NOT
         && is_plain_ident(self.look_ahead(2))) {
        // item macro.
        let pth = self.parse_path_without_tps();
        self.expect(token::NOT);

        let id = self.parse_ident();

        // Token-tree body between matching delimiters.
        let tts = match self.token {
          token::LPAREN | token::LBRACE | token::LBRACKET => {
            let ket = token::flip_delimiter(self.token);
            self.parse_unspanned_seq(copy self.token, ket,
                                     seq_sep_none(),
                                     |p| p.parse_token_tree())
          }
          _ => self.fatal(~"expected open delimiter")
        };

        let m = ast::mac_invoc_tt(pth, tts);
        let m: ast::mac = {node: m,
                           span: {lo: self.span.lo,
                                  hi: self.span.hi,
                                  expn_info: none}};
        let item_ = item_mac(m);
        return iovi_item(self.mk_item(lo, self.last_span.hi, id, item_,
                                      visibility, attrs));
    } else {
        return iovi_none;
    };
}

// Like parse_item_or_view_item, but view items are a fatal error.
fn parse_item(+attrs: ~[attribute]) -> option<@ast::item> {
    match self.parse_item_or_view_item(attrs, true) {
      iovi_none =>
        none,
      iovi_view_item(_) =>
        self.fatal(~"view items are not allowed here"),
      iovi_item(item) =>
        some(item)
    }
}

// Parses the body of a `use` view item (keyword already eaten):
// either the old crate-linkage form or an import path list.
fn parse_use() -> view_item_ {
    if self.look_ahead(1) == token::SEMI ||
        self.look_ahead(1) == token::LPAREN {
        // Old-style "use"; i.e. what we now call "extern mod".
        let ident = self.parse_ident();
        let metadata = self.parse_optional_meta();
        return view_item_use(ident, metadata, self.get_id());
    }
    return view_item_import(self.parse_view_paths());
}

// Parses one view path: `x = foo::bar`, `foo::bar`, `foo::{a,b,c}`,
// or `foo::*`.
fn parse_view_path() -> @view_path {
    let lo = self.span.lo;
    let first_ident = self.parse_ident();
    let mut path = ~[first_ident];
    debug!{"parsed view_path: %s", *first_ident};
    match self.token {
      token::EQ => {
        // x = foo::bar
        self.bump();
        path = ~[self.parse_ident()];
        while self.token == token::MOD_SEP {
            self.bump();
            let id = self.parse_ident();
            vec::push(path, id);
        }
        let path = @{span: mk_sp(lo, self.span.hi), global: false,
                     idents: path, rp: none, types: ~[]};
        return @spanned(lo, self.span.hi,
                        view_path_simple(first_ident, path,
                                         self.get_id()));
      }

      token::MOD_SEP => {
        // foo::bar or foo::{a,b,c} or foo::*
        while self.token == token::MOD_SEP {
            self.bump();

            match copy self.token {
              token::IDENT(i, _) => {
                self.bump();
                vec::push(path, self.get_str(i));
              }

              // foo::bar::{a,b,c}
              token::LBRACE => {
                let idents = self.parse_unspanned_seq(
                    token::LBRACE, token::RBRACE,
                    seq_sep_trailing_allowed(token::COMMA),
                    |p| p.parse_path_list_ident());
                let path = @{span: mk_sp(lo, self.span.hi),
                             global: false, idents: path,
                             rp: none, types: ~[]};
                return @spanned(lo, self.span.hi,
                                view_path_list(path, idents,
                                               self.get_id()));
              }

              // foo::bar::*
              token::BINOP(token::STAR) => {
                self.bump();
                let path = @{span: mk_sp(lo, self.span.hi),
                             global: false, idents: path,
                             rp: none, types: ~[]};
                return @spanned(lo, self.span.hi,
                                view_path_glob(path, self.get_id()));
              }

              _ => break
            }
        }
      }
      _ => ()
    }
    // Plain path: a simple view path bound to the last segment's name.
    let last = path[vec::len(path) - 1u];
    let path = @{span: mk_sp(lo, self.span.hi), global: false,
                 idents: path, rp: none, types: ~[]};
    return @spanned(lo, self.span.hi,
                    view_path_simple(last, path, self.get_id()));
}

// Parses a comma-separated list of view paths.
fn parse_view_paths() -> ~[@view_path] {
    let mut vp = ~[self.parse_view_path()];
    while self.token == token::COMMA {
        self.bump();
        vec::push(vp, self.parse_view_path());
    }
    return vp;
}

// True if the upcoming tokens begin a view item (`use` / `import` /
// `export`), looking past an optional leading `pub`/`priv`.
fn is_view_item() -> bool {
    let tok = if !self.is_keyword(~"pub") && !self.is_keyword(~"priv") {
        self.token
    } else {
        self.look_ahead(1u)
    };
    self.token_is_keyword(~"use", tok)
        || self.token_is_keyword(~"import", tok)
        || self.token_is_keyword(~"export", tok)
}

// Parses a single view item terminated by `;`. Callers are expected
// to have checked is_view_item() first (otherwise this fails).
fn parse_view_item(+attrs: ~[attribute]) -> @view_item {
    let lo = self.span.lo, vis = self.parse_visibility();
    let node = if self.eat_keyword(~"use") {
        self.parse_use()
    } else if self.eat_keyword(~"import") {
        view_item_import(self.parse_view_paths())
    } else if self.eat_keyword(~"export") {
        view_item_export(self.parse_view_paths())
    } else {
        fail;
    };
    self.expect(token::SEMI);
    @{node: node, attrs: attrs, vis: vis,
      span: mk_sp(lo, self.last_span.hi)}
}

// Repeatedly parses view items and items until neither can be
// parsed, enforcing the permissions of `mode`; any attributes left
// over (parsed but not attached) are returned to the caller.
fn parse_items_and_view_items(+first_item_attrs: ~[attribute],
                              mode: view_item_parse_mode)
    -> {attrs_remaining: ~[attribute],
        view_items: ~[@view_item],
        items: ~[@item]} {
    let mut attrs = vec::append(first_item_attrs,
                                self.parse_outer_attributes());

    let items_allowed;
    match mode {
        VIEW_ITEMS_AND_ITEMS_ALLOWED | IMPORTS_AND_ITEMS_ALLOWED =>
            items_allowed = true,
        VIEW_ITEMS_ALLOWED =>
            items_allowed = false
    }

    let (view_items, items) = (dvec(), dvec());
    loop {
        match self.parse_item_or_view_item(attrs, items_allowed) {
            iovi_none =>
                break,
            iovi_view_item(view_item) => {
                match mode {
                    VIEW_ITEMS_AND_ITEMS_ALLOWED |
                    VIEW_ITEMS_ALLOWED => {}
                    IMPORTS_AND_ITEMS_ALLOWED =>
                        match view_item.node {
                            view_item_import(_) => {}
                            view_item_export(_) | view_item_use(*) =>
                                self.fatal(~"exports and \"extern mod\" \
                                             declarations are not \
                                             allowed here")
                        }
                }
                view_items.push(view_item);
            }
            iovi_item(item) => {
                assert items_allowed;
                items.push(item)
            }
        }
        attrs = self.parse_outer_attributes();
    }

    {attrs_remaining: attrs,
     view_items: vec::from_mut(dvec::unwrap(view_items)),
     items: vec::from_mut(dvec::unwrap(items))}
}

// Parses a source module as a crate
fn parse_crate_mod(_cfg: crate_cfg) -> @crate {
    let lo = self.span.lo;
    let crate_attrs = self.parse_inner_attrs_and_next();
    let first_item_outer_attrs = crate_attrs.next;
    let m = self.parse_mod_items(token::EOF, first_item_outer_attrs);
    return @spanned(lo, self.span.lo,
                    {directives: ~[],
                     module: m,
                     attrs: crate_attrs.inner,
                     config: self.cfg});
}

// Parses a string-literal token into its interned contents.
fn parse_str() -> @~str {
    match copy self.token {
      token::LIT_STR(s) => { self.bump(); self.get_str(s) }
      _ => self.fatal(~"expected string literal")
    }
}

// Logic for parsing crate files (.rc)
//
// Each crate file is a sequence of directives.
//
// Each directive imperatively extends its environment with 0 or more
// items.
fn parse_crate_directive(first_outer_attr: ~[attribute])
    -> crate_directive {

    // Collect the next attributes
    let outer_attrs = vec::append(first_outer_attr,
                                  self.parse_outer_attributes());
    // In a crate file outer attributes are only going to apply to mods
    let expect_mod = vec::len(outer_attrs) > 0u;

    let lo = self.span.lo;
    if expect_mod || self.is_keyword(~"mod") ||
        self.is_keyword(~"module") {
        if self.is_keyword(~"mod") {
            self.expect_keyword(~"mod");
        } else {
            self.expect_keyword(~"module");
        }
        let id = self.parse_ident();
        match self.token {
          // mod x = "foo.rs";
          token::SEMI => {
            let mut hi = self.span.hi;
            self.bump();
            return spanned(lo, hi, cdir_src_mod(id, outer_attrs));
          }
          // mod x = "foo_dir" { ...directives... }
          token::LBRACE => {
            self.bump();
            let inner_attrs = self.parse_inner_attrs_and_next();
            let mod_attrs = vec::append(outer_attrs, inner_attrs.inner);
            let next_outer_attr = inner_attrs.next;
            let cdirs = self.parse_crate_directives(token::RBRACE,
                                                    next_outer_attr);
            let mut hi = self.span.hi;
            self.expect(token::RBRACE);
            return spanned(lo, hi,
                           cdir_dir_mod(id, cdirs, mod_attrs));
          }
          _ => self.unexpected()
        }
    } else if self.is_view_item() {
        let vi = self.parse_view_item(outer_attrs);
        return spanned(lo, vi.span.hi, cdir_view_item(vi));
    } else {
        return self.fatal(~"expected crate directive");
    }
}

// Parses crate directives up to the terminator `term`.
fn parse_crate_directives(term: token::token,
                          first_outer_attr: ~[attribute])
    -> ~[@crate_directive] {

    // This is pretty ugly.
    // If we have an outer attribute then we can't
    // accept seeing the terminator next, so if we do see it then fail the
    // same way parse_crate_directive would
    if vec::len(first_outer_attr) > 0u && self.token == term {
        // expect_keyword is invoked purely for its error report here.
        if self.is_keyword(~"mod") {
            self.expect_keyword(~"mod");
        } else {
            self.expect_keyword(~"module");
        }
    }

    let mut cdirs: ~[@crate_directive] = ~[];
    // first_outer_attr only feeds the first directive parsed.
    let mut first_outer_attr = first_outer_attr;
    while self.token != term {
        let cdir = @self.parse_crate_directive(first_outer_attr);
        vec::push(cdirs, cdir);
        first_outer_attr = ~[];
    }
    return cdirs;
}
}

//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//

// NOTE(review): the text below appears to begin a second copy/version
// of this parser module (commit-message line, then its import block);
// confirm against the original file layout.
// Parse explicit self in more places. Work on #2585.

import print::pprust::expr_to_str;
import result::result;
import either::{Either, Left, Right};
import std::map::{hashmap, str_hash};
import token::{can_begin_expr, is_ident, is_ident_or_path,
               is_plain_ident, INTERPOLATED};
import codemap::{span,fss_none};
import util::interner;
import ast_util::{spanned, respan, mk_sp, ident_to_path,
                  operator_prec};
import lexer::reader;
import prec::{as_prec, token_to_binop};
import attr::parser_attr;
import common::{seq_sep_trailing_disallowed, seq_sep_trailing_allowed,
                seq_sep_none, token_to_str};
import dvec::dvec;
import vec::{push};
import ast::{_mod, add, alt_check, alt_exhaustive, arg, arm, attribute,
             bind_by_ref, bind_by_implicit_ref, bind_by_value,
             bitand, bitor, bitxor, blk, blk_check_mode, bound_const,
             bound_copy, bound_send, bound_trait, bound_owned, box,
             by_copy, by_move, by_mutbl_ref, by_ref, by_val,
             capture_clause, capture_item, cdir_dir_mod, cdir_src_mod,
             cdir_view_item, class_immutable, class_mutable,
             crate, crate_cfg, crate_directive, decl, decl_item,
             decl_local, default_blk, deref, div, enum_def,
             enum_variant_kind, expl, expr, expr_, expr_addr_of,
             expr_match, expr_again, expr_assert, expr_assign,
             expr_assign_op, expr_binary, expr_block, expr_break,
             expr_call,
expr_cast, expr_copy, expr_do_body, expr_fail, expr_field, expr_fn, expr_fn_block, expr_if, expr_index, expr_lit, expr_log, expr_loop, expr_loop_body, expr_mac, expr_move, expr_path, expr_rec, expr_repeat, expr_ret, expr_swap, expr_struct, expr_tup, expr_unary, expr_unary_move, expr_vec, expr_vstore, expr_while, extern_fn, field, fn_decl, foreign_item, foreign_item_fn, foreign_mod, ident, impure_fn, infer, inherited, init_assign, init_move, initializer, item, item_, item_class, item_const, item_enum, item_fn, item_foreign_mod, item_impl, item_mac, item_mod, item_trait, item_ty, lit, lit_, lit_bool, lit_float, lit_int, lit_int_unsuffixed, lit_nil, lit_str, lit_uint, local, m_const, m_imm, m_mutbl, mac_, mac_aq, mac_ellipsis, mac_invoc, mac_invoc_tt, mac_var, matcher, match_nonterminal, match_seq, match_tok, method, mode, mt, mul, mutability, named_field, neg, noreturn, not, pat, pat_box, pat_enum, pat_ident, pat_lit, pat_range, pat_rec, pat_struct, pat_tup, pat_uniq, pat_wild, path, private, proto, proto_bare, proto_block, proto_box, proto_uniq, provided, public, pure_fn, purity, re_anon, re_named, region, rem, required, ret_style, return_val, self_ty, shl, shr, stmt, stmt_decl, stmt_expr, stmt_semi, struct_def, struct_field, struct_variant_kind, subtract, sty_box, sty_by_ref, sty_region, sty_static, sty_uniq, sty_value, token_tree, trait_method, trait_ref, tt_delim, tt_seq, tt_tok, tt_nonterminal, ty, ty_, ty_bot, ty_box, ty_field, ty_fn, ty_infer, ty_mac, ty_method, ty_nil, ty_param, ty_param_bound, ty_path, ty_ptr, ty_rec, ty_rptr, ty_tup, ty_u32, ty_uniq, ty_vec, ty_fixed_length, tuple_variant_kind, unchecked_blk, uniq, unnamed_field, unsafe_blk, unsafe_fn, variant, view_item, view_item_, view_item_export, view_item_import, view_item_use, view_path, view_path_glob, view_path_list, view_path_simple, visibility, vstore, vstore_box, vstore_fixed, vstore_slice, vstore_uniq}; export file_type; export parser; export CRATE_FILE; export SOURCE_FILE; // FIXME (#1893): 
// #ast expects to find this here but it's actually
// defined in `parse` Fixing this will be easier when we have export
// decls on individual items -- then parse can export this publicly, and
// everything else crate-visibly.
import parse_from_source_str;
export parse_from_source_str;
export item_or_view_item, iovi_none, iovi_view_item, iovi_item;

// Restrictions on which expression forms may be parsed at the current
// position; used to disambiguate the grammar (e.g. a leading `|` / `||`
// of a closure vs. the bitwise/logical-or binary operators -- see the
// checks against `self.restriction` in parse_more_binops).
enum restriction {
    UNRESTRICTED,
    RESTRICT_STMT_EXPR,
    RESTRICT_NO_CALL_EXPRS,
    RESTRICT_NO_BAR_OP,
    RESTRICT_NO_BAR_OR_DOUBLEBAR_OP,
}

// Kind of file the parser was started on (crate file vs. ordinary
// source file); stored in the parser's `file_type` field.
enum file_type {
    CRATE_FILE,
    SOURCE_FILE,
}

// We don't allow single-entry tuples in the true AST; that indicates a
// parenthesized expression. However, we preserve them temporarily while
// parsing because `(while{...})+3` parses differently from `while{...}+3`.
//
// To reflect the fact that the @expr is not a true expr that should be
// part of the AST, we wrap such expressions in the pexpr enum. They
// can then be converted to true expressions by a call to `to_expr()`.
enum pexpr {
    pexpr(@expr),
}

// A member of a class body: either a field or a method.
enum class_member {
    field_member(@struct_field),
    method_member(@method)
}

/* So that we can distinguish a class ctor or dtor from other class
   members */
enum class_contents {
    ctor_decl(fn_decl, ~[attribute], blk, codemap::span),
    dtor_decl(blk, ~[attribute], codemap::span),
    members(~[@class_member])
}

// A formal argument, or a `move`/`copy` capture-clause item; the two
// may appear in the same positions in argument lists (see
// parse_capture_item_or / parse_fn_block_arg).
type arg_or_capture_item = Either<arg, capture_item>;
// Name, item node, and optional extra attributes produced while
// parsing an item.
type item_info = (ident, item_, option<~[attribute]>);

// Result of trying to parse an item in a position where a view item
// may also appear.
enum item_or_view_item {
    iovi_none,
    iovi_item(@item),
    iovi_view_item(@view_item)
}

// Which mixtures of view items and ordinary items a given block
// context accepts.
enum view_item_parse_mode {
    VIEW_ITEMS_AND_ITEMS_ALLOWED,
    VIEW_ITEMS_ALLOWED,
    IMPORTS_AND_ITEMS_ALLOWED
}

/* The expr situation is not as complex as I thought it would be.
The important thing is to make sure that lookahead doesn't balk
at INTERPOLATED tokens */
macro_rules!
maybe_whole_expr { {$p:expr} => { match copy $p.token { INTERPOLATED(token::nt_expr(e)) => { $p.bump(); return pexpr(e); } INTERPOLATED(token::nt_path(pt)) => { $p.bump(); return $p.mk_pexpr($p.span.lo, $p.span.lo, expr_path(pt)); } _ => () }} } macro_rules! maybe_whole { {$p:expr, $constructor:ident} => { match copy $p.token { INTERPOLATED(token::$constructor(x)) => { $p.bump(); return x; } _ => () }} ; {deref $p:expr, $constructor:ident} => { match copy $p.token { INTERPOLATED(token::$constructor(x)) => { $p.bump(); return *x; } _ => () }} ; {some $p:expr, $constructor:ident} => { match copy $p.token { INTERPOLATED(token::$constructor(x)) => { $p.bump(); return some(x); } _ => () }} ; {iovi $p:expr, $constructor:ident} => { match copy $p.token { INTERPOLATED(token::$constructor(x)) => { $p.bump(); return iovi_item(x); } _ => () }} ; {pair_empty $p:expr, $constructor:ident} => { match copy $p.token { INTERPOLATED(token::$constructor(x)) => { $p.bump(); return (~[], x); } _ => () }} } pure fn maybe_append(+lhs: ~[attribute], rhs: option<~[attribute]>) -> ~[attribute] { match rhs { none => lhs, some(attrs) => vec::append(lhs, attrs) } } /* ident is handled by common.rs */ struct parser { let sess: parse_sess; let cfg: crate_cfg; let file_type: file_type; let mut token: token::token; let mut span: span; let mut last_span: span; let mut buffer: [mut {tok: token::token, sp: span}]/4; let mut buffer_start: int; let mut buffer_end: int; let mut restriction: restriction; let mut quote_depth: uint; // not (yet) related to the quasiquoter let reader: reader; let keywords: hashmap<~str, ()>; let restricted_keywords: hashmap<~str, ()>; new(sess: parse_sess, cfg: ast::crate_cfg, +rdr: reader, ftype: file_type) { self.reader <- rdr; let tok0 = self.reader.next_token(); let span0 = tok0.sp; self.sess = sess; self.cfg = cfg; self.file_type = ftype; self.token = tok0.tok; self.span = span0; self.last_span = span0; self.buffer = [mut {tok: tok0.tok, sp: span0}, {tok: tok0.tok, sp: 
span0}, {tok: tok0.tok, sp: span0}, {tok: tok0.tok, sp: span0} ]/4; self.buffer_start = 0; self.buffer_end = 0; self.restriction = UNRESTRICTED; self.quote_depth = 0u; self.keywords = token::keyword_table(); self.restricted_keywords = token::restricted_keyword_table(); } drop {} /* do not copy the parser; its state is tied to outside state */ fn bump() { self.last_span = self.span; let next = if self.buffer_start == self.buffer_end { self.reader.next_token() } else { let next = self.buffer[self.buffer_start]; self.buffer_start = (self.buffer_start + 1) & 3; next }; self.token = next.tok; self.span = next.sp; } fn swap(next: token::token, lo: uint, hi: uint) { self.token = next; self.span = mk_sp(lo, hi); } fn buffer_length() -> int { if self.buffer_start <= self.buffer_end { return self.buffer_end - self.buffer_start; } return (4 - self.buffer_start) + self.buffer_end; } fn look_ahead(distance: uint) -> token::token { let dist = distance as int; while self.buffer_length() < dist { self.buffer[self.buffer_end] = self.reader.next_token(); self.buffer_end = (self.buffer_end + 1) & 3; } return copy self.buffer[(self.buffer_start + dist - 1) & 3].tok; } fn fatal(m: ~str) -> ! { self.sess.span_diagnostic.span_fatal(copy self.span, m) } fn span_fatal(sp: span, m: ~str) -> ! { self.sess.span_diagnostic.span_fatal(sp, m) } fn span_note(sp: span, m: ~str) { self.sess.span_diagnostic.span_note(sp, m) } fn bug(m: ~str) -> ! 
{ self.sess.span_diagnostic.span_bug(copy self.span, m) } fn warn(m: ~str) { self.sess.span_diagnostic.span_warn(copy self.span, m) } pure fn get_str(i: token::str_num) -> @~str { self.reader.interner().get(i) } fn get_id() -> node_id { next_node_id(self.sess) } fn parse_ty_fn(purity: ast::purity) -> ty_ { let proto, bounds; if self.eat_keyword(~"extern") { self.expect_keyword(~"fn"); proto = ast::proto_bare; bounds = @~[]; } else { self.expect_keyword(~"fn"); proto = self.parse_fn_ty_proto(); bounds = self.parse_optional_ty_param_bounds(); }; ty_fn(proto, bounds, self.parse_ty_fn_decl(purity)) } fn parse_ty_fn_decl(purity: ast::purity) -> fn_decl { let inputs = do self.parse_unspanned_seq( token::LPAREN, token::RPAREN, seq_sep_trailing_disallowed(token::COMMA)) |p| { p.parse_arg_general(false) }; let (ret_style, ret_ty) = self.parse_ret_ty(); return {inputs: inputs, output: ret_ty, purity: purity, cf: ret_style}; } fn parse_trait_methods() -> ~[trait_method] { do self.parse_unspanned_seq(token::LBRACE, token::RBRACE, seq_sep_none()) |p| { let attrs = p.parse_outer_attributes(); let lo = p.span.lo; let is_static = p.parse_staticness(); let static_sty = spanned(lo, p.span.hi, sty_static); let pur = p.parse_fn_purity(); // NB: at the moment, trait methods are public by default; this // could change. let vis = p.parse_visibility(); let ident = p.parse_method_name(); let tps = p.parse_ty_params(); let (self_ty, d, _) = do self.parse_fn_decl_with_self(pur) |p| { // This is somewhat dubious; We don't want to allow argument // names to be left off if there is a definition... either::Left(p.parse_arg_general(false)) }; // XXX: Wrong. 
Shouldn't allow both static and self_ty let self_ty = if is_static { static_sty } else { self_ty }; let hi = p.last_span.hi; debug!{"parse_trait_methods(): trait method signature ends in \ `%s`", token_to_str(p.reader, p.token)}; match p.token { token::SEMI => { p.bump(); debug!{"parse_trait_methods(): parsing required method"}; // NB: at the moment, visibility annotations on required // methods are ignored; this could change. required({ident: ident, attrs: attrs, decl: {purity: pur with d}, tps: tps, self_ty: self_ty, id: p.get_id(), span: mk_sp(lo, hi)}) } token::LBRACE => { debug!{"parse_trait_methods(): parsing provided method"}; let (inner_attrs, body) = p.parse_inner_attrs_and_block(true); let attrs = vec::append(attrs, inner_attrs); provided(@{ident: ident, attrs: attrs, tps: tps, self_ty: self_ty, decl: d, body: body, id: p.get_id(), span: mk_sp(lo, hi), self_id: p.get_id(), vis: vis}) } _ => { p.fatal(~"expected `;` or `}` but found `" + token_to_str(p.reader, p.token) + ~"`"); } } } } fn parse_mt() -> mt { let mutbl = self.parse_mutability(); let t = self.parse_ty(false); return {ty: t, mutbl: mutbl}; } fn parse_ty_field() -> ty_field { let lo = self.span.lo; let mutbl = self.parse_mutability(); let id = self.parse_ident(); self.expect(token::COLON); let ty = self.parse_ty(false); return spanned(lo, ty.span.hi, { ident: id, mt: {ty: ty, mutbl: mutbl} }); } fn parse_ret_ty() -> (ret_style, @ty) { return if self.eat(token::RARROW) { let lo = self.span.lo; if self.eat(token::NOT) { (noreturn, @{id: self.get_id(), node: ty_bot, span: mk_sp(lo, self.last_span.hi)}) } else { (return_val, self.parse_ty(false)) } } else { let pos = self.span.lo; (return_val, @{id: self.get_id(), node: ty_nil, span: mk_sp(pos, pos)}) } } fn region_from_name(s: option<@~str>) -> @region { let r = match s { some (string) => re_named(string), none => re_anon }; @{id: self.get_id(), node: r} } // Parses something like "&x" fn parse_region() -> @region { 
self.expect(token::BINOP(token::AND)); match copy self.token { token::IDENT(sid, _) => { self.bump(); let n = self.get_str(sid); self.region_from_name(some(n)) } _ => { self.region_from_name(none) } } } // Parses something like "&x/" (note the trailing slash) fn parse_region_with_sep() -> @region { let name = match copy self.token { token::IDENT(sid, _) => { if self.look_ahead(1u) == token::BINOP(token::SLASH) { self.bump(); self.bump(); some(self.get_str(sid)) } else { none } } _ => { none } }; self.region_from_name(name) } fn parse_ty(colons_before_params: bool) -> @ty { maybe_whole!{self, nt_ty}; let lo = self.span.lo; match self.maybe_parse_dollar_mac() { some(e) => { return @{id: self.get_id(), node: ty_mac(spanned(lo, self.span.hi, e)), span: mk_sp(lo, self.span.hi)}; } none => () } let t = if self.token == token::LPAREN { self.bump(); if self.token == token::RPAREN { self.bump(); ty_nil } else { let mut ts = ~[self.parse_ty(false)]; while self.token == token::COMMA { self.bump(); vec::push(ts, self.parse_ty(false)); } let t = if vec::len(ts) == 1u { ts[0].node } else { ty_tup(ts) }; self.expect(token::RPAREN); t } } else if self.token == token::AT { self.bump(); ty_box(self.parse_mt()) } else if self.token == token::TILDE { self.bump(); ty_uniq(self.parse_mt()) } else if self.token == token::BINOP(token::STAR) { self.bump(); ty_ptr(self.parse_mt()) } else if self.token == token::LBRACE { let elems = self.parse_unspanned_seq( token::LBRACE, token::RBRACE, seq_sep_trailing_allowed(token::COMMA), |p| p.parse_ty_field()); if vec::len(elems) == 0u { self.unexpected_last(token::RBRACE); } ty_rec(elems) } else if self.token == token::LBRACKET { self.expect(token::LBRACKET); let mut t = ty_vec(self.parse_mt()); // Parse the `* 3` in `[ int * 3 ]` match self.maybe_parse_fixed_vstore_with_star() { none => {} some(suffix) => { t = ty_fixed_length(@{ id: self.get_id(), node: t, span: mk_sp(lo, self.last_span.hi) }, suffix) } } self.expect(token::RBRACKET); t } else if 
self.token == token::BINOP(token::AND) { self.bump(); let region = self.parse_region_with_sep(); let mt = self.parse_mt(); ty_rptr(region, mt) } else if self.eat_keyword(~"pure") { self.parse_ty_fn(ast::pure_fn) } else if self.eat_keyword(~"unsafe") { self.parse_ty_fn(ast::unsafe_fn) } else if self.is_keyword(~"fn") { self.parse_ty_fn(ast::impure_fn) } else if self.eat_keyword(~"extern") { self.expect_keyword(~"fn"); ty_fn(proto_bare, @~[], self.parse_ty_fn_decl(ast::impure_fn)) } else if self.token == token::MOD_SEP || is_ident(self.token) { let path = self.parse_path_with_tps(colons_before_params); ty_path(path, self.get_id()) } else { self.fatal(~"expected type"); }; let sp = mk_sp(lo, self.last_span.hi); return @{id: self.get_id(), node: match self.maybe_parse_fixed_vstore() { // Consider a fixed vstore suffix (/N or /_) none => t, some(v) => { ty_fixed_length(@{id: self.get_id(), node:t, span: sp}, v) } }, span: sp} } fn parse_arg_mode() -> mode { if self.eat(token::BINOP(token::AND)) { expl(by_mutbl_ref) } else if self.eat(token::BINOP(token::MINUS)) { expl(by_move) } else if self.eat(token::ANDAND) { expl(by_ref) } else if self.eat(token::BINOP(token::PLUS)) { if self.eat(token::BINOP(token::PLUS)) { expl(by_val) } else { expl(by_copy) } } else { infer(self.get_id()) } } fn parse_capture_item_or(parse_arg_fn: fn(parser) -> arg_or_capture_item) -> arg_or_capture_item { fn parse_capture_item(p:parser, is_move: bool) -> capture_item { let sp = mk_sp(p.span.lo, p.span.hi); let ident = p.parse_ident(); @{id: p.get_id(), is_move: is_move, name: ident, span: sp} } if self.eat_keyword(~"move") { either::Right(parse_capture_item(self, true)) } else if self.eat_keyword(~"copy") { either::Right(parse_capture_item(self, false)) } else { parse_arg_fn(self) } } // This version of parse arg doesn't necessarily require // identifier names. 
// Parse a single fn argument: an argument-passing mode (e.g. `&`, `-`,
// `&&`, `+`; see parse_arg_mode), then a name and `:` (mandatory iff
// `require_name`), then a type. When the name is optional it is only
// taken if a plain ident is directly followed by `:`; otherwise the
// ident is left empty.
fn parse_arg_general(require_name: bool) -> arg {
    let m = self.parse_arg_mode();
    let i = if require_name {
        // Named form: `ident: type`.
        let name = self.parse_value_ident();
        self.expect(token::COLON);
        name
    } else {
        // Optional-name form: look ahead one token for the `:` before
        // committing to reading an argument name.
        if is_plain_ident(self.token)
            && self.look_ahead(1u) == token::COLON {
            let name = self.parse_value_ident();
            self.bump();
            name
        } else { @~"" }
    };
    let t = self.parse_ty(false);
    {mode: m, ty: t, ident: i, id: self.get_id()}
}

// Parse one ordinary (named) fn argument.
fn parse_arg() -> arg_or_capture_item {
    either::Left(self.parse_arg_general(true))
}

// Parse an argument, or a `move`/`copy` capture item if one is present.
fn parse_arg_or_capture_item() -> arg_or_capture_item {
    self.parse_capture_item_or(|p| p.parse_arg())
}

// Parse an fn-block (lambda) argument: the `: type` annotation is
// optional and defaults to ty_infer when absent.
fn parse_fn_block_arg() -> arg_or_capture_item {
    do self.parse_capture_item_or |p| {
        let m = p.parse_arg_mode();
        let i = p.parse_value_ident();
        let t = if p.eat(token::COLON) {
            p.parse_ty(false)
        } else {
            @{id: p.get_id(), node: ty_infer,
              span: mk_sp(p.span.lo, p.span.hi)}
        };
        either::Left({mode: m, ty: t, ident: i, id: p.get_id()})
    }
}

// If the next token is `$`, parse either `$N` (a macro variable,
// mac_var) or `$( expr )` (mac_aq); otherwise return none without
// consuming anything.
fn maybe_parse_dollar_mac() -> option<mac_> {
    match copy self.token {
        token::DOLLAR => {
            let lo = self.span.lo;
            self.bump();
            match copy self.token {
                token::LIT_INT_UNSUFFIXED(num) => {
                    self.bump();
                    some(mac_var(num as uint))
                }
                token::LPAREN => {
                    self.bump();
                    let e = self.parse_expr();
                    self.expect(token::RPAREN);
                    let hi = self.last_span.hi;
                    some(mac_aq(mk_sp(lo,hi), e))
                }
                _ => {
                    self.fatal(~"expected `(` or unsuffixed integer literal");
                }
            }
        }
        _ => none
    }
}

// Parse an optional fixed-vstore suffix: `/_` yields some(none) (no
// explicit length), `/N` yields some(some(N)); with no `/` at all the
// result is none and nothing is consumed.
fn maybe_parse_fixed_vstore() -> option<option<uint>> {
    if self.token == token::BINOP(token::SLASH) {
        self.bump();
        match copy self.token {
            token::UNDERSCORE => { self.bump(); some(none) }
            token::LIT_INT_UNSUFFIXED(i) if i >= 0i64 => {
                self.bump();
                some(some(i as uint))
            }
            _ => none
        }
    } else { none }
}

// Same as maybe_parse_fixed_vstore, but for the `*`-separated form used
// inside fixed-length vector types such as `[ int * 3 ]`.
fn maybe_parse_fixed_vstore_with_star() -> option<option<uint>> {
    if self.eat(token::BINOP(token::STAR)) {
        match copy self.token {
            token::UNDERSCORE => { self.bump(); some(none) }
            token::LIT_INT_UNSUFFIXED(i) if i >= 0i64 => {
                self.bump();
                some(some(i as uint))
            }
            _ => none
        }
    } else { none }
}

fn
lit_from_token(tok: token::token) -> lit_ { match tok { token::LIT_INT(i, it) => lit_int(i, it), token::LIT_UINT(u, ut) => lit_uint(u, ut), token::LIT_INT_UNSUFFIXED(i) => lit_int_unsuffixed(i), token::LIT_FLOAT(s, ft) => lit_float(self.get_str(s), ft), token::LIT_STR(s) => lit_str(self.get_str(s)), token::LPAREN => { self.expect(token::RPAREN); lit_nil } _ => self.unexpected_last(tok) } } fn parse_lit() -> lit { let lo = self.span.lo; let lit = if self.eat_keyword(~"true") { lit_bool(true) } else if self.eat_keyword(~"false") { lit_bool(false) } else { let tok = self.token; self.bump(); self.lit_from_token(tok) }; return {node: lit, span: mk_sp(lo, self.last_span.hi)}; } fn parse_path_without_tps() -> @path { self.parse_path_without_tps_(|p| p.parse_ident(), |p| p.parse_ident()) } fn parse_path_without_tps_( parse_ident: fn(parser) -> ident, parse_last_ident: fn(parser) -> ident) -> @path { maybe_whole!{self, nt_path}; let lo = self.span.lo; let global = self.eat(token::MOD_SEP); let mut ids = ~[]; loop { let is_not_last = self.look_ahead(2u) != token::LT && self.look_ahead(1u) == token::MOD_SEP; if is_not_last { vec::push(ids, parse_ident(self)); self.expect(token::MOD_SEP); } else { vec::push(ids, parse_last_ident(self)); break; } } @{span: mk_sp(lo, self.last_span.hi), global: global, idents: ids, rp: none, types: ~[]} } fn parse_value_path() -> @path { self.parse_path_without_tps_(|p| p.parse_ident(), |p| p.parse_value_ident()) } fn parse_path_with_tps(colons: bool) -> @path { debug!{"parse_path_with_tps(colons=%b)", colons}; maybe_whole!{self, nt_path}; let lo = self.span.lo; let path = self.parse_path_without_tps(); if colons && !self.eat(token::MOD_SEP) { return path; } // Parse the region parameter, if any, which will // be written "foo/&x" let rp = { // Hack: avoid parsing vstores like /@ and /~. This is painful // because the notation for region bounds and the notation for // vstores is... um... the same. I guess that's my fault. 
This // is still not ideal as for &str we end up parsing more than we // ought to and have to sort it out later. if self.token == token::BINOP(token::SLASH) && self.look_ahead(1u) == token::BINOP(token::AND) { self.expect(token::BINOP(token::SLASH)); some(self.parse_region()) } else { none } }; // Parse any type parameters which may appear: let tps = { if self.token == token::LT { self.parse_seq_lt_gt(some(token::COMMA), |p| p.parse_ty(false)) } else { {node: ~[], span: path.span} } }; return @{span: mk_sp(lo, tps.span.hi), rp: rp, types: tps.node with *path}; } fn parse_mutability() -> mutability { if self.eat_keyword(~"mut") { m_mutbl } else if self.eat_keyword(~"const") { m_const } else { m_imm } } fn parse_field(sep: token::token) -> field { let lo = self.span.lo; let m = self.parse_mutability(); let i = self.parse_ident(); self.expect(sep); let e = self.parse_expr(); return spanned(lo, e.span.hi, {mutbl: m, ident: i, expr: e}); } fn mk_expr(lo: uint, hi: uint, +node: expr_) -> @expr { return @{id: self.get_id(), callee_id: self.get_id(), node: node, span: mk_sp(lo, hi)}; } fn mk_mac_expr(lo: uint, hi: uint, m: mac_) -> @expr { return @{id: self.get_id(), callee_id: self.get_id(), node: expr_mac({node: m, span: mk_sp(lo, hi)}), span: mk_sp(lo, hi)}; } fn mk_lit_u32(i: u32) -> @expr { let span = self.span; let lv_lit = @{node: lit_uint(i as u64, ty_u32), span: span}; return @{id: self.get_id(), callee_id: self.get_id(), node: expr_lit(lv_lit), span: span}; } fn mk_pexpr(lo: uint, hi: uint, node: expr_) -> pexpr { return pexpr(self.mk_expr(lo, hi, node)); } fn to_expr(e: pexpr) -> @expr { match e.node { expr_tup(es) if vec::len(es) == 1u => es[0u], _ => *e } } fn parse_bottom_expr() -> pexpr { maybe_whole_expr!{self}; let lo = self.span.lo; let mut hi = self.span.hi; let mut ex: expr_; match self.maybe_parse_dollar_mac() { some(x) => return pexpr(self.mk_mac_expr(lo, self.span.hi, x)), _ => () } if self.token == token::LPAREN { self.bump(); if self.token == 
token::RPAREN { hi = self.span.hi; self.bump(); let lit = @spanned(lo, hi, lit_nil); return self.mk_pexpr(lo, hi, expr_lit(lit)); } let mut es = ~[self.parse_expr()]; while self.token == token::COMMA { self.bump(); vec::push(es, self.parse_expr()); } hi = self.span.hi; self.expect(token::RPAREN); // Note: we retain the expr_tup() even for simple // parenthesized expressions, but only for a "little while". // This is so that wrappers around parse_bottom_expr() // can tell whether the expression was parenthesized or not, // which affects expr_is_complete(). return self.mk_pexpr(lo, hi, expr_tup(es)); } else if self.token == token::LBRACE { if self.looking_at_record_literal() { ex = self.parse_record_literal(); hi = self.span.hi; } else { self.bump(); let blk = self.parse_block_tail(lo, default_blk); return self.mk_pexpr(blk.span.lo, blk.span.hi, expr_block(blk)); } } else if token::is_bar(self.token) { return pexpr(self.parse_lambda_expr()); } else if self.eat_keyword(~"if") { return pexpr(self.parse_if_expr()); } else if self.eat_keyword(~"for") { return pexpr(self.parse_sugary_call_expr(~"for", expr_loop_body)); } else if self.eat_keyword(~"do") { return pexpr(self.parse_sugary_call_expr(~"do", expr_do_body)); } else if self.eat_keyword(~"while") { return pexpr(self.parse_while_expr()); } else if self.eat_keyword(~"loop") { return pexpr(self.parse_loop_expr()); } else if self.eat_keyword(~"match") { return pexpr(self.parse_alt_expr()); } else if self.eat_keyword(~"fn") { let proto = self.parse_fn_ty_proto(); match proto { proto_bare => self.fatal(~"fn expr are deprecated, use fn@"), _ => { /* fallthrough */ } } return pexpr(self.parse_fn_expr(proto)); } else if self.eat_keyword(~"unchecked") { return pexpr(self.parse_block_expr(lo, unchecked_blk)); } else if self.eat_keyword(~"unsafe") { return pexpr(self.parse_block_expr(lo, unsafe_blk)); } else if self.token == token::LBRACKET { self.bump(); let mutbl = self.parse_mutability(); if self.token == token::RBRACKET { 
// Empty vector. self.bump(); ex = expr_vec(~[], mutbl); } else { // Nonempty vector. let first_expr = self.parse_expr(); if self.token == token::COMMA && self.look_ahead(1) == token::DOTDOT { // Repeating vector syntax: [ 0, ..512 ] self.bump(); self.bump(); let count = self.parse_expr(); self.expect(token::RBRACKET); ex = expr_repeat(first_expr, count, mutbl); } else if self.token == token::COMMA { // Vector with two or more elements. self.bump(); let remaining_exprs = self.parse_seq_to_end(token::RBRACKET, seq_sep_trailing_allowed(token::COMMA), |p| p.parse_expr()); ex = expr_vec(~[first_expr] + remaining_exprs, mutbl); } else { // Vector with one element. self.expect(token::RBRACKET); ex = expr_vec(~[first_expr], mutbl); } } hi = self.span.hi; } else if self.token == token::ELLIPSIS { self.bump(); return pexpr(self.mk_mac_expr(lo, self.span.hi, mac_ellipsis)); } else if self.token == token::POUND { let ex_ext = self.parse_syntax_ext(); hi = ex_ext.span.hi; ex = ex_ext.node; } else if self.eat_keyword(~"fail") { if can_begin_expr(self.token) { let e = self.parse_expr(); hi = e.span.hi; ex = expr_fail(some(e)); } else { ex = expr_fail(none); } } else if self.eat_keyword(~"log") { self.expect(token::LPAREN); let lvl = self.parse_expr(); self.expect(token::COMMA); let e = self.parse_expr(); ex = expr_log(2, lvl, e); hi = self.span.hi; self.expect(token::RPAREN); } else if self.eat_keyword(~"assert") { let e = self.parse_expr(); ex = expr_assert(e); hi = e.span.hi; } else if self.eat_keyword(~"return") { if can_begin_expr(self.token) { let e = self.parse_expr(); hi = e.span.hi; ex = expr_ret(some(e)); } else { ex = expr_ret(none); } } else if self.eat_keyword(~"break") { if is_ident(self.token) { ex = expr_break(some(self.parse_ident())); } else { ex = expr_break(none); } hi = self.span.hi; } else if self.eat_keyword(~"again") { if is_ident(self.token) { ex = expr_again(some(self.parse_ident())); } else { ex = expr_again(none); } hi = self.span.hi; } else if 
self.eat_keyword(~"copy") { let e = self.parse_expr(); ex = expr_copy(e); hi = e.span.hi; } else if self.eat_keyword(~"move") { let e = self.parse_expr(); ex = expr_unary_move(e); hi = e.span.hi; } else if self.token == token::MOD_SEP || is_ident(self.token) && !self.is_keyword(~"true") && !self.is_keyword(~"false") { let pth = self.parse_path_with_tps(true); /* `!`, as an operator, is prefix, so we know this isn't that */ if self.token == token::NOT { self.bump(); let tts = match self.token { token::LPAREN | token::LBRACE | token::LBRACKET => { let ket = token::flip_delimiter(self.token); self.parse_unspanned_seq(copy self.token, ket, seq_sep_none(), |p| p.parse_token_tree()) } _ => self.fatal(~"expected open delimiter") }; let hi = self.span.hi; return pexpr(self.mk_mac_expr( lo, hi, mac_invoc_tt(pth, tts))); } else if self.token == token::LBRACE { // This might be a struct literal. if self.looking_at_record_literal() { // It's a struct literal. self.bump(); let mut fields = ~[]; vec::push(fields, self.parse_field(token::COLON)); while self.token != token::RBRACE && !self.is_keyword(~"with") { self.expect(token::COMMA); if self.token == token::RBRACE || self.is_keyword(~"with") || self.token == token::DOTDOT { // Accept an optional trailing comma. break; } vec::push(fields, self.parse_field(token::COLON)); } let base; if self.eat_keyword(~"with") || self.eat(token::DOTDOT) { base = some(self.parse_expr()); } else { base = none; } hi = pth.span.hi; self.expect(token::RBRACE); ex = expr_struct(pth, fields, base); return self.mk_pexpr(lo, hi, ex); } } hi = pth.span.hi; ex = expr_path(pth); } else { let lit = self.parse_lit(); hi = lit.span.hi; ex = expr_lit(@lit); } // Vstore is legal following expr_lit(lit_str(...)) and expr_vec(...) // only. 
match ex { expr_lit(@{node: lit_str(_), span: _}) | expr_vec(_, _) => match self.maybe_parse_fixed_vstore() { none => (), some(v) => { hi = self.span.hi; ex = expr_vstore(self.mk_expr(lo, hi, ex), vstore_fixed(v)); } }, _ => () } return self.mk_pexpr(lo, hi, ex); } fn parse_block_expr(lo: uint, blk_mode: blk_check_mode) -> @expr { self.expect(token::LBRACE); let blk = self.parse_block_tail(lo, blk_mode); return self.mk_expr(blk.span.lo, blk.span.hi, expr_block(blk)); } fn parse_syntax_ext() -> @expr { let lo = self.span.lo; self.expect(token::POUND); return self.parse_syntax_ext_naked(lo); } fn parse_syntax_ext_naked(lo: uint) -> @expr { match self.token { token::IDENT(_, _) => (), _ => self.fatal(~"expected a syntax expander name") } let pth = self.parse_path_without_tps(); //temporary for a backwards-compatible cycle: let sep = seq_sep_trailing_disallowed(token::COMMA); let mut e = none; if (self.token == token::LPAREN || self.token == token::LBRACKET) { let lo = self.span.lo; let es = if self.token == token::LPAREN { self.parse_unspanned_seq(token::LPAREN, token::RPAREN, sep, |p| p.parse_expr()) } else { self.parse_unspanned_seq(token::LBRACKET, token::RBRACKET, sep, |p| p.parse_expr()) }; let hi = self.span.hi; e = some(self.mk_expr(lo, hi, expr_vec(es, m_imm))); } let mut b = none; if self.token == token::LBRACE { self.bump(); let lo = self.span.lo; let mut depth = 1u; while (depth > 0u) { match (self.token) { token::LBRACE => depth += 1u, token::RBRACE => depth -= 1u, token::EOF => self.fatal(~"unexpected EOF in macro body"), _ => () } self.bump(); } let hi = self.last_span.lo; b = some({span: mk_sp(lo,hi)}); } return self.mk_mac_expr(lo, self.span.hi, mac_invoc(pth, e, b)); } fn parse_dot_or_call_expr() -> pexpr { let b = self.parse_bottom_expr(); self.parse_dot_or_call_expr_with(b) } fn permits_call() -> bool { return self.restriction != RESTRICT_NO_CALL_EXPRS; } fn parse_dot_or_call_expr_with(e0: pexpr) -> pexpr { let mut e = e0; let lo = e.span.lo; let 
mut hi; loop { // expr.f if self.eat(token::DOT) { match copy self.token { token::IDENT(i, _) => { hi = self.span.hi; self.bump(); let tys = if self.eat(token::MOD_SEP) { self.expect(token::LT); self.parse_seq_to_gt(some(token::COMMA), |p| p.parse_ty(false)) } else { ~[] }; e = self.mk_pexpr(lo, hi, expr_field(self.to_expr(e), self.get_str(i), tys)); } _ => self.unexpected() } again; } if self.expr_is_complete(e) { break; } match copy self.token { // expr(...) token::LPAREN if self.permits_call() => { let es = self.parse_unspanned_seq( token::LPAREN, token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), |p| p.parse_expr()); hi = self.span.hi; let nd = expr_call(self.to_expr(e), es, false); e = self.mk_pexpr(lo, hi, nd); } // expr[...] token::LBRACKET => { self.bump(); let ix = self.parse_expr(); hi = ix.span.hi; self.expect(token::RBRACKET); e = self.mk_pexpr(lo, hi, expr_index(self.to_expr(e), ix)); } _ => return e } } return e; } fn parse_sep_and_zerok() -> (option<token::token>, bool) { if self.token == token::BINOP(token::STAR) || self.token == token::BINOP(token::PLUS) { let zerok = self.token == token::BINOP(token::STAR); self.bump(); return (none, zerok); } else { let sep = self.token; self.bump(); if self.token == token::BINOP(token::STAR) || self.token == token::BINOP(token::PLUS) { let zerok = self.token == token::BINOP(token::STAR); self.bump(); return (some(sep), zerok); } else { self.fatal(~"expected `*` or `+`"); } } } fn parse_token_tree() -> token_tree { maybe_whole!{deref self, nt_tt}; fn parse_tt_tok(p: parser, delim_ok: bool) -> token_tree { match p.token { token::RPAREN | token::RBRACE | token::RBRACKET if !delim_ok => { p.fatal(~"incorrect close delimiter: `" + token_to_str(p.reader, p.token) + ~"`"); } token::EOF => { p.fatal(~"file ended in the middle of a macro invocation"); } /* we ought to allow different depths of unquotation */ token::DOLLAR if p.quote_depth > 0u => { p.bump(); let sp = p.span; if p.token == token::LPAREN { let seq = 
p.parse_seq(token::LPAREN, token::RPAREN, seq_sep_none(), |p| p.parse_token_tree()); let (s, z) = p.parse_sep_and_zerok(); return tt_seq(mk_sp(sp.lo ,p.span.hi), seq.node, s, z); } else { return tt_nonterminal(sp, p.parse_ident()); } } _ => { /* ok */ } } let res = tt_tok(p.span, p.token); p.bump(); return res; } return match self.token { token::LPAREN | token::LBRACE | token::LBRACKET => { let ket = token::flip_delimiter(self.token); tt_delim(vec::append( ~[parse_tt_tok(self, true)], vec::append( self.parse_seq_to_before_end( ket, seq_sep_none(), |p| p.parse_token_tree()), ~[parse_tt_tok(self, true)]))) } _ => parse_tt_tok(self, false) }; } fn parse_matchers() -> ~[matcher] { // unification of matchers and token_trees would vastly improve // the interpolation of matchers maybe_whole!{self, nt_matchers}; let name_idx = @mut 0u; return match self.token { token::LBRACE | token::LPAREN | token::LBRACKET => { self.parse_matcher_subseq(name_idx, copy self.token, token::flip_delimiter(self.token)) } _ => self.fatal(~"expected open delimiter") } } // This goofy function is necessary to correctly match parens in matchers. // Otherwise, `$( ( )` would be a valid matcher, and `$( () )` would be // invalid. It's similar to common::parse_seq. 
fn parse_matcher_subseq(name_idx: @mut uint, bra: token::token,
                        ket: token::token) -> ~[matcher] {
    let mut ret_val = ~[];
    // Tracks paren nesting so a `(` inside the matcher body does not
    // make an early `)` terminate the sequence.
    let mut lparens = 0u;

    self.expect(bra);

    while self.token != ket || lparens > 0u {
        if self.token == token::LPAREN { lparens += 1u; }
        if self.token == token::RPAREN { lparens -= 1u; }
        vec::push(ret_val, self.parse_matcher(name_idx));
    }

    self.bump();

    return ret_val;
}

// Parses one matcher: a `$(...)sep*` repetition, a `$name:nonterminal`
// binder, or a literal token. `name_idx` numbers the binders in order.
fn parse_matcher(name_idx: @mut uint) -> matcher {
    let lo = self.span.lo;

    let m = if self.token == token::DOLLAR {
        self.bump();
        if self.token == token::LPAREN {
            let name_idx_lo = *name_idx;
            let ms = self.parse_matcher_subseq(name_idx, token::LPAREN,
                                               token::RPAREN);
            if ms.len() == 0u {
                self.fatal(~"repetition body must be nonempty");
            }
            let (sep, zerok) = self.parse_sep_and_zerok();
            match_seq(ms, sep, zerok, name_idx_lo, *name_idx)
        } else {
            let bound_to = self.parse_ident();
            self.expect(token::COLON);
            let nt_name = self.parse_ident();
            let m = match_nonterminal(bound_to, nt_name, *name_idx);
            *name_idx += 1u;
            m
        }
    } else {
        let m = match_tok(self.token);
        self.bump();
        m
    };

    return spanned(lo, self.span.hi, m);
}

// Parses a prefix (unary) expression: `!e`, `-e`, `*e`, `&e`, `@e`, `~e`;
// falls through to parse_dot_or_call_expr for anything else.
fn parse_prefix_expr() -> pexpr {
    let lo = self.span.lo;
    let mut hi;

    let mut ex;
    match copy self.token {
      token::NOT => {
        self.bump();
        let e = self.to_expr(self.parse_prefix_expr());
        hi = e.span.hi;
        self.get_id(); // see ast_util::op_expr_callee_id
        ex = expr_unary(not, e);
      }
      token::BINOP(b) => {
        match b {
          token::MINUS => {
            self.bump();
            let e = self.to_expr(self.parse_prefix_expr());
            hi = e.span.hi;
            self.get_id(); // see ast_util::op_expr_callee_id
            ex = expr_unary(neg, e);
          }
          token::STAR => {
            self.bump();
            let e = self.to_expr(self.parse_prefix_expr());
            hi = e.span.hi;
            ex = expr_unary(deref, e);
          }
          token::AND => {
            self.bump();
            let m = self.parse_mutability();
            let e = self.to_expr(self.parse_prefix_expr());
            hi = e.span.hi;
            // HACK: turn &[...]
// into a &-evec
            ex = match e.node {
              expr_vec(*) | expr_lit(@{node: lit_str(_), span: _})
              if m == m_imm => {
                expr_vstore(e, vstore_slice(self.region_from_name(none)))
              }
              _ => expr_addr_of(m, e)
            };
          }
          _ => return self.parse_dot_or_call_expr()
        }
      }
      token::AT => {
        self.bump();
        let m = self.parse_mutability();
        let e = self.to_expr(self.parse_prefix_expr());
        hi = e.span.hi;
        // HACK: turn @[...] into a @-evec
        ex = match e.node {
          expr_vec(*) | expr_lit(@{node: lit_str(_), span: _})
          if m == m_imm => expr_vstore(e, vstore_box),
          _ => expr_unary(box(m), e)
        };
      }
      token::TILDE => {
        self.bump();
        let m = self.parse_mutability();
        let e = self.to_expr(self.parse_prefix_expr());
        hi = e.span.hi;
        // HACK: turn ~[...] into a ~-evec
        ex = match e.node {
          expr_vec(*) | expr_lit(@{node: lit_str(_), span: _})
          if m == m_imm => expr_vstore(e, vstore_uniq),
          _ => expr_unary(uniq(m), e)
        };
      }
      _ => return self.parse_dot_or_call_expr()
    }
    return self.mk_pexpr(lo, hi, ex);
}

// Entry point for binary-operator parsing.
fn parse_binops() -> @expr {
    return self.parse_more_binops(self.parse_prefix_expr(), 0u);
}

// Precedence-climbing parser for binary operators; only consumes
// operators whose precedence is greater than `min_prec`. `as` casts
// are handled as a pseudo-operator at `as_prec`.
fn parse_more_binops(plhs: pexpr, min_prec: uint) -> @expr {
    let lhs = self.to_expr(plhs);
    if self.expr_is_complete(plhs) { return lhs; }
    let peeked = self.token;
    // `|` and `||` may be lambda-argument delimiters under the current
    // restriction; in that case stop here instead of treating them as
    // bitor / logical-or.
    if peeked == token::BINOP(token::OR) &&
        (self.restriction == RESTRICT_NO_BAR_OP ||
         self.restriction == RESTRICT_NO_BAR_OR_DOUBLEBAR_OP) {
        return lhs;
    }
    if peeked == token::OROR &&
        self.restriction == RESTRICT_NO_BAR_OR_DOUBLEBAR_OP {
        return lhs;
    }
    let cur_opt = token_to_binop(peeked);
    match cur_opt {
      some(cur_op) => {
        let cur_prec = operator_prec(cur_op);
        if cur_prec > min_prec {
            self.bump();
            let expr = self.parse_prefix_expr();
            let rhs = self.parse_more_binops(expr, cur_prec);
            self.get_id(); // see ast_util::op_expr_callee_id
            let bin = self.mk_pexpr(lhs.span.lo, rhs.span.hi,
                                    expr_binary(cur_op, lhs, rhs));
            return self.parse_more_binops(bin, min_prec);
        }
      }
      _ => ()
    }
    if as_prec > min_prec && self.eat_keyword(~"as") {
        let rhs = self.parse_ty(true);
        let _as = self.mk_pexpr(lhs.span.lo,
                                rhs.span.hi, expr_cast(lhs, rhs));
        return self.parse_more_binops(_as, min_prec);
    }
    return lhs;
}

// Parses an assignment-level expression: plain `=`, compound assignment
// (`+=` and friends), move (`<-`), and swap (`<->`).
fn parse_assign_expr() -> @expr {
    let lo = self.span.lo;
    let lhs = self.parse_binops();
    match copy self.token {
      token::EQ => {
        self.bump();
        let rhs = self.parse_expr();
        return self.mk_expr(lo, rhs.span.hi, expr_assign(lhs, rhs));
      }
      token::BINOPEQ(op) => {
        self.bump();
        let rhs = self.parse_expr();
        // Map the compound-assignment token to the AST binop.
        let mut aop;
        match op {
          token::PLUS => aop = add,
          token::MINUS => aop = subtract,
          token::STAR => aop = mul,
          token::SLASH => aop = div,
          token::PERCENT => aop = rem,
          token::CARET => aop = bitxor,
          token::AND => aop = bitand,
          token::OR => aop = bitor,
          token::SHL => aop = shl,
          token::SHR => aop = shr
        }
        self.get_id(); // see ast_util::op_expr_callee_id
        return self.mk_expr(lo, rhs.span.hi,
                            expr_assign_op(aop, lhs, rhs));
      }
      token::LARROW => {
        self.bump();
        let rhs = self.parse_expr();
        return self.mk_expr(lo, rhs.span.hi, expr_move(lhs, rhs));
      }
      token::DARROW => {
        self.bump();
        let rhs = self.parse_expr();
        return self.mk_expr(lo, rhs.span.hi, expr_swap(lhs, rhs));
      }
      _ => {/* fall through */ }
    }
    return lhs;
}

// Parses `if cond { ... } [else ...]`; the `if` keyword was already
// consumed by the caller.
fn parse_if_expr() -> @expr {
    let lo = self.last_span.lo;
    let cond = self.parse_expr();
    let thn = self.parse_block();
    let mut els: option<@expr> = none;
    let mut hi = thn.span.hi;
    if self.eat_keyword(~"else") {
        let elexpr = self.parse_else_expr();
        els = some(elexpr);
        hi = elexpr.span.hi;
    }
    let q = {cond: cond, then: thn, els: els, lo: lo, hi: hi};
    return self.mk_expr(q.lo, q.hi, expr_if(q.cond, q.then, q.els));
}

// Parses a `fn` expression with the given closure protocol.
fn parse_fn_expr(proto: proto) -> @expr {
    let lo = self.last_span.lo;

    // if we want to allow fn expression argument types to be inferred in
    // the future, just have to change parse_arg to parse_fn_block_arg.
    let (decl, capture_clause) =
        self.parse_fn_decl(impure_fn,
                           |p| p.parse_arg_or_capture_item());

    let body = self.parse_block();
    return self.mk_expr(lo, body.span.hi,
                        expr_fn(proto, decl, body, capture_clause));
}

// `|args| { ...
// }` like in `do` expressions
fn parse_lambda_block_expr() -> @expr {
    self.parse_lambda_expr_(
        || {
            match self.token {
              token::BINOP(token::OR) | token::OROR => {
                self.parse_fn_block_decl()
              }
              _ => {
                // No argument list - `do foo {`
                ({
                    {
                        inputs: ~[],
                        output: @{
                            id: self.get_id(),
                            node: ty_infer,
                            span: self.span
                        },
                        purity: impure_fn,
                        cf: return_val
                    }
                },
                @~[])
              }
            }
        },
        || {
            let blk = self.parse_block();
            self.mk_expr(blk.span.lo, blk.span.hi, expr_block(blk))
        })
}

// `|args| expr`
fn parse_lambda_expr() -> @expr {
    self.parse_lambda_expr_(|| self.parse_fn_block_decl(),
                            || self.parse_expr())
}

// Common lambda-parsing helper: parses the declaration and body via the
// supplied closures, then wraps the body in a one-expression block.
fn parse_lambda_expr_(parse_decl: fn&() -> (fn_decl, capture_clause),
                      parse_body: fn&() -> @expr) -> @expr {
    let lo = self.last_span.lo;
    let (decl, captures) = parse_decl();
    let body = parse_body();
    let fakeblock = {view_items: ~[], stmts: ~[], expr: some(body),
                     id: self.get_id(), rules: default_blk};
    let fakeblock = spanned(body.span.lo, body.span.hi,
                            fakeblock);
    return self.mk_expr(lo, body.span.hi,
                        expr_fn_block(decl, fakeblock, captures));
}

// Parses what follows `else`: either another `if` or a block.
fn parse_else_expr() -> @expr {
    if self.eat_keyword(~"if") {
        return self.parse_if_expr();
    } else {
        let blk = self.parse_block();
        return self.mk_expr(blk.span.lo, blk.span.hi, expr_block(blk));
    }
}

// Parses the sugared call forms `do f { ... }` / `for f { ... }`, where
// the trailing block becomes the call's last argument (wrapped by `ctor`).
fn parse_sugary_call_expr(keyword: ~str,
                          ctor: fn(+@expr) -> expr_) -> @expr {
    let lo = self.last_span;
    // Parse the callee `foo` in
    // for foo || {
    // for foo.bar || {
    // etc, or the portion of the call expression before the lambda in
    // for foo() || {
    // or
    // for foo.bar(a) || {
    // Turn on the restriction to stop at | or || so we can parse
    // them as the lambda arguments
    let e = self.parse_expr_res(RESTRICT_NO_BAR_OR_DOUBLEBAR_OP);
    match e.node {
      expr_call(f, args, false) => {
        // Existing call: append the block as one more argument.
        let block = self.parse_lambda_block_expr();
        let last_arg = self.mk_expr(block.span.lo, block.span.hi,
                                    ctor(block));
        let args = vec::append(args, ~[last_arg]);
        @{node: expr_call(f, args, true)
          with *e}
      }
      expr_path(*) | expr_field(*) | expr_call(*) => {
        let block =
        self.parse_lambda_block_expr();
        let last_arg = self.mk_expr(block.span.lo, block.span.hi,
                                    ctor(block));
        // Bare path/field callee: the block becomes the sole argument.
        self.mk_expr(lo.lo, last_arg.span.hi,
                     expr_call(e, ~[last_arg], true))
      }
      _ => {
        // There may be other types of expressions that can
        // represent the callee in `for` and `do` expressions
        // but they aren't represented by tests
        debug!{"sugary call on %?", e.node};
        self.span_fatal(
            lo, fmt!{"`%s` must be followed by a block call", keyword});
      }
    }
}

// Parses `while cond { body }`; `while` was consumed by the caller.
fn parse_while_expr() -> @expr {
    let lo = self.last_span.lo;
    let cond = self.parse_expr();
    let body = self.parse_block_no_value();
    let mut hi = body.span.hi;
    return self.mk_expr(lo, hi, expr_while(cond, body));
}

// Parses `loop { body }` with an optional leading `label:`.
fn parse_loop_expr() -> @expr {
    let opt_ident;
    if is_ident(self.token) && !self.is_any_keyword(copy self.token) {
        opt_ident = some(self.parse_ident());
        self.expect(token::COLON);
    } else {
        opt_ident = none;
    }
    let lo = self.last_span.lo;
    let body = self.parse_block_no_value();
    let mut hi = body.span.hi;
    return self.mk_expr(lo, hi, expr_loop(body, opt_ident));
}

// For distinguishing between record literals and blocks
fn looking_at_record_literal() -> bool {
    let lookahead = self.look_ahead(1);
    self.token == token::LBRACE &&
        (self.token_is_keyword(~"mut", lookahead) ||
         (is_plain_ident(lookahead) &&
          self.look_ahead(2) == token::COLON))
}

// Parses a record literal `{a: e, ...}` with an optional functional-
// update base (`.. base` or the legacy `with base`).
fn parse_record_literal() -> expr_ {
    self.expect(token::LBRACE);
    let mut fields = ~[self.parse_field(token::COLON)];
    let mut base = none;
    while self.token != token::RBRACE {
        if self.token == token::COMMA
            && self.look_ahead(1) == token::DOTDOT {
            self.bump();
            self.bump();
            base = some(self.parse_expr());
            break;
        }

        // XXX: Remove "with" after all code is converted over and there's
        // a snapshot.
// optional comma before "with" if self.token == token::COMMA && self.token_is_keyword(~"with", self.look_ahead(1u)) { self.bump(); } if self.eat_keyword(~"with") { base = some(self.parse_expr()); break; } self.expect(token::COMMA); if self.token == token::RBRACE { // record ends by an optional trailing comma break; } vec::push(fields, self.parse_field(token::COLON)); } self.expect(token::RBRACE); return expr_rec(fields, base); } fn parse_alt_expr() -> @expr { let lo = self.last_span.lo; let mode = if self.eat_keyword(~"check") { alt_check } else { alt_exhaustive }; let discriminant = self.parse_expr(); self.expect(token::LBRACE); let mut arms: ~[arm] = ~[]; while self.token != token::RBRACE { let pats = self.parse_pats(); let mut guard = none; if self.eat_keyword(~"if") { guard = some(self.parse_expr()); } self.expect(token::FAT_ARROW); let expr = self.parse_expr_res(RESTRICT_STMT_EXPR); let require_comma = !classify::expr_is_simple_block(expr) && self.token != token::RBRACE; if require_comma { self.expect(token::COMMA); } else { self.eat(token::COMMA); } let blk = {node: {view_items: ~[], stmts: ~[], expr: some(expr), id: self.get_id(), rules: default_blk}, span: expr.span}; vec::push(arms, {pats: pats, guard: guard, body: blk}); } let mut hi = self.span.hi; self.bump(); return self.mk_expr(lo, hi, expr_match(discriminant, arms, mode)); } fn parse_expr() -> @expr { return self.parse_expr_res(UNRESTRICTED); } fn parse_expr_res(r: restriction) -> @expr { let old = self.restriction; self.restriction = r; let e = self.parse_assign_expr(); self.restriction = old; return e; } fn parse_initializer() -> option<initializer> { match self.token { token::EQ => { self.bump(); return some({op: init_assign, expr: self.parse_expr()}); } token::LARROW => { self.bump(); return some({op: init_move, expr: self.parse_expr()}); } // Now that the the channel is the first argument to receive, // combining it with an initializer doesn't really make sense. 
// case (token::RECV) { // self.bump(); // return some(rec(op = init_recv, // expr = self.parse_expr())); // } _ => { return none; } } } fn parse_pats() -> ~[@pat] { let mut pats = ~[]; loop { vec::push(pats, self.parse_pat(true)); if self.token == token::BINOP(token::OR) { self.bump(); } else { return pats; } }; } fn parse_pat_fields(refutable: bool) -> (~[ast::field_pat], bool) { let mut fields = ~[]; let mut etc = false; let mut first = true; while self.token != token::RBRACE { if first { first = false; } else { self.expect(token::COMMA); } if self.token == token::UNDERSCORE { self.bump(); if self.token != token::RBRACE { self.fatal(~"expected `}`, found `" + token_to_str(self.reader, self.token) + ~"`"); } etc = true; break; } let lo1 = self.last_span.lo; let fieldname = if self.look_ahead(1u) == token::COLON { self.parse_ident() } else { self.parse_value_ident() }; let hi1 = self.last_span.lo; let fieldpath = ast_util::ident_to_path(mk_sp(lo1, hi1), fieldname); let mut subpat; if self.token == token::COLON { self.bump(); subpat = self.parse_pat(refutable); } else { subpat = @{ id: self.get_id(), node: pat_ident(bind_by_implicit_ref, fieldpath, none), span: self.last_span }; } vec::push(fields, {ident: fieldname, pat: subpat}); } return (fields, etc); } fn parse_pat(refutable: bool) -> @pat { maybe_whole!{self, nt_pat}; let lo = self.span.lo; let mut hi = self.span.hi; let mut pat; match self.token { token::UNDERSCORE => { self.bump(); pat = pat_wild; } token::AT => { self.bump(); let sub = self.parse_pat(refutable); hi = sub.span.hi; // HACK: parse @"..." as a literal of a vstore @str pat = match sub.node { pat_lit(e@@{ node: expr_lit(@{node: lit_str(_), span: _}), _ }) => { let vst = @{id: self.get_id(), callee_id: self.get_id(), node: expr_vstore(e, vstore_box), span: mk_sp(lo, hi)}; pat_lit(vst) } _ => pat_box(sub) }; } token::TILDE => { self.bump(); let sub = self.parse_pat(refutable); hi = sub.span.hi; // HACK: parse ~"..." 
        // as a literal of a vstore ~str
        pat = match sub.node {
          pat_lit(e@@{
            node: expr_lit(@{node: lit_str(_), span: _}), _
          }) => {
            let vst = @{id: self.get_id(),
                        callee_id: self.get_id(),
                        node: expr_vstore(e, vstore_uniq),
                        span: mk_sp(lo, hi)};
            pat_lit(vst)
          }
          _ => pat_uniq(sub)
        };
      }
      token::LBRACE => {
        // Record pattern: `{field: pat, ..., _}`.
        self.bump();
        let (fields, etc) = self.parse_pat_fields(refutable);
        hi = self.span.hi;
        self.bump();
        pat = pat_rec(fields, etc);
      }
      token::LPAREN => {
        // Either the nil literal `()` or a tuple pattern.
        self.bump();
        if self.token == token::RPAREN {
            hi = self.span.hi;
            self.bump();
            let lit = @{node: lit_nil, span: mk_sp(lo, hi)};
            let expr = self.mk_expr(lo, hi, expr_lit(lit));
            pat = pat_lit(expr);
        } else {
            let mut fields = ~[self.parse_pat(refutable)];
            while self.token == token::COMMA {
                self.bump();
                vec::push(fields, self.parse_pat(refutable));
            }
            // One-element tuples require a trailing comma.
            if vec::len(fields) == 1u { self.expect(token::COMMA); }
            hi = self.span.hi;
            self.expect(token::RPAREN);
            pat = pat_tup(fields);
        }
      }
      tok => {
        if !is_ident_or_path(tok)
            || self.is_keyword(~"true")
            || self.is_keyword(~"false") {
            // Literal pattern, possibly a range (`a to b` / `a .. b`).
            let val = self.parse_expr_res(RESTRICT_NO_BAR_OP);
            if self.eat_keyword(~"to") || self.eat(token::DOTDOT) {
                let end = self.parse_expr_res(RESTRICT_NO_BAR_OP);
                pat = pat_range(val, end);
            } else {
                pat = pat_lit(val);
            }
        } else if self.eat_keyword(~"ref") {
            let mutbl = self.parse_mutability();
            pat = self.parse_pat_ident(refutable, bind_by_ref(mutbl));
        } else if self.eat_keyword(~"copy") {
            pat = self.parse_pat_ident(refutable, bind_by_value);
        } else if !is_plain_ident(self.token) {
            pat = self.parse_enum_variant(refutable);
        } else {
            let binding_mode;
            if self.eat_keyword(~"copy") {
                binding_mode = bind_by_value;
            } else if refutable {
                // XXX: Should be bind_by_value, but that's not
                // backward compatible.
                binding_mode = bind_by_implicit_ref;
            } else {
                binding_mode = bind_by_value;
            }

            // A lookahead of `(`, `[`, `<` or `{` means this must be an
            // enum or struct pattern rather than a plain binding.
            let cannot_be_enum_or_struct;
            match self.look_ahead(1) {
              token::LPAREN | token::LBRACKET | token::LT |
              token::LBRACE =>
                cannot_be_enum_or_struct = false,
              _ =>
                cannot_be_enum_or_struct = true
            }

            if is_plain_ident(self.token) && cannot_be_enum_or_struct {
                // Simple identifier binding, optionally `name @ subpat`.
                let name = self.parse_value_path();
                let sub;
                if self.eat(token::AT) {
                    sub = some(self.parse_pat(refutable));
                } else {
                    sub = none;
                };
                pat = pat_ident(binding_mode, name, sub);
            } else {
                let enum_path = self.parse_path_with_tps(true);
                match self.token {
                  token::LBRACE => {
                    // Struct pattern: `path { fields }`.
                    self.bump();
                    let (fields, etc) =
                        self.parse_pat_fields(refutable);
                    self.bump();
                    pat = pat_struct(enum_path, fields, etc);
                  }
                  _ => {
                    let mut args: ~[@pat] = ~[];
                    let mut star_pat = false;
                    match self.token {
                      token::LPAREN => match self.look_ahead(1u) {
                        token::BINOP(token::STAR) => {
                            // This is a "top constructor only" pat
                            self.bump(); self.bump();
                            star_pat = true;
                            self.expect(token::RPAREN);
                        }
                        _ => {
                            args = self.parse_unspanned_seq(
                                token::LPAREN, token::RPAREN,
                                seq_sep_trailing_disallowed
                                (token::COMMA),
                                |p| p.parse_pat(refutable));
                        }
                      },
                      _ => ()
                    }
                    // at this point, we're not sure whether it's a
                    // enum or a bind
                    if star_pat {
                        pat = pat_enum(enum_path, none);
                    }
                    else if vec::is_empty(args) &&
                        vec::len(enum_path.idents) == 1u {
                        pat = pat_ident(binding_mode, enum_path, none);
                    }
                    else {
                        pat = pat_enum(enum_path, some(args));
                    }
                  }
                }
            }
        }
        hi = self.span.hi;
      }
    }
    return @{id: self.get_id(), node: pat, span: mk_sp(lo, hi)};
}

// Parses an identifier pattern with the given binding mode; rejects
// paths and enum-looking forms with a fatal error.
fn parse_pat_ident(refutable: bool,
                   binding_mode: ast::binding_mode) -> ast::pat_ {
    if !is_plain_ident(self.token) {
        self.span_fatal(copy self.last_span,
                        ~"expected identifier, found path");
    }
    let name = self.parse_value_path();
    let sub = if self.eat(token::AT) {
        some(self.parse_pat(refutable))
    } else { none };

    // just to be friendly, if they write something like
    //   ref some(i)
    // we end up here with ( as the current token. This shortly
    // leads to a parse error.
    // Note that if there is no explicit
    // binding mode then we do not end up here, because the lookahead
    // will direct us over to parse_enum_variant()
    if self.token == token::LPAREN {
        self.span_fatal(
            copy self.last_span,
            ~"expected identifier, found enum pattern");
    }

    pat_ident(binding_mode, name, sub)
}

// Parses an enum-variant pattern: `path`, `path(*)`, or `path(a, ..., z)`.
fn parse_enum_variant(refutable: bool) -> ast::pat_ {
    let enum_path = self.parse_path_with_tps(true);
    match self.token {
      token::LPAREN => {
        match self.look_ahead(1u) {
          token::BINOP(token::STAR) => {
            // foo(*)
            self.expect(token::LPAREN);
            self.expect(token::BINOP(token::STAR));
            self.expect(token::RPAREN);
            pat_enum(enum_path, none)
          }
          _ => {
            // foo(a, ..., z)
            let args = self.parse_unspanned_seq(
                token::LPAREN, token::RPAREN,
                seq_sep_trailing_disallowed(token::COMMA),
                |p| p.parse_pat(refutable));
            pat_enum(enum_path, some(args))
          }
        }
      }
      _ => {
        // option::none
        pat_enum(enum_path, some(~[]))
      }
    }
}

// Parses one local declaration: pattern, optional `: type` (otherwise
// inferred), and an optional initializer.
fn parse_local(is_mutbl: bool,
               allow_init: bool) -> @local {
    let lo = self.span.lo;
    let pat = self.parse_pat(false);
    let mut ty = @{id: self.get_id(),
                   node: ty_infer,
                   span: mk_sp(lo, lo)};
    if self.eat(token::COLON) { ty = self.parse_ty(false); }
    let init = if allow_init { self.parse_initializer() } else { none };
    return @spanned(lo, self.last_span.hi,
                    {is_mutbl: is_mutbl, ty: ty, pat: pat,
                     init: init, id: self.get_id()});
}

// Parses a `let` declaration: one or more comma-separated locals, all
// sharing the same mutability.
fn parse_let() -> @decl {
    let is_mutbl = self.eat_keyword(~"mut");
    let lo = self.span.lo;
    let mut locals = ~[self.parse_local(is_mutbl, true)];
    while self.eat(token::COMMA) {
        vec::push(locals, self.parse_local(is_mutbl, true));
    }
    return @spanned(lo, self.last_span.hi, decl_local(locals));
}

/* assumes "let" token has already been consumed */
fn parse_instance_var(pr: visibility) -> @class_member {
    let mut is_mutbl = class_immutable;
    let lo = self.span.lo;
    if self.eat_keyword(~"mut") {
        is_mutbl = class_mutable;
    }
    if !is_plain_ident(self.token) {
        self.fatal(~"expected ident");
    }
    let name = self.parse_ident();
    self.expect(token::COLON);
    let ty = self.parse_ty(false);
    return @field_member(@spanned(lo, self.last_span.hi, {
        kind: named_field(name, is_mutbl, pr),
        id: self.get_id(),
        ty: ty
    }));
}

// Parses a single statement: a `let` declaration, an item declaration,
// or an expression statement. `first_item_attrs` are outer attributes
// already consumed by the caller that must attach to an item.
fn parse_stmt(+first_item_attrs: ~[attribute]) -> @stmt {
    maybe_whole!{self, nt_stmt};

    fn check_expected_item(p: parser, current_attrs: ~[attribute]) {
        // If we have attributes then we should have an item
        if vec::is_not_empty(current_attrs) {
            p.fatal(~"expected item");
        }
    }

    let lo = self.span.lo;
    if self.is_keyword(~"let") {
        check_expected_item(self, first_item_attrs);
        self.expect_keyword(~"let");
        let decl = self.parse_let();
        return @spanned(lo, decl.span.hi, stmt_decl(decl, self.get_id()));
    } else {
        let mut item_attrs;
        match self.parse_outer_attrs_or_ext(first_item_attrs) {
          none => item_attrs = ~[],
          some(Left(attrs)) => item_attrs = attrs,
          some(Right(ext)) => {
            // A macro/extension expression stands as a whole statement.
            return @spanned(lo, ext.span.hi,
                            stmt_expr(ext, self.get_id()));
          }
        }

        let item_attrs = vec::append(first_item_attrs, item_attrs);

        match self.parse_item_or_view_item(item_attrs, true) {
          iovi_item(i) => {
            let mut hi = i.span.hi;
            let decl = @spanned(lo, hi, decl_item(i));
            return @spanned(lo, hi, stmt_decl(decl, self.get_id()));
          }
          iovi_view_item(vi) => {
            self.span_fatal(vi.span, ~"view items must be declared at \
                                       the top of the block");
          }
          iovi_none() => { /* fallthrough */ }
        }

        check_expected_item(self, item_attrs);

        // Remainder are line-expr stmts.
        let e = self.parse_expr_res(RESTRICT_STMT_EXPR);
        return @spanned(lo, e.span.hi, stmt_expr(e, self.get_id()));
    }
}

// True when the current restriction forces this expression to end the
// statement (i.e. no further call/index/binop suffix should be parsed).
fn expr_is_complete(e: pexpr) -> bool {
    log(debug, (~"expr_is_complete", self.restriction,
                print::pprust::expr_to_str(*e),
                classify::expr_requires_semi_to_be_stmt(*e)));
    return self.restriction == RESTRICT_STMT_EXPR &&
        !classify::expr_requires_semi_to_be_stmt(*e);
}

// Parses a block, asserting that no inner attributes are present.
fn parse_block() -> blk {
    let (attrs, blk) = self.parse_inner_attrs_and_block(false);
    assert vec::is_empty(attrs);
    return blk;
}

// Parses `{ ... }`, `unchecked { ... }`, or `unsafe { ... }`, optionally
// collecting inner attributes when `parse_attrs` is true.
fn parse_inner_attrs_and_block(parse_attrs: bool)
    -> (~[attribute], blk) {

    maybe_whole!{pair_empty self, nt_block};

    fn maybe_parse_inner_attrs_and_next(p: parser, parse_attrs: bool) ->
        {inner: ~[attribute], next: ~[attribute]} {
        if parse_attrs {
            p.parse_inner_attrs_and_next()
        } else {
            {inner: ~[], next: ~[]}
        }
    }

    let lo = self.span.lo;
    if self.eat_keyword(~"unchecked") {
        self.expect(token::LBRACE);
        let {inner, next} = maybe_parse_inner_attrs_and_next(self,
                                                             parse_attrs);
        return (inner, self.parse_block_tail_(lo, unchecked_blk, next));
    } else if self.eat_keyword(~"unsafe") {
        self.expect(token::LBRACE);
        let {inner, next} = maybe_parse_inner_attrs_and_next(self,
                                                             parse_attrs);
        return (inner, self.parse_block_tail_(lo, unsafe_blk, next));
    } else {
        self.expect(token::LBRACE);
        let {inner, next} = maybe_parse_inner_attrs_and_next(self,
                                                             parse_attrs);
        return (inner, self.parse_block_tail_(lo, default_blk, next));
    }
}

fn parse_block_no_value() -> blk {
    // We parse blocks that cannot have a value the same as any other
    // block; the type checker will make sure that the tail expression (if
    // any) has unit type.
    return self.parse_block();
}

// Precondition: already parsed the '{' or '#{'
// I guess that also means "already parsed the 'impure'" if
// necessary, and this should take a qualifier.
// some blocks start with "#{"...
fn parse_block_tail(lo: uint, s: blk_check_mode) -> blk {
    self.parse_block_tail_(lo, s, ~[])
}

// Parses the remainder of a block after the opening `{`: leading items
// and view items, then statements, then an optional tail expression.
fn parse_block_tail_(lo: uint, s: blk_check_mode,
                     +first_item_attrs: ~[attribute]) -> blk {
    let mut stmts = ~[];
    let mut expr = none;

    let {attrs_remaining, view_items, items: items} =
        self.parse_items_and_view_items(first_item_attrs,
                                        IMPORTS_AND_ITEMS_ALLOWED);

    // Each leading item becomes a declaration statement.
    for items.each |item| {
        let decl = @spanned(item.span.lo, item.span.hi, decl_item(item));
        push(stmts, @spanned(item.span.lo, item.span.hi,
                             stmt_decl(decl, self.get_id())));
    }

    let mut initial_attrs = attrs_remaining;

    if self.token == token::RBRACE && !vec::is_empty(initial_attrs) {
        // Dangling attributes with nothing left to attach to.
        self.fatal(~"expected item");
    }

    while self.token != token::RBRACE {
        match self.token {
          token::SEMI => {
            self.bump(); // empty
          }
          _ => {
            let stmt = self.parse_stmt(initial_attrs);
            initial_attrs = ~[];
            match stmt.node {
              stmt_expr(e, stmt_id) => {
                // Expression without semicolon:
                match self.token {
                  token::SEMI => {
                    self.bump();
                    push(stmts,
                         @{node: stmt_semi(e, stmt_id) with *stmt});
                  }
                  token::RBRACE => {
                    // Block tail expression (the block's value).
                    expr = some(e);
                  }
                  t => {
                    if classify::stmt_ends_with_semi(*stmt) {
                        self.fatal(~"expected `;` or `}` after \
                                     expression but found `" +
                                   token_to_str(self.reader, t) + ~"`");
                    }
                    vec::push(stmts, stmt);
                  }
                }
              }

              _ => { // All other kinds of statements:
                vec::push(stmts, stmt);

                if classify::stmt_ends_with_semi(*stmt) {
                    self.expect(token::SEMI);
                }
              }
            }
          }
        }
    }
    let mut hi = self.span.hi;
    self.bump();
    let bloc = {view_items: view_items, stmts: stmts, expr: expr,
                id: self.get_id(), rules: s};
    return spanned(lo, hi, bloc);
}

// Parses an optional `:` followed by a list of type-parameter bounds
// (`send`, `copy`, `const`, `owned`, or a trait type).
fn parse_optional_ty_param_bounds() -> @~[ty_param_bound] {
    let mut bounds = ~[];
    if self.eat(token::COLON) {
        while is_ident(self.token) {
            if self.eat_keyword(~"send") {
                push(bounds, bound_send);
            } else if self.eat_keyword(~"copy") {
                push(bounds, bound_copy)
            } else if self.eat_keyword(~"const") {
                push(bounds, bound_const);
            } else if self.eat_keyword(~"owned") {
                push(bounds, bound_owned);
            } else {
                push(bounds,
                push(bounds, bound_trait(self.parse_ty(false)));
            }
        }
    }
    return @move bounds;
}

// Parses one type parameter: an identifier plus optional bounds.
fn parse_ty_param() -> ty_param {
    let ident = self.parse_ident();
    let bounds = self.parse_optional_ty_param_bounds();
    return {ident: ident, id: self.get_id(), bounds: bounds};
}

// Parses an optional `<T, ...>` type-parameter list.
fn parse_ty_params() -> ~[ty_param] {
    if self.eat(token::LT) {
        self.parse_seq_to_gt(some(token::COMMA), |p| p.parse_ty_param())
    } else { ~[] }
}

// Parses a function declaration: parenthesized argument/capture list
// followed by the return type.
fn parse_fn_decl(purity: purity,
                 parse_arg_fn: fn(parser) -> arg_or_capture_item)
    -> (fn_decl, capture_clause) {

    let args_or_capture_items: ~[arg_or_capture_item] =
        self.parse_unspanned_seq(
            token::LPAREN, token::RPAREN,
            seq_sep_trailing_disallowed(token::COMMA), parse_arg_fn);

    let inputs = either::lefts(args_or_capture_items);
    let capture_clause = @either::rights(args_or_capture_items);

    let (ret_style, ret_ty) = self.parse_ret_ty();
    return ({inputs: inputs,
             output: ret_ty,
             purity: purity,
             cf: ret_style}, capture_clause);
}

// True when the current token is the plain identifier `self`.
fn is_self_ident() -> bool {
    match self.token {
      token::IDENT(sid, false) if ~"self" == *self.get_str(sid) => true,
      _ => false
    }
}

fn expect_self_ident() {
    if !self.is_self_ident() {
        self.fatal(#fmt("expected `self` but found `%s`",
                        token_to_str(self.reader, self.token)));
    }
    self.bump();
}

// Parses a method declaration, including an optional explicit self type
// (`&self`, `@self`, `~self`, or bare `self`, with optional mutability).
fn parse_fn_decl_with_self(purity: purity,
                           parse_arg_fn:
                           fn(parser) -> arg_or_capture_item)
    -> (self_ty, fn_decl, capture_clause) {

    fn maybe_parse_self_ty(cnstr: fn(+mutability) -> ast::self_ty_,
                           p: parser) -> ast::self_ty_ {
        // We need to make sure it isn't a mode or a type
        if p.token_is_keyword(~"self", p.look_ahead(1)) ||
            ((p.token_is_keyword(~"const", p.look_ahead(1)) ||
              p.token_is_keyword(~"mut", p.look_ahead(1))) &&
             p.token_is_keyword(~"self", p.look_ahead(2))) {
            p.bump();
            let mutability = p.parse_mutability();
            p.expect_self_ident();
            cnstr(mutability)
        } else {
            sty_by_ref
        }
    }

    self.expect(token::LPAREN);

    // A bit of complexity and lookahead is needed here in order to be
    // backwards compatible.
    let lo = self.span.lo;
    let self_ty = match copy self.token {
      token::BINOP(token::AND) => {
        maybe_parse_self_ty(sty_region, self)
      }
      token::AT => {
        maybe_parse_self_ty(sty_box, self)
      }
      token::TILDE => {
        maybe_parse_self_ty(sty_uniq, self)
      }
      token::IDENT(*) if self.is_self_ident() => {
        self.bump();
        sty_value
      }
      _ => {
        sty_by_ref
      }
    };

    // If we parsed a self type, expect a comma before the argument list.
    let args_or_capture_items;
    if self_ty != sty_by_ref {
        match copy self.token {
          token::COMMA => {
            self.bump();
            let sep = seq_sep_trailing_disallowed(token::COMMA);
            args_or_capture_items =
                self.parse_seq_to_before_end(token::RPAREN,
                                             sep,
                                             parse_arg_fn);
          }
          token::RPAREN => {
            args_or_capture_items = ~[];
          }
          _ => {
            self.fatal(~"expected `,` or `)`, found `" +
                       token_to_str(self.reader, self.token) + ~"`");
          }
        }
    } else {
        let sep = seq_sep_trailing_disallowed(token::COMMA);
        args_or_capture_items =
            self.parse_seq_to_before_end(token::RPAREN,
                                         sep,
                                         parse_arg_fn);
    }

    self.expect(token::RPAREN);

    let hi = self.span.hi;

    let inputs = either::lefts(args_or_capture_items);
    let capture_clause = @either::rights(args_or_capture_items);
    let (ret_style, ret_ty) = self.parse_ret_ty();

    let fn_decl = {
        inputs: inputs,
        output: ret_ty,
        purity: purity,
        cf: ret_style
    };

    (spanned(lo, hi, self_ty), fn_decl, capture_clause)
}

// Parses a closure declaration: `||` or `|args|`, plus an optional
// `-> ret` type (otherwise inferred).
fn parse_fn_block_decl() -> (fn_decl, capture_clause) {
    let inputs_captures = {
        if self.eat(token::OROR) {
            ~[]
        } else {
            self.parse_unspanned_seq(
                token::BINOP(token::OR), token::BINOP(token::OR),
                seq_sep_trailing_disallowed(token::COMMA),
                |p| p.parse_fn_block_arg())
        }
    };
    let output = if self.eat(token::RARROW) {
        self.parse_ty(false)
    } else {
        @{id: self.get_id(), node: ty_infer, span: self.span}
    };
    return ({inputs: either::lefts(inputs_captures),
             output: output,
             purity: impure_fn,
             cf: return_val},
            @either::rights(inputs_captures));
}

// Parses a function header: the value name plus type parameters.
fn parse_fn_header() -> {ident: ident, tps: ~[ty_param]} {
    let id = self.parse_value_ident();
    let ty_params = self.parse_ty_params();
    return {ident: id,
            tps: ty_params};
}

// Builds an item AST node from its parsed pieces.
fn mk_item(lo: uint, hi: uint, +ident: ident,
           +node: item_, vis: visibility,
           +attrs: ~[attribute]) -> @item {
    return @{ident: ident,
             attrs: attrs,
             id: self.get_id(),
             node: node,
             vis: vis,
             span: mk_sp(lo, hi)};
}

// Parses a free function item: header, declaration, and body block.
fn parse_item_fn(purity: purity) -> item_info {
    let t = self.parse_fn_header();
    let (decl, _) = self.parse_fn_decl(purity, |p| p.parse_arg());
    let (inner_attrs, body) = self.parse_inner_attrs_and_block(true);
    (t.ident, item_fn(decl, t.tps, body), some(inner_attrs))
}

fn parse_method_name() -> ident {
    self.parse_value_ident()
}

// Parses one method within an impl or class body.
fn parse_method(pr: visibility) -> @method {
    let attrs = self.parse_outer_attributes();
    let lo = self.span.lo;

    let is_static = self.parse_staticness();
    let static_sty = spanned(lo, self.span.hi, sty_static);

    let pur = self.parse_fn_purity();
    let ident = self.parse_method_name();
    let tps = self.parse_ty_params();
    let (self_ty, decl, _) = do self.parse_fn_decl_with_self(pur) |p| {
        p.parse_arg()
    };
    // XXX: interaction between staticness, self_ty is broken now
    let self_ty = if is_static { static_sty} else { self_ty };

    let (inner_attrs, body) = self.parse_inner_attrs_and_block(true);
    let attrs = vec::append(attrs, inner_attrs);
    @{ident: ident, attrs: attrs,
      tps: tps, self_ty: self_ty, decl: decl, body: body,
      id: self.get_id(), span: mk_sp(lo, body.span.hi),
      self_id: self.get_id(), vis: pr}
}

// Parses a trait item: name, region/type params, optional supertrait
// list after `:`, and the method declarations.
fn parse_item_trait() -> item_info {
    let ident = self.parse_ident();
    self.parse_region_param();
    let tps = self.parse_ty_params();

    // Parse traits, if necessary.
    let traits;
    if self.token == token::COLON {
        self.bump();
        traits = self.parse_trait_ref_list(token::LBRACE);
    } else {
        traits = ~[];
    }

    let meths = self.parse_trait_methods();
    (ident, item_trait(tps, traits, meths), none)
}

// Parses four variants (with the region/type params always optional):
//    impl<T> ~[T] : to_str { ...
// }
fn parse_item_impl() -> item_info {
    // Wraps a path into a type node.
    fn wrap_path(p: parser, pt: @path) -> @ty {
        @{id: p.get_id(), node: ty_path(pt, p.get_id()), span: pt.span}
    }

    // We do two separate paths here: old-style impls and new-style impls.

    // First, parse type parameters if necessary.
    let mut tps;
    if self.token == token::LT {
        tps = self.parse_ty_params();
    } else {
        tps = ~[];
    }

    // This is a new-style impl declaration.
    let ident = @~"__extensions__"; // XXX: clownshoes

    // Parse the type.
    let ty = self.parse_ty(false);

    // Parse traits, if necessary.
    let traits = if self.token == token::COLON {
        self.bump();
        self.parse_trait_ref_list(token::LBRACE)
    } else {
        ~[]
    };

    // Parse the methods between `{` and `}`.
    let mut meths = ~[];
    self.expect(token::LBRACE);
    while !self.eat(token::RBRACE) {
        vec::push(meths, self.parse_method(public));
    }
    (ident, item_impl(tps, traits, ty, meths), none)
}

// Instantiates ident <i> with references to <typarams> as arguments.
// Used to create a path that refers to a class which will be defined as
// the return type of the ctor function.
// Builds a single-segment path <i>, with one ty_path argument per type
// param; every span is the parser's last_span.
fn ident_to_path_tys(i: ident, typarams: ~[ty_param]) -> @path {
    let s = self.last_span;
    @{span: s, global: false, idents: ~[i],
      rp: none,
      types: vec::map(typarams, |tp| {
          @{id: self.get_id(),
            node: ty_path(ident_to_path(s, tp.ident), self.get_id()),
            span: s}})
     }
}

// Parses a trait reference (a path with optional type arguments).
fn parse_trait_ref() -> @trait_ref {
    @{path: self.parse_path_with_tps(false),
      ref_id: self.get_id(), impl_id: self.get_id()}
}

// Parses a comma-separated list of trait references, stopping (without
// consuming) at the `ket` token.
fn parse_trait_ref_list(ket: token::token) -> ~[@trait_ref] {
    self.parse_seq_to_before_end(
        ket, seq_sep_trailing_disallowed(token::COMMA),
        |p| p.parse_trait_ref())
}

// Parses a class/struct item with a record-like `{ ... }`, tuple-like
// `( ... );`, or unit-like `;` body. Tracks at most one explicit
// constructor and one destructor; duplicates are fatal errors.
fn parse_item_class() -> item_info {
    let class_name = self.parse_value_ident();
    self.parse_region_param();
    let ty_params = self.parse_ty_params();
    let class_path = self.ident_to_path_tys(class_name, ty_params);
    let traits : ~[@trait_ref] = if self.eat(token::COLON) {
        self.parse_trait_ref_list(token::LBRACE)
    } else {
        ~[]
    };
    let mut fields: ~[@struct_field];
    let mut methods: ~[@method] = ~[];
    let mut the_ctor: option<(fn_decl, ~[attribute], blk,
                              codemap::span)> = none;
    let mut the_dtor: option<(blk, ~[attribute], codemap::span)> = none;
    let ctor_id = self.get_id();

    if self.eat(token::LBRACE) {
        // It's a record-like struct.
        fields = ~[];
        while self.token != token::RBRACE {
            match self.parse_class_item(class_path) {
              ctor_decl(a_fn_decl, attrs, blk, s) => {
                  match the_ctor {
                    some((_, _, _, s_first)) => {
                      self.span_note(s, #fmt("Duplicate constructor \
                                     declaration for class %s",
                                     *class_name));
                      self.span_fatal(copy s_first, ~"First constructor \
                                                      declared here");
                    }
                    none => {
                      the_ctor = some((a_fn_decl, attrs, blk, s));
                    }
                  }
              }
              dtor_decl(blk, attrs, s) => {
                  match the_dtor {
                    some((_, _, s_first)) => {
                      self.span_note(s, #fmt("Duplicate destructor \
                                     declaration for class %s",
                                     *class_name));
                      self.span_fatal(copy s_first, ~"First destructor \
                                                      declared here");
                    }
                    none => {
                      the_dtor = some((blk, attrs, s));
                    }
                  }
              }
              members(mms) => {
                for mms.each |mm| {
                    match mm {
                      @field_member(struct_field) =>
                        vec::push(fields, struct_field),
                      @method_member(the_method_member) =>
                        vec::push(methods, the_method_member)
                    }
                }
              }
            }
        }
        self.bump();
    } else if self.token == token::LPAREN {
        // It's a tuple-like struct.
        fields = do self.parse_unspanned_seq(token::LPAREN, token::RPAREN,
                                             seq_sep_trailing_allowed
                                                (token::COMMA)) |p| {
            let lo = p.span.lo;
            let struct_field_ = {
                kind: unnamed_field,
                id: self.get_id(),
                ty: p.parse_ty(false)
            };
            @spanned(lo, p.span.hi, struct_field_)
        };
        self.expect(token::SEMI);
    } else if self.eat(token::SEMI) {
        // It's a unit-like struct.
        fields = ~[];
    } else {
        self.fatal(fmt!("expected `{`, `(`, or `;` after struct name \
                         but found `%s`",
                        token_to_str(self.reader, self.token)));
    }

    let actual_dtor = do option::map(the_dtor) |dtor| {
        let (d_body, d_attrs, d_s) = dtor;
        {node: {id: self.get_id(),
                attrs: d_attrs,
                self_id: self.get_id(),
                body: d_body},
         span: d_s}};
    match the_ctor {
      some((ct_d, ct_attrs, ct_b, ct_s)) => {
        (class_name,
         item_class(@{
            traits: traits,
            fields: move fields,
            methods: move methods,
            ctor: some({
             node: {id: ctor_id,
                    attrs: ct_attrs,
                    self_id: self.get_id(),
                    dec: ct_d,
                    body: ct_b},
             span: ct_s}),
            dtor: actual_dtor
         }, ty_params),
         none)
      }
      none => {
        (class_name,
         item_class(@{
            traits: traits,
            fields: move fields,
            methods: move methods,
            ctor: none,
            dtor: actual_dtor
         }, ty_params),
         none)
      }
    }
}

// True for `#` and doc-comment tokens (which cannot start a field).
fn token_is_pound_or_doc_comment(++tok: token::token) -> bool {
    match tok {
        token::POUND | token::DOC_COMMENT(_) => true,
        _ => false
    }
}

// Parses one class member at the given visibility: an instance variable
// (introduced by `let`, `mut`, or a plain non-keyword ident) or a method.
fn parse_single_class_item(vis: visibility) -> @class_member {
    if (self.eat_keyword(~"let") ||
            self.token_is_keyword(~"mut", copy self.token) ||
            !self.is_any_keyword(copy self.token)) &&
            !self.token_is_pound_or_doc_comment(self.token) {
        let a_var = self.parse_instance_var(vis);
        self.expect(token::SEMI);
        return a_var;
    } else {
        let m = self.parse_method(vis);
        return @method_member(m);
    }
}

// Parses a `new(...)` constructor; the decl's output type is rewritten
// to `result_ty` (the class type itself).
fn parse_ctor(attrs: ~[attribute],
              result_ty: ast::ty_) -> class_contents {
    let lo = self.last_span.lo;
    let (decl_, _) = self.parse_fn_decl(impure_fn, |p| p.parse_arg());
    let decl = {output: @{id: self.get_id(),
                          node: result_ty,
                          span: decl_.output.span}
                with decl_};
    let body = self.parse_block();
    ctor_decl(decl, attrs, body, mk_sp(lo, self.last_span.hi))
}

// Parses a `drop { ... }` destructor body.
fn parse_dtor(attrs: ~[attribute]) -> class_contents {
    let lo = self.last_span.lo;
    let body = self.parse_block();
    dtor_decl(body, attrs, mk_sp(lo, self.last_span.hi))
}

// Parses one item inside a class body: a `priv` group or member, a `pub`
// member, a `new` ctor, a `drop` dtor, or an inherited-visibility member.
fn parse_class_item(class_name_with_tps: @path) -> class_contents {
    if self.eat_keyword(~"priv") {
        // XXX: Remove after snapshot.
match self.token {
            token::LBRACE => {
                // `priv { ... }` group: every member inside is private.
                self.bump();
                let mut results = ~[];
                while self.token != token::RBRACE {
                    vec::push(results,
                              self.parse_single_class_item(private));
                }
                self.bump();
                return members(results);
            }
            _ =>
                return members(~[self.parse_single_class_item(private)])
        }
    }
    if self.eat_keyword(~"pub") {
        return members(~[self.parse_single_class_item(public)]);
    }
    let attrs = self.parse_outer_attributes();
    if self.eat_keyword(~"new") {
        // result type is always the type of the class
        return self.parse_ctor(attrs, ty_path(class_name_with_tps,
                                              self.get_id()));
    } else if self.eat_keyword(~"drop") {
        return self.parse_dtor(attrs);
    } else {
        return members(~[self.parse_single_class_item(inherited)]);
    }
}

// Maps `pub`/`priv` keywords to a visibility; anything else is inherited.
fn parse_visibility() -> visibility {
    if self.eat_keyword(~"pub") { public }
    else if self.eat_keyword(~"priv") { private }
    else { inherited }
}

// Consumes an optional `static` keyword.
fn parse_staticness() -> bool {
    self.eat_keyword(~"static")
}

// Parses the items of a module body up to the terminator `term`.
// `first_item_attrs` are outer attributes already consumed by the caller.
fn parse_mod_items(term: token::token,
                   +first_item_attrs: ~[attribute]) -> _mod {
    // Shouldn't be any view items since we've already parsed an item attr
    let {attrs_remaining, view_items, items: starting_items} =
        self.parse_items_and_view_items(first_item_attrs,
                                        VIEW_ITEMS_AND_ITEMS_ALLOWED);
    let mut items: ~[@item] = move starting_items;

    let mut first = true;
    while self.token != term {
        let mut attrs = self.parse_outer_attributes();
        if first {
            attrs = vec::append(attrs_remaining, attrs);
            first = false;
        }
        debug!("parse_mod_items: parse_item_or_view_item(attrs=%?)",
               attrs);
        match self.parse_item_or_view_item(attrs, true) {
            iovi_item(item) => vec::push(items, item),
            iovi_view_item(view_item) => {
                self.span_fatal(view_item.span, ~"view items must be \
                                                  declared at the top of the \
                                                  module");
            }
            _ => {
                self.fatal(~"expected item but found `" +
                           token_to_str(self.reader, self.token) + ~"`");
            }
        }
        debug!{"parse_mod_items: attrs=%?", attrs};
    }

    if first && attrs_remaining.len() > 0u {
        // We parsed attributes for the first item but didn't find it
        self.fatal(~"expected item");
    }

    return {view_items: view_items, items: items};
}

// Parses `const NAME: TY = EXPR;` (after the `const` keyword).
fn parse_item_const() -> item_info {
    let id = self.parse_value_ident();
    self.expect(token::COLON);
    let ty = self.parse_ty(false);
    self.expect(token::EQ);
    let e = self.parse_expr();
    self.expect(token::SEMI);
    (id, item_const(ty, e), none)
}

// Parses `mod NAME { ... }` (after the `mod` keyword).
fn parse_item_mod() -> item_info {
    let id = self.parse_ident();
    self.expect(token::LBRACE);
    let inner_attrs = self.parse_inner_attrs_and_next();
    let m = self.parse_mod_items(token::RBRACE, inner_attrs.next);
    self.expect(token::RBRACE);
    (id, item_mod(m), some(inner_attrs.inner))
}

// Parses a foreign `fn` declaration (no body, terminated by `;`).
fn parse_item_foreign_fn(+attrs: ~[attribute],
                         purity: purity) -> @foreign_item {
    let lo = self.last_span.lo;
    let t = self.parse_fn_header();
    let (decl, _) = self.parse_fn_decl(purity, |p| p.parse_arg());
    let mut hi = self.span.hi;
    self.expect(token::SEMI);
    return @{ident: t.ident,
             attrs: attrs,
             node: foreign_item_fn(decl, t.tps),
             id: self.get_id(),
             span: mk_sp(lo, hi)};
}

// Parses an optional purity marker followed by `fn`; plain `fn` is impure.
fn parse_fn_purity() -> purity {
    if self.eat_keyword(~"fn") { impure_fn }
    else if self.eat_keyword(~"pure") {
        self.expect_keyword(~"fn");
        pure_fn
    } else if self.eat_keyword(~"unsafe") {
        self.expect_keyword(~"fn");
        unsafe_fn
    } else { self.unexpected(); }
}

// Foreign items are always functions here.
fn parse_foreign_item(+attrs: ~[attribute]) -> @foreign_item {
    self.parse_item_foreign_fn(attrs, self.parse_fn_purity())
}

// Parses the contents of an `extern mod { ... }` block up to (but not
// consuming) the closing `}`.
fn parse_foreign_mod_items(+first_item_attrs: ~[attribute]) -> foreign_mod {
    // Shouldn't be any view items since we've already parsed an item attr
    let {attrs_remaining, view_items, items: _} =
        self.parse_items_and_view_items(first_item_attrs,
                                        VIEW_ITEMS_ALLOWED);
    let mut items: ~[@foreign_item] = ~[];
    let mut initial_attrs = attrs_remaining;
    while self.token != token::RBRACE {
        let attrs = vec::append(initial_attrs,
                                self.parse_outer_attributes());
        initial_attrs = ~[];
        vec::push(items, self.parse_foreign_item(attrs));
    }
    return {view_items: view_items, items: items};
}

// Parses `extern mod ...`: either an inline foreign module with a body,
// or `extern mod foo;` which becomes a `use`-style view item.
fn parse_item_foreign_mod(lo: uint,
                          visibility: visibility,
                          attrs: ~[attribute],
                          items_allowed: bool) ->
item_or_view_item {

    // Accept both `mod` and the legacy `module` keyword.
    if self.is_keyword(~"mod") {
        self.expect_keyword(~"mod");
    } else {
        self.expect_keyword(~"module");
    }
    let ident = self.parse_ident();

    // extern mod { ... }
    if items_allowed && self.eat(token::LBRACE) {
        let extra_attrs = self.parse_inner_attrs_and_next();
        let m = self.parse_foreign_mod_items(extra_attrs.next);
        self.expect(token::RBRACE);
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident,
                                      item_foreign_mod(m), visibility,
                                      maybe_append(attrs,
                                                   some(extra_attrs.
                                                        inner))));
    }

    // extern mod foo;
    let metadata = self.parse_optional_meta();
    self.expect(token::SEMI);
    return iovi_view_item(@{
        node: view_item_use(ident, metadata, self.get_id()),
        attrs: attrs,
        vis: visibility,
        span: mk_sp(lo, self.last_span.hi)
    });
}

// Records the position of a `type` keyword and parses the type's name.
fn parse_type_decl() -> {lo: uint, ident: ident} {
    let lo = self.last_span.lo;
    let id = self.parse_ident();
    return {lo: lo, ident: id};
}

// Parses `type NAME<...> = TY;` (after the `type` keyword).
fn parse_item_type() -> item_info {
    let t = self.parse_type_decl();
    self.parse_region_param();
    let tps = self.parse_ty_params();
    self.expect(token::EQ);
    let ty = self.parse_ty(false);
    self.expect(token::SEMI);
    (t.ident, item_ty(ty, tps), none)
}

// Consumes an optional `/&` region parameter marker.
fn parse_region_param() {
    if self.eat(token::BINOP(token::SLASH)) {
        self.expect(token::BINOP(token::AND));
    }
}

// Parses a struct body (fields, methods, optional dtor) up to the
// closing `}`. Deprecated explicit `new` constructors are rejected.
fn parse_struct_def(path: @path) -> @struct_def {
    let mut the_dtor: option<(blk, ~[attribute], codemap::span)> = none;
    let mut fields: ~[@struct_field] = ~[];
    let mut methods: ~[@method] = ~[];
    while self.token != token::RBRACE {
        match self.parse_class_item(path) {
            ctor_decl(*) => {
                self.span_fatal(copy self.span,
                                ~"deprecated explicit \
                                  constructors are not allowed \
                                  here");
            }
            dtor_decl(blk, attrs, s) => {
                match the_dtor {
                    some((_, _, s_first)) => {
                        self.span_note(s, ~"duplicate destructor \
                                            declaration");
                        self.span_fatal(copy s_first,
                                        ~"first destructor \
                                          declared here");
                    }
                    none => {
                        the_dtor = some((blk, attrs, s));
                    }
                }
            }
            members(mms) => {
                for mms.each |mm| {
                    match mm {
                        @field_member(struct_field) =>
                            vec::push(fields, struct_field),
                        @method_member(the_method_member) =>
                            vec::push(methods, the_method_member)
                    }
                }
            }
        }
    }
    self.bump();
    let mut actual_dtor = do option::map(the_dtor) |dtor| {
        let (d_body, d_attrs, d_s) = dtor;
        {node: {id: self.get_id(),
                attrs: d_attrs,
                self_id: self.get_id(),
                body: d_body},
         span: d_s}
    };
    return @{
        traits: ~[],
        fields: move fields,
        methods: move methods,
        ctor: none,
        dtor: actual_dtor
    };
}

// Parses the variants of an `enum` body up to (and including) the
// closing `}`. Handles shared `struct` fields, nested enums, struct
// variants, tuple variants, and explicit discriminators (which are only
// legal when every variant is nullary, i.e. a c-like enum).
fn parse_enum_def(ident: ast::ident,
                  ty_params: ~[ast::ty_param]) -> enum_def {
    let mut variants: ~[variant] = ~[];
    let mut all_nullary = true, have_disr = false;
    let mut common_fields = none;

    while self.token != token::RBRACE {
        let variant_attrs = self.parse_outer_attributes();
        let vlo = self.span.lo;

        // Is this a common field declaration?
        if self.eat_keyword(~"struct") {
            if common_fields.is_some() {
                self.fatal(~"duplicate declaration of shared fields");
            }
            self.expect(token::LBRACE);
            let path = self.ident_to_path_tys(ident, ty_params);
            common_fields = some(self.parse_struct_def(path));
            again;
        }

        let vis = self.parse_visibility();

        // Is this a nested enum declaration?
        let ident, needs_comma, kind;
        let mut args = ~[], disr_expr = none;
        if self.eat_keyword(~"enum") {
            ident = self.parse_ident();
            self.expect(token::LBRACE);
            let nested_enum_def = self.parse_enum_def(ident, ty_params);
            kind = enum_variant_kind(move nested_enum_def);
            needs_comma = false;
        } else {
            ident = self.parse_value_ident();
            if self.eat(token::LBRACE) {
                // Parse a struct variant.
                all_nullary = false;
                let path = self.ident_to_path_tys(ident, ty_params);
                kind = struct_variant_kind(self.parse_struct_def(path));
            } else if self.token == token::LPAREN {
                // Tuple variant with argument types.
                all_nullary = false;
                let arg_tys = self.parse_unspanned_seq(
                    token::LPAREN, token::RPAREN,
                    seq_sep_trailing_disallowed(token::COMMA),
                    |p| p.parse_ty(false));
                for arg_tys.each |ty| {
                    vec::push(args, {ty: ty, id: self.get_id()});
                }
                kind = tuple_variant_kind(args);
            } else if self.eat(token::EQ) {
                // Explicit discriminator value.
                have_disr = true;
                disr_expr = some(self.parse_expr());
                kind = tuple_variant_kind(args);
            } else {
                kind = tuple_variant_kind(~[]);
            }
            needs_comma = true;
        }

        let vr = {name: ident, attrs: variant_attrs,
                  kind: kind, id: self.get_id(),
                  disr_expr: disr_expr, vis: vis};
        vec::push(variants, spanned(vlo, self.last_span.hi, vr));

        if needs_comma && !self.eat(token::COMMA) { break; }
    }
    self.expect(token::RBRACE);

    if (have_disr && !all_nullary) {
        self.fatal(~"discriminator values can only be used with a c-like \
                     enum");
    }

    return enum_def({ variants: variants, common: common_fields });
}

// Parses an `enum` item: either newtype syntax `enum X = TY;` or a
// braced variant list.
fn parse_item_enum() -> item_info {
    let id = self.parse_ident();
    self.parse_region_param();
    let ty_params = self.parse_ty_params();
    // Newtype syntax
    if self.token == token::EQ {
        self.check_restricted_keywords_(*id);
        self.bump();
        let ty = self.parse_ty(false);
        self.expect(token::SEMI);
        let variant =
            spanned(ty.span.lo, ty.span.hi,
                    {name: id,
                     attrs: ~[],
                     kind: tuple_variant_kind
                        (~[{ty: ty, id: self.get_id()}]),
                     id: self.get_id(),
                     disr_expr: none,
                     vis: public});
        return (id, item_enum(enum_def({ variants: ~[variant],
                                         common: none }),
                              ty_params), none);
    }
    self.expect(token::LBRACE);

    let enum_definition = self.parse_enum_def(id, ty_params);
    (id, item_enum(enum_definition, ty_params), none)
}

// Maps a leading sigil token to a closure protocol; defaults to block.
fn parse_fn_ty_proto() -> proto {
    match self.token {
        token::AT => {
            self.bump();
            proto_box
        }
        token::TILDE => {
            self.bump();
            proto_uniq
        }
        token::BINOP(token::AND) => {
            self.bump();
            proto_block
        }
        _ => {
            proto_block
        }
    }
}

// True if `tok` can follow `fn` in a fn-expression rather than an item.
fn fn_expr_lookahead(tok:
token::token) -> bool {
    match tok {
        token::LPAREN | token::AT | token::TILDE | token::BINOP(_) => true,
        _ => false
    }
}

// Top-level dispatcher for items and view items. Returns iovi_none when
// the current token cannot start either.
fn parse_item_or_view_item(+attrs: ~[attribute],
                           items_allowed: bool) -> item_or_view_item {
    maybe_whole!{iovi self,nt_item};
    let lo = self.span.lo;

    let visibility;
    if self.eat_keyword(~"pub") {
        visibility = public;
    } else if self.eat_keyword(~"priv") {
        visibility = private;
    } else {
        visibility = inherited;
    }

    if items_allowed && self.eat_keyword(~"const") {
        let (ident, item_, extra_attrs) = self.parse_item_const();
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.is_keyword(~"fn") &&
        !self.fn_expr_lookahead(self.look_ahead(1u)) {
        self.bump();
        let (ident, item_, extra_attrs) = self.parse_item_fn(impure_fn);
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.eat_keyword(~"pure") {
        self.expect_keyword(~"fn");
        let (ident, item_, extra_attrs) = self.parse_item_fn(pure_fn);
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.is_keyword(~"unsafe")
        && self.look_ahead(1u) != token::LBRACE {
        self.bump();
        self.expect_keyword(~"fn");
        let (ident, item_, extra_attrs) = self.parse_item_fn(unsafe_fn);
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if self.eat_keyword(~"extern") {
        // `extern fn` item, or an `extern mod` foreign module.
        if items_allowed && self.eat_keyword(~"fn") {
            let (ident, item_, extra_attrs) =
                self.parse_item_fn(extern_fn);
            return iovi_item(self.mk_item(lo, self.last_span.hi, ident,
                                          item_, visibility,
                                          maybe_append(attrs,
                                                       extra_attrs)));
        }
        return self.parse_item_foreign_mod(lo, visibility, attrs,
                                           items_allowed);
    } else if items_allowed &&
        (self.eat_keyword(~"mod") || self.eat_keyword(~"module")) {
        let (ident, item_, extra_attrs) = self.parse_item_mod();
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.eat_keyword(~"type") {
        let (ident, item_, extra_attrs) = self.parse_item_type();
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.eat_keyword(~"enum") {
        let (ident, item_, extra_attrs) = self.parse_item_enum();
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.eat_keyword(~"trait") {
        let (ident, item_, extra_attrs) = self.parse_item_trait();
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.eat_keyword(~"impl") {
        let (ident, item_, extra_attrs) = self.parse_item_impl();
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if items_allowed && self.eat_keyword(~"struct") {
        let (ident, item_, extra_attrs) = self.parse_item_class();
        return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
                                      visibility,
                                      maybe_append(attrs, extra_attrs)));
    } else if self.eat_keyword(~"use") {
        let view_item = self.parse_use();
        self.expect(token::SEMI);
        return iovi_view_item(@{
            node: view_item,
            attrs: attrs,
            vis: visibility,
            span: mk_sp(lo, self.last_span.hi)
        });
    } else if self.eat_keyword(~"import") {
        let view_paths = self.parse_view_paths();
        self.expect(token::SEMI);
        return iovi_view_item(@{
            node: view_item_import(view_paths),
            attrs: attrs,
            vis: visibility,
            span: mk_sp(lo, self.last_span.hi)
        });
    } else if self.eat_keyword(~"export") {
        let view_paths = self.parse_view_paths();
        self.expect(token::SEMI);
        return iovi_view_item(@{
            node: view_item_export(view_paths),
            attrs: attrs,
            vis: visibility,
            span: mk_sp(lo, self.last_span.hi)
        });
    } else if items_allowed &&
            (!self.is_any_keyword(copy self.token)
                && self.look_ahead(1) == token::NOT
                && is_plain_ident(self.look_ahead(2))) {
        // item macro.
        let pth = self.parse_path_without_tps();
        self.expect(token::NOT);

        let id = self.parse_ident();

        let tts = match self.token {
            token::LPAREN | token::LBRACE | token::LBRACKET => {
                let ket = token::flip_delimiter(self.token);
                self.parse_unspanned_seq(copy self.token, ket,
                                         seq_sep_none(),
                                         |p| p.parse_token_tree())
            }
            _ => self.fatal(~"expected open delimiter")
        };

        let m = ast::mac_invoc_tt(pth, tts);
        let m: ast::mac = {node: m,
                           span: {lo: self.span.lo,
                                  hi: self.span.hi,
                                  expn_info: none}};
        let item_ = item_mac(m);
        return iovi_item(self.mk_item(lo, self.last_span.hi, id, item_,
                                      visibility, attrs));
    } else {
        return iovi_none;
    };
}

// Parses an item, treating view items as an error.
fn parse_item(+attrs: ~[attribute]) -> option<@ast::item> {
    match self.parse_item_or_view_item(attrs, true) {
        iovi_none =>
            none,
        iovi_view_item(_) =>
            self.fatal(~"view items are not allowed here"),
        iovi_item(item) =>
            some(item)
    }
}

// Parses the tail of a `use` view item (keyword already consumed).
fn parse_use() -> view_item_ {
    if self.look_ahead(1) == token::SEMI ||
            self.look_ahead(1) == token::LPAREN {
        // Old-style "use"; i.e. what we now call "extern mod".
let ident = self.parse_ident();
        let metadata = self.parse_optional_meta();
        return view_item_use(ident, metadata, self.get_id());
    }
    return view_item_import(self.parse_view_paths());
}

// Parses one view path: `x = foo::bar`, `foo::bar`, `foo::{a, b, c}`,
// or `foo::*`.
fn parse_view_path() -> @view_path {
    let lo = self.span.lo;

    let first_ident = self.parse_ident();
    let mut path = ~[first_ident];
    debug!{"parsed view_path: %s", *first_ident};
    match self.token {
        token::EQ => {
            // x = foo::bar
            self.bump();
            path = ~[self.parse_ident()];
            while self.token == token::MOD_SEP {
                self.bump();
                let id = self.parse_ident();
                vec::push(path, id);
            }
            let path = @{span: mk_sp(lo, self.span.hi),
                         global: false,
                         idents: path,
                         rp: none,
                         types: ~[]};
            return @spanned(lo, self.span.hi,
                            view_path_simple(first_ident, path,
                                             self.get_id()));
        }

        token::MOD_SEP => {
            // foo::bar or foo::{a,b,c} or foo::*
            while self.token == token::MOD_SEP {
                self.bump();

                match copy self.token {
                    token::IDENT(i, _) => {
                        self.bump();
                        vec::push(path, self.get_str(i));
                    }

                    // foo::bar::{a,b,c}
                    token::LBRACE => {
                        let idents = self.parse_unspanned_seq(
                            token::LBRACE, token::RBRACE,
                            seq_sep_trailing_allowed(token::COMMA),
                            |p| p.parse_path_list_ident());
                        let path = @{span: mk_sp(lo, self.span.hi),
                                     global: false,
                                     idents: path,
                                     rp: none,
                                     types: ~[]};
                        return @spanned(lo, self.span.hi,
                                        view_path_list(path, idents,
                                                       self.get_id()));
                    }

                    // foo::bar::*
                    token::BINOP(token::STAR) => {
                        self.bump();
                        let path = @{span: mk_sp(lo, self.span.hi),
                                     global: false,
                                     idents: path,
                                     rp: none,
                                     types: ~[]};
                        return @spanned(lo, self.span.hi,
                                        view_path_glob(path,
                                                       self.get_id()));
                    }

                    _ => break
                }
            }
        }
        _ => ()
    }
    // Simple path: the binding name is the last segment.
    let last = path[vec::len(path) - 1u];
    let path = @{span: mk_sp(lo, self.span.hi),
                 global: false,
                 idents: path,
                 rp: none,
                 types: ~[]};
    return @spanned(lo, self.span.hi,
                    view_path_simple(last, path, self.get_id()));
}

// Parses one or more comma-separated view paths.
fn parse_view_paths() -> ~[@view_path] {
    let mut vp = ~[self.parse_view_path()];
    while self.token == token::COMMA {
        self.bump();
        vec::push(vp, self.parse_view_path());
    }
    return vp;
}

// True if the current tokens (skipping a leading pub/priv) start a
// `use`, `import`, or `export` view item.
fn is_view_item() -> bool {
    let tok = if !self.is_keyword(~"pub") && !self.is_keyword(~"priv") {
        self.token
    } else {
        self.look_ahead(1u)
    };
    self.token_is_keyword(~"use", tok)
        || self.token_is_keyword(~"import", tok)
        || self.token_is_keyword(~"export", tok)
}

// Parses a single view item (use/import/export) terminated by `;`.
fn parse_view_item(+attrs: ~[attribute]) -> @view_item {
    let lo = self.span.lo, vis = self.parse_visibility();
    let node = if self.eat_keyword(~"use") {
        self.parse_use()
    } else if self.eat_keyword(~"import") {
        view_item_import(self.parse_view_paths())
    } else if self.eat_keyword(~"export") {
        view_item_export(self.parse_view_paths())
    } else {
        fail;
    };
    self.expect(token::SEMI);
    @{node: node, attrs: attrs, vis: vis,
      span: mk_sp(lo, self.last_span.hi)}
}

// Repeatedly parses view items and/or items according to `mode`,
// returning any trailing attributes that belonged to no item.
fn parse_items_and_view_items(+first_item_attrs: ~[attribute],
                              mode: view_item_parse_mode)
                           -> {attrs_remaining: ~[attribute],
                               view_items: ~[@view_item],
                               items: ~[@item]} {
    let mut attrs = vec::append(first_item_attrs,
                                self.parse_outer_attributes());

    let items_allowed;
    match mode {
        VIEW_ITEMS_AND_ITEMS_ALLOWED | IMPORTS_AND_ITEMS_ALLOWED =>
            items_allowed = true,
        VIEW_ITEMS_ALLOWED =>
            items_allowed = false
    }

    let (view_items, items) = (dvec(), dvec());
    loop {
        match self.parse_item_or_view_item(attrs, items_allowed) {
            iovi_none =>
                break,
            iovi_view_item(view_item) => {
                match mode {
                    VIEW_ITEMS_AND_ITEMS_ALLOWED | VIEW_ITEMS_ALLOWED => {}
                    IMPORTS_AND_ITEMS_ALLOWED =>
                        match view_item.node {
                            view_item_import(_) => {}
                            view_item_export(_) | view_item_use(*) =>
                                self.fatal(~"exports and \"extern mod\" \
                                             declarations are not \
                                             allowed here")
                        }
                }
                view_items.push(view_item);
            }
            iovi_item(item) => {
                assert items_allowed;
                items.push(item)
            }
        }
        attrs = self.parse_outer_attributes();
    }
    {attrs_remaining: attrs,
     view_items: vec::from_mut(dvec::unwrap(view_items)),
     items: vec::from_mut(dvec::unwrap(items))}
}

// Parses a source module as a crate
fn parse_crate_mod(_cfg: crate_cfg) -> @crate {
    let lo = self.span.lo;
    let crate_attrs = self.parse_inner_attrs_and_next();
    let first_item_outer_attrs = crate_attrs.next;
    let m = self.parse_mod_items(token::EOF, first_item_outer_attrs);
    return @spanned(lo, self.span.lo,
                    {directives: ~[],
                     module: m,
                     attrs: crate_attrs.inner,
                     config: self.cfg});
}

// Expects a string literal token and returns its contents.
fn parse_str() -> @~str {
    match copy self.token {
        token::LIT_STR(s) => { self.bump(); self.get_str(s) }
        _ => self.fatal(~"expected string literal")
    }
}

// Logic for parsing crate files (.rc)
//
// Each crate file is a sequence of directives.
//
// Each directive imperatively extends its environment with 0 or more
// items.
fn parse_crate_directive(first_outer_attr: ~[attribute]) ->
    crate_directive {

    // Collect the next attributes
    let outer_attrs = vec::append(first_outer_attr,
                                  self.parse_outer_attributes());
    // In a crate file outer attributes are only going to apply to mods
    let expect_mod = vec::len(outer_attrs) > 0u;

    let lo = self.span.lo;
    if expect_mod || self.is_keyword(~"mod") ||
            self.is_keyword(~"module") {

        if self.is_keyword(~"mod") {
            self.expect_keyword(~"mod");
        } else {
            self.expect_keyword(~"module");
        }
        let id = self.parse_ident();
        match self.token {
            // mod x = "foo.rs";
            token::SEMI => {
                let mut hi = self.span.hi;
                self.bump();
                return spanned(lo, hi, cdir_src_mod(id, outer_attrs));
            }
            // mod x = "foo_dir" { ...directives... }
            token::LBRACE => {
                self.bump();
                let inner_attrs = self.parse_inner_attrs_and_next();
                let mod_attrs = vec::append(outer_attrs,
                                            inner_attrs.inner);
                let next_outer_attr = inner_attrs.next;
                let cdirs = self.parse_crate_directives(token::RBRACE,
                                                        next_outer_attr);
                let mut hi = self.span.hi;
                self.expect(token::RBRACE);
                return spanned(lo, hi,
                               cdir_dir_mod(id, cdirs, mod_attrs));
            }
            _ => self.unexpected()
        }
    } else if self.is_view_item() {
        let vi = self.parse_view_item(outer_attrs);
        return spanned(lo, vi.span.hi, cdir_view_item(vi));
    } else {
        return self.fatal(~"expected crate directive");
    }
}

// Parses crate directives until the terminator `term`.
fn parse_crate_directives(term: token::token,
                          first_outer_attr: ~[attribute]) ->
    ~[@crate_directive] {

    // This is pretty ugly. If we have an outer attribute then we can't
    // accept seeing the terminator next, so if we do see it then fail the
    // same way parse_crate_directive would
    if vec::len(first_outer_attr) > 0u && self.token == term {
        if self.is_keyword(~"mod") {
            self.expect_keyword(~"mod");
        } else {
            self.expect_keyword(~"module");
        }
    }

    let mut cdirs: ~[@crate_directive] = ~[];
    let mut first_outer_attr = first_outer_attr;
    while self.token != term {
        let cdir = @self.parse_crate_directive(first_outer_attr);
        vec::push(cdirs, cdir);
        first_outer_attr = ~[];
    }
    return cdirs;
}
}

//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
// --- AniList sync backend (a separate module from the parser code above) ---

use super::{AnimeEntry, AnimeInfo, Status, SyncBackend};
use chrono::{Date, Datelike, Local, NaiveDate, TimeZone};
use config::Config;
use error::BackendError;
use input;
use reqwest::header::{Accept, Authorization, Bearer, ContentType, Headers};
use reqwest::{Client, Response};
use serde_json;
use std::io;
use std::process::{Command, ExitStatus};

// OAuth page where the user authorizes the app and receives a token to paste.
const LOGIN_URL: &str =
    "https://anilist.co/api/v2/oauth/authorize?client_id=427&response_type=token";

// AniList GraphQL endpoint; every query/mutation is POSTed here.
const API_URL: &str = "https://graphql.anilist.co";

// Sends a GraphQL query with the given variables and drills into the JSON
// response along the provided `=>`-separated key path, cloning the value
// found there. Propagates request/parse errors via `?`.
macro_rules! send_query {
    ($backend:expr, $query_str:expr, {$($vars:tt)*}, $($response_root:expr)=>*) => {{
        let vars = json!({ $($vars)* });
        let json = $backend.send_json_request($query_str, &vars)?;
        json$([$response_root])*.clone()
    }};
}

pub struct Anilist {
    client: Client,
    // AniList viewer id; filled in by login()
    user_id: u32,
    // bearer token attached to every API request
    access_token: String,
}

impl Anilist {
    // POSTs a GraphQL request (query + variables) to API_URL with JSON
    // content/accept headers and the bearer access token.
    fn send_request(
        &self,
        query_str: &str,
        variables: &serde_json::Value,
    ) -> Result<Response, BackendError> {
        let body = json!({
            "query": query_str,
            "variables": variables,
        }).to_string();

        let mut headers = Headers::new();
        headers.set(ContentType::json());
        headers.set(Accept::json());
        headers.set(Authorization(Bearer {
            token: self.access_token.to_owned(),
        }));

        let response = self
            .client
            .post(API_URL)
            .headers(headers)
            .body(body)
            .send()?;

        Ok(response)
    }

    // Like send_request, but parses the body as JSON and converts any
    // GraphQL "errors" entry into a BackendError::BadResponse carrying the
    // reported status code and message.
    fn send_json_request(
        &self,
        query_str: &str,
        variables: &serde_json::Value,
    ) -> Result<serde_json::Value, BackendError> {
        let text = self.send_request(query_str, variables)?.text()?;
        let json: serde_json::Value = serde_json::from_str(&text)?;

        if json["errors"] != serde_json::Value::Null {
            // TODO: add error chaining
            // Only the first reported error is surfaced.
            let err = &json["errors"][0];

            let msg = err["message"].as_str().unwrap_or("unknown error");
            let status_code = err["status"].as_u64().unwrap_or(0) as u32;

            return Err(BackendError::BadResponse(status_code, msg.into()));
        }

        Ok(json)
    }

    // Asks the API which user the current token belongs to (Viewer query).
    fn request_user_id(&self) -> Result<u32, BackendError> {
        let resp = send_query!(self,
            r#"
            query {
                Viewer {
                    id
                }
            }
            "#,
            {},
            "data" => "Viewer" => "id"
        );
let id = serde_json::from_value(resp)?; Ok(id) } fn prompt_for_access_token(open_url: bool) -> Result<String, BackendError> { if open_url { try_open_url(LOGIN_URL); } println!( "please authorize your account in the opened browser tab and paste the code below:" ); let token = input::read_line()?; Ok(token) } fn login(&mut self, is_first_launch: bool, config: &mut Config) -> Result<(), BackendError> { let mut times_token_incorrect = 0; loop { match self.request_user_id() { Ok(user_id) => { self.user_id = user_id; break; } // As bad as checking for a specific error via its message is, the API does not provide // anything else to narrow it down to an invalid token error Err(BackendError::BadResponse(400, ref msg)) if msg.to_lowercase() == "invalid token" => { times_token_incorrect += 1; println!("\ninvalid access token"); let should_open_url = !is_first_launch && times_token_incorrect <= 1; let token = Anilist::prompt_for_access_token(should_open_url)?; self.access_token = token; } Err(err) => return Err(err), } } if times_token_incorrect > 0 { config.user.encode_access_token(&self.access_token); } Ok(()) } } impl SyncBackend for Anilist { fn init(config: &mut Config) -> Result<Anilist, BackendError> { let is_first_launch = config.user.access_token.is_none(); let access_token = if is_first_launch { let token = Anilist::prompt_for_access_token(true)?; config.user.encode_access_token(&token); token } else { config.user.decode_access_token()? 
}; let mut anilist = Anilist { client: Client::new(), user_id: 0, access_token, }; anilist.login(is_first_launch, config)?; Ok(anilist) } fn search_by_name(&self, name: &str) -> Result<Vec<AnimeInfo>, BackendError> { let resp = send_query!(self, r#" query ($name: String) { Page (page: 1, perPage: 30) { media (search: $name, type: ANIME) { id title { romaji } episodes } } } "#, { "name": name }, "data" => "Page" => "media" ); use serde_json::Value; let mut series = Vec::new(); match resp { Value::Array(ref entries) => { for entry in entries { let series_info: MediaData = serde_json::from_value(entry.clone())?; series.push(series_info.into()); } } _ => return Err(BackendError::InvalidJsonResponse), } Ok(series) } fn get_series_info_by_id(&self, id: u32) -> Result<AnimeInfo, BackendError> { let resp = send_query!(self, r#" query ($id: Int) { Media (id: $id) { id title { romaji } episodes } } "#, { "id": id }, "data" => "Media" ); let info: MediaData = serde_json::from_value(resp)?; Ok(info.into()) } fn get_list_entry(&self, info: AnimeInfo) -> Result<Option<AnimeEntry>, BackendError> { let resp = send_query!(self, r#" query ($id: Int, $userID: Int) { MediaList(mediaId: $id, userId: $userID) { progress status score startedAt { year month day } completedAt { year month day } } } "#, { "id": info.id, "userID": self.user_id }, "data" => "MediaList" ); match resp { serde_json::Value::Null => Ok(None), _ => { let media_entry: MediaListEntry = serde_json::from_value(resp)?; Ok(Some(media_entry.into_generic_entry(info))) } } } fn update_list_entry(&self, entry: &AnimeEntry) -> Result<(), BackendError> { send_query!(self, r#" mutation ($mediaId: Int, $watched_eps: Int, $score: Float, $status: MediaListStatus, $start_date: FuzzyDateInput, $finish_date: FuzzyDateInput) { SaveMediaListEntry (mediaId: $mediaId, progress: $watched_eps, score: $score, status: $status, startedAt: $start_date, completedAt: $finish_date) { mediaId } } "#, { "mediaId": entry.info.id, "watched_eps": 
entry.watched_episodes, "score": entry.score, "status": MediaStatus::from(entry.status.clone()), "start_date": MediaDate::from_date(entry.start_date), "finish_date": MediaDate::from_date(entry.finish_date), }, ); Ok(()) } fn max_score(&self) -> u8 { // TODO: add support for other scoring types 10 } } fn open_url(url: &str) -> io::Result<ExitStatus> { #[cfg(target_os = "windows")] const LAUNCH_PROGRAM: &str = "start"; #[cfg(target_os = "macos")] const LAUNCH_PROGRAM: &str = "open"; #[cfg(target_os = "linux")] const LAUNCH_PROGRAM: &str = "xdg-open"; #[cfg(not(any(target_os = "windows", target_os = "macos", target_os = "linux")))] compile_error!("support for opening URL's not implemented for this platform"); Command::new(LAUNCH_PROGRAM).arg(url).status() } fn try_open_url(url: &str) { match open_url(url) { Ok(status) if status.success() => (), result => { eprintln!( "failed to open URL in default browser. please open it manually: {}", url ); if let Err(err) = result { eprintln!("error message: {}", err); } } } } #[derive(Deserialize)] struct MediaData { id: u32, title: Title, episodes: Option<u32>, } #[derive(Deserialize)] struct Title { romaji: String, } impl Into<AnimeInfo> for MediaData { fn into(self) -> AnimeInfo { AnimeInfo { id: self.id, title: self.title.romaji, episodes: self.episodes, } } } #[derive(Deserialize)] struct MediaListEntry { progress: u32, status: MediaStatus, score: f32, #[serde(rename = "startedAt")] start_date: MediaDate, #[serde(rename = "completedAt")] finish_date: MediaDate, } impl MediaListEntry { fn into_generic_entry(self, info: AnimeInfo) -> AnimeEntry { AnimeEntry { info, watched_episodes: self.progress, score: self.score, status: self.status.into(), start_date: self.start_date.into_date(), finish_date: self.finish_date.into_date(), } } } #[derive(Serialize, Deserialize, Copy, Clone, PartialEq)] enum MediaStatus { #[serde(rename = "CURRENT")] Current, #[serde(rename = "COMPLETED")] Completed, #[serde(rename = "PAUSED")] Paused, 
#[serde(rename = "DROPPED")] Dropped, #[serde(rename = "PLANNING")] Planning, #[serde(rename = "REPEATING")] Repeating, } impl Into<Status> for MediaStatus { fn into(self) -> Status { match self { MediaStatus::Current => Status::Watching, MediaStatus::Completed => Status::Completed, MediaStatus::Paused => Status::OnHold, MediaStatus::Dropped => Status::Dropped, MediaStatus::Planning => Status::PlanToWatch, MediaStatus::Repeating => Status::Rewatching, } } } impl From<Status> for MediaStatus { fn from(status: Status) -> MediaStatus { match status { Status::Watching => MediaStatus::Current, Status::Completed => MediaStatus::Completed, Status::OnHold => MediaStatus::Paused, Status::Dropped => MediaStatus::Dropped, Status::PlanToWatch => MediaStatus::Planning, Status::Rewatching => MediaStatus::Repeating, } } } #[derive(Serialize, Deserialize)] struct MediaDate { year: Option<i32>, month: Option<u32>, day: Option<u32>, } impl MediaDate { fn into_date(self) -> Option<Date<Local>> { match (self.year, self.month, self.day) { (Some(year), Some(month), Some(day)) => Some(Local.ymd(year, month, day)), _ => None, } } fn from_date(date: Option<Date<Local>>) -> MediaDate { match date { Some(date) => MediaDate { year: Some(date.year()), month: Some(date.month()), day: Some(date.day()), }, None => MediaDate { year: None, month: None, day: None, }, } } } impl From<NaiveDate> for MediaDate { fn from(date: NaiveDate) -> MediaDate { MediaDate { year: Some(date.year()), month: Some(date.month()), day: Some(date.day()), } } } Import serde_json as "json" to improve readability use super::{AnimeEntry, AnimeInfo, Status, SyncBackend}; use chrono::{Date, Datelike, Local, NaiveDate, TimeZone}; use config::Config; use error::BackendError; use input; use reqwest::header::{Accept, Authorization, Bearer, ContentType, Headers}; use reqwest::{Client, Response}; use serde_json as json; use std::io; use std::process::{Command, ExitStatus}; const LOGIN_URL: &str = 
"https://anilist.co/api/v2/oauth/authorize?client_id=427&response_type=token"; const API_URL: &str = "https://graphql.anilist.co"; macro_rules! send_query { ($backend:expr, $query_str:expr, {$($vars:tt)*}, $($response_root:expr)=>*) => {{ let vars = json!({ $($vars)* }); let json = $backend.send_json_request($query_str, &vars)?; json$([$response_root])*.clone() }}; } pub struct Anilist { client: Client, user_id: u32, access_token: String, } impl Anilist { fn send_request( &self, query_str: &str, variables: &json::Value, ) -> Result<Response, BackendError> { let body = json!({ "query": query_str, "variables": variables, }).to_string(); let mut headers = Headers::new(); headers.set(ContentType::json()); headers.set(Accept::json()); headers.set(Authorization(Bearer { token: self.access_token.to_owned(), })); let response = self .client .post(API_URL) .headers(headers) .body(body) .send()?; Ok(response) } fn send_json_request( &self, query_str: &str, variables: &json::Value, ) -> Result<json::Value, BackendError> { let text = self.send_request(query_str, variables)?.text()?; let json: json::Value = json::from_str(&text)?; if json["errors"] != json::Value::Null { // TODO: add error chaining let err = &json["errors"][0]; let msg = err["message"].as_str().unwrap_or("unknown error"); let status_code = err["status"].as_u64().unwrap_or(0) as u32; return Err(BackendError::BadResponse(status_code, msg.into())); } Ok(json) } fn request_user_id(&self) -> Result<u32, BackendError> { let resp = send_query!(self, r#" query { Viewer { id } } "#, {}, "data" => "Viewer" => "id" ); let id = json::from_value(resp)?; Ok(id) } fn prompt_for_access_token(open_url: bool) -> Result<String, BackendError> { if open_url { try_open_url(LOGIN_URL); } println!( "please authorize your account in the opened browser tab and paste the code below:" ); let token = input::read_line()?; Ok(token) } fn login(&mut self, is_first_launch: bool, config: &mut Config) -> Result<(), BackendError> { let mut 
times_token_incorrect = 0; loop { match self.request_user_id() { Ok(user_id) => { self.user_id = user_id; break; } // As bad as checking for a specific error via its message is, the API does not provide // anything else to narrow it down to an invalid token error Err(BackendError::BadResponse(400, ref msg)) if msg.to_lowercase() == "invalid token" => { times_token_incorrect += 1; println!("\ninvalid access token"); let should_open_url = !is_first_launch && times_token_incorrect <= 1; let token = Anilist::prompt_for_access_token(should_open_url)?; self.access_token = token; } Err(err) => return Err(err), } } if times_token_incorrect > 0 { config.user.encode_access_token(&self.access_token); } Ok(()) } } impl SyncBackend for Anilist { fn init(config: &mut Config) -> Result<Anilist, BackendError> { let is_first_launch = config.user.access_token.is_none(); let access_token = if is_first_launch { let token = Anilist::prompt_for_access_token(true)?; config.user.encode_access_token(&token); token } else { config.user.decode_access_token()? 
}; let mut anilist = Anilist { client: Client::new(), user_id: 0, access_token, }; anilist.login(is_first_launch, config)?; Ok(anilist) } fn search_by_name(&self, name: &str) -> Result<Vec<AnimeInfo>, BackendError> { let resp = send_query!(self, r#" query ($name: String) { Page (page: 1, perPage: 30) { media (search: $name, type: ANIME) { id title { romaji } episodes } } } "#, { "name": name }, "data" => "Page" => "media" ); use self::json::Value; let mut series = Vec::new(); match resp { Value::Array(ref entries) => { for entry in entries { let series_info: MediaData = json::from_value(entry.clone())?; series.push(series_info.into()); } } _ => return Err(BackendError::InvalidJsonResponse), } Ok(series) } fn get_series_info_by_id(&self, id: u32) -> Result<AnimeInfo, BackendError> { let resp = send_query!(self, r#" query ($id: Int) { Media (id: $id) { id title { romaji } episodes } } "#, { "id": id }, "data" => "Media" ); let info: MediaData = json::from_value(resp)?; Ok(info.into()) } fn get_list_entry(&self, info: AnimeInfo) -> Result<Option<AnimeEntry>, BackendError> { let resp = send_query!(self, r#" query ($id: Int, $userID: Int) { MediaList(mediaId: $id, userId: $userID) { progress status score startedAt { year month day } completedAt { year month day } } } "#, { "id": info.id, "userID": self.user_id }, "data" => "MediaList" ); match resp { json::Value::Null => Ok(None), _ => { let media_entry: MediaListEntry = json::from_value(resp)?; Ok(Some(media_entry.into_generic_entry(info))) } } } fn update_list_entry(&self, entry: &AnimeEntry) -> Result<(), BackendError> { send_query!(self, r#" mutation ($mediaId: Int, $watched_eps: Int, $score: Float, $status: MediaListStatus, $start_date: FuzzyDateInput, $finish_date: FuzzyDateInput) { SaveMediaListEntry (mediaId: $mediaId, progress: $watched_eps, score: $score, status: $status, startedAt: $start_date, completedAt: $finish_date) { mediaId } } "#, { "mediaId": entry.info.id, "watched_eps": entry.watched_episodes, 
"score": entry.score, "status": MediaStatus::from(entry.status.clone()), "start_date": MediaDate::from_date(entry.start_date), "finish_date": MediaDate::from_date(entry.finish_date), }, ); Ok(()) } fn max_score(&self) -> u8 { // TODO: add support for other scoring types 10 } } fn open_url(url: &str) -> io::Result<ExitStatus> { #[cfg(target_os = "windows")] const LAUNCH_PROGRAM: &str = "start"; #[cfg(target_os = "macos")] const LAUNCH_PROGRAM: &str = "open"; #[cfg(target_os = "linux")] const LAUNCH_PROGRAM: &str = "xdg-open"; #[cfg(not(any(target_os = "windows", target_os = "macos", target_os = "linux")))] compile_error!("support for opening URL's not implemented for this platform"); Command::new(LAUNCH_PROGRAM).arg(url).status() } fn try_open_url(url: &str) { match open_url(url) { Ok(status) if status.success() => (), result => { eprintln!( "failed to open URL in default browser. please open it manually: {}", url ); if let Err(err) = result { eprintln!("error message: {}", err); } } } } #[derive(Deserialize)] struct MediaData { id: u32, title: Title, episodes: Option<u32>, } #[derive(Deserialize)] struct Title { romaji: String, } impl Into<AnimeInfo> for MediaData { fn into(self) -> AnimeInfo { AnimeInfo { id: self.id, title: self.title.romaji, episodes: self.episodes, } } } #[derive(Deserialize)] struct MediaListEntry { progress: u32, status: MediaStatus, score: f32, #[serde(rename = "startedAt")] start_date: MediaDate, #[serde(rename = "completedAt")] finish_date: MediaDate, } impl MediaListEntry { fn into_generic_entry(self, info: AnimeInfo) -> AnimeEntry { AnimeEntry { info, watched_episodes: self.progress, score: self.score, status: self.status.into(), start_date: self.start_date.into_date(), finish_date: self.finish_date.into_date(), } } } #[derive(Serialize, Deserialize, Copy, Clone, PartialEq)] enum MediaStatus { #[serde(rename = "CURRENT")] Current, #[serde(rename = "COMPLETED")] Completed, #[serde(rename = "PAUSED")] Paused, #[serde(rename = "DROPPED")] 
Dropped, #[serde(rename = "PLANNING")] Planning, #[serde(rename = "REPEATING")] Repeating, } impl Into<Status> for MediaStatus { fn into(self) -> Status { match self { MediaStatus::Current => Status::Watching, MediaStatus::Completed => Status::Completed, MediaStatus::Paused => Status::OnHold, MediaStatus::Dropped => Status::Dropped, MediaStatus::Planning => Status::PlanToWatch, MediaStatus::Repeating => Status::Rewatching, } } } impl From<Status> for MediaStatus { fn from(status: Status) -> MediaStatus { match status { Status::Watching => MediaStatus::Current, Status::Completed => MediaStatus::Completed, Status::OnHold => MediaStatus::Paused, Status::Dropped => MediaStatus::Dropped, Status::PlanToWatch => MediaStatus::Planning, Status::Rewatching => MediaStatus::Repeating, } } } #[derive(Serialize, Deserialize)] struct MediaDate { year: Option<i32>, month: Option<u32>, day: Option<u32>, } impl MediaDate { fn into_date(self) -> Option<Date<Local>> { match (self.year, self.month, self.day) { (Some(year), Some(month), Some(day)) => Some(Local.ymd(year, month, day)), _ => None, } } fn from_date(date: Option<Date<Local>>) -> MediaDate { match date { Some(date) => MediaDate { year: Some(date.year()), month: Some(date.month()), day: Some(date.day()), }, None => MediaDate { year: None, month: None, day: None, }, } } } impl From<NaiveDate> for MediaDate { fn from(date: NaiveDate) -> MediaDate { MediaDate { year: Some(date.year()), month: Some(date.month()), day: Some(date.day()), } } }
use compositor::{Server, Shell, View}; use std::rc::Rc; use std::time::Duration; use wlroots; use wlroots::events::seat_events::SetCursorEvent; use wlroots::pointer_events::ButtonEvent; use wlroots::utils::{current_time, L_DEBUG}; use wlroots::{CompositorHandle, Cursor, DragIconHandle, Origin, SeatHandle, SeatHandler, SurfaceHandle, SurfaceHandler, XCursorManager}; #[derive(Debug, Default)] pub struct SeatManager; #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum Action { /// We are moving a view. /// /// The start is the surface level coordinates of where the first click was Moving { start: Origin } } #[derive(Debug, Clone, Eq, PartialEq)] pub struct DragIcon { pub handle: DragIconHandle } #[derive(Debug, Default, Clone, Eq, PartialEq)] pub struct Seat { pub seat: SeatHandle, pub focused: Option<Rc<View>>, pub action: Option<Action>, pub has_client_cursor: bool, pub meta: bool, pub drag_icons: Vec<Rc<DragIcon>> } impl Seat { pub fn new(seat: SeatHandle) -> Seat { Seat { seat, meta: false, ..Seat::default() } } pub fn clear_focus(&mut self) { if let Some(focused_view) = self.focused.take() { focused_view.activate(false); } with_handles!([(seat: {&mut self.seat})] => { seat.keyboard_clear_focus(); }).unwrap(); } pub fn focus_view(&mut self, view: Rc<View>, views: &mut Vec<Rc<View>>) { if let Some(ref focused) = self.focused { if *focused == view { return } focused.activate(false); } self.focused = Some(view.clone()); view.activate(true); if let Some(idx) = views.iter().position(|v| *v == view) { let v = views.remove(idx); views.insert(0, v); } with_handles!([(seat: {&mut self.seat})] => { if let Some(keyboard) = seat.get_keyboard() { with_handles!([(keyboard: {keyboard}), (surface: {view.surface()})] => { seat.keyboard_notify_enter(surface, &mut keyboard.keycodes(), &mut keyboard.get_modifier_masks()); }).unwrap(); } }).unwrap(); } pub fn send_button(&self, event: &ButtonEvent) { with_handles!([(seat: {&self.seat})] => { 
seat.pointer_notify_button(Duration::from_millis(event.time_msec() as _), event.button(), event.state() as u32); }).unwrap(); } pub fn move_view<O>(&mut self, cursor: &mut Cursor, view: &View, start: O) where O: Into<Option<Origin>> { let Origin { x: shell_x, y: shell_y } = view.origin.get(); let (lx, ly) = cursor.coords(); match start.into() { None => { let (view_sx, view_sy) = (lx - shell_x as f64, ly - shell_y as f64); let start = Origin::new(view_sx as _, view_sy as _); self.action = Some(Action::Moving { start }); } Some(start) => { let pos = Origin::new(lx as i32 - start.x, ly as i32 - start.y); view.origin.replace(pos); } }; } pub fn view_at_pointer(views: &mut [Rc<View>], cursor: &mut Cursor) -> (Option<Rc<View>>, Option<SurfaceHandle>, f64, f64) { for view in views { match view.shell { Shell::XdgV6(ref shell) => { let (mut sx, mut sy) = (0.0, 0.0); let surface = with_handles!([(shell: {shell})] => { let (lx, ly) = cursor.coords(); let Origin {x: shell_x, y: shell_y} = view.origin.get(); let (view_sx, view_sy) = (lx - shell_x as f64, ly - shell_y as f64); shell.surface_at(view_sx, view_sy, &mut sx, &mut sy) }).unwrap(); if surface.is_some() { return (Some(view.clone()), surface, sx, sy) } } } } (None, None, 0.0, 0.0) } pub fn update_cursor_position(&mut self, cursor: &mut Cursor, xcursor_manager: &mut XCursorManager, views: &mut [Rc<View>], time_msec: Option<u32>) { let time = if let Some(time_msec) = time_msec { Duration::from_millis(time_msec as u64) } else { current_time() }; match self.action { Some(Action::Moving { start }) => { self.focused = self.focused.take().map(|f| { self.move_view(cursor, &f, start); f }); } _ => { let (_view, surface, sx, sy) = Seat::view_at_pointer(views, cursor); match surface { Some(surface) => { with_handles!([(surface: {surface}), (seat: {&mut self.seat})] => { seat.pointer_notify_enter(surface, sx, sy); seat.pointer_notify_motion(time, sx, sy); }).unwrap(); } None => { if self.has_client_cursor { 
xcursor_manager.set_cursor_image("left_ptr".to_string(), cursor); self.has_client_cursor = false; } with_handles!([(seat: {&mut self.seat})] => { seat.pointer_clear_focus(); }).unwrap(); } } } } } } struct DragIconHandler; impl wlroots::DragIconHandler for DragIconHandler { fn on_map(&mut self, compositor: CompositorHandle, drag_icon: DragIconHandle) { wlr_log!(L_DEBUG, "TODO: handle drag icon mapped"); } fn on_unmap(&mut self, compositor: CompositorHandle, drag_icon: DragIconHandle) { wlr_log!(L_DEBUG, "TODO: handle drag icon unmapped"); } fn destroyed(&mut self, compositor: CompositorHandle, drag_icon: DragIconHandle) { with_handles!([(compositor: {compositor})] => { let server: &mut Server = compositor.into(); let Server { ref mut seat, .. } = *server; let idx = seat.drag_icons.iter().position(|icon| { icon.handle == drag_icon }); if let Some(idx) = idx { seat.drag_icons.remove(idx); } }).unwrap(); } } impl SeatHandler for SeatManager { fn cursor_set(&mut self, compositor: CompositorHandle, _: SeatHandle, event: &SetCursorEvent) { if let Some(surface) = event.surface() { with_handles!([(compositor: {compositor}), (surface: {surface})] => { let server: &mut Server = compositor.into(); let Server { ref mut cursor, ref mut seat, .. } = *server; with_handles!([(cursor: {&mut *cursor})] => { let (hotspot_x, hotspot_y) = event.location(); let surface = &*surface; cursor.set_surface(Some(surface), hotspot_x, hotspot_y); seat.has_client_cursor = true; }).unwrap(); }).unwrap(); } } fn new_drag_icon(&mut self, compositor: CompositorHandle, seat: SeatHandle, drag_icon: DragIconHandle) -> (Option<Box<wlroots::DragIconHandler>>, Option<Box<SurfaceHandler>>) { with_handles!([(compositor: {compositor})] => { let server: &mut Server = compositor.into(); let Server { ref mut seat, .. 
} = *server; seat.drag_icons.push(Rc::new(DragIcon { handle: drag_icon })); }).unwrap(); (Some(Box::new(DragIconHandler)), None) } } impl SeatManager { pub fn new() -> Self { SeatManager::default() } } make drag icons a hashset use compositor::{Server, Shell, View}; use std::rc::Rc; use std::time::Duration; use std::collections::HashSet; use wlroots; use wlroots::events::seat_events::SetCursorEvent; use wlroots::pointer_events::ButtonEvent; use wlroots::utils::{current_time, L_DEBUG}; use wlroots::{CompositorHandle, Cursor, DragIconHandle, Origin, SeatHandle, SeatHandler, SurfaceHandle, SurfaceHandler, XCursorManager}; #[derive(Debug, Default)] pub struct SeatManager; #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum Action { /// We are moving a view. /// /// The start is the surface level coordinates of where the first click was Moving { start: Origin } } #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct DragIcon { pub handle: DragIconHandle } #[derive(Debug, Default, Clone, Eq, PartialEq)] pub struct Seat { pub seat: SeatHandle, pub focused: Option<Rc<View>>, pub action: Option<Action>, pub has_client_cursor: bool, pub meta: bool, pub drag_icons: HashSet<Rc<DragIcon>> } impl Seat { pub fn new(seat: SeatHandle) -> Seat { Seat { seat, meta: false, ..Seat::default() } } pub fn clear_focus(&mut self) { if let Some(focused_view) = self.focused.take() { focused_view.activate(false); } with_handles!([(seat: {&mut self.seat})] => { seat.keyboard_clear_focus(); }).unwrap(); } pub fn focus_view(&mut self, view: Rc<View>, views: &mut Vec<Rc<View>>) { if let Some(ref focused) = self.focused { if *focused == view { return } focused.activate(false); } self.focused = Some(view.clone()); view.activate(true); if let Some(idx) = views.iter().position(|v| *v == view) { let v = views.remove(idx); views.insert(0, v); } with_handles!([(seat: {&mut self.seat})] => { if let Some(keyboard) = seat.get_keyboard() { with_handles!([(keyboard: {keyboard}), (surface: 
{view.surface()})] => { seat.keyboard_notify_enter(surface, &mut keyboard.keycodes(), &mut keyboard.get_modifier_masks()); }).unwrap(); } }).unwrap(); } pub fn send_button(&self, event: &ButtonEvent) { with_handles!([(seat: {&self.seat})] => { seat.pointer_notify_button(Duration::from_millis(event.time_msec() as _), event.button(), event.state() as u32); }).unwrap(); } pub fn move_view<O>(&mut self, cursor: &mut Cursor, view: &View, start: O) where O: Into<Option<Origin>> { let Origin { x: shell_x, y: shell_y } = view.origin.get(); let (lx, ly) = cursor.coords(); match start.into() { None => { let (view_sx, view_sy) = (lx - shell_x as f64, ly - shell_y as f64); let start = Origin::new(view_sx as _, view_sy as _); self.action = Some(Action::Moving { start }); } Some(start) => { let pos = Origin::new(lx as i32 - start.x, ly as i32 - start.y); view.origin.replace(pos); } }; } pub fn view_at_pointer(views: &mut [Rc<View>], cursor: &mut Cursor) -> (Option<Rc<View>>, Option<SurfaceHandle>, f64, f64) { for view in views { match view.shell { Shell::XdgV6(ref shell) => { let (mut sx, mut sy) = (0.0, 0.0); let surface = with_handles!([(shell: {shell})] => { let (lx, ly) = cursor.coords(); let Origin {x: shell_x, y: shell_y} = view.origin.get(); let (view_sx, view_sy) = (lx - shell_x as f64, ly - shell_y as f64); shell.surface_at(view_sx, view_sy, &mut sx, &mut sy) }).unwrap(); if surface.is_some() { return (Some(view.clone()), surface, sx, sy) } } } } (None, None, 0.0, 0.0) } pub fn update_cursor_position(&mut self, cursor: &mut Cursor, xcursor_manager: &mut XCursorManager, views: &mut [Rc<View>], time_msec: Option<u32>) { let time = if let Some(time_msec) = time_msec { Duration::from_millis(time_msec as u64) } else { current_time() }; match self.action { Some(Action::Moving { start }) => { self.focused = self.focused.take().map(|f| { self.move_view(cursor, &f, start); f }); } _ => { let (_view, surface, sx, sy) = Seat::view_at_pointer(views, cursor); match surface { 
Some(surface) => { with_handles!([(surface: {surface}), (seat: {&mut self.seat})] => { seat.pointer_notify_enter(surface, sx, sy); seat.pointer_notify_motion(time, sx, sy); }).unwrap(); } None => { if self.has_client_cursor { xcursor_manager.set_cursor_image("left_ptr".to_string(), cursor); self.has_client_cursor = false; } with_handles!([(seat: {&mut self.seat})] => { seat.pointer_clear_focus(); }).unwrap(); } } } } } } struct DragIconHandler; impl wlroots::DragIconHandler for DragIconHandler { fn on_map(&mut self, compositor: CompositorHandle, drag_icon: DragIconHandle) { wlr_log!(L_DEBUG, "TODO: handle drag icon mapped"); } fn on_unmap(&mut self, compositor: CompositorHandle, drag_icon: DragIconHandle) { wlr_log!(L_DEBUG, "TODO: handle drag icon unmapped"); } fn destroyed(&mut self, compositor: CompositorHandle, drag_icon: DragIconHandle) { with_handles!([(compositor: {compositor})] => { let server: &mut Server = compositor.into(); server.seat.drag_icons.remove(&DragIcon{ handle: drag_icon }); }).unwrap(); } } impl SeatHandler for SeatManager { fn cursor_set(&mut self, compositor: CompositorHandle, _: SeatHandle, event: &SetCursorEvent) { if let Some(surface) = event.surface() { with_handles!([(compositor: {compositor}), (surface: {surface})] => { let server: &mut Server = compositor.into(); let Server { ref mut cursor, ref mut seat, .. } = *server; with_handles!([(cursor: {&mut *cursor})] => { let (hotspot_x, hotspot_y) = event.location(); let surface = &*surface; cursor.set_surface(Some(surface), hotspot_x, hotspot_y); seat.has_client_cursor = true; }).unwrap(); }).unwrap(); } } fn new_drag_icon(&mut self, compositor: CompositorHandle, seat: SeatHandle, drag_icon: DragIconHandle) -> (Option<Box<wlroots::DragIconHandler>>, Option<Box<SurfaceHandler>>) { with_handles!([(compositor: {compositor})] => { let server: &mut Server = compositor.into(); let Server { ref mut seat, .. 
} = *server; seat.drag_icons.insert(Rc::new(DragIcon { handle: drag_icon })); }).unwrap(); (Some(Box::new(DragIconHandler)), None) } } impl SeatManager { pub fn new() -> Self { SeatManager::default() } }
pub mod builtins; use std::collections::HashMap; use std::path::PathBuf; use std::io; use std::io::{Read, Write}; #[derive(PartialEq, Debug)] pub struct State { cwd: PathBuf, builtins: HashMap<String, builtins::Builtin>, environment: HashMap<String, String>, aliases: HashMap<String, String>, argv: Vec<String>, argc: usize, } impl State { pub fn new(cwd: String) -> State { State{ cwd: PathBuf::from(cwd), builtins: builtins::load(), environment: HashMap::new(), aliases: HashMap::new(), argv: Vec::new(), argc: 0, } } } pub fn run(mut s: State) { println!("Welcome to rsh! {:?}", s); loop { print!("{} -> ", s.cwd.to_str().unwrap()); // this forces the prompt to print io::stdout().flush(); // read the user input let mut input = String::new(); io::stdin().read_line(&mut input).unwrap(); s.argv = input.split_whitespace(). map(|s| s.to_string() ). collect(); s.argc = s.argv.len(); print!("\n"); println!("Input: {}\nState: {:?}", input, s); } } actually will run builtins pub mod builtins; use std::collections::HashMap; use std::path::PathBuf; use std::io; use std::io::{Read, Write}; use std::collections::hash_map::Entry; #[derive(PartialEq, Debug, Clone)] pub struct State { cwd: PathBuf, environment: HashMap<String, String>, aliases: HashMap<String, String>, argv: Vec<String>, argc: usize, } impl State { pub fn new(cwd: String) -> State { State{ cwd: PathBuf::from(cwd), environment: HashMap::new(), aliases: HashMap::new(), argv: Vec::new(), argc: 0, } } } pub fn run(initial_state: State) { let mut builtins = builtins::load(); let mut s = initial_state.clone(); println!("Welcome to rsh! {:?}", s); loop { print!("{} -> ", s.cwd.to_str().unwrap()); // this forces the prompt to print io::stdout().flush(); // read the user input let mut input = String::new(); io::stdin().read_line(&mut input).unwrap(); s.argv = input.split_whitespace(). map(|s| s.to_string() ). 
collect(); s.argc = s.argv.len(); print!("\n"); println!("Input: {}\nState: {:?}", input, s); let first_arg = s.argv.get(0).unwrap().clone(); if let Entry::Occupied(f) = builtins.entry(String::from(first_arg)) { let bn = f.get(); let newS = bn(s.clone()); } } }
pub mod builtins; use std::collections::HashMap; use std::path::PathBuf; use std::io; use std::io::{Read, Write}; use std::collections::hash_map::Entry; #[derive(PartialEq, Debug, Clone)] pub struct State { cwd: PathBuf, environment: HashMap<String, String>, aliases: HashMap<String, String>, argv: Vec<String>, argc: usize, } impl State { pub fn new(cwd: String) -> State { State { cwd: PathBuf::from(cwd), environment: HashMap::new(), aliases: HashMap::new(), argv: Vec::new(), argc: 0, } } } pub fn run(initial_state: State) { let mut builtins = builtins::load(); let mut s = initial_state.clone(); println!("Welcome to rsh! {:?}", s); loop { print!("\n"); print!("{} -> ", s.cwd.to_str().unwrap()); // this forces the prompt to print io::stdout().flush(); // read the user input let mut input = String::new(); io::stdin().read_line(&mut input).unwrap(); s.argv = input.split_whitespace() .map(|s| s.to_string()) .collect(); s.argc = s.argv.len(); print!("\n"); println!("Input: {}\nState: {:?}", input, s); let first_arg = s.argv.get(0).unwrap().clone(); if let Entry::Occupied(f) = builtins.entry(String::from(first_arg)) { let bn = f.get(); s = bn(s.clone()); } } } Create non-working parse_args function pub mod builtins; use std::collections::HashMap; use std::path::PathBuf; use std::io; use std::io::{Read, Write}; use std::collections::hash_map::Entry; #[derive(PartialEq, Debug, Clone)] pub struct State { cwd: PathBuf, environment: HashMap<String, String>, aliases: HashMap<String, String>, argv: Vec<String>, argc: usize, } impl State { pub fn new(cwd: String) -> State { State { cwd: PathBuf::from(cwd), environment: HashMap::new(), aliases: HashMap::new(), argv: Vec::new(), argc: 0, } } } pub fn run(initial_state: State) { let mut builtins = builtins::load(); let mut s = initial_state.clone(); println!("Welcome to rsh! 
{:?}", s); loop { print!("\n"); print!("{} -> ", s.cwd.to_str().unwrap()); // this forces the prompt to print io::stdout().flush(); // read the user input let mut input = String::new(); io::stdin().read_line(&mut input).unwrap(); s.argv = parse_args(&input); s.argc = s.argv.len(); print!("\n"); println!("Input: {}\nState: {:?}", input, s); let first_arg = s.argv.get(0).unwrap().clone(); if let Entry::Occupied(f) = builtins.entry(String::from(first_arg)) { let bn = f.get(); s = bn(s.clone()); } } } fn parse_args(args: &String) -> Vec<String> { let mut result: Vec<String> = vec!["test".to_string()]; result } #[test] fn parse_args_test() { let args = String::from("echo -n \"Hello World\""); let result = parse_args(&args); let expected = vec!["echo".to_string(), "-n".into(), "\"Hello World\"".into()]; assert_eq!(result, expected); }
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ //! This module is responsible to build a single project. It does not handle //! watch mode or other state. mod apply_transforms; mod artifact_content; pub mod artifact_writer; mod build_ir; mod build_schema; mod generate_artifacts; pub mod generate_extra_artifacts; mod is_operation_preloadable; mod log_program_stats; mod persist_operations; mod source_control; mod validate; use crate::compiler_state::{ArtifactMapKind, CompilerState, ProjectName, SourceSetName}; use crate::config::{Config, ProjectConfig}; use crate::errors::BuildProjectError; use crate::{artifact_map::ArtifactMap, graphql_asts::GraphQLAsts}; pub use apply_transforms::apply_transforms; pub use apply_transforms::Programs; use build_ir::BuildIRResult; pub use build_ir::SourceHashes; pub use build_schema::build_schema; use common::{PerfLogEvent, PerfLogger}; use fnv::{FnvHashMap, FnvHashSet}; pub use generate_artifacts::{ create_path_for_artifact, generate_artifacts, Artifact, ArtifactContent, }; use generate_extra_artifacts::generate_extra_artifacts; use graphql_ir::Program; use interner::StringKey; pub use is_operation_preloadable::is_operation_preloadable; use log::{debug, info}; use relay_codegen::Printer; use schema::SDLSchema; pub use source_control::add_to_mercurial; use std::{ collections::hash_map::Entry, path::PathBuf, sync::atomic::{AtomicBool, Ordering}, sync::Arc, }; pub use validate::{validate, AdditionalValidations}; pub enum BuildProjectFailure { Error(BuildProjectError), Cancelled, } impl From<BuildProjectError> for BuildProjectFailure { fn from(err: BuildProjectError) -> BuildProjectFailure { BuildProjectFailure::Error(err) } } /// This program doesn't have IR transforms applied to it, so it's not optimized. 
/// It's perfect for the LSP server: we have all the documents with /// their locations to provide information to go_to_definition, hover, etc. pub fn build_raw_program( project_config: &ProjectConfig, graphql_asts: &FnvHashMap<SourceSetName, GraphQLAsts>, schema: Arc<SDLSchema>, log_event: &impl PerfLogEvent, ) -> Result<Program, BuildProjectError> { let BuildIRResult { ir, .. } = log_event.time("build_ir_time", || { build_ir::build_ir(project_config, &schema, graphql_asts, false) .map_err(|errors| BuildProjectError::ValidationErrors { errors }) })?; Ok(log_event.time("build_program_time", || { Program::from_definitions(schema, ir) })) } fn build_programs( config: &Config, project_config: &ProjectConfig, compiler_state: &CompilerState, graphql_asts: &FnvHashMap<SourceSetName, GraphQLAsts>, schema: Arc<SDLSchema>, log_event: &impl PerfLogEvent, perf_logger: Arc<impl PerfLogger + 'static>, ) -> Result<(Programs, Arc<SourceHashes>), BuildProjectFailure> { let project_name = project_config.name; let is_incremental_build = compiler_state.has_processed_changes() && !compiler_state.has_breaking_schema_change(); // Build a type aware IR. let BuildIRResult { ir, base_fragment_names, source_hashes, } = log_event.time("build_ir_time", || { build_ir::build_ir(project_config, &schema, graphql_asts, is_incremental_build).map_err( |errors| BuildProjectFailure::Error(BuildProjectError::ValidationErrors { errors }), ) })?; // Turn the IR into a base Program. let program = log_event.time("build_program_time", || { Program::from_definitions(schema, ir) }); if compiler_state.should_cancel_current_build() { debug!("Build is cancelled: updates in source code/or new file changes are pending."); return Err(BuildProjectFailure::Cancelled); } // Call validation rules that go beyond type checking. 
log_event.time("validate_time", || { validate( &program, &config.connection_interface, &config.additional_validations, ) .map_err(|errors| { BuildProjectFailure::Error(BuildProjectError::ValidationErrors { errors }) }) })?; // Apply various chains of transforms to create a set of output programs. let programs = log_event.time("apply_transforms_time", || { apply_transforms( project_name, Arc::new(program), Arc::new(base_fragment_names), &config.connection_interface, Arc::new(project_config.feature_flags.unwrap_or(config.feature_flags)), perf_logger, ) .map_err(|errors| { BuildProjectFailure::Error(BuildProjectError::ValidationErrors { errors }) }) })?; Ok((programs, Arc::new(source_hashes))) } pub fn build_project( config: &Config, project_config: &ProjectConfig, compiler_state: &CompilerState, graphql_asts: &FnvHashMap<SourceSetName, GraphQLAsts>, perf_logger: Arc<impl PerfLogger + 'static>, ) -> Result<(ProjectName, Arc<SDLSchema>, Programs, Vec<Artifact>), BuildProjectFailure> { let log_event = perf_logger.create_event("build_project"); let build_time = log_event.start("build_project_time"); let project_name = project_config.name.lookup(); log_event.string("project", project_name.to_string()); info!("[{}] compiling...", project_name); // Construct a schema instance including project specific extensions. let schema = log_event .time("build_schema_time", || { Ok(build_schema(compiler_state, project_config)?) }) .map_err(|errors| { BuildProjectFailure::Error(BuildProjectError::ValidationErrors { errors }) })?; if compiler_state.should_cancel_current_build() { debug!("Build is cancelled: updates in source code/or new file changes are pending."); return Err(BuildProjectFailure::Cancelled); } // Apply different transform pipelines to produce the `Programs`. 
let (programs, source_hashes) = build_programs( config, project_config, compiler_state, graphql_asts, Arc::clone(&schema), &log_event, Arc::clone(&perf_logger), )?; if compiler_state.should_cancel_current_build() { debug!("Build is cancelled: updates in source code/or new file changes are pending."); return Err(BuildProjectFailure::Cancelled); } // Generate artifacts by collecting information from the `Programs`. let artifacts_timer = log_event.start("generate_artifacts_time"); let artifacts = generate_artifacts(project_config, &programs, Arc::clone(&source_hashes)) .map_err(BuildProjectFailure::Error)?; log_event.stop(artifacts_timer); log_event.number( "generated_artifacts", programs.reader.document_count() + programs.normalization.document_count(), ); log_event.stop(build_time); perf_logger.complete_event(log_event); Ok((project_config.name, schema, programs, artifacts)) } pub async fn commit_project( config: &Config, project_config: &ProjectConfig, perf_logger: Arc<impl PerfLogger + 'static>, schema: &SDLSchema, programs: Programs, mut artifacts: Vec<Artifact>, artifact_map: Arc<ArtifactMapKind>, // Definitions that are removed from the previous artifact map removed_definition_names: Vec<StringKey>, // Dirty artifacts that should be removed if no longer in the artifacts map mut artifacts_to_remove: FnvHashSet<PathBuf>, source_control_update_in_progress: Arc<AtomicBool>, ) -> Result<ArtifactMap, BuildProjectFailure> { let log_event = perf_logger.create_event("commit_project"); log_event.string("project", project_config.name.to_string()); let commit_time = log_event.start("commit_project_time"); if source_control_update_in_progress.load(Ordering::Relaxed) { debug!("commit_project cancelled before persisting due to source control updates"); return Err(BuildProjectFailure::Cancelled); } if let Some(ref operation_persister) = config.operation_persister { if let Some(ref persist_config) = project_config.persist { let persist_operations_timer = 
log_event.start("persist_operations_time"); persist_operations::persist_operations( &mut artifacts, &config.root_dir, &persist_config, config, &operation_persister, &log_event, ) .await?; log_event.stop(persist_operations_timer); } } if source_control_update_in_progress.load(Ordering::Relaxed) { debug!( "commit_project cancelled before generating extra artifacts due to source control updates" ); return Err(BuildProjectFailure::Cancelled); } // In some cases we need to create additional (platform specific) artifacts // For that, we will use `generate_extra_artifacts` from the configs if let Some(generate_extra_artifacts_fn) = &config.generate_extra_artifacts { log_event.time("generate_extra_artifacts_time", || { generate_extra_artifacts( schema, project_config, &mut artifacts, generate_extra_artifacts_fn, ) }); } if source_control_update_in_progress.load(Ordering::Relaxed) { debug!("commit_project cancelled before writing artifacts due to source control updates"); return Err(BuildProjectFailure::Cancelled); } let should_stop_updating_artifacts = || { if source_control_update_in_progress.load(Ordering::Relaxed) { debug!("artifact_writer updates cancelled due source control updates"); true } else { false } }; // Write the generated artifacts to disk. This step is separate from // generating artifacts or persisting to avoid partial writes in case of // errors as much as possible. 
let next_artifact_map = match Arc::as_ref(&artifact_map) { ArtifactMapKind::Unconnected(existing_artifacts) => { let mut existing_artifacts = existing_artifacts.clone(); let mut printer = Printer::with_dedupe(); let write_artifacts_time = log_event.start("write_artifacts_time"); for artifact in &artifacts { if should_stop_updating_artifacts() { break; } if !existing_artifacts.remove(&artifact.path) { info!( "[{}] NEW: {:?} -> {:?}", project_config.name, &artifact.source_definition_names, &artifact.path ); } let path = config.root_dir.join(&artifact.path); let content = artifact.content.as_bytes( config, project_config, &mut printer, schema, artifact.source_file, ); if config.artifact_writer.should_write(&path, &content)? { config.artifact_writer.write(path, content)?; } } log_event.stop(write_artifacts_time); let delete_artifacts_time = log_event.start("delete_artifacts_time"); for remaining_artifact in &existing_artifacts { if should_stop_updating_artifacts() { break; } let path = config.root_dir.join(remaining_artifact); config.artifact_writer.remove(path)?; } log_event.stop(delete_artifacts_time); ArtifactMap::from(artifacts) } ArtifactMapKind::Mapping(artifact_map) => { let mut printer = Printer::with_dedupe(); let mut artifact_map = artifact_map.clone(); let mut current_paths_map = ArtifactMap::default(); let write_artifacts_incremental_time = log_event.start("write_artifacts_incremental_time"); // Write or update artifacts for artifact in artifacts { if should_stop_updating_artifacts() { break; } let path = config.root_dir.join(&artifact.path); let content = artifact.content.as_bytes( config, project_config, &mut printer, schema, artifact.source_file, ); if config.artifact_writer.should_write(&path, &content)? 
{ config.artifact_writer.write(path, content)?; } current_paths_map.insert(artifact); } log_event.stop(write_artifacts_incremental_time); log_event.time("update_artifact_map_time", || { // All generated paths for removed definitions should be removed for name in &removed_definition_names { if let Some(artifacts) = artifact_map.0.remove(&name) { for artifact in artifacts { artifacts_to_remove.insert(artifact.path); } } } // Update the artifact map, and delete any removed artifacts for (definition_name, artifact_records) in current_paths_map.0 { match artifact_map.0.entry(definition_name) { Entry::Occupied(mut entry) => { let prev_records = entry.get_mut(); for prev_record in prev_records.drain(..) { if !artifact_records.iter().any(|t| t.path == prev_record.path) { artifacts_to_remove.insert(prev_record.path); } } prev_records.extend(artifact_records.into_iter()); } Entry::Vacant(entry) => { entry.insert(artifact_records); } } } // Filter out any artifact that is in the artifact map for artifacts in artifact_map.0.values() { for artifact in artifacts { artifacts_to_remove.remove(&artifact.path); } } }); let delete_artifacts_incremental_time = log_event.start("delete_artifacts_incremental_time"); // The remaining dirty artifacts are no longer required for path in artifacts_to_remove { if should_stop_updating_artifacts() { break; } config.artifact_writer.remove(config.root_dir.join(path))?; } log_event.stop(delete_artifacts_incremental_time); artifact_map } }; if source_control_update_in_progress.load(Ordering::Relaxed) { log_event.number("update_artifacts_after_source_control_update", 1); debug!( "We just updated artifacts after source control update happened. Most likely we have outdated artifacts now..." ); } else { // For now, lets log how often this is happening, so we can decide if we want to // adjust the way we write artifacts. For example, we could write them to the temp // directory first, then move to a correct destination. 
log_event.number("update_artifacts_after_source_control_update", 0); } info!( "[{}] compiled documents: {} reader, {} normalization, {} operation text", project_config.name, programs.reader.document_count(), programs.normalization.document_count(), programs.operation_text.document_count() ); log_event.stop(commit_time); perf_logger.complete_event(log_event); Ok(next_artifact_map) } Extract validate_program Reviewed By: tyao1 Differential Revision: D26461051 fbshipit-source-id: 8c56cc9d009611b21aba988387b8a3ac018ab48c /* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ //! This module is responsible to build a single project. It does not handle //! watch mode or other state. mod apply_transforms; mod artifact_content; pub mod artifact_writer; mod build_ir; mod build_schema; mod generate_artifacts; pub mod generate_extra_artifacts; mod is_operation_preloadable; mod log_program_stats; mod persist_operations; mod source_control; mod validate; use crate::compiler_state::{ArtifactMapKind, CompilerState, ProjectName, SourceSetName}; use crate::config::{Config, ProjectConfig}; use crate::errors::BuildProjectError; use crate::{artifact_map::ArtifactMap, graphql_asts::GraphQLAsts}; pub use apply_transforms::apply_transforms; pub use apply_transforms::Programs; use build_ir::BuildIRResult; pub use build_ir::SourceHashes; pub use build_schema::build_schema; use common::{PerfLogEvent, PerfLogger}; use fnv::{FnvHashMap, FnvHashSet}; pub use generate_artifacts::{ create_path_for_artifact, generate_artifacts, Artifact, ArtifactContent, }; use generate_extra_artifacts::generate_extra_artifacts; use graphql_ir::Program; use interner::StringKey; pub use is_operation_preloadable::is_operation_preloadable; use log::{debug, info}; use relay_codegen::Printer; use schema::SDLSchema; pub use source_control::add_to_mercurial; use std::{ 
collections::hash_map::Entry, path::PathBuf, sync::atomic::{AtomicBool, Ordering}, sync::Arc, }; pub use validate::{validate, AdditionalValidations}; pub enum BuildProjectFailure { Error(BuildProjectError), Cancelled, } impl From<BuildProjectError> for BuildProjectFailure { fn from(err: BuildProjectError) -> BuildProjectFailure { BuildProjectFailure::Error(err) } } /// This program doesn't have IR transforms applied to it, so it's not optimized. /// It's perfect for the LSP server: we have all the documents with /// their locations to provide information to go_to_definition, hover, etc. pub fn build_raw_program( project_config: &ProjectConfig, graphql_asts: &FnvHashMap<SourceSetName, GraphQLAsts>, schema: Arc<SDLSchema>, log_event: &impl PerfLogEvent, ) -> Result<Program, BuildProjectError> { let BuildIRResult { ir, .. } = log_event.time("build_ir_time", || { build_ir::build_ir(project_config, &schema, graphql_asts, false) .map_err(|errors| BuildProjectError::ValidationErrors { errors }) })?; Ok(log_event.time("build_program_time", || { Program::from_definitions(schema, ir) })) } pub fn validate_program( config: &Config, program: &Program, log_event: &impl PerfLogEvent, ) -> Result<(), BuildProjectError> { let timer = log_event.start("validate_time"); let result = validate( program, &config.connection_interface, &config.additional_validations, ) .map_err(|errors| BuildProjectError::ValidationErrors { errors }); log_event.stop(timer); result } fn build_programs( config: &Config, project_config: &ProjectConfig, compiler_state: &CompilerState, graphql_asts: &FnvHashMap<SourceSetName, GraphQLAsts>, schema: Arc<SDLSchema>, log_event: &impl PerfLogEvent, perf_logger: Arc<impl PerfLogger + 'static>, ) -> Result<(Programs, Arc<SourceHashes>), BuildProjectFailure> { let project_name = project_config.name; let is_incremental_build = compiler_state.has_processed_changes() && !compiler_state.has_breaking_schema_change(); // Build a type aware IR. 
let BuildIRResult { ir, base_fragment_names, source_hashes, } = log_event.time("build_ir_time", || { build_ir::build_ir(project_config, &schema, graphql_asts, is_incremental_build).map_err( |errors| BuildProjectFailure::Error(BuildProjectError::ValidationErrors { errors }), ) })?; // Turn the IR into a base Program. let program = log_event.time("build_program_time", || { Program::from_definitions(schema, ir) }); if compiler_state.should_cancel_current_build() { debug!("Build is cancelled: updates in source code/or new file changes are pending."); return Err(BuildProjectFailure::Cancelled); } // Call validation rules that go beyond type checking. validate_program(&config, &program, log_event)?; // Apply various chains of transforms to create a set of output programs. let programs = log_event.time("apply_transforms_time", || { apply_transforms( project_name, Arc::new(program), Arc::new(base_fragment_names), &config.connection_interface, Arc::new(project_config.feature_flags.unwrap_or(config.feature_flags)), perf_logger, ) .map_err(|errors| { BuildProjectFailure::Error(BuildProjectError::ValidationErrors { errors }) }) })?; Ok((programs, Arc::new(source_hashes))) } pub fn build_project( config: &Config, project_config: &ProjectConfig, compiler_state: &CompilerState, graphql_asts: &FnvHashMap<SourceSetName, GraphQLAsts>, perf_logger: Arc<impl PerfLogger + 'static>, ) -> Result<(ProjectName, Arc<SDLSchema>, Programs, Vec<Artifact>), BuildProjectFailure> { let log_event = perf_logger.create_event("build_project"); let build_time = log_event.start("build_project_time"); let project_name = project_config.name.lookup(); log_event.string("project", project_name.to_string()); info!("[{}] compiling...", project_name); // Construct a schema instance including project specific extensions. let schema = log_event .time("build_schema_time", || { Ok(build_schema(compiler_state, project_config)?) 
}) .map_err(|errors| { BuildProjectFailure::Error(BuildProjectError::ValidationErrors { errors }) })?; if compiler_state.should_cancel_current_build() { debug!("Build is cancelled: updates in source code/or new file changes are pending."); return Err(BuildProjectFailure::Cancelled); } // Apply different transform pipelines to produce the `Programs`. let (programs, source_hashes) = build_programs( config, project_config, compiler_state, graphql_asts, Arc::clone(&schema), &log_event, Arc::clone(&perf_logger), )?; if compiler_state.should_cancel_current_build() { debug!("Build is cancelled: updates in source code/or new file changes are pending."); return Err(BuildProjectFailure::Cancelled); } // Generate artifacts by collecting information from the `Programs`. let artifacts_timer = log_event.start("generate_artifacts_time"); let artifacts = generate_artifacts(project_config, &programs, Arc::clone(&source_hashes)) .map_err(BuildProjectFailure::Error)?; log_event.stop(artifacts_timer); log_event.number( "generated_artifacts", programs.reader.document_count() + programs.normalization.document_count(), ); log_event.stop(build_time); perf_logger.complete_event(log_event); Ok((project_config.name, schema, programs, artifacts)) } pub async fn commit_project( config: &Config, project_config: &ProjectConfig, perf_logger: Arc<impl PerfLogger + 'static>, schema: &SDLSchema, programs: Programs, mut artifacts: Vec<Artifact>, artifact_map: Arc<ArtifactMapKind>, // Definitions that are removed from the previous artifact map removed_definition_names: Vec<StringKey>, // Dirty artifacts that should be removed if no longer in the artifacts map mut artifacts_to_remove: FnvHashSet<PathBuf>, source_control_update_in_progress: Arc<AtomicBool>, ) -> Result<ArtifactMap, BuildProjectFailure> { let log_event = perf_logger.create_event("commit_project"); log_event.string("project", project_config.name.to_string()); let commit_time = log_event.start("commit_project_time"); if 
source_control_update_in_progress.load(Ordering::Relaxed) { debug!("commit_project cancelled before persisting due to source control updates"); return Err(BuildProjectFailure::Cancelled); } if let Some(ref operation_persister) = config.operation_persister { if let Some(ref persist_config) = project_config.persist { let persist_operations_timer = log_event.start("persist_operations_time"); persist_operations::persist_operations( &mut artifacts, &config.root_dir, &persist_config, config, &operation_persister, &log_event, ) .await?; log_event.stop(persist_operations_timer); } } if source_control_update_in_progress.load(Ordering::Relaxed) { debug!( "commit_project cancelled before generating extra artifacts due to source control updates" ); return Err(BuildProjectFailure::Cancelled); } // In some cases we need to create additional (platform specific) artifacts // For that, we will use `generate_extra_artifacts` from the configs if let Some(generate_extra_artifacts_fn) = &config.generate_extra_artifacts { log_event.time("generate_extra_artifacts_time", || { generate_extra_artifacts( schema, project_config, &mut artifacts, generate_extra_artifacts_fn, ) }); } if source_control_update_in_progress.load(Ordering::Relaxed) { debug!("commit_project cancelled before writing artifacts due to source control updates"); return Err(BuildProjectFailure::Cancelled); } let should_stop_updating_artifacts = || { if source_control_update_in_progress.load(Ordering::Relaxed) { debug!("artifact_writer updates cancelled due source control updates"); true } else { false } }; // Write the generated artifacts to disk. This step is separate from // generating artifacts or persisting to avoid partial writes in case of // errors as much as possible. 
let next_artifact_map = match Arc::as_ref(&artifact_map) { ArtifactMapKind::Unconnected(existing_artifacts) => { let mut existing_artifacts = existing_artifacts.clone(); let mut printer = Printer::with_dedupe(); let write_artifacts_time = log_event.start("write_artifacts_time"); for artifact in &artifacts { if should_stop_updating_artifacts() { break; } if !existing_artifacts.remove(&artifact.path) { info!( "[{}] NEW: {:?} -> {:?}", project_config.name, &artifact.source_definition_names, &artifact.path ); } let path = config.root_dir.join(&artifact.path); let content = artifact.content.as_bytes( config, project_config, &mut printer, schema, artifact.source_file, ); if config.artifact_writer.should_write(&path, &content)? { config.artifact_writer.write(path, content)?; } } log_event.stop(write_artifacts_time); let delete_artifacts_time = log_event.start("delete_artifacts_time"); for remaining_artifact in &existing_artifacts { if should_stop_updating_artifacts() { break; } let path = config.root_dir.join(remaining_artifact); config.artifact_writer.remove(path)?; } log_event.stop(delete_artifacts_time); ArtifactMap::from(artifacts) } ArtifactMapKind::Mapping(artifact_map) => { let mut printer = Printer::with_dedupe(); let mut artifact_map = artifact_map.clone(); let mut current_paths_map = ArtifactMap::default(); let write_artifacts_incremental_time = log_event.start("write_artifacts_incremental_time"); // Write or update artifacts for artifact in artifacts { if should_stop_updating_artifacts() { break; } let path = config.root_dir.join(&artifact.path); let content = artifact.content.as_bytes( config, project_config, &mut printer, schema, artifact.source_file, ); if config.artifact_writer.should_write(&path, &content)? 
{ config.artifact_writer.write(path, content)?; } current_paths_map.insert(artifact); } log_event.stop(write_artifacts_incremental_time); log_event.time("update_artifact_map_time", || { // All generated paths for removed definitions should be removed for name in &removed_definition_names { if let Some(artifacts) = artifact_map.0.remove(&name) { for artifact in artifacts { artifacts_to_remove.insert(artifact.path); } } } // Update the artifact map, and delete any removed artifacts for (definition_name, artifact_records) in current_paths_map.0 { match artifact_map.0.entry(definition_name) { Entry::Occupied(mut entry) => { let prev_records = entry.get_mut(); for prev_record in prev_records.drain(..) { if !artifact_records.iter().any(|t| t.path == prev_record.path) { artifacts_to_remove.insert(prev_record.path); } } prev_records.extend(artifact_records.into_iter()); } Entry::Vacant(entry) => { entry.insert(artifact_records); } } } // Filter out any artifact that is in the artifact map for artifacts in artifact_map.0.values() { for artifact in artifacts { artifacts_to_remove.remove(&artifact.path); } } }); let delete_artifacts_incremental_time = log_event.start("delete_artifacts_incremental_time"); // The remaining dirty artifacts are no longer required for path in artifacts_to_remove { if should_stop_updating_artifacts() { break; } config.artifact_writer.remove(config.root_dir.join(path))?; } log_event.stop(delete_artifacts_incremental_time); artifact_map } }; if source_control_update_in_progress.load(Ordering::Relaxed) { log_event.number("update_artifacts_after_source_control_update", 1); debug!( "We just updated artifacts after source control update happened. Most likely we have outdated artifacts now..." ); } else { // For now, lets log how often this is happening, so we can decide if we want to // adjust the way we write artifacts. For example, we could write them to the temp // directory first, then move to a correct destination. 
log_event.number("update_artifacts_after_source_control_update", 0); } info!( "[{}] compiled documents: {} reader, {} normalization, {} operation text", project_config.name, programs.reader.document_count(), programs.normalization.document_count(), programs.operation_text.document_count() ); log_event.stop(commit_time); perf_logger.complete_event(log_event); Ok(next_artifact_map) }
//! Note: tests specific to this file can be found in: //! //! - `ui/pattern/usefulness` //! - `ui/or-patterns` //! - `ui/consts/const_in_pattern` //! - `ui/rfc-2008-non-exhaustive` //! - `ui/half-open-range-patterns` //! - probably many others //! //! I (Nadrieril) prefer to put new tests in `ui/pattern/usefulness` unless there's a specific //! reason not to, for example if they depend on a particular feature like `or_patterns`. //! //! ----- //! //! This file includes the logic for exhaustiveness and reachability checking for pattern-matching. //! Specifically, given a list of patterns for a type, we can tell whether: //! (a) each pattern is reachable (reachability) //! (b) the patterns cover every possible value for the type (exhaustiveness) //! //! The algorithm implemented here is a modified version of the one described in [this //! paper](http://moscova.inria.fr/~maranget/papers/warn/index.html). We have however generalized //! it to accommodate the variety of patterns that Rust supports. We thus explain our version here, //! without being as rigorous. //! //! //! # Summary //! //! The core of the algorithm is the notion of "usefulness". A pattern `q` is said to be *useful* //! relative to another pattern `p` of the same type if there is a value that is matched by `q` and //! not matched by `p`. This generalizes to many `p`s: `q` is useful w.r.t. a list of patterns //! `p_1 .. p_n` if there is a value that is matched by `q` and by none of the `p_i`. We write //! `usefulness(p_1 .. p_n, q)` for a function that returns a list of such values. The aim of this //! file is to compute it efficiently. //! //! This is enough to compute reachability: a pattern in a `match` expression is reachable iff it //! is useful w.r.t. the patterns above it: //! ```rust //! match x { //! Some(_) => ..., //! None => ..., // reachable: `None` is matched by this but not the branch above //! Some(0) => ..., // unreachable: all the values this matches are already matched by //! 
// `Some(_)` above //! } //! ``` //! //! This is also enough to compute exhaustiveness: a match is exhaustive iff the wildcard `_` //! pattern is _not_ useful w.r.t. the patterns in the match. The values returned by `usefulness` //! are used to tell the user which values are missing. //! ```rust //! match x { //! Some(0) => ..., //! None => ..., //! // not exhaustive: `_` is useful because it matches `Some(1)` //! } //! ``` //! //! The entrypoint of this file is the [`compute_match_usefulness`] function, which computes //! reachability for each match branch and exhaustiveness for the whole match. //! //! //! # Constructors and fields //! //! Note: we will often abbreviate "constructor" as "ctor". //! //! The idea that powers everything that is done in this file is the following: a (matcheable) //! value is made from a constructor applied to a number of subvalues. Examples of constructors are //! `Some`, `None`, `(,)` (the 2-tuple constructor), `Foo {..}` (the constructor for a struct //! `Foo`), and `2` (the constructor for the number `2`). This is natural when we think of //! pattern-matching, and this is the basis for what follows. //! //! Some of the ctors listed above might feel weird: `None` and `2` don't take any arguments. //! That's ok: those are ctors that take a list of 0 arguments; they are the simplest case of //! ctors. We treat `2` as a ctor because `u64` and other number types behave exactly like a huge //! `enum`, with one variant for each number. This allows us to see any matcheable value as made up //! from a tree of ctors, each having a set number of children. For example: `Foo { bar: None, //! baz: Ok(0) }` is made from 4 different ctors, namely `Foo{..}`, `None`, `Ok` and `0`. //! //! This idea can be extended to patterns: they are also made from constructors applied to fields. //! A pattern for a given type is allowed to use all the ctors for values of that type (which we //! call "value constructors"), but there are also pattern-only ctors. 
The most important one is //! the wildcard (`_`), and the others are integer ranges (`0..=10`), variable-length slices (`[x, //! ..]`), and or-patterns (`Ok(0) | Err(_)`). Examples of valid patterns are `42`, `Some(_)`, `Foo //! { bar: Some(0) | None, baz: _ }`. Note that a binder in a pattern (e.g. `Some(x)`) matches the //! same values as a wildcard (e.g. `Some(_)`), so we treat both as wildcards. //! //! From this deconstruction we can compute whether a given value matches a given pattern; we //! simply look at ctors one at a time. Given a pattern `p` and a value `v`, we want to compute //! `matches!(v, p)`. It's mostly straightforward: we compare the head ctors and when they match //! we compare their fields recursively. A few representative examples: //! //! - `matches!(v, _) := true` //! - `matches!((v0, v1), (p0, p1)) := matches!(v0, p0) && matches!(v1, p1)` //! - `matches!(Foo { bar: v0, baz: v1 }, Foo { bar: p0, baz: p1 }) := matches!(v0, p0) && matches!(v1, p1)` //! - `matches!(Ok(v0), Ok(p0)) := matches!(v0, p0)` //! - `matches!(Ok(v0), Err(p0)) := false` (incompatible variants) //! - `matches!(v, 1..=100) := matches!(v, 1) || ... || matches!(v, 100)` //! - `matches!([v0], [p0, .., p1]) := false` (incompatible lengths) //! - `matches!([v0, v1, v2], [p0, .., p1]) := matches!(v0, p0) && matches!(v2, p1)` //! - `matches!(v, p0 | p1) := matches!(v, p0) || matches!(v, p1)` //! //! Constructors, fields and relevant operations are defined in the [`super::deconstruct_pat`] module. //! //! Note: this constructors/fields distinction may not straightforwardly apply to every Rust type. //! For example a value of type `Rc<u64>` can't be deconstructed that way, and `&str` has an //! infinitude of constructors. There are also subtleties with visibility of fields and //! uninhabitedness and various other things. The constructors idea can be extended to handle most //! of these subtleties though; caveats are documented where relevant throughout the code. //! //! 
Whether constructors cover each other is computed by [`Constructor::is_covered_by`].
//!
//!
//! # Specialization
//!
//! Recall that we wish to compute `usefulness(p_1 .. p_n, q)`: given a list of patterns `p_1 ..
//! p_n` and a pattern `q`, all of the same type, we want to find a list of values (called
//! "witnesses") that are matched by `q` and by none of the `p_i`. We obviously don't just
//! enumerate all possible values. From the discussion above we see that we can proceed
//! ctor-by-ctor: for each value ctor of the given type, we ask "is there a value that starts with
//! this constructor and matches `q` and none of the `p_i`?". As we saw above, there's a lot we can
//! say from knowing only the first constructor of our candidate value.
//!
//! Let's take the following example:
//! ```
//! match x {
//!     Enum::Variant1(_) => {} // `p1`
//!     Enum::Variant2(None, 0) => {} // `p2`
//!     Enum::Variant2(Some(_), 0) => {} // `q`
//! }
//! ```
//!
//! We can easily see that if our candidate value `v` starts with `Variant1` it will not match `q`.
//! If `v = Variant2(v0, v1)` however, whether or not it matches `p2` and `q` will depend on `v0`
//! and `v1`. In fact, such a `v` will be a witness of usefulness of `q` exactly when the tuple
//! `(v0, v1)` is a witness of usefulness of `q'` in the following reduced match:
//!
//! ```
//! match x {
//!     (None, 0) => {} // `p2'`
//!     (Some(_), 0) => {} // `q'`
//! }
//! ```
//!
//! This motivates a new step in computing usefulness, that we call _specialization_.
//! Specialization consists of filtering a list of patterns for those that match a constructor, and
//! then looking into the constructor's fields. This enables usefulness to be computed recursively.
//!
//! Instead of acting on a single pattern in each row, we will consider a list of patterns for each
//! row, and we call such a list a _pattern-stack_. The idea is that we will specialize the
//! leftmost pattern, which amounts to popping the constructor and pushing its fields, which feels
//! like a stack. We note a pattern-stack simply with `[p_1 ... p_n]`.
//! Here's a sequence of specializations of a list of pattern-stacks, to illustrate what's
//! happening:
//! ```
//! [Enum::Variant1(_)]
//! [Enum::Variant2(None, 0)]
//! [Enum::Variant2(Some(_), 0)]
//! //==>> specialize with `Variant2`
//! [None, 0]
//! [Some(_), 0]
//! //==>> specialize with `Some`
//! [_, 0]
//! //==>> specialize with `true` (say the type was `bool`)
//! [0]
//! //==>> specialize with `0`
//! []
//! ```
//!
//! The function `specialize(c, p)` takes a value constructor `c` and a pattern `p`, and returns 0
//! or more pattern-stacks. If `c` does not match the head constructor of `p`, it returns nothing;
//! otherwise it returns the fields of the constructor. This only returns more than one
//! pattern-stack if `p` has a pattern-only constructor.
//!
//! - Specializing for the wrong constructor returns nothing
//!
//!   `specialize(None, Some(p0)) := []`
//!
//! - Specializing for the correct constructor returns a single row with the fields
//!
//!   `specialize(Variant1, Variant1(p0, p1, p2)) := [[p0, p1, p2]]`
//!
//!   `specialize(Foo{..}, Foo { bar: p0, baz: p1 }) := [[p0, p1]]`
//!
//! - For or-patterns, we specialize each branch and concatenate the results
//!
//!   `specialize(c, p0 | p1) := specialize(c, p0) ++ specialize(c, p1)`
//!
//! - We treat the other pattern constructors as if they were a large or-pattern of all the
//!   possibilities:
//!
//!   `specialize(c, _) := specialize(c, Variant1(_) | Variant2(_, _) | ...)`
//!
//!   `specialize(c, 1..=100) := specialize(c, 1 | ... | 100)`
//!
//!   `specialize(c, [p0, .., p1]) := specialize(c, [p0, p1] | [p0, _, p1] | [p0, _, _, p1] | ...)`
//!
//! - If `c` is a pattern-only constructor, `specialize` is defined on a case-by-case basis. See
//!   the discussion about constructor splitting in [`super::deconstruct_pat`].
//!
//!
We then extend this function to work with pattern-stacks as input, by acting on the first //! column and keeping the other columns untouched. //! //! Specialization for the whole matrix is done in [`Matrix::specialize_constructor`]. Note that //! or-patterns in the first column are expanded before being stored in the matrix. Specialization //! for a single patstack is done from a combination of [`Constructor::is_covered_by`] and //! [`PatStack::pop_head_constructor`]. The internals of how it's done mostly live in the //! [`Fields`] struct. //! //! //! # Computing usefulness //! //! We now have all we need to compute usefulness. The inputs to usefulness are a list of //! pattern-stacks `p_1 ... p_n` (one per row), and a new pattern_stack `q`. The paper and this //! file calls the list of patstacks a _matrix_. They must all have the same number of columns and //! the patterns in a given column must all have the same type. `usefulness` returns a (possibly //! empty) list of witnesses of usefulness. These witnesses will also be pattern-stacks. //! //! - base case: `n_columns == 0`. //! Since a pattern-stack functions like a tuple of patterns, an empty one functions like the //! unit type. Thus `q` is useful iff there are no rows above it, i.e. if `n == 0`. //! //! - inductive case: `n_columns > 0`. //! We need a way to list the constructors we want to try. We will be more clever in the next //! section but for now assume we list all value constructors for the type of the first column. //! //! - for each such ctor `c`: //! //! - for each `q'` returned by `specialize(c, q)`: //! //! - we compute `usefulness(specialize(c, p_1) ... specialize(c, p_n), q')` //! //! - for each witness found, we revert specialization by pushing the constructor `c` on top. //! //! - We return the concatenation of all the witnesses found, if any. //! //! Example: //! ``` //! [Some(true)] // p_1 //! [None] // p_2 //! [Some(_)] // q //! //==>> try `None`: `specialize(None, q)` returns nothing //! 
//==>> try `Some`: `specialize(Some, q)` returns a single row //! [true] // p_1' //! [_] // q' //! //==>> try `true`: `specialize(true, q')` returns a single row //! [] // p_1'' //! [] // q'' //! //==>> base case; `n != 0` so `q''` is not useful. //! //==>> go back up a step //! [true] // p_1' //! [_] // q' //! //==>> try `false`: `specialize(false, q')` returns a single row //! [] // q'' //! //==>> base case; `n == 0` so `q''` is useful. We return the single witness `[]` //! witnesses: //! [] //! //==>> undo the specialization with `false` //! witnesses: //! [false] //! //==>> undo the specialization with `Some` //! witnesses: //! [Some(false)] //! //==>> we have tried all the constructors. The output is the single witness `[Some(false)]`. //! ``` //! //! This computation is done in [`is_useful`]. In practice we don't care about the list of //! witnesses when computing reachability; we only need to know whether any exist. We do keep the //! witnesses when computing exhaustiveness to report them to the user. //! //! //! # Making usefulness tractable: constructor splitting //! //! We're missing one last detail: which constructors do we list? Naively listing all value //! constructors cannot work for types like `u64` or `&str`, so we need to be more clever. The //! first obvious insight is that we only want to list constructors that are covered by the head //! constructor of `q`. If it's a value constructor, we only try that one. If it's a pattern-only //! constructor, we use the final clever idea for this algorithm: _constructor splitting_, where we //! group together constructors that behave the same. //! //! The details are not necessary to understand this file, so we explain them in //! [`super::deconstruct_pat`]. Splitting is done by the [`Constructor::split`] function. 
use self::Usefulness::*;
use self::WitnessPreference::*;

use super::deconstruct_pat::{Constructor, Fields, SplitWildcard};
use super::{Pat, PatKind};
use super::{PatternFoldable, PatternFolder};

use rustc_data_structures::captures::Captures;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::OnceCell;

use rustc_arena::TypedArena;
use rustc_hir::def_id::DefId;
use rustc_hir::HirId;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::Span;

use smallvec::{smallvec, SmallVec};

use std::fmt;
use std::iter::{FromIterator, IntoIterator};

/// Context shared by all sub-computations of usefulness for a single `match` expression.
crate struct MatchCheckCtxt<'a, 'tcx> {
    crate tcx: TyCtxt<'tcx>,
    /// The module in which the match occurs. This is necessary for
    /// checking inhabited-ness of types because whether a type is (visibly)
    /// inhabited can depend on whether it was defined in the current module or
    /// not. E.g., `struct Foo { _private: ! }` cannot be seen to be empty
    /// outside its module and should not be matchable with an empty match statement.
    crate module: DefId,
    crate param_env: ty::ParamEnv<'tcx>,
    /// Arena in which wildcard patterns created during the computation are allocated.
    crate pattern_arena: &'a TypedArena<Pat<'tcx>>,
}

impl<'a, 'tcx> MatchCheckCtxt<'a, 'tcx> {
    /// Whether `ty` is uninhabited as seen from `self.module`. Conservatively answers `false`
    /// unless the `exhaustive_patterns` feature gate is enabled.
    pub(super) fn is_uninhabited(&self, ty: Ty<'tcx>) -> bool {
        if self.tcx.features().exhaustive_patterns {
            self.tcx.is_ty_uninhabited_from(self.module, ty, self.param_env)
        } else {
            false
        }
    }

    /// Returns whether the given type is an enum from another crate declared `#[non_exhaustive]`.
    pub(super) fn is_foreign_non_exhaustive_enum(&self, ty: Ty<'tcx>) -> bool {
        match ty.kind() {
            ty::Adt(def, ..) => {
                def.is_enum() && def.is_variant_list_non_exhaustive() && !def.did.is_local()
            }
            _ => false,
        }
    }
}

/// Context for a single column of the matrix during the usefulness computation.
#[derive(Copy, Clone)]
pub(super) struct PatCtxt<'a, 'p, 'tcx> {
    pub(super) cx: &'a MatchCheckCtxt<'p, 'tcx>,
    /// Type of the current column under investigation.
    pub(super) ty: Ty<'tcx>,
    /// Span of the current pattern under investigation.
    pub(super) span: Span,
    /// Whether the current pattern is the whole pattern as found in a match arm, or if it's a
    /// subpattern.
    pub(super) is_top_level: bool,
}

impl<'a, 'p, 'tcx> fmt::Debug for PatCtxt<'a, 'p, 'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PatCtxt").field("ty", &self.ty).finish()
    }
}

/// Folds a pattern into the normalized form the usefulness algorithm expects (see
/// `LiteralExpander` below).
crate fn expand_pattern<'tcx>(pat: Pat<'tcx>) -> Pat<'tcx> {
    LiteralExpander.fold_pattern(&pat)
}

struct LiteralExpander;

impl<'tcx> PatternFolder<'tcx> for LiteralExpander {
    fn fold_pattern(&mut self, pat: &Pat<'tcx>) -> Pat<'tcx> {
        debug!("fold_pattern {:?} {:?} {:?}", pat, pat.ty.kind(), pat.kind);
        match (pat.ty.kind(), pat.kind.as_ref()) {
            // Bindings and type ascriptions are transparent for the algorithm: fold into the
            // subpattern directly.
            (_, PatKind::Binding { subpattern: Some(s), .. }) => s.fold_with(self),
            (_, PatKind::AscribeUserType { subpattern: s, .. }) => s.fold_with(self),
            (ty::Ref(_, t, _), PatKind::Constant { .. }) if t.is_str() => {
                // Treat string literal patterns as deref patterns to a `str` constant, i.e.
                // `&CONST`. This expands them like other const patterns. This could have been done
                // in `const_to_pat`, but that causes issues with the rest of the matching code.
                let mut new_pat = pat.super_fold_with(self);
                // Make a fake const pattern of type `str` (instead of `&str`). Note that the
                // carried constant value still knows it is of type `&str`.
                new_pat.ty = t;
                Pat {
                    kind: Box::new(PatKind::Deref { subpattern: new_pat }),
                    span: pat.span,
                    ty: pat.ty,
                }
            }
            _ => pat.super_fold_with(self),
        }
    }
}

impl<'tcx> Pat<'tcx> {
    pub(super) fn is_wildcard(&self) -> bool {
        matches!(*self.kind, PatKind::Binding { subpattern: None, .. } | PatKind::Wild)
    }

    fn is_or_pat(&self) -> bool {
        matches!(*self.kind, PatKind::Or { .. })
    }

    /// Recursively expand this pattern into its subpatterns. Only useful for or-patterns.
    fn expand_or_pat(&self) -> Vec<&Self> {
        // Recursive helper: flattens arbitrarily-nested or-patterns into `vec`, pushing
        // non-or-patterns as-is.
        fn expand<'p, 'tcx>(pat: &'p Pat<'tcx>, vec: &mut Vec<&'p Pat<'tcx>>) {
            if let PatKind::Or { pats } = pat.kind.as_ref() {
                for pat in pats {
                    expand(pat, vec);
                }
            } else {
                vec.push(pat)
            }
        }

        let mut pats = Vec::new();
        expand(self, &mut pats);
        pats
    }
}

/// A row of a matrix. Rows of len 1 are very common, which is why `SmallVec[_; 2]`
/// works well.
#[derive(Clone)]
struct PatStack<'p, 'tcx> {
    pats: SmallVec<[&'p Pat<'tcx>; 2]>,
    /// Cache for the constructor of the head
    head_ctor: OnceCell<Constructor<'tcx>>,
}

impl<'p, 'tcx> PatStack<'p, 'tcx> {
    fn from_pattern(pat: &'p Pat<'tcx>) -> Self {
        Self::from_vec(smallvec![pat])
    }

    fn from_vec(vec: SmallVec<[&'p Pat<'tcx>; 2]>) -> Self {
        PatStack { pats: vec, head_ctor: OnceCell::new() }
    }

    fn is_empty(&self) -> bool {
        self.pats.is_empty()
    }

    fn len(&self) -> usize {
        self.pats.len()
    }

    /// The leftmost pattern of the stack. Panics if `self` is empty.
    fn head(&self) -> &'p Pat<'tcx> {
        self.pats[0]
    }

    /// Constructor of the leftmost pattern; computed lazily and cached in `self.head_ctor`.
    fn head_ctor<'a>(&'a self, cx: &MatchCheckCtxt<'p, 'tcx>) -> &'a Constructor<'tcx> {
        self.head_ctor.get_or_init(|| Constructor::from_pat(cx, self.head()))
    }

    fn iter(&self) -> impl Iterator<Item = &Pat<'tcx>> {
        self.pats.iter().copied()
    }

    // Recursively expand the first pattern into its subpatterns. Only useful if the pattern is an
    // or-pattern. Panics if `self` is empty.
    fn expand_or_pat<'a>(&'a self) -> impl Iterator<Item = PatStack<'p, 'tcx>> + Captures<'a> {
        self.head().expand_or_pat().into_iter().map(move |pat| {
            let mut new_patstack = PatStack::from_pattern(pat);
            new_patstack.pats.extend_from_slice(&self.pats[1..]);
            new_patstack
        })
    }

    /// This computes `S(self.head_ctor(), self)`. See top of the file for explanations.
    ///
    /// Structure patterns with a partial wild pattern (Foo { a: 42, .. }) have their missing
    /// fields filled with wild patterns.
    ///
    /// This is roughly the inverse of `Constructor::apply`.
    fn pop_head_constructor(&self, ctor_wild_subpatterns: &Fields<'p, 'tcx>) -> PatStack<'p, 'tcx> {
        // We pop the head pattern and push the new fields extracted from the arguments of
        // `self.head()`.
        let mut new_fields =
            ctor_wild_subpatterns.replace_with_pattern_arguments(self.head()).into_patterns();
        new_fields.extend_from_slice(&self.pats[1..]);
        PatStack::from_vec(new_fields)
    }
}

impl<'p, 'tcx> Default for PatStack<'p, 'tcx> {
    fn default() -> Self {
        Self::from_vec(smallvec![])
    }
}

impl<'p, 'tcx> PartialEq for PatStack<'p, 'tcx> {
    fn eq(&self, other: &Self) -> bool {
        // Equality is on the patterns only; the cached `head_ctor` is deliberately ignored.
        self.pats == other.pats
    }
}

impl<'p, 'tcx> FromIterator<&'p Pat<'tcx>> for PatStack<'p, 'tcx> {
    fn from_iter<T>(iter: T) -> Self
    where
        T: IntoIterator<Item = &'p Pat<'tcx>>,
    {
        Self::from_vec(iter.into_iter().collect())
    }
}

/// Pretty-printing for matrix row.
impl<'p, 'tcx> fmt::Debug for PatStack<'p, 'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "+")?;
        for pat in self.iter() {
            write!(f, " {} +", pat)?;
        }
        Ok(())
    }
}

/// A 2D matrix.
#[derive(Clone, PartialEq)]
pub(super) struct Matrix<'p, 'tcx> {
    patterns: Vec<PatStack<'p, 'tcx>>,
}

impl<'p, 'tcx> Matrix<'p, 'tcx> {
    fn empty() -> Self {
        Matrix { patterns: vec![] }
    }

    /// Number of columns of this matrix. `None` if the matrix is empty.
    pub(super) fn column_count(&self) -> Option<usize> {
        self.patterns.get(0).map(|r| r.len())
    }

    /// Pushes a new row to the matrix. If the row starts with an or-pattern, this recursively
    /// expands it.
    fn push(&mut self, row: PatStack<'p, 'tcx>) {
        if !row.is_empty() && row.head().is_or_pat() {
            for row in row.expand_or_pat() {
                self.patterns.push(row);
            }
        } else {
            self.patterns.push(row);
        }
    }

    /// Iterate over the first component of each row
    fn heads<'a>(&'a self) -> impl Iterator<Item = &'a Pat<'tcx>> + Captures<'p> {
        self.patterns.iter().map(|r| r.head())
    }

    /// Iterate over the first constructor of each row.
    pub(super) fn head_ctors<'a>(
        &'a self,
        cx: &'a MatchCheckCtxt<'p, 'tcx>,
    ) -> impl Iterator<Item = &'a Constructor<'tcx>> + Captures<'p> + Clone {
        self.patterns.iter().map(move |r| r.head_ctor(cx))
    }

    /// Iterate over the first constructor and the corresponding span of each row.
    pub(super) fn head_ctors_and_spans<'a>(
        &'a self,
        cx: &'a MatchCheckCtxt<'p, 'tcx>,
    ) -> impl Iterator<Item = (&'a Constructor<'tcx>, Span)> + Captures<'p> {
        self.patterns.iter().map(move |r| (r.head_ctor(cx), r.head().span))
    }

    /// This computes `S(constructor, self)`. See top of the file for explanations.
    fn specialize_constructor(
        &self,
        pcx: PatCtxt<'_, 'p, 'tcx>,
        ctor: &Constructor<'tcx>,
        ctor_wild_subpatterns: &Fields<'p, 'tcx>,
    ) -> Matrix<'p, 'tcx> {
        // Keep only the rows whose head constructor is covered by `ctor`, then replace each head
        // with its fields. Collecting through `FromIterator` re-expands any or-patterns that
        // surface in the new first column.
        self.patterns
            .iter()
            .filter(|r| ctor.is_covered_by(pcx, r.head_ctor(pcx.cx)))
            .map(|r| r.pop_head_constructor(ctor_wild_subpatterns))
            .collect()
    }
}

/// Pretty-printer for matrices of patterns, example:
///
/// ```text
/// + _     + []                +
/// + true  + [First]           +
/// + true  + [Second(true)]    +
/// + false + [_]               +
/// + _     + [_, _, tail @ ..] +
/// ```
impl<'p, 'tcx> fmt::Debug for Matrix<'p, 'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "\n")?;

        let Matrix { patterns: m, .. } = self;
        let pretty_printed_matrix: Vec<Vec<String>> =
            m.iter().map(|row| row.iter().map(|pat| format!("{}", pat)).collect()).collect();

        let column_count = m.iter().map(|row| row.len()).next().unwrap_or(0);
        assert!(m.iter().all(|row| row.len() == column_count));
        // Width of each column is the width of its widest cell, so the `+` separators line up.
        let column_widths: Vec<usize> = (0..column_count)
            .map(|col| pretty_printed_matrix.iter().map(|row| row[col].len()).max().unwrap_or(0))
            .collect();

        for row in pretty_printed_matrix {
            write!(f, "+")?;
            for (column, pat_str) in row.into_iter().enumerate() {
                write!(f, " ")?;
                write!(f, "{:1$}", pat_str, column_widths[column])?;
                write!(f, " +")?;
            }
            write!(f, "\n")?;
        }
        Ok(())
    }
}

impl<'p, 'tcx> FromIterator<PatStack<'p, 'tcx>> for Matrix<'p, 'tcx> {
    fn from_iter<T>(iter: T) -> Self
    where
        T: IntoIterator<Item = PatStack<'p, 'tcx>>,
    {
        let mut matrix = Matrix::empty();
        for x in iter {
            // Using `push` ensures we correctly expand or-patterns.
            matrix.push(x);
        }
        matrix
    }
}

/// Given a pattern or a pattern-stack, this struct captures a set of its subpatterns. We use that
/// to track reachable sub-patterns arising from or-patterns. In the absence of or-patterns this
/// will always be either `Empty` (the whole pattern is unreachable) or `Full` (the whole pattern
/// is reachable). When there are or-patterns, some subpatterns may be reachable while others
/// aren't. In this case the whole pattern still counts as reachable, but we will lint the
/// unreachable subpatterns.
///
/// This supports a limited set of operations, so not all possible sets of subpatterns can be
/// represented. That's ok, we only want the ones that make sense for our usage.
///
/// What we're doing is illustrated by this:
/// ```
/// match (true, 0) {
///     (true, 0) => {}
///     (_, 1) => {}
///     (true | false, 0 | 1) => {}
/// }
/// ```
/// When we try the alternatives of the `true | false` or-pattern, the last `0` is reachable in the
/// `false` alternative but not the `true`. So overall it is reachable.
/// By contrast, the last `1`
/// is not reachable in either alternative, so we want to signal this to the user.
/// Therefore we take the union of sets of reachable patterns coming from different alternatives in
/// order to figure out which subpatterns are overall reachable.
///
/// Invariant: we try to construct the smallest representation we can. In particular if
/// `self.is_empty()` we ensure that `self` is `Empty`, and same with `Full`. This is not important
/// for correctness currently.
#[derive(Debug, Clone)]
enum SubPatSet<'p, 'tcx> {
    /// The empty set. This means the pattern is unreachable.
    Empty,
    /// The set containing the full pattern.
    Full,
    /// If the pattern is a pattern with a constructor or a pattern-stack, we store a set for each
    /// of its subpatterns. Missing entries in the map are implicitly full, because that's the
    /// common case.
    Seq { subpats: FxHashMap<usize, SubPatSet<'p, 'tcx>> },
    /// If the pattern is an or-pattern, we store a set for each of its alternatives. Missing
    /// entries in the map are implicitly empty. Note: we always flatten nested or-patterns.
    Alt {
        subpats: FxHashMap<usize, SubPatSet<'p, 'tcx>>,
        /// Counts the total number of alternatives in the pattern
        alt_count: usize,
        /// We keep the pattern around to retrieve spans.
        pat: &'p Pat<'tcx>,
    },
}

impl<'p, 'tcx> SubPatSet<'p, 'tcx> {
    fn full() -> Self {
        SubPatSet::Full
    }
    fn empty() -> Self {
        SubPatSet::Empty
    }

    fn is_empty(&self) -> bool {
        match self {
            SubPatSet::Empty => true,
            SubPatSet::Full => false,
            // If any subpattern in a sequence is unreachable, the whole pattern is unreachable.
            SubPatSet::Seq { subpats } => subpats.values().any(|set| set.is_empty()),
            // An or-pattern is reachable if any of its alternatives is.
            SubPatSet::Alt { subpats, .. } => subpats.values().all(|set| set.is_empty()),
        }
    }

    fn is_full(&self) -> bool {
        match self {
            SubPatSet::Empty => false,
            SubPatSet::Full => true,
            // The whole pattern is reachable only when all its subpatterns are.
            SubPatSet::Seq { subpats } => subpats.values().all(|sub_set| sub_set.is_full()),
            // The whole or-pattern is reachable only when all its alternatives are.
            SubPatSet::Alt { subpats, alt_count, .. } => {
                subpats.len() == *alt_count && subpats.values().all(|set| set.is_full())
            }
        }
    }

    /// Union `self` with `other`, mutating `self`.
    fn union(&mut self, other: Self) {
        use SubPatSet::*;
        // Union with full stays full; union with empty changes nothing.
        if self.is_full() || other.is_empty() {
            return;
        } else if self.is_empty() {
            *self = other;
            return;
        } else if other.is_full() {
            *self = Full;
            return;
        }

        match (&mut *self, other) {
            (Seq { subpats: s_set }, Seq { subpats: mut o_set }) => {
                s_set.retain(|i, s_sub_set| {
                    // Missing entries count as full.
                    let o_sub_set = o_set.remove(&i).unwrap_or(Full);
                    s_sub_set.union(o_sub_set);
                    // We drop full entries.
                    !s_sub_set.is_full()
                });
                // Everything left in `o_set` is missing from `s_set`, i.e. counts as full. Since
                // unioning with full returns full, we can drop those entries.
            }
            (Alt { subpats: s_set, .. }, Alt { subpats: mut o_set, .. }) => {
                s_set.retain(|i, s_sub_set| {
                    // Missing entries count as empty.
                    let o_sub_set = o_set.remove(&i).unwrap_or(Empty);
                    s_sub_set.union(o_sub_set);
                    // We drop empty entries.
                    !s_sub_set.is_empty()
                });
                // Everything left in `o_set` is missing from `s_set`, i.e. counts as empty. Since
                // unioning with empty changes nothing, we can take those entries as is.
                s_set.extend(o_set);
            }
            _ => bug!(),
        }

        if self.is_full() {
            *self = Full;
        }
    }

    /// Returns a list of the spans of the unreachable subpatterns. If `self` is empty (i.e. the
    /// whole pattern is unreachable) we return `None`.
    fn list_unreachable_spans(&self) -> Option<Vec<Span>> {
        /// Panics if `set.is_empty()`.
        fn fill_spans(set: &SubPatSet<'_, '_>, spans: &mut Vec<Span>) {
            match set {
                SubPatSet::Empty => bug!(),
                SubPatSet::Full => {}
                SubPatSet::Seq { subpats } => {
                    for (_, sub_set) in subpats {
                        fill_spans(sub_set, spans);
                    }
                }
                SubPatSet::Alt { subpats, pat, alt_count, .. } => {
                    let expanded = pat.expand_or_pat();
                    for i in 0..*alt_count {
                        let sub_set = subpats.get(&i).unwrap_or(&SubPatSet::Empty);
                        if sub_set.is_empty() {
                            // Found an unreachable subpattern.
                            spans.push(expanded[i].span);
                        } else {
                            fill_spans(sub_set, spans);
                        }
                    }
                }
            }
        }

        if self.is_empty() {
            return None;
        }
        if self.is_full() {
            // No subpatterns are unreachable.
            return Some(Vec::new());
        }
        let mut spans = Vec::new();
        fill_spans(self, &mut spans);
        Some(spans)
    }

    /// When `self` refers to a patstack that was obtained from specialization, after running
    /// `unspecialize` it will refer to the original patstack before specialization.
    fn unspecialize(self, arity: usize) -> Self {
        use SubPatSet::*;
        match self {
            Full => Full,
            Empty => Empty,
            Seq { subpats } => {
                // We gather the first `arity` subpatterns together and shift the remaining ones.
                let mut new_subpats = FxHashMap::default();
                let mut new_subpats_first_col = FxHashMap::default();
                for (i, sub_set) in subpats {
                    if i < arity {
                        // The first `arity` indices are now part of the pattern in the first
                        // column.
                        new_subpats_first_col.insert(i, sub_set);
                    } else {
                        // Indices after `arity` are simply shifted
                        new_subpats.insert(i - arity + 1, sub_set);
                    }
                }
                // If `new_subpats_first_col` has no entries it counts as full, so we can omit it.
                if !new_subpats_first_col.is_empty() {
                    new_subpats.insert(0, Seq { subpats: new_subpats_first_col });
                }
                Seq { subpats: new_subpats }
            }
            Alt { .. } => bug!(), // `self` is a patstack
        }
    }

    /// When `self` refers to a patstack that was obtained from splitting an or-pattern, after
    /// running `unsplit_or_pat` it will refer to the original patstack before splitting.
    ///
    /// For example:
    /// ```
    /// match Some(true) {
    ///     Some(true) => {}
    ///     None | Some(true | false) => {}
    /// }
    /// ```
    /// Here `None` would return the full set and `Some(true | false)` would return the set
    /// containing `false`. After `unsplit_or_pat`, we want the set to contain `None` and `false`.
    /// This is what this function does.
    fn unsplit_or_pat(mut self, alt_id: usize, alt_count: usize, pat: &'p Pat<'tcx>) -> Self {
        use SubPatSet::*;
        if self.is_empty() {
            return Empty;
        }

        // Subpatterns coming from inside the or-pattern alternative itself, e.g. in `None | Some(0
        // | 1)`.
        let set_first_col = match &mut self {
            Full => Full,
            Seq { subpats } => subpats.remove(&0).unwrap_or(Full),
            Empty => unreachable!(),
            Alt { .. } => bug!(), // `self` is a patstack
        };
        let mut subpats_first_col = FxHashMap::default();
        subpats_first_col.insert(alt_id, set_first_col);
        let set_first_col = Alt { subpats: subpats_first_col, pat, alt_count };

        // Subpatterns coming from the other columns of the patstack are kept, shifted behind the
        // rebuilt first-column entry.
        let mut subpats = match self {
            Full => FxHashMap::default(),
            Seq { subpats } => subpats,
            Empty => unreachable!(),
            Alt { .. } => bug!(), // `self` is a patstack
        };
        subpats.insert(0, set_first_col);
        Seq { subpats }
    }
}

/// This carries the results of computing usefulness, as described at the top of the file. When
/// checking usefulness of a match branch, we use the `NoWitnesses` variant, which also keeps track
/// of potential unreachable sub-patterns (in the presence of or-patterns). When checking
/// exhaustiveness of a whole match, we use the `WithWitnesses` variant, which carries a list of
/// witnesses of non-exhaustiveness when there are any.
/// Which variant to use is dictated by `WitnessPreference`.
#[derive(Clone, Debug)]
enum Usefulness<'p, 'tcx> {
    /// Carries a set of subpatterns that have been found to be reachable. If empty, this indicates
    /// the whole pattern is unreachable. If not, this indicates that the pattern is reachable but
    /// that some sub-patterns may be unreachable (due to or-patterns).
    /// In the absence of
    /// or-patterns this will always be either `Empty` (the whole pattern is unreachable) or `Full`
    /// (the whole pattern is reachable).
    NoWitnesses(SubPatSet<'p, 'tcx>),
    /// Carries a list of witnesses of non-exhaustiveness. If empty, indicates that the whole
    /// pattern is unreachable.
    WithWitnesses(Vec<Witness<'tcx>>),
}

impl<'p, 'tcx> Usefulness<'p, 'tcx> {
    /// A "useful" result: the single empty witness, or the full subpattern set.
    fn new_useful(preference: WitnessPreference) -> Self {
        match preference {
            ConstructWitness => WithWitnesses(vec![Witness(vec![])]),
            LeaveOutWitness => NoWitnesses(SubPatSet::full()),
        }
    }
    /// A "not useful" result: no witnesses, or the empty subpattern set.
    fn new_not_useful(preference: WitnessPreference) -> Self {
        match preference {
            ConstructWitness => WithWitnesses(vec![]),
            LeaveOutWitness => NoWitnesses(SubPatSet::empty()),
        }
    }

    /// Combine usefulnesses from two branches. This is an associative operation.
    fn extend(&mut self, other: Self) {
        match (&mut *self, other) {
            (WithWitnesses(_), WithWitnesses(o)) if o.is_empty() => {}
            (WithWitnesses(s), WithWitnesses(o)) if s.is_empty() => *self = WithWitnesses(o),
            (WithWitnesses(s), WithWitnesses(o)) => s.extend(o),
            (NoWitnesses(s), NoWitnesses(o)) => s.union(o),
            // Mixing the two variants is a caller bug; both sides always share a preference.
            _ => unreachable!(),
        }
    }

    /// When trying several branches and each returns a `Usefulness`, we need to combine the
    /// results together.
    fn merge(pref: WitnessPreference, usefulnesses: impl Iterator<Item = Self>) -> Self {
        let mut ret = Self::new_not_useful(pref);
        for u in usefulnesses {
            ret.extend(u);
            if let NoWitnesses(subpats) = &ret {
                if subpats.is_full() {
                    // Once we reach the full set, more unions won't change the result.
                    return ret;
                }
            }
        }
        ret
    }

    /// After calculating the usefulness for a branch of an or-pattern, call this to make this
    /// usefulness mergeable with those from the other branches.
    fn unsplit_or_pat(self, alt_id: usize, alt_count: usize, pat: &'p Pat<'tcx>) -> Self {
        match self {
            NoWitnesses(subpats) => NoWitnesses(subpats.unsplit_or_pat(alt_id, alt_count, pat)),
            // Or-pattern branches are only split when checking reachability, never when
            // collecting witnesses.
            WithWitnesses(_) => bug!(),
        }
    }

    /// After calculating usefulness after a specialization, call this to reconstruct a usefulness
    /// that makes sense for the matrix pre-specialization. This new usefulness can then be merged
    /// with the results of specializing with the other constructors.
    fn apply_constructor(
        self,
        pcx: PatCtxt<'_, 'p, 'tcx>,
        matrix: &Matrix<'p, 'tcx>, // used to compute missing ctors
        ctor: &Constructor<'tcx>,
        ctor_wild_subpatterns: &Fields<'p, 'tcx>,
    ) -> Self {
        match self {
            WithWitnesses(witnesses) if witnesses.is_empty() => WithWitnesses(witnesses),
            WithWitnesses(witnesses) => {
                let new_witnesses = if matches!(ctor, Constructor::Missing) {
                    let mut split_wildcard = SplitWildcard::new(pcx);
                    split_wildcard.split(pcx, matrix.head_ctors(pcx.cx));
                    // Construct for each missing constructor a "wild" version of this
                    // constructor, that matches everything that can be built with
                    // it. For example, if `ctor` is a `Constructor::Variant` for
                    // `Option::Some`, we get the pattern `Some(_)`.
                    let new_patterns: Vec<_> = split_wildcard
                        .iter_missing(pcx)
                        .map(|missing_ctor| {
                            Fields::wildcards(pcx, missing_ctor).apply(pcx, missing_ctor)
                        })
                        .collect();
                    // Cross product: each existing witness is extended once per missing ctor.
                    witnesses
                        .into_iter()
                        .flat_map(|witness| {
                            new_patterns.iter().map(move |pat| {
                                let mut witness = witness.clone();
                                witness.0.push(pat.clone());
                                witness
                            })
                        })
                        .collect()
                } else {
                    witnesses
                        .into_iter()
                        .map(|witness| witness.apply_constructor(pcx, &ctor, ctor_wild_subpatterns))
                        .collect()
                };
                WithWitnesses(new_witnesses)
            }
            NoWitnesses(subpats) => NoWitnesses(subpats.unspecialize(ctor_wild_subpatterns.len())),
        }
    }
}

/// Whether the usefulness computation should build witnesses of non-exhaustiveness
/// (`ConstructWitness`) or only track reachability (`LeaveOutWitness`).
#[derive(Copy, Clone, Debug)]
enum WitnessPreference {
    ConstructWitness,
    LeaveOutWitness,
}

/// A witness of non-exhaustiveness for error reporting, represented
/// as a list of patterns (in reverse order of construction) with
/// wildcards inside to represent elements that can take any inhabitant
/// of the type as a value.
///
/// A witness against a list of patterns should have the same types
/// and length as the pattern matched against. Because Rust `match`
/// is always against a single pattern, at the end the witness will
/// have length 1, but in the middle of the algorithm, it can contain
/// multiple patterns.
///
/// For example, if we are constructing a witness for the match against
///
/// ```
/// struct Pair(Option<(u32, u32)>, bool);
///
/// match (p: Pair) {
///     Pair(None, _) => {}
///     Pair(_, false) => {}
/// }
/// ```
///
/// We'll perform the following steps:
/// 1. Start with an empty witness
///     `Witness(vec![])`
/// 2. Push a witness `true` against the `false`
///     `Witness(vec![true])`
/// 3. Push a witness `Some(_)` against the `None`
///     `Witness(vec![true, Some(_)])`
/// 4. Apply the `Pair` constructor to the witnesses
///     `Witness(vec![Pair(Some(_), true)])`
///
/// The final `Pair(Some(_), true)` is then the resulting witness.
#[derive(Clone, Debug)]
crate struct Witness<'tcx>(Vec<Pat<'tcx>>);

impl<'tcx> Witness<'tcx> {
    /// Asserts that the witness contains a single pattern, and returns it.
    fn single_pattern(self) -> Pat<'tcx> {
        assert_eq!(self.0.len(), 1);
        self.0.into_iter().next().unwrap()
    }

    /// Constructs a partial witness for a pattern given a list of
    /// patterns expanded by the specialization step.
    ///
    /// When a pattern P is discovered to be useful, this function is used bottom-up
    /// to reconstruct a complete witness, e.g., a pattern P' that covers a subset
    /// of values, V, where each value in that set is not covered by any previously
    /// used patterns and is covered by the pattern P'. Examples:
    ///
    /// left_ty: tuple of 3 elements
    /// pats: [10, 20, _]           => (10, 20, _)
    ///
    /// left_ty: struct X { a: (bool, &'static str), b: usize}
    /// pats: [(false, "foo"), 42]  => X { a: (false, "foo"), b: 42 }
    fn apply_constructor<'p>(
        mut self,
        pcx: PatCtxt<'_, 'p, 'tcx>,
        ctor: &Constructor<'tcx>,
        ctor_wild_subpatterns: &Fields<'p, 'tcx>,
    ) -> Self {
        let pat = {
            let len = self.0.len();
            let arity = ctor_wild_subpatterns.len();
            // The last `arity` patterns are the ctor's fields (in reverse push order); wrap them
            // back up into a single pattern with head `ctor`.
            let pats = self.0.drain((len - arity)..).rev();
            ctor_wild_subpatterns.replace_fields(pcx.cx, pats).apply(pcx, ctor)
        };

        self.0.push(pat);

        self
    }
}

/// Algorithm from <http://moscova.inria.fr/~maranget/papers/warn/index.html>.
/// The algorithm from the paper has been modified to correctly handle empty
/// types. The changes are:
///   (0) We don't exit early if the pattern matrix has zero rows. We just
///       continue to recurse over columns.
///   (1) all_constructors will only return constructors that are statically
///       possible. E.g., it will only return `Ok` for `Result<T, !>`.
///
/// This finds whether a (row) vector `v` of patterns is 'useful' in relation
/// to a set of such vectors `m` - this is defined as there being a set of
/// inputs that will match `v` but not any of the sets in `m`.
///
/// All the patterns at each column of the `matrix ++ v` matrix must have the same type.
///
/// This is used both for reachability checking (if a pattern isn't useful in
/// relation to preceding patterns, it is not reachable) and exhaustiveness
/// checking (if a wildcard pattern is useful in relation to a matrix, the
/// matrix isn't exhaustive).
///
/// `is_under_guard` is used to inform if the pattern has a guard. If it
/// has one it must not be inserted into the matrix. This shouldn't be
/// relied on for soundness.
#[instrument(
    level = "debug",
    skip(cx, matrix, witness_preference, hir_id, is_under_guard, is_top_level)
)]
fn is_useful<'p, 'tcx>(
    cx: &MatchCheckCtxt<'p, 'tcx>,
    matrix: &Matrix<'p, 'tcx>,
    v: &PatStack<'p, 'tcx>,
    witness_preference: WitnessPreference,
    hir_id: HirId,
    is_under_guard: bool,
    is_top_level: bool,
) -> Usefulness<'p, 'tcx> {
    debug!("matrix,v={:?}{:?}", matrix, v);
    let Matrix { patterns: rows, .. } = matrix;

    // The base case. We are pattern-matching on () and the return value is
    // based on whether our matrix has a row or not.
    // NOTE: This could potentially be optimized by checking rows.is_empty()
    // first and then, if v is non-empty, the return value is based on whether
    // the type of the tuple we're checking is inhabited or not.
    if v.is_empty() {
        let ret = if rows.is_empty() {
            Usefulness::new_useful(witness_preference)
        } else {
            Usefulness::new_not_useful(witness_preference)
        };
        debug!(?ret);
        return ret;
    }

    assert!(rows.iter().all(|r| r.len() == v.len()));

    // FIXME(Nadrieril): Hack to work around type normalization issues (see #72476).
    let ty = matrix.heads().next().map_or(v.head().ty, |r| r.ty);
    let pcx = PatCtxt { cx, ty, span: v.head().span, is_top_level };

    // If the first pattern is an or-pattern, expand it.
    let ret = if v.head().is_or_pat() {
        debug!("expanding or-pattern");
        let v_head = v.head();
        let vs: Vec<_> = v.expand_or_pat().collect();
        let alt_count = vs.len();
        // We try each or-pattern branch in turn.
        let mut matrix = matrix.clone();
        let usefulnesses = vs.into_iter().enumerate().map(|(i, v)| {
            let usefulness =
                is_useful(cx, &matrix, &v, witness_preference, hir_id, is_under_guard, false);
            // If pattern has a guard don't add it to the matrix.
            if !is_under_guard {
                // We push the already-seen patterns into the matrix in order to detect redundant
                // branches like `Some(_) | Some(0)`.
                matrix.push(v);
            }
            usefulness.unsplit_or_pat(i, alt_count, v_head)
        });
        Usefulness::merge(witness_preference, usefulnesses)
    } else {
        let v_ctor = v.head_ctor(cx);
        if let Constructor::IntRange(ctor_range) = &v_ctor {
            // Lint on likely incorrect range patterns (#63987)
            ctor_range.lint_overlapping_range_endpoints(
                pcx,
                matrix.head_ctors_and_spans(cx),
                matrix.column_count().unwrap_or(0),
                hir_id,
            )
        }
        // We split the head constructor of `v`.
        let split_ctors = v_ctor.split(pcx, matrix.head_ctors(cx));
        // For each constructor, we compute whether there's a value that starts with it that would
        // witness the usefulness of `v`.
        let start_matrix = &matrix;
        let usefulnesses = split_ctors.into_iter().map(|ctor| {
            debug!("specialize({:?})", ctor);
            // We cache the result of `Fields::wildcards` because it is used a lot.
            let ctor_wild_subpatterns = Fields::wildcards(pcx, &ctor);
            let spec_matrix =
                start_matrix.specialize_constructor(pcx, &ctor, &ctor_wild_subpatterns);
            let v = v.pop_head_constructor(&ctor_wild_subpatterns);
            let usefulness =
                is_useful(cx, &spec_matrix, &v, witness_preference, hir_id, is_under_guard, false);
            usefulness.apply_constructor(pcx, start_matrix, &ctor, &ctor_wild_subpatterns)
        });
        Usefulness::merge(witness_preference, usefulnesses)
    };
    debug!(?ret);
    ret
}

/// The arm of a match expression.
#[derive(Clone, Copy)]
crate struct MatchArm<'p, 'tcx> {
    /// The pattern must have been lowered through `check_match::MatchVisitor::lower_pattern`.
    crate pat: &'p super::Pat<'tcx>,
    crate hir_id: HirId,
    crate has_guard: bool,
}

/// Indicates whether or not a given arm is reachable.
#[derive(Clone, Debug)]
crate enum Reachability {
    /// The arm is reachable. This additionally carries a set of or-pattern branches that have been
    /// found to be unreachable despite the overall arm being reachable. Used only in the presence
    /// of or-patterns, otherwise it stays empty.
    Reachable(Vec<Span>),
    /// The arm is unreachable.
    Unreachable,
}

/// The output of checking a match for exhaustiveness and arm reachability.
crate struct UsefulnessReport<'p, 'tcx> {
    /// For each arm of the input, whether that arm is reachable after the arms above it.
    crate arm_usefulness: Vec<(MatchArm<'p, 'tcx>, Reachability)>,
    /// If the match is exhaustive, this is empty. If not, this contains witnesses for the lack of
    /// exhaustiveness.
    crate non_exhaustiveness_witnesses: Vec<super::Pat<'tcx>>,
}

/// The entrypoint for the usefulness algorithm. Computes whether a match is exhaustive and which
/// of its arms are reachable.
///
/// Note: the input patterns must have been lowered through
/// `check_match::MatchVisitor::lower_pattern`.
crate fn compute_match_usefulness<'p, 'tcx>(
    cx: &MatchCheckCtxt<'p, 'tcx>,
    arms: &[MatchArm<'p, 'tcx>],
    scrut_hir_id: HirId,
    scrut_ty: Ty<'tcx>,
) -> UsefulnessReport<'p, 'tcx> {
    let mut matrix = Matrix::empty();
    let arm_usefulness: Vec<_> = arms
        .iter()
        .copied()
        .map(|arm| {
            let v = PatStack::from_pattern(arm.pat);
            let usefulness =
                is_useful(cx, &matrix, &v, LeaveOutWitness, arm.hir_id, arm.has_guard, true);
            // Guarded arms do not block later arms, so they are not pushed into the matrix.
            if !arm.has_guard {
                matrix.push(v);
            }
            let reachability = match usefulness {
                // An empty set of reachable subpatterns means the whole arm is unreachable.
                NoWitnesses(subpats) if subpats.is_empty() => Reachability::Unreachable,
                NoWitnesses(subpats) => {
                    Reachability::Reachable(subpats.list_unreachable_spans().unwrap())
                }
                // `LeaveOutWitness` was requested, so witnesses are never constructed here.
                WithWitnesses(..) => bug!(),
            };
            (arm, reachability)
        })
        .collect();

    // Exhaustiveness check: a wildcard of the scrutinee type is useful w.r.t. the final matrix
    // iff the match is non-exhaustive; the returned witnesses are the missing patterns.
    let wild_pattern = cx.pattern_arena.alloc(super::Pat::wildcard_from_ty(scrut_ty));
    let v = PatStack::from_pattern(wild_pattern);
    let usefulness = is_useful(cx, &matrix, &v, ConstructWitness, scrut_hir_id, false, true);
    let non_exhaustiveness_witnesses = match usefulness {
        WithWitnesses(pats) => pats.into_iter().map(|w| w.single_pattern()).collect(),
        // `ConstructWitness` was requested, so a `NoWitnesses` result is impossible here.
        NoWitnesses(_) => bug!(),
    };
    UsefulnessReport { arm_usefulness, non_exhaustiveness_witnesses }
}

Rollup merge of #82155 - tmiasko:once, r=matthewjasper

Use !Sync std::lazy::OnceCell in usefulness checking

The `rustc_data_structures::sync::OnceCell` is thread-safe when building a parallel compiler. This is unnecessary for the purposes of pattern usefulness checking. Use `!Sync` `std::lazy::OnceCell` instead.

//! Note: tests specific to this file can be found in:
//!
//!   - `ui/pattern/usefulness`
//!   - `ui/or-patterns`
//!   - `ui/consts/const_in_pattern`
//!   - `ui/rfc-2008-non-exhaustive`
//!   - `ui/half-open-range-patterns`
//!   - probably many others
//!
//! I (Nadrieril) prefer to put new tests in `ui/pattern/usefulness` unless there's a specific
//! reason not to, for example if they depend on a particular feature like `or_patterns`.
//!
//! -----
//!
//! This file includes the logic for exhaustiveness and reachability checking for pattern-matching.
//! Specifically, given a list of patterns for a type, we can tell whether:
//! (a) each pattern is reachable (reachability)
//! (b) the patterns cover every possible value for the type (exhaustiveness)
//!
//! The algorithm implemented here is a modified version of the one described in [this
//! paper](http://moscova.inria.fr/~maranget/papers/warn/index.html). We have however generalized
//! it to accommodate the variety of patterns that Rust supports. We thus explain our version here,
//! without being as rigorous.
//!
//!
//! # Summary
//!
//! The core of the algorithm is the notion of "usefulness".
A pattern `q` is said to be *useful* //! relative to another pattern `p` of the same type if there is a value that is matched by `q` and //! not matched by `p`. This generalizes to many `p`s: `q` is useful w.r.t. a list of patterns //! `p_1 .. p_n` if there is a value that is matched by `q` and by none of the `p_i`. We write //! `usefulness(p_1 .. p_n, q)` for a function that returns a list of such values. The aim of this //! file is to compute it efficiently. //! //! This is enough to compute reachability: a pattern in a `match` expression is reachable iff it //! is useful w.r.t. the patterns above it: //! ```rust //! match x { //! Some(_) => ..., //! None => ..., // reachable: `None` is matched by this but not the branch above //! Some(0) => ..., // unreachable: all the values this matches are already matched by //! // `Some(_)` above //! } //! ``` //! //! This is also enough to compute exhaustiveness: a match is exhaustive iff the wildcard `_` //! pattern is _not_ useful w.r.t. the patterns in the match. The values returned by `usefulness` //! are used to tell the user which values are missing. //! ```rust //! match x { //! Some(0) => ..., //! None => ..., //! // not exhaustive: `_` is useful because it matches `Some(1)` //! } //! ``` //! //! The entrypoint of this file is the [`compute_match_usefulness`] function, which computes //! reachability for each match branch and exhaustiveness for the whole match. //! //! //! # Constructors and fields //! //! Note: we will often abbreviate "constructor" as "ctor". //! //! The idea that powers everything that is done in this file is the following: a (matcheable) //! value is made from a constructor applied to a number of subvalues. Examples of constructors are //! `Some`, `None`, `(,)` (the 2-tuple constructor), `Foo {..}` (the constructor for a struct //! `Foo`), and `2` (the constructor for the number `2`). This is natural when we think of //! pattern-matching, and this is the basis for what follows. //! //! 
Some of the ctors listed above might feel weird: `None` and `2` don't take any arguments. //! That's ok: those are ctors that take a list of 0 arguments; they are the simplest case of //! ctors. We treat `2` as a ctor because `u64` and other number types behave exactly like a huge //! `enum`, with one variant for each number. This allows us to see any matcheable value as made up //! from a tree of ctors, each having a set number of children. For example: `Foo { bar: None, //! baz: Ok(0) }` is made from 4 different ctors, namely `Foo{..}`, `None`, `Ok` and `0`. //! //! This idea can be extended to patterns: they are also made from constructors applied to fields. //! A pattern for a given type is allowed to use all the ctors for values of that type (which we //! call "value constructors"), but there are also pattern-only ctors. The most important one is //! the wildcard (`_`), and the others are integer ranges (`0..=10`), variable-length slices (`[x, //! ..]`), and or-patterns (`Ok(0) | Err(_)`). Examples of valid patterns are `42`, `Some(_)`, `Foo //! { bar: Some(0) | None, baz: _ }`. Note that a binder in a pattern (e.g. `Some(x)`) matches the //! same values as a wildcard (e.g. `Some(_)`), so we treat both as wildcards. //! //! From this deconstruction we can compute whether a given value matches a given pattern; we //! simply look at ctors one at a time. Given a pattern `p` and a value `v`, we want to compute //! `matches!(v, p)`. It's mostly straightforward: we compare the head ctors and when they match //! we compare their fields recursively. A few representative examples: //! //! - `matches!(v, _) := true` //! - `matches!((v0, v1), (p0, p1)) := matches!(v0, p0) && matches!(v1, p1)` //! - `matches!(Foo { bar: v0, baz: v1 }, Foo { bar: p0, baz: p1 }) := matches!(v0, p0) && matches!(v1, p1)` //! - `matches!(Ok(v0), Ok(p0)) := matches!(v0, p0)` //! - `matches!(Ok(v0), Err(p0)) := false` (incompatible variants) //! - `matches!(v, 1..=100) := matches!(v, 1) || ... 
|| matches!(v, 100)` //! - `matches!([v0], [p0, .., p1]) := false` (incompatible lengths) //! - `matches!([v0, v1, v2], [p0, .., p1]) := matches!(v0, p0) && matches!(v2, p1)` //! - `matches!(v, p0 | p1) := matches!(v, p0) || matches!(v, p1)` //! //! Constructors, fields and relevant operations are defined in the [`super::deconstruct_pat`] module. //! //! Note: this constructors/fields distinction may not straightforwardly apply to every Rust type. //! For example a value of type `Rc<u64>` can't be deconstructed that way, and `&str` has an //! infinitude of constructors. There are also subtleties with visibility of fields and //! uninhabitedness and various other things. The constructors idea can be extended to handle most //! of these subtleties though; caveats are documented where relevant throughout the code. //! //! Whether constructors cover each other is computed by [`Constructor::is_covered_by`]. //! //! //! # Specialization //! //! Recall that we wish to compute `usefulness(p_1 .. p_n, q)`: given a list of patterns `p_1 .. //! p_n` and a pattern `q`, all of the same type, we want to find a list of values (called //! "witnesses") that are matched by `q` and by none of the `p_i`. We obviously don't just //! enumerate all possible values. From the discussion above we see that we can proceed //! ctor-by-ctor: for each value ctor of the given type, we ask "is there a value that starts with //! this constructor and matches `q` and none of the `p_i`?". As we saw above, there's a lot we can //! say from knowing only the first constructor of our candidate value. //! //! Let's take the following example: //! ``` //! match x { //! Enum::Variant1(_) => {} // `p1` //! Enum::Variant2(None, 0) => {} // `p2` //! Enum::Variant2(Some(_), 0) => {} // `q` //! } //! ``` //! //! We can easily see that if our candidate value `v` starts with `Variant1` it will not match `q`. //! If `v = Variant2(v0, v1)` however, whether or not it matches `p2` and `q` will depend on `v0` //! 
//! and `v1`. In fact, such a `v` will be a witness of usefulness of `q` exactly when the tuple
//! `(v0, v1)` is a witness of usefulness of `q'` in the following reduced match:
//!
//! ```
//! match x {
//!     (None, 0) => {} // `p2'`
//!     (Some(_), 0) => {} // `q'`
//! }
//! ```
//!
//! This motivates a new step in computing usefulness, that we call _specialization_.
//! Specialization consists of filtering a list of patterns for those that match a constructor, and
//! then looking into the constructor's fields. This enables usefulness to be computed recursively.
//!
//! Instead of acting on a single pattern in each row, we will consider a list of patterns for each
//! row, and we call such a list a _pattern-stack_. The idea is that we will specialize the
//! leftmost pattern, which amounts to popping the constructor and pushing its fields, which feels
//! like a stack. We note a pattern-stack simply with `[p_1 ... p_n]`.
//! Here's a sequence of specializations of a list of pattern-stacks, to illustrate what's
//! happening:
//! ```
//! [Enum::Variant1(_)]
//! [Enum::Variant2(None, 0)]
//! [Enum::Variant2(Some(_), 0)]
//! //==>> specialize with `Variant2`
//! [None, 0]
//! [Some(_), 0]
//! //==>> specialize with `Some`
//! [_, 0]
//! //==>> specialize with `true` (say the type was `bool`)
//! [0]
//! //==>> specialize with `0`
//! []
//! ```
//!
//! The function `specialize(c, p)` takes a value constructor `c` and a pattern `p`, and returns 0
//! or more pattern-stacks. If `c` does not match the head constructor of `p`, it returns nothing;
//! otherwise it returns the fields of the constructor. This only returns more than one
//! pattern-stack if `p` has a pattern-only constructor.
//!
//! - Specializing for the wrong constructor returns nothing
//!
//!   `specialize(None, Some(p0)) := []`
//!
//! - Specializing for the correct constructor returns a single row with the fields
//!
//!   `specialize(Variant1, Variant1(p0, p1, p2)) := [[p0, p1, p2]]`
//!
//!
`specialize(Foo{..}, Foo { bar: p0, baz: p1 }) := [[p0, p1]]` //! //! - For or-patterns, we specialize each branch and concatenate the results //! //! `specialize(c, p0 | p1) := specialize(c, p0) ++ specialize(c, p1)` //! //! - We treat the other pattern constructors as if they were a large or-pattern of all the //! possibilities: //! //! `specialize(c, _) := specialize(c, Variant1(_) | Variant2(_, _) | ...)` //! //! `specialize(c, 1..=100) := specialize(c, 1 | ... | 100)` //! //! `specialize(c, [p0, .., p1]) := specialize(c, [p0, p1] | [p0, _, p1] | [p0, _, _, p1] | ...)` //! //! - If `c` is a pattern-only constructor, `specialize` is defined on a case-by-case basis. See //! the discussion about constructor splitting in [`super::deconstruct_pat`]. //! //! //! We then extend this function to work with pattern-stacks as input, by acting on the first //! column and keeping the other columns untouched. //! //! Specialization for the whole matrix is done in [`Matrix::specialize_constructor`]. Note that //! or-patterns in the first column are expanded before being stored in the matrix. Specialization //! for a single patstack is done from a combination of [`Constructor::is_covered_by`] and //! [`PatStack::pop_head_constructor`]. The internals of how it's done mostly live in the //! [`Fields`] struct. //! //! //! # Computing usefulness //! //! We now have all we need to compute usefulness. The inputs to usefulness are a list of //! pattern-stacks `p_1 ... p_n` (one per row), and a new pattern_stack `q`. The paper and this //! file calls the list of patstacks a _matrix_. They must all have the same number of columns and //! the patterns in a given column must all have the same type. `usefulness` returns a (possibly //! empty) list of witnesses of usefulness. These witnesses will also be pattern-stacks. //! //! - base case: `n_columns == 0`. //! Since a pattern-stack functions like a tuple of patterns, an empty one functions like the //! unit type. 
Thus `q` is useful iff there are no rows above it, i.e. if `n == 0`. //! //! - inductive case: `n_columns > 0`. //! We need a way to list the constructors we want to try. We will be more clever in the next //! section but for now assume we list all value constructors for the type of the first column. //! //! - for each such ctor `c`: //! //! - for each `q'` returned by `specialize(c, q)`: //! //! - we compute `usefulness(specialize(c, p_1) ... specialize(c, p_n), q')` //! //! - for each witness found, we revert specialization by pushing the constructor `c` on top. //! //! - We return the concatenation of all the witnesses found, if any. //! //! Example: //! ``` //! [Some(true)] // p_1 //! [None] // p_2 //! [Some(_)] // q //! //==>> try `None`: `specialize(None, q)` returns nothing //! //==>> try `Some`: `specialize(Some, q)` returns a single row //! [true] // p_1' //! [_] // q' //! //==>> try `true`: `specialize(true, q')` returns a single row //! [] // p_1'' //! [] // q'' //! //==>> base case; `n != 0` so `q''` is not useful. //! //==>> go back up a step //! [true] // p_1' //! [_] // q' //! //==>> try `false`: `specialize(false, q')` returns a single row //! [] // q'' //! //==>> base case; `n == 0` so `q''` is useful. We return the single witness `[]` //! witnesses: //! [] //! //==>> undo the specialization with `false` //! witnesses: //! [false] //! //==>> undo the specialization with `Some` //! witnesses: //! [Some(false)] //! //==>> we have tried all the constructors. The output is the single witness `[Some(false)]`. //! ``` //! //! This computation is done in [`is_useful`]. In practice we don't care about the list of //! witnesses when computing reachability; we only need to know whether any exist. We do keep the //! witnesses when computing exhaustiveness to report them to the user. //! //! //! # Making usefulness tractable: constructor splitting //! //! We're missing one last detail: which constructors do we list? Naively listing all value //! 
//! constructors cannot work for types like `u64` or `&str`, so we need to be more clever. The
//! first obvious insight is that we only want to list constructors that are covered by the head
//! constructor of `q`. If it's a value constructor, we only try that one. If it's a pattern-only
//! constructor, we use the final clever idea for this algorithm: _constructor splitting_, where we
//! group together constructors that behave the same.
//!
//! The details are not necessary to understand this file, so we explain them in
//! [`super::deconstruct_pat`]. Splitting is done by the [`Constructor::split`] function.

use self::Usefulness::*;
use self::WitnessPreference::*;

use super::deconstruct_pat::{Constructor, Fields, SplitWildcard};
use super::{Pat, PatKind};
use super::{PatternFoldable, PatternFolder};

use rustc_data_structures::captures::Captures;
use rustc_data_structures::fx::FxHashMap;

use rustc_arena::TypedArena;
use rustc_hir::def_id::DefId;
use rustc_hir::HirId;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::Span;

use smallvec::{smallvec, SmallVec};

use std::fmt;
use std::iter::{FromIterator, IntoIterator};
use std::lazy::OnceCell;

/// Context shared by all the usefulness computations for one `match` expression.
crate struct MatchCheckCtxt<'a, 'tcx> {
    crate tcx: TyCtxt<'tcx>,
    /// The module in which the match occurs. This is necessary for
    /// checking inhabited-ness of types because whether a type is (visibly)
    /// inhabited can depend on whether it was defined in the current module or
    /// not. E.g., `struct Foo { _private: ! }` cannot be seen to be empty
    /// outside its module and should not be matchable with an empty match statement.
    crate module: DefId,
    crate param_env: ty::ParamEnv<'tcx>,
    /// Arena in which the patterns manipulated during checking are allocated.
    crate pattern_arena: &'a TypedArena<Pat<'tcx>>,
}

impl<'a, 'tcx> MatchCheckCtxt<'a, 'tcx> {
    /// Whether `ty` is uninhabited as seen from `self.module`. Conservatively returns `false`
    /// unless the `exhaustive_patterns` feature is enabled.
    pub(super) fn is_uninhabited(&self, ty: Ty<'tcx>) -> bool {
        if self.tcx.features().exhaustive_patterns {
            self.tcx.is_ty_uninhabited_from(self.module, ty, self.param_env)
        } else {
            false
        }
    }

    /// Returns whether the given type is an enum from another crate declared `#[non_exhaustive]`.
    pub(super) fn is_foreign_non_exhaustive_enum(&self, ty: Ty<'tcx>) -> bool {
        match ty.kind() {
            ty::Adt(def, ..) => {
                def.is_enum() && def.is_variant_list_non_exhaustive() && !def.did.is_local()
            }
            _ => false,
        }
    }
}

#[derive(Copy, Clone)]
pub(super) struct PatCtxt<'a, 'p, 'tcx> {
    pub(super) cx: &'a MatchCheckCtxt<'p, 'tcx>,
    /// Type of the current column under investigation.
    pub(super) ty: Ty<'tcx>,
    /// Span of the current pattern under investigation.
    pub(super) span: Span,
    /// Whether the current pattern is the whole pattern as found in a match arm, or if it's a
    /// subpattern.
    pub(super) is_top_level: bool,
}

impl<'a, 'p, 'tcx> fmt::Debug for PatCtxt<'a, 'p, 'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PatCtxt").field("ty", &self.ty).finish()
    }
}

/// Normalizes a pattern before usefulness checking (see `LiteralExpander` below).
crate fn expand_pattern<'tcx>(pat: Pat<'tcx>) -> Pat<'tcx> {
    LiteralExpander.fold_pattern(&pat)
}

struct LiteralExpander;

impl<'tcx> PatternFolder<'tcx> for LiteralExpander {
    fn fold_pattern(&mut self, pat: &Pat<'tcx>) -> Pat<'tcx> {
        debug!("fold_pattern {:?} {:?} {:?}", pat, pat.ty.kind(), pat.kind);
        match (pat.ty.kind(), pat.kind.as_ref()) {
            // Strip bindings and type ascriptions; only the underlying subpattern matters here.
            (_, PatKind::Binding { subpattern: Some(s), .. }) => s.fold_with(self),
            (_, PatKind::AscribeUserType { subpattern: s, .. }) => s.fold_with(self),
            (ty::Ref(_, t, _), PatKind::Constant { .. }) if t.is_str() => {
                // Treat string literal patterns as deref patterns to a `str` constant, i.e.
                // `&CONST`. This expands them like other const patterns.
                // This could have been done
                // in `const_to_pat`, but that causes issues with the rest of the matching code.
                let mut new_pat = pat.super_fold_with(self);
                // Make a fake const pattern of type `str` (instead of `&str`); the carried
                // constant value still knows it is of type `&str`.
                new_pat.ty = t;
                Pat {
                    kind: Box::new(PatKind::Deref { subpattern: new_pat }),
                    span: pat.span,
                    ty: pat.ty,
                }
            }
            _ => pat.super_fold_with(self),
        }
    }
}

impl<'tcx> Pat<'tcx> {
    // A binding without a subpattern matches everything, so it counts as a wildcard here.
    pub(super) fn is_wildcard(&self) -> bool {
        matches!(*self.kind, PatKind::Binding { subpattern: None, .. } | PatKind::Wild)
    }

    fn is_or_pat(&self) -> bool {
        matches!(*self.kind, PatKind::Or { .. })
    }

    /// Recursively expand this pattern into its subpatterns. Only useful for or-patterns.
    fn expand_or_pat(&self) -> Vec<&Self> {
        /// Depth-first flattening of (possibly nested) or-patterns into `vec`.
        fn expand<'p, 'tcx>(pat: &'p Pat<'tcx>, vec: &mut Vec<&'p Pat<'tcx>>) {
            if let PatKind::Or { pats } = pat.kind.as_ref() {
                for pat in pats {
                    expand(pat, vec);
                }
            } else {
                vec.push(pat)
            }
        }

        let mut pats = Vec::new();
        expand(self, &mut pats);
        pats
    }
}

/// A row of a matrix. Rows of len 1 are very common, which is why `SmallVec[_; 2]`
/// works well.
#[derive(Clone)]
struct PatStack<'p, 'tcx> {
    pats: SmallVec<[&'p Pat<'tcx>; 2]>,
    /// Cache for the constructor of the head
    head_ctor: OnceCell<Constructor<'tcx>>,
}

impl<'p, 'tcx> PatStack<'p, 'tcx> {
    fn from_pattern(pat: &'p Pat<'tcx>) -> Self {
        Self::from_vec(smallvec![pat])
    }

    fn from_vec(vec: SmallVec<[&'p Pat<'tcx>; 2]>) -> Self {
        PatStack { pats: vec, head_ctor: OnceCell::new() }
    }

    fn is_empty(&self) -> bool {
        self.pats.is_empty()
    }

    fn len(&self) -> usize {
        self.pats.len()
    }

    fn head(&self) -> &'p Pat<'tcx> {
        self.pats[0]
    }

    /// The constructor of the head pattern, computed lazily and cached in `head_ctor`.
    fn head_ctor<'a>(&'a self, cx: &MatchCheckCtxt<'p, 'tcx>) -> &'a Constructor<'tcx> {
        self.head_ctor.get_or_init(|| Constructor::from_pat(cx, self.head()))
    }

    fn iter(&self) -> impl Iterator<Item = &Pat<'tcx>> {
        self.pats.iter().copied()
    }

    // Recursively expand the first pattern into its subpatterns. Only useful if the pattern is an
    // or-pattern. Panics if `self` is empty.
    fn expand_or_pat<'a>(&'a self) -> impl Iterator<Item = PatStack<'p, 'tcx>> + Captures<'a> {
        self.head().expand_or_pat().into_iter().map(move |pat| {
            // Each alternative keeps the remaining (non-head) patterns of this row.
            let mut new_patstack = PatStack::from_pattern(pat);
            new_patstack.pats.extend_from_slice(&self.pats[1..]);
            new_patstack
        })
    }

    /// This computes `S(self.head_ctor(), self)`. See top of the file for explanations.
    ///
    /// Structure patterns with a partial wild pattern (Foo { a: 42, .. }) have their missing
    /// fields filled with wild patterns.
    ///
    /// This is roughly the inverse of `Constructor::apply`.
    fn pop_head_constructor(&self, ctor_wild_subpatterns: &Fields<'p, 'tcx>) -> PatStack<'p, 'tcx> {
        // We pop the head pattern and push the new fields extracted from the arguments of
        // `self.head()`.
        let mut new_fields =
            ctor_wild_subpatterns.replace_with_pattern_arguments(self.head()).into_patterns();
        new_fields.extend_from_slice(&self.pats[1..]);
        PatStack::from_vec(new_fields)
    }
}

impl<'p, 'tcx> Default for PatStack<'p, 'tcx> {
    fn default() -> Self {
        Self::from_vec(smallvec![])
    }
}

impl<'p, 'tcx> PartialEq for PatStack<'p, 'tcx> {
    // Equality ignores the `head_ctor` cache; only the patterns themselves matter.
    fn eq(&self, other: &Self) -> bool {
        self.pats == other.pats
    }
}

impl<'p, 'tcx> FromIterator<&'p Pat<'tcx>> for PatStack<'p, 'tcx> {
    fn from_iter<T>(iter: T) -> Self
    where
        T: IntoIterator<Item = &'p Pat<'tcx>>,
    {
        Self::from_vec(iter.into_iter().collect())
    }
}

/// Pretty-printing for matrix row.
impl<'p, 'tcx> fmt::Debug for PatStack<'p, 'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "+")?;
        for pat in self.iter() {
            write!(f, " {} +", pat)?;
        }
        Ok(())
    }
}

/// A 2D matrix.
#[derive(Clone, PartialEq)]
pub(super) struct Matrix<'p, 'tcx> {
    patterns: Vec<PatStack<'p, 'tcx>>,
}

impl<'p, 'tcx> Matrix<'p, 'tcx> {
    fn empty() -> Self {
        Matrix { patterns: vec![] }
    }

    /// Number of columns of this matrix. `None` if the matrix is empty.
pub(super) fn column_count(&self) -> Option<usize> { self.patterns.get(0).map(|r| r.len()) } /// Pushes a new row to the matrix. If the row starts with an or-pattern, this recursively /// expands it. fn push(&mut self, row: PatStack<'p, 'tcx>) { if !row.is_empty() && row.head().is_or_pat() { for row in row.expand_or_pat() { self.patterns.push(row); } } else { self.patterns.push(row); } } /// Iterate over the first component of each row fn heads<'a>(&'a self) -> impl Iterator<Item = &'a Pat<'tcx>> + Captures<'p> { self.patterns.iter().map(|r| r.head()) } /// Iterate over the first constructor of each row. pub(super) fn head_ctors<'a>( &'a self, cx: &'a MatchCheckCtxt<'p, 'tcx>, ) -> impl Iterator<Item = &'a Constructor<'tcx>> + Captures<'p> + Clone { self.patterns.iter().map(move |r| r.head_ctor(cx)) } /// Iterate over the first constructor and the corresponding span of each row. pub(super) fn head_ctors_and_spans<'a>( &'a self, cx: &'a MatchCheckCtxt<'p, 'tcx>, ) -> impl Iterator<Item = (&'a Constructor<'tcx>, Span)> + Captures<'p> { self.patterns.iter().map(move |r| (r.head_ctor(cx), r.head().span)) } /// This computes `S(constructor, self)`. See top of the file for explanations. fn specialize_constructor( &self, pcx: PatCtxt<'_, 'p, 'tcx>, ctor: &Constructor<'tcx>, ctor_wild_subpatterns: &Fields<'p, 'tcx>, ) -> Matrix<'p, 'tcx> { self.patterns .iter() .filter(|r| ctor.is_covered_by(pcx, r.head_ctor(pcx.cx))) .map(|r| r.pop_head_constructor(ctor_wild_subpatterns)) .collect() } } /// Pretty-printer for matrices of patterns, example: /// /// ```text /// + _ + [] + /// + true + [First] + /// + true + [Second(true)] + /// + false + [_] + /// + _ + [_, _, tail @ ..] + /// ``` impl<'p, 'tcx> fmt::Debug for Matrix<'p, 'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "\n")?; let Matrix { patterns: m, .. 
} = self; let pretty_printed_matrix: Vec<Vec<String>> = m.iter().map(|row| row.iter().map(|pat| format!("{}", pat)).collect()).collect(); let column_count = m.iter().map(|row| row.len()).next().unwrap_or(0); assert!(m.iter().all(|row| row.len() == column_count)); let column_widths: Vec<usize> = (0..column_count) .map(|col| pretty_printed_matrix.iter().map(|row| row[col].len()).max().unwrap_or(0)) .collect(); for row in pretty_printed_matrix { write!(f, "+")?; for (column, pat_str) in row.into_iter().enumerate() { write!(f, " ")?; write!(f, "{:1$}", pat_str, column_widths[column])?; write!(f, " +")?; } write!(f, "\n")?; } Ok(()) } } impl<'p, 'tcx> FromIterator<PatStack<'p, 'tcx>> for Matrix<'p, 'tcx> { fn from_iter<T>(iter: T) -> Self where T: IntoIterator<Item = PatStack<'p, 'tcx>>, { let mut matrix = Matrix::empty(); for x in iter { // Using `push` ensures we correctly expand or-patterns. matrix.push(x); } matrix } } /// Given a pattern or a pattern-stack, this struct captures a set of its subpatterns. We use that /// to track reachable sub-patterns arising from or-patterns. In the absence of or-patterns this /// will always be either `Empty` (the whole pattern is unreachable) or `Full` (the whole pattern /// is reachable). When there are or-patterns, some subpatterns may be reachable while others /// aren't. In this case the whole pattern still counts as reachable, but we will lint the /// unreachable subpatterns. /// /// This supports a limited set of operations, so not all possible sets of subpatterns can be /// represented. That's ok, we only want the ones that make sense for our usage. /// /// What we're doing is illustrated by this: /// ``` /// match (true, 0) { /// (true, 0) => {} /// (_, 1) => {} /// (true | false, 0 | 1) => {} /// } /// ``` /// When we try the alternatives of the `true | false` or-pattern, the last `0` is reachable in the /// `false` alternative but not the `true`. So overall it is reachable. 
/// By contrast, the last `1`
/// is not reachable in either alternative, so we want to signal this to the user.
/// Therefore we take the union of sets of reachable patterns coming from different alternatives in
/// order to figure out which subpatterns are overall reachable.
///
/// Invariant: we try to construct the smallest representation we can. In particular if
/// `self.is_empty()` we ensure that `self` is `Empty`, and same with `Full`. This is not important
/// for correctness currently.
#[derive(Debug, Clone)]
enum SubPatSet<'p, 'tcx> {
    /// The empty set. This means the pattern is unreachable.
    Empty,
    /// The set containing the full pattern.
    Full,
    /// If the pattern is a pattern with a constructor or a pattern-stack, we store a set for each
    /// of its subpatterns. Missing entries in the map are implicitly full, because that's the
    /// common case.
    Seq { subpats: FxHashMap<usize, SubPatSet<'p, 'tcx>> },
    /// If the pattern is an or-pattern, we store a set for each of its alternatives. Missing
    /// entries in the map are implicitly empty. Note: we always flatten nested or-patterns.
    Alt {
        subpats: FxHashMap<usize, SubPatSet<'p, 'tcx>>,
        /// Counts the total number of alternatives in the pattern
        alt_count: usize,
        /// We keep the pattern around to retrieve spans.
        pat: &'p Pat<'tcx>,
    },
}

impl<'p, 'tcx> SubPatSet<'p, 'tcx> {
    fn full() -> Self {
        SubPatSet::Full
    }

    fn empty() -> Self {
        SubPatSet::Empty
    }

    /// Whether the set is empty, i.e. the whole pattern is unreachable.
    fn is_empty(&self) -> bool {
        match self {
            SubPatSet::Empty => true,
            SubPatSet::Full => false,
            // If any subpattern in a sequence is unreachable, the whole pattern is unreachable.
            SubPatSet::Seq { subpats } => subpats.values().any(|set| set.is_empty()),
            // An or-pattern is reachable if any of its alternatives is.
            SubPatSet::Alt { subpats, .. } => subpats.values().all(|set| set.is_empty()),
        }
    }

    /// Whether the set covers the whole pattern, i.e. every subpattern is reachable.
    fn is_full(&self) -> bool {
        match self {
            SubPatSet::Empty => false,
            SubPatSet::Full => true,
            // The whole pattern is reachable only when all its subpatterns are.
            SubPatSet::Seq { subpats } => subpats.values().all(|sub_set| sub_set.is_full()),
            // The whole or-pattern is reachable only when all its alternatives are.
            SubPatSet::Alt { subpats, alt_count, .. } => {
                subpats.len() == *alt_count && subpats.values().all(|set| set.is_full())
            }
        }
    }

    /// Union `self` with `other`, mutating `self`.
    fn union(&mut self, other: Self) {
        use SubPatSet::*;
        // Union with full stays full; union with empty changes nothing.
        if self.is_full() || other.is_empty() {
            return;
        } else if self.is_empty() {
            *self = other;
            return;
        } else if other.is_full() {
            *self = Full;
            return;
        }

        match (&mut *self, other) {
            (Seq { subpats: s_set }, Seq { subpats: mut o_set }) => {
                s_set.retain(|i, s_sub_set| {
                    // Missing entries count as full.
                    let o_sub_set = o_set.remove(&i).unwrap_or(Full);
                    s_sub_set.union(o_sub_set);
                    // We drop full entries.
                    !s_sub_set.is_full()
                });
                // Everything left in `o_set` is missing from `s_set`, i.e. counts as full. Since
                // unioning with full returns full, we can drop those entries.
            }
            (Alt { subpats: s_set, .. }, Alt { subpats: mut o_set, .. }) => {
                s_set.retain(|i, s_sub_set| {
                    // Missing entries count as empty.
                    let o_sub_set = o_set.remove(&i).unwrap_or(Empty);
                    s_sub_set.union(o_sub_set);
                    // We drop empty entries.
                    !s_sub_set.is_empty()
                });
                // Everything left in `o_set` is missing from `s_set`, i.e. counts as empty. Since
                // unioning with empty changes nothing, we can take those entries as is.
                s_set.extend(o_set);
            }
            // Seq/Alt mismatch would mean the two sets describe different patterns.
            _ => bug!(),
        }

        // Restore the smallest-representation invariant (see the type's docs).
        if self.is_full() {
            *self = Full;
        }
    }

    /// Returns a list of the spans of the unreachable subpatterns. If `self` is empty (i.e. the
    /// whole pattern is unreachable) we return `None`.
    fn list_unreachable_spans(&self) -> Option<Vec<Span>> {
        /// Panics if `set.is_empty()`.
fn fill_spans(set: &SubPatSet<'_, '_>, spans: &mut Vec<Span>) { match set { SubPatSet::Empty => bug!(), SubPatSet::Full => {} SubPatSet::Seq { subpats } => { for (_, sub_set) in subpats { fill_spans(sub_set, spans); } } SubPatSet::Alt { subpats, pat, alt_count, .. } => { let expanded = pat.expand_or_pat(); for i in 0..*alt_count { let sub_set = subpats.get(&i).unwrap_or(&SubPatSet::Empty); if sub_set.is_empty() { // Found a unreachable subpattern. spans.push(expanded[i].span); } else { fill_spans(sub_set, spans); } } } } } if self.is_empty() { return None; } if self.is_full() { // No subpatterns are unreachable. return Some(Vec::new()); } let mut spans = Vec::new(); fill_spans(self, &mut spans); Some(spans) } /// When `self` refers to a patstack that was obtained from specialization, after running /// `unspecialize` it will refer to the original patstack before specialization. fn unspecialize(self, arity: usize) -> Self { use SubPatSet::*; match self { Full => Full, Empty => Empty, Seq { subpats } => { // We gather the first `arity` subpatterns together and shift the remaining ones. let mut new_subpats = FxHashMap::default(); let mut new_subpats_first_col = FxHashMap::default(); for (i, sub_set) in subpats { if i < arity { // The first `arity` indices are now part of the pattern in the first // column. new_subpats_first_col.insert(i, sub_set); } else { // Indices after `arity` are simply shifted new_subpats.insert(i - arity + 1, sub_set); } } // If `new_subpats_first_col` has no entries it counts as full, so we can omit it. if !new_subpats_first_col.is_empty() { new_subpats.insert(0, Seq { subpats: new_subpats_first_col }); } Seq { subpats: new_subpats } } Alt { .. } => bug!(), // `self` is a patstack } } /// When `self` refers to a patstack that was obtained from splitting an or-pattern, after /// running `unspecialize` it will refer to the original patstack before splitting. 
/// /// For example: /// ``` /// match Some(true) { /// Some(true) => {} /// None | Some(true | false) => {} /// } /// ``` /// Here `None` would return the full set and `Some(true | false)` would return the set /// containing `false`. After `unsplit_or_pat`, we want the set to contain `None` and `false`. /// This is what this function does. fn unsplit_or_pat(mut self, alt_id: usize, alt_count: usize, pat: &'p Pat<'tcx>) -> Self { use SubPatSet::*; if self.is_empty() { return Empty; } // Subpatterns coming from inside the or-pattern alternative itself, e.g. in `None | Some(0 // | 1)`. let set_first_col = match &mut self { Full => Full, Seq { subpats } => subpats.remove(&0).unwrap_or(Full), Empty => unreachable!(), Alt { .. } => bug!(), // `self` is a patstack }; let mut subpats_first_col = FxHashMap::default(); subpats_first_col.insert(alt_id, set_first_col); let set_first_col = Alt { subpats: subpats_first_col, pat, alt_count }; let mut subpats = match self { Full => FxHashMap::default(), Seq { subpats } => subpats, Empty => unreachable!(), Alt { .. } => bug!(), // `self` is a patstack }; subpats.insert(0, set_first_col); Seq { subpats } } } /// This carries the results of computing usefulness, as described at the top of the file. When /// checking usefulness of a match branch, we use the `NoWitnesses` variant, which also keeps track /// of potential unreachable sub-patterns (in the presence of or-patterns). When checking /// exhaustiveness of a whole match, we use the `WithWitnesses` variant, which carries a list of /// witnesses of non-exhaustiveness when there are any. /// Which variant to use is dictated by `WitnessPreference`. #[derive(Clone, Debug)] enum Usefulness<'p, 'tcx> { /// Carries a set of subpatterns that have been found to be reachable. If empty, this indicates /// the whole pattern is unreachable. If not, this indicates that the pattern is reachable but /// that some sub-patterns may be unreachable (due to or-patterns). 
    /// In the absence of
    /// or-patterns this will always be either `Empty` (the whole pattern is unreachable) or `Full`
    /// (the whole pattern is reachable).
    NoWitnesses(SubPatSet<'p, 'tcx>),
    /// Carries a list of witnesses of non-exhaustiveness. If empty, indicates that the whole
    /// pattern is unreachable.
    WithWitnesses(Vec<Witness<'tcx>>),
}

impl<'p, 'tcx> Usefulness<'p, 'tcx> {
    /// The "useful" unit value for each preference: one empty witness, or the full subpattern set.
    fn new_useful(preference: WitnessPreference) -> Self {
        match preference {
            ConstructWitness => WithWitnesses(vec![Witness(vec![])]),
            LeaveOutWitness => NoWitnesses(SubPatSet::full()),
        }
    }
    /// The "not useful" unit value for each preference: no witnesses, or the empty subpattern set.
    fn new_not_useful(preference: WitnessPreference) -> Self {
        match preference {
            ConstructWitness => WithWitnesses(vec![]),
            LeaveOutWitness => NoWitnesses(SubPatSet::empty()),
        }
    }

    /// Combine usefulnesses from two branches. This is an associative operation.
    fn extend(&mut self, other: Self) {
        match (&mut *self, other) {
            (WithWitnesses(_), WithWitnesses(o)) if o.is_empty() => {}
            (WithWitnesses(s), WithWitnesses(o)) if s.is_empty() => *self = WithWitnesses(o),
            (WithWitnesses(s), WithWitnesses(o)) => s.extend(o),
            (NoWitnesses(s), NoWitnesses(o)) => s.union(o),
            // Both sides always share the same variant, per `WitnessPreference`.
            _ => unreachable!(),
        }
    }

    /// When trying several branches and each returns a `Usefulness`, we need to combine the
    /// results together.
    fn merge(pref: WitnessPreference, usefulnesses: impl Iterator<Item = Self>) -> Self {
        let mut ret = Self::new_not_useful(pref);
        for u in usefulnesses {
            ret.extend(u);
            if let NoWitnesses(subpats) = &ret {
                if subpats.is_full() {
                    // Once we reach the full set, more unions won't change the result.
                    return ret;
                }
            }
        }
        ret
    }

    /// After calculating the usefulness for a branch of an or-pattern, call this to make this
    /// usefulness mergeable with those from the other branches.
    fn unsplit_or_pat(self, alt_id: usize, alt_count: usize, pat: &'p Pat<'tcx>) -> Self {
        match self {
            NoWitnesses(subpats) => NoWitnesses(subpats.unsplit_or_pat(alt_id, alt_count, pat)),
            // Or-pattern splitting only happens in `LeaveOutWitness` (reachability) mode.
            WithWitnesses(_) => bug!(),
        }
    }

    /// After calculating usefulness after a specialization, call this to reconstruct a usefulness
    /// that makes sense for the matrix pre-specialization. This new usefulness can then be merged
    /// with the results of specializing with the other constructors.
    fn apply_constructor(
        self,
        pcx: PatCtxt<'_, 'p, 'tcx>,
        matrix: &Matrix<'p, 'tcx>, // used to compute missing ctors
        ctor: &Constructor<'tcx>,
        ctor_wild_subpatterns: &Fields<'p, 'tcx>,
    ) -> Self {
        match self {
            WithWitnesses(witnesses) if witnesses.is_empty() => WithWitnesses(witnesses),
            WithWitnesses(witnesses) => {
                let new_witnesses = if matches!(ctor, Constructor::Missing) {
                    let mut split_wildcard = SplitWildcard::new(pcx);
                    split_wildcard.split(pcx, matrix.head_ctors(pcx.cx));
                    // Construct for each missing constructor a "wild" version of this
                    // constructor, that matches everything that can be built with
                    // it. For example, if `ctor` is a `Constructor::Variant` for
                    // `Option::Some`, we get the pattern `Some(_)`.
                    let new_patterns: Vec<_> = split_wildcard
                        .iter_missing(pcx)
                        .map(|missing_ctor| {
                            Fields::wildcards(pcx, missing_ctor).apply(pcx, missing_ctor)
                        })
                        .collect();
                    // Cross-product: each existing witness is extended with each missing-ctor
                    // pattern.
                    witnesses
                        .into_iter()
                        .flat_map(|witness| {
                            new_patterns.iter().map(move |pat| {
                                let mut witness = witness.clone();
                                witness.0.push(pat.clone());
                                witness
                            })
                        })
                        .collect()
                } else {
                    witnesses
                        .into_iter()
                        .map(|witness| witness.apply_constructor(pcx, &ctor, ctor_wild_subpatterns))
                        .collect()
                };
                WithWitnesses(new_witnesses)
            }
            NoWitnesses(subpats) => NoWitnesses(subpats.unspecialize(ctor_wild_subpatterns.len())),
        }
    }
}

/// Which of the two `Usefulness` variants to compute; see `Usefulness` for the distinction.
#[derive(Copy, Clone, Debug)]
enum WitnessPreference {
    ConstructWitness,
    LeaveOutWitness,
}

/// A witness of non-exhaustiveness for error reporting, represented
/// as a list of patterns (in reverse order of construction) with
/// wildcards inside to represent elements that can take any inhabitant
/// of the type as a value.
///
/// A witness against a list of patterns should have the same types
/// and length as the pattern matched against. Because Rust `match`
/// is always against a single pattern, at the end the witness will
/// have length 1, but in the middle of the algorithm, it can contain
/// multiple patterns.
///
/// For example, if we are constructing a witness for the match against
///
/// ```
/// struct Pair(Option<(u32, u32)>, bool);
///
/// match (p: Pair) {
///     Pair(None, _) => {}
///     Pair(_, false) => {}
/// }
/// ```
///
/// We'll perform the following steps:
/// 1. Start with an empty witness
///     `Witness(vec![])`
/// 2. Push a witness `true` against the `false`
///     `Witness(vec![true])`
/// 3. Push a witness `Some(_)` against the `None`
///     `Witness(vec![true, Some(_)])`
/// 4. Apply the `Pair` constructor to the witnesses
///     `Witness(vec![Pair(Some(_), true)])`
///
/// The final `Pair(Some(_), true)` is then the resulting witness.
#[derive(Clone, Debug)]
crate struct Witness<'tcx>(Vec<Pat<'tcx>>);

impl<'tcx> Witness<'tcx> {
    /// Asserts that the witness contains a single pattern, and returns it.
    fn single_pattern(self) -> Pat<'tcx> {
        assert_eq!(self.0.len(), 1);
        self.0.into_iter().next().unwrap()
    }

    /// Constructs a partial witness for a pattern given a list of
    /// patterns expanded by the specialization step.
    ///
    /// When a pattern P is discovered to be useful, this function is used bottom-up
    /// to reconstruct a complete witness, e.g., a pattern P' that covers a subset
    /// of values, V, where each value in that set is not covered by any previously
    /// used patterns and is covered by the pattern P'. Examples:
    ///
    /// left_ty: tuple of 3 elements
    /// pats: [10, 20, _]           => (10, 20, _)
    ///
    /// left_ty: struct X { a: (bool, &'static str), b: usize}
    /// pats: [(false, "foo"), 42]  => X { a: (false, "foo"), b: 42 }
    fn apply_constructor<'p>(
        mut self,
        pcx: PatCtxt<'_, 'p, 'tcx>,
        ctor: &Constructor<'tcx>,
        ctor_wild_subpatterns: &Fields<'p, 'tcx>,
    ) -> Self {
        let pat = {
            let len = self.0.len();
            let arity = ctor_wild_subpatterns.len();
            // The last `arity` patterns on the stack are `ctor`'s fields, stored in reverse order
            // of construction; drain and reverse them to restore field order before wrapping.
            let pats = self.0.drain((len - arity)..).rev();
            ctor_wild_subpatterns.replace_fields(pcx.cx, pats).apply(pcx, ctor)
        };
        self.0.push(pat);
        self
    }
}

/// Algorithm from <http://moscova.inria.fr/~maranget/papers/warn/index.html>.
/// The algorithm from the paper has been modified to correctly handle empty
/// types. The changes are:
/// (0) We don't exit early if the pattern matrix has zero rows. We just
/// continue to recurse over columns.
/// (1) all_constructors will only return constructors that are statically
/// possible. E.g., it will only return `Ok` for `Result<T, !>`.
///
/// This finds whether a (row) vector `v` of patterns is 'useful' in relation
/// to a set of such vectors `m` - this is defined as there being a set of
/// inputs that will match `v` but not any of the sets in `m`.
///
/// All the patterns at each column of the `matrix ++ v` matrix must have the same type.
///
/// This is used both for reachability checking (if a pattern isn't useful in
/// relation to preceding patterns, it is not reachable) and exhaustiveness
/// checking (if a wildcard pattern is useful in relation to a matrix, the
/// matrix isn't exhaustive).
///
/// `is_under_guard` is used to inform if the pattern has a guard. If it
/// has one it must not be inserted into the matrix. This shouldn't be
/// relied on for soundness.
#[instrument(
    level = "debug",
    skip(cx, matrix, witness_preference, hir_id, is_under_guard, is_top_level)
)]
fn is_useful<'p, 'tcx>(
    cx: &MatchCheckCtxt<'p, 'tcx>,
    matrix: &Matrix<'p, 'tcx>,
    v: &PatStack<'p, 'tcx>,
    witness_preference: WitnessPreference,
    hir_id: HirId,
    is_under_guard: bool,
    is_top_level: bool,
) -> Usefulness<'p, 'tcx> {
    debug!("matrix,v={:?}{:?}", matrix, v);
    let Matrix { patterns: rows, .. } = matrix;

    // The base case. We are pattern-matching on () and the return value is
    // based on whether our matrix has a row or not.
    // NOTE: This could potentially be optimized by checking rows.is_empty()
    // first and then, if v is non-empty, the return value is based on whether
    // the type of the tuple we're checking is inhabited or not.
    if v.is_empty() {
        let ret = if rows.is_empty() {
            Usefulness::new_useful(witness_preference)
        } else {
            Usefulness::new_not_useful(witness_preference)
        };
        debug!(?ret);
        return ret;
    }

    assert!(rows.iter().all(|r| r.len() == v.len()));

    // FIXME(Nadrieril): Hack to work around type normalization issues (see #72476).
    let ty = matrix.heads().next().map_or(v.head().ty, |r| r.ty);
    let pcx = PatCtxt { cx, ty, span: v.head().span, is_top_level };

    // If the first pattern is an or-pattern, expand it.
    let ret = if v.head().is_or_pat() {
        debug!("expanding or-pattern");
        let v_head = v.head();
        let vs: Vec<_> = v.expand_or_pat().collect();
        let alt_count = vs.len();
        // We try each or-pattern branch in turn.
        let mut matrix = matrix.clone();
        let usefulnesses = vs.into_iter().enumerate().map(|(i, v)| {
            let usefulness =
                is_useful(cx, &matrix, &v, witness_preference, hir_id, is_under_guard, false);
            // If pattern has a guard don't add it to the matrix.
            if !is_under_guard {
                // We push the already-seen patterns into the matrix in order to detect redundant
                // branches like `Some(_) | Some(0)`.
                matrix.push(v);
            }
            usefulness.unsplit_or_pat(i, alt_count, v_head)
        });
        Usefulness::merge(witness_preference, usefulnesses)
    } else {
        let v_ctor = v.head_ctor(cx);
        if let Constructor::IntRange(ctor_range) = &v_ctor {
            // Lint on likely incorrect range patterns (#63987)
            ctor_range.lint_overlapping_range_endpoints(
                pcx,
                matrix.head_ctors_and_spans(cx),
                matrix.column_count().unwrap_or(0),
                hir_id,
            )
        }
        // We split the head constructor of `v`.
        let split_ctors = v_ctor.split(pcx, matrix.head_ctors(cx));
        // For each constructor, we compute whether there's a value that starts with it that would
        // witness the usefulness of `v`.
        let start_matrix = &matrix;
        let usefulnesses = split_ctors.into_iter().map(|ctor| {
            debug!("specialize({:?})", ctor);
            // We cache the result of `Fields::wildcards` because it is used a lot.
            let ctor_wild_subpatterns = Fields::wildcards(pcx, &ctor);
            let spec_matrix =
                start_matrix.specialize_constructor(pcx, &ctor, &ctor_wild_subpatterns);
            let v = v.pop_head_constructor(&ctor_wild_subpatterns);
            let usefulness =
                is_useful(cx, &spec_matrix, &v, witness_preference, hir_id, is_under_guard, false);
            usefulness.apply_constructor(pcx, start_matrix, &ctor, &ctor_wild_subpatterns)
        });
        Usefulness::merge(witness_preference, usefulnesses)
    };
    debug!(?ret);
    ret
}

/// The arm of a match expression.
#[derive(Clone, Copy)]
crate struct MatchArm<'p, 'tcx> {
    /// The pattern must have been lowered through `check_match::MatchVisitor::lower_pattern`.
    crate pat: &'p super::Pat<'tcx>,
    crate hir_id: HirId,
    crate has_guard: bool,
}

/// Indicates whether or not a given arm is reachable.
#[derive(Clone, Debug)]
crate enum Reachability {
    /// The arm is reachable. This additionally carries a set of or-pattern branches that have been
    /// found to be unreachable despite the overall arm being reachable. Used only in the presence
    /// of or-patterns, otherwise it stays empty.
    Reachable(Vec<Span>),
    /// The arm is unreachable.
    Unreachable,
}

/// The output of checking a match for exhaustiveness and arm reachability.
crate struct UsefulnessReport<'p, 'tcx> {
    /// For each arm of the input, whether that arm is reachable after the arms above it.
    crate arm_usefulness: Vec<(MatchArm<'p, 'tcx>, Reachability)>,
    /// If the match is exhaustive, this is empty. If not, this contains witnesses for the lack of
    /// exhaustiveness.
    crate non_exhaustiveness_witnesses: Vec<super::Pat<'tcx>>,
}

/// The entrypoint for the usefulness algorithm. Computes whether a match is exhaustive and which
/// of its arms are reachable.
///
/// Note: the input patterns must have been lowered through
/// `check_match::MatchVisitor::lower_pattern`.
crate fn compute_match_usefulness<'p, 'tcx>(
    cx: &MatchCheckCtxt<'p, 'tcx>,
    arms: &[MatchArm<'p, 'tcx>],
    scrut_hir_id: HirId,
    scrut_ty: Ty<'tcx>,
) -> UsefulnessReport<'p, 'tcx> {
    let mut matrix = Matrix::empty();
    let arm_usefulness: Vec<_> = arms
        .iter()
        .copied()
        .map(|arm| {
            let v = PatStack::from_pattern(arm.pat);
            let usefulness =
                is_useful(cx, &matrix, &v, LeaveOutWitness, arm.hir_id, arm.has_guard, true);
            // Guarded arms don't exclude values from later arms, so they are not added to the
            // matrix.
            if !arm.has_guard {
                matrix.push(v);
            }
            let reachability = match usefulness {
                NoWitnesses(subpats) if subpats.is_empty() => Reachability::Unreachable,
                NoWitnesses(subpats) => {
                    // `unwrap` is safe: `list_unreachable_spans` only returns `None` when the set
                    // is empty, which the previous arm already handled.
                    Reachability::Reachable(subpats.list_unreachable_spans().unwrap())
                }
                // Arm checking runs in `LeaveOutWitness` mode, so witnesses can't appear here.
                WithWitnesses(..) => bug!(),
            };
            (arm, reachability)
        })
        .collect();

    // Exhaustiveness check: is a wildcard useful w.r.t. the full matrix? Any witnesses found are
    // values the match fails to cover.
    let wild_pattern = cx.pattern_arena.alloc(super::Pat::wildcard_from_ty(scrut_ty));
    let v = PatStack::from_pattern(wild_pattern);
    let usefulness = is_useful(cx, &matrix, &v, ConstructWitness, scrut_hir_id, false, true);
    let non_exhaustiveness_witnesses = match usefulness {
        WithWitnesses(pats) => pats.into_iter().map(|w| w.single_pattern()).collect(),
        NoWitnesses(_) => bug!(),
    };
    UsefulnessReport { arm_usefulness, non_exhaustiveness_witnesses }
}
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! See [rustc guide] for more info on how this works.
//!
//! [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/trait-resolution.html#selection

use self::SelectionCandidate::*;
use self::EvaluationResult::*;

use super::coherence::{self, Conflict};
use super::DerivedObligationCause;
use super::IntercrateMode;
use super::project;
use super::project::{normalize_with_depth, Normalized, ProjectionCacheKey};
use super::{PredicateObligation, TraitObligation, ObligationCause};
use super::{ObligationCauseCode, BuiltinDerivedObligation, ImplDerivedObligation};
use super::{SelectionError, Unimplemented, OutputTypeParameterMismatch};
use super::{ObjectCastObligation, Obligation};
use super::TraitNotObjectSafe;
use super::Selection;
use super::SelectionResult;
use super::{VtableBuiltin, VtableImpl, VtableParam, VtableClosure, VtableGenerator,
            VtableFnPointer, VtableObject, VtableAutoImpl};
use super::{VtableImplData, VtableObjectData, VtableBuiltinData, VtableGeneratorData,
            VtableClosureData, VtableAutoImplData, VtableFnPointerData};
use super::util;

use dep_graph::{DepNodeIndex, DepKind};
use hir::def_id::DefId;
use infer;
use infer::{InferCtxt, InferOk, TypeFreshener};
use ty::subst::{Kind, Subst, Substs};
use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable};
use ty::fast_reject;
use ty::relate::TypeRelation;
use middle::lang_items;
use mir::interpret::{GlobalId};

use rustc_data_structures::bitvec::BitVector;
use std::iter;
use std::cell::RefCell;
use std::cmp;
use std::fmt;
use std::mem;
use std::rc::Rc;
use syntax::abi::Abi;
use hir;
use util::nodemap::{FxHashMap, FxHashSet};

/// The context in which trait obligations are selected; wraps an inference context together with
/// the flags that control how conservative selection should be.
pub struct SelectionContext<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> {
    infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,

    /// Freshener used specifically for skolemizing entries on the
    /// obligation stack. This ensures that all entries on the stack
    /// at one time will have the same set of skolemized entries,
    /// which is important for checking for trait bounds that
    /// recursively require themselves.
    freshener: TypeFreshener<'cx, 'gcx, 'tcx>,

    /// If true, indicates that the evaluation should be conservative
    /// and consider the possibility of types outside this crate.
    /// This comes up primarily when resolving ambiguity. Imagine
    /// there is some trait reference `$0 : Bar` where `$0` is an
    /// inference variable. If `intercrate` is true, then we can never
    /// say for sure that this reference is not implemented, even if
    /// there are *no impls at all for `Bar`*, because `$0` could be
    /// bound to some type that in a downstream crate that implements
    /// `Bar`. This is the suitable mode for coherence. Elsewhere,
    /// though, we set this to false, because we are only interested
    /// in types that the user could actually have written --- in
    /// other words, we consider `$0 : Bar` to be unimplemented if
    /// there is no type that the user could *actually name* that
    /// would satisfy it. This avoids crippling inference, basically.
    intercrate: Option<IntercrateMode>,

    // Populated only while tracking is enabled (see
    // `enable_tracking_intercrate_ambiguity_causes`); used for coherence diagnostics.
    intercrate_ambiguity_causes: Option<Vec<IntercrateAmbiguityCause>>,

    /// Controls whether or not to filter out negative impls when selecting.
    /// This is used in librustdoc to distinguish between the lack of an impl
    /// and a negative impl
    allow_negative_impls: bool
}

#[derive(Clone, Debug)]
pub enum IntercrateAmbiguityCause {
    DownstreamCrate {
        trait_desc: String,
        self_desc: Option<String>,
    },
    UpstreamCrateUpdate {
        trait_desc: String,
        self_desc: Option<String>,
    },
}

impl IntercrateAmbiguityCause {
    /// Emits notes when the overlap is caused by complex intercrate ambiguities.
    /// See #23980 for details.
    pub fn add_intercrate_ambiguity_hint<'a, 'tcx>(&self,
                                                   err: &mut ::errors::DiagnosticBuilder) {
        err.note(&self.intercrate_ambiguity_hint());
    }

    /// Renders the cause as a human-readable hint string for diagnostics.
    pub fn intercrate_ambiguity_hint(&self) -> String {
        match self {
            &IntercrateAmbiguityCause::DownstreamCrate { ref trait_desc, ref self_desc } => {
                let self_desc = if let &Some(ref ty) = self_desc {
                    format!(" for type `{}`", ty)
                } else {
                    "".to_string()
                };
                format!("downstream crates may implement trait `{}`{}", trait_desc, self_desc)
            }
            &IntercrateAmbiguityCause::UpstreamCrateUpdate { ref trait_desc, ref self_desc } => {
                let self_desc = if let &Some(ref ty) = self_desc {
                    format!(" for type `{}`", ty)
                } else {
                    "".to_string()
                };
                format!("upstream crates may add new impl of trait `{}`{} \
                         in future versions",
                        trait_desc, self_desc)
            }
        }
    }
}

// A stack that walks back up the stack frame.
struct TraitObligationStack<'prev, 'tcx: 'prev> {
    obligation: &'prev TraitObligation<'tcx>,

    /// Trait ref from `obligation` but skolemized with the
    /// selection-context's freshener. Used to check for recursion.
    fresh_trait_ref: ty::PolyTraitRef<'tcx>,

    previous: TraitObligationStackList<'prev, 'tcx>,
}

/// Per-tcx cache of selection results, keyed by trait ref and guarded with dep-graph tracking.
#[derive(Clone)]
pub struct SelectionCache<'tcx> {
    hashmap: RefCell<FxHashMap<ty::TraitRef<'tcx>,
                               WithDepNode<SelectionResult<'tcx, SelectionCandidate<'tcx>>>>>,
}

/// The selection process begins by considering all impls, where
/// clauses, and so forth that might resolve an obligation. Sometimes
/// we'll be able to say definitively that (e.g.)
/// an impl does not
/// apply to the obligation: perhaps it is defined for `usize` but the
/// obligation is for `int`. In that case, we drop the impl out of the
/// list. But the other cases are considered *candidates*.
///
/// For selection to succeed, there must be exactly one matching
/// candidate. If the obligation is fully known, this is guaranteed
/// by coherence. However, if the obligation contains type parameters
/// or variables, there may be multiple such impls.
///
/// It is not a real problem if multiple matching impls exist because
/// of type variables - it just means the obligation isn't sufficiently
/// elaborated. In that case we report an ambiguity, and the caller can
/// try again after more type information has been gathered or report a
/// "type annotations required" error.
///
/// However, with type parameters, this can be a real problem - type
/// parameters don't unify with regular types, but they *can* unify
/// with variables from blanket impls, and (unless we know its bounds
/// will always be satisfied) picking the blanket impl will be wrong
/// for at least *some* substitutions. To make this concrete, if we have
///
///    trait AsDebug { type Out : fmt::Debug; fn debug(self) -> Self::Out; }
///    impl<T: fmt::Debug> AsDebug for T {
///        type Out = T;
///        fn debug(self) -> fmt::Debug { self }
///    }
///    fn foo<T: AsDebug>(t: T) { println!("{:?}", <T as AsDebug>::debug(t)); }
///
/// we can't just use the impl to resolve the <T as AsDebug> obligation
/// - a type from another crate (that doesn't implement fmt::Debug) could
/// implement AsDebug.
///
/// Because where-clauses match the type exactly, multiple clauses can
/// only match if there are unresolved variables, and we can mostly just
/// report this ambiguity in that case. This is still a problem - we can't
/// *do anything* with ambiguities that involve only regions. This is issue
/// #21974.
///
/// If a single where-clause matches and there are no inference
/// variables left, then it definitely matches and we can just select
/// it.
///
/// In fact, we even select the where-clause when the obligation contains
/// inference variables. The can lead to inference making "leaps of logic",
/// for example in this situation:
///
///    pub trait Foo<T> { fn foo(&self) -> T; }
///    impl<T> Foo<()> for T { fn foo(&self) { } }
///    impl Foo<bool> for bool { fn foo(&self) -> bool { *self } }
///
///    pub fn foo<T>(t: T) where T: Foo<bool> {
///       println!("{:?}", <T as Foo<_>>::foo(&t));
///    }
///    fn main() { foo(false); }
///
/// Here the obligation <T as Foo<$0>> can be matched by both the blanket
/// impl and the where-clause. We select the where-clause and unify $0=bool,
/// so the program prints "false". However, if the where-clause is omitted,
/// the blanket impl is selected, we unify $0=(), and the program prints
/// "()".
///
/// Exactly the same issues apply to projection and object candidates, except
/// that we can have both a projection candidate and a where-clause candidate
/// for the same obligation. In that case either would do (except that
/// different "leaps of logic" would occur if inference variables are
/// present), and we just pick the where-clause. This is, for example,
/// required for associated types to work in default impls, as the bounds
/// are visible both as projection bounds and as where-clauses from the
/// parameter environment.
#[derive(PartialEq,Eq,Debug,Clone)]
enum SelectionCandidate<'tcx> {
    BuiltinCandidate { has_nested: bool },
    ParamCandidate(ty::PolyTraitRef<'tcx>),
    ImplCandidate(DefId),
    AutoImplCandidate(DefId),

    /// This is a trait matching with a projected type as `Self`, and
    /// we found an applicable bound in the trait definition.
    ProjectionCandidate,

    /// Implementation of a `Fn`-family trait by one of the anonymous types
    /// generated for a `||` expression.
    ClosureCandidate,

    /// Implementation of a `Generator` trait by one of the anonymous types
    /// generated for a generator.
    GeneratorCandidate,

    /// Implementation of a `Fn`-family trait by one of the anonymous
    /// types generated for a fn pointer type (e.g., `fn(int)->int`)
    FnPointerCandidate,

    ObjectCandidate,

    BuiltinObjectCandidate,

    BuiltinUnsizeCandidate,
}

impl<'a, 'tcx> ty::Lift<'tcx> for SelectionCandidate<'a> {
    type Lifted = SelectionCandidate<'tcx>;
    fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
        Some(match *self {
            BuiltinCandidate { has_nested } => {
                BuiltinCandidate {
                    has_nested,
                }
            }
            ImplCandidate(def_id) => ImplCandidate(def_id),
            AutoImplCandidate(def_id) => AutoImplCandidate(def_id),
            ProjectionCandidate => ProjectionCandidate,
            FnPointerCandidate => FnPointerCandidate,
            ObjectCandidate => ObjectCandidate,
            BuiltinObjectCandidate => BuiltinObjectCandidate,
            BuiltinUnsizeCandidate => BuiltinUnsizeCandidate,
            ClosureCandidate => ClosureCandidate,
            GeneratorCandidate => GeneratorCandidate,

            // Only this variant actually carries `'a`-lifetime data, so only it needs a real lift.
            ParamCandidate(ref trait_ref) => {
                return tcx.lift(trait_ref).map(ParamCandidate);
            }
        })
    }
}

struct SelectionCandidateSet<'tcx> {
    // a list of candidates that definitely apply to the current
    // obligation (meaning: types unify).
    vec: Vec<SelectionCandidate<'tcx>>,

    // if this is true, then there were candidates that might or might
    // not have applied, but we couldn't tell. This occurs when some
    // of the input types are type variables, in which case there are
    // various "builtin" rules that might or might not trigger.
    ambiguous: bool,
}

/// A candidate paired with the result of evaluating it.
#[derive(PartialEq,Eq,Debug,Clone)]
struct EvaluatedCandidate<'tcx> {
    candidate: SelectionCandidate<'tcx>,
    evaluation: EvaluationResult,
}

/// When does the builtin impl for `T: Trait` apply?
enum BuiltinImplConditions<'tcx> {
    /// The impl is conditional on T1,T2,.. : Trait
    Where(ty::Binder<Vec<Ty<'tcx>>>),
    /// There is no built-in impl. There may be some other
    /// candidate (a where-clause or user-defined impl).
    None,
    /// There is *no* impl for this, builtin or not. Ignore
    /// all where-clauses.
    Never,
    /// It is unknown whether there is an impl.
    Ambiguous
}

#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
/// The result of trait evaluation. The order is important
/// here as the evaluation of a list is the maximum of the
/// evaluations.
/// (The derived `Ord` follows variant declaration order, so the
/// variants below must stay in this order.)
///
/// The evaluation results are ordered:
///     - `EvaluatedToOk` implies `EvaluatedToAmbig` implies `EvaluatedToUnknown`
///     - `EvaluatedToErr` implies `EvaluatedToRecur`
///     - the "union" of evaluation results is equal to their maximum -
///     all the "potential success" candidates can potentially succeed,
///     so they are no-ops when unioned with a definite error, and within
///     the categories it's easy to see that the unions are correct.
enum EvaluationResult {
    /// Evaluation successful
    EvaluatedToOk,
    /// Evaluation is known to be ambiguous - it *might* hold for some
    /// assignment of inference variables, but it might not.
    ///
    /// While this has the same meaning as `EvaluatedToUnknown` - we can't
    /// know whether this obligation holds or not - it is the result we
    /// would get with an empty stack, and therefore is cacheable.
    EvaluatedToAmbig,
    /// Evaluation failed because of recursion involving inference
    /// variables. We are somewhat imprecise there, so we don't actually
    /// know the real result.
    ///
    /// This can't be trivially cached for the same reason as `EvaluatedToRecur`.
    EvaluatedToUnknown,
    /// Evaluation failed because we encountered an obligation we are already
    /// trying to prove on this branch.
    ///
    /// We know this branch can't be a part of a minimal proof-tree for
    /// the "root" of our cycle, because then we could cut out the recursion
    /// and maintain a valid proof tree. However, this does not mean
    /// that all the obligations on this branch do not hold - it's possible
    /// that we entered this branch "speculatively", and that there
    /// might be some other way to prove this obligation that does not
    /// go through this cycle - so we can't cache this as a failure.
    ///
    /// For example, suppose we have this:
    ///
    /// ```rust,ignore (pseudo-Rust)
    ///     pub trait Trait { fn xyz(); }
    ///     // This impl is "useless", but we can still have
    ///     // an `impl Trait for SomeUnsizedType` somewhere.
    ///     impl<T: Trait + Sized> Trait for T { fn xyz() {} }
    ///
    ///     pub fn foo<T: Trait + ?Sized>() {
    ///         <T as Trait>::xyz();
    ///     }
    /// ```
    ///
    /// When checking `foo`, we have to prove `T: Trait`. This basically
    /// translates into this:
    ///
    ///     (T: Trait + Sized →_\impl T: Trait), T: Trait ⊢ T: Trait
    ///
    /// When we try to prove it, we first go the first option, which
    /// recurses. This shows us that the impl is "useless" - it won't
    /// tell us that `T: Trait` unless it already implemented `Trait`
    /// by some other means. However, that does not prevent `T: Trait`
    /// does not hold, because of the bound (which can indeed be satisfied
    /// by `SomeUnsizedType` from another crate).
    ///
    /// FIXME: when an `EvaluatedToRecur` goes past its parent root, we
    /// ought to convert it to an `EvaluatedToErr`, because we know
    /// there definitely isn't a proof tree for that obligation. Not
    /// doing so is still sound - there isn't any proof tree, so the
    /// branch still can't be a part of a minimal one - but does not
    /// re-enable caching.
    EvaluatedToRecur,
    /// Evaluation failed
    EvaluatedToErr,
}

impl EvaluationResult {
    /// Whether this result leaves open the possibility that the obligation holds.
    fn may_apply(self) -> bool {
        match self {
            EvaluatedToOk |
            EvaluatedToAmbig |
            EvaluatedToUnknown => true,

            EvaluatedToErr |
            EvaluatedToRecur => false
        }
    }

    /// Whether this result depends on the obligation stack it was computed under, and therefore
    /// must not be cached.
    fn is_stack_dependent(self) -> bool {
        match self {
            EvaluatedToUnknown |
            EvaluatedToRecur => true,

            EvaluatedToOk |
            EvaluatedToAmbig |
            EvaluatedToErr => false,
        }
    }
}

/// Per-tcx cache of evaluation results, guarded with dep-graph tracking.
#[derive(Clone)]
pub struct EvaluationCache<'tcx> {
    hashmap: RefCell<FxHashMap<ty::PolyTraitRef<'tcx>, WithDepNode<EvaluationResult>>>
}

impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> {
    /// Creates a selection context in the default (non-intercrate) mode.
    pub fn new(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>) -> SelectionContext<'cx, 'gcx, 'tcx> {
        SelectionContext {
            infcx,
            freshener: infcx.freshener(),
            intercrate: None,
            intercrate_ambiguity_causes: None,
            allow_negative_impls: false,
        }
    }

    /// Creates a selection context in intercrate (coherence) mode; see the docs on the
    /// `intercrate` field for what this changes.
    pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
                      mode: IntercrateMode) -> SelectionContext<'cx, 'gcx, 'tcx> {
        debug!("intercrate({:?})", mode);
        SelectionContext {
            infcx,
            freshener: infcx.freshener(),
            intercrate: Some(mode),
            intercrate_ambiguity_causes: None,
            allow_negative_impls: false,
        }
    }

    /// Creates a selection context that optionally keeps negative impls as candidates
    /// (used by rustdoc; see the `allow_negative_impls` field).
    pub fn with_negative(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
                         allow_negative_impls: bool) -> SelectionContext<'cx, 'gcx, 'tcx> {
        debug!("with_negative({:?})", allow_negative_impls);
        SelectionContext {
            infcx,
            freshener: infcx.freshener(),
            intercrate: None,
            intercrate_ambiguity_causes: None,
            allow_negative_impls,
        }
    }

    /// Enables tracking of intercrate ambiguity causes. These are
    /// used in coherence to give improved diagnostics. We don't do
    /// this until we detect a coherence error because it can lead to
    /// false overflow results (#47139) and because it costs
    /// computation time.

    pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) {
        // Only meaningful in intercrate (coherence) mode, and tracking must
        // not already be enabled.
        assert!(self.intercrate.is_some());
        assert!(self.intercrate_ambiguity_causes.is_none());
        self.intercrate_ambiguity_causes = Some(vec![]);
        debug!("selcx: enable_tracking_intercrate_ambiguity_causes");
    }

    /// Gets the intercrate ambiguity causes collected since tracking
    /// was enabled and disables tracking at the same time. If
    /// tracking is not enabled, just returns an empty vector.
    pub fn take_intercrate_ambiguity_causes(&mut self) -> Vec<IntercrateAmbiguityCause> {
        assert!(self.intercrate.is_some());
        self.intercrate_ambiguity_causes.take().unwrap_or(vec![])
    }

    /// Returns the inference context this selection context wraps.
    pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> {
        self.infcx
    }

    /// Returns the type context underlying the inference context.
    pub fn tcx(&self) -> TyCtxt<'cx, 'gcx, 'tcx> {
        self.infcx.tcx
    }

    /// Returns the inference context in its role as a source of closure
    /// kind/signature information.
    pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> {
        self.infcx
    }

    /// Wraps the inference context's in_snapshot s.t. snapshot handling is only from the selection
    /// context's self.
    fn in_snapshot<R, F>(&mut self, f: F) -> R
        where F: FnOnce(&mut Self, &infer::CombinedSnapshot<'cx, 'tcx>) -> R
    {
        self.infcx.in_snapshot(|snapshot| f(self, snapshot))
    }

    /// Wraps a probe s.t. obligations collected during it are ignored and old obligations are
    /// retained.
    fn probe<R, F>(&mut self, f: F) -> R
        where F: FnOnce(&mut Self, &infer::CombinedSnapshot<'cx, 'tcx>) -> R
    {
        self.infcx.probe(|snapshot| f(self, snapshot))
    }

    /// Wraps a commit_if_ok s.t. obligations collected during it are not returned in selection if
    /// the transaction fails and s.t. old obligations are retained.
    fn commit_if_ok<T, E, F>(&mut self, f: F) -> Result<T, E>
        where F: FnOnce(&mut Self, &infer::CombinedSnapshot) -> Result<T, E>
    {
        self.infcx.commit_if_ok(|snapshot| f(self, snapshot))
    }

    ///////////////////////////////////////////////////////////////////////////
    // Selection
    //
    // The selection phase tries to identify *how* an obligation will
    // be resolved. For example, it will identify which impl or
    // parameter bound is to be used.
 The process can be inconclusive
    // if the self type in the obligation is not fully inferred. Selection
    // can result in an error in one of two ways:
    //
    // 1. If no applicable impl or parameter bound can be found.
    // 2. If the output type parameters in the obligation do not match
    //    those specified by the impl/bound. For example, if the obligation
    //    is `Vec<Foo>:Iterable<Bar>`, but the impl specifies
    //    `impl<T> Iterable<T> for Vec<T>`, then an error would result.

    /// Attempts to satisfy the obligation. If successful, this will affect the surrounding
    /// type environment by performing unification.
    pub fn select(&mut self, obligation: &TraitObligation<'tcx>)
                  -> SelectionResult<'tcx, Selection<'tcx>> {
        debug!("select({:?})", obligation);
        assert!(!obligation.predicate.has_escaping_regions());

        // Candidate assembly/winnowing, then confirmation of the winner.
        let stack = self.push_stack(TraitObligationStackList::empty(), obligation);
        let ret = match self.candidate_from_obligation(&stack)? {
            None => None,
            Some(candidate) => Some(self.confirm_candidate(obligation, candidate)?)
        };
        Ok(ret)
    }

    ///////////////////////////////////////////////////////////////////////////
    // EVALUATION
    //
    // Tests whether an obligation can be selected or whether an impl
    // can be applied to particular types. It skips the "confirmation"
    // step and hence completely ignores output type parameters.
    //
    // The result is "true" if the obligation *may* hold and "false" if
    // we can be sure it does not.

    /// Evaluates whether the obligation `obligation` can be satisfied (by any means).
    pub fn evaluate_obligation(&mut self,
                               obligation: &PredicateObligation<'tcx>)
                               -> bool
    {
        debug!("evaluate_obligation({:?})", obligation);

        // Run inside a probe so no inference side effects escape.
        self.probe(|this, _| {
            this.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation)
                .may_apply()
        })
    }

    /// Evaluates whether the obligation `obligation` can be satisfied,
    /// and returns `false` if not certain. However, this is not entirely
    /// accurate if inference variables are involved.
pub fn evaluate_obligation_conservatively(&mut self, obligation: &PredicateObligation<'tcx>) -> bool { debug!("evaluate_obligation_conservatively({:?})", obligation); self.probe(|this, _| { this.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation) == EvaluatedToOk }) } /// Evaluates the predicates in `predicates` recursively. Note that /// this applies projections in the predicates, and therefore /// is run within an inference probe. fn evaluate_predicates_recursively<'a,'o,I>(&mut self, stack: TraitObligationStackList<'o, 'tcx>, predicates: I) -> EvaluationResult where I : IntoIterator<Item=&'a PredicateObligation<'tcx>>, 'tcx:'a { let mut result = EvaluatedToOk; for obligation in predicates { let eval = self.evaluate_predicate_recursively(stack, obligation); debug!("evaluate_predicate_recursively({:?}) = {:?}", obligation, eval); if let EvaluatedToErr = eval { // fast-path - EvaluatedToErr is the top of the lattice, // so we don't need to look on the other predicates. return EvaluatedToErr; } else { result = cmp::max(result, eval); } } result } fn evaluate_predicate_recursively<'o>(&mut self, previous_stack: TraitObligationStackList<'o, 'tcx>, obligation: &PredicateObligation<'tcx>) -> EvaluationResult { debug!("evaluate_predicate_recursively({:?})", obligation); match obligation.predicate { ty::Predicate::Trait(ref t) => { assert!(!t.has_escaping_regions()); let obligation = obligation.with(t.clone()); self.evaluate_trait_predicate_recursively(previous_stack, obligation) } ty::Predicate::Subtype(ref p) => { // does this code ever run? match self.infcx.subtype_predicate(&obligation.cause, obligation.param_env, p) { Some(Ok(InferOk { obligations, .. 
})) => { self.evaluate_predicates_recursively(previous_stack, &obligations); EvaluatedToOk }, Some(Err(_)) => EvaluatedToErr, None => EvaluatedToAmbig, } } ty::Predicate::WellFormed(ty) => { match ty::wf::obligations(self.infcx, obligation.param_env, obligation.cause.body_id, ty, obligation.cause.span) { Some(obligations) => self.evaluate_predicates_recursively(previous_stack, obligations.iter()), None => EvaluatedToAmbig, } } ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) => { // we do not consider region relationships when // evaluating trait matches EvaluatedToOk } ty::Predicate::ObjectSafe(trait_def_id) => { if self.tcx().is_object_safe(trait_def_id) { EvaluatedToOk } else { EvaluatedToErr } } ty::Predicate::Projection(ref data) => { let project_obligation = obligation.with(data.clone()); match project::poly_project_and_unify_type(self, &project_obligation) { Ok(Some(subobligations)) => { let result = self.evaluate_predicates_recursively(previous_stack, subobligations.iter()); if let Some(key) = ProjectionCacheKey::from_poly_projection_predicate(self, data) { self.infcx.projection_cache.borrow_mut().complete(key); } result } Ok(None) => { EvaluatedToAmbig } Err(_) => { EvaluatedToErr } } } ty::Predicate::ClosureKind(closure_def_id, closure_substs, kind) => { match self.infcx.closure_kind(closure_def_id, closure_substs) { Some(closure_kind) => { if closure_kind.extends(kind) { EvaluatedToOk } else { EvaluatedToErr } } None => { EvaluatedToAmbig } } } ty::Predicate::ConstEvaluatable(def_id, substs) => { let tcx = self.tcx(); match tcx.lift_to_global(&(obligation.param_env, substs)) { Some((param_env, substs)) => { let instance = ty::Instance::resolve( tcx.global_tcx(), param_env, def_id, substs, ); if let Some(instance) = instance { let cid = GlobalId { instance, promoted: None }; match self.tcx().const_eval(param_env.and(cid)) { Ok(_) => EvaluatedToOk, Err(_) => EvaluatedToErr } } else { EvaluatedToErr } } None => { // Inference variables 
 still left in param_env or substs.
                        EvaluatedToAmbig
                    }
                }
            }
        }
    }

    /// Evaluates a trait predicate, consulting and updating the evaluation
    /// cache. Global obligations are evaluated under an empty param-env to
    /// improve cache hit rates.
    fn evaluate_trait_predicate_recursively<'o>(&mut self,
                                                previous_stack: TraitObligationStackList<'o, 'tcx>,
                                                mut obligation: TraitObligation<'tcx>)
                                                -> EvaluationResult
    {
        debug!("evaluate_trait_predicate_recursively({:?})",
               obligation);

        if !self.intercrate.is_some() && obligation.is_global() {
            // If a param env is consistent, global obligations do not depend on its particular
            // value in order to work, so we can clear out the param env and get better
            // caching. (If the current param env is inconsistent, we don't care what happens).
            debug!("evaluate_trait_predicate_recursively({:?}) - in global", obligation);
            obligation.param_env = obligation.param_env.without_caller_bounds();
        }

        let stack = self.push_stack(previous_stack, &obligation);
        let fresh_trait_ref = stack.fresh_trait_ref;
        if let Some(result) = self.check_evaluation_cache(obligation.param_env, fresh_trait_ref) {
            debug!("CACHE HIT: EVAL({:?})={:?}",
                   fresh_trait_ref,
                   result);
            return result;
        }

        // Cache miss: evaluate within an anonymous dep-graph task so the
        // result can be cached together with its dep-node.
        let (result, dep_node) = self.in_task(|this| this.evaluate_stack(&stack));

        debug!("CACHE MISS: EVAL({:?})={:?}",
               fresh_trait_ref,
               result);
        self.insert_evaluation_cache(obligation.param_env, fresh_trait_ref, dep_node, result);

        result
    }

    fn evaluate_stack<'o>(&mut self,
                          stack: &TraitObligationStack<'o, 'tcx>)
                          -> EvaluationResult
    {
        // In intercrate mode, whenever any of the types are unbound,
        // there can always be an impl. Even if there are no impls in
        // this crate, perhaps the type would be unified with
        // something from another crate that does provide an impl.
        //
        // In intra mode, we must still be conservative. The reason is
        // that we want to avoid cycles. Imagine an impl like:
        //
        //     impl<T:Eq> Eq for Vec<T>
        //
        // and a trait reference like `$0 : Eq` where `$0` is an
        // unbound variable. When we evaluate this trait-reference, we
        // will unify `$0` with `Vec<$1>` (for some fresh variable
        // `$1`), on the condition that `$1 : Eq`.
 We will then wind
        // up with many candidates (since there are other `Eq` impls
        // that apply) and try to winnow things down. This results in
        // a recursive evaluation that `$1 : Eq` -- as you can
        // imagine, this is just where we started. To avoid that, we
        // check for unbound variables and return an ambiguous (hence possible)
        // match if we've seen this trait before.
        //
        // This suffices to allow chains like `FnMut` implemented in
        // terms of `Fn` etc, but we could probably make this more
        // precise still.
        let unbound_input_types = stack.fresh_trait_ref.input_types().any(|ty| ty.is_fresh());
        // this check was an imperfect workaround for a bug in the old
        // intercrate mode, it should be removed when that goes away.
        if unbound_input_types &&
            self.intercrate == Some(IntercrateMode::Issue43355)
        {
            debug!("evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous",
                   stack.fresh_trait_ref);
            // Heuristics: show the diagnostics when there are no candidates in crate.
            if self.intercrate_ambiguity_causes.is_some() {
                debug!("evaluate_stack: intercrate_ambiguity_causes is some");
                if let Ok(candidate_set) = self.assemble_candidates(stack) {
                    if !candidate_set.ambiguous && candidate_set.vec.is_empty() {
                        let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
                        let self_ty = trait_ref.self_ty();
                        let cause = IntercrateAmbiguityCause::DownstreamCrate {
                            trait_desc: trait_ref.to_string(),
                            self_desc: if self_ty.has_concrete_skeleton() {
                                Some(self_ty.to_string())
                            } else {
                                None
                            },
                        };
                        debug!("evaluate_stack: pushing cause = {:?}", cause);
                        self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause);
                    }
                }
            }
            return EvaluatedToAmbig;
        }
        // If this obligation (with fresh/unbound inputs) matches an
        // obligation already on the stack, give up rather than recurse:
        // the result depends on the stack and must not be cached.
        if unbound_input_types &&
              stack.iter().skip(1).any(
                  |prev| stack.obligation.param_env == prev.obligation.param_env &&
                      self.match_fresh_trait_refs(&stack.fresh_trait_ref,
                                                  &prev.fresh_trait_ref))
        {
            debug!("evaluate_stack({:?}) --> unbound argument, recursive --> giving up",
                   stack.fresh_trait_ref);
            return EvaluatedToUnknown;
        }

        // If there is any previous entry on the stack that precisely
        // matches this obligation, then we can assume that the
        // obligation is satisfied for now (still all other conditions
        // must be met of course). One obvious case this comes up is
        // marker traits like `Send`. Think of a linked list:
        //
        //     struct List<T> { data: T, next: Option<Box<List<T>>> }
        //
        // `Box<List<T>>` will be `Send` if `T` is `Send` and
        // `Option<Box<List<T>>>` is `Send`, and in turn
        // `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is
        // `Send`.
        //
        // Note that we do this comparison using the `fresh_trait_ref`
        // fields. Because these have all been skolemized using
        // `self.freshener`, we can be sure that (a) this will not
        // affect the inferencer state and (b) that if we see two
        // skolemized types with the same index, they refer to the
        // same unbound type variable.
        if let Some(rec_index) =
            stack.iter()
            .skip(1) // skip top-most frame
            .position(|prev| stack.obligation.param_env == prev.obligation.param_env &&
                      stack.fresh_trait_ref == prev.fresh_trait_ref)
        {
            debug!("evaluate_stack({:?}) --> recursive",
                   stack.fresh_trait_ref);
            let cycle = stack.iter().skip(1).take(rec_index+1);
            let cycle = cycle.map(|stack| ty::Predicate::Trait(stack.obligation.predicate));
            if self.coinductive_match(cycle) {
                // Coinductive cycles (auto traits) are allowed to succeed.
                debug!("evaluate_stack({:?}) --> recursive, coinductive",
                       stack.fresh_trait_ref);
                return EvaluatedToOk;
            } else {
                debug!("evaluate_stack({:?}) --> recursive, inductive",
                       stack.fresh_trait_ref);
                return EvaluatedToRecur;
            }
        }

        match self.candidate_from_obligation(stack) {
            Ok(Some(c)) => self.evaluate_candidate(stack, &c),
            Ok(None) => EvaluatedToAmbig,
            Err(..) => EvaluatedToErr
        }
    }

    /// For defaulted traits, we use a co-inductive strategy to solve, so
    /// that recursion is ok.
 This routine returns true if the top of the
    /// stack (`cycle[0]`):
    ///
    /// - is a defaulted trait, and
    /// - it also appears in the backtrace at some position `X`; and,
    /// - all the predicates at positions `X..` between `X` and the top are
    ///   also defaulted traits.
    pub fn coinductive_match<I>(&mut self, cycle: I) -> bool
        where I: Iterator<Item=ty::Predicate<'tcx>>
    {
        let mut cycle = cycle;
        cycle.all(|predicate| self.coinductive_predicate(predicate))
    }

    /// Returns true if `predicate` is a trait predicate for an auto trait -
    /// the only kind of predicate treated co-inductively here.
    fn coinductive_predicate(&self, predicate: ty::Predicate<'tcx>) -> bool {
        let result = match predicate {
            ty::Predicate::Trait(ref data) => {
                self.tcx().trait_is_auto(data.def_id())
            }
            _ => {
                false
            }
        };
        debug!("coinductive_predicate({:?}) = {:?}", predicate, result);
        result
    }

    /// Further evaluate `candidate` to decide whether all type parameters match and whether nested
    /// obligations are met. Returns true if `candidate` remains viable after this further
    /// scrutiny.
    fn evaluate_candidate<'o>(&mut self,
                              stack: &TraitObligationStack<'o, 'tcx>,
                              candidate: &SelectionCandidate<'tcx>)
                              -> EvaluationResult
    {
        debug!("evaluate_candidate: depth={} candidate={:?}",
               stack.obligation.recursion_depth, candidate);
        // Confirm the candidate inside a probe so no inference state leaks,
        // then evaluate the nested obligations the confirmation produced.
        let result = self.probe(|this, _| {
            let candidate = (*candidate).clone();
            match this.confirm_candidate(stack.obligation, candidate) {
                Ok(selection) => {
                    this.evaluate_predicates_recursively(
                        stack.list(),
                        selection.nested_obligations().iter())
                }
                Err(..) => EvaluatedToErr
            }
        });
        debug!("evaluate_candidate: depth={} result={:?}",
               stack.obligation.recursion_depth, result);
        result
    }

    /// Looks `trait_ref` up in the evaluation caches: the global tcx cache
    /// when usable for this `param_env`, falling back to the
    /// inference-context-local cache.
    fn check_evaluation_cache(&self,
                              param_env: ty::ParamEnv<'tcx>,
                              trait_ref: ty::PolyTraitRef<'tcx>)
                              -> Option<EvaluationResult>
    {
        let tcx = self.tcx();
        if self.can_use_global_caches(param_env) {
            let cache = tcx.evaluation_cache.hashmap.borrow();
            if let Some(cached) = cache.get(&trait_ref) {
                return Some(cached.get(tcx));
            }
        }
        self.infcx.evaluation_cache.hashmap
                                   .borrow()
                                   .get(&trait_ref)
                                   .map(|v| v.get(tcx))
    }

    /// Caches an evaluation result, preferring the global cache when the
    /// trait-ref can be lifted to the global tcx.
    fn insert_evaluation_cache(&mut self,
                               param_env: ty::ParamEnv<'tcx>,
                               trait_ref: ty::PolyTraitRef<'tcx>,
                               dep_node: DepNodeIndex,
                               result: EvaluationResult)
    {
        // Avoid caching results that depend on more than just the trait-ref
        // - the stack can create recursion.
        if result.is_stack_dependent() {
            return;
        }

        if self.can_use_global_caches(param_env) {
            let mut cache = self.tcx().evaluation_cache.hashmap.borrow_mut();
            if let Some(trait_ref) = self.tcx().lift_to_global(&trait_ref) {
                cache.insert(trait_ref, WithDepNode::new(dep_node, result));
                return;
            }
        }

        self.infcx.evaluation_cache.hashmap
                                   .borrow_mut()
                                   .insert(trait_ref, WithDepNode::new(dep_node, result));
    }

    ///////////////////////////////////////////////////////////////////////////
    // CANDIDATE ASSEMBLY
    //
    // The selection process begins by examining all in-scope impls,
    // caller obligations, and so forth and assembling a list of
    // candidates. See [rustc guide] for more details.
    //
    // [rustc guide]:
    // https://rust-lang-nursery.github.io/rustc-guide/trait-resolution.html#candidate-assembly

    fn candidate_from_obligation<'o>(&mut self,
                                     stack: &TraitObligationStack<'o, 'tcx>)
                                     -> SelectionResult<'tcx, SelectionCandidate<'tcx>>
    {
        // Watch out for overflow. This intentionally bypasses (and does
        // not update) the cache.

        let recursion_limit = self.infcx.tcx.sess.recursion_limit.get();
        if stack.obligation.recursion_depth >= recursion_limit {
            self.infcx().report_overflow_error(&stack.obligation, true);
        }

        // Check the cache. Note that we skolemize the trait-ref
        // separately rather than using `stack.fresh_trait_ref` -- this
        // is because we want the unbound variables to be replaced
        // with fresh skolemized types starting from index 0.
        let cache_fresh_trait_pred =
            self.infcx.freshen(stack.obligation.predicate.clone());
        debug!("candidate_from_obligation(cache_fresh_trait_pred={:?}, obligation={:?})",
               cache_fresh_trait_pred,
               stack);
        assert!(!stack.obligation.predicate.has_escaping_regions());

        if let Some(c) = self.check_candidate_cache(stack.obligation.param_env,
                                                    &cache_fresh_trait_pred) {
            debug!("CACHE HIT: SELECT({:?})={:?}",
                   cache_fresh_trait_pred,
                   c);
            return c;
        }

        // If no match, compute result and insert into cache.
        let (candidate, dep_node) = self.in_task(|this| {
            this.candidate_from_obligation_no_cache(stack)
        });

        debug!("CACHE MISS: SELECT({:?})={:?}",
               cache_fresh_trait_pred, candidate);
        self.insert_candidate_cache(stack.obligation.param_env,
                                    cache_fresh_trait_pred,
                                    dep_node,
                                    candidate.clone());
        candidate
    }

    /// Runs `op` inside an anonymous `TraitSelect` dep-graph task,
    /// registers a read of the resulting dep-node, and returns the result
    /// together with the node index (so it can be cached alongside).
    fn in_task<OP, R>(&mut self, op: OP) -> (R, DepNodeIndex)
        where OP: FnOnce(&mut Self) -> R
    {
        let (result, dep_node) = self.tcx().dep_graph.with_anon_task(DepKind::TraitSelect, || {
            op(self)
        });
        self.tcx().dep_graph.read_index(dep_node);
        (result, dep_node)
    }

    // Treat negative impls as unimplemented
    fn filter_negative_impls(&self, candidate: SelectionCandidate<'tcx>)
                             -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
        if let ImplCandidate(def_id) = candidate {
            if !self.allow_negative_impls &&
                self.tcx().impl_polarity(def_id) == hir::ImplPolarity::Negative {
                return Err(Unimplemented)
            }
        }
        Ok(Some(candidate))
    }

    fn candidate_from_obligation_no_cache<'o>(&mut self,
                                              stack: &TraitObligationStack<'o, 'tcx>)
                                              -> SelectionResult<'tcx, SelectionCandidate<'tcx>>
    {
        if stack.obligation.predicate.references_error() {
            // If we encounter a `TyError`, we generally prefer the
            // most "optimistic" result in response -- that is, the
            // one least likely to report downstream errors. But
            // because this routine is shared by coherence and by
            // trait selection, there isn't an obvious "right" choice
            // here in that respect, so we opt to just return
            // ambiguity and let the upstream clients sort it out.
            return Ok(None);
        }

        match self.is_knowable(stack) {
            None => {}
            Some(conflict) => {
                debug!("coherence stage: not knowable");
                if self.intercrate_ambiguity_causes.is_some() {
                    debug!("evaluate_stack: intercrate_ambiguity_causes is some");
                    // Heuristics: show the diagnostics when there are no candidates in crate.
                    let candidate_set = self.assemble_candidates(stack)?;
                    if !candidate_set.ambiguous && candidate_set.vec.iter().all(|c| {
                        !self.evaluate_candidate(stack, &c).may_apply()
                    }) {
                        let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
                        let self_ty = trait_ref.self_ty();
                        let trait_desc = trait_ref.to_string();
                        let self_desc = if self_ty.has_concrete_skeleton() {
                            Some(self_ty.to_string())
                        } else {
                            None
                        };
                        let cause = if let Conflict::Upstream = conflict {
                            IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc }
                        } else {
                            IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc }
                        };
                        debug!("evaluate_stack: pushing cause = {:?}", cause);
                        self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause);
                    }
                }
                return Ok(None);
            }
        }

        let candidate_set = self.assemble_candidates(stack)?;

        if candidate_set.ambiguous {
            debug!("candidate set contains ambig");
            return Ok(None);
        }

        let mut candidates = candidate_set.vec;

        debug!("assembled {} candidates for {:?}: {:?}",
               candidates.len(),
               stack,
               candidates);

        // At this point, we know that each of the entries in the
        // candidate set is *individually* applicable. Now we have to
        // figure out if they contain mutual incompatibilities. This
        // frequently arises if we have an unconstrained input type --
        // for example, we are looking for $0:Eq where $0 is some
        // unconstrained type variable. In that case, we'll get a
        // candidate which assumes $0 == int, one that assumes $0 ==
        // usize, etc. This spells an ambiguity.

        // If there is more than one candidate, first winnow them down
        // by considering extra conditions (nested obligations and so
        // forth). We don't winnow if there is exactly one
        // candidate. This is a relatively minor distinction but it
        // can lead to better inference and error-reporting. An
        // example would be if there was an impl:
        //
        //     impl<T:Clone> Vec<T> { fn push_clone(...) { ... } }
        //
        // and we were to see some code `foo.push_clone()` where `boo`
        // is a `Vec<Bar>` and `Bar` does not implement `Clone`. If
        // we were to winnow, we'd wind up with zero candidates.
        // Instead, we select the right impl now but report `Bar does
        // not implement Clone`.
        if candidates.len() == 1 {
            return self.filter_negative_impls(candidates.pop().unwrap());
        }

        // Winnow, but record the exact outcome of evaluation, which
        // is needed for specialization.
        let mut candidates: Vec<_> = candidates.into_iter().filter_map(|c| {
            let eval = self.evaluate_candidate(stack, &c);
            if eval.may_apply() {
                Some(EvaluatedCandidate {
                    candidate: c,
                    evaluation: eval,
                })
            } else {
                None
            }
        }).collect();

        // If there are STILL multiple candidate, we can further
        // reduce the list by dropping duplicates -- including
        // resolving specializations.
        if candidates.len() > 1 {
            let mut i = 0;
            while i < candidates.len() {
                let is_dup =
                    (0..candidates.len())
                    .filter(|&j| i != j)
                    .any(|j| self.candidate_should_be_dropped_in_favor_of(&candidates[i],
                                                                          &candidates[j]));
                if is_dup {
                    debug!("Dropping candidate #{}/{}: {:?}",
                           i, candidates.len(), candidates[i]);
                    candidates.swap_remove(i);
                } else {
                    debug!("Retaining candidate #{}/{}: {:?}",
                           i, candidates.len(), candidates[i]);
                    i += 1;

                    // If there are *STILL* multiple candidates, give up
                    // and report ambiguity.
                    if i > 1 {
                        debug!("multiple matches, ambig");
                        return Ok(None);
                    }
                }
            }
        }

        // If there are *NO* candidates, then there are no impls --
        // that we know of, anyway. Note that in the case where there
        // are unbound type variables within the obligation, it might
        // be the case that you could still satisfy the obligation
        // from another crate by instantiating the type variables with
        // a type from another crate that does have an impl. This case
        // is checked for in `evaluate_stack` (and hence users
        // who might care about this case, like coherence, should use
        // that function).
        if candidates.is_empty() {
            return Err(Unimplemented);
        }

        // Just one candidate left.

        self.filter_negative_impls(candidates.pop().unwrap().candidate)
    }

    /// In coherence (intercrate) mode, determines whether the absence of an
    /// impl for this obligation is "knowable": returns `None` when it is,
    /// and the kind of conflict otherwise. Outside intercrate mode this
    /// always returns `None`.
    fn is_knowable<'o>(&mut self,
                       stack: &TraitObligationStack<'o, 'tcx>)
                       -> Option<Conflict>
    {
        debug!("is_knowable(intercrate={:?})", self.intercrate);

        if !self.intercrate.is_some() {
            return None;
        }

        let obligation = &stack.obligation;
        let predicate = self.infcx().resolve_type_vars_if_possible(&obligation.predicate);

        // ok to skip binder because of the nature of the
        // trait-ref-is-knowable check, which does not care about
        // bound regions
        let trait_ref = predicate.skip_binder().trait_ref;

        let result = coherence::trait_ref_is_knowable(self.tcx(), trait_ref);
        if let (Some(Conflict::Downstream { used_to_be_broken: true }),
                Some(IntercrateMode::Issue43355)) = (result, self.intercrate) {
            debug!("is_knowable: IGNORING conflict to be bug-compatible with #43355");
            None
        } else {
            result
        }
    }

    /// Returns true if the global caches can be used.
    /// Do note that if the type itself is not in the
    /// global tcx, the local caches will be used.
    fn can_use_global_caches(&self, param_env: ty::ParamEnv<'tcx>) -> bool {
        // If there are any where-clauses in scope, then we always use
        // a cache local to this particular scope. Otherwise, we
        // switch to a global cache. We used to try and draw
        // finer-grained distinctions, but that led to a series of
        // annoying and weird bugs like #22019 and #18290. This simple
        // rule seems to be pretty clearly safe and also still retains
        // a very high hit rate (~95% when compiling rustc).
        if !param_env.caller_bounds.is_empty() {
            return false;
        }

        // Avoid using the master cache during coherence and just rely
        // on the local cache. This effectively disables caching
        // during coherence. It is really just a simplification to
        // avoid us having to fear that coherence results "pollute"
        // the master cache. Since coherence executes pretty quickly,
        // it's not worth going to more trouble to increase the
        // hit-rate I don't think.
        if self.intercrate.is_some() {
            return false;
        }

        // Otherwise, we can use the global cache.
        true
    }

    /// Looks up a previously computed selection result for this (freshened)
    /// trait predicate; mirrors `check_evaluation_cache`.
    fn check_candidate_cache(&mut self,
                             param_env: ty::ParamEnv<'tcx>,
                             cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>)
                             -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>>
    {
        let tcx = self.tcx();
        let trait_ref = &cache_fresh_trait_pred.0.trait_ref;
        if self.can_use_global_caches(param_env) {
            let cache = tcx.selection_cache.hashmap.borrow();
            if let Some(cached) = cache.get(&trait_ref) {
                return Some(cached.get(tcx));
            }
        }
        self.infcx.selection_cache.hashmap
                                  .borrow()
                                  .get(trait_ref)
                                  .map(|v| v.get(tcx))
    }

    /// Caches a selection result; mirrors `insert_evaluation_cache`. Falls
    /// back to the local cache when the trait-ref or candidate cannot be
    /// lifted to the global tcx.
    fn insert_candidate_cache(&mut self,
                              param_env: ty::ParamEnv<'tcx>,
                              cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
                              dep_node: DepNodeIndex,
                              candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>)
    {
        let tcx = self.tcx();
        let trait_ref = cache_fresh_trait_pred.0.trait_ref;
        if self.can_use_global_caches(param_env) {
            let mut cache = tcx.selection_cache.hashmap.borrow_mut();
            if let Some(trait_ref) = tcx.lift_to_global(&trait_ref) {
                if let Some(candidate) = tcx.lift_to_global(&candidate) {
                    cache.insert(trait_ref, WithDepNode::new(dep_node, candidate));
                    return;
                }
            }
        }

        self.infcx.selection_cache.hashmap
                                  .borrow_mut()
                                  .insert(trait_ref, WithDepNode::new(dep_node, candidate));
    }

    fn assemble_candidates<'o>(&mut self,
                               stack: &TraitObligationStack<'o, 'tcx>)
                               -> Result<SelectionCandidateSet<'tcx>, SelectionError<'tcx>>
    {
        let TraitObligationStack { obligation, .. } = *stack;
        let ref obligation = Obligation {
            param_env: obligation.param_env,
            cause: obligation.cause.clone(),
            recursion_depth: obligation.recursion_depth,
            predicate: self.infcx().resolve_type_vars_if_possible(&obligation.predicate)
        };

        if obligation.predicate.skip_binder().self_ty().is_ty_var() {
            // Self is a type variable (e.g. `_: AsRef<str>`).
            //
            // This is somewhat problematic, as the current scheme can't really
            // handle it turning to be a projection.
 This does end up as truly
            // ambiguous in most cases anyway.
            //
            // Take the fast path out - this also improves
            // performance by preventing assemble_candidates_from_impls from
            // matching every impl for this trait.
            return Ok(SelectionCandidateSet { vec: vec![], ambiguous: true });
        }

        let mut candidates = SelectionCandidateSet {
            vec: Vec::new(),
            ambiguous: false
        };

        // Other bounds. Consider both in-scope bounds from fn decl
        // and applicable impls. There is a certain set of precedence rules here.
        let def_id = obligation.predicate.def_id();
        let lang_items = self.tcx().lang_items();
        if lang_items.copy_trait() == Some(def_id) {
            debug!("obligation self ty is {:?}",
                   obligation.predicate.0.self_ty());

            // User-defined copy impls are permitted, but only for
            // structs and enums.
            self.assemble_candidates_from_impls(obligation, &mut candidates)?;

            // For other types, we'll use the builtin rules.
            let copy_conditions = self.copy_clone_conditions(obligation);
            self.assemble_builtin_bound_candidates(copy_conditions, &mut candidates)?;
        } else if lang_items.sized_trait() == Some(def_id) {
            // Sized is never implementable by end-users, it is
            // always automatically computed.
            let sized_conditions = self.sized_conditions(obligation);
            self.assemble_builtin_bound_candidates(sized_conditions,
                                                   &mut candidates)?;
        } else if lang_items.unsize_trait() == Some(def_id) {
            self.assemble_candidates_for_unsizing(obligation, &mut candidates);
        } else {
            if lang_items.clone_trait() == Some(def_id) {
                // Same builtin conditions as `Copy`, i.e. every type which has builtin support
                // for `Copy` also has builtin support for `Clone`, + tuples and arrays of `Clone`
                // types have builtin support for `Clone`.
                let clone_conditions = self.copy_clone_conditions(obligation);
                self.assemble_builtin_bound_candidates(clone_conditions, &mut candidates)?;
            }

            self.assemble_generator_candidates(obligation, &mut candidates)?;
            self.assemble_closure_candidates(obligation, &mut candidates)?;
            self.assemble_fn_pointer_candidates(obligation, &mut candidates)?;
            self.assemble_candidates_from_impls(obligation, &mut candidates)?;
            self.assemble_candidates_from_object_ty(obligation, &mut candidates);
        }

        self.assemble_candidates_from_projected_tys(obligation, &mut candidates);
        self.assemble_candidates_from_caller_bounds(stack, &mut candidates)?;
        // Auto implementations have lower priority, so we only
        // consider triggering a default if there is no other impl that can apply.
        if candidates.vec.is_empty() {
            self.assemble_candidates_from_auto_impls(obligation, &mut candidates)?;
        }
        debug!("candidate list size: {}", candidates.vec.len());
        Ok(candidates)
    }

    fn assemble_candidates_from_projected_tys(&mut self,
                                              obligation: &TraitObligation<'tcx>,
                                              candidates: &mut SelectionCandidateSet<'tcx>)
    {
        debug!("assemble_candidates_for_projected_tys({:?})", obligation);

        // before we go into the whole skolemization thing, just
        // quickly check if the self-type is a projection at all.
        match obligation.predicate.0.trait_ref.self_ty().sty {
            ty::TyProjection(_) | ty::TyAnon(..)
 => {}
            ty::TyInfer(ty::TyVar(_)) => {
                span_bug!(obligation.cause.span,
                    "Self=_ should have been handled by assemble_candidates");
            }
            _ => return
        }

        let result = self.probe(|this, snapshot| {
            this.match_projection_obligation_against_definition_bounds(obligation,
                                                                       snapshot)
        });

        if result {
            candidates.vec.push(ProjectionCandidate);
        }
    }

    /// When the self type is a projection (or anonymized type), checks
    /// whether the obligation is implied by one of the bounds declared on
    /// the projection's definition.
    fn match_projection_obligation_against_definition_bounds(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        snapshot: &infer::CombinedSnapshot<'cx, 'tcx>)
        -> bool
    {
        let poly_trait_predicate =
            self.infcx().resolve_type_vars_if_possible(&obligation.predicate);
        let (skol_trait_predicate, skol_map) =
            self.infcx().skolemize_late_bound_regions(&poly_trait_predicate, snapshot);
        debug!("match_projection_obligation_against_definition_bounds: \
                skol_trait_predicate={:?} skol_map={:?}",
               skol_trait_predicate,
               skol_map);

        let (def_id, substs) = match skol_trait_predicate.trait_ref.self_ty().sty {
            ty::TyProjection(ref data) =>
                (data.trait_ref(self.tcx()).def_id, data.substs),
            ty::TyAnon(def_id, substs) => (def_id, substs),
            _ => {
                span_bug!(
                    obligation.cause.span,
                    "match_projection_obligation_against_definition_bounds() called \
                     but self-ty not a projection: {:?}",
                    skol_trait_predicate.trait_ref.self_ty());
            }
        };
        debug!("match_projection_obligation_against_definition_bounds: \
                def_id={:?}, substs={:?}",
               def_id, substs);

        let predicates_of = self.tcx().predicates_of(def_id);
        let bounds = predicates_of.instantiate(self.tcx(), substs);
        debug!("match_projection_obligation_against_definition_bounds: \
                bounds={:?}",
               bounds);

        let matching_bound =
            util::elaborate_predicates(self.tcx(), bounds.predicates)
            .filter_to_traits()
            .find(
                |bound| self.probe(
                    |this, _| this.match_projection(obligation,
                                                    bound.clone(),
                                                    skol_trait_predicate.trait_ref.clone(),
                                                    &skol_map,
                                                    snapshot)));

        debug!("match_projection_obligation_against_definition_bounds: \
                matching_bound={:?}",
               matching_bound);
        match matching_bound {
            None => false,
            Some(bound) => {
                // Repeat the successful match, if any, this time outside of a probe.
                let result = self.match_projection(obligation,
                                                   bound,
                                                   skol_trait_predicate.trait_ref.clone(),
                                                   &skol_map,
                                                   snapshot);

                self.infcx.pop_skolemized(skol_map, snapshot);

                assert!(result);
                true
            }
        }
    }

    /// Attempts to show that `trait_bound` is a supertype of the
    /// (skolemized) trait-ref of the obligation, then leak-checks the
    /// skolemized regions; returns true on success. Snapshot handling is
    /// the caller's responsibility.
    fn match_projection(&mut self,
                        obligation: &TraitObligation<'tcx>,
                        trait_bound: ty::PolyTraitRef<'tcx>,
                        skol_trait_ref: ty::TraitRef<'tcx>,
                        skol_map: &infer::SkolemizationMap<'tcx>,
                        snapshot: &infer::CombinedSnapshot<'cx, 'tcx>)
                        -> bool
    {
        assert!(!skol_trait_ref.has_escaping_regions());
        if let Err(_) = self.infcx.at(&obligation.cause, obligation.param_env)
                                  .sup(ty::Binder(skol_trait_ref), trait_bound) {
            return false;
        }

        self.infcx.leak_check(false, obligation.cause.span, skol_map, snapshot).is_ok()
    }

    /// Given an obligation like `<SomeTrait for T>`, search the obligations that the caller
    /// supplied to find out whether it is listed among them.
    ///
    /// Never affects inference environment.
    fn assemble_candidates_from_caller_bounds<'o>(&mut self,
                                                  stack: &TraitObligationStack<'o, 'tcx>,
                                                  candidates: &mut SelectionCandidateSet<'tcx>)
                                                  -> Result<(),SelectionError<'tcx>>
    {
        debug!("assemble_candidates_from_caller_bounds({:?})",
               stack.obligation);

        let all_bounds =
            stack.obligation.param_env.caller_bounds
                                      .iter()
                                      .filter_map(|o| o.to_opt_poly_trait_ref());

        // micro-optimization: filter out predicates relating to different
        // traits.
let matching_bounds = all_bounds.filter(|p| p.def_id() == stack.obligation.predicate.def_id()); let matching_bounds = matching_bounds.filter( |bound| self.evaluate_where_clause(stack, bound.clone()).may_apply()); let param_candidates = matching_bounds.map(|bound| ParamCandidate(bound)); candidates.vec.extend(param_candidates); Ok(()) } fn evaluate_where_clause<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>, where_clause_trait_ref: ty::PolyTraitRef<'tcx>) -> EvaluationResult { self.probe(move |this, _| { match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) { Ok(obligations) => { this.evaluate_predicates_recursively(stack.list(), obligations.iter()) } Err(()) => EvaluatedToErr } }) } fn assemble_generator_candidates(&mut self, obligation: &TraitObligation<'tcx>, candidates: &mut SelectionCandidateSet<'tcx>) -> Result<(),SelectionError<'tcx>> { if self.tcx().lang_items().gen_trait() != Some(obligation.predicate.def_id()) { return Ok(()); } // ok to skip binder because the substs on generator types never // touch bound regions, they just capture the in-scope // type/region parameters let self_ty = *obligation.self_ty().skip_binder(); match self_ty.sty { ty::TyGenerator(..) => { debug!("assemble_generator_candidates: self_ty={:?} obligation={:?}", self_ty, obligation); candidates.vec.push(GeneratorCandidate); Ok(()) } ty::TyInfer(ty::TyVar(_)) => { debug!("assemble_generator_candidates: ambiguous self-type"); candidates.ambiguous = true; return Ok(()); } _ => { return Ok(()); } } } /// Check for the artificial impl that the compiler will create for an obligation like `X : /// FnMut<..>` where `X` is a closure type. /// /// Note: the type parameters on a closure candidate are modeled as *output* type /// parameters and hence do not affect whether this trait is a match or not. They will be /// unified during the confirmation step. 
    fn assemble_closure_candidates(&mut self,
                                   obligation: &TraitObligation<'tcx>,
                                   candidates: &mut SelectionCandidateSet<'tcx>)
                                   -> Result<(),SelectionError<'tcx>>
    {
        // Only relevant when the obligation's trait is one of the Fn-family
        // lang items (Fn / FnMut / FnOnce).
        let kind = match self.tcx().lang_items().fn_trait_kind(obligation.predicate.0.def_id()) {
            Some(k) => k,
            None => { return Ok(()); }
        };

        // ok to skip binder because the substs on closure types never
        // touch bound regions, they just capture the in-scope
        // type/region parameters
        match obligation.self_ty().skip_binder().sty {
            ty::TyClosure(closure_def_id, closure_substs) => {
                debug!("assemble_unboxed_candidates: kind={:?} obligation={:?}",
                       kind, obligation);
                match self.infcx.closure_kind(closure_def_id, closure_substs) {
                    Some(closure_kind) => {
                        debug!("assemble_unboxed_candidates: closure_kind = {:?}", closure_kind);
                        // e.g. an Fn closure satisfies FnMut and FnOnce too.
                        if closure_kind.extends(kind) {
                            candidates.vec.push(ClosureCandidate);
                        }
                    }
                    None => {
                        // Kind not yet inferred: optimistically add the
                        // candidate; confirmation will settle it.
                        debug!("assemble_unboxed_candidates: closure_kind not yet known");
                        candidates.vec.push(ClosureCandidate);
                    }
                };
                Ok(())
            }
            ty::TyInfer(ty::TyVar(_)) => {
                debug!("assemble_unboxed_closure_candidates: ambiguous self-type");
                candidates.ambiguous = true;
                return Ok(());
            }
            _ => { return Ok(()); }
        }
    }

    /// Implement one of the `Fn()` family for a fn pointer.
    fn assemble_fn_pointer_candidates(&mut self,
                                      obligation: &TraitObligation<'tcx>,
                                      candidates: &mut SelectionCandidateSet<'tcx>)
                                      -> Result<(),SelectionError<'tcx>>
    {
        // We provide impl of all fn traits for fn pointers.
        if self.tcx().lang_items().fn_trait_kind(obligation.predicate.def_id()).is_none() {
            return Ok(());
        }

        // ok to skip binder because what we are inspecting doesn't involve bound regions
        let self_ty = *obligation.self_ty().skip_binder();
        match self_ty.sty {
            ty::TyInfer(ty::TyVar(_)) => {
                debug!("assemble_fn_pointer_candidates: ambiguous self-type");
                candidates.ambiguous = true; // could wind up being a fn() type
            }

            // provide an impl, but only for suitable `fn` pointers
            ty::TyFnDef(..) |
            ty::TyFnPtr(_) => {
                // Only safe, Rust-ABI, non-variadic signatures qualify.
                if let ty::Binder(ty::FnSig {
                    unsafety: hir::Unsafety::Normal,
                    abi: Abi::Rust,
                    variadic: false,
                    ..
                }) = self_ty.fn_sig(self.tcx()) {
                    candidates.vec.push(FnPointerCandidate);
                }
            }

            _ => { }
        }

        Ok(())
    }

    /// Search for impls that might apply to `obligation`.
    fn assemble_candidates_from_impls(&mut self,
                                      obligation: &TraitObligation<'tcx>,
                                      candidates: &mut SelectionCandidateSet<'tcx>)
                                      -> Result<(), SelectionError<'tcx>>
    {
        debug!("assemble_candidates_from_impls(obligation={:?})", obligation);

        self.tcx().for_each_relevant_impl(
            obligation.predicate.def_id(),
            obligation.predicate.0.trait_ref.self_ty(),
            |impl_def_id| {
                self.probe(|this, snapshot| { /* [1] */
                    match this.match_impl(impl_def_id, obligation, snapshot) {
                        Ok(skol_map) => {
                            candidates.vec.push(ImplCandidate(impl_def_id));

                            // NB: we can safely drop the skol map
                            // since we are in a probe [1]
                            mem::drop(skol_map);
                        }
                        Err(_) => { }
                    }
                });
            }
        );

        Ok(())
    }

    /// Adds an `AutoImplCandidate` when the obligation's trait is an auto
    /// trait and the self-type is one whose constituent types can be
    /// enumerated (i.e. not an object, foreign, param, or projection type).
    fn assemble_candidates_from_auto_impls(&mut self,
                                           obligation: &TraitObligation<'tcx>,
                                           candidates: &mut SelectionCandidateSet<'tcx>)
                                           -> Result<(), SelectionError<'tcx>>
    {
        // OK to skip binder here because the tests we do below do not involve bound regions
        let self_ty = *obligation.self_ty().skip_binder();
        debug!("assemble_candidates_from_auto_impls(self_ty={:?})", self_ty);

        let def_id = obligation.predicate.def_id();

        if self.tcx().trait_is_auto(def_id) {
            match self_ty.sty {
                ty::TyDynamic(..) => {
                    // For object types, we don't know what the closed
                    // over types are. This means we conservatively
                    // say nothing; a candidate may be added by
                    // `assemble_candidates_from_object_ty`.
                }
                ty::TyForeign(..) => {
                    // Since the contents of foreign types is unknown,
                    // we don't add any `..` impl. Default traits could
                    // still be provided by a manual implementation for
                    // this trait and type.
                }
                ty::TyParam(..) |
                ty::TyProjection(..) => {
                    // In these cases, we don't know what the actual
                    // type is. Therefore, we cannot break it down
                    // into its constituent types. So we don't
                    // consider the `..` impl but instead just add no
                    // candidates: this means that typeck will only
                    // succeed if there is another reason to believe
                    // that this obligation holds. That could be a
                    // where-clause or, in the case of an object type,
                    // it could be that the object type lists the
                    // trait (e.g. `Foo+Send : Send`). See
                    // `compile-fail/typeck-default-trait-impl-send-param.rs`
                    // for an example of a test case that exercises
                    // this path.
                }
                ty::TyInfer(ty::TyVar(_)) => {
                    // the auto impl might apply, we don't know
                    candidates.ambiguous = true;
                }
                _ => {
                    candidates.vec.push(AutoImplCandidate(def_id.clone()))
                }
            }
        }

        Ok(())
    }

    /// Search for impls that might apply to `obligation`.
    fn assemble_candidates_from_object_ty(&mut self,
                                          obligation: &TraitObligation<'tcx>,
                                          candidates: &mut SelectionCandidateSet<'tcx>)
    {
        debug!("assemble_candidates_from_object_ty(self_ty={:?})",
               obligation.self_ty().skip_binder());

        // Object-safety candidates are only applicable to object-safe
        // traits. Including this check is useful because it helps
        // inference in cases of traits like `BorrowFrom`, which are
        // not object-safe, and which rely on being able to infer the
        // self-type from one of the other inputs. Without this check,
        // these cases wind up being considered ambiguous due to a
        // (spurious) ambiguity introduced here.
        let predicate_trait_ref = obligation.predicate.to_poly_trait_ref();
        if !self.tcx().is_object_safe(predicate_trait_ref.def_id()) {
            return;
        }

        self.probe(|this, _snapshot| {
            // the code below doesn't care about regions, and the
            // self-ty here doesn't escape this probe, so just erase
            // any LBR.
            let self_ty = this.tcx().erase_late_bound_regions(&obligation.self_ty());
            let poly_trait_ref = match self_ty.sty {
                ty::TyDynamic(ref data, ..)
                => {
                    // If the object type itself lists the (auto) trait we
                    // are looking for, that alone satisfies the obligation.
                    if data.auto_traits().any(|did| did == obligation.predicate.def_id()) {
                        debug!("assemble_candidates_from_object_ty: matched builtin bound, \
                                pushing candidate");
                        candidates.vec.push(BuiltinObjectCandidate);
                        return;
                    }

                    match data.principal() {
                        Some(p) => p.with_self_ty(this.tcx(), self_ty),
                        None => return,
                    }
                }
                ty::TyInfer(ty::TyVar(_)) => {
                    debug!("assemble_candidates_from_object_ty: ambiguous");
                    candidates.ambiguous = true; // could wind up being an object type
                    return;
                }
                _ => {
                    return;
                }
            };

            debug!("assemble_candidates_from_object_ty: poly_trait_ref={:?}",
                   poly_trait_ref);

            // Count only those upcast versions that match the trait-ref
            // we are looking for. Specifically, do not only check for the
            // correct trait, but also the correct type parameters.
            // For example, we may be trying to upcast `Foo` to `Bar<i32>`,
            // but `Foo` is declared as `trait Foo : Bar<u32>`.
            let upcast_trait_refs =
                util::supertraits(this.tcx(), poly_trait_ref)
                .filter(|upcast_trait_ref| {
                    this.probe(|this, _| {
                        let upcast_trait_ref = upcast_trait_ref.clone();
                        this.match_poly_trait_ref(obligation, upcast_trait_ref).is_ok()
                    })
                })
                .count();

            if upcast_trait_refs > 1 {
                // can be upcast in many ways; need more type information
                candidates.ambiguous = true;
            } else if upcast_trait_refs == 1 {
                candidates.vec.push(ObjectCandidate);
            }
        })
    }

    /// Search for unsizing that might apply to `obligation`.
    fn assemble_candidates_for_unsizing(&mut self,
                                        obligation: &TraitObligation<'tcx>,
                                        candidates: &mut SelectionCandidateSet<'tcx>) {
        // We currently never consider higher-ranked obligations e.g.
        // `for<'a> &'a T: Unsize<Trait+'a>` to be implemented. This is not
        // because they are a priori invalid, and we could potentially add support
        // for them later, it's just that there isn't really a strong need for it.
        // A `T: Unsize<U>` obligation is always used as part of a `T: CoerceUnsize<U>`
        // impl, and those are generally applied to concrete types.
        //
        // That said, one might try to write a fn with a where clause like
        //     for<'a> Foo<'a, T>: Unsize<Foo<'a, Trait>>
        // where the `'a` is kind of orthogonal to the relevant part of the `Unsize`.
        // Still, you'd be more likely to write that where clause as
        //     T: Trait
        // so it seems ok if we (conservatively) fail to accept that `Unsize`
        // obligation above. Should be possible to extend this in the future.
        let source = match obligation.self_ty().no_late_bound_regions() {
            Some(t) => t,
            None => {
                // Don't add any candidates if there are bound regions.
                return;
            }
        };
        // `Unsize<U>`'s only type argument is the target type `U`.
        let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1);

        debug!("assemble_candidates_for_unsizing(source={:?}, target={:?})",
               source, target);

        let may_apply = match (&source.sty, &target.sty) {
            // Trait+Kx+'a -> Trait+Ky+'b (upcasts).
            (&ty::TyDynamic(ref data_a, ..), &ty::TyDynamic(ref data_b, ..)) => {
                // Upcasts permit two things:
                //
                // 1. Dropping builtin bounds, e.g. `Foo+Send` to `Foo`
                // 2. Tightening the region bound, e.g. `Foo+'a` to `Foo+'b` if `'a : 'b`
                //
                // Note that neither of these changes requires any
                // change at runtime. Eventually this will be
                // generalized.
                //
                // We always upcast when we can because of reason
                // #2 (region bounds).
                match (data_a.principal(), data_b.principal()) {
                    (Some(a), Some(b)) => a.def_id() == b.def_id() &&
                        data_b.auto_traits()
                            // All of a's auto traits need to be in b's auto traits.
                            .all(|b| data_a.auto_traits().any(|a| a == b)),
                    _ => false
                }
            }

            // T -> Trait.
            (_, &ty::TyDynamic(..)) => true,

            // Ambiguous handling is below T -> Trait, because inference
            // variables can still implement Unsize<Trait> and nested
            // obligations will have the final say (likely deferred).
            (&ty::TyInfer(ty::TyVar(_)), _) |
            (_, &ty::TyInfer(ty::TyVar(_))) => {
                debug!("assemble_candidates_for_unsizing: ambiguous");
                candidates.ambiguous = true;
                false
            }

            // [T; n] -> [T].
            (&ty::TyArray(..), &ty::TySlice(_)) => true,

            // Struct<T> -> Struct<U>.
            (&ty::TyAdt(def_id_a, _), &ty::TyAdt(def_id_b, _)) if def_id_a.is_struct() => {
                def_id_a == def_id_b
            }

            // (.., T) -> (.., U).
            (&ty::TyTuple(tys_a), &ty::TyTuple(tys_b)) => {
                tys_a.len() == tys_b.len()
            }

            _ => false
        };

        if may_apply {
            candidates.vec.push(BuiltinUnsizeCandidate);
        }
    }

    ///////////////////////////////////////////////////////////////////////////
    // WINNOW
    //
    // Winnowing is the process of attempting to resolve ambiguity by
    // probing further. During the winnowing process, we unify all
    // type variables (ignoring skolemization) and then we also
    // attempt to evaluate recursive bounds to see if they are
    // satisfied.

    /// Returns true if `victim` should be dropped in favor of
    /// `other`. Generally speaking we will drop duplicate
    /// candidates and prefer where-clause candidates.
    ///
    /// See the comment for "SelectionCandidate" for more details.
    fn candidate_should_be_dropped_in_favor_of<'o>(
        &mut self,
        victim: &EvaluatedCandidate<'tcx>,
        other: &EvaluatedCandidate<'tcx>)
        -> bool
    {
        // Exact duplicates can always be dropped.
        if victim.candidate == other.candidate {
            return true;
        }

        match other.candidate {
            ObjectCandidate |
            ParamCandidate(_) | ProjectionCandidate => match victim.candidate {
                AutoImplCandidate(..) => {
                    bug!(
                        "default implementations shouldn't be recorded \
                         when there are other valid candidates");
                }
                ImplCandidate(..) |
                ClosureCandidate |
                GeneratorCandidate |
                FnPointerCandidate |
                BuiltinObjectCandidate |
                BuiltinUnsizeCandidate |
                BuiltinCandidate { .. } => {
                    // We have a where-clause so don't go around looking
                    // for impls.
                    true
                }
                ObjectCandidate |
                ProjectionCandidate => {
                    // Arbitrarily give param candidates priority
                    // over projection and object candidates.
                    true
                },
                ParamCandidate(..) => false,
            },
            ImplCandidate(other_def) => {
                // See if we can toss out `victim` based on specialization.
                // This requires us to know *for sure* that the `other` impl applies
                // i.e. EvaluatedToOk:
                if other.evaluation == EvaluatedToOk {
                    if let ImplCandidate(victim_def) = victim.candidate {
                        let tcx = self.tcx().global_tcx();
                        return tcx.specializes((other_def, victim_def)) ||
                            tcx.impls_are_allowed_to_overlap(other_def, victim_def);
                    }
                }

                false
            },
            _ => false
        }
    }

    ///////////////////////////////////////////////////////////////////////////
    // BUILTIN BOUNDS
    //
    // These cover the traits that are built-in to the language
    // itself. This includes `Copy` and `Sized` for sure. For the
    // moment, it also includes `Send` / `Sync` and a few others, but
    // those will hopefully change to library-defined traits in the
    // future.

    // HACK: if this returns an error, selection exits without considering
    // other impls.
    fn assemble_builtin_bound_candidates<'o>(&mut self,
                                             conditions: BuiltinImplConditions<'tcx>,
                                             candidates: &mut SelectionCandidateSet<'tcx>)
                                             -> Result<(),SelectionError<'tcx>>
    {
        match conditions {
            BuiltinImplConditions::Where(nested) => {
                debug!("builtin_bound: nested={:?}", nested);
                candidates.vec.push(BuiltinCandidate {
                    has_nested: nested.skip_binder().len() > 0
                });
                Ok(())
            }
            BuiltinImplConditions::None => { Ok(()) }
            BuiltinImplConditions::Ambiguous => {
                debug!("assemble_builtin_bound_candidates: ambiguous builtin");
                Ok(candidates.ambiguous = true)
            }
            BuiltinImplConditions::Never => { Err(Unimplemented) }
        }
    }

    /// Computes the conditions under which the obligation's self-type is
    /// `Sized`: `Where(..)` with the nested types that must themselves be
    /// `Sized`, `Never` for unsized types, `None` when user impls decide,
    /// and `Ambiguous` for unresolved inference variables.
    fn sized_conditions(&mut self, obligation: &TraitObligation<'tcx>)
                        -> BuiltinImplConditions<'tcx>
    {
        use self::BuiltinImplConditions::{Ambiguous, None, Never, Where};

        // NOTE: binder moved to (*)
        let self_ty = self.infcx.shallow_resolve(
            obligation.predicate.skip_binder().self_ty());

        match self_ty.sty {
            ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) |
            ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) |
            ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyRawPtr(..) |
            ty::TyChar | ty::TyRef(..) | ty::TyGenerator(..) |
            ty::TyGeneratorWitness(..) | ty::TyArray(..) | ty::TyClosure(..) |
            ty::TyNever | ty::TyError => {
                // safe for everything
                Where(ty::Binder(Vec::new()))
            }

            ty::TyStr | ty::TySlice(_) | ty::TyDynamic(..) | ty::TyForeign(..) => Never,

            ty::TyTuple(tys) => {
                // A tuple is Sized iff its last field is Sized.
                Where(ty::Binder(tys.last().into_iter().cloned().collect()))
            }

            ty::TyAdt(def, substs) => {
                let sized_crit = def.sized_constraint(self.tcx());
                // (*) binder moved here
                Where(ty::Binder(
                    sized_crit.iter().map(|ty| ty.subst(self.tcx(), substs)).collect()
                ))
            }

            ty::TyProjection(_) | ty::TyParam(_) | ty::TyAnon(..) => None,
            ty::TyInfer(ty::TyVar(_)) => Ambiguous,

            ty::TyInfer(ty::CanonicalTy(_)) |
            ty::TyInfer(ty::FreshTy(_)) |
            ty::TyInfer(ty::FreshIntTy(_)) |
            ty::TyInfer(ty::FreshFloatTy(_)) => {
                bug!("asked to assemble builtin bounds of unexpected type: {:?}",
                     self_ty);
            }
        }
    }

    /// Computes the conditions under which the obligation's self-type is
    /// `Copy`/`Clone` (both traits share the same structural rules here).
    /// Same result conventions as `sized_conditions`.
    fn copy_clone_conditions(&mut self, obligation: &TraitObligation<'tcx>)
                             -> BuiltinImplConditions<'tcx>
    {
        // NOTE: binder moved to (*)
        let self_ty = self.infcx.shallow_resolve(
            obligation.predicate.skip_binder().self_ty());

        use self::BuiltinImplConditions::{Ambiguous, None, Never, Where};

        match self_ty.sty {
            ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) |
            ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) |
            ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar |
            ty::TyRawPtr(..) | ty::TyError | ty::TyNever |
            ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => {
                Where(ty::Binder(Vec::new()))
            }

            ty::TyDynamic(..) | ty::TyStr | ty::TySlice(..) |
            ty::TyGenerator(..) | ty::TyGeneratorWitness(..) | ty::TyForeign(..) |
            ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => {
                // `&mut T` and the unsized/opaque types are never Copy/Clone
                // via the builtin rules.
                Never
            }

            ty::TyArray(element_ty, _) => {
                // (*) binder moved here
                Where(ty::Binder(vec![element_ty]))
            }

            ty::TyTuple(tys) => {
                // (*) binder moved here
                Where(ty::Binder(tys.to_vec()))
            }

            ty::TyClosure(def_id, substs) => {
                let trait_id = obligation.predicate.def_id();
                let copy_closures =
                    Some(trait_id) == self.tcx().lang_items().copy_trait();
                let clone_closures =
                    Some(trait_id) == self.tcx().lang_items().clone_trait();
                if copy_closures || clone_closures {
                    // A closure is Copy/Clone iff all of its upvars are.
                    Where(ty::Binder(substs.upvar_tys(def_id, self.tcx()).collect()))
                } else {
                    Never
                }
            }

            ty::TyAdt(..) | ty::TyProjection(..) | ty::TyParam(..) | ty::TyAnon(..) => {
                // Fallback to whatever user-defined impls exist in this case.
                None
            }

            ty::TyInfer(ty::TyVar(_)) => {
                // Unbound type variable. Might or might not have
                // applicable impls and so forth, depending on what
                // those type variables wind up being bound to.
                Ambiguous
            }

            ty::TyInfer(ty::CanonicalTy(_)) |
            ty::TyInfer(ty::FreshTy(_)) |
            ty::TyInfer(ty::FreshIntTy(_)) |
            ty::TyInfer(ty::FreshFloatTy(_)) => {
                bug!("asked to assemble builtin bounds of unexpected type: {:?}",
                     self_ty);
            }
        }
    }

    /// For default impls, we need to break apart a type into its
    /// "constituent types" -- meaning, the types that it contains.
    ///
    /// Here are some (simple) examples:
    ///
    /// ```
    /// (i32, u32) -> [i32, u32]
    /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
    /// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
    /// Zed<i32> where enum Zed { A(T), B(u32) } -> [i32, u32]
    /// ```
    fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec<Ty<'tcx>> {
        match t.sty {
            ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) |
            ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyStr | ty::TyError |
            ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) |
            ty::TyNever | ty::TyChar => {
                Vec::new()
            }

            ty::TyDynamic(..) | ty::TyParam(..) | ty::TyForeign(..) | ty::TyProjection(..)
            | ty::TyInfer(ty::CanonicalTy(_)) |
            ty::TyInfer(ty::TyVar(_)) |
            ty::TyInfer(ty::FreshTy(_)) |
            ty::TyInfer(ty::FreshIntTy(_)) |
            ty::TyInfer(ty::FreshFloatTy(_)) => {
                bug!("asked to assemble constituent types of unexpected type: {:?}",
                     t);
            }

            ty::TyRawPtr(ty::TypeAndMut { ty: element_ty, ..}) |
            ty::TyRef(_, ty::TypeAndMut { ty: element_ty, ..}) => {
                vec![element_ty]
            },

            ty::TyArray(element_ty, _) | ty::TySlice(element_ty) => {
                vec![element_ty]
            }

            ty::TyTuple(ref tys) => {
                // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
                tys.to_vec()
            }

            ty::TyClosure(def_id, ref substs) => {
                substs.upvar_tys(def_id, self.tcx()).collect()
            }

            ty::TyGenerator(def_id, ref substs, interior) => {
                // A generator contains its upvars plus its interior
                // (the witness of types live across yield points).
                substs.upvar_tys(def_id, self.tcx()).chain(iter::once(interior.witness)).collect()
            }

            ty::TyGeneratorWitness(types) => {
                // This is sound because no regions in the witness can refer to
                // the binder outside the witness. So we'll effectively reuse
                // the implicit binder around the witness.
                types.skip_binder().to_vec()
            }

            // for `PhantomData<T>`, we pass `T`
            ty::TyAdt(def, substs) if def.is_phantom_data() => {
                substs.types().collect()
            }

            ty::TyAdt(def, substs) => {
                def.all_fields()
                    .map(|f| f.ty(self.tcx(), substs))
                    .collect()
            }

            ty::TyAnon(def_id, substs) => {
                // We can resolve the `impl Trait` to its concrete type,
                // which enforces a DAG between the functions requiring
                // the auto trait bounds in question.
                vec![self.tcx().type_of(def_id).subst(self.tcx(), substs)]
            }
        }
    }

    /// For each type in `types`, produces the obligation
    /// `type : trait_def_id`, normalizing the type first. Used by the
    /// builtin and auto-impl confirmation paths to generate nested
    /// obligations for a type's constituent types.
    fn collect_predicates_for_types(&mut self,
                                    param_env: ty::ParamEnv<'tcx>,
                                    cause: ObligationCause<'tcx>,
                                    recursion_depth: usize,
                                    trait_def_id: DefId,
                                    types: ty::Binder<Vec<Ty<'tcx>>>)
                                    -> Vec<PredicateObligation<'tcx>>
    {
        // Because the types were potentially derived from
        // higher-ranked obligations they may reference late-bound
        // regions. For example, `for<'a> Foo<&'a int> : Copy` would
        // yield a type like `for<'a> &'a int`. In general, we
        // maintain the invariant that we never manipulate bound
        // regions, so we have to process these bound regions somehow.
        //
        // The strategy is to:
        //
        // 1. Instantiate those regions to skolemized regions (e.g.,
        //    `for<'a> &'a int` becomes `&0 int`.
        // 2. Produce something like `&'0 int : Copy`
        // 3. Re-bind the regions back to `for<'a> &'a int : Copy`

        types.skip_binder().into_iter().flat_map(|ty| { // binder moved -\
            let ty: ty::Binder<Ty<'tcx>> = ty::Binder(ty); // <----------/

            self.in_snapshot(|this, snapshot| {
                let (skol_ty, skol_map) =
                    this.infcx().skolemize_late_bound_regions(&ty, snapshot);
                let Normalized { value: normalized_ty, mut obligations } =
                    project::normalize_with_depth(this,
                                                  param_env,
                                                  cause.clone(),
                                                  recursion_depth,
                                                  &skol_ty);
                let skol_obligation =
                    this.tcx().predicate_for_trait_def(param_env,
                                                       cause.clone(),
                                                       trait_def_id,
                                                       recursion_depth,
                                                       normalized_ty,
                                                       &[]);
                obligations.push(skol_obligation);
                this.infcx().plug_leaks(skol_map, snapshot, obligations)
            })
        }).collect()
    }

    ///////////////////////////////////////////////////////////////////////////
    // CONFIRMATION
    //
    // Confirmation unifies the output type parameters of the trait
    // with the values found in the obligation, possibly yielding a
    // type error. See [rustc guide] for more details.
    //
    // [rustc guide]:
    // https://rust-lang-nursery.github.io/rustc-guide/trait-resolution.html#confirmation

    /// Dispatches the winning `candidate` to the matching `confirm_*`
    /// method and wraps its output in the corresponding `Vtable*` variant.
    fn confirm_candidate(&mut self,
                         obligation: &TraitObligation<'tcx>,
                         candidate: SelectionCandidate<'tcx>)
                         -> Result<Selection<'tcx>,SelectionError<'tcx>>
    {
        debug!("confirm_candidate({:?}, {:?})",
               obligation,
               candidate);

        match candidate {
            BuiltinCandidate { has_nested } => {
                let data = self.confirm_builtin_candidate(obligation, has_nested);
                Ok(VtableBuiltin(data))
            }

            ParamCandidate(param) => {
                let obligations = self.confirm_param_candidate(obligation, param);
                Ok(VtableParam(obligations))
            }

            AutoImplCandidate(trait_def_id) => {
                let data = self.confirm_auto_impl_candidate(obligation, trait_def_id);
                Ok(VtableAutoImpl(data))
            }

            ImplCandidate(impl_def_id) => {
                Ok(VtableImpl(self.confirm_impl_candidate(obligation, impl_def_id)))
            }

            ClosureCandidate => {
                let vtable_closure = self.confirm_closure_candidate(obligation)?;
                Ok(VtableClosure(vtable_closure))
            }

            GeneratorCandidate => {
                let vtable_generator = self.confirm_generator_candidate(obligation)?;
                Ok(VtableGenerator(vtable_generator))
            }

            BuiltinObjectCandidate => {
                // This indicates something like `(Trait+Send) :
                // Send`. In this case, we know that this holds
                // because that's what the object type is telling us,
                // and there's really no additional obligations to
                // prove and no types in particular to unify etc.
                Ok(VtableParam(Vec::new()))
            }

            ObjectCandidate => {
                let data = self.confirm_object_candidate(obligation);
                Ok(VtableObject(data))
            }

            FnPointerCandidate => {
                let data =
                    self.confirm_fn_pointer_candidate(obligation)?;
                Ok(VtableFnPointer(data))
            }

            ProjectionCandidate => {
                self.confirm_projection_candidate(obligation);
                Ok(VtableParam(Vec::new()))
            }

            BuiltinUnsizeCandidate => {
                let data = self.confirm_builtin_unsize_candidate(obligation)?;
                Ok(VtableBuiltin(data))
            }
        }
    }

    /// Replays the definition-bounds match that selected this candidate,
    /// this time inside a committed snapshot so the unifications stick.
    /// The match succeeded during selection, so it must succeed again.
    fn confirm_projection_candidate(&mut self,
                                    obligation: &TraitObligation<'tcx>)
    {
        self.in_snapshot(|this, snapshot| {
            let result =
                this.match_projection_obligation_against_definition_bounds(obligation,
                                                                           snapshot);
            assert!(result);
        })
    }

    /// Confirms a where-clause candidate by re-running the unification
    /// (committed this time) and returning the nested obligations.
    fn confirm_param_candidate(&mut self,
                               obligation: &TraitObligation<'tcx>,
                               param: ty::PolyTraitRef<'tcx>)
                               -> Vec<PredicateObligation<'tcx>>
    {
        debug!("confirm_param_candidate({:?},{:?})",
               obligation,
               param);

        // During evaluation, we already checked that this
        // where-clause trait-ref could be unified with the obligation
        // trait-ref. Repeat that unification now without any
        // transactional boundary; it should not fail.
        match self.match_where_clause_trait_ref(obligation, param.clone()) {
            Ok(obligations) => obligations,
            Err(()) => {
                bug!("Where clause `{:?}` was applicable to `{:?}` but now is not",
                     param,
                     obligation);
            }
        }
    }

    /// Confirms a builtin (`Sized`/`Copy`/`Clone`) candidate, recomputing
    /// the nested constituent-type obligations when `has_nested` is set.
    fn confirm_builtin_candidate(&mut self,
                                 obligation: &TraitObligation<'tcx>,
                                 has_nested: bool)
                                 -> VtableBuiltinData<PredicateObligation<'tcx>>
    {
        debug!("confirm_builtin_candidate({:?}, {:?})",
               obligation, has_nested);

        let lang_items = self.tcx().lang_items();
        let obligations = if has_nested {
            let trait_def = obligation.predicate.def_id();
            let conditions = match trait_def {
                _ if Some(trait_def) == lang_items.sized_trait() => {
                    self.sized_conditions(obligation)
                }
                _ if Some(trait_def) == lang_items.copy_trait() => {
                    self.copy_clone_conditions(obligation)
                }
                _ if Some(trait_def) == lang_items.clone_trait() => {
                    self.copy_clone_conditions(obligation)
                }
                _ => bug!("unexpected builtin trait {:?}", trait_def)
            };
            let nested = match conditions {
                BuiltinImplConditions::Where(nested) => nested,
                _ => bug!("obligation {:?} had matched a builtin impl but now doesn't",
                          obligation)
            };

            let cause = obligation.derived_cause(BuiltinDerivedObligation);
            self.collect_predicates_for_types(obligation.param_env,
                                              cause,
                                              obligation.recursion_depth+1,
                                              trait_def,
                                              nested)
        } else {
            vec![]
        };

        debug!("confirm_builtin_candidate: obligations={:?}",
               obligations);

        VtableBuiltinData { nested: obligations }
    }

    /// This handles the case where a `auto trait Foo` impl is being used.
    /// The idea is that the impl applies to `X : Foo` if the following conditions are met:
    ///
    /// 1. For each constituent type `Y` in `X`, `Y : Foo` holds
    /// 2. For each where-clause `C` declared on `Foo`, `[Self => X] C` holds.
    fn confirm_auto_impl_candidate(&mut self,
                                   obligation: &TraitObligation<'tcx>,
                                   trait_def_id: DefId)
                                   -> VtableAutoImplData<PredicateObligation<'tcx>>
    {
        debug!("confirm_auto_impl_candidate({:?}, {:?})",
               obligation,
               trait_def_id);

        // binder is moved below
        let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
        let types = self.constituent_types_for_ty(self_ty);
        self.vtable_auto_impl(obligation, trait_def_id, ty::Binder(types))
    }

    /// See `confirm_auto_impl_candidate`
    fn vtable_auto_impl(&mut self,
                        obligation: &TraitObligation<'tcx>,
                        trait_def_id: DefId,
                        nested: ty::Binder<Vec<Ty<'tcx>>>)
                        -> VtableAutoImplData<PredicateObligation<'tcx>>
    {
        debug!("vtable_auto_impl: nested={:?}", nested);

        // Condition 1: each constituent type must itself satisfy the trait.
        let cause = obligation.derived_cause(BuiltinDerivedObligation);
        let mut obligations = self.collect_predicates_for_types(
            obligation.param_env,
            cause,
            obligation.recursion_depth+1,
            trait_def_id,
            nested);

        // Condition 2: the where-clauses declared on the trait must hold
        // with `Self` instantiated to the obligation's self-type.
        let trait_obligations = self.in_snapshot(|this, snapshot| {
            let poly_trait_ref = obligation.predicate.to_poly_trait_ref();
            let (trait_ref, skol_map) =
                this.infcx().skolemize_late_bound_regions(&poly_trait_ref, snapshot);
            let cause = obligation.derived_cause(ImplDerivedObligation);
            this.impl_or_trait_obligations(cause,
                                           obligation.recursion_depth + 1,
                                           obligation.param_env,
                                           trait_def_id,
                                           &trait_ref.substs,
                                           skol_map,
                                           snapshot)
        });

        obligations.extend(trait_obligations);

        debug!("vtable_auto_impl: obligations={:?}", obligations);

        VtableAutoImplData {
            trait_def_id,
            nested: obligations
        }
    }

    /// Confirms an impl candidate by re-matching the impl (committed this
    /// time) and building the resulting `VtableImplData`.
    fn confirm_impl_candidate(&mut self,
                              obligation: &TraitObligation<'tcx>,
                              impl_def_id: DefId)
                              -> VtableImplData<'tcx, PredicateObligation<'tcx>>
    {
        debug!("confirm_impl_candidate({:?},{:?})",
               obligation,
               impl_def_id);

        // First, create the substitutions by matching the impl again,
        // this time not in a probe.
        self.in_snapshot(|this, snapshot| {
            let (substs, skol_map) =
                this.rematch_impl(impl_def_id, obligation,
                                  snapshot);
            debug!("confirm_impl_candidate substs={:?}", substs);
            let cause = obligation.derived_cause(ImplDerivedObligation);
            this.vtable_impl(impl_def_id, substs, cause,
                             obligation.recursion_depth + 1,
                             obligation.param_env,
                             skol_map,
                             snapshot)
        })
    }

    /// Builds the `VtableImplData` for a matched impl: the impl's own
    /// where-clause obligations plus any obligations produced while
    /// normalizing the substitutions.
    fn vtable_impl(&mut self,
                   impl_def_id: DefId,
                   mut substs: Normalized<'tcx, &'tcx Substs<'tcx>>,
                   cause: ObligationCause<'tcx>,
                   recursion_depth: usize,
                   param_env: ty::ParamEnv<'tcx>,
                   skol_map: infer::SkolemizationMap<'tcx>,
                   snapshot: &infer::CombinedSnapshot<'cx, 'tcx>)
                   -> VtableImplData<'tcx, PredicateObligation<'tcx>>
    {
        debug!("vtable_impl(impl_def_id={:?}, substs={:?}, recursion_depth={}, skol_map={:?})",
               impl_def_id,
               substs,
               recursion_depth,
               skol_map);

        let mut impl_obligations =
            self.impl_or_trait_obligations(cause,
                                           recursion_depth,
                                           param_env,
                                           impl_def_id,
                                           &substs.value,
                                           skol_map,
                                           snapshot);

        debug!("vtable_impl: impl_def_id={:?} impl_obligations={:?}",
               impl_def_id,
               impl_obligations);

        // Because of RFC447, the impl-trait-ref and obligations
        // are sufficient to determine the impl substs, without
        // relying on projections in the impl-trait-ref.
        //
        // e.g. `impl<U: Tr, V: Iterator<Item=U>> Foo<<U as Tr>::T> for V`
        impl_obligations.append(&mut substs.obligations);

        VtableImplData { impl_def_id,
                         substs: substs.value,
                         nested: impl_obligations }
    }

    /// Confirms an object candidate: finds the (unique) supertrait of the
    /// object's principal that unifies with the obligation and computes
    /// the vtable offset of that supertrait.
    fn confirm_object_candidate(&mut self,
                                obligation: &TraitObligation<'tcx>)
                                -> VtableObjectData<'tcx, PredicateObligation<'tcx>>
    {
        debug!("confirm_object_candidate({:?})",
               obligation);

        // FIXME skipping binder here seems wrong -- we should
        // probably flatten the binder from the obligation and the
        // binder from the object. Have to try to make a broken test
        // case that results. -nmatsakis
        let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
        let poly_trait_ref = match self_ty.sty {
            ty::TyDynamic(ref data, ..) => {
                data.principal().unwrap().with_self_ty(self.tcx(), self_ty)
            }
            _ => {
                span_bug!(obligation.cause.span,
                          "object candidate with non-object");
            }
        };

        let mut upcast_trait_ref = None;
        let mut nested = vec![];
        let vtable_base;

        {
            let tcx = self.tcx();

            // We want to find the first supertrait in the list of
            // supertraits that we can unify with, and do that
            // unification. We know that there is exactly one in the list
            // where we can unify because otherwise select would have
            // reported an ambiguity. (When we do find a match, also
            // record it for later.)
            let nonmatching =
                util::supertraits(tcx, poly_trait_ref)
                .take_while(|&t| {
                    match
                        self.commit_if_ok(
                            |this, _| this.match_poly_trait_ref(obligation, t))
                    {
                        Ok(obligations) => {
                            upcast_trait_ref = Some(t);
                            nested.extend(obligations);
                            false
                        }
                        Err(_) => { true }
                    }
                });

            // Additionally, for each of the nonmatching predicates that
            // we pass over, we sum up the set of number of vtable
            // entries, so that we can compute the offset for the selected
            // trait.
vtable_base = nonmatching.map(|t| tcx.count_own_vtable_entries(t)) .sum(); } VtableObjectData { upcast_trait_ref: upcast_trait_ref.unwrap(), vtable_base, nested, } } fn confirm_fn_pointer_candidate(&mut self, obligation: &TraitObligation<'tcx>) -> Result<VtableFnPointerData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> { debug!("confirm_fn_pointer_candidate({:?})", obligation); // ok to skip binder; it is reintroduced below let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); let sig = self_ty.fn_sig(self.tcx()); let trait_ref = self.tcx().closure_trait_ref_and_return_type(obligation.predicate.def_id(), self_ty, sig, util::TupleArgumentsFlag::Yes) .map_bound(|(trait_ref, _)| trait_ref); let Normalized { value: trait_ref, obligations } = project::normalize_with_depth(self, obligation.param_env, obligation.cause.clone(), obligation.recursion_depth + 1, &trait_ref); self.confirm_poly_trait_refs(obligation.cause.clone(), obligation.param_env, obligation.predicate.to_poly_trait_ref(), trait_ref)?; Ok(VtableFnPointerData { fn_ty: self_ty, nested: obligations }) } fn confirm_generator_candidate(&mut self, obligation: &TraitObligation<'tcx>) -> Result<VtableGeneratorData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> { // ok to skip binder because the substs on generator types never // touch bound regions, they just capture the in-scope // type/region parameters let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder()); let (closure_def_id, substs) = match self_ty.sty { ty::TyGenerator(id, substs, _) => (id, substs), _ => bug!("closure candidate for non-closure {:?}", obligation) }; debug!("confirm_generator_candidate({:?},{:?},{:?})", obligation, closure_def_id, substs); let trait_ref = self.generator_trait_ref_unnormalized(obligation, closure_def_id, substs); let Normalized { value: trait_ref, mut obligations } = normalize_with_depth(self, obligation.param_env, obligation.cause.clone(), 
                                 obligation.recursion_depth+1,
                                 &trait_ref);

        debug!("confirm_generator_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})",
               closure_def_id,
               trait_ref,
               obligations);

        obligations.extend(
            self.confirm_poly_trait_refs(obligation.cause.clone(),
                                         obligation.param_env,
                                         obligation.predicate.to_poly_trait_ref(),
                                         trait_ref)?);

        Ok(VtableGeneratorData {
            closure_def_id: closure_def_id,
            substs: substs.clone(),
            nested: obligations
        })
    }

    // Confirms a `Fn`/`FnMut`/`FnOnce` obligation against a closure type:
    // relates the closure's inferred trait ref to the obligation's, and
    // additionally registers a `ClosureKind` predicate so the closure's
    // eventual kind is checked against the trait it is used as.
    fn confirm_closure_candidate(&mut self,
                                 obligation: &TraitObligation<'tcx>)
                                 -> Result<VtableClosureData<'tcx, PredicateObligation<'tcx>>,
                                           SelectionError<'tcx>>
    {
        debug!("confirm_closure_candidate({:?})", obligation);

        let kind = match self.tcx().lang_items().fn_trait_kind(obligation.predicate.0.def_id()) {
            Some(k) => k,
            None => bug!("closure candidate for non-fn trait {:?}", obligation)
        };

        // ok to skip binder because the substs on closure types never
        // touch bound regions, they just capture the in-scope
        // type/region parameters
        let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
        let (closure_def_id, substs) = match self_ty.sty {
            ty::TyClosure(id, substs) => (id, substs),
            _ => bug!("closure candidate for non-closure {:?}", obligation)
        };

        let trait_ref =
            self.closure_trait_ref_unnormalized(obligation, closure_def_id, substs);
        let Normalized {
            value: trait_ref,
            mut obligations
        } = normalize_with_depth(self,
                                 obligation.param_env,
                                 obligation.cause.clone(),
                                 obligation.recursion_depth+1,
                                 &trait_ref);

        debug!("confirm_closure_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})",
               closure_def_id,
               trait_ref,
               obligations);

        obligations.extend(
            self.confirm_poly_trait_refs(obligation.cause.clone(),
                                         obligation.param_env,
                                         obligation.predicate.to_poly_trait_ref(),
                                         trait_ref)?);

        obligations.push(Obligation::new(
            obligation.cause.clone(),
            obligation.param_env,
            ty::Predicate::ClosureKind(closure_def_id, substs, kind)));

        Ok(VtableClosureData {
            closure_def_id,
            substs: substs.clone(),
            nested: obligations
        })
    }

    /// In the case of closure types and fn pointers,
    /// we currently treat the input type parameters on the trait as
    /// outputs. This means that when we have a match we have only
    /// considered the self type, so we have to go back and make sure
    /// to relate the argument types too. This is kind of wrong, but
    /// since we control the full set of impls, also not that wrong,
    /// and it DOES yield better error messages (since we don't report
    /// errors as if there is no applicable impl, but rather report
    /// errors are about mismatched argument types.
    ///
    /// Here is an example. Imagine we have a closure expression
    /// and we desugared it so that the type of the expression is
    /// `Closure`, and `Closure` expects an int as argument. Then it
    /// is "as if" the compiler generated this impl:
    ///
    ///     impl Fn(int) for Closure { ... }
    ///
    /// Now imagine our obligation is `Fn(usize) for Closure`. So far
    /// we have matched the self-type `Closure`. At this point we'll
    /// compare the `int` to `usize` and generate an error.
    ///
    /// Note that this checking occurs *after* the impl has selected,
    /// because these output type parameters should not affect the
    /// selection of the impl. Therefore, if there is a mismatch, we
    /// report an error to the user.
    fn confirm_poly_trait_refs(&mut self,
                               obligation_cause: ObligationCause<'tcx>,
                               obligation_param_env: ty::ParamEnv<'tcx>,
                               obligation_trait_ref: ty::PolyTraitRef<'tcx>,
                               expected_trait_ref: ty::PolyTraitRef<'tcx>)
                               -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>>
    {
        let obligation_trait_ref = obligation_trait_ref.clone();
        // A sub-type failure here becomes the dedicated
        // `OutputTypeParameterMismatch` error rather than "no impl found".
        self.infcx
            .at(&obligation_cause, obligation_param_env)
            .sup(obligation_trait_ref, expected_trait_ref)
            .map(|InferOk { obligations, .. }| obligations)
            .map_err(|e| OutputTypeParameterMismatch(expected_trait_ref, obligation_trait_ref, e))
    }

    // Confirms the built-in `Unsize` impls. Each arm of the match below
    // handles one source/target shape pair; any other pair is a bug because
    // candidate assembly should not have produced this candidate.
    fn confirm_builtin_unsize_candidate(&mut self,
                                        obligation: &TraitObligation<'tcx>,)
        -> Result<VtableBuiltinData<PredicateObligation<'tcx>>, SelectionError<'tcx>>
    {
        let tcx = self.tcx();

        // assemble_candidates_for_unsizing should ensure there are no late bound
        // regions here. See the comment there for more details.
        let source = self.infcx.shallow_resolve(
            obligation.self_ty().no_late_bound_regions().unwrap());
        let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1);
        let target = self.infcx.shallow_resolve(target);

        debug!("confirm_builtin_unsize_candidate(source={:?}, target={:?})",
               source, target);

        let mut nested = vec![];
        match (&source.sty, &target.sty) {
            // Trait+Kx+'a -> Trait+Ky+'b (upcasts).
            (&ty::TyDynamic(ref data_a, r_a), &ty::TyDynamic(ref data_b, r_b)) => {
                // See assemble_candidates_for_unsizing for more info.
                // Binders reintroduced below in call to mk_existential_predicates.
                let principal = data_a.skip_binder().principal();
                let iter = principal.into_iter().map(ty::ExistentialPredicate::Trait)
                    .chain(data_a.skip_binder().projection_bounds()
                           .map(|x| ty::ExistentialPredicate::Projection(x)))
                    .chain(data_b.auto_traits().map(ty::ExistentialPredicate::AutoTrait));
                let new_trait = tcx.mk_dynamic(
                    ty::Binder(tcx.mk_existential_predicates(iter)), r_b);
                let InferOk { obligations, .. } =
                    self.infcx.at(&obligation.cause, obligation.param_env)
                              .eq(target, new_trait)
                              .map_err(|_| Unimplemented)?;
                nested.extend(obligations);

                // Register one obligation for 'a: 'b.
                let cause = ObligationCause::new(obligation.cause.span,
                                                 obligation.cause.body_id,
                                                 ObjectCastObligation(target));
                let outlives = ty::OutlivesPredicate(r_a, r_b);
                nested.push(Obligation::with_depth(cause,
                                                   obligation.recursion_depth + 1,
                                                   obligation.param_env,
                                                   ty::Binder(outlives).to_predicate()));
            }

            // T -> Trait.
            // Coercing a concrete `T` into a trait object: all mentioned
            // traits must be object safe, then we emit the obligations that
            // make the cast sound (trait bounds, `Sized`, and an outlives).
            (_, &ty::TyDynamic(ref data, r)) => {
                let mut object_dids =
                    data.auto_traits().chain(data.principal().map(|p| p.def_id()));
                if let Some(did) = object_dids.find(|did| {
                    !tcx.is_object_safe(*did)
                }) {
                    return Err(TraitNotObjectSafe(did))
                }

                let cause = ObligationCause::new(obligation.cause.span,
                                                 obligation.cause.body_id,
                                                 ObjectCastObligation(target));
                // Local helper so every emitted obligation shares the same
                // cause, depth and param-env.
                let mut push = |predicate| {
                    nested.push(Obligation::with_depth(cause.clone(),
                                                       obligation.recursion_depth + 1,
                                                       obligation.param_env,
                                                       predicate));
                };

                // Create obligations:
                //  - Casting T to Trait
                //  - For all the various builtin bounds attached to the object cast. (In other
                //  words, if the object type is Foo+Send, this would create an obligation for the
                //  Send check.)
                //  - Projection predicates
                for predicate in data.iter() {
                    push(predicate.with_self_ty(tcx, source));
                }

                // We can only make objects from sized types.
                let tr = ty::TraitRef {
                    def_id: tcx.require_lang_item(lang_items::SizedTraitLangItem),
                    substs: tcx.mk_substs_trait(source, &[]),
                };
                push(tr.to_predicate());

                // If the type is `Foo+'a`, ensures that the type
                // being cast to `Foo+'a` outlives `'a`:
                let outlives = ty::OutlivesPredicate(source, r);
                push(ty::Binder(outlives).to_predicate());
            }

            // [T; n] -> [T].
            (&ty::TyArray(a, _), &ty::TySlice(b)) => {
                let InferOk { obligations, .. } =
                    self.infcx.at(&obligation.cause, obligation.param_env)
                              .eq(b, a)
                              .map_err(|_| Unimplemented)?;
                nested.extend(obligations);
            }

            // Struct<T> -> Struct<U>.
            (&ty::TyAdt(def, substs_a), &ty::TyAdt(_, substs_b)) => {
                let fields = def
                    .all_fields()
                    .map(|f| tcx.type_of(f.did))
                    .collect::<Vec<_>>();

                // The last field of the structure has to exist and contain type parameters.
                let field = if let Some(&field) = fields.last() {
                    field
                } else {
                    return Err(Unimplemented);
                };
                // Record which type parameters of the struct appear in the
                // (potentially unsized) tail field.
                let mut ty_params = BitVector::new(substs_a.types().count());
                let mut found = false;
                for ty in field.walk() {
                    if let ty::TyParam(p) = ty.sty {
                        ty_params.insert(p.idx as usize);
                        found = true;
                    }
                }
                if !found {
                    return Err(Unimplemented);
                }

                // Replace type parameters used in unsizing with
                // TyError and ensure they do not affect any other fields.
                // This could be checked after type collection for any struct
                // with a potentially unsized trailing field.
                let params = substs_a.iter().enumerate().map(|(i, &k)| {
                    if ty_params.contains(i) {
                        Kind::from(tcx.types.err)
                    } else {
                        k
                    }
                });
                let substs = tcx.mk_substs(params);
                for &ty in fields.split_last().unwrap().1 {
                    if ty.subst(tcx, substs).references_error() {
                        return Err(Unimplemented);
                    }
                }

                // Extract Field<T> and Field<U> from Struct<T> and Struct<U>.
                let inner_source = field.subst(tcx, substs_a);
                let inner_target = field.subst(tcx, substs_b);

                // Check that the source struct with the target's
                // unsized parameters is equal to the target.
                let params = substs_a.iter().enumerate().map(|(i, &k)| {
                    if ty_params.contains(i) {
                        substs_b.type_at(i).into()
                    } else {
                        k
                    }
                });
                let new_struct = tcx.mk_adt(def, tcx.mk_substs(params));
                let InferOk { obligations, .. } =
                    self.infcx.at(&obligation.cause, obligation.param_env)
                              .eq(target, new_struct)
                              .map_err(|_| Unimplemented)?;
                nested.extend(obligations);

                // Construct the nested Field<T>: Unsize<Field<U>> predicate.
                nested.push(tcx.predicate_for_trait_def(
                    obligation.param_env,
                    obligation.cause.clone(),
                    obligation.predicate.def_id(),
                    obligation.recursion_depth + 1,
                    inner_source,
                    &[inner_target]));
            }

            // (.., T) -> (.., U).
            (&ty::TyTuple(tys_a), &ty::TyTuple(tys_b)) => {
                assert_eq!(tys_a.len(), tys_b.len());

                // The last field of the tuple has to exist.
                let (a_last, a_mid) = if let Some(x) = tys_a.split_last() {
                    x
                } else {
                    return Err(Unimplemented);
                };
                let b_last = tys_b.last().unwrap();

                // Check that the source tuple with the target's
                // last element is equal to the target.
                let new_tuple = tcx.mk_tup(a_mid.iter().chain(Some(b_last)));
                let InferOk { obligations, .. } =
                    self.infcx.at(&obligation.cause, obligation.param_env)
                              .eq(target, new_tuple)
                              .map_err(|_| Unimplemented)?;
                nested.extend(obligations);

                // Construct the nested T: Unsize<U> predicate.
                nested.push(tcx.predicate_for_trait_def(
                    obligation.param_env,
                    obligation.cause.clone(),
                    obligation.predicate.def_id(),
                    obligation.recursion_depth + 1,
                    a_last,
                    &[b_last]));
            }

            _ => bug!()
        };

        Ok(VtableBuiltinData { nested: nested })
    }

    ///////////////////////////////////////////////////////////////////////////
    // Matching
    //
    // Matching is a common path used for both evaluation and
    // confirmation.  It basically unifies types that appear in impls
    // and traits. This does affect the surrounding environment;
    // therefore, when used during evaluation, match routines must be
    // run inside of a `probe()` so that their side-effects are
    // contained.

    // Re-runs `match_impl` for an impl that was already selected; by that
    // point a mismatch is a compiler bug, hence the `bug!` on failure.
    fn rematch_impl(&mut self,
                    impl_def_id: DefId,
                    obligation: &TraitObligation<'tcx>,
                    snapshot: &infer::CombinedSnapshot<'cx, 'tcx>)
                    -> (Normalized<'tcx, &'tcx Substs<'tcx>>,
                        infer::SkolemizationMap<'tcx>)
    {
        match self.match_impl(impl_def_id, obligation, snapshot) {
            Ok((substs, skol_map)) => (substs, skol_map),
            Err(()) => {
                bug!("Impl {:?} was matchable against {:?} but now is not",
                     impl_def_id,
                     obligation);
            }
        }
    }

    // Attempts to unify the obligation's (skolemized) trait ref with the
    // impl's trait ref instantiated with fresh inference variables. On
    // success, returns the inferred impl substitutions plus any
    // normalization obligations; `Err(())` simply means "does not match".
    fn match_impl(&mut self,
                  impl_def_id: DefId,
                  obligation: &TraitObligation<'tcx>,
                  snapshot: &infer::CombinedSnapshot<'cx, 'tcx>)
                  -> Result<(Normalized<'tcx, &'tcx Substs<'tcx>>,
                             infer::SkolemizationMap<'tcx>), ()>
    {
        let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();

        // Before we create the substitutions and everything, first
        // consider a "quick reject". This avoids creating more types
        // and so forth that we need to.
        if self.fast_reject_trait_refs(obligation, &impl_trait_ref) {
            return Err(());
        }

        let (skol_obligation, skol_map) = self.infcx().skolemize_late_bound_regions(
            &obligation.predicate,
            snapshot);
        let skol_obligation_trait_ref = skol_obligation.trait_ref;

        let impl_substs = self.infcx.fresh_substs_for_item(obligation.param_env.universe,
                                                           obligation.cause.span,
                                                           impl_def_id);

        let impl_trait_ref = impl_trait_ref.subst(self.tcx(),
                                                  impl_substs);

        let Normalized { value: impl_trait_ref, obligations: mut nested_obligations } =
            project::normalize_with_depth(self,
                                          obligation.param_env,
                                          obligation.cause.clone(),
                                          obligation.recursion_depth + 1,
                                          &impl_trait_ref);

        debug!("match_impl(impl_def_id={:?}, obligation={:?}, \
                impl_trait_ref={:?}, skol_obligation_trait_ref={:?})",
               impl_def_id,
               obligation,
               impl_trait_ref,
               skol_obligation_trait_ref);

        let InferOk { obligations, .. } =
            self.infcx.at(&obligation.cause, obligation.param_env)
                      .eq(skol_obligation_trait_ref, impl_trait_ref)
                      .map_err(|e| {
                          debug!("match_impl: failed eq_trait_refs due to `{}`", e);
                          ()
                      })?;
        nested_obligations.extend(obligations);

        // The leak check rejects matches that would let skolemized regions
        // escape their binder.
        if let Err(e) = self.infcx.leak_check(false,
                                              obligation.cause.span,
                                              &skol_map,
                                              snapshot) {
            debug!("match_impl: failed leak check due to `{}`", e);
            return Err(());
        }

        debug!("match_impl: success impl_substs={:?}", impl_substs);
        Ok((Normalized {
            value: impl_substs,
            obligations: nested_obligations
        }, skol_map))
    }

    // Cheap structural pre-filter: compares simplified versions of the
    // input types so obviously-non-matching impls are skipped without
    // creating inference variables.
    fn fast_reject_trait_refs(&mut self,
                              obligation: &TraitObligation,
                              impl_trait_ref: &ty::TraitRef)
                              -> bool
    {
        // We can avoid creating type variables and doing the full
        // substitution if we find that any of the input types, when
        // simplified, do not match.
        obligation.predicate.skip_binder().input_types()
            .zip(impl_trait_ref.input_types())
            .any(|(obligation_ty, impl_ty)| {
                let simplified_obligation_ty =
                    fast_reject::simplify_type(self.tcx(), obligation_ty, true);
                let simplified_impl_ty =
                    fast_reject::simplify_type(self.tcx(), impl_ty, false);

                // Reject only when both sides simplify and the results differ;
                // an unsimplifiable side means "can't tell", so no reject.
                simplified_obligation_ty.is_some() &&
                    simplified_impl_ty.is_some() &&
                    simplified_obligation_ty != simplified_impl_ty
            })
    }

    /// Normalize `where_clause_trait_ref` and try to match it against
    /// `obligation`.  If successful, return any predicates that
    /// result from the normalization. Normalization is necessary
    /// because where-clauses are stored in the parameter environment
    /// unnormalized.
    fn match_where_clause_trait_ref(&mut self,
                                    obligation: &TraitObligation<'tcx>,
                                    where_clause_trait_ref: ty::PolyTraitRef<'tcx>)
                                    -> Result<Vec<PredicateObligation<'tcx>>,()>
    {
        self.match_poly_trait_ref(obligation, where_clause_trait_ref)
    }

    /// Returns `Ok` if `poly_trait_ref` being true implies that the
    /// obligation is satisfied.
    fn match_poly_trait_ref(&mut self,
                            obligation: &TraitObligation<'tcx>,
                            poly_trait_ref: ty::PolyTraitRef<'tcx>)
                            -> Result<Vec<PredicateObligation<'tcx>>,()>
    {
        debug!("match_poly_trait_ref: obligation={:?} poly_trait_ref={:?}",
               obligation,
               poly_trait_ref);

        self.infcx.at(&obligation.cause, obligation.param_env)
                  .sup(obligation.predicate.to_poly_trait_ref(), poly_trait_ref)
                  .map(|InferOk { obligations, .. }| obligations)
                  .map_err(|_| ())
    }

    ///////////////////////////////////////////////////////////////////////////
    // Miscellany

    // Structural match (no inference side effects) between two freshened
    // trait refs; used for cycle detection on the obligation stack.
    fn match_fresh_trait_refs(&self,
                              previous: &ty::PolyTraitRef<'tcx>,
                              current: &ty::PolyTraitRef<'tcx>)
                              -> bool
    {
        let mut matcher = ty::_match::Match::new(self.tcx());
        matcher.relate(previous, current).is_ok()
    }

    // Pushes `obligation` onto the stack, freshening its trait ref so
    // recursion can be detected via `match_fresh_trait_refs`.
    fn push_stack<'o,'s:'o>(&mut self,
                            previous_stack: TraitObligationStackList<'s, 'tcx>,
                            obligation: &'o TraitObligation<'tcx>)
                            -> TraitObligationStack<'o, 'tcx>
    {
        let fresh_trait_ref =
            obligation.predicate.to_poly_trait_ref().fold_with(&mut self.freshener);

        TraitObligationStack {
            obligation,
            fresh_trait_ref,
            previous: previous_stack,
        }
    }

    // Builds the (unnormalized) `Fn`-family trait ref implied by a closure
    // type; see note (1) below about re-binding.
    fn closure_trait_ref_unnormalized(&mut self,
                                      obligation: &TraitObligation<'tcx>,
                                      closure_def_id: DefId,
                                      substs: ty::ClosureSubsts<'tcx>)
                                      -> ty::PolyTraitRef<'tcx>
    {
        let closure_type = self.infcx.closure_sig(closure_def_id, substs);
        let ty::Binder((trait_ref, _)) =
            self.tcx().closure_trait_ref_and_return_type(obligation.predicate.def_id(),
                                                         obligation.predicate.0.self_ty(), // (1)
                                                         closure_type,
                                                         util::TupleArgumentsFlag::No);
        // (1) Feels icky to skip the binder here, but OTOH we know
        // that the self-type is an unboxed closure type and hence is
        // in fact unparameterized (or at least does not reference any
        // regions bound in the obligation). Still probably some
        // refactoring could make this nicer.
        ty::Binder(trait_ref)
    }

    // Generator analogue of `closure_trait_ref_unnormalized`; same
    // binder-skipping caveat, see note (1).
    fn generator_trait_ref_unnormalized(&mut self,
                                        obligation: &TraitObligation<'tcx>,
                                        closure_def_id: DefId,
                                        substs: ty::ClosureSubsts<'tcx>)
                                        -> ty::PolyTraitRef<'tcx>
    {
        let gen_sig = substs.generator_poly_sig(closure_def_id, self.tcx());
        let ty::Binder((trait_ref, ..)) =
            self.tcx().generator_trait_ref_and_outputs(obligation.predicate.def_id(),
                                                       obligation.predicate.0.self_ty(), // (1)
                                                       gen_sig);
        // (1) Feels icky to skip the binder here, but OTOH we know
        // that the self-type is an generator type and hence is
        // in fact unparameterized (or at least does not reference any
        // regions bound in the obligation). Still probably some
        // refactoring could make this nicer.

        ty::Binder(trait_ref)
    }

    /// Returns the obligations that are implied by instantiating an
    /// impl or trait. The obligations are substituted and fully
    /// normalized. This is used when confirming an impl or default
    /// impl.
    fn impl_or_trait_obligations(&mut self,
                                 cause: ObligationCause<'tcx>,
                                 recursion_depth: usize,
                                 param_env: ty::ParamEnv<'tcx>,
                                 def_id: DefId, // of impl or trait
                                 substs: &Substs<'tcx>, // for impl or trait
                                 skol_map: infer::SkolemizationMap<'tcx>,
                                 snapshot: &infer::CombinedSnapshot<'cx, 'tcx>)
                                 -> Vec<PredicateObligation<'tcx>>
    {
        debug!("impl_or_trait_obligations(def_id={:?})", def_id);
        let tcx = self.tcx();

        // To allow for one-pass evaluation of the nested obligation,
        // each predicate must be preceded by the obligations required
        // to normalize it.
        // for example, if we have:
        //      impl<U: Iterator, V: Iterator<Item=U>> Foo for V where U::Item: Copy
        // the impl will have the following predicates:
        //      <V as Iterator>::Item = U,
        //      U: Iterator, U: Sized,
        //      V: Iterator, V: Sized,
        //      <U as Iterator>::Item: Copy
        // When we substitute, say, `V => IntoIter<u32>, U => $0`, the last
        // obligation will normalize to `<$0 as Iterator>::Item = $1` and
        // `$1: Copy`, so we must ensure the obligations are emitted in
        // that order.
        let predicates = tcx.predicates_of(def_id);
        assert_eq!(predicates.parent, None);
        let mut predicates: Vec<_> =
            predicates.predicates.iter().flat_map(|predicate| {
                let predicate = normalize_with_depth(self, param_env, cause.clone(),
                                                     recursion_depth,
                                                     &predicate.subst(tcx, substs));
                // Normalization obligations come first, then the (normalized)
                // predicate itself — see the ordering comment above.
                predicate.obligations.into_iter().chain(
                    Some(Obligation {
                        cause: cause.clone(),
                        recursion_depth,
                        param_env,
                        predicate: predicate.value
                    }))
            }).collect();

        // We are performing deduplication here to avoid exponential blowups
        // (#38528) from happening, but the real cause of the duplication is
        // unknown. What we know is that the deduplication avoids exponential
        // amount of predicates being propogated when processing deeply nested
        // types.
        let mut seen = FxHashSet();
        predicates.retain(|i| seen.insert(i.clone()));

        self.infcx().plug_leaks(skol_map, snapshot, predicates)
    }
}

impl<'tcx> TraitObligation<'tcx> {
    #[allow(unused_comparisons)]
    pub fn derived_cause(&self,
                        variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>)
                        -> ObligationCause<'tcx>
    {
        /*!
         * Creates a cause for obligations that are derived from
         * `obligation` by a recursive search (e.g., for a builtin
         * bound, or eventually a `auto trait Foo`). If `obligation`
         * is itself a derived obligation, this is just a clone, but
         * otherwise we create a "derived obligation" cause so as to
         * keep track of the original root obligation for error
         * reporting.
         */

        let obligation = self;

        // NOTE(flaper87): As of now, it keeps track of the whole error
        // chain. Ideally, we should have a way to configure this either
        // by using -Z verbose or just a CLI argument.
        // NOTE(review): `recursion_depth` is a `usize`, so this comparison is
        // always true (hence the `#[allow(unused_comparisons)]` on the fn);
        // the `else` branch is effectively dead but deliberately kept — see
        // the NOTE above about configurability.
        if obligation.recursion_depth >= 0 {
            let derived_cause = DerivedObligationCause {
                parent_trait_ref: obligation.predicate.to_poly_trait_ref(),
                parent_code: Rc::new(obligation.cause.code.clone())
            };
            let derived_code = variant(derived_cause);
            ObligationCause::new(obligation.cause.span, obligation.cause.body_id, derived_code)
        } else {
            obligation.cause.clone()
        }
    }
}

impl<'tcx> SelectionCache<'tcx> {
    pub fn new() -> SelectionCache<'tcx> {
        SelectionCache {
            hashmap: RefCell::new(FxHashMap())
        }
    }

    pub fn clear(&self) {
        *self.hashmap.borrow_mut() = FxHashMap()
    }
}

impl<'tcx> EvaluationCache<'tcx> {
    pub fn new() -> EvaluationCache<'tcx> {
        EvaluationCache {
            hashmap: RefCell::new(FxHashMap())
        }
    }

    pub fn clear(&self) {
        *self.hashmap.borrow_mut() = FxHashMap()
    }
}

impl<'o,'tcx> TraitObligationStack<'o,'tcx> {
    fn list(&'o self) -> TraitObligationStackList<'o,'tcx> {
        TraitObligationStackList::with(self)
    }

    fn iter(&'o self) -> TraitObligationStackList<'o,'tcx> {
        self.list()
    }
}

// Cheap, copyable linked-list view of the obligation stack; iteration
// walks from the innermost frame outward.
#[derive(Copy, Clone)]
struct TraitObligationStackList<'o,'tcx:'o> {
    head: Option<&'o TraitObligationStack<'o,'tcx>>
}

impl<'o,'tcx> TraitObligationStackList<'o,'tcx> {
    fn empty() -> TraitObligationStackList<'o,'tcx> {
        TraitObligationStackList { head: None }
    }

    fn with(r: &'o TraitObligationStack<'o,'tcx>) -> TraitObligationStackList<'o,'tcx> {
        TraitObligationStackList { head: Some(r) }
    }
}

impl<'o,'tcx> Iterator for TraitObligationStackList<'o,'tcx>{
    type Item = &'o TraitObligationStack<'o,'tcx>;

    fn next(&mut self) -> Option<&'o TraitObligationStack<'o,'tcx>> {
        match self.head {
            Some(o) => {
                *self = o.previous;
                Some(o)
            }
            None => None
        }
    }
}

impl<'o,'tcx> fmt::Debug for TraitObligationStack<'o,'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "TraitObligationStack({:?})", self.obligation)
    }
}

// A cached value paired with the dep-graph node under which it was
// computed; reading it re-registers the dependency.
#[derive(Clone)]
pub struct WithDepNode<T> {
    dep_node: DepNodeIndex,
    cached_value: T
}

impl<T: Clone> WithDepNode<T> {
    pub fn new(dep_node: DepNodeIndex, cached_value: T) -> Self {
        WithDepNode { dep_node, cached_value }
    }

    pub fn get(&self, tcx: TyCtxt) -> T {
        tcx.dep_graph.read_index(self.dep_node);
        self.cached_value.clone()
    }
}

// NOTE(review): the following line is not valid Rust — it appears to be a
// stray commit-message title fused into this file during extraction, with
// the file's real beginning (license header + imports) following it.
Rename variables in rustc’s SelectionContext::copy_clone_conditions

// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! See [rustc guide] for more info on how this works.
//!
//! [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/trait-resolution.html#selection

use self::SelectionCandidate::*;
use self::EvaluationResult::*;

use super::coherence::{self, Conflict};
use super::DerivedObligationCause;
use super::IntercrateMode;
use super::project;
use super::project::{normalize_with_depth, Normalized, ProjectionCacheKey};
use super::{PredicateObligation, TraitObligation, ObligationCause};
use super::{ObligationCauseCode, BuiltinDerivedObligation, ImplDerivedObligation};
use super::{SelectionError, Unimplemented, OutputTypeParameterMismatch};
use super::{ObjectCastObligation, Obligation};
use super::TraitNotObjectSafe;
use super::Selection;
use super::SelectionResult;
use super::{VtableBuiltin, VtableImpl, VtableParam, VtableClosure, VtableGenerator,
            VtableFnPointer, VtableObject, VtableAutoImpl};
use super::{VtableImplData, VtableObjectData, VtableBuiltinData, VtableGeneratorData,
            VtableClosureData, VtableAutoImplData, VtableFnPointerData};
use super::util;

use dep_graph::{DepNodeIndex, DepKind};
use hir::def_id::DefId;
use infer;
use infer::{InferCtxt, InferOk, TypeFreshener};
use ty::subst::{Kind, Subst, Substs};
use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable};
use ty::fast_reject;
use ty::relate::TypeRelation;
use middle::lang_items;
use mir::interpret::{GlobalId};

use rustc_data_structures::bitvec::BitVector;
use std::iter;
use std::cell::RefCell;
use std::cmp;
use std::fmt;
use std::mem;
use std::rc::Rc;
use syntax::abi::Abi;
use hir;
use util::nodemap::{FxHashMap, FxHashSet};

pub struct SelectionContext<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> {
    infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,

    /// Freshener used specifically for skolemizing entries on the
    /// obligation stack. This ensures that all entries on the stack
    /// at one time will have the same set of skolemized entries,
    /// which is important for checking for trait bounds that
    /// recursively require themselves.
    freshener: TypeFreshener<'cx, 'gcx, 'tcx>,

    /// If true, indicates that the evaluation should be conservative
    /// and consider the possibility of types outside this crate.
    /// This comes up primarily when resolving ambiguity. Imagine
    /// there is some trait reference `$0 : Bar` where `$0` is an
    /// inference variable. If `intercrate` is true, then we can never
    /// say for sure that this reference is not implemented, even if
    /// there are *no impls at all for `Bar`*, because `$0` could be
    /// bound to some type that in a downstream crate that implements
    /// `Bar`. This is the suitable mode for coherence. Elsewhere,
    /// though, we set this to false, because we are only interested
    /// in types that the user could actually have written --- in
    /// other words, we consider `$0 : Bar` to be unimplemented if
    /// there is no type that the user could *actually name* that
    /// would satisfy it. This avoids crippling inference, basically.
    intercrate: Option<IntercrateMode>,

    intercrate_ambiguity_causes: Option<Vec<IntercrateAmbiguityCause>>,

    /// Controls whether or not to filter out negative impls when selecting.
    /// This is used in librustdoc to distinguish between the lack of an impl
    /// and a negative impl
    allow_negative_impls: bool
}

#[derive(Clone, Debug)]
pub enum IntercrateAmbiguityCause {
    DownstreamCrate {
        trait_desc: String,
        self_desc: Option<String>,
    },
    UpstreamCrateUpdate {
        trait_desc: String,
        self_desc: Option<String>,
    },
}

impl IntercrateAmbiguityCause {
    /// Emits notes when the overlap is caused by complex intercrate ambiguities.
    /// See #23980 for details.
    pub fn add_intercrate_ambiguity_hint<'a, 'tcx>(&self,
                                                   err: &mut ::errors::DiagnosticBuilder) {
        err.note(&self.intercrate_ambiguity_hint());
    }

    // Renders the cause as a human-readable note string; the two variants
    // only differ in whether the ambiguity comes from a downstream crate or
    // a future upstream update.
    pub fn intercrate_ambiguity_hint(&self) -> String {
        match self {
            &IntercrateAmbiguityCause::DownstreamCrate { ref trait_desc, ref self_desc } => {
                let self_desc = if let &Some(ref ty) = self_desc {
                    format!(" for type `{}`", ty)
                } else { "".to_string() };
                format!("downstream crates may implement trait `{}`{}", trait_desc, self_desc)
            }
            &IntercrateAmbiguityCause::UpstreamCrateUpdate { ref trait_desc, ref self_desc } => {
                let self_desc = if let &Some(ref ty) = self_desc {
                    format!(" for type `{}`", ty)
                } else { "".to_string() };
                format!("upstream crates may add new impl of trait `{}`{} \
                         in future versions",
                        trait_desc, self_desc)
            }
        }
    }
}

// A stack that walks back up the stack frame.
struct TraitObligationStack<'prev, 'tcx: 'prev> {
    obligation: &'prev TraitObligation<'tcx>,

    /// Trait ref from `obligation` but skolemized with the
    /// selection-context's freshener. Used to check for recursion.
    fresh_trait_ref: ty::PolyTraitRef<'tcx>,

    previous: TraitObligationStackList<'prev, 'tcx>,
}

#[derive(Clone)]
pub struct SelectionCache<'tcx> {
    hashmap: RefCell<FxHashMap<ty::TraitRef<'tcx>,
                               WithDepNode<SelectionResult<'tcx, SelectionCandidate<'tcx>>>>>,
}

/// The selection process begins by considering all impls, where
/// clauses, and so forth that might resolve an obligation.  Sometimes
/// we'll be able to say definitively that (e.g.) an impl does not
/// apply to the obligation: perhaps it is defined for `usize` but the
/// obligation is for `int`. In that case, we drop the impl out of the
/// list.  But the other cases are considered *candidates*.
///
/// For selection to succeed, there must be exactly one matching
/// candidate. If the obligation is fully known, this is guaranteed
/// by coherence. However, if the obligation contains type parameters
/// or variables, there may be multiple such impls.
///
/// It is not a real problem if multiple matching impls exist because
/// of type variables - it just means the obligation isn't sufficiently
/// elaborated. In that case we report an ambiguity, and the caller can
/// try again after more type information has been gathered or report a
/// "type annotations required" error.
///
/// However, with type parameters, this can be a real problem - type
/// parameters don't unify with regular types, but they *can* unify
/// with variables from blanket impls, and (unless we know its bounds
/// will always be satisfied) picking the blanket impl will be wrong
/// for at least *some* substitutions. To make this concrete, if we have
///
///    trait AsDebug { type Out : fmt::Debug; fn debug(self) -> Self::Out; }
///    impl<T: fmt::Debug> AsDebug for T {
///        type Out = T;
///        fn debug(self) -> fmt::Debug { self }
///    }
///    fn foo<T: AsDebug>(t: T) { println!("{:?}", <T as AsDebug>::debug(t)); }
///
/// we can't just use the impl to resolve the <T as AsDebug> obligation
/// - a type from another crate (that doesn't implement fmt::Debug) could
/// implement AsDebug.
///
/// Because where-clauses match the type exactly, multiple clauses can
/// only match if there are unresolved variables, and we can mostly just
/// report this ambiguity in that case. This is still a problem - we can't
/// *do anything* with ambiguities that involve only regions. This is issue
/// #21974.
/// /// If a single where-clause matches and there are no inference /// variables left, then it definitely matches and we can just select /// it. /// /// In fact, we even select the where-clause when the obligation contains /// inference variables. The can lead to inference making "leaps of logic", /// for example in this situation: /// /// pub trait Foo<T> { fn foo(&self) -> T; } /// impl<T> Foo<()> for T { fn foo(&self) { } } /// impl Foo<bool> for bool { fn foo(&self) -> bool { *self } } /// /// pub fn foo<T>(t: T) where T: Foo<bool> { /// println!("{:?}", <T as Foo<_>>::foo(&t)); /// } /// fn main() { foo(false); } /// /// Here the obligation <T as Foo<$0>> can be matched by both the blanket /// impl and the where-clause. We select the where-clause and unify $0=bool, /// so the program prints "false". However, if the where-clause is omitted, /// the blanket impl is selected, we unify $0=(), and the program prints /// "()". /// /// Exactly the same issues apply to projection and object candidates, except /// that we can have both a projection candidate and a where-clause candidate /// for the same obligation. In that case either would do (except that /// different "leaps of logic" would occur if inference variables are /// present), and we just pick the where-clause. This is, for example, /// required for associated types to work in default impls, as the bounds /// are visible both as projection bounds and as where-clauses from the /// parameter environment. #[derive(PartialEq,Eq,Debug,Clone)] enum SelectionCandidate<'tcx> { BuiltinCandidate { has_nested: bool }, ParamCandidate(ty::PolyTraitRef<'tcx>), ImplCandidate(DefId), AutoImplCandidate(DefId), /// This is a trait matching with a projected type as `Self`, and /// we found an applicable bound in the trait definition. ProjectionCandidate, /// Implementation of a `Fn`-family trait by one of the anonymous types /// generated for a `||` expression. 
    ClosureCandidate,

    /// Implementation of a `Generator` trait by one of the anonymous types
    /// generated for a generator.
    GeneratorCandidate,

    /// Implementation of a `Fn`-family trait by one of the anonymous
    /// types generated for a fn pointer type (e.g., `fn(int)->int`)
    FnPointerCandidate,

    ObjectCandidate,

    BuiltinObjectCandidate,

    BuiltinUnsizeCandidate,
}

/// Lifts a `SelectionCandidate` into a (longer-lived) `'tcx` context.
/// Only `ParamCandidate` carries type data that actually needs lifting
/// via the tcx; all other variants are moved through unchanged.
impl<'a, 'tcx> ty::Lift<'tcx> for SelectionCandidate<'a> {
    type Lifted = SelectionCandidate<'tcx>;
    fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
        Some(match *self {
            BuiltinCandidate { has_nested } => {
                BuiltinCandidate {
                    has_nested,
                }
            }
            ImplCandidate(def_id) => ImplCandidate(def_id),
            AutoImplCandidate(def_id) => AutoImplCandidate(def_id),
            ProjectionCandidate => ProjectionCandidate,
            FnPointerCandidate => FnPointerCandidate,
            ObjectCandidate => ObjectCandidate,
            BuiltinObjectCandidate => BuiltinObjectCandidate,
            BuiltinUnsizeCandidate => BuiltinUnsizeCandidate,
            ClosureCandidate => ClosureCandidate,
            GeneratorCandidate => GeneratorCandidate,

            ParamCandidate(ref trait_ref) => {
                // The only variant whose payload may fail to lift.
                return tcx.lift(trait_ref).map(ParamCandidate);
            }
        })
    }
}

/// The output of candidate assembly: applicable candidates plus a flag
/// recording whether some candidates could not be decided.
struct SelectionCandidateSet<'tcx> {
    // a list of candidates that definitely apply to the current
    // obligation (meaning: types unify).
    vec: Vec<SelectionCandidate<'tcx>>,

    // if this is true, then there were candidates that might or might
    // not have applied, but we couldn't tell. This occurs when some
    // of the input types are type variables, in which case there are
    // various "builtin" rules that might or might not trigger.
    ambiguous: bool,
}

/// A candidate paired with the result of evaluating its nested
/// obligations; used during winnowing and specialization resolution.
#[derive(PartialEq,Eq,Debug,Clone)]
struct EvaluatedCandidate<'tcx> {
    candidate: SelectionCandidate<'tcx>,
    evaluation: EvaluationResult,
}

/// When does the builtin impl for `T: Trait` apply?
enum BuiltinImplConditions<'tcx> {
    /// The impl is conditional on T1,T2,.. : Trait
    Where(ty::Binder<Vec<Ty<'tcx>>>),
    /// There is no built-in impl. There may be some other
    /// candidate (a where-clause or user-defined impl).
    None,
    /// There is *no* impl for this, builtin or not. Ignore
    /// all where-clauses.
    Never,
    /// It is unknown whether there is an impl.
    Ambiguous
}

#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
/// The result of trait evaluation. The order is important
/// here as the evaluation of a list is the maximum of the
/// evaluations.
///
/// The evaluation results are ordered:
///     - `EvaluatedToOk` implies `EvaluatedToAmbig` implies `EvaluatedToUnknown`
///     - `EvaluatedToErr` implies `EvaluatedToRecur`
///     - the "union" of evaluation results is equal to their maximum -
///     all the "potential success" candidates can potentially succeed,
///     so they are no-ops when unioned with a definite error, and within
///     the categories it's easy to see that the unions are correct.
enum EvaluationResult {
    /// Evaluation successful
    EvaluatedToOk,
    /// Evaluation is known to be ambiguous - it *might* hold for some
    /// assignment of inference variables, but it might not.
    ///
    /// While this has the same meaning as `EvaluatedToUnknown` - we can't
    /// know whether this obligation holds or not - it is the result we
    /// would get with an empty stack, and therefore is cacheable.
    EvaluatedToAmbig,
    /// Evaluation failed because of recursion involving inference
    /// variables. We are somewhat imprecise there, so we don't actually
    /// know the real result.
    ///
    /// This can't be trivially cached for the same reason as `EvaluatedToRecur`.
    EvaluatedToUnknown,
    /// Evaluation failed because we encountered an obligation we are already
    /// trying to prove on this branch.
    ///
    /// We know this branch can't be a part of a minimal proof-tree for
    /// the "root" of our cycle, because then we could cut out the recursion
    /// and maintain a valid proof tree.
    /// However, this does not mean
    /// that all the obligations on this branch do not hold - it's possible
    /// that we entered this branch "speculatively", and that there
    /// might be some other way to prove this obligation that does not
    /// go through this cycle - so we can't cache this as a failure.
    ///
    /// For example, suppose we have this:
    ///
    /// ```rust,ignore (pseudo-Rust)
    ///     pub trait Trait { fn xyz(); }
    ///     // This impl is "useless", but we can still have
    ///     // an `impl Trait for SomeUnsizedType` somewhere.
    ///     impl<T: Trait + Sized> Trait for T { fn xyz() {} }
    ///
    ///     pub fn foo<T: Trait + ?Sized>() {
    ///         <T as Trait>::xyz();
    ///     }
    /// ```
    ///
    /// When checking `foo`, we have to prove `T: Trait`. This basically
    /// translates into this:
    ///
    ///     (T: Trait + Sized →_\impl T: Trait), T: Trait ⊢ T: Trait
    ///
    /// When we try to prove it, we first go the first option, which
    /// recurses. This shows us that the impl is "useless" - it won't
    /// tell us that `T: Trait` unless it already implemented `Trait`
    /// by some other means. However, that does not prevent `T: Trait`
    /// from holding, because of the bound (which can indeed be satisfied
    /// by `SomeUnsizedType` from another crate).
    ///
    /// FIXME: when an `EvaluatedToRecur` goes past its parent root, we
    /// ought to convert it to an `EvaluatedToErr`, because we know
    /// there definitely isn't a proof tree for that obligation. Not
    /// doing so is still sound - there isn't any proof tree, so the
    /// branch still can't be a part of a minimal one - but does not
    /// re-enable caching.
    EvaluatedToRecur,
    /// Evaluation failed
    EvaluatedToErr,
}

impl EvaluationResult {
    /// Returns true for the "potential success" results (Ok, Ambig,
    /// Unknown); false for the definite failures (Err, Recur).
    fn may_apply(self) -> bool {
        match self {
            EvaluatedToOk |
            EvaluatedToAmbig |
            EvaluatedToUnknown => true,

            EvaluatedToErr |
            EvaluatedToRecur => false
        }
    }

    /// Returns true for results that depend on the contents of the
    /// obligation stack (and therefore must not be cached).
    fn is_stack_dependent(self) -> bool {
        match self {
            EvaluatedToUnknown |
            EvaluatedToRecur => true,

            EvaluatedToOk |
            EvaluatedToAmbig |
            EvaluatedToErr => false,
        }
    }
}

/// Memoizes evaluation results keyed by (freshened) poly-trait-ref;
/// values carry the `DepNode` recorded when the result was computed.
#[derive(Clone)]
pub struct EvaluationCache<'tcx> {
    hashmap: RefCell<FxHashMap<ty::PolyTraitRef<'tcx>, WithDepNode<EvaluationResult>>>
}

impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> {
    /// Creates a selection context in ordinary (intra-crate) mode.
    pub fn new(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>) -> SelectionContext<'cx, 'gcx, 'tcx> {
        SelectionContext {
            infcx,
            freshener: infcx.freshener(),
            intercrate: None,
            intercrate_ambiguity_causes: None,
            allow_negative_impls: false,
        }
    }

    /// Creates a selection context for coherence checking, with the
    /// given intercrate `mode`.
    pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
                      mode: IntercrateMode) -> SelectionContext<'cx, 'gcx, 'tcx> {
        debug!("intercrate({:?})", mode);
        SelectionContext {
            infcx,
            freshener: infcx.freshener(),
            intercrate: Some(mode),
            intercrate_ambiguity_causes: None,
            allow_negative_impls: false,
        }
    }

    /// Creates a selection context that optionally treats negative
    /// impls as real candidates (used by librustdoc; see the
    /// `allow_negative_impls` field).
    pub fn with_negative(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
                         allow_negative_impls: bool) -> SelectionContext<'cx, 'gcx, 'tcx> {
        debug!("with_negative({:?})", allow_negative_impls);
        SelectionContext {
            infcx,
            freshener: infcx.freshener(),
            intercrate: None,
            intercrate_ambiguity_causes: None,
            allow_negative_impls,
        }
    }

    /// Enables tracking of intercrate ambiguity causes. These are
    /// used in coherence to give improved diagnostics. We don't do
    /// this until we detect a coherence error because it can lead to
    /// false overflow results (#47139) and because it costs
    /// computation time.
pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) { assert!(self.intercrate.is_some()); assert!(self.intercrate_ambiguity_causes.is_none()); self.intercrate_ambiguity_causes = Some(vec![]); debug!("selcx: enable_tracking_intercrate_ambiguity_causes"); } /// Gets the intercrate ambiguity causes collected since tracking /// was enabled and disables tracking at the same time. If /// tracking is not enabled, just returns an empty vector. pub fn take_intercrate_ambiguity_causes(&mut self) -> Vec<IntercrateAmbiguityCause> { assert!(self.intercrate.is_some()); self.intercrate_ambiguity_causes.take().unwrap_or(vec![]) } pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> { self.infcx } pub fn tcx(&self) -> TyCtxt<'cx, 'gcx, 'tcx> { self.infcx.tcx } pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> { self.infcx } /// Wraps the inference context's in_snapshot s.t. snapshot handling is only from the selection /// context's self. fn in_snapshot<R, F>(&mut self, f: F) -> R where F: FnOnce(&mut Self, &infer::CombinedSnapshot<'cx, 'tcx>) -> R { self.infcx.in_snapshot(|snapshot| f(self, snapshot)) } /// Wraps a probe s.t. obligations collected during it are ignored and old obligations are /// retained. fn probe<R, F>(&mut self, f: F) -> R where F: FnOnce(&mut Self, &infer::CombinedSnapshot<'cx, 'tcx>) -> R { self.infcx.probe(|snapshot| f(self, snapshot)) } /// Wraps a commit_if_ok s.t. obligations collected during it are not returned in selection if /// the transaction fails and s.t. old obligations are retained. fn commit_if_ok<T, E, F>(&mut self, f: F) -> Result<T, E> where F: FnOnce(&mut Self, &infer::CombinedSnapshot) -> Result<T, E> { self.infcx.commit_if_ok(|snapshot| f(self, snapshot)) } /////////////////////////////////////////////////////////////////////////// // Selection // // The selection phase tries to identify *how* an obligation will // be resolved. For example, it will identify which impl or // parameter bound is to be used. 
    // The process can be inconclusive
    // if the self type in the obligation is not fully inferred. Selection
    // can result in an error in one of two ways:
    //
    // 1. If no applicable impl or parameter bound can be found.
    // 2. If the output type parameters in the obligation do not match
    //    those specified by the impl/bound. For example, if the obligation
    //    is `Vec<Foo>:Iterable<Bar>`, but the impl specifies
    //    `impl<T> Iterable<T> for Vec<T>`, then an error would result.

    /// Attempts to satisfy the obligation. If successful, this will affect the surrounding
    /// type environment by performing unification.
    pub fn select(&mut self, obligation: &TraitObligation<'tcx>)
                  -> SelectionResult<'tcx, Selection<'tcx>> {
        debug!("select({:?})", obligation);
        assert!(!obligation.predicate.has_escaping_regions());

        // Start a fresh obligation stack and pick a candidate, then
        // confirm it (which registers the nested obligations).
        let stack = self.push_stack(TraitObligationStackList::empty(), obligation);
        let ret = match self.candidate_from_obligation(&stack)? {
            None => None,
            Some(candidate) => Some(self.confirm_candidate(obligation, candidate)?)
        };
        Ok(ret)
    }

    ///////////////////////////////////////////////////////////////////////////
    // EVALUATION
    //
    // Tests whether an obligation can be selected or whether an impl
    // can be applied to particular types. It skips the "confirmation"
    // step and hence completely ignores output type parameters.
    //
    // The result is "true" if the obligation *may* hold and "false" if
    // we can be sure it does not.

    /// Evaluates whether the obligation `obligation` can be satisfied (by any means).
    pub fn evaluate_obligation(&mut self,
                               obligation: &PredicateObligation<'tcx>)
                               -> bool
    {
        debug!("evaluate_obligation({:?})", obligation);

        // Run inside a probe so the inference state is rolled back.
        self.probe(|this, _| {
            this.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation)
                .may_apply()
        })
    }

    /// Evaluates whether the obligation `obligation` can be satisfied,
    /// and returns `false` if not certain. However, this is not entirely
    /// accurate if inference variables are involved.
    pub fn evaluate_obligation_conservatively(&mut self,
                                              obligation: &PredicateObligation<'tcx>)
                                              -> bool
    {
        debug!("evaluate_obligation_conservatively({:?})", obligation);

        // Only a definite `EvaluatedToOk` counts; ambiguity is treated
        // as "not satisfied".
        self.probe(|this, _| {
            this.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation)
                == EvaluatedToOk
        })
    }

    /// Evaluates the predicates in `predicates` recursively. Note that
    /// this applies projections in the predicates, and therefore
    /// is run within an inference probe.
    fn evaluate_predicates_recursively<'a,'o,I>(&mut self,
                                                stack: TraitObligationStackList<'o, 'tcx>,
                                                predicates: I)
                                                -> EvaluationResult
        where I : IntoIterator<Item=&'a PredicateObligation<'tcx>>, 'tcx:'a
    {
        // The combined result is the maximum of the individual results
        // (see the ordering documented on `EvaluationResult`).
        let mut result = EvaluatedToOk;
        for obligation in predicates {
            let eval = self.evaluate_predicate_recursively(stack, obligation);
            debug!("evaluate_predicate_recursively({:?}) = {:?}",
                   obligation, eval);
            if let EvaluatedToErr = eval {
                // fast-path - EvaluatedToErr is the top of the lattice,
                // so we don't need to look on the other predicates.
                return EvaluatedToErr;
            } else {
                result = cmp::max(result, eval);
            }
        }
        result
    }

    /// Evaluates a single predicate obligation, dispatching on the
    /// predicate kind (trait, subtype, well-formedness, projection, ...).
    fn evaluate_predicate_recursively<'o>(&mut self,
                                          previous_stack: TraitObligationStackList<'o, 'tcx>,
                                          obligation: &PredicateObligation<'tcx>)
                                           -> EvaluationResult
    {
        debug!("evaluate_predicate_recursively({:?})",
               obligation);

        match obligation.predicate {
            ty::Predicate::Trait(ref t) => {
                assert!(!t.has_escaping_regions());
                let obligation = obligation.with(t.clone());
                self.evaluate_trait_predicate_recursively(previous_stack, obligation)
            }

            ty::Predicate::Subtype(ref p) => {
                // does this code ever run?
                match self.infcx.subtype_predicate(&obligation.cause, obligation.param_env, p) {
                    Some(Ok(InferOk { obligations, ..
})) => {
                        self.evaluate_predicates_recursively(previous_stack, &obligations);
                        EvaluatedToOk
                    },
                    Some(Err(_)) => EvaluatedToErr,
                    None => EvaluatedToAmbig,
                }
            }

            ty::Predicate::WellFormed(ty) => {
                // `None` from `wf::obligations` means the WF check
                // could not be run yet (ambiguous).
                match ty::wf::obligations(self.infcx, obligation.param_env,
                                          obligation.cause.body_id,
                                          ty, obligation.cause.span) {
                    Some(obligations) =>
                        self.evaluate_predicates_recursively(previous_stack, obligations.iter()),
                    None =>
                        EvaluatedToAmbig,
                }
            }

            ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) => {
                // we do not consider region relationships when
                // evaluating trait matches
                EvaluatedToOk
            }

            ty::Predicate::ObjectSafe(trait_def_id) => {
                if self.tcx().is_object_safe(trait_def_id) {
                    EvaluatedToOk
                } else {
                    EvaluatedToErr
                }
            }

            ty::Predicate::Projection(ref data) => {
                let project_obligation = obligation.with(data.clone());
                match project::poly_project_and_unify_type(self, &project_obligation) {
                    Ok(Some(subobligations)) => {
                        let result = self.evaluate_predicates_recursively(previous_stack,
                                                                          subobligations.iter());
                        // Mark the projection-cache entry complete so
                        // later lookups don't re-do this work.
                        if let Some(key) =
                            ProjectionCacheKey::from_poly_projection_predicate(self, data)
                        {
                            self.infcx.projection_cache.borrow_mut().complete(key);
                        }
                        result
                    }
                    Ok(None) => {
                        EvaluatedToAmbig
                    }
                    Err(_) => {
                        EvaluatedToErr
                    }
                }
            }

            ty::Predicate::ClosureKind(closure_def_id, closure_substs, kind) => {
                // Unknown closure kind (not yet inferred) => ambiguous.
                match self.infcx.closure_kind(closure_def_id, closure_substs) {
                    Some(closure_kind) => {
                        if closure_kind.extends(kind) {
                            EvaluatedToOk
                        } else {
                            EvaluatedToErr
                        }
                    }
                    None => {
                        EvaluatedToAmbig
                    }
                }
            }

            ty::Predicate::ConstEvaluatable(def_id, substs) => {
                let tcx = self.tcx();
                match tcx.lift_to_global(&(obligation.param_env, substs)) {
                    Some((param_env, substs)) => {
                        let instance = ty::Instance::resolve(
                            tcx.global_tcx(),
                            param_env,
                            def_id,
                            substs,
                        );
                        if let Some(instance) = instance {
                            let cid = GlobalId {
                                instance,
                                promoted: None
                            };
                            match self.tcx().const_eval(param_env.and(cid)) {
                                Ok(_) => EvaluatedToOk,
                                Err(_) => EvaluatedToErr
                            }
                        } else {
                            EvaluatedToErr
                        }
                    }
                    None => {
                        // Inference variables
still left in param_env or substs. EvaluatedToAmbig } } } } } fn evaluate_trait_predicate_recursively<'o>(&mut self, previous_stack: TraitObligationStackList<'o, 'tcx>, mut obligation: TraitObligation<'tcx>) -> EvaluationResult { debug!("evaluate_trait_predicate_recursively({:?})", obligation); if !self.intercrate.is_some() && obligation.is_global() { // If a param env is consistent, global obligations do not depend on its particular // value in order to work, so we can clear out the param env and get better // caching. (If the current param env is inconsistent, we don't care what happens). debug!("evaluate_trait_predicate_recursively({:?}) - in global", obligation); obligation.param_env = obligation.param_env.without_caller_bounds(); } let stack = self.push_stack(previous_stack, &obligation); let fresh_trait_ref = stack.fresh_trait_ref; if let Some(result) = self.check_evaluation_cache(obligation.param_env, fresh_trait_ref) { debug!("CACHE HIT: EVAL({:?})={:?}", fresh_trait_ref, result); return result; } let (result, dep_node) = self.in_task(|this| this.evaluate_stack(&stack)); debug!("CACHE MISS: EVAL({:?})={:?}", fresh_trait_ref, result); self.insert_evaluation_cache(obligation.param_env, fresh_trait_ref, dep_node, result); result } fn evaluate_stack<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> EvaluationResult { // In intercrate mode, whenever any of the types are unbound, // there can always be an impl. Even if there are no impls in // this crate, perhaps the type would be unified with // something from another crate that does provide an impl. // // In intra mode, we must still be conservative. The reason is // that we want to avoid cycles. Imagine an impl like: // // impl<T:Eq> Eq for Vec<T> // // and a trait reference like `$0 : Eq` where `$0` is an // unbound variable. When we evaluate this trait-reference, we // will unify `$0` with `Vec<$1>` (for some fresh variable // `$1`), on the condition that `$1 : Eq`. 
        // We will then wind
        // up with many candidates (since there are other `Eq` impls
        // that apply) and try to winnow things down. This results in
        // a recursive evaluation that `$1 : Eq` -- as you can
        // imagine, this is just where we started. To avoid that, we
        // check for unbound variables and return an ambiguous (hence possible)
        // match if we've seen this trait before.
        //
        // This suffices to allow chains like `FnMut` implemented in
        // terms of `Fn` etc, but we could probably make this more
        // precise still.
        let unbound_input_types = stack.fresh_trait_ref.input_types().any(|ty| ty.is_fresh());
        // this check was an imperfect workaround for a bug in the old
        // intercrate mode, it should be removed when that goes away.
        if unbound_input_types &&
            self.intercrate == Some(IntercrateMode::Issue43355)
        {
            debug!("evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous",
                   stack.fresh_trait_ref);
            // Heuristics: show the diagnostics when there are no candidates in crate.
            if self.intercrate_ambiguity_causes.is_some() {
                debug!("evaluate_stack: intercrate_ambiguity_causes is some");
                if let Ok(candidate_set) = self.assemble_candidates(stack) {
                    if !candidate_set.ambiguous && candidate_set.vec.is_empty() {
                        let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
                        let self_ty = trait_ref.self_ty();
                        let cause = IntercrateAmbiguityCause::DownstreamCrate {
                            trait_desc: trait_ref.to_string(),
                            self_desc: if self_ty.has_concrete_skeleton() {
                                Some(self_ty.to_string())
                            } else {
                                None
                            },
                        };
                        debug!("evaluate_stack: pushing cause = {:?}", cause);
                        self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause);
                    }
                }
            }
            return EvaluatedToAmbig;
        }
        // An unbound input type plus a matching (freshened) entry
        // further down the stack means we'd loop: give up with a
        // stack-dependent (uncacheable) result.
        if unbound_input_types &&
              stack.iter().skip(1).any(
                  |prev| stack.obligation.param_env == prev.obligation.param_env &&
                      self.match_fresh_trait_refs(&stack.fresh_trait_ref,
                                                  &prev.fresh_trait_ref))
        {
            debug!("evaluate_stack({:?}) --> unbound argument, recursive --> giving up",
                   stack.fresh_trait_ref);
            return EvaluatedToUnknown;
        }

        // If there is any
        // previous entry on the stack that precisely
        // matches this obligation, then we can assume that the
        // obligation is satisfied for now (still all other conditions
        // must be met of course). One obvious case this comes up is
        // marker traits like `Send`. Think of a linked list:
        //
        //    struct List<T> { data: T, next: Option<Box<List<T>>> }
        //
        // `Box<List<T>>` will be `Send` if `T` is `Send` and
        // `Option<Box<List<T>>>` is `Send`, and in turn
        // `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is
        // `Send`.
        //
        // Note that we do this comparison using the `fresh_trait_ref`
        // fields. Because these have all been skolemized using
        // `self.freshener`, we can be sure that (a) this will not
        // affect the inferencer state and (b) that if we see two
        // skolemized types with the same index, they refer to the
        // same unbound type variable.
        if let Some(rec_index) =
            stack.iter()
            .skip(1) // skip top-most frame
            .position(|prev| stack.obligation.param_env == prev.obligation.param_env &&
                      stack.fresh_trait_ref == prev.fresh_trait_ref)
        {
            debug!("evaluate_stack({:?}) --> recursive",
                   stack.fresh_trait_ref);
            // Coinductive cycles (auto traits) are accepted; inductive
            // cycles are a (possibly speculative) failure.
            let cycle = stack.iter().skip(1).take(rec_index+1);
            let cycle = cycle.map(|stack| ty::Predicate::Trait(stack.obligation.predicate));
            if self.coinductive_match(cycle) {
                debug!("evaluate_stack({:?}) --> recursive, coinductive",
                       stack.fresh_trait_ref);
                return EvaluatedToOk;
            } else {
                debug!("evaluate_stack({:?}) --> recursive, inductive",
                       stack.fresh_trait_ref);
                return EvaluatedToRecur;
            }
        }

        match self.candidate_from_obligation(stack) {
            Ok(Some(c)) => self.evaluate_candidate(stack, &c),
            Ok(None) => EvaluatedToAmbig,
            Err(..) => EvaluatedToErr
        }
    }

    /// For defaulted traits, we use a co-inductive strategy to solve, so
    /// that recursion is ok.
    /// This routine returns true if the top of the
    /// stack (`cycle[0]`):
    ///
    /// - is a defaulted trait, and
    /// - it also appears in the backtrace at some position `X`; and,
    /// - all the predicates at positions `X..` between `X` and the top are
    ///   also defaulted traits.
    pub fn coinductive_match<I>(&mut self, cycle: I) -> bool
        where I: Iterator<Item=ty::Predicate<'tcx>>
    {
        let mut cycle = cycle;
        cycle.all(|predicate| self.coinductive_predicate(predicate))
    }

    /// A predicate participates in coinduction only if it is a trait
    /// predicate for an auto trait.
    fn coinductive_predicate(&self, predicate: ty::Predicate<'tcx>) -> bool {
        let result = match predicate {
            ty::Predicate::Trait(ref data) => {
                self.tcx().trait_is_auto(data.def_id())
            }
            _ => {
                false
            }
        };
        debug!("coinductive_predicate({:?}) = {:?}", predicate, result);
        result
    }

    /// Further evaluate `candidate` to decide whether all type parameters match and whether nested
    /// obligations are met. Returns true if `candidate` remains viable after this further
    /// scrutiny.
    fn evaluate_candidate<'o>(&mut self,
                              stack: &TraitObligationStack<'o, 'tcx>,
                              candidate: &SelectionCandidate<'tcx>)
                              -> EvaluationResult
    {
        debug!("evaluate_candidate: depth={} candidate={:?}",
               stack.obligation.recursion_depth, candidate);
        // Confirm inside a probe so unification done here is rolled back.
        let result = self.probe(|this, _| {
            let candidate = (*candidate).clone();
            match this.confirm_candidate(stack.obligation, candidate) {
                Ok(selection) => {
                    this.evaluate_predicates_recursively(
                        stack.list(),
                        selection.nested_obligations().iter())
                }
                Err(..)
 => EvaluatedToErr
            }
        });
        debug!("evaluate_candidate: depth={} result={:?}",
               stack.obligation.recursion_depth, result);
        result
    }

    /// Looks up a previously computed evaluation result, preferring the
    /// global (tcx-level) cache when the param-env permits it and
    /// falling back to the per-inference-context cache.
    fn check_evaluation_cache(&self,
                              param_env: ty::ParamEnv<'tcx>,
                              trait_ref: ty::PolyTraitRef<'tcx>)
                              -> Option<EvaluationResult>
    {
        let tcx = self.tcx();
        if self.can_use_global_caches(param_env) {
            let cache = tcx.evaluation_cache.hashmap.borrow();
            if let Some(cached) = cache.get(&trait_ref) {
                return Some(cached.get(tcx));
            }
        }
        self.infcx.evaluation_cache.hashmap
                                   .borrow()
                                   .get(&trait_ref)
                                   .map(|v| v.get(tcx))
    }

    /// Records an evaluation result, tagged with the `DepNode` under
    /// which it was computed. Stack-dependent results are never cached.
    fn insert_evaluation_cache(&mut self,
                               param_env: ty::ParamEnv<'tcx>,
                               trait_ref: ty::PolyTraitRef<'tcx>,
                               dep_node: DepNodeIndex,
                               result: EvaluationResult)
    {
        // Avoid caching results that depend on more than just the trait-ref
        // - the stack can create recursion.
        if result.is_stack_dependent() {
            return;
        }

        if self.can_use_global_caches(param_env) {
            let mut cache = self.tcx().evaluation_cache.hashmap.borrow_mut();
            // Only globally-liftable trait-refs may enter the global cache.
            if let Some(trait_ref) = self.tcx().lift_to_global(&trait_ref) {
                cache.insert(trait_ref, WithDepNode::new(dep_node, result));
                return;
            }
        }

        self.infcx.evaluation_cache.hashmap
                                   .borrow_mut()
                                   .insert(trait_ref, WithDepNode::new(dep_node, result));
    }

    ///////////////////////////////////////////////////////////////////////////
    // CANDIDATE ASSEMBLY
    //
    // The selection process begins by examining all in-scope impls,
    // caller obligations, and so forth and assembling a list of
    // candidates. See [rustc guide] for more details.
    //
    // [rustc guide]:
    // https://rust-lang-nursery.github.io/rustc-guide/trait-resolution.html#candidate-assembly

    /// Selects a candidate for the obligation at the top of `stack`,
    /// going through the selection cache.
    fn candidate_from_obligation<'o>(&mut self,
                                     stack: &TraitObligationStack<'o, 'tcx>)
                                     -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
        // Watch out for overflow. This intentionally bypasses (and does
        // not update) the cache.
        let recursion_limit = self.infcx.tcx.sess.recursion_limit.get();
        if stack.obligation.recursion_depth >= recursion_limit {
            self.infcx().report_overflow_error(&stack.obligation, true);
        }

        // Check the cache. Note that we skolemize the trait-ref
        // separately rather than using `stack.fresh_trait_ref` -- this
        // is because we want the unbound variables to be replaced
        // with fresh skolemized types starting from index 0.
        let cache_fresh_trait_pred =
            self.infcx.freshen(stack.obligation.predicate.clone());
        debug!("candidate_from_obligation(cache_fresh_trait_pred={:?}, obligation={:?})",
               cache_fresh_trait_pred,
               stack);
        assert!(!stack.obligation.predicate.has_escaping_regions());

        if let Some(c) = self.check_candidate_cache(stack.obligation.param_env,
                                                    &cache_fresh_trait_pred) {
            debug!("CACHE HIT: SELECT({:?})={:?}",
                   cache_fresh_trait_pred,
                   c);
            return c;
        }

        // If no match, compute result and insert into cache.
        let (candidate, dep_node) = self.in_task(|this| {
            this.candidate_from_obligation_no_cache(stack)
        });

        debug!("CACHE MISS: SELECT({:?})={:?}",
               cache_fresh_trait_pred, candidate);
        self.insert_candidate_cache(stack.obligation.param_env,
                                    cache_fresh_trait_pred,
                                    dep_node,
                                    candidate.clone());
        candidate
    }

    /// Runs `op` inside an anonymous `TraitSelect` dep-graph task and
    /// returns the result with the recorded `DepNodeIndex`.
    fn in_task<OP, R>(&mut self, op: OP) -> (R, DepNodeIndex)
        where OP: FnOnce(&mut Self) -> R
    {
        let (result, dep_node) = self.tcx().dep_graph.with_anon_task(DepKind::TraitSelect, || {
            op(self)
        });
        self.tcx().dep_graph.read_index(dep_node);
        (result, dep_node)
    }

    // Treat negative impls as unimplemented
    fn filter_negative_impls(&self, candidate: SelectionCandidate<'tcx>)
                             -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
        if let ImplCandidate(def_id) = candidate {
            // `allow_negative_impls` (set via `with_negative`) lets
            // librustdoc see negative impls as real candidates.
            if !self.allow_negative_impls &&
                self.tcx().impl_polarity(def_id) == hir::ImplPolarity::Negative {
                return Err(Unimplemented)
            }
        }
        Ok(Some(candidate))
    }

    /// The uncached body of `candidate_from_obligation`: assembles
    /// candidates, winnows them, and returns the single survivor (or
    /// ambiguity / `Unimplemented`).
    fn candidate_from_obligation_no_cache<'o>(&mut self,
                                              stack: &TraitObligationStack<'o, 'tcx>)
                                              -> SelectionResult<'tcx, SelectionCandidate<'tcx>>
    {
        if
 stack.obligation.predicate.references_error() {
            // If we encounter a `TyError`, we generally prefer the
            // most "optimistic" result in response -- that is, the
            // one least likely to report downstream errors. But
            // because this routine is shared by coherence and by
            // trait selection, there isn't an obvious "right" choice
            // here in that respect, so we opt to just return
            // ambiguity and let the upstream clients sort it out.
            return Ok(None);
        }

        match self.is_knowable(stack) {
            None => {}
            Some(conflict) => {
                debug!("coherence stage: not knowable");
                if self.intercrate_ambiguity_causes.is_some() {
                    debug!("evaluate_stack: intercrate_ambiguity_causes is some");
                    // Heuristics: show the diagnostics when there are no candidates in crate.
                    let candidate_set = self.assemble_candidates(stack)?;
                    if !candidate_set.ambiguous && candidate_set.vec.iter().all(|c| {
                        !self.evaluate_candidate(stack, &c).may_apply()
                    }) {
                        let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
                        let self_ty = trait_ref.self_ty();
                        let trait_desc = trait_ref.to_string();
                        let self_desc = if self_ty.has_concrete_skeleton() {
                            Some(self_ty.to_string())
                        } else {
                            None
                        };
                        // Choose the diagnostic variant matching the
                        // direction of the potential conflict.
                        let cause = if let Conflict::Upstream = conflict {
                            IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc }
                        } else {
                            IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc }
                        };
                        debug!("evaluate_stack: pushing cause = {:?}", cause);
                        self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause);
                    }
                }
                return Ok(None);
            }
        }

        let candidate_set = self.assemble_candidates(stack)?;

        if candidate_set.ambiguous {
            debug!("candidate set contains ambig");
            return Ok(None);
        }

        let mut candidates = candidate_set.vec;

        debug!("assembled {} candidates for {:?}: {:?}",
               candidates.len(),
               stack,
               candidates);

        // At this point, we know that each of the entries in the
        // candidate set is *individually* applicable. Now we have to
        // figure out if they contain mutual incompatibilities.
        // This
        // frequently arises if we have an unconstrained input type --
        // for example, we are looking for $0:Eq where $0 is some
        // unconstrained type variable. In that case, we'll get a
        // candidate which assumes $0 == int, one that assumes $0 ==
        // usize, etc. This spells an ambiguity.

        // If there is more than one candidate, first winnow them down
        // by considering extra conditions (nested obligations and so
        // forth). We don't winnow if there is exactly one
        // candidate. This is a relatively minor distinction but it
        // can lead to better inference and error-reporting. An
        // example would be if there was an impl:
        //
        //     impl<T:Clone> Vec<T> { fn push_clone(...) { ... } }
        //
        // and we were to see some code `foo.push_clone()` where `foo`
        // is a `Vec<Bar>` and `Bar` does not implement `Clone`.  If
        // we were to winnow, we'd wind up with zero candidates.
        // Instead, we select the right impl now but report `Bar does
        // not implement Clone`.
        if candidates.len() == 1 {
            return self.filter_negative_impls(candidates.pop().unwrap());
        }

        // Winnow, but record the exact outcome of evaluation, which
        // is needed for specialization.
        let mut candidates: Vec<_> = candidates.into_iter().filter_map(|c| {
            let eval = self.evaluate_candidate(stack, &c);
            if eval.may_apply() {
                Some(EvaluatedCandidate {
                    candidate: c,
                    evaluation: eval,
                })
            } else {
                None
            }
        }).collect();

        // If there are STILL multiple candidates, we can further
        // reduce the list by dropping duplicates -- including
        // resolving specializations.
        if candidates.len() > 1 {
            let mut i = 0;
            while i < candidates.len() {
                // A candidate is a duplicate if any *other* candidate
                // should be preferred over it (e.g. a specializing impl).
                let is_dup =
                    (0..candidates.len())
                    .filter(|&j| i != j)
                    .any(|j| self.candidate_should_be_dropped_in_favor_of(&candidates[i],
                                                                          &candidates[j]));
                if is_dup {
                    debug!("Dropping candidate #{}/{}: {:?}",
                           i, candidates.len(), candidates[i]);
                    candidates.swap_remove(i);
                } else {
                    debug!("Retaining candidate #{}/{}: {:?}",
                           i, candidates.len(), candidates[i]);
                    i += 1;

                    // If there are *STILL* multiple candidates, give up
                    // and report ambiguity.
                    if i > 1 {
                        debug!("multiple matches, ambig");
                        return Ok(None);
                    }
                }
            }
        }

        // If there are *NO* candidates, then there are no impls --
        // that we know of, anyway. Note that in the case where there
        // are unbound type variables within the obligation, it might
        // be the case that you could still satisfy the obligation
        // from another crate by instantiating the type variables with
        // a type from another crate that does have an impl. This case
        // is checked for in `evaluate_stack` (and hence users
        // who might care about this case, like coherence, should use
        // that function).
        if candidates.is_empty() {
            return Err(Unimplemented);
        }

        // Just one candidate left.
self.filter_negative_impls(candidates.pop().unwrap().candidate) } fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Option<Conflict> { debug!("is_knowable(intercrate={:?})", self.intercrate); if !self.intercrate.is_some() { return None; } let obligation = &stack.obligation; let predicate = self.infcx().resolve_type_vars_if_possible(&obligation.predicate); // ok to skip binder because of the nature of the // trait-ref-is-knowable check, which does not care about // bound regions let trait_ref = predicate.skip_binder().trait_ref; let result = coherence::trait_ref_is_knowable(self.tcx(), trait_ref); if let (Some(Conflict::Downstream { used_to_be_broken: true }), Some(IntercrateMode::Issue43355)) = (result, self.intercrate) { debug!("is_knowable: IGNORING conflict to be bug-compatible with #43355"); None } else { result } } /// Returns true if the global caches can be used. /// Do note that if the type itself is not in the /// global tcx, the local caches will be used. fn can_use_global_caches(&self, param_env: ty::ParamEnv<'tcx>) -> bool { // If there are any where-clauses in scope, then we always use // a cache local to this particular scope. Otherwise, we // switch to a global cache. We used to try and draw // finer-grained distinctions, but that led to a serious of // annoying and weird bugs like #22019 and #18290. This simple // rule seems to be pretty clearly safe and also still retains // a very high hit rate (~95% when compiling rustc). if !param_env.caller_bounds.is_empty() { return false; } // Avoid using the master cache during coherence and just rely // on the local cache. This effectively disables caching // during coherence. It is really just a simplification to // avoid us having to fear that coherence results "pollute" // the master cache. Since coherence executes pretty quickly, // it's not worth going to more trouble to increase the // hit-rate I don't think. 
if self.intercrate.is_some() { return false; } // Otherwise, we can use the global cache. true } fn check_candidate_cache(&mut self, param_env: ty::ParamEnv<'tcx>, cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> { let tcx = self.tcx(); let trait_ref = &cache_fresh_trait_pred.0.trait_ref; if self.can_use_global_caches(param_env) { let cache = tcx.selection_cache.hashmap.borrow(); if let Some(cached) = cache.get(&trait_ref) { return Some(cached.get(tcx)); } } self.infcx.selection_cache.hashmap .borrow() .get(trait_ref) .map(|v| v.get(tcx)) } fn insert_candidate_cache(&mut self, param_env: ty::ParamEnv<'tcx>, cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>, dep_node: DepNodeIndex, candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>) { let tcx = self.tcx(); let trait_ref = cache_fresh_trait_pred.0.trait_ref; if self.can_use_global_caches(param_env) { let mut cache = tcx.selection_cache.hashmap.borrow_mut(); if let Some(trait_ref) = tcx.lift_to_global(&trait_ref) { if let Some(candidate) = tcx.lift_to_global(&candidate) { cache.insert(trait_ref, WithDepNode::new(dep_node, candidate)); return; } } } self.infcx.selection_cache.hashmap .borrow_mut() .insert(trait_ref, WithDepNode::new(dep_node, candidate)); } fn assemble_candidates<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Result<SelectionCandidateSet<'tcx>, SelectionError<'tcx>> { let TraitObligationStack { obligation, .. } = *stack; let ref obligation = Obligation { param_env: obligation.param_env, cause: obligation.cause.clone(), recursion_depth: obligation.recursion_depth, predicate: self.infcx().resolve_type_vars_if_possible(&obligation.predicate) }; if obligation.predicate.skip_binder().self_ty().is_ty_var() { // Self is a type variable (e.g. `_: AsRef<str>`). // // This is somewhat problematic, as the current scheme can't really // handle it turning to be a projection. 
This does end up as truly // ambiguous in most cases anyway. // // Take the fast path out - this also improves // performance by preventing assemble_candidates_from_impls from // matching every impl for this trait. return Ok(SelectionCandidateSet { vec: vec![], ambiguous: true }); } let mut candidates = SelectionCandidateSet { vec: Vec::new(), ambiguous: false }; // Other bounds. Consider both in-scope bounds from fn decl // and applicable impls. There is a certain set of precedence rules here. let def_id = obligation.predicate.def_id(); let lang_items = self.tcx().lang_items(); if lang_items.copy_trait() == Some(def_id) { debug!("obligation self ty is {:?}", obligation.predicate.0.self_ty()); // User-defined copy impls are permitted, but only for // structs and enums. self.assemble_candidates_from_impls(obligation, &mut candidates)?; // For other types, we'll use the builtin rules. let copy_conditions = self.copy_clone_conditions(obligation); self.assemble_builtin_bound_candidates(copy_conditions, &mut candidates)?; } else if lang_items.sized_trait() == Some(def_id) { // Sized is never implementable by end-users, it is // always automatically computed. let sized_conditions = self.sized_conditions(obligation); self.assemble_builtin_bound_candidates(sized_conditions, &mut candidates)?; } else if lang_items.unsize_trait() == Some(def_id) { self.assemble_candidates_for_unsizing(obligation, &mut candidates); } else { if lang_items.clone_trait() == Some(def_id) { // Same builtin conditions as `Copy`, i.e. every type which has builtin support // for `Copy` also has builtin support for `Clone`, + tuples and arrays of `Clone` // types have builtin support for `Clone`. 
let clone_conditions = self.copy_clone_conditions(obligation); self.assemble_builtin_bound_candidates(clone_conditions, &mut candidates)?; } self.assemble_generator_candidates(obligation, &mut candidates)?; self.assemble_closure_candidates(obligation, &mut candidates)?; self.assemble_fn_pointer_candidates(obligation, &mut candidates)?; self.assemble_candidates_from_impls(obligation, &mut candidates)?; self.assemble_candidates_from_object_ty(obligation, &mut candidates); } self.assemble_candidates_from_projected_tys(obligation, &mut candidates); self.assemble_candidates_from_caller_bounds(stack, &mut candidates)?; // Auto implementations have lower priority, so we only // consider triggering a default if there is no other impl that can apply. if candidates.vec.is_empty() { self.assemble_candidates_from_auto_impls(obligation, &mut candidates)?; } debug!("candidate list size: {}", candidates.vec.len()); Ok(candidates) } fn assemble_candidates_from_projected_tys(&mut self, obligation: &TraitObligation<'tcx>, candidates: &mut SelectionCandidateSet<'tcx>) { debug!("assemble_candidates_for_projected_tys({:?})", obligation); // before we go into the whole skolemization thing, just // quickly check if the self-type is a projection at all. match obligation.predicate.0.trait_ref.self_ty().sty { ty::TyProjection(_) | ty::TyAnon(..) 
            => {}
            ty::TyInfer(ty::TyVar(_)) => {
                // `Self = _` was filtered out earlier; reaching here is a compiler bug.
                span_bug!(obligation.cause.span,
                    "Self=_ should have been handled by assemble_candidates");
            }
            _ => return
        }

        let result = self.probe(|this, snapshot| {
            this.match_projection_obligation_against_definition_bounds(obligation,
                                                                       snapshot)
        });

        if result {
            candidates.vec.push(ProjectionCandidate);
        }
    }

    /// Checks whether one of the bounds declared on the projection's (or
    /// `impl Trait`'s) definition implies the obligation. Returns true if a
    /// matching bound was found (and the match was replayed outside a probe).
    fn match_projection_obligation_against_definition_bounds(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        snapshot: &infer::CombinedSnapshot<'cx, 'tcx>)
        -> bool
    {
        let poly_trait_predicate =
            self.infcx().resolve_type_vars_if_possible(&obligation.predicate);
        let (skol_trait_predicate, skol_map) =
            self.infcx().skolemize_late_bound_regions(&poly_trait_predicate, snapshot);
        debug!("match_projection_obligation_against_definition_bounds: \
                skol_trait_predicate={:?} skol_map={:?}",
               skol_trait_predicate,
               skol_map);

        let (def_id, substs) = match skol_trait_predicate.trait_ref.self_ty().sty {
            ty::TyProjection(ref data) =>
                (data.trait_ref(self.tcx()).def_id, data.substs),
            ty::TyAnon(def_id, substs) => (def_id, substs),
            _ => {
                span_bug!(
                    obligation.cause.span,
                    "match_projection_obligation_against_definition_bounds() called \
                     but self-ty not a projection: {:?}",
                    skol_trait_predicate.trait_ref.self_ty());
            }
        };
        debug!("match_projection_obligation_against_definition_bounds: \
                def_id={:?}, substs={:?}",
               def_id, substs);

        let predicates_of = self.tcx().predicates_of(def_id);
        let bounds = predicates_of.instantiate(self.tcx(), substs);
        debug!("match_projection_obligation_against_definition_bounds: \
                bounds={:?}",
               bounds);

        // Search the elaborated bounds (in a probe, so failures leave no trace)
        // for one that unifies with the skolemized obligation.
        let matching_bound =
            util::elaborate_predicates(self.tcx(), bounds.predicates)
            .filter_to_traits()
            .find(
                |bound| self.probe(
                    |this, _| this.match_projection(obligation,
                                                    bound.clone(),
                                                    skol_trait_predicate.trait_ref.clone(),
                                                    &skol_map,
                                                    snapshot)));

        debug!("match_projection_obligation_against_definition_bounds: \
                matching_bound={:?}",
               matching_bound);
        match matching_bound {
            None => false,
            Some(bound) => {
                // Repeat the successful match, if any, this time outside of a probe.
                let result = self.match_projection(obligation,
                                                   bound,
                                                   skol_trait_predicate.trait_ref.clone(),
                                                   &skol_map,
                                                   snapshot);

                self.infcx.pop_skolemized(skol_map, snapshot);

                assert!(result);
                true
            }
        }
    }

    /// Returns true if `trait_bound` can be shown to hold for the skolemized
    /// trait-ref (via subtyping) and the leak check passes.
    fn match_projection(&mut self,
                        obligation: &TraitObligation<'tcx>,
                        trait_bound: ty::PolyTraitRef<'tcx>,
                        skol_trait_ref: ty::TraitRef<'tcx>,
                        skol_map: &infer::SkolemizationMap<'tcx>,
                        snapshot: &infer::CombinedSnapshot<'cx, 'tcx>)
                        -> bool
    {
        assert!(!skol_trait_ref.has_escaping_regions());
        if let Err(_) = self.infcx.at(&obligation.cause, obligation.param_env)
                                  .sup(ty::Binder(skol_trait_ref), trait_bound) {
            return false;
        }

        self.infcx.leak_check(false, obligation.cause.span, skol_map, snapshot).is_ok()
    }

    /// Given an obligation like `<SomeTrait for T>`, search the obligations that the caller
    /// supplied to find out whether it is listed among them.
    ///
    /// Never affects inference environment.
    fn assemble_candidates_from_caller_bounds<'o>(&mut self,
                                                  stack: &TraitObligationStack<'o, 'tcx>,
                                                  candidates: &mut SelectionCandidateSet<'tcx>)
                                                  -> Result<(),SelectionError<'tcx>>
    {
        debug!("assemble_candidates_from_caller_bounds({:?})",
               stack.obligation);

        let all_bounds =
            stack.obligation.param_env.caller_bounds
                                      .iter()
                                      .filter_map(|o| o.to_opt_poly_trait_ref());

        // micro-optimization: filter out predicates relating to different
        // traits.
        let matching_bounds =
            all_bounds.filter(|p| p.def_id() == stack.obligation.predicate.def_id());

        // Keep only where-clauses that could plausibly apply (evaluated in a probe).
        let matching_bounds =
            matching_bounds.filter(
                |bound| self.evaluate_where_clause(stack, bound.clone()).may_apply());

        let param_candidates =
            matching_bounds.map(|bound| ParamCandidate(bound));

        candidates.vec.extend(param_candidates);

        Ok(())
    }

    /// Evaluates (inside a probe, so with no lasting inference effects)
    /// whether the given where-clause trait-ref could satisfy the obligation
    /// at the top of `stack`.
    fn evaluate_where_clause<'o>(&mut self,
                                 stack: &TraitObligationStack<'o, 'tcx>,
                                 where_clause_trait_ref: ty::PolyTraitRef<'tcx>)
                                 -> EvaluationResult
    {
        self.probe(move |this, _| {
            match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
                Ok(obligations) => {
                    this.evaluate_predicates_recursively(stack.list(), obligations.iter())
                }
                Err(()) => EvaluatedToErr
            }
        })
    }

    /// Adds a `GeneratorCandidate` when the obligation is the `Generator`
    /// lang-item trait and the self-type is a generator.
    fn assemble_generator_candidates(&mut self,
                                     obligation: &TraitObligation<'tcx>,
                                     candidates: &mut SelectionCandidateSet<'tcx>)
                                     -> Result<(),SelectionError<'tcx>>
    {
        if self.tcx().lang_items().gen_trait() != Some(obligation.predicate.def_id()) {
            return Ok(());
        }

        // ok to skip binder because the substs on generator types never
        // touch bound regions, they just capture the in-scope
        // type/region parameters
        let self_ty = *obligation.self_ty().skip_binder();
        match self_ty.sty {
            ty::TyGenerator(..) => {
                debug!("assemble_generator_candidates: self_ty={:?} obligation={:?}",
                       self_ty,
                       obligation);

                candidates.vec.push(GeneratorCandidate);
                Ok(())
            }
            ty::TyInfer(ty::TyVar(_)) => {
                debug!("assemble_generator_candidates: ambiguous self-type");
                candidates.ambiguous = true;
                return Ok(());
            }
            _ => { return Ok(()); }
        }
    }

    /// Check for the artificial impl that the compiler will create for an obligation like `X :
    /// FnMut<..>` where `X` is a closure type.
    ///
    /// Note: the type parameters on a closure candidate are modeled as *output* type
    /// parameters and hence do not affect whether this trait is a match or not. They will be
    /// unified during the confirmation step.
    fn assemble_closure_candidates(&mut self,
                                   obligation: &TraitObligation<'tcx>,
                                   candidates: &mut SelectionCandidateSet<'tcx>)
                                   -> Result<(),SelectionError<'tcx>>
    {
        let kind = match self.tcx().lang_items().fn_trait_kind(obligation.predicate.0.def_id()) {
            Some(k) => k,
            None => { return Ok(()); }
        };

        // ok to skip binder because the substs on closure types never
        // touch bound regions, they just capture the in-scope
        // type/region parameters
        match obligation.self_ty().skip_binder().sty {
            ty::TyClosure(closure_def_id, closure_substs) => {
                debug!("assemble_unboxed_candidates: kind={:?} obligation={:?}",
                       kind, obligation);
                match self.infcx.closure_kind(closure_def_id, closure_substs) {
                    Some(closure_kind) => {
                        debug!("assemble_unboxed_candidates: closure_kind = {:?}", closure_kind);
                        // e.g. an `Fn` closure satisfies `FnMut` and `FnOnce` too.
                        if closure_kind.extends(kind) {
                            candidates.vec.push(ClosureCandidate);
                        }
                    }
                    None => {
                        // Kind not inferred yet: optimistically add the candidate;
                        // confirmation will check the kind once known.
                        debug!("assemble_unboxed_candidates: closure_kind not yet known");
                        candidates.vec.push(ClosureCandidate);
                    }
                };
                Ok(())
            }
            ty::TyInfer(ty::TyVar(_)) => {
                debug!("assemble_unboxed_closure_candidates: ambiguous self-type");
                candidates.ambiguous = true;
                return Ok(());
            }
            _ => { return Ok(()); }
        }
    }

    /// Implement one of the `Fn()` family for a fn pointer.
    fn assemble_fn_pointer_candidates(&mut self,
                                      obligation: &TraitObligation<'tcx>,
                                      candidates: &mut SelectionCandidateSet<'tcx>)
                                      -> Result<(),SelectionError<'tcx>>
    {
        // We provide impl of all fn traits for fn pointers.
        if self.tcx().lang_items().fn_trait_kind(obligation.predicate.def_id()).is_none() {
            return Ok(());
        }

        // ok to skip binder because what we are inspecting doesn't involve bound regions
        let self_ty = *obligation.self_ty().skip_binder();
        match self_ty.sty {
            ty::TyInfer(ty::TyVar(_)) => {
                debug!("assemble_fn_pointer_candidates: ambiguous self-type");
                candidates.ambiguous = true; // could wind up being a fn() type
            }

            // provide an impl, but only for suitable `fn` pointers
            ty::TyFnDef(..)
            | ty::TyFnPtr(_) => {
                // Only safe, Rust-ABI, non-variadic fn pointers get the builtin impl.
                if let ty::Binder(ty::FnSig {
                    unsafety: hir::Unsafety::Normal,
                    abi: Abi::Rust,
                    variadic: false,
                    ..
                }) = self_ty.fn_sig(self.tcx()) {
                    candidates.vec.push(FnPointerCandidate);
                }
            }

            _ => { }
        }

        Ok(())
    }

    /// Search for impls that might apply to `obligation`.
    fn assemble_candidates_from_impls(&mut self,
                                      obligation: &TraitObligation<'tcx>,
                                      candidates: &mut SelectionCandidateSet<'tcx>)
                                      -> Result<(), SelectionError<'tcx>>
    {
        debug!("assemble_candidates_from_impls(obligation={:?})", obligation);

        self.tcx().for_each_relevant_impl(
            obligation.predicate.def_id(),
            obligation.predicate.0.trait_ref.self_ty(),
            |impl_def_id| {
                self.probe(|this, snapshot| { /* [1] */
                    match this.match_impl(impl_def_id, obligation, snapshot) {
                        Ok(skol_map) => {
                            candidates.vec.push(ImplCandidate(impl_def_id));

                            // NB: we can safely drop the skol map
                            // since we are in a probe [1]
                            mem::drop(skol_map);
                        }
                        Err(_) => { }
                    }
                });
            }
        );

        Ok(())
    }

    /// Adds an `AutoImplCandidate` for auto traits (e.g. `Send`/`Sync`) when
    /// the self-type's constituent types can be enumerated.
    fn assemble_candidates_from_auto_impls(&mut self,
                                           obligation: &TraitObligation<'tcx>,
                                           candidates: &mut SelectionCandidateSet<'tcx>)
                                           -> Result<(), SelectionError<'tcx>>
    {
        // OK to skip binder here because the tests we do below do not involve bound regions
        let self_ty = *obligation.self_ty().skip_binder();
        debug!("assemble_candidates_from_auto_impls(self_ty={:?})", self_ty);

        let def_id = obligation.predicate.def_id();

        if self.tcx().trait_is_auto(def_id) {
            match self_ty.sty {
                ty::TyDynamic(..) => {
                    // For object types, we don't know what the closed
                    // over types are. This means we conservatively
                    // say nothing; a candidate may be added by
                    // `assemble_candidates_from_object_ty`.
                }
                ty::TyForeign(..) => {
                    // Since the contents of foreign types is unknown,
                    // we don't add any `..` impl. Default traits could
                    // still be provided by a manual implementation for
                    // this trait and type.
                }
                ty::TyParam(..) |
                ty::TyProjection(..) => {
                    // In these cases, we don't know what the actual
                    // type is. Therefore, we cannot break it down
                    // into its constituent types. So we don't
                    // consider the `..` impl but instead just add no
                    // candidates: this means that typeck will only
                    // succeed if there is another reason to believe
                    // that this obligation holds. That could be a
                    // where-clause or, in the case of an object type,
                    // it could be that the object type lists the
                    // trait (e.g. `Foo+Send : Send`). See
                    // `compile-fail/typeck-default-trait-impl-send-param.rs`
                    // for an example of a test case that exercises
                    // this path.
                }
                ty::TyInfer(ty::TyVar(_)) => {
                    // the auto impl might apply, we don't know
                    candidates.ambiguous = true;
                }
                _ => {
                    candidates.vec.push(AutoImplCandidate(def_id.clone()))
                }
            }
        }

        Ok(())
    }

    /// Search for impls that might apply to `obligation`.
    fn assemble_candidates_from_object_ty(&mut self,
                                          obligation: &TraitObligation<'tcx>,
                                          candidates: &mut SelectionCandidateSet<'tcx>)
    {
        debug!("assemble_candidates_from_object_ty(self_ty={:?})",
               obligation.self_ty().skip_binder());

        // Object-safety candidates are only applicable to object-safe
        // traits. Including this check is useful because it helps
        // inference in cases of traits like `BorrowFrom`, which are
        // not object-safe, and which rely on being able to infer the
        // self-type from one of the other inputs. Without this check,
        // these cases wind up being considered ambiguous due to a
        // (spurious) ambiguity introduced here.
        let predicate_trait_ref = obligation.predicate.to_poly_trait_ref();
        if !self.tcx().is_object_safe(predicate_trait_ref.def_id()) {
            return;
        }

        self.probe(|this, _snapshot| {
            // the code below doesn't care about regions, and the
            // self-ty here doesn't escape this probe, so just erase
            // any LBR.
            let self_ty = this.tcx().erase_late_bound_regions(&obligation.self_ty());
            let poly_trait_ref = match self_ty.sty {
                ty::TyDynamic(ref data, ..)
                => {
                    // The object type directly lists the auto trait
                    // (e.g. `Foo+Send : Send`) -- no principal needed.
                    if data.auto_traits().any(|did| did == obligation.predicate.def_id()) {
                        debug!("assemble_candidates_from_object_ty: matched builtin bound, \
                                pushing candidate");
                        candidates.vec.push(BuiltinObjectCandidate);
                        return;
                    }

                    match data.principal() {
                        Some(p) => p.with_self_ty(this.tcx(), self_ty),
                        None => return,
                    }
                }
                ty::TyInfer(ty::TyVar(_)) => {
                    debug!("assemble_candidates_from_object_ty: ambiguous");
                    candidates.ambiguous = true; // could wind up being an object type
                    return;
                }
                _ => {
                    return;
                }
            };

            debug!("assemble_candidates_from_object_ty: poly_trait_ref={:?}",
                   poly_trait_ref);

            // Count only those upcast versions that match the trait-ref
            // we are looking for. Specifically, do not only check for the
            // correct trait, but also the correct type parameters.
            // For example, we may be trying to upcast `Foo` to `Bar<i32>`,
            // but `Foo` is declared as `trait Foo : Bar<u32>`.
            let upcast_trait_refs =
                util::supertraits(this.tcx(), poly_trait_ref)
                .filter(|upcast_trait_ref| {
                    this.probe(|this, _| {
                        let upcast_trait_ref = upcast_trait_ref.clone();
                        this.match_poly_trait_ref(obligation, upcast_trait_ref).is_ok()
                    })
                })
                .count();

            if upcast_trait_refs > 1 {
                // can be upcast in many ways; need more type information
                candidates.ambiguous = true;
            } else if upcast_trait_refs == 1 {
                candidates.vec.push(ObjectCandidate);
            }
        })
    }

    /// Search for unsizing that might apply to `obligation`.
    fn assemble_candidates_for_unsizing(&mut self,
                                        obligation: &TraitObligation<'tcx>,
                                        candidates: &mut SelectionCandidateSet<'tcx>) {
        // We currently never consider higher-ranked obligations e.g.
        // `for<'a> &'a T: Unsize<Trait+'a>` to be implemented. This is not
        // because they are a priori invalid, and we could potentially add support
        // for them later, it's just that there isn't really a strong need for it.
        // A `T: Unsize<U>` obligation is always used as part of a `T: CoerceUnsize<U>`
        // impl, and those are generally applied to concrete types.
        //
        // That said, one might try to write a fn with a where clause like
        //     for<'a> Foo<'a, T>: Unsize<Foo<'a, Trait>>
        // where the `'a` is kind of orthogonal to the relevant part of the `Unsize`.
        // Still, you'd be more likely to write that where clause as
        //     T: Trait
        // so it seems ok if we (conservatively) fail to accept that `Unsize`
        // obligation above. Should be possible to extend this in the future.
        let source = match obligation.self_ty().no_late_bound_regions() {
            Some(t) => t,
            None => {
                // Don't add any candidates if there are bound regions.
                return;
            }
        };
        let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1);

        debug!("assemble_candidates_for_unsizing(source={:?}, target={:?})",
               source, target);

        let may_apply = match (&source.sty, &target.sty) {
            // Trait+Kx+'a -> Trait+Ky+'b (upcasts).
            (&ty::TyDynamic(ref data_a, ..), &ty::TyDynamic(ref data_b, ..)) => {
                // Upcasts permit two things:
                //
                // 1. Dropping builtin bounds, e.g. `Foo+Send` to `Foo`
                // 2. Tightening the region bound, e.g. `Foo+'a` to `Foo+'b` if `'a : 'b`
                //
                // Note that neither of these changes requires any
                // change at runtime. Eventually this will be
                // generalized.
                //
                // We always upcast when we can because of reason
                // #2 (region bounds).
                match (data_a.principal(), data_b.principal()) {
                    (Some(a), Some(b)) => a.def_id() == b.def_id() &&
                        data_b.auto_traits()
                            // All of a's auto traits need to be in b's auto traits.
                            .all(|b| data_a.auto_traits().any(|a| a == b)),
                    _ => false
                }
            }

            // T -> Trait.
            (_, &ty::TyDynamic(..)) => true,

            // Ambiguous handling is below T -> Trait, because inference
            // variables can still implement Unsize<Trait> and nested
            // obligations will have the final say (likely deferred).
            (&ty::TyInfer(ty::TyVar(_)), _) |
            (_, &ty::TyInfer(ty::TyVar(_))) => {
                debug!("assemble_candidates_for_unsizing: ambiguous");
                candidates.ambiguous = true;
                false
            }

            // [T; n] -> [T].
            (&ty::TyArray(..), &ty::TySlice(_)) => true,

            // Struct<T> -> Struct<U>.
            (&ty::TyAdt(def_id_a, _), &ty::TyAdt(def_id_b, _)) if def_id_a.is_struct() => {
                def_id_a == def_id_b
            }

            // (.., T) -> (.., U).
            (&ty::TyTuple(tys_a), &ty::TyTuple(tys_b)) => {
                tys_a.len() == tys_b.len()
            }

            _ => false
        };

        if may_apply {
            candidates.vec.push(BuiltinUnsizeCandidate);
        }
    }

    ///////////////////////////////////////////////////////////////////////////
    // WINNOW
    //
    // Winnowing is the process of attempting to resolve ambiguity by
    // probing further. During the winnowing process, we unify all
    // type variables (ignoring skolemization) and then we also
    // attempt to evaluate recursive bounds to see if they are
    // satisfied.

    /// Returns true if `victim` should be dropped in favor of
    /// `other`. Generally speaking we will drop duplicate
    /// candidates and prefer where-clause candidates.
    ///
    /// See the comment for "SelectionCandidate" for more details.
    fn candidate_should_be_dropped_in_favor_of<'o>(
        &mut self,
        victim: &EvaluatedCandidate<'tcx>,
        other: &EvaluatedCandidate<'tcx>)
        -> bool
    {
        // Identical candidates are trivially duplicates.
        if victim.candidate == other.candidate {
            return true;
        }

        match other.candidate {
            ObjectCandidate |
            ParamCandidate(_) | ProjectionCandidate => match victim.candidate {
                AutoImplCandidate(..) => {
                    bug!(
                        "default implementations shouldn't be recorded \
                         when there are other valid candidates");
                }
                ImplCandidate(..) |
                ClosureCandidate |
                GeneratorCandidate |
                FnPointerCandidate |
                BuiltinObjectCandidate |
                BuiltinUnsizeCandidate |
                BuiltinCandidate { .. } => {
                    // We have a where-clause so don't go around looking
                    // for impls.
                    true
                }
                ObjectCandidate |
                ProjectionCandidate => {
                    // Arbitrarily give param candidates priority
                    // over projection and object candidates.
                    true
                },
                ParamCandidate(..) => false,
            },
            ImplCandidate(other_def) => {
                // See if we can toss out `victim` based on specialization.
                // This requires us to know *for sure* that the `other` impl applies
                // i.e. EvaluatedToOk:
                if other.evaluation == EvaluatedToOk {
                    if let ImplCandidate(victim_def) = victim.candidate {
                        let tcx = self.tcx().global_tcx();
                        return tcx.specializes((other_def, victim_def)) ||
                            tcx.impls_are_allowed_to_overlap(other_def, victim_def);
                    }
                }

                false
            },
            _ => false
        }
    }

    ///////////////////////////////////////////////////////////////////////////
    // BUILTIN BOUNDS
    //
    // These cover the traits that are built-in to the language
    // itself. This includes `Copy` and `Sized` for sure. For the
    // moment, it also includes `Send` / `Sync` and a few others, but
    // those will hopefully change to library-defined traits in the
    // future.

    // HACK: if this returns an error, selection exits without considering
    // other impls.
    fn assemble_builtin_bound_candidates<'o>(&mut self,
                                             conditions: BuiltinImplConditions<'tcx>,
                                             candidates: &mut SelectionCandidateSet<'tcx>)
                                             -> Result<(),SelectionError<'tcx>>
    {
        match conditions {
            BuiltinImplConditions::Where(nested) => {
                debug!("builtin_bound: nested={:?}", nested);
                candidates.vec.push(BuiltinCandidate {
                    has_nested: nested.skip_binder().len() > 0
                });
                Ok(())
            }
            BuiltinImplConditions::None => { Ok(()) }
            BuiltinImplConditions::Ambiguous => {
                debug!("assemble_builtin_bound_candidates: ambiguous builtin");
                Ok(candidates.ambiguous = true)
            }
            BuiltinImplConditions::Never => { Err(Unimplemented) }
        }
    }

    /// Computes the builtin-impl conditions for `Sized` given the
    /// obligation's (shallowly resolved) self-type.
    fn sized_conditions(&mut self, obligation: &TraitObligation<'tcx>)
                        -> BuiltinImplConditions<'tcx>
    {
        use self::BuiltinImplConditions::{Ambiguous, None, Never, Where};

        // NOTE: binder moved to (*)
        let self_ty = self.infcx.shallow_resolve(
            obligation.predicate.skip_binder().self_ty());

        match self_ty.sty {
            ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) |
            ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) |
            ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyRawPtr(..) |
            ty::TyChar | ty::TyRef(..) | ty::TyGenerator(..) |
            ty::TyGeneratorWitness(..) | ty::TyArray(..)
            | ty::TyClosure(..) |
            ty::TyNever | ty::TyError => {
                // safe for everything
                Where(ty::Binder(Vec::new()))
            }

            ty::TyStr | ty::TySlice(_) | ty::TyDynamic(..) | ty::TyForeign(..) => Never,

            ty::TyTuple(tys) => {
                // A tuple is Sized iff its last element is Sized.
                Where(ty::Binder(tys.last().into_iter().cloned().collect()))
            }

            ty::TyAdt(def, substs) => {
                let sized_crit = def.sized_constraint(self.tcx());
                // (*) binder moved here
                Where(ty::Binder(
                    sized_crit.iter().map(|ty| ty.subst(self.tcx(), substs)).collect()
                ))
            }

            ty::TyProjection(_) | ty::TyParam(_) | ty::TyAnon(..) => None,
            ty::TyInfer(ty::TyVar(_)) => Ambiguous,

            ty::TyInfer(ty::CanonicalTy(_)) |
            ty::TyInfer(ty::FreshTy(_)) |
            ty::TyInfer(ty::FreshIntTy(_)) |
            ty::TyInfer(ty::FreshFloatTy(_)) => {
                bug!("asked to assemble builtin bounds of unexpected type: {:?}",
                     self_ty);
            }
        }
    }

    /// Computes the builtin-impl conditions shared by `Copy` and `Clone`
    /// given the obligation's (shallowly resolved) self-type.
    fn copy_clone_conditions(&mut self, obligation: &TraitObligation<'tcx>)
                             -> BuiltinImplConditions<'tcx>
    {
        // NOTE: binder moved to (*)
        let self_ty = self.infcx.shallow_resolve(
            obligation.predicate.skip_binder().self_ty());

        use self::BuiltinImplConditions::{Ambiguous, None, Never, Where};

        match self_ty.sty {
            ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) |
            ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) |
            ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar |
            ty::TyRawPtr(..) | ty::TyError | ty::TyNever |
            ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => {
                Where(ty::Binder(Vec::new()))
            }

            ty::TyDynamic(..) | ty::TyStr | ty::TySlice(..) |
            ty::TyGenerator(..) | ty::TyGeneratorWitness(..) | ty::TyForeign(..)
            | ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => {
                Never
            }

            ty::TyArray(element_ty, _) => {
                // (*) binder moved here
                Where(ty::Binder(vec![element_ty]))
            }

            ty::TyTuple(tys) => {
                // (*) binder moved here
                Where(ty::Binder(tys.to_vec()))
            }

            ty::TyClosure(def_id, substs) => {
                let trait_id = obligation.predicate.def_id();
                let is_copy_trait = Some(trait_id) == self.tcx().lang_items().copy_trait();
                let is_clone_trait = Some(trait_id) == self.tcx().lang_items().clone_trait();
                if is_copy_trait || is_clone_trait {
                    // A closure is Copy/Clone iff all its captured upvars are.
                    Where(ty::Binder(substs.upvar_tys(def_id, self.tcx()).collect()))
                } else {
                    Never
                }
            }

            ty::TyAdt(..) | ty::TyProjection(..) | ty::TyParam(..) | ty::TyAnon(..) => {
                // Fallback to whatever user-defined impls exist in this case.
                None
            }

            ty::TyInfer(ty::TyVar(_)) => {
                // Unbound type variable. Might or might not have
                // applicable impls and so forth, depending on what
                // those type variables wind up being bound to.
                Ambiguous
            }

            ty::TyInfer(ty::CanonicalTy(_)) |
            ty::TyInfer(ty::FreshTy(_)) |
            ty::TyInfer(ty::FreshIntTy(_)) |
            ty::TyInfer(ty::FreshFloatTy(_)) => {
                bug!("asked to assemble builtin bounds of unexpected type: {:?}",
                     self_ty);
            }
        }
    }

    /// For default impls, we need to break apart a type into its
    /// "constituent types" -- meaning, the types that it contains.
    ///
    /// Here are some (simple) examples:
    ///
    /// ```
    /// (i32, u32) -> [i32, u32]
    /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
    /// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
    /// Zed<i32> where enum Zed { A(T), B(u32) } -> [i32, u32]
    /// ```
    fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec<Ty<'tcx>> {
        match t.sty {
            ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) |
            ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyStr | ty::TyError |
            ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) |
            ty::TyNever | ty::TyChar => {
                Vec::new()
            }

            ty::TyDynamic(..) |
            ty::TyParam(..) |
            ty::TyForeign(..) |
            ty::TyProjection(..)
            | ty::TyInfer(ty::CanonicalTy(_)) |
            ty::TyInfer(ty::TyVar(_)) |
            ty::TyInfer(ty::FreshTy(_)) |
            ty::TyInfer(ty::FreshIntTy(_)) |
            ty::TyInfer(ty::FreshFloatTy(_)) => {
                bug!("asked to assemble constituent types of unexpected type: {:?}",
                     t);
            }

            ty::TyRawPtr(ty::TypeAndMut { ty: element_ty, ..}) |
            ty::TyRef(_, ty::TypeAndMut { ty: element_ty, ..}) => {
                vec![element_ty]
            },

            ty::TyArray(element_ty, _) | ty::TySlice(element_ty) => {
                vec![element_ty]
            }

            ty::TyTuple(ref tys) => {
                // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
                tys.to_vec()
            }

            ty::TyClosure(def_id, ref substs) => {
                substs.upvar_tys(def_id, self.tcx()).collect()
            }

            ty::TyGenerator(def_id, ref substs, interior) => {
                // Upvars plus the generator's interior (witness) type.
                substs.upvar_tys(def_id, self.tcx()).chain(iter::once(interior.witness)).collect()
            }

            ty::TyGeneratorWitness(types) => {
                // This is sound because no regions in the witness can refer to
                // the binder outside the witness. So we'll effectively reuse
                // the implicit binder around the witness.
                types.skip_binder().to_vec()
            }

            // for `PhantomData<T>`, we pass `T`
            ty::TyAdt(def, substs) if def.is_phantom_data() => {
                substs.types().collect()
            }

            ty::TyAdt(def, substs) => {
                def.all_fields()
                    .map(|f| f.ty(self.tcx(), substs))
                    .collect()
            }

            ty::TyAnon(def_id, substs) => {
                // We can resolve the `impl Trait` to its concrete type,
                // which enforces a DAG between the functions requiring
                // the auto trait bounds in question.
                vec![self.tcx().type_of(def_id).subst(self.tcx(), substs)]
            }
        }
    }

    /// Produces, for each of `types` (under its binder), the obligation
    /// `<type> : <trait_def_id>`, normalizing each type first. Bound regions
    /// are skolemized per type and plugged back afterwards.
    fn collect_predicates_for_types(&mut self,
                                    param_env: ty::ParamEnv<'tcx>,
                                    cause: ObligationCause<'tcx>,
                                    recursion_depth: usize,
                                    trait_def_id: DefId,
                                    types: ty::Binder<Vec<Ty<'tcx>>>)
                                    -> Vec<PredicateObligation<'tcx>>
    {
        // Because the types were potentially derived from
        // higher-ranked obligations they may reference late-bound
        // regions. For example, `for<'a> Foo<&'a int> : Copy` would
        // yield a type like `for<'a> &'a int`. In general, we
        // maintain the invariant that we never manipulate bound
        // regions, so we have to process these bound regions somehow.
        //
        // The strategy is to:
        //
        // 1. Instantiate those regions to skolemized regions (e.g.,
        //    `for<'a> &'a int` becomes `&0 int`.
        // 2. Produce something like `&'0 int : Copy`
        // 3. Re-bind the regions back to `for<'a> &'a int : Copy`

        types.skip_binder().into_iter().flat_map(|ty| { // binder moved -\
            let ty: ty::Binder<Ty<'tcx>> = ty::Binder(ty); // <----------/

            self.in_snapshot(|this, snapshot| {
                let (skol_ty, skol_map) =
                    this.infcx().skolemize_late_bound_regions(&ty, snapshot);
                let Normalized { value: normalized_ty, mut obligations } =
                    project::normalize_with_depth(this,
                                                  param_env,
                                                  cause.clone(),
                                                  recursion_depth,
                                                  &skol_ty);
                let skol_obligation =
                    this.tcx().predicate_for_trait_def(param_env,
                                                       cause.clone(),
                                                       trait_def_id,
                                                       recursion_depth,
                                                       normalized_ty,
                                                       &[]);
                obligations.push(skol_obligation);
                this.infcx().plug_leaks(skol_map, snapshot, obligations)
            })
        }).collect()
    }

    ///////////////////////////////////////////////////////////////////////////
    // CONFIRMATION
    //
    // Confirmation unifies the output type parameters of the trait
    // with the values found in the obligation, possibly yielding a
    // type error. See [rustc guide] for more details.
    //
    // [rustc guide]:
    // https://rust-lang-nursery.github.io/rustc-guide/trait-resolution.html#confirmation

    /// Turns a winning `candidate` into a full `Selection` by
    /// dispatching to the per-candidate confirmation routine, which
    /// produces the vtable data plus any nested obligations.
    fn confirm_candidate(&mut self,
                         obligation: &TraitObligation<'tcx>,
                         candidate: SelectionCandidate<'tcx>)
                         -> Result<Selection<'tcx>,SelectionError<'tcx>>
    {
        debug!("confirm_candidate({:?}, {:?})",
               obligation,
               candidate);

        match candidate {
            BuiltinCandidate { has_nested } => {
                let data = self.confirm_builtin_candidate(obligation, has_nested);
                Ok(VtableBuiltin(data))
            }

            ParamCandidate(param) => {
                let obligations = self.confirm_param_candidate(obligation, param);
                Ok(VtableParam(obligations))
            }

            AutoImplCandidate(trait_def_id) => {
                let data = self.confirm_auto_impl_candidate(obligation, trait_def_id);
                Ok(VtableAutoImpl(data))
            }

            ImplCandidate(impl_def_id) => {
                Ok(VtableImpl(self.confirm_impl_candidate(obligation, impl_def_id)))
            }

            ClosureCandidate => {
                let vtable_closure = self.confirm_closure_candidate(obligation)?;
                Ok(VtableClosure(vtable_closure))
            }

            GeneratorCandidate => {
                let vtable_generator = self.confirm_generator_candidate(obligation)?;
                Ok(VtableGenerator(vtable_generator))
            }

            BuiltinObjectCandidate => {
                // This indicates something like `(Trait+Send) :
                // Send`. In this case, we know that this holds
                // because that's what the object type is telling us,
                // and there's really no additional obligations to
                // prove and no types in particular to unify etc.
                Ok(VtableParam(Vec::new()))
            }

            ObjectCandidate => {
                let data = self.confirm_object_candidate(obligation);
                Ok(VtableObject(data))
            }

            FnPointerCandidate => {
                let data =
                    self.confirm_fn_pointer_candidate(obligation)?;
                Ok(VtableFnPointer(data))
            }

            ProjectionCandidate => {
                self.confirm_projection_candidate(obligation);
                Ok(VtableParam(Vec::new()))
            }

            BuiltinUnsizeCandidate => {
                let data = self.confirm_builtin_unsize_candidate(obligation)?;
                Ok(VtableBuiltin(data))
            }
        }
    }

    /// Re-runs the projection match, this time outside of a probe so
    /// its unifications take effect. Selection already found the
    /// match applicable, so the match must succeed.
    fn confirm_projection_candidate(&mut self,
                                    obligation: &TraitObligation<'tcx>)
    {
        self.in_snapshot(|this, snapshot| {
            let result =
                this.match_projection_obligation_against_definition_bounds(obligation,
                                                                           snapshot);
            assert!(result);
        })
    }

    /// Confirms a candidate coming from a `where`-clause (`param`),
    /// returning the obligations that its unification with the
    /// obligation trait-ref entails.
    fn confirm_param_candidate(&mut self,
                               obligation: &TraitObligation<'tcx>,
                               param: ty::PolyTraitRef<'tcx>)
                               -> Vec<PredicateObligation<'tcx>>
    {
        debug!("confirm_param_candidate({:?},{:?})",
               obligation,
               param);

        // During evaluation, we already checked that this
        // where-clause trait-ref could be unified with the obligation
        // trait-ref. Repeat that unification now without any
        // transactional boundary; it should not fail.
        match self.match_where_clause_trait_ref(obligation, param.clone()) {
            Ok(obligations) => obligations,
            Err(()) => {
                bug!("Where clause `{:?}` was applicable to `{:?}` but now is not",
                     param,
                     obligation);
            }
        }
    }

    /// Confirms a builtin-trait candidate (`Sized`, `Copy`, `Clone`).
    /// When `has_nested` is set, re-derives the per-constituent-type
    /// conditions and turns them into nested obligations.
    fn confirm_builtin_candidate(&mut self,
                                 obligation: &TraitObligation<'tcx>,
                                 has_nested: bool)
                                 -> VtableBuiltinData<PredicateObligation<'tcx>>
    {
        debug!("confirm_builtin_candidate({:?}, {:?})",
               obligation, has_nested);

        let lang_items = self.tcx().lang_items();
        let obligations = if has_nested {
            let trait_def = obligation.predicate.def_id();
            let conditions = match trait_def {
                _ if Some(trait_def) == lang_items.sized_trait() => {
                    self.sized_conditions(obligation)
                }
                _ if Some(trait_def) == lang_items.copy_trait() => {
                    self.copy_clone_conditions(obligation)
                }
                _ if Some(trait_def) == lang_items.clone_trait() => {
                    self.copy_clone_conditions(obligation)
                }
                _ => bug!("unexpected builtin trait {:?}", trait_def)
            };
            let nested = match conditions {
                BuiltinImplConditions::Where(nested) => nested,
                _ => bug!("obligation {:?} had matched a builtin impl but now doesn't",
                          obligation)
            };

            let cause = obligation.derived_cause(BuiltinDerivedObligation);
            self.collect_predicates_for_types(obligation.param_env,
                                              cause,
                                              obligation.recursion_depth+1,
                                              trait_def,
                                              nested)
        } else {
            vec![]
        };

        debug!("confirm_builtin_candidate: obligations={:?}",
               obligations);

        VtableBuiltinData { nested: obligations }
    }

    /// This handles the case where a `auto trait Foo` impl is being used.
    /// The idea is that the impl applies to `X : Foo` if the following conditions are met:
    ///
    /// 1. For each constituent type `Y` in `X`, `Y : Foo` holds
    /// 2. For each where-clause `C` declared on `Foo`, `[Self => X] C` holds.
    fn confirm_auto_impl_candidate(&mut self,
                                   obligation: &TraitObligation<'tcx>,
                                   trait_def_id: DefId)
                                   -> VtableAutoImplData<PredicateObligation<'tcx>>
    {
        debug!("confirm_auto_impl_candidate({:?}, {:?})",
               obligation,
               trait_def_id);

        // binder is moved below
        let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
        let types = self.constituent_types_for_ty(self_ty);
        self.vtable_auto_impl(obligation, trait_def_id, ty::Binder(types))
    }

    /// See `confirm_auto_impl_candidate`
    fn vtable_auto_impl(&mut self,
                        obligation: &TraitObligation<'tcx>,
                        trait_def_id: DefId,
                        nested: ty::Binder<Vec<Ty<'tcx>>>)
                        -> VtableAutoImplData<PredicateObligation<'tcx>>
    {
        debug!("vtable_auto_impl: nested={:?}", nested);

        // Condition (1): each constituent type must implement the trait.
        let cause = obligation.derived_cause(BuiltinDerivedObligation);
        let mut obligations = self.collect_predicates_for_types(
            obligation.param_env,
            cause,
            obligation.recursion_depth+1,
            trait_def_id,
            nested);

        // Condition (2): the trait's own where-clauses, with `Self`
        // instantiated, must hold.
        let trait_obligations = self.in_snapshot(|this, snapshot| {
            let poly_trait_ref = obligation.predicate.to_poly_trait_ref();
            let (trait_ref, skol_map) =
                this.infcx().skolemize_late_bound_regions(&poly_trait_ref, snapshot);
            let cause = obligation.derived_cause(ImplDerivedObligation);
            this.impl_or_trait_obligations(cause,
                                           obligation.recursion_depth + 1,
                                           obligation.param_env,
                                           trait_def_id,
                                           &trait_ref.substs,
                                           skol_map,
                                           snapshot)
        });

        obligations.extend(trait_obligations);

        debug!("vtable_auto_impl: obligations={:?}", obligations);

        VtableAutoImplData {
            trait_def_id,
            nested: obligations
        }
    }

    /// Confirms selection of the impl `impl_def_id` by rematching it
    /// against the obligation (outside of a probe this time) and
    /// building the resulting vtable data.
    fn confirm_impl_candidate(&mut self,
                              obligation: &TraitObligation<'tcx>,
                              impl_def_id: DefId)
                              -> VtableImplData<'tcx, PredicateObligation<'tcx>>
    {
        debug!("confirm_impl_candidate({:?},{:?})",
               obligation,
               impl_def_id);

        // First, create the substitutions by matching the impl again,
        // this time not in a probe.
        self.in_snapshot(|this, snapshot| {
            let (substs, skol_map) =
                this.rematch_impl(impl_def_id, obligation,
                                  snapshot);
            debug!("confirm_impl_candidate substs={:?}", substs);
            let cause = obligation.derived_cause(ImplDerivedObligation);
            this.vtable_impl(impl_def_id, substs, cause,
                             obligation.recursion_depth + 1,
                             obligation.param_env,
                             skol_map,
                             snapshot)
        })
    }

    /// Builds the `VtableImplData` for `impl_def_id` with the given
    /// (already matched) `substs`: gathers the impl's where-clause
    /// obligations plus the normalization obligations from `substs`.
    fn vtable_impl(&mut self,
                   impl_def_id: DefId,
                   mut substs: Normalized<'tcx, &'tcx Substs<'tcx>>,
                   cause: ObligationCause<'tcx>,
                   recursion_depth: usize,
                   param_env: ty::ParamEnv<'tcx>,
                   skol_map: infer::SkolemizationMap<'tcx>,
                   snapshot: &infer::CombinedSnapshot<'cx, 'tcx>)
                   -> VtableImplData<'tcx, PredicateObligation<'tcx>>
    {
        debug!("vtable_impl(impl_def_id={:?}, substs={:?}, recursion_depth={}, skol_map={:?})",
               impl_def_id,
               substs,
               recursion_depth,
               skol_map);

        let mut impl_obligations =
            self.impl_or_trait_obligations(cause,
                                           recursion_depth,
                                           param_env,
                                           impl_def_id,
                                           &substs.value,
                                           skol_map,
                                           snapshot);

        debug!("vtable_impl: impl_def_id={:?} impl_obligations={:?}",
               impl_def_id,
               impl_obligations);

        // Because of RFC447, the impl-trait-ref and obligations
        // are sufficient to determine the impl substs, without
        // relying on projections in the impl-trait-ref.
        //
        // e.g. `impl<U: Tr, V: Iterator<Item=U>> Foo<<U as Tr>::T> for V`
        impl_obligations.append(&mut substs.obligations);

        VtableImplData { impl_def_id,
                         substs: substs.value,
                         nested: impl_obligations }
    }

    /// Confirms a candidate where the self type is a trait object:
    /// finds the (unique) supertrait of the object's principal that
    /// unifies with the obligation, recording the vtable offset of
    /// the supertraits that were passed over.
    fn confirm_object_candidate(&mut self,
                                obligation: &TraitObligation<'tcx>)
                                -> VtableObjectData<'tcx, PredicateObligation<'tcx>>
    {
        debug!("confirm_object_candidate({:?})",
               obligation);

        // FIXME skipping binder here seems wrong -- we should
        // probably flatten the binder from the obligation and the
        // binder from the object. Have to try to make a broken test
        // case that results. -nmatsakis
        let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
        let poly_trait_ref = match self_ty.sty {
            ty::TyDynamic(ref data, ..) => {
                data.principal().unwrap().with_self_ty(self.tcx(), self_ty)
            }
            _ => {
                span_bug!(obligation.cause.span,
                          "object candidate with non-object");
            }
        };

        let mut upcast_trait_ref = None;
        let mut nested = vec![];
        let vtable_base;

        {
            let tcx = self.tcx();

            // We want to find the first supertrait in the list of
            // supertraits that we can unify with, and do that
            // unification. We know that there is exactly one in the list
            // where we can unify because otherwise select would have
            // reported an ambiguity. (When we do find a match, also
            // record it for later.)
            let nonmatching =
                util::supertraits(tcx, poly_trait_ref)
                .take_while(|&t| {
                    match
                        self.commit_if_ok(
                            |this, _| this.match_poly_trait_ref(obligation, t))
                    {
                        Ok(obligations) => {
                            upcast_trait_ref = Some(t);
                            nested.extend(obligations);
                            false
                        }
                        Err(_) => { true }
                    }
                });

            // Additionally, for each of the nonmatching predicates that
            // we pass over, we sum up the set of number of vtable
            // entries, so that we can compute the offset for the selected
            // trait.
            vtable_base = nonmatching.map(|t| tcx.count_own_vtable_entries(t))
                                     .sum();

        }

        VtableObjectData {
            upcast_trait_ref: upcast_trait_ref.unwrap(),
            vtable_base,
            nested,
        }
    }

    /// Confirms a candidate where the self type is a fn pointer:
    /// builds the corresponding `Fn`-family trait-ref from the fn
    /// signature and unifies it with the obligation's trait-ref.
    fn confirm_fn_pointer_candidate(&mut self, obligation: &TraitObligation<'tcx>)
        -> Result<VtableFnPointerData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>>
    {
        debug!("confirm_fn_pointer_candidate({:?})",
               obligation);

        // ok to skip binder; it is reintroduced below
        let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
        let sig = self_ty.fn_sig(self.tcx());
        let trait_ref =
            self.tcx().closure_trait_ref_and_return_type(obligation.predicate.def_id(),
                                                         self_ty,
                                                         sig,
                                                         util::TupleArgumentsFlag::Yes)
            .map_bound(|(trait_ref, _)| trait_ref);

        let Normalized { value: trait_ref, obligations } =
            project::normalize_with_depth(self,
                                          obligation.param_env,
                                          obligation.cause.clone(),
                                          obligation.recursion_depth + 1,
                                          &trait_ref);

        self.confirm_poly_trait_refs(obligation.cause.clone(),
                                     obligation.param_env,
                                     obligation.predicate.to_poly_trait_ref(),
                                     trait_ref)?;
        Ok(VtableFnPointerData { fn_ty: self_ty, nested: obligations })
    }

    /// Confirms a candidate where the self type is a generator:
    /// derives the generator's trait-ref, normalizes it, and unifies
    /// it with the obligation's trait-ref.
    fn confirm_generator_candidate(&mut self,
                                   obligation: &TraitObligation<'tcx>)
                                   -> Result<VtableGeneratorData<'tcx, PredicateObligation<'tcx>>,
                                             SelectionError<'tcx>>
    {
        // ok to skip binder because the substs on generator types never
        // touch bound regions, they just capture the in-scope
        // type/region parameters
        let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
        let (closure_def_id, substs) = match self_ty.sty {
            ty::TyGenerator(id, substs, _) => (id, substs),
            _ => bug!("closure candidate for non-closure {:?}", obligation)
        };

        debug!("confirm_generator_candidate({:?},{:?},{:?})",
               obligation,
               closure_def_id,
               substs);

        let trait_ref =
            self.generator_trait_ref_unnormalized(obligation, closure_def_id, substs);
        let Normalized {
            value: trait_ref,
            mut obligations
        } = normalize_with_depth(self,
                                 obligation.param_env,
                                 obligation.cause.clone(),
                                 obligation.recursion_depth+1,
                                 &trait_ref);

        debug!("confirm_generator_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})",
               closure_def_id,
               trait_ref,
               obligations);

        obligations.extend(
            self.confirm_poly_trait_refs(obligation.cause.clone(),
                                         obligation.param_env,
                                         obligation.predicate.to_poly_trait_ref(),
                                         trait_ref)?);

        Ok(VtableGeneratorData {
            closure_def_id: closure_def_id,
            substs: substs.clone(),
            nested: obligations
        })
    }

    /// Confirms a candidate where the self type is a closure and the
    /// trait is one of the `Fn`-family traits; also registers the
    /// `ClosureKind` obligation recording which kind was required.
    fn confirm_closure_candidate(&mut self,
                                 obligation: &TraitObligation<'tcx>)
                                 -> Result<VtableClosureData<'tcx, PredicateObligation<'tcx>>,
                                           SelectionError<'tcx>>
    {
        debug!("confirm_closure_candidate({:?})", obligation);

        let kind = match self.tcx().lang_items().fn_trait_kind(obligation.predicate.0.def_id()) {
            Some(k) => k,
            None => bug!("closure candidate for non-fn trait {:?}", obligation)
        };

        // ok to skip binder because the substs on closure types never
        // touch bound regions, they just capture the in-scope
        // type/region parameters
        let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
        let (closure_def_id, substs) = match self_ty.sty {
            ty::TyClosure(id, substs) => (id, substs),
            _ => bug!("closure candidate for non-closure {:?}", obligation)
        };

        let trait_ref =
            self.closure_trait_ref_unnormalized(obligation, closure_def_id, substs);
        let Normalized {
            value: trait_ref,
            mut obligations
        } = normalize_with_depth(self,
                                 obligation.param_env,
                                 obligation.cause.clone(),
                                 obligation.recursion_depth+1,
                                 &trait_ref);

        debug!("confirm_closure_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})",
               closure_def_id,
               trait_ref,
               obligations);

        obligations.extend(
            self.confirm_poly_trait_refs(obligation.cause.clone(),
                                         obligation.param_env,
                                         obligation.predicate.to_poly_trait_ref(),
                                         trait_ref)?);

        obligations.push(Obligation::new(
            obligation.cause.clone(),
            obligation.param_env,
            ty::Predicate::ClosureKind(closure_def_id, substs, kind)));

        Ok(VtableClosureData {
            closure_def_id,
            substs: substs.clone(),
            nested: obligations
        })
    }

    /// In the case of closure types and fn pointers,
    /// we currently treat the input type parameters on the trait as
    /// outputs. This means that when we have a match we have only
    /// considered the self type, so we have to go back and make sure
    /// to relate the argument types too. This is kind of wrong, but
    /// since we control the full set of impls, also not that wrong,
    /// and it DOES yield better error messages (since we don't report
    /// errors as if there is no applicable impl, but rather report
    /// errors about mismatched argument types).
    ///
    /// Here is an example. Imagine we have a closure expression
    /// and we desugared it so that the type of the expression is
    /// `Closure`, and `Closure` expects an int as argument. Then it
    /// is "as if" the compiler generated this impl:
    ///
    ///     impl Fn(int) for Closure { ... }
    ///
    /// Now imagine our obligation is `Fn(usize) for Closure`. So far
    /// we have matched the self-type `Closure`. At this point we'll
    /// compare the `int` to `usize` and generate an error.
    ///
    /// Note that this checking occurs *after* the impl has selected,
    /// because these output type parameters should not affect the
    /// selection of the impl. Therefore, if there is a mismatch, we
    /// report an error to the user.
    fn confirm_poly_trait_refs(&mut self,
                               obligation_cause: ObligationCause<'tcx>,
                               obligation_param_env: ty::ParamEnv<'tcx>,
                               obligation_trait_ref: ty::PolyTraitRef<'tcx>,
                               expected_trait_ref: ty::PolyTraitRef<'tcx>)
                               -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>>
    {
        let obligation_trait_ref = obligation_trait_ref.clone();
        self.infcx
            .at(&obligation_cause, obligation_param_env)
            .sup(obligation_trait_ref, expected_trait_ref)
            .map(|InferOk { obligations, ..
            }| obligations)
            .map_err(|e| OutputTypeParameterMismatch(expected_trait_ref, obligation_trait_ref, e))
    }

    /// Confirms a builtin `Unsize` candidate. The supported coercions
    /// are visible in the match below: trait-object upcasts,
    /// `T -> dyn Trait`, `[T; n] -> [T]`, struct tail unsizing, and
    /// tuple tail unsizing.
    fn confirm_builtin_unsize_candidate(&mut self,
                                        obligation: &TraitObligation<'tcx>,)
                                        -> Result<VtableBuiltinData<PredicateObligation<'tcx>>,
                                                  SelectionError<'tcx>>
    {
        let tcx = self.tcx();

        // assemble_candidates_for_unsizing should ensure there are no late bound
        // regions here. See the comment there for more details.
        let source = self.infcx.shallow_resolve(
            obligation.self_ty().no_late_bound_regions().unwrap());
        let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1);
        let target = self.infcx.shallow_resolve(target);

        debug!("confirm_builtin_unsize_candidate(source={:?}, target={:?})",
               source, target);

        let mut nested = vec![];
        match (&source.sty, &target.sty) {
            // Trait+Kx+'a -> Trait+Ky+'b (upcasts).
            (&ty::TyDynamic(ref data_a, r_a), &ty::TyDynamic(ref data_b, r_b)) => {
                // See assemble_candidates_for_unsizing for more info.
                // Binders reintroduced below in call to mk_existential_predicates.
                let principal = data_a.skip_binder().principal();
                let iter = principal.into_iter().map(ty::ExistentialPredicate::Trait)
                    .chain(data_a.skip_binder().projection_bounds()
                           .map(|x| ty::ExistentialPredicate::Projection(x)))
                    .chain(data_b.auto_traits().map(ty::ExistentialPredicate::AutoTrait));
                let new_trait = tcx.mk_dynamic(
                    ty::Binder(tcx.mk_existential_predicates(iter)), r_b);
                let InferOk { obligations, .. } =
                    self.infcx.at(&obligation.cause, obligation.param_env)
                              .eq(target, new_trait)
                              .map_err(|_| Unimplemented)?;
                nested.extend(obligations);

                // Register one obligation for 'a: 'b.
                let cause = ObligationCause::new(obligation.cause.span,
                                                 obligation.cause.body_id,
                                                 ObjectCastObligation(target));
                let outlives = ty::OutlivesPredicate(r_a, r_b);
                nested.push(Obligation::with_depth(cause,
                                                   obligation.recursion_depth + 1,
                                                   obligation.param_env,
                                                   ty::Binder(outlives).to_predicate()));
            }

            // T -> Trait.
            (_, &ty::TyDynamic(ref data, r)) => {
                // All traits mentioned in the object type must be object-safe.
                let mut object_dids =
                    data.auto_traits().chain(data.principal().map(|p| p.def_id()));
                if let Some(did) = object_dids.find(|did| {
                    !tcx.is_object_safe(*did)
                }) {
                    return Err(TraitNotObjectSafe(did))
                }

                let cause = ObligationCause::new(obligation.cause.span,
                                                 obligation.cause.body_id,
                                                 ObjectCastObligation(target));
                let mut push = |predicate| {
                    nested.push(Obligation::with_depth(cause.clone(),
                                                       obligation.recursion_depth + 1,
                                                       obligation.param_env,
                                                       predicate));
                };

                // Create obligations:
                //  - Casting T to Trait
                //  - For all the various builtin bounds attached to the object cast. (In other
                //  words, if the object type is Foo+Send, this would create an obligation for the
                //  Send check.)
                //  - Projection predicates
                for predicate in data.iter() {
                    push(predicate.with_self_ty(tcx, source));
                }

                // We can only make objects from sized types.
                let tr = ty::TraitRef {
                    def_id: tcx.require_lang_item(lang_items::SizedTraitLangItem),
                    substs: tcx.mk_substs_trait(source, &[]),
                };
                push(tr.to_predicate());

                // If the type is `Foo+'a`, ensures that the type
                // being cast to `Foo+'a` outlives `'a`:
                let outlives = ty::OutlivesPredicate(source, r);
                push(ty::Binder(outlives).to_predicate());
            }

            // [T; n] -> [T].
            (&ty::TyArray(a, _), &ty::TySlice(b)) => {
                let InferOk { obligations, .. } =
                    self.infcx.at(&obligation.cause, obligation.param_env)
                              .eq(b, a)
                              .map_err(|_| Unimplemented)?;
                nested.extend(obligations);
            }

            // Struct<T> -> Struct<U>.
            (&ty::TyAdt(def, substs_a), &ty::TyAdt(_, substs_b)) => {
                let fields = def
                    .all_fields()
                    .map(|f| tcx.type_of(f.did))
                    .collect::<Vec<_>>();

                // The last field of the structure has to exist and contain type parameters.
                let field = if let Some(&field) = fields.last() {
                    field
                } else {
                    return Err(Unimplemented);
                };
                // Collect the indices of the type parameters that appear
                // in the (unsizable) tail field.
                let mut ty_params = BitVector::new(substs_a.types().count());
                let mut found = false;
                for ty in field.walk() {
                    if let ty::TyParam(p) = ty.sty {
                        ty_params.insert(p.idx as usize);
                        found = true;
                    }
                }
                if !found {
                    return Err(Unimplemented);
                }

                // Replace type parameters used in unsizing with
                // TyError and ensure they do not affect any other fields.
                // This could be checked after type collection for any struct
                // with a potentially unsized trailing field.
                let params = substs_a.iter().enumerate().map(|(i, &k)| {
                    if ty_params.contains(i) {
                        Kind::from(tcx.types.err)
                    } else {
                        k
                    }
                });
                let substs = tcx.mk_substs(params);
                for &ty in fields.split_last().unwrap().1 {
                    if ty.subst(tcx, substs).references_error() {
                        return Err(Unimplemented);
                    }
                }

                // Extract Field<T> and Field<U> from Struct<T> and Struct<U>.
                let inner_source = field.subst(tcx, substs_a);
                let inner_target = field.subst(tcx, substs_b);

                // Check that the source struct with the target's
                // unsized parameters is equal to the target.
                let params = substs_a.iter().enumerate().map(|(i, &k)| {
                    if ty_params.contains(i) {
                        substs_b.type_at(i).into()
                    } else {
                        k
                    }
                });
                let new_struct = tcx.mk_adt(def, tcx.mk_substs(params));
                let InferOk { obligations, .. } =
                    self.infcx.at(&obligation.cause, obligation.param_env)
                              .eq(target, new_struct)
                              .map_err(|_| Unimplemented)?;
                nested.extend(obligations);

                // Construct the nested Field<T>: Unsize<Field<U>> predicate.
                nested.push(tcx.predicate_for_trait_def(
                    obligation.param_env,
                    obligation.cause.clone(),
                    obligation.predicate.def_id(),
                    obligation.recursion_depth + 1,
                    inner_source,
                    &[inner_target]));
            }

            // (.., T) -> (.., U).
            (&ty::TyTuple(tys_a), &ty::TyTuple(tys_b)) => {
                assert_eq!(tys_a.len(), tys_b.len());

                // The last field of the tuple has to exist.
                let (a_last, a_mid) = if let Some(x) = tys_a.split_last() {
                    x
                } else {
                    return Err(Unimplemented);
                };
                let b_last = tys_b.last().unwrap();

                // Check that the source tuple with the target's
                // last element is equal to the target.
                let new_tuple = tcx.mk_tup(a_mid.iter().chain(Some(b_last)));
                let InferOk { obligations, .. } =
                    self.infcx.at(&obligation.cause, obligation.param_env)
                              .eq(target, new_tuple)
                              .map_err(|_| Unimplemented)?;
                nested.extend(obligations);

                // Construct the nested T: Unsize<U> predicate.
                nested.push(tcx.predicate_for_trait_def(
                    obligation.param_env,
                    obligation.cause.clone(),
                    obligation.predicate.def_id(),
                    obligation.recursion_depth + 1,
                    a_last,
                    &[b_last]));
            }

            _ => bug!()
        };

        Ok(VtableBuiltinData { nested: nested })
    }

    ///////////////////////////////////////////////////////////////////////////
    // Matching
    //
    // Matching is a common path used for both evaluation and
    // confirmation. It basically unifies types that appear in impls
    // and traits. This does affect the surrounding environment;
    // therefore, when used during evaluation, match routines must be
    // run inside of a `probe()` so that their side-effects are
    // contained.

    /// Like `match_impl`, but the match is known to succeed (it
    /// already succeeded during selection); failure is a bug.
    fn rematch_impl(&mut self,
                    impl_def_id: DefId,
                    obligation: &TraitObligation<'tcx>,
                    snapshot: &infer::CombinedSnapshot<'cx, 'tcx>)
                    -> (Normalized<'tcx, &'tcx Substs<'tcx>>,
                        infer::SkolemizationMap<'tcx>)
    {
        match self.match_impl(impl_def_id, obligation, snapshot) {
            Ok((substs, skol_map)) => (substs, skol_map),
            Err(()) => {
                bug!("Impl {:?} was matchable against {:?} but now is not",
                     impl_def_id,
                     obligation);
            }
        }
    }

    /// Attempts to unify the trait-ref of `impl_def_id` (with fresh
    /// substitutions) against the obligation's (skolemized) trait-ref,
    /// returning the matching substitutions on success.
    fn match_impl(&mut self,
                  impl_def_id: DefId,
                  obligation: &TraitObligation<'tcx>,
                  snapshot: &infer::CombinedSnapshot<'cx, 'tcx>)
                  -> Result<(Normalized<'tcx, &'tcx Substs<'tcx>>,
                             infer::SkolemizationMap<'tcx>), ()>
    {
        let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();

        // Before we create the substitutions and everything, first
        // consider a "quick reject". This avoids creating more types
        // and so forth that we need to.
        if self.fast_reject_trait_refs(obligation, &impl_trait_ref) {
            return Err(());
        }

        let (skol_obligation, skol_map) = self.infcx().skolemize_late_bound_regions(
            &obligation.predicate,
            snapshot);
        let skol_obligation_trait_ref = skol_obligation.trait_ref;

        let impl_substs = self.infcx.fresh_substs_for_item(obligation.param_env.universe,
                                                           obligation.cause.span,
                                                           impl_def_id);

        let impl_trait_ref = impl_trait_ref.subst(self.tcx(),
                                                  impl_substs);

        let Normalized { value: impl_trait_ref, obligations: mut nested_obligations } =
            project::normalize_with_depth(self,
                                          obligation.param_env,
                                          obligation.cause.clone(),
                                          obligation.recursion_depth + 1,
                                          &impl_trait_ref);

        debug!("match_impl(impl_def_id={:?}, obligation={:?}, \
               impl_trait_ref={:?}, skol_obligation_trait_ref={:?})",
               impl_def_id,
               obligation,
               impl_trait_ref,
               skol_obligation_trait_ref);

        let InferOk { obligations, .. } =
            self.infcx.at(&obligation.cause, obligation.param_env)
                      .eq(skol_obligation_trait_ref, impl_trait_ref)
                      .map_err(|e| {
                          debug!("match_impl: failed eq_trait_refs due to `{}`", e);
                          ()
                      })?;
        nested_obligations.extend(obligations);

        // The unification must not have leaked any skolemized regions.
        if let Err(e) = self.infcx.leak_check(false,
                                              obligation.cause.span,
                                              &skol_map,
                                              snapshot) {
            debug!("match_impl: failed leak check due to `{}`", e);
            return Err(());
        }

        debug!("match_impl: success impl_substs={:?}", impl_substs);
        Ok((Normalized {
            value: impl_substs,
            obligations: nested_obligations
        }, skol_map))
    }

    /// Returns true if the obligation and impl trait-refs cannot
    /// possibly unify, judged by comparing their simplified input
    /// types (a cheap pre-filter, no inference involved).
    fn fast_reject_trait_refs(&mut self,
                              obligation: &TraitObligation,
                              impl_trait_ref: &ty::TraitRef)
                              -> bool
    {
        // We can avoid creating type variables and doing the full
        // substitution if we find that any of the input types, when
        // simplified, do not match.
        obligation.predicate.skip_binder().input_types()
            .zip(impl_trait_ref.input_types())
            .any(|(obligation_ty, impl_ty)| {
                let simplified_obligation_ty =
                    fast_reject::simplify_type(self.tcx(), obligation_ty, true);
                let simplified_impl_ty =
                    fast_reject::simplify_type(self.tcx(), impl_ty, false);

                simplified_obligation_ty.is_some() &&
                    simplified_impl_ty.is_some() &&
                    simplified_obligation_ty != simplified_impl_ty
            })
    }

    /// Normalize `where_clause_trait_ref` and try to match it against
    /// `obligation`. If successful, return any predicates that
    /// result from the normalization. Normalization is necessary
    /// because where-clauses are stored in the parameter environment
    /// unnormalized.
    fn match_where_clause_trait_ref(&mut self,
                                    obligation: &TraitObligation<'tcx>,
                                    where_clause_trait_ref: ty::PolyTraitRef<'tcx>)
                                    -> Result<Vec<PredicateObligation<'tcx>>,()>
    {
        self.match_poly_trait_ref(obligation, where_clause_trait_ref)
    }

    /// Returns `Ok` if `poly_trait_ref` being true implies that the
    /// obligation is satisfied.
    fn match_poly_trait_ref(&mut self,
                            obligation: &TraitObligation<'tcx>,
                            poly_trait_ref: ty::PolyTraitRef<'tcx>)
                            -> Result<Vec<PredicateObligation<'tcx>>,()>
    {
        debug!("match_poly_trait_ref: obligation={:?} poly_trait_ref={:?}",
               obligation,
               poly_trait_ref);

        self.infcx.at(&obligation.cause, obligation.param_env)
                  .sup(obligation.predicate.to_poly_trait_ref(), poly_trait_ref)
                  .map(|InferOk { obligations, ..
                  }| obligations)
                  .map_err(|_| ())
    }

    ///////////////////////////////////////////////////////////////////////////
    // Miscellany

    /// Returns true if `previous` and `current` relate successfully
    /// under `ty::_match` (structural match modulo inference
    /// variables).
    fn match_fresh_trait_refs(&self,
                              previous: &ty::PolyTraitRef<'tcx>,
                              current: &ty::PolyTraitRef<'tcx>)
                              -> bool
    {
        let mut matcher = ty::_match::Match::new(self.tcx());
        matcher.relate(previous, current).is_ok()
    }

    /// Pushes `obligation` (with its freshened trait-ref) onto the
    /// obligation stack rooted at `previous_stack`.
    fn push_stack<'o,'s:'o>(&mut self,
                            previous_stack: TraitObligationStackList<'s, 'tcx>,
                            obligation: &'o TraitObligation<'tcx>)
                            -> TraitObligationStack<'o, 'tcx>
    {
        let fresh_trait_ref =
            obligation.predicate.to_poly_trait_ref().fold_with(&mut self.freshener);

        TraitObligationStack {
            obligation,
            fresh_trait_ref,
            previous: previous_stack,
        }
    }

    /// Builds the (unnormalized) `Fn`-family trait-ref corresponding
    /// to the closure `closure_def_id` and the obligation's trait.
    fn closure_trait_ref_unnormalized(&mut self,
                                      obligation: &TraitObligation<'tcx>,
                                      closure_def_id: DefId,
                                      substs: ty::ClosureSubsts<'tcx>)
                                      -> ty::PolyTraitRef<'tcx>
    {
        let closure_type = self.infcx.closure_sig(closure_def_id, substs);
        let ty::Binder((trait_ref, _)) =
            self.tcx().closure_trait_ref_and_return_type(obligation.predicate.def_id(),
                                                         obligation.predicate.0.self_ty(), // (1)
                                                         closure_type,
                                                         util::TupleArgumentsFlag::No);
        // (1) Feels icky to skip the binder here, but OTOH we know
        // that the self-type is an unboxed closure type and hence is
        // in fact unparameterized (or at least does not reference any
        // regions bound in the obligation). Still probably some
        // refactoring could make this nicer.

        ty::Binder(trait_ref)
    }

    /// Builds the (unnormalized) generator trait-ref corresponding to
    /// the generator `closure_def_id` and the obligation's trait.
    fn generator_trait_ref_unnormalized(&mut self,
                                        obligation: &TraitObligation<'tcx>,
                                        closure_def_id: DefId,
                                        substs: ty::ClosureSubsts<'tcx>)
                                        -> ty::PolyTraitRef<'tcx>
    {
        let gen_sig = substs.generator_poly_sig(closure_def_id, self.tcx());
        let ty::Binder((trait_ref, ..)) =
            self.tcx().generator_trait_ref_and_outputs(obligation.predicate.def_id(),
                                                       obligation.predicate.0.self_ty(), // (1)
                                                       gen_sig);
        // (1) Feels icky to skip the binder here, but OTOH we know
        // that the self-type is a generator type and hence is
        // in fact unparameterized (or at least does not reference any
        // regions bound in the obligation). Still probably some
        // refactoring could make this nicer.

        ty::Binder(trait_ref)
    }

    /// Returns the obligations that are implied by instantiating an
    /// impl or trait. The obligations are substituted and fully
    /// normalized. This is used when confirming an impl or default
    /// impl.
    fn impl_or_trait_obligations(&mut self,
                                 cause: ObligationCause<'tcx>,
                                 recursion_depth: usize,
                                 param_env: ty::ParamEnv<'tcx>,
                                 def_id: DefId, // of impl or trait
                                 substs: &Substs<'tcx>, // for impl or trait
                                 skol_map: infer::SkolemizationMap<'tcx>,
                                 snapshot: &infer::CombinedSnapshot<'cx, 'tcx>)
                                 -> Vec<PredicateObligation<'tcx>>
    {
        debug!("impl_or_trait_obligations(def_id={:?})", def_id);
        let tcx = self.tcx();

        // To allow for one-pass evaluation of the nested obligation,
        // each predicate must be preceded by the obligations required
        // to normalize it.
        // for example, if we have:
        //      impl<U: Iterator, V: Iterator<Item=U>> Foo for V where U::Item: Copy
        // the impl will have the following predicates:
        //      <V as Iterator>::Item = U,
        //      U: Iterator, U: Sized,
        //      V: Iterator, V: Sized,
        //      <U as Iterator>::Item: Copy
        // When we substitute, say, `V => IntoIter<u32>, U => $0`, the last
        // obligation will normalize to `<$0 as Iterator>::Item = $1` and
        // `$1: Copy`, so we must ensure the obligations are emitted in
        // that order.
        let predicates = tcx.predicates_of(def_id);
        assert_eq!(predicates.parent, None);
        let mut predicates: Vec<_> = predicates.predicates.iter().flat_map(|predicate| {
            let predicate = normalize_with_depth(self, param_env, cause.clone(), recursion_depth,
                                                 &predicate.subst(tcx, substs));
            predicate.obligations.into_iter().chain(
                Some(Obligation {
                    cause: cause.clone(),
                    recursion_depth,
                    param_env,
                    predicate: predicate.value
                }))
        }).collect();

        // We are performing deduplication here to avoid exponential blowups
        // (#38528) from happening, but the real cause of the duplication is
        // unknown. What we know is that the deduplication avoids exponential
        // amount of predicates being propagated when processing deeply nested
        // types.
        let mut seen = FxHashSet();
        predicates.retain(|i| seen.insert(i.clone()));
        self.infcx().plug_leaks(skol_map, snapshot, predicates)
    }
}

impl<'tcx> TraitObligation<'tcx> {
    #[allow(unused_comparisons)]
    pub fn derived_cause(&self,
                         variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>)
                         -> ObligationCause<'tcx>
    {
        /*!
         * Creates a cause for obligations that are derived from
         * `obligation` by a recursive search (e.g., for a builtin
         * bound, or eventually a `auto trait Foo`). If `obligation`
         * is itself a derived obligation, this is just a clone, but
         * otherwise we create a "derived obligation" cause so as to
         * keep track of the original root obligation for error
         * reporting.
         */

        let obligation = self;

        // NOTE(flaper87): As of now, it keeps track of the whole error
        // chain. Ideally, we should have a way to configure this either
        // by using -Z verbose or just a CLI argument.
        // NOTE(review): `recursion_depth >= 0` is trivially true for a
        // `usize` (hence the `allow(unused_comparisons)` above), so the
        // `else` branch below is currently unreachable; the comparison is
        // kept as an easily tweakable depth threshold.
        if obligation.recursion_depth >= 0 {
            let derived_cause = DerivedObligationCause {
                parent_trait_ref: obligation.predicate.to_poly_trait_ref(),
                parent_code: Rc::new(obligation.cause.code.clone())
            };
            let derived_code = variant(derived_cause);
            ObligationCause::new(obligation.cause.span, obligation.cause.body_id, derived_code)
        } else {
            obligation.cause.clone()
        }
    }
}

impl<'tcx> SelectionCache<'tcx> {
    /// Creates an empty selection cache.
    pub fn new() -> SelectionCache<'tcx> {
        SelectionCache {
            hashmap: RefCell::new(FxHashMap())
        }
    }

    /// Drops all cached selection results.
    pub fn clear(&self) {
        *self.hashmap.borrow_mut() = FxHashMap()
    }
}

impl<'tcx> EvaluationCache<'tcx> {
    /// Creates an empty evaluation cache.
    pub fn new() -> EvaluationCache<'tcx> {
        EvaluationCache {
            hashmap: RefCell::new(FxHashMap())
        }
    }

    /// Drops all cached evaluation results.
    pub fn clear(&self) {
        *self.hashmap.borrow_mut() = FxHashMap()
    }
}

impl<'o,'tcx> TraitObligationStack<'o,'tcx> {
    fn list(&'o self) -> TraitObligationStackList<'o,'tcx> {
        TraitObligationStackList::with(self)
    }

    fn iter(&'o self) -> TraitObligationStackList<'o,'tcx> {
        self.list()
    }
}

/// A (possibly empty) view into a linked stack of in-progress trait
/// obligations; iterating walks from the head outward.
#[derive(Copy, Clone)]
struct TraitObligationStackList<'o,'tcx:'o> {
    head: Option<&'o TraitObligationStack<'o,'tcx>>
}

impl<'o,'tcx> TraitObligationStackList<'o,'tcx> {
    fn empty() -> TraitObligationStackList<'o,'tcx> {
        TraitObligationStackList { head: None }
    }

    fn with(r: &'o TraitObligationStack<'o,'tcx>) -> TraitObligationStackList<'o,'tcx> {
        TraitObligationStackList { head: Some(r) }
    }
}

impl<'o,'tcx> Iterator for TraitObligationStackList<'o,'tcx>{
    type Item = &'o TraitObligationStack<'o,'tcx>;

    fn next(&mut self) -> Option<&'o TraitObligationStack<'o,'tcx>> {
        match self.head {
            Some(o) => {
                // Advance toward the outermost obligation.
                *self = o.previous;
                Some(o)
            }
            None => None
        }
    }
}

impl<'o,'tcx> fmt::Debug for TraitObligationStack<'o,'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "TraitObligationStack({:?})",
               self.obligation)
    }
}

/// A cached value paired with the dep-graph node under which it was
/// computed.
#[derive(Clone)]
pub struct WithDepNode<T> {
    dep_node: DepNodeIndex,
    cached_value: T
}

impl<T: Clone> WithDepNode<T> {
    pub fn new(dep_node: DepNodeIndex, cached_value: T) -> Self {
        WithDepNode {
dep_node, cached_value } } pub fn get(&self, tcx: TyCtxt) -> T { tcx.dep_graph.read_index(self.dep_node); self.cached_value.clone() } }
pub use cli::Config; use std::io::stderr; use std::io::Write; pub struct Runtime<'a> { config : Config<'a>, } impl<'a> Runtime<'a> { pub fn new(config : Config<'a>) -> Runtime<'a> { Runtime { config: config, } } pub fn debug(&self, string : &String) { if self.config.is_debugging() { println!("{}", string); } } pub fn print(&self, string : &String) { if self.config.is_verbose() || self.config.is_debugging() { println!("{}", string); } } pub fn trace(string : &String) { // writeln!(&mut stderr, "{}", string); } pub fn is_verbose(&self) -> bool { self.config.is_verbose() } pub fn is_debugging(&self) -> bool { self.config.is_debugging() } } Runtime publishes config pub use cli::Config; use std::io::stderr; use std::io::Write; pub struct Runtime<'a> { pub config : Config<'a>, } impl<'a> Runtime<'a> { pub fn new(config : Config<'a>) -> Runtime<'a> { Runtime { config: config, } } pub fn debug(&self, string : &String) { if self.config.is_debugging() { println!("{}", string); } } pub fn print(&self, string : &String) { if self.config.is_verbose() || self.config.is_debugging() { println!("{}", string); } } pub fn trace(string : &String) { // writeln!(&mut stderr, "{}", string); } pub fn is_verbose(&self) -> bool { self.config.is_verbose() } pub fn is_debugging(&self) -> bool { self.config.is_debugging() } }
// Copyright 2019 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use std::cmp::min; use std::fmt::Debug; use std::fs::File; use std::io::{self, Read, Seek, SeekFrom, Write}; use std::sync::Arc; use async_trait::async_trait; use base::{ info, AsRawDescriptors, FileAllocate, FileReadWriteAtVolatile, FileSetLen, FileSync, PunchHole, SeekHole, WriteZeroesAt, }; use cros_async::Executor; use libc::EINVAL; use remain::sorted; use thiserror::Error as ThisError; use vm_memory::GuestMemory; mod qcow; pub use qcow::{QcowFile, QCOW_MAGIC}; #[cfg(feature = "composite-disk")] mod composite; #[cfg(feature = "composite-disk")] use composite::{CompositeDiskFile, CDISK_MAGIC, CDISK_MAGIC_LEN}; #[cfg(feature = "composite-disk")] mod gpt; #[cfg(feature = "composite-disk")] pub use composite::{ create_composite_disk, create_zero_filler, Error as CompositeError, ImagePartitionType, PartitionInfo, }; #[cfg(feature = "composite-disk")] pub use gpt::Error as GptError; mod android_sparse; use android_sparse::{AndroidSparse, SPARSE_HEADER_MAGIC}; /// Nesting depth limit for disk formats that can open other disk files. 
pub const MAX_NESTING_DEPTH: u32 = 10; #[sorted] #[derive(ThisError, Debug)] pub enum Error { #[error("failed to create block device: {0}")] BlockDeviceNew(base::Error), #[error("requested file conversion not supported")] ConversionNotSupported, #[error("failure in android sparse disk: {0}")] CreateAndroidSparseDisk(android_sparse::Error), #[cfg(feature = "composite-disk")] #[error("failure in composite disk: {0}")] CreateCompositeDisk(composite::Error), #[error("failure creating single file disk: {0}")] CreateSingleFileDisk(cros_async::AsyncError), #[error("failure with fallocate: {0}")] Fallocate(cros_async::AsyncError), #[error("failure with fsync: {0}")] Fsync(cros_async::AsyncError), #[error("maximum disk nesting depth exceeded")] MaxNestingDepthExceeded, #[error("failure in qcow: {0}")] QcowError(qcow::Error), #[error("failed to read data: {0}")] ReadingData(io::Error), #[error("failed to read header: {0}")] ReadingHeader(io::Error), #[error("failed to read to memory: {0}")] ReadToMem(cros_async::AsyncError), #[error("failed to seek file: {0}")] SeekingFile(io::Error), #[error("failed to set file size: {0}")] SettingFileSize(io::Error), #[error("unknown disk type")] UnknownType, #[error("failed to write from memory: {0}")] WriteFromMem(cros_async::AsyncError), #[error("failed to write from vec: {0}")] WriteFromVec(cros_async::AsyncError), #[error("failed to write data: {0}")] WritingData(io::Error), } pub type Result<T> = std::result::Result<T, Error>; /// A trait for getting the length of a disk image or raw block device. pub trait DiskGetLen { /// Get the current length of the disk in bytes. fn get_len(&self) -> io::Result<u64>; } impl DiskGetLen for File { fn get_len(&self) -> io::Result<u64> { let mut s = self; let orig_seek = s.seek(SeekFrom::Current(0))?; let end = s.seek(SeekFrom::End(0))? as u64; s.seek(SeekFrom::Start(orig_seek))?; Ok(end) } } /// The prerequisites necessary to support a block device. 
#[rustfmt::skip] // rustfmt won't wrap the long list of trait bounds. pub trait DiskFile: FileSetLen + DiskGetLen + FileSync + FileReadWriteAtVolatile + PunchHole + WriteZeroesAt + FileAllocate + Send + AsRawDescriptors + Debug { } impl< D: FileSetLen + DiskGetLen + FileSync + PunchHole + FileReadWriteAtVolatile + WriteZeroesAt + FileAllocate + Send + AsRawDescriptors + Debug, > DiskFile for D { } /// A `DiskFile` that can be converted for asychronous access. pub trait ToAsyncDisk: DiskFile { /// Convert a boxed self in to a box-wrapped implementaiton of AsyncDisk. /// Used to convert a standard disk image to an async disk image. This conversion and the /// inverse are needed so that the `Send` DiskImage can be given to the block thread where it is /// converted to a non-`Send` AsyncDisk. The AsyncDisk can then be converted back and returned /// to the main device thread if the block device is destroyed or reset. fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>>; } impl ToAsyncDisk for File { fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>> { Ok(Box::new(SingleFileDisk::new(*self, ex)?)) } } /// The variants of image files on the host that can be used as virtual disks. 
#[derive(Debug, PartialEq, Eq)] pub enum ImageType { Raw, Qcow2, CompositeDisk, AndroidSparse, } fn convert_copy<R, W>(reader: &mut R, writer: &mut W, offset: u64, size: u64) -> Result<()> where R: Read + Seek, W: Write + Seek, { const CHUNK_SIZE: usize = 65536; let mut buf = [0; CHUNK_SIZE]; let mut read_count = 0; reader .seek(SeekFrom::Start(offset)) .map_err(Error::SeekingFile)?; writer .seek(SeekFrom::Start(offset)) .map_err(Error::SeekingFile)?; loop { let this_count = min(CHUNK_SIZE as u64, size - read_count) as usize; let nread = reader .read(&mut buf[..this_count]) .map_err(Error::ReadingData)?; writer.write(&buf[..nread]).map_err(Error::WritingData)?; read_count += nread as u64; if nread == 0 || read_count == size { break; } } Ok(()) } fn convert_reader_writer<R, W>(reader: &mut R, writer: &mut W, size: u64) -> Result<()> where R: Read + Seek + SeekHole, W: Write + Seek, { let mut offset = 0; while offset < size { // Find the next range of data. let next_data = match reader.seek_data(offset).map_err(Error::SeekingFile)? { Some(o) => o, None => { // No more data in the file. break; } }; let next_hole = match reader.seek_hole(next_data).map_err(Error::SeekingFile)? { Some(o) => o, None => { // This should not happen - there should always be at least one hole // after any data. return Err(Error::SeekingFile(io::Error::from_raw_os_error(EINVAL))); } }; let count = next_hole - next_data; convert_copy(reader, writer, next_data, count)?; offset = next_hole; } Ok(()) } fn convert_reader<R>(reader: &mut R, dst_file: File, dst_type: ImageType) -> Result<()> where R: Read + Seek + SeekHole, { let src_size = reader.seek(SeekFrom::End(0)).map_err(Error::SeekingFile)?; reader .seek(SeekFrom::Start(0)) .map_err(Error::SeekingFile)?; // Ensure the destination file is empty before writing to it. 
dst_file.set_len(0).map_err(Error::SettingFileSize)?; match dst_type { ImageType::Qcow2 => { let mut dst_writer = QcowFile::new(dst_file, src_size).map_err(Error::QcowError)?; convert_reader_writer(reader, &mut dst_writer, src_size) } ImageType::Raw => { let mut dst_writer = dst_file; // Set the length of the destination file to convert it into a sparse file // of the desired size. dst_writer .set_len(src_size) .map_err(Error::SettingFileSize)?; convert_reader_writer(reader, &mut dst_writer, src_size) } _ => Err(Error::ConversionNotSupported), } } /// Copy the contents of a disk image in `src_file` into `dst_file`. /// The type of `src_file` is automatically detected, and the output file type is /// determined by `dst_type`. pub fn convert( src_file: File, dst_file: File, dst_type: ImageType, src_max_nesting_depth: u32, ) -> Result<()> { let src_type = detect_image_type(&src_file)?; match src_type { ImageType::Qcow2 => { let mut src_reader = QcowFile::from(src_file, src_max_nesting_depth).map_err(Error::QcowError)?; convert_reader(&mut src_reader, dst_file, dst_type) } ImageType::Raw => { // src_file is a raw file. let mut src_reader = src_file; convert_reader(&mut src_reader, dst_file, dst_type) } // TODO(schuffelen): Implement Read + Write + SeekHole for CompositeDiskFile _ => Err(Error::ConversionNotSupported), } } /// Detect the type of an image file by checking for a valid header of the supported formats. pub fn detect_image_type(file: &File) -> Result<ImageType> { let mut f = file; let disk_size = f.get_len().map_err(Error::SeekingFile)?; let orig_seek = f.seek(SeekFrom::Current(0)).map_err(Error::SeekingFile)?; f.seek(SeekFrom::Start(0)).map_err(Error::SeekingFile)?; info!("disk size {}, ", disk_size); // Try to read the disk in a nicely-aligned block size unless the whole file is smaller. 
const MAGIC_BLOCK_SIZE: usize = 4096; #[repr(align(512))] struct BlockAlignedBuffer { data: [u8; MAGIC_BLOCK_SIZE], } let mut magic = BlockAlignedBuffer { data: [0u8; MAGIC_BLOCK_SIZE], }; let magic_read_len = if disk_size > MAGIC_BLOCK_SIZE as u64 { MAGIC_BLOCK_SIZE } else { // This cast is safe since we know disk_size is less than MAGIC_BLOCK_SIZE (4096) and // therefore is representable in usize. disk_size as usize }; f.read_exact(&mut magic.data[0..magic_read_len]) .map_err(Error::ReadingHeader)?; f.seek(SeekFrom::Start(orig_seek)) .map_err(Error::SeekingFile)?; #[cfg(feature = "composite-disk")] if let Some(cdisk_magic) = magic.data.get(0..CDISK_MAGIC_LEN) { if cdisk_magic == CDISK_MAGIC.as_bytes() { return Ok(ImageType::CompositeDisk); } } if let Some(magic4) = magic.data.get(0..4) { if magic4 == QCOW_MAGIC.to_be_bytes() { return Ok(ImageType::Qcow2); } else if magic4 == SPARSE_HEADER_MAGIC.to_le_bytes() { return Ok(ImageType::AndroidSparse); } } Ok(ImageType::Raw) } /// Check if the image file type can be used for async disk access. pub fn async_ok(raw_image: &File) -> Result<bool> { let image_type = detect_image_type(raw_image)?; Ok(match image_type { ImageType::Raw => true, ImageType::Qcow2 | ImageType::AndroidSparse | ImageType::CompositeDisk => false, }) } /// Inspect the image file type and create an appropriate disk file to match it. pub fn create_async_disk_file(raw_image: File) -> Result<Box<dyn ToAsyncDisk>> { let image_type = detect_image_type(&raw_image)?; Ok(match image_type { ImageType::Raw => Box::new(raw_image) as Box<dyn ToAsyncDisk>, ImageType::Qcow2 | ImageType::AndroidSparse | ImageType::CompositeDisk => { return Err(Error::UnknownType) } }) } /// Inspect the image file type and create an appropriate disk file to match it. 
pub fn create_disk_file(raw_image: File, mut max_nesting_depth: u32) -> Result<Box<dyn DiskFile>> { if max_nesting_depth == 0 { return Err(Error::MaxNestingDepthExceeded); } max_nesting_depth -= 1; let image_type = detect_image_type(&raw_image)?; Ok(match image_type { ImageType::Raw => Box::new(raw_image) as Box<dyn DiskFile>, ImageType::Qcow2 => { Box::new(QcowFile::from(raw_image, max_nesting_depth).map_err(Error::QcowError)?) as Box<dyn DiskFile> } #[cfg(feature = "composite-disk")] ImageType::CompositeDisk => { // Valid composite disk header present Box::new( CompositeDiskFile::from_file(raw_image, max_nesting_depth) .map_err(Error::CreateCompositeDisk)?, ) as Box<dyn DiskFile> } #[cfg(not(feature = "composite-disk"))] ImageType::CompositeDisk => return Err(Error::UnknownType), ImageType::AndroidSparse => { Box::new(AndroidSparse::from_file(raw_image).map_err(Error::CreateAndroidSparseDisk)?) as Box<dyn DiskFile> } }) } /// An asynchronously accessible disk. #[async_trait(?Send)] pub trait AsyncDisk: DiskGetLen + FileSetLen + FileAllocate { /// Returns the inner file consuming self. fn into_inner(self: Box<Self>) -> Box<dyn ToAsyncDisk>; /// Asynchronously fsyncs any completed operations to the disk. async fn fsync(&self) -> Result<()>; /// Reads from the file at 'file_offset' in to memory `mem` at `mem_offsets`. /// `mem_offsets` is similar to an iovec except relative to the start of `mem`. async fn read_to_mem<'a>( &self, file_offset: u64, mem: Arc<GuestMemory>, mem_offsets: &'a [cros_async::MemRegion], ) -> Result<usize>; /// Writes to the file at 'file_offset' from memory `mem` at `mem_offsets`. async fn write_from_mem<'a>( &self, file_offset: u64, mem: Arc<GuestMemory>, mem_offsets: &'a [cros_async::MemRegion], ) -> Result<usize>; /// Replaces a range of bytes with a hole. async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()>; /// Writes up to `length` bytes of zeroes to the stream, returning how many bytes were written. 
async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()>; } use cros_async::IoSourceExt; /// A disk backed by a single file that implements `AsyncDisk` for access. pub struct SingleFileDisk { inner: Box<dyn IoSourceExt<File>>, } impl SingleFileDisk { pub fn new(disk: File, ex: &Executor) -> Result<Self> { ex.async_from(disk) .map_err(Error::CreateSingleFileDisk) .map(|inner| SingleFileDisk { inner }) } } impl DiskGetLen for SingleFileDisk { fn get_len(&self) -> io::Result<u64> { self.inner.as_source().get_len() } } impl FileSetLen for SingleFileDisk { fn set_len(&self, len: u64) -> io::Result<()> { self.inner.as_source().set_len(len) } } impl FileAllocate for SingleFileDisk { fn allocate(&mut self, offset: u64, len: u64) -> io::Result<()> { self.inner.as_source_mut().allocate(offset, len) } } #[async_trait(?Send)] impl AsyncDisk for SingleFileDisk { fn into_inner(self: Box<Self>) -> Box<dyn ToAsyncDisk> { Box::new(self.inner.into_source()) } async fn fsync(&self) -> Result<()> { self.inner.fsync().await.map_err(Error::Fsync) } async fn read_to_mem<'a>( &self, file_offset: u64, mem: Arc<GuestMemory>, mem_offsets: &'a [cros_async::MemRegion], ) -> Result<usize> { self.inner .read_to_mem(Some(file_offset), mem, mem_offsets) .await .map_err(Error::ReadToMem) } async fn write_from_mem<'a>( &self, file_offset: u64, mem: Arc<GuestMemory>, mem_offsets: &'a [cros_async::MemRegion], ) -> Result<usize> { self.inner .write_from_mem(Some(file_offset), mem, mem_offsets) .await .map_err(Error::WriteFromMem) } async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()> { self.inner .fallocate( file_offset, length, (libc::FALLOC_FL_PUNCH_HOLE | libc::FALLOC_FL_KEEP_SIZE) as u32, ) .await .map_err(Error::Fallocate) } async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()> { if self .inner .fallocate( file_offset, length, (libc::FALLOC_FL_ZERO_RANGE | libc::FALLOC_FL_KEEP_SIZE) as u32, ) .await .is_ok() { return Ok(()); } // Fall 
back to writing zeros if fallocate doesn't work. let buf_size = min(length, 0x10000); let mut nwritten = 0; while nwritten < length { let remaining = length - nwritten; let write_size = min(remaining, buf_size) as usize; let buf = vec![0u8; write_size]; nwritten += self .inner .write_from_vec(Some(file_offset + nwritten as u64), buf) .await .map(|(n, _)| n as u64) .map_err(Error::WriteFromVec)?; } Ok(()) } } #[cfg(test)] mod tests { use super::*; use std::fs::{File, OpenOptions}; use cros_async::{Executor, MemRegion}; use vm_memory::{GuestAddress, GuestMemory}; #[test] fn read_async() { async fn read_zeros_async(ex: &Executor) { let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap()); let f = File::open("/dev/zero").unwrap(); let async_file = SingleFileDisk::new(f, ex).unwrap(); let result = async_file .read_to_mem( 0, Arc::clone(&guest_mem), &[MemRegion { offset: 0, len: 48 }], ) .await; assert_eq!(48, result.unwrap()); } let ex = Executor::new().unwrap(); ex.run_until(read_zeros_async(&ex)).unwrap(); } #[test] fn write_async() { async fn write_zeros_async(ex: &Executor) { let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap()); let f = OpenOptions::new().write(true).open("/dev/null").unwrap(); let async_file = SingleFileDisk::new(f, ex).unwrap(); let result = async_file .write_from_mem( 0, Arc::clone(&guest_mem), &[MemRegion { offset: 0, len: 48 }], ) .await; assert_eq!(48, result.unwrap()); } let ex = Executor::new().unwrap(); ex.run_until(write_zeros_async(&ex)).unwrap(); } #[test] fn detect_image_type_raw() { let mut t = tempfile::tempfile().unwrap(); // Fill the first block of the file with "random" data. let buf = "ABCD".as_bytes().repeat(1024); t.write_all(&buf).unwrap(); let image_type = detect_image_type(&t).expect("failed to detect image type"); assert_eq!(image_type, ImageType::Raw); } #[test] fn detect_image_type_qcow2() { let mut t = tempfile::tempfile().unwrap(); // Write the qcow2 magic signature. 
The rest of the header is not filled in, so if // detect_image_type is ever updated to validate more of the header, this test would need // to be updated. let buf: &[u8] = &[0x51, 0x46, 0x49, 0xfb]; t.write_all(&buf).unwrap(); let image_type = detect_image_type(&t).expect("failed to detect image type"); assert_eq!(image_type, ImageType::Qcow2); } #[test] fn detect_image_type_android_sparse() { let mut t = tempfile::tempfile().unwrap(); // Write the Android sparse magic signature. The rest of the header is not filled in, so if // detect_image_type is ever updated to validate more of the header, this test would need // to be updated. let buf: &[u8] = &[0x3a, 0xff, 0x26, 0xed]; t.write_all(&buf).unwrap(); let image_type = detect_image_type(&t).expect("failed to detect image type"); assert_eq!(image_type, ImageType::AndroidSparse); } #[test] #[cfg(feature = "composite-disk")] fn detect_image_type_composite() { let mut t = tempfile::tempfile().unwrap(); // Write the composite disk magic signature. The rest of the header is not filled in, so if // detect_image_type is ever updated to validate more of the header, this test would need // to be updated. let buf = "composite_disk\x1d".as_bytes(); t.write_all(&buf).unwrap(); let image_type = detect_image_type(&t).expect("failed to detect image type"); assert_eq!(image_type, ImageType::CompositeDisk); } #[test] fn detect_image_type_small_file() { let mut t = tempfile::tempfile().unwrap(); // Write a file smaller than the four-byte qcow2/sparse magic to ensure the small file logic // works correctly and handles it as a raw file. let buf: &[u8] = &[0xAA, 0xBB]; t.write_all(&buf).unwrap(); let image_type = detect_image_type(&t).expect("failed to detect image type"); assert_eq!(image_type, ImageType::Raw); } } crosvm: Try using block size of 4096. Documentation says 512 but ext4 block size is 4096 and we don't need to diverge here. 
BUG=b:199094605 TEST=arc.Boot.vm Change-Id: I6f04e652dea6a3cf661d36b943278753340cc665 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/3195155 Tested-by: kokoro <2ac7b1f3fa578934c95181d4272be0d3bca00121@google.com> Commit-Queue: Junichi Uekawa <6bd240a6ce32463e272bb69c685a8e946b33ac05@chromium.org> Reviewed-by: Chirantan Ekbote <35787c0e27de5c48735e07f8324823b765e8bcbc@chromium.org> // Copyright 2019 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use std::cmp::min; use std::fmt::Debug; use std::fs::File; use std::io::{self, Read, Seek, SeekFrom, Write}; use std::sync::Arc; use async_trait::async_trait; use base::{ info, AsRawDescriptors, FileAllocate, FileReadWriteAtVolatile, FileSetLen, FileSync, PunchHole, SeekHole, WriteZeroesAt, }; use cros_async::Executor; use libc::EINVAL; use remain::sorted; use thiserror::Error as ThisError; use vm_memory::GuestMemory; mod qcow; pub use qcow::{QcowFile, QCOW_MAGIC}; #[cfg(feature = "composite-disk")] mod composite; #[cfg(feature = "composite-disk")] use composite::{CompositeDiskFile, CDISK_MAGIC, CDISK_MAGIC_LEN}; #[cfg(feature = "composite-disk")] mod gpt; #[cfg(feature = "composite-disk")] pub use composite::{ create_composite_disk, create_zero_filler, Error as CompositeError, ImagePartitionType, PartitionInfo, }; #[cfg(feature = "composite-disk")] pub use gpt::Error as GptError; mod android_sparse; use android_sparse::{AndroidSparse, SPARSE_HEADER_MAGIC}; /// Nesting depth limit for disk formats that can open other disk files. 
pub const MAX_NESTING_DEPTH: u32 = 10; #[sorted] #[derive(ThisError, Debug)] pub enum Error { #[error("failed to create block device: {0}")] BlockDeviceNew(base::Error), #[error("requested file conversion not supported")] ConversionNotSupported, #[error("failure in android sparse disk: {0}")] CreateAndroidSparseDisk(android_sparse::Error), #[cfg(feature = "composite-disk")] #[error("failure in composite disk: {0}")] CreateCompositeDisk(composite::Error), #[error("failure creating single file disk: {0}")] CreateSingleFileDisk(cros_async::AsyncError), #[error("failure with fallocate: {0}")] Fallocate(cros_async::AsyncError), #[error("failure with fsync: {0}")] Fsync(cros_async::AsyncError), #[error("maximum disk nesting depth exceeded")] MaxNestingDepthExceeded, #[error("failure in qcow: {0}")] QcowError(qcow::Error), #[error("failed to read data: {0}")] ReadingData(io::Error), #[error("failed to read header: {0}")] ReadingHeader(io::Error), #[error("failed to read to memory: {0}")] ReadToMem(cros_async::AsyncError), #[error("failed to seek file: {0}")] SeekingFile(io::Error), #[error("failed to set file size: {0}")] SettingFileSize(io::Error), #[error("unknown disk type")] UnknownType, #[error("failed to write from memory: {0}")] WriteFromMem(cros_async::AsyncError), #[error("failed to write from vec: {0}")] WriteFromVec(cros_async::AsyncError), #[error("failed to write data: {0}")] WritingData(io::Error), } pub type Result<T> = std::result::Result<T, Error>; /// A trait for getting the length of a disk image or raw block device. pub trait DiskGetLen { /// Get the current length of the disk in bytes. fn get_len(&self) -> io::Result<u64>; } impl DiskGetLen for File { fn get_len(&self) -> io::Result<u64> { let mut s = self; let orig_seek = s.seek(SeekFrom::Current(0))?; let end = s.seek(SeekFrom::End(0))? as u64; s.seek(SeekFrom::Start(orig_seek))?; Ok(end) } } /// The prerequisites necessary to support a block device. 
#[rustfmt::skip] // rustfmt won't wrap the long list of trait bounds. pub trait DiskFile: FileSetLen + DiskGetLen + FileSync + FileReadWriteAtVolatile + PunchHole + WriteZeroesAt + FileAllocate + Send + AsRawDescriptors + Debug { } impl< D: FileSetLen + DiskGetLen + FileSync + PunchHole + FileReadWriteAtVolatile + WriteZeroesAt + FileAllocate + Send + AsRawDescriptors + Debug, > DiskFile for D { } /// A `DiskFile` that can be converted for asychronous access. pub trait ToAsyncDisk: DiskFile { /// Convert a boxed self in to a box-wrapped implementaiton of AsyncDisk. /// Used to convert a standard disk image to an async disk image. This conversion and the /// inverse are needed so that the `Send` DiskImage can be given to the block thread where it is /// converted to a non-`Send` AsyncDisk. The AsyncDisk can then be converted back and returned /// to the main device thread if the block device is destroyed or reset. fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>>; } impl ToAsyncDisk for File { fn to_async_disk(self: Box<Self>, ex: &Executor) -> Result<Box<dyn AsyncDisk>> { Ok(Box::new(SingleFileDisk::new(*self, ex)?)) } } /// The variants of image files on the host that can be used as virtual disks. 
#[derive(Debug, PartialEq, Eq)] pub enum ImageType { Raw, Qcow2, CompositeDisk, AndroidSparse, } fn convert_copy<R, W>(reader: &mut R, writer: &mut W, offset: u64, size: u64) -> Result<()> where R: Read + Seek, W: Write + Seek, { const CHUNK_SIZE: usize = 65536; let mut buf = [0; CHUNK_SIZE]; let mut read_count = 0; reader .seek(SeekFrom::Start(offset)) .map_err(Error::SeekingFile)?; writer .seek(SeekFrom::Start(offset)) .map_err(Error::SeekingFile)?; loop { let this_count = min(CHUNK_SIZE as u64, size - read_count) as usize; let nread = reader .read(&mut buf[..this_count]) .map_err(Error::ReadingData)?; writer.write(&buf[..nread]).map_err(Error::WritingData)?; read_count += nread as u64; if nread == 0 || read_count == size { break; } } Ok(()) } fn convert_reader_writer<R, W>(reader: &mut R, writer: &mut W, size: u64) -> Result<()> where R: Read + Seek + SeekHole, W: Write + Seek, { let mut offset = 0; while offset < size { // Find the next range of data. let next_data = match reader.seek_data(offset).map_err(Error::SeekingFile)? { Some(o) => o, None => { // No more data in the file. break; } }; let next_hole = match reader.seek_hole(next_data).map_err(Error::SeekingFile)? { Some(o) => o, None => { // This should not happen - there should always be at least one hole // after any data. return Err(Error::SeekingFile(io::Error::from_raw_os_error(EINVAL))); } }; let count = next_hole - next_data; convert_copy(reader, writer, next_data, count)?; offset = next_hole; } Ok(()) } fn convert_reader<R>(reader: &mut R, dst_file: File, dst_type: ImageType) -> Result<()> where R: Read + Seek + SeekHole, { let src_size = reader.seek(SeekFrom::End(0)).map_err(Error::SeekingFile)?; reader .seek(SeekFrom::Start(0)) .map_err(Error::SeekingFile)?; // Ensure the destination file is empty before writing to it. 
dst_file.set_len(0).map_err(Error::SettingFileSize)?; match dst_type { ImageType::Qcow2 => { let mut dst_writer = QcowFile::new(dst_file, src_size).map_err(Error::QcowError)?; convert_reader_writer(reader, &mut dst_writer, src_size) } ImageType::Raw => { let mut dst_writer = dst_file; // Set the length of the destination file to convert it into a sparse file // of the desired size. dst_writer .set_len(src_size) .map_err(Error::SettingFileSize)?; convert_reader_writer(reader, &mut dst_writer, src_size) } _ => Err(Error::ConversionNotSupported), } } /// Copy the contents of a disk image in `src_file` into `dst_file`. /// The type of `src_file` is automatically detected, and the output file type is /// determined by `dst_type`. pub fn convert( src_file: File, dst_file: File, dst_type: ImageType, src_max_nesting_depth: u32, ) -> Result<()> { let src_type = detect_image_type(&src_file)?; match src_type { ImageType::Qcow2 => { let mut src_reader = QcowFile::from(src_file, src_max_nesting_depth).map_err(Error::QcowError)?; convert_reader(&mut src_reader, dst_file, dst_type) } ImageType::Raw => { // src_file is a raw file. let mut src_reader = src_file; convert_reader(&mut src_reader, dst_file, dst_type) } // TODO(schuffelen): Implement Read + Write + SeekHole for CompositeDiskFile _ => Err(Error::ConversionNotSupported), } } /// Detect the type of an image file by checking for a valid header of the supported formats. pub fn detect_image_type(file: &File) -> Result<ImageType> { let mut f = file; let disk_size = f.get_len().map_err(Error::SeekingFile)?; let orig_seek = f.seek(SeekFrom::Current(0)).map_err(Error::SeekingFile)?; f.seek(SeekFrom::Start(0)).map_err(Error::SeekingFile)?; info!("disk size {}, ", disk_size); // Try to read the disk in a nicely-aligned block size unless the whole file is smaller. 
const MAGIC_BLOCK_SIZE: usize = 4096; #[repr(align(4096))] struct BlockAlignedBuffer { data: [u8; MAGIC_BLOCK_SIZE], } let mut magic = BlockAlignedBuffer { data: [0u8; MAGIC_BLOCK_SIZE], }; let magic_read_len = if disk_size > MAGIC_BLOCK_SIZE as u64 { MAGIC_BLOCK_SIZE } else { // This cast is safe since we know disk_size is less than MAGIC_BLOCK_SIZE (4096) and // therefore is representable in usize. disk_size as usize }; f.read_exact(&mut magic.data[0..magic_read_len]) .map_err(Error::ReadingHeader)?; f.seek(SeekFrom::Start(orig_seek)) .map_err(Error::SeekingFile)?; #[cfg(feature = "composite-disk")] if let Some(cdisk_magic) = magic.data.get(0..CDISK_MAGIC_LEN) { if cdisk_magic == CDISK_MAGIC.as_bytes() { return Ok(ImageType::CompositeDisk); } } if let Some(magic4) = magic.data.get(0..4) { if magic4 == QCOW_MAGIC.to_be_bytes() { return Ok(ImageType::Qcow2); } else if magic4 == SPARSE_HEADER_MAGIC.to_le_bytes() { return Ok(ImageType::AndroidSparse); } } Ok(ImageType::Raw) } /// Check if the image file type can be used for async disk access. pub fn async_ok(raw_image: &File) -> Result<bool> { let image_type = detect_image_type(raw_image)?; Ok(match image_type { ImageType::Raw => true, ImageType::Qcow2 | ImageType::AndroidSparse | ImageType::CompositeDisk => false, }) } /// Inspect the image file type and create an appropriate disk file to match it. pub fn create_async_disk_file(raw_image: File) -> Result<Box<dyn ToAsyncDisk>> { let image_type = detect_image_type(&raw_image)?; Ok(match image_type { ImageType::Raw => Box::new(raw_image) as Box<dyn ToAsyncDisk>, ImageType::Qcow2 | ImageType::AndroidSparse | ImageType::CompositeDisk => { return Err(Error::UnknownType) } }) } /// Inspect the image file type and create an appropriate disk file to match it. 
pub fn create_disk_file(raw_image: File, mut max_nesting_depth: u32) -> Result<Box<dyn DiskFile>> {
    // Each container format (qcow2, composite) may itself reference another
    // image; the nesting budget bounds that recursion. A budget of zero means
    // this call is already one level too deep.
    if max_nesting_depth == 0 {
        return Err(Error::MaxNestingDepthExceeded);
    }
    // Spend one level of the budget before handing the file to a container
    // format that may open a nested image.
    max_nesting_depth -= 1;

    let image_type = detect_image_type(&raw_image)?;
    Ok(match image_type {
        ImageType::Raw => Box::new(raw_image) as Box<dyn DiskFile>,
        ImageType::Qcow2 => {
            Box::new(QcowFile::from(raw_image, max_nesting_depth).map_err(Error::QcowError)?)
                as Box<dyn DiskFile>
        }
        #[cfg(feature = "composite-disk")]
        ImageType::CompositeDisk => {
            // Valid composite disk header present
            Box::new(
                CompositeDiskFile::from_file(raw_image, max_nesting_depth)
                    .map_err(Error::CreateCompositeDisk)?,
            ) as Box<dyn DiskFile>
        }
        // Without the feature, a composite header is recognized but unusable.
        #[cfg(not(feature = "composite-disk"))]
        ImageType::CompositeDisk => return Err(Error::UnknownType),
        ImageType::AndroidSparse => {
            Box::new(AndroidSparse::from_file(raw_image).map_err(Error::CreateAndroidSparseDisk)?)
                as Box<dyn DiskFile>
        }
    })
}

/// An asynchronously accessible disk.
// `?Send` because the implementations are used from a single-threaded executor.
#[async_trait(?Send)]
pub trait AsyncDisk: DiskGetLen + FileSetLen + FileAllocate {
    /// Returns the inner file consuming self.
    fn into_inner(self: Box<Self>) -> Box<dyn ToAsyncDisk>;

    /// Asynchronously fsyncs any completed operations to the disk.
    async fn fsync(&self) -> Result<()>;

    /// Reads from the file at 'file_offset' in to memory `mem` at `mem_offsets`.
    /// `mem_offsets` is similar to an iovec except relative to the start of `mem`.
    async fn read_to_mem<'a>(
        &self,
        file_offset: u64,
        mem: Arc<GuestMemory>,
        mem_offsets: &'a [cros_async::MemRegion],
    ) -> Result<usize>;

    /// Writes to the file at 'file_offset' from memory `mem` at `mem_offsets`.
    async fn write_from_mem<'a>(
        &self,
        file_offset: u64,
        mem: Arc<GuestMemory>,
        mem_offsets: &'a [cros_async::MemRegion],
    ) -> Result<usize>;

    /// Replaces a range of bytes with a hole.
    async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()>;

    /// Writes up to `length` bytes of zeroes to the stream, returning how many bytes were written.
    async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()>;
}

// Placed here (rather than at the top of the file) next to its only users below.
use cros_async::IoSourceExt;

/// A disk backed by a single file that implements `AsyncDisk` for access.
pub struct SingleFileDisk {
    // Async I/O source wrapping the backing `File`.
    inner: Box<dyn IoSourceExt<File>>,
}

impl SingleFileDisk {
    /// Wraps `disk` in the executor's async I/O source.
    pub fn new(disk: File, ex: &Executor) -> Result<Self> {
        ex.async_from(disk)
            .map_err(Error::CreateSingleFileDisk)
            .map(|inner| SingleFileDisk { inner })
    }
}

impl DiskGetLen for SingleFileDisk {
    fn get_len(&self) -> io::Result<u64> {
        self.inner.as_source().get_len()
    }
}

impl FileSetLen for SingleFileDisk {
    fn set_len(&self, len: u64) -> io::Result<()> {
        self.inner.as_source().set_len(len)
    }
}

impl FileAllocate for SingleFileDisk {
    fn allocate(&mut self, offset: u64, len: u64) -> io::Result<()> {
        self.inner.as_source_mut().allocate(offset, len)
    }
}

#[async_trait(?Send)]
impl AsyncDisk for SingleFileDisk {
    fn into_inner(self: Box<Self>) -> Box<dyn ToAsyncDisk> {
        Box::new(self.inner.into_source())
    }

    async fn fsync(&self) -> Result<()> {
        self.inner.fsync().await.map_err(Error::Fsync)
    }

    async fn read_to_mem<'a>(
        &self,
        file_offset: u64,
        mem: Arc<GuestMemory>,
        mem_offsets: &'a [cros_async::MemRegion],
    ) -> Result<usize> {
        self.inner
            .read_to_mem(Some(file_offset), mem, mem_offsets)
            .await
            .map_err(Error::ReadToMem)
    }

    async fn write_from_mem<'a>(
        &self,
        file_offset: u64,
        mem: Arc<GuestMemory>,
        mem_offsets: &'a [cros_async::MemRegion],
    ) -> Result<usize> {
        self.inner
            .write_from_mem(Some(file_offset), mem, mem_offsets)
            .await
            .map_err(Error::WriteFromMem)
    }

    async fn punch_hole(&self, file_offset: u64, length: u64) -> Result<()> {
        // KEEP_SIZE keeps the file length unchanged while deallocating the range.
        self.inner
            .fallocate(
                file_offset,
                length,
                (libc::FALLOC_FL_PUNCH_HOLE | libc::FALLOC_FL_KEEP_SIZE) as u32,
            )
            .await
            .map_err(Error::Fallocate)
    }

    async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> Result<()> {
        // Fast path: ask the filesystem to zero the range in place. Not all
        // filesystems support FALLOC_FL_ZERO_RANGE, so failure is non-fatal.
        if self
            .inner
            .fallocate(
                file_offset,
                length,
                (libc::FALLOC_FL_ZERO_RANGE | libc::FALLOC_FL_KEEP_SIZE) as u32,
            )
            .await
            .is_ok()
        {
            return Ok(());
        }

        // Fall back to writing zeros if fallocate doesn't work.
        // Writes proceed in chunks of at most 64 KiB.
        let buf_size = min(length, 0x10000);
        let mut nwritten = 0;
        while nwritten < length {
            let remaining = length - nwritten;
            let write_size = min(remaining, buf_size) as usize;
            let buf = vec![0u8; write_size];
            nwritten += self
                .inner
                .write_from_vec(Some(file_offset + nwritten as u64), buf)
                .await
                .map(|(n, _)| n as u64)
                .map_err(Error::WriteFromVec)?;
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use std::fs::{File, OpenOptions};

    use cros_async::{Executor, MemRegion};
    use vm_memory::{GuestAddress, GuestMemory};

    #[test]
    fn read_async() {
        // Reads 48 bytes from /dev/zero into guest memory via the async path.
        async fn read_zeros_async(ex: &Executor) {
            let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
            let f = File::open("/dev/zero").unwrap();
            let async_file = SingleFileDisk::new(f, ex).unwrap();
            let result = async_file
                .read_to_mem(
                    0,
                    Arc::clone(&guest_mem),
                    &[MemRegion { offset: 0, len: 48 }],
                )
                .await;
            assert_eq!(48, result.unwrap());
        }

        let ex = Executor::new().unwrap();
        ex.run_until(read_zeros_async(&ex)).unwrap();
    }

    #[test]
    fn write_async() {
        // Writes 48 bytes of guest memory to /dev/null via the async path.
        async fn write_zeros_async(ex: &Executor) {
            let guest_mem = Arc::new(GuestMemory::new(&[(GuestAddress(0), 4096)]).unwrap());
            let f = OpenOptions::new().write(true).open("/dev/null").unwrap();
            let async_file = SingleFileDisk::new(f, ex).unwrap();
            let result = async_file
                .write_from_mem(
                    0,
                    Arc::clone(&guest_mem),
                    &[MemRegion { offset: 0, len: 48 }],
                )
                .await;
            assert_eq!(48, result.unwrap());
        }

        let ex = Executor::new().unwrap();
        ex.run_until(write_zeros_async(&ex)).unwrap();
    }

    #[test]
    fn detect_image_type_raw() {
        let mut t = tempfile::tempfile().unwrap();
        // Fill the first block of the file with "random" data.
        let buf = "ABCD".as_bytes().repeat(1024);
        t.write_all(&buf).unwrap();
        let image_type = detect_image_type(&t).expect("failed to detect image type");
        assert_eq!(image_type, ImageType::Raw);
    }

    #[test]
    fn detect_image_type_qcow2() {
        let mut t = tempfile::tempfile().unwrap();
        // Write the qcow2 magic signature. The rest of the header is not filled in, so if
        // detect_image_type is ever updated to validate more of the header, this test would need
        // to be updated.
        let buf: &[u8] = &[0x51, 0x46, 0x49, 0xfb];
        t.write_all(&buf).unwrap();
        let image_type = detect_image_type(&t).expect("failed to detect image type");
        assert_eq!(image_type, ImageType::Qcow2);
    }

    #[test]
    fn detect_image_type_android_sparse() {
        let mut t = tempfile::tempfile().unwrap();
        // Write the Android sparse magic signature. The rest of the header is not filled in, so if
        // detect_image_type is ever updated to validate more of the header, this test would need
        // to be updated.
        let buf: &[u8] = &[0x3a, 0xff, 0x26, 0xed];
        t.write_all(&buf).unwrap();
        let image_type = detect_image_type(&t).expect("failed to detect image type");
        assert_eq!(image_type, ImageType::AndroidSparse);
    }

    #[test]
    #[cfg(feature = "composite-disk")]
    fn detect_image_type_composite() {
        let mut t = tempfile::tempfile().unwrap();
        // Write the composite disk magic signature. The rest of the header is not filled in, so if
        // detect_image_type is ever updated to validate more of the header, this test would need
        // to be updated.
        let buf = "composite_disk\x1d".as_bytes();
        t.write_all(&buf).unwrap();
        let image_type = detect_image_type(&t).expect("failed to detect image type");
        assert_eq!(image_type, ImageType::CompositeDisk);
    }

    #[test]
    fn detect_image_type_small_file() {
        let mut t = tempfile::tempfile().unwrap();
        // Write a file smaller than the four-byte qcow2/sparse magic to ensure the small file logic
        // works correctly and handles it as a raw file.
        let buf: &[u8] = &[0xAA, 0xBB];
        t.write_all(&buf).unwrap();
        let image_type = detect_image_type(&t).expect("failed to detect image type");
        assert_eq!(image_type, ImageType::Raw);
    }
}
use crate::base::*; use crate::config::StripUnconfigured; use crate::hygiene::{ExpnData, ExpnId, ExpnKind, SyntaxContext}; use crate::mbe::macro_rules::annotate_err_with_kind; use crate::placeholders::{placeholder, PlaceholderExpander}; use crate::proc_macro::collect_derives; use rustc_ast_pretty::pprust; use rustc_attr::{self as attr, is_builtin_attr, HasAttrs}; use rustc_data_structures::sync::Lrc; use rustc_errors::{Applicability, FatalError, PResult}; use rustc_feature::Features; use rustc_parse::configure; use rustc_parse::parser::Parser; use rustc_parse::validate_attr; use rustc_parse::DirectoryOwnership; use rustc_session::parse::{feature_err, ParseSess}; use rustc_span::source_map::respan; use rustc_span::symbol::{sym, Symbol}; use rustc_span::{FileName, Span, DUMMY_SP}; use syntax::ast::{self, AttrItem, Block, Ident, LitKind, NodeId, PatKind, Path}; use syntax::ast::{ItemKind, MacArgs, MacStmtStyle, StmtKind}; use syntax::mut_visit::*; use syntax::ptr::P; use syntax::token; use syntax::tokenstream::{TokenStream, TokenTree}; use syntax::util::map_in_place::MapInPlace; use syntax::visit::{self, AssocCtxt, Visitor}; use smallvec::{smallvec, SmallVec}; use std::io::ErrorKind; use std::ops::DerefMut; use std::path::PathBuf; use std::rc::Rc; use std::{iter, mem, slice}; macro_rules! ast_fragments { ( $($Kind:ident($AstTy:ty) { $kind_name:expr; $(one fn $mut_visit_ast:ident; fn $visit_ast:ident;)? $(many fn $flat_map_ast_elt:ident; fn $visit_ast_elt:ident($($args:tt)*);)? fn $make_ast:ident; })* ) => { /// A fragment of AST that can be produced by a single macro expansion. /// Can also serve as an input and intermediate result for macro expansion operations. pub enum AstFragment { OptExpr(Option<P<ast::Expr>>), $($Kind($AstTy),)* } /// "Discriminant" of an AST fragment. 
#[derive(Copy, Clone, PartialEq, Eq)] pub enum AstFragmentKind { OptExpr, $($Kind,)* } impl AstFragmentKind { pub fn name(self) -> &'static str { match self { AstFragmentKind::OptExpr => "expression", $(AstFragmentKind::$Kind => $kind_name,)* } } fn make_from<'a>(self, result: Box<dyn MacResult + 'a>) -> Option<AstFragment> { match self { AstFragmentKind::OptExpr => result.make_expr().map(Some).map(AstFragment::OptExpr), $(AstFragmentKind::$Kind => result.$make_ast().map(AstFragment::$Kind),)* } } } impl AstFragment { pub fn add_placeholders(&mut self, placeholders: &[NodeId]) { if placeholders.is_empty() { return; } match self { $($(AstFragment::$Kind(ast) => ast.extend(placeholders.iter().flat_map(|id| { // We are repeating through arguments with `many`, to do that we have to // mention some macro variable from those arguments even if it's not used. macro _repeating($flat_map_ast_elt) {} placeholder(AstFragmentKind::$Kind, *id, None).$make_ast() })),)?)* _ => panic!("unexpected AST fragment kind") } } pub fn make_opt_expr(self) -> Option<P<ast::Expr>> { match self { AstFragment::OptExpr(expr) => expr, _ => panic!("AstFragment::make_* called on the wrong kind of fragment"), } } $(pub fn $make_ast(self) -> $AstTy { match self { AstFragment::$Kind(ast) => ast, _ => panic!("AstFragment::make_* called on the wrong kind of fragment"), } })* pub fn mut_visit_with<F: MutVisitor>(&mut self, vis: &mut F) { match self { AstFragment::OptExpr(opt_expr) => { visit_clobber(opt_expr, |opt_expr| { if let Some(expr) = opt_expr { vis.filter_map_expr(expr) } else { None } }); } $($(AstFragment::$Kind(ast) => vis.$mut_visit_ast(ast),)?)* $($(AstFragment::$Kind(ast) => ast.flat_map_in_place(|ast| vis.$flat_map_ast_elt(ast)),)?)* } } pub fn visit_with<'a, V: Visitor<'a>>(&'a self, visitor: &mut V) { match *self { AstFragment::OptExpr(Some(ref expr)) => visitor.visit_expr(expr), AstFragment::OptExpr(None) => {} $($(AstFragment::$Kind(ref ast) => visitor.$visit_ast(ast),)?)* 
$($(AstFragment::$Kind(ref ast) => for ast_elt in &ast[..] { visitor.$visit_ast_elt(ast_elt, $($args)*); })?)* } } } impl<'a> MacResult for crate::mbe::macro_rules::ParserAnyMacro<'a> { $(fn $make_ast(self: Box<crate::mbe::macro_rules::ParserAnyMacro<'a>>) -> Option<$AstTy> { Some(self.make(AstFragmentKind::$Kind).$make_ast()) })* } } } ast_fragments! { Expr(P<ast::Expr>) { "expression"; one fn visit_expr; fn visit_expr; fn make_expr; } Pat(P<ast::Pat>) { "pattern"; one fn visit_pat; fn visit_pat; fn make_pat; } Ty(P<ast::Ty>) { "type"; one fn visit_ty; fn visit_ty; fn make_ty; } Stmts(SmallVec<[ast::Stmt; 1]>) { "statement"; many fn flat_map_stmt; fn visit_stmt(); fn make_stmts; } Items(SmallVec<[P<ast::Item>; 1]>) { "item"; many fn flat_map_item; fn visit_item(); fn make_items; } TraitItems(SmallVec<[P<ast::AssocItem>; 1]>) { "trait item"; many fn flat_map_trait_item; fn visit_assoc_item(AssocCtxt::Trait); fn make_trait_items; } ImplItems(SmallVec<[P<ast::AssocItem>; 1]>) { "impl item"; many fn flat_map_impl_item; fn visit_assoc_item(AssocCtxt::Impl); fn make_impl_items; } ForeignItems(SmallVec<[P<ast::ForeignItem>; 1]>) { "foreign item"; many fn flat_map_foreign_item; fn visit_foreign_item(); fn make_foreign_items; } Arms(SmallVec<[ast::Arm; 1]>) { "match arm"; many fn flat_map_arm; fn visit_arm(); fn make_arms; } Fields(SmallVec<[ast::Field; 1]>) { "field expression"; many fn flat_map_field; fn visit_field(); fn make_fields; } FieldPats(SmallVec<[ast::FieldPat; 1]>) { "field pattern"; many fn flat_map_field_pattern; fn visit_field_pattern(); fn make_field_patterns; } GenericParams(SmallVec<[ast::GenericParam; 1]>) { "generic parameter"; many fn flat_map_generic_param; fn visit_generic_param(); fn make_generic_params; } Params(SmallVec<[ast::Param; 1]>) { "function parameter"; many fn flat_map_param; fn visit_param(); fn make_params; } StructFields(SmallVec<[ast::StructField; 1]>) { "field"; many fn flat_map_struct_field; fn visit_struct_field(); fn 
make_struct_fields; } Variants(SmallVec<[ast::Variant; 1]>) { "variant"; many fn flat_map_variant; fn visit_variant(); fn make_variants; } } impl AstFragmentKind { fn dummy(self, span: Span) -> AstFragment { self.make_from(DummyResult::any(span)).expect("couldn't create a dummy AST fragment") } fn expect_from_annotatables<I: IntoIterator<Item = Annotatable>>( self, items: I, ) -> AstFragment { let mut items = items.into_iter(); match self { AstFragmentKind::Arms => { AstFragment::Arms(items.map(Annotatable::expect_arm).collect()) } AstFragmentKind::Fields => { AstFragment::Fields(items.map(Annotatable::expect_field).collect()) } AstFragmentKind::FieldPats => { AstFragment::FieldPats(items.map(Annotatable::expect_field_pattern).collect()) } AstFragmentKind::GenericParams => { AstFragment::GenericParams(items.map(Annotatable::expect_generic_param).collect()) } AstFragmentKind::Params => { AstFragment::Params(items.map(Annotatable::expect_param).collect()) } AstFragmentKind::StructFields => { AstFragment::StructFields(items.map(Annotatable::expect_struct_field).collect()) } AstFragmentKind::Variants => { AstFragment::Variants(items.map(Annotatable::expect_variant).collect()) } AstFragmentKind::Items => { AstFragment::Items(items.map(Annotatable::expect_item).collect()) } AstFragmentKind::ImplItems => { AstFragment::ImplItems(items.map(Annotatable::expect_impl_item).collect()) } AstFragmentKind::TraitItems => { AstFragment::TraitItems(items.map(Annotatable::expect_trait_item).collect()) } AstFragmentKind::ForeignItems => { AstFragment::ForeignItems(items.map(Annotatable::expect_foreign_item).collect()) } AstFragmentKind::Stmts => { AstFragment::Stmts(items.map(Annotatable::expect_stmt).collect()) } AstFragmentKind::Expr => AstFragment::Expr( items.next().expect("expected exactly one expression").expect_expr(), ), AstFragmentKind::OptExpr => { AstFragment::OptExpr(items.next().map(Annotatable::expect_expr)) } AstFragmentKind::Pat | AstFragmentKind::Ty => { 
panic!("patterns and types aren't annotatable") } } } } pub struct Invocation { pub kind: InvocationKind, pub fragment_kind: AstFragmentKind, pub expansion_data: ExpansionData, } pub enum InvocationKind { Bang { mac: ast::Mac, span: Span, }, Attr { attr: ast::Attribute, item: Annotatable, // Required for resolving derive helper attributes. derives: Vec<Path>, // We temporarily report errors for attribute macros placed after derives after_derive: bool, }, Derive { path: Path, item: Annotatable, }, /// "Invocation" that contains all derives from an item, /// broken into multiple `Derive` invocations when expanded. /// FIXME: Find a way to remove it. DeriveContainer { derives: Vec<Path>, item: Annotatable, }, } impl InvocationKind { fn placeholder_visibility(&self) -> Option<ast::Visibility> { // HACK: For unnamed fields placeholders should have the same visibility as the actual // fields because for tuple structs/variants resolve determines visibilities of their // constructor using these field visibilities before attributes on them are are expanded. // The assumption is that the attribute expansion cannot change field visibilities, // and it holds because only inert attributes are supported in this position. match self { InvocationKind::Attr { item: Annotatable::StructField(field), .. } | InvocationKind::Derive { item: Annotatable::StructField(field), .. } | InvocationKind::DeriveContainer { item: Annotatable::StructField(field), .. } if field.ident.is_none() => { Some(field.vis.clone()) } _ => None, } } } impl Invocation { pub fn span(&self) -> Span { match &self.kind { InvocationKind::Bang { span, .. } => *span, InvocationKind::Attr { attr, .. } => attr.span, InvocationKind::Derive { path, .. } => path.span, InvocationKind::DeriveContainer { item, .. } => item.span(), } } } pub struct MacroExpander<'a, 'b> { pub cx: &'a mut ExtCtxt<'b>, monotonic: bool, // cf. 
`cx.monotonic_expander()` } impl<'a, 'b> MacroExpander<'a, 'b> { pub fn new(cx: &'a mut ExtCtxt<'b>, monotonic: bool) -> Self { MacroExpander { cx, monotonic } } pub fn expand_crate(&mut self, mut krate: ast::Crate) -> ast::Crate { let mut module = ModuleData { mod_path: vec![Ident::from_str(&self.cx.ecfg.crate_name)], directory: match self.cx.source_map().span_to_unmapped_path(krate.span) { FileName::Real(path) => path, other => PathBuf::from(other.to_string()), }, }; module.directory.pop(); self.cx.root_path = module.directory.clone(); self.cx.current_expansion.module = Rc::new(module); let orig_mod_span = krate.module.inner; let krate_item = AstFragment::Items(smallvec![P(ast::Item { attrs: krate.attrs, span: krate.span, kind: ast::ItemKind::Mod(krate.module), ident: Ident::invalid(), id: ast::DUMMY_NODE_ID, vis: respan(krate.span.shrink_to_lo(), ast::VisibilityKind::Public), tokens: None, })]); match self.fully_expand_fragment(krate_item).make_items().pop().map(P::into_inner) { Some(ast::Item { attrs, kind: ast::ItemKind::Mod(module), .. }) => { krate.attrs = attrs; krate.module = module; } None => { // Resolution failed so we return an empty expansion krate.attrs = vec![]; krate.module = ast::Mod { inner: orig_mod_span, items: vec![], inline: true }; } Some(ast::Item { span, kind, .. }) => { krate.attrs = vec![]; krate.module = ast::Mod { inner: orig_mod_span, items: vec![], inline: true }; self.cx.span_err( span, &format!( "expected crate top-level item to be a module after macro expansion, found a {}", kind.descriptive_variant() ), ); } }; self.cx.trace_macros_diag(); krate } // Recursively expand all macro invocations in this AST fragment. pub fn fully_expand_fragment(&mut self, input_fragment: AstFragment) -> AstFragment { let orig_expansion_data = self.cx.current_expansion.clone(); self.cx.current_expansion.depth = 0; // Collect all macro invocations and replace them with placeholders. 
let (mut fragment_with_placeholders, mut invocations) = self.collect_invocations(input_fragment, &[]); // Optimization: if we resolve all imports now, // we'll be able to immediately resolve most of imported macros. self.resolve_imports(); // Resolve paths in all invocations and produce output expanded fragments for them, but // do not insert them into our input AST fragment yet, only store in `expanded_fragments`. // The output fragments also go through expansion recursively until no invocations are left. // Unresolved macros produce dummy outputs as a recovery measure. invocations.reverse(); let mut expanded_fragments = Vec::new(); let mut undetermined_invocations = Vec::new(); let (mut progress, mut force) = (false, !self.monotonic); loop { let invoc = if let Some(invoc) = invocations.pop() { invoc } else { self.resolve_imports(); if undetermined_invocations.is_empty() { break; } invocations = mem::take(&mut undetermined_invocations); force = !mem::replace(&mut progress, false); continue; }; let eager_expansion_root = if self.monotonic { invoc.expansion_data.id } else { orig_expansion_data.id }; let res = match self.cx.resolver.resolve_macro_invocation( &invoc, eager_expansion_root, force, ) { Ok(res) => res, Err(Indeterminate) => { undetermined_invocations.push(invoc); continue; } }; progress = true; let ExpansionData { depth, id: expn_id, .. } = invoc.expansion_data; self.cx.current_expansion = invoc.expansion_data.clone(); // FIXME(jseyfried): Refactor out the following logic let (expanded_fragment, new_invocations) = match res { InvocationRes::Single(ext) => { let fragment = self.expand_invoc(invoc, &ext.kind); self.collect_invocations(fragment, &[]) } InvocationRes::DeriveContainer(_exts) => { // FIXME: Consider using the derive resolutions (`_exts`) immediately, // instead of enqueuing the derives to be resolved again later. 
let (derives, item) = match invoc.kind { InvocationKind::DeriveContainer { derives, item } => (derives, item), _ => unreachable!(), }; if !item.derive_allowed() { self.error_derive_forbidden_on_non_adt(&derives, &item); } let mut item = self.fully_configure(item); item.visit_attrs(|attrs| attrs.retain(|a| !a.has_name(sym::derive))); let mut derive_placeholders = Vec::with_capacity(derives.len()); invocations.reserve(derives.len()); for path in derives { let expn_id = ExpnId::fresh(None); derive_placeholders.push(NodeId::placeholder_from_expn_id(expn_id)); invocations.push(Invocation { kind: InvocationKind::Derive { path, item: item.clone() }, fragment_kind: invoc.fragment_kind, expansion_data: ExpansionData { id: expn_id, ..invoc.expansion_data.clone() }, }); } let fragment = invoc.fragment_kind.expect_from_annotatables(::std::iter::once(item)); self.collect_invocations(fragment, &derive_placeholders) } }; if expanded_fragments.len() < depth { expanded_fragments.push(Vec::new()); } expanded_fragments[depth - 1].push((expn_id, expanded_fragment)); if !self.cx.ecfg.single_step { invocations.extend(new_invocations.into_iter().rev()); } } self.cx.current_expansion = orig_expansion_data; // Finally incorporate all the expanded macros into the input AST fragment. 
let mut placeholder_expander = PlaceholderExpander::new(self.cx, self.monotonic); while let Some(expanded_fragments) = expanded_fragments.pop() { for (expn_id, expanded_fragment) in expanded_fragments.into_iter().rev() { placeholder_expander .add(NodeId::placeholder_from_expn_id(expn_id), expanded_fragment); } } fragment_with_placeholders.mut_visit_with(&mut placeholder_expander); fragment_with_placeholders } fn error_derive_forbidden_on_non_adt(&self, derives: &[Path], item: &Annotatable) { let attr = attr::find_by_name(item.attrs(), sym::derive).expect("`derive` attribute should exist"); let span = attr.span; let mut err = self .cx .struct_span_err(span, "`derive` may only be applied to structs, enums and unions"); if let ast::AttrStyle::Inner = attr.style { let trait_list = derives.iter().map(|t| pprust::path_to_string(t)).collect::<Vec<_>>(); let suggestion = format!("#[derive({})]", trait_list.join(", ")); err.span_suggestion( span, "try an outer attribute", suggestion, // We don't 𝑘𝑛𝑜𝑤 that the following item is an ADT Applicability::MaybeIncorrect, ); } err.emit(); } fn resolve_imports(&mut self) { if self.monotonic { self.cx.resolver.resolve_imports(); } } /// Collects all macro invocations reachable at this time in this AST fragment, and replace /// them with "placeholders" - dummy macro invocations with specially crafted `NodeId`s. /// Then call into resolver that builds a skeleton ("reduced graph") of the fragment and /// prepares data for resolving paths of macro invocations. fn collect_invocations( &mut self, mut fragment: AstFragment, extra_placeholders: &[NodeId], ) -> (AstFragment, Vec<Invocation>) { // Resolve `$crate`s in the fragment for pretty-printing. 
self.cx.resolver.resolve_dollar_crates(); let invocations = { let mut collector = InvocationCollector { cfg: StripUnconfigured { sess: self.cx.parse_sess, features: self.cx.ecfg.features, }, cx: self.cx, invocations: Vec::new(), monotonic: self.monotonic, }; fragment.mut_visit_with(&mut collector); fragment.add_placeholders(extra_placeholders); collector.invocations }; if self.monotonic { self.cx .resolver .visit_ast_fragment_with_placeholders(self.cx.current_expansion.id, &fragment); } (fragment, invocations) } fn fully_configure(&mut self, item: Annotatable) -> Annotatable { let mut cfg = StripUnconfigured { sess: self.cx.parse_sess, features: self.cx.ecfg.features }; // Since the item itself has already been configured by the InvocationCollector, // we know that fold result vector will contain exactly one element match item { Annotatable::Item(item) => Annotatable::Item(cfg.flat_map_item(item).pop().unwrap()), Annotatable::TraitItem(item) => { Annotatable::TraitItem(cfg.flat_map_trait_item(item).pop().unwrap()) } Annotatable::ImplItem(item) => { Annotatable::ImplItem(cfg.flat_map_impl_item(item).pop().unwrap()) } Annotatable::ForeignItem(item) => { Annotatable::ForeignItem(cfg.flat_map_foreign_item(item).pop().unwrap()) } Annotatable::Stmt(stmt) => { Annotatable::Stmt(stmt.map(|stmt| cfg.flat_map_stmt(stmt).pop().unwrap())) } Annotatable::Expr(mut expr) => Annotatable::Expr({ cfg.visit_expr(&mut expr); expr }), Annotatable::Arm(arm) => Annotatable::Arm(cfg.flat_map_arm(arm).pop().unwrap()), Annotatable::Field(field) => { Annotatable::Field(cfg.flat_map_field(field).pop().unwrap()) } Annotatable::FieldPat(fp) => { Annotatable::FieldPat(cfg.flat_map_field_pattern(fp).pop().unwrap()) } Annotatable::GenericParam(param) => { Annotatable::GenericParam(cfg.flat_map_generic_param(param).pop().unwrap()) } Annotatable::Param(param) => { Annotatable::Param(cfg.flat_map_param(param).pop().unwrap()) } Annotatable::StructField(sf) => { 
Annotatable::StructField(cfg.flat_map_struct_field(sf).pop().unwrap()) } Annotatable::Variant(v) => Annotatable::Variant(cfg.flat_map_variant(v).pop().unwrap()), } } fn error_recursion_limit_reached(&mut self) { let expn_data = self.cx.current_expansion.id.expn_data(); let suggested_limit = self.cx.ecfg.recursion_limit * 2; self.cx .struct_span_err( expn_data.call_site, &format!("recursion limit reached while expanding `{}`", expn_data.kind.descr()), ) .help(&format!( "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate (`{}`)", suggested_limit, self.cx.ecfg.crate_name, )) .emit(); self.cx.trace_macros_diag(); FatalError.raise(); } /// A macro's expansion does not fit in this fragment kind. /// For example, a non-type macro in a type position. fn error_wrong_fragment_kind(&mut self, kind: AstFragmentKind, mac: &ast::Mac, span: Span) { let msg = format!( "non-{kind} macro in {kind} position: {path}", kind = kind.name(), path = pprust::path_to_string(&mac.path), ); self.cx.span_err(span, &msg); self.cx.trace_macros_diag(); } fn expand_invoc(&mut self, invoc: Invocation, ext: &SyntaxExtensionKind) -> AstFragment { if self.cx.current_expansion.depth > self.cx.ecfg.recursion_limit { self.error_recursion_limit_reached(); } let (fragment_kind, span) = (invoc.fragment_kind, invoc.span()); match invoc.kind { InvocationKind::Bang { mac, .. 
} => match ext { SyntaxExtensionKind::Bang(expander) => { self.gate_proc_macro_expansion_kind(span, fragment_kind); let tok_result = expander.expand(self.cx, span, mac.args.inner_tokens()); self.parse_ast_fragment(tok_result, fragment_kind, &mac.path, span) } SyntaxExtensionKind::LegacyBang(expander) => { let prev = self.cx.current_expansion.prior_type_ascription; self.cx.current_expansion.prior_type_ascription = mac.prior_type_ascription; let tok_result = expander.expand(self.cx, span, mac.args.inner_tokens()); let result = if let Some(result) = fragment_kind.make_from(tok_result) { result } else { self.error_wrong_fragment_kind(fragment_kind, &mac, span); fragment_kind.dummy(span) }; self.cx.current_expansion.prior_type_ascription = prev; result } _ => unreachable!(), }, InvocationKind::Attr { attr, mut item, .. } => match ext { SyntaxExtensionKind::Attr(expander) => { self.gate_proc_macro_input(&item); self.gate_proc_macro_attr_item(span, &item); let item_tok = TokenTree::token( token::Interpolated(Lrc::new(match item { Annotatable::Item(item) => token::NtItem(item), Annotatable::TraitItem(item) => token::NtTraitItem(item), Annotatable::ImplItem(item) => token::NtImplItem(item), Annotatable::ForeignItem(item) => token::NtForeignItem(item), Annotatable::Stmt(stmt) => token::NtStmt(stmt.into_inner()), Annotatable::Expr(expr) => token::NtExpr(expr), Annotatable::Arm(..) | Annotatable::Field(..) | Annotatable::FieldPat(..) | Annotatable::GenericParam(..) | Annotatable::Param(..) | Annotatable::StructField(..) | Annotatable::Variant(..) => panic!("unexpected annotatable"), })), DUMMY_SP, ) .into(); let item = attr.unwrap_normal_item(); if let MacArgs::Eq(..) 
= item.args {
    self.cx.span_err(span, "key-value macro attributes are not supported");
}
let tok_result =
    expander.expand(self.cx, span, item.args.inner_tokens(), item_tok);
self.parse_ast_fragment(tok_result, fragment_kind, &item.path, span)
}
SyntaxExtensionKind::LegacyAttr(expander) => {
    match validate_attr::parse_meta(self.cx.parse_sess, &attr) {
        Ok(meta) => {
            let item = expander.expand(self.cx, span, &meta, item);
            fragment_kind.expect_from_annotatables(item)
        }
        Err(mut err) => {
            err.emit();
            fragment_kind.dummy(span)
        }
    }
}
SyntaxExtensionKind::NonMacroAttr { mark_used } => {
    attr::mark_known(&attr);
    if *mark_used {
        attr::mark_used(&attr);
    }
    // Inert attribute: keep it on the item instead of expanding anything.
    item.visit_attrs(|attrs| attrs.push(attr));
    fragment_kind.expect_from_annotatables(iter::once(item))
}
_ => unreachable!(),
},
InvocationKind::Derive { path, item } => match ext {
    SyntaxExtensionKind::Derive(expander)
    | SyntaxExtensionKind::LegacyDerive(expander) => {
        if !item.derive_allowed() {
            return fragment_kind.dummy(span);
        }
        if let SyntaxExtensionKind::Derive(..) = ext {
            self.gate_proc_macro_input(&item);
        }
        let meta = ast::MetaItem { kind: ast::MetaItemKind::Word, span, path };
        let items = expander.expand(self.cx, span, &meta, item);
        fragment_kind.expect_from_annotatables(items)
    }
    _ => unreachable!(),
},
InvocationKind::DeriveContainer { .. } => unreachable!(),
}
}

/// Feature-gates (`proc_macro_hygiene`) attribute macros applied to targets
/// other than items (statements and expressions).
fn gate_proc_macro_attr_item(&self, span: Span, item: &Annotatable) {
    let kind = match item {
        // Items of all flavors are always allowed.
        Annotatable::Item(_)
        | Annotatable::TraitItem(_)
        | Annotatable::ImplItem(_)
        | Annotatable::ForeignItem(_) => return,
        Annotatable::Stmt(_) => "statements",
        Annotatable::Expr(_) => "expressions",
        Annotatable::Arm(..)
        | Annotatable::Field(..)
        | Annotatable::FieldPat(..)
        | Annotatable::GenericParam(..)
        | Annotatable::Param(..)
        | Annotatable::StructField(..)
        | Annotatable::Variant(..)
        => panic!("unexpected annotatable"),
    };
    if self.cx.ecfg.proc_macro_hygiene() {
        return;
    }
    feature_err(
        self.cx.parse_sess,
        sym::proc_macro_hygiene,
        span,
        &format!("custom attributes cannot be applied to {}", kind),
    )
    .emit();
}

/// Feature-gates non-inline modules (`mod m;`) anywhere inside proc-macro input.
fn gate_proc_macro_input(&self, annotatable: &Annotatable) {
    struct GateProcMacroInput<'a> {
        parse_sess: &'a ParseSess,
    }

    impl<'ast, 'a> Visitor<'ast> for GateProcMacroInput<'a> {
        fn visit_item(&mut self, item: &'ast ast::Item) {
            match &item.kind {
                ast::ItemKind::Mod(module) if !module.inline => {
                    feature_err(
                        self.parse_sess,
                        sym::proc_macro_hygiene,
                        item.span,
                        "non-inline modules in proc macro input are unstable",
                    )
                    .emit();
                }
                _ => {}
            }
            visit::walk_item(self, item);
        }

        // Do not descend into nested macro invocations.
        fn visit_mac(&mut self, _: &'ast ast::Mac) {}
    }

    if !self.cx.ecfg.proc_macro_hygiene() {
        annotatable.visit_with(&mut GateProcMacroInput { parse_sess: self.cx.parse_sess });
    }
}

/// Feature-gates proc-macro expansion into positions other than item-like ones.
fn gate_proc_macro_expansion_kind(&self, span: Span, kind: AstFragmentKind) {
    let kind = match kind {
        AstFragmentKind::Expr | AstFragmentKind::OptExpr => "expressions",
        AstFragmentKind::Pat => "patterns",
        AstFragmentKind::Stmts => "statements",
        // Item-like positions are always allowed.
        AstFragmentKind::Ty
        | AstFragmentKind::Items
        | AstFragmentKind::TraitItems
        | AstFragmentKind::ImplItems
        | AstFragmentKind::ForeignItems => return,
        AstFragmentKind::Arms
        | AstFragmentKind::Fields
        | AstFragmentKind::FieldPats
        | AstFragmentKind::GenericParams
        | AstFragmentKind::Params
        | AstFragmentKind::StructFields
        | AstFragmentKind::Variants => panic!("unexpected AST fragment kind"),
    };
    if self.cx.ecfg.proc_macro_hygiene() {
        return;
    }
    feature_err(
        self.cx.parse_sess,
        sym::proc_macro_hygiene,
        span,
        &format!("procedural macros cannot be expanded to {}", kind),
    )
    .emit();
}

/// Parses a proc-macro's output token stream back into an AST fragment of the
/// expected kind, reporting (but recovering from) parse errors.
fn parse_ast_fragment(
    &mut self,
    toks: TokenStream,
    kind: AstFragmentKind,
    path: &Path,
    span: Span,
) -> AstFragment {
    let mut parser = self.cx.new_parser_from_tts(toks);
    match parse_ast_fragment(&mut parser, kind, false) {
        Ok(fragment) => {
            ensure_complete_parse(&mut parser, path, kind.name(),
span);
            fragment
        }
        Err(mut err) => {
            err.set_span(span);
            annotate_err_with_kind(&mut err, kind, span);
            err.emit();
            self.cx.trace_macros_diag();
            kind.dummy(span)
        }
    }
}
}

/// Parses an AST fragment of the given `kind` from `this` parser's token stream.
/// Multi-element kinds (items, statements, ...) consume until EOF.
pub fn parse_ast_fragment<'a>(
    this: &mut Parser<'a>,
    kind: AstFragmentKind,
    macro_legacy_warnings: bool,
) -> PResult<'a, AstFragment> {
    Ok(match kind {
        AstFragmentKind::Items => {
            let mut items = SmallVec::new();
            while let Some(item) = this.parse_item()? {
                items.push(item);
            }
            AstFragment::Items(items)
        }
        AstFragmentKind::TraitItems => {
            let mut items = SmallVec::new();
            while this.token != token::Eof {
                items.push(this.parse_trait_item(&mut false)?);
            }
            AstFragment::TraitItems(items)
        }
        AstFragmentKind::ImplItems => {
            let mut items = SmallVec::new();
            while this.token != token::Eof {
                items.push(this.parse_impl_item(&mut false)?);
            }
            AstFragment::ImplItems(items)
        }
        AstFragmentKind::ForeignItems => {
            let mut items = SmallVec::new();
            while this.token != token::Eof {
                items.push(this.parse_foreign_item()?);
            }
            AstFragment::ForeignItems(items)
        }
        AstFragmentKind::Stmts => {
            let mut stmts = SmallVec::new();
            // Won't make progress on a `}`, so stop there too.
            while this.token != token::Eof && this.token != token::CloseDelim(token::Brace)
            {
                if let Some(stmt) = this.parse_full_stmt(macro_legacy_warnings)?
                {
                    stmts.push(stmt);
                }
            }
            AstFragment::Stmts(stmts)
        }
        AstFragmentKind::Expr => AstFragment::Expr(this.parse_expr()?),
        AstFragmentKind::OptExpr => {
            if this.token != token::Eof {
                AstFragment::OptExpr(Some(this.parse_expr()?))
            } else {
                AstFragment::OptExpr(None)
            }
        }
        AstFragmentKind::Ty => AstFragment::Ty(this.parse_ty()?),
        AstFragmentKind::Pat => AstFragment::Pat(this.parse_pat(None)?),
        AstFragmentKind::Arms
        | AstFragmentKind::Fields
        | AstFragmentKind::FieldPats
        | AstFragmentKind::GenericParams
        | AstFragmentKind::Params
        | AstFragmentKind::StructFields
        | AstFragmentKind::Variants => panic!("unexpected AST fragment kind"),
    })
}

/// Emits an error if the parser has tokens left over after the macro expansion
/// was parsed, suggesting a missing semicolon for expression contexts.
pub fn ensure_complete_parse<'a>(
    this: &mut Parser<'a>,
    macro_path: &Path,
    kind_name: &str,
    span: Span,
) {
    if this.token != token::Eof {
        let token = pprust::token_to_string(&this.token);
        let msg = format!("macro expansion ignores token `{}` and any following", token);
        // Avoid emitting backtrace info twice.
        let def_site_span = this.token.span.with_ctxt(SyntaxContext::root());
        let mut err = this.struct_span_err(def_site_span, &msg);
        err.span_label(span, "caused by the macro expansion here");
        let msg = format!(
            "the usage of `{}!` is likely invalid in {} context",
            pprust::path_to_string(macro_path),
            kind_name,
        );
        err.note(&msg);
        let semi_span = this.sess.source_map().next_point(span);

        let semi_full_span = semi_span.to(this.sess.source_map().next_point(semi_span));
        match this.sess.source_map().span_to_snippet(semi_full_span) {
            Ok(ref snippet) if &snippet[..]
!= ";" && kind_name == "expression" => {
                err.span_suggestion(
                    semi_span,
                    "you might be missing a semicolon here",
                    ";".to_owned(),
                    Applicability::MaybeIncorrect,
                );
            }
            _ => {}
        }
        err.emit();
    }
}

/// AST visitor that collects macro invocations into `invocations`, replacing
/// each with a placeholder node to be filled in after resolution/expansion.
struct InvocationCollector<'a, 'b> {
    cx: &'a mut ExtCtxt<'b>,
    cfg: StripUnconfigured<'a>,
    invocations: Vec<Invocation>,
    monotonic: bool,
}

impl<'a, 'b> InvocationCollector<'a, 'b> {
    /// Records an invocation and returns the placeholder fragment standing in for it.
    fn collect(&mut self, fragment_kind: AstFragmentKind, kind: InvocationKind) -> AstFragment {
        // Expansion data for all the collected invocations is set upon their resolution,
        // with exception of the derive container case which is not resolved and can get
        // its expansion data immediately.
        let expn_data = match &kind {
            InvocationKind::DeriveContainer { item, .. } => Some(ExpnData {
                parent: self.cx.current_expansion.id,
                ..ExpnData::default(
                    ExpnKind::Macro(MacroKind::Attr, sym::derive),
                    item.span(),
                    self.cx.parse_sess.edition,
                )
            }),
            _ => None,
        };
        let expn_id = ExpnId::fresh(expn_data);
        let vis = kind.placeholder_visibility();
        self.invocations.push(Invocation {
            kind,
            fragment_kind,
            expansion_data: ExpansionData {
                id: expn_id,
                depth: self.cx.current_expansion.depth + 1,
                ..self.cx.current_expansion.clone()
            },
        });
        placeholder(fragment_kind, NodeId::placeholder_from_expn_id(expn_id), vis)
    }

    /// Collects a `mac!(...)` invocation.
    fn collect_bang(&mut self, mac: ast::Mac, span: Span, kind: AstFragmentKind) -> AstFragment {
        self.collect(kind, InvocationKind::Bang { mac, span })
    }

    /// Collects an attribute invocation, or a derive container when no attribute is present.
    fn collect_attr(
        &mut self,
        attr: Option<ast::Attribute>,
        derives: Vec<Path>,
        item: Annotatable,
        kind: AstFragmentKind,
        after_derive: bool,
    ) -> AstFragment {
        self.collect(
            kind,
            match attr {
                Some(attr) => InvocationKind::Attr { attr, item, derives, after_derive },
                None => InvocationKind::DeriveContainer { derives, item },
            },
        )
    }

    /// Removes and returns the first non-builtin attribute from `attrs`,
    /// feature-gating non-builtin inner attributes along the way.
    fn find_attr_invoc(
        &self,
        attrs: &mut Vec<ast::Attribute>,
        after_derive: &mut bool,
    ) -> Option<ast::Attribute> {
        let attr = attrs
            .iter()
            .position(|a| {
                if a.has_name(sym::derive) {
                    *after_derive = true;
                }
                !attr::is_known(a) &&
!is_builtin_attr(a)
            })
            .map(|i| attrs.remove(i));
        if let Some(attr) = &attr {
            if !self.cx.ecfg.custom_inner_attributes()
                && attr.style == ast::AttrStyle::Inner
                && !attr.has_name(sym::test)
            {
                feature_err(
                    &self.cx.parse_sess,
                    sym::custom_inner_attributes,
                    attr.span,
                    "non-builtin inner attributes are unstable",
                )
                .emit();
            }
        }
        attr
    }

    /// If `item` is an attr invocation, remove and return the macro attribute and derive traits.
    fn classify_item(
        &mut self,
        item: &mut impl HasAttrs,
    ) -> (Option<ast::Attribute>, Vec<Path>, /* after_derive */ bool) {
        let (mut attr, mut traits, mut after_derive) = (None, Vec::new(), false);

        item.visit_attrs(|mut attrs| {
            attr = self.find_attr_invoc(&mut attrs, &mut after_derive);
            traits = collect_derives(&mut self.cx, &mut attrs);
        });

        (attr, traits, after_derive)
    }

    /// Alternative to `classify_item()` that ignores `#[derive]` so invocations fallthrough
    /// to the unused-attributes lint (making it an error on statements and expressions
    /// is a breaking change)
    fn classify_nonitem(
        &mut self,
        nonitem: &mut impl HasAttrs,
    ) -> (Option<ast::Attribute>, /* after_derive */ bool) {
        let (mut attr, mut after_derive) = (None, false);

        nonitem.visit_attrs(|mut attrs| {
            attr = self.find_attr_invoc(&mut attrs, &mut after_derive);
        });

        (attr, after_derive)
    }

    /// Strips `#[cfg]`-disabled parts of `node`; `None` means the node is configured out.
    fn configure<T: HasAttrs>(&mut self, node: T) -> Option<T> {
        self.cfg.configure(node)
    }

    // Detect use of feature-gated or invalid attributes on macro invocations
    // since they will not be detected after macro expansion.
fn check_attributes(&mut self, attrs: &[ast::Attribute]) { let features = self.cx.ecfg.features.unwrap(); for attr in attrs.iter() { rustc_ast_passes::feature_gate::check_attribute(attr, self.cx.parse_sess, features); validate_attr::check_meta(self.cx.parse_sess, attr); // macros are expanded before any lint passes so this warning has to be hardcoded if attr.has_name(sym::derive) { self.cx .struct_span_warn(attr.span, "`#[derive]` does nothing on macro invocations") .note("this may become a hard error in a future release") .emit(); } } } } impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { fn visit_expr(&mut self, expr: &mut P<ast::Expr>) { self.cfg.configure_expr(expr); visit_clobber(expr.deref_mut(), |mut expr| { self.cfg.configure_expr_kind(&mut expr.kind); // ignore derives so they remain unused let (attr, after_derive) = self.classify_nonitem(&mut expr); if attr.is_some() { // Collect the invoc regardless of whether or not attributes are permitted here // expansion will eat the attribute so it won't error later. attr.as_ref().map(|a| self.cfg.maybe_emit_expr_attr_err(a)); // AstFragmentKind::Expr requires the macro to emit an expression. 
return self .collect_attr( attr, vec![], Annotatable::Expr(P(expr)), AstFragmentKind::Expr, after_derive, ) .make_expr() .into_inner(); } if let ast::ExprKind::Mac(mac) = expr.kind { self.check_attributes(&expr.attrs); self.collect_bang(mac, expr.span, AstFragmentKind::Expr).make_expr().into_inner() } else { noop_visit_expr(&mut expr, self); expr } }); } fn flat_map_arm(&mut self, arm: ast::Arm) -> SmallVec<[ast::Arm; 1]> { let mut arm = configure!(self, arm); let (attr, traits, after_derive) = self.classify_item(&mut arm); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::Arm(arm), AstFragmentKind::Arms, after_derive, ) .make_arms(); } noop_flat_map_arm(arm, self) } fn flat_map_field(&mut self, field: ast::Field) -> SmallVec<[ast::Field; 1]> { let mut field = configure!(self, field); let (attr, traits, after_derive) = self.classify_item(&mut field); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::Field(field), AstFragmentKind::Fields, after_derive, ) .make_fields(); } noop_flat_map_field(field, self) } fn flat_map_field_pattern(&mut self, fp: ast::FieldPat) -> SmallVec<[ast::FieldPat; 1]> { let mut fp = configure!(self, fp); let (attr, traits, after_derive) = self.classify_item(&mut fp); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::FieldPat(fp), AstFragmentKind::FieldPats, after_derive, ) .make_field_patterns(); } noop_flat_map_field_pattern(fp, self) } fn flat_map_param(&mut self, p: ast::Param) -> SmallVec<[ast::Param; 1]> { let mut p = configure!(self, p); let (attr, traits, after_derive) = self.classify_item(&mut p); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::Param(p), AstFragmentKind::Params, after_derive, ) .make_params(); } noop_flat_map_param(p, self) } fn flat_map_struct_field(&mut self, sf: ast::StructField) -> SmallVec<[ast::StructField; 1]> { let mut sf = 
configure!(self, sf);

        let (attr, traits, after_derive) = self.classify_item(&mut sf);
        if attr.is_some() || !traits.is_empty() {
            return self
                .collect_attr(
                    attr,
                    traits,
                    Annotatable::StructField(sf),
                    AstFragmentKind::StructFields,
                    after_derive,
                )
                .make_struct_fields();
        }

        noop_flat_map_struct_field(sf, self)
    }

    fn flat_map_variant(&mut self, variant: ast::Variant) -> SmallVec<[ast::Variant; 1]> {
        let mut variant = configure!(self, variant);

        let (attr, traits, after_derive) = self.classify_item(&mut variant);
        if attr.is_some() || !traits.is_empty() {
            return self
                .collect_attr(
                    attr,
                    traits,
                    Annotatable::Variant(variant),
                    AstFragmentKind::Variants,
                    after_derive,
                )
                .make_variants();
        }

        noop_flat_map_variant(variant, self)
    }

    fn filter_map_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> {
        let expr = configure!(self, expr);
        expr.filter_map(|mut expr| {
            self.cfg.configure_expr_kind(&mut expr.kind);

            // Ignore derives so they remain unused.
            let (attr, after_derive) = self.classify_nonitem(&mut expr);

            if attr.is_some() {
                attr.as_ref().map(|a| self.cfg.maybe_emit_expr_attr_err(a));

                return self
                    .collect_attr(
                        attr,
                        vec![],
                        Annotatable::Expr(P(expr)),
                        AstFragmentKind::OptExpr,
                        after_derive,
                    )
                    .make_opt_expr()
                    .map(|expr| expr.into_inner());
            }

            if let ast::ExprKind::Mac(mac) = expr.kind {
                self.check_attributes(&expr.attrs);
                self.collect_bang(mac, expr.span, AstFragmentKind::OptExpr)
                    .make_opt_expr()
                    .map(|expr| expr.into_inner())
            } else {
                Some({
                    noop_visit_expr(&mut expr, self);
                    expr
                })
            }
        })
    }

    fn visit_pat(&mut self, pat: &mut P<ast::Pat>) {
        self.cfg.configure_pat(pat);
        match pat.kind {
            PatKind::Mac(_) => {}
            _ => return noop_visit_pat(pat, self),
        }

        visit_clobber(pat, |mut pat| match mem::replace(&mut pat.kind, PatKind::Wild) {
            PatKind::Mac(mac) => self.collect_bang(mac, pat.span, AstFragmentKind::Pat).make_pat(),
            _ => unreachable!(),
        });
    }

    fn flat_map_stmt(&mut self, stmt: ast::Stmt) -> SmallVec<[ast::Stmt; 1]> {
        let mut stmt = configure!(self, stmt);

        // We'll expand attributes on expressions separately.
        if !stmt.is_expr() {
            let (attr, derives, after_derive) = if stmt.is_item() {
                self.classify_item(&mut stmt)
            } else {
                // Ignore derives on non-item statements so it falls through
                // to the unused-attributes lint.
                let (attr, after_derive) = self.classify_nonitem(&mut stmt);
                (attr, vec![], after_derive)
            };

            if attr.is_some() || !derives.is_empty() {
                return self
                    .collect_attr(
                        attr,
                        derives,
                        Annotatable::Stmt(P(stmt)),
                        AstFragmentKind::Stmts,
                        after_derive,
                    )
                    .make_stmts();
            }
        }

        if let StmtKind::Mac(mac) = stmt.kind {
            let (mac, style, attrs) = mac.into_inner();
            self.check_attributes(&attrs);
            let mut placeholder =
                self.collect_bang(mac, stmt.span, AstFragmentKind::Stmts).make_stmts();

            // If this is a macro invocation with a semicolon, then apply that
            // semicolon to the final statement produced by expansion.
            if style == MacStmtStyle::Semicolon {
                if let Some(stmt) = placeholder.pop() {
                    placeholder.push(stmt.add_trailing_semicolon());
                }
            }

            return placeholder;
        }

        // The placeholder expander gives ids to statements, so we avoid folding the id here.
        let ast::Stmt { id, kind, span } = stmt;
        noop_flat_map_stmt_kind(kind, self)
            .into_iter()
            .map(|kind| ast::Stmt { id, kind, span })
            .collect()
    }

    fn visit_block(&mut self, block: &mut P<Block>) {
        // A block does not own the directory used for out-of-line module resolution.
        let old_directory_ownership = self.cx.current_expansion.directory_ownership;
        self.cx.current_expansion.directory_ownership = DirectoryOwnership::UnownedViaBlock;
        noop_visit_block(block, self);
        self.cx.current_expansion.directory_ownership = old_directory_ownership;
    }

    fn flat_map_item(&mut self, item: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
        let mut item = configure!(self, item);

        let (attr, traits, after_derive) = self.classify_item(&mut item);
        if attr.is_some() || !traits.is_empty() {
            return self
                .collect_attr(
                    attr,
                    traits,
                    Annotatable::Item(item),
                    AstFragmentKind::Items,
                    after_derive,
                )
                .make_items();
        }

        match item.kind {
            ast::ItemKind::Mac(..)
=> { self.check_attributes(&item.attrs); item.and_then(|item| match item.kind { ItemKind::Mac(mac) => self .collect( AstFragmentKind::Items, InvocationKind::Bang { mac, span: item.span }, ) .make_items(), _ => unreachable!(), }) } ast::ItemKind::Mod(ast::Mod { inner, .. }) => { if item.ident == Ident::invalid() { return noop_flat_map_item(item, self); } let orig_directory_ownership = self.cx.current_expansion.directory_ownership; let mut module = (*self.cx.current_expansion.module).clone(); module.mod_path.push(item.ident); // Detect if this is an inline module (`mod m { ... }` as opposed to `mod m;`). // In the non-inline case, `inner` is never the dummy span (cf. `parse_item_mod`). // Thus, if `inner` is the dummy span, we know the module is inline. let inline_module = item.span.contains(inner) || inner.is_dummy(); if inline_module { if let Some(path) = attr::first_attr_value_str_by_name(&item.attrs, sym::path) { self.cx.current_expansion.directory_ownership = DirectoryOwnership::Owned { relative: None }; module.directory.push(&*path.as_str()); } else { module.directory.push(&*item.ident.as_str()); } } else { let path = self.cx.parse_sess.source_map().span_to_unmapped_path(inner); let mut path = match path { FileName::Real(path) => path, other => PathBuf::from(other.to_string()), }; let directory_ownership = match path.file_name().unwrap().to_str() { Some("mod.rs") => DirectoryOwnership::Owned { relative: None }, Some(_) => DirectoryOwnership::Owned { relative: Some(item.ident) }, None => DirectoryOwnership::UnownedViaMod, }; path.pop(); module.directory = path; self.cx.current_expansion.directory_ownership = directory_ownership; } let orig_module = mem::replace(&mut self.cx.current_expansion.module, Rc::new(module)); let result = noop_flat_map_item(item, self); self.cx.current_expansion.module = orig_module; self.cx.current_expansion.directory_ownership = orig_directory_ownership; result } _ => noop_flat_map_item(item, self), } } fn flat_map_trait_item(&mut 
self, item: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> { let mut item = configure!(self, item); let (attr, traits, after_derive) = self.classify_item(&mut item); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::TraitItem(item), AstFragmentKind::TraitItems, after_derive, ) .make_trait_items(); } match item.kind { ast::AssocItemKind::Macro(..) => { self.check_attributes(&item.attrs); item.and_then(|item| match item.kind { ast::AssocItemKind::Macro(mac) => self .collect_bang(mac, item.span, AstFragmentKind::TraitItems) .make_trait_items(), _ => unreachable!(), }) } _ => noop_flat_map_assoc_item(item, self), } } fn flat_map_impl_item(&mut self, item: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> { let mut item = configure!(self, item); let (attr, traits, after_derive) = self.classify_item(&mut item); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::ImplItem(item), AstFragmentKind::ImplItems, after_derive, ) .make_impl_items(); } match item.kind { ast::AssocItemKind::Macro(..) 
=> { self.check_attributes(&item.attrs); item.and_then(|item| match item.kind { ast::AssocItemKind::Macro(mac) => self .collect_bang(mac, item.span, AstFragmentKind::ImplItems) .make_impl_items(), _ => unreachable!(), }) } _ => noop_flat_map_assoc_item(item, self), } } fn visit_ty(&mut self, ty: &mut P<ast::Ty>) { match ty.kind { ast::TyKind::Mac(_) => {} _ => return noop_visit_ty(ty, self), }; visit_clobber(ty, |mut ty| match mem::replace(&mut ty.kind, ast::TyKind::Err) { ast::TyKind::Mac(mac) => self.collect_bang(mac, ty.span, AstFragmentKind::Ty).make_ty(), _ => unreachable!(), }); } fn visit_foreign_mod(&mut self, foreign_mod: &mut ast::ForeignMod) { self.cfg.configure_foreign_mod(foreign_mod); noop_visit_foreign_mod(foreign_mod, self); } fn flat_map_foreign_item( &mut self, mut foreign_item: P<ast::ForeignItem>, ) -> SmallVec<[P<ast::ForeignItem>; 1]> { let (attr, traits, after_derive) = self.classify_item(&mut foreign_item); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::ForeignItem(foreign_item), AstFragmentKind::ForeignItems, after_derive, ) .make_foreign_items(); } match foreign_item.kind { ast::ForeignItemKind::Macro(..) => { self.check_attributes(&foreign_item.attrs); foreign_item.and_then(|item| match item.kind { ast::ForeignItemKind::Macro(mac) => self .collect_bang(mac, item.span, AstFragmentKind::ForeignItems) .make_foreign_items(), _ => unreachable!(), }) } _ => noop_flat_map_foreign_item(foreign_item, self), } } fn visit_item_kind(&mut self, item: &mut ast::ItemKind) { match item { ast::ItemKind::MacroDef(..) 
=> {} _ => { self.cfg.configure_item_kind(item); noop_visit_item_kind(item, self); } } } fn flat_map_generic_param( &mut self, param: ast::GenericParam, ) -> SmallVec<[ast::GenericParam; 1]> { let mut param = configure!(self, param); let (attr, traits, after_derive) = self.classify_item(&mut param); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::GenericParam(param), AstFragmentKind::GenericParams, after_derive, ) .make_generic_params(); } noop_flat_map_generic_param(param, self) } fn visit_attribute(&mut self, at: &mut ast::Attribute) { // turn `#[doc(include="filename")]` attributes into `#[doc(include(file="filename", // contents="file contents")]` attributes if !at.check_name(sym::doc) { return noop_visit_attribute(at, self); } if let Some(list) = at.meta_item_list() { if !list.iter().any(|it| it.check_name(sym::include)) { return noop_visit_attribute(at, self); } let mut items = vec![]; for mut it in list { if !it.check_name(sym::include) { items.push({ noop_visit_meta_list_item(&mut it, self); it }); continue; } if let Some(file) = it.value_str() { let err_count = self.cx.parse_sess.span_diagnostic.err_count(); self.check_attributes(slice::from_ref(at)); if self.cx.parse_sess.span_diagnostic.err_count() > err_count { // avoid loading the file if they haven't enabled the feature return noop_visit_attribute(at, self); } let filename = match self.cx.resolve_path(&*file.as_str(), it.span()) { Ok(filename) => filename, Err(mut err) => { err.emit(); continue; } }; match self.cx.source_map().load_file(&filename) { Ok(source_file) => { let src = source_file .src .as_ref() .expect("freshly loaded file should have a source"); let src_interned = Symbol::intern(src.as_str()); let include_info = vec![ ast::NestedMetaItem::MetaItem(attr::mk_name_value_item_str( Ident::with_dummy_span(sym::file), file, DUMMY_SP, )), ast::NestedMetaItem::MetaItem(attr::mk_name_value_item_str( Ident::with_dummy_span(sym::contents), src_interned, 
DUMMY_SP, )), ]; let include_ident = Ident::with_dummy_span(sym::include); let item = attr::mk_list_item(include_ident, include_info); items.push(ast::NestedMetaItem::MetaItem(item)); } Err(e) => { let lit = it.meta_item().and_then(|item| item.name_value_literal()).unwrap(); if e.kind() == ErrorKind::InvalidData { self.cx .struct_span_err( lit.span, &format!("{} wasn't a utf-8 file", filename.display()), ) .span_label(lit.span, "contains invalid utf-8") .emit(); } else { let mut err = self.cx.struct_span_err( lit.span, &format!("couldn't read {}: {}", filename.display(), e), ); err.span_label(lit.span, "couldn't read file"); err.emit(); } } } } else { let mut err = self.cx.struct_span_err( it.span(), &format!("expected path to external documentation"), ); // Check if the user erroneously used `doc(include(...))` syntax. let literal = it.meta_item_list().and_then(|list| { if list.len() == 1 { list[0].literal().map(|literal| &literal.kind) } else { None } }); let (path, applicability) = match &literal { Some(LitKind::Str(path, ..)) => { (path.to_string(), Applicability::MachineApplicable) } _ => (String::from("<path>"), Applicability::HasPlaceholders), }; err.span_suggestion( it.span(), "provide a file path with `=`", format!("include = \"{}\"", path), applicability, ); err.emit(); } } let meta = attr::mk_list_item(Ident::with_dummy_span(sym::doc), items); *at = ast::Attribute { kind: ast::AttrKind::Normal(AttrItem { path: meta.path, args: meta.kind.mac_args(meta.span), }), span: at.span, id: at.id, style: at.style, }; } else { noop_visit_attribute(at, self) } } fn visit_id(&mut self, id: &mut ast::NodeId) { if self.monotonic { debug_assert_eq!(*id, ast::DUMMY_NODE_ID); *id = self.cx.resolver.next_node_id() } } fn visit_fn_decl(&mut self, mut fn_decl: &mut P<ast::FnDecl>) { self.cfg.configure_fn_decl(&mut fn_decl); noop_visit_fn_decl(fn_decl, self); } } pub struct ExpansionConfig<'feat> { pub crate_name: String, pub features: Option<&'feat Features>, pub 
recursion_limit: usize, pub trace_mac: bool, pub should_test: bool, // If false, strip `#[test]` nodes pub single_step: bool, pub keep_macs: bool, } impl<'feat> ExpansionConfig<'feat> { pub fn default(crate_name: String) -> ExpansionConfig<'static> { ExpansionConfig { crate_name, features: None, recursion_limit: 1024, trace_mac: false, should_test: false, single_step: false, keep_macs: false, } } fn proc_macro_hygiene(&self) -> bool { self.features.map_or(false, |features| features.proc_macro_hygiene) } fn custom_inner_attributes(&self) -> bool { self.features.map_or(false, |features| features.custom_inner_attributes) } } expand: simplify flat_map_item use crate::base::*; use crate::config::StripUnconfigured; use crate::hygiene::{ExpnData, ExpnId, ExpnKind, SyntaxContext}; use crate::mbe::macro_rules::annotate_err_with_kind; use crate::placeholders::{placeholder, PlaceholderExpander}; use crate::proc_macro::collect_derives; use rustc_ast_pretty::pprust; use rustc_attr::{self as attr, is_builtin_attr, HasAttrs}; use rustc_data_structures::sync::Lrc; use rustc_errors::{Applicability, FatalError, PResult}; use rustc_feature::Features; use rustc_parse::configure; use rustc_parse::parser::Parser; use rustc_parse::validate_attr; use rustc_parse::DirectoryOwnership; use rustc_session::parse::{feature_err, ParseSess}; use rustc_span::source_map::respan; use rustc_span::symbol::{sym, Symbol}; use rustc_span::{FileName, Span, DUMMY_SP}; use syntax::ast::{self, AttrItem, Block, Ident, LitKind, NodeId, PatKind, Path}; use syntax::ast::{ItemKind, MacArgs, MacStmtStyle, StmtKind}; use syntax::mut_visit::*; use syntax::ptr::P; use syntax::token; use syntax::tokenstream::{TokenStream, TokenTree}; use syntax::util::map_in_place::MapInPlace; use syntax::visit::{self, AssocCtxt, Visitor}; use smallvec::{smallvec, SmallVec}; use std::io::ErrorKind; use std::ops::DerefMut; use std::path::PathBuf; use std::rc::Rc; use std::{iter, mem, slice}; macro_rules! 
ast_fragments { ( $($Kind:ident($AstTy:ty) { $kind_name:expr; $(one fn $mut_visit_ast:ident; fn $visit_ast:ident;)? $(many fn $flat_map_ast_elt:ident; fn $visit_ast_elt:ident($($args:tt)*);)? fn $make_ast:ident; })* ) => { /// A fragment of AST that can be produced by a single macro expansion. /// Can also serve as an input and intermediate result for macro expansion operations. pub enum AstFragment { OptExpr(Option<P<ast::Expr>>), $($Kind($AstTy),)* } /// "Discriminant" of an AST fragment. #[derive(Copy, Clone, PartialEq, Eq)] pub enum AstFragmentKind { OptExpr, $($Kind,)* } impl AstFragmentKind { pub fn name(self) -> &'static str { match self { AstFragmentKind::OptExpr => "expression", $(AstFragmentKind::$Kind => $kind_name,)* } } fn make_from<'a>(self, result: Box<dyn MacResult + 'a>) -> Option<AstFragment> { match self { AstFragmentKind::OptExpr => result.make_expr().map(Some).map(AstFragment::OptExpr), $(AstFragmentKind::$Kind => result.$make_ast().map(AstFragment::$Kind),)* } } } impl AstFragment { pub fn add_placeholders(&mut self, placeholders: &[NodeId]) { if placeholders.is_empty() { return; } match self { $($(AstFragment::$Kind(ast) => ast.extend(placeholders.iter().flat_map(|id| { // We are repeating through arguments with `many`, to do that we have to // mention some macro variable from those arguments even if it's not used. 
macro _repeating($flat_map_ast_elt) {} placeholder(AstFragmentKind::$Kind, *id, None).$make_ast() })),)?)* _ => panic!("unexpected AST fragment kind") } } pub fn make_opt_expr(self) -> Option<P<ast::Expr>> { match self { AstFragment::OptExpr(expr) => expr, _ => panic!("AstFragment::make_* called on the wrong kind of fragment"), } } $(pub fn $make_ast(self) -> $AstTy { match self { AstFragment::$Kind(ast) => ast, _ => panic!("AstFragment::make_* called on the wrong kind of fragment"), } })* pub fn mut_visit_with<F: MutVisitor>(&mut self, vis: &mut F) { match self { AstFragment::OptExpr(opt_expr) => { visit_clobber(opt_expr, |opt_expr| { if let Some(expr) = opt_expr { vis.filter_map_expr(expr) } else { None } }); } $($(AstFragment::$Kind(ast) => vis.$mut_visit_ast(ast),)?)* $($(AstFragment::$Kind(ast) => ast.flat_map_in_place(|ast| vis.$flat_map_ast_elt(ast)),)?)* } } pub fn visit_with<'a, V: Visitor<'a>>(&'a self, visitor: &mut V) { match *self { AstFragment::OptExpr(Some(ref expr)) => visitor.visit_expr(expr), AstFragment::OptExpr(None) => {} $($(AstFragment::$Kind(ref ast) => visitor.$visit_ast(ast),)?)* $($(AstFragment::$Kind(ref ast) => for ast_elt in &ast[..] { visitor.$visit_ast_elt(ast_elt, $($args)*); })?)* } } } impl<'a> MacResult for crate::mbe::macro_rules::ParserAnyMacro<'a> { $(fn $make_ast(self: Box<crate::mbe::macro_rules::ParserAnyMacro<'a>>) -> Option<$AstTy> { Some(self.make(AstFragmentKind::$Kind).$make_ast()) })* } } } ast_fragments! 
{ Expr(P<ast::Expr>) { "expression"; one fn visit_expr; fn visit_expr; fn make_expr; } Pat(P<ast::Pat>) { "pattern"; one fn visit_pat; fn visit_pat; fn make_pat; } Ty(P<ast::Ty>) { "type"; one fn visit_ty; fn visit_ty; fn make_ty; } Stmts(SmallVec<[ast::Stmt; 1]>) { "statement"; many fn flat_map_stmt; fn visit_stmt(); fn make_stmts; } Items(SmallVec<[P<ast::Item>; 1]>) { "item"; many fn flat_map_item; fn visit_item(); fn make_items; } TraitItems(SmallVec<[P<ast::AssocItem>; 1]>) { "trait item"; many fn flat_map_trait_item; fn visit_assoc_item(AssocCtxt::Trait); fn make_trait_items; } ImplItems(SmallVec<[P<ast::AssocItem>; 1]>) { "impl item"; many fn flat_map_impl_item; fn visit_assoc_item(AssocCtxt::Impl); fn make_impl_items; } ForeignItems(SmallVec<[P<ast::ForeignItem>; 1]>) { "foreign item"; many fn flat_map_foreign_item; fn visit_foreign_item(); fn make_foreign_items; } Arms(SmallVec<[ast::Arm; 1]>) { "match arm"; many fn flat_map_arm; fn visit_arm(); fn make_arms; } Fields(SmallVec<[ast::Field; 1]>) { "field expression"; many fn flat_map_field; fn visit_field(); fn make_fields; } FieldPats(SmallVec<[ast::FieldPat; 1]>) { "field pattern"; many fn flat_map_field_pattern; fn visit_field_pattern(); fn make_field_patterns; } GenericParams(SmallVec<[ast::GenericParam; 1]>) { "generic parameter"; many fn flat_map_generic_param; fn visit_generic_param(); fn make_generic_params; } Params(SmallVec<[ast::Param; 1]>) { "function parameter"; many fn flat_map_param; fn visit_param(); fn make_params; } StructFields(SmallVec<[ast::StructField; 1]>) { "field"; many fn flat_map_struct_field; fn visit_struct_field(); fn make_struct_fields; } Variants(SmallVec<[ast::Variant; 1]>) { "variant"; many fn flat_map_variant; fn visit_variant(); fn make_variants; } } impl AstFragmentKind { fn dummy(self, span: Span) -> AstFragment { self.make_from(DummyResult::any(span)).expect("couldn't create a dummy AST fragment") } fn expect_from_annotatables<I: IntoIterator<Item = Annotatable>>( 
self, items: I, ) -> AstFragment { let mut items = items.into_iter(); match self { AstFragmentKind::Arms => { AstFragment::Arms(items.map(Annotatable::expect_arm).collect()) } AstFragmentKind::Fields => { AstFragment::Fields(items.map(Annotatable::expect_field).collect()) } AstFragmentKind::FieldPats => { AstFragment::FieldPats(items.map(Annotatable::expect_field_pattern).collect()) } AstFragmentKind::GenericParams => { AstFragment::GenericParams(items.map(Annotatable::expect_generic_param).collect()) } AstFragmentKind::Params => { AstFragment::Params(items.map(Annotatable::expect_param).collect()) } AstFragmentKind::StructFields => { AstFragment::StructFields(items.map(Annotatable::expect_struct_field).collect()) } AstFragmentKind::Variants => { AstFragment::Variants(items.map(Annotatable::expect_variant).collect()) } AstFragmentKind::Items => { AstFragment::Items(items.map(Annotatable::expect_item).collect()) } AstFragmentKind::ImplItems => { AstFragment::ImplItems(items.map(Annotatable::expect_impl_item).collect()) } AstFragmentKind::TraitItems => { AstFragment::TraitItems(items.map(Annotatable::expect_trait_item).collect()) } AstFragmentKind::ForeignItems => { AstFragment::ForeignItems(items.map(Annotatable::expect_foreign_item).collect()) } AstFragmentKind::Stmts => { AstFragment::Stmts(items.map(Annotatable::expect_stmt).collect()) } AstFragmentKind::Expr => AstFragment::Expr( items.next().expect("expected exactly one expression").expect_expr(), ), AstFragmentKind::OptExpr => { AstFragment::OptExpr(items.next().map(Annotatable::expect_expr)) } AstFragmentKind::Pat | AstFragmentKind::Ty => { panic!("patterns and types aren't annotatable") } } } } pub struct Invocation { pub kind: InvocationKind, pub fragment_kind: AstFragmentKind, pub expansion_data: ExpansionData, } pub enum InvocationKind { Bang { mac: ast::Mac, span: Span, }, Attr { attr: ast::Attribute, item: Annotatable, // Required for resolving derive helper attributes. 
        // Derive paths declared on the same item, needed to resolve derive helper attributes.
        derives: Vec<Path>,
        // We temporarily report errors for attribute macros placed after derives
        after_derive: bool,
    },
    Derive {
        path: Path,
        item: Annotatable,
    },
    /// "Invocation" that contains all derives from an item,
    /// broken into multiple `Derive` invocations when expanded.
    /// FIXME: Find a way to remove it.
    DeriveContainer {
        derives: Vec<Path>,
        item: Annotatable,
    },
}

impl InvocationKind {
    /// Visibility to give the placeholder generated for this invocation, if any.
    fn placeholder_visibility(&self) -> Option<ast::Visibility> {
        // HACK: For unnamed fields placeholders should have the same visibility as the actual
        // fields because for tuple structs/variants resolve determines visibilities of their
        // constructor using these field visibilities before attributes on them are expanded.
        // The assumption is that the attribute expansion cannot change field visibilities,
        // and it holds because only inert attributes are supported in this position.
        match self {
            InvocationKind::Attr { item: Annotatable::StructField(field), .. }
            | InvocationKind::Derive { item: Annotatable::StructField(field), .. }
            | InvocationKind::DeriveContainer { item: Annotatable::StructField(field), .. }
                if field.ident.is_none() =>
            {
                Some(field.vis.clone())
            }
            _ => None,
        }
    }
}

impl Invocation {
    /// The span pointing at this macro invocation (call site for bangs, the attribute for
    /// attribute macros, the derive path for derives, the whole item for derive containers).
    pub fn span(&self) -> Span {
        match &self.kind {
            InvocationKind::Bang { span, .. } => *span,
            InvocationKind::Attr { attr, .. } => attr.span,
            InvocationKind::Derive { path, .. } => path.span,
            InvocationKind::DeriveContainer { item, .. } => item.span(),
        }
    }
}

/// Driver for macro expansion over a whole crate or a single AST fragment.
pub struct MacroExpander<'a, 'b> {
    pub cx: &'a mut ExtCtxt<'b>,
    monotonic: bool, // cf. `cx.monotonic_expander()`
}

impl<'a, 'b> MacroExpander<'a, 'b> {
    pub fn new(cx: &'a mut ExtCtxt<'b>, monotonic: bool) -> Self {
        MacroExpander { cx, monotonic }
    }

    /// Fully expands the crate: wraps the crate contents in a synthetic top-level `mod` item,
    /// runs `fully_expand_fragment` on it, then unpacks the expanded module back into `krate`.
    pub fn expand_crate(&mut self, mut krate: ast::Crate) -> ast::Crate {
        // Seed the module path / directory info used to resolve `mod foo;` during expansion.
        let mut module = ModuleData {
            mod_path: vec![Ident::from_str(&self.cx.ecfg.crate_name)],
            directory: match self.cx.source_map().span_to_unmapped_path(krate.span) {
                FileName::Real(path) => path,
                other => PathBuf::from(other.to_string()),
            },
        };
        module.directory.pop();
        self.cx.root_path = module.directory.clone();
        self.cx.current_expansion.module = Rc::new(module);

        let orig_mod_span = krate.module.inner;
        // Synthesize an item so the crate can be processed as an `Items` fragment.
        let krate_item = AstFragment::Items(smallvec![P(ast::Item {
            attrs: krate.attrs,
            span: krate.span,
            kind: ast::ItemKind::Mod(krate.module),
            ident: Ident::invalid(),
            id: ast::DUMMY_NODE_ID,
            vis: respan(krate.span.shrink_to_lo(), ast::VisibilityKind::Public),
            tokens: None,
        })]);

        match self.fully_expand_fragment(krate_item).make_items().pop().map(P::into_inner) {
            Some(ast::Item { attrs, kind: ast::ItemKind::Mod(module), .. }) => {
                krate.attrs = attrs;
                krate.module = module;
            }
            None => {
                // Resolution failed so we return an empty expansion
                krate.attrs = vec![];
                krate.module = ast::Mod { inner: orig_mod_span, items: vec![], inline: true };
            }
            // An attribute macro replaced the module with some other item kind; report it
            // and recover with an empty module.
            Some(ast::Item { span, kind, .. }) => {
                krate.attrs = vec![];
                krate.module = ast::Mod { inner: orig_mod_span, items: vec![], inline: true };
                self.cx.span_err(
                    span,
                    &format!(
                        "expected crate top-level item to be a module after macro expansion, found a {}",
                        kind.descriptive_variant()
                    ),
                );
            }
        };
        self.cx.trace_macros_diag();
        krate
    }

    // Recursively expand all macro invocations in this AST fragment.
    pub fn fully_expand_fragment(&mut self, input_fragment: AstFragment) -> AstFragment {
        let orig_expansion_data = self.cx.current_expansion.clone();
        self.cx.current_expansion.depth = 0;

        // Collect all macro invocations and replace them with placeholders.
let (mut fragment_with_placeholders, mut invocations) = self.collect_invocations(input_fragment, &[]); // Optimization: if we resolve all imports now, // we'll be able to immediately resolve most of imported macros. self.resolve_imports(); // Resolve paths in all invocations and produce output expanded fragments for them, but // do not insert them into our input AST fragment yet, only store in `expanded_fragments`. // The output fragments also go through expansion recursively until no invocations are left. // Unresolved macros produce dummy outputs as a recovery measure. invocations.reverse(); let mut expanded_fragments = Vec::new(); let mut undetermined_invocations = Vec::new(); let (mut progress, mut force) = (false, !self.monotonic); loop { let invoc = if let Some(invoc) = invocations.pop() { invoc } else { self.resolve_imports(); if undetermined_invocations.is_empty() { break; } invocations = mem::take(&mut undetermined_invocations); force = !mem::replace(&mut progress, false); continue; }; let eager_expansion_root = if self.monotonic { invoc.expansion_data.id } else { orig_expansion_data.id }; let res = match self.cx.resolver.resolve_macro_invocation( &invoc, eager_expansion_root, force, ) { Ok(res) => res, Err(Indeterminate) => { undetermined_invocations.push(invoc); continue; } }; progress = true; let ExpansionData { depth, id: expn_id, .. } = invoc.expansion_data; self.cx.current_expansion = invoc.expansion_data.clone(); // FIXME(jseyfried): Refactor out the following logic let (expanded_fragment, new_invocations) = match res { InvocationRes::Single(ext) => { let fragment = self.expand_invoc(invoc, &ext.kind); self.collect_invocations(fragment, &[]) } InvocationRes::DeriveContainer(_exts) => { // FIXME: Consider using the derive resolutions (`_exts`) immediately, // instead of enqueuing the derives to be resolved again later. 
let (derives, item) = match invoc.kind { InvocationKind::DeriveContainer { derives, item } => (derives, item), _ => unreachable!(), }; if !item.derive_allowed() { self.error_derive_forbidden_on_non_adt(&derives, &item); } let mut item = self.fully_configure(item); item.visit_attrs(|attrs| attrs.retain(|a| !a.has_name(sym::derive))); let mut derive_placeholders = Vec::with_capacity(derives.len()); invocations.reserve(derives.len()); for path in derives { let expn_id = ExpnId::fresh(None); derive_placeholders.push(NodeId::placeholder_from_expn_id(expn_id)); invocations.push(Invocation { kind: InvocationKind::Derive { path, item: item.clone() }, fragment_kind: invoc.fragment_kind, expansion_data: ExpansionData { id: expn_id, ..invoc.expansion_data.clone() }, }); } let fragment = invoc.fragment_kind.expect_from_annotatables(::std::iter::once(item)); self.collect_invocations(fragment, &derive_placeholders) } }; if expanded_fragments.len() < depth { expanded_fragments.push(Vec::new()); } expanded_fragments[depth - 1].push((expn_id, expanded_fragment)); if !self.cx.ecfg.single_step { invocations.extend(new_invocations.into_iter().rev()); } } self.cx.current_expansion = orig_expansion_data; // Finally incorporate all the expanded macros into the input AST fragment. 
let mut placeholder_expander = PlaceholderExpander::new(self.cx, self.monotonic); while let Some(expanded_fragments) = expanded_fragments.pop() { for (expn_id, expanded_fragment) in expanded_fragments.into_iter().rev() { placeholder_expander .add(NodeId::placeholder_from_expn_id(expn_id), expanded_fragment); } } fragment_with_placeholders.mut_visit_with(&mut placeholder_expander); fragment_with_placeholders } fn error_derive_forbidden_on_non_adt(&self, derives: &[Path], item: &Annotatable) { let attr = attr::find_by_name(item.attrs(), sym::derive).expect("`derive` attribute should exist"); let span = attr.span; let mut err = self .cx .struct_span_err(span, "`derive` may only be applied to structs, enums and unions"); if let ast::AttrStyle::Inner = attr.style { let trait_list = derives.iter().map(|t| pprust::path_to_string(t)).collect::<Vec<_>>(); let suggestion = format!("#[derive({})]", trait_list.join(", ")); err.span_suggestion( span, "try an outer attribute", suggestion, // We don't 𝑘𝑛𝑜𝑤 that the following item is an ADT Applicability::MaybeIncorrect, ); } err.emit(); } fn resolve_imports(&mut self) { if self.monotonic { self.cx.resolver.resolve_imports(); } } /// Collects all macro invocations reachable at this time in this AST fragment, and replace /// them with "placeholders" - dummy macro invocations with specially crafted `NodeId`s. /// Then call into resolver that builds a skeleton ("reduced graph") of the fragment and /// prepares data for resolving paths of macro invocations. fn collect_invocations( &mut self, mut fragment: AstFragment, extra_placeholders: &[NodeId], ) -> (AstFragment, Vec<Invocation>) { // Resolve `$crate`s in the fragment for pretty-printing. 
self.cx.resolver.resolve_dollar_crates(); let invocations = { let mut collector = InvocationCollector { cfg: StripUnconfigured { sess: self.cx.parse_sess, features: self.cx.ecfg.features, }, cx: self.cx, invocations: Vec::new(), monotonic: self.monotonic, }; fragment.mut_visit_with(&mut collector); fragment.add_placeholders(extra_placeholders); collector.invocations }; if self.monotonic { self.cx .resolver .visit_ast_fragment_with_placeholders(self.cx.current_expansion.id, &fragment); } (fragment, invocations) } fn fully_configure(&mut self, item: Annotatable) -> Annotatable { let mut cfg = StripUnconfigured { sess: self.cx.parse_sess, features: self.cx.ecfg.features }; // Since the item itself has already been configured by the InvocationCollector, // we know that fold result vector will contain exactly one element match item { Annotatable::Item(item) => Annotatable::Item(cfg.flat_map_item(item).pop().unwrap()), Annotatable::TraitItem(item) => { Annotatable::TraitItem(cfg.flat_map_trait_item(item).pop().unwrap()) } Annotatable::ImplItem(item) => { Annotatable::ImplItem(cfg.flat_map_impl_item(item).pop().unwrap()) } Annotatable::ForeignItem(item) => { Annotatable::ForeignItem(cfg.flat_map_foreign_item(item).pop().unwrap()) } Annotatable::Stmt(stmt) => { Annotatable::Stmt(stmt.map(|stmt| cfg.flat_map_stmt(stmt).pop().unwrap())) } Annotatable::Expr(mut expr) => Annotatable::Expr({ cfg.visit_expr(&mut expr); expr }), Annotatable::Arm(arm) => Annotatable::Arm(cfg.flat_map_arm(arm).pop().unwrap()), Annotatable::Field(field) => { Annotatable::Field(cfg.flat_map_field(field).pop().unwrap()) } Annotatable::FieldPat(fp) => { Annotatable::FieldPat(cfg.flat_map_field_pattern(fp).pop().unwrap()) } Annotatable::GenericParam(param) => { Annotatable::GenericParam(cfg.flat_map_generic_param(param).pop().unwrap()) } Annotatable::Param(param) => { Annotatable::Param(cfg.flat_map_param(param).pop().unwrap()) } Annotatable::StructField(sf) => { 
Annotatable::StructField(cfg.flat_map_struct_field(sf).pop().unwrap()) } Annotatable::Variant(v) => Annotatable::Variant(cfg.flat_map_variant(v).pop().unwrap()), } } fn error_recursion_limit_reached(&mut self) { let expn_data = self.cx.current_expansion.id.expn_data(); let suggested_limit = self.cx.ecfg.recursion_limit * 2; self.cx .struct_span_err( expn_data.call_site, &format!("recursion limit reached while expanding `{}`", expn_data.kind.descr()), ) .help(&format!( "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate (`{}`)", suggested_limit, self.cx.ecfg.crate_name, )) .emit(); self.cx.trace_macros_diag(); FatalError.raise(); } /// A macro's expansion does not fit in this fragment kind. /// For example, a non-type macro in a type position. fn error_wrong_fragment_kind(&mut self, kind: AstFragmentKind, mac: &ast::Mac, span: Span) { let msg = format!( "non-{kind} macro in {kind} position: {path}", kind = kind.name(), path = pprust::path_to_string(&mac.path), ); self.cx.span_err(span, &msg); self.cx.trace_macros_diag(); } fn expand_invoc(&mut self, invoc: Invocation, ext: &SyntaxExtensionKind) -> AstFragment { if self.cx.current_expansion.depth > self.cx.ecfg.recursion_limit { self.error_recursion_limit_reached(); } let (fragment_kind, span) = (invoc.fragment_kind, invoc.span()); match invoc.kind { InvocationKind::Bang { mac, .. 
} => match ext { SyntaxExtensionKind::Bang(expander) => { self.gate_proc_macro_expansion_kind(span, fragment_kind); let tok_result = expander.expand(self.cx, span, mac.args.inner_tokens()); self.parse_ast_fragment(tok_result, fragment_kind, &mac.path, span) } SyntaxExtensionKind::LegacyBang(expander) => { let prev = self.cx.current_expansion.prior_type_ascription; self.cx.current_expansion.prior_type_ascription = mac.prior_type_ascription; let tok_result = expander.expand(self.cx, span, mac.args.inner_tokens()); let result = if let Some(result) = fragment_kind.make_from(tok_result) { result } else { self.error_wrong_fragment_kind(fragment_kind, &mac, span); fragment_kind.dummy(span) }; self.cx.current_expansion.prior_type_ascription = prev; result } _ => unreachable!(), }, InvocationKind::Attr { attr, mut item, .. } => match ext { SyntaxExtensionKind::Attr(expander) => { self.gate_proc_macro_input(&item); self.gate_proc_macro_attr_item(span, &item); let item_tok = TokenTree::token( token::Interpolated(Lrc::new(match item { Annotatable::Item(item) => token::NtItem(item), Annotatable::TraitItem(item) => token::NtTraitItem(item), Annotatable::ImplItem(item) => token::NtImplItem(item), Annotatable::ForeignItem(item) => token::NtForeignItem(item), Annotatable::Stmt(stmt) => token::NtStmt(stmt.into_inner()), Annotatable::Expr(expr) => token::NtExpr(expr), Annotatable::Arm(..) | Annotatable::Field(..) | Annotatable::FieldPat(..) | Annotatable::GenericParam(..) | Annotatable::Param(..) | Annotatable::StructField(..) | Annotatable::Variant(..) => panic!("unexpected annotatable"), })), DUMMY_SP, ) .into(); let item = attr.unwrap_normal_item(); if let MacArgs::Eq(..) 
= item.args { self.cx.span_err(span, "key-value macro attributes are not supported"); } let tok_result = expander.expand(self.cx, span, item.args.inner_tokens(), item_tok); self.parse_ast_fragment(tok_result, fragment_kind, &item.path, span) } SyntaxExtensionKind::LegacyAttr(expander) => { match validate_attr::parse_meta(self.cx.parse_sess, &attr) { Ok(meta) => { let item = expander.expand(self.cx, span, &meta, item); fragment_kind.expect_from_annotatables(item) } Err(mut err) => { err.emit(); fragment_kind.dummy(span) } } } SyntaxExtensionKind::NonMacroAttr { mark_used } => { attr::mark_known(&attr); if *mark_used { attr::mark_used(&attr); } item.visit_attrs(|attrs| attrs.push(attr)); fragment_kind.expect_from_annotatables(iter::once(item)) } _ => unreachable!(), }, InvocationKind::Derive { path, item } => match ext { SyntaxExtensionKind::Derive(expander) | SyntaxExtensionKind::LegacyDerive(expander) => { if !item.derive_allowed() { return fragment_kind.dummy(span); } if let SyntaxExtensionKind::Derive(..) = ext { self.gate_proc_macro_input(&item); } let meta = ast::MetaItem { kind: ast::MetaItemKind::Word, span, path }; let items = expander.expand(self.cx, span, &meta, item); fragment_kind.expect_from_annotatables(items) } _ => unreachable!(), }, InvocationKind::DeriveContainer { .. } => unreachable!(), } } fn gate_proc_macro_attr_item(&self, span: Span, item: &Annotatable) { let kind = match item { Annotatable::Item(_) | Annotatable::TraitItem(_) | Annotatable::ImplItem(_) | Annotatable::ForeignItem(_) => return, Annotatable::Stmt(_) => "statements", Annotatable::Expr(_) => "expressions", Annotatable::Arm(..) | Annotatable::Field(..) | Annotatable::FieldPat(..) | Annotatable::GenericParam(..) | Annotatable::Param(..) | Annotatable::StructField(..) | Annotatable::Variant(..) 
=> panic!("unexpected annotatable"), }; if self.cx.ecfg.proc_macro_hygiene() { return; } feature_err( self.cx.parse_sess, sym::proc_macro_hygiene, span, &format!("custom attributes cannot be applied to {}", kind), ) .emit(); } fn gate_proc_macro_input(&self, annotatable: &Annotatable) { struct GateProcMacroInput<'a> { parse_sess: &'a ParseSess, } impl<'ast, 'a> Visitor<'ast> for GateProcMacroInput<'a> { fn visit_item(&mut self, item: &'ast ast::Item) { match &item.kind { ast::ItemKind::Mod(module) if !module.inline => { feature_err( self.parse_sess, sym::proc_macro_hygiene, item.span, "non-inline modules in proc macro input are unstable", ) .emit(); } _ => {} } visit::walk_item(self, item); } fn visit_mac(&mut self, _: &'ast ast::Mac) {} } if !self.cx.ecfg.proc_macro_hygiene() { annotatable.visit_with(&mut GateProcMacroInput { parse_sess: self.cx.parse_sess }); } } fn gate_proc_macro_expansion_kind(&self, span: Span, kind: AstFragmentKind) { let kind = match kind { AstFragmentKind::Expr | AstFragmentKind::OptExpr => "expressions", AstFragmentKind::Pat => "patterns", AstFragmentKind::Stmts => "statements", AstFragmentKind::Ty | AstFragmentKind::Items | AstFragmentKind::TraitItems | AstFragmentKind::ImplItems | AstFragmentKind::ForeignItems => return, AstFragmentKind::Arms | AstFragmentKind::Fields | AstFragmentKind::FieldPats | AstFragmentKind::GenericParams | AstFragmentKind::Params | AstFragmentKind::StructFields | AstFragmentKind::Variants => panic!("unexpected AST fragment kind"), }; if self.cx.ecfg.proc_macro_hygiene() { return; } feature_err( self.cx.parse_sess, sym::proc_macro_hygiene, span, &format!("procedural macros cannot be expanded to {}", kind), ) .emit(); } fn parse_ast_fragment( &mut self, toks: TokenStream, kind: AstFragmentKind, path: &Path, span: Span, ) -> AstFragment { let mut parser = self.cx.new_parser_from_tts(toks); match parse_ast_fragment(&mut parser, kind, false) { Ok(fragment) => { ensure_complete_parse(&mut parser, path, kind.name(), 
span); fragment } Err(mut err) => { err.set_span(span); annotate_err_with_kind(&mut err, kind, span); err.emit(); self.cx.trace_macros_diag(); kind.dummy(span) } } } } pub fn parse_ast_fragment<'a>( this: &mut Parser<'a>, kind: AstFragmentKind, macro_legacy_warnings: bool, ) -> PResult<'a, AstFragment> { Ok(match kind { AstFragmentKind::Items => { let mut items = SmallVec::new(); while let Some(item) = this.parse_item()? { items.push(item); } AstFragment::Items(items) } AstFragmentKind::TraitItems => { let mut items = SmallVec::new(); while this.token != token::Eof { items.push(this.parse_trait_item(&mut false)?); } AstFragment::TraitItems(items) } AstFragmentKind::ImplItems => { let mut items = SmallVec::new(); while this.token != token::Eof { items.push(this.parse_impl_item(&mut false)?); } AstFragment::ImplItems(items) } AstFragmentKind::ForeignItems => { let mut items = SmallVec::new(); while this.token != token::Eof { items.push(this.parse_foreign_item()?); } AstFragment::ForeignItems(items) } AstFragmentKind::Stmts => { let mut stmts = SmallVec::new(); while this.token != token::Eof && // won't make progress on a `}` this.token != token::CloseDelim(token::Brace) { if let Some(stmt) = this.parse_full_stmt(macro_legacy_warnings)? 
{ stmts.push(stmt); } } AstFragment::Stmts(stmts) } AstFragmentKind::Expr => AstFragment::Expr(this.parse_expr()?), AstFragmentKind::OptExpr => { if this.token != token::Eof { AstFragment::OptExpr(Some(this.parse_expr()?)) } else { AstFragment::OptExpr(None) } } AstFragmentKind::Ty => AstFragment::Ty(this.parse_ty()?), AstFragmentKind::Pat => AstFragment::Pat(this.parse_pat(None)?), AstFragmentKind::Arms | AstFragmentKind::Fields | AstFragmentKind::FieldPats | AstFragmentKind::GenericParams | AstFragmentKind::Params | AstFragmentKind::StructFields | AstFragmentKind::Variants => panic!("unexpected AST fragment kind"), }) } pub fn ensure_complete_parse<'a>( this: &mut Parser<'a>, macro_path: &Path, kind_name: &str, span: Span, ) { if this.token != token::Eof { let token = pprust::token_to_string(&this.token); let msg = format!("macro expansion ignores token `{}` and any following", token); // Avoid emitting backtrace info twice. let def_site_span = this.token.span.with_ctxt(SyntaxContext::root()); let mut err = this.struct_span_err(def_site_span, &msg); err.span_label(span, "caused by the macro expansion here"); let msg = format!( "the usage of `{}!` is likely invalid in {} context", pprust::path_to_string(macro_path), kind_name, ); err.note(&msg); let semi_span = this.sess.source_map().next_point(span); let semi_full_span = semi_span.to(this.sess.source_map().next_point(semi_span)); match this.sess.source_map().span_to_snippet(semi_full_span) { Ok(ref snippet) if &snippet[..] 
!= ";" && kind_name == "expression" => { err.span_suggestion( semi_span, "you might be missing a semicolon here", ";".to_owned(), Applicability::MaybeIncorrect, ); } _ => {} } err.emit(); } } struct InvocationCollector<'a, 'b> { cx: &'a mut ExtCtxt<'b>, cfg: StripUnconfigured<'a>, invocations: Vec<Invocation>, monotonic: bool, } impl<'a, 'b> InvocationCollector<'a, 'b> { fn collect(&mut self, fragment_kind: AstFragmentKind, kind: InvocationKind) -> AstFragment { // Expansion data for all the collected invocations is set upon their resolution, // with exception of the derive container case which is not resolved and can get // its expansion data immediately. let expn_data = match &kind { InvocationKind::DeriveContainer { item, .. } => Some(ExpnData { parent: self.cx.current_expansion.id, ..ExpnData::default( ExpnKind::Macro(MacroKind::Attr, sym::derive), item.span(), self.cx.parse_sess.edition, ) }), _ => None, }; let expn_id = ExpnId::fresh(expn_data); let vis = kind.placeholder_visibility(); self.invocations.push(Invocation { kind, fragment_kind, expansion_data: ExpansionData { id: expn_id, depth: self.cx.current_expansion.depth + 1, ..self.cx.current_expansion.clone() }, }); placeholder(fragment_kind, NodeId::placeholder_from_expn_id(expn_id), vis) } fn collect_bang(&mut self, mac: ast::Mac, span: Span, kind: AstFragmentKind) -> AstFragment { self.collect(kind, InvocationKind::Bang { mac, span }) } fn collect_attr( &mut self, attr: Option<ast::Attribute>, derives: Vec<Path>, item: Annotatable, kind: AstFragmentKind, after_derive: bool, ) -> AstFragment { self.collect( kind, match attr { Some(attr) => InvocationKind::Attr { attr, item, derives, after_derive }, None => InvocationKind::DeriveContainer { derives, item }, }, ) } fn find_attr_invoc( &self, attrs: &mut Vec<ast::Attribute>, after_derive: &mut bool, ) -> Option<ast::Attribute> { let attr = attrs .iter() .position(|a| { if a.has_name(sym::derive) { *after_derive = true; } !attr::is_known(a) && 
!is_builtin_attr(a) }) .map(|i| attrs.remove(i)); if let Some(attr) = &attr { if !self.cx.ecfg.custom_inner_attributes() && attr.style == ast::AttrStyle::Inner && !attr.has_name(sym::test) { feature_err( &self.cx.parse_sess, sym::custom_inner_attributes, attr.span, "non-builtin inner attributes are unstable", ) .emit(); } } attr } /// If `item` is an attr invocation, remove and return the macro attribute and derive traits. fn classify_item( &mut self, item: &mut impl HasAttrs, ) -> (Option<ast::Attribute>, Vec<Path>, /* after_derive */ bool) { let (mut attr, mut traits, mut after_derive) = (None, Vec::new(), false); item.visit_attrs(|mut attrs| { attr = self.find_attr_invoc(&mut attrs, &mut after_derive); traits = collect_derives(&mut self.cx, &mut attrs); }); (attr, traits, after_derive) } /// Alternative to `classify_item()` that ignores `#[derive]` so invocations fallthrough /// to the unused-attributes lint (making it an error on statements and expressions /// is a breaking change) fn classify_nonitem( &mut self, nonitem: &mut impl HasAttrs, ) -> (Option<ast::Attribute>, /* after_derive */ bool) { let (mut attr, mut after_derive) = (None, false); nonitem.visit_attrs(|mut attrs| { attr = self.find_attr_invoc(&mut attrs, &mut after_derive); }); (attr, after_derive) } fn configure<T: HasAttrs>(&mut self, node: T) -> Option<T> { self.cfg.configure(node) } // Detect use of feature-gated or invalid attributes on macro invocations // since they will not be detected after macro expansion. 
fn check_attributes(&mut self, attrs: &[ast::Attribute]) { let features = self.cx.ecfg.features.unwrap(); for attr in attrs.iter() { rustc_ast_passes::feature_gate::check_attribute(attr, self.cx.parse_sess, features); validate_attr::check_meta(self.cx.parse_sess, attr); // macros are expanded before any lint passes so this warning has to be hardcoded if attr.has_name(sym::derive) { self.cx .struct_span_warn(attr.span, "`#[derive]` does nothing on macro invocations") .note("this may become a hard error in a future release") .emit(); } } } } impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { fn visit_expr(&mut self, expr: &mut P<ast::Expr>) { self.cfg.configure_expr(expr); visit_clobber(expr.deref_mut(), |mut expr| { self.cfg.configure_expr_kind(&mut expr.kind); // ignore derives so they remain unused let (attr, after_derive) = self.classify_nonitem(&mut expr); if attr.is_some() { // Collect the invoc regardless of whether or not attributes are permitted here // expansion will eat the attribute so it won't error later. attr.as_ref().map(|a| self.cfg.maybe_emit_expr_attr_err(a)); // AstFragmentKind::Expr requires the macro to emit an expression. 
return self .collect_attr( attr, vec![], Annotatable::Expr(P(expr)), AstFragmentKind::Expr, after_derive, ) .make_expr() .into_inner(); } if let ast::ExprKind::Mac(mac) = expr.kind { self.check_attributes(&expr.attrs); self.collect_bang(mac, expr.span, AstFragmentKind::Expr).make_expr().into_inner() } else { noop_visit_expr(&mut expr, self); expr } }); } fn flat_map_arm(&mut self, arm: ast::Arm) -> SmallVec<[ast::Arm; 1]> { let mut arm = configure!(self, arm); let (attr, traits, after_derive) = self.classify_item(&mut arm); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::Arm(arm), AstFragmentKind::Arms, after_derive, ) .make_arms(); } noop_flat_map_arm(arm, self) } fn flat_map_field(&mut self, field: ast::Field) -> SmallVec<[ast::Field; 1]> { let mut field = configure!(self, field); let (attr, traits, after_derive) = self.classify_item(&mut field); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::Field(field), AstFragmentKind::Fields, after_derive, ) .make_fields(); } noop_flat_map_field(field, self) } fn flat_map_field_pattern(&mut self, fp: ast::FieldPat) -> SmallVec<[ast::FieldPat; 1]> { let mut fp = configure!(self, fp); let (attr, traits, after_derive) = self.classify_item(&mut fp); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::FieldPat(fp), AstFragmentKind::FieldPats, after_derive, ) .make_field_patterns(); } noop_flat_map_field_pattern(fp, self) } fn flat_map_param(&mut self, p: ast::Param) -> SmallVec<[ast::Param; 1]> { let mut p = configure!(self, p); let (attr, traits, after_derive) = self.classify_item(&mut p); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::Param(p), AstFragmentKind::Params, after_derive, ) .make_params(); } noop_flat_map_param(p, self) } fn flat_map_struct_field(&mut self, sf: ast::StructField) -> SmallVec<[ast::StructField; 1]> { let mut sf = 
configure!(self, sf); let (attr, traits, after_derive) = self.classify_item(&mut sf); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::StructField(sf), AstFragmentKind::StructFields, after_derive, ) .make_struct_fields(); } noop_flat_map_struct_field(sf, self) } fn flat_map_variant(&mut self, variant: ast::Variant) -> SmallVec<[ast::Variant; 1]> { let mut variant = configure!(self, variant); let (attr, traits, after_derive) = self.classify_item(&mut variant); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::Variant(variant), AstFragmentKind::Variants, after_derive, ) .make_variants(); } noop_flat_map_variant(variant, self) } fn filter_map_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> { let expr = configure!(self, expr); expr.filter_map(|mut expr| { self.cfg.configure_expr_kind(&mut expr.kind); // Ignore derives so they remain unused. let (attr, after_derive) = self.classify_nonitem(&mut expr); if attr.is_some() { attr.as_ref().map(|a| self.cfg.maybe_emit_expr_attr_err(a)); return self .collect_attr( attr, vec![], Annotatable::Expr(P(expr)), AstFragmentKind::OptExpr, after_derive, ) .make_opt_expr() .map(|expr| expr.into_inner()); } if let ast::ExprKind::Mac(mac) = expr.kind { self.check_attributes(&expr.attrs); self.collect_bang(mac, expr.span, AstFragmentKind::OptExpr) .make_opt_expr() .map(|expr| expr.into_inner()) } else { Some({ noop_visit_expr(&mut expr, self); expr }) } }) } fn visit_pat(&mut self, pat: &mut P<ast::Pat>) { self.cfg.configure_pat(pat); match pat.kind { PatKind::Mac(_) => {} _ => return noop_visit_pat(pat, self), } visit_clobber(pat, |mut pat| match mem::replace(&mut pat.kind, PatKind::Wild) { PatKind::Mac(mac) => self.collect_bang(mac, pat.span, AstFragmentKind::Pat).make_pat(), _ => unreachable!(), }); } fn flat_map_stmt(&mut self, stmt: ast::Stmt) -> SmallVec<[ast::Stmt; 1]> { let mut stmt = configure!(self, stmt); // we'll expand 
attributes on expressions separately if !stmt.is_expr() { let (attr, derives, after_derive) = if stmt.is_item() { self.classify_item(&mut stmt) } else { // ignore derives on non-item statements so it falls through // to the unused-attributes lint let (attr, after_derive) = self.classify_nonitem(&mut stmt); (attr, vec![], after_derive) }; if attr.is_some() || !derives.is_empty() { return self .collect_attr( attr, derives, Annotatable::Stmt(P(stmt)), AstFragmentKind::Stmts, after_derive, ) .make_stmts(); } } if let StmtKind::Mac(mac) = stmt.kind { let (mac, style, attrs) = mac.into_inner(); self.check_attributes(&attrs); let mut placeholder = self.collect_bang(mac, stmt.span, AstFragmentKind::Stmts).make_stmts(); // If this is a macro invocation with a semicolon, then apply that // semicolon to the final statement produced by expansion. if style == MacStmtStyle::Semicolon { if let Some(stmt) = placeholder.pop() { placeholder.push(stmt.add_trailing_semicolon()); } } return placeholder; } // The placeholder expander gives ids to statements, so we avoid folding the id here. let ast::Stmt { id, kind, span } = stmt; noop_flat_map_stmt_kind(kind, self) .into_iter() .map(|kind| ast::Stmt { id, kind, span }) .collect() } fn visit_block(&mut self, block: &mut P<Block>) { let old_directory_ownership = self.cx.current_expansion.directory_ownership; self.cx.current_expansion.directory_ownership = DirectoryOwnership::UnownedViaBlock; noop_visit_block(block, self); self.cx.current_expansion.directory_ownership = old_directory_ownership; } fn flat_map_item(&mut self, item: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> { let mut item = configure!(self, item); let (attr, traits, after_derive) = self.classify_item(&mut item); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::Item(item), AstFragmentKind::Items, after_derive, ) .make_items(); } match item.kind { ast::ItemKind::Mac(..) 
=> { self.check_attributes(&item.attrs); item.and_then(|item| match item.kind { ItemKind::Mac(mac) => self .collect( AstFragmentKind::Items, InvocationKind::Bang { mac, span: item.span }, ) .make_items(), _ => unreachable!(), }) } ast::ItemKind::Mod(ast::Mod { inner, .. }) if item.ident != Ident::invalid() => { let orig_directory_ownership = self.cx.current_expansion.directory_ownership; let mut module = (*self.cx.current_expansion.module).clone(); module.mod_path.push(item.ident); // Detect if this is an inline module (`mod m { ... }` as opposed to `mod m;`). // In the non-inline case, `inner` is never the dummy span (cf. `parse_item_mod`). // Thus, if `inner` is the dummy span, we know the module is inline. let inline_module = item.span.contains(inner) || inner.is_dummy(); if inline_module { if let Some(path) = attr::first_attr_value_str_by_name(&item.attrs, sym::path) { self.cx.current_expansion.directory_ownership = DirectoryOwnership::Owned { relative: None }; module.directory.push(&*path.as_str()); } else { module.directory.push(&*item.ident.as_str()); } } else { let path = self.cx.parse_sess.source_map().span_to_unmapped_path(inner); let mut path = match path { FileName::Real(path) => path, other => PathBuf::from(other.to_string()), }; let directory_ownership = match path.file_name().unwrap().to_str() { Some("mod.rs") => DirectoryOwnership::Owned { relative: None }, Some(_) => DirectoryOwnership::Owned { relative: Some(item.ident) }, None => DirectoryOwnership::UnownedViaMod, }; path.pop(); module.directory = path; self.cx.current_expansion.directory_ownership = directory_ownership; } let orig_module = mem::replace(&mut self.cx.current_expansion.module, Rc::new(module)); let result = noop_flat_map_item(item, self); self.cx.current_expansion.module = orig_module; self.cx.current_expansion.directory_ownership = orig_directory_ownership; result } _ => noop_flat_map_item(item, self), } } fn flat_map_trait_item(&mut self, item: P<ast::AssocItem>) -> 
SmallVec<[P<ast::AssocItem>; 1]> { let mut item = configure!(self, item); let (attr, traits, after_derive) = self.classify_item(&mut item); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::TraitItem(item), AstFragmentKind::TraitItems, after_derive, ) .make_trait_items(); } match item.kind { ast::AssocItemKind::Macro(..) => { self.check_attributes(&item.attrs); item.and_then(|item| match item.kind { ast::AssocItemKind::Macro(mac) => self .collect_bang(mac, item.span, AstFragmentKind::TraitItems) .make_trait_items(), _ => unreachable!(), }) } _ => noop_flat_map_assoc_item(item, self), } } fn flat_map_impl_item(&mut self, item: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> { let mut item = configure!(self, item); let (attr, traits, after_derive) = self.classify_item(&mut item); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::ImplItem(item), AstFragmentKind::ImplItems, after_derive, ) .make_impl_items(); } match item.kind { ast::AssocItemKind::Macro(..) 
=> { self.check_attributes(&item.attrs); item.and_then(|item| match item.kind { ast::AssocItemKind::Macro(mac) => self .collect_bang(mac, item.span, AstFragmentKind::ImplItems) .make_impl_items(), _ => unreachable!(), }) } _ => noop_flat_map_assoc_item(item, self), } } fn visit_ty(&mut self, ty: &mut P<ast::Ty>) { match ty.kind { ast::TyKind::Mac(_) => {} _ => return noop_visit_ty(ty, self), }; visit_clobber(ty, |mut ty| match mem::replace(&mut ty.kind, ast::TyKind::Err) { ast::TyKind::Mac(mac) => self.collect_bang(mac, ty.span, AstFragmentKind::Ty).make_ty(), _ => unreachable!(), }); } fn visit_foreign_mod(&mut self, foreign_mod: &mut ast::ForeignMod) { self.cfg.configure_foreign_mod(foreign_mod); noop_visit_foreign_mod(foreign_mod, self); } fn flat_map_foreign_item( &mut self, mut foreign_item: P<ast::ForeignItem>, ) -> SmallVec<[P<ast::ForeignItem>; 1]> { let (attr, traits, after_derive) = self.classify_item(&mut foreign_item); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::ForeignItem(foreign_item), AstFragmentKind::ForeignItems, after_derive, ) .make_foreign_items(); } match foreign_item.kind { ast::ForeignItemKind::Macro(..) => { self.check_attributes(&foreign_item.attrs); foreign_item.and_then(|item| match item.kind { ast::ForeignItemKind::Macro(mac) => self .collect_bang(mac, item.span, AstFragmentKind::ForeignItems) .make_foreign_items(), _ => unreachable!(), }) } _ => noop_flat_map_foreign_item(foreign_item, self), } } fn visit_item_kind(&mut self, item: &mut ast::ItemKind) { match item { ast::ItemKind::MacroDef(..) 
=> {} _ => { self.cfg.configure_item_kind(item); noop_visit_item_kind(item, self); } } } fn flat_map_generic_param( &mut self, param: ast::GenericParam, ) -> SmallVec<[ast::GenericParam; 1]> { let mut param = configure!(self, param); let (attr, traits, after_derive) = self.classify_item(&mut param); if attr.is_some() || !traits.is_empty() { return self .collect_attr( attr, traits, Annotatable::GenericParam(param), AstFragmentKind::GenericParams, after_derive, ) .make_generic_params(); } noop_flat_map_generic_param(param, self) } fn visit_attribute(&mut self, at: &mut ast::Attribute) { // turn `#[doc(include="filename")]` attributes into `#[doc(include(file="filename", // contents="file contents")]` attributes if !at.check_name(sym::doc) { return noop_visit_attribute(at, self); } if let Some(list) = at.meta_item_list() { if !list.iter().any(|it| it.check_name(sym::include)) { return noop_visit_attribute(at, self); } let mut items = vec![]; for mut it in list { if !it.check_name(sym::include) { items.push({ noop_visit_meta_list_item(&mut it, self); it }); continue; } if let Some(file) = it.value_str() { let err_count = self.cx.parse_sess.span_diagnostic.err_count(); self.check_attributes(slice::from_ref(at)); if self.cx.parse_sess.span_diagnostic.err_count() > err_count { // avoid loading the file if they haven't enabled the feature return noop_visit_attribute(at, self); } let filename = match self.cx.resolve_path(&*file.as_str(), it.span()) { Ok(filename) => filename, Err(mut err) => { err.emit(); continue; } }; match self.cx.source_map().load_file(&filename) { Ok(source_file) => { let src = source_file .src .as_ref() .expect("freshly loaded file should have a source"); let src_interned = Symbol::intern(src.as_str()); let include_info = vec![ ast::NestedMetaItem::MetaItem(attr::mk_name_value_item_str( Ident::with_dummy_span(sym::file), file, DUMMY_SP, )), ast::NestedMetaItem::MetaItem(attr::mk_name_value_item_str( Ident::with_dummy_span(sym::contents), src_interned, 
DUMMY_SP, )), ]; let include_ident = Ident::with_dummy_span(sym::include); let item = attr::mk_list_item(include_ident, include_info); items.push(ast::NestedMetaItem::MetaItem(item)); } Err(e) => { let lit = it.meta_item().and_then(|item| item.name_value_literal()).unwrap(); if e.kind() == ErrorKind::InvalidData { self.cx .struct_span_err( lit.span, &format!("{} wasn't a utf-8 file", filename.display()), ) .span_label(lit.span, "contains invalid utf-8") .emit(); } else { let mut err = self.cx.struct_span_err( lit.span, &format!("couldn't read {}: {}", filename.display(), e), ); err.span_label(lit.span, "couldn't read file"); err.emit(); } } } } else { let mut err = self.cx.struct_span_err( it.span(), &format!("expected path to external documentation"), ); // Check if the user erroneously used `doc(include(...))` syntax. let literal = it.meta_item_list().and_then(|list| { if list.len() == 1 { list[0].literal().map(|literal| &literal.kind) } else { None } }); let (path, applicability) = match &literal { Some(LitKind::Str(path, ..)) => { (path.to_string(), Applicability::MachineApplicable) } _ => (String::from("<path>"), Applicability::HasPlaceholders), }; err.span_suggestion( it.span(), "provide a file path with `=`", format!("include = \"{}\"", path), applicability, ); err.emit(); } } let meta = attr::mk_list_item(Ident::with_dummy_span(sym::doc), items); *at = ast::Attribute { kind: ast::AttrKind::Normal(AttrItem { path: meta.path, args: meta.kind.mac_args(meta.span), }), span: at.span, id: at.id, style: at.style, }; } else { noop_visit_attribute(at, self) } } fn visit_id(&mut self, id: &mut ast::NodeId) { if self.monotonic { debug_assert_eq!(*id, ast::DUMMY_NODE_ID); *id = self.cx.resolver.next_node_id() } } fn visit_fn_decl(&mut self, mut fn_decl: &mut P<ast::FnDecl>) { self.cfg.configure_fn_decl(&mut fn_decl); noop_visit_fn_decl(fn_decl, self); } } pub struct ExpansionConfig<'feat> { pub crate_name: String, pub features: Option<&'feat Features>, pub 
recursion_limit: usize, pub trace_mac: bool, pub should_test: bool, // If false, strip `#[test]` nodes pub single_step: bool, pub keep_macs: bool, } impl<'feat> ExpansionConfig<'feat> { pub fn default(crate_name: String) -> ExpansionConfig<'static> { ExpansionConfig { crate_name, features: None, recursion_limit: 1024, trace_mac: false, should_test: false, single_step: false, keep_macs: false, } } fn proc_macro_hygiene(&self) -> bool { self.features.map_or(false, |features| features.proc_macro_hygiene) } fn custom_inner_attributes(&self) -> bool { self.features.map_or(false, |features| features.custom_inner_attributes) } }
#![cfg(not(test))] use serial_logger::write_static; #[lang = "eh_personality"] #[no_mangle] pub extern "C" fn eh_personality() { write_static("PANIC: eh_personality\n"); } #[lang = "eh_unwind_resume"] #[no_mangle] pub extern "C" fn rust_eh_unwind_resume() { write_static("PANIC: rust_eh_unwind_resume\n"); } #[no_mangle] pub extern "C" fn __udivti3() { write_static("ERROR: Unimplemented intrinsic __udivti3\n"); } #[no_mangle] pub extern "C" fn __umodti3() { write_static("ERROR: Unimplemented intrinsic __umodti3\n"); } #[no_mangle] pub extern "C" fn __muloti4() { write_static("ERROR: Unimplemented intrinsic __muloti4\n"); } #[no_mangle] pub extern "C" fn __floatundisf() { write_static("ERROR: Unimplemented intrinsic __floatundisf\n"); } #[no_mangle] pub extern "C" fn __floatundidf() { write_static("ERROR: Unimplemented intrinsic __floatundidf\n"); } #[allow(non_snake_case)] #[no_mangle] pub extern "C" fn _Unwind_Resume() { write_static("PANIC: _Unwind_Resume\n"); } #[allow(empty_loop)] #[lang = "panic_fmt"] #[no_mangle] pub extern "C" fn panic_fmt() -> ! 
{ write_static("PANIC: panic_fmt\n"); loop {} } Arguments for panic_fmt #![cfg(not(test))] use core::fmt; use serial_logger::write_static; #[lang = "eh_personality"] #[no_mangle] pub extern "C" fn eh_personality() { write_static("PANIC: eh_personality\n"); } #[lang = "eh_unwind_resume"] #[no_mangle] pub extern "C" fn rust_eh_unwind_resume() { write_static("PANIC: rust_eh_unwind_resume\n"); } #[no_mangle] pub extern "C" fn __udivti3() { write_static("ERROR: Unimplemented intrinsic __udivti3\n"); } #[no_mangle] pub extern "C" fn __umodti3() { write_static("ERROR: Unimplemented intrinsic __umodti3\n"); } #[no_mangle] pub extern "C" fn __muloti4() { write_static("ERROR: Unimplemented intrinsic __muloti4\n"); } #[no_mangle] pub extern "C" fn __floatundisf() { write_static("ERROR: Unimplemented intrinsic __floatundisf\n"); } #[no_mangle] pub extern "C" fn __floatundidf() { write_static("ERROR: Unimplemented intrinsic __floatundidf\n"); } #[allow(non_snake_case)] #[no_mangle] pub extern "C" fn _Unwind_Resume() { write_static("PANIC: _Unwind_Resume\n"); } #[allow(empty_loop)] #[lang = "panic_fmt"] #[no_mangle] pub extern "C" fn panic_fmt(_fmt: fmt::Arguments, _file_line: &(&'static str, u32)) -> ! { write_static("PANIC: panic_fmt\n"); loop {} }
// ignore-tidy-filelength //! This module contains the "cleaned" pieces of the AST, and the functions //! that clean them. pub mod inline; pub mod cfg; mod simplify; mod auto_trait; mod blanket_impl; use rustc_index::vec::{IndexVec, Idx}; use rustc_target::spec::abi::Abi; use rustc_typeck::hir_ty_to_ty; use rustc::infer::region_constraints::{RegionConstraintData, Constraint}; use rustc::middle::resolve_lifetime as rl; use rustc::middle::lang_items; use rustc::middle::stability; use rustc::mir::interpret::GlobalId; use rustc::hir; use rustc::hir::def::{CtorKind, DefKind, Res}; use rustc::hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; use rustc::hir::ptr::P; use rustc::ty::subst::{InternalSubsts, SubstsRef, GenericArgKind}; use rustc::ty::{self, DefIdTree, TyCtxt, Region, RegionVid, Ty, AdtKind}; use rustc::ty::fold::TypeFolder; use rustc::ty::layout::VariantIdx; use rustc::util::nodemap::{FxHashMap, FxHashSet}; use syntax::ast::{self, Attribute, AttrStyle, AttrKind, Ident}; use syntax::attr; use syntax::util::comments; use syntax::source_map::DUMMY_SP; use syntax_pos::symbol::{Symbol, kw, sym}; use syntax_pos::hygiene::MacroKind; use syntax_pos::{self, Pos, FileName}; use std::collections::hash_map::Entry; use std::fmt; use std::hash::{Hash, Hasher}; use std::default::Default; use std::{mem, slice, vec}; use std::num::NonZeroU32; use std::iter::FromIterator; use std::rc::Rc; use std::cell::RefCell; use std::sync::Arc; use std::u32; use crate::core::{self, DocContext, ImplTraitParam}; use crate::doctree; use crate::html::render::{cache, ExternalLocation}; use crate::html::item_type::ItemType; use self::cfg::Cfg; use self::auto_trait::AutoTraitFinder; use self::blanket_impl::BlanketImplFinder; pub use self::Type::*; pub use self::Mutability::*; pub use self::ItemEnum::*; pub use self::SelfTy::*; pub use self::FunctionRetTy::*; pub use self::Visibility::{Public, Inherited}; thread_local!(pub static MAX_DEF_ID: RefCell<FxHashMap<CrateNum, DefId>> = 
Default::default()); const FN_OUTPUT_NAME: &'static str = "Output"; #[derive(Clone, Debug)] pub struct Crate { pub name: String, pub version: Option<String>, pub src: FileName, pub module: Option<Item>, pub externs: Vec<(CrateNum, ExternalCrate)>, pub primitives: Vec<(DefId, PrimitiveType, Attributes)>, // These are later on moved into `CACHEKEY`, leaving the map empty. // Only here so that they can be filtered through the rustdoc passes. pub external_traits: Rc<RefCell<FxHashMap<DefId, Trait>>>, pub masked_crates: FxHashSet<CrateNum>, pub collapsed: bool, } #[derive(Clone, Debug)] pub struct ExternalCrate { pub name: String, pub src: FileName, pub attrs: Attributes, pub primitives: Vec<(DefId, PrimitiveType, Attributes)>, pub keywords: Vec<(DefId, String, Attributes)>, } /// Anything with a source location and set of attributes and, optionally, a /// name. That is, anything that can be documented. This doesn't correspond /// directly to the AST's concept of an item; it's a strict superset. #[derive(Clone)] pub struct Item { /// Stringified span pub source: Span, /// Not everything has a name. E.g., impls pub name: Option<String>, pub attrs: Attributes, pub inner: ItemEnum, pub visibility: Visibility, pub def_id: DefId, pub stability: Option<Stability>, pub deprecation: Option<Deprecation>, } impl fmt::Debug for Item { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let fake = MAX_DEF_ID.with(|m| m.borrow().get(&self.def_id.krate) .map(|id| self.def_id >= *id).unwrap_or(false)); let def_id: &dyn fmt::Debug = if fake { &"**FAKE**" } else { &self.def_id }; fmt.debug_struct("Item") .field("source", &self.source) .field("name", &self.name) .field("attrs", &self.attrs) .field("inner", &self.inner) .field("visibility", &self.visibility) .field("def_id", def_id) .field("stability", &self.stability) .field("deprecation", &self.deprecation) .finish() } } impl Item { /// Finds the `doc` attribute as a NameValue and returns the corresponding /// value found. 
pub fn doc_value(&self) -> Option<&str> { self.attrs.doc_value() } /// Finds all `doc` attributes as NameValues and returns their corresponding values, joined /// with newlines. pub fn collapsed_doc_value(&self) -> Option<String> { self.attrs.collapsed_doc_value() } pub fn links(&self) -> Vec<(String, String)> { self.attrs.links(&self.def_id.krate) } pub fn is_crate(&self) -> bool { match self.inner { StrippedItem(box ModuleItem(Module { is_crate: true, ..})) | ModuleItem(Module { is_crate: true, ..}) => true, _ => false, } } pub fn is_mod(&self) -> bool { self.type_() == ItemType::Module } pub fn is_trait(&self) -> bool { self.type_() == ItemType::Trait } pub fn is_struct(&self) -> bool { self.type_() == ItemType::Struct } pub fn is_enum(&self) -> bool { self.type_() == ItemType::Enum } pub fn is_variant(&self) -> bool { self.type_() == ItemType::Variant } pub fn is_associated_type(&self) -> bool { self.type_() == ItemType::AssocType } pub fn is_associated_const(&self) -> bool { self.type_() == ItemType::AssocConst } pub fn is_method(&self) -> bool { self.type_() == ItemType::Method } pub fn is_ty_method(&self) -> bool { self.type_() == ItemType::TyMethod } pub fn is_typedef(&self) -> bool { self.type_() == ItemType::Typedef } pub fn is_primitive(&self) -> bool { self.type_() == ItemType::Primitive } pub fn is_union(&self) -> bool { self.type_() == ItemType::Union } pub fn is_import(&self) -> bool { self.type_() == ItemType::Import } pub fn is_extern_crate(&self) -> bool { self.type_() == ItemType::ExternCrate } pub fn is_keyword(&self) -> bool { self.type_() == ItemType::Keyword } pub fn is_stripped(&self) -> bool { match self.inner { StrippedItem(..) 
=> true, _ => false } } pub fn has_stripped_fields(&self) -> Option<bool> { match self.inner { StructItem(ref _struct) => Some(_struct.fields_stripped), UnionItem(ref union) => Some(union.fields_stripped), VariantItem(Variant { kind: VariantKind::Struct(ref vstruct)} ) => { Some(vstruct.fields_stripped) }, _ => None, } } pub fn stability_class(&self) -> Option<String> { self.stability.as_ref().and_then(|ref s| { let mut classes = Vec::with_capacity(2); if s.level == stability::Unstable { classes.push("unstable"); } if s.deprecation.is_some() { classes.push("deprecated"); } if classes.len() != 0 { Some(classes.join(" ")) } else { None } }) } pub fn stable_since(&self) -> Option<&str> { self.stability.as_ref().map(|s| &s.since[..]) } pub fn is_non_exhaustive(&self) -> bool { self.attrs.other_attrs.iter() .any(|a| a.check_name(sym::non_exhaustive)) } /// Returns a documentation-level item type from the item. pub fn type_(&self) -> ItemType { ItemType::from(self) } /// Returns the info in the item's `#[deprecated]` or `#[rustc_deprecated]` attributes. /// /// If the item is not deprecated, returns `None`. pub fn deprecation(&self) -> Option<&Deprecation> { self.deprecation .as_ref() .or_else(|| self.stability.as_ref().and_then(|s| s.deprecation.as_ref())) } pub fn is_default(&self) -> bool { match self.inner { ItemEnum::MethodItem(ref meth) => { if let Some(defaultness) = meth.defaultness { defaultness.has_value() && !defaultness.is_final() } else { false } } _ => false, } } } #[derive(Clone, Debug)] pub enum ItemEnum { ExternCrateItem(String, Option<String>), ImportItem(Import), StructItem(Struct), UnionItem(Union), EnumItem(Enum), FunctionItem(Function), ModuleItem(Module), TypedefItem(Typedef, bool /* is associated type */), OpaqueTyItem(OpaqueTy, bool /* is associated type */), StaticItem(Static), ConstantItem(Constant), TraitItem(Trait), TraitAliasItem(TraitAlias), ImplItem(Impl), /// A method signature only. 
Used for required methods in traits (ie, /// non-default-methods). TyMethodItem(TyMethod), /// A method with a body. MethodItem(Method), StructFieldItem(Type), VariantItem(Variant), /// `fn`s from an extern block ForeignFunctionItem(Function), /// `static`s from an extern block ForeignStaticItem(Static), /// `type`s from an extern block ForeignTypeItem, MacroItem(Macro), ProcMacroItem(ProcMacro), PrimitiveItem(PrimitiveType), AssocConstItem(Type, Option<String>), AssocTypeItem(Vec<GenericBound>, Option<Type>), /// An item that has been stripped by a rustdoc pass StrippedItem(Box<ItemEnum>), KeywordItem(String), } impl ItemEnum { pub fn is_associated(&self) -> bool { match *self { ItemEnum::TypedefItem(_, _) | ItemEnum::AssocTypeItem(_, _) => true, _ => false, } } } #[derive(Clone, Debug)] pub struct Module { pub items: Vec<Item>, pub is_crate: bool, } pub struct ListAttributesIter<'a> { attrs: slice::Iter<'a, ast::Attribute>, current_list: vec::IntoIter<ast::NestedMetaItem>, name: Symbol, } impl<'a> Iterator for ListAttributesIter<'a> { type Item = ast::NestedMetaItem; fn next(&mut self) -> Option<Self::Item> { if let Some(nested) = self.current_list.next() { return Some(nested); } for attr in &mut self.attrs { if let Some(list) = attr.meta_item_list() { if attr.check_name(self.name) { self.current_list = list.into_iter(); if let Some(nested) = self.current_list.next() { return Some(nested); } } } } None } fn size_hint(&self) -> (usize, Option<usize>) { let lower = self.current_list.len(); (lower, None) } } pub trait AttributesExt { /// Finds an attribute as List and returns the list of attributes nested inside. 
fn lists(&self, name: Symbol) -> ListAttributesIter<'_>; } impl AttributesExt for [ast::Attribute] { fn lists(&self, name: Symbol) -> ListAttributesIter<'_> { ListAttributesIter { attrs: self.iter(), current_list: Vec::new().into_iter(), name, } } } pub trait NestedAttributesExt { /// Returns `true` if the attribute list contains a specific `Word` fn has_word(self, word: Symbol) -> bool; } impl<I: IntoIterator<Item=ast::NestedMetaItem>> NestedAttributesExt for I { fn has_word(self, word: Symbol) -> bool { self.into_iter().any(|attr| attr.is_word() && attr.check_name(word)) } } /// A portion of documentation, extracted from a `#[doc]` attribute. /// /// Each variant contains the line number within the complete doc-comment where the fragment /// starts, as well as the Span where the corresponding doc comment or attribute is located. /// /// Included files are kept separate from inline doc comments so that proper line-number /// information can be given when a doctest fails. Sugared doc comments and "raw" doc comments are /// kept separate because of issue #42760. #[derive(Clone, PartialEq, Eq, Debug, Hash)] pub enum DocFragment { /// A doc fragment created from a `///` or `//!` doc comment. SugaredDoc(usize, syntax_pos::Span, String), /// A doc fragment created from a "raw" `#[doc=""]` attribute. RawDoc(usize, syntax_pos::Span, String), /// A doc fragment created from a `#[doc(include="filename")]` attribute. Contains both the /// given filename and the file contents. 
Include(usize, syntax_pos::Span, String, String), } impl DocFragment { pub fn as_str(&self) -> &str { match *self { DocFragment::SugaredDoc(_, _, ref s) => &s[..], DocFragment::RawDoc(_, _, ref s) => &s[..], DocFragment::Include(_, _, _, ref s) => &s[..], } } pub fn span(&self) -> syntax_pos::Span { match *self { DocFragment::SugaredDoc(_, span, _) | DocFragment::RawDoc(_, span, _) | DocFragment::Include(_, span, _, _) => span, } } } impl<'a> FromIterator<&'a DocFragment> for String { fn from_iter<T>(iter: T) -> Self where T: IntoIterator<Item = &'a DocFragment> { iter.into_iter().fold(String::new(), |mut acc, frag| { if !acc.is_empty() { acc.push('\n'); } match *frag { DocFragment::SugaredDoc(_, _, ref docs) | DocFragment::RawDoc(_, _, ref docs) | DocFragment::Include(_, _, _, ref docs) => acc.push_str(docs), } acc }) } } #[derive(Clone, Debug, Default)] pub struct Attributes { pub doc_strings: Vec<DocFragment>, pub other_attrs: Vec<ast::Attribute>, pub cfg: Option<Arc<Cfg>>, pub span: Option<syntax_pos::Span>, /// map from Rust paths to resolved defs and potential URL fragments pub links: Vec<(String, Option<DefId>, Option<String>)>, pub inner_docs: bool, } impl Attributes { /// Extracts the content from an attribute `#[doc(cfg(content))]`. fn extract_cfg(mi: &ast::MetaItem) -> Option<&ast::MetaItem> { use syntax::ast::NestedMetaItem::MetaItem; if let ast::MetaItemKind::List(ref nmis) = mi.kind { if nmis.len() == 1 { if let MetaItem(ref cfg_mi) = nmis[0] { if cfg_mi.check_name(sym::cfg) { if let ast::MetaItemKind::List(ref cfg_nmis) = cfg_mi.kind { if cfg_nmis.len() == 1 { if let MetaItem(ref content_mi) = cfg_nmis[0] { return Some(content_mi); } } } } } } } None } /// Reads a `MetaItem` from within an attribute, looks for whether it is a /// `#[doc(include="file")]`, and returns the filename and contents of the file as loaded from /// its expansion. 
fn extract_include(mi: &ast::MetaItem) -> Option<(String, String)> { mi.meta_item_list().and_then(|list| { for meta in list { if meta.check_name(sym::include) { // the actual compiled `#[doc(include="filename")]` gets expanded to // `#[doc(include(file="filename", contents="file contents")]` so we need to // look for that instead return meta.meta_item_list().and_then(|list| { let mut filename: Option<String> = None; let mut contents: Option<String> = None; for it in list { if it.check_name(sym::file) { if let Some(name) = it.value_str() { filename = Some(name.to_string()); } } else if it.check_name(sym::contents) { if let Some(docs) = it.value_str() { contents = Some(docs.to_string()); } } } if let (Some(filename), Some(contents)) = (filename, contents) { Some((filename, contents)) } else { None } }); } } None }) } pub fn has_doc_flag(&self, flag: Symbol) -> bool { for attr in &self.other_attrs { if !attr.check_name(sym::doc) { continue; } if let Some(items) = attr.meta_item_list() { if items.iter().filter_map(|i| i.meta_item()).any(|it| it.check_name(flag)) { return true; } } } false } pub fn from_ast(diagnostic: &::errors::Handler, attrs: &[ast::Attribute]) -> Attributes { let mut doc_strings = vec![]; let mut sp = None; let mut cfg = Cfg::True; let mut doc_line = 0; /// If `attr` is a doc comment, strips the leading and (if present) /// trailing comments symbols, e.g. `///`, `/**`, and `*/`. Otherwise, /// returns `attr` unchanged. 
pub fn with_doc_comment_markers_stripped<T>( attr: &Attribute, f: impl FnOnce(&Attribute) -> T ) -> T { match attr.kind { AttrKind::Normal(_) => { f(attr) } AttrKind::DocComment(comment) => { let comment = Symbol::intern(&comments::strip_doc_comment_decoration(&comment.as_str())); f(&Attribute { kind: AttrKind::DocComment(comment), id: attr.id, style: attr.style, span: attr.span, }) } } } let other_attrs = attrs.iter().filter_map(|attr| { with_doc_comment_markers_stripped(attr, |attr| { if attr.check_name(sym::doc) { if let Some(mi) = attr.meta() { if let Some(value) = mi.value_str() { // Extracted #[doc = "..."] let value = value.to_string(); let line = doc_line; doc_line += value.lines().count(); if attr.is_doc_comment() { doc_strings.push(DocFragment::SugaredDoc(line, attr.span, value)); } else { doc_strings.push(DocFragment::RawDoc(line, attr.span, value)); } if sp.is_none() { sp = Some(attr.span); } return None; } else if let Some(cfg_mi) = Attributes::extract_cfg(&mi) { // Extracted #[doc(cfg(...))] match Cfg::parse(cfg_mi) { Ok(new_cfg) => cfg &= new_cfg, Err(e) => diagnostic.span_err(e.span, e.msg), } return None; } else if let Some((filename, contents)) = Attributes::extract_include(&mi) { let line = doc_line; doc_line += contents.lines().count(); doc_strings.push(DocFragment::Include(line, attr.span, filename, contents)); } } } Some(attr.clone()) }) }).collect(); // treat #[target_feature(enable = "feat")] attributes as if they were // #[doc(cfg(target_feature = "feat"))] attributes as well for attr in attrs.lists(sym::target_feature) { if attr.check_name(sym::enable) { if let Some(feat) = attr.value_str() { let meta = attr::mk_name_value_item_str( Ident::with_dummy_span(sym::target_feature), feat, DUMMY_SP ); if let Ok(feat_cfg) = Cfg::parse(&meta) { cfg &= feat_cfg; } } } } let inner_docs = attrs.iter() .filter(|a| a.check_name(sym::doc)) .next() .map_or(true, |a| a.style == AttrStyle::Inner); Attributes { doc_strings, other_attrs, cfg: if cfg == 
Cfg::True { None } else { Some(Arc::new(cfg)) }, span: sp, links: vec![], inner_docs, } } /// Finds the `doc` attribute as a NameValue and returns the corresponding /// value found. pub fn doc_value(&self) -> Option<&str> { self.doc_strings.first().map(|s| s.as_str()) } /// Finds all `doc` attributes as NameValues and returns their corresponding values, joined /// with newlines. pub fn collapsed_doc_value(&self) -> Option<String> { if !self.doc_strings.is_empty() { Some(self.doc_strings.iter().collect()) } else { None } } /// Gets links as a vector /// /// Cache must be populated before call pub fn links(&self, krate: &CrateNum) -> Vec<(String, String)> { use crate::html::format::href; self.links.iter().filter_map(|&(ref s, did, ref fragment)| { match did { Some(did) => { if let Some((mut href, ..)) = href(did) { if let Some(ref fragment) = *fragment { href.push_str("#"); href.push_str(fragment); } Some((s.clone(), href)) } else { None } } None => { if let Some(ref fragment) = *fragment { let cache = cache(); let url = match cache.extern_locations.get(krate) { Some(&(_, ref src, ExternalLocation::Local)) => src.to_str().expect("invalid file path"), Some(&(_, _, ExternalLocation::Remote(ref s))) => s, Some(&(_, _, ExternalLocation::Unknown)) | None => "https://doc.rust-lang.org/nightly", }; // This is a primitive so the url is done "by hand". 
let tail = fragment.find('#').unwrap_or_else(|| fragment.len()); Some((s.clone(), format!("{}{}std/primitive.{}.html{}", url, if !url.ends_with('/') { "/" } else { "" }, &fragment[..tail], &fragment[tail..]))) } else { panic!("This isn't a primitive?!"); } } } }).collect() } } impl PartialEq for Attributes { fn eq(&self, rhs: &Self) -> bool { self.doc_strings == rhs.doc_strings && self.cfg == rhs.cfg && self.span == rhs.span && self.links == rhs.links && self.other_attrs.iter().map(|attr| attr.id).eq(rhs.other_attrs.iter().map(|attr| attr.id)) } } impl Eq for Attributes {} impl Hash for Attributes { fn hash<H: Hasher>(&self, hasher: &mut H) { self.doc_strings.hash(hasher); self.cfg.hash(hasher); self.span.hash(hasher); self.links.hash(hasher); for attr in &self.other_attrs { attr.id.hash(hasher); } } } impl AttributesExt for Attributes { fn lists(&self, name: Symbol) -> ListAttributesIter<'_> { self.other_attrs.lists(name) } } #[derive(Clone, PartialEq, Eq, Debug, Hash)] pub enum GenericBound { TraitBound(PolyTrait, hir::TraitBoundModifier), Outlives(Lifetime), } impl GenericBound { fn maybe_sized(cx: &DocContext<'_>) -> GenericBound { let did = cx.tcx.require_lang_item(lang_items::SizedTraitLangItem, None); let empty = cx.tcx.intern_substs(&[]); let path = external_path(cx, cx.tcx.item_name(did), Some(did), false, vec![], empty); inline::record_extern_fqn(cx, did, TypeKind::Trait); GenericBound::TraitBound(PolyTrait { trait_: ResolvedPath { path, param_names: None, did, is_generic: false, }, generic_params: Vec::new(), }, hir::TraitBoundModifier::Maybe) } fn is_sized_bound(&self, cx: &DocContext<'_>) -> bool { use rustc::hir::TraitBoundModifier as TBM; if let GenericBound::TraitBound(PolyTrait { ref trait_, .. 
// NOTE(review): this span opens inside `GenericBound::is_sized_bound`; the
// `if let` header for this pattern sits on the previous line of the file.
}, TBM::None) = *self {
            if trait_.def_id() == cx.tcx.lang_items().sized_trait() {
                return true;
            }
        }
        false
    }

    // Clones out the `PolyTrait` of a trait bound; `None` for an `Outlives` bound.
    fn get_poly_trait(&self) -> Option<PolyTrait> {
        if let GenericBound::TraitBound(ref p, _) = *self {
            return Some(p.clone())
        }
        None
    }

    // Clones out only the trait type of a trait bound; `None` for an `Outlives` bound.
    fn get_trait_type(&self) -> Option<Type> {
        if let GenericBound::TraitBound(PolyTrait { ref trait_, .. }, _) = *self {
            Some(trait_.clone())
        } else {
            None
        }
    }
}

/// A lifetime, stored in its source form (including the leading `'`).
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct Lifetime(String);

impl Lifetime {
    /// Borrows the lifetime's textual form.
    pub fn get_ref<'a>(&'a self) -> &'a str {
        let Lifetime(ref s) = *self;
        let s: &'a str = s;
        s
    }

    /// The `'static` lifetime ("statik" because `static` is a keyword).
    pub fn statik() -> Lifetime {
        Lifetime("'static".to_string())
    }
}

/// One predicate of a `where` clause.
#[derive(Clone, Debug)]
pub enum WherePredicate {
    BoundPredicate { ty: Type, bounds: Vec<GenericBound> },
    RegionPredicate { lifetime: Lifetime, bounds: Vec<GenericBound> },
    EqPredicate { lhs: Type, rhs: Type },
}

impl WherePredicate {
    /// The bounds attached to this predicate; `None` for equality predicates,
    /// which carry no bounds.
    pub fn get_bounds(&self) -> Option<&[GenericBound]> {
        match *self {
            WherePredicate::BoundPredicate { ref bounds, .. } => Some(bounds),
            WherePredicate::RegionPredicate { ref bounds, .. } => Some(bounds),
            _ => None,
        }
    }
}

/// The kind-specific payload of a generic parameter definition.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub enum GenericParamDefKind {
    Lifetime,
    Type {
        did: DefId,
        bounds: Vec<GenericBound>,
        default: Option<Type>,
        synthetic: Option<hir::SyntheticTyParamKind>,
    },
    Const {
        did: DefId,
        ty: Type,
    },
}

impl GenericParamDefKind {
    pub fn is_type(&self) -> bool {
        match *self {
            GenericParamDefKind::Type { .. } => true,
            _ => false,
        }
    }

    // FIXME(eddyb) this either returns the default of a type parameter, or the
    // type of a `const` parameter. It seems that the intention is to *visit*
    // any embedded types, but `get_type` seems to be the wrong name for that.
    pub fn get_type(&self) -> Option<Type> {
        match self {
            GenericParamDefKind::Type { default, .. } => default.clone(),
            GenericParamDefKind::Const { ty, .. } => Some(ty.clone()),
            GenericParamDefKind::Lifetime => None,
        }
    }
}

/// A single generic parameter (lifetime, type, or const) in a definition.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct GenericParamDef {
    pub name: String,
    pub kind: GenericParamDefKind,
}

impl GenericParamDef {
    /// `true` for type parameters synthesized by the compiler (e.g. from
    /// `impl Trait` in argument position).
    pub fn is_synthetic_type_param(&self) -> bool {
        match self.kind {
            GenericParamDefKind::Lifetime |
            GenericParamDefKind::Const { .. } => false,
            GenericParamDefKind::Type { ref synthetic, .. } => synthetic.is_some(),
        }
    }

    pub fn is_type(&self) -> bool {
        self.kind.is_type()
    }

    pub fn get_type(&self) -> Option<Type> {
        self.kind.get_type()
    }

    pub fn get_bounds(&self) -> Option<&[GenericBound]> {
        match self.kind {
            GenericParamDefKind::Type { ref bounds, .. } => Some(bounds),
            _ => None,
        }
    }
}

// maybe use a Generic enum and use Vec<Generic>?
#[derive(Clone, Debug, Default)]
pub struct Generics {
    pub params: Vec<GenericParamDef>,
    pub where_predicates: Vec<WherePredicate>,
}

/// A method with a body (from an impl or a trait's provided method).
#[derive(Clone, Debug)]
pub struct Method {
    pub generics: Generics,
    pub decl: FnDecl,
    pub header: hir::FnHeader,
    pub defaultness: Option<hir::Defaultness>,
    pub all_types: Vec<Type>,
    pub ret_types: Vec<Type>,
}

/// A method signature without a body (a trait's required method).
#[derive(Clone, Debug)]
pub struct TyMethod {
    pub header: hir::FnHeader,
    pub decl: FnDecl,
    pub generics: Generics,
    pub all_types: Vec<Type>,
    pub ret_types: Vec<Type>,
}

/// A free function.
#[derive(Clone, Debug)]
pub struct Function {
    pub decl: FnDecl,
    pub generics: Generics,
    pub header: hir::FnHeader,
    pub all_types: Vec<Type>,
    pub ret_types: Vec<Type>,
}

/// A function signature: arguments, return type, variadic-ness, attributes.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct FnDecl {
    pub inputs: Arguments,
    pub output: FunctionRetTy,
    pub c_variadic: bool,
    pub attrs: Attributes,
}

impl FnDecl {
    /// The kind of `self` receiver, if the first argument is `self`.
    pub fn self_type(&self) -> Option<SelfTy> {
        self.inputs.values.get(0).and_then(|v| v.to_self())
    }

    /// Returns the sugared return type for an async function.
    ///
    /// For example, if the return type is `impl std::future::Future<Output = i32>`, this function
    /// will return `i32`.
    ///
    /// # Panics
    ///
    /// This function will panic if the return type does not match the expected sugaring for async
    /// functions.
    pub fn sugared_async_return_type(&self) -> FunctionRetTy {
        match &self.output {
            FunctionRetTy::Return(Type::ImplTrait(bounds)) => {
                match &bounds[0] {
                    GenericBound::TraitBound(PolyTrait { trait_, .. }, ..) => {
                        // The only binding on the `Future` bound is `Output = T`.
                        let bindings = trait_.bindings().unwrap();
                        FunctionRetTy::Return(bindings[0].ty().clone())
                    }
                    _ => panic!("unexpected desugaring of async function"),
                }
            }
            _ => panic!("unexpected desugaring of async function"),
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct Arguments {
    pub values: Vec<Argument>,
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct Argument {
    pub type_: Type,
    pub name: String,
}

/// How a method takes `self`.
#[derive(Clone, PartialEq, Debug)]
pub enum SelfTy {
    SelfValue,
    SelfBorrowed(Option<Lifetime>, Mutability),
    SelfExplicit(Type),
}

impl Argument {
    /// Classifies this argument as a `self` receiver, or `None` if it is not
    /// named `self`.
    pub fn to_self(&self) -> Option<SelfTy> {
        if self.name != "self" {
            return None;
        }
        if self.type_.is_self_type() {
            return Some(SelfValue);
        }
        match self.type_ {
            BorrowedRef{ref lifetime, mutability, ref type_} if type_.is_self_type() => {
                Some(SelfBorrowed(lifetime.clone(), mutability))
            }
            _ => Some(SelfExplicit(self.type_.clone()))
        }
    }
}

/// A declared return type, or the implicit `()` when elided.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub enum FunctionRetTy {
    Return(Type),
    DefaultReturn,
}

impl GetDefId for FunctionRetTy {
    fn def_id(&self) -> Option<DefId> {
        match *self {
            Return(ref ty) => ty.def_id(),
            DefaultReturn => None,
        }
    }
}

/// A trait definition.
#[derive(Clone, Debug)]
pub struct Trait {
    pub auto: bool,
    pub unsafety: hir::Unsafety,
    pub items: Vec<Item>,
    pub generics: Generics,
    pub bounds: Vec<GenericBound>,
    pub is_spotlight: bool,
    // NOTE(review): both `auto` and `is_auto` exist on this struct; from here
    // it is not visible whether they ever disagree — confirm before merging.
    pub is_auto: bool,
}

/// A trait alias (`trait Foo = Bar + Baz;`).
#[derive(Clone, Debug)]
pub struct TraitAlias {
    pub generics: Generics,
    pub bounds: Vec<GenericBound>,
}

/// A trait reference, which may have higher ranked lifetimes.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct PolyTrait {
    pub trait_: Type,
    pub generic_params: Vec<GenericParamDef>,
}

/// A representation of a type suitable for hyperlinking purposes. Ideally, one can get the original
/// type out of the AST/`TyCtxt` given one of these, if more information is needed. Most
/// importantly, it does not preserve mutability or boxes.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub enum Type {
    /// Structs/enums/traits (most that would be an `hir::TyKind::Path`).
    ResolvedPath {
        path: Path,
        param_names: Option<Vec<GenericBound>>,
        did: DefId,
        /// `true` if is a `T::Name` path for associated types.
        is_generic: bool,
    },
    /// For parameterized types, so the consumer of the JSON don't go
    /// looking for types which don't exist anywhere.
    Generic(String),
    /// Primitives are the fixed-size numeric types (plus int/usize/float), char,
    /// arrays, slices, and tuples.
    Primitive(PrimitiveType),
    /// `extern "ABI" fn`
    BareFunction(Box<BareFunctionDecl>),
    Tuple(Vec<Type>),
    Slice(Box<Type>),
    Array(Box<Type>, String),
    Never,
    RawPointer(Mutability, Box<Type>),
    BorrowedRef {
        lifetime: Option<Lifetime>,
        mutability: Mutability,
        type_: Box<Type>,
    },

    // `<Type as Trait>::Name`
    QPath {
        name: String,
        self_type: Box<Type>,
        trait_: Box<Type>
    },

    // `_`
    Infer,

    // `impl TraitA + TraitB + ...`
    ImplTrait(Vec<GenericBound>),
}

/// The built-in "primitive" page targets (not only numeric primitives: also
/// slices, arrays, tuples, references, fn pointers, and `!`).
#[derive(Clone, PartialEq, Eq, Hash, Copy, Debug)]
pub enum PrimitiveType {
    Isize, I8, I16, I32, I64, I128,
    Usize, U8, U16, U32, U64, U128,
    F32, F64,
    Char,
    Bool,
    Str,
    Slice,
    Array,
    Tuple,
    Unit,
    RawPointer,
    Reference,
    Fn,
    Never,
}

/// Coarse item classification used when recording external items.
#[derive(Clone, Copy, Debug)]
pub enum TypeKind {
    Enum,
    Function,
    Module,
    Const,
    Static,
    Struct,
    Union,
    Trait,
    Typedef,
    Foreign,
    Macro,
    Attr,
    Derive,
    TraitAlias,
}

/// Anything that may resolve to a `DefId`.
pub trait GetDefId {
    fn def_id(&self) -> Option<DefId>;
}

impl<T: GetDefId> GetDefId for Option<T> {
    fn def_id(&self) -> Option<DefId> {
        self.as_ref().and_then(|d| d.def_id())
    }
}

impl Type {
    /// The primitive page this type should link to, if any; looks through one
    /// level of `&`-reference.
    pub fn primitive_type(&self) -> Option<PrimitiveType> {
        match *self {
            Primitive(p) | BorrowedRef { type_: box Primitive(p), ..} => Some(p),
            Slice(..) | BorrowedRef { type_: box Slice(..), .. } => Some(PrimitiveType::Slice),
            Array(..) | BorrowedRef { type_: box Array(..), .. } => Some(PrimitiveType::Array),
            Tuple(ref tys) => if tys.is_empty() {
                // `()` gets its own primitive page.
                Some(PrimitiveType::Unit)
            } else {
                Some(PrimitiveType::Tuple)
            },
            RawPointer(..) => Some(PrimitiveType::RawPointer),
            BorrowedRef { type_: box Generic(..), .. } => Some(PrimitiveType::Reference),
            BareFunction(..) => Some(PrimitiveType::Fn),
            Never => Some(PrimitiveType::Never),
            _ => None,
        }
    }

    pub fn is_generic(&self) -> bool {
        match *self {
            ResolvedPath { is_generic, .. } => is_generic,
            _ => false,
        }
    }

    pub fn is_self_type(&self) -> bool {
        match *self {
            Generic(ref name) => name == "Self",
            _ => false
        }
    }

    /// The type arguments of the path's final segment, if angle-bracketed.
    pub fn generics(&self) -> Option<Vec<Type>> {
        match *self {
            ResolvedPath { ref path, .. } => {
                path.segments.last().and_then(|seg| {
                    if let GenericArgs::AngleBracketed { ref args, .. } = seg.args {
                        Some(args.iter().filter_map(|arg| match arg {
                            GenericArg::Type(ty) => Some(ty.clone()),
                            _ => None,
                        }).collect())
                    } else {
                        None
                    }
                })
            }
            _ => None,
        }
    }

    /// The associated-type bindings (`Item = T`) of the path's final segment,
    /// if angle-bracketed.
    pub fn bindings(&self) -> Option<&[TypeBinding]> {
        match *self {
            ResolvedPath { ref path, .. } => {
                path.segments.last().and_then(|seg| {
                    if let GenericArgs::AngleBracketed { ref bindings, .. } = seg.args {
                        Some(&**bindings)
                    } else {
                        None
                    }
                })
            }
            _ => None
        }
    }

    pub fn is_full_generic(&self) -> bool {
        match *self {
            Type::Generic(_) => true,
            _ => false,
        }
    }

    /// Decomposes a `QPath` into (self type, trait `DefId`, associated name).
    pub fn projection(&self) -> Option<(&Type, DefId, &str)> {
        let (self_, trait_, name) = match self {
            QPath { ref self_type, ref trait_, ref name } => {
                (self_type, trait_, name)
            }
            _ => return None,
        };
        let trait_did = match **trait_ {
            ResolvedPath { did, .. } => did,
            _ => return None,
        };
        Some((&self_, trait_did, name))
    }
}

impl GetDefId for Type {
    fn def_id(&self) -> Option<DefId> {
        match *self {
            ResolvedPath { did, .. } => Some(did),
            // Primitive pages live in the render cache, keyed by primitive.
            Primitive(p) => crate::html::render::cache().primitive_locations.get(&p).cloned(),
            BorrowedRef { type_: box Generic(..), .. } =>
                Primitive(PrimitiveType::Reference).def_id(),
            BorrowedRef { ref type_, .. } => type_.def_id(),
            Tuple(ref tys) => if tys.is_empty() {
                Primitive(PrimitiveType::Unit).def_id()
            } else {
                Primitive(PrimitiveType::Tuple).def_id()
            },
            BareFunction(..) => Primitive(PrimitiveType::Fn).def_id(),
            Never => Primitive(PrimitiveType::Never).def_id(),
            Slice(..) => Primitive(PrimitiveType::Slice).def_id(),
            Array(..) => Primitive(PrimitiveType::Array).def_id(),
            RawPointer(..) => Primitive(PrimitiveType::RawPointer).def_id(),
            QPath { ref self_type, .. } => self_type.def_id(),
            _ => None,
        }
    }
}

impl PrimitiveType {
    /// Parses a primitive name as used in `#[doc(primitive = "...")]`.
    fn from_str(s: &str) -> Option<PrimitiveType> {
        match s {
            "isize" => Some(PrimitiveType::Isize),
            "i8" => Some(PrimitiveType::I8),
            "i16" => Some(PrimitiveType::I16),
            "i32" => Some(PrimitiveType::I32),
            "i64" => Some(PrimitiveType::I64),
            "i128" => Some(PrimitiveType::I128),
            "usize" => Some(PrimitiveType::Usize),
            "u8" => Some(PrimitiveType::U8),
            "u16" => Some(PrimitiveType::U16),
            "u32" => Some(PrimitiveType::U32),
            "u64" => Some(PrimitiveType::U64),
            "u128" => Some(PrimitiveType::U128),
            "bool" => Some(PrimitiveType::Bool),
            "char" => Some(PrimitiveType::Char),
            "str" => Some(PrimitiveType::Str),
            "f32" => Some(PrimitiveType::F32),
            "f64" => Some(PrimitiveType::F64),
            "array" => Some(PrimitiveType::Array),
            "slice" => Some(PrimitiveType::Slice),
            "tuple" => Some(PrimitiveType::Tuple),
            "unit" => Some(PrimitiveType::Unit),
            "pointer" => Some(PrimitiveType::RawPointer),
            "reference" => Some(PrimitiveType::Reference),
            "fn" => Some(PrimitiveType::Fn),
            "never" => Some(PrimitiveType::Never),
            _ => None,
        }
    }

    /// The canonical name, inverse of `from_str`.
    pub fn as_str(&self) -> &'static str {
        use self::PrimitiveType::*;
        match *self {
            Isize => "isize",
            I8 => "i8",
            I16 => "i16",
            I32 => "i32",
            I64 => "i64",
            I128 => "i128",
            Usize => "usize",
            U8 => "u8",
            U16 => "u16",
            U32 => "u32",
            U64 => "u64",
            U128 => "u128",
            F32 => "f32",
            F64 => "f64",
            Str => "str",
            Bool => "bool",
            Char => "char",
            Array => "array",
            Slice => "slice",
            Tuple => "tuple",
            Unit => "unit",
            RawPointer => "pointer",
            Reference => "reference",
            Fn => "fn",
            Never => "never",
        }
    }

    /// The URL path component for this primitive's page.
    pub fn to_url_str(&self) -> &'static str {
        self.as_str()
    }
}

impl From<ast::IntTy> for PrimitiveType {
    fn from(int_ty: ast::IntTy) -> PrimitiveType {
        match int_ty {
            ast::IntTy::Isize => PrimitiveType::Isize,
            ast::IntTy::I8 => PrimitiveType::I8,
            ast::IntTy::I16 => PrimitiveType::I16,
            ast::IntTy::I32 => PrimitiveType::I32,
            ast::IntTy::I64 => PrimitiveType::I64,
            ast::IntTy::I128 => PrimitiveType::I128,
        }
    }
}

impl From<ast::UintTy> for PrimitiveType {
    fn from(uint_ty: ast::UintTy) -> PrimitiveType {
        match uint_ty {
            ast::UintTy::Usize => PrimitiveType::Usize,
            ast::UintTy::U8 => PrimitiveType::U8,
            ast::UintTy::U16 => PrimitiveType::U16,
            ast::UintTy::U32 => PrimitiveType::U32,
            ast::UintTy::U64 => PrimitiveType::U64,
            ast::UintTy::U128 => PrimitiveType::U128,
        }
    }
}

impl From<ast::FloatTy> for PrimitiveType {
    fn from(float_ty: ast::FloatTy) -> PrimitiveType {
        match float_ty {
            ast::FloatTy::F32 => PrimitiveType::F32,
            ast::FloatTy::F64 => PrimitiveType::F64,
        }
    }
}

/// Item visibility as documented.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Visibility {
    Public,
    Inherited,
    Crate,
    Restricted(DefId, Path),
}

#[derive(Clone, Debug)]
pub struct Struct {
    pub struct_type: doctree::StructType,
    pub generics: Generics,
    pub fields: Vec<Item>,
    // `true` when private fields were removed by the strip-private pass.
    pub fields_stripped: bool,
}

#[derive(Clone, Debug)]
pub struct Union {
    pub struct_type: doctree::StructType,
    pub generics: Generics,
    pub fields: Vec<Item>,
    pub fields_stripped: bool,
}

/// This is a more limited form of the standard Struct, different in that
/// it lacks the things most items have (name, id, parameterization). Found
/// only as a variant in an enum.
#[derive(Clone, Debug)]
pub struct VariantStruct {
    pub struct_type: doctree::StructType,
    pub fields: Vec<Item>,
    pub fields_stripped: bool,
}

#[derive(Clone, Debug)]
pub struct Enum {
    pub variants: IndexVec<VariantIdx, Item>,
    pub generics: Generics,
    pub variants_stripped: bool,
}

#[derive(Clone, Debug)]
pub struct Variant {
    pub kind: VariantKind,
}

/// The shape of an enum variant.
#[derive(Clone, Debug)]
pub enum VariantKind {
    CLike,
    Tuple(Vec<Type>),
    Struct(VariantStruct),
}

/// A source span: file plus 1-based(?) line/column bounds, and the original
/// `syntax_pos::Span` it was derived from.
#[derive(Clone, Debug)]
pub struct Span {
    pub filename: FileName,
    pub loline: usize,
    pub locol: usize,
    pub hiline: usize,
    pub hicol: usize,
    pub original: syntax_pos::Span,
}

impl Span {
    /// A dummy span for synthesized items.
    pub fn empty() -> Span {
        Span {
            filename: FileName::Anon(0),
            loline: 0, locol: 0,
            hiline: 0, hicol: 0,
            original: syntax_pos::DUMMY_SP,
        }
    }

    pub fn span(&self) -> syntax_pos::Span {
        self.original
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct Path {
    pub global: bool,
    pub res: Res,
    pub segments: Vec<PathSegment>,
}

impl Path {
    /// Name of the final segment.
    ///
    /// # Panics
    ///
    /// Panics if the path has no segments.
    pub fn last_name(&self) -> &str {
        self.segments.last().expect("segments were empty").name.as_str()
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub enum GenericArg {
    Lifetime(Lifetime),
    Type(Type),
    Const(Constant),
}

/// Generic arguments of a path segment: `<...>` or the `Fn(..) -> ..` sugar.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub enum GenericArgs {
    AngleBracketed {
        args: Vec<GenericArg>,
        bindings: Vec<TypeBinding>,
    },
    Parenthesized {
        inputs: Vec<Type>,
        output: Option<Type>,
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct PathSegment {
    pub name: String,
    pub args: GenericArgs,
}

#[derive(Clone, Debug)]
pub struct Typedef {
    pub type_: Type,
    pub generics: Generics,
}

/// An opaque `type Foo = impl Trait;` item.
#[derive(Clone, Debug)]
pub struct OpaqueTy {
    pub bounds: Vec<GenericBound>,
    pub generics: Generics,
}

/// A function pointer type, e.g. `unsafe extern "C" fn(u32) -> u32`.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct BareFunctionDecl {
    pub unsafety: hir::Unsafety,
    pub generic_params: Vec<GenericParamDef>,
    pub decl: FnDecl,
    pub abi: Abi,
}

#[derive(Clone, Debug)]
pub struct Static {
    pub type_: Type,
    pub mutability: Mutability,
    /// It's useful to have the value of a static documented, but I have no
    /// desire to represent expressions (that'd basically be all of the AST,
    /// which is huge!). So, have a string.
    pub expr: String,
}

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct Constant {
    pub type_: Type,
    // Stringified initializer expression; see the note on `Static::expr`.
    pub expr: String,
}

#[derive(Debug, Clone, PartialEq, Eq, Copy, Hash)]
pub enum Mutability {
    Mutable,
    Immutable,
}

/// Whether an impl is `impl Trait for T` or `impl !Trait for T`.
#[derive(Clone, PartialEq, Debug)]
pub enum ImplPolarity {
    Positive,
    Negative,
}

#[derive(Clone, Debug)]
pub struct Impl {
    pub unsafety: hir::Unsafety,
    pub generics: Generics,
    // Names of the trait's provided methods, used to distinguish them from
    // methods this impl overrides.
    pub provided_trait_methods: FxHashSet<String>,
    pub trait_: Option<Type>,
    pub for_: Type,
    pub items: Vec<Item>,
    pub polarity: Option<ImplPolarity>,
    // `true` for impls synthesized by rustdoc (auto-trait impls etc.).
    pub synthetic: bool,
    pub blanket_impl: Option<Type>,
}

#[derive(Clone, Debug)]
pub enum Import {
    // use source as str;
    Simple(String, ImportSource),
    // use source::*;
    Glob(ImportSource)
}

#[derive(Clone, Debug)]
pub struct ImportSource {
    pub path: Path,
    pub did: Option<DefId>,
}

#[derive(Clone, Debug)]
pub struct Macro {
    pub source: String,
    pub imported_from: Option<String>,
}

#[derive(Clone, Debug)]
pub struct ProcMacro {
    pub kind: MacroKind,
    pub helpers: Vec<String>,
}

/// Stability information from `#[stable]`/`#[unstable]` attributes.
#[derive(Clone, Debug)]
pub struct Stability {
    pub level: stability::StabilityLevel,
    pub feature: Option<String>,
    pub since: String,
    pub deprecation: Option<Deprecation>,
    pub unstable_reason: Option<String>,
    pub issue: Option<NonZeroU32>,
}

#[derive(Clone, Debug)]
pub struct Deprecation {
    pub since: Option<String>,
    pub note: Option<String>,
}

/// A type binding on an associated type (e.g., `A = Bar` in `Foo<A = Bar>` or
/// `A: Send + Sync` in `Foo<A: Send + Sync>`).
#[derive(Clone, PartialEq, Eq, Debug, Hash)] pub struct TypeBinding { pub name: String, pub kind: TypeBindingKind, } #[derive(Clone, PartialEq, Eq, Debug, Hash)] pub enum TypeBindingKind { Equality { ty: Type, }, Constraint { bounds: Vec<GenericBound>, }, } impl TypeBinding { pub fn ty(&self) -> &Type { match self.kind { TypeBindingKind::Equality { ref ty } => ty, _ => panic!("expected equality type binding for parenthesized generic args"), } } } fixup clean/types.rs imports // ignore-tidy-filelength use std::fmt; use std::hash::{Hash, Hasher}; use std::default::Default; use std::{slice, vec}; use std::num::NonZeroU32; use std::iter::FromIterator; use std::rc::Rc; use std::cell::RefCell; use std::sync::Arc; use rustc::middle::lang_items; use rustc::middle::stability; use rustc::hir; use rustc::hir::def::Res; use rustc::hir::def_id::{CrateNum, DefId}; use rustc::ty::layout::VariantIdx; use rustc::util::nodemap::{FxHashMap, FxHashSet}; use rustc_index::vec::IndexVec; use rustc_target::spec::abi::Abi; use syntax::ast::{self, Attribute, AttrStyle, AttrKind, Ident}; use syntax::attr; use syntax::util::comments; use syntax::source_map::DUMMY_SP; use syntax_pos::hygiene::MacroKind; use syntax_pos::symbol::{Symbol, sym}; use syntax_pos::{self, FileName}; use crate::core::DocContext; use crate::clean::cfg::Cfg; use crate::clean::inline; use crate::clean::external_path; use crate::clean::types::Type::{QPath, ResolvedPath}; use crate::doctree; use crate::html::item_type::ItemType; use crate::html::render::{cache, ExternalLocation}; use self::Type::*; use self::ItemEnum::*; use self::SelfTy::*; use self::FunctionRetTy::*; thread_local!(pub static MAX_DEF_ID: RefCell<FxHashMap<CrateNum, DefId>> = Default::default()); const FN_OUTPUT_NAME: &'static str = "Output"; #[derive(Clone, Debug)] pub struct Crate { pub name: String, pub version: Option<String>, pub src: FileName, pub module: Option<Item>, pub externs: Vec<(CrateNum, ExternalCrate)>, pub primitives: Vec<(DefId, 
PrimitiveType, Attributes)>,
    // These are later on moved into `CACHE_KEY`, leaving the map empty.
    // Only here so that they can be filtered through the rustdoc passes.
    pub external_traits: Rc<RefCell<FxHashMap<DefId, Trait>>>,
    pub masked_crates: FxHashSet<CrateNum>,
    pub collapsed: bool,
}

/// Summary of a dependency crate as seen from the documented crate.
#[derive(Clone, Debug)]
pub struct ExternalCrate {
    pub name: String,
    pub src: FileName,
    pub attrs: Attributes,
    pub primitives: Vec<(DefId, PrimitiveType, Attributes)>,
    pub keywords: Vec<(DefId, String, Attributes)>,
}

/// Anything with a source location and set of attributes and, optionally, a
/// name. That is, anything that can be documented. This doesn't correspond
/// directly to the AST's concept of an item; it's a strict superset.
#[derive(Clone)]
pub struct Item {
    /// Stringified span
    pub source: Span,
    /// Not everything has a name. E.g., impls
    pub name: Option<String>,
    pub attrs: Attributes,
    pub inner: ItemEnum,
    pub visibility: Visibility,
    pub def_id: DefId,
    pub stability: Option<Stability>,
    pub deprecation: Option<Deprecation>,
}

impl fmt::Debug for Item {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // A `DefId` at or past the recorded per-crate maximum was fabricated
        // by rustdoc; print a marker instead of a misleading id.
        let fake = MAX_DEF_ID.with(|m| m.borrow().get(&self.def_id.krate)
                                   .map(|id| self.def_id >= *id).unwrap_or(false));
        let def_id: &dyn fmt::Debug = if fake { &"**FAKE**" } else { &self.def_id };

        fmt.debug_struct("Item")
            .field("source", &self.source)
            .field("name", &self.name)
            .field("attrs", &self.attrs)
            .field("inner", &self.inner)
            .field("visibility", &self.visibility)
            .field("def_id", def_id)
            .field("stability", &self.stability)
            .field("deprecation", &self.deprecation)
            .finish()
    }
}

impl Item {
    /// Finds the `doc` attribute as a NameValue and returns the corresponding
    /// value found.
    pub fn doc_value(&self) -> Option<&str> {
        self.attrs.doc_value()
    }

    /// Finds all `doc` attributes as NameValues and returns their corresponding values, joined
    /// with newlines.
    pub fn collapsed_doc_value(&self) -> Option<String> {
        self.attrs.collapsed_doc_value()
    }

    /// Resolved intra-doc links as `(text, url)` pairs.
    pub fn links(&self) -> Vec<(String, String)> {
        self.attrs.links(&self.def_id.krate)
    }

    pub fn is_crate(&self) -> bool {
        match self.inner {
            StrippedItem(box ModuleItem(Module { is_crate: true, ..})) |
            ModuleItem(Module { is_crate: true, ..}) => true,
            _ => false,
        }
    }
    pub fn is_mod(&self) -> bool {
        self.type_() == ItemType::Module
    }
    pub fn is_trait(&self) -> bool {
        self.type_() == ItemType::Trait
    }
    pub fn is_struct(&self) -> bool {
        self.type_() == ItemType::Struct
    }
    pub fn is_enum(&self) -> bool {
        self.type_() == ItemType::Enum
    }
    pub fn is_variant(&self) -> bool {
        self.type_() == ItemType::Variant
    }
    pub fn is_associated_type(&self) -> bool {
        self.type_() == ItemType::AssocType
    }
    pub fn is_associated_const(&self) -> bool {
        self.type_() == ItemType::AssocConst
    }
    pub fn is_method(&self) -> bool {
        self.type_() == ItemType::Method
    }
    pub fn is_ty_method(&self) -> bool {
        self.type_() == ItemType::TyMethod
    }
    pub fn is_typedef(&self) -> bool {
        self.type_() == ItemType::Typedef
    }
    pub fn is_primitive(&self) -> bool {
        self.type_() == ItemType::Primitive
    }
    pub fn is_union(&self) -> bool {
        self.type_() == ItemType::Union
    }
    pub fn is_import(&self) -> bool {
        self.type_() == ItemType::Import
    }
    pub fn is_extern_crate(&self) -> bool {
        self.type_() == ItemType::ExternCrate
    }
    pub fn is_keyword(&self) -> bool {
        self.type_() == ItemType::Keyword
    }

    pub fn is_stripped(&self) -> bool {
        match self.inner { StrippedItem(..)
=> true, _ => false } } pub fn has_stripped_fields(&self) -> Option<bool> { match self.inner { StructItem(ref _struct) => Some(_struct.fields_stripped), UnionItem(ref union) => Some(union.fields_stripped), VariantItem(Variant { kind: VariantKind::Struct(ref vstruct)} ) => { Some(vstruct.fields_stripped) }, _ => None, } } pub fn stability_class(&self) -> Option<String> { self.stability.as_ref().and_then(|ref s| { let mut classes = Vec::with_capacity(2); if s.level == stability::Unstable { classes.push("unstable"); } if s.deprecation.is_some() { classes.push("deprecated"); } if classes.len() != 0 { Some(classes.join(" ")) } else { None } }) } pub fn stable_since(&self) -> Option<&str> { self.stability.as_ref().map(|s| &s.since[..]) } pub fn is_non_exhaustive(&self) -> bool { self.attrs.other_attrs.iter() .any(|a| a.check_name(sym::non_exhaustive)) } /// Returns a documentation-level item type from the item. pub fn type_(&self) -> ItemType { ItemType::from(self) } /// Returns the info in the item's `#[deprecated]` or `#[rustc_deprecated]` attributes. /// /// If the item is not deprecated, returns `None`. pub fn deprecation(&self) -> Option<&Deprecation> { self.deprecation .as_ref() .or_else(|| self.stability.as_ref().and_then(|s| s.deprecation.as_ref())) } pub fn is_default(&self) -> bool { match self.inner { ItemEnum::MethodItem(ref meth) => { if let Some(defaultness) = meth.defaultness { defaultness.has_value() && !defaultness.is_final() } else { false } } _ => false, } } } #[derive(Clone, Debug)] pub enum ItemEnum { ExternCrateItem(String, Option<String>), ImportItem(Import), StructItem(Struct), UnionItem(Union), EnumItem(Enum), FunctionItem(Function), ModuleItem(Module), TypedefItem(Typedef, bool /* is associated type */), OpaqueTyItem(OpaqueTy, bool /* is associated type */), StaticItem(Static), ConstantItem(Constant), TraitItem(Trait), TraitAliasItem(TraitAlias), ImplItem(Impl), /// A method signature only. 
Used for required methods in traits (ie,
    /// non-default-methods).
    TyMethodItem(TyMethod),
    /// A method with a body.
    MethodItem(Method),
    StructFieldItem(Type),
    VariantItem(Variant),
    /// `fn`s from an extern block
    ForeignFunctionItem(Function),
    /// `static`s from an extern block
    ForeignStaticItem(Static),
    /// `type`s from an extern block
    ForeignTypeItem,
    MacroItem(Macro),
    ProcMacroItem(ProcMacro),
    PrimitiveItem(PrimitiveType),
    AssocConstItem(Type, Option<String>),
    AssocTypeItem(Vec<GenericBound>, Option<Type>),
    /// An item that has been stripped by a rustdoc pass
    StrippedItem(Box<ItemEnum>),
    KeywordItem(String),
}

impl ItemEnum {
    pub fn is_associated(&self) -> bool {
        match *self {
            ItemEnum::TypedefItem(_, _) |
            ItemEnum::AssocTypeItem(_, _) => true,
            _ => false,
        }
    }
}

#[derive(Clone, Debug)]
pub struct Module {
    pub items: Vec<Item>,
    pub is_crate: bool,
}

/// Iterator over the nested meta-items of every `#[name(...)]` attribute in a
/// slice of attributes; see `AttributesExt::lists`.
pub struct ListAttributesIter<'a> {
    attrs: slice::Iter<'a, ast::Attribute>,
    current_list: vec::IntoIter<ast::NestedMetaItem>,
    name: Symbol,
}

impl<'a> Iterator for ListAttributesIter<'a> {
    type Item = ast::NestedMetaItem;

    fn next(&mut self) -> Option<Self::Item> {
        // Drain the current attribute's list first, then advance to the next
        // matching attribute.
        if let Some(nested) = self.current_list.next() {
            return Some(nested);
        }

        for attr in &mut self.attrs {
            if let Some(list) = attr.meta_item_list() {
                if attr.check_name(self.name) {
                    self.current_list = list.into_iter();
                    if let Some(nested) = self.current_list.next() {
                        return Some(nested);
                    }
                }
            }
        }
        None
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let lower = self.current_list.len();
        (lower, None)
    }
}

pub trait AttributesExt {
    /// Finds an attribute as List and returns the list of attributes nested inside.
    fn lists(&self, name: Symbol) -> ListAttributesIter<'_>;
}

impl AttributesExt for [ast::Attribute] {
    fn lists(&self, name: Symbol) -> ListAttributesIter<'_> {
        ListAttributesIter {
            attrs: self.iter(),
            current_list: Vec::new().into_iter(),
            name,
        }
    }
}

pub trait NestedAttributesExt {
    /// Returns `true` if the attribute list contains a specific `Word`
    fn has_word(self, word: Symbol) -> bool;
}

impl<I: IntoIterator<Item=ast::NestedMetaItem>> NestedAttributesExt for I {
    fn has_word(self, word: Symbol) -> bool {
        self.into_iter().any(|attr| attr.is_word() && attr.check_name(word))
    }
}

/// A portion of documentation, extracted from a `#[doc]` attribute.
///
/// Each variant contains the line number within the complete doc-comment where the fragment
/// starts, as well as the Span where the corresponding doc comment or attribute is located.
///
/// Included files are kept separate from inline doc comments so that proper line-number
/// information can be given when a doctest fails. Sugared doc comments and "raw" doc comments are
/// kept separate because of issue #42760.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub enum DocFragment {
    /// A doc fragment created from a `///` or `//!` doc comment.
    SugaredDoc(usize, syntax_pos::Span, String),
    /// A doc fragment created from a "raw" `#[doc=""]` attribute.
    RawDoc(usize, syntax_pos::Span, String),
    /// A doc fragment created from a `#[doc(include="filename")]` attribute. Contains both the
    /// given filename and the file contents.
    Include(usize, syntax_pos::Span, String, String),
}

impl DocFragment {
    /// The fragment's documentation text.
    pub fn as_str(&self) -> &str {
        match *self {
            DocFragment::SugaredDoc(_, _, ref s) => &s[..],
            DocFragment::RawDoc(_, _, ref s) => &s[..],
            DocFragment::Include(_, _, _, ref s) => &s[..],
        }
    }

    pub fn span(&self) -> syntax_pos::Span {
        match *self {
            DocFragment::SugaredDoc(_, span, _) |
            DocFragment::RawDoc(_, span, _) |
            DocFragment::Include(_, span, _, _) => span,
        }
    }
}

impl<'a> FromIterator<&'a DocFragment> for String {
    // Joins fragments with newlines into one doc string.
    fn from_iter<T>(iter: T) -> Self
    where
        T: IntoIterator<Item = &'a DocFragment>
    {
        iter.into_iter().fold(String::new(), |mut acc, frag| {
            if !acc.is_empty() {
                acc.push('\n');
            }
            match *frag {
                DocFragment::SugaredDoc(_, _, ref docs)
                    | DocFragment::RawDoc(_, _, ref docs)
                    | DocFragment::Include(_, _, _, ref docs) =>
                    acc.push_str(docs),
            }

            acc
        })
    }
}

/// The cleaned form of an item's attributes.
#[derive(Clone, Debug, Default)]
pub struct Attributes {
    pub doc_strings: Vec<DocFragment>,
    pub other_attrs: Vec<ast::Attribute>,
    pub cfg: Option<Arc<Cfg>>,
    pub span: Option<syntax_pos::Span>,
    /// map from Rust paths to resolved defs and potential URL fragments
    pub links: Vec<(String, Option<DefId>, Option<String>)>,
    pub inner_docs: bool,
}

impl Attributes {
    /// Extracts the content from an attribute `#[doc(cfg(content))]`.
    fn extract_cfg(mi: &ast::MetaItem) -> Option<&ast::MetaItem> {
        use syntax::ast::NestedMetaItem::MetaItem;

        if let ast::MetaItemKind::List(ref nmis) = mi.kind {
            if nmis.len() == 1 {
                if let MetaItem(ref cfg_mi) = nmis[0] {
                    if cfg_mi.check_name(sym::cfg) {
                        if let ast::MetaItemKind::List(ref cfg_nmis) = cfg_mi.kind {
                            if cfg_nmis.len() == 1 {
                                if let MetaItem(ref content_mi) = cfg_nmis[0] {
                                    return Some(content_mi);
                                }
                            }
                        }
                    }
                }
            }
        }

        None
    }

    /// Reads a `MetaItem` from within an attribute, looks for whether it is a
    /// `#[doc(include="file")]`, and returns the filename and contents of the file as loaded from
    /// its expansion.
    fn extract_include(mi: &ast::MetaItem)
        -> Option<(String, String)>
    {
        mi.meta_item_list().and_then(|list| {
            for meta in list {
                if meta.check_name(sym::include) {
                    // the actual compiled `#[doc(include="filename")]` gets expanded to
                    // `#[doc(include(file="filename", contents="file contents")]` so we need to
                    // look for that instead
                    return meta.meta_item_list().and_then(|list| {
                        let mut filename: Option<String> = None;
                        let mut contents: Option<String> = None;

                        for it in list {
                            if it.check_name(sym::file) {
                                if let Some(name) = it.value_str() {
                                    filename = Some(name.to_string());
                                }
                            } else if it.check_name(sym::contents) {
                                if let Some(docs) = it.value_str() {
                                    contents = Some(docs.to_string());
                                }
                            }
                        }

                        if let (Some(filename), Some(contents)) = (filename, contents) {
                            Some((filename, contents))
                        } else {
                            None
                        }
                    });
                }
            }

            None
        })
    }

    /// `true` if any `#[doc(...)]` attribute carries the given word flag.
    pub fn has_doc_flag(&self, flag: Symbol) -> bool {
        for attr in &self.other_attrs {
            if !attr.check_name(sym::doc) { continue; }

            if let Some(items) = attr.meta_item_list() {
                if items.iter().filter_map(|i| i.meta_item()).any(|it| it.check_name(flag)) {
                    return true;
                }
            }
        }

        false
    }

    /// Cleans a raw attribute list into `Attributes`, splitting out doc
    /// fragments, `#[doc(cfg(...))]`, and `#[doc(include = "...")]`.
    pub fn from_ast(diagnostic: &::errors::Handler,
                    attrs: &[ast::Attribute]) -> Attributes {
        let mut doc_strings = vec![];
        let mut sp = None;
        let mut cfg = Cfg::True;
        let mut doc_line = 0;

        /// If `attr` is a doc comment, strips the leading and (if present)
        /// trailing comments symbols, e.g. `///`, `/**`, and `*/`. Otherwise,
        /// returns `attr` unchanged.
pub fn with_doc_comment_markers_stripped<T>(
            attr: &Attribute,
            f: impl FnOnce(&Attribute) -> T
        ) -> T {
            match attr.kind {
                AttrKind::Normal(_) => {
                    f(attr)
                }
                AttrKind::DocComment(comment) => {
                    let comment =
                        Symbol::intern(&comments::strip_doc_comment_decoration(&comment.as_str()));
                    f(&Attribute {
                        kind: AttrKind::DocComment(comment),
                        id: attr.id,
                        style: attr.style,
                        span: attr.span,
                    })
                }
            }
        }

        // Doc-related attributes are consumed into `doc_strings`/`cfg`;
        // everything else is kept verbatim in `other_attrs`.
        let other_attrs = attrs.iter().filter_map(|attr| {
            with_doc_comment_markers_stripped(attr, |attr| {
                if attr.check_name(sym::doc) {
                    if let Some(mi) = attr.meta() {
                        if let Some(value) = mi.value_str() {
                            // Extracted #[doc = "..."]
                            let value = value.to_string();
                            let line = doc_line;
                            doc_line += value.lines().count();

                            if attr.is_doc_comment() {
                                doc_strings.push(DocFragment::SugaredDoc(line, attr.span, value));
                            } else {
                                doc_strings.push(DocFragment::RawDoc(line, attr.span, value));
                            }

                            if sp.is_none() {
                                sp = Some(attr.span);
                            }
                            return None;
                        } else if let Some(cfg_mi) = Attributes::extract_cfg(&mi) {
                            // Extracted #[doc(cfg(...))]
                            match Cfg::parse(cfg_mi) {
                                Ok(new_cfg) => cfg &= new_cfg,
                                Err(e) => diagnostic.span_err(e.span, e.msg),
                            }
                            return None;
                        } else if let Some((filename, contents)) = Attributes::extract_include(&mi)
                        {
                            let line = doc_line;
                            doc_line += contents.lines().count();
                            doc_strings.push(DocFragment::Include(line,
                                                                  attr.span,
                                                                  filename,
                                                                  contents));
                        }
                    }
                }
                Some(attr.clone())
            })
        }).collect();

        // treat #[target_feature(enable = "feat")] attributes as if they were
        // #[doc(cfg(target_feature = "feat"))] attributes as well
        for attr in attrs.lists(sym::target_feature) {
            if attr.check_name(sym::enable) {
                if let Some(feat) = attr.value_str() {
                    let meta = attr::mk_name_value_item_str(
                        Ident::with_dummy_span(sym::target_feature), feat, DUMMY_SP
                    );
                    if let Ok(feat_cfg) = Cfg::parse(&meta) {
                        cfg &= feat_cfg;
                    }
                }
            }
        }

        // Inner (`//!`) docs on the first doc attribute mean the docs describe
        // the enclosing item rather than the next one.
        let inner_docs = attrs.iter()
                              .filter(|a| a.check_name(sym::doc))
                              .next()
                              .map_or(true, |a| a.style == AttrStyle::Inner);

        Attributes {
            doc_strings,
            other_attrs,
            cfg: if cfg == Cfg::True { None } else { Some(Arc::new(cfg)) },
            span: sp,
            links: vec![],
            inner_docs,
        }
    }

    /// Finds the `doc` attribute as a NameValue and returns the corresponding
    /// value found.
    pub fn doc_value(&self) -> Option<&str> {
        self.doc_strings.first().map(|s| s.as_str())
    }

    /// Finds all `doc` attributes as NameValues and returns their corresponding values, joined
    /// with newlines.
    pub fn collapsed_doc_value(&self) -> Option<String> {
        if !self.doc_strings.is_empty() {
            Some(self.doc_strings.iter().collect())
        } else {
            None
        }
    }

    /// Gets links as a vector
    ///
    /// Cache must be populated before call
    pub fn links(&self, krate: &CrateNum) -> Vec<(String, String)> {
        use crate::html::format::href;

        self.links.iter().filter_map(|&(ref s, did, ref fragment)| {
            match did {
                Some(did) => {
                    if let Some((mut href, ..)) = href(did) {
                        if let Some(ref fragment) = *fragment {
                            href.push_str("#");
                            href.push_str(fragment);
                        }
                        Some((s.clone(), href))
                    } else {
                        None
                    }
                }
                None => {
                    if let Some(ref fragment) = *fragment {
                        let cache = cache();
                        let url = match cache.extern_locations.get(krate) {
                            Some(&(_, ref src, ExternalLocation::Local)) =>
                                src.to_str().expect("invalid file path"),
                            Some(&(_, _, ExternalLocation::Remote(ref s))) => s,
                            Some(&(_, _, ExternalLocation::Unknown)) | None =>
                                "https://doc.rust-lang.org/nightly",
                        };
                        // This is a primitive so the url is done "by hand".
let tail = fragment.find('#').unwrap_or_else(|| fragment.len()); Some((s.clone(), format!("{}{}std/primitive.{}.html{}", url, if !url.ends_with('/') { "/" } else { "" }, &fragment[..tail], &fragment[tail..]))) } else { panic!("This isn't a primitive?!"); } } } }).collect() } } impl PartialEq for Attributes { fn eq(&self, rhs: &Self) -> bool { self.doc_strings == rhs.doc_strings && self.cfg == rhs.cfg && self.span == rhs.span && self.links == rhs.links && self.other_attrs.iter().map(|attr| attr.id).eq(rhs.other_attrs.iter().map(|attr| attr.id)) } } impl Eq for Attributes {} impl Hash for Attributes { fn hash<H: Hasher>(&self, hasher: &mut H) { self.doc_strings.hash(hasher); self.cfg.hash(hasher); self.span.hash(hasher); self.links.hash(hasher); for attr in &self.other_attrs { attr.id.hash(hasher); } } } impl AttributesExt for Attributes { fn lists(&self, name: Symbol) -> ListAttributesIter<'_> { self.other_attrs.lists(name) } } #[derive(Clone, PartialEq, Eq, Debug, Hash)] pub enum GenericBound { TraitBound(PolyTrait, hir::TraitBoundModifier), Outlives(Lifetime), } impl GenericBound { fn maybe_sized(cx: &DocContext<'_>) -> GenericBound { let did = cx.tcx.require_lang_item(lang_items::SizedTraitLangItem, None); let empty = cx.tcx.intern_substs(&[]); let path = external_path(cx, cx.tcx.item_name(did), Some(did), false, vec![], empty); inline::record_extern_fqn(cx, did, TypeKind::Trait); GenericBound::TraitBound(PolyTrait { trait_: ResolvedPath { path, param_names: None, did, is_generic: false, }, generic_params: Vec::new(), }, hir::TraitBoundModifier::Maybe) } fn is_sized_bound(&self, cx: &DocContext<'_>) -> bool { use rustc::hir::TraitBoundModifier as TBM; if let GenericBound::TraitBound(PolyTrait { ref trait_, .. 
}, TBM::None) = *self { if trait_.def_id() == cx.tcx.lang_items().sized_trait() { return true; } } false } fn get_poly_trait(&self) -> Option<PolyTrait> { if let GenericBound::TraitBound(ref p, _) = *self { return Some(p.clone()) } None } fn get_trait_type(&self) -> Option<Type> { if let GenericBound::TraitBound(PolyTrait { ref trait_, .. }, _) = *self { Some(trait_.clone()) } else { None } } } #[derive(Clone, PartialEq, Eq, Debug, Hash)] pub struct Lifetime(String); impl Lifetime { pub fn get_ref<'a>(&'a self) -> &'a str { let Lifetime(ref s) = *self; let s: &'a str = s; s } pub fn statik() -> Lifetime { Lifetime("'static".to_string()) } } #[derive(Clone, Debug)] pub enum WherePredicate { BoundPredicate { ty: Type, bounds: Vec<GenericBound> }, RegionPredicate { lifetime: Lifetime, bounds: Vec<GenericBound> }, EqPredicate { lhs: Type, rhs: Type }, } impl WherePredicate { pub fn get_bounds(&self) -> Option<&[GenericBound]> { match *self { WherePredicate::BoundPredicate { ref bounds, .. } => Some(bounds), WherePredicate::RegionPredicate { ref bounds, .. } => Some(bounds), _ => None, } } } #[derive(Clone, PartialEq, Eq, Debug, Hash)] pub enum GenericParamDefKind { Lifetime, Type { did: DefId, bounds: Vec<GenericBound>, default: Option<Type>, synthetic: Option<hir::SyntheticTyParamKind>, }, Const { did: DefId, ty: Type, }, } impl GenericParamDefKind { pub fn is_type(&self) -> bool { match *self { GenericParamDefKind::Type { .. } => true, _ => false, } } // FIXME(eddyb) this either returns the default of a type parameter, or the // type of a `const` parameter. It seems that the intention is to *visit* // any embedded types, but `get_type` seems to be the wrong name for that. pub fn get_type(&self) -> Option<Type> { match self { GenericParamDefKind::Type { default, .. } => default.clone(), GenericParamDefKind::Const { ty, .. 
} => Some(ty.clone()), GenericParamDefKind::Lifetime => None, } } } #[derive(Clone, PartialEq, Eq, Debug, Hash)] pub struct GenericParamDef { pub name: String, pub kind: GenericParamDefKind, } impl GenericParamDef { pub fn is_synthetic_type_param(&self) -> bool { match self.kind { GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => false, GenericParamDefKind::Type { ref synthetic, .. } => synthetic.is_some(), } } pub fn is_type(&self) -> bool { self.kind.is_type() } pub fn get_type(&self) -> Option<Type> { self.kind.get_type() } pub fn get_bounds(&self) -> Option<&[GenericBound]> { match self.kind { GenericParamDefKind::Type { ref bounds, .. } => Some(bounds), _ => None, } } } // maybe use a Generic enum and use Vec<Generic>? #[derive(Clone, Debug, Default)] pub struct Generics { pub params: Vec<GenericParamDef>, pub where_predicates: Vec<WherePredicate>, } #[derive(Clone, Debug)] pub struct Method { pub generics: Generics, pub decl: FnDecl, pub header: hir::FnHeader, pub defaultness: Option<hir::Defaultness>, pub all_types: Vec<Type>, pub ret_types: Vec<Type>, } #[derive(Clone, Debug)] pub struct TyMethod { pub header: hir::FnHeader, pub decl: FnDecl, pub generics: Generics, pub all_types: Vec<Type>, pub ret_types: Vec<Type>, } #[derive(Clone, Debug)] pub struct Function { pub decl: FnDecl, pub generics: Generics, pub header: hir::FnHeader, pub all_types: Vec<Type>, pub ret_types: Vec<Type>, } #[derive(Clone, PartialEq, Eq, Debug, Hash)] pub struct FnDecl { pub inputs: Arguments, pub output: FunctionRetTy, pub c_variadic: bool, pub attrs: Attributes, } impl FnDecl { pub fn self_type(&self) -> Option<SelfTy> { self.inputs.values.get(0).and_then(|v| v.to_self()) } /// Returns the sugared return type for an async function. /// /// For example, if the return type is `impl std::future::Future<Output = i32>`, this function /// will return `i32`. 
/// /// # Panics /// /// This function will panic if the return type does not match the expected sugaring for async /// functions. pub fn sugared_async_return_type(&self) -> FunctionRetTy { match &self.output { FunctionRetTy::Return(Type::ImplTrait(bounds)) => { match &bounds[0] { GenericBound::TraitBound(PolyTrait { trait_, .. }, ..) => { let bindings = trait_.bindings().unwrap(); FunctionRetTy::Return(bindings[0].ty().clone()) } _ => panic!("unexpected desugaring of async function"), } } _ => panic!("unexpected desugaring of async function"), } } } #[derive(Clone, PartialEq, Eq, Debug, Hash)] pub struct Arguments { pub values: Vec<Argument>, } #[derive(Clone, PartialEq, Eq, Debug, Hash)] pub struct Argument { pub type_: Type, pub name: String, } #[derive(Clone, PartialEq, Debug)] pub enum SelfTy { SelfValue, SelfBorrowed(Option<Lifetime>, Mutability), SelfExplicit(Type), } impl Argument { pub fn to_self(&self) -> Option<SelfTy> { if self.name != "self" { return None; } if self.type_.is_self_type() { return Some(SelfValue); } match self.type_ { BorrowedRef{ref lifetime, mutability, ref type_} if type_.is_self_type() => { Some(SelfBorrowed(lifetime.clone(), mutability)) } _ => Some(SelfExplicit(self.type_.clone())) } } } #[derive(Clone, PartialEq, Eq, Debug, Hash)] pub enum FunctionRetTy { Return(Type), DefaultReturn, } impl GetDefId for FunctionRetTy { fn def_id(&self) -> Option<DefId> { match *self { Return(ref ty) => ty.def_id(), DefaultReturn => None, } } } #[derive(Clone, Debug)] pub struct Trait { pub auto: bool, pub unsafety: hir::Unsafety, pub items: Vec<Item>, pub generics: Generics, pub bounds: Vec<GenericBound>, pub is_spotlight: bool, pub is_auto: bool, } #[derive(Clone, Debug)] pub struct TraitAlias { pub generics: Generics, pub bounds: Vec<GenericBound>, } /// A trait reference, which may have higher ranked lifetimes. 
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct PolyTrait {
    /// The trait being referenced (normally a `ResolvedPath`).
    pub trait_: Type,
    /// Generic parameters introduced by this trait reference itself.
    pub generic_params: Vec<GenericParamDef>,
}

/// A representation of a type suitable for hyperlinking purposes. Ideally, one can get the
/// original type out of the AST/`TyCtxt` given one of these, if more information is needed.
/// Most importantly, it does not preserve mutability or boxes.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub enum Type {
    /// Structs/enums/traits (most that would be an `hir::TyKind::Path`).
    ResolvedPath {
        path: Path,
        param_names: Option<Vec<GenericBound>>,
        did: DefId,
        /// `true` if this is a `T::Name` path for an associated type.
        is_generic: bool,
    },
    /// For parameterized types, so the consumer of the JSON doesn't go
    /// looking for types which don't exist anywhere.
    Generic(String),
    /// Primitives are the fixed-size numeric types (plus int/usize/float), char,
    /// arrays, slices, and tuples.
    Primitive(PrimitiveType),
    /// `extern "ABI" fn`
    BareFunction(Box<BareFunctionDecl>),
    Tuple(Vec<Type>),
    Slice(Box<Type>),
    // NOTE(review): the `String` appears to carry the array length as written
    // in the source -- confirm against the `clean` pass that builds it.
    Array(Box<Type>, String),
    /// The `!` type.
    Never,
    RawPointer(Mutability, Box<Type>),
    BorrowedRef {
        lifetime: Option<Lifetime>,
        mutability: Mutability,
        type_: Box<Type>,
    },

    // `<Type as Trait>::Name`
    QPath {
        name: String,
        self_type: Box<Type>,
        trait_: Box<Type>
    },

    // `_`
    Infer,

    // `impl TraitA + TraitB + ...`
    ImplTrait(Vec<GenericBound>),
}

/// The fixed set of types rustdoc documents as "primitives"; note that this
/// includes pseudo-primitives such as `tuple`, `unit`, `reference` and `fn`,
/// which get their own documentation pages.
#[derive(Clone, PartialEq, Eq, Hash, Copy, Debug)]
pub enum PrimitiveType {
    Isize, I8, I16, I32, I64, I128,
    Usize, U8, U16, U32, U64, U128,
    F32, F64,
    Char,
    Bool,
    Str,
    Slice,
    Array,
    Tuple,
    Unit,
    RawPointer,
    Reference,
    Fn,
    Never,
}

/// The broad kind of item a `DefId` refers to, used when recording
/// fully-qualified names for external items.
#[derive(Clone, Copy, Debug)]
pub enum TypeKind {
    Enum,
    Function,
    Module,
    Const,
    Static,
    Struct,
    Union,
    Trait,
    Typedef,
    Foreign,
    Macro,
    Attr,
    Derive,
    TraitAlias,
}

/// Types that can be resolved to the `DefId` of the item they document/link to.
pub trait GetDefId {
    fn def_id(&self) -> Option<DefId>;
}

impl<T: GetDefId> GetDefId for Option<T> {
    /// `None` maps to `None`; `Some(inner)` delegates to the inner value.
    fn def_id(&self) -> Option<DefId> {
        self.as_ref().and_then(|d| d.def_id())
    }
}

impl Type {
    /// Returns the `PrimitiveType` this type is rendered as, if any,
    /// looking through one level of `&`-reference for primitives, slices
    /// and arrays. An empty tuple maps to `Unit`, a non-empty one to `Tuple`.
    pub fn primitive_type(&self) -> Option<PrimitiveType> {
        match *self {
            Primitive(p) | BorrowedRef { type_: box Primitive(p), ..} => Some(p),
            Slice(..) | BorrowedRef { type_: box Slice(..), .. } => Some(PrimitiveType::Slice),
            Array(..) | BorrowedRef { type_: box Array(..), .. } => Some(PrimitiveType::Array),
            Tuple(ref tys) => if tys.is_empty() {
                Some(PrimitiveType::Unit)
            } else {
                Some(PrimitiveType::Tuple)
            },
            RawPointer(..) => Some(PrimitiveType::RawPointer),
            // A reference to a generic is documented as the `reference` primitive.
            BorrowedRef { type_: box Generic(..), .. } => Some(PrimitiveType::Reference),
            BareFunction(..) => Some(PrimitiveType::Fn),
            Never => Some(PrimitiveType::Never),
            _ => None,
        }
    }

    /// `true` only for a `ResolvedPath` whose `is_generic` flag is set
    /// (i.e. a `T::Name`-style associated-type path).
    pub fn is_generic(&self) -> bool {
        match *self {
            ResolvedPath { is_generic, .. } => is_generic,
            _ => false,
        }
    }

    /// `true` if this is the literal `Self` type parameter.
    pub fn is_self_type(&self) -> bool {
        match *self {
            Generic(ref name) => name == "Self",
            _ => false
        }
    }

    /// Returns the type arguments of the last path segment
    /// (e.g. `[T, U]` for `Foo<T, U>`); lifetimes and consts are filtered out.
    /// `None` for non-path types or parenthesized (`Fn(..)`) arguments.
    pub fn generics(&self) -> Option<Vec<Type>> {
        match *self {
            ResolvedPath { ref path, .. } => {
                path.segments.last().and_then(|seg| {
                    if let GenericArgs::AngleBracketed { ref args, .. } = seg.args {
                        Some(args.iter().filter_map(|arg| match arg {
                            GenericArg::Type(ty) => Some(ty.clone()),
                            _ => None,
                        }).collect())
                    } else {
                        None
                    }
                })
            }
            _ => None,
        }
    }

    /// Returns the associated-type bindings (`Item = T`) of the last path
    /// segment, if this is a path with angle-bracketed arguments.
    pub fn bindings(&self) -> Option<&[TypeBinding]> {
        match *self {
            ResolvedPath { ref path, .. } => {
                path.segments.last().and_then(|seg| {
                    if let GenericArgs::AngleBracketed { ref bindings, .. } = seg.args {
                        Some(&**bindings)
                    } else {
                        None
                    }
                })
            }
            _ => None
        }
    }

    /// `true` if this is a bare generic parameter (`Type::Generic`), as
    /// opposed to a generic *path* (cf. `is_generic`).
    pub fn is_full_generic(&self) -> bool {
        match *self {
            Type::Generic(_) => true,
            _ => false,
        }
    }

    /// For a `QPath` (`<Self as Trait>::Name`) whose trait is resolved,
    /// returns `(self type, trait DefId, associated item name)`.
    pub fn projection(&self) -> Option<(&Type, DefId, &str)> {
        let (self_, trait_, name) = match self {
            QPath { ref self_type, ref trait_, ref name } => {
                (self_type, trait_, name)
            }
            _ => return None,
        };
        let trait_did = match **trait_ {
            ResolvedPath { did, .. } => did,
            _ => return None,
        };
        Some((&self_, trait_did, name))
    }
}

impl GetDefId for Type {
    /// Resolves this type to the `DefId` of the page it should link to.
    /// Primitives are looked up in the rendering cache's
    /// `primitive_locations`; compound forms delegate to the corresponding
    /// pseudo-primitive (unit/tuple, slice, array, fn, pointer, ...).
    fn def_id(&self) -> Option<DefId> {
        match *self {
            ResolvedPath { did, .. } => Some(did),
            Primitive(p) => crate::html::render::cache().primitive_locations.get(&p).cloned(),
            // `&T` where `T` is generic links to the `reference` primitive...
            BorrowedRef { type_: box Generic(..), .. } =>
                Primitive(PrimitiveType::Reference).def_id(),
            // ...otherwise a reference links to whatever its referent links to.
            BorrowedRef { ref type_, .. } => type_.def_id(),
            Tuple(ref tys) => if tys.is_empty() {
                Primitive(PrimitiveType::Unit).def_id()
            } else {
                Primitive(PrimitiveType::Tuple).def_id()
            },
            BareFunction(..) => Primitive(PrimitiveType::Fn).def_id(),
            Never => Primitive(PrimitiveType::Never).def_id(),
            Slice(..) => Primitive(PrimitiveType::Slice).def_id(),
            Array(..) => Primitive(PrimitiveType::Array).def_id(),
            RawPointer(..) => Primitive(PrimitiveType::RawPointer).def_id(),
            QPath { ref self_type, .. } => self_type.def_id(),
            _ => None,
        }
    }
}

impl PrimitiveType {
    /// Parses a primitive name as used in `#[doc(primitive = "...")]`-style
    /// strings; the inverse of `as_str`.
    fn from_str(s: &str) -> Option<PrimitiveType> {
        match s {
            "isize" => Some(PrimitiveType::Isize),
            "i8" => Some(PrimitiveType::I8),
            "i16" => Some(PrimitiveType::I16),
            "i32" => Some(PrimitiveType::I32),
            "i64" => Some(PrimitiveType::I64),
            "i128" => Some(PrimitiveType::I128),
            "usize" => Some(PrimitiveType::Usize),
            "u8" => Some(PrimitiveType::U8),
            "u16" => Some(PrimitiveType::U16),
            "u32" => Some(PrimitiveType::U32),
            "u64" => Some(PrimitiveType::U64),
            "u128" => Some(PrimitiveType::U128),
            "bool" => Some(PrimitiveType::Bool),
            "char" => Some(PrimitiveType::Char),
            "str" => Some(PrimitiveType::Str),
            "f32" => Some(PrimitiveType::F32),
            "f64" => Some(PrimitiveType::F64),
            "array" => Some(PrimitiveType::Array),
            "slice" => Some(PrimitiveType::Slice),
            "tuple" => Some(PrimitiveType::Tuple),
            "unit" => Some(PrimitiveType::Unit),
            "pointer" => Some(PrimitiveType::RawPointer),
            "reference" => Some(PrimitiveType::Reference),
            "fn" => Some(PrimitiveType::Fn),
            "never" => Some(PrimitiveType::Never),
            _ => None,
        }
    }

    /// The canonical lowercase name of the primitive; the inverse of `from_str`.
    pub fn as_str(&self) -> &'static str {
        use self::PrimitiveType::*;
        match *self {
            Isize => "isize",
            I8 => "i8",
            I16 => "i16",
            I32 => "i32",
            I64 => "i64",
            I128 => "i128",
            Usize => "usize",
            U8 => "u8",
            U16 => "u16",
            U32 => "u32",
            U64 => "u64",
            U128 => "u128",
            F32 => "f32",
            F64 => "f64",
            Str => "str",
            Bool => "bool",
            Char => "char",
            Array => "array",
            Slice => "slice",
            Tuple => "tuple",
            Unit => "unit",
            RawPointer => "pointer",
            Reference => "reference",
            Fn => "fn",
            Never => "never",
        }
    }

    /// The URL path component for this primitive's documentation page
    /// (currently identical to `as_str`).
    pub fn to_url_str(&self) -> &'static str {
        self.as_str()
    }
}

impl From<ast::IntTy> for PrimitiveType {
    fn from(int_ty: ast::IntTy) -> PrimitiveType {
        match int_ty {
            ast::IntTy::Isize => PrimitiveType::Isize,
            ast::IntTy::I8 => PrimitiveType::I8,
            ast::IntTy::I16 => PrimitiveType::I16,
            ast::IntTy::I32 => PrimitiveType::I32,
            ast::IntTy::I64 => PrimitiveType::I64,
            ast::IntTy::I128 => PrimitiveType::I128,
        }
    }
}

impl From<ast::UintTy> for PrimitiveType {
    fn from(uint_ty: ast::UintTy) -> PrimitiveType {
        match uint_ty {
            ast::UintTy::Usize => PrimitiveType::Usize,
            ast::UintTy::U8 => PrimitiveType::U8,
            ast::UintTy::U16 => PrimitiveType::U16,
            ast::UintTy::U32 => PrimitiveType::U32,
            ast::UintTy::U64 => PrimitiveType::U64,
            ast::UintTy::U128 => PrimitiveType::U128,
        }
    }
}

impl From<ast::FloatTy> for PrimitiveType {
    fn from(float_ty: ast::FloatTy) -> PrimitiveType {
        match float_ty {
            ast::FloatTy::F32 => PrimitiveType::F32,
            ast::FloatTy::F64 => PrimitiveType::F64,
        }
    }
}

/// The cleaned form of an item's visibility.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Visibility {
    Public,
    Inherited,
    Crate,
    Restricted(DefId, Path),
}

#[derive(Clone, Debug)]
pub struct Struct {
    pub struct_type: doctree::StructType,
    pub generics: Generics,
    pub fields: Vec<Item>,
    /// `true` if private/hidden fields were stripped from `fields`.
    pub fields_stripped: bool,
}

#[derive(Clone, Debug)]
pub struct Union {
    pub struct_type: doctree::StructType,
    pub generics: Generics,
    pub fields: Vec<Item>,
    /// `true` if private/hidden fields were stripped from `fields`.
    pub fields_stripped: bool,
}

/// This is a more limited form of the standard Struct, different in that
/// it lacks the things most items have (name, id, parameterization). Found
/// only as a variant in an enum.
#[derive(Clone, Debug)]
pub struct VariantStruct {
    pub struct_type: doctree::StructType,
    pub fields: Vec<Item>,
    /// `true` if hidden fields were stripped from `fields`.
    pub fields_stripped: bool,
}

#[derive(Clone, Debug)]
pub struct Enum {
    pub variants: IndexVec<VariantIdx, Item>,
    pub generics: Generics,
    /// `true` if hidden variants were stripped from `variants`.
    pub variants_stripped: bool,
}

/// A single enum variant; the payload shape lives in `kind`.
#[derive(Clone, Debug)]
pub struct Variant {
    pub kind: VariantKind,
}

#[derive(Clone, Debug)]
pub enum VariantKind {
    /// A unit variant with no payload.
    CLike,
    /// A tuple variant, carrying the field types.
    Tuple(Vec<Type>),
    /// A struct variant, carrying its (limited) struct form.
    Struct(VariantStruct),
}

/// A source location: file plus line/column bounds, keeping the original
/// `syntax_pos::Span` around for diagnostics.
#[derive(Clone, Debug)]
pub struct Span {
    pub filename: FileName,
    pub loline: usize,
    pub locol: usize,
    pub hiline: usize,
    pub hicol: usize,
    pub original: syntax_pos::Span,
}

impl Span {
    /// A dummy span (anonymous file, all-zero coordinates, `DUMMY_SP`).
    pub fn empty() -> Span {
        Span {
            filename: FileName::Anon(0),
            loline: 0, locol: 0,
            hiline: 0, hicol: 0,
            original: syntax_pos::DUMMY_SP,
        }
    }

    /// The underlying compiler span.
    pub fn span(&self) -> syntax_pos::Span {
        self.original
    }
}

/// A cleaned path, e.g. `std::vec::Vec`, along with what it resolved to.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct Path {
    /// `true` for absolute (`::`-prefixed) paths.
    pub global: bool,
    pub res: Res,
    pub segments: Vec<PathSegment>,
}

impl Path {
    /// The name of the final segment.
    ///
    /// # Panics
    ///
    /// Panics if the path has no segments.
    pub fn last_name(&self) -> &str {
        self.segments.last().expect("segments were empty").name.as_str()
    }
}

/// A single generic argument as written in a path segment.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub enum GenericArg {
    Lifetime(Lifetime),
    Type(Type),
    Const(Constant),
}

/// The arguments attached to a path segment: either `<A, B, Assoc = C>`
/// or the parenthesized `Fn`-sugar form `(A, B) -> C`.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub enum GenericArgs {
    AngleBracketed {
        args: Vec<GenericArg>,
        bindings: Vec<TypeBinding>,
    },
    Parenthesized {
        inputs: Vec<Type>,
        output: Option<Type>,
    }
}

/// One `name<args>` component of a `Path`.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct PathSegment {
    pub name: String,
    pub args: GenericArgs,
}

/// A `type Alias<...> = Ty;` item.
#[derive(Clone, Debug)]
pub struct Typedef {
    pub type_: Type,
    pub generics: Generics,
}

/// An opaque `impl Trait` type alias item.
#[derive(Clone, Debug)]
pub struct OpaqueTy {
    pub bounds: Vec<GenericBound>,
    pub generics: Generics,
}

/// A bare `fn` type, e.g. `unsafe extern "C" fn(i32) -> i32`.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct BareFunctionDecl {
    pub unsafety: hir::Unsafety,
    pub generic_params: Vec<GenericParamDef>,
    pub decl: FnDecl,
    pub abi: Abi,
}

#[derive(Clone, Debug)]
pub struct Static {
    pub type_: Type,
    pub mutability: Mutability,
    /// It's useful to have the value of a static documented, but I have no
    /// desire to represent expressions (that'd basically be all of the AST,
    /// which is huge!). So, have a string.
    pub expr: String,
}

/// A `const` item; like `Static`, the value is kept as source text.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct Constant {
    pub type_: Type,
    pub expr: String,
}

#[derive(Debug, Clone, PartialEq, Eq, Copy, Hash)]
pub enum Mutability {
    Mutable,
    Immutable,
}

/// Whether an impl is a normal (`impl Trait for T`) or a negative
/// (`impl !Trait for T`) implementation.
#[derive(Clone, PartialEq, Debug)]
pub enum ImplPolarity {
    Positive,
    Negative,
}

/// A cleaned `impl` block, inherent (`trait_ == None`) or trait.
#[derive(Clone, Debug)]
pub struct Impl {
    pub unsafety: hir::Unsafety,
    pub generics: Generics,
    /// Names of trait methods with a default the impl did not override.
    pub provided_trait_methods: FxHashSet<String>,
    pub trait_: Option<Type>,
    pub for_: Type,
    pub items: Vec<Item>,
    pub polarity: Option<ImplPolarity>,
    /// `true` for impls synthesized by rustdoc (e.g. auto-trait impls).
    pub synthetic: bool,
    /// Set when this is a blanket impl (`impl<T> Trait for T`).
    pub blanket_impl: Option<Type>,
}

/// A `use` item.
#[derive(Clone, Debug)]
pub enum Import {
    // use source as str;
    Simple(String, ImportSource),
    // use source::*;
    Glob(ImportSource)
}

/// The path an `Import` pulls from, plus its resolution if known.
#[derive(Clone, Debug)]
pub struct ImportSource {
    pub path: Path,
    pub did: Option<DefId>,
}

/// A `macro_rules!` macro, kept as source text.
#[derive(Clone, Debug)]
pub struct Macro {
    pub source: String,
    pub imported_from: Option<String>,
}

/// A procedural macro, with its helper attribute names (for derives).
#[derive(Clone, Debug)]
pub struct ProcMacro {
    pub kind: MacroKind,
    pub helpers: Vec<String>,
}

/// Cleaned `#[stable]`/`#[unstable]` attribute data.
#[derive(Clone, Debug)]
pub struct Stability {
    pub level: stability::StabilityLevel,
    pub feature: Option<String>,
    pub since: String,
    pub deprecation: Option<Deprecation>,
    pub unstable_reason: Option<String>,
    pub issue: Option<NonZeroU32>,
}

/// Cleaned `#[deprecated]` attribute data.
#[derive(Clone, Debug)]
pub struct Deprecation {
    pub since: Option<String>,
    pub note: Option<String>,
}

/// A type binding on an associated type (e.g., `A = Bar` in `Foo<A = Bar>` or
/// `A: Send + Sync` in `Foo<A: Send + Sync>`).
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct TypeBinding {
    pub name: String,
    pub kind: TypeBindingKind,
}

/// The two forms an associated-type binding can take: an equality
/// constraint (`A = Ty`) or a bounds constraint (`A: Bound`).
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub enum TypeBindingKind {
    Equality {
        ty: Type,
    },
    Constraint {
        bounds: Vec<GenericBound>,
    },
}

impl TypeBinding {
    /// Returns the bound type of an equality binding (`A = Ty`).
    ///
    /// # Panics
    ///
    /// Panics if this is a `Constraint` binding; callers use this only
    /// where an equality binding is guaranteed (parenthesized generics).
    pub fn ty(&self) -> &Type {
        if let TypeBindingKind::Equality { ref ty } = self.kind {
            ty
        } else {
            panic!("expected equality type binding for parenthesized generic args")
        }
    }
}
extern crate libc; extern crate "termbox-sys" as termbox; pub use self::running::running; pub use self::style::{Style, RB_BOLD, RB_UNDERLINE, RB_REVERSE, RB_NORMAL}; use std::error::Error; use std::fmt; use std::kinds::marker; use std::time::duration::Duration; use std::num::FromPrimitive; use termbox::RawEvent; use libc::{c_int, c_uint}; #[derive(Copy)] pub enum Event { KeyEvent(u8, u16, u32), ResizeEvent(i32, i32), NoEvent } #[derive(Copy, PartialEq)] #[repr(C,u16)] pub enum Color { Default = 0x00, Black = 0x01, Red = 0x02, Green = 0x03, Yellow = 0x04, Blue = 0x05, Magenta = 0x06, Cyan = 0x07, White = 0x08, } mod style { bitflags! { #[repr(C)] flags Style: u16 { const TB_NORMAL_COLOR = 0x000F, const RB_BOLD = 0x0100, const RB_UNDERLINE = 0x0200, const RB_REVERSE = 0x0400, const RB_NORMAL = 0x0000, const TB_ATTRIB = RB_BOLD.bits | RB_UNDERLINE.bits | RB_REVERSE.bits, } } impl Style { pub fn from_color(color: super::Color) -> Style { Style { bits: color as u16 & TB_NORMAL_COLOR.bits } } } } const NIL_RAW_EVENT: RawEvent = RawEvent { etype: 0, emod: 0, key: 0, ch: 0, w: 0, h: 0 }; // FIXME: Rust doesn't support this enum representation. // #[derive(Copy,FromPrimitive,Show)] // #[repr(C,int)] // pub enum EventErrorKind { // Error = -1, // } // pub type EventError = Option<EventErrorKind>; #[allow(non_snake_case)] pub mod EventErrorKind { #[derive(Copy,Show)] pub struct Error; } pub type EventError = Option<EventErrorKind::Error>; pub type EventResult<T> = Result<T, EventError>; impl Error for EventError { fn description(&self) -> &str { match *self { // TODO: Check errno here Some(EventErrorKind::Error) => "Unknown error.", None => "Unexpected return code." 
} } } fn unpack_event(ev_type: c_int, ev: &RawEvent) -> EventResult<Event> { match ev_type { 0 => Ok(Event::NoEvent), 1 => Ok(Event::KeyEvent(ev.emod, ev.key, ev.ch)), 2 => Ok(Event::ResizeEvent(ev.w, ev.h)), // FIXME: Rust doesn't support this error representation // res => FromPrimitive::from_int(res as int), -1 => Err(Some(EventErrorKind::Error)), _ => Err(None) } } #[derive(Copy,FromPrimitive,Show)] #[repr(C,int)] pub enum InitErrorKind { UnsupportedTerminal = -1, FailedToOpenTty = -2, PipeTrapError = -3, } pub enum InitError { Opt(InitOption, Option<Box<Error>>), AlreadyOpen, TermBox(Option<InitErrorKind>), } impl fmt::Show for InitError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "{}", self.description()) } } impl Error for InitError { fn description(&self) -> &str { match *self { InitError::Opt(InitOption::BufferStderr, _) => "Could not redirect stderr.", InitError::AlreadyOpen => "RustBox is already open.", InitError::TermBox(e) => e.map_or("Unexpected TermBox return code.", |e| match e { InitErrorKind::UnsupportedTerminal => "Unsupported terminal.", InitErrorKind::FailedToOpenTty => "Failed to open TTY.", InitErrorKind::PipeTrapError => "Pipe trap error.", }), } } fn cause(&self) -> Option<&Error> { match *self { InitError::Opt(_, Some(ref e)) => Some(&**e), _ => None } } } mod running { use std::sync::atomic::{self, AtomicBool}; // The state of the RustBox is protected by the lock. Yay, global state! static RUSTBOX_RUNNING: AtomicBool = atomic::ATOMIC_BOOL_INIT; /// true iff RustBox is currently running. Beware of races here--don't rely on this for anything /// critical unless you happen to know that RustBox cannot change state when it is called (a good /// usecase would be checking to see if it's worth risking double printing backtraces to avoid /// having them swallowed up by RustBox). 
pub fn running() -> bool { RUSTBOX_RUNNING.load(atomic::Ordering::SeqCst) } // Internal RAII guard used to ensure we release the running lock whenever we acquire it. #[allow(missing_copy_implementations)] pub struct RunningGuard(()); pub fn run() -> Option<RunningGuard> { // Ensure that we are not already running and simultaneously set RUSTBOX_RUNNING using an // atomic swap. This ensures that contending threads don't trample each other. if RUSTBOX_RUNNING.swap(true, atomic::Ordering::SeqCst) { // The Rustbox was already running. None } else { // The RustBox was not already running, and now we have the lock. Some(RunningGuard(())) } } impl Drop for RunningGuard { fn drop(&mut self) { // Indicate that we're free now. We could probably get away with lower atomicity here, // but there's no reason to take that chance. RUSTBOX_RUNNING.store(false, atomic::Ordering::SeqCst); } } } // RAII guard for input redirection #[cfg(unix)] mod redirect { use std::error::Error; use libc; use std::io::{util, IoError, PipeStream}; use std::io::pipe::PipePair; use std::os::unix::AsRawFd; use super::{InitError, InitOption}; use super::running::RunningGuard; pub struct Redirect { pair: PipePair, fd: PipeStream, } impl Drop for Redirect { fn drop(&mut self) { // We make sure that we never actually create the Redirect without also taking a // RunningGuard. This means that we know that this will always be dropped immediately // before the RunningGuard is destroyed, and *after* a RustBox containing one is // destroyed. // // We rely on destructor order here: destructors are always executed top-down, so as // long as this is included above the RunningGuard in the RustBox struct, we can be // confident that it is destroyed while we're still holding onto the lock. unsafe { let old_fd = self.pair.writer.as_raw_fd(); let new_fd = self.fd.as_raw_fd(); // Reopen new_fd as writer. // (Note that if we fail here, we can't really do anything about it, so just ignore any // errors). 
if libc::dup2(old_fd, new_fd) != new_fd { return } } // Copy from reader to writer. drop(util::copy(&mut self.pair.reader, &mut self.pair.writer)); } } // The reason we take the RunningGuard is to make sure we don't try to redirect before the // TermBox is set up. Otherwise it is possible to race with other threads trying to set up the // RustBox. fn redirect(new: PipeStream, _: &RunningGuard) -> Result<Redirect, Option<Box<Error>>> { // Create a pipe pair. let mut pair = try!(PipeStream::pair().map_err( |e| Some(box e as Box<Error>))); unsafe { let new_fd = new.as_raw_fd(); // Copy new_fd to dup_fd. let dup_fd = match libc::dup(new_fd) { -1 => return Err(Some(box IoError::last_error() as Box<Error>)), fd => try!(PipeStream::open(fd).map_err( |e| Some(box e as Box<Error>))), }; // Make the writer nonblocking. This means that even if the stderr pipe fills up, // exceptions from stack traces will not block the program. Unfortunately, if this // does happen stderr outputwill be lost until RustBox exits. let old_fd = pair.writer.as_raw_fd(); let res = libc::fcntl(old_fd, libc::F_SETFL, libc::O_NONBLOCK); if res != 0 { return Err(if res == -1 { Some(box IoError::last_error() as Box<Error>) } else { None }) // This should really never happen, but no reason to unwind here. } // Reopen new_fd as writer. let fd = libc::dup2(old_fd, new_fd); if fd == new_fd { // On success, the new file descriptor should be returned. Replace the old one // with dup_fd, since we no longer need an explicit reference to the writer. // Note that it is *possible* that some other thread tried to take over stderr // between when we did and now, causing a race here. RustBox won't do it, though. // And it's honestly not clear how to guarantee correct behavior there anyway, // since if the change had come a fraction of a second later we still probably // wouldn't want to overwite it. In general this is a good argument for why the // redirect behavior is optional. 
pair.writer = dup_fd; Ok(Redirect { pair: pair, fd: new, }) } else { Err(if fd == -1 { Some(box IoError::last_error() as Box<Error>) } else { None }) } } } pub fn redirect_stderr(stderr: &mut Option<Redirect>, rg: &RunningGuard) -> Result<(), InitError> { match *stderr { Some(_) => { // Can only redirect once. Err(InitError::Opt(InitOption::BufferStderr, None)) }, None => { *stderr = Some(try!(redirect( try!(PipeStream::open(libc::STDERR_FILENO) .map_err( |e| InitError::Opt(InitOption::BufferStderr, Some(box e as Box<Error>)))), rg) .map_err( |e| InitError::Opt(InitOption::BufferStderr, e)))); Ok(()) } } } } #[cfg(not(unix))] // Not sure how we'll do this on Windows, unimplemented for now. mod redirect { pub enum Redirect { } pub fn redirect_stderr(_: &mut Option<Redirect>, _: &super::RunningGuard) -> Result<(), super::InitError> { Err(super::InitError::Opt(super::InitOption::BufferStderr, None)) } } #[allow(missing_copy_implementations)] pub struct RustBox { // Termbox is not thread safe no_sync: marker::NoSync, // We only bother to redirect stderr for the moment, since it's used for panic! _stderr: Option<redirect::Redirect>, // RAII lock. // // Note that running *MUST* be the last field in the destructor, since destructors run in // top-down order. Otherwise it will not properly protect the above fields. _running: running::RunningGuard, } #[derive(Copy,Show)] pub enum InitOption { /// Use this option to automatically buffer stderr while RustBox is running. It will be /// written when RustBox exits. /// /// This option uses a nonblocking OS pipe to buffer stderr output. This means that if the /// pipe fills up, subsequent writes will fail until RustBox exits. If this is a concern for /// your program, don't use RustBox's default pipe-based redirection; instead, redirect stderr /// to a log file or another process that is capable of handling it better. 
BufferStderr, } impl RustBox { pub fn init(opts: &[Option<InitOption>]) -> Result<RustBox, InitError> { // Acquire RAII lock. This might seem like overkill, but it is easy to forget to release // it in the maze of error conditions below. let running = match running::run() { Some(r) => r, None => return Err(InitError::AlreadyOpen) }; // Time to check our options. let mut stderr = None; for opt in opts.iter().filter_map(|&opt| opt) { match opt { InitOption::BufferStderr => try!(redirect::redirect_stderr(&mut stderr, &running)), } } // Create the RustBox. Ok(unsafe { match termbox::tb_init() { 0 => RustBox { no_sync: marker::NoSync, _stderr: stderr, _running: running, }, res => { return Err(InitError::TermBox(FromPrimitive::from_int(res as int))) } } }) } pub fn width(&self) -> uint { unsafe { termbox::tb_width() as uint } } pub fn height(&self) -> uint { unsafe { termbox::tb_height() as uint } } pub fn clear(&self) { unsafe { termbox::tb_clear() } } pub fn present(&self) { unsafe { termbox::tb_present() } } pub fn set_cursor(&self, x: int, y: int) { unsafe { termbox::tb_set_cursor(x as c_int, y as c_int) } } // Unsafe because u8 is not guaranteed to be a UTF-8 character pub unsafe fn change_cell(&self, x: uint, y: uint, ch: u32, fg: u16, bg: u16) { termbox::tb_change_cell(x as c_uint, y as c_uint, ch, fg, bg) } pub fn print(&self, x: uint, y: uint, sty: Style, fg: Color, bg: Color, s: &str) { let fg = Style::from_color(fg) | (sty & style::TB_ATTRIB); let bg = Style::from_color(bg); for (i, ch) in s.chars().enumerate() { unsafe { self.change_cell(x+i, y, ch as u32, fg.bits(), bg.bits()); } } } pub fn print_char(&self, x: uint, y: uint, sty: Style, fg: Color, bg: Color, ch: char) { let fg = Style::from_color(fg) | (sty & style::TB_ATTRIB); let bg = Style::from_color(bg); unsafe { self.change_cell(x, y, ch as u32, fg.bits(), bg.bits()); } } pub fn poll_event(&self) -> EventResult<Event> { let ev = NIL_RAW_EVENT; let rc = unsafe { termbox::tb_poll_event(&ev as *const 
RawEvent) }; unpack_event(rc, &ev) } pub fn peek_event(&self, timeout: Duration) -> EventResult<Event> { let ev = NIL_RAW_EVENT; let rc = unsafe { termbox::tb_peek_event(&ev as *const RawEvent, timeout.num_milliseconds() as c_uint) }; unpack_event(rc, &ev) } } impl Drop for RustBox { fn drop(&mut self) { // Since only one instance of the RustBox is ever accessible, we should not // need to do this atomically. // Note: we should definitely have RUSTBOX_RUNNING = true here. unsafe { termbox::tb_shutdown(); } } } Updated to latest rust #![allow(unstable)] extern crate libc; extern crate "termbox-sys" as termbox; pub use self::running::running; pub use self::style::{Style, RB_BOLD, RB_UNDERLINE, RB_REVERSE, RB_NORMAL}; use std::error::Error; use std::fmt; use std::marker; use std::time::duration::Duration; use std::num::FromPrimitive; use termbox::RawEvent; use libc::{c_int, c_uint}; #[derive(Copy)] pub enum Event { KeyEvent(u8, u16, u32), ResizeEvent(i32, i32), NoEvent } #[derive(Copy, PartialEq)] #[repr(C,u16)] pub enum Color { Default = 0x00, Black = 0x01, Red = 0x02, Green = 0x03, Yellow = 0x04, Blue = 0x05, Magenta = 0x06, Cyan = 0x07, White = 0x08, } mod style { bitflags! { #[repr(C)] flags Style: u16 { const TB_NORMAL_COLOR = 0x000F, const RB_BOLD = 0x0100, const RB_UNDERLINE = 0x0200, const RB_REVERSE = 0x0400, const RB_NORMAL = 0x0000, const TB_ATTRIB = RB_BOLD.bits | RB_UNDERLINE.bits | RB_REVERSE.bits, } } impl Style { pub fn from_color(color: super::Color) -> Style { Style { bits: color as u16 & TB_NORMAL_COLOR.bits } } } } const NIL_RAW_EVENT: RawEvent = RawEvent { etype: 0, emod: 0, key: 0, ch: 0, w: 0, h: 0 }; // FIXME: Rust doesn't support this enum representation. 
// #[derive(Copy,FromPrimitive,Show)] // #[repr(C,int)] // pub enum EventErrorKind { // Error = -1, // } // pub type EventError = Option<EventErrorKind>; #[allow(non_snake_case)] pub mod EventErrorKind { #[derive(Copy,Show)] pub struct Error; } pub type EventError = Option<EventErrorKind::Error>; pub type EventResult<T> = Result<T, EventError>; impl Error for EventError { fn description(&self) -> &str { match *self { // TODO: Check errno here Some(EventErrorKind::Error) => "Unknown error.", None => "Unexpected return code." } } } fn unpack_event(ev_type: c_int, ev: &RawEvent) -> EventResult<Event> { match ev_type { 0 => Ok(Event::NoEvent), 1 => Ok(Event::KeyEvent(ev.emod, ev.key, ev.ch)), 2 => Ok(Event::ResizeEvent(ev.w, ev.h)), // FIXME: Rust doesn't support this error representation // res => FromPrimitive::from_int(res as isize), -1 => Err(Some(EventErrorKind::Error)), _ => Err(None) } } #[derive(Copy,FromPrimitive,Show)] #[repr(C,isize)] pub enum InitErrorKind { UnsupportedTerminal = -1, FailedToOpenTty = -2, PipeTrapError = -3, } pub enum InitError { Opt(InitOption, Option<Box<Error>>), AlreadyOpen, TermBox(Option<InitErrorKind>), } impl fmt::Show for InitError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "{}", self.description()) } } impl Error for InitError { fn description(&self) -> &str { match *self { InitError::Opt(InitOption::BufferStderr, _) => "Could not redirect stderr.", InitError::AlreadyOpen => "RustBox is already open.", InitError::TermBox(e) => e.map_or("Unexpected TermBox return code.", |e| match e { InitErrorKind::UnsupportedTerminal => "Unsupported terminal.", InitErrorKind::FailedToOpenTty => "Failed to open TTY.", InitErrorKind::PipeTrapError => "Pipe trap error.", }), } } fn cause(&self) -> Option<&Error> { match *self { InitError::Opt(_, Some(ref e)) => Some(&**e), _ => None } } } mod running { use std::sync::atomic::{self, AtomicBool}; // The state of the RustBox is protected by the lock. Yay, global state! 
static RUSTBOX_RUNNING: AtomicBool = atomic::ATOMIC_BOOL_INIT; /// true iff RustBox is currently running. Beware of races here--don't rely on this for anything /// critical unless you happen to know that RustBox cannot change state when it is called (a good /// usecase would be checking to see if it's worth risking double printing backtraces to avoid /// having them swallowed up by RustBox). pub fn running() -> bool { RUSTBOX_RUNNING.load(atomic::Ordering::SeqCst) } // Internal RAII guard used to ensure we release the running lock whenever we acquire it. #[allow(missing_copy_implementations)] pub struct RunningGuard(()); pub fn run() -> Option<RunningGuard> { // Ensure that we are not already running and simultaneously set RUSTBOX_RUNNING using an // atomic swap. This ensures that contending threads don't trample each other. if RUSTBOX_RUNNING.swap(true, atomic::Ordering::SeqCst) { // The Rustbox was already running. None } else { // The RustBox was not already running, and now we have the lock. Some(RunningGuard(())) } } impl Drop for RunningGuard { fn drop(&mut self) { // Indicate that we're free now. We could probably get away with lower atomicity here, // but there's no reason to take that chance. RUSTBOX_RUNNING.store(false, atomic::Ordering::SeqCst); } } } // RAII guard for input redirection #[cfg(unix)] mod redirect { use std::error::Error; use libc; use std::io::{util, IoError, PipeStream}; use std::io::pipe::PipePair; use std::os::unix::AsRawFd; use super::{InitError, InitOption}; use super::running::RunningGuard; pub struct Redirect { pair: PipePair, fd: PipeStream, } impl Drop for Redirect { fn drop(&mut self) { // We make sure that we never actually create the Redirect without also taking a // RunningGuard. This means that we know that this will always be dropped immediately // before the RunningGuard is destroyed, and *after* a RustBox containing one is // destroyed. 
// // We rely on destructor order here: destructors are always executed top-down, so as // long as this is included above the RunningGuard in the RustBox struct, we can be // confident that it is destroyed while we're still holding onto the lock. unsafe { let old_fd = self.pair.writer.as_raw_fd(); let new_fd = self.fd.as_raw_fd(); // Reopen new_fd as writer. // (Note that if we fail here, we can't really do anything about it, so just ignore any // errors). if libc::dup2(old_fd, new_fd) != new_fd { return } } // Copy from reader to writer. drop(util::copy(&mut self.pair.reader, &mut self.pair.writer)); } } // The reason we take the RunningGuard is to make sure we don't try to redirect before the // TermBox is set up. Otherwise it is possible to race with other threads trying to set up the // RustBox. fn redirect(new: PipeStream, _: &RunningGuard) -> Result<Redirect, Option<Box<Error>>> { // Create a pipe pair. let mut pair = try!(PipeStream::pair().map_err( |e| Some(Box::new(e) as Box<Error>))); unsafe { let new_fd = new.as_raw_fd(); // Copy new_fd to dup_fd. let dup_fd = match libc::dup(new_fd) { -1 => return Err(Some(Box::new(IoError::last_error()) as Box<Error>)), fd => try!(PipeStream::open(fd).map_err( |e| Some(Box::new(e) as Box<Error>))), }; // Make the writer nonblocking. This means that even if the stderr pipe fills up, // exceptions from stack traces will not block the program. Unfortunately, if this // does happen stderr outputwill be lost until RustBox exits. let old_fd = pair.writer.as_raw_fd(); let res = libc::fcntl(old_fd, libc::F_SETFL, libc::O_NONBLOCK); if res != 0 { return Err(if res == -1 { Some(Box::new(IoError::last_error()) as Box<Error>) } else { None }) // This should really never happen, but no reason to unwind here. } // Reopen new_fd as writer. let fd = libc::dup2(old_fd, new_fd); if fd == new_fd { // On success, the new file descriptor should be returned. 
Replace the old one // with dup_fd, since we no longer need an explicit reference to the writer. // Note that it is *possible* that some other thread tried to take over stderr // between when we did and now, causing a race here. RustBox won't do it, though. // And it's honestly not clear how to guarantee correct behavior there anyway, // since if the change had come a fraction of a second later we still probably // wouldn't want to overwite it. In general this is a good argument for why the // redirect behavior is optional. pair.writer = dup_fd; Ok(Redirect { pair: pair, fd: new, }) } else { Err(if fd == -1 { Some(Box::new(IoError::last_error()) as Box<Error>) } else { None }) } } } pub fn redirect_stderr(stderr: &mut Option<Redirect>, rg: &RunningGuard) -> Result<(), InitError> { match *stderr { Some(_) => { // Can only redirect once. Err(InitError::Opt(InitOption::BufferStderr, None)) }, None => { *stderr = Some(try!(redirect( try!(PipeStream::open(libc::STDERR_FILENO) .map_err( |e| InitError::Opt(InitOption::BufferStderr, Some(Box::new(e) as Box<Error>)) )), rg) .map_err( |e| InitError::Opt(InitOption::BufferStderr, e)))); Ok(()) } } } } #[cfg(not(unix))] // Not sure how we'll do this on Windows, unimplemented for now. mod redirect { pub enum Redirect { } pub fn redirect_stderr(_: &mut Option<Redirect>, _: &super::RunningGuard) -> Result<(), super::InitError> { Err(super::InitError::Opt(super::InitOption::BufferStderr, None)) } } #[allow(missing_copy_implementations)] pub struct RustBox { // Termbox is not thread safe no_sync: marker::NoSync, // We only bother to redirect stderr for the moment, since it's used for panic! _stderr: Option<redirect::Redirect>, // RAII lock. // // Note that running *MUST* be the last field in the destructor, since destructors run in // top-down order. Otherwise it will not properly protect the above fields. 
_running: running::RunningGuard, } #[derive(Copy,Show)] pub enum InitOption { /// Use this option to automatically buffer stderr while RustBox is running. It will be /// written when RustBox exits. /// /// This option uses a nonblocking OS pipe to buffer stderr output. This means that if the /// pipe fills up, subsequent writes will fail until RustBox exits. If this is a concern for /// your program, don't use RustBox's default pipe-based redirection; instead, redirect stderr /// to a log file or another process that is capable of handling it better. BufferStderr, } impl RustBox { pub fn init(opts: &[Option<InitOption>]) -> Result<RustBox, InitError> { // Acquire RAII lock. This might seem like overkill, but it is easy to forget to release // it in the maze of error conditions below. let running = match running::run() { Some(r) => r, None => return Err(InitError::AlreadyOpen) }; // Time to check our options. let mut stderr = None; for opt in opts.iter().filter_map(|&opt| opt) { match opt { InitOption::BufferStderr => try!(redirect::redirect_stderr(&mut stderr, &running)), } } // Create the RustBox. 
Ok(unsafe { match termbox::tb_init() { 0 => RustBox { no_sync: marker::NoSync, _stderr: stderr, _running: running, }, res => { return Err(InitError::TermBox(FromPrimitive::from_int(res as isize))) } } }) } pub fn width(&self) -> usize { unsafe { termbox::tb_width() as usize } } pub fn height(&self) -> usize { unsafe { termbox::tb_height() as usize } } pub fn clear(&self) { unsafe { termbox::tb_clear() } } pub fn present(&self) { unsafe { termbox::tb_present() } } pub fn set_cursor(&self, x: isize, y: isize) { unsafe { termbox::tb_set_cursor(x as c_int, y as c_int) } } // Unsafe because u8 is not guaranteed to be a UTF-8 character pub unsafe fn change_cell(&self, x: usize, y: usize, ch: u32, fg: u16, bg: u16) { termbox::tb_change_cell(x as c_uint, y as c_uint, ch, fg, bg) } pub fn print(&self, x: usize, y: usize, sty: Style, fg: Color, bg: Color, s: &str) { let fg = Style::from_color(fg) | (sty & style::TB_ATTRIB); let bg = Style::from_color(bg); for (i, ch) in s.chars().enumerate() { unsafe { self.change_cell(x+i, y, ch as u32, fg.bits(), bg.bits()); } } } pub fn print_char(&self, x: usize, y: usize, sty: Style, fg: Color, bg: Color, ch: char) { let fg = Style::from_color(fg) | (sty & style::TB_ATTRIB); let bg = Style::from_color(bg); unsafe { self.change_cell(x, y, ch as u32, fg.bits(), bg.bits()); } } pub fn poll_event(&self) -> EventResult<Event> { let ev = NIL_RAW_EVENT; let rc = unsafe { termbox::tb_poll_event(&ev as *const RawEvent) }; unpack_event(rc, &ev) } pub fn peek_event(&self, timeout: Duration) -> EventResult<Event> { let ev = NIL_RAW_EVENT; let rc = unsafe { termbox::tb_peek_event(&ev as *const RawEvent, timeout.num_milliseconds() as c_uint) }; unpack_event(rc, &ev) } } impl Drop for RustBox { fn drop(&mut self) { // Since only one instance of the RustBox is ever accessible, we should not // need to do this atomically. // Note: we should definitely have RUSTBOX_RUNNING = true here. unsafe { termbox::tb_shutdown(); } } }
use crate::clean::auto_trait::AutoTraitFinder; use crate::clean::blanket_impl::BlanketImplFinder; use crate::clean::{ inline, Clean, Crate, ExternalCrate, Generic, GenericArg, GenericArgs, ImportSource, Item, ItemKind, Lifetime, Path, PathSegment, PolyTrait, Primitive, PrimitiveType, ResolvedPath, Type, TypeBinding, Visibility, }; use crate::core::DocContext; use crate::formats::item_type::ItemType; use rustc_ast as ast; use rustc_ast::tokenstream::TokenTree; use rustc_hir as hir; use rustc_hir::def::{DefKind, Res}; use rustc_hir::def_id::{DefId, LOCAL_CRATE}; use rustc_middle::mir::interpret::ConstValue; use rustc_middle::ty::subst::{GenericArgKind, SubstsRef}; use rustc_middle::ty::{self, DefIdTree, TyCtxt}; use rustc_span::symbol::{kw, sym, Symbol}; use std::fmt::Write as _; use std::mem; #[cfg(test)] mod tests; crate fn krate(cx: &mut DocContext<'_>) -> Crate { use crate::visit_lib::LibEmbargoVisitor; let krate = cx.tcx.hir().krate(); let module = crate::visit_ast::RustdocVisitor::new(cx).visit(krate); let mut externs = Vec::new(); for &cnum in cx.tcx.crates(()).iter() { externs.push(ExternalCrate { crate_num: cnum }); // Analyze doc-reachability for extern items LibEmbargoVisitor::new(cx).visit_lib(cnum); } externs.sort_unstable_by_key(|e| e.crate_num); // Clean the crate, translating the entire librustc_ast AST to one that is // understood by rustdoc. let mut module = module.clean(cx); match *module.kind { ItemKind::ModuleItem(ref module) => { for it in &module.items { // `compiler_builtins` should be masked too, but we can't apply // `#[doc(masked)]` to the injected `extern crate` because it's unstable. 
if it.is_extern_crate() && (it.attrs.has_doc_flag(sym::masked) || cx.tcx.is_compiler_builtins(it.def_id.krate())) { cx.cache.masked_crates.insert(it.def_id.krate()); } } } _ => unreachable!(), } let local_crate = ExternalCrate { crate_num: LOCAL_CRATE }; let src = local_crate.src(cx.tcx); let name = local_crate.name(cx.tcx); let primitives = local_crate.primitives(cx.tcx); let keywords = local_crate.keywords(cx.tcx); { let m = match *module.kind { ItemKind::ModuleItem(ref mut m) => m, _ => unreachable!(), }; m.items.extend(primitives.iter().map(|&(def_id, prim)| { Item::from_def_id_and_parts( def_id, Some(prim.as_sym()), ItemKind::PrimitiveItem(prim), cx, ) })); m.items.extend(keywords.into_iter().map(|(def_id, kw)| { Item::from_def_id_and_parts(def_id, Some(kw), ItemKind::KeywordItem(kw), cx) })); } Crate { name, src, module, externs, primitives, external_traits: cx.external_traits.clone(), collapsed: false, } } fn external_generic_args( cx: &mut DocContext<'_>, trait_did: Option<DefId>, has_self: bool, bindings: Vec<TypeBinding>, substs: SubstsRef<'_>, ) -> GenericArgs { let mut skip_self = has_self; let mut ty_kind = None; let args: Vec<_> = substs .iter() .filter_map(|kind| match kind.unpack() { GenericArgKind::Lifetime(lt) => match lt { ty::ReLateBound(_, ty::BoundRegion { kind: ty::BrAnon(_), .. 
}) => { Some(GenericArg::Lifetime(Lifetime::elided())) } _ => lt.clean(cx).map(GenericArg::Lifetime), }, GenericArgKind::Type(_) if skip_self => { skip_self = false; None } GenericArgKind::Type(ty) => { ty_kind = Some(ty.kind()); Some(GenericArg::Type(ty.clean(cx))) } GenericArgKind::Const(ct) => Some(GenericArg::Const(Box::new(ct.clean(cx)))), }) .collect(); match trait_did { // Attempt to sugar an external path like Fn<(A, B,), C> to Fn(A, B) -> C Some(did) if cx.tcx.fn_trait_kind_from_lang_item(did).is_some() => { assert!(ty_kind.is_some()); let inputs = match ty_kind { Some(ty::Tuple(ref tys)) => tys.iter().map(|t| t.expect_ty().clean(cx)).collect(), _ => return GenericArgs::AngleBracketed { args, bindings }, }; let output = None; // FIXME(#20299) return type comes from a projection now // match types[1].kind { // ty::Tuple(ref v) if v.is_empty() => None, // -> () // _ => Some(types[1].clean(cx)) // }; GenericArgs::Parenthesized { inputs, output } } _ => GenericArgs::AngleBracketed { args, bindings }, } } // trait_did should be set to a trait's DefId if called on a TraitRef, in order to sugar // from Fn<(A, B,), C> to Fn(A, B) -> C pub(super) fn external_path( cx: &mut DocContext<'_>, name: Symbol, trait_did: Option<DefId>, has_self: bool, bindings: Vec<TypeBinding>, substs: SubstsRef<'_>, ) -> Path { Path { global: false, res: Res::Err, segments: vec![PathSegment { name, args: external_generic_args(cx, trait_did, has_self, bindings, substs), }], } } crate fn strip_type(ty: Type) -> Type { match ty { Type::ResolvedPath { path, did, is_generic } => { Type::ResolvedPath { path: strip_path(&path), did, is_generic } } Type::DynTrait(mut bounds, lt) => { let first = bounds.remove(0); let stripped_trait = strip_type(first.trait_); bounds.insert( 0, PolyTrait { trait_: stripped_trait, generic_params: first.generic_params }, ); Type::DynTrait(bounds, lt) } Type::Tuple(inner_tys) => { Type::Tuple(inner_tys.iter().map(|t| strip_type(t.clone())).collect()) } 
Type::Slice(inner_ty) => Type::Slice(Box::new(strip_type(*inner_ty))), Type::Array(inner_ty, s) => Type::Array(Box::new(strip_type(*inner_ty)), s), Type::RawPointer(m, inner_ty) => Type::RawPointer(m, Box::new(strip_type(*inner_ty))), Type::BorrowedRef { lifetime, mutability, type_ } => { Type::BorrowedRef { lifetime, mutability, type_: Box::new(strip_type(*type_)) } } Type::QPath { name, self_type, trait_, self_def_id } => Type::QPath { name, self_def_id, self_type: Box::new(strip_type(*self_type)), trait_: Box::new(strip_type(*trait_)), }, _ => ty, } } crate fn strip_path(path: &Path) -> Path { let segments = path .segments .iter() .map(|s| PathSegment { name: s.name, args: GenericArgs::AngleBracketed { args: vec![], bindings: vec![] }, }) .collect(); Path { global: path.global, res: path.res, segments } } crate fn qpath_to_string(p: &hir::QPath<'_>) -> String { let segments = match *p { hir::QPath::Resolved(_, ref path) => &path.segments, hir::QPath::TypeRelative(_, ref segment) => return segment.ident.to_string(), hir::QPath::LangItem(lang_item, ..) => return lang_item.name().to_string(), }; let mut s = String::new(); for (i, seg) in segments.iter().enumerate() { if i > 0 { s.push_str("::"); } if seg.ident.name != kw::PathRoot { s.push_str(&seg.ident.as_str()); } } s } crate fn build_deref_target_impls(cx: &mut DocContext<'_>, items: &[Item], ret: &mut Vec<Item>) { let tcx = cx.tcx; for item in items { let target = match *item.kind { ItemKind::TypedefItem(ref t, true) => &t.type_, _ => continue, }; if let Some(prim) = target.primitive_type() { for &did in prim.impls(tcx).iter().filter(|did| !did.is_local()) { inline::build_impl(cx, None, did, None, ret); } } else if let ResolvedPath { did, .. 
} = *target { if !did.is_local() { inline::build_impls(cx, None, did, None, ret); } } } } crate fn name_from_pat(p: &hir::Pat<'_>) -> Symbol { use rustc_hir::*; debug!("trying to get a name from pattern: {:?}", p); Symbol::intern(&match p.kind { PatKind::Wild | PatKind::Struct(..) => return kw::Underscore, PatKind::Binding(_, _, ident, _) => return ident.name, PatKind::TupleStruct(ref p, ..) | PatKind::Path(ref p) => qpath_to_string(p), PatKind::Or(ref pats) => { pats.iter().map(|p| name_from_pat(p).to_string()).collect::<Vec<String>>().join(" | ") } PatKind::Tuple(ref elts, _) => format!( "({})", elts.iter().map(|p| name_from_pat(p).to_string()).collect::<Vec<String>>().join(", ") ), PatKind::Box(ref p) => return name_from_pat(&**p), PatKind::Ref(ref p, _) => return name_from_pat(&**p), PatKind::Lit(..) => { warn!( "tried to get argument name from PatKind::Lit, which is silly in function arguments" ); return Symbol::intern("()"); } PatKind::Range(..) => return kw::Underscore, PatKind::Slice(ref begin, ref mid, ref end) => { let begin = begin.iter().map(|p| name_from_pat(p).to_string()); let mid = mid.as_ref().map(|p| format!("..{}", name_from_pat(&**p))).into_iter(); let end = end.iter().map(|p| name_from_pat(p).to_string()); format!("[{}]", begin.chain(mid).chain(end).collect::<Vec<_>>().join(", ")) } }) } crate fn print_const(cx: &DocContext<'_>, n: &'tcx ty::Const<'_>) -> String { match n.val { ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs_: _, promoted }) => { let mut s = if let Some(def) = def.as_local() { let hir_id = cx.tcx.hir().local_def_id_to_hir_id(def.did); print_const_expr(cx.tcx, cx.tcx.hir().body_owned_by(hir_id)) } else { inline::print_inlined_const(cx.tcx, def.did) }; if let Some(promoted) = promoted { s.push_str(&format!("::{:?}", promoted)) } s } _ => { let mut s = n.to_string(); // array lengths are obviously usize if s.ends_with("_usize") { let n = s.len() - "_usize".len(); s.truncate(n); if s.ends_with(": ") { let n = s.len() - ": 
".len(); s.truncate(n); } } s } } } crate fn print_evaluated_const(tcx: TyCtxt<'_>, def_id: DefId) -> Option<String> { tcx.const_eval_poly(def_id).ok().and_then(|val| { let ty = tcx.type_of(def_id); match (val, ty.kind()) { (_, &ty::Ref(..)) => None, (ConstValue::Scalar(_), &ty::Adt(_, _)) => None, (ConstValue::Scalar(_), _) => { let const_ = ty::Const::from_value(tcx, val, ty); Some(print_const_with_custom_print_scalar(tcx, const_)) } _ => None, } }) } fn format_integer_with_underscore_sep(num: &str) -> String { let num_chars: Vec<_> = num.chars().collect(); let mut num_start_index = if num_chars.get(0) == Some(&'-') { 1 } else { 0 }; let chunk_size = match num[num_start_index..].as_bytes() { [b'0', b'b' | b'x', ..] => { num_start_index += 2; 4 } [b'0', b'o', ..] => { num_start_index += 2; let remaining_chars = num_chars.len() - num_start_index; if remaining_chars <= 6 { // don't add underscores to Unix permissions like 0755 or 100755 return num.to_string(); } 3 } _ => 3, }; num_chars[..num_start_index] .iter() .chain(num_chars[num_start_index..].rchunks(chunk_size).rev().intersperse(&['_']).flatten()) .collect() } fn print_const_with_custom_print_scalar(tcx: TyCtxt<'_>, ct: &'tcx ty::Const<'tcx>) -> String { // Use a slightly different format for integer types which always shows the actual value. // For all other types, fallback to the original `pretty_print_const`. 
match (ct.val, ct.ty.kind()) { (ty::ConstKind::Value(ConstValue::Scalar(int)), ty::Uint(ui)) => { format!("{}{}", format_integer_with_underscore_sep(&int.to_string()), ui.name_str()) } (ty::ConstKind::Value(ConstValue::Scalar(int)), ty::Int(i)) => { let ty = tcx.lift(ct.ty).unwrap(); let size = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap().size; let data = int.assert_bits(size); let sign_extended_data = size.sign_extend(data) as i128; format!( "{}{}", format_integer_with_underscore_sep(&sign_extended_data.to_string()), i.name_str() ) } _ => ct.to_string(), } } crate fn is_literal_expr(tcx: TyCtxt<'_>, hir_id: hir::HirId) -> bool { if let hir::Node::Expr(expr) = tcx.hir().get(hir_id) { if let hir::ExprKind::Lit(_) = &expr.kind { return true; } if let hir::ExprKind::Unary(hir::UnOp::Neg, expr) = &expr.kind { if let hir::ExprKind::Lit(_) = &expr.kind { return true; } } } false } crate fn print_const_expr(tcx: TyCtxt<'_>, body: hir::BodyId) -> String { let hir = tcx.hir(); let value = &hir.body(body).value; let snippet = if !value.span.from_expansion() { tcx.sess.source_map().span_to_snippet(value.span).ok() } else { None }; snippet.unwrap_or_else(|| rustc_hir_pretty::id_to_string(&hir, body.hir_id)) } /// Given a type Path, resolve it to a Type using the TyCtxt crate fn resolve_type(cx: &mut DocContext<'_>, path: Path, id: hir::HirId) -> Type { debug!("resolve_type({:?},{:?})", path, id); let is_generic = match path.res { Res::PrimTy(p) => return Primitive(PrimitiveType::from(p)), Res::SelfTy(..) if path.segments.len() == 1 => { return Generic(kw::SelfUpper); } Res::Def(DefKind::TyParam, _) if path.segments.len() == 1 => { return Generic(Symbol::intern(&path.whole_name())); } Res::SelfTy(..) 
| Res::Def(DefKind::TyParam | DefKind::AssocTy, _) => true, _ => false, }; let did = register_res(cx, path.res); ResolvedPath { path, did, is_generic } } crate fn get_auto_trait_and_blanket_impls( cx: &mut DocContext<'tcx>, item_def_id: DefId, ) -> impl Iterator<Item = Item> { let auto_impls = cx .sess() .prof .generic_activity("get_auto_trait_impls") .run(|| AutoTraitFinder::new(cx).get_auto_trait_impls(item_def_id)); let blanket_impls = cx .sess() .prof .generic_activity("get_blanket_impls") .run(|| BlanketImplFinder { cx }.get_blanket_impls(item_def_id)); auto_impls.into_iter().chain(blanket_impls) } /// If `res` has a documentation page associated, store it in the cache. /// /// This is later used by [`href()`] to determine the HTML link for the item. /// /// [`href()`]: crate::html::format::href crate fn register_res(cx: &mut DocContext<'_>, res: Res) -> DefId { use DefKind::*; debug!("register_res({:?})", res); let (did, kind) = match res { Res::Def(DefKind::AssocTy | DefKind::AssocFn | DefKind::AssocConst, i) => { // associated items are documented, but on the page of their parent (cx.tcx.parent(i).unwrap(), ItemType::Trait) } Res::Def(DefKind::Variant, i) => { // variant items are documented, but on the page of their parent (cx.tcx.parent(i).expect("cannot get parent def id"), ItemType::Enum) } // Each of these have their own page. Res::Def( kind @ (Fn | TyAlias | Enum | Trait | Struct | Union | Mod | ForeignTy | Const | Static | Macro(..) | TraitAlias), i, ) => (i, kind.into()), // This is part of a trait definition; document the trait. Res::SelfTy(Some(trait_def_id), _) => (trait_def_id, ItemType::Trait), // This is an inherent impl; it doesn't have its own page. Res::SelfTy(None, Some((impl_def_id, _))) => return impl_def_id, Res::SelfTy(None, None) | Res::PrimTy(_) | Res::ToolMod | Res::SelfCtor(_) | Res::Local(_) | Res::NonMacroAttr(_) | Res::Err => return res.def_id(), Res::Def( TyParam | ConstParam | Ctor(..) 
| ExternCrate | Use | ForeignMod | AnonConst | OpaqueTy | Field | LifetimeParam | GlobalAsm | Impl | Closure | Generator, id, ) => return id, }; if did.is_local() { return did; } inline::record_extern_fqn(cx, did, kind); if let ItemType::Trait = kind { inline::record_extern_trait(cx, did); } did } crate fn resolve_use_source(cx: &mut DocContext<'_>, path: Path) -> ImportSource { ImportSource { did: if path.res.opt_def_id().is_none() { None } else { Some(register_res(cx, path.res)) }, path, } } crate fn enter_impl_trait<F, R>(cx: &mut DocContext<'_>, f: F) -> R where F: FnOnce(&mut DocContext<'_>) -> R, { let old_bounds = mem::take(&mut cx.impl_trait_bounds); let r = f(cx); assert!(cx.impl_trait_bounds.is_empty()); cx.impl_trait_bounds = old_bounds; r } /// Find the nearest parent module of a [`DefId`]. crate fn find_nearest_parent_module(tcx: TyCtxt<'_>, def_id: DefId) -> Option<DefId> { if def_id.is_top_level_module() { // The crate root has no parent. Use it as the root instead. Some(def_id) } else { let mut current = def_id; // The immediate parent might not always be a module. // Find the first parent which is. while let Some(parent) = tcx.parent(current) { if tcx.def_kind(parent) == DefKind::Mod { return Some(parent); } current = parent; } None } } /// Checks for the existence of `hidden` in the attribute below if `flag` is `sym::hidden`: /// /// ``` /// #[doc(hidden)] /// pub fn foo() {} /// ``` /// /// This function exists because it runs on `hir::Attributes` whereas the other is a /// `clean::Attributes` method. crate fn has_doc_flag(attrs: ty::Attributes<'_>, flag: Symbol) -> bool { attrs.iter().any(|attr| { attr.has_name(sym::doc) && attr.meta_item_list().map_or(false, |l| rustc_attr::list_contains_name(&l, flag)) }) } /// A link to `doc.rust-lang.org` that includes the channel name. Use this instead of manual links /// so that the channel is consistent. 
///
/// Set by `bootstrap::Builder::doc_rust_lang_org_channel` in order to keep tests passing on beta/stable.
crate const DOC_RUST_LANG_ORG_CHANNEL: &'static str = env!("DOC_RUST_LANG_ORG_CHANNEL");

/// Render a sequence of macro arms in a format suitable for displaying to the user
/// as part of an item declaration.
pub(super) fn render_macro_arms<'a>(
    matchers: impl Iterator<Item = &'a TokenTree>,
    arm_delim: &str,
) -> String {
    let mut out = String::new();
    for matcher in matchers {
        writeln!(out, " {} => {{ ... }}{}", render_macro_matcher(matcher), arm_delim).unwrap();
    }
    out
}

/// Render a macro matcher in a format suitable for displaying to the user
/// as part of an item declaration.
pub(super) fn render_macro_matcher(matcher: &TokenTree) -> String {
    rustc_ast_pretty::pprust::tt_to_string(matcher)
}

/// Render the declaration of a macro (`macro_rules!` or macros 2.0) as shown
/// on its documentation page. Only the matchers are printed; arm bodies are
/// elided as `{ ... }`.
pub(super) fn display_macro_source(
    cx: &mut DocContext<'_>,
    name: Symbol,
    def: &ast::MacroDef,
    def_id: DefId,
    vis: impl Clean<Visibility>,
) -> String {
    let tts: Vec<_> = def.body.inner_tokens().into_trees().collect();
    // Extract the spans of all matchers. They represent the "interface" of the macro.
    // NOTE(review): this assumes each arm is exactly 4 token trees
    // (matcher, `=>`, body, separator) — confirm against the token stream shape.
    let matchers = tts.chunks(4).map(|arm| &arm[0]);

    if def.macro_rules {
        format!("macro_rules! {} {{\n{}}}", name, render_macro_arms(matchers, ";"))
    } else {
        let vis = vis.clean(cx);

        if matchers.len() <= 1 {
            format!(
                "{}macro {}{} {{\n ...\n}}",
                vis.to_src_with_space(cx.tcx, def_id),
                name,
                matchers.map(render_macro_matcher).collect::<String>(),
            )
        } else {
            format!(
                "{}macro {} {{\n{}}}",
                vis.to_src_with_space(cx.tcx, def_id),
                name,
                render_macro_arms(matchers, ","),
            )
        }
    }
}

// NOTE(review): the next line appears to be a stray commit-message artifact from
// the extraction, not Rust source; kept as a comment for reference:
// rustdoc: Get symbol for `TyParam` directly

use crate::clean::auto_trait::AutoTraitFinder;
use crate::clean::blanket_impl::BlanketImplFinder;
use crate::clean::{
    inline, Clean, Crate, ExternalCrate, Generic, GenericArg, GenericArgs, ImportSource, Item,
    ItemKind, Lifetime, Path, PathSegment, PolyTrait, Primitive, PrimitiveType, ResolvedPath,
    Type, TypeBinding, Visibility,
};
use crate::core::DocContext;
use crate::formats::item_type::ItemType;

use rustc_ast as ast;
use rustc_ast::tokenstream::TokenTree;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_middle::mir::interpret::ConstValue;
use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
use rustc_middle::ty::{self, DefIdTree, TyCtxt};
use rustc_span::symbol::{kw, sym, Symbol};

use std::fmt::Write as _;
use std::mem;

#[cfg(test)]
mod tests;

/// Entry point of the "clean" pass: convert the already-visited AST of the
/// local crate into rustdoc's `Crate` IR, recording doc-reachability of
/// extern crates and appending synthetic primitive/keyword items.
crate fn krate(cx: &mut DocContext<'_>) -> Crate {
    use crate::visit_lib::LibEmbargoVisitor;

    let krate = cx.tcx.hir().krate();
    let module = crate::visit_ast::RustdocVisitor::new(cx).visit(krate);

    let mut externs = Vec::new();
    for &cnum in cx.tcx.crates(()).iter() {
        externs.push(ExternalCrate { crate_num: cnum });
        // Analyze doc-reachability for extern items
        LibEmbargoVisitor::new(cx).visit_lib(cnum);
    }
    externs.sort_unstable_by_key(|e| e.crate_num);

    // Clean the crate, translating the entire librustc_ast AST to one that is
    // understood by rustdoc.
    let mut module = module.clean(cx);

    // The top-level item produced by cleaning is always a module; record which
    // extern crates must be masked (hidden from docs).
    match *module.kind {
        ItemKind::ModuleItem(ref module) => {
            for it in &module.items {
                // `compiler_builtins` should be masked too, but we can't apply
                // `#[doc(masked)]` to the injected `extern crate` because it's unstable.
                if it.is_extern_crate()
                    && (it.attrs.has_doc_flag(sym::masked)
                        || cx.tcx.is_compiler_builtins(it.def_id.krate()))
                {
                    cx.cache.masked_crates.insert(it.def_id.krate());
                }
            }
        }
        _ => unreachable!(),
    }

    let local_crate = ExternalCrate { crate_num: LOCAL_CRATE };
    let src = local_crate.src(cx.tcx);
    let name = local_crate.name(cx.tcx);
    let primitives = local_crate.primitives(cx.tcx);
    let keywords = local_crate.keywords(cx.tcx);
    {
        // Append synthetic items for primitives and keywords defined in this
        // crate so they get their own documentation pages.
        let m = match *module.kind {
            ItemKind::ModuleItem(ref mut m) => m,
            _ => unreachable!(),
        };
        m.items.extend(primitives.iter().map(|&(def_id, prim)| {
            Item::from_def_id_and_parts(
                def_id,
                Some(prim.as_sym()),
                ItemKind::PrimitiveItem(prim),
                cx,
            )
        }));
        m.items.extend(keywords.into_iter().map(|(def_id, kw)| {
            Item::from_def_id_and_parts(def_id, Some(kw), ItemKind::KeywordItem(kw), cx)
        }));
    }

    Crate {
        name,
        src,
        module,
        externs,
        primitives,
        external_traits: cx.external_traits.clone(),
        collapsed: false,
    }
}

/// Clean the generic arguments of an external path, skipping the implicit
/// `Self` type when `has_self` is set and sugaring `Fn*` traits into the
/// parenthesized `Fn(A, B) -> C` form when `trait_did` is an `Fn`-family trait.
fn external_generic_args(
    cx: &mut DocContext<'_>,
    trait_did: Option<DefId>,
    has_self: bool,
    bindings: Vec<TypeBinding>,
    substs: SubstsRef<'_>,
) -> GenericArgs {
    let mut skip_self = has_self;
    let mut ty_kind = None;
    let args: Vec<_> = substs
        .iter()
        .filter_map(|kind| match kind.unpack() {
            // Anonymous late-bound regions render as elided lifetimes.
            GenericArgKind::Lifetime(lt) => match lt {
                ty::ReLateBound(_, ty::BoundRegion { kind: ty::BrAnon(_), .. }) => {
                    Some(GenericArg::Lifetime(Lifetime::elided()))
                }
                _ => lt.clean(cx).map(GenericArg::Lifetime),
            },
            // The first type argument is `Self`; drop it when requested.
            GenericArgKind::Type(_) if skip_self => {
                skip_self = false;
                None
            }
            GenericArgKind::Type(ty) => {
                ty_kind = Some(ty.kind());
                Some(GenericArg::Type(ty.clean(cx)))
            }
            GenericArgKind::Const(ct) => Some(GenericArg::Const(Box::new(ct.clean(cx)))),
        })
        .collect();

    match trait_did {
        // Attempt to sugar an external path like Fn<(A, B,), C> to Fn(A, B) -> C
        Some(did) if cx.tcx.fn_trait_kind_from_lang_item(did).is_some() => {
            assert!(ty_kind.is_some());
            let inputs = match ty_kind {
                Some(ty::Tuple(ref tys)) => tys.iter().map(|t| t.expect_ty().clean(cx)).collect(),
                _ => return GenericArgs::AngleBracketed { args, bindings },
            };
            let output = None;
            // FIXME(#20299) return type comes from a projection now
            // match types[1].kind {
            //     ty::Tuple(ref v) if v.is_empty() => None, // -> ()
            //     _ => Some(types[1].clean(cx))
            // };
            GenericArgs::Parenthesized { inputs, output }
        }
        _ => GenericArgs::AngleBracketed { args, bindings },
    }
}

// trait_did should be set to a trait's DefId if called on a TraitRef, in order to sugar
// from Fn<(A, B,), C> to Fn(A, B) -> C
pub(super) fn external_path(
    cx: &mut DocContext<'_>,
    name: Symbol,
    trait_did: Option<DefId>,
    has_self: bool,
    bindings: Vec<TypeBinding>,
    substs: SubstsRef<'_>,
) -> Path {
    Path {
        global: false,
        res: Res::Err,
        segments: vec![PathSegment {
            name,
            args: external_generic_args(cx, trait_did, has_self, bindings, substs),
        }],
    }
}

/// Recursively strip generic arguments from every path inside `ty`, leaving
/// only the bare path names (used where a short type rendering is wanted).
crate fn strip_type(ty: Type) -> Type {
    match ty {
        Type::ResolvedPath { path, did, is_generic } => {
            Type::ResolvedPath { path: strip_path(&path), did, is_generic }
        }
        Type::DynTrait(mut bounds, lt) => {
            // Only the principal (first) trait bound carries the stripped path.
            let first = bounds.remove(0);
            let stripped_trait = strip_type(first.trait_);
            bounds.insert(
                0,
                PolyTrait { trait_: stripped_trait, generic_params: first.generic_params },
            );
            Type::DynTrait(bounds, lt)
        }
        Type::Tuple(inner_tys) => {
            Type::Tuple(inner_tys.iter().map(|t| strip_type(t.clone())).collect())
        }
        Type::Slice(inner_ty) => Type::Slice(Box::new(strip_type(*inner_ty))),
        Type::Array(inner_ty, s) => Type::Array(Box::new(strip_type(*inner_ty)), s),
        Type::RawPointer(m, inner_ty) => Type::RawPointer(m, Box::new(strip_type(*inner_ty))),
        Type::BorrowedRef { lifetime, mutability, type_ } => {
            Type::BorrowedRef { lifetime, mutability, type_: Box::new(strip_type(*type_)) }
        }
        Type::QPath { name, self_type, trait_, self_def_id } => Type::QPath {
            name,
            self_def_id,
            self_type: Box::new(strip_type(*self_type)),
            trait_: Box::new(strip_type(*trait_)),
        },
        _ => ty,
    }
}

/// Return a copy of `path` with all generic arguments removed from every segment.
crate fn strip_path(path: &Path) -> Path {
    let segments = path
        .segments
        .iter()
        .map(|s| PathSegment {
            name: s.name,
            args: GenericArgs::AngleBracketed { args: vec![], bindings: vec![] },
        })
        .collect();

    Path { global: path.global, res: path.res, segments }
}

/// Render a HIR `QPath` as a `::`-separated string (dropping the root segment).
crate fn qpath_to_string(p: &hir::QPath<'_>) -> String {
    let segments = match *p {
        hir::QPath::Resolved(_, ref path) => &path.segments,
        hir::QPath::TypeRelative(_, ref segment) => return segment.ident.to_string(),
        hir::QPath::LangItem(lang_item, ..) => return lang_item.name().to_string(),
    };

    let mut s = String::new();
    for (i, seg) in segments.iter().enumerate() {
        if i > 0 {
            s.push_str("::");
        }
        if seg.ident.name != kw::PathRoot {
            s.push_str(&seg.ident.as_str());
        }
    }
    s
}

/// For every typedef among `items` that is a `Deref` target, inline the impls
/// of the target type (primitive or external) into `ret` so that deref'd
/// methods show up in the docs.
crate fn build_deref_target_impls(cx: &mut DocContext<'_>, items: &[Item], ret: &mut Vec<Item>) {
    let tcx = cx.tcx;

    for item in items {
        let target = match *item.kind {
            ItemKind::TypedefItem(ref t, true) => &t.type_,
            _ => continue,
        };

        if let Some(prim) = target.primitive_type() {
            // Primitives: inline only non-local impls (local ones are already present).
            for &did in prim.impls(tcx).iter().filter(|did| !did.is_local()) {
                inline::build_impl(cx, None, did, None, ret);
            }
        } else if let ResolvedPath { did, .. } = *target {
            if !did.is_local() {
                inline::build_impls(cx, None, did, None, ret);
            }
        }
    }
}

/// Derive a display name for a function argument from its pattern,
/// falling back to `_` for patterns that have no usable name.
crate fn name_from_pat(p: &hir::Pat<'_>) -> Symbol {
    use rustc_hir::*;
    debug!("trying to get a name from pattern: {:?}", p);

    Symbol::intern(&match p.kind {
        PatKind::Wild | PatKind::Struct(..) => return kw::Underscore,
        PatKind::Binding(_, _, ident, _) => return ident.name,
        PatKind::TupleStruct(ref p, ..) | PatKind::Path(ref p) => qpath_to_string(p),
        PatKind::Or(ref pats) => {
            pats.iter().map(|p| name_from_pat(p).to_string()).collect::<Vec<String>>().join(" | ")
        }
        PatKind::Tuple(ref elts, _) => format!(
            "({})",
            elts.iter().map(|p| name_from_pat(p).to_string()).collect::<Vec<String>>().join(", ")
        ),
        PatKind::Box(ref p) => return name_from_pat(&**p),
        PatKind::Ref(ref p, _) => return name_from_pat(&**p),
        PatKind::Lit(..) => {
            warn!(
                "tried to get argument name from PatKind::Lit, which is silly in function arguments"
            );
            return Symbol::intern("()");
        }
        PatKind::Range(..) => return kw::Underscore,
        PatKind::Slice(ref begin, ref mid, ref end) => {
            let begin = begin.iter().map(|p| name_from_pat(p).to_string());
            let mid = mid.as_ref().map(|p| format!("..{}", name_from_pat(&**p))).into_iter();
            let end = end.iter().map(|p| name_from_pat(p).to_string());
            format!("[{}]", begin.chain(mid).chain(end).collect::<Vec<_>>().join(", "))
        }
    })
}

/// Render a `ty::Const` for display: unevaluated constants are printed from
/// their HIR body (or inlined source for non-local defs), everything else via
/// its `Display` impl with the `_usize` suffix trimmed from array lengths.
crate fn print_const(cx: &DocContext<'_>, n: &'tcx ty::Const<'_>) -> String {
    match n.val {
        ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs_: _, promoted }) => {
            let mut s = if let Some(def) = def.as_local() {
                let hir_id = cx.tcx.hir().local_def_id_to_hir_id(def.did);
                print_const_expr(cx.tcx, cx.tcx.hir().body_owned_by(hir_id))
            } else {
                inline::print_inlined_const(cx.tcx, def.did)
            };
            if let Some(promoted) = promoted {
                s.push_str(&format!("::{:?}", promoted))
            }
            s
        }
        _ => {
            let mut s = n.to_string();
            // array lengths are obviously usize
            if s.ends_with("_usize") {
                let n = s.len() - "_usize".len();
                s.truncate(n);
                if s.ends_with(": ") {
                    let n = s.len() - ": ".len();
                    s.truncate(n);
                }
            }
            s
        }
    }
}

/// Try to const-evaluate `def_id` and pretty-print the resulting scalar.
/// Returns `None` for references, ADTs, and anything that isn't a plain scalar.
crate fn print_evaluated_const(tcx: TyCtxt<'_>, def_id: DefId) -> Option<String> {
    tcx.const_eval_poly(def_id).ok().and_then(|val| {
        let ty = tcx.type_of(def_id);
        match (val, ty.kind()) {
            (_, &ty::Ref(..)) => None,
            (ConstValue::Scalar(_), &ty::Adt(_, _)) => None,
            (ConstValue::Scalar(_), _) => {
                let const_ = ty::Const::from_value(tcx, val, ty);
                Some(print_const_with_custom_print_scalar(tcx, const_))
            }
            _ => None,
        }
    })
}

/// Insert `_` digit-group separators into an integer literal string:
/// groups of 4 for `0b`/`0x` literals, groups of 3 otherwise, leaving
/// short octal literals (permission-style) untouched.
fn format_integer_with_underscore_sep(num: &str) -> String {
    let num_chars: Vec<_> = num.chars().collect();
    let mut num_start_index = if num_chars.get(0) == Some(&'-') { 1 } else { 0 };
    let chunk_size = match num[num_start_index..].as_bytes() {
        [b'0', b'b' | b'x', ..] => {
            num_start_index += 2;
            4
        }
        [b'0', b'o', ..] => {
            num_start_index += 2;
            let remaining_chars = num_chars.len() - num_start_index;
            if remaining_chars <= 6 {
                // don't add underscores to Unix permissions like 0755 or 100755
                return num.to_string();
            }
            3
        }
        _ => 3,
    };

    num_chars[..num_start_index]
        .iter()
        .chain(num_chars[num_start_index..].rchunks(chunk_size).rev().intersperse(&['_']).flatten())
        .collect()
}

fn print_const_with_custom_print_scalar(tcx: TyCtxt<'_>, ct: &'tcx ty::Const<'tcx>) -> String {
    // Use a slightly different format for integer types which always shows the actual value.
    // For all other types, fallback to the original `pretty_print_const`.
    match (ct.val, ct.ty.kind()) {
        (ty::ConstKind::Value(ConstValue::Scalar(int)), ty::Uint(ui)) => {
            format!("{}{}", format_integer_with_underscore_sep(&int.to_string()), ui.name_str())
        }
        (ty::ConstKind::Value(ConstValue::Scalar(int)), ty::Int(i)) => {
            // Signed values must be sign-extended from the scalar's bit width
            // before printing, otherwise negative numbers render as huge values.
            let ty = tcx.lift(ct.ty).unwrap();
            let size = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap().size;
            let data = int.assert_bits(size);
            let sign_extended_data = size.sign_extend(data) as i128;

            format!(
                "{}{}",
                format_integer_with_underscore_sep(&sign_extended_data.to_string()),
                i.name_str()
            )
        }
        _ => ct.to_string(),
    }
}

/// Whether `hir_id` refers to a literal expression, possibly behind a unary negation.
crate fn is_literal_expr(tcx: TyCtxt<'_>, hir_id: hir::HirId) -> bool {
    if let hir::Node::Expr(expr) = tcx.hir().get(hir_id) {
        if let hir::ExprKind::Lit(_) = &expr.kind {
            return true;
        }

        if let hir::ExprKind::Unary(hir::UnOp::Neg, expr) = &expr.kind {
            if let hir::ExprKind::Lit(_) = &expr.kind {
                return true;
            }
        }
    }

    false
}

/// Render a constant's body: prefer the original source snippet when the span
/// is not from a macro expansion, otherwise pretty-print the HIR.
crate fn print_const_expr(tcx: TyCtxt<'_>, body: hir::BodyId) -> String {
    let hir = tcx.hir();
    let value = &hir.body(body).value;

    let snippet = if !value.span.from_expansion() {
        tcx.sess.source_map().span_to_snippet(value.span).ok()
    } else {
        None
    };

    snippet.unwrap_or_else(|| rustc_hir_pretty::id_to_string(&hir, body.hir_id))
}

/// Given a type Path, resolve it to a Type using the TyCtxt
crate fn resolve_type(cx: &mut DocContext<'_>, path: Path, id: hir::HirId) -> Type {
    debug!("resolve_type({:?},{:?})", path, id);

    let is_generic = match path.res {
        Res::PrimTy(p) => return Primitive(PrimitiveType::from(p)),
        Res::SelfTy(..) if path.segments.len() == 1 => {
            return Generic(kw::SelfUpper);
        }
        Res::Def(DefKind::TyParam, _) if path.segments.len() == 1 => {
            return Generic(path.segments[0].name);
        }
        Res::SelfTy(..) | Res::Def(DefKind::TyParam | DefKind::AssocTy, _) => true,
        _ => false,
    };
    let did = register_res(cx, path.res);
    ResolvedPath { path, did, is_generic }
}

/// Synthesize the auto-trait (`Send`, `Sync`, ...) and blanket impls that
/// apply to `item_def_id`, timing each phase via the self-profiler.
crate fn get_auto_trait_and_blanket_impls(
    cx: &mut DocContext<'tcx>,
    item_def_id: DefId,
) -> impl Iterator<Item = Item> {
    let auto_impls = cx
        .sess()
        .prof
        .generic_activity("get_auto_trait_impls")
        .run(|| AutoTraitFinder::new(cx).get_auto_trait_impls(item_def_id));
    let blanket_impls = cx
        .sess()
        .prof
        .generic_activity("get_blanket_impls")
        .run(|| BlanketImplFinder { cx }.get_blanket_impls(item_def_id));
    auto_impls.into_iter().chain(blanket_impls)
}

/// If `res` has a documentation page associated, store it in the cache.
///
/// This is later used by [`href()`] to determine the HTML link for the item.
///
/// [`href()`]: crate::html::format::href
crate fn register_res(cx: &mut DocContext<'_>, res: Res) -> DefId {
    use DefKind::*;
    debug!("register_res({:?})", res);

    let (did, kind) = match res {
        Res::Def(DefKind::AssocTy | DefKind::AssocFn | DefKind::AssocConst, i) => {
            // associated items are documented, but on the page of their parent
            (cx.tcx.parent(i).unwrap(), ItemType::Trait)
        }
        Res::Def(DefKind::Variant, i) => {
            // variant items are documented, but on the page of their parent
            (cx.tcx.parent(i).expect("cannot get parent def id"), ItemType::Enum)
        }
        // Each of these have their own page.
        Res::Def(
            kind @ (Fn | TyAlias | Enum | Trait | Struct | Union | Mod | ForeignTy | Const
            | Static | Macro(..) | TraitAlias),
            i,
        ) => (i, kind.into()),
        // This is part of a trait definition; document the trait.
        Res::SelfTy(Some(trait_def_id), _) => (trait_def_id, ItemType::Trait),
        // This is an inherent impl; it doesn't have its own page.
        Res::SelfTy(None, Some((impl_def_id, _))) => return impl_def_id,
        Res::SelfTy(None, None)
        | Res::PrimTy(_)
        | Res::ToolMod
        | Res::SelfCtor(_)
        | Res::Local(_)
        | Res::NonMacroAttr(_)
        | Res::Err => return res.def_id(),
        // None of these kinds get a dedicated documentation page.
        Res::Def(
            TyParam | ConstParam | Ctor(..) | ExternCrate | Use | ForeignMod | AnonConst
            | OpaqueTy | Field | LifetimeParam | GlobalAsm | Impl | Closure | Generator,
            id,
        ) => return id,
    };
    if did.is_local() {
        return did;
    }
    inline::record_extern_fqn(cx, did, kind);
    if let ItemType::Trait = kind {
        inline::record_extern_trait(cx, did);
    }
    did
}

/// Resolve the target of a `use` and register it for linking, if it resolves.
crate fn resolve_use_source(cx: &mut DocContext<'_>, path: Path) -> ImportSource {
    ImportSource {
        did: if path.res.opt_def_id().is_none() {
            None
        } else {
            Some(register_res(cx, path.res))
        },
        path,
    }
}

/// Run `f` with a fresh (empty) set of `impl Trait` bounds, restoring the
/// previous set afterwards; asserts `f` consumed everything it registered.
crate fn enter_impl_trait<F, R>(cx: &mut DocContext<'_>, f: F) -> R
where
    F: FnOnce(&mut DocContext<'_>) -> R,
{
    let old_bounds = mem::take(&mut cx.impl_trait_bounds);
    let r = f(cx);
    assert!(cx.impl_trait_bounds.is_empty());
    cx.impl_trait_bounds = old_bounds;
    r
}

/// Find the nearest parent module of a [`DefId`].
crate fn find_nearest_parent_module(tcx: TyCtxt<'_>, def_id: DefId) -> Option<DefId> {
    if def_id.is_top_level_module() {
        // The crate root has no parent. Use it as the root instead.
        Some(def_id)
    } else {
        let mut current = def_id;
        // The immediate parent might not always be a module.
        // Find the first parent which is.
        while let Some(parent) = tcx.parent(current) {
            if tcx.def_kind(parent) == DefKind::Mod {
                return Some(parent);
            }
            current = parent;
        }
        None
    }
}

/// Checks for the existence of `hidden` in the attribute below if `flag` is `sym::hidden`:
///
/// ```
/// #[doc(hidden)]
/// pub fn foo() {}
/// ```
///
/// This function exists because it runs on `hir::Attributes` whereas the other is a
/// `clean::Attributes` method.
crate fn has_doc_flag(attrs: ty::Attributes<'_>, flag: Symbol) -> bool {
    attrs.iter().any(|attr| {
        attr.has_name(sym::doc)
            && attr.meta_item_list().map_or(false, |l| rustc_attr::list_contains_name(&l, flag))
    })
}

/// A link to `doc.rust-lang.org` that includes the channel name. Use this instead of manual links
/// so that the channel is consistent.
/// /// Set by `bootstrap::Builder::doc_rust_lang_org_channel` in order to keep tests passing on beta/stable. crate const DOC_RUST_LANG_ORG_CHANNEL: &'static str = env!("DOC_RUST_LANG_ORG_CHANNEL"); /// Render a sequence of macro arms in a format suitable for displaying to the user /// as part of an item declaration. pub(super) fn render_macro_arms<'a>( matchers: impl Iterator<Item = &'a TokenTree>, arm_delim: &str, ) -> String { let mut out = String::new(); for matcher in matchers { writeln!(out, " {} => {{ ... }}{}", render_macro_matcher(matcher), arm_delim).unwrap(); } out } /// Render a macro matcher in a format suitable for displaying to the user /// as part of an item declaration. pub(super) fn render_macro_matcher(matcher: &TokenTree) -> String { rustc_ast_pretty::pprust::tt_to_string(matcher) } pub(super) fn display_macro_source( cx: &mut DocContext<'_>, name: Symbol, def: &ast::MacroDef, def_id: DefId, vis: impl Clean<Visibility>, ) -> String { let tts: Vec<_> = def.body.inner_tokens().into_trees().collect(); // Extract the spans of all matchers. They represent the "interface" of the macro. let matchers = tts.chunks(4).map(|arm| &arm[0]); if def.macro_rules { format!("macro_rules! {} {{\n{}}}", name, render_macro_arms(matchers, ";")) } else { let vis = vis.clean(cx); if matchers.len() <= 1 { format!( "{}macro {}{} {{\n ...\n}}", vis.to_src_with_space(cx.tcx, def_id), name, matchers.map(render_macro_matcher).collect::<String>(), ) } else { format!( "{}macro {} {{\n{}}}", vis.to_src_with_space(cx.tcx, def_id), name, render_macro_arms(matchers, ","), ) } } }
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use rustc_target::spec::abi::{self, Abi}; use ast::{AngleBracketedArgs, ParenthesisedArgs, AttrStyle, BareFnTy}; use ast::{GenericBound, TraitBoundModifier}; use ast::Unsafety; use ast::{Mod, AnonConst, Arg, Arm, Attribute, BindingMode, TraitItemKind}; use ast::Block; use ast::{BlockCheckMode, CaptureBy, Movability}; use ast::{Constness, Crate}; use ast::Defaultness; use ast::EnumDef; use ast::{Expr, ExprKind, RangeLimits}; use ast::{Field, FnDecl, FnHeader}; use ast::{ForeignItem, ForeignItemKind, FunctionRetTy}; use ast::{GenericParam, GenericParamKind}; use ast::GenericArg; use ast::{Ident, ImplItem, IsAsync, IsAuto, Item, ItemKind}; use ast::{Label, Lifetime, Lit, LitKind}; use ast::Local; use ast::MacStmtStyle; use ast::{Mac, Mac_, MacDelimiter}; use ast::{MutTy, Mutability}; use ast::{Pat, PatKind, PathSegment}; use ast::{PolyTraitRef, QSelf}; use ast::{Stmt, StmtKind}; use ast::{VariantData, StructField}; use ast::StrStyle; use ast::SelfKind; use ast::{TraitItem, TraitRef, TraitObjectSyntax}; use ast::{Ty, TyKind, TypeBinding, GenericBounds}; use ast::{Visibility, VisibilityKind, WhereClause, CrateSugar}; use ast::{UseTree, UseTreeKind}; use ast::{BinOpKind, UnOp}; use ast::{RangeEnd, RangeSyntax}; use {ast, attr}; use source_map::{self, SourceMap, Spanned, respan}; use syntax_pos::{self, Span, MultiSpan, BytePos, FileName, edition::Edition}; use errors::{self, Applicability, DiagnosticBuilder, DiagnosticId}; use parse::{self, SeqSep, classify, token}; use parse::lexer::TokenAndSpan; use 
parse::lexer::comments::{doc_comment_style, strip_doc_comment_decoration}; use parse::{new_sub_parser_from_file, ParseSess, Directory, DirectoryOwnership}; use util::parser::{AssocOp, Fixity}; use print::pprust; use ptr::P; use parse::PResult; use ThinVec; use tokenstream::{self, Delimited, ThinTokenStream, TokenTree, TokenStream}; use symbol::{Symbol, keywords}; use std::borrow::Cow; use std::cmp; use std::mem; use std::path::{self, Path, PathBuf}; use std::slice; #[derive(Debug)] /// Whether the type alias or associated type is a concrete type or an existential type pub enum AliasKind { /// Just a new name for the same type Weak(P<Ty>), /// Only trait impls of the type will be usable, not the actual type itself Existential(GenericBounds), } bitflags! { struct Restrictions: u8 { const STMT_EXPR = 1 << 0; const NO_STRUCT_LITERAL = 1 << 1; } } type ItemInfo = (Ident, ItemKind, Option<Vec<Attribute>>); /// How to parse a path. #[derive(Copy, Clone, PartialEq)] pub enum PathStyle { /// In some contexts, notably in expressions, paths with generic arguments are ambiguous /// with something else. For example, in expressions `segment < ....` can be interpreted /// as a comparison and `segment ( ....` can be interpreted as a function call. /// In all such contexts the non-path interpretation is preferred by default for practical /// reasons, but the path interpretation can be forced by the disambiguator `::`, e.g. /// `x<y>` - comparisons, `x::<y>` - unambiguously a path. Expr, /// In other contexts, notably in types, no ambiguity exists and paths can be written /// without the disambiguator, e.g. `x<y>` - unambiguously a path. /// Paths with disambiguators are still accepted, `x::<Y>` - unambiguously a path too. Type, /// A path with generic arguments disallowed, e.g. `foo::bar::Baz`, used in imports, /// visibilities or attributes. /// Technically, this variant is unnecessary and e.g. 
`Expr` can be used instead /// (paths in "mod" contexts have to be checked later for absence of generic arguments /// anyway, due to macros), but it is used to avoid weird suggestions about expected /// tokens when something goes wrong. Mod, } #[derive(Clone, Copy, PartialEq, Debug)] enum SemiColonMode { Break, Ignore, } #[derive(Clone, Copy, PartialEq, Debug)] enum BlockMode { Break, Ignore, } /// Possibly accept an `token::Interpolated` expression (a pre-parsed expression /// dropped into the token stream, which happens while parsing the result of /// macro expansion). Placement of these is not as complex as I feared it would /// be. The important thing is to make sure that lookahead doesn't balk at /// `token::Interpolated` tokens. macro_rules! maybe_whole_expr { ($p:expr) => { if let token::Interpolated(nt) = $p.token.clone() { match nt.0 { token::NtExpr(ref e) | token::NtLiteral(ref e) => { $p.bump(); return Ok((*e).clone()); } token::NtPath(ref path) => { $p.bump(); let span = $p.span; let kind = ExprKind::Path(None, (*path).clone()); return Ok($p.mk_expr(span, kind, ThinVec::new())); } token::NtBlock(ref block) => { $p.bump(); let span = $p.span; let kind = ExprKind::Block((*block).clone(), None); return Ok($p.mk_expr(span, kind, ThinVec::new())); } _ => {}, }; } } } /// As maybe_whole_expr, but for things other than expressions macro_rules! 
maybe_whole { ($p:expr, $constructor:ident, |$x:ident| $e:expr) => { if let token::Interpolated(nt) = $p.token.clone() { if let token::$constructor($x) = nt.0.clone() { $p.bump(); return Ok($e); } } }; } fn maybe_append(mut lhs: Vec<Attribute>, mut rhs: Option<Vec<Attribute>>) -> Vec<Attribute> { if let Some(ref mut rhs) = rhs { lhs.append(rhs); } lhs } #[derive(Debug, Clone, Copy, PartialEq)] enum PrevTokenKind { DocComment, Comma, Plus, Interpolated, Eof, Ident, Other, } trait RecoverQPath: Sized { const PATH_STYLE: PathStyle = PathStyle::Expr; fn to_ty(&self) -> Option<P<Ty>>; fn to_recovered(&self, qself: Option<QSelf>, path: ast::Path) -> Self; fn to_string(&self) -> String; } impl RecoverQPath for Ty { const PATH_STYLE: PathStyle = PathStyle::Type; fn to_ty(&self) -> Option<P<Ty>> { Some(P(self.clone())) } fn to_recovered(&self, qself: Option<QSelf>, path: ast::Path) -> Self { Self { span: path.span, node: TyKind::Path(qself, path), id: self.id } } fn to_string(&self) -> String { pprust::ty_to_string(self) } } impl RecoverQPath for Pat { fn to_ty(&self) -> Option<P<Ty>> { self.to_ty() } fn to_recovered(&self, qself: Option<QSelf>, path: ast::Path) -> Self { Self { span: path.span, node: PatKind::Path(qself, path), id: self.id } } fn to_string(&self) -> String { pprust::pat_to_string(self) } } impl RecoverQPath for Expr { fn to_ty(&self) -> Option<P<Ty>> { self.to_ty() } fn to_recovered(&self, qself: Option<QSelf>, path: ast::Path) -> Self { Self { span: path.span, node: ExprKind::Path(qself, path), id: self.id, attrs: self.attrs.clone() } } fn to_string(&self) -> String { pprust::expr_to_string(self) } } /* ident is handled by common.rs */ #[derive(Clone)] pub struct Parser<'a> { pub sess: &'a ParseSess, /// the current token: pub token: token::Token, /// the span of the current token: pub span: Span, /// the span of the previous token: meta_var_span: Option<Span>, pub prev_span: Span, /// the previous token kind prev_token_kind: PrevTokenKind, restrictions: 
Restrictions, /// Used to determine the path to externally loaded source files crate directory: Directory<'a>, /// Whether to parse sub-modules in other files. pub recurse_into_file_modules: bool, /// Name of the root module this parser originated from. If `None`, then the /// name is not known. This does not change while the parser is descending /// into modules, and sub-parsers have new values for this name. pub root_module_name: Option<String>, crate expected_tokens: Vec<TokenType>, token_cursor: TokenCursor, desugar_doc_comments: bool, /// Whether we should configure out of line modules as we parse. pub cfg_mods: bool, } #[derive(Clone)] struct TokenCursor { frame: TokenCursorFrame, stack: Vec<TokenCursorFrame>, } #[derive(Clone)] struct TokenCursorFrame { delim: token::DelimToken, span: Span, open_delim: bool, tree_cursor: tokenstream::Cursor, close_delim: bool, last_token: LastToken, } /// This is used in `TokenCursorFrame` above to track tokens that are consumed /// by the parser, and then that's transitively used to record the tokens that /// each parse AST item is created with. /// /// Right now this has two states, either collecting tokens or not collecting /// tokens. If we're collecting tokens we just save everything off into a local /// `Vec`. This should eventually though likely save tokens from the original /// token stream and just use slicing of token streams to avoid creation of a /// whole new vector. /// /// The second state is where we're passively not recording tokens, but the last /// token is still tracked for when we want to start recording tokens. This /// "last token" means that when we start recording tokens we'll want to ensure /// that this, the first token, is included in the output. /// /// You can find some more example usage of this in the `collect_tokens` method /// on the parser. 
#[derive(Clone)]
enum LastToken {
    /// Actively recording: every consumed token tree is appended here.
    Collecting(Vec<TokenStream>),
    /// Not recording, but remembering the most recent token so that a later
    /// switch to `Collecting` can include it as the first recorded token.
    Was(Option<TokenStream>),
}

impl TokenCursorFrame {
    /// Creates a frame for the delimited group `delimited` spanning `sp`.
    /// `NoDelim` groups have no delimiter tokens to synthesize, so their
    /// `open_delim`/`close_delim` flags start out already "done".
    fn new(sp: Span, delimited: &Delimited) -> Self {
        TokenCursorFrame {
            delim: delimited.delim,
            span: sp,
            open_delim: delimited.delim == token::NoDelim,
            tree_cursor: delimited.stream().into_trees(),
            close_delim: delimited.delim == token::NoDelim,
            last_token: LastToken::Was(None),
        }
    }
}

impl TokenCursor {
    /// Yields the next token, descending into delimited groups (pushing a
    /// frame) and popping back out when a group is exhausted. Returns `Eof`
    /// once the outermost frame runs out of tokens.
    fn next(&mut self) -> TokenAndSpan {
        loop {
            let tree = if !self.frame.open_delim {
                // First visit to this frame: emit the opening delimiter.
                self.frame.open_delim = true;
                Delimited { delim: self.frame.delim, tts: TokenStream::empty().into() }
                    .open_tt(self.frame.span)
            } else if let Some(tree) = self.frame.tree_cursor.next() {
                tree
            } else if !self.frame.close_delim {
                // Frame exhausted: emit the closing delimiter.
                self.frame.close_delim = true;
                Delimited { delim: self.frame.delim, tts: TokenStream::empty().into() }
                    .close_tt(self.frame.span)
            } else if let Some(frame) = self.stack.pop() {
                // Completely done with this group; resume the enclosing frame.
                self.frame = frame;
                continue
            } else {
                return TokenAndSpan { tok: token::Eof, sp: syntax_pos::DUMMY_SP }
            };

            // Record the consumed tree for token collection (see `LastToken`).
            match self.frame.last_token {
                LastToken::Collecting(ref mut v) => v.push(tree.clone().into()),
                LastToken::Was(ref mut t) => *t = Some(tree.clone().into()),
            }

            match tree {
                TokenTree::Token(sp, tok) => return TokenAndSpan { tok: tok, sp: sp },
                TokenTree::Delimited(sp, ref delimited) => {
                    // Descend into the group; its open delimiter is emitted on
                    // the next loop iteration.
                    let frame = TokenCursorFrame::new(sp, delimited);
                    self.stack.push(mem::replace(&mut self.frame, frame));
                }
            }
        }
    }

    /// Like `next`, but rewrites doc comments into the equivalent
    /// `#[doc = r"..."]` (outer) or `#![doc = r"..."]` (inner) attribute
    /// token streams and yields their tokens instead.
    fn next_desugared(&mut self) -> TokenAndSpan {
        let (sp, name) = match self.next() {
            TokenAndSpan { sp, tok: token::DocComment(name) } => (sp, name),
            tok => return tok,
        };

        let stripped = strip_doc_comment_decoration(&name.as_str());

        // Searches for the occurrences of `"#*` and returns the minimum number of `#`s
        // required to wrap the text, so that the raw string literal built below
        // (`r##"..."##`) cannot be terminated early by the comment's contents.
        let mut num_of_hashes = 0;
        let mut count = 0;
        for ch in stripped.chars() {
            count = match ch {
                '"' => 1,
                '#' if count > 0 => count + 1,
                _ => 0,
            };
            num_of_hashes = cmp::max(num_of_hashes, count);
        }

        // The bracketed `[doc = r#"..."#]` portion of the attribute.
        let body = TokenTree::Delimited(sp, Delimited {
            delim: token::Bracket,
            tts: [TokenTree::Token(sp, token::Ident(ast::Ident::from_str("doc"), false)),
                  TokenTree::Token(sp, token::Eq),
                  TokenTree::Token(sp, token::Literal(
                      token::StrRaw(Symbol::intern(&stripped), num_of_hashes), None))]
                .iter().cloned().collect::<TokenStream>().into(),
        });

        // Splice the synthesized `#[...]`/`#![...]` tokens in front of the
        // current frame, then yield the first of them.
        self.stack.push(mem::replace(&mut self.frame, TokenCursorFrame::new(sp, &Delimited {
            delim: token::NoDelim,
            tts: if doc_comment_style(&name.as_str()) == AttrStyle::Inner {
                [TokenTree::Token(sp, token::Pound), TokenTree::Token(sp, token::Not), body]
                    .iter().cloned().collect::<TokenStream>().into()
            } else {
                [TokenTree::Token(sp, token::Pound), body]
                    .iter().cloned().collect::<TokenStream>().into()
            },
        })));

        self.next()
    }
}

/// A category of token the parser was prepared to accept, used for building
/// "expected one of ..." diagnostics.
#[derive(Clone, PartialEq)]
crate enum TokenType {
    Token(token::Token),
    Keyword(keywords::Keyword),
    Operator,
    Lifetime,
    Ident,
    Path,
    Type,
}

impl TokenType {
    /// Renders this expectation for use in an error message.
    fn to_string(&self) -> String {
        match *self {
            TokenType::Token(ref t) => format!("`{}`", pprust::token_to_string(t)),
            TokenType::Keyword(kw) => format!("`{}`", kw.name()),
            TokenType::Operator => "an operator".to_string(),
            TokenType::Lifetime => "lifetime".to_string(),
            TokenType::Ident => "identifier".to_string(),
            TokenType::Path => "path".to_string(),
            TokenType::Type => "type".to_string(),
        }
    }
}

/// Returns true if `IDENT t` can start a type - `IDENT::a::b`, `IDENT<u8, u8>`,
/// `IDENT<<u8 as Trait>::AssocTy>`.
///
/// Types can also be of the form `IDENT(u8, u8) -> u8`, however this assumes
/// that IDENT is not the ident of a fn trait
fn can_continue_type_after_non_fn_ident(t: &token::Token) -> bool {
    t == &token::ModSep || t == &token::Lt ||
    t == &token::BinOp(token::Shl)
}

/// Information about the path to a module.
pub struct ModulePath {
    /// Name of the module being looked up.
    name: String,
    /// Whether a directory with the module's name exists.
    path_exists: bool,
    /// The resolved file path, or the error describing why resolution failed.
    pub result: Result<ModulePathSuccess, Error>,
}

/// A successfully resolved out-of-line module file.
pub struct ModulePathSuccess {
    pub path: PathBuf,
    pub directory_ownership: DirectoryOwnership,
    /// Whether to warn about this path (legacy module file locations).
    warn: bool,
}

/// Structured parse errors that carry data and a dedicated error code.
pub enum Error {
    /// E0583: neither candidate file for an out-of-line module exists.
    FileNotFoundForModule {
        mod_name: String,
        default_path: String,
        secondary_path: String,
        dir_path: String,
    },
    /// E0584: both candidate files for an out-of-line module exist.
    DuplicatePaths {
        mod_name: String,
        default_path: String,
        secondary_path: String,
    },
    /// E0585: a doc comment in a position where it documents nothing.
    UselessDocComment,
    /// E0586: an inclusive range (`..=`) with no end bound.
    InclusiveRangeWithNoEnd,
}

impl Error {
    /// Converts this error into a diagnostic pointing at `sp`, attaching the
    /// matching error code and help text. The diagnostic is built but not
    /// emitted; the caller decides whether to emit or cancel it.
    fn span_err<S: Into<MultiSpan>>(self,
                                    sp: S,
                                    handler: &errors::Handler) -> DiagnosticBuilder {
        match self {
            Error::FileNotFoundForModule { ref mod_name,
                                           ref default_path,
                                           ref secondary_path,
                                           ref dir_path } => {
                let mut err = struct_span_err!(handler, sp, E0583,
                                               "file not found for module `{}`", mod_name);
                err.help(&format!("name the file either {} or {} inside the directory \"{}\"",
                                  default_path,
                                  secondary_path,
                                  dir_path));
                err
            }
            Error::DuplicatePaths { ref mod_name, ref default_path, ref secondary_path } => {
                let mut err = struct_span_err!(handler, sp, E0584,
                                               "file for module `{}` found at both {} and {}",
                                               mod_name,
                                               default_path,
                                               secondary_path);
                err.help("delete or rename one of them to remove the ambiguity");
                err
            }
            Error::UselessDocComment => {
                let mut err = struct_span_err!(handler, sp, E0585,
                                  "found a documentation comment that doesn't document anything");
                err.help("doc comments must come before what they document, maybe a comment was \
                          intended with `//`?");
                err
            }
            Error::InclusiveRangeWithNoEnd => {
                let mut err = struct_span_err!(handler, sp, E0586,
                                               "inclusive range with no end");
                err.help("inclusive ranges must be bounded at the end (`..=b` or `a..=b`)");
                err
            }
        }
    }
}

/// State of the left-hand side of an expression: attributes may be parsed
/// before it is known which expression they belong to.
#[derive(Debug)]
enum LhsExpr {
    NotYetParsed,
    AttributesParsed(ThinVec<Attribute>),
    AlreadyParsed(P<Expr>),
}

impl From<Option<ThinVec<Attribute>>> for LhsExpr {
    /// `Some(attrs)` becomes `AttributesParsed(attrs)`; `None` means no
    /// attributes (and no expression) have been parsed yet.
    fn from(o: Option<ThinVec<Attribute>>) -> Self {
        if let Some(attrs) = o {
            LhsExpr::AttributesParsed(attrs)
        } else {
            LhsExpr::NotYetParsed
        }
    }
}

impl From<P<Expr>> for LhsExpr {
    fn from(expr: P<Expr>) -> Self {
        LhsExpr::AlreadyParsed(expr)
    }
}

/// Create a placeholder argument.
fn dummy_arg(span: Span) -> Arg {
    // An invalid-keyword ident and `TyKind::Err` mark the argument as a
    // recovery placeholder rather than real user input.
    let ident = Ident::new(keywords::Invalid.name(), span);
    let pat = P(Pat {
        id: ast::DUMMY_NODE_ID,
        node: PatKind::Ident(BindingMode::ByValue(Mutability::Immutable), ident, None),
        span,
    });
    let ty = Ty {
        node: TyKind::Err,
        span,
        id: ast::DUMMY_NODE_ID
    };
    Arg { ty: P(ty), pat: pat, id: ast::DUMMY_NODE_ID }
}

/// Whether unmatched closing tokens should be recorded in `expected_tokens`
/// for diagnostics (`Expect`) or compared silently (`NoExpect`).
#[derive(Copy, Clone, Debug)]
enum TokenExpectType {
    Expect,
    NoExpect,
}

impl<'a> Parser<'a> {
    /// Creates a parser over `tokens`. If `directory` is `None`, the module
    /// search directory is derived from the span of the first token.
    pub fn new(sess: &'a ParseSess,
               tokens: TokenStream,
               directory: Option<Directory<'a>>,
               recurse_into_file_modules: bool,
               desugar_doc_comments: bool)
               -> Self {
        let mut parser = Parser {
            sess,
            token: token::Whitespace,
            span: syntax_pos::DUMMY_SP,
            prev_span: syntax_pos::DUMMY_SP,
            meta_var_span: None,
            prev_token_kind: PrevTokenKind::Other,
            restrictions: Restrictions::empty(),
            recurse_into_file_modules,
            directory: Directory {
                path: Cow::from(PathBuf::new()),
                ownership: DirectoryOwnership::Owned { relative: None }
            },
            root_module_name: None,
            expected_tokens: Vec::new(),
            token_cursor: TokenCursor {
                frame: TokenCursorFrame::new(syntax_pos::DUMMY_SP, &Delimited {
                    delim: token::NoDelim,
                    tts: tokens.into(),
                }),
                stack: Vec::new(),
            },
            desugar_doc_comments,
            cfg_mods: true,
        };

        // Prime `token`/`span` with the first real token.
        let tok = parser.next_tok();
        parser.token = tok.tok;
        parser.span = tok.sp;

        if let Some(directory) = directory {
            parser.directory = directory;
        } else if !parser.span.is_dummy() {
            if let FileName::Real(mut path) = sess.source_map().span_to_unmapped_path(parser.span) {
                path.pop();
                parser.directory.path = Cow::from(path);
            }
        }

        parser.process_potential_macro_variable();
        parser
    }

    /// Pulls the next token from the cursor, optionally desugaring doc
    /// comments into `#[doc]` attribute tokens.
    fn next_tok(&mut self) -> TokenAndSpan {
        let mut next = if self.desugar_doc_comments {
            self.token_cursor.next_desugared()
        } else {
            self.token_cursor.next()
        };
        if next.sp.is_dummy() {
            // Tweak the location for better diagnostics, but keep syntactic context intact.
            next.sp = self.prev_span.with_ctxt(next.sp.ctxt());
        }
        next
    }

    /// Convert the current token to a string using self's reader
    pub fn this_token_to_string(&self) -> String {
        pprust::token_to_string(&self.token)
    }

    /// Describes the current token's category for diagnostics, or `None`
    /// for ordinary tokens.
    fn token_descr(&self) -> Option<&'static str> {
        Some(match &self.token {
            t if t.is_special_ident() => "reserved identifier",
            t if t.is_used_keyword() => "keyword",
            t if t.is_unused_keyword() => "reserved keyword",
            _ => return None,
        })
    }

    /// Like `this_token_to_string`, but prefixed with the token's category
    /// when it has one, e.g. "keyword `fn`".
    fn this_token_descr(&self) -> String {
        if let Some(prefix) = self.token_descr() {
            format!("{} `{}`", prefix, self.this_token_to_string())
        } else {
            format!("`{}`", self.this_token_to_string())
        }
    }

    /// Reports the previously consumed token `t` as unexpected, pointing at
    /// `prev_span`.
    fn unexpected_last<T>(&self, t: &token::Token) -> PResult<'a, T> {
        let token_str = pprust::token_to_string(t);
        Err(self.span_fatal(self.prev_span, &format!("unexpected token: `{}`", token_str)))
    }

    /// Reports the current token as unexpected; always returns `Err`.
    crate fn unexpected<T>(&mut self) -> PResult<'a, T> {
        match self.expect_one_of(&[], &[]) {
            Err(e) => Err(e),
            Ok(_) => unreachable!(),
        }
    }

    /// Expect and consume the token t. Signal an error if
    /// the next token is not t.
    pub fn expect(&mut self, t: &token::Token) -> PResult<'a, ()> {
        if self.expected_tokens.is_empty() {
            if self.token == *t {
                self.bump();
                Ok(())
            } else {
                let token_str = pprust::token_to_string(t);
                let this_token_str = self.this_token_to_string();
                let mut err = self.fatal(&format!("expected `{}`, found `{}`",
                                                  token_str,
                                                  this_token_str));

                let sp = if self.token == token::Token::Eof {
                    // EOF, don't want to point at the following char, but rather the last token
                    self.prev_span
                } else {
                    self.sess.source_map().next_point(self.prev_span)
                };
                let label_exp = format!("expected `{}`", token_str);
                let cm = self.sess.source_map();
                match (cm.lookup_line(self.span.lo()), cm.lookup_line(sp.lo())) {
                    (Ok(ref a), Ok(ref b)) if a.line == b.line => {
                        // When the spans are in the same line, it means that the only content
                        // between them is whitespace, point only at the found token.
                        err.span_label(self.span, label_exp);
                    }
                    _ => {
                        err.span_label(sp, label_exp);
                        err.span_label(self.span, "unexpected token");
                    }
                }
                Err(err)
            }
        } else {
            // Other tokens were also acceptable here; defer to the
            // multi-token path so the diagnostic lists all of them.
            self.expect_one_of(slice::from_ref(t), &[])
        }
    }

    /// Expect next token to be edible or inedible token.  If edible,
    /// then consume it; if inedible, then return without consuming
    /// anything.  Signal a fatal error if next token is unexpected.
    fn expect_one_of(&mut self,
                     edible: &[token::Token],
                     inedible: &[token::Token]) -> PResult<'a,  ()>{
        // Renders the expectation list as "`a`, `b`, or `c`".
        fn tokens_to_string(tokens: &[TokenType]) -> String {
            let mut i = tokens.iter();
            // This might be a sign we need a connect method on Iterator.
            let b = i.next()
                     .map_or("".to_string(), |t| t.to_string());
            i.enumerate().fold(b, |mut b, (i, a)| {
                if tokens.len() > 2 && i == tokens.len() - 2 {
                    b.push_str(", or ");
                } else if tokens.len() == 2 && i == tokens.len() - 2 {
                    b.push_str(" or ");
                } else {
                    b.push_str(", ");
                }
                b.push_str(&a.to_string());
                b
            })
        }
        if edible.contains(&self.token) {
            self.bump();
            Ok(())
        } else if inedible.contains(&self.token) {
            // leave it in the input
            Ok(())
        } else {
            // Gather everything that would have been acceptable, deduplicated
            // and sorted for a stable diagnostic.
            let mut expected = edible.iter()
                .map(|x| TokenType::Token(x.clone()))
                .chain(inedible.iter().map(|x| TokenType::Token(x.clone())))
                .chain(self.expected_tokens.iter().cloned())
                .collect::<Vec<_>>();
            expected.sort_by_cached_key(|x| x.to_string());
            expected.dedup();
            let expect = tokens_to_string(&expected[..]);
            let actual = self.this_token_to_string();
            let (msg_exp, (label_sp, label_exp)) = if expected.len() > 1 {
                let short_expect = if expected.len() > 6 {
                    format!("{} possible tokens", expected.len())
                } else {
                    expect.clone()
                };
                (format!("expected one of {}, found `{}`", expect, actual),
                 (self.sess.source_map().next_point(self.prev_span),
                  format!("expected one of {} here", short_expect)))
            } else if expected.is_empty() {
                (format!("unexpected token: `{}`", actual),
                 (self.prev_span, "unexpected token after this".to_string()))
            } else {
                (format!("expected {}, found `{}`", expect, actual),
                 (self.sess.source_map().next_point(self.prev_span),
                  format!("expected {} here", expect)))
            };
            let mut err = self.fatal(&msg_exp);
            let sp = if self.token == token::Token::Eof {
                // This is EOF, don't want to point at the following char, but rather the last token
                self.prev_span
            } else {
                label_sp
            };
            let cm = self.sess.source_map();
            match (cm.lookup_line(self.span.lo()), cm.lookup_line(sp.lo())) {
                (Ok(ref a), Ok(ref b)) if a.line == b.line => {
                    // When the spans are in the same line, it means that the only content between
                    // them is whitespace, point at the found token in that case:
                    //
                    // X |     () => { syntax error };
                    //   |                    ^^^^^ expected one of 8 possible tokens here
                    //
                    // instead of having:
                    //
                    // X |     () => { syntax error };
                    //   |                   -^^^^^ unexpected token
                    //   |                   |
                    //   |                   expected one of 8 possible tokens here
                    err.span_label(self.span, label_exp);
                }
                _ => {
                    err.span_label(sp, label_exp);
                    err.span_label(self.span, "unexpected token");
                }
            }
            Err(err)
        }
    }

    /// returns the span of expr, if it was not interpolated or the span of the interpolated token
    fn interpolated_or_expr_span(&self,
                                 expr: PResult<'a, P<Expr>>)
                                 -> PResult<'a, (Span, P<Expr>)> {
        expr.map(|e| {
            if self.prev_token_kind == PrevTokenKind::Interpolated {
                (self.prev_span, e)
            } else {
                (e.span, e)
            }
        })
    }

    /// Builds an "expected identifier, found ..." diagnostic for the current
    /// token, suggesting removal of a stray comma that precedes an ident.
    fn expected_ident_found(&self) -> DiagnosticBuilder<'a> {
        let mut err = self.struct_span_err(self.span,
                                           &format!("expected identifier, found {}",
                                                    self.this_token_descr()));
        if let Some(token_descr) = self.token_descr() {
            err.span_label(self.span, format!("expected identifier, found {}", token_descr));
        } else {
            err.span_label(self.span, "expected identifier");
            if self.token == token::Comma && self.look_ahead(1, |t| t.is_ident()) {
                err.span_suggestion(self.span, "remove this comma", "".into());
            }
        }
        err
    }

    /// Parses an identifier, recovering (emitting but continuing) when it is
    /// a reserved identifier.
    pub fn parse_ident(&mut self) -> PResult<'a, ast::Ident> {
        self.parse_ident_common(true)
    }

    /// Parses an identifier. A reserved identifier is an error; with
    /// `recover` the error is emitted and the ident is still produced so
    /// parsing can continue.
    fn parse_ident_common(&mut self, recover: bool) -> PResult<'a, ast::Ident> {
        match self.token {
            token::Ident(ident, _) => {
                if self.token.is_reserved_ident() {
                    let mut err = self.expected_ident_found();
                    if recover {
                        err.emit();
                    } else {
                        return Err(err);
                    }
                }
                let span = self.span;
                self.bump();
                Ok(Ident::new(ident.name, span))
            }
            _ => {
                Err(if self.prev_token_kind == PrevTokenKind::DocComment {
                        // A doc comment right before a non-ident documents
                        // nothing; report that instead.
                        self.span_fatal_err(self.prev_span, Error::UselessDocComment)
                    } else {
                        self.expected_ident_found()
                    })
            }
        }
    }

    /// Check if the next token is `tok`, and return `true` if so.
    ///
    /// This method will automatically add `tok` to `expected_tokens` if `tok` is not
    /// encountered.
    crate fn check(&mut self, tok: &token::Token) -> bool {
        let is_present = self.token == *tok;
        if !is_present { self.expected_tokens.push(TokenType::Token(tok.clone())); }
        is_present
    }

    /// Consume token 'tok' if it exists. Returns true if the given
    /// token was present, false otherwise.
    pub fn eat(&mut self, tok: &token::Token) -> bool {
        let is_present = self.check(tok);
        if is_present { self.bump() }
        is_present
    }

    /// Checks (without consuming) whether the current token is keyword `kw`,
    /// recording the expectation for diagnostics either way.
    fn check_keyword(&mut self, kw: keywords::Keyword) -> bool {
        self.expected_tokens.push(TokenType::Keyword(kw));
        self.token.is_keyword(kw)
    }

    /// If the next token is the given keyword, eat it and return
    /// true. Otherwise, return false.
    pub fn eat_keyword(&mut self, kw: keywords::Keyword) -> bool {
        if self.check_keyword(kw) {
            self.bump();
            true
        } else {
            false
        }
    }

    /// Like `eat_keyword`, but never records the keyword in
    /// `expected_tokens`, so it will not appear in diagnostics.
    fn eat_keyword_noexpect(&mut self, kw: keywords::Keyword) -> bool {
        if self.token.is_keyword(kw) {
            self.bump();
            true
        } else {
            false
        }
    }

    /// If the given word is not a keyword, signal an error.
    /// If the next token is not the given word, signal an error.
    /// Otherwise, eat it.
fn expect_keyword(&mut self, kw: keywords::Keyword) -> PResult<'a, ()> { if !self.eat_keyword(kw) { self.unexpected() } else { Ok(()) } } fn check_ident(&mut self) -> bool { if self.token.is_ident() { true } else { self.expected_tokens.push(TokenType::Ident); false } } fn check_path(&mut self) -> bool { if self.token.is_path_start() { true } else { self.expected_tokens.push(TokenType::Path); false } } fn check_type(&mut self) -> bool { if self.token.can_begin_type() { true } else { self.expected_tokens.push(TokenType::Type); false } } /// Expect and consume a `+`. if `+=` is seen, replace it with a `=` /// and continue. If a `+` is not seen, return false. /// /// This is using when token splitting += into +. /// See issue 47856 for an example of when this may occur. fn eat_plus(&mut self) -> bool { self.expected_tokens.push(TokenType::Token(token::BinOp(token::Plus))); match self.token { token::BinOp(token::Plus) => { self.bump(); true } token::BinOpEq(token::Plus) => { let span = self.span.with_lo(self.span.lo() + BytePos(1)); self.bump_with(token::Eq, span); true } _ => false, } } /// Checks to see if the next token is either `+` or `+=`. /// Otherwise returns false. fn check_plus(&mut self) -> bool { if self.token.is_like_plus() { true } else { self.expected_tokens.push(TokenType::Token(token::BinOp(token::Plus))); false } } /// Expect and consume an `&`. If `&&` is seen, replace it with a single /// `&` and continue. If an `&` is not seen, signal an error. fn expect_and(&mut self) -> PResult<'a, ()> { self.expected_tokens.push(TokenType::Token(token::BinOp(token::And))); match self.token { token::BinOp(token::And) => { self.bump(); Ok(()) } token::AndAnd => { let span = self.span.with_lo(self.span.lo() + BytePos(1)); Ok(self.bump_with(token::BinOp(token::And), span)) } _ => self.unexpected() } } /// Expect and consume an `|`. If `||` is seen, replace it with a single /// `|` and continue. If an `|` is not seen, signal an error. 
fn expect_or(&mut self) -> PResult<'a, ()> { self.expected_tokens.push(TokenType::Token(token::BinOp(token::Or))); match self.token { token::BinOp(token::Or) => { self.bump(); Ok(()) } token::OrOr => { let span = self.span.with_lo(self.span.lo() + BytePos(1)); Ok(self.bump_with(token::BinOp(token::Or), span)) } _ => self.unexpected() } } fn expect_no_suffix(&self, sp: Span, kind: &str, suffix: Option<ast::Name>) { match suffix { None => {/* everything ok */} Some(suf) => { let text = suf.as_str(); if text.is_empty() { self.span_bug(sp, "found empty literal suffix in Some") } self.span_err(sp, &format!("{} with a suffix is invalid", kind)); } } } /// Attempt to consume a `<`. If `<<` is seen, replace it with a single /// `<` and continue. If a `<` is not seen, return false. /// /// This is meant to be used when parsing generics on a path to get the /// starting token. fn eat_lt(&mut self) -> bool { self.expected_tokens.push(TokenType::Token(token::Lt)); match self.token { token::Lt => { self.bump(); true } token::BinOp(token::Shl) => { let span = self.span.with_lo(self.span.lo() + BytePos(1)); self.bump_with(token::Lt, span); true } _ => false, } } fn expect_lt(&mut self) -> PResult<'a, ()> { if !self.eat_lt() { self.unexpected() } else { Ok(()) } } /// Expect and consume a GT. if a >> is seen, replace it /// with a single > and continue. If a GT is not seen, /// signal an error. 
    fn expect_gt(&mut self) -> PResult<'a, ()> {
        self.expected_tokens.push(TokenType::Token(token::Gt));
        match self.token {
            token::Gt => {
                self.bump();
                Ok(())
            }
            token::BinOp(token::Shr) => {
                // Split `>>` into `>` + `>`: consume the first `>`.
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                Ok(self.bump_with(token::Gt, span))
            }
            token::BinOpEq(token::Shr) => {
                // Split `>>=` into `>` + `>=`: consume the first `>`.
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                Ok(self.bump_with(token::Ge, span))
            }
            token::Ge => {
                // Split `>=` into `>` + `=`: consume the `>`.
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                Ok(self.bump_with(token::Eq, span))
            }
            _ => self.unexpected()
        }
    }

    /// Eat and discard tokens until one of `kets` is encountered. Respects token trees,
    /// passes through any errors encountered. Used for error recovery.
    fn eat_to_tokens(&mut self, kets: &[&token::Token]) {
        let handler = self.diagnostic();

        if let Err(ref mut err) = self.parse_seq_to_before_tokens(kets,
                                                                  SeqSep::none(),
                                                                  TokenExpectType::Expect,
                                                                  |p| Ok(p.parse_token_tree())) {
            // Recovery only: cancel rather than emit any error from skipping.
            handler.cancel(err);
        }
    }

    /// Parse a sequence, including the closing delimiter. The function
    /// f must consume tokens until reaching the next separator or
    /// closing bracket.
    pub fn parse_seq_to_end<T, F>(&mut self,
                                  ket: &token::Token,
                                  sep: SeqSep,
                                  f: F)
                                  -> PResult<'a, Vec<T>> where
        F: FnMut(&mut Parser<'a>) -> PResult<'a, T>,
    {
        let val = self.parse_seq_to_before_end(ket, sep, f)?;
        // Consume the closing delimiter as well.
        self.bump();
        Ok(val)
    }

    /// Parse a sequence, not including the closing delimiter. The function
    /// f must consume tokens until reaching the next separator or
    /// closing bracket.
    pub fn parse_seq_to_before_end<T, F>(&mut self,
                                         ket: &token::Token,
                                         sep: SeqSep,
                                         f: F)
                                         -> PResult<'a, Vec<T>>
        where F: FnMut(&mut Parser<'a>) -> PResult<'a, T>
    {
        self.parse_seq_to_before_tokens(&[ket], sep, TokenExpectType::Expect, f)
    }

    /// Workhorse for sequence parsing: applies `f` repeatedly, separated by
    /// `sep`, until one of the `kets` (or a close delimiter / EOF) is seen.
    /// Attempts to recover from similar-looking or omitted separators.
    fn parse_seq_to_before_tokens<T, F>(
        &mut self,
        kets: &[&token::Token],
        sep: SeqSep,
        expect: TokenExpectType,
        mut f: F,
    ) -> PResult<'a, Vec<T>>
        where F: FnMut(&mut Parser<'a>) -> PResult<'a, T>
    {
        let mut first: bool = true;
        let mut v = vec![];
        while !kets.iter().any(|k| {
                match expect {
                    TokenExpectType::Expect => self.check(k),
                    TokenExpectType::NoExpect => self.token == **k,
                }
            }) {
            // A close delimiter or EOF also terminates the sequence.
            match self.token {
                token::CloseDelim(..) | token::Eof => break,
                _ => {}
            };
            if let Some(ref t) = sep.sep {
                if first {
                    // No separator before the first element.
                    first = false;
                } else {
                    if let Err(mut e) = self.expect(t) {
                        // Attempt to keep parsing if it was a similar separator
                        if let Some(ref tokens) = t.similar_tokens() {
                            if tokens.contains(&self.token) {
                                self.bump();
                            }
                        }
                        e.emit();
                        // Attempt to keep parsing if it was an omitted separator
                        match f(self) {
                            Ok(t) => {
                                v.push(t);
                                continue;
                            },
                            Err(mut e) => {
                                e.cancel();
                                break;
                            }
                        }
                    }
                }
            }
            // A trailing separator directly before a terminator ends the
            // sequence (when the grammar allows it).
            if sep.trailing_sep_allowed && kets.iter().any(|k| {
                    match expect {
                        TokenExpectType::Expect => self.check(k),
                        TokenExpectType::NoExpect => self.token == **k,
                    }
                }) {
                break;
            }

            let t = f(self)?;
            v.push(t);
        }

        Ok(v)
    }

    /// Parse a sequence, including the closing delimiter. The function
    /// f must consume tokens until reaching the next separator or
    /// closing bracket.
    fn parse_unspanned_seq<T, F>(&mut self,
                                 bra: &token::Token,
                                 ket: &token::Token,
                                 sep: SeqSep,
                                 f: F)
                                 -> PResult<'a, Vec<T>> where
        F: FnMut(&mut Parser<'a>) -> PResult<'a, T>,
    {
        self.expect(bra)?;
        let result = self.parse_seq_to_before_end(ket, sep, f)?;
        if self.token == *ket {
            self.bump();
        }
        Ok(result)
    }

    /// Advance the parser by one token
    pub fn bump(&mut self) {
        if self.prev_token_kind == PrevTokenKind::Eof {
            // Bumping after EOF is a bad sign, usually an infinite loop.
            self.bug("attempted to bump the parser past EOF (may be stuck in a loop)");
        }

        self.prev_span = self.meta_var_span.take().unwrap_or(self.span);

        // Record last token kind for possible error recovery.
        self.prev_token_kind = match self.token {
            token::DocComment(..) => PrevTokenKind::DocComment,
            token::Comma => PrevTokenKind::Comma,
            token::BinOp(token::Plus) => PrevTokenKind::Plus,
            token::Interpolated(..) => PrevTokenKind::Interpolated,
            token::Eof => PrevTokenKind::Eof,
            token::Ident(..) => PrevTokenKind::Ident,
            _ => PrevTokenKind::Other,
        };

        let next = self.next_tok();
        self.span = next.sp;
        self.token = next.tok;
        self.expected_tokens.clear();
        // check after each token
        self.process_potential_macro_variable();
    }

    /// Advance the parser using provided token as a next one. Use this when
    /// consuming a part of a token. For example a single `<` from `<<`.
    fn bump_with(&mut self, next: token::Token, span: Span) {
        self.prev_span = self.span.with_hi(span.lo());
        // It would be incorrect to record the kind of the current token, but
        // fortunately for tokens currently using `bump_with`, the
        // prev_token_kind will be of no use anyway.
        self.prev_token_kind = PrevTokenKind::Other;
        self.span = span;
        self.token = next;
        self.expected_tokens.clear();
    }

    /// Applies `f` to the token `dist` positions ahead without advancing.
    /// `dist == 0` is the current token. Lookahead does not descend into
    /// delimited groups: a group appears as its opening delimiter, and
    /// positions past the current frame appear as its closing delimiter.
    pub fn look_ahead<R, F>(&self, dist: usize, f: F) -> R where
        F: FnOnce(&token::Token) -> R,
    {
        if dist == 0 {
            return f(&self.token)
        }

        f(&match self.token_cursor.frame.tree_cursor.look_ahead(dist - 1) {
            Some(tree) => match tree {
                TokenTree::Token(_, tok) => tok,
                TokenTree::Delimited(_, delimited) => token::OpenDelim(delimited.delim),
            },
            None => token::CloseDelim(self.token_cursor.frame.delim),
        })
    }

    /// Span of the token `dist` positions ahead; past the end of the current
    /// frame, falls back to the span of the nearest preceding token tree.
    fn look_ahead_span(&self, dist: usize) -> Span {
        if dist == 0 {
            return self.span
        }

        match self.token_cursor.frame.tree_cursor.look_ahead(dist - 1) {
            Some(TokenTree::Token(span, _)) | Some(TokenTree::Delimited(span, _)) => span,
            None => self.look_ahead_span(dist - 1),
        }
    }

    // Thin wrappers over the session's diagnostic handler, anchored at the
    // current span unless one is given explicitly.
    pub fn fatal(&self, m: &str) -> DiagnosticBuilder<'a> {
        self.sess.span_diagnostic.struct_span_fatal(self.span, m)
    }
    pub fn span_fatal<S: Into<MultiSpan>>(&self, sp: S, m: &str) -> DiagnosticBuilder<'a> {
        self.sess.span_diagnostic.struct_span_fatal(sp, m)
    }
    fn span_fatal_err<S: Into<MultiSpan>>(&self, sp: S, err: Error) -> DiagnosticBuilder<'a> {
        err.span_err(sp, self.diagnostic())
    }
    fn bug(&self, m: &str) -> ! {
        self.sess.span_diagnostic.span_bug(self.span, m)
    }
    fn span_err<S: Into<MultiSpan>>(&self, sp: S, m: &str) {
        self.sess.span_diagnostic.span_err(sp, m)
    }
    fn struct_span_err<S: Into<MultiSpan>>(&self, sp: S, m: &str) -> DiagnosticBuilder<'a> {
        self.sess.span_diagnostic.struct_span_err(sp, m)
    }
    crate fn span_bug<S: Into<MultiSpan>>(&self, sp: S, m: &str) -> ! {
        self.sess.span_diagnostic.span_bug(sp, m)
    }
    crate fn abort_if_errors(&self) {
        self.sess.span_diagnostic.abort_if_errors();
    }
    fn cancel(&self, err: &mut DiagnosticBuilder) {
        self.sess.span_diagnostic.cancel(err)
    }
    crate fn diagnostic(&self) -> &'a errors::Handler {
        &self.sess.span_diagnostic
    }

    /// Is the current token one of the keywords that signals a bare function
    /// type?
    fn token_is_bare_fn_keyword(&mut self) -> bool {
        self.check_keyword(keywords::Fn) ||
            self.check_keyword(keywords::Unsafe) ||
            self.check_keyword(keywords::Extern) && self.is_extern_non_path()
    }

    /// parse a TyKind::BareFn type:
    fn parse_ty_bare_fn(&mut self, generic_params: Vec<GenericParam>) -> PResult<'a, TyKind> {
        /*

        [unsafe] [extern "ABI"] fn (S) -> T
         ^~~~^           ^~~~^     ^~^    ^
           |               |        |     |
           |               |        |   Return type
           |               |  Argument types
           |              ABI
        Function Style
        */

        let unsafety = self.parse_unsafety();
        let abi = if self.eat_keyword(keywords::Extern) {
            // `extern` with no explicit ABI string defaults to "C".
            self.parse_opt_abi()?.unwrap_or(Abi::C)
        } else {
            Abi::Rust
        };

        self.expect_keyword(keywords::Fn)?;
        let (inputs, variadic) = self.parse_fn_args(false, true)?;
        let ret_ty = self.parse_ret_ty(false)?;
        let decl = P(FnDecl {
            inputs,
            output: ret_ty,
            variadic,
        });
        Ok(TyKind::BareFn(P(BareFnTy {
            abi,
            unsafety,
            generic_params,
            decl,
        })))
    }

    /// Parse asyncness: `async` or nothing
    fn parse_asyncness(&mut self) -> IsAsync {
        if self.eat_keyword(keywords::Async) {
            IsAsync::Async {
                closure_id: ast::DUMMY_NODE_ID,
                return_impl_trait_id: ast::DUMMY_NODE_ID,
            }
        } else {
            IsAsync::NotAsync
        }
    }

    /// Parse unsafety: `unsafe` or nothing.
    fn parse_unsafety(&mut self) -> Unsafety {
        if self.eat_keyword(keywords::Unsafe) {
            Unsafety::Unsafe
        } else {
            Unsafety::Normal
        }
    }

    /// Parse the items in a trait declaration
    pub fn parse_trait_item(&mut self, at_end: &mut bool) -> PResult<'a, TraitItem> {
        maybe_whole!(self, NtTraitItem, |x| x);
        let attrs = self.parse_outer_attributes()?;
        let (mut item, tokens) = self.collect_tokens(|this| {
            this.parse_trait_item_(at_end, attrs)
        })?;
        // See `parse_item` for why this clause is here.
        if !item.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) {
            item.tokens = Some(tokens);
        }
        Ok(item)
    }

    /// Worker for `parse_trait_item`: parses one associated item (type,
    /// const, macro invocation, or method). Sets `*at_end` when the item is
    /// known to be complete (after `;` or a method body).
    fn parse_trait_item_(&mut self,
                         at_end: &mut bool,
                         mut attrs: Vec<Attribute>) -> PResult<'a, TraitItem> {
        let lo = self.span;

        let (name, node, generics) = if self.eat_keyword(keywords::Type) {
            // Associated type: `type Foo;` / `type Foo: Bound = Ty;`
            self.parse_trait_item_assoc_ty()?
        } else if self.is_const_item() {
            // Associated const: `const NAME: Ty [= expr];`
            self.expect_keyword(keywords::Const)?;
            let ident = self.parse_ident()?;
            self.expect(&token::Colon)?;
            let ty = self.parse_ty()?;
            let default = if self.check(&token::Eq) {
                self.bump();
                let expr = self.parse_expr()?;
                self.expect(&token::Semi)?;
                Some(expr)
            } else {
                self.expect(&token::Semi)?;
                None
            };
            (ident, TraitItemKind::Const(ty, default), ast::Generics::default())
        } else if let Some(mac) = self.parse_assoc_macro_invoc("trait", None, &mut false)? {
            // trait item macro.
            (keywords::Invalid.ident(), ast::TraitItemKind::Macro(mac), ast::Generics::default())
        } else {
            // Method: required (`;`) or provided (`{ ... }`).
            let (constness, unsafety, asyncness, abi) = self.parse_fn_front_matter()?;

            let ident = self.parse_ident()?;
            let mut generics = self.parse_generics()?;

            let d = self.parse_fn_decl_with_self(|p: &mut Parser<'a>| {
                // This is somewhat dubious; We don't want to allow
                // argument names to be left off if there is a
                // definition...
                p.parse_arg_general(false)
            })?;
            generics.where_clause = self.parse_where_clause()?;

            let sig = ast::MethodSig {
                header: FnHeader {
                    unsafety,
                    constness,
                    abi,
                    asyncness,
                },
                decl: d,
            };

            let body = match self.token {
                token::Semi => {
                    self.bump();
                    *at_end = true;
                    debug!("parse_trait_methods(): parsing required method");
                    None
                }
                token::OpenDelim(token::Brace) => {
                    debug!("parse_trait_methods(): parsing provided method");
                    *at_end = true;
                    let (inner_attrs, body) = self.parse_inner_attrs_and_block()?;
                    attrs.extend(inner_attrs.iter().cloned());
                    Some(body)
                }
                _ => {
                    let token_str = self.this_token_to_string();
                    let mut err = self.fatal(&format!("expected `;` or `{{`, found `{}`",
                                                      token_str));
                    err.span_label(self.span, "expected `;` or `{`");
                    return Err(err);
                }
            };
            (ident, ast::TraitItemKind::Method(sig, body), generics)
        };

        Ok(TraitItem {
            id: ast::DUMMY_NODE_ID,
            ident: name,
            attrs,
            generics,
            node,
            span: lo.to(self.prev_span),
            tokens: None,
        })
    }

    /// Parse optional return type [ -> TY ] in function decl
    fn parse_ret_ty(&mut self, allow_plus: bool) -> PResult<'a, FunctionRetTy> {
        if self.eat(&token::RArrow) {
            Ok(FunctionRetTy::Ty(self.parse_ty_common(allow_plus, true)?))
        } else {
            // No `->`: default return type, anchored just before this token.
            Ok(FunctionRetTy::Default(self.span.shrink_to_lo()))
        }
    }

    // Parse a type
    pub fn parse_ty(&mut self) -> PResult<'a, P<Ty>> {
        self.parse_ty_common(true, true)
    }

    /// Parse a type in restricted contexts where `+` is not permitted.
    /// Example 1: `&'a TYPE`
    ///     `+` is prohibited to maintain operator priority (P(+) < P(&)).
    /// Example 2: `value1 as TYPE + value2`
    ///     `+` is prohibited to avoid interactions with expression grammar.
    fn parse_ty_no_plus(&mut self) -> PResult<'a, P<Ty>> {
        self.parse_ty_common(false, true)
    }

    /// Parses any type: parenthesized/tuple, never `!`, raw pointer,
    /// array/slice, reference, `typeof`, inferred `_`, bare fn, poly-trait,
    /// `impl Trait`, `dyn Trait`, trait-object bound list, qualified path,
    /// plain path, or a macro invocation in type position.
    fn parse_ty_common(&mut self, allow_plus: bool, allow_qpath_recovery: bool)
                      -> PResult<'a, P<Ty>> {
        maybe_whole!(self, NtTy, |x| x);

        let lo = self.span;
        let mut impl_dyn_multi = false;
        let node = if self.eat(&token::OpenDelim(token::Paren)) {
            // `(TYPE)` is a parenthesized type.
            // `(TYPE,)` is a tuple with a single field of type TYPE.
            let mut ts = vec![];
            let mut last_comma = false;
            while self.token != token::CloseDelim(token::Paren) {
                ts.push(self.parse_ty()?);
                if self.eat(&token::Comma) {
                    last_comma = true;
                } else {
                    last_comma = false;
                    break;
                }
            }
            let trailing_plus = self.prev_token_kind == PrevTokenKind::Plus;
            self.expect(&token::CloseDelim(token::Paren))?;

            if ts.len() == 1 && !last_comma {
                // A single type without a trailing comma is a grouping, not
                // a tuple - unless it is followed by `+ BOUND ...`.
                let ty = ts.into_iter().nth(0).unwrap().into_inner();
                let maybe_bounds = allow_plus && self.token.is_like_plus();
                match ty.node {
                    // `(TY_BOUND_NOPAREN) + BOUND + ...`.
                    TyKind::Path(None, ref path) if maybe_bounds => {
                        self.parse_remaining_bounds(Vec::new(), path.clone(), lo, true)?
                    }
                    TyKind::TraitObject(ref bounds, TraitObjectSyntax::None)
                            if maybe_bounds && bounds.len() == 1 && !trailing_plus => {
                        let path = match bounds[0] {
                            GenericBound::Trait(ref pt, ..) => pt.trait_ref.path.clone(),
                            _ => self.bug("unexpected lifetime bound"),
                        };
                        self.parse_remaining_bounds(Vec::new(), path, lo, true)?
                    }
                    // `(TYPE)`
                    _ => TyKind::Paren(P(ty))
                }
            } else {
                TyKind::Tup(ts)
            }
        } else if self.eat(&token::Not) {
            // Never type `!`
            TyKind::Never
        } else if self.eat(&token::BinOp(token::Star)) {
            // Raw pointer
            TyKind::Ptr(self.parse_ptr()?)
        } else if self.eat(&token::OpenDelim(token::Bracket)) {
            // Array or slice
            let t = self.parse_ty()?;
            // Parse optional `; EXPR` in `[TYPE; EXPR]`
            let t = match self.maybe_parse_fixed_length_of_vec()? {
                None => TyKind::Slice(t),
                Some(length) => TyKind::Array(t, AnonConst {
                    id: ast::DUMMY_NODE_ID,
                    value: length,
                }),
            };
            self.expect(&token::CloseDelim(token::Bracket))?;
            t
        } else if self.check(&token::BinOp(token::And)) || self.check(&token::AndAnd) {
            // Reference
            self.expect_and()?;
            self.parse_borrowed_pointee()?
        } else if self.eat_keyword_noexpect(keywords::Typeof) {
            // `typeof(EXPR)`
            // In order to not be ambiguous, the type must be surrounded by parens.
            self.expect(&token::OpenDelim(token::Paren))?;
            let e = AnonConst {
                id: ast::DUMMY_NODE_ID,
                value: self.parse_expr()?,
            };
            self.expect(&token::CloseDelim(token::Paren))?;
            TyKind::Typeof(e)
        } else if self.eat_keyword(keywords::Underscore) {
            // A type to be inferred `_`
            TyKind::Infer
        } else if self.token_is_bare_fn_keyword() {
            // Function pointer type
            self.parse_ty_bare_fn(Vec::new())?
        } else if self.check_keyword(keywords::For) {
            // Function pointer type or bound list (trait object type) starting with a poly-trait.
            //   `for<'lt> [unsafe] [extern "ABI"] fn (&'lt S) -> T`
            //   `for<'lt> Trait1<'lt> + Trait2 + 'a`
            let lo = self.span;
            let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
            if self.token_is_bare_fn_keyword() {
                self.parse_ty_bare_fn(lifetime_defs)?
            } else {
                let path = self.parse_path(PathStyle::Type)?;
                let parse_plus = allow_plus && self.check_plus();
                self.parse_remaining_bounds(lifetime_defs, path, lo, parse_plus)?
            }
        } else if self.eat_keyword(keywords::Impl) {
            // Always parse bounds greedily for better error recovery.
            let bounds = self.parse_generic_bounds()?;
            impl_dyn_multi = bounds.len() > 1 || self.prev_token_kind == PrevTokenKind::Plus;
            TyKind::ImplTrait(ast::DUMMY_NODE_ID, bounds)
        } else if self.check_keyword(keywords::Dyn) &&
                  self.look_ahead(1, |t| t.can_begin_bound() &&
                                         !can_continue_type_after_non_fn_ident(t)) {
            self.bump(); // `dyn`
            // Always parse bounds greedily for better error recovery.
            let bounds = self.parse_generic_bounds()?;
            impl_dyn_multi = bounds.len() > 1 || self.prev_token_kind == PrevTokenKind::Plus;
            TyKind::TraitObject(bounds, TraitObjectSyntax::Dyn)
        } else if self.check(&token::Question) ||
                  self.check_lifetime() && self.look_ahead(1, |t| t.is_like_plus()) {
            // Bound list (trait object type)
            TyKind::TraitObject(self.parse_generic_bounds_common(allow_plus)?,
                                TraitObjectSyntax::None)
        } else if self.eat_lt() {
            // Qualified path
            let (qself, path) = self.parse_qpath(PathStyle::Type)?;
            TyKind::Path(Some(qself), path)
        } else if self.token.is_path_start() {
            // Simple path
            let path = self.parse_path(PathStyle::Type)?;
            if self.eat(&token::Not) {
                // Macro invocation in type position
                let (delim, tts) = self.expect_delimited_token_tree()?;
                let node = Mac_ { path, tts, delim };
                TyKind::Mac(respan(lo.to(self.prev_span), node))
            } else {
                // Just a type path or bound list (trait object type) starting with a trait.
                //   `Type`
                //   `Trait1 + Trait2 + 'a`
                if allow_plus && self.check_plus() {
                    self.parse_remaining_bounds(Vec::new(), path, lo, true)?
                } else {
                    TyKind::Path(None, path)
                }
            }
        } else {
            let msg = format!("expected type, found {}", self.this_token_descr());
            return Err(self.fatal(&msg));
        };

        let span = lo.to(self.prev_span);
        let ty = Ty { node, span, id: ast::DUMMY_NODE_ID };

        // Try to recover from use of `+` with incorrect priority.
        self.maybe_report_ambiguous_plus(allow_plus, impl_dyn_multi, &ty);
        self.maybe_recover_from_bad_type_plus(allow_plus, &ty)?;
        let ty = self.maybe_recover_from_bad_qpath(ty, allow_qpath_recovery)?;

        Ok(P(ty))
    }

    /// Having parsed the leading `path` (and optional `for<...>` binder
    /// `generic_params`) of a bound list starting at `lo`, parses the
    /// remaining `+ BOUND + ...` into a trait-object type.
    fn parse_remaining_bounds(&mut self, generic_params: Vec<GenericParam>, path: ast::Path,
                              lo: Span, parse_plus: bool) -> PResult<'a, TyKind> {
        let poly_trait_ref = PolyTraitRef::new(generic_params, path, lo.to(self.prev_span));
        let mut bounds = vec![GenericBound::Trait(poly_trait_ref, TraitBoundModifier::None)];
        if parse_plus {
            self.eat_plus(); // `+`, or `+=` gets split and `+` is discarded
            bounds.append(&mut self.parse_generic_bounds()?);
        }
        Ok(TyKind::TraitObject(bounds, TraitObjectSyntax::None))
    }

    /// Suggests parenthesizing a multi-bound `impl Trait`/`dyn Trait` sum
    /// that appeared in a position where `+` is not allowed.
    fn maybe_report_ambiguous_plus(&mut self, allow_plus: bool, impl_dyn_multi: bool, ty: &Ty) {
        if !allow_plus && impl_dyn_multi {
            let sum_with_parens = format!("({})", pprust::ty_to_string(&ty));
            self.struct_span_err(ty.span, "ambiguous `+` in a type")
                .span_suggestion_with_applicability(
                    ty.span,
                    "use parentheses to disambiguate",
                    sum_with_parens,
                    Applicability::MachineApplicable
                ).emit();
        }
    }

    /// Recovers from `TYPE + ...` written where `+` is not permitted (e.g.
    /// after a reference type).
    fn maybe_recover_from_bad_type_plus(&mut self, allow_plus: bool, ty: &Ty) -> PResult<'a, ()> {
        // Do not add `+` to expected tokens.
    // Nothing to recover unless `+` was disallowed here and is the next token.
    if !allow_plus || !self.token.is_like_plus() {
        return Ok(())
    }

    self.bump(); // `+`
    let bounds = self.parse_generic_bounds()?;
    let sum_span = ty.span.to(self.prev_span);

    let mut err = struct_span_err!(self.sess.span_diagnostic, sum_span, E0178,
        "expected a path on the left-hand side of `+`, not `{}`", pprust::ty_to_string(ty));

    match ty.node {
        // `&Ty + Bound`: suggest `&(Ty + Bound)` by pretty-printing the sum.
        TyKind::Rptr(ref lifetime, ref mut_ty) => {
            let sum_with_parens = pprust::to_string(|s| {
                use print::pprust::PrintState;
                s.s.word("&")?;
                s.print_opt_lifetime(lifetime)?;
                s.print_mutability(mut_ty.mutbl)?;
                s.popen()?;
                s.print_type(&mut_ty.ty)?;
                s.print_type_bounds(" +", &bounds)?;
                s.pclose()
            });
            err.span_suggestion_with_applicability(
                sum_span,
                "try adding parentheses",
                sum_with_parens,
                Applicability::MachineApplicable
            );
        }
        TyKind::Ptr(..) | TyKind::BareFn(..) => {
            err.span_label(sum_span, "perhaps you forgot parentheses?");
        }
        _ => {
            err.span_label(sum_span, "expected a path");
        },
    }
    err.emit();
    Ok(())
}

// Try to recover from associated item paths like `[T]::AssocItem`/`(T, U)::AssocItem`.
// Rewrites `base` into a qualified path `<base>::segments...` and suggests the
// angle-bracketed form to the user.
fn maybe_recover_from_bad_qpath<T: RecoverQPath>(&mut self, base: T, allow_recovery: bool)
                                                 -> PResult<'a, T> {
    // Do not add `::` to expected tokens.
    if !allow_recovery || self.token != token::ModSep {
        return Ok(base);
    }
    let ty = match base.to_ty() {
        Some(ty) => ty,
        None => return Ok(base),
    };

    self.bump(); // `::`
    let mut segments = Vec::new();
    self.parse_path_segments(&mut segments, T::PATH_STYLE, true)?;

    let span = ty.span.to(self.prev_span);
    let path_span = span.to(span); // use an empty path since `position` == 0
    let recovered = base.to_recovered(
        Some(QSelf { ty, path_span, position: 0 }),
        ast::Path { segments, span },
    );

    self.diagnostic()
        .struct_span_err(span, "missing angle brackets in associated item path")
        .span_suggestion_with_applicability( // this is a best-effort recovery
            span, "try", recovered.to_string(), Applicability::MaybeIncorrect
        ).emit();

    Ok(recovered)
}

/// Parse the pointee of a reference type: `['lifetime] [mut] TYPE`.
/// The leading `&`/`&&` has already been consumed by the caller.
fn parse_borrowed_pointee(&mut self) -> PResult<'a, TyKind> {
    let opt_lifetime = if self.check_lifetime() { Some(self.expect_lifetime()) } else { None };
    let mutbl = self.parse_mutability();
    let ty = self.parse_ty_no_plus()?;
    return Ok(TyKind::Rptr(opt_lifetime, MutTy { ty: ty, mutbl: mutbl }));
}

/// Parse a raw-pointer pointee: `mut TYPE` or `const TYPE` (after `*`).
/// A missing qualifier is an error but recovers as `*const`.
fn parse_ptr(&mut self) -> PResult<'a, MutTy> {
    let mutbl = if self.eat_keyword(keywords::Mut) {
        Mutability::Mutable
    } else if self.eat_keyword(keywords::Const) {
        Mutability::Immutable
    } else {
        let span = self.prev_span;
        self.span_err(span,
                      "expected mut or const in raw pointer type (use \
                       `*mut T` or `*const T` as appropriate)");
        Mutability::Immutable
    };
    let t = self.parse_ty_no_plus()?;
    Ok(MutTy { ty: t, mutbl: mutbl })
}

/// Lookahead: does the upcoming input look like a named argument
/// (`[&|&&|mut] ident :`)? Does not consume any tokens.
fn is_named_argument(&mut self) -> bool {
    // `offset` skips sigil tokens that may precede the identifier.
    let offset = match self.token {
        token::Interpolated(ref nt) => match nt.0 {
            token::NtPat(..) => return self.look_ahead(1, |t| t == &token::Colon),
            _ => 0,
        }
        token::BinOp(token::And) | token::AndAnd => 1,
        _ if self.token.is_keyword(keywords::Mut) => 1,
        _ => 0,
    };

    self.look_ahead(offset, |t| t.is_ident()) &&
        self.look_ahead(offset + 1, |t| t == &token::Colon)
}

/// This version of parse arg doesn't necessarily require
/// identifier names.
/// Parse one function argument, as `PAT: TYPE` when `require_name` is true
/// (or a named argument is detected), otherwise as a bare type with an
/// invalid-ident placeholder pattern. Recovers from patterns in bodiless
/// methods (E0642).
fn parse_arg_general(&mut self, require_name: bool) -> PResult<'a, Arg> {
    maybe_whole!(self, NtArg, |x| x);

    let (pat, ty) = if require_name || self.is_named_argument() {
        debug!("parse_arg_general parse_pat (require_name:{})", require_name);
        let pat = self.parse_pat()?;

        self.expect(&token::Colon)?;
        (pat, self.parse_ty()?)
    } else {
        debug!("parse_arg_general ident_to_pat");
        // Snapshot so we can roll back if the speculative pattern parse fails.
        let parser_snapshot_before_pat = self.clone();

        // We're going to try parsing the argument as a pattern (even though it's not
        // allowed). This way we can provide better errors to the user.
        // NOTE(review): `do catch` is the pre-2018 unstable try-block syntax.
        let pat_arg: PResult<'a, _> = do catch {
            let pat = self.parse_pat()?;
            self.expect(&token::Colon)?;
            (pat, self.parse_ty()?)
        };

        match pat_arg {
            Ok((pat, ty)) => {
                let mut err = self.diagnostic().struct_span_err_with_code(
                    pat.span,
                    "patterns aren't allowed in methods without bodies",
                    DiagnosticId::Error("E0642".into()),
                );
                err.span_suggestion_short_with_applicability(
                    pat.span,
                    "give this argument a name or use an underscore to ignore it",
                    "_".to_owned(),
                    Applicability::MachineApplicable,
                );
                err.emit();
                // Pretend the pattern is `_`, to avoid duplicate errors from AST validation.
                let pat = P(Pat {
                    node: PatKind::Wild,
                    span: pat.span,
                    id: ast::DUMMY_NODE_ID
                });
                (pat, ty)
            }
            Err(mut err) => {
                err.cancel();
                // Recover from attempting to parse the argument as a pattern. This means
                // the type is alone, with no name, e.g. `fn foo(u32)`.
                mem::replace(self, parser_snapshot_before_pat);
                debug!("parse_arg_general ident_to_pat");
                let ident = Ident::new(keywords::Invalid.name(), self.prev_span);
                let ty = self.parse_ty()?;
                let pat = P(Pat {
                    id: ast::DUMMY_NODE_ID,
                    node: PatKind::Ident(
                        BindingMode::ByValue(Mutability::Immutable), ident, None),
                    span: ty.span,
                });
                (pat, ty)
            }
        }
    };

    Ok(Arg { ty, pat, id: ast::DUMMY_NODE_ID })
}

/// Parse a single function argument
crate fn parse_arg(&mut self) -> PResult<'a, Arg> {
    self.parse_arg_general(true)
}

/// Parse an argument in a lambda header e.g. |arg, arg|
/// The `: TYPE` part is optional; a missing type becomes `TyKind::Infer`.
fn parse_fn_block_arg(&mut self) -> PResult<'a, Arg> {
    let pat = self.parse_pat()?;
    let t = if self.eat(&token::Colon) {
        self.parse_ty()?
    } else {
        P(Ty {
            id: ast::DUMMY_NODE_ID,
            node: TyKind::Infer,
            span: self.span,
        })
    };
    Ok(Arg {
        ty: t,
        pat,
        id: ast::DUMMY_NODE_ID
    })
}

/// If the next token is `;`, parse the length expression of `[TYPE; EXPR]`.
fn maybe_parse_fixed_length_of_vec(&mut self) -> PResult<'a, Option<P<ast::Expr>>> {
    if self.eat(&token::Semi) {
        Ok(Some(self.parse_expr()?))
    } else {
        Ok(None)
    }
}

/// Matches token_lit = LIT_INTEGER | ...
/// Consumes the literal token (or interpolated literal expression) and
/// returns its `LitKind`; reports illegal suffixes.
fn parse_lit_token(&mut self) -> PResult<'a, LitKind> {
    let out = match self.token {
        token::Interpolated(ref nt) => match nt.0 {
            token::NtExpr(ref v) | token::NtLiteral(ref v) => match v.node {
                ExprKind::Lit(ref lit) => { lit.node.clone() }
                _ => { return self.unexpected_last(&self.token); }
            },
            _ => { return self.unexpected_last(&self.token); }
        },
        token::Literal(lit, suf) => {
            let diag = Some((self.span, &self.sess.span_diagnostic));
            let (suffix_illegal, result) = parse::lit_token(lit, suf, diag);

            if suffix_illegal {
                let sp = self.span;
                self.expect_no_suffix(sp, &format!("{} literal", lit.short_name()), suf)
            }

            result.unwrap()
        }
        _ => { return self.unexpected_last(&self.token); }
    };

    self.bump();
    Ok(out)
}

/// Matches lit = true | false | token_lit
crate fn parse_lit(&mut self) -> PResult<'a, Lit> {
    let lo = self.span;
    let lit = if self.eat_keyword(keywords::True) {
        LitKind::Bool(true)
    } else if self.eat_keyword(keywords::False) {
        LitKind::Bool(false)
    } else {
        let lit = self.parse_lit_token()?;
        lit
    };
    Ok(source_map::Spanned { node: lit, span: lo.to(self.prev_span) })
}

/// matches '-' lit | lit (cf.
/// ast_validation::AstValidator::check_expr_within_pat)
/// Parses a literal optionally preceded by unary minus; the minus is
/// represented as `ExprKind::Unary(UnOp::Neg, ..)` wrapping the literal.
crate fn parse_literal_maybe_minus(&mut self) -> PResult<'a, P<Expr>> {
    maybe_whole_expr!(self);

    let minus_lo = self.span;
    let minus_present = self.eat(&token::BinOp(token::Minus));
    let lo = self.span;
    let literal = P(self.parse_lit()?);
    let hi = self.prev_span;
    let expr = self.mk_expr(lo.to(hi), ExprKind::Lit(literal), ThinVec::new());

    if minus_present {
        let minus_hi = self.prev_span;
        let unary = self.mk_unary(UnOp::Neg, expr);
        Ok(self.mk_expr(minus_lo.to(minus_hi), unary, ThinVec::new()))
    } else {
        Ok(expr)
    }
}

/// Parse one path-segment identifier; path-segment keywords (e.g. `self`,
/// `super` — per `is_path_segment_keyword`) are accepted here.
fn parse_path_segment_ident(&mut self) -> PResult<'a, ast::Ident> {
    match self.token {
        token::Ident(ident, _) if self.token.is_path_segment_keyword() => {
            let span = self.span;
            self.bump();
            Ok(Ident::new(ident.name, span))
        }
        _ => self.parse_ident(),
    }
}

/// Parses qualified path.
/// Assumes that the leading `<` has been parsed already.
///
/// `qualified_path = <type [as trait_ref]>::path`
///
/// # Examples
/// `<T>::default`
/// `<T as U>::a`
/// `<T as U>::F::a<S>` (without disambiguator)
/// `<T as U>::F::a::<S>` (with disambiguator)
fn parse_qpath(&mut self, style: PathStyle) -> PResult<'a, (QSelf, ast::Path)> {
    let lo = self.prev_span;
    let ty = self.parse_ty()?;

    // `path` will contain the prefix of the path up to the `>`,
    // if any (e.g., `U` in the `<T as U>::*` examples
    // above). `path_span` has the span of that path, or an empty
    // span in the case of something like `<T>::Bar`.
    let (mut path, path_span);
    if self.eat_keyword(keywords::As) {
        let path_lo = self.span;
        path = self.parse_path(PathStyle::Type)?;
        path_span = path_lo.to(self.prev_span);
    } else {
        path = ast::Path { segments: Vec::new(), span: syntax_pos::DUMMY_SP };
        path_span = self.span.to(self.span);
    }

    self.expect(&token::Gt)?;
    self.expect(&token::ModSep)?;

    // `position` records how many leading segments belong to the qualifier.
    let qself = QSelf { ty, path_span, position: path.segments.len() };
    self.parse_path_segments(&mut path.segments, style, true)?;

    Ok((qself, ast::Path { segments: path.segments, span: lo.to(self.prev_span) }))
}

/// Parses simple paths.
///
/// `path = [::] segment+`
/// `segment = ident | ident[::]<args> | ident[::](args) [-> type]`
///
/// # Examples
/// `a::b::C<D>` (without disambiguator)
/// `a::b::C::<D>` (with disambiguator)
/// `Fn(Args)` (without disambiguator)
/// `Fn::(Args)` (with disambiguator)
pub fn parse_path(&mut self, style: PathStyle) -> PResult<'a, ast::Path> {
    self.parse_path_common(style, true)
}

/// Worker for `parse_path`; `enable_warning` toggles the "unnecessary path
/// disambiguator" warning emitted when parsing segments.
crate fn parse_path_common(&mut self, style: PathStyle, enable_warning: bool)
                           -> PResult<'a, ast::Path> {
    maybe_whole!(self, NtPath, |path| {
        // Generic args are not legal in module-style paths.
        if style == PathStyle::Mod &&
           path.segments.iter().any(|segment| segment.args.is_some()) {
            self.diagnostic().span_err(path.span, "unexpected generic arguments in path");
        }
        path
    });

    let lo = self.meta_var_span.unwrap_or(self.span);
    let mut segments = Vec::new();
    if self.eat(&token::ModSep) {
        // Leading `::` — record the crate root as the first segment.
        segments.push(PathSegment::crate_root(lo.shrink_to_lo()));
    }
    self.parse_path_segments(&mut segments, style, enable_warning)?;

    Ok(ast::Path { segments, span: lo.to(self.prev_span) })
}

/// Like `parse_path`, but also supports parsing `Word` meta items into paths for back-compat.
/// This is used when parsing derive macro paths in `#[derive]` attributes.
/// See the doc comment directly above: accepts an interpolated `Word` meta
/// item as a path (back-compat for `#[derive]`), else falls back to `parse_path`.
pub fn parse_path_allowing_meta(&mut self, style: PathStyle) -> PResult<'a, ast::Path> {
    let meta_ident = match self.token {
        token::Interpolated(ref nt) => match nt.0 {
            token::NtMeta(ref meta) => match meta.node {
                ast::MetaItemKind::Word => Some(meta.ident.clone()),
                _ => None,
            },
            _ => None,
        },
        _ => None,
    };
    if let Some(path) = meta_ident {
        self.bump();
        return Ok(path);
    }
    self.parse_path(style)
}

/// Parse `::`-separated segments, appending them to `segments`.
/// Stops at an import coupler (`::{` / `::*`) or when no `::` follows.
fn parse_path_segments(&mut self,
                       segments: &mut Vec<PathSegment>,
                       style: PathStyle,
                       enable_warning: bool)
                       -> PResult<'a, ()> {
    loop {
        segments.push(self.parse_path_segment(style, enable_warning)?);

        if self.is_import_coupler() || !self.eat(&token::ModSep) {
            return Ok(());
        }
    }
}

/// Parse one path segment: an identifier optionally followed by generic
/// arguments — angle-bracketed `<...>` or parenthesized `(...) [-> Ty]`.
fn parse_path_segment(&mut self, style: PathStyle, enable_warning: bool)
                      -> PResult<'a, PathSegment> {
    let ident = self.parse_path_segment_ident()?;

    // `<`, `<<` (which splits into two `<`s) or `(` can begin an args list.
    let is_args_start = |token: &token::Token| match *token {
        token::Lt | token::BinOp(token::Shl) | token::OpenDelim(token::Paren) => true,
        _ => false,
    };
    let check_args_start = |this: &mut Self| {
        this.expected_tokens.extend_from_slice(
            &[TokenType::Token(token::Lt), TokenType::Token(token::OpenDelim(token::Paren))]
        );
        is_args_start(&this.token)
    };

    Ok(if style == PathStyle::Type && check_args_start(self) ||
          style != PathStyle::Mod && self.check(&token::ModSep)
                                  && self.look_ahead(1, |t| is_args_start(t)) {
        // Generic arguments are found - `<`, `(`, `::<` or `::(`.
        let lo = self.span;
        // In type position the `::` disambiguator is unnecessary; warn.
        if self.eat(&token::ModSep) && style == PathStyle::Type && enable_warning {
            self.diagnostic().struct_span_warn(self.prev_span, "unnecessary path disambiguator")
                             .span_label(self.prev_span, "try removing `::`").emit();
        }

        let args = if self.eat_lt() {
            // `<'a, T, A = U>`
            let (args, bindings) = self.parse_generic_args()?;
            self.expect_gt()?;
            let span = lo.to(self.prev_span);
            AngleBracketedArgs { args, bindings, span }.into()
        } else {
            // `(T, U) -> R`
            self.bump(); // `(`
            let inputs = self.parse_seq_to_before_tokens(
                &[&token::CloseDelim(token::Paren)],
                SeqSep::trailing_allowed(token::Comma),
                TokenExpectType::Expect,
                |p| p.parse_ty())?;
            self.bump(); // `)`
            let span = lo.to(self.prev_span);
            let output = if self.eat(&token::RArrow) {
                Some(self.parse_ty_common(false, false)?)
            } else {
                None
            };
            ParenthesisedArgs { inputs, output, span }.into()
        };

        PathSegment { ident, args }
    } else {
        // Generic arguments are not found.
        PathSegment::from_ident(ident)
    })
}

/// Check (without consuming) whether the current token is a lifetime,
/// registering `Lifetime` among the expected tokens for diagnostics.
crate fn check_lifetime(&mut self) -> bool {
    self.expected_tokens.push(TokenType::Lifetime);
    self.token.is_lifetime()
}

/// Parse single lifetime 'a or panic.
crate fn expect_lifetime(&mut self) -> Lifetime {
    if let Some(ident) = self.token.lifetime() {
        let span = self.span;
        self.bump();
        Lifetime { ident: Ident::new(ident.name, span), id: ast::DUMMY_NODE_ID }
    } else {
        self.span_bug(self.span, "not a lifetime")
    }
}

/// Consume a label (lexed as a lifetime token) if present.
fn eat_label(&mut self) -> Option<Label> {
    if let Some(ident) = self.token.lifetime() {
        let span = self.span;
        self.bump();
        Some(Label { ident: Ident::new(ident.name, span) })
    } else {
        None
    }
}

/// Parse mutability (`mut` or nothing).
fn parse_mutability(&mut self) -> Mutability {
    if self.eat_keyword(keywords::Mut) {
        Mutability::Mutable
    } else {
        Mutability::Immutable
    }
}

/// Parse a struct-field name: either an unsuffixed integer literal
/// (tuple-struct index) or a plain identifier.
fn parse_field_name(&mut self) -> PResult<'a, Ident> {
    if let token::Literal(token::Integer(name), None) = self.token {
        self.bump();
        Ok(Ident::new(name, self.prev_span))
    } else {
        self.parse_ident_common(false)
    }
}

/// Parse ident (COLON expr)?
fn parse_field(&mut self) -> PResult<'a, Field> {
    let attrs = self.parse_outer_attributes()?;
    let lo = self.span;

    // Check if a colon exists one ahead. This means we're parsing a fieldname.
    let (fieldname, expr, is_shorthand) = if self.look_ahead(1, |t| t == &token::Colon) {
        let fieldname = self.parse_field_name()?;
        self.bump(); // `:`
        (fieldname, self.parse_expr()?, false)
    } else {
        let fieldname = self.parse_ident_common(false)?;

        // Mimic `x: x` for the `x` field shorthand.
        let path = ast::Path::from_ident(fieldname);
        let expr = self.mk_expr(fieldname.span, ExprKind::Path(None, path), ThinVec::new());
        (fieldname, expr, true)
    };
    Ok(ast::Field {
        ident: fieldname,
        span: lo.to(expr.span),
        expr,
        is_shorthand,
        attrs: attrs.into(),
    })
}

// --- Small AST-construction helpers: wrap `ExprKind`s into nodes. ---

fn mk_expr(&mut self, span: Span, node: ExprKind, attrs: ThinVec<Attribute>) -> P<Expr> {
    P(Expr { node, span, attrs, id: ast::DUMMY_NODE_ID })
}

fn mk_unary(&mut self, unop: ast::UnOp, expr: P<Expr>) -> ast::ExprKind {
    ExprKind::Unary(unop, expr)
}

fn mk_binary(&mut self, binop: ast::BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ast::ExprKind {
    ExprKind::Binary(binop, lhs, rhs)
}

fn mk_call(&mut self, f: P<Expr>, args: Vec<P<Expr>>) -> ast::ExprKind {
    ExprKind::Call(f, args)
}

fn mk_index(&mut self, expr: P<Expr>, idx: P<Expr>) -> ast::ExprKind {
    ExprKind::Index(expr, idx)
}

/// Build a range expression; `a..=` with no end is an error
/// (`InclusiveRangeWithNoEnd`).
fn mk_range(&mut self,
            start: Option<P<Expr>>,
            end: Option<P<Expr>>,
            limits: RangeLimits)
            -> PResult<'a, ast::ExprKind> {
    if end.is_none() && limits == RangeLimits::Closed {
        Err(self.span_fatal_err(self.span, Error::InclusiveRangeWithNoEnd))
    } else {
        Ok(ExprKind::Range(start, end, limits))
    }
}

fn mk_assign_op(&mut self, binop: ast::BinOp,
                lhs: P<Expr>, rhs: P<Expr>) -> ast::ExprKind {
    ExprKind::AssignOp(binop, lhs, rhs)
}

pub fn mk_mac_expr(&mut self, span: Span, m: Mac_, attrs: ThinVec<Attribute>) -> P<Expr> {
    P(Expr {
        id: ast::DUMMY_NODE_ID,
        node: ExprKind::Mac(source_map::Spanned {node: m, span: span}),
        span,
        attrs,
    })
}

/// Expect a delimited token tree (`( … )`, `[ … ]` or `{ … }`) — used for
/// macro invocation arguments — and return its delimiter kind plus stream.
fn expect_delimited_token_tree(&mut self) -> PResult<'a, (MacDelimiter, ThinTokenStream)> {
    let delim = match self.token {
        token::OpenDelim(delim) => delim,
        _ => {
            let msg = "expected open delimiter";
            let mut err = self.fatal(msg);
            err.span_label(self.span, msg);
            return Err(err)
        }
    };
    let delimited = match self.parse_token_tree() {
        TokenTree::Delimited(_, delimited) => delimited,
        _ => unreachable!(),
    };
    let delim = match delim {
        token::Paren => MacDelimiter::Parenthesis,
        token::Bracket => MacDelimiter::Bracket,
        token::Brace => MacDelimiter::Brace,
        token::NoDelim => self.bug("unexpected no delimiter"),
    };
    Ok((delim, delimited.stream().into()))
}

/// At the bottom (top?) of the precedence hierarchy,
/// parse things like parenthesized exprs,
/// macros, return, etc.
///
/// NB: This does not parse outer attributes,
/// and is private because it only works
/// correctly if called from parse_dot_or_call_expr().
fn parse_bottom_expr(&mut self) -> PResult<'a, P<Expr>> {
    maybe_whole_expr!(self);

    // Outer attributes are already parsed and will be
    // added to the return value after the fact.
    //
    // Therefore, prevent sub-parser from parsing
    // attributes by giving them a empty "already parsed" list.
    let mut attrs = ThinVec::new();

    let lo = self.span;
    let mut hi = self.span;

    let ex: ExprKind;

    // Note: when adding new syntax here, don't forget to adjust Token::can_begin_expr().
    // Dispatch on the first token of the expression. Branches either set
    // `ex`/`hi` and fall through to the shared tail, or return early.
    match self.token {
        token::OpenDelim(token::Paren) => {
            self.bump();

            attrs.extend(self.parse_inner_attributes()?);

            // (e) is parenthesized e
            // (e,) is a tuple with only one field, e
            let mut es = vec![];
            let mut trailing_comma = false;
            while self.token != token::CloseDelim(token::Paren) {
                es.push(self.parse_expr()?);
                self.expect_one_of(&[], &[token::Comma, token::CloseDelim(token::Paren)])?;
                if self.check(&token::Comma) {
                    trailing_comma = true;
                    self.bump();
                } else {
                    trailing_comma = false;
                    break;
                }
            }
            self.bump();

            hi = self.prev_span;
            ex = if es.len() == 1 && !trailing_comma {
                ExprKind::Paren(es.into_iter().nth(0).unwrap())
            } else {
                ExprKind::Tup(es)
            };
        }
        token::OpenDelim(token::Brace) => {
            return self.parse_block_expr(None, lo, BlockCheckMode::Default, attrs);
        }
        token::BinOp(token::Or) | token::OrOr => {
            // `|...|` or `||` starts a closure.
            return self.parse_lambda_expr(attrs);
        }
        token::OpenDelim(token::Bracket) => {
            self.bump();

            attrs.extend(self.parse_inner_attributes()?);

            if self.check(&token::CloseDelim(token::Bracket)) {
                // Empty vector.
                self.bump();
                ex = ExprKind::Array(Vec::new());
            } else {
                // Nonempty vector.
                let first_expr = self.parse_expr()?;
                if self.check(&token::Semi) {
                    // Repeating array syntax: [ 0; 512 ]
                    self.bump();
                    let count = AnonConst {
                        id: ast::DUMMY_NODE_ID,
                        value: self.parse_expr()?,
                    };
                    self.expect(&token::CloseDelim(token::Bracket))?;
                    ex = ExprKind::Repeat(first_expr, count);
                } else if self.check(&token::Comma) {
                    // Vector with two or more elements.
                    self.bump();
                    let remaining_exprs = self.parse_seq_to_end(
                        &token::CloseDelim(token::Bracket),
                        SeqSep::trailing_allowed(token::Comma),
                        |p| Ok(p.parse_expr()?)
                    )?;
                    let mut exprs = vec![first_expr];
                    exprs.extend(remaining_exprs);
                    ex = ExprKind::Array(exprs);
                } else {
                    // Vector with one element.
                    self.expect(&token::CloseDelim(token::Bracket))?;
                    ex = ExprKind::Array(vec![first_expr]);
                }
            }
            hi = self.prev_span;
        }
        _ => {
            if self.eat_lt() {
                // `<T as Trait>::…` qualified path in expression position.
                let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
                hi = path.span;
                return Ok(self.mk_expr(lo.to(hi), ExprKind::Path(Some(qself), path), attrs));
            }
            if self.span.edition() >= Edition::Edition2018 &&
               self.check_keyword(keywords::Async) {
                if self.is_async_block() { // check for `async {` and `async move {`
                    return self.parse_async_block(attrs);
                } else {
                    return self.parse_lambda_expr(attrs);
                }
            }
            if self.check_keyword(keywords::Move) || self.check_keyword(keywords::Static) {
                return self.parse_lambda_expr(attrs);
            }
            if self.eat_keyword(keywords::If) {
                return self.parse_if_expr(attrs);
            }
            if self.eat_keyword(keywords::For) {
                let lo = self.prev_span;
                return self.parse_for_expr(None, lo, attrs);
            }
            if self.eat_keyword(keywords::While) {
                let lo = self.prev_span;
                return self.parse_while_expr(None, lo, attrs);
            }
            // `'label:` may precede `while`, `for`, `loop` or a block.
            if let Some(label) = self.eat_label() {
                let lo = label.ident.span;
                self.expect(&token::Colon)?;
                if self.eat_keyword(keywords::While) {
                    return self.parse_while_expr(Some(label), lo, attrs)
                }
                if self.eat_keyword(keywords::For) {
                    return self.parse_for_expr(Some(label), lo, attrs)
                }
                if self.eat_keyword(keywords::Loop) {
                    return self.parse_loop_expr(Some(label), lo, attrs)
                }
                if self.token == token::OpenDelim(token::Brace) {
                    return self.parse_block_expr(Some(label),
                                                 lo,
                                                 BlockCheckMode::Default,
                                                 attrs);
                }
                let msg = "expected `while`, `for`, `loop` or `{` after a label";
                let mut err = self.fatal(msg);
                err.span_label(self.span, msg);
                return Err(err);
            }
            if self.eat_keyword(keywords::Loop) {
                let lo = self.prev_span;
                return self.parse_loop_expr(None, lo, attrs);
            }
            if self.eat_keyword(keywords::Continue) {
                let label = self.eat_label();
                let ex = ExprKind::Continue(label);
                let hi = self.prev_span;
                return Ok(self.mk_expr(lo.to(hi), ex, attrs));
            }
            if self.eat_keyword(keywords::Match) {
                return self.parse_match_expr(attrs);
            }
            if self.eat_keyword(keywords::Unsafe) {
                return self.parse_block_expr(
                    None,
                    lo,
                    BlockCheckMode::Unsafe(ast::UserProvided),
                    attrs);
            }
            if self.is_catch_expr() {
                // `do catch { … }` (pre-2018 try-block syntax).
                let lo = self.span;
                assert!(self.eat_keyword(keywords::Do));
                assert!(self.eat_keyword(keywords::Catch));
                return self.parse_catch_expr(lo, attrs);
            }
            if self.eat_keyword(keywords::Return) {
                if self.token.can_begin_expr() {
                    let e = self.parse_expr()?;
                    hi = e.span;
                    ex = ExprKind::Ret(Some(e));
                } else {
                    ex = ExprKind::Ret(None);
                }
            } else if self.eat_keyword(keywords::Break) {
                let label = self.eat_label();
                // A following `{` is only a break value when struct literals
                // are permitted in this context.
                let e = if self.token.can_begin_expr()
                           && !(self.token == token::OpenDelim(token::Brace)
                                && self.restrictions.contains(
                                       Restrictions::NO_STRUCT_LITERAL)) {
                    Some(self.parse_expr()?)
                } else {
                    None
                };
                ex = ExprKind::Break(label, e);
                hi = self.prev_span;
            } else if self.eat_keyword(keywords::Yield) {
                if self.token.can_begin_expr() {
                    let e = self.parse_expr()?;
                    hi = e.span;
                    ex = ExprKind::Yield(Some(e));
                } else {
                    ex = ExprKind::Yield(None);
                }
            } else if self.token.is_keyword(keywords::Let) {
                // Catch this syntax error here, instead of in `parse_ident`, so
                // that we can explicitly mention that let is not to be used as an expression
                let mut db = self.fatal("expected expression, found statement (`let`)");
                db.span_label(self.span, "expected expression");
                db.note("variable declaration using `let` is a statement");
                return Err(db);
            } else if self.token.is_path_start() {
                let pth = self.parse_path(PathStyle::Expr)?;

                // `!`, as an operator, is prefix, so we know this isn't that
                if self.eat(&token::Not) {
                    // MACRO INVOCATION expression
                    let (delim, tts) = self.expect_delimited_token_tree()?;
                    let hi = self.prev_span;
                    let node = Mac_ { path: pth, tts, delim };
                    return Ok(self.mk_mac_expr(lo.to(hi), node, attrs))
                }
                if self.check(&token::OpenDelim(token::Brace)) {
                    // This is a struct literal, unless we're prohibited
                    // from parsing struct literals here.
                    let prohibited = self.restrictions.contains(
                        Restrictions::NO_STRUCT_LITERAL
                    );
                    if !prohibited {
                        return self.parse_struct_expr(lo, pth, attrs);
                    }
                }

                hi = pth.span;
                ex = ExprKind::Path(None, pth);
            } else {
                match self.parse_literal_maybe_minus() {
                    Ok(expr) => {
                        hi = expr.span;
                        ex = expr.node.clone();
                    }
                    Err(mut err) => {
                        self.cancel(&mut err);
                        let msg = format!("expected expression, found {}",
                                          self.this_token_descr());
                        let mut err = self.fatal(&msg);
                        err.span_label(self.span, "expected expression");
                        return Err(err);
                    }
                }
            }
        }
    }

    let expr = Expr { node: ex, span: lo.to(hi), id: ast::DUMMY_NODE_ID, attrs };
    let expr = self.maybe_recover_from_bad_qpath(expr, true)?;

    return Ok(P(expr));
}

/// Parse a struct literal body `{ field, field: expr, .., ..base }`;
/// the path `pth` and the span start `lo` were parsed by the caller.
/// Continues below this chunk boundary.
fn parse_struct_expr(&mut self, lo: Span, pth: ast::Path, mut attrs: ThinVec<Attribute>)
                     -> PResult<'a, P<Expr>> {
    let struct_sp = lo.to(self.prev_span);
    self.bump();
    let mut fields = Vec::new();
    let mut base = None;

    attrs.extend(self.parse_inner_attributes()?);

    while self.token != token::CloseDelim(token::Brace) {
        if self.eat(&token::DotDot) {
            // `..base` — functional-update base expression.
            let exp_span = self.prev_span;
            match self.parse_expr() {
                Ok(e) => {
                    base = Some(e);
                }
                Err(mut e) => {
                    e.emit();
                    self.recover_stmt();
                }
            }
            if self.token == token::Comma {
                let mut err = self.sess.span_diagnostic.mut_span_err(
                    exp_span.to(self.prev_span),
                    "cannot use a comma after the base struct",
                );
                err.span_suggestion_short_with_applicability(
                    self.span,
                    "remove this comma",
                    "".to_owned(),
                    Applicability::MachineApplicable
                );
                err.note("the base struct must always be the last field");
                err.emit();
                self.recover_stmt();
            }
            break;
        }

        match self.parse_field() {
            Ok(f) => fields.push(f),
            Err(mut e) => {
                e.span_label(struct_sp, "while parsing this struct");
                e.emit();

                // If the next token is a comma, then try to parse
                // what comes next as additional fields, rather than
                // bailing out until next `}`.
                // (continuation of `parse_struct_expr`'s field-error recovery)
                if self.token != token::Comma {
                    self.recover_stmt();
                    break;
                }
            }
        }

        match self.expect_one_of(&[token::Comma],
                                 &[token::CloseDelim(token::Brace)]) {
            Ok(()) => {}
            Err(mut e) => {
                e.emit();
                self.recover_stmt();
                break;
            }
        }
    }

    let span = lo.to(self.span);
    self.expect(&token::CloseDelim(token::Brace))?;
    return Ok(self.mk_expr(span, ExprKind::Struct(pth, fields, base), attrs));
}

/// Return `already_parsed_attrs` if given, otherwise parse outer attributes now.
fn parse_or_use_outer_attributes(&mut self,
                                 already_parsed_attrs: Option<ThinVec<Attribute>>)
                                 -> PResult<'a, ThinVec<Attribute>> {
    if let Some(attrs) = already_parsed_attrs {
        Ok(attrs)
    } else {
        self.parse_outer_attributes().map(|a| a.into())
    }
}

/// Parse a block or unsafe block
fn parse_block_expr(&mut self, opt_label: Option<Label>,
                    lo: Span, blk_mode: BlockCheckMode,
                    outer_attrs: ThinVec<Attribute>)
                    -> PResult<'a, P<Expr>> {
    self.expect(&token::OpenDelim(token::Brace))?;

    let mut attrs = outer_attrs;
    attrs.extend(self.parse_inner_attributes()?);

    let blk = self.parse_block_tail(lo, blk_mode)?;
    return Ok(self.mk_expr(blk.span, ExprKind::Block(blk, opt_label), attrs));
}

/// parse a.b or a(13) or a[4] or just a
fn parse_dot_or_call_expr(&mut self,
                          already_parsed_attrs: Option<ThinVec<Attribute>>)
                          -> PResult<'a, P<Expr>> {
    let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?;

    let b = self.parse_bottom_expr();
    let (span, b) = self.interpolated_or_expr_span(b)?;
    self.parse_dot_or_call_expr_with(b, span, attrs)
}

/// Like `parse_dot_or_call_expr_with_`, but stitches `attrs` onto the result
/// and rejects attributes on `if`/`if let` expressions.
fn parse_dot_or_call_expr_with(&mut self,
                               e0: P<Expr>,
                               lo: Span,
                               mut attrs: ThinVec<Attribute>)
                               -> PResult<'a, P<Expr>> {
    // Stitch the list of outer attributes onto the return value.
    // A little bit ugly, but the best way given the current code
    // structure
    self.parse_dot_or_call_expr_with_(e0, lo)
    .map(|expr|
        expr.map(|mut expr| {
            attrs.extend::<Vec<_>>(expr.attrs.into());
            expr.attrs = attrs;
            match expr.node {
                ExprKind::If(..) | ExprKind::IfLet(..) => {
                    if !expr.attrs.is_empty() {
                        // Just point to the first attribute in there...
                        let span = expr.attrs[0].span;

                        self.span_err(span,
                            "attributes are not yet allowed on `if` \
                            expressions");
                    }
                }
                _ => {}
            }
            expr
        })
    )
}

// Assuming we have just parsed `.`, continue parsing into an expression.
fn parse_dot_suffix(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
    let segment = self.parse_path_segment(PathStyle::Expr, true)?;
    Ok(match self.token {
        token::OpenDelim(token::Paren) => {
            // Method call `expr.f()`
            let mut args = self.parse_unspanned_seq(
                &token::OpenDelim(token::Paren),
                &token::CloseDelim(token::Paren),
                SeqSep::trailing_allowed(token::Comma),
                |p| Ok(p.parse_expr()?)
            )?;
            // The receiver becomes the first "argument" of the method call.
            args.insert(0, self_arg);

            let span = lo.to(self.prev_span);
            self.mk_expr(span, ExprKind::MethodCall(segment, args), ThinVec::new())
        }
        _ => {
            // Field access `expr.f`
            if let Some(args) = segment.args {
                self.span_err(args.span(),
                              "field expressions may not have generic arguments");
            }

            let span = lo.to(self.prev_span);
            self.mk_expr(span, ExprKind::Field(self_arg, segment.ident), ThinVec::new())
        }
    })
}

/// Parse the postfix suffixes of an expression: `?`, `.field`/`.method(…)`
/// (including tuple-index and float-literal recovery), calls `(…)` and
/// indexing `[…]`. NOTE(review): this function continues past the end of
/// this chunk; the visible portion is reproduced unchanged.
fn parse_dot_or_call_expr_with_(&mut self, e0: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
    let mut e = e0;
    let mut hi;
    loop {
        // expr?
        while self.eat(&token::Question) {
            let hi = self.prev_span;
            e = self.mk_expr(lo.to(hi), ExprKind::Try(e), ThinVec::new());
        }

        // expr.f
        if self.eat(&token::Dot) {
            match self.token {
                token::Ident(..) => {
                    e = self.parse_dot_suffix(e, lo)?;
                }
                token::Literal(token::Integer(name), _) => {
                    // Tuple-field access `expr.0`.
                    let span = self.span;
                    self.bump();
                    let field = ExprKind::Field(e, Ident::new(name, span));
                    e = self.mk_expr(lo.to(span), field, ThinVec::new());
                }
                token::Literal(token::Float(n), _suf) => {
                    // `expr.0.1` lexes the suffix as a float literal; recover
                    // by suggesting `(expr.0).1`.
                    self.bump();
                    let fstr = n.as_str();
                    let mut err = self.diagnostic()
                        .struct_span_err(self.prev_span, &format!("unexpected token: `{}`", n));
                    err.span_label(self.prev_span, "unexpected token");
                    if fstr.chars().all(|x| "0123456789.".contains(x)) {
                        let float = match fstr.parse::<f64>().ok() {
                            Some(f) => f,
                            None => continue,
                        };
                        let sugg = pprust::to_string(|s| {
                            use print::pprust::PrintState;
                            s.popen()?;
                            s.print_expr(&e)?;
                            s.s.word( ".")?;
                            s.print_usize(float.trunc() as usize)?;
                            s.pclose()?;
                            s.s.word(".")?;
                            s.s.word(fstr.splitn(2, ".").last().unwrap())
                        });
                        err.span_suggestion_with_applicability(
                            lo.to(self.prev_span),
                            "try parenthesizing the first index",
                            sugg,
                            Applicability::MachineApplicable
                        );
                    }
                    return Err(err);

                }
                _ => {
                    // FIXME Could factor this out into non_fatal_unexpected or something.
                    let actual = self.this_token_to_string();
                    self.span_err(self.span, &format!("unexpected token: `{}`", actual));
                }
            }
            continue;
        }
        if self.expr_is_complete(&e) { break; }
        match self.token {
            // expr(...)
            token::OpenDelim(token::Paren) => {
                let es = self.parse_unspanned_seq(
                    &token::OpenDelim(token::Paren),
                    &token::CloseDelim(token::Paren),
                    SeqSep::trailing_allowed(token::Comma),
                    |p| Ok(p.parse_expr()?)
                )?;
                hi = self.prev_span;

                let nd = self.mk_call(e, es);
                e = self.mk_expr(lo.to(hi), nd, ThinVec::new());
            }
            // expr[...]
            // Could be either an index expression or a slicing expression.
token::OpenDelim(token::Bracket) => { self.bump(); let ix = self.parse_expr()?; hi = self.span; self.expect(&token::CloseDelim(token::Bracket))?; let index = self.mk_index(e, ix); e = self.mk_expr(lo.to(hi), index, ThinVec::new()) } _ => return Ok(e) } } return Ok(e); } crate fn process_potential_macro_variable(&mut self) { let (token, span) = match self.token { token::Dollar if self.span.ctxt() != syntax_pos::hygiene::SyntaxContext::empty() && self.look_ahead(1, |t| t.is_ident()) => { self.bump(); let name = match self.token { token::Ident(ident, _) => ident, _ => unreachable!() }; let mut err = self.fatal(&format!("unknown macro variable `{}`", name)); err.span_label(self.span, "unknown macro variable"); err.emit(); return } token::Interpolated(ref nt) => { self.meta_var_span = Some(self.span); // Interpolated identifier and lifetime tokens are replaced with usual identifier // and lifetime tokens, so the former are never encountered during normal parsing. match nt.0 { token::NtIdent(ident, is_raw) => (token::Ident(ident, is_raw), ident.span), token::NtLifetime(ident) => (token::Lifetime(ident), ident.span), _ => return, } } _ => return, }; self.token = token; self.span = span; } /// parse a single token tree from the input. crate fn parse_token_tree(&mut self) -> TokenTree { match self.token { token::OpenDelim(..) => { let frame = mem::replace(&mut self.token_cursor.frame, self.token_cursor.stack.pop().unwrap()); self.span = frame.span; self.bump(); TokenTree::Delimited(frame.span, Delimited { delim: frame.delim, tts: frame.tree_cursor.original_stream().into(), }) }, token::CloseDelim(_) | token::Eof => unreachable!(), _ => { let (token, span) = (mem::replace(&mut self.token, token::Whitespace), self.span); self.bump(); TokenTree::Token(span, token) } } } // parse a stream of tokens into a list of TokenTree's, // up to EOF. 
pub fn parse_all_token_trees(&mut self) -> PResult<'a, Vec<TokenTree>> {
    let mut tts = Vec::new();
    while self.token != token::Eof {
        tts.push(self.parse_token_tree());
    }
    Ok(tts)
}

/// Collect token trees into a `TokenStream` until EOF or an unmatched
/// closing delimiter is reached (the latter is left unconsumed).
pub fn parse_tokens(&mut self) -> TokenStream {
    let mut result = Vec::new();
    loop {
        match self.token {
            token::Eof | token::CloseDelim(..) => break,
            _ => result.push(self.parse_token_tree().into()),
        }
    }
    TokenStream::concat(result)
}

/// Parse a prefix-unary-operator expr
fn parse_prefix_expr(&mut self,
                     already_parsed_attrs: Option<ThinVec<Attribute>>)
                     -> PResult<'a, P<Expr>> {
    let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?;
    let lo = self.span;
    // Note: when adding new unary operators, don't forget to adjust Token::can_begin_expr()
    let (hi, ex) = match self.token {
        token::Not => {
            self.bump();
            let e = self.parse_prefix_expr(None);
            let (span, e) = self.interpolated_or_expr_span(e)?;
            (lo.to(span), self.mk_unary(UnOp::Not, e))
        }
        // Suggest `!` for bitwise negation when encountering a `~`
        token::Tilde => {
            self.bump();
            let e = self.parse_prefix_expr(None);
            let (span, e) = self.interpolated_or_expr_span(e)?;
            let span_of_tilde = lo;
            let mut err = self.diagnostic()
                .struct_span_err(span_of_tilde, "`~` cannot be used as a unary operator");
            err.span_suggestion_short_with_applicability(
                span_of_tilde,
                "use `!` to perform bitwise negation",
                "!".to_owned(),
                Applicability::MachineApplicable
            );
            err.emit();
            // Recover as if the user had written `!e`.
            (lo.to(span), self.mk_unary(UnOp::Not, e))
        }
        token::BinOp(token::Minus) => {
            self.bump();
            let e = self.parse_prefix_expr(None);
            let (span, e) = self.interpolated_or_expr_span(e)?;
            (lo.to(span), self.mk_unary(UnOp::Neg, e))
        }
        token::BinOp(token::Star) => {
            self.bump();
            let e = self.parse_prefix_expr(None);
            let (span, e) = self.interpolated_or_expr_span(e)?;
            (lo.to(span), self.mk_unary(UnOp::Deref, e))
        }
        token::BinOp(token::And) | token::AndAnd => {
            self.expect_and()?;
            let m = self.parse_mutability();
            let e = self.parse_prefix_expr(None);
            let (span, e) = self.interpolated_or_expr_span(e)?;
            (lo.to(span), ExprKind::AddrOf(m, e))
        }
        token::Ident(..) if self.token.is_keyword(keywords::In) => {
            // Obsolete `in PLACE { EXPR }` placement syntax, kept only so
            // a dedicated diagnostic can be emitted later.
            self.bump();
            let place = self.parse_expr_res(
                Restrictions::NO_STRUCT_LITERAL,
                None,
            )?;
            let blk = self.parse_block()?;
            let span = blk.span;
            let blk_expr = self.mk_expr(span, ExprKind::Block(blk, None), ThinVec::new());
            (lo.to(span), ExprKind::ObsoleteInPlace(place, blk_expr))
        }
        token::Ident(..) if self.token.is_keyword(keywords::Box) => {
            self.bump();
            let e = self.parse_prefix_expr(None);
            let (span, e) = self.interpolated_or_expr_span(e)?;
            (lo.to(span), ExprKind::Box(e))
        }
        token::Ident(..) if self.token.is_ident_named("not") => {
            // `not` is just an ordinary identifier in Rust-the-language,
            // but as `rustc`-the-compiler, we can issue clever diagnostics
            // for confused users who really want to say `!`
            let token_cannot_continue_expr = |t: &token::Token| match *t {
                // These tokens can start an expression after `!`, but
                // can't continue an expression after an ident
                token::Ident(ident, is_raw) => token::ident_can_begin_expr(ident, is_raw),
                token::Literal(..) | token::Pound => true,
                token::Interpolated(ref nt) => match nt.0 {
                    token::NtIdent(..) | token::NtExpr(..) |
                    token::NtBlock(..) | token::NtPath(..) => true,
                    _ => false,
                },
                _ => false
            };
            let cannot_continue_expr = self.look_ahead(1, token_cannot_continue_expr);
            if cannot_continue_expr {
                self.bump();
                // Emit the error ...
                let mut err = self.diagnostic()
                    .struct_span_err(self.span,
                                     &format!("unexpected {} after identifier",
                                              self.this_token_descr()));
                // span the `not` plus trailing whitespace to avoid
                // trailing whitespace after the `!` in our suggestion
                let to_replace = self.sess.source_map()
                    .span_until_non_whitespace(lo.to(self.span));
                err.span_suggestion_short_with_applicability(
                    to_replace,
                    "use `!` to perform logical negation",
                    "!".to_owned(),
                    Applicability::MachineApplicable
                );
                err.emit();
                // —and recover! (just as if we were in the block
                // for the `token::Not` arm)
                let e = self.parse_prefix_expr(None);
                let (span, e) = self.interpolated_or_expr_span(e)?;
                (lo.to(span), self.mk_unary(UnOp::Not, e))
            } else {
                return self.parse_dot_or_call_expr(Some(attrs));
            }
        }
        _ => {
            return self.parse_dot_or_call_expr(Some(attrs));
        }
    };
    return Ok(self.mk_expr(lo.to(hi), ex, attrs));
}

/// Parse an associative expression
///
/// This parses an expression accounting for associativity and precedence of the operators in
/// the expression.
fn parse_assoc_expr(&mut self,
                    already_parsed_attrs: Option<ThinVec<Attribute>>)
                    -> PResult<'a, P<Expr>> {
    self.parse_assoc_expr_with(0, already_parsed_attrs.into())
}

/// Parse an associative expression with operators of at least `min_prec` precedence
fn parse_assoc_expr_with(&mut self,
                         min_prec: usize,
                         lhs: LhsExpr)
                         -> PResult<'a, P<Expr>> {
    let mut lhs = if let LhsExpr::AlreadyParsed(expr) = lhs {
        expr
    } else {
        let attrs = match lhs {
            LhsExpr::AttributesParsed(attrs) => Some(attrs),
            _ => None,
        };
        if [token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token) {
            return self.parse_prefix_range_expr(attrs);
        } else {
            self.parse_prefix_expr(attrs)?
        }
    };

    if self.expr_is_complete(&lhs) {
        // Semi-statement forms are odd. See https://github.com/rust-lang/rust/issues/29071
        return Ok(lhs);
    }
    self.expected_tokens.push(TokenType::Operator);
    while let Some(op) = AssocOp::from_token(&self.token) {
        // Adjust the span for interpolated LHS to point to the `$lhs` token and not to what
        // it refers to. Interpolated identifiers are unwrapped early and never show up here
        // as `PrevTokenKind::Interpolated` so if LHS is a single identifier we always process
        // it as "interpolated", it doesn't change the answer for non-interpolated idents.
let lhs_span = match (self.prev_token_kind, &lhs.node) { (PrevTokenKind::Interpolated, _) => self.prev_span, (PrevTokenKind::Ident, &ExprKind::Path(None, ref path)) if path.segments.len() == 1 => self.prev_span, _ => lhs.span, }; let cur_op_span = self.span; let restrictions = if op.is_assign_like() { self.restrictions & Restrictions::NO_STRUCT_LITERAL } else { self.restrictions }; if op.precedence() < min_prec { break; } // Check for deprecated `...` syntax if self.token == token::DotDotDot && op == AssocOp::DotDotEq { self.err_dotdotdot_syntax(self.span); } self.bump(); if op.is_comparison() { self.check_no_chained_comparison(&lhs, &op); } // Special cases: if op == AssocOp::As { lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Cast)?; continue } else if op == AssocOp::Colon { lhs = match self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Type) { Ok(lhs) => lhs, Err(mut err) => { err.span_label(self.span, "expecting a type here because of type ascription"); let cm = self.sess.source_map(); let cur_pos = cm.lookup_char_pos(self.span.lo()); let op_pos = cm.lookup_char_pos(cur_op_span.hi()); if cur_pos.line != op_pos.line { err.span_suggestion_with_applicability( cur_op_span, "try using a semicolon", ";".to_string(), Applicability::MaybeIncorrect // speculative ); } return Err(err); } }; continue } else if op == AssocOp::DotDot || op == AssocOp::DotDotEq { // If we didn’t have to handle `x..`/`x..=`, it would be pretty easy to // generalise it to the Fixity::None code. // // We have 2 alternatives here: `x..y`/`x..=y` and `x..`/`x..=` The other // two variants are handled with `parse_prefix_range_expr` call above. let rhs = if self.is_at_start_of_range_notation_rhs() { Some(self.parse_assoc_expr_with(op.precedence() + 1, LhsExpr::NotYetParsed)?) 
} else { None }; let (lhs_span, rhs_span) = (lhs.span, if let Some(ref x) = rhs { x.span } else { cur_op_span }); let limits = if op == AssocOp::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed }; let r = try!(self.mk_range(Some(lhs), rhs, limits)); lhs = self.mk_expr(lhs_span.to(rhs_span), r, ThinVec::new()); break } let rhs = match op.fixity() { Fixity::Right => self.with_res( restrictions - Restrictions::STMT_EXPR, |this| { this.parse_assoc_expr_with(op.precedence(), LhsExpr::NotYetParsed) }), Fixity::Left => self.with_res( restrictions - Restrictions::STMT_EXPR, |this| { this.parse_assoc_expr_with(op.precedence() + 1, LhsExpr::NotYetParsed) }), // We currently have no non-associative operators that are not handled above by // the special cases. The code is here only for future convenience. Fixity::None => self.with_res( restrictions - Restrictions::STMT_EXPR, |this| { this.parse_assoc_expr_with(op.precedence() + 1, LhsExpr::NotYetParsed) }), }?; let span = lhs_span.to(rhs.span); lhs = match op { AssocOp::Add | AssocOp::Subtract | AssocOp::Multiply | AssocOp::Divide | AssocOp::Modulus | AssocOp::LAnd | AssocOp::LOr | AssocOp::BitXor | AssocOp::BitAnd | AssocOp::BitOr | AssocOp::ShiftLeft | AssocOp::ShiftRight | AssocOp::Equal | AssocOp::Less | AssocOp::LessEqual | AssocOp::NotEqual | AssocOp::Greater | AssocOp::GreaterEqual => { let ast_op = op.to_ast_binop().unwrap(); let binary = self.mk_binary(source_map::respan(cur_op_span, ast_op), lhs, rhs); self.mk_expr(span, binary, ThinVec::new()) } AssocOp::Assign => self.mk_expr(span, ExprKind::Assign(lhs, rhs), ThinVec::new()), AssocOp::ObsoleteInPlace => self.mk_expr(span, ExprKind::ObsoleteInPlace(lhs, rhs), ThinVec::new()), AssocOp::AssignOp(k) => { let aop = match k { token::Plus => BinOpKind::Add, token::Minus => BinOpKind::Sub, token::Star => BinOpKind::Mul, token::Slash => BinOpKind::Div, token::Percent => BinOpKind::Rem, token::Caret => BinOpKind::BitXor, token::And => BinOpKind::BitAnd, token::Or 
=> BinOpKind::BitOr, token::Shl => BinOpKind::Shl, token::Shr => BinOpKind::Shr, }; let aopexpr = self.mk_assign_op(source_map::respan(cur_op_span, aop), lhs, rhs); self.mk_expr(span, aopexpr, ThinVec::new()) } AssocOp::As | AssocOp::Colon | AssocOp::DotDot | AssocOp::DotDotEq => { self.bug("AssocOp should have been handled by special case") } }; if op.fixity() == Fixity::None { break } } Ok(lhs) } fn parse_assoc_op_cast(&mut self, lhs: P<Expr>, lhs_span: Span, expr_kind: fn(P<Expr>, P<Ty>) -> ExprKind) -> PResult<'a, P<Expr>> { let mk_expr = |this: &mut Self, rhs: P<Ty>| { this.mk_expr(lhs_span.to(rhs.span), expr_kind(lhs, rhs), ThinVec::new()) }; // Save the state of the parser before parsing type normally, in case there is a // LessThan comparison after this cast. let parser_snapshot_before_type = self.clone(); match self.parse_ty_no_plus() { Ok(rhs) => { Ok(mk_expr(self, rhs)) } Err(mut type_err) => { // Rewind to before attempting to parse the type with generics, to recover // from situations like `x as usize < y` in which we first tried to parse // `usize < y` as a type with generic arguments. let parser_snapshot_after_type = self.clone(); mem::replace(self, parser_snapshot_before_type); match self.parse_path(PathStyle::Expr) { Ok(path) => { let (op_noun, op_verb) = match self.token { token::Lt => ("comparison", "comparing"), token::BinOp(token::Shl) => ("shift", "shifting"), _ => { // We can end up here even without `<` being the next token, for // example because `parse_ty_no_plus` returns `Err` on keywords, // but `parse_path` returns `Ok` on them due to error recovery. // Return original error and parser state. mem::replace(self, parser_snapshot_after_type); return Err(type_err); } }; // Successfully parsed the type path leaving a `<` yet to parse. type_err.cancel(); // Report non-fatal diagnostics, keep `x as usize` as an expression // in AST and continue parsing. 
let msg = format!("`<` is interpreted as a start of generic \ arguments for `{}`, not a {}", path, op_noun); let mut err = self.sess.span_diagnostic.struct_span_err(self.span, &msg); err.span_label(self.look_ahead_span(1).to(parser_snapshot_after_type.span), "interpreted as generic arguments"); err.span_label(self.span, format!("not interpreted as {}", op_noun)); let expr = mk_expr(self, P(Ty { span: path.span, node: TyKind::Path(None, path), id: ast::DUMMY_NODE_ID })); let expr_str = self.sess.source_map().span_to_snippet(expr.span) .unwrap_or(pprust::expr_to_string(&expr)); err.span_suggestion_with_applicability( expr.span, &format!("try {} the cast value", op_verb), format!("({})", expr_str), Applicability::MachineApplicable ); err.emit(); Ok(expr) } Err(mut path_err) => { // Couldn't parse as a path, return original error and parser state. path_err.cancel(); mem::replace(self, parser_snapshot_after_type); Err(type_err) } } } } } /// Produce an error if comparison operators are chained (RFC #558). 
/// We only need to check lhs, not rhs, because all comparison ops /// have same precedence and are left-associative fn check_no_chained_comparison(&mut self, lhs: &Expr, outer_op: &AssocOp) { debug_assert!(outer_op.is_comparison(), "check_no_chained_comparison: {:?} is not comparison", outer_op); match lhs.node { ExprKind::Binary(op, _, _) if op.node.is_comparison() => { // respan to include both operators let op_span = op.span.to(self.span); let mut err = self.diagnostic().struct_span_err(op_span, "chained comparison operators require parentheses"); if op.node == BinOpKind::Lt && *outer_op == AssocOp::Less || // Include `<` to provide this recommendation *outer_op == AssocOp::Greater // even in a case like the following: { // Foo<Bar<Baz<Qux, ()>>> err.help( "use `::<...>` instead of `<...>` if you meant to specify type arguments"); err.help("or use `(...)` if you meant to specify fn arguments"); } err.emit(); } _ => {} } } /// Parse prefix-forms of range notation: `..expr`, `..`, `..=expr` fn parse_prefix_range_expr(&mut self, already_parsed_attrs: Option<ThinVec<Attribute>>) -> PResult<'a, P<Expr>> { // Check for deprecated `...` syntax if self.token == token::DotDotDot { self.err_dotdotdot_syntax(self.span); } debug_assert!([token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token), "parse_prefix_range_expr: token {:?} is not DotDot/DotDotEq", self.token); let tok = self.token.clone(); let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?; let lo = self.span; let mut hi = self.span; self.bump(); let opt_end = if self.is_at_start_of_range_notation_rhs() { // RHS must be parsed with more associativity than the dots. let next_prec = AssocOp::from_token(&tok).unwrap().precedence() + 1; Some(self.parse_assoc_expr_with(next_prec, LhsExpr::NotYetParsed) .map(|x|{ hi = x.span; x })?) 
} else { None }; let limits = if tok == token::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed }; let r = try!(self.mk_range(None, opt_end, limits)); Ok(self.mk_expr(lo.to(hi), r, attrs)) } fn is_at_start_of_range_notation_rhs(&self) -> bool { if self.token.can_begin_expr() { // parse `for i in 1.. { }` as infinite loop, not as `for i in (1..{})`. if self.token == token::OpenDelim(token::Brace) { return !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL); } true } else { false } } /// Parse an 'if' or 'if let' expression ('if' token already eaten) fn parse_if_expr(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { if self.check_keyword(keywords::Let) { return self.parse_if_let_expr(attrs); } let lo = self.prev_span; let cond = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; // Verify that the parsed `if` condition makes sense as a condition. If it is a block, then // verify that the last statement is either an implicit return (no `;`) or an explicit // return. This won't catch blocks with an explicit `return`, but that would be caught by // the dead code lint. 
if self.eat_keyword(keywords::Else) || !cond.returns() { let sp = self.sess.source_map().next_point(lo); let mut err = self.diagnostic() .struct_span_err(sp, "missing condition for `if` statemement"); err.span_label(sp, "expected if condition here"); return Err(err) } let not_block = self.token != token::OpenDelim(token::Brace); let thn = self.parse_block().map_err(|mut err| { if not_block { err.span_label(lo, "this `if` statement has a condition, but no block"); } err })?; let mut els: Option<P<Expr>> = None; let mut hi = thn.span; if self.eat_keyword(keywords::Else) { let elexpr = self.parse_else_expr()?; hi = elexpr.span; els = Some(elexpr); } Ok(self.mk_expr(lo.to(hi), ExprKind::If(cond, thn, els), attrs)) } /// Parse an 'if let' expression ('if' token already eaten) fn parse_if_let_expr(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let lo = self.prev_span; self.expect_keyword(keywords::Let)?; let pats = self.parse_pats()?; self.expect(&token::Eq)?; let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; let thn = self.parse_block()?; let (hi, els) = if self.eat_keyword(keywords::Else) { let expr = self.parse_else_expr()?; (expr.span, Some(expr)) } else { (thn.span, None) }; Ok(self.mk_expr(lo.to(hi), ExprKind::IfLet(pats, expr, thn, els), attrs)) } // `move |args| expr` fn parse_lambda_expr(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let lo = self.span; let movability = if self.eat_keyword(keywords::Static) { Movability::Static } else { Movability::Movable }; let asyncness = if self.span.edition() >= Edition::Edition2018 { self.parse_asyncness() } else { IsAsync::NotAsync }; let capture_clause = if self.eat_keyword(keywords::Move) { CaptureBy::Value } else { CaptureBy::Ref }; let decl = self.parse_fn_block_decl()?; let decl_hi = self.prev_span; let body = match decl.output { FunctionRetTy::Default(_) => { let restrictions = self.restrictions - Restrictions::STMT_EXPR; self.parse_expr_res(restrictions, 
                                None)?
        },
        _ => {
            // If an explicit return type is given, require a
            // block to appear (RFC 968).
            let body_lo = self.span;
            self.parse_block_expr(None, body_lo, BlockCheckMode::Default, ThinVec::new())?
        }
    };

    Ok(self.mk_expr(
        lo.to(body.span),
        ExprKind::Closure(capture_clause, asyncness, movability, decl, body, lo.to(decl_hi)),
        attrs))
}

// `else` token already eaten
fn parse_else_expr(&mut self) -> PResult<'a, P<Expr>> {
    if self.eat_keyword(keywords::If) {
        return self.parse_if_expr(ThinVec::new());
    } else {
        let blk = self.parse_block()?;
        return Ok(self.mk_expr(blk.span, ExprKind::Block(blk, None), ThinVec::new()));
    }
}

/// Parse a 'for' .. 'in' expression ('for' token already eaten)
fn parse_for_expr(&mut self, opt_label: Option<Label>,
                  span_lo: Span,
                  mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> {
    // Parse: `for <src_pat> in <src_expr> <src_loop_block>`

    let pat = self.parse_top_level_pat()?;
    if !self.eat_keyword(keywords::In) {
        // Recover from a missing `in` with a suggestion, then keep parsing.
        let in_span = self.prev_span.between(self.span);
        let mut err = self.sess.span_diagnostic
            .struct_span_err(in_span, "missing `in` in `for` loop");
        err.span_suggestion_short_with_applicability(
            in_span, "try adding `in` here", " in ".into(),
            // has been misleading, at least in the past (closed Issue #48492)
            Applicability::MaybeIncorrect
        );
        err.emit();
    }
    let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;
    let (iattrs, loop_block) = self.parse_inner_attrs_and_block()?;
    attrs.extend(iattrs);

    let hi = self.prev_span;
    Ok(self.mk_expr(span_lo.to(hi), ExprKind::ForLoop(pat, expr, loop_block, opt_label), attrs))
}

/// Parse a 'while' or 'while let' expression ('while' token already eaten)
fn parse_while_expr(&mut self, opt_label: Option<Label>,
                    span_lo: Span,
                    mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> {
    if self.token.is_keyword(keywords::Let) {
        return self.parse_while_let_expr(opt_label, span_lo, attrs);
    }
    let cond = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;
    let (iattrs, body) = self.parse_inner_attrs_and_block()?;
    attrs.extend(iattrs);
    let span = span_lo.to(body.span);
    return Ok(self.mk_expr(span, ExprKind::While(cond, body, opt_label), attrs));
}

/// Parse a 'while let' expression ('while' token already eaten)
fn parse_while_let_expr(&mut self, opt_label: Option<Label>,
                        span_lo: Span,
                        mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> {
    self.expect_keyword(keywords::Let)?;
    let pats = self.parse_pats()?;
    self.expect(&token::Eq)?;
    let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;
    let (iattrs, body) = self.parse_inner_attrs_and_block()?;
    attrs.extend(iattrs);
    let span = span_lo.to(body.span);
    return Ok(self.mk_expr(span, ExprKind::WhileLet(pats, expr, body, opt_label), attrs));
}

// parse `loop {...}`, `loop` token already eaten
fn parse_loop_expr(&mut self, opt_label: Option<Label>,
                   span_lo: Span,
                   mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> {
    let (iattrs, body) = self.parse_inner_attrs_and_block()?;
    attrs.extend(iattrs);
    let span = span_lo.to(body.span);
    Ok(self.mk_expr(span, ExprKind::Loop(body, opt_label), attrs))
}

/// Parse an `async move {...}` expression
pub fn parse_async_block(&mut self, mut attrs: ThinVec<Attribute>)
    -> PResult<'a, P<Expr>>
{
    let span_lo = self.span;
    self.expect_keyword(keywords::Async)?;
    let capture_clause = if self.eat_keyword(keywords::Move) {
        CaptureBy::Value
    } else {
        CaptureBy::Ref
    };
    let (iattrs, body) = self.parse_inner_attrs_and_block()?;
    attrs.extend(iattrs);
    Ok(self.mk_expr(
        span_lo.to(body.span),
        ExprKind::Async(capture_clause, ast::DUMMY_NODE_ID, body), attrs))
}

/// Parse a `do catch {...}` expression (`do catch` token already eaten)
fn parse_catch_expr(&mut self, span_lo: Span, mut attrs: ThinVec<Attribute>)
    -> PResult<'a, P<Expr>>
{
    let (iattrs, body) = self.parse_inner_attrs_and_block()?;
    attrs.extend(iattrs);
    Ok(self.mk_expr(span_lo.to(body.span), ExprKind::Catch(body), attrs))
}

// `match` token already eaten
fn parse_match_expr(&mut self, mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> {
    let match_span = self.prev_span;
    let lo = self.prev_span;
    let discriminant = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL,
                                           None)?;
    if let Err(mut e) = self.expect(&token::OpenDelim(token::Brace)) {
        // `match expr;` — suggest removing the `match` entirely.
        if self.token == token::Token::Semi {
            e.span_suggestion_short_with_applicability(
                match_span,
                "try removing this `match`",
                "".to_owned(),
                Applicability::MaybeIncorrect // speculative
            );
        }
        return Err(e)
    }
    attrs.extend(self.parse_inner_attributes()?);

    let mut arms: Vec<Arm> = Vec::new();
    while self.token != token::CloseDelim(token::Brace) {
        match self.parse_arm() {
            Ok(arm) => arms.push(arm),
            Err(mut e) => {
                // Recover by skipping to the end of the block.
                e.emit();
                self.recover_stmt();
                let span = lo.to(self.span);
                if self.token == token::CloseDelim(token::Brace) {
                    self.bump();
                }
                return Ok(self.mk_expr(span, ExprKind::Match(discriminant, arms), attrs));
            }
        }
    }
    let hi = self.span;
    self.bump();
    return Ok(self.mk_expr(lo.to(hi), ExprKind::Match(discriminant, arms), attrs));
}

/// Parse a single `match` arm: `| pat | pat if guard => expr,`.
/// A missing comma between arms is detected heuristically and suggested.
crate fn parse_arm(&mut self) -> PResult<'a, Arm> {
    maybe_whole!(self, NtArm, |x| x);

    let attrs = self.parse_outer_attributes()?;
    // Allow a '|' before the pats (RFC 1925)
    self.eat(&token::BinOp(token::Or));
    let pats = self.parse_pats()?;
    let guard = if self.eat_keyword(keywords::If) {
        Some(self.parse_expr()?)
    } else {
        None
    };
    let arrow_span = self.span;
    self.expect(&token::FatArrow)?;
    let arm_start_span = self.span;

    let expr = self.parse_expr_res(Restrictions::STMT_EXPR, None)
        .map_err(|mut err| {
            err.span_label(arrow_span, "while parsing the `match` arm starting here");
            err
        })?;

    let require_comma = classify::expr_requires_semi_to_be_stmt(&expr)
        && self.token != token::CloseDelim(token::Brace);

    if require_comma {
        let cm = self.sess.source_map();
        self.expect_one_of(&[token::Comma], &[token::CloseDelim(token::Brace)])
            .map_err(|mut err| {
                match (cm.span_to_lines(expr.span), cm.span_to_lines(arm_start_span)) {
                    (Ok(ref expr_lines), Ok(ref arm_start_lines))
                    if arm_start_lines.lines[0].end_col == expr_lines.lines[0].end_col
                        && expr_lines.lines.len() == 2
                        && self.token == token::FatArrow => {
                        // We check whether there's any trailing code in the parse span,
                        // if there isn't, we very likely have the following:
                        //
                        // X |     &Y => "y"
                        //   |        --    - missing comma
                        //   |        |
                        //   |        arrow_span
                        // X |     &X => "x"
                        //   |      - ^^ self.span
                        //   |      |
                        //   |      parsed until here as `"y" & X`
                        err.span_suggestion_short_with_applicability(
                            cm.next_point(arm_start_span),
                            "missing a comma here to end this `match` arm",
                            ",".to_owned(),
                            Applicability::MachineApplicable
                        );
                    }
                    _ => {
                        err.span_label(arrow_span,
                                       "while parsing the `match` arm starting here");
                    }
                }
                err
            })?;
    } else {
        self.eat(&token::Comma);
    }

    Ok(ast::Arm {
        attrs,
        pats,
        guard,
        body: expr,
    })
}

/// Parse an expression
pub fn parse_expr(&mut self) -> PResult<'a, P<Expr>> {
    self.parse_expr_res(Restrictions::empty(), None)
}

/// Evaluate the closure with restrictions in place.
///
/// After the closure is evaluated, restrictions are reset.
fn with_res<F, T>(&mut self, r: Restrictions, f: F) -> T where F: FnOnce(&mut Self) -> T { let old = self.restrictions; self.restrictions = r; let r = f(self); self.restrictions = old; return r; } /// Parse an expression, subject to the given restrictions fn parse_expr_res(&mut self, r: Restrictions, already_parsed_attrs: Option<ThinVec<Attribute>>) -> PResult<'a, P<Expr>> { self.with_res(r, |this| this.parse_assoc_expr(already_parsed_attrs)) } /// Parse the RHS of a local variable declaration (e.g. '= 14;') fn parse_initializer(&mut self, skip_eq: bool) -> PResult<'a, Option<P<Expr>>> { if self.check(&token::Eq) { self.bump(); Ok(Some(self.parse_expr()?)) } else if skip_eq { Ok(Some(self.parse_expr()?)) } else { Ok(None) } } /// Parse patterns, separated by '|' s fn parse_pats(&mut self) -> PResult<'a, Vec<P<Pat>>> { let mut pats = Vec::new(); loop { pats.push(self.parse_top_level_pat()?); if self.token == token::OrOr { let mut err = self.struct_span_err(self.span, "unexpected token `||` after pattern"); err.span_suggestion_with_applicability( self.span, "use a single `|` to specify multiple patterns", "|".to_owned(), Applicability::MachineApplicable ); err.emit(); self.bump(); } else if self.check(&token::BinOp(token::Or)) { self.bump(); } else { return Ok(pats); } }; } // Parses a parenthesized list of patterns like // `()`, `(p)`, `(p,)`, `(p, q)`, or `(p, .., q)`. Returns: // - a vector of the patterns that were parsed // - an option indicating the index of the `..` element // - a boolean indicating whether a trailing comma was present. // Trailing commas are significant because (p) and (p,) are different patterns. 
fn parse_parenthesized_pat_list(&mut self) -> PResult<'a, (Vec<P<Pat>>, Option<usize>, bool)> {
    self.expect(&token::OpenDelim(token::Paren))?;
    let result = self.parse_pat_list()?;
    self.expect(&token::CloseDelim(token::Paren))?;
    Ok(result)
}

// Parses the comma-separated pattern list inside parentheses (delimiters are
// handled by the caller). Returns the patterns, the index of a `..` element
// if one was seen, and whether a trailing comma was present.
fn parse_pat_list(&mut self) -> PResult<'a, (Vec<P<Pat>>, Option<usize>, bool)> {
    let mut fields = Vec::new();
    let mut ddpos = None;
    let mut trailing_comma = false;
    loop {
        if self.eat(&token::DotDot) {
            // Only the first `..` is recorded; subsequent ones are reported
            // but otherwise ignored so parsing can continue.
            if ddpos.is_none() {
                ddpos = Some(fields.len());
            } else {
                // Emit a friendly error, ignore `..` and continue parsing
                self.span_err(self.prev_span,
                              "`..` can only be used once per tuple or tuple struct pattern");
            }
        } else if !self.check(&token::CloseDelim(token::Paren)) {
            fields.push(self.parse_pat()?);
        } else {
            break
        }

        trailing_comma = self.eat(&token::Comma);
        if !trailing_comma {
            break
        }
    }

    if ddpos == Some(fields.len()) && trailing_comma {
        // `..` needs to be followed by `)` or `, pat`, `..,)` is disallowed.
        self.span_err(self.prev_span, "trailing comma is not permitted after `..`");
    }

    Ok((fields, ddpos, trailing_comma))
}

// Parses the elements of a slice pattern `[before.., slice.., after..]`
// (the `[` has already been consumed; the `]` is left for the caller).
// At most one `..`-marked element is accepted: a bare `..` becomes a
// wildcard slice subpattern, `pat..` captures the middle as `pat`.
fn parse_pat_vec_elements(
    &mut self,
) -> PResult<'a, (Vec<P<Pat>>, Option<P<Pat>>, Vec<P<Pat>>)> {
    let mut before = Vec::new();
    let mut slice = None;
    let mut after = Vec::new();
    let mut first = true;
    // True until the `..` element is seen; decides which bucket a
    // subpattern lands in.
    let mut before_slice = true;

    while self.token != token::CloseDelim(token::Bracket) {
        if first {
            first = false;
        } else {
            self.expect(&token::Comma)?;

            // Permit a trailing comma before `]`.
            if self.token == token::CloseDelim(token::Bracket)
                    && (before_slice || !after.is_empty()) {
                break
            }
        }

        if before_slice {
            if self.eat(&token::DotDot) {
                // A bare `..` (followed by `,` or `]`) stands for an
                // unnamed "rest" element; represent it as a wildcard.
                if self.check(&token::Comma) ||
                        self.check(&token::CloseDelim(token::Bracket)) {
                    slice = Some(P(Pat {
                        id: ast::DUMMY_NODE_ID,
                        node: PatKind::Wild,
                        span: self.prev_span,
                    }));
                    before_slice = false;
                }
                continue
            }
        }

        let subpat = self.parse_pat()?;
        if before_slice && self.eat(&token::DotDot) {
            // `pat..` names the middle of the slice.
            slice = Some(subpat);
            before_slice = false;
        } else if before_slice {
            before.push(subpat);
        } else {
            after.push(subpat);
        }
    }

    Ok((before, slice, after))
}

// Parses a single field of a struct pattern: either `fieldname: pat` or
// the shorthand form `(box) (ref) (mut) fieldname`.
fn parse_pat_field(
    &mut self,
    lo: Span,
    attrs: Vec<Attribute>
) -> PResult<'a, source_map::Spanned<ast::FieldPat>> {
    // Check if a colon exists one ahead. This means we're parsing a fieldname.
    let hi;
    let (subpat, fieldname, is_shorthand) = if self.look_ahead(1, |t| t == &token::Colon) {
        // Parsing a pattern of the form "fieldname: pat"
        let fieldname = self.parse_field_name()?;
        self.bump();
        let pat = self.parse_pat()?;
        hi = pat.span;
        (pat, fieldname, false)
    } else {
        // Parsing a pattern of the form "(box) (ref) (mut) fieldname"
        let is_box = self.eat_keyword(keywords::Box);
        let boxed_span = self.span;
        let is_ref = self.eat_keyword(keywords::Ref);
        let is_mut = self.eat_keyword(keywords::Mut);
        let fieldname = self.parse_ident()?;
        hi = self.prev_span;

        // Map the `ref`/`mut` modifier combination onto a binding mode.
        let bind_type = match (is_ref, is_mut) {
            (true, true) => BindingMode::ByRef(Mutability::Mutable),
            (true, false) => BindingMode::ByRef(Mutability::Immutable),
            (false, true) => BindingMode::ByValue(Mutability::Mutable),
            (false, false) => BindingMode::ByValue(Mutability::Immutable),
        };
        let fieldpat = P(Pat {
            id: ast::DUMMY_NODE_ID,
            node: PatKind::Ident(bind_type, fieldname, None),
            span: boxed_span.to(hi),
        });

        // `box fieldname` wraps the identifier pattern in a box pattern.
        let subpat = if is_box {
            P(Pat {
                id: ast::DUMMY_NODE_ID,
                node: PatKind::Box(fieldpat),
                span: lo.to(hi),
            })
        } else {
            fieldpat
        };
        (subpat, fieldname, true)
    };

    Ok(source_map::Spanned {
        span: lo.to(hi),
        node: ast::FieldPat {
            ident: fieldname,
            pat: subpat,
            is_shorthand,
            attrs: attrs.into(),
        }
    })
}

/// Parse the fields of a struct-like pattern. Returns the parsed fields and
/// whether an `..` ("et cetera") marker was present. Performs extensive
/// recovery for misplaced `...`, trailing commas after `..`, and fields
/// written after `..,`.
fn parse_pat_fields(&mut self) -> PResult<'a, (Vec<source_map::Spanned<ast::FieldPat>>, bool)> {
    let mut fields = Vec::new();
    let mut etc = false;
    let mut ate_comma = true;
    // Error held back so a fix spanning the whole field list can be
    // suggested once parsing finishes.
    let mut delayed_err: Option<DiagnosticBuilder<'a>> = None;
    let mut etc_span = None;

    while self.token != token::CloseDelim(token::Brace) {
        let attrs = self.parse_outer_attributes()?;
        let lo = self.span;

        // check that a comma comes after every field
        if !ate_comma {
            let err = self.struct_span_err(self.prev_span, "expected `,`");
            return Err(err);
        }
        ate_comma = false;

        if self.check(&token::DotDot) || self.token == token::DotDotDot {
            etc = true;
            let mut etc_sp = self.span;

            if self.token == token::DotDotDot { // Issue #46718
                // Accept `...` as if it were `..` to avoid further errors
                let mut err = self.struct_span_err(self.span,
                                                   "expected field pattern, found `...`");
                err.span_suggestion_with_applicability(
                    self.span,
                    "to omit remaining fields, use one fewer `.`",
                    "..".to_owned(),
                    Applicability::MachineApplicable
                );
                err.emit();
            }
            self.bump();  // `..` || `...`

            if self.token == token::CloseDelim(token::Brace) {
                // `..` directly before `}` — the well-formed case.
                etc_span = Some(etc_sp);
                break;
            }
            let token_str = self.this_token_to_string();
            let mut err = self.fatal(&format!("expected `}}`, found `{}`", token_str));

            err.span_label(self.span, "expected `}`");
            let mut comma_sp = None;
            if self.token == token::Comma { // Issue #49257
                etc_sp = etc_sp.to(self.sess.source_map().span_until_non_whitespace(self.span));
                err.span_label(etc_sp,
                               "`..` must be at the end and cannot have a trailing comma");
                comma_sp = Some(self.span);
                self.bump();
                ate_comma = true;
            }

            etc_span = Some(etc_sp);
            if self.token == token::CloseDelim(token::Brace) {
                // If the struct looks otherwise well formed, recover and continue.
                if let Some(sp) = comma_sp {
                    err.span_suggestion_short(sp, "remove this comma", "".into());
                }
                err.emit();
                break;
            } else if self.token.is_ident() && ate_comma {
                // Accept fields coming after `..,`.
                // This way we avoid "pattern missing fields" errors afterwards.
                // We delay this error until the end in order to have a span for a
                // suggested fix.
                if let Some(mut delayed_err) = delayed_err {
                    delayed_err.emit();
                    return Err(err);
                } else {
                    delayed_err = Some(err);
                }
            } else {
                if let Some(mut err) = delayed_err {
                    err.emit();
                }
                return Err(err);
            }
        }

        fields.push(match self.parse_pat_field(lo, attrs) {
            Ok(field) => field,
            Err(err) => {
                if let Some(mut delayed_err) = delayed_err {
                    delayed_err.emit();
                }
                return Err(err);
            }
        });
        ate_comma = self.eat(&token::Comma);
    }

    if let Some(mut err) = delayed_err {
        // Now that the whole list has been consumed, suggest moving the
        // misplaced `..` to the end of it.
        if let Some(etc_span) = etc_span {
            err.multipart_suggestion(
                "move the `..` to the end of the field list",
                vec![
                    (etc_span, "".into()),
                    (self.span, format!("{}.. }}", if ate_comma { "" } else { ", " })),
                ],
            );
        }
        err.emit();
    }
    return Ok((fields, etc));
}

// Parses the end of a range pattern: either a (possibly qualified) path or
// a literal with optional leading minus.
fn parse_pat_range_end(&mut self) -> PResult<'a, P<Expr>> {
    if self.token.is_path_start() {
        let lo = self.span;
        let (qself, path) = if self.eat_lt() {
            // Parse a qualified path
            let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
            (Some(qself), path)
        } else {
            // Parse an unqualified path
            (None, self.parse_path(PathStyle::Expr)?)
        };
        let hi = self.prev_span;
        Ok(self.mk_expr(lo.to(hi), ExprKind::Path(qself, path), ThinVec::new()))
    } else {
        self.parse_literal_maybe_minus()
    }
}

// helper function to decide whether to parse as ident binding or to try to do
// something more complex like range patterns
fn parse_as_ident(&mut self) -> bool {
    self.look_ahead(1, |t| match *t {
        token::OpenDelim(token::Paren) | token::OpenDelim(token::Brace) |
        token::DotDotDot | token::DotDotEq | token::ModSep | token::Not => Some(false),
        // ensure slice patterns [a, b.., c] and [a, b, c..] don't go into the
        // range pattern branch
        token::DotDot => None,
        _ => Some(true),
    }).unwrap_or_else(|| self.look_ahead(2, |t| match *t {
        token::Comma | token::CloseDelim(token::Bracket) => true,
        _ => false,
    }))
}

/// A wrapper around `parse_pat` with some special error handling for the
/// "top-level" patterns in a match arm, `for` loop, `let`, &c. (in contrast
/// to subpatterns within such).
fn parse_top_level_pat(&mut self) -> PResult<'a, P<Pat>> {
    let pat = self.parse_pat()?;
    if self.token == token::Comma {
        // An unexpected comma after a top-level pattern is a clue that the
        // user (perhaps more accustomed to some other language) forgot the
        // parentheses in what should have been a tuple pattern; return a
        // suggestion-enhanced error here rather than choking on the comma
        // later.
        let comma_span = self.span;
        self.bump();
        if let Err(mut err) = self.parse_pat_list() {
            // We didn't expect this to work anyway; we just wanted
            // to advance to the end of the comma-sequence so we know
            // the span to suggest parenthesizing
            err.cancel();
        }
        let seq_span = pat.span.to(self.prev_span);
        let mut err = self.struct_span_err(comma_span,
                                           "unexpected `,` in pattern");
        if let Ok(seq_snippet) = self.sess.source_map().span_to_snippet(seq_span) {
            err.span_suggestion_with_applicability(
                seq_span,
                "try adding parentheses",
                format!("({})", seq_snippet),
                Applicability::MachineApplicable
            );
        }
        return Err(err);
    }
    Ok(pat)
}

/// Parse a pattern.
pub fn parse_pat(&mut self) -> PResult<'a, P<Pat>> {
    self.parse_pat_with_range_pat(true)
}

/// Parse a pattern, with a setting whether modern range patterns e.g. `a..=b`, `a..b` are
/// allowed. When `allow_range_pat` is false (e.g. inside `&pat` or `box pat`),
/// an unparenthesized `..`/`..=` range is rejected with a precedence hint.
fn parse_pat_with_range_pat(&mut self, allow_range_pat: bool) -> PResult<'a, P<Pat>> {
    maybe_whole!(self, NtPat, |x| x);

    let lo = self.span;
    let pat;
    match self.token {
        token::BinOp(token::And) | token::AndAnd => {
            // Parse &pat / &mut pat
            self.expect_and()?;
            let mutbl = self.parse_mutability();
            if let token::Lifetime(ident) = self.token {
                // `&'a pat` is never valid; report the lifetime directly.
                let mut err = self.fatal(&format!("unexpected lifetime `{}` in pattern",
                                                  ident));
                err.span_label(self.span, "unexpected lifetime");
                return Err(err);
            }
            let subpat = self.parse_pat_with_range_pat(false)?;
            pat = PatKind::Ref(subpat, mutbl);
        }
        token::OpenDelim(token::Paren) => {
            // Parse (pat,pat,pat,...) as tuple pattern
            let (fields, ddpos, trailing_comma) = self.parse_parenthesized_pat_list()?;
            // `(p)` without trailing comma is parenthesization, not a 1-tuple.
            pat = if fields.len() == 1 && ddpos.is_none() && !trailing_comma {
                PatKind::Paren(fields.into_iter().nth(0).unwrap())
            } else {
                PatKind::Tuple(fields, ddpos)
            };
        }
        token::OpenDelim(token::Bracket) => {
            // Parse [pat,pat,...] as slice pattern
            self.bump();
            let (before, slice, after) = self.parse_pat_vec_elements()?;
            self.expect(&token::CloseDelim(token::Bracket))?;
            pat = PatKind::Slice(before, slice, after);
        }
        // At this point, token != &, &&, (, [
        _ => if self.eat_keyword(keywords::Underscore) {
            // Parse _
            pat = PatKind::Wild;
        } else if self.eat_keyword(keywords::Mut) {
            // Parse mut ident @ pat / mut ref ident @ pat
            let mutref_span = self.prev_span.to(self.span);
            let binding_mode = if self.eat_keyword(keywords::Ref) {
                // `mut ref` is the wrong order; recover as `ref mut`.
                self.diagnostic()
                    .struct_span_err(mutref_span, "the order of `mut` and `ref` is incorrect")
                    .span_suggestion_with_applicability(
                        mutref_span,
                        "try switching the order",
                        "ref mut".into(),
                        Applicability::MachineApplicable
                    ).emit();
                BindingMode::ByRef(Mutability::Mutable)
            } else {
                BindingMode::ByValue(Mutability::Mutable)
            };
            pat = self.parse_pat_ident(binding_mode)?;
        } else if self.eat_keyword(keywords::Ref) {
            // Parse ref ident @ pat / ref mut ident @ pat
            let mutbl = self.parse_mutability();
            pat = self.parse_pat_ident(BindingMode::ByRef(mutbl))?;
        } else if self.eat_keyword(keywords::Box) {
            // Parse box pat
            let subpat = self.parse_pat_with_range_pat(false)?;
            pat = PatKind::Box(subpat);
        } else if self.token.is_ident() && !self.token.is_reserved_ident() &&
                  self.parse_as_ident() {
            // Parse ident @ pat
            // This can give false positives and parse nullary enums,
            // they are dealt with later in resolve
            let binding_mode = BindingMode::ByValue(Mutability::Immutable);
            pat = self.parse_pat_ident(binding_mode)?;
        } else if self.token.is_path_start() {
            // Parse pattern starting with a path
            let (qself, path) = if self.eat_lt() {
                // Parse a qualified path
                let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
                (Some(qself), path)
            } else {
                // Parse an unqualified path
                (None, self.parse_path(PathStyle::Expr)?)
            };
            // What follows the path decides the pattern kind: macro call,
            // range, struct, tuple struct, or plain path.
            match self.token {
                token::Not if qself.is_none() => {
                    // Parse macro invocation
                    self.bump();
                    let (delim, tts) = self.expect_delimited_token_tree()?;
                    let mac = respan(lo.to(self.prev_span), Mac_ { path, tts, delim });
                    pat = PatKind::Mac(mac);
                }
                token::DotDotDot | token::DotDotEq | token::DotDot => {
                    let end_kind = match self.token {
                        token::DotDot => RangeEnd::Excluded,
                        token::DotDotDot => RangeEnd::Included(RangeSyntax::DotDotDot),
                        token::DotDotEq => RangeEnd::Included(RangeSyntax::DotDotEq),
                        _ => panic!("can only parse `..`/`...`/`..=` for ranges \
                                     (checked above)"),
                    };
                    let op_span = self.span;
                    // Parse range
                    let span = lo.to(self.prev_span);
                    let begin = self.mk_expr(span, ExprKind::Path(qself, path), ThinVec::new());
                    self.bump();
                    let end = self.parse_pat_range_end()?;
                    let op = Spanned { span: op_span, node: end_kind };
                    pat = PatKind::Range(begin, end, op);
                }
                token::OpenDelim(token::Brace) => {
                    if qself.is_some() {
                        let msg = "unexpected `{` after qualified path";
                        let mut err = self.fatal(msg);
                        err.span_label(self.span, msg);
                        return Err(err);
                    }
                    // Parse struct pattern
                    self.bump();
                    let (fields, etc) = self.parse_pat_fields().unwrap_or_else(|mut e| {
                        // Recover with an empty field list so parsing can go on.
                        e.emit();
                        self.recover_stmt();
                        (vec![], false)
                    });
                    self.bump();
                    pat = PatKind::Struct(path, fields, etc);
                }
                token::OpenDelim(token::Paren) => {
                    if qself.is_some() {
                        let msg = "unexpected `(` after qualified path";
                        let mut err = self.fatal(msg);
                        err.span_label(self.span, msg);
                        return Err(err);
                    }
                    // Parse tuple struct or enum pattern
                    let (fields, ddpos, _) = self.parse_parenthesized_pat_list()?;
                    pat = PatKind::TupleStruct(path, fields, ddpos)
                }
                _ => pat = PatKind::Path(qself, path),
            }
        } else {
            // Try to parse everything else as literal with optional minus
            match self.parse_literal_maybe_minus() {
                Ok(begin) => {
                    let op_span = self.span;
                    if self.check(&token::DotDot) || self.check(&token::DotDotEq) ||
                            self.check(&token::DotDotDot) {
                        // Literal-started range pattern, e.g. `0..=9`.
                        let end_kind = if self.eat(&token::DotDotDot) {
                            RangeEnd::Included(RangeSyntax::DotDotDot)
                        } else if self.eat(&token::DotDotEq) {
                            RangeEnd::Included(RangeSyntax::DotDotEq)
                        } else if self.eat(&token::DotDot) {
                            RangeEnd::Excluded
                        } else {
                            panic!("impossible case: we already matched \
                                    on a range-operator token")
                        };
                        let end = self.parse_pat_range_end()?;
                        let op = Spanned { span: op_span, node: end_kind };
                        pat = PatKind::Range(begin, end, op);
                    } else {
                        pat = PatKind::Lit(begin);
                    }
                }
                Err(mut err) => {
                    self.cancel(&mut err);
                    let msg = format!("expected pattern, found {}", self.this_token_descr());
                    let mut err = self.fatal(&msg);
                    err.span_label(self.span, "expected pattern");
                    return Err(err);
                }
            }
        }
    }

    let pat = Pat { node: pat, span: lo.to(self.prev_span), id: ast::DUMMY_NODE_ID };
    let pat = self.maybe_recover_from_bad_qpath(pat, true)?;

    if !allow_range_pat {
        // In sub-pattern position, reject ambiguous modern range patterns
        // (legacy `...` ranges are still tolerated here).
        match pat.node {
            PatKind::Range(
                _, _, Spanned { node: RangeEnd::Included(RangeSyntax::DotDotDot), .. }
            ) => {},
            PatKind::Range(..) => {
                let mut err = self.struct_span_err(
                    pat.span,
                    "the range pattern here has ambiguous interpretation",
                );
                err.span_suggestion_with_applicability(
                    pat.span,
                    "add parentheses to clarify the precedence",
                    format!("({})", pprust::pat_to_string(&pat)),
                    // "ambiguous interpretation" implies that we have to be guessing
                    Applicability::MaybeIncorrect
                );
                return Err(err);
            }
            _ => {}
        }
    }

    Ok(P(pat))
}

/// Parse ident or ident @ pat
/// used by the copy foo and ref foo patterns to give a good
/// error message when parsing mistakes like ref foo(a,b)
fn parse_pat_ident(&mut self,
                   binding_mode: ast::BindingMode)
                   -> PResult<'a, PatKind> {
    let ident = self.parse_ident()?;
    let sub = if self.eat(&token::At) {
        Some(self.parse_pat()?)
    } else {
        None
    };

    // just to be friendly, if they write something like
    //   ref Some(i)
    // we end up here with ( as the current token.  This shortly
    // leads to a parse error.  Note that if there is no explicit
    // binding mode then we do not end up here, because the lookahead
    // will direct us over to parse_enum_variant()
    if self.token == token::OpenDelim(token::Paren) {
        return Err(self.span_fatal(
            self.prev_span,
            "expected identifier, found enum pattern"))
    }

    Ok(PatKind::Ident(binding_mode, ident, sub))
}

/// Parse a local variable declaration (the `let` keyword has already been
/// eaten). Includes recovery for `let x: <expr>` where `:` was a typo for `=`.
fn parse_local(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Local>> {
    let lo = self.prev_span;
    let pat = self.parse_top_level_pat()?;

    let (err, ty) = if self.eat(&token::Colon) {
        // Save the state of the parser before parsing type normally, in case there is a `:`
        // instead of an `=` typo.
        let parser_snapshot_before_type = self.clone();
        let colon_sp = self.prev_span;
        match self.parse_ty() {
            Ok(ty) => (None, Some(ty)),
            Err(mut err) => {
                // Rewind to before attempting to parse the type and continue parsing
                let parser_snapshot_after_type = self.clone();
                mem::replace(self, parser_snapshot_before_type);

                let snippet = self.sess.source_map().span_to_snippet(pat.span).unwrap();
                err.span_label(pat.span, format!("while parsing the type for `{}`", snippet));
                (Some((parser_snapshot_after_type, colon_sp, err)), None)
            }
        }
    } else {
        (None, None)
    };
    // If the type failed to parse, `parse_initializer` is told to treat
    // whatever follows the bogus `:` as the initializer expression.
    let init = match (self.parse_initializer(err.is_some()), err) {
        (Ok(init), None) => {  // init parsed, ty parsed
            init
        }
        (Ok(init), Some((_, colon_sp, mut err))) => {  // init parsed, ty error
            // Could parse the type as if it were the initializer, it is likely there was a
            // typo in the code: `:` instead of `=`. Add suggestion and emit the error.
            err.span_suggestion_short_with_applicability(
                colon_sp,
                "use `=` if you meant to assign",
                "=".to_string(),
                Applicability::MachineApplicable
            );
            err.emit();
            // As this was parsed successfully, continue as if the code has been fixed for the
            // rest of the file. It will still fail due to the emitted error, but we avoid
            // extra noise.
            init
        }
        (Err(mut init_err), Some((snapshot, _, ty_err))) => {  // init error, ty error
            init_err.cancel();
            // Couldn't parse the type nor the initializer, only raise the type error and
            // return to the parser state before parsing the type as the initializer.
            // let x: <parse_error>;
            mem::replace(self, snapshot);
            return Err(ty_err);
        }
        (Err(err), None) => {  // init error, ty parsed
            // Couldn't parse the initializer and we're not attempting to recover a failed
            // parse of the type, return the error.
            return Err(err);
        }
    };
    let hi = if self.token == token::Semi {
        self.span
    } else {
        self.prev_span
    };
    Ok(P(ast::Local {
        ty,
        pat,
        init,
        id: ast::DUMMY_NODE_ID,
        span: lo.to(hi),
        attrs,
    }))
}

/// Parse a structure field (`name: Type`).
fn parse_name_and_ty(&mut self,
                     lo: Span,
                     vis: Visibility,
                     attrs: Vec<Attribute>)
                     -> PResult<'a, StructField> {
    let name = self.parse_ident()?;
    self.expect(&token::Colon)?;
    let ty = self.parse_ty()?;
    Ok(StructField {
        span: lo.to(self.prev_span),
        ident: Some(name),
        vis,
        id: ast::DUMMY_NODE_ID,
        ty,
        attrs,
    })
}

/// Emit an expected item after attributes error.
fn expected_item_err(&self, attrs: &[Attribute]) {
    // Distinguish the doc-comment case so the message is more precise.
    let message = match attrs.last() {
        Some(&Attribute { is_sugared_doc: true, .. }) => "expected item after doc comment",
        _ => "expected item after attributes",
    };

    self.span_err(self.prev_span, message);
}

/// Parse a statement. This stops just before trailing semicolons on everything but items.
/// e.g. a `StmtKind::Semi` parses to a `StmtKind::Expr`, leaving the trailing `;` unconsumed.
pub fn parse_stmt(&mut self) -> PResult<'a, Option<Stmt>> {
    Ok(self.parse_stmt_(true))
}

// Eat tokens until we can be relatively sure we reached the end of the
// statement. This is something of a best-effort heuristic.
//
// We terminate when we find an unmatched `}` (without consuming it).
// Convenience wrapper: consume tokens until a likely statement boundary,
// without breaking on `;` or on brace-delimited blocks.
fn recover_stmt(&mut self) {
    self.recover_stmt_(SemiColonMode::Ignore, BlockMode::Ignore)
}

// If `break_on_semi` is `Break`, then we will stop consuming tokens after
// finding (and consuming) a `;` outside of `{}` or `[]` (note that this is
// approximate - it can mean we break too early due to macros, but that
// should only lead to sub-optimal recovery, not inaccurate parsing).
//
// If `break_on_block` is `Break`, then we will stop consuming tokens
// after finding (and consuming) a brace-delimited block.
fn recover_stmt_(&mut self, break_on_semi: SemiColonMode, break_on_block: BlockMode) {
    // Track nesting so we only react to `;`/`}` at the statement's own level.
    let mut brace_depth = 0;
    let mut bracket_depth = 0;
    let mut in_block = false;
    debug!("recover_stmt_ enter loop (semi={:?}, block={:?})",
           break_on_semi, break_on_block);
    loop {
        debug!("recover_stmt_ loop {:?}", self.token);
        match self.token {
            token::OpenDelim(token::DelimToken::Brace) => {
                brace_depth += 1;
                self.bump();
                if break_on_block == BlockMode::Break &&
                   brace_depth == 1 &&
                   bracket_depth == 0 {
                    in_block = true;
                }
            }
            token::OpenDelim(token::DelimToken::Bracket) => {
                bracket_depth += 1;
                self.bump();
            }
            token::CloseDelim(token::DelimToken::Brace) => {
                if brace_depth == 0 {
                    // Unmatched `}` — leave it for the caller.
                    debug!("recover_stmt_ return - close delim {:?}", self.token);
                    return;
                }
                brace_depth -= 1;
                self.bump();
                if in_block && bracket_depth == 0 && brace_depth == 0 {
                    debug!("recover_stmt_ return - block end {:?}", self.token);
                    return;
                }
            }
            token::CloseDelim(token::DelimToken::Bracket) => {
                bracket_depth -= 1;
                // Clamp at zero: an unmatched `]` should not poison later depth checks.
                if bracket_depth < 0 {
                    bracket_depth = 0;
                }
                self.bump();
            }
            token::Eof => {
                debug!("recover_stmt_ return - Eof");
                return;
            }
            token::Semi => {
                self.bump();
                if break_on_semi == SemiColonMode::Break &&
                   brace_depth == 0 &&
                   bracket_depth == 0 {
                    debug!("recover_stmt_ return - Semi");
                    return;
                }
            }
            _ => {
                self.bump()
            }
        }
    }
}

// Parse a statement, emitting any error and recovering to a statement
// boundary instead of propagating the failure.
fn parse_stmt_(&mut self, macro_legacy_warnings: bool) -> Option<Stmt> {
    self.parse_stmt_without_recovery(macro_legacy_warnings).unwrap_or_else(|mut e| {
        e.emit();
        self.recover_stmt_(SemiColonMode::Break, BlockMode::Ignore);
        None
    })
}

// Lookahead: does the token stream start an `async {` / `async move {` block?
fn is_async_block(&mut self) -> bool {
    self.token.is_keyword(keywords::Async) &&
    (
        ( // `async move {`
            self.look_ahead(1, |t| t.is_keyword(keywords::Move)) &&
            self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))
        ) || ( // `async {`
            self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace))
        )
    )
}

// Lookahead: does the token stream start a `do catch {` expression?
fn is_catch_expr(&mut self) -> bool {
    self.token.is_keyword(keywords::Do) &&
    self.look_ahead(1, |t| t.is_keyword(keywords::Catch)) &&
    self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace)) &&

    // prevent `while catch {} {}`, `if catch {} {} else {}`, etc.
    !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
}

// `union` is contextual: only treat it as an item when followed by an identifier.
fn is_union_item(&self) -> bool {
    self.token.is_keyword(keywords::Union) &&
    self.look_ahead(1, |t| t.is_ident() && !t.is_reserved_ident())
}

// `crate` visibility (as opposed to the path segment `crate::...`).
fn is_crate_vis(&self) -> bool {
    self.token.is_keyword(keywords::Crate) && self.look_ahead(1, |t| t != &token::ModSep)
}

// `extern` item (as opposed to the path segment `extern::...`).
fn is_extern_non_path(&self) -> bool {
    self.token.is_keyword(keywords::Extern) && self.look_ahead(1, |t| t != &token::ModSep)
}

// `existential type` declaration.
fn is_existential_type_decl(&self) -> bool {
    self.token.is_keyword(keywords::Existential) &&
    self.look_ahead(1, |t| t.is_keyword(keywords::Type))
}

// `auto trait` / `unsafe auto trait` item.
fn is_auto_trait_item(&mut self) -> bool {
    // auto trait
    (self.token.is_keyword(keywords::Auto)
        && self.look_ahead(1, |t| t.is_keyword(keywords::Trait)))
    || // unsafe auto trait
    (self.token.is_keyword(keywords::Unsafe) &&
     self.look_ahead(1, |t| t.is_keyword(keywords::Auto)) &&
     self.look_ahead(2, |t| t.is_keyword(keywords::Trait)))
}

// Try to eat a macro definition: either a 2.0-style `macro name { ... }` /
// `macro name(args) { ... }`, or a legacy `macro_rules! name ...`.
// Returns `Ok(None)` without consuming anything if neither form is present.
fn eat_macro_def(&mut self, attrs: &[Attribute], vis: &Visibility, lo: Span)
                 -> PResult<'a, Option<P<Item>>> {
    let token_lo = self.span;
    let (ident, def) = match self.token {
        token::Ident(ident, false) if ident.name == keywords::Macro.name() => {
            self.bump();
            let ident = self.parse_ident()?;
            let tokens = if self.check(&token::OpenDelim(token::Brace)) {
                match self.parse_token_tree() {
                    TokenTree::Delimited(_, ref delimited) => delimited.stream(),
                    _ => unreachable!(),
                }
            } else if self.check(&token::OpenDelim(token::Paren)) {
                // `macro name(args) { body }` is stored as `args => body`.
                let args = self.parse_token_tree();
                let body = if self.check(&token::OpenDelim(token::Brace)) {
                    self.parse_token_tree()
                } else {
                    self.unexpected()?;
                    unreachable!()
                };
                TokenStream::concat(vec![
                    args.into(),
                    TokenTree::Token(token_lo.to(self.prev_span), token::FatArrow).into(),
                    body.into(),
                ])
            } else {
                self.unexpected()?;
                unreachable!()
            };

            (ident, ast::MacroDef { tokens: tokens.into(), legacy: false })
        }
        token::Ident(ident, _) if ident.name == "macro_rules" &&
                                  self.look_ahead(1, |t| *t == token::Not) => {
            let prev_span = self.prev_span;
            self.complain_if_pub_macro(&vis.node, prev_span);
            self.bump();
            self.bump();

            let ident = self.parse_ident()?;
            let (delim, tokens) = self.expect_delimited_token_tree()?;
            if delim != MacDelimiter::Brace {
                // Non-brace-delimited `macro_rules!` must be terminated by `;`.
                if !self.eat(&token::Semi) {
                    let msg = "macros that expand to items must either \
                               be surrounded with braces or followed by a semicolon";
                    self.span_err(self.prev_span, msg);
                }
            }

            (ident, ast::MacroDef { tokens: tokens, legacy: true })
        }
        _ => return Ok(None),
    };

    let span = lo.to(self.prev_span);
    Ok(Some(self.mk_item(span, ident, ItemKind::MacroDef(def), vis.clone(), attrs.to_vec())))
}

// Core statement parser. Returns `Ok(None)` for a lone `;` or when the
// enclosing block ends. Errors are propagated (see `parse_stmt_` for the
// recovering wrapper).
fn parse_stmt_without_recovery(&mut self,
                               macro_legacy_warnings: bool)
                               -> PResult<'a, Option<Stmt>> {
    maybe_whole!(self, NtStmt, |x| Some(x));

    let attrs = self.parse_outer_attributes()?;
    let lo = self.span;

    Ok(Some(if self.eat_keyword(keywords::Let) {
        Stmt {
            id: ast::DUMMY_NODE_ID,
            node: StmtKind::Local(self.parse_local(attrs.into())?),
            span: lo.to(self.prev_span),
        }
    } else if let Some(macro_def) = self.eat_macro_def(
        &attrs,
        &source_map::respan(lo, VisibilityKind::Inherited),
        lo,
    )? {
        Stmt {
            id: ast::DUMMY_NODE_ID,
            node: StmtKind::Item(macro_def),
            span: lo.to(self.prev_span),
        }
    // Starts like a simple path, being careful to avoid contextual keywords
    // such as union items, items with `crate` visibility, or auto trait items.
    // Our goal here is to parse an arbitrary path `a::b::c` but not something that starts
    // like a path (1 token), but it fact not a path.
    // `union::b::c` - path, `union U { ... }` - not a path.
    // `crate::b::c` - path, `crate struct S;` - not a path.
    // `extern::b::c` - path, `extern crate c;` - not a path.
    } else if self.token.is_path_start() &&
              !self.token.is_qpath_start() &&
              !self.is_union_item() &&
              !self.is_crate_vis() &&
              !self.is_extern_non_path() &&
              !self.is_existential_type_decl() &&
              !self.is_auto_trait_item() {
        let pth = self.parse_path(PathStyle::Expr)?;

        if !self.eat(&token::Not) {
            // Not a macro call: a path expression (possibly a struct literal),
            // continued as a full statement-expression.
            let expr = if self.check(&token::OpenDelim(token::Brace)) {
                self.parse_struct_expr(lo, pth, ThinVec::new())?
            } else {
                let hi = self.prev_span;
                self.mk_expr(lo.to(hi), ExprKind::Path(None, pth), ThinVec::new())
            };

            let expr = self.with_res(Restrictions::STMT_EXPR, |this| {
                let expr = this.parse_dot_or_call_expr_with(expr, lo, attrs.into())?;
                this.parse_assoc_expr_with(0, LhsExpr::AlreadyParsed(expr))
            })?;

            return Ok(Some(Stmt {
                id: ast::DUMMY_NODE_ID,
                node: StmtKind::Expr(expr),
                span: lo.to(self.prev_span),
            }));
        }

        // it's a macro invocation
        let id = match self.token {
            token::OpenDelim(_) => keywords::Invalid.ident(), // no special identifier
            _ => self.parse_ident()?,
        };

        // check that we're pointing at delimiters (need to check
        // again after the `if`, because of `parse_ident`
        // consuming more tokens).
        match self.token {
            token::OpenDelim(_) => {}
            _ => {
                // we only expect an ident if we didn't parse one
                // above.
                let ident_str = if id.name == keywords::Invalid.name() {
                    "identifier, "
                } else {
                    ""
                };
                let tok_str = self.this_token_to_string();
                let mut err = self.fatal(&format!("expected {}`(` or `{{`, found `{}`",
                                                  ident_str,
                                                  tok_str));
                err.span_label(self.span, format!("expected {}`(` or `{{`", ident_str));
                return Err(err)
            },
        }

        let (delim, tts) = self.expect_delimited_token_tree()?;
        let hi = self.prev_span;

        let style = if delim == MacDelimiter::Brace {
            MacStmtStyle::Braces
        } else {
            MacStmtStyle::NoBraces
        };

        if id.name == keywords::Invalid.name() {
            // `mac!(...)` with no extra ident: a macro *statement* (or the
            // start of a macro-call expression).
            let mac = respan(lo.to(hi), Mac_ { path: pth, tts, delim });
            let node = if delim == MacDelimiter::Brace ||
                          self.token == token::Semi || self.token == token::Eof {
                StmtKind::Mac(P((mac, style, attrs.into())))
            }
            // We used to incorrectly stop parsing macro-expanded statements here.
            // If the next token will be an error anyway but could have parsed with the
            // earlier behavior, stop parsing here and emit a warning to avoid breakage.
            else if macro_legacy_warnings && self.token.can_begin_expr() &&
                    match self.token {
                // These can continue an expression, so we can't stop parsing and warn.
                token::OpenDelim(token::Paren) | token::OpenDelim(token::Bracket) |
                token::BinOp(token::Minus) | token::BinOp(token::Star) |
                token::BinOp(token::And) | token::BinOp(token::Or) |
                token::AndAnd | token::OrOr |
                token::DotDot | token::DotDotDot | token::DotDotEq => false,
                _ => true,
            } {
                self.warn_missing_semicolon();
                StmtKind::Mac(P((mac, style, attrs.into())))
            } else {
                // Treat the macro call as an expression and keep parsing
                // (e.g. `mac!(...).method()` or `mac!(...) + x`).
                let e = self.mk_mac_expr(lo.to(hi), mac.node, ThinVec::new());
                let e = self.parse_dot_or_call_expr_with(e, lo, attrs.into())?;
                let e = self.parse_assoc_expr_with(0, LhsExpr::AlreadyParsed(e))?;
                StmtKind::Expr(e)
            };
            Stmt {
                id: ast::DUMMY_NODE_ID,
                span: lo.to(hi),
                node,
            }
        } else {
            // if it has a special ident, it's definitely an item
            //
            // Require a semicolon or braces.
            if style != MacStmtStyle::Braces {
                if !self.eat(&token::Semi) {
                    self.span_err(self.prev_span,
                                  "macros that expand to items must \
                                   either be surrounded with braces or \
                                   followed by a semicolon");
                }
            }
            let span = lo.to(hi);
            Stmt {
                id: ast::DUMMY_NODE_ID,
                span,
                node: StmtKind::Item({
                    self.mk_item(
                        span, id /*id is good here*/,
                        ItemKind::Mac(respan(span, Mac_ { path: pth, tts, delim })),
                        respan(lo, VisibilityKind::Inherited),
                        attrs)
                }),
            }
        }
    } else {
        // FIXME: Bad copy of attrs
        // A block-level item claims no ownership of the module directory.
        let old_directory_ownership =
            mem::replace(&mut self.directory.ownership, DirectoryOwnership::UnownedViaBlock);
        let item = self.parse_item_(attrs.clone(), false, true)?;
        self.directory.ownership = old_directory_ownership;

        match item {
            Some(i) => Stmt {
                id: ast::DUMMY_NODE_ID,
                span: lo.to(i.span),
                node: StmtKind::Item(i),
            },
            None => {
                // Complain about attributes that ended up attached to nothing.
                let unused_attrs = |attrs: &[Attribute], s: &mut Self| {
                    if !attrs.is_empty() {
                        if s.prev_token_kind == PrevTokenKind::DocComment {
                            s.span_fatal_err(s.prev_span, Error::UselessDocComment).emit();
                        } else if attrs.iter().any(|a| a.style == AttrStyle::Outer) {
                            s.span_err(s.span, "expected statement after outer attribute");
                        }
                    }
                };

                // Do not attempt to parse an expression if we're done here.
                if self.token == token::Semi {
                    unused_attrs(&attrs, self);
                    self.bump();
                    return Ok(None);
                }

                if self.token == token::CloseDelim(token::Brace) {
                    unused_attrs(&attrs, self);
                    return Ok(None);
                }

                // Remainder are line-expr stmts.
                let e = self.parse_expr_res(
                    Restrictions::STMT_EXPR, Some(attrs.into()))?;
                Stmt {
                    id: ast::DUMMY_NODE_ID,
                    span: lo.to(e.span),
                    node: StmtKind::Expr(e),
                }
            }
        }
    }))
}

/// Is this expression a successfully-parsed statement?
fn expr_is_complete(&mut self, e: &Expr) -> bool {
    self.restrictions.contains(Restrictions::STMT_EXPR) &&
        !classify::expr_requires_semi_to_be_stmt(e)
}

/// Parse a block. No inner attrs are allowed.
pub fn parse_block(&mut self) -> PResult<'a, P<Block>> {
    maybe_whole!(self, NtBlock, |x| x);

    let lo = self.span;

    if !self.eat(&token::OpenDelim(token::Brace)) {
        let sp = self.span;
        let tok = self.this_token_to_string();
        let mut do_not_suggest_help = false;
        let mut e = self.span_fatal(sp, &format!("expected `{{`, found `{}`", tok));
        if self.token.is_keyword(keywords::In) || self.token == token::Colon {
            // After `for x in` or a stray `:`, the inside-a-block suggestion
            // below would be misleading.
            do_not_suggest_help = true;
            e.span_label(sp, "expected `{`");
        }

        // Check to see if the user has written something like
        //
        //    if (cond)
        //      bar;
        //
        // Which is valid in other languages, but not Rust.
        match self.parse_stmt_without_recovery(false) {
            Ok(Some(stmt)) => {
                if self.look_ahead(1, |t| t == &token::OpenDelim(token::Brace))
                    || do_not_suggest_help {
                    // if the next token is an open brace (e.g., `if a b {`), the place-
                    // inside-a-block suggestion would be more likely wrong than right
                    return Err(e);
                }
                let mut stmt_span = stmt.span;
                // expand the span to include the semicolon, if it exists
                if self.eat(&token::Semi) {
                    stmt_span = stmt_span.with_hi(self.prev_span.hi());
                }
                // Pretty-print the statement wrapped in braces for the suggestion.
                let sugg = pprust::to_string(|s| {
                    use print::pprust::{PrintState, INDENT_UNIT};
                    s.ibox(INDENT_UNIT)?;
                    s.bopen()?;
                    s.print_stmt(&stmt)?;
                    s.bclose_maybe_open(stmt.span, INDENT_UNIT, false)
                });
                e.span_suggestion_with_applicability(
                    stmt_span,
                    "try placing this code inside a block",
                    sugg,
                    // speculative, has been misleading in the past (closed Issue #46836)
                    Applicability::MaybeIncorrect
                );
            }
            Err(mut e) => {
                // The speculative statement parse failed too; drop its error
                // and report only the original "expected `{`" diagnostic.
                self.recover_stmt_(SemiColonMode::Break, BlockMode::Ignore);
                self.cancel(&mut e);
            }
            _ => ()
        }
        return Err(e);
    }

    self.parse_block_tail(lo, BlockCheckMode::Default)
}

/// Parse a block. Inner attrs are allowed.
    fn parse_inner_attrs_and_block(&mut self) -> PResult<'a, (Vec<Attribute>, P<Block>)> {
        // Interpolated blocks carry no inner attributes of their own.
        maybe_whole!(self, NtBlock, |x| (Vec::new(), x));

        let lo = self.span;
        self.expect(&token::OpenDelim(token::Brace))?;
        Ok((self.parse_inner_attributes()?,
            self.parse_block_tail(lo, BlockCheckMode::Default)?))
    }

    /// Parse the rest of a block expression or function body
    /// Precondition: already parsed the '{'.
    fn parse_block_tail(&mut self, lo: Span, s: BlockCheckMode) -> PResult<'a, P<Block>> {
        let mut stmts = vec![];
        // `recovered` records that we bailed out of the block after a parse
        // error instead of reaching a real closing brace.
        let mut recovered = false;

        while !self.eat(&token::CloseDelim(token::Brace)) {
            let stmt = match self.parse_full_stmt(false) {
                Err(mut err) => {
                    // Emit the error, skip to a plausible statement/block
                    // boundary, and terminate the block early.
                    err.emit();
                    self.recover_stmt_(SemiColonMode::Ignore, BlockMode::Ignore);
                    self.eat(&token::CloseDelim(token::Brace));
                    recovered = true;
                    break;
                }
                Ok(stmt) => stmt,
            };
            if let Some(stmt) = stmt {
                stmts.push(stmt);
            } else if self.token == token::Eof {
                break;
            } else {
                // Found only `;` or `}`.
                continue;
            };
        }
        Ok(P(ast::Block {
            stmts,
            id: ast::DUMMY_NODE_ID,
            rules: s,
            span: lo.to(self.prev_span),
            recovered,
        }))
    }

    /// Parse a statement, including the trailing semicolon.
    crate fn parse_full_stmt(&mut self, macro_legacy_warnings: bool) -> PResult<'a, Option<Stmt>> {
        // skip looking for a trailing semicolon when we have an interpolated statement
        maybe_whole!(self, NtStmt, |x| Some(x));

        let mut stmt = match self.parse_stmt_without_recovery(macro_legacy_warnings)? {
            Some(stmt) => stmt,
            None => return Ok(None),
        };

        match stmt.node {
            StmtKind::Expr(ref expr) if self.token != token::Eof => {
                // expression without semicolon
                if classify::expr_requires_semi_to_be_stmt(expr) {
                    // Just check for errors and recover; do not eat semicolon yet.
                    if let Err(mut e) =
                        self.expect_one_of(&[], &[token::Semi, token::CloseDelim(token::Brace)])
                    {
                        e.emit();
                        self.recover_stmt();
                    }
                }
            }
            StmtKind::Local(..) => {
                // We used to incorrectly allow a macro-expanded let statement to lack a semicolon.
                // For macro-expanded `let` the missing `;` is (for now) only a
                // warning; in normal code it is a hard expectation.
                if macro_legacy_warnings && self.token != token::Semi {
                    self.warn_missing_semicolon();
                } else {
                    self.expect_one_of(&[], &[token::Semi])?;
                }
            }
            _ => {}
        }

        if self.eat(&token::Semi) {
            stmt = stmt.add_trailing_semicolon();
        }

        // Extend the statement span to cover the semicolon just consumed.
        stmt.span = stmt.span.with_hi(self.prev_span.hi());
        Ok(Some(stmt))
    }

    /// Warn (not error) about a `let` statement missing its `;` — see the
    /// macro-legacy note in `parse_full_stmt`.
    fn warn_missing_semicolon(&self) {
        self.diagnostic().struct_span_warn(self.span, {
            &format!("expected `;`, found `{}`", self.this_token_to_string())
        }).note({
            "This was erroneously allowed and will become a hard error in a future release"
        }).emit();
    }

    /// Report `...` used as a range and suggest `..` / `..=` instead.
    fn err_dotdotdot_syntax(&self, span: Span) {
        self.diagnostic().struct_span_err(span, {
            "unexpected token: `...`"
        }).span_suggestion_with_applicability(
            span,
            "use `..` for an exclusive range",
            "..".to_owned(),
            Applicability::MaybeIncorrect
        ).span_suggestion_with_applicability(
            span,
            "or `..=` for an inclusive range",
            "..=".to_owned(),
            Applicability::MaybeIncorrect
        ).emit();
    }

    // Parse bounds of a type parameter `BOUND + BOUND + BOUND`, possibly with trailing `+`.
    // BOUND = TY_BOUND | LT_BOUND
    // LT_BOUND = LIFETIME (e.g. `'a`)
    // TY_BOUND = TY_BOUND_NOPAREN | (TY_BOUND_NOPAREN)
    // TY_BOUND_NOPAREN = [?] [for<LT_PARAM_DEFS>] SIMPLE_PATH (e.g. `?for<'a: 'b> m::Trait<'a>`)
    fn parse_generic_bounds_common(&mut self, allow_plus: bool) -> PResult<'a, GenericBounds> {
        let mut bounds = Vec::new();
        loop {
            // This needs to be synchronized with `Token::can_begin_bound`.
            let is_bound_start = self.check_path() || self.check_lifetime() ||
                                 self.check(&token::Question) ||
                                 self.check_keyword(keywords::For) ||
                                 self.check(&token::OpenDelim(token::Paren));
            if is_bound_start {
                let lo = self.span;
                let has_parens = self.eat(&token::OpenDelim(token::Paren));
                // Record where a leading `?` (maybe-bound) appeared so the
                // lifetime branch can point an error at it.
                let question = if self.eat(&token::Question) { Some(self.prev_span) } else { None };
                if self.token.is_lifetime() {
                    if let Some(question_span) = question {
                        self.span_err(question_span,
                                      "`?` may only modify trait bounds, not lifetime bounds");
                    }
                    bounds.push(GenericBound::Outlives(self.expect_lifetime()));
                    if has_parens {
                        // `('a)` — still consume the `)` so parsing can
                        // continue, but reject the parenthesized form.
                        self.expect(&token::CloseDelim(token::Paren))?;
                        self.span_err(self.prev_span,
                                      "parenthesized lifetime bounds are not supported");
                    }
                } else {
                    // Trait bound: optional `for<...>` binder, then a path.
                    let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
                    let path = self.parse_path(PathStyle::Type)?;
                    if has_parens {
                        self.expect(&token::CloseDelim(token::Paren))?;
                    }
                    let poly_trait = PolyTraitRef::new(lifetime_defs, path, lo.to(self.prev_span));
                    let modifier = if question.is_some() {
                        TraitBoundModifier::Maybe
                    } else {
                        TraitBoundModifier::None
                    };
                    bounds.push(GenericBound::Trait(poly_trait, modifier));
                }
            } else {
                break
            }

            if !allow_plus || !self.eat_plus() {
                break
            }
        }

        return Ok(bounds);
    }

    fn parse_generic_bounds(&mut self) -> PResult<'a, GenericBounds> {
        self.parse_generic_bounds_common(true)
    }

    // Parse bounds of a lifetime parameter `BOUND + BOUND + BOUND`, possibly with trailing `+`.
    // BOUND = LT_BOUND (e.g. `'a`)
    fn parse_lt_param_bounds(&mut self) -> GenericBounds {
        let mut lifetimes = Vec::new();
        while self.check_lifetime() {
            lifetimes.push(ast::GenericBound::Outlives(self.expect_lifetime()));

            if !self.eat_plus() {
                break
            }
        }
        lifetimes
    }

    /// Matches typaram = IDENT (`?` unbound)? optbounds ( EQ ty )?
    fn parse_ty_param(&mut self,
                      preceding_attrs: Vec<Attribute>)
                      -> PResult<'a, GenericParam> {
        let ident = self.parse_ident()?;

        // Parse optional colon and param bounds.
        let bounds = if self.eat(&token::Colon) {
            self.parse_generic_bounds()?
        } else {
            Vec::new()
        };

        // Optional default type: `T = Ty`.
        let default = if self.eat(&token::Eq) {
            Some(self.parse_ty()?)
        } else {
            None
        };

        Ok(GenericParam {
            ident,
            id: ast::DUMMY_NODE_ID,
            attrs: preceding_attrs.into(),
            bounds,
            kind: GenericParamKind::Type {
                default,
            }
        })
    }

    /// Parses the following grammar:
    ///     TraitItemAssocTy = Ident ["<"...">"] [":" [GenericBounds]] ["where" ...] ["=" Ty]
    fn parse_trait_item_assoc_ty(&mut self)
        -> PResult<'a, (Ident, TraitItemKind, ast::Generics)> {
        let ident = self.parse_ident()?;
        let mut generics = self.parse_generics()?;

        // Parse optional colon and param bounds.
        let bounds = if self.eat(&token::Colon) {
            self.parse_generic_bounds()?
        } else {
            Vec::new()
        };
        generics.where_clause = self.parse_where_clause()?;

        // Optional default: `type Foo = Ty;`.
        let default = if self.eat(&token::Eq) {
            Some(self.parse_ty()?)
        } else {
            None
        };
        self.expect(&token::Semi)?;

        Ok((ident, TraitItemKind::Type(bounds, default), generics))
    }

    /// Parses (possibly empty) list of lifetime and type parameters, possibly including
    /// trailing comma and erroneous trailing attributes.
    crate fn parse_generic_params(&mut self) -> PResult<'a, Vec<ast::GenericParam>> {
        let mut params = Vec::new();
        // Tracks ordering: lifetimes declared after a type parameter are an
        // error (reported below), though still collected.
        let mut seen_ty_param = false;
        loop {
            let attrs = self.parse_outer_attributes()?;
            if self.check_lifetime() {
                let lifetime = self.expect_lifetime();
                // Parse lifetime parameter.
                let bounds = if self.eat(&token::Colon) {
                    self.parse_lt_param_bounds()
                } else {
                    Vec::new()
                };
                params.push(ast::GenericParam {
                    ident: lifetime.ident,
                    id: lifetime.id,
                    attrs: attrs.into(),
                    bounds,
                    kind: ast::GenericParamKind::Lifetime,
                });
                if seen_ty_param {
                    self.span_err(self.prev_span,
                        "lifetime parameters must be declared prior to type parameters");
                }
            } else if self.check_ident() {
                // Parse type parameter.
                params.push(self.parse_ty_param(attrs)?);
                seen_ty_param = true;
            } else {
                // Check for trailing attributes and stop parsing.
                if !attrs.is_empty() {
                    // Attributes were parsed but no parameter follows them —
                    // report them against the kind of the last-seen parameter.
                    let param_kind = if seen_ty_param { "type" } else { "lifetime" };
                    self.span_err(attrs[0].span,
                        &format!("trailing attribute after {} parameters", param_kind));
                }
                break
            }

            if !self.eat(&token::Comma) {
                break
            }
        }
        Ok(params)
    }

    /// Parse a set of optional generic type parameter declarations. Where
    /// clauses are not parsed here, and must be added later via
    /// `parse_where_clause()`.
    ///
    /// matches generics = ( ) | ( < > ) | ( < typaramseq ( , )? > ) | ( < lifetimes ( , )? > )
    ///                  | ( < lifetimes , typaramseq ( , )? > )
    /// where   typaramseq = ( typaram ) | ( typaram , typaramseq )
    fn parse_generics(&mut self) -> PResult<'a, ast::Generics> {
        maybe_whole!(self, NtGenerics, |x| x);

        let span_lo = self.span;
        if self.eat_lt() {
            let params = self.parse_generic_params()?;
            self.expect_gt()?;
            Ok(ast::Generics {
                params,
                // Empty placeholder; the caller fills in the real where
                // clause via `parse_where_clause()` later.
                where_clause: WhereClause {
                    id: ast::DUMMY_NODE_ID,
                    predicates: Vec::new(),
                    span: syntax_pos::DUMMY_SP,
                },
                span: span_lo.to(self.prev_span),
            })
        } else {
            Ok(ast::Generics::default())
        }
    }

    /// Parses (possibly empty) list of lifetime and type arguments and associated type bindings,
    /// possibly including trailing comma.
    fn parse_generic_args(&mut self) -> PResult<'a, (Vec<GenericArg>, Vec<TypeBinding>)> {
        let mut args = Vec::new();
        let mut bindings = Vec::new();
        // Ordering flags: lifetimes must precede types, and types must
        // precede associated-type bindings (errors reported below).
        let mut seen_type = false;
        let mut seen_binding = false;
        loop {
            if self.check_lifetime() && self.look_ahead(1, |t| !t.is_like_plus()) {
                // Parse lifetime argument.
                args.push(GenericArg::Lifetime(self.expect_lifetime()));
                if seen_type || seen_binding {
                    self.span_err(self.prev_span,
                        "lifetime parameters must be declared prior to type parameters");
                }
            } else if self.check_ident() && self.look_ahead(1, |t| t == &token::Eq) {
                // Parse associated type binding.
                let lo = self.span;
                let ident = self.parse_ident()?;
                self.bump(); // consume the `=` checked by the look-ahead above
                let ty = self.parse_ty()?;
                bindings.push(TypeBinding {
                    id: ast::DUMMY_NODE_ID,
                    ident,
                    ty,
                    span: lo.to(self.prev_span),
                });
                seen_binding = true;
            } else if self.check_type() {
                // Parse type argument.
                let ty_param = self.parse_ty()?;
                if seen_binding {
                    self.span_err(ty_param.span,
                        "type parameters must be declared prior to associated type bindings");
                }
                args.push(GenericArg::Type(ty_param));
                seen_type = true;
            } else {
                break
            }

            if !self.eat(&token::Comma) {
                break
            }
        }
        Ok((args, bindings))
    }

    /// Parses an optional `where` clause and places it in `generics`.
    ///
    /// ```ignore (only-for-syntax-highlight)
    /// where T : Trait<U, V> + 'b, 'a : 'b
    /// ```
    fn parse_where_clause(&mut self) -> PResult<'a, WhereClause> {
        maybe_whole!(self, NtWhereClause, |x| x);

        let mut where_clause = WhereClause {
            id: ast::DUMMY_NODE_ID,
            predicates: Vec::new(),
            span: syntax_pos::DUMMY_SP,
        };

        if !self.eat_keyword(keywords::Where) {
            return Ok(where_clause);
        }
        let lo = self.prev_span;

        // We are considering adding generics to the `where` keyword as an alternative higher-rank
        // parameter syntax (as in `where<'a>` or `where<T>`. To avoid that being a breaking
        // change we parse those generics now, but report an error.
        if self.choose_generics_over_qpath() {
            let generics = self.parse_generics()?;
            self.span_err(generics.span,
                          "generic parameters on `where` clauses are reserved for future use");
        }

        loop {
            let lo = self.span;
            if self.check_lifetime() && self.look_ahead(1, |t| !t.is_like_plus()) {
                // Lifetime predicate: `'a: 'b + 'c`.
                let lifetime = self.expect_lifetime();
                // Bounds starting with a colon are mandatory, but possibly empty.
                self.expect(&token::Colon)?;
                let bounds = self.parse_lt_param_bounds();
                where_clause.predicates.push(ast::WherePredicate::RegionPredicate(
                    ast::WhereRegionPredicate {
                        span: lo.to(self.prev_span),
                        lifetime,
                        bounds,
                    }
                ));
            } else if self.check_type() {
                // Parse optional `for<'a, 'b>`.
// This `for` is parsed greedily and applies to the whole predicate, // the bounded type can have its own `for` applying only to it. // Example 1: for<'a> Trait1<'a>: Trait2<'a /*ok*/> // Example 2: (for<'a> Trait1<'a>): Trait2<'a /*not ok*/> // Example 3: for<'a> for<'b> Trait1<'a, 'b>: Trait2<'a /*ok*/, 'b /*not ok*/> let lifetime_defs = self.parse_late_bound_lifetime_defs()?; // Parse type with mandatory colon and (possibly empty) bounds, // or with mandatory equality sign and the second type. let ty = self.parse_ty()?; if self.eat(&token::Colon) { let bounds = self.parse_generic_bounds()?; where_clause.predicates.push(ast::WherePredicate::BoundPredicate( ast::WhereBoundPredicate { span: lo.to(self.prev_span), bound_generic_params: lifetime_defs, bounded_ty: ty, bounds, } )); // FIXME: Decide what should be used here, `=` or `==`. // FIXME: We are just dropping the binders in lifetime_defs on the floor here. } else if self.eat(&token::Eq) || self.eat(&token::EqEq) { let rhs_ty = self.parse_ty()?; where_clause.predicates.push(ast::WherePredicate::EqPredicate( ast::WhereEqPredicate { span: lo.to(self.prev_span), lhs_ty: ty, rhs_ty, id: ast::DUMMY_NODE_ID, } )); } else { return self.unexpected(); } } else { break } if !self.eat(&token::Comma) { break } } where_clause.span = lo.to(self.prev_span); Ok(where_clause) } fn parse_fn_args(&mut self, named_args: bool, allow_variadic: bool) -> PResult<'a, (Vec<Arg> , bool)> { let sp = self.span; let mut variadic = false; let args: Vec<Option<Arg>> = self.parse_unspanned_seq( &token::OpenDelim(token::Paren), &token::CloseDelim(token::Paren), SeqSep::trailing_allowed(token::Comma), |p| { if p.token == token::DotDotDot { p.bump(); variadic = true; if allow_variadic { if p.token != token::CloseDelim(token::Paren) { let span = p.span; p.span_err(span, "`...` must be last in argument list for variadic function"); } Ok(None) } else { let span = p.prev_span; if p.token == token::CloseDelim(token::Paren) { // continue parsing to 
present any further errors
                                p.struct_span_err(
                                    span,
                                    "only foreign functions are allowed to be variadic"
                                ).emit();
                                Ok(Some(dummy_arg(span)))
                            } else {
                               // this function definition looks beyond recovery, stop parsing
                                p.span_err(span,
                                           "only foreign functions are allowed to be variadic");
                                Ok(None)
                            }
                        }
                    } else {
                        match p.parse_arg_general(named_args) {
                            Ok(arg) => Ok(Some(arg)),
                            Err(mut e) => {
                                e.emit();
                                let lo = p.prev_span;
                                // Skip every token until next possible arg or end.
                                p.eat_to_tokens(&[&token::Comma, &token::CloseDelim(token::Paren)]);
                                // Create a placeholder argument for proper arg count (#34264).
                                let span = lo.to(p.prev_span);
                                Ok(Some(dummy_arg(span)))
                            }
                        }
                    }
                }
            )?;

        // Drop the `None` placeholders produced for `...` entries.
        let args: Vec<_> = args.into_iter().filter_map(|x| x).collect();

        if variadic && args.is_empty() {
            self.span_err(sp,
                          "variadic function must be declared with at least one named argument");
        }

        Ok((args, variadic))
    }

    /// Parse the argument list and result type of a function declaration
    fn parse_fn_decl(&mut self, allow_variadic: bool) -> PResult<'a, P<FnDecl>> {

        let (args, variadic) = self.parse_fn_args(true, allow_variadic)?;
        let ret_ty = self.parse_ret_ty(true)?;

        Ok(P(FnDecl {
            inputs: args,
            output: ret_ty,
            variadic,
        }))
    }

    /// Returns the parsed optional self argument and whether a self shortcut was used.
    fn parse_self_arg(&mut self) -> PResult<'a, Option<Arg>> {
        // Consume the current identifier token, rebuilding it on the current
        // span to keep its hygiene information intact.
        let expect_ident = |this: &mut Self| match this.token {
            // Preserve hygienic context.
            token::Ident(ident, _) =>
                { let span = this.span; this.bump(); Ident::new(ident.name, span) }
            _ => unreachable!()
        };
        // `self` at look-ahead `n` that is NOT the start of a path
        // (`self::...`), i.e. a genuine self parameter.
        let isolated_self = |this: &mut Self, n| {
            this.look_ahead(n, |t| t.is_keyword(keywords::SelfValue)) &&
            this.look_ahead(n + 1, |t| t != &token::ModSep)
        };

        // Parse optional self parameter of a method.
        // Only a limited set of initial token sequences is considered self parameters, anything
        // else is parsed as a normal function parameter list, so some lookahead is required.
        let eself_lo = self.span;
        // Each arm below either commits to a self parameter (bumping past its
        // tokens) or returns `Ok(None)` WITHOUT consuming anything, so the
        // caller can re-parse the tokens as an ordinary argument.
        let (eself, eself_ident, eself_hi) = match self.token {
            token::BinOp(token::And) => {
                // &self
                // &mut self
                // &'lt self
                // &'lt mut self
                // &not_self
                (if isolated_self(self, 1) {
                    self.bump();
                    SelfKind::Region(None, Mutability::Immutable)
                } else if self.look_ahead(1, |t| t.is_keyword(keywords::Mut)) &&
                          isolated_self(self, 2) {
                    self.bump();
                    self.bump();
                    SelfKind::Region(None, Mutability::Mutable)
                } else if self.look_ahead(1, |t| t.is_lifetime()) &&
                          isolated_self(self, 2) {
                    self.bump();
                    let lt = self.expect_lifetime();
                    SelfKind::Region(Some(lt), Mutability::Immutable)
                } else if self.look_ahead(1, |t| t.is_lifetime()) &&
                          self.look_ahead(2, |t| t.is_keyword(keywords::Mut)) &&
                          isolated_self(self, 3) {
                    self.bump();
                    let lt = self.expect_lifetime();
                    self.bump();
                    SelfKind::Region(Some(lt), Mutability::Mutable)
                } else {
                    return Ok(None);
                }, expect_ident(self), self.prev_span)
            }
            token::BinOp(token::Star) => {
                // *self
                // *const self
                // *mut self
                // *not_self
                // Emit special error for `self` cases.
                (if isolated_self(self, 1) {
                    self.bump();
                    self.span_err(self.span, "cannot pass `self` by raw pointer");
                    SelfKind::Value(Mutability::Immutable)
                } else if self.look_ahead(1, |t| t.is_mutability()) &&
                          isolated_self(self, 2) {
                    self.bump();
                    self.bump();
                    self.span_err(self.span, "cannot pass `self` by raw pointer");
                    SelfKind::Value(Mutability::Immutable)
                } else {
                    return Ok(None);
                }, expect_ident(self), self.prev_span)
            }
            token::Ident(..)
=> {
                if isolated_self(self, 0) {
                    // self
                    // self: TYPE
                    let eself_ident = expect_ident(self);
                    let eself_hi = self.prev_span;
                    (if self.eat(&token::Colon) {
                        let ty = self.parse_ty()?;
                        SelfKind::Explicit(ty, Mutability::Immutable)
                    } else {
                        SelfKind::Value(Mutability::Immutable)
                    }, eself_ident, eself_hi)
                } else if self.token.is_keyword(keywords::Mut) &&
                          isolated_self(self, 1) {
                    // mut self
                    // mut self: TYPE
                    self.bump();
                    let eself_ident = expect_ident(self);
                    let eself_hi = self.prev_span;
                    (if self.eat(&token::Colon) {
                        let ty = self.parse_ty()?;
                        SelfKind::Explicit(ty, Mutability::Mutable)
                    } else {
                        SelfKind::Value(Mutability::Mutable)
                    }, eself_ident, eself_hi)
                } else {
                    return Ok(None);
                }
            }
            _ => return Ok(None),
        };

        let eself = source_map::respan(eself_lo.to(eself_hi), eself);
        Ok(Some(Arg::from_self(eself, eself_ident)))
    }

    /// Parse the parameter list and result type of a function that may have a `self` parameter.
    fn parse_fn_decl_with_self<F>(&mut self, parse_arg_fn: F) -> PResult<'a, P<FnDecl>>
        where F: FnMut(&mut Parser<'a>) -> PResult<'a, Arg>,
    {
        self.expect(&token::OpenDelim(token::Paren))?;

        // Parse optional self argument
        let self_arg = self.parse_self_arg()?;

        // Parse the rest of the function parameter list.
        let sep = SeqSep::trailing_allowed(token::Comma);
        let fn_inputs = if let Some(self_arg) = self_arg {
            if self.check(&token::CloseDelim(token::Paren)) {
                // `self` was the only parameter.
                vec![self_arg]
            } else if self.eat(&token::Comma) {
                // `self, ...` — prepend self to the remaining parameters.
                let mut fn_inputs = vec![self_arg];
                fn_inputs.append(&mut self.parse_seq_to_before_end(
                    &token::CloseDelim(token::Paren), sep, parse_arg_fn)?
                );
                fn_inputs
            } else {
                return self.unexpected();
            }
        } else {
            self.parse_seq_to_before_end(&token::CloseDelim(token::Paren), sep, parse_arg_fn)?
        };

        // Parse closing paren and return type.
        self.expect(&token::CloseDelim(token::Paren))?;
        Ok(P(FnDecl {
            inputs: fn_inputs,
            output: self.parse_ret_ty(true)?,
            variadic: false
        }))
    }

    // parse the |arg, arg| header on a lambda
    fn parse_fn_block_decl(&mut self) -> PResult<'a, P<FnDecl>> {
        let inputs_captures = {
            if self.eat(&token::OrOr) {
                // `||` — an empty closure parameter list lexed as one token.
                Vec::new()
            } else {
                self.expect(&token::BinOp(token::Or))?;
                let args = self.parse_seq_to_before_tokens(
                    &[&token::BinOp(token::Or), &token::OrOr],
                    SeqSep::trailing_allowed(token::Comma),
                    TokenExpectType::NoExpect,
                    |p| p.parse_fn_block_arg()
                )?;
                self.expect_or()?;
                args
            }
        };
        let output = self.parse_ret_ty(true)?;

        Ok(P(FnDecl {
            inputs: inputs_captures,
            output,
            variadic: false
        }))
    }

    /// Parse the name and optional generic types of a function header.
    fn parse_fn_header(&mut self) -> PResult<'a, (Ident, ast::Generics)> {
        let id = self.parse_ident()?;
        let generics = self.parse_generics()?;
        Ok((id, generics))
    }

    /// Assemble an `ast::Item` from already-parsed pieces. `tokens` is left
    /// `None` here; callers that captured the item's token stream fill it in.
    fn mk_item(&mut self, span: Span, ident: Ident, node: ItemKind, vis: Visibility,
               attrs: Vec<Attribute>) -> P<Item> {
        P(Item {
            ident,
            attrs,
            id: ast::DUMMY_NODE_ID,
            node,
            vis,
            span,
            tokens: None,
        })
    }

    /// Parse an item-position function declaration.
    fn parse_item_fn(&mut self,
                     unsafety: Unsafety,
                     asyncness: IsAsync,
                     constness: Spanned<Constness>,
                     abi: Abi)
                     -> PResult<'a, ItemInfo> {
        // Front matter (const/unsafe/async/extern) was consumed by the
        // caller; here: name+generics, args, where clause, then the body.
        let (ident, mut generics) = self.parse_fn_header()?;
        let decl = self.parse_fn_decl(false)?;
        generics.where_clause = self.parse_where_clause()?;
        let (inner_attrs, body) = self.parse_inner_attrs_and_block()?;
        let header = FnHeader { unsafety, asyncness, constness, abi };
        Ok((ident, ItemKind::Fn(decl, header, generics, body), Some(inner_attrs)))
    }

    /// true if we are looking at `const ID`, false for things like `const fn` etc
    fn is_const_item(&mut self) -> bool {
        self.token.is_keyword(keywords::Const) &&
            !self.look_ahead(1, |t| t.is_keyword(keywords::Fn)) &&
            !self.look_ahead(1, |t| t.is_keyword(keywords::Unsafe))
    }

    /// parses all the "front matter" for a `fn` declaration, up to
    /// and including the `fn` keyword:
    ///
    /// - `const fn`
    /// - `unsafe fn`
    /// - `const unsafe fn`
    /// - `extern fn`
    /// - etc
    fn parse_fn_front_matter(&mut self)
        -> PResult<'a, (
            Spanned<Constness>,
            Unsafety,
            IsAsync,
            Abi
        )>
    {
        let is_const_fn = self.eat_keyword(keywords::Const);
        let const_span = self.prev_span;
        let unsafety = self.parse_unsafety();
        let asyncness = self.parse_asyncness();
        // `const fn` never takes an explicit ABI here; otherwise an optional
        // `extern "abi"` is parsed, defaulting to "C" when the string is
        // omitted and to Rust when `extern` is absent.
        let (constness, unsafety, abi) = if is_const_fn {
            (respan(const_span, Constness::Const), unsafety, Abi::Rust)
        } else {
            let abi = if self.eat_keyword(keywords::Extern) {
                self.parse_opt_abi()?.unwrap_or(Abi::C)
            } else {
                Abi::Rust
            };
            (respan(self.prev_span, Constness::NotConst), unsafety, abi)
        };
        self.expect_keyword(keywords::Fn)?;
        Ok((constness, unsafety, asyncness, abi))
    }

    /// Parse an impl item.
    pub fn parse_impl_item(&mut self, at_end: &mut bool) -> PResult<'a, ImplItem> {
        maybe_whole!(self, NtImplItem, |x| x);
        let attrs = self.parse_outer_attributes()?;
        // Capture the token stream of the item while parsing it.
        let (mut item, tokens) = self.collect_tokens(|this| {
            this.parse_impl_item_(at_end, attrs)
        })?;

        // See `parse_item` for why this clause is here.
        if !item.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) {
            item.tokens = Some(tokens);
        }
        Ok(item)
    }

    fn parse_impl_item_(&mut self,
                        at_end: &mut bool,
                        mut attrs: Vec<Attribute>) -> PResult<'a, ImplItem> {
        let lo = self.span;
        let vis = self.parse_visibility(false)?;
        let defaultness = self.parse_defaultness();
        // Dispatch on the item kind: associated type, associated const, or
        // (the fallback) a method / macro invocation.
        let (name, node, generics) = if let Some(type_) = self.eat_type() {
            let (name, alias, generics) = type_?;
            let kind = match alias {
                AliasKind::Weak(typ) => ast::ImplItemKind::Type(typ),
                AliasKind::Existential(bounds) => ast::ImplItemKind::Existential(bounds),
            };
            (name, kind, generics)
        } else if self.is_const_item() {
            // This parses the grammar:
            //     ImplItemConst = "const" Ident ":" Ty "=" Expr ";"
            self.expect_keyword(keywords::Const)?;
            let name = self.parse_ident()?;
            self.expect(&token::Colon)?;
            let typ = self.parse_ty()?;
            self.expect(&token::Eq)?;
            let expr = self.parse_expr()?;
            self.expect(&token::Semi)?;
            (name, ast::ImplItemKind::Const(typ, expr), ast::Generics::default())
        } else {
            let (name, inner_attrs, generics, node) = self.parse_impl_method(&vis, at_end)?;
            attrs.extend(inner_attrs);
            (name, node, generics)
        };

        Ok(ImplItem {
            id: ast::DUMMY_NODE_ID,
            span: lo.to(self.prev_span),
            ident: name,
            vis,
            defaultness,
            attrs,
            generics,
            node,
            tokens: None,
        })
    }

    /// Emit (rather than return) the `pub` + macro diagnostic, if any.
    fn complain_if_pub_macro(&mut self, vis: &VisibilityKind, sp: Span) {
        if let Err(mut err) = self.complain_if_pub_macro_diag(vis, sp) {
            err.emit();
        }
    }

    /// Build an error for a macro invocation qualified with a visibility
    /// (`pub macro_rules! ...` etc.); returns `Ok(())` for inherited
    /// visibility.
    fn complain_if_pub_macro_diag(&mut self, vis: &VisibilityKind, sp: Span) -> PResult<'a, ()> {
        match *vis {
            VisibilityKind::Inherited => Ok(()),
            _ => {
                let is_macro_rules: bool = match self.token {
                    token::Ident(sid, _) => sid.name == Symbol::intern("macro_rules"),
                    _ => false,
                };
                if is_macro_rules {
                    let mut err = self.diagnostic()
                        .struct_span_err(sp, "can't qualify macro_rules invocation with `pub`");
                    err.span_suggestion_with_applicability(
                        sp,
                        "try exporting the macro",
                        "#[macro_export]".to_owned(),
                        Applicability::MaybeIncorrect // speculative
                    );
                    Err(err)
                }
else {
                    let mut err = self.diagnostic()
                        .struct_span_err(sp, "can't qualify macro invocation with `pub`");
                    err.help("try adjusting the macro to put `pub` inside the invocation");
                    Err(err)
                }
            }
        }
    }

    /// Build a "missing item keyword" diagnostic for an associated/foreign
    /// item that starts with a visibility but no `fn`/`type`/`const`
    /// (or `static` for extern blocks).
    fn missing_assoc_item_kind_err(&mut self, item_type: &str, prev_span: Span)
                                   -> DiagnosticBuilder<'a>
    {
        let expected_kinds = if item_type == "extern" {
            "missing `fn`, `type`, or `static`"
        } else {
            "missing `fn`, `type`, or `const`"
        };

        // Given this code `path(`, it seems like this is not
        // setting the visibility of a macro invocation, but rather
        // a mistyped method declaration.
        // Create a diagnostic pointing out that `fn` is missing.
        //
        // x |     pub path(&self) {
        //   |        ^ missing `fn`, `type`, or `const`
        //     pub  path(
        //            ^^ `sp` below will point to this
        let sp = prev_span.between(self.prev_span);
        let mut err = self.diagnostic().struct_span_err(
            sp,
            &format!("{} for {}-item declaration",
                     expected_kinds, item_type));
        err.span_label(sp, expected_kinds);
        err
    }

    /// Parse a method or a macro invocation in a trait impl.
    fn parse_impl_method(&mut self, vis: &Visibility, at_end: &mut bool)
                         -> PResult<'a, (Ident, Vec<Attribute>, ast::Generics, ast::ImplItemKind)> {
        // code copied from parse_macro_use_or_failure... abstraction!
        if let Some(mac) = self.parse_assoc_macro_invoc("impl", Some(vis), at_end)? {
            // Method macro.
            Ok((keywords::Invalid.ident(), vec![], ast::Generics::default(),
                ast::ImplItemKind::Macro(mac)))
        } else {
            let (constness, unsafety, asyncness, abi) = self.parse_fn_front_matter()?;
            let ident = self.parse_ident()?;
            let mut generics = self.parse_generics()?;
            let decl = self.parse_fn_decl_with_self(|p| p.parse_arg())?;
            generics.where_clause = self.parse_where_clause()?;
            // Signal to the caller that the body was (about to be) parsed, so
            // error recovery need not skip further tokens.
            *at_end = true;
            let (inner_attrs, body) = self.parse_inner_attrs_and_block()?;
            let header = ast::FnHeader { abi, unsafety, constness, asyncness };
            Ok((ident, inner_attrs, generics, ast::ImplItemKind::Method(
                ast::MethodSig { header, decl },
                body
            )))
        }
    }

    /// Parse `trait Foo { ...
}` or `trait Foo = Bar;`
    fn parse_item_trait(&mut self, is_auto: IsAuto, unsafety: Unsafety) -> PResult<'a, ItemInfo> {
        let ident = self.parse_ident()?;
        let mut tps = self.parse_generics()?;

        // Parse optional colon and supertrait bounds.
        let bounds = if self.eat(&token::Colon) {
            self.parse_generic_bounds()?
        } else {
            Vec::new()
        };

        if self.eat(&token::Eq) {
            // it's a trait alias
            let bounds = self.parse_generic_bounds()?;
            tps.where_clause = self.parse_where_clause()?;
            self.expect(&token::Semi)?;
            if unsafety != Unsafety::Normal {
                self.span_err(self.prev_span, "trait aliases cannot be unsafe");
            }
            Ok((ident, ItemKind::TraitAlias(tps, bounds), None))
        } else {
            // it's a normal trait
            tps.where_clause = self.parse_where_clause()?;
            self.expect(&token::OpenDelim(token::Brace))?;
            let mut trait_items = vec![];
            while !self.eat(&token::CloseDelim(token::Brace)) {
                let mut at_end = false;
                match self.parse_trait_item(&mut at_end) {
                    Ok(item) => trait_items.push(item),
                    Err(mut e) => {
                        // Report the bad trait item and skip ahead, unless the
                        // item parser already consumed up to a boundary.
                        e.emit();
                        if !at_end {
                            self.recover_stmt_(SemiColonMode::Break, BlockMode::Break);
                        }
                    }
                }
            }
            Ok((ident, ItemKind::Trait(is_auto, unsafety, tps, bounds, trait_items), None))
        }
    }

    fn choose_generics_over_qpath(&self) -> bool {
        // There's an ambiguity between generic parameters and qualified paths in impls.
        // If we see `<` it may start both, so we have to inspect some following tokens.
        // The following combinations can only start generics,
        // but not qualified paths (with one exception):
        //     `<` `>` - empty generic parameters
        //     `<` `#` - generic parameters with attributes
        //     `<` (LIFETIME|IDENT) `>` - single generic parameter
        //     `<` (LIFETIME|IDENT) `,` - first generic parameter in a list
        //     `<` (LIFETIME|IDENT) `:` - generic parameter with bounds
        //     `<` (LIFETIME|IDENT) `=` - generic parameter with a default
        // The only truly ambiguous case is
        //     `<` IDENT `>` `::` IDENT ...
        // we disambiguate it in favor of generics (`impl<T> ::absolute::Path<T> { ...
}`)
        // because this is what almost always expected in practice, qualified paths in impls
        // (`impl <Type>::AssocTy { ... }`) aren't even allowed by type checker at the moment.
        self.token == token::Lt &&
            (self.look_ahead(1, |t| t == &token::Pound || t == &token::Gt) ||
             self.look_ahead(1, |t| t.is_lifetime() || t.is_ident()) &&
                self.look_ahead(2, |t| t == &token::Gt || t == &token::Comma ||
                                       t == &token::Colon || t == &token::Eq))
    }

    /// Parse `{ <inner attrs> <impl items>* }` — the body of an `impl` block.
    fn parse_impl_body(&mut self) -> PResult<'a, (Vec<ImplItem>, Vec<Attribute>)> {
        self.expect(&token::OpenDelim(token::Brace))?;
        let attrs = self.parse_inner_attributes()?;

        let mut impl_items = Vec::new();
        while !self.eat(&token::CloseDelim(token::Brace)) {
            let mut at_end = false;
            match self.parse_impl_item(&mut at_end) {
                Ok(impl_item) => impl_items.push(impl_item),
                Err(mut err) => {
                    // Report the bad impl item and skip ahead, unless the item
                    // parser already consumed up to a boundary.
                    err.emit();
                    if !at_end {
                        self.recover_stmt_(SemiColonMode::Break, BlockMode::Break);
                    }
                }
            }
        }
        Ok((impl_items, attrs))
    }

    /// Parses an implementation item, `impl` keyword is already parsed.
    ///    impl<'a, T> TYPE { /* impl items */ }
    ///    impl<'a, T> TRAIT for TYPE { /* impl items */ }
    ///    impl<'a, T> !TRAIT for TYPE { /* impl items */ }
    /// We actually parse slightly more relaxed grammar for better error reporting and recovery.
    ///     `impl` GENERICS `!`? TYPE `for`? (TYPE | `..`) (`where` PREDICATES)? `{` BODY `}`
    ///     `impl` GENERICS `!`? TYPE (`where` PREDICATES)? `{` BODY `}`
    fn parse_item_impl(&mut self, unsafety: Unsafety, defaultness: Defaultness)
                       -> PResult<'a, ItemInfo> {
        // First, parse generic parameters if necessary.
        let mut generics = if self.choose_generics_over_qpath() {
            self.parse_generics()?
        } else {
            ast::Generics::default()
        };

        // Disambiguate `impl !Trait for Type { ... }` and `impl ! { ... }` for the never type.
    let polarity = if self.check(&token::Not) && self.look_ahead(1, |t| t.can_begin_type()) {
        self.bump(); // `!`
        ast::ImplPolarity::Negative
    } else {
        ast::ImplPolarity::Positive
    };

    // Parse both types and traits as a type, then reinterpret if necessary.
    let ty_first = self.parse_ty()?;

    // If `for` is missing we try to recover.
    let has_for = self.eat_keyword(keywords::For);
    let missing_for_span = self.prev_span.between(self.span);

    let ty_second = if self.token == token::DotDot {
        // We need to report this error after `cfg` expansion for compatibility reasons
        self.bump(); // `..`, do not add it to expected tokens
        Some(P(Ty { node: TyKind::Err, span: self.prev_span, id: ast::DUMMY_NODE_ID }))
    } else if has_for || self.token.can_begin_type() {
        Some(self.parse_ty()?)
    } else {
        None
    };

    generics.where_clause = self.parse_where_clause()?;

    let (impl_items, attrs) = self.parse_impl_body()?;

    let item_kind = match ty_second {
        Some(ty_second) => {
            // impl Trait for Type
            if !has_for {
                self.span_err(missing_for_span, "missing `for` in a trait impl");
            }

            let ty_first = ty_first.into_inner();
            let path = match ty_first.node {
                // This notably includes paths passed through `ty` macro fragments (#46438).
                TyKind::Path(None, path) => path,
                _ => {
                    // Not a trait path: report, then substitute a dummy path so parsing
                    // can continue and produce an AST node.
                    self.span_err(ty_first.span, "expected a trait, found type");
                    ast::Path::from_ident(Ident::new(keywords::Invalid.name(), ty_first.span))
                }
            };
            let trait_ref = TraitRef { path, ref_id: ty_first.id };

            ItemKind::Impl(unsafety, polarity, defaultness,
                           generics, Some(trait_ref), ty_second, impl_items)
        }
        None => {
            // impl Type
            ItemKind::Impl(unsafety, polarity, defaultness,
                           generics, None, ty_first, impl_items)
        }
    };

    Ok((keywords::Invalid.ident(), item_kind, Some(attrs)))
}

/// Parses `for<'a, ...>` late-bound lifetime binders, if present; otherwise
/// returns an empty list without consuming anything.
fn parse_late_bound_lifetime_defs(&mut self) -> PResult<'a, Vec<GenericParam>> {
    if self.eat_keyword(keywords::For) {
        self.expect_lt()?;
        let params = self.parse_generic_params()?;
        self.expect_gt()?;
        // We rely on AST validation to rule out invalid cases: There must not be type
        // parameters, and the lifetime parameters must not have bounds.
        Ok(params)
    } else {
        Ok(Vec::new())
    }
}

/// Parse struct Foo { ... }
fn parse_item_struct(&mut self) -> PResult<'a, ItemInfo> {
    let class_name = self.parse_ident()?;

    let mut generics = self.parse_generics()?;

    // There is a special case worth noting here, as reported in issue #17904.
    // If we are parsing a tuple struct it is the case that the where clause
    // should follow the field list. Like so:
    //
    // struct Foo<T>(T) where T: Copy;
    //
    // If we are parsing a normal record-style struct it is the case
    // that the where clause comes before the body, and after the generics.
    // So if we look ahead and see a brace or a where-clause we begin
    // parsing a record style struct.
    //
    // Otherwise if we look ahead and see a paren we parse a tuple-style
    // struct.

    let vdata = if self.token.is_keyword(keywords::Where) {
        generics.where_clause = self.parse_where_clause()?;
        if self.eat(&token::Semi) {
            // If we see a: `struct Foo<T> where T: Copy;` style decl.
            VariantData::Unit(ast::DUMMY_NODE_ID)
        } else {
            // If we see: `struct Foo<T> where T: Copy { ... }`
            VariantData::Struct(self.parse_record_struct_body()?, ast::DUMMY_NODE_ID)
        }
    // No `where` so: `struct Foo<T>;`
    } else if self.eat(&token::Semi) {
        VariantData::Unit(ast::DUMMY_NODE_ID)
    // Record-style struct definition
    } else if self.token == token::OpenDelim(token::Brace) {
        VariantData::Struct(self.parse_record_struct_body()?, ast::DUMMY_NODE_ID)
    // Tuple-style struct definition with optional where-clause.
    } else if self.token == token::OpenDelim(token::Paren) {
        let body = VariantData::Tuple(self.parse_tuple_struct_body()?, ast::DUMMY_NODE_ID);
        generics.where_clause = self.parse_where_clause()?;
        self.expect(&token::Semi)?;
        body
    } else {
        let token_str = self.this_token_to_string();
        let mut err = self.fatal(&format!(
            "expected `where`, `{{`, `(`, or `;` after struct name, found `{}`",
            token_str
        ));
        err.span_label(self.span, "expected `where`, `{`, `(`, or `;` after struct name");
        return Err(err);
    };

    Ok((class_name, ItemKind::Struct(vdata, generics), None))
}

/// Parse union Foo { ... }
fn parse_item_union(&mut self) -> PResult<'a, ItemInfo> {
    let class_name = self.parse_ident()?;

    let mut generics = self.parse_generics()?;

    // Unlike structs, unions only support the record (braced) form; a `where`
    // clause, if any, precedes the body.
    let vdata = if self.token.is_keyword(keywords::Where) {
        generics.where_clause = self.parse_where_clause()?;
        VariantData::Struct(self.parse_record_struct_body()?, ast::DUMMY_NODE_ID)
    } else if self.token == token::OpenDelim(token::Brace) {
        VariantData::Struct(self.parse_record_struct_body()?, ast::DUMMY_NODE_ID)
    } else {
        let token_str = self.this_token_to_string();
        let mut err = self.fatal(&format!(
            "expected `where` or `{{` after union name, found `{}`", token_str));
        err.span_label(self.span, "expected `where` or `{` after union name");
        return Err(err);
    };

    Ok((class_name, ItemKind::Union(vdata, generics), None))
}

/// Skips a delimited token tree for error recovery: consumes a matched
/// `delim`-pair (tracking nesting depth) and everything inside it.
fn consume_block(&mut self, delim: token::DelimToken) {
    let mut brace_depth = 0;
    if !self.eat(&token::OpenDelim(delim)) {
        return;
    }
    loop {
        if self.eat(&token::OpenDelim(delim)) {
            brace_depth += 1;
        } else if self.eat(&token::CloseDelim(delim)) {
            if brace_depth == 0 {
                return;
            } else {
                brace_depth -= 1;
                continue;
            }
        } else if self.eat(&token::Eof) || self.eat(&token::CloseDelim(token::NoDelim)) {
            // Never loop past the end of the token stream.
            return;
        } else {
            self.bump();
        }
    }
}

/// Parses the braced field list of a record struct/union/variant body.
fn parse_record_struct_body(&mut self) -> PResult<'a, Vec<StructField>> {
    let mut fields = Vec::new();
    if self.eat(&token::OpenDelim(token::Brace)) {
        while self.token != token::CloseDelim(token::Brace) {
            let field = self.parse_struct_decl_field().map_err(|e| {
                // Recover to a statement boundary before reporting, so the
                // remaining fields can still be parsed.
                self.recover_stmt();
                e
            });
            match field {
                Ok(field) => fields.push(field),
                Err(mut err) => {
                    err.emit();
                }
            }
        }
        self.eat(&token::CloseDelim(token::Brace));
    } else {
        let token_str = self.this_token_to_string();
        let mut err = self.fatal(&format!(
                "expected `where`, or `{{` after struct name, found `{}`", token_str));
        err.span_label(self.span, "expected `where`, or `{` after struct name");
        return Err(err);
    }

    Ok(fields)
}

/// Parses the parenthesized field list of a tuple struct.
fn parse_tuple_struct_body(&mut self) -> PResult<'a, Vec<StructField>> {
    // This is the case where we find `struct Foo<T>(T) where T: Copy;`
    // Unit like structs are handled in parse_item_struct function
    let fields = self.parse_unspanned_seq(
        &token::OpenDelim(token::Paren),
        &token::CloseDelim(token::Paren),
        SeqSep::trailing_allowed(token::Comma),
        |p| {
            let attrs = p.parse_outer_attributes()?;
            let lo = p.span;
            let vis = p.parse_visibility(true)?;
            let ty = p.parse_ty()?;
            Ok(StructField {
                span: lo.to(ty.span),
                vis,
                ident: None, // tuple fields are positional, not named
                id: ast::DUMMY_NODE_ID,
                ty,
                attrs,
            })
        })?;

    Ok(fields)
}

/// Parse a structure field declaration
fn parse_single_struct_field(&mut self,
                             lo: Span,
                             vis: Visibility,
                             attrs: Vec<Attribute> )
                             -> PResult<'a, StructField> {
    let mut seen_comma: bool = false;
    let a_var = self.parse_name_and_ty(lo, vis, attrs)?;
    if self.token == token::Comma {
        seen_comma = true;
    }
    match self.token {
        token::Comma => {
            self.bump();
        }
        token::CloseDelim(token::Brace) => {}
        token::DocComment(_) => {
            // A doc comment *after* a field (before `,`/`}`) documents nothing.
            let previous_span = self.prev_span;
            let mut err = self.span_fatal_err(self.span, Error::UselessDocComment);
            self.bump(); // consume the doc comment
            let comma_after_doc_seen = self.eat(&token::Comma);
            // `seen_comma` is always false, because we are inside doc block
            // condition is here to make code more readable
            if seen_comma == false && comma_after_doc_seen == true {
                seen_comma = true;
            }
            if comma_after_doc_seen || self.token == token::CloseDelim(token::Brace) {
                err.emit();
            } else {
                if seen_comma == false {
                    let sp = self.sess.source_map().next_point(previous_span);
                    err.span_suggestion_with_applicability(
                        sp,
                        "missing comma here",
                        ",".into(),
                        Applicability::MachineApplicable
                    );
                }
                return Err(err);
            }
        }
        _ => {
            let sp = self.sess.source_map().next_point(self.prev_span);
            let mut err = self.struct_span_err(sp, &format!("expected `,`, or `}}`, found `{}`",
                                                            self.this_token_to_string()));
            if self.token.is_ident() {
                // This is likely another field; emit the diagnostic and keep going
                err.span_suggestion(sp, "try adding a comma", ",".into());
                err.emit();
            } else {
                return Err(err)
            }
        }
    }
    Ok(a_var)
}

/// Parse an element of a struct definition
fn parse_struct_decl_field(&mut self) -> PResult<'a, StructField> {
    let attrs = self.parse_outer_attributes()?;
    let lo = self.span;
    let vis = self.parse_visibility(false)?;
    self.parse_single_struct_field(lo, vis, attrs)
}

/// Parse `pub`, `pub(crate)` and `pub(in path)` plus shortcuts `pub(self)` for `pub(in self)`
/// and `pub(super)` for `pub(in super)`. If the following element can't be a tuple (i.e. it's
/// a function definition, it's not a tuple struct field) and the contents within the parens
/// isn't valid, emit a proper diagnostic.
pub fn parse_visibility(&mut self, can_take_tuple: bool) -> PResult<'a, Visibility> {
    maybe_whole!(self, NtVis, |x| x);

    self.expected_tokens.push(TokenType::Keyword(keywords::Crate));
    if self.is_crate_vis() {
        self.bump(); // `crate`
        return Ok(respan(self.prev_span, VisibilityKind::Crate(CrateSugar::JustCrate)));
    }

    if !self.eat_keyword(keywords::Pub) {
        // We need a span for our `Spanned<VisibilityKind>`, but there's inherently no
        // keyword to grab a span from for inherited visibility; an empty span at the
        // beginning of the current token would seem to be the "Schelling span".
        return Ok(respan(self.span.shrink_to_lo(), VisibilityKind::Inherited))
    }
    let lo = self.prev_span;

    if self.check(&token::OpenDelim(token::Paren)) {
        // We don't `self.bump()` the `(` yet because this might be a struct definition where
        // `()` or a tuple might be allowed. For example, `struct Struct(pub (), pub (usize));`.
        // Because of this, we only `bump` the `(` if we're assured it is appropriate to do so
        // by the following tokens.
        if self.look_ahead(1, |t| t.is_keyword(keywords::Crate)) {
            // `pub(crate)`
            self.bump(); // `(`
            self.bump(); // `crate`
            self.expect(&token::CloseDelim(token::Paren))?; // `)`
            let vis = respan(
                lo.to(self.prev_span),
                VisibilityKind::Crate(CrateSugar::PubCrate),
            );
            return Ok(vis)
        } else if self.look_ahead(1, |t| t.is_keyword(keywords::In)) {
            // `pub(in path)`
            self.bump(); // `(`
            self.bump(); // `in`
            let path = self.parse_path(PathStyle::Mod)?; // `path`
            self.expect(&token::CloseDelim(token::Paren))?; // `)`
            let vis = respan(lo.to(self.prev_span), VisibilityKind::Restricted {
                path: P(path),
                id: ast::DUMMY_NODE_ID,
            });
            return Ok(vis)
        } else if self.look_ahead(2, |t| t == &token::CloseDelim(token::Paren)) &&
                  self.look_ahead(1, |t| t.is_keyword(keywords::Super) ||
                                         t.is_keyword(keywords::SelfValue))
        {
            // `pub(self)` or `pub(super)`
            self.bump(); // `(`
            let path = self.parse_path(PathStyle::Mod)?; // `super`/`self`
            self.expect(&token::CloseDelim(token::Paren))?; // `)`
            let vis = respan(lo.to(self.prev_span), VisibilityKind::Restricted {
                path: P(path),
                id: ast::DUMMY_NODE_ID,
            });
            return Ok(vis)
        } else if !can_take_tuple {
            // Provide this diagnostic if this is not a tuple struct
            // `pub(something) fn ...` or `struct X { pub(something) y: Z }`
            self.bump(); // `(`
            let msg = "incorrect visibility restriction";
            let suggestion = r##"some possible visibility restrictions are:
`pub(crate)`: visible only on the current crate
`pub(super)`: visible only in the current module's parent
`pub(in path::to::module)`: visible only on the specified path"##;
            let path = self.parse_path(PathStyle::Mod)?;
            let sp = self.prev_span;
            let help_msg = format!("make this visible only to module `{}` with `in`", path);
            self.expect(&token::CloseDelim(token::Paren))?; // `)`
            let mut err = struct_span_err!(self.sess.span_diagnostic, sp, E0704, "{}", msg);
            err.help(suggestion);
            err.span_suggestion_with_applicability(
                sp, &help_msg, format!("in {}", path), Applicability::MachineApplicable
            );
            err.emit(); // emit diagnostic, but continue with public visibility
        }
    }

    Ok(respan(lo, VisibilityKind::Public))
}

/// Parse defaultness: `default` or nothing.
fn parse_defaultness(&mut self) -> Defaultness {
    // `pub` is included for better error messages
    if self.check_keyword(keywords::Default) &&
       self.look_ahead(1, |t| t.is_keyword(keywords::Impl) ||
                              t.is_keyword(keywords::Const) ||
                              t.is_keyword(keywords::Fn) ||
                              t.is_keyword(keywords::Unsafe) ||
                              t.is_keyword(keywords::Extern) ||
                              t.is_keyword(keywords::Type) ||
                              t.is_keyword(keywords::Pub)) {
        self.bump(); // `default`
        Defaultness::Default
    } else {
        Defaultness::Final
    }
}

/// Given a termination token, parse all of the items in a module
fn parse_mod_items(&mut self, term: &token::Token, inner_lo: Span) -> PResult<'a, Mod> {
    let mut items = vec![];
    while let Some(item) = self.parse_item()? {
        items.push(item);
    }

    if !self.eat(term) {
        let token_str = self.this_token_to_string();
        let mut err = self.fatal(&format!("expected item, found `{}`", token_str));
        if token_str == ";" {
            let msg = "consider removing this semicolon";
            err.span_suggestion_short_with_applicability(
                self.span, msg, "".to_string(), Applicability::MachineApplicable
            );
            if !items.is_empty() {
                // Issue #51603
                let previous_item = &items[items.len()-1];
                let previous_item_kind_name = match previous_item.node {
                    // say "braced struct" because tuple-structs and
                    // braceless-empty-struct declarations do take a semicolon
                    ItemKind::Struct(..) => Some("braced struct"),
                    ItemKind::Enum(..) => Some("enum"),
                    ItemKind::Trait(..) => Some("trait"),
                    ItemKind::Union(..)
                        => Some("union"),
                    _ => None,
                };
                if let Some(name) = previous_item_kind_name {
                    err.help(&format!("{} declarations are not followed by a semicolon",
                                      name));
                }
            }
        } else {
            err.span_label(self.span, "expected item");
        }
        return Err(err);
    }

    // A dummy current span means there is no real "high" position; fall back
    // to the module's starting position.
    let hi = if self.span.is_dummy() {
        inner_lo
    } else {
        self.prev_span
    };

    Ok(ast::Mod {
        inner: inner_lo.to(hi),
        items,
    })
}

/// Parses the remainder of a `static`/`const` item after its keyword:
/// `IDENT: TY = EXPR;`. `m` is `Some(mutability)` for statics, `None` for consts.
fn parse_item_const(&mut self, m: Option<Mutability>) -> PResult<'a, ItemInfo> {
    let id = self.parse_ident()?;
    self.expect(&token::Colon)?;
    let ty = self.parse_ty()?;
    self.expect(&token::Eq)?;
    let e = self.parse_expr()?;
    self.expect(&token::Semi)?;
    let item = match m {
        Some(m) => ItemKind::Static(ty, m, e),
        None => ItemKind::Const(ty, e),
    };
    Ok((id, item, None))
}

/// Parse a `mod <foo> { ... }` or `mod <foo>;` item
fn parse_item_mod(&mut self, outer_attrs: &[Attribute]) -> PResult<'a, ItemInfo> {
    // Evaluate `cfg`/`cfg_attr` on the outer attributes up front so we can
    // decide whether to actually load an out-of-line module from disk.
    let (in_cfg, outer_attrs) = {
        let mut strip_unconfigured = ::config::StripUnconfigured {
            sess: self.sess,
            should_test: false, // irrelevant
            features: None, // don't perform gated feature checking
        };
        let outer_attrs = strip_unconfigured.process_cfg_attrs(outer_attrs.to_owned());
        (!self.cfg_mods || strip_unconfigured.in_cfg(&outer_attrs), outer_attrs)
    };

    let id_span = self.span;
    let id = self.parse_ident()?;
    if self.check(&token::Semi) {
        self.bump();
        if in_cfg && self.recurse_into_file_modules {
            // This mod is in an external file. Let's go get it!
            let ModulePathSuccess { path, directory_ownership, warn } =
                self.submod_path(id, &outer_attrs, id_span)?;
            let (module, mut attrs) =
                self.eval_src_mod(path, directory_ownership, id.to_string(), id_span)?;
            if warn {
                // Attach a synthetic attribute so later passes can warn about
                // the directory-ownership situation.
                let attr = Attribute {
                    id: attr::mk_attr_id(),
                    style: ast::AttrStyle::Outer,
                    path: ast::Path::from_ident(
                        Ident::from_str("warn_directory_ownership")),
                    tokens: TokenStream::empty(),
                    is_sugared_doc: false,
                    span: syntax_pos::DUMMY_SP,
                };
                attr::mark_known(&attr);
                attrs.push(attr);
            }
            Ok((id, module, Some(attrs)))
        } else {
            // Module is cfg'd out or file recursion is disabled: emit a placeholder.
            let placeholder = ast::Mod { inner: syntax_pos::DUMMY_SP, items: Vec::new() };
            Ok((id, ItemKind::Mod(placeholder), None))
        }
    } else {
        // Inline module body: parse it with this module's directory pushed,
        // restoring the previous directory afterwards.
        let old_directory = self.directory.clone();
        self.push_directory(id, &outer_attrs);

        self.expect(&token::OpenDelim(token::Brace))?;
        let mod_inner_lo = self.span;
        let attrs = self.parse_inner_attributes()?;
        let module = self.parse_mod_items(&token::CloseDelim(token::Brace), mod_inner_lo)?;

        self.directory = old_directory;
        Ok((id, ItemKind::Mod(module), Some(attrs)))
    }
}

/// Extends the parser's current directory with either the module's `#[path]`
/// attribute value or the module's own name.
fn push_directory(&mut self, id: Ident, attrs: &[Attribute]) {
    if let Some(path) = attr::first_attr_value_str_by_name(attrs, "path") {
        self.directory.path.to_mut().push(&path.as_str());
        self.directory.ownership = DirectoryOwnership::Owned { relative: None };
    } else {
        self.directory.path.to_mut().push(&id.as_str());
    }
}

/// Resolves a module's file path from its `#[path = "..."]` attribute, if any.
pub fn submod_path_from_attr(attrs: &[Attribute], dir_path: &Path) -> Option<PathBuf> {
    if let Some(s) = attr::first_attr_value_str_by_name(attrs, "path") {
        let s = s.as_str();

        // On windows, the base path might have the form
        // `\\?\foo\bar` in which case it does not tolerate
        // mixed `/` and `\` separators, so canonicalize
        // `/` to `\`.
        #[cfg(windows)]
        let s = s.replace("/", "\\");
        Some(dir_path.join(s))
    } else {
        None
    }
}

/// Returns either a path to a module, or .
pub fn default_submod_path(
    id: ast::Ident,
    relative: Option<ast::Ident>,
    dir_path: &Path,
    source_map: &SourceMap) -> ModulePath
{
    // If we're in a foo.rs file instead of a mod.rs file,
    // we need to look for submodules in
    // `./foo/<id>.rs` and `./foo/<id>/mod.rs` rather than
    // `./<id>.rs` and `./<id>/mod.rs`.
    let relative_prefix_string;
    let relative_prefix = if let Some(ident) = relative {
        relative_prefix_string = format!("{}{}", ident.as_str(), path::MAIN_SEPARATOR);
        &relative_prefix_string
    } else {
        ""
    };

    let mod_name = id.to_string();
    let default_path_str = format!("{}{}.rs", relative_prefix, mod_name);
    let secondary_path_str = format!("{}{}{}mod.rs",
                                     relative_prefix, mod_name, path::MAIN_SEPARATOR);
    let default_path = dir_path.join(&default_path_str);
    let secondary_path = dir_path.join(&secondary_path_str);
    let default_exists = source_map.file_exists(&default_path);
    let secondary_exists = source_map.file_exists(&secondary_path);

    // Exactly one candidate must exist; the other combinations are errors
    // reported after `cfg` processing.
    let result = match (default_exists, secondary_exists) {
        (true, false) => Ok(ModulePathSuccess {
            path: default_path,
            directory_ownership: DirectoryOwnership::Owned {
                relative: Some(id),
            },
            warn: false,
        }),
        (false, true) => Ok(ModulePathSuccess {
            path: secondary_path,
            directory_ownership: DirectoryOwnership::Owned {
                relative: None,
            },
            warn: false,
        }),
        (false, false) => Err(Error::FileNotFoundForModule {
            mod_name: mod_name.clone(),
            default_path: default_path_str,
            secondary_path: secondary_path_str,
            dir_path: dir_path.display().to_string(),
        }),
        (true, true) => Err(Error::DuplicatePaths {
            mod_name: mod_name.clone(),
            default_path: default_path_str,
            secondary_path: secondary_path_str,
        }),
    };

    ModulePath {
        name: mod_name,
        path_exists: default_exists || secondary_exists,
        result,
    }
}

/// Computes the file-system path for an out-of-line `mod id;`, honoring a
/// `#[path]` attribute first, then directory-ownership rules.
fn submod_path(&mut self,
               id: ast::Ident,
               outer_attrs: &[Attribute],
               id_sp: Span)
               -> PResult<'a, ModulePathSuccess> {
    if let Some(path) = Parser::submod_path_from_attr(outer_attrs, &self.directory.path) {
        return Ok(ModulePathSuccess {
            directory_ownership: match path.file_name().and_then(|s| s.to_str()) {
                // All `#[path]` files are treated as though they are a `mod.rs` file.
                // This means that `mod foo;` declarations inside `#[path]`-included
                // files are siblings,
                //
                // Note that this will produce weirdness when a file named `foo.rs` is
                // `#[path]` included and contains a `mod foo;` declaration.
                // If you encounter this, it's your own darn fault :P
                Some(_) => DirectoryOwnership::Owned { relative: None },
                _ => DirectoryOwnership::UnownedViaMod(true),
            },
            path,
            warn: false,
        });
    }

    let relative = match self.directory.ownership {
        DirectoryOwnership::Owned { relative } => {
            // Push the usage onto the list of non-mod.rs mod uses.
            // This is used later for feature-gate error reporting.
            if let Some(cur_file_ident) = relative {
                self.sess
                    .non_modrs_mods.borrow_mut()
                    .push((cur_file_ident, id_sp));
            }
            relative
        },
        DirectoryOwnership::UnownedViaBlock |
        DirectoryOwnership::UnownedViaMod(_) => None,
    };
    let paths = Parser::default_submod_path(
                    id, relative, &self.directory.path, self.sess.source_map());

    match self.directory.ownership {
        DirectoryOwnership::Owned { .. } => {
            paths.result.map_err(|err| self.span_fatal_err(id_sp, err))
        },
        DirectoryOwnership::UnownedViaBlock => {
            let msg =
                "Cannot declare a non-inline module inside a block \
                unless it has a path attribute";
            let mut err = self.diagnostic().struct_span_err(id_sp, msg);
            if paths.path_exists {
                let msg = format!("Maybe `use` the module `{}` instead of redeclaring it",
                                  paths.name);
                err.span_note(id_sp, &msg);
            }
            Err(err)
        }
        DirectoryOwnership::UnownedViaMod(warn) => {
            if warn {
                // Warn-only mode: accept the path but flag it for a warning.
                if let Ok(result) = paths.result {
                    return Ok(ModulePathSuccess { warn: true, ..result });
                }
            }
            let mut err = self.diagnostic().struct_span_err(id_sp,
                "cannot declare a new module at this location");
            if !id_sp.is_dummy() {
                let src_path = self.sess.source_map().span_to_filename(id_sp);
                if let FileName::Real(src_path) = src_path {
                    if let Some(stem) = src_path.file_stem() {
                        let mut dest_path = src_path.clone();
                        dest_path.set_file_name(stem);
                        dest_path.push("mod.rs");
                        err.span_note(id_sp,
                                      &format!("maybe move this module `{}` to its own \
                                                directory via `{}`", src_path.display(),
                                               dest_path.display()));
                    }
                }
            }
            if paths.path_exists {
                err.span_note(id_sp,
                              &format!("... or maybe `use` the module `{}` instead \
                                        of possibly redeclaring it",
                                       paths.name));
            }
            Err(err)
        }
    }
}

/// Read a module from a source file.
fn eval_src_mod(&mut self,
                path: PathBuf,
                directory_ownership: DirectoryOwnership,
                name: String,
                id_sp: Span)
                -> PResult<'a, (ast::ItemKind, Vec<Attribute> )> {
    // Detect `mod` cycles via the stack of files currently being included.
    let mut included_mod_stack = self.sess.included_mod_stack.borrow_mut();
    if let Some(i) = included_mod_stack.iter().position(|p| *p == path) {
        let mut err = String::from("circular modules: ");
        let len = included_mod_stack.len();
        for p in &included_mod_stack[i..
len] {
            err.push_str(&p.to_string_lossy());
            err.push_str(" -> ");
        }
        err.push_str(&path.to_string_lossy());
        return Err(self.span_fatal(id_sp, &err[..]));
    }
    included_mod_stack.push(path.clone());
    // Release the borrow before recursively parsing, which re-borrows the stack.
    drop(included_mod_stack);

    let mut p0 =
        new_sub_parser_from_file(self.sess, &path, directory_ownership, Some(name), id_sp);
    p0.cfg_mods = self.cfg_mods;
    let mod_inner_lo = p0.span;
    let mod_attrs = p0.parse_inner_attributes()?;
    let m0 = p0.parse_mod_items(&token::Eof, mod_inner_lo)?;
    self.sess.included_mod_stack.borrow_mut().pop();
    Ok((ast::ItemKind::Mod(m0), mod_attrs))
}

/// Parse a function declaration from a foreign module
fn parse_item_foreign_fn(&mut self, vis: ast::Visibility, lo: Span, attrs: Vec<Attribute>)
                         -> PResult<'a, ForeignItem> {
    self.expect_keyword(keywords::Fn)?;

    let (ident, mut generics) = self.parse_fn_header()?;
    let decl = self.parse_fn_decl(true)?;
    generics.where_clause = self.parse_where_clause()?;
    let hi = self.span;
    self.expect(&token::Semi)?;
    Ok(ast::ForeignItem {
        ident,
        attrs,
        node: ForeignItemKind::Fn(decl, generics),
        id: ast::DUMMY_NODE_ID,
        span: lo.to(hi),
        vis,
    })
}

/// Parse a static item from a foreign module.
/// Assumes that the `static` keyword is already parsed.
fn parse_item_foreign_static(&mut self, vis: ast::Visibility, lo: Span, attrs: Vec<Attribute>)
                             -> PResult<'a, ForeignItem> {
    let mutbl = self.eat_keyword(keywords::Mut);
    let ident = self.parse_ident()?;
    self.expect(&token::Colon)?;
    let ty = self.parse_ty()?;
    let hi = self.span;
    self.expect(&token::Semi)?;
    Ok(ForeignItem {
        ident,
        attrs,
        node: ForeignItemKind::Static(ty, mutbl),
        id: ast::DUMMY_NODE_ID,
        span: lo.to(hi),
        vis,
    })
}

/// Parse a type from a foreign module
fn parse_item_foreign_type(&mut self, vis: ast::Visibility, lo: Span, attrs: Vec<Attribute>)
                           -> PResult<'a, ForeignItem> {
    self.expect_keyword(keywords::Type)?;

    let ident = self.parse_ident()?;
    let hi = self.span;
    self.expect(&token::Semi)?;
    Ok(ast::ForeignItem {
        ident: ident,
        attrs: attrs,
        node: ForeignItemKind::Ty,
        id: ast::DUMMY_NODE_ID,
        span: lo.to(hi),
        vis: vis
    })
}

/// Parses a crate name in `extern crate ...`, additionally accepting
/// dash-separated names (`name-like-this`), which are rewritten to underscores
/// and reported with a machine-applicable suggestion.
fn parse_crate_name_with_dashes(&mut self) -> PResult<'a, ast::Ident> {
    let error_msg = "crate name using dashes are not valid in `extern crate` statements";
    let suggestion_msg = "if the original crate name uses dashes you need to use underscores \
                          in the code";
    let mut ident = self.parse_ident()?;
    let mut idents = vec![];
    let mut replacement = vec![];
    let mut fixed_crate_name = false;
    // Accept `extern crate name-like-this` for better diagnostics
    let dash = token::Token::BinOp(token::BinOpToken::Minus);
    if self.token == dash {
        // Do not include `-` as part of the expected tokens list
        while self.eat(&dash) {
            fixed_crate_name = true;
            replacement.push((self.prev_span, "_".to_string()));
            idents.push(self.parse_ident()?);
        }
    }
    if fixed_crate_name {
        let fixed_name_sp = ident.span.to(idents.last().unwrap().span);
        let mut fixed_name = format!("{}", ident.name);
        for part in idents {
            fixed_name.push_str(&format!("_{}", part.name));
        }
        ident = Ident::from_str(&fixed_name).with_span_pos(fixed_name_sp);

        let mut err = self.struct_span_err(fixed_name_sp, error_msg);
        err.span_label(fixed_name_sp, "dash-separated idents are not valid");
        err.multipart_suggestion(suggestion_msg, replacement);
        err.emit();
    }
    Ok(ident)
}

/// Parse extern crate links
///
/// # Examples
///
/// extern crate foo;
/// extern crate bar as foo;
fn parse_item_extern_crate(&mut self,
                           lo: Span,
                           visibility: Visibility,
                           attrs: Vec<Attribute>)
                           -> PResult<'a, P<Item>> {
    // Accept `extern crate name-like-this` for better diagnostics
    let orig_name = self.parse_crate_name_with_dashes()?;
    let (item_name, orig_name) = if let Some(rename) = self.parse_rename()? {
        (rename, Some(orig_name.name))
    } else {
        (orig_name, None)
    };
    self.expect(&token::Semi)?;

    let span = lo.to(self.prev_span);
    Ok(self.mk_item(span, item_name, ItemKind::ExternCrate(orig_name), visibility, attrs))
}

/// Parse `extern` for foreign ABIs
/// modules.
///
/// `extern` is expected to have been
/// consumed before calling this method
///
/// # Examples:
///
/// extern "C" {}
/// extern {}
fn parse_item_foreign_mod(&mut self,
                          lo: Span,
                          opt_abi: Option<Abi>,
                          visibility: Visibility,
                          mut attrs: Vec<Attribute>)
                          -> PResult<'a, P<Item>> {
    self.expect(&token::OpenDelim(token::Brace))?;

    // No ABI string means the default: `extern {}` is `extern "C" {}`.
    let abi = opt_abi.unwrap_or(Abi::C);

    attrs.extend(self.parse_inner_attributes()?);

    let mut foreign_items = vec![];
    while let Some(item) = self.parse_foreign_item()? {
        foreign_items.push(item);
    }
    self.expect(&token::CloseDelim(token::Brace))?;

    let prev_span = self.prev_span;
    let m = ast::ForeignMod {
        abi,
        items: foreign_items
    };
    let invalid = keywords::Invalid.ident();
    Ok(self.mk_item(lo.to(prev_span), invalid, ItemKind::ForeignMod(m), visibility, attrs))
}

/// Parse type Foo = Bar;
/// or
/// existential type Foo: Bar;
/// or
/// return None without modifying the parser state
fn eat_type(&mut self) -> Option<PResult<'a, (Ident, AliasKind, ast::Generics)>> {
    // This parses the grammar:
    //     Ident ["<"...">"] ["where" ...]
    //     ("=" | ":") Ty ";"
    if self.check_keyword(keywords::Type) ||
       self.check_keyword(keywords::Existential) &&
            self.look_ahead(1, |t| t.is_keyword(keywords::Type)) {
        let existential = self.eat_keyword(keywords::Existential);
        assert!(self.eat_keyword(keywords::Type));
        Some(self.parse_existential_or_alias(existential))
    } else {
        None
    }
}

/// Parse type alias or existential type
fn parse_existential_or_alias(
    &mut self,
    existential: bool,
) -> PResult<'a, (Ident, AliasKind, ast::Generics)> {
    let ident = self.parse_ident()?;
    let mut tps = self.parse_generics()?;
    tps.where_clause = self.parse_where_clause()?;
    let alias = if existential {
        // `existential type Foo: Bound;`
        self.expect(&token::Colon)?;
        let bounds = self.parse_generic_bounds()?;
        AliasKind::Existential(bounds)
    } else {
        // `type Foo = Ty;`
        self.expect(&token::Eq)?;
        let ty = self.parse_ty()?;
        AliasKind::Weak(ty)
    };
    self.expect(&token::Semi)?;
    Ok((ident, alias, tps))
}

/// Parse the part of an "enum" decl following the '{'
fn parse_enum_def(&mut self, _generics: &ast::Generics) -> PResult<'a, EnumDef> {
    let mut variants = Vec::new();
    let mut all_nullary = true;
    // Span of the last explicit discriminant seen, for the field-less check below.
    let mut any_disr = None;
    while self.token != token::CloseDelim(token::Brace) {
        let variant_attrs = self.parse_outer_attributes()?;
        let vlo = self.span;

        let struct_def;
        let mut disr_expr = None;
        let ident = self.parse_ident()?;
        if self.check(&token::OpenDelim(token::Brace)) {
            // Parse a struct variant.
            all_nullary = false;
            struct_def = VariantData::Struct(self.parse_record_struct_body()?,
                                             ast::DUMMY_NODE_ID);
        } else if self.check(&token::OpenDelim(token::Paren)) {
            all_nullary = false;
            struct_def = VariantData::Tuple(self.parse_tuple_struct_body()?,
                                            ast::DUMMY_NODE_ID);
        } else if self.eat(&token::Eq) {
            disr_expr = Some(AnonConst {
                id: ast::DUMMY_NODE_ID,
                value: self.parse_expr()?,
            });
            any_disr = disr_expr.as_ref().map(|c| c.value.span);
            struct_def = VariantData::Unit(ast::DUMMY_NODE_ID);
        } else {
            struct_def = VariantData::Unit(ast::DUMMY_NODE_ID);
        }

        let vr = ast::Variant_ {
            ident,
            attrs: variant_attrs,
            data: struct_def,
            disr_expr,
        };
        variants.push(respan(vlo.to(self.prev_span), vr));

        if !self.eat(&token::Comma) { break; }
    }
    self.expect(&token::CloseDelim(token::Brace))?;
    match any_disr {
        Some(disr_span) if !all_nullary =>
            self.span_err(disr_span,
                "discriminator values can only be used with a field-less enum"),
        _ => ()
    }

    Ok(ast::EnumDef { variants: variants })
}

/// Parse an "enum" declaration
fn parse_item_enum(&mut self) -> PResult<'a, ItemInfo> {
    let id = self.parse_ident()?;
    let mut generics = self.parse_generics()?;
    generics.where_clause = self.parse_where_clause()?;
    self.expect(&token::OpenDelim(token::Brace))?;

    let enum_definition = self.parse_enum_def(&generics).map_err(|e| {
        // On error, skip the rest of the enum body so the caller can continue.
        self.recover_stmt();
        self.eat(&token::CloseDelim(token::Brace));
        e
    })?;
    Ok((id, ItemKind::Enum(enum_definition, generics), None))
}

/// Parses a string as an ABI spec on an extern type or module. Consumes
/// the `extern` keyword, if one is found.
fn parse_opt_abi(&mut self) -> PResult<'a, Option<Abi>> {
    match self.token {
        token::Literal(token::Str_(s), suf) |
        token::Literal(token::StrRaw(s, _), suf) => {
            let sp = self.span;
            self.expect_no_suffix(sp, "ABI spec", suf);
            self.bump();
            match abi::lookup(&s.as_str()) {
                Some(abi) => Ok(Some(abi)),
                None => {
                    // Unknown ABI name: report and continue with `None`
                    // (callers fall back to the default ABI).
                    let prev_span = self.prev_span;
                    let mut err = struct_span_err!(
                        self.sess.span_diagnostic,
                        prev_span,
                        E0703,
                        "invalid ABI: found `{}`",
                        s);
                    err.span_label(prev_span, "invalid ABI");
                    err.help(&format!("valid ABIs: {}", abi::all_names().join(", ")));
                    err.emit();
                    Ok(None)
                }
            }
        }

        _ => Ok(None),
    }
}

/// Returns `true` if the upcoming `static` begins a static item rather than
/// a `static` closure (`static move? |...|`).
fn is_static_global(&mut self) -> bool {
    if self.check_keyword(keywords::Static) {
        // Check if this could be a closure
        !self.look_ahead(1, |token| {
            if token.is_keyword(keywords::Move) {
                return true;
            }
            match *token {
                token::BinOp(token::Or) | token::OrOr => true,
                _ => false,
            }
        })
    } else {
        false
    }
}

/// Parses one item, capturing the token stream it consumed and attaching it
/// to the item (when safe) for later procedural-macro use.
fn parse_item_(
    &mut self,
    attrs: Vec<Attribute>,
    macros_allowed: bool,
    attributes_allowed: bool,
) -> PResult<'a, Option<P<Item>>> {
    let (ret, tokens) = self.collect_tokens(|this| {
        this.parse_item_implementation(attrs, macros_allowed, attributes_allowed)
    })?;

    // Once we've parsed an item and recorded the tokens we got while
    // parsing we may want to store `tokens` into the item we're about to
    // return. Note, though, that we specifically didn't capture tokens
    // related to outer attributes. The `tokens` field here may later be
    // used with procedural macros to convert this item back into a token
    // stream, but during expansion we may be removing attributes as we go
    // along.
    //
    // If we've got inner attributes then the `tokens` we've got above holds
    // these inner attributes. If an inner attribute is expanded we won't
    // actually remove it from the token stream, so we'll just keep yielding
    // it (bad!). To work around this case for now we just avoid recording
    // `tokens` if we detect any inner attributes. This should help keep
    // expansion correct, but we should fix this bug one day!
    Ok(ret.map(|item| {
        item.map(|mut i| {
            if !i.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) {
                i.tokens = Some(tokens);
            }
            i
        })
    }))
}

/// Parse one of the items allowed by the flags.
/// NB: this function no longer parses the items inside an
/// extern crate.
fn parse_item_implementation(
    &mut self,
    attrs: Vec<Attribute>,
    macros_allowed: bool,
    attributes_allowed: bool,
) -> PResult<'a, Option<P<Item>>> {
    maybe_whole!(self, NtItem, |item| {
        let mut item = item.into_inner();
        let mut attrs = attrs;
        mem::swap(&mut item.attrs, &mut attrs);
        item.attrs.extend(attrs);
        Some(P(item))
    });

    let lo = self.span;

    let visibility = self.parse_visibility(false)?;

    if self.eat_keyword(keywords::Use) {
        // USE ITEM
        let item_ = ItemKind::Use(P(self.parse_use_tree()?));
        self.expect(&token::Semi)?;

        let span = lo.to(self.prev_span);
        let item = self.mk_item(span, keywords::Invalid.ident(), item_, visibility, attrs);
        return Ok(Some(item));
    }

    if self.check_keyword(keywords::Extern) && self.is_extern_non_path() {
        self.bump(); // `extern`
        if self.eat_keyword(keywords::Crate) {
            return Ok(Some(self.parse_item_extern_crate(lo, visibility, attrs)?));
        }

        let opt_abi = self.parse_opt_abi()?;

        if self.eat_keyword(keywords::Fn) {
            // EXTERN FUNCTION ITEM
            let fn_span = self.prev_span;
            let abi = opt_abi.unwrap_or(Abi::C);
            let (ident, item_, extra_attrs) =
                self.parse_item_fn(Unsafety::Normal,
                                   IsAsync::NotAsync,
                                   respan(fn_span, Constness::NotConst),
                                   abi)?;
            let prev_span = self.prev_span;
            let item = self.mk_item(lo.to(prev_span),
                                    ident,
                                    item_,
                                    visibility,
                                    maybe_append(attrs, extra_attrs));
            return Ok(Some(item));
        } else if self.check(&token::OpenDelim(token::Brace)) {
            return Ok(Some(self.parse_item_foreign_mod(lo, opt_abi, visibility, attrs)?));
        }

        self.unexpected()?;
    }

    if self.is_static_global() {
        self.bump();
        // STATIC ITEM
        let m = if self.eat_keyword(keywords::Mut) {
            Mutability::Mutable
        } else {
            Mutability::Immutable
        };
        let
(ident, item_, extra_attrs) = self.parse_item_const(Some(m))?; let prev_span = self.prev_span; let item = self.mk_item(lo.to(prev_span), ident, item_, visibility, maybe_append(attrs, extra_attrs)); return Ok(Some(item)); } if self.eat_keyword(keywords::Const) { let const_span = self.prev_span; if self.check_keyword(keywords::Fn) || (self.check_keyword(keywords::Unsafe) && self.look_ahead(1, |t| t.is_keyword(keywords::Fn))) { // CONST FUNCTION ITEM let unsafety = self.parse_unsafety(); self.bump(); let (ident, item_, extra_attrs) = self.parse_item_fn(unsafety, IsAsync::NotAsync, respan(const_span, Constness::Const), Abi::Rust)?; let prev_span = self.prev_span; let item = self.mk_item(lo.to(prev_span), ident, item_, visibility, maybe_append(attrs, extra_attrs)); return Ok(Some(item)); } // CONST ITEM if self.eat_keyword(keywords::Mut) { let prev_span = self.prev_span; self.diagnostic().struct_span_err(prev_span, "const globals cannot be mutable") .help("did you mean to declare a static?") .emit(); } let (ident, item_, extra_attrs) = self.parse_item_const(None)?; let prev_span = self.prev_span; let item = self.mk_item(lo.to(prev_span), ident, item_, visibility, maybe_append(attrs, extra_attrs)); return Ok(Some(item)); } // `unsafe async fn` or `async fn` if ( self.check_keyword(keywords::Unsafe) && self.look_ahead(1, |t| t.is_keyword(keywords::Async)) ) || ( self.check_keyword(keywords::Async) && self.look_ahead(1, |t| t.is_keyword(keywords::Fn)) ) { // ASYNC FUNCTION ITEM let unsafety = self.parse_unsafety(); self.expect_keyword(keywords::Async)?; self.expect_keyword(keywords::Fn)?; let fn_span = self.prev_span; let (ident, item_, extra_attrs) = self.parse_item_fn(unsafety, IsAsync::Async { closure_id: ast::DUMMY_NODE_ID, return_impl_trait_id: ast::DUMMY_NODE_ID, }, respan(fn_span, Constness::NotConst), Abi::Rust)?; let prev_span = self.prev_span; let item = self.mk_item(lo.to(prev_span), ident, item_, visibility, maybe_append(attrs, extra_attrs)); return 
Ok(Some(item)); } if self.check_keyword(keywords::Unsafe) && (self.look_ahead(1, |t| t.is_keyword(keywords::Trait)) || self.look_ahead(1, |t| t.is_keyword(keywords::Auto))) { // UNSAFE TRAIT ITEM self.bump(); // `unsafe` let is_auto = if self.eat_keyword(keywords::Trait) { IsAuto::No } else { self.expect_keyword(keywords::Auto)?; self.expect_keyword(keywords::Trait)?; IsAuto::Yes }; let (ident, item_, extra_attrs) = self.parse_item_trait(is_auto, Unsafety::Unsafe)?; let prev_span = self.prev_span; let item = self.mk_item(lo.to(prev_span), ident, item_, visibility, maybe_append(attrs, extra_attrs)); return Ok(Some(item)); } if self.check_keyword(keywords::Impl) || self.check_keyword(keywords::Unsafe) && self.look_ahead(1, |t| t.is_keyword(keywords::Impl)) || self.check_keyword(keywords::Default) && self.look_ahead(1, |t| t.is_keyword(keywords::Impl)) || self.check_keyword(keywords::Default) && self.look_ahead(1, |t| t.is_keyword(keywords::Unsafe)) { // IMPL ITEM let defaultness = self.parse_defaultness(); let unsafety = self.parse_unsafety(); self.expect_keyword(keywords::Impl)?; let (ident, item, extra_attrs) = self.parse_item_impl(unsafety, defaultness)?; let span = lo.to(self.prev_span); return Ok(Some(self.mk_item(span, ident, item, visibility, maybe_append(attrs, extra_attrs)))); } if self.check_keyword(keywords::Fn) { // FUNCTION ITEM self.bump(); let fn_span = self.prev_span; let (ident, item_, extra_attrs) = self.parse_item_fn(Unsafety::Normal, IsAsync::NotAsync, respan(fn_span, Constness::NotConst), Abi::Rust)?; let prev_span = self.prev_span; let item = self.mk_item(lo.to(prev_span), ident, item_, visibility, maybe_append(attrs, extra_attrs)); return Ok(Some(item)); } if self.check_keyword(keywords::Unsafe) && self.look_ahead(1, |t| *t != token::OpenDelim(token::Brace)) { // UNSAFE FUNCTION ITEM self.bump(); // `unsafe` // `{` is also expected after `unsafe`, in case of error, include it in the diagnostic self.check(&token::OpenDelim(token::Brace)); let 
abi = if self.eat_keyword(keywords::Extern) { self.parse_opt_abi()?.unwrap_or(Abi::C) } else { Abi::Rust }; self.expect_keyword(keywords::Fn)?; let fn_span = self.prev_span; let (ident, item_, extra_attrs) = self.parse_item_fn(Unsafety::Unsafe, IsAsync::NotAsync, respan(fn_span, Constness::NotConst), abi)?; let prev_span = self.prev_span; let item = self.mk_item(lo.to(prev_span), ident, item_, visibility, maybe_append(attrs, extra_attrs)); return Ok(Some(item)); } if self.eat_keyword(keywords::Mod) { // MODULE ITEM let (ident, item_, extra_attrs) = self.parse_item_mod(&attrs[..])?; let prev_span = self.prev_span; let item = self.mk_item(lo.to(prev_span), ident, item_, visibility, maybe_append(attrs, extra_attrs)); return Ok(Some(item)); } if let Some(type_) = self.eat_type() { let (ident, alias, generics) = type_?; // TYPE ITEM let item_ = match alias { AliasKind::Weak(ty) => ItemKind::Ty(ty, generics), AliasKind::Existential(bounds) => ItemKind::Existential(bounds, generics), }; let prev_span = self.prev_span; let item = self.mk_item(lo.to(prev_span), ident, item_, visibility, attrs); return Ok(Some(item)); } if self.eat_keyword(keywords::Enum) { // ENUM ITEM let (ident, item_, extra_attrs) = self.parse_item_enum()?; let prev_span = self.prev_span; let item = self.mk_item(lo.to(prev_span), ident, item_, visibility, maybe_append(attrs, extra_attrs)); return Ok(Some(item)); } if self.check_keyword(keywords::Trait) || (self.check_keyword(keywords::Auto) && self.look_ahead(1, |t| t.is_keyword(keywords::Trait))) { let is_auto = if self.eat_keyword(keywords::Trait) { IsAuto::No } else { self.expect_keyword(keywords::Auto)?; self.expect_keyword(keywords::Trait)?; IsAuto::Yes }; // TRAIT ITEM let (ident, item_, extra_attrs) = self.parse_item_trait(is_auto, Unsafety::Normal)?; let prev_span = self.prev_span; let item = self.mk_item(lo.to(prev_span), ident, item_, visibility, maybe_append(attrs, extra_attrs)); return Ok(Some(item)); } if self.eat_keyword(keywords::Struct) { 
// STRUCT ITEM let (ident, item_, extra_attrs) = self.parse_item_struct()?; let prev_span = self.prev_span; let item = self.mk_item(lo.to(prev_span), ident, item_, visibility, maybe_append(attrs, extra_attrs)); return Ok(Some(item)); } if self.is_union_item() { // UNION ITEM self.bump(); let (ident, item_, extra_attrs) = self.parse_item_union()?; let prev_span = self.prev_span; let item = self.mk_item(lo.to(prev_span), ident, item_, visibility, maybe_append(attrs, extra_attrs)); return Ok(Some(item)); } if let Some(macro_def) = self.eat_macro_def(&attrs, &visibility, lo)? { return Ok(Some(macro_def)); } // Verify whether we have encountered a struct or method definition where the user forgot to // add the `struct` or `fn` keyword after writing `pub`: `pub S {}` if visibility.node.is_pub() && self.check_ident() && self.look_ahead(1, |t| *t != token::Not) { // Space between `pub` keyword and the identifier // // pub S {} // ^^^ `sp` points here let sp = self.prev_span.between(self.span); let full_sp = self.prev_span.to(self.span); let ident_sp = self.span; if self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) { // possible public struct definition where `struct` was forgotten let ident = self.parse_ident().unwrap(); let msg = format!("add `struct` here to parse `{}` as a public struct", ident); let mut err = self.diagnostic() .struct_span_err(sp, "missing `struct` for struct definition"); err.span_suggestion_short_with_applicability( sp, &msg, " struct ".into(), Applicability::MaybeIncorrect // speculative ); return Err(err); } else if self.look_ahead(1, |t| *t == token::OpenDelim(token::Paren)) { let ident = self.parse_ident().unwrap(); self.consume_block(token::Paren); let (kw, kw_name, ambiguous) = if self.check(&token::RArrow) || self.check(&token::OpenDelim(token::Brace)) { ("fn", "method", false) } else if self.check(&token::Colon) { let kw = "struct"; (kw, kw, false) } else { ("fn` or `struct", "method or struct", true) }; let msg = format!("missing 
`{}` for {} definition", kw, kw_name); let mut err = self.diagnostic().struct_span_err(sp, &msg); if !ambiguous { let suggestion = format!("add `{}` here to parse `{}` as a public {}", kw, ident, kw_name); err.span_suggestion_short_with_applicability( sp, &suggestion, format!(" {} ", kw), Applicability::MachineApplicable ); } else { if let Ok(snippet) = self.sess.source_map().span_to_snippet(ident_sp) { err.span_suggestion_with_applicability( full_sp, "if you meant to call a macro, try", format!("{}!", snippet), // this is the `ambiguous` conditional branch Applicability::MaybeIncorrect ); } else { err.help("if you meant to call a macro, remove the `pub` \ and add a trailing `!` after the identifier"); } } return Err(err); } } self.parse_macro_use_or_failure(attrs, macros_allowed, attributes_allowed, lo, visibility) } /// Parse a foreign item. crate fn parse_foreign_item(&mut self) -> PResult<'a, Option<ForeignItem>> { maybe_whole!(self, NtForeignItem, |ni| Some(ni)); let attrs = self.parse_outer_attributes()?; let lo = self.span; let visibility = self.parse_visibility(false)?; // FOREIGN STATIC ITEM // Treat `const` as `static` for error recovery, but don't add it to expected tokens. 
if self.check_keyword(keywords::Static) || self.token.is_keyword(keywords::Const) { if self.token.is_keyword(keywords::Const) { self.diagnostic() .struct_span_err(self.span, "extern items cannot be `const`") .span_suggestion_with_applicability( self.span, "try using a static value", "static".to_owned(), Applicability::MachineApplicable ).emit(); } self.bump(); // `static` or `const` return Ok(Some(self.parse_item_foreign_static(visibility, lo, attrs)?)); } // FOREIGN FUNCTION ITEM if self.check_keyword(keywords::Fn) { return Ok(Some(self.parse_item_foreign_fn(visibility, lo, attrs)?)); } // FOREIGN TYPE ITEM if self.check_keyword(keywords::Type) { return Ok(Some(self.parse_item_foreign_type(visibility, lo, attrs)?)); } match self.parse_assoc_macro_invoc("extern", Some(&visibility), &mut false)? { Some(mac) => { Ok(Some( ForeignItem { ident: keywords::Invalid.ident(), span: lo.to(self.prev_span), id: ast::DUMMY_NODE_ID, attrs, vis: visibility, node: ForeignItemKind::Macro(mac), } )) } None => { if !attrs.is_empty() { self.expected_item_err(&attrs); } Ok(None) } } } /// This is the fall-through for parsing items. fn parse_macro_use_or_failure( &mut self, attrs: Vec<Attribute> , macros_allowed: bool, attributes_allowed: bool, lo: Span, visibility: Visibility ) -> PResult<'a, Option<P<Item>>> { if macros_allowed && self.token.is_path_start() { // MACRO INVOCATION ITEM let prev_span = self.prev_span; self.complain_if_pub_macro(&visibility.node, prev_span); let mac_lo = self.span; // item macro. let pth = self.parse_path(PathStyle::Mod)?; self.expect(&token::Not)?; // a 'special' identifier (like what `macro_rules!` uses) // is optional. We should eventually unify invoc syntax // and remove this. let id = if self.token.is_ident() { self.parse_ident()? 
} else { keywords::Invalid.ident() // no special identifier }; // eat a matched-delimiter token tree: let (delim, tts) = self.expect_delimited_token_tree()?; if delim != MacDelimiter::Brace { if !self.eat(&token::Semi) { self.span_err(self.prev_span, "macros that expand to items must either \ be surrounded with braces or followed by \ a semicolon"); } } let hi = self.prev_span; let mac = respan(mac_lo.to(hi), Mac_ { path: pth, tts, delim }); let item = self.mk_item(lo.to(hi), id, ItemKind::Mac(mac), visibility, attrs); return Ok(Some(item)); } // FAILURE TO PARSE ITEM match visibility.node { VisibilityKind::Inherited => {} _ => { return Err(self.span_fatal(self.prev_span, "unmatched visibility `pub`")); } } if !attributes_allowed && !attrs.is_empty() { self.expected_item_err(&attrs); } Ok(None) } /// Parse a macro invocation inside a `trait`, `impl` or `extern` block fn parse_assoc_macro_invoc(&mut self, item_kind: &str, vis: Option<&Visibility>, at_end: &mut bool) -> PResult<'a, Option<Mac>> { if self.token.is_path_start() && !self.is_extern_non_path() { let prev_span = self.prev_span; let lo = self.span; let pth = self.parse_path(PathStyle::Mod)?; if pth.segments.len() == 1 { if !self.eat(&token::Not) { return Err(self.missing_assoc_item_kind_err(item_kind, prev_span)); } } else { self.expect(&token::Not)?; } if let Some(vis) = vis { self.complain_if_pub_macro(&vis.node, prev_span); } *at_end = true; // eat a matched-delimiter token tree: let (delim, tts) = self.expect_delimited_token_tree()?; if delim != MacDelimiter::Brace { self.expect(&token::Semi)? } Ok(Some(respan(lo.to(self.prev_span), Mac_ { path: pth, tts, delim }))) } else { Ok(None) } } fn collect_tokens<F, R>(&mut self, f: F) -> PResult<'a, (R, TokenStream)> where F: FnOnce(&mut Self) -> PResult<'a, R> { // Record all tokens we parse when parsing this item. 
let mut tokens = Vec::new(); let prev_collecting = match self.token_cursor.frame.last_token { LastToken::Collecting(ref mut list) => { Some(mem::replace(list, Vec::new())) } LastToken::Was(ref mut last) => { tokens.extend(last.take()); None } }; self.token_cursor.frame.last_token = LastToken::Collecting(tokens); let prev = self.token_cursor.stack.len(); let ret = f(self); let last_token = if self.token_cursor.stack.len() == prev { &mut self.token_cursor.frame.last_token } else { &mut self.token_cursor.stack[prev].last_token }; // Pull our the toekns that we've collected from the call to `f` above let mut collected_tokens = match *last_token { LastToken::Collecting(ref mut v) => mem::replace(v, Vec::new()), LastToken::Was(_) => panic!("our vector went away?"), }; // If we're not at EOF our current token wasn't actually consumed by // `f`, but it'll still be in our list that we pulled out. In that case // put it back. let extra_token = if self.token != token::Eof { collected_tokens.pop() } else { None }; // If we were previously collecting tokens, then this was a recursive // call. In that case we need to record all the tokens we collected in // our parent list as well. To do that we push a clone of our stream // onto the previous list. 
let stream = collected_tokens.into_iter().collect::<TokenStream>(); match prev_collecting { Some(mut list) => { list.push(stream.clone()); list.extend(extra_token); *last_token = LastToken::Collecting(list); } None => { *last_token = LastToken::Was(extra_token); } } Ok((ret?, stream)) } pub fn parse_item(&mut self) -> PResult<'a, Option<P<Item>>> { let attrs = self.parse_outer_attributes()?; self.parse_item_(attrs, true, false) } /// `::{` or `::*` fn is_import_coupler(&mut self) -> bool { self.check(&token::ModSep) && self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace) || *t == token::BinOp(token::Star)) } /// Parse UseTree /// /// USE_TREE = [`::`] `*` | /// [`::`] `{` USE_TREE_LIST `}` | /// PATH `::` `*` | /// PATH `::` `{` USE_TREE_LIST `}` | /// PATH [`as` IDENT] fn parse_use_tree(&mut self) -> PResult<'a, UseTree> { let lo = self.span; let mut prefix = ast::Path { segments: Vec::new(), span: lo.shrink_to_lo() }; let kind = if self.check(&token::OpenDelim(token::Brace)) || self.check(&token::BinOp(token::Star)) || self.is_import_coupler() { // `use *;` or `use ::*;` or `use {...};` or `use ::{...};` if self.eat(&token::ModSep) { prefix.segments.push(PathSegment::crate_root(lo.shrink_to_lo())); } if self.eat(&token::BinOp(token::Star)) { UseTreeKind::Glob } else { UseTreeKind::Nested(self.parse_use_tree_list()?) } } else { // `use path::*;` or `use path::{...};` or `use path;` or `use path as bar;` prefix = self.parse_path(PathStyle::Mod)?; if self.eat(&token::ModSep) { if self.eat(&token::BinOp(token::Star)) { UseTreeKind::Glob } else { UseTreeKind::Nested(self.parse_use_tree_list()?) 
} } else { UseTreeKind::Simple(self.parse_rename()?, ast::DUMMY_NODE_ID, ast::DUMMY_NODE_ID) } }; Ok(UseTree { prefix, kind, span: lo.to(self.prev_span) }) } /// Parse UseTreeKind::Nested(list) /// /// USE_TREE_LIST = Ø | (USE_TREE `,`)* USE_TREE [`,`] fn parse_use_tree_list(&mut self) -> PResult<'a, Vec<(UseTree, ast::NodeId)>> { self.parse_unspanned_seq(&token::OpenDelim(token::Brace), &token::CloseDelim(token::Brace), SeqSep::trailing_allowed(token::Comma), |this| { Ok((this.parse_use_tree()?, ast::DUMMY_NODE_ID)) }) } fn parse_rename(&mut self) -> PResult<'a, Option<Ident>> { if self.eat_keyword(keywords::As) { match self.token { token::Ident(ident, false) if ident.name == keywords::Underscore.name() => { self.bump(); // `_` Ok(Some(ident.gensym())) } _ => self.parse_ident().map(Some), } } else { Ok(None) } } /// Parses a source module as a crate. This is the main /// entry point for the parser. pub fn parse_crate_mod(&mut self) -> PResult<'a, Crate> { let lo = self.span; Ok(ast::Crate { attrs: self.parse_inner_attributes()?, module: self.parse_mod_items(&token::Eof, lo)?, span: lo.to(self.span), }) } pub fn parse_optional_str(&mut self) -> Option<(Symbol, ast::StrStyle, Option<ast::Name>)> { let ret = match self.token { token::Literal(token::Str_(s), suf) => (s, ast::StrStyle::Cooked, suf), token::Literal(token::StrRaw(s, n), suf) => (s, ast::StrStyle::Raw(n), suf), _ => return None }; self.bump(); Some(ret) } pub fn parse_str(&mut self) -> PResult<'a, (Symbol, StrStyle)> { match self.parse_optional_str() { Some((s, style, suf)) => { let sp = self.prev_span; self.expect_no_suffix(sp, "string literal", suf); Ok((s, style)) } _ => { let msg = "expected string literal"; let mut err = self.fatal(msg); err.span_label(self.span, msg); Err(err) } } } } Rollup merge of #53585 - dtolnay:comment, r=Mark-Simulacrum Remove super old comment on function that parses items This comment was added more than 5 years ago in ab03c1e4221. 
As far as anyone reading this comment today needs to know, the function has never parsed items from inside an extern crate. // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use rustc_target::spec::abi::{self, Abi}; use ast::{AngleBracketedArgs, ParenthesisedArgs, AttrStyle, BareFnTy}; use ast::{GenericBound, TraitBoundModifier}; use ast::Unsafety; use ast::{Mod, AnonConst, Arg, Arm, Attribute, BindingMode, TraitItemKind}; use ast::Block; use ast::{BlockCheckMode, CaptureBy, Movability}; use ast::{Constness, Crate}; use ast::Defaultness; use ast::EnumDef; use ast::{Expr, ExprKind, RangeLimits}; use ast::{Field, FnDecl, FnHeader}; use ast::{ForeignItem, ForeignItemKind, FunctionRetTy}; use ast::{GenericParam, GenericParamKind}; use ast::GenericArg; use ast::{Ident, ImplItem, IsAsync, IsAuto, Item, ItemKind}; use ast::{Label, Lifetime, Lit, LitKind}; use ast::Local; use ast::MacStmtStyle; use ast::{Mac, Mac_, MacDelimiter}; use ast::{MutTy, Mutability}; use ast::{Pat, PatKind, PathSegment}; use ast::{PolyTraitRef, QSelf}; use ast::{Stmt, StmtKind}; use ast::{VariantData, StructField}; use ast::StrStyle; use ast::SelfKind; use ast::{TraitItem, TraitRef, TraitObjectSyntax}; use ast::{Ty, TyKind, TypeBinding, GenericBounds}; use ast::{Visibility, VisibilityKind, WhereClause, CrateSugar}; use ast::{UseTree, UseTreeKind}; use ast::{BinOpKind, UnOp}; use ast::{RangeEnd, RangeSyntax}; use {ast, attr}; use source_map::{self, SourceMap, Spanned, respan}; use syntax_pos::{self, Span, MultiSpan, BytePos, FileName, edition::Edition}; use errors::{self, 
Applicability, DiagnosticBuilder, DiagnosticId}; use parse::{self, SeqSep, classify, token}; use parse::lexer::TokenAndSpan; use parse::lexer::comments::{doc_comment_style, strip_doc_comment_decoration}; use parse::{new_sub_parser_from_file, ParseSess, Directory, DirectoryOwnership}; use util::parser::{AssocOp, Fixity}; use print::pprust; use ptr::P; use parse::PResult; use ThinVec; use tokenstream::{self, Delimited, ThinTokenStream, TokenTree, TokenStream}; use symbol::{Symbol, keywords}; use std::borrow::Cow; use std::cmp; use std::mem; use std::path::{self, Path, PathBuf}; use std::slice; #[derive(Debug)] /// Whether the type alias or associated type is a concrete type or an existential type pub enum AliasKind { /// Just a new name for the same type Weak(P<Ty>), /// Only trait impls of the type will be usable, not the actual type itself Existential(GenericBounds), } bitflags! { struct Restrictions: u8 { const STMT_EXPR = 1 << 0; const NO_STRUCT_LITERAL = 1 << 1; } } type ItemInfo = (Ident, ItemKind, Option<Vec<Attribute>>); /// How to parse a path. #[derive(Copy, Clone, PartialEq)] pub enum PathStyle { /// In some contexts, notably in expressions, paths with generic arguments are ambiguous /// with something else. For example, in expressions `segment < ....` can be interpreted /// as a comparison and `segment ( ....` can be interpreted as a function call. /// In all such contexts the non-path interpretation is preferred by default for practical /// reasons, but the path interpretation can be forced by the disambiguator `::`, e.g. /// `x<y>` - comparisons, `x::<y>` - unambiguously a path. Expr, /// In other contexts, notably in types, no ambiguity exists and paths can be written /// without the disambiguator, e.g. `x<y>` - unambiguously a path. /// Paths with disambiguators are still accepted, `x::<Y>` - unambiguously a path too. Type, /// A path with generic arguments disallowed, e.g. `foo::bar::Baz`, used in imports, /// visibilities or attributes. 
/// Technically, this variant is unnecessary and e.g. `Expr` can be used instead /// (paths in "mod" contexts have to be checked later for absence of generic arguments /// anyway, due to macros), but it is used to avoid weird suggestions about expected /// tokens when something goes wrong. Mod, } #[derive(Clone, Copy, PartialEq, Debug)] enum SemiColonMode { Break, Ignore, } #[derive(Clone, Copy, PartialEq, Debug)] enum BlockMode { Break, Ignore, } /// Possibly accept an `token::Interpolated` expression (a pre-parsed expression /// dropped into the token stream, which happens while parsing the result of /// macro expansion). Placement of these is not as complex as I feared it would /// be. The important thing is to make sure that lookahead doesn't balk at /// `token::Interpolated` tokens. macro_rules! maybe_whole_expr { ($p:expr) => { if let token::Interpolated(nt) = $p.token.clone() { match nt.0 { token::NtExpr(ref e) | token::NtLiteral(ref e) => { $p.bump(); return Ok((*e).clone()); } token::NtPath(ref path) => { $p.bump(); let span = $p.span; let kind = ExprKind::Path(None, (*path).clone()); return Ok($p.mk_expr(span, kind, ThinVec::new())); } token::NtBlock(ref block) => { $p.bump(); let span = $p.span; let kind = ExprKind::Block((*block).clone(), None); return Ok($p.mk_expr(span, kind, ThinVec::new())); } _ => {}, }; } } } /// As maybe_whole_expr, but for things other than expressions macro_rules! 
maybe_whole { ($p:expr, $constructor:ident, |$x:ident| $e:expr) => { if let token::Interpolated(nt) = $p.token.clone() { if let token::$constructor($x) = nt.0.clone() { $p.bump(); return Ok($e); } } }; } fn maybe_append(mut lhs: Vec<Attribute>, mut rhs: Option<Vec<Attribute>>) -> Vec<Attribute> { if let Some(ref mut rhs) = rhs { lhs.append(rhs); } lhs } #[derive(Debug, Clone, Copy, PartialEq)] enum PrevTokenKind { DocComment, Comma, Plus, Interpolated, Eof, Ident, Other, } trait RecoverQPath: Sized { const PATH_STYLE: PathStyle = PathStyle::Expr; fn to_ty(&self) -> Option<P<Ty>>; fn to_recovered(&self, qself: Option<QSelf>, path: ast::Path) -> Self; fn to_string(&self) -> String; } impl RecoverQPath for Ty { const PATH_STYLE: PathStyle = PathStyle::Type; fn to_ty(&self) -> Option<P<Ty>> { Some(P(self.clone())) } fn to_recovered(&self, qself: Option<QSelf>, path: ast::Path) -> Self { Self { span: path.span, node: TyKind::Path(qself, path), id: self.id } } fn to_string(&self) -> String { pprust::ty_to_string(self) } } impl RecoverQPath for Pat { fn to_ty(&self) -> Option<P<Ty>> { self.to_ty() } fn to_recovered(&self, qself: Option<QSelf>, path: ast::Path) -> Self { Self { span: path.span, node: PatKind::Path(qself, path), id: self.id } } fn to_string(&self) -> String { pprust::pat_to_string(self) } } impl RecoverQPath for Expr { fn to_ty(&self) -> Option<P<Ty>> { self.to_ty() } fn to_recovered(&self, qself: Option<QSelf>, path: ast::Path) -> Self { Self { span: path.span, node: ExprKind::Path(qself, path), id: self.id, attrs: self.attrs.clone() } } fn to_string(&self) -> String { pprust::expr_to_string(self) } } /* ident is handled by common.rs */ #[derive(Clone)] pub struct Parser<'a> { pub sess: &'a ParseSess, /// the current token: pub token: token::Token, /// the span of the current token: pub span: Span, /// the span of the previous token: meta_var_span: Option<Span>, pub prev_span: Span, /// the previous token kind prev_token_kind: PrevTokenKind, restrictions: 
Restrictions, /// Used to determine the path to externally loaded source files crate directory: Directory<'a>, /// Whether to parse sub-modules in other files. pub recurse_into_file_modules: bool, /// Name of the root module this parser originated from. If `None`, then the /// name is not known. This does not change while the parser is descending /// into modules, and sub-parsers have new values for this name. pub root_module_name: Option<String>, crate expected_tokens: Vec<TokenType>, token_cursor: TokenCursor, desugar_doc_comments: bool, /// Whether we should configure out of line modules as we parse. pub cfg_mods: bool, } #[derive(Clone)] struct TokenCursor { frame: TokenCursorFrame, stack: Vec<TokenCursorFrame>, } #[derive(Clone)] struct TokenCursorFrame { delim: token::DelimToken, span: Span, open_delim: bool, tree_cursor: tokenstream::Cursor, close_delim: bool, last_token: LastToken, } /// This is used in `TokenCursorFrame` above to track tokens that are consumed /// by the parser, and then that's transitively used to record the tokens that /// each parse AST item is created with. /// /// Right now this has two states, either collecting tokens or not collecting /// tokens. If we're collecting tokens we just save everything off into a local /// `Vec`. This should eventually though likely save tokens from the original /// token stream and just use slicing of token streams to avoid creation of a /// whole new vector. /// /// The second state is where we're passively not recording tokens, but the last /// token is still tracked for when we want to start recording tokens. This /// "last token" means that when we start recording tokens we'll want to ensure /// that this, the first token, is included in the output. /// /// You can find some more example usage of this in the `collect_tokens` method /// on the parser. 
#[derive(Clone)]
enum LastToken {
    /// Actively recording every consumed token into the vector.
    Collecting(Vec<TokenStream>),
    /// Not recording; only the most recently consumed token is remembered.
    Was(Option<TokenStream>),
}

impl TokenCursorFrame {
    /// Create a fresh frame for iterating over `delimited` at span `sp`.
    /// For `NoDelim` groups there are no delimiter tokens to emit, so the
    /// open/close flags start out already "done".
    fn new(sp: Span, delimited: &Delimited) -> Self {
        TokenCursorFrame {
            delim: delimited.delim,
            span: sp,
            open_delim: delimited.delim == token::NoDelim,
            tree_cursor: delimited.stream().into_trees(),
            close_delim: delimited.delim == token::NoDelim,
            last_token: LastToken::Was(None),
        }
    }
}

impl TokenCursor {
    /// Yield the next token, flattening delimited groups into explicit
    /// open/close delimiter tokens and popping finished frames off the stack.
    /// Returns `Eof` once every frame is exhausted.
    fn next(&mut self) -> TokenAndSpan {
        loop {
            let tree = if !self.frame.open_delim {
                // First, emit the synthetic opening delimiter of this frame.
                self.frame.open_delim = true;
                Delimited { delim: self.frame.delim, tts: TokenStream::empty().into() }
                    .open_tt(self.frame.span)
            } else if let Some(tree) = self.frame.tree_cursor.next() {
                tree
            } else if !self.frame.close_delim {
                // Interior exhausted: emit the synthetic closing delimiter.
                self.frame.close_delim = true;
                Delimited { delim: self.frame.delim, tts: TokenStream::empty().into() }
                    .close_tt(self.frame.span)
            } else if let Some(frame) = self.stack.pop() {
                // Frame fully consumed: resume the enclosing frame.
                self.frame = frame;
                continue
            } else {
                return TokenAndSpan { tok: token::Eof, sp: syntax_pos::DUMMY_SP }
            };

            // Record the consumed tree for token collection (see `LastToken`).
            match self.frame.last_token {
                LastToken::Collecting(ref mut v) => v.push(tree.clone().into()),
                LastToken::Was(ref mut t) => *t = Some(tree.clone().into()),
            }

            match tree {
                TokenTree::Token(sp, tok) => return TokenAndSpan { tok: tok, sp: sp },
                TokenTree::Delimited(sp, ref delimited) => {
                    // Descend into the group; its tokens come next.
                    let frame = TokenCursorFrame::new(sp, delimited);
                    self.stack.push(mem::replace(&mut self.frame, frame));
                }
            }
        }
    }

    /// Like `next`, but rewrites a consumed doc comment (`/// ...`) into the
    /// equivalent attribute form (`#[doc = r"..."]`, or `#![doc = ...]` for
    /// inner comments) and re-enters the cursor so the attribute's tokens are
    /// yielded next.
    fn next_desugared(&mut self) -> TokenAndSpan {
        let (sp, name) = match self.next() {
            TokenAndSpan { sp, tok: token::DocComment(name) } => (sp, name),
            tok => return tok,
        };

        let stripped = strip_doc_comment_decoration(&name.as_str());

        // Searches for the occurrences of `"#*` and returns the minimum number of `#`s
        // required to wrap the text.
        let mut num_of_hashes = 0;
        let mut count = 0;
        for ch in stripped.chars() {
            count = match ch {
                '"' => 1,
                '#' if count > 0 => count + 1,
                _ => 0,
            };
            num_of_hashes = cmp::max(num_of_hashes, count);
        }

        // Build `[doc = r##"..."##]` with just enough `#`s to contain the text.
        let body = TokenTree::Delimited(sp, Delimited {
            delim: token::Bracket,
            tts: [TokenTree::Token(sp, token::Ident(ast::Ident::from_str("doc"), false)),
                  TokenTree::Token(sp, token::Eq),
                  TokenTree::Token(sp, token::Literal(
                      token::StrRaw(Symbol::intern(&stripped), num_of_hashes), None))]
                .iter().cloned().collect::<TokenStream>().into(),
        });

        // Push a frame containing `#` (`!` for inner style) followed by the
        // bracketed body, so those tokens are produced before resuming.
        self.stack.push(mem::replace(&mut self.frame, TokenCursorFrame::new(sp, &Delimited {
            delim: token::NoDelim,
            tts: if doc_comment_style(&name.as_str()) == AttrStyle::Inner {
                [TokenTree::Token(sp, token::Pound), TokenTree::Token(sp, token::Not), body]
                    .iter().cloned().collect::<TokenStream>().into()
            } else {
                [TokenTree::Token(sp, token::Pound), body]
                    .iter().cloned().collect::<TokenStream>().into()
            },
        })));

        self.next()
    }
}

/// Categories of things the parser may expect at a position; rendered into
/// "expected one of ..." diagnostics.
#[derive(Clone, PartialEq)]
crate enum TokenType {
    Token(token::Token),
    Keyword(keywords::Keyword),
    Operator,
    Lifetime,
    Ident,
    Path,
    Type,
}

impl TokenType {
    /// Human-readable rendering used in "expected ..." error messages.
    fn to_string(&self) -> String {
        match *self {
            TokenType::Token(ref t) => format!("`{}`", pprust::token_to_string(t)),
            TokenType::Keyword(kw) => format!("`{}`", kw.name()),
            TokenType::Operator => "an operator".to_string(),
            TokenType::Lifetime => "lifetime".to_string(),
            TokenType::Ident => "identifier".to_string(),
            TokenType::Path => "path".to_string(),
            TokenType::Type => "type".to_string(),
        }
    }
}

/// Returns true if `IDENT t` can start a type - `IDENT::a::b`, `IDENT<u8, u8>`,
/// `IDENT<<u8 as Trait>::AssocTy>`.
///
/// Types can also be of the form `IDENT(u8, u8) -> u8`, however this assumes
/// that IDENT is not the ident of a fn trait
fn can_continue_type_after_non_fn_ident(t: &token::Token) -> bool {
    t == &token::ModSep || t == &token::Lt ||
    t == &token::BinOp(token::Shl)
}

/// Information about the path to a module.
pub struct ModulePath {
    /// The module's name as written in the source.
    name: String,
    /// Whether a candidate file for the module already exists on disk.
    path_exists: bool,
    /// The resolved location, or the error to report.
    pub result: Result<ModulePathSuccess, Error>,
}

pub struct ModulePathSuccess {
    /// File the module's contents were found in.
    pub path: PathBuf,
    /// How sub-modules of this module relate to this file's directory.
    pub directory_ownership: DirectoryOwnership,
    warn: bool,
}

/// Structured parse errors that carry enough data to build a rich diagnostic
/// later (see `span_err` below).
pub enum Error {
    /// Neither `mod_name.rs` nor `mod_name/mod.rs` was found.
    FileNotFoundForModule {
        mod_name: String,
        default_path: String,
        secondary_path: String,
        dir_path: String,
    },
    /// Both candidate files exist; the choice is ambiguous.
    DuplicatePaths {
        mod_name: String,
        default_path: String,
        secondary_path: String,
    },
    UselessDocComment,
    InclusiveRangeWithNoEnd,
}

impl Error {
    /// Convert this error into a diagnostic at span `sp`, attaching the
    /// appropriate error code and help text for each variant.
    fn span_err<S: Into<MultiSpan>>(self,
                                        sp: S,
                                        handler: &errors::Handler) -> DiagnosticBuilder {
        match self {
            Error::FileNotFoundForModule { ref mod_name,
                                           ref default_path,
                                           ref secondary_path,
                                           ref dir_path } => {
                let mut err = struct_span_err!(handler, sp, E0583,
                                               "file not found for module `{}`", mod_name);
                err.help(&format!("name the file either {} or {} inside the directory \"{}\"",
                                  default_path,
                                  secondary_path,
                                  dir_path));
                err
            }
            Error::DuplicatePaths { ref mod_name, ref default_path, ref secondary_path } => {
                let mut err = struct_span_err!(handler, sp, E0584,
                                               "file for module `{}` found at both {} and {}",
                                               mod_name,
                                               default_path,
                                               secondary_path);
                err.help("delete or rename one of them to remove the ambiguity");
                err
            }
            Error::UselessDocComment => {
                let mut err = struct_span_err!(handler, sp, E0585,
                                  "found a documentation comment that doesn't document anything");
                err.help("doc comments must come before what they document, maybe a comment was \
                          intended with `//`?");
                err
            }
            Error::InclusiveRangeWithNoEnd => {
                let mut err = struct_span_err!(handler, sp, E0586,
                                               "inclusive range with no end");
                err.help("inclusive ranges must be bounded at the end (`..=b` or `a..=b`)");
                err
            }
        }
    }
}

/// State of the left-hand side while parsing a binary expression: either we
/// have not started, we parsed only leading attributes, or the LHS is done.
#[derive(Debug)]
enum LhsExpr {
    NotYetParsed,
    AttributesParsed(ThinVec<Attribute>),
    AlreadyParsed(P<Expr>),
}

impl From<Option<ThinVec<Attribute>>> for LhsExpr {
    /// `Some(attrs)` becomes `AttributesParsed`; `None` means nothing was
    /// parsed yet.
    fn from(o: Option<ThinVec<Attribute>>) -> Self {
        if let Some(attrs) = o {
            LhsExpr::AttributesParsed(attrs)
        } else {
            LhsExpr::NotYetParsed
        }
    }
}

impl
From<P<Expr>> for LhsExpr {
    /// A fully parsed expression becomes `AlreadyParsed`.
    fn from(expr: P<Expr>) -> Self {
        LhsExpr::AlreadyParsed(expr)
    }
}

/// Create a placeholder argument.
fn dummy_arg(span: Span) -> Arg {
    let ident = Ident::new(keywords::Invalid.name(), span);
    let pat = P(Pat {
        id: ast::DUMMY_NODE_ID,
        node: PatKind::Ident(BindingMode::ByValue(Mutability::Immutable), ident, None),
        span,
    });
    // The type is an error placeholder; callers use this only for recovery.
    let ty = Ty {
        node: TyKind::Err,
        span,
        id: ast::DUMMY_NODE_ID
    };
    Arg { ty: P(ty), pat: pat, id: ast::DUMMY_NODE_ID }
}

/// Whether a sequence closer should be pushed onto `expected_tokens`
/// (`Expect`) or matched silently (`NoExpect`).
#[derive(Copy, Clone, Debug)]
enum TokenExpectType {
    Expect,
    NoExpect,
}

impl<'a> Parser<'a> {
    /// Construct a parser over `tokens`. If `directory` is `None`, the
    /// directory is derived from the span of the first token (when real);
    /// the first token is pre-fetched so `self.token` is immediately valid.
    pub fn new(sess: &'a ParseSess,
               tokens: TokenStream,
               directory: Option<Directory<'a>>,
               recurse_into_file_modules: bool,
               desugar_doc_comments: bool)
               -> Self {
        let mut parser = Parser {
            sess,
            // Placeholder until the first real token is fetched below.
            token: token::Whitespace,
            span: syntax_pos::DUMMY_SP,
            prev_span: syntax_pos::DUMMY_SP,
            meta_var_span: None,
            prev_token_kind: PrevTokenKind::Other,
            restrictions: Restrictions::empty(),
            recurse_into_file_modules,
            directory: Directory {
                path: Cow::from(PathBuf::new()),
                ownership: DirectoryOwnership::Owned { relative: None }
            },
            root_module_name: None,
            expected_tokens: Vec::new(),
            token_cursor: TokenCursor {
                // Wrap the whole stream in an undelimited frame.
                frame: TokenCursorFrame::new(syntax_pos::DUMMY_SP, &Delimited {
                    delim: token::NoDelim,
                    tts: tokens.into(),
                }),
                stack: Vec::new(),
            },
            desugar_doc_comments,
            cfg_mods: true,
        };

        let tok = parser.next_tok();
        parser.token = tok.tok;
        parser.span = tok.sp;

        if let Some(directory) = directory {
            parser.directory = directory;
        } else if !parser.span.is_dummy() {
            // Derive the module directory from the file containing the first token.
            if let FileName::Real(mut path) =
                    sess.source_map().span_to_unmapped_path(parser.span) {
                path.pop();
                parser.directory.path = Cow::from(path);
            }
        }

        parser.process_potential_macro_variable();
        parser
    }

    /// Pull the next token from the cursor, optionally desugaring doc
    /// comments into `#[doc]` attributes.
    fn next_tok(&mut self) -> TokenAndSpan {
        let mut next = if self.desugar_doc_comments {
            self.token_cursor.next_desugared()
        } else {
            self.token_cursor.next()
        };
        if next.sp.is_dummy() {
            // Tweak the location for better diagnostics, but keep syntactic context intact.
            next.sp = self.prev_span.with_ctxt(next.sp.ctxt());
        }
        next
    }

    /// Convert the current token to a string using self's reader
    pub fn this_token_to_string(&self) -> String {
        pprust::token_to_string(&self.token)
    }

    /// A short category label for the current token when it is a reserved
    /// identifier or (reserved) keyword; `None` for ordinary tokens.
    fn token_descr(&self) -> Option<&'static str> {
        Some(match &self.token {
            t if t.is_special_ident() => "reserved identifier",
            t if t.is_used_keyword() => "keyword",
            t if t.is_unused_keyword() => "reserved keyword",
            _ => return None,
        })
    }

    /// Render the current token for diagnostics, prefixed with its category
    /// when it has one (e.g. "keyword `fn`").
    fn this_token_descr(&self) -> String {
        if let Some(prefix) = self.token_descr() {
            format!("{} `{}`", prefix, self.this_token_to_string())
        } else {
            format!("`{}`", self.this_token_to_string())
        }
    }

    /// Fatal "unexpected token" error pointing at the *previous* token's span.
    fn unexpected_last<T>(&self, t: &token::Token) -> PResult<'a, T> {
        let token_str = pprust::token_to_string(t);
        Err(self.span_fatal(self.prev_span, &format!("unexpected token: `{}`", token_str)))
    }

    /// Report the current token as unexpected; always returns `Err` (the
    /// empty expectation lists make `expect_one_of` fail unconditionally).
    crate fn unexpected<T>(&mut self) -> PResult<'a, T> {
        match self.expect_one_of(&[], &[]) {
            Err(e) => Err(e),
            Ok(_) => unreachable!(),
        }
    }

    /// Expect and consume the token t. Signal an error if
    /// the next token is not t.
    pub fn expect(&mut self, t: &token::Token) -> PResult<'a, ()> {
        if self.expected_tokens.is_empty() {
            // Nothing else was expected here: build a simple two-token message.
            if self.token == *t {
                self.bump();
                Ok(())
            } else {
                let token_str = pprust::token_to_string(t);
                let this_token_str = self.this_token_to_string();
                let mut err = self.fatal(&format!("expected `{}`, found `{}`",
                                                  token_str,
                                                  this_token_str));
                let sp = if self.token == token::Token::Eof {
                    // EOF, don't want to point at the following char, but rather the last token
                    self.prev_span
                } else {
                    self.sess.source_map().next_point(self.prev_span)
                };
                let label_exp = format!("expected `{}`", token_str);
                let cm = self.sess.source_map();
                match (cm.lookup_line(self.span.lo()), cm.lookup_line(sp.lo())) {
                    (Ok(ref a), Ok(ref b)) if a.line == b.line => {
                        // When the spans are in the same line, it means that the only content
                        // between them is whitespace, point only at the found token.
                        err.span_label(self.span, label_exp);
                    }
                    _ => {
                        err.span_label(sp, label_exp);
                        err.span_label(self.span, "unexpected token");
                    }
                }
                Err(err)
            }
        } else {
            // Other expectations are pending: defer to the multi-token path so
            // they all appear in the message.
            self.expect_one_of(slice::from_ref(t), &[])
        }
    }

    /// Expect next token to be edible or inedible token. If edible,
    /// then consume it; if inedible, then return without consuming
    /// anything. Signal a fatal error if next token is unexpected.
    fn expect_one_of(&mut self,
                     edible: &[token::Token],
                     inedible: &[token::Token]) -> PResult<'a, ()>{
        // Render the expectation list as "`a`, `b`, or `c`".
        fn tokens_to_string(tokens: &[TokenType]) -> String {
            let mut i = tokens.iter();
            // This might be a sign we need a connect method on Iterator.
            let b = i.next()
                     .map_or("".to_string(), |t| t.to_string());
            i.enumerate().fold(b, |mut b, (i, a)| {
                if tokens.len() > 2 && i == tokens.len() - 2 {
                    b.push_str(", or ");
                } else if tokens.len() == 2 && i == tokens.len() - 2 {
                    b.push_str(" or ");
                } else {
                    b.push_str(", ");
                }
                b.push_str(&a.to_string());
                b
            })
        }
        if edible.contains(&self.token) {
            self.bump();
            Ok(())
        } else if inedible.contains(&self.token) {
            // leave it in the input
            Ok(())
        } else {
            // Combine the caller's tokens with everything checked since the
            // last bump, deduplicated and sorted for a stable message.
            let mut expected = edible.iter()
                .map(|x| TokenType::Token(x.clone()))
                .chain(inedible.iter().map(|x| TokenType::Token(x.clone())))
                .chain(self.expected_tokens.iter().cloned())
                .collect::<Vec<_>>();
            expected.sort_by_cached_key(|x| x.to_string());
            expected.dedup();
            let expect = tokens_to_string(&expected[..]);
            let actual = self.this_token_to_string();
            let (msg_exp, (label_sp, label_exp)) = if expected.len() > 1 {
                // Summarize very long lists instead of spelling them all out.
                let short_expect = if expected.len() > 6 {
                    format!("{} possible tokens", expected.len())
                } else {
                    expect.clone()
                };
                (format!("expected one of {}, found `{}`", expect, actual),
                 (self.sess.source_map().next_point(self.prev_span),
                  format!("expected one of {} here", short_expect)))
            } else if expected.is_empty() {
                (format!("unexpected token: `{}`", actual),
                 (self.prev_span, "unexpected token after this".to_string()))
            } else {
                (format!("expected {}, found `{}`", expect, actual),
                 (self.sess.source_map().next_point(self.prev_span),
                  format!("expected {} here", expect)))
            };
            let mut err = self.fatal(&msg_exp);
            let sp = if self.token == token::Token::Eof {
                // This is EOF, don't want to point at the following char, but rather the last token
                self.prev_span
            } else {
                label_sp
            };
            let cm = self.sess.source_map();
            match (cm.lookup_line(self.span.lo()), cm.lookup_line(sp.lo())) {
                (Ok(ref a), Ok(ref b)) if a.line == b.line => {
                    // When the spans are in the same line, it means that the only content between
                    // them is whitespace, point at the found token in that case:
                    //
                    // X |     () => { syntax error };
                    //   |                    ^^^^^ expected one of 8 possible tokens here
                    //
                    // instead of having:
                    //
                    // X |     () => { syntax error };
                    //   |                   -^^^^^ unexpected token
                    //   |                   |
                    //   |                   expected one of 8 possible tokens here
                    err.span_label(self.span, label_exp);
                }
                _ => {
                    err.span_label(sp, label_exp);
                    err.span_label(self.span, "unexpected token");
                }
            }
            Err(err)
        }
    }

    /// returns the span of expr, if it was not interpolated or the span of the interpolated token
    fn interpolated_or_expr_span(&self,
                                 expr: PResult<'a, P<Expr>>)
                                 -> PResult<'a, (Span, P<Expr>)> {
        expr.map(|e| {
            if self.prev_token_kind == PrevTokenKind::Interpolated {
                (self.prev_span, e)
            } else {
                (e.span, e)
            }
        })
    }

    /// Build (but do not emit) an "expected identifier" diagnostic for the
    /// current token, with a removal suggestion for a stray comma.
    fn expected_ident_found(&self) -> DiagnosticBuilder<'a> {
        let mut err = self.struct_span_err(self.span,
                                           &format!("expected identifier, found {}",
                                                    self.this_token_descr()));
        if let Some(token_descr) = self.token_descr() {
            err.span_label(self.span, format!("expected identifier, found {}", token_descr));
        } else {
            err.span_label(self.span, "expected identifier");
            if self.token == token::Comma && self.look_ahead(1, |t| t.is_ident()) {
                err.span_suggestion(self.span, "remove this comma", "".into());
            }
        }
        err
    }

    /// Parse an identifier, recovering (emit-and-continue) on reserved names.
    pub fn parse_ident(&mut self) -> PResult<'a, ast::Ident> {
        self.parse_ident_common(true)
    }

    /// Parse an identifier. With `recover`, a reserved identifier is reported
    /// but still accepted; otherwise it is a hard error.
    fn parse_ident_common(&mut self, recover: bool) -> PResult<'a, ast::Ident> {
        match self.token {
            token::Ident(ident, _) => {
                if
self.token.is_reserved_ident() {
                    let mut err = self.expected_ident_found();
                    if recover {
                        // Report but keep going, treating it as an identifier.
                        err.emit();
                    } else {
                        return Err(err);
                    }
                }
                let span = self.span;
                self.bump();
                Ok(Ident::new(ident.name, span))
            }
            _ => {
                Err(if self.prev_token_kind == PrevTokenKind::DocComment {
                        // A doc comment right before a non-item position is
                        // the likelier mistake; report that instead.
                        self.span_fatal_err(self.prev_span, Error::UselessDocComment)
                    } else {
                        self.expected_ident_found()
                    })
            }
        }
    }

    /// Check if the next token is `tok`, and return `true` if so.
    ///
    /// This method will automatically add `tok` to `expected_tokens` if `tok` is not
    /// encountered.
    crate fn check(&mut self, tok: &token::Token) -> bool {
        let is_present = self.token == *tok;
        if !is_present { self.expected_tokens.push(TokenType::Token(tok.clone())); }
        is_present
    }

    /// Consume token 'tok' if it exists. Returns true if the given
    /// token was present, false otherwise.
    pub fn eat(&mut self, tok: &token::Token) -> bool {
        let is_present = self.check(tok);
        if is_present { self.bump() }
        is_present
    }

    /// Check (without consuming) whether the current token is keyword `kw`,
    /// recording it as expected either way.
    fn check_keyword(&mut self, kw: keywords::Keyword) -> bool {
        self.expected_tokens.push(TokenType::Keyword(kw));
        self.token.is_keyword(kw)
    }

    /// If the next token is the given keyword, eat it and return
    /// true. Otherwise, return false.
    pub fn eat_keyword(&mut self, kw: keywords::Keyword) -> bool {
        if self.check_keyword(kw) {
            self.bump();
            true
        } else {
            false
        }
    }

    /// Like `eat_keyword`, but does not record the keyword in
    /// `expected_tokens` on failure (so it never appears in diagnostics).
    fn eat_keyword_noexpect(&mut self, kw: keywords::Keyword) -> bool {
        if self.token.is_keyword(kw) {
            self.bump();
            true
        } else {
            false
        }
    }

    /// If the given word is not a keyword, signal an error.
    /// If the next token is not the given word, signal an error.
    /// Otherwise, eat it.
    fn expect_keyword(&mut self, kw: keywords::Keyword) -> PResult<'a, ()> {
        if !self.eat_keyword(kw) {
            self.unexpected()
        } else {
            Ok(())
        }
    }

    /// Check (without consuming) whether the current token starts an
    /// identifier, recording the expectation on failure.
    fn check_ident(&mut self) -> bool {
        if self.token.is_ident() {
            true
        } else {
            self.expected_tokens.push(TokenType::Ident);
            false
        }
    }

    /// Check (without consuming) whether the current token can start a path.
    fn check_path(&mut self) -> bool {
        if self.token.is_path_start() {
            true
        } else {
            self.expected_tokens.push(TokenType::Path);
            false
        }
    }

    /// Check (without consuming) whether the current token can begin a type.
    fn check_type(&mut self) -> bool {
        if self.token.can_begin_type() {
            true
        } else {
            self.expected_tokens.push(TokenType::Type);
            false
        }
    }

    /// Expect and consume a `+`. if `+=` is seen, replace it with a `=`
    /// and continue. If a `+` is not seen, return false.
    ///
    /// This is using when token splitting += into +.
    /// See issue 47856 for an example of when this may occur.
    fn eat_plus(&mut self) -> bool {
        self.expected_tokens.push(TokenType::Token(token::BinOp(token::Plus)));
        match self.token {
            token::BinOp(token::Plus) => {
                self.bump();
                true
            }
            token::BinOpEq(token::Plus) => {
                // Split `+=`: consume the `+` and leave a `=` whose span
                // starts one byte in.
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                self.bump_with(token::Eq, span);
                true
            }
            _ => false,
        }
    }

    /// Checks to see if the next token is either `+` or `+=`.
    /// Otherwise returns false.
    fn check_plus(&mut self) -> bool {
        if self.token.is_like_plus() {
            true
        } else {
            self.expected_tokens.push(TokenType::Token(token::BinOp(token::Plus)));
            false
        }
    }

    /// Expect and consume an `&`. If `&&` is seen, replace it with a single
    /// `&` and continue. If an `&` is not seen, signal an error.
    fn expect_and(&mut self) -> PResult<'a, ()> {
        self.expected_tokens.push(TokenType::Token(token::BinOp(token::And)));
        match self.token {
            token::BinOp(token::And) => {
                self.bump();
                Ok(())
            }
            token::AndAnd => {
                // Split `&&` into `&` + `&` (span offset by one byte).
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                Ok(self.bump_with(token::BinOp(token::And), span))
            }
            _ => self.unexpected()
        }
    }

    /// Expect and consume an `|`. If `||` is seen, replace it with a single
    /// `|` and continue. If an `|` is not seen, signal an error.
    fn expect_or(&mut self) -> PResult<'a, ()> {
        self.expected_tokens.push(TokenType::Token(token::BinOp(token::Or)));
        match self.token {
            token::BinOp(token::Or) => {
                self.bump();
                Ok(())
            }
            token::OrOr => {
                // Split `||` into `|` + `|` (span offset by one byte).
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                Ok(self.bump_with(token::BinOp(token::Or), span))
            }
            _ => self.unexpected()
        }
    }

    /// Report an error if a literal of the given `kind` carries a suffix
    /// (e.g. `"foo"bar`); an empty suffix inside `Some` is a parser bug.
    fn expect_no_suffix(&self, sp: Span, kind: &str, suffix: Option<ast::Name>) {
        match suffix {
            None => {/* everything ok */}
            Some(suf) => {
                let text = suf.as_str();
                if text.is_empty() {
                    self.span_bug(sp, "found empty literal suffix in Some")
                }
                self.span_err(sp, &format!("{} with a suffix is invalid", kind));
            }
        }
    }

    /// Attempt to consume a `<`. If `<<` is seen, replace it with a single
    /// `<` and continue. If a `<` is not seen, return false.
    ///
    /// This is meant to be used when parsing generics on a path to get the
    /// starting token.
    fn eat_lt(&mut self) -> bool {
        self.expected_tokens.push(TokenType::Token(token::Lt));
        match self.token {
            token::Lt => {
                self.bump();
                true
            }
            token::BinOp(token::Shl) => {
                // Split `<<` into `<` + `<` (span offset by one byte).
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                self.bump_with(token::Lt, span);
                true
            }
            _ => false,
        }
    }

    /// Like `eat_lt`, but a missing `<` is a hard error.
    fn expect_lt(&mut self) -> PResult<'a, ()> {
        if !self.eat_lt() {
            self.unexpected()
        } else {
            Ok(())
        }
    }

    /// Expect and consume a GT. if a >> is seen, replace it
    /// with a single > and continue. If a GT is not seen,
    /// signal an error.
    fn expect_gt(&mut self) -> PResult<'a, ()> {
        self.expected_tokens.push(TokenType::Token(token::Gt));
        match self.token {
            token::Gt => {
                self.bump();
                Ok(())
            }
            token::BinOp(token::Shr) => {
                // Split `>>` into `>` + `>`.
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                Ok(self.bump_with(token::Gt, span))
            }
            token::BinOpEq(token::Shr) => {
                // Split `>>=` into `>` + `>=`.
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                Ok(self.bump_with(token::Ge, span))
            }
            token::Ge => {
                // Split `>=` into `>` + `=`.
                let span = self.span.with_lo(self.span.lo() + BytePos(1));
                Ok(self.bump_with(token::Eq, span))
            }
            _ => self.unexpected()
        }
    }

    /// Eat and discard tokens until one of `kets` is encountered. Respects token trees,
    /// passes through any errors encountered. Used for error recovery.
    fn eat_to_tokens(&mut self, kets: &[&token::Token]) {
        let handler = self.diagnostic();

        if let Err(ref mut err) = self.parse_seq_to_before_tokens(kets,
                                                                  SeqSep::none(),
                                                                  TokenExpectType::Expect,
                                                                  |p| Ok(p.parse_token_tree())) {
            handler.cancel(err);
        }
    }

    /// Parse a sequence, including the closing delimiter. The function
    /// f must consume tokens until reaching the next separator or
    /// closing bracket.
    pub fn parse_seq_to_end<T, F>(&mut self,
                                  ket: &token::Token,
                                  sep: SeqSep,
                                  f: F)
                                  -> PResult<'a, Vec<T>> where
        F: FnMut(&mut Parser<'a>) -> PResult<'a, T>,
    {
        let val = self.parse_seq_to_before_end(ket, sep, f)?;
        // Consume the closing delimiter itself.
        self.bump();
        Ok(val)
    }

    /// Parse a sequence, not including the closing delimiter. The function
    /// f must consume tokens until reaching the next separator or
    /// closing bracket.
    pub fn parse_seq_to_before_end<T, F>(&mut self,
                                         ket: &token::Token,
                                         sep: SeqSep,
                                         f: F)
                                         -> PResult<'a, Vec<T>>
        where F: FnMut(&mut Parser<'a>) -> PResult<'a, T>
    {
        self.parse_seq_to_before_tokens(&[ket], sep, TokenExpectType::Expect, f)
    }

    /// Core separated-sequence loop: parse elements with `f` until one of
    /// `kets` (or a close delimiter / EOF) is reached, recovering from both
    /// wrong and omitted separators. The closer is not consumed.
    fn parse_seq_to_before_tokens<T, F>(
        &mut self,
        kets: &[&token::Token],
        sep: SeqSep,
        expect: TokenExpectType,
        mut f: F,
    ) -> PResult<'a, Vec<T>>
        where F: FnMut(&mut Parser<'a>) -> PResult<'a, T>
    {
        let mut first: bool = true;
        let mut v = vec![];
        while !kets.iter().any(|k| {
                match expect {
                    TokenExpectType::Expect => self.check(k),
                    TokenExpectType::NoExpect => self.token == **k,
                }
            }) {
            // Never run past the end of the enclosing token tree.
            match self.token {
                token::CloseDelim(..) | token::Eof => break,
                _ => {}
            };
            if let Some(ref t) = sep.sep {
                if first {
                    // No separator before the first element.
                    first = false;
                } else {
                    if let Err(mut e) = self.expect(t) {
                        // Attempt to keep parsing if it was a similar separator
                        if let Some(ref tokens) = t.similar_tokens() {
                            if tokens.contains(&self.token) {
                                self.bump();
                            }
                        }
                        e.emit();
                        // Attempt to keep parsing if it was an omitted separator
                        match f(self) {
                            Ok(t) => {
                                v.push(t);
                                continue;
                            },
                            Err(mut e) => {
                                e.cancel();
                                break;
                            }
                        }
                    }
                }
            }
            // A trailing separator immediately before a closer ends the list.
            if sep.trailing_sep_allowed && kets.iter().any(|k| {
                match expect {
                    TokenExpectType::Expect => self.check(k),
                    TokenExpectType::NoExpect => self.token == **k,
                }
            }) {
                break;
            }

            let t = f(self)?;
            v.push(t);
        }

        Ok(v)
    }

    /// Parse a sequence, including the closing delimiter. The function
    /// f must consume tokens until reaching the next separator or
    /// closing bracket.
    fn parse_unspanned_seq<T, F>(&mut self,
                                 bra: &token::Token,
                                 ket: &token::Token,
                                 sep: SeqSep,
                                 f: F)
                                 -> PResult<'a, Vec<T>> where
        F: FnMut(&mut Parser<'a>) -> PResult<'a, T>,
    {
        self.expect(bra)?;
        let result = self.parse_seq_to_before_end(ket, sep, f)?;
        // Only consume the closer if it is actually present (error recovery
        // in the loop above may have stopped elsewhere).
        if self.token == *ket {
            self.bump();
        }
        Ok(result)
    }

    /// Advance the parser by one token
    pub fn bump(&mut self) {
        if self.prev_token_kind == PrevTokenKind::Eof {
            // Bumping after EOF is a bad sign, usually an infinite loop.
            self.bug("attempted to bump the parser past EOF (may be stuck in a loop)");
        }

        self.prev_span = self.meta_var_span.take().unwrap_or(self.span);

        // Record last token kind for possible error recovery.
        self.prev_token_kind = match self.token {
            token::DocComment(..) => PrevTokenKind::DocComment,
            token::Comma => PrevTokenKind::Comma,
            token::BinOp(token::Plus) => PrevTokenKind::Plus,
            token::Interpolated(..) => PrevTokenKind::Interpolated,
            token::Eof => PrevTokenKind::Eof,
            token::Ident(..) => PrevTokenKind::Ident,
            _ => PrevTokenKind::Other,
        };

        let next = self.next_tok();
        self.span = next.sp;
        self.token = next.tok;
        self.expected_tokens.clear();
        // check after each token
        self.process_potential_macro_variable();
    }

    /// Advance the parser using provided token as a next one. Use this when
    /// consuming a part of a token. For example a single `<` from `<<`.
    fn bump_with(&mut self, next: token::Token, span: Span) {
        self.prev_span = self.span.with_hi(span.lo());
        // It would be incorrect to record the kind of the current token, but
        // fortunately for tokens currently using `bump_with`, the
        // prev_token_kind will be of no use anyway.
        self.prev_token_kind = PrevTokenKind::Other;
        self.span = span;
        self.token = next;
        self.expected_tokens.clear();
    }

    /// Apply `f` to the token `dist` positions ahead without advancing.
    /// `dist == 0` is the current token; lookahead does not descend into
    /// delimited groups (a group is seen as its opening delimiter).
    pub fn look_ahead<R, F>(&self, dist: usize, f: F) -> R where
        F: FnOnce(&token::Token) -> R,
    {
        if dist == 0 {
            return f(&self.token)
        }

        f(&match self.token_cursor.frame.tree_cursor.look_ahead(dist - 1) {
            Some(tree) => match tree {
                TokenTree::Token(_, tok) => tok,
                TokenTree::Delimited(_, delimited) => token::OpenDelim(delimited.delim),
            },
            None => token::CloseDelim(self.token_cursor.frame.delim),
        })
    }

    /// Span of the token `dist` positions ahead; falls back to the nearest
    /// preceding tree's span when past the end of the current frame.
    fn look_ahead_span(&self, dist: usize) -> Span {
        if dist == 0 {
            return self.span
        }

        match self.token_cursor.frame.tree_cursor.look_ahead(dist - 1) {
            Some(TokenTree::Token(span, _)) | Some(TokenTree::Delimited(span, _)) => span,
            None => self.look_ahead_span(dist - 1),
        }
    }

    // Thin forwarders to the session's diagnostic handler, anchored at
    // `self.span` where no span is given.

    pub fn fatal(&self, m: &str) -> DiagnosticBuilder<'a> {
        self.sess.span_diagnostic.struct_span_fatal(self.span, m)
    }
    pub fn span_fatal<S: Into<MultiSpan>>(&self, sp: S, m: &str) -> DiagnosticBuilder<'a> {
        self.sess.span_diagnostic.struct_span_fatal(sp, m)
    }
    fn span_fatal_err<S: Into<MultiSpan>>(&self, sp: S, err: Error) -> DiagnosticBuilder<'a> {
        err.span_err(sp, self.diagnostic())
    }
    fn bug(&self, m: &str) -> ! {
        self.sess.span_diagnostic.span_bug(self.span, m)
    }
    fn span_err<S: Into<MultiSpan>>(&self, sp: S, m: &str) {
        self.sess.span_diagnostic.span_err(sp, m)
    }
    fn struct_span_err<S: Into<MultiSpan>>(&self, sp: S, m: &str) -> DiagnosticBuilder<'a> {
        self.sess.span_diagnostic.struct_span_err(sp, m)
    }
    crate fn span_bug<S: Into<MultiSpan>>(&self, sp: S, m: &str) -> ! {
        self.sess.span_diagnostic.span_bug(sp, m)
    }
    crate fn abort_if_errors(&self) {
        self.sess.span_diagnostic.abort_if_errors();
    }

    fn cancel(&self, err: &mut DiagnosticBuilder) {
        self.sess.span_diagnostic.cancel(err)
    }

    crate fn diagnostic(&self) -> &'a errors::Handler {
        &self.sess.span_diagnostic
    }

    /// Is the current token one of the keywords that signals a bare function
    /// type?
    fn token_is_bare_fn_keyword(&mut self) -> bool {
        self.check_keyword(keywords::Fn) ||
            self.check_keyword(keywords::Unsafe) ||
            self.check_keyword(keywords::Extern) && self.is_extern_non_path()
    }

    /// parse a TyKind::BareFn type:
    fn parse_ty_bare_fn(&mut self, generic_params: Vec<GenericParam>) -> PResult<'a, TyKind> {
        /*

        [unsafe] [extern "ABI"] fn (S) -> T
         ^~~~^           ^~~~^     ^~^    ^
           |               |        |     |
           |               |        |   Return type
           |               |  Argument types
           |               |
           |              ABI
        Function Style
        */

        let unsafety = self.parse_unsafety();
        let abi = if self.eat_keyword(keywords::Extern) {
            // `extern` with no explicit ABI string defaults to "C".
            self.parse_opt_abi()?.unwrap_or(Abi::C)
        } else {
            Abi::Rust
        };

        self.expect_keyword(keywords::Fn)?;
        let (inputs, variadic) = self.parse_fn_args(false, true)?;
        let ret_ty = self.parse_ret_ty(false)?;
        let decl = P(FnDecl {
            inputs,
            output: ret_ty,
            variadic,
        });
        Ok(TyKind::BareFn(P(BareFnTy {
            abi,
            unsafety,
            generic_params,
            decl,
        })))
    }

    /// Parse asyncness: `async` or nothing
    fn parse_asyncness(&mut self) -> IsAsync {
        if self.eat_keyword(keywords::Async) {
            IsAsync::Async {
                closure_id: ast::DUMMY_NODE_ID,
                return_impl_trait_id: ast::DUMMY_NODE_ID,
            }
        } else {
            IsAsync::NotAsync
        }
    }

    /// Parse unsafety: `unsafe` or nothing.
    fn parse_unsafety(&mut self) -> Unsafety {
        if self.eat_keyword(keywords::Unsafe) {
            Unsafety::Unsafe
        } else {
            Unsafety::Normal
        }
    }

    /// Parse the items in a trait declaration
    pub fn parse_trait_item(&mut self, at_end: &mut bool) -> PResult<'a, TraitItem> {
        maybe_whole!(self, NtTraitItem, |x| x);
        let attrs = self.parse_outer_attributes()?;
        let (mut item, tokens) = self.collect_tokens(|this| {
            this.parse_trait_item_(at_end, attrs)
        })?;
        // See `parse_item` for why this clause is here.
        if !item.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) {
            item.tokens = Some(tokens);
        }
        Ok(item)
    }

    /// Parse one trait item: an associated type, an associated const, a
    /// trait-item macro invocation, or a (required or provided) method.
    /// `at_end` is set when the item ends without needing a separator.
    fn parse_trait_item_(&mut self,
                         at_end: &mut bool,
                         mut attrs: Vec<Attribute>) -> PResult<'a, TraitItem> {
        let lo = self.span;

        let (name, node, generics) = if self.eat_keyword(keywords::Type) {
            self.parse_trait_item_assoc_ty()?
        } else if self.is_const_item() {
            // Associated const: `const NAME: TY [= EXPR];`
            self.expect_keyword(keywords::Const)?;
            let ident = self.parse_ident()?;
            self.expect(&token::Colon)?;
            let ty = self.parse_ty()?;
            let default = if self.check(&token::Eq) {
                self.bump();
                let expr = self.parse_expr()?;
                self.expect(&token::Semi)?;
                Some(expr)
            } else {
                self.expect(&token::Semi)?;
                None
            };
            (ident, TraitItemKind::Const(ty, default), ast::Generics::default())
        } else if let Some(mac) = self.parse_assoc_macro_invoc("trait", None, &mut false)? {
            // trait item macro.
            (keywords::Invalid.ident(), ast::TraitItemKind::Macro(mac), ast::Generics::default())
        } else {
            let (constness, unsafety, asyncness, abi) = self.parse_fn_front_matter()?;

            let ident = self.parse_ident()?;
            let mut generics = self.parse_generics()?;

            let d = self.parse_fn_decl_with_self(|p: &mut Parser<'a>| {
                // This is somewhat dubious; We don't want to allow
                // argument names to be left off if there is a
                // definition...
                p.parse_arg_general(false)
            })?;
            generics.where_clause = self.parse_where_clause()?;

            let sig = ast::MethodSig {
                header: FnHeader {
                    unsafety,
                    constness,
                    abi,
                    asyncness,
                },
                decl: d,
            };

            let body = match self.token {
                token::Semi => {
                    // Required method (no body).
                    self.bump();
                    *at_end = true;
                    debug!("parse_trait_methods(): parsing required method");
                    None
                }
                token::OpenDelim(token::Brace) => {
                    // Provided method with a default body.
                    debug!("parse_trait_methods(): parsing provided method");
                    *at_end = true;
                    let (inner_attrs, body) = self.parse_inner_attrs_and_block()?;
                    attrs.extend(inner_attrs.iter().cloned());
                    Some(body)
                }
                _ => {
                    let token_str = self.this_token_to_string();
                    let mut err = self.fatal(&format!("expected `;` or `{{`, found `{}`",
                                                      token_str));
                    err.span_label(self.span, "expected `;` or `{`");
                    return Err(err);
                }
            };
            (ident, ast::TraitItemKind::Method(sig, body), generics)
        };

        Ok(TraitItem {
            id: ast::DUMMY_NODE_ID,
            ident: name,
            attrs,
            generics,
            node,
            span: lo.to(self.prev_span),
            tokens: None,
        })
    }

    /// Parse optional return type [ -> TY ] in function decl
    fn parse_ret_ty(&mut self, allow_plus: bool) -> PResult<'a,
FunctionRetTy> {
        if self.eat(&token::RArrow) {
            Ok(FunctionRetTy::Ty(self.parse_ty_common(allow_plus, true)?))
        } else {
            // No `->`: the default return type, anchored just before here.
            Ok(FunctionRetTy::Default(self.span.shrink_to_lo()))
        }
    }

    // Parse a type
    pub fn parse_ty(&mut self) -> PResult<'a, P<Ty>> {
        self.parse_ty_common(true, true)
    }

    /// Parse a type in restricted contexts where `+` is not permitted.
    /// Example 1: `&'a TYPE`
    ///     `+` is prohibited to maintain operator priority (P(+) < P(&)).
    /// Example 2: `value1 as TYPE + value2`
    ///     `+` is prohibited to avoid interactions with expression grammar.
    fn parse_ty_no_plus(&mut self) -> PResult<'a, P<Ty>> {
        self.parse_ty_common(false, true)
    }

    /// Main type-parsing dispatch: branches on the leading token to cover
    /// every `TyKind`, then runs `+`/qpath error recovery on the result.
    fn parse_ty_common(&mut self, allow_plus: bool, allow_qpath_recovery: bool)
                       -> PResult<'a, P<Ty>> {
        maybe_whole!(self, NtTy, |x| x);

        let lo = self.span;
        let mut impl_dyn_multi = false;
        let node = if self.eat(&token::OpenDelim(token::Paren)) {
            // `(TYPE)` is a parenthesized type.
            // `(TYPE,)` is a tuple with a single field of type TYPE.
            let mut ts = vec![];
            let mut last_comma = false;
            while self.token != token::CloseDelim(token::Paren) {
                ts.push(self.parse_ty()?);
                if self.eat(&token::Comma) {
                    last_comma = true;
                } else {
                    last_comma = false;
                    break;
                }
            }
            let trailing_plus = self.prev_token_kind == PrevTokenKind::Plus;
            self.expect(&token::CloseDelim(token::Paren))?;

            if ts.len() == 1 && !last_comma {
                // Exactly one type and no trailing comma: parenthesized, not tuple.
                let ty = ts.into_iter().nth(0).unwrap().into_inner();
                let maybe_bounds = allow_plus && self.token.is_like_plus();
                match ty.node {
                    // `(TY_BOUND_NOPAREN) + BOUND + ...`.
                    TyKind::Path(None, ref path) if maybe_bounds => {
                        self.parse_remaining_bounds(Vec::new(), path.clone(), lo, true)?
                    }
                    TyKind::TraitObject(ref bounds, TraitObjectSyntax::None)
                            if maybe_bounds && bounds.len() == 1 && !trailing_plus => {
                        let path = match bounds[0] {
                            GenericBound::Trait(ref pt, ..) => pt.trait_ref.path.clone(),
                            _ => self.bug("unexpected lifetime bound"),
                        };
                        self.parse_remaining_bounds(Vec::new(), path, lo, true)?
                    }
                    // `(TYPE)`
                    _ => TyKind::Paren(P(ty))
                }
            } else {
                TyKind::Tup(ts)
            }
        } else if self.eat(&token::Not) {
            // Never type `!`
            TyKind::Never
        } else if self.eat(&token::BinOp(token::Star)) {
            // Raw pointer
            TyKind::Ptr(self.parse_ptr()?)
        } else if self.eat(&token::OpenDelim(token::Bracket)) {
            // Array or slice
            let t = self.parse_ty()?;
            // Parse optional `; EXPR` in `[TYPE; EXPR]`
            let t = match self.maybe_parse_fixed_length_of_vec()? {
                None => TyKind::Slice(t),
                Some(length) => TyKind::Array(t, AnonConst {
                    id: ast::DUMMY_NODE_ID,
                    value: length,
                }),
            };
            self.expect(&token::CloseDelim(token::Bracket))?;
            t
        } else if self.check(&token::BinOp(token::And)) || self.check(&token::AndAnd) {
            // Reference
            self.expect_and()?;
            self.parse_borrowed_pointee()?
        } else if self.eat_keyword_noexpect(keywords::Typeof) {
            // `typeof(EXPR)`
            // In order to not be ambiguous, the type must be surrounded by parens.
            self.expect(&token::OpenDelim(token::Paren))?;
            let e = AnonConst {
                id: ast::DUMMY_NODE_ID,
                value: self.parse_expr()?,
            };
            self.expect(&token::CloseDelim(token::Paren))?;
            TyKind::Typeof(e)
        } else if self.eat_keyword(keywords::Underscore) {
            // A type to be inferred `_`
            TyKind::Infer
        } else if self.token_is_bare_fn_keyword() {
            // Function pointer type
            self.parse_ty_bare_fn(Vec::new())?
        } else if self.check_keyword(keywords::For) {
            // Function pointer type or bound list (trait object type) starting with a poly-trait.
            //   `for<'lt> [unsafe] [extern "ABI"] fn (&'lt S) -> T`
            //   `for<'lt> Trait1<'lt> + Trait2 + 'a`
            let lo = self.span;
            let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
            if self.token_is_bare_fn_keyword() {
                self.parse_ty_bare_fn(lifetime_defs)?
            } else {
                let path = self.parse_path(PathStyle::Type)?;
                let parse_plus = allow_plus && self.check_plus();
                self.parse_remaining_bounds(lifetime_defs, path, lo, parse_plus)?
            }
        } else if self.eat_keyword(keywords::Impl) {
            // Always parse bounds greedily for better error recovery.
            let bounds = self.parse_generic_bounds()?;
            impl_dyn_multi = bounds.len() > 1 || self.prev_token_kind == PrevTokenKind::Plus;
            TyKind::ImplTrait(ast::DUMMY_NODE_ID, bounds)
        } else if self.check_keyword(keywords::Dyn) &&
                  self.look_ahead(1, |t| t.can_begin_bound() &&
                                         !can_continue_type_after_non_fn_ident(t)) {
            self.bump(); // `dyn`
            // Always parse bounds greedily for better error recovery.
            let bounds = self.parse_generic_bounds()?;
            impl_dyn_multi = bounds.len() > 1 || self.prev_token_kind == PrevTokenKind::Plus;
            TyKind::TraitObject(bounds, TraitObjectSyntax::Dyn)
        } else if self.check(&token::Question) ||
                  self.check_lifetime() && self.look_ahead(1, |t| t.is_like_plus()) {
            // Bound list (trait object type)
            TyKind::TraitObject(self.parse_generic_bounds_common(allow_plus)?,
                                TraitObjectSyntax::None)
        } else if self.eat_lt() {
            // Qualified path
            let (qself, path) = self.parse_qpath(PathStyle::Type)?;
            TyKind::Path(Some(qself), path)
        } else if self.token.is_path_start() {
            // Simple path
            let path = self.parse_path(PathStyle::Type)?;
            if self.eat(&token::Not) {
                // Macro invocation in type position
                let (delim, tts) = self.expect_delimited_token_tree()?;
                let node = Mac_ { path, tts, delim };
                TyKind::Mac(respan(lo.to(self.prev_span), node))
            } else {
                // Just a type path or bound list (trait object type) starting with a trait.
                //   `Type`
                //   `Trait1 + Trait2 + 'a`
                if allow_plus && self.check_plus() {
                    self.parse_remaining_bounds(Vec::new(), path, lo, true)?
                } else {
                    TyKind::Path(None, path)
                }
            }
        } else {
            let msg = format!("expected type, found {}", self.this_token_descr());
            return Err(self.fatal(&msg));
        };

        let span = lo.to(self.prev_span);
        let ty = Ty { node, span, id: ast::DUMMY_NODE_ID };

        // Try to recover from use of `+` with incorrect priority.
        self.maybe_report_ambiguous_plus(allow_plus, impl_dyn_multi, &ty);
        self.maybe_recover_from_bad_type_plus(allow_plus, &ty)?;
        let ty = self.maybe_recover_from_bad_qpath(ty, allow_qpath_recovery)?;

        Ok(P(ty))
    }

    /// Having parsed the leading path/poly-trait of a bound list, optionally
    /// consume `+` and the remaining bounds, producing a trait-object type.
    fn parse_remaining_bounds(&mut self, generic_params: Vec<GenericParam>, path: ast::Path,
                              lo: Span, parse_plus: bool) -> PResult<'a, TyKind> {
        let poly_trait_ref = PolyTraitRef::new(generic_params, path, lo.to(self.prev_span));
        let mut bounds = vec![GenericBound::Trait(poly_trait_ref, TraitBoundModifier::None)];
        if parse_plus {
            self.eat_plus(); // `+`, or `+=` gets split and `+` is discarded
            bounds.append(&mut self.parse_generic_bounds()?);
        }
        Ok(TyKind::TraitObject(bounds, TraitObjectSyntax::None))
    }

    /// If a multi-bound `impl`/`dyn` type appeared where `+` is not allowed,
    /// emit an "ambiguous `+`" error suggesting parentheses.
    fn maybe_report_ambiguous_plus(&mut self, allow_plus: bool, impl_dyn_multi: bool, ty: &Ty) {
        if !allow_plus && impl_dyn_multi {
            let sum_with_parens = format!("({})", pprust::ty_to_string(&ty));
            self.struct_span_err(ty.span, "ambiguous `+` in a type")
                .span_suggestion_with_applicability(
                    ty.span,
                    "use parentheses to disambiguate",
                    sum_with_parens,
                    Applicability::MachineApplicable
                ).emit();
        }
    }

    fn maybe_recover_from_bad_type_plus(&mut self, allow_plus: bool, ty: &Ty) -> PResult<'a, ()> {
        // Do not add `+` to expected tokens.
        if !allow_plus || !self.token.is_like_plus() {
            return Ok(())
        }

        self.bump(); // `+`
        let bounds = self.parse_generic_bounds()?;
        let sum_span = ty.span.to(self.prev_span);

        let mut err = struct_span_err!(self.sess.span_diagnostic, sum_span, E0178,
            "expected a path on the left-hand side of `+`, not `{}`", pprust::ty_to_string(ty));

        match ty.node {
            TyKind::Rptr(ref lifetime, ref mut_ty) => {
                // `&T + Bound` — suggest `&(T + Bound)` by re-printing the
                // reference with the pointee and bounds parenthesized.
                let sum_with_parens = pprust::to_string(|s| {
                    use print::pprust::PrintState;

                    s.s.word("&")?;
                    s.print_opt_lifetime(lifetime)?;
                    s.print_mutability(mut_ty.mutbl)?;
                    s.popen()?;
                    s.print_type(&mut_ty.ty)?;
                    s.print_type_bounds(" +", &bounds)?;
                    s.pclose()
                });
                err.span_suggestion_with_applicability(
                    sum_span,
                    "try adding parentheses",
                    sum_with_parens,
                    Applicability::MachineApplicable
                );
            }
            TyKind::Ptr(..) | TyKind::BareFn(..) => {
                err.span_label(sum_span, "perhaps you forgot parentheses?");
            }
            _ => {
                err.span_label(sum_span, "expected a path");
            },
        }
        err.emit();
        Ok(())
    }

    // Try to recover from associated item paths like `[T]::AssocItem`/`(T, U)::AssocItem`.
    fn maybe_recover_from_bad_qpath<T: RecoverQPath>(&mut self, base: T, allow_recovery: bool)
                                                     -> PResult<'a, T> {
        // Do not add `::` to expected tokens.
        if !allow_recovery || self.token != token::ModSep {
            return Ok(base);
        }
        let ty = match base.to_ty() {
            Some(ty) => ty,
            None => return Ok(base),
        };

        self.bump(); // `::`
        let mut segments = Vec::new();
        self.parse_path_segments(&mut segments, T::PATH_STYLE, true)?;

        let span = ty.span.to(self.prev_span);
        let path_span = span.to(span); // use an empty path since `position` == 0
        let recovered = base.to_recovered(
            Some(QSelf { ty, path_span, position: 0 }),
            ast::Path { segments, span },
        );

        self.diagnostic()
            .struct_span_err(span, "missing angle brackets in associated item path")
            .span_suggestion_with_applicability( // this is a best-effort recovery
                span, "try", recovered.to_string(), Applicability::MaybeIncorrect
            ).emit();

        Ok(recovered)
    }

    /// Parse the remainder of a reference type after `&`/`&&` has been consumed:
    /// optional lifetime, mutability, then the pointee (a `+`-free type, to keep
    /// `&T + U` from binding as `&(T + U)` — see `parse_ty_no_plus`).
    fn parse_borrowed_pointee(&mut self) -> PResult<'a, TyKind> {
        let opt_lifetime = if self.check_lifetime() { Some(self.expect_lifetime()) } else { None };
        let mutbl = self.parse_mutability();
        let ty = self.parse_ty_no_plus()?;
        return Ok(TyKind::Rptr(opt_lifetime, MutTy { ty: ty, mutbl: mutbl }));
    }

    /// Parse the remainder of a raw pointer type after `*`: requires `mut` or
    /// `const`; if neither is present, emits an error and recovers as `*const`.
    fn parse_ptr(&mut self) -> PResult<'a, MutTy> {
        let mutbl = if self.eat_keyword(keywords::Mut) {
            Mutability::Mutable
        } else if self.eat_keyword(keywords::Const) {
            Mutability::Immutable
        } else {
            let span = self.prev_span;
            self.span_err(span,
                          "expected mut or const in raw pointer type (use \
                           `*mut T` or `*const T` as appropriate)");
            Mutability::Immutable
        };
        let t = self.parse_ty_no_plus()?;
        Ok(MutTy { ty: t, mutbl: mutbl })
    }

    /// Look ahead (without consuming anything) to decide whether the upcoming
    /// function argument is of the named form `pattern: type`, accounting for a
    /// leading `&`/`&&`/`mut` before the identifier.
    fn is_named_argument(&mut self) -> bool {
        let offset = match self.token {
            token::Interpolated(ref nt) => match nt.0 {
                token::NtPat(..) => return self.look_ahead(1, |t| t == &token::Colon),
                _ => 0,
            }
            token::BinOp(token::And) | token::AndAnd => 1,
            _ if self.token.is_keyword(keywords::Mut) => 1,
            _ => 0,
        };

        self.look_ahead(offset, |t| t.is_ident()) &&
        self.look_ahead(offset + 1, |t| t == &token::Colon)
    }

    /// This version of parse arg doesn't necessarily require
    /// identifier names.
    fn parse_arg_general(&mut self, require_name: bool) -> PResult<'a, Arg> {
        maybe_whole!(self, NtArg, |x| x);

        let (pat, ty) = if require_name || self.is_named_argument() {
            debug!("parse_arg_general parse_pat (require_name:{})", require_name);
            let pat = self.parse_pat()?;

            self.expect(&token::Colon)?;
            (pat, self.parse_ty()?)
        } else {
            debug!("parse_arg_general ident_to_pat");
            // Snapshot so we can rewind if pattern-parsing below fails.
            let parser_snapshot_before_pat = self.clone();

            // We're going to try parsing the argument as a pattern (even though it's not
            // allowed). This way we can provide better errors to the user.
            let pat_arg: PResult<'a, _> = do catch {
                let pat = self.parse_pat()?;
                self.expect(&token::Colon)?;
                (pat, self.parse_ty()?)
            };

            match pat_arg {
                Ok((pat, ty)) => {
                    // A pattern parsed where only a bare type is allowed: report
                    // E0642 and suggest `_` in its place.
                    let mut err = self.diagnostic().struct_span_err_with_code(
                        pat.span,
                        "patterns aren't allowed in methods without bodies",
                        DiagnosticId::Error("E0642".into()),
                    );
                    err.span_suggestion_short_with_applicability(
                        pat.span,
                        "give this argument a name or use an underscore to ignore it",
                        "_".to_owned(),
                        Applicability::MachineApplicable,
                    );
                    err.emit();

                    // Pretend the pattern is `_`, to avoid duplicate errors from AST validation.
                    let pat = P(Pat {
                        node: PatKind::Wild,
                        span: pat.span,
                        id: ast::DUMMY_NODE_ID
                    });
                    (pat, ty)
                }
                Err(mut err) => {
                    err.cancel();
                    // Recover from attempting to parse the argument as a pattern. This means
                    // the type is alone, with no name, e.g. `fn foo(u32)`.
                    mem::replace(self, parser_snapshot_before_pat);
                    debug!("parse_arg_general ident_to_pat");
                    let ident = Ident::new(keywords::Invalid.name(), self.prev_span);
                    let ty = self.parse_ty()?;
                    let pat = P(Pat {
                        id: ast::DUMMY_NODE_ID,
                        node: PatKind::Ident(
                            BindingMode::ByValue(Mutability::Immutable), ident, None),
                        span: ty.span,
                    });
                    (pat, ty)
                }
            }
        };

        Ok(Arg { ty, pat, id: ast::DUMMY_NODE_ID })
    }

    /// Parse a single function argument
    crate fn parse_arg(&mut self) -> PResult<'a, Arg> {
        self.parse_arg_general(true)
    }

    /// Parse an argument in a lambda header e.g. |arg, arg|
    /// The type annotation is optional; a missing one becomes `TyKind::Infer`.
    fn parse_fn_block_arg(&mut self) -> PResult<'a, Arg> {
        let pat = self.parse_pat()?;
        let t = if self.eat(&token::Colon) {
            self.parse_ty()?
        } else {
            P(Ty {
                id: ast::DUMMY_NODE_ID,
                node: TyKind::Infer,
                span: self.span,
            })
        };
        Ok(Arg {
            ty: t,
            pat,
            id: ast::DUMMY_NODE_ID
        })
    }

    /// Parse the optional `; LENGTH` part of `[TYPE; LENGTH]`, after the element
    /// type has already been parsed. Returns `None` for a slice type.
    fn maybe_parse_fixed_length_of_vec(&mut self) -> PResult<'a, Option<P<ast::Expr>>> {
        if self.eat(&token::Semi) {
            Ok(Some(self.parse_expr()?))
        } else {
            Ok(None)
        }
    }

    /// Matches token_lit = LIT_INTEGER | ...
    fn parse_lit_token(&mut self) -> PResult<'a, LitKind> {
        let out = match self.token {
            token::Interpolated(ref nt) => match nt.0 {
                token::NtExpr(ref v) | token::NtLiteral(ref v) => match v.node {
                    ExprKind::Lit(ref lit) => { lit.node.clone() }
                    _ => { return self.unexpected_last(&self.token); }
                },
                _ => { return self.unexpected_last(&self.token); }
            },
            token::Literal(lit, suf) => {
                let diag = Some((self.span, &self.sess.span_diagnostic));
                let (suffix_illegal, result) = parse::lit_token(lit, suf, diag);

                if suffix_illegal {
                    let sp = self.span;
                    self.expect_no_suffix(sp, &format!("{} literal", lit.short_name()), suf)
                }

                result.unwrap()
            }
            _ => { return self.unexpected_last(&self.token); }
        };

        self.bump();
        Ok(out)
    }

    /// Matches lit = true | false | token_lit
    crate fn parse_lit(&mut self) -> PResult<'a, Lit> {
        let lo = self.span;
        let lit = if self.eat_keyword(keywords::True) {
            LitKind::Bool(true)
        } else if self.eat_keyword(keywords::False) {
            LitKind::Bool(false)
        } else {
            let lit = self.parse_lit_token()?;
            lit
        };
        Ok(source_map::Spanned { node: lit, span: lo.to(self.prev_span) })
    }

    /// matches '-' lit | lit (cf.
    /// ast_validation::AstValidator::check_expr_within_pat)
    crate fn parse_literal_maybe_minus(&mut self) -> PResult<'a, P<Expr>> {
        maybe_whole_expr!(self);

        let minus_lo = self.span;
        let minus_present = self.eat(&token::BinOp(token::Minus));
        let lo = self.span;
        let literal = P(self.parse_lit()?);
        let hi = self.prev_span;
        let expr = self.mk_expr(lo.to(hi), ExprKind::Lit(literal), ThinVec::new());

        if minus_present {
            // Wrap the literal in a unary negation spanning the leading `-`.
            let minus_hi = self.prev_span;
            let unary = self.mk_unary(UnOp::Neg, expr);
            Ok(self.mk_expr(minus_lo.to(minus_hi), unary, ThinVec::new()))
        } else {
            Ok(expr)
        }
    }

    /// Parse one path-segment identifier, additionally accepting identifiers for
    /// which `is_path_segment_keyword` returns true.
    fn parse_path_segment_ident(&mut self) -> PResult<'a, ast::Ident> {
        match self.token {
            token::Ident(ident, _) if self.token.is_path_segment_keyword() => {
                let span = self.span;
                self.bump();
                Ok(Ident::new(ident.name, span))
            }
            _ => self.parse_ident(),
        }
    }

    /// Parses qualified path.
    /// Assumes that the leading `<` has been parsed already.
    ///
    /// `qualified_path = <type [as trait_ref]>::path`
    ///
    /// # Examples
    /// `<T>::default`
    /// `<T as U>::a`
    /// `<T as U>::F::a<S>` (without disambiguator)
    /// `<T as U>::F::a::<S>` (with disambiguator)
    fn parse_qpath(&mut self, style: PathStyle) -> PResult<'a, (QSelf, ast::Path)> {
        let lo = self.prev_span;
        let ty = self.parse_ty()?;

        // `path` will contain the prefix of the path up to the `>`,
        // if any (e.g., `U` in the `<T as U>::*` examples
        // above). `path_span` has the span of that path, or an empty
        // span in the case of something like `<T>::Bar`.
        let (mut path, path_span);
        if self.eat_keyword(keywords::As) {
            let path_lo = self.span;
            path = self.parse_path(PathStyle::Type)?;
            path_span = path_lo.to(self.prev_span);
        } else {
            path = ast::Path { segments: Vec::new(), span: syntax_pos::DUMMY_SP };
            path_span = self.span.to(self.span);
        }

        self.expect(&token::Gt)?;
        self.expect(&token::ModSep)?;

        let qself = QSelf { ty, path_span, position: path.segments.len() };
        self.parse_path_segments(&mut path.segments, style, true)?;

        Ok((qself, ast::Path { segments: path.segments, span: lo.to(self.prev_span) }))
    }

    /// Parses simple paths.
    ///
    /// `path = [::] segment+`
    /// `segment = ident | ident[::]<args> | ident[::](args) [-> type]`
    ///
    /// # Examples
    /// `a::b::C<D>` (without disambiguator)
    /// `a::b::C::<D>` (with disambiguator)
    /// `Fn(Args)` (without disambiguator)
    /// `Fn::(Args)` (with disambiguator)
    pub fn parse_path(&mut self, style: PathStyle) -> PResult<'a, ast::Path> {
        self.parse_path_common(style, true)
    }

    /// Body of `parse_path`. `enable_warning` controls the
    /// "unnecessary path disambiguator" warning emitted for `::<` in type paths.
    crate fn parse_path_common(&mut self, style: PathStyle, enable_warning: bool)
                               -> PResult<'a, ast::Path> {
        maybe_whole!(self, NtPath, |path| {
            // An interpolated path in module position must not carry generics.
            if style == PathStyle::Mod &&
               path.segments.iter().any(|segment| segment.args.is_some()) {
                self.diagnostic().span_err(path.span, "unexpected generic arguments in path");
            }
            path
        });

        let lo = self.meta_var_span.unwrap_or(self.span);
        let mut segments = Vec::new();
        if self.eat(&token::ModSep) {
            segments.push(PathSegment::crate_root(lo.shrink_to_lo()));
        }
        self.parse_path_segments(&mut segments, style, enable_warning)?;

        Ok(ast::Path { segments, span: lo.to(self.prev_span) })
    }

    /// Like `parse_path`, but also supports parsing `Word` meta items into paths for back-compat.
    /// This is used when parsing derive macro paths in `#[derive]` attributes.
    pub fn parse_path_allowing_meta(&mut self, style: PathStyle) -> PResult<'a, ast::Path> {
        // If the current token is an interpolated `Word` meta item, reuse its
        // identifier as the path directly.
        let meta_ident = match self.token {
            token::Interpolated(ref nt) => match nt.0 {
                token::NtMeta(ref meta) => match meta.node {
                    ast::MetaItemKind::Word => Some(meta.ident.clone()),
                    _ => None,
                },
                _ => None,
            },
            _ => None,
        };
        if let Some(path) = meta_ident {
            self.bump();
            return Ok(path);
        }
        self.parse_path(style)
    }

    /// Parse `::`-separated path segments into `segments`, stopping before an
    /// import coupler (`::{` / `::*`) or when no further `::` follows.
    fn parse_path_segments(&mut self,
                           segments: &mut Vec<PathSegment>,
                           style: PathStyle,
                           enable_warning: bool)
                           -> PResult<'a, ()> {
        loop {
            segments.push(self.parse_path_segment(style, enable_warning)?);

            if self.is_import_coupler() || !self.eat(&token::ModSep) {
                return Ok(());
            }
        }
    }

    /// Parse a single path segment: an identifier optionally followed by generic
    /// arguments — angle-bracketed `<...>` or parenthesized `(...) [-> T]` —
    /// depending on `style`.
    fn parse_path_segment(&mut self, style: PathStyle, enable_warning: bool)
                          -> PResult<'a, PathSegment> {
        let ident = self.parse_path_segment_ident()?;

        let is_args_start = |token: &token::Token| match *token {
            token::Lt | token::BinOp(token::Shl) | token::OpenDelim(token::Paren) => true,
            _ => false,
        };
        let check_args_start = |this: &mut Self| {
            this.expected_tokens.extend_from_slice(
                &[TokenType::Token(token::Lt), TokenType::Token(token::OpenDelim(token::Paren))]
            );
            is_args_start(&this.token)
        };

        Ok(if style == PathStyle::Type && check_args_start(self) ||
              style != PathStyle::Mod && self.check(&token::ModSep)
                                      && self.look_ahead(1, |t| is_args_start(t)) {
            // Generic arguments are found - `<`, `(`, `::<` or `::(`.
            let lo = self.span;
            if self.eat(&token::ModSep) && style == PathStyle::Type && enable_warning {
                self.diagnostic().struct_span_warn(self.prev_span, "unnecessary path disambiguator")
                                 .span_label(self.prev_span, "try removing `::`").emit();
            }

            let args = if self.eat_lt() {
                // `<'a, T, A = U>`
                let (args, bindings) = self.parse_generic_args()?;
                self.expect_gt()?;
                let span = lo.to(self.prev_span);
                AngleBracketedArgs { args, bindings, span }.into()
            } else {
                // `(T, U) -> R`
                self.bump(); // `(`
                let inputs = self.parse_seq_to_before_tokens(
                    &[&token::CloseDelim(token::Paren)],
                    SeqSep::trailing_allowed(token::Comma),
                    TokenExpectType::Expect,
                    |p| p.parse_ty())?;
                self.bump(); // `)`
                let span = lo.to(self.prev_span);
                let output = if self.eat(&token::RArrow) {
                    Some(self.parse_ty_common(false, false)?)
                } else {
                    None
                };
                ParenthesisedArgs { inputs, output, span }.into()
            };

            PathSegment { ident, args }
        } else {
            // Generic arguments are not found.
            PathSegment::from_ident(ident)
        })
    }

    /// Check whether the current token is a lifetime, registering `Lifetime`
    /// among the expected tokens for diagnostics.
    crate fn check_lifetime(&mut self) -> bool {
        self.expected_tokens.push(TokenType::Lifetime);
        self.token.is_lifetime()
    }

    /// Parse single lifetime 'a or panic.
    crate fn expect_lifetime(&mut self) -> Lifetime {
        if let Some(ident) = self.token.lifetime() {
            let span = self.span;
            self.bump();
            Lifetime { ident: Ident::new(ident.name, span), id: ast::DUMMY_NODE_ID }
        } else {
            self.span_bug(self.span, "not a lifetime")
        }
    }

    /// If the current token is a lifetime-like token, consume it and return it
    /// as a loop/block `Label`; otherwise consume nothing and return `None`.
    fn eat_label(&mut self) -> Option<Label> {
        if let Some(ident) = self.token.lifetime() {
            let span = self.span;
            self.bump();
            Some(Label { ident: Ident::new(ident.name, span) })
        } else {
            None
        }
    }

    /// Parse mutability (`mut` or nothing).
    fn parse_mutability(&mut self) -> Mutability {
        if self.eat_keyword(keywords::Mut) {
            Mutability::Mutable
        } else {
            Mutability::Immutable
        }
    }

    /// Parse a struct-literal field name: either an unsuffixed integer literal
    /// or an ordinary identifier.
    fn parse_field_name(&mut self) -> PResult<'a, Ident> {
        if let token::Literal(token::Integer(name), None) = self.token {
            self.bump();
            Ok(Ident::new(name, self.prev_span))
        } else {
            self.parse_ident_common(false)
        }
    }

    /// Parse ident (COLON expr)?
    fn parse_field(&mut self) -> PResult<'a, Field> {
        let attrs = self.parse_outer_attributes()?;
        let lo = self.span;

        // Check if a colon exists one ahead. This means we're parsing a fieldname.
        let (fieldname, expr, is_shorthand) = if self.look_ahead(1, |t| t == &token::Colon) {
            let fieldname = self.parse_field_name()?;
            self.bump(); // `:`
            (fieldname, self.parse_expr()?, false)
        } else {
            let fieldname = self.parse_ident_common(false)?;

            // Mimic `x: x` for the `x` field shorthand.
            let path = ast::Path::from_ident(fieldname);
            let expr = self.mk_expr(fieldname.span, ExprKind::Path(None, path), ThinVec::new());
            (fieldname, expr, true)
        };
        Ok(ast::Field {
            ident: fieldname,
            span: lo.to(expr.span),
            expr,
            is_shorthand,
            attrs: attrs.into(),
        })
    }

    // The `mk_*` helpers below are thin AST-node constructors used throughout
    // the expression parser.

    fn mk_expr(&mut self, span: Span, node: ExprKind, attrs: ThinVec<Attribute>) -> P<Expr> {
        P(Expr { node, span, attrs, id: ast::DUMMY_NODE_ID })
    }

    fn mk_unary(&mut self, unop: ast::UnOp, expr: P<Expr>) -> ast::ExprKind {
        ExprKind::Unary(unop, expr)
    }

    fn mk_binary(&mut self, binop: ast::BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ast::ExprKind {
        ExprKind::Binary(binop, lhs, rhs)
    }

    fn mk_call(&mut self, f: P<Expr>, args: Vec<P<Expr>>) -> ast::ExprKind {
        ExprKind::Call(f, args)
    }

    fn mk_index(&mut self, expr: P<Expr>, idx: P<Expr>) -> ast::ExprKind {
        ExprKind::Index(expr, idx)
    }

    /// Build a range expression; `..=` with no end (`start..=`) is rejected here.
    fn mk_range(&mut self,
                start: Option<P<Expr>>,
                end: Option<P<Expr>>,
                limits: RangeLimits)
                -> PResult<'a, ast::ExprKind> {
        if end.is_none() && limits == RangeLimits::Closed {
            Err(self.span_fatal_err(self.span, Error::InclusiveRangeWithNoEnd))
        } else {
            Ok(ExprKind::Range(start, end, limits))
        }
    }

    fn mk_assign_op(&mut self, binop: ast::BinOp,
                    lhs: P<Expr>, rhs: P<Expr>) -> ast::ExprKind {
        ExprKind::AssignOp(binop, lhs, rhs)
    }

    pub fn mk_mac_expr(&mut self, span: Span, m: Mac_, attrs: ThinVec<Attribute>) -> P<Expr> {
        P(Expr {
            id: ast::DUMMY_NODE_ID,
            node: ExprKind::Mac(source_map::Spanned {node: m, span: span}),
            span,
            attrs,
        })
    }

    /// Expect the current token to open a delimited token tree (`(`, `[` or `{`);
    /// consume the whole tree and return the delimiter kind plus its token stream.
    fn expect_delimited_token_tree(&mut self) -> PResult<'a, (MacDelimiter, ThinTokenStream)> {
        let delim = match self.token {
            token::OpenDelim(delim) => delim,
            _ => {
                let msg = "expected open delimiter";
                let mut err = self.fatal(msg);
                err.span_label(self.span, msg);
                return Err(err)
            }
        };
        let delimited = match self.parse_token_tree() {
            TokenTree::Delimited(_, delimited) => delimited,
            _ => unreachable!(),
        };
        let delim = match delim {
            token::Paren => MacDelimiter::Parenthesis,
            token::Bracket => MacDelimiter::Bracket,
            token::Brace => MacDelimiter::Brace,
            token::NoDelim => self.bug("unexpected no delimiter"),
        };
        Ok((delim, delimited.stream().into()))
    }

    /// At the bottom (top?) of the precedence hierarchy,
    /// parse things like parenthesized exprs,
    /// macros, return, etc.
    ///
    /// NB: This does not parse outer attributes,
    ///     and is private because it only works
    ///     correctly if called from parse_dot_or_call_expr().
    fn parse_bottom_expr(&mut self) -> PResult<'a, P<Expr>> {
        maybe_whole_expr!(self);

        // Outer attributes are already parsed and will be
        // added to the return value after the fact.
        //
        // Therefore, prevent sub-parser from parsing
        // attributes by giving them a empty "already parsed" list.
        let mut attrs = ThinVec::new();

        let lo = self.span;
        let mut hi = self.span;

        let ex: ExprKind;

        // Note: when adding new syntax here, don't forget to adjust Token::can_begin_expr().
        match self.token {
            token::OpenDelim(token::Paren) => {
                self.bump();

                attrs.extend(self.parse_inner_attributes()?);

                // (e) is parenthesized e
                // (e,) is a tuple with only one field, e
                let mut es = vec![];
                let mut trailing_comma = false;
                while self.token != token::CloseDelim(token::Paren) {
                    es.push(self.parse_expr()?);
                    self.expect_one_of(&[], &[token::Comma, token::CloseDelim(token::Paren)])?;
                    if self.check(&token::Comma) {
                        trailing_comma = true;
                        self.bump();
                    } else {
                        trailing_comma = false;
                        break;
                    }
                }
                self.bump();

                hi = self.prev_span;
                ex = if es.len() == 1 && !trailing_comma {
                    ExprKind::Paren(es.into_iter().nth(0).unwrap())
                } else {
                    ExprKind::Tup(es)
                };
            }
            token::OpenDelim(token::Brace) => {
                return self.parse_block_expr(None, lo, BlockCheckMode::Default, attrs);
            }
            token::BinOp(token::Or) | token::OrOr => {
                return self.parse_lambda_expr(attrs);
            }
            token::OpenDelim(token::Bracket) => {
                self.bump();

                attrs.extend(self.parse_inner_attributes()?);

                if self.check(&token::CloseDelim(token::Bracket)) {
                    // Empty vector.
                    self.bump();
                    ex = ExprKind::Array(Vec::new());
                } else {
                    // Nonempty vector.
                    let first_expr = self.parse_expr()?;
                    if self.check(&token::Semi) {
                        // Repeating array syntax: [ 0; 512 ]
                        self.bump();
                        let count = AnonConst {
                            id: ast::DUMMY_NODE_ID,
                            value: self.parse_expr()?,
                        };
                        self.expect(&token::CloseDelim(token::Bracket))?;
                        ex = ExprKind::Repeat(first_expr, count);
                    } else if self.check(&token::Comma) {
                        // Vector with two or more elements.
                        self.bump();
                        let remaining_exprs = self.parse_seq_to_end(
                            &token::CloseDelim(token::Bracket),
                            SeqSep::trailing_allowed(token::Comma),
                            |p| Ok(p.parse_expr()?)
                        )?;
                        let mut exprs = vec![first_expr];
                        exprs.extend(remaining_exprs);
                        ex = ExprKind::Array(exprs);
                    } else {
                        // Vector with one element.
                        self.expect(&token::CloseDelim(token::Bracket))?;
                        ex = ExprKind::Array(vec![first_expr]);
                    }
                }
                hi = self.prev_span;
            }
            _ => {
                // Not a grouping token: dispatch on qualified paths, keywords,
                // labels, plain paths/macros, and finally literals.
                if self.eat_lt() {
                    let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
                    hi = path.span;
                    return Ok(self.mk_expr(lo.to(hi), ExprKind::Path(Some(qself), path), attrs));
                }
                if self.span.edition() >= Edition::Edition2018 &&
                   self.check_keyword(keywords::Async)
                {
                    if self.is_async_block() { // check for `async {` and `async move {`
                        return self.parse_async_block(attrs);
                    } else {
                        return self.parse_lambda_expr(attrs);
                    }
                }
                if self.check_keyword(keywords::Move) || self.check_keyword(keywords::Static) {
                    return self.parse_lambda_expr(attrs);
                }
                if self.eat_keyword(keywords::If) {
                    return self.parse_if_expr(attrs);
                }
                if self.eat_keyword(keywords::For) {
                    let lo = self.prev_span;
                    return self.parse_for_expr(None, lo, attrs);
                }
                if self.eat_keyword(keywords::While) {
                    let lo = self.prev_span;
                    return self.parse_while_expr(None, lo, attrs);
                }
                if let Some(label) = self.eat_label() {
                    // A label may only precede `while`, `for`, `loop` or a block.
                    let lo = label.ident.span;
                    self.expect(&token::Colon)?;
                    if self.eat_keyword(keywords::While) {
                        return self.parse_while_expr(Some(label), lo, attrs)
                    }
                    if self.eat_keyword(keywords::For) {
                        return self.parse_for_expr(Some(label), lo, attrs)
                    }
                    if self.eat_keyword(keywords::Loop) {
                        return self.parse_loop_expr(Some(label), lo, attrs)
                    }
                    if self.token == token::OpenDelim(token::Brace) {
                        return self.parse_block_expr(Some(label),
                                                     lo,
                                                     BlockCheckMode::Default,
                                                     attrs);
                    }
                    let msg = "expected `while`, `for`, `loop` or `{` after a label";
                    let mut err = self.fatal(msg);
                    err.span_label(self.span, msg);
                    return Err(err);
                }
                if self.eat_keyword(keywords::Loop) {
                    let lo = self.prev_span;
                    return self.parse_loop_expr(None, lo, attrs);
                }
                if self.eat_keyword(keywords::Continue) {
                    let label = self.eat_label();
                    let ex = ExprKind::Continue(label);
                    let hi = self.prev_span;
                    return Ok(self.mk_expr(lo.to(hi), ex, attrs));
                }
                if self.eat_keyword(keywords::Match) {
                    return self.parse_match_expr(attrs);
                }
                if self.eat_keyword(keywords::Unsafe) {
                    return self.parse_block_expr(
                        None,
                        lo,
                        BlockCheckMode::Unsafe(ast::UserProvided),
                        attrs);
                }
                if self.is_catch_expr() {
                    let lo = self.span;
                    assert!(self.eat_keyword(keywords::Do));
                    assert!(self.eat_keyword(keywords::Catch));
                    return self.parse_catch_expr(lo, attrs);
                }
                if self.eat_keyword(keywords::Return) {
                    if self.token.can_begin_expr() {
                        let e = self.parse_expr()?;
                        hi = e.span;
                        ex = ExprKind::Ret(Some(e));
                    } else {
                        ex = ExprKind::Ret(None);
                    }
                } else if self.eat_keyword(keywords::Break) {
                    let label = self.eat_label();
                    // `break { .. }` is ambiguous with a struct literal under
                    // NO_STRUCT_LITERAL, so no value expression is parsed then.
                    let e = if self.token.can_begin_expr()
                               && !(self.token == token::OpenDelim(token::Brace)
                                    && self.restrictions.contains(
                                           Restrictions::NO_STRUCT_LITERAL)) {
                        Some(self.parse_expr()?)
                    } else {
                        None
                    };
                    ex = ExprKind::Break(label, e);
                    hi = self.prev_span;
                } else if self.eat_keyword(keywords::Yield) {
                    if self.token.can_begin_expr() {
                        let e = self.parse_expr()?;
                        hi = e.span;
                        ex = ExprKind::Yield(Some(e));
                    } else {
                        ex = ExprKind::Yield(None);
                    }
                } else if self.token.is_keyword(keywords::Let) {
                    // Catch this syntax error here, instead of in `parse_ident`, so
                    // that we can explicitly mention that let is not to be used as an expression
                    let mut db = self.fatal("expected expression, found statement (`let`)");
                    db.span_label(self.span, "expected expression");
                    db.note("variable declaration using `let` is a statement");
                    return Err(db);
                } else if self.token.is_path_start() {
                    let pth = self.parse_path(PathStyle::Expr)?;

                    // `!`, as an operator, is prefix, so we know this isn't that
                    if self.eat(&token::Not) {
                        // MACRO INVOCATION expression
                        let (delim, tts) = self.expect_delimited_token_tree()?;
                        let hi = self.prev_span;
                        let node = Mac_ { path: pth, tts, delim };
                        return Ok(self.mk_mac_expr(lo.to(hi), node, attrs))
                    }
                    if self.check(&token::OpenDelim(token::Brace)) {
                        // This is a struct literal, unless we're prohibited
                        // from parsing struct literals here.
                        let prohibited = self.restrictions.contains(
                            Restrictions::NO_STRUCT_LITERAL
                        );
                        if !prohibited {
                            return self.parse_struct_expr(lo, pth, attrs);
                        }
                    }

                    hi = pth.span;
                    ex = ExprKind::Path(None, pth);
                } else {
                    match self.parse_literal_maybe_minus() {
                        Ok(expr) => {
                            hi = expr.span;
                            ex = expr.node.clone();
                        }
                        Err(mut err) => {
                            self.cancel(&mut err);
                            let msg = format!("expected expression, found {}",
                                              self.this_token_descr());
                            let mut err = self.fatal(&msg);
                            err.span_label(self.span, "expected expression");
                            return Err(err);
                        }
                    }
                }
            }
        }

        let expr = Expr { node: ex, span: lo.to(hi), id: ast::DUMMY_NODE_ID, attrs };
        let expr = self.maybe_recover_from_bad_qpath(expr, true)?;

        return Ok(P(expr));
    }

    /// Parse a struct literal `PATH { field, .., ..base }`; `lo`/`pth` are the
    /// already-parsed start span and path, and the `{` is the current token.
    fn parse_struct_expr(&mut self, lo: Span, pth: ast::Path, mut attrs: ThinVec<Attribute>)
                         -> PResult<'a, P<Expr>> {
        let struct_sp = lo.to(self.prev_span);
        self.bump();
        let mut fields = Vec::new();
        let mut base = None;

        attrs.extend(self.parse_inner_attributes()?);

        while self.token != token::CloseDelim(token::Brace) {
            if self.eat(&token::DotDot) {
                // `..base` — must be the last thing in the literal.
                let exp_span = self.prev_span;
                match self.parse_expr() {
                    Ok(e) => {
                        base = Some(e);
                    }
                    Err(mut e) => {
                        e.emit();
                        self.recover_stmt();
                    }
                }
                if self.token == token::Comma {
                    let mut err = self.sess.span_diagnostic.mut_span_err(
                        exp_span.to(self.prev_span),
                        "cannot use a comma after the base struct",
                    );
                    err.span_suggestion_short_with_applicability(
                        self.span,
                        "remove this comma",
                        "".to_owned(),
                        Applicability::MachineApplicable
                    );
                    err.note("the base struct must always be the last field");
                    err.emit();
                    self.recover_stmt();
                }
                break;
            }

            match self.parse_field() {
                Ok(f) => fields.push(f),
                Err(mut e) => {
                    e.span_label(struct_sp, "while parsing this struct");
                    e.emit();

                    // If the next token is a comma, then try to parse
                    // what comes next as additional fields, rather than
                    // bailing out until next `}`.
                    if self.token != token::Comma {
                        self.recover_stmt();
                        break;
                    }
                }
            }

            match self.expect_one_of(&[token::Comma],
                                     &[token::CloseDelim(token::Brace)]) {
                Ok(()) => {}
                Err(mut e) => {
                    e.emit();
                    self.recover_stmt();
                    break;
                }
            }
        }

        let span = lo.to(self.span);
        self.expect(&token::CloseDelim(token::Brace))?;
        return Ok(self.mk_expr(span, ExprKind::Struct(pth, fields, base), attrs));
    }

    /// Return the given pre-parsed outer attributes, or parse them now if `None`.
    fn parse_or_use_outer_attributes(&mut self,
                                     already_parsed_attrs: Option<ThinVec<Attribute>>)
                                     -> PResult<'a, ThinVec<Attribute>> {
        if let Some(attrs) = already_parsed_attrs {
            Ok(attrs)
        } else {
            self.parse_outer_attributes().map(|a| a.into())
        }
    }

    /// Parse a block or unsafe block
    fn parse_block_expr(&mut self, opt_label: Option<Label>,
                        lo: Span, blk_mode: BlockCheckMode,
                        outer_attrs: ThinVec<Attribute>)
                        -> PResult<'a, P<Expr>> {
        self.expect(&token::OpenDelim(token::Brace))?;

        let mut attrs = outer_attrs;
        attrs.extend(self.parse_inner_attributes()?);

        let blk = self.parse_block_tail(lo, blk_mode)?;
        return Ok(self.mk_expr(blk.span, ExprKind::Block(blk, opt_label), attrs));
    }

    /// parse a.b or a(13) or a[4] or just a
    fn parse_dot_or_call_expr(&mut self,
                              already_parsed_attrs: Option<ThinVec<Attribute>>)
                              -> PResult<'a, P<Expr>> {
        let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?;

        let b = self.parse_bottom_expr();
        let (span, b) = self.interpolated_or_expr_span(b)?;
        self.parse_dot_or_call_expr_with(b, span, attrs)
    }

    /// Like `parse_dot_or_call_expr`, but for an already-parsed base expression
    /// `e0` with its outer attributes in `attrs`.
    fn parse_dot_or_call_expr_with(&mut self,
                                   e0: P<Expr>,
                                   lo: Span,
                                   mut attrs: ThinVec<Attribute>)
                                   -> PResult<'a, P<Expr>> {
        // Stitch the list of outer attributes onto the return value.
        // A little bit ugly, but the best way given the current code
        // structure
        self.parse_dot_or_call_expr_with_(e0, lo)
        .map(|expr|
            expr.map(|mut expr| {
                attrs.extend::<Vec<_>>(expr.attrs.into());
                expr.attrs = attrs;
                match expr.node {
                    ExprKind::If(..) | ExprKind::IfLet(..) => {
                        if !expr.attrs.is_empty() {
                            // Just point to the first attribute in there...
                            let span = expr.attrs[0].span;

                            self.span_err(span,
                                "attributes are not yet allowed on `if` \
                                expressions");
                        }
                    }
                    _ => {}
                }
                expr
            })
        )
    }

    // Assuming we have just parsed `.`, continue parsing into an expression.
    fn parse_dot_suffix(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
        let segment = self.parse_path_segment(PathStyle::Expr, true)?;
        Ok(match self.token {
            token::OpenDelim(token::Paren) => {
                // Method call `expr.f()`
                let mut args = self.parse_unspanned_seq(
                    &token::OpenDelim(token::Paren),
                    &token::CloseDelim(token::Paren),
                    SeqSep::trailing_allowed(token::Comma),
                    |p| Ok(p.parse_expr()?)
                )?;
                // The receiver becomes the first "argument" of the method call.
                args.insert(0, self_arg);

                let span = lo.to(self.prev_span);
                self.mk_expr(span, ExprKind::MethodCall(segment, args), ThinVec::new())
            }
            _ => {
                // Field access `expr.f`
                if let Some(args) = segment.args {
                    self.span_err(args.span(),
                                  "field expressions may not have generic arguments");
                }

                let span = lo.to(self.prev_span);
                self.mk_expr(span, ExprKind::Field(self_arg, segment.ident), ThinVec::new())
            }
        })
    }

    /// Postfix loop for `e0`: repeatedly applies `?`, `.`-suffixes (fields,
    /// method calls, tuple indices), and call/index syntax.
    fn parse_dot_or_call_expr_with_(&mut self, e0: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
        let mut e = e0;
        let mut hi;
        loop {
            // expr?
            while self.eat(&token::Question) {
                let hi = self.prev_span;
                e = self.mk_expr(lo.to(hi), ExprKind::Try(e), ThinVec::new());
            }

            // expr.f
            if self.eat(&token::Dot) {
                match self.token {
                    token::Ident(..) => {
                        e = self.parse_dot_suffix(e, lo)?;
                    }
                    token::Literal(token::Integer(name), _) => {
                        // Tuple-index field access `expr.0`.
                        let span = self.span;
                        self.bump();
                        let field = ExprKind::Field(e, Ident::new(name, span));
                        e = self.mk_expr(lo.to(span), field, ThinVec::new());
                    }
                    token::Literal(token::Float(n), _suf) => {
                        // `x.1.0` lexes the suffix as a float literal; suggest
                        // parenthesizing the first index.
                        self.bump();
                        let fstr = n.as_str();
                        let mut err = self.diagnostic()
                            .struct_span_err(self.prev_span, &format!("unexpected token: `{}`", n));
                        err.span_label(self.prev_span, "unexpected token");
                        if fstr.chars().all(|x| "0123456789.".contains(x)) {
                            let float = match fstr.parse::<f64>().ok() {
                                Some(f) => f,
                                None => continue,
                            };
                            let sugg = pprust::to_string(|s| {
                                use print::pprust::PrintState;
                                s.popen()?;
                                s.print_expr(&e)?;
                                s.s.word( ".")?;
                                s.print_usize(float.trunc() as usize)?;
                                s.pclose()?;
                                s.s.word(".")?;
                                s.s.word(fstr.splitn(2, ".").last().unwrap())
                            });
                            err.span_suggestion_with_applicability(
                                lo.to(self.prev_span),
                                "try parenthesizing the first index",
                                sugg,
                                Applicability::MachineApplicable
                            );
                        }
                        return Err(err);

                    }
                    _ => {
                        // FIXME Could factor this out into non_fatal_unexpected or something.
                        let actual = self.this_token_to_string();
                        self.span_err(self.span, &format!("unexpected token: `{}`", actual));
                    }
                }
                continue;
            }
            if self.expr_is_complete(&e) { break; }
            match self.token {
                // expr(...)
                token::OpenDelim(token::Paren) => {
                    let es = self.parse_unspanned_seq(
                        &token::OpenDelim(token::Paren),
                        &token::CloseDelim(token::Paren),
                        SeqSep::trailing_allowed(token::Comma),
                        |p| Ok(p.parse_expr()?)
                    )?;
                    hi = self.prev_span;

                    let nd = self.mk_call(e, es);
                    e = self.mk_expr(lo.to(hi), nd, ThinVec::new());
                }

                // expr[...]
                // Could be either an index expression or a slicing expression.
token::OpenDelim(token::Bracket) => { self.bump(); let ix = self.parse_expr()?; hi = self.span; self.expect(&token::CloseDelim(token::Bracket))?; let index = self.mk_index(e, ix); e = self.mk_expr(lo.to(hi), index, ThinVec::new()) } _ => return Ok(e) } } return Ok(e); } crate fn process_potential_macro_variable(&mut self) { let (token, span) = match self.token { token::Dollar if self.span.ctxt() != syntax_pos::hygiene::SyntaxContext::empty() && self.look_ahead(1, |t| t.is_ident()) => { self.bump(); let name = match self.token { token::Ident(ident, _) => ident, _ => unreachable!() }; let mut err = self.fatal(&format!("unknown macro variable `{}`", name)); err.span_label(self.span, "unknown macro variable"); err.emit(); return } token::Interpolated(ref nt) => { self.meta_var_span = Some(self.span); // Interpolated identifier and lifetime tokens are replaced with usual identifier // and lifetime tokens, so the former are never encountered during normal parsing. match nt.0 { token::NtIdent(ident, is_raw) => (token::Ident(ident, is_raw), ident.span), token::NtLifetime(ident) => (token::Lifetime(ident), ident.span), _ => return, } } _ => return, }; self.token = token; self.span = span; } /// parse a single token tree from the input. crate fn parse_token_tree(&mut self) -> TokenTree { match self.token { token::OpenDelim(..) => { let frame = mem::replace(&mut self.token_cursor.frame, self.token_cursor.stack.pop().unwrap()); self.span = frame.span; self.bump(); TokenTree::Delimited(frame.span, Delimited { delim: frame.delim, tts: frame.tree_cursor.original_stream().into(), }) }, token::CloseDelim(_) | token::Eof => unreachable!(), _ => { let (token, span) = (mem::replace(&mut self.token, token::Whitespace), self.span); self.bump(); TokenTree::Token(span, token) } } } // parse a stream of tokens into a list of TokenTree's, // up to EOF. 
pub fn parse_all_token_trees(&mut self) -> PResult<'a, Vec<TokenTree>> { let mut tts = Vec::new(); while self.token != token::Eof { tts.push(self.parse_token_tree()); } Ok(tts) } pub fn parse_tokens(&mut self) -> TokenStream { let mut result = Vec::new(); loop { match self.token { token::Eof | token::CloseDelim(..) => break, _ => result.push(self.parse_token_tree().into()), } } TokenStream::concat(result) } /// Parse a prefix-unary-operator expr fn parse_prefix_expr(&mut self, already_parsed_attrs: Option<ThinVec<Attribute>>) -> PResult<'a, P<Expr>> { let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?; let lo = self.span; // Note: when adding new unary operators, don't forget to adjust Token::can_begin_expr() let (hi, ex) = match self.token { token::Not => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), self.mk_unary(UnOp::Not, e)) } // Suggest `!` for bitwise negation when encountering a `~` token::Tilde => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; let span_of_tilde = lo; let mut err = self.diagnostic().struct_span_err(span_of_tilde, "`~` cannot be used as a unary operator"); err.span_suggestion_short_with_applicability( span_of_tilde, "use `!` to perform bitwise negation", "!".to_owned(), Applicability::MachineApplicable ); err.emit(); (lo.to(span), self.mk_unary(UnOp::Not, e)) } token::BinOp(token::Minus) => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), self.mk_unary(UnOp::Neg, e)) } token::BinOp(token::Star) => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), self.mk_unary(UnOp::Deref, e)) } token::BinOp(token::And) | token::AndAnd => { self.expect_and()?; let m = self.parse_mutability(); let e = self.parse_prefix_expr(None); let (span, e) = 
self.interpolated_or_expr_span(e)?; (lo.to(span), ExprKind::AddrOf(m, e)) } token::Ident(..) if self.token.is_keyword(keywords::In) => { self.bump(); let place = self.parse_expr_res( Restrictions::NO_STRUCT_LITERAL, None, )?; let blk = self.parse_block()?; let span = blk.span; let blk_expr = self.mk_expr(span, ExprKind::Block(blk, None), ThinVec::new()); (lo.to(span), ExprKind::ObsoleteInPlace(place, blk_expr)) } token::Ident(..) if self.token.is_keyword(keywords::Box) => { self.bump(); let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), ExprKind::Box(e)) } token::Ident(..) if self.token.is_ident_named("not") => { // `not` is just an ordinary identifier in Rust-the-language, // but as `rustc`-the-compiler, we can issue clever diagnostics // for confused users who really want to say `!` let token_cannot_continue_expr = |t: &token::Token| match *t { // These tokens can start an expression after `!`, but // can't continue an expression after an ident token::Ident(ident, is_raw) => token::ident_can_begin_expr(ident, is_raw), token::Literal(..) | token::Pound => true, token::Interpolated(ref nt) => match nt.0 { token::NtIdent(..) | token::NtExpr(..) | token::NtBlock(..) | token::NtPath(..) => true, _ => false, }, _ => false }; let cannot_continue_expr = self.look_ahead(1, token_cannot_continue_expr); if cannot_continue_expr { self.bump(); // Emit the error ... let mut err = self.diagnostic() .struct_span_err(self.span, &format!("unexpected {} after identifier", self.this_token_descr())); // span the `not` plus trailing whitespace to avoid // trailing whitespace after the `!` in our suggestion let to_replace = self.sess.source_map() .span_until_non_whitespace(lo.to(self.span)); err.span_suggestion_short_with_applicability( to_replace, "use `!` to perform logical negation", "!".to_owned(), Applicability::MachineApplicable ); err.emit(); // —and recover! 
(just as if we were in the block // for the `token::Not` arm) let e = self.parse_prefix_expr(None); let (span, e) = self.interpolated_or_expr_span(e)?; (lo.to(span), self.mk_unary(UnOp::Not, e)) } else { return self.parse_dot_or_call_expr(Some(attrs)); } } _ => { return self.parse_dot_or_call_expr(Some(attrs)); } }; return Ok(self.mk_expr(lo.to(hi), ex, attrs)); } /// Parse an associative expression /// /// This parses an expression accounting for associativity and precedence of the operators in /// the expression. fn parse_assoc_expr(&mut self, already_parsed_attrs: Option<ThinVec<Attribute>>) -> PResult<'a, P<Expr>> { self.parse_assoc_expr_with(0, already_parsed_attrs.into()) } /// Parse an associative expression with operators of at least `min_prec` precedence fn parse_assoc_expr_with(&mut self, min_prec: usize, lhs: LhsExpr) -> PResult<'a, P<Expr>> { let mut lhs = if let LhsExpr::AlreadyParsed(expr) = lhs { expr } else { let attrs = match lhs { LhsExpr::AttributesParsed(attrs) => Some(attrs), _ => None, }; if [token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token) { return self.parse_prefix_range_expr(attrs); } else { self.parse_prefix_expr(attrs)? } }; if self.expr_is_complete(&lhs) { // Semi-statement forms are odd. See https://github.com/rust-lang/rust/issues/29071 return Ok(lhs); } self.expected_tokens.push(TokenType::Operator); while let Some(op) = AssocOp::from_token(&self.token) { // Adjust the span for interpolated LHS to point to the `$lhs` token and not to what // it refers to. Interpolated identifiers are unwrapped early and never show up here // as `PrevTokenKind::Interpolated` so if LHS is a single identifier we always process // it as "interpolated", it doesn't change the answer for non-interpolated idents. 
let lhs_span = match (self.prev_token_kind, &lhs.node) { (PrevTokenKind::Interpolated, _) => self.prev_span, (PrevTokenKind::Ident, &ExprKind::Path(None, ref path)) if path.segments.len() == 1 => self.prev_span, _ => lhs.span, }; let cur_op_span = self.span; let restrictions = if op.is_assign_like() { self.restrictions & Restrictions::NO_STRUCT_LITERAL } else { self.restrictions }; if op.precedence() < min_prec { break; } // Check for deprecated `...` syntax if self.token == token::DotDotDot && op == AssocOp::DotDotEq { self.err_dotdotdot_syntax(self.span); } self.bump(); if op.is_comparison() { self.check_no_chained_comparison(&lhs, &op); } // Special cases: if op == AssocOp::As { lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Cast)?; continue } else if op == AssocOp::Colon { lhs = match self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Type) { Ok(lhs) => lhs, Err(mut err) => { err.span_label(self.span, "expecting a type here because of type ascription"); let cm = self.sess.source_map(); let cur_pos = cm.lookup_char_pos(self.span.lo()); let op_pos = cm.lookup_char_pos(cur_op_span.hi()); if cur_pos.line != op_pos.line { err.span_suggestion_with_applicability( cur_op_span, "try using a semicolon", ";".to_string(), Applicability::MaybeIncorrect // speculative ); } return Err(err); } }; continue } else if op == AssocOp::DotDot || op == AssocOp::DotDotEq { // If we didn’t have to handle `x..`/`x..=`, it would be pretty easy to // generalise it to the Fixity::None code. // // We have 2 alternatives here: `x..y`/`x..=y` and `x..`/`x..=` The other // two variants are handled with `parse_prefix_range_expr` call above. let rhs = if self.is_at_start_of_range_notation_rhs() { Some(self.parse_assoc_expr_with(op.precedence() + 1, LhsExpr::NotYetParsed)?) 
} else { None }; let (lhs_span, rhs_span) = (lhs.span, if let Some(ref x) = rhs { x.span } else { cur_op_span }); let limits = if op == AssocOp::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed }; let r = try!(self.mk_range(Some(lhs), rhs, limits)); lhs = self.mk_expr(lhs_span.to(rhs_span), r, ThinVec::new()); break } let rhs = match op.fixity() { Fixity::Right => self.with_res( restrictions - Restrictions::STMT_EXPR, |this| { this.parse_assoc_expr_with(op.precedence(), LhsExpr::NotYetParsed) }), Fixity::Left => self.with_res( restrictions - Restrictions::STMT_EXPR, |this| { this.parse_assoc_expr_with(op.precedence() + 1, LhsExpr::NotYetParsed) }), // We currently have no non-associative operators that are not handled above by // the special cases. The code is here only for future convenience. Fixity::None => self.with_res( restrictions - Restrictions::STMT_EXPR, |this| { this.parse_assoc_expr_with(op.precedence() + 1, LhsExpr::NotYetParsed) }), }?; let span = lhs_span.to(rhs.span); lhs = match op { AssocOp::Add | AssocOp::Subtract | AssocOp::Multiply | AssocOp::Divide | AssocOp::Modulus | AssocOp::LAnd | AssocOp::LOr | AssocOp::BitXor | AssocOp::BitAnd | AssocOp::BitOr | AssocOp::ShiftLeft | AssocOp::ShiftRight | AssocOp::Equal | AssocOp::Less | AssocOp::LessEqual | AssocOp::NotEqual | AssocOp::Greater | AssocOp::GreaterEqual => { let ast_op = op.to_ast_binop().unwrap(); let binary = self.mk_binary(source_map::respan(cur_op_span, ast_op), lhs, rhs); self.mk_expr(span, binary, ThinVec::new()) } AssocOp::Assign => self.mk_expr(span, ExprKind::Assign(lhs, rhs), ThinVec::new()), AssocOp::ObsoleteInPlace => self.mk_expr(span, ExprKind::ObsoleteInPlace(lhs, rhs), ThinVec::new()), AssocOp::AssignOp(k) => { let aop = match k { token::Plus => BinOpKind::Add, token::Minus => BinOpKind::Sub, token::Star => BinOpKind::Mul, token::Slash => BinOpKind::Div, token::Percent => BinOpKind::Rem, token::Caret => BinOpKind::BitXor, token::And => BinOpKind::BitAnd, token::Or 
=> BinOpKind::BitOr, token::Shl => BinOpKind::Shl, token::Shr => BinOpKind::Shr, }; let aopexpr = self.mk_assign_op(source_map::respan(cur_op_span, aop), lhs, rhs); self.mk_expr(span, aopexpr, ThinVec::new()) } AssocOp::As | AssocOp::Colon | AssocOp::DotDot | AssocOp::DotDotEq => { self.bug("AssocOp should have been handled by special case") } }; if op.fixity() == Fixity::None { break } } Ok(lhs) } fn parse_assoc_op_cast(&mut self, lhs: P<Expr>, lhs_span: Span, expr_kind: fn(P<Expr>, P<Ty>) -> ExprKind) -> PResult<'a, P<Expr>> { let mk_expr = |this: &mut Self, rhs: P<Ty>| { this.mk_expr(lhs_span.to(rhs.span), expr_kind(lhs, rhs), ThinVec::new()) }; // Save the state of the parser before parsing type normally, in case there is a // LessThan comparison after this cast. let parser_snapshot_before_type = self.clone(); match self.parse_ty_no_plus() { Ok(rhs) => { Ok(mk_expr(self, rhs)) } Err(mut type_err) => { // Rewind to before attempting to parse the type with generics, to recover // from situations like `x as usize < y` in which we first tried to parse // `usize < y` as a type with generic arguments. let parser_snapshot_after_type = self.clone(); mem::replace(self, parser_snapshot_before_type); match self.parse_path(PathStyle::Expr) { Ok(path) => { let (op_noun, op_verb) = match self.token { token::Lt => ("comparison", "comparing"), token::BinOp(token::Shl) => ("shift", "shifting"), _ => { // We can end up here even without `<` being the next token, for // example because `parse_ty_no_plus` returns `Err` on keywords, // but `parse_path` returns `Ok` on them due to error recovery. // Return original error and parser state. mem::replace(self, parser_snapshot_after_type); return Err(type_err); } }; // Successfully parsed the type path leaving a `<` yet to parse. type_err.cancel(); // Report non-fatal diagnostics, keep `x as usize` as an expression // in AST and continue parsing. 
let msg = format!("`<` is interpreted as a start of generic \ arguments for `{}`, not a {}", path, op_noun); let mut err = self.sess.span_diagnostic.struct_span_err(self.span, &msg); err.span_label(self.look_ahead_span(1).to(parser_snapshot_after_type.span), "interpreted as generic arguments"); err.span_label(self.span, format!("not interpreted as {}", op_noun)); let expr = mk_expr(self, P(Ty { span: path.span, node: TyKind::Path(None, path), id: ast::DUMMY_NODE_ID })); let expr_str = self.sess.source_map().span_to_snippet(expr.span) .unwrap_or(pprust::expr_to_string(&expr)); err.span_suggestion_with_applicability( expr.span, &format!("try {} the cast value", op_verb), format!("({})", expr_str), Applicability::MachineApplicable ); err.emit(); Ok(expr) } Err(mut path_err) => { // Couldn't parse as a path, return original error and parser state. path_err.cancel(); mem::replace(self, parser_snapshot_after_type); Err(type_err) } } } } } /// Produce an error if comparison operators are chained (RFC #558). 
/// We only need to check lhs, not rhs, because all comparison ops /// have same precedence and are left-associative fn check_no_chained_comparison(&mut self, lhs: &Expr, outer_op: &AssocOp) { debug_assert!(outer_op.is_comparison(), "check_no_chained_comparison: {:?} is not comparison", outer_op); match lhs.node { ExprKind::Binary(op, _, _) if op.node.is_comparison() => { // respan to include both operators let op_span = op.span.to(self.span); let mut err = self.diagnostic().struct_span_err(op_span, "chained comparison operators require parentheses"); if op.node == BinOpKind::Lt && *outer_op == AssocOp::Less || // Include `<` to provide this recommendation *outer_op == AssocOp::Greater // even in a case like the following: { // Foo<Bar<Baz<Qux, ()>>> err.help( "use `::<...>` instead of `<...>` if you meant to specify type arguments"); err.help("or use `(...)` if you meant to specify fn arguments"); } err.emit(); } _ => {} } } /// Parse prefix-forms of range notation: `..expr`, `..`, `..=expr` fn parse_prefix_range_expr(&mut self, already_parsed_attrs: Option<ThinVec<Attribute>>) -> PResult<'a, P<Expr>> { // Check for deprecated `...` syntax if self.token == token::DotDotDot { self.err_dotdotdot_syntax(self.span); } debug_assert!([token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token), "parse_prefix_range_expr: token {:?} is not DotDot/DotDotEq", self.token); let tok = self.token.clone(); let attrs = self.parse_or_use_outer_attributes(already_parsed_attrs)?; let lo = self.span; let mut hi = self.span; self.bump(); let opt_end = if self.is_at_start_of_range_notation_rhs() { // RHS must be parsed with more associativity than the dots. let next_prec = AssocOp::from_token(&tok).unwrap().precedence() + 1; Some(self.parse_assoc_expr_with(next_prec, LhsExpr::NotYetParsed) .map(|x|{ hi = x.span; x })?) 
} else { None }; let limits = if tok == token::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed }; let r = try!(self.mk_range(None, opt_end, limits)); Ok(self.mk_expr(lo.to(hi), r, attrs)) } fn is_at_start_of_range_notation_rhs(&self) -> bool { if self.token.can_begin_expr() { // parse `for i in 1.. { }` as infinite loop, not as `for i in (1..{})`. if self.token == token::OpenDelim(token::Brace) { return !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL); } true } else { false } } /// Parse an 'if' or 'if let' expression ('if' token already eaten) fn parse_if_expr(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { if self.check_keyword(keywords::Let) { return self.parse_if_let_expr(attrs); } let lo = self.prev_span; let cond = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; // Verify that the parsed `if` condition makes sense as a condition. If it is a block, then // verify that the last statement is either an implicit return (no `;`) or an explicit // return. This won't catch blocks with an explicit `return`, but that would be caught by // the dead code lint. 
if self.eat_keyword(keywords::Else) || !cond.returns() { let sp = self.sess.source_map().next_point(lo); let mut err = self.diagnostic() .struct_span_err(sp, "missing condition for `if` statemement"); err.span_label(sp, "expected if condition here"); return Err(err) } let not_block = self.token != token::OpenDelim(token::Brace); let thn = self.parse_block().map_err(|mut err| { if not_block { err.span_label(lo, "this `if` statement has a condition, but no block"); } err })?; let mut els: Option<P<Expr>> = None; let mut hi = thn.span; if self.eat_keyword(keywords::Else) { let elexpr = self.parse_else_expr()?; hi = elexpr.span; els = Some(elexpr); } Ok(self.mk_expr(lo.to(hi), ExprKind::If(cond, thn, els), attrs)) } /// Parse an 'if let' expression ('if' token already eaten) fn parse_if_let_expr(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let lo = self.prev_span; self.expect_keyword(keywords::Let)?; let pats = self.parse_pats()?; self.expect(&token::Eq)?; let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; let thn = self.parse_block()?; let (hi, els) = if self.eat_keyword(keywords::Else) { let expr = self.parse_else_expr()?; (expr.span, Some(expr)) } else { (thn.span, None) }; Ok(self.mk_expr(lo.to(hi), ExprKind::IfLet(pats, expr, thn, els), attrs)) } // `move |args| expr` fn parse_lambda_expr(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let lo = self.span; let movability = if self.eat_keyword(keywords::Static) { Movability::Static } else { Movability::Movable }; let asyncness = if self.span.edition() >= Edition::Edition2018 { self.parse_asyncness() } else { IsAsync::NotAsync }; let capture_clause = if self.eat_keyword(keywords::Move) { CaptureBy::Value } else { CaptureBy::Ref }; let decl = self.parse_fn_block_decl()?; let decl_hi = self.prev_span; let body = match decl.output { FunctionRetTy::Default(_) => { let restrictions = self.restrictions - Restrictions::STMT_EXPR; self.parse_expr_res(restrictions, 
None)? }, _ => { // If an explicit return type is given, require a // block to appear (RFC 968). let body_lo = self.span; self.parse_block_expr(None, body_lo, BlockCheckMode::Default, ThinVec::new())? } }; Ok(self.mk_expr( lo.to(body.span), ExprKind::Closure(capture_clause, asyncness, movability, decl, body, lo.to(decl_hi)), attrs)) } // `else` token already eaten fn parse_else_expr(&mut self) -> PResult<'a, P<Expr>> { if self.eat_keyword(keywords::If) { return self.parse_if_expr(ThinVec::new()); } else { let blk = self.parse_block()?; return Ok(self.mk_expr(blk.span, ExprKind::Block(blk, None), ThinVec::new())); } } /// Parse a 'for' .. 'in' expression ('for' token already eaten) fn parse_for_expr(&mut self, opt_label: Option<Label>, span_lo: Span, mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { // Parse: `for <src_pat> in <src_expr> <src_loop_block>` let pat = self.parse_top_level_pat()?; if !self.eat_keyword(keywords::In) { let in_span = self.prev_span.between(self.span); let mut err = self.sess.span_diagnostic .struct_span_err(in_span, "missing `in` in `for` loop"); err.span_suggestion_short_with_applicability( in_span, "try adding `in` here", " in ".into(), // has been misleading, at least in the past (closed Issue #48492) Applicability::MaybeIncorrect ); err.emit(); } let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; let (iattrs, loop_block) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); let hi = self.prev_span; Ok(self.mk_expr(span_lo.to(hi), ExprKind::ForLoop(pat, expr, loop_block, opt_label), attrs)) } /// Parse a 'while' or 'while let' expression ('while' token already eaten) fn parse_while_expr(&mut self, opt_label: Option<Label>, span_lo: Span, mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { if self.token.is_keyword(keywords::Let) { return self.parse_while_let_expr(opt_label, span_lo, attrs); } let cond = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; let (iattrs, body) = 
self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); let span = span_lo.to(body.span); return Ok(self.mk_expr(span, ExprKind::While(cond, body, opt_label), attrs)); } /// Parse a 'while let' expression ('while' token already eaten) fn parse_while_let_expr(&mut self, opt_label: Option<Label>, span_lo: Span, mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { self.expect_keyword(keywords::Let)?; let pats = self.parse_pats()?; self.expect(&token::Eq)?; let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); let span = span_lo.to(body.span); return Ok(self.mk_expr(span, ExprKind::WhileLet(pats, expr, body, opt_label), attrs)); } // parse `loop {...}`, `loop` token already eaten fn parse_loop_expr(&mut self, opt_label: Option<Label>, span_lo: Span, mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); let span = span_lo.to(body.span); Ok(self.mk_expr(span, ExprKind::Loop(body, opt_label), attrs)) } /// Parse an `async move {...}` expression pub fn parse_async_block(&mut self, mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let span_lo = self.span; self.expect_keyword(keywords::Async)?; let capture_clause = if self.eat_keyword(keywords::Move) { CaptureBy::Value } else { CaptureBy::Ref }; let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); Ok(self.mk_expr( span_lo.to(body.span), ExprKind::Async(capture_clause, ast::DUMMY_NODE_ID, body), attrs)) } /// Parse a `do catch {...}` expression (`do catch` token already eaten) fn parse_catch_expr(&mut self, span_lo: Span, mut attrs: ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); Ok(self.mk_expr(span_lo.to(body.span), ExprKind::Catch(body), attrs)) } // `match` token already eaten fn parse_match_expr(&mut self, mut attrs: 
ThinVec<Attribute>) -> PResult<'a, P<Expr>> { let match_span = self.prev_span; let lo = self.prev_span; let discriminant = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; if let Err(mut e) = self.expect(&token::OpenDelim(token::Brace)) { if self.token == token::Token::Semi { e.span_suggestion_short_with_applicability( match_span, "try removing this `match`", "".to_owned(), Applicability::MaybeIncorrect // speculative ); } return Err(e) } attrs.extend(self.parse_inner_attributes()?); let mut arms: Vec<Arm> = Vec::new(); while self.token != token::CloseDelim(token::Brace) { match self.parse_arm() { Ok(arm) => arms.push(arm), Err(mut e) => { // Recover by skipping to the end of the block. e.emit(); self.recover_stmt(); let span = lo.to(self.span); if self.token == token::CloseDelim(token::Brace) { self.bump(); } return Ok(self.mk_expr(span, ExprKind::Match(discriminant, arms), attrs)); } } } let hi = self.span; self.bump(); return Ok(self.mk_expr(lo.to(hi), ExprKind::Match(discriminant, arms), attrs)); } crate fn parse_arm(&mut self) -> PResult<'a, Arm> { maybe_whole!(self, NtArm, |x| x); let attrs = self.parse_outer_attributes()?; // Allow a '|' before the pats (RFC 1925) self.eat(&token::BinOp(token::Or)); let pats = self.parse_pats()?; let guard = if self.eat_keyword(keywords::If) { Some(self.parse_expr()?) 
} else { None }; let arrow_span = self.span; self.expect(&token::FatArrow)?; let arm_start_span = self.span; let expr = self.parse_expr_res(Restrictions::STMT_EXPR, None) .map_err(|mut err| { err.span_label(arrow_span, "while parsing the `match` arm starting here"); err })?; let require_comma = classify::expr_requires_semi_to_be_stmt(&expr) && self.token != token::CloseDelim(token::Brace); if require_comma { let cm = self.sess.source_map(); self.expect_one_of(&[token::Comma], &[token::CloseDelim(token::Brace)]) .map_err(|mut err| { match (cm.span_to_lines(expr.span), cm.span_to_lines(arm_start_span)) { (Ok(ref expr_lines), Ok(ref arm_start_lines)) if arm_start_lines.lines[0].end_col == expr_lines.lines[0].end_col && expr_lines.lines.len() == 2 && self.token == token::FatArrow => { // We check whether there's any trailing code in the parse span, // if there isn't, we very likely have the following: // // X | &Y => "y" // | -- - missing comma // | | // | arrow_span // X | &X => "x" // | - ^^ self.span // | | // | parsed until here as `"y" & X` err.span_suggestion_short_with_applicability( cm.next_point(arm_start_span), "missing a comma here to end this `match` arm", ",".to_owned(), Applicability::MachineApplicable ); } _ => { err.span_label(arrow_span, "while parsing the `match` arm starting here"); } } err })?; } else { self.eat(&token::Comma); } Ok(ast::Arm { attrs, pats, guard, body: expr, }) } /// Parse an expression pub fn parse_expr(&mut self) -> PResult<'a, P<Expr>> { self.parse_expr_res(Restrictions::empty(), None) } /// Evaluate the closure with restrictions in place. /// /// After the closure is evaluated, restrictions are reset. 
fn with_res<F, T>(&mut self, r: Restrictions, f: F) -> T where F: FnOnce(&mut Self) -> T { let old = self.restrictions; self.restrictions = r; let r = f(self); self.restrictions = old; return r; } /// Parse an expression, subject to the given restrictions fn parse_expr_res(&mut self, r: Restrictions, already_parsed_attrs: Option<ThinVec<Attribute>>) -> PResult<'a, P<Expr>> { self.with_res(r, |this| this.parse_assoc_expr(already_parsed_attrs)) } /// Parse the RHS of a local variable declaration (e.g. '= 14;') fn parse_initializer(&mut self, skip_eq: bool) -> PResult<'a, Option<P<Expr>>> { if self.check(&token::Eq) { self.bump(); Ok(Some(self.parse_expr()?)) } else if skip_eq { Ok(Some(self.parse_expr()?)) } else { Ok(None) } } /// Parse patterns, separated by '|' s fn parse_pats(&mut self) -> PResult<'a, Vec<P<Pat>>> { let mut pats = Vec::new(); loop { pats.push(self.parse_top_level_pat()?); if self.token == token::OrOr { let mut err = self.struct_span_err(self.span, "unexpected token `||` after pattern"); err.span_suggestion_with_applicability( self.span, "use a single `|` to specify multiple patterns", "|".to_owned(), Applicability::MachineApplicable ); err.emit(); self.bump(); } else if self.check(&token::BinOp(token::Or)) { self.bump(); } else { return Ok(pats); } }; } // Parses a parenthesized list of patterns like // `()`, `(p)`, `(p,)`, `(p, q)`, or `(p, .., q)`. Returns: // - a vector of the patterns that were parsed // - an option indicating the index of the `..` element // - a boolean indicating whether a trailing comma was present. // Trailing commas are significant because (p) and (p,) are different patterns. 
    fn parse_parenthesized_pat_list(&mut self) -> PResult<'a, (Vec<P<Pat>>, Option<usize>, bool)> {
        self.expect(&token::OpenDelim(token::Paren))?;
        let result = self.parse_pat_list()?;
        self.expect(&token::CloseDelim(token::Paren))?;
        Ok(result)
    }

    /// Parse a comma-separated pattern list up to (but not including) a `)`.
    /// Returns the patterns, the position of a `..` element if present, and
    /// whether the list ended with a trailing comma.
    fn parse_pat_list(&mut self) -> PResult<'a, (Vec<P<Pat>>, Option<usize>, bool)> {
        let mut fields = Vec::new();
        let mut ddpos = None;
        let mut trailing_comma = false;
        loop {
            if self.eat(&token::DotDot) {
                if ddpos.is_none() {
                    ddpos = Some(fields.len());
                } else {
                    // Emit a friendly error, ignore `..` and continue parsing
                    self.span_err(self.prev_span,
                                  "`..` can only be used once per tuple or tuple struct pattern");
                }
            } else if !self.check(&token::CloseDelim(token::Paren)) {
                fields.push(self.parse_pat()?);
            } else {
                break
            }

            trailing_comma = self.eat(&token::Comma);
            if !trailing_comma {
                break
            }
        }

        if ddpos == Some(fields.len()) && trailing_comma {
            // `..` needs to be followed by `)` or `, pat`, `..,)` is disallowed.
            self.span_err(self.prev_span, "trailing comma is not permitted after `..`");
        }

        Ok((fields, ddpos, trailing_comma))
    }

    /// Parse the elements of a slice pattern `[before.., slice?, ..after]`.
    /// Returns `(before, slice, after)` where `slice` is the optional
    /// subslice pattern (a wildcard when written as a bare `..`).
    fn parse_pat_vec_elements(
        &mut self,
    ) -> PResult<'a, (Vec<P<Pat>>, Option<P<Pat>>, Vec<P<Pat>>)> {
        let mut before = Vec::new();
        let mut slice = None;
        let mut after = Vec::new();
        let mut first = true;
        // True until the `..` element has been seen; elements parsed before it
        // go into `before`, elements after it into `after`.
        let mut before_slice = true;

        while self.token != token::CloseDelim(token::Bracket) {
            if first {
                first = false;
            } else {
                self.expect(&token::Comma)?;

                if self.token == token::CloseDelim(token::Bracket)
                        && (before_slice || !after.is_empty()) {
                    break
                }
            }

            if before_slice {
                if self.eat(&token::DotDot) {
                    // A bare `..` (followed by `,` or `]`) is a wildcard subslice.
                    if self.check(&token::Comma) ||
                            self.check(&token::CloseDelim(token::Bracket)) {
                        slice = Some(P(Pat {
                            id: ast::DUMMY_NODE_ID,
                            node: PatKind::Wild,
                            span: self.prev_span,
                        }));
                        before_slice = false;
                    }
                    continue
                }
            }

            let subpat = self.parse_pat()?;
            if before_slice && self.eat(&token::DotDot) {
                // `pat..` — the pattern binds the subslice itself.
                slice = Some(subpat);
                before_slice = false;
            } else if before_slice {
                before.push(subpat);
            } else {
                after.push(subpat);
            }
        }

        Ok((before, slice, after))
    }

    /// Parse a single field of a struct pattern: either `fieldname: pat` or
    /// the shorthand `(box) (ref) (mut) fieldname`.
    fn parse_pat_field(
        &mut self,
        lo: Span,
        attrs: Vec<Attribute>
    ) -> PResult<'a, source_map::Spanned<ast::FieldPat>> {
        // Check if a colon exists one ahead. This means we're parsing a fieldname.
        let hi;
        let (subpat, fieldname, is_shorthand) = if self.look_ahead(1, |t| t == &token::Colon) {
            // Parsing a pattern of the form "fieldname: pat"
            let fieldname = self.parse_field_name()?;
            self.bump();
            let pat = self.parse_pat()?;
            hi = pat.span;
            (pat, fieldname, false)
        } else {
            // Parsing a pattern of the form "(box) (ref) (mut) fieldname"
            let is_box = self.eat_keyword(keywords::Box);
            let boxed_span = self.span;
            let is_ref = self.eat_keyword(keywords::Ref);
            let is_mut = self.eat_keyword(keywords::Mut);
            let fieldname = self.parse_ident()?;
            hi = self.prev_span;

            let bind_type = match (is_ref, is_mut) {
                (true, true) => BindingMode::ByRef(Mutability::Mutable),
                (true, false) => BindingMode::ByRef(Mutability::Immutable),
                (false, true) => BindingMode::ByValue(Mutability::Mutable),
                (false, false) => BindingMode::ByValue(Mutability::Immutable),
            };
            let fieldpat = P(Pat {
                id: ast::DUMMY_NODE_ID,
                node: PatKind::Ident(bind_type, fieldname, None),
                span: boxed_span.to(hi),
            });

            let subpat = if is_box {
                P(Pat {
                    id: ast::DUMMY_NODE_ID,
                    node: PatKind::Box(fieldpat),
                    span: lo.to(hi),
                })
            } else {
                fieldpat
            };
            (subpat, fieldname, true)
        };

        Ok(source_map::Spanned {
            span: lo.to(hi),
            node: ast::FieldPat {
                ident: fieldname,
                pat: subpat,
                is_shorthand,
                attrs: attrs.into(),
            }
        })
    }

    /// Parse the fields of a struct-like pattern
    fn parse_pat_fields(&mut self) -> PResult<'a, (Vec<source_map::Spanned<ast::FieldPat>>, bool)> {
        let mut fields = Vec::new();
        // Whether a `..` ("et cetera") element was seen.
        let mut etc = false;
        let mut ate_comma = true;
        // Error for a misplaced `..`, delayed so its suggestion can span the
        // whole field list (see the "move the `..` to the end" suggestion below).
        let mut delayed_err: Option<DiagnosticBuilder<'a>> = None;
        let mut etc_span = None;

        while self.token != token::CloseDelim(token::Brace) {
            let attrs = self.parse_outer_attributes()?;
            let lo = self.span;

            // check that a comma comes after every field
            if !ate_comma {
                let err = self.struct_span_err(self.prev_span, "expected `,`");
                return Err(err);
            }
            ate_comma = false;

            if self.check(&token::DotDot) || self.token == token::DotDotDot {
                etc = true;
                let mut etc_sp = self.span;

                if self.token == token::DotDotDot { // Issue #46718
                    // Accept `...` as if it were `..` to avoid further errors
                    let mut err = self.struct_span_err(self.span,
                                                       "expected field pattern, found `...`");
                    err.span_suggestion_with_applicability(
                        self.span,
                        "to omit remaining fields, use one fewer `.`",
                        "..".to_owned(),
                        Applicability::MachineApplicable
                    );
                    err.emit();
                }
                self.bump();  // `..` || `...`

                if self.token == token::CloseDelim(token::Brace) {
                    etc_span = Some(etc_sp);
                    break;
                }
                let token_str = self.this_token_to_string();
                let mut err = self.fatal(&format!("expected `}}`, found `{}`", token_str));

                err.span_label(self.span, "expected `}`");
                let mut comma_sp = None;
                if self.token == token::Comma { // Issue #49257
                    etc_sp = etc_sp.to(self.sess.source_map().span_until_non_whitespace(self.span));
                    err.span_label(etc_sp,
                                   "`..` must be at the end and cannot have a trailing comma");
                    comma_sp = Some(self.span);
                    self.bump();
                    ate_comma = true;
                }

                etc_span = Some(etc_sp);
                if self.token == token::CloseDelim(token::Brace) {
                    // If the struct looks otherwise well formed, recover and continue.
                    if let Some(sp) = comma_sp {
                        err.span_suggestion_short(sp, "remove this comma", "".into());
                    }
                    err.emit();
                    break;
                } else if self.token.is_ident() && ate_comma {
                    // Accept fields coming after `..,`.
                    // This way we avoid "pattern missing fields" errors afterwards.
                    // We delay this error until the end in order to have a span for a
                    // suggested fix.
                    if let Some(mut delayed_err) = delayed_err {
                        delayed_err.emit();
                        return Err(err);
                    } else {
                        delayed_err = Some(err);
                    }
                } else {
                    if let Some(mut err) = delayed_err {
                        err.emit();
                    }
                    return Err(err);
                }
            }

            fields.push(match self.parse_pat_field(lo, attrs) {
                Ok(field) => field,
                Err(err) => {
                    if let Some(mut delayed_err) = delayed_err {
                        delayed_err.emit();
                    }
                    return Err(err);
                }
            });
            ate_comma = self.eat(&token::Comma);
        }

        if let Some(mut err) = delayed_err {
            if let Some(etc_span) = etc_span {
                err.multipart_suggestion(
                    "move the `..` to the end of the field list",
                    vec![
                        (etc_span, "".into()),
                        (self.span, format!("{}.. }}", if ate_comma { "" } else { ", " })),
                    ],
                );
            }
            err.emit();
        }
        return Ok((fields, etc));
    }

    /// Parse the end expression of a range pattern: a (possibly qualified)
    /// path or a literal with optional leading minus.
    fn parse_pat_range_end(&mut self) -> PResult<'a, P<Expr>> {
        if self.token.is_path_start() {
            let lo = self.span;
            let (qself, path) = if self.eat_lt() {
                // Parse a qualified path
                let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
                (Some(qself), path)
            } else {
                // Parse an unqualified path
                (None, self.parse_path(PathStyle::Expr)?)
            };
            let hi = self.prev_span;
            Ok(self.mk_expr(lo.to(hi), ExprKind::Path(qself, path), ThinVec::new()))
        } else {
            self.parse_literal_maybe_minus()
        }
    }

    // helper function to decide whether to parse as ident binding or to try to do
    // something more complex like range patterns
    fn parse_as_ident(&mut self) -> bool {
        self.look_ahead(1, |t| match *t {
            token::OpenDelim(token::Paren) | token::OpenDelim(token::Brace) |
            token::DotDotDot | token::DotDotEq | token::ModSep | token::Not => Some(false),
            // ensure slice patterns [a, b.., c] and [a, b, c..] don't go into the
            // range pattern branch
            token::DotDot => None,
            _ => Some(true),
        }).unwrap_or_else(|| self.look_ahead(2, |t| match *t {
            token::Comma | token::CloseDelim(token::Bracket) => true,
            _ => false,
        }))
    }

    /// A wrapper around `parse_pat` with some special error handling for the
    /// "top-level" patterns in a match arm, `for` loop, `let`, &c. (in contrast
    /// to subpatterns within such).
    fn parse_top_level_pat(&mut self) -> PResult<'a, P<Pat>> {
        let pat = self.parse_pat()?;
        if self.token == token::Comma {
            // An unexpected comma after a top-level pattern is a clue that the
            // user (perhaps more accustomed to some other language) forgot the
            // parentheses in what should have been a tuple pattern; return a
            // suggestion-enhanced error here rather than choking on the comma
            // later.
            let comma_span = self.span;
            self.bump();
            if let Err(mut err) = self.parse_pat_list() {
                // We didn't expect this to work anyway; we just wanted
                // to advance to the end of the comma-sequence so we know
                // the span to suggest parenthesizing
                err.cancel();
            }
            let seq_span = pat.span.to(self.prev_span);
            let mut err = self.struct_span_err(comma_span,
                                               "unexpected `,` in pattern");
            if let Ok(seq_snippet) = self.sess.source_map().span_to_snippet(seq_span) {
                err.span_suggestion_with_applicability(
                    seq_span,
                    "try adding parentheses",
                    format!("({})", seq_snippet),
                    Applicability::MachineApplicable
                );
            }
            return Err(err);
        }
        Ok(pat)
    }

    /// Parse a pattern.
    pub fn parse_pat(&mut self) -> PResult<'a, P<Pat>> {
        self.parse_pat_with_range_pat(true)
    }

    /// Parse a pattern, with a setting whether modern range patterns e.g. `a..=b`, `a..b` are
    /// allowed.
    fn parse_pat_with_range_pat(&mut self, allow_range_pat: bool) -> PResult<'a, P<Pat>> {
        maybe_whole!(self, NtPat, |x| x);

        let lo = self.span;
        let pat;
        // Dispatch on the leading token to decide which pattern form to parse.
        match self.token {
            token::BinOp(token::And) | token::AndAnd => {
                // Parse &pat / &mut pat
                self.expect_and()?;
                let mutbl = self.parse_mutability();
                if let token::Lifetime(ident) = self.token {
                    let mut err = self.fatal(&format!("unexpected lifetime `{}` in pattern",
                                                      ident));
                    err.span_label(self.span, "unexpected lifetime");
                    return Err(err);
                }
                let subpat = self.parse_pat_with_range_pat(false)?;
                pat = PatKind::Ref(subpat, mutbl);
            }
            token::OpenDelim(token::Paren) => {
                // Parse (pat,pat,pat,...) as tuple pattern
                let (fields, ddpos, trailing_comma) = self.parse_parenthesized_pat_list()?;
                // `(p)` without `..` or a trailing comma is a parenthesized
                // pattern, not a one-element tuple.
                pat = if fields.len() == 1 && ddpos.is_none() && !trailing_comma {
                    PatKind::Paren(fields.into_iter().nth(0).unwrap())
                } else {
                    PatKind::Tuple(fields, ddpos)
                };
            }
            token::OpenDelim(token::Bracket) => {
                // Parse [pat,pat,...] as slice pattern
                self.bump();
                let (before, slice, after) = self.parse_pat_vec_elements()?;
                self.expect(&token::CloseDelim(token::Bracket))?;
                pat = PatKind::Slice(before, slice, after);
            }
            // At this point, token != &, &&, (, [
            _ => if self.eat_keyword(keywords::Underscore) {
                // Parse _
                pat = PatKind::Wild;
            } else if self.eat_keyword(keywords::Mut) {
                // Parse mut ident @ pat / mut ref ident @ pat
                let mutref_span = self.prev_span.to(self.span);
                let binding_mode = if self.eat_keyword(keywords::Ref) {
                    // `mut ref` is the wrong order — recover as `ref mut`.
                    self.diagnostic()
                        .struct_span_err(mutref_span, "the order of `mut` and `ref` is incorrect")
                        .span_suggestion_with_applicability(
                            mutref_span,
                            "try switching the order",
                            "ref mut".into(),
                            Applicability::MachineApplicable
                        ).emit();
                    BindingMode::ByRef(Mutability::Mutable)
                } else {
                    BindingMode::ByValue(Mutability::Mutable)
                };
                pat = self.parse_pat_ident(binding_mode)?;
            } else if self.eat_keyword(keywords::Ref) {
                // Parse ref ident @ pat / ref mut ident @ pat
                let mutbl = self.parse_mutability();
                pat = self.parse_pat_ident(BindingMode::ByRef(mutbl))?;
            } else if self.eat_keyword(keywords::Box) {
                // Parse box pat
                let subpat = self.parse_pat_with_range_pat(false)?;
                pat = PatKind::Box(subpat);
            } else if self.token.is_ident() && !self.token.is_reserved_ident() &&
                      self.parse_as_ident() {
                // Parse ident @ pat
                // This can give false positives and parse nullary enums,
                // they are dealt with later in resolve
                let binding_mode = BindingMode::ByValue(Mutability::Immutable);
                pat = self.parse_pat_ident(binding_mode)?;
            } else if self.token.is_path_start() {
                // Parse pattern starting with a path
                let (qself, path) = if self.eat_lt() {
                    // Parse a qualified path
                    let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
                    (Some(qself), path)
                } else {
                    // Parse an unqualified path
                    (None, self.parse_path(PathStyle::Expr)?)
                };
                // What follows the path determines the pattern kind.
                match self.token {
                    token::Not if qself.is_none() => {
                        // Parse macro invocation
                        self.bump();
                        let (delim, tts) = self.expect_delimited_token_tree()?;
                        let mac = respan(lo.to(self.prev_span), Mac_ { path, tts, delim });
                        pat = PatKind::Mac(mac);
                    }
                    token::DotDotDot | token::DotDotEq | token::DotDot => {
                        let end_kind = match self.token {
                            token::DotDot => RangeEnd::Excluded,
                            token::DotDotDot => RangeEnd::Included(RangeSyntax::DotDotDot),
                            token::DotDotEq => RangeEnd::Included(RangeSyntax::DotDotEq),
                            _ => panic!("can only parse `..`/`...`/`..=` for ranges \
                                         (checked above)"),
                        };
                        let op_span = self.span;
                        // Parse range
                        let span = lo.to(self.prev_span);
                        let begin = self.mk_expr(span, ExprKind::Path(qself, path),
                                                 ThinVec::new());
                        self.bump();
                        let end = self.parse_pat_range_end()?;
                        let op = Spanned { span: op_span, node: end_kind };
                        pat = PatKind::Range(begin, end, op);
                    }
                    token::OpenDelim(token::Brace) => {
                        if qself.is_some() {
                            let msg = "unexpected `{` after qualified path";
                            let mut err = self.fatal(msg);
                            err.span_label(self.span, msg);
                            return Err(err);
                        }
                        // Parse struct pattern
                        self.bump();
                        let (fields, etc) = self.parse_pat_fields().unwrap_or_else(|mut e| {
                            e.emit();
                            self.recover_stmt();
                            (vec![], false)
                        });
                        self.bump();
                        pat = PatKind::Struct(path, fields, etc);
                    }
                    token::OpenDelim(token::Paren) => {
                        if qself.is_some() {
                            let msg = "unexpected `(` after qualified path";
                            let mut err = self.fatal(msg);
                            err.span_label(self.span, msg);
                            return Err(err);
                        }
                        // Parse tuple struct or enum pattern
                        let (fields, ddpos, _) = self.parse_parenthesized_pat_list()?;
                        pat = PatKind::TupleStruct(path, fields, ddpos)
                    }
                    _ => pat = PatKind::Path(qself, path),
                }
            } else {
                // Try to parse everything else as literal with optional minus
                match self.parse_literal_maybe_minus() {
                    Ok(begin) => {
                        let op_span = self.span;
                        if self.check(&token::DotDot) || self.check(&token::DotDotEq) ||
                                self.check(&token::DotDotDot) {
                            let end_kind = if self.eat(&token::DotDotDot) {
                                RangeEnd::Included(RangeSyntax::DotDotDot)
                            } else if self.eat(&token::DotDotEq) {
                                RangeEnd::Included(RangeSyntax::DotDotEq)
                            } else if self.eat(&token::DotDot) {
                                RangeEnd::Excluded
                            } else {
                                panic!("impossible case: we already matched \
                                        on a range-operator token")
                            };
                            let end = self.parse_pat_range_end()?;
                            let op = Spanned { span: op_span, node: end_kind };
                            pat = PatKind::Range(begin, end, op);
                        } else {
                            pat = PatKind::Lit(begin);
                        }
                    }
                    Err(mut err) => {
                        self.cancel(&mut err);
                        let msg = format!("expected pattern, found {}",
                                          self.this_token_descr());
                        let mut err = self.fatal(&msg);
                        err.span_label(self.span, "expected pattern");
                        return Err(err);
                    }
                }
            }
        }

        let pat = Pat { node: pat, span: lo.to(self.prev_span), id: ast::DUMMY_NODE_ID };
        let pat = self.maybe_recover_from_bad_qpath(pat, true)?;

        // In positions where range patterns are not allowed (e.g. under `&`),
        // reject ambiguous `a..b`/`a..=b` with a parenthesization suggestion;
        // legacy `a...b` is still tolerated here.
        if !allow_range_pat {
            match pat.node {
                PatKind::Range(
                    _, _, Spanned { node: RangeEnd::Included(RangeSyntax::DotDotDot), .. }
                ) => {},
                PatKind::Range(..) => {
                    let mut err = self.struct_span_err(
                        pat.span,
                        "the range pattern here has ambiguous interpretation",
                    );
                    err.span_suggestion_with_applicability(
                        pat.span,
                        "add parentheses to clarify the precedence",
                        format!("({})", pprust::pat_to_string(&pat)),
                        // "ambiguous interpretation" implies that we have to be guessing
                        Applicability::MaybeIncorrect
                    );
                    return Err(err);
                }
                _ => {}
            }
        }

        Ok(P(pat))
    }

    /// Parse ident or ident @ pat
    /// used by the copy foo and ref foo patterns to give a good
    /// error message when parsing mistakes like ref foo(a,b)
    fn parse_pat_ident(&mut self,
                       binding_mode: ast::BindingMode)
                       -> PResult<'a, PatKind> {
        let ident = self.parse_ident()?;
        let sub = if self.eat(&token::At) {
            Some(self.parse_pat()?)
        } else {
            None
        };

        // just to be friendly, if they write something like
        //   ref Some(i)
        // we end up here with ( as the current token.  This shortly
        // leads to a parse error.  Note that if there is no explicit
        // binding mode then we do not end up here, because the lookahead
        // will direct us over to parse_enum_variant()
        if self.token == token::OpenDelim(token::Paren) {
            return Err(self.span_fatal(
                self.prev_span,
                "expected identifier, found enum pattern"))
        }

        Ok(PatKind::Ident(binding_mode, ident, sub))
    }

    /// Parse a local variable declaration
    fn parse_local(&mut self, attrs: ThinVec<Attribute>) -> PResult<'a, P<Local>> {
        let lo = self.prev_span;
        let pat = self.parse_top_level_pat()?;

        let (err, ty) = if self.eat(&token::Colon) {
            // Save the state of the parser before parsing type normally, in case there is a `:`
            // instead of an `=` typo.
            let parser_snapshot_before_type = self.clone();
            let colon_sp = self.prev_span;
            match self.parse_ty() {
                Ok(ty) => (None, Some(ty)),
                Err(mut err) => {
                    // Rewind to before attempting to parse the type and continue parsing
                    let parser_snapshot_after_type = self.clone();
                    mem::replace(self, parser_snapshot_before_type);

                    let snippet = self.sess.source_map().span_to_snippet(pat.span).unwrap();
                    err.span_label(pat.span, format!("while parsing the type for `{}`", snippet));
                    (Some((parser_snapshot_after_type, colon_sp, err)), None)
                }
            }
        } else {
            (None, None)
        };
        let init = match (self.parse_initializer(err.is_some()), err) {
            (Ok(init), None) => {  // init parsed, ty parsed
                init
            }
            (Ok(init), Some((_, colon_sp, mut err))) => {  // init parsed, ty error
                // Could parse the type as if it were the initializer, it is likely there was a
                // typo in the code: `:` instead of `=`. Add suggestion and emit the error.
                err.span_suggestion_short_with_applicability(
                    colon_sp,
                    "use `=` if you meant to assign",
                    "=".to_string(),
                    Applicability::MachineApplicable
                );
                err.emit();
                // As this was parsed successfully, continue as if the code has been fixed for the
                // rest of the file. It will still fail due to the emitted error, but we avoid
                // extra noise.
                init
            }
            (Err(mut init_err), Some((snapshot, _, ty_err))) => {  // init error, ty error
                init_err.cancel();
                // Couldn't parse the type nor the initializer, only raise the type error and
                // return to the parser state before parsing the type as the initializer.
                // let x: <parse_error>;
                mem::replace(self, snapshot);
                return Err(ty_err);
            }
            (Err(err), None) => {  // init error, ty parsed
                // Couldn't parse the initializer and we're not attempting to recover a failed
                // parse of the type, return the error.
                return Err(err);
            }
        };
        let hi = if self.token == token::Semi {
            self.span
        } else {
            self.prev_span
        };
        Ok(P(ast::Local {
            ty,
            pat,
            init,
            id: ast::DUMMY_NODE_ID,
            span: lo.to(hi),
            attrs,
        }))
    }

    /// Parse a structure field
    fn parse_name_and_ty(&mut self,
                         lo: Span,
                         vis: Visibility,
                         attrs: Vec<Attribute>)
                         -> PResult<'a, StructField> {
        let name = self.parse_ident()?;
        self.expect(&token::Colon)?;
        let ty = self.parse_ty()?;
        Ok(StructField {
            span: lo.to(self.prev_span),
            ident: Some(name),
            vis,
            id: ast::DUMMY_NODE_ID,
            ty,
            attrs,
        })
    }

    /// Emit an expected item after attributes error.
    fn expected_item_err(&self, attrs: &[Attribute]) {
        let message = match attrs.last() {
            Some(&Attribute { is_sugared_doc: true, .. }) => "expected item after doc comment",
            _ => "expected item after attributes",
        };

        self.span_err(self.prev_span, message);
    }

    /// Parse a statement. This stops just before trailing semicolons on everything but items.
    /// e.g. a `StmtKind::Semi` parses to a `StmtKind::Expr`, leaving the trailing `;` unconsumed.
    pub fn parse_stmt(&mut self) -> PResult<'a, Option<Stmt>> {
        Ok(self.parse_stmt_(true))
    }

    // Eat tokens until we can be relatively sure we reached the end of the
    // statement. This is something of a best-effort heuristic.
    //
    // We terminate when we find an unmatched `}` (without consuming it).
    fn recover_stmt(&mut self) {
        self.recover_stmt_(SemiColonMode::Ignore, BlockMode::Ignore)
    }

    // If `break_on_semi` is `Break`, then we will stop consuming tokens after
    // finding (and consuming) a `;` outside of `{}` or `[]` (note that this is
    // approximate - it can mean we break too early due to macros, but that
    // should only lead to sub-optimal recovery, not inaccurate parsing).
    //
    // If `break_on_block` is `Break`, then we will stop consuming tokens
    // after finding (and consuming) a brace-delimited block.
    fn recover_stmt_(&mut self, break_on_semi: SemiColonMode, break_on_block: BlockMode) {
        let mut brace_depth = 0;
        let mut bracket_depth = 0;
        let mut in_block = false;
        debug!("recover_stmt_ enter loop (semi={:?}, block={:?})",
               break_on_semi, break_on_block);
        loop {
            debug!("recover_stmt_ loop {:?}", self.token);
            match self.token {
                token::OpenDelim(token::DelimToken::Brace) => {
                    brace_depth += 1;
                    self.bump();
                    if break_on_block == BlockMode::Break &&
                       brace_depth == 1 &&
                       bracket_depth == 0 {
                        in_block = true;
                    }
                }
                token::OpenDelim(token::DelimToken::Bracket) => {
                    bracket_depth += 1;
                    self.bump();
                }
                token::CloseDelim(token::DelimToken::Brace) => {
                    if brace_depth == 0 {
                        // Unmatched `}` — left unconsumed for the caller.
                        debug!("recover_stmt_ return - close delim {:?}", self.token);
                        return;
                    }
                    brace_depth -= 1;
                    self.bump();
                    if in_block && bracket_depth == 0 && brace_depth == 0 {
                        debug!("recover_stmt_ return - block end {:?}", self.token);
                        return;
                    }
                }
                token::CloseDelim(token::DelimToken::Bracket) => {
                    bracket_depth -= 1;
                    // Clamp at zero: an unmatched `]` is simply skipped.
                    if bracket_depth < 0 {
                        bracket_depth = 0;
                    }
                    self.bump();
                }
                token::Eof => {
                    debug!("recover_stmt_ return - Eof");
                    return;
                }
                token::Semi => {
                    self.bump();
                    if break_on_semi == SemiColonMode::Break &&
                       brace_depth == 0 &&
                       bracket_depth == 0 {
                        debug!("recover_stmt_ return - Semi");
                        return;
                    }
                }
                _ => {
                    self.bump()
                }
            }
        }
    }

    /// Parse a statement, emitting any error and recovering to a statement
    /// boundary (returning `None`) on failure.
    fn parse_stmt_(&mut self, macro_legacy_warnings: bool) -> Option<Stmt> {
        self.parse_stmt_without_recovery(macro_legacy_warnings).unwrap_or_else(|mut e| {
            e.emit();
            self.recover_stmt_(SemiColonMode::Break, BlockMode::Ignore);
            None
        })
    }

    /// Lookahead: are we at the start of an `async {` / `async move {` block?
    fn is_async_block(&mut self) -> bool {
        self.token.is_keyword(keywords::Async) &&
        (
            ( // `async move {`
                self.look_ahead(1, |t| t.is_keyword(keywords::Move)) &&
                self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))
            ) || ( // `async {`
                self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace))
            )
        )
    }

    /// Lookahead: are we at the start of a `do catch {` expression?
    fn is_catch_expr(&mut self) -> bool {
        self.token.is_keyword(keywords::Do) &&
        self.look_ahead(1, |t| t.is_keyword(keywords::Catch)) &&
        self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace)) &&

        // prevent `while catch {} {}`, `if catch {} {} else {}`, etc.
        !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
    }

    /// Lookahead: `union` used as an item keyword (followed by an identifier),
    /// as opposed to `union` used as a path segment.
    fn is_union_item(&self) -> bool {
        self.token.is_keyword(keywords::Union) &&
        self.look_ahead(1, |t| t.is_ident() && !t.is_reserved_ident())
    }

    /// Lookahead: `crate` as a visibility, not as the start of a `crate::` path.
    fn is_crate_vis(&self) -> bool {
        self.token.is_keyword(keywords::Crate) && self.look_ahead(1, |t| t != &token::ModSep)
    }

    /// Lookahead: `extern` starting an item, not an `extern::` path.
    fn is_extern_non_path(&self) -> bool {
        self.token.is_keyword(keywords::Extern) && self.look_ahead(1, |t| t != &token::ModSep)
    }

    /// Lookahead: `existential type` declaration.
    fn is_existential_type_decl(&self) -> bool {
        self.token.is_keyword(keywords::Existential) &&
        self.look_ahead(1, |t| t.is_keyword(keywords::Type))
    }

    /// Lookahead: `auto trait` or `unsafe auto trait` item.
    fn is_auto_trait_item(&mut self) -> bool {
        // auto trait
        (self.token.is_keyword(keywords::Auto)
            && self.look_ahead(1, |t| t.is_keyword(keywords::Trait)))
        || // unsafe auto trait
        (self.token.is_keyword(keywords::Unsafe) &&
         self.look_ahead(1, |t| t.is_keyword(keywords::Auto)) &&
         self.look_ahead(2, |t| t.is_keyword(keywords::Trait)))
    }

    /// If the current tokens form a macro definition (2.0-style `macro name ...`
    /// or legacy `macro_rules! name ...`), parse it into an item; otherwise
    /// consume nothing and return `Ok(None)`.
    fn eat_macro_def(&mut self, attrs: &[Attribute], vis: &Visibility, lo: Span)
                     -> PResult<'a, Option<P<Item>>> {
        let token_lo = self.span;
        let (ident, def) = match self.token {
            token::Ident(ident, false) if ident.name == keywords::Macro.name() => {
                self.bump();
                let ident = self.parse_ident()?;
                let tokens = if self.check(&token::OpenDelim(token::Brace)) {
                    match self.parse_token_tree() {
                        TokenTree::Delimited(_, ref delimited) => delimited.stream(),
                        _ => unreachable!(),
                    }
                } else if self.check(&token::OpenDelim(token::Paren)) {
                    // `macro m(args) { body }` — stitch `args => body` together
                    // into a single token stream.
                    let args = self.parse_token_tree();
                    let body = if self.check(&token::OpenDelim(token::Brace)) {
                        self.parse_token_tree()
                    } else {
                        self.unexpected()?;
                        unreachable!()
                    };
                    TokenStream::concat(vec![
                        args.into(),
                        TokenTree::Token(token_lo.to(self.prev_span), token::FatArrow).into(),
                        body.into(),
                    ])
                } else {
                    self.unexpected()?;
                    unreachable!()
                };

                (ident, ast::MacroDef { tokens: tokens.into(), legacy: false })
            }
            token::Ident(ident, _) if ident.name == "macro_rules" &&
                                      self.look_ahead(1, |t| *t == token::Not) => {
                let prev_span = self.prev_span;
                self.complain_if_pub_macro(&vis.node, prev_span);
                self.bump();
                self.bump();

                let ident = self.parse_ident()?;
                let (delim, tokens) = self.expect_delimited_token_tree()?;
                if delim != MacDelimiter::Brace {
                    if !self.eat(&token::Semi) {
                        let msg = "macros that expand to items must either \
                                   be surrounded with braces or followed by a semicolon";
                        self.span_err(self.prev_span, msg);
                    }
                }

                (ident, ast::MacroDef { tokens: tokens, legacy: true })
            }
            _ => return Ok(None),
        };

        let span = lo.to(self.prev_span);
        Ok(Some(self.mk_item(span, ident, ItemKind::MacroDef(def), vis.clone(), attrs.to_vec())))
    }

    /// Parse a statement without attempting token-level error recovery.
    /// Returns `Ok(None)` for a bare `;` or a closing `}`.
    fn parse_stmt_without_recovery(&mut self,
                                   macro_legacy_warnings: bool)
                                   -> PResult<'a, Option<Stmt>> {
        maybe_whole!(self, NtStmt, |x| Some(x));

        let attrs = self.parse_outer_attributes()?;
        let lo = self.span;

        Ok(Some(if self.eat_keyword(keywords::Let) {
            Stmt {
                id: ast::DUMMY_NODE_ID,
                node: StmtKind::Local(self.parse_local(attrs.into())?),
                span: lo.to(self.prev_span),
            }
        } else if let Some(macro_def) = self.eat_macro_def(
            &attrs,
            &source_map::respan(lo, VisibilityKind::Inherited),
            lo,
        )? {
            Stmt {
                id: ast::DUMMY_NODE_ID,
                node: StmtKind::Item(macro_def),
                span: lo.to(self.prev_span),
            }
        // Starts like a simple path, being careful to avoid contextual keywords
        // such as a union items, item with `crate` visibility or auto trait items.
        // Our goal here is to parse an arbitrary path `a::b::c` but not something that starts
        // like a path (1 token), but it fact not a path.
        // `union::b::c` - path, `union U { ... }` - not a path.
        // `crate::b::c` - path, `crate struct S;` - not a path.
        // `extern::b::c` - path, `extern crate c;` - not a path.
        } else if self.token.is_path_start() && !self.token.is_qpath_start() &&
                  !self.is_union_item() && !self.is_crate_vis() && !self.is_extern_non_path() &&
                  !self.is_existential_type_decl() && !self.is_auto_trait_item() {
            let pth = self.parse_path(PathStyle::Expr)?;

            if !self.eat(&token::Not) {
                // Not a macro invocation — the path begins an expression statement.
                let expr = if self.check(&token::OpenDelim(token::Brace)) {
                    self.parse_struct_expr(lo, pth, ThinVec::new())?
                } else {
                    let hi = self.prev_span;
                    self.mk_expr(lo.to(hi), ExprKind::Path(None, pth), ThinVec::new())
                };

                let expr = self.with_res(Restrictions::STMT_EXPR, |this| {
                    let expr = this.parse_dot_or_call_expr_with(expr, lo, attrs.into())?;
                    this.parse_assoc_expr_with(0, LhsExpr::AlreadyParsed(expr))
                })?;

                return Ok(Some(Stmt {
                    id: ast::DUMMY_NODE_ID,
                    node: StmtKind::Expr(expr),
                    span: lo.to(self.prev_span),
                }));
            }

            // it's a macro invocation
            let id = match self.token {
                token::OpenDelim(_) => keywords::Invalid.ident(), // no special identifier
                _ => self.parse_ident()?,
            };

            // check that we're pointing at delimiters (need to check
            // again after the `if`, because of `parse_ident`
            // consuming more tokens).
            match self.token {
                token::OpenDelim(_) => {}
                _ => {
                    // we only expect an ident if we didn't parse one
                    // above.
                    let ident_str = if id.name == keywords::Invalid.name() {
                        "identifier, "
                    } else {
                        ""
                    };
                    let tok_str = self.this_token_to_string();
                    let mut err = self.fatal(&format!("expected {}`(` or `{{`, found `{}`",
                                                      ident_str,
                                                      tok_str));
                    err.span_label(self.span, format!("expected {}`(` or `{{`", ident_str));
                    return Err(err)
                },
            }

            let (delim, tts) = self.expect_delimited_token_tree()?;
            let hi = self.prev_span;

            let style = if delim == MacDelimiter::Brace {
                MacStmtStyle::Braces
            } else {
                MacStmtStyle::NoBraces
            };

            if id.name == keywords::Invalid.name() {
                let mac = respan(lo.to(hi), Mac_ { path: pth, tts, delim });
                let node = if delim == MacDelimiter::Brace ||
                              self.token == token::Semi || self.token == token::Eof {
                    StmtKind::Mac(P((mac, style, attrs.into())))
                }
                // We used to incorrectly stop parsing macro-expanded statements here.
                // If the next token will be an error anyway but could have parsed with the
                // earlier behavior, stop parsing here and emit a warning to avoid breakage.
                else if macro_legacy_warnings && self.token.can_begin_expr() &&
                    match self.token {
                        // These can continue an expression, so we can't stop parsing and warn.
                        token::OpenDelim(token::Paren) | token::OpenDelim(token::Bracket) |
                        token::BinOp(token::Minus) | token::BinOp(token::Star) |
                        token::BinOp(token::And) | token::BinOp(token::Or) |
                        token::AndAnd | token::OrOr |
                        token::DotDot | token::DotDotDot | token::DotDotEq => false,
                        _ => true,
                    } {
                    self.warn_missing_semicolon();
                    StmtKind::Mac(P((mac, style, attrs.into())))
                } else {
                    // The macro call is the head of a larger expression
                    // (`mac!() + 1`, `mac!().method()`, ...).
                    let e = self.mk_mac_expr(lo.to(hi), mac.node, ThinVec::new());
                    let e = self.parse_dot_or_call_expr_with(e, lo, attrs.into())?;
                    let e = self.parse_assoc_expr_with(0, LhsExpr::AlreadyParsed(e))?;
                    StmtKind::Expr(e)
                };
                Stmt {
                    id: ast::DUMMY_NODE_ID,
                    span: lo.to(hi),
                    node,
                }
            } else {
                // if it has a special ident, it's definitely an item
                //
                // Require a semicolon or braces.
                if style != MacStmtStyle::Braces {
                    if !self.eat(&token::Semi) {
                        self.span_err(self.prev_span,
                                      "macros that expand to items must \
                                       either be surrounded with braces or \
                                       followed by a semicolon");
                    }
                }
                let span = lo.to(hi);
                Stmt {
                    id: ast::DUMMY_NODE_ID,
                    span,
                    node: StmtKind::Item({
                        self.mk_item(
                            span, id /*id is good here*/,
                            ItemKind::Mac(respan(span, Mac_ { path: pth, tts, delim })),
                            respan(lo, VisibilityKind::Inherited),
                            attrs)
                    }),
                }
            }
        } else {
            // FIXME: Bad copy of attrs
            let old_directory_ownership =
                mem::replace(&mut self.directory.ownership, DirectoryOwnership::UnownedViaBlock);
            let item = self.parse_item_(attrs.clone(), false, true)?;
            self.directory.ownership = old_directory_ownership;

            match item {
                Some(i) => Stmt {
                    id: ast::DUMMY_NODE_ID,
                    span: lo.to(i.span),
                    node: StmtKind::Item(i),
                },
                None => {
                    // Report attributes that ended up attached to nothing.
                    let unused_attrs = |attrs: &[Attribute], s: &mut Self| {
                        if !attrs.is_empty() {
                            if s.prev_token_kind == PrevTokenKind::DocComment {
                                s.span_fatal_err(s.prev_span, Error::UselessDocComment).emit();
                            } else if attrs.iter().any(|a| a.style == AttrStyle::Outer) {
                                s.span_err(s.span, "expected statement after outer attribute");
                            }
                        }
                    };

                    // Do not attempt to parse an expression if we're done here.
                    if self.token == token::Semi {
                        unused_attrs(&attrs, self);
                        self.bump();
                        return Ok(None);
                    }

                    if self.token == token::CloseDelim(token::Brace) {
                        unused_attrs(&attrs, self);
                        return Ok(None);
                    }

                    // Remainder are line-expr stmts.
                    let e = self.parse_expr_res(
                        Restrictions::STMT_EXPR, Some(attrs.into()))?;
                    Stmt {
                        id: ast::DUMMY_NODE_ID,
                        span: lo.to(e.span),
                        node: StmtKind::Expr(e),
                    }
                }
            }
        }))
    }

    /// Is this expression a successfully-parsed statement?
    fn expr_is_complete(&mut self, e: &Expr) -> bool {
        self.restrictions.contains(Restrictions::STMT_EXPR) &&
            !classify::expr_requires_semi_to_be_stmt(e)
    }

    /// Parse a block. No inner attrs are allowed.
    pub fn parse_block(&mut self) -> PResult<'a, P<Block>> {
        maybe_whole!(self, NtBlock, |x| x);

        let lo = self.span;

        if !self.eat(&token::OpenDelim(token::Brace)) {
            let sp = self.span;
            let tok = self.this_token_to_string();
            let mut do_not_suggest_help = false;
            let mut e = self.span_fatal(sp, &format!("expected `{{`, found `{}`", tok));
            if self.token.is_keyword(keywords::In) || self.token == token::Colon {
                do_not_suggest_help = true;
                e.span_label(sp, "expected `{`");
            }

            // Check to see if the user has written something like
            //
            //    if (cond)
            //      bar;
            //
            // Which is valid in other languages, but not Rust.
            match self.parse_stmt_without_recovery(false) {
                Ok(Some(stmt)) => {
                    if self.look_ahead(1, |t| t == &token::OpenDelim(token::Brace))
                        || do_not_suggest_help {
                        // if the next token is an open brace (e.g., `if a b {`), the place-
                        // inside-a-block suggestion would be more likely wrong than right
                        return Err(e);
                    }
                    let mut stmt_span = stmt.span;
                    // expand the span to include the semicolon, if it exists
                    if self.eat(&token::Semi) {
                        stmt_span = stmt_span.with_hi(self.prev_span.hi());
                    }
                    // Pretty-print the statement wrapped in braces as the suggestion text.
                    let sugg = pprust::to_string(|s| {
                        use print::pprust::{PrintState, INDENT_UNIT};
                        s.ibox(INDENT_UNIT)?;
                        s.bopen()?;
                        s.print_stmt(&stmt)?;
                        s.bclose_maybe_open(stmt.span, INDENT_UNIT, false)
                    });
                    e.span_suggestion_with_applicability(
                        stmt_span,
                        "try placing this code inside a block",
                        sugg,
                        // speculative, has been misleading in the past (closed Issue #46836)
                        Applicability::MaybeIncorrect
                    );
                }
                Err(mut e) => {
                    self.recover_stmt_(SemiColonMode::Break, BlockMode::Ignore);
                    self.cancel(&mut e);
                }
                _ => ()
            }
            // The "expected `{`" error is reported in every case.
            return Err(e);
        }

        self.parse_block_tail(lo, BlockCheckMode::Default)
    }

    /// Parse a block. Inner attrs are allowed.
    // Expects the current token to be `{`; returns the block's inner
    // attributes together with the parsed block.
    fn parse_inner_attrs_and_block(&mut self) -> PResult<'a, (Vec<Attribute>, P<Block>)> {
        maybe_whole!(self, NtBlock, |x| (Vec::new(), x));

        let lo = self.span;
        self.expect(&token::OpenDelim(token::Brace))?;
        Ok((self.parse_inner_attributes()?,
            self.parse_block_tail(lo, BlockCheckMode::Default)?))
    }

    /// Parse the rest of a block expression or function body
    /// Precondition: already parsed the '{'.
    fn parse_block_tail(&mut self, lo: Span, s: BlockCheckMode) -> PResult<'a, P<Block>> {
        let mut stmts = vec![];
        // Set when a statement failed to parse and we bailed out of the
        // block; recorded on the AST node so later passes can tell.
        let mut recovered = false;

        while !self.eat(&token::CloseDelim(token::Brace)) {
            let stmt = match self.parse_full_stmt(false) {
                Err(mut err) => {
                    // Emit the statement error, skip to a plausible
                    // resynchronization point, and stop parsing this block.
                    err.emit();
                    self.recover_stmt_(SemiColonMode::Ignore, BlockMode::Ignore);
                    self.eat(&token::CloseDelim(token::Brace));
                    recovered = true;
                    break;
                }
                Ok(stmt) => stmt,
            };
            if let Some(stmt) = stmt {
                stmts.push(stmt);
            } else if self.token == token::Eof {
                break;
            } else {
                // Found only `;` or `}`.
                continue;
            };
        }
        Ok(P(ast::Block {
            stmts,
            id: ast::DUMMY_NODE_ID,
            rules: s,
            span: lo.to(self.prev_span),
            recovered,
        }))
    }

    /// Parse a statement, including the trailing semicolon.
    // `macro_legacy_warnings`: when true, a macro-expanded `let` lacking a
    // `;` only warns instead of erroring (back-compat, see below).
    crate fn parse_full_stmt(&mut self, macro_legacy_warnings: bool) -> PResult<'a, Option<Stmt>> {
        // skip looking for a trailing semicolon when we have an interpolated statement
        maybe_whole!(self, NtStmt, |x| Some(x));

        let mut stmt = match self.parse_stmt_without_recovery(macro_legacy_warnings)? {
            Some(stmt) => stmt,
            None => return Ok(None),
        };

        match stmt.node {
            StmtKind::Expr(ref expr) if self.token != token::Eof => {
                // expression without semicolon
                if classify::expr_requires_semi_to_be_stmt(expr) {
                    // Just check for errors and recover; do not eat semicolon yet.
                    if let Err(mut e) =
                        self.expect_one_of(&[], &[token::Semi, token::CloseDelim(token::Brace)])
                    {
                        e.emit();
                        self.recover_stmt();
                    }
                }
            }
            StmtKind::Local(..) => {
                // We used to incorrectly allow a macro-expanded let statement to lack a semicolon.
                if macro_legacy_warnings && self.token != token::Semi {
                    self.warn_missing_semicolon();
                } else {
                    self.expect_one_of(&[], &[token::Semi])?;
                }
            }
            _ => {}
        }

        // Consume the trailing `;` (if any) and widen the statement's span
        // to cover it.
        if self.eat(&token::Semi) {
            stmt = stmt.add_trailing_semicolon();
        }

        stmt.span = stmt.span.with_hi(self.prev_span.hi());
        Ok(Some(stmt))
    }

    // Back-compat warning used by `parse_full_stmt` for macro-expanded
    // statements missing their `;`.
    fn warn_missing_semicolon(&self) {
        self.diagnostic().struct_span_warn(self.span, {
            &format!("expected `;`, found `{}`", self.this_token_to_string())
        }).note({
            "This was erroneously allowed and will become a hard error in a future release"
        }).emit();
    }

    // Errors on the obsolete `...` range syntax and suggests both valid
    // spellings (`..` exclusive, `..=` inclusive).
    fn err_dotdotdot_syntax(&self, span: Span) {
        self.diagnostic().struct_span_err(span, {
            "unexpected token: `...`"
        }).span_suggestion_with_applicability(
            span,
            "use `..` for an exclusive range",
            "..".to_owned(),
            Applicability::MaybeIncorrect
        ).span_suggestion_with_applicability(
            span,
            "or `..=` for an inclusive range",
            "..=".to_owned(),
            Applicability::MaybeIncorrect
        ).emit();
    }

    // Parse bounds of a type parameter `BOUND + BOUND + BOUND`, possibly with trailing `+`.
    // BOUND = TY_BOUND | LT_BOUND
    // LT_BOUND = LIFETIME (e.g. `'a`)
    // TY_BOUND = TY_BOUND_NOPAREN | (TY_BOUND_NOPAREN)
    // TY_BOUND_NOPAREN = [?] [for<LT_PARAM_DEFS>] SIMPLE_PATH (e.g. `?for<'a: 'b> m::Trait<'a>`)
    fn parse_generic_bounds_common(&mut self, allow_plus: bool) -> PResult<'a, GenericBounds> {
        let mut bounds = Vec::new();
        loop {
            // This needs to be synchronized with `Token::can_begin_bound`.
            let is_bound_start = self.check_path() || self.check_lifetime() ||
                                 self.check(&token::Question) ||
                                 self.check_keyword(keywords::For) ||
                                 self.check(&token::OpenDelim(token::Paren));
            if is_bound_start {
                let lo = self.span;
                let has_parens = self.eat(&token::OpenDelim(token::Paren));
                // Optional leading `?` (maybe-bound modifier); remember its
                // span for diagnostics.
                let question = if self.eat(&token::Question) { Some(self.prev_span) } else { None };
                if self.token.is_lifetime() {
                    if let Some(question_span) = question {
                        self.span_err(question_span,
                                      "`?` may only modify trait bounds, not lifetime bounds");
                    }
                    bounds.push(GenericBound::Outlives(self.expect_lifetime()));
                    if has_parens {
                        // Still consume the `)` so parsing can continue,
                        // then report that the parens are invalid.
                        self.expect(&token::CloseDelim(token::Paren))?;
                        self.span_err(self.prev_span,
                                      "parenthesized lifetime bounds are not supported");
                    }
                } else {
                    // Trait bound: optional `for<...>` binder, then a path.
                    let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
                    let path = self.parse_path(PathStyle::Type)?;
                    if has_parens {
                        self.expect(&token::CloseDelim(token::Paren))?;
                    }
                    let poly_trait = PolyTraitRef::new(lifetime_defs, path, lo.to(self.prev_span));
                    let modifier = if question.is_some() {
                        TraitBoundModifier::Maybe
                    } else {
                        TraitBoundModifier::None
                    };
                    bounds.push(GenericBound::Trait(poly_trait, modifier));
                }
            } else {
                break
            }

            if !allow_plus || !self.eat_plus() {
                break
            }
        }

        return Ok(bounds);
    }

    fn parse_generic_bounds(&mut self) -> PResult<'a, GenericBounds> {
        self.parse_generic_bounds_common(true)
    }

    // Parse bounds of a lifetime parameter `BOUND + BOUND + BOUND`, possibly with trailing `+`.
    // BOUND = LT_BOUND (e.g. `'a`)
    fn parse_lt_param_bounds(&mut self) -> GenericBounds {
        let mut lifetimes = Vec::new();
        while self.check_lifetime() {
            lifetimes.push(ast::GenericBound::Outlives(self.expect_lifetime()));

            if !self.eat_plus() {
                break
            }
        }
        lifetimes
    }

    /// Matches typaram = IDENT (`?` unbound)? optbounds ( EQ ty )?
    fn parse_ty_param(&mut self,
                      preceding_attrs: Vec<Attribute>)
                      -> PResult<'a, GenericParam> {
        let ident = self.parse_ident()?;

        // Parse optional colon and param bounds.
        let bounds = if self.eat(&token::Colon) {
            self.parse_generic_bounds()?
        } else {
            Vec::new()
        };

        // Optional `= Ty` default for the type parameter.
        let default = if self.eat(&token::Eq) {
            Some(self.parse_ty()?)
        } else {
            None
        };

        Ok(GenericParam {
            ident,
            id: ast::DUMMY_NODE_ID,
            attrs: preceding_attrs.into(),
            bounds,
            kind: GenericParamKind::Type {
                default,
            }
        })
    }

    /// Parses the following grammar:
    ///     TraitItemAssocTy = Ident ["<"...">"] [":" [GenericBounds]] ["where" ...] ["=" Ty]
    fn parse_trait_item_assoc_ty(&mut self)
        -> PResult<'a, (Ident, TraitItemKind, ast::Generics)> {
        let ident = self.parse_ident()?;
        let mut generics = self.parse_generics()?;

        // Parse optional colon and param bounds.
        let bounds = if self.eat(&token::Colon) {
            self.parse_generic_bounds()?
        } else {
            Vec::new()
        };
        generics.where_clause = self.parse_where_clause()?;

        // Optional `= Ty` default for the associated type, then mandatory `;`.
        let default = if self.eat(&token::Eq) {
            Some(self.parse_ty()?)
        } else {
            None
        };
        self.expect(&token::Semi)?;

        Ok((ident, TraitItemKind::Type(bounds, default), generics))
    }

    /// Parses (possibly empty) list of lifetime and type parameters, possibly including
    /// trailing comma and erroneous trailing attributes.
    crate fn parse_generic_params(&mut self) -> PResult<'a, Vec<ast::GenericParam>> {
        let mut params = Vec::new();
        // Tracks ordering: lifetimes declared after a type parameter are an
        // error (reported below, but the parameter is still recorded).
        let mut seen_ty_param = false;
        loop {
            let attrs = self.parse_outer_attributes()?;
            if self.check_lifetime() {
                let lifetime = self.expect_lifetime();
                // Parse lifetime parameter.
                let bounds = if self.eat(&token::Colon) {
                    self.parse_lt_param_bounds()
                } else {
                    Vec::new()
                };
                params.push(ast::GenericParam {
                    ident: lifetime.ident,
                    id: lifetime.id,
                    attrs: attrs.into(),
                    bounds,
                    kind: ast::GenericParamKind::Lifetime,
                });
                if seen_ty_param {
                    self.span_err(self.prev_span,
                        "lifetime parameters must be declared prior to type parameters");
                }
            } else if self.check_ident() {
                // Parse type parameter.
                params.push(self.parse_ty_param(attrs)?);
                seen_ty_param = true;
            } else {
                // Check for trailing attributes and stop parsing.
                if !attrs.is_empty() {
                    let param_kind = if seen_ty_param { "type" } else { "lifetime" };
                    self.span_err(attrs[0].span,
                        &format!("trailing attribute after {} parameters", param_kind));
                }
                break
            }

            if !self.eat(&token::Comma) {
                break
            }
        }
        Ok(params)
    }

    /// Parse a set of optional generic type parameter declarations. Where
    /// clauses are not parsed here, and must be added later via
    /// `parse_where_clause()`.
    ///
    /// matches generics = ( ) | ( < > ) | ( < typaramseq ( , )? > ) | ( < lifetimes ( , )? > )
    ///                  | ( < lifetimes , typaramseq ( , )? > )
    /// where   typaramseq = ( typaram ) | ( typaram , typaramseq )
    fn parse_generics(&mut self) -> PResult<'a, ast::Generics> {
        maybe_whole!(self, NtGenerics, |x| x);

        let span_lo = self.span;
        if self.eat_lt() {
            let params = self.parse_generic_params()?;
            self.expect_gt()?;
            Ok(ast::Generics {
                params,
                // Empty placeholder; the caller fills in the real where
                // clause later via `parse_where_clause()`.
                where_clause: WhereClause {
                    id: ast::DUMMY_NODE_ID,
                    predicates: Vec::new(),
                    span: syntax_pos::DUMMY_SP,
                },
                span: span_lo.to(self.prev_span),
            })
        } else {
            Ok(ast::Generics::default())
        }
    }

    /// Parses (possibly empty) list of lifetime and type arguments and associated type bindings,
    /// possibly including trailing comma.
    fn parse_generic_args(&mut self) -> PResult<'a, (Vec<GenericArg>, Vec<TypeBinding>)> {
        let mut args = Vec::new();
        let mut bindings = Vec::new();
        // Ordering flags: lifetimes must come before types, and types before
        // associated type bindings; violations are reported but still parsed.
        let mut seen_type = false;
        let mut seen_binding = false;
        loop {
            if self.check_lifetime() && self.look_ahead(1, |t| !t.is_like_plus()) {
                // Parse lifetime argument.
                args.push(GenericArg::Lifetime(self.expect_lifetime()));
                if seen_type || seen_binding {
                    self.span_err(self.prev_span,
                        "lifetime parameters must be declared prior to type parameters");
                }
            } else if self.check_ident() && self.look_ahead(1, |t| t == &token::Eq) {
                // Parse associated type binding.
                let lo = self.span;
                let ident = self.parse_ident()?;
                self.bump();
                let ty = self.parse_ty()?;
                bindings.push(TypeBinding {
                    id: ast::DUMMY_NODE_ID,
                    ident,
                    ty,
                    span: lo.to(self.prev_span),
                });
                seen_binding = true;
            } else if self.check_type() {
                // Parse type argument.
                let ty_param = self.parse_ty()?;
                if seen_binding {
                    self.span_err(ty_param.span,
                        "type parameters must be declared prior to associated type bindings");
                }
                args.push(GenericArg::Type(ty_param));
                seen_type = true;
            } else {
                break
            }

            if !self.eat(&token::Comma) {
                break
            }
        }
        Ok((args, bindings))
    }

    /// Parses an optional `where` clause and places it in `generics`.
    ///
    /// ```ignore (only-for-syntax-highlight)
    /// where T : Trait<U, V> + 'b, 'a : 'b
    /// ```
    fn parse_where_clause(&mut self) -> PResult<'a, WhereClause> {
        maybe_whole!(self, NtWhereClause, |x| x);

        let mut where_clause = WhereClause {
            id: ast::DUMMY_NODE_ID,
            predicates: Vec::new(),
            span: syntax_pos::DUMMY_SP,
        };

        if !self.eat_keyword(keywords::Where) {
            return Ok(where_clause);
        }
        let lo = self.prev_span;

        // We are considering adding generics to the `where` keyword as an alternative higher-rank
        // parameter syntax (as in `where<'a>` or `where<T>`. To avoid that being a breaking
        // change we parse those generics now, but report an error.
        if self.choose_generics_over_qpath() {
            let generics = self.parse_generics()?;
            self.span_err(generics.span,
                          "generic parameters on `where` clauses are reserved for future use");
        }

        loop {
            // Shadows the outer `lo`: this one spans a single predicate.
            let lo = self.span;
            if self.check_lifetime() && self.look_ahead(1, |t| !t.is_like_plus()) {
                let lifetime = self.expect_lifetime();
                // Bounds starting with a colon are mandatory, but possibly empty.
                self.expect(&token::Colon)?;
                let bounds = self.parse_lt_param_bounds();
                where_clause.predicates.push(ast::WherePredicate::RegionPredicate(
                    ast::WhereRegionPredicate {
                        span: lo.to(self.prev_span),
                        lifetime,
                        bounds,
                    }
                ));
            } else if self.check_type() {
                // Parse optional `for<'a, 'b>`.
                // This `for` is parsed greedily and applies to the whole predicate,
                // the bounded type can have its own `for` applying only to it.
                // Example 1: for<'a> Trait1<'a>: Trait2<'a /*ok*/>
                // Example 2: (for<'a> Trait1<'a>): Trait2<'a /*not ok*/>
                // Example 3: for<'a> for<'b> Trait1<'a, 'b>: Trait2<'a /*ok*/, 'b /*not ok*/>
                let lifetime_defs = self.parse_late_bound_lifetime_defs()?;

                // Parse type with mandatory colon and (possibly empty) bounds,
                // or with mandatory equality sign and the second type.
                let ty = self.parse_ty()?;
                if self.eat(&token::Colon) {
                    let bounds = self.parse_generic_bounds()?;
                    where_clause.predicates.push(ast::WherePredicate::BoundPredicate(
                        ast::WhereBoundPredicate {
                            span: lo.to(self.prev_span),
                            bound_generic_params: lifetime_defs,
                            bounded_ty: ty,
                            bounds,
                        }
                    ));
                // FIXME: Decide what should be used here, `=` or `==`.
                // FIXME: We are just dropping the binders in lifetime_defs on the floor here.
                } else if self.eat(&token::Eq) || self.eat(&token::EqEq) {
                    let rhs_ty = self.parse_ty()?;
                    where_clause.predicates.push(ast::WherePredicate::EqPredicate(
                        ast::WhereEqPredicate {
                            span: lo.to(self.prev_span),
                            lhs_ty: ty,
                            rhs_ty,
                            id: ast::DUMMY_NODE_ID,
                        }
                    ));
                } else {
                    return self.unexpected();
                }
            } else {
                break
            }

            if !self.eat(&token::Comma) {
                break
            }
        }

        where_clause.span = lo.to(self.prev_span);
        Ok(where_clause)
    }

    // Parses a parenthesized, comma-separated argument list.
    // `named_args`: whether arguments carry names/patterns (item fns) —
    // forwarded to `parse_arg_general`. `allow_variadic`: whether a trailing
    // `...` is legal (foreign fns only). Returns the args plus a flag saying
    // whether `...` was seen.
    fn parse_fn_args(&mut self, named_args: bool, allow_variadic: bool)
                     -> PResult<'a, (Vec<Arg> , bool)> {
        let sp = self.span;
        let mut variadic = false;
        let args: Vec<Option<Arg>> =
            self.parse_unspanned_seq(
                &token::OpenDelim(token::Paren),
                &token::CloseDelim(token::Paren),
                SeqSep::trailing_allowed(token::Comma),
                |p| {
                    if p.token == token::DotDotDot {
                        p.bump();
                        variadic = true;
                        if allow_variadic {
                            if p.token != token::CloseDelim(token::Paren) {
                                let span = p.span;
                                p.span_err(span,
                                    "`...` must be last in argument list for variadic function");
                            }
                            Ok(None)
                        } else {
                            let span = p.prev_span;
                            if p.token == token::CloseDelim(token::Paren) {
                                // continue parsing to present any further errors
                                p.struct_span_err(
                                    span,
                                    "only foreign functions are allowed to be variadic"
                                ).emit();
                                Ok(Some(dummy_arg(span)))
                            } else {
                               // this function definition looks beyond recovery, stop parsing
                                p.span_err(span,
                                           "only foreign functions are allowed to be variadic");
                                Ok(None)
                            }
                        }
                    } else {
                        match p.parse_arg_general(named_args) {
                            Ok(arg) => Ok(Some(arg)),
                            Err(mut e) => {
                                e.emit();
                                let lo = p.prev_span;
                                // Skip every token until next possible arg or end.
                                p.eat_to_tokens(&[&token::Comma, &token::CloseDelim(token::Paren)]);
                                // Create a placeholder argument for proper arg count (#34264).
                                let span = lo.to(p.prev_span);
                                Ok(Some(dummy_arg(span)))
                            }
                        }
                    }
                }
            )?;

        // Drop the `None` placeholders produced for `...` entries.
        let args: Vec<_> = args.into_iter().filter_map(|x| x).collect();

        if variadic && args.is_empty() {
            self.span_err(sp,
                          "variadic function must be declared with at least one named argument");
        }

        Ok((args, variadic))
    }

    /// Parse the argument list and result type of a function declaration
    fn parse_fn_decl(&mut self, allow_variadic: bool) -> PResult<'a, P<FnDecl>> {

        let (args, variadic) = self.parse_fn_args(true, allow_variadic)?;
        let ret_ty = self.parse_ret_ty(true)?;

        Ok(P(FnDecl {
            inputs: args,
            output: ret_ty,
            variadic,
        }))
    }

    /// Returns the parsed optional self argument and whether a self shortcut was used.
    fn parse_self_arg(&mut self) -> PResult<'a, Option<Arg>> {
        // Consumes the current token, which must be an identifier, and
        // rebuilds it with the current span.
        let expect_ident = |this: &mut Self| match this.token {
            // Preserve hygienic context.
            token::Ident(ident, _) =>
                { let span = this.span; this.bump(); Ident::new(ident.name, span) }
            _ => unreachable!()
        };
        // True when the token `n` ahead is the `self` keyword NOT followed by
        // `::` (i.e. a real self parameter, not a `self::...` path).
        let isolated_self = |this: &mut Self, n| {
            this.look_ahead(n, |t| t.is_keyword(keywords::SelfValue)) &&
            this.look_ahead(n + 1, |t| t != &token::ModSep)
        };

        // Parse optional self parameter of a method.
        // Only a limited set of initial token sequences is considered self parameters, anything
        // else is parsed as a normal function parameter list, so some lookahead is required.
        let eself_lo = self.span;
        let (eself, eself_ident, eself_hi) = match self.token {
            token::BinOp(token::And) => {
                // &self
                // &mut self
                // &'lt self
                // &'lt mut self
                // &not_self
                (if isolated_self(self, 1) {
                    self.bump();
                    SelfKind::Region(None, Mutability::Immutable)
                } else if self.look_ahead(1, |t| t.is_keyword(keywords::Mut)) &&
                          isolated_self(self, 2) {
                    self.bump();
                    self.bump();
                    SelfKind::Region(None, Mutability::Mutable)
                } else if self.look_ahead(1, |t| t.is_lifetime()) &&
                          isolated_self(self, 2) {
                    self.bump();
                    let lt = self.expect_lifetime();
                    SelfKind::Region(Some(lt), Mutability::Immutable)
                } else if self.look_ahead(1, |t| t.is_lifetime()) &&
                          self.look_ahead(2, |t| t.is_keyword(keywords::Mut)) &&
                          isolated_self(self, 3) {
                    self.bump();
                    let lt = self.expect_lifetime();
                    self.bump();
                    SelfKind::Region(Some(lt), Mutability::Mutable)
                } else {
                    // `&` not followed by a self shortcut: not a self arg.
                    return Ok(None);
                }, expect_ident(self), self.prev_span)
            }
            token::BinOp(token::Star) => {
                // *self
                // *const self
                // *mut self
                // *not_self
                // Emit special error for `self` cases.
                (if isolated_self(self, 1) {
                    self.bump();
                    self.span_err(self.span, "cannot pass `self` by raw pointer");
                    SelfKind::Value(Mutability::Immutable)
                } else if self.look_ahead(1, |t| t.is_mutability()) &&
                          isolated_self(self, 2) {
                    self.bump();
                    self.bump();
                    self.span_err(self.span, "cannot pass `self` by raw pointer");
                    SelfKind::Value(Mutability::Immutable)
                } else {
                    return Ok(None);
                }, expect_ident(self), self.prev_span)
            }
            token::Ident(..)
            => {
                if isolated_self(self, 0) {
                    // self
                    // self: TYPE
                    let eself_ident = expect_ident(self);
                    let eself_hi = self.prev_span;
                    (if self.eat(&token::Colon) {
                        let ty = self.parse_ty()?;
                        SelfKind::Explicit(ty, Mutability::Immutable)
                    } else {
                        SelfKind::Value(Mutability::Immutable)
                    }, eself_ident, eself_hi)
                } else if self.token.is_keyword(keywords::Mut) &&
                          isolated_self(self, 1) {
                    // mut self
                    // mut self: TYPE
                    self.bump();
                    let eself_ident = expect_ident(self);
                    let eself_hi = self.prev_span;
                    (if self.eat(&token::Colon) {
                        let ty = self.parse_ty()?;
                        SelfKind::Explicit(ty, Mutability::Mutable)
                    } else {
                        SelfKind::Value(Mutability::Mutable)
                    }, eself_ident, eself_hi)
                } else {
                    return Ok(None);
                }
            }
            _ => return Ok(None),
        };

        let eself = source_map::respan(eself_lo.to(eself_hi), eself);
        Ok(Some(Arg::from_self(eself, eself_ident)))
    }

    /// Parse the parameter list and result type of a function that may have a `self` parameter.
    fn parse_fn_decl_with_self<F>(&mut self, parse_arg_fn: F) -> PResult<'a, P<FnDecl>>
        where F: FnMut(&mut Parser<'a>) -> PResult<'a, Arg>,
    {
        self.expect(&token::OpenDelim(token::Paren))?;

        // Parse optional self argument
        let self_arg = self.parse_self_arg()?;

        // Parse the rest of the function parameter list.
        let sep = SeqSep::trailing_allowed(token::Comma);
        let fn_inputs = if let Some(self_arg) = self_arg {
            // After a self arg, only `)` or `,` followed by more args is valid.
            if self.check(&token::CloseDelim(token::Paren)) {
                vec![self_arg]
            } else if self.eat(&token::Comma) {
                let mut fn_inputs = vec![self_arg];
                fn_inputs.append(&mut self.parse_seq_to_before_end(
                    &token::CloseDelim(token::Paren), sep, parse_arg_fn)?
                );
                fn_inputs
            } else {
                return self.unexpected();
            }
        } else {
            self.parse_seq_to_before_end(&token::CloseDelim(token::Paren), sep, parse_arg_fn)?
        };

        // Parse closing paren and return type.
        self.expect(&token::CloseDelim(token::Paren))?;
        Ok(P(FnDecl {
            inputs: fn_inputs,
            output: self.parse_ret_ty(true)?,
            variadic: false
        }))
    }

    // parse the |arg, arg| header on a lambda
    fn parse_fn_block_decl(&mut self) -> PResult<'a, P<FnDecl>> {
        let inputs_captures = {
            // `||` is lexed as a single OrOr token, so an empty arg list needs
            // its own case.
            if self.eat(&token::OrOr) {
                Vec::new()
            } else {
                self.expect(&token::BinOp(token::Or))?;
                let args = self.parse_seq_to_before_tokens(
                    &[&token::BinOp(token::Or), &token::OrOr],
                    SeqSep::trailing_allowed(token::Comma),
                    TokenExpectType::NoExpect,
                    |p| p.parse_fn_block_arg()
                )?;
                self.expect_or()?;
                args
            }
        };
        let output = self.parse_ret_ty(true)?;

        Ok(P(FnDecl {
            inputs: inputs_captures,
            output,
            variadic: false
        }))
    }

    /// Parse the name and optional generic types of a function header.
    fn parse_fn_header(&mut self) -> PResult<'a, (Ident, ast::Generics)> {
        let id = self.parse_ident()?;
        let generics = self.parse_generics()?;
        Ok((id, generics))
    }

    // Builds an `Item` AST node from its parts; no parsing happens here.
    fn mk_item(&mut self, span: Span, ident: Ident, node: ItemKind, vis: Visibility,
               attrs: Vec<Attribute>) -> P<Item> {
        P(Item {
            ident,
            attrs,
            id: ast::DUMMY_NODE_ID,
            node,
            vis,
            span,
            tokens: None,
        })
    }

    /// Parse an item-position function declaration.
    fn parse_item_fn(&mut self,
                     unsafety: Unsafety,
                     asyncness: IsAsync,
                     constness: Spanned<Constness>,
                     abi: Abi)
                     -> PResult<'a, ItemInfo> {
        let (ident, mut generics) = self.parse_fn_header()?;
        let decl = self.parse_fn_decl(false)?;
        generics.where_clause = self.parse_where_clause()?;
        let (inner_attrs, body) = self.parse_inner_attrs_and_block()?;
        let header = FnHeader { unsafety, asyncness, constness, abi };
        Ok((ident, ItemKind::Fn(decl, header, generics, body), Some(inner_attrs)))
    }

    /// true if we are looking at `const ID`, false for things like `const fn` etc
    fn is_const_item(&mut self) -> bool {
        self.token.is_keyword(keywords::Const) &&
            !self.look_ahead(1, |t| t.is_keyword(keywords::Fn)) &&
            !self.look_ahead(1, |t| t.is_keyword(keywords::Unsafe))
    }

    /// parses all the "front matter" for a `fn` declaration, up to
    /// and including the `fn` keyword:
    ///
    /// - `const fn`
    /// - `unsafe fn`
    /// - `const unsafe fn`
    /// - `extern fn`
    /// - etc
    fn parse_fn_front_matter(&mut self)
        -> PResult<'a, (
            Spanned<Constness>,
            Unsafety,
            IsAsync,
            Abi
        )>
    {
        let is_const_fn = self.eat_keyword(keywords::Const);
        let const_span = self.prev_span;
        let unsafety = self.parse_unsafety();
        let asyncness = self.parse_asyncness();
        let (constness, unsafety, abi) = if is_const_fn {
            // `const fn` never takes an explicit ABI here.
            (respan(const_span, Constness::Const), unsafety, Abi::Rust)
        } else {
            // `extern` with no literal defaults to the C ABI.
            let abi = if self.eat_keyword(keywords::Extern) {
                self.parse_opt_abi()?.unwrap_or(Abi::C)
            } else {
                Abi::Rust
            };
            (respan(self.prev_span, Constness::NotConst), unsafety, abi)
        };
        self.expect_keyword(keywords::Fn)?;
        Ok((constness, unsafety, asyncness, abi))
    }

    /// Parse an impl item.
    pub fn parse_impl_item(&mut self, at_end: &mut bool) -> PResult<'a, ImplItem> {
        maybe_whole!(self, NtImplItem, |x| x);
        let attrs = self.parse_outer_attributes()?;
        // Collect the raw token stream for the item so it can be attached to
        // the AST node (used for exact re-tokenization later).
        let (mut item, tokens) = self.collect_tokens(|this| {
            this.parse_impl_item_(at_end, attrs)
        })?;

        // See `parse_item` for why this clause is here.
        if !item.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) {
            item.tokens = Some(tokens);
        }
        Ok(item)
    }

    // Parses one item inside an `impl` block: a type alias / existential
    // type, an associated const, or a method / macro invocation.
    fn parse_impl_item_(&mut self,
                        at_end: &mut bool,
                        mut attrs: Vec<Attribute>) -> PResult<'a, ImplItem> {
        let lo = self.span;
        let vis = self.parse_visibility(false)?;
        let defaultness = self.parse_defaultness();
        let (name, node, generics) = if let Some(type_) = self.eat_type() {
            let (name, alias, generics) = type_?;
            let kind = match alias {
                AliasKind::Weak(typ) => ast::ImplItemKind::Type(typ),
                AliasKind::Existential(bounds) => ast::ImplItemKind::Existential(bounds),
            };
            (name, kind, generics)
        } else if self.is_const_item() {
            // This parses the grammar:
            //     ImplItemConst = "const" Ident ":" Ty "=" Expr ";"
            self.expect_keyword(keywords::Const)?;
            let name = self.parse_ident()?;
            self.expect(&token::Colon)?;
            let typ = self.parse_ty()?;
            self.expect(&token::Eq)?;
            let expr = self.parse_expr()?;
            self.expect(&token::Semi)?;
            (name, ast::ImplItemKind::Const(typ, expr), ast::Generics::default())
        } else {
            // Fall through: method or macro invocation; its inner attributes
            // are folded into this item's attribute list.
            let (name, inner_attrs, generics, node) = self.parse_impl_method(&vis, at_end)?;
            attrs.extend(inner_attrs);
            (name, node, generics)
        };

        Ok(ImplItem {
            id: ast::DUMMY_NODE_ID,
            span: lo.to(self.prev_span),
            ident: name,
            vis,
            defaultness,
            attrs,
            generics,
            node,
            tokens: None,
        })
    }

    // Emits (rather than returns) the `pub`-on-macro-invocation diagnostic.
    fn complain_if_pub_macro(&mut self, vis: &VisibilityKind, sp: Span) {
        if let Err(mut err) = self.complain_if_pub_macro_diag(vis, sp) {
            err.emit();
        }
    }

    // Returns an error diagnostic when a macro invocation carries an explicit
    // visibility; `macro_rules` gets a tailored `#[macro_export]` suggestion.
    fn complain_if_pub_macro_diag(&mut self, vis: &VisibilityKind, sp: Span) -> PResult<'a, ()> {
        match *vis {
            VisibilityKind::Inherited => Ok(()),
            _ => {
                let is_macro_rules: bool = match self.token {
                    token::Ident(sid, _) => sid.name == Symbol::intern("macro_rules"),
                    _ => false,
                };
                if is_macro_rules {
                    let mut err = self.diagnostic()
                        .struct_span_err(sp, "can't qualify macro_rules invocation with `pub`");
                    err.span_suggestion_with_applicability(
                        sp,
                        "try exporting the macro",
                        "#[macro_export]".to_owned(),
                        Applicability::MaybeIncorrect // speculative
                    );
                    Err(err)
                }
                else {
                    let mut err = self.diagnostic()
                        .struct_span_err(sp, "can't qualify macro invocation with `pub`");
                    err.help("try adjusting the macro to put `pub` inside the invocation");
                    Err(err)
                }
            }
        }
    }

    // Builds (but does not emit) the "missing `fn`/`type`/..." diagnostic for
    // an associated item that starts with a path instead of an item keyword.
    fn missing_assoc_item_kind_err(&mut self, item_type: &str, prev_span: Span)
                                   -> DiagnosticBuilder<'a>
    {
        let expected_kinds = if item_type == "extern" {
            "missing `fn`, `type`, or `static`"
        } else {
            "missing `fn`, `type`, or `const`"
        };

        // Given this code `path(`, it seems like this is not
        // setting the visibility of a macro invocation, but rather
        // a mistyped method declaration.
        // Create a diagnostic pointing out that `fn` is missing.
        //
        // x |     pub   path(&self) {
        //   |           ^ missing `fn`, `type`, or `const`
        //     pub  path(
        //        ^^ `sp` below will point to this
        let sp = prev_span.between(self.prev_span);
        let mut err = self.diagnostic().struct_span_err(
            sp,
            &format!("{} for {}-item declaration",
                     expected_kinds, item_type));
        err.span_label(sp, expected_kinds);
        err
    }

    /// Parse a method or a macro invocation in a trait impl.
    fn parse_impl_method(&mut self, vis: &Visibility, at_end: &mut bool)
                         -> PResult<'a, (Ident, Vec<Attribute>, ast::Generics, ast::ImplItemKind)> {
        // code copied from parse_macro_use_or_failure... abstraction!
        if let Some(mac) = self.parse_assoc_macro_invoc("impl", Some(vis), at_end)? {
            // Method macro.
            Ok((keywords::Invalid.ident(), vec![], ast::Generics::default(),
                ast::ImplItemKind::Macro(mac)))
        } else {
            let (constness, unsafety, asyncness, abi) = self.parse_fn_front_matter()?;
            let ident = self.parse_ident()?;
            let mut generics = self.parse_generics()?;
            let decl = self.parse_fn_decl_with_self(|p| p.parse_arg())?;
            generics.where_clause = self.parse_where_clause()?;
            // The method header parsed fully; tell the caller recovery need
            // not skip to the end of the surrounding block on a body error.
            *at_end = true;
            let (inner_attrs, body) = self.parse_inner_attrs_and_block()?;
            let header = ast::FnHeader { abi, unsafety, constness, asyncness };
            Ok((ident, inner_attrs, generics, ast::ImplItemKind::Method(
                ast::MethodSig { header, decl },
                body
            )))
        }
    }

    /// Parse `trait Foo { ... }` or `trait Foo = Bar;`
    fn parse_item_trait(&mut self, is_auto: IsAuto, unsafety: Unsafety) -> PResult<'a, ItemInfo> {
        let ident = self.parse_ident()?;
        let mut tps = self.parse_generics()?;

        // Parse optional colon and supertrait bounds.
        let bounds = if self.eat(&token::Colon) {
            self.parse_generic_bounds()?
        } else {
            Vec::new()
        };

        if self.eat(&token::Eq) {
            // it's a trait alias
            let bounds = self.parse_generic_bounds()?;
            tps.where_clause = self.parse_where_clause()?;
            self.expect(&token::Semi)?;
            if unsafety != Unsafety::Normal {
                self.span_err(self.prev_span, "trait aliases cannot be unsafe");
            }
            Ok((ident, ItemKind::TraitAlias(tps, bounds), None))
        } else {
            // it's a normal trait
            tps.where_clause = self.parse_where_clause()?;
            self.expect(&token::OpenDelim(token::Brace))?;
            let mut trait_items = vec![];
            while !self.eat(&token::CloseDelim(token::Brace)) {
                let mut at_end = false;
                match self.parse_trait_item(&mut at_end) {
                    Ok(item) => trait_items.push(item),
                    Err(mut e) => {
                        // Report the bad item and resynchronize unless the
                        // item parser already reached a safe stopping point.
                        e.emit();
                        if !at_end {
                            self.recover_stmt_(SemiColonMode::Break, BlockMode::Break);
                        }
                    }
                }
            }
            Ok((ident, ItemKind::Trait(is_auto, unsafety, tps, bounds, trait_items), None))
        }
    }

    fn choose_generics_over_qpath(&self) -> bool {
        // There's an ambiguity between generic parameters and qualified paths in impls.
        // If we see `<` it may start both, so we have to inspect some following tokens.
        // The following combinations can only start generics,
        // but not qualified paths (with one exception):
        //     `<` `>` - empty generic parameters
        //     `<` `#` - generic parameters with attributes
        //     `<` (LIFETIME|IDENT) `>` - single generic parameter
        //     `<` (LIFETIME|IDENT) `,` - first generic parameter in a list
        //     `<` (LIFETIME|IDENT) `:` - generic parameter with bounds
        //     `<` (LIFETIME|IDENT) `=` - generic parameter with a default
        // The only truly ambiguous case is
        //     `<` IDENT `>` `::` IDENT ...
        // we disambiguate it in favor of generics (`impl<T> ::absolute::Path<T> { ... }`)
        // because this is what almost always expected in practice, qualified paths in impls
        // (`impl <Type>::AssocTy { ... }`) aren't even allowed by type checker at the moment.
        self.token == token::Lt &&
            (self.look_ahead(1, |t| t == &token::Pound || t == &token::Gt) ||
             self.look_ahead(1, |t| t.is_lifetime() || t.is_ident()) &&
                self.look_ahead(2, |t| t == &token::Gt || t == &token::Comma ||
                                       t == &token::Colon || t == &token::Eq))
    }

    // Parses the `{ ... }` body of an impl: inner attributes followed by
    // zero or more impl items, with per-item error recovery.
    fn parse_impl_body(&mut self) -> PResult<'a, (Vec<ImplItem>, Vec<Attribute>)> {
        self.expect(&token::OpenDelim(token::Brace))?;
        let attrs = self.parse_inner_attributes()?;

        let mut impl_items = Vec::new();
        while !self.eat(&token::CloseDelim(token::Brace)) {
            let mut at_end = false;
            match self.parse_impl_item(&mut at_end) {
                Ok(impl_item) => impl_items.push(impl_item),
                Err(mut err) => {
                    err.emit();
                    if !at_end {
                        self.recover_stmt_(SemiColonMode::Break, BlockMode::Break);
                    }
                }
            }
        }
        Ok((impl_items, attrs))
    }

    /// Parses an implementation item, `impl` keyword is already parsed.
    ///    impl<'a, T> TYPE { /* impl items */ }
    ///    impl<'a, T> TRAIT for TYPE { /* impl items */ }
    ///    impl<'a, T> !TRAIT for TYPE { /* impl items */ }
    /// We actually parse slightly more relaxed grammar for better error reporting and recovery.
    ///     `impl` GENERICS `!`? TYPE `for`? (TYPE | `..`) (`where` PREDICATES)? `{` BODY `}`
    ///     `impl` GENERICS `!`? TYPE (`where` PREDICATES)? `{` BODY `}`
    fn parse_item_impl(&mut self, unsafety: Unsafety, defaultness: Defaultness)
                       -> PResult<'a, ItemInfo> {
        // First, parse generic parameters if necessary.
        let mut generics = if self.choose_generics_over_qpath() {
            self.parse_generics()?
        } else {
            ast::Generics::default()
        };

        // Disambiguate `impl !Trait for Type { ... }` and `impl ! { ... }` for the never type.
let polarity = if self.check(&token::Not) && self.look_ahead(1, |t| t.can_begin_type()) { self.bump(); // `!` ast::ImplPolarity::Negative } else { ast::ImplPolarity::Positive }; // Parse both types and traits as a type, then reinterpret if necessary. let ty_first = self.parse_ty()?; // If `for` is missing we try to recover. let has_for = self.eat_keyword(keywords::For); let missing_for_span = self.prev_span.between(self.span); let ty_second = if self.token == token::DotDot { // We need to report this error after `cfg` expansion for compatibility reasons self.bump(); // `..`, do not add it to expected tokens Some(P(Ty { node: TyKind::Err, span: self.prev_span, id: ast::DUMMY_NODE_ID })) } else if has_for || self.token.can_begin_type() { Some(self.parse_ty()?) } else { None }; generics.where_clause = self.parse_where_clause()?; let (impl_items, attrs) = self.parse_impl_body()?; let item_kind = match ty_second { Some(ty_second) => { // impl Trait for Type if !has_for { self.span_err(missing_for_span, "missing `for` in a trait impl"); } let ty_first = ty_first.into_inner(); let path = match ty_first.node { // This notably includes paths passed through `ty` macro fragments (#46438). 
TyKind::Path(None, path) => path, _ => { self.span_err(ty_first.span, "expected a trait, found type"); ast::Path::from_ident(Ident::new(keywords::Invalid.name(), ty_first.span)) } }; let trait_ref = TraitRef { path, ref_id: ty_first.id }; ItemKind::Impl(unsafety, polarity, defaultness, generics, Some(trait_ref), ty_second, impl_items) } None => { // impl Type ItemKind::Impl(unsafety, polarity, defaultness, generics, None, ty_first, impl_items) } }; Ok((keywords::Invalid.ident(), item_kind, Some(attrs))) } fn parse_late_bound_lifetime_defs(&mut self) -> PResult<'a, Vec<GenericParam>> { if self.eat_keyword(keywords::For) { self.expect_lt()?; let params = self.parse_generic_params()?; self.expect_gt()?; // We rely on AST validation to rule out invalid cases: There must not be type // parameters, and the lifetime parameters must not have bounds. Ok(params) } else { Ok(Vec::new()) } } /// Parse struct Foo { ... } fn parse_item_struct(&mut self) -> PResult<'a, ItemInfo> { let class_name = self.parse_ident()?; let mut generics = self.parse_generics()?; // There is a special case worth noting here, as reported in issue #17904. // If we are parsing a tuple struct it is the case that the where clause // should follow the field list. Like so: // // struct Foo<T>(T) where T: Copy; // // If we are parsing a normal record-style struct it is the case // that the where clause comes before the body, and after the generics. // So if we look ahead and see a brace or a where-clause we begin // parsing a record style struct. // // Otherwise if we look ahead and see a paren we parse a tuple-style // struct. let vdata = if self.token.is_keyword(keywords::Where) { generics.where_clause = self.parse_where_clause()?; if self.eat(&token::Semi) { // If we see a: `struct Foo<T> where T: Copy;` style decl. VariantData::Unit(ast::DUMMY_NODE_ID) } else { // If we see: `struct Foo<T> where T: Copy { ... 
}` VariantData::Struct(self.parse_record_struct_body()?, ast::DUMMY_NODE_ID) } // No `where` so: `struct Foo<T>;` } else if self.eat(&token::Semi) { VariantData::Unit(ast::DUMMY_NODE_ID) // Record-style struct definition } else if self.token == token::OpenDelim(token::Brace) { VariantData::Struct(self.parse_record_struct_body()?, ast::DUMMY_NODE_ID) // Tuple-style struct definition with optional where-clause. } else if self.token == token::OpenDelim(token::Paren) { let body = VariantData::Tuple(self.parse_tuple_struct_body()?, ast::DUMMY_NODE_ID); generics.where_clause = self.parse_where_clause()?; self.expect(&token::Semi)?; body } else { let token_str = self.this_token_to_string(); let mut err = self.fatal(&format!( "expected `where`, `{{`, `(`, or `;` after struct name, found `{}`", token_str )); err.span_label(self.span, "expected `where`, `{`, `(`, or `;` after struct name"); return Err(err); }; Ok((class_name, ItemKind::Struct(vdata, generics), None)) } /// Parse union Foo { ... } fn parse_item_union(&mut self) -> PResult<'a, ItemInfo> { let class_name = self.parse_ident()?; let mut generics = self.parse_generics()?; let vdata = if self.token.is_keyword(keywords::Where) { generics.where_clause = self.parse_where_clause()?; VariantData::Struct(self.parse_record_struct_body()?, ast::DUMMY_NODE_ID) } else if self.token == token::OpenDelim(token::Brace) { VariantData::Struct(self.parse_record_struct_body()?, ast::DUMMY_NODE_ID) } else { let token_str = self.this_token_to_string(); let mut err = self.fatal(&format!( "expected `where` or `{{` after union name, found `{}`", token_str)); err.span_label(self.span, "expected `where` or `{` after union name"); return Err(err); }; Ok((class_name, ItemKind::Union(vdata, generics), None)) } fn consume_block(&mut self, delim: token::DelimToken) { let mut brace_depth = 0; if !self.eat(&token::OpenDelim(delim)) { return; } loop { if self.eat(&token::OpenDelim(delim)) { brace_depth += 1; } else if 
self.eat(&token::CloseDelim(delim)) { if brace_depth == 0 { return; } else { brace_depth -= 1; continue; } } else if self.eat(&token::Eof) || self.eat(&token::CloseDelim(token::NoDelim)) { return; } else { self.bump(); } } } fn parse_record_struct_body(&mut self) -> PResult<'a, Vec<StructField>> { let mut fields = Vec::new(); if self.eat(&token::OpenDelim(token::Brace)) { while self.token != token::CloseDelim(token::Brace) { let field = self.parse_struct_decl_field().map_err(|e| { self.recover_stmt(); e }); match field { Ok(field) => fields.push(field), Err(mut err) => { err.emit(); } } } self.eat(&token::CloseDelim(token::Brace)); } else { let token_str = self.this_token_to_string(); let mut err = self.fatal(&format!( "expected `where`, or `{{` after struct name, found `{}`", token_str)); err.span_label(self.span, "expected `where`, or `{` after struct name"); return Err(err); } Ok(fields) } fn parse_tuple_struct_body(&mut self) -> PResult<'a, Vec<StructField>> { // This is the case where we find `struct Foo<T>(T) where T: Copy;` // Unit like structs are handled in parse_item_struct function let fields = self.parse_unspanned_seq( &token::OpenDelim(token::Paren), &token::CloseDelim(token::Paren), SeqSep::trailing_allowed(token::Comma), |p| { let attrs = p.parse_outer_attributes()?; let lo = p.span; let vis = p.parse_visibility(true)?; let ty = p.parse_ty()?; Ok(StructField { span: lo.to(ty.span), vis, ident: None, id: ast::DUMMY_NODE_ID, ty, attrs, }) })?; Ok(fields) } /// Parse a structure field declaration fn parse_single_struct_field(&mut self, lo: Span, vis: Visibility, attrs: Vec<Attribute> ) -> PResult<'a, StructField> { let mut seen_comma: bool = false; let a_var = self.parse_name_and_ty(lo, vis, attrs)?; if self.token == token::Comma { seen_comma = true; } match self.token { token::Comma => { self.bump(); } token::CloseDelim(token::Brace) => {} token::DocComment(_) => { let previous_span = self.prev_span; let mut err = self.span_fatal_err(self.span, 
Error::UselessDocComment); self.bump(); // consume the doc comment let comma_after_doc_seen = self.eat(&token::Comma); // `seen_comma` is always false, because we are inside doc block // condition is here to make code more readable if seen_comma == false && comma_after_doc_seen == true { seen_comma = true; } if comma_after_doc_seen || self.token == token::CloseDelim(token::Brace) { err.emit(); } else { if seen_comma == false { let sp = self.sess.source_map().next_point(previous_span); err.span_suggestion_with_applicability( sp, "missing comma here", ",".into(), Applicability::MachineApplicable ); } return Err(err); } } _ => { let sp = self.sess.source_map().next_point(self.prev_span); let mut err = self.struct_span_err(sp, &format!("expected `,`, or `}}`, found `{}`", self.this_token_to_string())); if self.token.is_ident() { // This is likely another field; emit the diagnostic and keep going err.span_suggestion(sp, "try adding a comma", ",".into()); err.emit(); } else { return Err(err) } } } Ok(a_var) } /// Parse an element of a struct definition fn parse_struct_decl_field(&mut self) -> PResult<'a, StructField> { let attrs = self.parse_outer_attributes()?; let lo = self.span; let vis = self.parse_visibility(false)?; self.parse_single_struct_field(lo, vis, attrs) } /// Parse `pub`, `pub(crate)` and `pub(in path)` plus shortcuts `pub(self)` for `pub(in self)` /// and `pub(super)` for `pub(in super)`. If the following element can't be a tuple (i.e. it's /// a function definition, it's not a tuple struct field) and the contents within the parens /// isn't valid, emit a proper diagnostic. 
pub fn parse_visibility(&mut self, can_take_tuple: bool) -> PResult<'a, Visibility> {
    maybe_whole!(self, NtVis, |x| x);

    self.expected_tokens.push(TokenType::Keyword(keywords::Crate));
    if self.is_crate_vis() {
        self.bump(); // `crate`
        return Ok(respan(self.prev_span, VisibilityKind::Crate(CrateSugar::JustCrate)));
    }

    if !self.eat_keyword(keywords::Pub) {
        // We need a span for our `Spanned<VisibilityKind>`, but there's inherently no
        // keyword to grab a span from for inherited visibility; an empty span at the
        // beginning of the current token would seem to be the "Schelling span".
        return Ok(respan(self.span.shrink_to_lo(), VisibilityKind::Inherited))
    }
    let lo = self.prev_span;

    if self.check(&token::OpenDelim(token::Paren)) {
        // We don't `self.bump()` the `(` yet because this might be a struct definition where
        // `()` or a tuple might be allowed. For example, `struct Struct(pub (), pub (usize));`.
        // Because of this, we only `bump` the `(` if we're assured it is appropriate to do so
        // by the following tokens.
        if self.look_ahead(1, |t| t.is_keyword(keywords::Crate)) {
            // `pub(crate)`
            self.bump(); // `(`
            self.bump(); // `crate`
            self.expect(&token::CloseDelim(token::Paren))?; // `)`
            let vis = respan(
                lo.to(self.prev_span),
                VisibilityKind::Crate(CrateSugar::PubCrate),
            );
            return Ok(vis)
        } else if self.look_ahead(1, |t| t.is_keyword(keywords::In)) {
            // `pub(in path)`
            self.bump(); // `(`
            self.bump(); // `in`
            let path = self.parse_path(PathStyle::Mod)?; // `path`
            self.expect(&token::CloseDelim(token::Paren))?; // `)`
            let vis = respan(lo.to(self.prev_span), VisibilityKind::Restricted {
                path: P(path),
                id: ast::DUMMY_NODE_ID,
            });
            return Ok(vis)
        } else if self.look_ahead(2, |t| t == &token::CloseDelim(token::Paren)) &&
                  self.look_ahead(1, |t| t.is_keyword(keywords::Super) ||
                                         t.is_keyword(keywords::SelfValue)) {
            // `pub(self)` or `pub(super)`
            self.bump(); // `(`
            let path = self.parse_path(PathStyle::Mod)?; // `super`/`self`
            self.expect(&token::CloseDelim(token::Paren))?; // `)`
            let vis = respan(lo.to(self.prev_span), VisibilityKind::Restricted {
                path: P(path),
                id: ast::DUMMY_NODE_ID,
            });
            return Ok(vis)
        } else if !can_take_tuple {
            // Provide this diagnostic if this is not a tuple struct
            // `pub(something) fn ...` or `struct X { pub(something) y: Z }`
            self.bump(); // `(`
            let msg = "incorrect visibility restriction";
            let suggestion = r##"some possible visibility restrictions are: `pub(crate)`: visible only on the current crate `pub(super)`: visible only in the current module's parent `pub(in path::to::module)`: visible only on the specified path"##;
            let path = self.parse_path(PathStyle::Mod)?;
            let sp = self.prev_span;
            let help_msg = format!("make this visible only to module `{}` with `in`", path);
            self.expect(&token::CloseDelim(token::Paren))?; // `)`
            let mut err = struct_span_err!(self.sess.span_diagnostic, sp, E0704, "{}", msg);
            err.help(suggestion);
            err.span_suggestion_with_applicability(
                sp, &help_msg, format!("in {}", path), Applicability::MachineApplicable
            );
            err.emit(); // emit diagnostic, but continue with public visibility
        }
    }

    Ok(respan(lo, VisibilityKind::Public))
}

/// Parse defaultness: `default` or nothing.
fn parse_defaultness(&mut self) -> Defaultness {
    // `pub` is included for better error messages
    if self.check_keyword(keywords::Default) &&
       self.look_ahead(1, |t| t.is_keyword(keywords::Impl) ||
                              t.is_keyword(keywords::Const) ||
                              t.is_keyword(keywords::Fn) ||
                              t.is_keyword(keywords::Unsafe) ||
                              t.is_keyword(keywords::Extern) ||
                              t.is_keyword(keywords::Type) ||
                              t.is_keyword(keywords::Pub)) {
        self.bump(); // `default`
        Defaultness::Default
    } else {
        Defaultness::Final
    }
}

/// Given a termination token, parse all of the items in a module
fn parse_mod_items(&mut self, term: &token::Token, inner_lo: Span) -> PResult<'a, Mod> {
    let mut items = vec![];
    while let Some(item) = self.parse_item()? {
        items.push(item);
    }

    if !self.eat(term) {
        let token_str = self.this_token_to_string();
        let mut err = self.fatal(&format!("expected item, found `{}`", token_str));
        if token_str == ";" {
            let msg = "consider removing this semicolon";
            err.span_suggestion_short_with_applicability(
                self.span, msg, "".to_string(), Applicability::MachineApplicable
            );
            if !items.is_empty() {
                // Issue #51603
                let previous_item = &items[items.len()-1];
                let previous_item_kind_name = match previous_item.node {
                    // say "braced struct" because tuple-structs and
                    // braceless-empty-struct declarations do take a semicolon
                    ItemKind::Struct(..) => Some("braced struct"),
                    ItemKind::Enum(..) => Some("enum"),
                    ItemKind::Trait(..) => Some("trait"),
                    ItemKind::Union(..) => Some("union"),
                    _ => None,
                };
                if let Some(name) = previous_item_kind_name {
                    err.help(&format!("{} declarations are not followed by a semicolon", name));
                }
            }
        } else {
            err.span_label(self.span, "expected item");
        }
        return Err(err);
    }

    let hi = if self.span.is_dummy() {
        inner_lo
    } else {
        self.prev_span
    };

    Ok(ast::Mod {
        inner: inner_lo.to(hi),
        items,
    })
}

/// Parses `IDENT: TY = EXPR;` following `static`/`const`; `m` selects
/// `static` (with its mutability) vs `const` (`None`).
fn parse_item_const(&mut self, m: Option<Mutability>) -> PResult<'a, ItemInfo> {
    let id = self.parse_ident()?;
    self.expect(&token::Colon)?;
    let ty = self.parse_ty()?;
    self.expect(&token::Eq)?;
    let e = self.parse_expr()?;
    self.expect(&token::Semi)?;
    let item = match m {
        Some(m) => ItemKind::Static(ty, m, e),
        None => ItemKind::Const(ty, e),
    };
    Ok((id, item, None))
}

/// Parse a `mod <foo> { ... }` or `mod <foo>;` item
fn parse_item_mod(&mut self, outer_attrs: &[Attribute]) -> PResult<'a, ItemInfo> {
    let (in_cfg, outer_attrs) = {
        let mut strip_unconfigured = ::config::StripUnconfigured {
            sess: self.sess,
            should_test: false, // irrelevant
            features: None, // don't perform gated feature checking
        };
        let outer_attrs = strip_unconfigured.process_cfg_attrs(outer_attrs.to_owned());
        (!self.cfg_mods || strip_unconfigured.in_cfg(&outer_attrs), outer_attrs)
    };

    let id_span = self.span;
    let id = self.parse_ident()?;
    if self.check(&token::Semi) {
        self.bump();
        if in_cfg && self.recurse_into_file_modules {
            // This mod is in an external file. Let's go get it!
            let ModulePathSuccess { path, directory_ownership, warn } =
                self.submod_path(id, &outer_attrs, id_span)?;
            let (module, mut attrs) =
                self.eval_src_mod(path, directory_ownership, id.to_string(), id_span)?;
            if warn {
                let attr = Attribute {
                    id: attr::mk_attr_id(),
                    style: ast::AttrStyle::Outer,
                    path: ast::Path::from_ident(Ident::from_str("warn_directory_ownership")),
                    tokens: TokenStream::empty(),
                    is_sugared_doc: false,
                    span: syntax_pos::DUMMY_SP,
                };
                attr::mark_known(&attr);
                attrs.push(attr);
            }
            Ok((id, module, Some(attrs)))
        } else {
            // The mod is cfg'd out or file recursion is disabled: keep a placeholder.
            let placeholder = ast::Mod { inner: syntax_pos::DUMMY_SP, items: Vec::new() };
            Ok((id, ItemKind::Mod(placeholder), None))
        }
    } else {
        // Inline module body: parse items until the closing brace, restoring
        // the previous module directory afterwards.
        let old_directory = self.directory.clone();
        self.push_directory(id, &outer_attrs);

        self.expect(&token::OpenDelim(token::Brace))?;
        let mod_inner_lo = self.span;
        let attrs = self.parse_inner_attributes()?;
        let module = self.parse_mod_items(&token::CloseDelim(token::Brace), mod_inner_lo)?;

        self.directory = old_directory;
        Ok((id, ItemKind::Mod(module), Some(attrs)))
    }
}

/// Extends the current module directory with either the `#[path]` attribute
/// value or the module's own name.
fn push_directory(&mut self, id: Ident, attrs: &[Attribute]) {
    if let Some(path) = attr::first_attr_value_str_by_name(attrs, "path") {
        self.directory.path.to_mut().push(&path.as_str());
        self.directory.ownership = DirectoryOwnership::Owned { relative: None };
    } else {
        self.directory.path.to_mut().push(&id.as_str());
    }
}

/// Returns the submodule file path named by a `#[path = "..."]` attribute,
/// joined onto `dir_path`, or `None` if there is no such attribute.
pub fn submod_path_from_attr(attrs: &[Attribute], dir_path: &Path) -> Option<PathBuf> {
    if let Some(s) = attr::first_attr_value_str_by_name(attrs, "path") {
        let s = s.as_str();

        // On windows, the base path might have the form
        // `\\?\foo\bar` in which case it does not tolerate
        // mixed `/` and `\` separators, so canonicalize
        // `/` to `\`.
        #[cfg(windows)]
        let s = s.replace("/", "\\");
        Some(dir_path.join(s))
    } else {
        None
    }
}

/// Returns either a path to a module, or .
pub fn default_submod_path(
    id: ast::Ident,
    relative: Option<ast::Ident>,
    dir_path: &Path,
    source_map: &SourceMap) -> ModulePath
{
    // If we're in a foo.rs file instead of a mod.rs file,
    // we need to look for submodules in
    // `./foo/<id>.rs` and `./foo/<id>/mod.rs` rather than
    // `./<id>.rs` and `./<id>/mod.rs`.
    let relative_prefix_string;
    let relative_prefix = if let Some(ident) = relative {
        relative_prefix_string = format!("{}{}", ident.as_str(), path::MAIN_SEPARATOR);
        &relative_prefix_string
    } else {
        ""
    };

    let mod_name = id.to_string();
    let default_path_str = format!("{}{}.rs", relative_prefix, mod_name);
    let secondary_path_str = format!("{}{}{}mod.rs",
                                     relative_prefix, mod_name, path::MAIN_SEPARATOR);
    let default_path = dir_path.join(&default_path_str);
    let secondary_path = dir_path.join(&secondary_path_str);
    let default_exists = source_map.file_exists(&default_path);
    let secondary_exists = source_map.file_exists(&secondary_path);

    // Exactly one candidate must exist; both or neither is an error.
    let result = match (default_exists, secondary_exists) {
        (true, false) => Ok(ModulePathSuccess {
            path: default_path,
            directory_ownership: DirectoryOwnership::Owned {
                relative: Some(id),
            },
            warn: false,
        }),
        (false, true) => Ok(ModulePathSuccess {
            path: secondary_path,
            directory_ownership: DirectoryOwnership::Owned {
                relative: None,
            },
            warn: false,
        }),
        (false, false) => Err(Error::FileNotFoundForModule {
            mod_name: mod_name.clone(),
            default_path: default_path_str,
            secondary_path: secondary_path_str,
            dir_path: dir_path.display().to_string(),
        }),
        (true, true) => Err(Error::DuplicatePaths {
            mod_name: mod_name.clone(),
            default_path: default_path_str,
            secondary_path: secondary_path_str,
        }),
    };

    ModulePath {
        name: mod_name,
        path_exists: default_exists || secondary_exists,
        result,
    }
}

/// Resolves the file path for a `mod foo;` declaration, honouring `#[path]`
/// and the current directory's ownership; errors if the module cannot be
/// declared at this location.
fn submod_path(&mut self,
               id: ast::Ident,
               outer_attrs: &[Attribute],
               id_sp: Span) -> PResult<'a, ModulePathSuccess> {
    if let Some(path) = Parser::submod_path_from_attr(outer_attrs, &self.directory.path) {
        return Ok(ModulePathSuccess {
            directory_ownership: match path.file_name().and_then(|s| s.to_str()) {
                // All `#[path]` files are treated as though they are a `mod.rs` file.
                // This means that `mod foo;` declarations inside `#[path]`-included
                // files are siblings,
                //
                // Note that this will produce weirdness when a file named `foo.rs` is
                // `#[path]` included and contains a `mod foo;` declaration.
                // If you encounter this, it's your own darn fault :P
                Some(_) => DirectoryOwnership::Owned { relative: None },
                _ => DirectoryOwnership::UnownedViaMod(true),
            },
            path,
            warn: false,
        });
    }

    let relative = match self.directory.ownership {
        DirectoryOwnership::Owned { relative } => {
            // Push the usage onto the list of non-mod.rs mod uses.
            // This is used later for feature-gate error reporting.
            if let Some(cur_file_ident) = relative {
                self.sess
                    .non_modrs_mods.borrow_mut()
                    .push((cur_file_ident, id_sp));
            }
            relative
        },
        DirectoryOwnership::UnownedViaBlock |
        DirectoryOwnership::UnownedViaMod(_) => None,
    };
    let paths = Parser::default_submod_path(
                    id, relative, &self.directory.path, self.sess.source_map());

    match self.directory.ownership {
        DirectoryOwnership::Owned { .. } => {
            paths.result.map_err(|err| self.span_fatal_err(id_sp, err))
        },
        DirectoryOwnership::UnownedViaBlock => {
            let msg =
                "Cannot declare a non-inline module inside a block \
                unless it has a path attribute";
            let mut err = self.diagnostic().struct_span_err(id_sp, msg);
            if paths.path_exists {
                let msg = format!("Maybe `use` the module `{}` instead of redeclaring it",
                                  paths.name);
                err.span_note(id_sp, &msg);
            }
            Err(err)
        }
        DirectoryOwnership::UnownedViaMod(warn) => {
            // `warn` means the resolution is accepted with a lint-style warning
            // instead of a hard error (see the `warn_directory_ownership` attribute).
            if warn {
                if let Ok(result) = paths.result {
                    return Ok(ModulePathSuccess { warn: true, ..result });
                }
            }
            let mut err = self.diagnostic().struct_span_err(id_sp,
                "cannot declare a new module at this location");
            if !id_sp.is_dummy() {
                let src_path = self.sess.source_map().span_to_filename(id_sp);
                if let FileName::Real(src_path) = src_path {
                    if let Some(stem) = src_path.file_stem() {
                        let mut dest_path = src_path.clone();
                        dest_path.set_file_name(stem);
                        dest_path.push("mod.rs");
                        err.span_note(id_sp,
                                      &format!("maybe move this module `{}` to its own \
                                                directory via `{}`", src_path.display(),
                                               dest_path.display()));
                    }
                }
            }
            if paths.path_exists {
                err.span_note(id_sp,
                              &format!("... or maybe `use` the module `{}` instead \
                                        of possibly redeclaring it",
                                       paths.name));
            }
            Err(err)
        }
    }
}

/// Read a module from a source file.
fn eval_src_mod(&mut self,
                path: PathBuf,
                directory_ownership: DirectoryOwnership,
                name: String,
                id_sp: Span)
                -> PResult<'a, (ast::ItemKind, Vec<Attribute> )> {
    // Detect `mod` cycles via the stack of files currently being included.
    let mut included_mod_stack = self.sess.included_mod_stack.borrow_mut();
    if let Some(i) = included_mod_stack.iter().position(|p| *p == path) {
        let mut err = String::from("circular modules: ");
        let len = included_mod_stack.len();
        for p in &included_mod_stack[i.. len] {
            err.push_str(&p.to_string_lossy());
            err.push_str(" -> ");
        }
        err.push_str(&path.to_string_lossy());
        return Err(self.span_fatal(id_sp, &err[..]));
    }
    included_mod_stack.push(path.clone());
    // Release the borrow before recursing into the sub-parser, which also
    // borrows `included_mod_stack`.
    drop(included_mod_stack);

    let mut p0 =
        new_sub_parser_from_file(self.sess, &path, directory_ownership, Some(name), id_sp);
    p0.cfg_mods = self.cfg_mods;
    let mod_inner_lo = p0.span;
    let mod_attrs = p0.parse_inner_attributes()?;
    let m0 = p0.parse_mod_items(&token::Eof, mod_inner_lo)?;
    self.sess.included_mod_stack.borrow_mut().pop();
    Ok((ast::ItemKind::Mod(m0), mod_attrs))
}

/// Parse a function declaration from a foreign module
fn parse_item_foreign_fn(&mut self, vis: ast::Visibility, lo: Span, attrs: Vec<Attribute>)
                         -> PResult<'a, ForeignItem> {
    self.expect_keyword(keywords::Fn)?;

    let (ident, mut generics) = self.parse_fn_header()?;
    let decl = self.parse_fn_decl(true)?;
    generics.where_clause = self.parse_where_clause()?;
    let hi = self.span;
    self.expect(&token::Semi)?;
    Ok(ast::ForeignItem {
        ident,
        attrs,
        node: ForeignItemKind::Fn(decl, generics),
        id: ast::DUMMY_NODE_ID,
        span: lo.to(hi),
        vis,
    })
}

/// Parse a static item from a foreign module.
/// Assumes that the `static` keyword is already parsed.
fn parse_item_foreign_static(&mut self, vis: ast::Visibility, lo: Span, attrs: Vec<Attribute>)
                             -> PResult<'a, ForeignItem> {
    let mutbl = self.eat_keyword(keywords::Mut);
    let ident = self.parse_ident()?;
    self.expect(&token::Colon)?;
    let ty = self.parse_ty()?;
    let hi = self.span;
    self.expect(&token::Semi)?;
    Ok(ForeignItem {
        ident,
        attrs,
        node: ForeignItemKind::Static(ty, mutbl),
        id: ast::DUMMY_NODE_ID,
        span: lo.to(hi),
        vis,
    })
}

/// Parse a type from a foreign module
fn parse_item_foreign_type(&mut self, vis: ast::Visibility, lo: Span, attrs: Vec<Attribute>)
                           -> PResult<'a, ForeignItem> {
    self.expect_keyword(keywords::Type)?;

    let ident = self.parse_ident()?;
    let hi = self.span;
    self.expect(&token::Semi)?;
    Ok(ast::ForeignItem {
        ident: ident,
        attrs: attrs,
        node: ForeignItemKind::Ty,
        id: ast::DUMMY_NODE_ID,
        span: lo.to(hi),
        vis: vis
    })
}

/// Parses a crate name after `extern crate`, accepting (and diagnosing)
/// dash-separated names by rewriting them with underscores.
fn parse_crate_name_with_dashes(&mut self) -> PResult<'a, ast::Ident> {
    let error_msg = "crate name using dashes are not valid in `extern crate` statements";
    let suggestion_msg = "if the original crate name uses dashes you need to use underscores \
                          in the code";
    let mut ident = self.parse_ident()?;
    let mut idents = vec![];
    let mut replacement = vec![];
    let mut fixed_crate_name = false;
    // Accept `extern crate name-like-this` for better diagnostics
    let dash = token::Token::BinOp(token::BinOpToken::Minus);
    if self.token == dash {
        // Do not include `-` as part of the expected tokens list
        while self.eat(&dash) {
            fixed_crate_name = true;
            replacement.push((self.prev_span, "_".to_string()));
            idents.push(self.parse_ident()?);
        }
    }
    if fixed_crate_name {
        let fixed_name_sp = ident.span.to(idents.last().unwrap().span);
        let mut fixed_name = format!("{}", ident.name);
        for part in idents {
            fixed_name.push_str(&format!("_{}", part.name));
        }
        ident = Ident::from_str(&fixed_name).with_span_pos(fixed_name_sp);

        let mut err = self.struct_span_err(fixed_name_sp, error_msg);
        err.span_label(fixed_name_sp, "dash-separated idents are not valid");
        err.multipart_suggestion(suggestion_msg, replacement);
        err.emit();
    }
    Ok(ident)
}

/// Parse extern crate links
///
/// # Examples
///
/// extern crate foo;
/// extern crate bar as foo;
fn parse_item_extern_crate(&mut self,
                           lo: Span,
                           visibility: Visibility,
                           attrs: Vec<Attribute>)
                           -> PResult<'a, P<Item>> {
    // Accept `extern crate name-like-this` for better diagnostics
    let orig_name = self.parse_crate_name_with_dashes()?;
    let (item_name, orig_name) = if let Some(rename) = self.parse_rename()? {
        (rename, Some(orig_name.name))
    } else {
        (orig_name, None)
    };
    self.expect(&token::Semi)?;

    let span = lo.to(self.prev_span);
    Ok(self.mk_item(span, item_name, ItemKind::ExternCrate(orig_name), visibility, attrs))
}

/// Parse `extern` for foreign ABIs
/// modules.
///
/// `extern` is expected to have been
/// consumed before calling this method
///
/// # Examples:
///
/// extern "C" {}
/// extern {}
fn parse_item_foreign_mod(&mut self,
                          lo: Span,
                          opt_abi: Option<Abi>,
                          visibility: Visibility,
                          mut attrs: Vec<Attribute>)
                          -> PResult<'a, P<Item>> {
    self.expect(&token::OpenDelim(token::Brace))?;

    // No explicit ABI string means the default `extern` ABI, i.e. "C".
    let abi = opt_abi.unwrap_or(Abi::C);

    attrs.extend(self.parse_inner_attributes()?);

    let mut foreign_items = vec![];
    while let Some(item) = self.parse_foreign_item()? {
        foreign_items.push(item);
    }
    self.expect(&token::CloseDelim(token::Brace))?;

    let prev_span = self.prev_span;
    let m = ast::ForeignMod {
        abi,
        items: foreign_items
    };
    let invalid = keywords::Invalid.ident();
    Ok(self.mk_item(lo.to(prev_span), invalid, ItemKind::ForeignMod(m), visibility, attrs))
}

/// Parse type Foo = Bar;
/// or
/// existential type Foo: Bar;
/// or
/// return None without modifying the parser state
fn eat_type(&mut self) -> Option<PResult<'a, (Ident, AliasKind, ast::Generics)>> {
    // This parses the grammar:
    //     Ident ["<"...">"] ["where" ...] ("=" | ":") Ty ";"
    if self.check_keyword(keywords::Type) ||
       self.check_keyword(keywords::Existential) &&
            self.look_ahead(1, |t| t.is_keyword(keywords::Type)) {
        let existential = self.eat_keyword(keywords::Existential);
        assert!(self.eat_keyword(keywords::Type));
        Some(self.parse_existential_or_alias(existential))
    } else {
        None
    }
}

/// Parse type alias or existential type
fn parse_existential_or_alias(
    &mut self,
    existential: bool,
) -> PResult<'a, (Ident, AliasKind, ast::Generics)> {
    let ident = self.parse_ident()?;
    let mut tps = self.parse_generics()?;
    tps.where_clause = self.parse_where_clause()?;
    let alias = if existential {
        // `existential type Foo: Bound;`
        self.expect(&token::Colon)?;
        let bounds = self.parse_generic_bounds()?;
        AliasKind::Existential(bounds)
    } else {
        // `type Foo = Ty;`
        self.expect(&token::Eq)?;
        let ty = self.parse_ty()?;
        AliasKind::Weak(ty)
    };
    self.expect(&token::Semi)?;
    Ok((ident, alias, tps))
}

/// Parse the part of an "enum" decl following the '{'
fn parse_enum_def(&mut self, _generics: &ast::Generics) -> PResult<'a, EnumDef> {
    let mut variants = Vec::new();
    let mut all_nullary = true;
    // Span of the last explicit discriminant seen, used for the
    // "field-less enum" check after the variant list is parsed.
    let mut any_disr = None;
    while self.token != token::CloseDelim(token::Brace) {
        let variant_attrs = self.parse_outer_attributes()?;
        let vlo = self.span;

        let struct_def;
        let mut disr_expr = None;
        let ident = self.parse_ident()?;
        if self.check(&token::OpenDelim(token::Brace)) {
            // Parse a struct variant.
            all_nullary = false;
            struct_def = VariantData::Struct(self.parse_record_struct_body()?,
                                             ast::DUMMY_NODE_ID);
        } else if self.check(&token::OpenDelim(token::Paren)) {
            all_nullary = false;
            struct_def = VariantData::Tuple(self.parse_tuple_struct_body()?,
                                            ast::DUMMY_NODE_ID);
        } else if self.eat(&token::Eq) {
            disr_expr = Some(AnonConst {
                id: ast::DUMMY_NODE_ID,
                value: self.parse_expr()?,
            });
            any_disr = disr_expr.as_ref().map(|c| c.value.span);
            struct_def = VariantData::Unit(ast::DUMMY_NODE_ID);
        } else {
            struct_def = VariantData::Unit(ast::DUMMY_NODE_ID);
        }

        let vr = ast::Variant_ {
            ident,
            attrs: variant_attrs,
            data: struct_def,
            disr_expr,
        };
        variants.push(respan(vlo.to(self.prev_span), vr));

        if !self.eat(&token::Comma) { break; }
    }
    self.expect(&token::CloseDelim(token::Brace))?;
    match any_disr {
        Some(disr_span) if !all_nullary =>
            self.span_err(disr_span,
                          "discriminator values can only be used with a field-less enum"),
        _ => ()
    }

    Ok(ast::EnumDef { variants: variants })
}

/// Parse an "enum" declaration
fn parse_item_enum(&mut self) -> PResult<'a, ItemInfo> {
    let id = self.parse_ident()?;
    let mut generics = self.parse_generics()?;
    generics.where_clause = self.parse_where_clause()?;
    self.expect(&token::OpenDelim(token::Brace))?;

    let enum_definition = self.parse_enum_def(&generics).map_err(|e| {
        self.recover_stmt();
        self.eat(&token::CloseDelim(token::Brace));
        e
    })?;
    Ok((id, ItemKind::Enum(enum_definition, generics), None))
}

/// Parses a string as an ABI spec on an extern type or module. Consumes
/// the `extern` keyword, if one is found.
fn parse_opt_abi(&mut self) -> PResult<'a, Option<Abi>> {
    match self.token {
        token::Literal(token::Str_(s), suf) |
        token::Literal(token::StrRaw(s, _), suf) => {
            let sp = self.span;
            // ABI string literals may not carry a suffix.
            self.expect_no_suffix(sp, "ABI spec", suf);
            self.bump();
            match abi::lookup(&s.as_str()) {
                Some(abi) => Ok(Some(abi)),
                None => {
                    let prev_span = self.prev_span;
                    let mut err = struct_span_err!(
                        self.sess.span_diagnostic,
                        prev_span,
                        E0703,
                        "invalid ABI: found `{}`",
                        s);
                    err.span_label(prev_span, "invalid ABI");
                    err.help(&format!("valid ABIs: {}", abi::all_names().join(", ")));
                    err.emit();
                    // Recover by behaving as if no ABI string was given.
                    Ok(None)
                }
            }
        }

        _ => Ok(None),
    }
}

/// Returns true when the upcoming tokens are a `static` item rather than a
/// `static`/`static move` closure (which starts with `|` or `||`).
fn is_static_global(&mut self) -> bool {
    if self.check_keyword(keywords::Static) {
        // Check if this could be a closure
        !self.look_ahead(1, |token| {
            if token.is_keyword(keywords::Move) {
                return true;
            }
            match *token {
                token::BinOp(token::Or) | token::OrOr => true,
                _ => false,
            }
        })
    } else {
        false
    }
}

/// Wraps `parse_item_implementation` with token collection so the parsed
/// item can later be re-emitted as a token stream (e.g. for proc macros).
fn parse_item_(
    &mut self,
    attrs: Vec<Attribute>,
    macros_allowed: bool,
    attributes_allowed: bool,
) -> PResult<'a, Option<P<Item>>> {
    let (ret, tokens) = self.collect_tokens(|this| {
        this.parse_item_implementation(attrs, macros_allowed, attributes_allowed)
    })?;

    // Once we've parsed an item and recorded the tokens we got while
    // parsing we may want to store `tokens` into the item we're about to
    // return. Note, though, that we specifically didn't capture tokens
    // related to outer attributes. The `tokens` field here may later be
    // used with procedural macros to convert this item back into a token
    // stream, but during expansion we may be removing attributes as we go
    // along.
    //
    // If we've got inner attributes then the `tokens` we've got above holds
    // these inner attributes. If an inner attribute is expanded we won't
    // actually remove it from the token stream, so we'll just keep yielding
    // it (bad!). To work around this case for now we just avoid recording
    // `tokens` if we detect any inner attributes.
    // This should help keep
    // expansion correct, but we should fix this bug one day!
    Ok(ret.map(|item| {
        item.map(|mut i| {
            if !i.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) {
                i.tokens = Some(tokens);
            }
            i
        })
    }))
}

/// Parse one of the items allowed by the flags.
fn parse_item_implementation(
    &mut self,
    attrs: Vec<Attribute>,
    macros_allowed: bool,
    attributes_allowed: bool,
) -> PResult<'a, Option<P<Item>>> {
    // If the item arrived pre-parsed via macro interpolation, merge the
    // outer attributes into it and return it directly.
    maybe_whole!(self, NtItem, |item| {
        let mut item = item.into_inner();
        let mut attrs = attrs;
        mem::swap(&mut item.attrs, &mut attrs);
        item.attrs.extend(attrs);
        Some(P(item))
    });

    let lo = self.span;

    let visibility = self.parse_visibility(false)?;

    if self.eat_keyword(keywords::Use) {
        // USE ITEM
        let item_ = ItemKind::Use(P(self.parse_use_tree()?));
        self.expect(&token::Semi)?;

        let span = lo.to(self.prev_span);
        let item = self.mk_item(span, keywords::Invalid.ident(), item_, visibility, attrs);
        return Ok(Some(item));
    }

    if self.check_keyword(keywords::Extern) && self.is_extern_non_path() {
        self.bump(); // `extern`
        if self.eat_keyword(keywords::Crate) {
            return Ok(Some(self.parse_item_extern_crate(lo, visibility, attrs)?));
        }

        let opt_abi = self.parse_opt_abi()?;

        if self.eat_keyword(keywords::Fn) {
            // EXTERN FUNCTION ITEM
            let fn_span = self.prev_span;
            // `extern fn` with no ABI string defaults to the C ABI.
            let abi = opt_abi.unwrap_or(Abi::C);
            let (ident, item_, extra_attrs) =
                self.parse_item_fn(Unsafety::Normal,
                                   IsAsync::NotAsync,
                                   respan(fn_span, Constness::NotConst),
                                   abi)?;
            let prev_span = self.prev_span;
            let item = self.mk_item(lo.to(prev_span),
                                    ident,
                                    item_,
                                    visibility,
                                    maybe_append(attrs, extra_attrs));
            return Ok(Some(item));
        } else if self.check(&token::OpenDelim(token::Brace)) {
            return Ok(Some(self.parse_item_foreign_mod(lo, opt_abi, visibility, attrs)?));
        }

        self.unexpected()?;
    }

    if self.is_static_global() {
        self.bump();
        // STATIC ITEM
        let m = if self.eat_keyword(keywords::Mut) {
            Mutability::Mutable
        } else {
            Mutability::Immutable
        };
        let (ident, item_, extra_attrs) = self.parse_item_const(Some(m))?;
        let prev_span =
            self.prev_span;
        let item = self.mk_item(lo.to(prev_span),
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if self.eat_keyword(keywords::Const) {
        let const_span = self.prev_span;
        if self.check_keyword(keywords::Fn)
            || (self.check_keyword(keywords::Unsafe)
                && self.look_ahead(1, |t| t.is_keyword(keywords::Fn))) {
            // CONST FUNCTION ITEM
            let unsafety = self.parse_unsafety();
            self.bump();
            let (ident, item_, extra_attrs) =
                self.parse_item_fn(unsafety,
                                   IsAsync::NotAsync,
                                   respan(const_span, Constness::Const),
                                   Abi::Rust)?;
            let prev_span = self.prev_span;
            let item = self.mk_item(lo.to(prev_span),
                                    ident,
                                    item_,
                                    visibility,
                                    maybe_append(attrs, extra_attrs));
            return Ok(Some(item));
        }

        // CONST ITEM
        if self.eat_keyword(keywords::Mut) {
            // `const mut` is never valid; steer the user towards `static mut`.
            let prev_span = self.prev_span;
            self.diagnostic().struct_span_err(prev_span, "const globals cannot be mutable")
                             .help("did you mean to declare a static?")
                             .emit();
        }
        let (ident, item_, extra_attrs) = self.parse_item_const(None)?;
        let prev_span = self.prev_span;
        let item = self.mk_item(lo.to(prev_span),
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }

    // `unsafe async fn` or `async fn`
    if (
        self.check_keyword(keywords::Unsafe) &&
        self.look_ahead(1, |t| t.is_keyword(keywords::Async))
    ) || (
        self.check_keyword(keywords::Async) &&
        self.look_ahead(1, |t| t.is_keyword(keywords::Fn))
    ) {
        // ASYNC FUNCTION ITEM
        let unsafety = self.parse_unsafety();
        self.expect_keyword(keywords::Async)?;
        self.expect_keyword(keywords::Fn)?;
        let fn_span = self.prev_span;
        let (ident, item_, extra_attrs) =
            self.parse_item_fn(unsafety,
                               IsAsync::Async {
                                   closure_id: ast::DUMMY_NODE_ID,
                                   return_impl_trait_id: ast::DUMMY_NODE_ID,
                               },
                               respan(fn_span, Constness::NotConst),
                               Abi::Rust)?;
        let prev_span = self.prev_span;
        let item = self.mk_item(lo.to(prev_span),
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if self.check_keyword(keywords::Unsafe) &&
        (self.look_ahead(1, |t|
            t.is_keyword(keywords::Trait)) ||
         self.look_ahead(1, |t| t.is_keyword(keywords::Auto)))
    {
        // UNSAFE TRAIT ITEM
        self.bump(); // `unsafe`
        let is_auto = if self.eat_keyword(keywords::Trait) {
            IsAuto::No
        } else {
            // `unsafe auto trait`
            self.expect_keyword(keywords::Auto)?;
            self.expect_keyword(keywords::Trait)?;
            IsAuto::Yes
        };
        let (ident, item_, extra_attrs) =
            self.parse_item_trait(is_auto, Unsafety::Unsafe)?;
        let prev_span = self.prev_span;
        let item = self.mk_item(lo.to(prev_span),
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    // `impl`, `unsafe impl`, `default impl`, `default unsafe impl`
    if self.check_keyword(keywords::Impl) ||
       self.check_keyword(keywords::Unsafe) &&
            self.look_ahead(1, |t| t.is_keyword(keywords::Impl)) ||
       self.check_keyword(keywords::Default) &&
            self.look_ahead(1, |t| t.is_keyword(keywords::Impl)) ||
       self.check_keyword(keywords::Default) &&
            self.look_ahead(1, |t| t.is_keyword(keywords::Unsafe)) {
        // IMPL ITEM
        let defaultness = self.parse_defaultness();
        let unsafety = self.parse_unsafety();
        self.expect_keyword(keywords::Impl)?;
        let (ident, item, extra_attrs) = self.parse_item_impl(unsafety, defaultness)?;
        let span = lo.to(self.prev_span);
        return Ok(Some(self.mk_item(span, ident, item, visibility,
                                    maybe_append(attrs, extra_attrs))));
    }
    if self.check_keyword(keywords::Fn) {
        // FUNCTION ITEM
        self.bump();
        let fn_span = self.prev_span;
        let (ident, item_, extra_attrs) =
            self.parse_item_fn(Unsafety::Normal,
                               IsAsync::NotAsync,
                               respan(fn_span, Constness::NotConst),
                               Abi::Rust)?;
        let prev_span = self.prev_span;
        let item = self.mk_item(lo.to(prev_span),
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if self.check_keyword(keywords::Unsafe)
        && self.look_ahead(1, |t| *t != token::OpenDelim(token::Brace)) {
        // UNSAFE FUNCTION ITEM
        self.bump(); // `unsafe`
        // `{` is also expected after `unsafe`, in case of error, include it in the diagnostic
        self.check(&token::OpenDelim(token::Brace));
        let abi = if self.eat_keyword(keywords::Extern) {
            self.parse_opt_abi()?.unwrap_or(Abi::C)
        }
        else {
            Abi::Rust
        };
        self.expect_keyword(keywords::Fn)?;
        let fn_span = self.prev_span;
        let (ident, item_, extra_attrs) =
            self.parse_item_fn(Unsafety::Unsafe,
                               IsAsync::NotAsync,
                               respan(fn_span, Constness::NotConst),
                               abi)?;
        let prev_span = self.prev_span;
        let item = self.mk_item(lo.to(prev_span),
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if self.eat_keyword(keywords::Mod) {
        // MODULE ITEM
        let (ident, item_, extra_attrs) =
            self.parse_item_mod(&attrs[..])?;
        let prev_span = self.prev_span;
        let item = self.mk_item(lo.to(prev_span),
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if let Some(type_) = self.eat_type() {
        let (ident, alias, generics) = type_?;
        // TYPE ITEM
        let item_ = match alias {
            AliasKind::Weak(ty) => ItemKind::Ty(ty, generics),
            AliasKind::Existential(bounds) => ItemKind::Existential(bounds, generics),
        };
        let prev_span = self.prev_span;
        let item = self.mk_item(lo.to(prev_span),
                                ident,
                                item_,
                                visibility,
                                attrs);
        return Ok(Some(item));
    }
    if self.eat_keyword(keywords::Enum) {
        // ENUM ITEM
        let (ident, item_, extra_attrs) = self.parse_item_enum()?;
        let prev_span = self.prev_span;
        let item = self.mk_item(lo.to(prev_span),
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    // `trait` or `auto trait`
    if self.check_keyword(keywords::Trait)
        || (self.check_keyword(keywords::Auto)
            && self.look_ahead(1, |t| t.is_keyword(keywords::Trait)))
    {
        let is_auto = if self.eat_keyword(keywords::Trait) {
            IsAuto::No
        } else {
            self.expect_keyword(keywords::Auto)?;
            self.expect_keyword(keywords::Trait)?;
            IsAuto::Yes
        };
        // TRAIT ITEM
        let (ident, item_, extra_attrs) =
            self.parse_item_trait(is_auto, Unsafety::Normal)?;
        let prev_span = self.prev_span;
        let item = self.mk_item(lo.to(prev_span),
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if self.eat_keyword(keywords::Struct) {
        // STRUCT ITEM
        let (ident, item_, extra_attrs) = self.parse_item_struct()?;
        let
            prev_span = self.prev_span;
        let item = self.mk_item(lo.to(prev_span),
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if self.is_union_item() {
        // UNION ITEM
        self.bump();
        let (ident, item_, extra_attrs) = self.parse_item_union()?;
        let prev_span = self.prev_span;
        let item = self.mk_item(lo.to(prev_span),
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if let Some(macro_def) = self.eat_macro_def(&attrs, &visibility, lo)? {
        return Ok(Some(macro_def));
    }

    // Verify whether we have encountered a struct or method definition where the user forgot to
    // add the `struct` or `fn` keyword after writing `pub`: `pub S {}`
    if visibility.node.is_pub() &&
        self.check_ident() &&
        self.look_ahead(1, |t| *t != token::Not)
    {
        // Space between `pub` keyword and the identifier
        //
        //     pub   S {}
        //        ^^^ `sp` points here
        let sp = self.prev_span.between(self.span);
        let full_sp = self.prev_span.to(self.span);
        let ident_sp = self.span;
        if self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) {
            // possible public struct definition where `struct` was forgotten
            let ident = self.parse_ident().unwrap();
            let msg = format!("add `struct` here to parse `{}` as a public struct",
                              ident);
            let mut err = self.diagnostic()
                .struct_span_err(sp, "missing `struct` for struct definition");
            err.span_suggestion_short_with_applicability(
                sp, &msg, " struct ".into(),
                Applicability::MaybeIncorrect // speculative
            );
            return Err(err);
        } else if self.look_ahead(1, |t| *t == token::OpenDelim(token::Paren)) {
            // `pub ident(...)`: likely a method or tuple struct missing its keyword.
            let ident = self.parse_ident().unwrap();
            self.consume_block(token::Paren);
            let (kw, kw_name, ambiguous) = if self.check(&token::RArrow) ||
                self.check(&token::OpenDelim(token::Brace))
            {
                ("fn", "method", false)
            } else if self.check(&token::Colon) {
                let kw = "struct";
                (kw, kw, false)
            } else {
                // Could be either; don't auto-suggest a fix.
                ("fn` or `struct", "method or struct", true)
            };

            let msg = format!("missing `{}` for {} definition", kw, kw_name);
            let mut err =
                self.diagnostic().struct_span_err(sp, &msg);
            if !ambiguous {
                let suggestion = format!("add `{}` here to parse `{}` as a public {}",
                                         kw,
                                         ident,
                                         kw_name);
                err.span_suggestion_short_with_applicability(
                    sp, &suggestion, format!(" {} ", kw),
                    Applicability::MachineApplicable
                );
            } else {
                if let Ok(snippet) = self.sess.source_map().span_to_snippet(ident_sp) {
                    err.span_suggestion_with_applicability(
                        full_sp,
                        "if you meant to call a macro, try",
                        format!("{}!", snippet),
                        // this is the `ambiguous` conditional branch
                        Applicability::MaybeIncorrect
                    );
                } else {
                    err.help("if you meant to call a macro, remove the `pub` \
                              and add a trailing `!` after the identifier");
                }
            }
            return Err(err);
        }
    }
    self.parse_macro_use_or_failure(attrs, macros_allowed, attributes_allowed, lo, visibility)
}

/// Parse a foreign item.
crate fn parse_foreign_item(&mut self) -> PResult<'a, Option<ForeignItem>> {
    maybe_whole!(self, NtForeignItem, |ni| Some(ni));

    let attrs = self.parse_outer_attributes()?;
    let lo = self.span;
    let visibility = self.parse_visibility(false)?;

    // FOREIGN STATIC ITEM
    // Treat `const` as `static` for error recovery, but don't add it to expected tokens.
    if self.check_keyword(keywords::Static) || self.token.is_keyword(keywords::Const) {
        if self.token.is_keyword(keywords::Const) {
            self.diagnostic()
                .struct_span_err(self.span, "extern items cannot be `const`")
                .span_suggestion_with_applicability(
                    self.span,
                    "try using a static value",
                    "static".to_owned(),
                    Applicability::MachineApplicable
                ).emit();
        }
        self.bump(); // `static` or `const`
        return Ok(Some(self.parse_item_foreign_static(visibility, lo, attrs)?));
    }
    // FOREIGN FUNCTION ITEM
    if self.check_keyword(keywords::Fn) {
        return Ok(Some(self.parse_item_foreign_fn(visibility, lo, attrs)?));
    }
    // FOREIGN TYPE ITEM
    if self.check_keyword(keywords::Type) {
        return Ok(Some(self.parse_item_foreign_type(visibility, lo, attrs)?));
    }

    match self.parse_assoc_macro_invoc("extern", Some(&visibility), &mut false)?
    {
        Some(mac) => {
            Ok(Some(
                ForeignItem {
                    ident: keywords::Invalid.ident(),
                    span: lo.to(self.prev_span),
                    id: ast::DUMMY_NODE_ID,
                    attrs,
                    vis: visibility,
                    node: ForeignItemKind::Macro(mac),
                }
            ))
        }
        None => {
            // No item here; complain about dangling attributes if any.
            if !attrs.is_empty()  {
                self.expected_item_err(&attrs);
            }
            Ok(None)
        }
    }
}

/// This is the fall-through for parsing items.
fn parse_macro_use_or_failure(
    &mut self,
    attrs: Vec<Attribute> ,
    macros_allowed: bool,
    attributes_allowed: bool,
    lo: Span,
    visibility: Visibility
) -> PResult<'a, Option<P<Item>>> {
    if macros_allowed && self.token.is_path_start() {
        // MACRO INVOCATION ITEM

        let prev_span = self.prev_span;
        self.complain_if_pub_macro(&visibility.node, prev_span);

        let mac_lo = self.span;

        // item macro.
        let pth = self.parse_path(PathStyle::Mod)?;
        self.expect(&token::Not)?;

        // a 'special' identifier (like what `macro_rules!` uses)
        // is optional. We should eventually unify invoc syntax
        // and remove this.
        let id = if self.token.is_ident() {
            self.parse_ident()?
        } else {
            keywords::Invalid.ident() // no special identifier
        };
        // eat a matched-delimiter token tree:
        let (delim, tts) = self.expect_delimited_token_tree()?;
        if delim != MacDelimiter::Brace {
            // Non-brace-delimited item macros must be terminated with `;`.
            if !self.eat(&token::Semi) {
                self.span_err(self.prev_span,
                              "macros that expand to items must either \
                               be surrounded with braces or followed by \
                               a semicolon");
            }
        }

        let hi = self.prev_span;
        let mac = respan(mac_lo.to(hi), Mac_ { path: pth, tts, delim });
        let item = self.mk_item(lo.to(hi), id, ItemKind::Mac(mac), visibility, attrs);
        return Ok(Some(item));
    }

    // FAILURE TO PARSE ITEM
    match visibility.node {
        VisibilityKind::Inherited => {}
        _ => {
            return Err(self.span_fatal(self.prev_span, "unmatched visibility `pub`"));
        }
    }

    if !attributes_allowed && !attrs.is_empty() {
        self.expected_item_err(&attrs);
    }
    Ok(None)
}

/// Parse a macro invocation inside a `trait`, `impl` or `extern` block
fn parse_assoc_macro_invoc(&mut self, item_kind: &str, vis: Option<&Visibility>,
                           at_end: &mut bool) -> PResult<'a, Option<Mac>>
{
    if
       self.token.is_path_start() && !self.is_extern_non_path() {
        let prev_span = self.prev_span;
        let lo = self.span;
        let pth = self.parse_path(PathStyle::Mod)?;

        if pth.segments.len() == 1 {
            // A bare identifier without `!` is likely a missing item
            // keyword (`fn`, `type`, ...) rather than a macro call.
            if !self.eat(&token::Not) {
                return Err(self.missing_assoc_item_kind_err(item_kind, prev_span));
            }
        } else {
            self.expect(&token::Not)?;
        }

        if let Some(vis) = vis {
            self.complain_if_pub_macro(&vis.node, prev_span);
        }

        *at_end = true;

        // eat a matched-delimiter token tree:
        let (delim, tts) = self.expect_delimited_token_tree()?;
        if delim != MacDelimiter::Brace {
            self.expect(&token::Semi)?
        }

        Ok(Some(respan(lo.to(self.prev_span), Mac_ { path: pth, tts, delim })))
    } else {
        Ok(None)
    }
}

/// Runs `f` while recording every token it consumes; returns `f`'s result
/// together with the captured `TokenStream`. Supports nested (recursive)
/// collection via the `LastToken::Collecting` state on the token cursor.
fn collect_tokens<F, R>(&mut self, f: F) -> PResult<'a, (R, TokenStream)>
    where F: FnOnce(&mut Self) -> PResult<'a, R>
{
    // Record all tokens we parse when parsing this item.
    let mut tokens = Vec::new();
    let prev_collecting = match self.token_cursor.frame.last_token {
        LastToken::Collecting(ref mut list) => {
            Some(mem::replace(list, Vec::new()))
        }
        LastToken::Was(ref mut last) => {
            tokens.extend(last.take());
            None
        }
    };
    self.token_cursor.frame.last_token = LastToken::Collecting(tokens);
    let prev = self.token_cursor.stack.len();
    let ret = f(self);
    // `f` may have pushed/popped cursor frames; find where the collected
    // tokens actually live now.
    let last_token = if self.token_cursor.stack.len() == prev {
        &mut self.token_cursor.frame.last_token
    } else {
        &mut self.token_cursor.stack[prev].last_token
    };

    // Pull out the tokens that we've collected from the call to `f` above
    let mut collected_tokens = match *last_token {
        LastToken::Collecting(ref mut v) => mem::replace(v, Vec::new()),
        LastToken::Was(_) => panic!("our vector went away?"),
    };

    // If we're not at EOF our current token wasn't actually consumed by
    // `f`, but it'll still be in our list that we pulled out. In that case
    // put it back.
    let extra_token = if self.token != token::Eof {
        collected_tokens.pop()
    } else {
        None
    };

    // If we were previously collecting tokens, then this was a recursive
    // call.
    // In that case we need to record all the tokens we collected in
    // our parent list as well. To do that we push a clone of our stream
    // onto the previous list.
    let stream = collected_tokens.into_iter().collect::<TokenStream>();
    match prev_collecting {
        Some(mut list) => {
            list.push(stream.clone());
            list.extend(extra_token);
            *last_token = LastToken::Collecting(list);
        }
        None => {
            *last_token = LastToken::Was(extra_token);
        }
    }

    Ok((ret?, stream))
}

pub fn parse_item(&mut self) -> PResult<'a, Option<P<Item>>> {
    let attrs = self.parse_outer_attributes()?;
    self.parse_item_(attrs, true, false)
}

/// `::{` or `::*`
fn is_import_coupler(&mut self) -> bool {
    self.check(&token::ModSep) &&
        self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace) ||
                               *t == token::BinOp(token::Star))
}

/// Parse UseTree
///
/// USE_TREE = [`::`] `*` |
///            [`::`] `{` USE_TREE_LIST `}` |
///            PATH `::` `*` |
///            PATH `::` `{` USE_TREE_LIST `}` |
///            PATH [`as` IDENT]
fn parse_use_tree(&mut self) -> PResult<'a, UseTree> {
    let lo = self.span;

    let mut prefix = ast::Path { segments: Vec::new(), span: lo.shrink_to_lo() };
    let kind = if self.check(&token::OpenDelim(token::Brace)) ||
                  self.check(&token::BinOp(token::Star)) ||
                  self.is_import_coupler() {
        // `use *;` or `use ::*;` or `use {...};` or `use ::{...};`
        if self.eat(&token::ModSep) {
            prefix.segments.push(PathSegment::crate_root(lo.shrink_to_lo()));
        }

        if self.eat(&token::BinOp(token::Star)) {
            UseTreeKind::Glob
        } else {
            UseTreeKind::Nested(self.parse_use_tree_list()?)
        }
    } else {
        // `use path::*;` or `use path::{...};` or `use path;` or `use path as bar;`
        prefix = self.parse_path(PathStyle::Mod)?;

        if self.eat(&token::ModSep) {
            if self.eat(&token::BinOp(token::Star)) {
                UseTreeKind::Glob
            } else {
                UseTreeKind::Nested(self.parse_use_tree_list()?)
            }
        } else {
            UseTreeKind::Simple(self.parse_rename()?, ast::DUMMY_NODE_ID, ast::DUMMY_NODE_ID)
        }
    };

    Ok(UseTree { prefix, kind, span: lo.to(self.prev_span) })
}

/// Parse UseTreeKind::Nested(list)
///
/// USE_TREE_LIST = Ø | (USE_TREE `,`)* USE_TREE [`,`]
fn parse_use_tree_list(&mut self) -> PResult<'a, Vec<(UseTree, ast::NodeId)>> {
    self.parse_unspanned_seq(&token::OpenDelim(token::Brace),
                             &token::CloseDelim(token::Brace),
                             SeqSep::trailing_allowed(token::Comma), |this| {
        Ok((this.parse_use_tree()?, ast::DUMMY_NODE_ID))
    })
}

/// Parses an optional `as IDENT` rename; `as _` yields a gensym'd `_`.
fn parse_rename(&mut self) -> PResult<'a, Option<Ident>> {
    if self.eat_keyword(keywords::As) {
        match self.token {
            token::Ident(ident, false) if ident.name == keywords::Underscore.name() => {
                self.bump(); // `_`
                Ok(Some(ident.gensym()))
            }
            _ => self.parse_ident().map(Some),
        }
    } else {
        Ok(None)
    }
}

/// Parses a source module as a crate. This is the main
/// entry point for the parser.
pub fn parse_crate_mod(&mut self) -> PResult<'a, Crate> {
    let lo = self.span;
    Ok(ast::Crate {
        attrs: self.parse_inner_attributes()?,
        module: self.parse_mod_items(&token::Eof, lo)?,
        span: lo.to(self.span),
    })
}

/// Consumes and returns a string literal token (cooked or raw) if the
/// current token is one; returns `None` without bumping otherwise.
pub fn parse_optional_str(&mut self) -> Option<(Symbol, ast::StrStyle, Option<ast::Name>)> {
    let ret = match self.token {
        token::Literal(token::Str_(s), suf) => (s, ast::StrStyle::Cooked, suf),
        token::Literal(token::StrRaw(s, n), suf) => (s, ast::StrStyle::Raw(n), suf),
        _ => return None
    };
    self.bump();
    Some(ret)
}

/// Like `parse_optional_str` but mandatory: errors with "expected string
/// literal" when absent, and rejects literal suffixes.
pub fn parse_str(&mut self) -> PResult<'a, (Symbol, StrStyle)> {
    match self.parse_optional_str() {
        Some((s, style, suf)) => {
            let sp = self.prev_span;
            self.expect_no_suffix(sp, "string literal", suf);
            Ok((s, style))
        }
        _ => {
            let msg = "expected string literal";
            let mut err = self.fatal(msg);
            err.span_label(self.span, msg);
            Err(err)
        }
    }
}
}
#![allow(dead_code)]

use std::mem;
use std::collections::{HashMap};
use parse::lexer::*;
use parse::tokens::*;
use ast::{Stmt, Expr, Block, TType, Local, Decl, OptionalTypeExprTupleList, OptionalParamInfoList};
use ast::Stmt::*;
use ast::Expr::*;
use ast::TType::*;
use ast::Decl::*;
//use ast::*;
use ptr::{B};
//use ast::{Expr, Stmt};

type BlockStack = Vec<Block>;

/// Hand-written recursive-descent parser driven by `Lexer`; state is
/// threaded through `lexer.curr_token` / `lexer.curr_string`.
pub struct Parser{
    lexer : Lexer,
    // Stack of lexical blocks; exactly one (the main block) should remain
    // when parsing finishes.
    block_stack : BlockStack,
    // Tracks open '(' for sequence expressions.
    paren_stack : Vec<char>,
    // Accumulates expressions of the sequence currently being parsed.
    seq_expr_list : Vec<B<Expr>>,
    // Type of the most recently parsed expression in the sequence.
    last_expr_type : Option<TType>
}

impl Parser{
    pub fn new(src : String)->Self{
        Parser {
            lexer : Lexer::new(src),
            block_stack : BlockStack::new(),
            paren_stack : Vec::new(),
            seq_expr_list : Vec::new(),
            last_expr_type : None
        }
    }

    /// Primes the lexer with the first character and token.
    pub fn start_lexer(&mut self){
        self.lexer.get_char();
        self.lexer.get_token();
    }

    /// Runs the parse; returns the main block, or `None` when it is empty.
    pub fn run(& mut self)->Option<Block>{
        self.parse_block()
        //self.block.generate();
    }

    fn parse_block(& mut self)->Option<Block>{
        //let mut b = Block::new();
        //self.block_stack.push(b);
        self.lexer.get_char();
        self.program(); //begin parsing
        debug_assert!(self.block_stack.len() == 1, "Only parent block should be on the stack when the parsing is finished");
        let mut main_block = self.block_stack.pop().unwrap();
        //main_block.generate();
        //if main_block.statements.len() == 0{
        if !main_block.expr.is_some(){
            None
        }
        else{
            Some(main_block)
        }
    }

    /// Top-level loop: dispatches on the next token until Eof/End.
    // NOTE(review): assumes a parent block is already on `block_stack`
    // before an expression is stored — confirm against callers.
    fn program(&mut self){
        loop{
            match self.lexer.get_token(){
                //FIXME semicolon handling should change:
                Token::SemiColon => continue,
                Token::Nil |
                Token::Number |
                Token::LeftParen |
                Token::Minus |
                Token::If |
                Token::While |
                Token::For |
                Token::Break |
                Token::Let |
                Token::Function |
                Token::Ident |
                Token::TokString => {
                    let expr = Some(self.expr().unwrap().1);
                    self.block_stack.last_mut().unwrap().expr = expr;
                    //FIXME should we break?
                    break;
                },
                /*Token::Do => {
                    debug_assert!(self.block_stack.len() > 0, "No parent block on the stack");
                    self.block_stack.push(Block::new());
                    self.expr();
                    if self.lexer.curr_token == Token::End{
                        //TODO make sure we track all block openings
                        let block = self.block_stack.pop().unwrap();
                        let mut curr_block = self.block_stack.last_mut().unwrap();
                        //curr_block.statements.push(Self::mk_block_stmt(block));
                    }
                },*/
                Token::Eof => {return},
                Token::End => {
                    //TODO block stack pop
                    return
                    //continue;
                },
                _ => {panic!("Invalid token");}
            }
        }
    }

    /// Parses one expression starting at `lexer.curr_token`; returns its
    /// inferred type together with the boxed AST node.
    //FIXME temporarily pub for integration testing
    pub fn expr(&mut self) -> Option<(TType, B<Expr>)> {
        match self.lexer.curr_token{
            Token::Nil => {
                Some((TNil, B(NilExpr)))
            },
            Token::Number => {
                return self.parse_num_expr()
                //B(NumExpr(self.lexer.curr_string.clone().parse::<i32>().unwrap()))
            },
            Token::Ident => {
                return self.parse_ident_expr()
            },
            Token::TokString => {
                return self.parse_string_expr()
            },
            Token::Let =>{
                return self.parse_let_expr()
            },
            // Token::Function => {
            //     return self.parse_function_decl()
            // },
            Token::LeftParen => { //seqexpr
                // Parse a parenthesised sequence `(e1; e2; ...)`; the
                // sequence's type is that of its last expression.
                self.paren_stack.push('(');
                while self.lexer.get_token() != Token::RightParen {
                    if self.lexer.curr_token == Token::SemiColon {
                        continue;
                    }
                    if self.lexer.curr_token == Token::Eof {break;}
                    let optional_expr = self.expr();
                    if optional_expr.is_some() {
                        let (ty, e) = optional_expr.unwrap();
                        self.seq_expr_list.push(e);
                        self.last_expr_type = Some(ty);
                    }
                    //check closing paren here because self.expr() above could have curr_token set to it
                    if self.lexer.curr_token == Token::RightParen{
                        break;
                    }
                }
                if self.lexer.curr_token == Token::Eof {
                    panic!("Unexpected eof encountered");
                }
                self.paren_stack.pop();
                if !self.paren_stack.is_empty() {
                    panic!("Missing ')'");
                }
                // Move the accumulated sequence out, resetting state for
                // the next sequence expression.
                let last_type = mem::replace(&mut self.last_expr_type, None);
                let expr_list = mem::replace(&mut self.seq_expr_list, Vec::new());
                Some((last_type.unwrap(), B(SeqExpr(Some(expr_list)))))
            },
            Token::If => {
                return self.parse_if_then_else_expr()
            },
            Token::While => {
                return self.parse_while_expr()
            },
            Token::For => {
                return self.parse_for_expr()
            },
            // Token::RightParen => {
            //     if self.paren_stack.is_empty(){
            //         panic!("Mismatched parenthesis");
            //     }
            //     self.paren_stack.pop();
            //     //TODO mem::replace self.seq_expr_list with Vec::new and assign it to SeqExpr
            //     Some(B(SeqExpr(None)))
            // },
            Token::End => panic!("Unexpected 'end'. Expected an expr."),
            _ =>panic!("FIXME: handle more patterns")
        }
    }

    /// Parses `let <decls> in <expr>`; the let-expression takes the type
    /// of the body expression following `in`.
    fn parse_let_expr(&mut self) -> Option<(TType, B<Expr>)>{
        let mut b = Block::new();
        //set parent-child relationship
        self.block_stack.push(b);
        let mut decls : Vec<Decl> = Vec::new();
        loop{
            match self.lexer.get_token() {
                Token::Type => { //typedec
                    self.parse_type_decl(&mut decls);
                },
                Token::Var => { //Vardec
                    self.parse_var_decl(&mut decls);
                },
                Token::Function => { //functiondec
                    self.parse_function_decl(&mut decls);
                },
                //FIXME probably all these following guards are useless?
                Token::In => break,
                //FIXME Eof occurrence is an error
                Token::Eof => break,
                //FIXME End occurrence is an error
                Token::End => break,
                _ => panic!("Unexpected token. Expected a declaration or 'in'")
            }
            //this is needed because a var decl parse can set the curr_token to 'in'
            if self.lexer.curr_token == Token::In{
                break;
            }
        }//let loop ends
        let (_ty, _expr) = if self.lexer.curr_token == Token::In{
            //FIXME get the list of exprs and the type of the last expr in the list
            self.lexer.get_token();
            let expr = self.expr();
            debug_assert!(expr.is_some(), "expr expected after 'in'");
            expr.unwrap()
        }
        else{
            panic!("Expected 'in' after declarations");
        };
        return Some((_ty, B(LetExpr(decls, Some(_expr)))))
    }

    /// Parses `type <id> = <int|string|type-id|array of ...|{...}>`.
    // NOTE(review): the `array of` and record (`{`) arms parse but push no
    // declaration — looks unfinished; confirm intended behavior.
    fn parse_type_decl(&mut self, decls : &mut Vec<Decl>){
        match self.lexer.get_token() {
            Token::Ident => {
                let id = self.lexer.curr_string.clone();
                match self.lexer.get_token(){
                    Token::Equals => {
                        match self.lexer.get_token(){
                            Token::Int => decls.push(TypeDec(id, TInt32)),
                            Token::TokString => decls.push(TypeDec(id, TString)),
                            Token::Ident => decls.push(TypeDec(id, TCustom(self.lexer.curr_string.clone()))),
                            Token::Array => {
                                match self.lexer.get_token() {
                                    Token::Of => {
                                        match self.lexer.get_token() {
                                            Token::Int => {},
                                            Token::TokString => {},
                                            Token::Ident => {},
                                            _ => panic!("Expected either int, string or type-id")
                                        }
                                    },
                                    _ => panic!("Expected 'of' after 'array'")
                                }
                            },
                            Token::LeftCurly => { //rectype
                            },
                            _ => panic!("Expected either int, string, type-id, array of, '{' after '='")
                        }
                    },
                    _ => panic!("Expected '=' after type-id")
                }
            },
            _ => panic!("Expected identifier after 'type'")
        }
    }

    /// Parses `var <id> : <type> := <expr>` and records the binding in the
    /// current block's symbol table (int case only; string rhs is parsed
    /// but not recorded).
    fn parse_var_decl(&mut self, decls : &mut Vec<Decl>){
        match self.lexer.get_token() {
            Token::Ident => {
                let id = self.lexer.curr_string.clone();
                match self.lexer.get_token() {
                    Token::Colon => {
                        match self.lexer.get_token() {
                            Token::Int => {
                                match self.lexer.get_token(){
                                    Token::ColonEquals => {
                                        //get rhs expr and its type
                                        let (ty, expr) = self.get_nxt_and_parse();
                                        self.block_stack.last_mut().unwrap().sym_tab.borrow_mut().insert(id.clone(), ty);
                                        decls.push(VarDec(id.clone(), TInt32, expr));
                                    },
                                    _ => panic!("Expected ':='")
                                }
                            },
                            Token::TokString => {
                                match
                                    self.lexer.get_token(){
                                    Token::ColonEquals => {
                                        // NOTE(review): string rhs is parsed but
                                        // no VarDec is pushed — looks unfinished.
                                        self.expr();
                                    },
                                    _ => panic!("Expected ':='")
                                }
                            },
                            _ => panic!("expr : pattern not covered")
                        }
                    },
                    _ => panic!("Expected ':' after identifier")
                }
            },
            _ => panic!("Expected an identifier")
        }
    }

    /// Parses an expression starting with an identifier: a call
    /// `id(args)`, an addition `id + expr`, or a bare reference.
    /// Returns `TVoid` where the real type must come from a later
    /// type-checking pass.
    fn parse_ident_expr(&mut self) -> Option<(TType, B<Expr>)>{
        //check if symbol defined in the sym tab
        //if self.block_stack.last().unwrap().contains(self.lexer.curr_string)
        let op1 = B(IdExpr(self.lexer.curr_string.clone()));
        match self.lexer.get_token(){
            Token::LeftSquare => {}, //subscript
            Token::Dot => {}, //fieldexp
            Token::LeftParen => { //callexpr
                let args_list = self.parse_call_args();
                //FIXME should a marker type be used instead of TVoid to indicate that the type should be verified by the type-checker?
                match *op1 {
                    IdExpr(ref fn_name) => return Some((TVoid, B(CallExpr(fn_name.clone(), args_list)))),
                    _ => {}
                };
            },
            Token::Plus => {
                let (t, op2) = self.get_nxt_and_parse();
                //FIXME it's better to let the type-checker do the checking
                if t == TInt32{
                    return Some((TInt32, B(AddExpr(op1, op2))))
                }
                else{
                    panic!("Expected i32 as the type of rhs expression");
                }
            },
            _ => {
                //TVoid because we dont know the type of the identifier yet.
                return Some((TVoid, op1))
            }
        }
        Some((TVoid, op1))
    }

    /// Wraps the current string token into a `StringExpr` of type TString.
    fn parse_string_expr(&mut self) -> Option<(TType, B<Expr>)>{
        Some((TString, B(StringExpr(self.lexer.curr_string.clone()))))
    }

    /// Parses a numeric literal, optionally followed by `+`/`-` and a rhs
    /// expression; results are typed TInt32.
    fn parse_num_expr(&mut self) -> Option<(TType, B<Expr>)>{
        let num = self.lexer.curr_string.parse::<i32>().unwrap();
        let op1 = B(NumExpr(num));
        match self.lexer.get_token(){
            Token::Plus => {
                let (t, op2) = self.get_nxt_and_parse();
                //FIXME it's better to use a type-checker
                if t == TInt32{
                    return Some((TInt32, B(AddExpr(op1, op2))))
                }
                else{
                    panic!("Expected i32 as the type of rhs expression");
                }
            },
            Token::Minus => {
                let (t, op2) = self.get_nxt_and_parse();
                //FIXME it's better to use a type-checker
                if t == TInt32{
                    return Some((TInt32, B(SubExpr(op1, op2))))
                }
                else{
                    panic!("Expected i32 as the type of rhs expression");
                }
            },
            //FIXME ';', ')' can be a encountered as well. deal with it.
            _ => {
                return Some((TInt32, op1))
            }
        }
    }

    /// Parses `function id(params) [: type] = body` and pushes a FunDec.
    fn parse_function_decl(&mut self, decls : &mut Vec<Decl>){
        match self.lexer.get_token(){
            Token::Ident => {
                let id = self.lexer.curr_string.clone();
                //parse the parameters list
                let field_decs = self.parse_function_params_list();
                //parse return type
                let ret_type = self.parse_function_ret_type();
                //parse body here
                let e = self.expr();
                debug_assert!(e.is_some() == true, "Function body cannot be empty");
                let body = e.unwrap().1;
                //function id ( fieldDec; ) : tyId = exp
                decls.push(FunDec(id, field_decs, ret_type, body));
            },
            _ => panic!("Expected an id after 'function'")
        }
    }

    /// Parses a `(id : type, ...)` parameter list; returns `None` for a
    /// parameterless function. Panics on duplicate parameter names.
    fn parse_function_params_list(&mut self) -> OptionalParamInfoList {
        match self.lexer.get_token(){
            Token::LeftParen => {
                let mut field_decs : Vec<(String, TType)> = Vec::new();
                loop{
                    match self.lexer.get_token() {
                        Token::Comma => continue,
                        Token::RightParen => { //parameterless function
                            break;
                        },
                        Token::Eof => panic!("Unexpected eof encountered. Expected a ')' after field-declaration."),
                        Token::Ident => {
                            let id = self.lexer.curr_string.clone();
                            //FIXME should we verify duplicate params here?
//HashMap and BTreeMap do not respect the order of insertions //which is required to set up args during call. //Vec will respect the order but cost O(n) for the verification //Need multi_index kind of a structure from C++ Boost if field_decs.iter().find(|&tup| tup.0 == id).is_some(){ panic!(format!("parameter '{}' found more than once", id)); } match self.lexer.get_token() { Token::Colon => { match self.lexer.get_token() { Token::Int | Token::TokString | Token::Ident => { let ty = Self::get_ty_from_string(self.lexer.curr_string.as_str()); field_decs.push((id, ty)); }, _ => panic!("Expected type-id after ':'") } }, _ => panic!("Expected ':' after id") } }, _ => panic!("Expected a ')' or parameter id") } } return if field_decs.is_empty() {None} else {Some(field_decs)} }, _ => panic!("Expected a '(' after function id") } } fn parse_call_args(&mut self) -> OptionalTypeExprTupleList{ let mut args_list = Vec::new(); loop { match self.lexer.get_token() { Token::RightParen => break, Token::Number | Token::Ident | Token::TokString => { let e = self.expr(); if e.is_some() { args_list.push(e.unwrap()); } }, _ => { panic!("Invalid expression used as a call argument"); } //_ => panic!("Invalid expression used as a call argument") } if self.lexer.curr_token == Token::RightParen { break } } return if args_list.is_empty() {None} else {Some(args_list)} } fn parse_function_ret_type(&mut self) -> TType{ match self.lexer.get_token() { Token::Colon => { match self.lexer.get_token() { Token::Int | Token::TokString | Token::Ident => Self::get_ty_from_string(self.lexer.curr_string.as_str()), _ => panic!("Expected a type after ':'") } } Token::Equals => { self.lexer.get_token(); //eat '=' TVoid } _ => panic!("Expected ':' or '=' after the parameter list") } } fn get_ty_from_string(str_ : &str) -> TType{ match str_ { "int" => TInt32, "string" => TString, _ => TCustom(str_.to_string()) } } fn get_nxt_and_parse(&mut self) -> (TType, B<Expr>){ self.lexer.get_token(); self.expr().unwrap() } fn 
parse_while_expr(&mut self) -> Option<(TType, B<Expr>)>{
        // Parses `while <cond> do <body>`; `while` has already been seen.
        self.lexer.get_token();
        let opt_tup = self.expr().unwrap();
        //Because ident-expr parsing advances to the next token
        //and returns a TVoid, there is an extra check on the
        //curr_token
        if opt_tup.0 != TInt32 && self.lexer.curr_token != Token::Do{
            self.lexer.get_token();
        }
        match self.lexer.curr_token {
            Token::Do => {
                self.lexer.get_token();
                let (ty, body) = self.expr().unwrap();
                // The while-expression's type is taken from its body.
                Some((ty, B(WhileExpr(opt_tup.1, body))))
            },
            _ => panic!("Expected 'do' after the while expression")
        }
    }

    // Parses `if <cond> then <expr> [else <expr>]`. Returns TVoid for both
    // forms; type agreement of the two branches is left to a later phase.
    fn parse_if_then_else_expr(&mut self) -> Option<(TType, B<Expr>)>{
        //eat 'if'
        self.lexer.get_token();
        //parse the conditional expr
        let opt_tup = self.expr().unwrap();
        //since only arithmetic expr parsing advances to point to the next token,
        //we do a typecheck in order to determine if we match on the curr_token
        //or call get_token()
        if opt_tup.0 != TInt32 && self.lexer.curr_token != Token::Then{
            self.lexer.get_token();
        }
        match self.lexer.curr_token {
            Token::Then => {
                self.lexer.get_token(); //advance to the next token
                let (_, then_expr) = self.expr().unwrap();
                match self.lexer.curr_token {
                    Token::Else => {
                        self.lexer.get_token(); //advance to the next token
                        let (_, else_body) = self.expr().unwrap();
                        return Some((TVoid, B(IfThenElseExpr(opt_tup.1, then_expr, else_body))))
                    }
                    _ => {} //FIXME this isn't an if-then-else expr. should we do something here?
}
                // No 'else' followed: fall through to a plain if-then.
                Some((TVoid, B(IfThenExpr(opt_tup.1, then_expr))))
            },
            _ => panic!("Expected then after the if expression")
        }
    }

    // Parses `for <id> := <from> to <to> do <body>` into a ForExpr.
    // Panics with a specific message at each missing keyword.
    fn parse_for_expr(&mut self) -> Option<(TType, B<Expr>)>{
        match self.lexer.get_token(){
            Token::Ident => {
                let id = self.lexer.curr_string.clone();
                match self.lexer.get_token(){
                    Token::ColonEquals => {
                        self.lexer.get_token();
                        let (_, id_expr) = self.expr().unwrap();
                        match self.lexer.curr_token{
                            Token::To => {
                                self.lexer.get_token();
                                let (_, to_expr) = self.expr().unwrap();
                                match self.lexer.curr_token{
                                    Token::Do => {
                                        self.lexer.get_token();
                                        let (_, do_expr) = self.expr().unwrap();
                                        return Some((TVoid, B(ForExpr(id, id_expr, to_expr, do_expr))))
                                    },
                                    _ => panic!("Expected 'do' after expression")
                                }
                            },
                            _ => panic!("Expected 'to' after expression")
                        }
                    },
                    _ => panic!("Expected := after ident in a for construct")
                }
            },
            _ => panic!("Expected an ident after 'for'")
        }
    }
}

// --- Unit tests: function declarations and call expressions ---

// A parameterless declaration produces one FunDec with a TVoid return type.
#[test]
fn test_func_decl_no_params() {
    let mut p = Parser::new("function foo()=print(\"ab\")".to_string());
    p.start_lexer();
    let mut decls = Vec::new();
    p.parse_function_decl(&mut decls);
    assert_eq!(decls.len(), 1);
    match &decls[0]{
        &FunDec(ref name, _, ref ty, ref b_expr) => {
            assert_eq!(String::from("foo"), *name);
            assert_eq!(TVoid, *ty);
            match &**b_expr {
                &CallExpr(ref name, _) => assert_eq!(String::from("print"), *name),
                _ => {}
            }
        },
        _ => {}
    }
}

// Duplicate parameter names must be rejected during parsing.
#[test]
#[should_panic(expected="parameter 'a' found more than once")]
fn test_parse_function_params_list_duplicate_params() {
    let mut p = Parser::new("foo(a:int, a:int)".to_string());
    p.start_lexer();
    p.parse_function_params_list();
}

// A call with one numeric argument parses as CallExpr with a TInt32 arg.
#[test]
fn test_parse_call_expr_num_expr(){
    let mut p = Parser::new("f(1)".to_string());
    p.start_lexer();
    let tup = p.expr();
    assert_eq!(tup.is_some(), true);
    let (ty, b_expr) = tup.unwrap();
    assert_eq!(ty, TVoid);
    match *b_expr {
        CallExpr(ref n, ref type_expr_lst) => {
            assert_eq!(n, "f");
            assert_eq!(type_expr_lst.is_some(), true);
            match type_expr_lst{
                &Some(ref l) => {
                    assert_eq!(l.len(), 1);
                    let (ref ty, ref b_expr) =
l[0usize]; assert_eq!(*ty, TInt32); match &**b_expr { &NumExpr(ref n) => assert_eq!(*n, 1), _ => {} } }, _ => {} } }, _ => {} } }
// A call with a single identifier argument: argument type is TVoid (unknown until type-check).
#[test] fn test_parse_call_expr_ident_expr(){ let mut p = Parser::new("f(abc)".to_string()); p.start_lexer(); let tup = p.expr(); assert_eq!(tup.is_some(), true); let (ty, b_expr) = tup.unwrap(); assert_eq!(ty, TVoid); match *b_expr { CallExpr(ref n, ref type_expr_lst) => { assert_eq!(n, "f"); assert_eq!(type_expr_lst.is_some(), true); match type_expr_lst{ &Some(ref l) => { assert_eq!(l.len(), 1); let (ref ty, ref b_expr) = l[0usize]; assert_eq!(*ty, TVoid); match &**b_expr { &IdExpr(ref id) => assert_eq!(*id, "abc"), _ => {} } }, _ => {} } }, _ => {} } }
// A lone string literal leaves the lexer positioned on TokString.
#[test] fn test_only_string_expr() { let mut p = Parser::new("\"abc\"".to_string()); p.start_lexer(); assert_eq!(p.lexer.curr_token, Token::TokString); }
// A call with one string argument carries a TString-typed StringExpr.
#[test] fn test_parse_call_expr_string_arg(){ let mut p = Parser::new("f(\"abc\")".to_string()); p.start_lexer(); let tup = p.expr(); assert_eq!(tup.is_some(), true); let (ty, b_expr) = tup.unwrap(); assert_eq!(ty, TVoid); match *b_expr { CallExpr(ref n, ref type_expr_lst) => { assert_eq!(n, "f"); assert_eq!(type_expr_lst.is_some(), true); match type_expr_lst{ &Some(ref l) => { assert_eq!(l.len(), 1); let (ref ty, ref b_expr) = l[0usize]; assert_eq!(*ty, TString); match &**b_expr { &StringExpr(ref value) => assert_eq!(*value, "abc"), _ => {} } }, _ => {} } }, _ => {} } }
// Mixed number + identifier arguments: second arg is the TVoid IdExpr.
#[test] fn test_parse_call_expr_inum_ident_exprs(){ let mut p = Parser::new("f(1, abc)".to_string()); p.start_lexer(); let tup = p.expr(); assert_eq!(tup.is_some(), true); let (ty, b_expr) = tup.unwrap(); assert_eq!(ty, TVoid); match *b_expr { CallExpr(ref n, ref type_expr_lst) => { assert_eq!(n, "f"); assert_eq!(type_expr_lst.is_some(), true); match type_expr_lst{ &Some(ref l) => { assert_eq!(l.len(), 2); let (ref ty, ref b_expr) = l[1usize]; assert_eq!(*ty, TVoid); match &**b_expr { &IdExpr(ref id) => assert_eq!(*id, "abc"), _ => {} } }, _ => {} } }, _ => {} } }
// An arithmetic argument parses to a nested AddExpr of the two literals.
#[test] fn test_parse_call_expr_add_expr(){ let mut p = Parser::new("f(1+2)".to_string()); p.start_lexer(); let tup = p.expr(); assert_eq!(tup.is_some(), true); let (ty, b_expr) = tup.unwrap(); assert_eq!(ty, TVoid); match *b_expr { CallExpr(ref n, ref type_expr_lst) => { assert_eq!(n, "f"); assert_eq!(type_expr_lst.is_some(), true); match type_expr_lst{ &Some(ref l) => { assert_eq!(l.len(), 1); let (ref ty, ref b_expr) = l[0usize]; assert_eq!(*ty, TInt32); match &**b_expr { &AddExpr(ref op1, ref op2) => { match &**op1 { &NumExpr(ref n) => assert_eq!(*n, 1), _ => {} } match &**op2 { &NumExpr(ref n) => assert_eq!(*n, 2), _ => {} } }, _ => {} } }, _ => {} } }, _ => {} } }
// A call with no arguments still yields a CallExpr.
#[test] fn test_parse_call_expr_no_args(){ let mut p = Parser::new("f()".to_string()); p.start_lexer(); let tup = p.expr(); assert_eq!(tup.is_some(), true); let (ty, b_expr) = tup.unwrap(); assert_eq!(ty, TVoid); match *b_expr { CallExpr(ref n, _) => assert_eq!(n, "f"), _ => {} } }
// --- Return-type annotation parsing ---
#[test] fn test_parse_func_ret_type_void(){ let mut p = Parser::new(")=".to_string()); p.start_lexer(); let ty = p.parse_function_ret_type(); assert_eq!(ty, TVoid); }
#[test] fn test_parse_func_ret_type_int(){ let mut p = Parser::new(") : int =".to_string()); p.start_lexer(); let ty = p.parse_function_ret_type(); assert_eq!(ty, TInt32); }
#[test] fn test_parse_func_ret_type_string(){ let mut p = Parser::new(") : string =".to_string()); p.start_lexer(); let ty = p.parse_function_ret_type(); assert_eq!(ty, TString); }
#[test] fn test_parse_func_ret_type_custom(){ let mut p = Parser::new(") : custom =".to_string()); p.start_lexer(); let ty = p.parse_function_ret_type(); assert_eq!(ty, TCustom("custom".to_string())); }
// --- Parameter-list (field declaration) parsing ---
#[test] fn test_field_decs_none(){ let mut p = Parser::new("f()".to_string()); p.start_lexer(); let m = p.parse_function_params_list(); assert_eq!(m, None); }
#[test] fn test_field_decs_one_dec(){ let mut p = Parser::new("f(a: int)".to_string()); p.start_lexer(); let m = p.parse_function_params_list(); assert_eq!(m.is_some(), true); assert_eq!(m.unwrap().len(), 1); }
#[test] fn test_field_decs_two_decs(){ let mut p = Parser::new("f(a: int, b:int)".to_string()); p.start_lexer(); let m = p.parse_function_params_list(); assert_eq!(m.is_some(), true); assert_eq!(m.unwrap().len(), 2); }
#[test] fn test_field_decs_two_decs_int_string(){ let mut p = Parser::new("f(a: int, b:string)".to_string()); p.start_lexer(); let m = p.parse_function_params_list().unwrap(); assert_eq!(m.len(), 2); assert_eq!(m[0].1, TType::TInt32); assert_eq!(m[1].1, TType::TString); }
#[test] fn test_field_decs_one_dec_with_alias(){ let mut p = Parser::new("f(a: myint)".to_string()); p.start_lexer(); let m = p.parse_function_params_list().unwrap(); assert_eq!(m[0].1, TType::TCustom("myint".to_string())); }
#[test] #[should_panic(expected="Unexpected eof encountered. Expected a ')' after field-declaration.")] fn test_field_decs_no_closing_paren(){ let mut p = Parser::new("f(a: myint".to_string()); p.start_lexer(); p.parse_function_params_list(); }
// --- let-expression parsing ---
#[test] fn test_let_var_decl_returns_block() { let mut p = Parser::new("let var a : int := 1 in 1+1 end".to_string()); assert_eq!(p.run().is_some(), true); }
#[test] fn test_let_var_decl_returns_let_expr() { let mut p = Parser::new("let var a : int := 1 in a end".to_string()); let b = p.run().unwrap(); match *b.expr.unwrap(){ LetExpr(ref v, ref o) => { assert_eq!(v.len(), 1); assert_eq!(o.is_some(), true); match v[0]{ VarDec(ref id, ref ty, ref e) => { assert_eq!(*id, "a".to_string()); match **e{ //**e means deref deref B<T> which results in T
NumExpr(ref n) => assert_eq!(1, *n), _ => {} } }, _ => {} } }, _ => {} } }
// The declared variable's type is recorded in the block's symbol table.
#[test] fn test_let_var_decl_sym_tab_count() { let mut p = Parser::new("let var a : int := 1 in a end".to_string()); let b = p.run().unwrap(); assert_eq!(b.sym_tab.borrow().len(), 1); assert_eq!(b.sym_tab.borrow().get(&"a".to_string()), Some(&TInt32)); }
// `1 + 3 + 1` parses right-associated: Add(1, Add(3, 1)).
#[test] fn test_let_add_expr() { let mut p = Parser::new("let var a : int := 1 + 3 + 1 in
a end".to_string()); let b = p.run().unwrap(); match *b.expr.unwrap(){ LetExpr(ref v, ref o) => { assert_eq!(v.len(), 1); assert_eq!(o.is_some(), true); match v[0]{ VarDec(ref id, ref ty, ref e) => { assert_eq!(*id, "a".to_string()); match **e{ //**e means deref deref B<T> which results in T
AddExpr(ref e1, ref e2) => { match **e1{ NumExpr(ref n) => assert_eq!(*n, 1), _ => panic!("num expr expected") } match **e2{ AddExpr(ref e1, ref e2) => { match **e1{ NumExpr(ref n) => assert_eq!(*n, 3), _ => panic!("num expr expected") } match **e2{ NumExpr(ref n) => assert_eq!(*n, 1), _ => panic!("num expr expected") } }, _ => panic!("add expr expected") } }, _ => panic!("add expr expected") } }, _ => panic!("ver decl expected") } }, _ => panic!("let expr expected") } }
#[test] fn test_parse_2_vars_in_let() { let mut p = Parser::new("let var a : int := 1\nvar b : int:=2\n in b end".to_string()); let b = p.run().unwrap(); match *b.expr.unwrap(){ LetExpr(ref v, ref o) => { assert_eq!(v.len(), 2); }, _ => {} } }
// --- Sequence-expression parsing: type of the sequence is the last expr's type ---
#[test] fn test_1_seq_expr_able_to_parse() { let mut p = Parser::new("(1;)".to_string()); p.start_lexer(); assert_eq!(p.expr().is_some(), true); }
#[test] fn test_1_seq_expr_last_type_int() { let mut p = Parser::new("(1;)".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ SeqExpr(ref o) => { assert_eq!(o.as_ref().unwrap().len(), 1); match *o.as_ref().unwrap()[0]{ NumExpr(ref n) => { assert_eq!(*n, 1); }, _ => {} } }, _ => panic!("Invalid expr") } }
#[test] fn test_1_seq_expr_last_type_void() { let mut p = Parser::new("(a;)".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); assert_eq!(ty, TVoid); }
#[test] fn test_2_seq_exprs_last_type_void() { let mut p = Parser::new("(1;a;)".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); assert_eq!(ty, TVoid); }
#[test] fn test_2_seq_exprs_last_type_int() { let mut p = Parser::new("(a;1;)".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); assert_eq!(ty,
TInt32); }
#[test] fn test_1_seq_expr_without_semicolon_type_int() { let mut p = Parser::new("(1)".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); assert_eq!(ty, TInt32); }
#[test] fn test_1_seq_expr_add_expr() { let mut p = Parser::new("(5+16)".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ SeqExpr(ref o) => { assert_eq!(o.as_ref().unwrap().len(), 1); match *o.as_ref().unwrap()[0]{ AddExpr(ref e1, ref e2) => { match **e1 { NumExpr(ref n) => assert_eq!(*n, 5), _ => {} } match **e2 { NumExpr(ref n) => assert_eq!(*n, 16), _ => {} } }, _ => {} } }, _ => panic!("Invalid expr") } }
// Type-name mapping: built-ins plus user-defined type names.
#[test] fn test_get_ty(){ assert_eq!(Parser::get_ty_from_string("int"), TInt32); assert_eq!(Parser::get_ty_from_string("string"), TString); assert_eq!(Parser::get_ty_from_string("index_type"), TCustom("index_type".to_string())); }
// --- if/then[/else] parsing with the various conditional-expr forms ---
#[test] fn test_if_then_expr(){ let mut p = Parser::new("if 1 then 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ IfThenExpr(ref conditional_expr, ref then_expr) => { match(**conditional_expr){ NumExpr(ref n) => assert_eq!(*n, 1), _ => {} } }, _ => {} } }
#[test] fn test_if_then_with_ident_as_conditional_expr(){ let mut p = Parser::new("if a then 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ IfThenExpr(ref conditional_expr, _) => { match(**conditional_expr){ IdExpr(ref i) => assert_eq!(*i, String::from("a")), _ => {} } }, _ => {} } }
#[test] fn test_if_then_with_add_as_conditional_expr(){ let mut p = Parser::new("if 1+1 then 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ IfThenExpr(ref conditional_expr, ref then_expr) => { match(**conditional_expr){ AddExpr(ref l, ref r) => { match **l{ NumExpr(ref n) => assert_eq!(*n, 1), _ => {} } }, _ => {} } }, _ => {} } }
// NOTE(review): a string conditional currently parses without error —
// rejecting it looks like it is deferred to a future type-check phase.
#[test] fn test_if_then_with_string_as_conditional_expr(){ let mut p = Parser::new("if \"abhi\" then 1".to_string()); p.start_lexer(); let (ty,
expr) = p.expr().unwrap(); match(*expr){ IfThenExpr(ref conditional_expr, ref then_expr) => { match(**conditional_expr){ StringExpr(ref s) => assert_eq!(*s, String::from("abhi")), _ => {} } }, _ => {} } }
#[test] fn test_if_then_else_expr(){ let mut p = Parser::new("if 1 then 1 else 0".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ IfThenElseExpr(ref conditional_expr, ref then_expr, ref else_expr) => { match(**conditional_expr){ NumExpr(ref n) => assert_eq!(*n, 1), _ => {} } match(**else_expr){ NumExpr(ref n) => assert_eq!(*n, 0), _ => {} } }, _ => {} } }
#[test] fn test_if_expr_with_string_expr_as_conditional_expr(){ let mut p = Parser::new("if \"abhi\" then 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ IfThenExpr(ref conditional_expr, _) => match(**conditional_expr) { StringExpr(ref s) => assert_eq!(*s, "abhi"), _ => panic!("This will not exhecute") }, _ => panic!("This will not execute") } }
// NOTE(review): this test panics from inside the match to satisfy
// should_panic — the parser itself does not yet perform this check.
#[test] #[should_panic(expected="Type mismatch between the then and else expressions")] fn test_if_then_else_expr_fail_string_return(){ let mut p = Parser::new("if 1 then 1 else \"abhi\"".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ IfThenElseExpr(_, _, ref else_expr) => { match(**else_expr){ StringExpr(_) => panic!("Type mismatch between the then and else expressions"), _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } }
// --- while / for parsing ---
#[test] fn test_while_expr(){ let mut p = Parser::new("while 1 do 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ WhileExpr(ref conditional_expr, ref do_expr) => { match(**conditional_expr){ NumExpr(ref n) => assert_eq!(*n, 1), _ => panic!("This will not execute") } match(**do_expr){ NumExpr(ref n) => assert_eq!(*n, 1), _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } }
#[test] fn test_while_expr_with_string_as_conditional_expr(){ let mut p =
Parser::new("while \"abhi\" do 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ WhileExpr(ref conditional_expr, ref do_expr) => { match(**conditional_expr){ StringExpr(ref s) => assert_eq!(*s, "abhi"), _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } }
#[test] fn test_while_expr_with_addexpr_as_conditional_expr(){ let mut p = Parser::new("while 1+1 do 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ WhileExpr(ref conditional_expr, ref do_expr) => { match(**conditional_expr){ AddExpr(ref l, ref r) => { match **l{ NumExpr(ref n) => assert_eq!(*n, 1), _ => {} } }, _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } }
#[test] fn test_while_expr_with_ident_as_conditional_expr(){ let mut p = Parser::new("while a do 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ WhileExpr(ref conditional_expr, ref do_expr) => { match(**conditional_expr){ IdExpr(ref id) => { assert_eq!(*id, String::from("a")); }, _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } }
#[test] fn test_for_expr(){ let mut p = Parser::new("for id:= 1 to 10 do 1+1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ ForExpr(ref id, ref from_expr, ref to_expr, ref do_expr) => { assert_eq!(*id, String::from("id")); match(**from_expr){ NumExpr(ref n) => assert_eq!(*n, 1), _ => panic!("This will not execute") } match(**to_expr){ NumExpr(ref n) => assert_eq!(*n, 10), _ => panic!("This will not execute") } match(**do_expr){ AddExpr(ref l, ref r) => { match(**l){ NumExpr(ref n) => assert_eq!(*n, 1), _ => {} } }, _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } }
#[test] fn test_for_expr_with_ident_as_from_expr(){ let mut p = Parser::new("for id:= a to 10 do 1+1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ ForExpr(ref id, ref
from_expr, _, _) => { match(**from_expr){ IdExpr(ref i) => assert_eq!(*i, String::from("a")), _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } }
#[test] fn test_for_expr_with_ident_as_to_and_from_expr(){ let mut p = Parser::new("for id:= a to b do 1+1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ ForExpr(ref id, ref from_expr, ref to_expr, _) => { match(**to_expr){ IdExpr(ref i) => assert_eq!(*i, String::from("b")), _ => panic!("This will not execute") } match(**from_expr){ IdExpr(ref i) => assert_eq!(*i, String::from("a")), _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } }
Remove unnecessary refs for numbers in the match guards
#![allow(dead_code)]
use std::mem;
use std::collections::{HashMap};
use parse::lexer::*;
use parse::tokens::*;
use ast::{Stmt, Expr, Block, TType, Local, Decl, OptionalTypeExprTupleList, OptionalParamInfoList};
use ast::Stmt::*;
use ast::Expr::*;
use ast::TType::*;
use ast::Decl::*;
//use ast::*;
use ptr::{B};
//use ast::{Expr, Stmt};
// Stack of lexical blocks; the innermost block is the last element.
type BlockStack = Vec<Block>;
// Recursive-descent parser: owns the lexer and tracks open blocks,
// open parentheses, the exprs of the current sequence-expression,
// and the type of the most recently parsed expression.
pub struct Parser{
    lexer : Lexer,
    block_stack : BlockStack,
    paren_stack : Vec<char>,
    seq_expr_list : Vec<B<Expr>>,
    last_expr_type : Option<TType>
}
impl Parser{
    // Builds a parser over the given source text.
    pub fn new(src : String)->Self{
        Parser {
            lexer : Lexer::new(src),
            block_stack : BlockStack::new(),
            paren_stack : Vec::new(),
            seq_expr_list : Vec::new(),
            last_expr_type : None
        }
    }
    // Primes the lexer: read the first character and the first token.
    pub fn start_lexer(&mut self){
        self.lexer.get_char();
        self.lexer.get_token();
    }
    // Entry point: parse the whole program into a Block, if any.
    pub fn run(& mut self)->Option<Block>{
        self.parse_block()
        //self.block.generate();
    }
    fn parse_block(& mut self)->Option<Block>{
        //let mut b = Block::new();
        //self.block_stack.push(b);
        self.lexer.get_char();
        self.program(); //begin parsing
        debug_assert!(self.block_stack.len() == 1, "Only parent block should be on the stack when the parsing is finished");
        let mut main_block = self.block_stack.pop().unwrap();
        //main_block.generate();
        //if main_block.statements.len() == 0{
if !main_block.expr.is_some(){
            None
        }
        else{
            Some(main_block)
        }
    }
    // Top-level parse loop: dispatch on the first token of the program and
    // store the resulting expression on the current (root) block.
    fn program(&mut self){
        loop{
            match self.lexer.get_token(){
                //FIXME semicolon handling should change:
                Token::SemiColon => continue,
                Token::Nil | Token::Number | Token::LeftParen | Token::Minus | Token::If | Token::While | Token::For | Token::Break | Token::Let | Token::Function | Token::Ident | Token::TokString => {
                    let expr = Some(self.expr().unwrap().1);
                    self.block_stack.last_mut().unwrap().expr = expr;
                    //FIXME should we break?
                    break;
                },
                /*Token::Do => {
                    debug_assert!(self.block_stack.len() > 0, "No parent block on the stack");
                    self.block_stack.push(Block::new());
                    self.expr();
                    if self.lexer.curr_token == Token::End{
                        //TODO make sure we track all block openings
                        let block = self.block_stack.pop().unwrap();
                        let mut curr_block = self.block_stack.last_mut().unwrap();
                        //curr_block.statements.push(Self::mk_block_stmt(block));
                    }
                },*/
                Token::Eof => {return},
                Token::End => {
                    //TODO block stack pop
                    return
                    //continue;
                },
                _ => {panic!("Invalid token");}
            }
        }
    }
    // Parses one expression starting at the current token, returning its
    // (type, AST) pair. Dispatches to the specialized parse_* methods.
    //FIXME temporarily pub for integration testing
    pub fn expr(&mut self) -> Option<(TType, B<Expr>)> {
        match self.lexer.curr_token{
            Token::Nil => {
                Some((TNil, B(NilExpr)))
            },
            Token::Number => {
                return self.parse_num_expr()
                //B(NumExpr(self.lexer.curr_string.clone().parse::<i32>().unwrap()))
            },
            Token::Ident => {
                return self.parse_ident_expr()
            },
            Token::TokString => {
                return self.parse_string_expr()
            },
            Token::Let =>{
                return self.parse_let_expr()
            },
            // Token::Function => {
            //     return self.parse_function_decl()
            // },
            Token::LeftParen => { //seqexpr
                self.paren_stack.push('(');
                while self.lexer.get_token() != Token::RightParen {
                    if self.lexer.curr_token == Token::SemiColon { continue; }
                    if self.lexer.curr_token == Token::Eof {break;}
                    let optional_expr = self.expr();
                    if optional_expr.is_some() {
                        let (ty, e) = optional_expr.unwrap();
                        self.seq_expr_list.push(e);
                        self.last_expr_type = Some(ty);
                    }
                    //check closing paren here because self.expr() above could have curr_token
set to it
                    if self.lexer.curr_token == Token::RightParen{
                        break;
                    }
                }
                if self.lexer.curr_token == Token::Eof {
                    panic!("Unexpected eof encountered");
                }
                self.paren_stack.pop();
                if !self.paren_stack.is_empty() {
                    panic!("Missing ')'");
                }
                // Take ownership of the accumulated sequence state, resetting
                // it for the next sequence-expression.
                let last_type = mem::replace(&mut self.last_expr_type, None);
                let expr_list = mem::replace(&mut self.seq_expr_list, Vec::new());
                Some((last_type.unwrap(), B(SeqExpr(Some(expr_list)))))
            },
            Token::If => {
                return self.parse_if_then_else_expr()
            },
            Token::While => {
                return self.parse_while_expr()
            },
            Token::For => {
                return self.parse_for_expr()
            },
            // Token::RightParen => {
            //     if self.paren_stack.is_empty(){
            //         panic!("Mismatched parenthesis");
            //     }
            //     self.paren_stack.pop();
            //     //TODO mem::replace self.seq_expr_list with Vec::new and assign it to SeqExpr
            //     Some(B(SeqExpr(None)))
            // },
            Token::End => panic!("Unexpected 'end'. Expected an expr."),
            _ =>panic!("FIXME: handle more patterns")
        }
    }
    // Parses `let <decls> in <expr> end`: pushes a fresh block, collects
    // type/var/function declarations until 'in', then parses the body.
    fn parse_let_expr(&mut self) -> Option<(TType, B<Expr>)>{
        let mut b = Block::new();
        //set parent-child relationship
        self.block_stack.push(b);
        let mut decls : Vec<Decl> = Vec::new();
        loop{
            match self.lexer.get_token() {
                Token::Type => { //typedec
                    self.parse_type_decl(&mut decls);
                },
                Token::Var => { //Vardec
                    self.parse_var_decl(&mut decls);
                },
                Token::Function => { //functiondec
                    self.parse_function_decl(&mut decls);
                },
                //FIXME probably all these following guards are useless?
                Token::In => break,
                //FIXME Eof occurrence is an error
                Token::Eof => break,
                //FIXME End occurrence is an error
                Token::End => break,
                _ => panic!("Unexpected token.
Expected a declaration or 'in'")
            }
            //this is needed because a var decl parse can set the curr_token to 'in'
            if self.lexer.curr_token == Token::In{
                break;
            }
        }//let loop ends
        let (_ty, _expr) = if self.lexer.curr_token == Token::In{
            //FIXME get the list of exprs and the type of the last expr in the list
            self.lexer.get_token();
            let expr = self.expr();
            debug_assert!(expr.is_some(), "expr expected after 'in'");
            expr.unwrap()
        }
        else{
            panic!("Expected 'in' after declarations");
        };
        return Some((_ty, B(LetExpr(decls, Some(_expr)))))
    }
    // Parses `type <id> = <type-expr>` and appends a TypeDec to decls.
    // Array-of and record forms are recognized but not yet stored.
    fn parse_type_decl(&mut self, decls : &mut Vec<Decl>){
        match self.lexer.get_token() {
            Token::Ident => {
                let id = self.lexer.curr_string.clone();
                match self.lexer.get_token(){
                    Token::Equals => {
                        match self.lexer.get_token(){
                            Token::Int => decls.push(TypeDec(id, TInt32)),
                            Token::TokString => decls.push(TypeDec(id, TString)),
                            Token::Ident => decls.push(TypeDec(id, TCustom(self.lexer.curr_string.clone()))),
                            Token::Array => {
                                match self.lexer.get_token() {
                                    Token::Of => {
                                        match self.lexer.get_token() {
                                            Token::Int => {},
                                            Token::TokString => {},
                                            Token::Ident => {},
                                            _ => panic!("Expected either int, string or type-id")
                                        }
                                    },
                                    _ => panic!("Expected 'of' after 'array'")
                                }
                            },
                            Token::LeftCurly => {
                                //rectype
                            },
                            _ => panic!("Expected either int, string, type-id, array of, '{' after '='")
                        }
                    },
                    _ => panic!("Expected '=' after type-id")
                }
            },
            _ => panic!("Expected identifier after 'type'")
        }
    }
    // Parses `var <id> : <type> := <expr>`, records the var's type in the
    // current block's symbol table, and appends a VarDec to decls.
    fn parse_var_decl(&mut self, decls : &mut Vec<Decl>){
        match self.lexer.get_token() {
            Token::Ident => {
                let id = self.lexer.curr_string.clone();
                match self.lexer.get_token() {
                    Token::Colon => {
                        match self.lexer.get_token() {
                            Token::Int => {
                                match self.lexer.get_token(){
                                    Token::ColonEquals => {
                                        //get rhs expr and its type
                                        let (ty, expr) = self.get_nxt_and_parse();
                                        self.block_stack.last_mut().unwrap().sym_tab.borrow_mut().insert(id.clone(), ty);
                                        decls.push(VarDec(id.clone(), TInt32, expr));
                                    },
                                    _ => panic!("Expected ':='")
                                }
                            },
                            Token::TokString => { match
self.lexer.get_token(){
                                    Token::ColonEquals => {
                                        // NOTE(review): unlike the int branch above, the string
                                        // initializer's result is discarded and no VarDec is
                                        // pushed — looks like an unfinished branch, confirm.
                                        self.expr();
                                    },
                                    _ => panic!("Expected ':='")
                                }
                            },
                            _ => panic!("expr : pattern not covered")
                        }
                    },
                    _ => panic!("Expected ':' after identifier")
                }
            },
            _ => panic!("Expected an identifier")
        }
    }
    // Parses an expression beginning with an identifier: a bare variable
    // reference, a call `id(args)`, or `id + expr`. Subscript ('[') and
    // field access ('.') are recognized but not yet implemented.
    fn parse_ident_expr(&mut self) -> Option<(TType, B<Expr>)>{
        //check if symbol defined in the sym tab
        //if self.block_stack.last().unwrap().contains(self.lexer.curr_string)
        let op1 = B(IdExpr(self.lexer.curr_string.clone()));
        match self.lexer.get_token(){
            Token::LeftSquare => {}, //subscript
            Token::Dot => {}, //fieldexp
            Token::LeftParen => { //callexpr
                let args_list = self.parse_call_args();
                //FIXME should a marker type be used instead of TVoid to indicate that the type should be verified by the type-checker?
                match *op1 {
                    IdExpr(ref fn_name) => return Some((TVoid, B(CallExpr(fn_name.clone(), args_list)))),
                    _ => {}
                };
            },
            Token::Plus => {
                let (t, op2) = self.get_nxt_and_parse();
                //FIXME it's better to let the type-checker do the checking
                if t == TInt32{
                    return Some((TInt32, B(AddExpr(op1, op2))))
                }
                else{
                    panic!("Expected i32 as the type of rhs expression");
                }
            },
            _ => {
                //TVoid because we dont know the type of the identifier yet.
return Some((TVoid, op1))
            }
        }
        Some((TVoid, op1))
    }

    /// Parses a string-literal expression from the lexer's current string.
    /// Always yields a `TString`-typed `StringExpr`; does not advance the lexer.
    fn parse_string_expr(&mut self) -> Option<(TType, B<Expr>)>{
        Some((TString, B(StringExpr(self.lexer.curr_string.clone()))))
    }

    /// Parses a numeric literal, optionally followed by `+` or `-` forming a
    /// binary arithmetic expression. The right-hand side must type to i32.
    /// Advances the lexer one token past the literal in all cases.
    fn parse_num_expr(&mut self) -> Option<(TType, B<Expr>)>{
        let num = self.lexer.curr_string.parse::<i32>().unwrap();
        let op1 = B(NumExpr(num));
        match self.lexer.get_token(){
            Token::Plus => {
                let (t, op2) = self.get_nxt_and_parse();
                //FIXME it's better to use a type-checker
                if t == TInt32{
                    return Some((TInt32, B(AddExpr(op1, op2))))
                }
                else{
                    panic!("Expected i32 as the type of rhs expression");
                }
            },
            Token::Minus => {
                let (t, op2) = self.get_nxt_and_parse();
                //FIXME it's better to use a type-checker
                if t == TInt32{
                    return Some((TInt32, B(SubExpr(op1, op2))))
                }
                else{
                    panic!("Expected i32 as the type of rhs expression");
                }
            },
            //FIXME ';', ')' can be a encountered as well. deal with it.
            _ => {
                return Some((TInt32, op1))
            }
        }
    }

    /// Parses `function <id> ( params ) [: ret-type] = <body>` and appends a
    /// `FunDec` to `decls`. Panics when the identifier after 'function' is missing.
    fn parse_function_decl(&mut self, decls : &mut Vec<Decl>){
        match self.lexer.get_token(){
            Token::Ident => {
                let id = self.lexer.curr_string.clone();
                //parse the parameters list
                let field_decs = self.parse_function_params_list();
                //parse return type
                let ret_type = self.parse_function_ret_type();
                //parse body here
                let e = self.expr();
                // fixed: compare directly instead of `== true`
                debug_assert!(e.is_some(), "Function body cannot be empty");
                let body = e.unwrap().1;
                //function id ( fieldDec; ) : tyId = exp
                decls.push(FunDec(id, field_decs, ret_type, body));
            },
            _ => panic!("Expected an id after 'function'")
        }
    }

    /// Parses the parenthesized parameter list of a function declaration.
    /// Returns `None` for a parameterless function, otherwise the ordered
    /// `(name, type)` pairs. Panics on EOF or malformed declarations.
    fn parse_function_params_list(&mut self) -> OptionalParamInfoList {
        match self.lexer.get_token(){
            Token::LeftParen => {
                let mut field_decs : Vec<(String, TType)> = Vec::new();
                loop{
                    match self.lexer.get_token() {
                        Token::Comma => continue,
                        Token::RightParen => {
                            //parameterless function
                            break;
                        },
                        Token::Eof => panic!("Unexpected eof encountered. Expected a ')' after field-declaration."),
                        Token::Ident => {
                            let id = self.lexer.curr_string.clone();
                            //FIXME should we verify duplicate params here?
//HashMap and BTreeMap do not respect the order of insertions //which is required to set up args during call. //Vec will respect the order but cost O(n) for the verification //Need multi_index kind of a structure from C++ Boost if field_decs.iter().find(|&tup| tup.0 == id).is_some(){ panic!(format!("parameter '{}' found more than once", id)); } match self.lexer.get_token() { Token::Colon => { match self.lexer.get_token() { Token::Int | Token::TokString | Token::Ident => { let ty = Self::get_ty_from_string(self.lexer.curr_string.as_str()); field_decs.push((id, ty)); }, _ => panic!("Expected type-id after ':'") } }, _ => panic!("Expected ':' after id") } }, _ => panic!("Expected a ')' or parameter id") } } return if field_decs.is_empty() {None} else {Some(field_decs)} }, _ => panic!("Expected a '(' after function id") } } fn parse_call_args(&mut self) -> OptionalTypeExprTupleList{ let mut args_list = Vec::new(); loop { match self.lexer.get_token() { Token::RightParen => break, Token::Number | Token::Ident | Token::TokString => { let e = self.expr(); if e.is_some() { args_list.push(e.unwrap()); } }, _ => { panic!("Invalid expression used as a call argument"); } //_ => panic!("Invalid expression used as a call argument") } if self.lexer.curr_token == Token::RightParen { break } } return if args_list.is_empty() {None} else {Some(args_list)} } fn parse_function_ret_type(&mut self) -> TType{ match self.lexer.get_token() { Token::Colon => { match self.lexer.get_token() { Token::Int | Token::TokString | Token::Ident => Self::get_ty_from_string(self.lexer.curr_string.as_str()), _ => panic!("Expected a type after ':'") } } Token::Equals => { self.lexer.get_token(); //eat '=' TVoid } _ => panic!("Expected ':' or '=' after the parameter list") } } fn get_ty_from_string(str_ : &str) -> TType{ match str_ { "int" => TInt32, "string" => TString, _ => TCustom(str_.to_string()) } } fn get_nxt_and_parse(&mut self) -> (TType, B<Expr>){ self.lexer.get_token(); self.expr().unwrap() } fn 
parse_while_expr(&mut self) -> Option<(TType, B<Expr>)>{ self.lexer.get_token(); let opt_tup = self.expr().unwrap(); //Because ident-expr parsing advances to the next token //and returns a TVoid, there is an extra check on the //curr_token if opt_tup.0 != TInt32 && self.lexer.curr_token != Token::Do{ self.lexer.get_token(); } match self.lexer.curr_token { Token::Do => { self.lexer.get_token(); let (ty, body) = self.expr().unwrap(); Some((ty, B(WhileExpr(opt_tup.1, body)))) }, _ => panic!("Expected 'do' after the while expression") } } fn parse_if_then_else_expr(&mut self) -> Option<(TType, B<Expr>)>{ //eat 'if' self.lexer.get_token(); //parse the conditional expr let opt_tup = self.expr().unwrap(); //since only arithmetic expr parsing advances to point to the next token, //we do a typecheck in order to determine if we match on the curr_token //or call get_token() if opt_tup.0 != TInt32 && self.lexer.curr_token != Token::Then{ self.lexer.get_token(); } match self.lexer.curr_token { Token::Then => { self.lexer.get_token(); //advance to the next token let (_, then_expr) = self.expr().unwrap(); match self.lexer.curr_token { Token::Else => { self.lexer.get_token(); //advance to the next token let (_, else_body) = self.expr().unwrap(); return Some((TVoid, B(IfThenElseExpr(opt_tup.1, then_expr, else_body)))) } _ => {} //FIXME this isn't an if-then-else expr. should we do something here? 
} Some((TVoid, B(IfThenExpr(opt_tup.1, then_expr)))) }, _ => panic!("Expected then after the if expression") } } fn parse_for_expr(&mut self) -> Option<(TType, B<Expr>)>{ match self.lexer.get_token(){ Token::Ident => { let id = self.lexer.curr_string.clone(); match self.lexer.get_token(){ Token::ColonEquals => { self.lexer.get_token(); let (_, id_expr) = self.expr().unwrap(); match self.lexer.curr_token{ Token::To => { self.lexer.get_token(); let (_, to_expr) = self.expr().unwrap(); match self.lexer.curr_token{ Token::Do => { self.lexer.get_token(); let (_, do_expr) = self.expr().unwrap(); return Some((TVoid, B(ForExpr(id, id_expr, to_expr, do_expr)))) }, _ => panic!("Expected 'do' after expression") } }, _ => panic!("Expected 'to' after expression") } }, _ => panic!("Expected := after ident in a for construct") } }, _ => panic!("Expected an ident after 'for'") } } } #[test] fn test_func_decl_no_params() { let mut p = Parser::new("function foo()=print(\"ab\")".to_string()); p.start_lexer(); let mut decls = Vec::new(); p.parse_function_decl(&mut decls); assert_eq!(decls.len(), 1); match &decls[0]{ &FunDec(ref name, _, ref ty, ref b_expr) => { assert_eq!(String::from("foo"), *name); assert_eq!(TVoid, *ty); match &**b_expr { &CallExpr(ref name, _) => assert_eq!(String::from("print"), *name), _ => {} } }, _ => {} } } #[test] #[should_panic(expected="parameter 'a' found more than once")] fn test_parse_function_params_list_duplicate_params() { let mut p = Parser::new("foo(a:int, a:int)".to_string()); p.start_lexer(); p.parse_function_params_list(); } #[test] fn test_parse_call_expr_num_expr(){ let mut p = Parser::new("f(1)".to_string()); p.start_lexer(); let tup = p.expr(); assert_eq!(tup.is_some(), true); let (ty, b_expr) = tup.unwrap(); assert_eq!(ty, TVoid); match *b_expr { CallExpr(ref n, ref type_expr_lst) => { assert_eq!(n, "f"); assert_eq!(type_expr_lst.is_some(), true); match type_expr_lst{ &Some(ref l) => { assert_eq!(l.len(), 1); let (ref ty, ref b_expr) = 
l[0usize]; assert_eq!(*ty, TInt32); match &**b_expr { &NumExpr(n) => assert_eq!(n, 1), _ => {} } }, _ => {} } }, _ => {} } } #[test] fn test_parse_call_expr_ident_expr(){ let mut p = Parser::new("f(abc)".to_string()); p.start_lexer(); let tup = p.expr(); assert_eq!(tup.is_some(), true); let (ty, b_expr) = tup.unwrap(); assert_eq!(ty, TVoid); match *b_expr { CallExpr(ref n, ref type_expr_lst) => { assert_eq!(n, "f"); assert_eq!(type_expr_lst.is_some(), true); match type_expr_lst{ &Some(ref l) => { assert_eq!(l.len(), 1); let (ref ty, ref b_expr) = l[0usize]; assert_eq!(*ty, TVoid); match &**b_expr { &IdExpr(ref id) => assert_eq!(*id, "abc"), _ => {} } }, _ => {} } }, _ => {} } } #[test] fn test_only_string_expr() { let mut p = Parser::new("\"abc\"".to_string()); p.start_lexer(); assert_eq!(p.lexer.curr_token, Token::TokString); } #[test] fn test_parse_call_expr_string_arg(){ let mut p = Parser::new("f(\"abc\")".to_string()); p.start_lexer(); let tup = p.expr(); assert_eq!(tup.is_some(), true); let (ty, b_expr) = tup.unwrap(); assert_eq!(ty, TVoid); match *b_expr { CallExpr(ref n, ref type_expr_lst) => { assert_eq!(n, "f"); assert_eq!(type_expr_lst.is_some(), true); match type_expr_lst{ &Some(ref l) => { assert_eq!(l.len(), 1); let (ref ty, ref b_expr) = l[0usize]; assert_eq!(*ty, TString); match &**b_expr { &StringExpr(ref value) => assert_eq!(*value, "abc"), _ => {} } }, _ => {} } }, _ => {} } } #[test] fn test_parse_call_expr_inum_ident_exprs(){ let mut p = Parser::new("f(1, abc)".to_string()); p.start_lexer(); let tup = p.expr(); assert_eq!(tup.is_some(), true); let (ty, b_expr) = tup.unwrap(); assert_eq!(ty, TVoid); match *b_expr { CallExpr(ref n, ref type_expr_lst) => { assert_eq!(n, "f"); assert_eq!(type_expr_lst.is_some(), true); match type_expr_lst{ &Some(ref l) => { assert_eq!(l.len(), 2); let (ref ty, ref b_expr) = l[1usize]; assert_eq!(*ty, TVoid); match &**b_expr { &IdExpr(ref id) => assert_eq!(*id, "abc"), _ => {} } }, _ => {} } }, _ => {} } } #[test] 
fn test_parse_call_expr_add_expr(){ let mut p = Parser::new("f(1+2)".to_string()); p.start_lexer(); let tup = p.expr(); assert_eq!(tup.is_some(), true); let (ty, b_expr) = tup.unwrap(); assert_eq!(ty, TVoid); match *b_expr { CallExpr(ref n, ref type_expr_lst) => { assert_eq!(n, "f"); assert_eq!(type_expr_lst.is_some(), true); match type_expr_lst{ &Some(ref l) => { assert_eq!(l.len(), 1); let (ref ty, ref b_expr) = l[0usize]; assert_eq!(*ty, TInt32); match &**b_expr { &AddExpr(ref op1, ref op2) => { match &**op1 { &NumExpr(n) => assert_eq!(n, 1), _ => {} } match &**op2 { &NumExpr(n) => assert_eq!(n, 2), _ => {} } }, _ => {} } }, _ => {} } }, _ => {} } } #[test] fn test_parse_call_expr_no_args(){ let mut p = Parser::new("f()".to_string()); p.start_lexer(); let tup = p.expr(); assert_eq!(tup.is_some(), true); let (ty, b_expr) = tup.unwrap(); assert_eq!(ty, TVoid); match *b_expr { CallExpr(ref n, _) => assert_eq!(n, "f"), _ => {} } } #[test] fn test_parse_func_ret_type_void(){ let mut p = Parser::new(")=".to_string()); p.start_lexer(); let ty = p.parse_function_ret_type(); assert_eq!(ty, TVoid); } #[test] fn test_parse_func_ret_type_int(){ let mut p = Parser::new(") : int =".to_string()); p.start_lexer(); let ty = p.parse_function_ret_type(); assert_eq!(ty, TInt32); } #[test] fn test_parse_func_ret_type_string(){ let mut p = Parser::new(") : string =".to_string()); p.start_lexer(); let ty = p.parse_function_ret_type(); assert_eq!(ty, TString); } #[test] fn test_parse_func_ret_type_custom(){ let mut p = Parser::new(") : custom =".to_string()); p.start_lexer(); let ty = p.parse_function_ret_type(); assert_eq!(ty, TCustom("custom".to_string())); } #[test] fn test_field_decs_none(){ let mut p = Parser::new("f()".to_string()); p.start_lexer(); let m = p.parse_function_params_list(); assert_eq!(m, None); } #[test] fn test_field_decs_one_dec(){ let mut p = Parser::new("f(a: int)".to_string()); p.start_lexer(); let m = p.parse_function_params_list(); assert_eq!(m.is_some(), 
true); assert_eq!(m.unwrap().len(), 1); } #[test] fn test_field_decs_two_decs(){ let mut p = Parser::new("f(a: int, b:int)".to_string()); p.start_lexer(); let m = p.parse_function_params_list(); assert_eq!(m.is_some(), true); assert_eq!(m.unwrap().len(), 2); } #[test] fn test_field_decs_two_decs_int_string(){ let mut p = Parser::new("f(a: int, b:string)".to_string()); p.start_lexer(); let m = p.parse_function_params_list().unwrap(); assert_eq!(m.len(), 2); assert_eq!(m[0].1, TType::TInt32); assert_eq!(m[1].1, TType::TString); } #[test] fn test_field_decs_one_dec_with_alias(){ let mut p = Parser::new("f(a: myint)".to_string()); p.start_lexer(); let m = p.parse_function_params_list().unwrap(); assert_eq!(m[0].1, TType::TCustom("myint".to_string())); } #[test] #[should_panic(expected="Unexpected eof encountered. Expected a ')' after field-declaration.")] fn test_field_decs_no_closing_paren(){ let mut p = Parser::new("f(a: myint".to_string()); p.start_lexer(); p.parse_function_params_list(); } #[test] fn test_let_var_decl_returns_block() { let mut p = Parser::new("let var a : int := 1 in 1+1 end".to_string()); assert_eq!(p.run().is_some(), true); } #[test] fn test_let_var_decl_returns_let_expr() { let mut p = Parser::new("let var a : int := 1 in a end".to_string()); let b = p.run().unwrap(); match *b.expr.unwrap(){ LetExpr(ref v, ref o) => { assert_eq!(v.len(), 1); assert_eq!(o.is_some(), true); match v[0]{ VarDec(ref id, ref ty, ref e) => { assert_eq!(*id, "a".to_string()); match **e{ //**e means deref deref B<T> which results in T NumExpr(n) => assert_eq!(1, n), _ => {} } }, _ => {} } }, _ => {} } } #[test] fn test_let_var_decl_sym_tab_count() { let mut p = Parser::new("let var a : int := 1 in a end".to_string()); let b = p.run().unwrap(); assert_eq!(b.sym_tab.borrow().len(), 1); assert_eq!(b.sym_tab.borrow().get(&"a".to_string()), Some(&TInt32)); } #[test] fn test_let_add_expr() { let mut p = Parser::new("let var a : int := 1 + 3 + 1 in a end".to_string()); let b = 
p.run().unwrap(); match *b.expr.unwrap(){ LetExpr(ref v, ref o) => { assert_eq!(v.len(), 1); assert_eq!(o.is_some(), true); match v[0]{ VarDec(ref id, ref ty, ref e) => { assert_eq!(*id, "a".to_string()); match **e{ //**e means deref deref B<T> which results in T AddExpr(ref e1, ref e2) => { match **e1{ NumExpr(n) => assert_eq!(n, 1), _ => panic!("num expr expected") } match **e2{ AddExpr(ref e1, ref e2) => { match **e1{ NumExpr(n) => assert_eq!(n, 3), _ => panic!("num expr expected") } match **e2{ NumExpr(n) => assert_eq!(n, 1), _ => panic!("num expr expected") } }, _ => panic!("add expr expected") } }, _ => panic!("add expr expected") } }, _ => panic!("ver decl expected") } }, _ => panic!("let expr expected") } } #[test] fn test_parse_2_vars_in_let() { let mut p = Parser::new("let var a : int := 1\nvar b : int:=2\n in b end".to_string()); let b = p.run().unwrap(); match *b.expr.unwrap(){ LetExpr(ref v, ref o) => { assert_eq!(v.len(), 2); }, _ => {} } } #[test] fn test_1_seq_expr_able_to_parse() { let mut p = Parser::new("(1;)".to_string()); p.start_lexer(); assert_eq!(p.expr().is_some(), true); } #[test] fn test_1_seq_expr_last_type_int() { let mut p = Parser::new("(1;)".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ SeqExpr(ref o) => { assert_eq!(o.as_ref().unwrap().len(), 1); match *o.as_ref().unwrap()[0]{ NumExpr(n) => { assert_eq!(n, 1); }, _ => {} } }, _ => panic!("Invalid expr") } } #[test] fn test_1_seq_expr_last_type_void() { let mut p = Parser::new("(a;)".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); assert_eq!(ty, TVoid); } #[test] fn test_2_seq_exprs_last_type_void() { let mut p = Parser::new("(1;a;)".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); assert_eq!(ty, TVoid); } #[test] fn test_2_seq_exprs_last_type_int() { let mut p = Parser::new("(a;1;)".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); assert_eq!(ty, TInt32); } #[test] fn 
test_1_seq_expr_without_semicolon_type_int() { let mut p = Parser::new("(1)".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); assert_eq!(ty, TInt32); } #[test] fn test_1_seq_expr_add_expr() { let mut p = Parser::new("(5+16)".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ SeqExpr(ref o) => { assert_eq!(o.as_ref().unwrap().len(), 1); match *o.as_ref().unwrap()[0]{ AddExpr(ref e1, ref e2) => { match **e1 { NumExpr(n) => assert_eq!(n, 5), _ => {} } match **e2 { NumExpr(n) => assert_eq!(n, 16), _ => {} } }, _ => {} } }, _ => panic!("Invalid expr") } } #[test] fn test_get_ty(){ assert_eq!(Parser::get_ty_from_string("int"), TInt32); assert_eq!(Parser::get_ty_from_string("string"), TString); assert_eq!(Parser::get_ty_from_string("index_type"), TCustom("index_type".to_string())); } #[test] fn test_if_then_expr(){ let mut p = Parser::new("if 1 then 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ IfThenExpr(ref conditional_expr, ref then_expr) => { match(**conditional_expr){ NumExpr(n) => assert_eq!(n, 1), _ => {} } }, _ => {} } } #[test] fn test_if_then_with_ident_as_conditional_expr(){ let mut p = Parser::new("if a then 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ IfThenExpr(ref conditional_expr, _) => { match(**conditional_expr){ IdExpr(ref i) => assert_eq!(*i, String::from("a")), _ => {} } }, _ => {} } } #[test] fn test_if_then_with_add_as_conditional_expr(){ let mut p = Parser::new("if 1+1 then 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ IfThenExpr(ref conditional_expr, ref then_expr) => { match(**conditional_expr){ AddExpr(ref l, ref r) => { match **l{ NumExpr(n) => assert_eq!(n, 1), _ => {} } }, _ => {} } }, _ => {} } } #[test] fn test_if_then_with_string_as_conditional_expr(){ let mut p = Parser::new("if \"abhi\" then 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ 
IfThenExpr(ref conditional_expr, ref then_expr) => { match(**conditional_expr){ StringExpr(ref s) => assert_eq!(*s, String::from("abhi")), _ => {} } }, _ => {} } } #[test] fn test_if_then_else_expr(){ let mut p = Parser::new("if 1 then 1 else 0".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ IfThenElseExpr(ref conditional_expr, ref then_expr, ref else_expr) => { match(**conditional_expr){ NumExpr(n) => assert_eq!(n, 1), _ => {} } match(**else_expr){ NumExpr(n) => assert_eq!(n, 0), _ => {} } }, _ => {} } } #[test] fn test_if_expr_with_string_expr_as_conditional_expr(){ let mut p = Parser::new("if \"abhi\" then 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ IfThenExpr(ref conditional_expr, _) => match(**conditional_expr) { StringExpr(ref s) => assert_eq!(*s, "abhi"), _ => panic!("This will not exhecute") }, _ => panic!("This will not execute") } } #[test] #[should_panic(expected="Type mismatch between the then and else expressions")] fn test_if_then_else_expr_fail_string_return(){ let mut p = Parser::new("if 1 then 1 else \"abhi\"".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ IfThenElseExpr(_, _, ref else_expr) => { match(**else_expr){ StringExpr(_) => panic!("Type mismatch between the then and else expressions"), _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } } #[test] fn test_while_expr(){ let mut p = Parser::new("while 1 do 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ WhileExpr(ref conditional_expr, ref do_expr) => { match(**conditional_expr){ NumExpr(n) => assert_eq!(n, 1), _ => panic!("This will not execute") } match(**do_expr){ NumExpr(n) => assert_eq!(n, 1), _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } } #[test] fn test_while_expr_with_string_as_conditional_expr(){ let mut p = Parser::new("while \"abhi\" do 1".to_string()); p.start_lexer(); let 
(ty, expr) = p.expr().unwrap(); match(*expr){ WhileExpr(ref conditional_expr, ref do_expr) => { match(**conditional_expr){ StringExpr(ref s) => assert_eq!(*s, "abhi"), _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } } #[test] fn test_while_expr_with_addexpr_as_conditional_expr(){ let mut p = Parser::new("while 1+1 do 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ WhileExpr(ref conditional_expr, ref do_expr) => { match(**conditional_expr){ AddExpr(ref l, ref r) => { match **l{ NumExpr(n) => assert_eq!(n, 1), _ => {} } }, _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } } #[test] fn test_while_expr_with_ident_as_conditional_expr(){ let mut p = Parser::new("while a do 1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ WhileExpr(ref conditional_expr, ref do_expr) => { match(**conditional_expr){ IdExpr(ref id) => { assert_eq!(*id, String::from("a")); }, _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } } #[test] fn test_for_expr(){ let mut p = Parser::new("for id:= 1 to 10 do 1+1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ ForExpr(ref id, ref from_expr, ref to_expr, ref do_expr) => { assert_eq!(*id, String::from("id")); match(**from_expr){ NumExpr(n) => assert_eq!(n, 1), _ => panic!("This will not execute") } match(**to_expr){ NumExpr(n) => assert_eq!(n, 10), _ => panic!("This will not execute") } match(**do_expr){ AddExpr(ref l, ref r) => { match(**l){ NumExpr(n) => assert_eq!(n, 1), _ => {} } }, _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } } #[test] fn test_for_expr_with_ident_as_from_expr(){ let mut p = Parser::new("for id:= a to 10 do 1+1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ ForExpr(ref id, ref from_expr, _, _) => { match(**from_expr){ IdExpr(ref i) => assert_eq!(*i, String::from("a")), 
_ => panic!("This will not execute") } }, _ => panic!("This will not execute") } } #[test] fn test_for_expr_with_ident_as_to_and_from_expr(){ let mut p = Parser::new("for id:= a to b do 1+1".to_string()); p.start_lexer(); let (ty, expr) = p.expr().unwrap(); match(*expr){ ForExpr(ref id, ref from_expr, ref to_expr, _) => { match(**to_expr){ IdExpr(ref i) => assert_eq!(*i, String::from("b")), _ => panic!("This will not execute") } match(**from_expr){ IdExpr(ref i) => assert_eq!(*i, String::from("a")), _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } match(*expr){ ForExpr(ref id, ref from_expr, ref to_expr, _) => { match(**to_expr){ IdExpr(ref i) => assert_eq!(*i, String::from("b")), _ => panic!("This will not execute") } match(**from_expr){ IdExpr(ref i) => assert_eq!(*i, String::from("a")), _ => panic!("This will not execute") } }, _ => panic!("This will not execute") } }
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. pub use self::PathParsingMode::*; use abi; use ast::BareFnTy; use ast::{RegionTyParamBound, TraitTyParamBound, TraitBoundModifier}; use ast::{Public, Unsafety}; use ast::{Mod, BiAdd, Arg, Arm, Attribute, BindByRef, BindByValue}; use ast::{BiBitAnd, BiBitOr, BiBitXor, BiRem, BiLt, BiGt, Block}; use ast::{BlockCheckMode, CaptureByRef, CaptureByValue, CaptureClause}; use ast::{Constness, ConstImplItem, ConstTraitItem, Crate, CrateConfig}; use ast::{Decl, DeclItem, DeclLocal, DefaultBlock, DefaultReturn}; use ast::{UnDeref, BiDiv, EMPTY_CTXT, EnumDef, ExplicitSelf}; use ast::{Expr, Expr_, ExprAddrOf, ExprMatch, ExprAgain}; use ast::{ExprAssign, ExprAssignOp, ExprBinary, ExprBlock, ExprBox}; use ast::{ExprBreak, ExprCall, ExprCast, ExprInPlace}; use ast::{ExprField, ExprTupField, ExprClosure, ExprIf, ExprIfLet, ExprIndex}; use ast::{ExprLit, ExprLoop, ExprMac, ExprRange}; use ast::{ExprMethodCall, ExprParen, ExprPath}; use ast::{ExprRepeat, ExprRet, ExprStruct, ExprTup, ExprUnary}; use ast::{ExprVec, ExprWhile, ExprWhileLet, ExprForLoop, Field, FnDecl}; use ast::{ForeignItem, ForeignItemStatic, ForeignItemFn, ForeignMod, FunctionRetTy}; use ast::{Ident, Inherited, ImplItem, Item, Item_, ItemStatic}; use ast::{ItemEnum, ItemFn, ItemForeignMod, ItemImpl, ItemConst}; use ast::{ItemMac, ItemMod, ItemStruct, ItemTrait, ItemTy, ItemDefaultImpl}; use ast::{ItemExternCrate, ItemUse}; use ast::{LifetimeDef, Lit, Lit_}; use ast::{LitBool, LitChar, LitByte, LitByteStr}; use ast::{LitStr, LitInt, Local}; use 
ast::{MacStmtWithBraces, MacStmtWithSemicolon, MacStmtWithoutBraces}; use ast::{MutImmutable, MutMutable, Mac_}; use ast::{MutTy, BiMul, Mutability}; use ast::{MethodImplItem, NamedField, UnNeg, NoReturn, UnNot}; use ast::{Pat, PatBox, PatEnum, PatIdent, PatLit, PatQPath, PatMac, PatRange}; use ast::{PatRegion, PatStruct, PatTup, PatVec, PatWild, PatWildMulti}; use ast::PatWildSingle; use ast::{PolyTraitRef, QSelf}; use ast::{Return, BiShl, BiShr, Stmt, StmtDecl}; use ast::{StmtExpr, StmtSemi, StmtMac, VariantData, StructField}; use ast::{BiSub, StrStyle}; use ast::{SelfExplicit, SelfRegion, SelfStatic, SelfValue}; use ast::{Delimited, SequenceRepetition, TokenTree, TraitItem, TraitRef}; use ast::{TtDelimited, TtSequence, TtToken}; use ast::{Ty, Ty_, TypeBinding}; use ast::{TyMac}; use ast::{TyFixedLengthVec, TyBareFn, TyTypeof, TyInfer}; use ast::{TyParam, TyParamBound, TyParen, TyPath, TyPolyTraitRef, TyPtr}; use ast::{TyRptr, TyTup, TyU32, TyVec}; use ast::{TypeImplItem, TypeTraitItem}; use ast::{UnnamedField, UnsafeBlock}; use ast::{ViewPath, ViewPathGlob, ViewPathList, ViewPathSimple}; use ast::{Visibility, WhereClause}; use ast; use ast_util::{self, AS_PREC, ident_to_path, operator_prec}; use codemap::{self, Span, BytePos, Spanned, spanned, mk_sp, CodeMap}; use diagnostic; use ext::tt::macro_parser; use parse; use parse::attr::ParserAttr; use parse::classify; use parse::common::{SeqSep, seq_sep_none, seq_sep_trailing_allowed}; use parse::lexer::{Reader, TokenAndSpan}; use parse::obsolete::{ParserObsoleteMethods, ObsoleteSyntax}; use parse::token::{self, MatchNt, SubstNt, SpecialVarNt, InternedString}; use parse::token::{keywords, special_idents, SpecialMacroVar}; use parse::{new_sub_parser_from_file, ParseSess}; use print::pprust; use ptr::P; use owned_slice::OwnedSlice; use parse::PResult; use diagnostic::FatalError; use std::collections::HashSet; use std::io::prelude::*; use std::mem; use std::path::{Path, PathBuf}; use std::rc::Rc; use std::slice; 
// Bit-set of contextual parsing restrictions threaded through expression
// parsing (e.g. forbidding struct literals where `{` would be ambiguous).
bitflags! {
    flags Restrictions: u8 {
        const RESTRICTION_STMT_EXPR         = 1 << 0,
        const RESTRICTION_NO_STRUCT_LITERAL = 1 << 1,
    }
}

// (name, item body, optional extra attributes to append) — see maybe_append below.
type ItemInfo = (Ident, Item_, Option<Vec<Attribute> >);

/// How to parse a path. There are four different kinds of paths, all of which
/// are parsed somewhat differently.
/// NOTE(review): only three modes are defined below; the "four" in the
/// sentence above looks stale.
#[derive(Copy, Clone, PartialEq)]
pub enum PathParsingMode {
    /// A path with no type parameters; e.g. `foo::bar::Baz`
    NoTypesAllowed,
    /// A path with a lifetime and type parameters, with no double colons
    /// before the type parameters; e.g. `foo::bar<'a>::Baz<T>`
    LifetimeAndTypesWithoutColons,
    /// A path with a lifetime and type parameters with double colons before
    /// the type parameters; e.g. `foo::bar::<'a>::Baz::<T>`
    LifetimeAndTypesWithColons,
}

/// How to parse a bound, whether to allow bound modifiers such as `?`.
#[derive(Copy, Clone, PartialEq)]
pub enum BoundParsingMode {
    Bare,
    Modified,
}

/// Possibly accept an `token::Interpolated` expression (a pre-parsed expression
/// dropped into the token stream, which happens while parsing the result of
/// macro expansion). Placement of these is not as complex as I feared it would
/// be. The important thing is to make sure that lookahead doesn't balk at
/// `token::Interpolated` tokens.
macro_rules! maybe_whole_expr {
    ($p:expr) => (
        {
            // First pass: inspect the current token without consuming it.
            // NtPath/NtBlock are wrapped into equivalent expression nodes.
            let found = match $p.token {
                token::Interpolated(token::NtExpr(ref e)) => {
                    Some((*e).clone())
                }
                token::Interpolated(token::NtPath(_)) => {
                    // FIXME: The following avoids an issue with lexical borrowck scopes,
                    // but the clone is unfortunate.
                    let pt = match $p.token {
                        token::Interpolated(token::NtPath(ref pt)) => (**pt).clone(),
                        _ => unreachable!()
                    };
                    let span = $p.span;
                    Some($p.mk_expr(span.lo, span.hi, ExprPath(None, pt)))
                }
                token::Interpolated(token::NtBlock(_)) => {
                    // FIXME: The following avoids an issue with lexical borrowck scopes,
                    // but the clone is unfortunate.
                    let b = match $p.token {
                        token::Interpolated(token::NtBlock(ref b)) => (*b).clone(),
                        _ => unreachable!()
                    };
                    let span = $p.span;
                    Some($p.mk_expr(span.lo, span.hi, ExprBlock(b)))
                }
                _ => None
            };
            // Second pass: if we matched, consume the token and early-return
            // the expression from the enclosing function.
            match found {
                Some(e) => {
                    try!($p.bump());
                    return Ok(e);
                }
                None => ()
            }
        }
    )
}

/// As maybe_whole_expr, but for things other than expressions.
/// The variant prefixes select how the interpolated value is unwrapped:
/// default clones, `no_clone` moves, `deref` clones through a pointer,
/// `Some deref` additionally wraps in `Some`, and `pair_empty` pairs the
/// value with an empty `Vec`.
macro_rules! maybe_whole {
    ($p:expr, $constructor:ident) => (
        {
            let found = match ($p).token {
                token::Interpolated(token::$constructor(_)) => {
                    Some(try!(($p).bump_and_get()))
                }
                _ => None
            };
            if let Some(token::Interpolated(token::$constructor(x))) = found {
                return Ok(x.clone());
            }
        }
    );
    (no_clone $p:expr, $constructor:ident) => (
        {
            let found = match ($p).token {
                token::Interpolated(token::$constructor(_)) => {
                    Some(try!(($p).bump_and_get()))
                }
                _ => None
            };
            if let Some(token::Interpolated(token::$constructor(x))) = found {
                return Ok(x);
            }
        }
    );
    (deref $p:expr, $constructor:ident) => (
        {
            let found = match ($p).token {
                token::Interpolated(token::$constructor(_)) => {
                    Some(try!(($p).bump_and_get()))
                }
                _ => None
            };
            if let Some(token::Interpolated(token::$constructor(x))) = found {
                return Ok((*x).clone());
            }
        }
    );
    (Some deref $p:expr, $constructor:ident) => (
        {
            let found = match ($p).token {
                token::Interpolated(token::$constructor(_)) => {
                    Some(try!(($p).bump_and_get()))
                }
                _ => None
            };
            if let Some(token::Interpolated(token::$constructor(x))) = found {
                return Ok(Some((*x).clone()));
            }
        }
    );
    (pair_empty $p:expr, $constructor:ident) => (
        {
            let found = match ($p).token {
                token::Interpolated(token::$constructor(_)) => {
                    Some(try!(($p).bump_and_get()))
                }
                _ => None
            };
            if let Some(token::Interpolated(token::$constructor(x))) = found {
                return Ok((Vec::new(), x));
            }
        }
    )
}

/// Appends `rhs`'s attributes (if any) onto `lhs`, returning the combined list.
fn maybe_append(mut lhs: Vec<Attribute>, rhs: Option<Vec<Attribute>>)
                -> Vec<Attribute> {
    if let Some(ref attrs) = rhs {
        lhs.extend(attrs.iter().cloned())
    }
    lhs
}

/* ident is handled by common.rs */

pub struct Parser<'a> {
    pub sess: &'a ParseSess,
    /// the current token:
    pub token: token::Token,
    /// the span of the current token:
    pub span: Span,
    /// the span of the prior token:
    pub last_span: Span,
    pub cfg: CrateConfig,
    /// the previous token or None (only stashed sometimes).
    pub last_token: Option<Box<token::Token>>,
    // NOTE(review): buffer/buffer_start/buffer_end appear to form a small
    // lookahead buffer over `reader` — confirm against bump()/look_ahead().
    pub buffer: [TokenAndSpan; 4],
    pub buffer_start: isize,
    pub buffer_end: isize,
    pub tokens_consumed: usize,
    pub restrictions: Restrictions,
    pub quote_depth: usize, // not (yet) related to the quasiquoter
    pub reader: Box<Reader+'a>,
    pub interner: Rc<token::IdentInterner>,
    /// The set of seen errors about obsolete syntax. Used to suppress
    /// extra detail when the same error is seen twice
    pub obsolete_set: HashSet<ObsoleteSyntax>,
    /// Used to determine the path to externally loaded source files
    pub mod_path_stack: Vec<InternedString>,
    /// Stack of spans of open delimiters. Used for error message.
    pub open_braces: Vec<Span>,
    /// Flag if this parser "owns" the directory that it is currently parsing
    /// in. This will affect how nested files are looked up.
    pub owns_directory: bool,
    /// Name of the root module this parser originated from. If `None`, then the
    /// name is not known. This does not change while the parser is descending
    /// into modules, and sub-parsers have new values for this name.
    pub root_module_name: Option<String>,
    /// Tokens/keywords we were expecting at this point, accumulated for
    /// "expected one of ..." diagnostics (see expect_one_of).
    pub expected_tokens: Vec<TokenType>,
}

/// One entry in the "expected ..." diagnostic list: a concrete token,
/// a keyword, or the generic class of operators.
#[derive(PartialEq, Eq, Clone)]
pub enum TokenType {
    Token(token::Token),
    Keyword(keywords::Keyword),
    Operator,
}

impl TokenType {
    /// Renders the expectation for use inside an error message,
    /// back-quoting concrete tokens and keywords.
    fn to_string(&self) -> String {
        match *self {
            TokenType::Token(ref t) => format!("`{}`",
                                               Parser::token_to_string(t)),
            TokenType::Operator => "an operator".to_string(),
            TokenType::Keyword(kw) => format!("`{}`", kw.to_name()),
        }
    }
}

/// True for a plain (non-reserved-path) identifier or the `_` token.
fn is_plain_ident_or_underscore(t: &token::Token) -> bool {
    t.is_plain_ident() || *t == token::Underscore
}

/// Information about the path to a module.
pub struct ModulePath { pub name: String, pub path_exists: bool, pub result: Result<ModulePathSuccess, ModulePathError>, } pub struct ModulePathSuccess { pub path: ::std::path::PathBuf, pub owns_directory: bool, } pub struct ModulePathError { pub err_msg: String, pub help_msg: String, } impl<'a> Parser<'a> { pub fn new(sess: &'a ParseSess, cfg: ast::CrateConfig, mut rdr: Box<Reader+'a>) -> Parser<'a> { let tok0 = rdr.real_token(); let span = tok0.sp; let placeholder = TokenAndSpan { tok: token::Underscore, sp: span, }; Parser { reader: rdr, interner: token::get_ident_interner(), sess: sess, cfg: cfg, token: tok0.tok, span: span, last_span: span, last_token: None, buffer: [ placeholder.clone(), placeholder.clone(), placeholder.clone(), placeholder.clone(), ], buffer_start: 0, buffer_end: 0, tokens_consumed: 0, restrictions: Restrictions::empty(), quote_depth: 0, obsolete_set: HashSet::new(), mod_path_stack: Vec::new(), open_braces: Vec::new(), owns_directory: true, root_module_name: None, expected_tokens: Vec::new(), } } // Panicing fns (for now!) 
// This is so that the quote_*!() syntax extensions pub fn parse_expr(&mut self) -> P<Expr> { panictry!(self.parse_expr_nopanic()) } pub fn parse_item(&mut self) -> Option<P<Item>> { panictry!(self.parse_item_nopanic()) } pub fn parse_pat(&mut self) -> P<Pat> { panictry!(self.parse_pat_nopanic()) } pub fn parse_arm(&mut self) -> Arm { panictry!(self.parse_arm_nopanic()) } pub fn parse_ty(&mut self) -> P<Ty> { panictry!(self.parse_ty_nopanic()) } pub fn parse_stmt(&mut self) -> Option<P<Stmt>> { panictry!(self.parse_stmt_nopanic()) } /// Convert a token to a string using self's reader pub fn token_to_string(token: &token::Token) -> String { pprust::token_to_string(token) } /// Convert the current token to a string using self's reader pub fn this_token_to_string(&self) -> String { Parser::token_to_string(&self.token) } pub fn unexpected_last(&self, t: &token::Token) -> FatalError { let token_str = Parser::token_to_string(t); let last_span = self.last_span; self.span_fatal(last_span, &format!("unexpected token: `{}`", token_str)) } pub fn unexpected(&mut self) -> FatalError { match self.expect_one_of(&[], &[]) { Err(e) => e, Ok(_) => unreachable!() } } /// Expect and consume the token t. Signal an error if /// the next token is not t. pub fn expect(&mut self, t: &token::Token) -> PResult<()> { if self.expected_tokens.is_empty() { if self.token == *t { self.bump() } else { let token_str = Parser::token_to_string(t); let this_token_str = self.this_token_to_string(); Err(self.fatal(&format!("expected `{}`, found `{}`", token_str, this_token_str))) } } else { self.expect_one_of(slice::ref_slice(t), &[]) } } /// Expect next token to be edible or inedible token. If edible, /// then consume it; if inedible, then return without consuming /// anything. Signal a fatal error if next token is unexpected. 
    pub fn expect_one_of(&mut self,
                         edible: &[token::Token],
                         inedible: &[token::Token]) -> PResult<()>{
        // Render an expectation list as "`a`, `b`, or `c`".
        fn tokens_to_string(tokens: &[TokenType]) -> String {
            let mut i = tokens.iter();
            // This might be a sign we need a connect method on Iterator.
            let b = i.next()
                     .map_or("".to_string(), |t| t.to_string());
            i.enumerate().fold(b, |mut b, (i, ref a)| {
                if tokens.len() > 2 && i == tokens.len() - 2 {
                    b.push_str(", or ");
                } else if tokens.len() == 2 && i == tokens.len() - 2 {
                    b.push_str(" or ");
                } else {
                    b.push_str(", ");
                }
                b.push_str(&*a.to_string());
                b
            })
        }
        if edible.contains(&self.token) {
            self.bump()
        } else if inedible.contains(&self.token) {
            // leave it in the input
            Ok(())
        } else {
            // Build the full candidate set (edible + inedible + whatever
            // `check`/`check_keyword` recorded), deduplicated and sorted by
            // display text so the message is stable.
            let mut expected = edible.iter()
                .map(|x| TokenType::Token(x.clone()))
                .chain(inedible.iter().map(|x| TokenType::Token(x.clone())))
                .chain(self.expected_tokens.iter().cloned())
                .collect::<Vec<_>>();
            expected.sort_by(|a, b| a.to_string().cmp(&b.to_string()));
            expected.dedup();
            let expect = tokens_to_string(&expected[..]);
            let actual = self.this_token_to_string();
            Err(self.fatal(
                &(if expected.len() > 1 {
                    (format!("expected one of {}, found `{}`",
                             expect,
                             actual))
                } else if expected.is_empty() {
                    (format!("unexpected token: `{}`",
                             actual))
                } else {
                    (format!("expected {}, found `{}`",
                             expect,
                             actual))
                })[..]
            ))
        }
    }

    /// Check for erroneous `ident { }`; if matches, signal error and
    /// recover (without consuming any expected input token).  Returns
    /// true if and only if input was consumed for recovery.
    pub fn check_for_erroneous_unit_struct_expecting(&mut self,
                                                     expected: &[token::Token])
                                                     -> PResult<bool> {
        // Only fire when `{` is not itself one of the expected tokens and
        // the braces are empty, i.e. `Foo { }` where `Foo` alone was meant.
        if self.token == token::OpenDelim(token::Brace)
            && expected.iter().all(|t| *t != token::OpenDelim(token::Brace))
            && self.look_ahead(1, |t| *t == token::CloseDelim(token::Brace)) {
            // matched; signal non-fatal error and recover.
            let span = self.span;
            self.span_err(span,
                          "unit-like struct construction is written with no trailing `{ }`");
            try!(self.eat(&token::OpenDelim(token::Brace)));
            try!(self.eat(&token::CloseDelim(token::Brace)));
            Ok(true)
        } else {
            Ok(false)
        }
    }

    /// Commit to parsing a complete expression `e` expected to be
    /// followed by some token from the set edible + inedible.  Recover
    /// from anticipated input errors, discarding erroneous characters.
    pub fn commit_expr(&mut self, e: &Expr, edible: &[token::Token],
                       inedible: &[token::Token]) -> PResult<()> {
        debug!("commit_expr {:?}", e);
        if let ExprPath(..) = e.node {
            // might be unit-struct construction; check for recoverable input error.
            let expected = edible.iter()
                .cloned()
                .chain(inedible.iter().cloned())
                .collect::<Vec<_>>();
            try!(self.check_for_erroneous_unit_struct_expecting(&expected[..]));
        }
        self.expect_one_of(edible, inedible)
    }

    pub fn commit_expr_expecting(&mut self, e: &Expr, edible: token::Token) -> PResult<()> {
        self.commit_expr(e, &[edible], &[])
    }

    /// Commit to parsing a complete statement `s`, which expects to be
    /// followed by some token from the set edible + inedible.  Check
    /// for recoverable input errors, discarding erroneous characters.
    pub fn commit_stmt(&mut self, edible: &[token::Token],
                       inedible: &[token::Token]) -> PResult<()> {
        // Only worth probing for `ident { }` recovery if the statement
        // ended in an identifier or path (see `bump`, which stashes those).
        if self.last_token
               .as_ref()
               .map_or(false, |t| t.is_ident() || t.is_path()) {
            let expected = edible.iter()
                .cloned()
                .chain(inedible.iter().cloned())
                .collect::<Vec<_>>();
            try!(self.check_for_erroneous_unit_struct_expecting(&expected));
        }
        self.expect_one_of(edible, inedible)
    }

    pub fn commit_stmt_expecting(&mut self, edible: token::Token) -> PResult<()> {
        self.commit_stmt(&[edible], &[])
    }

    /// Parse an identifier, rejecting strict/reserved keywords.
    pub fn parse_ident(&mut self) -> PResult<ast::Ident> {
        // Strict keywords only produce a non-fatal error; reserved
        // keywords abort via the Err below.
        self.check_strict_keywords();
        try!(self.check_reserved_keywords());
        match self.token {
            token::Ident(i, _) => {
                try!(self.bump());
                Ok(i)
            }
            token::Interpolated(token::NtIdent(..)) => {
                self.bug("ident interpolation not converted to real token");
            }
            _ => {
                let token_str = self.this_token_to_string();
                Err(self.fatal(&format!("expected ident, found `{}`",
                                        token_str)))
            }
        }
    }

    /// Parse either an identifier or the `Self` type name.
    pub fn parse_ident_or_self_type(&mut self) -> PResult<ast::Ident> {
        if self.is_self_type_ident() {
            self.expect_self_type_ident()
        } else {
            self.parse_ident()
        }
    }

    /// Parse one item of a `use` list: `self [as rename]` or `ident [as rename]`.
    pub fn parse_path_list_item(&mut self) -> PResult<ast::PathListItem> {
        let lo = self.span.lo;
        let node = if try!(self.eat_keyword(keywords::SelfValue)) {
            let rename = try!(self.parse_rename());
            ast::PathListMod { id: ast::DUMMY_NODE_ID, rename: rename }
        } else {
            let ident = try!(self.parse_ident());
            let rename = try!(self.parse_rename());
            ast::PathListIdent { name: ident, rename: rename, id: ast::DUMMY_NODE_ID }
        };
        let hi = self.last_span.hi;
        Ok(spanned(lo, hi, node))
    }

    /// Check if the next token is `tok`, and return `true` if so.
    ///
    /// This method will automatically add `tok` to `expected_tokens` if `tok` is not
    /// encountered.
    pub fn check(&mut self, tok: &token::Token) -> bool {
        let is_present = self.token == *tok;
        if !is_present { self.expected_tokens.push(TokenType::Token(tok.clone())); }
        is_present
    }

    /// Consume token 'tok' if it exists. Returns true if the given
    /// token was present, false otherwise.
    pub fn eat(&mut self, tok: &token::Token) -> PResult<bool> {
        let is_present = self.check(tok);
        if is_present { try!(self.bump()) }
        Ok(is_present)
    }

    /// Like `check`, but for keywords; also records the expectation.
    pub fn check_keyword(&mut self, kw: keywords::Keyword) -> bool {
        self.expected_tokens.push(TokenType::Keyword(kw));
        self.token.is_keyword(kw)
    }

    /// If the next token is the given keyword, eat it and return
    /// true. Otherwise, return false.
    pub fn eat_keyword(&mut self, kw: keywords::Keyword) -> PResult<bool> {
        if self.check_keyword(kw) {
            try!(self.bump());
            Ok(true)
        } else {
            Ok(false)
        }
    }

    /// Like `eat_keyword`, but does not record `kw` in `expected_tokens`
    /// (so it will not appear in "expected one of ..." diagnostics).
    pub fn eat_keyword_noexpect(&mut self, kw: keywords::Keyword) -> PResult<bool> {
        if self.token.is_keyword(kw) {
            try!(self.bump());
            Ok(true)
        } else {
            Ok(false)
        }
    }

    /// If the given word is not a keyword, signal an error.
    /// If the next token is not the given word, signal an error.
    /// Otherwise, eat it.
    pub fn expect_keyword(&mut self, kw: keywords::Keyword) -> PResult<()> {
        if !try!(self.eat_keyword(kw)) {
            self.expect_one_of(&[], &[])
        } else {
            Ok(())
        }
    }

    /// Signal an error if the given string is a strict keyword
    pub fn check_strict_keywords(&mut self) {
        if self.token.is_strict_keyword() {
            let token_str = self.this_token_to_string();
            let span = self.span;
            self.span_err(span,
                          &format!("expected identifier, found keyword `{}`",
                                   token_str));
        }
    }

    /// Signal an error if the current token is a reserved keyword
    pub fn check_reserved_keywords(&mut self) -> PResult<()>{
        if self.token.is_reserved_keyword() {
            let token_str = self.this_token_to_string();
            Err(self.fatal(&format!("`{}` is a reserved keyword",
                                    token_str)))
        } else {
            Ok(())
        }
    }

    /// Expect and consume an `&`. If `&&` is seen, replace it with a single
    /// `&` and continue. If an `&` is not seen, signal an error.
    fn expect_and(&mut self) -> PResult<()> {
        self.expected_tokens.push(TokenType::Token(token::BinOp(token::And)));
        match self.token {
            token::BinOp(token::And) => self.bump(),
            token::AndAnd => {
                // Split `&&` in two: consume the first `&` and leave a
                // single `&` token covering the second byte of the span.
                let span = self.span;
                let lo = span.lo + BytePos(1);
                Ok(self.replace_token(token::BinOp(token::And), lo, span.hi))
            }
            _ => self.expect_one_of(&[], &[])
        }
    }

    /// Report an error (or an ICE for an empty suffix) when a literal of
    /// kind `kind` carries a suffix where none is permitted.
    pub fn expect_no_suffix(&self, sp: Span, kind: &str, suffix: Option<ast::Name>) {
        match suffix {
            None => {/* everything ok */}
            Some(suf) => {
                let text = suf.as_str();
                if text.is_empty() {
                    // The lexer should never produce `Some` with no text.
                    self.span_bug(sp, "found empty literal suffix in Some")
                }
                self.span_err(sp, &*format!("{} with a suffix is invalid", kind));
            }
        }
    }

    /// Attempt to consume a `<`. If `<<` is seen, replace it with a single
    /// `<` and continue. If a `<` is not seen, return false.
    ///
    /// This is meant to be used when parsing generics on a path to get the
    /// starting token.
    fn eat_lt(&mut self) -> PResult<bool> {
        self.expected_tokens.push(TokenType::Token(token::Lt));
        match self.token {
            token::Lt => { try!(self.bump()); Ok(true) }
            token::BinOp(token::Shl) => {
                // Split `<<` just like `&&` in expect_and above.
                let span = self.span;
                let lo = span.lo + BytePos(1);
                self.replace_token(token::Lt, lo, span.hi);
                Ok(true)
            }
            _ => Ok(false),
        }
    }

    /// Like `eat_lt`, but a missing `<` is a hard error.
    fn expect_lt(&mut self) -> PResult<()> {
        if !try!(self.eat_lt()) {
            self.expect_one_of(&[], &[])
        } else {
            Ok(())
        }
    }

    /// Expect and consume a GT. if a >> is seen, replace it
    /// with a single > and continue. If a GT is not seen,
    /// signal an error.
    pub fn expect_gt(&mut self) -> PResult<()> {
        self.expected_tokens.push(TokenType::Token(token::Gt));
        match self.token {
            token::Gt => self.bump(),
            token::BinOp(token::Shr) => {
                // `>>` closing two nested generic lists: consume one `>`
                // and leave a single `>` behind.
                let span = self.span;
                let lo = span.lo + BytePos(1);
                Ok(self.replace_token(token::Gt, lo, span.hi))
            }
            token::BinOpEq(token::Shr) => {
                // `>>=`: consume one `>`, leaving `>=` behind.
                let span = self.span;
                let lo = span.lo + BytePos(1);
                Ok(self.replace_token(token::Ge, lo, span.hi))
            }
            token::Ge => {
                // `>=`: consume the `>`, leaving `=` behind.
                let span = self.span;
                let lo = span.lo + BytePos(1);
                Ok(self.replace_token(token::Eq, lo, span.hi))
            }
            _ => {
                let gt_str = Parser::token_to_string(&token::Gt);
                let this_token_str = self.this_token_to_string();
                Err(self.fatal(&format!("expected `{}`, found `{}`",
                                        gt_str,
                                        this_token_str)))
            }
        }
    }

    /// Parse the elements of a generic-argument list up to (but not
    /// including) the closing `>`; `f` returning `Ok(None)` stops early
    /// and the bool in the result records that early return.
    pub fn parse_seq_to_before_gt_or_return<T, F>(&mut self,
                                                  sep: Option<token::Token>,
                                                  mut f: F)
                                                  -> PResult<(OwnedSlice<T>, bool)> where
        F: FnMut(&mut Parser) -> PResult<Option<T>>,
    {
        let mut v = Vec::new();
        // This loop works by alternating back and forth between parsing types
        // and commas.  For example, given a string `A, B,>`, the parser would
        // first parse `A`, then a comma, then `B`, then a comma. After that it
        // would encounter a `>` and stop. This lets the parser handle trailing
        // commas in generic parameters, because it can stop either after
        // parsing a type or after parsing a comma.
        for i in 0.. {
            // Any token that *starts with* `>` terminates the list
            // (expect_gt splits the compound tokens afterwards).
            if self.check(&token::Gt)
                || self.token == token::BinOp(token::Shr)
                || self.token == token::Ge
                || self.token == token::BinOpEq(token::Shr) {
                break;
            }

            if i % 2 == 0 {
                match try!(f(self)) {
                    Some(result) => v.push(result),
                    None => return Ok((OwnedSlice::from_vec(v), true))
                }
            } else {
                if let Some(t) = sep.as_ref() {
                    try!(self.expect(t));
                }
            }
        }
        return Ok((OwnedSlice::from_vec(v), false));
    }

    /// Parse a sequence bracketed by '<' and '>', stopping
    /// before the '>'.
pub fn parse_seq_to_before_gt<T, F>(&mut self, sep: Option<token::Token>, mut f: F) -> PResult<OwnedSlice<T>> where F: FnMut(&mut Parser) -> PResult<T>, { let (result, returned) = try!(self.parse_seq_to_before_gt_or_return(sep, |p| Ok(Some(try!(f(p)))))); assert!(!returned); return Ok(result); } pub fn parse_seq_to_gt<T, F>(&mut self, sep: Option<token::Token>, f: F) -> PResult<OwnedSlice<T>> where F: FnMut(&mut Parser) -> PResult<T>, { let v = try!(self.parse_seq_to_before_gt(sep, f)); try!(self.expect_gt()); return Ok(v); } pub fn parse_seq_to_gt_or_return<T, F>(&mut self, sep: Option<token::Token>, f: F) -> PResult<(OwnedSlice<T>, bool)> where F: FnMut(&mut Parser) -> PResult<Option<T>>, { let (v, returned) = try!(self.parse_seq_to_before_gt_or_return(sep, f)); if !returned { try!(self.expect_gt()); } return Ok((v, returned)); } /// Parse a sequence, including the closing delimiter. The function /// f must consume tokens until reaching the next separator or /// closing bracket. pub fn parse_seq_to_end<T, F>(&mut self, ket: &token::Token, sep: SeqSep, f: F) -> PResult<Vec<T>> where F: FnMut(&mut Parser) -> PResult<T>, { let val = try!(self.parse_seq_to_before_end(ket, sep, f)); try!(self.bump()); Ok(val) } /// Parse a sequence, not including the closing delimiter. The function /// f must consume tokens until reaching the next separator or /// closing bracket. pub fn parse_seq_to_before_end<T, F>(&mut self, ket: &token::Token, sep: SeqSep, mut f: F) -> PResult<Vec<T>> where F: FnMut(&mut Parser) -> PResult<T>, { let mut first: bool = true; let mut v = vec!(); while self.token != *ket { match sep.sep { Some(ref t) => { if first { first = false; } else { try!(self.expect(t)); } } _ => () } if sep.trailing_sep_allowed && self.check(ket) { break; } v.push(try!(f(self))); } return Ok(v); } /// Parse a sequence, including the closing delimiter. The function /// f must consume tokens until reaching the next separator or /// closing bracket. 
    pub fn parse_unspanned_seq<T, F>(&mut self,
                                     bra: &token::Token,
                                     ket: &token::Token,
                                     sep: SeqSep,
                                     f: F)
                                     -> PResult<Vec<T>> where
        F: FnMut(&mut Parser) -> PResult<T>,
    {
        try!(self.expect(bra));
        let result = try!(self.parse_seq_to_before_end(ket, sep, f));
        // Consume the closing delimiter.
        try!(self.bump());
        Ok(result)
    }

    /// Parse a sequence parameter of enum variant. For consistency purposes,
    /// these should not be empty.
    pub fn parse_enum_variant_seq<T, F>(&mut self,
                                        bra: &token::Token,
                                        ket: &token::Token,
                                        sep: SeqSep,
                                        f: F)
                                        -> PResult<Vec<T>> where
        F: FnMut(&mut Parser) -> PResult<T>,
    {
        let result = try!(self.parse_unspanned_seq(bra, ket, sep, f));
        if result.is_empty() {
            let last_span = self.last_span;
            self.span_err(last_span,
                          "nullary enum variants are written with no trailing `( )`");
        }
        Ok(result)
    }

    // NB: Do not use this function unless you actually plan to place the
    // spanned list in the AST.
    pub fn parse_seq<T, F>(&mut self,
                           bra: &token::Token,
                           ket: &token::Token,
                           sep: SeqSep,
                           f: F)
                           -> PResult<Spanned<Vec<T>>> where
        F: FnMut(&mut Parser) -> PResult<T>,
    {
        let lo = self.span.lo;
        try!(self.expect(bra));
        let result = try!(self.parse_seq_to_before_end(ket, sep, f));
        // Note: `hi` is read *before* the closing delimiter is bumped, so
        // the span covers up to and including the delimiter.
        let hi = self.span.hi;
        try!(self.bump());
        Ok(spanned(lo, hi, result))
    }

    /// Advance the parser by one token
    pub fn bump(&mut self) -> PResult<()> {
        self.last_span = self.span;
        // Stash token for error recovery (sometimes; clone is not necessarily cheap).
        // Only identifiers, paths and commas are stashed -- see commit_stmt,
        // which consults `last_token`.
        self.last_token = if self.token.is_ident() ||
                             self.token.is_path() ||
                             self.token == token::Comma {
            Some(Box::new(self.token.clone()))
        } else {
            None
        };
        // Pull from the 4-slot lookahead ring buffer first; it is empty
        // when buffer_start == buffer_end.
        let next = if self.buffer_start == self.buffer_end {
            self.reader.real_token()
        } else {
            // Avoid token copies with `replace`.
            let buffer_start = self.buffer_start as usize;
            let next_index = (buffer_start + 1) & 3;
            self.buffer_start = next_index as isize;

            let placeholder = TokenAndSpan {
                tok: token::Underscore,
                sp: self.span,
            };
            mem::replace(&mut self.buffer[buffer_start], placeholder)
        };
        self.span = next.sp;
        self.token = next.tok;
        self.tokens_consumed += 1;
        // Expectations recorded by `check`/`check_keyword` only apply to
        // the token they were recorded against.
        self.expected_tokens.clear();
        // check after each token
        self.check_unknown_macro_variable()
    }

    /// Advance the parser by one token and return the bumped token.
    pub fn bump_and_get(&mut self) -> PResult<token::Token> {
        let old_token = mem::replace(&mut self.token, token::Underscore);
        try!(self.bump());
        Ok(old_token)
    }

    /// EFFECT: replace the current token and span with the given one
    pub fn replace_token(&mut self,
                         next: token::Token,
                         lo: BytePos,
                         hi: BytePos) {
        self.last_span = mk_sp(self.span.lo, lo);
        self.token = next;
        self.span = mk_sp(lo, hi);
    }

    /// Number of tokens currently held in the lookahead ring buffer.
    pub fn buffer_length(&mut self) -> isize {
        if self.buffer_start <= self.buffer_end {
            return self.buffer_end - self.buffer_start;
        }
        // Wrapped case: capacity is 4.
        return (4 - self.buffer_start) + self.buffer_end;
    }

    /// Apply `f` to the token `distance` tokens ahead, filling the ring
    /// buffer from the reader as needed.
    // NOTE(review): `distance` of 0 would index `buffer_start - 1`; callers
    // appear to pass distances >= 1 -- confirm before relying on 0.
    pub fn look_ahead<R, F>(&mut self, distance: usize, f: F) -> R where
        F: FnOnce(&token::Token) -> R,
    {
        let dist = distance as isize;
        while self.buffer_length() < dist {
            self.buffer[self.buffer_end as usize] = self.reader.real_token();
            self.buffer_end = (self.buffer_end + 1) & 3;
        }
        f(&self.buffer[((self.buffer_start + dist - 1) & 3) as usize].tok)
    }

    /// Fatal error at the current token's span.
    pub fn fatal(&self, m: &str) -> diagnostic::FatalError {
        self.sess.span_diagnostic.span_fatal(self.span, m)
    }
    pub fn span_fatal(&self, sp: Span, m: &str) -> diagnostic::FatalError {
        self.sess.span_diagnostic.span_fatal(sp, m)
    }
    /// Emit a (non-aborting) error plus a help note, then hand back the
    /// fatal marker for the caller to propagate.
    pub fn span_fatal_help(&self, sp: Span, m: &str, help: &str) -> diagnostic::FatalError {
        self.span_err(sp, m);
        self.fileline_help(sp, help);
        diagnostic::FatalError
    }
    pub fn span_note(&self, sp: Span, m: &str) {
        self.sess.span_diagnostic.span_note(sp, m)
    }
    pub fn span_help(&self, sp: Span, m: &str) {
        self.sess.span_diagnostic.span_help(sp, m)
    }
    pub fn span_suggestion(&self, sp: Span, m: &str, n: String) {
        self.sess.span_diagnostic.span_suggestion(sp, m, n)
    }
    pub fn fileline_help(&self, sp: Span, m: &str) {
        self.sess.span_diagnostic.fileline_help(sp, m)
    }
    /// Internal compiler error at the current token's span.
    pub fn bug(&self, m: &str) -> ! {
        self.sess.span_diagnostic.span_bug(self.span, m)
    }
    pub fn warn(&self, m: &str) {
        self.sess.span_diagnostic.span_warn(self.span, m)
    }
    pub fn span_warn(&self, sp: Span, m: &str) {
        self.sess.span_diagnostic.span_warn(sp, m)
    }
    pub fn span_err(&self, sp: Span, m: &str) {
        self.sess.span_diagnostic.span_err(sp, m)
    }
    pub fn span_bug(&self, sp: Span, m: &str) -> ! {
        self.sess.span_diagnostic.span_bug(sp, m)
    }
    pub fn abort_if_errors(&self) {
        self.sess.span_diagnostic.handler().abort_if_errors();
    }

    pub fn id_to_interned_str(&mut self, id: Ident) -> InternedString {
        id.name.as_str()
    }

    /// Is the current token one of the keywords that signals a bare function
    /// type?
    pub fn token_is_bare_fn_keyword(&mut self) -> bool {
        self.check_keyword(keywords::Fn) ||
            self.check_keyword(keywords::Unsafe) ||
            self.check_keyword(keywords::Extern)
    }

    /// Return the current lifetime token's ident; ICE if the current
    /// token is not a lifetime.
    pub fn get_lifetime(&mut self) -> ast::Ident {
        match self.token {
            token::Lifetime(ref ident) => *ident,
            _ => self.bug("not a lifetime"),
        }
    }

    pub fn parse_for_in_type(&mut self) -> PResult<Ty_> {
        /*
        Parses whatever can come after a `for` keyword in a type.
        The `for` has already been consumed.

        Deprecated:

        - for <'lt> |S| -> T

        Eventually:

        - for <'lt> [unsafe] [extern "ABI"] fn (S) -> T
        - for <'lt> path::foo(a, b)

        */

        // parse <'lt>
        let lo = self.span.lo;

        let lifetime_defs = try!(self.parse_late_bound_lifetime_defs());

        // examine next token to decide to do
        if self.token_is_bare_fn_keyword() {
            self.parse_ty_bare_fn(lifetime_defs)
        } else {
            // Otherwise this is a poly trait ref, possibly followed by
            // `+ OtherBound + ...`.
            let hi = self.span.hi;
            let trait_ref = try!(self.parse_trait_ref());
            let poly_trait_ref = ast::PolyTraitRef { bound_lifetimes: lifetime_defs,
                                                     trait_ref: trait_ref,
                                                     span: mk_sp(lo, hi) };
            let other_bounds = if try!(self.eat(&token::BinOp(token::Plus))) {
                try!(self.parse_ty_param_bounds(BoundParsingMode::Bare))
            } else {
                OwnedSlice::empty()
            };
            let all_bounds =
                Some(TraitTyParamBound(poly_trait_ref, TraitBoundModifier::None)).into_iter()
                .chain(other_bounds.into_vec())
                .collect();
            Ok(ast::TyPolyTraitRef(all_bounds))
        }
    }

    pub fn parse_ty_path(&mut self) -> PResult<Ty_> {
        Ok(TyPath(None, try!(self.parse_path(LifetimeAndTypesWithoutColons))))
    }

    /// parse a TyBareFn type:
    pub fn parse_ty_bare_fn(&mut self, lifetime_defs: Vec<ast::LifetimeDef>) -> PResult<Ty_> {
        /*

        [unsafe] [extern "ABI"] fn <'lt> (S) -> T
         ^~~~^           ^~~~^     ^~~~^ ^~^    ^
           |               |         |    |     |
           |               |         |    |   Return type
           |               |         |  Argument types
           |               |     Lifetimes
           |              ABI
        Function Style
        */

        let unsafety = try!(self.parse_unsafety());
        let abi = if try!(self.eat_keyword(keywords::Extern)) {
            // `extern` with no explicit ABI string defaults to "C".
            try!(self.parse_opt_abi()).unwrap_or(abi::C)
        } else {
            abi::Rust
        };

        try!(self.expect_keyword(keywords::Fn));
        let (inputs, variadic) = try!(self.parse_fn_args(false, true));
        let ret_ty = try!(self.parse_ret_ty());
        let decl = P(FnDecl {
            inputs: inputs,
            output: ret_ty,
            variadic: variadic
        });
        Ok(TyBareFn(P(BareFnTy {
            abi: abi,
            unsafety: unsafety,
            lifetimes: lifetime_defs,
            decl: decl
        })))
    }

    /// Parses an obsolete closure kind (`&:`, `&mut:`, or `:`).
    pub fn parse_obsolete_closure_kind(&mut self) -> PResult<()> {
        let lo = self.span.lo;
        if self.check(&token::BinOp(token::And)) &&
           self.look_ahead(1, |t| t.is_keyword(keywords::Mut)) &&
           self.look_ahead(2, |t| *t == token::Colon) {
            // `&mut:` -- three tokens.
            try!(self.bump());
            try!(self.bump());
            try!(self.bump());
        } else if self.token == token::BinOp(token::And) &&
                  self.look_ahead(1, |t| *t == token::Colon) {
            // `&:` -- two tokens.
            try!(self.bump());
            try!(self.bump());
        } else if try!(self.eat(&token::Colon)) {
            /* nothing */
        } else {
            // No obsolete closure kind present; nothing to report.
            return Ok(());
        }

        let span = mk_sp(lo, self.span.hi);
        self.obsolete(span, ObsoleteSyntax::ClosureKind);
        Ok(())
    }

    /// Parse an optional `unsafe` qualifier.
    pub fn parse_unsafety(&mut self) -> PResult<Unsafety> {
        if try!(self.eat_keyword(keywords::Unsafe)) {
            return Ok(Unsafety::Unsafe);
        } else {
            return Ok(Unsafety::Normal);
        }
    }

    /// Parse the items in a trait declaration
    pub fn parse_trait_items(&mut self) -> PResult<Vec<P<TraitItem>>> {
        self.parse_unspanned_seq(
            &token::OpenDelim(token::Brace),
            &token::CloseDelim(token::Brace),
            seq_sep_none(),
            |p| -> PResult<P<TraitItem>> {
            maybe_whole!(no_clone p, NtTraitItem);
            let mut attrs = p.parse_outer_attributes();
            let lo = p.span.lo;

            // A trait item is an associated type, an associated const,
            // or a method (required or provided).
            let (name, node) = if try!(p.eat_keyword(keywords::Type)) {
                let TyParam {ident, bounds, default, ..} = try!(p.parse_ty_param());
                try!(p.expect(&token::Semi));
                (ident, TypeTraitItem(bounds, default))
            } else if p.is_const_item() {
                try!(p.expect_keyword(keywords::Const));
                let ident = try!(p.parse_ident());
                try!(p.expect(&token::Colon));
                let ty = try!(p.parse_ty_sum());
                let default = if p.check(&token::Eq) {
                    try!(p.bump());
                    let expr = try!(p.parse_expr_nopanic());
                    try!(p.commit_expr_expecting(&expr, token::Semi));
                    Some(expr)
                } else {
                    try!(p.expect(&token::Semi));
                    None
                };
                (ident, ConstTraitItem(ty, default))
            } else {
                let (constness, unsafety, abi) = try!(p.parse_fn_front_matter());

                let ident = try!(p.parse_ident());
                let mut generics = try!(p.parse_generics());

                let (explicit_self, d) = try!(p.parse_fn_decl_with_self(|p| {
                    // This is somewhat dubious; We don't want to allow
                    // argument names to be left off if there is a
                    // definition...
                    p.parse_arg_general(false)
                }));

                generics.where_clause = try!(p.parse_where_clause());
                let sig = ast::MethodSig {
                    unsafety: unsafety,
                    constness: constness,
                    decl: d,
                    generics: generics,
                    abi: abi,
                    explicit_self: explicit_self,
                };

                // `;` ends a required method; `{` starts a provided body.
                let body = match p.token {
                    token::Semi => {
                        try!(p.bump());
                        debug!("parse_trait_methods(): parsing required method");
                        None
                    }
                    token::OpenDelim(token::Brace) => {
                        debug!("parse_trait_methods(): parsing provided method");
                        let (inner_attrs, body) =
                            try!(p.parse_inner_attrs_and_block());
                        attrs.extend(inner_attrs.iter().cloned());
                        Some(body)
                    }

                    _ => {
                        let token_str = p.this_token_to_string();
                        return Err(p.fatal(&format!("expected `;` or `{{`, found `{}`",
                                                    token_str)[..]))
                    }
                };
                (ident, ast::MethodTraitItem(sig, body))
            };

            Ok(P(TraitItem {
                id: ast::DUMMY_NODE_ID,
                ident: name,
                attrs: attrs,
                node: node,
                span: mk_sp(lo, p.last_span.hi),
            }))
        })
    }

    /// Parse a possibly mutable type
    pub fn parse_mt(&mut self) -> PResult<MutTy> {
        let mutbl = try!(self.parse_mutability());
        let t = try!(self.parse_ty_nopanic());
        Ok(MutTy { ty: t, mutbl: mutbl })
    }

    /// Parse optional return type [ -> TY ] in function decl
    pub fn parse_ret_ty(&mut self) -> PResult<FunctionRetTy> {
        if try!(self.eat(&token::RArrow)) {
            if try!(self.eat(&token::Not)) {
                // `-> !`: diverging function.
                Ok(NoReturn(self.last_span))
            } else {
                Ok(Return(try!(self.parse_ty_nopanic())))
            }
        } else {
            // No `->` at all: implicit unit return with a zero-width span.
            let pos = self.span.lo;
            Ok(DefaultReturn(mk_sp(pos, pos)))
        }
    }

    /// Parse a type in a context where `T1+T2` is allowed.
    pub fn parse_ty_sum(&mut self) -> PResult<P<Ty>> {
        let lo = self.span.lo;
        let lhs = try!(self.parse_ty_nopanic());

        if !try!(self.eat(&token::BinOp(token::Plus))) {
            return Ok(lhs);
        }

        let bounds = try!(self.parse_ty_param_bounds(BoundParsingMode::Bare));

        // In type grammar, `+` is treated like a binary operator,
        // and hence both L and R side are required.
        if bounds.is_empty() {
            let last_span = self.last_span;
            self.span_err(last_span,
                          "at least one type parameter bound \
                           must be specified");
        }

        let sp = mk_sp(lo, self.last_span.hi);
        let sum = ast::TyObjectSum(lhs, bounds);
        Ok(P(Ty {id: ast::DUMMY_NODE_ID, node: sum, span: sp}))
    }

    /// Parse a type.
    pub fn parse_ty_nopanic(&mut self) -> PResult<P<Ty>> {
        maybe_whole!(no_clone self, NtTy);

        let lo = self.span.lo;

        let t = if self.check(&token::OpenDelim(token::Paren)) {
            try!(self.bump());

            // (t) is a parenthesized ty
            // (t,) is the type of a tuple with only one field,
            // of type t
            let mut ts = vec![];
            let mut last_comma = false;
            while self.token != token::CloseDelim(token::Paren) {
                ts.push(try!(self.parse_ty_sum()));
                if self.check(&token::Comma) {
                    last_comma = true;
                    try!(self.bump());
                } else {
                    last_comma = false;
                    break;
                }
            }

            try!(self.expect(&token::CloseDelim(token::Paren)));
            if ts.len() == 1 && !last_comma {
                TyParen(ts.into_iter().nth(0).unwrap())
            } else {
                TyTup(ts)
            }
        } else if self.check(&token::BinOp(token::Star)) {
            // STAR POINTER (bare pointer?)
            try!(self.bump());
            TyPtr(try!(self.parse_ptr()))
        } else if self.check(&token::OpenDelim(token::Bracket)) {
            // VECTOR
            try!(self.expect(&token::OpenDelim(token::Bracket)));
            let t = try!(self.parse_ty_sum());

            // Parse the `; e` in `[ i32; e ]`
            // where `e` is a const expression
            let t = match try!(self.maybe_parse_fixed_length_of_vec()) {
                None => TyVec(t),
                Some(suffix) => TyFixedLengthVec(t, suffix)
            };
            try!(self.expect(&token::CloseDelim(token::Bracket)));
            t
        } else if self.check(&token::BinOp(token::And)) ||
                  self.token == token::AndAnd {
            // BORROWED POINTER
            try!(self.expect_and());
            try!(self.parse_borrowed_pointee())
        } else if self.check_keyword(keywords::For) {
            try!(self.parse_for_in_type())
        } else if self.token_is_bare_fn_keyword() {
            // BARE FUNCTION
            try!(self.parse_ty_bare_fn(Vec::new()))
        } else if try!(self.eat_keyword_noexpect(keywords::Typeof)) {
            // TYPEOF
            // In order to not be ambiguous, the type must be surrounded by parens.
            try!(self.expect(&token::OpenDelim(token::Paren)));
            let e = try!(self.parse_expr_nopanic());
            try!(self.expect(&token::CloseDelim(token::Paren)));
            TyTypeof(e)
        } else if try!(self.eat_lt()) {
            // QUALIFIED PATH `<T as Trait>::...`
            let (qself, path) =
                try!(self.parse_qualified_path(NoTypesAllowed));
            TyPath(Some(qself), path)
        } else if self.check(&token::ModSep) ||
                  self.token.is_ident() ||
                  self.token.is_path() {
            let path = try!(self.parse_path(LifetimeAndTypesWithoutColons));
            if self.check(&token::Not) {
                // MACRO INVOCATION
                try!(self.bump());
                let delim = try!(self.expect_open_delim());
                let tts = try!(self.parse_seq_to_end(&token::CloseDelim(delim),
                                                     seq_sep_none(),
                                                     |p| p.parse_token_tree()));
                let hi = self.span.hi;
                TyMac(spanned(lo, hi, Mac_ { path: path, tts: tts, ctxt: EMPTY_CTXT }))
            } else {
                // NAMED TYPE
                TyPath(None, path)
            }
        } else if try!(self.eat(&token::Underscore)) {
            // TYPE TO BE INFERRED
            TyInfer
        } else {
            let this_token_str = self.this_token_to_string();
            let msg = format!("expected type, found `{}`", this_token_str);
            return Err(self.fatal(&msg[..]));
        };

        let sp = mk_sp(lo, self.last_span.hi);
        Ok(P(Ty {id: ast::DUMMY_NODE_ID, node: t, span: sp}))
    }

    /// Parse the part of a reference type after the `&`:
    /// optional lifetime, mutability, then the pointee type.
    pub fn parse_borrowed_pointee(&mut self) -> PResult<Ty_> {
        // look for `&'lt` or `&'foo ` and interpret `foo` as the region name:
        let opt_lifetime = try!(self.parse_opt_lifetime());

        let mt = try!(self.parse_mt());
        return Ok(TyRptr(opt_lifetime, mt));
    }

    /// Parse the part of a raw-pointer type after the `*`
    /// (`mut T`, `const T`, or the obsolete bare `T`).
    pub fn parse_ptr(&mut self) -> PResult<MutTy> {
        let mutbl = if try!(self.eat_keyword(keywords::Mut)) {
            MutMutable
        } else if try!(self.eat_keyword(keywords::Const)) {
            MutImmutable
        } else {
            let span = self.last_span;
            self.span_err(span,
                          "bare raw pointers are no longer allowed, you should \
                           likely use `*mut T`, but otherwise `*T` is now \
                           known as `*const T`");
            MutImmutable
        };
        let t = try!(self.parse_ty_nopanic());
        Ok(MutTy { ty: t, mutbl: mutbl })
    }

    /// Does the upcoming input look like a named argument (`pat: ty`)?
    /// Looks past a leading `&`, `&&` or `mut` before the identifier.
    pub fn is_named_argument(&mut self) -> bool {
        let offset = match self.token {
            token::BinOp(token::And) => 1,
            token::AndAnd => 1,
            _ if self.token.is_keyword(keywords::Mut) => 1,
            _ => 0
        };

        debug!("parser is_named_argument offset:{}", offset);

        if offset == 0 {
            is_plain_ident_or_underscore(&self.token)
                && self.look_ahead(1, |t| *t == token::Colon)
        } else {
            self.look_ahead(offset, |t| is_plain_ident_or_underscore(t))
                && self.look_ahead(offset + 1, |t| *t == token::Colon)
        }
    }

    /// This version of parse arg doesn't necessarily require
    /// identifier names.
    pub fn parse_arg_general(&mut self, require_name: bool) -> PResult<Arg> {
        let pat = if require_name || self.is_named_argument() {
            debug!("parse_arg_general parse_pat (require_name:{})",
                   require_name);
            let pat = try!(self.parse_pat_nopanic());

            try!(self.expect(&token::Colon));
            pat
        } else {
            // Anonymous argument: synthesize an invalid-ident pattern so
            // the Arg still carries a pat node.
            debug!("parse_arg_general ident_to_pat");
            ast_util::ident_to_pat(ast::DUMMY_NODE_ID,
                                   self.last_span,
                                   special_idents::invalid)
        };

        let t = try!(self.parse_ty_sum());

        Ok(Arg {
            ty: t,
            pat: pat,
            id: ast::DUMMY_NODE_ID,
        })
    }

    /// Parse a single function argument
    pub fn parse_arg(&mut self) -> PResult<Arg> {
        self.parse_arg_general(true)
    }

    /// Parse an argument in a lambda header e.g. |arg, arg|
    pub fn parse_fn_block_arg(&mut self) -> PResult<Arg> {
        let pat = try!(self.parse_pat_nopanic());
        // The type annotation is optional; omitted means inferred.
        let t = if try!(self.eat(&token::Colon)) {
            try!(self.parse_ty_sum())
        } else {
            P(Ty {
                id: ast::DUMMY_NODE_ID,
                node: TyInfer,
                span: mk_sp(self.span.lo, self.span.hi),
            })
        };
        Ok(Arg {
            ty: t,
            pat: pat,
            id: ast::DUMMY_NODE_ID
        })
    }

    /// Parse the optional `; EXPR` length suffix of a fixed-length
    /// vector type; returns None when no `;` is present.
    pub fn maybe_parse_fixed_length_of_vec(&mut self) -> PResult<Option<P<ast::Expr>>> {
        if self.check(&token::Semi) {
            try!(self.bump());
            Ok(Some(try!(self.parse_expr_nopanic())))
        } else {
            Ok(None)
        }
    }

    /// Matches token_lit = LIT_INTEGER | ...
    pub fn lit_from_token(&self, tok: &token::Token) -> PResult<Lit_> {
        match *tok {
            token::Interpolated(token::NtExpr(ref v)) => {
                match v.node {
                    ExprLit(ref lit) => { Ok(lit.node.clone()) }
                    _ => { return Err(self.unexpected_last(tok)); }
                }
            }
            token::Literal(lit, suf) => {
                // Each arm yields (suffix_illegal, literal node).
                let (suffix_illegal, out) = match lit {
                    token::Byte(i) => (true, LitByte(parse::byte_lit(&i.as_str()).0)),
                    token::Char(i) => (true, LitChar(parse::char_lit(&i.as_str()).0)),

                    // there are some valid suffixes for integer and
                    // float literals, so all the handling is done
                    // internally.
                    token::Integer(s) => {
                        (false, parse::integer_lit(&s.as_str(),
                                                   suf.as_ref().map(|s| s.as_str()),
                                                   &self.sess.span_diagnostic,
                                                   self.last_span))
                    }
                    token::Float(s) => {
                        (false, parse::float_lit(&s.as_str(),
                                                 suf.as_ref().map(|s| s.as_str()),
                                                 &self.sess.span_diagnostic,
                                                 self.last_span))
                    }

                    token::Str_(s) => {
                        (true,
                         LitStr(token::intern_and_get_ident(&parse::str_lit(&s.as_str())),
                                ast::CookedStr))
                    }
                    token::StrRaw(s, n) => {
                        (true,
                         LitStr(
                             token::intern_and_get_ident(&parse::raw_str_lit(&s.as_str())),
                             ast::RawStr(n)))
                    }
                    token::ByteStr(i) =>
                        (true, LitByteStr(parse::byte_str_lit(&i.as_str()))),
                    token::ByteStrRaw(i, _) =>
                        (true,
                         LitByteStr(Rc::new(i.to_string().into_bytes()))),
                };

                if suffix_illegal {
                    let sp = self.last_span;
                    self.expect_no_suffix(sp, &*format!("{} literal", lit.short_name()), suf)
                }

                Ok(out)
            }
            _ => { return Err(self.unexpected_last(tok)); }
        }
    }

    /// Matches lit = true | false | token_lit
    pub fn parse_lit(&mut self) -> PResult<Lit> {
        let lo = self.span.lo;
        let lit = if try!(self.eat_keyword(keywords::True)) {
            LitBool(true)
        } else if try!(self.eat_keyword(keywords::False)) {
            LitBool(false)
        } else {
            let token = try!(self.bump_and_get());
            let lit = try!(self.lit_from_token(&token));
            lit
        };
        Ok(codemap::Spanned { node: lit, span: mk_sp(lo, self.last_span.hi) })
    }

    /// matches '-' lit | lit
    pub fn parse_literal_maybe_minus(&mut self) -> PResult<P<Expr>> {
        let minus_lo = self.span.lo;
        let minus_present = try!(self.eat(&token::BinOp(token::Minus)));

        let lo = self.span.lo;
        let literal = P(try!(self.parse_lit()));
        let hi = self.last_span.hi;
        let expr = self.mk_expr(lo, hi, ExprLit(literal));

        if minus_present {
            // Wrap the literal in a unary negation spanning the `-` too.
            let minus_hi = self.last_span.hi;
            let unary = self.mk_unary(UnNeg, expr);
            Ok(self.mk_expr(minus_lo, minus_hi, unary))
        } else {
            Ok(expr)
        }
    }

    // QUALIFIED PATH `<TYPE [as TRAIT_REF]>::IDENT[::<PARAMS>]`
    // Assumes that the leading `<` has been parsed already.
/// Parse a qualified path `<TYPE [as TRAIT_REF]>::IDENT[::<PARAMS>]`.
/// The leading `<` must already have been consumed by the caller.
///
/// Returns the `QSelf` (self type plus the number of trait-path segments it
/// applies to) and the full path. When no `as TRAIT_REF` is present, the
/// trait path is empty and `qself.position` is 0.
pub fn parse_qualified_path(&mut self, mode: PathParsingMode) -> PResult<(QSelf, ast::Path)> {
    let span = self.last_span;
    let self_type = try!(self.parse_ty_sum());
    let mut path = if try!(self.eat_keyword(keywords::As)) {
        try!(self.parse_path(LifetimeAndTypesWithoutColons))
    } else {
        ast::Path {
            span: span,
            global: false,
            segments: vec![]
        }
    };

    let qself = QSelf {
        ty: self_type,
        position: path.segments.len()
    };

    try!(self.expect(&token::Gt));
    try!(self.expect(&token::ModSep));

    // The segments after `>::` are parsed in whichever mode the caller is in.
    let segments = match mode {
        LifetimeAndTypesWithoutColons => {
            try!(self.parse_path_segments_without_colons())
        }
        LifetimeAndTypesWithColons => {
            try!(self.parse_path_segments_with_colons())
        }
        NoTypesAllowed => {
            try!(self.parse_path_segments_without_types())
        }
    };
    path.segments.extend(segments);

    path.span.hi = self.last_span.hi;

    Ok((qself, path))
}

/// Parses a path and optional type parameter bounds, depending on the
/// mode. The `mode` parameter determines whether lifetimes, types, and/or
/// bounds are permitted and whether `::` must precede type parameter
/// groups.
pub fn parse_path(&mut self, mode: PathParsingMode) -> PResult<ast::Path> {
    // Check for a whole path...
    // (a path may have been interpolated by a macro expansion as `NtPath`)
    let found = match self.token {
        token::Interpolated(token::NtPath(_)) => Some(try!(self.bump_and_get())),
        _ => None,
    };
    if let Some(token::Interpolated(token::NtPath(path))) = found {
        return Ok(*path);
    }

    let lo = self.span.lo;
    let is_global = try!(self.eat(&token::ModSep));

    // Parse any number of segments and bound sets. A segment is an
    // identifier followed by an optional lifetime and a set of types.
    // A bound set is a set of type parameter bounds.
    let segments = match mode {
        LifetimeAndTypesWithoutColons => {
            try!(self.parse_path_segments_without_colons())
        }
        LifetimeAndTypesWithColons => {
            try!(self.parse_path_segments_with_colons())
        }
        NoTypesAllowed => {
            try!(self.parse_path_segments_without_types())
        }
    };

    // Assemble the span.
    let span = mk_sp(lo, self.last_span.hi);

    // Assemble the result.
    Ok(ast::Path {
        span: span,
        global: is_global,
        segments: segments,
    })
}

/// Examples:
/// - `a::b<T,U>::c<V,W>`
/// - `a::b<T,U>::c(V) -> W`
/// - `a::b<T,U>::c(V)`
///
/// Type-position paths: type parameters follow the segment directly (no
/// `::` before `<`), and `Fn`-style parenthesized parameters are accepted.
pub fn parse_path_segments_without_colons(&mut self) -> PResult<Vec<ast::PathSegment>> {
    let mut segments = Vec::new();
    loop {
        // First, parse an identifier.
        let identifier = try!(self.parse_ident_or_self_type());

        // Parse types, optionally.
        let parameters = if try!(self.eat_lt()) {
            let (lifetimes, types, bindings) = try!(self.parse_generic_values_after_lt());

            ast::AngleBracketedParameters(ast::AngleBracketedParameterData {
                lifetimes: lifetimes,
                types: OwnedSlice::from_vec(types),
                bindings: OwnedSlice::from_vec(bindings),
            })
        } else if try!(self.eat(&token::OpenDelim(token::Paren))) {
            // `Fn(A, B) -> C` sugar
            let lo = self.last_span.lo;

            let inputs = try!(self.parse_seq_to_end(
                &token::CloseDelim(token::Paren),
                seq_sep_trailing_allowed(token::Comma),
                |p| p.parse_ty_sum()));

            let output_ty = if try!(self.eat(&token::RArrow)) {
                Some(try!(self.parse_ty_nopanic()))
            } else {
                None
            };

            let hi = self.last_span.hi;

            ast::ParenthesizedParameters(ast::ParenthesizedParameterData {
                span: mk_sp(lo, hi),
                inputs: inputs,
                output: output_ty,
            })
        } else {
            ast::PathParameters::none()
        };

        // Assemble and push the result.
        segments.push(ast::PathSegment { identifier: identifier,
                                         parameters: parameters });

        // Continue only if we see a `::`
        if !try!(self.eat(&token::ModSep)) {
            return Ok(segments);
        }
    }
}

/// Examples:
/// - `a::b::<T,U>::c`
///
/// Expression-position paths: type parameters require a `::` before the `<`
/// (the turbofish), to avoid ambiguity with the less-than operator.
pub fn parse_path_segments_with_colons(&mut self) -> PResult<Vec<ast::PathSegment>> {
    let mut segments = Vec::new();
    loop {
        // First, parse an identifier.
        let identifier = try!(self.parse_ident_or_self_type());

        // If we do not see a `::`, stop.
        if !try!(self.eat(&token::ModSep)) {
            segments.push(ast::PathSegment {
                identifier: identifier,
                parameters: ast::PathParameters::none()
            });
            return Ok(segments);
        }

        // Check for a type segment.
        if try!(self.eat_lt()) {
            // Consumed `a::b::<`, go look for types
            let (lifetimes, types, bindings) = try!(self.parse_generic_values_after_lt());
            segments.push(ast::PathSegment {
                identifier: identifier,
                parameters: ast::AngleBracketedParameters(ast::AngleBracketedParameterData {
                    lifetimes: lifetimes,
                    types: OwnedSlice::from_vec(types),
                    bindings: OwnedSlice::from_vec(bindings),
                }),
            });

            // Consumed `a::b::<T,U>`, check for `::` before proceeding
            if !try!(self.eat(&token::ModSep)) {
                return Ok(segments);
            }
        } else {
            // Consumed `a::`, go look for `b`
            segments.push(ast::PathSegment {
                identifier: identifier,
                parameters: ast::PathParameters::none(),
            });
        }
    }
}

/// Examples:
/// - `a::b::c`
///
/// Mode for positions that allow no type parameters at all (e.g. `use`
/// paths, visibility paths).
pub fn parse_path_segments_without_types(&mut self) -> PResult<Vec<ast::PathSegment>> {
    let mut segments = Vec::new();
    loop {
        // First, parse an identifier.
        let identifier = try!(self.parse_ident_or_self_type());

        // Assemble and push the result.
        segments.push(ast::PathSegment {
            identifier: identifier,
            parameters: ast::PathParameters::none()
        });

        // If we do not see a `::`, stop.
        if !try!(self.eat(&token::ModSep)) {
            return Ok(segments);
        }
    }
}

/// parses 0 or 1 lifetime
pub fn parse_opt_lifetime(&mut self) -> PResult<Option<ast::Lifetime>> {
    match self.token {
        token::Lifetime(..) => {
            Ok(Some(try!(self.parse_lifetime())))
        }
        _ => {
            Ok(None)
        }
    }
}

/// Parses a single lifetime
/// Matches lifetime = LIFETIME
pub fn parse_lifetime(&mut self) -> PResult<ast::Lifetime> {
    match self.token {
        token::Lifetime(i) => {
            let span = self.span;
            try!(self.bump());
            return Ok(ast::Lifetime {
                id: ast::DUMMY_NODE_ID,
                span: span,
                name: i.name
            });
        }
        _ => {
            return Err(self.fatal(&format!("expected a lifetime name")));
        }
    }
}

/// Parses `lifetime_defs = [ lifetime_defs { ',' lifetime_defs } ]` where `lifetime_def  =
/// lifetime [':' lifetimes]`
///
/// Used for generic parameter lists; stops (without consuming) at `>` or
/// `>>` so the caller can finish the angle-bracketed list.
pub fn parse_lifetime_defs(&mut self) -> PResult<Vec<ast::LifetimeDef>> {
    let mut res = Vec::new();
    loop {
        match self.token {
            token::Lifetime(_) => {
                let lifetime = try!(self.parse_lifetime());
                let bounds =
                    if try!(self.eat(&token::Colon)) {
                        try!(self.parse_lifetimes(token::BinOp(token::Plus)))
                    } else {
                        Vec::new()
                    };
                res.push(ast::LifetimeDef { lifetime: lifetime,
                                            bounds: bounds });
            }

            _ => {
                return Ok(res);
            }
        }

        match self.token {
            token::Comma => { try!(self.bump()); }
            token::Gt => { return Ok(res); }
            // `>>` appears when closing two nested generic lists at once
            token::BinOp(token::Shr) => { return Ok(res); }
            _ => {
                let this_token_str = self.this_token_to_string();
                let msg = format!("expected `,` or `>` after lifetime \
                                  name, found `{}`",
                                  this_token_str);
                return Err(self.fatal(&msg[..]));
            }
        }
    }
}

/// matches lifetimes = ( lifetime ) | ( lifetime , lifetimes ) actually, it matches the empty
/// one too, but putting that in there messes up the grammar....
///
/// Parses zero or more comma separated lifetimes. Expects each lifetime to be followed by
/// either a comma or `>`.  Used when parsing type parameter lists, where we expect something
/// like `<'a, 'b, T>`.
/// Parses zero or more lifetimes separated by `sep` (e.g. `,` in generic
/// lists, `+` in lifetime bounds). Stops without error at the first
/// non-lifetime token or the first token that is not `sep`.
pub fn parse_lifetimes(&mut self, sep: token::Token) -> PResult<Vec<ast::Lifetime>> {
    let mut res = Vec::new();
    loop {
        match self.token {
            token::Lifetime(_) => {
                res.push(try!(self.parse_lifetime()));
            }

            _ => {
                return Ok(res);
            }
        }

        if self.token != sep {
            return Ok(res);
        }

        try!(self.bump());
    }
}

/// Parse mutability declaration (mut/const/imm)
///
/// Only the `mut` keyword is consumed here; anything else yields
/// `MutImmutable` without consuming a token.
pub fn parse_mutability(&mut self) -> PResult<Mutability> {
    if try!(self.eat_keyword(keywords::Mut)) {
        Ok(MutMutable)
    } else {
        Ok(MutImmutable)
    }
}

/// Parse ident COLON expr
///
/// One `name: value` field inside a struct literal. The field's span runs
/// from the ident through the end of the value expression.
pub fn parse_field(&mut self) -> PResult<Field> {
    let lo = self.span.lo;
    let i = try!(self.parse_ident());
    let hi = self.last_span.hi;
    try!(self.expect(&token::Colon));
    let e = try!(self.parse_expr_nopanic());
    Ok(ast::Field {
        ident: spanned(lo, hi, i),
        span: mk_sp(lo, e.span.hi),
        expr: e,
    })
}

// --- AST constructor helpers. These build nodes with DUMMY_NODE_ID (real
// --- ids are assigned later) and do not read or advance the token stream.

pub fn mk_expr(&mut self, lo: BytePos, hi: BytePos, node: Expr_) -> P<Expr> {
    P(Expr {
        id: ast::DUMMY_NODE_ID,
        node: node,
        span: mk_sp(lo, hi),
    })
}

pub fn mk_unary(&mut self, unop: ast::UnOp, expr: P<Expr>) -> ast::Expr_ {
    ExprUnary(unop, expr)
}

pub fn mk_binary(&mut self, binop: ast::BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ast::Expr_ {
    ExprBinary(binop, lhs, rhs)
}

pub fn mk_call(&mut self, f: P<Expr>, args: Vec<P<Expr>>) -> ast::Expr_ {
    ExprCall(f, args)
}

fn mk_method_call(&mut self,
                  ident: ast::SpannedIdent,
                  tps: Vec<P<Ty>>,
                  args: Vec<P<Expr>>)
                  -> ast::Expr_ {
    ExprMethodCall(ident, tps, args)
}

pub fn mk_index(&mut self, expr: P<Expr>, idx: P<Expr>) -> ast::Expr_ {
    ExprIndex(expr, idx)
}

pub fn mk_range(&mut self,
                start: Option<P<Expr>>,
                end: Option<P<Expr>>)
                -> ast::Expr_ {
    ExprRange(start, end)
}

pub fn mk_field(&mut self, expr: P<Expr>, ident: ast::SpannedIdent) -> ast::Expr_ {
    ExprField(expr, ident)
}

pub fn mk_tup_field(&mut self, expr: P<Expr>, idx: codemap::Spanned<usize>) -> ast::Expr_ {
    ExprTupField(expr, idx)
}

pub fn mk_assign_op(&mut self, binop: ast::BinOp,
                    lhs: P<Expr>, rhs: P<Expr>) -> ast::Expr_ {
    ExprAssignOp(binop, lhs, rhs)
}

/// Build a macro-invocation expression node spanning `lo..hi`.
pub fn mk_mac_expr(&mut self, lo: BytePos, hi: BytePos, m: Mac_) -> P<Expr> {
    P(Expr {
        id: ast::DUMMY_NODE_ID,
        node: ExprMac(codemap::Spanned {node: m, span: mk_sp(lo, hi)}),
        span: mk_sp(lo, hi),
    })
}

/// Build a `u32` integer-literal expression at the current span.
pub fn mk_lit_u32(&mut self, i: u32) -> P<Expr> {
    let span = &self.span;
    let lv_lit = P(codemap::Spanned {
        node: LitInt(i as u64, ast::UnsignedIntLit(TyU32)),
        span: *span
    });

    P(Expr {
        id: ast::DUMMY_NODE_ID,
        node: ExprLit(lv_lit),
        span: *span,
    })
}

/// Expect any open delimiter (`(`, `[`, `{`), consume it, and return which
/// one was found.
fn expect_open_delim(&mut self) -> PResult<token::DelimToken> {
    // NOTE(review): pushing `token::Gt` as the expected token looks like a
    // copy-paste oddity — an open delimiter is what is expected here. Kept
    // as-is; it only affects diagnostic wording. TODO confirm.
    self.expected_tokens.push(TokenType::Token(token::Gt));
    match self.token {
        token::OpenDelim(delim) => {
            try!(self.bump());
            Ok(delim)
        },
        _ => Err(self.fatal("expected open delimiter")),
    }
}

/// At the bottom (top?) of the precedence hierarchy,
/// parse things like parenthesized exprs,
/// macros, return, etc.
///
/// Handles: parenthesized/tuple exprs, blocks, closures, `self`, vector
/// literals, qualified paths, `move` closures, `if`/`for`/`while`/`loop`
/// (with optional labels), `continue`/`break`, `match`, `unsafe` blocks,
/// `return`, paths, macro invocations, struct literals, and plain literals.
pub fn parse_bottom_expr(&mut self) -> PResult<P<Expr>> {
    maybe_whole_expr!(self);

    let lo = self.span.lo;
    let mut hi = self.span.hi;

    let ex: Expr_;

    // Note: when adding new syntax here, don't forget to adjust Token::can_begin_expr().
    match self.token {
        token::OpenDelim(token::Paren) => {
            try!(self.bump());

            // (e) is parenthesized e
            // (e,) is a tuple with only one field, e
            let mut es = vec![];
            let mut trailing_comma = false;
            while self.token != token::CloseDelim(token::Paren) {
                es.push(try!(self.parse_expr_nopanic()));
                try!(self.commit_expr(&**es.last().unwrap(), &[],
                                 &[token::Comma, token::CloseDelim(token::Paren)]));
                if self.check(&token::Comma) {
                    trailing_comma = true;

                    try!(self.bump());
                } else {
                    trailing_comma = false;
                    break;
                }
            }
            try!(self.bump());

            hi = self.last_span.hi;
            return if es.len() == 1 && !trailing_comma {
                Ok(self.mk_expr(lo, hi, ExprParen(es.into_iter().nth(0).unwrap())))
            } else {
                Ok(self.mk_expr(lo, hi, ExprTup(es)))
            }
        },
        token::OpenDelim(token::Brace) => {
            return self.parse_block_expr(lo, DefaultBlock);
        },
        token::BinOp(token::Or) |  token::OrOr => {
            // `|args| expr` or `|| expr` — by-reference closure
            let lo = self.span.lo;
            return self.parse_lambda_expr(lo, CaptureByRef);
        },
        token::Ident(id @ ast::Ident {
                        name: token::SELF_KEYWORD_NAME,
                        ctxt: _
                     }, token::Plain) => {
            // bare `self` expression
            try!(self.bump());
            let path = ast_util::ident_to_path(mk_sp(lo, hi), id);
            ex = ExprPath(None, path);
            hi = self.last_span.hi;
        }
        token::OpenDelim(token::Bracket) => {
            try!(self.bump());

            if self.check(&token::CloseDelim(token::Bracket)) {
                // Empty vector.
                try!(self.bump());
                ex = ExprVec(Vec::new());
            } else {
                // Nonempty vector.
                let first_expr = try!(self.parse_expr_nopanic());
                if self.check(&token::Semi) {
                    // Repeating array syntax: [ 0; 512 ]
                    try!(self.bump());
                    let count = try!(self.parse_expr_nopanic());
                    try!(self.expect(&token::CloseDelim(token::Bracket)));
                    ex = ExprRepeat(first_expr, count);
                } else if self.check(&token::Comma) {
                    // Vector with two or more elements.
                    try!(self.bump());
                    let remaining_exprs = try!(self.parse_seq_to_end(
                        &token::CloseDelim(token::Bracket),
                        seq_sep_trailing_allowed(token::Comma),
                        |p| Ok(try!(p.parse_expr_nopanic()))
                            ));
                    let mut exprs = vec!(first_expr);
                    exprs.extend(remaining_exprs);
                    ex = ExprVec(exprs);
                } else {
                    // Vector with one element.
                    try!(self.expect(&token::CloseDelim(token::Bracket)));
                    ex = ExprVec(vec!(first_expr));
                }
            }
            hi = self.last_span.hi;
        }
        _ => {
            if try!(self.eat_lt()) {
                // `<T as Trait>::...` qualified path expression
                let (qself, path) =
                    try!(self.parse_qualified_path(LifetimeAndTypesWithColons));
                hi = path.span.hi;
                return Ok(self.mk_expr(lo, hi, ExprPath(Some(qself), path)));
            }
            if try!(self.eat_keyword(keywords::Move)) {
                let lo = self.last_span.lo;
                return self.parse_lambda_expr(lo, CaptureByValue);
            }
            if try!(self.eat_keyword(keywords::If)) {
                return self.parse_if_expr();
            }
            if try!(self.eat_keyword(keywords::For)) {
                let lo = self.last_span.lo;
                return self.parse_for_expr(None, lo);
            }
            if try!(self.eat_keyword(keywords::While)) {
                let lo = self.last_span.lo;
                return self.parse_while_expr(None, lo);
            }
            if self.token.is_lifetime() {
                // a loop label: `'label: while/for/loop ...`
                let lifetime = self.get_lifetime();
                let lo = self.span.lo;
                try!(self.bump());
                try!(self.expect(&token::Colon));
                if try!(self.eat_keyword(keywords::While)) {
                    return self.parse_while_expr(Some(lifetime), lo)
                }
                if try!(self.eat_keyword(keywords::For)) {
                    return self.parse_for_expr(Some(lifetime), lo)
                }
                if try!(self.eat_keyword(keywords::Loop)) {
                    return self.parse_loop_expr(Some(lifetime), lo)
                }
                return Err(self.fatal("expected `while`, `for`, or `loop` after a label"))
            }
            if try!(self.eat_keyword(keywords::Loop)) {
                let lo = self.last_span.lo;
                return self.parse_loop_expr(None, lo);
            }
            if try!(self.eat_keyword(keywords::Continue)) {
                // `continue` with an optional target label
                let ex = if self.token.is_lifetime() {
                    let ex = ExprAgain(Some(Spanned{
                        node: self.get_lifetime(),
                        span: self.span
                    }));
                    try!(self.bump());
                    ex
                } else {
                    ExprAgain(None)
                };
                let hi = self.last_span.hi;
                return Ok(self.mk_expr(lo, hi, ex));
            }
            if try!(self.eat_keyword(keywords::Match)) {
                return self.parse_match_expr();
            }
            if try!(self.eat_keyword(keywords::Unsafe)) {
                return self.parse_block_expr(
                    lo,
                    UnsafeBlock(ast::UserProvided));
            }
            if try!(self.eat_keyword(keywords::Return)) {
                // `return` takes an operand only if the next token can start
                // an expression
                if self.token.can_begin_expr() {
                    let e = try!(self.parse_expr_nopanic());
                    hi = e.span.hi;
                    ex = ExprRet(Some(e));
                } else {
                    ex = ExprRet(None);
                }
            } else if try!(self.eat_keyword(keywords::Break)) {
                // `break` with an optional target label
                if self.token.is_lifetime() {
                    ex = ExprBreak(Some(Spanned {
                        node: self.get_lifetime(),
                        span: self.span
                    }));
                    try!(self.bump());
                } else {
                    ex = ExprBreak(None);
                }
                hi = self.last_span.hi;
            } else if self.check(&token::ModSep) ||
                    self.token.is_ident() &&
                    !self.check_keyword(keywords::True) &&
                    !self.check_keyword(keywords::False) {
                let pth = try!(self.parse_path(LifetimeAndTypesWithColons));

                // `!`, as an operator, is prefix, so we know this isn't that
                if self.check(&token::Not) {
                    // MACRO INVOCATION expression
                    try!(self.bump());

                    let delim = try!(self.expect_open_delim());
                    let tts = try!(self.parse_seq_to_end(
                        &token::CloseDelim(delim),
                        seq_sep_none(),
                        |p| p.parse_token_tree()));
                    let hi = self.last_span.hi;

                    return Ok(self.mk_mac_expr(lo,
                                               hi,
                                               Mac_ { path: pth, tts: tts, ctxt: EMPTY_CTXT }));
                }
                if self.check(&token::OpenDelim(token::Brace)) {
                    // This is a struct literal, unless we're prohibited
                    // from parsing struct literals here.
                    let prohibited = self.restrictions.contains(
                        Restrictions::RESTRICTION_NO_STRUCT_LITERAL
                    );
                    if !prohibited {
                        // It's a struct literal.
                        try!(self.bump());
                        let mut fields = Vec::new();
                        let mut base = None;

                        while self.token != token::CloseDelim(token::Brace) {
                            if try!(self.eat(&token::DotDot)) {
                                // `..base` functional-update syntax; must be
                                // the last thing before the closing brace
                                base = Some(try!(self.parse_expr_nopanic()));
                                break;
                            }

                            fields.push(try!(self.parse_field()));
                            try!(self.commit_expr(&*fields.last().unwrap().expr,
                                             &[token::Comma],
                                             &[token::CloseDelim(token::Brace)]));
                        }

                        hi = self.span.hi;
                        try!(self.expect(&token::CloseDelim(token::Brace)));
                        ex = ExprStruct(pth, fields, base);
                        return Ok(self.mk_expr(lo, hi, ex));
                    }
                }

                hi = pth.span.hi;
                ex = ExprPath(None, pth);
            } else {
                // other literal expression
                let lit = try!(self.parse_lit());
                hi = lit.span.hi;
                ex = ExprLit(P(lit));
            }
        }
    }

    return Ok(self.mk_expr(lo, hi, ex));
}

/// Parse a block or unsafe block
pub fn parse_block_expr(&mut self, lo: BytePos, blk_mode: BlockCheckMode)
                        -> PResult<P<Expr>> {
    try!(self.expect(&token::OpenDelim(token::Brace)));
    let blk = try!(self.parse_block_tail(lo, blk_mode));
    return Ok(self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk)));
}

/// parse a.b or a(13) or a[4] or just a
pub fn parse_dot_or_call_expr(&mut self) -> PResult<P<Expr>> {
    let b = try!(self.parse_bottom_expr());
    self.parse_dot_or_call_expr_with(b)
}

/// Given an already-parsed expression `e0`, repeatedly parse postfix
/// operations on it: field access (`.f`), method calls (`.f(...)`,
/// including `.f::<T>(...)`), tuple indexing (`.0`), calls (`(...)`) and
/// indexing (`[...]`), until none applies.
pub fn parse_dot_or_call_expr_with(&mut self, e0: P<Expr>) -> PResult<P<Expr>> {
    let mut e = e0;
    let lo = e.span.lo;
    let mut hi;
    loop {
        // expr.f
        if try!(self.eat(&token::Dot)) {
            match self.token {
              token::Ident(i, _) => {
                let dot = self.last_span.hi;
                hi = self.span.hi;
                try!(self.bump());
                // optional turbofish: `.f::<T, U>(...)`
                let (_, tys, bindings) = if try!(self.eat(&token::ModSep)) {
                    try!(self.expect_lt());
                    try!(self.parse_generic_values_after_lt())
                } else {
                    (Vec::new(), Vec::new(), Vec::new())
                };

                if !bindings.is_empty() {
                    let last_span = self.last_span;
                    self.span_err(last_span, "type bindings are only permitted on trait paths");
                }

                // expr.f() method call
                match self.token {
                    token::OpenDelim(token::Paren) => {
                        let mut es = try!(self.parse_unspanned_seq(
                            &token::OpenDelim(token::Paren),
                            &token::CloseDelim(token::Paren),
                            seq_sep_trailing_allowed(token::Comma),
                            |p| Ok(try!(p.parse_expr_nopanic()))
                        ));
                        hi = self.last_span.hi;

                        // the receiver becomes the first "argument"
                        es.insert(0, e);
                        let id = spanned(dot, hi, i);
                        let nd = self.mk_method_call(id, tys, es);
                        e = self.mk_expr(lo, hi, nd);
                    }
                    _ => {
                        if !tys.is_empty() {
                            let last_span = self.last_span;
                            self.span_err(last_span,
                                          "field expressions may not \
                                           have type parameters");
                        }

                        let id = spanned(dot, hi, i);
                        let field = self.mk_field(e, id);
                        e = self.mk_expr(lo, hi, field);
                    }
                }
              }
              token::Literal(token::Integer(n), suf) => {
                // tuple-field access: `.0`, `.1`, ...
                let sp = self.span;

                // A tuple index may not have a suffix
                self.expect_no_suffix(sp, "tuple index", suf);

                let dot = self.last_span.hi;
                hi = self.span.hi;
                try!(self.bump());

                let index = n.as_str().parse::<usize>().ok();
                match index {
                    Some(n) => {
                        let id = spanned(dot, hi, n);
                        let field = self.mk_tup_field(e, id);
                        e = self.mk_expr(lo, hi, field);
                    }
                    None => {
                        let last_span = self.last_span;
                        self.span_err(last_span, "invalid tuple or tuple struct index");
                    }
                }
              }
              token::Literal(token::Float(n), _suf) => {
                // `a.1.2` lexes the index as a float literal; report the
                // error and suggest parenthesizing the first index
                try!(self.bump());
                let last_span = self.last_span;
                let fstr = n.as_str();
                self.span_err(last_span,
                              &format!("unexpected token: `{}`", n.as_str()));
                if fstr.chars().all(|x| "0123456789.".contains(x)) {
                    let float = match fstr.parse::<f64>().ok() {
                        Some(f) => f,
                        None => continue,
                    };
                    self.fileline_help(last_span,
                        &format!("try parenthesizing the first index; e.g., `(foo.{}){}`",
                                 float.trunc() as usize,
                                 format!(".{}", fstr.splitn(2, ".").last().unwrap())));
                }
                self.abort_if_errors();
              }
              _ => return Err(self.unexpected())
            }
            continue;
        }
        if self.expr_is_complete(&*e) { break; }
        match self.token {
          // expr(...)
          token::OpenDelim(token::Paren) => {
            let es = try!(self.parse_unspanned_seq(
                &token::OpenDelim(token::Paren),
                &token::CloseDelim(token::Paren),
                seq_sep_trailing_allowed(token::Comma),
                |p| Ok(try!(p.parse_expr_nopanic()))
            ));
            hi = self.last_span.hi;

            let nd = self.mk_call(e, es);
            e = self.mk_expr(lo, hi, nd);
          }

          // expr[...]
          // Could be either an index expression or a slicing expression.
          token::OpenDelim(token::Bracket) => {
            try!(self.bump());
            let ix = try!(self.parse_expr_nopanic());
            hi = self.span.hi;
            try!(self.commit_expr_expecting(&*ix, token::CloseDelim(token::Bracket)));
            let index = self.mk_index(e, ix);
            e = self.mk_expr(lo, hi, index)
          }
          _ => return Ok(e)
        }
    }
    return Ok(e);
}

// Parse unquoted tokens after a `$` in a token tree
//
// Handles `$(...)sep op` repetitions, `$crate`, and `$name[:kind]` macro
// metavariables / matchers.
fn parse_unquoted(&mut self) -> PResult<TokenTree> {
    let mut sp = self.span;
    let (name, namep) = match self.token {
        token::Dollar => {
            try!(self.bump());

            if self.token == token::OpenDelim(token::Paren) {
                // `$( tts )sep op` — a sequence repetition
                let Spanned { node: seq, span: seq_span } = try!(self.parse_seq(
                    &token::OpenDelim(token::Paren),
                    &token::CloseDelim(token::Paren),
                    seq_sep_none(),
                    |p| p.parse_token_tree()
                ));
                let (sep, repeat) = try!(self.parse_sep_and_kleene_op());
                let name_num = macro_parser::count_names(&seq);
                return Ok(TtSequence(mk_sp(sp.lo, seq_span.hi),
                                  Rc::new(SequenceRepetition {
                                      tts: seq,
                                      separator: sep,
                                      op: repeat,
                                      num_captures: name_num
                                  })));
            } else if self.token.is_keyword_allow_following_colon(keywords::Crate) {
                // `$crate` special variable
                try!(self.bump());
                return Ok(TtToken(sp, SpecialVarNt(SpecialMacroVar::CrateMacroVar)));
            } else {
                sp = mk_sp(sp.lo, self.span.hi);
                let namep = match self.token { token::Ident(_, p) => p, _ => token::Plain };
                let name = try!(self.parse_ident());
                (name, namep)
            }
        }
        token::SubstNt(name, namep) => {
            try!(self.bump());
            (name, namep)
        }
        _ => unreachable!()
    };
    // continue by trying to parse the `:ident` after `$name`
    if self.token == token::Colon && self.look_ahead(1, |t| t.is_ident() &&
                                                            !t.is_strict_keyword() &&
                                                            !t.is_reserved_keyword()) {
        try!(self.bump());
        sp = mk_sp(sp.lo, self.span.hi);
        let kindp = match self.token { token::Ident(_, p) => p, _ => token::Plain };
        let nt_kind = try!(self.parse_ident());
        Ok(TtToken(sp, MatchNt(name, nt_kind, namep, kindp)))
    } else {
        Ok(TtToken(sp, SubstNt(name, namep)))
    }
}

/// Report a fatal error if a `$name` metavariable appears outside a macro
/// definition (i.e. when `quote_depth` is 0).
pub fn check_unknown_macro_variable(&mut self) -> PResult<()> {
    if self.quote_depth == 0 {
        match self.token {
            token::SubstNt(name, _) =>
                return Err(self.fatal(&format!("unknown macro variable `{}`",
                                       name))),
            _ => {}
        }
    }
    Ok(())
}

/// Parse an optional separator followed by a Kleene-style
/// repetition token (+ or *).
pub fn parse_sep_and_kleene_op(&mut self) -> PResult<(Option<token::Token>, ast::KleeneOp)> {
    fn parse_kleene_op(parser: &mut Parser) -> PResult<Option<ast::KleeneOp>> {
        match parser.token {
            token::BinOp(token::Star) => {
                try!(parser.bump());
                Ok(Some(ast::ZeroOrMore))
            },
            token::BinOp(token::Plus) => {
                try!(parser.bump());
                Ok(Some(ast::OneOrMore))
            },
            _ => Ok(None)
        }
    };

    // A bare `*`/`+` means no separator.
    match try!(parse_kleene_op(self)) {
        Some(kleene_op) => return Ok((None, kleene_op)),
        None => {}
    }

    // Otherwise, the next token is the separator and MUST be followed by a
    // Kleene op.
    let separator = try!(self.bump_and_get());
    match try!(parse_kleene_op(self)) {
        Some(zerok) => Ok((Some(separator), zerok)),
        None => return Err(self.fatal("expected `*` or `+`"))
    }
}

/// parse a single token tree from the input.
pub fn parse_token_tree(&mut self) -> PResult<TokenTree> {
    // FIXME #6994: currently, this is too eager. It
    // parses token trees but also identifies TtSequence's
    // and token::SubstNt's; it's too early to know yet
    // whether something will be a nonterminal or a seq
    // yet.
    maybe_whole!(deref self, NtTT);

    // this is the fall-through for the 'match' below.
    // invariants: the current token is not a left-delimiter,
    // not an EOF, and not the desired right-delimiter (if
    // it were, parse_seq_to_before_end would have prevented
    // reaching this point.
    fn parse_non_delim_tt_tok(p: &mut Parser) -> PResult<TokenTree> {
        maybe_whole!(deref p, NtTT);
        match p.token {
            token::CloseDelim(_) => {
                // This is a conservative error: only report the last unclosed delimiter. The
                // previous unclosed delimiters could actually be closed! The parser just hasn't
                // gotten to them yet.
                match p.open_braces.last() {
                    None => {}
                    Some(&sp) => p.span_note(sp, "unclosed delimiter"),
                };
                let token_str = p.this_token_to_string();
                Err(p.fatal(&format!("incorrect close delimiter: `{}`",
                                token_str)))
            },
            /* we ought to allow different depths of unquotation */
            token::Dollar | token::SubstNt(..) if p.quote_depth > 0 => {
                p.parse_unquoted()
            }
            _ => {
                Ok(TtToken(p.span, try!(p.bump_and_get())))
            }
        }
    }

    match self.token {
        token::Eof => {
            let open_braces = self.open_braces.clone();
            for sp in &open_braces {
                self.span_help(*sp, "did you mean to close this delimiter?");
            }
            // There shouldn't really be a span, but it's easier for the test runner
            // if we give it one
            return Err(self.fatal("this file contains an un-closed delimiter "));
        },
        token::OpenDelim(delim) => {
            // The span for beginning of the delimited section
            let pre_span = self.span;

            // Parse the open delimiter.
            self.open_braces.push(self.span);
            let open_span = self.span;
            try!(self.bump());

            // Parse the token trees within the delimiters
            let tts = try!(self.parse_seq_to_before_end(
                &token::CloseDelim(delim),
                seq_sep_none(),
                |p| p.parse_token_tree()
            ));

            // Parse the close delimiter.
            let close_span = self.span;
            try!(self.bump());
            self.open_braces.pop().unwrap();

            // Expand to cover the entire delimited token tree
            let span = Span { hi: close_span.hi, ..pre_span };

            Ok(TtDelimited(span, Rc::new(Delimited {
                delim: delim,
                open_span: open_span,
                tts: tts,
                close_span: close_span,
            })))
        },
        _ => parse_non_delim_tt_tok(self),
    }
}

// parse a stream of tokens into a list of TokenTree's,
// up to EOF.
/// Parse token trees until EOF; used for whole-file tokenization.
pub fn parse_all_token_trees(&mut self) -> PResult<Vec<TokenTree>> {
    let mut tts = Vec::new();
    while self.token != token::Eof {
        tts.push(try!(self.parse_token_tree()));
    }
    Ok(tts)
}

/// Parse a prefix-operator expr
///
/// Handles `!`, unary `-`, `*` (deref), `&`/`&&` (address-of with optional
/// `mut`), `in PLACE { BLOCK }` placement, and `box EXPR`; anything else
/// falls through to `parse_dot_or_call_expr`.
pub fn parse_prefix_expr(&mut self) -> PResult<P<Expr>> {
    let lo = self.span.lo;
    let hi;

    // Note: when adding new unary operators, don't forget to adjust Token::can_begin_expr()
    let ex;
    match self.token {
        token::Not => {
            try!(self.bump());
            let e = try!(self.parse_prefix_expr());
            hi = e.span.hi;
            ex = self.mk_unary(UnNot, e);
        }
        token::BinOp(token::Minus) => {
            try!(self.bump());
            let e = try!(self.parse_prefix_expr());
            hi = e.span.hi;
            ex = self.mk_unary(UnNeg, e);
        }
        token::BinOp(token::Star) => {
            try!(self.bump());
            let e = try!(self.parse_prefix_expr());
            hi = e.span.hi;
            ex = self.mk_unary(UnDeref, e);
        }
        token::BinOp(token::And) | token::AndAnd => {
            // expect_and splits `&&` into two `&`s when needed
            try!(self.expect_and());
            let m = try!(self.parse_mutability());
            let e = try!(self.parse_prefix_expr());
            hi = e.span.hi;
            ex = ExprAddrOf(m, e);
        }
        token::Ident(..) if self.token.is_keyword(keywords::In) => {
            // `in PLACE { EXPR }` placement-new syntax; the struct-literal
            // restriction keeps the `{` from being eaten as part of PLACE
            try!(self.bump());
            let place = try!(self.parse_expr_res(
                Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
            let blk = try!(self.parse_block());
            hi = blk.span.hi;
            let blk_expr = self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk));
            ex = ExprInPlace(place, blk_expr);
        }
        token::Ident(..) if self.token.is_keyword(keywords::Box) => {
            try!(self.bump());
            let subexpression = try!(self.parse_prefix_expr());
            hi = subexpression.span.hi;
            ex = ExprBox(subexpression);
        }
        _ => return self.parse_dot_or_call_expr()
    }
    return Ok(self.mk_expr(lo, hi, ex));
}

/// Parse an expression of binops
pub fn parse_binops(&mut self) -> PResult<P<Expr>> {
    let prefix_expr = try!(self.parse_prefix_expr());
    self.parse_more_binops(prefix_expr, 0)
}

/// Parse an expression of binops of at least min_prec precedence
///
/// Classic precedence-climbing: while the next operator binds at least as
/// tightly as `min_prec`, parse the rhs at `cur_prec + 1` and fold. `as`
/// casts are handled in the no-binop branch at `AS_PREC`.
pub fn parse_more_binops(&mut self, lhs: P<Expr>, min_prec: usize) -> PResult<P<Expr>> {
    if self.expr_is_complete(&*lhs) { return Ok(lhs); }

    self.expected_tokens.push(TokenType::Operator);

    let cur_op_span = self.span;
    let cur_opt = self.token.to_binop();
    match cur_opt {
        Some(cur_op) => {
            if ast_util::is_comparison_binop(cur_op) {
                self.check_no_chained_comparison(&*lhs, cur_op)
            }
            let cur_prec = operator_prec(cur_op);
            if cur_prec >= min_prec {
                try!(self.bump());
                let expr = try!(self.parse_prefix_expr());
                let rhs = try!(self.parse_more_binops(expr, cur_prec + 1));
                let lhs_span = lhs.span;
                let rhs_span = rhs.span;
                let binary = self.mk_binary(codemap::respan(cur_op_span, cur_op), lhs, rhs);
                let bin = self.mk_expr(lhs_span.lo, rhs_span.hi, binary);
                self.parse_more_binops(bin, min_prec)
            } else {
                Ok(lhs)
            }
        }
        None => {
            if AS_PREC >= min_prec && try!(self.eat_keyword_noexpect(keywords::As)) {
                let rhs = try!(self.parse_ty_nopanic());
                let _as = self.mk_expr(lhs.span.lo,
                                       rhs.span.hi,
                                       ExprCast(lhs, rhs));
                self.parse_more_binops(_as, min_prec)
            } else {
                Ok(lhs)
            }
        }
    }
}

/// Produce an error if comparison operators are chained (RFC #558).
/// We only need to check lhs, not rhs, because all comparison ops /// have same precedence and are left-associative fn check_no_chained_comparison(&mut self, lhs: &Expr, outer_op: ast::BinOp_) { debug_assert!(ast_util::is_comparison_binop(outer_op)); match lhs.node { ExprBinary(op, _, _) if ast_util::is_comparison_binop(op.node) => { // respan to include both operators let op_span = mk_sp(op.span.lo, self.span.hi); self.span_err(op_span, "chained comparison operators require parentheses"); if op.node == BiLt && outer_op == BiGt { self.fileline_help(op_span, "use `::<...>` instead of `<...>` if you meant to specify type arguments"); } } _ => {} } } /// Parse an assignment expression.... /// actually, this seems to be the main entry point for /// parsing an arbitrary expression. pub fn parse_assign_expr(&mut self) -> PResult<P<Expr>> { match self.token { token::DotDot => { // prefix-form of range notation '..expr' // This has the same precedence as assignment expressions // (much lower than other prefix expressions) to be consistent // with the postfix-form 'expr..' 
let lo = self.span.lo;
let mut hi = self.span.hi;
try!(self.bump());
// `..` with nothing after it is a full range; otherwise parse the end bound.
let opt_end = if self.is_at_start_of_range_notation_rhs() {
    let end = try!(self.parse_binops());
    hi = end.span.hi;
    Some(end)
} else {
    None
};
let ex = self.mk_range(None, opt_end);
Ok(self.mk_expr(lo, hi, ex))
}
_ => {
    let lhs = try!(self.parse_binops());
    self.parse_assign_expr_with(lhs)
}
}
}

/// Given an already-parsed `lhs`, parse any trailing `=`, `op=`, or `..`
/// and build the corresponding assignment/compound-assignment/range expr.
pub fn parse_assign_expr_with(&mut self, lhs: P<Expr>) -> PResult<P<Expr>> {
    let restrictions = self.restrictions & Restrictions::RESTRICTION_NO_STRUCT_LITERAL;
    let op_span = self.span;
    match self.token {
        token::Eq => {
            try!(self.bump());
            let rhs = try!(self.parse_expr_res(restrictions));
            Ok(self.mk_expr(lhs.span.lo, rhs.span.hi, ExprAssign(lhs, rhs)))
        }
        token::BinOpEq(op) => {
            try!(self.bump());
            let rhs = try!(self.parse_expr_res(restrictions));
            // Map the compound-assignment token to its binary-op AST node.
            let aop = match op {
                token::Plus =>    BiAdd,
                token::Minus =>   BiSub,
                token::Star =>    BiMul,
                token::Slash =>   BiDiv,
                token::Percent => BiRem,
                token::Caret =>   BiBitXor,
                token::And =>     BiBitAnd,
                token::Or =>      BiBitOr,
                token::Shl =>     BiShl,
                token::Shr =>     BiShr
            };
            let rhs_span = rhs.span;
            let span = lhs.span;
            let assign_op = self.mk_assign_op(codemap::respan(op_span, aop), lhs, rhs);
            Ok(self.mk_expr(span.lo, rhs_span.hi, assign_op))
        }
        // A range expression, either `expr..expr` or `expr..`.
        token::DotDot => {
            let lo = lhs.span.lo;
            let mut hi = self.span.hi;
            try!(self.bump());
            let opt_end = if self.is_at_start_of_range_notation_rhs() {
                let end = try!(self.parse_binops());
                hi = end.span.hi;
                Some(end)
            } else {
                None
            };
            let range = self.mk_range(Some(lhs), opt_end);
            return Ok(self.mk_expr(lo, hi, range));
        }
        _ => {
            Ok(lhs)
        }
    }
}

/// Does the current token begin the right-hand side of `..`?
fn is_at_start_of_range_notation_rhs(&self) -> bool {
    if self.token.can_begin_expr() {
        // parse `for i in 1.. { }` as infinite loop, not as `for i in (1..{})`.
if self.token == token::OpenDelim(token::Brace) {
    return !self.restrictions.contains(Restrictions::RESTRICTION_NO_STRUCT_LITERAL);
}
true
} else {
    false
}
}

/// Parse an 'if' or 'if let' expression ('if' token already eaten)
pub fn parse_if_expr(&mut self) -> PResult<P<Expr>> {
    if self.check_keyword(keywords::Let) {
        return self.parse_if_let_expr();
    }
    let lo = self.last_span.lo;
    // No struct literals in the condition, so `if x { .. }` parses the
    // braces as the `then` block rather than a struct literal.
    let cond = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
    let thn = try!(self.parse_block());
    let mut els: Option<P<Expr>> = None;
    let mut hi = thn.span.hi;
    if try!(self.eat_keyword(keywords::Else)) {
        let elexpr = try!(self.parse_else_expr());
        hi = elexpr.span.hi;
        els = Some(elexpr);
    }
    Ok(self.mk_expr(lo, hi, ExprIf(cond, thn, els)))
}

/// Parse an 'if let' expression ('if' token already eaten)
pub fn parse_if_let_expr(&mut self) -> PResult<P<Expr>> {
    let lo = self.last_span.lo;
    try!(self.expect_keyword(keywords::Let));
    let pat = try!(self.parse_pat_nopanic());
    try!(self.expect(&token::Eq));
    let expr = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
    let thn = try!(self.parse_block());
    let (hi, els) = if try!(self.eat_keyword(keywords::Else)) {
        let expr = try!(self.parse_else_expr());
        (expr.span.hi, Some(expr))
    } else {
        (thn.span.hi, None)
    };
    Ok(self.mk_expr(lo, hi, ExprIfLet(pat, expr, thn, els)))
}

// `|args| expr`
pub fn parse_lambda_expr(&mut self, lo: BytePos, capture_clause: CaptureClause)
                         -> PResult<P<Expr>> {
    let decl = try!(self.parse_fn_block_decl());
    let body = match decl.output {
        DefaultReturn(_) => {
            // If no explicit return type is given, parse any
            // expr and wrap it up in a dummy block:
            let body_expr = try!(self.parse_expr_nopanic());
            P(ast::Block {
                id: ast::DUMMY_NODE_ID,
                stmts: vec![],
                span: body_expr.span,
                expr: Some(body_expr),
                rules: DefaultBlock,
            })
        }
        _ => {
            // If an explicit return type is given, require a
            // block to appear (RFC 968).
try!(self.parse_block())
}
};
Ok(self.mk_expr(
    lo,
    body.span.hi,
    ExprClosure(capture_clause, decl, body)))
}

/// Parse the expression after `else`: either another `if` or a block.
pub fn parse_else_expr(&mut self) -> PResult<P<Expr>> {
    if try!(self.eat_keyword(keywords::If)) {
        return self.parse_if_expr();
    } else {
        let blk = try!(self.parse_block());
        return Ok(self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk)));
    }
}

/// Parse a 'for' .. 'in' expression ('for' token already eaten)
pub fn parse_for_expr(&mut self, opt_ident: Option<ast::Ident>,
                      span_lo: BytePos) -> PResult<P<Expr>> {
    // Parse: `for <src_pat> in <src_expr> <src_loop_block>`
    let pat = try!(self.parse_pat_nopanic());
    try!(self.expect_keyword(keywords::In));
    let expr = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
    let loop_block = try!(self.parse_block());
    let hi = self.last_span.hi;
    Ok(self.mk_expr(span_lo, hi, ExprForLoop(pat, expr, loop_block, opt_ident)))
}

/// Parse a 'while' or 'while let' expression ('while' token already eaten)
pub fn parse_while_expr(&mut self, opt_ident: Option<ast::Ident>,
                        span_lo: BytePos) -> PResult<P<Expr>> {
    if self.token.is_keyword(keywords::Let) {
        return self.parse_while_let_expr(opt_ident, span_lo);
    }
    let cond = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
    let body = try!(self.parse_block());
    let hi = body.span.hi;
    return Ok(self.mk_expr(span_lo, hi, ExprWhile(cond, body, opt_ident)));
}

/// Parse a 'while let' expression ('while' token already eaten)
pub fn parse_while_let_expr(&mut self, opt_ident: Option<ast::Ident>,
                            span_lo: BytePos) -> PResult<P<Expr>> {
    try!(self.expect_keyword(keywords::Let));
    let pat = try!(self.parse_pat_nopanic());
    try!(self.expect(&token::Eq));
    let expr = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
    let body = try!(self.parse_block());
    let hi = body.span.hi;
    return Ok(self.mk_expr(span_lo, hi, ExprWhileLet(pat, expr, body, opt_ident)));
}

/// Parse a `loop` expression ('loop' token already eaten).
pub fn parse_loop_expr(&mut self, opt_ident: Option<ast::Ident>,
                       span_lo: BytePos) ->
PResult<P<Expr>> {
    let body = try!(self.parse_block());
    let hi = body.span.hi;
    Ok(self.mk_expr(span_lo, hi, ExprLoop(body, opt_ident)))
}

/// Parse a `match` expression ('match' token already eaten).
fn parse_match_expr(&mut self) -> PResult<P<Expr>> {
    let lo = self.last_span.lo;
    // No struct literal in the discriminant: `match x { .. }` must take the
    // braces as the arm list.
    let discriminant = try!(self.parse_expr_res(
        Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
    try!(self.commit_expr_expecting(&*discriminant, token::OpenDelim(token::Brace)));
    let mut arms: Vec<Arm> = Vec::new();
    while self.token != token::CloseDelim(token::Brace) {
        arms.push(try!(self.parse_arm_nopanic()));
    }
    let hi = self.span.hi;
    try!(self.bump());
    return Ok(self.mk_expr(lo, hi, ExprMatch(discriminant, arms)));
}

/// Parse one match arm: `attrs pat | pat (if guard)? => expr ,?`.
pub fn parse_arm_nopanic(&mut self) -> PResult<Arm> {
    maybe_whole!(no_clone self, NtArm);

    let attrs = self.parse_outer_attributes();
    let pats = try!(self.parse_pats());
    let mut guard = None;
    if try!(self.eat_keyword(keywords::If)) {
        guard = Some(try!(self.parse_expr_nopanic()));
    }
    try!(self.expect(&token::FatArrow));
    let expr = try!(self.parse_expr_res(Restrictions::RESTRICTION_STMT_EXPR));

    // A comma after the arm body is optional only for simple-block bodies
    // and for the last arm before the closing brace.
    let require_comma =
        !classify::expr_is_simple_block(&*expr)
        && self.token != token::CloseDelim(token::Brace);

    if require_comma {
        try!(self.commit_expr(&*expr, &[token::Comma], &[token::CloseDelim(token::Brace)]));
    } else {
        try!(self.eat(&token::Comma));
    }

    Ok(ast::Arm {
        attrs: attrs,
        pats: pats,
        guard: guard,
        body: expr,
    })
}

/// Parse an expression
pub fn parse_expr_nopanic(&mut self) -> PResult<P<Expr>> {
    self.parse_expr_res(Restrictions::empty())
}

/// Parse an expression, subject to the given restrictions
pub fn parse_expr_res(&mut self, r: Restrictions) -> PResult<P<Expr>> {
    // Swap in `r` for the duration of the sub-parse, then restore.
    let old = self.restrictions;
    self.restrictions = r;
    let e = try!(self.parse_assign_expr());
    self.restrictions = old;
    return Ok(e);
}

/// Parse the RHS of a local variable declaration (e.g.
/// '= 14;') — continuation of the doc comment above: parses the optional
/// `= expr` initializer of a `let`.
fn parse_initializer(&mut self) -> PResult<Option<P<Expr>>> {
    if self.check(&token::Eq) {
        try!(self.bump());
        Ok(Some(try!(self.parse_expr_nopanic())))
    } else {
        Ok(None)
    }
}

/// Parse patterns, separated by '|' s
fn parse_pats(&mut self) -> PResult<Vec<P<Pat>>> {
    let mut pats = Vec::new();
    loop {
        pats.push(try!(self.parse_pat_nopanic()));
        if self.check(&token::BinOp(token::Or)) {
            try!(self.bump());
        } else {
            return Ok(pats);
        }
    };
}

/// Parse the comma-separated elements of a tuple pattern (after `(`).
fn parse_pat_tuple_elements(&mut self) -> PResult<Vec<P<Pat>>> {
    let mut fields = vec![];
    if !self.check(&token::CloseDelim(token::Paren)) {
        fields.push(try!(self.parse_pat_nopanic()));
        if self.look_ahead(1, |t| *t != token::CloseDelim(token::Paren)) {
            while try!(self.eat(&token::Comma)) &&
                  !self.check(&token::CloseDelim(token::Paren)) {
                fields.push(try!(self.parse_pat_nopanic()));
            }
        }
        if fields.len() == 1 {
            // A one-element tuple pattern requires a trailing comma;
            // `(pat)` without one would be ambiguous with grouping.
            try!(self.expect(&token::Comma));
        }
    }
    Ok(fields)
}

/// Parse the elements of a slice pattern: the parts before a `..` slice,
/// the optional slice binding itself, and the parts after it.
fn parse_pat_vec_elements(
    &mut self,
) -> PResult<(Vec<P<Pat>>, Option<P<Pat>>, Vec<P<Pat>>)> {
    let mut before = Vec::new();
    let mut slice = None;
    let mut after = Vec::new();
    let mut first = true;
    let mut before_slice = true;

    while self.token != token::CloseDelim(token::Bracket) {
        if first {
            first = false;
        } else {
            try!(self.expect(&token::Comma));

            if self.token == token::CloseDelim(token::Bracket)
                    && (before_slice || !after.is_empty()) {
                break
            }
        }

        if before_slice {
            if self.check(&token::DotDot) {
                try!(self.bump());

                if self.check(&token::Comma) ||
                        self.check(&token::CloseDelim(token::Bracket)) {
                    // A bare `..` matches any number of elements without binding.
                    slice = Some(P(ast::Pat {
                        id: ast::DUMMY_NODE_ID,
                        node: PatWild(PatWildMulti),
                        span: self.span,
                    }));
                    before_slice = false;
                }
                continue
            }
        }

        let subpat = try!(self.parse_pat_nopanic());
        if before_slice && self.check(&token::DotDot) {
            // `subpat..`: the preceding pattern binds the sliced middle.
            try!(self.bump());
            slice = Some(subpat);
            before_slice = false;
        } else if before_slice {
            before.push(subpat);
        } else {
            after.push(subpat);
        }
    }

    Ok((before, slice, after))
}

/// Parse the fields of a struct-like pattern
fn parse_pat_fields(&mut self) ->
PResult<(Vec<codemap::Spanned<ast::FieldPat>> , bool)> {
    let mut fields = Vec::new();
    let mut etc = false;
    let mut first = true;
    while self.token != token::CloseDelim(token::Brace) {
        if first {
            first = false;
        } else {
            try!(self.expect(&token::Comma));
            // accept trailing commas
            if self.check(&token::CloseDelim(token::Brace)) { break }
        }

        let lo = self.span.lo;
        let hi;

        if self.check(&token::DotDot) {
            // `..` must be the last thing before the closing brace.
            try!(self.bump());
            if self.token != token::CloseDelim(token::Brace) {
                let token_str = self.this_token_to_string();
                return Err(self.fatal(&format!("expected `{}`, found `{}`", "}",
                                               token_str)))
            }
            etc = true;
            break;
        }

        // Check if a colon exists one ahead. This means we're parsing a fieldname.
        let (subpat, fieldname, is_shorthand) = if self.look_ahead(1, |t| t == &token::Colon) {
            // Parsing a pattern of the form "fieldname: pat"
            let fieldname = try!(self.parse_ident());
            try!(self.bump());
            let pat = try!(self.parse_pat_nopanic());
            hi = pat.span.hi;
            (pat, fieldname, false)
        } else {
            // Parsing a pattern of the form "(box) (ref) (mut) fieldname"
            let is_box = try!(self.eat_keyword(keywords::Box));
            let boxed_span_lo = self.span.lo;
            let is_ref = try!(self.eat_keyword(keywords::Ref));
            let is_mut = try!(self.eat_keyword(keywords::Mut));
            let fieldname = try!(self.parse_ident());
            hi = self.last_span.hi;

            let bind_type = match (is_ref, is_mut) {
                (true, true) => BindByRef(MutMutable),
                (true, false) => BindByRef(MutImmutable),
                (false, true) => BindByValue(MutMutable),
                (false, false) => BindByValue(MutImmutable),
            };
            let fieldpath = codemap::Spanned{span:self.last_span, node:fieldname};
            let fieldpat = P(ast::Pat{
                id: ast::DUMMY_NODE_ID,
                node: PatIdent(bind_type, fieldpath, None),
                span: mk_sp(boxed_span_lo, hi),
            });

            let subpat = if is_box {
                // `box fieldname` wraps the ident binding in a PatBox.
                P(ast::Pat{
                    id: ast::DUMMY_NODE_ID,
                    node: PatBox(fieldpat),
                    span: mk_sp(lo, hi),
                })
            } else {
                fieldpat
            };
            (subpat, fieldname, true)
        };

        fields.push(codemap::Spanned { span: mk_sp(lo, hi),
                                       node: ast::FieldPat { ident: fieldname,
                                                             pat: subpat,
                                                             is_shorthand:
is_shorthand }});
}
return Ok((fields, etc));
}

/// Parse the end bound of a range pattern: a (possibly qualified) path or a
/// literal with optional leading minus.
fn parse_pat_range_end(&mut self) -> PResult<P<Expr>> {
    if self.is_path_start() {
        let lo = self.span.lo;
        let (qself, path) = if try!(self.eat_lt()) {
            // Parse a qualified path
            let (qself, path) =
                try!(self.parse_qualified_path(NoTypesAllowed));
            (Some(qself), path)
        } else {
            // Parse an unqualified path
            (None, try!(self.parse_path(LifetimeAndTypesWithColons)))
        };
        let hi = self.last_span.hi;
        Ok(self.mk_expr(lo, hi, ExprPath(qself, path)))
    } else {
        self.parse_literal_maybe_minus()
    }
}

/// Can the current token begin a path? `true`/`false` are excluded because
/// they are literal patterns, not paths.
fn is_path_start(&self) -> bool {
    (self.token == token::Lt || self.token == token::ModSep
        || self.token.is_ident() || self.token.is_path())
        && !self.token.is_keyword(keywords::True) && !self.token.is_keyword(keywords::False)
}

/// Parse a pattern.
pub fn parse_pat_nopanic(&mut self) -> PResult<P<Pat>> {
    maybe_whole!(self, NtPat);

    let lo = self.span.lo;
    let pat;
    match self.token {
        token::Underscore => {
            // Parse _
            try!(self.bump());
            pat = PatWild(PatWildSingle);
        }
        token::BinOp(token::And) | token::AndAnd => {
            // Parse &pat / &mut pat
            try!(self.expect_and());
            let mutbl = try!(self.parse_mutability());
            let subpat = try!(self.parse_pat_nopanic());
            pat = PatRegion(subpat, mutbl);
        }
        token::OpenDelim(token::Paren) => {
            // Parse (pat,pat,pat,...) as tuple pattern
            try!(self.bump());
            let fields = try!(self.parse_pat_tuple_elements());
            try!(self.expect(&token::CloseDelim(token::Paren)));
            pat = PatTup(fields);
        }
        token::OpenDelim(token::Bracket) => {
            // Parse [pat,pat,...]
// as slice pattern (continuation of the comment above).
try!(self.bump());
let (before, slice, after) = try!(self.parse_pat_vec_elements());
try!(self.expect(&token::CloseDelim(token::Bracket)));
pat = PatVec(before, slice, after);
}
_ => {
    // At this point, token != _, &, &&, (, [
    if try!(self.eat_keyword(keywords::Mut)) {
        // Parse mut ident @ pat
        pat = try!(self.parse_pat_ident(BindByValue(MutMutable)));
    } else if try!(self.eat_keyword(keywords::Ref)) {
        // Parse ref ident @ pat / ref mut ident @ pat
        let mutbl = try!(self.parse_mutability());
        pat = try!(self.parse_pat_ident(BindByRef(mutbl)));
    } else if try!(self.eat_keyword(keywords::Box)) {
        // Parse box pat
        let subpat = try!(self.parse_pat_nopanic());
        pat = PatBox(subpat);
    } else if self.is_path_start() {
        // Parse pattern starting with a path
        if self.token.is_plain_ident() && self.look_ahead(1, |t| *t != token::DotDotDot &&
                *t != token::OpenDelim(token::Brace) &&
                *t != token::OpenDelim(token::Paren) &&
                // Contrary to its definition, a plain ident can be followed by :: in macros
                *t != token::ModSep) {
            // Plain idents have some extra abilities here compared to general paths
            if self.look_ahead(1, |t| *t == token::Not) {
                // Parse macro invocation
                let ident = try!(self.parse_ident());
                let ident_span = self.last_span;
                let path = ident_to_path(ident_span, ident);
                try!(self.bump());
                let delim = try!(self.expect_open_delim());
                let tts = try!(self.parse_seq_to_end(&token::CloseDelim(delim),
                                                     seq_sep_none(),
                                                     |p| p.parse_token_tree()));
                let mac = Mac_ { path: path, tts: tts, ctxt: EMPTY_CTXT };
                pat = PatMac(codemap::Spanned {node: mac, span: self.span});
            } else {
                // Parse ident @ pat
                // This can give false positives and parse nullary enums,
                // they are dealt with later in resolve
                pat = try!(self.parse_pat_ident(BindByValue(MutImmutable)));
            }
        } else {
            let (qself, path) = if try!(self.eat_lt()) {
                // Parse a qualified path
                let (qself, path) =
                    try!(self.parse_qualified_path(NoTypesAllowed));
                (Some(qself), path)
            } else {
                // Parse an unqualified path
                (None,
try!(self.parse_path(LifetimeAndTypesWithColons)))
};
// Decide what kind of path pattern this is from the following token.
match self.token {
    token::DotDotDot => {
        // Parse range
        let hi = self.last_span.hi;
        let begin = self.mk_expr(lo, hi, ExprPath(qself, path));
        try!(self.bump());
        let end = try!(self.parse_pat_range_end());
        pat = PatRange(begin, end);
    }
    token::OpenDelim(token::Brace) => {
        if qself.is_some() {
            let span = self.span;
            self.span_err(span,
                          "unexpected `{` after qualified path");
            self.abort_if_errors();
        }
        // Parse struct pattern
        try!(self.bump());
        let (fields, etc) = try!(self.parse_pat_fields());
        try!(self.bump());
        pat = PatStruct(path, fields, etc);
    }
    token::OpenDelim(token::Paren) => {
        if qself.is_some() {
            let span = self.span;
            self.span_err(span, "unexpected `(` after qualified path");
            self.abort_if_errors();
        }
        // Parse tuple struct or enum pattern
        if self.look_ahead(1, |t| *t == token::DotDot) {
            // This is a "top constructor only" pat
            try!(self.bump());
            try!(self.bump());
            try!(self.expect(&token::CloseDelim(token::Paren)));
            pat = PatEnum(path, None);
        } else {
            let args = try!(self.parse_enum_variant_seq(
                    &token::OpenDelim(token::Paren),
                    &token::CloseDelim(token::Paren),
                    seq_sep_trailing_allowed(token::Comma),
                    |p| p.parse_pat_nopanic()));
            pat = PatEnum(path, Some(args));
        }
    }
    _ if qself.is_some() => {
        // Parse qualified path
        pat = PatQPath(qself.unwrap(), path);
    }
    _ => {
        // Parse nullary enum
        pat = PatEnum(path, Some(vec![]));
    }
}
}
} else {
    // Try to parse everything else as literal with optional minus
    let begin = try!(self.parse_literal_maybe_minus());
    if try!(self.eat(&token::DotDotDot)) {
        let end = try!(self.parse_pat_range_end());
        pat = PatRange(begin, end);
    } else {
        pat = PatLit(begin);
    }
}
}
}

let hi = self.last_span.hi;
Ok(P(ast::Pat {
    id: ast::DUMMY_NODE_ID,
    node: pat,
    span: mk_sp(lo, hi),
}))
}

/// Parse ident or ident @ pat
/// used by the copy foo and ref foo patterns to give a good
/// error message when parsing mistakes like ref foo(a,b)
fn parse_pat_ident(&mut self,
                   binding_mode: ast::BindingMode)
                   ->
PResult<ast::Pat_> {
    if !self.token.is_plain_ident() {
        let span = self.span;
        let tok_str = self.this_token_to_string();
        return Err(self.span_fatal(span,
                                   &format!("expected identifier, found `{}`", tok_str)))
    }
    let ident = try!(self.parse_ident());
    let last_span = self.last_span;
    let name = codemap::Spanned{span: last_span, node: ident};
    // Optional `@ subpattern` after the binding.
    let sub = if try!(self.eat(&token::At)) {
        Some(try!(self.parse_pat_nopanic()))
    } else {
        None
    };

    // just to be friendly, if they write something like
    //   ref Some(i)
    // we end up here with ( as the current token. This shortly
    // leads to a parse error. Note that if there is no explicit
    // binding mode then we do not end up here, because the lookahead
    // will direct us over to parse_enum_variant()
    if self.token == token::OpenDelim(token::Paren) {
        let last_span = self.last_span;
        return Err(self.span_fatal(
            last_span,
            "expected identifier, found enum pattern"))
    }

    Ok(PatIdent(binding_mode, name, sub))
}

/// Parse a local variable declaration
fn parse_local(&mut self) -> PResult<P<Local>> {
    let lo = self.span.lo;
    let pat = try!(self.parse_pat_nopanic());

    // Optional `: Type` ascription, then optional `= expr` initializer.
    let mut ty = None;
    if try!(self.eat(&token::Colon)) {
        ty = Some(try!(self.parse_ty_sum()));
    }
    let init = try!(self.parse_initializer());
    Ok(P(ast::Local {
        ty: ty,
        pat: pat,
        init: init,
        id: ast::DUMMY_NODE_ID,
        span: mk_sp(lo, self.last_span.hi),
    }))
}

/// Parse a "let" stmt
fn parse_let(&mut self) -> PResult<P<Decl>> {
    let lo = self.span.lo;
    let local = try!(self.parse_local());
    Ok(P(spanned(lo, self.last_span.hi, DeclLocal(local))))
}

/// Parse a structure field
fn parse_name_and_ty(&mut self, pr: Visibility,
                     attrs: Vec<Attribute> ) -> PResult<StructField> {
    // If a `pub` was eaten, the field span starts at the `pub` keyword.
    let lo = match pr {
        Inherited => self.span.lo,
        Public => self.last_span.lo,
    };
    if !self.token.is_plain_ident() {
        return Err(self.fatal("expected ident"));
    }
    let name = try!(self.parse_ident());
    try!(self.expect(&token::Colon));
    let ty = try!(self.parse_ty_sum());
    Ok(spanned(lo, self.last_span.hi, ast::StructField_ {
        kind:
NamedField(name, pr),
        id: ast::DUMMY_NODE_ID,
        ty: ty,
        attrs: attrs,
    }))
}

/// Emit an expected item after attributes error.
fn expected_item_err(&self, attrs: &[Attribute]) {
    // Distinguish doc comments from ordinary attributes in the message.
    let message = match attrs.last() {
        Some(&Attribute { node: ast::Attribute_ { is_sugared_doc: true, .. }, .. }) => {
            "expected item after doc comment"
        }
        _ => "expected item after attributes",
    };

    self.span_err(self.last_span, message);
}

/// Parse a statement. may include decl.
pub fn parse_stmt_nopanic(&mut self) -> PResult<Option<P<Stmt>>> {
    Ok(try!(self.parse_stmt_()).map(P))
}

/// Worker for statement parsing; returns `None` for bare `;` / `}`.
fn parse_stmt_(&mut self) -> PResult<Option<Stmt>> {
    maybe_whole!(Some deref self, NtStmt);

    fn check_expected_item(p: &mut Parser, attrs: &[Attribute]) {
        // If we have attributes then we should have an item
        if !attrs.is_empty() {
            p.expected_item_err(attrs);
        }
    }

    let attrs = self.parse_outer_attributes();
    let lo = self.span.lo;

    Ok(Some(if self.check_keyword(keywords::Let) {
        check_expected_item(self, &attrs);
        try!(self.expect_keyword(keywords::Let));
        let decl = try!(self.parse_let());
        spanned(lo, decl.span.hi, StmtDecl(decl, ast::DUMMY_NODE_ID))
    } else if self.token.is_ident()
        && !self.token.is_any_keyword()
        && self.look_ahead(1, |t| *t == token::Not) {
        // it's a macro invocation:
        check_expected_item(self, &attrs);

        // Potential trouble: if we allow macros with paths instead of
        // idents, we'd need to look ahead past the whole path here...
        let pth = try!(self.parse_path(NoTypesAllowed));
        try!(self.bump());

        let id = match self.token {
            token::OpenDelim(_) => token::special_idents::invalid, // no special identifier
            _ => try!(self.parse_ident()),
        };

        // check that we're pointing at delimiters (need to check
        // again after the `if`, because of `parse_ident`
        // consuming more tokens).
        let delim = match self.token {
            token::OpenDelim(delim) => delim,
            _ => {
                // we only expect an ident if we didn't parse one
                // above.
let ident_str = if id.name == token::special_idents::invalid.name {
    "identifier, "
} else {
    ""
};
let tok_str = self.this_token_to_string();
return Err(self.fatal(&format!("expected {}`(` or `{{`, found `{}`",
                               ident_str,
                               tok_str)))
},
};

let tts = try!(self.parse_unspanned_seq(
    &token::OpenDelim(delim),
    &token::CloseDelim(delim),
    seq_sep_none(),
    |p| p.parse_token_tree()
));
let hi = self.last_span.hi;

// Brace-delimited macro statements do not need a trailing semicolon.
let style = if delim == token::Brace {
    MacStmtWithBraces
} else {
    MacStmtWithoutBraces
};

if id.name == token::special_idents::invalid.name {
    spanned(lo, hi, StmtMac(P(spanned(lo, hi, Mac_ { path: pth, tts: tts, ctxt: EMPTY_CTXT })),
                            style))
} else {
    // if it has a special ident, it's definitely an item
    //
    // Require a semicolon or braces.
    if style != MacStmtWithBraces {
        if !try!(self.eat(&token::Semi)) {
            let last_span = self.last_span;
            self.span_err(last_span,
                          "macros that expand to items must \
                           either be surrounded with braces or \
                           followed by a semicolon");
        }
    }
    spanned(lo, hi, StmtDecl(
        P(spanned(lo, hi, DeclItem(
            self.mk_item(
                lo, hi, id /*id is good here*/,
                ItemMac(spanned(lo, hi, Mac_ { path: pth, tts: tts, ctxt: EMPTY_CTXT })),
                Inherited, Vec::new(/*no attrs*/))))),
        ast::DUMMY_NODE_ID))
}
} else {
    match try!(self.parse_item_(attrs, false)) {
        Some(i) => {
            let hi = i.span.hi;
            let decl = P(spanned(lo, hi, DeclItem(i)));
            spanned(lo, hi, StmtDecl(decl, ast::DUMMY_NODE_ID))
        }
        None => {
            // Do not attempt to parse an expression if we're done here.
            if self.token == token::Semi {
                try!(self.bump());
                return Ok(None);
            }

            if self.token == token::CloseDelim(token::Brace) {
                return Ok(None);
            }

            // Remainder are line-expr stmts.
            let e = try!(self.parse_expr_res(Restrictions::RESTRICTION_STMT_EXPR));
            spanned(lo, e.span.hi, StmtExpr(e, ast::DUMMY_NODE_ID))
        }
    }
}))
}

/// Is this expression a successfully-parsed statement?
fn expr_is_complete(&mut self, e: &Expr) -> bool {
    // In statement position, block-like expressions (`if`, `match`, ...) end
    // the statement and must not absorb trailing operators.
    self.restrictions.contains(Restrictions::RESTRICTION_STMT_EXPR) &&
        !classify::expr_requires_semi_to_be_stmt(e)
}

/// Parse a block. No inner attrs are allowed.
pub fn parse_block(&mut self) -> PResult<P<Block>> {
    maybe_whole!(no_clone self, NtBlock);

    let lo = self.span.lo;

    if !try!(self.eat(&token::OpenDelim(token::Brace))) {
        let sp = self.span;
        let tok = self.this_token_to_string();
        return Err(self.span_fatal_help(sp,
                                        &format!("expected `{{`, found `{}`", tok),
                                        "place this code inside a block"));
    }

    self.parse_block_tail(lo, DefaultBlock)
}

/// Parse a block. Inner attrs are allowed.
fn parse_inner_attrs_and_block(&mut self) -> PResult<(Vec<Attribute>, P<Block>)> {
    maybe_whole!(pair_empty self, NtBlock);

    let lo = self.span.lo;
    try!(self.expect(&token::OpenDelim(token::Brace)));
    Ok((self.parse_inner_attributes(),
        try!(self.parse_block_tail(lo, DefaultBlock))))
}

/// Parse the rest of a block expression or function body
/// Precondition: already parsed the '{'.
fn parse_block_tail(&mut self, lo: BytePos, s: BlockCheckMode) -> PResult<P<Block>> {
    let mut stmts = vec![];
    // Trailing expression of the block (the value it evaluates to), if any.
    let mut expr = None;

    while !try!(self.eat(&token::CloseDelim(token::Brace))) {
        let Spanned {node, span} = if let Some(s) = try!(self.parse_stmt_()) {
            s
        } else {
            // Found only `;` or `}`.
continue;
};
match node {
    StmtExpr(e, _) => {
        try!(self.handle_expression_like_statement(e, span, &mut stmts, &mut expr));
    }
    StmtMac(mac, MacStmtWithoutBraces) => {
        // statement macro without braces; might be an
        // expr depending on whether a semicolon follows
        match self.token {
            token::Semi => {
                stmts.push(P(Spanned {
                    node: StmtMac(mac, MacStmtWithSemicolon),
                    span: mk_sp(span.lo, self.span.hi),
                }));
                try!(self.bump());
            }
            _ => {
                // Re-parse the macro as an expression so it can absorb
                // trailing method calls, binops and assignments.
                let e = self.mk_mac_expr(span.lo, span.hi, mac.and_then(|m| m.node));
                let e = try!(self.parse_dot_or_call_expr_with(e));
                let e = try!(self.parse_more_binops(e, 0));
                let e = try!(self.parse_assign_expr_with(e));
                try!(self.handle_expression_like_statement(
                    e,
                    span,
                    &mut stmts,
                    &mut expr));
            }
        }
    }
    StmtMac(m, style) => {
        // statement macro; might be an expr
        match self.token {
            token::Semi => {
                stmts.push(P(Spanned {
                    node: StmtMac(m, MacStmtWithSemicolon),
                    span: mk_sp(span.lo, self.span.hi),
                }));
                try!(self.bump());
            }
            token::CloseDelim(token::Brace) => {
                // if a block ends in `m!(arg)` without
                // a `;`, it must be an expr
                expr = Some(self.mk_mac_expr(span.lo,
                                             span.hi,
                                             m.and_then(|x| x.node)));
            }
            _ => {
                stmts.push(P(Spanned {
                    node: StmtMac(m, style),
                    span: span
                }));
            }
        }
    }
    _ => { // all other kinds of statements:
        let mut hi = span.hi;
        if classify::stmt_ends_with_semi(&node) {
            try!(self.commit_stmt_expecting(token::Semi));
            hi = self.last_span.hi;
        }

        stmts.push(P(Spanned {
            node: node,
            span: mk_sp(span.lo, hi)
        }));
    }
}
}

Ok(P(ast::Block {
    stmts: stmts,
    expr: expr,
    id: ast::DUMMY_NODE_ID,
    rules: s,
    span: mk_sp(lo, self.last_span.hi),
}))
}

/// Classify an expression statement by the token that follows it: `;` makes
/// it a StmtSemi, `}` makes it the block's trailing expression, anything
/// else leaves it a plain StmtExpr.
fn handle_expression_like_statement(
        &mut self,
        e: P<Expr>,
        span: Span,
        stmts: &mut Vec<P<Stmt>>,
        last_block_expr: &mut Option<P<Expr>>) -> PResult<()> {
    // expression without semicolon
    if classify::expr_requires_semi_to_be_stmt(&*e) {
        // Just check for errors and recover; do not eat semicolon yet.
try!(self.commit_stmt(&[],
                      &[token::Semi, token::CloseDelim(token::Brace)]));
}

match self.token {
    token::Semi => {
        try!(self.bump());
        let span_with_semi = Span {
            lo: span.lo,
            hi: self.last_span.hi,
            expn_id: span.expn_id,
        };
        stmts.push(P(Spanned {
            node: StmtSemi(e, ast::DUMMY_NODE_ID),
            span: span_with_semi,
        }));
    }
    token::CloseDelim(token::Brace) => *last_block_expr = Some(e),
    _ => {
        stmts.push(P(Spanned {
            node: StmtExpr(e, ast::DUMMY_NODE_ID),
            span: span
        }));
    }
}
Ok(())
}

// Parses a sequence of bounds if a `:` is found,
// otherwise returns empty list.
fn parse_colon_then_ty_param_bounds(&mut self,
                                    mode: BoundParsingMode)
                                    -> PResult<OwnedSlice<TyParamBound>>
{
    if !try!(self.eat(&token::Colon)) {
        Ok(OwnedSlice::empty())
    } else {
        self.parse_ty_param_bounds(mode)
    }
}

// matches bounds    = ( boundseq )?
// where   boundseq  = ( polybound + boundseq ) | polybound
// and     polybound = ( 'for' '<' 'region '>' )? bound
// and     bound     = 'region | trait_ref
fn parse_ty_param_bounds(&mut self,
                         mode: BoundParsingMode)
                         -> PResult<OwnedSlice<TyParamBound>>
{
    let mut result = vec!();
    loop {
        let question_span = self.span;
        // A leading `?` (as in `?Sized`) is only legal on trait bounds.
        let ate_question = try!(self.eat(&token::Question));
        match self.token {
            token::Lifetime(lifetime) => {
                if ate_question {
                    self.span_err(question_span,
                                  "`?` may only modify trait bounds, not lifetime bounds");
                }
                result.push(RegionTyParamBound(ast::Lifetime {
                    id: ast::DUMMY_NODE_ID,
                    span: self.span,
                    name: lifetime.name
                }));
                try!(self.bump());
            }
            token::ModSep | token::Ident(..) => {
                let poly_trait_ref = try!(self.parse_poly_trait_ref());
                let modifier = if ate_question {
                    if mode == BoundParsingMode::Modified {
                        TraitBoundModifier::Maybe
                    } else {
                        self.span_err(question_span,
                                      "unexpected `?`");
                        TraitBoundModifier::None
                    }
                } else {
                    TraitBoundModifier::None
                };
                result.push(TraitTyParamBound(poly_trait_ref, modifier))
            }
            _ => break,
        }

        if !try!(self.eat(&token::BinOp(token::Plus))) {
            break;
        }
    }

    return Ok(OwnedSlice::from_vec(result));
}

/// Matches typaram = IDENT (`?` unbound)?
/// optbounds ( EQ ty )? — continuation of the grammar comment above.
fn parse_ty_param(&mut self) -> PResult<TyParam> {
    let span = self.span;
    let ident = try!(self.parse_ident());

    let bounds = try!(self.parse_colon_then_ty_param_bounds(BoundParsingMode::Modified));

    // Optional `= Type` default.
    let default = if self.check(&token::Eq) {
        try!(self.bump());
        Some(try!(self.parse_ty_sum()))
    } else {
        None
    };

    Ok(TyParam {
        ident: ident,
        id: ast::DUMMY_NODE_ID,
        bounds: bounds,
        default: default,
        span: span,
    })
}

/// Parse a set of optional generic type parameter declarations. Where
/// clauses are not parsed here, and must be added later via
/// `parse_where_clause()`.
///
/// matches generics = ( ) | ( < > ) | ( < typaramseq ( , )? > ) | ( < lifetimes ( , )? > )
///                  | ( < lifetimes , typaramseq ( , )? > )
/// where   typaramseq = ( typaram ) | ( typaram , typaramseq )
pub fn parse_generics(&mut self) -> PResult<ast::Generics> {
    maybe_whole!(self, NtGenerics);

    if try!(self.eat(&token::Lt)) {
        let lifetime_defs = try!(self.parse_lifetime_defs());
        let mut seen_default = false;
        let ty_params = try!(self.parse_seq_to_gt(Some(token::Comma), |p| {
            try!(p.forbid_lifetime());
            let ty_param = try!(p.parse_ty_param());
            if ty_param.default.is_some() {
                seen_default = true;
            } else if seen_default {
                // Defaulted params must come last; report but keep parsing.
                let last_span = p.last_span;
                p.span_err(last_span,
                           "type parameters with a default must be trailing");
            }
            Ok(ty_param)
        }));
        Ok(ast::Generics {
            lifetimes: lifetime_defs,
            ty_params: ty_params,
            where_clause: WhereClause {
                id: ast::DUMMY_NODE_ID,
                predicates: Vec::new(),
            }
        })
    } else {
        Ok(ast_util::empty_generics())
    }
}

/// Parse the contents of `<...>` in a path segment: lifetimes, then types,
/// then associated-type bindings (`Item = Ty`).
fn parse_generic_values_after_lt(&mut self) -> PResult<(Vec<ast::Lifetime>,
                                                        Vec<P<Ty>>,
                                                        Vec<P<TypeBinding>>)> {
    let span_lo = self.span.lo;
    let lifetimes = try!(self.parse_lifetimes(token::Comma));

    // Detect `<'a Type>` (missing comma after the last lifetime).
    let missing_comma = !lifetimes.is_empty() &&
                        !self.token.is_like_gt() &&
                        self.last_token
                            .as_ref().map_or(true,
                                             |x| &**x != &token::Comma);

    if missing_comma {
        let msg = format!("expected `,` or `>` after lifetime \
                           name, found `{}`",
                          self.this_token_to_string());
self.span_err(self.span, &msg);

// Try to eat a type after the stray lifetime so the note can span it.
let span_hi = self.span.hi;
let span_hi = if self.parse_ty_nopanic().is_ok() {
    self.span.hi
} else {
    span_hi
};

let msg = format!("did you mean a single argument type &'a Type, \
                   or did you mean the comma-separated arguments \
                   'a, Type?");
self.span_note(mk_sp(span_lo, span_hi), &msg);
self.abort_if_errors()
}

// First parse types.
let (types, returned) = try!(self.parse_seq_to_gt_or_return(
    Some(token::Comma),
    |p| {
        try!(p.forbid_lifetime());
        // An ident followed by `=` starts the bindings section instead.
        if p.look_ahead(1, |t| t == &token::Eq) {
            Ok(None)
        } else {
            Ok(Some(try!(p.parse_ty_sum())))
        }
    }
));

// If we found the `>`, don't continue.
if !returned {
    return Ok((lifetimes, types.into_vec(), Vec::new()));
}

// Then parse type bindings.
let bindings = try!(self.parse_seq_to_gt(
    Some(token::Comma),
    |p| {
        try!(p.forbid_lifetime());
        let lo = p.span.lo;
        let ident = try!(p.parse_ident());
        let found_eq = try!(p.eat(&token::Eq));
        if !found_eq {
            let span = p.span;
            p.span_warn(span, "whoops, no =?");
        }
        let ty = try!(p.parse_ty_nopanic());
        let hi = ty.span.hi;
        let span = mk_sp(lo, hi);
        return Ok(P(TypeBinding{id: ast::DUMMY_NODE_ID,
            ident: ident,
            ty: ty,
            span: span,
        }));
    }
));
Ok((lifetimes, types.into_vec(), bindings.into_vec()))
}

/// Reject a lifetime token in a position where a type parameter is expected.
fn forbid_lifetime(&mut self) -> PResult<()> {
    if self.token.is_lifetime() {
        let span = self.span;
        return Err(self.span_fatal(span,
                                   "lifetime parameters must be declared \
                                   prior to type parameters"))
    }
    Ok(())
}

/// Parses an optional `where` clause and places it in `generics`.
///
/// ```
/// where T : Trait<U, V> + 'b, 'a : 'b
/// ```
pub fn parse_where_clause(&mut self) -> PResult<ast::WhereClause> {
    maybe_whole!(self, NtWhereClause);

    let mut where_clause = WhereClause {
        id: ast::DUMMY_NODE_ID,
        predicates: Vec::new(),
    };

    if !try!(self.eat_keyword(keywords::Where)) {
        return Ok(where_clause);
    }

    let mut parsed_something = false;
    loop {
        let lo = self.span.lo;
        match self.token {
            token::OpenDelim(token::Brace) => {
                break
            }

            token::Lifetime(..)
=> {
    // `'a: 'b + 'c` — a lifetime-outlives predicate.
    let bounded_lifetime = try!(self.parse_lifetime());

    try!(self.eat(&token::Colon));

    let bounds = try!(self.parse_lifetimes(token::BinOp(token::Plus)));

    let hi = self.last_span.hi;
    let span = mk_sp(lo, hi);

    where_clause.predicates.push(ast::WherePredicate::RegionPredicate(
        ast::WhereRegionPredicate {
            span: span,
            lifetime: bounded_lifetime,
            bounds: bounds
        }
    ));

    parsed_something = true;
}

_ => {
    let bound_lifetimes = if try!(self.eat_keyword(keywords::For)) {
        // Higher ranked constraint.
        try!(self.expect(&token::Lt));
        let lifetime_defs = try!(self.parse_lifetime_defs());
        try!(self.expect_gt());
        lifetime_defs
    } else {
        vec![]
    };

    let bounded_ty = try!(self.parse_ty_nopanic());

    if try!(self.eat(&token::Colon)) {
        let bounds = try!(self.parse_ty_param_bounds(BoundParsingMode::Bare));

        let hi = self.last_span.hi;
        let span = mk_sp(lo, hi);

        if bounds.is_empty() {
            self.span_err(span,
                          "each predicate in a `where` clause must have \
                           at least one bound in it");
        }

        where_clause.predicates.push(ast::WherePredicate::BoundPredicate(
            ast::WhereBoundPredicate {
                span: span,
                bound_lifetimes: bound_lifetimes,
                bounded_ty: bounded_ty,
                bounds: bounds,
        }));

        parsed_something = true;
    } else if try!(self.eat(&token::Eq)) {
        // let ty = try!(self.parse_ty_nopanic());
        let hi = self.last_span.hi;
        let span = mk_sp(lo, hi);
        // where_clause.predicates.push(
        //     ast::WherePredicate::EqPredicate(ast::WhereEqPredicate {
        //         id: ast::DUMMY_NODE_ID,
        //         span: span,
        //         path: panic!("NYI"), //bounded_ty,
        //         ty: ty,
        // }));
        // parsed_something = true;
        // // FIXME(#18433)
        self.span_err(span,
                      "equality constraints are not yet supported \
                       in where clauses (#20041)");
    } else {
        let last_span = self.last_span;
        self.span_err(last_span,
                      "unexpected token in `where` clause");
    }
}
};

if !try!(self.eat(&token::Comma)) {
    break
}
}

if !parsed_something {
    let last_span = self.last_span;
    self.span_err(last_span,
                  "a `where` clause must have at least one predicate \
                   in it");
}

Ok(where_clause)
}

/// Parse a parenthesized fn argument list; signature continues on the next line.
fn parse_fn_args(&mut self,
named_args: bool, allow_variadic: bool) -> PResult<(Vec<Arg> , bool)> { let sp = self.span; let mut args: Vec<Option<Arg>> = try!(self.parse_unspanned_seq( &token::OpenDelim(token::Paren), &token::CloseDelim(token::Paren), seq_sep_trailing_allowed(token::Comma), |p| { if p.token == token::DotDotDot { try!(p.bump()); if allow_variadic { if p.token != token::CloseDelim(token::Paren) { let span = p.span; return Err(p.span_fatal(span, "`...` must be last in argument list for variadic function")) } } else { let span = p.span; return Err(p.span_fatal(span, "only foreign functions are allowed to be variadic")) } Ok(None) } else { Ok(Some(try!(p.parse_arg_general(named_args)))) } } )); let variadic = match args.pop() { Some(None) => true, Some(x) => { // Need to put back that last arg args.push(x); false } None => false }; if variadic && args.is_empty() { self.span_err(sp, "variadic function must be declared with at least one named argument"); } let args = args.into_iter().map(|x| x.unwrap()).collect(); Ok((args, variadic)) } /// Parse the argument list and result type of a function declaration pub fn parse_fn_decl(&mut self, allow_variadic: bool) -> PResult<P<FnDecl>> { let (args, variadic) = try!(self.parse_fn_args(true, allow_variadic)); let ret_ty = try!(self.parse_ret_ty()); Ok(P(FnDecl { inputs: args, output: ret_ty, variadic: variadic })) } fn is_self_ident(&mut self) -> bool { match self.token { token::Ident(id, token::Plain) => id.name == special_idents::self_.name, _ => false } } fn expect_self_ident(&mut self) -> PResult<ast::Ident> { match self.token { token::Ident(id, token::Plain) if id.name == special_idents::self_.name => { try!(self.bump()); Ok(id) }, _ => { let token_str = self.this_token_to_string(); return Err(self.fatal(&format!("expected `self`, found `{}`", token_str))) } } } fn is_self_type_ident(&mut self) -> bool { match self.token { token::Ident(id, token::Plain) => id.name == special_idents::type_self.name, _ => false } } fn 
expect_self_type_ident(&mut self) -> PResult<ast::Ident> { match self.token { token::Ident(id, token::Plain) if id.name == special_idents::type_self.name => { try!(self.bump()); Ok(id) }, _ => { let token_str = self.this_token_to_string(); Err(self.fatal(&format!("expected `Self`, found `{}`", token_str))) } } } /// Parse the argument list and result type of a function /// that may have a self type. fn parse_fn_decl_with_self<F>(&mut self, parse_arg_fn: F) -> PResult<(ExplicitSelf, P<FnDecl>)> where F: FnMut(&mut Parser) -> PResult<Arg>, { fn maybe_parse_borrowed_explicit_self(this: &mut Parser) -> PResult<ast::ExplicitSelf_> { // The following things are possible to see here: // // fn(&mut self) // fn(&mut self) // fn(&'lt self) // fn(&'lt mut self) // // We already know that the current token is `&`. if this.look_ahead(1, |t| t.is_keyword(keywords::SelfValue)) { try!(this.bump()); Ok(SelfRegion(None, MutImmutable, try!(this.expect_self_ident()))) } else if this.look_ahead(1, |t| t.is_mutability()) && this.look_ahead(2, |t| t.is_keyword(keywords::SelfValue)) { try!(this.bump()); let mutability = try!(this.parse_mutability()); Ok(SelfRegion(None, mutability, try!(this.expect_self_ident()))) } else if this.look_ahead(1, |t| t.is_lifetime()) && this.look_ahead(2, |t| t.is_keyword(keywords::SelfValue)) { try!(this.bump()); let lifetime = try!(this.parse_lifetime()); Ok(SelfRegion(Some(lifetime), MutImmutable, try!(this.expect_self_ident()))) } else if this.look_ahead(1, |t| t.is_lifetime()) && this.look_ahead(2, |t| t.is_mutability()) && this.look_ahead(3, |t| t.is_keyword(keywords::SelfValue)) { try!(this.bump()); let lifetime = try!(this.parse_lifetime()); let mutability = try!(this.parse_mutability()); Ok(SelfRegion(Some(lifetime), mutability, try!(this.expect_self_ident()))) } else { Ok(SelfStatic) } } try!(self.expect(&token::OpenDelim(token::Paren))); // A bit of complexity and lookahead is needed here in order to be // backwards compatible. 
let lo = self.span.lo; let mut self_ident_lo = self.span.lo; let mut self_ident_hi = self.span.hi; let mut mutbl_self = MutImmutable; let explicit_self = match self.token { token::BinOp(token::And) => { let eself = try!(maybe_parse_borrowed_explicit_self(self)); self_ident_lo = self.last_span.lo; self_ident_hi = self.last_span.hi; eself } token::BinOp(token::Star) => { // Possibly "*self" or "*mut self" -- not supported. Try to avoid // emitting cryptic "unexpected token" errors. try!(self.bump()); let _mutability = if self.token.is_mutability() { try!(self.parse_mutability()) } else { MutImmutable }; if self.is_self_ident() { let span = self.span; self.span_err(span, "cannot pass self by raw pointer"); try!(self.bump()); } // error case, making bogus self ident: SelfValue(special_idents::self_) } token::Ident(..) => { if self.is_self_ident() { let self_ident = try!(self.expect_self_ident()); // Determine whether this is the fully explicit form, `self: // TYPE`. if try!(self.eat(&token::Colon) ){ SelfExplicit(try!(self.parse_ty_sum()), self_ident) } else { SelfValue(self_ident) } } else if self.token.is_mutability() && self.look_ahead(1, |t| t.is_keyword(keywords::SelfValue)) { mutbl_self = try!(self.parse_mutability()); let self_ident = try!(self.expect_self_ident()); // Determine whether this is the fully explicit form, // `self: TYPE`. if try!(self.eat(&token::Colon) ){ SelfExplicit(try!(self.parse_ty_sum()), self_ident) } else { SelfValue(self_ident) } } else { SelfStatic } } _ => SelfStatic, }; let explicit_self_sp = mk_sp(self_ident_lo, self_ident_hi); // shared fall-through for the three cases below. borrowing prevents simply // writing this as a closure macro_rules! parse_remaining_arguments { ($self_id:ident) => { // If we parsed a self type, expect a comma before the argument list. 
match self.token { token::Comma => { try!(self.bump()); let sep = seq_sep_trailing_allowed(token::Comma); let mut fn_inputs = try!(self.parse_seq_to_before_end( &token::CloseDelim(token::Paren), sep, parse_arg_fn )); fn_inputs.insert(0, Arg::new_self(explicit_self_sp, mutbl_self, $self_id)); fn_inputs } token::CloseDelim(token::Paren) => { vec!(Arg::new_self(explicit_self_sp, mutbl_self, $self_id)) } _ => { let token_str = self.this_token_to_string(); return Err(self.fatal(&format!("expected `,` or `)`, found `{}`", token_str))) } } } } let fn_inputs = match explicit_self { SelfStatic => { let sep = seq_sep_trailing_allowed(token::Comma); try!(self.parse_seq_to_before_end(&token::CloseDelim(token::Paren), sep, parse_arg_fn)) } SelfValue(id) => parse_remaining_arguments!(id), SelfRegion(_,_,id) => parse_remaining_arguments!(id), SelfExplicit(_,id) => parse_remaining_arguments!(id), }; try!(self.expect(&token::CloseDelim(token::Paren))); let hi = self.span.hi; let ret_ty = try!(self.parse_ret_ty()); let fn_decl = P(FnDecl { inputs: fn_inputs, output: ret_ty, variadic: false }); Ok((spanned(lo, hi, explicit_self), fn_decl)) } // parse the |arg, arg| header on a lambda fn parse_fn_block_decl(&mut self) -> PResult<P<FnDecl>> { let inputs_captures = { if try!(self.eat(&token::OrOr) ){ Vec::new() } else { try!(self.expect(&token::BinOp(token::Or))); try!(self.parse_obsolete_closure_kind()); let args = try!(self.parse_seq_to_before_end( &token::BinOp(token::Or), seq_sep_trailing_allowed(token::Comma), |p| p.parse_fn_block_arg() )); try!(self.bump()); args } }; let output = try!(self.parse_ret_ty()); Ok(P(FnDecl { inputs: inputs_captures, output: output, variadic: false })) } /// Parse the name and optional generic types of a function header. 
fn parse_fn_header(&mut self) -> PResult<(Ident, ast::Generics)> { let id = try!(self.parse_ident()); let generics = try!(self.parse_generics()); Ok((id, generics)) } fn mk_item(&mut self, lo: BytePos, hi: BytePos, ident: Ident, node: Item_, vis: Visibility, attrs: Vec<Attribute>) -> P<Item> { P(Item { ident: ident, attrs: attrs, id: ast::DUMMY_NODE_ID, node: node, vis: vis, span: mk_sp(lo, hi) }) } /// Parse an item-position function declaration. fn parse_item_fn(&mut self, unsafety: Unsafety, constness: Constness, abi: abi::Abi) -> PResult<ItemInfo> { let (ident, mut generics) = try!(self.parse_fn_header()); let decl = try!(self.parse_fn_decl(false)); generics.where_clause = try!(self.parse_where_clause()); let (inner_attrs, body) = try!(self.parse_inner_attrs_and_block()); Ok((ident, ItemFn(decl, unsafety, constness, abi, generics, body), Some(inner_attrs))) } /// true if we are looking at `const ID`, false for things like `const fn` etc pub fn is_const_item(&mut self) -> bool { self.token.is_keyword(keywords::Const) && !self.look_ahead(1, |t| t.is_keyword(keywords::Fn)) } /// parses all the "front matter" for a `fn` declaration, up to /// and including the `fn` keyword: /// /// - `const fn` /// - `unsafe fn` /// - `extern fn` /// - etc pub fn parse_fn_front_matter(&mut self) -> PResult<(ast::Constness, ast::Unsafety, abi::Abi)> { let unsafety = try!(self.parse_unsafety()); let is_const_fn = try!(self.eat_keyword(keywords::Const)); let (constness, unsafety, abi) = if is_const_fn { (Constness::Const, unsafety, abi::Rust) } else { let abi = if try!(self.eat_keyword(keywords::Extern)) { try!(self.parse_opt_abi()).unwrap_or(abi::C) } else { abi::Rust }; (Constness::NotConst, unsafety, abi) }; try!(self.expect_keyword(keywords::Fn)); Ok((constness, unsafety, abi)) } /// Parse an impl item. 
pub fn parse_impl_item(&mut self) -> PResult<P<ImplItem>> { maybe_whole!(no_clone self, NtImplItem); let mut attrs = self.parse_outer_attributes(); let lo = self.span.lo; let vis = try!(self.parse_visibility()); let (name, node) = if try!(self.eat_keyword(keywords::Type)) { let name = try!(self.parse_ident()); try!(self.expect(&token::Eq)); let typ = try!(self.parse_ty_sum()); try!(self.expect(&token::Semi)); (name, TypeImplItem(typ)) } else if self.is_const_item() { try!(self.expect_keyword(keywords::Const)); let name = try!(self.parse_ident()); try!(self.expect(&token::Colon)); let typ = try!(self.parse_ty_sum()); try!(self.expect(&token::Eq)); let expr = try!(self.parse_expr_nopanic()); try!(self.commit_expr_expecting(&expr, token::Semi)); (name, ConstImplItem(typ, expr)) } else { let (name, inner_attrs, node) = try!(self.parse_impl_method(vis)); attrs.extend(inner_attrs); (name, node) }; Ok(P(ImplItem { id: ast::DUMMY_NODE_ID, span: mk_sp(lo, self.last_span.hi), ident: name, vis: vis, attrs: attrs, node: node })) } fn complain_if_pub_macro(&mut self, visa: Visibility, span: Span) { match visa { Public => { self.span_err(span, "can't qualify macro invocation with `pub`"); self.fileline_help(span, "try adjusting the macro to put `pub` inside \ the invocation"); } Inherited => (), } } /// Parse a method or a macro invocation in a trait impl. fn parse_impl_method(&mut self, vis: Visibility) -> PResult<(Ident, Vec<ast::Attribute>, ast::ImplItem_)> { // code copied from parse_macro_use_or_failure... abstraction! if !self.token.is_any_keyword() && self.look_ahead(1, |t| *t == token::Not) && (self.look_ahead(2, |t| *t == token::OpenDelim(token::Paren)) || self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))) { // method macro. 
let last_span = self.last_span; self.complain_if_pub_macro(vis, last_span); let pth = try!(self.parse_path(NoTypesAllowed)); try!(self.expect(&token::Not)); // eat a matched-delimiter token tree: let delim = try!(self.expect_open_delim()); let tts = try!(self.parse_seq_to_end(&token::CloseDelim(delim), seq_sep_none(), |p| p.parse_token_tree())); let m_ = Mac_ { path: pth, tts: tts, ctxt: EMPTY_CTXT }; let m: ast::Mac = codemap::Spanned { node: m_, span: mk_sp(self.span.lo, self.span.hi) }; if delim != token::Brace { try!(self.expect(&token::Semi)) } Ok((token::special_idents::invalid, vec![], ast::MacImplItem(m))) } else { let (constness, unsafety, abi) = try!(self.parse_fn_front_matter()); let ident = try!(self.parse_ident()); let mut generics = try!(self.parse_generics()); let (explicit_self, decl) = try!(self.parse_fn_decl_with_self(|p| { p.parse_arg() })); generics.where_clause = try!(self.parse_where_clause()); let (inner_attrs, body) = try!(self.parse_inner_attrs_and_block()); Ok((ident, inner_attrs, MethodImplItem(ast::MethodSig { generics: generics, abi: abi, explicit_self: explicit_self, unsafety: unsafety, constness: constness, decl: decl }, body))) } } /// Parse trait Foo { ... } fn parse_item_trait(&mut self, unsafety: Unsafety) -> PResult<ItemInfo> { let ident = try!(self.parse_ident()); let mut tps = try!(self.parse_generics()); // Parse supertrait bounds. let bounds = try!(self.parse_colon_then_ty_param_bounds(BoundParsingMode::Bare)); tps.where_clause = try!(self.parse_where_clause()); let meths = try!(self.parse_trait_items()); Ok((ident, ItemTrait(unsafety, tps, bounds, meths), None)) } /// Parses items implementations variants /// impl<T> Foo { ... } /// impl<T> ToString for &'static T { ... } /// impl Send for .. {} fn parse_item_impl(&mut self, unsafety: ast::Unsafety) -> PResult<ItemInfo> { let impl_span = self.span; // First, parse type parameters if necessary. 
let mut generics = try!(self.parse_generics()); // Special case: if the next identifier that follows is '(', don't // allow this to be parsed as a trait. let could_be_trait = self.token != token::OpenDelim(token::Paren); let neg_span = self.span; let polarity = if try!(self.eat(&token::Not) ){ ast::ImplPolarity::Negative } else { ast::ImplPolarity::Positive }; // Parse the trait. let mut ty = try!(self.parse_ty_sum()); // Parse traits, if necessary. let opt_trait = if could_be_trait && try!(self.eat_keyword(keywords::For) ){ // New-style trait. Reinterpret the type as a trait. match ty.node { TyPath(None, ref path) => { Some(TraitRef { path: (*path).clone(), ref_id: ty.id, }) } _ => { self.span_err(ty.span, "not a trait"); None } } } else { match polarity { ast::ImplPolarity::Negative => { // This is a negated type implementation // `impl !MyType {}`, which is not allowed. self.span_err(neg_span, "inherent implementation can't be negated"); }, _ => {} } None }; if opt_trait.is_some() && try!(self.eat(&token::DotDot) ){ if generics.is_parameterized() { self.span_err(impl_span, "default trait implementations are not \ allowed to have generics"); } try!(self.expect(&token::OpenDelim(token::Brace))); try!(self.expect(&token::CloseDelim(token::Brace))); Ok((ast_util::impl_pretty_name(&opt_trait, None), ItemDefaultImpl(unsafety, opt_trait.unwrap()), None)) } else { if opt_trait.is_some() { ty = try!(self.parse_ty_sum()); } generics.where_clause = try!(self.parse_where_clause()); try!(self.expect(&token::OpenDelim(token::Brace))); let attrs = self.parse_inner_attributes(); let mut impl_items = vec![]; while !try!(self.eat(&token::CloseDelim(token::Brace))) { impl_items.push(try!(self.parse_impl_item())); } Ok((ast_util::impl_pretty_name(&opt_trait, Some(&*ty)), ItemImpl(unsafety, polarity, generics, opt_trait, ty, impl_items), Some(attrs))) } } /// Parse a::B<String,i32> fn parse_trait_ref(&mut self) -> PResult<TraitRef> { Ok(ast::TraitRef { path: 
try!(self.parse_path(LifetimeAndTypesWithoutColons)), ref_id: ast::DUMMY_NODE_ID, }) } fn parse_late_bound_lifetime_defs(&mut self) -> PResult<Vec<ast::LifetimeDef>> { if try!(self.eat_keyword(keywords::For) ){ try!(self.expect(&token::Lt)); let lifetime_defs = try!(self.parse_lifetime_defs()); try!(self.expect_gt()); Ok(lifetime_defs) } else { Ok(Vec::new()) } } /// Parse for<'l> a::B<String,i32> fn parse_poly_trait_ref(&mut self) -> PResult<PolyTraitRef> { let lo = self.span.lo; let lifetime_defs = try!(self.parse_late_bound_lifetime_defs()); Ok(ast::PolyTraitRef { bound_lifetimes: lifetime_defs, trait_ref: try!(self.parse_trait_ref()), span: mk_sp(lo, self.last_span.hi), }) } /// Parse struct Foo { ... } fn parse_item_struct(&mut self) -> PResult<ItemInfo> { let class_name = try!(self.parse_ident()); let mut generics = try!(self.parse_generics()); // There is a special case worth noting here, as reported in issue #17904. // If we are parsing a tuple struct it is the case that the where clause // should follow the field list. Like so: // // struct Foo<T>(T) where T: Copy; // // If we are parsing a normal record-style struct it is the case // that the where clause comes before the body, and after the generics. // So if we look ahead and see a brace or a where-clause we begin // parsing a record style struct. // // Otherwise if we look ahead and see a paren we parse a tuple-style // struct. let vdata = if self.token.is_keyword(keywords::Where) { generics.where_clause = try!(self.parse_where_clause()); if try!(self.eat(&token::Semi)) { // If we see a: `struct Foo<T> where T: Copy;` style decl. VariantData::Unit(ast::DUMMY_NODE_ID) } else { // If we see: `struct Foo<T> where T: Copy { ... 
}` VariantData::Struct(try!(self.parse_record_struct_body()), ast::DUMMY_NODE_ID) } // No `where` so: `struct Foo<T>;` } else if try!(self.eat(&token::Semi) ){ VariantData::Unit(ast::DUMMY_NODE_ID) // Record-style struct definition } else if self.token == token::OpenDelim(token::Brace) { VariantData::Struct(try!(self.parse_record_struct_body()), ast::DUMMY_NODE_ID) // Tuple-style struct definition with optional where-clause. } else if self.token == token::OpenDelim(token::Paren) { VariantData::Tuple(try!(self.parse_tuple_struct_body(&mut generics)), ast::DUMMY_NODE_ID) } else { let token_str = self.this_token_to_string(); return Err(self.fatal(&format!("expected `where`, `{{`, `(`, or `;` after struct \ name, found `{}`", token_str))) }; Ok((class_name, ItemStruct(P(vdata), generics), None)) } pub fn parse_record_struct_body(&mut self) -> PResult<Vec<StructField>> { let mut fields = Vec::new(); if try!(self.eat(&token::OpenDelim(token::Brace)) ){ while self.token != token::CloseDelim(token::Brace) { fields.push(try!(self.parse_struct_decl_field(true))); } try!(self.bump()); } else { let token_str = self.this_token_to_string(); return Err(self.fatal(&format!("expected `where`, or `{{` after struct \ name, found `{}`", token_str))); } Ok(fields) } pub fn parse_tuple_struct_body(&mut self, generics: &mut ast::Generics) -> PResult<Vec<StructField>> { // This is the case where we find `struct Foo<T>(T) where T: Copy;` // Unit like structs are handled in parse_item_struct function let fields = try!(self.parse_unspanned_seq( &token::OpenDelim(token::Paren), &token::CloseDelim(token::Paren), seq_sep_trailing_allowed(token::Comma), |p| { let attrs = p.parse_outer_attributes(); let lo = p.span.lo; let struct_field_ = ast::StructField_ { kind: UnnamedField(try!(p.parse_visibility())), id: ast::DUMMY_NODE_ID, ty: try!(p.parse_ty_sum()), attrs: attrs, }; Ok(spanned(lo, p.span.hi, struct_field_)) })); generics.where_clause = try!(self.parse_where_clause()); 
try!(self.expect(&token::Semi)); Ok(fields) } /// Parse a structure field declaration pub fn parse_single_struct_field(&mut self, vis: Visibility, attrs: Vec<Attribute> ) -> PResult<StructField> { let a_var = try!(self.parse_name_and_ty(vis, attrs)); match self.token { token::Comma => { try!(self.bump()); } token::CloseDelim(token::Brace) => {} _ => { let span = self.span; let token_str = self.this_token_to_string(); return Err(self.span_fatal_help(span, &format!("expected `,`, or `}}`, found `{}`", token_str), "struct fields should be separated by commas")) } } Ok(a_var) } /// Parse an element of a struct definition fn parse_struct_decl_field(&mut self, allow_pub: bool) -> PResult<StructField> { let attrs = self.parse_outer_attributes(); if try!(self.eat_keyword(keywords::Pub) ){ if !allow_pub { let span = self.last_span; self.span_err(span, "`pub` is not allowed here"); } return self.parse_single_struct_field(Public, attrs); } return self.parse_single_struct_field(Inherited, attrs); } /// Parse visibility: PUB or nothing fn parse_visibility(&mut self) -> PResult<Visibility> { if try!(self.eat_keyword(keywords::Pub)) { Ok(Public) } else { Ok(Inherited) } } /// Given a termination token, parse all of the items in a module fn parse_mod_items(&mut self, term: &token::Token, inner_lo: BytePos) -> PResult<Mod> { let mut items = vec![]; while let Some(item) = try!(self.parse_item_nopanic()) { items.push(item); } if !try!(self.eat(term)) { let token_str = self.this_token_to_string(); return Err(self.fatal(&format!("expected item, found `{}`", token_str))); } let hi = if self.span == codemap::DUMMY_SP { inner_lo } else { self.last_span.hi }; Ok(ast::Mod { inner: mk_sp(inner_lo, hi), items: items }) } fn parse_item_const(&mut self, m: Option<Mutability>) -> PResult<ItemInfo> { let id = try!(self.parse_ident()); try!(self.expect(&token::Colon)); let ty = try!(self.parse_ty_sum()); try!(self.expect(&token::Eq)); let e = try!(self.parse_expr_nopanic()); 
try!(self.commit_expr_expecting(&*e, token::Semi)); let item = match m { Some(m) => ItemStatic(ty, m, e), None => ItemConst(ty, e), }; Ok((id, item, None)) } /// Parse a `mod <foo> { ... }` or `mod <foo>;` item fn parse_item_mod(&mut self, outer_attrs: &[Attribute]) -> PResult<ItemInfo> { let id_span = self.span; let id = try!(self.parse_ident()); if self.check(&token::Semi) { try!(self.bump()); // This mod is in an external file. Let's go get it! let (m, attrs) = try!(self.eval_src_mod(id, outer_attrs, id_span)); Ok((id, m, Some(attrs))) } else { self.push_mod_path(id, outer_attrs); try!(self.expect(&token::OpenDelim(token::Brace))); let mod_inner_lo = self.span.lo; let old_owns_directory = self.owns_directory; self.owns_directory = true; let attrs = self.parse_inner_attributes(); let m = try!(self.parse_mod_items(&token::CloseDelim(token::Brace), mod_inner_lo)); self.owns_directory = old_owns_directory; self.pop_mod_path(); Ok((id, ItemMod(m), Some(attrs))) } } fn push_mod_path(&mut self, id: Ident, attrs: &[Attribute]) { let default_path = self.id_to_interned_str(id); let file_path = match ::attr::first_attr_value_str_by_name(attrs, "path") { Some(d) => d, None => default_path, }; self.mod_path_stack.push(file_path) } fn pop_mod_path(&mut self) { self.mod_path_stack.pop().unwrap(); } pub fn submod_path_from_attr(attrs: &[ast::Attribute], dir_path: &Path) -> Option<PathBuf> { ::attr::first_attr_value_str_by_name(attrs, "path").map(|d| dir_path.join(&*d)) } /// Returns either a path to a module, or . 
/// Compute the candidate on-disk locations for the out-of-line module
    /// `id` relative to `dir_path`, and report which of them exist.
    ///
    /// Both conventional layouts are tried: `<name>.rs` and
    /// `<name>/mod.rs`. Exactly one must exist for success; the returned
    /// `ModulePath` carries either the winning `ModulePathSuccess` (with
    /// `owns_directory` set for the `mod.rs` form) or a `ModulePathError`
    /// describing the not-found or ambiguous case.
    pub fn default_submod_path(id: ast::Ident, dir_path: &Path, codemap: &CodeMap) -> ModulePath
    {
        let mod_name = id.to_string();
        // The two conventional file layouts for an external module.
        let default_path_str = format!("{}.rs", mod_name);
        let secondary_path_str = format!("{}/mod.rs", mod_name);
        let default_path = dir_path.join(&default_path_str);
        let secondary_path = dir_path.join(&secondary_path_str);
        // Existence is checked through the codemap so lookups stay
        // consistent with how the session resolves source files.
        let default_exists = codemap.file_exists(&default_path);
        let secondary_exists = codemap.file_exists(&secondary_path);

        let result = match (default_exists, secondary_exists) {
            // `foo.rs`: the module does not own its directory.
            (true, false) => Ok(ModulePathSuccess { path: default_path,
                                                    owns_directory: false }),
            // `foo/mod.rs`: the module owns the `foo/` directory.
            (false, true) => Ok(ModulePathSuccess { path: secondary_path,
                                                    owns_directory: true }),
            (false, false) => Err(ModulePathError {
                err_msg: format!("file not found for module `{}`", mod_name),
                help_msg: format!("name the file either {} or {} inside the directory {:?}",
                                  default_path_str,
                                  secondary_path_str,
                                  dir_path.display()),
            }),
            // Both files present: ambiguous, the user must remove one.
            (true, true) => Err(ModulePathError {
                err_msg: format!("file for module `{}` found at both {} and {}",
                                 mod_name,
                                 default_path_str,
                                 secondary_path_str),
                help_msg: "delete or rename one of them to remove the ambiguity".to_owned(),
            }),
        };

        ModulePath {
            name: mod_name,
            // True if either candidate exists, even in the ambiguous case.
            path_exists: default_exists || secondary_exists,
            result: result,
        }
    }

    /// Resolve the source file for out-of-line module `id`, honouring an
    /// explicit `#[path = "..."]` attribute before falling back to the
    /// conventional lookup in `default_submod_path`.
    ///
    /// Reports errors when the module is declared from a file that does
    /// not own its directory, or when no candidate file is found.
    fn submod_path(&mut self,
                   id: ast::Ident,
                   outer_attrs: &[ast::Attribute],
                   id_sp: Span) -> PResult<ModulePathSuccess> {
        // Start from the directory containing the current source file...
        let mut prefix = PathBuf::from(&self.sess.codemap().span_to_filename(self.span));
        prefix.pop();
        // ...then descend through any inline `mod` nesting seen so far.
        let mut dir_path = prefix;
        for part in &self.mod_path_stack {
            dir_path.push(&**part);
        }

        // An explicit `#[path]` attribute short-circuits the search.
        if let Some(p) = Parser::submod_path_from_attr(outer_attrs, &dir_path) {
            return Ok(ModulePathSuccess { path: p, owns_directory: true });
        }

        let paths = Parser::default_submod_path(id, &dir_path, self.sess.codemap());

        if !self.owns_directory {
            // e.g. declaring `mod bar;` inside `foo.rs` rather than
            // `foo/mod.rs` — the module has nowhere to put its own file.
            self.span_err(id_sp, "cannot declare a new module at this location");
            let this_module = match self.mod_path_stack.last() {
                Some(name) => name.to_string(),
                None =>
self.root_module_name.as_ref().unwrap().clone(), }; self.span_note(id_sp, &format!("maybe move this module `{0}` to its own directory \ via `{0}/mod.rs`", this_module)); if paths.path_exists { self.span_note(id_sp, &format!("... or maybe `use` the module `{}` instead \ of possibly redeclaring it", paths.name)); } self.abort_if_errors(); } match paths.result { Ok(succ) => Ok(succ), Err(err) => Err(self.span_fatal_help(id_sp, &err.err_msg, &err.help_msg)), } } /// Read a module from a source file. fn eval_src_mod(&mut self, id: ast::Ident, outer_attrs: &[ast::Attribute], id_sp: Span) -> PResult<(ast::Item_, Vec<ast::Attribute> )> { let ModulePathSuccess { path, owns_directory } = try!(self.submod_path(id, outer_attrs, id_sp)); self.eval_src_mod_from_path(path, owns_directory, id.to_string(), id_sp) } fn eval_src_mod_from_path(&mut self, path: PathBuf, owns_directory: bool, name: String, id_sp: Span) -> PResult<(ast::Item_, Vec<ast::Attribute> )> { let mut included_mod_stack = self.sess.included_mod_stack.borrow_mut(); match included_mod_stack.iter().position(|p| *p == path) { Some(i) => { let mut err = String::from("circular modules: "); let len = included_mod_stack.len(); for p in &included_mod_stack[i.. 
len] { err.push_str(&p.to_string_lossy()); err.push_str(" -> "); } err.push_str(&path.to_string_lossy()); return Err(self.span_fatal(id_sp, &err[..])); } None => () } included_mod_stack.push(path.clone()); drop(included_mod_stack); let mut p0 = new_sub_parser_from_file(self.sess, self.cfg.clone(), &path, owns_directory, Some(name), id_sp); let mod_inner_lo = p0.span.lo; let mod_attrs = p0.parse_inner_attributes(); let m0 = try!(p0.parse_mod_items(&token::Eof, mod_inner_lo)); self.sess.included_mod_stack.borrow_mut().pop(); Ok((ast::ItemMod(m0), mod_attrs)) } /// Parse a function declaration from a foreign module fn parse_item_foreign_fn(&mut self, vis: ast::Visibility, lo: BytePos, attrs: Vec<Attribute>) -> PResult<P<ForeignItem>> { try!(self.expect_keyword(keywords::Fn)); let (ident, mut generics) = try!(self.parse_fn_header()); let decl = try!(self.parse_fn_decl(true)); generics.where_clause = try!(self.parse_where_clause()); let hi = self.span.hi; try!(self.expect(&token::Semi)); Ok(P(ast::ForeignItem { ident: ident, attrs: attrs, node: ForeignItemFn(decl, generics), id: ast::DUMMY_NODE_ID, span: mk_sp(lo, hi), vis: vis })) } /// Parse a static item from a foreign module fn parse_item_foreign_static(&mut self, vis: ast::Visibility, lo: BytePos, attrs: Vec<Attribute>) -> PResult<P<ForeignItem>> { try!(self.expect_keyword(keywords::Static)); let mutbl = try!(self.eat_keyword(keywords::Mut)); let ident = try!(self.parse_ident()); try!(self.expect(&token::Colon)); let ty = try!(self.parse_ty_sum()); let hi = self.span.hi; try!(self.expect(&token::Semi)); Ok(P(ForeignItem { ident: ident, attrs: attrs, node: ForeignItemStatic(ty, mutbl), id: ast::DUMMY_NODE_ID, span: mk_sp(lo, hi), vis: vis })) } /// Parse extern crate links /// /// # Examples /// /// extern crate foo; /// extern crate bar as foo; fn parse_item_extern_crate(&mut self, lo: BytePos, visibility: Visibility, attrs: Vec<Attribute>) -> PResult<P<Item>> { let crate_name = try!(self.parse_ident()); let 
(maybe_path, ident) = if let Some(ident) = try!(self.parse_rename()) {
            // `extern crate foo as bar;` — keep the real crate name,
            // bind it locally under the renamed identifier.
            (Some(crate_name.name), ident)
        } else {
            // Plain `extern crate foo;`.
            (None, crate_name)
        };
        try!(self.expect(&token::Semi));

        let last_span = self.last_span;

        // `pub extern crate` parses but has surprising semantics; warn
        // and steer users toward `extern crate` + `pub use` instead.
        if visibility == ast::Public {
            self.span_warn(mk_sp(lo, last_span.hi),
                           "`pub extern crate` does not work as expected and should not be used. \
                            Likely to become an error. Prefer `extern crate` and `pub use`.");
        }

        Ok(self.mk_item(lo,
                        last_span.hi,
                        ident,
                        ItemExternCrate(maybe_path),
                        visibility,
                        attrs))
    }

    /// Parse an `extern` block declaring items for a foreign ABI
    /// (a foreign module).
    ///
    /// The `extern` keyword (and any ABI string, passed in as `opt_abi`)
    /// is expected to have been consumed before calling this method.
    ///
    /// # Examples:
    ///
    /// extern "C" {}
    /// extern {}
    fn parse_item_foreign_mod(&mut self,
                              lo: BytePos,
                              opt_abi: Option<abi::Abi>,
                              visibility: Visibility,
                              mut attrs: Vec<Attribute>)
                              -> PResult<P<Item>> {
        try!(self.expect(&token::OpenDelim(token::Brace)));

        // An `extern { ... }` block without an ABI string means "C".
        let abi = opt_abi.unwrap_or(abi::C);

        attrs.extend(self.parse_inner_attributes());

        // Collect foreign fn/static declarations until the closing brace.
        let mut foreign_items = vec![];
        while let Some(item) = try!(self.parse_foreign_item()) {
            foreign_items.push(item);
        }
        try!(self.expect(&token::CloseDelim(token::Brace)));

        let last_span = self.last_span;
        let m = ast::ForeignMod {
            abi: abi,
            items: foreign_items
        };
        // A foreign mod itself has no name; use the invalid ident.
        Ok(self.mk_item(lo,
                        last_span.hi,
                        special_idents::invalid,
                        ItemForeignMod(m),
                        visibility,
                        attrs))
    }

    /// Parse type Foo = Bar;
    fn parse_item_type(&mut self) -> PResult<ItemInfo> {
        let ident = try!(self.parse_ident());
        let mut tps = try!(self.parse_generics());
        // NOTE(review): the where clause on a type alias is parsed before
        // the `=`, i.e. `type Foo<T> where T: X = Bar;`.
        tps.where_clause = try!(self.parse_where_clause());
        try!(self.expect(&token::Eq));
        let ty = try!(self.parse_ty_sum());
        try!(self.expect(&token::Semi));
        Ok((ident, ItemTy(ty, tps), None))
    }

    /// Parse a structure-like enum variant definition
    /// this should probably be renamed or refactored...
fn parse_struct_def(&mut self) -> PResult<P<VariantData>> {
        // Caller has already consumed the opening `{`; read fields until
        // the matching `}` (which `bump` below then eats).
        let mut fields: Vec<StructField> = Vec::new();
        while self.token != token::CloseDelim(token::Brace) {
            fields.push(try!(self.parse_struct_decl_field(false)));
        }
        try!(self.bump());

        Ok(P(VariantData::Struct(fields, ast::DUMMY_NODE_ID)))
    }

    /// Parse the part of an "enum" decl following the '{'
    fn parse_enum_def(&mut self, _generics: &ast::Generics) -> PResult<EnumDef> {
        let mut variants = Vec::new();
        // Track whether every variant is nullary (unit-like): explicit
        // discriminators are only legal in that ("c-like") case.
        let mut all_nullary = true;
        let mut any_disr = None;
        while self.token != token::CloseDelim(token::Brace) {
            let variant_attrs = self.parse_outer_attributes();
            let vlo = self.span.lo;

            let struct_def;
            let mut disr_expr = None;
            let ident = try!(self.parse_ident());
            if try!(self.eat(&token::OpenDelim(token::Brace)) ){
                // Parse a struct variant.
                all_nullary = false;
                struct_def = try!(self.parse_struct_def());
            } else if self.check(&token::OpenDelim(token::Paren)) {
                // Tuple variant: a parenthesized list of field types.
                all_nullary = false;
                let arg_tys = try!(self.parse_enum_variant_seq(
                    &token::OpenDelim(token::Paren),
                    &token::CloseDelim(token::Paren),
                    seq_sep_trailing_allowed(token::Comma),
                    |p| p.parse_ty_sum()
                ));
                // Wrap each type as an unnamed (positional) struct field.
                let mut fields = Vec::new();
                for ty in arg_tys {
                    fields.push(Spanned { span: ty.span, node: ast::StructField_ {
                        ty: ty,
                        kind: ast::UnnamedField(ast::Inherited),
                        attrs: Vec::new(),
                        id: ast::DUMMY_NODE_ID,
                    }});
                }
                struct_def = P(ast::VariantData::Tuple(fields, ast::DUMMY_NODE_ID));
            } else if try!(self.eat(&token::Eq) ){
                // Unit variant with an explicit discriminator expression.
                disr_expr = Some(try!(self.parse_expr_nopanic()));
                any_disr = disr_expr.as_ref().map(|expr| expr.span);
                struct_def = P(ast::VariantData::Unit(ast::DUMMY_NODE_ID));
            } else {
                // Plain unit variant.
                struct_def = P(ast::VariantData::Unit(ast::DUMMY_NODE_ID));
            }

            let vr = ast::Variant_ {
                name: ident,
                attrs: variant_attrs,
                data: struct_def,
                disr_expr: disr_expr,
            };
            variants.push(P(spanned(vlo, self.last_span.hi, vr)));

            if !try!(self.eat(&token::Comma)) { break; }
        }
        try!(self.expect(&token::CloseDelim(token::Brace)));
        // Discriminators plus any non-nullary variant is an error; report
        // it at the (last) discriminator's span.
        match any_disr {
            Some(disr_span) if !all_nullary =>
                self.span_err(disr_span,
                    "discriminator values can only be used with a c-like enum"),
            _ => ()
        }

        Ok(ast::EnumDef { variants: variants })
    }

    /// Parse an "enum" declaration
    fn parse_item_enum(&mut self) -> PResult<ItemInfo> {
        let id = try!(self.parse_ident());
        let mut generics = try!(self.parse_generics());
        generics.where_clause = try!(self.parse_where_clause());
        try!(self.expect(&token::OpenDelim(token::Brace)));

        let enum_definition = try!(self.parse_enum_def(&generics));
        Ok((id, ItemEnum(enum_definition, generics), None))
    }

    /// Parses a string as an ABI spec on an extern type or module. Consumes
    /// the `extern` keyword, if one is found.
    fn parse_opt_abi(&mut self) -> PResult<Option<abi::Abi>> {
        match self.token {
            // Accept both ordinary and raw string literals for the ABI name.
            token::Literal(token::Str_(s), suf) | token::Literal(token::StrRaw(s, _), suf) => {
                let sp = self.span;
                // ABI strings may not carry a literal suffix (e.g. "C"suffix).
                self.expect_no_suffix(sp, "ABI spec", suf);
                try!(self.bump());
                match abi::lookup(&s.as_str()) {
                    Some(abi) => Ok(Some(abi)),
                    None => {
                        // Unknown ABI: report it, but recover by treating
                        // it as if no ABI were given.
                        let last_span = self.last_span;
                        self.span_err(
                            last_span,
                            &format!("invalid ABI: expected one of [{}], \
                                     found `{}`",
                                    abi::all_names().join(", "),
                                    s));
                        Ok(None)
                    }
                }
            }

            _ => Ok(None),
        }
    }

    /// Parse one of the items allowed by the flags.
    /// NB: this function no longer parses the items inside an
    /// extern crate.
fn parse_item_(&mut self, attrs: Vec<Attribute>,
               macros_allowed: bool) -> PResult<Option<P<Item>>> {
    // If the current token is an already-parsed item interpolated by macro
    // expansion, take it directly and merge the outer attributes into it.
    let nt_item = match self.token {
        token::Interpolated(token::NtItem(ref item)) => {
            Some((**item).clone())
        }
        _ => None
    };
    match nt_item {
        Some(mut item) => {
            try!(self.bump());
            let mut attrs = attrs;
            mem::swap(&mut item.attrs, &mut attrs);
            item.attrs.extend(attrs);
            return Ok(Some(P(item)));
        }
        None => {}
    }

    let lo = self.span.lo;

    let visibility = try!(self.parse_visibility());

    // NOTE: the order of the keyword checks below is significant; e.g.
    // `unsafe trait`/`unsafe impl` must be tried before plain `fn` and
    // `unsafe fn`.
    if try!(self.eat_keyword(keywords::Use)) {
        // USE ITEM
        let item_ = ItemUse(try!(self.parse_view_path()));
        try!(self.expect(&token::Semi));

        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                token::special_idents::invalid,
                                item_,
                                visibility,
                                attrs);
        return Ok(Some(item));
    }

    if try!(self.eat_keyword(keywords::Extern)) {
        if try!(self.eat_keyword(keywords::Crate)) {
            return Ok(Some(try!(self.parse_item_extern_crate(lo, visibility, attrs))));
        }

        let opt_abi = try!(self.parse_opt_abi());

        if try!(self.eat_keyword(keywords::Fn)) {
            // EXTERN FUNCTION ITEM
            let abi = opt_abi.unwrap_or(abi::C);
            let (ident, item_, extra_attrs) =
                try!(self.parse_item_fn(Unsafety::Normal, Constness::NotConst, abi));
            let last_span = self.last_span;
            let item = self.mk_item(lo, last_span.hi, ident, item_, visibility,
                                    maybe_append(attrs, extra_attrs));
            return Ok(Some(item));
        } else if self.check(&token::OpenDelim(token::Brace)) {
            return Ok(Some(try!(self.parse_item_foreign_mod(lo, opt_abi, visibility, attrs))));
        }

        // `extern` followed by neither `crate`, `fn` nor `{`: always an error.
        try!(self.expect_one_of(&[], &[]));
    }

    if try!(self.eat_keyword(keywords::Static)) {
        // STATIC ITEM
        let m = if try!(self.eat_keyword(keywords::Mut)) {MutMutable} else {MutImmutable};
        let (ident, item_, extra_attrs) = try!(self.parse_item_const(Some(m)));
        let last_span = self.last_span;
        let item = self.mk_item(lo, last_span.hi, ident, item_, visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if try!(self.eat_keyword(keywords::Const)) {
        if self.check_keyword(keywords::Fn) {
            // CONST FUNCTION ITEM
            try!(self.bump());
            let (ident, item_, extra_attrs) =
                try!(self.parse_item_fn(Unsafety::Normal, Constness::Const, abi::Rust));
            let last_span = self.last_span;
            let item = self.mk_item(lo, last_span.hi, ident, item_, visibility,
                                    maybe_append(attrs, extra_attrs));
            return Ok(Some(item));
        }

        // CONST ITEM
        if try!(self.eat_keyword(keywords::Mut)) {
            // `const mut` is rejected but parsing continues for recovery.
            let last_span = self.last_span;
            self.span_err(last_span, "const globals cannot be mutable");
            self.fileline_help(last_span, "did you mean to declare a static?");
        }
        let (ident, item_, extra_attrs) = try!(self.parse_item_const(None));
        let last_span = self.last_span;
        let item = self.mk_item(lo, last_span.hi, ident, item_, visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if self.check_keyword(keywords::Unsafe) &&
        self.look_ahead(1, |t| t.is_keyword(keywords::Trait))
    {
        // UNSAFE TRAIT ITEM
        try!(self.expect_keyword(keywords::Unsafe));
        try!(self.expect_keyword(keywords::Trait));
        let (ident, item_, extra_attrs) =
            try!(self.parse_item_trait(ast::Unsafety::Unsafe));
        let last_span = self.last_span;
        let item = self.mk_item(lo, last_span.hi, ident, item_, visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if self.check_keyword(keywords::Unsafe) &&
        self.look_ahead(1, |t| t.is_keyword(keywords::Impl))
    {
        // IMPL ITEM
        try!(self.expect_keyword(keywords::Unsafe));
        try!(self.expect_keyword(keywords::Impl));
        let (ident, item_, extra_attrs) = try!(self.parse_item_impl(ast::Unsafety::Unsafe));
        let last_span = self.last_span;
        let item = self.mk_item(lo, last_span.hi, ident, item_, visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if self.check_keyword(keywords::Fn) {
        // FUNCTION ITEM
        try!(self.bump());
        let (ident, item_, extra_attrs) =
            try!(self.parse_item_fn(Unsafety::Normal, Constness::NotConst, abi::Rust));
        let last_span = self.last_span;
        let item = self.mk_item(lo, last_span.hi, ident, item_, visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if self.check_keyword(keywords::Unsafe)
        && self.look_ahead(1, |t| *t != token::OpenDelim(token::Brace)) {
        // UNSAFE FUNCTION ITEM
        // (the lookahead for `!= {` distinguishes this from an `unsafe` block)
        try!(self.bump());
        let abi = if try!(self.eat_keyword(keywords::Extern)) {
            try!(self.parse_opt_abi()).unwrap_or(abi::C)
        } else {
            abi::Rust
        };
        // `const` is only accepted here for Rust-ABI functions.
        let constness = if abi == abi::Rust && try!(self.eat_keyword(keywords::Const)) {
            Constness::Const
        } else {
            Constness::NotConst
        };
        try!(self.expect_keyword(keywords::Fn));
        let (ident, item_, extra_attrs) =
            try!(self.parse_item_fn(Unsafety::Unsafe, constness, abi));
        let last_span = self.last_span;
        let item = self.mk_item(lo, last_span.hi, ident, item_, visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if try!(self.eat_keyword(keywords::Mod)) {
        // MODULE ITEM
        let (ident, item_, extra_attrs) =
            try!(self.parse_item_mod(&attrs[..]));
        let last_span = self.last_span;
        let item = self.mk_item(lo, last_span.hi, ident, item_, visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if try!(self.eat_keyword(keywords::Type)) {
        // TYPE ITEM
        let (ident, item_, extra_attrs) = try!(self.parse_item_type());
        let last_span = self.last_span;
        let item = self.mk_item(lo, last_span.hi, ident, item_, visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if try!(self.eat_keyword(keywords::Enum)) {
        // ENUM ITEM
        let (ident, item_, extra_attrs) = try!(self.parse_item_enum());
        let last_span = self.last_span;
        let item = self.mk_item(lo, last_span.hi, ident, item_, visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if try!(self.eat_keyword(keywords::Trait)) {
        // TRAIT ITEM
        let (ident, item_, extra_attrs) =
            try!(self.parse_item_trait(ast::Unsafety::Normal));
        let last_span = self.last_span;
        let item = self.mk_item(lo, last_span.hi, ident, item_, visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if try!(self.eat_keyword(keywords::Impl)) {
        // IMPL ITEM
        let (ident, item_, extra_attrs) = try!(self.parse_item_impl(ast::Unsafety::Normal));
        let last_span = self.last_span;
        let item = self.mk_item(lo, last_span.hi, ident, item_, visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if try!(self.eat_keyword(keywords::Struct)) {
        // STRUCT ITEM
        let (ident, item_, extra_attrs) = try!(self.parse_item_struct());
        let last_span = self.last_span;
        let item = self.mk_item(lo, last_span.hi, ident, item_, visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    // No item keyword matched: fall through to macro invocation / failure.
    self.parse_macro_use_or_failure(attrs, macros_allowed, lo, visibility)
}

/// Parse a foreign item.
fn parse_foreign_item(&mut self) -> PResult<Option<P<ForeignItem>>> {
    let attrs = self.parse_outer_attributes();
    let lo = self.span.lo;

    let visibility = try!(self.parse_visibility());

    if self.check_keyword(keywords::Static) {
        // FOREIGN STATIC ITEM
        return Ok(Some(try!(self.parse_item_foreign_static(visibility, lo, attrs))));
    }
    if self.check_keyword(keywords::Fn) || self.check_keyword(keywords::Unsafe) {
        // FOREIGN FUNCTION ITEM
        return Ok(Some(try!(self.parse_item_foreign_fn(visibility, lo, attrs))));
    }

    // FIXME #5668: this will occur for a macro invocation:
    match try!(self.parse_macro_use_or_failure(attrs, true, lo, visibility)) {
        Some(item) => {
            return Err(self.span_fatal(item.span, "macros cannot expand to foreign items"));
        }
        None => Ok(None)
    }
}

/// This is the fall-through for parsing items.
fn parse_macro_use_or_failure(
    &mut self,
    attrs: Vec<Attribute>,
    macros_allowed: bool,
    lo: BytePos,
    visibility: Visibility
) -> PResult<Option<P<Item>>> {
    // A macro invocation item looks like `path ! ident? ( … )` (or with
    // `{}`/`[]`-style delimiters); require `!` at lookahead 1 and an ident
    // or open delimiter at lookahead 2.
    if macros_allowed && !self.token.is_any_keyword()
            && self.look_ahead(1, |t| *t == token::Not)
            && (self.look_ahead(2, |t| t.is_plain_ident())
                || self.look_ahead(2, |t| *t == token::OpenDelim(token::Paren))
                || self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))) {
        // MACRO INVOCATION ITEM

        let last_span = self.last_span;
        self.complain_if_pub_macro(visibility, last_span);

        // item macro.
        let pth = try!(self.parse_path(NoTypesAllowed));
        try!(self.expect(&token::Not));

        // a 'special' identifier (like what `macro_rules!` uses)
        // is optional. We should eventually unify invoc syntax
        // and remove this.
        let id = if self.token.is_plain_ident() {
            try!(self.parse_ident())
        } else {
            token::special_idents::invalid // no special identifier
        };
        // eat a matched-delimiter token tree:
        let delim = try!(self.expect_open_delim());
        let tts = try!(self.parse_seq_to_end(&token::CloseDelim(delim),
                                             seq_sep_none(),
                                             |p| p.parse_token_tree()));
        // single-variant-enum... :
        let m = Mac_ { path: pth, tts: tts, ctxt: EMPTY_CTXT };
        let m: ast::Mac = codemap::Spanned { node: m,
                                             span: mk_sp(self.span.lo,
                                                         self.span.hi) };

        // Brace-delimited macro items need no trailing `;`; others do.
        if delim != token::Brace {
            if !try!(self.eat(&token::Semi)) {
                let last_span = self.last_span;
                self.span_err(last_span,
                              "macros that expand to items must either \
                               be surrounded with braces or followed by \
                               a semicolon");
            }
        }

        let item_ = ItemMac(m);
        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                id,
                                item_,
                                visibility,
                                attrs);
        return Ok(Some(item));
    }

    // FAILURE TO PARSE ITEM
    match visibility {
        Inherited => {}
        Public => {
            let last_span = self.last_span;
            return Err(self.span_fatal(last_span, "unmatched visibility `pub`"));
        }
    }

    if !attrs.is_empty() {
        self.expected_item_err(&attrs);
    }
    Ok(None)
}

pub fn parse_item_nopanic(&mut self) -> PResult<Option<P<Item>>> {
    let attrs = self.parse_outer_attributes();
    self.parse_item_(attrs, true)
}

/// Matches view_path : MOD? non_global_path as IDENT
///         | MOD? non_global_path MOD_SEP LBRACE RBRACE
///         | MOD? non_global_path MOD_SEP LBRACE ident_seq RBRACE
///         | MOD? non_global_path MOD_SEP STAR
///         | MOD? non_global_path
fn parse_view_path(&mut self) -> PResult<P<ViewPath>> {
    let lo = self.span.lo;

    // Allow a leading :: because the paths are absolute either way.
    // This occurs with "use $crate::..." in macros.
    try!(self.eat(&token::ModSep));

    if self.check(&token::OpenDelim(token::Brace)) {
        // use {foo,bar}
        let idents = try!(self.parse_unspanned_seq(
            &token::OpenDelim(token::Brace),
            &token::CloseDelim(token::Brace),
            seq_sep_trailing_allowed(token::Comma),
            |p| p.parse_path_list_item()));
        // Path prefix is empty for a bare `use {…}` list.
        let path = ast::Path {
            span: mk_sp(lo, self.span.hi),
            global: false,
            segments: Vec::new()
        };
        return Ok(P(spanned(lo, self.span.hi, ViewPathList(path, idents))));
    }

    let first_ident = try!(self.parse_ident());
    let mut path = vec!(first_ident);
    if let token::ModSep = self.token {
        // foo::bar or foo::{a,b,c} or foo::*
        while self.check(&token::ModSep) {
            try!(self.bump());

            match self.token {
                token::Ident(..) => {
                    let ident = try!(self.parse_ident());
                    path.push(ident);
                }

                // foo::bar::{a,b,c}
                token::OpenDelim(token::Brace) => {
                    let idents = try!(self.parse_unspanned_seq(
                        &token::OpenDelim(token::Brace),
                        &token::CloseDelim(token::Brace),
                        seq_sep_trailing_allowed(token::Comma),
                        |p| p.parse_path_list_item()
                    ));
                    let path = ast::Path {
                        span: mk_sp(lo, self.span.hi),
                        global: false,
                        segments: path.into_iter().map(|identifier| {
                            ast::PathSegment {
                                identifier: identifier,
                                parameters: ast::PathParameters::none(),
                            }
                        }).collect()
                    };
                    return Ok(P(spanned(lo, self.span.hi, ViewPathList(path, idents))));
                }

                // foo::bar::*
                token::BinOp(token::Star) => {
                    try!(self.bump());
                    let path = ast::Path {
                        span: mk_sp(lo, self.span.hi),
                        global: false,
                        segments: path.into_iter().map(|identifier| {
                            ast::PathSegment {
                                identifier: identifier,
                                parameters: ast::PathParameters::none(),
                            }
                        }).collect()
                    };
                    return Ok(P(spanned(lo, self.span.hi, ViewPathGlob(path))));
                }

                // fall-through for case foo::bar::;
                token::Semi => {
                    self.span_err(self.span, "expected identifier or `{` or `*`, found `;`");
                }

                _ => break
            }
        }
    }
    // Simple path, possibly renamed via a trailing `as NEW_NAME`; defaults
    // to the last path segment.
    let mut rename_to = path[path.len() - 1];
    let path = ast::Path {
        span: mk_sp(lo, self.last_span.hi),
        global: false,
        segments: path.into_iter().map(|identifier| {
            ast::PathSegment {
                identifier: identifier,
                parameters: ast::PathParameters::none(),
            }
        }).collect()
    };
    rename_to = try!(self.parse_rename()).unwrap_or(rename_to);
    Ok(P(spanned(lo, self.last_span.hi, ViewPathSimple(rename_to, path))))
}

/// Parse an optional `as IDENT` rename clause.
fn parse_rename(&mut self) -> PResult<Option<Ident>> {
    if try!(self.eat_keyword(keywords::As)) {
        self.parse_ident().map(Some)
    } else {
        Ok(None)
    }
}

/// Parses a source module as a crate. This is the main
/// entry point for the parser.
pub fn parse_crate_mod(&mut self) -> PResult<Crate> {
    let lo = self.span.lo;
    Ok(ast::Crate {
        attrs: self.parse_inner_attributes(),
        module: try!(self.parse_mod_items(&token::Eof, lo)),
        config: self.cfg.clone(),
        span: mk_sp(lo, self.span.lo),
        exported_macros: Vec::new(),
    })
}

/// If the current token is a string literal, consume it and return its
/// contents, style (cooked/raw) and optional suffix; otherwise `None`.
pub fn parse_optional_str(&mut self)
                          -> PResult<Option<(InternedString,
                                             ast::StrStyle,
                                             Option<ast::Name>)>> {
    let ret = match self.token {
        token::Literal(token::Str_(s), suf) => {
            (self.id_to_interned_str(ast::Ident::with_empty_ctxt(s)),
             ast::CookedStr,
             suf)
        }
        token::Literal(token::StrRaw(s, n), suf) => {
            (self.id_to_interned_str(ast::Ident::with_empty_ctxt(s)),
             ast::RawStr(n),
             suf)
        }
        _ => return Ok(None)
    };
    try!(self.bump());
    Ok(Some(ret))
}

/// Expect a string literal; fatal error otherwise. Rejects suffixes.
pub fn parse_str(&mut self) -> PResult<(InternedString, StrStyle)> {
    match try!(self.parse_optional_str()) {
        Some((s, style, suf)) => {
            let sp = self.last_span;
            self.expect_no_suffix(sp, "string literal", suf);
            Ok((s, style))
        }
        _ =>  Err(self.fatal("expected string literal"))
    }
}
}
Auto merge of #29242 - matklad:fix-comment, r=alexcrichton

Qualified paths allow full path after the `>::`. For example

```rust
<T as Foo>::U::generic_method::<f64>()
```

The example is taken from `test/run-pass/associated-item-long-paths.rs`.

// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
// // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. pub use self::PathParsingMode::*; use abi; use ast::BareFnTy; use ast::{RegionTyParamBound, TraitTyParamBound, TraitBoundModifier}; use ast::{Public, Unsafety}; use ast::{Mod, BiAdd, Arg, Arm, Attribute, BindByRef, BindByValue}; use ast::{BiBitAnd, BiBitOr, BiBitXor, BiRem, BiLt, BiGt, Block}; use ast::{BlockCheckMode, CaptureByRef, CaptureByValue, CaptureClause}; use ast::{Constness, ConstImplItem, ConstTraitItem, Crate, CrateConfig}; use ast::{Decl, DeclItem, DeclLocal, DefaultBlock, DefaultReturn}; use ast::{UnDeref, BiDiv, EMPTY_CTXT, EnumDef, ExplicitSelf}; use ast::{Expr, Expr_, ExprAddrOf, ExprMatch, ExprAgain}; use ast::{ExprAssign, ExprAssignOp, ExprBinary, ExprBlock, ExprBox}; use ast::{ExprBreak, ExprCall, ExprCast, ExprInPlace}; use ast::{ExprField, ExprTupField, ExprClosure, ExprIf, ExprIfLet, ExprIndex}; use ast::{ExprLit, ExprLoop, ExprMac, ExprRange}; use ast::{ExprMethodCall, ExprParen, ExprPath}; use ast::{ExprRepeat, ExprRet, ExprStruct, ExprTup, ExprUnary}; use ast::{ExprVec, ExprWhile, ExprWhileLet, ExprForLoop, Field, FnDecl}; use ast::{ForeignItem, ForeignItemStatic, ForeignItemFn, ForeignMod, FunctionRetTy}; use ast::{Ident, Inherited, ImplItem, Item, Item_, ItemStatic}; use ast::{ItemEnum, ItemFn, ItemForeignMod, ItemImpl, ItemConst}; use ast::{ItemMac, ItemMod, ItemStruct, ItemTrait, ItemTy, ItemDefaultImpl}; use ast::{ItemExternCrate, ItemUse}; use ast::{LifetimeDef, Lit, Lit_}; use ast::{LitBool, LitChar, LitByte, LitByteStr}; use ast::{LitStr, LitInt, Local}; use ast::{MacStmtWithBraces, MacStmtWithSemicolon, MacStmtWithoutBraces}; use ast::{MutImmutable, MutMutable, Mac_}; use ast::{MutTy, BiMul, Mutability}; use ast::{MethodImplItem, 
NamedField, UnNeg, NoReturn, UnNot}; use ast::{Pat, PatBox, PatEnum, PatIdent, PatLit, PatQPath, PatMac, PatRange}; use ast::{PatRegion, PatStruct, PatTup, PatVec, PatWild, PatWildMulti}; use ast::PatWildSingle; use ast::{PolyTraitRef, QSelf}; use ast::{Return, BiShl, BiShr, Stmt, StmtDecl}; use ast::{StmtExpr, StmtSemi, StmtMac, VariantData, StructField}; use ast::{BiSub, StrStyle}; use ast::{SelfExplicit, SelfRegion, SelfStatic, SelfValue}; use ast::{Delimited, SequenceRepetition, TokenTree, TraitItem, TraitRef}; use ast::{TtDelimited, TtSequence, TtToken}; use ast::{Ty, Ty_, TypeBinding}; use ast::{TyMac}; use ast::{TyFixedLengthVec, TyBareFn, TyTypeof, TyInfer}; use ast::{TyParam, TyParamBound, TyParen, TyPath, TyPolyTraitRef, TyPtr}; use ast::{TyRptr, TyTup, TyU32, TyVec}; use ast::{TypeImplItem, TypeTraitItem}; use ast::{UnnamedField, UnsafeBlock}; use ast::{ViewPath, ViewPathGlob, ViewPathList, ViewPathSimple}; use ast::{Visibility, WhereClause}; use ast; use ast_util::{self, AS_PREC, ident_to_path, operator_prec}; use codemap::{self, Span, BytePos, Spanned, spanned, mk_sp, CodeMap}; use diagnostic; use ext::tt::macro_parser; use parse; use parse::attr::ParserAttr; use parse::classify; use parse::common::{SeqSep, seq_sep_none, seq_sep_trailing_allowed}; use parse::lexer::{Reader, TokenAndSpan}; use parse::obsolete::{ParserObsoleteMethods, ObsoleteSyntax}; use parse::token::{self, MatchNt, SubstNt, SpecialVarNt, InternedString}; use parse::token::{keywords, special_idents, SpecialMacroVar}; use parse::{new_sub_parser_from_file, ParseSess}; use print::pprust; use ptr::P; use owned_slice::OwnedSlice; use parse::PResult; use diagnostic::FatalError; use std::collections::HashSet; use std::io::prelude::*; use std::mem; use std::path::{Path, PathBuf}; use std::rc::Rc; use std::slice; bitflags! 
{
    // Context-sensitive restrictions on expression parsing, threaded through
    // the parser as a bit set.
    flags Restrictions: u8 {
        const RESTRICTION_STMT_EXPR         = 1 << 0,
        const RESTRICTION_NO_STRUCT_LITERAL = 1 << 1,
    }
}

// (name, item payload, optional extra attributes) returned by item parsers.
type ItemInfo = (Ident, Item_, Option<Vec<Attribute> >);

/// How to parse a path. There are four different kinds of paths, all of which
/// are parsed somewhat differently.
#[derive(Copy, Clone, PartialEq)]
pub enum PathParsingMode {
    /// A path with no type parameters; e.g. `foo::bar::Baz`
    NoTypesAllowed,
    /// A path with a lifetime and type parameters, with no double colons
    /// before the type parameters; e.g. `foo::bar<'a>::Baz<T>`
    LifetimeAndTypesWithoutColons,
    /// A path with a lifetime and type parameters with double colons before
    /// the type parameters; e.g. `foo::bar::<'a>::Baz::<T>`
    LifetimeAndTypesWithColons,
}

/// How to parse a bound, whether to allow bound modifiers such as `?`.
#[derive(Copy, Clone, PartialEq)]
pub enum BoundParsingMode {
    Bare,
    Modified,
}

/// Possibly accept an `token::Interpolated` expression (a pre-parsed expression
/// dropped into the token stream, which happens while parsing the result of
/// macro expansion). Placement of these is not as complex as I feared it would
/// be. The important thing is to make sure that lookahead doesn't balk at
/// `token::Interpolated` tokens.
macro_rules! maybe_whole_expr {
    ($p:expr) => (
        {
            let found = match $p.token {
                token::Interpolated(token::NtExpr(ref e)) => {
                    Some((*e).clone())
                }
                token::Interpolated(token::NtPath(_)) => {
                    // FIXME: The following avoids an issue with lexical borrowck scopes,
                    // but the clone is unfortunate.
                    let pt = match $p.token {
                        token::Interpolated(token::NtPath(ref pt)) => (**pt).clone(),
                        _ => unreachable!()
                    };
                    let span = $p.span;
                    Some($p.mk_expr(span.lo, span.hi, ExprPath(None, pt)))
                }
                token::Interpolated(token::NtBlock(_)) => {
                    // FIXME: The following avoids an issue with lexical borrowck scopes,
                    // but the clone is unfortunate.
                    let b = match $p.token {
                        token::Interpolated(token::NtBlock(ref b)) => (*b).clone(),
                        _ => unreachable!()
                    };
                    let span = $p.span;
                    Some($p.mk_expr(span.lo, span.hi, ExprBlock(b)))
                }
                _ => None
            };
            // If we found an interpolated expression, consume the token and
            // early-return it from the enclosing function.
            match found {
                Some(e) => {
                    try!($p.bump());
                    return Ok(e);
                }
                None => ()
            }
        }
    )
}

/// As maybe_whole_expr, but for things other than expressions.
/// The prefix selects how the interpolated value is returned:
/// no prefix => cloned; `no_clone` => moved; `deref` => `(*x).clone()`;
/// `Some deref` => wrapped in `Some`; `pair_empty` => `(Vec::new(), x)`.
macro_rules! maybe_whole {
    ($p:expr, $constructor:ident) => (
        {
            let found = match ($p).token {
                token::Interpolated(token::$constructor(_)) => {
                    Some(try!(($p).bump_and_get()))
                }
                _ => None
            };
            if let Some(token::Interpolated(token::$constructor(x))) = found {
                return Ok(x.clone());
            }
        }
    );
    (no_clone $p:expr, $constructor:ident) => (
        {
            let found = match ($p).token {
                token::Interpolated(token::$constructor(_)) => {
                    Some(try!(($p).bump_and_get()))
                }
                _ => None
            };
            if let Some(token::Interpolated(token::$constructor(x))) = found {
                return Ok(x);
            }
        }
    );
    (deref $p:expr, $constructor:ident) => (
        {
            let found = match ($p).token {
                token::Interpolated(token::$constructor(_)) => {
                    Some(try!(($p).bump_and_get()))
                }
                _ => None
            };
            if let Some(token::Interpolated(token::$constructor(x))) = found {
                return Ok((*x).clone());
            }
        }
    );
    (Some deref $p:expr, $constructor:ident) => (
        {
            let found = match ($p).token {
                token::Interpolated(token::$constructor(_)) => {
                    Some(try!(($p).bump_and_get()))
                }
                _ => None
            };
            if let Some(token::Interpolated(token::$constructor(x))) = found {
                return Ok(Some((*x).clone()));
            }
        }
    );
    (pair_empty $p:expr, $constructor:ident) => (
        {
            let found = match ($p).token {
                token::Interpolated(token::$constructor(_)) => {
                    Some(try!(($p).bump_and_get()))
                }
                _ => None
            };
            if let Some(token::Interpolated(token::$constructor(x))) = found {
                return Ok((Vec::new(), x));
            }
        }
    )
}

/// Append `rhs` (if any) onto `lhs`, returning the combined attribute list.
fn maybe_append(mut lhs: Vec<Attribute>, rhs: Option<Vec<Attribute>>)
                -> Vec<Attribute> {
    if let Some(ref attrs) = rhs {
        lhs.extend(attrs.iter().cloned())
    }
    lhs
}

/* ident is handled by common.rs */

pub struct Parser<'a> {
    pub sess: &'a ParseSess,
    /// the current token:
    pub token: token::Token,
    /// the span of the current token:
    pub span: Span,
    /// the span of the prior token:
    pub last_span: Span,
    pub cfg: CrateConfig,
    /// the previous token or None (only stashed sometimes).
    pub last_token: Option<Box<token::Token>>,
    // Lookahead ring buffer and its cursor bounds.
    pub buffer: [TokenAndSpan; 4],
    pub buffer_start: isize,
    pub buffer_end: isize,
    pub tokens_consumed: usize,
    pub restrictions: Restrictions,
    pub quote_depth: usize, // not (yet) related to the quasiquoter
    pub reader: Box<Reader+'a>,
    pub interner: Rc<token::IdentInterner>,
    /// The set of seen errors about obsolete syntax. Used to suppress
    /// extra detail when the same error is seen twice
    pub obsolete_set: HashSet<ObsoleteSyntax>,
    /// Used to determine the path to externally loaded source files
    pub mod_path_stack: Vec<InternedString>,
    /// Stack of spans of open delimiters. Used for error message.
    pub open_braces: Vec<Span>,
    /// Flag if this parser "owns" the directory that it is currently parsing
    /// in. This will affect how nested files are looked up.
    pub owns_directory: bool,
    /// Name of the root module this parser originated from. If `None`, then the
    /// name is not known. This does not change while the parser is descending
    /// into modules, and sub-parsers have new values for this name.
    pub root_module_name: Option<String>,
    /// Tokens/keywords that would have been accepted at the current point;
    /// used to build "expected one of …" diagnostics.
    pub expected_tokens: Vec<TokenType>,
}

#[derive(PartialEq, Eq, Clone)]
pub enum TokenType {
    Token(token::Token),
    Keyword(keywords::Keyword),
    Operator,
}

impl TokenType {
    /// Render this expectation for use in error messages.
    fn to_string(&self) -> String {
        match *self {
            TokenType::Token(ref t) => format!("`{}`", Parser::token_to_string(t)),
            TokenType::Operator => "an operator".to_string(),
            TokenType::Keyword(kw) => format!("`{}`", kw.to_name()),
        }
    }
}

fn is_plain_ident_or_underscore(t: &token::Token) -> bool {
    t.is_plain_ident() || *t == token::Underscore
}

/// Information about the path to a module.
pub struct ModulePath { pub name: String, pub path_exists: bool, pub result: Result<ModulePathSuccess, ModulePathError>, } pub struct ModulePathSuccess { pub path: ::std::path::PathBuf, pub owns_directory: bool, } pub struct ModulePathError { pub err_msg: String, pub help_msg: String, } impl<'a> Parser<'a> { pub fn new(sess: &'a ParseSess, cfg: ast::CrateConfig, mut rdr: Box<Reader+'a>) -> Parser<'a> { let tok0 = rdr.real_token(); let span = tok0.sp; let placeholder = TokenAndSpan { tok: token::Underscore, sp: span, }; Parser { reader: rdr, interner: token::get_ident_interner(), sess: sess, cfg: cfg, token: tok0.tok, span: span, last_span: span, last_token: None, buffer: [ placeholder.clone(), placeholder.clone(), placeholder.clone(), placeholder.clone(), ], buffer_start: 0, buffer_end: 0, tokens_consumed: 0, restrictions: Restrictions::empty(), quote_depth: 0, obsolete_set: HashSet::new(), mod_path_stack: Vec::new(), open_braces: Vec::new(), owns_directory: true, root_module_name: None, expected_tokens: Vec::new(), } } // Panicing fns (for now!) 
// This is so that the quote_*!() syntax extensions pub fn parse_expr(&mut self) -> P<Expr> { panictry!(self.parse_expr_nopanic()) } pub fn parse_item(&mut self) -> Option<P<Item>> { panictry!(self.parse_item_nopanic()) } pub fn parse_pat(&mut self) -> P<Pat> { panictry!(self.parse_pat_nopanic()) } pub fn parse_arm(&mut self) -> Arm { panictry!(self.parse_arm_nopanic()) } pub fn parse_ty(&mut self) -> P<Ty> { panictry!(self.parse_ty_nopanic()) } pub fn parse_stmt(&mut self) -> Option<P<Stmt>> { panictry!(self.parse_stmt_nopanic()) } /// Convert a token to a string using self's reader pub fn token_to_string(token: &token::Token) -> String { pprust::token_to_string(token) } /// Convert the current token to a string using self's reader pub fn this_token_to_string(&self) -> String { Parser::token_to_string(&self.token) } pub fn unexpected_last(&self, t: &token::Token) -> FatalError { let token_str = Parser::token_to_string(t); let last_span = self.last_span; self.span_fatal(last_span, &format!("unexpected token: `{}`", token_str)) } pub fn unexpected(&mut self) -> FatalError { match self.expect_one_of(&[], &[]) { Err(e) => e, Ok(_) => unreachable!() } } /// Expect and consume the token t. Signal an error if /// the next token is not t. pub fn expect(&mut self, t: &token::Token) -> PResult<()> { if self.expected_tokens.is_empty() { if self.token == *t { self.bump() } else { let token_str = Parser::token_to_string(t); let this_token_str = self.this_token_to_string(); Err(self.fatal(&format!("expected `{}`, found `{}`", token_str, this_token_str))) } } else { self.expect_one_of(slice::ref_slice(t), &[]) } } /// Expect next token to be edible or inedible token. If edible, /// then consume it; if inedible, then return without consuming /// anything. Signal a fatal error if next token is unexpected. 
pub fn expect_one_of(&mut self,
                     edible: &[token::Token],
                     inedible: &[token::Token]) -> PResult<()>{
    /// Render a list of expectations as `a, b, or c` English prose.
    fn tokens_to_string(tokens: &[TokenType]) -> String {
        let mut i = tokens.iter();
        // This might be a sign we need a connect method on Iterator.
        let b = i.next()
                 .map_or("".to_string(), |t| t.to_string());
        i.enumerate().fold(b, |mut b, (i, ref a)| {
            if tokens.len() > 2 && i == tokens.len() - 2 {
                b.push_str(", or ");
            } else if tokens.len() == 2 && i == tokens.len() - 2 {
                b.push_str(" or ");
            } else {
                b.push_str(", ");
            }
            b.push_str(&*a.to_string());
            b
        })
    }
    if edible.contains(&self.token) {
        self.bump()
    } else if inedible.contains(&self.token) {
        // leave it in the input
        Ok(())
    } else {
        // Merge explicit expectations with everything accumulated in
        // `expected_tokens`, then sort and dedup for a stable message.
        let mut expected = edible.iter()
            .map(|x| TokenType::Token(x.clone()))
            .chain(inedible.iter().map(|x| TokenType::Token(x.clone())))
            .chain(self.expected_tokens.iter().cloned())
            .collect::<Vec<_>>();
        expected.sort_by(|a, b| a.to_string().cmp(&b.to_string()));
        expected.dedup();
        let expect = tokens_to_string(&expected[..]);
        let actual = self.this_token_to_string();
        Err(self.fatal(
            &(if expected.len() > 1 {
                (format!("expected one of {}, found `{}`",
                         expect,
                         actual))
            } else if expected.is_empty() {
                (format!("unexpected token: `{}`",
                         actual))
            } else {
                (format!("expected {}, found `{}`",
                         expect,
                         actual))
            })[..]
        ))
    }
}

/// Check for erroneous `ident { }`; if matches, signal error and
/// recover (without consuming any expected input token).  Returns
/// true if and only if input was consumed for recovery.
pub fn check_for_erroneous_unit_struct_expecting(&mut self,
                                                 expected: &[token::Token])
                                                 -> PResult<bool> {
    if self.token == token::OpenDelim(token::Brace)
        && expected.iter().all(|t| *t != token::OpenDelim(token::Brace))
        && self.look_ahead(1, |t| *t == token::CloseDelim(token::Brace)) {
        // matched; signal non-fatal error and recover.
        let span = self.span;
        self.span_err(span,
                      "unit-like struct construction is written with no trailing `{ }`");
        // Swallow the stray `{ }` pair.
        try!(self.eat(&token::OpenDelim(token::Brace)));
        try!(self.eat(&token::CloseDelim(token::Brace)));
        Ok(true)
    } else {
        Ok(false)
    }
}

/// Commit to parsing a complete expression `e` expected to be
/// followed by some token from the set edible + inedible.  Recover
/// from anticipated input errors, discarding erroneous characters.
pub fn commit_expr(&mut self, e: &Expr, edible: &[token::Token],
                   inedible: &[token::Token]) -> PResult<()> {
    debug!("commit_expr {:?}", e);
    if let ExprPath(..) = e.node {
        // might be unit-struct construction; check for recoverable input error.
        let expected = edible.iter()
            .cloned()
            .chain(inedible.iter().cloned())
            .collect::<Vec<_>>();
        try!(self.check_for_erroneous_unit_struct_expecting(&expected[..]));
    }
    self.expect_one_of(edible, inedible)
}

pub fn commit_expr_expecting(&mut self, e: &Expr, edible: token::Token) -> PResult<()> {
    self.commit_expr(e, &[edible], &[])
}

/// Commit to parsing a complete statement `s`, which expects to be
/// followed by some token from the set edible + inedible.  Check
/// for recoverable input errors, discarding erroneous characters.
pub fn commit_stmt(&mut self, edible: &[token::Token], inedible: &[token::Token]) -> PResult<()> { if self.last_token .as_ref() .map_or(false, |t| t.is_ident() || t.is_path()) { let expected = edible.iter() .cloned() .chain(inedible.iter().cloned()) .collect::<Vec<_>>(); try!(self.check_for_erroneous_unit_struct_expecting(&expected)); } self.expect_one_of(edible, inedible) } pub fn commit_stmt_expecting(&mut self, edible: token::Token) -> PResult<()> { self.commit_stmt(&[edible], &[]) } pub fn parse_ident(&mut self) -> PResult<ast::Ident> { self.check_strict_keywords(); try!(self.check_reserved_keywords()); match self.token { token::Ident(i, _) => { try!(self.bump()); Ok(i) } token::Interpolated(token::NtIdent(..)) => { self.bug("ident interpolation not converted to real token"); } _ => { let token_str = self.this_token_to_string(); Err(self.fatal(&format!("expected ident, found `{}`", token_str))) } } } pub fn parse_ident_or_self_type(&mut self) -> PResult<ast::Ident> { if self.is_self_type_ident() { self.expect_self_type_ident() } else { self.parse_ident() } } pub fn parse_path_list_item(&mut self) -> PResult<ast::PathListItem> { let lo = self.span.lo; let node = if try!(self.eat_keyword(keywords::SelfValue)) { let rename = try!(self.parse_rename()); ast::PathListMod { id: ast::DUMMY_NODE_ID, rename: rename } } else { let ident = try!(self.parse_ident()); let rename = try!(self.parse_rename()); ast::PathListIdent { name: ident, rename: rename, id: ast::DUMMY_NODE_ID } }; let hi = self.last_span.hi; Ok(spanned(lo, hi, node)) } /// Check if the next token is `tok`, and return `true` if so. /// /// This method is will automatically add `tok` to `expected_tokens` if `tok` is not /// encountered. pub fn check(&mut self, tok: &token::Token) -> bool { let is_present = self.token == *tok; if !is_present { self.expected_tokens.push(TokenType::Token(tok.clone())); } is_present } /// Consume token 'tok' if it exists. 
Returns true if the given /// token was present, false otherwise. pub fn eat(&mut self, tok: &token::Token) -> PResult<bool> { let is_present = self.check(tok); if is_present { try!(self.bump())} Ok(is_present) } pub fn check_keyword(&mut self, kw: keywords::Keyword) -> bool { self.expected_tokens.push(TokenType::Keyword(kw)); self.token.is_keyword(kw) } /// If the next token is the given keyword, eat it and return /// true. Otherwise, return false. pub fn eat_keyword(&mut self, kw: keywords::Keyword) -> PResult<bool> { if self.check_keyword(kw) { try!(self.bump()); Ok(true) } else { Ok(false) } } pub fn eat_keyword_noexpect(&mut self, kw: keywords::Keyword) -> PResult<bool> { if self.token.is_keyword(kw) { try!(self.bump()); Ok(true) } else { Ok(false) } } /// If the given word is not a keyword, signal an error. /// If the next token is not the given word, signal an error. /// Otherwise, eat it. pub fn expect_keyword(&mut self, kw: keywords::Keyword) -> PResult<()> { if !try!(self.eat_keyword(kw) ){ self.expect_one_of(&[], &[]) } else { Ok(()) } } /// Signal an error if the given string is a strict keyword pub fn check_strict_keywords(&mut self) { if self.token.is_strict_keyword() { let token_str = self.this_token_to_string(); let span = self.span; self.span_err(span, &format!("expected identifier, found keyword `{}`", token_str)); } } /// Signal an error if the current token is a reserved keyword pub fn check_reserved_keywords(&mut self) -> PResult<()>{ if self.token.is_reserved_keyword() { let token_str = self.this_token_to_string(); Err(self.fatal(&format!("`{}` is a reserved keyword", token_str))) } else { Ok(()) } } /// Expect and consume an `&`. If `&&` is seen, replace it with a single /// `&` and continue. If an `&` is not seen, signal an error. 
fn expect_and(&mut self) -> PResult<()> {
    self.expected_tokens.push(TokenType::Token(token::BinOp(token::And)));
    match self.token {
        token::BinOp(token::And) => self.bump(),
        token::AndAnd => {
            // Split `&&` into two `&`s: consume the first by replacing the
            // current token with a single `&` whose span starts one byte in.
            let span = self.span;
            let lo = span.lo + BytePos(1);
            Ok(self.replace_token(token::BinOp(token::And), lo, span.hi))
        }
        _ => self.expect_one_of(&[], &[])
    }
}

/// Report an error if a literal of the given `kind` carries a suffix
/// (e.g. `"foo"bar`); only numeric literals may legally take suffixes.
pub fn expect_no_suffix(&self, sp: Span, kind: &str, suffix: Option<ast::Name>) {
    match suffix {
        None => {/* everything ok */}
        Some(suf) => {
            let text = suf.as_str();
            if text.is_empty() {
                // The lexer should never produce an empty suffix.
                self.span_bug(sp, "found empty literal suffix in Some")
            }
            self.span_err(sp, &*format!("{} with a suffix is invalid", kind));
        }
    }
}

/// Attempt to consume a `<`. If `<<` is seen, replace it with a single
/// `<` and continue. If a `<` is not seen, return false.
///
/// This is meant to be used when parsing generics on a path to get the
/// starting token.
fn eat_lt(&mut self) -> PResult<bool> {
    self.expected_tokens.push(TokenType::Token(token::Lt));
    match self.token {
        token::Lt => {
            try!(self.bump());
            Ok(true)
        }
        token::BinOp(token::Shl) => {
            // Split `<<` into two `<`s, consuming the first.
            let span = self.span;
            let lo = span.lo + BytePos(1);
            self.replace_token(token::Lt, lo, span.hi);
            Ok(true)
        }
        _ => Ok(false),
    }
}

/// Like `eat_lt`, but signal an error if the `<` is missing.
fn expect_lt(&mut self) -> PResult<()> {
    if !try!(self.eat_lt()) {
        self.expect_one_of(&[], &[])
    } else {
        Ok(())
    }
}

/// Expect and consume a GT. if a >> is seen, replace it
/// with a single > and continue. If a GT is not seen,
/// signal an error.
pub fn expect_gt(&mut self) -> PResult<()> {
    self.expected_tokens.push(TokenType::Token(token::Gt));
    match self.token {
        token::Gt => self.bump(),
        token::BinOp(token::Shr) => {
            // `>>` — consume one `>`, leaving a `>` behind.
            let span = self.span;
            let lo = span.lo + BytePos(1);
            Ok(self.replace_token(token::Gt, lo, span.hi))
        }
        token::BinOpEq(token::Shr) => {
            // `>>=` — consume one `>`, leaving `>=` behind.
            let span = self.span;
            let lo = span.lo + BytePos(1);
            Ok(self.replace_token(token::Ge, lo, span.hi))
        }
        token::Ge => {
            // `>=` — consume the `>`, leaving `=` behind.
            let span = self.span;
            let lo = span.lo + BytePos(1);
            Ok(self.replace_token(token::Eq, lo, span.hi))
        }
        _ => {
            let gt_str = Parser::token_to_string(&token::Gt);
            let this_token_str = self.this_token_to_string();
            Err(self.fatal(&format!("expected `{}`, found `{}`",
                                    gt_str,
                                    this_token_str)))
        }
    }
}

/// Parse a `sep`-separated sequence, stopping before any token that could
/// close a generics list (`>`, `>>`, `>=`, `>>=`). Returns the parsed items
/// plus a flag that is true iff `f` returned `None` (caller-requested early
/// return).
pub fn parse_seq_to_before_gt_or_return<T, F>(&mut self,
                                              sep: Option<token::Token>,
                                              mut f: F)
                                              -> PResult<(OwnedSlice<T>, bool)>
    where F: FnMut(&mut Parser) -> PResult<Option<T>>,
{
    let mut v = Vec::new();
    // This loop works by alternating back and forth between parsing types
    // and commas. For example, given a string `A, B,>`, the parser would
    // first parse `A`, then a comma, then `B`, then a comma. After that it
    // would encounter a `>` and stop. This lets the parser handle trailing
    // commas in generic parameters, because it can stop either after
    // parsing a type or after parsing a comma.
    for i in 0.. {
        if self.check(&token::Gt)
            || self.token == token::BinOp(token::Shr)
            || self.token == token::Ge
            || self.token == token::BinOpEq(token::Shr) {
            break;
        }

        if i % 2 == 0 {
            // Even iterations parse an item ...
            match try!(f(self)) {
                Some(result) => v.push(result),
                None => return Ok((OwnedSlice::from_vec(v), true))
            }
        } else {
            // ... odd iterations expect the separator.
            if let Some(t) = sep.as_ref() {
                try!(self.expect(t));
            }
        }
    }
    return Ok((OwnedSlice::from_vec(v), false));
}

/// Parse a sequence bracketed by '<' and '>', stopping
/// before the '>'.
pub fn parse_seq_to_before_gt<T, F>(&mut self,
                                    sep: Option<token::Token>,
                                    mut f: F)
                                    -> PResult<OwnedSlice<T>>
    where F: FnMut(&mut Parser) -> PResult<T>,
{
    // Wrapping each item in `Some` means the underlying helper can never
    // take its early-return path — hence the assert below.
    let (result, returned) = try!(self.parse_seq_to_before_gt_or_return(sep,
                                    |p| Ok(Some(try!(f(p))))));
    assert!(!returned);
    return Ok(result);
}

/// Parse a sequence bracketed by `<` and `>`, consuming the closing `>`.
pub fn parse_seq_to_gt<T, F>(&mut self,
                             sep: Option<token::Token>,
                             f: F)
                             -> PResult<OwnedSlice<T>>
    where F: FnMut(&mut Parser) -> PResult<T>,
{
    let v = try!(self.parse_seq_to_before_gt(sep, f));
    try!(self.expect_gt());
    return Ok(v);
}

/// Like `parse_seq_to_gt`, but `f` may request an early return by yielding
/// `None`; in that case the closing `>` is NOT consumed.
pub fn parse_seq_to_gt_or_return<T, F>(&mut self,
                                       sep: Option<token::Token>,
                                       f: F)
                                       -> PResult<(OwnedSlice<T>, bool)>
    where F: FnMut(&mut Parser) -> PResult<Option<T>>,
{
    let (v, returned) = try!(self.parse_seq_to_before_gt_or_return(sep, f));
    if !returned {
        try!(self.expect_gt());
    }
    return Ok((v, returned));
}

/// Parse a sequence, including the closing delimiter. The function
/// f must consume tokens until reaching the next separator or
/// closing bracket.
pub fn parse_seq_to_end<T, F>(&mut self,
                              ket: &token::Token,
                              sep: SeqSep,
                              f: F)
                              -> PResult<Vec<T>>
    where F: FnMut(&mut Parser) -> PResult<T>,
{
    let val = try!(self.parse_seq_to_before_end(ket, sep, f));
    // Eat the closing delimiter itself.
    try!(self.bump());
    Ok(val)
}

/// Parse a sequence, not including the closing delimiter. The function
/// f must consume tokens until reaching the next separator or
/// closing bracket.
pub fn parse_seq_to_before_end<T, F>(&mut self,
                                     ket: &token::Token,
                                     sep: SeqSep,
                                     mut f: F)
                                     -> PResult<Vec<T>>
    where F: FnMut(&mut Parser) -> PResult<T>,
{
    let mut first: bool = true;
    let mut v = vec!();
    while self.token != *ket {
        match sep.sep {
            Some(ref t) => {
                // A separator is required between items, but not before
                // the first one.
                if first {
                    first = false;
                } else {
                    try!(self.expect(t));
                }
            }
            _ => ()
        }
        // Permit a trailing separator immediately before the closing
        // delimiter when the sequence description allows it.
        if sep.trailing_sep_allowed && self.check(ket) {
            break;
        }
        v.push(try!(f(self)));
    }
    return Ok(v);
}

/// Parse a sequence, including the closing delimiter. The function
/// f must consume tokens until reaching the next separator or
/// closing bracket.
pub fn parse_unspanned_seq<T, F>(&mut self,
                                 bra: &token::Token,
                                 ket: &token::Token,
                                 sep: SeqSep,
                                 f: F)
                                 -> PResult<Vec<T>>
    where F: FnMut(&mut Parser) -> PResult<T>,
{
    try!(self.expect(bra));
    let result = try!(self.parse_seq_to_before_end(ket, sep, f));
    // Eat the closing delimiter.
    try!(self.bump());
    Ok(result)
}

/// Parse a sequence parameter of enum variant. For consistency purposes,
/// these should not be empty.
pub fn parse_enum_variant_seq<T, F>(&mut self,
                                    bra: &token::Token,
                                    ket: &token::Token,
                                    sep: SeqSep,
                                    f: F)
                                    -> PResult<Vec<T>>
    where F: FnMut(&mut Parser) -> PResult<T>,
{
    let result = try!(self.parse_unspanned_seq(bra, ket, sep, f));
    if result.is_empty() {
        let last_span = self.last_span;
        self.span_err(last_span,
                      "nullary enum variants are written with no trailing `( )`");
    }
    Ok(result)
}

// NB: Do not use this function unless you actually plan to place the
// spanned list in the AST.
pub fn parse_seq<T, F>(&mut self,
                       bra: &token::Token,
                       ket: &token::Token,
                       sep: SeqSep,
                       f: F)
                       -> PResult<Spanned<Vec<T>>>
    where F: FnMut(&mut Parser) -> PResult<T>,
{
    let lo = self.span.lo;
    try!(self.expect(bra));
    let result = try!(self.parse_seq_to_before_end(ket, sep, f));
    // `hi` is captured BEFORE bumping so the span covers the closing
    // delimiter's own span.
    let hi = self.span.hi;
    try!(self.bump());
    Ok(spanned(lo, hi, result))
}

/// Advance the parser by one token
pub fn bump(&mut self) -> PResult<()> {
    self.last_span = self.span;
    // Stash token for error recovery (sometimes; clone is not necessarily cheap).
    self.last_token = if self.token.is_ident() ||
                         self.token.is_path() ||
                         self.token == token::Comma {
        Some(Box::new(self.token.clone()))
    } else {
        None
    };
    let next = if self.buffer_start == self.buffer_end {
        // Lookahead buffer is empty: pull directly from the lexer.
        self.reader.real_token()
    } else {
        // Avoid token copies with `replace`.
        let buffer_start = self.buffer_start as usize;
        let next_index = (buffer_start + 1) & 3;
        self.buffer_start = next_index as isize;

        let placeholder = TokenAndSpan {
            tok: token::Underscore,
            sp: self.span,
        };
        mem::replace(&mut self.buffer[buffer_start], placeholder)
    };
    self.span = next.sp;
    self.token = next.tok;
    self.tokens_consumed += 1;
    self.expected_tokens.clear();
    // check after each token
    self.check_unknown_macro_variable()
}

/// Advance the parser by one token and return the bumped token.
pub fn bump_and_get(&mut self) -> PResult<token::Token> {
    let old_token = mem::replace(&mut self.token, token::Underscore);
    try!(self.bump());
    Ok(old_token)
}

/// EFFECT: replace the current token and span with the given one
pub fn replace_token(&mut self,
                     next: token::Token,
                     lo: BytePos,
                     hi: BytePos) {
    self.last_span = mk_sp(self.span.lo, lo);
    self.token = next;
    self.span = mk_sp(lo, hi);
}

/// Number of tokens currently held in the four-slot lookahead ring buffer.
pub fn buffer_length(&mut self) -> isize {
    if self.buffer_start <= self.buffer_end {
        return self.buffer_end - self.buffer_start;
    }
    // The ring buffer has wrapped around.
    return (4 - self.buffer_start) + self.buffer_end;
}

/// Apply `f` to the token `distance` tokens ahead of the current one,
/// pulling tokens from the lexer into the lookahead buffer as needed.
pub fn look_ahead<R, F>(&mut self, distance: usize, f: F) -> R where
    F: FnOnce(&token::Token) -> R,
{
    let dist = distance as isize;
    while self.buffer_length() < dist {
        self.buffer[self.buffer_end as usize] = self.reader.real_token();
        self.buffer_end = (self.buffer_end + 1) & 3;
    }
    f(&self.buffer[((self.buffer_start + dist - 1) & 3) as usize].tok)
}

/// Emit a fatal error at the current token's span.
pub fn fatal(&self, m: &str) -> diagnostic::FatalError {
    self.sess.span_diagnostic.span_fatal(self.span, m)
}

/// Emit a fatal error at the given span.
pub fn span_fatal(&self, sp: Span, m: &str) -> diagnostic::FatalError {
    self.sess.span_diagnostic.span_fatal(sp, m)
}

/// Emit an error plus a help note at the given span, then return a fatal
/// error marker.
pub fn span_fatal_help(&self, sp: Span, m: &str, help: &str) -> diagnostic::FatalError {
    self.span_err(sp, m);
    self.fileline_help(sp, help);
    diagnostic::FatalError
}

/// Emit a note at the given span.
pub fn span_note(&self, sp: Span, m: &str) {
    self.sess.span_diagnostic.span_note(sp, m)
}

/// Emit a help message at the given span.
pub fn span_help(&self, sp: Span, m: &str) {
    self.sess.span_diagnostic.span_help(sp, m)
}
/// Emit a suggestion (with replacement text `n`) at the given span.
pub fn span_suggestion(&self, sp: Span, m: &str, n: String) {
    self.sess.span_diagnostic.span_suggestion(sp, m, n)
}

/// Emit a file/line help message at the given span.
pub fn fileline_help(&self, sp: Span, m: &str) {
    self.sess.span_diagnostic.fileline_help(sp, m)
}

/// Report an internal parser error at the current span and abort.
pub fn bug(&self, m: &str) -> ! {
    self.sess.span_diagnostic.span_bug(self.span, m)
}

/// Emit a warning at the current token's span.
pub fn warn(&self, m: &str) {
    self.sess.span_diagnostic.span_warn(self.span, m)
}

/// Emit a warning at the given span.
pub fn span_warn(&self, sp: Span, m: &str) {
    self.sess.span_diagnostic.span_warn(sp, m)
}

/// Emit a (non-fatal) error at the given span.
pub fn span_err(&self, sp: Span, m: &str) {
    self.sess.span_diagnostic.span_err(sp, m)
}

/// Report an internal parser error at the given span and abort.
pub fn span_bug(&self, sp: Span, m: &str) -> ! {
    self.sess.span_diagnostic.span_bug(sp, m)
}

/// Abort compilation if any errors have been emitted so far.
pub fn abort_if_errors(&self) {
    self.sess.span_diagnostic.handler().abort_if_errors();
}

/// Return the interned string for an identifier.
pub fn id_to_interned_str(&mut self, id: Ident) -> InternedString {
    id.name.as_str()
}

/// Is the current token one of the keywords that signals a bare function
/// type?
pub fn token_is_bare_fn_keyword(&mut self) -> bool {
    self.check_keyword(keywords::Fn) ||
        self.check_keyword(keywords::Unsafe) ||
        self.check_keyword(keywords::Extern)
}

/// Return the identifier of the current token, which must be a lifetime
/// token (internal error otherwise).
pub fn get_lifetime(&mut self) -> ast::Ident {
    match self.token {
        token::Lifetime(ref ident) => *ident,
        _ => self.bug("not a lifetime"),
    }
}

pub fn parse_for_in_type(&mut self) -> PResult<Ty_> {
    /*
    Parses whatever can come after a `for` keyword in a type.
    The `for` has already been consumed.

    Deprecated:

    - for <'lt> |S| -> T

    Eventually:

    - for <'lt> [unsafe] [extern "ABI"] fn (S) -> T
    - for <'lt> path::foo(a, b)
    */

    // parse <'lt>
    let lo = self.span.lo;

    let lifetime_defs = try!(self.parse_late_bound_lifetime_defs());

    // examine next token to decide to do
    if self.token_is_bare_fn_keyword() {
        // `for<'a> fn(...)` — higher-ranked bare function type.
        self.parse_ty_bare_fn(lifetime_defs)
    } else {
        // `for<'a> Trait + ...` — poly trait reference with optional
        // additional bounds.
        let hi = self.span.hi;
        let trait_ref = try!(self.parse_trait_ref());
        let poly_trait_ref = ast::PolyTraitRef { bound_lifetimes: lifetime_defs,
                                                 trait_ref: trait_ref,
                                                 span: mk_sp(lo, hi) };
        let other_bounds = if try!(self.eat(&token::BinOp(token::Plus))) {
            try!(self.parse_ty_param_bounds(BoundParsingMode::Bare))
        } else {
            OwnedSlice::empty()
        };
        let all_bounds =
            Some(TraitTyParamBound(poly_trait_ref, TraitBoundModifier::None)).into_iter()
            .chain(other_bounds.into_vec())
            .collect();
        Ok(ast::TyPolyTraitRef(all_bounds))
    }
}

/// Parse a type that is a plain path, e.g. `foo::bar::Baz<T>`.
pub fn parse_ty_path(&mut self) -> PResult<Ty_> {
    Ok(TyPath(None, try!(self.parse_path(LifetimeAndTypesWithoutColons))))
}

/// parse a TyBareFn type:
pub fn parse_ty_bare_fn(&mut self, lifetime_defs: Vec<ast::LifetimeDef>) -> PResult<Ty_> {
    /*
    [unsafe] [extern "ABI"] fn <'lt> (S) -> T
     ^~~~~~^  ^~~~~~~~~~~~^    ^~~~^ ^~^    ^
        |           |            |    |     |
        |           |            |    |   Return type
        |           |            |  Argument types
        |           |        Lifetimes
        |          ABI
    Function Style
    */

    let unsafety = try!(self.parse_unsafety());
    let abi = if try!(self.eat_keyword(keywords::Extern)) {
        // `extern` with no explicit ABI string defaults to "C".
        try!(self.parse_opt_abi()).unwrap_or(abi::C)
    } else {
        abi::Rust
    };

    try!(self.expect_keyword(keywords::Fn));
    let (inputs, variadic) = try!(self.parse_fn_args(false, true));
    let ret_ty = try!(self.parse_ret_ty());
    let decl = P(FnDecl {
        inputs: inputs,
        output: ret_ty,
        variadic: variadic
    });
    Ok(TyBareFn(P(BareFnTy {
        abi: abi,
        unsafety: unsafety,
        lifetimes: lifetime_defs,
        decl: decl
    })))
}

/// Parses an obsolete closure kind (`&:`, `&mut:`, or `:`).
pub fn parse_obsolete_closure_kind(&mut self) -> PResult<()> {
    let lo = self.span.lo;
    if self.check(&token::BinOp(token::And)) &&
            self.look_ahead(1, |t| t.is_keyword(keywords::Mut)) &&
            self.look_ahead(2, |t| *t == token::Colon) {
        // `&mut:` — three tokens to eat.
        try!(self.bump());
        try!(self.bump());
        try!(self.bump());
    } else if self.token == token::BinOp(token::And) &&
            self.look_ahead(1, |t| *t == token::Colon) {
        // `&:` — two tokens to eat.
        try!(self.bump());
        try!(self.bump());
    } else if try!(self.eat(&token::Colon)) {
        /* nothing */
    } else {
        // No obsolete closure kind present; not an error.
        return Ok(());
    }

    let span = mk_sp(lo, self.span.hi);
    self.obsolete(span, ObsoleteSyntax::ClosureKind);
    Ok(())
}

/// Parse an optional `unsafe` qualifier.
pub fn parse_unsafety(&mut self) -> PResult<Unsafety> {
    if try!(self.eat_keyword(keywords::Unsafe)) {
        return Ok(Unsafety::Unsafe);
    } else {
        return Ok(Unsafety::Normal);
    }
}

/// Parse the items in a trait declaration
pub fn parse_trait_items(&mut self) -> PResult<Vec<P<TraitItem>>> {
    self.parse_unspanned_seq(
        &token::OpenDelim(token::Brace),
        &token::CloseDelim(token::Brace),
        seq_sep_none(),
        |p| -> PResult<P<TraitItem>> {
            maybe_whole!(no_clone p, NtTraitItem);
            let mut attrs = p.parse_outer_attributes();
            let lo = p.span.lo;

            let (name, node) = if try!(p.eat_keyword(keywords::Type)) {
                // Associated type: `type Foo[: bounds] [= default];`
                let TyParam {ident, bounds, default, ..} = try!(p.parse_ty_param());
                try!(p.expect(&token::Semi));
                (ident, TypeTraitItem(bounds, default))
            } else if p.is_const_item() {
                // Associated const: `const NAME: Ty [= expr];`
                try!(p.expect_keyword(keywords::Const));
                let ident = try!(p.parse_ident());
                try!(p.expect(&token::Colon));
                let ty = try!(p.parse_ty_sum());
                let default = if p.check(&token::Eq) {
                    try!(p.bump());
                    let expr = try!(p.parse_expr_nopanic());
                    try!(p.commit_expr_expecting(&expr, token::Semi));
                    Some(expr)
                } else {
                    try!(p.expect(&token::Semi));
                    None
                };
                (ident, ConstTraitItem(ty, default))
            } else {
                // Trait method, either required (ends in `;`) or
                // provided (has a `{ ... }` body).
                let (constness, unsafety, abi) = try!(p.parse_fn_front_matter());

                let ident = try!(p.parse_ident());
                let mut generics = try!(p.parse_generics());

                let (explicit_self, d) = try!(p.parse_fn_decl_with_self(|p| {
                    // This is somewhat dubious; We don't want to allow
                    // argument names to be left off if there is a
                    // definition...
                    p.parse_arg_general(false)
                }));

                generics.where_clause = try!(p.parse_where_clause());
                let sig = ast::MethodSig {
                    unsafety: unsafety,
                    constness: constness,
                    decl: d,
                    generics: generics,
                    abi: abi,
                    explicit_self: explicit_self,
                };

                let body = match p.token {
                    token::Semi => {
                        try!(p.bump());
                        debug!("parse_trait_methods(): parsing required method");
                        None
                    }
                    token::OpenDelim(token::Brace) => {
                        debug!("parse_trait_methods(): parsing provided method");
                        let (inner_attrs, body) =
                            try!(p.parse_inner_attrs_and_block());
                        attrs.extend(inner_attrs.iter().cloned());
                        Some(body)
                    }

                    _ => {
                        let token_str = p.this_token_to_string();
                        return Err(p.fatal(&format!("expected `;` or `{{`, found `{}`",
                                                    token_str)[..]))
                    }
                };
                (ident, ast::MethodTraitItem(sig, body))
            };

            Ok(P(TraitItem {
                id: ast::DUMMY_NODE_ID,
                ident: name,
                attrs: attrs,
                node: node,
                span: mk_sp(lo, p.last_span.hi),
            }))
        })
}

/// Parse a possibly mutable type
pub fn parse_mt(&mut self) -> PResult<MutTy> {
    let mutbl = try!(self.parse_mutability());
    let t = try!(self.parse_ty_nopanic());
    Ok(MutTy { ty: t, mutbl: mutbl })
}

/// Parse optional return type [ -> TY ] in function decl
pub fn parse_ret_ty(&mut self) -> PResult<FunctionRetTy> {
    if try!(self.eat(&token::RArrow)) {
        if try!(self.eat(&token::Not)) {
            // `-> !`: diverging function.
            Ok(NoReturn(self.last_span))
        } else {
            Ok(Return(try!(self.parse_ty_nopanic())))
        }
    } else {
        // No `->`: implicit unit return with a zero-width span.
        let pos = self.span.lo;
        Ok(DefaultReturn(mk_sp(pos, pos)))
    }
}

/// Parse a type in a context where `T1+T2` is allowed.
pub fn parse_ty_sum(&mut self) -> PResult<P<Ty>> {
    let lo = self.span.lo;
    let lhs = try!(self.parse_ty_nopanic());

    if !try!(self.eat(&token::BinOp(token::Plus))) {
        return Ok(lhs);
    }

    let bounds = try!(self.parse_ty_param_bounds(BoundParsingMode::Bare));

    // In type grammar, `+` is treated like a binary operator,
    // and hence both L and R side are required.
    if bounds.is_empty() {
        let last_span = self.last_span;
        self.span_err(last_span,
                      "at least one type parameter bound \
                       must be specified");
    }

    let sp = mk_sp(lo, self.last_span.hi);
    let sum = ast::TyObjectSum(lhs, bounds);
    Ok(P(Ty {id: ast::DUMMY_NODE_ID, node: sum, span: sp}))
}

/// Parse a type.
pub fn parse_ty_nopanic(&mut self) -> PResult<P<Ty>> {
    maybe_whole!(no_clone self, NtTy);

    let lo = self.span.lo;

    let t = if self.check(&token::OpenDelim(token::Paren)) {
        try!(self.bump());

        // (t) is a parenthesized ty
        // (t,) is the type of a tuple with only one field,
        // of type t
        let mut ts = vec![];
        let mut last_comma = false;
        while self.token != token::CloseDelim(token::Paren) {
            ts.push(try!(self.parse_ty_sum()));
            if self.check(&token::Comma) {
                last_comma = true;
                try!(self.bump());
            } else {
                last_comma = false;
                break;
            }
        }

        try!(self.expect(&token::CloseDelim(token::Paren)));
        if ts.len() == 1 && !last_comma {
            TyParen(ts.into_iter().nth(0).unwrap())
        } else {
            TyTup(ts)
        }
    } else if self.check(&token::BinOp(token::Star)) {
        // STAR POINTER (bare pointer?)
        try!(self.bump());
        TyPtr(try!(self.parse_ptr()))
    } else if self.check(&token::OpenDelim(token::Bracket)) {
        // VECTOR
        try!(self.expect(&token::OpenDelim(token::Bracket)));
        let t = try!(self.parse_ty_sum());

        // Parse the `; e` in `[ i32; e ]`
        // where `e` is a const expression
        let t = match try!(self.maybe_parse_fixed_length_of_vec()) {
            None => TyVec(t),
            Some(suffix) => TyFixedLengthVec(t, suffix)
        };
        try!(self.expect(&token::CloseDelim(token::Bracket)));
        t
    } else if self.check(&token::BinOp(token::And)) ||
              self.token == token::AndAnd {
        // BORROWED POINTER
        try!(self.expect_and());
        try!(self.parse_borrowed_pointee())
    } else if self.check_keyword(keywords::For) {
        try!(self.parse_for_in_type())
    } else if self.token_is_bare_fn_keyword() {
        // BARE FUNCTION
        try!(self.parse_ty_bare_fn(Vec::new()))
    } else if try!(self.eat_keyword_noexpect(keywords::Typeof)) {
        // TYPEOF
        // In order to not be ambiguous, the type must be surrounded by parens.
        try!(self.expect(&token::OpenDelim(token::Paren)));
        let e = try!(self.parse_expr_nopanic());
        try!(self.expect(&token::CloseDelim(token::Paren)));
        TyTypeof(e)
    } else if try!(self.eat_lt()) {
        // QUALIFIED PATH: `<T [as Trait]>::item`
        let (qself, path) = try!(self.parse_qualified_path(NoTypesAllowed));

        TyPath(Some(qself), path)
    } else if self.check(&token::ModSep) ||
              self.token.is_ident() ||
              self.token.is_path() {
        let path = try!(self.parse_path(LifetimeAndTypesWithoutColons));
        if self.check(&token::Not) {
            // MACRO INVOCATION
            try!(self.bump());
            let delim = try!(self.expect_open_delim());
            let tts = try!(self.parse_seq_to_end(&token::CloseDelim(delim),
                                                 seq_sep_none(),
                                                 |p| p.parse_token_tree()));
            let hi = self.span.hi;
            TyMac(spanned(lo, hi, Mac_ { path: path, tts: tts, ctxt: EMPTY_CTXT }))
        } else {
            // NAMED TYPE
            TyPath(None, path)
        }
    } else if try!(self.eat(&token::Underscore)) {
        // TYPE TO BE INFERRED
        TyInfer
    } else {
        let this_token_str = self.this_token_to_string();
        let msg = format!("expected type, found `{}`", this_token_str);
        return Err(self.fatal(&msg[..]));
    };

    let sp = mk_sp(lo, self.last_span.hi);
    Ok(P(Ty {id: ast::DUMMY_NODE_ID, node: t, span: sp}))
}

/// Parse the pointee of a `&` type; the `&` itself has been consumed.
pub fn parse_borrowed_pointee(&mut self) -> PResult<Ty_> {
    // look for `&'lt` or `&'foo ` and interpret `foo` as the region name:
    let opt_lifetime = try!(self.parse_opt_lifetime());

    let mt = try!(self.parse_mt());
    return Ok(TyRptr(opt_lifetime, mt));
}

/// Parse the pointee of a raw pointer type: `mut T` or `const T`. A bare
/// `T` is accepted with an error for backwards compatibility.
pub fn parse_ptr(&mut self) -> PResult<MutTy> {
    let mutbl = if try!(self.eat_keyword(keywords::Mut)) {
        MutMutable
    } else if try!(self.eat_keyword(keywords::Const)) {
        MutImmutable
    } else {
        let span = self.last_span;
        self.span_err(span,
                      "bare raw pointers are no longer allowed, you should \
                       likely use `*mut T`, but otherwise `*T` is now \
                       known as `*const T`");
        MutImmutable
    };
    let t = try!(self.parse_ty_nopanic());
    Ok(MutTy { ty: t, mutbl: mutbl })
}

/// Heuristically decide whether the upcoming tokens form a named argument
/// (`[&|&&|mut] ident :`) using lookahead only — no tokens are consumed.
pub fn is_named_argument(&mut self) -> bool {
    let offset = match self.token {
        token::BinOp(token::And) => 1,
        token::AndAnd => 1,
        _ if self.token.is_keyword(keywords::Mut) => 1,
        _ => 0
    };

    debug!("parser is_named_argument offset:{}", offset);

    if offset == 0 {
        is_plain_ident_or_underscore(&self.token)
            && self.look_ahead(1, |t| *t == token::Colon)
    } else {
        self.look_ahead(offset, |t| is_plain_ident_or_underscore(t))
            && self.look_ahead(offset + 1, |t| *t == token::Colon)
    }
}

/// This version of parse arg doesn't necessarily require
/// identifier names.
pub fn parse_arg_general(&mut self, require_name: bool) -> PResult<Arg> {
    let pat = if require_name || self.is_named_argument() {
        debug!("parse_arg_general parse_pat (require_name:{})",
               require_name);
        let pat = try!(self.parse_pat_nopanic());

        try!(self.expect(&token::Colon));
        pat
    } else {
        // Anonymous argument: synthesize an invalid-ident pattern.
        debug!("parse_arg_general ident_to_pat");
        ast_util::ident_to_pat(ast::DUMMY_NODE_ID,
                               self.last_span,
                               special_idents::invalid)
    };

    let t = try!(self.parse_ty_sum());

    Ok(Arg {
        ty: t,
        pat: pat,
        id: ast::DUMMY_NODE_ID,
    })
}

/// Parse a single function argument
pub fn parse_arg(&mut self) -> PResult<Arg> {
    self.parse_arg_general(true)
}

/// Parse an argument in a lambda header e.g. |arg, arg|
pub fn parse_fn_block_arg(&mut self) -> PResult<Arg> {
    let pat = try!(self.parse_pat_nopanic());
    let t = if try!(self.eat(&token::Colon)) {
        try!(self.parse_ty_sum())
    } else {
        // No type annotation: leave the type to be inferred.
        P(Ty {
            id: ast::DUMMY_NODE_ID,
            node: TyInfer,
            span: mk_sp(self.span.lo, self.span.hi),
        })
    };
    Ok(Arg {
        ty: t,
        pat: pat,
        id: ast::DUMMY_NODE_ID
    })
}

/// Parse the optional `; expr` suffix of a fixed-length vector type,
/// returning the length expression if present.
pub fn maybe_parse_fixed_length_of_vec(&mut self) -> PResult<Option<P<ast::Expr>>> {
    if self.check(&token::Semi) {
        try!(self.bump());
        Ok(Some(try!(self.parse_expr_nopanic())))
    } else {
        Ok(None)
    }
}

/// Matches token_lit = LIT_INTEGER | ...
pub fn lit_from_token(&self, tok: &token::Token) -> PResult<Lit_> {
    match *tok {
        token::Interpolated(token::NtExpr(ref v)) => {
            // An interpolated expression is only acceptable here if it is
            // itself a literal.
            match v.node {
                ExprLit(ref lit) => { Ok(lit.node.clone()) }
                _ => { return Err(self.unexpected_last(tok)); }
            }
        }
        token::Literal(lit, suf) => {
            let (suffix_illegal, out) = match lit {
                token::Byte(i) => (true, LitByte(parse::byte_lit(&i.as_str()).0)),
                token::Char(i) => (true, LitChar(parse::char_lit(&i.as_str()).0)),

                // there are some valid suffixes for integer and
                // float literals, so all the handling is done
                // internally.
                token::Integer(s) => {
                    (false, parse::integer_lit(&s.as_str(),
                                               suf.as_ref().map(|s| s.as_str()),
                                               &self.sess.span_diagnostic,
                                               self.last_span))
                }
                token::Float(s) => {
                    (false, parse::float_lit(&s.as_str(),
                                             suf.as_ref().map(|s| s.as_str()),
                                             &self.sess.span_diagnostic,
                                             self.last_span))
                }

                token::Str_(s) => {
                    (true,
                     LitStr(token::intern_and_get_ident(&parse::str_lit(&s.as_str())),
                            ast::CookedStr))
                }
                token::StrRaw(s, n) => {
                    (true,
                     LitStr(
                         token::intern_and_get_ident(&parse::raw_str_lit(&s.as_str())),
                         ast::RawStr(n)))
                }
                token::ByteStr(i) =>
                    (true, LitByteStr(parse::byte_str_lit(&i.as_str()))),
                token::ByteStrRaw(i, _) =>
                    (true,
                     LitByteStr(Rc::new(i.to_string().into_bytes()))),
            };

            if suffix_illegal {
                let sp = self.last_span;
                self.expect_no_suffix(sp, &*format!("{} literal", lit.short_name()), suf)
            }

            Ok(out)
        }
        _ => { return Err(self.unexpected_last(tok)); }
    }
}

/// Matches lit = true | false | token_lit
pub fn parse_lit(&mut self) -> PResult<Lit> {
    let lo = self.span.lo;
    let lit = if try!(self.eat_keyword(keywords::True)) {
        LitBool(true)
    } else if try!(self.eat_keyword(keywords::False)) {
        LitBool(false)
    } else {
        let token = try!(self.bump_and_get());
        let lit = try!(self.lit_from_token(&token));
        lit
    };
    Ok(codemap::Spanned { node: lit, span: mk_sp(lo, self.last_span.hi) })
}

/// matches '-' lit | lit
pub fn parse_literal_maybe_minus(&mut self) -> PResult<P<Expr>> {
    let minus_lo = self.span.lo;
    let minus_present = try!(self.eat(&token::BinOp(token::Minus)));

    let lo = self.span.lo;
    let literal = P(try!(self.parse_lit()));
    let hi = self.last_span.hi;
    let expr = self.mk_expr(lo, hi, ExprLit(literal));

    if minus_present {
        // Wrap the literal in a unary negation spanning from the `-`.
        let minus_hi = self.last_span.hi;
        let unary = self.mk_unary(UnNeg, expr);
        Ok(self.mk_expr(minus_lo, minus_hi, unary))
    } else {
        Ok(expr)
    }
}

/// Parses qualified path.
///
/// Assumes that the leading `<` has been parsed already.
///
/// Qualifed paths are a part of the universal function call
/// syntax (UFCS).
///
/// `qualified_path = <type [as trait_ref]>::path`
///
/// See `parse_path` for `mode` meaning.
///
/// # Examples:
///
/// `<T as U>::a`
/// `<T as U>::F::a::<S>`
pub fn parse_qualified_path(&mut self, mode: PathParsingMode)
                            -> PResult<(QSelf, ast::Path)> {
    let span = self.last_span;
    let self_type = try!(self.parse_ty_sum());
    let mut path = if try!(self.eat_keyword(keywords::As)) {
        try!(self.parse_path(LifetimeAndTypesWithoutColons))
    } else {
        // No `as Trait` clause: start from an empty path.
        ast::Path {
            span: span,
            global: false,
            segments: vec![]
        }
    };

    // `position` records how many leading segments belong to the trait
    // reference (everything after them is the item path).
    let qself = QSelf {
        ty: self_type,
        position: path.segments.len()
    };

    try!(self.expect(&token::Gt));
    try!(self.expect(&token::ModSep));

    let segments = match mode {
        LifetimeAndTypesWithoutColons => {
            try!(self.parse_path_segments_without_colons())
        }
        LifetimeAndTypesWithColons => {
            try!(self.parse_path_segments_with_colons())
        }
        NoTypesAllowed => {
            try!(self.parse_path_segments_without_types())
        }
    };
    path.segments.extend(segments);

    path.span.hi = self.last_span.hi;

    Ok((qself, path))
}

/// Parses a path and optional type parameter bounds, depending on the
/// mode. The `mode` parameter determines whether lifetimes, types, and/or
/// bounds are permitted and whether `::` must precede type parameter
/// groups.
pub fn parse_path(&mut self, mode: PathParsingMode) -> PResult<ast::Path> {
    // Check for a whole path...
    let found = match self.token {
        token::Interpolated(token::NtPath(_)) => Some(try!(self.bump_and_get())),
        _ => None,
    };
    if let Some(token::Interpolated(token::NtPath(path))) = found {
        return Ok(*path);
    }

    let lo = self.span.lo;
    let is_global = try!(self.eat(&token::ModSep));

    // Parse any number of segments and bound sets. A segment is an
    // identifier followed by an optional lifetime and a set of types.
    // A bound set is a set of type parameter bounds.
    let segments = match mode {
        LifetimeAndTypesWithoutColons => {
            try!(self.parse_path_segments_without_colons())
        }
        LifetimeAndTypesWithColons => {
            try!(self.parse_path_segments_with_colons())
        }
        NoTypesAllowed => {
            try!(self.parse_path_segments_without_types())
        }
    };

    // Assemble the span.
    let span = mk_sp(lo, self.last_span.hi);

    // Assemble the result.
    Ok(ast::Path {
        span: span,
        global: is_global,
        segments: segments,
    })
}

/// Examples:
/// - `a::b<T,U>::c<V,W>`
/// - `a::b<T,U>::c(V) -> W`
/// - `a::b<T,U>::c(V)`
pub fn parse_path_segments_without_colons(&mut self) -> PResult<Vec<ast::PathSegment>> {
    let mut segments = Vec::new();
    loop {
        // First, parse an identifier.
        let identifier = try!(self.parse_ident_or_self_type());

        // Parse types, optionally.
        let parameters = if try!(self.eat_lt()) {
            // Angle-bracketed parameters: `<'a, T, Assoc = U>`.
            let (lifetimes, types, bindings) = try!(self.parse_generic_values_after_lt());

            ast::AngleBracketedParameters(ast::AngleBracketedParameterData {
                lifetimes: lifetimes,
                types: OwnedSlice::from_vec(types),
                bindings: OwnedSlice::from_vec(bindings),
            })
        } else if try!(self.eat(&token::OpenDelim(token::Paren))) {
            // Parenthesized parameters: `(A, B) -> C` (Fn sugar).
            let lo = self.last_span.lo;

            let inputs = try!(self.parse_seq_to_end(
                &token::CloseDelim(token::Paren),
                seq_sep_trailing_allowed(token::Comma),
                |p| p.parse_ty_sum()));

            let output_ty = if try!(self.eat(&token::RArrow)) {
                Some(try!(self.parse_ty_nopanic()))
            } else {
                None
            };

            let hi = self.last_span.hi;

            ast::ParenthesizedParameters(ast::ParenthesizedParameterData {
                span: mk_sp(lo, hi),
                inputs: inputs,
                output: output_ty,
            })
        } else {
            ast::PathParameters::none()
        };

        // Assemble and push the result.
        segments.push(ast::PathSegment { identifier: identifier,
                                         parameters: parameters });

        // Continue only if we see a `::`
        if !try!(self.eat(&token::ModSep)) {
            return Ok(segments);
        }
    }
}

/// Examples:
/// - `a::b::<T,U>::c`
pub fn parse_path_segments_with_colons(&mut self) -> PResult<Vec<ast::PathSegment>> {
    let mut segments = Vec::new();
    loop {
        // First, parse an identifier.
        let identifier = try!(self.parse_ident_or_self_type());

        // If we do not see a `::`, stop.
        if !try!(self.eat(&token::ModSep)) {
            segments.push(ast::PathSegment {
                identifier: identifier,
                parameters: ast::PathParameters::none()
            });
            return Ok(segments);
        }

        // Check for a type segment.
        if try!(self.eat_lt()) {
            // Consumed `a::b::<`, go look for types
            let (lifetimes, types, bindings) = try!(self.parse_generic_values_after_lt());
            segments.push(ast::PathSegment {
                identifier: identifier,
                parameters: ast::AngleBracketedParameters(ast::AngleBracketedParameterData {
                    lifetimes: lifetimes,
                    types: OwnedSlice::from_vec(types),
                    bindings: OwnedSlice::from_vec(bindings),
                }),
            });

            // Consumed `a::b::<T,U>`, check for `::` before proceeding
            if !try!(self.eat(&token::ModSep)) {
                return Ok(segments);
            }
        } else {
            // Consumed `a::`, go look for `b`
            segments.push(ast::PathSegment {
                identifier: identifier,
                parameters: ast::PathParameters::none(),
            });
        }
    }
}

/// Examples:
/// - `a::b::c`
pub fn parse_path_segments_without_types(&mut self) -> PResult<Vec<ast::PathSegment>> {
    let mut segments = Vec::new();
    loop {
        // First, parse an identifier.
        let identifier = try!(self.parse_ident_or_self_type());

        // Assemble and push the result.
        segments.push(ast::PathSegment {
            identifier: identifier,
            parameters: ast::PathParameters::none()
        });

        // If we do not see a `::`, stop.
        if !try!(self.eat(&token::ModSep)) {
            return Ok(segments);
        }
    }
}

/// parses 0 or 1 lifetime
pub fn parse_opt_lifetime(&mut self) -> PResult<Option<ast::Lifetime>> {
    match self.token {
        token::Lifetime(..) => {
            Ok(Some(try!(self.parse_lifetime())))
        }
        _ => {
            Ok(None)
        }
    }
}

/// Parses a single lifetime
/// Matches lifetime = LIFETIME
pub fn parse_lifetime(&mut self) -> PResult<ast::Lifetime> {
    match self.token {
        token::Lifetime(i) => {
            let span = self.span;
            try!(self.bump());
            return Ok(ast::Lifetime {
                id: ast::DUMMY_NODE_ID,
                span: span,
                name: i.name
            });
        }
        _ => {
            return Err(self.fatal(&format!("expected a lifetime name")));
        }
    }
}

/// Parses `lifetime_defs = [ lifetime_defs { ',' lifetime_defs } ]` where
/// `lifetime_def = lifetime [':' lifetimes]`
pub fn parse_lifetime_defs(&mut self) -> PResult<Vec<ast::LifetimeDef>> {
    let mut res = Vec::new();
    loop {
        match self.token {
            token::Lifetime(_) => {
                let lifetime = try!(self.parse_lifetime());
                // Optional bounds: `'a: 'b + 'c`.
                let bounds =
                    if try!(self.eat(&token::Colon)) {
                        try!(self.parse_lifetimes(token::BinOp(token::Plus)))
                    } else {
                        Vec::new()
                    };
                res.push(ast::LifetimeDef { lifetime: lifetime,
                                            bounds: bounds });
            }

            _ => {
                return Ok(res);
            }
        }

        match self.token {
            token::Comma => { try!(self.bump()); }
            // `>` or `>>` terminates the list without being consumed.
            token::Gt => { return Ok(res); }
            token::BinOp(token::Shr) => { return Ok(res); }
            _ => {
                let this_token_str = self.this_token_to_string();
                let msg = format!("expected `,` or `>` after lifetime \
                                   name, found `{}`",
                                  this_token_str);
                return Err(self.fatal(&msg[..]));
            }
        }
    }
}

/// matches lifetimes = ( lifetime ) | ( lifetime , lifetimes ) actually, it matches the empty
/// one too, but putting that in there messes up the grammar....
///
/// Parses zero or more comma separated lifetimes. Expects each lifetime to be followed by
/// either a comma or `>`. Used when parsing type parameter lists, where we expect something
/// like `<'a, 'b, T>`.
pub fn parse_lifetimes(&mut self, sep: token::Token) -> PResult<Vec<ast::Lifetime>> {
    let mut res = Vec::new();
    loop {
        match self.token {
            token::Lifetime(_) => {
                res.push(try!(self.parse_lifetime()));
            }
            _ => {
                return Ok(res);
            }
        }

        // The caller-supplied separator (`,` in type parameter lists,
        // `+` in bound lists) links successive lifetimes.
        if self.token != sep {
            return Ok(res);
        }

        try!(self.bump());
    }
}

/// Parse mutability declaration (mut/const/imm)
// NOTE(review): despite the doc comment, only `mut` is actually consumed
// here; anything else yields `MutImmutable` without advancing the cursor.
pub fn parse_mutability(&mut self) -> PResult<Mutability> {
    if try!(self.eat_keyword(keywords::Mut)) {
        Ok(MutMutable)
    } else {
        Ok(MutImmutable)
    }
}

/// Parse ident COLON expr
// Parses one field of a struct literal, e.g. `x: 1 + 2`.
pub fn parse_field(&mut self) -> PResult<Field> {
    let lo = self.span.lo;
    let i = try!(self.parse_ident());
    let hi = self.last_span.hi;
    try!(self.expect(&token::Colon));
    let e = try!(self.parse_expr_nopanic());
    Ok(ast::Field {
        ident: spanned(lo, hi, i),
        span: mk_sp(lo, e.span.hi),
        expr: e,
    })
}

// Wraps an expression node in a `P<Expr>` with a fresh dummy node id and
// the given span.
pub fn mk_expr(&mut self, lo: BytePos, hi: BytePos, node: Expr_) -> P<Expr> {
    P(Expr {
        id: ast::DUMMY_NODE_ID,
        node: node,
        span: mk_sp(lo, hi),
    })
}

// The `mk_*` helpers below are thin AST-node constructors; they exist so
// expression-building call sites read uniformly.
pub fn mk_unary(&mut self, unop: ast::UnOp, expr: P<Expr>) -> ast::Expr_ {
    ExprUnary(unop, expr)
}

pub fn mk_binary(&mut self, binop: ast::BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ast::Expr_ {
    ExprBinary(binop, lhs, rhs)
}

pub fn mk_call(&mut self, f: P<Expr>, args: Vec<P<Expr>>) -> ast::Expr_ {
    ExprCall(f, args)
}

fn mk_method_call(&mut self,
                  ident: ast::SpannedIdent,
                  tps: Vec<P<Ty>>,
                  args: Vec<P<Expr>>)
                  -> ast::Expr_ {
    ExprMethodCall(ident, tps, args)
}

pub fn mk_index(&mut self, expr: P<Expr>, idx: P<Expr>) -> ast::Expr_ {
    ExprIndex(expr, idx)
}

pub fn mk_range(&mut self,
                start: Option<P<Expr>>,
                end: Option<P<Expr>>)
                -> ast::Expr_ {
    ExprRange(start, end)
}

pub fn mk_field(&mut self, expr: P<Expr>, ident: ast::SpannedIdent) -> ast::Expr_ {
    ExprField(expr, ident)
}

pub fn mk_tup_field(&mut self, expr: P<Expr>, idx: codemap::Spanned<usize>) -> ast::Expr_ {
    ExprTupField(expr, idx)
}

pub fn mk_assign_op(&mut self, binop: ast::BinOp,
                    lhs: P<Expr>, rhs: P<Expr>) -> ast::Expr_ {
    ExprAssignOp(binop, lhs, rhs)
}

// Builds a macro-invocation expression node spanning `lo..hi`.
pub fn mk_mac_expr(&mut self, lo: BytePos, hi: BytePos, m: Mac_) -> P<Expr> {
    P(Expr {
        id: ast::DUMMY_NODE_ID,
        node: ExprMac(codemap::Spanned {node: m, span: mk_sp(lo, hi)}),
        span: mk_sp(lo, hi),
    })
}

// Builds an unsuffixed-looking `u32` literal expression at the current span.
pub fn mk_lit_u32(&mut self, i: u32) -> P<Expr> {
    let span = &self.span;
    let lv_lit = P(codemap::Spanned {
        node: LitInt(i as u64, ast::UnsignedIntLit(TyU32)),
        span: *span
    });

    P(Expr {
        id: ast::DUMMY_NODE_ID,
        node: ExprLit(lv_lit),
        span: *span,
    })
}

// Expects any open delimiter (`(`, `[`, `{`) and returns which one was seen.
// NOTE(review): pushing `token::Gt` onto `expected_tokens` here looks odd —
// presumably it feeds the "expected one of ..." diagnostics; confirm intent.
fn expect_open_delim(&mut self) -> PResult<token::DelimToken> {
    self.expected_tokens.push(TokenType::Token(token::Gt));
    match self.token {
        token::OpenDelim(delim) => {
            try!(self.bump());
            Ok(delim)
        },
        _ => Err(self.fatal("expected open delimiter")),
    }
}

/// At the bottom (top?) of the precedence hierarchy,
/// parse things like parenthesized exprs,
/// macros, return, etc.
pub fn parse_bottom_expr(&mut self) -> PResult<P<Expr>> {
    maybe_whole_expr!(self);

    let lo = self.span.lo;
    let mut hi = self.span.hi;

    let ex: Expr_;

    // Note: when adding new syntax here, don't forget to adjust Token::can_begin_expr().
    match self.token {
        token::OpenDelim(token::Paren) => {
            try!(self.bump());

            // (e) is parenthesized e
            // (e,) is a tuple with only one field, e
            let mut es = vec![];
            let mut trailing_comma = false;
            while self.token != token::CloseDelim(token::Paren) {
                es.push(try!(self.parse_expr_nopanic()));
                try!(self.commit_expr(&**es.last().unwrap(), &[],
                                 &[token::Comma, token::CloseDelim(token::Paren)]));
                if self.check(&token::Comma) {
                    trailing_comma = true;

                    try!(self.bump());
                } else {
                    trailing_comma = false;
                    break;
                }
            }
            try!(self.bump());

            hi = self.last_span.hi;
            // `(e)` is a parenthesized expression; anything else (including
            // `(e,)`) is a tuple.
            return if es.len() == 1 && !trailing_comma {
                Ok(self.mk_expr(lo, hi, ExprParen(es.into_iter().nth(0).unwrap())))
            } else {
                Ok(self.mk_expr(lo, hi, ExprTup(es)))
            }
        },
        token::OpenDelim(token::Brace) => {
            return self.parse_block_expr(lo, DefaultBlock);
        },
        token::BinOp(token::Or) |  token::OrOr => {
            // `|...| expr` or `|| expr`: a by-reference closure.
            let lo = self.span.lo;
            return self.parse_lambda_expr(lo, CaptureByRef);
        },
        token::Ident(id @ ast::Ident {
                        name: token::SELF_KEYWORD_NAME,
                        ctxt: _
                     }, token::Plain) => {
            // Bare `self` as an expression.
            try!(self.bump());
            let path = ast_util::ident_to_path(mk_sp(lo, hi), id);
            ex = ExprPath(None, path);
            hi = self.last_span.hi;
        }
        token::OpenDelim(token::Bracket) => {
            try!(self.bump());

            if self.check(&token::CloseDelim(token::Bracket)) {
                // Empty vector.
                try!(self.bump());
                ex = ExprVec(Vec::new());
            } else {
                // Nonempty vector.
                let first_expr = try!(self.parse_expr_nopanic());
                if self.check(&token::Semi) {
                    // Repeating array syntax: [ 0; 512 ]
                    try!(self.bump());
                    let count = try!(self.parse_expr_nopanic());
                    try!(self.expect(&token::CloseDelim(token::Bracket)));
                    ex = ExprRepeat(first_expr, count);
                } else if self.check(&token::Comma) {
                    // Vector with two or more elements.
                    try!(self.bump());
                    let remaining_exprs = try!(self.parse_seq_to_end(
                        &token::CloseDelim(token::Bracket),
                        seq_sep_trailing_allowed(token::Comma),
                        |p| Ok(try!(p.parse_expr_nopanic()))
                            ));
                    let mut exprs = vec!(first_expr);
                    exprs.extend(remaining_exprs);
                    ex = ExprVec(exprs);
                } else {
                    // Vector with one element.
                    try!(self.expect(&token::CloseDelim(token::Bracket)));
                    ex = ExprVec(vec!(first_expr));
                }
            }
            hi = self.last_span.hi;
        }
        _ => {
            // Keyword-introduced and path-like expressions. Order matters:
            // each `eat_*` consumes its token only on a match.
            if try!(self.eat_lt()) {
                // `<T as U>::...` — a qualified path expression.
                let (qself, path) =
                    try!(self.parse_qualified_path(LifetimeAndTypesWithColons));
                hi = path.span.hi;
                return Ok(self.mk_expr(lo, hi, ExprPath(Some(qself), path)));
            }
            if try!(self.eat_keyword(keywords::Move)) {
                let lo = self.last_span.lo;
                return self.parse_lambda_expr(lo, CaptureByValue);
            }
            if try!(self.eat_keyword(keywords::If)) {
                return self.parse_if_expr();
            }
            if try!(self.eat_keyword(keywords::For)) {
                let lo = self.last_span.lo;
                return self.parse_for_expr(None, lo);
            }
            if try!(self.eat_keyword(keywords::While)) {
                let lo = self.last_span.lo;
                return self.parse_while_expr(None, lo);
            }
            if self.token.is_lifetime() {
                // A loop label: `'a: while ...`, `'a: for ...`, `'a: loop ...`.
                let lifetime = self.get_lifetime();
                let lo = self.span.lo;
                try!(self.bump());
                try!(self.expect(&token::Colon));
                if try!(self.eat_keyword(keywords::While)) {
                    return self.parse_while_expr(Some(lifetime), lo)
                }
                if try!(self.eat_keyword(keywords::For)) {
                    return self.parse_for_expr(Some(lifetime), lo)
                }
                if try!(self.eat_keyword(keywords::Loop)) {
                    return self.parse_loop_expr(Some(lifetime), lo)
                }
                return Err(self.fatal("expected `while`, `for`, or `loop` after a label"))
            }
            if try!(self.eat_keyword(keywords::Loop)) {
                let lo = self.last_span.lo;
                return self.parse_loop_expr(None, lo);
            }
            if try!(self.eat_keyword(keywords::Continue)) {
                // `continue` with an optional target label.
                let ex = if self.token.is_lifetime() {
                    let ex = ExprAgain(Some(Spanned{
                        node: self.get_lifetime(),
                        span: self.span
                    }));
                    try!(self.bump());
                    ex
                } else {
                    ExprAgain(None)
                };
                let hi = self.last_span.hi;
                return Ok(self.mk_expr(lo, hi, ex));
            }
            if try!(self.eat_keyword(keywords::Match)) {
                return self.parse_match_expr();
            }
            if try!(self.eat_keyword(keywords::Unsafe)) {
                return self.parse_block_expr(
                    lo,
                    UnsafeBlock(ast::UserProvided));
            }
            if try!(self.eat_keyword(keywords::Return)) {
                // `return` optionally takes a value expression.
                if self.token.can_begin_expr() {
                    let e = try!(self.parse_expr_nopanic());
                    hi = e.span.hi;
                    ex = ExprRet(Some(e));
                } else {
                    ex = ExprRet(None);
                }
            } else if try!(self.eat_keyword(keywords::Break)) {
                // `break` with an optional target label.
                if self.token.is_lifetime() {
                    ex = ExprBreak(Some(Spanned {
                        node: self.get_lifetime(),
                        span: self.span
                    }));
                    try!(self.bump());
                } else {
                    ex = ExprBreak(None);
                }
                hi = self.last_span.hi;
            } else if self.check(&token::ModSep) ||
                    self.token.is_ident() &&
                    !self.check_keyword(keywords::True) &&
                    !self.check_keyword(keywords::False) {
                // A path expression, macro invocation, or struct literal.
                let pth = try!(self.parse_path(LifetimeAndTypesWithColons));

                // `!`, as an operator, is prefix, so we know this isn't that
                if self.check(&token::Not) {
                    // MACRO INVOCATION expression
                    try!(self.bump());

                    let delim = try!(self.expect_open_delim());
                    let tts = try!(self.parse_seq_to_end(
                        &token::CloseDelim(delim),
                        seq_sep_none(),
                        |p| p.parse_token_tree()));
                    let hi = self.last_span.hi;

                    return Ok(self.mk_mac_expr(lo,
                                               hi,
                                               Mac_ { path: pth, tts: tts, ctxt: EMPTY_CTXT }));
                }
                if self.check(&token::OpenDelim(token::Brace)) {
                    // This is a struct literal, unless we're prohibited
                    // from parsing struct literals here.
                    let prohibited = self.restrictions.contains(
                        Restrictions::RESTRICTION_NO_STRUCT_LITERAL
                    );
                    if !prohibited {
                        // It's a struct literal.
                        try!(self.bump());
                        let mut fields = Vec::new();
                        let mut base = None;

                        while self.token != token::CloseDelim(token::Brace) {
                            if try!(self.eat(&token::DotDot)) {
                                // `..base` functional-update syntax ends the
                                // field list.
                                base = Some(try!(self.parse_expr_nopanic()));
                                break;
                            }

                            fields.push(try!(self.parse_field()));
                            try!(self.commit_expr(&*fields.last().unwrap().expr,
                                             &[token::Comma],
                                             &[token::CloseDelim(token::Brace)]));
                        }

                        hi = self.span.hi;
                        try!(self.expect(&token::CloseDelim(token::Brace)));
                        ex = ExprStruct(pth, fields, base);
                        return Ok(self.mk_expr(lo, hi, ex));
                    }
                }

                hi = pth.span.hi;
                ex = ExprPath(None, pth);
            } else {
                // other literal expression
                let lit = try!(self.parse_lit());
                hi = lit.span.hi;
                ex = ExprLit(P(lit));
            }
        }
    }

    return Ok(self.mk_expr(lo, hi, ex));
}

/// Parse a block or unsafe block
pub fn parse_block_expr(&mut self, lo: BytePos, blk_mode: BlockCheckMode)
                        -> PResult<P<Expr>> {
    try!(self.expect(&token::OpenDelim(token::Brace)));
    let blk = try!(self.parse_block_tail(lo, blk_mode));
    return Ok(self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk)));
}

/// parse a.b or a(13) or a[4] or just a
pub fn parse_dot_or_call_expr(&mut self) -> PResult<P<Expr>> {
    let b = try!(self.parse_bottom_expr());
    self.parse_dot_or_call_expr_with(b)
}

// Given an already-parsed base expression `e0`, repeatedly applies postfix
// forms: field access / method calls (`.f`, `.f::<T>()`), tuple indexing
// (`.0`), calls (`(...)`), and indexing (`[...]`).
pub fn parse_dot_or_call_expr_with(&mut self, e0: P<Expr>) -> PResult<P<Expr>> {
    let mut e = e0;
    let lo = e.span.lo;
    let mut hi;
    loop {
        // expr.f
        if try!(self.eat(&token::Dot)) {
            match self.token {
              token::Ident(i, _) => {
                let dot = self.last_span.hi;
                hi = self.span.hi;
                try!(self.bump());
                // Optional turbofish: `.f::<T, U>(...)`.
                let (_, tys, bindings) = if try!(self.eat(&token::ModSep)) {
                    try!(self.expect_lt());
                    try!(self.parse_generic_values_after_lt())
                } else {
                    (Vec::new(), Vec::new(), Vec::new())
                };

                if !bindings.is_empty() {
                    let last_span = self.last_span;
                    self.span_err(last_span, "type bindings are only permitted on trait paths");
                }

                // expr.f() method call
                match self.token {
                    token::OpenDelim(token::Paren) => {
                        let mut es = try!(self.parse_unspanned_seq(
                            &token::OpenDelim(token::Paren),
                            &token::CloseDelim(token::Paren),
                            seq_sep_trailing_allowed(token::Comma),
                            |p| Ok(try!(p.parse_expr_nopanic()))
                        ));
                        hi = self.last_span.hi;

                        // The receiver becomes the first "argument".
                        es.insert(0, e);
                        let id = spanned(dot, hi, i);
                        let nd = self.mk_method_call(id, tys, es);
                        e = self.mk_expr(lo, hi, nd);
                    }
                    _ => {
                        if !tys.is_empty() {
                            let last_span = self.last_span;
                            self.span_err(last_span,
                                          "field expressions may not \
                                           have type parameters");
                        }

                        let id = spanned(dot, hi, i);
                        let field = self.mk_field(e, id);
                        e = self.mk_expr(lo, hi, field);
                    }
                }
              }
              token::Literal(token::Integer(n), suf) => {
                // Tuple-field access: `expr.0`.
                let sp = self.span;

                // A tuple index may not have a suffix
                self.expect_no_suffix(sp, "tuple index", suf);

                let dot = self.last_span.hi;
                hi = self.span.hi;
                try!(self.bump());

                let index = n.as_str().parse::<usize>().ok();
                match index {
                    Some(n) => {
                        let id = spanned(dot, hi, n);
                        let field = self.mk_tup_field(e, id);
                        e = self.mk_expr(lo, hi, field);
                    }
                    None => {
                        let last_span = self.last_span;
                        self.span_err(last_span, "invalid tuple or tuple struct index");
                    }
                }
              }
              token::Literal(token::Float(n), _suf) => {
                // `expr.0.1` lexes the index pair as a float literal; emit a
                // targeted diagnostic suggesting parentheses.
                try!(self.bump());
                let last_span = self.last_span;
                let fstr = n.as_str();
                self.span_err(last_span,
                              &format!("unexpected token: `{}`", n.as_str()));
                if fstr.chars().all(|x| "0123456789.".contains(x)) {
                    let float = match fstr.parse::<f64>().ok() {
                        Some(f) => f,
                        None => continue,
                    };
                    self.fileline_help(last_span,
                        &format!("try parenthesizing the first index; e.g., `(foo.{}){}`",
                                 float.trunc() as usize,
                                 format!(".{}", fstr.splitn(2, ".").last().unwrap())));
                }
                self.abort_if_errors();

              }
              _ => return Err(self.unexpected())
            }
            continue;
        }
        if self.expr_is_complete(&*e) { break; }
        match self.token {
          // expr(...)
          token::OpenDelim(token::Paren) => {
            let es = try!(self.parse_unspanned_seq(
                &token::OpenDelim(token::Paren),
                &token::CloseDelim(token::Paren),
                seq_sep_trailing_allowed(token::Comma),
                |p| Ok(try!(p.parse_expr_nopanic()))
            ));
            hi = self.last_span.hi;

            let nd = self.mk_call(e, es);
            e = self.mk_expr(lo, hi, nd);
          }

          // expr[...]
          // Could be either an index expression or a slicing expression.
          token::OpenDelim(token::Bracket) => {
            try!(self.bump());
            let ix = try!(self.parse_expr_nopanic());
            hi = self.span.hi;
            try!(self.commit_expr_expecting(&*ix, token::CloseDelim(token::Bracket)));
            let index = self.mk_index(e, ix);
            e = self.mk_expr(lo, hi, index)
          }
          _ => return Ok(e)
        }
    }
    return Ok(e);
}

// Parse unquoted tokens after a `$` in a token tree
fn parse_unquoted(&mut self) -> PResult<TokenTree> {
    let mut sp = self.span;
    let (name, namep) = match self.token {
        token::Dollar => {
            try!(self.bump());

            if self.token == token::OpenDelim(token::Paren) {
                // `$( ... ) sep? op` — a macro repetition sequence.
                let Spanned { node: seq, span: seq_span } = try!(self.parse_seq(
                    &token::OpenDelim(token::Paren),
                    &token::CloseDelim(token::Paren),
                    seq_sep_none(),
                    |p| p.parse_token_tree()
                ));
                let (sep, repeat) = try!(self.parse_sep_and_kleene_op());
                let name_num = macro_parser::count_names(&seq);
                return Ok(TtSequence(mk_sp(sp.lo, seq_span.hi),
                                  Rc::new(SequenceRepetition {
                                      tts: seq,
                                      separator: sep,
                                      op: repeat,
                                      num_captures: name_num
                                  })));
            } else if self.token.is_keyword_allow_following_colon(keywords::Crate) {
                // `$crate` — the special crate-root macro variable.
                try!(self.bump());
                return Ok(TtToken(sp, SpecialVarNt(SpecialMacroVar::CrateMacroVar)));
            } else {
                // `$name` — a plain substitution variable.
                sp = mk_sp(sp.lo, self.span.hi);
                let namep = match self.token { token::Ident(_, p) => p, _ => token::Plain };
                let name = try!(self.parse_ident());
                (name, namep)
            }
        }
        token::SubstNt(name, namep) => {
            try!(self.bump());
            (name, namep)
        }
        _ => unreachable!()
    };
    // continue by trying to parse the `:ident` after `$name`
    if self.token == token::Colon && self.look_ahead(1, |t| t.is_ident() &&
                                                            !t.is_strict_keyword() &&
                                                            !t.is_reserved_keyword()) {
        try!(self.bump());
        sp = mk_sp(sp.lo, self.span.hi);
        let kindp = match self.token { token::Ident(_, p) => p, _ => token::Plain };
        let nt_kind = try!(self.parse_ident());
        Ok(TtToken(sp, MatchNt(name, nt_kind, namep, kindp)))
    } else {
        Ok(TtToken(sp, SubstNt(name, namep)))
    }
}

// Reports a `$name` substitution token seen outside of a quote context,
// where no macro variable can be bound to it.
pub fn check_unknown_macro_variable(&mut self) -> PResult<()> {
    if self.quote_depth == 0 {
        match self.token {
            token::SubstNt(name, _) =>
                return Err(self.fatal(&format!("unknown macro variable `{}`",
                                       name))),
            _ => {}
        }
    }
    Ok(())
}

/// Parse an optional separator followed by a Kleene-style
/// repetition token (+ or *).
pub fn parse_sep_and_kleene_op(&mut self) -> PResult<(Option<token::Token>, ast::KleeneOp)> {
    fn parse_kleene_op(parser: &mut Parser) -> PResult<Option<ast::KleeneOp>> {
        match parser.token {
            token::BinOp(token::Star) => {
                try!(parser.bump());
                Ok(Some(ast::ZeroOrMore))
            },
            token::BinOp(token::Plus) => {
                try!(parser.bump());
                Ok(Some(ast::OneOrMore))
            },
            _ => Ok(None)
        }
    };

    // Either the operator comes first (no separator)...
    match try!(parse_kleene_op(self)) {
        Some(kleene_op) => return Ok((None, kleene_op)),
        None => {}
    }

    // ...or the next token is the separator and the operator must follow.
    let separator = try!(self.bump_and_get());
    match try!(parse_kleene_op(self)) {
        Some(zerok) => Ok((Some(separator), zerok)),
        None => return Err(self.fatal("expected `*` or `+`"))
    }
}

/// parse a single token tree from the input.
pub fn parse_token_tree(&mut self) -> PResult<TokenTree> {
    // FIXME #6994: currently, this is too eager. It
    // parses token trees but also identifies TtSequence's
    // and token::SubstNt's; it's too early to know yet
    // whether something will be a nonterminal or a seq
    // yet.
    maybe_whole!(deref self, NtTT);

    // this is the fall-through for the 'match' below.
    // invariants: the current token is not a left-delimiter,
    // not an EOF, and not the desired right-delimiter (if
    // it were, parse_seq_to_before_end would have prevented
    // reaching this point.
    fn parse_non_delim_tt_tok(p: &mut Parser) -> PResult<TokenTree> {
        maybe_whole!(deref p, NtTT);
        match p.token {
            token::CloseDelim(_) => {
                // This is a conservative error: only report the last unclosed delimiter. The
                // previous unclosed delimiters could actually be closed! The parser just hasn't
                // gotten to them yet.
                match p.open_braces.last() {
                    None => {}
                    Some(&sp) => p.span_note(sp, "unclosed delimiter"),
                };
                let token_str = p.this_token_to_string();
                Err(p.fatal(&format!("incorrect close delimiter: `{}`",
                                token_str)))
            },
            /* we ought to allow different depths of unquotation */
            token::Dollar | token::SubstNt(..) if p.quote_depth > 0 => {
                p.parse_unquoted()
            }
            _ => {
                Ok(TtToken(p.span, try!(p.bump_and_get())))
            }
        }
    }

    match self.token {
        token::Eof => {
            let open_braces = self.open_braces.clone();
            for sp in &open_braces {
                self.span_help(*sp, "did you mean to close this delimiter?");
            }
            // There shouldn't really be a span, but it's easier for the test runner
            // if we give it one
            return Err(self.fatal("this file contains an un-closed delimiter "));
        },
        token::OpenDelim(delim) => {
            // The span for beginning of the delimited section
            let pre_span = self.span;

            // Parse the open delimiter.
            self.open_braces.push(self.span);
            let open_span = self.span;
            try!(self.bump());

            // Parse the token trees within the delimiters
            let tts = try!(self.parse_seq_to_before_end(
                &token::CloseDelim(delim),
                seq_sep_none(),
                |p| p.parse_token_tree()
            ));

            // Parse the close delimiter.
            let close_span = self.span;
            try!(self.bump());
            self.open_braces.pop().unwrap();

            // Expand to cover the entire delimited token tree
            let span = Span { hi: close_span.hi, ..pre_span };

            Ok(TtDelimited(span, Rc::new(Delimited {
                delim: delim,
                open_span: open_span,
                tts: tts,
                close_span: close_span,
            })))
        },
        _ => parse_non_delim_tt_tok(self),
    }
}

// parse a stream of tokens into a list of TokenTree's,
// up to EOF.
pub fn parse_all_token_trees(&mut self) -> PResult<Vec<TokenTree>> {
    let mut tts = Vec::new();
    while self.token != token::Eof {
        tts.push(try!(self.parse_token_tree()));
    }
    Ok(tts)
}

/// Parse a prefix-operator expr
pub fn parse_prefix_expr(&mut self) -> PResult<P<Expr>> {
    let lo = self.span.lo;
    let hi;

    // Note: when adding new unary operators, don't forget to adjust Token::can_begin_expr()
    let ex;
    match self.token {
      token::Not => {
        try!(self.bump());
        let e = try!(self.parse_prefix_expr());
        hi = e.span.hi;
        ex = self.mk_unary(UnNot, e);
      }
      token::BinOp(token::Minus) => {
        try!(self.bump());
        let e = try!(self.parse_prefix_expr());
        hi = e.span.hi;
        ex = self.mk_unary(UnNeg, e);
      }
      token::BinOp(token::Star) => {
        try!(self.bump());
        let e = try!(self.parse_prefix_expr());
        hi = e.span.hi;
        ex = self.mk_unary(UnDeref, e);
      }
      token::BinOp(token::And) | token::AndAnd => {
        // `&expr` / `&mut expr`; `expect_and` also splits `&&` into two `&`s.
        try!(self.expect_and());
        let m = try!(self.parse_mutability());
        let e = try!(self.parse_prefix_expr());
        hi = e.span.hi;
        ex = ExprAddrOf(m, e);
      }
      token::Ident(..) if self.token.is_keyword(keywords::In) => {
        // `in PLACE { BLOCK }` — placement-in expression.
        try!(self.bump());
        let place = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
        let blk = try!(self.parse_block());
        hi = blk.span.hi;
        let blk_expr = self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk));
        ex = ExprInPlace(place, blk_expr);
      }
      token::Ident(..) if self.token.is_keyword(keywords::Box) => {
        try!(self.bump());
        let subexpression = try!(self.parse_prefix_expr());
        hi = subexpression.span.hi;
        ex = ExprBox(subexpression);
      }
      _ => return self.parse_dot_or_call_expr()
    }
    return Ok(self.mk_expr(lo, hi, ex));
}

/// Parse an expression of binops
pub fn parse_binops(&mut self) -> PResult<P<Expr>> {
    let prefix_expr = try!(self.parse_prefix_expr());
    self.parse_more_binops(prefix_expr, 0)
}

/// Parse an expression of binops of at least min_prec precedence
// Classic precedence-climbing: greedily consume operators whose precedence
// is >= `min_prec`, recursing with a higher floor for the right-hand side.
pub fn parse_more_binops(&mut self, lhs: P<Expr>, min_prec: usize) -> PResult<P<Expr>> {
    if self.expr_is_complete(&*lhs) { return Ok(lhs); }

    self.expected_tokens.push(TokenType::Operator);

    let cur_op_span = self.span;
    let cur_opt = self.token.to_binop();
    match cur_opt {
        Some(cur_op) => {
            if ast_util::is_comparison_binop(cur_op) {
                self.check_no_chained_comparison(&*lhs, cur_op)
            }
            let cur_prec = operator_prec(cur_op);
            if cur_prec >= min_prec {
                try!(self.bump());
                let expr = try!(self.parse_prefix_expr());
                // `cur_prec + 1` makes the operators left-associative.
                let rhs = try!(self.parse_more_binops(expr, cur_prec + 1));
                let lhs_span = lhs.span;
                let rhs_span = rhs.span;
                let binary = self.mk_binary(codemap::respan(cur_op_span, cur_op), lhs, rhs);
                let bin = self.mk_expr(lhs_span.lo, rhs_span.hi, binary);
                self.parse_more_binops(bin, min_prec)
            } else {
                Ok(lhs)
            }
        }
        None => {
            // `expr as Type` casts bind at AS_PREC.
            if AS_PREC >= min_prec && try!(self.eat_keyword_noexpect(keywords::As)) {
                let rhs = try!(self.parse_ty_nopanic());
                let _as = self.mk_expr(lhs.span.lo,
                                       rhs.span.hi,
                                       ExprCast(lhs, rhs));
                self.parse_more_binops(_as, min_prec)
            } else {
                Ok(lhs)
            }
        }
    }
}

/// Produce an error if comparison operators are chained (RFC #558).
/// We only need to check lhs, not rhs, because all comparison ops
/// have same precedence and are left-associative
fn check_no_chained_comparison(&mut self, lhs: &Expr, outer_op: ast::BinOp_) {
    debug_assert!(ast_util::is_comparison_binop(outer_op));
    match lhs.node {
        ExprBinary(op, _, _) if ast_util::is_comparison_binop(op.node) => {
            // respan to include both operators
            let op_span = mk_sp(op.span.lo, self.span.hi);
            self.span_err(op_span,
                "chained comparison operators require parentheses");
            if op.node == BiLt && outer_op == BiGt {
                // Likely a mistyped generic-argument list.
                self.fileline_help(op_span,
                    "use `::<...>` instead of `<...>` if you meant to specify type arguments");
            }
        }
        _ => {}
    }
}

/// Parse an assignment expression....
/// actually, this seems to be the main entry point for
/// parsing an arbitrary expression.
pub fn parse_assign_expr(&mut self) -> PResult<P<Expr>> {
    match self.token {
      token::DotDot => {
        // prefix-form of range notation '..expr'
        // This has the same precedence as assignment expressions
        // (much lower than other prefix expressions) to be consistent
        // with the postfix-form 'expr..'
        let lo = self.span.lo;
        let mut hi = self.span.hi;
        try!(self.bump());
        let opt_end = if self.is_at_start_of_range_notation_rhs() {
            let end = try!(self.parse_binops());
            hi = end.span.hi;
            Some(end)
        } else {
            None
        };
        let ex = self.mk_range(None, opt_end);
        Ok(self.mk_expr(lo, hi, ex))
      }
      _ => {
        let lhs = try!(self.parse_binops());
        self.parse_assign_expr_with(lhs)
      }
    }
}

// Given an already-parsed left-hand side, parses any trailing `=`,
// compound-assignment (`+=` etc.), or range (`lhs..` / `lhs..rhs`) suffix.
pub fn parse_assign_expr_with(&mut self, lhs: P<Expr>) -> PResult<P<Expr>> {
    let restrictions = self.restrictions & Restrictions::RESTRICTION_NO_STRUCT_LITERAL;
    let op_span = self.span;
    match self.token {
      token::Eq => {
          try!(self.bump());
          let rhs = try!(self.parse_expr_res(restrictions));
          Ok(self.mk_expr(lhs.span.lo, rhs.span.hi, ExprAssign(lhs, rhs)))
      }
      token::BinOpEq(op) => {
          try!(self.bump());
          let rhs = try!(self.parse_expr_res(restrictions));
          // Map the token-level operator to the AST binary operator.
          let aop = match op {
              token::Plus =>    BiAdd,
              token::Minus =>   BiSub,
              token::Star =>    BiMul,
              token::Slash =>   BiDiv,
              token::Percent => BiRem,
              token::Caret =>   BiBitXor,
              token::And =>     BiBitAnd,
              token::Or =>      BiBitOr,
              token::Shl =>     BiShl,
              token::Shr =>     BiShr
          };
          let rhs_span = rhs.span;
          let span = lhs.span;
          let assign_op = self.mk_assign_op(codemap::respan(op_span, aop), lhs, rhs);
          Ok(self.mk_expr(span.lo, rhs_span.hi, assign_op))
      }
      // A range expression, either `expr..expr` or `expr..`.
      token::DotDot => {
        let lo = lhs.span.lo;
        let mut hi = self.span.hi;
        try!(self.bump());

        let opt_end = if self.is_at_start_of_range_notation_rhs() {
            let end = try!(self.parse_binops());
            hi = end.span.hi;
            Some(end)
        } else {
            None
        };
        let range = self.mk_range(Some(lhs), opt_end);
        return Ok(self.mk_expr(lo, hi, range));
      }

      _ => {
          Ok(lhs)
      }
    }
}

// True when the current token can begin the right-hand side of `..`.
fn is_at_start_of_range_notation_rhs(&self) -> bool {
    if self.token.can_begin_expr() {
        // parse `for i in 1.. { }` as infinite loop, not as `for i in (1..{})`.
        if self.token == token::OpenDelim(token::Brace) {
            return !self.restrictions.contains(Restrictions::RESTRICTION_NO_STRUCT_LITERAL);
        }
        true
    } else {
        false
    }
}

/// Parse an 'if' or 'if let' expression ('if' token already eaten)
pub fn parse_if_expr(&mut self) -> PResult<P<Expr>> {
    if self.check_keyword(keywords::Let) {
        return self.parse_if_let_expr();
    }
    let lo = self.last_span.lo;
    // Struct literals are forbidden in the condition to keep `if x {` parseable.
    let cond = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
    let thn = try!(self.parse_block());
    let mut els: Option<P<Expr>> = None;
    let mut hi = thn.span.hi;
    if try!(self.eat_keyword(keywords::Else)) {
        let elexpr = try!(self.parse_else_expr());
        hi = elexpr.span.hi;
        els = Some(elexpr);
    }
    Ok(self.mk_expr(lo, hi, ExprIf(cond, thn, els)))
}

/// Parse an 'if let' expression ('if' token already eaten)
pub fn parse_if_let_expr(&mut self) -> PResult<P<Expr>> {
    let lo = self.last_span.lo;
    try!(self.expect_keyword(keywords::Let));
    let pat = try!(self.parse_pat_nopanic());
    try!(self.expect(&token::Eq));
    let expr = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
    let thn = try!(self.parse_block());
    let (hi, els) = if try!(self.eat_keyword(keywords::Else)) {
        let expr = try!(self.parse_else_expr());
        (expr.span.hi, Some(expr))
    } else {
        (thn.span.hi, None)
    };
    Ok(self.mk_expr(lo, hi, ExprIfLet(pat, expr, thn, els)))
}

// `|args| expr`
pub fn parse_lambda_expr(&mut self, lo: BytePos,
                         capture_clause: CaptureClause)
                         -> PResult<P<Expr>>
{
    let decl = try!(self.parse_fn_block_decl());
    let body = match decl.output {
        DefaultReturn(_) => {
            // If no explicit return type is given, parse any
            // expr and wrap it up in a dummy block:
            let body_expr = try!(self.parse_expr_nopanic());
            P(ast::Block {
                id: ast::DUMMY_NODE_ID,
                stmts: vec![],
                span: body_expr.span,
                expr: Some(body_expr),
                rules: DefaultBlock,
            })
        }
        _ => {
            // If an explicit return type is given, require a
            // block to appear (RFC 968).
            try!(self.parse_block())
        }
    };

    Ok(self.mk_expr(
        lo,
        body.span.hi,
        ExprClosure(capture_clause, decl, body)))
}

// Parses whatever follows `else`: either another `if`/`if let` or a block.
pub fn parse_else_expr(&mut self) -> PResult<P<Expr>> {
    if try!(self.eat_keyword(keywords::If)) {
        return self.parse_if_expr();
    } else {
        let blk = try!(self.parse_block());
        return Ok(self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk)));
    }
}

/// Parse a 'for' .. 'in' expression ('for' token already eaten)
pub fn parse_for_expr(&mut self, opt_ident: Option<ast::Ident>,
                      span_lo: BytePos) -> PResult<P<Expr>> {
    // Parse: `for <src_pat> in <src_expr> <src_loop_block>`

    let pat = try!(self.parse_pat_nopanic());
    try!(self.expect_keyword(keywords::In));
    let expr = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
    let loop_block = try!(self.parse_block());
    let hi = self.last_span.hi;

    Ok(self.mk_expr(span_lo, hi, ExprForLoop(pat, expr, loop_block, opt_ident)))
}

/// Parse a 'while' or 'while let' expression ('while' token already eaten)
pub fn parse_while_expr(&mut self, opt_ident: Option<ast::Ident>,
                        span_lo: BytePos) -> PResult<P<Expr>> {
    if self.token.is_keyword(keywords::Let) {
        return self.parse_while_let_expr(opt_ident, span_lo);
    }
    let cond = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
    let body = try!(self.parse_block());
    let hi = body.span.hi;
    return Ok(self.mk_expr(span_lo, hi, ExprWhile(cond, body, opt_ident)));
}

/// Parse a 'while let' expression ('while' token already eaten)
pub fn parse_while_let_expr(&mut self, opt_ident: Option<ast::Ident>,
                            span_lo: BytePos) -> PResult<P<Expr>> {
    try!(self.expect_keyword(keywords::Let));
    let pat = try!(self.parse_pat_nopanic());
    try!(self.expect(&token::Eq));
    let expr = try!(self.parse_expr_res(Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
    let body = try!(self.parse_block());
    let hi = body.span.hi;
    return Ok(self.mk_expr(span_lo, hi, ExprWhileLet(pat, expr, body, opt_ident)));
}

// Parse a `loop { ... }` expression ('loop' token already eaten); the
// optional ident is the loop label.
pub fn parse_loop_expr(&mut self, opt_ident: Option<ast::Ident>,
                       span_lo: BytePos) -> PResult<P<Expr>> {
    let body = try!(self.parse_block());
    let hi = body.span.hi;
    Ok(self.mk_expr(span_lo, hi, ExprLoop(body, opt_ident)))
}

// Parse a `match` expression ('match' token already eaten).
fn parse_match_expr(&mut self) -> PResult<P<Expr>> {
    let lo = self.last_span.lo;
    let discriminant = try!(self.parse_expr_res(
        Restrictions::RESTRICTION_NO_STRUCT_LITERAL));
    try!(self.commit_expr_expecting(&*discriminant, token::OpenDelim(token::Brace)));
    let mut arms: Vec<Arm> = Vec::new();
    while self.token != token::CloseDelim(token::Brace) {
        arms.push(try!(self.parse_arm_nopanic()));
    }
    let hi = self.span.hi;
    try!(self.bump());
    return Ok(self.mk_expr(lo, hi, ExprMatch(discriminant, arms)));
}

// Parse a single match arm: `attrs? pats (if guard)? => expr ,?`.
pub fn parse_arm_nopanic(&mut self) -> PResult<Arm> {
    maybe_whole!(no_clone self, NtArm);

    let attrs = self.parse_outer_attributes();
    let pats = try!(self.parse_pats());
    let mut guard = None;
    if try!(self.eat_keyword(keywords::If)) {
        guard = Some(try!(self.parse_expr_nopanic()));
    }
    try!(self.expect(&token::FatArrow));
    let expr = try!(self.parse_expr_res(Restrictions::RESTRICTION_STMT_EXPR));

    // Block-bodied arms may omit the trailing comma; expression bodies
    // require one unless this is the last arm.
    let require_comma =
        !classify::expr_is_simple_block(&*expr)
        && self.token != token::CloseDelim(token::Brace);

    if require_comma {
        try!(self.commit_expr(&*expr, &[token::Comma], &[token::CloseDelim(token::Brace)]));
    } else {
        try!(self.eat(&token::Comma));
    }

    Ok(ast::Arm {
        attrs: attrs,
        pats: pats,
        guard: guard,
        body: expr,
    })
}

/// Parse an expression
pub fn parse_expr_nopanic(&mut self) -> PResult<P<Expr>> {
    self.parse_expr_res(Restrictions::empty())
}

/// Parse an expression, subject to the given restrictions
// Saves and restores `self.restrictions` around the parse so the
// restriction set is scoped to this expression only.
pub fn parse_expr_res(&mut self, r: Restrictions) -> PResult<P<Expr>> {
    let old = self.restrictions;
    self.restrictions = r;
    let e = try!(self.parse_assign_expr());
    self.restrictions = old;
    return Ok(e);
}

/// Parse the RHS of a local variable declaration (e.g. '= 14;')
fn parse_initializer(&mut self) -> PResult<Option<P<Expr>>> {
    if self.check(&token::Eq) {
        try!(self.bump());
        Ok(Some(try!(self.parse_expr_nopanic())))
    } else {
        Ok(None)
    }
}

/// Parse patterns, separated by '|' s
fn parse_pats(&mut self) -> PResult<Vec<P<Pat>>> {
    let mut pats = Vec::new();
    loop {
        pats.push(try!(self.parse_pat_nopanic()));
        if self.check(&token::BinOp(token::Or)) { try!(self.bump());}
        else { return Ok(pats); }
    };
}

// Parses the comma-separated elements of a tuple pattern; requires a
// trailing comma for the single-element form `(p,)`.
fn parse_pat_tuple_elements(&mut self) -> PResult<Vec<P<Pat>>> {
    let mut fields = vec![];
    if !self.check(&token::CloseDelim(token::Paren)) {
        fields.push(try!(self.parse_pat_nopanic()));
        if self.look_ahead(1, |t| *t != token::CloseDelim(token::Paren)) {
            while try!(self.eat(&token::Comma)) &&
                  !self.check(&token::CloseDelim(token::Paren)) {
                fields.push(try!(self.parse_pat_nopanic()));
            }
        }
        if fields.len() == 1 {
            try!(self.expect(&token::Comma));
        }
    }
    Ok(fields)
}

// Parses the interior of a slice pattern `[before.., slice?, after..]`,
// returning (elements before `..`, the optional `..`-bound subpattern,
// elements after it).
fn parse_pat_vec_elements(
    &mut self,
) -> PResult<(Vec<P<Pat>>, Option<P<Pat>>, Vec<P<Pat>>)> {
    let mut before = Vec::new();
    let mut slice = None;
    let mut after = Vec::new();
    let mut first = true;
    let mut before_slice = true;

    while self.token != token::CloseDelim(token::Bracket) {
        if first {
            first = false;
        } else {
            try!(self.expect(&token::Comma));

            if self.token == token::CloseDelim(token::Bracket)
                    && (before_slice || !after.is_empty()) {
                break
            }
        }

        if before_slice {
            if self.check(&token::DotDot) {
                try!(self.bump());

                if self.check(&token::Comma) ||
                        self.check(&token::CloseDelim(token::Bracket)) {
                    // A bare `..` — matches the middle with a wildcard.
                    slice = Some(P(ast::Pat {
                        id: ast::DUMMY_NODE_ID,
                        node: PatWild(PatWildMulti),
                        span: self.span,
                    }));
                    before_slice = false;
                }
                continue
            }
        }

        let subpat = try!(self.parse_pat_nopanic());
        if before_slice && self.check(&token::DotDot) {
            // `pat..` — the named slice subpattern.
            try!(self.bump());
            slice = Some(subpat);
            before_slice = false;
        } else if before_slice {
            before.push(subpat);
        } else {
            after.push(subpat);
        }
    }

    Ok((before, slice, after))
}

/// Parse the fields of a struct-like pattern
fn parse_pat_fields(&mut self) ->
PResult<(Vec<codemap::Spanned<ast::FieldPat>> , bool)> {
    let mut fields = Vec::new();
    let mut etc = false;
    let mut first = true;
    while self.token != token::CloseDelim(token::Brace) {
        if first {
            first = false;
        } else {
            try!(self.expect(&token::Comma));
            // accept trailing commas
            if self.check(&token::CloseDelim(token::Brace)) { break }
        }

        let lo = self.span.lo;
        let hi;

        if self.check(&token::DotDot) {
            // `..` (rest) must be the last thing before the closing brace.
            try!(self.bump());
            if self.token != token::CloseDelim(token::Brace) {
                let token_str = self.this_token_to_string();
                return Err(self.fatal(&format!("expected `{}`, found `{}`", "}",
                                               token_str)))
            }
            etc = true;
            break;
        }

        // Check if a colon exists one ahead. This means we're parsing a fieldname.
        let (subpat, fieldname, is_shorthand) = if self.look_ahead(1, |t| t == &token::Colon) {
            // Parsing a pattern of the form "fieldname: pat"
            let fieldname = try!(self.parse_ident());
            try!(self.bump());
            let pat = try!(self.parse_pat_nopanic());
            hi = pat.span.hi;
            (pat, fieldname, false)
        } else {
            // Parsing a pattern of the form "(box) (ref) (mut) fieldname"
            let is_box = try!(self.eat_keyword(keywords::Box));
            let boxed_span_lo = self.span.lo;
            let is_ref = try!(self.eat_keyword(keywords::Ref));
            let is_mut = try!(self.eat_keyword(keywords::Mut));
            let fieldname = try!(self.parse_ident());
            hi = self.last_span.hi;

            // Shorthand desugars to `fieldname: binding` with the binding
            // mode derived from the `ref`/`mut` qualifiers.
            let bind_type = match (is_ref, is_mut) {
                (true, true) => BindByRef(MutMutable),
                (true, false) => BindByRef(MutImmutable),
                (false, true) => BindByValue(MutMutable),
                (false, false) => BindByValue(MutImmutable),
            };
            let fieldpath = codemap::Spanned{span:self.last_span, node:fieldname};
            let fieldpat = P(ast::Pat{
                id: ast::DUMMY_NODE_ID,
                node: PatIdent(bind_type, fieldpath, None),
                span: mk_sp(boxed_span_lo, hi),
            });

            let subpat = if is_box {
                P(ast::Pat{
                    id: ast::DUMMY_NODE_ID,
                    node: PatBox(fieldpat),
                    span: mk_sp(lo, hi),
                })
            } else {
                fieldpat
            };
            (subpat, fieldname, true)
        };

        fields.push(codemap::Spanned { span: mk_sp(lo, hi),
                                       node: ast::FieldPat { ident: fieldname,
                                                             pat: subpat,
                                                             is_shorthand:
is_shorthand }});
    }
    return Ok((fields, etc));
}

/// Parse the end expression of an `a...b` range pattern: a (possibly
/// qualified) path, or a literal with an optional leading minus.
fn parse_pat_range_end(&mut self) -> PResult<P<Expr>> {
    if self.is_path_start() {
        let lo = self.span.lo;
        let (qself, path) = if try!(self.eat_lt()) {
            // Parse a qualified path
            let (qself, path) =
                try!(self.parse_qualified_path(NoTypesAllowed));
            (Some(qself), path)
        } else {
            // Parse an unqualified path
            (None, try!(self.parse_path(LifetimeAndTypesWithColons)))
        };
        let hi = self.last_span.hi;
        Ok(self.mk_expr(lo, hi, ExprPath(qself, path)))
    } else {
        self.parse_literal_maybe_minus()
    }
}

/// Can the current token start a path? `true`/`false` are excluded: they
/// are literal patterns, not paths.
fn is_path_start(&self) -> bool {
    (self.token == token::Lt || self.token == token::ModSep
        || self.token.is_ident() || self.token.is_path())
        && !self.token.is_keyword(keywords::True)
        && !self.token.is_keyword(keywords::False)
}

/// Parse a pattern.
pub fn parse_pat_nopanic(&mut self) -> PResult<P<Pat>> {
    maybe_whole!(self, NtPat);

    let lo = self.span.lo;
    let pat;
    match self.token {
      token::Underscore => {
        // Parse _
        try!(self.bump());
        pat = PatWild(PatWildSingle);
      }
      token::BinOp(token::And) | token::AndAnd => {
        // Parse &pat / &mut pat
        try!(self.expect_and());
        let mutbl = try!(self.parse_mutability());
        let subpat = try!(self.parse_pat_nopanic());
        pat = PatRegion(subpat, mutbl);
      }
      token::OpenDelim(token::Paren) => {
        // Parse (pat,pat,pat,...) as tuple pattern
        try!(self.bump());
        let fields = try!(self.parse_pat_tuple_elements());
        try!(self.expect(&token::CloseDelim(token::Paren)));
        pat = PatTup(fields);
      }
      token::OpenDelim(token::Bracket) => {
        // Parse [pat,pat,...] as slice pattern
        try!(self.bump());
        let (before, slice, after) = try!(self.parse_pat_vec_elements());
        try!(self.expect(&token::CloseDelim(token::Bracket)));
        pat = PatVec(before, slice, after);
      }
      _ => {
        // At this point, token != _, &, &&, (, [
        if try!(self.eat_keyword(keywords::Mut)) {
            // Parse mut ident @ pat
            pat = try!(self.parse_pat_ident(BindByValue(MutMutable)));
        } else if try!(self.eat_keyword(keywords::Ref)) {
            // Parse ref ident @ pat / ref mut ident @ pat
            let mutbl = try!(self.parse_mutability());
            pat = try!(self.parse_pat_ident(BindByRef(mutbl)));
        } else if try!(self.eat_keyword(keywords::Box)) {
            // Parse box pat
            let subpat = try!(self.parse_pat_nopanic());
            pat = PatBox(subpat);
        } else if self.is_path_start() {
            // Parse pattern starting with a path
            if self.token.is_plain_ident() &&
                    self.look_ahead(1, |t| *t != token::DotDotDot &&
                        *t != token::OpenDelim(token::Brace) &&
                        *t != token::OpenDelim(token::Paren) &&
                        // Contrary to its definition, a plain ident can be followed by :: in macros
                        *t != token::ModSep) {
                // Plain idents have some extra abilities here compared to general paths
                if self.look_ahead(1, |t| *t == token::Not) {
                    // Parse macro invocation
                    let ident = try!(self.parse_ident());
                    let ident_span = self.last_span;
                    let path = ident_to_path(ident_span, ident);
                    try!(self.bump());
                    let delim = try!(self.expect_open_delim());
                    let tts = try!(self.parse_seq_to_end(&token::CloseDelim(delim),
                            seq_sep_none(), |p| p.parse_token_tree()));
                    let mac = Mac_ { path: path, tts: tts, ctxt: EMPTY_CTXT };
                    pat = PatMac(codemap::Spanned {node: mac, span: self.span});
                } else {
                    // Parse ident @ pat
                    // This can give false positives and parse nullary enums,
                    // they are dealt with later in resolve
                    pat = try!(self.parse_pat_ident(BindByValue(MutImmutable)));
                }
            } else {
                let (qself, path) = if try!(self.eat_lt()) {
                    // Parse a qualified path
                    let (qself, path) =
                        try!(self.parse_qualified_path(NoTypesAllowed));
                    (Some(qself), path)
                } else {
                    // Parse an unqualified path
                    (None,
try!(self.parse_path(LifetimeAndTypesWithColons)))
                };

                // What follows the path decides the pattern kind: range,
                // struct, tuple-struct/enum, qualified path, or nullary enum.
                match self.token {
                  token::DotDotDot => {
                    // Parse range
                    let hi = self.last_span.hi;
                    let begin = self.mk_expr(lo, hi, ExprPath(qself, path));
                    try!(self.bump());
                    let end = try!(self.parse_pat_range_end());
                    pat = PatRange(begin, end);
                  }
                  token::OpenDelim(token::Brace) => {
                    if qself.is_some() {
                        let span = self.span;
                        self.span_err(span,
                                      "unexpected `{` after qualified path");
                        self.abort_if_errors();
                    }
                    // Parse struct pattern
                    try!(self.bump());
                    let (fields, etc) = try!(self.parse_pat_fields());
                    try!(self.bump());
                    pat = PatStruct(path, fields, etc);
                  }
                  token::OpenDelim(token::Paren) => {
                    if qself.is_some() {
                        let span = self.span;
                        self.span_err(span,
                                      "unexpected `(` after qualified path");
                        self.abort_if_errors();
                    }
                    // Parse tuple struct or enum pattern
                    if self.look_ahead(1, |t| *t == token::DotDot) {
                        // This is a "top constructor only" pat
                        try!(self.bump());
                        try!(self.bump());
                        try!(self.expect(&token::CloseDelim(token::Paren)));
                        pat = PatEnum(path, None);
                    } else {
                        let args = try!(self.parse_enum_variant_seq(
                                &token::OpenDelim(token::Paren),
                                &token::CloseDelim(token::Paren),
                                seq_sep_trailing_allowed(token::Comma),
                                |p| p.parse_pat_nopanic()));
                        pat = PatEnum(path, Some(args));
                    }
                  }
                  _ if qself.is_some() => {
                    // Parse qualified path
                    pat = PatQPath(qself.unwrap(), path);
                  }
                  _ => {
                    // Parse nullary enum
                    pat = PatEnum(path, Some(vec![]));
                  }
                }
            }
        } else {
            // Try to parse everything else as literal with optional minus
            let begin = try!(self.parse_literal_maybe_minus());
            if try!(self.eat(&token::DotDotDot)) {
                let end = try!(self.parse_pat_range_end());
                pat = PatRange(begin, end);
            } else {
                pat = PatLit(begin);
            }
        }
      }
    }

    let hi = self.last_span.hi;
    Ok(P(ast::Pat {
        id: ast::DUMMY_NODE_ID,
        node: pat,
        span: mk_sp(lo, hi),
    }))
}

/// Parse ident or ident @ pat
/// used by the copy foo and ref foo patterns to give a good
/// error message when parsing mistakes like ref foo(a,b)
fn parse_pat_ident(&mut self,
                   binding_mode: ast::BindingMode)
                   ->
PResult<ast::Pat_> {
    if !self.token.is_plain_ident() {
        let span = self.span;
        let tok_str = self.this_token_to_string();
        return Err(self.span_fatal(span,
                        &format!("expected identifier, found `{}`", tok_str)))
    }
    let ident = try!(self.parse_ident());
    let last_span = self.last_span;
    let name = codemap::Spanned{span: last_span, node: ident};
    // Optional `@ subpattern`.
    let sub = if try!(self.eat(&token::At)) {
        Some(try!(self.parse_pat_nopanic()))
    } else {
        None
    };

    // just to be friendly, if they write something like
    //   ref Some(i)
    // we end up here with ( as the current token. This shortly
    // leads to a parse error. Note that if there is no explicit
    // binding mode then we do not end up here, because the lookahead
    // will direct us over to parse_enum_variant()
    if self.token == token::OpenDelim(token::Paren) {
        let last_span = self.last_span;
        return Err(self.span_fatal(
            last_span,
            "expected identifier, found enum pattern"))
    }

    Ok(PatIdent(binding_mode, name, sub))
}

/// Parse a local variable declaration
fn parse_local(&mut self) -> PResult<P<Local>> {
    let lo = self.span.lo;
    let pat = try!(self.parse_pat_nopanic());

    // Optional `: Type` ascription.
    let mut ty = None;
    if try!(self.eat(&token::Colon)) {
        ty = Some(try!(self.parse_ty_sum()));
    }
    let init = try!(self.parse_initializer());
    Ok(P(ast::Local {
        ty: ty,
        pat: pat,
        init: init,
        id: ast::DUMMY_NODE_ID,
        span: mk_sp(lo, self.last_span.hi),
    }))
}

/// Parse a "let" stmt
fn parse_let(&mut self) -> PResult<P<Decl>> {
    let lo = self.span.lo;
    let local = try!(self.parse_local());
    Ok(P(spanned(lo, self.last_span.hi, DeclLocal(local))))
}

/// Parse a structure field
fn parse_name_and_ty(&mut self, pr: Visibility,
                     attrs: Vec<Attribute> ) -> PResult<StructField> {
    // For `pub` fields the span starts at the already-consumed `pub`.
    let lo = match pr {
        Inherited => self.span.lo,
        Public => self.last_span.lo,
    };
    if !self.token.is_plain_ident() {
        return Err(self.fatal("expected ident"));
    }
    let name = try!(self.parse_ident());
    try!(self.expect(&token::Colon));
    let ty = try!(self.parse_ty_sum());
    Ok(spanned(lo, self.last_span.hi, ast::StructField_ {
        kind:
NamedField(name, pr),
        id: ast::DUMMY_NODE_ID,
        ty: ty,
        attrs: attrs,
    }))
}

/// Emit an expected item after attributes error.
fn expected_item_err(&self, attrs: &[Attribute]) {
    let message = match attrs.last() {
        Some(&Attribute { node: ast::Attribute_ { is_sugared_doc: true, .. }, .. }) => {
            "expected item after doc comment"
        }
        _ => "expected item after attributes",
    };

    self.span_err(self.last_span, message);
}

/// Parse a statement. may include decl.
pub fn parse_stmt_nopanic(&mut self) -> PResult<Option<P<Stmt>>> {
    Ok(try!(self.parse_stmt_()).map(P))
}

// Parse one statement: a `let` declaration, a macro invocation, an item,
// or an expression statement.
fn parse_stmt_(&mut self) -> PResult<Option<Stmt>> {
    maybe_whole!(Some deref self, NtStmt);

    // Outer attributes may only precede an item, never a bare expression.
    fn check_expected_item(p: &mut Parser, attrs: &[Attribute]) {
        // If we have attributes then we should have an item
        if !attrs.is_empty() {
            p.expected_item_err(attrs);
        }
    }

    let attrs = self.parse_outer_attributes();
    let lo = self.span.lo;

    Ok(Some(if self.check_keyword(keywords::Let) {
        check_expected_item(self, &attrs);
        try!(self.expect_keyword(keywords::Let));
        let decl = try!(self.parse_let());
        spanned(lo, decl.span.hi, StmtDecl(decl, ast::DUMMY_NODE_ID))
    } else if self.token.is_ident()
        && !self.token.is_any_keyword()
        && self.look_ahead(1, |t| *t == token::Not) {
        // it's a macro invocation:
        check_expected_item(self, &attrs);

        // Potential trouble: if we allow macros with paths instead of
        // idents, we'd need to look ahead past the whole path here...

        let pth = try!(self.parse_path(NoTypesAllowed));
        try!(self.bump());

        // An ident after `mac!` (e.g. `macro_rules! name`) marks an
        // item-producing macro.
        let id = match self.token {
            token::OpenDelim(_) => token::special_idents::invalid, // no special identifier
            _ => try!(self.parse_ident()),
        };

        // check that we're pointing at delimiters (need to check
        // again after the `if`, because of `parse_ident`
        // consuming more tokens).
        let delim = match self.token {
            token::OpenDelim(delim) => delim,
            _ => {
                // we only expect an ident if we didn't parse one
                // above.
let ident_str = if id.name == token::special_idents::invalid.name {
                    "identifier, "
                } else {
                    ""
                };
                let tok_str = self.this_token_to_string();
                return Err(self.fatal(&format!("expected {}`(` or `{{`, found `{}`",
                                               ident_str,
                                               tok_str)))
            },
        };

        let tts = try!(self.parse_unspanned_seq(
            &token::OpenDelim(delim),
            &token::CloseDelim(delim),
            seq_sep_none(),
            |p| p.parse_token_tree()
        ));
        let hi = self.last_span.hi;

        let style = if delim == token::Brace {
            MacStmtWithBraces
        } else {
            MacStmtWithoutBraces
        };

        if id.name == token::special_idents::invalid.name {
            // Plain statement macro (no extra ident).
            spanned(lo, hi,
                    StmtMac(P(spanned(lo, hi,
                                      Mac_ { path: pth, tts: tts, ctxt: EMPTY_CTXT })),
                            style))
        } else {
            // if it has a special ident, it's definitely an item
            //
            // Require a semicolon or braces.
            if style != MacStmtWithBraces {
                if !try!(self.eat(&token::Semi)) {
                    let last_span = self.last_span;
                    self.span_err(last_span,
                                  "macros that expand to items must \
                                   either be surrounded with braces or \
                                   followed by a semicolon");
                }
            }
            spanned(lo, hi, StmtDecl(
                P(spanned(lo, hi, DeclItem(
                    self.mk_item(
                        lo, hi, id /*id is good here*/,
                        ItemMac(spanned(lo, hi,
                                        Mac_ { path: pth, tts: tts, ctxt: EMPTY_CTXT })),
                        Inherited, Vec::new(/*no attrs*/))))),
                ast::DUMMY_NODE_ID))
        }
    } else {
        // Not a `let` and not a macro: try an item, then an expression.
        match try!(self.parse_item_(attrs, false)) {
            Some(i) => {
                let hi = i.span.hi;
                let decl = P(spanned(lo, hi, DeclItem(i)));
                spanned(lo, hi, StmtDecl(decl, ast::DUMMY_NODE_ID))
            }
            None => {
                // Do not attempt to parse an expression if we're done here.
                if self.token == token::Semi {
                    try!(self.bump());
                    return Ok(None);
                }

                if self.token == token::CloseDelim(token::Brace) {
                    return Ok(None);
                }

                // Remainder are line-expr stmts.
                let e = try!(self.parse_expr_res(
                    Restrictions::RESTRICTION_STMT_EXPR));
                spanned(lo, e.span.hi, StmtExpr(e, ast::DUMMY_NODE_ID))
            }
        }
    }))
}

/// Is this expression a successfully-parsed statement?
fn expr_is_complete(&mut self, e: &Expr) -> bool {
    // Under RESTRICTION_STMT_EXPR, block-like expressions end a statement
    // without needing a trailing semicolon.
    self.restrictions.contains(Restrictions::RESTRICTION_STMT_EXPR) &&
        !classify::expr_requires_semi_to_be_stmt(e)
}

/// Parse a block. No inner attrs are allowed.
pub fn parse_block(&mut self) -> PResult<P<Block>> {
    maybe_whole!(no_clone self, NtBlock);

    let lo = self.span.lo;

    if !try!(self.eat(&token::OpenDelim(token::Brace))) {
        let sp = self.span;
        let tok = self.this_token_to_string();
        return Err(self.span_fatal_help(sp,
                             &format!("expected `{{`, found `{}`", tok),
                             "place this code inside a block"));
    }

    self.parse_block_tail(lo, DefaultBlock)
}

/// Parse a block. Inner attrs are allowed.
fn parse_inner_attrs_and_block(&mut self) -> PResult<(Vec<Attribute>, P<Block>)> {
    maybe_whole!(pair_empty self, NtBlock);

    let lo = self.span.lo;
    try!(self.expect(&token::OpenDelim(token::Brace)));
    Ok((self.parse_inner_attributes(),
        try!(self.parse_block_tail(lo, DefaultBlock))))
}

/// Parse the rest of a block expression or function body
/// Precondition: already parsed the '{'.
fn parse_block_tail(&mut self, lo: BytePos, s: BlockCheckMode) -> PResult<P<Block>> {
    let mut stmts = vec![];
    // A trailing expression (no `;`) becomes the block's value.
    let mut expr = None;

    while !try!(self.eat(&token::CloseDelim(token::Brace))) {
        let Spanned {node, span} = if let Some(s) = try!(self.parse_stmt_()) {
            s
        } else {
            // Found only `;` or `}`.
continue;
        };

        match node {
            StmtExpr(e, _) => {
                try!(self.handle_expression_like_statement(e, span, &mut stmts, &mut expr));
            }
            StmtMac(mac, MacStmtWithoutBraces) => {
                // statement macro without braces; might be an
                // expr depending on whether a semicolon follows
                match self.token {
                    token::Semi => {
                        stmts.push(P(Spanned {
                            node: StmtMac(mac, MacStmtWithSemicolon),
                            span: mk_sp(span.lo, self.span.hi),
                        }));
                        try!(self.bump());
                    }
                    _ => {
                        // Re-interpret the macro as the start of an
                        // expression and keep parsing operators/calls after it.
                        let e = self.mk_mac_expr(span.lo, span.hi,
                                                 mac.and_then(|m| m.node));
                        let e = try!(self.parse_dot_or_call_expr_with(e));
                        let e = try!(self.parse_more_binops(e, 0));
                        let e = try!(self.parse_assign_expr_with(e));
                        try!(self.handle_expression_like_statement(
                            e,
                            span,
                            &mut stmts,
                            &mut expr));
                    }
                }
            }
            StmtMac(m, style) => {
                // statement macro; might be an expr
                match self.token {
                    token::Semi => {
                        stmts.push(P(Spanned {
                            node: StmtMac(m, MacStmtWithSemicolon),
                            span: mk_sp(span.lo, self.span.hi),
                        }));
                        try!(self.bump());
                    }
                    token::CloseDelim(token::Brace) => {
                        // if a block ends in `m!(arg)` without
                        // a `;`, it must be an expr
                        expr = Some(self.mk_mac_expr(span.lo,
                                                     span.hi,
                                                     m.and_then(|x| x.node)));
                    }
                    _ => {
                        stmts.push(P(Spanned {
                            node: StmtMac(m, style),
                            span: span
                        }));
                    }
                }
            }
            _ => { // all other kinds of statements:
                let mut hi = span.hi;
                if classify::stmt_ends_with_semi(&node) {
                    try!(self.commit_stmt_expecting(token::Semi));
                    hi = self.last_span.hi;
                }

                stmts.push(P(Spanned {
                    node: node,
                    span: mk_sp(span.lo, hi)
                }));
            }
        }
    }

    Ok(P(ast::Block {
        stmts: stmts,
        expr: expr,
        id: ast::DUMMY_NODE_ID,
        rules: s,
        span: mk_sp(lo, self.last_span.hi),
    }))
}

/// Record `e` either as a statement (followed by `;` or more code) or as
/// the block's trailing expression (immediately followed by `}`).
fn handle_expression_like_statement(
        &mut self,
        e: P<Expr>,
        span: Span,
        stmts: &mut Vec<P<Stmt>>,
        last_block_expr: &mut Option<P<Expr>>)
        -> PResult<()> {
    // expression without semicolon
    if classify::expr_requires_semi_to_be_stmt(&*e) {
        // Just check for errors and recover; do not eat semicolon yet.
try!(self.commit_stmt(&[], &[token::Semi, token::CloseDelim(token::Brace)]));
    }

    match self.token {
        token::Semi => {
            try!(self.bump());
            let span_with_semi = Span {
                lo: span.lo,
                hi: self.last_span.hi,
                expn_id: span.expn_id,
            };
            stmts.push(P(Spanned {
                node: StmtSemi(e, ast::DUMMY_NODE_ID),
                span: span_with_semi,
            }));
        }
        token::CloseDelim(token::Brace) => *last_block_expr = Some(e),
        _ => {
            stmts.push(P(Spanned {
                node: StmtExpr(e, ast::DUMMY_NODE_ID),
                span: span
            }));
        }
    }
    Ok(())
}

// Parses a sequence of bounds if a `:` is found,
// otherwise returns empty list.
fn parse_colon_then_ty_param_bounds(&mut self,
                                    mode: BoundParsingMode)
                                    -> PResult<OwnedSlice<TyParamBound>>
{
    if !try!(self.eat(&token::Colon)) {
        Ok(OwnedSlice::empty())
    } else {
        self.parse_ty_param_bounds(mode)
    }
}

// matches bounds    = ( boundseq )?
// where   boundseq  = ( polybound + boundseq ) | polybound
// and     polybound = ( 'for' '<' 'region '>' )? bound
// and     bound     = 'region | trait_ref
fn parse_ty_param_bounds(&mut self,
                         mode: BoundParsingMode)
                         -> PResult<OwnedSlice<TyParamBound>>
{
    let mut result = vec!();
    loop {
        let question_span = self.span;
        let ate_question = try!(self.eat(&token::Question));
        match self.token {
            token::Lifetime(lifetime) => {
                if ate_question {
                    // `?` relaxes a trait bound; it has no meaning on a
                    // lifetime bound.
                    self.span_err(question_span,
                                  "`?` may only modify trait bounds, not lifetime bounds");
                }
                result.push(RegionTyParamBound(ast::Lifetime {
                    id: ast::DUMMY_NODE_ID,
                    span: self.span,
                    name: lifetime.name
                }));
                try!(self.bump());
            }
            token::ModSep | token::Ident(..) => {
                let poly_trait_ref = try!(self.parse_poly_trait_ref());
                let modifier = if ate_question {
                    if mode == BoundParsingMode::Modified {
                        TraitBoundModifier::Maybe
                    } else {
                        self.span_err(question_span,
                                      "unexpected `?`");
                        TraitBoundModifier::None
                    }
                } else {
                    TraitBoundModifier::None
                };
                result.push(TraitTyParamBound(poly_trait_ref, modifier))
            }
            _ => break,
        }

        if !try!(self.eat(&token::BinOp(token::Plus))) {
            break;
        }
    }

    return Ok(OwnedSlice::from_vec(result));
}

/// Matches typaram = IDENT (`?` unbound)? optbounds ( EQ ty )?
fn parse_ty_param(&mut self) -> PResult<TyParam> {
    let span = self.span;
    let ident = try!(self.parse_ident());

    let bounds = try!(self.parse_colon_then_ty_param_bounds(BoundParsingMode::Modified));

    // Optional default type: `T = Foo`.
    let default = if self.check(&token::Eq) {
        try!(self.bump());
        Some(try!(self.parse_ty_sum()))
    } else {
        None
    };

    Ok(TyParam {
        ident: ident,
        id: ast::DUMMY_NODE_ID,
        bounds: bounds,
        default: default,
        span: span,
    })
}

/// Parse a set of optional generic type parameter declarations. Where
/// clauses are not parsed here, and must be added later via
/// `parse_where_clause()`.
///
/// matches generics = ( ) | ( < > ) | ( < typaramseq ( , )? > ) | ( < lifetimes ( , )? > )
///                  | ( < lifetimes , typaramseq ( , )? > )
/// where   typaramseq = ( typaram ) | ( typaram , typaramseq )
pub fn parse_generics(&mut self) -> PResult<ast::Generics> {
    maybe_whole!(self, NtGenerics);

    if try!(self.eat(&token::Lt)) {
        let lifetime_defs = try!(self.parse_lifetime_defs());
        let mut seen_default = false;
        let ty_params = try!(self.parse_seq_to_gt(Some(token::Comma), |p| {
            try!(p.forbid_lifetime());
            let ty_param = try!(p.parse_ty_param());
            if ty_param.default.is_some() {
                seen_default = true;
            } else if seen_default {
                // Defaulted params must come last in the list.
                let last_span = p.last_span;
                p.span_err(last_span,
                           "type parameters with a default must be trailing");
            }
            Ok(ty_param)
        }));
        Ok(ast::Generics {
            lifetimes: lifetime_defs,
            ty_params: ty_params,
            where_clause: WhereClause {
                id: ast::DUMMY_NODE_ID,
                predicates: Vec::new(),
            }
        })
    } else {
        Ok(ast_util::empty_generics())
    }
}

/// Parse the lifetimes, types and type bindings of a generic argument
/// list (after the opening `<` has been eaten).
fn parse_generic_values_after_lt(&mut self) -> PResult<(Vec<ast::Lifetime>,
                                                        Vec<P<Ty>>,
                                                        Vec<P<TypeBinding>>)> {
    let span_lo = self.span.lo;
    let lifetimes = try!(self.parse_lifetimes(token::Comma));

    // Detect `&'a Type` written where `'a, Type` was probably meant, so
    // the diagnostic below can be targeted.
    let missing_comma = !lifetimes.is_empty() &&
                        !self.token.is_like_gt() &&
                        self.last_token
                            .as_ref().map_or(true,
                                             |x| &**x != &token::Comma);

    if missing_comma {
        let msg = format!("expected `,` or `>` after lifetime \
                           name, found `{}`",
                          self.this_token_to_string());
self.span_err(self.span, &msg);

        // Try to parse the rest as a type so the note can show the full
        // span of the probable single-argument form.
        let span_hi = self.span.hi;
        let span_hi = if self.parse_ty_nopanic().is_ok() {
            self.span.hi
        } else {
            span_hi
        };

        let msg = format!("did you mean a single argument type &'a Type, \
                           or did you mean the comma-separated arguments \
                           'a, Type?");
        self.span_note(mk_sp(span_lo, span_hi), &msg);
        self.abort_if_errors()
    }

    // First parse types.
    let (types, returned) = try!(self.parse_seq_to_gt_or_return(
        Some(token::Comma),
        |p| {
            try!(p.forbid_lifetime());
            // An `ident =` ahead means the bindings section has started.
            if p.look_ahead(1, |t| t == &token::Eq) {
                Ok(None)
            } else {
                Ok(Some(try!(p.parse_ty_sum())))
            }
        }
    ));

    // If we found the `>`, don't continue.
    if !returned {
        return Ok((lifetimes, types.into_vec(), Vec::new()));
    }

    // Then parse type bindings.
    let bindings = try!(self.parse_seq_to_gt(
        Some(token::Comma),
        |p| {
            try!(p.forbid_lifetime());
            let lo = p.span.lo;
            let ident = try!(p.parse_ident());
            let found_eq = try!(p.eat(&token::Eq));
            if !found_eq {
                let span = p.span;
                p.span_warn(span, "whoops, no =?");
            }
            let ty = try!(p.parse_ty_nopanic());
            let hi = ty.span.hi;
            let span = mk_sp(lo, hi);
            return Ok(P(TypeBinding{id: ast::DUMMY_NODE_ID,
                ident: ident,
                ty: ty,
                span: span,
            }));
        }
    ));
    Ok((lifetimes, types.into_vec(), bindings.into_vec()))
}

/// Error if the current token is a lifetime: lifetime parameters must
/// precede type parameters.
fn forbid_lifetime(&mut self) -> PResult<()> {
    if self.token.is_lifetime() {
        let span = self.span;
        return Err(self.span_fatal(span,
                        "lifetime parameters must be declared \
                        prior to type parameters"))
    }
    Ok(())
}

/// Parses an optional `where` clause and places it in `generics`.
///
/// ```
/// where T : Trait<U, V> + 'b, 'a : 'b
/// ```
pub fn parse_where_clause(&mut self) -> PResult<ast::WhereClause> {
    maybe_whole!(self, NtWhereClause);

    let mut where_clause = WhereClause {
        id: ast::DUMMY_NODE_ID,
        predicates: Vec::new(),
    };

    if !try!(self.eat_keyword(keywords::Where)) {
        return Ok(where_clause);
    }

    let mut parsed_something = false;
    loop {
        let lo = self.span.lo;
        match self.token {
            token::OpenDelim(token::Brace) => {
                break
            }

            token::Lifetime(..)
=> {
                // `'a: 'b + 'c` — a region predicate.
                let bounded_lifetime = try!(self.parse_lifetime());

                try!(self.eat(&token::Colon));

                let bounds = try!(self.parse_lifetimes(token::BinOp(token::Plus)));

                let hi = self.last_span.hi;
                let span = mk_sp(lo, hi);

                where_clause.predicates.push(ast::WherePredicate::RegionPredicate(
                    ast::WhereRegionPredicate {
                        span: span,
                        lifetime: bounded_lifetime,
                        bounds: bounds
                    }
                ));

                parsed_something = true;
            }

            _ => {
                // A type predicate, optionally `for<'a>`-bound.
                let bound_lifetimes = if try!(self.eat_keyword(keywords::For)) {
                    // Higher ranked constraint.
                    try!(self.expect(&token::Lt));
                    let lifetime_defs = try!(self.parse_lifetime_defs());
                    try!(self.expect_gt());
                    lifetime_defs
                } else {
                    vec![]
                };

                let bounded_ty = try!(self.parse_ty_nopanic());

                if try!(self.eat(&token::Colon)) {
                    let bounds = try!(self.parse_ty_param_bounds(BoundParsingMode::Bare));
                    let hi = self.last_span.hi;
                    let span = mk_sp(lo, hi);

                    if bounds.is_empty() {
                        self.span_err(span,
                                      "each predicate in a `where` clause must have \
                                      at least one bound in it");
                    }

                    where_clause.predicates.push(ast::WherePredicate::BoundPredicate(
                            ast::WhereBoundPredicate {
                                span: span,
                                bound_lifetimes: bound_lifetimes,
                                bounded_ty: bounded_ty,
                                bounds: bounds,
                    }));

                    parsed_something = true;
                } else if try!(self.eat(&token::Eq)) {
                    // let ty = try!(self.parse_ty_nopanic());
                    let hi = self.last_span.hi;
                    let span = mk_sp(lo, hi);
                    // where_clause.predicates.push(
                    //     ast::WherePredicate::EqPredicate(ast::WhereEqPredicate {
                    //         id: ast::DUMMY_NODE_ID,
                    //         span: span,
                    //         path: panic!("NYI"), //bounded_ty,
                    //         ty: ty,
                    // }));
                    // parsed_something = true;
                    // // FIXME(#18433)
                    self.span_err(span,
                                  "equality constraints are not yet supported \
                                  in where clauses (#20041)");
                } else {
                    let last_span = self.last_span;
                    self.span_err(last_span,
                          "unexpected token in `where` clause");
                }
            }
        };

        if !try!(self.eat(&token::Comma)) {
            break
        }
    }

    if !parsed_something {
        let last_span = self.last_span;
        self.span_err(last_span,
                      "a `where` clause must have at least one predicate \
                      in it");
    }

    Ok(where_clause)
}

/// Parse a parenthesized function argument list; `named_args` selects
/// named vs anonymous arguments, `allow_variadic` permits a trailing
/// `...` (foreign functions only).
fn parse_fn_args(&mut self,
named_args: bool,
                 allow_variadic: bool) -> PResult<(Vec<Arg> , bool)> {
    let sp = self.span;
    let mut args: Vec<Option<Arg>> =
        try!(self.parse_unspanned_seq(
            &token::OpenDelim(token::Paren),
            &token::CloseDelim(token::Paren),
            seq_sep_trailing_allowed(token::Comma),
            |p| {
                if p.token == token::DotDotDot {
                    try!(p.bump());
                    if allow_variadic {
                        if p.token != token::CloseDelim(token::Paren) {
                            let span = p.span;
                            return Err(p.span_fatal(span,
                                "`...` must be last in argument list for variadic function"))
                        }
                    } else {
                        let span = p.span;
                        return Err(p.span_fatal(span,
                                     "only foreign functions are allowed to be variadic"))
                    }
                    // `None` marks the variadic slot.
                    Ok(None)
                } else {
                    Ok(Some(try!(p.parse_arg_general(named_args))))
                }
            }
        ));

    let variadic = match args.pop() {
        Some(None) => true,
        Some(x) => {
            // Need to put back that last arg
            args.push(x);
            false
        }
        None => false
    };

    if variadic && args.is_empty() {
        self.span_err(sp,
                      "variadic function must be declared with at least one named argument");
    }

    let args = args.into_iter().map(|x| x.unwrap()).collect();

    Ok((args, variadic))
}

/// Parse the argument list and result type of a function declaration
pub fn parse_fn_decl(&mut self, allow_variadic: bool) -> PResult<P<FnDecl>> {

    let (args, variadic) = try!(self.parse_fn_args(true, allow_variadic));
    let ret_ty = try!(self.parse_ret_ty());

    Ok(P(FnDecl {
        inputs: args,
        output: ret_ty,
        variadic: variadic
    }))
}

/// Is the current token the plain identifier `self`?
fn is_self_ident(&mut self) -> bool {
    match self.token {
      token::Ident(id, token::Plain) => id.name == special_idents::self_.name,
      _ => false
    }
}

/// Consume the `self` identifier, or report a fatal error.
fn expect_self_ident(&mut self) -> PResult<ast::Ident> {
    match self.token {
        token::Ident(id, token::Plain) if id.name == special_idents::self_.name => {
            try!(self.bump());
            Ok(id)
        },
        _ => {
            let token_str = self.this_token_to_string();
            return Err(self.fatal(&format!("expected `self`, found `{}`",
                                           token_str)))
        }
    }
}

/// Is the current token the plain identifier `Self`?
fn is_self_type_ident(&mut self) -> bool {
    match self.token {
      token::Ident(id, token::Plain) => id.name == special_idents::type_self.name,
      _ => false
    }
}

fn
expect_self_type_ident(&mut self) -> PResult<ast::Ident> {
    match self.token {
        token::Ident(id, token::Plain) if id.name == special_idents::type_self.name => {
            try!(self.bump());
            Ok(id)
        },
        _ => {
            let token_str = self.this_token_to_string();
            Err(self.fatal(&format!("expected `Self`, found `{}`",
                                    token_str)))
        }
    }
}

/// Parse the argument list and result type of a function
/// that may have a self type.
fn parse_fn_decl_with_self<F>(&mut self,
                              parse_arg_fn: F) -> PResult<(ExplicitSelf, P<FnDecl>)> where
    F: FnMut(&mut Parser) -> PResult<Arg>,
{
    // Recognizes the `&self` family of receivers; returns SelfStatic when
    // the `&` turns out not to introduce a receiver.
    fn maybe_parse_borrowed_explicit_self(this: &mut Parser)
                                          -> PResult<ast::ExplicitSelf_> {
        // The following things are possible to see here:
        //
        //     fn(&mut self)
        //     fn(&mut self)
        //     fn(&'lt self)
        //     fn(&'lt mut self)
        //
        // We already know that the current token is `&`.

        if this.look_ahead(1, |t| t.is_keyword(keywords::SelfValue)) {
            try!(this.bump());
            Ok(SelfRegion(None, MutImmutable, try!(this.expect_self_ident())))
        } else if this.look_ahead(1, |t| t.is_mutability()) &&
                  this.look_ahead(2, |t| t.is_keyword(keywords::SelfValue)) {
            try!(this.bump());
            let mutability = try!(this.parse_mutability());
            Ok(SelfRegion(None, mutability, try!(this.expect_self_ident())))
        } else if this.look_ahead(1, |t| t.is_lifetime()) &&
                  this.look_ahead(2, |t| t.is_keyword(keywords::SelfValue)) {
            try!(this.bump());
            let lifetime = try!(this.parse_lifetime());
            Ok(SelfRegion(Some(lifetime), MutImmutable, try!(this.expect_self_ident())))
        } else if this.look_ahead(1, |t| t.is_lifetime()) &&
                  this.look_ahead(2, |t| t.is_mutability()) &&
                  this.look_ahead(3, |t| t.is_keyword(keywords::SelfValue)) {
            try!(this.bump());
            let lifetime = try!(this.parse_lifetime());
            let mutability = try!(this.parse_mutability());
            Ok(SelfRegion(Some(lifetime), mutability, try!(this.expect_self_ident())))
        } else {
            Ok(SelfStatic)
        }
    }

    try!(self.expect(&token::OpenDelim(token::Paren)));

    // A bit of complexity and lookahead is needed here in order to be
    // backwards compatible.
let lo = self.span.lo; let mut self_ident_lo = self.span.lo; let mut self_ident_hi = self.span.hi; let mut mutbl_self = MutImmutable; let explicit_self = match self.token { token::BinOp(token::And) => { let eself = try!(maybe_parse_borrowed_explicit_self(self)); self_ident_lo = self.last_span.lo; self_ident_hi = self.last_span.hi; eself } token::BinOp(token::Star) => { // Possibly "*self" or "*mut self" -- not supported. Try to avoid // emitting cryptic "unexpected token" errors. try!(self.bump()); let _mutability = if self.token.is_mutability() { try!(self.parse_mutability()) } else { MutImmutable }; if self.is_self_ident() { let span = self.span; self.span_err(span, "cannot pass self by raw pointer"); try!(self.bump()); } // error case, making bogus self ident: SelfValue(special_idents::self_) } token::Ident(..) => { if self.is_self_ident() { let self_ident = try!(self.expect_self_ident()); // Determine whether this is the fully explicit form, `self: // TYPE`. if try!(self.eat(&token::Colon) ){ SelfExplicit(try!(self.parse_ty_sum()), self_ident) } else { SelfValue(self_ident) } } else if self.token.is_mutability() && self.look_ahead(1, |t| t.is_keyword(keywords::SelfValue)) { mutbl_self = try!(self.parse_mutability()); let self_ident = try!(self.expect_self_ident()); // Determine whether this is the fully explicit form, // `self: TYPE`. if try!(self.eat(&token::Colon) ){ SelfExplicit(try!(self.parse_ty_sum()), self_ident) } else { SelfValue(self_ident) } } else { SelfStatic } } _ => SelfStatic, }; let explicit_self_sp = mk_sp(self_ident_lo, self_ident_hi); // shared fall-through for the three cases below. borrowing prevents simply // writing this as a closure macro_rules! parse_remaining_arguments { ($self_id:ident) => { // If we parsed a self type, expect a comma before the argument list. 
match self.token { token::Comma => { try!(self.bump()); let sep = seq_sep_trailing_allowed(token::Comma); let mut fn_inputs = try!(self.parse_seq_to_before_end( &token::CloseDelim(token::Paren), sep, parse_arg_fn )); fn_inputs.insert(0, Arg::new_self(explicit_self_sp, mutbl_self, $self_id)); fn_inputs } token::CloseDelim(token::Paren) => { vec!(Arg::new_self(explicit_self_sp, mutbl_self, $self_id)) } _ => { let token_str = self.this_token_to_string(); return Err(self.fatal(&format!("expected `,` or `)`, found `{}`", token_str))) } } } } let fn_inputs = match explicit_self { SelfStatic => { let sep = seq_sep_trailing_allowed(token::Comma); try!(self.parse_seq_to_before_end(&token::CloseDelim(token::Paren), sep, parse_arg_fn)) } SelfValue(id) => parse_remaining_arguments!(id), SelfRegion(_,_,id) => parse_remaining_arguments!(id), SelfExplicit(_,id) => parse_remaining_arguments!(id), }; try!(self.expect(&token::CloseDelim(token::Paren))); let hi = self.span.hi; let ret_ty = try!(self.parse_ret_ty()); let fn_decl = P(FnDecl { inputs: fn_inputs, output: ret_ty, variadic: false }); Ok((spanned(lo, hi, explicit_self), fn_decl)) } // parse the |arg, arg| header on a lambda fn parse_fn_block_decl(&mut self) -> PResult<P<FnDecl>> { let inputs_captures = { if try!(self.eat(&token::OrOr) ){ Vec::new() } else { try!(self.expect(&token::BinOp(token::Or))); try!(self.parse_obsolete_closure_kind()); let args = try!(self.parse_seq_to_before_end( &token::BinOp(token::Or), seq_sep_trailing_allowed(token::Comma), |p| p.parse_fn_block_arg() )); try!(self.bump()); args } }; let output = try!(self.parse_ret_ty()); Ok(P(FnDecl { inputs: inputs_captures, output: output, variadic: false })) } /// Parse the name and optional generic types of a function header. 
fn parse_fn_header(&mut self) -> PResult<(Ident, ast::Generics)> {
    let id = try!(self.parse_ident());
    let generics = try!(self.parse_generics());
    Ok((id, generics))
}

// Helper: assemble a `P<Item>` covering the span [lo, hi] from its parts.
// The node id is left as DUMMY_NODE_ID; it is assigned later, after parsing.
fn mk_item(&mut self, lo: BytePos, hi: BytePos, ident: Ident,
           node: Item_, vis: Visibility,
           attrs: Vec<Attribute>) -> P<Item> {
    P(Item {
        ident: ident,
        attrs: attrs,
        id: ast::DUMMY_NODE_ID,
        node: node,
        vis: vis,
        span: mk_sp(lo, hi)
    })
}

/// Parse an item-position function declaration.
fn parse_item_fn(&mut self,
                 unsafety: Unsafety,
                 constness: Constness,
                 abi: abi::Abi)
                 -> PResult<ItemInfo> {
    let (ident, mut generics) = try!(self.parse_fn_header());
    let decl = try!(self.parse_fn_decl(false));
    // The where clause, if any, comes after the generics and before the body.
    generics.where_clause = try!(self.parse_where_clause());
    let (inner_attrs, body) = try!(self.parse_inner_attrs_and_block());
    Ok((ident, ItemFn(decl, unsafety, constness, abi, generics, body), Some(inner_attrs)))
}

/// true if we are looking at `const ID`, false for things like `const fn` etc
pub fn is_const_item(&mut self) -> bool {
    self.token.is_keyword(keywords::Const) &&
        !self.look_ahead(1, |t| t.is_keyword(keywords::Fn))
}

/// parses all the "front matter" for a `fn` declaration, up to
/// and including the `fn` keyword:
///
/// - `const fn`
/// - `unsafe fn`
/// - `extern fn`
/// - etc
pub fn parse_fn_front_matter(&mut self)
                             -> PResult<(ast::Constness, ast::Unsafety, abi::Abi)> {
    let unsafety = try!(self.parse_unsafety());
    let is_const_fn = try!(self.eat_keyword(keywords::Const));
    let (constness, unsafety, abi) = if is_const_fn {
        // `const fn` takes no explicit ABI: this branch fixes it to abi::Rust.
        (Constness::Const, unsafety, abi::Rust)
    } else {
        let abi = if try!(self.eat_keyword(keywords::Extern)) {
            // A bare `extern fn` (no ABI string) defaults to the C ABI.
            try!(self.parse_opt_abi()).unwrap_or(abi::C)
        } else {
            abi::Rust
        };
        (Constness::NotConst, unsafety, abi)
    };
    try!(self.expect_keyword(keywords::Fn));
    Ok((constness, unsafety, abi))
}

/// Parse an impl item.
pub fn parse_impl_item(&mut self) -> PResult<P<ImplItem>> {
    maybe_whole!(no_clone self, NtImplItem);

    let mut attrs = self.parse_outer_attributes();
    let lo = self.span.lo;
    let vis = try!(self.parse_visibility());
    // Three possible forms: an associated `type`, an associated `const`,
    // or (the fallback) a method / macro invocation.
    let (name, node) = if try!(self.eat_keyword(keywords::Type)) {
        let name = try!(self.parse_ident());
        try!(self.expect(&token::Eq));
        let typ = try!(self.parse_ty_sum());
        try!(self.expect(&token::Semi));
        (name, TypeImplItem(typ))
    } else if self.is_const_item() {
        try!(self.expect_keyword(keywords::Const));
        let name = try!(self.parse_ident());
        try!(self.expect(&token::Colon));
        let typ = try!(self.parse_ty_sum());
        try!(self.expect(&token::Eq));
        let expr = try!(self.parse_expr_nopanic());
        try!(self.commit_expr_expecting(&expr, token::Semi));
        (name, ConstImplItem(typ, expr))
    } else {
        let (name, inner_attrs, node) = try!(self.parse_impl_method(vis));
        attrs.extend(inner_attrs);
        (name, node)
    };

    Ok(P(ImplItem {
        id: ast::DUMMY_NODE_ID,
        span: mk_sp(lo, self.last_span.hi),
        ident: name,
        vis: vis,
        attrs: attrs,
        node: node
    }))
}

// Emit an error (plus a help note) when a macro invocation in item position
// was qualified with `pub`, which is not allowed.
fn complain_if_pub_macro(&mut self, visa: Visibility, span: Span) {
    match visa {
        Public => {
            self.span_err(span, "can't qualify macro invocation with `pub`");
            self.fileline_help(span, "try adjusting the macro to put `pub` inside \
                                      the invocation");
        }
        Inherited => (),
    }
}

/// Parse a method or a macro invocation in a trait impl.
fn parse_impl_method(&mut self, vis: Visibility)
                     -> PResult<(Ident, Vec<ast::Attribute>, ast::ImplItem_)> {
    // code copied from parse_macro_use_or_failure... abstraction!
    if !self.token.is_any_keyword()
        && self.look_ahead(1, |t| *t == token::Not)
        && (self.look_ahead(2, |t| *t == token::OpenDelim(token::Paren))
            || self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))) {
        // method macro.

        let last_span = self.last_span;
        self.complain_if_pub_macro(vis, last_span);

        let pth = try!(self.parse_path(NoTypesAllowed));
        try!(self.expect(&token::Not));

        // eat a matched-delimiter token tree:
        let delim = try!(self.expect_open_delim());
        let tts = try!(self.parse_seq_to_end(&token::CloseDelim(delim),
                                             seq_sep_none(),
                                             |p| p.parse_token_tree()));
        let m_ = Mac_ { path: pth, tts: tts, ctxt: EMPTY_CTXT };
        let m: ast::Mac = codemap::Spanned { node: m_,
                                             span: mk_sp(self.span.lo,
                                                         self.span.hi) };
        // A brace-delimited macro needs no trailing semicolon.
        if delim != token::Brace {
            try!(self.expect(&token::Semi))
        }
        Ok((token::special_idents::invalid, vec![], ast::MacImplItem(m)))
    } else {
        let (constness, unsafety, abi) = try!(self.parse_fn_front_matter());
        let ident = try!(self.parse_ident());
        let mut generics = try!(self.parse_generics());
        let (explicit_self, decl) = try!(self.parse_fn_decl_with_self(|p| {
            p.parse_arg()
        }));
        generics.where_clause = try!(self.parse_where_clause());
        let (inner_attrs, body) = try!(self.parse_inner_attrs_and_block());
        Ok((ident, inner_attrs, MethodImplItem(ast::MethodSig {
            generics: generics,
            abi: abi,
            explicit_self: explicit_self,
            unsafety: unsafety,
            constness: constness,
            decl: decl
        }, body)))
    }
}

/// Parse trait Foo { ... }
fn parse_item_trait(&mut self, unsafety: Unsafety) -> PResult<ItemInfo> {
    let ident = try!(self.parse_ident());
    let mut tps = try!(self.parse_generics());

    // Parse supertrait bounds.
    let bounds = try!(self.parse_colon_then_ty_param_bounds(BoundParsingMode::Bare));

    tps.where_clause = try!(self.parse_where_clause());

    let meths = try!(self.parse_trait_items());
    Ok((ident, ItemTrait(unsafety, tps, bounds, meths), None))
}

/// Parses items implementations variants
///    impl<T> Foo { ... }
///    impl<T> ToString for &'static T { ... }
///    impl Send for .. {}
fn parse_item_impl(&mut self, unsafety: ast::Unsafety) -> PResult<ItemInfo> {
    let impl_span = self.span;

    // First, parse type parameters if necessary.
    let mut generics = try!(self.parse_generics());

    // Special case: if the next identifier that follows is '(', don't
    // allow this to be parsed as a trait.
    let could_be_trait = self.token != token::OpenDelim(token::Paren);

    let neg_span = self.span;
    let polarity = if try!(self.eat(&token::Not)) {
        ast::ImplPolarity::Negative
    } else {
        ast::ImplPolarity::Positive
    };

    // Parse the trait.
    let mut ty = try!(self.parse_ty_sum());

    // Parse traits, if necessary.
    let opt_trait = if could_be_trait && try!(self.eat_keyword(keywords::For)) {
        // New-style trait. Reinterpret the type as a trait.
        match ty.node {
            TyPath(None, ref path) => {
                Some(TraitRef {
                    path: (*path).clone(),
                    ref_id: ty.id,
                })
            }
            _ => {
                self.span_err(ty.span, "not a trait");
                None
            }
        }
    } else {
        match polarity {
            ast::ImplPolarity::Negative => {
                // This is a negated type implementation
                // `impl !MyType {}`, which is not allowed.
                self.span_err(neg_span, "inherent implementation can't be negated");
            },
            _ => {}
        }
        None
    };

    // `impl Trait for .. {}` is a default (blanket) trait implementation.
    if opt_trait.is_some() && try!(self.eat(&token::DotDot)) {
        if generics.is_parameterized() {
            self.span_err(impl_span, "default trait implementations are not \
                                      allowed to have generics");
        }
        try!(self.expect(&token::OpenDelim(token::Brace)));
        try!(self.expect(&token::CloseDelim(token::Brace)));
        Ok((ast_util::impl_pretty_name(&opt_trait, None),
            ItemDefaultImpl(unsafety, opt_trait.unwrap()), None))
    } else {
        if opt_trait.is_some() {
            // The type parsed above was the trait; the self type follows `for`.
            ty = try!(self.parse_ty_sum());
        }
        generics.where_clause = try!(self.parse_where_clause());

        try!(self.expect(&token::OpenDelim(token::Brace)));
        let attrs = self.parse_inner_attributes();

        let mut impl_items = vec![];
        while !try!(self.eat(&token::CloseDelim(token::Brace))) {
            impl_items.push(try!(self.parse_impl_item()));
        }

        Ok((ast_util::impl_pretty_name(&opt_trait, Some(&*ty)),
            ItemImpl(unsafety, polarity, generics, opt_trait, ty, impl_items),
            Some(attrs)))
    }
}

/// Parse a::B<String,i32>
fn parse_trait_ref(&mut self) -> PResult<TraitRef> {
    Ok(ast::TraitRef {
        path: try!(self.parse_path(LifetimeAndTypesWithoutColons)),
        ref_id: ast::DUMMY_NODE_ID,
    })
}

// Parse the optional `for<'a, 'b>` higher-ranked lifetime binder; returns an
// empty vec when no `for` keyword is present.
fn parse_late_bound_lifetime_defs(&mut self) -> PResult<Vec<ast::LifetimeDef>> {
    if try!(self.eat_keyword(keywords::For)) {
        try!(self.expect(&token::Lt));
        let lifetime_defs = try!(self.parse_lifetime_defs());
        try!(self.expect_gt());
        Ok(lifetime_defs)
    } else {
        Ok(Vec::new())
    }
}

/// Parse for<'l> a::B<String,i32>
fn parse_poly_trait_ref(&mut self) -> PResult<PolyTraitRef> {
    let lo = self.span.lo;
    let lifetime_defs = try!(self.parse_late_bound_lifetime_defs());

    Ok(ast::PolyTraitRef {
        bound_lifetimes: lifetime_defs,
        trait_ref: try!(self.parse_trait_ref()),
        span: mk_sp(lo, self.last_span.hi),
    })
}

/// Parse struct Foo { ... }
fn parse_item_struct(&mut self) -> PResult<ItemInfo> {
    let class_name = try!(self.parse_ident());
    let mut generics = try!(self.parse_generics());

    // There is a special case worth noting here, as reported in issue #17904.
    // If we are parsing a tuple struct it is the case that the where clause
    // should follow the field list. Like so:
    //
    // struct Foo<T>(T) where T: Copy;
    //
    // If we are parsing a normal record-style struct it is the case
    // that the where clause comes before the body, and after the generics.
    // So if we look ahead and see a brace or a where-clause we begin
    // parsing a record style struct.
    //
    // Otherwise if we look ahead and see a paren we parse a tuple-style
    // struct.

    let vdata = if self.token.is_keyword(keywords::Where) {
        generics.where_clause = try!(self.parse_where_clause());
        if try!(self.eat(&token::Semi)) {
            // If we see a: `struct Foo<T> where T: Copy;` style decl.
            VariantData::Unit(ast::DUMMY_NODE_ID)
        } else {
            // If we see: `struct Foo<T> where T: Copy { ... }`
            VariantData::Struct(try!(self.parse_record_struct_body()),
                                ast::DUMMY_NODE_ID)
        }
    // No `where` so: `struct Foo<T>;`
    } else if try!(self.eat(&token::Semi)) {
        VariantData::Unit(ast::DUMMY_NODE_ID)
    // Record-style struct definition
    } else if self.token == token::OpenDelim(token::Brace) {
        VariantData::Struct(try!(self.parse_record_struct_body()),
                            ast::DUMMY_NODE_ID)
    // Tuple-style struct definition with optional where-clause.
    } else if self.token == token::OpenDelim(token::Paren) {
        VariantData::Tuple(try!(self.parse_tuple_struct_body(&mut generics)),
                           ast::DUMMY_NODE_ID)
    } else {
        let token_str = self.this_token_to_string();
        return Err(self.fatal(&format!("expected `where`, `{{`, `(`, or `;` after struct \
                                        name, found `{}`", token_str)))
    };

    Ok((class_name, ItemStruct(P(vdata), generics), None))
}

// Parse the `{ field: Ty, ... }` body of a record-style struct. Assumes the
// caller positioned us at (or just before) the opening brace.
pub fn parse_record_struct_body(&mut self) -> PResult<Vec<StructField>> {
    let mut fields = Vec::new();
    if try!(self.eat(&token::OpenDelim(token::Brace))) {
        while self.token != token::CloseDelim(token::Brace) {
            fields.push(try!(self.parse_struct_decl_field(true)));
        }

        try!(self.bump());
    } else {
        let token_str = self.this_token_to_string();
        return Err(self.fatal(&format!("expected `where`, or `{{` after struct \
                                        name, found `{}`", token_str)));
    }

    Ok(fields)
}

pub fn parse_tuple_struct_body(&mut self,
                               generics: &mut ast::Generics)
                               -> PResult<Vec<StructField>> {
    // This is the case where we find `struct Foo<T>(T) where T: Copy;`
    // Unit like structs are handled in parse_item_struct function
    let fields = try!(self.parse_unspanned_seq(
        &token::OpenDelim(token::Paren),
        &token::CloseDelim(token::Paren),
        seq_sep_trailing_allowed(token::Comma),
        |p| {
            let attrs = p.parse_outer_attributes();
            let lo = p.span.lo;
            let struct_field_ = ast::StructField_ {
                kind: UnnamedField(try!(p.parse_visibility())),
                id: ast::DUMMY_NODE_ID,
                ty: try!(p.parse_ty_sum()),
                attrs: attrs,
            };
            Ok(spanned(lo, p.span.hi, struct_field_))
        }));

    // For tuple structs the where clause follows the field list.
    generics.where_clause = try!(self.parse_where_clause());
    try!(self.expect(&token::Semi));
    Ok(fields)
}

/// Parse a structure field declaration
pub fn parse_single_struct_field(&mut self,
                                 vis: Visibility,
                                 attrs: Vec<Attribute>)
                                 -> PResult<StructField> {
    let a_var = try!(self.parse_name_and_ty(vis, attrs));
    match self.token {
        token::Comma => {
            try!(self.bump());
        }
        token::CloseDelim(token::Brace) => {}
        _ => {
            let span = self.span;
            let token_str = self.this_token_to_string();
            return Err(self.span_fatal_help(span,
                                            &format!("expected `,`, or `}}`, found `{}`",
                                                     token_str),
                                            "struct fields should be separated by commas"))
        }
    }
    Ok(a_var)
}

/// Parse an element of a struct definition
fn parse_struct_decl_field(&mut self, allow_pub: bool) -> PResult<StructField> {
    let attrs = self.parse_outer_attributes();

    if try!(self.eat_keyword(keywords::Pub)) {
        if !allow_pub {
            let span = self.last_span;
            self.span_err(span, "`pub` is not allowed here");
        }
        return self.parse_single_struct_field(Public, attrs);
    }

    return self.parse_single_struct_field(Inherited, attrs);
}

/// Parse visibility: PUB or nothing
fn parse_visibility(&mut self) -> PResult<Visibility> {
    if try!(self.eat_keyword(keywords::Pub)) { Ok(Public) }
    else { Ok(Inherited) }
}

/// Given a termination token, parse all of the items in a module
fn parse_mod_items(&mut self, term: &token::Token, inner_lo: BytePos) -> PResult<Mod> {
    let mut items = vec![];
    while let Some(item) = try!(self.parse_item_nopanic()) {
        items.push(item);
    }

    if !try!(self.eat(term)) {
        let token_str = self.this_token_to_string();
        return Err(self.fatal(&format!("expected item, found `{}`", token_str)));
    }

    let hi = if self.span == codemap::DUMMY_SP {
        inner_lo
    } else {
        self.last_span.hi
    };

    Ok(ast::Mod {
        inner: mk_sp(inner_lo, hi),
        items: items
    })
}

// Parse the body of a `const` (`m == None`) or `static` (`m == Some(mutbl)`)
// item, after the introducing keyword has been consumed.
fn parse_item_const(&mut self, m: Option<Mutability>) -> PResult<ItemInfo> {
    let id = try!(self.parse_ident());
    try!(self.expect(&token::Colon));
    let ty = try!(self.parse_ty_sum());
    try!(self.expect(&token::Eq));
    let e = try!(self.parse_expr_nopanic());
    try!(self.commit_expr_expecting(&*e, token::Semi));
    let item = match m {
        Some(m) => ItemStatic(ty, m, e),
        None => ItemConst(ty, e),
    };
    Ok((id, item, None))
}

/// Parse a `mod <foo> { ... }` or `mod <foo>;` item
fn parse_item_mod(&mut self, outer_attrs: &[Attribute]) -> PResult<ItemInfo> {
    let id_span = self.span;
    let id = try!(self.parse_ident());
    if self.check(&token::Semi) {
        try!(self.bump());
        // This mod is in an external file. Let's go get it!
        let (m, attrs) = try!(self.eval_src_mod(id, outer_attrs, id_span));
        Ok((id, m, Some(attrs)))
    } else {
        self.push_mod_path(id, outer_attrs);
        try!(self.expect(&token::OpenDelim(token::Brace)));
        let mod_inner_lo = self.span.lo;
        let old_owns_directory = self.owns_directory;
        self.owns_directory = true;
        let attrs = self.parse_inner_attributes();
        let m = try!(self.parse_mod_items(&token::CloseDelim(token::Brace), mod_inner_lo));
        self.owns_directory = old_owns_directory;
        self.pop_mod_path();
        Ok((id, ItemMod(m), Some(attrs)))
    }
}

// Record the on-disk path component for a module, honoring an explicit
// `#[path = "..."]` attribute when present.
fn push_mod_path(&mut self, id: Ident, attrs: &[Attribute]) {
    let default_path = self.id_to_interned_str(id);
    let file_path = match ::attr::first_attr_value_str_by_name(attrs, "path") {
        Some(d) => d,
        None => default_path,
    };
    self.mod_path_stack.push(file_path)
}

fn pop_mod_path(&mut self) {
    self.mod_path_stack.pop().unwrap();
}

// Resolve an explicit `#[path = "..."]` attribute to a file path relative to
// `dir_path`, if the attribute is present.
pub fn submod_path_from_attr(attrs: &[ast::Attribute], dir_path: &Path) -> Option<PathBuf> {
    ::attr::first_attr_value_str_by_name(attrs, "path").map(|d| dir_path.join(&*d))
}

/// Returns either a path to a module, or an error describing why the module
/// file could not be resolved (packaged inside the returned `ModulePath`).
pub fn default_submod_path(id: ast::Ident, dir_path: &Path, codemap: &CodeMap) -> ModulePath {
    let mod_name = id.to_string();
    // A module `foo` may live in either `foo.rs` or `foo/mod.rs`.
    let default_path_str = format!("{}.rs", mod_name);
    let secondary_path_str = format!("{}/mod.rs", mod_name);
    let default_path = dir_path.join(&default_path_str);
    let secondary_path = dir_path.join(&secondary_path_str);
    let default_exists = codemap.file_exists(&default_path);
    let secondary_exists = codemap.file_exists(&secondary_path);

    // Exactly one of the two candidate files must exist; zero or both is an error.
    let result = match (default_exists, secondary_exists) {
        (true, false) => Ok(ModulePathSuccess { path: default_path, owns_directory: false }),
        (false, true) => Ok(ModulePathSuccess { path: secondary_path, owns_directory: true }),
        (false, false) => Err(ModulePathError {
            err_msg: format!("file not found for module `{}`", mod_name),
            help_msg: format!("name the file either {} or {} inside the directory {:?}",
                              default_path_str,
                              secondary_path_str,
                              dir_path.display()),
        }),
        (true, true) => Err(ModulePathError {
            err_msg: format!("file for module `{}` found at both {} and {}",
                             mod_name,
                             default_path_str,
                             secondary_path_str),
            help_msg: "delete or rename one of them to remove the ambiguity".to_owned(),
        }),
    };

    ModulePath {
        name: mod_name,
        path_exists: default_exists || secondary_exists,
        result: result,
    }
}

// Compute the filesystem path of an out-of-line module declaration, taking
// `#[path]` attributes and the current module-path stack into account.
// Errors (with notes) when a module is declared somewhere that does not own
// its directory.
fn submod_path(&mut self,
               id: ast::Ident,
               outer_attrs: &[ast::Attribute],
               id_sp: Span) -> PResult<ModulePathSuccess> {
    let mut prefix = PathBuf::from(&self.sess.codemap().span_to_filename(self.span));
    prefix.pop();
    let mut dir_path = prefix;
    for part in &self.mod_path_stack {
        dir_path.push(&**part);
    }

    // An explicit #[path] attribute short-circuits the default lookup.
    if let Some(p) = Parser::submod_path_from_attr(outer_attrs, &dir_path) {
        return Ok(ModulePathSuccess { path: p, owns_directory: true });
    }

    let paths = Parser::default_submod_path(id, &dir_path, self.sess.codemap());

    if !self.owns_directory {
        self.span_err(id_sp, "cannot declare a new module at this location");
        let this_module = match self.mod_path_stack.last() {
            Some(name) => name.to_string(),
            None => self.root_module_name.as_ref().unwrap().clone(),
        };
        self.span_note(id_sp,
                       &format!("maybe move this module `{0}` to its own directory \
                                 via `{0}/mod.rs`",
                                this_module));
        if paths.path_exists {
            self.span_note(id_sp,
                           &format!("... or maybe `use` the module `{}` instead \
                                     of possibly redeclaring it",
                                    paths.name));
        }
        self.abort_if_errors();
    }

    match paths.result {
        Ok(succ) => Ok(succ),
        Err(err) => Err(self.span_fatal_help(id_sp, &err.err_msg, &err.help_msg)),
    }
}

/// Read a module from a source file.
fn eval_src_mod(&mut self,
                id: ast::Ident,
                outer_attrs: &[ast::Attribute],
                id_sp: Span)
                -> PResult<(ast::Item_, Vec<ast::Attribute>)> {
    let ModulePathSuccess { path, owns_directory } = try!(self.submod_path(id,
                                                                           outer_attrs,
                                                                           id_sp));

    self.eval_src_mod_from_path(path, owns_directory, id.to_string(), id_sp)
}

// Parse an external module file at `path` with a fresh sub-parser, guarding
// against `mod` cycles via the session's included_mod_stack.
fn eval_src_mod_from_path(&mut self,
                          path: PathBuf,
                          owns_directory: bool,
                          name: String,
                          id_sp: Span)
                          -> PResult<(ast::Item_, Vec<ast::Attribute>)> {
    let mut included_mod_stack = self.sess.included_mod_stack.borrow_mut();
    // If the file is already on the inclusion stack we have a cycle:
    // report the chain of modules involved.
    match included_mod_stack.iter().position(|p| *p == path) {
        Some(i) => {
            let mut err = String::from("circular modules: ");
            let len = included_mod_stack.len();
            for p in &included_mod_stack[i..len] {
                err.push_str(&p.to_string_lossy());
                err.push_str(" -> ");
            }
            err.push_str(&path.to_string_lossy());
            return Err(self.span_fatal(id_sp, &err[..]));
        }
        None => ()
    }
    included_mod_stack.push(path.clone());
    // Release the borrow before recursing into the sub-parser, which may
    // itself need to borrow the stack.
    drop(included_mod_stack);

    let mut p0 = new_sub_parser_from_file(self.sess,
                                          self.cfg.clone(),
                                          &path,
                                          owns_directory,
                                          Some(name),
                                          id_sp);
    let mod_inner_lo = p0.span.lo;
    let mod_attrs = p0.parse_inner_attributes();
    let m0 = try!(p0.parse_mod_items(&token::Eof, mod_inner_lo));
    self.sess.included_mod_stack.borrow_mut().pop();
    Ok((ast::ItemMod(m0), mod_attrs))
}

/// Parse a function declaration from a foreign module
fn parse_item_foreign_fn(&mut self, vis: ast::Visibility, lo: BytePos,
                         attrs: Vec<Attribute>) -> PResult<P<ForeignItem>> {
    try!(self.expect_keyword(keywords::Fn));

    let (ident, mut generics) = try!(self.parse_fn_header());
    // Foreign functions may be variadic, hence `parse_fn_decl(true)`.
    let decl = try!(self.parse_fn_decl(true));
    generics.where_clause = try!(self.parse_where_clause());
    let hi = self.span.hi;
    try!(self.expect(&token::Semi));
    Ok(P(ast::ForeignItem {
        ident: ident,
        attrs: attrs,
        node: ForeignItemFn(decl, generics),
        id: ast::DUMMY_NODE_ID,
        span: mk_sp(lo, hi),
        vis: vis
    }))
}

/// Parse a static item from a foreign module
fn parse_item_foreign_static(&mut self, vis: ast::Visibility, lo: BytePos,
                             attrs: Vec<Attribute>) -> PResult<P<ForeignItem>> {
    try!(self.expect_keyword(keywords::Static));
    let mutbl = try!(self.eat_keyword(keywords::Mut));

    let ident = try!(self.parse_ident());
    try!(self.expect(&token::Colon));
    let ty = try!(self.parse_ty_sum());
    let hi = self.span.hi;
    try!(self.expect(&token::Semi));
    Ok(P(ForeignItem {
        ident: ident,
        attrs: attrs,
        node: ForeignItemStatic(ty, mutbl),
        id: ast::DUMMY_NODE_ID,
        span: mk_sp(lo, hi),
        vis: vis
    }))
}

/// Parse extern crate links
///
/// # Examples
///
/// extern crate foo;
/// extern crate bar as foo;
fn parse_item_extern_crate(&mut self,
                           lo: BytePos,
                           visibility: Visibility,
                           attrs: Vec<Attribute>)
                           -> PResult<P<Item>> {
    let crate_name = try!(self.parse_ident());
    // With `as`, the original crate name becomes the stored path and the
    // rename becomes the item's ident.
    let (maybe_path, ident) = if let Some(ident) = try!(self.parse_rename()) {
        (Some(crate_name.name), ident)
    } else {
        (None, crate_name)
    };
    try!(self.expect(&token::Semi));

    let last_span = self.last_span;

    if visibility == ast::Public {
        self.span_warn(mk_sp(lo, last_span.hi),
                       "`pub extern crate` does not work as expected and should not be used. \
                        Likely to become an error. Prefer `extern crate` and `pub use`.");
    }

    Ok(self.mk_item(lo, last_span.hi, ident, ItemExternCrate(maybe_path), visibility, attrs))
}

/// Parse `extern` for foreign ABIs
/// modules.
///
/// `extern` is expected to have been
/// consumed before calling this method
///
/// # Examples:
///
/// extern "C" {}
/// extern {}
fn parse_item_foreign_mod(&mut self,
                          lo: BytePos,
                          opt_abi: Option<abi::Abi>,
                          visibility: Visibility,
                          mut attrs: Vec<Attribute>)
                          -> PResult<P<Item>> {
    try!(self.expect(&token::OpenDelim(token::Brace)));

    // An `extern { ... }` block with no ABI string defaults to the C ABI.
    let abi = opt_abi.unwrap_or(abi::C);

    attrs.extend(self.parse_inner_attributes());

    let mut foreign_items = vec![];
    while let Some(item) = try!(self.parse_foreign_item()) {
        foreign_items.push(item);
    }
    try!(self.expect(&token::CloseDelim(token::Brace)));

    let last_span = self.last_span;
    let m = ast::ForeignMod {
        abi: abi,
        items: foreign_items
    };
    Ok(self.mk_item(lo, last_span.hi, special_idents::invalid, ItemForeignMod(m), visibility, attrs))
}

/// Parse type Foo = Bar;
fn parse_item_type(&mut self) -> PResult<ItemInfo> {
    let ident = try!(self.parse_ident());
    let mut tps = try!(self.parse_generics());
    tps.where_clause = try!(self.parse_where_clause());
    try!(self.expect(&token::Eq));
    let ty = try!(self.parse_ty_sum());
    try!(self.expect(&token::Semi));
    Ok((ident, ItemTy(ty, tps), None))
}

/// Parse a structure-like enum variant definition
/// this should probably be renamed or refactored...
fn parse_struct_def(&mut self) -> PResult<P<VariantData>> {
    let mut fields: Vec<StructField> = Vec::new();
    while self.token != token::CloseDelim(token::Brace) {
        fields.push(try!(self.parse_struct_decl_field(false)));
    }
    // Consume the closing brace.
    try!(self.bump());

    Ok(P(VariantData::Struct(fields, ast::DUMMY_NODE_ID)))
}

/// Parse the part of an "enum" decl following the '{'
fn parse_enum_def(&mut self, _generics: &ast::Generics) -> PResult<EnumDef> {
    let mut variants = Vec::new();
    // `all_nullary` / `any_disr` track whether explicit discriminants are
    // legal: they are only allowed when every variant is unit-like (c-like).
    let mut all_nullary = true;
    let mut any_disr = None;
    while self.token != token::CloseDelim(token::Brace) {
        let variant_attrs = self.parse_outer_attributes();
        let vlo = self.span.lo;

        let struct_def;
        let mut disr_expr = None;
        let ident = try!(self.parse_ident());
        if try!(self.eat(&token::OpenDelim(token::Brace))) {
            // Parse a struct variant.
            all_nullary = false;
            struct_def = try!(self.parse_struct_def());
        } else if self.check(&token::OpenDelim(token::Paren)) {
            // Tuple variant: a parenthesized list of field types.
            all_nullary = false;
            let arg_tys = try!(self.parse_enum_variant_seq(
                &token::OpenDelim(token::Paren),
                &token::CloseDelim(token::Paren),
                seq_sep_trailing_allowed(token::Comma),
                |p| p.parse_ty_sum()
            ));
            let mut fields = Vec::new();
            for ty in arg_tys {
                fields.push(Spanned { span: ty.span, node: ast::StructField_ {
                    ty: ty,
                    kind: ast::UnnamedField(ast::Inherited),
                    attrs: Vec::new(),
                    id: ast::DUMMY_NODE_ID,
                }});
            }
            struct_def = P(ast::VariantData::Tuple(fields, ast::DUMMY_NODE_ID));
        } else if try!(self.eat(&token::Eq)) {
            // Unit variant with an explicit discriminant: `Variant = expr`.
            disr_expr = Some(try!(self.parse_expr_nopanic()));
            any_disr = disr_expr.as_ref().map(|expr| expr.span);
            struct_def = P(ast::VariantData::Unit(ast::DUMMY_NODE_ID));
        } else {
            struct_def = P(ast::VariantData::Unit(ast::DUMMY_NODE_ID));
        }

        let vr = ast::Variant_ {
            name: ident,
            attrs: variant_attrs,
            data: struct_def,
            disr_expr: disr_expr,
        };
        variants.push(P(spanned(vlo, self.last_span.hi, vr)));

        if !try!(self.eat(&token::Comma)) { break; }
    }
    try!(self.expect(&token::CloseDelim(token::Brace)));
    match any_disr {
        Some(disr_span) if !all_nullary =>
            self.span_err(disr_span,
                          "discriminator values can only be used with a c-like enum"),
        _ => ()
    }

    Ok(ast::EnumDef { variants: variants })
}

/// Parse an "enum" declaration
fn parse_item_enum(&mut self) -> PResult<ItemInfo> {
    let id = try!(self.parse_ident());
    let mut generics = try!(self.parse_generics());
    generics.where_clause = try!(self.parse_where_clause());
    try!(self.expect(&token::OpenDelim(token::Brace)));

    let enum_definition = try!(self.parse_enum_def(&generics));
    Ok((id, ItemEnum(enum_definition, generics), None))
}

/// Parses a string as an ABI spec on an extern type or module. Consumes
/// the `extern` keyword, if one is found.
fn parse_opt_abi(&mut self) -> PResult<Option<abi::Abi>> {
    match self.token {
        token::Literal(token::Str_(s), suf) | token::Literal(token::StrRaw(s, _), suf) => {
            let sp = self.span;
            // ABI strings take no literal suffix (e.g. no `"C"usize`).
            self.expect_no_suffix(sp, "ABI spec", suf);
            try!(self.bump());
            match abi::lookup(&s.as_str()) {
                Some(abi) => Ok(Some(abi)),
                None => {
                    let last_span = self.last_span;
                    self.span_err(
                        last_span,
                        &format!("invalid ABI: expected one of [{}], \
                                  found `{}`",
                                 abi::all_names().join(", "),
                                 s));
                    // Recoverable: report the error but keep parsing.
                    Ok(None)
                }
            }
        }

        _ => Ok(None),
    }
}

/// Parse one of the items allowed by the flags.
/// NB: this function no longer parses the items inside an
/// extern crate.
// Top-level item dispatcher: tries each item-introducing keyword in turn
// and returns the first match, falling through to macro invocation.
fn parse_item_(&mut self, attrs: Vec<Attribute>,
               macros_allowed: bool) -> PResult<Option<P<Item>>> {
    // If the current token is an already-parsed (interpolated) item coming
    // from macro expansion, take it directly and merge the outer attributes.
    let nt_item = match self.token {
        token::Interpolated(token::NtItem(ref item)) => {
            Some((**item).clone())
        }
        _ => None
    };
    match nt_item {
        Some(mut item) => {
            try!(self.bump());
            let mut attrs = attrs;
            mem::swap(&mut item.attrs, &mut attrs);
            item.attrs.extend(attrs);
            return Ok(Some(P(item)));
        }
        None => {}
    }

    let lo = self.span.lo;

    let visibility = try!(self.parse_visibility());

    if try!(self.eat_keyword(keywords::Use)) {
        // USE ITEM
        let item_ = ItemUse(try!(self.parse_view_path()));
        try!(self.expect(&token::Semi));

        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                token::special_idents::invalid,
                                item_,
                                visibility,
                                attrs);
        return Ok(Some(item));
    }

    if try!(self.eat_keyword(keywords::Extern)) {
        if try!(self.eat_keyword(keywords::Crate)) {
            return Ok(Some(try!(self.parse_item_extern_crate(lo, visibility, attrs))));
        }

        let opt_abi = try!(self.parse_opt_abi());

        if try!(self.eat_keyword(keywords::Fn)) {
            // EXTERN FUNCTION ITEM
            let abi = opt_abi.unwrap_or(abi::C);
            let (ident, item_, extra_attrs) =
                try!(self.parse_item_fn(Unsafety::Normal, Constness::NotConst, abi));
            let last_span = self.last_span;
            let item = self.mk_item(lo,
                                    last_span.hi,
                                    ident,
                                    item_,
                                    visibility,
                                    maybe_append(attrs, extra_attrs));
            return Ok(Some(item));
        } else if self.check(&token::OpenDelim(token::Brace)) {
            return Ok(Some(try!(self.parse_item_foreign_mod(lo, opt_abi,
                                                            visibility, attrs))));
        }

        // Nothing valid can follow `extern` here; with empty expectation
        // lists this always produces a diagnostic for the current token.
        try!(self.expect_one_of(&[], &[]));
    }

    if try!(self.eat_keyword(keywords::Static)) {
        // STATIC ITEM
        let m = if try!(self.eat_keyword(keywords::Mut)) {MutMutable} else {MutImmutable};
        let (ident, item_, extra_attrs) = try!(self.parse_item_const(Some(m)));
        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if try!(self.eat_keyword(keywords::Const)) {
        if self.check_keyword(keywords::Fn) {
            // CONST FUNCTION ITEM
            try!(self.bump());
            let (ident, item_, extra_attrs) =
                try!(self.parse_item_fn(Unsafety::Normal, Constness::Const, abi::Rust));
            let last_span = self.last_span;
            let item = self.mk_item(lo,
                                    last_span.hi,
                                    ident,
                                    item_,
                                    visibility,
                                    maybe_append(attrs, extra_attrs));
            return Ok(Some(item));
        }

        // CONST ITEM
        if try!(self.eat_keyword(keywords::Mut)) {
            // `const mut` is rejected but parsing continues (error recovery).
            let last_span = self.last_span;
            self.span_err(last_span, "const globals cannot be mutable");
            self.fileline_help(last_span, "did you mean to declare a static?");
        }
        let (ident, item_, extra_attrs) = try!(self.parse_item_const(None));
        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if self.check_keyword(keywords::Unsafe) &&
        self.look_ahead(1, |t| t.is_keyword(keywords::Trait))
    {
        // UNSAFE TRAIT ITEM
        try!(self.expect_keyword(keywords::Unsafe));
        try!(self.expect_keyword(keywords::Trait));
        let (ident, item_, extra_attrs) =
            try!(self.parse_item_trait(ast::Unsafety::Unsafe));
        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if self.check_keyword(keywords::Unsafe) &&
        self.look_ahead(1, |t| t.is_keyword(keywords::Impl))
    {
        // IMPL ITEM
        try!(self.expect_keyword(keywords::Unsafe));
        try!(self.expect_keyword(keywords::Impl));
        let (ident, item_, extra_attrs) = try!(self.parse_item_impl(ast::Unsafety::Unsafe));
        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if self.check_keyword(keywords::Fn) {
        // FUNCTION ITEM
        try!(self.bump());
        let (ident, item_, extra_attrs) =
            try!(self.parse_item_fn(Unsafety::Normal, Constness::NotConst, abi::Rust));
        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if self.check_keyword(keywords::Unsafe)
        && self.look_ahead(1, |t| *t != token::OpenDelim(token::Brace)) {
        // UNSAFE FUNCTION ITEM
        // (`unsafe {` is an unsafe block expression, hence the lookahead.)
        try!(self.bump());
        let abi = if try!(self.eat_keyword(keywords::Extern)) {
            try!(self.parse_opt_abi()).unwrap_or(abi::C)
        } else {
            abi::Rust
        };
        // `const` is only accepted here for Rust-ABI functions.
        let constness = if abi == abi::Rust && try!(self.eat_keyword(keywords::Const)) {
            Constness::Const
        } else {
            Constness::NotConst
        };
        try!(self.expect_keyword(keywords::Fn));
        let (ident, item_, extra_attrs) =
            try!(self.parse_item_fn(Unsafety::Unsafe, constness, abi));
        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if try!(self.eat_keyword(keywords::Mod)) {
        // MODULE ITEM
        let (ident, item_, extra_attrs) =
            try!(self.parse_item_mod(&attrs[..]));
        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if try!(self.eat_keyword(keywords::Type)) {
        // TYPE ITEM
        let (ident, item_, extra_attrs) = try!(self.parse_item_type());
        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if try!(self.eat_keyword(keywords::Enum)) {
        // ENUM ITEM
        let (ident, item_, extra_attrs) = try!(self.parse_item_enum());
        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if try!(self.eat_keyword(keywords::Trait)) {
        // TRAIT ITEM
        let (ident, item_, extra_attrs) =
            try!(self.parse_item_trait(ast::Unsafety::Normal));
        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if try!(self.eat_keyword(keywords::Impl)) {
        // IMPL ITEM
        let (ident, item_, extra_attrs) = try!(self.parse_item_impl(ast::Unsafety::Normal));
        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    if try!(self.eat_keyword(keywords::Struct)) {
        // STRUCT ITEM
        let (ident, item_, extra_attrs) = try!(self.parse_item_struct());
        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                ident,
                                item_,
                                visibility,
                                maybe_append(attrs, extra_attrs));
        return Ok(Some(item));
    }
    // No keyword matched: either a macro invocation or an error.
    self.parse_macro_use_or_failure(attrs,macros_allowed,lo,visibility)
}

/// Parse a foreign item.
fn parse_foreign_item(&mut self) -> PResult<Option<P<ForeignItem>>> {
    let attrs = self.parse_outer_attributes();
    let lo = self.span.lo;
    let visibility = try!(self.parse_visibility());

    if self.check_keyword(keywords::Static) {
        // FOREIGN STATIC ITEM
        return Ok(Some(try!(self.parse_item_foreign_static(visibility, lo, attrs))));
    }
    if self.check_keyword(keywords::Fn) || self.check_keyword(keywords::Unsafe) {
        // FOREIGN FUNCTION ITEM
        return Ok(Some(try!(self.parse_item_foreign_fn(visibility, lo, attrs))));
    }

    // FIXME #5668: this will occur for a macro invocation:
    match try!(self.parse_macro_use_or_failure(attrs, true, lo, visibility)) {
        Some(item) => {
            return Err(self.span_fatal(item.span, "macros cannot expand to foreign items"));
        }
        None => Ok(None)
    }
}

/// This is the fall-through for parsing items.
fn parse_macro_use_or_failure(
    &mut self,
    attrs: Vec<Attribute> ,
    macros_allowed: bool,
    lo: BytePos,
    visibility: Visibility
) -> PResult<Option<P<Item>>> {
    // A macro item looks like `path ! ident? ( … )` / `[ … ]` / `{ … }`.
    if macros_allowed && !self.token.is_any_keyword()
        && self.look_ahead(1, |t| *t == token::Not)
        && (self.look_ahead(2, |t| t.is_plain_ident())
            || self.look_ahead(2, |t| *t == token::OpenDelim(token::Paren))
            || self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))) {
        // MACRO INVOCATION ITEM

        let last_span = self.last_span;
        self.complain_if_pub_macro(visibility, last_span);

        // item macro.
        let pth = try!(self.parse_path(NoTypesAllowed));
        try!(self.expect(&token::Not));

        // a 'special' identifier (like what `macro_rules!` uses)
        // is optional. We should eventually unify invoc syntax
        // and remove this.
        let id = if self.token.is_plain_ident() {
            try!(self.parse_ident())
        } else {
            token::special_idents::invalid // no special identifier
        };
        // eat a matched-delimiter token tree:
        let delim = try!(self.expect_open_delim());
        let tts = try!(self.parse_seq_to_end(&token::CloseDelim(delim),
                                             seq_sep_none(),
                                             |p| p.parse_token_tree()));
        // single-variant-enum... :
        let m = Mac_ { path: pth, tts: tts, ctxt: EMPTY_CTXT };
        let m: ast::Mac = codemap::Spanned { node: m,
                                             span: mk_sp(self.span.lo,
                                                         self.span.hi) };

        if delim != token::Brace {
            // Paren/bracket invocations require a trailing semicolon.
            if !try!(self.eat(&token::Semi)) {
                let last_span = self.last_span;
                self.span_err(last_span,
                              "macros that expand to items must either \
                               be surrounded with braces or followed by \
                               a semicolon");
            }
        }

        let item_ = ItemMac(m);
        let last_span = self.last_span;
        let item = self.mk_item(lo,
                                last_span.hi,
                                id,
                                item_,
                                visibility,
                                attrs);
        return Ok(Some(item));
    }

    // FAILURE TO PARSE ITEM
    match visibility {
        Inherited => {}
        Public => {
            let last_span = self.last_span;
            return Err(self.span_fatal(last_span, "unmatched visibility `pub`"));
        }
    }

    if !attrs.is_empty() {
        self.expected_item_err(&attrs);
    }
    Ok(None)
}

pub fn parse_item_nopanic(&mut self) -> PResult<Option<P<Item>>> {
    let attrs = self.parse_outer_attributes();
    self.parse_item_(attrs, true)
}

/// Matches view_path : MOD? non_global_path as IDENT
///                   | MOD? non_global_path MOD_SEP LBRACE RBRACE
///                   | MOD? non_global_path MOD_SEP LBRACE ident_seq RBRACE
///                   | MOD? non_global_path MOD_SEP STAR
///                   | MOD? non_global_path
fn parse_view_path(&mut self) -> PResult<P<ViewPath>> {
    let lo = self.span.lo;

    // Allow a leading :: because the paths are absolute either way.
    // This occurs with "use $crate::..." in macros.
    try!(self.eat(&token::ModSep));

    if self.check(&token::OpenDelim(token::Brace)) {
        // use {foo,bar}
        let idents = try!(self.parse_unspanned_seq(
            &token::OpenDelim(token::Brace),
            &token::CloseDelim(token::Brace),
            seq_sep_trailing_allowed(token::Comma),
            |p| p.parse_path_list_item()));
        let path = ast::Path {
            span: mk_sp(lo, self.span.hi),
            global: false,
            segments: Vec::new()
        };
        return Ok(P(spanned(lo, self.span.hi, ViewPathList(path, idents))));
    }

    let first_ident = try!(self.parse_ident());
    let mut path = vec!(first_ident);
    if let token::ModSep = self.token {
        // foo::bar or foo::{a,b,c} or foo::*
        while self.check(&token::ModSep) {
            try!(self.bump());

            match self.token {
                token::Ident(..) => {
                    let ident = try!(self.parse_ident());
                    path.push(ident);
                }

                // foo::bar::{a,b,c}
                token::OpenDelim(token::Brace) => {
                    let idents = try!(self.parse_unspanned_seq(
                        &token::OpenDelim(token::Brace),
                        &token::CloseDelim(token::Brace),
                        seq_sep_trailing_allowed(token::Comma),
                        |p| p.parse_path_list_item()
                    ));
                    let path = ast::Path {
                        span: mk_sp(lo, self.span.hi),
                        global: false,
                        segments: path.into_iter().map(|identifier| {
                            ast::PathSegment {
                                identifier: identifier,
                                parameters: ast::PathParameters::none(),
                            }
                        }).collect()
                    };
                    return Ok(P(spanned(lo, self.span.hi, ViewPathList(path, idents))));
                }

                // foo::bar::*
                token::BinOp(token::Star) => {
                    try!(self.bump());
                    let path = ast::Path {
                        span: mk_sp(lo, self.span.hi),
                        global: false,
                        segments: path.into_iter().map(|identifier| {
                            ast::PathSegment {
                                identifier: identifier,
                                parameters: ast::PathParameters::none(),
                            }
                        }).collect()
                    };
                    return Ok(P(spanned(lo, self.span.hi, ViewPathGlob(path))));
                }

                // fall-through for case foo::bar::;
                token::Semi => {
                    self.span_err(self.span, "expected identifier or `{` or `*`, found `;`");
                }

                _ => break
            }
        }
    }
    // Simple path: the binding name defaults to the last segment unless
    // an `as ident` rename follows.
    let mut rename_to = path[path.len() - 1];
    let path = ast::Path {
        span: mk_sp(lo, self.last_span.hi),
        global: false,
        segments: path.into_iter().map(|identifier| {
            ast::PathSegment {
                identifier: identifier,
                parameters: ast::PathParameters::none(),
            }
        }).collect()
    };
    rename_to = try!(self.parse_rename()).unwrap_or(rename_to);
    Ok(P(spanned(lo, self.last_span.hi, ViewPathSimple(rename_to, path))))
}

// Parse an optional `as ident` rename suffix.
fn parse_rename(&mut self) -> PResult<Option<Ident>> {
    if try!(self.eat_keyword(keywords::As)) {
        self.parse_ident().map(Some)
    } else {
        Ok(None)
    }
}

/// Parses a source module as a crate. This is the main
/// entry point for the parser.
pub fn parse_crate_mod(&mut self) -> PResult<Crate> {
    let lo = self.span.lo;
    Ok(ast::Crate {
        attrs: self.parse_inner_attributes(),
        module: try!(self.parse_mod_items(&token::Eof, lo)),
        config: self.cfg.clone(),
        span: mk_sp(lo, self.span.lo),
        exported_macros: Vec::new(),
    })
}

// If the current token is a (possibly raw) string literal, consume it and
// return its interned contents, style, and optional suffix; otherwise None.
pub fn parse_optional_str(&mut self)
                          -> PResult<Option<(InternedString,
                                             ast::StrStyle,
                                             Option<ast::Name>)>> {
    let ret = match self.token {
        token::Literal(token::Str_(s), suf) => {
            (self.id_to_interned_str(ast::Ident::with_empty_ctxt(s)),
             ast::CookedStr,
             suf)
        }
        token::Literal(token::StrRaw(s, n), suf) => {
            (self.id_to_interned_str(ast::Ident::with_empty_ctxt(s)),
             ast::RawStr(n),
             suf)
        }
        _ => return Ok(None)
    };
    try!(self.bump());
    Ok(Some(ret))
}

// Expect a string literal; a suffix on it is reported as an error.
pub fn parse_str(&mut self) -> PResult<(InternedString, StrStyle)> {
    match try!(self.parse_optional_str()) {
        Some((s, style, suf)) => {
            let sp = self.last_span;
            self.expect_no_suffix(sp, "string literal", suf);
            Ok((s, style))
        }
        _ => Err(self.fatal("expected string literal"))
    }
}
}
use itertools::{multipeek,MultiPeek}; use std::str; #[derive(Debug, PartialEq)] pub enum Token { LeftParen, RightParen, LeftBrace, RightBrace, Comma, Dot, Minus, Plus, Semicolon, Slash, Star, Bang, BangEqual, Equal, EqualEqual, Greater, GreaterEqual, Less, LessEqual, Identifier(String), StringLiteral(String), NumberLiteral(f64), // Keywords And, Class, Else, False, Fun, For, If, Nil, Or, Print, Return, Super, This, True, Var, While, Eof, // The book doesn't have tokens for comments and // whitespaces. Introducing them the scanner can // deal with them uniformly and in a more // functional way. Comment, Whitespace, } #[derive(Debug)] pub struct TokenWithContext { token: Token, // Takes a copy. Tokens can outlive the file they came from lexeme: String, line: usize, } struct Scanner<'a>{ current: usize, current_lexeme: String, line: usize, source: MultiPeek<str::Chars<'a>>, } fn is_digit(c: char) -> bool { c >= '0' && c <= '9' } fn is_alpha(c: char) -> bool { (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' } fn is_alphanumeric(c: char) -> bool { is_digit(c) || is_alpha(c) } fn is_whitespace(c: char) -> bool { match c { ' ' => true, '\r' => true, '\t' => true, '\n' => true, _ => false, } } impl<'a> Scanner<'a> { fn initialize(source: &String) -> Scanner { Scanner { current: 0, current_lexeme: "".into(), line: 1, // 1-indexed, source: multipeek(source.chars()), } } fn is_at_end(&mut self) -> bool { self.source.reset_peek(); match self.source.peek(){ Some(_) => false, None => true, } } fn peek(&mut self) -> char() { self.source.reset_peek(); //TODO: this is dubious match self.source.peek(){ Some(&c) => c, None => '\0', } } fn advance(&mut self) -> char { self.current += 1; //TODO: handle None! 
let c = self.source.next().unwrap(); self.current_lexeme.push(c); if c == '\n' { self.line += 1; } c } fn is_match(&mut self, expected: char) -> bool { self.source.reset_peek(); match self.source.peek(){ Some(&c) => { if c == expected { let _ = self.source.next(); self.current += 1; true } else{ false } } None => false, } } fn add_context(&mut self, token: Token) -> TokenWithContext { let result = TokenWithContext { token: token, lexeme: self.current_lexeme.clone(), line: self.line, }; self.current_lexeme = "".into(); result } fn string(&mut self) -> Result<Token, String> { let initial_line = self.line; while self.peek() != '"' && !self.is_at_end() { self.advance(); } if self.is_at_end() { return Err(format!("Unterminated string at line {}", initial_line)); } self.advance(); let literal_length = self.current_lexeme.len() - 2; // Trims delimiters let literal = self.current_lexeme.chars().skip(1).take(literal_length).collect(); Ok(Token::StringLiteral(literal)) } fn number(&mut self) -> Token { while is_digit(self.peek()) { self.advance(); } // Here we do double lookahead self.source.reset_peek(); match self.source.peek(){ Some(&p1) =>{ match self.source.peek() { Some(&p2) => if p1 == '.' && is_digit(p2){ self.advance(); // Consume the . 
while is_digit(self.peek()) { self.advance(); } }, None => (), } } None => (), } let value = self.current_lexeme.parse::<f64>().unwrap(); Token::NumberLiteral(value) } fn identifier(&mut self) -> Token { while is_alphanumeric(self.peek()) { self.advance(); } // TODO: take a ref in the first place match self.current_lexeme.as_ref() { "and" => Token::And, "class" => Token::Class, "else" => Token::Else, "false" => Token::False, "for" => Token::For, "fun" => Token::Fun, "if" => Token::If, "nil" => Token::Nil, "or" => Token::Or, "print" => Token::Print, "return" => Token::Return, "super" => Token::Super, "this" => Token::This, "true" => Token::True, "var" => Token::Var, "while" => Token::While, identifier => Token::Identifier(identifier.into()), } } fn scan_next(&mut self) -> Result<TokenWithContext, String> { let token = match self.advance() { '(' => Token::LeftParen, ')' => Token::RightParen, '{' => Token::LeftBrace, '}' => Token::RightBrace, ',' => Token::Comma, '.' => Token::Dot, '-' => Token::Minus, '+' => Token::Plus, ';' => Token::Semicolon, '*' => Token::Star, '!' 
=> { if self.is_match('=') { Token::BangEqual } else { Token::Bang } } '=' => { if self.is_match('=') { Token::EqualEqual } else { Token::Equal } } '<' => { if self.is_match('=') { Token::LessEqual } else { Token::Less } } '>' => { if self.is_match('=') { Token::GreaterEqual } else { Token::Greater } } '/' => { if self.is_match('/') { // Comments go on till the end of the line while self.peek() != '\n' && !self.is_at_end() { self.advance(); } Token::Comment } else { Token::Slash } } '"' => try!(self.string()), c if is_whitespace(c) => Token::Whitespace, c if is_digit(c) => self.number(), c if is_alpha(c) => self.identifier(), c => { return Err(format!("Unexpected character {} at line {}, pos {}", c, self.line, self.current - 1)); } }; Ok(self.add_context(token)) } } pub fn scan(source: &String) -> Result<Vec<TokenWithContext>, String> { let mut scanner = Scanner::initialize(source); let mut tokens = Vec::new(); while !scanner.is_at_end() { let token_with_context = try!(scanner.scan_next()); match token_with_context.token { // Ignoring tokens we don't care about Token::Comment => {} Token::Whitespace => {} _ => tokens.push(token_with_context), } } tokens.push(TokenWithContext { token: Token::Eof, lexeme: "".into(), line: scanner.line, }); Ok(tokens) } #[cfg(test)] mod tests { use scanner::*; #[test] fn single_token() { let tokens = scan(&"+".into()).unwrap(); assert_eq!(tokens[0].token, Token::Plus); } #[test] fn expression() { let tokens = scan(&"1+2".into()).unwrap(); assert_eq!(tokens[0].token, Token::NumberLiteral(1.0f64)); assert_eq!(tokens[1].token, Token::Plus); assert_eq!(tokens[2].token, Token::NumberLiteral(2.0f64)); assert_eq!(tokens[3].token, Token::Eof); } #[test] fn expression_with_whitespaces() { let tokens = scan(&"1 + 2".into()).unwrap(); assert_eq!(tokens[0].token, Token::NumberLiteral(1.0f64)); assert_eq!(tokens[1].token, Token::Plus); assert_eq!(tokens[2].token, Token::NumberLiteral(2.0f64)); assert_eq!(tokens[3].token, Token::Eof); } #[test] fn 
assignement_with_comment() { let tokens = scan(&"var a = 1.0; // A comment".into()).unwrap(); assert_eq!(tokens[0].token, Token::Var); assert_eq!(tokens[1].token, Token::Identifier("a".into())); assert_eq!(tokens[2].token, Token::Equal); assert_eq!(tokens[3].token, Token::NumberLiteral(1.0f64)); assert_eq!(tokens[4].token, Token::Semicolon); assert_eq!(tokens[5].token, Token::Eof); } #[test] fn multiline_statements() { let tokens = scan(&r#"var a = 1.0; var b = "Hello";"# .into()) .unwrap(); assert_eq!(tokens[0].token, Token::Var); assert_eq!(tokens[1].token, Token::Identifier("a".into())); assert_eq!(tokens[2].token, Token::Equal); assert_eq!(tokens[3].token, Token::NumberLiteral(1.0f64)); assert_eq!(tokens[4].token, Token::Semicolon); assert_eq!(tokens[5].token, Token::Var); assert_eq!(tokens[6].token, Token::Identifier("b".into())); assert_eq!(tokens[7].token, Token::Equal); assert_eq!(tokens[8].token, Token::StringLiteral("Hello".into())); assert_eq!(tokens[9].token, Token::Semicolon); assert_eq!(tokens[10].token, Token::Eof); assert_eq!(tokens[1].line, 1); assert_eq!(tokens[9].line, 2); } } Using peek_check everywhere use itertools::{multipeek, MultiPeek}; use std::str; #[derive(Debug, PartialEq)] pub enum Token { LeftParen, RightParen, LeftBrace, RightBrace, Comma, Dot, Minus, Plus, Semicolon, Slash, Star, Bang, BangEqual, Equal, EqualEqual, Greater, GreaterEqual, Less, LessEqual, Identifier(String), StringLiteral(String), NumberLiteral(f64), // Keywords And, Class, Else, False, Fun, For, If, Nil, Or, Print, Return, Super, This, True, Var, While, Eof, // The book doesn't have tokens for comments and // whitespaces. Introducing them the scanner can // deal with them uniformly and in a more // functional way. Comment, Whitespace, } #[derive(Debug)] pub struct TokenWithContext { token: Token, // Takes a copy. 
Tokens can outlive the file they came from lexeme: String, line: usize, } struct Scanner<'a> { current: usize, current_lexeme: String, line: usize, source: MultiPeek<str::Chars<'a>>, } fn is_digit(c: char) -> bool { c >= '0' && c <= '9' } fn is_alpha(c: char) -> bool { (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' } fn is_alphanumeric(c: char) -> bool { is_digit(c) || is_alpha(c) } fn is_whitespace(c: char) -> bool { match c { ' ' => true, '\r' => true, '\t' => true, '\n' => true, _ => false, } } impl<'a> Scanner<'a> { fn initialize(source: &String) -> Scanner { Scanner { current: 0, current_lexeme: "".into(), line: 1, // 1-indexed, source: multipeek(source.chars()), } } fn is_at_end(&mut self) -> bool { self.source.reset_peek(); match self.source.peek() { Some(_) => false, None => true, } } fn peek_check(&mut self, check: &Fn(char) -> bool) -> bool { self.source.reset_peek(); match self.source.peek() { Some(&c) => check(c), None => false, } } fn advance(&mut self) -> char { self.current += 1; // TODO: handle None! 
let c = self.source.next().unwrap(); self.current_lexeme.push(c); if c == '\n' { self.line += 1; } c } fn is_match(&mut self, expected: char) -> bool { self.source.reset_peek(); match self.source.peek() { Some(&c) => { if c == expected { let _ = self.source.next(); self.current += 1; true } else { false } } None => false, } } fn add_context(&mut self, token: Token) -> TokenWithContext { let result = TokenWithContext { token: token, lexeme: self.current_lexeme.clone(), line: self.line, }; self.current_lexeme = "".into(); result } fn string(&mut self) -> Result<Token, String> { let initial_line = self.line; self.source.reset_peek(); while match self.source.peek() { Some(&p) => p != '"', None => false, } { self.advance(); } if self.is_at_end() { return Err(format!("Unterminated string at line {}", initial_line)); } self.advance(); let literal_length = self.current_lexeme.len() - 2; // Trims delimiters let literal = self.current_lexeme.chars().skip(1).take(literal_length).collect(); Ok(Token::StringLiteral(literal)) } fn number(&mut self) -> Token { while self.peek_check(&is_digit) { self.advance(); } // Here we do double lookahead self.source.reset_peek(); match self.source.peek() { Some(&p1) => { match self.source.peek() { Some(&p2) => { if p1 == '.' && is_digit(p2) { self.advance(); // Consume the . 
while self.peek_check(&is_digit) { self.advance(); } } } None => (), } } None => (), } let value = self.current_lexeme.parse::<f64>().unwrap(); Token::NumberLiteral(value) } fn identifier(&mut self) -> Token { while self.peek_check(&is_alphanumeric) { self.advance(); } // TODO: take a ref in the first place match self.current_lexeme.as_ref() { "and" => Token::And, "class" => Token::Class, "else" => Token::Else, "false" => Token::False, "for" => Token::For, "fun" => Token::Fun, "if" => Token::If, "nil" => Token::Nil, "or" => Token::Or, "print" => Token::Print, "return" => Token::Return, "super" => Token::Super, "this" => Token::This, "true" => Token::True, "var" => Token::Var, "while" => Token::While, identifier => Token::Identifier(identifier.into()), } } fn scan_next(&mut self) -> Result<TokenWithContext, String> { let token = match self.advance() { '(' => Token::LeftParen, ')' => Token::RightParen, '{' => Token::LeftBrace, '}' => Token::RightBrace, ',' => Token::Comma, '.' => Token::Dot, '-' => Token::Minus, '+' => Token::Plus, ';' => Token::Semicolon, '*' => Token::Star, '!' 
=> { if self.is_match('=') { Token::BangEqual } else { Token::Bang } } '=' => { if self.is_match('=') { Token::EqualEqual } else { Token::Equal } } '<' => { if self.is_match('=') { Token::LessEqual } else { Token::Less } } '>' => { if self.is_match('=') { Token::GreaterEqual } else { Token::Greater } } '/' => { if self.is_match('/') { // Comments go on till the end of the line while self.peek_check(&|c| c != '\n') { self.advance(); } Token::Comment } else { Token::Slash } } '"' => try!(self.string()), c if is_whitespace(c) => Token::Whitespace, c if is_digit(c) => self.number(), c if is_alpha(c) => self.identifier(), c => { return Err(format!("Unexpected character {} at line {}, pos {}", c, self.line, self.current - 1)); } }; Ok(self.add_context(token)) } } pub fn scan(source: &String) -> Result<Vec<TokenWithContext>, String> { let mut scanner = Scanner::initialize(source); let mut tokens = Vec::new(); while !scanner.is_at_end() { let token_with_context = try!(scanner.scan_next()); match token_with_context.token { // Ignoring tokens we don't care about Token::Comment => {} Token::Whitespace => {} _ => tokens.push(token_with_context), } } tokens.push(TokenWithContext { token: Token::Eof, lexeme: "".into(), line: scanner.line, }); Ok(tokens) } #[cfg(test)] mod tests { use scanner::*; #[test] fn single_token() { let tokens = scan(&"+".into()).unwrap(); assert_eq!(tokens[0].token, Token::Plus); } #[test] fn expression() { let tokens = scan(&"1+2".into()).unwrap(); assert_eq!(tokens[0].token, Token::NumberLiteral(1.0f64)); assert_eq!(tokens[1].token, Token::Plus); assert_eq!(tokens[2].token, Token::NumberLiteral(2.0f64)); assert_eq!(tokens[3].token, Token::Eof); } #[test] fn expression_with_whitespaces() { let tokens = scan(&"1 + 2".into()).unwrap(); assert_eq!(tokens[0].token, Token::NumberLiteral(1.0f64)); assert_eq!(tokens[1].token, Token::Plus); assert_eq!(tokens[2].token, Token::NumberLiteral(2.0f64)); assert_eq!(tokens[3].token, Token::Eof); } #[test] fn 
assignement_with_comment() { let tokens = scan(&"var a = 1.0; // A comment".into()).unwrap(); assert_eq!(tokens[0].token, Token::Var); assert_eq!(tokens[1].token, Token::Identifier("a".into())); assert_eq!(tokens[2].token, Token::Equal); assert_eq!(tokens[3].token, Token::NumberLiteral(1.0f64)); assert_eq!(tokens[4].token, Token::Semicolon); assert_eq!(tokens[5].token, Token::Eof); } #[test] fn multiline_statements() { let tokens = scan(&r#"var a = 1.0; var b = "Hello";"# .into()) .unwrap(); assert_eq!(tokens[0].token, Token::Var); assert_eq!(tokens[1].token, Token::Identifier("a".into())); assert_eq!(tokens[2].token, Token::Equal); assert_eq!(tokens[3].token, Token::NumberLiteral(1.0f64)); assert_eq!(tokens[4].token, Token::Semicolon); assert_eq!(tokens[5].token, Token::Var); assert_eq!(tokens[6].token, Token::Identifier("b".into())); assert_eq!(tokens[7].token, Token::Equal); assert_eq!(tokens[8].token, Token::StringLiteral("Hello".into())); assert_eq!(tokens[9].token, Token::Semicolon); assert_eq!(tokens[10].token, Token::Eof); assert_eq!(tokens[1].line, 1); assert_eq!(tokens[9].line, 2); } }
extern crate git2; const TMP_NAME: &'static str = "refs/centralgit/tmp_fd2db5f8_bac2_4a1e_9487_4ac3414788aa"; use git2::*; use std::process::Command; use std::path::Path; use shell::Shell; use super::RepoHost; use std::collections::HashMap; pub struct Scratch<'a> { pub repo: Repository, pub host: &'a RepoHost, } impl<'a> Scratch<'a> { pub fn new(path: &Path, host: &'a RepoHost) -> Scratch<'a> { Scratch { repo: Repository::init_bare(&path).expect("could not init scratch"), host: host, } } pub fn tracking(&self, module: &str, branch: &str) -> Option<Object> { let remote_name = format!("{}", module); let fetch_url = self.host.local_path(&module); let mut remote = if let Ok(remote) = self.repo.find_remote(&remote_name) { remote } else { debug!("==== create remote (remote_name:{}, remote_url:{})", &remote_name, &fetch_url); self.repo.remote(&remote_name, &fetch_url).expect("can't create remote") }; let rs = remote.get_refspec(0).unwrap().str().unwrap().to_string(); if let Ok(_) = remote.fetch(&[&rs], None, None) { return self.repo .revparse_single(&format!("remotes/{}/{}", module, branch)) .ok(); } else { return None; } } // force push of the new revision-object to temp repo pub fn transfer(&self, rev: &str, source: &Path) -> Object { // TODO: implement using libgit let target = &self.repo.path(); let shell = Shell { cwd: source.to_path_buf() }; shell.command(&format!("git update-ref {} {}", TMP_NAME, rev)); shell.command(&format!("git push --force {} {}", &target.to_string_lossy(), TMP_NAME)); let obj = self.repo.revparse_single(rev).expect("can't find transfered ref"); return obj; } // takes everything from base except it's tree and replaces it with the tree // given pub fn rewrite(&self, base: &Commit, parents: &[&Commit], tree: &Tree) -> Oid { if parents.len() == 0 { ::std::fs::remove_file(self.repo.path().join("HEAD")).expect("can't remove HEAD"); } else { self.repo.set_head_detached(parents[0].id()).expect("rewrite: can't detach head"); } self.repo 
// NOTE(review): this region is a flattened dump; only line breaks and comments
// are added below — all code tokens are unchanged.
// Tail of rewrite(): commit the replacement tree with `base`'s author,
// committer and message onto the previously detached HEAD.
.commit(Some("HEAD"), &base.author(), &base.committer(), &base.message().unwrap_or("no message"), tree, parents) .expect("rewrite: can't commit") }
// Push `oid` to `target` on `module`'s remote by shelling out to `git push`;
// returns the shell output (also logged via debug!).
pub fn push(&self, oid: Oid, module: &str, target: &str) -> String { let commit = &self.repo.find_commit(oid).expect("can't find commit"); self.repo.set_head_detached(commit.id()).expect("can't detach HEAD"); let cmd = format!("git push {} HEAD:{}", self.host.remote_url(module), target); let shell = Shell { cwd: self.repo.path().to_path_buf() }; let output = shell.command(&cmd); debug!("push: {}\n{}", cmd, output); format!("{}", output) }
// Look up the subtree of `tree` at `path`, if both the entry and the tree object exist.
fn subtree(&self, tree: &Tree, path: &Path) -> Option<Tree> { if let Some(oid) = tree.get_path(path).map(|x| x.id()).ok() { return self.repo.find_tree(oid).ok(); } else { return None; } }
// Return a copy of `full_tree` with the direct child entry `child` replaced by `subtree`.
fn replace_child(&self, child: &Path, subtree: Oid, full_tree: &Tree) -> Tree { let full_tree_id = { let mut builder = self.repo .treebuilder(Some(&full_tree)) .expect("replace_child: can't create treebuilder"); builder.insert(child, subtree, 0o0040000) // GIT_FILEMODE_TREE
 .expect("replace_child: can't insert tree"); builder.write().expect("replace_child: can't write tree") }; return self.repo.find_tree(full_tree_id).expect("replace_child: can't find new tree"); }
// Recursively replace the subtree at (possibly nested) `path` inside `full_tree`,
// rebuilding each ancestor tree on the way up.
pub fn replace_subtree(&self, path: &Path, subtree: Oid, full_tree: &Tree) -> Tree { if path.components().count() == 1 { return self.replace_child(path, subtree, full_tree); } else { let name = Path::new(path.file_name().expect("no module name")); let path = path.parent().expect("module not in subdir"); let st = self.subtree(&full_tree, path).unwrap(); let tree = self.replace_child(name, subtree, &st); return self.replace_subtree(path, tree.id(), full_tree); } }
// Rewrite history so only the contents of subdirectory `module` remain
// (git-subtree-style split); walks oldest-first and maps old -> new commit ids.
// NOTE(review): the three near-identical parent-lookup branches below are
// deduplicated in the later version of this file via the CommitKind enum.
pub fn split_subdir(&self, module: &str, newrev: Oid) -> Option<Oid> { let walk = { let mut walk = self.repo.revwalk().expect("walk: can't create revwalk"); walk.set_sorting(SORT_REVERSE | SORT_TOPOLOGICAL); walk.push(newrev).expect("walk.push"); walk }; let mut map = HashMap::<Oid,
Oid>::new(); 'walk: for commit in walk { let commit = self.repo.find_commit(commit.unwrap()).unwrap(); let tree = commit.tree().expect("commit has no tree"); let new_tree = if let Ok(tree_entry) = tree.get_path(&Path::new(&module)) { self.repo.find_tree(tree_entry.id()).expect("central_submit: can't find tree") } else { continue 'walk; }; match commit.parents().count() { 2 => { let parent1 = commit.parents().nth(0).unwrap().id(); let parent2 = commit.parents().nth(1).unwrap().id(); if let (Some(&parent1), Some(&parent2)) = (map.get(&parent1), map.get(&parent2)) { let parent1 = self.repo.find_commit(parent1).unwrap(); let parent2 = self.repo.find_commit(parent2).unwrap(); map.insert(commit.id(), self.rewrite(&commit, &[&parent1, &parent2], &new_tree)); continue 'walk; } if let (Some(&parent), None) = (map.get(&parent1), map.get(&parent2)) { let parent = self.repo.find_commit(parent).unwrap(); if new_tree.id() == parent.tree().unwrap().id() { map.insert(commit.id(), parent.id()); continue 'walk; } map.insert(commit.id(), self.rewrite(&commit, &[&parent], &new_tree)); continue 'walk; } if let (None, Some(&parent)) = (map.get(&parent1), map.get(&parent2)) { let parent = self.repo.find_commit(parent).unwrap(); if new_tree.id() == parent.tree().unwrap().id() { map.insert(commit.id(), parent.id()); continue 'walk; } map.insert(commit.id(), self.rewrite(&commit, &[&parent], &new_tree)); continue 'walk; } } 1 => { let parent = commit.parents().nth(0).unwrap().id(); if let Some(&parent) = map.get(&parent) { let parent = self.repo.find_commit(parent).unwrap(); if new_tree.id() == parent.tree().unwrap().id() { map.insert(commit.id(), parent.id()); continue 'walk; } map.insert(commit.id(), self.rewrite(&commit, &[&parent], &new_tree)); continue 'walk; } } _ => {} } map.insert(commit.id(), self.rewrite(&commit, &[], &new_tree)); } return map.get(&newrev).map(|&id| id); }
// Recursively collect the slash-joined paths of all subdirectories of `tree`,
// skipping entries whose name starts with ".".
pub fn find_all_subdirs(&self, tree: &Tree) -> Vec<String> { let mut sd = vec![]; for item in tree { if let
Ok(st) = self.repo.find_tree(item.id()) { let name = item.name().unwrap(); if !name.starts_with(".") { sd.push(name.to_string()); for r in self.find_all_subdirs(&st) { sd.push(format!("{}/{}", name, r)); } } } } return sd; }
// Graft the history of `src` under subdirectory `path` of `dst`:
// every commit reachable from `src` is rewritten so its tree lives at `path`
// inside an otherwise-empty tree, then a final merge commit joins the two tips.
pub fn join(&self, dst: Oid, path: &Path, src: Oid) -> Oid { let dst = self.repo.find_commit(dst).unwrap(); let src = self.repo.find_commit(src).unwrap(); let signature = Signature::new("CentralGit", "cg@cg.com", &dst.committer().when()).unwrap(); let walk = { let mut walk = self.repo.revwalk().expect("walk: can't create revwalk"); walk.set_sorting(SORT_REVERSE | SORT_TOPOLOGICAL); walk.push(src.id()).expect("walk.push"); walk }; let empty = self.repo.find_tree(self.repo.treebuilder(None).unwrap().write().unwrap()).unwrap(); let mut map = HashMap::<Oid, Oid>::new(); 'walk: for commit in walk { let commit = self.repo.find_commit(commit.unwrap()).unwrap(); let tree = commit.tree().expect("commit has no tree"); let new_tree = self.replace_subtree(path, tree.id(), &empty); match commit.parents().count() { 2 => { let parent1 = commit.parents().nth(0).unwrap().id(); let parent2 = commit.parents().nth(1).unwrap().id(); if let (Some(&parent1), Some(&parent2)) = (map.get(&parent1), map.get(&parent2)) { let parent1 = self.repo.find_commit(parent1).unwrap(); let parent2 = self.repo.find_commit(parent2).unwrap(); let new = new_tree.id(); let p1 = parent1.tree().unwrap().id(); let p2 = parent2.tree().unwrap().id(); // NOTE(review): `new`, `p1`, `p2` above are computed but never used — dead locals.
 map.insert(commit.id(), self.rewrite(&commit, &[&parent1, &parent2], &new_tree)); continue 'walk; } } 1 => { // NOTE(review): unlike split_subdir, this unwrap() panics if the parent was not mapped.
 let parent = commit.parents().nth(0).unwrap().id(); let parent = *map.get(&parent).unwrap(); let parent = self.repo.find_commit(parent).unwrap(); map.insert(commit.id(), self.rewrite(&commit, &[&parent], &new_tree)); continue 'walk; } _ => {} } map.insert(commit.id(), self.rewrite(&commit, &[], &new_tree)); } let final_tree = self.replace_subtree(path, src.tree().unwrap().id(), &dst.tree().unwrap()); let parents = [&dst,
// Tail of join(): second merge parent is the rewritten `src` tip; commit the
// merged tree "repo_join" on a HEAD detached at `dst`.
&self.repo.find_commit(map[&src.id()]).unwrap()]; self.repo.set_head_detached(parents[0].id()).expect("join: can't detach head"); let join_commit = self.repo .commit(Some("HEAD"), &signature, &signature, "repo_join", &final_tree, &parents) .unwrap(); return join_commit; } }
// NOTE(review): the bare word below is commit-message residue from the dump
// ("cleanup" commit separating two versions of this file), not Rust code.
cleanup
// ---- second (post-"cleanup") version of the Scratch file begins here ----
extern crate git2;
// Temporary ref name used to transfer objects between repositories.
const TMP_NAME: &'static str = "refs/centralgit/tmp_fd2db5f8_bac2_4a1e_9487_4ac3414788aa"; use git2::*; use std::process::Command; use std::path::Path; use shell::Shell; use super::RepoHost; use std::collections::HashMap;
// Scratch: a bare working repository plus the host providing remote URLs.
pub struct Scratch<'a> { pub repo: Repository, pub host: &'a RepoHost, }
// Classification of a commit by how many of its parents were already rewritten.
enum CommitKind { Normal(Oid), Merge(Oid, Oid), Orphan, }
impl<'a> Scratch<'a> {
// Initialize (or reopen) a bare scratch repository at `path`.
pub fn new(path: &Path, host: &'a RepoHost) -> Scratch<'a> { Scratch { repo: Repository::init_bare(&path).expect("could not init scratch"), host: host, } }
// Fetch `module`'s branch into a per-module remote-tracking ref and resolve it;
// returns None when the fetch fails.
pub fn tracking(&self, module: &str, branch: &str) -> Option<Object> { let remote_name = format!("{}", module); let fetch_url = self.host.local_path(&module); let mut remote = if let Ok(remote) = self.repo.find_remote(&remote_name) { remote } else { debug!("==== create remote (remote_name:{}, remote_url:{})", &remote_name, &fetch_url); self.repo.remote(&remote_name, &fetch_url).expect("can't create remote") }; let rs = remote.get_refspec(0).unwrap().str().unwrap().to_string(); if let Ok(_) = remote.fetch(&[&rs], None, None) { return self.repo .revparse_single(&format!("remotes/{}/{}", module, branch)) .ok(); } else { return None; } }
// force push of the new revision-object to temp repo
pub fn transfer(&self, rev: &str, source: &Path) -> Object { // TODO: implement using libgit
let target = &self.repo.path(); let shell = Shell { cwd: source.to_path_buf() }; shell.command(&format!("git update-ref {} {}", TMP_NAME, rev)); shell.command(&format!("git push --force {} {}", &target.to_string_lossy(), TMP_NAME)); let obj = self.repo.revparse_single(rev).expect("can't find transfered ref"); return obj; }
// takes everything from base except it's tree and replaces it with the tree
// given; an empty parent list removes HEAD so the new commit becomes a root.
pub fn rewrite(&self, base: &Commit, parents: &[&Commit], tree: &Tree) -> Oid { if parents.len() == 0 { ::std::fs::remove_file(self.repo.path().join("HEAD")).expect("can't remove HEAD"); } else { self.repo.set_head_detached(parents[0].id()).expect("rewrite: can't detach head"); } self.repo .commit(Some("HEAD"), &base.author(), &base.committer(), &base.message().unwrap_or("no message"), tree, parents) .expect("rewrite: can't commit") }
// Push `oid` to `target` on `module`'s remote via `git push`; returns the output.
pub fn push(&self, oid: Oid, module: &str, target: &str) -> String { let commit = &self.repo.find_commit(oid).expect("can't find commit"); self.repo.set_head_detached(commit.id()).expect("can't detach HEAD"); let cmd = format!("git push {} HEAD:{}", self.host.remote_url(module), target); let shell = Shell { cwd: self.repo.path().to_path_buf() }; let output = shell.command(&cmd); debug!("push: {}\n{}", cmd, output); format!("{}", output) }
// Look up the subtree of `tree` at `path`, if any.
fn subtree(&self, tree: &Tree, path: &Path) -> Option<Tree> { if let Some(oid) = tree.get_path(path).map(|x| x.id()).ok() { return self.repo.find_tree(oid).ok(); } else { return None; } }
// Return a copy of `full_tree` with direct child `child` replaced by `subtree`.
fn replace_child(&self, child: &Path, subtree: Oid, full_tree: &Tree) -> Tree { let full_tree_id = { let mut builder = self.repo .treebuilder(Some(&full_tree)) .expect("replace_child: can't create treebuilder"); builder.insert(child, subtree, 0o0040000) // GIT_FILEMODE_TREE
 .expect("replace_child: can't insert tree"); builder.write().expect("replace_child: can't write tree") }; return self.repo.find_tree(full_tree_id).expect("replace_child: can't find new tree"); }
// Recursively replace the subtree at nested `path` inside `full_tree`.
pub fn replace_subtree(&self, path: &Path, subtree: Oid, full_tree: &Tree) -> Tree { if path.components().count() == 1 { return self.replace_child(path, subtree, full_tree); } else { let name = Path::new(path.file_name().expect("no module name")); let path = path.parent().expect("module not in subdir"); let st = self.subtree(&full_tree, path).unwrap(); let tree = self.replace_child(name, subtree, &st); return
self.replace_subtree(path, tree.id(), full_tree); } }
// Split out subdirectory `module` from the history ending at `newrev`.
// The inner match classifies each commit into a CommitKind (deduplicating the
// branch logic of the earlier version); the outer match performs the rewrite.
pub fn split_subdir(&self, module: &str, newrev: Oid) -> Option<Oid> { let walk = { let mut walk = self.repo.revwalk().expect("walk: can't create revwalk"); walk.set_sorting(SORT_REVERSE | SORT_TOPOLOGICAL); walk.push(newrev).expect("walk.push"); walk }; let mut map = HashMap::<Oid, Oid>::new(); 'walk: for commit in walk { let commit = self.repo.find_commit(commit.unwrap()).unwrap(); let tree = commit.tree().expect("commit has no tree"); let new_tree = if let Ok(tree_entry) = tree.get_path(&Path::new(&module)) { self.repo.find_tree(tree_entry.id()).expect("central_submit: can't find tree") } else { continue 'walk; }; match match commit.parents().count() { 2 => { let parent1 = commit.parents().nth(0).unwrap().id(); let parent2 = commit.parents().nth(1).unwrap().id(); match (map.get(&parent1), map.get(&parent2)) { (Some(&parent1), Some(&parent2)) => CommitKind::Merge(parent1, parent2), (Some(&parent), None) => CommitKind::Normal(parent), (None, Some(&parent)) => CommitKind::Normal(parent), _ => CommitKind::Orphan, } } 1 => { let parent = commit.parents().nth(0).unwrap().id(); match map.get(&parent) { Some(&parent) => CommitKind::Normal(parent), _ => CommitKind::Orphan, } } _ => CommitKind::Orphan, } { CommitKind::Merge(parent1, parent2) => { let parent1 = self.repo.find_commit(parent1).unwrap(); let parent2 = self.repo.find_commit(parent2).unwrap(); map.insert(commit.id(), self.rewrite(&commit, &[&parent1, &parent2], &new_tree)); continue 'walk; } CommitKind::Normal(parent) => { let parent = self.repo.find_commit(parent).unwrap(); if new_tree.id() == parent.tree().unwrap().id() { map.insert(commit.id(), parent.id()); continue 'walk; } map.insert(commit.id(), self.rewrite(&commit, &[&parent], &new_tree)); continue 'walk; } CommitKind::Orphan => { map.insert(commit.id(), self.rewrite(&commit, &[], &new_tree)); } } } return map.get(&newrev).map(|&id| id); }
// Recursively collect slash-joined subdirectory paths, skipping dot-entries.
pub fn find_all_subdirs(&self, tree: &Tree) -> Vec<String> {
let mut sd = vec![]; for item in tree { if let Ok(st) = self.repo.find_tree(item.id()) { let name = item.name().unwrap(); if !name.starts_with(".") { sd.push(name.to_string()); for r in self.find_all_subdirs(&st) { sd.push(format!("{}/{}", name, r)); } } } } return sd; }
// Graft `src`'s history under `path` of `dst` and merge the rewritten tip
// into `dst` with a "repo_join" commit.
pub fn join(&self, dst: Oid, path: &Path, src: Oid) -> Oid { let dst = self.repo.find_commit(dst).unwrap(); let src = self.repo.find_commit(src).unwrap(); let signature = Signature::new("CentralGit", "cg@cg.com", &dst.committer().when()).unwrap(); let walk = { let mut walk = self.repo.revwalk().expect("walk: can't create revwalk"); walk.set_sorting(SORT_REVERSE | SORT_TOPOLOGICAL); walk.push(src.id()).expect("walk.push"); walk }; let empty = self.repo.find_tree(self.repo.treebuilder(None).unwrap().write().unwrap()).unwrap(); let mut map = HashMap::<Oid, Oid>::new(); 'walk: for commit in walk { let commit = self.repo.find_commit(commit.unwrap()).unwrap(); let tree = commit.tree().expect("commit has no tree"); let new_tree = self.replace_subtree(path, tree.id(), &empty); match commit.parents().count() { 2 => { let parent1 = commit.parents().nth(0).unwrap().id(); let parent2 = commit.parents().nth(1).unwrap().id(); if let (Some(&parent1), Some(&parent2)) = (map.get(&parent1), map.get(&parent2)) { let parent1 = self.repo.find_commit(parent1).unwrap(); let parent2 = self.repo.find_commit(parent2).unwrap(); let new = new_tree.id(); let p1 = parent1.tree().unwrap().id(); let p2 = parent2.tree().unwrap().id(); // NOTE(review): `new`, `p1`, `p2` are unused dead locals.
 map.insert(commit.id(), self.rewrite(&commit, &[&parent1, &parent2], &new_tree)); continue 'walk; } } 1 => { let parent = commit.parents().nth(0).unwrap().id(); let parent = *map.get(&parent).unwrap(); let parent = self.repo.find_commit(parent).unwrap(); map.insert(commit.id(), self.rewrite(&commit, &[&parent], &new_tree)); continue 'walk; } _ => {} } map.insert(commit.id(), self.rewrite(&commit, &[], &new_tree)); } let final_tree = self.replace_subtree(path, src.tree().unwrap().id(),
&dst.tree().unwrap()); let parents = [&dst, &self.repo.find_commit(map[&src.id()]).unwrap()]; self.repo.set_head_detached(parents[0].id()).expect("join: can't detach head"); let join_commit = self.repo .commit(Some("HEAD"), &signature, &signature, "repo_join", &final_tree, &parents) .unwrap(); return join_commit; } }
// ---- Dora JIT "info" pass, first version (still tracking map_stores) ----
use std::cmp::max; use std::collections::HashMap; use dora_parser::ast::visit::*; use dora_parser::ast::Expr::*; use dora_parser::ast::Stmt::*; use dora_parser::ast::*; use crate::cpu::*; use crate::mem; use crate::semck::specialize::{specialize_for_call_type, specialize_type}; use crate::ty::{BuiltinType, TypeList, TypeParamId}; use crate::vm::{ Arg, CallSite, CallType, Fct, FctId, FctKind, FctParent, FctSrc, Intrinsic, NodeMap, Store, TraitId, VarId, VM, };
// Entry point: walk `fct`'s AST and fill `jit_info` with stack-frame layout,
// call-site and temp-slot information for the given type-parameter bindings.
pub fn generate<'a, 'ast: 'a>( vm: &'a VM<'ast>, fct: &Fct<'ast>, src: &'a FctSrc, jit_info: &'a mut JitInfo<'ast>, cls_type_params: &TypeList, fct_type_params: &TypeList, ) { let start = if fct.has_self() { 1 } else { 0 }; if let FctParent::Class(cls_id) = fct.parent { let cls = vm.classes.idx(cls_id); let cls = cls.read(); assert_eq!(cls_type_params.len(), cls.type_params.len()); } else { assert_eq!(cls_type_params.len(), 0); } assert_eq!(fct.type_params.len(), fct_type_params.len()); for ty in cls_type_params.iter() { assert!(ty.is_concrete_type(vm)); } for ty in fct_type_params.iter() { assert!(ty.is_concrete_type(vm)); } let mut ig = InfoGenerator { vm, fct, ast: fct.ast, src, jit_info, stacksize: 0, param_offset: PARAM_OFFSET, leaf: true, eh_return_value: None, eh_status: None, param_reg_idx: start, param_freg_idx: 0, cls_type_params, fct_type_params, }; ig.generate(); }
// Output of the info pass, consumed by the code generator.
pub struct JitInfo<'ast> { pub stacksize: i32, // size of local variables on stack
 pub leaf: bool, // false if fct calls other functions
 pub eh_return_value: Option<i32>, // stack slot for return value storage
 pub map_stores: NodeMap<Store>, pub map_csites: NodeMap<CallSite<'ast>>, pub map_offsets: NodeMap<i32>, pub map_var_offsets: HashMap<VarId, i32>, pub map_fors: NodeMap<ForInfo<'ast>>, pub map_templates: NodeMap<TemplateJitInfo<'ast>>, }
impl<'ast> JitInfo<'ast> {
// Storage decision for a node; defaults to a register when none was recorded.
pub fn get_store(&self, id: NodeId) -> Store { match self.map_stores.get(id) { Some(store) => *store, None => Store::Reg, } }
// Total aligned frame size computed by the pass.
pub fn stacksize(&self) -> i32 {
self.stacksize }
// Stack offset previously reserved for `var_id` (panics if missing).
pub fn offset(&self, var_id: VarId) -> i32 { *self .map_var_offsets .get(&var_id) .expect("no offset found for var") }
// Empty info, to be filled by generate().
pub fn new() -> JitInfo<'ast> { JitInfo { stacksize: 0, leaf: false, eh_return_value: None, map_stores: NodeMap::new(), map_csites: NodeMap::new(), map_offsets: NodeMap::new(), map_var_offsets: HashMap::new(), map_fors: NodeMap::new(), map_templates: NodeMap::new(), } } }
// AST visitor that accumulates the frame/call layout into `jit_info`.
struct InfoGenerator<'a, 'ast: 'a> { vm: &'a VM<'ast>, fct: &'a Fct<'ast>, src: &'a FctSrc, ast: &'ast Function, jit_info: &'a mut JitInfo<'ast>, stacksize: i32, eh_return_value: Option<i32>, eh_status: Option<i32>, param_offset: i32, leaf: bool, param_reg_idx: usize, param_freg_idx: usize, cls_type_params: &'a TypeList, fct_type_params: &'a TypeList, }
impl<'a, 'ast> Visitor<'ast> for InfoGenerator<'a, 'ast> {
// Assign caller-frame offsets to parameters that arrive on the stack.
fn visit_param(&mut self, p: &'ast Param) { let var = *self.src.map_vars.get(p.id).unwrap(); let ty = self.src.vars[var].ty; let ty = self.specialize_type(ty); let is_float = ty.is_float(); // only some parameters are passed in registers
 // these registers need to be stored into local variables
 if is_float && self.param_freg_idx < FREG_PARAMS.len() { self.param_freg_idx += 1; } else if !is_float && self.param_reg_idx < REG_PARAMS.len() { self.param_reg_idx += 1; // the rest of the parameters are already stored on the stack
 // just use the current offset
 } else { let var = &self.src.vars[var]; self.jit_info .map_var_offsets .insert(var.id, self.param_offset); // determine next `param_offset`
 self.param_offset = next_param_offset(self.param_offset, var.ty); } }
// do/for statements need extra stack slots before walking children.
fn visit_stmt(&mut self, s: &'ast Stmt) { match s { &StmtDo(ref r#try) => { self.reserve_stmt_do(r#try); } &StmtFor(ref sfor) => { self.reserve_stmt_for(sfor); } _ => {} } visit::walk_stmt(self, s); }
// Dispatch expression kinds that need temp/call-site bookkeeping.
fn visit_expr(&mut self, e: &'ast Expr) { match *e { ExprCall(ref expr) => self.expr_call(expr), ExprDelegation(ref expr) => self.expr_delegation(expr), ExprBin(ref expr) => self.expr_bin(expr),
ExprUn(ref expr) => self.expr_un(expr), ExprTypeParam(_) => unreachable!(), ExprTemplate(ref expr) => self.expr_template(expr), _ => visit::walk_expr(self, e), } } }
impl<'a, 'ast> InfoGenerator<'a, 'ast> {
// Run the walk and publish the aligned frame size / leaf flag.
fn generate(&mut self) { self.visit_fct(self.ast); self.jit_info.stacksize = mem::align_i32(self.stacksize, STACK_FRAME_ALIGNMENT as i32); self.jit_info.leaf = self.leaf; self.jit_info.eh_return_value = self.eh_return_value; }
// Reserve exception-handling slots for a do/finally statement.
fn reserve_stmt_do(&mut self, r#try: &'ast StmtDoType) { let ret = self.specialize_type(self.fct.return_type); if !ret.is_unit() { self.eh_return_value = Some( self.eh_return_value .unwrap_or_else(|| self.reserve_stack_slot(ret)), ); } if r#try.finally_block.is_some() { let offset = self.reserve_stack_slot(BuiltinType::Ptr); self.jit_info.map_offsets.insert(r#try.id, offset); } }
// Reserve the iterator slot and pre-build the three iterator-protocol calls
// (makeIterator / hasNext / next) for a for-loop.
fn reserve_stmt_for(&mut self, stmt: &'ast StmtForType) { let for_type_info = self.src.map_fors.get(stmt.id).unwrap(); // reserve stack slot for iterator
 let offset = self.reserve_stack_slot(for_type_info.iterator_type); self.jit_info.map_offsets.insert(stmt.id, offset); // build makeIterator() call
 let object_type = self.ty(stmt.expr.id()); let ctype = CallType::Method(object_type, for_type_info.make_iterator, TypeList::empty()); let args = vec![Arg::Expr(&stmt.expr, BuiltinType::Unit, 0)]; let make_iterator = self.build_call_site(&ctype, for_type_info.make_iterator, args); // build hasNext() call
 let ctype = CallType::Method( for_type_info.iterator_type, for_type_info.has_next, TypeList::empty(), ); let args = vec![Arg::Stack(offset, BuiltinType::Unit, 0)]; let has_next = self.build_call_site(&ctype, for_type_info.has_next, args); // build next() call
 let ctype = CallType::Method( for_type_info.iterator_type, for_type_info.next, TypeList::empty(), ); let args = vec![Arg::Stack(offset, BuiltinType::Unit, 0)]; let next = self.build_call_site(&ctype, for_type_info.next, args); self.jit_info.map_fors.insert( stmt.id, ForInfo { make_iterator, has_next, next, }, ); }
// Return the intrinsic backing this call, unless it's a self-recursive call.
fn get_intrinsic(&self, id: NodeId) -> Option<Intrinsic> { let call_type = self.src.map_calls.get(id).unwrap(); if let Some(intrinsic) = call_type.to_intrinsic() { return Some(intrinsic); } let fid = call_type.fct_id().unwrap(); // the function we compile right now is never an intrinsic
 if self.fct.id == fid { return None; } let fct = self.vm.fcts.idx(fid); let fct = fct.read(); match fct.kind { FctKind::Builtin(intr) => Some(intr), _ => None, } }
// Resolve the concrete callee for a call expression and record its call site;
// intrinsic calls only reserve argument temps (plus Error allocation for assert).
fn expr_call(&mut self, expr: &'ast ExprCallType) { if let Some(intrinsic) = self.get_intrinsic(expr.id) { self.reserve_args_call(expr); match intrinsic { Intrinsic::Assert => { let offset = self.reserve_stack_slot(BuiltinType::Ptr); let cls_id = self.vm.vips.error_class; let cls = self.vm.classes.idx(cls_id); let cls = cls.read(); let selfie_offset = self.reserve_stack_slot(cls.ty); let args = vec![ Arg::SelfieNew(cls.ty, selfie_offset), Arg::Stack(offset, BuiltinType::Ptr, 0), ]; self.universal_call(expr.id, args, cls.constructor); } _ => {} }; return; } let call_type = self.src.map_calls.get(expr.id).unwrap().clone(); let mut args = expr .args .iter() .map(|arg| Arg::Expr(arg, BuiltinType::Unit, 0)) .collect::<Vec<_>>(); let callee_id = match *call_type { CallType::Ctor(_, fid, _) | CallType::CtorNew(_, fid, _) => { let ty = self.ty(expr.id); let arg = if call_type.is_ctor() { Arg::Selfie(ty, 0) } else { Arg::SelfieNew(ty, 0) }; args.insert(0, arg); fid } CallType::Method(_, fct_id, _) => { let object = expr.object().unwrap(); args.insert(0, Arg::Expr(object, BuiltinType::Unit, 0)); let fct = self.vm.fcts.idx(fct_id); let fct = fct.read(); if fct.parent.is_trait() { // This happens for calls like (T: SomeTrait).method()
 // Find the exact method that is called
 let trait_id = fct.trait_id(); let object_type = match *call_type { CallType::Method(ty, _, _) => ty, _ => unreachable!(), }; let object_type = self.specialize_type(object_type); self.find_trait_impl(fct_id, trait_id, object_type) } else { fct_id } }
CallType::Fct(fid, _, _) => fid, CallType::Expr(_, fid) => { let object = &expr.callee; let ty = self.ty(object.id()); args.insert(0, Arg::Expr(object, ty, 0)); fid } CallType::TraitStatic(tp_id, trait_id, trait_fct_id) => { let list_id = match tp_id { TypeParamId::Fct(list_id) => list_id, TypeParamId::Class(_) => unimplemented!(), }; let ty = self.fct_type_params[list_id.idx()]; let cls_id = ty.cls_id(self.vm).expect("no cls_id for type"); let cls = self.vm.classes.idx(cls_id); let cls = cls.read(); let mut impl_fct_id: Option<FctId> = None; for &impl_id in &cls.impls { let ximpl = self.vm.impls[impl_id].read(); if ximpl.trait_id != Some(trait_id) { continue; } for &fid in &ximpl.methods { let method = self.vm.fcts.idx(fid); let method = method.read(); if method.impl_for == Some(trait_fct_id) { impl_fct_id = Some(fid); break; } } } impl_fct_id.expect("no impl_fct_id found") } CallType::Trait(_, _) => unimplemented!(), CallType::Intrinsic(_) => unreachable!(), }; let callee = self.vm.fcts.idx(callee_id); let callee = callee.read(); if let FctKind::Builtin(_) = callee.kind { self.reserve_args_call(expr); return; } self.universal_call(expr.id, args, Some(callee_id)); }
// Visit each argument (and receiver / callee expression) and give it a temp slot.
fn reserve_args_call(&mut self, expr: &'ast ExprCallType) { for arg in &expr.args { self.visit_expr(arg); self.reserve_temp_for_node(arg); } let call_type = self.src.map_calls.get(expr.id).unwrap(); if call_type.is_method() { let object = expr.object().unwrap(); self.visit_expr(object); self.reserve_temp_for_node(object); } else if call_type.is_expr() { self.visit_expr(&expr.callee); self.reserve_temp_for_node(&expr.callee); } }
// Find the impl method on `object_type`'s class that implements trait method `fct_id`.
fn find_trait_impl(&self, fct_id: FctId, trait_id: TraitId, object_type: BuiltinType) -> FctId { let cls_id = object_type.cls_id(self.vm).unwrap(); let cls = self.vm.classes.idx(cls_id); let cls = cls.read(); for &impl_id in &cls.impls { let ximpl = self.vm.impls[impl_id].read(); if ximpl.trait_id() != trait_id { continue; } for &mtd_id in &ximpl.methods { let mtd =
// Tail of find_trait_impl(): match the impl method whose impl_for is the trait fn.
self.vm.fcts.idx(mtd_id); let mtd = mtd.read(); if mtd.impl_for == Some(fct_id) { return mtd_id; } } } panic!("no impl found for generic trait call") }
// Record a call site for a constructor delegation (self/super(...)).
fn expr_delegation(&mut self, expr: &'ast ExprDelegationType) { let mut args = expr .args .iter() .map(|arg| Arg::Expr(arg, BuiltinType::Unit, 0)) .collect::<Vec<_>>(); let cls = self.ty(expr.id); args.insert(0, Arg::Selfie(cls, 0)); self.universal_call(expr.id, args, None); }
// Build and store the CallSite for node `id`; callee defaults to the mapped call's fct.
fn universal_call(&mut self, id: NodeId, args: Vec<Arg<'ast>>, callee_id: Option<FctId>) { let call_type = self.src.map_calls.get(id).unwrap().clone(); let callee_id = if let Some(callee_id) = callee_id { callee_id } else { call_type.fct_id().unwrap() }; let csite = self.build_call_site(&*call_type, callee_id, args); // remember args
 self.jit_info.map_csites.insert_or_replace(id, csite); }
// Assemble a CallSite: argument slots/types, stack size and type parameters.
fn build_call_site( &mut self, call_type: &CallType, callee_id: FctId, args: Vec<Arg<'ast>>, ) -> CallSite<'ast> { // function invokes another function
 self.leaf = false; let callee = self.vm.fcts.idx(callee_id); let callee = callee.read(); let (args, return_type, super_call) = self.determine_call_args_and_types(&*call_type, &*callee, args); let (cls_type_params, fct_type_params) = self.determine_call_type_params(&*call_type); let argsize = self.determine_call_stack(&args); CallSite { callee: callee_id, args, argsize, cls_type_params, fct_type_params, super_call, return_type, } }
// Specialize each argument's type, reserve its slot, and detect super-calls.
fn determine_call_args_and_types( &mut self, call_type: &CallType, callee: &Fct<'ast>, args: Vec<Arg<'ast>>, ) -> (Vec<Arg<'ast>>, BuiltinType, bool) { let mut super_call = false; assert!(callee.params_with_self().len() == args.len()); let args = args .iter() .enumerate() .map(|(ind, arg)| { let ty = callee.params_with_self()[ind]; let ty = self.specialize_type(specialize_for_call_type(call_type, ty, self.vm)); let offset = self.reserve_stack_slot(ty); match *arg { Arg::Expr(ast, _, _) => { if ind == 0 && ast.is_super() { super_call = true; } Arg::Expr(ast, ty, offset) }
Arg::Stack(soffset, _, _) => Arg::Stack(soffset, ty, offset), Arg::SelfieNew(cid, _) => Arg::SelfieNew(cid, offset), Arg::Selfie(cid, _) => Arg::Selfie(cid, offset), } }) .collect::<Vec<_>>(); let return_type = self.specialize_type(specialize_for_call_type( call_type, callee.return_type, self.vm, )); (args, return_type, super_call) }
// Extract class/function type-parameter lists for the given call type.
fn determine_call_type_params(&mut self, call_type: &CallType) -> (TypeList, TypeList) { let cls_type_params; let fct_type_params; match *call_type { CallType::Ctor(_, _, ref type_params) | CallType::CtorNew(_, _, ref type_params) => { cls_type_params = type_params.clone(); fct_type_params = TypeList::empty(); } CallType::Method(ty, _, ref type_params) => { let ty = self.specialize_type(ty); cls_type_params = ty.type_params(self.vm); fct_type_params = type_params.clone(); } CallType::Fct(_, ref cls_tps, ref fct_tps) => { cls_type_params = cls_tps.clone(); fct_type_params = fct_tps.clone(); } CallType::Expr(ty, _) => { let ty = self.specialize_type(ty); cls_type_params = ty.type_params(self.vm); fct_type_params = TypeList::empty(); } CallType::Trait(_, _) => unimplemented!(), CallType::TraitStatic(_, _, _) => { cls_type_params = TypeList::empty(); fct_type_params = TypeList::empty(); } CallType::Intrinsic(_) => unreachable!(), } (cls_type_params, fct_type_params) }
// Visit argument expressions and compute the 16-byte-aligned outgoing-args size
// for whatever does not fit into parameter registers.
fn determine_call_stack(&mut self, args: &[Arg<'ast>]) -> i32 { let mut reg_args: i32 = 0; let mut freg_args: i32 = 0; for arg in args { match *arg { Arg::Expr(ast, ty, _) => { self.visit_expr(ast); if ty.is_float() { freg_args += 1; } else { reg_args += 1; } } Arg::Stack(_, ty, _) | Arg::Selfie(ty, _) | Arg::SelfieNew(ty, _) => { if ty.is_float() { freg_args += 1; } else { reg_args += 1; } } } } // some register are reserved on stack
 let args_on_stack = max(0, reg_args - REG_PARAMS.len() as i32) + max(0, freg_args - FREG_PARAMS.len() as i32); mem::align_i32(mem::ptr_width() * args_on_stack, 16) }
// Reserve temps for assignments: indexed stores, ident stores and field stores.
fn expr_assign(&mut self, e: &'ast ExprBinType) { let call_type =
self.src.map_calls.get(e.id); if call_type.is_some() { let call_expr = e.lhs.to_call().unwrap(); let object = &call_expr.callee; let index = &call_expr.args[0]; let value = &e.rhs; if let Some(_) = self.get_intrinsic(e.id) { self.visit_expr(object); self.visit_expr(index); self.visit_expr(value); self.reserve_temp_for_node(object); self.reserve_temp_for_node(index); let element_type = self.ty(object.id()).type_params(self.vm)[0]; self.reserve_temp_for_node_with_type(e.rhs.id(), element_type); } else { let args = vec![ Arg::Expr(object, BuiltinType::Unit, 0), Arg::Expr(index, BuiltinType::Unit, 0), Arg::Expr(value, BuiltinType::Unit, 0), ]; self.universal_call(e.id, args, None); } } else if e.lhs.is_ident() { self.visit_expr(&e.rhs); let lhs = e.lhs.to_ident().unwrap(); let field = self.src.map_idents.get(lhs.id).unwrap().is_field(); if field { self.reserve_temp_for_node_with_type(lhs.id, BuiltinType::Ptr); } } else { // e.lhs is a field
 let lhs = e.lhs.to_dot().unwrap(); self.visit_expr(&lhs.lhs); self.visit_expr(&e.rhs); self.reserve_temp_for_node(&lhs.lhs); self.reserve_temp_for_node(&e.rhs); } }
// Binary expressions: assignments, identity compares, short-circuit ops,
// intrinsics, or a full call site for operator overloads.
fn expr_bin(&mut self, expr: &'ast ExprBinType) { if expr.op.is_any_assign() { self.expr_assign(expr); return; } let lhs_ty = self.ty(expr.lhs.id()); let rhs_ty = self.ty(expr.rhs.id()); if expr.op == BinOp::Cmp(CmpOp::Is) || expr.op == BinOp::Cmp(CmpOp::IsNot) { self.visit_expr(&expr.lhs); self.visit_expr(&expr.rhs); self.reserve_temp_for_node_with_type(expr.lhs.id(), BuiltinType::Ptr); } else if expr.op == BinOp::Or || expr.op == BinOp::And { self.visit_expr(&expr.lhs); self.visit_expr(&expr.rhs); // no temporaries needed
 } else if let Some(_) = self.get_intrinsic(expr.id) { self.visit_expr(&expr.lhs); self.visit_expr(&expr.rhs); self.reserve_temp_for_node(&expr.lhs); } else { let args = vec![ Arg::Expr(&expr.lhs, lhs_ty, 0), Arg::Expr(&expr.rhs, rhs_ty, 0), ]; let fid = self.src.map_calls.get(expr.id).unwrap().fct_id().unwrap(); self.universal_call(expr.id, args,
Some(fid)); } }
// Unary expressions: intrinsic ops need no temps, otherwise a call site.
fn expr_un(&mut self, expr: &'ast ExprUnType) { if let Some(_) = self.get_intrinsic(expr.id) { // no temporaries needed
 self.visit_expr(&expr.opnd); } else { let args = vec![Arg::Expr(&expr.opnd, BuiltinType::Unit, 0)]; self.universal_call(expr.id, args, None); } }
// String templates: pre-build StringBuffer::empty/append/toString call sites
// plus an optional toString() for each non-String interpolated part.
fn expr_template(&mut self, expr: &'ast ExprTemplateType) { let string_buffer_offset = self.reserve_stack_slot(BuiltinType::Ptr); let string_part_offset = self.reserve_stack_slot(BuiltinType::Ptr); // build StringBuffer::empty() call
 let fct_id = self.vm.vips.fct.string_buffer_empty; let ctype = CallType::Fct(fct_id, TypeList::empty(), TypeList::empty()); let string_buffer_new = self.build_call_site(&ctype, fct_id, Vec::new()); let mut part_infos = Vec::new(); for part in &expr.parts { let mut object_offset = None; let mut to_string = None; if !part.is_lit_str() { self.visit_expr(part); let ty = self.ty(part.id()); if ty.cls_id(self.vm) != Some(self.vm.vips.string_class) { // build toString() call
 let offset = self.reserve_stack_slot(ty); object_offset = Some(offset); let cls_id = ty.cls_id(self.vm).expect("no cls_id found for type"); let cls = self.vm.classes.idx(cls_id); let cls = cls.read(); let name = self.vm.interner.intern("toString"); let to_string_id = cls .find_trait_method(self.vm, self.vm.vips.stringable_trait, name, false) .expect("toString() method not found"); let ctype = CallType::Method(ty, to_string_id, TypeList::empty()); let args = vec![Arg::Stack(offset, ty, 0)]; to_string = Some(self.build_call_site(&ctype, to_string_id, args)); } } // build StringBuffer::append() call
 let fct_id = self.vm.vips.fct.string_buffer_append; let ty = BuiltinType::from_cls(self.vm.vips.cls.string_buffer, self.vm); let ctype = CallType::Method(ty, fct_id, TypeList::empty()); let args = vec![ Arg::Stack(string_buffer_offset, BuiltinType::Ptr, 0), Arg::Stack(string_part_offset, BuiltinType::Ptr, 0), ]; let append = self.build_call_site(&ctype, fct_id, args); part_infos.push(TemplatePartJitInfo
{ object_offset, to_string, append, }); } // build StringBuffer::toString() call
 let fct_id = self.vm.vips.fct.string_buffer_to_string; let ty = BuiltinType::from_cls(self.vm.vips.cls.string_buffer, self.vm); let ctype = CallType::Method(ty, fct_id, TypeList::empty()); let args = vec![Arg::Stack(string_buffer_offset, BuiltinType::Ptr, 0)]; let string_buffer_to_string = self.build_call_site(&ctype, fct_id, args); self.jit_info.map_templates.insert( expr.id, TemplateJitInfo { string_buffer_offset, string_part_offset, string_buffer_new, part_infos, string_buffer_to_string, }, ); }
// Temp-slot helpers: reserve a slot typed from the node, an expression, or Ptr.
fn reserve_temp_for_node_id(&mut self, id: NodeId) -> i32 { let ty = self.ty(id); self.reserve_temp_for_node_with_type(id, ty) }
fn reserve_temp_for_node(&mut self, expr: &Expr) -> i32 { let ty = self.ty(expr.id()); self.reserve_temp_for_node_with_type(expr.id(), ty) }
fn reserve_temp_for_ctor(&mut self, id: NodeId) -> i32 { self.reserve_temp_for_node_with_type(id, BuiltinType::Ptr) }
fn reserve_temp_for_node_with_type(&mut self, id: NodeId, ty: BuiltinType) -> i32 { let offset = self.reserve_stack_slot(ty); self.jit_info .map_stores .insert_or_replace(id, Store::Temp(offset, ty)); offset }
// Grow the frame by an aligned slot for `ty`; offsets are negative (below fp).
fn reserve_stack_slot(&mut self, ty: BuiltinType) -> i32 { let (ty_size, ty_align) = if ty.is_nil() { (mem::ptr_width(), mem::ptr_width()) } else { (ty.size(self.vm), ty.align(self.vm)) }; self.stacksize = mem::align_i32(self.stacksize, ty_align) + ty_size; -self.stacksize }
// Node type with this function's type parameters substituted in.
fn ty(&self, id: NodeId) -> BuiltinType { let ty = self.src.ty(id); self.specialize_type(ty) }
fn specialize_type(&self, ty: BuiltinType) -> BuiltinType { let result = specialize_type(self.vm, ty, &self.cls_type_params, &self.fct_type_params); assert!(result.is_concrete_type(self.vm)); result } }
// Pre-built call sites for the three for-loop iterator-protocol calls.
#[derive(Clone)] pub struct ForInfo<'ast> { pub make_iterator: CallSite<'ast>, pub has_next: CallSite<'ast>, pub next: CallSite<'ast>, }
// Pre-built call sites and slots for a string-template expression.
#[derive(Clone)] pub struct TemplateJitInfo<'ast> { pub string_buffer_offset: i32, pub
// Tail of TemplateJitInfo, per-part info struct, and the pass's unit tests.
string_part_offset: i32, pub string_buffer_new: CallSite<'ast>, pub part_infos: Vec<TemplatePartJitInfo<'ast>>, pub string_buffer_to_string: CallSite<'ast>, }
#[derive(Clone)] pub struct TemplatePartJitInfo<'ast> { pub object_offset: Option<i32>, pub to_string: Option<CallSite<'ast>>, pub append: CallSite<'ast>, }
#[cfg(test)] mod tests { use super::*; use crate::os; use crate::test; use crate::vm::*;
// Helper: compile `code`, run the info pass on fn "f", hand results to `f`.
fn info<F>(code: &'static str, f: F) where F: FnOnce(&FctSrc, &JitInfo), { os::init_page_size(); test::parse(code, |vm| { let fid = vm.fct_by_name("f").unwrap(); let fct = vm.fcts.idx(fid); let fct = fct.read(); let src = fct.src(); let mut src = src.write(); let mut jit_info = JitInfo::new(); let empty = TypeList::empty(); generate(vm, &fct, &mut src, &mut jit_info, &empty, &empty); f(&src, &jit_info); }); }
#[test] fn test_tempsize() { info("fun f() { 1+2*3; }", |_, jit_info| { assert_eq!(16, jit_info.stacksize); }); info("fun f() { 2*3+4+5; }", |_, jit_info| { assert_eq!(16, jit_info.stacksize); }); info("fun f() { 1+(2+(3+4)); }", |_, jit_info| { assert_eq!(16, jit_info.stacksize); }) }
#[test] fn test_tempsize_for_fct_call() { info( "fun f() { g(1,2,3,4,5,6); } fun g(a:Int, b:Int, c:Int, d:Int, e:Int, f:Int) {}", |_, jit_info| { assert_eq!(32, jit_info.stacksize); }, ); info( "fun f() { g(1,2,3,4,5,6,7,8); } fun g(a:Int, b:Int, c:Int, d:Int, e:Int, f:Int, g:Int, h:Int) {}", |_, jit_info| { assert_eq!(32, jit_info.stacksize); }, ); info( "fun f() { g(1,2,3,4,5,6,7,8)+(1+2); } fun g(a:Int, b:Int, c:Int, d:Int, e:Int, f:Int, g:Int, h:Int) -> Int { return 0; }", |_, jit_info| { assert_eq!(48, jit_info.stacksize); }, ); }
#[test] fn test_invocation_flag() { info("fun f() { g(); } fun g() { }", |_, jit_info| { assert!(!jit_info.leaf); }); info("fun f() { }", |_, jit_info| { assert!(jit_info.leaf); }); } }
// NOTE(review): the text below is commit-message residue from the dump
// ("codegen: remove map_stores in info"), not Rust code. It separates the
// second version of this file, in which JitInfo no longer carries
// map_stores/get_store and the `Store` import is dropped accordingly.
codegen: remove map_stores in info
use std::cmp::max; use std::collections::HashMap; use dora_parser::ast::visit::*; use dora_parser::ast::Expr::*; use
dora_parser::ast::Stmt::*; use dora_parser::ast::*; use crate::cpu::*; use crate::mem; use crate::semck::specialize::{specialize_for_call_type, specialize_type}; use crate::ty::{BuiltinType, TypeList, TypeParamId}; use crate::vm::{ Arg, CallSite, CallType, Fct, FctId, FctKind, FctParent, FctSrc, Intrinsic, NodeMap, TraitId, VarId, VM, };
// Entry point of the info pass (second version; identical behavior, minus
// the removed map_stores tracking).
pub fn generate<'a, 'ast: 'a>( vm: &'a VM<'ast>, fct: &Fct<'ast>, src: &'a FctSrc, jit_info: &'a mut JitInfo<'ast>, cls_type_params: &TypeList, fct_type_params: &TypeList, ) { let start = if fct.has_self() { 1 } else { 0 }; if let FctParent::Class(cls_id) = fct.parent { let cls = vm.classes.idx(cls_id); let cls = cls.read(); assert_eq!(cls_type_params.len(), cls.type_params.len()); } else { assert_eq!(cls_type_params.len(), 0); } assert_eq!(fct.type_params.len(), fct_type_params.len()); for ty in cls_type_params.iter() { assert!(ty.is_concrete_type(vm)); } for ty in fct_type_params.iter() { assert!(ty.is_concrete_type(vm)); } let mut ig = InfoGenerator { vm, fct, ast: fct.ast, src, jit_info, stacksize: 0, param_offset: PARAM_OFFSET, leaf: true, eh_return_value: None, eh_status: None, param_reg_idx: start, param_freg_idx: 0, cls_type_params, fct_type_params, }; ig.generate(); }
// Pass output (no map_stores in this version).
pub struct JitInfo<'ast> { pub stacksize: i32, // size of local variables on stack
 pub leaf: bool, // false if fct calls other functions
 pub eh_return_value: Option<i32>, // stack slot for return value storage
 pub map_csites: NodeMap<CallSite<'ast>>, pub map_offsets: NodeMap<i32>, pub map_var_offsets: HashMap<VarId, i32>, pub map_fors: NodeMap<ForInfo<'ast>>, pub map_templates: NodeMap<TemplateJitInfo<'ast>>, }
impl<'ast> JitInfo<'ast> { pub fn stacksize(&self) -> i32 { self.stacksize } pub fn offset(&self, var_id: VarId) -> i32 { *self .map_var_offsets .get(&var_id) .expect("no offset found for var") } pub fn new() -> JitInfo<'ast> { JitInfo { stacksize: 0, leaf: false, eh_return_value: None, map_csites: NodeMap::new(), map_offsets: NodeMap::new(),
map_var_offsets: HashMap::new(), map_fors: NodeMap::new(), map_templates: NodeMap::new(), } } }
// AST visitor accumulating frame/call layout (second version).
struct InfoGenerator<'a, 'ast: 'a> { vm: &'a VM<'ast>, fct: &'a Fct<'ast>, src: &'a FctSrc, ast: &'ast Function, jit_info: &'a mut JitInfo<'ast>, stacksize: i32, eh_return_value: Option<i32>, eh_status: Option<i32>, param_offset: i32, leaf: bool, param_reg_idx: usize, param_freg_idx: usize, cls_type_params: &'a TypeList, fct_type_params: &'a TypeList, }
impl<'a, 'ast> Visitor<'ast> for InfoGenerator<'a, 'ast> {
// Assign caller-frame offsets to stack-passed parameters.
fn visit_param(&mut self, p: &'ast Param) { let var = *self.src.map_vars.get(p.id).unwrap(); let ty = self.src.vars[var].ty; let ty = self.specialize_type(ty); let is_float = ty.is_float(); // only some parameters are passed in registers
 // these registers need to be stored into local variables
 if is_float && self.param_freg_idx < FREG_PARAMS.len() { self.param_freg_idx += 1; } else if !is_float && self.param_reg_idx < REG_PARAMS.len() { self.param_reg_idx += 1; // the rest of the parameters are already stored on the stack
 // just use the current offset
 } else { let var = &self.src.vars[var]; self.jit_info .map_var_offsets .insert(var.id, self.param_offset); // determine next `param_offset`
 self.param_offset = next_param_offset(self.param_offset, var.ty); } }
fn visit_stmt(&mut self, s: &'ast Stmt) { match s { &StmtDo(ref r#try) => { self.reserve_stmt_do(r#try); } &StmtFor(ref sfor) => { self.reserve_stmt_for(sfor); } _ => {} } visit::walk_stmt(self, s); }
fn visit_expr(&mut self, e: &'ast Expr) { match *e { ExprCall(ref expr) => self.expr_call(expr), ExprDelegation(ref expr) => self.expr_delegation(expr), ExprBin(ref expr) => self.expr_bin(expr), ExprUn(ref expr) => self.expr_un(expr), ExprTypeParam(_) => unreachable!(), ExprTemplate(ref expr) => self.expr_template(expr), _ => visit::walk_expr(self, e), } } }
impl<'a, 'ast> InfoGenerator<'a, 'ast> {
// Run the walk and publish the aligned frame size (cut off at chunk boundary).
fn generate(&mut self) { self.visit_fct(self.ast); self.jit_info.stacksize = mem::align_i32(self.stacksize,
STACK_FRAME_ALIGNMENT as i32);
        self.jit_info.leaf = self.leaf;
        self.jit_info.eh_return_value = self.eh_return_value;
    }

    /// Reserves the stack slots needed by a `do` statement: one slot for the
    /// function's return value (a single slot shared by all `do` blocks via
    /// `eh_return_value`) and, when a `finally` block exists, one
    /// pointer-sized slot recorded in `map_offsets` under the statement id.
    fn reserve_stmt_do(&mut self, r#try: &'ast StmtDoType) {
        let ret = self.specialize_type(self.fct.return_type);

        if !ret.is_unit() {
            // Lazily allocate the shared return-value slot on first use.
            self.eh_return_value = Some(
                self.eh_return_value
                    .unwrap_or_else(|| self.reserve_stack_slot(ret)),
            );
        }

        if r#try.finally_block.is_some() {
            let offset = self.reserve_stack_slot(BuiltinType::Ptr);
            self.jit_info.map_offsets.insert(r#try.id, offset);
        }
    }

    /// Prepares a `for` loop: reserves a stack slot for the iterator object
    /// and builds call sites for the three iterator-protocol methods
    /// (`makeIterator()`, `hasNext()`, `next()`), storing them in `map_fors`.
    fn reserve_stmt_for(&mut self, stmt: &'ast StmtForType) {
        let for_type_info = self.src.map_fors.get(stmt.id).unwrap();

        // reserve stack slot for iterator
        let offset = self.reserve_stack_slot(for_type_info.iterator_type);
        self.jit_info.map_offsets.insert(stmt.id, offset);

        // build makeIterator() call on the iterated expression
        let object_type = self.ty(stmt.expr.id());
        let ctype = CallType::Method(object_type, for_type_info.make_iterator, TypeList::empty());
        let args = vec![Arg::Expr(&stmt.expr, BuiltinType::Unit, 0)];
        let make_iterator = self.build_call_site(&ctype, for_type_info.make_iterator, args);

        // build hasNext() call on the iterator stored at `offset`
        let ctype = CallType::Method(
            for_type_info.iterator_type,
            for_type_info.has_next,
            TypeList::empty(),
        );
        let args = vec![Arg::Stack(offset, BuiltinType::Unit, 0)];
        let has_next = self.build_call_site(&ctype, for_type_info.has_next, args);

        // build next() call on the iterator stored at `offset`
        let ctype = CallType::Method(
            for_type_info.iterator_type,
            for_type_info.next,
            TypeList::empty(),
        );
        let args = vec![Arg::Stack(offset, BuiltinType::Unit, 0)];
        let next = self.build_call_site(&ctype, for_type_info.next, args);

        self.jit_info.map_fors.insert(
            stmt.id,
            ForInfo {
                make_iterator,
                has_next,
                next,
            },
        );
    }

    /// Returns the intrinsic implemented by the function called at node `id`,
    /// if any: first checks the recorded call type itself, then the callee's
    /// `FctKind`. The function currently being compiled is never treated as
    /// an intrinsic.
    fn get_intrinsic(&self, id: NodeId) -> Option<Intrinsic> {
        let call_type = self.src.map_calls.get(id).unwrap();

        if let Some(intrinsic) = call_type.to_intrinsic() {
            return Some(intrinsic);
        }

        let fid = call_type.fct_id().unwrap();

        // the function we compile right now is never an intrinsic
        if self.fct.id == fid {
return None; } let fct = self.vm.fcts.idx(fid); let fct = fct.read(); match fct.kind { FctKind::Builtin(intr) => Some(intr), _ => None, } } fn expr_call(&mut self, expr: &'ast ExprCallType) { if let Some(intrinsic) = self.get_intrinsic(expr.id) { self.reserve_args_call(expr); match intrinsic { Intrinsic::Assert => { let offset = self.reserve_stack_slot(BuiltinType::Ptr); let cls_id = self.vm.vips.error_class; let cls = self.vm.classes.idx(cls_id); let cls = cls.read(); let selfie_offset = self.reserve_stack_slot(cls.ty); let args = vec![ Arg::SelfieNew(cls.ty, selfie_offset), Arg::Stack(offset, BuiltinType::Ptr, 0), ]; self.universal_call(expr.id, args, cls.constructor); } _ => {} }; return; } let call_type = self.src.map_calls.get(expr.id).unwrap().clone(); let mut args = expr .args .iter() .map(|arg| Arg::Expr(arg, BuiltinType::Unit, 0)) .collect::<Vec<_>>(); let callee_id = match *call_type { CallType::Ctor(_, fid, _) | CallType::CtorNew(_, fid, _) => { let ty = self.ty(expr.id); let arg = if call_type.is_ctor() { Arg::Selfie(ty, 0) } else { Arg::SelfieNew(ty, 0) }; args.insert(0, arg); fid } CallType::Method(_, fct_id, _) => { let object = expr.object().unwrap(); args.insert(0, Arg::Expr(object, BuiltinType::Unit, 0)); let fct = self.vm.fcts.idx(fct_id); let fct = fct.read(); if fct.parent.is_trait() { // This happens for calls like (T: SomeTrait).method() // Find the exact method that is called let trait_id = fct.trait_id(); let object_type = match *call_type { CallType::Method(ty, _, _) => ty, _ => unreachable!(), }; let object_type = self.specialize_type(object_type); self.find_trait_impl(fct_id, trait_id, object_type) } else { fct_id } } CallType::Fct(fid, _, _) => fid, CallType::Expr(_, fid) => { let object = &expr.callee; let ty = self.ty(object.id()); args.insert(0, Arg::Expr(object, ty, 0)); fid } CallType::TraitStatic(tp_id, trait_id, trait_fct_id) => { let list_id = match tp_id { TypeParamId::Fct(list_id) => list_id, TypeParamId::Class(_) => 
unimplemented!(), }; let ty = self.fct_type_params[list_id.idx()]; let cls_id = ty.cls_id(self.vm).expect("no cls_id for type"); let cls = self.vm.classes.idx(cls_id); let cls = cls.read(); let mut impl_fct_id: Option<FctId> = None; for &impl_id in &cls.impls { let ximpl = self.vm.impls[impl_id].read(); if ximpl.trait_id != Some(trait_id) { continue; } for &fid in &ximpl.methods { let method = self.vm.fcts.idx(fid); let method = method.read(); if method.impl_for == Some(trait_fct_id) { impl_fct_id = Some(fid); break; } } } impl_fct_id.expect("no impl_fct_id found") } CallType::Trait(_, _) => unimplemented!(), CallType::Intrinsic(_) => unreachable!(), }; let callee = self.vm.fcts.idx(callee_id); let callee = callee.read(); if let FctKind::Builtin(_) = callee.kind { self.reserve_args_call(expr); return; } self.universal_call(expr.id, args, Some(callee_id)); } fn reserve_args_call(&mut self, expr: &'ast ExprCallType) { for arg in &expr.args { self.visit_expr(arg); } let call_type = self.src.map_calls.get(expr.id).unwrap(); if call_type.is_method() { let object = expr.object().unwrap(); self.visit_expr(object); } else if call_type.is_expr() { self.visit_expr(&expr.callee); } } fn find_trait_impl(&self, fct_id: FctId, trait_id: TraitId, object_type: BuiltinType) -> FctId { let cls_id = object_type.cls_id(self.vm).unwrap(); let cls = self.vm.classes.idx(cls_id); let cls = cls.read(); for &impl_id in &cls.impls { let ximpl = self.vm.impls[impl_id].read(); if ximpl.trait_id() != trait_id { continue; } for &mtd_id in &ximpl.methods { let mtd = self.vm.fcts.idx(mtd_id); let mtd = mtd.read(); if mtd.impl_for == Some(fct_id) { return mtd_id; } } } panic!("no impl found for generic trait call") } fn expr_delegation(&mut self, expr: &'ast ExprDelegationType) { let mut args = expr .args .iter() .map(|arg| Arg::Expr(arg, BuiltinType::Unit, 0)) .collect::<Vec<_>>(); let cls = self.ty(expr.id); args.insert(0, Arg::Selfie(cls, 0)); self.universal_call(expr.id, args, None); } fn 
universal_call(&mut self, id: NodeId, args: Vec<Arg<'ast>>, callee_id: Option<FctId>) {
        // Builds a call site for node `id` and records it in `map_csites`.
        // When `callee_id` is None the callee comes from the recorded call type.
        let call_type = self.src.map_calls.get(id).unwrap().clone();

        let callee_id = if let Some(callee_id) = callee_id {
            callee_id
        } else {
            call_type.fct_id().unwrap()
        };

        let csite = self.build_call_site(&*call_type, callee_id, args);

        // remember args
        self.jit_info.map_csites.insert_or_replace(id, csite);
    }

    /// Assembles a `CallSite` for invoking `callee_id`: specializes argument
    /// and return types, resolves class/function type parameters, computes
    /// the stack space needed for spilled arguments, and marks this function
    /// as a non-leaf.
    fn build_call_site(
        &mut self,
        call_type: &CallType,
        callee_id: FctId,
        args: Vec<Arg<'ast>>,
    ) -> CallSite<'ast> {
        // function invokes another function
        self.leaf = false;

        let callee = self.vm.fcts.idx(callee_id);
        let callee = callee.read();

        let (args, return_type, super_call) =
            self.determine_call_args_and_types(&*call_type, &*callee, args);
        let (cls_type_params, fct_type_params) = self.determine_call_type_params(&*call_type);

        let argsize = self.determine_call_stack(&args);

        CallSite {
            callee: callee_id,
            args,
            argsize,
            cls_type_params,
            fct_type_params,
            super_call,
            return_type,
        }
    }

    /// Specializes each argument to the callee's declared parameter type,
    /// reserves a temporary stack slot per argument, and detects `super`
    /// calls (a `super` expression in argument position 0). Returns the
    /// rewritten arguments, the specialized return type, and the super flag.
    fn determine_call_args_and_types(
        &mut self,
        call_type: &CallType,
        callee: &Fct<'ast>,
        args: Vec<Arg<'ast>>,
    ) -> (Vec<Arg<'ast>>, BuiltinType, bool) {
        let mut super_call = false;

        assert!(callee.params_with_self().len() == args.len());

        let args = args
            .iter()
            .enumerate()
            .map(|(ind, arg)| {
                // Use the callee's parameter type, specialized for this call.
                let ty = callee.params_with_self()[ind];
                let ty = self.specialize_type(specialize_for_call_type(call_type, ty, self.vm));

                let offset = self.reserve_stack_slot(ty);

                match *arg {
                    Arg::Expr(ast, _, _) => {
                        if ind == 0 && ast.is_super() {
                            super_call = true;
                        }

                        Arg::Expr(ast, ty, offset)
                    }

                    Arg::Stack(soffset, _, _) => Arg::Stack(soffset, ty, offset),
                    Arg::SelfieNew(cid, _) => Arg::SelfieNew(cid, offset),
                    Arg::Selfie(cid, _) => Arg::Selfie(cid, offset),
                }
            })
            .collect::<Vec<_>>();

        let return_type = self.specialize_type(specialize_for_call_type(
            call_type,
            callee.return_type,
            self.vm,
        ));

        (args, return_type, super_call)
    }

    /// Determines the (class, function) type-parameter lists for a call.
    fn determine_call_type_params(&mut self, call_type: &CallType) -> (TypeList, TypeList) {
        let
cls_type_params; let fct_type_params; match *call_type { CallType::Ctor(_, _, ref type_params) | CallType::CtorNew(_, _, ref type_params) => { cls_type_params = type_params.clone(); fct_type_params = TypeList::empty(); } CallType::Method(ty, _, ref type_params) => { let ty = self.specialize_type(ty); cls_type_params = ty.type_params(self.vm); fct_type_params = type_params.clone(); } CallType::Fct(_, ref cls_tps, ref fct_tps) => { cls_type_params = cls_tps.clone(); fct_type_params = fct_tps.clone(); } CallType::Expr(ty, _) => { let ty = self.specialize_type(ty); cls_type_params = ty.type_params(self.vm); fct_type_params = TypeList::empty(); } CallType::Trait(_, _) => unimplemented!(), CallType::TraitStatic(_, _, _) => { cls_type_params = TypeList::empty(); fct_type_params = TypeList::empty(); } CallType::Intrinsic(_) => unreachable!(), } (cls_type_params, fct_type_params) } fn determine_call_stack(&mut self, args: &[Arg<'ast>]) -> i32 { let mut reg_args: i32 = 0; let mut freg_args: i32 = 0; for arg in args { match *arg { Arg::Expr(ast, ty, _) => { self.visit_expr(ast); if ty.is_float() { freg_args += 1; } else { reg_args += 1; } } Arg::Stack(_, ty, _) | Arg::Selfie(ty, _) | Arg::SelfieNew(ty, _) => { if ty.is_float() { freg_args += 1; } else { reg_args += 1; } } } } // some register are reserved on stack let args_on_stack = max(0, reg_args - REG_PARAMS.len() as i32) + max(0, freg_args - FREG_PARAMS.len() as i32); mem::align_i32(mem::ptr_width() * args_on_stack, 16) } fn expr_assign(&mut self, e: &'ast ExprBinType) { let call_type = self.src.map_calls.get(e.id); if call_type.is_some() { let call_expr = e.lhs.to_call().unwrap(); let object = &call_expr.callee; let index = &call_expr.args[0]; let value = &e.rhs; if let Some(_) = self.get_intrinsic(e.id) { self.visit_expr(object); self.visit_expr(index); self.visit_expr(value); } else { let args = vec![ Arg::Expr(object, BuiltinType::Unit, 0), Arg::Expr(index, BuiltinType::Unit, 0), Arg::Expr(value, BuiltinType::Unit, 
0), ]; self.universal_call(e.id, args, None); } } else if e.lhs.is_ident() { self.visit_expr(&e.rhs); } else { // e.lhs is a field let lhs = e.lhs.to_dot().unwrap(); self.visit_expr(&lhs.lhs); self.visit_expr(&e.rhs); } } fn expr_bin(&mut self, expr: &'ast ExprBinType) { if expr.op.is_any_assign() { self.expr_assign(expr); return; } let lhs_ty = self.ty(expr.lhs.id()); let rhs_ty = self.ty(expr.rhs.id()); if expr.op == BinOp::Cmp(CmpOp::Is) || expr.op == BinOp::Cmp(CmpOp::IsNot) { self.visit_expr(&expr.lhs); self.visit_expr(&expr.rhs); } else if expr.op == BinOp::Or || expr.op == BinOp::And { self.visit_expr(&expr.lhs); self.visit_expr(&expr.rhs); // no temporaries needed } else if let Some(_) = self.get_intrinsic(expr.id) { self.visit_expr(&expr.lhs); self.visit_expr(&expr.rhs); } else { let args = vec![ Arg::Expr(&expr.lhs, lhs_ty, 0), Arg::Expr(&expr.rhs, rhs_ty, 0), ]; let fid = self.src.map_calls.get(expr.id).unwrap().fct_id().unwrap(); self.universal_call(expr.id, args, Some(fid)); } } fn expr_un(&mut self, expr: &'ast ExprUnType) { if let Some(_) = self.get_intrinsic(expr.id) { // no temporaries needed self.visit_expr(&expr.opnd); } else { let args = vec![Arg::Expr(&expr.opnd, BuiltinType::Unit, 0)]; self.universal_call(expr.id, args, None); } } fn expr_template(&mut self, expr: &'ast ExprTemplateType) { let string_buffer_offset = self.reserve_stack_slot(BuiltinType::Ptr); let string_part_offset = self.reserve_stack_slot(BuiltinType::Ptr); // build StringBuffer::empty() call let fct_id = self.vm.vips.fct.string_buffer_empty; let ctype = CallType::Fct(fct_id, TypeList::empty(), TypeList::empty()); let string_buffer_new = self.build_call_site(&ctype, fct_id, Vec::new()); let mut part_infos = Vec::new(); for part in &expr.parts { let mut object_offset = None; let mut to_string = None; if !part.is_lit_str() { self.visit_expr(part); let ty = self.ty(part.id()); if ty.cls_id(self.vm) != Some(self.vm.vips.string_class) { // build toString() call let offset = 
self.reserve_stack_slot(ty); object_offset = Some(offset); let cls_id = ty.cls_id(self.vm).expect("no cls_id found for type"); let cls = self.vm.classes.idx(cls_id); let cls = cls.read(); let name = self.vm.interner.intern("toString"); let to_string_id = cls .find_trait_method(self.vm, self.vm.vips.stringable_trait, name, false) .expect("toString() method not found"); let ctype = CallType::Method(ty, to_string_id, TypeList::empty()); let args = vec![Arg::Stack(offset, ty, 0)]; to_string = Some(self.build_call_site(&ctype, to_string_id, args)); } } // build StringBuffer::append() call let fct_id = self.vm.vips.fct.string_buffer_append; let ty = BuiltinType::from_cls(self.vm.vips.cls.string_buffer, self.vm); let ctype = CallType::Method(ty, fct_id, TypeList::empty()); let args = vec![ Arg::Stack(string_buffer_offset, BuiltinType::Ptr, 0), Arg::Stack(string_part_offset, BuiltinType::Ptr, 0), ]; let append = self.build_call_site(&ctype, fct_id, args); part_infos.push(TemplatePartJitInfo { object_offset, to_string, append, }); } // build StringBuffer::toString() call let fct_id = self.vm.vips.fct.string_buffer_to_string; let ty = BuiltinType::from_cls(self.vm.vips.cls.string_buffer, self.vm); let ctype = CallType::Method(ty, fct_id, TypeList::empty()); let args = vec![Arg::Stack(string_buffer_offset, BuiltinType::Ptr, 0)]; let string_buffer_to_string = self.build_call_site(&ctype, fct_id, args); self.jit_info.map_templates.insert( expr.id, TemplateJitInfo { string_buffer_offset, string_part_offset, string_buffer_new, part_infos, string_buffer_to_string, }, ); } fn reserve_temp_for_node_id(&mut self, id: NodeId) -> i32 { let ty = self.ty(id); self.reserve_stack_slot(ty) } fn reserve_temp_for_node(&mut self, expr: &Expr) -> i32 { let ty = self.ty(expr.id()); self.reserve_stack_slot(ty) } fn reserve_stack_slot(&mut self, ty: BuiltinType) -> i32 { let (ty_size, ty_align) = if ty.is_nil() { (mem::ptr_width(), mem::ptr_width()) } else { (ty.size(self.vm), ty.align(self.vm)) 
};

        // Grow the frame (aligned for this type) and hand out the offset
        // negated — slot offsets are negative.
        self.stacksize = mem::align_i32(self.stacksize, ty_align) + ty_size;

        -self.stacksize
    }

    /// Looks up the type of AST node `id` and specializes it with the
    /// current type parameters.
    fn ty(&self, id: NodeId) -> BuiltinType {
        let ty = self.src.ty(id);
        self.specialize_type(ty)
    }

    /// Substitutes the current class/function type arguments into `ty`;
    /// asserts the result is a concrete type.
    fn specialize_type(&self, ty: BuiltinType) -> BuiltinType {
        let result = specialize_type(self.vm, ty, &self.cls_type_params, &self.fct_type_params);
        assert!(result.is_concrete_type(self.vm));

        result
    }
}

/// Call sites for the three iterator-protocol invocations of a `for` loop.
#[derive(Clone)]
pub struct ForInfo<'ast> {
    pub make_iterator: CallSite<'ast>,
    pub has_next: CallSite<'ast>,
    pub next: CallSite<'ast>,
}

/// Precomputed stack offsets and call sites for a string template
/// expression: StringBuffer creation, per-part appends, and the final
/// toString() call.
#[derive(Clone)]
pub struct TemplateJitInfo<'ast> {
    pub string_buffer_offset: i32,
    pub string_part_offset: i32,
    pub string_buffer_new: CallSite<'ast>,
    pub part_infos: Vec<TemplatePartJitInfo<'ast>>,
    pub string_buffer_to_string: CallSite<'ast>,
}

/// Per-part data of a template: an optional toString() conversion (only
/// for non-string parts) plus the append call site.
#[derive(Clone)]
pub struct TemplatePartJitInfo<'ast> {
    pub object_offset: Option<i32>,
    pub to_string: Option<CallSite<'ast>>,
    pub append: CallSite<'ast>,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::os;
    use crate::test;
    use crate::vm::*;

    // Parses `code`, runs info generation on function `f` and hands the
    // resulting FctSrc/JitInfo to the callback for assertions.
    fn info<F>(code: &'static str, f: F)
    where
        F: FnOnce(&FctSrc, &JitInfo),
    {
        os::init_page_size();
        test::parse(code, |vm| {
            let fid = vm.fct_by_name("f").unwrap();
            let fct = vm.fcts.idx(fid);
            let fct = fct.read();
            let src = fct.src();
            let mut src = src.write();
            let mut jit_info = JitInfo::new();
            let empty = TypeList::empty();
            generate(vm, &fct, &mut src, &mut jit_info, &empty, &empty);

            f(&src, &jit_info);
        });
    }

    #[test]
    fn test_invocation_flag() {
        // Calling another function clears the leaf flag …
        info("fun f() { g(); } fun g() { }", |_, jit_info| {
            assert!(!jit_info.leaf);
        });

        // … while a body with no calls stays a leaf.
        info("fun f() { }", |_, jit_info| {
            assert!(jit_info.leaf);
        });
    }
}
// Copyright 2017 Dmitry Tantsur <divius.inside@gmail.com> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Session structure definition. //! //! The Session object serves as a wrapper around an HTTP(s) client, handling //! authentication, accessing the service catalog and token refresh. use std::cell::RefCell; use hyper::{Client, Url}; use hyper::client::{IntoUrl, RequestBuilder, Response}; use hyper::method::Method; use super::ApiError; use super::auth::base::{AuthMethod, AuthToken, AuthTokenHeader}; use super::utils; /// Request builder with authentication. #[allow(missing_debug_implementations)] pub struct AuthenticatedRequestBuilder<'a, A: AuthMethod + 'a> { parent: &'a Session<A>, inner: RequestBuilder<'a> } /// An OpenStack API session. /// /// Owns a token and an underlying client. #[derive(Debug)] pub struct Session<A: AuthMethod> { auth_method: A, client: Client, cached_token: RefCell<Option<AuthToken>> } impl<'a, A: AuthMethod> AuthenticatedRequestBuilder<'a, A> { /// Send this request. pub fn send(self) -> Result<Response, ApiError> { let token_value = try!(self.parent.token_value()); let hdr = AuthTokenHeader(token_value); self.inner.header(hdr).send().map_err(From::from) } } impl<'a, A: AuthMethod + 'a> Session<A> { /// Create a new session with a given authentication plugin. 
pub fn new(auth_method: A) -> Session<A> { Session { auth_method: auth_method, client: utils::http_client(), cached_token: RefCell::new(None) } } /// Get a clone of the authentication token. pub fn auth_token(&self) -> Result<AuthToken, ApiError> { try!(self.refresh_token()); Ok(self.cached_token.borrow().clone().unwrap()) } /// Get an endpoint URL. pub fn get_endpoint(&self, service_type: &str, endpoint_interface: Option<&str>, region: Option<&str>) -> Result<Url, ApiError> { self.auth_method.get_endpoint(service_type, endpoint_interface, region, &self) } /// A wrapper for HTTP request. pub fn request<U: IntoUrl>(&'a self, method: Method, url: U) -> AuthenticatedRequestBuilder<'a, A> { AuthenticatedRequestBuilder { parent: self, inner: self.client.request(method, url) } } // Private and test-only #[cfg(test)] pub fn new_with_params(auth_method: A, client: Client, token: AuthToken) -> Session<A> { Session { auth_method: auth_method, client: client, cached_token: RefCell::new(Some(token)) } } fn refresh_token(&self) -> Result<(), ApiError> { let mut cached_token = self.cached_token.borrow_mut(); if cached_token.is_some() { return Ok(()) } // TODO: check expires_at let new_token = try!(self.auth_method.get_token(&self.client)); *cached_token = Some(new_token); Ok(()) } fn token_value(&self) -> Result<String, ApiError> { try!(self.refresh_token()); Ok(self.cached_token.borrow().clone().unwrap().token) } } #[cfg(test)] pub mod test { use super::super::auth::base::{AuthToken, NoAuth}; use super::super::utils; use super::Session; pub fn new_session(token: &str) -> Session<NoAuth> { let token = AuthToken { token: String::from(token), expires_at: None }; Session::new_with_params(NoAuth::new("http://127.0.0.1/").unwrap(), utils::http_client(), token) } #[test] fn test_session_new() { let s = new_session("foo"); assert_eq!(&s.token_value().unwrap(), "foo"); } } Add missing methods to AuthenticatedRequestBuilder // Copyright 2017 Dmitry Tantsur <divius.inside@gmail.com> // // 
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Session structure definition.
//!
//! The Session object serves as a wrapper around an HTTP(s) client, handling
//! authentication, accessing the service catalog and token refresh.

use std::cell::RefCell;

use hyper::{Client, Url};
use hyper::client::{Body, IntoUrl, RequestBuilder, Response};
use hyper::header::{Header, Headers, HeaderFormat};
use hyper::method::Method;

use super::ApiError;
use super::auth::base::{AuthMethod, AuthToken, AuthTokenHeader};
use super::utils;


/// Request builder with authentication.
///
/// Essentially copies the interface of hyper::client::RequestBuilder.
#[allow(missing_debug_implementations)]
pub struct AuthenticatedRequestBuilder<'a, A: AuthMethod + 'a> {
    // Session supplying the authentication token at send() time.
    parent: &'a Session<A>,
    // Wrapped hyper builder; all request configuration is delegated to it.
    inner: RequestBuilder<'a>
}

/// An OpenStack API session.
///
/// Owns a token and an underlying client.
#[derive(Debug)]
pub struct Session<A: AuthMethod> {
    auth_method: A,
    client: Client,
    // Lazily populated by refresh_token(); RefCell allows caching the token
    // behind &self receivers.
    cached_token: RefCell<Option<AuthToken>>
}

impl<'a, A: AuthMethod> AuthenticatedRequestBuilder<'a, A> {
    /// Send this request.
    ///
    /// Fetches (or reuses) the session's token and attaches it as the
    /// auth-token header just before dispatch, so it wins over any header
    /// set earlier via header()/headers().
    pub fn send(self) -> Result<Response, ApiError> {
        let token_value = try!(self.parent.token_value());
        let hdr = AuthTokenHeader(token_value);
        self.inner.header(hdr).send().map_err(From::from)
    }

    /// Add body to the request.
    pub fn body<B: Into<Body<'a>>>(self, body: B)
            -> AuthenticatedRequestBuilder<'a, A> {
        AuthenticatedRequestBuilder {
            inner: self.inner.body(body),
            .. self
        }
    }

    /// Add additional headers to the request.
    pub fn headers(self, headers: Headers)
            -> AuthenticatedRequestBuilder<'a, A> {
        AuthenticatedRequestBuilder {
            inner: self.inner.headers(headers),
            .. self
        }
    }

    /// Add an individual header to the request.
    ///
    /// Note that X-Auth-Token is always overwritten with a token in use.
    pub fn header<H: Header + HeaderFormat>(self, header: H)
            -> AuthenticatedRequestBuilder<'a, A> {
        AuthenticatedRequestBuilder {
            inner: self.inner.header(header),
            .. self
        }
    }
}

impl<'a, A: AuthMethod + 'a> Session<A> {
    /// Create a new session with a given authentication plugin.
    pub fn new(auth_method: A) -> Session<A> {
        Session {
            auth_method: auth_method,
            client: utils::http_client(),
            cached_token: RefCell::new(None)
        }
    }

    /// Get a clone of the authentication token.
    pub fn auth_token(&self) -> Result<AuthToken, ApiError> {
        try!(self.refresh_token());
        Ok(self.cached_token.borrow().clone().unwrap())
    }

    /// Get an endpoint URL.
    pub fn get_endpoint(&self, service_type: &str,
                        endpoint_interface: Option<&str>,
                        region: Option<&str>) -> Result<Url, ApiError> {
        self.auth_method.get_endpoint(service_type, endpoint_interface,
                                      region, &self)
    }

    /// A wrapper for HTTP request.
    pub fn request<U: IntoUrl>(&'a self, method: Method, url: U)
            -> AuthenticatedRequestBuilder<'a, A> {
        AuthenticatedRequestBuilder {
            parent: self,
            inner: self.client.request(method, url)
        }
    }

    // Private and test-only

    #[cfg(test)]
    pub fn new_with_params(auth_method: A, client: Client, token: AuthToken)
            -> Session<A> {
        Session {
            auth_method: auth_method,
            client: client,
            cached_token: RefCell::new(Some(token))
        }
    }

    // Fetches a token via the auth method unless one is already cached.
    fn refresh_token(&self) -> Result<(), ApiError> {
        let mut cached_token = self.cached_token.borrow_mut();
        if cached_token.is_some() {
            return Ok(())
        }

        // TODO: check expires_at
        let new_token = try!(self.auth_method.get_token(&self.client));
        *cached_token = Some(new_token);
        Ok(())
    }

    // NOTE(review): clones the entire AuthToken just to extract `token`;
    // borrowing the inner String and cloning only it would avoid the copy.
    fn token_value(&self) -> Result<String, ApiError> {
        try!(self.refresh_token());
        Ok(self.cached_token.borrow().clone().unwrap().token)
    }
}

#[cfg(test)]
pub mod test {
    use super::super::auth::base::{AuthToken, NoAuth};
    use super::super::utils;
    use super::Session;

    // Builds a session with a pre-seeded token so no network auth happens.
    pub fn new_session(token: &str) -> Session<NoAuth> {
        let token = AuthToken {
            token: String::from(token),
            expires_at: None
        };
        Session::new_with_params(NoAuth::new("http://127.0.0.1/").unwrap(),
                                 utils::http_client(),
                                 token)
    }

    #[test]
    fn test_session_new() {
        let s = new_session("foo");
        assert_eq!(&s.token_value().unwrap(), "foo");
    }
}
// Copyright 2013 The noise-rs developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/* Note that this is NOT Ken Perlin's simplex noise, as that is patent
encumbered. Instead, these functions use the OpenSimplex algorithm, as detailed
here: http://uniblock.tumblr.com/post/97868843242/noise */

use std::num::{cast, Float};

use seed::Seed;
use gradients::GRADIENT2;

const STRETCH_CONSTANT_2D: f64 = -0.211324865405187; //(1/sqrt(2+1)-1)/2;
const SQUISH_CONSTANT_2D: f64 = 0.366025403784439; //(sqrt(2+1)-1)/2;

// Divisor applied to the summed vertex contributions before returning.
const NORM_CONSTANT_2D: f32 = 14.0;

/// Contribution of the lattice vertex at (`xs_floor`, `ys_floor`) for a
/// sample displaced by (`dx`, `dy`) from it: a radially attenuated dot
/// product with the vertex's seeded gradient, or zero when the sample lies
/// outside the vertex's falloff radius (attn <= 0).
fn get_simplex2_gradient<T: Float>(seed: &Seed, xs_floor: T, ys_floor: T, dx: T, dy: T) -> T {
    let two: T = cast(2.0f32).unwrap();
    let attn = two - dx * dx - dy * dy;
    if attn > cast(0.0f32).unwrap() {
        // Deterministically pick a gradient from the seed table.
        let index = seed.get2(xs_floor.to_int().unwrap(), ys_floor.to_int().unwrap())
            % GRADIENT2.len();
        let vec = GRADIENT2[index];
        // Quartic falloff: attn^4 * (gradient . displacement).
        let attn2 = attn * attn;
        return attn2 * attn2 * (dx * cast(vec[0]).unwrap() + dy * cast(vec[1]).unwrap());
    }
    return cast(0.0f32).unwrap();
}

/// 2D OpenSimplex noise at `point`, normalized by `NORM_CONSTANT_2D`.
pub fn simplex2<T: Float>(seed: &Seed, point: &::Point2<T>) -> f32 {
    let zero: T = cast(0.0f32).unwrap();
    let one: T = cast(1.0f32).unwrap();
    let two: T = cast(2.0f32).unwrap();
    let squish_constant: T = cast(SQUISH_CONSTANT_2D).unwrap();

    //Place input coordinates onto grid.
    let stretch_offset = (point[0] + point[1]) * cast(STRETCH_CONSTANT_2D).unwrap();
    let xs = point[0] + stretch_offset;
    let ys = point[1] + stretch_offset;

    //Floor to get grid coordinates of rhombus (stretched square) super-cell origin.
    let mut xs_floor = xs.floor();
    let mut ys_floor = ys.floor();

    //Skew out to get actual coordinates of rhombus origin. We'll need these later.
    let squish_offset = (xs_floor + ys_floor) * squish_constant;
    let x_floor = xs_floor + squish_offset;
    let y_floor = ys_floor + squish_offset;

    //Compute grid coordinates relative to rhombus origin.
    let xs_frac = xs - xs_floor;
    let ys_frac = ys - ys_floor;

    //Sum those together to get a value that determines which region we're in.
    let frac_sum = xs_frac + ys_frac;

    //Positions relative to origin point.
    let mut dx0 = point[0] - x_floor;
    let mut dy0 = point[1] - y_floor;

    let mut value = zero;

    //Contribution (1,0)
    let dx1 = dx0 - one - squish_constant;
    let dy1 = dy0 - zero - squish_constant;
    value = value + get_simplex2_gradient(seed, xs_floor + one, ys_floor + zero, dx1, dy1);

    //Contribution (0,1)
    let dx2 = dx0 - zero - squish_constant;
    let dy2 = dy0 - one - squish_constant;
    value = value + get_simplex2_gradient(seed, xs_floor + zero, ys_floor + one, dx2, dy2);

    // Choose the extra (fourth) vertex depending on which triangle of the
    // unit rhombus the sample falls into.
    let (dx_ext, dy_ext, xsv_ext, ysv_ext) = if frac_sum <= one {
        //We're inside the triangle (2-Simplex) at (0,0)
        let z_frac = one - frac_sum;
        if z_frac > xs_frac || z_frac > ys_frac {
            //(0,0) is one of the closest two triangular vertices
            if xs_frac > ys_frac {
                (dx0 - one, dy0 + one, xs_floor + one, ys_floor - one)
            } else {
                (dx0 + one, dy0 - one, xs_floor - one, ys_floor + one)
            }
        } else {
            //(1,0) and (0,1) are the closest two vertices.
(dx0 - one - two * squish_constant, dy0 - one - two * squish_constant, xs_floor + one, ys_floor + one) } } else { //We're inside the triangle (2-Simplex) at (1,1) let z_frac = two - frac_sum; if z_frac < xs_frac || z_frac < ys_frac { //(0,0) is one of the closest two triangular vertices if xs_frac > ys_frac { (dx0 - two - two * squish_constant, dy0 + zero - two * squish_constant, xs_floor + two, ys_floor + zero) } else { (dx0 + zero - two * squish_constant, dy0 - two - two * squish_constant, xs_floor + zero, ys_floor + two) } } else { //(1,0) and (0,1) are the closest two vertices. (dx0, dy0, xs_floor, ys_floor) } }; if frac_sum > one { xs_floor = xs_floor + one; ys_floor = ys_floor + one; dx0 = dx0 - one - two * squish_constant; dy0 = dy0 - one - two * squish_constant; } //Contribution (0,0) or (1,1) value = value + get_simplex2_gradient(seed, xs_floor, ys_floor, dx0, dy0); //Extra Vertex value = value + get_simplex2_gradient(seed, xsv_ext, ysv_ext, dx_ext, dy_ext); return value.to_f32().unwrap() / NORM_CONSTANT_2D; } #[cfg(test)] mod tests { use std::rand; use seed::Seed; use simplex::simplex2; const EPSILON: f32 = 0.000001; #[test] fn test_simplex2() { let mut rng: rand::XorShiftRng = rand::SeedableRng::from_seed([42, 37, 26, 8]); let seed = Seed::from_rng(&mut rng); assert_approx_eq!( 0.000000, simplex2(&seed, &[0.0f32, 0.0]), EPSILON); assert_approx_eq!(-0.073360, simplex2(&seed, &[0.5f32, 0.0]), EPSILON); assert_approx_eq!(-0.163874, simplex2(&seed, &[1.0f32, 0.0]), EPSILON); assert_approx_eq!( 0.110803, simplex2(&seed, &[1.5f32, 0.0]), EPSILON); assert_approx_eq!(-0.038814, simplex2(&seed, &[2.0f32, 0.0]), EPSILON); assert_approx_eq!(-0.352936, simplex2(&seed, &[0.0f32, 0.5]), EPSILON); assert_approx_eq!(-0.301287, simplex2(&seed, &[0.5f32, 0.5]), EPSILON); assert_approx_eq!(-0.292849, simplex2(&seed, &[1.0f32, 0.5]), EPSILON); assert_approx_eq!( 0.144342, simplex2(&seed, &[1.5f32, 0.5]), EPSILON); assert_approx_eq!( 0.208945, simplex2(&seed, &[2.0f32, 
0.5]), EPSILON); assert_approx_eq!(-0.235302, simplex2(&seed, &[0.0f32, 1.0]), EPSILON); assert_approx_eq!( 0.057028, simplex2(&seed, &[0.5f32, 1.0]), EPSILON); assert_approx_eq!( 0.142835, simplex2(&seed, &[1.0f32, 1.0]), EPSILON); assert_approx_eq!( 0.261979, simplex2(&seed, &[1.5f32, 1.0]), EPSILON); assert_approx_eq!( 0.252710, simplex2(&seed, &[2.0f32, 1.0]), EPSILON); assert_approx_eq!(-0.305799, simplex2(&seed, &[0.0f32, 1.5]), EPSILON); assert_approx_eq!( 0.142413, simplex2(&seed, &[0.5f32, 1.5]), EPSILON); assert_approx_eq!( 0.470566, simplex2(&seed, &[1.0f32, 1.5]), EPSILON); assert_approx_eq!( 0.321834, simplex2(&seed, &[1.5f32, 1.5]), EPSILON); assert_approx_eq!( 0.013222, simplex2(&seed, &[2.0f32, 1.5]), EPSILON); assert_approx_eq!(-0.383105, simplex2(&seed, &[0.0f32, 2.0]), EPSILON); assert_approx_eq!(-0.140743, simplex2(&seed, &[0.5f32, 2.0]), EPSILON); assert_approx_eq!( 0.092702, simplex2(&seed, &[1.0f32, 2.0]), EPSILON); assert_approx_eq!(-0.022101, simplex2(&seed, &[1.5f32, 2.0]), EPSILON); assert_approx_eq!(-0.301222, simplex2(&seed, &[2.0f32, 2.0]), EPSILON); } } Remove the Simplex test. Will replace with image tests later. // Copyright 2013 The noise-rs developers. For a full listing of the authors, // refer to the AUTHORS file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Note that this is NOT Ken Perlin's simplex noise, as that is patent encumbered. 
Instead, these functions use the OpenSimplex algorithm, as detailed here: http://uniblock.tumblr.com/post/97868843242/noise */ use std::num::{cast, Float}; use seed::Seed; use gradients::GRADIENT2; const STRETCH_CONSTANT_2D: f64 = -0.211324865405187; //(1/sqrt(2+1)-1)/2; const SQUISH_CONSTANT_2D: f64 = 0.366025403784439; //(sqrt(2+1)-1)/2; const NORM_CONSTANT_2D: f32 = 14.0; fn get_simplex2_gradient<T: Float>(seed: &Seed, xs_floor: T, ys_floor: T, dx: T, dy: T) -> T { let two: T = cast(2.0f32).unwrap(); let attn = two - dx * dx - dy * dy; if attn > cast(0.0f32).unwrap() { let index = seed.get2(xs_floor.to_int().unwrap(), ys_floor.to_int().unwrap()) % GRADIENT2.len(); let vec = GRADIENT2[index]; let attn2 = attn * attn; return attn2 * attn2 * (dx * cast(vec[0]).unwrap() + dy * cast(vec[1]).unwrap()); } return cast(0.0f32).unwrap(); } pub fn simplex2<T: Float>(seed: &Seed, point: &::Point2<T>) -> f32 { let zero: T = cast(0.0f32).unwrap(); let one: T = cast(1.0f32).unwrap(); let two: T = cast(2.0f32).unwrap(); let squish_constant: T = cast(SQUISH_CONSTANT_2D).unwrap(); //Place input coordinates onto grid. let stretch_offset = (point[0] + point[1]) * cast(STRETCH_CONSTANT_2D).unwrap(); let xs = point[0] + stretch_offset; let ys = point[1] + stretch_offset; //Floor to get grid coordinates of rhombus (stretched square) super-cell origin. let mut xs_floor = xs.floor(); let mut ys_floor = ys.floor(); //Skew out to get actual coordinates of rhombus origin. We'll need these later. let squish_offset = (xs_floor + ys_floor) * squish_constant; let x_floor = xs_floor + squish_offset; let y_floor = ys_floor + squish_offset; //Compute grid coordinates relative to rhombus origin. let xs_frac = xs - xs_floor; let ys_frac = ys - ys_floor; //Sum those together to get a value that determines which region we're in. let frac_sum = xs_frac + ys_frac; //Positions relative to origin point. 
let mut dx0 = point[0] - x_floor; let mut dy0 = point[1] - y_floor; let mut value = zero; //Contribution (1,0) let dx1 = dx0 - one - squish_constant; let dy1 = dy0 - zero - squish_constant; value = value + get_simplex2_gradient(seed, xs_floor + one, ys_floor + zero, dx1, dy1); //Contribution (0,1) let dx2 = dx0 - zero - squish_constant; let dy2 = dy0 - one - squish_constant; value = value + get_simplex2_gradient(seed, xs_floor + zero, ys_floor + one, dx2, dy2); let (dx_ext, dy_ext, xsv_ext, ysv_ext) = if frac_sum <= one { //We're inside the triangle (2-Simplex) at (0,0) let z_frac = one - frac_sum; if z_frac > xs_frac || z_frac > ys_frac { //(0,0) is one of the closest two triangular vertices if xs_frac > ys_frac { (dx0 - one, dy0 + one, xs_floor + one, ys_floor - one) } else { (dx0 + one, dy0 - one, xs_floor - one, ys_floor + one) } } else { //(1,0) and (0,1) are the closest two vertices. (dx0 - one - two * squish_constant, dy0 - one - two * squish_constant, xs_floor + one, ys_floor + one) } } else { //We're inside the triangle (2-Simplex) at (1,1) let z_frac = two - frac_sum; if z_frac < xs_frac || z_frac < ys_frac { //(0,0) is one of the closest two triangular vertices if xs_frac > ys_frac { (dx0 - two - two * squish_constant, dy0 + zero - two * squish_constant, xs_floor + two, ys_floor + zero) } else { (dx0 + zero - two * squish_constant, dy0 - two - two * squish_constant, xs_floor + zero, ys_floor + two) } } else { //(1,0) and (0,1) are the closest two vertices. (dx0, dy0, xs_floor, ys_floor) } }; if frac_sum > one { xs_floor = xs_floor + one; ys_floor = ys_floor + one; dx0 = dx0 - one - two * squish_constant; dy0 = dy0 - one - two * squish_constant; } //Contribution (0,0) or (1,1) value = value + get_simplex2_gradient(seed, xs_floor, ys_floor, dx0, dy0); //Extra Vertex value = value + get_simplex2_gradient(seed, xsv_ext, ysv_ext, dx_ext, dy_ext); return value.to_f32().unwrap() / NORM_CONSTANT_2D; }
use hyper;
use serde_json;
use std;
use std::collections::HashMap;
use std::fmt;
use std::fs::File;
use std::io::Read;
use std::str::FromStr;
use toml;

/// Top-level daemon configuration, deserialized from a TOML file.
/// Fields without a `#[serde(default = ...)]` attribute are required.
#[derive(Deserialize, Debug, Clone)]
pub struct Config {
    pub token: String,
    pub org: String,
    #[serde(default = "default_team")]
    pub team: Vec<TeamConfig>,
    #[serde(default = "default_repo")]
    pub repo: Vec<RepoConfig>,
    #[serde(default = "default_endpoint")]
    pub endpoint: String,
    #[serde(default = "default_home")]
    pub home: String,
    #[serde(default = "default_sh")]
    pub sh: String,
    #[serde(default = "default_cache_duration")]
    pub cache_duration: u64,
    #[serde(default = "default_cert_path")]
    pub cert_path: String,
    #[serde(default = "default_user_conf_path")]
    pub user_conf_path: String,
    pub proxy_url: Option<String>,
}

// serde default providers for the optional `Config` fields above.
fn default_team() -> Vec<TeamConfig> { Vec::new() }
fn default_repo() -> Vec<RepoConfig> { Vec::new() }
fn default_endpoint() -> String { String::from("https://api.github.com") }
fn default_home() -> String { String::from("/home/{}") }
fn default_sh() -> String { String::from("/bin/bash") }
fn default_cache_duration() -> u64 { 3600 }
fn default_cert_path() -> String { String::from("/etc/ssl/certs/ca-certificates.crt") }
fn default_user_conf_path() -> String { String::from(".config/sectora.toml") }

impl Config {
    /// Read and parse the TOML configuration file at `configpath`.
    pub fn new(configpath: &std::path::Path) -> Result<Self, CliError> {
        let mut file = File::open(configpath)?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;
        Ok(toml::from_str::<Config>(&contents)?)
    }
}

/// Optional per-user overrides (shell, password), also TOML.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct UserConfig {
    pub sh: Option<String>,
    pub pass: Option<String>,
}

impl UserConfig {
    /// Read and parse a per-user TOML configuration file.
    #[allow(dead_code)]
    pub fn new(configpath: &std::path::Path) -> Result<Self, CliError> {
        let mut file = File::open(configpath)?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;
        Ok(toml::from_str::<UserConfig>(&contents)?)
    }
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Team { pub id: u64, pub name: String, }

/// Team entry from the config file; gid/group optionally override defaults.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TeamConfig { pub name: String, pub gid: Option<u64>, pub group: Option<String>, }

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Repo { pub id: u64, pub name: String, }

/// Repo entry from the config file; gid/group optionally override defaults.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct RepoConfig { pub name: String, pub gid: Option<u64>, pub group: Option<String>, }

/// Discriminates team-backed vs repo-backed sectors; serialized as "T"/"R".
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum SectorType { Team, Repo, }

impl fmt::Display for SectorType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            &SectorType::Team => write!(f, "T"),
            &SectorType::Repo => write!(f, "R"),
        }
    }
}

impl FromStr for SectorType {
    type Err = std::io::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "T" => Ok(SectorType::Team),
            "R" => Ok(SectorType::Repo),
            _ => Err(std::io::Error::new(std::io::ErrorKind::Other, "unknown sector type")),
        }
    }
}

/// A group-like unit (team or repo); round-trips through the text
/// format "id:name:type" via Display/FromStr.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Sector { pub id: u64, pub name: String, pub sector_type: SectorType, }

impl From<Team> for Sector {
    fn from(team: Team) -> Self {
        Self { id: team.id, name: team.name, sector_type: SectorType::Team, }
    }
}

impl From<Repo> for Sector {
    fn from(repo: Repo) -> Self {
        Self { id: repo.id, name: repo.name, sector_type: SectorType::Repo, }
    }
}

impl fmt::Display for Sector {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}:{}:{}", self.id, self.name, self.sector_type)
    }
}

pub enum ParseSectorError { Id(std::num::ParseIntError), Type(std::io::Error), }

impl FromStr for Sector {
    type Err = ParseSectorError;
    // NOTE(review): indexes parts[0..=2] without a length check — a malformed
    // input with fewer than three ":"-separated fields would panic.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let parts = s.split(":").collect::<Vec<&str>>();
        Ok(Self { id: parts[0].parse().map_err(|e| ParseSectorError::Id(e))?,
                  name: String::from(parts[1]),
                  sector_type: parts[2].parse().map_err(|e| ParseSectorError::Type(e))?, })
    }
}

/// A sector plus its resolved gid/group overrides and member map
/// (keyed by login). Serialized as tab-separated fields via Display/FromStr.
#[derive(Debug, Clone)]
pub struct SectorGroup {
    pub sector: Sector,
    pub gid: Option<u64>,
    pub group: Option<String>,
    pub members: HashMap<String, Member>,
}

impl SectorGroup {
    // Effective gid: explicit override, else the sector id.
    #[allow(dead_code)]
    pub fn get_gid(&self) -> u64 { self.gid.unwrap_or(self.sector.id) }
    // Effective group name: explicit override, else the sector name.
    #[allow(dead_code)]
    pub fn get_group(&self) -> String { self.group.clone().unwrap_or(self.sector.name.clone()) }
}

impl fmt::Display for SectorGroup {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Members are space-separated within the fourth tab field.
        let members_str = self.members.values()
                              .map(|v| v.to_string())
                              .collect::<Vec<_>>()
                              .join(" ");
        write!(f, "{}\t{}\t{}\t{}",
               self.sector,
               self.gid.and_then(|i| Some(i.to_string())).unwrap_or(String::new()),
               self.group.clone().unwrap_or(String::new()),
               members_str)
    }
}

pub enum ParseSectorGroupError {
    Sector(ParseSectorError),
    Gid(std::num::ParseIntError),
    Member(std::num::ParseIntError),
}

impl FromStr for SectorGroup {
    type Err = ParseSectorGroupError;
    // Inverse of Display above: "sector\tgid\tgroup\tmember member ...".
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let parts = s.split("\t").collect::<Vec<&str>>();
        let sector = parts[0].parse().map_err(|e| ParseSectorGroupError::Sector(e))?;
        let gid: Option<u64> = match parts[1] {
            "" => None,
            s => Some(s.parse::<u64>().map_err(|e| ParseSectorGroupError::Gid(e))?),
        };
        let group: Option<String> = match parts[2] {
            "" => None,
            s => Some(String::from(s)),
        };
        let members = parts[3].split(" ")
                              .map(|s| s.parse::<Member>().map_err(|e| ParseSectorGroupError::Member(e)))
                              .collect::<Result<Vec<Member>, _>>()?
                              .into_iter()
                              .map(|m| (m.login.clone(), m))
                              .collect::<HashMap<_, _>>();
        Ok(Self { sector, gid, group, members, })
    }
}

/// A user; round-trips through "id:login" via Display/FromStr.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Member { pub id: u64, pub login: String, }

impl fmt::Display for Member {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}:{}", self.id, self.login) }
}

impl FromStr for Member {
    type Err = std::num::ParseIntError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let parts = s.split(":").collect::<Vec<&str>>();
        Ok(Self { id: parts[0].parse()?, login: String::from(parts[1]), })
    }
}

/// A member paired with a gid; text format "member|gid".
#[allow(dead_code)]
pub struct MemberGid { pub member: Member, pub gid: u64, }

impl fmt::Display for MemberGid {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}|{}", self.member, self.gid) }
}

impl FromStr for MemberGid {
    type Err = std::num::ParseIntError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let parts = s.split("|").collect::<Vec<&str>>();
        Ok(Self { member: parts[0].parse()?, gid: parts[1].parse()?, })
    }
}

#[derive(Serialize, Deserialize, Debug)]
pub struct PublicKey { pub id: u64, pub key: String, }

/// Unified error type for config loading and API access.
#[derive(Debug)]
pub enum CliError {
    Serde(serde_json::Error),
    Io(std::io::Error),
    Toml(toml::de::Error),
    Hyper(hyper::Error),
}

impl From<serde_json::Error> for CliError {
    fn from(err: serde_json::Error) -> CliError { CliError::Serde(err) }
}
impl From<std::io::Error> for CliError {
    fn from(err: std::io::Error) -> CliError { CliError::Io(err) }
}
impl From<toml::de::Error> for CliError {
    fn from(err: toml::de::Error) -> CliError { CliError::Toml(err) }
}
impl From<hyper::Error> for CliError {
    fn from(err: hyper::Error) -> CliError { CliError::Hyper(err) }
}
Add RateLimit struct use hyper;
use serde_json;
use std;
use std::collections::HashMap;
use std::fmt;
use std::fs::File;
use std::io::Read;
use std::str::FromStr;
use toml;

/// Top-level daemon configuration, deserialized from a TOML file.
#[derive(Deserialize, Debug, Clone)]
pub struct Config {
    pub token: String,
    pub org: String,
    #[serde(default = "default_team")]
    pub team: Vec<TeamConfig>,
    #[serde(default = "default_repo")]
    pub repo: Vec<RepoConfig>,
    #[serde(default = "default_endpoint")]
    pub endpoint: String,
    #[serde(default = "default_home")]
    pub home: String,
    #[serde(default = "default_sh")]
    pub sh: String,
    #[serde(default = "default_cache_duration")]
    pub cache_duration: u64,
    #[serde(default = "default_cert_path")]
    pub cert_path: String,
    #[serde(default = "default_user_conf_path")]
    pub user_conf_path: String,
    pub proxy_url: Option<String>,
}

// serde default providers for the optional `Config` fields above.
fn default_team() -> Vec<TeamConfig> { Vec::new() }
fn default_repo() -> Vec<RepoConfig> { Vec::new() }
fn default_endpoint() -> String { String::from("https://api.github.com") }
fn default_home() -> String { String::from("/home/{}") }
fn default_sh() -> String { String::from("/bin/bash") }
fn default_cache_duration() -> u64 { 3600 }
fn default_cert_path() -> String { String::from("/etc/ssl/certs/ca-certificates.crt") }
fn default_user_conf_path() -> String { String::from(".config/sectora.toml") }

impl Config {
    /// Read and parse the TOML configuration file at `configpath`.
    pub fn new(configpath: &std::path::Path) -> Result<Self, CliError> {
        let mut file = File::open(configpath)?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;
        Ok(toml::from_str::<Config>(&contents)?)
    }
}

/// Optional per-user overrides (shell, password), also TOML.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct UserConfig {
    pub sh: Option<String>,
    pub pass: Option<String>,
}

impl UserConfig {
    /// Read and parse a per-user TOML configuration file.
    #[allow(dead_code)]
    pub fn new(configpath: &std::path::Path) -> Result<Self, CliError> {
        let mut file = File::open(configpath)?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;
        Ok(toml::from_str::<UserConfig>(&contents)?)
    }
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Team { pub id: u64, pub name: String, }

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TeamConfig { pub name: String, pub gid: Option<u64>, pub group: Option<String>, }

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Repo { pub id: u64, pub name: String, }

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct RepoConfig { pub name: String, pub gid: Option<u64>, pub group: Option<String>, }

/// Discriminates team-backed vs repo-backed sectors; serialized as "T"/"R".
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum SectorType { Team, Repo, }

impl fmt::Display for SectorType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            &SectorType::Team => write!(f, "T"),
            &SectorType::Repo => write!(f, "R"),
        }
    }
}

impl FromStr for SectorType {
    type Err = std::io::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "T" => Ok(SectorType::Team),
            "R" => Ok(SectorType::Repo),
            _ => Err(std::io::Error::new(std::io::ErrorKind::Other, "unknown sector type")),
        }
    }
}

/// A group-like unit (team or repo); text format "id:name:type".
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Sector { pub id: u64, pub name: String, pub sector_type: SectorType, }

impl From<Team> for Sector {
    fn from(team: Team) -> Self {
        Self { id: team.id, name: team.name, sector_type: SectorType::Team, }
    }
}

impl From<Repo> for Sector {
    fn from(repo: Repo) -> Self {
        Self { id: repo.id, name: repo.name, sector_type: SectorType::Repo, }
    }
}

impl fmt::Display for Sector {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}:{}:{}", self.id, self.name, self.sector_type)
    }
}

pub enum ParseSectorError { Id(std::num::ParseIntError), Type(std::io::Error), }

impl FromStr for Sector {
    type Err = ParseSectorError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let parts = s.split(":").collect::<Vec<&str>>();
        Ok(Self { id: parts[0].parse().map_err(|e| ParseSectorError::Id(e))?,
                  name: String::from(parts[1]),
                  sector_type: parts[2].parse().map_err(|e| ParseSectorError::Type(e))?, })
    }
}

/// A sector plus resolved gid/group overrides and member map (keyed by login).
#[derive(Debug, Clone)]
pub struct SectorGroup {
    pub sector: Sector,
    pub gid: Option<u64>,
    pub group: Option<String>,
    pub members: HashMap<String, Member>,
}

impl SectorGroup {
    // Effective gid: explicit override, else the sector id.
    #[allow(dead_code)]
    pub fn get_gid(&self) -> u64 { self.gid.unwrap_or(self.sector.id) }
    // Effective group name: explicit override, else the sector name.
    #[allow(dead_code)]
    pub fn get_group(&self) -> String { self.group.clone().unwrap_or(self.sector.name.clone()) }
}

impl fmt::Display for SectorGroup {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let members_str = self.members.values()
                              .map(|v| v.to_string())
                              .collect::<Vec<_>>()
                              .join(" ");
        write!(f, "{}\t{}\t{}\t{}",
               self.sector,
               self.gid.and_then(|i| Some(i.to_string())).unwrap_or(String::new()),
               self.group.clone().unwrap_or(String::new()),
               members_str)
    }
}

pub enum ParseSectorGroupError {
    Sector(ParseSectorError),
    Gid(std::num::ParseIntError),
    Member(std::num::ParseIntError),
}

impl FromStr for SectorGroup {
    type Err = ParseSectorGroupError;
    // Inverse of Display above: "sector\tgid\tgroup\tmember member ...".
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let parts = s.split("\t").collect::<Vec<&str>>();
        let sector = parts[0].parse().map_err(|e| ParseSectorGroupError::Sector(e))?;
        let gid: Option<u64> = match parts[1] {
            "" => None,
            s => Some(s.parse::<u64>().map_err(|e| ParseSectorGroupError::Gid(e))?),
        };
        let group: Option<String> = match parts[2] {
            "" => None,
            s => Some(String::from(s)),
        };
        let members = parts[3].split(" ")
                              .map(|s| s.parse::<Member>().map_err(|e| ParseSectorGroupError::Member(e)))
                              .collect::<Result<Vec<Member>, _>>()?
                              .into_iter()
                              .map(|m| (m.login.clone(), m))
                              .collect::<HashMap<_, _>>();
        Ok(Self { sector, gid, group, members, })
    }
}

/// A user; round-trips through "id:login" via Display/FromStr.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Member { pub id: u64, pub login: String, }

impl fmt::Display for Member {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}:{}", self.id, self.login) }
}

impl FromStr for Member {
    type Err = std::num::ParseIntError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let parts = s.split(":").collect::<Vec<&str>>();
        Ok(Self { id: parts[0].parse()?, login: String::from(parts[1]), })
    }
}

/// A member paired with a gid; text format "member|gid".
#[allow(dead_code)]
pub struct MemberGid { pub member: Member, pub gid: u64, }

impl fmt::Display for MemberGid {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}|{}", self.member, self.gid) }
}

impl FromStr for MemberGid {
    type Err = std::num::ParseIntError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let parts = s.split("|").collect::<Vec<&str>>();
        Ok(Self { member: parts[0].parse()?, gid: parts[1].parse()?, })
    }
}

#[derive(Serialize, Deserialize, Debug)]
pub struct PublicKey { pub id: u64, pub key: String, }

/// One rate window as reported by the API rate-limit endpoint.
#[derive(Serialize, Deserialize, Debug)]
pub struct Rate { pub limit: usize, pub remaining: usize, pub reset: usize, }

/// Wrapper matching the JSON shape `{ "rate": { ... } }`.
#[derive(Serialize, Deserialize, Debug)]
pub struct RateLimit { pub rate: Rate, }

/// Unified error type for config loading and API access.
#[derive(Debug)]
pub enum CliError {
    Serde(serde_json::Error),
    Io(std::io::Error),
    Toml(toml::de::Error),
    Hyper(hyper::Error),
}

impl From<serde_json::Error> for CliError {
    fn from(err: serde_json::Error) -> CliError { CliError::Serde(err) }
}
impl From<std::io::Error> for CliError {
    fn from(err: std::io::Error) -> CliError { CliError::Io(err) }
}
impl From<toml::de::Error> for CliError {
    fn from(err: toml::de::Error) -> CliError { CliError::Toml(err) }
}
impl From<hyper::Error> for CliError {
    fn from(err: hyper::Error) -> CliError { CliError::Hyper(err) }
}
use error::Error;

#[derive(Debug)]
pub struct Summarizer {
    // Invariant: non-empty, all finite, sorted ascending (see `new`).
    data: Vec<f64>,
}

impl Summarizer {
    /// Construct a `Summarizer` from a slice of 64-bit floating point numbers.
    ///
    /// This constructor is partial, and we obtain the following guarantees
    /// about the resulting sample data:
    ///
    /// - The sample size is positive
    /// - All values are finite
    /// - The data are sorted
    ///
    pub fn new(data: &[f64]) -> Result<Self, Error> {
        if data.is_empty() {
            return Err(Error::EmptySample);
        }
        if data.iter().any(|x| !x.is_finite()) {
            return Err(Error::BadSample);
        }
        let mut data = Vec::from(data);
        // Won't panic: we have checked that each float is finite.
        data.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!()));
        let s = Summarizer { data };
        Ok(s)
    }

    pub fn as_slice(&self) -> &[f64] { self.data.as_slice() }

    // Sample size as f64, for use in the statistics below.
    pub fn size(&self) -> f64 { self.data.len() as f64 }

    // First element — data is sorted, so this is the sample minimum.
    pub fn min(&self) -> f64 { self.data[0] }

    // Last element — data is sorted, so this is the sample maximum.
    pub fn max(&self) -> f64 { self.data[self.data.len() - 1] }

    pub fn mean(&self) -> f64 {
        let t: f64 = self.data.iter().sum();
        t / self.size()
    }

    // Median: middle element, or mean of the two middle elements for even n.
    pub fn median(&self) -> f64 {
        let d = &self.data;
        let n = d.len();
        if n % 2 == 0 {
            (d[(n / 2) - 1] + d[n / 2]) / 2.0
        } else {
            d[(n - 1) / 2]
        }
    }

    /// Closest-ranks percentile computed via linear interpolation.
    /// See: http://www.itl.nist.gov/div898/handbook/prc/section2/prc262.htm
    ///
    /// According to NIST, there isn't a standard computational definition of percentile.
    /// We take a practical approach that aims to be both unsurprising and consistent with
    /// common statistics packages. In particular, our implementation guarantees that the
    /// boundary percentiles correspond to the sample min and max.
    pub fn percentile(&self, p: f64) -> Result<f64, Error> {
        if !p.is_finite() {
            return Err(Error::Undefined);
        }
        if p < 0.0 || 1.0 < p {
            return Err(Error::Undefined);
        }
        let rank = (self.size() - 1.0) * p;
        let frac = rank.fract();
        let i = rank.floor() as usize;
        let j = i + 1;
        if j == self.data.len() {
            // This implies that `i` indexes the largest data point in the sample.
            // Dereferencing at `j` would be an error, but `i` is exactly the max.
            return Ok(self.data[i]);
        }
        // Linear interpolation between the two neighbouring order statistics.
        let xi = self.data[i];
        let xj = self.data[j];
        let x = xi + frac * (xj - xi);
        Ok(x)
    }

    /// Uses Bessel's correction to estimate population variance.
    pub fn unbiased_variance(&self) -> f64 {
        let m = self.mean();
        let sum_sq_diff: f64 = self.data
                                   .iter()
                                   .map(|x| (x - m).powi(2))
                                   .sum();
        (1.0 / (self.size() - 1.0)) * sum_sq_diff
    }

    pub fn standard_deviation(&self) -> f64 { self.unbiased_variance().sqrt() }

    pub fn standard_error(&self) -> f64 { self.standard_deviation() / self.size().sqrt() }
}

/// Precomputed summary statistics for a sample (values cached at construction).
#[derive(Debug)]
pub struct Summary {
    iqr: f64,
    len: usize,
    lower_quartile: f64,
    min: f64,
    min_non_outlier: f64,
    max: f64,
    max_non_outlier: f64,
    mean: f64,
    median: f64,
    standard_deviation: f64,
    standard_error: f64,
    unbiased_variance: f64,
    upper_quartile: f64,
}

impl Summary {
    /// Construct a `Summary` from a slice of 64-bit floating point numbers.
    ///
    /// This constructor is partial, and we obtain the following guarantees
    /// about the resulting sample data:
    ///
    /// - The sample size is positive
    /// - All values are finite
    /// - The data are sorted
    ///
    pub fn new(data: &[f64]) -> Result<Self, Error> {
        let s = Summarizer::new(data)?;
        // The percentile arguments below are statically known to meet the
        // `percentile()` bounds, so we can always unwrap.
        let q1 = s.percentile(0.25).unwrap_or_else(|_| unreachable!());
        let q3 = s.percentile(0.75).unwrap_or_else(|_| unreachable!());
        let iqr = q3 - q1;

        // Smallest sample value within 1.5*IQR below Q1 (Tukey fence).
        let lower_outlier_bound = q1 - 1.5 * iqr;
        let min_non_outlier = s
            .as_slice()
            .iter()
            .cloned()
            .find(|&x| lower_outlier_bound <= x)
            .unwrap_or_else(|| unreachable!()); // By definition of quartile.

        // Largest sample value within 1.5*IQR above Q3 (Tukey fence).
        let upper_outlier_bound = q3 + 1.5 * iqr;
        let max_non_outlier = s
            .as_slice()
            .iter()
            .cloned()
            .rev()
            .find(|&x| x <= upper_outlier_bound)
            .unwrap_or_else(|| unreachable!()); // By definition of quartile.

        Ok(Summary { iqr: iqr,
                     len: s.data.len(),
                     lower_quartile: q1,
                     min: s.min(),
                     min_non_outlier: min_non_outlier,
                     max: s.max(),
                     max_non_outlier: max_non_outlier,
                     mean: s.mean(),
                     median: s.median(),
                     upper_quartile: q3,
                     unbiased_variance: s.unbiased_variance(),
                     standard_deviation: s.standard_deviation(),
                     standard_error: s.standard_error(), })
    }

    pub fn size(&self) -> f64 { self.len as f64 }
    pub fn range(&self) -> f64 { self.max - self.min }
    pub fn iqr(&self) -> f64 { self.iqr }
    pub fn lower_quartile(&self) -> f64 { self.lower_quartile }
    pub fn min(&self) -> f64 { self.min }
    pub fn min_non_outlier(&self) -> f64 { self.min_non_outlier }
    pub fn max(&self) -> f64 { self.max }
    pub fn max_non_outlier(&self) -> f64 { self.max_non_outlier }
    pub fn mean(&self) -> f64 { self.mean }
    pub fn median(&self) -> f64 { self.median }
    /// Uses Bessel's correction to estimate population variance.
    pub fn unbiased_variance(&self) -> f64 { self.unbiased_variance }
    pub fn upper_quartile(&self) -> f64 { self.upper_quartile }
    pub fn standard_deviation(&self) -> f64 { self.standard_deviation }
    pub fn standard_error(&self) -> f64 { self.standard_error }
}
Move all `Summary` calculations into `Summarizer` methods use error::Error;

#[derive(Debug)]
pub struct Summarizer {
    // Invariant: non-empty, all finite, sorted ascending (see `new`).
    data: Vec<f64>,
}

impl Summarizer {
    /// Construct a `Summarizer` from a slice of 64-bit floating point numbers.
    ///
    /// This constructor is partial, and we obtain the following guarantees
    /// about the resulting sample data:
    ///
    /// - The sample size is positive
    /// - All values are finite
    /// - The data are sorted
    ///
    pub fn new(data: &[f64]) -> Result<Self, Error> {
        if data.is_empty() {
            return Err(Error::EmptySample);
        }
        if data.iter().any(|x| !x.is_finite()) {
            return Err(Error::BadSample);
        }
        let mut data = Vec::from(data);
        // Won't panic: we have checked that each float is finite.
        data.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!()));
        let s = Summarizer { data };
        Ok(s)
    }

    pub fn as_slice(&self) -> &[f64] { self.data.as_slice() }

    // Sample size as f64, for use in the statistics below.
    pub fn size(&self) -> f64 { self.data.len() as f64 }

    // Interquartile range: Q3 - Q1.
    pub fn iqr(&self) -> f64 { self.upper_quartile() - self.lower_quartile() }

    pub fn lower_quartile(&self) -> f64 {
        // Statically known to be defined.
        self.percentile(0.25).unwrap_or_else(|_| unreachable!())
    }

    // First element — data is sorted, so this is the sample minimum.
    pub fn min(&self) -> f64 { self.data[0] }

    // Smallest sample value within 1.5*IQR below Q1 (Tukey fence).
    pub fn min_non_outlier(&self) -> f64 {
        let lower_outlier_bound = self.lower_quartile() - 1.5 * self.iqr();
        self.data
            .iter()
            .cloned()
            .find(|&x| lower_outlier_bound <= x)
            .unwrap_or_else(|| unreachable!()) // By definition of quartile.
    }

    // Last element — data is sorted, so this is the sample maximum.
    pub fn max(&self) -> f64 { self.data[self.data.len() - 1] }

    // Largest sample value within 1.5*IQR above Q3 (Tukey fence).
    pub fn max_non_outlier(&self) -> f64 {
        let upper_outlier_bound = self.upper_quartile() + 1.5 * self.iqr();
        self.data
            .iter()
            .cloned()
            .rev()
            .find(|&x| x <= upper_outlier_bound)
            .unwrap_or_else(|| unreachable!()) // By definition of quartile.
    }

    pub fn mean(&self) -> f64 {
        let t: f64 = self.data.iter().sum();
        t / self.size()
    }

    // Median: middle element, or mean of the two middle elements for even n.
    pub fn median(&self) -> f64 {
        let d = &self.data;
        let n = d.len();
        if n % 2 == 0 {
            (d[(n / 2) - 1] + d[n / 2]) / 2.0
        } else {
            d[(n - 1) / 2]
        }
    }

    /// Closest-ranks percentile computed via linear interpolation.
    /// See: http://www.itl.nist.gov/div898/handbook/prc/section2/prc262.htm
    ///
    /// According to NIST, there isn't a standard computational definition of percentile.
    /// We take a practical approach that aims to be both unsurprising and consistent with
    /// common statistics packages. In particular, our implementation guarantees that the
    /// boundary percentiles correspond to the sample min and max.
    pub fn percentile(&self, p: f64) -> Result<f64, Error> {
        if !p.is_finite() {
            return Err(Error::Undefined);
        }
        if p < 0.0 || 1.0 < p {
            return Err(Error::Undefined);
        }
        let rank = (self.size() - 1.0) * p;
        let frac = rank.fract();
        let i = rank.floor() as usize;
        let j = i + 1;
        if j == self.data.len() {
            // This implies that `i` indexes the largest data point in the sample.
            // Dereferencing at `j` would be an error, but `i` is exactly the max.
            return Ok(self.data[i]);
        }
        // Linear interpolation between the two neighbouring order statistics.
        let xi = self.data[i];
        let xj = self.data[j];
        let x = xi + frac * (xj - xi);
        Ok(x)
    }

    pub fn range(&self) -> f64 { self.max() - self.min() }

    pub fn upper_quartile(&self) -> f64 {
        // Statically known to be defined.
        self.percentile(0.75).unwrap_or_else(|_| unreachable!())
    }

    /// Uses Bessel's correction to estimate population variance.
    pub fn unbiased_variance(&self) -> f64 {
        let m = self.mean();
        let sum_sq_diff: f64 = self.data
                                   .iter()
                                   .map(|x| (x - m).powi(2))
                                   .sum();
        (1.0 / (self.size() - 1.0)) * sum_sq_diff
    }

    pub fn standard_deviation(&self) -> f64 { self.unbiased_variance().sqrt() }

    pub fn standard_error(&self) -> f64 { self.standard_deviation() / self.size().sqrt() }
}

/// Precomputed summary statistics for a sample; all values are cached at
/// construction by delegating to `Summarizer` methods.
#[derive(Debug)]
pub struct Summary {
    iqr: f64,
    len: usize,
    lower_quartile: f64,
    min: f64,
    min_non_outlier: f64,
    max: f64,
    max_non_outlier: f64,
    mean: f64,
    median: f64,
    range: f64,
    standard_deviation: f64,
    standard_error: f64,
    unbiased_variance: f64,
    upper_quartile: f64,
}

impl Summary {
    /// Construct a `Summary` from a slice of 64-bit floating point numbers.
    ///
    /// This constructor is partial, and we obtain the following guarantees
    /// about the resulting sample data:
    ///
    /// - The sample size is positive
    /// - All values are finite
    /// - The data are sorted
    ///
    pub fn new(data: &[f64]) -> Result<Self, Error> {
        let s = Summarizer::new(data)?;
        Ok(Summary { iqr: s.iqr(),
                     len: s.data.len(),
                     lower_quartile: s.lower_quartile(),
                     min: s.min(),
                     min_non_outlier: s.min_non_outlier(),
                     max: s.max(),
                     max_non_outlier: s.max_non_outlier(),
                     mean: s.mean(),
                     median: s.median(),
                     range: s.range(),
                     upper_quartile: s.upper_quartile(),
                     unbiased_variance: s.unbiased_variance(),
                     standard_deviation: s.standard_deviation(),
                     standard_error: s.standard_error(), })
    }

    pub fn size(&self) -> f64 { self.len as f64 }
    pub fn range(&self) -> f64 { self.range }
    pub fn iqr(&self) -> f64 { self.iqr }
    pub fn lower_quartile(&self) -> f64 { self.lower_quartile }
    pub fn min(&self) -> f64 { self.min }
    pub fn min_non_outlier(&self) -> f64 { self.min_non_outlier }
    pub fn max(&self) -> f64 { self.max }
    pub fn max_non_outlier(&self) -> f64 { self.max_non_outlier }
    pub fn mean(&self) -> f64 { self.mean }
    pub fn median(&self) -> f64 { self.median }
    /// Uses Bessel's correction to estimate population variance.
    pub fn unbiased_variance(&self) -> f64 { self.unbiased_variance }
    pub fn upper_quartile(&self) -> f64 { self.upper_quartile }
    pub fn standard_deviation(&self) -> f64 { self.standard_deviation }
    pub fn standard_error(&self) -> f64 { self.standard_error }
}
// vim: tw=80
//! POSIX Asynchronous I/O
//!
//! The POSIX AIO interface is used for asynchronous I/O on files and disk-like
//! devices. It supports [`read`](struct.AioCb.html#method.read),
//! [`write`](struct.AioCb.html#method.write), and
//! [`fsync`](struct.AioCb.html#method.fsync) operations. Completion
//! notifications can optionally be delivered via
//! [signals](../signal/enum.SigevNotify.html#variant.SigevSignal), via the
//! [`aio_suspend`](fn.aio_suspend.html) function, or via polling. Some
//! platforms support other completion
//! notifications, such as
//! [kevent](../signal/enum.SigevNotify.html#variant.SigevKevent).
//!
//! Multiple operations may be submitted in a batch with
//! [`lio_listio`](fn.lio_listio.html), though the standard does not guarantee
//! that they will be executed atomically.
//!
//! Outstanding operations may be cancelled with
//! [`cancel`](struct.AioCb.html#method.cancel) or
//! [`aio_cancel_all`](fn.aio_cancel_all.html), though the operating system may
//! not support this for all filesystems and devices.
use {Error, Result};
use errno::Errno;
use std::os::unix::io::RawFd;
use libc::{c_void, off_t, size_t};
use libc;
use std::borrow::{Borrow, BorrowMut};
use std::fmt;
use std::fmt::Debug;
use std::marker::PhantomData;
use std::mem;
use std::ptr::{null, null_mut};
use sys::signal::*;
use std::thread;
use sys::time::TimeSpec;

libc_enum! {
    /// Mode for `AioCb::fsync`. Controls whether only data or both data and
    /// metadata are synced.
    #[repr(i32)]
    pub enum AioFsyncMode {
        /// do it like `fsync`
        O_SYNC,
        /// on supported operating systems only, do it like `fdatasync`
        #[cfg(any(target_os = "ios",
                  target_os = "linux",
                  target_os = "macos",
                  target_os = "netbsd",
                  target_os = "openbsd"))]
        O_DSYNC
    }
}

libc_enum! {
    /// When used with [`lio_listio`](fn.lio_listio.html), determines whether a
    /// given `aiocb` should be used for a read operation, a write operation, or
    /// ignored. Has no effect for any other aio functions.
    #[repr(i32)]
    pub enum LioOpcode {
        /// Ignore this `aiocb` during `lio_listio`
        LIO_NOP,
        /// Perform a write operation
        LIO_WRITE,
        /// Perform a read operation
        LIO_READ,
    }
}

libc_enum! {
    /// Mode for [`lio_listio`](fn.lio_listio.html)
    #[repr(i32)]
    pub enum LioMode {
        /// Requests that [`lio_listio`](fn.lio_listio.html) block until all
        /// requested operations have been completed
        LIO_WAIT,
        /// Requests that [`lio_listio`](fn.lio_listio.html) return immediately
        LIO_NOWAIT,
    }
}

/// Return values for [`AioCb::cancel`](struct.AioCb.html#method.cancel) and
/// [`aio_cancel_all`](fn.aio_cancel_all.html)
#[repr(i32)]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum AioCancelStat {
    /// All outstanding requests were canceled
    AioCanceled = libc::AIO_CANCELED,
    /// Some requests were not canceled. Their status should be checked with
    /// `AioCb::error`
    AioNotCanceled = libc::AIO_NOTCANCELED,
    /// All of the requests have already finished
    AioAllDone = libc::AIO_ALLDONE,
}

/// Owns (uniquely or shared) a memory buffer to keep it from `Drop`ing while
/// the kernel has a pointer to it.
pub enum Buffer<'a> {
    /// No buffer to own.
    ///
    /// Used for operations like `aio_fsync` that have no data, or for unsafe
    /// operations that work with raw pointers.
    None,
    /// Keeps a reference to a slice
    Phantom(PhantomData<&'a mut [u8]>),
    /// Generic thing that keeps a buffer from dropping
    BoxedSlice(Box<dyn Borrow<[u8]>>),
    /// Generic thing that keeps a mutable buffer from dropping
    BoxedMutSlice(Box<dyn BorrowMut<[u8]>>),
}

impl<'a> Debug for Buffer<'a> {
    // Note: someday it may be possible to Derive Debug for a trait object, but
    // not today.
    // https://github.com/rust-lang/rust/issues/1563
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Buffer::None => write!(fmt, "None"),
            Buffer::Phantom(p) => p.fmt(fmt),
            // For the boxed variants we can't Debug-print the trait object
            // itself, so print the address of its borrowed contents instead.
            Buffer::BoxedSlice(ref bs) => {
                let borrowed : &dyn Borrow<[u8]> = bs.borrow();
                write!(fmt, "BoxedSlice({:?})",
                    borrowed as *const dyn Borrow<[u8]>)
            },
            Buffer::BoxedMutSlice(ref bms) => {
                let borrowed : &dyn BorrowMut<[u8]> = bms.borrow();
                write!(fmt, "BoxedMutSlice({:?})",
                    borrowed as *const dyn BorrowMut<[u8]>)
            }
        }
    }
}

/// AIO Control Block.
///
/// The basic structure used by all aio functions. Each `AioCb` represents one
/// I/O request.
pub struct AioCb<'a> {
    aiocb: libc::aiocb,
    /// Tracks whether the buffer pointed to by `libc::aiocb.aio_buf` is mutable
    mutable: bool,
    /// Could this `AioCb` potentially have any in-kernel state?
    in_progress: bool,
    /// Optionally keeps a reference to the data.
    ///
    /// Used to keep buffers from `Drop`'ing, and may be returned once the
    /// `AioCb` is completed by [`buffer`](#method.buffer).
    buffer: Buffer<'a>
}

impl<'a> AioCb<'a> {
    /// Remove the inner `Buffer` and return it
    ///
    /// It is an error to call this method while the `AioCb` is still in
    /// progress.
    pub fn buffer(&mut self) -> Buffer<'a> {
        assert!(!self.in_progress);
        // Swap leaves `Buffer::None` in place of the removed buffer.
        let mut x = Buffer::None;
        mem::swap(&mut self.buffer, &mut x);
        x
    }

    /// Remove the inner boxed slice, if any, and return it.
    ///
    /// The returned value will be the argument that was passed to
    /// `from_boxed_slice` when this `AioCb` was created.
    ///
    /// It is an error to call this method while the `AioCb` is still in
    /// progress.
    pub fn boxed_slice(&mut self) -> Option<Box<dyn Borrow<[u8]>>> {
        assert!(!self.in_progress, "Can't remove the buffer from an AioCb that's still in-progress. Did you forget to call aio_return?");
        if let Buffer::BoxedSlice(_) = self.buffer {
            // Swap the buffer out, then destructure the removed value to get
            // at the inner `Box`.
            let mut oldbuffer = Buffer::None;
            mem::swap(&mut self.buffer, &mut oldbuffer);
            if let Buffer::BoxedSlice(inner) = oldbuffer {
                Some(inner)
            } else {
                // Can't happen: the variant was checked just above.
                unreachable!();
            }
        } else {
            None
        }
    }

    /// Remove the inner boxed mutable slice, if any, and return it.
    ///
    /// The returned value will be the argument that was passed to
    /// `from_boxed_mut_slice` when this `AioCb` was created.
    ///
    /// It is an error to call this method while the `AioCb` is still in
    /// progress.
    pub fn boxed_mut_slice(&mut self) -> Option<Box<dyn BorrowMut<[u8]>>> {
        assert!(!self.in_progress, "Can't remove the buffer from an AioCb that's still in-progress. Did you forget to call aio_return?");
        if let Buffer::BoxedMutSlice(_) = self.buffer {
            let mut oldbuffer = Buffer::None;
            mem::swap(&mut self.buffer, &mut oldbuffer);
            if let Buffer::BoxedMutSlice(inner) = oldbuffer {
                Some(inner)
            } else {
                // Can't happen: the variant was checked just above.
                unreachable!();
            }
        } else {
            None
        }
    }

    /// Returns the underlying file descriptor associated with the `AioCb`
    pub fn fd(&self) -> RawFd {
        self.aiocb.aio_fildes
    }

    /// Constructs a new `AioCb` with no associated buffer.
    ///
    /// The resulting `AioCb` structure is suitable for use with `AioCb::fsync`.
    ///
    /// # Parameters
    ///
    /// * `fd`:           File descriptor. Required for all aio functions.
    /// * `prio`:         If POSIX Prioritized IO is supported, then the
    ///                   operation will be prioritized at the process's
    ///                   priority level minus `prio`.
    /// * `sigev_notify`: Determines how you will be notified of event
    ///                   completion.
    ///
    /// # Examples
    ///
    /// Create an `AioCb` from a raw file descriptor and use it for an
    /// [`fsync`](#method.fsync) operation.
/// /// ``` /// # extern crate tempfile; /// # extern crate nix; /// # use nix::errno::Errno; /// # use nix::Error; /// # use nix::sys::aio::*; /// # use nix::sys::signal::SigevNotify::SigevNone; /// # use std::{thread, time}; /// # use std::os::unix::io::AsRawFd; /// # use tempfile::tempfile; /// # fn main() { /// let f = tempfile().unwrap(); /// let mut aiocb = AioCb::from_fd( f.as_raw_fd(), 0, SigevNone); /// aiocb.fsync(AioFsyncMode::O_SYNC).expect("aio_fsync failed early"); /// while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) { /// thread::sleep(time::Duration::from_millis(10)); /// } /// aiocb.aio_return().expect("aio_fsync failed late"); /// # } /// ``` pub fn from_fd(fd: RawFd, prio: libc::c_int, sigev_notify: SigevNotify) -> AioCb<'a> { let mut a = AioCb::common_init(fd, prio, sigev_notify); a.aio_offset = 0; a.aio_nbytes = 0; a.aio_buf = null_mut(); AioCb { aiocb: a, mutable: false, in_progress: false, buffer: Buffer::None } } /// Constructs a new `AioCb` from a mutable slice. /// /// The resulting `AioCb` will be suitable for both read and write /// operations, but only if the borrow checker can guarantee that the slice /// will outlive the `AioCb`. That will usually be the case if the `AioCb` /// is stack-allocated. If the borrow checker gives you trouble, try using /// [`from_boxed_mut_slice`](#method.from_boxed_mut_slice) instead. /// /// # Parameters /// /// * `fd`: File descriptor. Required for all aio functions. /// * `offs`: File offset /// * `buf`: A memory buffer /// * `prio`: If POSIX Prioritized IO is supported, then the /// operation will be prioritized at the process's /// priority level minus `prio` /// * `sigev_notify`: Determines how you will be notified of event /// completion. /// * `opcode`: This field is only used for `lio_listio`. It /// determines which operation to use for this individual /// aiocb /// /// # Examples /// /// Create an `AioCb` from a mutable slice and read into it. 
/// /// ``` /// # extern crate tempfile; /// # extern crate nix; /// # use nix::errno::Errno; /// # use nix::Error; /// # use nix::sys::aio::*; /// # use nix::sys::signal::SigevNotify; /// # use std::{thread, time}; /// # use std::io::Write; /// # use std::os::unix::io::AsRawFd; /// # use tempfile::tempfile; /// # fn main() { /// const INITIAL: &[u8] = b"abcdef123456"; /// const LEN: usize = 4; /// let mut rbuf = vec![0; LEN]; /// let mut f = tempfile().unwrap(); /// f.write_all(INITIAL).unwrap(); /// { /// let mut aiocb = AioCb::from_mut_slice( f.as_raw_fd(), /// 2, //offset /// &mut rbuf, /// 0, //priority /// SigevNotify::SigevNone, /// LioOpcode::LIO_NOP); /// aiocb.read().unwrap(); /// while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) { /// thread::sleep(time::Duration::from_millis(10)); /// } /// assert_eq!(aiocb.aio_return().unwrap() as usize, LEN); /// } /// assert_eq!(rbuf, b"cdef"); /// # } /// ``` pub fn from_mut_slice(fd: RawFd, offs: off_t, buf: &'a mut [u8], prio: libc::c_int, sigev_notify: SigevNotify, opcode: LioOpcode) -> AioCb<'a> { let mut a = AioCb::common_init(fd, prio, sigev_notify); a.aio_offset = offs; a.aio_nbytes = buf.len() as size_t; a.aio_buf = buf.as_ptr() as *mut c_void; a.aio_lio_opcode = opcode as libc::c_int; AioCb { aiocb: a, mutable: true, in_progress: false, buffer: Buffer::Phantom(PhantomData), } } /// The safest and most flexible way to create an `AioCb`. /// /// Unlike [`from_slice`], this method returns a structure suitable for /// placement on the heap. It may be used for write operations, but not /// read operations. Unlike `from_ptr`, this method will ensure that the /// buffer doesn't `drop` while the kernel is still processing it. Any /// object that can be borrowed as a boxed slice will work. /// /// # Parameters /// /// * `fd`: File descriptor. Required for all aio functions. 
    /// * `offs`:         File offset
    /// * `buf`:          A boxed slice-like object
    /// * `prio`:         If POSIX Prioritized IO is supported, then the
    ///                   operation will be prioritized at the process's
    ///                   priority level minus `prio`
    /// * `sigev_notify`: Determines how you will be notified of event
    ///                   completion.
    /// * `opcode`:       This field is only used for `lio_listio`. It
    ///                   determines which operation to use for this individual
    ///                   aiocb
    ///
    /// # Examples
    ///
    /// Create an `AioCb` from a Vector and use it for writing
    ///
    /// ```
    /// # extern crate tempfile;
    /// # extern crate nix;
    /// # use nix::errno::Errno;
    /// # use nix::Error;
    /// # use nix::sys::aio::*;
    /// # use nix::sys::signal::SigevNotify;
    /// # use std::{thread, time};
    /// # use std::io::Write;
    /// # use std::os::unix::io::AsRawFd;
    /// # use tempfile::tempfile;
    /// # fn main() {
    /// let wbuf = Box::new(Vec::from("CDEF"));
    /// let expected_len = wbuf.len();
    /// let mut f = tempfile().unwrap();
    /// let mut aiocb = AioCb::from_boxed_slice( f.as_raw_fd(),
    ///     2,   //offset
    ///     wbuf,
    ///     0,   //priority
    ///     SigevNotify::SigevNone,
    ///     LioOpcode::LIO_NOP);
    /// aiocb.write().unwrap();
    /// while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) {
    ///     thread::sleep(time::Duration::from_millis(10));
    /// }
    /// assert_eq!(aiocb.aio_return().unwrap() as usize, expected_len);
    /// # }
    /// ```
    ///
    /// Create an `AioCb` from a `Bytes` object
    ///
    /// ```
    /// # extern crate bytes;
    /// # extern crate tempfile;
    /// # extern crate nix;
    /// # use bytes::Bytes;
    /// # use nix::sys::aio::*;
    /// # use nix::sys::signal::SigevNotify;
    /// # use std::os::unix::io::AsRawFd;
    /// # use tempfile::tempfile;
    /// # fn main() {
    /// let wbuf = Box::new(Bytes::from(&b"CDEF"[..]));
    /// let mut f = tempfile().unwrap();
    /// let mut aiocb = AioCb::from_boxed_slice( f.as_raw_fd(),
    ///     2,   //offset
    ///     wbuf,
    ///     0,   //priority
    ///     SigevNotify::SigevNone,
    ///     LioOpcode::LIO_NOP);
    /// # }
    /// ```
    ///
    /// If a library needs to work with buffers that aren't `Box`ed, it can
    /// create a `Box`ed container for use with this method. Here's an example
    /// using an un`Box`ed `Bytes` object.
    ///
    /// ```
    /// # extern crate bytes;
    /// # extern crate tempfile;
    /// # extern crate nix;
    /// # use bytes::Bytes;
    /// # use nix::sys::aio::*;
    /// # use nix::sys::signal::SigevNotify;
    /// # use std::borrow::Borrow;
    /// # use std::os::unix::io::AsRawFd;
    /// # use tempfile::tempfile;
    /// struct BytesContainer(Bytes);
    /// impl Borrow<[u8]> for BytesContainer {
    ///     fn borrow(&self) -> &[u8] {
    ///         self.0.as_ref()
    ///     }
    /// }
    /// fn main() {
    ///     let wbuf = Bytes::from(&b"CDEF"[..]);
    ///     let boxed_wbuf = Box::new(BytesContainer(wbuf));
    ///     let mut f = tempfile().unwrap();
    ///     let mut aiocb = AioCb::from_boxed_slice( f.as_raw_fd(),
    ///         2,   //offset
    ///         boxed_wbuf,
    ///         0,   //priority
    ///         SigevNotify::SigevNone,
    ///         LioOpcode::LIO_NOP);
    /// }
    /// ```
    ///
    /// [`from_slice`]: #method.from_slice
    pub fn from_boxed_slice(fd: RawFd, offs: off_t,
                            buf: Box<dyn Borrow<[u8]>>,
                            prio: libc::c_int, sigev_notify: SigevNotify,
                            opcode: LioOpcode) -> AioCb<'a> {
        let mut a = AioCb::common_init(fd, prio, sigev_notify);
        {
            // Borrow through the trait object to reach the underlying slice,
            // whose address and length the kernel will use.
            let borrowed : &dyn Borrow<[u8]> = buf.borrow();
            let slice : &[u8] = borrowed.borrow();
            a.aio_nbytes = slice.len() as size_t;
            a.aio_buf = slice.as_ptr() as *mut c_void;
        }
        a.aio_offset = offs;
        a.aio_lio_opcode = opcode as libc::c_int;

        AioCb {
            aiocb: a,
            mutable: false,
            in_progress: false,
            buffer: Buffer::BoxedSlice(buf),
        }
    }

    /// The safest and most flexible way to create an `AioCb` for reading.
    ///
    /// Like [`from_boxed_slice`], but the slice is a mutable one. More
    /// flexible than [`from_mut_slice`], because a wide range of objects can be
    /// used.
    ///
    /// # Examples
    ///
    /// Create an `AioCb` from a Vector and use it for reading
    ///
    /// ```
    /// # extern crate tempfile;
    /// # extern crate nix;
    /// # use nix::errno::Errno;
    /// # use nix::Error;
    /// # use nix::sys::aio::*;
    /// # use nix::sys::signal::SigevNotify;
    /// # use std::{thread, time};
    /// # use std::io::Write;
    /// # use std::os::unix::io::AsRawFd;
    /// # use tempfile::tempfile;
    /// # fn main() {
    /// const INITIAL: &[u8] = b"abcdef123456";
    /// const LEN: usize = 4;
    /// let rbuf = Box::new(vec![0; LEN]);
    /// let mut f = tempfile().unwrap();
    /// f.write_all(INITIAL).unwrap();
    /// let mut aiocb = AioCb::from_boxed_mut_slice( f.as_raw_fd(),
    ///     2,   //offset
    ///     rbuf,
    ///     0,   //priority
    ///     SigevNotify::SigevNone,
    ///     LioOpcode::LIO_NOP);
    /// aiocb.read().unwrap();
    /// while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) {
    ///     thread::sleep(time::Duration::from_millis(10));
    /// }
    /// assert_eq!(aiocb.aio_return().unwrap() as usize, LEN);
    /// let mut buffer = aiocb.boxed_mut_slice().unwrap();
    /// const EXPECT: &[u8] = b"cdef";
    /// assert_eq!(buffer.borrow_mut(), EXPECT);
    /// # }
    /// ```
    ///
    /// [`from_boxed_slice`]: #method.from_boxed_slice
    /// [`from_mut_slice`]: #method.from_mut_slice
    pub fn from_boxed_mut_slice(fd: RawFd, offs: off_t,
                                mut buf: Box<dyn BorrowMut<[u8]>>,
                                prio: libc::c_int, sigev_notify: SigevNotify,
                                opcode: LioOpcode) -> AioCb<'a> {
        let mut a = AioCb::common_init(fd, prio, sigev_notify);
        {
            // Borrow through the trait object to reach the underlying slice,
            // whose address and length the kernel will use.
            let borrowed : &mut dyn BorrowMut<[u8]> = buf.borrow_mut();
            let slice : &mut [u8] = borrowed.borrow_mut();
            a.aio_nbytes = slice.len() as size_t;
            a.aio_buf = slice.as_mut_ptr() as *mut c_void;
        }
        a.aio_offset = offs;
        a.aio_lio_opcode = opcode as libc::c_int;

        AioCb {
            aiocb: a,
            mutable: true,
            in_progress: false,
            buffer: Buffer::BoxedMutSlice(buf),
        }
    }

    /// Constructs a new `AioCb` from a mutable raw pointer
    ///
    /// Unlike `from_mut_slice`, this method returns a structure suitable for
    /// placement on the heap. It may be used for both reads and writes. Due
    /// to its unsafety, this method is not recommended. It is most useful when
    /// heap allocation is required but for some reason the data cannot be
    /// wrapped in a `struct` that implements `BorrowMut<[u8]>`
    ///
    /// # Parameters
    ///
    /// * `fd`:           File descriptor. Required for all aio functions.
    /// * `offs`:         File offset
    /// * `buf`:          Pointer to the memory buffer
    /// * `len`:          Length of the buffer pointed to by `buf`
    /// * `prio`:         If POSIX Prioritized IO is supported, then the
    ///                   operation will be prioritized at the process's
    ///                   priority level minus `prio`
    /// * `sigev_notify`: Determines how you will be notified of event
    ///                   completion.
    /// * `opcode`:       This field is only used for `lio_listio`. It
    ///                   determines which operation to use for this individual
    ///                   aiocb
    ///
    /// # Safety
    ///
    /// The caller must ensure that the storage pointed to by `buf` outlives the
    /// `AioCb`. The lifetime checker can't help here.
    pub unsafe fn from_mut_ptr(fd: RawFd, offs: off_t,
                               buf: *mut c_void, len: usize,
                               prio: libc::c_int, sigev_notify: SigevNotify,
                               opcode: LioOpcode) -> AioCb<'a> {
        let mut a = AioCb::common_init(fd, prio, sigev_notify);
        a.aio_offset = offs;
        a.aio_nbytes = len;
        a.aio_buf = buf;
        a.aio_lio_opcode = opcode as libc::c_int;

        AioCb {
            aiocb: a,
            mutable: true,
            in_progress: false,
            buffer: Buffer::None
        }
    }

    /// Constructs a new `AioCb` from a raw pointer.
    ///
    /// Unlike `from_slice`, this method returns a structure suitable for
    /// placement on the heap. Due to its unsafety, this method is not
    /// recommended. It is most useful when heap allocation is required but for
    /// some reason the data cannot be wrapped in a `struct` that implements
    /// `Borrow<[u8]>`
    ///
    /// # Parameters
    ///
    /// * `fd`:           File descriptor. Required for all aio functions.
    /// * `offs`:         File offset
    /// * `buf`:          Pointer to the memory buffer
    /// * `len`:          Length of the buffer pointed to by `buf`
    /// * `prio`:         If POSIX Prioritized IO is supported, then the
    ///                   operation will be prioritized at the process's
    ///                   priority level minus `prio`
    /// * `sigev_notify`: Determines how you will be notified of event
    ///                   completion.
    /// * `opcode`:       This field is only used for `lio_listio`. It
    ///                   determines which operation to use for this individual
    ///                   aiocb
    ///
    /// # Safety
    ///
    /// The caller must ensure that the storage pointed to by `buf` outlives the
    /// `AioCb`. The lifetime checker can't help here.
    pub unsafe fn from_ptr(fd: RawFd, offs: off_t,
                           buf: *const c_void, len: usize,
                           prio: libc::c_int, sigev_notify: SigevNotify,
                           opcode: LioOpcode) -> AioCb<'a> {
        let mut a = AioCb::common_init(fd, prio, sigev_notify);
        a.aio_offset = offs;
        a.aio_nbytes = len;
        // casting a const ptr to a mutable ptr here is ok, because we set the
        // AioCb's mutable field to false
        a.aio_buf = buf as *mut c_void;
        a.aio_lio_opcode = opcode as libc::c_int;

        AioCb {
            aiocb: a,
            mutable: false,
            in_progress: false,
            buffer: Buffer::None
        }
    }

    /// Like `from_mut_slice`, but works on constant slices rather than
    /// mutable slices.
    ///
    /// An `AioCb` created this way cannot be used with `read`, and its
    /// `LioOpcode` cannot be set to `LIO_READ`. This method is useful when
    /// writing a const buffer with `AioCb::write`, since `from_mut_slice` can't
    /// work with const buffers.
    ///
    /// # Examples
    ///
    /// Construct an `AioCb` from a slice and use it for writing.
    ///
    /// ```
    /// # extern crate tempfile;
    /// # extern crate nix;
    /// # use nix::errno::Errno;
    /// # use nix::Error;
    /// # use nix::sys::aio::*;
    /// # use nix::sys::signal::SigevNotify;
    /// # use std::{thread, time};
    /// # use std::os::unix::io::AsRawFd;
    /// # use tempfile::tempfile;
    /// # fn main() {
    /// const WBUF: &[u8] = b"abcdef123456";
    /// let mut f = tempfile().unwrap();
    /// let mut aiocb = AioCb::from_slice( f.as_raw_fd(),
    ///     2,   //offset
    ///     WBUF,
    ///     0,   //priority
    ///     SigevNotify::SigevNone,
    ///     LioOpcode::LIO_NOP);
    /// aiocb.write().unwrap();
    /// while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) {
    ///     thread::sleep(time::Duration::from_millis(10));
    /// }
    /// assert_eq!(aiocb.aio_return().unwrap() as usize, WBUF.len());
    /// # }
    /// ```
    // Note: another solution to the problem of writing const buffers would be
    // to genericize AioCb for both &mut [u8] and &[u8] buffers. AioCb::read
    // could take the former and AioCb::write could take the latter. However,
    // then lio_listio wouldn't work, because that function needs a slice of
    // AioCb, and they must all be of the same type.
    pub fn from_slice(fd: RawFd, offs: off_t, buf: &'a [u8],
                      prio: libc::c_int, sigev_notify: SigevNotify,
                      opcode: LioOpcode) -> AioCb {
        let mut a = AioCb::common_init(fd, prio, sigev_notify);
        a.aio_offset = offs;
        a.aio_nbytes = buf.len() as size_t;
        // casting an immutable buffer to a mutable pointer looks unsafe,
        // but technically its only unsafe to dereference it, not to create
        // it.
        a.aio_buf = buf.as_ptr() as *mut c_void;
        assert!(opcode != LioOpcode::LIO_READ, "Can't read into an immutable buffer");
        a.aio_lio_opcode = opcode as libc::c_int;

        AioCb {
            aiocb: a,
            mutable: false,
            in_progress: false,
            buffer: Buffer::None,
        }
    }

    // Shared initialization for all constructors: zero the control block and
    // fill in the fields common to every operation.
    fn common_init(fd: RawFd, prio: libc::c_int,
                   sigev_notify: SigevNotify) -> libc::aiocb {
        // Use mem::zeroed instead of explicitly zeroing each field, because the
        // number and name of reserved fields is OS-dependent. On some OSes,
        // some reserved fields are used by the kernel for state, and must be
        // explicitly zeroed when allocated.
        let mut a = unsafe { mem::zeroed::<libc::aiocb>()};
        a.aio_fildes = fd;
        a.aio_reqprio = prio;
        a.aio_sigevent = SigEvent::new(sigev_notify).sigevent();
        a
    }

    /// Update the notification settings for an existing `aiocb`
    pub fn set_sigev_notify(&mut self, sigev_notify: SigevNotify) {
        self.aiocb.aio_sigevent = SigEvent::new(sigev_notify).sigevent();
    }

    /// Cancels an outstanding AIO request.
    ///
    /// The operating system is not required to implement cancellation for all
    /// file and device types. Even if it does, there is no guarantee that the
    /// operation has not already completed. So the caller must check the
    /// result and handle operations that were not canceled or that have already
    /// completed.
    ///
    /// # Examples
    ///
    /// Cancel an outstanding aio operation. Note that we must still call
    /// `aio_return` to free resources, even though we don't care about the
    /// result.
    ///
    /// ```
    /// # extern crate tempfile;
    /// # extern crate nix;
    /// # use nix::errno::Errno;
    /// # use nix::Error;
    /// # use nix::sys::aio::*;
    /// # use nix::sys::signal::SigevNotify;
    /// # use std::{thread, time};
    /// # use std::io::Write;
    /// # use std::os::unix::io::AsRawFd;
    /// # use tempfile::tempfile;
    /// # fn main() {
    /// let wbuf = b"CDEF";
    /// let mut f = tempfile().unwrap();
    /// let mut aiocb = AioCb::from_slice( f.as_raw_fd(),
    ///     2,   //offset
    ///     &wbuf[..],
    ///     0,   //priority
    ///     SigevNotify::SigevNone,
    ///     LioOpcode::LIO_NOP);
    /// aiocb.write().unwrap();
    /// let cs = aiocb.cancel().unwrap();
    /// if cs == AioCancelStat::AioNotCanceled {
    ///     while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) {
    ///         thread::sleep(time::Duration::from_millis(10));
    ///     }
    /// }
    /// // Must call `aio_return`, but ignore the result
    /// let _ = aiocb.aio_return();
    /// # }
    /// ```
    ///
    /// # References
    ///
    /// [aio_cancel](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_cancel.html)
    pub fn cancel(&mut self) -> Result<AioCancelStat> {
        match unsafe { libc::aio_cancel(self.aiocb.aio_fildes, &mut self.aiocb) } {
            libc::AIO_CANCELED => Ok(AioCancelStat::AioCanceled),
            libc::AIO_NOTCANCELED => Ok(AioCancelStat::AioNotCanceled),
            libc::AIO_ALLDONE => Ok(AioCancelStat::AioAllDone),
            -1 => Err(Error::last()),
            _ => panic!("unknown aio_cancel return value")
        }
    }

    /// Retrieve error status of an asynchronous operation.
    ///
    /// If the request has not yet completed, returns `EINPROGRESS`. Otherwise,
    /// returns `Ok` or any other error.
    ///
    /// # Examples
    ///
    /// Issue an aio operation and use `error` to poll for completion. Polling
    /// is an alternative to `aio_suspend`, used by most of the other examples.
    ///
    /// ```
    /// # extern crate tempfile;
    /// # extern crate nix;
    /// # use nix::errno::Errno;
    /// # use nix::Error;
    /// # use nix::sys::aio::*;
    /// # use nix::sys::signal::SigevNotify;
    /// # use std::{thread, time};
    /// # use std::os::unix::io::AsRawFd;
    /// # use tempfile::tempfile;
    /// # fn main() {
    /// const WBUF: &[u8] = b"abcdef123456";
    /// let mut f = tempfile().unwrap();
    /// let mut aiocb = AioCb::from_slice( f.as_raw_fd(),
    ///     2,   //offset
    ///     WBUF,
    ///     0,   //priority
    ///     SigevNotify::SigevNone,
    ///     LioOpcode::LIO_NOP);
    /// aiocb.write().unwrap();
    /// while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) {
    ///     thread::sleep(time::Duration::from_millis(10));
    /// }
    /// assert_eq!(aiocb.aio_return().unwrap() as usize, WBUF.len());
    /// # }
    /// ```
    ///
    /// # References
    ///
    /// [aio_error](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_error.html)
    pub fn error(&mut self) -> Result<()> {
        // aio_error returns 0 on success, a positive errno for a failed
        // operation, or -1 if the aiocb itself is invalid.
        match unsafe { libc::aio_error(&mut self.aiocb as *mut libc::aiocb) } {
            0 => Ok(()),
            num if num > 0 => Err(Error::from_errno(Errno::from_i32(num))),
            -1 => Err(Error::last()),
            num => panic!("unknown aio_error return value {:?}", num)
        }
    }

    /// An asynchronous version of `fsync(2)`.
    ///
    /// # References
    ///
    /// [aio_fsync](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_fsync.html)
    pub fn fsync(&mut self, mode: AioFsyncMode) -> Result<()> {
        let p: *mut libc::aiocb = &mut self.aiocb;
        Errno::result(unsafe {
                libc::aio_fsync(mode as libc::c_int, p)
        }).map(|_| {
            self.in_progress = true;
        })
    }

    /// Returns the `aiocb`'s `LioOpcode` field
    ///
    /// If the value cannot be represented as an `LioOpcode`, returns `None`
    /// instead.
    pub fn lio_opcode(&self) -> Option<LioOpcode> {
        match self.aiocb.aio_lio_opcode {
            libc::LIO_READ => Some(LioOpcode::LIO_READ),
            libc::LIO_WRITE => Some(LioOpcode::LIO_WRITE),
            libc::LIO_NOP => Some(LioOpcode::LIO_NOP),
            _ => None
        }
    }

    /// Returns the requested length of the aio operation in bytes
    ///
    /// This method returns the *requested* length of the operation. To get the
    /// number of bytes actually read or written by a completed operation, use
    /// `aio_return` instead.
    pub fn nbytes(&self) -> usize {
        self.aiocb.aio_nbytes
    }

    /// Returns the file offset stored in the `AioCb`
    pub fn offset(&self) -> off_t {
        self.aiocb.aio_offset
    }

    /// Returns the priority of the `AioCb`
    pub fn priority(&self) -> libc::c_int {
        self.aiocb.aio_reqprio
    }

    /// Asynchronously reads from a file descriptor into a buffer
    ///
    /// # References
    ///
    /// [aio_read](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_read.html)
    pub fn read(&mut self) -> Result<()> {
        assert!(self.mutable, "Can't read into an immutable buffer");
        let p: *mut libc::aiocb = &mut self.aiocb;
        Errno::result(unsafe {
            libc::aio_read(p)
        }).map(|_| {
            self.in_progress = true;
        })
    }

    /// Returns the `SigEvent` stored in the `AioCb`
    pub fn sigevent(&self) -> SigEvent {
        SigEvent::from(&self.aiocb.aio_sigevent)
    }

    /// Retrieve return status of an asynchronous operation.
    ///
    /// Should only be called once for each `AioCb`, after `AioCb::error`
    /// indicates that it has completed. The result is the same as for the
    /// synchronous `read(2)`, `write(2)`, or `fsync(2)` functions.
    ///
    /// # References
    ///
    /// [aio_return](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_return.html)
    // Note: this should be just `return`, but that's a reserved word
    pub fn aio_return(&mut self) -> Result<isize> {
        let p: *mut libc::aiocb = &mut self.aiocb;
        // Collecting the result releases any remaining in-kernel state.
        self.in_progress = false;
        Errno::result(unsafe { libc::aio_return(p) })
    }

    /// Asynchronously writes from a buffer to a file descriptor
    ///
    /// # References
    ///
    /// [aio_write](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_write.html)
    pub fn write(&mut self) -> Result<()> {
        let p: *mut libc::aiocb = &mut self.aiocb;
        Errno::result(unsafe {
            libc::aio_write(p)
        }).map(|_| {
            self.in_progress = true;
        })
    }
}

/// Cancels outstanding AIO requests for a given file descriptor.
///
/// # Examples
///
/// Issue an aio operation, then cancel all outstanding operations on that file
/// descriptor.
///
/// ```
/// # extern crate tempfile;
/// # extern crate nix;
/// # use nix::errno::Errno;
/// # use nix::Error;
/// # use nix::sys::aio::*;
/// # use nix::sys::signal::SigevNotify;
/// # use std::{thread, time};
/// # use std::io::Write;
/// # use std::os::unix::io::AsRawFd;
/// # use tempfile::tempfile;
/// # fn main() {
/// let wbuf = b"CDEF";
/// let mut f = tempfile().unwrap();
/// let mut aiocb = AioCb::from_slice( f.as_raw_fd(),
///     2,   //offset
///     &wbuf[..],
///     0,   //priority
///     SigevNotify::SigevNone,
///     LioOpcode::LIO_NOP);
/// aiocb.write().unwrap();
/// let cs = aio_cancel_all(f.as_raw_fd()).unwrap();
/// if cs == AioCancelStat::AioNotCanceled {
///     while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) {
///         thread::sleep(time::Duration::from_millis(10));
///     }
/// }
/// // Must call `aio_return`, but ignore the result
/// let _ = aiocb.aio_return();
/// # }
/// ```
///
/// # References
///
/// [`aio_cancel`](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_cancel.html)
pub fn aio_cancel_all(fd: RawFd) -> Result<AioCancelStat> {
    // A null aiocb pointer asks the OS to cancel everything for this fd.
    match unsafe { libc::aio_cancel(fd, null_mut()) } {
        libc::AIO_CANCELED => Ok(AioCancelStat::AioCanceled),
        libc::AIO_NOTCANCELED => Ok(AioCancelStat::AioNotCanceled),
        libc::AIO_ALLDONE => Ok(AioCancelStat::AioAllDone),
        -1 => Err(Error::last()),
        _ => panic!("unknown aio_cancel return value")
    }
}

/// Suspends the calling process until at least one of the specified `AioCb`s
/// has completed, a signal is delivered, or the timeout has passed.
///
/// If `timeout` is `None`, `aio_suspend` will block indefinitely.
///
/// # Examples
///
/// Use `aio_suspend` to block until an aio operation completes.
///
// Disable doctest due to a known bug in FreeBSD's 32-bit emulation. The fix
// will be included in release 11.2.
// FIXME reenable the doc test when the CI machine gets upgraded to that release.
// https://svnweb.freebsd.org/base?view=revision&revision=325018
/// ```no_run
/// # extern crate tempfile;
/// # extern crate nix;
/// # use nix::sys::aio::*;
/// # use nix::sys::signal::SigevNotify;
/// # use std::os::unix::io::AsRawFd;
/// # use tempfile::tempfile;
/// # fn main() {
/// const WBUF: &[u8] = b"abcdef123456";
/// let mut f = tempfile().unwrap();
/// let mut aiocb = AioCb::from_slice( f.as_raw_fd(),
///     2,   //offset
///     WBUF,
///     0,   //priority
///     SigevNotify::SigevNone,
///     LioOpcode::LIO_NOP);
/// aiocb.write().unwrap();
/// aio_suspend(&[&aiocb], None).expect("aio_suspend failed");
/// assert_eq!(aiocb.aio_return().unwrap() as usize, WBUF.len());
/// # }
/// ```
/// # References
///
/// [`aio_suspend`](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_suspend.html)
pub fn aio_suspend(list: &[&AioCb], timeout: Option<TimeSpec>) -> Result<()> {
    // NOTE(review): this cast reinterprets `&[&AioCb]` as a list of raw
    // `aiocb` pointers.  It appears to rely on `aiocb` being the first field
    // of `AioCb` and on `&AioCb` being a thin pointer — confirm that the
    // struct layout guarantees this before changing `AioCb`'s fields.
    let plist = list as *const [&AioCb] as *const [*const libc::aiocb];
    let p = plist as *const *const libc::aiocb;
    let timep = match timeout {
        None    => null::<libc::timespec>(),
        Some(x) => x.as_ref() as *const libc::timespec
    };
    Errno::result(unsafe {
        libc::aio_suspend(p, list.len() as i32, timep)
    }).map(drop)
}

impl<'a> Debug for AioCb<'a> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // `buffer` is omitted; see the manual Debug impl for `Buffer`.
        fmt.debug_struct("AioCb")
            .field("aiocb", &self.aiocb)
            .field("mutable", &self.mutable)
            .field("in_progress", &self.in_progress)
            .finish()
    }
}

impl<'a> Drop for AioCb<'a> {
    /// If the `AioCb` has no remaining state in the kernel, just drop it.
    /// Otherwise, dropping constitutes a resource leak, which is an error
    fn drop(&mut self) {
        assert!(thread::panicking() || !self.in_progress,
                "Dropped an in-progress AioCb");
    }
}

/// LIO Control Block.
///
/// The basic structure used to issue multiple AIO operations simultaneously.
#[cfg(not(any(target_os = "ios", target_os = "macos")))]
pub struct LioCb<'a> {
    /// A collection of [`AioCb`]s. All of these will be issued simultaneously
    /// by the [`listio`] method.
    ///
    /// [`AioCb`]: struct.AioCb.html
    /// [`listio`]: #method.listio
    pub aiocbs: Vec<AioCb<'a>>,

    /// The actual list passed to `libc::lio_listio`.
    ///
    /// It must live for as long as any of the operations are still being
    /// processed, because the aio subsystem uses its address as a unique
    /// identifier.
    list: Vec<*mut libc::aiocb>,

    /// A partial set of results. This field will get populated by
    /// `listio_resubmit` when an `LioCb` is resubmitted after an error
    results: Vec<Option<Result<isize>>>
}

#[cfg(not(any(target_os = "ios", target_os = "macos")))]
impl<'a> LioCb<'a> {
    /// Initialize an empty `LioCb`
    pub fn with_capacity(capacity: usize) -> LioCb<'a> {
        LioCb {
            aiocbs: Vec::with_capacity(capacity),
            list: Vec::with_capacity(capacity),
            results: Vec::with_capacity(capacity)
        }
    }

    /// Submits multiple asynchronous I/O requests with a single system call.
    ///
    /// They are not guaranteed to complete atomically, and the order in which
    /// the requests are carried out is not specified. Reads, writes, and
    /// fsyncs may be freely mixed.
    ///
    /// This function is useful for reducing the context-switch overhead of
    /// submitting many AIO operations.  It can also be used with
    /// `LioMode::LIO_WAIT` to block on the result of several independent
    /// operations.  Used that way, it is often useful in programs that
    /// otherwise make little use of AIO.
    ///
    /// # Examples
    ///
    /// Use `listio` to submit an aio operation and wait for its completion. In
    /// this case, there is no need to use [`aio_suspend`] to wait or
    /// [`AioCb::error`] to poll.
    ///
    /// ```
    /// # extern crate tempfile;
    /// # extern crate nix;
    /// # use nix::sys::aio::*;
    /// # use nix::sys::signal::SigevNotify;
    /// # use std::os::unix::io::AsRawFd;
    /// # use tempfile::tempfile;
    /// # fn main() {
    /// const WBUF: &[u8] = b"abcdef123456";
    /// let mut f = tempfile().unwrap();
    /// let mut liocb = LioCb::with_capacity(1);
    /// liocb.aiocbs.push(AioCb::from_slice( f.as_raw_fd(),
    ///     2,   //offset
    ///     WBUF,
    ///     0,   //priority
    ///     SigevNotify::SigevNone,
    ///     LioOpcode::LIO_WRITE));
    /// liocb.listio(LioMode::LIO_WAIT,
    ///              SigevNotify::SigevNone).unwrap();
    /// assert_eq!(liocb.aio_return(0).unwrap() as usize, WBUF.len());
    /// # }
    /// ```
    ///
    /// # References
    ///
    /// [`lio_listio`](http://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html)
    ///
    /// [`aio_suspend`]: fn.aio_suspend.html
    /// [`AioCb::error`]: struct.AioCb.html#method.error
    pub fn listio(&mut self, mode: LioMode,
                  sigev_notify: SigevNotify) -> Result<()> {
        let sigev = SigEvent::new(sigev_notify);
        // NOTE(review): this takes a raw pointer to the temporary returned by
        // `sigev.sigevent()`.  It appears to depend on Rust's temporary
        // lifetime extension through the cast to keep the value alive for the
        // rest of the function; binding the sigevent to a named local first
        // would make that explicit — confirm before refactoring.
        let sigevp = &mut sigev.sigevent() as *mut libc::sigevent;
        // Rebuild `list` from scratch: the kernel identifies operations by
        // the address of each aiocb, so these pointers must stay stable while
        // the operations are outstanding.
        self.list.clear();
        for a in &mut self.aiocbs {
            a.in_progress = true;
            self.list.push(a as *mut AioCb<'a> as *mut libc::aiocb);
        }
        let p = self.list.as_ptr();
        Errno::result(unsafe {
            libc::lio_listio(mode as i32, p, self.list.len() as i32, sigevp)
        }).map(drop)
    }

    /// Resubmits any incomplete operations with [`lio_listio`].
    ///
    /// Sometimes, due to system resource limitations, an `lio_listio` call will
    /// return `EIO`, or `EAGAIN`.  Or, if a signal is received, it may return
    /// `EINTR`.  In any of these cases, only a subset of its constituent
    /// operations will actually have been initiated.  `listio_resubmit` will
    /// resubmit any operations that are still uninitiated.
    ///
    /// After calling `listio_resubmit`, results should be collected by
    /// [`LioCb::aio_return`].
    ///
    /// # Examples
    /// ```no_run
    /// # extern crate tempfile;
    /// # extern crate nix;
    /// # use nix::Error;
    /// # use nix::errno::Errno;
    /// # use nix::sys::aio::*;
    /// # use nix::sys::signal::SigevNotify;
    /// # use std::os::unix::io::AsRawFd;
    /// # use std::{thread, time};
    /// # use tempfile::tempfile;
    /// # fn main() {
    /// const WBUF: &[u8] = b"abcdef123456";
    /// let mut f = tempfile().unwrap();
    /// let mut liocb = LioCb::with_capacity(1);
    /// liocb.aiocbs.push(AioCb::from_slice( f.as_raw_fd(),
    ///     2,   //offset
    ///     WBUF,
    ///     0,   //priority
    ///     SigevNotify::SigevNone,
    ///     LioOpcode::LIO_WRITE));
    /// let mut err = liocb.listio(LioMode::LIO_WAIT, SigevNotify::SigevNone);
    /// while err == Err(Error::Sys(Errno::EIO)) ||
    ///       err == Err(Error::Sys(Errno::EAGAIN)) {
    ///     thread::sleep(time::Duration::from_millis(10));
    ///     err = liocb.listio_resubmit(LioMode::LIO_WAIT, SigevNotify::SigevNone);
    /// }
    /// assert_eq!(liocb.aio_return(0).unwrap() as usize, WBUF.len());
    /// # }
    /// ```
    ///
    /// # References
    ///
    /// [`lio_listio`](http://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html)
    ///
    /// [`lio_listio`]: http://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html
    /// [`LioCb::aio_return`]: struct.LioCb.html#method.aio_return
    // Note: the addresses of any EINPROGRESS or EOK aiocbs _must_ not be
    // changed by this method, because the kernel relies on their addresses
    // being stable.
    // Note: aiocbs that are Ok(()) must be finalized by aio_return, or else the
    // sigev_notify will immediately refire.
    pub fn listio_resubmit(&mut self, mode:LioMode,
                           sigev_notify: SigevNotify) -> Result<()> {
        let sigev = SigEvent::new(sigev_notify);
        // NOTE(review): same temporary-lifetime subtlety as in `listio` above.
        let sigevp = &mut sigev.sigevent() as *mut libc::sigevent;
        self.list.clear();

        // Grow `results` to match `aiocbs`; slots stay None until a final
        // status is collected for the corresponding operation.
        while self.results.len() < self.aiocbs.len() {
            self.results.push(None);
        }

        for (i, a) in self.aiocbs.iter_mut().enumerate() {
            if self.results[i].is_some() {
                // Already collected final status for this operation
                continue;
            }
            match a.error() {
                Ok(()) => {
                    // aiocb is complete; collect its status and don't resubmit
                    self.results[i] = Some(a.aio_return());
                },
                Err(Error::Sys(Errno::EAGAIN)) => {
                    // Operation was never initiated; queue it for resubmission
                    self.list.push(a as *mut AioCb<'a> as *mut libc::aiocb);
                },
                Err(Error::Sys(Errno::EINPROGRESS)) => {
                    // aiocb was successfully queued; no need to do anything
                },
                Err(Error::Sys(Errno::EINVAL)) => panic!(
                    "AioCb was never submitted, or already finalized"),
                _ => unreachable!()
            }
        }
        let p = self.list.as_ptr();
        Errno::result(unsafe {
            libc::lio_listio(mode as i32, p, self.list.len() as i32, sigevp)
        }).map(drop)
    }

    /// Collect final status for an individual `AioCb` submitted as part of an
    /// `LioCb`.
    ///
    /// This is just like [`AioCb::aio_return`], except it takes into account
    /// operations that were restarted by [`LioCb::listio_resubmit`]
    ///
    /// [`AioCb::aio_return`]: struct.AioCb.html#method.aio_return
    /// [`LioCb::listio_resubmit`]: #method.listio_resubmit
    pub fn aio_return(&mut self, i: usize) -> Result<isize> {
        // A None (or missing) slot means `listio_resubmit` never collected a
        // result for this operation, so query the aiocb directly.
        if i >= self.results.len() || self.results[i].is_none() {
            self.aiocbs[i].aio_return()
        } else {
            self.results[i].unwrap()
        }
    }

    /// Retrieve error status of an individual `AioCb` submitted as part of an
    /// `LioCb`.
    ///
    /// This is just like [`AioCb::error`], except it takes into account
    /// operations that were restarted by [`LioCb::listio_resubmit`]
    ///
    /// [`AioCb::error`]: struct.AioCb.html#method.error
    /// [`LioCb::listio_resubmit`]: #method.listio_resubmit
    pub fn error(&mut self, i: usize) -> Result<()> {
        if i >= self.results.len() || self.results[i].is_none() {
            self.aiocbs[i].error()
        } else {
            // A collected result means the operation already completed;
            // its final status (success or failure) is reported by
            // `aio_return`, not here.
            Ok(())
        }
    }
}

#[cfg(not(any(target_os = "ios", target_os = "macos")))]
impl<'a> Debug for LioCb<'a> {
    // Manual impl: `list` holds raw pointers and `results` may hold
    // non-Debug members, so only the aiocbs are printed.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("LioCb")
            .field("aiocbs", &self.aiocbs)
            .finish()
    }
}

#[cfg(not(any(target_os = "ios", target_os = "macos")))]
impl<'a> From<Vec<AioCb<'a>>> for LioCb<'a> {
    fn from(src: Vec<AioCb<'a>>) -> LioCb<'a> {
        LioCb {
            // Size the side tables to the source's capacity so submission
            // never needs to reallocate (pointer stability matters).
            list: Vec::with_capacity(src.capacity()),
            results: Vec::with_capacity(src.capacity()),
            aiocbs: src,
        }
    }
}

// NOTE(review): the following line appears to be a stray commit-message
// fragment fused into the file by whatever tool produced this chunk; it is
// not valid Rust and should be removed when the file is reassembled.
Reenable a test that had been disabled due to old CI infrastructure

// vim: tw=80
//! POSIX Asynchronous I/O
//!
//! The POSIX AIO interface is used for asynchronous I/O on files and disk-like
//! devices.  It supports [`read`](struct.AioCb.html#method.read),
//! [`write`](struct.AioCb.html#method.write), and
//! [`fsync`](struct.AioCb.html#method.fsync) operations.  Completion
//! notifications can optionally be delivered via
//! [signals](../signal/enum.SigevNotify.html#variant.SigevSignal), via the
//! [`aio_suspend`](fn.aio_suspend.html) function, or via polling.  Some
//! platforms support other completion
//! notifications, such as
//! [kevent](../signal/enum.SigevNotify.html#variant.SigevKevent).
//!
//! Multiple operations may be submitted in a batch with
//! [`lio_listio`](fn.lio_listio.html), though the standard does not guarantee
//! that they will be executed atomically.
//!
//! Outstanding operations may be cancelled with
//! [`cancel`](struct.AioCb.html#method.cancel) or
//! [`aio_cancel_all`](fn.aio_cancel_all.html), though the operating system may
//! not support this for all filesystems and devices.

use {Error, Result};
use errno::Errno;
use std::os::unix::io::RawFd;
use libc::{c_void, off_t, size_t};
use libc;
use std::borrow::{Borrow, BorrowMut};
use std::fmt;
use std::fmt::Debug;
use std::marker::PhantomData;
use std::mem;
use std::ptr::{null, null_mut};
use sys::signal::*;
use std::thread;
use sys::time::TimeSpec;

libc_enum! {
    /// Mode for `AioCb::fsync`.  Controls whether only data or both data and
    /// metadata are synced.
    #[repr(i32)]
    pub enum AioFsyncMode {
        /// do it like `fsync`
        O_SYNC,
        /// on supported operating systems only, do it like `fdatasync`
        #[cfg(any(target_os = "ios",
                  target_os = "linux",
                  target_os = "macos",
                  target_os = "netbsd",
                  target_os = "openbsd"))]
        O_DSYNC
    }
}

libc_enum! {
    /// When used with [`lio_listio`](fn.lio_listio.html), determines whether a
    /// given `aiocb` should be used for a read operation, a write operation, or
    /// ignored.  Has no effect for any other aio functions.
    #[repr(i32)]
    pub enum LioOpcode {
        LIO_NOP,
        LIO_WRITE,
        LIO_READ,
    }
}

libc_enum! {
    /// Mode for [`lio_listio`](fn.lio_listio.html)
    #[repr(i32)]
    pub enum LioMode {
        /// Requests that [`lio_listio`](fn.lio_listio.html) block until all
        /// requested operations have been completed
        LIO_WAIT,
        /// Requests that [`lio_listio`](fn.lio_listio.html) return immediately
        LIO_NOWAIT,
    }
}

/// Return values for [`AioCb::cancel`](struct.AioCb.html#method.cancel) and
/// [`aio_cancel_all`](fn.aio_cancel_all.html)
#[repr(i32)]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum AioCancelStat {
    /// All outstanding requests were canceled
    AioCanceled = libc::AIO_CANCELED,
    /// Some requests were not canceled.  Their status should be checked with
    /// `AioCb::error`
    AioNotCanceled = libc::AIO_NOTCANCELED,
    /// All of the requests have already finished
    AioAllDone = libc::AIO_ALLDONE,
}

/// Owns (uniquely or shared) a memory buffer to keep it from `Drop`ing while
/// the kernel has a pointer to it.
pub enum Buffer<'a> {
    /// No buffer to own.
    ///
    /// Used for operations like `aio_fsync` that have no data, or for unsafe
    /// operations that work with raw pointers.
    None,
    /// Keeps a reference to a slice
    Phantom(PhantomData<&'a mut [u8]>),
    /// Generic thing that keeps a buffer from dropping
    BoxedSlice(Box<dyn Borrow<[u8]>>),
    /// Generic thing that keeps a mutable buffer from dropping
    BoxedMutSlice(Box<dyn BorrowMut<[u8]>>),
}

impl<'a> Debug for Buffer<'a> {
    // Note: someday it may be possible to Derive Debug for a trait object, but
    // not today.
    // https://github.com/rust-lang/rust/issues/1563
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Buffer::None => write!(fmt, "None"),
            Buffer::Phantom(p) => p.fmt(fmt),
            // For the boxed variants, only the trait-object address can be
            // printed, since the contents aren't required to impl Debug.
            Buffer::BoxedSlice(ref bs) => {
                let borrowed : &dyn Borrow<[u8]> = bs.borrow();
                write!(fmt, "BoxedSlice({:?})",
                    borrowed as *const dyn Borrow<[u8]>)
            },
            Buffer::BoxedMutSlice(ref bms) => {
                let borrowed : &dyn BorrowMut<[u8]> = bms.borrow();
                write!(fmt, "BoxedMutSlice({:?})",
                    borrowed as *const dyn BorrowMut<[u8]>)
            }
        }
    }
}

/// AIO Control Block.
///
/// The basic structure used by all aio functions.  Each `AioCb` represents one
/// I/O request.
pub struct AioCb<'a> {
    aiocb: libc::aiocb,
    /// Tracks whether the buffer pointed to by `libc::aiocb.aio_buf` is mutable
    mutable: bool,
    /// Could this `AioCb` potentially have any in-kernel state?
    in_progress: bool,
    /// Optionally keeps a reference to the data.
    ///
    /// Used to keep buffers from `Drop`'ing, and may be returned once the
    /// `AioCb` is completed by [`buffer`](#method.buffer).
    buffer: Buffer<'a>
}

impl<'a> AioCb<'a> {
    /// Remove the inner `Buffer` and return it
    ///
    /// It is an error to call this method while the `AioCb` is still in
    /// progress.
    pub fn buffer(&mut self) -> Buffer<'a> {
        assert!(!self.in_progress);
        // Swap in a placeholder so `self` remains valid after the move.
        let mut x = Buffer::None;
        mem::swap(&mut self.buffer, &mut x);
        x
    }

    /// Remove the inner boxed slice, if any, and return it.
    ///
    /// The returned value will be the argument that was passed to
    /// `from_boxed_slice` when this `AioCb` was created.
    ///
    /// It is an error to call this method while the `AioCb` is still in
    /// progress.
    pub fn boxed_slice(&mut self) -> Option<Box<dyn Borrow<[u8]>>> {
        assert!(!self.in_progress, "Can't remove the buffer from an AioCb that's still in-progress.  Did you forget to call aio_return?");
        if let Buffer::BoxedSlice(_) = self.buffer {
            // Two-step dance: swap the buffer out, then destructure the
            // moved-out value to extract the inner Box.
            let mut oldbuffer = Buffer::None;
            mem::swap(&mut self.buffer, &mut oldbuffer);
            if let Buffer::BoxedSlice(inner) = oldbuffer {
                Some(inner)
            } else {
                unreachable!();
            }
        } else {
            None
        }
    }

    /// Remove the inner boxed mutable slice, if any, and return it.
    ///
    /// The returned value will be the argument that was passed to
    /// `from_boxed_mut_slice` when this `AioCb` was created.
    ///
    /// It is an error to call this method while the `AioCb` is still in
    /// progress.
    pub fn boxed_mut_slice(&mut self) -> Option<Box<dyn BorrowMut<[u8]>>> {
        assert!(!self.in_progress, "Can't remove the buffer from an AioCb that's still in-progress.  Did you forget to call aio_return?");
        if let Buffer::BoxedMutSlice(_) = self.buffer {
            // Same swap-then-destructure pattern as `boxed_slice`.
            let mut oldbuffer = Buffer::None;
            mem::swap(&mut self.buffer, &mut oldbuffer);
            if let Buffer::BoxedMutSlice(inner) = oldbuffer {
                Some(inner)
            } else {
                unreachable!();
            }
        } else {
            None
        }
    }

    /// Returns the underlying file descriptor associated with the `AioCb`
    pub fn fd(&self) -> RawFd {
        self.aiocb.aio_fildes
    }

    /// Constructs a new `AioCb` with no associated buffer.
    ///
    /// The resulting `AioCb` structure is suitable for use with `AioCb::fsync`.
    ///
    /// # Parameters
    ///
    /// * `fd`:           File descriptor.  Required for all aio functions.
    /// * `prio`:         If POSIX Prioritized IO is supported, then the
    ///                   operation will be prioritized at the process's
    ///                   priority level minus `prio`.
    /// * `sigev_notify`: Determines how you will be notified of event
    ///                   completion.
/// /// # Examples /// /// Create an `AioCb` from a raw file descriptor and use it for an /// [`fsync`](#method.fsync) operation. /// /// ``` /// # extern crate tempfile; /// # extern crate nix; /// # use nix::errno::Errno; /// # use nix::Error; /// # use nix::sys::aio::*; /// # use nix::sys::signal::SigevNotify::SigevNone; /// # use std::{thread, time}; /// # use std::os::unix::io::AsRawFd; /// # use tempfile::tempfile; /// # fn main() { /// let f = tempfile().unwrap(); /// let mut aiocb = AioCb::from_fd( f.as_raw_fd(), 0, SigevNone); /// aiocb.fsync(AioFsyncMode::O_SYNC).expect("aio_fsync failed early"); /// while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) { /// thread::sleep(time::Duration::from_millis(10)); /// } /// aiocb.aio_return().expect("aio_fsync failed late"); /// # } /// ``` pub fn from_fd(fd: RawFd, prio: libc::c_int, sigev_notify: SigevNotify) -> AioCb<'a> { let mut a = AioCb::common_init(fd, prio, sigev_notify); a.aio_offset = 0; a.aio_nbytes = 0; a.aio_buf = null_mut(); AioCb { aiocb: a, mutable: false, in_progress: false, buffer: Buffer::None } } /// Constructs a new `AioCb` from a mutable slice. /// /// The resulting `AioCb` will be suitable for both read and write /// operations, but only if the borrow checker can guarantee that the slice /// will outlive the `AioCb`. That will usually be the case if the `AioCb` /// is stack-allocated. If the borrow checker gives you trouble, try using /// [`from_boxed_mut_slice`](#method.from_boxed_mut_slice) instead. /// /// # Parameters /// /// * `fd`: File descriptor. Required for all aio functions. /// * `offs`: File offset /// * `buf`: A memory buffer /// * `prio`: If POSIX Prioritized IO is supported, then the /// operation will be prioritized at the process's /// priority level minus `prio` /// * `sigev_notify`: Determines how you will be notified of event /// completion. /// * `opcode`: This field is only used for `lio_listio`. 
It /// determines which operation to use for this individual /// aiocb /// /// # Examples /// /// Create an `AioCb` from a mutable slice and read into it. /// /// ``` /// # extern crate tempfile; /// # extern crate nix; /// # use nix::errno::Errno; /// # use nix::Error; /// # use nix::sys::aio::*; /// # use nix::sys::signal::SigevNotify; /// # use std::{thread, time}; /// # use std::io::Write; /// # use std::os::unix::io::AsRawFd; /// # use tempfile::tempfile; /// # fn main() { /// const INITIAL: &[u8] = b"abcdef123456"; /// const LEN: usize = 4; /// let mut rbuf = vec![0; LEN]; /// let mut f = tempfile().unwrap(); /// f.write_all(INITIAL).unwrap(); /// { /// let mut aiocb = AioCb::from_mut_slice( f.as_raw_fd(), /// 2, //offset /// &mut rbuf, /// 0, //priority /// SigevNotify::SigevNone, /// LioOpcode::LIO_NOP); /// aiocb.read().unwrap(); /// while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) { /// thread::sleep(time::Duration::from_millis(10)); /// } /// assert_eq!(aiocb.aio_return().unwrap() as usize, LEN); /// } /// assert_eq!(rbuf, b"cdef"); /// # } /// ``` pub fn from_mut_slice(fd: RawFd, offs: off_t, buf: &'a mut [u8], prio: libc::c_int, sigev_notify: SigevNotify, opcode: LioOpcode) -> AioCb<'a> { let mut a = AioCb::common_init(fd, prio, sigev_notify); a.aio_offset = offs; a.aio_nbytes = buf.len() as size_t; a.aio_buf = buf.as_ptr() as *mut c_void; a.aio_lio_opcode = opcode as libc::c_int; AioCb { aiocb: a, mutable: true, in_progress: false, buffer: Buffer::Phantom(PhantomData), } } /// The safest and most flexible way to create an `AioCb`. /// /// Unlike [`from_slice`], this method returns a structure suitable for /// placement on the heap. It may be used for write operations, but not /// read operations. Unlike `from_ptr`, this method will ensure that the /// buffer doesn't `drop` while the kernel is still processing it. Any /// object that can be borrowed as a boxed slice will work. /// /// # Parameters /// /// * `fd`: File descriptor. 
Required for all aio functions. /// * `offs`: File offset /// * `buf`: A boxed slice-like object /// * `prio`: If POSIX Prioritized IO is supported, then the /// operation will be prioritized at the process's /// priority level minus `prio` /// * `sigev_notify`: Determines how you will be notified of event /// completion. /// * `opcode`: This field is only used for `lio_listio`. It /// determines which operation to use for this individual /// aiocb /// /// # Examples /// /// Create an `AioCb` from a Vector and use it for writing /// /// ``` /// # extern crate tempfile; /// # extern crate nix; /// # use nix::errno::Errno; /// # use nix::Error; /// # use nix::sys::aio::*; /// # use nix::sys::signal::SigevNotify; /// # use std::{thread, time}; /// # use std::io::Write; /// # use std::os::unix::io::AsRawFd; /// # use tempfile::tempfile; /// # fn main() { /// let wbuf = Box::new(Vec::from("CDEF")); /// let expected_len = wbuf.len(); /// let mut f = tempfile().unwrap(); /// let mut aiocb = AioCb::from_boxed_slice( f.as_raw_fd(), /// 2, //offset /// wbuf, /// 0, //priority /// SigevNotify::SigevNone, /// LioOpcode::LIO_NOP); /// aiocb.write().unwrap(); /// while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) { /// thread::sleep(time::Duration::from_millis(10)); /// } /// assert_eq!(aiocb.aio_return().unwrap() as usize, expected_len); /// # } /// ``` /// /// Create an `AioCb` from a `Bytes` object /// /// ``` /// # extern crate bytes; /// # extern crate tempfile; /// # extern crate nix; /// # use bytes::Bytes; /// # use nix::sys::aio::*; /// # use nix::sys::signal::SigevNotify; /// # use std::os::unix::io::AsRawFd; /// # use tempfile::tempfile; /// # fn main() { /// let wbuf = Box::new(Bytes::from(&b"CDEF"[..])); /// let mut f = tempfile().unwrap(); /// let mut aiocb = AioCb::from_boxed_slice( f.as_raw_fd(), /// 2, //offset /// wbuf, /// 0, //priority /// SigevNotify::SigevNone, /// LioOpcode::LIO_NOP); /// # } /// ``` /// /// If a library needs to work with 
buffers that aren't `Box`ed, it can /// create a `Box`ed container for use with this method. Here's an example /// using an un`Box`ed `Bytes` object. /// /// ``` /// # extern crate bytes; /// # extern crate tempfile; /// # extern crate nix; /// # use bytes::Bytes; /// # use nix::sys::aio::*; /// # use nix::sys::signal::SigevNotify; /// # use std::borrow::Borrow; /// # use std::os::unix::io::AsRawFd; /// # use tempfile::tempfile; /// struct BytesContainer(Bytes); /// impl Borrow<[u8]> for BytesContainer { /// fn borrow(&self) -> &[u8] { /// self.0.as_ref() /// } /// } /// fn main() { /// let wbuf = Bytes::from(&b"CDEF"[..]); /// let boxed_wbuf = Box::new(BytesContainer(wbuf)); /// let mut f = tempfile().unwrap(); /// let mut aiocb = AioCb::from_boxed_slice( f.as_raw_fd(), /// 2, //offset /// boxed_wbuf, /// 0, //priority /// SigevNotify::SigevNone, /// LioOpcode::LIO_NOP); /// } /// ``` /// /// [`from_slice`]: #method.from_slice pub fn from_boxed_slice(fd: RawFd, offs: off_t, buf: Box<dyn Borrow<[u8]>>, prio: libc::c_int, sigev_notify: SigevNotify, opcode: LioOpcode) -> AioCb<'a> { let mut a = AioCb::common_init(fd, prio, sigev_notify); { let borrowed : &dyn Borrow<[u8]> = buf.borrow(); let slice : &[u8] = borrowed.borrow(); a.aio_nbytes = slice.len() as size_t; a.aio_buf = slice.as_ptr() as *mut c_void; } a.aio_offset = offs; a.aio_lio_opcode = opcode as libc::c_int; AioCb { aiocb: a, mutable: false, in_progress: false, buffer: Buffer::BoxedSlice(buf), } } /// The safest and most flexible way to create an `AioCb` for reading. /// /// Like [`from_boxed_slice`], but the slice is a mutable one. More /// flexible than [`from_mut_slice`], because a wide range of objects can be /// used. 
    ///
    /// # Examples
    ///
    /// Create an `AioCb` from a Vector and use it for reading
    ///
    /// ```
    /// # extern crate tempfile;
    /// # extern crate nix;
    /// # use nix::errno::Errno;
    /// # use nix::Error;
    /// # use nix::sys::aio::*;
    /// # use nix::sys::signal::SigevNotify;
    /// # use std::{thread, time};
    /// # use std::io::Write;
    /// # use std::os::unix::io::AsRawFd;
    /// # use tempfile::tempfile;
    /// # fn main() {
    /// const INITIAL: &[u8] = b"abcdef123456";
    /// const LEN: usize = 4;
    /// let rbuf = Box::new(vec![0; LEN]);
    /// let mut f = tempfile().unwrap();
    /// f.write_all(INITIAL).unwrap();
    /// let mut aiocb = AioCb::from_boxed_mut_slice( f.as_raw_fd(),
    ///     2,   //offset
    ///     rbuf,
    ///     0,   //priority
    ///     SigevNotify::SigevNone,
    ///     LioOpcode::LIO_NOP);
    /// aiocb.read().unwrap();
    /// while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) {
    ///     thread::sleep(time::Duration::from_millis(10));
    /// }
    /// assert_eq!(aiocb.aio_return().unwrap() as usize, LEN);
    /// let mut buffer = aiocb.boxed_mut_slice().unwrap();
    /// const EXPECT: &[u8] = b"cdef";
    /// assert_eq!(buffer.borrow_mut(), EXPECT);
    /// # }
    /// ```
    ///
    /// [`from_boxed_slice`]: #method.from_boxed_slice
    /// [`from_mut_slice`]: #method.from_mut_slice
    pub fn from_boxed_mut_slice(fd: RawFd, offs: off_t,
                                mut buf: Box<dyn BorrowMut<[u8]>>,
                                prio: libc::c_int, sigev_notify: SigevNotify,
                                opcode: LioOpcode) -> AioCb<'a> {
        let mut a = AioCb::common_init(fd, prio, sigev_notify);
        {
            // Borrow the slice only long enough to record its address and
            // length; the Box itself is stored below to keep it alive.
            let borrowed : &mut dyn BorrowMut<[u8]> = buf.borrow_mut();
            let slice : &mut [u8] = borrowed.borrow_mut();
            a.aio_nbytes = slice.len() as size_t;
            a.aio_buf = slice.as_mut_ptr() as *mut c_void;
        }
        a.aio_offset = offs;
        a.aio_lio_opcode = opcode as libc::c_int;

        AioCb {
            aiocb: a,
            mutable: true,
            in_progress: false,
            buffer: Buffer::BoxedMutSlice(buf),
        }
    }

    /// Constructs a new `AioCb` from a mutable raw pointer
    ///
    /// Unlike `from_mut_slice`, this method returns a structure suitable for
    /// placement on the heap.  It may be used for both reads and writes.  Due
    /// to its unsafety, this method is not recommended.  It is most useful when
    /// heap allocation is required but for some reason the data cannot be
    /// wrapped in a `struct` that implements `BorrowMut<[u8]>`
    ///
    /// # Parameters
    ///
    /// * `fd`:           File descriptor.  Required for all aio functions.
    /// * `offs`:         File offset
    /// * `buf`:          Pointer to the memory buffer
    /// * `len`:          Length of the buffer pointed to by `buf`
    /// * `prio`:         If POSIX Prioritized IO is supported, then the
    ///                   operation will be prioritized at the process's
    ///                   priority level minus `prio`
    /// * `sigev_notify`: Determines how you will be notified of event
    ///                   completion.
    /// * `opcode`:       This field is only used for `lio_listio`.  It
    ///                   determines which operation to use for this individual
    ///                   aiocb
    ///
    /// # Safety
    ///
    /// The caller must ensure that the storage pointed to by `buf` outlives the
    /// `AioCb`.  The lifetime checker can't help here.
    pub unsafe fn from_mut_ptr(fd: RawFd, offs: off_t,
                           buf: *mut c_void, len: usize,
                           prio: libc::c_int, sigev_notify: SigevNotify,
                           opcode: LioOpcode) -> AioCb<'a> {
        let mut a = AioCb::common_init(fd, prio, sigev_notify);
        a.aio_offset = offs;
        a.aio_nbytes = len;
        a.aio_buf = buf;
        a.aio_lio_opcode = opcode as libc::c_int;

        AioCb {
            aiocb: a,
            mutable: true,
            in_progress: false,
            // No ownership is tracked: keeping `buf` alive is the caller's
            // responsibility (see Safety above).
            buffer: Buffer::None
        }
    }

    /// Constructs a new `AioCb` from a raw pointer.
    ///
    /// Unlike `from_slice`, this method returns a structure suitable for
    /// placement on the heap.  Due to its unsafety, this method is not
    /// recommended.  It is most useful when heap allocation is required but for
    /// some reason the data cannot be wrapped in a `struct` that implements
    /// `Borrow<[u8]>`
    ///
    /// # Parameters
    ///
    /// * `fd`:           File descriptor.  Required for all aio functions.
    /// * `offs`:         File offset
    /// * `buf`:          Pointer to the memory buffer
    /// * `len`:          Length of the buffer pointed to by `buf`
    /// * `prio`:         If POSIX Prioritized IO is supported, then the
    ///                   operation will be prioritized at the process's
    ///                   priority level minus `prio`
    /// * `sigev_notify`: Determines how you will be notified of event
    ///                   completion.
    /// * `opcode`:       This field is only used for `lio_listio`.  It
    ///                   determines which operation to use for this individual
    ///                   aiocb
    ///
    /// # Safety
    ///
    /// The caller must ensure that the storage pointed to by `buf` outlives the
    /// `AioCb`.  The lifetime checker can't help here.
    pub unsafe fn from_ptr(fd: RawFd, offs: off_t,
                           buf: *const c_void, len: usize,
                           prio: libc::c_int, sigev_notify: SigevNotify,
                           opcode: LioOpcode) -> AioCb<'a> {
        let mut a = AioCb::common_init(fd, prio, sigev_notify);
        a.aio_offset = offs;
        a.aio_nbytes = len;
        // casting a const ptr to a mutable ptr here is ok, because we set the
        // AioCb's mutable field to false
        a.aio_buf = buf as *mut c_void;
        a.aio_lio_opcode = opcode as libc::c_int;

        AioCb {
            aiocb: a,
            mutable: false,
            in_progress: false,
            buffer: Buffer::None
        }
    }

    /// Like `from_mut_slice`, but works on constant slices rather than
    /// mutable slices.
    ///
    /// An `AioCb` created this way cannot be used with `read`, and its
    /// `LioOpcode` cannot be set to `LIO_READ`.  This method is useful when
    /// writing a const buffer with `AioCb::write`, since `from_mut_slice` can't
    /// work with const buffers.
    ///
    /// # Examples
    ///
    /// Construct an `AioCb` from a slice and use it for writing.
    ///
    /// ```
    /// # extern crate tempfile;
    /// # extern crate nix;
    /// # use nix::errno::Errno;
    /// # use nix::Error;
    /// # use nix::sys::aio::*;
    /// # use nix::sys::signal::SigevNotify;
    /// # use std::{thread, time};
    /// # use std::os::unix::io::AsRawFd;
    /// # use tempfile::tempfile;
    /// # fn main() {
    /// const WBUF: &[u8] = b"abcdef123456";
    /// let mut f = tempfile().unwrap();
    /// let mut aiocb = AioCb::from_slice( f.as_raw_fd(),
    ///     2,   //offset
    ///     WBUF,
    ///     0,   //priority
    ///     SigevNotify::SigevNone,
    ///     LioOpcode::LIO_NOP);
    /// aiocb.write().unwrap();
    /// while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) {
    ///     thread::sleep(time::Duration::from_millis(10));
    /// }
    /// assert_eq!(aiocb.aio_return().unwrap() as usize, WBUF.len());
    /// # }
    /// ```
    // Note: another solution to the problem of writing const buffers would be
    // to genericize AioCb for both &mut [u8] and &[u8] buffers.  AioCb::read
    // could take the former and AioCb::write could take the latter.  However,
    // then lio_listio wouldn't work, because that function needs a slice of
    // AioCb, and they must all be of the same type.
    pub fn from_slice(fd: RawFd, offs: off_t, buf: &'a [u8],
                      prio: libc::c_int, sigev_notify: SigevNotify,
                      opcode: LioOpcode) -> AioCb {
        let mut a = AioCb::common_init(fd, prio, sigev_notify);
        a.aio_offset = offs;
        a.aio_nbytes = buf.len() as size_t;
        // casting an immutable buffer to a mutable pointer looks unsafe,
        // but technically it's only unsafe to dereference it, not to create
        // it.
        a.aio_buf = buf.as_ptr() as *mut c_void;
        // Enforce the write-only contract described in the doc comment.
        assert!(opcode != LioOpcode::LIO_READ, "Can't read into an immutable buffer");
        a.aio_lio_opcode = opcode as libc::c_int;

        AioCb {
            aiocb: a,
            mutable: false,
            in_progress: false,
            buffer: Buffer::None,
        }
    }

    /// Shared initialization for every constructor: zero the control block
    /// and fill in the fields common to all operations.
    fn common_init(fd: RawFd, prio: libc::c_int,
                   sigev_notify: SigevNotify) -> libc::aiocb {
        // Use mem::zeroed instead of explicitly zeroing each field, because the
        // number and name of reserved fields is OS-dependent.  On some OSes,
        // some reserved fields are used by the kernel for state, and must be
        // explicitly zeroed when allocated.
        let mut a = unsafe { mem::zeroed::<libc::aiocb>()};
        a.aio_fildes = fd;
        a.aio_reqprio = prio;
        a.aio_sigevent = SigEvent::new(sigev_notify).sigevent();
        a
    }

    /// Update the notification settings for an existing `aiocb`
    pub fn set_sigev_notify(&mut self, sigev_notify: SigevNotify) {
        self.aiocb.aio_sigevent = SigEvent::new(sigev_notify).sigevent();
    }

    /// Cancels an outstanding AIO request.
    ///
    /// The operating system is not required to implement cancellation for all
    /// file and device types.  Even if it does, there is no guarantee that the
    /// operation has not already completed.  So the caller must check the
    /// result and handle operations that were not canceled or that have already
    /// completed.
    ///
    /// # Examples
    ///
    /// Cancel an outstanding aio operation.  Note that we must still call
    /// `aio_return` to free resources, even though we don't care about the
    /// result.
    ///
    /// ```
    /// # extern crate tempfile;
    /// # extern crate nix;
    /// # use nix::errno::Errno;
    /// # use nix::Error;
    /// # use nix::sys::aio::*;
    /// # use nix::sys::signal::SigevNotify;
    /// # use std::{thread, time};
    /// # use std::io::Write;
    /// # use std::os::unix::io::AsRawFd;
    /// # use tempfile::tempfile;
    /// # fn main() {
    /// let wbuf = b"CDEF";
    /// let mut f = tempfile().unwrap();
    /// let mut aiocb = AioCb::from_slice( f.as_raw_fd(),
    ///     2,   //offset
    ///     &wbuf[..],
    ///     0,   //priority
    ///     SigevNotify::SigevNone,
    ///     LioOpcode::LIO_NOP);
    /// aiocb.write().unwrap();
    /// let cs = aiocb.cancel().unwrap();
    /// if cs == AioCancelStat::AioNotCanceled {
    ///     while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) {
    ///         thread::sleep(time::Duration::from_millis(10));
    ///     }
    /// }
    /// // Must call `aio_return`, but ignore the result
    /// let _ = aiocb.aio_return();
    /// # }
    /// ```
    ///
    /// # References
    ///
[aio_cancel](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_cancel.html)
pub fn cancel(&mut self) -> Result<AioCancelStat> {
    // Ask the OS to cancel; the return code reports how much it actually
    // managed to cancel.
    let ret = unsafe {
        libc::aio_cancel(self.aiocb.aio_fildes, &mut self.aiocb)
    };
    match ret {
        libc::AIO_CANCELED => Ok(AioCancelStat::AioCanceled),
        libc::AIO_NOTCANCELED => Ok(AioCancelStat::AioNotCanceled),
        libc::AIO_ALLDONE => Ok(AioCancelStat::AioAllDone),
        -1 => Err(Error::last()),
        _ => panic!("unknown aio_cancel return value")
    }
}

/// Retrieve error status of an asynchronous operation.
///
/// If the request has not yet completed, returns `EINPROGRESS`.  Otherwise,
/// returns `Ok` or any other error.
///
/// # Examples
///
/// Issue an aio operation and use `error` to poll for completion.  Polling
/// is an alternative to `aio_suspend`, used by most of the other examples.
///
/// ```
/// # extern crate tempfile;
/// # extern crate nix;
/// # use nix::errno::Errno;
/// # use nix::Error;
/// # use nix::sys::aio::*;
/// # use nix::sys::signal::SigevNotify;
/// # use std::{thread, time};
/// # use std::os::unix::io::AsRawFd;
/// # use tempfile::tempfile;
/// # fn main() {
/// const WBUF: &[u8] = b"abcdef123456";
/// let mut f = tempfile().unwrap();
/// let mut aiocb = AioCb::from_slice( f.as_raw_fd(),
///                            2,   //offset
///                            WBUF,
///                            0,   //priority
///                            SigevNotify::SigevNone,
///                            LioOpcode::LIO_NOP);
/// aiocb.write().unwrap();
/// while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) {
///     thread::sleep(time::Duration::from_millis(10));
/// }
/// assert_eq!(aiocb.aio_return().unwrap() as usize, WBUF.len());
/// # }
/// ```
///
/// # References
///
/// [aio_error](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_error.html)
pub fn error(&mut self) -> Result<()> {
    let ret = unsafe {
        libc::aio_error(&mut self.aiocb as *mut libc::aiocb)
    };
    match ret {
        // Operation complete and successful.
        0 => Ok(()),
        // aio_error itself failed.
        -1 => Err(Error::last()),
        // A positive value is the errno of the (possibly still pending)
        // operation; EINPROGRESS means it hasn't completed yet.
        num if num > 0 => Err(Error::from_errno(Errno::from_i32(num))),
        num => panic!("unknown aio_error return value {:?}", num)
    }
}

/// An asynchronous version of
`fsync(2)`. /// /// # References /// /// [aio_fsync](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_fsync.html) pub fn fsync(&mut self, mode: AioFsyncMode) -> Result<()> { let p: *mut libc::aiocb = &mut self.aiocb; Errno::result(unsafe { libc::aio_fsync(mode as libc::c_int, p) }).map(|_| { self.in_progress = true; }) } /// Returns the `aiocb`'s `LioOpcode` field /// /// If the value cannot be represented as an `LioOpcode`, returns `None` /// instead. pub fn lio_opcode(&self) -> Option<LioOpcode> { match self.aiocb.aio_lio_opcode { libc::LIO_READ => Some(LioOpcode::LIO_READ), libc::LIO_WRITE => Some(LioOpcode::LIO_WRITE), libc::LIO_NOP => Some(LioOpcode::LIO_NOP), _ => None } } /// Returns the requested length of the aio operation in bytes /// /// This method returns the *requested* length of the operation. To get the /// number of bytes actually read or written by a completed operation, use /// `aio_return` instead. pub fn nbytes(&self) -> usize { self.aiocb.aio_nbytes } /// Returns the file offset stored in the `AioCb` pub fn offset(&self) -> off_t { self.aiocb.aio_offset } /// Returns the priority of the `AioCb` pub fn priority(&self) -> libc::c_int { self.aiocb.aio_reqprio } /// Asynchronously reads from a file descriptor into a buffer /// /// # References /// /// [aio_read](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_read.html) pub fn read(&mut self) -> Result<()> { assert!(self.mutable, "Can't read into an immutable buffer"); let p: *mut libc::aiocb = &mut self.aiocb; Errno::result(unsafe { libc::aio_read(p) }).map(|_| { self.in_progress = true; }) } /// Returns the `SigEvent` stored in the `AioCb` pub fn sigevent(&self) -> SigEvent { SigEvent::from(&self.aiocb.aio_sigevent) } /// Retrieve return status of an asynchronous operation. /// /// Should only be called once for each `AioCb`, after `AioCb::error` /// indicates that it has completed. 
The result is the same as for the /// synchronous `read(2)`, `write(2)`, of `fsync(2)` functions. /// /// # References /// /// [aio_return](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_return.html) // Note: this should be just `return`, but that's a reserved word pub fn aio_return(&mut self) -> Result<isize> { let p: *mut libc::aiocb = &mut self.aiocb; self.in_progress = false; Errno::result(unsafe { libc::aio_return(p) }) } /// Asynchronously writes from a buffer to a file descriptor /// /// # References /// /// [aio_write](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_write.html) pub fn write(&mut self) -> Result<()> { let p: *mut libc::aiocb = &mut self.aiocb; Errno::result(unsafe { libc::aio_write(p) }).map(|_| { self.in_progress = true; }) } } /// Cancels outstanding AIO requests for a given file descriptor. /// /// # Examples /// /// Issue an aio operation, then cancel all outstanding operations on that file /// descriptor. /// /// ``` /// # extern crate tempfile; /// # extern crate nix; /// # use nix::errno::Errno; /// # use nix::Error; /// # use nix::sys::aio::*; /// # use nix::sys::signal::SigevNotify; /// # use std::{thread, time}; /// # use std::io::Write; /// # use std::os::unix::io::AsRawFd; /// # use tempfile::tempfile; /// # fn main() { /// let wbuf = b"CDEF"; /// let mut f = tempfile().unwrap(); /// let mut aiocb = AioCb::from_slice( f.as_raw_fd(), /// 2, //offset /// &wbuf[..], /// 0, //priority /// SigevNotify::SigevNone, /// LioOpcode::LIO_NOP); /// aiocb.write().unwrap(); /// let cs = aio_cancel_all(f.as_raw_fd()).unwrap(); /// if cs == AioCancelStat::AioNotCanceled { /// while (aiocb.error() == Err(Error::from(Errno::EINPROGRESS))) { /// thread::sleep(time::Duration::from_millis(10)); /// } /// } /// // Must call `aio_return`, but ignore the result /// let _ = aiocb.aio_return(); /// # } /// ``` /// /// # References /// /// [`aio_cancel`](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_cancel.html) 
pub fn aio_cancel_all(fd: RawFd) -> Result<AioCancelStat> {
    // A null aiocb pointer asks the OS to cancel every outstanding
    // operation on this file descriptor.
    match unsafe { libc::aio_cancel(fd, null_mut()) } {
        libc::AIO_CANCELED => Ok(AioCancelStat::AioCanceled),
        libc::AIO_NOTCANCELED => Ok(AioCancelStat::AioNotCanceled),
        libc::AIO_ALLDONE => Ok(AioCancelStat::AioAllDone),
        -1 => Err(Error::last()),
        _ => panic!("unknown aio_cancel return value")
    }
}

/// Suspends the calling process until at least one of the specified `AioCb`s
/// has completed, a signal is delivered, or the timeout has passed.
///
/// If `timeout` is `None`, `aio_suspend` will block indefinitely.
///
/// # Examples
///
/// Use `aio_suspend` to block until an aio operation completes.
///
/// ```
/// # extern crate tempfile;
/// # extern crate nix;
/// # use nix::sys::aio::*;
/// # use nix::sys::signal::SigevNotify;
/// # use std::os::unix::io::AsRawFd;
/// # use tempfile::tempfile;
/// # fn main() {
/// const WBUF: &[u8] = b"abcdef123456";
/// let mut f = tempfile().unwrap();
/// let mut aiocb = AioCb::from_slice( f.as_raw_fd(),
///                            2,   //offset
///                            WBUF,
///                            0,   //priority
///                            SigevNotify::SigevNone,
///                            LioOpcode::LIO_NOP);
/// aiocb.write().unwrap();
/// aio_suspend(&[&aiocb], None).expect("aio_suspend failed");
/// assert_eq!(aiocb.aio_return().unwrap() as usize, WBUF.len());
/// # }
/// ```
/// # References
///
/// [`aio_suspend`](http://pubs.opengroup.org/onlinepubs/9699919799/functions/aio_suspend.html)
pub fn aio_suspend(list: &[&AioCb], timeout: Option<TimeSpec>) -> Result<()> {
    // A slice of `&AioCb` is reinterpreted as the `*const *const aiocb`
    // array that the C API expects.
    // NOTE(review): this relies on each AioCb beginning with (or being
    // layout-compatible with) its embedded libc::aiocb -- confirm the
    // struct layout if AioCb's fields are ever reordered.
    let plist = list as *const [&AioCb] as *const [*const libc::aiocb];
    let p = plist as *const *const libc::aiocb;
    let timep = match timeout {
        None    => null::<libc::timespec>(),
        Some(x) => x.as_ref() as *const libc::timespec
    };
    // `list.len() as i32` would truncate for lists longer than i32::MAX
    // entries; in practice AIO lists are far smaller.
    Errno::result(unsafe {
        libc::aio_suspend(p, list.len() as i32, timep)
    }).map(drop)
}

impl<'a> Debug for AioCb<'a> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("AioCb")
           .field("aiocb", &self.aiocb)
           .field("mutable", &self.mutable)
           .field("in_progress",
                  &self.in_progress)
           .finish()
    }
}

impl<'a> Drop for AioCb<'a> {
    /// If the `AioCb` has no remaining state in the kernel, just drop it.
    /// Otherwise, dropping constitutes a resource leak, which is an error
    fn drop(&mut self) {
        // Allow the drop during a panic-unwind so a failing test doesn't
        // abort with a double panic.
        assert!(thread::panicking() || !self.in_progress,
                "Dropped an in-progress AioCb");
    }
}

/// LIO Control Block.
///
/// The basic structure used to issue multiple AIO operations simultaneously.
#[cfg(not(any(target_os = "ios", target_os = "macos")))]
pub struct LioCb<'a> {
    /// A collection of [`AioCb`]s.  All of these will be issued
    /// simultaneously by the [`listio`] method.
    ///
    /// [`AioCb`]: struct.AioCb.html
    /// [`listio`]: #method.listio
    pub aiocbs: Vec<AioCb<'a>>,

    /// The actual list passed to `libc::lio_listio`.
    ///
    /// It must live for as long as any of the operations are still being
    /// processed, because the aio subsystem uses its address as a unique
    /// identifier.
    list: Vec<*mut libc::aiocb>,

    /// A partial set of results.  This field will get populated by
    /// `listio_resubmit` when an `LioCb` is resubmitted after an error
    results: Vec<Option<Result<isize>>>
}

#[cfg(not(any(target_os = "ios", target_os = "macos")))]
impl<'a> LioCb<'a> {
    /// Initialize an empty `LioCb`
    pub fn with_capacity(capacity: usize) -> LioCb<'a> {
        LioCb {
            aiocbs: Vec::with_capacity(capacity),
            list: Vec::with_capacity(capacity),
            results: Vec::with_capacity(capacity)
        }
    }

    /// Submits multiple asynchronous I/O requests with a single system call.
    ///
    /// They are not guaranteed to complete atomically, and the order in which
    /// the requests are carried out is not specified.  Reads, writes, and
    /// fsyncs may be freely mixed.
    ///
    /// This function is useful for reducing the context-switch overhead of
    /// submitting many AIO operations.  It can also be used with
    /// `LioMode::LIO_WAIT` to block on the result of several independent
    /// operations.  Used that way, it is often useful in programs that
    /// otherwise make little use of AIO.
///
/// # Examples
///
/// Use `listio` to submit an aio operation and wait for its completion.  In
/// this case, there is no need to use [`aio_suspend`] to wait or
/// [`AioCb::error`] to poll.
///
/// ```
/// # extern crate tempfile;
/// # extern crate nix;
/// # use nix::sys::aio::*;
/// # use nix::sys::signal::SigevNotify;
/// # use std::os::unix::io::AsRawFd;
/// # use tempfile::tempfile;
/// # fn main() {
/// const WBUF: &[u8] = b"abcdef123456";
/// let mut f = tempfile().unwrap();
/// let mut liocb = LioCb::with_capacity(1);
/// liocb.aiocbs.push(AioCb::from_slice( f.as_raw_fd(),
///                            2,   //offset
///                            WBUF,
///                            0,   //priority
///                            SigevNotify::SigevNone,
///                            LioOpcode::LIO_WRITE));
/// liocb.listio(LioMode::LIO_WAIT,
///              SigevNotify::SigevNone).unwrap();
/// assert_eq!(liocb.aio_return(0).unwrap() as usize, WBUF.len());
/// # }
/// ```
///
/// # References
///
/// [`lio_listio`](http://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html)
///
/// [`aio_suspend`]: fn.aio_suspend.html
/// [`AioCb::error`]: struct.AioCb.html#method.error
pub fn listio(&mut self, mode: LioMode,
              sigev_notify: SigevNotify) -> Result<()> {
    let sigev = SigEvent::new(sigev_notify);
    // Bind the sigevent to a named local before taking its address.
    // Writing `&mut sigev.sigevent() as *mut libc::sigevent` would take a
    // pointer to a temporary that is dropped at the end of the statement,
    // leaving `sigevp` dangling by the time lio_listio runs (UB).
    let mut sev = sigev.sigevent();
    let sigevp = &mut sev as *mut libc::sigevent;
    self.list.clear();
    // The aio subsystem identifies each operation by the address of its
    // aiocb, so `list` must hold stable pointers into `aiocbs`.
    for a in &mut self.aiocbs {
        a.in_progress = true;
        self.list.push(a as *mut AioCb<'a> as *mut libc::aiocb);
    }
    let p = self.list.as_ptr();
    Errno::result(unsafe {
        libc::lio_listio(mode as i32, p, self.list.len() as i32, sigevp)
    }).map(drop)
}

/// Resubmits any incomplete operations with [`lio_listio`].
///
/// Sometimes, due to system resource limitations, an `lio_listio` call will
/// return `EIO`, or `EAGAIN`.  Or, if a signal is received, it may return
/// `EINTR`.  In any of these cases, only a subset of its constituent
/// operations will actually have been initiated.  `listio_resubmit` will
/// resubmit any operations that are still uninitiated.
/// /// After calling `listio_resubmit`, results should be collected by /// [`LioCb::aio_return`]. /// /// # Examples /// ```no_run /// # extern crate tempfile; /// # extern crate nix; /// # use nix::Error; /// # use nix::errno::Errno; /// # use nix::sys::aio::*; /// # use nix::sys::signal::SigevNotify; /// # use std::os::unix::io::AsRawFd; /// # use std::{thread, time}; /// # use tempfile::tempfile; /// # fn main() { /// const WBUF: &[u8] = b"abcdef123456"; /// let mut f = tempfile().unwrap(); /// let mut liocb = LioCb::with_capacity(1); /// liocb.aiocbs.push(AioCb::from_slice( f.as_raw_fd(), /// 2, //offset /// WBUF, /// 0, //priority /// SigevNotify::SigevNone, /// LioOpcode::LIO_WRITE)); /// let mut err = liocb.listio(LioMode::LIO_WAIT, SigevNotify::SigevNone); /// while err == Err(Error::Sys(Errno::EIO)) || /// err == Err(Error::Sys(Errno::EAGAIN)) { /// thread::sleep(time::Duration::from_millis(10)); /// err = liocb.listio_resubmit(LioMode::LIO_WAIT, SigevNotify::SigevNone); /// } /// assert_eq!(liocb.aio_return(0).unwrap() as usize, WBUF.len()); /// # } /// ``` /// /// # References /// /// [`lio_listio`](http://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html) /// /// [`lio_listio`]: http://pubs.opengroup.org/onlinepubs/9699919799/functions/lio_listio.html /// [`LioCb::aio_return`]: struct.LioCb.html#method.aio_return // Note: the addresses of any EINPROGRESS or EOK aiocbs _must_ not be // changed by this method, because the kernel relies on their addresses // being stable. // Note: aiocbs that are Ok(()) must be finalized by aio_return, or else the // sigev_notify will immediately refire. 
pub fn listio_resubmit(&mut self, mode:LioMode, sigev_notify: SigevNotify) -> Result<()> { let sigev = SigEvent::new(sigev_notify); let sigevp = &mut sigev.sigevent() as *mut libc::sigevent; self.list.clear(); while self.results.len() < self.aiocbs.len() { self.results.push(None); } for (i, a) in self.aiocbs.iter_mut().enumerate() { if self.results[i].is_some() { // Already collected final status for this operation continue; } match a.error() { Ok(()) => { // aiocb is complete; collect its status and don't resubmit self.results[i] = Some(a.aio_return()); }, Err(Error::Sys(Errno::EAGAIN)) => { self.list.push(a as *mut AioCb<'a> as *mut libc::aiocb); }, Err(Error::Sys(Errno::EINPROGRESS)) => { // aiocb is was successfully queued; no need to do anything }, Err(Error::Sys(Errno::EINVAL)) => panic!( "AioCb was never submitted, or already finalized"), _ => unreachable!() } } let p = self.list.as_ptr(); Errno::result(unsafe { libc::lio_listio(mode as i32, p, self.list.len() as i32, sigevp) }).map(drop) } /// Collect final status for an individual `AioCb` submitted as part of an /// `LioCb`. /// /// This is just like [`AioCb::aio_return`], except it takes into account /// operations that were restarted by [`LioCb::listio_resubmit`] /// /// [`AioCb::aio_return`]: struct.AioCb.html#method.aio_return /// [`LioCb::listio_resubmit`]: #method.listio_resubmit pub fn aio_return(&mut self, i: usize) -> Result<isize> { if i >= self.results.len() || self.results[i].is_none() { self.aiocbs[i].aio_return() } else { self.results[i].unwrap() } } /// Retrieve error status of an individual `AioCb` submitted as part of an /// `LioCb`. 
/// /// This is just like [`AioCb::error`], except it takes into account /// operations that were restarted by [`LioCb::listio_resubmit`] /// /// [`AioCb::error`]: struct.AioCb.html#method.error /// [`LioCb::listio_resubmit`]: #method.listio_resubmit pub fn error(&mut self, i: usize) -> Result<()> { if i >= self.results.len() || self.results[i].is_none() { self.aiocbs[i].error() } else { Ok(()) } } } #[cfg(not(any(target_os = "ios", target_os = "macos")))] impl<'a> Debug for LioCb<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_struct("LioCb") .field("aiocbs", &self.aiocbs) .finish() } } #[cfg(not(any(target_os = "ios", target_os = "macos")))] impl<'a> From<Vec<AioCb<'a>>> for LioCb<'a> { fn from(src: Vec<AioCb<'a>>) -> LioCb<'a> { LioCb { list: Vec::with_capacity(src.capacity()), results: Vec::with_capacity(src.capacity()), aiocbs: src, } } }