repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
plabayo/venndb | https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/compiles/filter_and_key_and_skip.rs | venndb-usage/tests/compiles/filter_and_key_and_skip.rs | use venndb::VennDB;
#[derive(Debug, VennDB)]
struct Employee {
id: u32,
is_manager: bool,
is_active: bool,
#[venndb(filter, key, skip)]
country: String,
}
fn main() {}
| rust | Apache-2.0 | 68c929e6a7f97f74b0b1c84facfa5b1b61369be3 | 2026-01-04T20:24:17.564367Z | false |
plabayo/venndb | https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/compiles/derive_struct_with_filter_map_any.rs | venndb-usage/tests/compiles/derive_struct_with_filter_map_any.rs | use venndb::{Any, VennDB};
#[derive(Debug, VennDB)]
struct Employee {
id: u32,
name: String,
is_manager: bool,
is_admin: bool,
is_active: bool,
#[venndb(filter, any)]
department: Department,
}
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
pub enum Department {
Any,
Engineering,
Sales,
Marketing,
HR,
}
impl Any for Department {
fn is_any(&self) -> bool {
self == &Department::Any
}
}
fn main() {
let _ = EmployeeDB::new();
}
| rust | Apache-2.0 | 68c929e6a7f97f74b0b1c84facfa5b1b61369be3 | 2026-01-04T20:24:17.564367Z | false |
plabayo/venndb | https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/compiles/derive_struct.rs | venndb-usage/tests/compiles/derive_struct.rs | use venndb::VennDB;
#[derive(Debug, VennDB)]
struct Employee {
id: u32,
name: String,
is_manager: bool,
is_admin: bool,
is_active: bool,
department: Department,
}
#[derive(Debug)]
pub enum Department {
Engineering,
Sales,
Marketing,
HR,
}
fn main() {
let _ = EmployeeDB::new();
}
| rust | Apache-2.0 | 68c929e6a7f97f74b0b1c84facfa5b1b61369be3 | 2026-01-04T20:24:17.564367Z | false |
plabayo/venndb | https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/compiles/derive_struct_with_validator.rs | venndb-usage/tests/compiles/derive_struct_with_validator.rs | use venndb::VennDB;
#[derive(Debug, VennDB)]
#[venndb(validator = sealed::employee_validator)]
struct Employee {
pub id: u32,
pub name: String,
pub is_manager: bool,
pub is_admin: bool,
pub is_active: bool,
pub department: Department,
}
#[derive(Debug)]
pub enum Department {
Engineering,
Sales,
Marketing,
HR,
}
mod sealed {
use super::Employee;
pub(super) fn employee_validator(employee: &Employee) -> bool {
employee.id > 0 && !employee.name.is_empty()
}
}
fn main() {
let _ = EmployeeDB::new();
}
| rust | Apache-2.0 | 68c929e6a7f97f74b0b1c84facfa5b1b61369be3 | 2026-01-04T20:24:17.564367Z | false |
plabayo/venndb | https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/compiles/derive_struct_with_key.rs | venndb-usage/tests/compiles/derive_struct_with_key.rs | use venndb::VennDB;
#[derive(Debug, VennDB)]
struct Employee {
#[venndb(key)]
id: u32,
name: String,
is_manager: bool,
is_admin: bool,
is_active: bool,
department: Department,
}
#[derive(Debug)]
pub enum Department {
Engineering,
Sales,
Marketing,
HR,
}
fn main() {
let _ = EmployeeDB::new();
}
| rust | Apache-2.0 | 68c929e6a7f97f74b0b1c84facfa5b1b61369be3 | 2026-01-04T20:24:17.564367Z | false |
plabayo/venndb | https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/compiles/derive_struct_with_filter_map.rs | venndb-usage/tests/compiles/derive_struct_with_filter_map.rs | use venndb::VennDB;
#[derive(Debug, VennDB)]
struct Employee {
id: u32,
name: String,
is_manager: bool,
is_admin: bool,
is_active: bool,
#[venndb(filter)]
department: Department,
}
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
pub enum Department {
Engineering,
Sales,
Marketing,
HR,
}
fn main() {
let _ = EmployeeDB::new();
}
| rust | Apache-2.0 | 68c929e6a7f97f74b0b1c84facfa5b1b61369be3 | 2026-01-04T20:24:17.564367Z | false |
plabayo/venndb | https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/compiles/derive_struct_custom_name.rs | venndb-usage/tests/compiles/derive_struct_custom_name.rs | use venndb::VennDB;
#[derive(Debug, VennDB)]
#[venndb(name = "Database")]
struct Employee {
id: u32,
name: String,
is_manager: bool,
is_admin: bool,
is_active: bool,
department: Department,
}
#[derive(Debug)]
pub enum Department {
Engineering,
Sales,
Marketing,
HR,
}
fn main() {
let _ = Database::new();
}
| rust | Apache-2.0 | 68c929e6a7f97f74b0b1c84facfa5b1b61369be3 | 2026-01-04T20:24:17.564367Z | false |
plabayo/venndb | https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/compiles/derive_struct_skip_all.rs | venndb-usage/tests/compiles/derive_struct_skip_all.rs | use venndb::VennDB;
#[derive(Debug, VennDB)]
struct Employee {
#[venndb(skip)]
id: u32,
#[venndb(skip)]
name: String,
#[venndb(skip)]
is_manager: bool,
#[venndb(skip)]
department: Department,
}
#[derive(Debug)]
pub enum Department {
Engineering,
Sales,
Marketing,
HR,
}
fn main() {}
| rust | Apache-2.0 | 68c929e6a7f97f74b0b1c84facfa5b1b61369be3 | 2026-01-04T20:24:17.564367Z | false |
plabayo/venndb | https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/compiles/derive_struct_all_the_things.rs | venndb-usage/tests/compiles/derive_struct_all_the_things.rs | use venndb::{Any, VennDB};
#[derive(Debug, VennDB)]
#[venndb(name = "EmployeeSheet", validator = employee_validator)]
struct Employee {
#[venndb(key)]
id: u32,
name: String,
#[venndb(filter)] // explicit bool filter == regular bool
is_manager: bool,
is_admin: bool,
is_something: Option<bool>,
#[venndb(skip)]
is_active: bool,
#[venndb(filter, any)]
department: Department,
#[venndb(filter)]
country: Option<String>,
}
#[derive(Debug, PartialEq, Eq, Clone, Hash)]
pub enum Department {
Any,
Engineering,
Sales,
Marketing,
HR,
}
fn employee_validator(employee: &Employee) -> bool {
employee.id > 0 && !employee.name.is_empty()
}
impl Any for Department {
fn is_any(&self) -> bool {
self == &Department::Any
}
}
fn main() {
let mut db = EmployeeSheet::new();
db.append(Employee {
id: 1,
name: "Alice".to_string(),
is_manager: true,
is_admin: false,
is_something: None,
is_active: true,
department: Department::Engineering,
country: None,
})
.unwrap();
let employee_ref = db.get_by_id(&1).unwrap();
assert_eq!(employee_ref.id, 1);
assert_eq!(employee_ref.name, "Alice");
assert_eq!(employee_ref.is_something, None);
assert_eq!(employee_ref.country, None);
let mut query = db.query();
query
.is_manager(true)
.is_admin(true)
.department(Department::Engineering)
.department(Department::Sales);
assert!(query.execute().is_none());
}
| rust | Apache-2.0 | 68c929e6a7f97f74b0b1c84facfa5b1b61369be3 | 2026-01-04T20:24:17.564367Z | false |
plabayo/venndb | https://github.com/plabayo/venndb/blob/68c929e6a7f97f74b0b1c84facfa5b1b61369be3/venndb-usage/tests/compiles/derive_struct_empty.rs | venndb-usage/tests/compiles/derive_struct_empty.rs | use venndb::VennDB;
#[derive(Debug, VennDB)]
struct Employee {}
fn main() {}
| rust | Apache-2.0 | 68c929e6a7f97f74b0b1c84facfa5b1b61369be3 | 2026-01-04T20:24:17.564367Z | false |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/twistrs/build.rs | twistrs/build.rs | use std::io::{self, BufRead};
use std::{env, fs, path::Path};
fn main() {
// The following build script converts a number of data assets
// to be embedded directly into the libraries final binaries
// without incurring any runtime costs.
//
// For more information on the internals as well as other
// possible solutions, please review the following blog post.
//
// https://dev.to/rustyoctopus/generating-static-arrays-during-compile-time-in-rust-10d8
let mut dicionary_output = String::from("");
let mut keywords_array_string = String::from(
"#[allow(dead_code)]
static KEYWORDS: [&str; ",
);
let mut whois_servers_string = String::from(
"#[allow(dead_code)]
static WHOIS_RAW_JSON: &str = r#",
);
match read_lines("./data/keywords.txt") {
Ok(lines) => {
// We want to unwrap to make sure that we are able to fetch all TLDs
let tlds = lines.map(|l| l.unwrap()).collect::<Vec<String>>();
// Finalize the variable signature and break into newline to
// start populating the TLDs
keywords_array_string.push_str(&tlds.len().to_string());
keywords_array_string.push_str("] = [\r\n");
// Start populating TLD contents
for line in tlds.into_iter() {
// Formatting some tabs (ASCII-20)
keywords_array_string.push_str("\u{20}\u{20}\u{20}\u{20}\"");
let tld = if line.chars().all(char::is_alphanumeric) {
line.to_string()
} else {
punycode::encode(line.to_string().as_str()).unwrap()
};
keywords_array_string.push_str(&tld[..]);
keywords_array_string.push_str("\",\r\n");
}
// Close off variable signature
keywords_array_string.push_str("];\r\n");
}
Err(e) => panic!(
"{}",
format!(
"unable to build library due to missing dictionary file(s): {}",
e
)
),
}
// Compile the WhoIs server config to later perform WhoIs lookups against
match read_lines("./data/whois-servers.json") {
Ok(lines) => {
// Construct the in-memory JSON
whois_servers_string.push('"');
lines.for_each(|l| whois_servers_string.push_str(&l.unwrap()));
whois_servers_string.push_str("\"#;");
}
Err(e) => panic!(
"{}",
format!(
"unable to build library due to missing dictionary file(s): {}",
e
)
),
}
// Build the final output
dicionary_output.push_str(&keywords_array_string);
dicionary_output.push('\n');
dicionary_output.push_str(&whois_servers_string);
// Write out contents to the final Rust file artifact
let out_dir = env::var("OUT_DIR").unwrap();
let dest_path = Path::new(&out_dir).join("data.rs");
fs::write(dest_path, dicionary_output).unwrap();
}
// The output is wrapped in a Result to allow matching on errors
// Returns an Iterator to the Reader of the lines of the file.
//
// This was taken from the official rust-lang docs:
// https://doc.rust-lang.org/stable/rust-by-example/std_misc/file/read_lines.html
fn read_lines<P>(filename: P) -> io::Result<io::Lines<io::BufReader<fs::File>>>
where
P: AsRef<Path>,
{
let file = fs::File::open(filename)?;
Ok(io::BufReader::new(file).lines())
}
| rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | false |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/twistrs/src/tlds.rs | twistrs/src/tlds.rs | // This file is auto-generated. Do not edit manually.
pub const TLDS: [&str; 6934] = [
"0.bg",
"1.bg",
"2.bg",
"2000.hu",
"3.bg",
"4.bg",
"5.bg",
"5g.in",
"6.bg",
"6g.in",
"7.bg",
"8.bg",
"9.bg",
"9guacu.br",
"a.bg",
"a.se",
"aa.no",
"aaa",
"aaa.pro",
"aarborte.no",
"aarp",
"ab.ca",
"abashiri.hokkaido.jp",
"abb",
"abbott",
"abbvie",
"abc",
"abc.br",
"abeno.osaka.jp",
"abg.ec",
"abiko.chiba.jp",
"abira.hokkaido.jp",
"able",
"abo.pa",
"abogado",
"abr.it",
"abruzzo.it",
"abu.yamaguchi.jp",
"abudhabi",
"ac",
"ac.ae",
"ac.at",
"ac.bd",
"ac.be",
"ac.bw",
"ac.ci",
"ac.cn",
"ac.cr",
"ac.cy",
"ac.eg",
"ac.fj",
"ac.gn",
"ac.gov.br",
"ac.id",
"ac.il",
"ac.im",
"ac.in",
"ac.ir",
"ac.jp",
"ac.ke",
"ac.kr",
"ac.lk",
"ac.ls",
"ac.ma",
"ac.me",
"ac.ml",
"ac.mu",
"ac.mw",
"ac.mz",
"ac.ni",
"ac.nz",
"ac.pa",
"ac.pk",
"ac.pr",
"ac.rs",
"ac.rw",
"ac.se",
"ac.sz",
"ac.th",
"ac.tj",
"ac.tz",
"ac.ug",
"ac.uk",
"ac.vn",
"ac.za",
"ac.zm",
"ac.zw",
"aca.pro",
"academia.bo",
"academy",
"accenture",
"accident-investigation.aero",
"accident-prevention.aero",
"accountant",
"accountants",
"acct.pro",
"achi.nagano.jp",
"aco",
"act.au",
"act.edu.au",
"actor",
"ad",
"ad.jp",
"adachi.tokyo.jp",
"adm.br",
"adm.ec",
"ads",
"adult",
"adult.ht",
"adv.br",
"adv.mz",
"ae",
"aeg",
"aejrie.no",
"aero",
"aero.mv",
"aerobatic.aero",
"aeroclub.aero",
"aerodrome.aero",
"aetna",
"af",
"afjord.no",
"afl",
"africa",
"africa.bj",
"ag",
"ag.it",
"aga.niigata.jp",
"agakhan",
"agano.niigata.jp",
"agdenes.no",
"agematsu.nagano.jp",
"agency",
"agents.aero",
"agr.br",
"agrar.hu",
"agri.jo",
"agric.za",
"agrigento.it",
"agro.bj",
"agro.bo",
"agro.pl",
"agron.ec",
"aguni.okinawa.jp",
"ah.cn",
"ah.no",
"ai",
"ai.bd",
"ai.in",
"ai.jo",
"ai.kr",
"ai.vn",
"aibetsu.hokkaido.jp",
"aichi.jp",
"aid.pl",
"aig",
"aikawa.kanagawa.jp",
"ainan.ehime.jp",
"aioi.hyogo.jp",
"aip.ee",
"air-surveillance.aero",
"air-traffic-control.aero",
"airbus",
"aircraft.aero",
"airforce",
"airline.aero",
"airport.aero",
"airtel",
"airtraffic.aero",
"aisai.aichi.jp",
"aisho.shiga.jp",
"aizubange.fukushima.jp",
"aizumi.tokushima.jp",
"aizumisato.fukushima.jp",
"aizuwakamatsu.fukushima.jp",
"aju.br",
"ak.us",
"akabira.hokkaido.jp",
"akagi.shimane.jp",
"akaiwa.okayama.jp",
"akashi.hyogo.jp",
"akdn",
"aki.kochi.jp",
"akiruno.tokyo.jp",
"akishima.tokyo.jp",
"akita.akita.jp",
"akita.jp",
"akkeshi.hokkaido.jp",
"aknoluokta.no",
"ako.hyogo.jp",
"akrehamn.no",
"akune.kagoshima.jp",
"al",
"al.gov.br",
"al.it",
"al.no",
"al.us",
"alaheadju.no",
"aland.fi",
"alessandria.it",
"alesund.no",
"algard.no",
"alibaba",
"alipay",
"allfinanz",
"allstate",
"ally",
"alsace",
"alstahaug.no",
"alstom",
"alt.na",
"alt.za",
"alta.no",
"alto-adige.it",
"altoadige.it",
"alvdal.no",
"am",
"am.br",
"am.gov.br",
"am.in",
"ama.aichi.jp",
"ama.shimane.jp",
"amagasaki.hyogo.jp",
"amakusa.kumamoto.jp",
"amami.kagoshima.jp",
"amazon",
"ambulance.aero",
"americanexpress",
"americanfamily",
"amex",
"amfam",
"ami.ibaraki.jp",
"amica",
"amli.no",
"amot.no",
"amsterdam",
"an.it",
"analytics",
"anamizu.ishikawa.jp",
"anan.nagano.jp",
"anan.tokushima.jp",
"anani.br",
"ancona.it",
"andasuolo.no",
"andebu.no",
"ando.nara.jp",
"andoy.no",
"andria-barletta-trani.it",
"andria-trani-barletta.it",
"andriabarlettatrani.it",
"andriatranibarletta.it",
"android",
"andøy.no",
"angiang.vn",
"anjo.aichi.jp",
"ann-arbor.mi.us",
"annaka.gunma.jp",
"anpachi.gifu.jp",
"anquan",
"anz",
"ao",
"ao.it",
"aogaki.hyogo.jp",
"aogashima.tokyo.jp",
"aoki.nagano.jp",
"aol",
"aomori.aomori.jp",
"aomori.jp",
"aosta-valley.it",
"aosta.it",
"aostavalley.it",
"aoste.it",
"ap.gov.br",
"ap.gov.pl",
"ap.it",
"aparecida.br",
"apartments",
"api.br",
"app",
"app.br",
"apple",
"aq",
"aq.it",
"aquarelle",
"aquila.it",
"ar",
"ar.it",
"ar.us",
"arab",
"arai.shizuoka.jp",
"arakawa.saitama.jp",
"arakawa.tokyo.jp",
"aramco",
"arao.kumamoto.jp",
"archi",
"architectes.bj",
"ardal.no",
"aremark.no",
"arendal.no",
"arezzo.it",
"ariake.saga.jp",
"arida.wakayama.jp",
"aridagawa.wakayama.jp",
"arita.saga.jp",
"army",
"arna.no",
"arpa",
"arq.br",
"arqt.ec",
"art",
"art.br",
"art.do",
"art.dz",
"art.ec",
"art.ht",
"art.ml",
"art.sn",
"arte",
"arte.bo",
"arts.nf",
"arts.ro",
"arts.ve",
"as",
"as.us",
"asago.hyogo.jp",
"asahi.chiba.jp",
"asahi.ibaraki.jp",
"asahi.mie.jp",
"asahi.nagano.jp",
"asahi.toyama.jp",
"asahi.yamagata.jp",
"asahikawa.hokkaido.jp",
"asaka.saitama.jp",
"asakawa.fukushima.jp",
"asakuchi.okayama.jp",
"asaminami.hiroshima.jp",
"ascoli-piceno.it",
"ascolipiceno.it",
"asda",
"aseral.no",
"ashibetsu.hokkaido.jp",
"ashikaga.tochigi.jp",
"ashiya.fukuoka.jp",
"ashiya.hyogo.jp",
"ashoro.hokkaido.jp",
"asia",
"asker.no",
"askim.no",
"askoy.no",
"askvoll.no",
"askøy.no",
"asn.au",
"asn.lv",
"asnes.no",
"aso.kumamoto.jp",
"ass.km",
"assabu.hokkaido.jp",
"assn.lk",
"asso.ci",
"asso.dz",
"asso.fr",
"asso.gp",
"asso.ht",
"asso.km",
"asso.mc",
"asso.ml",
"asso.nc",
"asso.re",
"associates",
"association.aero",
"assur.bj",
"asti.it",
"asuke.aichi.jp",
"at",
"at.it",
"atami.shizuoka.jp",
"athleta",
"atm.pl",
"ato.br",
"atsugi.kanagawa.jp",
"atsuma.hokkaido.jp",
"attorney",
"au",
"auction",
"audi",
"audible",
"audio",
"audnedaln.no",
"augustow.pl",
"aukra.no",
"aure.no",
"aurland.no",
"aurskog-holand.no",
"aurskog-høland.no",
"auspost",
"austevoll.no",
"austrheim.no",
"author",
"author.aero",
"auto",
"auto.pl",
"autos",
"av.it",
"av.tr",
"avellino.it",
"averoy.no",
"averøy.no",
"avocat.pro",
"avocats.bj",
"avoues.fr",
"aw",
"awaji.hyogo.jp",
"aws",
"ax",
"axa",
"aya.miyazaki.jp",
"ayabe.kyoto.jp",
"ayagawa.kagawa.jp",
"ayase.kanagawa.jp",
"az",
"az.us",
"azumino.nagano.jp",
"azure",
"aéroport.ci",
"b.bg",
"b.br",
"b.se",
"ba",
"ba.gov.br",
"ba.it",
"babia-gora.pl",
"baby",
"bacgiang.vn",
"backan.vn",
"baclieu.vn",
"bacninh.vn",
"badaddja.no",
"bahcavuotna.no",
"bahccavuotna.no",
"baidar.no",
"baidu",
"bajddar.no",
"balat.no",
"balestrand.no",
"ballangen.no",
"ballooning.aero",
"balsan-sudtirol.it",
"balsan-suedtirol.it",
"balsan-südtirol.it",
"balsan.it",
"balsfjord.no",
"bamble.no",
"banamex",
"band",
"bandai.fukushima.jp",
"bando.ibaraki.jp",
"bank",
"bank.in",
"bar",
"bar.ec",
"bar.pro",
"barcelona",
"barclaycard",
"barclays",
"bardu.no",
"barefoot",
"bargains",
"bari.it",
"baria-vungtau.vn",
"barletta-trani-andria.it",
"barlettatraniandria.it",
"barueri.br",
"barum.no",
"bas.it",
"baseball",
"basilicata.it",
"basketball",
"bato.tochigi.jp",
"batsfjord.no",
"bauhaus",
"bayern",
"bb",
"bbc",
"bbs.tr",
"bbt",
"bbva",
"bc.ca",
"bcg",
"bcn",
"bd",
"bd.se",
"be",
"bearalvahki.no",
"bearalváhki.no",
"beardu.no",
"beats",
"beauty",
"bedzin.pl",
"beer",
"beiarn.no",
"bel.tr",
"belem.br",
"belluno.it",
"benevento.it",
"bentre.vn",
"beppu.oita.jp",
"berg.no",
"bergamo.it",
"bergen.no",
"berlevag.no",
"berlevåg.no",
"berlin",
"beskidy.pl",
"best",
"bestbuy",
"bet",
"bet.ar",
"bet.br",
"bf",
"bg",
"bg.it",
"bh",
"bharti",
"bhz.br",
"bi",
"bi.it",
"bialowieza.pl",
"bialystok.pl",
"bib.br",
"bib.ve",
"bibai.hokkaido.jp",
"bible",
"bid",
"biei.hokkaido.jp",
"bielawa.pl",
"biella.it",
"bieszczady.pl",
"bievat.no",
"bievát.no",
"bifuka.hokkaido.jp",
"bihar.in",
"bihoro.hokkaido.jp",
"bike",
"bindal.no",
"bing",
"bingo",
"binhdinh.vn",
"binhduong.vn",
"binhphuoc.vn",
"binhthuan.vn",
"bio",
"bio.br",
"biratori.hokkaido.jp",
"birkenes.no",
"biz",
"biz.az",
"biz.bb",
"biz.cy",
"biz.et",
"biz.fj",
"biz.gh",
"biz.id",
"biz.in",
"biz.ki",
"biz.ls",
"biz.mv",
"biz.mw",
"biz.my",
"biz.ni",
"biz.nr",
"biz.pk",
"biz.pl",
"biz.pr",
"biz.ss",
"biz.tj",
"biz.tr",
"biz.tt",
"biz.vn",
"biz.zm",
"bizen.okayama.jp",
"bj",
"bj.cn",
"bjerkreim.no",
"bjugn.no",
"bl.it",
"black",
"blackfriday",
"blockbuster",
"blog",
"blog.bo",
"blog.br",
"bloomberg",
"blue",
"bm",
"bmd.br",
"bms",
"bmw",
"bn",
"bn.it",
"bnpparibas",
"bo",
"bo.it",
"bo.nordland.no",
"bo.telemark.no",
"boats",
"boavista.br",
"bodo.no",
"bodø.no",
"boehringer",
"bofa",
"bokn.no",
"boleslawiec.pl",
"bolivia.bo",
"bologna.it",
"bolt.hu",
"bolzano-altoadige.it",
"bolzano.it",
"bom",
"bomlo.no",
"bond",
"boo",
"book",
"booking",
"bosch",
"bostik",
"boston",
"bot",
"boutique",
"box",
"bozen-sudtirol.it",
"bozen-suedtirol.it",
"bozen-südtirol.it",
"bozen.it",
"br",
"br.it",
"bradesco",
"brand.se",
"bremanger.no",
"brescia.it",
"bridgestone",
"brindisi.it",
"broadway",
"broker",
"broker.aero",
"bronnoy.no",
"bronnoysund.no",
"brother",
"brumunddal.no",
"brussels",
"bryne.no",
"brønnøy.no",
"brønnøysund.no",
"bs",
"bs.it",
"bsb.br",
"bt",
"bt.it",
"bu.no",
"budejju.no",
"build",
"builders",
"bulsan-sudtirol.it",
"bulsan-suedtirol.it",
"bulsan-südtirol.it",
"bulsan.it",
"bungoono.oita.jp",
"bungotakada.oita.jp",
"bunkyo.tokyo.jp",
"busan.kr",
"business",
"business.in",
"buy",
"buzen.fukuoka.jp",
"buzz",
"bv",
"bw",
"by",
"bydgoszcz.pl",
"bygland.no",
"bykle.no",
"bytom.pl",
"bz",
"bz.it",
"bzh",
"báhcavuotna.no",
"báhccavuotna.no",
"báidár.no",
"bájddar.no",
"bálát.no",
"bådåddjå.no",
"båtsfjord.no",
"bærum.no",
"bø.nordland.no",
"bø.telemark.no",
"bømlo.no",
"c.bg",
"c.se",
"ca",
"ca.in",
"ca.it",
"ca.us",
"caa.aero",
"cab",
"cafe",
"cagliari.it",
"cahcesuolo.no",
"cal",
"cal.it",
"calabria.it",
"call",
"caltanissetta.it",
"calvinklein",
"cam",
"cam.it",
"camau.vn",
"camera",
"camp",
"campania.it",
"campidano-medio.it",
"campidanomedio.it",
"campinagrande.br",
"campinas.br",
"campobasso.it",
"canon",
"cantho.vn",
"caobang.vn",
"capetown",
"capital",
"capitalone",
"car",
"caravan",
"carbonia-iglesias.it",
"carboniaiglesias.it",
"cards",
"care",
"career",
"careers",
"cargo.aero",
"carrara-massa.it",
"carraramassa.it",
"cars",
"casa",
"case",
"caserta.it",
"cash",
"casino",
"casino.hu",
"cat",
"catania.it",
"catanzaro.it",
"catering",
"catering.aero",
"catholic",
"catholic.edu.au",
"caxias.br",
"cb.it",
"cba",
"cbn",
"cbre",
"cc",
"cc.ak.us",
"cc.al.us",
"cc.ar.us",
"cc.as.us",
"cc.az.us",
"cc.ca.us",
"cc.co.us",
"cc.ct.us",
"cc.dc.us",
"cc.de.us",
"cc.fl.us",
"cc.ga.us",
"cc.gu.us",
"cc.hi.us",
"cc.ia.us",
"cc.id.us",
"cc.il.us",
"cc.in.us",
"cc.ks.us",
"cc.ky.us",
"cc.la.us",
"cc.ma.us",
"cc.md.us",
"cc.me.us",
"cc.mi.us",
"cc.mn.us",
"cc.mo.us",
"cc.ms.us",
"cc.mt.us",
"cc.nc.us",
"cc.nd.us",
"cc.ne.us",
"cc.nh.us",
"cc.nj.us",
"cc.nm.us",
"cc.nv.us",
"cc.ny.us",
"cc.oh.us",
"cc.ok.us",
"cc.or.us",
"cc.pa.us",
"cc.pr.us",
"cc.ri.us",
"cc.sc.us",
"cc.sd.us",
"cc.tn.us",
"cc.tx.us",
"cc.ut.us",
"cc.va.us",
"cc.vi.us",
"cc.vt.us",
"cc.wa.us",
"cc.wi.us",
"cc.wv.us",
"cc.wy.us",
"cci.fr",
"cd",
"ce.gov.br",
"ce.it",
"center",
"ceo",
"cern",
"certification.aero",
"cesena-forli.it",
"cesena-forlì.it",
"cesenaforli.it",
"cesenaforlì.it",
"cf",
"cfa",
"cfd",
"cg",
"ch",
"ch.it",
"championship.aero",
"chanel",
"channel",
"charity",
"charter.aero",
"chase",
"chat",
"cheap",
"chef.ec",
"cherkassy.ua",
"cherkasy.ua",
"chernigov.ua",
"chernihiv.ua",
"chernivtsi.ua",
"chernovtsy.ua",
"chiba.jp",
"chichibu.saitama.jp",
"chieti.it",
"chigasaki.kanagawa.jp",
"chihayaakasaka.osaka.jp",
"chijiwa.nagasaki.jp",
"chikugo.fukuoka.jp",
"chikuho.fukuoka.jp",
"chikuhoku.nagano.jp",
"chikujo.fukuoka.jp",
"chikuma.nagano.jp",
"chikusei.ibaraki.jp",
"chikushino.fukuoka.jp",
"chikuzen.fukuoka.jp",
"chino.nagano.jp",
"chintai",
"chippubetsu.hokkaido.jp",
"chiryu.aichi.jp",
"chita.aichi.jp",
"chitose.hokkaido.jp",
"chiyoda.gunma.jp",
"chiyoda.tokyo.jp",
"chizu.tottori.jp",
"chofu.tokyo.jp",
"chonan.chiba.jp",
"chosei.chiba.jp",
"choshi.chiba.jp",
"choyo.kumamoto.jp",
"christmas",
"chrome",
"chtr.k12.ma.us",
"chungbuk.kr",
"chungnam.kr",
"chuo.chiba.jp",
"chuo.fukuoka.jp",
"chuo.osaka.jp",
"chuo.tokyo.jp",
"chuo.yamanashi.jp",
"church",
"ci",
"ci.it",
"ciencia.bo",
"cieszyn.pl",
"cim.br",
"cipriani",
"circle",
"cisco",
"citadel",
"citi",
"citic",
"city",
"city.hu",
"city.kawasaki.jp",
"city.kitakyushu.jp",
"city.kobe.jp",
"city.nagoya.jp",
"city.sapporo.jp",
"city.sendai.jp",
"city.yokohama.jp",
"civilaviation.aero",
"ck",
"ck.ua",
"cl",
"cl.it",
"claims",
"cleaning",
"click",
"clinic",
"clinique",
"clothing",
"cloud",
"club",
"club.aero",
"club.tw",
"clubmed",
"cm",
"cn",
"cn.in",
"cn.it",
"cn.ua",
"cng.br",
"cnt.br",
"co",
"co.ae",
"co.ag",
"co.am",
"co.ao",
"co.at",
"co.az",
"co.bb",
"co.bd",
"co.bi",
"co.bj",
"co.bw",
"co.bz",
"co.ci",
"co.cl",
"co.cm",
"co.cr",
"co.dm",
"co.gg",
"co.gl",
"co.gy",
"co.hu",
"co.id",
"co.il",
"co.im",
"co.in",
"co.io",
"co.ir",
"co.it",
"co.je",
"co.jp",
"co.ke",
"co.kr",
"co.lc",
"co.ls",
"co.ma",
"co.me",
"co.mg",
"co.mu",
"co.mw",
"co.mz",
"co.na",
"co.ni",
"co.nz",
"co.om",
"co.pn",
"co.rs",
"co.rw",
"co.ss",
"co.st",
"co.sz",
"co.th",
"co.tj",
"co.tm",
"co.tt",
"co.tz",
"co.ug",
"co.uk",
"co.us",
"co.uz",
"co.ve",
"co.vi",
"co.za",
"co.zm",
"co.zw",
"coach",
"codes",
"coffee",
"cog.mi.us",
"college",
"cologne",
"com",
"com.ac",
"com.af",
"com.ag",
"com.ai",
"com.al",
"com.am",
"com.ar",
"com.au",
"com.aw",
"com.az",
"com.ba",
"com.bb",
"com.bd",
"com.bh",
"com.bi",
"com.bj",
"com.bm",
"com.bn",
"com.bo",
"com.br",
"com.bs",
"com.bt",
"com.by",
"com.bz",
"com.ci",
"com.cm",
"com.cn",
"com.co",
"com.cu",
"com.cv",
"com.cw",
"com.cy",
"com.dm",
"com.do",
"com.dz",
"com.ec",
"com.ee",
"com.eg",
"com.es",
"com.et",
"com.fj",
"com.fm",
"com.fr",
"com.ge",
"com.gh",
"com.gi",
"com.gl",
"com.gn",
"com.gp",
"com.gr",
"com.gt",
"com.gu",
"com.gy",
"com.hk",
"com.hn",
"com.hr",
"com.ht",
"com.im",
"com.in",
"com.io",
"com.iq",
"com.jo",
"com.kg",
"com.ki",
"com.km",
"com.kp",
"com.kw",
"com.ky",
"com.kz",
"com.la",
"com.lb",
"com.lc",
"com.lk",
"com.lr",
"com.lv",
"com.ly",
"com.mg",
"com.mk",
"com.ml",
"com.mo",
"com.ms",
"com.mt",
"com.mu",
"com.mv",
"com.mw",
"com.mx",
"com.my",
"com.na",
"com.nf",
"com.ng",
"com.ni",
"com.nr",
"com.om",
"com.pa",
"com.pe",
"com.pf",
"com.ph",
"com.pk",
"com.pl",
"com.pr",
"com.ps",
"com.pt",
"com.py",
"com.qa",
"com.re",
"com.ro",
"com.sa",
"com.sb",
"com.sc",
"com.sd",
"com.sg",
"com.sh",
"com.sl",
"com.sn",
"com.so",
"com.ss",
"com.st",
"com.sv",
"com.sy",
"com.tj",
"com.tm",
"com.tn",
"com.to",
"com.tr",
"com.tt",
"com.tw",
"com.ua",
"com.ug",
"com.uy",
"com.uz",
"com.vc",
"com.ve",
"com.vi",
"com.vn",
"com.vu",
"com.ws",
"com.ye",
"com.zm",
"commbank",
"commune.am",
"community",
"como.it",
"company",
"compare",
"computer",
"comsec",
"condos",
"conf.au",
"conf.lv",
"conference.aero",
"construction",
"consulado.st",
"consultant.aero",
"consulting",
"consulting.aero",
"cont.ec",
"contact",
"contagem.br",
"contractors",
"control.aero",
"cooking",
"cool",
"coop",
"coop.ar",
"coop.br",
"coop.ht",
"coop.in",
"coop.km",
"coop.mv",
"coop.mw",
"coop.py",
"coop.rw",
"cooperativa.bo",
"corsica",
"cosenza.it",
"council.aero",
"country",
"coupon",
"coupons",
"courses",
"coz.br",
"cpa",
"cpa.ec",
"cpa.pro",
"cq.cn",
"cr",
"cr.it",
"cr.ua",
"credit",
"creditcard",
"creditunion",
"cremona.it",
"crew.aero",
"cri.br",
"cri.nz",
"cricket",
"crimea.ua",
"crotone.it",
"crown",
"crs",
"cruise",
"cruises",
"cs.in",
"cs.it",
"ct.it",
"ct.us",
"cu",
"cue.ec",
"cuiaba.br",
"cuisinella",
"cuneo.it",
"curitiba.br",
"cv",
"cv.ua",
"cw",
"cx",
"cy",
"cymru",
"cyou",
"cz",
"cz.it",
"czeladz.pl",
"czest.pl",
"d.bg",
"d.se",
"dad",
"daegu.kr",
"daejeon.kr",
"daigo.ibaraki.jp",
"daisen.akita.jp",
"daito.osaka.jp",
"daiwa.hiroshima.jp",
"daklak.vn",
"daknong.vn",
"danang.vn",
"dance",
"data",
"date",
"date.fukushima.jp",
"date.hokkaido.jp",
"dating",
"datsun",
"davvenjarga.no",
"davvenjárga.no",
"davvesiida.no",
"day",
"dazaifu.fukuoka.jp",
"dc.us",
"dclk",
"dds",
"de",
"de.us",
"deal",
"dealer",
"deals",
"deatnu.no",
"def.br",
"degree",
"delhi.in",
"delivery",
"dell",
"dell-ogliastra.it",
"dellogliastra.it",
"deloitte",
"delta",
"democracia.bo",
"democrat",
"dent.ec",
"dental",
"dentist",
"dep.no",
"deporte.bo",
"des.br",
"desa.id",
"desi",
"design",
"design.aero",
"det.br",
"dev",
"dev.br",
"df.gov.br",
"dgca.aero",
"dgn.ec",
"dhl",
"diamonds",
"dielddanuorri.no",
"dienbien.vn",
"diet",
"digital",
"direct",
"directory",
"disco.ec",
"discount",
"discover",
"dish",
"divtasvuodna.no",
"divttasvuotna.no",
"diy",
"dj",
"dk",
"dlugoleka.pl",
"dm",
"dn.ua",
"dnepropetrovsk.ua",
"dni.us",
"dnipropetrovsk.ua",
"dnp",
"do",
"doc.ec",
"docs",
"doctor",
"dog",
"domains",
"donetsk.ua",
"dongnai.vn",
"dongthap.vn",
"donna.no",
"doshi.yamanashi.jp",
"dot",
"dovre.no",
"download",
"dp.ua",
"dr.in",
"dr.tr",
"drammen.no",
"drangedal.no",
"drive",
"drobak.no",
"drøbak.no",
"dst.mi.us",
"dtv",
"dubai",
"dupont",
"durban",
"dvag",
"dvr",
"dyroy.no",
"dyrøy.no",
"dz",
"dønna.no",
"e.bg",
"e.se",
"e12.ve",
"e164.arpa",
"earth",
"eat",
"eaton.mi.us",
"ebetsu.hokkaido.jp",
"ebina.kanagawa.jp",
"ebino.miyazaki.jp",
"ebiz.tw",
"ec",
"echizen.fukui.jp",
"ecn.br",
"eco",
"eco.bj",
"eco.br",
"ecologia.bo",
"econo.bj",
"economia.bo",
"ed.ao",
"ed.ci",
"ed.cr",
"ed.jp",
"edeka",
"edogawa.tokyo.jp",
"edu",
"edu.ac",
"edu.af",
"edu.al",
"edu.ao",
"edu.ar",
"edu.au",
"edu.az",
"edu.ba",
"edu.bb",
"edu.bd",
"edu.bh",
"edu.bi",
"edu.bj",
"edu.bm",
"edu.bn",
"edu.bo",
"edu.br",
"edu.bs",
"edu.bt",
"edu.bz",
"edu.ci",
"edu.cn",
"edu.co",
"edu.cu",
"edu.cv",
"edu.cw",
"edu.dm",
"edu.do",
"edu.dz",
"edu.ec",
"edu.ee",
"edu.eg",
"edu.es",
"edu.et",
"edu.fj",
"edu.fm",
"edu.gd",
"edu.ge",
"edu.gh",
"edu.gi",
"edu.gl",
"edu.gn",
"edu.gp",
"edu.gr",
"edu.gt",
"edu.gu",
"edu.gy",
"edu.hk",
"edu.hn",
"edu.ht",
"edu.in",
"edu.io",
"edu.iq",
"edu.it",
"edu.jo",
"edu.kg",
"edu.ki",
"edu.km",
"edu.kn",
"edu.kp",
"edu.kw",
"edu.ky",
"edu.kz",
"edu.la",
"edu.lb",
"edu.lc",
"edu.lk",
"edu.lr",
"edu.ls",
"edu.lv",
"edu.ly",
"edu.me",
"edu.mg",
"edu.mk",
"edu.ml",
"edu.mn",
"edu.mo",
"edu.ms",
"edu.mt",
"edu.mv",
"edu.mw",
"edu.mx",
"edu.my",
"edu.mz",
"edu.ng",
"edu.ni",
"edu.nr",
"edu.om",
"edu.pa",
"edu.pe",
"edu.pf",
"edu.ph",
"edu.pk",
"edu.pl",
"edu.pn",
"edu.pr",
"edu.ps",
"edu.pt",
"edu.py",
"edu.qa",
"edu.rs",
"edu.sa",
"edu.sb",
"edu.sc",
"edu.sd",
"edu.sg",
"edu.sl",
"edu.sn",
"edu.so",
"edu.ss",
"edu.st",
"edu.sv",
"edu.sy",
"edu.tj",
"edu.tm",
"edu.to",
"edu.tr",
"edu.tt",
"edu.tw",
"edu.ua",
"edu.ug",
"edu.uy",
"edu.vc",
"edu.ve",
"edu.vg",
"edu.vn",
"edu.vu",
"edu.ws",
"edu.ye",
"edu.za",
"edu.zm",
"education",
"educator.aero",
"ee",
"eg",
"egersund.no",
"ehime.jp",
"eid.no",
"eidfjord.no",
"eidsberg.no",
"eidskog.no",
"eidsvoll.no",
"eigersund.no",
"eiheiji.fukui.jp",
"ekloges.cy",
"elblag.pl",
"elk.pl",
"elverum.no",
"email",
"emb.kw",
"embaixada.st",
"embetsu.hokkaido.jp",
"emerck",
"emergency.aero",
"emilia-romagna.it",
"emiliaromagna.it",
"emp.br",
"emprende.ve",
"empresa.bo",
"emr.it",
"en.it",
"ena.gifu.jp",
"enebakk.no",
"energy",
"enf.br",
"eng.br",
"eng.ec",
"eng.jo",
"eng.pro",
"engerdal.no",
"engine.aero",
"engineer",
"engineer.aero",
"engineering",
"eniwa.hokkaido.jp",
"enna.it",
"ens.tn",
"enterprises",
"entertainment.aero",
"epson",
"equipment",
"equipment.aero",
"er",
"er.in",
"ericsson",
"erimo.hokkaido.jp",
"erni",
"erotica.hu",
"erotika.hu",
"es",
"es.gov.br",
"es.kr",
"esan.hokkaido.jp",
"esashi.hokkaido.jp",
"esm.ec",
"esp.br",
"esq",
"est.pr",
"estate",
"et",
"etajima.hiroshima.jp",
"etc.br",
"eti.br",
"etne.no",
"etnedal.no",
"eu",
"eu.int",
"eun.eg",
"eurovision",
"eus",
"evenassi.no",
"evenes.no",
"events",
"evenášši.no",
"evje-og-hornnes.no",
"exchange",
"exchange.aero",
"expert",
"exposed",
"express",
"express.aero",
"extraspace",
"f.bg",
"f.se",
"fage",
"fail",
"fairwinds",
"faith",
"fam.pk",
"family",
"fan",
"fans",
"far.br",
"farm",
"farmers",
"farsund.no",
"fashion",
"fast",
"fauske.no",
"fc.it",
"fe.it",
"federation.aero",
"fedex",
"fedje.no",
"feedback",
"feira.br",
"fermo.it",
"ferrara.it",
"ferrari",
"ferrero",
"fet.no",
"fetsund.no",
"fg.it",
"fh.se",
"fhs.no",
"fhsk.se",
"fhv.se",
"fi",
"fi.cr",
"fi.it",
"fidelity",
"fido",
"fie.ee",
"film",
"film.hu",
"fin.ec",
"fin.in",
"fin.tn",
"final",
"finance",
"financial",
"finnoy.no",
"finnøy.no",
"fire",
"firenze.it",
"firestone",
"firm.ht",
"firm.in",
"firm.nf",
"firm.ro",
"firm.ve",
"firmdale",
"fish",
"fishing",
"fit",
"fitjar.no",
"fitness",
"fj",
"fj.cn",
"fjaler.no",
"fjell.no",
"fk",
"fl.us",
"fla.no",
"flakstad.no",
"flatanger.no",
"flekkefjord.no",
"flesberg.no",
"flickr",
"flight.aero",
"flights",
"flir",
"flog.br",
"flora.no",
"florence.it",
"floripa.br",
"florist",
"floro.no",
"florø.no",
"flowers",
"fly",
"flå.no",
"fm",
"fm.br",
"fm.it",
"fm.jo",
"fm.no",
"fnd.br",
"fo",
"foggia.it",
"folkebibl.no",
"folldal.no",
"foo",
"food",
"football",
"ford",
"forde.no",
"forex",
"forli-cesena.it",
"forlicesena.it",
"forlì-cesena.it",
"forlìcesena.it",
"forsale",
"forsand.no",
"fortal.br",
"forum",
"forum.hu",
"fosnes.no",
"fot.br",
"fot.ec",
"foundation",
"fox",
"foz.br",
"fr",
"fr.it",
"frana.no",
"fredrikstad.no",
"free",
"frei.no",
"freight.aero",
"fresenius",
"friuli-v-giulia.it",
"friuli-ve-giulia.it",
"friuli-vegiulia.it",
"friuli-venezia-giulia.it",
"friuli-veneziagiulia.it",
"friuli-vgiulia.it",
"friuliv-giulia.it",
"friulive-giulia.it",
"friulivegiulia.it",
"friulivenezia-giulia.it",
"friuliveneziagiulia.it",
"friulivgiulia.it",
"frl",
"frogans",
"frogn.no",
"froland.no",
"from.hr",
"frontier",
"frosinone.it",
"frosta.no",
"froya.no",
"fræna.no",
"frøya.no",
"fst.br",
"ftr",
"fuchu.hiroshima.jp",
"fuchu.tokyo.jp",
"fuchu.toyama.jp",
"fudai.iwate.jp",
"fuefuki.yamanashi.jp",
"fuel.aero",
"fuji.shizuoka.jp",
"fujieda.shizuoka.jp",
"fujiidera.osaka.jp",
"fujikawa.shizuoka.jp",
"fujikawa.yamanashi.jp",
"fujikawaguchiko.yamanashi.jp",
"fujimi.nagano.jp",
"fujimi.saitama.jp",
"fujimino.saitama.jp",
"fujinomiya.shizuoka.jp",
"fujioka.gunma.jp",
"fujisato.akita.jp",
"fujisawa.iwate.jp",
"fujisawa.kanagawa.jp",
"fujishiro.ibaraki.jp",
"fujitsu",
"fujiyoshida.yamanashi.jp",
"fukagawa.hokkaido.jp",
"fukaya.saitama.jp",
"fukuchi.fukuoka.jp",
"fukuchiyama.kyoto.jp",
"fukudomi.saga.jp",
"fukui.fukui.jp",
"fukui.jp",
"fukumitsu.toyama.jp",
"fukuoka.jp",
"fukuroi.shizuoka.jp",
"fukusaki.hyogo.jp",
"fukushima.fukushima.jp",
"fukushima.hokkaido.jp",
"fukushima.jp",
"fukuyama.hiroshima.jp",
"fun",
"funabashi.chiba.jp",
"funagata.yamagata.jp",
"funahashi.toyama.jp",
"fund",
"fuoisku.no",
"fuossko.no",
"furano.hokkaido.jp",
"furniture",
"furubira.hokkaido.jp",
"furudono.fukushima.jp",
"furukawa.miyagi.jp",
"fusa.no",
"fuso.aichi.jp",
"fussa.tokyo.jp",
"futaba.fukushima.jp",
"futbol",
"futsu.nagasaki.jp",
"futtsu.chiba.jp",
"fvg.it",
"fyi",
"fylkesbibl.no",
"fyresdal.no",
"førde.no",
"g.bg",
"g.se",
"g12.br",
"ga",
"ga.us",
"gaivuotna.no",
"gal",
"gal.ec",
"gallery",
"gallo",
"gallup",
"galsa.no",
"gamagori.aichi.jp",
"game",
"game.tw",
"games",
"games.hu",
"gamo.shiga.jp",
"gamvik.no",
"gangaviika.no",
"gangwon.kr",
"gap",
"garden",
"gaular.no",
"gausdal.no",
"gay",
"gb",
"gbiz",
"gc.ca",
"gd",
"gd.cn",
"gdn",
"ge",
"ge.it",
"gea",
"geek.nz",
"geisei.kochi.jp",
"gen.in",
"gen.mi.us",
"gen.nz",
"gen.tr",
"genkai.saga.jp",
"genoa.it",
"genova.it",
"gent",
"genting",
"geo.br",
"george",
"gf",
"gg",
"ggee",
"ggf.br",
"gh",
"gi",
"gialai.vn",
"giehtavuoatna.no",
"gift",
"gifts",
"gifu.gifu.jp",
"gifu.jp",
"gildeskal.no",
"gildeskål.no",
"ginan.gifu.jp",
"ginowan.okinawa.jp",
"ginoza.okinawa.jp",
"giske.no",
"gives",
"giving",
"gjemnes.no",
"gjerdrum.no",
"gjerstad.no",
"gjesdal.no",
"gjovik.no",
"gjøvik.no",
"gkp.pk",
"gl",
"glass",
"gle",
"gliding.aero",
"global",
"globo",
"glogow.pl",
"gloppen.no",
"gm",
"gmail",
"gmbh",
"gmina.pl",
"gmo",
"gmx",
"gn",
"gniezno.pl",
"go.ci",
"go.cr",
"go.gov.br",
"go.id",
"go.it",
"go.jp",
"go.ke",
"go.kr",
"go.th",
"go.tj",
"go.tz",
"go.ug",
"gob.ar",
"gob.bo",
"gob.cl",
"gob.cu",
"gob.do",
"gob.ec",
"gob.es",
"gob.gt",
"gob.hn",
"gob.mx",
"gob.ni",
"gob.pa",
"gob.pe",
"gob.pk",
"gob.sv",
"gob.ve",
"gobo.wakayama.jp",
"godaddy",
"godo.gifu.jp",
"gog.pk",
"goiania.br",
"gojome.akita.jp",
"gok.pk",
"gokase.miyazaki.jp",
"gol.no",
"gold",
"goldpoint",
"golf",
"gonohe.aomori.jp",
"goo",
"goodyear",
"goog",
"google",
"gop",
"gop.pk",
"gorizia.it",
"gorlice.pl",
"gos.pk",
"gose.nara.jp",
"gosen.niigata.jp",
"goshiki.hyogo.jp",
"got",
"gotemba.shizuoka.jp",
"goto.nagasaki.jp",
"gotsu.shimane.jp",
"gouv.ci",
"gouv.fr",
"gouv.ht",
"gouv.km",
"gouv.ml",
"gouv.sn",
"gov",
"gov.ac",
"gov.ae",
"gov.af",
"gov.al",
"gov.ao",
"gov.ar",
"gov.as",
"gov.au",
"gov.az",
"gov.ba",
"gov.bb",
"gov.bd",
"gov.bf",
"gov.bh",
"gov.bm",
"gov.bn",
"gov.br",
"gov.bs",
"gov.bt",
"gov.bw",
"gov.by",
"gov.bz",
"gov.cd",
"gov.cl",
"gov.cm",
"gov.cn",
"gov.co",
"gov.cx",
"gov.cy",
"gov.cz",
"gov.dm",
"gov.do",
"gov.dz",
"gov.ec",
"gov.ee",
"gov.eg",
"gov.et",
"gov.fj",
"gov.gd",
"gov.ge",
"gov.gh",
"gov.gi",
"gov.gn",
"gov.gr",
"gov.gu",
"gov.gy",
"gov.hk",
"gov.ie",
"gov.il",
"gov.in",
"gov.io",
"gov.iq",
"gov.ir",
"gov.it",
"gov.jo",
"gov.kg",
"gov.ki",
"gov.km",
"gov.kn",
"gov.kp",
| rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | true |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/twistrs/src/lib.rs | twistrs/src/lib.rs | //! Twistrs is a domain name permutation and enumeration library
//! that is built on top of async Rust.
//!
//! The library is designed to be fast, modular and easy-to-use
//! for clients.
//!
//! The two primary structs to look into are [Domain](./permutate/struct.Domain.html)
//! and [`DomainMetadata`](./enrich/struct.DomainMetadata.html).
//!
//! Additionally the module documentation for [permutation](./permutate/index.html)
//! and [enumeration](./enrich/index.html) provides more
//! granular details on how each module may be used independently.
//!
//! Both modules can be combined to perform domain permutation and enrichment asynchronously.
//!
//! ### Example
//!
//! The following is a trivial example using [Tokio mpsc](https://docs.rs/tokio/0.2.22/tokio/sync/mpsc/index.html).
//!
//! ```
//! use twistrs::{
//! permutate::{Domain},
//! enrich::DomainMetadata,
//! filter::{Filter, Permissive},
//! };
//!
//! use tokio::sync::mpsc;
//!
//!
//! #[tokio::main]
//! async fn main() {
//! let domain = Domain::new("google.com").unwrap();
//! let permutations = domain.addition(&Permissive);
//!
//! let (tx, mut rx) = mpsc::channel(1000);
//!
//! for permutation in permutations {
//! let domain_metadata = DomainMetadata::new(permutation.domain.fqdn.clone());
//! let mut tx = tx.clone();
//!
//! tokio::spawn(async move {
//! if let Err(_) = tx.send((permutation.clone(), domain_metadata.dns_resolvable().await)).await {
//! println!("received dropped");
//! return;
//! }
//!
//! drop(tx);
//! });
//! }
//!
//! drop(tx);
//!
//! while let Some(i) = rx.recv().await {
//! println!("{:?}", i);
//! }
//! }
//! ```
#![deny(
// TODO(jdb): Uncomment missing docs later on
//missing_docs,
future_incompatible,
nonstandard_style,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused_qualifications
)]
#![deny(
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
clippy::cast_sign_loss,
clippy::checked_conversions,
clippy::decimal_literal_representation,
clippy::doc_markdown,
clippy::empty_enum,
clippy::explicit_into_iter_loop,
clippy::explicit_iter_loop,
clippy::expl_impl_clone_on_copy,
clippy::fallible_impl_from,
clippy::manual_filter_map,
clippy::filter_map_next,
clippy::manual_find_map,
clippy::float_arithmetic,
clippy::get_unwrap,
clippy::if_not_else,
clippy::inline_always,
clippy::invalid_upcast_comparisons,
clippy::items_after_statements,
clippy::map_flatten,
clippy::match_same_arms,
clippy::maybe_infinite_iter,
clippy::mem_forget,
clippy::module_name_repetitions,
clippy::multiple_inherent_impl,
clippy::mut_mut,
clippy::needless_borrow,
clippy::needless_continue,
clippy::needless_pass_by_value,
clippy::map_unwrap_or,
clippy::path_buf_push_overwrite,
clippy::print_stdout,
clippy::redundant_closure_for_method_calls,
clippy::shadow_reuse,
clippy::shadow_same,
clippy::shadow_unrelated,
clippy::single_match_else,
clippy::string_add,
clippy::string_add_assign,
clippy::type_repetition_in_bounds,
clippy::unicode_not_nfc,
// clippy::unimplemented,
clippy::unseparated_literal_suffix,
clippy::used_underscore_binding,
clippy::wildcard_dependencies,
// clippy::wildcard_enum_match_arm,
)]
#![recursion_limit = "128"]
#[macro_use]
extern crate lazy_static;
pub mod constants;
pub mod enrich;
pub mod error;
pub mod filter;
pub mod permutate;
pub mod tlds;
pub use permutate::{Domain, Permutation, PermutationError, PermutationKind};
| rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | false |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/twistrs/src/filter.rs | twistrs/src/filter.rs | use crate::Domain;
/// Trait for filtering generated permutations. An implementor decides, for
/// each [`Domain`], whether the permutation should be kept (`true`) or
/// dismissed (`false`). This is useful when certain permutation methods
/// (e.g., [`tld`](./permutate/Domain#tld)) expose permutations that you would
/// like to dismiss.
pub trait Filter {
    type Error;

    /// Returns `true` when `domain` passes the filter.
    fn matches(&self, domain: &Domain) -> bool;

    /// Fallible variant of [`Filter::matches`]; the default implementation
    /// just wraps the infallible check.
    ///
    /// **Note** — this is currently not being used internally.
    fn try_matches(&self, domain: &Domain) -> Result<bool, Self::Error> {
        Ok(self.matches(domain))
    }
}
/// Wildcard-style filter: every permutation is retained, none are dismissed.
#[derive(Default, Copy, Clone)]
pub struct Permissive;

impl Filter for Permissive {
    type Error = ();

    /// Always succeeds, regardless of the domain under inspection.
    fn matches(&self, _domain: &Domain) -> bool {
        true
    }
}
/// Retains only permutations whose FQDN contains at least one of the given
/// substring patterns; everything that matches none of them is filtered out.
///
/// Example usage may be narrowing the [`tld`](./permutate/Domain#tld)
/// permutations down to TLDs that contain part of the origin TLD.
#[derive(Default, Copy, Clone)]
pub struct Substring<'a, S: AsRef<str> + 'a> {
    substrings: &'a [S],
}

impl<'a, S: AsRef<str>> Substring<'a, S> {
    /// Builds a filter over the given substring patterns.
    pub fn new(substrings: &'a [S]) -> Self {
        Substring { substrings }
    }
}

impl<S: AsRef<str>> Filter for Substring<'_, S> {
    type Error = ();

    /// `true` when any configured pattern occurs within `domain.fqdn`.
    fn matches(&self, domain: &Domain) -> bool {
        let fqdn = &domain.fqdn;
        self.substrings
            .iter()
            .any(|pattern| fqdn.contains(pattern.as_ref()))
    }
}
| rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | false |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/twistrs/src/enrich.rs | twistrs/src/enrich.rs | //! The enrichment module exposes functionality to enrich
//! a given domain with interesting metadata. Currently
//! including:
//!
//! * DNS resolution (through HTTP/80 lookup).
//! * Open SMTP server (for email misdirects).
//!
//! Example:
//!
//! ```
//! use twistrs::enrich::DomainMetadata;
//!
//! #[tokio::main]
//! async fn main() {
//! let domain_metadata = DomainMetadata::new(String::from("google.com"));
//! domain_metadata.dns_resolvable().await;
//! }
//! ```
//!
//! Note that the enrichment module is independent from the
//! permutation module and can be used with any given FQDN.
use serde::Serialize;
use std::net::IpAddr;
#[cfg(feature = "geoip_lookup")]
use maxminddb;
#[cfg(feature = "geoip_lookup")]
use maxminddb::geoip2;
#[cfg(feature = "whois_lookup")]
use whois_rust::WhoIsLookupOptions;
#[cfg(feature = "smtp_lookup")]
use async_smtp::{Envelope, SendableEmail, SmtpClient, SmtpTransport};
#[cfg(feature = "smtp_lookup")]
use tokio::{io::BufStream, net::TcpStream};
use hyper::{Body, Request};
use tokio::net;
use crate::constants::HTTP_CLIENT;
use crate::error::Error;
#[cfg(feature = "whois_lookup")]
use crate::constants::WHOIS;
/// Errors surfaced by the enrichment lookups in this module. Every variant
/// carries the domain that was being enriched when the failure occurred.
#[derive(thiserror::Error, Debug)]
pub enum EnrichmentError {
    /// DNS resolution failed for the given domain.
    #[error("error resolving domain name (domain: {domain})")]
    DnsResolutionError { domain: String },
    /// WHOIS lookup failed (only compiled with the `whois_lookup` feature).
    #[cfg(feature = "whois_lookup")]
    #[error("error resolving domain name (domain: {domain}, error: {error})")]
    WhoIsLookupError {
        domain: String,
        error: whois_rust::WhoIsError,
    },
    /// SMTP probe failed (only compiled with the `smtp_lookup` feature).
    #[cfg(feature = "smtp_lookup")]
    #[error("error performing smtp lookup (domain: {domain}, error: {error})")]
    SmtpLookupError {
        domain: String,
        error: anyhow::Error,
    },
    /// The HTTP `server` banner could not be fetched or parsed.
    #[error("error performing http banner lookup (domain: {domain}, error: {error})")]
    HttpBannerError {
        domain: String,
        error: anyhow::Error,
    },
    /// GeoIP database lookup failed for one of the resolved IPs.
    #[error("error performing geoip lookup (domain: {domain}, error: {error})")]
    GeoIpLookupError {
        domain: String,
        error: anyhow::Error,
    },
}
/// Container to store interesting FQDN metadata
/// on domains that we resolve.
///
/// Whenever any domain enrichment occurs, the
/// following struct is returned to indicate the
/// information that was derived.
///
/// **N.B**—there will be cases where a single
/// domain can have multiple `DomainMetadata`
/// instances associated with it.
#[derive(Debug, Clone, Serialize, Default)]
pub struct DomainMetadata {
    /// The domain that is being enriched.
    pub fqdn: String,
    /// Any IPv4 and IPv6 ips that were discovered during
    /// domain resolution.
    pub ips: Option<Vec<IpAddr>>,
    /// Any SMTP message data (if any) that was returned by
    /// an SMTP server.
    pub smtp: Option<SmtpMetadata>,
    /// HTTP server banner data extracted.
    pub http_banner: Option<String>,
    /// IP addresses resolved through `GeoIP` lookup to `City`, `Country`, `Continent`.
    pub geo_ip_lookups: Option<Vec<(IpAddr, String)>>,
    /// Block of text returned by the `WhoIs` registrar.
    pub who_is_lookup: Option<String>,
}
/// SMTP specific metadata generated by a particular domain.
#[derive(Debug, Clone, Serialize)]
pub struct SmtpMetadata {
    /// Whether the email was dispatched successfully
    pub is_positive: bool,
    /// Message received back from the SMTP server
    pub message: String,
}
impl DomainMetadata {
    /// Create a new empty state for a particular FQDN.
    pub fn new(fqdn: String) -> DomainMetadata {
        // All enrichment fields start out as `None` via `Default`.
        DomainMetadata {
            fqdn,
            ..Default::default()
        }
    }
    /// Asynchronous DNS resolution on a `DomainMetadata` instance.
    ///
    /// Returns `Ok(DomainMetadata)` if the domain was resolved,
    /// otherwise returns `Err(EnrichmentError)`.
    ///
    /// **N.B**—also host lookups are done over port 80.
    pub async fn dns_resolvable(&self) -> Result<DomainMetadata, Error> {
        // `lookup_host` needs a socket address, so an arbitrary port (80) is
        // appended; only the resolved IPs are kept in the result.
        Ok(net::lookup_host(&format!("{}:80", self.fqdn)[..])
            .await
            .map(|addrs| DomainMetadata {
                fqdn: self.fqdn.clone(),
                ips: Some(addrs.map(|addr| addr.ip()).collect()),
                smtp: None,
                http_banner: None,
                geo_ip_lookups: None,
                who_is_lookup: None,
            })
            .map_err(|_| EnrichmentError::DnsResolutionError {
                domain: self.fqdn.clone(),
            })?)
    }
    /// Asynchronous SMTP check. Attempts to establish an SMTP
    /// connection to the FQDN on port 25 and send a pre-defined email.
    ///
    /// Currently returns `Ok(DomainMetadata)` always, which
    /// internally contains `Option<SmtpMetadata>`. To check
    /// if the SMTP relay worked, check that
    /// `DomainMetadata.smtp` is `Some(v)`.
    #[cfg(feature = "smtp_lookup")]
    pub async fn mx_check(&self) -> Result<DomainMetadata, Error> {
        let email = SendableEmail::new(
            Envelope::new(
                Some("twistrs@example.com".parse().unwrap()),
                vec!["twistrs@example.com".parse().unwrap()],
            )
            .map_err(|e| EnrichmentError::SmtpLookupError {
                domain: self.fqdn.clone(),
                error: anyhow::Error::msg(e),
            })?,
            "And that's how the cookie crumbles\n",
        );
        let stream = BufStream::new(
            TcpStream::connect(&format!("{}:25", self.fqdn))
                .await
                .map_err(|e| EnrichmentError::SmtpLookupError {
                    domain: self.fqdn.clone(),
                    error: anyhow::Error::msg(e),
                })?,
        );
        let client = SmtpClient::new();
        let mut transport = SmtpTransport::new(client, stream).await.map_err(|e| {
            EnrichmentError::SmtpLookupError {
                domain: self.fqdn.clone(),
                error: anyhow::Error::msg(e),
            }
        })?;
        let result = transport.send(email).await.map(|response| DomainMetadata {
            fqdn: self.fqdn.clone(),
            ips: None,
            smtp: Some(SmtpMetadata {
                is_positive: response.is_positive(),
                message: response.message.into_iter().collect::<String>(),
            }),
            http_banner: None,
            geo_ip_lookups: None,
            who_is_lookup: None,
        });
        // A timeout is deliberately treated as "no SMTP data" (empty
        // metadata) rather than an error; all other failures propagate.
        Ok(match result {
            Ok(domain_metadata) => Ok(domain_metadata),
            Err(async_smtp::error::Error::Timeout(_)) => Ok(DomainMetadata::new(self.fqdn.clone())),
            Err(e) => Err(EnrichmentError::SmtpLookupError {
                domain: self.fqdn.clone(),
                error: anyhow::Error::msg(e),
            }),
        }?)
    }
    /// Asynchronous HTTP Banner fetch. Searches and parses `server` header
    /// from an HTTP request to gather the HTTP banner.
    ///
    /// Note that a `HEAD` request is issued to minimise bandwidth. Also note
    /// that the internal [`HttpConnector`](https://docs.rs/hyper/0.13.8/hyper/client/struct.HttpConnector.html)
    /// sets the response buffer window to 1024 bytes, the CONNECT timeout to
    /// 5s and enforces HTTP scheme.
    ///
    /// ```
    /// use twistrs::enrich::DomainMetadata;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let domain_metadata = DomainMetadata::new(String::from("www.phishdeck.com"));
    ///     println!("{:?}", domain_metadata.http_banner().await);
    /// }
    /// ```
    pub async fn http_banner(&self) -> Result<DomainMetadata, Error> {
        // Construct the basic request to be sent out
        let request = Request::builder()
            .method("HEAD")
            .uri(format!("http://{}", &self.fqdn))
            .header("User-Agent", "github-juxhindb-twistrs-http-banner/1.0")
            .body(Body::from("")) // This is annoying
            .map_err(|e| EnrichmentError::HttpBannerError {
                domain: self.fqdn.clone(),
                error: anyhow::Error::msg(e),
            })?;
        if let Ok(response) = HTTP_CLIENT.request(request).await {
            if let Some(server_header) = response.headers().get("server") {
                let server =
                    server_header
                        .to_str()
                        .map_err(|e| EnrichmentError::HttpBannerError {
                            domain: self.fqdn.clone(),
                            error: anyhow::Error::msg(e),
                        })?;
                return Ok(DomainMetadata {
                    fqdn: self.fqdn.clone(),
                    ips: None,
                    smtp: None,
                    http_banner: Some(String::from(server)),
                    geo_ip_lookups: None,
                    who_is_lookup: None,
                });
            }
        }
        // Either the request failed outright or no `server` header was
        // present — both collapse into a single banner error here.
        Err(EnrichmentError::HttpBannerError {
            domain: self.fqdn.clone(),
            error: anyhow::Error::msg("unable to extract or parse server header from response"),
        }
        .into())
    }
    /// Asynchronous cached `GeoIP` lookup. Interface deviates from the usual enrichment
    /// interfaces and requires the callee to pass a [`maxminddb::Reader`](https://docs.rs/maxminddb/0.15.0/maxminddb/struct.Reader.html)
    /// to perform the lookup through. Internally, the maxminddb call is blocking and
    /// may result in performance drops, however the lookups are in-memory.
    ///
    /// The only reason you would want to do this, is to be able to get back a `DomainMetadata`
    /// to then process as you would with other enrichment methods. Internally the lookup will
    /// try to stitch together the City, Country & Continent that the [`IpAddr`](https://doc.rust-lang.org/std/net/enum.IpAddr.html)
    /// resolves to.
    ///
    /// ```
    /// use maxminddb::Reader;
    /// use twistrs::enrich::DomainMetadata;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let reader = maxminddb::Reader::open_readfile("./data/MaxMind-DB/test-data/GeoIP2-City-Test.mmdb").unwrap();
    ///     let domain_metadata = DomainMetadata::new(String::from("www.phishdeck.com"));
    ///     println!("{:?}", domain_metadata.geoip_lookup(&reader).await);
    /// }
    /// ```
    ///
    /// ### Features
    ///
    /// This function requires the `geoip_lookup` feature toggled.
    #[cfg(feature = "geoip_lookup")]
    pub async fn geoip_lookup(
        &self,
        geoip: &maxminddb::Reader<Vec<u8>>,
    ) -> Result<DomainMetadata, Error> {
        let mut result: Vec<(IpAddr, String)> = Vec::new();
        match &self.ips {
            Some(ips) => {
                for ip in ips {
                    // IPs missing from the database are silently skipped.
                    if let Ok(Some(lookup_result)) = geoip.lookup::<geoip2::City>(*ip) {
                        // Build a "City, Country, Continent" string from
                        // whichever of the three components are present.
                        let mut geoip_string = String::new();
                        if lookup_result.city.is_some() {
                            geoip_string.push_str(
                                lookup_result
                                    .city
                                    .ok_or(EnrichmentError::GeoIpLookupError {
                                        domain: self.fqdn.clone(),
                                        error: anyhow::Error::msg("could not find city"),
                                    })?
                                    .names
                                    .ok_or(EnrichmentError::GeoIpLookupError {
                                        domain: self.fqdn.clone(),
                                        error: anyhow::Error::msg("could not find city names"),
                                    })?["en"],
                            );
                        }
                        if lookup_result.country.is_some() {
                            if !geoip_string.is_empty() {
                                geoip_string.push_str(", ");
                            }
                            geoip_string.push_str(
                                lookup_result
                                    .country
                                    .ok_or(EnrichmentError::GeoIpLookupError {
                                        domain: self.fqdn.clone(),
                                        error: anyhow::Error::msg("could not find country"),
                                    })?
                                    .names
                                    .ok_or(EnrichmentError::GeoIpLookupError {
                                        domain: self.fqdn.clone(),
                                        error: anyhow::Error::msg("could not find country names"),
                                    })?["en"],
                            );
                        }
                        if lookup_result.continent.is_some() {
                            if !geoip_string.is_empty() {
                                geoip_string.push_str(", ");
                            }
                            geoip_string.push_str(
                                lookup_result
                                    .continent
                                    .ok_or(EnrichmentError::GeoIpLookupError {
                                        domain: self.fqdn.clone(),
                                        error: anyhow::Error::msg("could not find continent"),
                                    })?
                                    .names
                                    .ok_or(EnrichmentError::GeoIpLookupError {
                                        domain: self.fqdn.clone(),
                                        error: anyhow::Error::msg("could not find continent name"),
                                    })?["en"],
                            );
                        }
                        result.push((*ip, geoip_string));
                    }
                }
                Ok(DomainMetadata {
                    fqdn: self.fqdn.clone(),
                    ips: None,
                    smtp: None,
                    http_banner: None,
                    geo_ip_lookups: Some(result),
                    who_is_lookup: None,
                })
            }
            // No IPs were resolved beforehand — nothing to look up.
            None => Ok(DomainMetadata::new(self.fqdn.clone())),
        }
    }
    /// Asynchronous `WhoIs` lookup using cached `WhoIs` server config. Note that
    /// the internal lookups are not async and so this should be considered
    /// a heavy/slow call.
    ///
    /// ```
    /// use twistrs::enrich::DomainMetadata;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let domain_metadata = DomainMetadata::new(String::from("www.phishdeck.com"));
    ///     println!("{:?}", domain_metadata.whois_lookup().await);
    /// }
    /// ```
    ///
    /// ### Features
    ///
    /// This function requires the `whois_lookup` feature toggled.
    #[cfg(feature = "whois_lookup")]
    pub async fn whois_lookup(&self) -> Result<DomainMetadata, Error> {
        let mut result = DomainMetadata::new(self.fqdn.clone());
        let mut whois_lookup_options =
            WhoIsLookupOptions::from_string(&self.fqdn).map_err(|e| {
                EnrichmentError::WhoIsLookupError {
                    domain: self.fqdn.to_string(),
                    error: e,
                }
            })?;
        whois_lookup_options.timeout = Some(std::time::Duration::from_secs(5));
        whois_lookup_options.follow = 1; // Only allow at most one redirect
        result.who_is_lookup = Some(
            WHOIS
                .lookup(whois_lookup_options)
                .map_err(|e| EnrichmentError::WhoIsLookupError {
                    domain: self.fqdn.to_string(),
                    error: e,
                })?
                .split("\r\n")
                // The only entries we care about are the ones that start with 3 spaces.
                // Ideally the whois_rust library would have parsed this nicely for us.
                .filter(|s| s.starts_with("   "))
                .collect::<Vec<&str>>()
                .join("\n"),
        );
        Ok(result)
    }
    /// Performs all FQDN enrichment methods on a given FQDN.
    /// This is the only function that returns a `Vec<DomainMetadata>`.
    ///
    /// # Panics
    ///
    /// Currently panics if any of the internal enrichment methods returns
    /// an Err.
    pub async fn all(&self) -> Result<Vec<DomainMetadata>, Error> {
        // @CLEANUP(JDB): This should use try_join! in the future instead
        #[cfg(feature = "smtp_lookup")]
        let mx_check = self.mx_check();
        let result = futures::join!(self.dns_resolvable(), self.http_banner());
        Ok(vec![
            result.0.unwrap(),
            #[cfg(feature = "smtp_lookup")]
            mx_check.await.unwrap(),
            result.1.unwrap(),
        ])
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[cfg(feature = "geoip_lookup")]
    use maxminddb;
    use futures::executor::block_on;
    // NOTE(review): these tests perform live network lookups (DNS/WHOIS for
    // example.com) and will fail in offline or sandboxed CI environments.
    #[tokio::test]
    async fn test_dns_lookup() {
        let domain_metadata = DomainMetadata::new(String::from("example.com"));
        assert!(block_on(domain_metadata.dns_resolvable()).is_ok());
    }
    #[tokio::test]
    #[cfg(feature = "geoip_lookup")]
    async fn test_geoip_lookup() {
        let domain_metadata = DomainMetadata::new(String::from("example.com"))
            .dns_resolvable()
            .await
            .unwrap();
        // MaxmindDB CSV entry for example.com subnet, prone to failure but saves space
        let reader =
            maxminddb::Reader::open_readfile("./data/MaxMind-DB/test-data/GeoIP2-City-Test.mmdb")
                .unwrap();
        assert!(domain_metadata.geoip_lookup(&reader).await.is_ok());
    }
    #[tokio::test]
    #[cfg(feature = "whois_lookup")]
    async fn test_whois_lookup() {
        let domain_metadata = DomainMetadata::new(String::from("example.com"));
        assert!(domain_metadata.whois_lookup().await.is_ok());
    }
}
| rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | false |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/twistrs/src/error.rs | twistrs/src/error.rs | use crate::enrich::EnrichmentError;
use crate::permutate::PermutationError;
use std::convert::Infallible;
/// Top-level error type for the crate, aggregating the permutation and
/// enrichment error domains behind a single transparent wrapper.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// Failure while generating domain permutations.
    #[error(transparent)]
    PermutationError(#[from] PermutationError),
    /// Failure during a domain-enrichment lookup.
    #[error(transparent)]
    EnrichmentError(#[from] EnrichmentError),
    /// Conversion convenience only; this variant can never actually occur.
    #[error(transparent)]
    Infallible(#[from] Infallible),
}
| rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | false |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/twistrs/src/mod.rs | twistrs/src/mod.rs | pub mod constants;
| rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | false |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/twistrs/src/constants.rs | twistrs/src/constants.rs | use phf::phf_map;
#[cfg(feature = "whois_lookup")]
use whois_rust::WhoIs;
use hyper::client::Client;
use hyper::client::HttpConnector;
// Include further constants such as dictionaries that are
// generated during compile time.
include!(concat!(env!("OUT_DIR"), "/data.rs"));
lazy_static! {
    /// All keyboard layouts consulted when generating keyboard-proximity
    /// permutations.
    pub static ref KEYBOARD_LAYOUTS: Vec<&'static phf::Map<char, &'static str>> = vec![
        &QWERTY_KEYBOARD_LAYOUT,
        &QWERTZ_KEYBOARD_LAYOUT,
        &AZERTY_KEYBOARD_LAYOUT
    ];
    /// Global HTTP client we use throughout the library
    pub static ref HTTP_CLIENT: Client<HttpConnector> = Client::builder()
        .pool_idle_timeout(std::time::Duration::from_secs(30))
        .http2_only(false)
        .http1_read_buf_exact_size(1024)
        .retry_canceled_requests(false)
        .build(http_connector());
}
// This is currently a bit annoying, however since the WHOIS lookup table
// is built at runtime, and is feature-gated, we cannot have this activated
// within the original lazy_static! macro. We would need to block the
// entire macro behind the feature gate instead.
#[cfg(feature = "whois_lookup")]
lazy_static! {
    /// Shared WHOIS server configuration, parsed once from the embedded JSON.
    pub static ref WHOIS: WhoIs = WhoIs::from_string(WHOIS_RAW_JSON).unwrap();
}
/// Internal helper to create an HTTP Connector.
///
/// The connector is tuned for banner grabbing: a small (1 KiB) receive
/// buffer, a 5-second connect timeout and plain-HTTP URIs only.
fn http_connector() -> HttpConnector {
    let mut connector = HttpConnector::new();
    connector.enforce_http(true);
    connector.set_connect_timeout(Some(std::time::Duration::from_secs(5)));
    connector.set_recv_buffer_size(Some(1024));
    connector
}
/// Applies a default limit to the `vowel_shuffling` permutation method to avoid blowing up the
/// number of permutations.
pub(crate) const VOWEL_SHUFFLE_CEILING: usize = 6;
/// Static list of lowercase ASCII characters (`a`–`z`, in order).
pub static ASCII_LOWER: [char; 26] = [
    'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
    't', 'u', 'v', 'w', 'x', 'y', 'z',
];
// Key-adjacency table for the QWERTY layout: each character maps to the
// characters found on its physically neighbouring keys.
static QWERTY_KEYBOARD_LAYOUT: phf::Map<char, &'static str> = phf_map! {
    '1' => "2q",
    '2' => "3wq1",
    '3' => "4ew2",
    '4' => "5re3",
    '5' => "6tr4",
    '6' => "7yt5",
    '7' => "8uy6",
    '8' => "9iu7",
    '9' => "0oi8",
    '0' => "po9",
    'q' => "12wa",
    'w' => "3esaq2",
    'e' => "4rdsw3",
    'r' => "5tfde4",
    't' => "6ygfr5",
    'y' => "7uhgt6",
    'u' => "8ijhy7",
    'i' => "9okju8",
    'o' => "0plki9",
    'p' => "lo0",
    'a' => "qwsz",
    's' => "edxzaw",
    'd' => "rfcxse",
    'f' => "tgvcdr",
    'g' => "yhbvft",
    'h' => "ujnbgy",
    'j' => "ikmnhu",
    'k' => "olmji",
    'l' => "kop",
    'z' => "asx",
    'x' => "zsdc",
    'c' => "xdfv",
    'v' => "cfgb",
    'b' => "vghn",
    'n' => "bhjm",
    'm' => "njk"
};
// Key-adjacency table for the QWERTZ layout (German-style; `y` and `z`
// positions swapped relative to QWERTY).
static QWERTZ_KEYBOARD_LAYOUT: phf::Map<char, &'static str> = phf_map! {
    '1'=> "2q",
    '2'=> "3wq1",
    '3'=> "4ew2",
    '4'=> "5re3",
    '5'=> "6tr4",
    '6'=> "7zt5",
    '7'=> "8uz6",
    '8'=> "9iu7",
    '9'=> "0oi8",
    '0'=> "po9",
    'q'=> "12wa",
    'w'=> "3esaq2",
    'e'=> "4rdsw3",
    'r'=> "5tfde4",
    't'=> "6zgfr5",
    'z'=> "7uhgt6",
    'u'=> "8ijhz7",
    'i'=> "9okju8",
    'o'=> "0plki9",
    'p'=> "lo0",
    'a'=> "qwsy",
    's'=> "edxyaw",
    'd'=> "rfcxse",
    'f'=> "tgvcdr",
    'g'=> "zhbvft",
    'h'=> "ujnbgz",
    'j'=> "ikmnhu",
    'k'=> "olmji",
    'l'=> "kop",
    'y'=> "asx",
    'x'=> "ysdc",
    'c'=> "xdfv",
    'v'=> "cfgb",
    'b'=> "vghn",
    'n'=> "bhjm",
    'm'=> "njk"
};
// Key-adjacency table for the AZERTY layout (French-style).
static AZERTY_KEYBOARD_LAYOUT: phf::Map<char, &'static str> = phf_map! {
    '1'=> "2a",
    '2'=> "3za1",
    '3'=> "4ez2",
    '4'=> "5re3",
    '5'=> "6tr4",
    '6'=> "7yt5",
    '7'=> "8uy6",
    '8'=> "9iu7",
    '9'=> "0oi8",
    '0'=> "po9",
    'a'=> "2zq1",
    'z'=> "3esqa2",
    'e'=> "4rdsz3",
    'r'=> "5tfde4",
    't'=> "6ygfr5",
    'y'=> "7uhgt6",
    'u'=> "8ijhy7",
    'i'=> "9okju8",
    'o'=> "0plki9",
    'p'=> "lo0m",
    'q'=> "zswa",
    's'=> "edxwqz",
    'd'=> "rfcxse",
    'f'=> "tgvcdr",
    'g'=> "yhbvft",
    'h'=> "ujnbgy",
    'j'=> "iknhu",
    'k'=> "olji",
    'l'=> "kopm",
    'm'=> "lp",
    'w'=> "sxq",
    'x'=> "wsdc",
    'c'=> "xdfv",
    'v'=> "cfgb",
    'b'=> "vghn",
    'n'=> "bhj"
};
/// Unicode homoglyphs: for each ASCII letter, a string of characters that are
/// visually confusable with it.
pub static HOMOGLYPHS: phf::Map<char, &'static str> = phf_map! {
    'a' => "àáâãäåɑạǎăȧą",
    'b' => "dʙɓḃḅḇƅ",
    'c' => "eƈċćçčĉo",
    'd' => "bɗđďɖḑḋḍḏḓ",
    'e' => "céèêëēĕěėẹęȩɇḛ",
    'f' => "ƒḟ",
    'g' => "qɢɡġğǵģĝǧǥ",
    'h' => "ĥȟħɦḧḩⱨḣḥḫẖ",
    'i' => "1líìïıɩǐĭỉịɨȋī",
    'j' => "ʝɉ",
    'k' => "ḳḵⱪķ",
    'l' => "1iɫł",
    'm' => "nṁṃᴍɱḿ",
    'n' => "mrńṅṇṉñņǹňꞑ",
    'o' => "0ȯọỏơóö",
    'p' => "ƿƥṕṗ",
    'q' => "gʠ",
    'r' => "ʀɼɽŕŗřɍɾȓȑṙṛṟ",
    's' => "ʂśṣṡșŝš",
    't' => "ţŧṫṭțƫ",
    'u' => "ᴜǔŭüʉùúûũūųưůűȕȗụ",
    'v' => "ṿⱱᶌṽⱴ",
    'w' => "ŵẁẃẅⱳẇẉẘ",
    'y' => "ʏýÿŷƴȳɏỿẏỵ",
    'z' => "ʐżźᴢƶẓẕⱬ"
};
/// Visually or phonetically similar substitutions for single characters and
/// digraphs (e.g. `"m"` → `"rn"`/`"nn"`, `"0"` → `"o"`); an empty slice means
/// a character has no mapped alternatives.
pub static MAPPED_VALUES: phf::Map<&'static str, &'static [&'static str]> = phf_map! {
    "a" => &["4"],
    "b" => &["8", "6"],
    "c" => &[],
    "d" => &["cl"],
    "e" => &["3"],
    "f" => &["ph"],
    "g" => &["9", "6"],
    "h" => &[],
    "i" => &["1", "l"],
    "j" => &[],
    "k" => &[],
    "l" => &["1", "i"],
    "m" => &["rn", "nn"],
    "n" => &[],
    "o" => &["0"],
    "p" => &[],
    "q" => &["9"],
    "r" => &[],
    "s" => &["5", "z"],
    "t" => &["7"],
    "u" => &["v"],
    "v" => &["u"],
    "w" => &["vv"],
    "x" => &[],
    "y" => &[],
    "z" => &["2", "s"],
    "0" => &["o"],
    "1" => &["i", "l"],
    "2" => &["z"],
    "3" => &["e"],
    "4" => &["a"],
    "5" => &["s"],
    "6" => &["b", "g"],
    "7" => &["t"],
    "8" => &["b"],
    "9" => &["g", "q"],
    "ck" => &["kk"],
    "oo" => &["00"],
};
/// The five ASCII vowels, in alphabetical order.
pub static VOWELS: [char; 5] = ['a', 'e', 'i', 'o', 'u'];
| rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | false |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/twistrs/src/permutate.rs | twistrs/src/permutate.rs | //! The permutation module exposes functionality around generating
//! multiple valid variations of a given domain. Note that this
//! module is _only_ concerned with generating possible permutations
//! of a given domain.
//!
//! For details on how to validate whether domains are actively used,
//! please see `enrich.rs`.
//!
//! Example:
//!
//! ```
//! use twistrs::{
//! permutate::{Domain, Permutation},
//! filter::{Filter, Permissive},
//! };
//!
//! let domain = Domain::new("google.com").unwrap();
//! let domain_permutations: Vec<Permutation> = domain.all(&Permissive).collect();
//! ```
//!
//! Additionally the permutation module can be used independently
//! from the enrichment module.
use crate::constants::{
ASCII_LOWER, HOMOGLYPHS, KEYBOARD_LAYOUTS, MAPPED_VALUES, VOWELS, VOWEL_SHUFFLE_CEILING,
};
use crate::error::Error;
use crate::filter::Filter;
use std::collections::HashSet;
use addr::parser::DomainName;
use addr::psl::List;
use itertools::{repeat_n, Itertools};
use serde::{Deserialize, Serialize};
// Include further constants such as dictionaries that are
// generated during compile time.
include!(concat!(env!("OUT_DIR"), "/data.rs"));
use crate::tlds::TLDS;
/// Wrapper around an FQDN to perform permutations against.
///
/// Construct via [`Domain::new`] (validated against the known TLD list) or
/// [`Domain::raw`] (looser validation).
#[derive(Clone, Hash, Default, Debug, Serialize, Deserialize, Eq, PartialEq, Ord, PartialOrd)]
pub struct Domain {
    /// The domain FQDN to generate permutations from.
    pub fqdn: String,
    /// The top-level domain of the FQDN (e.g. `.com`).
    pub tld: String,
    /// The remainder of the domain (e.g. `google`).
    pub domain: String,
}
/// A single generated permutation: the resulting [`Domain`] together with the
/// kind of transformation that produced it.
#[derive(Clone, Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct Permutation {
    /// The permuted domain.
    pub domain: Domain,
    /// Which permutation strategy produced this domain.
    pub kind: PermutationKind,
}
/// The permutation strategy that produced a [`Permutation`].
#[derive(Clone, Copy, Serialize, Deserialize, Hash, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub enum PermutationKind {
    Addition,
    Bitsquatting,
    Hyphenation,
    Insertion,
    Omission,
    Repetition,
    Replacement,
    Subdomain,
    Transposition,
    VowelSwap,
    VowelShuffle,
    DoubleVowelInsertion,
    Keyword,
    Tld,
    Homoglyph,
    Mapped,
    // @NOTE(juxhin): this is particularly ugly, as we should not be leaking internal permutation
    // kinds publicly into the library. In reality `PermutationKind` should be wrapped internally
    // to avoid confusing the library.
    //
    // For context to anyone that happens to read this -- this has been added to support
    // certificate transparency generated domains as part of our [certgrep](https://certgrep.sh/)
    // project.
    CertificateTransparency,
}
/// Errors produced while constructing or permuting a [`Domain`].
#[derive(Clone, thiserror::Error, Debug)]
pub enum PermutationError {
    /// The input string could not be parsed/validated as a domain name.
    #[error("invalid domain name, (expected {expected:?}, found {found:?})")]
    InvalidDomain { expected: String, found: String },
    /// A homoglyph substitution could not be applied to the domain.
    #[error("error generating homoglyph permutation (domain {domain:?}, homoglyph {homoglyph:?})")]
    InvalidHomoglyph { domain: String, homoglyph: String },
}
impl Domain {
/// Wrap a desired FQDN into a `Domain` container. Internally
/// will perform additional operations to break the domain into
/// one or more chunks to be used during domain permutations.
///
/// # Errors
///
/// Returns [`PermutationError::InvalidDomain`] when the FQDN cannot be
/// parsed, has no registrable root domain, or when its suffix is not
/// present in the bundled `TLDS` list.
pub fn new(fqdn: &str) -> Result<Domain, Error> {
    // Parse against the public-suffix list; rejects syntactically invalid names.
    let parsed_domain =
        List.parse_domain_name(fqdn)
            .map_err(|_| PermutationError::InvalidDomain {
                expected: "valid domain name that can be parsed".to_string(),
                found: fqdn.to_string(),
            })?;
    // The registrable part of the name (root label + public suffix).
    let root_domain = parsed_domain
        .root()
        .ok_or(PermutationError::InvalidDomain {
            expected: "valid domain name with a root domain".to_string(),
            found: fqdn.to_string(),
        })?;
    let tld = parsed_domain.suffix().to_string();
    // Verify that the TLD is in the list of known TLDs, this requires that
    // the TLD data list is already ordered, otherwise the result of the
    // binary search is meaningless. We also assume that all TLDs generated
    // are lowercase already.
    if TLDS.binary_search(&tld.as_str()).is_ok() {
        let domain = Domain {
            fqdn: fqdn.to_string(),
            tld,
            // Keep only the first label of the root (everything before the
            // first dot), i.e. the registrable name without its suffix.
            domain: root_domain
                .find('.')
                .and_then(|offset| root_domain.get(..offset))
                // this should never error out since `root_domain` is a valid domain name
                .ok_or(PermutationError::InvalidDomain {
                    expected: "valid domain name with a root domain".to_string(),
                    found: fqdn.to_string(),
                })?
                .to_string(),
        };
        Ok(domain)
    } else {
        let err = PermutationError::InvalidDomain {
            expected: "valid domain tld in the list of accepted tlds globally".to_string(),
            found: tld,
        };
        Err(err.into())
    }
}
/// Specialised form of `Domain::new` that does not perform certain validations
/// (notably, no check of the suffix against the bundled `TLDS` list). This
/// enables downstream users to generate domains faster, with looser
/// validation requirements.
///
/// # Errors
///
/// Returns [`PermutationError::InvalidDomain`] when the FQDN cannot be
/// parsed or has no registrable root domain.
pub fn raw(fqdn: &str) -> Result<Domain, Error> {
    let parsed_domain =
        List.parse_domain_name(fqdn)
            .map_err(|_| PermutationError::InvalidDomain {
                expected: "valid domain name that can be parsed".to_string(),
                found: fqdn.to_string(),
            })?;
    let root_domain = parsed_domain
        .root()
        .ok_or(PermutationError::InvalidDomain {
            expected: "valid domain name with a root domain".to_string(),
            found: fqdn.to_string(),
        })?;
    let tld = parsed_domain.suffix().to_string();
    Ok(Domain {
        fqdn: fqdn.to_string(),
        tld,
        // First label of the root domain (same extraction as `Domain::new`).
        domain: root_domain
            .find('.')
            .and_then(|offset| root_domain.get(..offset))
            // this should never error out since `root_domain` is a valid domain name
            .ok_or(PermutationError::InvalidDomain {
                expected: "valid domain name with a root domain".to_string(),
                found: fqdn.to_string(),
            })?
            .to_string(),
    })
}
/// Generate any and all possible domain permutations for a given `Domain`.
///
/// Returns a lazy `Iterator<Item = Permutation>` chaining the results of
/// every individual permutation method, each validated and filtered by the
/// supplied `filter`.
///
/// Any future permutations will also be included into this function call
/// without any changes required from any client implementations.
pub fn all<'a>(&'a self, filter: &'a impl Filter) -> impl Iterator<Item = Permutation> + 'a {
    self.addition(filter)
        .chain(self.bitsquatting(filter))
        .chain(self.hyphenation(filter))
        .chain(self.hyphenation_tld_boundary(filter))
        .chain(self.insertion(filter))
        .chain(self.omission(filter))
        .chain(self.repetition(filter))
        .chain(self.replacement(filter))
        .chain(self.subdomain(filter))
        .chain(self.transposition(filter))
        .chain(self.vowel_swap(filter))
        // Vowel shuffling is exponential in the number of vowels, so it is
        // capped by the module-level `VOWEL_SHUFFLE_CEILING`.
        .chain(self.vowel_shuffle(VOWEL_SHUFFLE_CEILING, filter))
        .chain(self.double_vowel_insertion(filter))
        .chain(self.keyword(filter))
        .chain(self.tld(filter))
        .chain(self.mapped(filter))
        .chain(self.homoglyph(filter))
}
/// Add every ASCII lowercase character between the Domain
/// (e.g. `google`) and top-level domain (e.g. `.com`),
/// producing candidates such as `googlea.com` ... `googlez.com`.
pub fn addition<'a>(
    &'a self,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            // One candidate per lowercase letter appended to the root label.
            ASCII_LOWER
                .iter()
                .map(move |c| format!("{}{}.{}", self.domain, c, self.tld))
        },
        PermutationKind::Addition,
        filter,
    )
}
/// Following implementation takes inspiration from the following content:
///
/// - <`https://github.com/artemdinaburg/bitsquat-script/blob/master/bitsquat.py`>
/// - <`http://dinaburg.org/bitsquatting.html`>
///
/// Go through each char in the domain and XOR it against 8 separate masks:
///
/// 00000001 ^ chr
/// 00000010 ^ chr
/// 00000100 ^ chr
/// 00001000 ^ chr
/// 00010000 ^ chr
/// 00100000 ^ chr
/// 01000000 ^ chr
/// 10000000 ^ chr
///
/// Then check if the resulting bit operation falls within ASCII range
/// (`0-9`, `a-z`, or `-`); each surviving character is inserted at every
/// interior position of the FQDN to form candidates.
pub fn bitsquatting<'a>(
    &'a self,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            self.fqdn
                .chars()
                .flat_map(move |c| {
                    (0..8).filter_map(move |mask_index| {
                        let mask = 1 << mask_index;
                        // Can the below panic? Should we use a wider range (u32)?
                        // NOTE(review): `c as u8` truncates non-ASCII chars — assumes
                        // the FQDN is ASCII; confirm upstream validation guarantees this.
                        let squatted_char: u8 = mask ^ (c as u8);
                        // Make sure we remain with ASCII range that we are happy with:
                        // digits (48-57), lowercase letters (97-122), or hyphen (45).
                        if ((48..=57).contains(&squatted_char))
                            || ((97..=122).contains(&squatted_char))
                            || squatted_char == 45
                        {
                            // Insert the flipped character at every interior index.
                            Some((1..self.fqdn.len()).map(move |idx| {
                                let mut permutation = self.fqdn.to_string();
                                permutation.insert(idx, squatted_char as char);
                                permutation
                            }))
                        } else {
                            None
                        }
                    })
                })
                .flatten()
        },
        PermutationKind::Bitsquatting,
        filter,
    )
}
/// Permutation method that replaces ASCII characters with multiple homoglyphs
/// similar to the respective ASCII character. Candidates are deduplicated
/// through a `HashSet` before being yielded.
pub fn homoglyph<'a>(
    &'a self,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    // Convert the candidate into a vector of chars for proper indexing.
    Self::permutation(
        move || {
            let chars: Vec<char> = self.fqdn.chars().collect();
            let len = chars.len();
            // Deduplicate: the same candidate can arise from different windows.
            let mut results = HashSet::new();
            // For each possible window size (from 1 to the full length)
            for ws in 1..=len {
                // For each starting index of the window
                for i in 0..=len - ws {
                    let window = &chars[i..i + ws];
                    // Iterate over each character position in the window.
                    for j in 0..window.len() {
                        let c = window[j];
                        // Look up available homoglyphs for this character.
                        if let Some(glyphs) = HOMOGLYPHS.get(&c) {
                            // For each homoglyph candidate, create a new window and candidate string.
                            for g in glyphs.chars() {
                                let mut new_window: Vec<char> = window.to_vec();
                                new_window[j] = g;
                                // Reassemble the new candidate string:
                                // prefix + modified window + suffix.
                                let new_candidate: String = chars[..i]
                                    .iter()
                                    .chain(new_window.iter())
                                    .chain(chars[i + ws..].iter())
                                    .collect();
                                results.insert(new_candidate);
                            }
                        }
                    }
                }
            }
            results.into_iter()
        },
        PermutationKind::Homoglyph,
        filter,
    )
}
/// Permutation method that inserts hyphens (i.e. `-`) between each
/// character in the domain where valid; invalid candidates are discarded
/// later by `Domain::new` inside `permutation()`.
pub fn hyphenation<'a>(
    &'a self,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            // NOTE(review): `enumerate()` after `skip(1)` restarts at 0, so the
            // first candidate inserts '-' at index 0 (string start), not between
            // the first two characters — confirm this offset is intended.
            self.fqdn.chars().skip(1).enumerate().map(move |(i, _)| {
                let mut permutation = self.fqdn.to_string();
                permutation.insert(i, '-');
                permutation
            })
        },
        PermutationKind::Hyphenation,
        filter,
    )
}
/// In cases of multi-level TLDs, will swap the top-level dot to a hyphen. For example
/// `abcd.co.uk` would map to `abcd-co.uk`. Internally this still maps to the `Hyphenation`
/// permutation kind, however is a refined subset for performance purposes. Will always yield
/// at most, one permutation (zero when the TLD is single-level).
pub fn hyphenation_tld_boundary<'a>(
    &'a self,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            // `then(..)` returns `Option<String>` with a single concrete type
            // whether it is `Some` or `None`.
            (self.tld.contains('.'))
                .then(|| format!("{}-{}", self.domain, self.tld))
                .into_iter() // Option → IntoIter (0‒1 items)
        },
        PermutationKind::Hyphenation,
        filter,
    )
}
/// Permutation method that inserts specific characters that are close to
/// any character in the domain depending on the keyboard (e.g. `Q` next
/// to `W` in qwerty keyboard layout).
pub fn insertion<'a>(
    &'a self,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            // NOTE(review): as with `hyphenation`, `enumerate()` after `skip(1)`
            // restarts at 0, so insertion happens one position earlier than the
            // inspected character — confirm the offset is intended.
            self.fqdn
                .chars()
                .skip(1) // We don't want to insert at the beginning of the domain...
                .take(self.fqdn.len() - 2) // ...or at the end of the domain.
                .enumerate()
                .flat_map(move |(i, c)| {
                    // For every configured keyboard layout, look up the keys
                    // adjacent to `c` and insert each one at position `i`.
                    KEYBOARD_LAYOUTS.iter().flat_map(move |layout| {
                        layout
                            .get(&c) // Option<&[char]>
                            .into_iter()
                            .map(move |keyboard_chars| {
                                keyboard_chars.chars().map(move |keyboard_char| {
                                    let mut permutation = self.fqdn.to_string();
                                    permutation.insert(i, keyboard_char);
                                    permutation.to_string()
                                })
                            })
                    })
                })
                .flatten()
        },
        PermutationKind::Insertion,
        filter,
    )
}
/// Permutation method that selectively removes a character from the domain,
/// one candidate per character position in the FQDN.
pub fn omission<'a>(
    &'a self,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            self.fqdn.chars().enumerate().map(move |(i, _)| {
                let mut permutation = self.fqdn.to_string();
                permutation.remove(i);
                permutation
            })
        },
        PermutationKind::Omission,
        filter,
    )
}
/// Permutation method that repeats characters twice provided they are
/// alphabetic characters (e.g. `google.com` -> `gooogle.com`).
pub fn repetition<'a>(
    &'a self,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            // NOTE(review): `i` is a char index used as a byte offset in the
            // slice below; safe only for ASCII FQDNs — confirm inputs are ASCII.
            self.fqdn.chars().enumerate().filter_map(move |(i, c)| {
                if c.is_alphabetic() {
                    // Duplicate `c` in place: prefix (incl. c) + c + suffix.
                    Some(format!("{}{}{}", &self.fqdn[..=i], c, &self.fqdn[i + 1..]))
                } else {
                    None
                }
            })
        },
        PermutationKind::Repetition,
        filter,
    )
}
/// Permutation method similar to insertion, except that it replaces a given
/// character with another character in proximity depending on keyboard layout.
pub fn replacement<'a>(
    &'a self,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            self.fqdn
                .chars()
                .skip(1) // We don't want to insert at the beginning of the domain...
                .take(self.fqdn.len() - 2) // ...or at the end of the domain.
                .enumerate()
                .flat_map(move |(i, c)| {
                    KEYBOARD_LAYOUTS.iter().filter_map(move |layout| {
                        // Swap the character at byte offset `i` for each of its
                        // keyboard neighbours in this layout.
                        layout.get(&c).map(move |keyboard_chars| {
                            keyboard_chars.chars().map(move |keyboard_char| {
                                format!(
                                    "{}{}{}",
                                    &self.fqdn[..i],
                                    keyboard_char,
                                    &self.fqdn[i + 1..]
                                )
                            })
                        })
                    })
                })
                .flatten()
        },
        PermutationKind::Replacement,
        filter,
    )
}
/// Permutation method that inserts a dot between adjacent characters to
/// introduce a subdomain boundary (e.g. `example.com` -> `e.xample.com`),
/// skipping the final characters of the FQDN and any adjacent pair that
/// consists of a hyphen and a dot.
pub fn subdomain<'a>(
    &'a self,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            self.fqdn
                .chars()
                .take(self.fqdn.len() - 3)
                .enumerate()
                .tuple_windows()
                .filter_map(move |((_, c1), (i2, c2))| {
                    // Skip only when the pair contains BOTH '-' and '.'.
                    // NOTE(review): a pair where just one of c1/c2 is '-' or '.'
                    // is still split — confirm that is the intended rule.
                    if ['-', '.'].iter().all(|x| [c1, c2].contains(x)) {
                        None
                    } else {
                        Some(format!("{}.{}", &self.fqdn[..i2], &self.fqdn[i2..]))
                    }
                })
        },
        PermutationKind::Subdomain,
        filter,
    )
}
/// Permutation method that swaps adjacent characters in the domain (e.g.
/// `google.com` -> `goolge.com`). Pairs of identical characters are skipped
/// since swapping them would reproduce the original FQDN.
pub fn transposition<'a>(
    &'a self,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            self.fqdn.chars().enumerate().tuple_windows().filter_map(
                move |((i1, c1), (i2, c2))| {
                    if c1 == c2 {
                        None
                    } else {
                        // prefix + swapped pair + suffix (byte offsets; ASCII assumed).
                        Some(format!(
                            "{}{}{}{}",
                            &self.fqdn[..i1],
                            c2,
                            c1,
                            &self.fqdn[i2 + 1..]
                        ))
                    }
                },
            )
        },
        PermutationKind::Transposition,
        filter,
    )
}
/// Permutation method that swaps vowels for other vowels (e.g.
/// `google.com` -> `gougle.com`), one substitution per candidate.
pub fn vowel_swap<'a>(
    &'a self,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            self.fqdn
                .chars()
                .enumerate()
                .filter_map(move |(i, c)| {
                    if VOWELS.contains(&c.to_ascii_lowercase()) {
                        // Replace this vowel with every OTHER vowel.
                        Some(VOWELS.iter().filter_map(move |vowel| {
                            if *vowel == c {
                                None
                            } else {
                                Some(format!(
                                    "{}{}{}",
                                    &self.fqdn[..i],
                                    vowel,
                                    &self.fqdn[i + 1..]
                                ))
                            }
                        }))
                    } else {
                        None
                    }
                })
                .flatten()
        },
        PermutationKind::VowelSwap,
        filter,
    )
}
/// A superset of [`vowel_swap`][`vowel_swap`], which computes the multiple cartesian product
/// of all vowels found in the domain, and maps them against their indices.
///
/// * `ceil`: limit the upperbound exponent of possible permutations that can be generated
///   (i.e., 5^{ceil}) where 5 is the number of possible vowels, and `{ceil}` is the
///   number of products to generate
pub fn vowel_shuffle<'a>(
    &'a self,
    ceil: usize,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            // Byte positions of every vowel in the root label.
            // NOTE(review): unlike `vowel_swap`, no `to_ascii_lowercase` here —
            // assumes the label is already lowercase; confirm upstream.
            let vowel_positions = self
                .domain
                .chars()
                .enumerate()
                .filter_map(|(i, c)| if VOWELS.contains(&c) { Some(i) } else { None })
                .collect_vec();
            // |cartesian_product| = |VOWELS|^n = 5^n, capped at 5^ceil.
            let products =
                repeat_n(VOWELS, vowel_positions.len().min(ceil)).multi_cartesian_product();
            products.map(move |replacement| {
                // build the new label; `zip` truncates to the shorter side, so
                // positions beyond `ceil` keep their original vowel.
                let mut label: Vec<char> = self.domain.chars().collect();
                for (pos, &new_vowel) in vowel_positions.iter().zip(&replacement) {
                    label[*pos] = new_vowel;
                }
                let fqdn = format!("{}.{}", label.iter().collect::<String>(), self.tld);
                fqdn
            })
        },
        PermutationKind::VowelShuffle,
        filter,
    )
}
/// Permutation method that inserts every lowercase ascii character between
/// two adjacent vowels (e.g. `aa` -> `aba`, `aca`, ...).
pub fn double_vowel_insertion<'a>(
    &'a self,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            self.fqdn
                .chars()
                .enumerate()
                .tuple_windows()
                .filter_map(move |((i1, c1), (i2, c2))| {
                    if VOWELS.contains(&c1.to_ascii_lowercase())
                        && VOWELS.contains(&c2.to_ascii_lowercase())
                    {
                        // Insert each lowercase letter between the vowel pair
                        // (byte offsets; ASCII assumed).
                        Some(ASCII_LOWER.iter().map(move |inserted| {
                            format!("{}{inserted}{}", &self.fqdn[..=i1], &self.fqdn[i2..])
                        }))
                    } else {
                        None
                    }
                })
                .flatten()
        },
        PermutationKind::DoubleVowelInsertion,
        filter,
    )
}
/// Permutation mode that appends and prepends common keywords to the
/// domain in the following order:
///
/// 1. Prepend keyword and dash (e.g. `foo.com` -> `word-foo.com`)
/// 2. Prepend keyword (e.g. `foo.com` -> `wordfoo.com`)
/// 3. Append keyword and dash (e.g. `foo.com` -> `foo-word.com`)
/// 4. Append keyword (e.g. `foo.com` -> `fooword.com`)
pub fn keyword<'a>(
    &'a self,
    filter: &'a impl Filter,
) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            // Four candidates per keyword, matching the ordering documented above.
            KEYWORDS.iter().flat_map(move |keyword| {
                vec![
                    format!("{}-{}.{}", &self.domain, keyword, &self.tld),
                    format!("{}{}.{}", &self.domain, keyword, &self.tld),
                    format!("{}-{}.{}", keyword, &self.domain, &self.tld),
                    format!("{}{}.{}", keyword, &self.domain, &self.tld),
                ]
                .into_iter()
            })
        },
        PermutationKind::Keyword,
        filter,
    )
}
/// Permutation method that replaces all TLDs as variations of the
/// root domain passed (e.g. `google.com` -> `google.net`, `google.org`, ...).
pub fn tld<'a>(&'a self, filter: &'a impl Filter) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            // One candidate per known TLD, keeping the root label intact.
            TLDS.iter()
                .map(move |tld| format!("{}.{}", &self.domain, tld))
        },
        // Fix: these candidates were previously mislabelled as
        // `PermutationKind::Mapped`, leaving the `Tld` variant unused.
        PermutationKind::Tld,
        filter,
    )
}
/// Permutation method that maps one or more characters into another
/// set of one or more characters that are similar, or easy to miss,
/// such as `d` -> `cl`, `ck` -> `kk`. Mappings come from the static
/// `MAPPED_VALUES` table and every occurrence of a key is replaced at once.
pub fn mapped<'a>(&'a self, filter: &'a impl Filter) -> impl Iterator<Item = Permutation> + 'a {
    Self::permutation(
        move || {
            let mut results = vec![];
            for (key, values) in MAPPED_VALUES.entries() {
                if self.domain.contains(key) {
                    // Split once; the iterator is cloned per mapped value below.
                    let parts = self.domain.split(key);
                    for mapped_value in *values {
                        // Re-join the split label with the replacement sequence.
                        let result = format!(
                            "{domain}.{tld}",
                            domain = parts.clone().join(mapped_value),
                            tld = self.tld
                        );
                        results.push(result);
                    }
                }
            }
            results.into_iter()
        },
        PermutationKind::Mapped,
        filter,
    )
}
/// Auxiliary function that wraps each permutation generator in order to perform
/// validation and filtering of results. This leaves us with a trimmed down list
/// of permutations that are valid domains and accepted by the `Filter` passed.
/// Candidates that fail `Domain::new` are silently dropped.
fn permutation<'a, S, T: Fn() -> S + 'a, U: Filter + 'a>(
    f: T,
    kind: PermutationKind,
    filter: &'a U,
) -> impl Iterator<Item = Permutation> + use<'a, S, T, U>
where
    S: Iterator<Item = String> + 'a,
{
    f().filter_map(move |candidate| {
        // Re-parse each raw candidate string; only valid, filter-accepted
        // domains become `Permutation`s.
        if let Ok(domain) = Domain::new(candidate.as_str()) {
            if filter.matches(&domain) {
                return Some(Permutation { domain, kind });
            }
        }
        None
    })
}
}
#[cfg(test)]
mod tests {
use crate::filter::{Permissive, Substring};
use super::*;
#[test]
fn test_all_mode() {
    // Every permutation strategy combined should yield at least one candidate.
    let domain = Domain::new("www.example.com").unwrap();
    let permutations = domain.all(&Permissive).collect::<Vec<_>>();
    assert!(!permutations.is_empty());
}
#[test]
fn test_addition_mode() {
    // One candidate per lowercase ASCII letter appended to the root label.
    let domain = Domain::new("www.example.com").unwrap();
    let permutations = dbg!(domain.addition(&Permissive).collect::<Vec<_>>());
    assert_eq!(permutations.len(), ASCII_LOWER.len());
}
#[test]
fn test_bitsquatting_mode() {
    let domain = Domain::new("www.example.com").unwrap();
    let permutations = dbg!(domain.bitsquatting(&Permissive).collect::<Vec<_>>());
    assert!(!permutations.is_empty());
}
#[test]
fn test_homoglyph_mode() {
    let domain = Domain::new("www.example.com").unwrap();
    let permutations = dbg!(domain.homoglyph(&Permissive).collect::<Vec<_>>());
    assert!(!permutations.is_empty());
}
#[test]
fn test_hyphenation_mode() {
    let domain = Domain::new("www.example.com").unwrap();
    let permutations = dbg!(domain.hyphenation(&Permissive).collect::<Vec<_>>());
    assert!(!permutations.is_empty());
}
#[test]
fn test_insertion_mode() {
    let domain = Domain::new("www.example.com").unwrap();
    let permutations = dbg!(domain.insertion(&Permissive).collect::<Vec<_>>());
    assert!(!permutations.is_empty());
}
#[test]
fn test_omission_mode() {
    let domain = Domain::new("www.example.com").unwrap();
    let permutations = dbg!(domain.omission(&Permissive).collect::<Vec<_>>());
    assert!(!permutations.is_empty());
}
#[test]
fn test_repetition_mode() {
    let domain = Domain::new("www.example.com").unwrap();
    let permutations = dbg!(domain.repetition(&Permissive).collect::<Vec<_>>());
    assert!(!permutations.is_empty());
}
#[test]
fn test_replacement_mode() {
    let domain = Domain::new("www.example.com").unwrap();
    let permutations = dbg!(domain.replacement(&Permissive).collect::<Vec<_>>());
    assert!(!permutations.is_empty());
}
#[test]
fn test_subdomain_mode() {
    let domain = Domain::new("www.example.com").unwrap();
    let permutations = dbg!(domain.subdomain(&Permissive).collect::<Vec<_>>());
    assert!(!permutations.is_empty());
}
#[test]
fn test_transposition_mode() {
    let domain = Domain::new("www.example.com").unwrap();
    let permutations = dbg!(domain.transposition(&Permissive).collect::<Vec<_>>());
    assert!(!permutations.is_empty());
}
#[test]
fn test_vowel_swap_mode() {
    let domain = Domain::new("www.example.com").unwrap();
    let permutations = dbg!(domain.vowel_swap(&Permissive).collect::<Vec<_>>());
    assert!(!permutations.is_empty());
}
#[test]
fn test_keyword_mode() {
    let domain = Domain::new("www.example.com").unwrap();
    let permutations = dbg!(domain.keyword(&Permissive).collect::<Vec<_>>());
    assert!(!permutations.is_empty());
}
#[test]
fn test_tld_mode() {
    let domain = Domain::new("www.example.com").unwrap();
    let permutations = dbg!(domain.tld(&Permissive).collect::<Vec<_>>());
    assert!(!permutations.is_empty());
}
#[test]
fn test_mapping_mode() {
    // Root label deliberately contains several `MAPPED_VALUES` keys.
    let domain = Domain::new("www.exoock96z.com").unwrap();
    let permutations = dbg!(domain.mapped(&Permissive).collect::<Vec<_>>());
    assert!(!permutations.is_empty());
}
#[test]
fn test_domain_idna_filtering() {
    // Examples taken from IDNA Punycode RFC:
    // https://tools.ietf.org/html/rfc3492#section-7.1
    //
    // The intermediate `idns` Vec of the previous version was collected and
    // then immediately re-collected unchanged; build the final Vec directly.
    let filtered_domains: Vec<Permutation> = vec![
        // List of invalid domains
        String::from("i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
        String::from("4dbcagdahymbxekheh6e0a7fei0b"),
        String::from("rpublique-numrique-bwbm"),
        String::from("fiqs8s"),
        String::from("acadmie-franaise-npb1a-google.com"),
        String::from("google.com.acadmie-franaise-npb1a"),
        // List of valid domains
        String::from("acadmie-franaise-npb1a"),
        String::from("google.com"),
        String::from("phishdeck.com"),
        String::from("xn--wgbl6a.icom.museum"),
        String::from("xn--80aaxgrpt.icom.museum"),
    ]
    .into_iter()
    .filter_map(|idn| {
        // Only candidates accepted by `Domain::new` survive; the kind is
        // arbitrary for this test.
        Domain::new(idn.as_str()).ok().map(|domain| Permutation {
            domain,
            kind: PermutationKind::Addition,
        })
    })
    .collect();
    dbg!(&filtered_domains);
    assert_eq!(filtered_domains.len(), 5);
}
#[test]
fn test_domains_empty_permutations_regression() {
    // Regression: multi-level TLDs (e.g. `.ac.uk`) used to yield no permutations.
    let targets = vec!["ox.ac.uk", "oxford.ac.uk", "cool.co.nz"]
        .into_iter()
        .map(|fqdn| Domain::new(fqdn).unwrap())
        .collect::<Vec<Domain>>();
    for domain in targets {
        let permutations = dbg!(domain.all(&Permissive).collect::<Vec<_>>());
        assert!(!permutations.is_empty());
    }
}
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/twistrs/benches/bench_permute.rs | twistrs/benches/bench_permute.rs | use criterion::{criterion_group, criterion_main, Criterion};
use twistrs::permutate::Domain;
/// Drain the bitsquatting iterator so criterion measures full generation.
fn bitsquatting(domain: &Domain) {
    for permutation in domain.bitsquatting() {
        drop(permutation);
    }
}
/// Drain the homoglyph iterator; `unwrap` is fine here since the bench
/// input is a known-valid domain.
fn homoglyph(domain: &Domain) {
    for permutation in domain.homoglyph().unwrap() {
        drop(permutation);
    }
}
/// Drain the hyphenation iterator (fn name keeps its historical typo to
/// match the registered benchmark label).
fn hyphentation(domain: &Domain) {
    for permutation in domain.hyphenation() {
        drop(permutation);
    }
}
/// Drain the insertion iterator for benchmarking.
fn insertion(domain: &Domain) {
    for permutation in domain.insertion() {
        drop(permutation);
    }
}
/// Drain the omission iterator for benchmarking.
fn omission(domain: &Domain) {
    for permutation in domain.omission() {
        drop(permutation);
    }
}
/// Drain the repetition iterator for benchmarking.
fn repetition(domain: &Domain) {
    for permutation in domain.repetition() {
        drop(permutation);
    }
}
/// Drain the replacement iterator for benchmarking.
fn replacement(domain: &Domain) {
    for permutation in domain.replacement() {
        drop(permutation);
    }
}
/// Drain the subdomain iterator for benchmarking.
fn subdomain(domain: &Domain) {
    for permutation in domain.subdomain() {
        drop(permutation);
    }
}
/// Drain the transposition iterator for benchmarking.
fn transposition(domain: &Domain) {
    for permutation in domain.transposition() {
        drop(permutation);
    }
}
/// Drain the vowel-swap iterator for benchmarking.
fn vowel_swap(domain: &Domain) {
    for permutation in domain.vowel_swap() {
        drop(permutation);
    }
}
/// Drain the keyword iterator for benchmarking.
fn keyword(domain: &Domain) {
    for permutation in domain.keyword() {
        drop(permutation);
    }
}
/// Drain the TLD-replacement iterator for benchmarking.
fn tld(domain: &Domain) {
    for permutation in domain.tld() {
        drop(permutation);
    }
}
/// Register one criterion benchmark per permutation strategy, all driven
/// by the same `example.com` domain.
fn criterion_benchmark(c: &mut Criterion) {
    let domain = Domain::new("example.com").unwrap();
    c.bench_function("bitsquatting example.com", |b| {
        b.iter(|| bitsquatting(&domain))
    });
    c.bench_function("homoglyph example.com", |b| b.iter(|| homoglyph(&domain)));
    c.bench_function("hyphentation example.com", |b| {
        b.iter(|| hyphentation(&domain))
    });
    c.bench_function("insertion example.com", |b| b.iter(|| insertion(&domain)));
    c.bench_function("omission example.com", |b| b.iter(|| omission(&domain)));
    c.bench_function("repetition example.com", |b| b.iter(|| repetition(&domain)));
    c.bench_function("replacement example.com", |b| {
        b.iter(|| replacement(&domain))
    });
    c.bench_function("subdomain example.com", |b| b.iter(|| subdomain(&domain)));
    c.bench_function("transposition example.com", |b| {
        b.iter(|| transposition(&domain))
    });
    c.bench_function("vowel_swap example.com", |b| b.iter(|| vowel_swap(&domain)));
    c.bench_function("keyword example.com", |b| b.iter(|| keyword(&domain)));
    c.bench_function("tld example.com", |b| b.iter(|| tld(&domain)));
}
// Wire the benchmark group into criterion's generated `main` entry point.
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | false |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/examples/twistrs-cli/src/main.rs | examples/twistrs-cli/src/main.rs | use clap::{App, Arg};
use colored::*;
use tokio::sync::mpsc;
use twistrs::enrich::DomainMetadata;
use twistrs::filter::Permissive;
use twistrs::permutate::{Domain, Permutation};
use anyhow::Result;
use std::collections::HashSet;
use std::time::Instant;
/// CLI entry point: permute the domain given on the command line, spawn one
/// task per candidate to resolve it over DNS, and print the enriched results
/// plus summary statistics.
#[tokio::main]
async fn main() -> Result<()> {
    let start_time = Instant::now();
    let matches = App::new("twistrs-cli")
        .version("0.1.0")
        .author("Juxhin D. Brigjaj <juxhin@phishdeck.com>")
        .arg(Arg::new("domain").required(true))
        .get_matches();
    let domain = Domain::new(matches.value_of("domain").unwrap()).unwrap();
    // Deduplicate candidates across strategies via a HashSet.
    let domain_permutations = domain.all(&Permissive).collect::<HashSet<Permutation>>();
    let domain_permutation_count = domain_permutations.len();
    let (tx, mut rx) = mpsc::channel(5000);
    for (i, v) in domain_permutations.into_iter().enumerate() {
        let domain_metadata = DomainMetadata::new(v.domain.fqdn.clone());
        let mut tx = tx.clone();
        tokio::spawn(async move {
            // Fix: `v` was cloned here although it is never used afterwards;
            // move it into the message instead.
            if tx
                .send((i, v, domain_metadata.dns_resolvable().await))
                .await
                .is_err()
            {
                // Fix: message previously read "received dropped"; use the
                // same wording as the other examples.
                println!("receiver dropped");
                return;
            }
            drop(tx);
        });
    }
    // Drop the original sender so `rx` terminates once all tasks finish.
    drop(tx);
    let mut enumeration_count = 0;
    while let Some(i) = rx.recv().await {
        if let Ok(v) = i.2 {
            // Only report candidates that actually resolved to at least one IP.
            if v.ips.is_some() {
                enumeration_count += 1;
                println!(
                    "\n{}\nDomain: {}\n  IPs: {:?}",
                    "Enriched Domain".bold(),
                    &v.fqdn,
                    &v.ips
                );
            }
        }
    }
    println!(
        "\n{}: {}",
        "Total number of unique domain permutations generated".bold(),
        domain_permutation_count.to_string().cyan()
    );
    println!(
        "{}: {}",
        "Total number of domains enriched".bold(),
        enumeration_count.to_string().cyan()
    );
    println!(
        "{}: {} seconds",
        "Execution time".bold(),
        start_time.elapsed().as_secs()
    );
    Ok(())
}
| rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | false |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/examples/twistrs-ws/src/main.rs | examples/twistrs-ws/src/main.rs | // #![deny(warnings)]
use std::collections::{HashMap, HashSet};
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use futures::{FutureExt, StreamExt};
use tokio::sync::{mpsc, RwLock};
use twistrs::filter::Permissive;
use warp::ws::{Message, WebSocket};
use warp::Filter;
use twistrs::enrich::DomainMetadata;
use twistrs::permutate::{Domain, Permutation};
/// Our global unique user id counter; incremented atomically per connection.
static NEXT_USER_ID: AtomicUsize = AtomicUsize::new(1);
/// Connected users: user id -> sender half of that user's outbound channel.
type Users = Arc<RwLock<HashMap<usize, mpsc::UnboundedSender<Result<Message, warp::Error>>>>>;
/// Serve the demo page at `/` and the websocket endpoint at `/chat`
/// on 127.0.0.1:3030.
#[tokio::main]
async fn main() {
    pretty_env_logger::init();
    // Shared user registry, cloned into each request via a warp filter.
    let users = Users::default();
    let users = warp::any().map(move || users.clone());
    let chat = warp::path("chat")
        .and(warp::ws())
        .and(users)
        .map(|ws: warp::ws::Ws, users| ws.on_upgrade(move |socket| user_connected(socket, users)));
    let index = warp::path::end().map(|| warp::reply::html(INDEX_HTML));
    let routes = index.or(chat);
    warp::serve(routes).run(([127, 0, 0, 1], 3030)).await;
}
/// Handle one websocket connection: register the user, pump incoming
/// messages through `user_message`, and deregister on disconnect.
async fn user_connected(ws: WebSocket, users: Users) {
    // Use a counter to assign a new unique ID for this user.
    let my_id = NEXT_USER_ID.fetch_add(1, Ordering::Relaxed);
    eprintln!("new user: {}", my_id);
    // Split the socket into a sender and receive of messages.
    let (user_ws_tx, mut user_ws_rx) = ws.split();
    // Use an unbounded channel to handle buffering and flushing of messages
    // to the websocket...
    let (tx, rx) = mpsc::unbounded_channel();
    tokio::task::spawn(rx.forward(user_ws_tx).map(|result| {
        if let Err(e) = result {
            eprintln!("websocket send error: {}", e);
        }
    }));
    // Save the sender in our list of connected users.
    users.write().await.insert(my_id, tx);
    // Return a `Future` that is basically a state machine managing
    // this specific user's connection.
    // Make an extra clone to give to our disconnection handler...
    let user_disconnection_handler = users.clone();
    while let Some(result) = user_ws_rx.next().await {
        let msg = match result {
            Ok(msg) => msg,
            Err(e) => {
                eprintln!("websocket error(uid={}): {}", my_id, e);
                break;
            }
        };
        user_message(my_id, msg, &users).await;
    }
    // user_ws_rx stream will keep processing as long as the user stays
    // connected. Once they disconnect, then...
    user_disconnected(my_id, &user_disconnection_handler).await;
}
/// Treat an incoming text message as a domain: permute it, DNS-resolve each
/// candidate in a spawned task, and stream resolved IPs back to the SAME
/// user over their websocket channel.
async fn user_message(my_id: usize, msg: Message, users: &Users) {
    // Skip any non-Text messages...
    let msg = if let Ok(s) = msg.to_str() {
        s
    } else {
        return;
    };
    // Find this user's own sender (results go back to the sender only).
    for (&uid, tx) in users.read().await.iter() {
        if my_id == uid {
            eprintln!("initiating dns resolution checks for user: {}", my_id);
            // NOTE(review): `unwrap` panics on an invalid user-supplied domain —
            // consider reporting the parse error over the socket instead.
            let domain = Domain::new(msg).unwrap();
            let domain_permutations = domain.all(&Permissive).collect::<HashSet<Permutation>>();
            for v in domain_permutations.into_iter() {
                let domain_metadata = DomainMetadata::new(v.domain.fqdn.clone());
                let tx = tx.clone();
                tokio::spawn(async move {
                    if let Ok(metadata) = domain_metadata.dns_resolvable().await {
                        if let Some(ips) = metadata.ips {
                            if tx.send(Ok(Message::text(format!("{:?}", ips)))).is_err() {
                                // Fix: message previously read "received dropped";
                                // align wording with the other examples.
                                println!("receiver dropped");
                                return;
                            }
                            drop(tx);
                        }
                    }
                });
            }
        }
    }
}
/// Remove a disconnected user from the shared registry.
async fn user_disconnected(my_id: usize, users: &Users) {
    eprintln!("good bye user: {}", my_id);
    // Stream closed up, so remove from the user list
    users.write().await.remove(&my_id);
}
// Demo page served at `/`: connects to `/chat`, sends the typed domain and
// appends each resolved-IP message it receives.
static INDEX_HTML: &str = r#"<!DOCTYPE html>
<html lang="en">
<head>
<title>Twistrs WebSocket DNS Resolution Example</title>
</head>
<body>
<h1>Twistrs WebSocket DNS Resolution Example</h1>
<input type="text" id="text" />
<button type="button" id="send">Send</button>
<div id="chat">
<p><em>Connecting...</em></p>
</div>
<script type="text/javascript">
const chat = document.getElementById('chat');
const text = document.getElementById('text');
const uri = 'ws://' + location.host + '/chat';
const ws = new WebSocket(uri);
function message(data) {
const line = document.createElement('p');
line.innerText = data;
chat.appendChild(line);
}
ws.onopen = function() {
chat.innerHTML = '<p><em>Connected!</em></p>';
};
ws.onmessage = function(msg) {
message(msg.data);
};
ws.onclose = function() {
chat.getElementsByTagName('em')[0].innerText = 'Disconnected!';
};
send.onclick = function() {
const msg = text.value;
ws.send(msg);
text.value = '';
message('<You>: ' + msg);
};
</script>
</body>
</html>
"#;
| rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | false |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/examples/twistrs-grpc/build.rs | examples/twistrs-grpc/build.rs | fn main() -> Result<(), Box<dyn std::error::Error>> {
tonic_build::compile_protos("proto/domain_enumeration.proto")?;
Ok(())
} | rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | false |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/examples/twistrs-grpc/src/domain_enumeration.rs | examples/twistrs-grpc/src/domain_enumeration.rs | tonic::include_proto!("domain_enumeration");
| rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | false |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/examples/twistrs-grpc/src/client.rs | examples/twistrs-grpc/src/client.rs | use domain_enumeration::domain_enumeration_client::DomainEnumerationClient;
use domain_enumeration::Fqdn;
mod domain_enumeration;
/// gRPC client example: connect to the enumeration service and stream back
/// DNS-resolution and MX-check results for `google.com`.
///
/// NOTE(review): connects to port 8080 while the bundled server listens on
/// 50051 — confirm the intended endpoint.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let channel = tonic::transport::Channel::from_static("http://127.0.0.1:8080")
        .connect()
        .await?;
    let mut client = DomainEnumerationClient::new(channel);
    println!("[+] Starting DNS resolutions...");
    let request = tonic::Request::new(Fqdn {
        fqdn: String::from("google.com"),
    });
    // Server streams one message per resolved permutation.
    let mut response = client.send_dns_resolution(request).await?.into_inner();
    while let Some(res) = response.message().await? {
        println!("Response: {:?}", res);
    }
    println!("[+] Starting MX Checks...");
    let request = tonic::Request::new(Fqdn {
        fqdn: String::from("google.com"),
    });
    let mut response = client.send_mx_check(request).await?.into_inner();
    while let Some(res) = response.message().await? {
        println!("Response: {:?}", res);
    }
    Ok(())
}
| rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | false |
haveibeensquatted/twistrs | https://github.com/haveibeensquatted/twistrs/blob/86f45b0ceb9751979ce546234d6a6f775c036119/examples/twistrs-grpc/src/server.rs | examples/twistrs-grpc/src/server.rs | mod domain_enumeration;
use tokio::sync::mpsc;
use tonic::{transport::Server, Request, Response, Status};
use twistrs::enrich::DomainMetadata;
use twistrs::filter::Permissive;
use twistrs::permutate::Domain;
use domain_enumeration::domain_enumeration_server::{DomainEnumeration, DomainEnumerationServer};
use domain_enumeration::{DomainEnumerationResponse, Fqdn, MxCheckResponse};
/// Stateless implementation of the `DomainEnumeration` gRPC service.
#[derive(Default)]
pub struct DomainEnumerationService {}
#[tonic::async_trait]
impl DomainEnumeration for DomainEnumerationService {
    // Both RPCs stream results back through bounded mpsc channels.
    type SendDnsResolutionStream = mpsc::Receiver<Result<DomainEnumerationResponse, Status>>;
    type SendMxCheckStream = mpsc::Receiver<Result<MxCheckResponse, Status>>;
    /// Permute the requested FQDN and stream one response per permutation
    /// that resolves over DNS.
    ///
    /// NOTE(review): `Domain::new(...).unwrap()` panics the handler task on an
    /// invalid client-supplied FQDN — consider returning `Status::invalid_argument`.
    async fn send_dns_resolution(
        &self,
        request: Request<Fqdn>,
    ) -> Result<Response<Self::SendDnsResolutionStream>, Status> {
        let (tx, rx) = mpsc::channel(64);
        for permutation in Domain::new(&request.get_ref().fqdn)
            .unwrap()
            .all(&Permissive)
        {
            let domain_metadata = DomainMetadata::new(permutation.domain.fqdn.clone());
            let mut tx = tx.clone();
            // Spawn DNS Resolution check
            tokio::spawn(async move {
                if let Ok(metadata) = domain_metadata.dns_resolvable().await {
                    if let Some(ips) = metadata.ips {
                        if tx
                            .send(Ok(DomainEnumerationResponse {
                                fqdn: permutation.domain.fqdn.to_string(),
                                ips: ips.into_iter().map(|x| format!("{}", x)).collect(),
                            }))
                            .await
                            .is_err()
                        {
                            println!("receiver dropped");
                            return;
                        }
                    }
                }
                drop(tx);
            });
        }
        // Drop the original sender so the stream closes when all tasks finish.
        drop(tx);
        Ok(Response::new(rx))
    }
    /// Permute the requested FQDN and stream one response per permutation
    /// whose mail exchanger answers the SMTP check.
    async fn send_mx_check(
        &self,
        request: Request<Fqdn>,
    ) -> Result<Response<Self::SendMxCheckStream>, Status> {
        let (tx, rx) = mpsc::channel(64);
        for permutation in Domain::new(&request.get_ref().fqdn)
            .unwrap()
            .all(&Permissive)
        {
            let domain_metadata = DomainMetadata::new(permutation.domain.fqdn.clone());
            let mut tx = tx.clone();
            // Spawn DNS Resolution check
            tokio::spawn(async move {
                if let Ok(metadata) = domain_metadata.mx_check().await {
                    if let Some(smtp) = metadata.smtp {
                        if tx
                            .send(Ok(MxCheckResponse {
                                fqdn: permutation.domain.fqdn.to_string(),
                                is_positive: smtp.is_positive,
                                message: smtp.message,
                            }))
                            .await
                            .is_err()
                        {
                            println!("receiver dropped");
                            return;
                        }
                    }
                }
                drop(tx);
            });
        }
        drop(tx);
        Ok(Response::new(rx))
    }
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Bind on all interfaces. `main` already returns a Result, so propagate
    // an invalid-address error with `?` instead of panicking via `unwrap`.
    let addr = "0.0.0.0:50051".parse()?;
    let rpc_service = DomainEnumerationService::default();
    println!("[+] Listening on {}", addr);
    // Serve the gRPC service until the process is terminated.
    Server::builder()
        .add_service(DomainEnumerationServer::new(rpc_service))
        .serve(addr)
        .await?;
    Ok(())
}
| rust | MIT | 86f45b0ceb9751979ce546234d6a6f775c036119 | 2026-01-04T20:23:55.253743Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/src/svm.rs | src/svm.rs | // Copyright (c) 2020-2025 Via Technology Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::Result;
use super::context::Context;
use cl3::device::{
CL_DEVICE_SVM_ATOMICS, CL_DEVICE_SVM_COARSE_GRAIN_BUFFER, CL_DEVICE_SVM_FINE_GRAIN_BUFFER,
CL_DEVICE_SVM_FINE_GRAIN_SYSTEM,
};
use cl3::memory::{
CL_MEM_READ_WRITE, CL_MEM_SVM_ATOMICS, CL_MEM_SVM_FINE_GRAIN_BUFFER, svm_alloc, svm_free,
};
use cl3::types::{cl_device_svm_capabilities, cl_svm_mem_flags, cl_uint};
use libc::c_void;
#[cfg(feature = "serde")]
use serde::de::{Deserialize, DeserializeSeed, Deserializer, Error, SeqAccess, Visitor};
#[cfg(feature = "serde")]
use serde::ser::{Serialize, SerializeSeq, Serializer};
use std::alloc::{self, Layout};
use std::fmt;
use std::fmt::Debug;
use std::iter::IntoIterator;
use std::marker::PhantomData;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::ptr;
#[allow(unused_imports)]
use std::result;
// Raw growable buffer in OpenCL Shared Virtual Memory.
// Manages only the allocation; element initialisation/dropping is the
// responsibility of `SvmVec` and its iterators.
struct SvmRawVec<'a, T> {
    ptr: *mut T, // start of the allocation; null until first grow
    cap: usize, // capacity in elements (not bytes)
    context: &'a Context, // context the SVM memory was allocated from
    fine_grain_buffer: bool, // device supports CL_DEVICE_SVM_FINE_GRAIN_BUFFER
    fine_grain_system: bool, // device supports CL_DEVICE_SVM_FINE_GRAIN_SYSTEM: use the system allocator
    atomics: bool, // device supports CL_DEVICE_SVM_ATOMICS
}
// SAFETY: assumes SVM allocations may be accessed from any host thread, so
// Send/Sync follow from `T` being Send/Sync — TODO confirm against the
// OpenCL SVM threading guarantees.
unsafe impl<T: Send> Send for SvmRawVec<'_, T> {}
unsafe impl<T: Sync> Sync for SvmRawVec<'_, T> {}
impl<'a, T> SvmRawVec<'a, T> {
    /// Create an empty raw SVM vector; no memory is allocated until `grow`.
    ///
    /// # Panics
    ///
    /// Panics if `T` is a zero sized type, or if the capabilities include
    /// neither coarse nor fine grain buffer SVM.
    fn new(context: &'a Context, svm_capabilities: cl_device_svm_capabilities) -> Self {
        assert!(0 < mem::size_of::<T>(), "No Zero Sized Types!");
        assert!(
            0 != svm_capabilities
                & (CL_DEVICE_SVM_COARSE_GRAIN_BUFFER | CL_DEVICE_SVM_FINE_GRAIN_BUFFER),
            "No OpenCL SVM, use OpenCL buffers"
        );
        // Cache capability flags: they select the allocator (system `alloc`
        // for fine grain system SVM, `svm_alloc` otherwise) and the flags
        // passed to `svm_alloc`.
        let fine_grain_buffer: bool = svm_capabilities & CL_DEVICE_SVM_FINE_GRAIN_BUFFER != 0;
        let fine_grain_system: bool = svm_capabilities & CL_DEVICE_SVM_FINE_GRAIN_SYSTEM != 0;
        let atomics: bool = (fine_grain_buffer || fine_grain_system)
            && (svm_capabilities & CL_DEVICE_SVM_ATOMICS != 0);
        SvmRawVec {
            ptr: ptr::null_mut(),
            cap: 0,
            context,
            fine_grain_buffer,
            fine_grain_system,
            atomics,
        }
    }
    /// Create a raw vector with capacity for `capacity` uninitialised elements.
    fn with_capacity(
        context: &'a Context,
        svm_capabilities: cl_device_svm_capabilities,
        capacity: usize,
    ) -> Result<Self> {
        let mut v = Self::new(context, svm_capabilities);
        v.grow(capacity)?;
        Ok(v)
    }
    /// Create a raw vector with capacity for `capacity` zeroed elements.
    fn with_capacity_zeroed(
        context: &'a Context,
        svm_capabilities: cl_device_svm_capabilities,
        capacity: usize,
    ) -> Result<Self> {
        let mut v = Self::with_capacity(context, svm_capabilities, capacity)?;
        v.zero(capacity);
        Ok(v)
    }
    /// Grow the allocation so it can hold at least `count` elements,
    /// copying any existing elements into the new allocation.
    #[allow(clippy::cast_possible_truncation)]
    fn grow(&mut self, count: usize) -> Result<()> {
        // Requests that do not exceed the current capacity are a no-op.
        // Previously a smaller `count` (reachable via `SvmVec::reserve`)
        // reallocated to the smaller size and then copied `self.cap`
        // elements into it below, overflowing the new allocation.
        if 0 < self.cap && count <= self.cap {
            return Ok(());
        }
        let elem_size = mem::size_of::<T>();
        // if pushing or inserting, double the capacity
        let new_cap = if (0 < self.cap) && (count - self.cap == 1) {
            2 * self.cap
        } else {
            count
        };
        let size = elem_size * new_cap;
        // Ensure within capacity.
        assert!(size <= (isize::MAX as usize) / 2, "capacity overflow");
        // allocation, determine whether to use svm_alloc or not
        let ptr = if self.fine_grain_system {
            let new_layout = Layout::array::<T>(new_cap).expect("Layout::array failure.");
            let new_ptr = unsafe { alloc::alloc(new_layout).cast::<c_void>() };
            if new_ptr.is_null() {
                alloc::handle_alloc_error(new_layout);
            }
            new_ptr
        } else {
            let svm_mem_flags: cl_svm_mem_flags = if self.fine_grain_buffer {
                if self.atomics {
                    CL_MEM_SVM_FINE_GRAIN_BUFFER | CL_MEM_READ_WRITE | CL_MEM_SVM_ATOMICS
                } else {
                    CL_MEM_SVM_FINE_GRAIN_BUFFER | CL_MEM_READ_WRITE
                }
            } else {
                CL_MEM_READ_WRITE
            };
            let alignment = mem::align_of::<T>();
            unsafe {
                svm_alloc(
                    self.context.get(),
                    svm_mem_flags,
                    size,
                    alignment as cl_uint,
                )?
            }
        };
        // reallocation, copy old data to new pointer and free old memory
        if 0 < self.cap {
            // SAFETY: the early return above guarantees new_cap > self.cap,
            // so the destination holds at least `self.cap` elements.
            unsafe { ptr::copy(self.ptr, ptr.cast::<T>(), self.cap) };
            if self.fine_grain_system {
                let layout = Layout::array::<T>(self.cap).expect("Layout::array failure.");
                unsafe {
                    alloc::dealloc(self.ptr.cast::<u8>(), layout);
                }
            } else {
                unsafe {
                    let _ = svm_free(self.context.get(), self.ptr.cast::<c_void>());
                };
            }
        }
        self.ptr = ptr.cast::<T>();
        self.cap = new_cap;
        Ok(())
    }
    /// Zero the first `count` elements of the allocation.
    const fn zero(&mut self, count: usize) {
        unsafe { ptr::write_bytes(self.ptr, 0u8, count) };
    }
}
impl<T> Drop for SvmRawVec<'_, T> {
    // Free the allocation with the same allocator that created it (system
    // `alloc` for fine grain system SVM, `svm_free` otherwise).
    // Elements are not dropped here; `SvmVec`/iterators do that first.
    fn drop(&mut self) {
        if !self.ptr.is_null() {
            if self.fine_grain_system {
                let layout = Layout::array::<T>(self.cap).expect("Layout::array failure.");
                unsafe {
                    alloc::dealloc(self.ptr.cast::<u8>(), layout);
                }
            } else {
                unsafe {
                    // A free failure is ignored: nothing useful can be done in drop.
                    let _ = svm_free(self.context.get(), self.ptr.cast::<c_void>());
                };
            }
            self.ptr = ptr::null_mut();
        }
    }
}
/// An OpenCL Shared Virtual Memory (SVM) vector.
/// It has the lifetime of the [Context] that it was constructed from.
/// Note: T cannot be a "zero sized type" (ZST).
///
/// There are three types of Shared Virtual Memory:
/// - CL_DEVICE_SVM_COARSE_GRAIN_BUFFER: OpenCL buffer memory objects can be shared.
/// - CL_DEVICE_SVM_FINE_GRAIN_BUFFER: individual memory objects in an OpenCL buffer can be shared.
/// - CL_DEVICE_SVM_FINE_GRAIN_SYSTEM: individual memory objects *anywhere* in **host** memory can be shared.
///
/// This `SvmVec` struct is designed to support CL_DEVICE_SVM_COARSE_GRAIN_BUFFER
/// and CL_DEVICE_SVM_FINE_GRAIN_BUFFER.
/// A [Context] that supports CL_DEVICE_SVM_FINE_GRAIN_SYSTEM can (and should!)
/// use a standard Rust vector instead.
///
/// Intel provided an excellent overview of Shared Virtual Memory here:
/// [OpenCL 2.0 Shared Virtual Memory Overview](https://software.intel.com/content/www/us/en/develop/articles/opencl-20-shared-virtual-memory-overview.html).
/// A PDF version is available here: [SVM Overview](https://github.com/kenba/opencl3/blob/main/docs/svmoverview.pdf).
///
/// To summarise, a CL_DEVICE_SVM_COARSE_GRAIN_BUFFER requires the SVM to be *mapped*
/// before being read or written by the host and *unmapped* afterward, while
/// CL_DEVICE_SVM_FINE_GRAIN_BUFFER can be used like a standard Rust vector.
///
/// The `is_fine_grained method` can be used to determine whether an `SvmVec` supports
/// CL_DEVICE_SVM_FINE_GRAIN_BUFFER and should be used to control SVM map and unmap
/// operations, e.g.:
/// ```no_run
/// # use cl3::device::CL_DEVICE_TYPE_GPU;
/// # use opencl3::command_queue::CommandQueue;
/// # use opencl3::context::Context;
/// # use opencl3::device::Device;
/// # use opencl3::kernel::{ExecuteKernel, Kernel};
/// # use opencl3::memory::{CL_MAP_WRITE};
/// # use opencl3::platform::get_platforms;
/// # use opencl3::svm::SvmVec;
/// # use opencl3::types::*;
/// # use opencl3::Result;
///
/// # fn main() -> Result<()> {
/// # let platforms = get_platforms().unwrap();
/// # let devices = platforms[0].get_devices(CL_DEVICE_TYPE_GPU).unwrap();
/// # let device = Device::new(devices[0]);
/// # let context = Context::from_device(&device).unwrap();
/// # let queue = CommandQueue::create_default_with_properties(&context, 0, 0).unwrap();
/// // The input data
/// const ARRAY_SIZE: usize = 8;
/// let value_array: [cl_int; ARRAY_SIZE] = [3, 2, 5, 9, 7, 1, 4, 2];
///
/// // Create an OpenCL SVM vector
/// let mut test_values = SvmVec::<cl_int>::allocate(&context, ARRAY_SIZE)?;
///
/// // Map test_values if not an CL_MEM_SVM_FINE_GRAIN_BUFFER
/// if !test_values.is_fine_grained() {
/// unsafe { queue.enqueue_svm_map(CL_BLOCKING, CL_MAP_WRITE, &mut test_values, &[])?};
/// }
///
/// // Copy input data into the OpenCL SVM vector
/// test_values.clone_from_slice(&value_array);
///
/// // Unmap test_values if not an CL_MEM_SVM_FINE_GRAIN_BUFFER
/// if !test_values.is_fine_grained() {
/// let unmap_test_values_event = unsafe { queue.enqueue_svm_unmap(&test_values, &[])?};
/// unmap_test_values_event.wait()?;
/// }
/// # Ok(())
/// # }
/// ```
pub struct SvmVec<'a, T> {
    buf: SvmRawVec<'a, T>, // owns the SVM allocation
    len: usize, // number of initialised elements (<= buf.cap)
}
impl<'a, T> SvmVec<'a, T> {
    /// Raw pointer to the start of the SVM allocation (null if unallocated).
    #[must_use]
    const fn ptr(&self) -> *mut T {
        self.buf.ptr
    }
    /// The capacity of the vector.
    #[must_use]
    pub const fn cap(&self) -> usize {
        self.buf.cap
    }
    /// The length of the vector.
    #[must_use]
    pub const fn len(&self) -> usize {
        self.len
    }
    /// Whether the vector is empty
    #[must_use]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }
    /// Whether the vector is fine grain buffer
    #[must_use]
    pub const fn is_fine_grain_buffer(&self) -> bool {
        self.buf.fine_grain_buffer
    }
    /// Whether the vector is fine grain system
    #[must_use]
    pub const fn is_fine_grain_system(&self) -> bool {
        self.buf.fine_grain_system
    }
    /// Whether the vector is fine grained
    #[must_use]
    pub const fn is_fine_grained(&self) -> bool {
        self.buf.fine_grain_buffer || self.buf.fine_grain_system
    }
    /// Whether the vector can use atomics
    #[must_use]
    pub const fn has_atomics(&self) -> bool {
        self.buf.atomics
    }
    /// Clear the vector, i.e. empty it.
    /// Note: the elements are forgotten, not dropped.
    pub const fn clear(&mut self) {
        self.len = 0;
    }
    /// Set the length of the vector.
    /// If new_len > len, the new memory will be uninitialised.
    ///
    /// # Safety
    /// May fail to grow buf if memory is not available for new_len.
    pub unsafe fn set_len(&mut self, new_len: usize) -> Result<()> {
        if self.cap() < new_len {
            self.buf.grow(new_len)?;
        }
        self.len = new_len;
        Ok(())
    }
    /// Construct an empty SvmVec from a [Context].
    /// The SvmVec has the lifetime of the [Context].
    ///
    /// # Panics
    ///
    /// The cl_device_svm_capabilities of the [Context] must include
    /// CL_DEVICE_SVM_COARSE_GRAIN_BUFFER or CL_DEVICE_SVM_FINE_GRAIN_BUFFER.
    /// The cl_device_svm_capabilities must *not* include CL_DEVICE_SVM_FINE_GRAIN_SYSTEM,
    /// a standard Rust `Vec!` should be used instead.
    #[must_use]
    pub fn new(context: &'a Context) -> Self {
        let svm_capabilities = context.get_svm_mem_capability();
        SvmVec {
            buf: SvmRawVec::new(context, svm_capabilities),
            len: 0,
        }
    }
    /// Construct an SvmVec with the given len of values from a [Context].
    ///
    /// returns a Result containing an SvmVec with len values of **uninitialised**
    /// memory, or the OpenCL error.
    ///
    /// # Panics
    ///
    /// The cl_device_svm_capabilities of the [Context] must include
    /// CL_DEVICE_SVM_COARSE_GRAIN_BUFFER or CL_DEVICE_SVM_FINE_GRAIN_BUFFER.
    /// The cl_device_svm_capabilities must *not* include CL_DEVICE_SVM_FINE_GRAIN_SYSTEM,
    /// a standard Rust `Vec!` should be used instead.
    pub fn allocate(context: &'a Context, len: usize) -> Result<Self> {
        let svm_capabilities = context.get_svm_mem_capability();
        Ok(SvmVec {
            buf: SvmRawVec::with_capacity(context, svm_capabilities, len)?,
            len,
        })
    }
    /// Construct an empty SvmVec with the given capacity from a [Context].
    ///
    /// returns a Result containing an empty SvmVec, or the OpenCL error.
    ///
    /// # Panics
    ///
    /// The cl_device_svm_capabilities of the [Context] must include
    /// CL_DEVICE_SVM_COARSE_GRAIN_BUFFER or CL_DEVICE_SVM_FINE_GRAIN_BUFFER.
    /// The cl_device_svm_capabilities must *not* include CL_DEVICE_SVM_FINE_GRAIN_SYSTEM,
    /// a standard Rust `Vec!` should be used instead.
    pub fn with_capacity(context: &'a Context, capacity: usize) -> Result<Self> {
        let svm_capabilities = context.get_svm_mem_capability();
        Ok(SvmVec {
            buf: SvmRawVec::with_capacity(context, svm_capabilities, capacity)?,
            len: 0,
        })
    }
    /// Construct an SvmVec with the given len of values from a [Context] and
    /// the svm_capabilities of the device (or devices) in the [Context].
    ///
    /// # Panics
    ///
    /// The function will panic if the cl_device_svm_capabilities of the [Context]
    /// does **not** include CL_DEVICE_SVM_FINE_GRAIN_BUFFER.
    ///
    /// returns a Result containing an SvmVec with len values of zeroed
    /// memory, or the OpenCL error.
    pub fn allocate_zeroed(context: &'a Context, len: usize) -> Result<Self> {
        let svm_capabilities = context.get_svm_mem_capability();
        let fine_grain_buffer: bool = svm_capabilities & CL_DEVICE_SVM_FINE_GRAIN_BUFFER != 0;
        assert!(
            fine_grain_buffer,
            "SVM is not fine grained, use `allocate` instead."
        );
        Ok(SvmVec {
            buf: SvmRawVec::with_capacity_zeroed(context, svm_capabilities, len)?,
            len,
        })
    }
    /// Reserve vector capacity.
    /// returns an empty Result or the OpenCL error.
    ///
    /// NOTE(review): `SvmRawVec::grow` reallocates to exactly `capacity`;
    /// a `capacity` smaller than the current `cap()` looks like it would
    /// copy `cap()` elements into the smaller allocation — confirm callers
    /// never pass a shrinking capacity.
    pub fn reserve(&mut self, capacity: usize) -> Result<()> {
        self.buf.grow(capacity)
    }
    /// Push a value onto the vector.
    ///
    /// # Panics
    ///
    /// The function will panic the vector cannot be grown, either because
    /// the SVM is not fine grained or it has reached its limit.
    pub fn push(&mut self, elem: T) {
        if self.len == self.cap() {
            assert!(
                self.is_fine_grained(),
                "SVM is not fine grained, cannot grow the vector."
            );
            self.buf
                .grow(self.len + 1)
                .expect("Cannot grow the vector.");
        }
        // SAFETY: after the (possible) grow above, len < cap, so the write
        // is within the allocation.
        unsafe {
            ptr::write(self.ptr().add(self.len), elem);
        }
        // Can't fail, we'll OOM first.
        self.len += 1;
    }
    /// Pop a value from the vector.
    pub const fn pop(&mut self) -> Option<T> {
        if self.len == 0 {
            None
        } else {
            self.len -= 1;
            // SAFETY: after the decrement, `ptr + len` is the last
            // initialised element; reading it moves it out.
            unsafe { Some(ptr::read(self.ptr().add(self.len))) }
        }
    }
    /// Insert a value into the vector at index.
    ///
    /// # Panics
    ///
    /// The function will panic if the index is out of bounds or
    /// if a coarse grain buffer attempts to grow the vector.
    pub fn insert(&mut self, index: usize, elem: T) {
        assert!(index <= self.len, "index out of bounds");
        if self.cap() == self.len {
            assert!(
                self.is_fine_grained(),
                "SVM is not fine grained, cannot grow the vector."
            );
            self.buf.grow(self.len + 1).expect("Layout::array failure.");
        }
        unsafe {
            if index < self.len {
                // Shift elements [index, len) up by one to make room.
                ptr::copy(
                    self.ptr().add(index),
                    self.ptr().add(index + 1),
                    self.len - index,
                );
            }
            ptr::write(self.ptr().add(index), elem);
            self.len += 1;
        }
    }
    /// Remove a value from the vector at index.
    ///
    /// # Panics
    ///
    /// The function will panic if the index is out of bounds.
    pub fn remove(&mut self, index: usize) -> T {
        assert!(index < self.len, "index out of bounds");
        unsafe {
            self.len -= 1;
            // Move the element out, then shift the tail down by one.
            let result = ptr::read(self.ptr().add(index));
            ptr::copy(
                self.ptr().add(index + 1),
                self.ptr().add(index),
                self.len - index,
            );
            result
        }
    }
    /// Drain the vector.
    pub fn drain(&mut self) -> Drain<'_, T> {
        unsafe {
            let iter = RawValIter::new(self);
            // this is a mem::forget safety thing. If Drain is forgotten, we just
            // leak the whole Vec's contents. Also we need to do this *eventually*
            // anyway, so why not do it now?
            self.len = 0;
            Drain {
                iter,
                vec: PhantomData,
            }
        }
    }
}
impl<'a, T> IntoIterator for SvmVec<'a, T> {
    type Item = T;
    type IntoIter = IntoIter<'a, Self::Item>;
    fn into_iter(self) -> Self::IntoIter {
        unsafe {
            let iter = RawValIter::new(&self);
            // Move the allocation out and forget `self` so its Drop does not
            // run: the elements are now owned by `iter` and the allocation
            // by `_buf`.
            let buf = ptr::read(&self.buf);
            mem::forget(self);
            Self::IntoIter { iter, _buf: buf }
        }
    }
}
impl<T> Drop for SvmVec<'_, T> {
    fn drop(&mut self) {
        // Drop each initialised element in turn; popping never allocates.
        while self.pop().is_some() {}
        // allocation is handled by SvmRawVec
    }
}
impl<T> Deref for SvmVec<'_, T> {
    type Target = [T];
    /// View the initialised elements as a slice.
    fn deref(&self) -> &[T] {
        if self.ptr().is_null() {
            // An unallocated vector (e.g. fresh from `new`) has a null
            // pointer; `slice::from_raw_parts` requires a non-null pointer
            // even for a zero-length slice, so return an empty slice.
            &[]
        } else {
            // SAFETY: the first `len` elements are initialised and the
            // allocation lives as long as `self`.
            unsafe { std::slice::from_raw_parts(self.ptr(), self.len) }
        }
    }
}
impl<T> DerefMut for SvmVec<'_, T> {
    /// View the initialised elements as a mutable slice.
    fn deref_mut(&mut self) -> &mut [T] {
        if self.ptr().is_null() {
            // `slice::from_raw_parts_mut` requires a non-null pointer even
            // for length zero; an unallocated vector yields an empty slice.
            &mut []
        } else {
            // SAFETY: as for `Deref`, plus `&mut self` guarantees exclusivity.
            unsafe { std::slice::from_raw_parts_mut(self.ptr(), self.len) }
        }
    }
}
impl<T: Debug> fmt::Debug for SvmVec<'_, T> {
    // Delegate to the slice Debug impl via Deref, so the vector prints as
    // a list of its initialised elements.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
/// A DeserializeSeed implementation that uses stateful deserialization to
/// append array elements onto the end of an existing SvmVec.
/// The pre-existing state ("seed") in this case is the SvmVec<'b, T>.
#[cfg(feature = "serde")]
pub struct ExtendSvmVec<'a, 'b, T: 'a>(pub &'a mut SvmVec<'b, T>);
#[cfg(feature = "serde")]
impl<'de, T> DeserializeSeed<'de> for ExtendSvmVec<'_, '_, T>
where
    T: Deserialize<'de>,
{
    // The return type of the `deserialize` method. Since this implementation
    // appends onto an existing SvmVec the return type is ().
    type Value = ();
    fn deserialize<D>(self, deserializer: D) -> result::Result<Self::Value, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Visitor implementation to walk an array of the deserializer input.
        struct ExtendSvmVecVisitor<'a, 'b, T: 'a>(&'a mut SvmVec<'b, T>);
        impl<'de, T> Visitor<'de> for ExtendSvmVecVisitor<'_, '_, T>
        where
            T: Deserialize<'de>,
        {
            type Value = ();
            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("an array")
            }
            fn visit_seq<A>(self, mut seq: A) -> result::Result<(), A::Error>
            where
                A: SeqAccess<'de>,
            {
                // reserve SvmVec memory if the size of the deserializer array is known
                // NOTE(review): `push` cannot grow a coarse grain SVM vector,
                // so deserializing past the reserved capacity will panic there.
                if let Some(size) = seq.size_hint() {
                    let len = self.0.len + size;
                    self.0.reserve(len).map_err(A::Error::custom)?;
                }
                // Visit each element in the array and push it onto the existing SvmVec
                while let Some(elem) = seq.next_element()? {
                    self.0.push(elem);
                }
                Ok(())
            }
        }
        deserializer.deserialize_seq(ExtendSvmVecVisitor(self.0))
    }
}
#[cfg(feature = "serde")]
impl<T> Serialize for SvmVec<'_, T>
where
    T: Serialize,
{
    /// Serialize the vector as a sequence of its initialised elements.
    fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // `collect_seq` drives serialize_seq/serialize_element/end itself;
        // the slice iterator's exact size hint supplies the sequence length.
        serializer.collect_seq(self.iter())
    }
}
// Borrow-free iterator over a range of raw values; used by both `IntoIter`
// and `Drain` to move elements out with `ptr::read`.
struct RawValIter<T> {
    start: *const T, // next element to yield from the front
    end: *const T, // one past the last element (or a counter for ZSTs)
}
// SAFETY: assumes the raw pointers stay within the allocation they were
// created from; sending the iterator moves ownership of the remaining
// elements with it.
unsafe impl<T: Send> Send for RawValIter<T> {}
impl<T> RawValIter<T> {
    /// Build an iterator over the values of `slice`.
    ///
    /// # Safety
    /// Unsafe because the elements are moved out with `ptr::read`; the
    /// caller must ensure they are not used again through the slice.
    unsafe fn new(slice: &[T]) -> Self {
        unsafe {
            Self {
                start: slice.as_ptr(),
                end: if mem::size_of::<T>() == 0 {
                    // ZSTs: pointer arithmetic is meaningless, so encode the
                    // remaining count in the address difference instead.
                    ((slice.as_ptr() as usize) + slice.len()) as *const _
                } else if slice.is_empty() {
                    // Avoid `add` on the (possibly dangling) pointer of an
                    // empty slice.
                    slice.as_ptr()
                } else {
                    slice.as_ptr().add(slice.len())
                },
            }
        }
    }
}
impl<T> Iterator for RawValIter<T> {
    type Item = T;
    fn next(&mut self) -> Option<T> {
        if self.start == self.end {
            None
        } else {
            unsafe {
                // Move the value out; this slot must not be read again.
                let result = ptr::read(self.start);
                self.start = if mem::size_of::<T>() == 0 {
                    // ZSTs: the address is a counter, just step it.
                    (self.start as usize + 1) as *const _
                } else {
                    self.start.offset(1)
                };
                Some(result)
            }
        }
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        let elem_size = mem::size_of::<T>();
        // For ZSTs the pointer difference *is* the element count.
        let len =
            (self.end as usize - self.start as usize) / if elem_size == 0 { 1 } else { elem_size };
        (len, Some(len))
    }
}
impl<T> DoubleEndedIterator for RawValIter<T> {
    fn next_back(&mut self) -> Option<T> {
        if self.start == self.end {
            None
        } else {
            unsafe {
                // Step `end` back first, then move the value out.
                self.end = if mem::size_of::<T>() == 0 {
                    (self.end as usize - 1) as *const _
                } else {
                    self.end.offset(-1)
                };
                Some(ptr::read(self.end))
            }
        }
    }
}
/// Owning iterator returned by `SvmVec::into_iter`.
pub struct IntoIter<'a, T> {
    _buf: SvmRawVec<'a, T>, // we don't actually care about this. Just need it to live.
    iter: RawValIter<T>,
}
impl<T> Iterator for IntoIter<'_, T> {
    type Item = T;
    fn next(&mut self) -> Option<T> {
        self.iter.next()
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
impl<T> DoubleEndedIterator for IntoIter<'_, T> {
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back()
    }
}
impl<T> Drop for IntoIter<'_, T> {
    // Drop any elements not yet yielded; `_buf` then frees the allocation.
    fn drop(&mut self) {
        for _ in &mut *self {}
    }
}
/// Draining iterator returned by `SvmVec::drain`.
pub struct Drain<'a, T: 'a> {
    vec: PhantomData<&'a mut SvmVec<'a, T>>, // keeps the vector mutably borrowed while draining
    iter: RawValIter<T>,
}
impl<T> Iterator for Drain<'_, T> {
    type Item = T;
    fn next(&mut self) -> Option<T> {
        self.iter.next()
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
impl<T> DoubleEndedIterator for Drain<'_, T> {
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back()
    }
}
impl<T> Drop for Drain<'_, T> {
    fn drop(&mut self) {
        // pre-drain the iter: drop every element not yet yielded
        for _ in &mut self.iter {}
    }
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/src/event.rs | src/event.rs | // Copyright (c) 2020-2024 Via Technology Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub use cl3::event::*;
use super::Result;
use libc::c_void;
/// An OpenCL event object.
///
/// Has methods to return information from calls to clGetEventInfo and
/// clGetEventProfilingInfo with the appropriate parameters.
/// Implements the Drop trait to call release_event when the object is dropped.
#[derive(Debug)]
pub struct Event {
    event: cl_event, // raw OpenCL event handle, released on drop
}
impl From<cl_event> for Event {
    // Takes ownership of the handle: the returned `Event` releases it on
    // drop, so the caller must not release it separately.
    fn from(event: cl_event) -> Self {
        Self { event }
    }
}
impl From<Event> for cl_event {
    // NOTE(review): `value` is dropped at the end of this function, which
    // calls clReleaseEvent on the very handle being returned. Unless an
    // extra reference was retained, the returned cl_event may already be
    // released — confirm whether this should `mem::forget(value)` instead.
    fn from(value: Event) -> Self {
        value.event as Self
    }
}
impl Drop for Event {
    // Release the OpenCL event reference.
    // NOTE(review): `expect` panics if clReleaseEvent fails; a panic in drop
    // during unwinding aborts the process.
    fn drop(&mut self) {
        unsafe { release_event(self.event).expect("Error: clReleaseEvent") };
    }
}
// SAFETY: assumes the underlying OpenCL event handle may be used from any
// thread — TODO confirm against the OpenCL threading guarantees.
unsafe impl Send for Event {}
unsafe impl Sync for Event {}
impl Event {
    /// Create an Event from an OpenCL cl_event.
    ///
    /// * `event` - a valid OpenCL cl_event.
    ///
    /// returns the new Event
    pub const fn new(event: cl_event) -> Self {
        Self { event }
    }
    /// Get the underlying OpenCL cl_event.
    pub const fn get(&self) -> cl_event {
        self.event
    }
    /// Wait for the event to complete.
    pub fn wait(&self) -> Result<()> {
        let events = [self.get()];
        Ok(wait_for_events(&events)?)
    }
    /// The execution status of the command identified by this event
    /// (CL_EVENT_COMMAND_EXECUTION_STATUS).
    pub fn command_execution_status(&self) -> Result<CommandExecutionStatus> {
        Ok(CommandExecutionStatus(
            get_event_info(self.event, CL_EVENT_COMMAND_EXECUTION_STATUS)?.into(),
        ))
    }
    /// The command associated with this event (CL_EVENT_COMMAND_TYPE).
    pub fn command_type(&self) -> Result<EventCommandType> {
        Ok(EventCommandType(
            get_event_info(self.event, CL_EVENT_COMMAND_TYPE)?.into(),
        ))
    }
    /// The reference count of the underlying event object.
    pub fn reference_count(&self) -> Result<cl_uint> {
        Ok(get_event_info(self.event, CL_EVENT_REFERENCE_COUNT)?.into())
    }
    /// The command-queue associated with this event.
    pub fn command_queue(&self) -> Result<cl_command_queue> {
        Ok(isize::from(get_event_info(self.event, CL_EVENT_COMMAND_QUEUE)?) as cl_command_queue)
    }
    /// The context associated with this event.
    pub fn context(&self) -> Result<cl_context> {
        Ok(isize::from(get_event_info(self.event, CL_EVENT_CONTEXT)?) as cl_context)
    }
    /// Get data about an OpenCL event.
    /// Calls clGetEventInfo to get the desired data about the event.
    pub fn get_data(&self, param_name: cl_event_info) -> Result<Vec<u8>> {
        Ok(get_event_data(self.event, param_name)?)
    }
    /// Register a callback to be invoked when the event reaches the given
    /// command execution status. Calls clSetEventCallback.
    pub fn set_callback(
        &self,
        command_exec_callback_type: cl_int,
        pfn_notify: extern "C" fn(cl_event, cl_int, *mut c_void),
        user_data: *mut c_void,
    ) -> Result<()> {
        Ok(set_event_callback(
            self.event,
            command_exec_callback_type,
            pfn_notify,
            user_data,
        )?)
    }
    /// Device time counter when the command was enqueued (clGetEventProfilingInfo).
    pub fn profiling_command_queued(&self) -> Result<cl_ulong> {
        Ok(get_event_profiling_info(self.event, CL_PROFILING_COMMAND_QUEUED)?.into())
    }
    /// Device time counter when the command was submitted to the device.
    pub fn profiling_command_submit(&self) -> Result<cl_ulong> {
        Ok(get_event_profiling_info(self.event, CL_PROFILING_COMMAND_SUBMIT)?.into())
    }
    /// Device time counter when the command started execution.
    pub fn profiling_command_start(&self) -> Result<cl_ulong> {
        Ok(get_event_profiling_info(self.event, CL_PROFILING_COMMAND_START)?.into())
    }
    /// Device time counter when the command finished execution.
    pub fn profiling_command_end(&self) -> Result<cl_ulong> {
        Ok(get_event_profiling_info(self.event, CL_PROFILING_COMMAND_END)?.into())
    }
    /// CL_VERSION_2_0
    pub fn profiling_command_complete(&self) -> Result<cl_ulong> {
        Ok(get_event_profiling_info(self.event, CL_PROFILING_COMMAND_COMPLETE)?.into())
    }
    /// Get profiling data about an OpenCL event.
    /// Calls clGetEventProfilingInfo to get the desired profiling data about the event.
    pub fn profiling_data(&self, param_name: cl_profiling_info) -> Result<Vec<u8>> {
        Ok(get_event_profiling_data(self.event, param_name)?)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::command_queue::{CL_QUEUE_PROFILING_ENABLE, CommandQueue};
    use crate::context::Context;
    use crate::device::{CL_DEVICE_TYPE_GPU, Device};
    use crate::memory::{Buffer, CL_MEM_READ_ONLY};
    use crate::platform::get_platforms;
    use crate::types::{CL_NON_BLOCKING, cl_float};
    use std::ptr;
    // Simple clSetEventCallback target: just logs the command status.
    extern "C" fn event_callback_function(
        _event: cl_event,
        event_command_status: cl_int,
        _user_data: *mut c_void,
    ) {
        println!(
            "OpenCL event callback command status: {}",
            event_command_status
        );
    }
    // NOTE: requires a working OpenCL runtime with at least one GPU device;
    // exercises event info, callback registration and profiling queries.
    #[test]
    fn test_event() {
        let platforms = get_platforms().unwrap();
        assert!(0 < platforms.len());
        // Get the first platform
        let platform = &platforms[0];
        let devices = platform.get_devices(CL_DEVICE_TYPE_GPU).unwrap();
        assert!(0 < devices.len());
        // Get the first device
        let device = Device::new(devices[0]);
        let context = Context::from_device(&device).unwrap();
        // Create a command_queue on the Context's default device
        let queue = CommandQueue::create_default(&context, CL_QUEUE_PROFILING_ENABLE)
            .expect("CommandQueue::create_default failed");
        const ARRAY_SIZE: usize = 1024;
        let ones: [cl_float; ARRAY_SIZE] = [1.0; ARRAY_SIZE];
        let mut buffer = unsafe {
            Buffer::<cl_float>::create(&context, CL_MEM_READ_ONLY, ARRAY_SIZE, ptr::null_mut())
                .unwrap()
        };
        let events: Vec<cl_event> = Vec::default();
        // Non-blocking write, wait for event
        let event = unsafe {
            queue
                .enqueue_write_buffer(&mut buffer, CL_NON_BLOCKING, 0, &ones, &events)
                .unwrap()
        };
        // Set a callback_function on the event (i.e. write) being completed.
        event
            .set_callback(CL_COMPLETE, event_callback_function, ptr::null_mut())
            .unwrap();
        let value = event.command_execution_status().unwrap();
        println!("event.command_execution_status(): {}", value);
        // assert_eq!(CL_QUEUED, value.0);
        let value = event.command_type().unwrap();
        println!("event.command_type(): {}", value);
        assert_eq!(CL_COMMAND_WRITE_BUFFER, value.0);
        let value = event.reference_count().unwrap();
        println!("event.reference_count(): {}", value);
        // assert_eq!(1, value);
        let value = event.command_queue().unwrap();
        assert!(queue.get() == value);
        let value = event.context().unwrap();
        assert!(context.get() == value);
        event.wait().unwrap();
        // After the wait the write must have completed.
        let value = event.command_execution_status().unwrap();
        println!("event.command_execution_status(): {}", value);
        assert_eq!(CL_COMPLETE, value.0);
        let value = event.profiling_command_queued().unwrap();
        println!("event.profiling_command_queued(): {}", value);
        assert!(0 < value);
        let value = event.profiling_command_submit().unwrap();
        println!("event.profiling_command_submit(): {}", value);
        assert!(0 < value);
        let value = event.profiling_command_start().unwrap();
        println!("event.profiling_command_start(): {}", value);
        assert!(0 < value);
        let value = event.profiling_command_end().unwrap();
        println!("event.profiling_command_end(): {}", value);
        assert!(0 < value);
        // CL_VERSION_2_0
        match event.profiling_command_complete() {
            Ok(value) => println!("event.profiling_command_complete(): {}", value),
            Err(e) => println!("OpenCL error, event.profiling_command_complete(): {}", e),
        }
    }
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/src/command_queue.rs | src/command_queue.rs | // Copyright (c) 2020-2024 Via Technology Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(deprecated)]
#![allow(clippy::too_many_arguments, clippy::missing_safety_doc)]
pub use cl3::command_queue::*;
use super::context::Context;
use super::Result;
use super::device::Device;
use super::event::Event;
use super::memory::*;
#[allow(unused_imports)]
use cl3::d3d10;
#[allow(unused_imports)]
use cl3::d3d11;
#[allow(unused_imports)]
use cl3::dx9_media_sharing;
#[allow(unused_imports)]
use cl3::egl;
#[allow(unused_imports)]
use cl3::ext;
use cl3::gl;
#[allow(unused_imports)]
use cl3::types::cl_program;
#[allow(unused_imports)]
use libc::{c_char, c_void, size_t};
use std::mem;
use std::ptr;
/// An OpenCL command-queue.
///
/// Operations on OpenCL memory and kernel objects are performed using a
/// command-queue.
#[derive(Debug)]
pub struct CommandQueue {
    queue: cl_command_queue, // raw OpenCL command-queue handle, released on drop
    max_work_item_dimensions: cl_uint, // cached from the queue's device at creation
}
impl From<CommandQueue> for cl_command_queue {
    // NOTE(review): `value` is dropped at the end of this function, which
    // calls clReleaseCommandQueue on the handle being returned; unless an
    // extra reference was retained, the returned handle may already be
    // released — confirm whether this should `mem::forget(value)` instead.
    fn from(value: CommandQueue) -> Self {
        value.queue
    }
}
impl Drop for CommandQueue {
    // Release the OpenCL command-queue reference.
    // NOTE(review): `expect` panics if clReleaseCommandQueue fails; a panic
    // in drop during unwinding aborts the process.
    fn drop(&mut self) {
        unsafe { release_command_queue(self.queue).expect("Error: clReleaseCommandQueue") };
    }
}
// SAFETY: assumes the underlying cl_command_queue handle may be shared and
// used across threads — TODO confirm against the OpenCL threading guarantees.
unsafe impl Send for CommandQueue {}
unsafe impl Sync for CommandQueue {}
impl CommandQueue {
const fn new(queue: cl_command_queue, max_work_item_dimensions: cl_uint) -> Self {
Self {
queue,
max_work_item_dimensions,
}
}
/// Get the underlying OpenCL cl_command_queue.
pub const fn get(&self) -> cl_command_queue {
self.queue
}
/// Get the max_work_item_dimensions for the device that the underlying OpenCL
/// device.
pub const fn max_work_item_dimensions(&self) -> cl_uint {
self.max_work_item_dimensions
}
/// Create an OpenCL command-queue on a specific device.
///
/// Queries the device the max_work_item_dimensions.
/// Deprecated in CL_VERSION_2_0 by create_command_queue_with_properties.
///
/// * `context` - a valid OpenCL context.
/// * `device_id` - a device or sub-device associated with context.
/// * `properties` - a list of properties for the command-queue, see
/// [cl_command_queue_properties](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#legacy-queue-properties-table).
///
/// returns a Result containing the new CommandQueue
/// or the error code from the OpenCL C API function.
///
/// # Safety
///
/// This is unsafe when a device is not a member of context.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
#[cfg_attr(
any(
feature = "CL_VERSION_2_0",
feature = "CL_VERSION_2_1",
feature = "CL_VERSION_2_2",
feature = "CL_VERSION_3_0"
),
deprecated(
since = "0.1.0",
note = "From CL_VERSION_2_0 use create_command_queue_with_properties"
)
)]
pub unsafe fn create(
context: &Context,
device_id: cl_device_id,
properties: cl_command_queue_properties,
) -> Result<Self> {
unsafe {
let queue = create_command_queue(context.get(), device_id, properties)?;
let device = Device::new(device_id);
let max_work_item_dimensions = device.max_work_item_dimensions()?;
Ok(Self::new(queue, max_work_item_dimensions))
}
}
/// Create an OpenCL command-queue on the context default device.
///
/// Queries the device for its `max_work_item_dimensions`.
/// Deprecated in CL_VERSION_2_0 by create_command_queue_with_properties.
///
/// * `context` - a valid OpenCL context.
/// * `properties` - a list of properties for the command-queue, see
/// [cl_command_queue_properties](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#legacy-queue-properties-table).
///
/// returns a Result containing the new CommandQueue
/// or the error code from the OpenCL C API function.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
#[cfg_attr(
any(
feature = "CL_VERSION_2_0",
feature = "CL_VERSION_2_1",
feature = "CL_VERSION_2_2",
feature = "CL_VERSION_3_0"
),
deprecated(
since = "0.1.0",
note = "From CL_VERSION_2_0 use create_command_queue_with_properties"
)
)]
pub fn create_default(
context: &Context,
properties: cl_command_queue_properties,
) -> Result<Self> {
// Safe wrapper: the context's default device is by definition a member
// of the context, satisfying `create`'s safety contract.
unsafe { Self::create(context, context.default_device(), properties) }
}
/// Create an OpenCL command-queue on a specific device.
///
/// Queries the device for its `max_work_item_dimensions`.
/// CL_VERSION_2_0 onwards.
///
/// * `context` - a valid OpenCL context.
/// * `device_id` - a device or sub-device associated with context.
/// * `properties` - a null terminated list of properties for the command-queue, see
/// [cl_queue_properties](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#queue-properties-table).
/// * `queue_size` - when non-zero, passed as the CL_QUEUE_SIZE property.
///
/// returns a Result containing the new CommandQueue
/// or the error code from the OpenCL C API function.
///
/// # Safety
///
/// This is unsafe when a device is not a member of context.
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
pub unsafe fn create_with_properties(
context: &Context,
device_id: cl_device_id,
properties: cl_command_queue_properties,
queue_size: cl_uint,
) -> Result<Self> {
unsafe {
let queue = if (0 < properties) || (0 < queue_size) {
// Build the property list as (key, value) pairs. The array is
// zero-initialised, so the unused trailing element(s) provide the
// null terminator required by clCreateCommandQueueWithProperties.
let mut props: [cl_queue_properties; 5] = [0; 5];
let mut index = 0;
if 0 < properties {
props[index] = CL_QUEUE_PROPERTIES as cl_queue_properties;
props[index + 1] = properties as cl_queue_properties;
index += 2;
}
if 0 < queue_size {
props[index] = CL_QUEUE_SIZE as cl_queue_properties;
props[index + 1] = queue_size as cl_queue_properties;
}
create_command_queue_with_properties(context.get(), device_id, props.as_ptr())?
} else {
// No properties requested: pass a null list.
create_command_queue_with_properties(context.get(), device_id, ptr::null())?
};
let device = Device::new(device_id);
let max_work_item_dimensions = device.max_work_item_dimensions()?;
Ok(Self::new(queue, max_work_item_dimensions))
}
}
/// Create an OpenCL command-queue on the context default device.
///
/// Queries the device for its `max_work_item_dimensions`.
/// CL_VERSION_2_0 onwards.
///
/// * `context` - a valid OpenCL context.
/// * `properties` - a null terminated list of properties for the command-queue, see
/// [cl_queue_properties](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#queue-properties-table).
/// * `queue_size` - when non-zero, passed as the CL_QUEUE_SIZE property.
///
/// returns a Result containing the new CommandQueue
/// or the error code from the OpenCL C API function.
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
pub fn create_default_with_properties(
context: &Context,
properties: cl_command_queue_properties,
queue_size: cl_uint,
) -> Result<Self> {
// Safe wrapper: the context's default device is always a member of the
// context, satisfying `create_with_properties`'s safety contract.
unsafe {
Self::create_with_properties(context, context.default_device(), properties, queue_size)
}
}
/// Create an OpenCL command-queue on a specific device via the
/// `cl_khr_create_command_queue` extension.
///
/// * `context` - a valid OpenCL context.
/// * `device_id` - a device or sub-device associated with context.
/// * `properties` - properties for the command-queue; presumably must be a
///   zero-terminated list as for the core API — TODO confirm against cl3.
///
/// returns a Result containing the new CommandQueue
/// or the error code from the OpenCL C API function.
///
/// NOTE(review): unlike the sibling constructors this is not marked `unsafe`,
/// although it has the same device-must-belong-to-context precondition —
/// confirm whether that is intentional.
#[cfg(any(feature = "cl_khr_create_command_queue", feature = "dynamic"))]
pub fn create_with_properties_khr(
context: &Context,
device_id: cl_device_id,
properties: &[ext::cl_queue_properties_khr],
) -> Result<Self> {
let queue = ext::create_command_queue_with_properties_khr(
context.get(),
device_id,
properties.as_ptr(),
)?;
let device = Device::new(device_id);
let max_work_item_dimensions = device.max_work_item_dimensions()?;
Ok(Self::new(queue, max_work_item_dimensions))
}
/// Issue all previously queued commands in this command-queue to its device.
/// returns an empty Result or the error code from the OpenCL C API function.
pub fn flush(&self) -> Result<()> {
    flush(self.queue)?;
    Ok(())
}
/// Block until all previously queued commands on this command-queue have
/// completed.
/// returns an empty Result or the error code from the OpenCL C API function.
pub fn finish(&self) -> Result<()> {
    finish(self.queue)?;
    Ok(())
}
/// Enqueue a command to read from an OpenCL buffer object into host memory.
///
/// An empty `event_wait_list` is forwarded to OpenCL as a null pointer with
/// a count of zero, as the clEnqueue* contract requires.
///
/// # Safety
///
/// When `blocking_read` is false, `data` must remain valid and must not be
/// read until the returned [Event] signals completion.
pub unsafe fn enqueue_read_buffer<T>(
&self,
buffer: &Buffer<T>,
blocking_read: cl_bool,
offset: size_t,
data: &mut [T],
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_read_buffer(
self.queue,
buffer.get(),
blocking_read,
offset,
mem::size_of_val(data),
data.as_mut_ptr() as cl_mem,
event_wait_list.len() as cl_uint,
// Null pointer when the wait list is empty.
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to read a 2D or 3D rectangular region from a buffer
/// object into host memory.
///
/// # Safety
///
/// `buffer_origin`, `host_origin` and `region` must each point to three
/// `size_t` values, and `ptr` must address enough valid host memory for the
/// described region (which must stay valid until completion when the read is
/// non-blocking).
#[allow(clippy::as_ptr_cast_mut)]
pub unsafe fn enqueue_read_buffer_rect<T>(
&self,
buffer: &Buffer<T>,
blocking_read: cl_bool,
buffer_origin: *const size_t,
host_origin: *const size_t,
region: *const size_t,
buffer_row_pitch: size_t,
buffer_slice_pitch: size_t,
host_row_pitch: size_t,
host_slice_pitch: size_t,
ptr: *mut c_void,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_read_buffer_rect(
self.queue,
buffer.get(),
blocking_read,
buffer_origin,
host_origin,
region,
buffer_row_pitch,
buffer_slice_pitch,
host_row_pitch,
host_slice_pitch,
ptr,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to write host memory into an OpenCL buffer object.
///
/// # Safety
///
/// When `blocking_write` is false, `data` must remain valid and unchanged
/// until the returned [Event] signals completion.
#[allow(clippy::as_ptr_cast_mut)]
pub unsafe fn enqueue_write_buffer<T>(
&self,
buffer: &mut Buffer<T>,
blocking_write: cl_bool,
offset: size_t,
data: &[T],
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_write_buffer(
self.queue,
buffer.get_mut(),
blocking_write,
offset,
mem::size_of_val(data),
data.as_ptr() as cl_mem,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to write a 2D or 3D rectangular region of host memory
/// into a buffer object.
///
/// # Safety
///
/// The origin/region pointers must each reference three `size_t` values and
/// `ptr` must address valid host memory for the described region, remaining
/// valid until completion when the write is non-blocking.
pub unsafe fn enqueue_write_buffer_rect<T>(
&self,
buffer: &mut Buffer<T>,
blocking_write: cl_bool,
buffer_origin: *const size_t,
host_origin: *const size_t,
region: *const size_t,
buffer_row_pitch: size_t,
buffer_slice_pitch: size_t,
host_row_pitch: size_t,
host_slice_pitch: size_t,
ptr: *mut c_void,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_write_buffer_rect(
self.queue,
buffer.get_mut(),
blocking_write,
buffer_origin,
host_origin,
region,
buffer_row_pitch,
buffer_slice_pitch,
host_row_pitch,
host_slice_pitch,
ptr,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to fill `size` bytes of `buffer`, starting at `offset`,
/// with repeated copies of `pattern`.
///
/// # Safety
///
/// `offset` and `size` must lie within the buffer and be multiples of the
/// pattern size; the pattern data is copied by OpenCL before the call returns.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
#[allow(clippy::as_ptr_cast_mut)]
pub unsafe fn enqueue_fill_buffer<T>(
&self,
buffer: &mut Buffer<T>,
pattern: &[T],
offset: size_t,
size: size_t,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_fill_buffer(
self.queue,
buffer.get_mut(),
pattern.as_ptr() as cl_mem,
mem::size_of_val(pattern),
offset,
size,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to copy `size` bytes from `src_buffer` to `dst_buffer`.
///
/// # Safety
///
/// The source and destination ranges must lie within their buffers and, for
/// the same buffer, must not overlap.
pub unsafe fn enqueue_copy_buffer<T>(
&self,
src_buffer: &Buffer<T>,
dst_buffer: &mut Buffer<T>,
src_offset: size_t,
dst_offset: size_t,
size: size_t,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_copy_buffer(
self.queue,
src_buffer.get(),
dst_buffer.get_mut(),
src_offset,
dst_offset,
size,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to copy a 2D or 3D rectangular region between two
/// buffer objects.
///
/// # Safety
///
/// `src_origin`, `dst_origin` and `region` must each point to three `size_t`
/// values describing ranges that lie within the respective buffers.
pub unsafe fn enqueue_copy_buffer_rect<T>(
&self,
src_buffer: &Buffer<T>,
dst_buffer: &mut Buffer<T>,
src_origin: *const size_t,
dst_origin: *const size_t,
region: *const size_t,
src_row_pitch: size_t,
src_slice_pitch: size_t,
dst_row_pitch: size_t,
dst_slice_pitch: size_t,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_copy_buffer_rect(
self.queue,
src_buffer.get(),
dst_buffer.get_mut(),
src_origin,
dst_origin,
region,
src_row_pitch,
src_slice_pitch,
dst_row_pitch,
dst_slice_pitch,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to read a region of an image object into host memory.
///
/// # Safety
///
/// `origin` and `region` must each point to three `size_t` values, and `ptr`
/// must address enough valid host memory for the region; when
/// `blocking_read` is false it must remain valid until completion.
pub unsafe fn enqueue_read_image(
&self,
image: &Image,
blocking_read: cl_bool,
origin: *const size_t,
region: *const size_t,
row_pitch: size_t,
slice_pitch: size_t,
ptr: *mut c_void,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_read_image(
self.queue,
image.get(),
blocking_read,
origin,
region,
row_pitch,
slice_pitch,
ptr,
event_wait_list.len() as cl_uint,
// Null pointer when the wait list is empty.
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to write host memory into a region of an image object.
///
/// # Safety
///
/// `origin` and `region` must each point to three `size_t` values, and `ptr`
/// must address valid host memory for the region; when `blocking_write` is
/// false it must remain valid and unchanged until completion.
pub unsafe fn enqueue_write_image(
&self,
image: &mut Image,
blocking_write: cl_bool,
origin: *const size_t,
region: *const size_t,
row_pitch: size_t,
slice_pitch: size_t,
ptr: *mut c_void,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_write_image(
self.queue,
image.get_mut(),
blocking_write,
origin,
region,
row_pitch,
slice_pitch,
ptr,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to fill a region of an image object with `fill_color`.
///
/// # Safety
///
/// `fill_color` must point to a colour value matching the image format, and
/// `origin`/`region` must each point to three `size_t` values within the image.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub unsafe fn enqueue_fill_image(
&self,
image: &mut Image,
fill_color: *const c_void,
origin: *const size_t,
region: *const size_t,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_fill_image(
self.queue,
image.get_mut(),
fill_color,
origin,
region,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to copy a region between two image objects.
///
/// # Safety
///
/// The origin/region pointers must each point to three `size_t` values
/// describing regions that lie within the respective images.
pub unsafe fn enqueue_copy_image(
&self,
src_image: &Image,
dst_image: &mut Image,
src_origin: *const size_t,
dst_origin: *const size_t,
region: *const size_t,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_copy_image(
self.queue,
src_image.get(),
dst_image.get_mut(),
src_origin,
dst_origin,
region,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to copy a region of an image object into a buffer
/// object starting at `dst_offset`.
///
/// # Safety
///
/// `src_origin` and `region` must each point to three `size_t` values, and
/// the destination range must lie within the buffer.
pub unsafe fn enqueue_copy_image_to_buffer<T>(
&self,
src_image: &Image,
dst_buffer: &mut Buffer<T>,
src_origin: *const size_t,
region: *const size_t,
dst_offset: size_t,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_copy_image_to_buffer(
self.queue,
src_image.get(),
dst_buffer.get_mut(),
src_origin,
region,
dst_offset,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to copy buffer contents starting at `src_offset` into a
/// region of an image object.
///
/// # Safety
///
/// `dst_origin` and `region` must each point to three `size_t` values, and
/// the source range must lie within the buffer.
pub unsafe fn enqueue_copy_buffer_to_image<T>(
&self,
src_buffer: &Buffer<T>,
dst_image: &mut Image,
src_offset: size_t,
dst_origin: *const size_t,
region: *const size_t,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_copy_buffer_to_image(
self.queue,
src_buffer.get(),
dst_image.get_mut(),
src_offset,
dst_origin,
region,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to map a region of a buffer object into host address
/// space, writing the mapped pointer into `buffer_ptr`.
///
/// # Safety
///
/// The mapped pointer is only valid until it is unmapped via
/// `enqueue_unmap_mem_object`; `offset`/`size` must lie within the buffer.
pub unsafe fn enqueue_map_buffer<T>(
&self,
buffer: &Buffer<T>,
blocking_map: cl_bool,
map_flags: cl_map_flags,
offset: size_t,
size: size_t,
buffer_ptr: &mut cl_mem,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_map_buffer(
self.queue,
buffer.get(),
blocking_map,
map_flags,
offset,
size,
buffer_ptr,
event_wait_list.len() as cl_uint,
// Null pointer when the wait list is empty.
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to map a region of an image object into host address
/// space, writing the mapped pointer into `image_ptr` and the row/slice
/// pitches into the supplied out-pointers.
///
/// # Safety
///
/// `origin`/`region` must each point to three `size_t` values, and the
/// pitch out-pointers must be valid; the mapped pointer is only valid until
/// it is unmapped.
pub unsafe fn enqueue_map_image(
&self,
image: &Image,
blocking_map: cl_bool,
map_flags: cl_map_flags,
origin: *const size_t,
region: *const size_t,
image_row_pitch: *mut size_t,
image_slice_pitch: *mut size_t,
image_ptr: &mut cl_mem,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_map_image(
self.queue,
image.get(),
blocking_map,
map_flags,
origin,
region,
image_row_pitch,
image_slice_pitch,
image_ptr,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to unmap a previously mapped region of `memobj`.
///
/// # Safety
///
/// `mapped_ptr` must be a pointer previously returned by a map command on
/// `memobj` that has not yet been unmapped.
pub unsafe fn enqueue_unmap_mem_object(
&self,
memobj: cl_mem,
mapped_ptr: *mut c_void,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_unmap_mem_object(
self.queue,
memobj,
mapped_ptr,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to migrate memory objects to the device associated with
/// this command-queue.
///
/// # Safety
///
/// `mem_objects` must point to `num_mem_objects` valid `cl_mem` handles.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub unsafe fn enqueue_migrate_mem_object(
&self,
num_mem_objects: cl_uint,
mem_objects: *const cl_mem,
flags: cl_mem_migration_flags,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_migrate_mem_object(
self.queue,
num_mem_objects,
mem_objects,
flags,
event_wait_list.len() as cl_uint,
// Null pointer when the wait list is empty.
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a memory-object migration via the `cl_ext_migrate_memobject`
/// extension.
///
/// # Safety
///
/// `mem_objects` must point to `num_mem_objects` valid `cl_mem` handles.
#[cfg(any(feature = "cl_ext_migrate_memobject", feature = "dynamic"))]
pub unsafe fn enqueue_migrate_mem_object_ext(
&self,
num_mem_objects: cl_uint,
mem_objects: *const cl_mem,
flags: ext::cl_mem_migration_flags_ext,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = ext::enqueue_migrate_mem_object_ext(
self.queue,
num_mem_objects,
mem_objects,
flags,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to execute `kernel` over an ND range of work-items.
///
/// # Safety
///
/// `global_work_offsets`, `global_work_sizes` and `local_work_sizes` must
/// each be null or point to `work_dim` `size_t` values, and all kernel
/// arguments must have been set before this call.
pub unsafe fn enqueue_nd_range_kernel(
&self,
kernel: cl_kernel,
work_dim: cl_uint,
global_work_offsets: *const size_t,
global_work_sizes: *const size_t,
local_work_sizes: *const size_t,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_nd_range_kernel(
self.queue,
kernel,
work_dim,
global_work_offsets,
global_work_sizes,
local_work_sizes,
event_wait_list.len() as cl_uint,
// Null pointer when the wait list is empty.
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to execute `kernel` as a single work-item task.
/// Deprecated in CL_VERSION_2_0 by enqueue_nd_range_kernel.
///
/// # Safety
///
/// All kernel arguments must have been set before this call.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
#[cfg_attr(
any(
feature = "CL_VERSION_2_0",
feature = "CL_VERSION_2_1",
feature = "CL_VERSION_2_2",
feature = "CL_VERSION_3_0"
),
deprecated(
since = "0.1.0",
note = "From CL_VERSION_2_0 use enqueue_nd_range_kernel"
)
)]
pub unsafe fn enqueue_task(
&self,
kernel: cl_kernel,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_task(
self.queue,
kernel,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to execute a native (host-callable) function.
///
/// NOTE(review): `args.len()` is forwarded as `cb_args`, which
/// clEnqueueNativeKernel defines as the size in *bytes* of the args block —
/// this looks suspect whenever `args` is more than one pointer; confirm
/// against the OpenCL specification before relying on it.
///
/// # Safety
///
/// `user_func` and every pointer in `args`/`args_mem_loc` must satisfy the
/// clEnqueueNativeKernel contract.
#[allow(clippy::as_ptr_cast_mut)]
pub unsafe fn enqueue_native_kernel(
&self,
user_func: Option<unsafe extern "C" fn(*mut c_void)>,
args: &[*mut c_void],
mem_list: &[cl_mem],
args_mem_loc: &[*const c_void],
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_native_kernel(
self.queue,
user_func,
args.as_ptr() as *mut c_void,
args.len() as size_t,
mem_list.len() as cl_uint,
if !mem_list.is_empty() {
mem_list.as_ptr()
} else {
ptr::null()
},
args_mem_loc.as_ptr(),
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a marker command that completes when all events in
/// `event_wait_list` (or, if empty, all previously enqueued commands) have
/// completed.
///
/// # Safety
///
/// Every handle in `event_wait_list` must be a valid event.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub unsafe fn enqueue_marker_with_wait_list(
&self,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_marker_with_wait_list(
self.queue,
event_wait_list.len() as cl_uint,
// Null pointer when the wait list is empty.
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a barrier command: subsequent commands on this queue do not start
/// until the wait list (or, if empty, all previously enqueued commands) has
/// completed.
///
/// # Safety
///
/// Every handle in `event_wait_list` must be a valid event.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub unsafe fn enqueue_barrier_with_wait_list(
&self,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_barrier_with_wait_list(
self.queue,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to free shared virtual memory pointers, optionally via
/// the user callback `pfn_free_func`.
///
/// # Safety
///
/// Each pointer in `svm_pointers` must have been allocated as SVM in this
/// context and must not be used after the free completes.
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
pub unsafe fn enqueue_svm_free(
&self,
svm_pointers: &[*const c_void],
pfn_free_func: Option<
unsafe extern "C" fn(
queue: cl_command_queue,
num_svm_pointers: cl_uint,
svm_pointers: *mut *mut c_void,
user_data: *mut c_void,
),
>,
user_data: *mut c_void,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_svm_free(
self.queue,
svm_pointers.len() as cl_uint,
svm_pointers.as_ptr(),
pfn_free_func,
user_data,
event_wait_list.len() as cl_uint,
// Null pointer when the wait list is empty.
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to copy `size` bytes from `src_ptr` to `dst_ptr`, where
/// either or both may be shared virtual memory.
///
/// # Safety
///
/// Both pointers must be valid for `size` bytes and the ranges must not
/// overlap; when `blocking_copy` is false they must stay valid until
/// completion.
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
pub unsafe fn enqueue_svm_mem_cpy(
&self,
blocking_copy: cl_bool,
dst_ptr: *mut c_void,
src_ptr: *const c_void,
size: size_t,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_svm_mem_cpy(
self.queue,
blocking_copy,
dst_ptr,
src_ptr,
size,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to fill `size` bytes of shared virtual memory at
/// `svm_ptr` with repeated copies of `pattern`.
///
/// # Safety
///
/// `svm_ptr` must be a valid SVM allocation of at least `size` bytes and
/// `size` must be a multiple of the pattern size.
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
pub unsafe fn enqueue_svm_mem_fill<T>(
&self,
svm_ptr: *mut c_void,
pattern: &[T],
size: size_t,
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_svm_mem_fill(
self.queue,
svm_ptr,
pattern.as_ptr() as *const c_void,
mem::size_of_val(pattern),
size,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
/// Enqueue a command to make the SVM region backing `svm` accessible to the
/// host with the access given by `flags`.
///
/// # Safety
///
/// `svm` must be backed by an SVM allocation from this context; the mapping
/// must later be released with `enqueue_svm_unmap`.
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
pub unsafe fn enqueue_svm_map<T>(
&self,
blocking_map: cl_bool,
flags: cl_map_flags,
svm: &mut [T],
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_svm_map(
self.queue,
blocking_map,
flags,
svm.as_mut_ptr() as *mut c_void,
mem::size_of_val(svm),
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
ptr::null()
},
)?;
Ok(Event::new(event))
}
}
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
#[allow(clippy::as_ptr_cast_mut)]
pub unsafe fn enqueue_svm_unmap<T>(
&self,
svm: &[T],
event_wait_list: &[cl_event],
) -> Result<Event> {
unsafe {
let event = enqueue_svm_unmap(
self.queue,
svm.as_ptr() as *mut c_void,
event_wait_list.len() as cl_uint,
if !event_wait_list.is_empty() {
event_wait_list.as_ptr()
} else {
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | true |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/src/lib.rs | src/lib.rs | // Copyright (c) 2020-2021 Via Technology Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! [](https://crates.io/crates/opencl3)
//! [](https://docs.rs/opencl3/)
//! [](https://www.khronos.org/registry/OpenCL/)
//! [](https://opensource.org/licenses/Apache-2.0)
//!
//! A Rust implementation of the Khronos [OpenCL](https://www.khronos.org/registry/OpenCL/)
//! API.
//!
//! # Description
//!
//! This crate provides a relatively simple, object based model of the OpenCL 3.0
//! [API](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html).
//! It is built upon the [cl3](https://crates.io/crates/cl3) crate, which
//! provides a functional interface to the OpenCL API.
//!
//! **OpenCL** (Open Computing Language) is a framework for general-purpose
//! parallel programming across heterogeneous devices including: CPUs, GPUs,
//! DSPs, FPGAs and other processors or hardware accelerators.
//!
//! It is often considered as an open-source alternative to Nvidia's proprietary
//! Compute Unified Device Architecture [CUDA](https://developer.nvidia.com/cuda-zone)
//! for performing General-purpose computing on GPUs, see
//! [GPGPU](https://en.wikipedia.org/wiki/General-purpose_computing_on_graphics_processing_units).
//!
//! The [OpenCL Specification](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#_the_opencl_architecture)
//! has evolved over time and not all device vendors support all OpenCL features.
//!
//! [OpenCL 3.0](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html)
//! is a unified specification that adds little new functionality to previous OpenCL versions.
//! It specifies that all **OpenCL 1.2** features are **mandatory**, while all
//! OpenCL 2.x and OpenCL 3.0 features are now optional.
//!
//! See [OpenCL Description](https://github.com/kenba/opencl3/blob/main/docs/opencl_description.md).
//!
//! # OpenCL Architecture
//!
//! The [OpenCL Specification](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#_the_opencl_architecture)
//! considers OpenCL as four models:
//!
//! * **Platform Model**
//! The physical OpenCL hardware: a *host* containing one or more OpenCL [platform]s,
//! each connected to one or more OpenCL [device]s.
//! An OpenCL application running on the *host*, creates an OpenCL environment
//! called a [context] on a single [platform] to process data on one or more
//! of the OpenCL [device]s connected to the [platform].
//!
//! * **Programming Model**
//! An OpenCL [program] consists of OpenCL [kernel] functions that can run
//! on OpenCL [device]s within a [context].
//! OpenCL [program]s must be created (and most must be built) for a [context]
//! before their OpenCL [kernel] functions can be created from them,
//! the exception being "built-in" [kernel]s which don't need to be built
//! (or compiled and linked).
//! OpenCL [kernel]s are controlled by an OpenCL application that runs on the
//! *host*, see **Execution Model**.
//!
//! * **Memory Model**
//! **OpenCL 1.2** memory is divided into two fundamental memory regions:
//! **host memory** and **device memory**.
//! OpenCL [kernel]s run on **device memory**; an OpenCL application must write
//! **host memory** to **device memory** for OpenCL [kernel]s to process.
//! An OpenCL application must also read results from **device memory** to
//! **host memory** after a [kernel] has completed execution.
//! **OpenCL 2.0** shared virtual memory ([svm]) is shared between the host
//! and device(s) and synchronised by OpenCL; eliminating the explicit transfer
//! of memory between host and device(s) memory regions.
//!
//! * **Execution Model**
//! An OpenCL application creates at least one OpenCL [command_queue] for each
//! OpenCL [device] (or *sub-device*) within its OpenCL [context].
//! OpenCL [kernel] executions and **OpenCL 1.2** memory reads and writes are
//! "enqueued" by the OpenCL application on each [command_queue].
//! An application can wait for all "enqueued" commands to finish on a
//! [command_queue] or it can wait for specific [event]s to complete.
//! Normally [command_queue]s run commands in the order that they are given.
//! However, [event]s can be used to execute [kernel]s out-of-order.
//!
//! # OpenCL Objects
//!
//! [Platform]: platform/struct.Platform.html
//! [Device]: device/struct.Device.html
//! [SubDevice]: device/struct.SubDevice.html
//! [Context]: context/struct.Context.html
//! [Program]: program/struct.Program.html
//! [Kernel]: kernel/struct.Kernel.html
//! [Buffer]: memory/struct.Buffer.html
//! [Image]: memory/struct.Image.html
//! [Sampler]: memory/struct.Sampler.html
//! [SvmVec]: svm/struct.SvmVec.html
//! [Pipe]: memory/struct.Pipe.html
//! [CommandQueue]: command_queue/struct.CommandQueue.html
//! [Event]: event/struct.Event.html
//!
//! ## Platform Model
//!
//! The platform model has three objects:
//! * [Platform]
//! * [Device]
//! * [Context]
//!
//! Of these three objects, the OpenCL [Context] is by *far* the most important.
//! Each application must create a [Context] from the most appropriate [Device]s
//! available on one of [Platform]s on the *host* system that the application
//! is running on.
//!
//! Most example OpenCL applications just choose the first available [Platform]
//! and [Device] for their [Context]. However, since many systems have multiple
//! platforms and devices, the first [Platform] and [Device] are unlikely to
//! provide the best performance.
//! For example, on a system with an APU (combined CPU and GPU, e.g. Intel i7)
//! and a discrete graphics card (e.g. Nvidia GTX 1070) OpenCL may find the
//! either the integrated GPU or the GPU on the graphics card first.
//!
//! OpenCL applications often require the performance of discrete graphics cards
//! or specific OpenCL features, such as [svm] or double/half floating point
//! precision. In such cases, it is necessary to query the [Platform]s and
//! [Device]s to choose the most appropriate [Device]s for the application before
//! creating the [Context].
//!
//! The [Platform] and [Device] modules contain structures and methods to simplify
//! querying the host system [Platform]s and [Device]s to create a [Context].
//!
//! ## Programming Model
//!
//! The OpenCL programming model has two objects:
//! * [Program]
//! * [Kernel]
//!
//! OpenCL [Kernel] functions are contained in OpenCL [Program]s.
//!
//! Kernels are usually defined as functions in OpenCL [Program] source code,
//! however OpenCL [Device]s may contain built-in [Kernel]s,
//! e.g.: some Intel GPUs have built-in motion estimation kernels.
//!
//! OpenCL [Program] objects can be created from OpenCL source code,
//! built-in kernels, binaries and intermediate language binaries.
//! Depending upon how an OpenCL [Program] object was created, it may need to
//! be built (or complied and linked) before the [Kernel]s in them can be
//! created.
//!
//! All the [Kernel]s in an [Program] can be created together or they can be
//! created individually, by name.
//!
//! ## Memory Model
//!
//! The OpenCL memory model consists of five objects:
//! * [Buffer]
//! * [Image]
//! * [Sampler]
//! * [SvmVec]
//! * [Pipe]
//!
//! [Buffer], [Image] and [Sampler] are OpenCL 1.2 (i.e. **mandatory**) objects,
//! [svm] and [Pipe] are OpenCL 2.0 (i.e. optional) objects.
//!
//! A [Buffer] is a contiguous block of memory used for general purpose data.
//! An [Image] holds data for one, two or three dimensional images.
//! A [Sampler] describes how a [Kernel] is to sample an [Image], see
//! [Sampler objects](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#_sampler_objects).
//!
//! [Shared Virtual Memory](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#shared-virtual-memory)
//! enables the host and kernels executing on devices to directly share data
//! without explicitly transferring it.
//!
//! [Pipes](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#_pipes)
//! store memory as FIFOs between [Kernel]s. [Pipe]s are not accessible from the host.
//!
//! ## Execution Model
//!
//! The OpenCL execution model has two objects:
//! * [CommandQueue]
//! * [Event]
//!
//! OpenCL commands to transfer memory and execute kernels on devices are
//! performed via [CommandQueue]s.
//!
//! Each OpenCL device (and sub-device) must have at least one command_queue
//! associated with it, so that commands may be enqueued on to the device.
//!
//! There are several OpenCL [CommandQueue] "enqueue_" methods to transfer
//! data between host and device memory, map SVM memory and execute kernels.
//! All the "enqueue_" methods accept an event_wait_list parameter and return
//! an [Event] that can be used to monitor and control *out-of-order* execution
//! of kernels on a [CommandQueue], see
//! [Event Objects](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#event-objects).
extern crate cl3;
// Command-buffer support is gated behind the cl_khr_command_buffer extension.
#[cfg(any(feature = "cl_khr_command_buffer", feature = "dynamic"))]
pub mod command_buffer;
pub mod command_queue;
pub mod context;
pub mod device;
pub mod event;
pub mod kernel;
pub mod memory;
pub mod platform;
pub mod program;
// Shared virtual memory is an OpenCL 2.0+ (optional) feature.
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
pub mod svm;
// Re-export cl3's error codes and FFI types under this crate's namespace so
// users don't need a direct cl3 dependency.
pub mod error_codes {
pub use cl3::error_codes::*;
}
pub mod types {
pub use cl3::types::*;
}
use std::result;
/// Custom Result type to output OpenCL error text.
pub type Result<T> = result::Result<T, error_codes::ClError>;
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/src/device.rs | src/device.rs | // Copyright (c) 2020-2024 Via Technology Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub use cl3::device::*;
pub use cl3::ext::cl_device_feature_capabilities_intel;
use super::Result;
use super::platform::get_platforms;
#[allow(unused_imports)]
use cl3::ext;
#[allow(unused_imports)]
use libc::{intptr_t, size_t};
/// Get the ids of all available devices of the given type.
///
/// Iterates over every OpenCL platform on the system and gathers the ids of
/// the devices of `device_type` found on each one.
///
/// returns a Result containing the device ids, or the error code from the
/// first failing OpenCL C API call.
pub fn get_all_devices(device_type: cl_device_type) -> Result<Vec<cl_device_id>> {
    let mut all_ids = Vec::<cl_device_id>::new();
    for platform in get_platforms()? {
        all_ids.extend(platform.get_devices(device_type)?);
    }
    Ok(all_ids)
}
/// An owned OpenCL sub-device handle, released via `clReleaseDevice` on drop.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
#[derive(Debug)]
pub struct SubDevice {
// The raw cl_device_id of the sub-device; owned (released on Drop).
id: cl_device_id,
}
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
impl From<cl_device_id> for SubDevice {
// Takes ownership of `id`: the SubDevice will release it on drop.
fn from(id: cl_device_id) -> Self {
Self { id }
}
}
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
impl From<SubDevice> for cl_device_id {
// NOTE(review): this moves the SubDevice but Drop still runs on it,
// releasing the returned id — confirm callers retain it first if needed.
fn from(value: SubDevice) -> Self {
value.id
}
}
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
impl Drop for SubDevice {
// Releases the sub-device. NOTE(review): `expect` panics in drop if the
// release fails, which aborts during unwinding — confirm this is intended.
fn drop(&mut self) {
unsafe { release_device(self.id()).expect("Error: clReleaseDevice") };
}
}
// SAFETY: a cl_device_id is an opaque handle; presumably the OpenCL runtime
// permits using it from any thread — TODO confirm against the spec.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
unsafe impl Send for SubDevice {}
// SAFETY: see the Send rationale above; shared references only read the handle.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
unsafe impl Sync for SubDevice {}
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
impl SubDevice {
// Wraps (and takes ownership of) a raw sub-device id.
pub const fn new(id: cl_device_id) -> Self {
Self { id }
}
/// Accessor for the underlying device id.
pub const fn id(&self) -> cl_device_id {
self.id
}
}
/// An OpenCL device id and methods to query it.
///
/// Unlike [SubDevice], this is a non-owning, `Copy` wrapper: dropping it does
/// not release the underlying device.
///
/// The query methods calls clGetDeviceInfo with the relevant param_name, see:
/// [Device Queries](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#device-queries-table).
#[derive(Copy, Clone, Debug)]
pub struct Device {
// The device handle stored as an integer so the type can be Copy/Send.
id: intptr_t,
}
impl From<cl_device_id> for Device {
fn from(value: cl_device_id) -> Self {
Self {
id: value as intptr_t,
}
}
}
impl From<Device> for cl_device_id {
fn from(value: Device) -> Self {
value.id as Self
}
}
// SAFETY: Device is a plain non-owning handle (an integer); presumably the
// OpenCL runtime permits querying a device from any thread — TODO confirm.
unsafe impl Send for Device {}
// SAFETY: shared references only read the immutable handle value.
unsafe impl Sync for Device {}
impl Device {
/// Wrap a raw device id. Non-owning: the id is not released on drop.
pub fn new(id: cl_device_id) -> Self {
Self { id: id as intptr_t }
}
/// Accessor for the underlying device id.
pub const fn id(&self) -> cl_device_id {
self.id as cl_device_id
}
/// Create sub-devices by partitioning an OpenCL device.
///
/// * `properties` - the slice of cl_device_partition_property, see
/// [Subdevice Partition](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#subdevice-partition-table).
///
/// returns a Result containing a vector of available SubDevices
/// or the error code from the OpenCL C API function.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub fn create_sub_devices(
&self,
properties: &[cl_device_partition_property],
) -> Result<Vec<SubDevice>> {
let sub_device_ids = create_sub_devices(self.id(), properties)?;
// Each returned id becomes an owning SubDevice (released on drop).
Ok(sub_device_ids
.iter()
.map(|id| SubDevice::new(*id))
.collect::<Vec<SubDevice>>())
}
/// Query the current device timer and host timer as a synchronised pair,
/// see clGetDeviceAndHostTimer. CL_VERSION_2_1 onwards.
#[cfg(any(feature = "CL_VERSION_2_1", feature = "dynamic"))]
#[inline]
pub fn get_device_and_host_timer(&self) -> Result<[cl_ulong; 2]> {
Ok(get_device_and_host_timer(self.id())?)
}
/// Query the current host clock as seen by the device, see clGetHostTimer.
/// CL_VERSION_2_1 onwards.
#[cfg(any(feature = "CL_VERSION_2_1", feature = "dynamic"))]
#[inline]
pub fn get_host_timer(&self) -> Result<cl_ulong> {
Ok(get_host_timer(self.id())?)
}
/// The OpenCL device type, see
/// [Device Types](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#device-types-table).
pub fn dev_type(&self) -> Result<cl_device_type> {
Ok(get_device_info(self.id(), CL_DEVICE_TYPE)?.into())
}
/// A unique device vendor identifier: a [PCI vendor ID](https://www.pcilookup.com/)
/// or a Khronos vendor ID if the vendor does not have a PCI vendor ID.
pub fn vendor_id(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_VENDOR_ID)?.into())
}
/// The number of parallel compute units on the device, minimum 1.
pub fn max_compute_units(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_COMPUTE_UNITS)?.into())
}
/// Maximum dimensions for global and local work-item IDs, minimum 3
/// if device is not CL_DEVICE_TYPE_CUSTOM.
pub fn max_work_item_dimensions(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS)?.into())
}
/// Maximum number of work-items for each dimension of a work-group,
/// minimum [1, 1, 1] if device is not CL_DEVICE_TYPE_CUSTOM.
pub fn max_work_group_size(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_WORK_GROUP_SIZE)?.into())
}
pub fn max_work_item_sizes(&self) -> Result<Vec<size_t>> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_WORK_ITEM_SIZES)?.into())
}
pub fn max_preferred_vector_width_char(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR)?.into())
}
pub fn max_preferred_vector_width_short(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT)?.into())
}
pub fn max_preferred_vector_width_int(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT)?.into())
}
pub fn max_preferred_vector_width_long(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG)?.into())
}
pub fn max_preferred_vector_width_float(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT)?.into())
}
pub fn max_preferred_vector_width_double(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE)?.into())
}
pub fn max_clock_frequency(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_CLOCK_FREQUENCY)?.into())
}
pub fn address_bits(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_ADDRESS_BITS)?.into())
}
pub fn max_read_image_args(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_READ_IMAGE_ARGS)?.into())
}
pub fn max_write_image_args(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_WRITE_IMAGE_ARGS)?.into())
}
pub fn max_mem_alloc_size(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_MEM_ALLOC_SIZE)?.into())
}
pub fn image2d_max_width(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_IMAGE2D_MAX_WIDTH)?.into())
}
pub fn image2d_max_height(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_IMAGE2D_MAX_HEIGHT)?.into())
}
pub fn image3d_max_width(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_IMAGE3D_MAX_WIDTH)?.into())
}
pub fn image3d_max_height(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_IMAGE3D_MAX_HEIGHT)?.into())
}
pub fn image3d_max_depth(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_IMAGE3D_MAX_DEPTH)?.into())
}
pub fn image_support(&self) -> Result<bool> {
Ok(cl_uint::from(get_device_info(self.id(), CL_DEVICE_IMAGE_SUPPORT)?) != CL_FALSE)
}
pub fn max_parameter_size(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_PARAMETER_SIZE)?.into())
}
pub fn max_device_samples(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_SAMPLERS)?.into())
}
pub fn mem_base_addr_align(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_MEM_BASE_ADDR_ALIGN)?.into())
}
pub fn min_data_type_align_size(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE)?.into())
}
pub fn single_fp_config(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_SINGLE_FP_CONFIG)?.into())
}
pub fn global_mem_cache_type(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_GLOBAL_MEM_CACHE_TYPE)?.into())
}
pub fn global_mem_cacheline_size(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE)?.into())
}
pub fn global_mem_cache_size(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_GLOBAL_MEM_CACHE_SIZE)?.into())
}
pub fn global_mem_size(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_GLOBAL_MEM_SIZE)?.into())
}
pub fn max_constant_buffer_size(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE)?.into())
}
pub fn max_constant_args(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_CONSTANT_ARGS)?.into())
}
pub fn local_mem_type(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_LOCAL_MEM_TYPE)?.into())
}
pub fn local_mem_size(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_LOCAL_MEM_SIZE)?.into())
}
pub fn error_correction_support(&self) -> Result<bool> {
Ok(cl_uint::from(get_device_info(
self.id(),
CL_DEVICE_ERROR_CORRECTION_SUPPORT,
)?) != CL_FALSE)
}
pub fn profiling_timer_resolution(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_PROFILING_TIMER_RESOLUTION)?.into())
}
pub fn endian_little(&self) -> Result<bool> {
Ok(cl_uint::from(get_device_info(self.id(), CL_DEVICE_ENDIAN_LITTLE)?) != CL_FALSE)
}
pub fn available(&self) -> Result<bool> {
Ok(cl_uint::from(get_device_info(self.id(), CL_DEVICE_AVAILABLE)?) != CL_FALSE)
}
pub fn compiler_available(&self) -> Result<bool> {
Ok(cl_uint::from(get_device_info(self.id(), CL_DEVICE_COMPILER_AVAILABLE)?) != CL_FALSE)
}
pub fn execution_capabilities(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_EXECUTION_CAPABILITIES)?.into())
}
pub fn queue_on_host_properties(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_QUEUE_ON_HOST_PROPERTIES)?.into())
}
pub fn name(&self) -> Result<String> {
Ok(get_device_info(self.id(), CL_DEVICE_NAME)?.into())
}
pub fn vendor(&self) -> Result<String> {
Ok(get_device_info(self.id(), CL_DEVICE_VENDOR)?.into())
}
pub fn driver_version(&self) -> Result<String> {
Ok(get_device_info(self.id(), CL_DRIVER_VERSION)?.into())
}
pub fn profile(&self) -> Result<String> {
Ok(get_device_info(self.id(), CL_DEVICE_PROFILE)?.into())
}
pub fn version(&self) -> Result<String> {
Ok(get_device_info(self.id(), CL_DEVICE_VERSION)?.into())
}
pub fn extensions(&self) -> Result<String> {
Ok(get_device_info(self.id(), CL_DEVICE_EXTENSIONS)?.into())
}
pub fn platform(&self) -> Result<cl_platform_id> {
Ok(intptr_t::from(get_device_info(self.id(), CL_DEVICE_PLATFORM)?) as cl_platform_id)
}
/// CL_VERSION_1_2
pub fn double_fp_config(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_DOUBLE_FP_CONFIG)?.into())
}
pub fn half_fp_config(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_HALF_FP_CONFIG)?.into())
}
pub fn preferred_vector_width_half(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF)?.into())
}
// DEPRECATED 2.0
pub fn host_unified_memory(&self) -> Result<bool> {
Ok(cl_uint::from(get_device_info(self.id(), CL_DEVICE_HOST_UNIFIED_MEMORY)?) != CL_FALSE)
}
pub fn native_vector_width_char(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR)?.into())
}
pub fn native_vector_width_short(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT)?.into())
}
pub fn native_vector_width_int(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_NATIVE_VECTOR_WIDTH_INT)?.into())
}
pub fn native_vector_width_long(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG)?.into())
}
pub fn native_vector_width_float(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT)?.into())
}
pub fn native_vector_width_double(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE)?.into())
}
pub fn native_vector_width_half(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF)?.into())
}
pub fn opencl_c_version(&self) -> Result<String> {
Ok(get_device_info(self.id(), CL_DEVICE_OPENCL_C_VERSION)?.into())
}
/// CL_VERSION_1_2
pub fn linker_available(&self) -> Result<bool> {
Ok(cl_uint::from(get_device_info(self.id(), CL_DEVICE_LINKER_AVAILABLE)?) != CL_FALSE)
}
/// CL_VERSION_1_2
pub fn built_in_kernels(&self) -> Result<String> {
Ok(get_device_info(self.id(), CL_DEVICE_BUILT_IN_KERNELS)?.into())
}
/// CL_VERSION_1_2
pub fn image_max_buffer_size(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_IMAGE_MAX_BUFFER_SIZE)?.into())
}
/// CL_VERSION_1_2
pub fn image_max_array_size(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_IMAGE_MAX_ARRAY_SIZE)?.into())
}
/// CL_VERSION_1_2
pub fn parent_device(&self) -> Result<cl_device_id> {
Ok(intptr_t::from(get_device_info(self.id(), CL_DEVICE_PARENT_DEVICE)?) as cl_device_id)
}
/// CL_VERSION_1_2
pub fn partition_max_sub_devices(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PARTITION_MAX_SUB_DEVICES)?.into())
}
/// CL_VERSION_1_2
pub fn partition_properties(&self) -> Result<Vec<intptr_t>> {
Ok(get_device_info(self.id(), CL_DEVICE_PARTITION_PROPERTIES)?.into())
}
/// CL_VERSION_1_2
pub fn partition_affinity_domain(&self) -> Result<Vec<cl_ulong>> {
Ok(get_device_info(self.id(), CL_DEVICE_PARTITION_AFFINITY_DOMAIN)?.into())
}
/// CL_VERSION_1_2
pub fn partition_type(&self) -> Result<Vec<intptr_t>> {
Ok(get_device_info(self.id(), CL_DEVICE_PARTITION_TYPE)?.into())
}
/// CL_VERSION_1_2
pub fn reference_count(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_REFERENCE_COUNT)?.into())
}
/// CL_VERSION_1_2
pub fn preferred_interop_user_sync(&self) -> Result<bool> {
Ok(cl_uint::from(get_device_info(
self.id(),
CL_DEVICE_PREFERRED_INTEROP_USER_SYNC,
)?) != CL_FALSE)
}
/// CL_VERSION_1_2
pub fn printf_buffer_size(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_PRINTF_BUFFER_SIZE)?.into())
}
/// CL_VERSION_2_0
pub fn image_pitch_alignment(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_IMAGE_PITCH_ALIGNMENT)?.into())
}
/// CL_VERSION_2_0
pub fn image_base_address_alignment(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT)?.into())
}
/// CL_VERSION_2_0
pub fn max_read_write_image_args(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_READ_WRITE_IMAGE_ARGS)?.into())
}
/// CL_VERSION_2_0
pub fn max_global_variable_size(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_GLOBAL_VARIABLE_SIZE)?.into())
}
/// CL_VERSION_2_0
pub fn queue_on_device_properties(&self) -> Result<Vec<intptr_t>> {
Ok(get_device_info(self.id(), CL_DEVICE_QUEUE_ON_DEVICE_PROPERTIES)?.into())
}
/// CL_VERSION_2_0
pub fn queue_on_device_preferred_size(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_QUEUE_ON_DEVICE_PREFERRED_SIZE)?.into())
}
/// CL_VERSION_2_0
pub fn queue_on_device_max_size(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_QUEUE_ON_DEVICE_MAX_SIZE)?.into())
}
/// CL_VERSION_2_0
pub fn max_on_device_queues(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_ON_DEVICE_QUEUES)?.into())
}
/// CL_VERSION_2_0
pub fn max_on_device_events(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_ON_DEVICE_EVENTS)?.into())
}
/// CL_VERSION_2_0
pub fn svm_capabilities(&self) -> Result<cl_device_svm_capabilities> {
Ok(get_device_info(self.id(), CL_DEVICE_SVM_CAPABILITIES)?.into())
}
/// CL_VERSION_2_0
pub fn global_variable_preferred_total_size(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_GLOBAL_VARIABLE_PREFERRED_TOTAL_SIZE)?.into())
}
/// CL_VERSION_2_0
pub fn max_pipe_args(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_PIPE_ARGS)?.into())
}
/// CL_VERSION_2_0
pub fn pipe_max_active_reservations(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PIPE_MAX_ACTIVE_RESERVATIONS)?.into())
}
/// CL_VERSION_2_0
pub fn pipe_max_packet_size(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PIPE_MAX_PACKET_SIZE)?.into())
}
/// CL_VERSION_2_0
pub fn preferred_platform_atomic_alignment(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PREFERRED_PLATFORM_ATOMIC_ALIGNMENT)?.into())
}
/// CL_VERSION_2_0
pub fn preferred_global_atomic_alignment(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PREFERRED_GLOBAL_ATOMIC_ALIGNMENT)?.into())
}
/// CL_VERSION_2_0
pub fn preferred_local_atomic_alignment(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PREFERRED_LOCAL_ATOMIC_ALIGNMENT)?.into())
}
/// CL_VERSION_2_1
pub fn il_version(&self) -> Result<String> {
Ok(get_device_info(self.id(), CL_DEVICE_IL_VERSION)?.into())
}
/// CL_VERSION_2_1
pub fn max_num_sub_groups(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_NUM_SUB_GROUPS)?.into())
}
/// CL_VERSION_2_1
pub fn sub_group_independent_forward_progress(&self) -> Result<bool> {
Ok(cl_uint::from(get_device_info(
self.id(),
CL_DEVICE_SUB_GROUP_INDEPENDENT_FORWARD_PROGRESS,
)?) != CL_FALSE)
}
/// CL_VERSION_3_0
pub fn numeric_version(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_NUMERIC_VERSION)?.into())
}
/// CL_VERSION_3_0
pub fn extensions_with_version(&self) -> Result<Vec<cl_name_version>> {
Ok(get_device_info(self.id(), CL_DEVICE_EXTENSIONS_WITH_VERSION)?.into())
}
/// CL_VERSION_3_0
pub fn ils_with_version(&self) -> Result<Vec<cl_name_version>> {
Ok(get_device_info(self.id(), CL_DEVICE_ILS_WITH_VERSION)?.into())
}
/// CL_VERSION_3_0
pub fn built_in_kernels_with_version(&self) -> Result<Vec<cl_name_version>> {
Ok(get_device_info(self.id(), CL_DEVICE_BUILT_IN_KERNELS_WITH_VERSION)?.into())
}
/// CL_VERSION_3_0
pub fn atomic_memory_capabilities(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_ATOMIC_MEMORY_CAPABILITIES)?.into())
}
/// CL_VERSION_3_0
pub fn atomic_fence_capabilities(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_ATOMIC_FENCE_CAPABILITIES)?.into())
}
/// CL_VERSION_3_0
pub fn non_uniform_work_group_support(&self) -> Result<bool> {
Ok(cl_uint::from(get_device_info(
self.id(),
CL_DEVICE_NON_UNIFORM_WORK_GROUP_SUPPORT,
)?) != CL_FALSE)
}
/// CL_VERSION_3_0
pub fn opencl_c_all_versions(&self) -> Result<Vec<cl_name_version>> {
Ok(get_device_info(self.id(), CL_DEVICE_OPENCL_C_ALL_VERSIONS)?.into())
}
/// CL_VERSION_3_0
pub fn preferred_work_group_size_multiple(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_PREFERRED_WORK_GROUP_SIZE_MULTIPLE)?.into())
}
/// CL_VERSION_3_0
pub fn work_group_collective_functions_support(&self) -> Result<bool> {
Ok(cl_uint::from(get_device_info(
self.id(),
CL_DEVICE_WORK_GROUP_COLLECTIVE_FUNCTIONS_SUPPORT,
)?) != CL_FALSE)
}
/// CL_VERSION_3_0
pub fn generic_address_space_support(&self) -> Result<bool> {
Ok(cl_uint::from(get_device_info(
self.id(),
CL_DEVICE_GENERIC_ADDRESS_SPACE_SUPPORT,
)?) != CL_FALSE)
}
/// CL_VERSION_3_0
pub fn uuid_khr(&self) -> Result<[u8; CL_UUID_SIZE_KHR]> {
Ok(get_device_info(self.id(), CL_DEVICE_UUID_KHR)?.into())
}
/// CL_VERSION_3_0
pub fn driver_uuid_khr(&self) -> Result<[u8; CL_UUID_SIZE_KHR]> {
Ok(get_device_info(self.id(), CL_DRIVER_UUID_KHR)?.into())
}
/// CL_VERSION_3_0
pub fn luid_valid_khr(&self) -> Result<bool> {
Ok(cl_uint::from(get_device_info(self.id(), CL_DEVICE_LUID_VALID_KHR)?) != CL_FALSE)
}
/// CL_VERSION_3_0
pub fn luid_khr(&self) -> Result<[u8; CL_LUID_SIZE_KHR]> {
Ok(get_device_info(self.id(), CL_DEVICE_LUID_KHR)?.into())
}
/// CL_VERSION_3_0
pub fn node_mask_khr(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_NODE_MASK_KHR)?.into())
}
/// CL_VERSION_3_0
pub fn opencl_c_features(&self) -> Result<Vec<cl_name_version>> {
Ok(get_device_info(self.id(), CL_DEVICE_OPENCL_C_FEATURES)?.into())
}
/// CL_VERSION_3_0
pub fn device_enqueue_capabilities(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_DEVICE_ENQUEUE_CAPABILITIES)?.into())
}
/// CL_VERSION_3_0
pub fn pipe_support(&self) -> Result<bool> {
Ok(cl_uint::from(get_device_info(self.id(), CL_DEVICE_PIPE_SUPPORT)?) != CL_FALSE)
}
/// CL_VERSION_3_0
pub fn latest_conformance_version_passed(&self) -> Result<String> {
Ok(get_device_info(self.id(), CL_DEVICE_LATEST_CONFORMANCE_VERSION_PASSED)?.into())
}
pub fn integer_dot_product_capabilities_khr(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_INTEGER_DOT_PRODUCT_CAPABILITIES_KHR)?.into())
}
pub fn integer_dot_product_acceleration_properties_8bit_khr(
&self,
) -> Result<cl_device_integer_dot_product_acceleration_properties_khr> {
let value: Vec<u8> = get_device_info(
self.id(),
CL_DEVICE_INTEGER_DOT_PRODUCT_ACCELERATION_PROPERTIES_8BIT_KHR,
)?
.into();
Ok(get_device_integer_dot_product_acceleration_properties_khr(
&value,
))
}
pub fn integer_dot_product_acceleration_properties_4x8bit_packed_khr(
&self,
) -> Result<cl_device_integer_dot_product_acceleration_properties_khr> {
let value: Vec<u8> = get_device_info(
self.id(),
CL_DEVICE_INTEGER_DOT_PRODUCT_ACCELERATION_PROPERTIES_4x8BIT_PACKED_KHR,
)?
.into();
Ok(get_device_integer_dot_product_acceleration_properties_khr(
&value,
))
}
pub fn compute_capability_major_nv(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV)?.into())
}
pub fn compute_capability_minor_nv(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV)?.into())
}
pub fn registers_per_block_nv(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_REGISTERS_PER_BLOCK_NV)?.into())
}
    /// The NVIDIA warp size (`CL_DEVICE_WARP_SIZE_NV`).
    /// NOTE(review): the method name has a typo ("wrap" vs "warp"); it is
    /// kept as-is for backward compatibility with existing callers.
    pub fn wrap_size_nv(&self) -> Result<cl_uint> {
        Ok(get_device_info(self.id(), CL_DEVICE_WARP_SIZE_NV)?.into())
    }
pub fn gpu_overlap_nv(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_GPU_OVERLAP_NV)?.into())
}
pub fn compute_kernel_exec_timeout_nv(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV)?.into())
}
pub fn integrated_memory_nv(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_INTEGRATED_MEMORY_NV)?.into())
}
pub fn pci_bus_id_nv(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PCI_BUS_ID_NV)?.into())
}
pub fn pci_slot_id_nv(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PCI_SLOT_ID_NV)?.into())
}
pub fn profiling_timer_offset_amd(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_PROFILING_TIMER_OFFSET_AMD)?.into())
}
pub fn topology_amd(&self) -> Result<cl_amd_device_topology> {
let value: Vec<u8> = get_device_info(self.id(), CL_DEVICE_TOPOLOGY_AMD)?.into();
Ok(get_amd_device_topology(&value))
}
pub fn pci_bus_id_amd(&self) -> Result<cl_uint> {
let value = self.topology_amd()?;
Ok(value.bus as cl_uint)
}
pub fn pcibusinfokhr_intel(&self) -> Result<cl_device_pci_bus_info_khr> {
let value: Vec<u8> = get_device_info(self.id(), CL_DEVICE_PCI_BUS_INFO_KHR)?.into();
Ok(get_device_pci_bus_info_khr(&value))
}
pub fn pci_bus_id_intel(&self) -> Result<cl_uint> {
let value = self.pcibusinfokhr_intel()?;
Ok(value.pci_bus as cl_uint)
}
pub fn board_name_amd(&self) -> Result<String> {
Ok(get_device_info(self.id(), CL_DEVICE_BOARD_NAME_AMD)?.into())
}
pub fn global_free_memory_amd(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_GLOBAL_FREE_MEMORY_AMD)?.into())
}
pub fn simd_per_compute_unit_amd(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD)?.into())
}
pub fn simd_width_amd(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_SIMD_WIDTH_AMD)?.into())
}
pub fn simd_instruction_width_amd(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD)?.into())
}
pub fn wavefront_width_amd(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_WAVEFRONT_WIDTH_AMD)?.into())
}
pub fn global_mem_channels_amd(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD)?.into())
}
pub fn global_mem_channel_banks_amd(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD)?.into())
}
pub fn global_mem_channel_bank_width_amd(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD)?.into())
}
pub fn local_mem_size_per_compute_unit_amd(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD)?.into())
}
pub fn local_mem_banks_amd(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_LOCAL_MEM_BANKS_AMD)?.into())
}
pub fn thread_trace_supported_amd(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_THREAD_TRACE_SUPPORTED_AMD)?.into())
}
pub fn gfxip_major_amd(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_GFXIP_MAJOR_AMD)?.into())
}
pub fn gfxip_minor_amd(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_GFXIP_MINOR_AMD)?.into())
}
pub fn available_async_queues_amd(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_AVAILABLE_ASYNC_QUEUES_AMD)?.into())
}
pub fn preferred_work_group_size_amd(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_PREFERRED_WORK_GROUP_SIZE_AMD)?.into())
}
pub fn max_work_group_size_amd(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_MAX_WORK_GROUP_SIZE_AMD)?.into())
}
pub fn preferred_constant_buffer_size_amd(&self) -> Result<size_t> {
Ok(get_device_info(self.id(), CL_DEVICE_PREFERRED_CONSTANT_BUFFER_SIZE_AMD)?.into())
}
pub fn pcie_id_amd(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_PCIE_ID_AMD)?.into())
}
pub fn device_ip_version_intel(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_IP_VERSION_INTEL)?.into())
}
pub fn device_id_intel(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_ID_INTEL)?.into())
}
pub fn device_num_slices_intel(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_NUM_SLICES_INTEL)?.into())
}
pub fn device_num_sub_slices_per_slice_intel(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_NUM_SUB_SLICES_PER_SLICE_INTEL)?.into())
}
pub fn device_num_eus_per_sub_slice_intel(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_NUM_EUS_PER_SUB_SLICE_INTEL)?.into())
}
pub fn device_num_threads_per_eu_intel(&self) -> Result<cl_uint> {
Ok(get_device_info(self.id(), CL_DEVICE_NUM_THREADS_PER_EU_INTEL)?.into())
}
pub fn device_feature_capabilities_intel(
&self,
) -> Result<cl_device_feature_capabilities_intel> {
Ok(get_device_info(self.id(), CL_DEVICE_FEATURE_CAPABILITIES_INTEL)?.into())
}
pub fn device_external_memory_import_handle_types_khr(&self) -> Result<Vec<u32>> {
Ok(get_device_info(self.id(), CL_DEVICE_EXTERNAL_MEMORY_IMPORT_HANDLE_TYPES_KHR)?.into())
}
pub fn device_semaphore_import_handle_types_khr(&self) -> Result<Vec<u32>> {
Ok(get_device_info(self.id(), CL_DEVICE_SEMAPHORE_IMPORT_HANDLE_TYPES_KHR)?.into())
}
pub fn device_semaphore_export_handle_types_khr(&self) -> Result<Vec<u32>> {
Ok(get_device_info(self.id(), CL_DEVICE_SEMAPHORE_EXPORT_HANDLE_TYPES_KHR)?.into())
}
pub fn device_semaphore_types_khr(&self) -> Result<Vec<u32>> {
Ok(get_device_info(self.id(), CL_DEVICE_SEMAPHORE_TYPES_KHR)?.into())
}
pub fn device_command_buffer_capabilities_khr(&self) -> Result<cl_ulong> {
Ok(get_device_info(self.id(), CL_DEVICE_COMMAND_BUFFER_CAPABILITIES_KHR)?.into())
}
pub fn device_command_buffer_required_queue_properties_khr(&self) -> Result<cl_ulong> {
Ok(get_device_info(
self.id(),
CL_DEVICE_COMMAND_BUFFER_REQUIRED_QUEUE_PROPERTIES_KHR,
)?
.into())
}
/// Get data about an OpenCL device.
/// Calls clGetDeviceInfo to get the desired data about the device.
pub fn get_data(&self, param_name: cl_device_info) -> Result<Vec<u8>> {
Ok(get_device_data(self.id(), param_name)?)
}
    /// Determine if the device supports the given half floating point capability.
    /// Returns true if the device supports it, false otherwise.
    ///
    /// Returns false when the `CL_DEVICE_HALF_FP_CONFIG` query itself fails.
    /// NOTE(review): the test is "any bit of `min_fp_capability` is set",
    /// not "all bits are set" — with a multi-bit mask a partial match already
    /// returns true; confirm that is the intended semantics.
    pub fn supports_half(&self, min_fp_capability: cl_device_fp_config) -> bool {
        self.half_fp_config()
            .is_ok_and(|fp| 0 < fp & min_fp_capability)
    }
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | true |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/src/program.rs | src/program.rs | // Copyright (c) 2020-2025 Via Technology Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(clippy::missing_safety_doc)]
pub use cl3::program::*;
use super::context::Context;
use super::Result;
#[allow(unused_imports)]
use cl3::error_codes::CL_BUILD_PROGRAM_FAILURE;
#[allow(unused_imports)]
use cl3::ext;
#[allow(unused_imports)]
use libc::{c_void, intptr_t, size_t};
#[allow(unused_imports)]
use std::ffi::{CStr, CString};
use std::ptr;
use std::result;
// Compile, link and build options.
// These options can be passed to Program::compile, Program::link or Program::build, see:
// [Compiler Options](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#compiler-options)
// [Linker Options](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#linker-options)
// [Build Options](https://man.opencl.org/clBuildProgram.html)
// Note: the options have a trailing space so that they can be concatenated.
// Math Intrinsics Options
/// `-cl-single-precision-constant` compiler option.
pub const CL_SINGLE_PRECISION_CONSTANT: &str = "-cl-single-precision-constant ";
/// Misspelled legacy name; kept for backward compatibility.
#[deprecated(note = "misspelled; use `CL_SINGLE_PRECISION_CONSTANT` instead")]
pub const CL_SINGLE_RECISION_CONSTANT: &str = "-cl-single-precision-constant ";
pub const CL_DENORMS_ARE_ZERO: &str = "-cl-denorms-are-zero ";
pub const CL_FP_CORRECTLY_ROUNDED_DIVIDE_SQRT: &str = "-cl-fp32-correctly-rounded-divide-sqrt ";
// Optimization Options
pub const CL_OPT_DISABLE: &str = "-cl-opt-disable ";
pub const CL_STRICT_ALIASING: &str = "-cl-strict-aliasing ";
pub const CL_UNIFORM_WORK_GROUP_SIZE: &str = "-cl-uniform-work-group-size ";
/// Note: despite the name, this is the `-cl-no-subgroup-ifp` option
/// (no sub-group independent forward progress), not "sub-group info".
pub const CL_NO_SUBGROUP_INFO: &str = "-cl-no-subgroup-ifp ";
pub const CL_MAD_ENABLE: &str = "-cl-mad-enable ";
pub const CL_NO_SIGNED_ZEROS: &str = "-cl-no-signed-zeros ";
pub const CL_UNSAFE_MATH_OPTIMIZATIONS: &str = "-cl-unsafe-math-optimizations ";
pub const CL_FINITE_MATH_ONLY: &str = "-cl-finite-math-only ";
pub const CL_FAST_RELAXED_MATH: &str = "-cl-fast-relaxed-math ";
// OpenCL C version Options
/// Applications are required to specify the -cl-std=CL2.0 build option to
/// compile or build programs with OpenCL C 2.0.
pub const CL_STD_2_0: &str = "-cl-std=CL2.0 ";
/// Applications are required to specify the -cl-std=CL3.0 build option to
/// compile or build programs with OpenCL C 3.0.
pub const CL_STD_3_0: &str = "-cl-std=CL3.0 ";
/// This option allows the compiler to store information about the
/// arguments of kernels in the program executable.
pub const CL_KERNEL_ARG_INFO: &str = "-cl-kernel-arg-info ";
pub const DEBUG_OPTION: &str = "-g ";
// Options enabled by the cl_khr_spir extension
pub const BUILD_OPTION_X_SPIR: &str = "-x spir ";
pub const BUILD_OPTION_SPIR_STD_1_2: &str = "-spir-std=1.2 ";
// Link and build options.
pub const CREATE_LIBRARY: &str = "-create-library ";
pub const ENABLE_LINK_OPTIONS: &str = "-enable-link-options ";
/// An OpenCL program object.
/// Stores the names of the OpenCL kernels in the program.
/// Implements the Drop trait to call release_program when the object is dropped.
#[derive(Debug)]
pub struct Program {
    program: cl_program,   // owned handle, released in `Drop`
    kernel_names: String,  // semicolon-separated kernel names; empty until built
}
impl From<Program> for cl_program {
    /// Consumes the `Program` and transfers ownership of the raw handle
    /// to the caller.
    ///
    /// The wrapper is forgotten so its `Drop` impl does not run.
    /// Previously this conversion returned the handle *while also* letting
    /// `Drop` call `clReleaseProgram`, handing the caller a potentially
    /// dangling handle. The caller is now responsible for releasing the
    /// returned `cl_program`.
    fn from(mut value: Program) -> Self {
        // Free the owned kernel-name String now, so that skipping `Drop`
        // below leaks nothing; only the program release must be skipped.
        drop(std::mem::take(&mut value.kernel_names));
        let program = value.program;
        std::mem::forget(value);
        program as Self
    }
}
impl Drop for Program {
    /// Releases the program via `clReleaseProgram`.
    /// Panics if the release fails, since `drop` cannot return an error.
    fn drop(&mut self) {
        unsafe { release_program(self.program).expect("Error: clReleaseProgram") };
    }
}
// SAFETY: the wrapped cl_program handle is only used through the OpenCL API.
// NOTE(review): assumes cl_program handles may be used from any thread, per
// the OpenCL spec's thread-safety guarantees — confirm.
unsafe impl Send for Program {}
unsafe impl Sync for Program {}
impl Program {
fn new(program: cl_program, kernel_names: &str) -> Self {
Self {
program,
kernel_names: kernel_names.to_owned(),
}
}
/// Get the underlying OpenCL cl_program.
pub const fn get(&self) -> cl_program {
self.program
}
/// Get the names of the OpenCL kernels in the Program, in a string
/// separated by semicolons.
#[allow(clippy::missing_const_for_fn)]
pub fn kernel_names(&self) -> &str {
&self.kernel_names
}
    /// Create a Program for a context and load source code into that object.
    ///
    /// * `context` - a valid OpenCL context.
    /// * `sources` - an array of strs containing the source code strings.
    ///
    /// The returned `Program` has an empty `kernel_names` string until it
    /// is built.
    ///
    /// returns a Result containing the new Program
    /// or the error code from the OpenCL C API function.
    pub fn create_from_sources(context: &Context, sources: &[&str]) -> Result<Self> {
        Ok(Self::new(
            create_program_with_source(context.get(), sources)?,
            "",
        ))
    }
/// Create a Program for a context and load a source code string into that object.
///
/// * `context` - a valid OpenCL context.
/// * `src` - a str containing a source code string.
///
/// returns a Result containing the new Program
/// or the error code from the OpenCL C API function.
pub fn create_from_source(context: &Context, src: &str) -> Result<Self> {
let sources = [src];
Ok(Self::new(
create_program_with_source(context.get(), &sources)?,
"",
))
}
    /// Create a Program for a context and load binary bits into that object.
    ///
    /// * `context` - a valid OpenCL context.
    /// * `devices` - a slice of devices that are in context.
    /// * `binaries` - a slice of program binaries slices.
    ///   NOTE(review): presumably `binaries[i]` is loaded for `devices[i]`,
    ///   per clCreateProgramWithBinary — confirm.
    ///
    /// returns a Result containing the new Program
    /// or the error code from the OpenCL C API function.
    ///
    /// # Safety
    ///
    /// This is unsafe when a device is not a member of context.
    pub unsafe fn create_from_binary(
        context: &Context,
        devices: &[cl_device_id],
        binaries: &[&[u8]],
    ) -> Result<Self> {
        unsafe {
            Ok(Self::new(
                create_program_with_binary(context.get(), devices, binaries)?,
                "",
            ))
        }
    }
/// Create a Program for a context and loads the information related to
/// the built-in kernels into that object.
///
/// * `context` - a valid OpenCL context.
/// * `devices` - a slice of devices that are in context.
/// * `kernel_names` - a semi-colon separated list of built-in kernel names.
///
/// returns a Result containing the new Program
/// or the error code from the OpenCL C API function.
///
/// # Safety
///
/// This is unsafe when a device is not a member of context.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub unsafe fn create_from_builtin_kernels(
context: &Context,
devices: &[cl_device_id],
kernel_names: &str,
) -> Result<Self> {
unsafe {
// Ensure options string is null terminated
let c_names = CString::new(kernel_names)
.expect("Program::create_from_builtin_kernels, invalid kernel_names");
Ok(Self::new(
create_program_with_builtin_kernels(context.get(), devices, &c_names)?,
kernel_names,
))
}
}
/// Create a Program for a context and load code in an intermediate language
/// into that object.
/// CL_VERSION_2_1
///
/// * `context` - a valid OpenCL context.
/// * `il` - a slice of program intermediate language code.
///
/// returns a Result containing the new Program
/// or the error code from the OpenCL C API function.
#[cfg(any(feature = "CL_VERSION_2_1", feature = "dynamic"))]
pub fn create_from_il(context: &Context, il: &[u8]) -> Result<Self> {
Ok(Self::new(create_program_with_il(context.get(), il)?, ""))
}
#[cfg(any(feature = "cl_khr_il_program", feature = "dynamic"))]
pub fn create_from_il_khr(context: &Context, il: &[u8]) -> Result<Self> {
Ok(Self::new(
ext::create_program_with_il_khr(context.get(), il)?,
"",
))
}
    /// Build (compile & link) a Program.
    ///
    /// On success the program's kernel name list is queried and cached in
    /// `kernel_names`.
    ///
    /// * `devices` - a slice of devices that are in context.
    /// * `options` - the build options in a null-terminated string.
    ///
    /// Note: the `clBuildProgram` notification callback and its user data are
    /// not exposed by this wrapper; they are passed as `None` and null.
    ///
    /// # Panics
    ///
    /// Panics if `options` contains an interior NUL byte.
    ///
    /// returns a null Result
    /// or the error code from the OpenCL C API function.
    pub fn build(&mut self, devices: &[cl_device_id], options: &str) -> Result<()> {
        // Ensure options string is null terminated
        let c_options = CString::new(options).expect("Program::build, invalid options");
        build_program(self.program, devices, &c_options, None, ptr::null_mut())?;
        self.kernel_names = self.get_kernel_names()?;
        Ok(())
    }
/// Create and build an OpenCL Program from an array of source code strings
/// with the given options.
///
/// * `context` - a valid OpenCL context.
/// * `sources` - an array of strs containing the source code strings.
/// * `options` - the build options in a null-terminated string.
///
/// returns a Result containing the new Program, the name of the error code
/// from the OpenCL C API function or the build log, if the build failed.
pub fn create_and_build_from_sources(
context: &Context,
sources: &[&str],
options: &str,
) -> result::Result<Self, String> {
let mut program = Self::create_from_sources(context, sources).map_err(String::from)?;
match program.build(context.devices(), options) {
Ok(_) => Ok(program),
Err(e) => {
if CL_BUILD_PROGRAM_FAILURE == e.0 {
let log = program
.get_build_log(context.devices()[0])
.map_err(String::from)?;
Err(String::from(e) + ", build log: " + &log)
} else {
Err(String::from(e))
}
}
}
}
/// Create and build an OpenCL Program from source code with the given options.
///
/// * `context` - a valid OpenCL context.
/// * `src` - a str containing a source code string.
/// * `options` - the build options in a null-terminated string.
///
/// returns a Result containing the new Program, the name of the error code
/// from the OpenCL C API function or the build log, if the build failed.
pub fn create_and_build_from_source(
context: &Context,
src: &str,
options: &str,
) -> result::Result<Self, String> {
let sources = [src];
Self::create_and_build_from_sources(context, &sources, options)
}
    /// Create and build an OpenCL Program from binaries with the given options.
    ///
    /// * `context` - a valid OpenCL context.
    /// * `binaries` - a slice of program binaries slices.
    /// * `options` - the build options in a null-terminated string.
    ///
    /// returns a Result containing the new Program
    /// or the error code from the OpenCL C API function.
    pub fn create_and_build_from_binary(
        context: &Context,
        binaries: &[&[u8]],
        options: &str,
    ) -> Result<Self> {
        // SAFETY: the device list is queried from `context` itself, so every
        // device passed to `create_from_binary` is a member of the context.
        let mut program =
            unsafe { Self::create_from_binary(context, context.devices(), binaries)? };
        program.build(context.devices(), options)?;
        Ok(program)
    }
/// Create and build an OpenCL Program from intermediate language with the
/// given options.
/// CL_VERSION_2_1
///
/// * `context` - a valid OpenCL context.
/// * `il` - a slice of program intermediate language code.
/// * `options` - the build options in a null-terminated string.
///
/// returns a Result containing the new `Program`
/// or the error code from the OpenCL C API function.
#[cfg(any(feature = "CL_VERSION_2_1", feature = "dynamic"))]
pub fn create_and_build_from_il(context: &Context, il: &[u8], options: &str) -> Result<Self> {
let mut program = Self::create_from_il(context, il)?;
program.build(context.devices(), options)?;
Ok(program)
}
/// Compile a program’s source for the devices the OpenCL context associated
/// with the program.
///
/// * `devices` - a slice of devices that are in context.
/// * `options` - the compilation options in a null-terminated string.
/// * `input_headers` - a slice of programs that describe headers in the input_headers.
/// * `header_include_names` - an array that has a one to one correspondence with
/// input_headers.
///
/// returns a null Result
/// or the error code from the OpenCL C API function.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub fn compile(
&mut self,
devices: &[cl_device_id],
options: &str,
input_headers: &[cl_program],
header_include_names: &[&CStr],
) -> Result<()> {
// Ensure options string is null terminated
let c_options = CString::new(options).expect("Program::compile, invalid options");
Ok(compile_program(
self.program,
devices,
&c_options,
input_headers,
header_include_names,
None,
ptr::null_mut(),
)?)
}
/// Link a set of compiled program objects and libraries for the devices in the
/// OpenCL context associated with the program.
///
/// * `devices` - a slice of devices that are in context.
/// * `options` - the link options in a null-terminated string.
/// * `input_programs` - a slice of programs that describe headers in the input_headers.
///
/// returns a null Result
/// or the error code from the OpenCL C API function.
///
/// # Safety
///
/// This is unsafe when a device is not a member of context.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub unsafe fn link(
&mut self,
devices: &[cl_device_id],
options: &str,
input_programs: &[cl_program],
) -> Result<()> {
unsafe {
// Ensure options string is null terminated
let c_options = CString::new(options).expect("Program::link, invalid options");
self.program = link_program(
self.program,
devices,
&c_options,
input_programs,
None,
ptr::null_mut(),
)?;
self.kernel_names = self.get_kernel_names()?;
Ok(())
}
}
/// Set the value of a specialization constant.
/// CL_VERSION_2_2
///
/// * `spec_id` - the specialization constant whose value will be set.
/// * `spec_size` - size in bytes of the data pointed to by spec_value.
/// * `spec_value` - pointer to the memory location that contains the value
/// of the specialization constant.
///
/// returns an empty Result or the error code from the OpenCL C API function.
#[cfg(any(feature = "CL_VERSION_2_2", feature = "dynamic"))]
pub unsafe fn set_specialization_constant(
&self,
spec_id: cl_uint,
spec_size: size_t,
spec_value: *const c_void,
) -> Result<()> {
unsafe {
Ok(set_program_specialization_constant(
self.program,
spec_id,
spec_size,
spec_value,
)?)
}
}
    /// CL_PROGRAM_REFERENCE_COUNT: the program reference count.
    pub fn get_reference_count(&self) -> Result<cl_uint> {
        Ok(get_program_info(self.program, CL_PROGRAM_REFERENCE_COUNT)?.into())
    }
    /// CL_PROGRAM_CONTEXT: the context the program was created on.
    pub fn get_context(&self) -> Result<cl_context> {
        Ok(intptr_t::from(get_program_info(self.program, CL_PROGRAM_CONTEXT)?) as cl_context)
    }
    /// CL_PROGRAM_NUM_DEVICES: the number of devices associated with the program.
    pub fn get_num_devices(&self) -> Result<cl_uint> {
        Ok(get_program_info(self.program, CL_PROGRAM_NUM_DEVICES)?.into())
    }
    /// CL_PROGRAM_DEVICES: the devices associated with the program, as raw ids.
    pub fn get_devices(&self) -> Result<Vec<intptr_t>> {
        Ok(get_program_info(self.program, CL_PROGRAM_DEVICES)?.into())
    }
    /// CL_PROGRAM_SOURCE: the concatenated program source, if available.
    pub fn get_source(&self) -> Result<String> {
        Ok(get_program_info(self.program, CL_PROGRAM_SOURCE)?.into())
    }
    /// CL_PROGRAM_BINARY_SIZES: the size of each device program binary.
    pub fn get_binary_sizes(&self) -> Result<Vec<size_t>> {
        Ok(get_program_info(self.program, CL_PROGRAM_BINARY_SIZES)?.into())
    }
    /// CL_PROGRAM_BINARIES: the program binaries, one per device.
    pub fn get_binaries(&self) -> Result<Vec<Vec<cl_uchar>>> {
        Ok(get_program_info(self.program, CL_PROGRAM_BINARIES)?.into())
    }
    /// CL_PROGRAM_NUM_KERNELS: the number of kernels in the program.
    pub fn get_num_kernels(&self) -> Result<size_t> {
        Ok(get_program_info(self.program, CL_PROGRAM_NUM_KERNELS)?.into())
    }
    /// CL_PROGRAM_KERNEL_NAMES: semi-colon separated kernel names, queried
    /// from the OpenCL runtime (unlike the cached `kernel_names` accessor).
    pub fn get_kernel_names(&self) -> Result<String> {
        Ok(get_program_info(self.program, CL_PROGRAM_KERNEL_NAMES)?.into())
    }
    /// CL_PROGRAM_IL: the program's intermediate language, if any.
    /// CL_VERSION_2_1
    pub fn get_program_il(&self) -> Result<String> {
        Ok(get_program_info(self.program, CL_PROGRAM_IL)?.into())
    }
    /// Whether the program has program-scope global constructors.
    /// CL_VERSION_2_2
    pub fn get_program_scope_global_ctors_present(&self) -> Result<bool> {
        Ok(cl_uint::from(get_program_info(
            self.program,
            CL_PROGRAM_SCOPE_GLOBAL_CTORS_PRESENT,
        )?) != CL_FALSE)
    }
    /// Whether the program has program-scope global destructors.
    /// CL_VERSION_2_2
    pub fn get_program_scope_global_dtors_present(&self) -> Result<bool> {
        Ok(cl_uint::from(get_program_info(
            self.program,
            CL_PROGRAM_SCOPE_GLOBAL_DTORS_PRESENT,
        )?) != CL_FALSE)
    }
    /// CL_PROGRAM_BUILD_STATUS: the build status for the given device.
    pub fn get_build_status(&self, device: cl_device_id) -> Result<cl_int> {
        Ok(get_program_build_info(self.program, device, CL_PROGRAM_BUILD_STATUS)?.into())
    }
    /// CL_PROGRAM_BUILD_OPTIONS: the options used to build for the given device.
    pub fn get_build_options(&self, device: cl_device_id) -> Result<String> {
        Ok(get_program_build_info(self.program, device, CL_PROGRAM_BUILD_OPTIONS)?.into())
    }
    /// CL_PROGRAM_BUILD_LOG: the compiler/linker log for the given device.
    pub fn get_build_log(&self, device: cl_device_id) -> Result<String> {
        Ok(get_program_build_info(self.program, device, CL_PROGRAM_BUILD_LOG)?.into())
    }
    /// CL_PROGRAM_BINARY_TYPE: the binary type for the given device.
    pub fn get_build_binary_type(&self, device: cl_device_id) -> Result<cl_uint> {
        Ok(get_program_build_info(self.program, device, CL_PROGRAM_BINARY_TYPE)?.into())
    }
    /// Total storage, in bytes, used by program-scope variables on the device.
    /// CL_VERSION_2_0
    pub fn get_build_global_variable_total_size(&self, device: cl_device_id) -> Result<size_t> {
        Ok(get_program_build_info(
            self.program,
            device,
            CL_PROGRAM_BUILD_GLOBAL_VARIABLE_TOTAL_SIZE,
        )?
        .into())
    }
}
// Integration test: requires an OpenCL platform with at least one GPU device,
// so it is environment-dependent and will panic where none is available.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::context::Context;
    use crate::device::Device;
    use crate::platform::get_platforms;
    use cl3::device::CL_DEVICE_TYPE_GPU;
    use std::collections::HashSet;
    // Two trivial kernels, so kernel-name and kernel-count queries have
    // known expected values (2 kernels: "add" and "subtract").
    const PROGRAM_SOURCE: &str = r#"
kernel void add(global float* buffer, float scalar) {
    buffer[get_global_id(0)] += scalar;
}
kernel void subtract(global float* buffer, float scalar) {
    buffer[get_global_id(0)] -= scalar;
}
"#;
    #[test]
    fn test_create_and_build_from_source() {
        let platforms = get_platforms().unwrap();
        assert!(0 < platforms.len());
        // Get the first platform
        let platform = &platforms[0];
        let devices = platform.get_devices(CL_DEVICE_TYPE_GPU).unwrap();
        assert!(0 < devices.len());
        // Get the first device
        let device = Device::new(devices[0]);
        let context = Context::from_device(&device).unwrap();
        // CL_DENORMS_ARE_ZERO is used as the build options string here.
        let program =
            Program::create_and_build_from_source(&context, PROGRAM_SOURCE, CL_DENORMS_ARE_ZERO)
                .expect("Program::create_and_build_from_source failed");
        let names: HashSet<&str> = program.kernel_names().split(';').collect();
        println!("OpenCL Program kernel_names len: {}", names.len());
        println!("OpenCL Program kernel_names: {:?}", names);
        let value = program.get_reference_count().unwrap();
        println!("program.get_reference_count(): {}", value);
        assert_eq!(1, value);
        let value = program.get_context().unwrap();
        assert!(context.get() == value);
        let value = program.get_num_devices().unwrap();
        println!("program.get_num_devices(): {}", value);
        assert_eq!(1, value);
        let value = program.get_devices().unwrap();
        assert!(device.id() == value[0] as cl_device_id);
        let value = program.get_source().unwrap();
        println!("program.get_source(): {}", value);
        assert!(!value.is_empty());
        let value = program.get_binary_sizes().unwrap();
        println!("program.get_binary_sizes(): {:?}", value);
        assert!(0 < value[0]);
        let value = program.get_binaries().unwrap();
        // println!("program.get_binaries(): {:?}", value);
        assert!(!value[0].is_empty());
        let value = program.get_num_kernels().unwrap();
        println!("program.get_num_kernels(): {}", value);
        assert_eq!(2, value);
        // CL_PROGRAM_IL is only available for programs created from IL.
        // let value = program.get_program_il().unwrap();
        // println!("program.get_program_il(): {:?}", value);
        // assert!(!value.is_empty());
        let value = program.get_build_status(device.id()).unwrap();
        println!("program.get_build_status(): {}", value);
        assert!(CL_BUILD_SUCCESS == value);
        let value = program.get_build_options(device.id()).unwrap();
        println!("program.get_build_options(): {}", value);
        assert!(!value.is_empty());
        // The build log may legitimately be empty on a successful build.
        let value = program.get_build_log(device.id()).unwrap();
        println!("program.get_build_log(): {}", value);
        // assert!(!value.is_empty());
        let value = program.get_build_binary_type(device.id()).unwrap();
        println!("program.get_build_binary_type(): {}", value);
        assert_eq!(CL_PROGRAM_BINARY_TYPE_EXECUTABLE as u32, value);
        // CL_VERSION_2_0 value
        match program.get_build_global_variable_total_size(device.id()) {
            Ok(value) => println!("program.get_build_global_variable_total_size(): {}", value),
            Err(e) => println!(
                "OpenCL error, program.get_build_global_variable_total_size(): {}",
                e
            ),
        };
    }
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/src/platform.rs | src/platform.rs | // Copyright (c) 2020-2024 Via Technology Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(clippy::missing_safety_doc)]
pub use cl3::platform;
use super::Result;
use cl3::device;
#[allow(unused_imports)]
use cl3::dx9_media_sharing;
#[allow(unused_imports)]
use cl3::ext;
#[allow(unused_imports)]
use cl3::program;
#[allow(unused_imports)]
use cl3::types::{
cl_device_id, cl_device_type, cl_name_version, cl_platform_id, cl_platform_info, cl_uint,
cl_ulong, cl_version,
};
#[allow(unused_imports)]
use libc::{c_void, intptr_t};
/// An OpenCL platform id and methods to query it.
///
/// The query methods calls clGetPlatformInfo with the relevant param_name, see:
/// [Platform Queries](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#platform-queries-table).
#[derive(Copy, Clone, Debug)]
pub struct Platform {
    // The raw cl_platform_id stored as an integer so the type can be `Copy`.
    id: intptr_t,
}
impl From<cl_platform_id> for Platform {
    /// Wrap a raw OpenCL platform id.
    fn from(value: cl_platform_id) -> Self {
        Self::new(value)
    }
}
impl From<Platform> for cl_platform_id {
    /// Extract the raw OpenCL platform id.
    fn from(value: Platform) -> Self {
        value.id()
    }
}
// SAFETY: `Platform` is a plain integer handle with no interior mutability;
// the OpenCL platform query entry points it wraps may be called from any
// thread, so the handle can be sent and shared across threads.
unsafe impl Send for Platform {}
unsafe impl Sync for Platform {}
impl Platform {
    /// Wrap a raw OpenCL platform id.
    pub fn new(id: cl_platform_id) -> Self {
        Self { id: id as intptr_t }
    }
    /// Accessor for the underlying platform id.
    pub const fn id(&self) -> cl_platform_id {
        self.id as cl_platform_id
    }
    /// Get the ids of available devices of the given type on the Platform.
    /// # Examples
    /// ```
    /// use opencl3::platform::get_platforms;
    /// use cl3::device::CL_DEVICE_TYPE_GPU;
    ///
    /// let platforms = get_platforms().unwrap();
    /// assert!(0 < platforms.len());
    ///
    /// // Choose the first platform
    /// let platform = &platforms[0];
    /// let device_ids = platform.get_devices(CL_DEVICE_TYPE_GPU).unwrap();
    /// println!("CL_DEVICE_TYPE_GPU count: {}", device_ids.len());
    /// assert!(0 < device_ids.len());
    /// ```
    pub fn get_devices(&self, device_type: cl_device_type) -> Result<Vec<cl_device_id>> {
        Ok(device::get_device_ids(self.id(), device_type)?)
    }
    /// Get the ids of devices that can share DirectX 9 media surfaces
    /// (`cl_intel_dx9_media_sharing` extension).
    ///
    /// # Safety
    ///
    /// `dx9_object` must be a valid pointer to the DirectX 9 object described
    /// by `dx9_device_source` — see the Intel extension specification.
    #[cfg(any(feature = "cl_khr_dx9_media_sharing", feature = "dynamic"))]
    pub unsafe fn get_device_ids_from_dx9_intel(
        &self,
        dx9_device_source: dx9_media_sharing::cl_dx9_device_source_intel,
        dx9_object: *mut c_void,
        dx9_device_set: dx9_media_sharing::cl_dx9_device_set_intel,
    ) -> Result<Vec<cl_device_id>> {
        unsafe {
            Ok(dx9_media_sharing::get_device_ids_from_dx9_intel(
                self.id(),
                dx9_device_source,
                dx9_object,
                dx9_device_set,
            )?)
        }
    }
    /// The OpenCL profile supported by the Platform,
    /// it can be FULL_PROFILE or EMBEDDED_PROFILE.
    pub fn profile(&self) -> Result<String> {
        Ok(platform::get_platform_info(self.id(), platform::CL_PLATFORM_PROFILE)?.into())
    }
    /// The OpenCL profile version supported by the Platform,
    /// e.g. OpenCL 1.2, OpenCL 2.0, OpenCL 2.1, etc.
    pub fn version(&self) -> Result<String> {
        Ok(platform::get_platform_info(self.id(), platform::CL_PLATFORM_VERSION)?.into())
    }
    /// The OpenCL Platform name string.
    pub fn name(&self) -> Result<String> {
        Ok(platform::get_platform_info(self.id(), platform::CL_PLATFORM_NAME)?.into())
    }
    /// The OpenCL Platform vendor string.
    pub fn vendor(&self) -> Result<String> {
        Ok(platform::get_platform_info(self.id(), platform::CL_PLATFORM_VENDOR)?.into())
    }
    /// A space separated list of extension names supported by the Platform.
    pub fn extensions(&self) -> Result<String> {
        Ok(platform::get_platform_info(self.id(), platform::CL_PLATFORM_EXTENSIONS)?.into())
    }
    /// The resolution of the host timer in nanoseconds as used by
    /// clGetDeviceAndHostTimer.
    /// CL_VERSION_2_1
    pub fn host_timer_resolution(&self) -> Result<cl_ulong> {
        Ok(
            platform::get_platform_info(self.id(), platform::CL_PLATFORM_HOST_TIMER_RESOLUTION)?
                .into(),
        )
    }
    /// The detailed (major, minor, patch) version supported by the platform.
    /// CL_VERSION_3_0
    pub fn numeric_version(&self) -> Result<cl_version> {
        Ok(platform::get_platform_info(self.id(), platform::CL_PLATFORM_NUMERIC_VERSION)?.into())
    }
    /// An array of description (name and version) structures that lists all the
    /// extensions supported by the platform.
    /// CL_VERSION_3_0
    pub fn extensions_with_version(&self) -> Result<Vec<cl_name_version>> {
        Ok(
            platform::get_platform_info(self.id(), platform::CL_PLATFORM_EXTENSIONS_WITH_VERSION)?
                .into(),
        )
    }
    /// The external memory handle types importable by the platform.
    /// cl_khr_external_memory
    pub fn platform_external_memory_import_handle_types_khr(&self) -> Result<Vec<cl_name_version>> {
        Ok(platform::get_platform_info(
            self.id(),
            ext::CL_PLATFORM_EXTERNAL_MEMORY_IMPORT_HANDLE_TYPES_KHR,
        )?
        .into())
    }
    /// The external semaphore handle types importable by the platform.
    /// cl_khr_external_semaphore
    pub fn platform_semaphore_import_handle_types_khr(&self) -> Result<Vec<cl_name_version>> {
        Ok(platform::get_platform_info(
            self.id(),
            ext::CL_PLATFORM_SEMAPHORE_IMPORT_HANDLE_TYPES_KHR,
        )?
        .into())
    }
    /// The external semaphore handle types exportable by the platform.
    /// cl_khr_external_semaphore
    pub fn platform_semaphore_export_handle_types_khr(&self) -> Result<Vec<cl_name_version>> {
        Ok(platform::get_platform_info(
            self.id(),
            ext::CL_PLATFORM_SEMAPHORE_EXPORT_HANDLE_TYPES_KHR,
        )?
        .into())
    }
    /// The semaphore types supported by the platform.
    /// cl_khr_semaphore
    pub fn platform_semaphore_types_khr(&self) -> Result<Vec<cl_name_version>> {
        Ok(platform::get_platform_info(self.id(), ext::CL_PLATFORM_SEMAPHORE_TYPES_KHR)?.into())
    }
    /// Get data about an OpenCL platform.
    /// Calls clGetPlatformInfo to get the desired data about the platform.
    pub fn get_data(&self, param_name: cl_platform_info) -> Result<Vec<u8>> {
        Ok(platform::get_platform_data(self.id(), param_name)?)
    }
    /// Unload an OpenCL compiler for a platform.
    /// CL_VERSION_1_2
    ///
    /// # Safety
    ///
    /// Compiling is unsafe after the compiler has been unloaded.
    #[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
    pub unsafe fn unload_compiler(&self) -> Result<()> {
        unsafe { Ok(program::unload_platform_compiler(self.id())?) }
    }
}
/// Get the available OpenCL platforms.
/// # Examples
/// ```
/// use opencl3::platform::get_platforms;
///
/// let platforms = get_platforms().unwrap();
/// println!("Number of OpenCL platforms: {}", platforms.len());
/// assert!(0 < platforms.len());
/// ```
/// returns a Result containing a vector of available Platforms
/// or the error code from the OpenCL C API function.
pub fn get_platforms() -> Result<Vec<Platform>> {
    // Wrap each raw id returned by clGetPlatformIDs in a `Platform`.
    Ok(platform::get_platform_ids()?
        .into_iter()
        .map(Platform::new)
        .collect())
}
/// Get the available OpenCL platforms via the `cl_khr_icd` extension.
///
/// returns a Result containing a vector of available Platforms
/// or the error code from the OpenCL C API function.
#[cfg(any(feature = "cl_khr_icd", feature = "dynamic"))]
pub fn icd_get_platform_ids_khr() -> Result<Vec<Platform>> {
    Ok(ext::icd_get_platform_ids_khr()?
        .into_iter()
        .map(Platform::new)
        .collect())
}
// Integration test: requires at least one installed OpenCL platform,
// so it is environment-dependent.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_get_platforms() {
        let platforms = get_platforms().unwrap();
        println!("Number of platforms: {}", platforms.len());
        assert!(0 < platforms.len());
        // Exercise the basic (pre-2.1) queries on every platform.
        for platform in platforms {
            println!("Platform Debug Trait: {:?}", platform);
            println!("CL_PLATFORM_NAME: {}", platform.name().unwrap());
            println!("CL_PLATFORM_PROFILE: {}", platform.profile().unwrap());
            let value = platform.version().unwrap();
            println!("CL_PLATFORM_VERSION: {:?}", value);
            println!("CL_PLATFORM_VENDOR: {}", platform.vendor().unwrap());
            println!(
                "CL_PLATFORM_EXTENSIONS: {:?}",
                platform.extensions().unwrap()
            );
            // CL_VERSION_2_1 value, may not be supported
            match platform.host_timer_resolution() {
                Ok(value) => {
                    println!("CL_PLATFORM_HOST_TIMER_RESOLUTION: {}", value)
                }
                Err(e) => println!(
                    "OpenCL error, CL_PLATFORM_HOST_TIMER_RESOLUTION: {:?}, {}",
                    e, e
                ),
            };
            println!();
        }
    }
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/src/memory.rs | src/memory.rs | // Copyright (c) 2020-2024 Via Technology Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(deprecated)]
#![allow(clippy::missing_safety_doc)]
use core::marker::PhantomData;
pub use cl3::memory::*;
use super::context::Context;
use super::Result;
#[cfg(any(feature = "cl_intel_dx9_media_sharing", feature = "dynamic"))]
#[allow(unused_imports)]
use cl3::dx9_media_sharing;
#[allow(unused_imports)]
use cl3::egl;
#[allow(unused_imports)]
use cl3::ext;
use cl3::gl;
use cl3::memory;
use cl3::sampler;
#[allow(unused_imports)]
use cl3::types::{
cl_addressing_mode, cl_bool, cl_filter_mode, cl_sampler, cl_sampler_info, cl_sampler_properties,
};
use libc::{c_void, intptr_t, size_t};
use std::mem;
/// A trait implemented by all OpenCL memory objects (buffers and images),
/// giving access to the underlying `cl_mem` handle and to the
/// `clGetMemObjectInfo` queries.
pub trait ClMem {
    /// The underlying `cl_mem` handle.
    fn get(&self) -> cl_mem;
    /// The underlying `cl_mem` handle, from a mutable reference.
    fn get_mut(&mut self) -> cl_mem;
    /// CL_MEM_TYPE: the type of the memory object, e.g. buffer or image.
    fn mem_type(&self) -> Result<cl_mem_object_type> {
        Ok(memory::get_mem_object_info(self.get(), CL_MEM_TYPE)?.into())
    }
    /// CL_MEM_FLAGS: the flags the memory object was created with.
    fn flags(&self) -> Result<cl_mem_flags> {
        Ok(memory::get_mem_object_info(self.get(), CL_MEM_FLAGS)?.into())
    }
    /// CL_MEM_SIZE: the size of the memory object's data store in bytes.
    fn size(&self) -> Result<size_t> {
        Ok(memory::get_mem_object_info(self.get(), CL_MEM_SIZE)?.into())
    }
    /// CL_MEM_HOST_PTR: the host_ptr the memory object was created with.
    fn host_ptr(&self) -> Result<intptr_t> {
        Ok(memory::get_mem_object_info(self.get(), CL_MEM_HOST_PTR)?.into())
    }
    /// CL_MEM_MAP_COUNT: the number of currently mapped regions.
    fn map_count(&self) -> Result<cl_uint> {
        Ok(memory::get_mem_object_info(self.get(), CL_MEM_MAP_COUNT)?.into())
    }
    /// CL_MEM_REFERENCE_COUNT: the memory object reference count.
    fn reference_count(&self) -> Result<cl_uint> {
        Ok(memory::get_mem_object_info(self.get(), CL_MEM_REFERENCE_COUNT)?.into())
    }
    /// CL_MEM_CONTEXT: the context the memory object was created on.
    fn context(&self) -> Result<cl_context> {
        Ok(intptr_t::from(memory::get_mem_object_info(self.get(), CL_MEM_CONTEXT)?) as cl_context)
    }
    /// CL_MEM_ASSOCIATED_MEMOBJECT: the parent memory object, e.g. for a sub-buffer.
    fn associated_memobject(&self) -> Result<cl_mem> {
        Ok(intptr_t::from(memory::get_mem_object_info(
            self.get(),
            CL_MEM_ASSOCIATED_MEMOBJECT,
        )?) as cl_mem)
    }
    /// CL_MEM_OFFSET: the offset of a sub-buffer within its parent buffer.
    fn offset(&self) -> Result<size_t> {
        Ok(memory::get_mem_object_info(self.get(), CL_MEM_OFFSET)?.into())
    }
    /// CL_MEM_USES_SVM_POINTER: whether the object uses an SVM pointer.
    fn uses_svm_pointer(&self) -> Result<cl_uint> {
        Ok(memory::get_mem_object_info(self.get(), CL_MEM_USES_SVM_POINTER)?.into())
    }
    /// CL_MEM_PROPERTIES: the properties the object was created with.
    /// CL_VERSION_3_0
    fn properties(&self) -> Result<Vec<cl_ulong>> {
        Ok(memory::get_mem_object_info(self.get(), CL_MEM_PROPERTIES)?.into())
    }
    /// Get memory data about an OpenCL memory object.
    /// Calls clGetMemObjectInfo to get the desired data about the memory object.
    fn get_mem_data(&self, param_name: cl_mem_info) -> Result<Vec<u8>> {
        Ok(get_mem_object_data(self.get(), param_name)?)
    }
    /// Query an OpenGL object used to create an OpenCL memory object.
    ///
    /// returns a Result containing the OpenGL object type and name
    /// or the error code from the OpenCL C API function.
    fn gl_object_info(&self) -> Result<(gl::cl_GLuint, gl::cl_GLuint)> {
        Ok(gl::get_gl_object_info(self.get())?)
    }
}
/// An OpenCL buffer.
///
/// The type parameter `T` records the element type the buffer was sized for;
/// it is not stored at runtime (see `PhantomData`).
///
/// Implements the Drop trait to call release_mem_object when the object is dropped.
#[derive(Debug)]
pub struct Buffer<T> {
    buffer: cl_mem,
    #[doc(hidden)]
    _type: PhantomData<T>,
}
impl<T> From<Buffer<T>> for cl_mem {
    // NOTE(review): this consumes the `Buffer`, whose `Drop` impl calls
    // `clReleaseMemObject` on the same handle when `value` goes out of scope
    // here — so the returned raw `cl_mem` has had its reference count
    // decremented. Confirm the intended ownership semantics with callers.
    fn from(value: Buffer<T>) -> Self {
        value.buffer
    }
}
// Expose the raw handle so the `ClMem` query methods work on buffers.
impl<T> ClMem for Buffer<T> {
    fn get(&self) -> cl_mem {
        self.buffer
    }
    fn get_mut(&mut self) -> cl_mem {
        self.buffer
    }
}
impl<T> Drop for Buffer<T> {
    // Releases the OpenCL memory object; panics if clReleaseMemObject fails.
    fn drop(&mut self) {
        unsafe { memory::release_mem_object(self.get()).expect("Error: clReleaseMemObject") };
    }
}
// SAFETY: `Buffer` only holds a raw `cl_mem` handle; the Send/Sync bounds are
// forwarded to `T`. NOTE(review): thread-safe use also relies on the OpenCL
// runtime's own synchronisation rules for memory objects — confirm.
unsafe impl<T: Send> Send for Buffer<T> {}
unsafe impl<T: Sync> Sync for Buffer<T> {}
impl<T> Buffer<T> {
    /// Wrap a raw OpenCL buffer memory object.
    pub const fn new(buffer: cl_mem) -> Self {
        Self {
            buffer,
            _type: PhantomData,
        }
    }
    /// Create a Buffer for a context.
    ///
    /// * `context` - a valid OpenCL context.
    /// * `flags` - a bit-field used to specify allocation and usage information
    ///   about the image memory object being created, see:
    ///   [Memory Flags](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#memory-flags-table).
    /// * `count` - the number of T objects to be allocated.
    /// * `host_ptr` - a pointer to the buffer data that may already be allocated
    ///   by the application.
    ///
    /// returns a Result containing the new OpenCL buffer object
    /// or the error code from the OpenCL C API function.
    pub unsafe fn create(
        context: &Context,
        flags: cl_mem_flags,
        count: size_t,
        host_ptr: *mut c_void,
    ) -> Result<Self> {
        unsafe {
            // NOTE(review): `count * size_of::<T>()` can overflow in release
            // builds for very large counts — confirm callers bound `count`.
            let buffer =
                memory::create_buffer(context.get(), flags, count * mem::size_of::<T>(), host_ptr)?;
            Ok(Self::new(buffer))
        }
    }
    /// Create an OpenCL buffer object for a context.
    ///
    /// CL_VERSION_3_0
    ///
    /// * `context` - a valid OpenCL context.
    /// * `properties` - an optional null terminated list of properties.
    /// * `flags` - a bit-field used to specify allocation and usage information
    ///   about the image memory object being created, see:
    ///   [Memory Flags](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#memory-flags-table).
    /// * `count` - the number of T objects to be allocated.
    /// * `host_ptr` - a pointer to the buffer data that may already be allocated
    ///   by the application.
    ///
    /// returns a Result containing the new OpenCL buffer object
    /// or the error code from the OpenCL C API function.
    #[cfg(any(feature = "CL_VERSION_3_0", feature = "dynamic"))]
    pub unsafe fn create_with_properties(
        context: &Context,
        properties: *const cl_mem_properties,
        flags: cl_mem_flags,
        count: size_t,
        host_ptr: *mut c_void,
    ) -> Result<Self> {
        unsafe {
            let buffer = memory::create_buffer_with_properties(
                context.get(),
                properties,
                flags,
                count * mem::size_of::<T>(),
                host_ptr,
            )?;
            Ok(Self::new(buffer))
        }
    }
    /// Create an OpenCL buffer object for a context from an OpenGL buffer.
    ///
    /// * `context` - a valid OpenCL context created from an OpenGL context.
    /// * `flags` - a bit-field used to specify allocation and usage information
    ///   about the image memory object being created, see:
    ///   [Memory Flags](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#memory-flags-table).
    /// * `bufobj` - the OpenGL buffer.
    ///
    /// returns a Result containing the new OpenCL buffer object
    /// or the error code from the OpenCL C API function.
    pub unsafe fn create_from_gl_buffer(
        context: &Context,
        flags: cl_mem_flags,
        bufobj: gl::cl_GLuint,
    ) -> Result<Self> {
        unsafe {
            let buffer = gl::create_from_gl_buffer(context.get(), flags, bufobj)?;
            Ok(Self::new(buffer))
        }
    }
    /// Create an OpenCL buffer with Intel-specific properties
    /// (`cl_intel_create_buffer_with_properties` extension).
    #[cfg(any(
        feature = "cl_intel_create_buffer_with_properties",
        feature = "dynamic"
    ))]
    pub unsafe fn create_with_properties_intel(
        context: &Context,
        properties: *const ext::cl_mem_properties_intel,
        flags: cl_mem_flags,
        count: size_t,
        host_ptr: *mut c_void,
    ) -> Result<Self> {
        unsafe {
            let buffer = ext::create_buffer_with_properties_intel(
                context.get(),
                properties,
                flags,
                count * mem::size_of::<T>(),
                host_ptr,
            )?;
            Ok(Self::new(buffer))
        }
    }
    /// Create an new OpenCL buffer object from an existing buffer object.
    ///
    /// See: [SubBuffer Attributes](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#subbuffer-create-info-table).
    ///
    /// * `flags` - a bit-field used to specify allocation and usage information
    ///   about the sub-buffer memory object being created, see:
    ///   [Memory Flags](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#memory-flags-table).
    /// * `origin` - the offset in number of objects of type `T`.
    /// * `count` - the size of the sub-buffer in number of objects of type `T`.
    ///
    /// returns a Result containing the new OpenCL buffer object
    /// or the error code from the OpenCL C API function.
    pub unsafe fn create_sub_buffer(
        &self,
        flags: cl_mem_flags,
        origin: usize,
        count: usize,
    ) -> Result<Self> {
        unsafe {
            // Element counts are converted to byte offsets/sizes for the
            // cl_buffer_region passed to clCreateSubBuffer.
            let buffer_create_info = cl_buffer_region {
                origin: origin * std::mem::size_of::<T>(),
                size: count * std::mem::size_of::<T>(),
            };
            let buffer = memory::create_sub_buffer(
                self.buffer,
                flags,
                CL_BUFFER_CREATE_TYPE_REGION,
                &buffer_create_info as *const _ as *const c_void,
            )?;
            Ok(Self::new(buffer))
        }
    }
}
/// An OpenCL image.
///
/// Has methods to return information from calls to clGetImageInfo with the
/// appropriate parameters.
/// Implements the Drop trait to call release_mem_object when the object is dropped.
#[derive(Debug)]
pub struct Image {
    // The raw OpenCL image memory object handle.
    image: cl_mem,
}
impl From<Image> for cl_mem {
    // NOTE(review): this consumes the `Image`, whose `Drop` impl calls
    // `clReleaseMemObject` on the same handle when `value` goes out of scope
    // here — so the returned raw `cl_mem` has had its reference count
    // decremented. Confirm the intended ownership semantics with callers.
    fn from(value: Image) -> Self {
        value.image
    }
}
// Expose the raw handle so the `ClMem` query methods work on images.
impl ClMem for Image {
    fn get(&self) -> cl_mem {
        self.image
    }
    fn get_mut(&mut self) -> cl_mem {
        self.image
    }
}
impl Drop for Image {
    // Releases the OpenCL memory object; panics if clReleaseMemObject fails.
    fn drop(&mut self) {
        unsafe { memory::release_mem_object(self.get()).expect("Error: clReleaseMemObject") };
    }
}
unsafe impl Send for Image {}
impl Image {
    /// Wrap a raw OpenCL image memory object.
    pub const fn new(image: cl_mem) -> Self {
        Self { image }
    }
/// Create an OpenCL image object for a context.
///
/// * `context` - a valid OpenCL context.
/// * `flags` - a bit-field used to specify allocation and usage information
/// about the image memory object being created, see:
/// [Memory Flags](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#memory-flags-table).
/// * `image_format` - a pointer to a structure that describes format properties
/// of the image to be allocated.
/// * `image_desc` - a pointer to a structure that describes type and dimensions
/// of the image to be allocated.
/// * `host_ptr` - a pointer to the image data that may already be allocated
/// by the application.
///
/// returns a Result containing the new OpenCL image object
/// or the error code from the OpenCL C API function.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub unsafe fn create(
context: &Context,
flags: cl_mem_flags,
image_format: *const cl_image_format,
image_desc: *const cl_image_desc,
host_ptr: *mut c_void,
) -> Result<Self> {
unsafe {
let image =
memory::create_image(context.get(), flags, image_format, image_desc, host_ptr)?;
Ok(Self::new(image))
}
}
/// Create an OpenCL image object for a context.
///
/// CL_VERSION_3_0
///
/// * `context` - a valid OpenCL context.
/// * `properties` - an optional null terminated list of properties.
/// * `flags` - a bit-field used to specify allocation and usage information
/// about the image memory object being created, see:
/// [Memory Flags](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#memory-flags-table).
/// * `image_format` - a pointer to a structure that describes format properties
/// of the image to be allocated.
/// * `image_desc` - a pointer to a structure that describes type and dimensions
/// of the image to be allocated.
/// * `host_ptr` - a pointer to the image data that may already be allocated
/// by the application.
///
/// returns a Result containing the new OpenCL image object
/// or the error code from the OpenCL C API function.
#[cfg(any(feature = "CL_VERSION_3_0", feature = "dynamic"))]
pub unsafe fn create_with_properties(
context: &Context,
properties: *const cl_mem_properties,
flags: cl_mem_flags,
image_format: *const cl_image_format,
image_desc: *const cl_image_desc,
host_ptr: *mut c_void,
) -> Result<Self> {
unsafe {
let image = memory::create_image_with_properties(
context.get(),
properties,
flags,
image_format,
image_desc,
host_ptr,
)?;
Ok(Self::new(image))
}
}
/// Create an OpenCL image object, image array object, or image buffer object
/// for a context.
///
/// From an OpenGL: texture object, texture array object, texture buffer object,
/// or a single face of an OpenGL cubemap texture object.
///
/// * `context` - a valid OpenCL context created from an OpenGL context.
/// * `flags` - a bit-field used to specify allocation and usage information
/// about the image memory object being created, see:
/// [Memory Flags](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#memory-flags-table).
/// * `texture_target` - used to define the image type of texture.
/// * `miplevel ` - used to define the mipmap level.
/// * `texture ` - the name of a GL buffer texture object.
///
/// returns a Result containing the new OpenCL image object
/// or the error code from the OpenCL C API function.
pub unsafe fn create_from_gl_texture(
context: &Context,
flags: cl_mem_flags,
texture_target: gl::cl_GLenum,
miplevel: gl::cl_GLint,
texture: gl::cl_GLuint,
) -> Result<Self> {
unsafe {
let image = gl::create_from_gl_texture(
context.get(),
flags,
texture_target,
miplevel,
texture,
)?;
Ok(Self::new(image))
}
}
/// Create an OpenCL 2D image object from an OpenGL renderbuffer object.
///
/// * `context` - a valid OpenCL context created from an OpenGL context.
/// * `flags` - a bit-field used to specify allocation and usage information
/// about the image memory object being created, see:
/// [Memory Flags](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#memory-flags-table).
/// * `renderbuffer` - a GL renderbuffer object.
///
/// returns a Result containing the new OpenCL image object
/// or the error code from the OpenCL C API function.
pub unsafe fn create_from_gl_render_buffer(
context: &Context,
flags: cl_mem_flags,
renderbuffer: gl::cl_GLuint,
) -> Result<Self> {
unsafe {
let image = gl::create_from_gl_render_buffer(context.get(), flags, renderbuffer)?;
Ok(Self::new(image))
}
}
/// Create an OpenCL image object, from the EGLImage source provided as image.
/// Requires the cl_khr_egl_image extension.
///
/// * `context` - a valid OpenCL context created from an OpenGL context.
/// * `display` - should be of type EGLDisplay, cast into the type CLeglDisplayKHR
/// * `image` - should be of type EGLImageKHR, cast into the type CLeglImageKHR.
/// * `flags` - usage information about the memory object being created.
/// * `properties` - a null terminated list of property names and their
/// corresponding values.
///
/// returns a Result containing the new OpenCL image object
/// or the error code from the OpenCL C API function.
#[cfg(any(feature = "cl_khr_egl_image", feature = "dynamic"))]
#[inline]
pub unsafe fn create_from_egl_image(
context: &Context,
display: egl::CLeglDisplayKHR,
image: egl::CLeglImageKHR,
flags: cl_mem_flags,
properties: &[egl::cl_egl_image_properties_khr],
) -> Result<Self> {
unsafe {
let image = egl::create_from_egl_image(
context.get(),
display,
image,
flags,
properties.as_ptr(),
)?;
Ok(Self::new(image))
}
}
#[cfg(any(feature = "cl_intel_dx9_media_sharing", feature = "dynamic"))]
#[inline]
pub unsafe fn create_from_dx9_media_surface_intel(
context: &Context,
flags: cl_mem_flags,
resource: dx9_media_sharing::IDirect3DSurface9_ptr,
shared_handle: dx9_media_sharing::HANDLE,
plane: cl_uint,
) -> Result<Self> {
unsafe {
let image = dx9_media_sharing::create_from_dx9_media_surface_intel(
context.get(),
flags,
resource,
shared_handle,
plane,
)?;
Ok(Self::new(image))
}
}
pub fn format(&self) -> Result<Vec<cl_image_format>> {
Ok(memory::get_image_info(self.image, CL_IMAGE_FORMAT)?.into())
}
pub fn element_size(&self) -> Result<size_t> {
Ok(memory::get_image_info(self.image, CL_IMAGE_ELEMENT_SIZE)?.into())
}
pub fn row_pitch(&self) -> Result<size_t> {
Ok(memory::get_image_info(self.image, CL_IMAGE_ROW_PITCH)?.into())
}
pub fn slice_pitch(&self) -> Result<size_t> {
Ok(memory::get_image_info(self.image, CL_IMAGE_SLICE_PITCH)?.into())
}
pub fn width(&self) -> Result<size_t> {
Ok(memory::get_image_info(self.image, CL_IMAGE_WIDTH)?.into())
}
pub fn height(&self) -> Result<size_t> {
Ok(memory::get_image_info(self.image, CL_IMAGE_HEIGHT)?.into())
}
pub fn depth(&self) -> Result<size_t> {
Ok(memory::get_image_info(self.image, CL_IMAGE_DEPTH)?.into())
}
pub fn array_size(&self) -> Result<size_t> {
Ok(memory::get_image_info(self.image, CL_IMAGE_ARRAY_SIZE)?.into())
}
pub fn buffer(&self) -> Result<cl_mem> {
Ok(intptr_t::from(memory::get_image_info(self.image, CL_IMAGE_BUFFER)?) as cl_mem)
}
pub fn num_mip_levels(&self) -> Result<cl_uint> {
Ok(memory::get_image_info(self.image, CL_IMAGE_NUM_MIP_LEVELS)?.into())
}
pub fn num_samples(&self) -> Result<cl_uint> {
Ok(memory::get_image_info(self.image, CL_IMAGE_NUM_SAMPLES)?.into())
}
/// Get data about an OpenCL image object.
/// Calls clGetImageInfo to get the desired data about the image object.
pub fn get_data(&self, param_name: cl_image_info) -> Result<Vec<u8>> {
Ok(get_image_data(self.image, param_name)?)
}
/// Get information about the GL texture target associated with a memory object.
pub fn gl_texture_target(&self) -> Result<cl_uint> {
Ok(gl::get_gl_texture_info(self.image, gl::CL_GL_TEXTURE_TARGET)?.into())
}
/// Get information about the GL mipmap level associated with a memory object.
pub fn gl_mipmap_level(&self) -> Result<cl_int> {
Ok(gl::get_gl_texture_info(self.image, gl::CL_GL_MIPMAP_LEVEL)?.into())
}
/// Get information about the GL number of samples associated with a memory object.
pub fn gl_num_samples(&self) -> Result<cl_int> {
Ok(gl::get_gl_texture_info(self.image, gl::CL_GL_NUM_SAMPLES)?.into())
}
/// Get GL texture information associated with a memory object.
pub fn get_gl_texture_data(&self, param_name: gl::cl_gl_texture_info) -> Result<Vec<u8>> {
Ok(gl::get_gl_texture_data(self.image, param_name)?)
}
}
/// An OpenCL sampler.
/// Has methods to return information from calls to clGetSamplerInfo with the
/// appropriate parameters.
/// Implements the Drop trait to call release_sampler when the object is dropped.
#[derive(Debug)]
pub struct Sampler {
    // Raw OpenCL handle; owned by this wrapper and released in Drop.
    sampler: cl_sampler,
}
impl From<Sampler> for cl_sampler {
    // Consumes the wrapper and returns the raw handle.
    //
    // NOTE(review): `value` is dropped when this function returns, and
    // `Drop for Sampler` calls clReleaseSampler on the handle being returned.
    // Callers must retain the sampler beforehand for the handle to stay valid.
    fn from(value: Sampler) -> Self {
        value.sampler
    }
}
impl Drop for Sampler {
    // Releases the underlying cl_sampler.
    // NOTE(review): panics via `expect` if clReleaseSampler fails — consistent
    // with the other Drop impls in this crate.
    fn drop(&mut self) {
        unsafe { sampler::release_sampler(self.sampler).expect("Error: clReleaseSampler") };
    }
}
// SAFETY(review): asserts that a `Sampler` (a raw cl_sampler handle) may be
// moved across threads; confirm against the OpenCL spec's thread-safety rules.
unsafe impl Send for Sampler {}
impl Sampler {
    /// Wrap a raw `cl_sampler` handle.
    /// Takes ownership: the handle is released (clReleaseSampler) on drop.
    pub const fn new(sampler: cl_sampler) -> Self {
        Self { sampler }
    }

    /// Create an OpenCL sampler via clCreateSampler.
    /// Deprecated from CL_VERSION_2_0; use [`create_with_properties`](Self::create_with_properties).
    #[cfg_attr(
        any(
            feature = "CL_VERSION_2_0",
            feature = "CL_VERSION_2_1",
            feature = "CL_VERSION_2_2",
            feature = "CL_VERSION_3_0"
        ),
        deprecated(
            since = "0.1.0",
            note = "From CL_VERSION_2_0 use create_sampler_with_properties"
        )
    )]
    pub fn create(
        context: &Context,
        normalize_coords: cl_bool,
        addressing_mode: cl_addressing_mode,
        filter_mode: cl_filter_mode,
    ) -> Result<Self> {
        let sampler = sampler::create_sampler(
            context.get(),
            normalize_coords,
            addressing_mode,
            filter_mode,
        )?;
        Ok(Self::new(sampler))
    }

    /// Create an OpenCL sampler from a null terminated properties list
    /// via clCreateSamplerWithProperties (CL_VERSION_2_0).
    #[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
    pub fn create_with_properties(
        context: &Context,
        properties: *const cl_sampler_properties,
    ) -> Result<Self> {
        let sampler = sampler::create_sampler_with_properties(context.get(), properties)?;
        Ok(Self::new(sampler))
    }

    /// The underlying raw `cl_sampler` handle.
    pub const fn get(&self) -> cl_sampler {
        self.sampler
    }

    /// CL_SAMPLER_REFERENCE_COUNT.
    pub fn reference_count(&self) -> Result<cl_uint> {
        Ok(sampler::get_sampler_info(self.get(), sampler::CL_SAMPLER_REFERENCE_COUNT)?.into())
    }

    /// The context this sampler was created on: CL_SAMPLER_CONTEXT.
    /// Returned as a raw, unretained cl_context handle.
    pub fn context(&self) -> Result<cl_context> {
        Ok(intptr_t::from(sampler::get_sampler_info(
            self.get(),
            sampler::CL_SAMPLER_CONTEXT,
        )?) as cl_context)
    }

    /// Whether image coordinates are normalized: CL_SAMPLER_NORMALIZED_COORDS.
    pub fn normalized_coords(&self) -> Result<bool> {
        Ok(cl_uint::from(sampler::get_sampler_info(
            self.get(),
            sampler::CL_SAMPLER_NORMALIZED_COORDS,
        )?) != CL_FALSE)
    }

    /// CL_SAMPLER_ADDRESSING_MODE.
    pub fn addressing_mode(&self) -> Result<cl_addressing_mode> {
        Ok(sampler::get_sampler_info(self.get(), sampler::CL_SAMPLER_ADDRESSING_MODE)?.into())
    }

    /// CL_SAMPLER_FILTER_MODE.
    pub fn filter_mode(&self) -> Result<cl_filter_mode> {
        Ok(sampler::get_sampler_info(self.get(), sampler::CL_SAMPLER_FILTER_MODE)?.into())
    }

    /// The properties used to create the sampler: CL_SAMPLER_PROPERTIES.
    pub fn sampler_properties(&self) -> Result<Vec<intptr_t>> {
        Ok(sampler::get_sampler_info(self.get(), sampler::CL_SAMPLER_PROPERTIES)?.into())
    }

    /// Get data about an OpenCL sampler object.
    /// Calls clGetSamplerInfo to get the desired data about the sampler object.
    pub fn get_data(&self, param_name: cl_sampler_info) -> Result<Vec<u8>> {
        Ok(sampler::get_sampler_data(self.get(), param_name)?)
    }
}
/// An OpenCL pipe.
/// Has methods to return information from calls to clGetPipeInfo with the
/// appropriate parameters.
/// Implements the Drop trait to call release_mem_object when the object is dropped.
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
#[derive(Debug)]
pub struct Pipe {
    // Raw OpenCL handle (a cl_mem); owned by this wrapper, released in Drop.
    pipe: cl_mem,
}
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
impl From<cl_mem> for Pipe {
    /// Wrap a raw `cl_mem` pipe handle, taking ownership of it
    /// (no clRetainMemObject call is made here).
    fn from(raw: cl_mem) -> Self {
        Self { pipe: raw }
    }
}
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
impl From<Pipe> for cl_mem {
    /// Consume the `Pipe` wrapper and return the raw `cl_mem` handle.
    ///
    /// NOTE(review): `Drop for Pipe` runs when this function returns and calls
    /// clReleaseMemObject on the handle being returned; callers should retain
    /// the memory object first if they need the handle to stay valid.
    fn from(value: Pipe) -> Self {
        // `pipe` is already a `cl_mem`; the former `as Self` cast was redundant.
        value.pipe
    }
}
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
impl ClMem for Pipe {
    /// The raw `cl_mem` handle for mutating OpenCL calls.
    /// (Identical to [`get`](Self::get); the `&mut` receiver expresses intent.)
    fn get_mut(&mut self) -> cl_mem {
        self.pipe
    }

    /// The raw `cl_mem` handle of this pipe.
    fn get(&self) -> cl_mem {
        self.pipe
    }
}
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
impl Drop for Pipe {
    // Releases the underlying cl_mem.
    // NOTE(review): panics via `expect` on failure, like the other Drop impls.
    fn drop(&mut self) {
        unsafe { memory::release_mem_object(self.get()).expect("Error: clReleaseMemObject") };
    }
}
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
impl Pipe {
    /// Wrap a raw `cl_mem` pipe handle.
    /// Takes ownership: the handle is released (clReleaseMemObject) on drop.
    pub const fn new(pipe: cl_mem) -> Self {
        Self { pipe }
    }

    /// Create an OpenCL pipe object via clCreatePipe (CL_VERSION_2_0).
    ///
    /// * `flags` - allocation and usage information.
    /// * `pipe_packet_size` - size in bytes of a pipe packet.
    /// * `pipe_max_packets` - maximum number of packets the pipe can hold.
    pub unsafe fn create(
        context: &Context,
        flags: cl_mem_flags,
        pipe_packet_size: cl_uint,
        pipe_max_packets: cl_uint,
    ) -> Result<Self> {
        unsafe {
            let pipe =
                memory::create_pipe(context.get(), flags, pipe_packet_size, pipe_max_packets)?;
            Ok(Self::new(pipe))
        }
    }

    /// CL_PIPE_PACKET_SIZE.
    pub fn pipe_packet_size(&self) -> Result<cl_uint> {
        Ok(memory::get_pipe_info(self.get(), CL_PIPE_PACKET_SIZE)?.into())
    }

    /// CL_PIPE_MAX_PACKETS.
    pub fn pipe_max_packets(&self) -> Result<cl_uint> {
        Ok(memory::get_pipe_info(self.get(), CL_PIPE_MAX_PACKETS)?.into())
    }

    /// CL_PIPE_PROPERTIES.
    pub fn pipe_properties(&self) -> Result<Vec<intptr_t>> {
        Ok(memory::get_pipe_info(self.get(), CL_PIPE_PROPERTIES)?.into())
    }

    /// Get data about an OpenCL pipe object.
    /// Calls clGetPipeInfo to get the desired information about the pipe object.
    pub fn get_data(&self, param_name: cl_pipe_info) -> Result<Vec<u8>> {
        Ok(memory::get_pipe_data(self.get(), param_name)?)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::context::Context;
    use crate::device::{CL_DEVICE_TYPE_GPU, Device};
    use crate::platform::get_platforms;
    use crate::types::cl_float;
    use std::ptr;

    // Exercises Buffer creation and the clGetMemObjectInfo query wrappers.
    // NOTE: requires an OpenCL platform with at least one GPU device at runtime;
    // the test panics (unwrap) if no platform/GPU is available.
    #[test]
    fn test_memory_buffer() {
        let platforms = get_platforms().unwrap();
        assert!(0 < platforms.len());
        // Get the first platform
        let platform = &platforms[0];
        let devices = platform.get_devices(CL_DEVICE_TYPE_GPU).unwrap();
        assert!(0 < devices.len());
        // Get the first device
        let device = Device::new(devices[0]);
        let context = Context::from_device(&device).unwrap();
        const ARRAY_SIZE: usize = 1024;
        // Device-only buffer: no host pointer supplied.
        let buffer = unsafe {
            Buffer::<cl_float>::create(&context, CL_MEM_WRITE_ONLY, ARRAY_SIZE, ptr::null_mut())
                .unwrap()
        };
        let value = buffer.mem_type().unwrap();
        println!("buffer.mem_type(): {}", value);
        assert_eq!(CL_MEM_OBJECT_BUFFER, value);
        let value = buffer.flags().unwrap();
        println!("buffer.flags(): {}", value);
        assert_eq!(CL_MEM_WRITE_ONLY, value);
        // Size is reported in bytes, not elements.
        let value = buffer.size().unwrap();
        println!("buffer.size(): {}", value);
        assert_eq!(ARRAY_SIZE * mem::size_of::<cl_float>(), value);
        let value = buffer.host_ptr().unwrap();
        println!("buffer.host_ptr(): {:?}", value);
        assert_eq!(0, value);
        let value = buffer.map_count().unwrap();
        println!("buffer.map_count(): {}", value);
        assert_eq!(0, value);
        let value = buffer.reference_count().unwrap();
        println!("buffer.reference_count(): {}", value);
        assert_eq!(1, value);
        let value = buffer.context().unwrap();
        assert!(context.get() == value);
        // A plain buffer has no associated memory object (null handle).
        let value = buffer.associated_memobject().unwrap() as intptr_t;
        println!("buffer.associated_memobject(): {:?}", value);
        assert_eq!(0, value);
        let value = buffer.offset().unwrap();
        println!("buffer.offset(): {}", value);
        assert_eq!(0, value);
        let value = buffer.uses_svm_pointer().unwrap();
        println!("buffer.uses_svm_pointer(): {}", value);
        assert_eq!(0, value);
        // CL_VERSION_3_0: may legitimately fail on older platforms, so only log.
        match buffer.properties() {
            Ok(value) => {
                println!("buffer.properties: {:?}", value)
            }
            Err(e) => println!("OpenCL error, CL_MEM_PROPERTIES: {:?}, {}", e, e),
        }
    }
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/src/command_buffer.rs | src/command_buffer.rs | // Copyright (c) 2021-2024 Via Technology Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! OpenCL Command Buffers extension. Enable with feature: cl_khr_command_buffer.
#![allow(clippy::too_many_arguments, clippy::missing_safety_doc)]
use super::Result;
use super::event::Event;
use super::memory::*;
#[allow(unused_imports)]
use cl3::ext::{
CL_COMMAND_BUFFER_NUM_QUEUES_KHR, CL_COMMAND_BUFFER_PROPERTIES_ARRAY_KHR,
CL_COMMAND_BUFFER_QUEUES_KHR, CL_COMMAND_BUFFER_REFERENCE_COUNT_KHR,
CL_COMMAND_BUFFER_STATE_KHR, cl_bool, cl_command_buffer_info_khr, cl_command_buffer_khr,
cl_command_buffer_properties_khr, cl_command_properties_khr, cl_mutable_command_khr,
cl_sync_point_khr, command_barrier_with_wait_list_khr, command_copy_buffer_khr,
command_copy_buffer_rect_khr, command_copy_buffer_to_image_khr, command_copy_image_khr,
command_copy_image_to_buffer_khr, command_fill_buffer_khr, command_fill_image_khr,
command_nd_range_kernel_khr, command_svm_mem_fill_khr, command_svm_memcpy_khr,
create_command_buffer_khr, enqueue_command_buffer_khr, finalize_command_buffer_khr,
get_command_buffer_data_khr, get_command_buffer_info_khr,
get_command_buffer_mutable_dispatch_data, release_command_buffer_khr,
};
#[allow(unused_imports)]
use cl3::types::{cl_command_queue, cl_event, cl_kernel, cl_mem, cl_uint};
use libc::{c_void, size_t};
use std::mem;
use std::ptr;
/// An OpenCL command-buffer.
///
/// This extension adds the ability to record and replay buffers of OpenCL commands.
/// See [cl_khr_command_buffer](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_Ext.html#cl_khr_command_buffer)
#[derive(Debug)]
pub struct CommandBuffer {
    // Raw OpenCL handle; owned by this wrapper and released in Drop.
    buffer: cl_command_buffer_khr,
}
impl From<CommandBuffer> for cl_command_buffer_khr {
    // Consumes the wrapper and returns the raw handle.
    //
    // NOTE(review): `value` is dropped when this function returns, and
    // `Drop for CommandBuffer` calls clReleaseCommandBufferKHR on the handle
    // being returned. Callers must retain the command-buffer beforehand for
    // the returned handle to stay valid.
    fn from(value: CommandBuffer) -> Self {
        value.buffer
    }
}
impl Drop for CommandBuffer {
    // Releases the underlying cl_command_buffer_khr.
    // NOTE(review): panics via `expect` on failure, like the other Drop impls.
    fn drop(&mut self) {
        unsafe {
            release_command_buffer_khr(self.buffer).expect("Error: clReleaseCommandBufferKHR")
        };
    }
}
// SAFETY(review): these assert that a raw cl_command_buffer_khr may be moved
// to and shared between threads. Not provable from this file alone — confirm
// against the cl_khr_command_buffer extension's thread-safety guarantees.
unsafe impl Send for CommandBuffer {}
unsafe impl Sync for CommandBuffer {}
impl CommandBuffer {
    /// Wrap a raw `cl_command_buffer_khr` handle.
    /// Takes ownership: the handle is released (clReleaseCommandBufferKHR) on drop.
    const fn new(buffer: cl_command_buffer_khr) -> Self {
        Self { buffer }
    }

    /// Get the underlying OpenCL cl_command_buffer_khr.
    pub const fn get(&self) -> cl_command_buffer_khr {
        self.buffer
    }

    /// Create a command-buffer that can record commands to the specified queues.
    ///
    /// NOTE(review): an empty `properties` slice passes a dangling (non-null)
    /// pointer to the FFI layer; confirm `create_command_buffer_khr` handles
    /// the no-properties case, or that callers always pass a null-terminated list.
    pub fn create(
        queues: &[cl_command_queue],
        properties: &[cl_command_buffer_properties_khr],
    ) -> Result<Self> {
        let buffer = create_command_buffer_khr(queues, properties.as_ptr())?;
        Ok(Self::new(buffer))
    }

    /// Finalizes command recording ready for enqueuing the command-buffer on a command-queue.
    pub fn finalize(&self) -> Result<()> {
        Ok(finalize_command_buffer_khr(self.buffer)?)
    }

    /// Enqueues a command-buffer to execute on command-queues specified by queues,
    /// or on default command-queues used during recording if queues is empty.
    ///
    /// Returns the completion [`Event`] for the enqueued command-buffer.
    pub unsafe fn enqueue(
        &self,
        queues: &mut [cl_command_queue],
        event_wait_list: &[cl_event],
    ) -> Result<Event> {
        unsafe {
            let event = enqueue_command_buffer_khr(
                queues.len() as cl_uint,
                queues.as_mut_ptr(),
                self.buffer,
                event_wait_list.len() as cl_uint,
                // The OpenCL API requires a null pointer for an empty wait list.
                if !event_wait_list.is_empty() {
                    event_wait_list.as_ptr()
                } else {
                    ptr::null()
                },
            )?;
            Ok(Event::new(event))
        }
    }

    /// Records a barrier operation used as a synchronization point.
    ///
    /// Returns the sync point of the recorded command.
    pub unsafe fn command_barrier_with_wait_list(
        &self,
        queue: cl_command_queue,
        properties: *const cl_command_properties_khr,
        sync_point_wait_list: &[cl_sync_point_khr],
    ) -> Result<cl_sync_point_khr> {
        let mut sync_point = 0;
        unsafe {
            command_barrier_with_wait_list_khr(
                self.buffer,
                queue,
                properties,
                sync_point_wait_list,
                &mut sync_point,
                // No mutable-command handle is requested for this command.
                ptr::null_mut(),
            )?
        };
        Ok(sync_point)
    }

    /// Records a command to copy from one buffer object to another.
    pub unsafe fn copy_buffer<T>(
        &self,
        queue: cl_command_queue,
        properties: *const cl_command_properties_khr,
        src_buffer: &Buffer<T>,
        dst_buffer: &mut Buffer<T>,
        src_offset: size_t,
        dst_offset: size_t,
        size: size_t,
        sync_point_wait_list: &[cl_sync_point_khr],
    ) -> Result<cl_sync_point_khr> {
        unsafe {
            let mut sync_point = 0;
            command_copy_buffer_khr(
                self.buffer,
                queue,
                properties,
                src_buffer.get(),
                dst_buffer.get_mut(),
                src_offset,
                dst_offset,
                size,
                sync_point_wait_list,
                &mut sync_point,
                ptr::null_mut(),
            )?;
            Ok(sync_point)
        }
    }

    /// Records a command to copy a rectangular region from a buffer object to another buffer object.
    pub unsafe fn copy_buffer_rect<T>(
        &self,
        queue: cl_command_queue,
        properties: *const cl_command_properties_khr,
        src_buffer: &Buffer<T>,
        dst_buffer: &mut Buffer<T>,
        src_origin: *const size_t,
        dst_origin: *const size_t,
        region: *const size_t,
        src_row_pitch: size_t,
        src_slice_pitch: size_t,
        dst_row_pitch: size_t,
        dst_slice_pitch: size_t,
        sync_point_wait_list: &[cl_sync_point_khr],
    ) -> Result<cl_sync_point_khr> {
        unsafe {
            let mut sync_point = 0;
            command_copy_buffer_rect_khr(
                self.buffer,
                queue,
                properties,
                src_buffer.get(),
                dst_buffer.get_mut(),
                src_origin,
                dst_origin,
                region,
                src_row_pitch,
                src_slice_pitch,
                dst_row_pitch,
                dst_slice_pitch,
                sync_point_wait_list,
                &mut sync_point,
                ptr::null_mut(),
            )?;
            Ok(sync_point)
        }
    }

    /// Records a command to copy a buffer object to an image object.
    pub unsafe fn copy_buffer_to_image<T>(
        &self,
        queue: cl_command_queue,
        properties: *const cl_command_properties_khr,
        src_buffer: &Buffer<T>,
        dst_image: &mut Image,
        src_offset: size_t,
        dst_origin: *const size_t,
        region: *const size_t,
        sync_point_wait_list: &[cl_sync_point_khr],
    ) -> Result<cl_sync_point_khr> {
        unsafe {
            let mut sync_point = 0;
            command_copy_buffer_to_image_khr(
                self.buffer,
                queue,
                properties,
                src_buffer.get(),
                dst_image.get_mut(),
                src_offset,
                dst_origin,
                region,
                sync_point_wait_list,
                &mut sync_point,
                ptr::null_mut(),
            )?;
            Ok(sync_point)
        }
    }

    /// Records a command to copy image objects.
    ///
    /// NOTE(review): the `T` type parameter is unused here (unlike the buffer
    /// methods), and `src_image` is taken *by value* — its `Drop` releases the
    /// cl_mem when this call returns, while the sibling `copy_image_to_buffer`
    /// takes `&Image`. Verify the command-buffer retains the image, otherwise
    /// the recorded command may reference a released object.
    pub unsafe fn copy_image<T>(
        &self,
        queue: cl_command_queue,
        properties: *const cl_command_properties_khr,
        src_image: Image,
        dst_image: &mut Image,
        src_origin: *const size_t,
        dst_origin: *const size_t,
        region: *const size_t,
        sync_point_wait_list: &[cl_sync_point_khr],
    ) -> Result<cl_sync_point_khr> {
        unsafe {
            let mut sync_point = 0;
            command_copy_image_khr(
                self.buffer,
                queue,
                properties,
                src_image.get(),
                dst_image.get_mut(),
                src_origin,
                dst_origin,
                region,
                sync_point_wait_list,
                &mut sync_point,
                ptr::null_mut(),
            )?;
            Ok(sync_point)
        }
    }

    /// Records a command to copy an image object to a buffer object.
    pub unsafe fn copy_image_to_buffer<T>(
        &self,
        queue: cl_command_queue,
        properties: *const cl_command_properties_khr,
        src_image: &Image,
        dst_buffer: &mut Buffer<T>,
        src_origin: *const size_t,
        region: *const size_t,
        dst_offset: size_t,
        sync_point_wait_list: &[cl_sync_point_khr],
    ) -> Result<cl_sync_point_khr> {
        unsafe {
            let mut sync_point = 0;
            command_copy_image_to_buffer_khr(
                self.buffer,
                queue,
                properties,
                src_image.get(),
                dst_buffer.get_mut(),
                src_origin,
                region,
                dst_offset,
                sync_point_wait_list,
                &mut sync_point,
                ptr::null_mut(),
            )?;
            Ok(sync_point)
        }
    }

    /// Records a command to fill a buffer object with a pattern of a given pattern size.
    #[allow(clippy::as_ptr_cast_mut)]
    pub unsafe fn fill_buffer<T>(
        &self,
        queue: cl_command_queue,
        properties: *const cl_command_properties_khr,
        buffer: &mut Buffer<T>,
        pattern: &[T],
        offset: size_t,
        size: size_t,
        sync_point_wait_list: &[cl_sync_point_khr],
    ) -> Result<cl_sync_point_khr> {
        unsafe {
            let mut sync_point = 0;
            command_fill_buffer_khr(
                self.buffer,
                queue,
                properties,
                buffer.get_mut(),
                // The FFI layer takes the pattern as an opaque pointer.
                pattern.as_ptr() as cl_mem,
                mem::size_of_val(pattern),
                offset,
                size,
                sync_point_wait_list,
                &mut sync_point,
                ptr::null_mut(),
            )?;
            Ok(sync_point)
        }
    }

    /// Records a command to fill an image object with a specified color.
    ///
    /// NOTE(review): the `T` type parameter is unused in this method.
    pub unsafe fn fill_image<T>(
        &self,
        queue: cl_command_queue,
        properties: *const cl_command_properties_khr,
        image: &mut Image,
        fill_color: *const c_void,
        origin: *const size_t,
        region: *const size_t,
        sync_point_wait_list: &[cl_sync_point_khr],
    ) -> Result<cl_sync_point_khr> {
        unsafe {
            let mut sync_point = 0;
            command_fill_image_khr(
                self.buffer,
                queue,
                properties,
                image.get_mut(),
                fill_color,
                origin,
                region,
                sync_point_wait_list,
                &mut sync_point,
                ptr::null_mut(),
            )?;
            Ok(sync_point)
        }
    }

    /// Records a command to execute a kernel on a device.
    pub unsafe fn nd_range_kernel(
        &self,
        queue: cl_command_queue,
        properties: *const cl_command_properties_khr,
        kernel: cl_kernel,
        work_dim: cl_uint,
        global_work_offsets: *const size_t,
        global_work_sizes: *const size_t,
        local_work_sizes: *const size_t,
        sync_point_wait_list: &[cl_sync_point_khr],
    ) -> Result<cl_sync_point_khr> {
        unsafe {
            let mut sync_point = 0;
            command_nd_range_kernel_khr(
                self.buffer,
                queue,
                properties,
                kernel,
                work_dim,
                global_work_offsets,
                global_work_sizes,
                local_work_sizes,
                sync_point_wait_list,
                &mut sync_point,
                ptr::null_mut(),
            )?;
            Ok(sync_point)
        }
    }

    /// Records an SVM memory-copy command (clCommandSVMMemcpyKHR).
    /// `mutable_handle` may receive a handle for later command mutation, or be null.
    pub unsafe fn svm_memcpy(
        &self,
        queue: cl_command_queue,
        properties: *const cl_command_properties_khr,
        dst_ptr: *mut c_void,
        src_ptr: *const c_void,
        size: size_t,
        sync_point_wait_list: &[cl_sync_point_khr],
        mutable_handle: *mut cl_mutable_command_khr,
    ) -> Result<cl_sync_point_khr> {
        unsafe {
            let mut sync_point = 0;
            command_svm_memcpy_khr(
                self.buffer,
                queue,
                properties,
                dst_ptr,
                src_ptr,
                size,
                sync_point_wait_list,
                &mut sync_point,
                mutable_handle,
            )?;
            Ok(sync_point)
        }
    }

    /// Records an SVM memory-fill command (clCommandSVMMemFillKHR).
    /// `mutable_handle` may receive a handle for later command mutation, or be null.
    pub unsafe fn svm_mem_fill(
        &self,
        queue: cl_command_queue,
        properties: *const cl_command_properties_khr,
        svm_ptr: *mut c_void,
        pattern: *const c_void,
        pattern_size: size_t,
        size: size_t,
        sync_point_wait_list: &[cl_sync_point_khr],
        mutable_handle: *mut cl_mutable_command_khr,
    ) -> Result<cl_sync_point_khr> {
        unsafe {
            let mut sync_point = 0;
            command_svm_mem_fill_khr(
                self.buffer,
                queue,
                properties,
                svm_ptr,
                pattern,
                pattern_size,
                size,
                sync_point_wait_list,
                &mut sync_point,
                mutable_handle,
            )?;
            Ok(sync_point)
        }
    }

    /// CL_COMMAND_BUFFER_NUM_QUEUES_KHR.
    pub fn num_queues(&self) -> Result<cl_uint> {
        Ok(get_command_buffer_info_khr(self.buffer, CL_COMMAND_BUFFER_NUM_QUEUES_KHR)?.into())
    }

    /// CL_COMMAND_BUFFER_QUEUES_KHR, as raw handle values.
    pub fn queues(&self) -> Result<Vec<isize>> {
        // cl_command_queue
        Ok(get_command_buffer_info_khr(self.buffer, CL_COMMAND_BUFFER_QUEUES_KHR)?.into())
    }

    /// CL_COMMAND_BUFFER_REFERENCE_COUNT_KHR.
    pub fn reference_count(&self) -> Result<cl_uint> {
        Ok(get_command_buffer_info_khr(self.buffer, CL_COMMAND_BUFFER_REFERENCE_COUNT_KHR)?.into())
    }

    /// CL_COMMAND_BUFFER_STATE_KHR (recording / executable / pending).
    pub fn buffer_state(&self) -> Result<cl_uint> {
        Ok(get_command_buffer_info_khr(self.buffer, CL_COMMAND_BUFFER_STATE_KHR)?.into())
    }

    /// CL_COMMAND_BUFFER_PROPERTIES_ARRAY_KHR.
    pub fn properties_array(&self) -> Result<Vec<cl_command_buffer_properties_khr>> {
        Ok(
            get_command_buffer_info_khr(self.buffer, CL_COMMAND_BUFFER_PROPERTIES_ARRAY_KHR)?
                .into(),
        )
    }

    /// Get raw data about the command-buffer via clGetCommandBufferInfoKHR.
    pub fn get_data(&self, param_name: cl_command_buffer_info_khr) -> Result<Vec<u8>> {
        Ok(get_command_buffer_data_khr(self.buffer, param_name)?)
    }
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/src/kernel.rs | src/kernel.rs | // Copyright (c) 2020-2024 Via Technology Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub use cl3::kernel::*;
use super::Result;
use super::command_queue::CommandQueue;
use super::event::Event;
use super::program::Program;
#[allow(unused_imports)]
use cl3::ext;
#[allow(unused_imports)]
use cl3::types::{cl_command_queue, cl_context, cl_event};
use libc::{c_void, size_t};
use std::ffi::CString;
use std::mem;
use std::ptr;
/// An OpenCL kernel object.
/// Implements the Drop trait to call release_kernel when the object is dropped.
#[derive(Debug)]
pub struct Kernel {
    // Raw OpenCL handle; owned by this wrapper and released in Drop.
    kernel: cl_kernel,
}
impl From<Kernel> for cl_kernel {
    // Consumes the wrapper and returns the raw handle.
    //
    // NOTE(review): `value` is dropped when this function returns, and
    // `Drop for Kernel` calls clReleaseKernel on the handle being returned.
    // Callers must retain the kernel beforehand for the handle to stay valid.
    fn from(value: Kernel) -> Self {
        value.kernel
    }
}
#[cfg(any(feature = "CL_VERSION_2_1", feature = "dynamic"))]
impl Clone for Kernel {
    /// Clone an OpenCL kernel object.
    /// CL_VERSION_2_1 see: [Copying Kernel Objects](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#_copying_kernel_objects)
    ///
    /// Returns the new `Kernel`.
    ///
    /// # Panics
    ///
    /// Panics with "Error: clCloneKernel" if the underlying clCloneKernel
    /// call fails (the `Clone` trait cannot return a `Result`).
    fn clone(&self) -> Self {
        let kernel = clone_kernel(self.kernel).expect("Error: clCloneKernel");
        Self { kernel }
    }
}
impl Drop for Kernel {
    // Releases the underlying cl_kernel.
    // NOTE(review): panics via `expect` on failure, like the other Drop impls.
    fn drop(&mut self) {
        unsafe { release_kernel(self.kernel).expect("Error: clReleaseKernel") };
    }
}
// SAFETY(review): asserts that a `Kernel` (a raw cl_kernel handle) may be moved
// across threads. Kernels carry per-object argument state, so cross-thread use
// needs care — confirm against the OpenCL spec before relying on this.
unsafe impl Send for Kernel {}
impl Kernel {
/// Create a Kernel from an OpenCL cl_kernel.
///
/// * `kernel` - a valid OpenCL cl_kernel.
///
/// returns a Result containing the new Kernel
/// or the error code from the OpenCL C API function to get the number
/// of kernel arguments.
pub const fn new(kernel: cl_kernel) -> Self {
Self { kernel }
}
/// Get the underlying OpenCL cl_kernel.
pub const fn get(&self) -> cl_kernel {
self.kernel
}
/// Create a Kernel from an OpenCL Program.
///
/// * `program` - a built OpenCL Program.
/// * `name` - the name of the OpenCL kernel.
///
/// returns a Result containing the new Kernel
/// or the error code from the OpenCL C API function to get the number
/// of kernel arguments.
pub fn create(program: &Program, name: &str) -> Result<Self> {
// Ensure c_name string is null terminated
let c_name = CString::new(name).expect("Kernel::create, invalid name");
Ok(Self::new(create_kernel(program.get(), &c_name)?))
}
/// Set the argument value for a specific argument of a kernel.
///
/// * `arg_index` - the kernel argument index.
/// * `arg` - a reference to the data for the argument at arg_index.
///
/// returns an empty Result or the error code from the OpenCL C API function.
///
/// # Safety
///
/// This function is unsafe because the index, size and value must be valid.
pub unsafe fn set_arg<T>(&self, arg_index: cl_uint, arg: &T) -> Result<()> {
unsafe {
Ok(set_kernel_arg(
self.kernel,
arg_index,
mem::size_of::<T>(),
arg as *const _ as *const c_void,
)?)
}
}
/// Create a local memory buffer for a specific argument of a kernel.
///
/// * `arg_index` - the kernel argument index.
/// * `size` - the size of the local memory buffer in bytes.
///
/// returns an empty Result or the error code from the OpenCL C API function.
///
/// # Safety
///
/// This function is unsafe because the index and size must be valid.
pub unsafe fn set_arg_local_buffer(&self, arg_index: cl_uint, size: size_t) -> Result<()> {
unsafe { Ok(set_kernel_arg(self.kernel, arg_index, size, ptr::null())?) }
}
/// Set set a SVM pointer as the argument value for a specific argument of a kernel.
///
/// * `arg_index` - the kernel argument index.
/// * `arg_ptr` - the SVM pointer to the data for the argument at arg_index.
///
/// returns an empty Result or the error code from the OpenCL C API function.
///
/// # Safety
///
/// This function is unsafe because the index and ptr must be valid.
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
pub unsafe fn set_arg_svm_pointer(
&self,
arg_index: cl_uint,
arg_ptr: *const c_void,
) -> Result<()> {
unsafe { Ok(set_kernel_arg_svm_pointer(self.kernel, arg_index, arg_ptr)?) }
}
/// Pass additional information other than argument values to a kernel.
///
/// * `param_name` - the information to be passed to kernel, see:
/// [Kernel Execution Properties](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#kernel-exec-info-table).
/// * `param_ptr` - pointer to the data for the param_name.
///
/// returns an empty Result or the error code from the OpenCL C API function.
///
/// # Safety
///
/// This function is unsafe because the name and ptr must be valid.
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
pub unsafe fn set_exec_info<T>(
&self,
param_name: cl_kernel_exec_info,
param_ptr: *const T,
) -> Result<()> {
unsafe {
Ok(set_kernel_exec_info(
self.kernel,
param_name,
mem::size_of::<T>(),
param_ptr as *const c_void,
)?)
}
}
pub fn function_name(&self) -> Result<String> {
Ok(get_kernel_info(self.kernel, CL_KERNEL_FUNCTION_NAME)?.into())
}
pub fn num_args(&self) -> Result<cl_uint> {
Ok(get_kernel_info(self.kernel, CL_KERNEL_NUM_ARGS)?.into())
}
pub fn reference_count(&self) -> Result<cl_uint> {
Ok(get_kernel_info(self.kernel, CL_KERNEL_REFERENCE_COUNT)?.into())
}
pub fn context(&self) -> Result<cl_context> {
Ok(isize::from(get_kernel_info(self.kernel, CL_KERNEL_CONTEXT)?) as cl_context)
}
pub fn program(&self) -> Result<cl_program> {
Ok(isize::from(get_kernel_info(self.kernel, CL_KERNEL_PROGRAM)?) as cl_program)
}
pub fn attributes(&self) -> Result<String> {
Ok(get_kernel_info(self.kernel, CL_KERNEL_ATTRIBUTES)?.into())
}
/// Get data about an OpenCL kernel.
/// Calls clGetKernelInfo to get the desired data about the kernel.
pub fn get_data(&self, param_name: cl_kernel_info) -> Result<Vec<u8>> {
Ok(get_kernel_data(self.kernel, param_name)?)
}
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub fn get_arg_address_qualifier(&self, arg_indx: cl_uint) -> Result<cl_uint> {
Ok(get_kernel_arg_info(self.kernel, arg_indx, CL_KERNEL_ARG_ADDRESS_QUALIFIER)?.into())
}
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub fn get_arg_access_qualifier(&self, arg_indx: cl_uint) -> Result<cl_uint> {
Ok(get_kernel_arg_info(self.kernel, arg_indx, CL_KERNEL_ARG_ACCESS_QUALIFIER)?.into())
}
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub fn get_arg_type_qualifier(&self, arg_indx: cl_uint) -> Result<cl_ulong> {
Ok(get_kernel_arg_info(self.kernel, arg_indx, CL_KERNEL_ARG_TYPE_QUALIFIER)?.into())
}
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub fn get_arg_type_name(&self, arg_indx: cl_uint) -> Result<String> {
Ok(get_kernel_arg_info(self.kernel, arg_indx, CL_KERNEL_ARG_TYPE_NAME)?.into())
}
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub fn get_arg_name(&self, arg_indx: cl_uint) -> Result<String> {
Ok(get_kernel_arg_info(self.kernel, arg_indx, CL_KERNEL_ARG_NAME)?.into())
}
/// Get data about arguments of an OpenCL kernel.
/// Calls clGetKernelArgInfo to get the desired data about arguments of the kernel.
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
pub fn get_arg_data(
&self,
arg_indx: cl_uint,
param_name: cl_kernel_arg_access_qualifier,
) -> Result<Vec<u8>> {
Ok(get_kernel_arg_data(self.kernel, arg_indx, param_name)?)
}
    /// Queries `CL_KERNEL_WORK_GROUP_SIZE` for `device` via clGetKernelWorkGroupInfo.
    pub fn get_work_group_size(&self, device: cl_device_id) -> Result<size_t> {
        Ok(get_kernel_work_group_info(self.kernel, device, CL_KERNEL_WORK_GROUP_SIZE)?.into())
    }
pub fn get_compile_work_group_size(&self, device: cl_device_id) -> Result<Vec<size_t>> {
Ok(
get_kernel_work_group_info(self.kernel, device, CL_KERNEL_COMPILE_WORK_GROUP_SIZE)?
.into(),
)
}
    /// Queries `CL_KERNEL_LOCAL_MEM_SIZE` for `device` via clGetKernelWorkGroupInfo.
    pub fn get_local_mem_size(&self, device: cl_device_id) -> Result<cl_ulong> {
        Ok(get_kernel_work_group_info(self.kernel, device, CL_KERNEL_LOCAL_MEM_SIZE)?.into())
    }
pub fn get_work_group_size_multiple(&self, device: cl_device_id) -> Result<size_t> {
Ok(get_kernel_work_group_info(
self.kernel,
device,
CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
)?
.into())
}
    /// Queries `CL_KERNEL_PRIVATE_MEM_SIZE` for `device` via clGetKernelWorkGroupInfo.
    pub fn get_private_mem_size(&self, device: cl_device_id) -> Result<cl_ulong> {
        Ok(get_kernel_work_group_info(self.kernel, device, CL_KERNEL_PRIVATE_MEM_SIZE)?.into())
    }
    /// Get data about work groups of an OpenCL kernel.
    /// Calls clGetKernelWorkGroupInfo to get the desired data about work groups of the kernel.
    ///
    /// Returns the raw, untyped bytes of the `param_name` value.
    pub fn get_work_group_data(
        &self,
        device: cl_device_id,
        param_name: cl_kernel_work_group_info,
    ) -> Result<Vec<u8>> {
        Ok(get_kernel_work_group_data(self.kernel, device, param_name)?)
    }
#[cfg(any(feature = "cl_khr_subgroups", feature = "dynamic"))]
pub fn get_kernel_sub_group_info_khr(
&self,
device: cl_device_id,
param_name: cl_kernel_sub_group_info,
input_values: &[size_t],
) -> Result<size_t> {
Ok(ext::get_kernel_sub_group_info_khr(
self.kernel,
device,
param_name,
input_values.len(),
input_values.as_ptr().cast::<c_void>(),
)?)
}
#[cfg(any(feature = "cl_khr_suggested_local_work_size", feature = "dynamic"))]
pub fn get_kernel_suggested_local_work_size_khr(
&self,
command_queue: cl_command_queue,
work_dim: cl_uint,
global_work_offset: *const size_t,
global_work_size: *const size_t,
) -> Result<size_t> {
Ok(ext::get_kernel_suggested_local_work_size_khr(
command_queue,
self.kernel,
work_dim,
global_work_offset,
global_work_size,
)?)
}
}
/// Create OpenCL Kernel objects for all the kernel functions in a program.
///
/// * `program` - a valid OpenCL program.
///
/// returns a Result containing the new Kernels in a Vec
/// or the error code from the OpenCL C API function.
pub fn create_program_kernels(program: &Program) -> Result<Vec<Kernel>> {
    let raw_kernels = create_kernels_in_program(program.get())?;
    Ok(raw_kernels.into_iter().map(Kernel::new).collect())
}
/// A struct that implements the [builder pattern](https://doc.rust-lang.org/1.0.0/style/ownership/builders.html).
///
/// To simplify setting up [Kernel] arguments and the [NDRange](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#_mapping_work_items_onto_an_ndrange)
/// when enqueueing a [Kernel] on a [CommandQueue].
#[derive(Debug)]
pub struct ExecuteKernel<'a> {
    /// The kernel to enqueue.
    pub kernel: &'a Kernel,
    /// The number of arguments the kernel expects, read on construction.
    pub num_args: cl_uint,
    /// NDRange global work offsets, one per dimension (may be empty).
    pub global_work_offsets: Vec<size_t>,
    /// NDRange global work sizes, one per dimension.
    pub global_work_sizes: Vec<size_t>,
    /// NDRange local work sizes, one per dimension (may be empty).
    pub local_work_sizes: Vec<size_t>,
    /// Events that must complete before the kernel executes.
    pub event_wait_list: Vec<cl_event>,
    // Index of the next unset kernel argument; advanced by the set_arg* methods.
    arg_index: cl_uint,
}
impl<'a> ExecuteKernel<'a> {
    /// Create an `ExecuteKernel` builder for `kernel`.
    ///
    /// # Panics
    ///
    /// Panics if the number of kernel arguments cannot be read from `kernel`.
    pub fn new(kernel: &'a Kernel) -> Self {
        ExecuteKernel {
            kernel,
            num_args: kernel
                .num_args()
                .expect("ExecuteKernel: error reading kernel.num_args"),
            global_work_offsets: Vec::new(),
            global_work_sizes: Vec::new(),
            local_work_sizes: Vec::new(),
            event_wait_list: Vec::new(),
            arg_index: 0,
        }
    }
    /// Set the next argument of the kernel.
    /// Calls `self.kernel.set_arg` to set the next unset kernel argument.
    ///
    /// # Panics
    ///
    /// Panics if too many arguments have been set or the argument is invalid.
    ///
    /// * `arg` - a reference to the data for the kernel argument.
    ///
    /// returns a reference to self.
    ///
    /// # Safety
    ///
    /// This function is unsafe because arg must be valid.
    #[track_caller]
    pub unsafe fn set_arg<'b, T>(&'b mut self, arg: &T) -> &'b mut Self {
        unsafe {
            assert!(
                self.arg_index < self.num_args,
                "ExecuteKernel::set_arg too many args"
            );
            if let Err(e) = self.kernel.set_arg(self.arg_index, arg) {
                panic!(
                    "ExecuteKernel::set_arg invalid kernel arg at index: {}, {:?}, {}",
                    self.arg_index, e, e,
                )
            };
            self.arg_index += 1;
            self
        }
    }
    /// Set the next argument of the kernel as a local buffer
    /// Calls `self.kernel.set_arg_local_buffer` to set the next unset kernel argument.
    ///
    /// # Panics
    ///
    /// Panics if too many arguments have been set or the argument is invalid.
    ///
    /// * `size` - the size of the local memory buffer in bytes.
    ///
    /// returns a reference to self.
    ///
    /// # Safety
    ///
    /// This function is unsafe because size must be valid.
    #[track_caller]
    pub unsafe fn set_arg_local_buffer(&mut self, size: size_t) -> &mut Self {
        unsafe {
            assert!(
                self.arg_index < self.num_args,
                "ExecuteKernel::set_arg_local_buffer too many args"
            );
            if let Err(e) = self.kernel.set_arg_local_buffer(self.arg_index, size) {
                panic!(
                    "ExecuteKernel::set_arg_local_buffer invalid kernel arg at index: {}, {:?}, {}",
                    self.arg_index, e, e,
                )
            };
            self.arg_index += 1;
            self
        }
    }
    /// Set the next argument of the kernel.
    /// Calls `self.kernel.set_arg` to set the next unset kernel argument.
    ///
    /// # Panics
    ///
    /// Panics if too many arguments have been set or the argument is invalid.
    ///
    /// * `arg` - a reference to the data for the kernel argument.
    ///
    /// returns a reference to self.
    ///
    /// # Safety
    ///
    /// This function is unsafe because ptr must be valid.
    #[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
    #[track_caller]
    pub unsafe fn set_arg_svm<T>(&mut self, arg_ptr: *const T) -> &mut Self {
        unsafe {
            assert!(
                self.arg_index < self.num_args,
                "ExecuteKernel::set_arg_svm too many args"
            );
            if let Err(e) = self
                .kernel
                .set_arg_svm_pointer(self.arg_index, arg_ptr as *const c_void)
            {
                panic!(
                    "ExecuteKernel::set_arg_svm_pointer invalid kernel arg at index: {}, {:?}, {}",
                    self.arg_index, e, e,
                )
            };
            self.arg_index += 1;
            self
        }
    }
    /// Pass additional information other than argument values to a kernel.
    ///
    /// * `param_name` - the information to be passed to kernel, see:
    ///   [Kernel Execution Properties](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#kernel-exec-info-table).
    /// * `param_ptr` - pointer to the data for the param_name.
    ///
    /// returns a reference to self.
    ///
    /// # Panics
    ///
    /// Panics if `set_exec_info` fails for the given name/pointer.
    ///
    /// # Safety
    ///
    /// This function is unsafe because name and ptr must be valid.
    #[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
    pub unsafe fn set_exec_info<T>(
        &mut self,
        param_name: cl_kernel_exec_info,
        param_ptr: *const T,
    ) -> &mut Self {
        unsafe {
            self.kernel
                .set_exec_info(param_name, param_ptr)
                .expect("Invalid param_name or param_ptr");
            self
        }
    }
    /// Set a global work offset for a call to clEnqueueNDRangeKernel.
    ///
    /// * `size` - the size of the global work offset.
    ///
    /// returns a reference to self.
    pub fn set_global_work_offset(&mut self, size: size_t) -> &mut Self {
        self.global_work_offsets.push(size);
        self
    }
    /// Set the global work offsets for a call to clEnqueueNDRangeKernel.
    ///
    /// # Panics
    ///
    /// Panics if global_work_offsets is already set.
    ///
    /// * `sizes` - the sizes of the global work offset.
    ///
    /// returns a reference to self.
    pub fn set_global_work_offsets(&mut self, sizes: &[size_t]) -> &mut Self {
        assert!(
            self.global_work_offsets.is_empty(),
            "ExecuteKernel::set_global_work_offsets already set"
        );
        self.global_work_offsets.resize(sizes.len(), 0);
        self.global_work_offsets.copy_from_slice(sizes);
        self
    }
    /// Set a global work size for a call to clEnqueueNDRangeKernel.
    ///
    /// * `size` - the size of the global work size.
    ///
    /// returns a reference to self.
    pub fn set_global_work_size(&mut self, size: size_t) -> &mut Self {
        self.global_work_sizes.push(size);
        self
    }
    /// Set the global work sizes for a call to clEnqueueNDRangeKernel.
    ///
    /// # Panics
    ///
    /// Panics if global_work_sizes is already set.
    ///
    /// * `sizes` - the sizes of the global work sizes.
    ///
    /// returns a reference to self.
    pub fn set_global_work_sizes<'b>(&'b mut self, sizes: &[size_t]) -> &'b mut Self {
        assert!(
            self.global_work_sizes.is_empty(),
            "ExecuteKernel::global_work_sizes already set"
        );
        self.global_work_sizes.resize(sizes.len(), 0);
        self.global_work_sizes.copy_from_slice(sizes);
        self
    }
    /// Set a local work size for a call to clEnqueueNDRangeKernel.
    ///
    /// * `size` - the size of the local work size.
    ///
    /// returns a reference to self.
    pub fn set_local_work_size(&mut self, size: size_t) -> &mut Self {
        self.local_work_sizes.push(size);
        self
    }
    /// Set the local work sizes for a call to clEnqueueNDRangeKernel.
    ///
    /// # Panics
    ///
    /// Panics if local_work_sizes is already set.
    ///
    /// * `sizes` - the sizes of the local work sizes.
    ///
    /// returns a reference to self.
    pub fn set_local_work_sizes<'b>(&'b mut self, sizes: &[size_t]) -> &'b mut Self {
        assert!(
            self.local_work_sizes.is_empty(),
            "ExecuteKernel::local_work_sizes already set"
        );
        self.local_work_sizes.resize(sizes.len(), 0);
        self.local_work_sizes.copy_from_slice(sizes);
        self
    }
    /// Set an event for the event_wait_list in a call to clEnqueueNDRangeKernel.
    ///
    /// * `event` - the Event to add to the event_wait_list.
    ///
    /// returns a reference to self.
    pub fn set_wait_event<'b>(&'b mut self, event: &Event) -> &'b mut Self {
        self.event_wait_list.push(event.get());
        self
    }
    /// Set the event_wait_list in a call to clEnqueueNDRangeKernel.
    ///
    /// # Panics
    ///
    /// Panics if event_wait_list is already set.
    ///
    /// * `events` - the cl_events in the call to clEnqueueNDRangeKernel.
    ///
    /// returns a reference to self.
    pub fn set_event_wait_list<'b>(&'b mut self, events: &[cl_event]) -> &'b mut Self {
        assert!(
            self.event_wait_list.is_empty(),
            "ExecuteKernel::event_wait_list already set"
        );
        self.event_wait_list.resize(events.len(), ptr::null_mut());
        self.event_wait_list.copy_from_slice(events);
        self
    }
    /// Check that the kernel arguments and NDRange dimensions are consistent
    /// before enqueueing; every check is an `assert!`, so an inconsistent
    /// set-up panics rather than enqueueing a bad kernel launch.
    fn validate(&self, max_work_item_dimensions: usize) {
        assert!(
            self.num_args == self.arg_index,
            "ExecuteKernel too few args"
        );
        let work_dim = self.global_work_sizes.len();
        assert!(0 < work_dim, "ExecuteKernel not enough global_work_sizes");
        assert!(
            work_dim <= max_work_item_dimensions,
            "ExecuteKernel too many global_work_sizes"
        );
        let offsets_dim = self.global_work_offsets.len();
        assert!(
            (0 == offsets_dim) || (offsets_dim == work_dim),
            "ExecuteKernel global_work_offsets dimensions != global_work_sizes"
        );
        let locals_dim = self.local_work_sizes.len();
        assert!(
            (0 == locals_dim) || (locals_dim == work_dim),
            "ExecuteKernel local_work_sizes dimensions != global_work_sizes"
        );
    }
    /// Reset the NDRange data, event wait list and argument index so the
    /// builder can be reused for another enqueue.
    fn clear(&mut self) {
        self.global_work_offsets.clear();
        self.global_work_sizes.clear();
        self.local_work_sizes.clear();
        self.event_wait_list.clear();
        self.arg_index = 0;
    }
    /// Calls clEnqueueNDRangeKernel on the given with [CommandQueue] with the
    /// global and local work sizes and the global work offsets together with
    /// an events wait list.
    ///
    /// # Panics
    ///
    /// Panics if:
    /// * too few kernel arguments have been set
    /// * no global_work_sizes have been set
    /// * too many global_work_sizes have been set
    /// * global_work_offsets have been set and their dimensions do not match
    ///   global_work_sizes
    /// * local_work_sizes have been set and their dimensions do not match
    ///   global_work_sizes
    ///
    /// * `queue` - the [CommandQueue] to enqueue the [Kernel] on.
    ///
    /// return the [Event] for this command
    /// or the error code from the OpenCL C API function.
    ///
    /// # Safety
    ///
    /// This is unsafe when the kernel arguments have not been set up correctly.
    pub unsafe fn enqueue_nd_range(&mut self, queue: &CommandQueue) -> Result<Event> {
        unsafe {
            // Get max_work_item_dimensions for the device CommandQueue
            let max_work_item_dimensions = queue.max_work_item_dimensions() as usize;
            self.validate(max_work_item_dimensions);
            let event = queue.enqueue_nd_range_kernel(
                self.kernel.get(),
                self.global_work_sizes.len() as cl_uint,
                // null offsets/local sizes let the OpenCL runtime choose defaults
                if self.global_work_offsets.is_empty() {
                    ptr::null()
                } else {
                    self.global_work_offsets.as_ptr()
                },
                self.global_work_sizes.as_ptr(),
                if self.local_work_sizes.is_empty() {
                    ptr::null()
                } else {
                    self.local_work_sizes.as_ptr()
                },
                &self.event_wait_list,
            )?;
            // Only reached on success: reset the builder so it can be reused.
            self.clear();
            Ok(event)
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::context::Context;
    use crate::device::Device;
    use crate::platform::get_platforms;
    use crate::program::{CL_KERNEL_ARG_INFO, Program};
    use cl3::device::CL_DEVICE_TYPE_GPU;
    use std::collections::HashSet;
    const PROGRAM_SOURCE: &str = r#"
kernel void add(global float* buffer, float scalar) {
    buffer[get_global_id(0)] += scalar;
}
kernel void subtract(global float* buffer, float scalar) {
    buffer[get_global_id(0)] -= scalar;
}
"#;
    // NOTE: this test needs an OpenCL GPU device and driver at run time;
    // it exercises create_program_kernels and the Kernel query methods.
    #[test]
    fn test_create_program_kernels() {
        let platforms = get_platforms().unwrap();
        assert!(0 < platforms.len());
        // Get the first platform
        let platform = &platforms[0];
        let devices = platform.get_devices(CL_DEVICE_TYPE_GPU).unwrap();
        assert!(0 < devices.len());
        // Get the first device
        let device = Device::new(devices[0]);
        let context = Context::from_device(&device).unwrap();
        let program =
            Program::create_and_build_from_source(&context, PROGRAM_SOURCE, CL_KERNEL_ARG_INFO)
                .expect("Program::create_and_build_from_source failed");
        // Create the kernels from the OpenCL program source.
        let kernels = create_program_kernels(&program).unwrap();
        assert!(2 == kernels.len());
        let kernel_0_name = kernels[0].function_name().unwrap();
        println!("OpenCL kernel_0_name: {}", kernel_0_name);
        let kernel_1_name = kernels[1].function_name().unwrap();
        println!("OpenCL kernel_1_name: {}", kernel_1_name);
        // Kernel creation order is unspecified, so check membership not order.
        let kernel_names: HashSet<&str> = program.kernel_names().split(';').collect();
        assert!(kernel_names.contains(&kernel_0_name as &str));
        assert!(kernel_names.contains(&kernel_1_name as &str));
        let num_args_0 = kernels[0].num_args().expect("OpenCL kernel_0.num_args");
        println!("OpenCL kernel_0 num args: {}", num_args_0);
        let value = kernels[0].num_args().unwrap();
        println!("kernel.num_args(): {}", value);
        assert_eq!(2, value);
        let value = kernels[0].reference_count().unwrap();
        println!("kernel.reference_count(): {}", value);
        assert_eq!(1, value);
        let value = kernels[0].context().unwrap();
        assert!(context.get() == value);
        let value = kernels[0].program().unwrap();
        assert!(program.get() == value);
        let value = kernels[0].attributes().unwrap();
        println!("kernel.attributes(): {}", value);
        // assert!(value.is_empty());
        #[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
        {
            let arg0_address = kernels[0]
                .get_arg_address_qualifier(0)
                .expect("OpenCL kernel_0.get_arg_address_qualifier");
            println!(
                "OpenCL kernel_0.get_arg_address_qualifier: {:X}",
                arg0_address
            );
            let arg0_access = kernels[0]
                .get_arg_access_qualifier(0)
                .expect("OpenCL kernel_0.get_arg_access_qualifier");
            println!(
                "OpenCL kernel_0.get_arg_access_qualifier: {:X}",
                arg0_access
            );
            let arg0_type_name = kernels[0]
                .get_arg_type_name(0)
                .expect("OpenCL kernel_0.get_arg_type_name");
            println!("OpenCL kernel_0.get_arg_type_name: {}", arg0_type_name);
            let arg0_type = kernels[0]
                .get_arg_type_qualifier(0)
                .expect("OpenCL kernel_0.get_arg_type_qualifier");
            println!("OpenCL kernel_0.get_arg_type_qualifier: {}", arg0_type);
            let arg0_name = kernels[0]
                .get_arg_name(0)
                .expect("OpenCL kernel_0.get_arg_name");
            println!("OpenCL kernel_0.get_arg_name: {}", arg0_name);
        }
        let value = kernels[0].get_work_group_size(device.id()).unwrap();
        println!("kernel.get_work_group_size(): {}", value);
        // assert_eq!(256, value);
        let value = kernels[0].get_compile_work_group_size(device.id()).unwrap();
        println!("kernel.get_work_group_size(): {:?}", value);
        assert_eq!(3, value.len());
        let value = kernels[0].get_local_mem_size(device.id()).unwrap();
        println!("kernel.get_local_mem_size(): {}", value);
        // assert_eq!(1, value);
        let value = kernels[0]
            .get_work_group_size_multiple(device.id())
            .unwrap();
        println!("kernel.get_work_group_size_multiple(): {}", value);
        // assert_eq!(32, value);
        let value = kernels[0].get_private_mem_size(device.id()).unwrap();
        println!("kernel.get_private_mem_size(): {}", value);
        // assert_eq!(0, value);
    }
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/src/context.rs | src/context.rs | // Copyright (c) 2020-2025 Via Technology Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(clippy::missing_safety_doc)]
pub use cl3::context;
use super::Result;
use super::device::Device;
#[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
use super::device::SubDevice;
#[allow(unused_imports)]
use cl3::dx9_media_sharing;
#[cfg(any(feature = "cl_khr_d3d10_sharing", feature = "dynamic"))]
#[allow(unused_imports)]
use cl3::d3d10;
#[cfg(any(feature = "cl_khr_d3d11_sharing", feature = "dynamic"))]
#[allow(unused_imports)]
use cl3::d3d11;
#[allow(unused_imports)]
use cl3::egl;
#[allow(unused_imports)]
use cl3::ext;
#[allow(unused_imports)]
use cl3::gl;
#[allow(unused_imports)]
use cl3::types::{
cl_context, cl_context_info, cl_context_properties, cl_device_id, cl_device_svm_capabilities,
cl_device_type, cl_event, cl_image_format, cl_mem, cl_mem_flags, cl_mem_object_type, cl_uint,
};
use libc::{c_char, c_void, intptr_t, size_t};
use std::ptr;
/// Get the current device used by an OpenGL context.
///
/// * `properties` - the OpenCL context properties.
///
/// returns a Result containing the device
/// or the error code from the OpenCL C API function.
#[cfg(any(feature = "cl_khr_gl_sharing", feature = "dynamic"))]
#[allow(clippy::as_ptr_cast_mut)]
pub fn get_current_device_for_gl_context_khr(
    properties: &[cl_context_properties],
) -> Result<cl_device_id> {
    let info = gl::get_gl_context_info_khr(
        properties.as_ptr() as *mut cl_context_properties,
        gl::CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR,
    )?;
    Ok(intptr_t::from(info) as cl_device_id)
}
/// Get the devices for an OpenGL context.
///
/// * `properties` - the OpenCL context properties.
///
/// returns a Result containing the devices
/// or the error code from the OpenCL C API function.
#[cfg(any(feature = "cl_khr_gl_sharing", feature = "dynamic"))]
#[allow(clippy::as_ptr_cast_mut)]
pub fn get_devices_for_gl_context_khr(
    properties: &[cl_context_properties],
) -> Result<Vec<cl_device_id>> {
    let raw_ptrs: Vec<intptr_t> = gl::get_gl_context_info_khr(
        properties.as_ptr() as *mut cl_context_properties,
        gl::CL_DEVICES_FOR_GL_CONTEXT_KHR,
    )?
    .into();
    // Reinterpret the returned integers as device handles.
    Ok(raw_ptrs
        .into_iter()
        .map(|raw| raw as cl_device_id)
        .collect())
}
/// An OpenCL context object.
/// Implements the Drop trait to call release_context when the object is dropped.
#[derive(Debug)]
pub struct Context {
    // The underlying OpenCL context handle; released on Drop.
    context: cl_context,
    // The device handles the context was created with (or queried from it).
    devices: Vec<cl_device_id>,
}
impl From<Context> for cl_context {
    /// Extract the raw `cl_context` handle from a [Context].
    // NOTE(review): `value` is dropped at the end of this function, which runs
    // `Drop::drop` and calls clReleaseContext on the very handle being
    // returned. If the reference count was 1, the returned handle is no longer
    // valid - confirm the intended ownership-transfer semantics (a
    // `ManuallyDrop`/retain may be needed).
    fn from(value: Context) -> Self {
        value.context
    }
}
impl Drop for Context {
    // Releases the OpenCL context; panics if clReleaseContext fails.
    fn drop(&mut self) {
        // Clearing the device list is not strictly required (the Vec is
        // dropped anyway); kept for explicitness.
        self.devices.clear();
        unsafe { context::release_context(self.context).expect("Error: clReleaseContext") };
    }
}
// SAFETY: `cl_context` is an opaque handle and the wrapper holds no other
// thread-affine state; this presumes the OpenCL runtime's context calls are
// thread-safe, as the specification requires - confirm for targeted drivers.
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
impl Context {
    /// Wrap a raw context handle together with the devices it was built from.
    fn new(context: cl_context, devices: &[cl_device_id]) -> Self {
        Self {
            context,
            devices: devices.to_vec(),
        }
    }
    /// Get the underlying OpenCL cl_context.
    pub const fn get(&self) -> cl_context {
        self.context
    }
    /// Create a Context from a slice of cl_device_ids.
    ///
    /// * `devices` - a slice of cl_device_ids for an OpenCL Platform.
    /// * `properties` - a null terminated list of cl_context_properties, see
    ///   [Context Properties](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#context-properties-table).
    /// * `pfn_notify` - an optional callback function that can be registered by the application.
    /// * `user_data` - passed as the user_data argument when pfn_notify is called.
    ///
    /// returns a Result containing the new OpenCL context
    /// or the error code from the OpenCL C API function.
    pub fn from_devices(
        devices: &[cl_device_id],
        properties: &[cl_context_properties],
        pfn_notify: Option<unsafe extern "C" fn(*const c_char, *const c_void, size_t, *mut c_void)>,
        user_data: *mut c_void,
    ) -> Result<Self> {
        // An empty properties slice means "no properties": pass NULL.
        let properties_ptr = if !properties.is_empty() {
            properties.as_ptr()
        } else {
            ptr::null()
        };
        let context = context::create_context(devices, properties_ptr, pfn_notify, user_data)?;
        Ok(Self::new(context, devices))
    }
    /// Create a Context from a [Device].
    ///
    /// * `device` - a [Device].
    ///
    /// returns a Result containing the new OpenCL context
    /// or the error code from the OpenCL C API function.
    pub fn from_device(device: &Device) -> Result<Self> {
        let devices: Vec<cl_device_id> = vec![device.id()];
        let properties = Vec::<cl_context_properties>::default();
        Self::from_devices(&devices, &properties, None, ptr::null_mut())
    }
    /// Create a Context from a slice of SubDevices.
    ///
    /// * `devices` - a slice of SubDevices for an OpenCL Platform.
    /// * `properties` - a null terminated list of cl_context_properties, see
    ///   [Context Properties](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#context-properties-table).
    /// * `pfn_notify` - an optional callback function that can be registered by the application.
    /// * `user_data` - passed as the user_data argument when pfn_notify is called.
    ///
    /// returns a Result containing the new OpenCL context
    /// or the error code from the OpenCL C API function.
    #[cfg(any(feature = "CL_VERSION_1_2", feature = "dynamic"))]
    pub fn from_sub_devices(
        sub_devices: &[SubDevice],
        properties: &[cl_context_properties],
        pfn_notify: Option<unsafe extern "C" fn(*const c_char, *const c_void, size_t, *mut c_void)>,
        user_data: *mut c_void,
    ) -> Result<Self> {
        let devices = sub_devices
            .iter()
            .map(|dev| dev.id())
            .collect::<Vec<cl_device_id>>();
        Self::from_devices(&devices, properties, pfn_notify, user_data)
    }
    /// Create a Context from a cl_device_type.
    ///
    /// * `device_type` - the cl_device_type to create a Context for.
    /// * `properties` - a null terminated list of cl_context_properties, see
    ///   [Context Properties](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#context-properties-table).
    /// * `pfn_notify` - an optional callback function that can be registered by the application.
    /// * `user_data` - passed as the user_data argument when pfn_notify is called.
    ///
    /// returns a Result containing the new OpenCL context
    /// or the error code from the OpenCL C API function.
    pub fn from_device_type(
        device_type: cl_device_type,
        properties: &[cl_context_properties],
        pfn_notify: Option<unsafe extern "C" fn(*const c_char, *const c_void, size_t, *mut c_void)>,
        user_data: *mut c_void,
    ) -> Result<Self> {
        let properties_ptr = if !properties.is_empty() {
            properties.as_ptr()
        } else {
            ptr::null()
        };
        let context =
            context::create_context_from_type(device_type, properties_ptr, pfn_notify, user_data)?;
        // The runtime chose the devices: query them back from the new context.
        let dev_ptrs: Vec<intptr_t> =
            context::get_context_info(context, context::CL_CONTEXT_DEVICES)?.into();
        let devices = dev_ptrs
            .iter()
            .map(|ptr| *ptr as cl_device_id)
            .collect::<Vec<cl_device_id>>();
        Ok(Self::new(context, &devices))
    }
    /// Get the common Shared Virtual Memory (SVM) capabilities of the
    /// devices in the Context.
    ///
    /// AND-reduces the per-device capability bits, so only capabilities
    /// supported by every device are reported.
    // NOTE: assumes the context has at least one device; indexing panics otherwise.
    pub fn get_svm_mem_capability(&self) -> cl_device_svm_capabilities {
        let device = Device::new(self.devices[0]);
        let mut svm_capability = device.svm_mem_capability();
        for index in 1..self.devices.len() {
            let device = Device::new(self.devices[index]);
            svm_capability &= device.svm_mem_capability();
        }
        svm_capability
    }
    /// Get the list of image formats supported by the Context for an image type,
    /// and allocation information.
    ///
    /// Calls clGetSupportedImageFormats to get the desired information about the context.
    ///
    /// * `flags` - a bit-field used to specify allocation and usage information
    ///   about the image memory object being created, see:
    ///   [Memory Flags](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_API.html#memory-flags-table).
    /// * `image_type` - describes the image type.
    ///
    /// returns a Result containing the desired information in an InfoType enum
    /// or the error code from the OpenCL C API function.
    pub fn get_supported_image_formats(
        &self,
        flags: cl_mem_flags,
        image_type: cl_mem_object_type,
    ) -> Result<Vec<cl_image_format>> {
        Ok(cl3::memory::get_supported_image_formats(
            self.context,
            flags,
            image_type,
        )?)
    }
    /// Import external memory into OpenCL: calls `clImportMemoryARM`
    /// (cl_arm_import_memory extension).
    ///
    /// # Safety
    ///
    /// `properties` and `memory` must be valid for the requested import and
    /// `size` must describe the imported allocation.
    #[cfg(any(feature = "cl_arm_import_memory", feature = "dynamic"))]
    pub unsafe fn import_memory_arm(
        &self,
        flags: cl_mem_flags,
        properties: *const ext::cl_import_properties_arm,
        memory: *mut c_void,
        size: size_t,
    ) -> Result<cl_mem> {
        unsafe {
            Ok(ext::import_memory_arm(
                self.context,
                flags,
                properties,
                memory,
                size,
            )?)
        }
    }
    /// The devices in this context.
    #[allow(clippy::missing_const_for_fn)]
    pub fn devices(&self) -> &[cl_device_id] {
        &self.devices
    }
    /// The first device in the context.
    ///
    /// # Panics
    ///
    /// Panics if the context has no devices.
    pub fn default_device(&self) -> cl_device_id {
        self.devices[0]
    }
    /// The number of devices in the context.
    pub const fn num_devices(&self) -> cl_uint {
        self.devices.len() as cl_uint
    }
    /// Register a callback to be called when the context is destroyed:
    /// calls clSetContextDestructorCallback (OpenCL 3.0).
    #[cfg(any(feature = "CL_VERSION_3_0", feature = "dynamic"))]
    #[inline]
    pub fn set_destructor_callback(
        &self,
        pfn_notify: Option<unsafe extern "C" fn(cl_context, *mut c_void)>,
        user_data: *mut c_void,
    ) -> Result<()> {
        context::set_context_destructor_callback(self.context, pfn_notify, user_data)
            .map_err(Into::into)
    }
    /// The reference count of the context: queries `CL_CONTEXT_REFERENCE_COUNT`.
    pub fn reference_count(&self) -> Result<cl_uint> {
        Ok(context::get_context_info(self.context, context::CL_CONTEXT_REFERENCE_COUNT)?.into())
    }
    /// The properties the context was created with: queries `CL_CONTEXT_PROPERTIES`.
    pub fn properties(&self) -> Result<Vec<intptr_t>> {
        Ok(context::get_context_info(self.context, context::CL_CONTEXT_PROPERTIES)?.into())
    }
    /// Get data about an OpenCL context.
    /// Calls clGetContextInfo to get the desired data about the context.
    pub fn get_data(&self, param_name: cl_context_info) -> Result<Vec<u8>> {
        Ok(context::get_context_data(self.context, param_name)?)
    }
    /// Terminate the context: calls `clTerminateContextKHR`
    /// (cl_khr_terminate_context extension).
    #[cfg(any(feature = "cl_khr_terminate_context", feature = "dynamic"))]
    pub unsafe fn terminate(&self) -> Result<()> {
        unsafe { Ok(ext::terminate_context_khr(self.context)?) }
    }
    /// Create a cl_event linked to an OpenGL sync object.
    /// Requires the cl_khr_gl_event extension
    ///
    /// * `sync` - the sync object in the GL share group associated with context.
    ///
    /// returns a Result containing the new OpenCL event
    /// or the error code from the OpenCL C API function.
    #[cfg(any(feature = "cl_khr_gl_sharing", feature = "dynamic"))]
    pub fn create_event_from_gl_sync_khr(&self, sync: gl::cl_GLsync) -> Result<cl_event> {
        Ok(gl::create_event_from_gl_sync_khr(self.context, sync)?)
    }
    /// Create an event object linked to an EGL fence sync object.
    /// Requires the cl_khr_egl_event extension
    ///
    /// * `sync` - the handle to an EGLSync object.
    /// * `display` - the handle to an EGLDisplay.
    ///
    /// returns a Result containing the new OpenCL event
    /// or the error code from the OpenCL C API function.
    #[cfg(any(feature = "cl_khr_egl_event", feature = "dynamic"))]
    pub unsafe fn create_event_from_egl_sync_khr(
        &self,
        sync: egl::CLeglSyncKHR,
        display: egl::CLeglDisplayKHR,
    ) -> Result<cl_event> {
        unsafe {
            Ok(egl::create_event_from_egl_sync_khr(
                self.context,
                sync,
                display,
            )?)
        }
    }
    /// Create a semaphore on the context (cl_khr_semaphore extension).
    #[cfg(any(feature = "cl_khr_semaphore", feature = "dynamic"))]
    pub fn create_semaphore_with_properties_khr(
        &self,
        sema_props: *const ext::cl_semaphore_properties_khr,
    ) -> Result<cl_mem> {
        Ok(ext::create_semaphore_with_properties_khr(
            self.context,
            sema_props,
        )?)
    }
    /// The DX9 media surface formats supported by the context
    /// (DX9 media sharing extensions).
    #[cfg(any(
        feature = "cl_khr_dx9_media_sharing",
        feature = "cl_intel_dx9_media_sharing",
        feature = "dynamic"
    ))]
    pub fn get_supported_dx9_media_surface_formats_intel(
        &self,
        flags: cl_mem_flags,
        image_type: cl_mem_object_type,
        plane: cl_uint,
    ) -> Result<Vec<cl_uint>> {
        Ok(unsafe {
            dx9_media_sharing::get_supported_dx9_media_surface_formats_intel(
                self.context,
                flags,
                image_type,
                plane,
            )
        }?)
    }
    /// The D3D10 texture formats supported by the context
    /// (cl_khr_d3d10_sharing extension).
    #[cfg(any(feature = "cl_khr_d3d10_sharing", feature = "dynamic"))]
    pub fn get_supported_d3d10_texture_formats_intel(
        &self,
        flags: cl_mem_flags,
        image_type: cl_mem_object_type,
    ) -> Result<Vec<cl_uint>> {
        Ok(unsafe {
            d3d10::get_supported_d3d10_texture_formats_intel(self.context, flags, image_type)
        }?)
    }
    /// The D3D11 texture formats supported by the context
    /// (cl_khr_d3d11_sharing extension).
    #[cfg(any(feature = "cl_khr_d3d11_sharing", feature = "dynamic"))]
    pub fn get_supported_d3d11_texture_formats_intel(
        &self,
        flags: cl_mem_flags,
        image_type: cl_mem_object_type,
        plane: cl_uint,
    ) -> Result<Vec<cl_uint>> {
        Ok(unsafe {
            d3d11::get_supported_d3d11_texture_formats_intel(self.context, flags, image_type, plane)
        }?)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::device::Device;
    use crate::platform::get_platforms;
    use cl3::device::CL_DEVICE_TYPE_GPU;
    use cl3::info_type::InfoType;
    use cl3::memory::{CL_MEM_OBJECT_IMAGE2D, CL_MEM_READ_WRITE};
    // NOTE: these tests need an OpenCL GPU device and driver at run time.
    #[test]
    fn test_context() {
        let platforms = get_platforms().unwrap();
        assert!(0 < platforms.len());
        // Get the first platform
        let platform = &platforms[0];
        let devices = platform.get_devices(CL_DEVICE_TYPE_GPU).unwrap();
        assert!(0 < devices.len());
        // Get the first device
        let device = Device::new(devices[0]);
        let context = Context::from_device(&device).unwrap();
        println!(
            "CL_DEVICE_SVM_CAPABILITIES: {:X}",
            context.get_svm_mem_capability()
        );
        println!(
            "clGetSupportedImageFormats:\norder: data_type {}",
            InfoType::VecImageFormat(
                context
                    .get_supported_image_formats(CL_MEM_READ_WRITE, CL_MEM_OBJECT_IMAGE2D)
                    .unwrap()
            )
        );
        println!(
            "CL_CONTEXT_REFERENCE_COUNT: {}",
            context.reference_count().unwrap()
        );
        println!("CL_CONTEXT_PROPERTIES: {:?}", context.properties().unwrap());
    }
    #[test]
    fn test_context_from_device_type() {
        let properties = Vec::<cl_context_properties>::default();
        // May legitimately fail on machines without a GPU; just report it.
        let context =
            Context::from_device_type(CL_DEVICE_TYPE_GPU, &properties, None, ptr::null_mut());
        match context {
            Ok(value) => {
                println!("Context num devices: {}", value.num_devices())
            }
            Err(e) => println!("OpenCL error, Context::from_device_type: {}", e),
        }
    }
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/tests/opencl2_kernel_test.rs | tests/opencl2_kernel_test.rs | // Copyright (c) 2021-2024 Via Technology Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
extern crate opencl3;
use cl3::device::{
CL_DEVICE_SVM_FINE_GRAIN_BUFFER, CL_DEVICE_SVM_FINE_GRAIN_SYSTEM, CL_DEVICE_TYPE_ALL,
CL_DEVICE_TYPE_GPU,
};
use opencl3::Result;
use opencl3::command_queue::CommandQueue;
use opencl3::context::Context;
use opencl3::device::Device;
use opencl3::kernel::{ExecuteKernel, Kernel, create_program_kernels};
use opencl3::platform::get_platforms;
use opencl3::program::{CL_STD_2_0, Program};
use opencl3::svm::SvmVec;
use opencl3::types::cl_int;
use std::ptr;
// The OpenCL kernels in PROGRAM_SOURCE below use built-in sub-group functions:
// sub_group_reduce_add, sub_group_scan_inclusive_add and sub_group_broadcast,
// which require OpenCL 2.x sub-group support on the device
// (the cl_khr_subgroups extension, made core in OpenCL 2.1).
// OpenCL C source for the two test kernels.
// `sum_int` reduces the input values to per-group sums; `inclusive_scan_int`
// computes a running inclusive prefix sum across all work groups.
// NOTE(review): both kernels call `sub_group_*` built-in functions, so the
// device must provide OpenCL 2.x sub-group support for this source to build.
const PROGRAM_SOURCE: &str = r#"
kernel void sum_int (global int* sums,
                     global int const* values)
{
    int value = sub_group_reduce_add(values[get_global_id(0)]);
    if (0u == get_local_id(0))
        sums[get_group_id(0)] = value;
}
kernel void inclusive_scan_int (global int* output,
                                global int const* values)
{
    int sum = 0;
    size_t lid = get_local_id(0);
    size_t lsize = get_local_size(0);
    size_t num_groups = get_num_groups(0);
    for (size_t i = 0u; i < num_groups; ++i)
    {
        size_t lidx = i * lsize + lid;
        int value = sub_group_scan_inclusive_add(values[lidx]);
        output[lidx] = sum + value;
        sum += sub_group_broadcast(value, lsize - 1);
    }
}"#;
// Kernel entry point names within PROGRAM_SOURCE, used to select the
// kernels returned by create_program_kernels below.
const SUM_KERNEL_NAME: &str = "sum_int";
const INCLUSIVE_SCAN_KERNEL_NAME: &str = "inclusive_scan_int";
/// Exercises the sub-group kernels in `PROGRAM_SOURCE` on an OpenCL 2.x/3.x
/// GPU device that supports fine grained SVM buffers.
///
/// Ignored by default since it requires suitable OpenCL hardware; it prints
/// a message and succeeds when no capable device is found.
#[test]
#[ignore]
fn test_opencl_2_kernel_example() -> Result<()> {
    let platforms = get_platforms()?;
    assert!(0 < platforms.len());

    /////////////////////////////////////////////////////////////////////
    // Query OpenCL compute environment
    let opencl_2: &str = "OpenCL 2";
    let opencl_3: &str = "OpenCL 3";

    // Find an OpenCL fine grained SVM, platform and device
    let mut device_id = ptr::null_mut();
    let mut is_fine_grained_svm: bool = false;
    for p in platforms {
        let platform_version = p.version()?;
        // `str::contains` takes the `&str` pattern directly; no extra borrow needed.
        if platform_version.contains(opencl_2) || platform_version.contains(opencl_3) {
            let devices = p
                .get_devices(CL_DEVICE_TYPE_GPU)
                .expect("Platform::get_devices failed");
            for dev_id in devices {
                let device = Device::new(dev_id);
                let svm_mem_capability = device.svm_mem_capability();
                is_fine_grained_svm = 0 < svm_mem_capability & CL_DEVICE_SVM_FINE_GRAIN_BUFFER;
                if is_fine_grained_svm {
                    device_id = dev_id;
                    break;
                }
            }
        }
    }

    if is_fine_grained_svm {
        // Create OpenCL context from the OpenCL svm device
        let device = Device::new(device_id);
        let vendor = device.vendor()?;
        let vendor_id = device.vendor_id()?;
        println!("OpenCL device vendor name: {}", vendor);
        println!("OpenCL device vendor id: {:X}", vendor_id);

        /////////////////////////////////////////////////////////////////////
        // Initialise OpenCL compute environment

        // Create a Context on the OpenCL device
        let context = Context::from_device(&device).expect("Context::from_device failed");

        // Build the OpenCL program source; CL_STD_2_0 is required for the
        // sub-group built-in functions used by the kernels.
        let program = Program::create_and_build_from_source(&context, PROGRAM_SOURCE, CL_STD_2_0)
            .expect("Program::create_and_build_from_source failed");

        // Create the kernels from the OpenCL program source.
        let kernels = create_program_kernels(&program)?;
        assert!(0 < kernels.len());

        // The order of the kernels returned by create_program_kernels is
        // unspecified, so select each kernel by its function name.
        let kernel_0_name = kernels[0].function_name()?;
        println!("OpenCL kernel_0_name: {}", kernel_0_name);
        let sum_kernel = if SUM_KERNEL_NAME == kernel_0_name {
            &kernels[0]
        } else {
            &kernels[1]
        };
        let inclusive_scan_kernel = if INCLUSIVE_SCAN_KERNEL_NAME == kernel_0_name {
            &kernels[0]
        } else {
            &kernels[1]
        };

        // Create a command_queue on the Context's device.
        // Note: the expect message now matches the function actually called.
        let queue = CommandQueue::create_default_with_properties(&context, 0, 0)
            .expect("CommandQueue::create_default_with_properties failed");

        // Get the svm capability of all the devices in the context.
        let svm_capability = context.get_svm_mem_capability();
        assert!(0 < svm_capability);

        // Create SVM vectors for the input and output data

        // The input data
        const ARRAY_SIZE: usize = 8;
        let value_array: [cl_int; ARRAY_SIZE] = [3, 2, 5, 9, 7, 1, 4, 2];

        // Copy into an OpenCL SVM vector
        let mut test_values =
            SvmVec::<cl_int>::allocate(&context, ARRAY_SIZE).expect("SVM allocation failed");
        test_values.copy_from_slice(&value_array);

        // Make test_values immutable
        let test_values = test_values;

        // The output data, an OpenCL SVM vector
        let mut results =
            SvmVec::<cl_int>::allocate_zeroed(&context, ARRAY_SIZE).expect("SVM allocation failed");

        // Run the sum kernel on the input data
        let sum_kernel_event = unsafe {
            ExecuteKernel::new(sum_kernel)
                .set_arg_svm(results.as_mut_ptr())
                .set_arg_svm(test_values.as_ptr())
                .set_global_work_size(ARRAY_SIZE)
                .enqueue_nd_range(&queue)?
        };

        // Wait for the kernel to complete execution on the device
        sum_kernel_event.wait()?;

        // Fine grained SVM can be read directly, no need to map or read the results
        println!("sum results: {:?}", results);
        assert_eq!(33, results[0]);
        assert_eq!(0, results[ARRAY_SIZE - 1]);

        // Run the inclusive scan kernel on the input data
        let kernel_event = unsafe {
            ExecuteKernel::new(inclusive_scan_kernel)
                .set_arg_svm(results.as_mut_ptr())
                .set_arg_svm(test_values.as_ptr())
                .set_global_work_size(ARRAY_SIZE)
                .enqueue_nd_range(&queue)?
        };
        kernel_event.wait()?;

        println!("inclusive_scan results: {:?}", results);
        assert_eq!(value_array[0], results[0]);
        assert_eq!(33, results[ARRAY_SIZE - 1]);
    } else {
        println!("OpenCL fine grained SVM capable device not found");
    }
    Ok(())
}
/// Runs the `sum_int` kernel on an OpenCL 2.x/3.x device that supports fine
/// grained *system* SVM, i.e. kernels can access ordinary host allocations.
///
/// Ignored by default since it requires suitable OpenCL hardware; it prints
/// a message and succeeds when no capable device is found.
#[test]
#[ignore]
fn test_opencl_2_system_svm_example() -> Result<()> {
    let platforms = get_platforms()?;
    assert!(0 < platforms.len());

    /////////////////////////////////////////////////////////////////////
    // Query OpenCL compute environment
    let opencl_2: &str = "OpenCL 2";
    let opencl_3: &str = "OpenCL 3";

    // Find an OpenCL fine grained system SVM platform and device
    let mut device_id = ptr::null_mut();
    let mut is_fine_grained_system_svm: bool = false;
    for p in platforms {
        let platform_version = p.version()?;
        // `str::contains` takes the `&str` pattern directly; no extra borrow needed.
        if platform_version.contains(opencl_2) || platform_version.contains(opencl_3) {
            let devices = p
                .get_devices(CL_DEVICE_TYPE_ALL)
                .expect("Platform::get_devices failed");
            for dev_id in devices {
                let device = Device::new(dev_id);
                let svm_mem_capability = device.svm_mem_capability();
                is_fine_grained_system_svm =
                    0 < svm_mem_capability & CL_DEVICE_SVM_FINE_GRAIN_SYSTEM;
                if is_fine_grained_system_svm {
                    device_id = dev_id;
                    break;
                }
            }
        }
    }

    if is_fine_grained_system_svm {
        // Create OpenCL context from the OpenCL svm device
        let device = Device::new(device_id);
        let vendor = device.vendor().expect("Device.vendor failed");
        let vendor_id = device.vendor_id().expect("Device.vendor_id failed");
        println!("OpenCL device vendor name: {}", vendor);
        println!("OpenCL device vendor id: {:X}", vendor_id);

        /////////////////////////////////////////////////////////////////////
        // Initialise OpenCL compute environment

        // Create a Context on the OpenCL svm device
        let context = Context::from_device(&device).expect("Context::from_device failed");

        // Build the OpenCL program source and create the kernel.
        // CL_STD_2_0 is required for the sub-group built-in functions used by
        // PROGRAM_SOURCE, as in test_opencl_2_kernel_example; the previous
        // empty options string built with the implementation's default
        // (usually OpenCL C 1.2) where those built-ins are unavailable.
        let program = Program::create_and_build_from_source(&context, PROGRAM_SOURCE, CL_STD_2_0)
            .expect("Program::create_and_build_from_source failed");
        let kernel = Kernel::create(&program, SUM_KERNEL_NAME).expect("Kernel::create failed");

        // Create a command_queue on the Context's device
        let queue = CommandQueue::create_default_with_properties(&context, 0, 0)
            .expect("CommandQueue::create_default_with_properties failed");

        // The input data
        const ARRAY_SIZE: usize = 8;
        let value_array: [cl_int; ARRAY_SIZE] = [3, 2, 5, 9, 7, 1, 4, 2];

        // Copy into an OpenCL SVM vector
        let mut test_values =
            SvmVec::<cl_int>::allocate(&context, ARRAY_SIZE).expect("SVM allocation failed");
        test_values.copy_from_slice(&value_array);

        // Make test_values immutable
        let test_values = test_values;

        // The output data, an OpenCL SVM vector
        let mut results =
            SvmVec::<cl_int>::allocate_zeroed(&context, ARRAY_SIZE).expect("SVM allocation failed");

        // Run the sum kernel on the input data
        let sum_kernel_event = unsafe {
            ExecuteKernel::new(&kernel)
                .set_arg_svm(results.as_mut_ptr())
                .set_arg_svm(test_values.as_ptr())
                .set_global_work_size(ARRAY_SIZE)
                .enqueue_nd_range(&queue)?
        };

        // Wait for the kernel to complete execution on the device
        sum_kernel_event.wait()?;

        // Fine grained SVM can be read directly, no need to map or read the results
        println!("sum results: {:?}", results);
        assert_eq!(33, results[0]);
        assert_eq!(0, results[ARRAY_SIZE - 1]);
    } else {
        println!("OpenCL fine grained system SVM device not found")
    }
    Ok(())
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/tests/integration_test.rs | tests/integration_test.rs | // Copyright (c) 2020-2021 Via Technology Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate opencl3;
use cl3::device::CL_DEVICE_TYPE_GPU;
use opencl3::Result;
use opencl3::command_queue::{CL_QUEUE_PROFILING_ENABLE, CommandQueue};
use opencl3::context::Context;
use opencl3::device::Device;
use opencl3::kernel::{ExecuteKernel, Kernel};
use opencl3::memory::{Buffer, CL_MEM_READ_ONLY, CL_MEM_WRITE_ONLY};
use opencl3::platform::get_platforms;
use opencl3::program::Program;
use opencl3::types::{CL_BLOCKING, CL_NON_BLOCKING, cl_event, cl_float};
use std::ptr;
// OpenCL C source for the saxpy kernel: z[i] = a * x[i] + y[i].
// Uses only OpenCL C 1.x features, so it builds with default options.
const PROGRAM_SOURCE: &str = r#"
kernel void saxpy_float (global float* z,
                         global float const* x,
                         global float const* y,
                         float a)
{
    size_t i = get_global_id(0);
    z[i] = a*x[i] + y[i];
}"#;
// The kernel entry point name within PROGRAM_SOURCE.
const KERNEL_NAME: &str = "saxpy_float";
/// A complete OpenCL 1.2 saxpy example: write the input buffers, run the
/// kernel and read back the results, using events to order the commands.
///
/// Ignored by default since it requires an OpenCL GPU device.
#[test]
#[ignore]
fn test_opencl_1_2_example() -> Result<()> {
    let platforms = get_platforms()?;
    assert!(0 < platforms.len());

    // Get the first platform
    let platform = &platforms[0];
    let devices = platform
        .get_devices(CL_DEVICE_TYPE_GPU)
        .expect("Platform::get_devices failed");
    assert!(0 < devices.len());
    let platform_name = platform.name()?;
    println!("Platform Name: {:?}", platform_name);

    // Create OpenCL context from the first device
    let device = Device::new(devices[0]);
    let vendor = device.vendor().expect("Device.vendor failed");
    let vendor_id = device.vendor_id().expect("Device.vendor_id failed");
    println!("OpenCL device vendor name: {}", vendor);
    println!("OpenCL device vendor id: {:X}", vendor_id);

    /////////////////////////////////////////////////////////////////////
    // Initialise OpenCL compute environment

    // Create a Context on the OpenCL device
    let context = Context::from_device(&device).expect("Context::from_device failed");

    // Build the OpenCL program source and create the kernel.
    let program = Program::create_and_build_from_source(&context, PROGRAM_SOURCE, "")
        .expect("Program::create_and_build_from_source failed");
    let kernel = Kernel::create(&program, KERNEL_NAME).expect("Kernel::create failed");

    // Create a command_queue on the Context's device; profiling is enabled
    // so the kernel execution duration can be reported below.
    let queue = CommandQueue::create_default(&context, CL_QUEUE_PROFILING_ENABLE)
        .expect("CommandQueue::create_default failed");

    /////////////////////////////////////////////////////////////////////
    // Compute data

    // The input data
    const ARRAY_SIZE: usize = 1000;
    let ones: [cl_float; ARRAY_SIZE] = [1.0; ARRAY_SIZE];
    let mut sums: [cl_float; ARRAY_SIZE] = [0.0; ARRAY_SIZE];
    for i in 0..ARRAY_SIZE {
        sums[i] = 1.0 + 1.0 * i as cl_float;
    }

    // Create OpenCL device buffers
    let mut x = unsafe {
        Buffer::<cl_float>::create(&context, CL_MEM_READ_ONLY, ARRAY_SIZE, ptr::null_mut())?
    };
    let mut y = unsafe {
        Buffer::<cl_float>::create(&context, CL_MEM_READ_ONLY, ARRAY_SIZE, ptr::null_mut())?
    };
    let z = unsafe {
        Buffer::<cl_float>::create(&context, CL_MEM_WRITE_ONLY, ARRAY_SIZE, ptr::null_mut())?
    };

    // Blocking write
    let _x_write_event = unsafe { queue.enqueue_write_buffer(&mut x, CL_BLOCKING, 0, &ones, &[])? };

    // Non-blocking write, wait for y_write_event
    let y_write_event =
        unsafe { queue.enqueue_write_buffer(&mut y, CL_NON_BLOCKING, 0, &sums, &[])? };

    // a value for the kernel function
    let a: cl_float = 300.0;

    // Use the ExecuteKernel builder to set the kernel buffer and
    // cl_float value arguments, before setting the one dimensional
    // global_work_size for the call to enqueue_nd_range.
    // Unwraps the Result to get the kernel execution event.
    let kernel_event = unsafe {
        ExecuteKernel::new(&kernel)
            .set_arg(&z)
            .set_arg(&x)
            .set_arg(&y)
            .set_arg(&a)
            .set_global_work_size(ARRAY_SIZE)
            .set_wait_event(&y_write_event)
            .enqueue_nd_range(&queue)?
    };

    // The read below must wait on the kernel execution event.
    let events: Vec<cl_event> = vec![kernel_event.get()];

    // Create a results array to hold the results from the OpenCL device
    // and enqueue a read command to read the device buffer into the array
    // after the kernel event completes.
    let mut results: [cl_float; ARRAY_SIZE] = [0.0; ARRAY_SIZE];
    let _event =
        unsafe { queue.enqueue_read_buffer(&z, CL_NON_BLOCKING, 0, &mut results, &events)? };

    // Block until all commands on the queue have completed
    queue.finish()?;

    assert_eq!(1300.0, results[ARRAY_SIZE - 1]);
    println!("results back: {}", results[ARRAY_SIZE - 1]);

    // Calculate the kernel duration, from the kernel_event
    let start_time = kernel_event.profiling_command_start()?;
    let end_time = kernel_event.profiling_command_end()?;
    let duration = end_time - start_time;
    println!("kernel execution duration (ns): {}", duration);
    Ok(())
}
/// End to end OpenCL Shared Virtual Memory (SVM) example.
///
/// Finds a GPU with coarse or fine grained SVM buffer support and runs the
/// saxpy kernel on SVM vectors. Fine grained SVM can be written and read
/// directly by the host; coarse grained SVM must be mapped before host access
/// and unmapped before device access, which is why the two branches below
/// differ only in their map/unmap calls.
#[cfg(any(feature = "CL_VERSION_2_0", feature = "dynamic"))]
#[test]
#[ignore]
fn test_opencl_svm_example() -> Result<()> {
    use cl3::device::{CL_DEVICE_SVM_COARSE_GRAIN_BUFFER, CL_DEVICE_SVM_FINE_GRAIN_BUFFER};
    use opencl3::command_queue::CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE;
    use opencl3::memory::{CL_MAP_READ, CL_MAP_WRITE};
    use opencl3::svm::SvmVec;
    let platforms = get_platforms()?;
    assert!(0 < platforms.len());
    /////////////////////////////////////////////////////////////////////
    // Query OpenCL compute environment
    let opencl_2: &str = "OpenCL 2";
    let opencl_3: &str = "OpenCL 3";
    // Find an OpenCL SVM, platform and device
    let mut device_id = ptr::null_mut();
    let mut is_svm_capable: bool = false;
    for p in platforms {
        let platform_version = p.version()?;
        if platform_version.contains(&opencl_2) || platform_version.contains(&opencl_3) {
            let devices = p
                .get_devices(CL_DEVICE_TYPE_GPU)
                .expect("Platform::get_devices failed");
            for dev_id in devices {
                let device = Device::new(dev_id);
                let svm_mem_capability = device.svm_mem_capability();
                // Either coarse or fine grained SVM buffer support is acceptable.
                is_svm_capable = 0 < svm_mem_capability
                    & (CL_DEVICE_SVM_COARSE_GRAIN_BUFFER | CL_DEVICE_SVM_FINE_GRAIN_BUFFER);
                if is_svm_capable {
                    device_id = dev_id;
                    break;
                }
            }
        }
    }
    if is_svm_capable {
        // Create OpenCL context from the OpenCL svm device
        let device = Device::new(device_id);
        let vendor = device.vendor().expect("Device.vendor failed");
        let vendor_id = device.vendor_id().expect("Device.vendor_id failed");
        println!("OpenCL device vendor name: {}", vendor);
        println!("OpenCL device vendor id: {:X}", vendor_id);
        /////////////////////////////////////////////////////////////////////
        // Initialise OpenCL compute environment
        // Create a Context on the OpenCL svm device
        let context = Context::from_device(&device).expect("Context::from_device failed");
        // Build the OpenCL program source and create the kernel.
        // The saxpy kernel only uses OpenCL C 1.x features, so default
        // build options ("") suffice here.
        let program = Program::create_and_build_from_source(&context, PROGRAM_SOURCE, "")
            .expect("Program::create_and_build_from_source failed");
        let kernel = Kernel::create(&program, KERNEL_NAME).expect("Kernel::create failed");
        // Create a command_queue on the Context's device
        let queue = CommandQueue::create_default_with_properties(
            &context,
            CL_QUEUE_PROFILING_ENABLE | CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE,
            0,
        )
        .expect("CommandQueue::create_default_with_properties failed");
        /////////////////////////////////////////////////////////////////////
        // Compute data
        // Get the svm capability of all the devices in the context.
        let svm_capability = context.get_svm_mem_capability();
        assert!(0 < svm_capability);
        let is_fine_grained_svm: bool = 0 < svm_capability & CL_DEVICE_SVM_FINE_GRAIN_BUFFER;
        println!("OpenCL SVM is fine grained: {}", is_fine_grained_svm);
        // Create SVM vectors for the data
        // The SVM vectors
        const ARRAY_SIZE: usize = 1000;
        let mut ones =
            SvmVec::<cl_float>::allocate(&context, ARRAY_SIZE).expect("SVM allocation failed");
        let mut sums =
            SvmVec::<cl_float>::allocate(&context, ARRAY_SIZE).expect("SVM allocation failed");
        let mut results =
            SvmVec::<cl_float>::allocate(&context, ARRAY_SIZE).expect("SVM allocation failed");
        let a: cl_float = 300.0;
        if is_fine_grained_svm {
            // Fine grained SVM: the host may write the vectors directly.
            // The input data
            for i in 0..ARRAY_SIZE {
                ones[i] = 1.0;
            }
            for i in 0..ARRAY_SIZE {
                sums[i] = 1.0 + 1.0 * i as cl_float;
            }
            // Make ones and sums immutable
            let ones = ones;
            let sums = sums;
            // Use the ExecuteKernel builder to set the kernel buffer and
            // cl_float value arguments, before setting the one dimensional
            // global_work_size for the call to enqueue_nd_range.
            // Unwraps the Result to get the kernel execution event.
            let kernel_event = unsafe {
                ExecuteKernel::new(&kernel)
                    .set_arg_svm(results.as_mut_ptr())
                    .set_arg_svm(ones.as_ptr())
                    .set_arg_svm(sums.as_ptr())
                    .set_arg(&a)
                    .set_global_work_size(ARRAY_SIZE)
                    .enqueue_nd_range(&queue)?
            };
            // Wait for the kernel_event to complete
            kernel_event.wait()?;
            assert_eq!(1300.0, results[ARRAY_SIZE - 1]);
            println!("results back: {}", results[ARRAY_SIZE - 1]);
            // Calculate the kernel duration, from the kernel_event
            let start_time = kernel_event.profiling_command_start()?;
            let end_time = kernel_event.profiling_command_end()?;
            let duration = end_time - start_time;
            println!("kernel execution duration (ns): {}", duration);
        } else {
            // !is_fine_grained_svm
            // Coarse grained SVM: vectors must be mapped for host access.
            // Resize and map the input SVM vectors, before setting their data
            unsafe {
                ones.set_len(ARRAY_SIZE)?;
                sums.set_len(ARRAY_SIZE)?;
                queue.enqueue_svm_map(CL_BLOCKING, CL_MAP_WRITE, &mut ones, &[])?;
                queue.enqueue_svm_map(CL_BLOCKING, CL_MAP_WRITE, &mut sums, &[])?;
            }
            // The input data
            for i in 0..ARRAY_SIZE {
                ones[i] = 1.0;
            }
            for i in 0..ARRAY_SIZE {
                sums[i] = 1.0 + 1.0 * i as cl_float;
            }
            // Make ones and sums immutable
            let ones = ones;
            let sums = sums;
            // The kernel must wait for both unmap events before executing,
            // since the queue is out-of-order.
            let mut events: Vec<cl_event> = Vec::default();
            let unmap_sums_event = unsafe { queue.enqueue_svm_unmap(&sums, &[])? };
            let unmap_ones_event = unsafe { queue.enqueue_svm_unmap(&ones, &[])? };
            events.push(unmap_sums_event.get());
            events.push(unmap_ones_event.get());
            // Use the ExecuteKernel builder to set the kernel buffer and
            // cl_float value arguments, before setting the one dimensional
            // global_work_size for the call to enqueue_nd_range.
            // Unwraps the Result to get the kernel execution event.
            let kernel_event = unsafe {
                ExecuteKernel::new(&kernel)
                    .set_arg_svm(results.as_mut_ptr())
                    .set_arg_svm(ones.as_ptr())
                    .set_arg_svm(sums.as_ptr())
                    .set_arg(&a)
                    .set_global_work_size(ARRAY_SIZE)
                    .set_event_wait_list(&events)
                    .enqueue_nd_range(&queue)?
            };
            // Wait for the kernel_event to complete
            kernel_event.wait()?;
            // Map SVM results before reading them
            let _map_results_event =
                unsafe { queue.enqueue_svm_map(CL_BLOCKING, CL_MAP_READ, &mut results, &[])? };
            assert_eq!(1300.0, results[ARRAY_SIZE - 1]);
            println!("results back: {}", results[ARRAY_SIZE - 1]);
            // Calculate the kernel duration from the kernel_event
            let start_time = kernel_event.profiling_command_start()?;
            let end_time = kernel_event.profiling_command_end()?;
            let duration = end_time - start_time;
            println!("kernel execution duration (ns): {}", duration);
            /////////////////////////////////////////////////////////////////////
            // Clean up
            let unmap_results_event = unsafe { queue.enqueue_svm_unmap(&results, &[])? };
            unmap_results_event.wait()?;
            println!("SVM buffers unmapped");
        }
    } else {
        println!("OpenCL SVM capable device not found")
    }
    Ok(())
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/examples/clinfo.rs | examples/clinfo.rs | // Copyright (c) 2021 Via Technology Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use opencl3::Result;
use opencl3::device::{CL_DEVICE_TYPE_ALL, Device, device_type_text, vendor_id_text};
/// Finds all the OpenCL platforms and devices on a system.
///
/// It displays OpenCL platform information from `clGetPlatformInfo` and
/// OpenCL device information from `clGetDeviceInfo` for all the platforms and
/// devices.
fn main() -> Result<()> {
    // Enumerate every OpenCL platform installed on this system.
    let platforms = opencl3::platform::get_platforms()?;
    println!("Number of platforms: {}", platforms.len());

    for platform in platforms {
        // Report the platform information from clGetPlatformInfo.
        let vendor = platform.vendor()?;
        let name = platform.name()?;
        let version = platform.version()?;
        let profile = platform.profile()?;
        let extensions = platform.extensions()?;
        println!("CL_PLATFORM_VENDOR: {}", vendor);
        println!("CL_PLATFORM_NAME: {}", name);
        println!("CL_PLATFORM_VERSION: {}", version);
        println!("CL_PLATFORM_PROFILE: {}", profile);
        println!("CL_PLATFORM_EXTENSIONS: {}", extensions);

        let devices = platform.get_devices(CL_DEVICE_TYPE_ALL)?;
        println!("Number of devices: {}", devices.len());
        println!();

        // Report the information for each device from clGetDeviceInfo.
        for id in devices {
            let device = Device::new(id);
            println!("\tCL_DEVICE_VENDOR: {}", device.vendor()?);
            let vendor_id = device.vendor_id()?;
            println!(
                "\tCL_DEVICE_VENDOR_ID: {:X}, {}",
                vendor_id,
                vendor_id_text(vendor_id)
            );
            println!("\tCL_DEVICE_NAME: {}", device.name()?);
            println!("\tCL_DEVICE_VERSION: {}", device.version()?);
            let device_type = device.dev_type()?;
            println!(
                "\tCL_DEVICE_TYPE: {:X}, {}",
                device_type,
                device_type_text(device_type)
            );
            println!("\tCL_DEVICE_PROFILE: {}", device.profile()?);
            println!("\tCL_DEVICE_EXTENSIONS: {}", device.extensions()?);
            println!(
                "\tCL_DEVICE_OPENCL_C_VERSION: {:?}",
                device.opencl_c_version()?
            );
            println!(
                "\tCL_DEVICE_BUILT_IN_KERNELS: {}",
                device.built_in_kernels()?
            );
            println!(
                "\tCL_DEVICE_SVM_CAPABILITIES: {:X}",
                device.svm_mem_capability()
            );
            println!();
        }
    }
    Ok(())
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/examples/opencl2image.rs | examples/opencl2image.rs | // Copyright (c) 2023 Via Technology Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use cl3::ext::CL_IMAGE_FORMAT_NOT_SUPPORTED;
use cl3::memory::{CL_MEM_OBJECT_IMAGE2D, CL_MEM_WRITE_ONLY, CL_RGBA, CL_UNSIGNED_INT8};
use cl3::types::{CL_NON_BLOCKING, cl_image_desc, cl_image_format};
use libc::c_void;
use opencl3::Result;
use opencl3::command_queue::{CL_QUEUE_PROFILING_ENABLE, CommandQueue};
use opencl3::context::Context;
use opencl3::device::{CL_DEVICE_TYPE_GPU, Device};
use opencl3::kernel::{ExecuteKernel, Kernel};
use opencl3::memory::Image;
use opencl3::program::{CL_STD_2_0, Program};
use opencl3::types::cl_event;
// OpenCL C source for the colorize kernel: writes a gradient pixel
// (x, y, 0, 255) at each (x, y) coordinate of a 2D RGBA image.
const PROGRAM_SOURCE: &str = r#"
kernel void colorize(write_only image2d_t image)
{
    const size_t x = get_global_id(0);
    const size_t y = get_global_id(1);
    write_imageui(image, (int2)(x, y), (uint4)(x, y, 0, 255));
}"#;
// The kernel entry point name within PROGRAM_SOURCE.
const KERNEL_NAME: &str = "colorize";
/// Writes a gradient into a 10 x 10 RGBA8 image with an OpenCL kernel, fills
/// its centre with `enqueue_fill_image`, then reads the image back and prints
/// each pixel as an (r, g, b, a) tuple.
fn main() -> Result<()> {
    // Find a usable platform and device for this application
    let platforms = opencl3::platform::get_platforms()?;
    let platform = platforms.first().expect("no OpenCL platforms");
    let device = *platform
        .get_devices(CL_DEVICE_TYPE_GPU)?
        .first()
        .expect("no device found in platform");
    let device = Device::new(device);

    // Create a Context on an OpenCL device
    let context = Context::from_device(&device).expect("Context::from_device failed");

    // Print some information about the device
    println!(
        "CL_DEVICE_IMAGE_SUPPORT: {:?}",
        device.image_support().unwrap()
    );
    println!(
        "CL_DEVICE_MAX_READ_WRITE_IMAGE_ARGS: {:?}",
        device.max_read_write_image_args().unwrap()
    );
    println!(
        "CL_DEVICE_MAX_READ_IMAGE_ARGS: {:?}",
        device.max_read_image_args().unwrap()
    );
    println!(
        "CL_DEVICE_MAX_WRITE_IMAGE_ARGS: {:?}",
        device.max_write_image_args().unwrap()
    );
    println!(
        "CL_DEVICE_MAX_SAMPLERS: {:?}",
        device.max_device_samples().unwrap()
    );

    // Check that the device supports writing CL_RGBA / CL_UNSIGNED_INT8
    // 2D images. `any` expresses the intent directly and short-circuits,
    // unlike `filter(..).count() <= 0` which counted every match.
    let supported_formats =
        context.get_supported_image_formats(CL_MEM_WRITE_ONLY, CL_MEM_OBJECT_IMAGE2D)?;
    if !supported_formats
        .iter()
        .any(|f| f.image_channel_order == CL_RGBA && f.image_channel_data_type == CL_UNSIGNED_INT8)
    {
        println!("Device does not support CL_RGBA with CL_UNSIGNED_INT8 for CL_MEM_WRITE_ONLY!");
        return Err(CL_IMAGE_FORMAT_NOT_SUPPORTED.into());
    }

    // Build the OpenCL program source and create the kernel.
    let program = Program::create_and_build_from_source(&context, PROGRAM_SOURCE, CL_STD_2_0)
        .expect("Program::create_and_build_from_source failed");
    let kernel = Kernel::create(&program, KERNEL_NAME).expect("Kernel::create failed");

    // Create a command_queue on the Context's device
    let queue =
        CommandQueue::create_default_with_properties(&context, CL_QUEUE_PROFILING_ENABLE, 0)
            .expect("CommandQueue::create_default_with_properties failed");

    // Create a 10 x 10 write-only RGBA8 image
    let mut image = unsafe {
        Image::create(
            &context,
            CL_MEM_WRITE_ONLY,
            &cl_image_format {
                image_channel_order: CL_RGBA,
                image_channel_data_type: CL_UNSIGNED_INT8,
            },
            &cl_image_desc {
                image_type: CL_MEM_OBJECT_IMAGE2D,
                image_width: 10,
                image_height: 10,
                image_depth: 1,
                image_array_size: 1,
                image_row_pitch: 0,
                image_slice_pitch: 0,
                num_mip_levels: 0,
                num_samples: 0,
                buffer: std::ptr::null_mut(),
            },
            std::ptr::null_mut(),
        )
        .expect("Image::create failed")
    };

    // Run the kernel on the input data
    let kernel_event = unsafe {
        ExecuteKernel::new(&kernel)
            .set_arg(&image)
            .set_global_work_sizes(&[10usize, 10usize])
            .enqueue_nd_range(&queue)?
    };
    let events: Vec<cl_event> = vec![kernel_event.get()];

    // Fill the middle of the image with a solid color, after the kernel has run
    let fill_color = [11u32, 22u32, 33u32, 44u32];
    let fill_event = unsafe {
        queue.enqueue_fill_image(
            &mut image,
            fill_color.as_ptr() as *const c_void,
            &[3usize, 3usize, 0usize] as *const usize,
            &[4usize, 4usize, 1usize] as *const usize,
            &events,
        )?
    };
    let events: Vec<cl_event> = vec![fill_event.get()];

    // Read the image data from the device, after the fill has completed
    let mut image_data = [0u8; 10 * 10 * 4];
    let read_event = unsafe {
        queue.enqueue_read_image(
            &image,
            CL_NON_BLOCKING,
            &[0usize, 0usize, 0usize] as *const usize,
            &[10usize, 10usize, 1usize] as *const usize,
            0,
            0,
            image_data.as_mut_ptr() as *mut c_void,
            &events,
        )?
    };

    // Wait for the read_event to complete.
    read_event.wait()?;

    // Print the image data, one (r, g, b, a) tuple per pixel
    println!("image_data: ");
    for y in 0..10 {
        for x in 0..10 {
            let offset = (y * 10 + x) * 4;
            print!(
                "({:>3}, {:>3}, {:>3}, {:>3}) ",
                image_data[offset],
                image_data[offset + 1],
                image_data[offset + 2],
                image_data[offset + 3]
            );
        }
        println!();
    }
    Ok(())
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/examples/opencl2serde.rs | examples/opencl2serde.rs | // Copyright (c) 2021 Via Technology Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use opencl3::Result;
use opencl3::command_queue::CommandQueue;
use opencl3::context::Context;
use opencl3::device::{CL_DEVICE_TYPE_GPU, Device, get_all_devices};
use opencl3::error_codes::cl_int;
use opencl3::kernel::{ExecuteKernel, Kernel};
use opencl3::memory::{CL_MAP_READ, CL_MAP_WRITE};
use opencl3::program::{CL_STD_2_0, Program};
use opencl3::svm::{ExtendSvmVec, SvmVec};
use opencl3::types::CL_BLOCKING;
use serde::de::DeserializeSeed;
use std::ptr;
// OpenCL C source for the inclusive scan kernel: computes a running
// inclusive prefix sum across all work groups using the OpenCL 2.0
// work-group functions work_group_scan_inclusive_add and
// work_group_broadcast, hence the CL_STD_2_0 build option below.
const PROGRAM_SOURCE: &str = r#"
kernel void inclusive_scan_int (global int* output,
                                global int const* values)
{
    int sum = 0;
    size_t lid = get_local_id(0);
    size_t lsize = get_local_size(0);
    size_t num_groups = get_num_groups(0);
    for (size_t i = 0u; i < num_groups; ++i)
    {
        size_t lidx = i * lsize + lid;
        int value = work_group_scan_inclusive_add(values[lidx]);
        output[lidx] = sum + value;
        sum += work_group_broadcast(value, lsize - 1);
    }
}"#;
// The kernel entry point name within PROGRAM_SOURCE.
const KERNEL_NAME: &str = "inclusive_scan_int";
/// Demonstrates serde deserialization of a JSON array directly into an
/// OpenCL SVM vector, running an inclusive scan kernel over it and then
/// serializing the SVM results back to JSON.
fn main() -> Result<()> {
    // Find a suitable device for this application
    let devices = get_all_devices(CL_DEVICE_TYPE_GPU)?;
    assert!(0 < devices.len());

    // Find an OpenCL SVM device
    let mut device_id = ptr::null_mut();
    let mut is_svm_capable: bool = false;
    for dev_id in devices {
        let device = Device::new(dev_id);
        let svm_mem_capability = device.svm_mem_capability();
        is_svm_capable = 0 < svm_mem_capability;
        if is_svm_capable {
            device_id = dev_id;
            break;
        }
    }

    if is_svm_capable {
        // Create OpenCL context from the OpenCL svm device
        let device = Device::new(device_id);
        let vendor = device.vendor()?;
        let vendor_id = device.vendor_id()?;
        println!("OpenCL device vendor name: {}", vendor);
        println!("OpenCL device vendor id: {:X}", vendor_id);

        /////////////////////////////////////////////////////////////////////
        // Initialise OpenCL compute environment

        // Create a Context on the OpenCL svm device
        let context = Context::from_device(&device).expect("Context::from_device failed");

        // Build the OpenCL program source and create the kernel.
        let program = Program::create_and_build_from_source(&context, PROGRAM_SOURCE, CL_STD_2_0)
            .expect("Program::create_and_build_from_source failed");
        let kernel = Kernel::create(&program, KERNEL_NAME).expect("Kernel::create failed");

        // Create a command_queue on the Context's device
        let queue = CommandQueue::create_default_with_properties(&context, 0, 0)
            .expect("CommandQueue::create_default_with_properties failed");

        // The input data
        const ARRAY_SIZE: usize = 8;
        const VALUE_ARRAY: &str = "[3,2,5,9,7,1,4,2]";

        // Deserialize into an OpenCL SVM vector.
        // VALUE_ARRAY is already a `&str`; no extra borrow is needed.
        let mut test_values = SvmVec::<cl_int>::new(&context);
        let mut deserializer = serde_json::Deserializer::from_str(VALUE_ARRAY);

        // Handle test_values if device only supports CL_DEVICE_SVM_COARSE_GRAIN_BUFFER
        if !test_values.is_fine_grained() {
            // SVM_COARSE_GRAIN_BUFFER needs to know the size of the data to allocate the SVM
            test_values =
                SvmVec::<cl_int>::allocate(&context, ARRAY_SIZE).expect("SVM allocation failed");
            // Map the SVM for a SVM_COARSE_GRAIN_BUFFER
            unsafe { queue.enqueue_svm_map(CL_BLOCKING, CL_MAP_WRITE, &mut test_values, &[])? };
            // Clear the SVM for the deserializer
            test_values.clear();
        }
        ExtendSvmVec(&mut test_values)
            .deserialize(&mut deserializer)
            .expect("Error deserializing the VALUE_ARRAY JSON string.");

        // Make test_values SVM vector immutable
        let test_values = test_values;

        // Unmap test_values if not a CL_MEM_SVM_FINE_GRAIN_BUFFER
        if !test_values.is_fine_grained() {
            let unmap_test_values_event = unsafe { queue.enqueue_svm_unmap(&test_values, &[])? };
            unmap_test_values_event.wait()?;
        }

        // The output data, an OpenCL SVM vector
        let mut results =
            SvmVec::<cl_int>::allocate(&context, ARRAY_SIZE).expect("SVM allocation failed");

        // Run the scan kernel on the input data
        let sum_kernel_event = unsafe {
            ExecuteKernel::new(&kernel)
                .set_arg_svm(results.as_mut_ptr())
                .set_arg_svm(test_values.as_ptr())
                .set_global_work_size(ARRAY_SIZE)
                .enqueue_nd_range(&queue)?
        };

        // Wait for the kernel to complete execution on the device
        sum_kernel_event.wait()?;

        // Map results if not a CL_MEM_SVM_FINE_GRAIN_BUFFER
        if !results.is_fine_grained() {
            unsafe { queue.enqueue_svm_map(CL_BLOCKING, CL_MAP_READ, &mut results, &[])? };
        }

        // Convert SVM results to json
        let json_results = serde_json::to_string(&results).unwrap();
        println!("json results: {}", json_results);

        // Unmap results if not a CL_MEM_SVM_FINE_GRAIN_BUFFER
        if !results.is_fine_grained() {
            let unmap_results_event = unsafe { queue.enqueue_svm_unmap(&results, &[])? };
            unmap_results_event.wait()?;
        }
    } else {
        println!("OpenCL fine grained system SVM device not found")
    }
    Ok(())
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/examples/opencl2svm.rs | examples/opencl2svm.rs | // Copyright (c) 2021-2023 Via Technology Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use opencl3::Result;
use opencl3::command_queue::{CL_QUEUE_PROFILING_ENABLE, CommandQueue};
use opencl3::context::Context;
use opencl3::device::{CL_DEVICE_TYPE_GPU, Device};
use opencl3::error_codes::cl_int;
use opencl3::kernel::{ExecuteKernel, Kernel};
use opencl3::memory::{CL_MAP_READ, CL_MAP_WRITE};
use opencl3::program::{CL_STD_2_0, Program};
use opencl3::svm::SvmVec;
use opencl3::types::CL_BLOCKING;
const PROGRAM_SOURCE: &str = r#"
kernel void inclusive_scan_int (global int* output,
global int const* values)
{
int sum = 0;
size_t lid = get_local_id(0);
size_t lsize = get_local_size(0);
size_t num_groups = get_num_groups(0);
for (size_t i = 0u; i < num_groups; ++i)
{
size_t lidx = i * lsize + lid;
int value = work_group_scan_inclusive_add(values[lidx]);
output[lidx] = sum + value;
sum += work_group_broadcast(value, lsize - 1);
}
}"#;
const KERNEL_NAME: &str = "inclusive_scan_int";
/// Example: run an OpenCL 2.0 `work_group_scan_inclusive_add` kernel over a
/// shared-virtual-memory (SVM) buffer and print the inclusive-scan results.
///
/// Returns an error from any failing OpenCL call propagated with `?`, and
/// panics (via `expect`) when no platform/device is found or when program
/// build, kernel creation or SVM allocation fails.
fn main() -> Result<()> {
    // Find a usable platform and device for this application
    let platforms = opencl3::platform::get_platforms()?;
    let platform = platforms.first().expect("no OpenCL platforms");
    let device = *platform
        .get_devices(CL_DEVICE_TYPE_GPU)?
        .first()
        .expect("no device found in platform");
    let device = Device::new(device);

    // Create a Context on an OpenCL device
    let context = Context::from_device(&device).expect("Context::from_device failed");

    // Build the OpenCL program source and create the kernel.
    // CL_STD_2_0 is passed because the kernel uses the OpenCL C 2.0
    // work_group_scan_inclusive_add/work_group_broadcast built-ins.
    let program = Program::create_and_build_from_source(&context, PROGRAM_SOURCE, CL_STD_2_0)
        .expect("Program::create_and_build_from_source failed");
    let kernel = Kernel::create(&program, KERNEL_NAME).expect("Kernel::create failed");

    // Create a command_queue on the Context's device
    let queue =
        CommandQueue::create_default_with_properties(&context, CL_QUEUE_PROFILING_ENABLE, 0)
            .expect("CommandQueue::create_default_with_properties failed");

    // The input data
    const ARRAY_SIZE: usize = 8;
    let value_array: [cl_int; ARRAY_SIZE] = [3, 2, 5, 9, 7, 1, 4, 2];

    // Create an OpenCL SVM vector to hold the input data
    let mut test_values =
        SvmVec::<cl_int>::allocate(&context, ARRAY_SIZE).expect("SVM allocation failed");

    // Map test_values if not a CL_MEM_SVM_FINE_GRAIN_BUFFER: coarse-grained
    // SVM must be mapped before the host may write to it, while fine-grained
    // SVM is directly host-accessible.
    if !test_values.is_fine_grained() {
        unsafe { queue.enqueue_svm_map(CL_BLOCKING, CL_MAP_WRITE, &mut test_values, &[])? };
    }

    // Copy input data into the OpenCL SVM vector
    test_values.clone_from_slice(&value_array);

    // Make test_values immutable
    let test_values = test_values;

    // Unmap test_values if not a CL_MEM_SVM_FINE_GRAIN_BUFFER, handing the
    // buffer back to the device before kernel execution.
    if !test_values.is_fine_grained() {
        let unmap_test_values_event = unsafe { queue.enqueue_svm_unmap(&test_values, &[])? };
        unmap_test_values_event.wait()?;
    }

    // The output data, an OpenCL SVM vector
    let mut results =
        SvmVec::<cl_int>::allocate(&context, ARRAY_SIZE).expect("SVM allocation failed");

    // Run the kernel on the input data
    let kernel_event = unsafe {
        ExecuteKernel::new(&kernel)
            .set_arg_svm(results.as_mut_ptr())
            .set_arg_svm(test_values.as_ptr())
            .set_global_work_size(ARRAY_SIZE)
            .enqueue_nd_range(&queue)?
    };

    // Wait for the kernel to complete execution on the device
    kernel_event.wait()?;

    // Map results if not a CL_MEM_SVM_FINE_GRAIN_BUFFER, so the host may
    // read them.
    if !results.is_fine_grained() {
        unsafe { queue.enqueue_svm_map(CL_BLOCKING, CL_MAP_READ, &mut results, &[])? };
    }

    // Can access OpenCL SVM directly, no need to map or read the results
    println!("sum results: {:?}", results);

    // Unmap results if not a CL_MEM_SVM_FINE_GRAIN_BUFFER
    if !results.is_fine_grained() {
        let unmap_results_event = unsafe { queue.enqueue_svm_unmap(&results, &[])? };
        unmap_results_event.wait()?;
    }

    Ok(())
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
kenba/opencl3 | https://github.com/kenba/opencl3/blob/27c55789c114765e8f857c8c691c863166e0f6d2/examples/basic.rs | examples/basic.rs | // Copyright (c) 2021 Via Technology Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use opencl3::Result;
use opencl3::command_queue::{CL_QUEUE_PROFILING_ENABLE, CommandQueue};
use opencl3::context::Context;
use opencl3::device::{CL_DEVICE_TYPE_GPU, Device, get_all_devices};
use opencl3::kernel::{ExecuteKernel, Kernel};
use opencl3::memory::{Buffer, CL_MEM_READ_ONLY, CL_MEM_WRITE_ONLY};
use opencl3::program::Program;
use opencl3::types::{CL_BLOCKING, CL_NON_BLOCKING, cl_event, cl_float};
use std::ptr;
const PROGRAM_SOURCE: &str = r#"
kernel void saxpy_float (global float* z,
global float const* x,
global float const* y,
float a)
{
const size_t i = get_global_id(0);
z[i] = a*x[i] + y[i];
}"#;
const KERNEL_NAME: &str = "saxpy_float";
/// Example: run a SAXPY (`z = a*x + y`) float kernel on the first available
/// GPU device, print the first and last results, and report the kernel's
/// execution time from the profiling counters.
///
/// Returns an error from any failing OpenCL call propagated with `?`, and
/// panics (via `expect`) when no device is found or when context/queue/
/// program/kernel creation fails.
fn main() -> Result<()> {
    // Find a usable device for this application
    let device_id = *get_all_devices(CL_DEVICE_TYPE_GPU)?
        .first()
        .expect("no device found in platform");
    let device = Device::new(device_id);

    // Create a Context on an OpenCL device
    let context = Context::from_device(&device).expect("Context::from_device failed");

    // Create a command_queue on the Context's device, with profiling enabled
    // so the kernel's start/end timestamps can be queried afterwards.
    let queue = CommandQueue::create_default(&context, CL_QUEUE_PROFILING_ENABLE)
        .expect("CommandQueue::create_default failed");

    // Build the OpenCL program source and create the kernel.
    let program = Program::create_and_build_from_source(&context, PROGRAM_SOURCE, "")
        .expect("Program::create_and_build_from_source failed");
    let kernel = Kernel::create(&program, KERNEL_NAME).expect("Kernel::create failed");

    /////////////////////////////////////////////////////////////////////
    // Compute data

    // The input data: x = [1.0; N] and y[i] = 1.0 + i
    const ARRAY_SIZE: usize = 1000;
    let ones: [cl_float; ARRAY_SIZE] = [1.0; ARRAY_SIZE];
    let mut sums: [cl_float; ARRAY_SIZE] = [0.0; ARRAY_SIZE];
    // Iterator form instead of indexed assignment (no per-element bounds checks)
    for (i, sum) in sums.iter_mut().enumerate() {
        *sum = 1.0 + 1.0 * i as cl_float;
    }

    // Create OpenCL device buffers
    let mut x = unsafe {
        Buffer::<cl_float>::create(&context, CL_MEM_READ_ONLY, ARRAY_SIZE, ptr::null_mut())?
    };
    let mut y = unsafe {
        Buffer::<cl_float>::create(&context, CL_MEM_READ_ONLY, ARRAY_SIZE, ptr::null_mut())?
    };
    let z = unsafe {
        Buffer::<cl_float>::create(&context, CL_MEM_WRITE_ONLY, ARRAY_SIZE, ptr::null_mut())?
    };

    // Blocking write
    let _x_write_event = unsafe { queue.enqueue_write_buffer(&mut x, CL_BLOCKING, 0, &ones, &[])? };

    // Non-blocking write, wait for y_write_event
    let y_write_event =
        unsafe { queue.enqueue_write_buffer(&mut y, CL_NON_BLOCKING, 0, &sums, &[])? };

    // a value for the kernel function
    let a: cl_float = 300.0;

    // Use the ExecuteKernel builder to set the kernel buffer and
    // cl_float value arguments, before setting the one dimensional
    // global_work_size for the call to enqueue_nd_range.
    // Unwraps the Result to get the kernel execution event.
    let kernel_event = unsafe {
        ExecuteKernel::new(&kernel)
            .set_arg(&z)
            .set_arg(&x)
            .set_arg(&y)
            .set_arg(&a)
            .set_global_work_size(ARRAY_SIZE)
            .set_wait_event(&y_write_event)
            .enqueue_nd_range(&queue)?
    };

    // The kernel event forms the wait list for the read below
    // (vec! instead of Vec::default() + push)
    let events: Vec<cl_event> = vec![kernel_event.get()];

    // Create a results array to hold the results from the OpenCL device
    // and enqueue a read command to read the device buffer into the array
    // after the kernel event completes.
    let mut results: [cl_float; ARRAY_SIZE] = [0.0; ARRAY_SIZE];
    let read_event =
        unsafe { queue.enqueue_read_buffer(&z, CL_NON_BLOCKING, 0, &mut results, &events)? };

    // Wait for the read_event to complete.
    read_event.wait()?;

    // Output the first and last results
    println!("results front: {}", results[0]);
    println!("results back: {}", results[ARRAY_SIZE - 1]);

    // Calculate the kernel duration, from the kernel_event
    let start_time = kernel_event.profiling_command_start()?;
    let end_time = kernel_event.profiling_command_end()?;
    let duration = end_time - start_time;
    println!("kernel execution duration (ns): {}", duration);

    Ok(())
}
| rust | Apache-2.0 | 27c55789c114765e8f857c8c691c863166e0f6d2 | 2026-01-04T20:24:28.844285Z | false |
sidju/hired | https://github.com/sidju/hired/blob/1b2fc1714e913520d5defd3937499ffdff79d80f/src/config.rs | src/config.rs | use std::collections::HashMap;
use std::ops::Not;
use clap::Parser;
use serde::{Serialize, Deserialize};
use figment::{
Figment,
providers::{
Serialized,
YamlExtended,
Env,
Format,
},
};
use add_ed::macros::Macro;
// Import default config
const DEFAULT_CONFIG: &str = include_str!("../default_config.yaml");
// The CLI arguments struct.
// We do some fancy serde attrs to not serialize any arg not given, so that
// figment only treats explicitly provided flags as configuration overrides.
// NOTE: the `///` doc comments on the fields double as `--help` text via the
// clap derive, so they are user-facing strings.
/// hired, the highlighting EDitor
#[derive(Parser, Debug, Serialize)]
#[clap(version, about)]
struct Args {
    /// configuration profile to use (if none given uses default)
    #[clap(long, default_value = "default")]
    // Never serialized: the profile selects config, it isn't config itself.
    #[serde(skip_serializing)]
    profile: String,
    /// path to the file to open
    #[clap(value_parser)]
    // Only serialized when given, so an absent path doesn't override config.
    #[serde(skip_serializing_if = "Option::is_none")]
    path: Option<String>,
    /// default to printing with line numbers
    #[clap(action, short)]
    // Only serialized when set; a false flag must not override config.
    #[serde(skip_serializing_if = "<&bool>::not")]
    n: bool,
    /// default to printing in literal mode
    #[clap(action, short)]
    #[serde(skip_serializing_if = "<&bool>::not")]
    l: bool,
    /// open configuration file
    #[clap(action, long)]
    #[arg(conflicts_with("path"))]
    #[serde(skip_serializing)]
    open_config: bool,
    /// create default config file and open it
    #[clap(action, long)]
    #[arg(conflicts_with_all(["path","open_config"]))]
    #[serde(skip_serializing)]
    create_config: bool,
    /// print attributions
    #[clap(action, long)]
    #[arg(conflicts_with_all(["path", "open_config", "create_config"]))]
    #[serde(skip_serializing)]
    attributions: bool,
}
// The configuration struct,
// constructed by Figment using serde::Deserialize.
// `deny_unknown_fields` makes typos in the config file hard errors.
#[derive(Debug, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct Config {
    // Path to the file to open
    #[serde(default)]
    pub path: String,
    // Default printing flags: n = line numbers, l = literal mode
    // (names mirror the CLI flags in `Args`)
    #[serde(default)]
    pub n: bool,
    #[serde(default)]
    pub l: bool,
    // Defined macros, keyed by the name they are invoked with
    #[serde(default)]
    pub macros: HashMap<String, Macro>,
}
/// Build the effective [`Config`] by merging, from lowest to highest
/// precedence: the config file, `HIRED_`-prefixed environment variables and
/// CLI arguments; then selecting the requested profile.
///
/// Also services the informational flags before any merging:
/// `--attributions` prints credits and exits, `--create-config` writes the
/// default config file (failing if one exists), and `--open-config` /
/// `--create-config` redirect `config.path` at the config file itself.
///
/// Panics (with a user-readable message) on missing $HOME, filesystem
/// errors while creating the config, or an invalid merged configuration.
pub fn construct_config() -> Config {
    // First figure out platform specific config paths
    let (config_dir, config_path) = {
        let app_dirs = directories::ProjectDirs::from("se", "sidju", "hired")
            .expect("Failed to find a config directory. Is $HOME configured?");
        // Return the needed data from this block
        (app_dirs.config_dir().to_owned(), app_dirs.config_dir().join("config.yaml"))
    };
    // Parse arguments first, so we can see if we should create a default config
    let args = Args::parse();

    // If requested we print attributions and exit
    if args.attributions {
        println!();
        println!("Written by sidju, inspired by ed (by Ken Thompson)");
        println!("( See all contributors on github.com/sidju/hired and github.com/sidju/add-ed )\n");
        println!("Special thanks to the crates regex and syntect, which made this project feasible.");
        println!(
            "Attributions for the theme and all syntax definitions can be found here:\n{}",
            two_face::acknowledgement_url()
        );
        println!("Heartfelt thanks to the authors of those, and to the crates bat (and two-face) which gathered them.\n");
        std::process::exit(0);
    }
    if args.create_config {
        if config_path.exists() {
            // Refuse to clobber an existing config
            println!(
                "There already exists a file at {}, delete it first if you wish to replace it.",
                config_path.display(),
            );
            std::process::exit(1);
        }
        else {
            // We need to first create our project folder in the config folder
            if !config_dir.is_dir() {
                std::fs::DirBuilder::new().create(config_dir)
                    .expect("Error when creating config directory for hired.");
            }
            std::fs::write(&config_path, DEFAULT_CONFIG)
                .expect("Error when writing default config for hired.");
        }
    }
    let mut config: Config = Figment::new()
        // Read in config file
        .merge(YamlExtended::file(&config_path).nested())
        // Read in overrides from environment
        .merge(Env::prefixed("HIRED_").global())
        // Allow CLI arguments to override configuration
        .merge(Serialized::globals(&args))
        // Select which profile to load config from
        .select(&args.profile)
        // Convert back into config struct and verify it is valid
        .extract()
        .expect("Invalid configuration");
    // If open/create config is given we overwrite any given path with config path
    if args.open_config || args.create_config {
        config.path = config_path.into_os_string().into_string()
            .expect("Config path isn't valid unicode.");
    }
    config
}
| rust | MIT | 1b2fc1714e913520d5defd3937499ffdff79d80f | 2026-01-04T20:24:27.089787Z | false |
sidju/hired | https://github.com/sidju/hired/blob/1b2fc1714e913520d5defd3937499ffdff79d80f/src/macro_store.rs | src/macro_store.rs | use std::collections::HashMap;
use add_ed::EdError;
use add_ed::macros::{
Macro,
MacroGetter,
};
/// Aggregating macro getter
///
/// Tries to get macros, in order, from:
/// - Configuration
/// - TODO: Files in specific path
pub struct MacroStore<'a> {
    // Macros parsed from the user's configuration file, keyed by the name
    // they are invoked with.
    pub config_macros: &'a HashMap<String, Macro>,
}
impl<'a> MacroGetter for MacroStore<'a> {
    /// Look up a macro by name in the configured sources.
    ///
    /// Currently only configuration-defined macros are searched; this always
    /// returns `Ok`, with `None` when no macro named `name` exists.
    fn get_macro(&self, name: &str) -> Result<Option<&Macro>, EdError> {
        // `HashMap::get` already yields `Option<&Macro>`; the former
        // `.into()` here was an identity conversion and has been removed.
        Ok(self.config_macros.get(name))
    }
}
| rust | MIT | 1b2fc1714e913520d5defd3937499ffdff79d80f | 2026-01-04T20:24:27.089787Z | false |
sidju/hired | https://github.com/sidju/hired/blob/1b2fc1714e913520d5defd3937499ffdff79d80f/src/main.rs | src/main.rs | mod config;
use config::construct_config;
mod macro_store;
// All UI abstractions
mod hui;
use hui::error::HighlightingUIError as HUIError;
use add_ed::ui::UI;
pub fn main() {
// Parse CLI arguments, env and config file into a run configuration
// (This will abort execution in a lot of cases, so it must be ran before
// enabling raw mode)
let config = construct_config();
// Construct editor
let mut ui = hui::HighlightingUI::new();
let mut io = add_ed::io::LocalIO::new();
// Create our macro store
let macro_store = macro_store::MacroStore{
config_macros: &config.macros,
};
let mut ed = add_ed::Ed::new(&mut io, ¯o_store);
ed.n = config.n;
ed.l = config.l;
// Start raw mode before using HighlightingUI
// Avoid using .unwrap(), .expect() or panic!() when in raw mode, as it leaves
// the terminal in an unusable state for bash.
crossterm::terminal::enable_raw_mode()
.map_err(HUIError::RawmodeSwitchFailed)
.unwrap()
;
// Run the editor, saving result
let res = (|| -> Result<(), add_ed::error::EdError>{
// Handle if hired is started not on column 0 (for example git may do this)
// (Doesn't require raw mode to run, but enters and leaves rawmode if not.)
let pos = crossterm::cursor::position()
.map_err(HUIError::TerminalIOFailed)
.unwrap()
;
if pos.0 != 0 { print!("\n\r"); }
let res = ed.run_command(&mut ui, &format!("e{}", config.path));
if let Err(e) = res {
ui.print_message(&format!("{}", e))?;
}
ed.run(&mut ui)?;
Ok(())
})();
// Clear out raw mode before reacting to result
crossterm::terminal::disable_raw_mode()
.map_err(HUIError::RawmodeSwitchFailed)
.unwrap();
// Panic if we exit because of a fatal error
res.unwrap();
}
| rust | MIT | 1b2fc1714e913520d5defd3937499ffdff79d80f | 2026-01-04T20:24:27.089787Z | false |
sidju/hired | https://github.com/sidju/hired/blob/1b2fc1714e913520d5defd3937499ffdff79d80f/src/hui/doc_print.rs | src/hui/doc_print.rs | use crossterm::{
event::{
self,
Event,
KeyEvent,
},
terminal::{
Clear,
ClearType,
EnterAlternateScreen,
LeaveAlternateScreen,
},
cursor::{
Hide,
Show,
},
queue,
};
use termimad::{
Area,
MadView,
MadSkin,
Error,
};
use std::io::Write;
/// Compute the area the documentation viewer should occupy: the full
/// terminal screen, padded so the rendered text is at most 120 columns wide.
fn view_area() -> Area {
    let mut area = Area::full_screen();
    area.pad_for_max_width(120);
    area
}
/// Render a markdown document in a scrollable viewer on the alternate screen.
///
/// Scrolls by line with Up / Down / Enter and by page with PageUp /
/// PageDown / Space; any other key closes the viewer. Terminal resizes are
/// handled by clearing and re-laying-out the view. The alternate screen and
/// cursor are restored even when the inner loop errors.
pub fn display_doc(
    doc: &str,
) -> Result<(), Error> {
    // Join lines that markdown renders as one paragraph, so the viewer wraps
    // them to the terminal width instead of keeping the source's hard breaks.
    let mangled = join_joinable_lines(doc);
    let mut w = std::io::stdout();
    queue!(w, EnterAlternateScreen)?;
    queue!(w, Hide)?; // Cursor
    // Inner closure so cleanup below runs regardless of errors in the loop
    let res = (|| {
        let mut view = MadView::from(mangled, view_area(), MadSkin::default_dark());
        // Event loop while printing
        loop {
            // Print (with scrollbar)
            view.write_on(&mut w)?;
            w.flush()?;
            // Get input and react to it
            match event::read() {
                Ok(Event::Key(KeyEvent { code, .. })) => {
                    use crossterm::event::KeyCode as KC;
                    match code {
                        KC::Up => view.try_scroll_lines(-1),
                        KC::Down | KC::Enter => view.try_scroll_lines(1),
                        KC::PageUp => view.try_scroll_pages(-1),
                        KC::PageDown | KC::Char(' ') => view.try_scroll_pages(1),
                        // Any other key exits the viewer
                        _ => break,
                    }
                },
                Ok(Event::Resize(..)) => {
                    queue!(w, Clear(ClearType::All))?;
                    view.resize(&view_area());
                },
                // Ignore read errors and other event kinds
                _ => {},
            }
        }
        Ok(())
    })();
    queue!(w, Show)?; // Restore cursor
    queue!(w, LeaveAlternateScreen)?;
    w.flush()?;
    res
}
/// Parser state accumulated by `join_joinable_lines` while walking the input.
#[derive(Debug)]
struct State {
    // The joined text produced so far; every added line ends with '\n'.
    pub output: String,
    // Whether the next line may be joined onto the previous one.
    pub remove_newlines: bool,
    // Whether we are currently inside a ``` fenced code block.
    pub in_a_codeblock: bool,
}

impl State {
    /// Fresh state: empty output, joining allowed, not in a code block.
    fn new() -> Self {
        Self {
            output: String::new(),
            remove_newlines: true,
            in_a_codeblock: false,
        }
    }

    /// Append `line` (plus a terminating newline) to the output, recording
    /// whether the line ends with a markdown explicit line break — in which
    /// case the following line must not be joined onto it.
    fn add_line(
        &mut self,
        line: &str,
    ) {
        if line.ends_with("  ") {
            // Two-or-more trailing spaces: explicit newline; drop them all.
            self.remove_newlines = false;
            self.output.push_str(line.trim_end_matches(' '));
        } else if let Some(stripped) = line.strip_suffix('\\') {
            // Trailing backslash: explicit newline; drop the backslash.
            self.remove_newlines = false;
            self.output.push_str(stripped);
        } else {
            // Normal line.
            self.output.push_str(line);
        }
        // Every line is newline-terminated; joining later pops this off.
        self.output.push('\n');
    }
}
// Does line joining according to markdown syntax. Ie. normal newlines become
// blankspaces, unless otherwise indicated (explicit line breaks, code blocks,
// paragraph breaks, list entries and block quotes).
// (Block quotes and list entries recurse on their contents, so joining also
// happens inside them — see the md_join_blockquote / md_join_list tests.)
// (Has a quirk that it always adds a trailing newline to every line.)
fn join_joinable_lines(
    input: &str,
) -> String {
    // Construct state for parsing
    let mut state = State::new();
    // Go over each line, copying each into output
    // (Manual loop so the iterator can also be advanced inside the body)
    let mut iter = input.lines().peekable();
    'lines: loop {
        let line = match iter.next() {
            Some(x) => x,
            None => { break; }
        };
        // Since codeblocks should prevent further logic we check for those first.
        // Check for codeblock edges, with state tracking; as they are always
        // handled like this they have precedence.
        if line == "```" {
            state.in_a_codeblock = !state.in_a_codeblock;
            // the line after the codeblock end isn't allowed to join to it
            state.remove_newlines = false;
            state.add_line(line);
            continue;
        }
        // If we are in a codeblock we specifically do nothing and ignore md syntax
        if state.in_a_codeblock {
            state.add_line(line);
            continue;
        }
        // If indented codeblock (tab or 4 spaces) we also need to flag that it
        // isn't valid to join the next line to this one
        if
            line.starts_with('\t') ||
            line.starts_with("    ")
        {
            state.remove_newlines = false;
            state.add_line(line);
            continue;
        }
        // Similar handling for paragraph breaks (empty lines)
        if line == "" {
            state.remove_newlines = false;
            state.add_line(line);
            continue;
        }
        // Fancy recursion for block quotes, as they are allowed to contain nested
        // markdown
        if line.starts_with('>') {
            // Aggregate all lines that are part of this block (by start)
            let start_len = if line.starts_with("> ") { 2 } else { 1 };
            let mut block_lines = String::from(&line[start_len..]);
            while let Some(line) = iter.next_if(|s| s.starts_with('>')) {
                block_lines.push('\n');
                // Slice out potential indent, to prevent weird joins
                if line.starts_with("> ") {
                    block_lines.push_str(&line[2..]);
                } else {
                    block_lines.push_str(&line[1..]);
                }
            }
            // Then we recurse, as there can be markdown in the block
            let joined_entry_lines = join_joinable_lines(&block_lines);
            // And we add back the joined lines, with the indent put back
            for line in joined_entry_lines.lines() {
                // If the line is part of a blockquote from the inner we don't add a
                // space after the '>', otherwise we do.
                if line.starts_with('>') {
                    state.output.push('>');
                }
                else {
                    state.output.push_str("> ")
                }
                state.add_line(line);
            }
            // Finally prevent the next line from joining into the block
            state.remove_newlines = false;
            continue;
        }
        // Next look for list entry starts
        if
            // Unordered list start
            line.starts_with("- ") ||
            line.starts_with("* ") ||
            line.starts_with("+ ")
        {
            // Aggregate all lines that are part of this entry (by indentation)
            let entry_start = &line[..2];
            let mut entry_lines = String::from(&line[2..]);
            while let Some(line) = iter.next_if(|s| s.starts_with("  ") || s == &"") {
                entry_lines.push('\n');
                // Slice out indent, to prevent weird joins
                // (use get to handle if it is an empty line)
                entry_lines.push_str(line.get(2..).unwrap_or(""));
            }
            // Then we recurse, as there can be markdown in the entry
            let joined_entry_lines = join_joinable_lines(&entry_lines);
            // And we add back the joined lines, with the indent put back
            let mut first_loop = true;
            for line in joined_entry_lines.lines() {
                // The first line needs the list entry start.
                if first_loop {
                    first_loop = false;
                    state.output.push_str(entry_start);
                    state.add_line(line);
                }
                // Don't add trailing spaces for empty lines
                else if line == "" {
                    state.add_line(line);
                }
                // Otherwise add back the indentation
                else {
                    state.output.push_str("  ");
                    state.add_line(line);
                }
            }
            // Finally prevent the next line from joining into the list entry
            state.remove_newlines = false;
            continue;
        }
        // Last we do fancy parsing of line contents:
        // ordered list entry starts ("1. ", "244. ", ...)
        let mut first_loop = true;
        'chars: for (i, ch) in line.char_indices() {
            // Numbered list is recognized by one or more leading digits
            if ch.is_numeric() { first_loop = false; continue 'chars; }
            // followed directly by a dot.
            // If we get here it's a match, just add and take the next line
            if ch == '.' && !first_loop {
                // NOTE(review): slicing at `i + 2` assumes a character follows
                // the '.' — a line of only digits ending in '.' would panic.
                // Aggregate all lines that are part of this entry (by indentation)
                let entry_start = &line[..i + 2];
                let mut entry_lines = String::from(&line[i + 2..]);
                while let Some(line) = iter.next_if(|s| s.starts_with("  ") || s == &"") {
                    entry_lines.push('\n');
                    // Slice out indent, to prevent weird joins
                    entry_lines.push_str(line.get(2..).unwrap_or(""));
                }
                // Then we recurse, as there can be markdown in the entry
                let joined_entry_lines = join_joinable_lines(&entry_lines);
                // And we add back the joined lines, with the indent put back
                let mut first_loop = true;
                for line in joined_entry_lines.lines() {
                    // The first line needs the list entry start
                    if first_loop {
                        first_loop = false;
                        state.output.push_str(entry_start);
                        state.add_line(line);
                    }
                    // Don't add trailing spaces for empty lines
                    else if line == "" {
                        state.add_line(line);
                    }
                    // Otherwise add back the indentation
                    else {
                        state.output.push_str("  ");
                        state.add_line(line);
                    }
                }
                // Finally prevent next line from joining with the list entry
                state.remove_newlines = false;
                continue 'lines;
            };
            // Any other character before '.' or no digit before '.' means it isn't
            // an ordered list entry
            break 'chars;
        }
        // For each line finally check if the preceding line precludes joining.
        // If so: no need to think, just reset the flag and add the line un-joined.
        if !state.remove_newlines {
            state.remove_newlines = true;
            state.add_line(line);
            continue;
        }
        // If we get this far we can actually join the line with the preceding.
        // Handle trying to join the first line to non-existent preceding line
        if let Some(ch) = state.output.pop() {
            // Paranoid levels of insurance we don't delete any non-newline character
            // (shouldn't be reachable, as state.add_line ALWAYS adds '\n' after each line)
            if ch != '\n' { state.output.push(ch); }
            else { state.output.push(' '); }
        }
        state.add_line(line);
    }
    state.output
}
#[cfg(test)]
mod test {
    use super::*;

    // Plain lines with no markdown markers join into one space-separated line.
    #[test]
    fn md_join_normal_lines(){
        assert_eq!(
            &join_joinable_lines("just\nsome\ntext\nto\njoin"),
            "just some text to join\n",
        )
    }

    // A blank line separates paragraphs; joining never crosses it.
    #[test]
    fn md_join_paragraph() {
        assert_eq!(
            &join_joinable_lines("hello\nworld\n\nnice weather,\neh?\n"),
            "hello world\n\nnice weather, eh?\n"
        )
    }

    // A trailing backslash or trailing double-space forces a hard line break.
    #[test]
    fn md_join_explicit_newlines() {
        assert_eq!(
            &join_joinable_lines("hello\nworld\\\nnice weather  \neh?\n"),
            "hello world\nnice weather\neh?\n"
        )
    }

    // Indented (4-space or tab) and fenced code blocks pass through verbatim,
    // and following lines never join into them.
    #[test]
    fn md_join_codeblock() {
        assert_eq!(
            &join_joinable_lines(
                "Code:\n    source code
Other code:\n\tsourcerer\ncode
Other other code:\n```\nsourcerest\ncode\n```\nend\n"
            ),
            "Code:\n    source code
Other code:\n\tsourcerer\ncode \
Other other code:\n```\nsourcerest\ncode\n```\nend\n"
        )
    }

    // Should be able to join lines within the same blockquote (and nesting
    // level), but not across quote levels.
    #[test]
    fn md_join_blockquote() {
        assert_eq!(
            &join_joinable_lines("> Hello\n> world!\n>> Nice\n>> weather!\n> Is\n> it?\n>> Yep!\n"),
            "> Hello world!\n>> Nice weather!\n> Is it?\n>> Yep!\n"
        )
    }

    // Lists should only join with (2-space) indented continuation lines.
    #[test]
    fn md_join_list(){
        assert_eq!(
            &join_joinable_lines("- some\n+ list\n  to\n* join\nand not\n"),
            "- some\n+ list to\n* join\nand not\n",
        )
    }

    // Ordered list entries behave the same as unordered ones.
    #[test]
    fn md_join_ordered_list() {
        assert_eq!(
            &join_joinable_lines("1. Fine\n  stuff\n244. Okay-ish other\nstuff\n"),
            "1. Fine stuff\n244. Okay-ish other\nstuff\n"
        )
    }
}
| rust | MIT | 1b2fc1714e913520d5defd3937499ffdff79d80f | 2026-01-04T20:24:27.089787Z | false |
sidju/hired | https://github.com/sidju/hired/blob/1b2fc1714e913520d5defd3937499ffdff79d80f/src/hui/error.rs | src/hui/error.rs | /// Error type for HighlightingUI
#[derive(Debug)]
pub enum HighlightingUIError {
    // Kept separate so callers can print a recovery guide ("run 'reset'")
    // for a terminal left stuck in raw mode.
    RawmodeSwitchFailed(std::io::Error),
    // Generic terminal interaction failure; can't do much smarter stuff.
    // Possibly squeeze in some filename/linenumber.
    TerminalIOFailed(std::io::Error),
    // Received Ctrl+c, aborting input and returning to editor.
    Interrupted,
    // Terminal not wide enough to print docs (from termimad).
    DocInsufficientWidth(termimad::InsufficientWidthError),
}
impl std::fmt::Display for HighlightingUIError {
    /// Render a user-readable description of the error, including the
    /// underlying error where one exists.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        use HighlightingUIError as HE;
        match self {
            // Multi-line messages use string-literal continuations ('\' at
            // end-of-line) instead of concat!, producing identical text.
            HE::RawmodeSwitchFailed(e) => write!(
                f,
                "Failed to switch terminal to/from rawmode.\n\n\
                 If your terminal is in rawmode when the editor quits, run 'reset'.\n\n\
                 Underlying error: {:?}",
                e,
            ),
            HE::TerminalIOFailed(e) => write!(
                f,
                "Failed to interact with terminal.\n\n\
                 Underlying error: {:?}",
                e,
            ),
            HE::Interrupted => write!(f, "Interrupted!"),
            HE::DocInsufficientWidth(e) => write!(
                f,
                "Failed to render documentation.\n\n\
                 Underlying error: {}",
                e,
            ),
        }
    }
}
impl std::error::Error for HighlightingUIError{}

// Marker impl so add_ed can carry this type as a UI error.
impl add_ed::error::UIErrorTrait for HighlightingUIError{}

impl HighlightingUIError {
    /// Convert a termimad error into the matching variant of this enum.
    pub fn from_termimad(e: termimad::Error) -> Self {
        use termimad::Error as TE;
        match e {
            TE::IO(inner) => Self::TerminalIOFailed(inner),
            TE::InsufficientWidth(inner) => Self::DocInsufficientWidth(inner),
        }
    }
}
| rust | MIT | 1b2fc1714e913520d5defd3937499ffdff79d80f | 2026-01-04T20:24:27.089787Z | false |
sidju/hired | https://github.com/sidju/hired/blob/1b2fc1714e913520d5defd3937499ffdff79d80f/src/hui/mod.rs | src/hui/mod.rs | use crossterm::QueueableCommand;
use two_face::re_exports::syntect::parsing::SyntaxSet;
use two_face::re_exports::syntect::highlighting::Theme;
use std::io::stdout;
// use the UI trait, to implement it
use add_ed::ui::{
UI,
UILock,
};
use add_ed::{
Ed,
error::{
Result,
EdError,
},
};
mod print;
mod doc_print;
mod input;
pub mod error;
use error::HighlightingUIError as HUIError;
/// The hired UI: crossterm terminal handling with syntect-based highlighting.
pub struct HighlightingUI {
    // Syntax definitions used to pick a highlighter from the open file's name.
    syntax_lib: SyntaxSet,
    // Colour theme applied when printing highlighted text.
    theme: Theme,
    // Cached terminal size as (columns, rows).
    term_size: (usize, usize),
    // Previously entered commands, oldest first.
    command_history: Vec<String>,
}
impl HighlightingUI {
    /// Construct a UI using the embedded Base16 theme and the extended
    /// newline-aware syntax set, caching the current terminal size and
    /// falling back to 80x24 when it cannot be queried.
    pub fn new() -> Self {
        // Clone needed: `get` returns a reference into the theme set.
        let theme = two_face::theme::extra()
            .get(two_face::theme::EmbeddedThemeName::Base16)
            .clone();
        Self {
            syntax_lib: two_face::syntax::extra_newlines(),
            // Field-init shorthand instead of the redundant `theme: theme`
            theme,
            term_size: crossterm::terminal::size()
                .map(|(w, h)| (w as usize, h as usize))
                .unwrap_or((80, 24)),
            command_history: Vec::new(),
        }
    }
}
use std::io::Write; // Needed for the queue and flush functions on stdout
impl UI for HighlightingUI {
fn print_message(
&mut self,
text: &str,
) -> Result<()> {
(|| -> std::io::Result<()> {
use crossterm::style::Print;
let mut stdout = stdout();
if crossterm::cursor::position()?.0 != 0 {
stdout.queue(Print("\n\r"))?;
}
for line in text.lines() {
stdout.queue(Print(line))?;
stdout.queue(Print("\n\r"))?;
}
stdout.flush()?;
Ok(())
})()
.map_err(HUIError::TerminalIOFailed)
.map_err(add_ed::error::UIError::from)
.map_err(EdError::UI)
}
fn print_commands(&mut self) -> Result<()> {
doc_print::display_doc(add_ed::messages::COMMAND_LIST.into())
.map_err(HUIError::from_termimad)
.map_err(add_ed::error::UIError::from)
.map_err(EdError::UI)
}
fn print_command_documentation(&mut self) -> Result<()> {
doc_print::display_doc(add_ed::messages::COMMAND_DOCUMENTATION.into())
.map_err(HUIError::from_termimad)
.map_err(add_ed::error::UIError::from)
.map_err(EdError::UI)
}
fn get_command(
&mut self,
_ed: &Ed,
prefix: Option<char>,
) -> Result<String> {
let command = input::event_input(
self,
Vec::new(),
prefix,
None, // We want one line specifically
)
.map_err(|e|add_ed::EdError::UI(e.into()))?
.remove(0)
;
self.command_history.push(command.clone());
Ok(command)
}
fn get_input(
&mut self,
_ed: &Ed,
terminator: char,
initial_buffer: Option<Vec<String>>,
) -> Result<Vec<String>> {
input::event_input(
self,
initial_buffer.unwrap_or(Vec::new()),
None, // No line prefix for input
Some(terminator)
)
.map_err(|e|add_ed::EdError::UI(e.into()))
}
fn print_selection(
&mut self,
ed: &Ed,
selection: (usize, usize),
numbered: bool,
literal: bool,
) -> Result<()> {
// First we get the data needed to call the internal function
let mut iter = ed.history.current().get_tagged_lines(selection)?;
let syntax = self.syntax_lib.find_syntax_for_file(&ed.file)
.unwrap_or(None)
.unwrap_or_else(|| self.syntax_lib.find_syntax_plain_text());
// Then we call the internal print
print::internal_print(
&self,
&syntax,
&mut iter,
print::PrintConf {
prefix: None,
cursor: None,
start_line: selection.0,
numbered: numbered,
literal: literal,
separator: true,
},
)
.map_err(HUIError::TerminalIOFailed)
.map_err(add_ed::error::UIError::from)
?;
Ok(())
}
fn lock_ui(&mut self) -> UILock {
// Before handing over to shell escaped commands we need to disable raw mode
crossterm::terminal::disable_raw_mode()
.map_err(HUIError::RawmodeSwitchFailed)
.unwrap()
;
UILock::new(self)
}
  fn unlock_ui(&mut self) {
    // Re-enable raw mode, to go back to using the UI.
    // NOTE: as in lock_ui, a failed mode switch is unrecoverable for the UI,
    // so unwrap (panic) is the intended behavior.
    crossterm::terminal::enable_raw_mode()
      .map_err(HUIError::RawmodeSwitchFailed)
      .unwrap()
    ;
  }
}
| rust | MIT | 1b2fc1714e913520d5defd3937499ffdff79d80f | 2026-01-04T20:24:27.089787Z | false |
sidju/hired | https://github.com/sidju/hired/blob/1b2fc1714e913520d5defd3937499ffdff79d80f/src/hui/print.rs | src/hui/print.rs | // Get the prerequisite definitions for writing these functions
use super::HighlightingUI;
use crossterm::{
QueueableCommand,
style::{
Print,
Color,
}
};
use std::io::{Result, Write}; // Needs to be used in for queue and flush
// Create some printing helpers
// Translate a syntect theme color into a crossterm color.
// The theme encodes 16-color palette entries as alpha == 0 with the palette
// index stored in the red channel; any other alpha means a true RGB color.
fn syntect_to_crossterm_color(
  c: two_face::re_exports::syntect::highlighting::Color,
) -> Color {
  // Non-zero alpha: plain RGB passthrough
  if c.a != 0 {
    return Color::Rgb{r: c.r, g: c.g, b: c.b};
  }
  // Alpha is zero: red channel selects one of the 16 palette colors
  match c.r {
    // NOTE: mapping 0 to Reset makes background colors present as expected;
    // it is a workaround that you are free to replace with a proper fix
    0 => Color::Reset,
    1 => Color::DarkRed,
    2 => Color::DarkGreen,
    3 => Color::DarkYellow,
    4 => Color::DarkBlue,
    5 => Color::DarkMagenta,
    6 => Color::DarkCyan,
    7 => Color::Grey,
    8 => Color::DarkGrey,
    9 => Color::Red,
    10 => Color::Green,
    11 => Color::Yellow,
    12 => Color::Blue,
    13 => Color::Magenta,
    14 => Color::Cyan,
    // NOTE: mapping 15 to Reset makes foreground colors present as expected;
    // same workaround as above
    15 => Color::Reset,
    _ => panic!("Invalid theme. Alpha = 0 indicates 16 color in red."),
  }
}
// Queue the terminal commands that reproduce `style`: reset, then colors,
// then any bold/italic/underline attributes the style carries.
fn apply_style(
  style: two_face::re_exports::syntect::highlighting::Style,
  out: &mut impl Write,
) -> Result<()> {
  use two_face::re_exports::syntect::highlighting::FontStyle;
  use crossterm::style::{SetColors, SetAttribute, Colors, Attribute};
  // Start from a clean slate so earlier attributes don't bleed through
  out.queue(SetAttribute(Attribute::Reset))?;
  // Apply foreground and background colors together
  out.queue(SetColors(Colors::new(
    syntect_to_crossterm_color(style.foreground),
    syntect_to_crossterm_color(style.background),
  )))?;
  // Map each syntect font flag onto its crossterm attribute, in a fixed order
  let mapping = [
    (FontStyle::BOLD, Attribute::Bold),
    (FontStyle::ITALIC, Attribute::Italic),
    (FontStyle::UNDERLINE, Attribute::Underlined),
  ];
  for (flag, attr) in mapping {
    if style.font_style.contains(flag) {
      out.queue(SetAttribute(attr))?;
    }
  }
  Ok(())
}
// Return the terminal to default colors and attributes.
fn reset_style(out: &mut impl Write) -> Result<()> {
  use crossterm::style::{ResetColor, SetAttribute, Attribute};
  // ResetColor is not needed on linux, but may be on windows; kept for safety.
  out.queue(ResetColor)?
    .queue(SetAttribute(Attribute::Reset))?;
  Ok(())
}
// Queue a full-width horizontal rule, embedding the column number at every
// 20th column whenever the number fits before the right edge.
fn print_separator(
  out: &mut impl Write,
  width: usize,
) -> Result<()> {
  let mut sep = String::with_capacity(width);
  let mut col = 0;
  while col < width {
    // At every 20th column try to embed the column number itself
    if col % 20 == 0 {
      let num = col.to_string();
      if col + num.len() < width {
        // The number occupies num.len() columns, so jump past them
        sep.push_str(&num);
        col += num.len();
        continue;
      }
    }
    // Otherwise this column is a plain dash
    sep.push('-');
    col += 1;
  }
  // Terminate with newline + carriage return (raw mode convention in this file)
  sep.push('\n');
  sep.push('\r');
  out.queue(Print(sep))?;
  Ok(())
}
/// Summary of a finished printout, returned so callers can later overwrite
/// the printed area or reposition the terminal cursor.
pub struct PrintData {
  /// Total height of the print in terminal rows.
  pub height: u16,
  /// Cursor column relative to the bottom-left of the printout
  /// (0 when no cursor position was requested or found).
  pub cursor_x: u16,
  /// Cursor row relative to the bottom-left of the printout
  /// (0 when no cursor position was requested or found).
  pub cursor_y: u16,
}
/// Settings controlling a single call to `internal_print`.
pub struct PrintConf {
  /// Print prefix char at start of every line, before numbering if any.
  /// Intended to support prefix at command input.
  pub prefix: Option<char>,
  /// Position (x,y in text) to leave cursor at.
  /// Intended for when printing an actively edited buffer.
  pub cursor: Option<(usize, usize)>,
  /// Index in iterator from which to print.
  /// Intended to feed syntax highlighter with preceding lines without
  /// printing them. NOTE(review): the current `internal_print` only uses
  /// this as the line-number offset — confirm before relying on the
  /// "feed without printing" behavior.
  pub start_line: usize,
  /// If true print line number at start of every line.
  pub numbered: bool,
  /// If true print like 'ed's literal print mode.
  pub literal: bool,
  /// If true print a separator before the given text.
  pub separator: bool,
}
/// Print `text` to stdout with syntax highlighting, line wrapping, optional
/// numbering and optional 'ed-style literal markers, as configured by `conf`.
///
/// Returns the total printed height and, when `conf.cursor` was given, the
/// terminal coordinates corresponding to that buffer position (relative to
/// the bottom-left of the printout). The coordinates are 0,0 when no cursor
/// was requested or it was never reached.
pub fn internal_print(
  state: &HighlightingUI,
  syntax: &two_face::re_exports::syntect::parsing::SyntaxReference,
  text: &mut dyn Iterator<Item = (char, &str)>,
  conf: PrintConf,
) -> Result<PrintData> {
  let mut stdout = std::io::stdout();
  let theme = &state.theme;
  let mut highlighter = two_face::re_exports::syntect::easy::HighlightLines::new(syntax, theme);
  // Variables for tracking cursor positions
  // i is used for width to detect when we need to wrap lines over to next line
  let mut i = 0;
  // print height is returned in case we wish to overwrite this printout later
  let mut print_height: u16 = 0;
  // x, y and passed are for returning the terminal position of an optional buffer
  // position, if such was given. Always 0,0,false if not given or not found.
  let mut passed = false;
  let mut x: u16 = 0;
  let mut y: u16 = 0;
  if conf.separator {
    // Print a separator from whatever came before
    // potentially add more info to it later
    print_separator(&mut stdout, state.term_size.0)?;
    print_height += 1;
  }
  // Arguably one should give the highlighter all lines before the selection.
  // Otherwise it fails to understand multiline stuff over the selection edges.
  // Currently too minor for me to bother, PRs welcome
  for (linenr, line) in text.enumerate() {
    // For each new line the byte index starts at 0
    // Used for tracking the offsets of the characters as in a string
    // Needed to understand the cursor which gives byte offsets
    let mut byte_index = 0;
    // Highlight the line first
    let highlighted = highlighter.highlight_line(line.1, &state.syntax_lib)
      .unwrap(); // TODO: this should be handled, requires change of error type
    // Iterate over syntactic segments, setting the style for each
    for (style, text) in highlighted {
      apply_style(style, &mut stdout)?;
      for ch in text.chars() {
        // If prefix is given, print at start of real but not wrapped lines
        if let Some(pre) = conf.prefix {
          if i == 0 {
            reset_style(&mut stdout)?;
            // NOTE(review): len_utf8 is a byte count, not a display width;
            // fine for ASCII prefixes, approximate for wide characters.
            let pre_len = pre.len_utf8();
            stdout.queue(Print(pre))?;
            i += pre_len;
            apply_style(style, &mut stdout)?;
          }
        }
        // If line numbers are active, check if start of line
        if conf.numbered && (i % state.term_size.0 == 0) {
          reset_style(&mut stdout)?;
          // Calculate number and convert to string
          let tmp_num = (conf.start_line + linenr).to_string();
          let tmp_num_len = tmp_num.len(); // Only works because linenr is ascii
          // If this is a new line, print number
          if i == 0 {
            // If no line tag, print number
            if line.0 == '\0' {
              stdout.queue(Print(tmp_num))?;
            }
            // Else print the tag instead
            else {
              stdout.queue(Print(line.0))?;
              for _ in 1 .. tmp_num_len { stdout.queue(Print(' '))?; }
            }
          }
          // If a wrapped line, print inwards offset equal to the numbering
          else {
            for _ in 0 .. tmp_num_len { stdout.queue(Print(' '))?; }
          }
          // Print a separator and mark how many chars we printed
          stdout.queue(Print('│'))?;
          i += tmp_num_len + 1; // +1 for the separator
          // Finally we MUST restore the styling
          apply_style(style, &mut stdout)?;
        }
        // After printing potential prefixes we check against our given cursor, if given
        // We must check before printing ch, since printing newline resets i
        // Specifically we check if the cursor is before the current ch
        if let Some(cur) = conf.cursor {
          if ! passed {
            if (cur.0 == linenr && cur.1 <= byte_index) || cur.0 < linenr {
              // This all means we have passed by the given cursor for the first time
              // Due to needing to place the cursor one step down in that case we specially handle '\n'
              // Calculate current column and save in x
              x = (i % state.term_size.0) as u16 + 1;
              // Mark that we have passed, this will increment y for each new line started
              passed = true;
            }
            // For each char while not passed add their len to byte-index
            // Add after checking, since we otherwise cannot go to char index 0
            byte_index += ch.len_utf8();
          }
        }
        // Print the actual character
        // If literal mode, handle edge cases
        match ch {
          '\n' => {
            if conf.literal { stdout.queue(Print('$'))?; }
            i = 0;
          },
          '$' => if conf.literal {
            stdout.queue(Print("\\$"))?;
            i += 2;
          } else {
            stdout.queue(Print('$'))?;
            i += 1;
          },
          '\t' => {
            if conf.literal { stdout.queue(Print("--->"))?; }
            else { stdout.queue(Print("    "))?; }
            i += 4;
          },
          c => {
            stdout.queue(Print(c))?;
            i += 1;
          },
        }
        // Check if a new line is needed, aka. newline or wrapping
        // (i == 0 here both right after '\n' and when the line exactly
        // filled the terminal width)
        if i % state.term_size.0 == 0 {
          stdout.queue(Print("\n\r"))?;
          print_height += 1;
          if passed { y += 1; }
        }
      }
    }
  }
  // Closing cleanup and flush
  reset_style(&mut stdout)?;
  // NOTE(review): an earlier comment here claimed "this increases height and
  // y"; flush itself changes neither — confirm what was meant.
  stdout.flush()?;
  Ok(PrintData{
    height: print_height,
    cursor_x: x,
    cursor_y: y,
  })
}
| rust | MIT | 1b2fc1714e913520d5defd3937499ffdff79d80f | 2026-01-04T20:24:27.089787Z | false |
sidju/hired | https://github.com/sidju/hired/blob/1b2fc1714e913520d5defd3937499ffdff79d80f/src/hui/input.rs | src/hui/input.rs | // This module takes events and handles them as text input or commands
// The trait for queueing commands
use crossterm::QueueableCommand;
// All the event classes
use crossterm::event::{KeyCode, KeyModifiers, Event};
// And the writeable trait, to be able to flush stdout
use std::io::Write;
// Finally the error consts we use as error type
use super::HUIError;
type Result<T> = std::result::Result<T, HUIError>;
// Unicode helper missing from str: walk backwards from just below `i` and
// return the first valid char boundary; defaults to 0 when the scan is empty.
// (Byte 0 is always a boundary, so any i > 0 finds at least index 0.)
fn rfind_boundary(s: &str, i: usize) -> usize {
  (0..i).rev().find(|&b| s.is_char_boundary(b)).unwrap_or(0)
}
// Forward counterpart of rfind_boundary: return the first char boundary
// strictly after `i` (the end-of-string index counts as a boundary);
// returns `i` unchanged when `i` is already at or past the end.
fn find_boundary(s: &str, i: usize) -> usize {
  ((i + 1)..=s.len()).find(|&b| s.is_char_boundary(b)).unwrap_or(i)
}
// This input getter runs get_event and buffers the input with expected editing features
// Initial contents of the buffer is given as a vector of newline terminated strings
// A prefix can be given, which is then printed at start of every line and not included in input
// A terminator can be given.
// If given: input is returned after terminator has been entered alone on a line. Else on newline.
/// Interactive line-editor loop: repeatedly reprints `buffer`, reads one
/// terminal event, and applies it (insert, delete, cursor movement, history
/// navigation). Returns the edited buffer of newline-terminated lines, or
/// `HUIError::Interrupted` on Ctrl+C.
pub fn event_input(
  state: &mut super::HighlightingUI,
  initial_buffer: Vec<String>,
  prefix: Option<char>,
  terminator: Option<char>, // If none take only one line
) -> Result<Vec<String>> {
  let mut stdout = std::io::stdout();
  // Set the cursor to be visible, so our moves are visible
  stdout.queue(crossterm::cursor::Show).map_err(HUIError::TerminalIOFailed)?;
  // Set up buffer and variables for moving in it
  let mut buffer = initial_buffer;
  if buffer.len() == 0 { buffer.push("\n".to_string()); } // The buffer mustn't be empty
  let mut lindex = buffer.len() - 1; // Line index, lin-dex
  let mut chindex = buffer[lindex].len() - 1; // Char index (byte offset), ch-index
  // Variable for tracking how many steps back in history
  // we are when moving back in history
  let mut hoffset = state.command_history.len();
  // And one for keeping current input while moving about in history
  let mut semi_history = "\n".to_string();
  // Then the distances we need to remember between printing
  let mut dists = super::print::PrintData{ height: 0, cursor_y: 0, cursor_x: 0 };
  // And if we are to return
  let mut ret = false; // Flag when ready to return
  // Then input specific variables
  let mut partial = String::with_capacity(4); // To store partial chars
  // and finally movement specific
  // If we move via up/down to a line shorter than our current chindex that prior chindex is saved
  // here, so we can go to that prior chindex if next operation is up/down. Else it's cleared.
  let mut goal_chindex = None;
  // loop getting input events, ignoring those we can't handle.
  while !ret {
    // Print before blocking waiting for input
    // Move up the cursor to overwrite prior input with this input
    if (dists.height - dists.cursor_y) > 0 {
      stdout.queue(crossterm::cursor::MoveUp(dists.height - dists.cursor_y))
        .map_err(HUIError::TerminalIOFailed)?;
    }
    stdout.queue(crossterm::cursor::MoveToColumn(0)).map_err(HUIError::TerminalIOFailed)?;
    // Clear away old print
    stdout.queue(crossterm::terminal::Clear(crossterm::terminal::ClearType::FromCursorDown))
      .map_err(HUIError::TerminalIOFailed)?;
    // Then print
    let syntax = state.syntax_lib.find_syntax_plain_text();
    dists = super::print::internal_print(
      state,
      &syntax,
      &mut buffer.iter().map(|line| ('\0', &line[..])),
      super::print::PrintConf {
        prefix: prefix,
        cursor: Some((lindex, chindex)),
        start_line: 0,
        literal: false,
        numbered: false,
        separator: true,
      },
    ).map_err(HUIError::TerminalIOFailed)?;
    // And move to the positions returned
    if dists.cursor_y > 0 {
      stdout.queue(crossterm::cursor::MoveUp(dists.cursor_y)).map_err(HUIError::TerminalIOFailed)?;
    }
    // Subtract one, because MoveToColumn is 0 indexed
    stdout.queue(crossterm::cursor::MoveToColumn(dists.cursor_x.saturating_sub(1)))
      .map_err(HUIError::TerminalIOFailed)?;
    // Then make sure to flush this, or the cursor won't move
    stdout.flush().map_err(HUIError::TerminalIOFailed)?;
    match crossterm::event::read().map_err(HUIError::TerminalIOFailed)? {
      // If resize event, just update usize
      Event::Resize(x, y) => { state.term_size = (x as usize, y as usize); },
      // Ignore paste events
      Event::Paste(_) => (),
      // Ignore mouse events
      Event::Mouse(_) => (),
      // Ignore focus events
      Event::FocusGained | Event::FocusLost => (),
      // If key event, match code and modifiers and handle thereafter
      Event::Key(key) if key.kind == crossterm::event::KeyEventKind::Press => {
        // Check if any of the state variables should be cleared
        // Done here instead of in all but 2 key-handlers
        // If doing anything but continued input of partial character, clear it
        if let KeyCode::Char(_) = key.code {} else {
          partial.clear();
        }
        // If doing anything but moving up/down, clear goal_chindex
        if (key.code != KeyCode::Up &&
          key.code != KeyCode::Down ) ||
          key.modifiers != KeyModifiers::NONE
        {
          goal_chindex = None;
        }
        match (key.code, key.modifiers) {
          // If Ctrl+C is entered, abort input and return semi error "Interrupted"
          (KeyCode::Char('c'), KeyModifiers::CONTROL) | (KeyCode::Char('C'), KeyModifiers::CONTROL) => {
            return Err(HUIError::Interrupted.into());
          },
          // Start with true input; characters and deletions
          (KeyCode::Char(ch), KeyModifiers::SHIFT) | (KeyCode::Char(ch), KeyModifiers::NONE) => {
            partial.push(ch);
            // If the partial is now complete, put it in the buffer
            // NOTE(review): is_char_boundary(0) is always true, so this
            // branch runs on every keypress and `partial` holds exactly the
            // one char just pushed; the "partial char" machinery appears
            // vestigial — confirm before relying on it.
            if partial.is_char_boundary(0) {
              let tmp = chindex;
              chindex += partial.len();
              buffer[lindex].insert(tmp, partial.remove(0));
            }
          },
          (KeyCode::Tab, KeyModifiers::NONE) => {
            partial.push('\t');
            // If the partial is now complete, put it in the buffer
            if partial.is_char_boundary(0) {
              let tmp = chindex;
              chindex += partial.len();
              buffer[lindex].insert(tmp, partial.remove(0));
            }
          },
          (KeyCode::Backspace, KeyModifiers::NONE) | (KeyCode::Char('h'), KeyModifiers::CONTROL) => {
            if chindex == 0 {
              // Join this and preceding line
              if lindex != 0 {
                // Go to end of previous line, remove its newline and append current line
                let tmp = buffer.remove(lindex);
                lindex -= 1;
                buffer[lindex].pop();
                chindex = buffer[lindex].len();
                buffer[lindex].push_str(&tmp);
              }
            }
            else {
              // Just delete preceding character
              chindex = rfind_boundary(&buffer[lindex], chindex);
              buffer[lindex].remove(chindex);
            }
          },
          (KeyCode::Delete, KeyModifiers::NONE) => {
            if chindex >= buffer[lindex].len() - 1 {
              // Join this and following line
              // But only if there is a following line
              if lindex != buffer.len() - 1 {
                // Remove our newline and append next line
                buffer[lindex].pop();
                let tmp = buffer.remove(lindex + 1);
                buffer[lindex].push_str(&tmp);
              }
            }
            else {
              // Just delete following character
              buffer[lindex].remove(chindex);
            }
          },
          (KeyCode::Enter, KeyModifiers::NONE) | (KeyCode::Enter, KeyModifiers::CONTROL) => {
            // If only getting one line, return
            if terminator.is_none() {
              ret = true;
            }
            // Else, add a line
            else {
              // Insert a newline to properly terminate current line
              buffer[lindex].insert(chindex, '\n');
              chindex += 1;
              // Split of the string at current index, inserting the resulting strings into buffer
              let tmp = buffer[lindex].split_off(chindex);
              buffer.insert(lindex + 1, tmp);
              // If the line left behind is now a lone dot on a line, delete it and return
              // Check if we just created the terminating line
              let mut iter = buffer[lindex].chars();
              if iter.next() == terminator && iter.next() == Some('\n') {
                // Remove the terminating line
                buffer.remove(lindex);
                // Check to clear unexpected line created after terminating
                if buffer[lindex] == "\n" {
                  buffer.remove(lindex);
                }
                ret = true;
              }
              // Else increment and reset chindex.
              else {
                lindex += 1;
                chindex = 0;
              }
            }
          }
          // Then we have movement; right/left, up/down, home/end
          (KeyCode::Right, KeyModifiers::NONE) => {
            if chindex == buffer[lindex].len() - 1 {
              // Go to next line
              if buffer.len() - 1 > lindex {
                lindex += 1;
                chindex = 0;
              }
            }
            else {
              // Step one char right, never past the trailing newline
              chindex = find_boundary(
                &buffer[lindex][.. buffer[lindex].len() - 1],
                chindex
              );
            }
          },
          (KeyCode::Left, KeyModifiers::NONE) => {
            if chindex == 0 {
              // Go to previous line
              if lindex > 0 {
                lindex -= 1;
                chindex = buffer[lindex].len() - 1;
              }
            }
            else {
              chindex = rfind_boundary(&buffer[lindex], chindex);
            }
          },
          (KeyCode::Up, KeyModifiers::NONE) | (KeyCode::Down, KeyModifiers::NONE) => {
            // Go back/forth in history if in one-line mode
            if terminator.is_none() {
              // If we are currently in the present, save state before moving
              if hoffset == state.command_history.len() {
                // Save current input line as semi history, unwrap or shouldn't ever be needed
                semi_history = buffer.pop().unwrap_or("\n".to_string());
              }
              else {
                buffer.pop();
              }
              match key.code {
                KeyCode::Up => {
                  // Then move into history
                  hoffset = hoffset.saturating_sub(1);
                },
                KeyCode::Down => {
                  // If not in the present, move forward in history
                  if hoffset < state.command_history.len() { hoffset += 1; }
                },
                _ => (),
              }
              // Read that history entry into the buffer
              buffer.push(
                state.command_history
                  .get(hoffset) // Get history at offset
                  .map(|line| line.clone()) // Convert from &str to String
                  .unwrap_or(semi_history.clone()) // If none we have arrived in the present
              );
              // Set cursor to tail of history entry
              lindex = 0;
              chindex = buffer[0].len() - 1;
            }
            else {
              // First move to the indicated line, if possible
              match key.code {
                KeyCode::Up => {
                  if lindex > 0 { lindex -= 1; }
                  else { chindex = 0; }
                },
                KeyCode::Down => {
                  if lindex < buffer.len() - 1 { lindex += 1; }
                  // If on last line set chindex to max, so goal_chindex keeps cursor at EOL
                  else { chindex = usize::MAX; }
                },
                _ => (),
              }
              // Then try to go to goal_chindex and place chindex within the new line
              match goal_chindex {
                Some(tmp) => { chindex = tmp; },
                None => (),
              }
              // If current chindex is too big, save it as goal and go to nearest valid chindex
              if chindex >= buffer[lindex].len() {
                goal_chindex = Some(chindex);
                chindex = buffer[lindex].len() - 1;
              }
            }
          },
          (KeyCode::Home, KeyModifiers::NONE) => {
            lindex = 0;
            chindex = 0;
          },
          (KeyCode::End, KeyModifiers::NONE) => {
            lindex = buffer.len() - 1;
            chindex = buffer[lindex].len() - 1;
          },
          _ => (), // Ignore unknown codes
        } // End of matching key-codes and modifiers
      }, // End of Key input event matching
      // Ignore key release events, if the terminal even provides them
      Event::Key(_) => (),
    } // End of event match
  } // End of while
  // Before returning print a clean print to leave in the buffer
  // Move up the cursor to overwrite prior input with this input
  if (dists.height - dists.cursor_y) > 0 {
    stdout.queue(crossterm::cursor::MoveUp(dists.height - dists.cursor_y))
      .map_err(HUIError::TerminalIOFailed)?;
  }
  stdout.queue(crossterm::cursor::MoveToColumn(0)).map_err(HUIError::TerminalIOFailed)?;
  // Clear away old print
  stdout.queue(crossterm::terminal::Clear(crossterm::terminal::ClearType::FromCursorDown))
    .map_err(HUIError::TerminalIOFailed)?;
  // Then print
  let syntax = state.syntax_lib.find_syntax_plain_text();
  super::print::internal_print(
    state,
    &syntax,
    &mut buffer.iter().map(|line| ('\0', &line[..])),
    super::print::PrintConf {
      prefix: prefix,
      cursor: None,
      start_line: 0,
      numbered: false,
      literal: false,
      separator: true,
    },
  ).map_err(HUIError::TerminalIOFailed)?;
  // Then flush and return
  stdout.flush().map_err(HUIError::TerminalIOFailed)?;
  Ok(buffer)
}
| rust | MIT | 1b2fc1714e913520d5defd3937499ffdff79d80f | 2026-01-04T20:24:27.089787Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/benchmark/src/main.rs | benchmark/src/main.rs | extern crate jsonpath_lib as jsonpath;
extern crate serde_json;
use serde_json::{json, Value};
/// Benchmark driver: evaluates one filter-expression JSONPath against the
/// classic bookstore document many times so external profilers get a
/// stable signal.
fn main() {
    let json: Value = json!(
    {
        "store": {
            "book": [
                {
                    "category": "reference",
                    "author": "Nigel Rees",
                    "title": "Sayings of the Century",
                    "price": 8.95
                },
                {
                    "category": "fiction",
                    "author": "Evelyn Waugh",
                    "title": "Sword of Honour",
                    "price": 12.99
                },
                {
                    "category": "fiction",
                    "author": "Herman Melville",
                    "title": "Moby Dick",
                    "isbn": "0-553-21311-3",
                    "price": 8.99
                },
                {
                    "category": "fiction",
                    "author": "J. R. R. Tolkien",
                    "title": "The Lord of the Rings",
                    "isbn": "0-395-19395-8",
                    "price": 22.99
                }
            ],
            "bicycle": {
                "color": "red",
                "price": 19.95
            }
        },
        "expensive": 10
    });
    let path = r#"$.store.book[?( (@.price < 100 || @.price > 1) && @.price > 10 )]"#;
    // Repeat the selection; abort loudly if the path ever fails to evaluate.
    for _ in 0..10000 {
        if jsonpath::select(&json, path).is_err() {
            panic!();
        }
    }
} | rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/benchmark/benches/bench.rs | benchmark/benches/bench.rs | #![feature(test)]
extern crate bencher;
extern crate jsonpath_lib as jsonpath;
extern crate serde;
extern crate serde_json;
extern crate test;
use std::io::Read;
use std::rc::Rc;
use jsonpath::{JsonSelector, JsonSelectorMut, PathParser};
use serde::Deserialize;
use serde_json::Value;
use self::test::Bencher;
/// Reads the entire file at `path` into a `String`.
///
/// Panics if the file cannot be opened or read — acceptable for benchmark
/// setup code, where a missing fixture should fail loudly.
fn read_json(path: &str) -> String {
    // fs::read_to_string sizes the buffer from file metadata and replaces
    // the manual File::open + read_to_string dance.
    std::fs::read_to_string(path).unwrap()
}
// Benchmark fixture: the shared example document as a raw string.
fn get_string() -> String {
    read_json("./example.json")
}
// Benchmark fixture: the example document parsed; panics on invalid JSON.
fn get_json() -> Value {
    let string = get_string();
    serde_json::from_str(string.as_str()).unwrap()
}
// The JSONPath expression exercised by every bench in this file.
fn get_path() -> &'static str {
    r#"$..book[?(@.price<30 && @.category=="fiction")]"#
}
// Reuses the higher-order `selector` closure; the path is re-given (and thus
// re-parsed) on every call.
#[bench]
fn bench_selector(b: &mut Bencher) {
    let json = get_json();
    let mut selector = jsonpath::selector(&json);
    b.iter(move || {
        for _ in 1..100 {
            let _ = selector(get_path()).unwrap();
        }
    });
}
// Same as bench_selector but deserializing results into `Value` via selector_as.
#[bench]
fn bench_selector_as(b: &mut Bencher) {
    let json = get_json();
    let mut selector = jsonpath::selector_as::<Value>(&json);
    b.iter(move || {
        for _ in 1..100 {
            let _ = selector(get_path()).unwrap();
        }
    });
}
// One-shot `select` on an already-parsed Value; path parsed per call.
#[bench]
fn bench_select_val(b: &mut Bencher) {
    let json = get_json();
    b.iter(move || {
        for _ in 1..100 {
            let _ = jsonpath::select(&json, get_path()).unwrap();
        }
    });
}
// One-shot `select_as_str` on the raw JSON string (parses the document each call).
#[bench]
fn bench_select_as_str(b: &mut Bencher) {
    let json = get_string();
    b.iter(move || {
        for _ in 1..100 {
            let _ = jsonpath::select_as_str(&json, get_path()).unwrap();
        }
    });
}
// Pre-compiled path template; only `select` runs inside the measured loop.
#[bench]
fn bench_compile(b: &mut Bencher) {
    let json = get_json();
    let template = jsonpath::PathCompiled::compile(get_path()).unwrap();
    b.iter(move || {
        for _ in 1..100 {
            let _ = template.select(&json).unwrap();
        }
    });
}
// `select_as` deserializing each hit into a typed struct.
#[bench]
fn bench_select_as(b: &mut Bencher) {
    let json = get_string();
    #[derive(Deserialize, PartialEq, Debug)]
    struct Book {
        category: String,
        author: String,
        title: String,
        price: f64,
    }
    b.iter(move || {
        for _ in 1..100 {
            let _: Vec<Book> = jsonpath::select_as(&json, r#"$..book[?(@.price<30 && @.category=="fiction")][0]"#).unwrap();
        }
    });
}
// Mutating selector: clones the document and deletes the matched nodes.
#[bench]
fn bench_delete(b: &mut Bencher) {
    let json = get_json();
    let parser = PathParser::compile(get_path()).unwrap();
    let mut selector = JsonSelectorMut::new(parser);
    b.iter(move || {
        for _ in 1..100 {
            let _ = selector.value(json.clone()).delete();
        }
    });
}
// Read-only baseline for bench_delete: same parsed path (shared via Rc),
// fresh selector per pass, no document clone.
#[bench]
fn bench_select_to_compare_with_delete(b: &mut Bencher) {
    let json = &get_json();
    let parser = Rc::new(PathParser::compile(get_path()).unwrap());
    b.iter(move || {
        for _ in 1..100 {
            let mut s = JsonSelector::new_ref(Rc::clone(&parser));
            let _ = s.value(&json);
            let r = s.select();
            if r.is_err() {
                panic!()
            }
        }
    });
} | rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/benchmark/benches/bench_example.rs | benchmark/benches/bench_example.rs | #![feature(test)]
extern crate bencher;
extern crate jsonpath_lib as jsonpath;
extern crate serde;
extern crate serde_json;
extern crate test;
use std::io::Read;
use serde_json::Value;
use self::test::Bencher;
/// Reads the entire file at `path` into a `String`.
///
/// Panics if the file cannot be opened or read — acceptable for benchmark
/// setup code, where a missing fixture should fail loudly.
fn read_json(path: &str) -> String {
    // fs::read_to_string sizes the buffer from file metadata and replaces
    // the manual File::open + read_to_string dance.
    std::fs::read_to_string(path).unwrap()
}
// Benchmark fixture: the shared example document as a raw string.
fn get_string() -> String {
    read_json("./example.json")
}
// Benchmark fixture: the example document parsed; panics on invalid JSON.
fn get_json() -> Value {
    let string = get_string();
    serde_json::from_str(string.as_str()).unwrap()
}
/// Returns the i-th example JSONPath expression used by the benches below.
///
/// Panics if `i` is out of range (valid indices are 0..=15).
fn get_path(i: usize) -> &'static str {
    // A const array avoids allocating a fresh Vec on every call
    // (clippy::useless_vec); the set of paths is fixed at compile time.
    const PATHS: [&str; 16] = [
        "$.store.book[*].author", //0
        "$..author", //1
        "$.store.*", //2
        "$.store..price", //3
        "$..book[2]", //4
        "$..book[-2]", //5
        "$..book[0,1]", //6
        "$..book[:2]", //7
        "$..book[1:2]", //8
        "$..book[-2:]", //9
        "$..book[2:]", //10
        "$..book[?(@.isbn)]", //11
        "$.store.book[?(@.price == 10)]", //12
        "$..*", //13
        "$..book[ ?( (@.price < 13 || $.store.bicycle.price < @.price) && @.price <=10 ) ]", //14
        "$.store.book[?( (@.price < 10 || @.price > 10) && @.price > 10 )]"
    ];
    PATHS[i]
}
// Shared bench driver: evaluates the `index`-th example path against the
// fixture document 99 times per measured iteration.
fn _selector(b: &mut Bencher, index: usize) {
    let json = get_json();
    b.iter(move || {
        for _ in 1..100 {
            // NOTE: the path is recompiled on every pass, so this measures
            // parse + select together, not select alone.
            let parser = jsonpath::PathParser::compile(get_path(index)).unwrap();
            let mut selector = jsonpath::JsonSelector::new(parser);
            selector.value(&json);
            let r = selector.select();
            if r.is_err() {
                panic!()
            }
        }
    });
}
// Generates one #[bench] function per example path index, each delegating
// to `_selector` with that index.
macro_rules! selector {
    ($name:ident, $i:expr) => {
        #[bench]
        fn $name(b: &mut Bencher) { _selector(b, $i); }
    };
}
// One bench per example path; see `get_path` for the expressions.
selector!(example0_1, 0);
selector!(example1_1, 1);
selector!(example2_1, 2);
selector!(example3_1, 3);
selector!(example4_1, 4);
selector!(example5_1, 5);
selector!(example6_1, 6);
selector!(example7_1, 7);
selector!(example8_1, 8);
selector!(example9_1, 9);
selector!(example_10_1, 10);
selector!(example_11_1, 11);
selector!(example_12_1, 12);
selector!(example_13_1, 13);
selector!(example_14_1, 14);
selector!(example_15_1, 15); | rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/lib.rs | src/lib.rs | //! JsonPath implementation written in Rust.
//!
//! # Example
//! ```
//! extern crate jsonpath_lib as jsonpath;
//! #[macro_use] extern crate serde_json;
//! let json_obj = json!({
//! "store": {
//! "book": [
//! {
//! "category": "reference",
//! "author": "Nigel Rees",
//! "title": "Sayings of the Century",
//! "price": 8.95
//! },
//! {
//! "category": "fiction",
//! "author": "Evelyn Waugh",
//! "title": "Sword of Honour",
//! "price": 12.99
//! },
//! {
//! "category": "fiction",
//! "author": "Herman Melville",
//! "title": "Moby Dick",
//! "isbn": "0-553-21311-3",
//! "price": 8.99
//! },
//! {
//! "category": "fiction",
//! "author": "J. R. R. Tolkien",
//! "title": "The Lord of the Rings",
//! "isbn": "0-395-19395-8",
//! "price": 22.99
//! }
//! ],
//! "bicycle": {
//! "color": "red",
//! "price": 19.95
//! }
//! },
//! "expensive": 10
//! });
//!
//! let mut selector = jsonpath::selector(&json_obj);
//!
//! assert_eq!(selector("$.store.book[*].author").unwrap(),
//! vec![
//! "Nigel Rees", "Evelyn Waugh", "Herman Melville", "J. R. R. Tolkien"
//! ]);
//!
//! assert_eq!(selector("$..author").unwrap(),
//! vec![
//! "Nigel Rees", "Evelyn Waugh", "Herman Melville", "J. R. R. Tolkien"
//! ]);
//!
//! assert_eq!(selector("$.store.*").unwrap(),
//! vec![
//! &json!([
//! { "category": "reference", "author": "Nigel Rees", "title": "Sayings of the Century", "price": 8.95 },
//! { "category": "fiction", "author": "Evelyn Waugh", "title": "Sword of Honour", "price": 12.99 },
//! { "category": "fiction", "author": "Herman Melville", "title": "Moby Dick", "isbn": "0-553-21311-3", "price": 8.99 },
//! { "category": "fiction", "author": "J. R. R. Tolkien", "title": "The Lord of the Rings", "isbn": "0-395-19395-8", "price": 22.99 }
//! ]),
//! &json!({ "color": "red", "price": 19.95 })
//! ]);
//!
//! assert_eq!(selector("$.store..price").unwrap(),
//! vec![
//! 8.95, 12.99, 8.99, 22.99, 19.95
//! ]);
//!
//! assert_eq!(selector("$..book[2]").unwrap(),
//! vec![
//! &json!({
//! "category" : "fiction",
//! "author" : "Herman Melville",
//! "title" : "Moby Dick",
//! "isbn" : "0-553-21311-3",
//! "price" : 8.99
//! })
//! ]);
//!
//! assert_eq!(selector("$..book[-2]").unwrap(),
//! vec![
//! &json!({
//! "category" : "fiction",
//! "author" : "Herman Melville",
//! "title" : "Moby Dick",
//! "isbn" : "0-553-21311-3",
//! "price" : 8.99
//! })
//! ]);
//!
//! assert_eq!(selector("$..book[0,1]").unwrap(),
//! vec![
//! &json!({"category" : "reference","author" : "Nigel Rees","title" : "Sayings of the Century","price" : 8.95}),
//! &json!({"category" : "fiction","author" : "Evelyn Waugh","title" : "Sword of Honour","price" : 12.99})
//! ]);
//!
//! assert_eq!(selector("$..book[:2]").unwrap(),
//! vec![
//! &json!({"category" : "reference","author" : "Nigel Rees","title" : "Sayings of the Century","price" : 8.95}),
//! &json!({"category" : "fiction","author" : "Evelyn Waugh","title" : "Sword of Honour","price" : 12.99})
//! ]);
//!
//! assert_eq!(selector("$..book[:2]").unwrap(),
//! vec![
//! &json!({"category" : "reference","author" : "Nigel Rees","title" : "Sayings of the Century","price" : 8.95}),
//! &json!({"category" : "fiction","author" : "Evelyn Waugh","title" : "Sword of Honour","price" : 12.99})
//! ]);
//!
//! assert_eq!(selector("$..book[?(@.isbn)]").unwrap(),
//! vec![
//! &json!({"category" : "fiction","author" : "Herman Melville","title" : "Moby Dick","isbn" : "0-553-21311-3","price" : 8.99}),
//! &json!({"category" : "fiction","author" : "J. R. R. Tolkien","title" : "The Lord of the Rings","isbn" : "0-395-19395-8","price" : 22.99})
//! ]);
//!
//! assert_eq!(selector("$.store.book[?(@.price < 10)]").unwrap(),
//! vec![
//! &json!({"category" : "reference","author" : "Nigel Rees","title" : "Sayings of the Century","price" : 8.95}),
//! &json!({"category" : "fiction","author" : "Herman Melville","title" : "Moby Dick","isbn" : "0-553-21311-3","price" : 8.99})
//! ]);
//! ```
extern crate core;
#[macro_use]
extern crate log;
extern crate serde;
extern crate serde_json;
use serde_json::Value;
#[allow(deprecated)]
use parser::Node;
#[allow(deprecated)]
pub use parser::Parser;
#[allow(deprecated)]
pub use select::{Selector, SelectorMut};
#[deprecated(
since = "0.4.0",
note = "It will be move to common module. since 0.5"
)]
pub use select::JsonPathError;
pub use paths::PathParser;
pub use selector::{JsonSelector, JsonSelectorMut};
use std::rc::Rc;
#[doc(hidden)]
#[deprecated(
since = "0.4.0",
note = "'ffi' is moved to another location like 'wasm' from version 0.5.x"
)]
mod ffi;
#[doc(hidden)]
mod parser;
#[doc(hidden)]
mod select;
mod paths;
mod selector;
impl From<&paths::TokenError> for JsonPathError {
    /// Convert a tokenizer error into the public `JsonPathError::Path` form.
    fn from(e: &paths::TokenError) -> Self {
        // Eof becomes a plain marker string; positional errors carry the
        // offending offset in the message.
        let msg = match e {
            paths::TokenError::Eof => "Eof".to_string(),
            paths::TokenError::Position(pos) => ["Position:", &pos.to_string()].concat(),
        };
        JsonPathError::Path(msg)
    }
}
/// It is a high-order function. it compile a jsonpath and then returns a closure that has JSON as argument. if you need to reuse a jsonpath, it is good for performance.
///
/// ```rust
/// extern crate jsonpath_lib as jsonpath;
/// #[macro_use] extern crate serde_json;
///
/// let mut first_firend = jsonpath::compile("$..friends[0]");
///
/// let json_obj = json!({
/// "school": {
/// "friends": [
/// {"name": "친구1", "age": 20},
/// {"name": "친구2", "age": 20}
/// ]
/// },
/// "friends": [
/// {"name": "친구3", "age": 30},
/// {"name": "친구4"}
/// ]});
///
/// let json = first_firend(&json_obj).unwrap();
///
/// assert_eq!(json, vec![
/// &json!({"name": "친구3", "age": 30}),
/// &json!({"name": "친구1", "age": 20})
/// ]);
/// ```
#[deprecated(
    since = "0.2.5",
    note = "Please use the PathCompiled::compile function instead. It will be removed from 0.4.1"
)]
pub fn compile(
    path: &str
) -> impl FnMut(&Value) -> Result<Vec<&Value>, JsonPathError> {
    // Parse once up front; the returned closure reuses the outcome on every call.
    #[allow(deprecated)]
    let compiled = parser::Parser::compile(path);
    move |json| {
        match compiled.as_ref() {
            Ok(node) => {
                #[allow(deprecated)]
                let mut selector = Selector::default();
                selector.compiled_path(node).value(json).select()
            },
            // Compilation failed: surface the parse error on every invocation.
            Err(e) => Err(JsonPathError::Path(e.to_string())),
        }
    }
}
/// It is a higher-order function. It returns a closure that takes a jsonpath string as argument. You can use different jsonpaths for one JSON object.
///
/// ```rust
/// extern crate jsonpath_lib as jsonpath;
/// #[macro_use] extern crate serde_json;
///
/// let json_obj = json!({
/// "school": {
/// "friends": [
/// {"name": "친구1", "age": 20},
/// {"name": "친구2", "age": 20}
/// ]
/// },
/// "friends": [
/// {"name": "친구3", "age": 30},
/// {"name": "친구4"}
/// ]});
///
/// let mut selector = jsonpath::selector(&json_obj);
///
/// let json = selector("$..friends[0]").unwrap();
///
/// assert_eq!(json, vec![
/// &json!({"name": "친구3", "age": 30}),
/// &json!({"name": "친구1", "age": 20})
/// ]);
///
/// let json = selector("$..friends[1]").unwrap();
///
/// assert_eq!(json, vec![
/// &json!({"name": "친구4"}),
/// &json!({"name": "친구2", "age": 20})
/// ]);
/// ```
#[allow(clippy::needless_lifetimes)]
pub fn selector<'a>(
    json: &'a Value
) -> impl FnMut(&'a str) -> Result<Vec<&'a Value>, JsonPathError> {
    // One selector instance is reused across calls; only the parser and the
    // intermediate selection state are reset per path.
    let mut json_selector = JsonSelector::default();
    move |path| {
        let parser = match PathParser::compile(path) {
            Ok(parser) => parser,
            Err(e) => return Err(JsonPathError::from(&e)),
        };
        json_selector.reset_parser(parser);
        json_selector.value(json);
        json_selector.reset_value();
        json_selector.select()
    }
}
/// It is the same as the `selector` function, but it deserializes the result as the given type `T`.
///
/// ```rust
/// extern crate jsonpath_lib as jsonpath;
/// extern crate serde;
/// #[macro_use] extern crate serde_json;
///
/// use serde::{Deserialize, Serialize};
///
/// let json_obj = json!({
/// "school": {
/// "friends": [
/// {"name": "친구1", "age": 20},
/// {"name": "친구2", "age": 20}
/// ]
/// },
/// "friends": [
/// {"name": "친구3", "age": 30},
/// {"name": "친구4"}
/// ]});
///
/// #[derive(Deserialize, PartialEq, Debug)]
/// struct Friend {
/// name: String,
/// age: Option<u8>,
/// }
///
/// let mut selector = jsonpath::selector_as::<Friend>(&json_obj);
///
/// let json = selector("$..friends[0]").unwrap();
///
/// let ret = vec!(
/// Friend { name: "친구3".to_string(), age: Some(30) },
/// Friend { name: "친구1".to_string(), age: Some(20) }
/// );
/// assert_eq!(json, ret);
///
/// let json = selector("$..friends[1]").unwrap();
///
/// let ret = vec!(
/// Friend { name: "친구4".to_string(), age: None },
/// Friend { name: "친구2".to_string(), age: Some(20) }
/// );
///
/// assert_eq!(json, ret);
/// ```
pub fn selector_as<'a, T: serde::de::DeserializeOwned>(
    json: &'a Value
) -> impl FnMut(&'a str) -> Result<Vec<T>, JsonPathError> {
    // Bind the document once; each call only swaps in a freshly compiled path
    // and clears the previous selection before deserializing.
    let mut json_selector = JsonSelector::default();
    let _ = json_selector.value(json);
    move |path: &str| {
        let parser = match PathParser::compile(path) {
            Ok(parser) => parser,
            Err(e) => return Err(JsonPathError::from(&e)),
        };
        json_selector.reset_parser(parser);
        json_selector.reset_value();
        json_selector.select_as()
    }
}
/// It is a simple select function. but it compile the jsonpath argument every time.
///
/// ```rust
/// extern crate jsonpath_lib as jsonpath;
/// #[macro_use] extern crate serde_json;
///
/// let json_obj = json!({
/// "school": {
/// "friends": [
/// {"name": "친구1", "age": 20},
/// {"name": "친구2", "age": 20}
/// ]
/// },
/// "friends": [
/// {"name": "친구3", "age": 30},
/// {"name": "친구4"}
/// ]});
///
/// let json = jsonpath::select(&json_obj, "$..friends[0]").unwrap();
///
/// assert_eq!(json, vec![
/// &json!({"name": "친구3", "age": 30}),
/// &json!({"name": "친구1", "age": 20})
/// ]);
/// ```
pub fn select<'a>(
    json: &'a Value,
    path: &'a str,
) -> Result<Vec<&'a Value>, JsonPathError> {
    // Compile the path, attach the document, and run the selection in one shot.
    let parser = match PathParser::compile(path) {
        Ok(parser) => parser,
        Err(e) => return Err(JsonPathError::from(&e)),
    };
    let mut selector = JsonSelector::new(parser);
    selector.value(json);
    selector.select()
}
/// It is the same as the `select` function, but it returns the result as a string.
///
/// ```rust
/// extern crate jsonpath_lib as jsonpath;
/// #[macro_use] extern crate serde_json;
///
/// let ret = jsonpath::select_as_str(r#"
/// {
/// "school": {
/// "friends": [
/// {"name": "친구1", "age": 20},
/// {"name": "친구2", "age": 20}
/// ]
/// },
/// "friends": [
/// {"name": "친구3", "age": 30},
/// {"name": "친구4"}
/// ]
/// }
/// "#, "$..friends[0]").unwrap();
///
/// assert_eq!(ret, r#"[{"name":"친구3","age":30},{"name":"친구1","age":20}]"#);
/// ```
pub fn select_as_str(
json_str: &str,
path: &str,
) -> Result<String, JsonPathError> {
let json = serde_json::from_str(json_str)
.map_err(|e| JsonPathError::Serde(e.to_string()))?;
let parser =
PathParser::compile(path).map_err(|e| JsonPathError::from(&e))?;
let ret = JsonSelector::new(parser).value(&json).select()?;
serde_json::to_string(&ret).map_err(|e| JsonPathError::Serde(e.to_string()))
}
/// It is the same as the `select` function, but it deserializes the result as the given type `T`.
///
/// ```rust
/// extern crate jsonpath_lib as jsonpath;
/// extern crate serde;
/// #[macro_use] extern crate serde_json;
///
/// use serde::{Deserialize, Serialize};
///
/// #[derive(Deserialize, PartialEq, Debug)]
/// struct Person {
/// name: String,
/// age: u8,
/// phones: Vec<String>,
/// }
///
/// let ret: Vec<Person> = jsonpath::select_as(r#"
/// {
/// "person":
/// {
/// "name": "Doe John",
/// "age": 44,
/// "phones": [
/// "+44 1234567",
/// "+44 2345678"
/// ]
/// }
/// }
/// "#, "$.person").unwrap();
///
/// let person = Person {
/// name: "Doe John".to_string(),
/// age: 44,
/// phones: vec!["+44 1234567".to_string(), "+44 2345678".to_string()],
/// };
///
/// assert_eq!(ret[0], person);
/// ```
pub fn select_as<T: serde::de::DeserializeOwned>(
json_str: &str,
path: &str,
) -> Result<Vec<T>, JsonPathError> {
let json = serde_json::from_str(json_str)
.map_err(|e| JsonPathError::Serde(e.to_string()))?;
let parser =
PathParser::compile(path).map_err(|e| JsonPathError::from(&e))?;
JsonSelector::new(parser).value(&json).select_as()
}
/// Delete(= replace with null) the JSON property using the jsonpath.
///
/// ```rust
/// extern crate jsonpath_lib as jsonpath;
/// #[macro_use] extern crate serde_json;
///
/// let json_obj = json!({
/// "school": {
/// "friends": [
/// {"name": "친구1", "age": 20},
/// {"name": "친구2", "age": 20}
/// ]
/// },
/// "friends": [
/// {"name": "친구3", "age": 30},
/// {"name": "친구4"}
/// ]});
///
/// let ret = jsonpath::delete(json_obj, "$..[?(20 == @.age)]").unwrap();
///
/// assert_eq!(ret, json!({
/// "school": {
/// "friends": [
/// null,
/// null
/// ]
/// },
/// "friends": [
/// {"name": "친구3", "age": 30},
/// {"name": "친구4"}
/// ]}));
/// ```
pub fn delete(
    value: Value,
    path: &str,
) -> Result<Value, JsonPathError> {
    // Deletion is replacement with `Value::Null`, performed by the mutable selector.
    let parser =
        PathParser::compile(path).map_err(|e| JsonPathError::from(&e))?;
    let mut selector = JsonSelectorMut::new(parser);
    selector.value(value);
    let result = selector.delete()?;
    Ok(result.take().unwrap_or(Value::Null))
}
/// Select JSON properties using a jsonpath and transform the result and then replace it. via closure that implements `FnMut` you can transform the selected results.
///
/// ```rust
/// extern crate jsonpath_lib as jsonpath;
/// #[macro_use] extern crate serde_json;
///
/// use serde_json::Value;
///
/// let json_obj = json!({
/// "school": {
/// "friends": [
/// {"name": "친구1", "age": 20},
/// {"name": "친구2", "age": 20}
/// ]
/// },
/// "friends": [
/// {"name": "친구3", "age": 30},
/// {"name": "친구4"}
/// ]});
///
/// let ret = jsonpath::replace_with(json_obj, "$..[?(@.age == 20)].age", &mut |v| {
/// let age = if let Value::Number(n) = v {
/// n.as_u64().unwrap() * 2
/// } else {
/// 0
/// };
///
/// Some(json!(age))
/// }).unwrap();
///
/// assert_eq!(ret, json!({
/// "school": {
/// "friends": [
/// {"name": "친구1", "age": 40},
/// {"name": "친구2", "age": 40}
/// ]
/// },
/// "friends": [
/// {"name": "친구3", "age": 30},
/// {"name": "친구4"}
/// ]}));
/// ```
pub fn replace_with<F>(
    value: Value,
    path: &str,
    fun: &mut F,
) -> Result<Value, JsonPathError>
where
    F: FnMut(Value) -> Option<Value>,
{
    // Each selected node is handed to `fun`; a `None` return removes the node.
    let parser =
        PathParser::compile(path).map_err(|e| JsonPathError::from(&e))?;
    let mut selector = JsonSelectorMut::new(parser);
    selector.value(value);
    let result = selector.replace_with(fun)?;
    Ok(result.take().unwrap_or(Value::Null))
}
/// A pre-compiled expression.
///
/// Calling the select function of this struct will re-use the existing, compiled expression.
///
/// ## Example
///
/// ```rust
/// extern crate jsonpath_lib as jsonpath;
/// #[macro_use] extern crate serde_json;
///
/// let mut first_friend = jsonpath::Compiled::compile("$..friends[0]").unwrap();
///
/// let json_obj = json!({
/// "school": {
/// "friends": [
/// {"name": "친구1", "age": 20},
/// {"name": "친구2", "age": 20}
/// ]
/// },
/// "friends": [
/// {"name": "친구3", "age": 30},
/// {"name": "친구4"}
/// ]});
///
/// // call a first time
///
/// let json = first_friend.select(&json_obj).unwrap();
///
/// assert_eq!(json, vec![
/// &json!({"name": "친구3", "age": 30}),
/// &json!({"name": "친구1", "age": 20})
/// ]);
///
/// // call a second time
///
/// let json = first_friend.select(&json_obj).unwrap();
///
/// assert_eq!(json, vec![
/// &json!({"name": "친구3", "age": 30}),
/// &json!({"name": "친구1", "age": 20})
/// ]);
/// ```
#[derive(Clone, Debug)]
#[deprecated(since = "0.4.0", note = "Please use PathCompiled.")]
pub struct Compiled {
    // Legacy AST produced by the deprecated `parser::Parser`.
    #[allow(deprecated)]
    node: Node,
}
#[allow(deprecated)]
impl Compiled {
    /// Compile a path expression and return a compiled instance.
    ///
    /// If parsing the path fails, it will return an error.
    pub fn compile(path: &str) -> Result<Self, String> {
        parser::Parser::compile(path).map(|node| Self { node })
    }
    /// Execute the select operation on the pre-compiled path.
    pub fn select<'a>(
        &self,
        value: &'a Value,
    ) -> Result<Vec<&'a Value>, JsonPathError> {
        let mut sel = Selector::default();
        sel.compiled_path(&self.node).value(value).select()
    }
}
/// A pre-compiled expression.
///
/// Calling the select function of this struct will re-use the existing, compiled expression.
///
/// ## Example
///
/// ```rust
/// extern crate jsonpath_lib as jsonpath;
/// #[macro_use] extern crate serde_json;
///
/// let mut first_friend = jsonpath::PathCompiled::compile("$..friends[0]").unwrap();
///
/// let json_obj = json!({
/// "school": {
/// "friends": [
/// {"name": "친구1", "age": 20},
/// {"name": "친구2", "age": 20}
/// ]
/// },
/// "friends": [
/// {"name": "친구3", "age": 30},
/// {"name": "친구4"}
/// ]});
///
/// // call a first time
///
/// let json = first_friend.select(&json_obj).unwrap();
///
/// assert_eq!(json, vec![
/// &json!({"name": "친구3", "age": 30}),
/// &json!({"name": "친구1", "age": 20})
/// ]);
///
/// // call a second time
///
/// let json = first_friend.select(&json_obj).unwrap();
///
/// assert_eq!(json, vec![
/// &json!({"name": "친구3", "age": 30}),
/// &json!({"name": "친구1", "age": 20})
/// ]);
/// ```
#[derive(Clone, Debug)]
pub struct PathCompiled<'a> {
    // Shared, compiled path; `Rc` lets `select` hand the parser to a
    // per-call `JsonSelector` without re-parsing or cloning the AST.
    parser: Rc<PathParser<'a>>,
}
impl<'a> PathCompiled<'a> {
    /// Compile a path expression and return a compiled instance.
    ///
    /// If parsing the path fails, it will return an error.
    pub fn compile(path: &str) -> Result<PathCompiled, JsonPathError> {
        let parser = PathParser::compile(path)
            .map_err(|e| JsonPathError::from(&e))?;
        let parser = Rc::new(parser);
        Ok(PathCompiled { parser })
    }
    /// Execute the select operation on the pre-compiled path.
    pub fn select(
        &self,
        value: &'a Value,
    ) -> Result<Vec<&'a Value>, JsonPathError> {
        // A fresh selector per call keeps `select` usable from `&self`.
        let mut selector = JsonSelector::new_ref(Rc::clone(&self.parser));
        selector.value(value);
        selector.select()
    }
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/selector/selector_impl.rs | src/selector/selector_impl.rs | use std::collections::HashSet;
use std::rc::Rc;
use serde_json::map::Entry;
use serde_json::{Number, Value};
use super::utils;
use crate::paths::{tokens::*, ParserTokenHandler, PathParser, StrRange};
use crate::JsonPathError;
use super::terms::*;
#[derive(Debug, Default)]
pub struct JsonSelector<'a> {
    // Compiled path; `None` until a parser is attached via `new`/`reset_parser`.
    parser: Option<Rc<PathParser<'a>>>,
    // Root JSON document the selection runs against.
    value: Option<&'a Value>,
    // Parse-token stack driving the visitor state machine.
    tokens: Vec<ParseToken>,
    // Values selected so far; rebuilt as tokens are handled.
    current: Option<Vec<&'a Value>>,
    // Nested selectors spawned for absolute paths inside filter expressions.
    selectors: Vec<JsonSelector<'a>>,
    // Pending filter-expression terms.
    selector_filter: FilterTerms<'a>,
}
impl<'a> JsonSelector<'a> {
    /// Create a selector that owns its freshly wrapped parser.
    pub fn new(parser: PathParser<'a>) -> Self {
        JsonSelector {
            parser: Some(Rc::new(parser)),
            value: None,
            tokens: Vec::new(),
            current: None,
            selectors: Vec::new(),
            selector_filter: FilterTerms(Vec::new()),
        }
    }
    /// Create a selector that shares an already compiled parser.
    pub fn new_ref(parser: Rc<PathParser<'a>>) -> Self {
        JsonSelector {
            parser: Some(parser),
            value: None,
            tokens: Vec::new(),
            current: None,
            selectors: Vec::new(),
            selector_filter: FilterTerms(Vec::new()),
        }
    }
    /// Replace the compiled path, taking ownership of `parser`.
    pub fn reset_parser(
        &mut self,
        parser: PathParser<'a>,
    ) -> &mut Self {
        self.parser = Some(Rc::new(parser));
        self
    }
    /// Replace the compiled path with a shared parser.
    pub fn reset_parser_ref(
        &mut self,
        parser: Rc<PathParser<'a>>,
    ) -> &mut Self {
        self.parser = Some(parser);
        self
    }
    /// Clear the in-progress selection so the selector can be rerun.
    /// Note: this resets `current`, not the bound document (`value`).
    pub fn reset_value(&mut self) -> &mut Self {
        self.current = None;
        self
    }
    /// Bind the JSON document the selection will run against.
    pub fn value(
        &mut self,
        v: &'a Value,
    ) -> &mut Self {
        self.value = Some(v);
        self
    }
    /// Drive the parser over this selector (it calls back into `handle`).
    /// The parser is temporarily taken out of `self` so it can borrow `self`
    /// mutably during the walk, then put back. Parse errors are ignored here;
    /// the result surfaces via an empty/absent `current`.
    fn _select(&mut self) -> Result<(), JsonPathError> {
        let parser = self.parser.take();
        if let Some(parser) = parser.as_ref() {
            let _ = parser.parse(self);
        }
        self.parser = parser;
        Ok(())
    }
    /// Run the selection and deserialize every match into `T`.
    /// Fails with `Serde` on the first value that does not fit `T`,
    /// or `EmptyValue` when nothing was selected at all.
    pub fn select_as<T: serde::de::DeserializeOwned>(
        &mut self
    ) -> Result<Vec<T>, JsonPathError> {
        self._select()?;
        match &self.current {
            Some(vec) => {
                let mut ret = Vec::new();
                for v in vec {
                    match T::deserialize(*v) {
                        Ok(v) => ret.push(v),
                        Err(e) => {
                            return Err(JsonPathError::Serde(e.to_string()));
                        },
                    }
                }
                Ok(ret)
            },
            _ => Err(JsonPathError::EmptyValue),
        }
    }
    /// Run the selection and serialize the matches to a JSON array string.
    pub fn select_as_str(&mut self) -> Result<String, JsonPathError> {
        self._select()?;
        match &self.current {
            Some(r) => Ok(serde_json::to_string(r)
                .map_err(|e| JsonPathError::Serde(e.to_string()))?),
            _ => Err(JsonPathError::EmptyValue),
        }
    }
    /// Run the selection and return the matched value references.
    pub fn select(&mut self) -> Result<Vec<&'a Value>, JsonPathError> {
        self._select()?;
        match &self.current {
            Some(r) => Ok(r.to_vec()),
            _ => Err(JsonPathError::EmptyValue),
        }
    }
    /// When an absolute path (`$...`) appears inside a filter, tokens are
    /// routed to a nested selector instead of this one. Returns `true` if
    /// the token was consumed by a nested selector. On `Absolute`/`Relative`/
    /// `Filter` boundaries the innermost nested selector is finished and its
    /// result pushed as a term onto the next selector's filter stack.
    fn compute_absolute_path_filter<F>(
        &mut self,
        token: &ParseToken,
        parse_value_reader: &F,
    ) -> bool
    where
        F: Fn(&StrRange) -> &'a str,
    {
        if !self.selectors.is_empty() {
            match token {
                ParseToken::Absolute
                | ParseToken::Relative
                | ParseToken::Filter(_) => {
                    let selector = self.selectors.pop().unwrap();
                    if let Some(current) = &selector.current {
                        let term = current.into();
                        if let Some(s) = self.selectors.last_mut() {
                            s.selector_filter.push_term(Some(term));
                        } else {
                            self.selector_filter.push_term(Some(term));
                        }
                    } else {
                        // A nested selector always starts with `current` seeded
                        // from the root, so it cannot be `None` here.
                        unreachable!()
                    }
                },
                _ => {},
            }
        }
        if self.selectors.is_empty() {
            return false;
        }
        self.selectors
            .last_mut()
            .unwrap()
            .handle(token, parse_value_reader);
        true
    }
}
impl<'a> JsonSelector<'a> {
    /// Handle `$`: seed `current` with the root value, or — when a selection
    /// is already in progress (absolute path inside a filter) — spawn a
    /// nested selector rooted at the document root.
    fn visit_absolute(&mut self) {
        if self.current.is_some() {
            if let Some(value) = self.value {
                let selector = JsonSelector {
                    parser: None,
                    value: Some(value),
                    tokens: Vec::new(),
                    current: Some(vec![value]),
                    selectors: Vec::new(),
                    selector_filter: FilterTerms(Vec::new()),
                };
                self.selectors.push(selector);
            }
            return;
        }
        if let Some(v) = &self.value {
            self.current = Some(vec![v]);
        }
    }
    /// Handle `@`: flush a pending `..[` combination (collect all
    /// descendants) and open a fresh filter context.
    fn visit_relative(&mut self) {
        if let Some(ParseToken::Array) = self.tokens.last() {
            let array_token = self.tokens.pop();
            if let Some(ParseToken::Leaves) = self.tokens.last() {
                self.tokens.pop();
                self.current =
                    self.selector_filter.collect_all(self.current.take());
            }
            self.tokens.push(array_token.unwrap());
        }
        self.selector_filter.new_filter_context();
    }
    /// Handle `]`: resolve whatever term the bracket expression produced
    /// against `current`, honouring a `..`-prefixed bracket.
    fn visit_array_eof(&mut self) {
        if self.is_last_before_token_match(ParseToken::Array) {
            if let Some(Some(e)) = self.selector_filter.pop_term() {
                // `['key']` — a quoted key directly inside the bracket.
                if let ExprTerm::String(key) = e {
                    self.current = self
                        .selector_filter
                        .filter_next_with_str(self.current.take(), key);
                    self.tokens.pop();
                    return;
                }
                self.selector_filter.push_term(Some(e));
            }
        }
        if self.is_last_before_token_match(ParseToken::Leaves) {
            // `..[...]` — apply the bracket term recursively to all descendants.
            self.tokens.pop();
            self.tokens.pop();
            if let Some(Some(e)) = self.selector_filter.pop_term() {
                let selector_filter_consumed = match e {
                    ExprTerm::Number(n) => {
                        self.current =
                            self.selector_filter.collect_all_with_num(
                                self.current.take(),
                                utils::to_f64(&n),
                            );
                        self.selector_filter.pop_term();
                        true
                    },
                    ExprTerm::String(key) => {
                        self.current = self
                            .selector_filter
                            .collect_all_with_str(self.current.take(), key);
                        self.selector_filter.pop_term();
                        true
                    },
                    _ => {
                        self.selector_filter.push_term(Some(e));
                        false
                    },
                };
                if selector_filter_consumed {
                    return;
                }
            }
        }
        if let Some(Some(e)) = self.selector_filter.pop_term() {
            match e {
                ExprTerm::Number(n) => {
                    self.current = self.selector_filter.collect_next_with_num(
                        self.current.take(),
                        utils::to_f64(&n),
                    );
                },
                ExprTerm::String(key) => {
                    self.current = self
                        .selector_filter
                        .collect_next_with_str(self.current.take(), &[key]);
                },
                ExprTerm::Json(rel, _, v) => {
                    // Prefer the relative (parent) set when present so the
                    // filter keeps the objects that matched, not the leaves.
                    if v.is_empty() {
                        self.current = Some(Vec::new());
                    } else if let Some(vec) = rel {
                        self.current = Some(vec);
                    } else {
                        self.current = Some(v);
                    }
                },
                ExprTerm::Bool(false) => {
                    self.current = Some(vec![]);
                },
                // `Bool(true)` keeps the current selection untouched.
                _ => {},
            }
        }
        self.tokens.pop();
    }
    /// True when the token *below* the top of the stack equals `token`.
    fn is_last_before_token_match(
        &mut self,
        token: ParseToken,
    ) -> bool {
        if self.tokens.len() > 1 {
            return token == self.tokens[self.tokens.len() - 2];
        }
        false
    }
    /// Handle `*`: expand either all descendants (`..*`) or all direct
    /// children (`.*` / `[*]`) of the current selection.
    fn visit_all(&mut self) {
        if let Some(ParseToken::Array) = self.tokens.last() {
            self.tokens.pop();
        }
        match self.tokens.last() {
            Some(ParseToken::Leaves) => {
                self.tokens.pop();
                self.current =
                    self.selector_filter.collect_all(self.current.take());
            },
            Some(ParseToken::In) => {
                self.tokens.pop();
                self.current =
                    self.selector_filter.collect_next_all(self.current.take());
            },
            _ => {
                self.current =
                    self.selector_filter.collect_next_all(self.current.take());
            },
        }
    }
    /// Handle a single key: inside a bracket it becomes a filter term;
    /// otherwise it narrows the selection (`.key` or `..key`), either by
    /// collecting (no active filter) or filtering (inside a filter).
    fn visit_key(
        &mut self,
        key: &'a str,
    ) {
        if let Some(ParseToken::Array) = self.tokens.last() {
            self.selector_filter.push_term(Some(ExprTerm::String(key)));
            return;
        }
        if let Some(t) = self.tokens.pop() {
            if self.selector_filter.is_term_empty() {
                match t {
                    ParseToken::Leaves => {
                        self.current = self
                            .selector_filter
                            .collect_all_with_str(self.current.take(), key)
                    },
                    ParseToken::In => {
                        self.current = self
                            .selector_filter
                            .collect_next_with_str(self.current.take(), &[key])
                    },
                    _ => {},
                }
            } else {
                match t {
                    ParseToken::Leaves => {
                        self.current = self
                            .selector_filter
                            .filter_all_with_str(self.current.take(), key);
                    },
                    ParseToken::In => {
                        self.current = self
                            .selector_filter
                            .filter_next_with_str(self.current.take(), key);
                    },
                    _ => {},
                }
            }
        }
    }
    /// Handle a key union `['a','b',...]`; only valid outside a filter.
    fn visit_keys(
        &mut self,
        keys: &[&'a str],
    ) {
        if !self.selector_filter.is_term_empty() {
            unimplemented!("keys in filter");
        }
        if let Some(ParseToken::Array) = self.tokens.pop() {
            self.current = self
                .selector_filter
                .collect_next_with_str(self.current.take(), keys);
        } else {
            unreachable!();
        }
    }
    /// Handle a binary filter operator: pop right then left operand
    /// (an empty slot means "the current selection"), apply the
    /// comparison, and push the resulting term back.
    fn visit_filter(
        &mut self,
        ft: &FilterToken,
    ) {
        let right = match self.selector_filter.pop_term() {
            Some(Some(right)) => right,
            Some(None) => ExprTerm::Json(
                None,
                None,
                match &self.current {
                    Some(current) => current.to_vec(),
                    _ => unreachable!(),
                },
            ),
            _ => ExprTerm::Json(None, None, vec![]), //panic!("empty term right"),
        };
        let mut left = match self.selector_filter.pop_term() {
            Some(Some(left)) => left,
            Some(None) => ExprTerm::Json(
                None,
                None,
                match &self.current {
                    Some(current) => current.to_vec(),
                    _ => unreachable!(),
                },
            ),
            _ => ExprTerm::Json(None, None, vec![]), //panic!("empty term left"),
        };
        let expr = match ft {
            FilterToken::Equal => left.eq_(right),
            FilterToken::NotEqual => left.ne_(right),
            FilterToken::Greater => left.gt(right),
            FilterToken::GreaterOrEqual => left.ge(right),
            FilterToken::Little => left.lt(right),
            FilterToken::LittleOrEqual => left.le(right),
            FilterToken::And => left.and(right),
            FilterToken::Or => left.or(right),
        };
        self.selector_filter.push_term(Some(expr));
    }
    /// Handle a slice `[from:to:step]`; negative bounds are normalized via
    /// `utils::abs_index`, missing bounds default to the full array, and a
    /// missing step defaults to 1. Only valid outside a filter.
    fn visit_range(
        &mut self,
        from: &Option<isize>,
        to: &Option<isize>,
        step: &Option<usize>,
    ) {
        if !self.selector_filter.is_term_empty() {
            unimplemented!("range syntax in filter");
        }
        if let Some(ParseToken::Array) = self.tokens.pop() {
            let mut tmp = Vec::new();
            if let Some(current) = &self.current {
                for v in current {
                    if let Value::Array(vec) = v {
                        let from = if let Some(from) = from {
                            utils::abs_index(*from, vec.len())
                        } else {
                            0
                        };
                        let to = if let Some(to) = to {
                            utils::abs_index(*to, vec.len())
                        } else {
                            vec.len()
                        };
                        for i in (from..to).step_by(match step {
                            Some(step) => *step,
                            _ => 1,
                        }) {
                            if let Some(v) = vec.get(i) {
                                tmp.push(v);
                            }
                        }
                    }
                }
            }
            self.current = Some(tmp);
        } else {
            unreachable!();
        }
    }
    /// Handle an index union `[0,2,-1]`; negative indices count from the
    /// end. Only valid outside a filter.
    fn visit_union(
        &mut self,
        indices: &[isize],
    ) {
        if !self.selector_filter.is_term_empty() {
            unimplemented!("union syntax in filter");
        }
        if let Some(ParseToken::Array) = self.tokens.pop() {
            let mut tmp = Vec::new();
            if let Some(current) = &self.current {
                for v in current {
                    if let Value::Array(vec) = v {
                        for i in indices {
                            if let Some(v) =
                                vec.get(utils::abs_index(*i, vec.len()))
                            {
                                tmp.push(v);
                            }
                        }
                    }
                }
            }
            self.current = Some(tmp);
        } else {
            unreachable!();
        }
    }
}
impl<'a> ParserTokenHandler<'a> for JsonSelector<'a> {
    /// Parser callback: dispatch each parse token to the matching visitor.
    /// Tokens are first offered to any nested selector (absolute path inside
    /// a filter); structural tokens (`In`/`Leaves`/`Array`) are pushed onto
    /// the stack for later visitors to consume.
    fn handle<F>(
        &mut self,
        token: &ParseToken,
        parse_value_reader: &F,
    ) where
        F: Fn(&StrRange) -> &'a str,
    {
        debug!("token: {:?}, stack: {:?}", token, self.tokens);
        if self.compute_absolute_path_filter(token, parse_value_reader) {
            return;
        }
        match token {
            ParseToken::Absolute => self.visit_absolute(),
            ParseToken::Relative => self.visit_relative(),
            ParseToken::In | ParseToken::Leaves | ParseToken::Array => {
                self.tokens.push(token.clone());
            },
            ParseToken::ArrayEof => self.visit_array_eof(),
            ParseToken::All => self.visit_all(),
            ParseToken::Bool(b) => {
                self.selector_filter.push_term(Some(ExprTerm::Bool(*b)));
            },
            ParseToken::Key(s) => {
                // Keys arrive as ranges into the original path string.
                let key = parse_value_reader(s);
                self.visit_key(key);
            },
            ParseToken::Keys(keys) => {
                let keys: Vec<&str> =
                    keys.iter().map(parse_value_reader).collect();
                self.visit_keys(&keys)
            },
            ParseToken::Number(v) => {
                self.selector_filter.push_term(Some(ExprTerm::Number(
                    Number::from_f64(*v).unwrap(),
                )));
            },
            ParseToken::Filter(ref ft) => self.visit_filter(ft),
            ParseToken::Range(from, to, step) => {
                self.visit_range(from, to, step)
            },
            ParseToken::Union(indices) => self.visit_union(indices),
            ParseToken::Eof => {
                debug!("visit_token eof");
            },
        }
    }
}
#[derive(Default)]
pub struct JsonSelectorMut<'a> {
    // Owned document being mutated; taken out via `take()` when done.
    value: Option<Value>,
    // Shared, compiled path used by the internal read-only selection pass.
    parser: Option<Rc<PathParser<'a>>>,
}
impl<'a> JsonSelectorMut<'a> {
    /// Create a mutable selector owning a freshly wrapped parser.
    pub fn new(parser: PathParser<'a>) -> Self {
        Self::new_ref(Rc::new(parser))
    }
    /// Create a mutable selector sharing an already compiled parser.
    pub fn new_ref(parser: Rc<PathParser<'a>>) -> Self {
        JsonSelectorMut {
            value: None,
            parser: Some(parser),
        }
    }
    /// Replace the compiled path, taking ownership of `parser`.
    pub fn reset_parser(
        &mut self,
        parser: PathParser<'a>,
    ) -> &mut Self {
        self.parser = Some(Rc::new(parser));
        self
    }
    /// Replace the compiled path with a shared parser.
    pub fn reset_parser_ref(
        &mut self,
        parser: Rc<PathParser<'a>>,
    ) -> &mut Self {
        self.parser = Some(parser);
        self
    }
    /// Bind the owned document that will be mutated.
    pub fn value(
        &mut self,
        value: Value,
    ) -> &mut Self {
        self.value = Some(value);
        self
    }
    /// Take the (possibly mutated) document back out of the selector.
    pub fn take(&mut self) -> Option<Value> {
        self.value.take()
    }
    /// Replace every selected node with `Value::Null`.
    pub fn delete(&mut self) -> Result<&mut Self, JsonPathError> {
        self.replace_with(&mut |_| Some(Value::Null))
    }
    /// Remove every selected node entirely (map entry / array element).
    pub fn remove(&mut self) -> Result<&mut Self, JsonPathError> {
        self.replace_with(&mut |_| None)
    }
    /// Run a read-only selection over the bound document to find the
    /// nodes that will be mutated.
    fn select(&self) -> Result<Vec<&Value>, JsonPathError> {
        let mut selector = JsonSelector::default();
        if let Some(parser) = self.parser.as_ref() {
            selector.reset_parser_ref(Rc::clone(parser));
        } else {
            return Err(JsonPathError::EmptyPath);
        }
        if let Some(value) = self.value.as_ref() {
            selector.value(value);
        } else {
            return Err(JsonPathError::EmptyValue);
        }
        selector.select()
    }
    /// Select nodes, reconstruct their key/index paths, then apply `fun`
    /// to each: `Some(v)` replaces the node, `None` removes it.
    pub fn replace_with<F>(
        &mut self,
        fun: &mut F,
    ) -> Result<&mut Self, JsonPathError>
    where
        F: FnMut(Value) -> Option<Value>,
    {
        let result = self.select()?;
        // Paths must be computed before mutation starts, because the
        // selected references point into `self.value`.
        let paths = self.compute_paths(result);
        if let Some(ref mut value) = &mut self.value {
            for tokens in paths {
                Self::replace_value(tokens, value, fun);
            }
        }
        Ok(self)
    }
    /// Walk `value` along `tokens` (map keys / array indices) and apply
    /// `fun` to the final node. The old value is swapped out with
    /// `Value::Null` first so `fun` receives it by value.
    fn replace_value<F>(
        mut tokens: Vec<String>,
        value: &mut Value,
        fun: &mut F,
    ) where
        F: FnMut(Value) -> Option<Value>,
    {
        let mut target = value;
        let last_index = tokens.len().saturating_sub(1);
        for (i, token) in tokens.drain(..).enumerate() {
            let target_once = target;
            let is_last = i == last_index;
            let target_opt = match *target_once {
                Value::Object(ref mut map) => {
                    if is_last {
                        if let Entry::Occupied(mut e) = map.entry(token) {
                            let v = e.insert(Value::Null);
                            if let Some(res) = fun(v) {
                                e.insert(res);
                            } else {
                                e.remove();
                            }
                        }
                        return;
                    }
                    map.get_mut(&token)
                },
                Value::Array(ref mut vec) => {
                    if let Ok(x) = token.parse::<usize>() {
                        if is_last {
                            if x < vec.len() {
                                let v =
                                    std::mem::replace(&mut vec[x], Value::Null);
                                if let Some(res) = fun(v) {
                                    vec[x] = res;
                                } else {
                                    vec.remove(x);
                                }
                            }
                            return;
                        }
                        vec.get_mut(x)
                    } else {
                        // Non-numeric token against an array: dead end.
                        None
                    }
                },
                _ => None,
            };
            if let Some(t) = target_opt {
                target = t;
            } else {
                break;
            }
        }
    }
    /// For each selected node, reconstruct its token path (keys/indices)
    /// from the document root, preserving selection order.
    fn compute_paths(
        &self,
        mut result: Vec<&Value>,
    ) -> Vec<Vec<String>> {
        let mut visited = HashSet::new();
        let mut visited_order = Vec::new();
        if let Some(origin) = &self.value {
            let mut tokens = Vec::new();
            Self::walk(
                origin,
                &mut result,
                &mut tokens,
                &mut visited,
                &mut visited_order,
            );
        }
        visited_order
    }
    /// Depth-first walk that matches selected nodes by pointer identity
    /// (`std::ptr::eq`) and records the token path at which each was found.
    /// Returns `true` once every target has been located, allowing early exit.
    fn walk(
        origin: &Value,
        target: &mut Vec<&Value>,
        tokens: &mut Vec<String>,
        visited: &mut HashSet<*const Value>,
        visited_order: &mut Vec<Vec<String>>,
    ) -> bool {
        trace!("{:?}, {:?}", target, tokens);
        if target.is_empty() {
            return true;
        }
        target.retain(|t| {
            if std::ptr::eq(origin, *t) {
                // `visited` deduplicates in case the same node was
                // selected more than once.
                if visited.insert(*t) {
                    visited_order.push(tokens.to_vec());
                }
                false
            } else {
                true
            }
        });
        match origin {
            Value::Array(vec) => {
                for (i, v) in vec.iter().enumerate() {
                    tokens.push(i.to_string());
                    if Self::walk(v, target, tokens, visited, visited_order) {
                        return true;
                    }
                    tokens.pop();
                }
            },
            Value::Object(map) => {
                for (k, v) in map {
                    tokens.push(k.clone());
                    if Self::walk(v, target, tokens, visited, visited_order) {
                        return true;
                    }
                    tokens.pop();
                }
            },
            _ => {},
        }
        false
    }
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/selector/terms.rs | src/selector/terms.rs | use std::collections::HashSet;
use serde_json::{Number, Value};
use super::cmp::*;
use super::utils;
use super::value_walker::ValueWalker;
/// One operand of a filter expression: a literal, or a JSON selection.
#[derive(Debug, PartialEq)]
pub enum ExprTerm<'a> {
    String(&'a str),
    Number(Number),
    Bool(bool),
    // (relative/parent values, key the selection was reached by, selected values)
    Json(
        Option<Vec<&'a Value>>,
        Option<FilterKey<'a>>,
        Vec<&'a Value>,
    ),
}
impl<'a> ExprTerm<'a> {
    /// Compare a string literal against another term; string-vs-string uses
    /// the normalized path keys, anything else yields the operator default.
    fn cmp_string<C>(
        s1: &str,
        other: &mut ExprTerm<'a>,
        cmp_fn: &C,
    ) -> ExprTerm<'a>
    where
        C: Cmp,
    {
        match other {
            ExprTerm::String(s2) => {
                let p1 = utils::to_path_str(s1);
                let p2 = utils::to_path_str(s2);
                ExprTerm::Bool(cmp_fn.cmp_string(p1.get_key(), p2.get_key()))
            },
            // `cmp` always flips Json to the left-hand side first.
            ExprTerm::Json(_, _, _) => unreachable!(),
            _ => ExprTerm::Bool(cmp_fn.default()),
        }
    }
    /// Compare a number literal against another term (as f64).
    fn cmp_number<C>(
        n1: &Number,
        other: &mut ExprTerm<'a>,
        cmp_fn: &C,
    ) -> ExprTerm<'a>
    where
        C: Cmp,
    {
        match other {
            ExprTerm::Number(n2) => ExprTerm::Bool(
                cmp_fn.cmp_f64(utils::to_f64(n1), utils::to_f64(n2)),
            ),
            ExprTerm::Json(_, _, _) => unreachable!(),
            _ => ExprTerm::Bool(cmp_fn.default()),
        }
    }
    /// Compare a boolean literal against another term.
    fn cmp_bool<C>(
        b1: &bool,
        other: &mut ExprTerm<'a>,
        cmp_fn: &C,
    ) -> ExprTerm<'a>
    where
        C: Cmp,
    {
        match other {
            ExprTerm::Bool(b2) => ExprTerm::Bool(cmp_fn.cmp_bool(*b1, *b2)),
            ExprTerm::Json(_, _, _) => unreachable!(),
            _ => ExprTerm::Bool(cmp_fn.default()),
        }
    }
    /// Filter a JSON selection against a string literal: keep string values
    /// that match, or objects whose `fk1` field is a matching string.
    fn cmp_json_string<C>(
        s2: &str,
        fk1: &Option<FilterKey>,
        vec1: &[&'a Value],
        cmp_fn: &C,
    ) -> Vec<&'a Value>
    where
        C: Cmp,
    {
        let path_str = utils::to_path_str(s2);
        vec1.iter()
            .filter(|v1| match v1 {
                Value::String(s1) => cmp_fn.cmp_string(s1, path_str.get_key()),
                Value::Object(map1) => {
                    if let Some(FilterKey::String(k)) = fk1 {
                        if let Some(Value::String(s1)) = map1.get(*k) {
                            return cmp_fn.cmp_string(s1, path_str.get_key());
                        }
                    }
                    cmp_fn.default()
                },
                _ => cmp_fn.default(),
            })
            .copied()
            .collect()
    }
    /// Filter a JSON selection against a number literal: keep numeric values
    /// that match, or objects whose `fk1` field is a matching number.
    fn cmp_json_number<C>(
        n2: &Number,
        fk1: &Option<FilterKey>,
        vec1: &[&'a Value],
        cmp_fn: &C,
    ) -> Vec<&'a Value>
    where
        C: Cmp,
    {
        let n2 = utils::to_f64(n2);
        vec1.iter()
            .filter(|v1| match v1 {
                Value::Number(n1) => cmp_fn.cmp_f64(utils::to_f64(n1), n2),
                Value::Object(map1) => {
                    if let Some(FilterKey::String(k)) = fk1 {
                        if let Some(Value::Number(n1)) = map1.get(*k) {
                            return cmp_fn.cmp_f64(utils::to_f64(n1), n2);
                        }
                    }
                    cmp_fn.default()
                },
                _ => cmp_fn.default(),
            })
            .copied()
            .collect()
    }
    /// Filter a JSON selection against a boolean literal: keep boolean values
    /// that match, or objects whose `fk1` field is a matching boolean.
    fn cmp_json_bool<C1>(
        b2: &bool,
        fk1: &Option<FilterKey>,
        vec1: &[&'a Value],
        cmp_fn: &C1,
    ) -> Vec<&'a Value>
    where
        C1: Cmp,
    {
        vec1.iter()
            .filter(|v1| match v1 {
                Value::Bool(b1) => cmp_fn.cmp_bool(*b1, *b2),
                Value::Object(map1) => {
                    if let Some(FilterKey::String(k)) = fk1 {
                        if let Some(Value::Bool(b1)) = map1.get(*k) {
                            return cmp_fn.cmp_bool(*b1, *b2);
                        }
                    }
                    cmp_fn.default()
                },
                _ => cmp_fn.default(),
            })
            .copied()
            .collect()
    }
    /// Compare two JSON selections, preferring each side's relative/parent
    /// set when available. NOTE: the arms look identical but are not — the
    /// `if let` bindings shadow the `vec1`/`vec2` parameters, so each branch
    /// picks a different (rel-or-leaf, parent-or-leaf) combination.
    fn cmp_json_json<C1>(
        rel: &Option<Vec<&'a Value>>,
        parent: &Option<Vec<&'a Value>>,
        vec1: &[&'a Value],
        vec2: &[&'a Value],
        cmp_fn: &C1,
    ) -> Vec<&'a Value>
    where
        C1: Cmp,
    {
        if let Some(vec1) = rel {
            if let Some(vec2) = parent {
                cmp_fn.cmp_json(vec1, vec2)
            } else {
                cmp_fn.cmp_json(vec1, vec2)
            }
        } else if let Some(vec2) = parent {
            cmp_fn.cmp_json(vec1, vec2)
        } else {
            cmp_fn.cmp_json(vec1, vec2)
        }
    }
    /// Core Json-vs-anything comparison. Produces the matching values, then
    /// decides what to report as the term's relative set: for object parents,
    /// only those whose field value is among the matches (by pointer identity).
    fn cmp_json<C1>(
        rel: Option<Vec<&'a Value>>,
        fk1: Option<FilterKey<'a>>,
        vec1: &mut Vec<&'a Value>,
        other: &mut ExprTerm<'a>,
        cmp_fn: &C1,
    ) -> ExprTerm<'a>
    where
        C1: Cmp,
    {
        let ret: Vec<&Value> = match other {
            ExprTerm::String(s2) => {
                Self::cmp_json_string(s2, &fk1, vec1, cmp_fn)
            },
            ExprTerm::Number(n2) => {
                Self::cmp_json_number(n2, &fk1, vec1, cmp_fn)
            },
            ExprTerm::Bool(b2) => Self::cmp_json_bool(b2, &fk1, vec1, cmp_fn),
            ExprTerm::Json(parent, _, vec2) => {
                Self::cmp_json_json(&rel, parent, vec1, vec2, cmp_fn)
            },
        };
        if ret.is_empty() {
            return ExprTerm::Bool(cmp_fn.default());
        }
        if rel.is_none() {
            return ExprTerm::Json(None, None, ret);
        }
        // Json-vs-Json keeps the relative set as-is.
        if rel.is_some() {
            if let ExprTerm::Json(_, _, _) = &other {
                if let Some(rel) = rel {
                    return ExprTerm::Json(Some(rel), None, ret);
                }
            }
        }
        let rel = rel.unwrap();
        let mut object_exist = false;
        for v in &rel {
            if v.is_object() {
                object_exist = true;
                break;
            }
        }
        if !object_exist {
            return ExprTerm::Json(Some(Vec::new()), None, ret);
        }
        // Keep only the parent objects whose field values survived the
        // comparison; matching is by address, not by structural equality.
        let ret_set: HashSet<*const Value> =
            ret.iter().fold(HashSet::new(), |mut acc, v| {
                let ptr = *v as *const Value;
                acc.insert(ptr);
                acc
            });
        let mut tmp = Vec::new();
        for rv in rel {
            if let Value::Object(map) = rv {
                for map_value in map.values() {
                    let ptr = map_value as *const Value;
                    if ret_set.contains(&ptr) {
                        tmp.push(rv);
                    }
                }
            }
        }
        ExprTerm::Json(Some(tmp), None, ret)
    }
    /// Dispatch a comparison; when only the right side is Json, swap the
    /// operands and use the reversed operator so Json is always on the left.
    fn cmp<C1, C2>(
        &mut self,
        other: &mut Self,
        cmp_fn: &C1,
        rev_cmp_fn: &C2,
    ) -> ExprTerm<'a>
    where
        C1: Cmp,
        C2: Cmp,
    {
        if let ExprTerm::Json(_, _, _) = other {
            if let ExprTerm::Json(_, _, _) = &self {
                //
            } else {
                return other.cmp(self, rev_cmp_fn, cmp_fn);
            }
        }
        match self {
            ExprTerm::String(s1) => Self::cmp_string(s1, other, cmp_fn),
            ExprTerm::Number(n1) => Self::cmp_number(n1, other, cmp_fn),
            ExprTerm::Bool(b1) => Self::cmp_bool(b1, other, cmp_fn),
            ExprTerm::Json(rel, fk1, vec1) => {
                Self::cmp_json(rel.take(), fk1.take(), vec1, other, cmp_fn)
            },
        }
    }
    /// `==` — symmetric, so the reversed operator is the same.
    pub fn eq_(
        &mut self,
        mut other: Self,
    ) -> ExprTerm<'a> {
        debug!("eq - {:?} : {:?}", &self, &other);
        let expr = self.cmp(&mut other, &CmpEq, &CmpEq);
        debug!("eq = {:?}", expr);
        expr
    }
    /// `!=` — symmetric.
    pub fn ne_(
        &mut self,
        mut other: Self,
    ) -> ExprTerm<'a> {
        debug!("ne - {:?} : {:?}", &self, &other);
        let expr = self.cmp(&mut other, &CmpNe, &CmpNe);
        debug!("ne = {:?}", expr);
        expr
    }
    /// `>` — reversed operator is `<` for the operand-swap case.
    pub fn gt(
        &mut self,
        mut other: Self,
    ) -> ExprTerm<'a> {
        debug!("gt - {:?} : {:?}", &self, &other);
        let expr = self.cmp(&mut other, &CmpGt, &CmpLt);
        debug!("gt = {:?}", expr);
        expr
    }
    /// `>=` — reversed operator is `<=`.
    pub fn ge(
        &mut self,
        mut other: Self,
    ) -> ExprTerm<'a> {
        debug!("ge - {:?} : {:?}", &self, &other);
        let expr = self.cmp(&mut other, &CmpGe, &CmpLe);
        debug!("ge = {:?}", expr);
        expr
    }
    /// `<` — reversed operator is `>`.
    pub fn lt(
        &mut self,
        mut other: Self,
    ) -> ExprTerm<'a> {
        debug!("lt - {:?} : {:?}", &self, &other);
        let expr = self.cmp(&mut other, &CmpLt, &CmpGt);
        debug!("lt = {:?}", expr);
        expr
    }
    /// `<=` — reversed operator is `>=`.
    pub fn le(
        &mut self,
        mut other: Self,
    ) -> ExprTerm<'a> {
        debug!("le - {:?} : {:?}", &self, &other);
        let expr = self.cmp(&mut other, &CmpLe, &CmpGe);
        debug!("le = {:?}", expr);
        expr
    }
    /// `&&` — symmetric.
    pub fn and(
        &mut self,
        mut other: Self,
    ) -> ExprTerm<'a> {
        debug!("and - {:?} : {:?}", &self, &other);
        let expr = self.cmp(&mut other, &CmpAnd, &CmpAnd);
        debug!("and = {:?}", expr);
        expr
    }
    /// `||` — symmetric.
    pub fn or(
        &mut self,
        mut other: Self,
    ) -> ExprTerm<'a> {
        debug!("or - {:?} : {:?}", &self, &other);
        let expr = self.cmp(&mut other, &CmpOr, &CmpOr);
        debug!("or = {:?}", expr);
        expr
    }
}
impl<'a> From<&Vec<&'a Value>> for ExprTerm<'a> {
    /// Collapses a one-element vec holding a scalar into the matching
    /// scalar term; anything else becomes an un-related `Json` term.
    fn from(vec: &Vec<&'a Value>) -> Self {
        if let [single] = vec.as_slice() {
            match single {
                Value::Number(n) => return ExprTerm::Number(n.clone()),
                Value::String(s) => return ExprTerm::String(s.as_str()),
                Value::Bool(b) => return ExprTerm::Bool(*b),
                _ => {},
            }
        }
        ExprTerm::Json(None, None, vec.to_vec())
    }
}
/// Distinguishes whether filtered values were collected under one
/// concrete object key or via a wildcard scan.
#[derive(Debug, PartialEq)]
pub enum FilterKey<'a> {
    String(&'a str),
    All,
}
/// A key together with the values a filter closure collected for it.
struct FilterResult<'a> {
    key: FilterKey<'a>,
    collected: Vec<&'a Value>,
}
/// Stack of partially evaluated filter terms; a `None` entry is a
/// sentinel that opens a fresh filter context (see `new_filter_context`).
#[derive(Debug, Default)]
pub struct FilterTerms<'a>(pub Vec<Option<ExprTerm<'a>>>);
impl<'a> FilterTerms<'a> {
    /// Opens a new filter scope by pushing a `None` sentinel.
    pub fn new_filter_context(&mut self) {
        self.0.push(None);
        debug!("new_filter_context: {:?}", self.0);
    }
    /// Returns `true` when nothing (not even a sentinel) is stacked.
    pub fn is_term_empty(&self) -> bool {
        self.0.is_empty()
    }
    /// Pushes a term (or a `None` sentinel) onto the stack.
    pub fn push_term(
        &mut self,
        term: Option<ExprTerm<'a>>,
    ) {
        self.0.push(term);
    }
    /// Pops the top entry; the outer `None` means the stack was empty,
    /// `Some(None)` means a sentinel was on top.
    #[allow(clippy::option_option)]
    pub fn pop_term(&mut self) -> Option<Option<ExprTerm<'a>>> {
        self.0.pop()
    }
fn filter_json_term<F>(
&mut self,
e: ExprTerm<'a>,
fun: F,
) where
F: Fn(&Vec<&'a Value>, &mut Option<HashSet<usize>>) -> FilterResult<'a>,
{
debug!("filter_json_term: {:?}", e);
if let ExprTerm::Json(rel, fk, vec) = e {
let mut not_matched = Some(HashSet::new());
let filter_result = if let Some(FilterKey::String(key)) = fk {
fun(&ValueWalker::next_with_str(&vec, key), &mut not_matched)
} else {
fun(&vec, &mut not_matched)
};
if rel.is_some() {
self.push_term(Some(ExprTerm::Json(
rel,
Some(filter_result.key),
filter_result.collected,
)));
} else {
let not_matched = not_matched.unwrap();
let filtered = vec
.iter()
.enumerate()
.filter(|(idx, _)| !not_matched.contains(idx))
.map(|(_, v)| *v)
.collect();
self.push_term(Some(ExprTerm::Json(
Some(filtered),
Some(filter_result.key),
filter_result.collected,
)));
}
} else {
unreachable!("unexpected: ExprTerm: {:?}", e);
}
}
fn push_json_term<F>(
&mut self,
current: Option<Vec<&'a Value>>,
fun: F,
) -> Option<Vec<&'a Value>>
where
F: Fn(&Vec<&'a Value>, &mut Option<HashSet<usize>>) -> FilterResult<'a>,
{
debug!("push_json_term: {:?}", ¤t);
if let Some(current) = ¤t {
let filter_result = fun(current, &mut None);
self.push_term(Some(ExprTerm::Json(
None,
Some(filter_result.key),
filter_result.collected,
)));
}
current
}
fn filter<F>(
&mut self,
current: Option<Vec<&'a Value>>,
fun: F,
) -> Option<Vec<&'a Value>>
where
F: Fn(&Vec<&'a Value>, &mut Option<HashSet<usize>>) -> FilterResult<'a>,
{
let peek = self.pop_term();
if let Some(None) = peek {
return self.push_json_term(current, fun);
}
if let Some(Some(e)) = peek {
self.filter_json_term(e, fun);
}
current
}
pub fn filter_all_with_str(
&mut self,
current: Option<Vec<&'a Value>>,
key: &'a str,
) -> Option<Vec<&'a Value>> {
let current = self.filter(current, |vec, _| FilterResult {
key: FilterKey::All,
collected: ValueWalker::all_with_str(vec, key),
});
debug!("filter_all_with_str : {}, {:?}", key, self.0);
current
}
pub fn filter_next_with_str(
&mut self,
current: Option<Vec<&'a Value>>,
key: &'a str,
) -> Option<Vec<&'a Value>> {
let current = self.filter(current, |vec, not_matched| {
let mut visited = HashSet::new();
let mut acc = Vec::new();
let path_key = &utils::to_path_str(key);
ValueWalker::walk_dedup_all(
vec,
path_key.get_key(),
&mut visited,
&mut |v| {
acc.push(v);
},
&mut |idx| {
if let Some(set) = not_matched {
set.insert(idx);
}
},
0,
);
FilterResult {
key: FilterKey::String(path_key.get_origin_key()),
collected: acc,
}
});
debug!("filter_next_with_str : {}, {:?}", key, self.0);
current
}
pub fn collect_next_with_num(
&mut self,
current: Option<Vec<&'a Value>>,
index: f64,
) -> Option<Vec<&'a Value>> {
if current.is_none() {
debug!("collect_next_with_num : {:?}, {:?}", &index, ¤t);
return current;
}
if let Some(Some(e)) = self.pop_term() {
match e {
ExprTerm::Json(rel, _, vec) => {
return if vec.is_empty() {
Some(Vec::new())
} else if let Some(vec) = rel {
let index = utils::abs_index(index as isize, vec.len());
let ret =
vec.get(index).map_or(Vec::new(), |v| vec![*v]);
Some(ret)
} else {
let index = utils::abs_index(index as isize, vec.len());
let ret =
vec.get(index).map_or(Vec::new(), |v| vec![*v]);
Some(ret)
};
},
_ => {
self.push_term(Some(e));
},
}
}
let acc = ValueWalker::next_with_num(¤t.unwrap(), index);
if acc.is_empty() {
self.pop_term();
}
Some(acc)
}
pub fn collect_next_with_str(
&mut self,
current: Option<Vec<&'a Value>>,
keys: &[&'a str],
) -> Option<Vec<&'a Value>> {
if current.is_none() {
debug!("collect_next_with_str : {:?}, {:?}", keys, ¤t);
return current;
}
let acc = ValueWalker::all_with_strs(current.as_ref().unwrap(), keys);
if acc.is_empty() {
self.pop_term();
}
Some(acc)
}
pub fn collect_next_all(
&mut self,
current: Option<Vec<&'a Value>>,
) -> Option<Vec<&'a Value>> {
if current.is_none() {
debug!("collect_next_all : {:?}", ¤t);
return current;
}
Some(ValueWalker::next_all(¤t.unwrap()))
}
pub fn collect_all(
&mut self,
current: Option<Vec<&'a Value>>,
) -> Option<Vec<&'a Value>> {
if current.is_none() {
debug!("collect_all: {:?}", ¤t);
return current;
}
Some(ValueWalker::all(current.as_ref().unwrap()))
}
pub fn collect_all_with_str(
&mut self,
current: Option<Vec<&'a Value>>,
key: &'a str,
) -> Option<Vec<&'a Value>> {
if current.is_none() {
debug!("collect_all_with_str: {}, {:?}", key, ¤t);
return current;
}
let ret = ValueWalker::all_with_str(current.as_ref().unwrap(), key);
Some(ret)
}
pub fn collect_all_with_num(
&mut self,
mut current: Option<Vec<&'a Value>>,
index: f64,
) -> Option<Vec<&'a Value>> {
if let Some(current) = current.take() {
let ret = ValueWalker::all_with_num(¤t, index);
if !ret.is_empty() {
return Some(ret);
}
}
debug!("collect_all_with_num: {}, {:?}", index, ¤t);
None
}
}
// Unit tests for the `From<&Vec<&Value>>` conversion into `ExprTerm`.
#[cfg(test)]
mod expr_term_inner_tests {
    use serde_json::{Number, Value};
    use crate::selector::terms::ExprTerm;
    #[test]
    fn value_vec_into() {
        // A one-element vec holding a scalar collapses into that scalar.
        let v = Value::Bool(true);
        let vec = &vec![&v];
        let term: ExprTerm = vec.into();
        assert_eq!(term, ExprTerm::Bool(true));
        let v = Value::String("a".to_string());
        let vec = &vec![&v];
        let term: ExprTerm = vec.into();
        assert_eq!(term, ExprTerm::String("a"));
        let v = serde_json::from_str("1.0").unwrap();
        let vec = &vec![&v];
        let term: ExprTerm = vec.into();
        assert_eq!(term, ExprTerm::Number(Number::from_f64(1.0).unwrap()));
    }
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/selector/value_walker.rs | src/selector/value_walker.rs | use std::collections::HashSet;
use super::utils;
use crate::selector::utils::PathKey;
use serde_json::Value;
pub(super) struct ValueWalker;
impl<'a> ValueWalker {
pub fn next_all(vec: &[&'a Value]) -> Vec<&'a Value> {
vec.iter().fold(Vec::new(), |mut acc, v| {
match v {
Value::Object(map) => acc.extend(map.values()),
Value::Array(vec) => acc.extend(vec),
_ => {},
}
acc
})
}
pub fn next_with_str(
vec: &[&'a Value],
key: &'a str,
) -> Vec<&'a Value> {
vec.iter().fold(Vec::new(), |mut acc, v| {
if let Value::Object(map) = v {
if let Some(v) = map.get(key) {
acc.push(v);
}
}
acc
})
}
pub fn next_with_num(
vec: &[&'a Value],
index: f64,
) -> Vec<&'a Value> {
vec.iter().fold(Vec::new(), |mut acc, v| {
if let Value::Array(vec) = v {
if let Some(v) =
vec.get(utils::abs_index(index as isize, vec.len()))
{
acc.push(v);
}
}
acc
})
}
pub fn all_with_num(
vec: &[&'a Value],
index: f64,
) -> Vec<&'a Value> {
Self::walk(vec, &|v, acc| {
if v.is_array() {
if let Some(v) = v.get(index as usize) {
acc.push(v);
}
}
})
}
pub fn all_with_str(
vec: &[&'a Value],
key: &'a str,
) -> Vec<&'a Value> {
let path_key = utils::to_path_str(key);
Self::walk(vec, &|v, acc| {
if let Value::Object(map) = v {
if let Some(v) = map.get(path_key.get_key()) {
acc.push(v);
}
}
})
}
pub fn all_with_strs(
vec: &[&'a Value],
keys: &[&'a str],
) -> Vec<&'a Value> {
let path_keys: &Vec<PathKey> =
&keys.iter().map(|key| utils::to_path_str(key)).collect();
vec.iter().fold(Vec::new(), |mut acc, v| {
if let Value::Object(map) = v {
path_keys.iter().for_each(|pk| {
if let Some(v) = map.get(pk.get_key()) {
acc.push(v)
}
});
}
acc
})
}
pub fn all(vec: &[&'a Value]) -> Vec<&'a Value> {
Self::walk(vec, &|v, acc| match v {
Value::Array(ay) => acc.extend(ay),
Value::Object(map) => {
acc.extend(map.values());
},
_ => {},
})
}
    /// Runs `_walk` over each root in `vec`, accumulating into one vec.
    fn walk<F>(
        vec: &[&'a Value],
        fun: &F,
    ) -> Vec<&'a Value>
    where
        F: Fn(&'a Value, &mut Vec<&'a Value>),
    {
        vec.iter().fold(Vec::new(), |mut acc, v| {
            Self::_walk(v, &mut acc, fun);
            acc
        })
    }
    /// Pre-order depth-first traversal: `fun` sees `v` before its
    /// children.
    fn _walk<F>(
        v: &'a Value,
        acc: &mut Vec<&'a Value>,
        fun: &F,
    ) where
        F: Fn(&'a Value, &mut Vec<&'a Value>),
    {
        fun(v, acc);
        match v {
            Value::Array(vec) => {
                vec.iter().for_each(|v| Self::_walk(v, acc, fun));
            },
            Value::Object(map) => {
                map.values().for_each(|v| Self::_walk(v, acc, fun));
            },
            _ => {},
        }
    }
    /// Runs `walk_dedup` over each root, passing its top-level index so
    /// non-matching roots can be reported to `is_not_contain`.
    pub fn walk_dedup_all<F1, F2>(
        vec: &[&'a Value],
        key: &str,
        visited: &mut HashSet<*const Value>,
        is_contain: &mut F1,
        is_not_contain: &mut F2,
        depth: usize,
    ) where
        F1: FnMut(&'a Value),
        F2: FnMut(usize),
    {
        vec.iter().enumerate().for_each(|(index, v)| {
            Self::walk_dedup(
                v,
                key,
                visited,
                index,
                is_contain,
                is_not_contain,
                depth,
            )
        });
    }
fn walk_dedup<F1, F2>(
v: &'a Value,
key: &str,
visited: &mut HashSet<*const Value>,
index: usize,
is_contain: &mut F1,
is_not_contain: &mut F2,
depth: usize,
) where
F1: FnMut(&'a Value),
F2: FnMut(usize),
{
let ptr = v as *const Value;
if visited.contains(&ptr) {
return;
}
match v {
Value::Object(map) => {
if map.get(key).is_some() {
let ptr = v as *const Value;
if !visited.contains(&ptr) {
visited.insert(ptr);
is_contain(v);
}
} else if depth == 0 {
is_not_contain(index);
}
},
Value::Array(vec) => {
if depth == 0 {
is_not_contain(index);
}
vec.iter().for_each(|v| {
Self::walk_dedup(
v,
key,
visited,
index,
is_contain,
is_not_contain,
depth + 1,
);
})
},
_ => {
if depth == 0 {
is_not_contain(index);
}
},
}
}
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/selector/utils.rs | src/selector/utils.rs | use serde_json::Number;
/// Widens any JSON number (i64, f64 or u64 representation) to `f64`.
pub fn to_f64(n: &Number) -> f64 {
    if let Some(i) = n.as_i64() {
        i as f64
    } else if let Some(f) = n.as_f64() {
        f
    } else {
        // serde_json numbers are always one of i64/u64/f64.
        n.as_u64().unwrap() as f64
    }
}
/// Resolves a possibly-negative index against a collection of length
/// `len`, clamping into `0..=len` (note the result may equal `len`).
pub fn abs_index(
    n: isize,
    len: usize,
) -> usize {
    if n >= 0_isize {
        (n as usize).min(len)
    } else {
        // Negative indices count back from the end, floored at 0.
        (len as isize + n).max(0) as usize
    }
}
/// A JSONPath object key, possibly carrying an unescaped copy.
pub struct PathKey<'a> {
    // The key with surrounding quotes already stripped by `to_path_str`.
    key: &'a str,
    // Set only when the quoted form contained backslashes: the same key
    // with every backslash removed.
    special_key: Option<String>,
}
impl<'a: 'b, 'b> PathKey<'a> {
    /// Returns the lookup key: the unescaped variant when present,
    /// otherwise the original.
    pub fn get_key(&'a self) -> &'b str {
        if let Some(skey) = self.special_key.as_ref() {
            skey
        } else {
            self.key
        }
    }
    /// Returns the key exactly as parsed, escapes intact.
    pub fn get_origin_key(&self) -> &'a str {
        self.key
    }
}
/// Normalizes a parsed key: strips surrounding single/double quotes and,
/// when the quoted form contains backslash escapes, records an unescaped
/// variant in `special_key`.
pub fn to_path_str(key: &str) -> PathKey {
    let mut path_key = PathKey {
        key,
        special_key: None,
    };
    // Length guard: without it a degenerate one-character input such as
    // `'` would make the `1..len-1` slice below panic.
    if key.len() >= 2 && (key.starts_with('\'') || key.starts_with('"')) {
        let s = &key[1..key.len() - 1];
        path_key.key = s;
        if key.contains('\\') {
            // NOTE(review): this removes *all* backslashes, so an escaped
            // backslash (`\\`) is dropped entirely rather than kept as a
            // literal one — behavior preserved as-is.
            path_key.special_key =
                Some(s.chars().filter(|ch| ch != &'\\').collect());
        }
    }
    path_key
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/selector/mod.rs | src/selector/mod.rs | pub use self::selector_impl::{JsonSelector, JsonSelectorMut};
mod cmp;
mod selector_impl;
mod terms;
mod utils;
mod value_walker;
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/selector/cmp.rs | src/selector/cmp.rs | use serde_json::Value;
/// Comparison strategy used by filter expressions; each operator
/// implements this for booleans, numbers, strings and JSON value sets.
pub trait Cmp {
    /// Compares two booleans.
    fn cmp_bool(
        &self,
        v1: bool,
        v2: bool,
    ) -> bool;
    /// Compares two numbers (JSON numbers are widened to `f64`).
    fn cmp_f64(
        &self,
        v1: f64,
        v2: f64,
    ) -> bool;
    /// Compares two strings.
    fn cmp_string(
        &self,
        v1: &str,
        v2: &str,
    ) -> bool;
    /// Combines two sets of JSON values; implementations here compare by
    /// address (`std::ptr::eq`), not by structural equality.
    fn cmp_json<'a>(
        &self,
        v1: &[&'a Value],
        v2: &[&'a Value],
    ) -> Vec<&'a Value>;
    /// Fallback result when operand types cannot be compared.
    fn default(&self) -> bool {
        false
    }
}
/// The `==` operator.
pub struct CmpEq;
impl Cmp for CmpEq {
    fn cmp_bool(
        &self,
        v1: bool,
        v2: bool,
    ) -> bool {
        v1 == v2
    }
    fn cmp_f64(
        &self,
        v1: f64,
        v2: f64,
    ) -> bool {
        // Exact zero-distance equality test.
        (v1 - v2).abs() == 0_f64
    }
    fn cmp_string(
        &self,
        v1: &str,
        v2: &str,
    ) -> bool {
        v1 == v2
    }
    fn cmp_json<'a>(
        &self,
        v1: &[&'a Value],
        v2: &[&'a Value],
    ) -> Vec<&'a Value> {
        // Intersection by pointer identity, preserving `v1` order.
        let mut shared = Vec::new();
        for a in v1 {
            for b in v2 {
                if std::ptr::eq(*a, *b) {
                    shared.push(*a);
                }
            }
        }
        shared
    }
}
/// The `!=` operator.
pub struct CmpNe;
impl Cmp for CmpNe {
    fn cmp_bool(
        &self,
        v1: bool,
        v2: bool,
    ) -> bool {
        v1 != v2
    }
    fn cmp_f64(
        &self,
        v1: f64,
        v2: f64,
    ) -> bool {
        (v1 - v2).abs() != 0_f64
    }
    fn cmp_string(
        &self,
        v1: &str,
        v2: &str,
    ) -> bool {
        v1 != v2
    }
    fn cmp_json<'a>(
        &self,
        v1: &[&'a Value],
        v2: &[&'a Value],
    ) -> Vec<&'a Value> {
        // Multiset difference by pointer identity: each element of `v2`
        // removes at most one matching entry from the copy of `v1`.
        let mut remaining = v1.to_vec();
        for value in v2 {
            if let Some(pos) =
                remaining.iter().position(|r| std::ptr::eq(*r, *value))
            {
                remaining.remove(pos);
            }
        }
        remaining
    }
    fn default(&self) -> bool {
        true
    }
}
/// The `>` operator.
pub struct CmpGt;
impl Cmp for CmpGt {
    fn cmp_bool(
        &self,
        v1: bool,
        v2: bool,
    ) -> bool {
        // true > false is the only true case.
        v1 && !v2
    }
    fn cmp_f64(
        &self,
        v1: f64,
        v2: f64,
    ) -> bool {
        v1 > v2
    }
    fn cmp_string(
        &self,
        v1: &str,
        v2: &str,
    ) -> bool {
        v1 > v2
    }
    fn cmp_json<'a>(
        &self,
        _: &[&'a Value],
        _: &[&'a Value],
    ) -> Vec<&'a Value> {
        // Ordering two JSON value sets is undefined; yield nothing.
        Vec::new()
    }
}
/// The `>=` operator.
pub struct CmpGe;
impl Cmp for CmpGe {
    fn cmp_bool(
        &self,
        v1: bool,
        v2: bool,
    ) -> bool {
        // Equivalent to `v1 >= v2` for booleans.
        v1 || !v2
    }
    fn cmp_f64(
        &self,
        v1: f64,
        v2: f64,
    ) -> bool {
        v1 >= v2
    }
    fn cmp_string(
        &self,
        v1: &str,
        v2: &str,
    ) -> bool {
        v1 >= v2
    }
    fn cmp_json<'a>(
        &self,
        _: &[&'a Value],
        _: &[&'a Value],
    ) -> Vec<&'a Value> {
        Vec::new()
    }
}
/// The `<` operator.
pub struct CmpLt;
impl Cmp for CmpLt {
    fn cmp_bool(
        &self,
        v1: bool,
        v2: bool,
    ) -> bool {
        !v1 && v2
    }
    fn cmp_f64(
        &self,
        v1: f64,
        v2: f64,
    ) -> bool {
        v1 < v2
    }
    fn cmp_string(
        &self,
        v1: &str,
        v2: &str,
    ) -> bool {
        v1 < v2
    }
    fn cmp_json<'a>(
        &self,
        _: &[&'a Value],
        _: &[&'a Value],
    ) -> Vec<&'a Value> {
        Vec::new()
    }
}
/// The `<=` operator.
pub struct CmpLe;
impl Cmp for CmpLe {
    fn cmp_bool(
        &self,
        v1: bool,
        v2: bool,
    ) -> bool {
        // Equivalent to `v1 <= v2` for booleans.
        !v1 || v2
    }
    fn cmp_f64(
        &self,
        v1: f64,
        v2: f64,
    ) -> bool {
        v1 <= v2
    }
    fn cmp_string(
        &self,
        v1: &str,
        v2: &str,
    ) -> bool {
        v1 <= v2
    }
    fn cmp_json<'a>(
        &self,
        _: &[&'a Value],
        _: &[&'a Value],
    ) -> Vec<&'a Value> {
        Vec::new()
    }
}
/// The `&&` operator.
pub struct CmpAnd;
impl Cmp for CmpAnd {
    fn cmp_bool(
        &self,
        v1: bool,
        v2: bool,
    ) -> bool {
        v1 && v2
    }
    fn cmp_f64(
        &self,
        _v1: f64,
        _v2: f64,
    ) -> bool {
        // Any two numbers count as truthy operands.
        true
    }
    fn cmp_string(
        &self,
        v1: &str,
        v2: &str,
    ) -> bool {
        // String truthiness: both sides non-empty.
        !v1.is_empty() && !v2.is_empty()
    }
    fn cmp_json<'a>(
        &self,
        v1: &[&'a Value],
        v2: &[&'a Value],
    ) -> Vec<&'a Value> {
        // Logical AND over value sets is their intersection.
        CmpEq.cmp_json(v1, v2)
    }
}
/// The `||` operator.
pub struct CmpOr;
impl Cmp for CmpOr {
    fn cmp_bool(
        &self,
        v1: bool,
        v2: bool,
    ) -> bool {
        v1 || v2
    }
    fn cmp_f64(
        &self,
        _v1: f64,
        _v2: f64,
    ) -> bool {
        true
    }
    fn cmp_string(
        &self,
        v1: &str,
        v2: &str,
    ) -> bool {
        !v1.is_empty() || !v2.is_empty()
    }
    fn cmp_json<'a>(
        &self,
        v1: &[&'a Value],
        v2: &[&'a Value],
    ) -> Vec<&'a Value> {
        // Union by pointer identity: keep `v1`, append entries of `v2`
        // whose address does not already occur in `v1`.
        let mut merged = v1.to_vec();
        for value in v2 {
            if !v1.iter().any(|a| std::ptr::eq(*a, *value)) {
                merged.push(*value);
            }
        }
        merged
    }
}
#[cfg(test)]
mod cmp_inner_tests {
use serde_json::Value;
use crate::selector::cmp::*;
#[test]
fn cmp_eq() {
let cmp_fn = CmpEq;
assert!(!cmp_fn.default());
assert!(!cmp_fn.cmp_bool(true, false));
assert!(cmp_fn.cmp_bool(true, true));
assert!(cmp_fn.cmp_f64(0.1, 0.1));
assert!(!cmp_fn.cmp_f64(0.1, 0.2));
assert!(cmp_fn.cmp_string("1", "1"));
assert!(!cmp_fn.cmp_string("1", "2"));
}
#[test]
fn cmp_ne() {
let cmp_fn = CmpNe;
assert!(cmp_fn.default());
assert!(cmp_fn.cmp_bool(true, false));
assert!(!cmp_fn.cmp_bool(true, true));
assert!(!cmp_fn.cmp_f64(0.1, 0.1));
assert!(cmp_fn.cmp_f64(0.1, 0.2));
assert!(!cmp_fn.cmp_string("1", "1"));
assert!(cmp_fn.cmp_string("1", "2"));
}
#[test]
fn cmp_gt() {
let cmp_fn = CmpGt;
assert!(!cmp_fn.default());
assert!(cmp_fn.cmp_bool(true, false));
assert!(!cmp_fn.cmp_bool(true, true));
assert!(cmp_fn.cmp_f64(0.2, 0.1));
assert!(!cmp_fn.cmp_f64(0.1, 0.2));
assert!(!cmp_fn.cmp_string("a", "a"));
assert!(cmp_fn.cmp_string("b", "a"));
assert!(!cmp_fn.cmp_string("1", "2"));
}
#[test]
fn cmp_ge() {
let cmp_fn = CmpGe;
assert!(!cmp_fn.default());
assert!(cmp_fn.cmp_bool(true, false));
assert!(cmp_fn.cmp_bool(true, true));
assert!(cmp_fn.cmp_f64(0.2, 0.1));
assert!(cmp_fn.cmp_f64(0.1, 0.1));
assert!(!cmp_fn.cmp_f64(0.1, 0.2));
assert!(cmp_fn.cmp_string("1", "1"));
assert!(cmp_fn.cmp_string("ab", "a"));
assert!(!cmp_fn.cmp_string("1", "2"));
}
#[test]
fn cmp_lt() {
let cmp_fn = CmpLt;
assert!(!cmp_fn.default());
assert!(!cmp_fn.cmp_bool(true, false));
assert!(cmp_fn.cmp_bool(false, true));
assert!(!cmp_fn.cmp_bool(true, true));
assert!(!cmp_fn.cmp_bool(false, false));
assert!(cmp_fn.cmp_f64(0.1, 0.2));
assert!(!cmp_fn.cmp_f64(0.1, 0.1));
assert!(!cmp_fn.cmp_f64(0.2, 0.1));
assert!(!cmp_fn.cmp_string("a", "a"));
assert!(cmp_fn.cmp_string("ab", "b"));
assert!(cmp_fn.cmp_string("1", "2"));
}
#[test]
fn cmp_le() {
let cmp_fn = CmpLe;
assert!(!cmp_fn.default());
assert!(!cmp_fn.cmp_bool(true, false));
assert!(cmp_fn.cmp_bool(false, true));
assert!(cmp_fn.cmp_bool(true, true));
assert!(cmp_fn.cmp_bool(false, false));
assert!(cmp_fn.cmp_f64(0.1, 0.2));
assert!(cmp_fn.cmp_f64(0.1, 0.1));
assert!(!cmp_fn.cmp_f64(0.2, 0.1));
assert!(cmp_fn.cmp_string("a", "a"));
assert!(cmp_fn.cmp_string("ab", "b"));
assert!(!cmp_fn.cmp_string("abd", "abc"));
assert!(cmp_fn.cmp_string("1", "2"));
}
#[test]
fn cmp_and() {
let cmp_fn = CmpAnd;
assert!(!cmp_fn.default());
assert!(!cmp_fn.cmp_bool(true, false));
assert!(!cmp_fn.cmp_bool(false, true));
assert!(cmp_fn.cmp_bool(true, true));
assert!(!cmp_fn.cmp_bool(false, false));
assert!(cmp_fn.cmp_f64(0.0, 0.0));
assert!(cmp_fn.cmp_string("a", "a"));
}
#[test]
fn cmp_or() {
let cmp_fn = CmpOr;
assert!(!cmp_fn.default());
assert!(cmp_fn.cmp_bool(true, false));
assert!(cmp_fn.cmp_bool(false, true));
assert!(cmp_fn.cmp_bool(true, true));
assert!(!cmp_fn.cmp_bool(false, false));
assert!(cmp_fn.cmp_f64(0.0, 0.0));
assert!(cmp_fn.cmp_string("a", "a"));
}
#[test]
fn cmp_json() {
let v1 = Value::Bool(true);
let v2 = Value::String("1".to_string());
let left = [&v1, &v2];
let right = [&v1, &v2];
let empty: Vec<&Value> = Vec::new();
assert_eq!(CmpEq.cmp_json(&left, &right), left.to_vec());
assert_eq!(CmpNe.cmp_json(&left, &right), empty);
assert_eq!(CmpGt.cmp_json(&left, &right), empty);
assert_eq!(CmpGe.cmp_json(&left, &right), empty);
assert_eq!(CmpLt.cmp_json(&left, &right), empty);
assert_eq!(CmpLe.cmp_json(&left, &right), empty);
assert_eq!(CmpAnd.cmp_json(&left, &right), left.to_vec());
assert_eq!(CmpOr.cmp_json(&left, &right), left.to_vec());
assert_eq!(
CmpEq.cmp_json(&[&Value::Bool(true)], &[&Value::Bool(true)]),
vec![&Value::Bool(true)]
);
assert_eq!(
CmpEq.cmp_json(&[&Value::Bool(true)], &[&Value::Bool(false)]),
empty
);
assert_eq!(
CmpNe.cmp_json(&[&Value::Bool(true)], &[&Value::Bool(true)]),
empty
);
assert_eq!(
CmpNe.cmp_json(&[&Value::Bool(false)], &[&Value::Bool(true)]),
vec![&Value::Bool(false)]
);
assert_eq!(
CmpAnd.cmp_json(&[&Value::Bool(true)], &[&Value::Bool(true)]),
vec![&Value::Bool(true)]
);
assert_eq!(
CmpOr.cmp_json(&[&Value::Bool(true)], &[&Value::Bool(false)]),
vec![&Value::Bool(true), &Value::Bool(false)]
);
}
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/paths/str_reader.rs | src/paths/str_reader.rs | use std::result::Result;
use std::str::Chars;
/// Errors produced while reading characters from the input.
#[derive(Debug, PartialEq)]
pub enum ReaderError {
    /// The reader ran past the end of the input.
    Eof,
}
/// A byte range (`pos` .. `pos + offset`) into the original input.
#[derive(Debug, PartialEq, Clone)]
pub struct StrRange {
    pub pos: usize,
    pub offset: usize,
}
impl StrRange {
    /// Creates a range starting at byte `pos` spanning `offset` bytes.
    pub fn new(
        pos: usize,
        offset: usize,
    ) -> Self {
        StrRange { pos, offset }
    }
}
/// A peekable character reader that tracks its current byte position.
#[derive(Clone, Debug)]
pub(crate) struct StrReader<'a> {
    input: &'a str,
    pos: usize,
    chars: Chars<'a>,
    // Outer `None` = nothing peeked yet; inner `None` = end of input.
    peeked: Option<Option<char>>,
}
impl<'a> StrReader<'a> {
    /// Wraps `input`, positioned at its first character.
    pub fn new(input: &'a str) -> Self {
        StrReader {
            input,
            pos: 0,
            chars: input.chars(),
            peeked: None,
        }
    }
    /// Returns the next character without consuming it.
    pub fn peek_char(&mut self) -> Result<char, ReaderError> {
        let ch = self.peek().ok_or(ReaderError::Eof)?;
        Ok(*ch)
    }
    /// Consumes characters while `fun` holds and returns their range.
    /// `self.pos` is advanced once, after the loop.
    pub fn take_while<F>(
        &mut self,
        fun: F,
    ) -> Result<StrRange, ReaderError>
    where
        F: Fn(&char) -> bool,
    {
        let mut char_len: usize = 0;
        while let Some(c) = self.peek() {
            if !fun(c) {
                break;
            }
            match self.next() {
                // Track byte length (not char count) so ranges index
                // correctly into the UTF-8 input.
                Some(ch) => char_len += ch.len_utf8(),
                _ => return Err(ReaderError::Eof),
            }
        }
        let pos = self.pos;
        self.pos += char_len;
        Ok(StrRange::new(pos, char_len))
    }
    /// Consumes one character, returning it with its byte range.
    pub fn next_char(&mut self) -> Result<(StrRange, char), ReaderError> {
        let ch = self.next().ok_or(ReaderError::Eof)?;
        let pos = self.pos;
        let len = ch.len_utf8();
        self.pos += len;
        Ok((StrRange::new(pos, len), ch))
    }
    /// Slices the original input by `span`.
    pub fn read(
        &self,
        span: &StrRange,
    ) -> &'a str {
        &self.input[span.pos..(span.pos + span.offset)]
    }
    /// Current byte offset into the input.
    pub fn current_pos(&self) -> usize {
        self.pos
    }
    /// The untouched input string.
    pub fn origin_str(&self) -> &'a str {
        self.input
    }
    // Hand-rolled `Peekable`-style advance; drains a previously peeked
    // char before pulling from the iterator.
    fn next(&mut self) -> Option<char> {
        match self.peeked.take() {
            Some(v) => v,
            None => self.chars.next(),
        }
    }
    // Peek with memoization into `self.peeked`.
    fn peek(&mut self) -> Option<&char> {
        let chars = &mut self.chars;
        self.peeked.get_or_insert_with(|| chars.next()).as_ref()
    }
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/paths/parser_node_visitor.rs | src/paths/parser_node_visitor.rs | use crate::paths::path_parser::ParserNode;
use crate::paths::tokens::ParseToken;
use crate::paths::{ParserTokenHandler, StrRange};
/// Walks a parsed AST and replays its tokens into a
/// `ParserTokenHandler`, resolving `StrRange`s through
/// `parse_value_reader`.
pub trait ParserNodeVisitor<'a> {
    fn visit<F, F1>(
        &self,
        parse_node: &ParserNode,
        token_handler: &mut F,
        parse_value_reader: &F1,
    ) where
        F: ParserTokenHandler<'a>,
        F1: Fn(&StrRange) -> &'a str,
    {
        trace!("visit {:?}", parse_node);
        // FIXME When written in "match" grammar, it is determined that "tarpaulin" did not cover the test coverage.
        if parse_node.token == ParseToken::Absolute
            || parse_node.token == ParseToken::Relative
            || parse_node.token == ParseToken::All
            || matches!(&parse_node.token, &ParseToken::Key(_))
            || matches!(&parse_node.token, &ParseToken::Keys(_))
            || matches!(&parse_node.token, &ParseToken::Range(_, _, _))
            || matches!(&parse_node.token, &ParseToken::Union(_))
            || matches!(&parse_node.token, &ParseToken::Number(_))
            || matches!(&parse_node.token, &ParseToken::Bool(_))
        {
            // Leaf tokens: emit directly, no children to visit.
            token_handler.handle(&parse_node.token, parse_value_reader);
        } else if parse_node.token == ParseToken::In
            || parse_node.token == ParseToken::Leaves
        {
            // Infix traversal: left subtree, then the token, then right.
            if let Some(n) = &parse_node.left {
                self.visit(n, token_handler, parse_value_reader);
            }
            token_handler.handle(&parse_node.token, parse_value_reader);
            if let Some(n) = &parse_node.right {
                self.visit(n, token_handler, parse_value_reader);
            }
        } else if parse_node.token == ParseToken::Array {
            // Like the infix case, but closed with an ArrayEof marker.
            if let Some(n) = &parse_node.left {
                self.visit(n, token_handler, parse_value_reader);
            }
            token_handler.handle(&parse_node.token, parse_value_reader);
            if let Some(n) = &parse_node.right {
                self.visit(n, token_handler, parse_value_reader);
            }
            token_handler.handle(&ParseToken::ArrayEof, parse_value_reader);
        } else if matches!(&parse_node.token, &ParseToken::Filter(_)) {
            // Postfix traversal: both operands first, then the operator.
            if let Some(n) = &parse_node.left {
                self.visit(n, token_handler, parse_value_reader);
            }
            if let Some(n) = &parse_node.right {
                self.visit(n, token_handler, parse_value_reader);
            }
            token_handler.handle(&parse_node.token, parse_value_reader);
        }
    }
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/paths/parser_token_handler.rs | src/paths/parser_token_handler.rs | use super::str_reader::StrRange;
use super::tokens::ParseToken;
/// Callback receiving each `ParseToken` as the AST is replayed;
/// `parse_value_reader` resolves a `StrRange` back into source text.
pub trait ParserTokenHandler<'a> {
    fn handle<F>(
        &mut self,
        token: &ParseToken,
        parse_value_reader: &F,
    ) where
        F: Fn(&StrRange) -> &'a str;
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/paths/tokens.rs | src/paths/tokens.rs | use super::str_reader::StrRange;
/// Lexical tokens produced by the tokenizer; each variant carries the
/// `StrRange` of the source text it was scanned from.
#[derive(Debug, PartialEq, Clone)]
pub enum Token {
    Absolute(StrRange),
    Dot(StrRange),
    At(StrRange),
    OpenArray(StrRange),
    CloseArray(StrRange),
    Asterisk(StrRange),
    Question(StrRange),
    Comma(StrRange),
    Split(StrRange),
    OpenParenthesis(StrRange),
    CloseParenthesis(StrRange),
    Key(StrRange),
    DoubleQuoted(StrRange),
    SingleQuoted(StrRange),
    Equal(StrRange),
    GreaterOrEqual(StrRange),
    Greater(StrRange),
    Little(StrRange),
    LittleOrEqual(StrRange),
    NotEqual(StrRange),
    And(StrRange),
    Or(StrRange),
    Whitespace(StrRange),
}
impl Token {
    /// Returns `true` when `other` is the same variant as `self`,
    /// regardless of the `StrRange` payloads.
    pub fn is_match_token_type(
        &self,
        other: Token,
    ) -> bool {
        // `mem::discriminant` compares variants directly; this replaces a
        // 23-arm match that had to be kept in sync with the enum by hand.
        std::mem::discriminant(self) == std::mem::discriminant(&other)
    }
    /// Builds a token of the same variant as `self`, carrying `new_span`
    /// instead of the original range. (Takes `&mut self` only for
    /// call-site compatibility; nothing is actually mutated.)
    pub fn reset_span(
        &mut self,
        new_span: StrRange,
    ) -> Token {
        match self {
            Token::Absolute(_) => Token::Absolute(new_span),
            Token::Dot(_) => Token::Dot(new_span),
            Token::At(_) => Token::At(new_span),
            Token::OpenArray(_) => Token::OpenArray(new_span),
            Token::CloseArray(_) => Token::CloseArray(new_span),
            Token::Asterisk(_) => Token::Asterisk(new_span),
            Token::Question(_) => Token::Question(new_span),
            Token::Comma(_) => Token::Comma(new_span),
            Token::Split(_) => Token::Split(new_span),
            Token::OpenParenthesis(_) => Token::OpenParenthesis(new_span),
            Token::CloseParenthesis(_) => Token::CloseParenthesis(new_span),
            Token::Key(_) => Token::Key(new_span),
            Token::DoubleQuoted(_) => Token::DoubleQuoted(new_span),
            Token::SingleQuoted(_) => Token::SingleQuoted(new_span),
            Token::Equal(_) => Token::Equal(new_span),
            Token::GreaterOrEqual(_) => Token::GreaterOrEqual(new_span),
            Token::Greater(_) => Token::Greater(new_span),
            Token::Little(_) => Token::Little(new_span),
            Token::LittleOrEqual(_) => Token::LittleOrEqual(new_span),
            Token::NotEqual(_) => Token::NotEqual(new_span),
            Token::And(_) => Token::And(new_span),
            Token::Or(_) => Token::Or(new_span),
            Token::Whitespace(_) => Token::Whitespace(new_span),
        }
    }
}
/// Semantic tokens emitted while replaying a parsed JSONPath AST.
#[derive(Debug, PartialEq, Clone)]
pub enum ParseToken {
    // '$'
    Absolute,
    // '@'
    Relative,
    // '.'
    In,
    // '..'
    Leaves,
    // '*'
    All,
    Key(StrRange),
    Keys(Vec<StrRange>),
    // []
    Array,
    // meta token: emitted after an array expression has been fully
    // visited (see `ParserNodeVisitor::visit`)
    ArrayEof,
    // ?( filter )
    Filter(FilterToken),
    // 1 : 2
    Range(Option<isize>, Option<isize>, Option<usize>),
    // 1, 2, 3
    Union(Vec<isize>),
    Number(f64),
    Bool(bool),
    Eof,
}
/// Comparison/logical operators usable inside a `?( … )` filter.
#[derive(Debug, PartialEq, Clone)]
pub enum FilterToken {
    Equal,
    NotEqual,
    Little,
    LittleOrEqual,
    Greater,
    GreaterOrEqual,
    And,
    Or,
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/paths/path_parser.rs | src/paths/path_parser.rs | use std::str::FromStr;
use super::parser_node_visitor::ParserNodeVisitor;
use super::parser_token_handler::ParserTokenHandler;
use super::str_reader::StrRange;
use super::tokenizer::{TokenError, TokenReader};
use super::tokens::{FilterToken, ParseToken, Token};
/// A compiled JSONPath expression: token reader plus parsed AST, ready
/// to be replayed against a `ParserTokenHandler`.
#[derive(Clone, Debug)]
pub struct PathParser<'a> {
    parser: ParserImpl<'a>,
}
impl<'a> PathParser<'a> {
    /// Tokenizes and parses `input`, returning a reusable parser.
    pub fn compile(input: &'a str) -> Result<Self, TokenError> {
        let mut parser = ParserImpl::new(input);
        parser.compile()?;
        Ok(PathParser { parser })
    }
    /// Replays the parsed AST into `parse_token_handler`.
    ///
    /// Never actually returns `Err`: `compile` guarantees `parse_node`
    /// is populated, hence the `unreachable!` guard.
    pub(crate) fn parse<F>(
        &self,
        parse_token_handler: &mut F,
    ) -> Result<(), String>
    where
        F: ParserTokenHandler<'a>,
    {
        if self.parser.parse_node.is_none() {
            unreachable!()
        }
        let token_reader = &self.parser.token_reader;
        if let Some(parse_node) = self.parser.parse_node.as_ref() {
            self.visit(parse_node, parse_token_handler, &|s| {
                token_reader.read_value(s)
            });
        }
        Ok(())
    }
}
impl<'a> ParserNodeVisitor<'a> for PathParser<'a> {}
/// Recursive-descent parser state: the token stream plus the AST root
/// once `compile` has run.
#[derive(Clone, Debug)]
struct ParserImpl<'a> {
    token_reader: TokenReader<'a>,
    parse_node: Option<ParserNode>,
}
impl<'a> ParserImpl<'a> {
pub fn new(input: &'a str) -> Self {
ParserImpl {
token_reader: TokenReader::new(input),
parse_node: None,
}
}
fn string_to_num<F, S: FromStr>(
string: &str,
msg_handler: F,
) -> Result<S, TokenError>
where
F: Fn() -> TokenError,
{
match string.parse() {
Ok(n) => Ok(n),
_ => Err(msg_handler()),
}
}
pub fn compile(&mut self) -> Result<&mut Self, TokenError> {
self.parse_node = Some(self.json_path()?);
Ok(self)
}
fn json_path(&mut self) -> Result<ParserNode, TokenError> {
debug!("#json_path");
match self.token_reader.next_token() {
Ok(Token::Absolute(_)) => {
let node = self.create_node(ParseToken::Absolute);
self.paths(node)
},
_ => Err(self.token_reader.to_error()),
}
}
fn paths(
&mut self,
prev: ParserNode,
) -> Result<ParserNode, TokenError> {
debug!("#paths");
match self.token_reader.peek_token() {
Ok(Token::Dot(_)) => {
self.eat_token();
self.paths_dot(prev)
},
Ok(Token::OpenArray(_)) => {
self.eat_token();
self.eat_whitespace();
let node = self.array(prev)?;
self.paths(node)
},
_ => Ok(prev),
}
}
fn paths_dot(
&mut self,
prev: ParserNode,
) -> Result<ParserNode, TokenError> {
debug!("#paths_dot");
let node = self.path(prev)?;
self.paths(node)
}
fn path(
&mut self,
prev: ParserNode,
) -> Result<ParserNode, TokenError> {
debug!("#path");
match self.token_reader.peek_token() {
Ok(Token::Dot(_)) => self.path_leaves(prev),
Ok(Token::Asterisk(_)) => self.path_in_all(prev),
Ok(Token::Key(_)) => self.path_in_key(prev),
Ok(Token::OpenArray(_)) => {
self.eat_token();
self.array(prev)
},
_ => Err(self.token_reader.to_error()),
}
}
fn path_leaves(
&mut self,
prev: ParserNode,
) -> Result<ParserNode, TokenError> {
debug!("#path_leaves");
self.eat_token();
match self.token_reader.peek_token() {
Ok(Token::Asterisk(_)) => self.path_leaves_all(prev),
Ok(Token::OpenArray(_)) => {
let mut leaves_node = self.create_node(ParseToken::Leaves);
leaves_node.left = Some(Box::new(prev));
Ok(self.paths(leaves_node)?)
},
_ => self.path_leaves_key(prev),
}
}
    /// Builds `Leaves(prev, key)` for a `..key` segment.
    #[allow(clippy::unnecessary_wraps)]
    fn path_leaves_key(
        &mut self,
        prev: ParserNode,
    ) -> Result<ParserNode, TokenError> {
        debug!("#path_leaves_key");
        Ok(ParserNode {
            token: ParseToken::Leaves,
            left: Some(Box::new(prev)),
            right: Some(Box::new(self.key()?)),
        })
    }
    /// Builds `Leaves(prev, All)` for a `..*` segment; consumes the `*`.
    #[allow(clippy::unnecessary_wraps)]
    fn path_leaves_all(
        &mut self,
        prev: ParserNode,
    ) -> Result<ParserNode, TokenError> {
        debug!("#path_leaves_all");
        self.eat_token();
        Ok(ParserNode {
            token: ParseToken::Leaves,
            left: Some(Box::new(prev)),
            right: Some(Box::new(self.create_node(ParseToken::All))),
        })
    }
    /// Builds `In(prev, All)` for a `.*` segment; consumes the `*`.
    #[allow(clippy::unnecessary_wraps)]
    fn path_in_all(
        &mut self,
        prev: ParserNode,
    ) -> Result<ParserNode, TokenError> {
        debug!("#path_in_all");
        self.eat_token();
        Ok(ParserNode {
            token: ParseToken::In,
            left: Some(Box::new(prev)),
            right: Some(Box::new(self.create_node(ParseToken::All))),
        })
    }
#[allow(clippy::unnecessary_wraps)]
fn path_in_key(
&mut self,
prev: ParserNode,
) -> Result<ParserNode, TokenError> {
debug!("#path_in_key");
Ok(ParserNode {
token: ParseToken::In,
left: Some(Box::new(prev)),
right: Some(Box::new(self.key()?)),
})
}
fn key(&mut self) -> Result<ParserNode, TokenError> {
debug!("#key");
match self.token_reader.next_token() {
Ok(Token::Key(s)) => Ok(self.create_node(ParseToken::Key(s))),
_ => Err(self.token_reader.to_error()),
}
}
fn boolean(&mut self) -> Result<ParserNode, TokenError> {
debug!("#boolean");
fn validation_bool_value(v: &str) -> bool {
let b = v.as_bytes();
!b.is_empty()
&& (b[0] == b't'
|| b[0] == b'T'
|| b[0] == b'f'
|| b[0] == b'F')
}
if let Ok(Token::Key(s)) = self.token_reader.next_token() {
let v = self.token_reader.read_value(&s);
if validation_bool_value(v) {
return Ok(self.create_node(ParseToken::Bool(
v.eq_ignore_ascii_case("true"),
)));
}
}
Err(self.token_reader.to_error())
}
    /// Parses the 2nd..nth comma-separated quoted keys of a
    /// `['a','b',...]` selector; `first_key` was consumed by the caller.
    fn array_keys(
        &mut self,
        first_key: StrRange,
    ) -> Result<ParserNode, TokenError> {
        let mut keys = vec![first_key];
        while let Ok(Token::Comma(_)) = self.token_reader.peek_token() {
            self.eat_token();
            self.eat_whitespace();
            match self.token_reader.next_token() {
                Ok(Token::SingleQuoted(s)) | Ok(Token::DoubleQuoted(s)) => {
                    keys.push(s);
                },
                _ => return Err(self.token_reader.to_error()),
            }
            self.eat_whitespace();
        }
        Ok(self.create_node(ParseToken::Keys(keys)))
    }
    /// Parses a quoted key inside `[...]`; if a comma follows, switches
    /// to multi-key (`Keys`) parsing via `array_keys`.
    fn array_quote_value(&mut self) -> Result<ParserNode, TokenError> {
        debug!("#array_quote_value");
        let next = self.token_reader.next_token();
        match next {
            Ok(Token::SingleQuoted(s)) | Ok(Token::DoubleQuoted(s)) => {
                if let Ok(Token::Comma(_)) = self.token_reader.peek_token() {
                    self.array_keys(s)
                } else {
                    Ok(self.create_node(ParseToken::Key(s)))
                }
            },
            _ => Err(self.token_reader.to_error()),
        }
    }
    /// Parses the selector body right after `[`: a `?(...)` filter, `*`,
    /// or an index/range/union/key value.
    fn array_start(
        &mut self,
        prev: ParserNode,
    ) -> Result<ParserNode, TokenError> {
        debug!("#array_start");
        match self.token_reader.peek_token() {
            Ok(Token::Question(_)) => {
                self.eat_token();
                Ok(ParserNode {
                    token: ParseToken::Array,
                    left: Some(Box::new(prev)),
                    right: Some(Box::new(self.filter()?)),
                })
            },
            Ok(Token::Asterisk(_)) => {
                self.eat_token();
                Ok(ParserNode {
                    token: ParseToken::Array,
                    left: Some(Box::new(prev)),
                    right: Some(Box::new(self.create_node(ParseToken::All))),
                })
            },
            _ => Ok(ParserNode {
                token: ParseToken::Array,
                left: Some(Box::new(prev)),
                right: Some(Box::new(self.array_value()?)),
            }),
        }
    }
    /// Parses a full `[...]` selector and requires the closing `]`.
    /// The dummy `StrRange::new(0, 0)` is only used for token-type
    /// (discriminant) matching inside `close_token`.
    fn array(
        &mut self,
        prev: ParserNode,
    ) -> Result<ParserNode, TokenError> {
        debug!("#array");
        let ret = self.array_start(prev)?;
        self.eat_whitespace();
        self.close_token(ret, Token::CloseArray(StrRange::new(0, 0)))
    }
    /// Parses a numeric index and dispatches on what follows: `,` starts
    /// a union, `:` starts a range, otherwise it is a single index.
    fn array_value_key(&mut self) -> Result<ParserNode, TokenError> {
        debug!("#array_value_key");
        if let Ok(Token::Key(s)) = self.token_reader.next_token() {
            let val = self.token_reader.read_value(&s);
            let digit =
                Self::string_to_num(val, || self.token_reader.to_error())?;
            self.eat_whitespace();
            match self.token_reader.peek_token() {
                Ok(Token::Comma(_)) => self.union(digit),
                Ok(Token::Split(_)) => self.range_from(digit),
                _ => Ok(self.create_node(ParseToken::Number(digit as f64))),
            }
        } else {
            Err(self.token_reader.to_error())
        }
    }
    /// Parses the value form inside `[...]`: a number (index/union/range
    /// start), a leading `:` range, or a quoted key.
    fn array_value(&mut self) -> Result<ParserNode, TokenError> {
        debug!("#array_value");
        match self.token_reader.peek_token() {
            Ok(Token::Key(_)) => self.array_value_key(),
            Ok(Token::Split(_)) => {
                self.eat_token();
                self.range_to()
            },
            Ok(Token::DoubleQuoted(_)) | Ok(Token::SingleQuoted(_)) => {
                self.array_quote_value()
            },
            Err(TokenError::Eof) => Ok(self.create_node(ParseToken::Eof)),
            _ => {
                self.eat_token();
                Err(self.token_reader.to_error())
            },
        }
    }
    /// Parses the remaining comma-separated numbers of a union selector
    /// such as `[1,2,3]`; `num` is the already-parsed first element.
    fn union(
        &mut self,
        num: isize,
    ) -> Result<ParserNode, TokenError> {
        debug!("#union");
        let mut values = vec![num];
        while matches!(self.token_reader.peek_token(), Ok(Token::Comma(_))) {
            self.eat_token();
            self.eat_whitespace();
            match self.token_reader.next_token() {
                Ok(Token::Key(s)) => {
                    let val = self.token_reader.read_value(&s);
                    let digit = Self::string_to_num(val, || {
                        self.token_reader.to_error()
                    })?;
                    values.push(digit);
                },
                _ => {
                    return Err(self.token_reader.to_error());
                },
            }
        }
        Ok(self.create_node(ParseToken::Union(values)))
    }
    /// Optionally parses `:<number>` (e.g. the step of a range).
    /// Returns `Ok(None)` when there is no `:` or no number after it.
    fn range_value<S: FromStr>(&mut self) -> Result<Option<S>, TokenError> {
        self.eat_whitespace();
        match self.token_reader.peek_token() {
            Ok(Token::Split(_)) => {
                self.eat_token();
                self.eat_whitespace();
            },
            _ => {
                return Ok(None);
            },
        }
        match self.token_reader.peek_token() {
            Ok(Token::Key(_)) => {},
            _ => {
                return Ok(None);
            },
        }
        match self.token_reader.next_token() {
            Ok(Token::Key(s)) => {
                let str_step = self.token_reader.read_value(&s);
                match Self::string_to_num(str_step, || {
                    self.token_reader.to_error()
                }) {
                    Ok(step) => Ok(Some(step)),
                    Err(e) => Err(e),
                }
            },
            _ => {
                // guarded by the Key peek just above
                unreachable!();
            },
        }
    }
    /// Parses what follows `<from>:`: a `to` bound, a second `:` with an
    /// optional step (`[from::step]`), or nothing (`[from:]`).
    fn range_from(
        &mut self,
        from: isize,
    ) -> Result<ParserNode, TokenError> {
        debug!("#range_from");
        self.eat_token();
        self.eat_whitespace();
        match self.token_reader.peek_token() {
            Ok(Token::Key(_)) => self.range(from),
            Ok(Token::Split(_)) => match self.range_value()? {
                Some(step) => Ok(self.create_node(ParseToken::Range(
                    Some(from),
                    None,
                    Some(step),
                ))),
                _ => Ok(self.create_node(ParseToken::Range(
                    Some(from),
                    None,
                    None,
                ))),
            },
            _ => {
                Ok(self.create_node(ParseToken::Range(Some(from), None, None)))
            },
        }
    }
    /// Parses ranges starting with `:`: `[::step]`, `[:]`, or
    /// `[:to(:step)]`.
    fn range_to(&mut self) -> Result<ParserNode, TokenError> {
        debug!("#range_to");
        if let Some(step) = self.range_value()? {
            return Ok(self.create_node(ParseToken::Range(
                None,
                None,
                Some(step),
            )));
        }
        if let Ok(Token::CloseArray(_)) = self.token_reader.peek_token() {
            return Ok(self.create_node(ParseToken::Range(None, None, None)));
        }
        match self.token_reader.next_token() {
            Ok(Token::Key(s)) => {
                let to_str = self.token_reader.read_value(&s);
                let to = Self::string_to_num(to_str, || {
                    self.token_reader.to_error()
                })?;
                let step = self.range_value()?;
                Ok(self.create_node(ParseToken::Range(None, Some(to), step)))
            },
            _ => Err(self.token_reader.to_error()),
        }
    }
    /// Parses `<from>:<to>(:<step>)`; `from` and the `:` were already
    /// consumed by the caller.
    fn range(
        &mut self,
        from: isize,
    ) -> Result<ParserNode, TokenError> {
        debug!("#range");
        match self.token_reader.next_token() {
            Ok(Token::Key(s)) => {
                let str_to = self.token_reader.read_value(&s);
                let to = Self::string_to_num(str_to, || {
                    self.token_reader.to_error()
                })?;
                let step = self.range_value()?;
                Ok(self.create_node(ParseToken::Range(
                    Some(from),
                    Some(to),
                    step,
                )))
            },
            _ => Err(self.token_reader.to_error()),
        }
    }
    /// Parses a `?(...)` filter body; the `?` was consumed by the caller
    /// and the surrounding parentheses are mandatory.
    fn filter(&mut self) -> Result<ParserNode, TokenError> {
        debug!("#filter");
        match self.token_reader.next_token() {
            Ok(Token::OpenParenthesis(_)) => {
                let ret = self.exprs()?;
                self.eat_whitespace();
                self.close_token(
                    ret,
                    Token::CloseParenthesis(StrRange::new(0, 0)),
                )
            },
            _ => Err(self.token_reader.to_error()),
        }
    }
    /// Parses one (possibly parenthesized) expression, then any
    /// `&&`/`||` continuation via `condition_expr`.
    fn exprs(&mut self) -> Result<ParserNode, TokenError> {
        self.eat_whitespace();
        debug!("#exprs");
        let node = match self.token_reader.peek_token() {
            Ok(Token::OpenParenthesis(_)) => {
                self.eat_token();
                trace!("\t-exprs - open_parenthesis");
                let ret = self.exprs()?;
                self.eat_whitespace();
                self.close_token(
                    ret,
                    Token::CloseParenthesis(StrRange::new(0, 0)),
                )?
            },
            _ => {
                trace!("\t-exprs - else");
                self.expr()?
            },
        };
        self.eat_whitespace();
        self.condition_expr(node)
    }
    /// Parses an optional `&&`/`||` operator chaining `prev` with the
    /// following expression; returns `prev` unchanged when absent.
    fn condition_expr(
        &mut self,
        prev: ParserNode,
    ) -> Result<ParserNode, TokenError> {
        debug!("#condition_expr");
        match self.token_reader.peek_token() {
            Ok(Token::And(_)) => {
                self.eat_token();
                Ok(ParserNode {
                    token: ParseToken::Filter(FilterToken::And),
                    left: Some(Box::new(prev)),
                    right: Some(Box::new(self.exprs()?)),
                })
            },
            Ok(Token::Or(_)) => {
                self.eat_token();
                Ok(ParserNode {
                    token: ParseToken::Filter(FilterToken::Or),
                    left: Some(Box::new(prev)),
                    right: Some(Box::new(self.exprs()?)),
                })
            },
            _ => Ok(prev),
        }
    }
    /// Parses one comparison expression. A term without a comparison
    /// operator is only valid if it began with `@` (a property-existence
    /// test such as `?(@.isbn)`), hence `has_prop_candidate`.
    fn expr(&mut self) -> Result<ParserNode, TokenError> {
        debug!("#expr");
        let has_prop_candidate =
            matches!(self.token_reader.peek_token(), Ok(Token::At(_)));
        let node = self.term()?;
        self.eat_whitespace();
        if matches!(
            self.token_reader.peek_token(),
            Ok(Token::Equal(_))
                | Ok(Token::NotEqual(_))
                | Ok(Token::Little(_))
                | Ok(Token::LittleOrEqual(_))
                | Ok(Token::Greater(_))
                | Ok(Token::GreaterOrEqual(_))
        ) {
            self.op(node)
        } else if has_prop_candidate {
            Ok(node)
        } else {
            Err(self.token_reader.to_error())
        }
    }
    /// Parses a numeric literal; a following `.` switches to float
    /// parsing in `term_num_float`.
    fn term_num(&mut self) -> Result<ParserNode, TokenError> {
        debug!("#term_num");
        match self.token_reader.next_token() {
            Ok(Token::Key(s)) => {
                let val = self.token_reader.read_value(&s);
                match self.token_reader.peek_token() {
                    Ok(Token::Dot(_)) => self.term_num_float(val),
                    _ => {
                        let number = Self::string_to_num(val, || {
                            self.token_reader.to_error()
                        })?;
                        Ok(self.create_node(ParseToken::Number(number)))
                    },
                }
            },
            _ => Err(self.token_reader.to_error()),
        }
    }
    /// Parses the fractional part after `<int>.` and reassembles the
    /// full float literal `num.frac` before converting it.
    fn term_num_float(
        &mut self,
        num: &'a str,
    ) -> Result<ParserNode, TokenError> {
        debug!("#term_num_float");
        self.eat_token();
        match self.token_reader.next_token() {
            Ok(Token::Key(s)) => {
                let frac = self.token_reader.read_value(&s);
                let number =
                    Self::string_to_num(&[num, ".", frac].concat(), || {
                        self.token_reader.to_error()
                    })?;
                Ok(self.create_node(ParseToken::Number(number)))
            },
            _ => Err(self.token_reader.to_error()),
        }
    }
    /// Parses a filter operand: a bare key (number or boolean), an `@`
    /// relative path, a `$` absolute path, or a quoted string.
    fn term(&mut self) -> Result<ParserNode, TokenError> {
        debug!("#term");
        if self.token_reader.peek_token().is_err() {
            return Err(self.token_reader.to_error());
        }
        let has_term_key =
            if let Ok(Token::Key(s)) = self.token_reader.peek_token() {
                Some(s.clone())
            } else {
                None
            };
        if let Some(s) = has_term_key {
            let key = self.token_reader.read_value(&s);
            // NOTE(review): as_bytes()[0] assumes a Key token is never
            // empty — appears guaranteed by the tokenizer; confirm.
            return match key.as_bytes()[0] {
                b'-' | b'0'..=b'9' => self.term_num(),
                _ => self.boolean(),
            };
        }
        match self.token_reader.peek_token() {
            Ok(Token::At(_)) => {
                self.eat_token();
                let node = self.create_node(ParseToken::Relative);
                match self.token_reader.peek_token() {
                    Ok(Token::Whitespace(_)) => {
                        self.eat_whitespace();
                        Ok(node)
                    },
                    _ => self.paths(node),
                }
            },
            Ok(Token::Absolute(_)) => self.json_path(),
            Ok(Token::DoubleQuoted(_)) | Ok(Token::SingleQuoted(_)) => {
                self.array_quote_value()
            },
            _ => Err(self.token_reader.to_error()),
        }
    }
    /// Parses a comparison operator and its right-hand term, producing a
    /// `Filter` node with `prev` on the left.
    fn op(
        &mut self,
        prev: ParserNode,
    ) -> Result<ParserNode, TokenError> {
        debug!("#op");
        let token = match self.token_reader.next_token() {
            Ok(Token::Equal(_)) => ParseToken::Filter(FilterToken::Equal),
            Ok(Token::NotEqual(_)) => ParseToken::Filter(FilterToken::NotEqual),
            Ok(Token::Little(_)) => ParseToken::Filter(FilterToken::Little),
            Ok(Token::LittleOrEqual(_)) => {
                ParseToken::Filter(FilterToken::LittleOrEqual)
            },
            Ok(Token::Greater(_)) => ParseToken::Filter(FilterToken::Greater),
            Ok(Token::GreaterOrEqual(_)) => {
                ParseToken::Filter(FilterToken::GreaterOrEqual)
            },
            _ => {
                return Err(self.token_reader.to_error());
            },
        };
        self.eat_whitespace();
        Ok(ParserNode {
            token,
            left: Some(Box::new(prev)),
            right: Some(Box::new(self.term()?)),
        })
    }
fn eat_whitespace(&mut self) {
while let Ok(Token::Whitespace(_)) = self.token_reader.peek_token() {
let _ = self.token_reader.next_token();
}
}
fn eat_token(&mut self) {
let _ = self.token_reader.next_token();
}
fn close_token(
&mut self,
ret: ParserNode,
token: Token,
) -> Result<ParserNode, TokenError> {
debug!("#close_token");
match self.token_reader.next_token() {
Ok(ref t) if t.is_match_token_type(token) => Ok(ret),
_ => Err(self.token_reader.to_error()),
}
}
fn create_node(
&mut self,
token: ParseToken,
) -> ParserNode {
ParserNode {
left: None,
right: None,
token,
}
}
}
/// Binary AST node produced by the path parser: `token` is the node's
/// operation and `left`/`right` are its optional child subtrees.
#[derive(Debug, Clone)]
pub struct ParserNode {
    pub left: Option<Box<ParserNode>>,
    pub right: Option<Box<ParserNode>>,
    pub token: ParseToken,
}
#[cfg(test)]
mod path_parser_tests {
use crate::paths::path_parser::PathParser;
use crate::paths::str_reader::StrRange;
use crate::paths::tokens::{FilterToken, ParseToken};
use crate::paths::ParserTokenHandler;
struct NodeVisitorTestImpl<'a> {
input: &'a str,
stack: Vec<ParseToken>,
}
impl<'a> NodeVisitorTestImpl<'a> {
fn new(input: &'a str) -> Self {
NodeVisitorTestImpl {
input,
stack: Vec::new(),
}
}
fn start(&mut self) -> Result<Vec<ParseToken>, String> {
let parser =
PathParser::compile(self.input).map_err(|_| "Token Error")?;
let _ = parser.parse(self);
Ok(self.stack.split_off(0))
}
}
impl<'a> ParserTokenHandler<'a> for NodeVisitorTestImpl<'a> {
fn handle<F>(
&mut self,
token: &ParseToken,
_: &F,
) where
F: Fn(&StrRange) -> &'a str,
{
trace!("handle {:?}", token);
self.stack.push(token.clone());
}
}
fn setup() {
let _ = env_logger::try_init();
}
fn run(input: &str) -> Result<Vec<ParseToken>, String> {
let mut interpreter = NodeVisitorTestImpl::new(input);
interpreter.start()
}
#[test]
fn parse_error() {
setup();
fn invalid(path: &str) {
assert!(run(path).is_err());
}
invalid("$[]");
invalid("$[a]");
invalid("$[?($.a)]");
invalid("$[?(@.a > @.b]");
invalid("$[?(@.a < @.b&&(@.c < @.d)]");
invalid("@.");
invalid("$..[?(a <= @.a)]"); // invalid term value
invalid("$['a', b]");
invalid("$[0, >=]");
invalid("$[a:]");
invalid("$[:a]");
invalid("$[::a]");
invalid("$[:>]");
invalid("$[1:>]");
invalid("$[1,,]");
invalid("$[?]");
invalid("$[?(1 = 1)]");
invalid("$[?(1 = >)]");
}
#[test]
fn parse_path() {
setup();
assert_eq!(
run("$.aa"),
Ok(vec![
ParseToken::Absolute,
ParseToken::In,
ParseToken::Key(StrRange::new(2, "aa".len()))
])
);
assert_eq!(
run("$.00.a"),
Ok(vec![
ParseToken::Absolute,
ParseToken::In,
ParseToken::Key(StrRange::new(2, "00".len())),
ParseToken::In,
ParseToken::Key(StrRange::new(5, "a".len()))
])
);
assert_eq!(
run("$.00.韓창.seok"),
Ok(vec![
ParseToken::Absolute,
ParseToken::In,
ParseToken::Key(StrRange::new(2, "00".len())),
ParseToken::In,
ParseToken::Key(StrRange::new(
5,
"韓창".chars().map(|c| c.len_utf8()).sum()
)),
ParseToken::In,
ParseToken::Key(StrRange::new(12, "seok".len()))
])
);
assert_eq!(
run("$.*"),
Ok(vec![ParseToken::Absolute, ParseToken::In, ParseToken::All])
);
assert_eq!(
run("$..*"),
Ok(vec![
ParseToken::Absolute,
ParseToken::Leaves,
ParseToken::All
])
);
assert_eq!(
run("$..[0]"),
Ok(vec![
ParseToken::Absolute,
ParseToken::Leaves,
ParseToken::Array,
ParseToken::Number(0.0),
ParseToken::ArrayEof
])
);
assert_eq!(
run("$.$a"),
Ok(vec![
ParseToken::Absolute,
ParseToken::In,
ParseToken::Key(StrRange::new(2, "$a".len()))
])
);
assert_eq!(
run("$.['$a']"),
Ok(vec![
ParseToken::Absolute,
ParseToken::Array,
ParseToken::Key(StrRange::new(3, "'$a'".len())),
ParseToken::ArrayEof,
])
);
if run("$.").is_ok() {
panic!();
}
if run("$..").is_ok() {
panic!();
}
if run("$. a").is_ok() {
panic!();
}
}
#[test]
fn parse_array_syntax() {
setup();
assert_eq!(
run("$.book[?(@.isbn)]"),
Ok(vec![
ParseToken::Absolute,
ParseToken::In,
ParseToken::Key(StrRange::new(2, "book".len())),
ParseToken::Array,
ParseToken::Relative,
ParseToken::In,
ParseToken::Key(StrRange::new(11, "isbn".len())),
ParseToken::ArrayEof
])
);
//
// Array도 컨텍스트 In으로 간주 할거라서 중첩되면 하나만
//
assert_eq!(
run("$.[*]"),
Ok(vec![
ParseToken::Absolute,
ParseToken::Array,
ParseToken::All,
ParseToken::ArrayEof
])
);
assert_eq!(
run("$.a[*]"),
Ok(vec![
ParseToken::Absolute,
ParseToken::In,
ParseToken::Key(StrRange::new(2, "a".len())),
ParseToken::Array,
ParseToken::All,
ParseToken::ArrayEof
])
);
assert_eq!(
run("$.a[*].가"),
Ok(vec![
ParseToken::Absolute,
ParseToken::In,
ParseToken::Key(StrRange::new(2, "a".len())),
ParseToken::Array,
ParseToken::All,
ParseToken::ArrayEof,
ParseToken::In,
ParseToken::Key(StrRange::new(7, '가'.len_utf8()))
])
);
assert_eq!(
run("$.a[0][1]"),
Ok(vec![
ParseToken::Absolute,
ParseToken::In,
ParseToken::Key(StrRange::new(2, "a".len())),
ParseToken::Array,
ParseToken::Number(0_f64),
ParseToken::ArrayEof,
ParseToken::Array,
ParseToken::Number(1_f64),
ParseToken::ArrayEof
])
);
assert_eq!(
run("$.a[1,2]"),
Ok(vec![
ParseToken::Absolute,
ParseToken::In,
ParseToken::Key(StrRange::new(2, "a".len())),
ParseToken::Array,
ParseToken::Union(vec![1, 2]),
ParseToken::ArrayEof
])
);
assert_eq!(
run("$.a[10:]"),
Ok(vec![
ParseToken::Absolute,
ParseToken::In,
ParseToken::Key(StrRange::new(2, "a".len())),
ParseToken::Array,
ParseToken::Range(Some(10), None, None),
ParseToken::ArrayEof
])
);
assert_eq!(
run("$.a[:11]"),
Ok(vec![
ParseToken::Absolute,
ParseToken::In,
ParseToken::Key(StrRange::new(2, "a".len())),
ParseToken::Array,
ParseToken::Range(None, Some(11), None),
ParseToken::ArrayEof
])
);
assert_eq!(
run("$.a[-12:13]"),
Ok(vec![
ParseToken::Absolute,
ParseToken::In,
ParseToken::Key(StrRange::new(2, "a".len())),
ParseToken::Array,
ParseToken::Range(Some(-12), Some(13), None),
ParseToken::ArrayEof
])
);
assert_eq!(
run(r#"$[0:3:2]"#),
Ok(vec![
ParseToken::Absolute,
ParseToken::Array,
ParseToken::Range(Some(0), Some(3), Some(2)),
ParseToken::ArrayEof
])
);
assert_eq!(
run(r#"$[:3:2]"#),
Ok(vec![
ParseToken::Absolute,
ParseToken::Array,
ParseToken::Range(None, Some(3), Some(2)),
ParseToken::ArrayEof
])
);
assert_eq!(
run(r#"$[:]"#),
Ok(vec![
ParseToken::Absolute,
ParseToken::Array,
ParseToken::Range(None, None, None),
ParseToken::ArrayEof
])
);
assert_eq!(
run(r#"$[::]"#),
Ok(vec![
ParseToken::Absolute,
ParseToken::Array,
ParseToken::Range(None, None, None),
ParseToken::ArrayEof
])
);
assert_eq!(
run(r#"$[::2]"#),
Ok(vec![
ParseToken::Absolute,
ParseToken::Array,
ParseToken::Range(None, None, Some(2)),
ParseToken::ArrayEof
])
);
assert_eq!(
run(r#"$["a", 'b']"#),
Ok(vec![
ParseToken::Absolute,
ParseToken::Array,
ParseToken::Keys(vec![
StrRange::new(2, "\"a\"".len()),
StrRange::new(7, "'b'".len())
]),
ParseToken::ArrayEof
])
);
assert_eq!(
run("$.a[?(1>2)]"),
Ok(vec![
ParseToken::Absolute,
ParseToken::In,
ParseToken::Key(StrRange::new(2, "a".len())),
ParseToken::Array,
ParseToken::Number(1_f64),
ParseToken::Number(2_f64),
ParseToken::Filter(FilterToken::Greater),
ParseToken::ArrayEof
])
);
assert_eq!(
run("$.a[?($.b>3)]"),
Ok(vec![
ParseToken::Absolute,
ParseToken::In,
ParseToken::Key(StrRange::new(2, "a".len())),
ParseToken::Array,
ParseToken::Absolute,
ParseToken::In,
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | true |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/paths/mod.rs | src/paths/mod.rs | pub use self::parser_token_handler::ParserTokenHandler;
pub use self::path_parser::PathParser;
pub use self::str_reader::StrRange;
pub use self::tokenizer::TokenError;
mod parser_node_visitor;
mod parser_token_handler;
mod path_parser;
mod str_reader;
mod tokenizer;
pub mod tokens;
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/paths/tokenizer.rs | src/paths/tokenizer.rs | use std::result::Result;
use super::str_reader::{ReaderError, StrRange, StrReader};
use super::tokens::Token;
// Single-character lexemes recognized by the tokenizer.
// (`CH_DOLLA` is a misspelling of "dollar" kept as-is; it is referenced
// throughout this module.)
const CH_DOLLA: char = '$';
const CH_DOT: char = '.';
const CH_ASTERISK: char = '*';
const CH_LARRAY: char = '[';
const CH_RARRAY: char = ']';
const CH_LPAREN: char = '(';
const CH_RPAREN: char = ')';
const CH_AT: char = '@';
const CH_QUESTION: char = '?';
const CH_COMMA: char = ',';
const CH_SEMICOLON: char = ':';
const CH_EQUAL: char = '=';
const CH_AMPERSAND: char = '&';
const CH_PIPE: char = '|';
const CH_LITTLE: char = '<';
const CH_GREATER: char = '>';
const CH_EXCLAMATION: char = '!';
const CH_SINGLE_QUOTE: char = '\'';
const CH_DOUBLE_QUOTE: char = '"';
/// Tokenization failure: either end-of-input (`Eof`) or an unexpected
/// character/token at a byte position in the original path string.
#[derive(Debug, Clone, PartialEq)]
pub enum TokenError {
    Eof,
    Position(usize),
}
/// Maps the reader's only error (`Eof`) to the tokenizer's error type.
fn to_token_error(read_err: ReaderError) -> TokenError {
    match read_err {
        ReaderError::Eof => TokenError::Eof,
    }
}
/// Low-level lexer over the raw path string; produces one `Token` per
/// `next_token` call with spans relative to the reader's own positions.
#[derive(Clone, Debug)]
pub(super) struct Tokenizer<'a> {
    input: StrReader<'a>,
}
impl<'a> Tokenizer<'a> {
    pub fn new(input: &'a str) -> Self {
        trace!("input: {}", input);
        Tokenizer {
            input: StrReader::new(input),
        }
    }
    /// Handles `$`: greedily consumes any following non-delimiter,
    /// non-whitespace chars. No extra chars -> `Absolute` root token;
    /// otherwise the whole run is a `Key` (e.g. `$abc` inside a filter).
    fn dolla(&mut self) -> Result<Token, TokenError> {
        let fun = |c: &char| match c {
            &CH_DOT | &CH_ASTERISK | &CH_LARRAY | &CH_RARRAY | &CH_LPAREN
            | &CH_RPAREN | &CH_AT | &CH_QUESTION | &CH_COMMA
            | &CH_SEMICOLON | &CH_LITTLE | &CH_GREATER | &CH_EQUAL
            | &CH_AMPERSAND | &CH_PIPE | &CH_EXCLAMATION => false,
            _ => !c.is_whitespace(),
        };
        let read = self.input.take_while(fun).map_err(to_token_error)?;
        if read.offset == 0 {
            Ok(Token::Absolute(read))
        } else {
            Ok(Token::Key(read))
        }
    }
    /// Reads a quoted run terminated by `ch`. If the run ends with a
    /// backslash, the quote was escaped: consume it and read on to the
    /// next closing quote.
    /// NOTE(review): only a single escaped quote appears to be handled;
    /// inputs with multiple `\'`/`\"` escapes likely mis-span — confirm.
    fn quote(
        &mut self,
        ch: char,
    ) -> Result<StrRange, TokenError> {
        let span = self
            .input
            .take_while(|c| *c != ch)
            .map_err(to_token_error)?;
        let val = self.input.read(&span);
        if let Some('\\') = val.chars().last() {
            self.input.next_char().map_err(to_token_error)?;
            let remain_span = self
                .input
                .take_while(|c| *c != ch)
                .map_err(to_token_error)?;
            self.input.next_char().map_err(to_token_error)?;
            Ok(StrRange::new(span.pos, remain_span.offset))
        } else {
            // consume the closing quote itself
            self.input.next_char().map_err(to_token_error)?;
            Ok(span)
        }
    }
    fn single_quote(
        &mut self,
        ch: char,
    ) -> Result<Token, TokenError> {
        Ok(Token::SingleQuoted(self.quote(ch)?))
    }
    fn double_quote(
        &mut self,
        ch: char,
    ) -> Result<Token, TokenError> {
        Ok(Token::DoubleQuoted(self.quote(ch)?))
    }
    /// `==` — a lone `=` is a position error.
    fn equal(
        &mut self,
        span: StrRange,
    ) -> Result<Token, TokenError> {
        let ch = self.input.peek_char().map_err(to_token_error)?;
        match ch {
            CH_EQUAL => {
                self.input.next_char().map_err(to_token_error)?;
                Ok(Token::Equal(span))
            },
            _ => Err(TokenError::Position(span.pos)),
        }
    }
    /// `!=` — a lone `!` is a position error.
    fn not_equal(
        &mut self,
        span: StrRange,
    ) -> Result<Token, TokenError> {
        let ch = self.input.peek_char().map_err(to_token_error)?;
        match ch {
            CH_EQUAL => {
                self.input.next_char().map_err(to_token_error)?;
                Ok(Token::NotEqual(span))
            },
            _ => Err(TokenError::Position(span.pos)),
        }
    }
    /// `<` or `<=`.
    fn little(
        &mut self,
        span: StrRange,
    ) -> Result<Token, TokenError> {
        let ch = self.input.peek_char().map_err(to_token_error)?;
        match ch {
            CH_EQUAL => {
                self.input.next_char().map_err(to_token_error)?;
                Ok(Token::LittleOrEqual(span))
            },
            _ => Ok(Token::Little(span)),
        }
    }
    /// `>` or `>=`.
    fn greater(
        &mut self,
        span: StrRange,
    ) -> Result<Token, TokenError> {
        let ch = self.input.peek_char().map_err(to_token_error)?;
        match ch {
            CH_EQUAL => {
                self.input.next_char().map_err(to_token_error)?;
                Ok(Token::GreaterOrEqual(span))
            },
            _ => Ok(Token::Greater(span)),
        }
    }
    /// `&&` — a lone `&` is a position error.
    fn and(
        &mut self,
        span: StrRange,
    ) -> Result<Token, TokenError> {
        let ch = self.input.peek_char().map_err(to_token_error)?;
        match ch {
            CH_AMPERSAND => {
                // peek above succeeded, so this next_char cannot fail
                let _ = self.input.next_char().map_err(to_token_error);
                Ok(Token::And(span))
            },
            _ => Err(TokenError::Position(span.pos)),
        }
    }
    /// `||` — a lone `|` is a position error.
    fn or(
        &mut self,
        span: StrRange,
    ) -> Result<Token, TokenError> {
        let ch = self.input.peek_char().map_err(to_token_error)?;
        match ch {
            CH_PIPE => {
                self.input.next_char().map_err(to_token_error)?;
                Ok(Token::Or(span))
            },
            _ => Err(TokenError::Position(span.pos)),
        }
    }
    /// Consumes a maximal whitespace run into a single token.
    fn whitespace(&mut self) -> Result<Token, TokenError> {
        let span = self
            .input
            .take_while(|c| c.is_whitespace())
            .map_err(to_token_error)?;
        Ok(Token::Whitespace(span))
    }
    /// Any remaining run of non-delimiter, non-whitespace chars is a
    /// bare `Key` (identifier or number literal).
    fn other(&mut self) -> Result<Token, TokenError> {
        let fun = |c: &char| match c {
            &CH_DOLLA | &CH_DOT | &CH_ASTERISK | &CH_LARRAY | &CH_RARRAY
            | &CH_LPAREN | &CH_RPAREN | &CH_AT | &CH_QUESTION | &CH_COMMA
            | &CH_SEMICOLON | &CH_LITTLE | &CH_GREATER | &CH_EQUAL
            | &CH_AMPERSAND | &CH_PIPE | &CH_EXCLAMATION => false,
            _ => !c.is_whitespace(),
        };
        let span = self.input.take_while(fun).map_err(to_token_error)?;
        Ok(Token::Key(span))
    }
    /// Dispatches on the already-consumed char `ch` (whose span is
    /// `span`) to the specific token rule.
    fn read_token(
        &mut self,
        span: StrRange,
        ch: char,
    ) -> Result<Token, TokenError> {
        match ch {
            CH_DOLLA => self.dolla(),
            CH_DOT => Ok(Token::Dot(span)),
            CH_ASTERISK => Ok(Token::Asterisk(span)),
            CH_LARRAY => Ok(Token::OpenArray(span)),
            CH_RARRAY => Ok(Token::CloseArray(span)),
            CH_LPAREN => Ok(Token::OpenParenthesis(span)),
            CH_RPAREN => Ok(Token::CloseParenthesis(span)),
            CH_AT => Ok(Token::At(span)),
            CH_QUESTION => Ok(Token::Question(span)),
            CH_COMMA => Ok(Token::Comma(span)),
            CH_SEMICOLON => Ok(Token::Split(span)),
            CH_SINGLE_QUOTE => self.single_quote(ch),
            CH_DOUBLE_QUOTE => self.double_quote(ch),
            CH_EQUAL => self.equal(span),
            CH_GREATER => self.greater(span),
            CH_LITTLE => self.little(span),
            CH_AMPERSAND => self.and(span),
            CH_PIPE => self.or(span),
            CH_EXCLAMATION => self.not_equal(span),
            _ if ch.is_whitespace() => self.whitespace(),
            _ => self.other(),
        }
    }
    /// Consumes one char and produces the next token; `Eof` when the
    /// input is exhausted.
    pub fn next_token(&mut self) -> Result<Token, TokenError> {
        let (span, ch) = self.input.next_char().map_err(to_token_error)?;
        match self.read_token(span, ch) {
            Ok(t) => Ok(t),
            Err(e) => Err(e),
        }
    }
    fn current_pos(&self) -> usize {
        self.input.current_pos()
    }
    fn read_span(
        &self,
        span: &StrRange,
    ) -> &'a str {
        self.input.read(span)
    }
}
/// Buffered wrapper over `Tokenizer` providing single-token lookahead
/// (`peeked`) and rebasing every token's span to absolute positions in
/// the original input (tracked via `curr_pos`).
#[derive(Clone, Debug)]
#[allow(dead_code)] // `err` is currently never written
pub(super) struct TokenReader<'a> {
    tokenizer: Tokenizer<'a>,
    curr_pos: usize,
    err: Option<TokenError>,
    peeked: Option<Result<Token, TokenError>>,
}
impl<'a> TokenReader<'a> {
    pub fn new(input: &'a str) -> Self {
        TokenReader {
            tokenizer: Tokenizer::new(input),
            curr_pos: 0,
            err: None,
            peeked: None,
        }
    }
    /// Resolves a span back into the slice of the original input.
    pub fn read_value(
        &self,
        str_range: &StrRange,
    ) -> &'a str {
        self.tokenizer.read_span(str_range)
    }
    /// Returns (without consuming) the next token. On the first peek the
    /// token is pulled from the tokenizer and its span rewritten to
    /// `[prev_pos, tokenizer.current_pos())` — i.e. absolute start and
    /// width in the original string. `curr_pos` advances to the end of
    /// the peeked token even though it is not yet consumed.
    pub fn peek_token(&mut self) -> Result<&Token, &TokenError> {
        let tokenizer = &mut self.tokenizer;
        let prev_pos = self.curr_pos;
        let peeked = self.peeked.get_or_insert_with(|| {
            let mut token = tokenizer.next_token();
            if let Ok(token) = &mut token {
                let token = token.reset_span(StrRange::new(
                    prev_pos,
                    tokenizer.current_pos() - prev_pos,
                ));
                return Ok(token);
            }
            token
        });
        self.curr_pos = tokenizer.current_pos();
        peeked.as_ref()
    }
    /// Consumes and returns the next token: the buffered peek if any,
    /// otherwise a fresh token with its span rebased exactly as in
    /// `peek_token`.
    pub fn next_token(&mut self) -> Result<Token, TokenError> {
        match self.peeked.take() {
            Some(v) => v,
            None => {
                let prev_pos = self.curr_pos;
                let tokenizer = &mut self.tokenizer;
                let mut token = tokenizer.next_token();
                if let Ok(token) = &mut token {
                    let current_pos = tokenizer.current_pos();
                    let token = token.reset_span(StrRange::new(
                        prev_pos,
                        current_pos - prev_pos,
                    ));
                    self.curr_pos = current_pos;
                    return Ok(token);
                }
                token
            },
        }
    }
    /// Converts the reader's current position into a parse error:
    /// `Eof` at end of input, otherwise `Position(curr_pos)`.
    pub fn to_error(&self) -> TokenError {
        let path = self.tokenizer.input.origin_str();
        let curr_pos = self.curr_pos;
        if path.len() == curr_pos {
            TokenError::Eof
        } else {
            TokenError::Position(curr_pos)
        }
    }
}
#[cfg(test)]
mod tokenizer_tests {
    // Exercises TokenReader: token kinds, absolute spans
    // (StrRange::new(pos, len)), peek semantics, multibyte keys and
    // escaped quotes. Every stream ends with TokenError::Eof.
    use crate::paths::str_reader::StrRange;
    use crate::paths::tokenizer::{TokenError, TokenReader};
    use crate::paths::tokens::Token;
    fn setup() {
        let _ = env_logger::try_init();
    }
    // Drains the reader, collecting all tokens plus the terminal error.
    fn collect_token(input: &str) -> (Vec<Token>, Option<TokenError>) {
        let mut tokenizer = TokenReader::new(input);
        let mut vec = vec![];
        loop {
            match tokenizer.next_token() {
                Ok(t) => vec.push(t),
                Err(e) => return (vec, Some(e)),
            }
        }
    }
    fn run(
        input: &str,
        expected: (Vec<Token>, Option<TokenError>),
    ) {
        let (vec, err) = collect_token(input);
        assert_eq!((vec, err), expected, "\"{}\"", input);
    }
    #[test]
    fn peek() {
        let mut tokenizer = TokenReader::new("$.a");
        match tokenizer.next_token() {
            Ok(t) => assert_eq!(Token::Absolute(StrRange::new(0, 1)), t),
            _ => panic!(),
        }
        // peeking twice returns the same buffered token
        match tokenizer.peek_token() {
            Ok(t) => assert_eq!(&Token::Dot(StrRange::new(1, 1)), t),
            _ => panic!(),
        }
        match tokenizer.peek_token() {
            Ok(t) => assert_eq!(&Token::Dot(StrRange::new(1, 1)), t),
            _ => panic!(),
        }
        match tokenizer.next_token() {
            Ok(t) => assert_eq!(Token::Dot(StrRange::new(1, 1)), t),
            _ => panic!(),
        }
    }
    #[test]
    fn token() {
        setup();
        run(
            "$.01.a",
            (
                vec![
                    Token::Absolute(StrRange::new(0, 1)),
                    Token::Dot(StrRange::new(1, 1)),
                    Token::Key(StrRange::new(2, 2)),
                    Token::Dot(StrRange::new(4, 1)),
                    Token::Key(StrRange::new(5, 1)),
                ],
                Some(TokenError::Eof),
            ),
        );
        run(
            "$.  []",
            (
                vec![
                    Token::Absolute(StrRange::new(0, 1)),
                    Token::Dot(StrRange::new(1, 1)),
                    Token::Whitespace(StrRange::new(2, 3)),
                    Token::OpenArray(StrRange::new(5, 1)),
                    Token::CloseArray(StrRange::new(6, 1)),
                ],
                Some(TokenError::Eof),
            ),
        );
        run(
            "$..",
            (
                vec![
                    Token::Absolute(StrRange::new(0, 1)),
                    Token::Dot(StrRange::new(1, 1)),
                    Token::Dot(StrRange::new(2, 1)),
                ],
                Some(TokenError::Eof),
            ),
        );
        run(
            "$..ab",
            (
                vec![
                    Token::Absolute(StrRange::new(0, 1)),
                    Token::Dot(StrRange::new(1, 1)),
                    Token::Dot(StrRange::new(2, 1)),
                    Token::Key(StrRange::new(3, "ab".len())),
                ],
                Some(TokenError::Eof),
            ),
        );
        run(
            "$..가 [",
            (
                vec![
                    Token::Absolute(StrRange::new(0, 1)),
                    Token::Dot(StrRange::new(1, 1)),
                    Token::Dot(StrRange::new(2, 1)),
                    Token::Key(StrRange::new(3, '가'.len_utf8())),
                    Token::Whitespace(StrRange::new(6, 1)),
                    Token::OpenArray(StrRange::new(7, 1)),
                ],
                Some(TokenError::Eof),
            ),
        );
        run(
            "[-1, 2 ]",
            (
                vec![
                    Token::OpenArray(StrRange::new(0, 1)),
                    Token::Key(StrRange::new(1, "-1".len())),
                    Token::Comma(StrRange::new(3, 1)),
                    Token::Whitespace(StrRange::new(4, 1)),
                    Token::Key(StrRange::new(5, "2".len())),
                    Token::Whitespace(StrRange::new(6, 1)),
                    Token::CloseArray(StrRange::new(7, 1)),
                ],
                Some(TokenError::Eof),
            ),
        );
        run(
            "[ 1 2 , 3 \"abc\" : -10 ]",
            (
                vec![
                    Token::OpenArray(StrRange::new(0, 1)),
                    Token::Whitespace(StrRange::new(1, 1)),
                    Token::Key(StrRange::new(2, "1".len())),
                    Token::Whitespace(StrRange::new(3, 1)),
                    Token::Key(StrRange::new(4, "2".len())),
                    Token::Whitespace(StrRange::new(5, 1)),
                    Token::Comma(StrRange::new(6, 1)),
                    Token::Whitespace(StrRange::new(7, 1)),
                    Token::Key(StrRange::new(8, "3".len())),
                    Token::Whitespace(StrRange::new(9, 1)),
                    Token::DoubleQuoted(StrRange::new(10, "\"abc\"".len())),
                    Token::Whitespace(StrRange::new(15, 1)),
                    Token::Split(StrRange::new(16, 1)),
                    Token::Whitespace(StrRange::new(17, 1)),
                    Token::Key(StrRange::new(18, "-10".len())),
                    Token::Whitespace(StrRange::new(21, 1)),
                    Token::CloseArray(StrRange::new(22, 1)),
                ],
                Some(TokenError::Eof),
            ),
        );
        run(
            "?(@.a가 <41.01)",
            (
                vec![
                    Token::Question(StrRange::new(0, 1)),
                    Token::OpenParenthesis(StrRange::new(1, 1)),
                    Token::At(StrRange::new(2, 1)),
                    Token::Dot(StrRange::new(3, 1)),
                    Token::Key(StrRange::new(
                        4,
                        "a가".chars().map(|c| c.len_utf8()).sum(),
                    )),
                    Token::Whitespace(StrRange::new(8, 1)),
                    Token::Little(StrRange::new(9, 1)),
                    Token::Key(StrRange::new(10, "41".len())),
                    Token::Dot(StrRange::new(12, 1)),
                    Token::Key(StrRange::new(13, "01".len())),
                    Token::CloseParenthesis(StrRange::new(15, 1)),
                ],
                Some(TokenError::Eof),
            ),
        );
        run(
            "?(@.a <4a.01)",
            (
                vec![
                    Token::Question(StrRange::new(0, 1)),
                    Token::OpenParenthesis(StrRange::new(1, 1)),
                    Token::At(StrRange::new(2, 1)),
                    Token::Dot(StrRange::new(3, 1)),
                    Token::Key(StrRange::new(4, "a".len())),
                    Token::Whitespace(StrRange::new(5, 1)),
                    Token::Little(StrRange::new(6, 1)),
                    Token::Key(StrRange::new(7, "4a".len())),
                    Token::Dot(StrRange::new(9, 1)),
                    Token::Key(StrRange::new(10, "01".len())),
                    Token::CloseParenthesis(StrRange::new(12, 1)),
                ],
                Some(TokenError::Eof),
            ),
        );
        run(
            "?($.c>@.d)",
            (
                vec![
                    Token::Question(StrRange::new(0, 1)),
                    Token::OpenParenthesis(StrRange::new(1, 1)),
                    Token::Absolute(StrRange::new(2, 1)),
                    Token::Dot(StrRange::new(3, 1)),
                    Token::Key(StrRange::new(4, 1)),
                    Token::Greater(StrRange::new(5, 1)),
                    Token::At(StrRange::new(6, 1)),
                    Token::Dot(StrRange::new(7, 1)),
                    Token::Key(StrRange::new(8, 1)),
                    Token::CloseParenthesis(StrRange::new(9, 1)),
                ],
                Some(TokenError::Eof),
            ),
        );
        run(
            "$[:]",
            (
                vec![
                    Token::Absolute(StrRange::new(0, 1)),
                    Token::OpenArray(StrRange::new(1, 1)),
                    Token::Split(StrRange::new(2, 1)),
                    Token::CloseArray(StrRange::new(3, 1)),
                ],
                Some(TokenError::Eof),
            ),
        );
        run(
            r#"$['single\'quote']"#,
            (
                vec![
                    Token::Absolute(StrRange::new(0, 1)),
                    Token::OpenArray(StrRange::new(1, 1)),
                    Token::SingleQuoted(StrRange::new(
                        2,
                        r#"'single\'quote'"#.len(),
                    )),
                    Token::CloseArray(StrRange::new(17, 1)),
                ],
                Some(TokenError::Eof),
            ),
        );
        run(
            r#"$['single\'1','single\'2']"#,
            (
                vec![
                    Token::Absolute(StrRange::new(0, 1)),
                    Token::OpenArray(StrRange::new(1, 1)),
                    Token::SingleQuoted(StrRange::new(
                        2,
                        r#"'single\'1'"#.len(),
                    )),
                    Token::Comma(StrRange::new(13, 1)),
                    Token::SingleQuoted(StrRange::new(
                        14,
                        r#"'single\'2'"#.len(),
                    )),
                    Token::CloseArray(StrRange::new(25, 1)),
                ],
                Some(TokenError::Eof),
            ),
        );
        run(
            r#"$["double\"quote"]"#,
            (
                vec![
                    Token::Absolute(StrRange::new(0, 1)),
                    Token::OpenArray(StrRange::new(1, 1)),
                    Token::DoubleQuoted(StrRange::new(
                        2,
                        r#""double\"quote""#.len(),
                    )),
                    Token::CloseArray(StrRange::new(17, 1)),
                ],
                Some(TokenError::Eof),
            ),
        );
    }
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/ffi/mod.rs | src/ffi/mod.rs | use std::ffi::{CStr, CString};
use std::os::raw::{c_char, c_void};
use {crate::parser, crate::select, crate::select_as_str};
/// Panic message used when a path argument is not valid UTF-8.
const INVALID_PATH: &str = "invalid path";
/// Panic message used when a JSON argument is not valid UTF-8.
/// (Fixed typo: was "invalud json".)
const INVALID_JSON: &str = "invalid json";
/// Interprets a NUL-terminated C string as UTF-8, panicking with `err_msg`
/// if the bytes are not valid UTF-8.
///
/// The FFI caller must guarantee `v` points at a valid, NUL-terminated
/// string; that precondition is not (and cannot be) checked here.
fn to_str(
    v: *const c_char,
    err_msg: &str,
) -> &str {
    // SAFETY: `v` is promised to be a valid C string by the FFI contract.
    let c_str = unsafe { CStr::from_ptr(v) };
    c_str.to_str().expect(err_msg)
}
/// Converts a Rust string into a heap-allocated, NUL-terminated C string and
/// returns a pointer to it. Ownership is deliberately leaked (`into_raw`) so
/// the buffer stays valid after this function returns; the C side owns it.
///
/// Panics if `v` contains an interior NUL byte.
fn to_char_ptr(v: &str) -> *const c_char {
    CString::new(v)
        .unwrap_or_else(|_| panic!("invalid string: {}", v))
        .into_raw()
}
/// C entry point: selects `path` from `json_str` and returns the result as a
/// newly allocated, leaked C string. Panics (rather than returning an error
/// code) on invalid UTF-8, an invalid path, or a selection failure.
#[no_mangle]
pub extern "C" fn ffi_select(
    json_str: *const c_char,
    path: *const c_char,
) -> *const c_char {
    let json = to_str(json_str, INVALID_JSON);
    let path = to_str(path, INVALID_PATH);
    match select_as_str(json, path) {
        Ok(result) => to_char_ptr(result.as_str()),
        Err(e) => panic!("{:?}", e),
    }
}
/// C entry point: compiles `path` into a parse tree and returns an owned,
/// type-erased pointer to it. The allocation is intentionally leaked;
/// callers hand the pointer back via `ffi_select_with_compiled_path`.
///
/// Panics if `path` is not valid UTF-8 or fails to compile.
///
/// (Cleanup: removed the dead `let _ = ref_node;` — a raw pointer is `Copy`,
/// so the binding had no effect — along with the now-unneeded
/// `#[allow(forgetting_copy_types)]`.)
#[no_mangle]
pub extern "C" fn ffi_path_compile(path: *const c_char) -> *mut c_void {
    let path = to_str(path, INVALID_PATH);
    #[allow(deprecated)]
    let node = parser::Parser::compile(path).unwrap();
    // Leak the boxed node; ownership is transferred to the C caller.
    Box::into_raw(Box::new(node)) as *mut c_void
}
/// C entry point: selects from `json_ptr` using a parse tree previously
/// returned by `ffi_path_compile`, and returns the matches serialized as a
/// JSON string (leaked C string). Panics on invalid input.
#[no_mangle]
pub extern "C" fn ffi_select_with_compiled_path(
    path_ptr: *mut c_void,
    json_ptr: *const c_char,
) -> *const c_char {
    // Rebuild the Box only to borrow the node; ManuallyDrop prevents the
    // drop at scope end, so the C side remains the owner of the allocation.
    #[allow(deprecated)]
    let node = std::mem::ManuallyDrop::new(unsafe {
        Box::from_raw(path_ptr as *mut parser::Node)
    });
    let json_str = to_str(json_ptr, INVALID_JSON);
    let json = serde_json::from_str(json_str)
        .unwrap_or_else(|_| panic!("invalid json string: {}", json_str));
    #[allow(deprecated)]
    let mut selector = select::Selector::default();
    let found = selector.compiled_path(&node).value(&json).select().unwrap();
    let result = serde_json::to_string(&found)
        .unwrap_or_else(|_| panic!("json serialize error: {:?}", found));
    to_char_ptr(result.as_str())
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/select/value_walker.rs | src/select/value_walker.rs | use serde_json::Value;
use std::collections::HashSet;
/// Stateless recursive walker over `serde_json::Value` trees; collects
/// references to matching nodes into a caller-supplied buffer.
pub(super) struct ValueWalker;

impl<'a> ValueWalker {
    /// From every node reachable from `vec`, collects the element at `index`
    /// of each array node (when present).
    pub fn all_with_num(
        vec: &[&'a Value],
        tmp: &mut Vec<&'a Value>,
        index: f64,
    ) {
        Self::walk(vec, tmp, &|v| {
            if v.is_array() {
                // NOTE(review): `index as usize` truncates/saturates for
                // fractional or negative values — assumes callers pass a
                // non-negative integral index; confirm at the call sites.
                v.get(index as usize).map(|item| vec![item])
            } else {
                None
            }
        });
    }
    /// From every node reachable from `vec`, collects by `key`: in filter
    /// mode the objects that *contain* `key`; otherwise the keyed values.
    pub fn all_with_str(
        vec: &[&'a Value],
        tmp: &mut Vec<&'a Value>,
        key: &str,
        is_filter: bool,
    ) {
        if is_filter {
            Self::walk(vec, tmp, &|v| match v {
                Value::Object(map) if map.contains_key(key) => Some(vec![v]),
                _ => None,
            });
        } else {
            Self::walk(vec, tmp, &|v| match v {
                Value::Object(map) => map.get(key).map(|v| vec![v]),
                _ => None,
            });
        }
    }
    /// Collects every child of every node reachable from `vec`
    /// (deep-scan `..*` semantics).
    pub fn all(
        vec: &[&'a Value],
        tmp: &mut Vec<&'a Value>,
    ) {
        Self::walk(vec, tmp, &|v| match v {
            Value::Array(vec) => Some(vec.iter().collect()),
            Value::Object(map) => {
                let mut tmp = Vec::new();
                for (_, v) in map {
                    tmp.push(v);
                }
                Some(tmp)
            },
            _ => None,
        });
    }
    // Runs `_walk` over each root in `vec`.
    fn walk<F>(
        vec: &[&'a Value],
        tmp: &mut Vec<&'a Value>,
        fun: &F,
    ) where
        F: Fn(&Value) -> Option<Vec<&Value>>,
    {
        for v in vec {
            Self::_walk(v, tmp, fun);
        }
    }
    // Pre-order depth-first walk: applies `fun` to `v` itself, then recurses
    // into array elements and object values.
    fn _walk<F>(
        v: &'a Value,
        tmp: &mut Vec<&'a Value>,
        fun: &F,
    ) where
        F: Fn(&Value) -> Option<Vec<&Value>>,
    {
        if let Some(mut ret) = fun(v) {
            tmp.append(&mut ret);
        }
        match v {
            Value::Array(vec) => {
                for v in vec {
                    Self::_walk(v, tmp, fun);
                }
            },
            Value::Object(map) => {
                for (_, v) in map {
                    Self::_walk(v, tmp, fun);
                }
            },
            _ => {},
        }
    }
    /// Collects objects that contain `key`, descending through arrays (but
    /// not into object values); de-duplicated by node address so a shared
    /// subtree is reported once.
    pub fn walk_dedup(
        v: &'a Value,
        tmp: &mut Vec<&'a Value>,
        key: &str,
        visited: &mut HashSet<*const Value>,
    ) {
        match v {
            Value::Object(map) => {
                if map.contains_key(key) {
                    let ptr = v as *const Value;
                    if !visited.contains(&ptr) {
                        visited.insert(ptr);
                        tmp.push(v)
                    }
                }
            },
            Value::Array(vec) => {
                for v in vec {
                    Self::walk_dedup(v, tmp, key, visited);
                }
            },
            _ => {},
        }
    }
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/select/mod.rs | src/select/mod.rs | use std::collections::HashSet;
use std::fmt;
use serde_json::map::Entry;
use serde_json::{Number, Value};
use crate::parser::*;
use self::expr_term::*;
use self::value_walker::ValueWalker;
mod cmp;
mod expr_term;
mod value_walker;
/// Widens a `serde_json` number to `f64`, whatever its internal
/// representation (i64, f64 or u64).
fn to_f64(n: &Number) -> f64 {
    if let Some(i) = n.as_i64() {
        i as f64
    } else if n.is_f64() {
        n.as_f64().unwrap()
    } else {
        n.as_u64().unwrap() as f64
    }
}
/// Clamps a possibly-negative index into `0..=len`: negative values count
/// from the end (`-1` maps to `len - 1`), and the result is saturated into
/// the valid range on both sides.
fn abs_index(
    n: isize,
    len: usize,
) -> usize {
    let len = len as isize;
    let clamped = if n < 0 { (n + len).max(0) } else { n.min(len) };
    clamped as usize
}
/// Key recorded while evaluating a filter expression: a concrete object key,
/// or `All` when every key matches (wildcard).
#[derive(Debug, PartialEq)]
enum FilterKey {
    String(String),
    All,
}
/// Errors produced while compiling a path or selecting from a value.
pub enum JsonPathError {
    /// No path was set on the selector before evaluating.
    EmptyPath,
    /// No JSON value was set on the selector before evaluating.
    EmptyValue,
    /// Path compilation failed; payload is the parser message.
    Path(String),
    /// (De)serialization failed; payload is the serde message.
    Serde(String),
}
// Marker impl so `JsonPathError` can be used as a `dyn std::error::Error`;
// the required `Debug`/`Display` supertraits are implemented below.
impl std::error::Error for JsonPathError {}
/// `Debug` delegates to the user-facing `Display` rendering, so `{:?}` and
/// `{}` produce identical output for this error type.
impl fmt::Debug for JsonPathError {
    fn fmt(
        &self,
        f: &mut fmt::Formatter,
    ) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}
/// Human-readable rendering of the error (also reused by `Debug`).
impl fmt::Display for JsonPathError {
    fn fmt(
        &self,
        f: &mut fmt::Formatter,
    ) -> fmt::Result {
        match self {
            JsonPathError::EmptyPath => f.write_str("path not set"),
            JsonPathError::EmptyValue => f.write_str("json value not set"),
            // `write!` formats straight into the formatter instead of
            // allocating an intermediate String via `format!`; the emitted
            // bytes are unchanged.
            JsonPathError::Path(msg) => {
                write!(f, "path error: \n{}\n", msg)
            },
            JsonPathError::Serde(msg) => {
                write!(f, "serde error: \n{}\n", msg)
            },
        }
    }
}
// Stack of filter-expression terms built up while visiting a path.
// `None` entries mark the start of a new filter context (see
// `new_filter_context`); `Some(term)` entries are pending operands.
#[derive(Debug, Default)]
struct FilterTerms<'a>(Vec<Option<ExprTerm<'a>>>);
/// Term-stack operations used by the selector while evaluating filters.
///
/// Fix: several occurrences of `&current` had been corrupted into the
/// mojibake `¤t` (the HTML entity `&curren` rendered as `¤`), which is
/// not valid Rust; they are restored to `&current` here.
impl<'a> FilterTerms<'a> {
    /// Opens a new filter scope by pushing an empty slot onto the stack.
    fn new_filter_context(&mut self) {
        self.0.push(None);
        debug!("new_filter_context: {:?}", self.0);
    }
    /// True when nothing (not even an empty context) is on the stack.
    fn is_term_empty(&self) -> bool {
        self.0.is_empty()
    }
    fn push_term(
        &mut self,
        term: Option<ExprTerm<'a>>,
    ) {
        self.0.push(term);
    }
    #[allow(clippy::option_option)]
    fn pop_term(&mut self) -> Option<Option<ExprTerm<'a>>> {
        self.0.pop()
    }
    /// Applies `fun` to the JSON values held by `e` (which must be
    /// `ExprTerm::Json`) and pushes the filtered result back on the stack.
    fn filter_json_term<
        F: Fn(
            &Vec<&'a Value>,
            &mut Vec<&'a Value>,
            &mut HashSet<usize>,
        ) -> FilterKey,
    >(
        &mut self,
        e: ExprTerm<'a>,
        fun: F,
    ) {
        debug!("filter_json_term: {:?}", e);
        if let ExprTerm::Json(rel, fk, vec) = e {
            let mut tmp = Vec::new();
            let mut not_matched = HashSet::new();
            let filter_key = if let Some(FilterKey::String(key)) = fk {
                // Dereference the previously-recorded key on each object so
                // `fun` sees the keyed values rather than their parents.
                let key_contained = &vec
                    .iter()
                    .map(|v| match v {
                        Value::Object(map) if map.contains_key(&key) => {
                            map.get(&key).unwrap()
                        },
                        _ => v,
                    })
                    .collect();
                fun(key_contained, &mut tmp, &mut not_matched)
            } else {
                fun(&vec, &mut tmp, &mut not_matched)
            };
            if rel.is_some() {
                self.0
                    .push(Some(ExprTerm::Json(rel, Some(filter_key), tmp)));
            } else {
                // No relative set recorded yet: the parents are the original
                // values minus the indices `fun` marked as not matching.
                let filtered: Vec<&Value> = vec
                    .iter()
                    .enumerate()
                    .filter(|(idx, _)| !not_matched.contains(idx))
                    .map(|(_, v)| *v)
                    .collect();
                self.0.push(Some(ExprTerm::Json(
                    Some(filtered),
                    Some(filter_key),
                    tmp,
                )));
            }
        } else {
            unreachable!("unexpected: ExprTerm: {:?}", e);
        }
    }
    /// Builds a fresh `Json` term from the current node set via `fun` and
    /// pushes it (used when the stack's top is an empty context).
    fn push_json_term<
        F: Fn(
            &Vec<&'a Value>,
            &mut Vec<&'a Value>,
            &mut HashSet<usize>,
        ) -> FilterKey,
    >(
        &mut self,
        current: &Option<Vec<&'a Value>>,
        fun: F,
    ) {
        debug!("push_json_term: {:?}", &current);
        if let Some(current) = &current {
            let mut tmp = Vec::new();
            let mut not_matched = HashSet::new();
            let filter_key = fun(current, &mut tmp, &mut not_matched);
            self.0
                .push(Some(ExprTerm::Json(None, Some(filter_key), tmp)));
        }
    }
    /// Applies `fun` either to the term on top of the stack or — when the
    /// top is an empty context — to the current node set.
    fn filter<
        F: Fn(
            &Vec<&'a Value>,
            &mut Vec<&'a Value>,
            &mut HashSet<usize>,
        ) -> FilterKey,
    >(
        &mut self,
        current: &Option<Vec<&'a Value>>,
        fun: F,
    ) {
        if let Some(peek) = self.0.pop() {
            if let Some(e) = peek {
                self.filter_json_term(e, fun);
            } else {
                self.push_json_term(current, fun);
            }
        }
    }
    /// Deep-scan filter: keep every descendant object containing `key`.
    fn filter_all_with_str(
        &mut self,
        current: &Option<Vec<&'a Value>>,
        key: &str,
    ) {
        self.filter(current, |vec, tmp, _| {
            ValueWalker::all_with_str(vec, tmp, key, true);
            FilterKey::All
        });
        debug!("filter_all_with_str : {}, {:?}", key, self.0);
    }
    /// Child filter: keep objects containing `key` (descending through
    /// arrays), de-duplicated by node address; records non-matches.
    fn filter_next_with_str(
        &mut self,
        current: &Option<Vec<&'a Value>>,
        key: &str,
    ) {
        self.filter(current, |vec, tmp, not_matched| {
            let mut visited = HashSet::new();
            for (idx, v) in vec.iter().enumerate() {
                match v {
                    Value::Object(map) => {
                        if map.contains_key(key) {
                            let ptr = *v as *const Value;
                            if !visited.contains(&ptr) {
                                visited.insert(ptr);
                                tmp.push(v)
                            }
                        } else {
                            not_matched.insert(idx);
                        }
                    },
                    Value::Array(vec) => {
                        not_matched.insert(idx);
                        for v in vec {
                            ValueWalker::walk_dedup(v, tmp, key, &mut visited);
                        }
                    },
                    _ => {
                        not_matched.insert(idx);
                    },
                }
            }
            FilterKey::String(key.to_owned())
        });
        debug!("filter_next_with_str : {}, {:?}", key, self.0);
    }
    /// Indexes each array child of the current nodes (including arrays one
    /// level down inside objects); empty result also pops the stale term.
    fn collect_next_with_num(
        &mut self,
        current: &Option<Vec<&'a Value>>,
        index: f64,
    ) -> Option<Vec<&'a Value>> {
        fn _collect<'a>(
            tmp: &mut Vec<&'a Value>,
            vec: &'a [Value],
            index: f64,
        ) {
            let index = abs_index(index as isize, vec.len());
            if let Some(v) = vec.get(index) {
                tmp.push(v);
            }
        }
        if let Some(current) = current {
            let mut tmp = Vec::new();
            for c in current {
                match c {
                    Value::Object(map) => {
                        for k in map.keys() {
                            if let Some(Value::Array(vec)) = map.get(k) {
                                _collect(&mut tmp, vec, index);
                            }
                        }
                    },
                    Value::Array(vec) => {
                        _collect(&mut tmp, vec, index);
                    },
                    _ => {},
                }
            }
            if tmp.is_empty() {
                self.0.pop();
                return Some(vec![]);
            } else {
                return Some(tmp);
            }
        }
        debug!("collect_next_with_num : {:?}, {:?}", &index, &current);
        None
    }
    /// Collects every direct child (object values and array elements).
    fn collect_next_all(
        &mut self,
        current: &Option<Vec<&'a Value>>,
    ) -> Option<Vec<&'a Value>> {
        if let Some(current) = current {
            let mut tmp = Vec::new();
            for c in current {
                match c {
                    Value::Object(map) => {
                        for (_, v) in map {
                            tmp.push(v)
                        }
                    },
                    Value::Array(vec) => {
                        for v in vec {
                            tmp.push(v);
                        }
                    },
                    _ => {},
                }
            }
            return Some(tmp);
        }
        debug!("collect_next_all : {:?}", &current);
        None
    }
    /// Collects the values under each of `keys` on the current objects;
    /// empty result also pops the stale term.
    fn collect_next_with_str(
        &mut self,
        current: &Option<Vec<&'a Value>>,
        keys: &[String],
    ) -> Option<Vec<&'a Value>> {
        if let Some(current) = current {
            let mut tmp = Vec::new();
            for c in current {
                if let Value::Object(map) = c {
                    for key in keys {
                        if let Some(v) = map.get(key) {
                            tmp.push(v)
                        }
                    }
                }
            }
            if tmp.is_empty() {
                self.0.pop();
                return Some(vec![]);
            } else {
                return Some(tmp);
            }
        }
        debug!("collect_next_with_str : {:?}, {:?}", keys, &current);
        None
    }
    /// Deep-scan: collects every descendant of the current nodes.
    fn collect_all(
        &mut self,
        current: &Option<Vec<&'a Value>>,
    ) -> Option<Vec<&'a Value>> {
        if let Some(current) = current {
            let mut tmp = Vec::new();
            ValueWalker::all(current, &mut tmp);
            return Some(tmp);
        }
        debug!("collect_all: {:?}", &current);
        None
    }
    /// Deep-scan: collects every descendant value stored under `key`.
    fn collect_all_with_str(
        &mut self,
        current: &Option<Vec<&'a Value>>,
        key: &str,
    ) -> Option<Vec<&'a Value>> {
        if let Some(current) = current {
            let mut tmp = Vec::new();
            ValueWalker::all_with_str(current, &mut tmp, key, false);
            return Some(tmp);
        }
        debug!("collect_all_with_str: {}, {:?}", key, &current);
        None
    }
    /// Deep-scan: collects element `index` of every descendant array.
    fn collect_all_with_num(
        &mut self,
        current: &Option<Vec<&'a Value>>,
        index: f64,
    ) -> Option<Vec<&'a Value>> {
        if let Some(current) = current {
            let mut tmp = Vec::new();
            ValueWalker::all_with_num(current, &mut tmp, index);
            return Some(tmp);
        }
        debug!("collect_all_with_num: {}, {:?}", index, &current);
        None
    }
}
/// Deprecated path selector: pairs a compiled `Node` (owned or borrowed)
/// with a root JSON value and evaluates the path against it.
#[deprecated(since = "0.4.0", note = "Please use `JsonSelector`")]
#[derive(Debug, Default)]
pub struct Selector<'a, 'b> {
    // Owned compiled path (set via `str_path`).
    #[allow(deprecated)]
    node: Option<Node>,
    // Borrowed compiled path (set via `compiled_path`); mutually exclusive
    // with `node`.
    #[allow(deprecated)]
    node_ref: Option<&'b Node>,
    // Root JSON value the path is evaluated against.
    value: Option<&'a Value>,
    // Parse-token stack maintained while visiting the compiled path.
    tokens: Vec<ParseToken>,
    // Values matched so far; `None` until evaluation starts.
    current: Option<Vec<&'a Value>>,
    // Nested selectors spawned for absolute (`$`) paths inside filters.
    #[allow(deprecated)]
    selectors: Vec<Selector<'a, 'b>>,
    // Filter-expression term stack.
    selector_filter: FilterTerms<'a>,
}
#[allow(deprecated)]
impl<'a, 'b> Selector<'a, 'b> {
    /// Creates an empty selector; path and value are supplied via the
    /// builder methods below.
    pub fn new() -> Self {
        Self::default()
    }
    /// Compiles `path` and stores it as the owned node, clearing any
    /// previously borrowed node.
    pub fn str_path(
        &mut self,
        path: &str,
    ) -> Result<&mut Self, JsonPathError> {
        debug!("path : {}", path);
        self.node_ref.take();
        self.node = Some(Parser::compile(path).map_err(JsonPathError::Path)?);
        Ok(self)
    }
    /// Returns the compiled node, whether owned (`str_path`) or borrowed
    /// (`compiled_path`).
    pub fn node_ref(&self) -> Option<&Node> {
        if let Some(node) = &self.node {
            return Some(node);
        }
        if let Some(node) = &self.node_ref {
            return Some(*node);
        }
        None
    }
    /// Uses an externally compiled node, clearing any owned node.
    pub fn compiled_path(
        &mut self,
        node: &'b Node,
    ) -> &mut Self {
        self.node.take();
        self.node_ref = Some(node);
        self
    }
    /// Clears the current result set so the selector can be reused.
    pub fn reset_value(&mut self) -> &mut Self {
        self.current = None;
        self
    }
    /// Sets the root JSON value to select from.
    pub fn value(
        &mut self,
        v: &'a Value,
    ) -> &mut Self {
        self.value = Some(v);
        self
    }
    // Runs the visitor over the compiled node, populating `self.current`.
    // A borrowed node is consumed (taken); an owned node is put back so the
    // selector can be re-run.
    fn _select(&mut self) -> Result<(), JsonPathError> {
        if self.node_ref.is_some() {
            let node_ref = self.node_ref.take().unwrap();
            self.visit(node_ref);
            return Ok(());
        }
        if self.node.is_none() {
            return Err(JsonPathError::EmptyPath);
        }
        let node = self.node.take().unwrap();
        self.visit(&node);
        self.node = Some(node);
        Ok(())
    }
    /// Selects and deserializes every matched value into `T`.
    pub fn select_as<T: serde::de::DeserializeOwned>(
        &mut self
    ) -> Result<Vec<T>, JsonPathError> {
        self._select()?;
        match &self.current {
            Some(vec) => {
                let mut ret = Vec::new();
                for v in vec {
                    match T::deserialize(*v) {
                        Ok(v) => ret.push(v),
                        Err(e) => {
                            return Err(JsonPathError::Serde(e.to_string()));
                        },
                    }
                }
                Ok(ret)
            },
            _ => Err(JsonPathError::EmptyValue),
        }
    }
    /// Selects and serializes the matches as a JSON array string.
    pub fn select_as_str(&mut self) -> Result<String, JsonPathError> {
        self._select()?;
        match &self.current {
            Some(r) => Ok(serde_json::to_string(r)
                .map_err(|e| JsonPathError::Serde(e.to_string()))?),
            _ => Err(JsonPathError::EmptyValue),
        }
    }
    /// Selects and returns references to the matched values.
    pub fn select(&mut self) -> Result<Vec<&'a Value>, JsonPathError> {
        self._select()?;
        match &self.current {
            Some(r) => Ok(r.to_vec()),
            _ => Err(JsonPathError::EmptyValue),
        }
    }
    // Bridges nested absolute-path (`$`) selectors spawned inside filter
    // expressions: when the nested selector completes (next `$`, `@`, or
    // filter token), its result set is pushed as a term onto the innermost
    // remaining filter stack. Returns `true` if the token was forwarded to
    // a nested selector instead of being handled here.
    fn compute_absolute_path_filter(
        &mut self,
        token: &ParseToken,
    ) -> bool {
        if !self.selectors.is_empty() {
            match token {
                ParseToken::Absolute
                | ParseToken::Relative
                | ParseToken::Filter(_) => {
                    let selector = self.selectors.pop().unwrap();
                    if let Some(current) = &selector.current {
                        let term = current.into();
                        if let Some(s) = self.selectors.last_mut() {
                            s.selector_filter.push_term(Some(term));
                        } else {
                            self.selector_filter.push_term(Some(term));
                        }
                    } else {
                        unreachable!()
                    }
                },
                _ => {},
            }
        }
        if let Some(selector) = self.selectors.last_mut() {
            selector.visit_token(token);
            true
        } else {
            false
        }
    }
}
#[allow(deprecated)]
impl Selector<'_, '_> {
    // Handles `$`: seeds `current` with the root value, or — when already
    // mid-evaluation (i.e. `$` appears inside a filter) — spawns a nested
    // selector rooted at the document root.
    fn visit_absolute(&mut self) {
        if self.current.is_some() {
            let mut selector = Selector::default();
            if let Some(value) = self.value {
                selector.value = Some(value);
                selector.current = Some(vec![value]);
                self.selectors.push(selector);
            }
            return;
        }
        if let Some(v) = &self.value {
            self.current = Some(vec![v]);
        }
    }
    // Handles `@`: for `..[...]` the pending deep-scan is applied first,
    // then a fresh filter context is opened on the term stack.
    fn visit_relative(&mut self) {
        if let Some(ParseToken::Array) = self.tokens.last() {
            let array_token = self.tokens.pop();
            if let Some(ParseToken::Leaves) = self.tokens.last() {
                self.tokens.pop();
                self.current = self.selector_filter.collect_all(&self.current);
            }
            self.tokens.push(array_token.unwrap());
        }
        self.selector_filter.new_filter_context();
    }
    // Handles `]`: resolves whatever term the bracket expression produced
    // (index, quoted key, filter result) into the new `current` set.
    fn visit_array_eof(&mut self) {
        if self.is_last_before_token_match(ParseToken::Array) {
            if let Some(Some(e)) = self.selector_filter.pop_term() {
                if let ExprTerm::String(key) = e {
                    self.selector_filter
                        .filter_next_with_str(&self.current, &key);
                    self.tokens.pop();
                    return;
                }
                // Not a bare key — put the term back for the generic path.
                self.selector_filter.push_term(Some(e));
            }
        }
        if self.is_last_before_token_match(ParseToken::Leaves) {
            self.tokens.pop();
            self.tokens.pop();
            if let Some(Some(e)) = self.selector_filter.pop_term() {
                let selector_filter_consumed = match &e {
                    ExprTerm::Number(n) => {
                        self.current = self
                            .selector_filter
                            .collect_all_with_num(&self.current, to_f64(n));
                        self.selector_filter.pop_term();
                        true
                    },
                    ExprTerm::String(key) => {
                        self.current = self
                            .selector_filter
                            .collect_all_with_str(&self.current, key);
                        self.selector_filter.pop_term();
                        true
                    },
                    _ => {
                        self.selector_filter.push_term(Some(e));
                        false
                    },
                };
                if selector_filter_consumed {
                    return;
                }
            }
        }
        if let Some(Some(e)) = self.selector_filter.pop_term() {
            match e {
                ExprTerm::Number(n) => {
                    self.current = self
                        .selector_filter
                        .collect_next_with_num(&self.current, to_f64(&n));
                },
                ExprTerm::String(key) => {
                    self.current = self
                        .selector_filter
                        .collect_next_with_str(&self.current, &[key]);
                },
                ExprTerm::Json(rel, _, v) => {
                    // A filter result: prefer the relative (parent) set when
                    // one was recorded, otherwise the matched values.
                    if v.is_empty() {
                        self.current = Some(vec![]);
                    } else if let Some(vec) = rel {
                        self.current = Some(vec);
                    } else {
                        self.current = Some(v);
                    }
                },
                ExprTerm::Bool(false) => {
                    self.current = Some(vec![]);
                },
                _ => {},
            }
        }
        self.tokens.pop();
    }
    // True when `token` is the second-to-last entry on the token stack.
    fn is_last_before_token_match(
        &mut self,
        token: ParseToken,
    ) -> bool {
        if self.tokens.len() > 1 {
            return token == self.tokens[self.tokens.len() - 2];
        }
        false
    }
    // Handles `*`: collects children, or all descendants after `..`.
    fn visit_all(&mut self) {
        if let Some(ParseToken::Array) = self.tokens.last() {
            self.tokens.pop();
        }
        match self.tokens.last() {
            Some(ParseToken::Leaves) => {
                self.tokens.pop();
                self.current = self.selector_filter.collect_all(&self.current);
            },
            Some(ParseToken::In) => {
                self.tokens.pop();
                self.current =
                    self.selector_filter.collect_next_all(&self.current);
            },
            _ => {
                self.current =
                    self.selector_filter.collect_next_all(&self.current);
            },
        }
    }
    // Handles a bare key: inside `[...]` it becomes a string term on the
    // filter stack; otherwise it navigates (`.key`) or deep-scans (`..key`),
    // against either the current node set or the active filter.
    fn visit_key(
        &mut self,
        key: &str,
    ) {
        if let Some(ParseToken::Array) = self.tokens.last() {
            self.selector_filter
                .push_term(Some(ExprTerm::String(key.to_string())));
            return;
        }
        if let Some(t) = self.tokens.pop() {
            if self.selector_filter.is_term_empty() {
                match t {
                    ParseToken::Leaves => {
                        self.current = self
                            .selector_filter
                            .collect_all_with_str(&self.current, key)
                    },
                    ParseToken::In => {
                        self.current =
                            self.selector_filter.collect_next_with_str(
                                &self.current,
                                &[key.to_string()],
                            )
                    },
                    _ => {},
                }
            } else {
                match t {
                    ParseToken::Leaves => {
                        self.selector_filter
                            .filter_all_with_str(&self.current, key);
                    },
                    ParseToken::In => {
                        self.selector_filter
                            .filter_next_with_str(&self.current, key);
                    },
                    _ => {},
                }
            }
        }
    }
    // Handles `['a','b',...]` multi-key selection (unsupported in filters).
    fn visit_keys(
        &mut self,
        keys: &[String],
    ) {
        if !self.selector_filter.is_term_empty() {
            unimplemented!("keys in filter");
        }
        if let Some(ParseToken::Array) = self.tokens.pop() {
            self.current = self
                .selector_filter
                .collect_next_with_str(&self.current, keys);
        } else {
            unreachable!();
        }
    }
    // Handles a comparison/logical operator: pops the two operand terms
    // (a missing operand defaults to the current node set) and pushes the
    // comparison result back onto the filter stack.
    fn visit_filter(
        &mut self,
        ft: &FilterToken,
    ) {
        let right = match self.selector_filter.pop_term() {
            Some(Some(right)) => right,
            Some(None) => ExprTerm::Json(
                None,
                None,
                match &self.current {
                    Some(current) => current.to_vec(),
                    _ => unreachable!(),
                },
            ),
            _ => ExprTerm::Json(None, None, vec![]),
        };
        let left = match self.selector_filter.pop_term() {
            Some(Some(left)) => left,
            Some(None) => ExprTerm::Json(
                None,
                None,
                match &self.current {
                    Some(current) => current.to_vec(),
                    _ => unreachable!(),
                },
            ),
            _ => ExprTerm::Json(None, None, vec![]),
        };
        let mut ret = None;
        match ft {
            FilterToken::Equal => left.eq(&right, &mut ret),
            FilterToken::NotEqual => left.ne(&right, &mut ret),
            FilterToken::Greater => left.gt(&right, &mut ret),
            FilterToken::GreaterOrEqual => left.ge(&right, &mut ret),
            FilterToken::Little => left.lt(&right, &mut ret),
            FilterToken::LittleOrEqual => left.le(&right, &mut ret),
            FilterToken::And => left.and(&right, &mut ret),
            FilterToken::Or => left.or(&right, &mut ret),
        };
        if let Some(e) = ret {
            self.selector_filter.push_term(Some(e));
        }
    }
    // Handles `[from:to:step]` slices over array nodes in `current`.
    fn visit_range(
        &mut self,
        from: &Option<isize>,
        to: &Option<isize>,
        step: &Option<usize>,
    ) {
        if !self.selector_filter.is_term_empty() {
            unimplemented!("range syntax in filter");
        }
        if let Some(ParseToken::Array) = self.tokens.pop() {
            let mut tmp = Vec::new();
            if let Some(current) = &self.current {
                for v in current {
                    if let Value::Array(vec) = v {
                        let from = if let Some(from) = from {
                            abs_index(*from, vec.len())
                        } else {
                            0
                        };
                        let to = if let Some(to) = to {
                            abs_index(*to, vec.len())
                        } else {
                            vec.len()
                        };
                        for i in (from..to).step_by(match step {
                            Some(step) => *step,
                            _ => 1,
                        }) {
                            if let Some(v) = vec.get(i) {
                                tmp.push(v);
                            }
                        }
                    }
                }
            }
            self.current = Some(tmp);
        } else {
            unreachable!();
        }
    }
    // Handles `[i,j,...]` index unions over array nodes in `current`;
    // negative indices count from the end via `abs_index`.
    fn visit_union(
        &mut self,
        indices: &[isize],
    ) {
        if !self.selector_filter.is_term_empty() {
            unimplemented!("union syntax in filter");
        }
        if let Some(ParseToken::Array) = self.tokens.pop() {
            let mut tmp = Vec::new();
            if let Some(current) = &self.current {
                for v in current {
                    if let Value::Array(vec) = v {
                        for i in indices {
                            if let Some(v) = vec.get(abs_index(*i, vec.len())) {
                                tmp.push(v);
                            }
                        }
                    }
                }
            }
            self.current = Some(tmp);
        } else {
            unreachable!();
        }
    }
}
#[allow(deprecated)]
impl NodeVisitor for Selector<'_, '_> {
    /// Dispatches each compiled-path token to its `visit_*` handler; tokens
    /// are first offered to a nested absolute-path selector if one is open.
    fn visit_token(
        &mut self,
        token: &ParseToken,
    ) {
        debug!("token: {:?}, stack: {:?}", token, self.tokens);
        if self.compute_absolute_path_filter(token) {
            return;
        }
        match token {
            ParseToken::Absolute => self.visit_absolute(),
            ParseToken::Relative => self.visit_relative(),
            // Structural tokens are deferred: pushed now, interpreted by the
            // handler of the token that follows them.
            ParseToken::In | ParseToken::Leaves | ParseToken::Array => {
                self.tokens.push(token.clone());
            },
            ParseToken::ArrayEof => self.visit_array_eof(),
            ParseToken::All => self.visit_all(),
            ParseToken::Bool(b) => {
                self.selector_filter.push_term(Some(ExprTerm::Bool(*b)));
            },
            ParseToken::Key(key) => self.visit_key(key),
            ParseToken::Keys(keys) => self.visit_keys(keys),
            ParseToken::Number(v) => {
                self.selector_filter.push_term(Some(ExprTerm::Number(
                    Number::from_f64(*v).unwrap(),
                )));
            },
            ParseToken::Filter(ref ft) => self.visit_filter(ft),
            ParseToken::Range(from, to, step) => {
                self.visit_range(from, to, step)
            },
            ParseToken::Union(indices) => self.visit_union(indices),
            ParseToken::Eof => {
                debug!("visit_token eof");
            },
        }
    }
}
/// Deprecated mutable selector: owns the JSON document so matched nodes can
/// be replaced or removed in place.
#[deprecated(since = "0.4.0", note = "Please use `JsonSelectorMut`")]
#[derive(Default)]
pub struct SelectorMut {
    // Compiled path to evaluate.
    #[allow(deprecated)]
    path: Option<Node>,
    // Owned JSON document being mutated.
    value: Option<Value>,
}
/// Walks `value` along the path given by `tokens` (object keys, or array
/// indices rendered as strings) and applies `fun` to the final node:
/// `Some(v)` replaces it, `None` removes it from its parent container.
/// If the path no longer resolves, nothing happens.
fn replace_value<F: FnMut(Value) -> Option<Value>>(
    mut tokens: Vec<String>,
    value: &mut Value,
    fun: &mut F,
) {
    let mut target = value;
    let last_index = tokens.len().saturating_sub(1);
    for (i, token) in tokens.drain(..).enumerate() {
        let target_once = target;
        let is_last = i == last_index;
        let target_opt = match *target_once {
            Value::Object(ref mut map) => {
                if is_last {
                    // Park `Null` in the slot so `fun` can take ownership of
                    // the old value without cloning.
                    if let Entry::Occupied(mut e) = map.entry(token) {
                        let v = e.insert(Value::Null);
                        if let Some(res) = fun(v) {
                            e.insert(res);
                        } else {
                            e.remove();
                        }
                    }
                    return;
                }
                map.get_mut(&token)
            },
            Value::Array(ref mut vec) => {
                if let Ok(x) = token.parse::<usize>() {
                    if is_last {
                        if x < vec.len() {
                            let v = std::mem::replace(&mut vec[x], Value::Null);
                            if let Some(res) = fun(v) {
                                vec[x] = res;
                            } else {
                                vec.remove(x);
                            }
                        }
                        return;
                    }
                    vec.get_mut(x)
                } else {
                    None
                }
            },
            _ => None,
        };
        if let Some(t) = target_opt {
            target = t;
        } else {
            // Path component missing — abandon this replacement.
            break;
        }
    }
}
#[allow(deprecated)]
impl SelectorMut {
    /// Creates an empty mutable selector.
    pub fn new() -> Self {
        Self::default()
    }
    /// Compiles and stores the path to mutate.
    pub fn str_path(
        &mut self,
        path: &str,
    ) -> Result<&mut Self, JsonPathError> {
        self.path = Some(Parser::compile(path).map_err(JsonPathError::Path)?);
        Ok(self)
    }
    /// Takes ownership of the JSON document to mutate.
    pub fn value(
        &mut self,
        value: Value,
    ) -> &mut Self {
        self.value = Some(value);
        self
    }
    /// Returns the (possibly mutated) document, leaving `None` behind.
    pub fn take(&mut self) -> Option<Value> {
        self.value.take()
    }
    // Converts the selected node references back into string token paths
    // (object keys / array indices) by walking the document and matching on
    // node address (`std::ptr::eq`); order of discovery is preserved and
    // duplicates are suppressed via the `visited` pointer set.
    fn compute_paths(
        &self,
        mut result: Vec<&Value>,
    ) -> Vec<Vec<String>> {
        fn _walk(
            origin: &Value,
            target: &mut Vec<&Value>,
            tokens: &mut Vec<String>,
            visited: &mut HashSet<*const Value>,
            visited_order: &mut Vec<Vec<String>>,
        ) -> bool {
            trace!("{:?}, {:?}", target, tokens);
            if target.is_empty() {
                return true;
            }
            target.retain(|t| {
                if std::ptr::eq(origin, *t) {
                    if visited.insert(*t) {
                        visited_order.push(tokens.to_vec());
                    }
                    false
                } else {
                    true
                }
            });
            match origin {
                Value::Array(vec) => {
                    for (i, v) in vec.iter().enumerate() {
                        tokens.push(i.to_string());
                        if _walk(v, target, tokens, visited, visited_order) {
                            return true;
                        }
                        tokens.pop();
                    }
                },
                Value::Object(map) => {
                    for (k, v) in map {
                        tokens.push(k.clone());
                        if _walk(v, target, tokens, visited, visited_order) {
                            return true;
                        }
                        tokens.pop();
                    }
                },
                _ => {},
            }
            false
        }
        let mut visited = HashSet::new();
        let mut visited_order = Vec::new();
        if let Some(origin) = &self.value {
            let mut tokens = Vec::new();
            _walk(
                origin,
                &mut result,
                &mut tokens,
                &mut visited,
                &mut visited_order,
            );
        }
        visited_order
    }
    /// Replaces every matched node with `Null`.
    pub fn delete(&mut self) -> Result<&mut Self, JsonPathError> {
        self.replace_with(&mut |_| Some(Value::Null))
    }
    /// Removes every matched node from its parent container.
    pub fn remove(&mut self) -> Result<&mut Self, JsonPathError> {
        self.replace_with(&mut |_| None)
    }
    // Runs an immutable selection against the current document.
    fn select(&self) -> Result<Vec<&Value>, JsonPathError> {
        if let Some(node) = &self.path {
            let mut selector = Selector::default();
            selector.compiled_path(node);
            if let Some(value) = &self.value {
                selector.value(value);
            }
            Ok(selector.select()?)
        } else {
            Err(JsonPathError::EmptyPath)
        }
    }
    /// Selects, then applies `fun` to each matched node: `Some(v)` replaces
    /// the node in place, `None` removes it.
    pub fn replace_with<F: FnMut(Value) -> Option<Value>>(
        &mut self,
        fun: &mut F,
    ) -> Result<&mut Self, JsonPathError> {
        // Paths are computed first (borrowing immutably), then applied
        // against the mutable document.
        let paths = {
            let result = self.select()?;
            self.compute_paths(result)
        };
        if let Some(ref mut value) = &mut self.value {
            for tokens in paths {
                replace_value(tokens, value, fun);
            }
        }
        Ok(self)
    }
}
// #[cfg(test)]
// mod select_inner_tests {
// use serde_json::Value;
//
// #[test]
// fn to_f64_i64() {
// let number = 0_i64;
// let v: Value = serde_json::from_str(&format!("{}", number)).unwrap();
// if let Value::Number(n) = v {
// assert!((super::to_f64(&n) - number as f64).abs() == 0_f64);
// } else {
// panic!();
// }
// }
//
// #[test]
// fn to_f64_f64() {
// let number = 0.1_f64;
// let v: Value = serde_json::from_str(&format!("{}", number)).unwrap();
// if let Value::Number(n) = v {
// assert!((super::to_f64(&n) - number).abs() == 0_f64);
// } else {
// panic!();
// }
// }
//
// #[test]
// fn to_f64_u64() {
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | true |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/select/expr_term.rs | src/select/expr_term.rs | use crate::select::cmp::*;
use crate::select::{to_f64, FilterKey};
use serde_json::{Number, Value};
/// A term in a filter expression: a literal (`String` / `Number` / `Bool`)
/// or a set of JSON nodes. `Json(relative_parents, filter_key, matches)`
/// carries the matched values plus, optionally, their parent set and the
/// key that was dereferenced to reach them.
#[derive(Debug, PartialEq)]
pub(super) enum ExprTerm<'a> {
    String(String),
    Number(Number),
    Bool(bool),
    Json(Option<Vec<&'a Value>>, Option<FilterKey>, Vec<&'a Value>),
}
impl<'a> ExprTerm<'a> {
    // Core comparison. Literal-vs-literal compares directly; literal-vs-Json
    // is normalized by swapping the operands and comparing with
    // `reverse_cmp_fn` (the mirrored operator, e.g. `<` for `>`), so the
    // Json arm only ever sees Json on the left.
    fn cmp<C1: Cmp, C2: Cmp>(
        &self,
        other: &Self,
        cmp_fn: &C1,
        reverse_cmp_fn: &C2,
    ) -> ExprTerm<'a> {
        match &self {
            ExprTerm::String(s1) => match &other {
                ExprTerm::String(s2) => {
                    ExprTerm::Bool(cmp_fn.cmp_string(s1, s2))
                },
                ExprTerm::Json(_, _, _) => {
                    other.cmp(self, reverse_cmp_fn, cmp_fn)
                },
                _ => ExprTerm::Bool(cmp_fn.default()),
            },
            ExprTerm::Number(n1) => match &other {
                ExprTerm::Number(n2) => {
                    ExprTerm::Bool(cmp_fn.cmp_f64(to_f64(n1), to_f64(n2)))
                },
                ExprTerm::Json(_, _, _) => {
                    other.cmp(self, reverse_cmp_fn, cmp_fn)
                },
                _ => ExprTerm::Bool(cmp_fn.default()),
            },
            ExprTerm::Bool(b1) => match &other {
                ExprTerm::Bool(b2) => ExprTerm::Bool(cmp_fn.cmp_bool(*b1, *b2)),
                ExprTerm::Json(_, _, _) => {
                    other.cmp(self, reverse_cmp_fn, cmp_fn)
                },
                _ => ExprTerm::Bool(cmp_fn.default()),
            },
            ExprTerm::Json(rel, fk1, vec1) => {
                // Filter the node set against the literal (or the other
                // node set). When a filter key was recorded, objects are
                // compared through their keyed value.
                let ret: Vec<&Value> = match &other {
                    ExprTerm::String(s2) => vec1
                        .iter()
                        .filter(|v1| match v1 {
                            Value::String(s1) => cmp_fn.cmp_string(s1, s2),
                            Value::Object(map1) => {
                                if let Some(FilterKey::String(k)) = fk1 {
                                    if let Some(Value::String(s1)) = map1.get(k)
                                    {
                                        return cmp_fn.cmp_string(s1, s2);
                                    }
                                }
                                cmp_fn.default()
                            },
                            _ => cmp_fn.default(),
                        })
                        .cloned()
                        .collect(),
                    ExprTerm::Number(n2) => vec1
                        .iter()
                        .filter(|v1| match v1 {
                            Value::Number(n1) => {
                                cmp_fn.cmp_f64(to_f64(n1), to_f64(n2))
                            },
                            Value::Object(map1) => {
                                if let Some(FilterKey::String(k)) = fk1 {
                                    if let Some(Value::Number(n1)) = map1.get(k)
                                    {
                                        return cmp_fn
                                            .cmp_f64(to_f64(n1), to_f64(n2));
                                    }
                                }
                                cmp_fn.default()
                            },
                            _ => cmp_fn.default(),
                        })
                        .cloned()
                        .collect(),
                    ExprTerm::Bool(b2) => vec1
                        .iter()
                        .filter(|v1| match v1 {
                            Value::Bool(b1) => cmp_fn.cmp_bool(*b1, *b2),
                            Value::Object(map1) => {
                                if let Some(FilterKey::String(k)) = fk1 {
                                    if let Some(Value::Bool(b1)) = map1.get(k) {
                                        return cmp_fn.cmp_bool(*b1, *b2);
                                    }
                                }
                                cmp_fn.default()
                            },
                            _ => cmp_fn.default(),
                        })
                        .cloned()
                        .collect(),
                    ExprTerm::Json(parent, _, vec2) => {
                        // Prefer relative (parent) sets when present; note
                        // the middle arm shadows `vec2` with the *other*
                        // term's parent set.
                        if let Some(vec1) = rel {
                            cmp_fn.cmp_json(vec1, vec2)
                        } else if let Some(vec2) = parent {
                            cmp_fn.cmp_json(vec1, vec2)
                        } else {
                            cmp_fn.cmp_json(vec1, vec2)
                        }
                    },
                };
                if ret.is_empty() {
                    ExprTerm::Bool(cmp_fn.default())
                } else if let Some(rel) = rel {
                    if let ExprTerm::Json(_, _, _) = &other {
                        ExprTerm::Json(Some(rel.to_vec()), None, ret)
                    } else {
                        // Map matched values back to the parents that hold
                        // them, so later path steps continue from there.
                        let mut tmp = Vec::new();
                        for rel_value in rel {
                            if let Value::Object(map) = rel_value {
                                for map_value in map.values() {
                                    for result_value in &ret {
                                        if map_value.eq(*result_value) {
                                            tmp.push(*rel_value);
                                        }
                                    }
                                }
                            }
                        }
                        ExprTerm::Json(Some(tmp), None, ret)
                    }
                } else {
                    ExprTerm::Json(None, None, ret)
                }
            },
        }
    }
    /// `==`; result is written into `ret`.
    pub fn eq(
        &self,
        other: &Self,
        ret: &mut Option<ExprTerm<'a>>,
    ) {
        debug!("eq - {:?} : {:?}", &self, &other);
        let _ = ret.take();
        let tmp = self.cmp(other, &CmpEq, &CmpEq);
        debug!("eq = {:?}", tmp);
        *ret = Some(tmp);
    }
    /// `!=`; result is written into `ret`.
    pub fn ne(
        &self,
        other: &Self,
        ret: &mut Option<ExprTerm<'a>>,
    ) {
        debug!("ne - {:?} : {:?}", &self, &other);
        let _ = ret.take();
        let tmp = self.cmp(other, &CmpNe, &CmpNe);
        debug!("ne = {:?}", tmp);
        *ret = Some(tmp);
    }
    /// `>`; mirrored with `<` for swapped operands.
    pub fn gt(
        &self,
        other: &Self,
        ret: &mut Option<ExprTerm<'a>>,
    ) {
        debug!("gt - {:?} : {:?}", &self, &other);
        let _ = ret.take();
        let tmp = self.cmp(other, &CmpGt, &CmpLt);
        debug!("gt = {:?}", tmp);
        *ret = Some(tmp);
    }
    /// `>=`; mirrored with `<=` for swapped operands.
    pub fn ge(
        &self,
        other: &Self,
        ret: &mut Option<ExprTerm<'a>>,
    ) {
        debug!("ge - {:?} : {:?}", &self, &other);
        let _ = ret.take();
        let tmp = self.cmp(other, &CmpGe, &CmpLe);
        debug!("ge = {:?}", tmp);
        *ret = Some(tmp);
    }
    /// `<`; mirrored with `>` for swapped operands.
    pub fn lt(
        &self,
        other: &Self,
        ret: &mut Option<ExprTerm<'a>>,
    ) {
        debug!("lt - {:?} : {:?}", &self, &other);
        let _ = ret.take();
        let tmp = self.cmp(other, &CmpLt, &CmpGt);
        debug!("lt = {:?}", tmp);
        *ret = Some(tmp);
    }
    /// `<=`; mirrored with `>=` for swapped operands.
    pub fn le(
        &self,
        other: &Self,
        ret: &mut Option<ExprTerm<'a>>,
    ) {
        debug!("le - {:?} : {:?}", &self, &other);
        let _ = ret.take();
        let tmp = self.cmp(other, &CmpLe, &CmpGe);
        debug!("le = {:?}", tmp);
        *ret = Some(tmp);
    }
    /// `&&`; result is written into `ret`.
    pub fn and(
        &self,
        other: &Self,
        ret: &mut Option<ExprTerm<'a>>,
    ) {
        debug!("and - {:?} : {:?}", &self, &other);
        let _ = ret.take();
        let tmp = self.cmp(other, &CmpAnd, &CmpAnd);
        debug!("and = {:?}", tmp);
        *ret = Some(tmp);
    }
    /// `||`; result is written into `ret`.
    pub fn or(
        &self,
        other: &Self,
        ret: &mut Option<ExprTerm<'a>>,
    ) {
        debug!("or - {:?} : {:?}", &self, &other);
        let _ = ret.take();
        let tmp = self.cmp(other, &CmpOr, &CmpOr);
        debug!("or = {:?}", tmp);
        *ret = Some(tmp);
    }
}
/// Converts a set of JSON references into a term: a single scalar collapses
/// to the corresponding literal term; anything else becomes a `Json` term.
impl<'a> From<&Vec<&'a Value>> for ExprTerm<'a> {
    fn from(vec: &Vec<&'a Value>) -> Self {
        if let [single] = vec.as_slice() {
            match single {
                Value::Number(n) => return ExprTerm::Number(n.clone()),
                Value::String(s) => return ExprTerm::String(s.clone()),
                Value::Bool(b) => return ExprTerm::Bool(*b),
                _ => {},
            }
        }
        ExprTerm::Json(None, None, vec.to_vec())
    }
}
// #[cfg(test)]
// mod expr_term_inner_tests {
// use serde_json::{Number, Value};
// use select::expr_term::ExprTerm;
//
// #[test]
// fn value_vec_into() {
// let v = Value::Bool(true);
// let vec = &vec![&v];
// let term: ExprTerm = vec.into();
// assert_eq!(term, ExprTerm::Bool(true));
//
// let v = Value::String("a".to_string());
// let vec = &vec![&v];
// let term: ExprTerm = vec.into();
// assert_eq!(term, ExprTerm::String("a".to_string()));
//
// let v = serde_json::from_str("1.0").unwrap();
// let vec = &vec![&v];
// let term: ExprTerm = vec.into();
// assert_eq!(term, ExprTerm::Number(Number::from_f64(1.0).unwrap()));
// }
// }
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/select/cmp.rs | src/select/cmp.rs | use serde_json::Value;
/// Comparison strategy for filter operators; one implementation per
/// operator (`==`, `!=`, `<`, `<=`, `>`, `>=`, `&&`, `||`).
pub(super) trait Cmp {
    /// Compares two booleans under this operator.
    fn cmp_bool(
        &self,
        v1: bool,
        v2: bool,
    ) -> bool;
    /// Compares two numbers (all JSON numbers are widened to `f64`).
    fn cmp_f64(
        &self,
        v1: f64,
        v2: f64,
    ) -> bool;
    /// Compares two strings.
    fn cmp_string(
        &self,
        v1: &str,
        v2: &str,
    ) -> bool;
    /// Set-level comparison: returns the nodes that satisfy the operator.
    fn cmp_json<'a>(
        &self,
        v1: &[&'a Value],
        v2: &[&'a Value],
    ) -> Vec<&'a Value>;
    /// Result used when the operand types are incomparable.
    fn default(&self) -> bool {
        false
    }
}
/// `==` operator.
pub(super) struct CmpEq;

impl Cmp for CmpEq {
    fn cmp_bool(&self, v1: bool, v2: bool) -> bool {
        v1 == v2
    }

    fn cmp_f64(&self, v1: f64, v2: f64) -> bool {
        // Exact equality, phrased as a zero difference.
        (v1 - v2).abs() == 0_f64
    }

    fn cmp_string(&self, v1: &str, v2: &str) -> bool {
        v1 == v2
    }

    // Keeps each element of `v1` once per equal element of `v2`
    // (pairwise intersection, left-to-right order preserved).
    fn cmp_json<'a>(&self, v1: &[&'a Value], v2: &[&'a Value]) -> Vec<&'a Value> {
        let mut matched = vec![];
        for left in v1 {
            for right in v2 {
                if left == right {
                    matched.push(*left);
                }
            }
        }
        matched
    }
}
/// `!=` operator.
pub(super) struct CmpNe;

impl Cmp for CmpNe {
    fn cmp_bool(&self, v1: bool, v2: bool) -> bool {
        v1 != v2
    }

    fn cmp_f64(&self, v1: f64, v2: f64) -> bool {
        (v1 - v2).abs() != 0_f64
    }

    fn cmp_string(&self, v1: &str, v2: &str) -> bool {
        v1 != v2
    }

    // NOTE(review): an element of `v1` is pushed once per *unequal* element
    // of `v2`, so duplicates appear when `v2` has several entries — this
    // mirrors the historical behavior; confirm it is intended.
    fn cmp_json<'a>(&self, v1: &[&'a Value], v2: &[&'a Value]) -> Vec<&'a Value> {
        let mut matched = vec![];
        for left in v1 {
            for right in v2 {
                if left != right {
                    matched.push(*left);
                }
            }
        }
        matched
    }
}
/// `>` operator.
pub(super) struct CmpGt;

impl Cmp for CmpGt {
    fn cmp_bool(&self, v1: bool, v2: bool) -> bool {
        // `true > false` is the only strictly-greater pair.
        v1 & !v2
    }

    fn cmp_f64(&self, v1: f64, v2: f64) -> bool {
        v1 > v2
    }

    fn cmp_string(&self, v1: &str, v2: &str) -> bool {
        v1 > v2
    }

    fn cmp_json<'a>(&self, _: &[&'a Value], _: &[&'a Value]) -> Vec<&'a Value> {
        // Ordering two node sets is undefined; always empty.
        Vec::new()
    }
}
/// The `>=` operator.
pub(super) struct CmpGe;
impl Cmp for CmpGe {
    fn cmp_bool(&self, lhs: bool, rhs: bool) -> bool {
        lhs >= rhs
    }
    fn cmp_f64(&self, lhs: f64, rhs: f64) -> bool {
        lhs >= rhs
    }
    fn cmp_string(&self, lhs: &str, rhs: &str) -> bool {
        lhs >= rhs
    }
    fn cmp_json<'a>(&self, _: &[&'a Value], _: &[&'a Value]) -> Vec<&'a Value> {
        // Ordering is undefined for arbitrary JSON values; select nothing.
        Vec::new()
    }
}
/// The `<` operator.
pub(super) struct CmpLt;
impl Cmp for CmpLt {
    fn cmp_bool(&self, lhs: bool, rhs: bool) -> bool {
        // `false < true` is the only strictly-less boolean pair.
        !lhs & rhs
    }
    fn cmp_f64(&self, lhs: f64, rhs: f64) -> bool {
        lhs < rhs
    }
    fn cmp_string(&self, lhs: &str, rhs: &str) -> bool {
        lhs < rhs
    }
    fn cmp_json<'a>(&self, _: &[&'a Value], _: &[&'a Value]) -> Vec<&'a Value> {
        // Ordering is undefined for arbitrary JSON values; select nothing.
        Vec::new()
    }
}
/// The `<=` operator.
pub(super) struct CmpLe;
impl Cmp for CmpLe {
    fn cmp_bool(&self, lhs: bool, rhs: bool) -> bool {
        lhs <= rhs
    }
    fn cmp_f64(&self, lhs: f64, rhs: f64) -> bool {
        lhs <= rhs
    }
    fn cmp_string(&self, lhs: &str, rhs: &str) -> bool {
        lhs <= rhs
    }
    fn cmp_json<'a>(&self, _: &[&'a Value], _: &[&'a Value]) -> Vec<&'a Value> {
        // Ordering is undefined for arbitrary JSON values; select nothing.
        Vec::new()
    }
}
/// The `&&` operator.
pub(super) struct CmpAnd;
impl Cmp for CmpAnd {
    fn cmp_bool(&self, lhs: bool, rhs: bool) -> bool {
        lhs && rhs
    }
    fn cmp_f64(&self, _lhs: f64, _rhs: f64) -> bool {
        // Any two numbers are treated as "truthy".
        true
    }
    fn cmp_string(&self, lhs: &str, rhs: &str) -> bool {
        // A string counts as "truthy" when it is non-empty.
        !lhs.is_empty() && !rhs.is_empty()
    }
    fn cmp_json<'a>(&self, lhs: &[&'a Value], rhs: &[&'a Value]) -> Vec<&'a Value> {
        // `and` over value sets is their intersection, i.e. the equality join.
        CmpEq.cmp_json(lhs, rhs)
    }
}
/// The `||` operator.
pub(super) struct CmpOr;
impl Cmp for CmpOr {
    fn cmp_bool(&self, lhs: bool, rhs: bool) -> bool {
        lhs || rhs
    }
    fn cmp_f64(&self, _lhs: f64, _rhs: f64) -> bool {
        // Any two numbers are treated as "truthy".
        true
    }
    fn cmp_string(&self, lhs: &str, rhs: &str) -> bool {
        !lhs.is_empty() || !rhs.is_empty()
    }
    fn cmp_json<'a>(&self, lhs: &[&'a Value], rhs: &[&'a Value]) -> Vec<&'a Value> {
        // Union of both sets: keep only the first occurrence of each value,
        // preserving encounter order (`lhs` first, then `rhs`).
        let mut union: Vec<&'a Value> = Vec::new();
        for candidate in lhs.iter().chain(rhs.iter()) {
            if !union.contains(candidate) {
                union.push(*candidate);
            }
        }
        union
    }
}
// #[cfg(test)]
// mod cmp_inner_tests {
// use serde_json::Value;
//
// use select::cmp::*;
//
// #[test]
// fn cmp_eq() {
// let cmp_fn = CmpEq;
// assert!(!cmp_fn.default());
// assert!(!cmp_fn.cmp_bool(true, false));
// assert!(cmp_fn.cmp_bool(true, true));
// assert!(cmp_fn.cmp_f64(0.1, 0.1));
// assert!(!cmp_fn.cmp_f64(0.1, 0.2));
// assert!(cmp_fn.cmp_string("1", "1"));
// assert!(!cmp_fn.cmp_string("1", "2"));
// }
//
// #[test]
// fn cmp_ne() {
// let cmp_fn = CmpNe;
// assert!(!cmp_fn.default());
// assert!(cmp_fn.cmp_bool(true, false));
// assert!(!cmp_fn.cmp_bool(true, true));
// assert!(!cmp_fn.cmp_f64(0.1, 0.1));
// assert!(cmp_fn.cmp_f64(0.1, 0.2));
// assert!(!cmp_fn.cmp_string("1", "1"));
// assert!(cmp_fn.cmp_string("1", "2"));
// }
//
// #[test]
// fn cmp_gt() {
// let cmp_fn = CmpGt;
// assert!(!cmp_fn.default());
// assert!(cmp_fn.cmp_bool(true, false));
// assert!(!cmp_fn.cmp_bool(true, true));
// assert!(cmp_fn.cmp_f64(0.2, 0.1));
// assert!(!cmp_fn.cmp_f64(0.1, 0.2));
// assert!(!cmp_fn.cmp_string("a", "a"));
// assert!(cmp_fn.cmp_string("b", "a"));
// assert!(!cmp_fn.cmp_string("1", "2"));
// }
//
// #[test]
// fn cmp_ge() {
// let cmp_fn = CmpGe;
// assert!(!cmp_fn.default());
// assert!(cmp_fn.cmp_bool(true, false));
// assert!(cmp_fn.cmp_bool(true, true));
// assert!(cmp_fn.cmp_f64(0.2, 0.1));
// assert!(cmp_fn.cmp_f64(0.1, 0.1));
// assert!(!cmp_fn.cmp_f64(0.1, 0.2));
// assert!(cmp_fn.cmp_string("1", "1"));
// assert!(cmp_fn.cmp_string("ab", "a"));
// assert!(!cmp_fn.cmp_string("1", "2"));
// }
//
// #[test]
// fn cmp_lt() {
// let cmp_fn = CmpLt;
// assert!(!cmp_fn.default());
// assert!(!cmp_fn.cmp_bool(true, false));
// assert!(cmp_fn.cmp_bool(false, true));
// assert!(!cmp_fn.cmp_bool(true, true));
// assert!(!cmp_fn.cmp_bool(false, false));
// assert!(cmp_fn.cmp_f64(0.1, 0.2));
// assert!(!cmp_fn.cmp_f64(0.1, 0.1));
// assert!(!cmp_fn.cmp_f64(0.2, 0.1));
// assert!(!cmp_fn.cmp_string("a", "a"));
// assert!(cmp_fn.cmp_string("ab", "b"));
// assert!(cmp_fn.cmp_string("1", "2"));
// }
//
// #[test]
// fn cmp_le() {
// let cmp_fn = CmpLe;
// assert!(!cmp_fn.default());
// assert!(!cmp_fn.cmp_bool(true, false));
// assert!(cmp_fn.cmp_bool(false, true));
// assert!(cmp_fn.cmp_bool(true, true));
// assert!(cmp_fn.cmp_bool(false, false));
// assert!(cmp_fn.cmp_f64(0.1, 0.2));
// assert!(cmp_fn.cmp_f64(0.1, 0.1));
// assert!(!cmp_fn.cmp_f64(0.2, 0.1));
// assert!(cmp_fn.cmp_string("a", "a"));
// assert!(cmp_fn.cmp_string("ab", "b"));
// assert!(!cmp_fn.cmp_string("abd", "abc"));
// assert!(cmp_fn.cmp_string("1", "2"));
// }
//
// #[test]
// fn cmp_and() {
// let cmp_fn = CmpAnd;
// assert!(!cmp_fn.default());
// assert!(!cmp_fn.cmp_bool(true, false));
// assert!(!cmp_fn.cmp_bool(false, true));
// assert!(cmp_fn.cmp_bool(true, true));
// assert!(!cmp_fn.cmp_bool(false, false));
// assert!(cmp_fn.cmp_f64(0.0, 0.0));
// assert!(cmp_fn.cmp_string("a", "a"));
// }
//
// #[test]
// fn cmp_or() {
// let cmp_fn = CmpOr;
// assert!(!cmp_fn.default());
// assert!(cmp_fn.cmp_bool(true, false));
// assert!(cmp_fn.cmp_bool(false, true));
// assert!(cmp_fn.cmp_bool(true, true));
// assert!(!cmp_fn.cmp_bool(false, false));
// assert!(cmp_fn.cmp_f64(0.0, 0.0));
// assert!(cmp_fn.cmp_string("a", "a"));
// }
//
// #[test]
// fn cmp_json() {
// let v1 = Value::Bool(true);
// let v2 = Value::String("1".to_string());
// let left = [&v1, &v2];
// let right = [&v1, &v2];
// let empty: Vec<&Value> = Vec::new();
//
// assert_eq!(CmpEq.cmp_json(&left, &right), left.to_vec());
// assert_eq!(CmpNe.cmp_json(&left, &right), left.to_vec());
// assert_eq!(CmpGt.cmp_json(&left, &right), empty);
// assert_eq!(CmpGe.cmp_json(&left, &right), empty);
// assert_eq!(CmpLt.cmp_json(&left, &right), empty);
// assert_eq!(CmpLe.cmp_json(&left, &right), empty);
// assert_eq!(CmpAnd.cmp_json(&left, &right), left.to_vec());
// assert_eq!(CmpOr.cmp_json(&left, &right), left.to_vec());
//
// assert_eq!(
// CmpEq.cmp_json(&[&Value::Bool(true)], &[&Value::Bool(true)]),
// vec![&Value::Bool(true)]
// );
// assert_eq!(
// CmpEq.cmp_json(&[&Value::Bool(true)], &[&Value::Bool(false)]),
// empty
// );
// assert_eq!(
// CmpNe.cmp_json(&[&Value::Bool(true)], &[&Value::Bool(true)]),
// empty
// );
// assert_eq!(
// CmpNe.cmp_json(&[&Value::Bool(false)], &[&Value::Bool(true)]),
// vec![&Value::Bool(false)]
// );
// assert_eq!(
// CmpAnd.cmp_json(&[&Value::Bool(true)], &[&Value::Bool(true)]),
// vec![&Value::Bool(true)]
// );
// assert_eq!(
// CmpOr.cmp_json(&[&Value::Bool(true)], &[&Value::Bool(false)]),
// vec![&Value::Bool(true), &Value::Bool(false)]
// );
// }
// }
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/parser/path_reader.rs | src/parser/path_reader.rs | use std::result::Result;
/// Errors produced by [`PathReader`].
#[derive(Debug, PartialEq)]
pub enum ReaderError {
    /// The reader ran out of input.
    Eof,
}
/// Character cursor over a path expression that tracks the byte offset
/// consumed so far.
pub struct PathReader<'a> {
    input: &'a str, // unconsumed remainder of the original input
    pos: usize,     // byte offset of `input` within the original string
}
impl<'a> PathReader<'a> {
    /// Creates a reader positioned at the start of `input`.
    pub fn new(input: &'a str) -> Self {
        PathReader { input, pos: 0 }
    }

    /// Looks at the next character without consuming it.
    ///
    /// Note: the returned offset is the position *after* the peeked
    /// character, not its start.
    pub fn peek_char(&self) -> Result<(usize, char), ReaderError> {
        match self.input.chars().next() {
            Some(c) => Ok((self.pos + c.len_utf8(), c)),
            None => Err(ReaderError::Eof),
        }
    }

    /// Consumes characters while `fun` holds, returning the offset after
    /// the consumed run together with the consumed text.
    pub fn take_while<F>(
        &mut self,
        fun: F,
    ) -> Result<(usize, String), ReaderError>
    where
        F: Fn(&char) -> bool,
    {
        let mut consumed: usize = 0;
        let mut taken = String::new();
        for c in self.input.chars() {
            if !fun(&c) {
                break;
            }
            consumed += c.len_utf8();
            taken.push(c);
        }
        self.pos += consumed;
        self.input = &self.input[consumed..];
        Ok((self.pos, taken))
    }

    /// Consumes one character, returning its starting offset.
    pub fn next_char(&mut self) -> Result<(usize, char), ReaderError> {
        let (_, c) = self.peek_char()?;
        let start = self.pos;
        self.input = &self.input[c.len_utf8()..];
        self.pos = start + c.len_utf8();
        Ok((start, c))
    }

    /// Byte offset consumed so far.
    pub fn current_pos(&self) -> usize {
        self.pos
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_new() {
        let reader = PathReader::new("abc");
        assert_eq!(reader.input, "abc");
        assert_eq!(reader.pos, 0);
    }
    // `peek_char` reports the offset *after* the peeked character.
    #[test]
    fn test_peek_char() {
        let reader = PathReader::new("abc");
        assert_eq!(reader.peek_char(), Ok((1, 'a')));
        let empty_reader = PathReader::new("");
        assert_eq!(empty_reader.peek_char(), Err(ReaderError::Eof));
    }
    // `take_while` stops *before* the first non-matching character and
    // returns the post-consumption offset.
    #[test]
    fn test_take_while() {
        let mut reader = PathReader::new("abc");
        assert_eq!(reader.take_while(|c| *c != 'c'), Ok((2, "ab".to_string())));
        assert_eq!(reader.take_while(|c| *c != 'c'), Ok((2, "".to_string()))); // already at 'c'
        let mut empty_reader = PathReader::new("");
        assert_eq!(empty_reader.take_while(|_| true), Ok((0, "".to_string())));
        let mut reader = PathReader::new("abc");
        assert_eq!(reader.take_while(|_| false), Ok((0, "".to_string())));
    }
    // Unlike `peek_char`, `next_char` reports the character's *starting* offset.
    #[test]
    fn test_next_char() {
        let mut reader = PathReader::new("abc");
        assert_eq!(reader.next_char(), Ok((0, 'a')));
        assert_eq!(reader.next_char(), Ok((1, 'b')));
        assert_eq!(reader.next_char(), Ok((2, 'c')));
        assert_eq!(reader.next_char(), Err(ReaderError::Eof));
    }
    #[test]
    fn test_current_pos() {
        let mut reader = PathReader::new("abc");
        assert_eq!(reader.current_pos(), 0);
        reader.next_char().unwrap();
        assert_eq!(reader.current_pos(), 1);
    }
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/parser/mod.rs | src/parser/mod.rs | mod path_reader;
mod tokenizer;
use std::str::FromStr;
use self::tokenizer::*;
const DUMMY: usize = 0;
type ParseResult<T> = Result<T, String>;
mod utils {
    use std::str::FromStr;

    /// Parses `string` into any `FromStr` type, replacing a parse failure
    /// with the error message produced (lazily) by `msg_handler`.
    pub fn string_to_num<F, S: FromStr>(
        string: &str,
        msg_handler: F,
    ) -> Result<S, String>
    where
        F: Fn() -> String,
    {
        string.parse().map_err(|_| msg_handler())
    }
}
/// Tokens emitted while walking the parsed AST (see [`NodeVisitor`]).
#[derive(Debug, PartialEq, Clone)]
pub enum ParseToken {
    // '$'
    Absolute,
    // '@'
    Relative,
    // '.'
    In,
    // '..'
    Leaves,
    // '*'
    All,
    Key(String),
    Keys(Vec<String>),
    // []
    Array,
    // synthetic marker token: emitted by the visitor after an array
    // expression's children; it has no textual counterpart in the input
    ArrayEof,
    // ?( filter )
    Filter(FilterToken),
    // 1 : 2 — start, end and step of a slice, each optional
    Range(Option<isize>, Option<isize>, Option<usize>),
    // 1, 2, 3
    Union(Vec<isize>),
    Number(f64),
    Bool(bool),
    Eof,
}
/// Operators that can appear inside a `?( … )` filter expression.
#[derive(Debug, PartialEq, Clone)]
pub enum FilterToken {
    Equal,
    NotEqual,
    /// `<`
    Little,
    /// `<=`
    LittleOrEqual,
    /// `>`
    Greater,
    /// `>=`
    GreaterOrEqual,
    And,
    Or,
}
#[deprecated(since = "0.4.0", note = "Please use `paths::PathParser`")]
#[derive(Debug, Clone)]
pub struct Node {
left: Option<Box<Node>>,
right: Option<Box<Node>>,
token: ParseToken,
}
#[deprecated(since = "0.4.0", note = "Please use `paths::PathParser`")]
pub struct Parser;
#[allow(deprecated)]
impl Parser {
    /// Tokenizes `input` and parses it into an AST rooted at `$`.
    pub fn compile(input: &str) -> ParseResult<Node> {
        let mut tokenizer = TokenReader::new(input);
        Self::json_path(&mut tokenizer)
    }
    // json_path ::= '$' paths — every expression must begin with the root.
    fn json_path(tokenizer: &mut TokenReader) -> ParseResult<Node> {
        debug!("#json_path");
        match tokenizer.next_token() {
            Ok(Token::Absolute(_)) => {
                let node = Self::node(ParseToken::Absolute);
                Self::paths(node, tokenizer)
            },
            _ => Err(tokenizer.err_msg()),
        }
    }
    // paths ::= ('.' path | '[' array)* — builds a left-deep chain onto `prev`.
    fn paths(
        prev: Node,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#paths");
        match tokenizer.peek_token() {
            Ok(Token::Dot(_)) => {
                Self::eat_token(tokenizer);
                Self::paths_dot(prev, tokenizer)
            },
            Ok(Token::OpenArray(_)) => {
                Self::eat_token(tokenizer);
                Self::eat_whitespace(tokenizer);
                let node = Self::array(prev, tokenizer)?;
                Self::paths(node, tokenizer)
            },
            _ => Ok(prev),
        }
    }
    // One '.' has already been consumed; parse the segment after it, then
    // continue with any further segments.
    fn paths_dot(
        prev: Node,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#paths_dot");
        let node = Self::path(prev, tokenizer)?;
        Self::paths(node, tokenizer)
    }
    // Dispatches the segment that follows a '.': '..', '*', a key, or '['.
    fn path(
        prev: Node,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#path");
        match tokenizer.peek_token() {
            Ok(Token::Dot(_)) => Self::path_leaves(prev, tokenizer),
            Ok(Token::Asterisk(_)) => Self::path_in_all(prev, tokenizer),
            Ok(Token::Key(_, _)) => Self::path_in_key(prev, tokenizer),
            Ok(Token::OpenArray(_)) => {
                Self::eat_token(tokenizer);
                Self::array(prev, tokenizer)
            },
            _ => Err(tokenizer.err_msg()),
        }
    }
    // Handles the recursive-descent operator '..' (second dot just peeked).
    fn path_leaves(
        prev: Node,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#path_leaves");
        Self::eat_token(tokenizer);
        match tokenizer.peek_token() {
            Ok(Token::Asterisk(_)) => Self::path_leaves_all(prev, tokenizer),
            Ok(Token::OpenArray(_)) => {
                let mut leaves_node = Self::node(ParseToken::Leaves);
                leaves_node.left = Some(Box::new(prev));
                Ok(Self::paths(leaves_node, tokenizer)?)
            },
            _ => Self::path_leaves_key(prev, tokenizer),
        }
    }
    // '..key' — descend recursively, then match `key`.
    #[allow(clippy::unnecessary_wraps)]
    fn path_leaves_key(
        prev: Node,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#path_leaves_key");
        Ok(Node {
            token: ParseToken::Leaves,
            left: Some(Box::new(prev)),
            right: Some(Box::new(Self::key(tokenizer)?)),
        })
    }
    // '..*' — descend recursively, then match everything.
    #[allow(clippy::unnecessary_wraps)]
    fn path_leaves_all(
        prev: Node,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#path_leaves_all");
        Self::eat_token(tokenizer);
        Ok(Node {
            token: ParseToken::Leaves,
            left: Some(Box::new(prev)),
            right: Some(Box::new(Self::node(ParseToken::All))),
        })
    }
    // '.*' — step into all children.
    #[allow(clippy::unnecessary_wraps)]
    fn path_in_all(
        prev: Node,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#path_in_all");
        Self::eat_token(tokenizer);
        Ok(Node {
            token: ParseToken::In,
            left: Some(Box::new(prev)),
            right: Some(Box::new(Self::node(ParseToken::All))),
        })
    }
    // '.key' — step into the named child.
    #[allow(clippy::unnecessary_wraps)]
    fn path_in_key(
        prev: Node,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#path_in_key");
        Ok(Node {
            token: ParseToken::In,
            left: Some(Box::new(prev)),
            right: Some(Box::new(Self::key(tokenizer)?)),
        })
    }
    // Consumes a bare key token and wraps it in a leaf node.
    fn key(tokenizer: &mut TokenReader) -> ParseResult<Node> {
        debug!("#key");
        match tokenizer.next_token() {
            Ok(Token::Key(_, v)) => Ok(Self::node(ParseToken::Key(v))),
            _ => Err(tokenizer.err_msg()),
        }
    }
    // Parses a boolean literal in a filter.
    // NOTE(review): any key starting with t/T/f/F passes validation; only a
    // case-insensitive "true" yields `true`, everything else becomes `false`.
    fn boolean(tokenizer: &mut TokenReader) -> ParseResult<Node> {
        debug!("#boolean");
        fn validation_bool_value(v: &str) -> bool {
            let b = v.as_bytes();
            !b.is_empty()
                && (b[0] == b't'
                    || b[0] == b'T'
                    || b[0] == b'f'
                    || b[0] == b'F')
        }
        match tokenizer.next_token() {
            Ok(Token::Key(_, ref v)) if validation_bool_value(v) => {
                Ok(Self::node(ParseToken::Bool(v.eq_ignore_ascii_case("true"))))
            },
            _ => Err(tokenizer.err_msg()),
        }
    }
    // ['a', 'b', …] — collects the remaining comma-separated quoted keys
    // after `first_key` has already been consumed.
    fn array_keys(
        tokenizer: &mut TokenReader,
        first_key: String,
    ) -> ParseResult<Node> {
        let mut keys = vec![first_key];
        while let Ok(Token::Comma(_)) = tokenizer.peek_token() {
            Self::eat_token(tokenizer);
            Self::eat_whitespace(tokenizer);
            match tokenizer.next_token() {
                Ok(Token::SingleQuoted(_, val))
                | Ok(Token::DoubleQuoted(_, val)) => {
                    keys.push(val);
                },
                _ => return Err(tokenizer.err_msg()),
            }
            Self::eat_whitespace(tokenizer);
        }
        Ok(Self::node(ParseToken::Keys(keys)))
    }
    // ['key'] or ['a', 'b'] — a quoted key, possibly the first of a list.
    fn array_quote_value(tokenizer: &mut TokenReader) -> ParseResult<Node> {
        debug!("#array_quote_value");
        match tokenizer.next_token() {
            Ok(Token::SingleQuoted(_, val))
            | Ok(Token::DoubleQuoted(_, val)) => {
                if let Ok(Token::Comma(_)) = tokenizer.peek_token() {
                    Self::array_keys(tokenizer, val)
                } else {
                    Ok(Self::node(ParseToken::Key(val)))
                }
            },
            _ => Err(tokenizer.err_msg()),
        }
    }
    // Dispatches the content of '[…]': '?(' filter, '*', or an index form.
    fn array_start(
        prev: Node,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#array_start");
        match tokenizer.peek_token() {
            Ok(Token::Question(_)) => {
                Self::eat_token(tokenizer);
                Ok(Node {
                    token: ParseToken::Array,
                    left: Some(Box::new(prev)),
                    right: Some(Box::new(Self::filter(tokenizer)?)),
                })
            },
            Ok(Token::Asterisk(_)) => {
                Self::eat_token(tokenizer);
                Ok(Node {
                    token: ParseToken::Array,
                    left: Some(Box::new(prev)),
                    right: Some(Box::new(Self::node(ParseToken::All))),
                })
            },
            _ => Ok(Node {
                token: ParseToken::Array,
                left: Some(Box::new(prev)),
                right: Some(Box::new(Self::array_value(tokenizer)?)),
            }),
        }
    }
    // Parses the bracket contents and requires the closing ']'.
    fn array(
        prev: Node,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#array");
        let ret = Self::array_start(prev, tokenizer)?;
        Self::eat_whitespace(tokenizer);
        Self::close_token(ret, Token::CloseArray(DUMMY), tokenizer)
    }
    // [n…] — a number, then either a union (',') or a range (':').
    fn array_value_key(tokenizer: &mut TokenReader) -> ParseResult<Node> {
        debug!("#array_value_key");
        match tokenizer.next_token() {
            Ok(Token::Key(pos, ref val)) => {
                let digit = utils::string_to_num(val, || {
                    tokenizer.err_msg_with_pos(pos)
                })?;
                Self::eat_whitespace(tokenizer);
                match tokenizer.peek_token() {
                    Ok(Token::Comma(_)) => Self::union(digit, tokenizer),
                    Ok(Token::Split(_)) => Self::range_from(digit, tokenizer),
                    _ => Ok(Self::node(ParseToken::Number(digit as f64))),
                }
            },
            _ => Err(tokenizer.err_msg()),
        }
    }
    // Dispatches the non-filter, non-'*' bracket contents.
    fn array_value(tokenizer: &mut TokenReader) -> ParseResult<Node> {
        debug!("#array_value");
        match tokenizer.peek_token() {
            Ok(Token::Key(_, _)) => Self::array_value_key(tokenizer),
            Ok(Token::Split(_)) => {
                Self::eat_token(tokenizer);
                Self::range_to(tokenizer)
            },
            Ok(Token::DoubleQuoted(_, _)) | Ok(Token::SingleQuoted(_, _)) => {
                Self::array_quote_value(tokenizer)
            },
            Err(TokenError::Eof) => Ok(Self::node(ParseToken::Eof)),
            _ => {
                Self::eat_token(tokenizer);
                Err(tokenizer.err_msg())
            },
        }
    }
    // [1,2,3] — the first number has been consumed; collect the rest.
    fn union(
        num: isize,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#union");
        let mut values = vec![num];
        while matches!(tokenizer.peek_token(), Ok(Token::Comma(_))) {
            Self::eat_token(tokenizer);
            Self::eat_whitespace(tokenizer);
            match tokenizer.next_token() {
                Ok(Token::Key(pos, ref val)) => {
                    let digit = utils::string_to_num(val, || {
                        tokenizer.err_msg_with_pos(pos)
                    })?;
                    values.push(digit);
                },
                _ => {
                    return Err(tokenizer.err_msg());
                },
            }
        }
        Ok(Self::node(ParseToken::Union(values)))
    }
    // Optionally parses a ':'-prefixed number (the step / extra bound of a
    // slice). Returns `Ok(None)` when there is no ':' or no number after it.
    fn range_value<S: FromStr>(
        tokenizer: &mut TokenReader
    ) -> Result<Option<S>, String> {
        Self::eat_whitespace(tokenizer);
        match tokenizer.peek_token() {
            Ok(Token::Split(_)) => {
                Self::eat_token(tokenizer);
                Self::eat_whitespace(tokenizer);
            },
            _ => {
                return Ok(None);
            },
        }
        match tokenizer.peek_token() {
            Ok(Token::Key(_, _)) => {},
            _ => {
                return Ok(None);
            },
        }
        match tokenizer.next_token() {
            Ok(Token::Key(pos, str_step)) => {
                match utils::string_to_num(&str_step, || {
                    tokenizer.err_msg_with_pos(pos)
                }) {
                    Ok(step) => Ok(Some(step)),
                    Err(e) => Err(e),
                }
            },
            _ => {
                // The peek above guaranteed the next token is a Key.
                unreachable!();
            },
        }
    }
    // [from: …] — the ':' after `from` has been peeked; parse the optional
    // end and step.
    fn range_from(
        from: isize,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#range_from");
        Self::eat_token(tokenizer);
        Self::eat_whitespace(tokenizer);
        match tokenizer.peek_token() {
            Ok(Token::Key(_, _)) => Self::range(from, tokenizer),
            Ok(Token::Split(_)) => match Self::range_value(tokenizer)? {
                Some(step) => Ok(Self::node(ParseToken::Range(
                    Some(from),
                    None,
                    Some(step),
                ))),
                _ => Ok(Self::node(ParseToken::Range(Some(from), None, None))),
            },
            _ => Ok(Self::node(ParseToken::Range(Some(from), None, None))),
        }
    }
    // [:to], [:to:step], [::step] or [:] — a range with no start.
    fn range_to(tokenizer: &mut TokenReader) -> ParseResult<Node> {
        debug!("#range_to");
        if let Some(step) = Self::range_value(tokenizer)? {
            return Ok(Self::node(ParseToken::Range(None, None, Some(step))));
        }
        if let Ok(Token::CloseArray(_)) = tokenizer.peek_token() {
            return Ok(Self::node(ParseToken::Range(None, None, None)));
        }
        match tokenizer.next_token() {
            Ok(Token::Key(pos, ref to_str)) => {
                let to = utils::string_to_num(to_str, || {
                    tokenizer.err_msg_with_pos(pos)
                })?;
                let step = Self::range_value(tokenizer)?;
                Ok(Self::node(ParseToken::Range(None, Some(to), step)))
            },
            _ => Err(tokenizer.err_msg()),
        }
    }
    // [from:to] or [from:to:step] — both bounds present.
    fn range(
        from: isize,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#range");
        match tokenizer.next_token() {
            Ok(Token::Key(pos, ref str_to)) => {
                let to = utils::string_to_num(str_to, || {
                    tokenizer.err_msg_with_pos(pos)
                })?;
                let step = Self::range_value(tokenizer)?;
                Ok(Self::node(ParseToken::Range(Some(from), Some(to), step)))
            },
            _ => Err(tokenizer.err_msg()),
        }
    }
    // '?(' has been consumed up to '('; parse the expression and require ')'.
    fn filter(tokenizer: &mut TokenReader) -> ParseResult<Node> {
        debug!("#filter");
        match tokenizer.next_token() {
            Ok(Token::OpenParenthesis(_)) => {
                let ret = Self::exprs(tokenizer)?;
                Self::eat_whitespace(tokenizer);
                Self::close_token(
                    ret,
                    Token::CloseParenthesis(DUMMY),
                    tokenizer,
                )
            },
            _ => Err(tokenizer.err_msg()),
        }
    }
    // A possibly-parenthesized expression, followed by optional '&&'/'||'.
    fn exprs(tokenizer: &mut TokenReader) -> ParseResult<Node> {
        Self::eat_whitespace(tokenizer);
        debug!("#exprs");
        let node = match tokenizer.peek_token() {
            Ok(Token::OpenParenthesis(_)) => {
                Self::eat_token(tokenizer);
                trace!("\t-exprs - open_parenthesis");
                let ret = Self::exprs(tokenizer)?;
                Self::eat_whitespace(tokenizer);
                Self::close_token(
                    ret,
                    Token::CloseParenthesis(DUMMY),
                    tokenizer,
                )?
            },
            _ => {
                trace!("\t-exprs - else");
                Self::expr(tokenizer)?
            },
        };
        Self::eat_whitespace(tokenizer);
        Self::condition_expr(node, tokenizer)
    }
    // Right-associative chaining of '&&' / '||' onto `prev`.
    fn condition_expr(
        prev: Node,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#condition_expr");
        match tokenizer.peek_token() {
            Ok(Token::And(_)) => {
                Self::eat_token(tokenizer);
                Ok(Node {
                    token: ParseToken::Filter(FilterToken::And),
                    left: Some(Box::new(prev)),
                    right: Some(Box::new(Self::exprs(tokenizer)?)),
                })
            },
            Ok(Token::Or(_)) => {
                Self::eat_token(tokenizer);
                Ok(Node {
                    token: ParseToken::Filter(FilterToken::Or),
                    left: Some(Box::new(prev)),
                    right: Some(Box::new(Self::exprs(tokenizer)?)),
                })
            },
            _ => Ok(prev),
        }
    }
    // A term, optionally followed by a comparison operator and second term.
    // A bare term is only valid when it is a property access ('@…').
    fn expr(tokenizer: &mut TokenReader) -> ParseResult<Node> {
        debug!("#expr");
        let has_prop_candidate =
            matches!(tokenizer.peek_token(), Ok(Token::At(_)));
        let node = Self::term(tokenizer)?;
        Self::eat_whitespace(tokenizer);
        if matches!(
            tokenizer.peek_token(),
            Ok(Token::Equal(_))
                | Ok(Token::NotEqual(_))
                | Ok(Token::Little(_))
                | Ok(Token::LittleOrEqual(_))
                | Ok(Token::Greater(_))
                | Ok(Token::GreaterOrEqual(_))
        ) {
            Self::op(node, tokenizer)
        } else if has_prop_candidate {
            Ok(node)
        } else {
            Err(tokenizer.err_msg())
        }
    }
    // A numeric literal; a following '.' means a fractional part.
    fn term_num(tokenizer: &mut TokenReader) -> ParseResult<Node> {
        debug!("#term_num");
        match tokenizer.next_token() {
            Ok(Token::Key(pos, val)) => match tokenizer.peek_token() {
                Ok(Token::Dot(_)) => {
                    Self::term_num_float(val.as_str(), tokenizer)
                },
                _ => {
                    let number = utils::string_to_num(&val, || {
                        tokenizer.err_msg_with_pos(pos)
                    })?;
                    Ok(Self::node(ParseToken::Number(number)))
                },
            },
            _ => Err(tokenizer.err_msg()),
        }
    }
    // Joins the integer part `num` with the fractional digits after '.'.
    fn term_num_float(
        num: &str,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#term_num_float");
        Self::eat_token(tokenizer);
        match tokenizer.next_token() {
            Ok(Token::Key(pos, frac)) => {
                let mut f = String::new();
                f.push_str(num);
                f.push('.');
                f.push_str(frac.as_str());
                let number = utils::string_to_num(&f, || {
                    tokenizer.err_msg_with_pos(pos)
                })?;
                Ok(Self::node(ParseToken::Number(number)))
            },
            _ => Err(tokenizer.err_msg()),
        }
    }
    // One operand of a filter expression: '@' path, '$' path, quoted
    // string, number, or boolean.
    fn term(tokenizer: &mut TokenReader) -> ParseResult<Node> {
        debug!("#term");
        match tokenizer.peek_token() {
            Ok(Token::At(_)) => {
                Self::eat_token(tokenizer);
                let node = Self::node(ParseToken::Relative);
                match tokenizer.peek_token() {
                    Ok(Token::Whitespace(_, _)) => {
                        Self::eat_whitespace(tokenizer);
                        Ok(node)
                    },
                    _ => Self::paths(node, tokenizer),
                }
            },
            Ok(Token::Absolute(_)) => Self::json_path(tokenizer),
            Ok(Token::DoubleQuoted(_, _)) | Ok(Token::SingleQuoted(_, _)) => {
                Self::array_quote_value(tokenizer)
            },
            Ok(Token::Key(_, key)) => match key.as_bytes()[0] {
                b'-' | b'0'..=b'9' => Self::term_num(tokenizer),
                _ => Self::boolean(tokenizer),
            },
            _ => Err(tokenizer.err_msg()),
        }
    }
    // Consumes a comparison operator and its right-hand term.
    fn op(
        prev: Node,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#op");
        let token = match tokenizer.next_token() {
            Ok(Token::Equal(_)) => ParseToken::Filter(FilterToken::Equal),
            Ok(Token::NotEqual(_)) => ParseToken::Filter(FilterToken::NotEqual),
            Ok(Token::Little(_)) => ParseToken::Filter(FilterToken::Little),
            Ok(Token::LittleOrEqual(_)) => {
                ParseToken::Filter(FilterToken::LittleOrEqual)
            },
            Ok(Token::Greater(_)) => ParseToken::Filter(FilterToken::Greater),
            Ok(Token::GreaterOrEqual(_)) => {
                ParseToken::Filter(FilterToken::GreaterOrEqual)
            },
            _ => {
                return Err(tokenizer.err_msg());
            },
        };
        Self::eat_whitespace(tokenizer);
        Ok(Node {
            token,
            left: Some(Box::new(prev)),
            right: Some(Box::new(Self::term(tokenizer)?)),
        })
    }
    // Skips any run of whitespace tokens.
    fn eat_whitespace(tokenizer: &mut TokenReader) {
        while let Ok(Token::Whitespace(_, _)) = tokenizer.peek_token() {
            let _ = tokenizer.next_token();
        }
    }
    // Discards exactly one token (already peeked by the caller).
    fn eat_token(tokenizer: &mut TokenReader) {
        let _ = tokenizer.next_token();
    }
    // Leaf-node constructor.
    fn node(token: ParseToken) -> Node {
        Node {
            left: None,
            right: None,
            token,
        }
    }
    // Requires the next token to match `token`'s variant (position ignored),
    // returning `ret` unchanged on success.
    fn close_token(
        ret: Node,
        token: Token,
        tokenizer: &mut TokenReader,
    ) -> ParseResult<Node> {
        debug!("#close_token");
        match tokenizer.next_token() {
            Ok(ref t) if t.is_match_token_type(token) => Ok(ret),
            _ => Err(tokenizer.err_msg()),
        }
    }
}
#[allow(deprecated)]
pub trait NodeVisitor {
    /// Walks the AST, emitting `ParseToken`s in the order the selector
    /// engine expects: leaves directly; `In`/`Leaves` infix (left, token,
    /// right); `Array` infix plus a trailing `ArrayEof`; logical filters
    /// postfix; comparison filters terminate each operand with `end_term`
    /// before emitting the operator.
    fn visit(
        &mut self,
        node: &Node,
    ) {
        match &node.token {
            ParseToken::Absolute
            | ParseToken::Relative
            | ParseToken::All
            | ParseToken::Key(_)
            | ParseToken::Keys(_)
            | ParseToken::Range(_, _, _)
            | ParseToken::Union(_)
            | ParseToken::Number(_)
            | ParseToken::Bool(_) => {
                self.visit_token(&node.token);
            },
            ParseToken::In | ParseToken::Leaves => {
                if let Some(n) = &node.left {
                    self.visit(n);
                }
                self.visit_token(&node.token);
                if let Some(n) = &node.right {
                    self.visit(n);
                }
            },
            ParseToken::Array => {
                if let Some(n) = &node.left {
                    self.visit(n);
                }
                self.visit_token(&node.token);
                if let Some(n) = &node.right {
                    self.visit(n);
                }
                // Synthetic marker so the consumer knows the bracket closed.
                self.visit_token(&ParseToken::ArrayEof);
            },
            ParseToken::Filter(FilterToken::And)
            | ParseToken::Filter(FilterToken::Or) => {
                if let Some(n) = &node.left {
                    self.visit(n);
                }
                if let Some(n) = &node.right {
                    self.visit(n);
                }
                self.visit_token(&node.token);
            },
            ParseToken::Filter(_) => {
                if let Some(n) = &node.left {
                    self.visit(n);
                }
                self.end_term();
                if let Some(n) = &node.right {
                    self.visit(n);
                }
                self.end_term();
                self.visit_token(&node.token);
            },
            _ => {},
        }
    }
    /// Called once per emitted token, in visit order.
    fn visit_token(
        &mut self,
        token: &ParseToken,
    );
    /// Called after each operand of a comparison filter; no-op by default.
    fn end_term(&mut self) {}
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/src/parser/tokenizer.rs | src/parser/tokenizer.rs | use std::result::Result;
use super::path_reader::{PathReader, ReaderError};
const CH_DOLLA: char = '$';
const CH_DOT: char = '.';
const CH_ASTERISK: char = '*';
const CH_LARRAY: char = '[';
const CH_RARRAY: char = ']';
const CH_LPAREN: char = '(';
const CH_RPAREN: char = ')';
const CH_AT: char = '@';
const CH_QUESTION: char = '?';
const CH_COMMA: char = ',';
const CH_SEMICOLON: char = ':';
const CH_EQUAL: char = '=';
const CH_AMPERSAND: char = '&';
const CH_PIPE: char = '|';
const CH_LITTLE: char = '<';
const CH_GREATER: char = '>';
const CH_EXCLAMATION: char = '!';
const CH_SINGLE_QUOTE: char = '\'';
const CH_DOUBLE_QUOTE: char = '"';
/// Errors surfaced while tokenizing a path expression.
#[derive(Debug, Clone, PartialEq)]
pub enum TokenError {
    /// Input was exhausted (also the normal end-of-stream signal).
    Eof,
    /// Invalid token at the given byte position.
    Position(usize),
}
/// Maps a low-level reader error onto the tokenizer's error type.
fn to_token_error(read_err: ReaderError) -> TokenError {
    // `ReaderError::Eof` is currently the only variant, so this let-pattern
    // is irrefutable and the conversion is total.
    let ReaderError::Eof = read_err;
    TokenError::Eof
}
/// Lexical tokens of a JSONPath expression. Every variant carries the byte
/// position where it started; the quoted/key variants also carry their text.
#[derive(Debug, PartialEq)]
pub enum Token {
    // '$'
    Absolute(usize),
    // '.'
    Dot(usize),
    // '@'
    At(usize),
    // '['
    OpenArray(usize),
    // ']'
    CloseArray(usize),
    // '*'
    Asterisk(usize),
    // '?'
    Question(usize),
    // ','
    Comma(usize),
    // ':'
    Split(usize),
    // '('
    OpenParenthesis(usize),
    // ')'
    CloseParenthesis(usize),
    // bare identifier or number text
    Key(usize, String),
    DoubleQuoted(usize, String),
    SingleQuoted(usize, String),
    // '=='
    Equal(usize),
    // '>='
    GreaterOrEqual(usize),
    // '>'
    Greater(usize),
    // '<'
    Little(usize),
    // '<='
    LittleOrEqual(usize),
    // '!='
    NotEqual(usize),
    // '&&'
    And(usize),
    // '||'
    Or(usize),
    // position plus run length of the whitespace
    Whitespace(usize, usize),
}
impl Token {
pub fn is_match_token_type(
&self,
other: Token,
) -> bool {
match self {
Token::Absolute(_) => matches!(other, Token::Absolute(_)),
Token::Dot(_) => matches!(other, Token::Dot(_)),
Token::At(_) => matches!(other, Token::At(_)),
Token::OpenArray(_) => matches!(other, Token::OpenArray(_)),
Token::CloseArray(_) => matches!(other, Token::CloseArray(_)),
Token::Asterisk(_) => matches!(other, Token::Asterisk(_)),
Token::Question(_) => matches!(other, Token::Question(_)),
Token::Comma(_) => matches!(other, Token::Comma(_)),
Token::Split(_) => matches!(other, Token::Split(_)),
Token::OpenParenthesis(_) => {
matches!(other, Token::OpenParenthesis(_))
},
Token::CloseParenthesis(_) => {
matches!(other, Token::CloseParenthesis(_))
},
Token::Key(_, _) => matches!(other, Token::Key(_, _)),
Token::DoubleQuoted(_, _) => {
matches!(other, Token::DoubleQuoted(_, _))
},
Token::SingleQuoted(_, _) => {
matches!(other, Token::SingleQuoted(_, _))
},
Token::Equal(_) => matches!(other, Token::Equal(_)),
Token::GreaterOrEqual(_) => {
matches!(other, Token::GreaterOrEqual(_))
},
Token::Greater(_) => matches!(other, Token::Greater(_)),
Token::Little(_) => matches!(other, Token::Little(_)),
Token::LittleOrEqual(_) => {
matches!(other, Token::LittleOrEqual(_))
},
Token::NotEqual(_) => matches!(other, Token::NotEqual(_)),
Token::And(_) => matches!(other, Token::And(_)),
Token::Or(_) => matches!(other, Token::Or(_)),
Token::Whitespace(_, _) => {
matches!(other, Token::Whitespace(_, _))
},
}
}
}
/// Streaming lexer that turns a JSONPath string into [`Token`]s.
pub struct Tokenizer<'a> {
    input: PathReader<'a>,
}
impl<'a> Tokenizer<'a> {
pub fn new(input: &'a str) -> Self {
trace!("input: {}", input);
Tokenizer {
input: PathReader::new(input),
}
}
// FIXME When written in "match" grammar, it is determined that "tarpaulin" did not cover the test coverage.
fn is_not_token(c: &char) -> bool {
if c == &CH_DOT
|| c == &CH_ASTERISK
|| c == &CH_LARRAY
|| c == &CH_RARRAY
|| c == &CH_LPAREN
|| c == &CH_RPAREN
|| c == &CH_AT
|| c == &CH_QUESTION
|| c == &CH_COMMA
|| c == &CH_SEMICOLON
|| c == &CH_LITTLE
|| c == &CH_GREATER
|| c == &CH_EQUAL
|| c == &CH_AMPERSAND
|| c == &CH_PIPE
|| c == &CH_EXCLAMATION
{
return false;
}
!c.is_whitespace()
}
fn dolla(
&mut self,
pos: usize,
ch: char,
) -> Result<Token, TokenError> {
let (_, mut vec) = self
.input
.take_while(Self::is_not_token)
.map_err(to_token_error)?;
vec.insert(0, ch);
if vec.len() == 1 {
Ok(Token::Absolute(pos))
} else {
Ok(Token::Key(pos, vec))
}
}
fn quote(
&mut self,
ch: char,
) -> Result<String, TokenError> {
let (_, mut val) = self
.input
.take_while(|c| *c != ch)
.map_err(to_token_error)?;
if let Some('\\') = val.chars().last() {
self.input.next_char().map_err(to_token_error)?;
let _ = val.pop();
let (_, val_remain) = self
.input
.take_while(|c| *c != ch)
.map_err(to_token_error)?;
self.input.next_char().map_err(to_token_error)?;
val.push(ch);
val.push_str(val_remain.as_str());
} else {
self.input.next_char().map_err(to_token_error)?;
}
Ok(val)
}
fn single_quote(
&mut self,
pos: usize,
ch: char,
) -> Result<Token, TokenError> {
let val = self.quote(ch)?;
Ok(Token::SingleQuoted(pos, val))
}
fn double_quote(
&mut self,
pos: usize,
ch: char,
) -> Result<Token, TokenError> {
let val = self.quote(ch)?;
Ok(Token::DoubleQuoted(pos, val))
}
fn equal(
&mut self,
pos: usize,
_: char,
) -> Result<Token, TokenError> {
let (_, ch) = self.input.peek_char().map_err(to_token_error)?;
match ch {
CH_EQUAL => {
self.input.next_char().map_err(to_token_error)?;
Ok(Token::Equal(pos))
},
_ => Err(TokenError::Position(pos)),
}
}
fn not_equal(
&mut self,
pos: usize,
_: char,
) -> Result<Token, TokenError> {
let (_, ch) = self.input.peek_char().map_err(to_token_error)?;
match ch {
CH_EQUAL => {
self.input.next_char().map_err(to_token_error)?;
Ok(Token::NotEqual(pos))
},
_ => Err(TokenError::Position(pos)),
}
}
fn little(
&mut self,
pos: usize,
_: char,
) -> Result<Token, TokenError> {
match self.input.peek_char() {
Ok((_, CH_EQUAL)) => {
self.input.next_char().map_err(to_token_error)?;
Ok(Token::LittleOrEqual(pos))
},
Ok(_) => Ok(Token::Little(pos)),
Err(ReaderError::Eof) => Ok(Token::Little(pos)),
}
}
fn greater(
&mut self,
pos: usize,
_: char,
) -> Result<Token, TokenError> {
match self.input.peek_char() {
Ok((_, CH_EQUAL)) => {
self.input.next_char().map_err(to_token_error)?;
Ok(Token::GreaterOrEqual(pos))
},
Ok(_) => Ok(Token::Greater(pos)),
Err(ReaderError::Eof) => Ok(Token::Greater(pos)),
}
}
fn and(
&mut self,
pos: usize,
_: char,
) -> Result<Token, TokenError> {
let (_, ch) = self.input.peek_char().map_err(to_token_error)?;
match ch {
CH_AMPERSAND => {
let _ = self.input.next_char().map_err(to_token_error);
Ok(Token::And(pos))
},
_ => Err(TokenError::Position(pos)),
}
}
fn or(
&mut self,
pos: usize,
_: char,
) -> Result<Token, TokenError> {
let (_, ch) = self.input.peek_char().map_err(to_token_error)?;
match ch {
CH_PIPE => {
self.input.next_char().map_err(to_token_error)?;
Ok(Token::Or(pos))
},
_ => Err(TokenError::Position(pos)),
}
}
fn whitespace(
&mut self,
pos: usize,
ch: char,
) -> Result<Token, TokenError> {
let (_, vec) = self
.input
.take_while(|c| c.is_whitespace())
.map_err(to_token_error)?;
Ok(Token::Whitespace(
pos,
if ch.is_whitespace() {
vec.len() + 1
} else {
vec.len()
},
))
}
fn other(
&mut self,
pos: usize,
ch: char,
) -> Result<Token, TokenError> {
let (_, mut vec) = self
.input
.take_while(Self::is_not_token)
.map_err(to_token_error)?;
vec.insert(0, ch);
Ok(Token::Key(pos, vec))
}
    /// Reads the next token from the input stream.
    ///
    /// Single-character tokens are emitted directly; characters that may start
    /// a multi-character token (`$`, quotes, comparison and logical operators)
    /// are delegated to their dedicated handlers. Whitespace is collapsed into
    /// one token, and anything else is collected into a `Key` by `other`.
    ///
    /// Returns the error mapped by `to_token_error` when the underlying reader
    /// fails (including `Eof` at end of input).
    pub fn next_token(&mut self) -> Result<Token, TokenError> {
        let (pos, ch) = self.input.next_char().map_err(to_token_error)?;
        match ch {
            CH_DOLLA => self.dolla(pos, ch),
            CH_DOT => Ok(Token::Dot(pos)),
            CH_ASTERISK => Ok(Token::Asterisk(pos)),
            CH_LARRAY => Ok(Token::OpenArray(pos)),
            CH_RARRAY => Ok(Token::CloseArray(pos)),
            CH_LPAREN => Ok(Token::OpenParenthesis(pos)),
            CH_RPAREN => Ok(Token::CloseParenthesis(pos)),
            CH_AT => Ok(Token::At(pos)),
            CH_QUESTION => Ok(Token::Question(pos)),
            CH_COMMA => Ok(Token::Comma(pos)),
            CH_SEMICOLON => Ok(Token::Split(pos)),
            CH_SINGLE_QUOTE => self.single_quote(pos, ch),
            CH_DOUBLE_QUOTE => self.double_quote(pos, ch),
            CH_EQUAL => self.equal(pos, ch),
            CH_GREATER => self.greater(pos, ch),
            CH_LITTLE => self.little(pos, ch),
            CH_AMPERSAND => self.and(pos, ch),
            CH_PIPE => self.or(pos, ch),
            CH_EXCLAMATION => self.not_equal(pos, ch),
            _ if ch.is_whitespace() => self.whitespace(pos, ch),
            _ => self.other(pos, ch),
        }
    }
    /// Current position of the underlying reader within the original input.
    fn current_pos(&self) -> usize {
        self.input.current_pos()
    }
}
/// Pre-tokenized view over a path expression.
///
/// All tokens are materialized eagerly by [`TokenReader::new`]; the reader then
/// hands them out one at a time via `next_token`/`peek_token`.
pub struct TokenReader<'a> {
    origin_input: &'a str, // untouched input, kept for error messages
    err: TokenError, // error that terminated tokenization (Eof on normal end)
    err_pos: usize, // reader position at which tokenization stopped
    tokens: Vec<(usize, Token)>, // remaining tokens; the *next* one is at the tail
    curr_pos: Option<usize>, // position of the most recently consumed token
}
impl<'a> TokenReader<'a> {
    /// Tokenizes `input` eagerly until the tokenizer reports an error
    /// (`TokenError::Eof` on normal termination) and stores the tokens.
    pub fn new(input: &'a str) -> Self {
        let mut tokenizer = Tokenizer::new(input);
        let mut tokens = vec![];
        loop {
            match tokenizer.next_token() {
                Ok(t) => {
                    // Collect in scan order and reverse once at the end so
                    // that `last()`/`pop()` yield tokens front-to-back. The
                    // previous per-token `insert(0, …)` shifted the whole
                    // vector on every push, which was accidentally O(n²).
                    tokens.push((tokenizer.current_pos(), t));
                },
                Err(e) => {
                    tokens.reverse();
                    return TokenReader {
                        origin_input: input,
                        err: e,
                        err_pos: tokenizer.current_pos(),
                        tokens,
                        curr_pos: None,
                    };
                },
            }
        }
    }
    /// Borrows the next token without consuming it; returns the stored
    /// terminating error once all tokens are exhausted.
    pub fn peek_token(&self) -> Result<&Token, TokenError> {
        match self.tokens.last() {
            Some((_, t)) => {
                trace!("%{:?}", t);
                Ok(t)
            },
            _ => {
                trace!("%{:?}", self.err);
                Err(self.err.clone())
            },
        }
    }
    /// Consumes and returns the next token, remembering its position for
    /// error reporting; returns the stored terminating error when exhausted.
    pub fn next_token(&mut self) -> Result<Token, TokenError> {
        match self.tokens.pop() {
            Some((pos, t)) => {
                self.curr_pos = Some(pos);
                trace!("@{:?}", t);
                Ok(t)
            },
            _ => {
                trace!("@{:?}", self.err);
                Err(self.err.clone())
            },
        }
    }
    /// Renders the original input followed by a caret line of length `pos`,
    /// visually marking how far parsing progressed before the error.
    pub fn err_msg_with_pos(
        &self,
        pos: usize,
    ) -> String {
        format!("{}\n{}", self.origin_input, "^".repeat(pos))
    }
    /// Error message anchored at the last consumed token, or at the position
    /// where tokenization stopped when no token was ever consumed.
    pub fn err_msg(&self) -> String {
        match self.curr_pos {
            Some(pos) => self.err_msg_with_pos(pos),
            _ => self.err_msg_with_pos(self.err_pos),
        }
    }
}
#[cfg(test)]
mod token_tests {
use super::*;
#[test]
fn test_is_match_token_type() {
assert!(Token::Absolute(1).is_match_token_type(Token::Absolute(2)));
assert!(Token::Dot(1).is_match_token_type(Token::Dot(2)));
assert!(Token::At(1).is_match_token_type(Token::At(2)));
assert!(Token::OpenArray(1).is_match_token_type(Token::OpenArray(2)));
assert!(Token::CloseArray(1).is_match_token_type(Token::CloseArray(2)));
assert!(Token::Asterisk(1).is_match_token_type(Token::Asterisk(2)));
assert!(Token::Question(1).is_match_token_type(Token::Question(2)));
assert!(Token::Comma(1).is_match_token_type(Token::Comma(2)));
assert!(Token::Split(1).is_match_token_type(Token::Split(2)));
assert!(Token::OpenParenthesis(1)
.is_match_token_type(Token::OpenParenthesis(2)));
assert!(Token::CloseParenthesis(1)
.is_match_token_type(Token::CloseParenthesis(2)));
assert!(Token::Key(1, "key".to_string())
.is_match_token_type(Token::Key(2, "key".to_string())));
assert!(Token::DoubleQuoted(1, "value".to_string())
.is_match_token_type(Token::DoubleQuoted(2, "value".to_string())));
assert!(Token::SingleQuoted(1, "value".to_string())
.is_match_token_type(Token::SingleQuoted(2, "value".to_string())));
assert!(Token::Equal(1).is_match_token_type(Token::Equal(2)));
assert!(Token::GreaterOrEqual(1)
.is_match_token_type(Token::GreaterOrEqual(2)));
assert!(Token::Greater(1).is_match_token_type(Token::Greater(2)));
assert!(Token::Little(1).is_match_token_type(Token::Little(2)));
assert!(Token::LittleOrEqual(1)
.is_match_token_type(Token::LittleOrEqual(2)));
assert!(Token::NotEqual(1).is_match_token_type(Token::NotEqual(2)));
assert!(Token::And(1).is_match_token_type(Token::And(2)));
assert!(Token::Or(1).is_match_token_type(Token::Or(2)));
assert!(Token::Whitespace(1, 2)
.is_match_token_type(Token::Whitespace(3, 4)));
}
}
#[cfg(test)]
mod tokenizer_tests {
    use super::*;
    // One test per single-character token, then the multi-character operators,
    // whitespace collapsing, and finally the `is_not_token` boundary behavior.
    #[test]
    fn test_dolla() {
        let mut tokenizer = Tokenizer::new("$");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
    }
    #[test]
    fn test_dot() {
        let mut tokenizer = Tokenizer::new(".");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Dot(0));
    }
    #[test]
    fn test_asterisk() {
        let mut tokenizer = Tokenizer::new("*");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Asterisk(0));
    }
    #[test]
    fn test_open_array() {
        let mut tokenizer = Tokenizer::new("[");
        assert_eq!(tokenizer.next_token().unwrap(), Token::OpenArray(0));
    }
    #[test]
    fn test_close_array() {
        let mut tokenizer = Tokenizer::new("]");
        assert_eq!(tokenizer.next_token().unwrap(), Token::CloseArray(0));
    }
    #[test]
    fn test_open_parenthesis() {
        let mut tokenizer = Tokenizer::new("(");
        assert_eq!(tokenizer.next_token().unwrap(), Token::OpenParenthesis(0));
    }
    #[test]
    fn test_close_parenthesis() {
        let mut tokenizer = Tokenizer::new(")");
        assert_eq!(tokenizer.next_token().unwrap(), Token::CloseParenthesis(0));
    }
    #[test]
    fn test_at() {
        let mut tokenizer = Tokenizer::new("@");
        assert_eq!(tokenizer.next_token().unwrap(), Token::At(0));
    }
    #[test]
    fn test_question() {
        let mut tokenizer = Tokenizer::new("?");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Question(0));
    }
    #[test]
    fn test_comma() {
        let mut tokenizer = Tokenizer::new(",");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Comma(0));
    }
    #[test]
    fn test_semicolon() {
        let mut tokenizer = Tokenizer::new(":");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Split(0));
    }
    // Second case covers an escaped quote inside the literal.
    #[test]
    fn test_single_quote() {
        let mut tokenizer = Tokenizer::new("'value'");
        assert_eq!(
            tokenizer.next_token().unwrap(),
            Token::SingleQuoted(0, "value".to_string())
        );
        let mut tokenizer = Tokenizer::new("'value\\''");
        assert_eq!(
            tokenizer.next_token().unwrap(),
            Token::SingleQuoted(0, "value'".to_string())
        );
    }
    #[test]
    fn test_double_quote() {
        let mut tokenizer = Tokenizer::new("\"value\"");
        assert_eq!(
            tokenizer.next_token().unwrap(),
            Token::DoubleQuoted(0, "value".to_string())
        );
    }
    #[test]
    fn test_equal() {
        let mut tokenizer = Tokenizer::new("==");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Equal(0));
    }
    #[test]
    fn test_not_equal() {
        let mut tokenizer = Tokenizer::new("!=");
        assert_eq!(tokenizer.next_token().unwrap(), Token::NotEqual(0));
    }
    #[test]
    fn test_little() {
        let mut tokenizer = Tokenizer::new("<");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Little(0));
    }
    #[test]
    fn test_little_or_equal() {
        let mut tokenizer = Tokenizer::new("<=");
        assert_eq!(tokenizer.next_token().unwrap(), Token::LittleOrEqual(0));
    }
    #[test]
    fn test_greater() {
        let mut tokenizer = Tokenizer::new(">");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Greater(0));
    }
    #[test]
    fn test_greater_or_equal() {
        let mut tokenizer = Tokenizer::new(">=");
        assert_eq!(tokenizer.next_token().unwrap(), Token::GreaterOrEqual(0));
    }
    #[test]
    fn test_and() {
        let mut tokenizer = Tokenizer::new("&&");
        assert_eq!(tokenizer.next_token().unwrap(), Token::And(0));
    }
    #[test]
    fn test_or() {
        let mut tokenizer = Tokenizer::new("||");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Or(0));
    }
    // Three spaces collapse into one Whitespace token of length 3.
    #[test]
    fn test_whitespace() {
        let mut tokenizer = Tokenizer::new("   ");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Whitespace(0, 3));
    }
    #[test]
    fn test_other() {
        let mut tokenizer = Tokenizer::new("key");
        assert_eq!(
            tokenizer.next_token().unwrap(),
            Token::Key(0, "key".to_string())
        );
    }
    // `$` only becomes Absolute when followed by a structural character;
    // `$key` is a single Key. The `$=`/`$&`/`$|`/`$!` cases hit Eof because
    // the operator handlers need a second character.
    #[test]
    fn test_is_not_token() {
        let mut tokenizer = Tokenizer::new("$key");
        assert_eq!(
            tokenizer.next_token().unwrap(),
            Token::Key(0, "$key".to_string())
        );
        let mut tokenizer = Tokenizer::new("$.");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token().unwrap(), Token::Dot(1));
        let mut tokenizer = Tokenizer::new("$*");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token().unwrap(), Token::Asterisk(1));
        let mut tokenizer = Tokenizer::new("$[");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token().unwrap(), Token::OpenArray(1));
        let mut tokenizer = Tokenizer::new("$]");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token().unwrap(), Token::CloseArray(1));
        let mut tokenizer = Tokenizer::new("$(");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token().unwrap(), Token::OpenParenthesis(1));
        let mut tokenizer = Tokenizer::new("$)");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token().unwrap(), Token::CloseParenthesis(1));
        let mut tokenizer = Tokenizer::new("$@");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token().unwrap(), Token::At(1));
        let mut tokenizer = Tokenizer::new("$?");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token().unwrap(), Token::Question(1));
        let mut tokenizer = Tokenizer::new("$,");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token().unwrap(), Token::Comma(1));
        let mut tokenizer = Tokenizer::new("$:");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token().unwrap(), Token::Split(1));
        let mut tokenizer = Tokenizer::new("$<");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token().unwrap(), Token::Little(1));
        let mut tokenizer = Tokenizer::new("$>");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token().unwrap(), Token::Greater(1));
        let mut tokenizer = Tokenizer::new("$=");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token(), Err(TokenError::Eof));
        let mut tokenizer = Tokenizer::new("$&");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token(), Err(TokenError::Eof));
        let mut tokenizer = Tokenizer::new("$|");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token(), Err(TokenError::Eof));
        let mut tokenizer = Tokenizer::new("$!");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token(), Err(TokenError::Eof));
        // NOTE(review): duplicate of the "$|" case above — possibly a
        // copy-paste leftover that was meant to cover another operator.
        let mut tokenizer = Tokenizer::new("$|");
        assert_eq!(tokenizer.next_token().unwrap(), Token::Absolute(0));
        assert_eq!(tokenizer.next_token(), Err(TokenError::Eof));
    }
}
#[cfg(test)]
mod token_reader_tests {
    use super::*;
    // TokenReader tokenizes eagerly in `new`; these tests cover construction,
    // peeking, consuming, and the caret-based error-message rendering.
    #[test]
    fn test_token_reader_new() {
        let input = "some input string";
        let reader = TokenReader::new(input);
        assert_eq!(reader.origin_input, input);
        assert!(!reader.tokens.is_empty() || reader.err_pos > 0);
    }
    #[test]
    fn test_peek_token() {
        let input = "some input string";
        let reader = TokenReader::new(input);
        assert_eq!(reader.peek_token(), Ok(&Token::Key(0, "some".to_string())));
    }
    #[test]
    fn test_next_token() {
        let input = "some input string";
        let mut reader = TokenReader::new(input);
        assert_eq!(reader.next_token(), Ok(Token::Key(0, "some".to_string())));
    }
    #[test]
    fn test_err_msg_with_pos() {
        let input = "some input string";
        let reader = TokenReader::new(input);
        let pos = 5;
        let err_msg = reader.err_msg_with_pos(pos);
        assert!(err_msg.contains("^".repeat(pos).as_str()));
    }
    #[test]
    fn test_err_msg() {
        let input = "some input string";
        let mut reader = TokenReader::new(input);
        while reader.next_token().is_ok() {}
        let err_msg = reader.err_msg();
        assert!(err_msg.contains("^"));
    }
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/tests/op.rs | tests/op.rs | #[macro_use]
extern crate serde_json;
use common::{read_json, select_and_then_compare, setup};
mod common;
// Filter-operator tests over object-valued documents: equality/ordering on
// nested objects, `||`/`&&` combinations, and absolute-path (`$`) operands
// inside filter expressions.
#[test]
fn op_object_eq() {
    setup();
    select_and_then_compare(
        "$.school[?(@.friends == @.friends)]",
        read_json("./benchmark/data_obj.json"),
        json!([{
            "friends": [
                {"id": 0, "name": "Millicent Norman"},
                {"id": 1, "name": "Vincent Cannon" },
                {"id": 2, "name": "Gray Berry"}
            ]
        }]),
    );
}
#[test]
fn op_object_ge() {
    setup();
    select_and_then_compare(
        "$.friends[?(@.id >= 2)]",
        read_json("./benchmark/data_obj.json"),
        json!([
            { "id" : 2, "name" : "Gray Berry" }
        ]),
    );
}
#[test]
fn op_object_or_default() {
    setup();
    select_and_then_compare(
        "$.friends[?(@.id >= 2 || @.id == 1)]",
        read_json("./benchmark/data_obj.json"),
        json!([
            { "id" : 2, "name" : "Gray Berry" },
            { "id" : 1, "name" : "Vincent Cannon" }
        ]),
    );
}
#[test]
fn op_object_and_or() {
    setup();
    select_and_then_compare(
        "$.friends[?( (@.id >= 2 || @.id == 1) && @.id == 0)]",
        read_json("./benchmark/data_obj.json"),
        json!([]),
    );
}
// `$.index` references the document root from inside the filter.
#[test]
fn op_result_type() {
    setup();
    select_and_then_compare(
        "$..friends[?(@.id == $.index)].id",
        read_json("./benchmark/data_obj.json"),
        json!([0, 0]),
    );
}
#[test]
fn op_absolute_path_result_type() {
    setup();
    select_and_then_compare(
        "$..book[?($.store.bicycle.price < @.price)].price",
        read_json("./benchmark/example.json"),
        json!([22.99]),
    );
}
#[test]
fn op_complicated() {
    setup();
    select_and_then_compare(
        "$..book[?( (@.price == 12.99 || @.category == 'reference') && @.price > 10)].price",
        read_json("./benchmark/example.json"),
        json!([12.99]),
    );
}
// Ordering operators with the deep-scan selector, then one test per
// comparison operator against plain numeric values.
#[test]
fn op_gt() {
    setup();
    select_and_then_compare(
        "$..[?(@.age > 40)]",
        json!([
            { "name": "이름1", "age": 40, "phone": "+33 12341234" },
            { "name": "이름2", "age": 42, "phone": "++44 12341234" }
        ]),
        json!([
            { "name" : "이름2", "age" : 42, "phone" : "++44 12341234" }
        ]),
    );
}
// Entries missing the compared field ("친구4" has no age) are excluded.
#[test]
fn op_ge() {
    setup();
    select_and_then_compare(
        "$..[?(@.age >= 30)]",
        json!({
            "school": {
                "friends": [
                    {"name": "친구1", "age": 20},
                    {"name": "친구2", "age": 20}
                ]
            },
            "friends": [
                {"name": "친구3", "age": 30},
                {"name": "친구4"}
            ]}),
        json!([
            { "name" : "친구3", "age" : 30 }
        ]),
    );
}
#[test]
fn op_eq_for_number() {
    setup();
    select_and_then_compare(
        "$.[?(@.a == 1)]",
        json!({ "a": 1 }),
        json!([{ "a": 1 }]),
    );
}
#[test]
fn op_ne_for_number() {
    setup();
    select_and_then_compare(
        "$.[?(@.a != 2)]",
        json!({ "a": 1 }),
        json!([{ "a": 1 }]),
    );
}
#[test]
fn op_lt_for_number() {
    setup();
    select_and_then_compare(
        "$.[?(@.a < 2)]",
        json!({ "a": 1 }),
        json!([{ "a": 1 }]),
    );
}
#[test]
fn op_le_for_number() {
    setup();
    select_and_then_compare(
        "$.[?(@.a <= 1)]",
        json!({ "a": 1 }),
        json!([{ "a": 1 }]),
    );
}
#[test]
fn op_gt_for_number() {
    setup();
    select_and_then_compare(
        "$.[?(@.a > 0)]",
        json!({ "a": 1 }),
        json!([{ "a": 1 }]),
    );
}
#[test]
fn op_ge_for_number() {
    setup();
    select_and_then_compare(
        "$.[?(@.a >= 0)]",
        json!({ "a": 1 }),
        json!([{ "a": 1 }]),
    );
}
// Comparison operators on string values (lexicographic), then on whole
// object values (equality is structural; ordering on objects never matches).
#[test]
fn op_eq_for_string_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(@.a == "b")]"#,
        json!({ "a": "b" }),
        json!([{ "a": "b" }]),
    );
}
#[test]
fn op_ne_for_string_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(@.a != "c")]"#,
        json!({ "a": "b" }),
        json!([{ "a": "b" }]),
    );
}
#[test]
fn op_lt_for_string_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(@.a < "b")]"#,
        json!({ "a": "b" }),
        json!([]),
    );
}
#[test]
fn op_le_for_string_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(@.a <= "b")]"#,
        json!({ "a": "b" }),
        json!([{ "a": "b" }]),
    );
}
#[test]
fn op_gt_for_string_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(@.a > "b")]"#,
        json!({ "a": "b" }),
        json!([]),
    );
}
#[test]
fn op_ge_for_string_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(@.a >= "b")]"#,
        json!({ "a": "b" }),
        json!([{ "a": "b" }]),
    );
}
#[test]
fn op_eq_for_object_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(@.a == @.c)]"#,
        json!({"a": { "1": 1 }, "b": { "2": 2 }, "c": { "1": 1 }}),
        json!([{"a": { "1": 1 }, "b": { "2": 2 }, "c": { "1": 1 }}]),
    );
}
///
/// This mirrors what appears to be a bug in Jayway's implementation.
///
/// The current context should be returned rather than an empty array, but an
/// empty array is returned. Note: this differs from the result of
/// `op_ne_for_object_value2`.
///
#[test]
fn op_ne_for_object_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(@.a != @.c)]"#,
        json!({
            "a": {
                "1": 1
            },
            "c": {
                "1": 1
            }
        }),
        json!([{
            "a": {
                "1": 1
            },
            "c": {
                "1": 1
            }
        }]),
    );
}
#[test]
fn op_ne_for_object_value2() {
    setup();
    select_and_then_compare(
        "$.[?(@.store1 != @.store2)]",
        json!({
            "store1": {
                "a" : 1
            },
            "store2": {
                "b" : 1
            }
        }),
        json!([{
            "store1" : {
                "a" : 1
            },
            "store2" : {
                "b" : 1
            }
        }
        ]),
    );
}
// Structural equality of nested arrays, ordering on object values (never
// matches), mixed-type comparisons (always empty), and deep-scan equality.
#[test]
fn cmp_json_rel() {
    setup();
    select_and_then_compare(
        "$.[?(@.a.a == @.b.a)]",
        json!({
            "a": {
                "a": [true, "1"]
            },
            "b": {
                "a": [true, "1"]
            }
        }),
        json!([
            {
                "a" : {
                    "a" : [
                        true,
                        "1"
                    ]
                },
                "b" : {
                    "a" : [
                        true,
                        "1"
                    ]
                }
            }
        ]),
    )
}
#[test]
fn op_lt_for_object_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(@.a < @.c)]"#,
        json!({"a": { "1": 1 }, "b": { "2": 2 }, "c": { "1": 1 }}),
        json!([]),
    );
}
#[test]
fn op_le_for_object_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(@.a <= @.c)]"#,
        json!({"a": { "1": 1 }, "b": { "2": 2 }, "c": { "1": 1 }}),
        json!([]),
    );
}
#[test]
fn op_gt_for_object_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(@.a > @.c)]"#,
        json!({"a": { "1": 1 }, "b": { "2": 2 }, "c": { "1": 1 }}),
        json!([]),
    );
}
#[test]
fn op_ge_for_object_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(@.a >= @.c)]"#,
        json!({"a": { "1": 1 }, "b": { "2": 2 }, "c": { "1": 1 }}),
        json!([]),
    );
}
// A number never equals an object value.
#[test]
fn op_eq_for_complex_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(1 == @.a)]"#,
        json!({ "a": { "b": 1 } }),
        json!([]),
    );
}
// Inequality against a different type is trivially true.
#[test]
fn op_ne_for_complex_value() {
    setup();
    select_and_then_compare(
        r#"$.[?("1" != @.a)]"#,
        json!({
            "a": {
                "b": 1
            }
        }),
        json!([{
            "a" : {
                "b" : 1
            }
        }
        ]),
    );
}
#[test]
fn op_le_for_complex_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(@.a <= 1)]"#,
        json!({ "a": { "b": 1 } }),
        json!([]),
    );
}
#[test]
fn op_gt_for_complex_value() {
    setup();
    select_and_then_compare(
        r#"$.[?(@.a > "1")]"#,
        json!({ "a": { "b": 1 } }),
        json!([]),
    );
}
// Comparisons across JSON types never match.
#[test]
fn op_compare_different_types() {
    setup();
    for path in [
        r#"$[?("1" == 1)]"#,
        r#"$[?(1 == "1")]"#,
        r#"$[?(true == 1)]"#,
        r#"$[?(@ == 1)]"#,
    ]
    .iter()
    {
        select_and_then_compare(path, json!({}), json!([]));
    }
}
#[test]
fn op_for_same_type() {
    setup();
    select_and_then_compare(
        r#"$..[?(@.a == 1)]"#,
        json!({
            "a": 1,
            "b" : {"a": 1},
            "c" : {"a": 1}
        }),
        json!([
            {"a": 1},
            {"a": 1}
        ]),
    );
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/tests/lib.rs | tests/lib.rs | extern crate jsonpath_lib as jsonpath;
extern crate serde;
#[macro_use]
extern crate serde_json;
use serde::Deserialize;
use serde_json::Value;
use common::{compare_result, read_contents, read_json, setup};
use jsonpath::JsonPathError;
mod common;
// Exercises the two main entry points: pre-compiled templates
// (`PathCompiled::compile` + `select`) and the closure-based `selector`.
#[test]
fn compile() {
    let compile_object = |path| {
        let template = jsonpath::PathCompiled::compile(path).unwrap();
        let json_obj = read_json("./benchmark/data_obj.json");
        let json = template.select(&json_obj).unwrap();
        let ret = json!([
            {"id": 2,"name": "Gray Berry"},
            {"id": 2,"name": "Gray Berry"}
        ]);
        compare_result(json, ret);
    };
    let compile_array = |path| {
        let template = jsonpath::PathCompiled::compile(path).unwrap();
        let json_obj = read_json("./benchmark/data_array.json");
        let json = template.select(&json_obj).unwrap();
        let ret = json!([
            {"id": 2,"name": "Gray Berry"},
            {"id": 2,"name": "Rosetta Erickson"}
        ]);
        compare_result(json, ret);
    };
    fn compile_error() {
        // The deprecated `Compiled` alias is still expected to report errors.
        #[allow(deprecated)]
        let template = jsonpath::Compiled::compile("$[");
        assert!(template.is_err());
    }
    setup();
    compile_object("$..friends[2]");
    compile_array("$..friends[2]");
    compile_error();
}
#[test]
fn selector() {
    setup();
    fn select<'a, F>(
        selector: &mut F,
        path: &'a str,
        target: Value,
    ) where
        F: FnMut(&'a str) -> Result<Vec<&'a Value>, JsonPathError>,
    {
        let json = selector(path).unwrap();
        compare_result(json, target);
    }
    let json_obj = read_json("./benchmark/data_obj.json");
    let mut selector = jsonpath::selector(&json_obj);
    select(
        &mut selector,
        "$..friends[2]",
        json!([
            {"id": 2,"name": "Gray Berry"},
            {"id": 2,"name": "Gray Berry"}
        ]),
    );
    select(
        &mut selector,
        "$..friends[0]",
        json!([
            {"id": 0},
            {"id": 0,"name": "Millicent Norman"}
        ]),
    );
}
#[test]
fn selector_as() {
#[derive(Deserialize, PartialEq, Debug)]
struct Friend {
id: u8,
name: Option<String>,
}
fn select<'a, F>(
selector: &mut F,
path: &'a str,
target: Vec<Friend>,
) where
F: FnMut(&'a str) -> Result<Vec<Friend>, JsonPathError>,
{
let json = selector(path).unwrap();
assert_eq!(json, target);
}
let json_obj = read_json("./benchmark/data_obj.json");
let mut selector = jsonpath::selector_as::<Friend>(&json_obj);
select(
&mut selector,
"$..friends[2]",
vec![
Friend {
id: 2,
name: Some("Gray Berry".to_string()),
},
Friend {
id: 2,
name: Some("Gray Berry".to_string()),
},
],
);
select(
&mut selector,
"$..friends[0]",
vec![
Friend { id: 0, name: None },
Friend {
id: 0,
name: Some("Millicent Norman".to_string()),
},
],
);
}
#[test]
fn select() {
let json_obj = read_json("./benchmark/example.json");
let json = jsonpath::select(&json_obj, "$..book[2]").unwrap();
let ret = json!([{
"category" : "fiction",
"author" : "Herman Melville",
"title" : "Moby Dick",
"isbn" : "0-553-21311-3",
"price" : 8.99
}]);
compare_result(json, ret);
}
#[test]
fn select_str() {
let json_str = read_contents("./benchmark/example.json");
let result_str = jsonpath::select_as_str(&json_str, "$..book[2]").unwrap();
let ret = json!([{
"category" : "fiction",
"author" : "Herman Melville",
"title" : "Moby Dick",
"isbn" : "0-553-21311-3",
"price" : 8.99
}]);
let json: Value = serde_json::from_str(&result_str).unwrap();
assert_eq!(json, ret);
}
#[test]
fn test_to_struct() {
#[derive(Deserialize, PartialEq, Debug)]
struct Person {
name: String,
age: u8,
phones: Vec<String>,
}
let ret: Vec<Person> = jsonpath::select_as(
r#"
{
"person":
{
"name": "Doe John",
"age": 44,
"phones": [
"+44 1234567",
"+44 2345678"
]
}
}
"#,
"$.person",
)
.unwrap();
let person = Person {
name: "Doe John".to_string(),
age: 44,
phones: vec!["+44 1234567".to_string(), "+44 2345678".to_string()],
};
assert_eq!(vec![person], ret);
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/tests/precompile.rs | tests/precompile.rs | #[macro_use]
extern crate serde_json;
extern crate jsonpath_lib;
use common::setup;
use jsonpath_lib::PathCompiled;
use serde_json::Value;
mod common;
// A compiled path can be reused across multiple `select` calls; an empty
// path string is a compile error.
#[test]
fn precompile_test() {
    setup();
    let json = json!({
        "foo": {"bar": "baz"}
    });
    // compile once
    let compiled = PathCompiled::compile("$.foo.bar");
    assert!(compiled.is_ok());
    let compiled = compiled.unwrap();
    // re-use
    //let result = compiled(&json).unwrap();
    assert_eq!(
        compiled.select(&json).unwrap().clone(),
        vec![&Value::String("baz".into())]
    );
    assert_eq!(
        compiled.select(&json).unwrap().clone(),
        vec![&Value::String("baz".into())]
    );
}
#[test]
fn precompile_failure() {
    setup();
    let compiled = PathCompiled::compile("");
    assert!(compiled.is_err());
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/tests/jsonpath_examples.rs | tests/jsonpath_examples.rs | #[macro_use]
extern crate serde_json;
use common::{read_json, select_and_then_compare, setup};
mod common;
/// Authors of all books in the store (`$.store.book[*].author`).
/// Renamed from `example_authros_of_all_books` to fix the "authros" typo.
#[test]
fn example_authors_of_all_books() {
    setup();
    select_and_then_compare(
        r#"$.store.book[*].author"#,
        read_json("./benchmark/example.json"),
        json!([
            "Nigel Rees",
            "Evelyn Waugh",
            "Herman Melville",
            "J. R. R. Tolkien"
        ]),
    );
}
// Ports of the classic Goessner JSONPath examples against example.json.
#[test]
fn all_authors() {
    setup();
    select_and_then_compare(
        r#"$..author"#,
        read_json("./benchmark/example.json"),
        json!([
            "Nigel Rees",
            "Evelyn Waugh",
            "Herman Melville",
            "J. R. R. Tolkien"
        ]),
    );
}
#[test]
fn all_things_both_books_and_bicycles() {
    setup();
    select_and_then_compare(
        r#"$.store.*"#,
        read_json("./benchmark/example.json"),
        json!([
            [
                {"category" : "reference", "author" : "Nigel Rees","title" : "Sayings of the Century", "price" : 8.95},
                {"category" : "fiction", "author" : "Evelyn Waugh","title" : "Sword of Honour","price" : 12.99},
                {"category" : "fiction", "author" : "Herman Melville","title" : "Moby Dick","isbn" : "0-553-21311-3","price" : 8.99},
                {"category" : "fiction", "author" : "J. R. R. Tolkien","title" : "The Lord of the Rings","isbn" : "0-395-19395-8","price" : 22.99}
            ],
            {"color" : "red","price" : 19.95},
        ]),
    );
}
#[test]
fn the_price_of_everything() {
    setup();
    select_and_then_compare(
        r#"$.store..price"#,
        read_json("./benchmark/example.json"),
        json!([8.95, 12.99, 8.99, 22.99, 19.95]),
    );
}
#[test]
fn the_third_book() {
    setup();
    select_and_then_compare(
        r#"$..book[2]"#,
        read_json("./benchmark/example.json"),
        json!([
            {
                "category" : "fiction",
                "author" : "Herman Melville",
                "title" : "Moby Dick",
                "isbn" : "0-553-21311-3",
                "price" : 8.99
            }
        ]),
    );
}
// Negative indices count from the end of the array.
#[test]
fn the_second_to_last_book() {
    setup();
    select_and_then_compare(
        r#"$..book[-2]"#,
        read_json("./benchmark/example.json"),
        json!([
            {
                "category" : "fiction",
                "author" : "Herman Melville",
                "title" : "Moby Dick",
                "isbn" : "0-553-21311-3",
                "price" : 8.99
            }
        ]),
    );
}
#[test]
fn the_first_two_books() {
    setup();
    select_and_then_compare(
        r#"$..book[0, 1]"#,
        read_json("./benchmark/example.json"),
        json!([
            {
                "category" : "reference",
                "author" : "Nigel Rees",
                "title" : "Sayings of the Century",
                "price" : 8.95
            },
            {
                "category" : "fiction",
                "author" : "Evelyn Waugh",
                "title" : "Sword of Honour",
                "price" : 12.99
            }
        ]),
    );
}
#[test]
fn all_books_from_index_0_inclusive_until_index_2_exclusive() {
    setup();
    select_and_then_compare(
        r#"$..book[:2]"#,
        read_json("./benchmark/example.json"),
        json!([
            {
                "category" : "reference",
                "author" : "Nigel Rees",
                "title" : "Sayings of the Century",
                "price" : 8.95
            },
            {
                "category" : "fiction",
                "author" : "Evelyn Waugh",
                "title" : "Sword of Honour",
                "price" : 12.99
            }
        ]),
    );
}
/// All books from index 2 (inclusive) to the end of the array
/// (`$..book[2:]`). Renamed: the previous name — "from index 1 inclusive
/// until index 2 exclusive" — was a copy-paste leftover from the `[:2]`
/// test above and did not describe the slice actually exercised here.
#[test]
fn all_books_from_index_2_inclusive_to_the_end() {
    setup();
    select_and_then_compare(
        r#"$..book[2:]"#,
        read_json("./benchmark/example.json"),
        json!([
            {
                "category" : "fiction",
                "author" : "Herman Melville",
                "title" : "Moby Dick",
                "isbn" : "0-553-21311-3",
                "price" : 8.99
            },
            {
                "category" : "fiction",
                "author" : "J. R. R. Tolkien",
                "title" : "The Lord of the Rings",
                "isbn" : "0-395-19395-8",
                "price" : 22.99
            }
        ]),
    );
}
// Filter-based Goessner examples: existence filter (`@.isbn`), numeric
// comparison filter, and the recursive wildcard (`$..*`).
#[test]
fn all_books_with_an_isbn_number() {
    setup();
    select_and_then_compare(
        r#"$..book[?(@.isbn)]"#,
        read_json("./benchmark/example.json"),
        json!([
            {
                "category" : "fiction",
                "author" : "Herman Melville",
                "title" : "Moby Dick",
                "isbn" : "0-553-21311-3",
                "price" : 8.99
            },
            {
                "category" : "fiction",
                "author" : "J. R. R. Tolkien",
                "title" : "The Lord of the Rings",
                "isbn" : "0-395-19395-8",
                "price" : 22.99
            }
        ]),
    );
}
#[test]
fn all_books_in_store_cheaper_than_10() {
    setup();
    select_and_then_compare(
        r#"$.store.book[?(@.price < 10)]"#,
        read_json("./benchmark/example.json"),
        json!([
            {
                "category" : "reference",
                "author" : "Nigel Rees",
                "title" : "Sayings of the Century",
                "price" : 8.95
            },
            {
                "category" : "fiction",
                "author" : "Herman Melville",
                "title" : "Moby Dick",
                "isbn" : "0-553-21311-3",
                "price" : 8.99
            }
        ]),
    );
}
// The full recursive expansion is large, so the expectation lives in a file.
#[test]
fn give_me_every_thing() {
    setup();
    select_and_then_compare(
        r#"$..*"#,
        read_json("./benchmark/example.json"),
        read_json("./benchmark/giveme_every_thing_result.json"),
    );
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/tests/filter.rs | tests/filter.rs | #[macro_use]
extern crate serde_json;
use common::{read_json, select_and_then_compare, setup};
mod common;
// Escaped quotes inside bracket notation, and the shallow/deep wildcard
// selectors over a heterogeneous array.
#[test]
fn quote() {
    setup();
    select_and_then_compare(
        r#"$['single\'quote']"#,
        json!({"single'quote":"value"}),
        json!(["value"]),
    );
    select_and_then_compare(
        r#"$["double\"quote"]"#,
        json!({"double\"quote":"value"}),
        json!(["value"]),
    );
}
#[test]
fn filter_next_all() {
    setup();
    for path in &[r#"$.*"#, r#"$[*]"#] {
        select_and_then_compare(
            path,
            json!(["string", 42, { "key": "value" }, [0, 1]]),
            json!(["string", 42, { "key": "value" }, [0, 1]]),
        );
    }
}
#[test]
fn filter_all() {
    setup();
    for path in &[r#"$..*"#, r#"$..[*]"#] {
        select_and_then_compare(
            path,
            json!(["string", 42, { "key": "value" }, [0, 1]]),
            json!([ "string", 42, { "key" : "value" }, [ 0, 1 ], "value", 0, 1 ]),
        );
    }
}
// Chained wildcards (dot and bracket forms are interchangeable) and filters
// that match on nested child paths.
#[test]
fn filter_array_next_all() {
    setup();
    for path in &[r#"$.*.*"#, r#"$[*].*"#, r#"$.*[*]"#, r#"$[*][*]"#] {
        select_and_then_compare(
            path,
            json!(["string", 42, { "key": "value" }, [0, 1]]),
            json!(["value", 0, 1]),
        );
    }
}
#[test]
fn filter_all_complex() {
    setup();
    for path in &[r#"$..friends.*"#, r#"$[*].friends.*"#] {
        select_and_then_compare(
            path,
            read_json("./benchmark/data_array.json"),
            json!([
                { "id" : 0, "name" : "Millicent Norman" },
                { "id" : 1, "name" : "Vincent Cannon" },
                { "id" : 2, "name" : "Gray Berry" },
                { "id" : 0, "name" : "Tillman Mckay" },
                { "id" : 1, "name" : "Rivera Berg" },
                { "id" : 2, "name" : "Rosetta Erickson" }
            ]),
        );
    }
}
#[test]
fn filter_parent_with_matched_child() {
    setup();
    select_and_then_compare(
        "$.a[?(@.b.c == 1)]",
        json!({
            "a": {
                "b": {
                    "c": 1
                }
            }
        }),
        json!([
            {
                "b" : {
                    "c" : 1
                }
            }
        ]),
    );
}
#[test]
fn filter_parent_exist_child() {
    setup();
    select_and_then_compare(
        "$.a[?(@.b.c)]",
        json!({
            "a": {
                "b": {
                    "c": 1
                }
            }
        }),
        json!([
            {
                "b" : {
                    "c" : 1
                }
            }
        ]),
    );
}
#[test]
fn filter_parent_paths() {
    setup();
    select_and_then_compare(
        "$[?(@.key.subKey == 'subKey2')]",
        json!([
            {"key": {"seq": 1, "subKey": "subKey1"}},
            {"key": {"seq": 2, "subKey": "subKey2"}},
            {"key": 42},
            {"some": "value"}
        ]),
        json!([{"key": {"seq": 2, "subKey": "subKey2"}}]),
    );
}
// Regression tests for issue #33: existence filters over nested paths inside
// a deep scan, alone and combined with `&&` on either side.
#[test]
fn bugs33_exist_in_all() {
    setup();
    select_and_then_compare(
        "$..[?(@.first.second)]",
        json!({
            "foo": {
                "first": { "second": "value" }
            },
            "foo2": {
                "first": {}
            },
            "foo3": {
            }
        }),
        json!([
            {
                "first": {
                    "second": "value"
                }
            }
        ]),
    );
}
#[test]
fn bugs33_exist_left_in_all_with_and_condition() {
    setup();
    select_and_then_compare(
        "$..[?(@.first && @.first.second)]",
        json!({
            "foo": {
                "first": { "second": "value" }
            },
            "foo2": {
                "first": {}
            },
            "foo3": {
            }
        }),
        json!([
            {
                "first": {
                    "second": "value"
                }
            }
        ]),
    );
}
#[test]
fn bugs33_exist_right_in_all_with_and_condition() {
    setup();
    select_and_then_compare(
        "$..[?(@.b.c.d && @.b)]",
        json!({
            "a": {
                "b": {
                    "c": {
                        "d" : {
                            "e" : 1
                        }
                    }
                }
            }
        }),
        json!([
            {
                "b" : {
                    "c" : {
                        "d" : {
                            "e" : 1
                        }
                    }
                }
            }
        ]),
    );
}
// Regression tests for issue #38 (bracket notation inside filters) and for
// an absolute-path term in a filter, which must not panic.
#[test]
fn bugs38_array_notation_in_filter() {
    setup();
    select_and_then_compare(
        "$[?(@['key']==42)]",
        json!([
            {"key": 0},
            {"key": 42},
            {"key": -1},
            {"key": 41},
            {"key": 43},
            {"key": 42.0001},
            {"key": 41.9999},
            {"key": 100},
            {"some": "value"}
        ]),
        json!([{"key": 42}]),
    );
    select_and_then_compare(
        "$[?(@['key'].subKey == 'subKey2')]",
        json!([
            {"key": {"seq": 1, "subKey": "subKey1"}},
            {"key": {"seq": 2, "subKey": "subKey2"}},
            {"key": 42},
            {"some": "value"}
        ]),
        json!([{"key": {"seq": 2, "subKey": "subKey2"}}]),
    );
    select_and_then_compare(
        "$[?(@['key']['subKey'] == 'subKey2')]",
        json!([
            {"key": {"seq": 1, "subKey": "subKey1"}},
            {"key": {"seq": 2, "subKey": "subKey2"}},
            {"key": 42},
            {"some": "value"}
        ]),
        json!([{"key": {"seq": 2, "subKey": "subKey2"}}]),
    );
    select_and_then_compare(
        "$..key[?(@['subKey'] == 'subKey2')]",
        json!([
            {"key": {"seq": 1, "subKey": "subKey1"}},
            {"key": {"seq": 2, "subKey": "subKey2"}},
            {"key": 42},
            {"some": "value"}
        ]),
        json!([{"seq": 2, "subKey": "subKey2"}]),
    );
}
#[test]
fn unsupported_in_filter() {
    setup();
    let json = json!([{
        "a": {"x": {"i": 10}},
        "b": {"x": {"i": 20, "j": 5}}
    }]);
    select_and_then_compare(
        "$..x[?(@.i>10)]",
        json.clone(),
        json!([{"i": 20,"j": 5}]),
    );
    // Should not panic ('empty term left')
    select_and_then_compare("$..x[?($.i>10)]", json.clone(), json!([]));
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/tests/readme.rs | tests/readme.rs | extern crate jsonpath_lib as jsonpath;
extern crate serde;
#[macro_use]
extern crate serde_json;
use serde::Deserialize;
use serde_json::Value;
use jsonpath::{JsonSelector, JsonSelectorMut, PathParser};
mod common;
/// Walks through the README examples against the classic "store"
/// document: member access, recursive descent, wildcards, slices,
/// unions, negative indices and filter expressions.
///
/// Fix: the original asserted `$..book[:2]` twice with a byte-identical
/// expectation; the exact duplicate has been removed.
#[test]
fn readme() {
    let json_obj = json!({
        "store": {
            "book": [
                {
                    "category": "reference",
                    "author": "Nigel Rees",
                    "title": "Sayings of the Century",
                    "price": 8.95
                },
                {
                    "category": "fiction",
                    "author": "Evelyn Waugh",
                    "title": "Sword of Honour",
                    "price": 12.99
                },
                {
                    "category": "fiction",
                    "author": "Herman Melville",
                    "title": "Moby Dick",
                    "isbn": "0-553-21311-3",
                    "price": 8.99
                },
                {
                    "category": "fiction",
                    "author": "J. R. R. Tolkien",
                    "title": "The Lord of the Rings",
                    "isbn": "0-395-19395-8",
                    "price": 22.99
                }
            ],
            "bicycle": {
                "color": "red",
                "price": 19.95
            }
        },
        "expensive": 10
    });

    let mut selector = jsonpath::selector(&json_obj);

    // All book authors via an explicit path.
    assert_eq!(
        selector("$.store.book[*].author").unwrap(),
        vec![
            "Nigel Rees",
            "Evelyn Waugh",
            "Herman Melville",
            "J. R. R. Tolkien"
        ]
    );

    // The same authors via recursive descent.
    assert_eq!(
        selector("$..author").unwrap(),
        vec![
            "Nigel Rees",
            "Evelyn Waugh",
            "Herman Melville",
            "J. R. R. Tolkien"
        ]
    );

    // Wildcard over the store's direct members.
    assert_eq!(
        selector("$.store.*").unwrap(),
        vec![
            &json!([
                { "category": "reference", "author": "Nigel Rees", "title": "Sayings of the Century", "price": 8.95 },
                { "category": "fiction", "author": "Evelyn Waugh", "title": "Sword of Honour", "price": 12.99 },
                { "category": "fiction", "author": "Herman Melville", "title": "Moby Dick", "isbn": "0-553-21311-3", "price": 8.99 },
                { "category": "fiction", "author": "J. R. R. Tolkien", "title": "The Lord of the Rings", "isbn": "0-395-19395-8", "price": 22.99 }
            ]),
            &json!({ "color": "red", "price": 19.95 })
        ]
    );

    // Every price anywhere under the store.
    assert_eq!(
        selector("$.store..price").unwrap(),
        vec![8.95, 12.99, 8.99, 22.99, 19.95]
    );

    // Third book — positive and negative indices address the same entry.
    assert_eq!(
        selector("$..book[2]").unwrap(),
        vec![&json!({
            "category" : "fiction",
            "author" : "Herman Melville",
            "title" : "Moby Dick",
            "isbn" : "0-553-21311-3",
            "price" : 8.99
        })]
    );

    assert_eq!(
        selector("$..book[-2]").unwrap(),
        vec![&json!({
            "category" : "fiction",
            "author" : "Herman Melville",
            "title" : "Moby Dick",
            "isbn" : "0-553-21311-3",
            "price" : 8.99
        })]
    );

    // Union and slice both select the first two books.
    assert_eq!(
        selector("$..book[0,1]").unwrap(),
        vec![
            &json!({"category" : "reference","author" : "Nigel Rees","title" : "Sayings of the Century","price" : 8.95}),
            &json!({"category" : "fiction","author" : "Evelyn Waugh","title" : "Sword of Honour","price" : 12.99})
        ]
    );

    assert_eq!(
        selector("$..book[:2]").unwrap(),
        vec![
            &json!({"category" : "reference","author" : "Nigel Rees","title" : "Sayings of the Century","price" : 8.95}),
            &json!({"category" : "fiction","author" : "Evelyn Waugh","title" : "Sword of Honour","price" : 12.99})
        ]
    );

    // Filter: books that carry an ISBN.
    assert_eq!(
        selector("$..book[?(@.isbn)]").unwrap(),
        vec![
            &json!({"category" : "fiction","author" : "Herman Melville","title" : "Moby Dick","isbn" : "0-553-21311-3","price" : 8.99}),
            &json!({"category" : "fiction","author" : "J. R. R. Tolkien","title" : "The Lord of the Rings","isbn" : "0-395-19395-8","price" : 22.99})
        ]
    );

    // Filter: books cheaper than 10.
    assert_eq!(
        selector("$.store.book[?(@.price < 10)]").unwrap(),
        vec![
            &json!({"category" : "reference","author" : "Nigel Rees","title" : "Sayings of the Century","price" : 8.95}),
            &json!({"category" : "fiction","author" : "Herman Melville","title" : "Moby Dick","isbn" : "0-553-21311-3","price" : 8.99})
        ]
    );
}
/// README example: one `JsonSelector` reused three ways — select as
/// borrowed `&Value`, as a JSON string, and deserialized into a struct.
#[test]
fn readme_selector() {
    #[derive(Deserialize, PartialEq, Debug)]
    struct Friend {
        name: String,
        age: Option<u8>,
    }

    let json_obj = json!({
        "school": {
            "friends": [
                {"name": "친구1", "age": 20},
                {"name": "친구2", "age": 20}
            ]
        },
        "friends": [
            {"name": "친구3", "age": 30},
            {"name": "친구4"}
        ]});

    let parser = PathParser::compile("$..[?(@.age >= 30)]").unwrap();
    let mut selector = JsonSelector::new(parser);

    let result = selector.value(&json_obj).select().unwrap();
    assert_eq!(vec![&json!({"name": "친구3", "age": 30})], result);

    // Same selection, serialized to a compact JSON string.
    let result = selector.select_as_str().unwrap();
    assert_eq!(r#"[{"name":"친구3","age":30}]"#, result);

    // Same selection, deserialized into the typed struct.
    let result = selector.select_as::<Friend>().unwrap();
    assert_eq!(
        vec![Friend {
            name: "친구3".to_string(),
            age: Some(30),
        }],
        result
    );
}

/// README example: `JsonSelectorMut::replace_with` doubles every
/// matching `age` in place, then `take()` yields the mutated document.
#[test]
fn readme_selector_mut() {
    let json_obj = json!({
        "school": {
            "friends": [
                {"name": "친구1", "age": 20},
                {"name": "친구2", "age": 20}
            ]
        },
        "friends": [
            {"name": "친구3", "age": 30},
            {"name": "친구4"}
        ]});

    let parser = PathParser::compile("$..[?(@.age == 20)].age").unwrap();
    let mut selector_mut = JsonSelectorMut::new(parser);

    let result = selector_mut
        .value(json_obj)
        .replace_with(&mut |v| {
            // Non-number matches (none in this fixture) would become 0.
            let age = if let Value::Number(n) = v {
                n.as_u64().unwrap() * 2
            } else {
                0
            };

            Some(json!(age))
        })
        .unwrap()
        .take()
        .unwrap();

    assert_eq!(
        result,
        json!({
            "school": {
                "friends": [
                    {"name": "친구1", "age": 40},
                    {"name": "친구2", "age": 40}
                ]
            },
            "friends": [
                {"name": "친구3", "age": 30},
                {"name": "친구4"}
            ]})
    );
}
/// README example: the free function `jsonpath::select` returns borrowed
/// matches. Note the order: recursive descent visits the top-level
/// `friends` before the nested `school.friends`.
#[test]
fn readme_select() {
    let json_obj = json!({
        "school": {
            "friends": [
                {"name": "친구1", "age": 20},
                {"name": "친구2", "age": 20}
            ]
        },
        "friends": [
            {"name": "친구3", "age": 30},
            {"name": "친구4"}
        ]});

    let json = jsonpath::select(&json_obj, "$..friends[0]").unwrap();

    assert_eq!(
        json,
        vec![
            &json!({"name": "친구3", "age": 30}),
            &json!({"name": "친구1", "age": 20})
        ]
    );
}

/// README example: `select_as_str` accepts a JSON string and returns the
/// selection serialized as a compact JSON string.
#[test]
fn readme_select_as_str() {
    let ret = jsonpath::select_as_str(
        r#"
    {
        "school": {
            "friends": [
                {"name": "친구1", "age": 20},
                {"name": "친구2", "age": 20}
            ]
        },
        "friends": [
            {"name": "친구3", "age": 30},
            {"name": "친구4"}
        ]
    }
    "#,
        "$..friends[0]",
    )
    .unwrap();

    assert_eq!(
        ret,
        r#"[{"name":"친구3","age":30},{"name":"친구1","age":20}]"#
    );
}

/// README example: `select_as` deserializes matches straight into a
/// user-defined struct.
#[test]
fn readme_select_as() {
    #[derive(Deserialize, PartialEq, Debug)]
    struct Person {
        name: String,
        age: u8,
        phones: Vec<String>,
    }

    let ret: Vec<Person> = jsonpath::select_as(
        r#"{
        "person":
            {
                "name": "Doe John",
                "age": 44,
                "phones": [
                    "+44 1234567",
                    "+44 2345678"
                ]
            }
    }"#,
        "$.person",
    )
    .unwrap();

    let person = Person {
        name: "Doe John".to_string(),
        age: 44,
        phones: vec!["+44 1234567".to_string(), "+44 2345678".to_string()],
    };

    assert_eq!(ret[0], person);
}
/// README example: a `PathCompiled` is parsed once and can then be run
/// against any document.
#[test]
fn readme_compile() {
    // Renamed from `first_firend` — typo fix in a local name only.
    let first_friend =
        jsonpath::PathCompiled::compile("$..friends[0]").unwrap();

    let json_obj = json!({
        "school": {
            "friends": [
                {"name": "친구1", "age": 20},
                {"name": "친구2", "age": 20}
            ]
        },
        "friends": [
            {"name": "친구3", "age": 30},
            {"name": "친구4"}
        ]});

    let json = first_friend.select(&json_obj).unwrap();

    assert_eq!(
        json,
        vec![
            &json!({"name": "친구3", "age": 30}),
            &json!({"name": "친구1", "age": 20})
        ]
    );
}
/// README example: `jsonpath::selector` returns a closure that can be
/// called repeatedly with different paths against the same document.
#[test]
fn readme_selector_fn() {
    let json_obj = json!({
        "school": {
            "friends": [
                {"name": "친구1", "age": 20},
                {"name": "친구2", "age": 20}
            ]
        },
        "friends": [
            {"name": "친구3", "age": 30},
            {"name": "친구4"}
        ]});

    let mut selector = jsonpath::selector(&json_obj);

    let json = selector("$..friends[0]").unwrap();

    assert_eq!(
        json,
        vec![
            &json!({"name": "친구3", "age": 30}),
            &json!({"name": "친구1", "age": 20})
        ]
    );

    let json = selector("$..friends[1]").unwrap();

    assert_eq!(
        json,
        vec![
            &json!({"name": "친구4"}),
            &json!({"name": "친구2", "age": 20})
        ]
    );
}

/// README example: `jsonpath::selector_as::<T>` returns a closure whose
/// results are deserialized into `T` (here, `Friend` with optional age).
#[test]
fn readme_selector_as() {
    let json_obj = json!({
        "school": {
            "friends": [
                {"name": "친구1", "age": 20},
                {"name": "친구2", "age": 20}
            ]
        },
        "friends": [
            {"name": "친구3", "age": 30},
            {"name": "친구4"}
        ]});

    #[derive(Deserialize, PartialEq, Debug)]
    struct Friend {
        name: String,
        age: Option<u8>,
    }

    let mut selector = jsonpath::selector_as::<Friend>(&json_obj);

    let json = selector("$..friends[0]").unwrap();

    let ret = vec![
        Friend {
            name: "친구3".to_string(),
            age: Some(30),
        },
        Friend {
            name: "친구1".to_string(),
            age: Some(20),
        },
    ];
    assert_eq!(json, ret);

    let json = selector("$..friends[1]").unwrap();

    // "친구4" has no age, hence the Option<u8> field.
    let ret = vec![
        Friend {
            name: "친구4".to_string(),
            age: None,
        },
        Friend {
            name: "친구2".to_string(),
            age: Some(20),
        },
    ];

    assert_eq!(json, ret);
}
/// README example: `jsonpath::delete` replaces matches with null while
/// keeping the surrounding structure (array length is preserved).
#[test]
fn readme_delete() {
    let json_obj = json!({
        "school": {
            "friends": [
                {"name": "친구1", "age": 20},
                {"name": "친구2", "age": 20}
            ]
        },
        "friends": [
            {"name": "친구3", "age": 30},
            {"name": "친구4"}
        ]});

    let ret = jsonpath::delete(json_obj, "$..[?(20 == @.age)]").unwrap();

    assert_eq!(
        ret,
        json!({
            "school": {
                "friends": [
                    null,
                    null
                ]
            },
            "friends": [
                {"name": "친구3", "age": 30},
                {"name": "친구4"}
            ]})
    );
}

/// Same as above against the shared benchmark fixture: deleting
/// `$.store.book` nulls the whole array value.
#[test]
fn readme_delete2() {
    let json_obj = common::read_json("./benchmark/example.json");

    let ret = jsonpath::delete(json_obj, "$.store.book").unwrap();

    assert_eq!(
        ret,
        json!({
            "store": {
                "book": null,
                "bicycle": {
                    "color": "red",
                    "price": 19.95
                }
            },
            "expensive": 10
        })
    );
}

/// README example: the free function `jsonpath::replace_with` — each
/// matching age is doubled via the supplied closure.
#[test]
fn readme_replace_with() {
    let json_obj = json!({
        "school": {
            "friends": [
                {"name": "친구1", "age": 20},
                {"name": "친구2", "age": 20}
            ]
        },
        "friends": [
            {"name": "친구3", "age": 30},
            {"name": "친구4"}
        ]});

    let result =
        jsonpath::replace_with(json_obj, "$..[?(@.age == 20)].age", &mut |v| {
            let age = if let Value::Number(n) = v {
                n.as_u64().unwrap() * 2
            } else {
                0
            };

            Some(json!(age))
        })
        .unwrap();

    assert_eq!(
        result,
        json!({
            "school": {
                "friends": [
                    {"name": "친구1", "age": 40},
                    {"name": "친구2", "age": 40}
                ]
            },
            "friends": [
                {"name": "친구3", "age": 30},
                {"name": "친구4"}
            ]})
    );
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/tests/array_filter.rs | tests/array_filter.rs | #[macro_use]
extern crate serde_json;
use common::{read_json, select_and_then_compare, setup};
mod common;
// Array slice / union / step tests. The slice grammar is
// `[start:end:step]`, mirroring Python slicing.

/// Union of two explicit indices.
#[test]
fn array_range_default() {
    setup();
    select_and_then_compare(
        "$.school.friends[1, 2]",
        read_json("./benchmark/data_obj.json"),
        json!([
            {"id": 1, "name": "Vincent Cannon" },
            {"id": 2, "name": "Gray Berry"}
        ]),
    );
}

/// `[:]` (with incidental whitespace) selects everything.
#[test]
fn array_range_all() {
    setup();
    select_and_then_compare(
        "$[ : ]",
        json!(["first", "second"]),
        json!(["first", "second"]),
    );
}

/// `[::]` likewise selects everything.
#[test]
fn array_range_step_all() {
    setup();
    select_and_then_compare(
        "$[::]",
        json!(["first", "second", "third", "forth", "fifth"]),
        json!(["first", "second", "third", "forth", "fifth"]),
    );
}

/// Only a step value: every second element from the start.
#[test]
fn array_range_step_only_step_value() {
    setup();
    select_and_then_compare(
        "$[::2]",
        json!(["first", "second", "third", "forth", "fifth"]),
        json!(["first", "third", "fifth"]),
    );
}

/// Only a start index: the tail from that index.
#[test]
fn array_range_step_only_start_index() {
    setup();
    select_and_then_compare(
        "$[1::]",
        json!(["first", "second", "third", "forth", "fifth"]),
        json!(["second", "third", "forth", "fifth"]),
    );
}

/// Empty step defaults to 1.
#[test]
fn array_range_step_empty_step_value() {
    setup();
    select_and_then_compare(
        "$[1:2:]",
        json!(["first", "second", "third", "forth", "fifth"]),
        json!(["second"]),
    );
}

/// Empty end index defaults to the array length.
#[test]
fn array_range_step_empty_end_index() {
    setup();
    select_and_then_compare(
        "$[1::2]",
        json!(["first", "second", "third", "forth", "fifth"]),
        json!(["second", "forth"]),
    );
}

/// Full `start:end:step` with step 1.
#[test]
fn array_range_step_by_1() {
    setup();
    select_and_then_compare(
        "$[0:3:1]",
        json!(["first", "second", "third", "forth", "fifth"]),
        json!(["first", "second", "third"]),
    );
}

/// Full `start:end:step` with step 2; the end bound is exclusive.
#[test]
fn array_range_step_by_2() {
    setup();
    select_and_then_compare(
        "$[0:3:2]",
        json!(["first", "second", "third", "forth", "fifth"]),
        json!(["first", "third"]),
    );
}
/// A negative start index larger than the array is clamped: the whole
/// array is returned.
#[test]
fn array_range_only_negative_index() {
    setup();
    select_and_then_compare(
        "$[-4:]",
        json!(["first", "second", "third"]),
        json!(["first", "second", "third"]),
    );
}

/// An end index past the last element is clamped to the array length.
#[test]
fn array_range_only_end_index() {
    setup();
    select_and_then_compare(
        "$[:4]",
        json!(["first", "second", "third"]),
        json!(["first", "second", "third"]),
    );
}

/// Open-ended slice starting at index 1 (trailing whitespace tolerated).
#[test]
fn array_range_only_from_index() {
    setup();
    select_and_then_compare(
        "$.school.friends[1: ]",
        read_json("./benchmark/data_obj.json"),
        json!([
            {"id": 1, "name": "Vincent Cannon" },
            {"id": 2, "name": "Gray Berry"}
        ]),
    );
}
/// A slice with only a negative end index (`[:-2]`) drops the last two
/// elements.
/// (Renamed from `array_range_only_nagative_end_index` to fix the typo;
/// no caller references a test function by name.)
#[test]
fn array_range_only_negative_end_index() {
    setup();
    select_and_then_compare(
        "$.school.friends[:-2]",
        read_json("./benchmark/data_obj.json"),
        json!([
            {"id": 0, "name": "Millicent Norman"}
        ]),
    );
}
/// Recursive descent finds both `friends` arrays; index 2 of each.
#[test]
fn array_index() {
    setup();
    select_and_then_compare(
        "$..friends[2].name",
        read_json("./benchmark/data_obj.json"),
        json!(["Gray Berry", "Gray Berry"]),
    );
}

/// Wildcard index over both `friends` arrays.
#[test]
fn array_all_index() {
    setup();
    select_and_then_compare(
        "$..friends[*].name",
        read_json("./benchmark/data_obj.json"),
        json!([
            "Vincent Cannon",
            "Gray Berry",
            "Millicent Norman",
            "Vincent Cannon",
            "Gray Berry"
        ]),
    );
}

/// Bracket notation, wildcard, then a bracketed key (`.['name']`).
#[test]
fn array_all_and_then_key() {
    setup();
    select_and_then_compare(
        "$['school']['friends'][*].['name']",
        read_json("./benchmark/data_obj.json"),
        json!(["Millicent Norman", "Vincent Cannon", "Gray Berry"]),
    );
}

/// Bracket notation with a concrete index, then a bracketed key.
#[test]
fn array_index_and_then_key() {
    setup();
    select_and_then_compare(
        "$['school']['friends'][0].['name']",
        read_json("./benchmark/data_obj.json"),
        json!(["Millicent Norman"]),
    );
}

/// A union of object keys selects both values in path order.
#[test]
fn array_multiple_key() {
    setup();
    select_and_then_compare(
        r#"$.["eyeColor", "name"]"#,
        read_json("./benchmark/data_obj.json"),
        json!(["blue", "Leonor Herman"]),
    );
}

/// Regression for issue #40: `$..[0]` must apply the index at every
/// nesting level reached by recursive descent.
#[test]
fn bugs40_bracket_notation_after_recursive_descent() {
    setup();
    select_and_then_compare(
        "$..[0]",
        json!([
            "first",
            {
                "key": [
                    "first nested",
                    {
                        "more": [
                            {"nested": ["deepest", "second"]},
                            ["more", "values"]
                        ]
                    }
                ]
            }
        ]),
        json!([
            "first",
            "first nested",
            {
                "nested" : [
                    "deepest",
                    "second"
                ]
            },
            "deepest",
            "more"
        ]),
    );
}

/// Regression for issue #50: a numeric index applied to an object (not
/// an array) selects nothing rather than erroring.
#[test]
fn bugs50() {
    setup();
    select_and_then_compare("$[0]", json!({"f": [1,2,3]}), json!([]));
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/tests/paths.rs | tests/paths.rs | #[macro_use]
extern crate serde_json;
use common::{select_and_then_compare, setup};
mod common;
/// A literal `$` inside a member name (`$ref`) must be treated as part
/// of the key, both in dot notation (`$..$ref`) and bracket notation
/// (`$..['$ref']`).
///
/// The original duplicated the entire document and expectation verbatim
/// for the two spellings; they are now shared and both paths are run in
/// a loop.
#[test]
fn dolla_token_in_path() {
    setup();

    let document = json!({
        "Junk1": "This is a test to illustrate use of '$' in the attr for the expression $..['$ref'] ",
        "$ref": "Match Root",
        "Subset1":[
            {"Junk2": "Data...",
            "$ref": "Match Subset1"
            }
        ],
        "hierachy1":{
            "hierachy2.1":{
                "hierachy2.1.1":{ "$ref":"Match 2.1.1"},
                "hierachy2.1.2":{ "ref":"Match 2.1.2"},
                "hierachy2.1.3":{ "ref":"No Match 2.1.3"},
                "hierachy2.1.4":{ "$ref":"Match 2.1.4"},
                "hierachy2.1.5":{ "ref":"No Match 2.1.5"}
            },
            "hierachy2.2":{
                "hierachy2.2.1":{ "ref":"No Match 2.2.1"},
                "hierachy2.2.2":{ "$ref":"Match 2.2.2"},
                "hierachy2.2.3":{ "ref":"No Match 2.2.3"},
                "hierachy2.2.4":{ "ref":"No Match 2.2.5"},
                "hierachy2.2.5":{ "$ref":"Match 2.2.5"}
            },
            "hierachy2.3":{
                "hierachy2.3.1":{ "ref":"No Match 2.3.1"},
                "hierachy2.3.2":{ "ref":"No Match 2.3.2"},
                "hierachy2.3.3":{ "ref":"No Match 2.3.3"},
                "hierachy2.3.4":{ "ref":"No Match 2.3.4"},
                "hierachy2.3.5":{ "ref":"No Match 2.3.5"},
                "hierachy2.3.6":{
                    "hierachy2.3.6.1":{ "$ref":"Match 2.3.6.1"},
                    "hierachy2.3.6.2":{ "ref":"No Match 2.3.6.2"},
                    "hierachy2.3.6.3":{ "ref":"No Match 2.3.6.3"},
                    "hierachy2.3.6.4":{ "ref":"No Match 2.3.6.4"},
                    "hierachy2.3.6.5":{ "ref":"No Match 2.3.6.5"}
                }
            }
        }
    });

    let expected = json!([
        "Match Root",
        "Match Subset1",
        "Match 2.1.1",
        "Match 2.1.4",
        "Match 2.2.2",
        "Match 2.2.5",
        "Match 2.3.6.1"
    ]);

    // Both spellings must select exactly the same keys.
    for &path in &["$..$ref", "$..['$ref']"] {
        select_and_then_compare(path, document.clone(), expected.clone());
    }
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/tests/common.rs | tests/common.rs | extern crate env_logger;
extern crate jsonpath_lib as jsonpath;
extern crate serde_json;
use std::io::Read;
use serde_json::Value;
use self::jsonpath::{JsonSelector, PathParser};
// Initialise env_logger once per test binary; the error returned on a
// second initialisation is deliberately ignored.
#[allow(dead_code)]
pub fn setup() {
    let _ = env_logger::try_init();
}
/// Reads the file at `path` and parses it as JSON.
///
/// # Panics
/// Panics if the file cannot be read or does not contain valid JSON
/// (acceptable for a test helper).
#[allow(dead_code)]
pub fn read_json(path: &str) -> Value {
    // fs::read_to_string sizes the buffer from file metadata and replaces
    // the manual File::open + read_to_string dance.
    let contents = std::fs::read_to_string(path).unwrap();
    serde_json::from_str(&contents).unwrap()
}
/// Reads the file at `path` into a `String`.
///
/// # Panics
/// Panics on any I/O error (acceptable for a test helper).
#[allow(dead_code)]
pub fn read_contents(path: &str) -> String {
    // Single stdlib call instead of File::open + manual read_to_string.
    std::fs::read_to_string(path).unwrap()
}
// Compiles `path`, runs it against `json` and asserts the selection
// equals `target`. `target` must be a JSON array whose elements are the
// expected matches in order; on mismatch the failing path is included
// in the panic message.
#[allow(dead_code)]
pub fn select_and_then_compare(
    path: &str,
    json: Value,
    target: Value,
) {
    let parser = PathParser::compile(path).unwrap();
    let mut selector = JsonSelector::new(parser);
    let result = selector.value(&json).select_as::<Value>().unwrap();
    assert_eq!(
        result,
        match target {
            Value::Array(vec) => vec,
            _ => panic!("Give me the Array!"),
        },
        "{}",
        path
    );
}
/// Asserts that the borrowed selection `result` serializes to exactly
/// the JSON value `target`.
#[allow(dead_code)]
pub fn compare_result(result: Vec<&Value>, target: Value) {
    let actual = serde_json::to_value(result).unwrap();
    assert_eq!(actual, target);
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/tests/selector.rs | tests/selector.rs | extern crate jsonpath_lib as jsonpath;
#[macro_use]
extern crate serde_json;
use serde_json::Value;
use common::{read_json, setup};
use jsonpath::{JsonSelector, JsonSelectorMut, PathParser};
mod common;
/// Replaces every matched price with the string "a" while recording the
/// original numbers, then verifies the replacement with a fresh selector.
#[test]
fn selector_mut() {
    setup();

    let parser = PathParser::compile("$.store..price").unwrap();
    let mut selector_mut = JsonSelectorMut::new(parser);

    let mut nums = Vec::new();
    let result = selector_mut
        .value(read_json("./benchmark/example.json"))
        .replace_with(&mut |v| {
            // Record the original numeric value before replacing it.
            if let Value::Number(n) = v {
                nums.push(n.as_f64().unwrap());
            }
            Some(Value::String("a".to_string()))
        })
        .unwrap()
        .take()
        .unwrap();
    assert_eq!(
        nums,
        vec![8.95_f64, 12.99_f64, 8.99_f64, 22.99_f64, 19.95_f64]
    );

    // Re-select from the mutated document: every price is now "a".
    let parser = PathParser::compile("$.store..price").unwrap();
    let mut selector = JsonSelector::new(parser);
    let result = selector.value(&result).select().unwrap();
    assert_eq!(
        vec![
            &json!("a"),
            &json!("a"),
            &json!("a"),
            &json!("a"),
            &json!("a")
        ],
        result
    );
}

/// `remove` on a union of indices.
/// NOTE(review): removing `[0,2]` from `[1,2,3]` yields `[2,3]` here —
/// it looks as if the second index is resolved after the first removal
/// has shifted the elements; confirm whether that is intended behavior.
#[test]
fn selector_delete_multi_elements_from_array() {
    setup();

    let parser = PathParser::compile("$[0,2]").unwrap();
    let mut selector_mut = JsonSelectorMut::new(parser);

    let result = selector_mut
        .value(serde_json::from_str("[1,2,3]").unwrap())
        .remove()
        .unwrap()
        .take()
        .unwrap();
    assert_eq!(
        result,
        serde_json::from_str::<serde_json::Value>("[2,3]").unwrap(),
    );
}
/// `delete` replaces matches (prices > 13) with null, leaving the array
/// length intact.
#[test]
fn selector_delete() {
    setup();

    let parser = PathParser::compile("$.store..price[?(@>13)]").unwrap();
    let mut selector_mut = JsonSelectorMut::new(parser);

    let result = selector_mut
        .value(read_json("./benchmark/example.json"))
        .delete()
        .unwrap()
        .take()
        .unwrap();

    let parser = PathParser::compile("$.store..price").unwrap();
    let mut selector = JsonSelector::new(parser);
    let result = selector.value(&result).select().unwrap();

    assert_eq!(
        result,
        vec![
            &json!(8.95),
            &json!(12.99),
            &json!(8.99),
            &Value::Null,
            &Value::Null
        ]
    );
}

/// `remove` drops the same matches entirely — no null placeholders.
#[test]
fn selector_remove() {
    setup();

    let parser = PathParser::compile("$.store..price[?(@>13)]").unwrap();
    let mut selector_mut = JsonSelectorMut::new(parser);

    let result = selector_mut
        .value(read_json("./benchmark/example.json"))
        .remove()
        .unwrap()
        .take()
        .unwrap();

    let parser = PathParser::compile("$.store..price").unwrap();
    let mut selector = JsonSelector::new(parser);
    let result = selector.value(&result).select().unwrap();

    assert_eq!(result, vec![&json!(8.95), &json!(12.99), &json!(8.99)]);
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/tests/return_type.rs | tests/return_type.rs | #[macro_use]
extern crate serde_json;
use common::{read_json, select_and_then_compare, setup};
mod common;
/// Selecting a single object still returns a one-element array.
#[test]
fn return_type_for_single_object() {
    setup();
    select_and_then_compare(
        "$.school",
        read_json("./benchmark/data_obj.json"),
        json!([{
            "friends": [
                {"id": 0, "name": "Millicent Norman"},
                {"id": 1, "name": "Vincent Cannon" },
                {"id": 2, "name": "Gray Berry"}
            ]
        }]),
    );
}

/// Existence filter over array elements: only entries with a `name`.
#[test]
fn return_type_for_single_object_key_matched() {
    setup();
    select_and_then_compare(
        "$.friends[?(@.name)]",
        read_json("./benchmark/data_obj.json"),
        json!([
            { "id" : 1, "name" : "Vincent Cannon" },
            { "id" : 2, "name" : "Gray Berry" }
        ]),
    );
}

/// A filter on a child index that exists keeps the parent object.
#[test]
fn return_type_for_child_object_matched() {
    setup();
    select_and_then_compare(
        "$.school[?(@.friends[0])]",
        read_json("./benchmark/data_obj.json"),
        json!([{
            "friends": [
                {"id": 0, "name": "Millicent Norman"},
                {"id": 1, "name": "Vincent Cannon" },
                {"id": 2, "name": "Gray Berry"}
            ]
        }]),
    );
}

/// A filter on a child index that does not exist matches nothing.
#[test]
fn return_type_for_child_object_not_matched() {
    setup();
    select_and_then_compare(
        "$.school[?(@.friends[10])]",
        read_json("./benchmark/data_obj.json"),
        json!([]),
    );
}
/// A tautological filter on an object returns the object itself.
#[test]
fn return_type_for_object_filter_true() {
    setup();
    select_and_then_compare(
        "$.school[?(1==1)]",
        read_json("./benchmark/data_obj.json"),
        json!([{
            "friends": [
                {"id": 0, "name": "Millicent Norman"},
                {"id": 1, "name": "Vincent Cannon" },
                {"id": 2, "name": "Gray Berry"}
            ]
        }]),
    );
}

/// A tautological filter on an array returns the array wrapped once
/// more (the array itself is the single match).
#[test]
fn return_type_for_array_filter_true() {
    setup();
    select_and_then_compare(
        "$.school.friends[?(1==1)]",
        read_json("./benchmark/data_obj.json"),
        json!([[
            {"id": 0, "name": "Millicent Norman"},
            {"id": 1, "name": "Vincent Cannon" },
            {"id": 2, "name": "Gray Berry"}
        ]]),
    );
}

/// A filter matching nothing yields an empty array, not an error.
#[test]
fn return_type_empty() {
    setup();
    select_and_then_compare("$[?(@.key==43)]", json!([{"key": 42}]), json!([]));
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/lua/bench_lua_vs_rust/example.rs | lua/bench_lua_vs_rust/example.rs | extern crate jsonpath_lib as jsonpath;
extern crate serde;
extern crate serde_json;
use std::io::Read;
use serde_json::Value;
/// Reads the file at `path` into a `String` (despite the name, no JSON
/// parsing happens here — the caller parses it).
///
/// # Panics
/// Panics on any I/O error (acceptable for a benchmark driver).
fn read_json(path: &str) -> String {
    // Single stdlib call instead of File::open + manual read_to_string.
    std::fs::read_to_string(path).unwrap()
}
// Loads the shared benchmark fixture as a raw JSON string.
fn get_string() -> String {
    read_json("../../benchmark/example.json")
}

// Parses the fixture into a serde_json::Value.
fn get_json() -> Value {
    let string = get_string();
    serde_json::from_str(string.as_str()).unwrap()
}

// The JSONPath expression benchmarked against the Lua implementation.
fn get_path() -> &'static str {
    r#"$..book[?(@.price<30 && @.category=="fiction")]"#
}
// Benchmark driver: runs the same query `iter` times (default 5000, or
// argv[1]). The Selector is rebuilt and the path re-parsed on every
// iteration — presumably deliberate, to mirror the per-iteration cost
// of the Lua counterpart (TODO confirm against the Lua script).
fn main() {
    let args: Vec<String> = std::env::args().collect();
    let iter = if args.len() < 2 { 5000_usize } else { args[1].as_str().parse::<usize>().unwrap() };

    println!("rust iter - {}", iter);

    let json = get_json();
    for _ in 0..iter {
        let mut selector = jsonpath::Selector::default();
        let _ = selector.str_path(get_path());
        selector.value(&json);
        let r = selector.select();
        if r.is_err() {
            panic!();
        }
        // println!("{:?}", serde_json::to_string(&r.expect("")).unwrap());
    }
}
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/wasm/src/lib.rs | wasm/src/lib.rs | extern crate cfg_if;
extern crate js_sys;
extern crate jsonpath_lib as jsonpath;
extern crate serde_json;
extern crate wasm_bindgen;
use cfg_if::cfg_if;
#[allow(deprecated)]
use jsonpath::Selector as _Selector;
#[allow(deprecated)]
use jsonpath::SelectorMut as _SelectorMut;
#[allow(deprecated)]
use jsonpath::{JsonPathError, Parser};
use serde_json::Value;
use wasm_bindgen::prelude::*;
// Optional global allocator: `wee_alloc` trades allocation speed for a
// smaller .wasm binary when the feature is enabled.
cfg_if! {
    if #[cfg(feature = "wee_alloc")] {
        extern crate wee_alloc;
        #[global_allocator]
        static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
    }
}

// Forward Rust panics to the browser console when the
// `console_error_panic_hook` feature is on; otherwise the hook is a no-op.
cfg_if! {
    if #[cfg(feature = "console_error_panic_hook")] {
        extern crate console_error_panic_hook;
        pub use self::console_error_panic_hook::set_once as set_panic_hook;
    } else {
        #[inline]
        pub fn set_panic_hook() {}
    }
}

// Binding to the browser's `console.error`.
#[wasm_bindgen]
extern "C" {
    #[wasm_bindgen(js_namespace = console)]
    fn error(s: &str);
}

// `format!`-style helper that reports errors through `console.error`.
macro_rules! console_error {
    ($($t:tt)*) => (error(&format_args!($($t)*).to_string()))
}
/// Deserializes a `JsValue` into `D`. A JS string is treated as raw JSON
/// text and parsed; any other JS value is converted structurally. Errors
/// are flattened to their string form.
fn into_serde_json<D>(js_value: &JsValue) -> Result<D, String>
where
    D: for<'a> serde::de::Deserialize<'a>,
{
    if js_value.is_string() {
        serde_json::from_str(js_value.as_string().unwrap().as_str())
            .map_err(|e| e.to_string())
    } else {
        js_value.into_serde().map_err(|e| e.to_string())
    }
}
// Bridges a JS callback into `replace_with`: serialize the matched value
// to JS, invoke the closure, deserialize its return value. Any failure
// is reported via console.error and the matched value is replaced with
// JSON null rather than aborting the traversal.
#[allow(clippy::unnecessary_wraps)]
fn replace_fun(v: Value, fun: &js_sys::Function) -> Option<Value> {
    match JsValue::from_serde(&v) {
        Ok(js_v) => match fun.call1(&JsValue::NULL, &js_v) {
            Ok(result) => match into_serde_json(&result) {
                Ok(json) => Some(json),
                Err(e) => {
                    console_error!("replace_with - closure returned a invalid JSON: {:?}", e);
                    Some(Value::Null)
                }
            },
            Err(e) => {
                console_error!("replace_with - fail to call closure: {:?}", e);
                Some(Value::Null)
            }
        },
        Err(e) => {
            console_error!("replace_with - invalid JSON object: {:?}", e);
            Some(Value::Null)
        }
    }
}
// Compiles `path` once and returns a JS closure that applies the
// compiled path to any JSON input. Errors are surfaced to JS as plain
// strings (the closure cannot return a Result).
#[wasm_bindgen]
pub fn compile(path: &str) -> JsValue {
    #[allow(deprecated)]
    let node = Parser::compile(path);

    if let Err(e) = &node {
        return JsValue::from_str(&format!("{:?}", JsonPathError::Path(e.clone())));
    };

    let cb = Closure::wrap(Box::new(move |js_value: JsValue| {
        let json = match into_serde_json(&js_value) {
            Ok(json) => json,
            Err(e) => return JsValue::from_str(&format!("{:?}", JsonPathError::Serde(e))),
        };

        #[allow(deprecated)]
        let mut selector = _Selector::new();

        match &node {
            Ok(node) => selector.compiled_path(node),
            Err(e) => return JsValue::from_str(&format!("{:?}", JsonPathError::Path(e.clone()))),
        };

        match selector.value(&json).select() {
            Ok(ret) => match JsValue::from_serde(&ret) {
                Ok(ret) => ret,
                Err(e) => JsValue::from_str(&format!("{:?}", JsonPathError::Serde(e.to_string()))),
            },
            Err(e) => JsValue::from_str(&format!("{:?}", e)),
        }
    }) as Box<dyn Fn(JsValue) -> JsValue>);

    let ret = cb.as_ref().clone();
    // Leak the closure so JS can keep calling it after this function
    // returns (its memory is never reclaimed).
    cb.forget();
    ret
}

// Fixes a JSON document and returns a JS closure that evaluates any
// path string against it.
#[wasm_bindgen]
pub fn selector(js_value: JsValue) -> JsValue {
    let json: Value = match JsValue::into_serde(&js_value) {
        Ok(json) => json,
        Err(e) => return JsValue::from_str(&format!("{:?}", JsonPathError::Serde(e.to_string()))),
    };

    #[allow(deprecated)]
    let cb = Closure::wrap(
        Box::new(move |path: String| match Parser::compile(path.as_str()) {
            Ok(node) => {
                let mut selector = _Selector::new();
                let _ = selector.compiled_path(&node);
                match selector.value(&json).select() {
                    Ok(ret) => match JsValue::from_serde(&ret) {
                        Ok(ret) => ret,
                        Err(e) => {
                            JsValue::from_str(&format!("{:?}", JsonPathError::Serde(e.to_string())))
                        }
                    },
                    Err(e) => JsValue::from_str(&format!("{:?}", e)),
                }
            }
            Err(e) => JsValue::from_str(&format!("{:?}", JsonPathError::Path(e))),
        }) as Box<dyn Fn(String) -> JsValue>,
    );

    let ret = cb.as_ref().clone();
    // Leaked on purpose, as in `compile` above.
    cb.forget();
    ret
}
// One-shot select: parse `js_value` (JS object or JSON string), apply
// `path`, return the matches — or an error description string.
#[wasm_bindgen]
pub fn select(js_value: JsValue, path: &str) -> JsValue {
    let json = match into_serde_json(&js_value) {
        Ok(json) => json,
        Err(e) => return JsValue::from_str(&format!("{:?}", JsonPathError::Serde(e))),
    };
    match jsonpath::select(&json, path) {
        Ok(ret) => match JsValue::from_serde(&ret) {
            Ok(ret) => ret,
            Err(e) => JsValue::from_str(&format!("{:?}", JsonPathError::Serde(e.to_string()))),
        },
        Err(e) => JsValue::from_str(&format!("{:?}", e)),
    }
}

// One-shot delete, exported to JS as `deleteValue`; delegates to
// `jsonpath::delete` and returns the resulting document.
#[wasm_bindgen(catch, js_name = "deleteValue")]
pub fn delete(js_value: JsValue, path: &str) -> JsValue {
    let json = match into_serde_json(&js_value) {
        Ok(json) => json,
        Err(e) => return JsValue::from_str(&format!("{:?}", JsonPathError::Serde(e))),
    };
    match jsonpath::delete(json, path) {
        Ok(ret) => match JsValue::from_serde(&ret) {
            Ok(ret) => ret,
            Err(e) => JsValue::from_str(&format!("{:?}", JsonPathError::Serde(e.to_string()))),
        },
        Err(e) => JsValue::from_str(&format!("{:?}", e)),
    }
}

// One-shot replace, exported as `replaceWith`: each match is passed to
// the JS callback `fun` and substituted with its return value.
#[wasm_bindgen(catch, js_name = "replaceWith")]
pub fn replace_with(js_value: JsValue, path: &str, fun: js_sys::Function) -> JsValue {
    let json = match into_serde_json(&js_value) {
        Ok(json) => json,
        Err(e) => return JsValue::from_str(&format!("{:?}", JsonPathError::Serde(e))),
    };
    match jsonpath::replace_with(json, path, &mut |v| replace_fun(v, &fun)) {
        Ok(ret) => match JsValue::from_serde(&ret) {
            Ok(ret) => ret,
            Err(e) => JsValue::from_str(&format!("{:?}", JsonPathError::Serde(e.to_string()))),
        },
        Err(e) => JsValue::from_str(&format!("{:?}", e)),
    }
}
///
/// Due to `wasm_bindgen` constraints, a builder pattern cannot be used here.
/// Due to lifetime constraints, the native `Selector` cannot be stored.
///
#[wasm_bindgen]
#[derive(Default)]
pub struct Selector {
    // JSONPath expression, set via `path()`; validated lazily in `select()`.
    path: Option<String>,
    // Parsed JSON document, set via `value()`.
    value: Option<Value>,
}

#[wasm_bindgen]
impl Selector {
    #[wasm_bindgen(constructor)]
    pub fn new() -> Self {
        Selector::default()
    }

    /// Stores the JSONPath expression; it is not validated until `select`.
    #[wasm_bindgen(catch)]
    pub fn path(&mut self, path: &str) -> Result<(), JsValue> {
        self.path = Some(path.to_string());
        Ok(())
    }

    /// Parses and stores the JSON document (JS object or JSON string).
    #[wasm_bindgen(catch)]
    pub fn value(&mut self, value: JsValue) -> Result<(), JsValue> {
        let json = into_serde_json(&value)
            .map_err(|e| JsValue::from_str(&format!("{:?}", JsonPathError::Serde(e))))?;
        self.value = Some(json);
        Ok(())
    }

    /// Runs the stored path against the stored document; errors if either
    /// was never set.
    #[wasm_bindgen(catch, js_name = select)]
    pub fn select(&mut self) -> Result<JsValue, JsValue> {
        #[allow(deprecated)]
        let mut selector = _Selector::new();

        if let Some(path) = &self.path {
            let _ = selector
                .str_path(path)
                .map_err(|e| JsValue::from_str(&format!("{:?}", e)))?;
        } else {
            return Err(JsValue::from_str(&format!(
                "{:?}",
                JsonPathError::EmptyPath
            )));
        }

        if let Some(value) = &self.value {
            let _ = selector.value(value);
        } else {
            return Err(JsValue::from_str(&format!(
                "{:?}",
                JsonPathError::EmptyValue
            )));
        }

        match selector.select() {
            Ok(ret) => match JsValue::from_serde(&ret) {
                Ok(ret) => Ok(ret),
                Err(e) => Err(JsValue::from_str(&format!(
                    "{:?}",
                    JsonPathError::Serde(e.to_string())
                ))),
            },
            Err(e) => Err(JsValue::from_str(&format!("{:?}", e))),
        }
    }
}
///
/// Due to `wasm_bindgen` constraints, a builder pattern cannot be used here.
///
#[wasm_bindgen]
#[derive(Default)]
pub struct SelectorMut {
    // JSONPath expression, set via `path()`.
    path: Option<String>,
    // Owned JSON document; mutated in place by `delete`/`replace_with`
    // and handed back (and emptied) by `take()`.
    value: Option<Value>,
}

#[wasm_bindgen]
impl SelectorMut {
    #[wasm_bindgen(constructor)]
    pub fn new() -> Self {
        SelectorMut::default()
    }

    /// Stores the JSONPath expression.
    #[wasm_bindgen(catch)]
    pub fn path(&mut self, path: &str) -> Result<(), JsValue> {
        self.path = Some(path.to_string());
        Ok(())
    }

    /// Parses and stores the JSON document (JS object or JSON string).
    #[wasm_bindgen(catch)]
    pub fn value(&mut self, value: JsValue) -> Result<(), JsValue> {
        let json = into_serde_json(&value)
            .map_err(|e| JsValue::from_str(&format!("{:?}", JsonPathError::Serde(e))))?;
        self.value = Some(json);
        Ok(())
    }

    /// Deletes every match in the stored document; exported to JS as
    /// `deleteValue`. The mutated document stays stored until `take()`.
    #[wasm_bindgen(catch, js_name = "deleteValue")]
    pub fn delete(&mut self) -> Result<(), JsValue> {
        #[allow(deprecated)]
        let mut selector = _SelectorMut::new();

        if let Some(path) = &self.path {
            let _ = selector.str_path(path);
        } else {
            return Err(JsValue::from_str(&format!(
                "{:?}",
                JsonPathError::EmptyPath
            )));
        };

        if let Some(value) = self.value.take() {
            selector.value(value);
        } else {
            return Err(JsValue::from_str(&format!(
                "{:?}",
                JsonPathError::EmptyValue
            )));
        };

        match selector.delete() {
            Err(e) => Err(JsValue::from_str(&format!("{:?}", e))),
            _ => {
                self.value = selector.take();
                Ok(())
            }
        }
    }

    /// Replaces every match with the JS callback's return value; exported
    /// to JS as `replaceWith`.
    #[wasm_bindgen(catch, js_name = replaceWith)]
    pub fn replace_with(&mut self, fun: js_sys::Function) -> Result<(), JsValue> {
        #[allow(deprecated)]
        let mut selector = _SelectorMut::new();

        if let Some(path) = &self.path {
            let _ = selector.str_path(path);
        } else {
            return Err(JsValue::from_str(&format!(
                "{:?}",
                JsonPathError::EmptyPath
            )));
        };

        if let Some(value) = self.value.take() {
            selector.value(value);
        } else {
            return Err(JsValue::from_str(&format!(
                "{:?}",
                JsonPathError::EmptyValue
            )));
        };

        match selector.replace_with(&mut |v| replace_fun(v, &fun)) {
            Err(e) => Err(JsValue::from_str(&format!("{:?}", e))),
            _ => {
                self.value = selector.take();
                Ok(())
            }
        }
    }

    /// Hands back the (possibly mutated) document and leaves the selector
    /// empty; errors if no document is stored.
    #[wasm_bindgen(catch)]
    pub fn take(&mut self) -> Result<JsValue, JsValue> {
        match self.value.take() {
            Some(ret) => match JsValue::from_serde(&ret) {
                Ok(ret) => Ok(ret),
                Err(e) => Err(JsValue::from_str(&format!("{:?}", e))),
            },
            None => Err(JsValue::from_str(&format!(
                "{:?}",
                JsonPathError::EmptyValue
            ))),
        }
    }
}
| rust | MIT | 3a930bc03c3944c8374717289266ceb9cc1491c6 | 2026-01-04T20:24:15.035874Z | false |
freestrings/jsonpath | https://github.com/freestrings/jsonpath/blob/3a930bc03c3944c8374717289266ceb9cc1491c6/wasm/tests/web.rs | wasm/tests/web.rs | /**
 * Run with: `wasm-pack test`
 * However, this did not work reliably, so it is not used.
 */
#![cfg(target_arch = "wasm32")]
extern crate core;
extern crate js_sys;
extern crate jsonpath_wasm as jsonpath;
#[macro_use]
extern crate serde_json;
extern crate wasm_bindgen;
extern crate wasm_bindgen_test;
use serde_json::Value;
use wasm_bindgen::*;
use wasm_bindgen::prelude::*;
use wasm_bindgen_test::*;
wasm_bindgen_test_configure!(run_in_browser);
// Shared fixture: the classic "store" example document used by all tests.
fn json_str() -> &'static str {
    r#"
    {
        "store": {
            "book": [
                {
                    "category": "reference",
                    "author": "Nigel Rees",
                    "title": "Sayings of the Century",
                    "price": 8.95
                },
                {
                    "category": "fiction",
                    "author": "Evelyn Waugh",
                    "title": "Sword of Honour",
                    "price": 12.99
                },
                {
                    "category": "fiction",
                    "author": "Herman Melville",
                    "title": "Moby Dick",
                    "isbn": "0-553-21311-3",
                    "price": 8.99
                },
                {
                    "category": "fiction",
                    "author": "J. R. R. Tolkien",
                    "title": "The Lord of the Rings",
                    "isbn": "0-395-19395-8",
                    "price": 22.99
                }
            ],
            "bicycle": {
                "color": "red",
                "price": 19.95
            }
        },
        "expensive": 10
    }
    "#
}
// Expected result of `$..book[2]` against `json_str()`: the third book.
fn target_json() -> Value {
    json!([{
        "category" : "fiction",
        "author" : "Herman Melville",
        "title" : "Moby Dick",
        "isbn" : "0-553-21311-3",
        "price" : 8.99
    }])
}
// One-shot select: query string + document in a single call.
#[wasm_bindgen_test]
fn select() {
    let json: Value = jsonpath::select(JsValue::from_str(json_str()), "$..book[2]").into_serde().unwrap();
    assert_eq!(json, target_json());
}
// `compile` returns a JS function pre-bound to the path; call it with a document.
#[wasm_bindgen_test]
fn compile() {
    let js_value = jsonpath::compile("$..book[2]");
    assert_eq!(js_value.is_function(), true);
    let cb: &js_sys::Function = JsCast::unchecked_ref(js_value.as_ref());
    let cb_result: JsValue = cb.call1(&js_value, &JsValue::from_str(json_str())).unwrap();
    let json: Value = cb_result.into_serde().unwrap();
    assert_eq!(json, target_json());
}
// `selector` is the converse of `compile`: pre-bound to the document, called with a path.
#[wasm_bindgen_test]
fn selector() {
    let js_value = jsonpath::selector(JsValue::from_str(json_str()));
    assert_eq!(js_value.is_function(), true);
    let cb: &js_sys::Function = JsCast::unchecked_ref(js_value.as_ref());
    let cb_result: JsValue = cb.call1(&js_value, &JsValue::from_str("$..book[2]")).unwrap();
    let json: Value = cb_result.into_serde().unwrap();
    assert_eq!(json, target_json());
}
// NOTE(review): `select_as`, `map`, and `get` are not present on the current
// wasm `Selector` (which only exposes `path`/`value`/`select`) — this test
// was presumably written against an older API; confirm before re-enabling.
#[wasm_bindgen_test]
fn selector_struct() {
    let mut selector = jsonpath::Selector::new();
    selector.path("$..book[2]").unwrap();
    selector.value(JsValue::from_str(json_str())).unwrap();
    let json: Value = selector.select_as().unwrap().into_serde().unwrap();
    assert_eq!(json, target_json());
    // Map callback: replace the single matched object with {"key": "value"}.
    let cb = Closure::wrap(Box::new(|js_value: JsValue| {
        match js_value.into_serde().unwrap() {
            Value::Array(mut vec) => {
                match vec.pop().unwrap() {
                    Value::Object(mut map) => {
                        map.clear();
                        map.insert("key".to_string(), Value::String("value".to_string()));
                        JsValue::from_serde(&Value::Object(map)).unwrap()
                    }
                    _ => return JsValue::NULL
                }
            }
            _ => return JsValue::NULL
        }
    }) as Box<Fn(JsValue) -> JsValue>);
    selector.map(cb.as_ref().clone()).unwrap();
    let js_value = selector.get().unwrap();
    assert_eq!(js_value.into_serde::<Value>().unwrap(), json!({ "key": "value" }));
}
camrbuss/pinci | https://github.com/camrbuss/pinci/blob/4956f50cf3902504b41de4bbb62cca7649930eb4/firmware/src/main.rs | firmware/src/main.rs | #![no_std]
#![no_main]
use panic_halt as _;
#[rtic::app(device = rp_pico::hal::pac, peripherals = true, dispatchers = [PIO0_IRQ_0])]
mod app {
use cortex_m::prelude::_embedded_hal_watchdog_Watchdog;
use cortex_m::prelude::_embedded_hal_watchdog_WatchdogEnable;
use embedded_hal::digital::v2::{InputPin, OutputPin};
use embedded_time::duration::units::*;
use keyberon::action::{k, l, Action, HoldTapAction, HoldTapConfig};
use keyberon::chording::{ChordDef, Chording};
use keyberon::debounce::Debouncer;
use keyberon::key_code::{self, KeyCode::*};
use keyberon::layout::{self, Layout};
use keyberon::matrix::Matrix;
use rp_pico::{
hal::{
self, clocks::init_clocks_and_plls, gpio::DynPin, sio::Sio, timer::Alarm, usb::UsbBus,
watchdog::Watchdog,
},
XOSC_CRYSTAL_FREQ,
};
use usb_device::class_prelude::*;
use usb_device::device::UsbDeviceState;
    /// Matrix scan period in microseconds (1 kHz scan rate).
    const SCAN_TIME_US: u32 = 1000;
    /// USB bus allocator. `static mut` because the USB device/class need a
    /// `'static` borrow of it; it is written exactly once in `init` before
    /// those borrows are created.
    static mut USB_BUS: Option<usb_device::bus::UsbBusAllocator<rp2040_hal::usb::UsbBus>> = None;
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum CustomActions {
Uf2,
Reset,
}
    /// Keymap action: reboot into the UF2 bootloader.
    const UF2: Action<CustomActions> = Action::Custom(CustomActions::Uf2);
    /// Keymap action: reset the MCU.
    const RESET: Action<CustomActions> = Action::Custom(CustomActions::Reset);
    // Chords: pressing both source keys together emits the target key.
    // Entries are ((row, target_column), &[(row, key_column), ...]).
    const QW_ESC: ChordDef = ((0, 37), &[(0, 0), (0, 1)]);
    const JU_ESC: ChordDef = ((0, 37), &[(0, 16), (0, 6)]);
    const KI_TAB: ChordDef = ((0, 39), &[(0, 17), (0, 7)]);
    const LO_ENTER: ChordDef = ((0, 38), &[(0, 18), (0, 8)]);
    const CHORDS: [ChordDef; 4] = [QW_ESC, JU_ESC, KI_TAB, LO_ENTER];
    // Hold-tap actions: tap emits the letter/symbol, holding activates the
    // modifier or layer. The home-row shifts use `PermissiveHold` so rolled
    // key presses still resolve as shifted; the rest use the default
    // timeout-based resolution (200 ms).
    const A_LSHIFT: Action<CustomActions> = Action::HoldTap(&HoldTapAction {
        timeout: 200,
        hold: k(LShift),
        tap: k(A),
        config: HoldTapConfig::PermissiveHold,
        tap_hold_interval: 0,
    });
    const L5_S: Action<CustomActions> = Action::HoldTap(&HoldTapAction {
        timeout: 200,
        hold: l(5),
        tap: k(S),
        config: HoldTapConfig::Default,
        tap_hold_interval: 0,
    });
    const D_LALT: Action<CustomActions> = Action::HoldTap(&HoldTapAction {
        timeout: 200,
        hold: k(LAlt),
        tap: k(D),
        config: HoldTapConfig::Default,
        tap_hold_interval: 0,
    });
    const L2_F: Action<CustomActions> = Action::HoldTap(&HoldTapAction {
        timeout: 200,
        hold: l(2),
        tap: k(F),
        config: HoldTapConfig::Default,
        tap_hold_interval: 0,
    });
    const DOT_RALT: Action<CustomActions> = Action::HoldTap(&HoldTapAction {
        timeout: 200,
        hold: k(RAlt),
        tap: k(Dot),
        config: HoldTapConfig::Default,
        tap_hold_interval: 0,
    });
    const X_LALT: Action<CustomActions> = Action::HoldTap(&HoldTapAction {
        timeout: 200,
        hold: k(LAlt),
        tap: k(X),
        config: HoldTapConfig::Default,
        tap_hold_interval: 0,
    });
    const SLASH_RCTRL: Action<CustomActions> = Action::HoldTap(&HoldTapAction {
        timeout: 200,
        hold: k(RCtrl),
        tap: k(Slash),
        config: HoldTapConfig::Default,
        tap_hold_interval: 0,
    });
    const Z_LCTRL: Action<CustomActions> = Action::HoldTap(&HoldTapAction {
        timeout: 200,
        hold: k(LCtrl),
        tap: k(Z),
        config: HoldTapConfig::Default,
        tap_hold_interval: 0,
    });
    const L4_C: Action<CustomActions> = Action::HoldTap(&HoldTapAction {
        timeout: 200,
        hold: l(4),
        tap: k(C),
        config: HoldTapConfig::Default,
        tap_hold_interval: 0,
    });
    const SEMI_RSHIFT: Action<CustomActions> = Action::HoldTap(&HoldTapAction {
        timeout: 200,
        hold: k(RShift),
        tap: k(SColon),
        config: HoldTapConfig::PermissiveHold,
        tap_hold_interval: 0,
    });
    const L7_SPACE: Action<CustomActions> = Action::HoldTap(&HoldTapAction {
        timeout: 200,
        hold: l(7),
        tap: k(Space),
        config: HoldTapConfig::Default,
        tap_hold_interval: 0,
    });
    const L4_COMMA: Action<CustomActions> = Action::HoldTap(&HoldTapAction {
        timeout: 200,
        hold: l(4),
        tap: k(Comma),
        config: HoldTapConfig::Default,
        tap_hold_interval: 0,
    });
    // Keymap: 8 layers over a single logical 1x40 row (4 physical rows x 10
    // columns flattened by the side transform in `init`). `t` = transparent.
    #[rustfmt::skip]
    pub static LAYERS: keyberon::layout::Layers<40, 1, 8, CustomActions> = keyberon::layout::layout! {
        {[ // 0: base (QWERTY with home-row mods and layer taps)
            Q W E R T Y U I O P
            {A_LSHIFT} {L5_S} {D_LALT} {L2_F} G H J K L {SEMI_RSHIFT}
            {Z_LCTRL} {X_LALT} {L4_C} V B N M {L4_COMMA} {DOT_RALT} {SLASH_RCTRL}
            t t t (3) BSpace {L7_SPACE} Tab Escape Enter Tab
        ]}
        {[ // 1: unused (all transparent)
            t t t t t t t t t t
            t t t t t t t t t t
            t t t t t t t t t t
            t t t t t t t t t t
        ]}
        {[ // 2: right-hand numpad
            t t t t t * 7 8 9 +
            t t t t t / 4 5 6 -
            t t t t t . 1 2 3 .
            t t t t t 0 0 t t t
        ]}
        {[ // 3: left-hand numpad
            t 7 8 9 t t t t t t
            t 4 5 6 t t t t t t
            0 1 2 3 t t t t t t
            t t t t t t t t t t
        ]}
        {[ // 4: symbols
            ! @ # $ % t ~ | '`' +
            '{' '}' '(' ')' t = '_' - '"' Quote
            '[' ']' ^ & * t / '\\' t t
            t t t t t t t t t t
        ]}
        {[ // 5: navigation
            t t t t t t t PgUp t t
            t t Delete t t Left Down Up Right Enter
            t t t t t t Home PgDown End t
            t t t t t t t t t t
        ]}
        {[ // 6: system (reset / bootloader / sleep)
            {RESET} {UF2} t t t t t t t MediaSleep
            t t t t t t t t t t
            t t t t t t t t t t
            t t t t t t t t t t
        ]}
        {[ // 7: media and misc
            t t t t t MediaNextSong MediaPlayPause MediaVolDown MediaVolUp PScreen
            t t t t t t Escape Tab Enter Enter
            t t t t t t t t t Delete
            t t t t Delete t t t t t
        ]}
    };
    /// Resources shared between tasks; access is mediated by RTIC `lock`.
    #[shared]
    struct Shared {
        usb_dev: usb_device::device::UsbDevice<'static, rp2040_hal::usb::UsbBus>,
        usb_class: keyberon::hid::HidClass<
            'static,
            rp2040_hal::usb::UsbBus,
            keyberon::keyboard::Keyboard<()>,
        >,
        uart: rp2040_hal::pac::UART0,
        layout: Layout<40, 1, 8, CustomActions>,
    }
    /// Resources owned by a single task (no locking required).
    #[local]
    struct Local {
        watchdog: hal::watchdog::Watchdog,
        chording: Chording<4>,
        matrix: Matrix<DynPin, DynPin, 17, 1>,
        debouncer: Debouncer<[[bool; 17]; 1]>,
        alarm: hal::timer::Alarm0,
        // Maps a scanned (row, col) event into the 1x40 logical layout,
        // chosen per half in `init`.
        transform: fn(layout::Event) -> layout::Event,
        // True on the half with GPIO1 pulled high (the USB/right side).
        is_right: bool,
    }
    /// Hardware bring-up: clocks, GPIO matrix, UART link between halves,
    /// keyberon layout/debouncer/chording, scan timer, USB, and watchdog.
    #[init]
    fn init(c: init::Context) -> (Shared, Local, init::Monotonics) {
        // Soft-reset does not release the hardware spinlocks
        // Release them now to avoid a deadlock after debug or watchdog reset
        unsafe {
            hal::sio::spinlock_reset();
        }
        let mut resets = c.device.RESETS;
        let mut watchdog = Watchdog::new(c.device.WATCHDOG);
        let clocks = init_clocks_and_plls(
            XOSC_CRYSTAL_FREQ,
            c.device.XOSC,
            c.device.CLOCKS,
            c.device.PLL_SYS,
            c.device.PLL_USB,
            &mut resets,
            &mut watchdog,
        )
        .ok()
        .unwrap();
        let sio = Sio::new(c.device.SIO);
        let pins = hal::gpio::Pins::new(
            c.device.IO_BANK0,
            c.device.PADS_BANK0,
            sio.gpio_bank0,
            &mut resets,
        );
        // 17 input pins and 1 empty pin that is not really used, but
        // is needed by keyberon as a "row"
        let gpio2 = pins.gpio2;
        let gpio28 = pins.gpio28;
        let gpio3 = pins.gpio3;
        let gpio27 = pins.gpio27;
        let gpio4 = pins.gpio4;
        let gpio5 = pins.gpio5;
        let gpio26 = pins.gpio26;
        let gpio6 = pins.gpio6;
        let gpio22 = pins.gpio22;
        let gpio7 = pins.gpio7;
        let gpio10 = pins.gpio10;
        let gpio11 = pins.gpio11;
        let gpio12 = pins.gpio12;
        let gpio21 = pins.gpio21;
        let gpio13 = pins.gpio13;
        let gpio15 = pins.gpio15;
        let gpio14 = pins.gpio14;
        let gpio20 = pins.gpio20;
        let mut led = pins.gpio25.into_push_pull_output();
        // GPIO1 is high for the right hand side
        let side = pins.gpio1.into_floating_input();
        // delay for power on
        for _ in 0..1000 {
            cortex_m::asm::nop();
        }
        // Use a transform to get correct layout from right and left side
        let is_right = side.is_high().unwrap();
        let transform: fn(layout::Event) -> layout::Event = if is_right {
            |e| {
                e.transform(|i: u8, j: u8| -> (u8, u8) {
                    // 0 -> 5, 5 -> 15, 10 -> 25
                    let x = ((j / 5) * 10) + (j % 5) + 5;
                    (i, x)
                })
            }
        } else {
            |e| {
                e.transform(|i: u8, j: u8| -> (u8, u8) {
                    let x = ((j / 5) * 10) + 4 - (j % 5);
                    (i, x)
                })
            }
        };
        // Enable UART0
        resets.reset.modify(|_, w| w.uart0().clear_bit());
        while resets.reset_done.read().uart0().bit_is_clear() {}
        let uart = c.device.UART0;
        // NOTE(review): raw divisor/config values — presumably chosen for the
        // configured peripheral clock; verify baud rate against clock setup.
        uart.uartibrd.write(|w| unsafe { w.bits(0b0100_0011) });
        uart.uartfbrd.write(|w| unsafe { w.bits(0b0011_0100) });
        // 8 data bits, FIFOs disabled (the rx handler relies on this).
        uart.uartlcr_h.write(|w| unsafe { w.bits(0b0110_0000) });
        // Enable UART with both TX and RX.
        uart.uartcr.write(|w| unsafe { w.bits(0b11_0000_0001) });
        uart.uartimsc.write(|w| w.rxim().set_bit());
        let matrix: Matrix<DynPin, DynPin, 17, 1> = Matrix::new(
            [
                gpio2.into_pull_up_input().into(),
                gpio28.into_pull_up_input().into(),
                gpio3.into_pull_up_input().into(),
                gpio27.into_pull_up_input().into(),
                gpio4.into_pull_up_input().into(),
                gpio5.into_pull_up_input().into(),
                gpio26.into_pull_up_input().into(),
                gpio6.into_pull_up_input().into(),
                gpio22.into_pull_up_input().into(),
                gpio7.into_pull_up_input().into(),
                gpio10.into_pull_up_input().into(),
                gpio11.into_pull_up_input().into(),
                gpio12.into_pull_up_input().into(),
                gpio21.into_pull_up_input().into(),
                gpio13.into_pull_up_input().into(),
                gpio15.into_pull_up_input().into(),
                gpio14.into_pull_up_input().into(),
            ],
            [gpio20.into_push_pull_output().into()],
        )
        .unwrap();
        let layout = Layout::new(&LAYERS);
        let debouncer = Debouncer::new([[false; 17]; 1], [[false; 17]; 1], 20);
        let chording = Chording::new(&CHORDS);
        let mut timer = hal::Timer::new(c.device.TIMER, &mut resets);
        let mut alarm = timer.alarm_0().unwrap();
        let _ = alarm.schedule(SCAN_TIME_US.microseconds());
        alarm.enable_interrupt();
        // TRS cable only supports one direction of communication
        if is_right {
            let _rx_pin = pins.gpio17.into_mode::<hal::gpio::FunctionUart>();
            led.set_high().unwrap();
        } else {
            let _tx_pin = pins.gpio16.into_mode::<hal::gpio::FunctionUart>();
        }
        let usb_bus = UsbBusAllocator::new(UsbBus::new(
            c.device.USBCTRL_REGS,
            c.device.USBCTRL_DPRAM,
            clocks.usb_clock,
            true,
            &mut resets,
        ));
        unsafe {
            USB_BUS = Some(usb_bus);
        }
        let usb_class = keyberon::new_class(unsafe { USB_BUS.as_ref().unwrap() }, ());
        let usb_dev = keyberon::new_device(unsafe { USB_BUS.as_ref().unwrap() });
        // Start watchdog and feed it with the lowest priority task at 1000hz
        watchdog.start(10_000.microseconds());
        (
            Shared {
                usb_dev,
                usb_class,
                uart,
                layout,
            },
            Local {
                alarm,
                chording,
                watchdog,
                matrix,
                debouncer,
                transform,
                is_right,
            },
            init::Monotonics(),
        )
    }
    /// USB interrupt: poll the device and, when it has work, the HID class.
    #[task(binds = USBCTRL_IRQ, priority = 3, shared = [usb_dev, usb_class])]
    fn usb_rx(c: usb_rx::Context) {
        let mut usb_d = c.shared.usb_dev;
        let mut usb_c = c.shared.usb_class;
        usb_d.lock(|d| {
            usb_c.lock(|c| {
                if d.poll(&mut [c]) {
                    c.poll();
                }
            })
        });
    }
    /// Feed key events into the keyberon layout. `Some(event)` only records
    /// the event; `None` ticks the layout, runs custom actions, and pushes
    /// the resulting HID report out over USB.
    #[task(priority = 2, capacity = 8, shared = [usb_dev, usb_class, layout])]
    fn handle_event(mut c: handle_event::Context, event: Option<layout::Event>) {
        match event {
            // TODO: Support Uf2 for the side not performing USB HID
            // The right side only passes None here and buffers the keys
            // for USB to send out when polled by the host
            None => match c.shared.layout.lock(|l| l.tick()) {
                layout::CustomEvent::Press(event) => match event {
                    CustomActions::Uf2 => {
                        hal::rom_data::reset_to_usb_boot(0, 0);
                    }
                    CustomActions::Reset => {
                        cortex_m::peripheral::SCB::sys_reset();
                    }
                },
                _ => (),
            },
            Some(e) => {
                c.shared.layout.lock(|l| l.event(e));
                return;
            }
        };
        let report: key_code::KbHidReport = c.shared.layout.lock(|l| l.keycodes().collect());
        // Skip the USB write if the report is unchanged or the bus is not up.
        if !c
            .shared
            .usb_class
            .lock(|k| k.device_mut().set_keyboard_report(report.clone()))
        {
            return;
        }
        if c.shared.usb_dev.lock(|d| d.state()) != UsbDeviceState::Configured {
            return;
        }
        while let Ok(0) = c.shared.usb_class.lock(|k| k.write(report.as_bytes())) {}
    }
#[task(
binds = TIMER_IRQ_0,
priority = 1,
shared = [uart],
local = [matrix, debouncer, chording, watchdog, alarm, transform, is_right],
)]
fn scan_timer_irq(mut c: scan_timer_irq::Context) {
let alarm = c.local.alarm;
alarm.clear_interrupt();
let _ = alarm.schedule(SCAN_TIME_US.microseconds());
c.local.watchdog.feed();
let keys_pressed = c.local.matrix.get().unwrap();
let deb_events = c
.local
.debouncer
.events(keys_pressed)
.map(c.local.transform);
// TODO: right now chords cannot only be exclusively on one side
let events = c.local.chording.tick(deb_events.collect()).into_iter();
// TODO: With a TRS cable, we can only have one device support USB
if *c.local.is_right {
for event in events {
handle_event::spawn(Some(event)).unwrap();
}
handle_event::spawn(None).unwrap();
} else {
// coordinate and press/release is encoded in a single byte
// the first 6 bits are the coordinate and therefore cannot go past 63
// The last bit is to signify if it is the last byte to be sent, but
// this is not currently used as serial rx is the highest priority
// end? press=1/release=0 key_number
// 7 6 543210
let mut es: [Option<layout::Event>; 16] = [None; 16];
for (i, e) in events.enumerate() {
es[i] = Some(e);
}
let stop_index = es.iter().position(|&v| v == None).unwrap();
for i in 0..(stop_index + 1) {
let mut byte: u8;
if let Some(ev) = es[i] {
if ev.coord().1 <= 0b0011_1111 {
byte = ev.coord().1;
} else {
byte = 0b0011_1111;
}
byte |= (ev.is_press() as u8) << 6;
if i == stop_index + 1 {
byte |= 0b1000_0000;
}
// Watchdog will catch any possibility for an infinite loop
while c.shared.uart.lock(|u| u.uartfr.read().txff().bit_is_set()) {}
c.shared
.uart
.lock(|u| u.uartdr.write(|w| unsafe { w.data().bits(byte) }));
}
}
}
}
#[task(binds = UART0_IRQ, priority = 4, shared = [uart])]
fn rx(mut c: rx::Context) {
// RX FIFO is disabled so we just check that the byte received is valid
// and then we read it. If a bad byte is received, it is possible that the
// receiving side will never read. TODO: fix this
if c.shared.uart.lock(|u| {
u.uartmis.read().rxmis().bit_is_set()
&& u.uartfr.read().rxfe().bit_is_clear()
&& u.uartdr.read().oe().bit_is_clear()
&& u.uartdr.read().be().bit_is_clear()
&& u.uartdr.read().pe().bit_is_clear()
&& u.uartdr.read().fe().bit_is_clear()
}) {
let d: u8 = c.shared.uart.lock(|u| u.uartdr.read().data().bits());
if (d & 0b01000000) > 0 {
handle_event::spawn(Some(layout::Event::Press(0, d & 0b0011_1111))).unwrap();
} else {
handle_event::spawn(Some(layout::Event::Release(0, d & 0b0011_1111))).unwrap();
}
}
}
}
| rust | MIT | 4956f50cf3902504b41de4bbb62cca7649930eb4 | 2026-01-04T20:24:12.918928Z | false |
karpathy/rustbpe | https://github.com/karpathy/rustbpe/blob/ddf848f6961a0655dc8693742fc338e5682c0d3b/src/lib.rs | src/lib.rs | use std::cmp::Ordering;
use std::collections::HashMap as StdHashMap;
use dary_heap::OctonaryHeap;
use fancy_regex::Regex;
use pyo3::prelude::*;
use ahash::{AHashMap, AHashSet};
use compact_str::CompactString;
use rayon::prelude::*;
// Default GPT-4 style regex pattern for splitting text
const GPT4_PATTERN: &str = r"'(?i:[sdmt]|ll|ve|re)|[^\r\n\p{L}\p{N}]?+\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]++[\r\n]*|\s*[\r\n]|\s+(?!\S)|\s+";
/// A pair of adjacent token ids, the unit of a BPE merge.
type Pair = (u32, u32);
/// A Byte Pair Encoding tokenizer that matches the GPT-4 style implementation
#[pyclass]
pub struct Tokenizer {
    /// Maps pairs of token IDs to their merged token ID
    pub merges: StdHashMap<Pair, u32>,
    /// The regex pattern used for text splitting
    pub pattern: String,
    /// Compiled regex for efficiency
    compiled_pattern: Regex,
}
impl Default for Tokenizer {
    /// Equivalent to [`Tokenizer::new`]: empty merges, empty pattern.
    fn default() -> Self {
        Self::new()
    }
}
// ------------------------ internal helpers ------------------------
/// One unique text chunk, held as its current sequence of token ids.
#[derive(Clone, Debug)]
struct Word {
    ids: Vec<u32>,
}

impl Word {
    /// Wrap an id sequence.
    #[inline]
    fn new(ids: Vec<u32>) -> Self {
        Word { ids }
    }

    /// Iterate over all adjacent id pairs in order.
    #[inline]
    fn pairs(&self) -> impl Iterator<Item = (u32, u32)> + '_ {
        self.ids.windows(2).map(|w| (w[0], w[1]))
    }

    /// Merge every non-overlapping, left-to-right occurrence of `pair` into
    /// `new_id`, rewriting `self.ids` in place.
    ///
    /// Returns the per-word pair-count deltas caused by the rewrite:
    /// -1 for each pair destroyed, +1 for each pair created. The right-hand
    /// neighbor is read from the *unmerged* tail, so deltas from consecutive
    /// occurrences cancel correctly when summed.
    ///
    /// Deliberately uses a flat Vec of deltas (no HashMap) in this hot loop.
    fn merge_pair(&mut self, pair: (u32, u32), new_id: u32) -> Vec<((u32, u32), i32)> {
        let (first, second) = pair;
        let len = self.ids.len();
        if len < 2 {
            return Vec::new();
        }
        let mut merged: Vec<u32> = Vec::with_capacity(len);
        let mut changes: Vec<((u32, u32), i32)> = Vec::with_capacity(6);
        let mut idx = 0;
        while idx < len {
            let hit = idx + 1 < len && self.ids[idx] == first && self.ids[idx + 1] == second;
            if !hit {
                merged.push(self.ids[idx]);
                idx += 1;
                continue;
            }
            // The pair's left neighbor (already rewritten) loses its old pair
            // and gains one with the merged token.
            if let Some(&prev) = merged.last() {
                changes.push(((prev, first), -1));
                changes.push(((prev, new_id), 1));
            }
            changes.push(((first, second), -1));
            // The right neighbor is taken from the raw, not-yet-merged tail.
            if let Some(&next) = self.ids.get(idx + 2) {
                changes.push(((second, next), -1));
                changes.push(((new_id, next), 1));
            }
            merged.push(new_id);
            idx += 2; // skip both halves of the merged pair
        }
        self.ids = merged;
        changes
    }
}
/// A queued candidate merge for the training heap.
#[derive(Debug, Eq)]
struct MergeJob {
    // The candidate pair to merge.
    pair: Pair,
    // Pair frequency at the time this job was queued. May be stale; the
    // merge loop lazily refreshes stale entries before applying them.
    count: u64,
    /// set of word indices where this pair may occur and needs processing
    pos: AHashSet<usize>,
}
impl PartialEq for MergeJob {
    // Must agree with `Ord` (which also ignores `pos`), so `pos` is
    // deliberately excluded from equality.
    fn eq(&self, other: &Self) -> bool {
        self.count == other.count && self.pair == other.pair
    }
}
impl PartialOrd for MergeJob {
    // Delegate to the total order defined in `Ord`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for MergeJob {
    /// Heap order: higher count wins (max-heap); when counts tie, the
    /// comparison on `pair` is reversed so the *smaller* pair ranks higher,
    /// giving a deterministic, ascending pair order among ties.
    fn cmp(&self, other: &Self) -> Ordering {
        self.count
            .cmp(&other.count)
            .then_with(|| other.pair.cmp(&self.pair))
    }
}
/// Count all adjacent pairs across `words` in parallel with rayon.
///
/// Returns:
/// - pair -> total occurrence count (each word weighted by `counts[i]`),
/// - pair -> set of word indices where the pair occurs (for targeted merging).
///
/// Words with a zero count or fewer than two ids contribute nothing.
#[inline]
fn count_pairs_parallel(
    words: &[Word],
    counts: &[i32],
) -> (AHashMap<Pair, i32>, AHashMap<Pair, AHashSet<usize>>) {
    words
        .par_iter()
        .enumerate()
        .map(|(i, w)| {
            // Per-word local maps; merged pairwise in `reduce` below.
            let mut local_pc: AHashMap<Pair, i32> = AHashMap::new();
            let mut local_wtu: AHashMap<Pair, AHashSet<usize>> = AHashMap::new();
            if w.ids.len() >= 2 && counts[i] != 0 {
                for (a, b) in w.pairs() {
                    *local_pc.entry((a, b)).or_default() += counts[i];
                    local_wtu.entry((a, b)).or_default().insert(i);
                }
            }
            (local_pc, local_wtu)
        })
        .reduce(
            || (AHashMap::new(), AHashMap::new()),
            |(mut acc_pc, mut acc_wtu), (pc, wtu)| {
                for (k, v) in pc {
                    *acc_pc.entry(k).or_default() += v;
                }
                for (k, s) in wtu {
                    acc_wtu.entry(k).or_default().extend(s);
                }
                (acc_pc, acc_wtu)
            },
        )
}
// ------------------------ END helpers ------------------------
impl Tokenizer {
    /// Core incremental BPE training given unique words and their counts.
    /// `words`: one entry per unique chunk (Vec<u32> of token-ids/bytes).
    /// `counts`: same length as `words`, count per chunk.
    ///
    /// Uses a lazy max-heap: stale counts are detected at pop time and the
    /// job is re-queued with its refreshed count instead of eagerly updating
    /// every heap entry on each merge.
    fn train_core_incremental(&mut self, mut words: Vec<Word>, counts: Vec<i32>, vocab_size: u32) {
        assert!(vocab_size >= 256, "vocab_size must be at least 256");
        let num_merges = vocab_size - 256;
        log::info!("Starting BPE training: {} merges to compute", num_merges);
        self.merges.clear();
        // ---- Initial pair_counts and where_to_update (parallel) ----
        log::info!(
            "Computing initial pair counts from {} unique sequences",
            words.len()
        );
        let (mut pair_counts, mut where_to_update) = count_pairs_parallel(&words, &counts);
        // ---- Build heap ----
        log::info!("Building heap with {} unique pairs", pair_counts.len());
        let mut heap = OctonaryHeap::with_capacity(pair_counts.len());
        for (pair, pos) in where_to_update.drain() {
            let c = *pair_counts.get(&pair).unwrap_or(&0);
            if c > 0 {
                heap.push(MergeJob {
                    pair,
                    count: c as u64,
                    pos,
                });
            }
        }
        // ---- Merge loop ----
        log::info!("Starting merge loop");
        let mut merges_done = 0u32;
        let mut last_log_percent = 0u32;
        while merges_done < num_merges {
            let Some(mut top) = heap.pop() else {
                break;
            };
            // Lazy refresh: if the count changed since we queued this job, update and requeue
            let current = *pair_counts.get(&top.pair).unwrap_or(&0);
            if current <= 0 {
                // Pair no longer exists or has non-positive count, skip it
                continue;
            }
            if top.count != current as u64 {
                top.count = current as u64;
                heap.push(top);
                continue;
            }
            // Record merge
            let new_id = 256 + merges_done;
            self.merges.insert(top.pair, new_id);
            // Merge this pair in all words where it occurs
            let mut local_pos_updates: AHashMap<Pair, AHashSet<usize>> = AHashMap::new();
            for &word_idx in &top.pos {
                // Apply merge to this word and collect pair-count deltas
                let changes = words[word_idx].merge_pair(top.pair, new_id);
                // Update global pair counts based on this word's count
                for (pair, delta) in changes {
                    let delta_total = delta * counts[word_idx];
                    if delta_total != 0 {
                        *pair_counts.entry(pair).or_default() += delta_total;
                        if delta > 0 {
                            local_pos_updates.entry(pair).or_default().insert(word_idx);
                        }
                    }
                }
            }
            // Add the updated pair counts back to the heap
            for (pair, pos) in local_pos_updates {
                let cnt = *pair_counts.get(&pair).unwrap_or(&0);
                if cnt > 0 {
                    heap.push(MergeJob {
                        pair,
                        count: cnt as u64,
                        pos,
                    });
                }
            }
            merges_done += 1;
            // Log progress every 1%
            let current_percent = (merges_done * 100) / num_merges;
            if current_percent > last_log_percent {
                log::info!(
                    "Progress: {}% ({}/{} merges) - Last merge: {:?} -> {} (frequency: {})",
                    current_percent,
                    merges_done,
                    num_merges,
                    top.pair,
                    new_id,
                    top.count
                );
                last_log_percent = current_percent;
            }
        }
        log::info!("Finished training: {} merges completed", merges_done);
    }
}
/// Public methods for the Tokenizer class that will be exposed to Python.
#[pymethods]
impl Tokenizer {
    /// Create a new Tokenizer with no merges and an empty (match-nothing)
    /// split pattern; `train_from_iterator` installs the real pattern.
    #[new]
    pub fn new() -> Self {
        Self {
            merges: StdHashMap::new(),
            pattern: String::new(),
            compiled_pattern: Regex::new("").expect("Empty regex should be valid"),
        }
    }
    /// Train from a streaming iterator (parallel ingestion).
    /// We refill a Rust Vec<String> buffer under the GIL, then release the GIL
    /// to do the heavy splitting and counting **in parallel** with rayon.
    #[pyo3(signature = (iterator, vocab_size, buffer_size=8192, pattern=None))]
    #[pyo3(text_signature = "(self, iterator, vocab_size, buffer_size=8192, pattern=None)")]
    pub fn train_from_iterator(
        &mut self,
        py: pyo3::Python<'_>,
        iterator: &pyo3::Bound<'_, pyo3::PyAny>,
        vocab_size: u32,
        buffer_size: usize,
        pattern: Option<String>,
    ) -> PyResult<()> {
        // Use provided pattern or default to GPT-4 pattern
        let pattern_str = pattern.unwrap_or_else(|| GPT4_PATTERN.to_string());
        // Update the stored pattern and compile it
        self.pattern = pattern_str.clone();
        self.compiled_pattern = Regex::new(&pattern_str).map_err(|e| {
            pyo3::exceptions::PyValueError::new_err(format!("Invalid regex pattern: {}", e))
        })?;
        // Prepare a true Python iterator object
        let py_iter: pyo3::Py<pyo3::PyAny> = unsafe {
            pyo3::Py::from_owned_ptr_or_err(py, pyo3::ffi::PyObject_GetIter(iterator.as_ptr()))?
        };
        // Global chunk counts
        let mut counts: AHashMap<CompactString, i32> = AHashMap::new();
        // Temporary buffer we refill under the GIL
        let mut buf: Vec<String> = Vec::with_capacity(buffer_size);
        log::info!(
            "Processing sequences from iterator (buffer_size: {})",
            buffer_size
        );
        let mut total_sequences = 0u64;
        // Helper: refill `buf` with up to `buffer_size` strings from the Python iterator.
        // Returns Ok(true) if the iterator is exhausted, Ok(false) otherwise.
        let refill = |buf: &mut Vec<String>| -> PyResult<bool> {
            pyo3::Python::attach(|py| {
                buf.clear();
                let it = py_iter.bind(py);
                loop {
                    if buf.len() >= buffer_size {
                        return Ok(false);
                    }
                    // next(it)
                    let next_obj = unsafe {
                        pyo3::Bound::from_owned_ptr_or_opt(py, pyo3::ffi::PyIter_Next(it.as_ptr()))
                    };
                    match next_obj {
                        Some(obj) => {
                            let s: String = obj.extract()?;
                            buf.push(s);
                        }
                        None => {
                            // PyIter_Next returns NULL both at exhaustion and
                            // on error; distinguish via the thread error state.
                            if pyo3::PyErr::occurred(py) {
                                return Err(pyo3::PyErr::fetch(py));
                            } else {
                                return Ok(true); // exhausted
                            }
                        }
                    }
                }
            })
        };
        // Stream ingestion loop: refill under GIL, process without GIL (parallel)
        loop {
            let exhausted = refill(&mut buf)?;
            if buf.is_empty() && exhausted {
                break;
            }
            total_sequences += buf.len() as u64;
            let pattern = self.compiled_pattern.clone();
            // Split + count each buffered string in parallel, GIL released.
            let local: AHashMap<CompactString, i32> = py.detach(|| {
                buf.par_iter()
                    .map(|s| {
                        let mut m: AHashMap<CompactString, i32> = AHashMap::new();
                        for mat in pattern.find_iter(s) {
                            let piece = mat.expect("regex match failed").as_str();
                            *m.entry(CompactString::from(piece)).or_default() += 1;
                        }
                        m
                    })
                    .reduce(AHashMap::new, |mut a, b| {
                        for (k, v) in b {
                            *a.entry(k).or_default() += v;
                        }
                        a
                    })
            });
            // Merge local into global (single-threaded)
            for (k, v) in local {
                *counts.entry(k).or_default() += v;
            }
            if exhausted {
                break;
            }
        }
        log::info!(
            "Processed {} sequences total, {} unique",
            total_sequences,
            counts.len()
        );
        // Materialize words & counts
        let mut words = Vec::with_capacity(counts.len());
        let mut cvec = Vec::with_capacity(counts.len());
        for (chunk, c) in counts.into_iter() {
            words.push(Word::new(
                chunk.as_bytes().iter().map(|&b| b as u32).collect(),
            ));
            cvec.push(c);
        }
        self.train_core_incremental(words, cvec, vocab_size);
        Ok(())
    }
/// Return the regex pattern
pub fn get_pattern(&self) -> String {
self.pattern.clone()
}
    /// Return the vocabulary size (256 base bytes + number of merges)
    #[getter]
    pub fn vocab_size(&self) -> u32 {
        // Ids 0..=255 are raw bytes; each learned merge adds exactly one id.
        256 + self.merges.len() as u32
    }
    /// Return the mergeable ranks (token bytes -> token id / rank)
    pub fn get_mergeable_ranks(&self) -> Vec<(Vec<u8>, u32)> {
        let mut mergeable_ranks = Vec::new();
        // Build vocabulary incrementally from low to high token IDs
        let mut token_bytes: Vec<Vec<u8>> = (0..256_u32).map(|i| vec![i as u8]).collect();
        for (i, bytes) in token_bytes.iter().enumerate() {
            mergeable_ranks.push((bytes.clone(), i as u32));
        }
        // Sort merges by token id (so we can reconstruct bytes progressively)
        // -- both halves of a merge always have ids lower than the merged id.
        let mut sorted_merges: Vec<_> = self.merges.iter().collect();
        sorted_merges.sort_by_key(|&(_, &token_id)| token_id);
        for (&pair, &merged_id) in sorted_merges {
            let (left, right) = pair;
            let mut merged_bytes = token_bytes[left as usize].clone();
            merged_bytes.extend(&token_bytes[right as usize]);
            if token_bytes.len() <= merged_id as usize {
                token_bytes.resize(merged_id as usize + 1, Vec::new());
            }
            token_bytes[merged_id as usize] = merged_bytes.clone();
            mergeable_ranks.push((merged_bytes, merged_id));
        }
        mergeable_ranks
    }
    /// Encode a string into token IDs
    ///
    /// Each regex chunk is encoded independently: start from raw bytes, then
    /// repeatedly apply the earliest-learned applicable merge (lowest merged
    /// id) until no merge applies. Chunks are short, so the quadratic scan
    /// per chunk is acceptable here.
    pub fn encode(&self, text: &str) -> Vec<u32> {
        let mut all_ids = Vec::new();
        // Split text using the regex pattern
        for m in self.compiled_pattern.find_iter(text) {
            // Handle potential regex errors gracefully
            let chunk = match m {
                Ok(mat) => mat.as_str(),
                Err(e) => {
                    log::warn!("Regex match error, skipping chunk: {}", e);
                    continue;
                }
            };
            // Convert chunk to bytes then to u32 IDs
            let mut ids: Vec<u32> = chunk.bytes().map(|b| b as u32).collect();
            // Apply merges iteratively (always merge the earliest-learned pair first)
            while ids.len() >= 2 {
                // Find the pair with lowest merge index (earliest merge = lowest new_id)
                let mut best_pair: Option<(usize, Pair, u32)> = None;
                for i in 0..ids.len() - 1 {
                    let pair: Pair = (ids[i], ids[i + 1]);
                    if let Some(&new_id) = self.merges.get(&pair) {
                        if best_pair.is_none() || new_id < best_pair.unwrap().2 {
                            best_pair = Some((i, pair, new_id));
                        }
                    }
                }
                // If we found a pair to merge, apply it
                if let Some((idx, _pair, new_id)) = best_pair {
                    ids[idx] = new_id;
                    ids.remove(idx + 1);
                } else {
                    // No more merges possible
                    break;
                }
            }
            all_ids.extend(ids);
        }
        all_ids
    }
    /// Decode token IDs back to a string
    ///
    /// Rebuilds the id -> bytes table from the merges (in id order, so both
    /// halves of each merge are already resolved), concatenates the bytes for
    /// every id, and validates the result as UTF-8. Errors on unknown ids or
    /// byte sequences that are not valid UTF-8.
    pub fn decode(&self, ids: Vec<u32>) -> PyResult<String> {
        // Build reverse mapping: token_id -> bytes
        let mut vocab: Vec<Vec<u8>> = (0..256u32).map(|i| vec![i as u8]).collect();
        // Sort merges by token id to reconstruct bytes in order
        let mut sorted_merges: Vec<_> = self.merges.iter().collect();
        sorted_merges.sort_by_key(|&(_, &token_id)| token_id);
        for (&(left, right), &merged_id) in &sorted_merges {
            let mut merged_bytes = vocab
                .get(left as usize)
                .ok_or_else(|| {
                    pyo3::exceptions::PyValueError::new_err(format!(
                        "Invalid token id {} in merge",
                        left
                    ))
                })?
                .clone();
            merged_bytes.extend(vocab.get(right as usize).ok_or_else(|| {
                pyo3::exceptions::PyValueError::new_err(format!(
                    "Invalid token id {} in merge",
                    right
                ))
            })?);
            if vocab.len() <= merged_id as usize {
                vocab.resize(merged_id as usize + 1, Vec::new());
            }
            vocab[merged_id as usize] = merged_bytes;
        }
        // Convert each token id to bytes and concatenate
        let mut bytes = Vec::new();
        for &id in &ids {
            let token_bytes = vocab.get(id as usize).ok_or_else(|| {
                pyo3::exceptions::PyValueError::new_err(format!("Unknown token id: {}", id))
            })?;
            bytes.extend(token_bytes);
        }
        // Convert bytes to string (UTF-8)
        String::from_utf8(bytes).map_err(|e| {
            pyo3::exceptions::PyUnicodeDecodeError::new_err(format!(
                "Decoded bytes are not valid UTF-8: {}",
                e
            ))
        })
    }
    /// Encode multiple texts in parallel using rayon.
    /// Returns a list of token ID vectors, one per input text.
    #[pyo3(signature = (texts))]
    #[pyo3(text_signature = "(self, texts)")]
    pub fn batch_encode(&self, py: Python<'_>, texts: Vec<String>) -> PyResult<Vec<Vec<u32>>> {
        // Release Python GIL and encode in parallel using rayon
        let results = py.detach(|| {
            texts
                .par_iter()
                .map(|text| self.encode(text))
                .collect::<Vec<Vec<u32>>>()
        });
        Ok(results)
    }
}
/// Python module entry point: registers the `Tokenizer` class under `rustbpe`.
#[pymodule]
fn rustbpe(m: &Bound<'_, PyModule>) -> PyResult<()> {
    pyo3_log::init(); // forwards Rust `log` to Python's `logging`
    m.add_class::<Tokenizer>()?;
    Ok(())
}
// ============================================================================
// RUST TESTS
// ============================================================================
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_word_pairs() {
let word = Word::new(vec![1, 2, 3, 4]);
let pairs: Vec<Pair> = word.pairs().collect();
assert_eq!(pairs, vec![(1, 2), (2, 3), (3, 4)]);
}
#[test]
fn test_word_pairs_empty() {
let word = Word::new(vec![]);
let pairs: Vec<Pair> = word.pairs().collect();
assert!(pairs.is_empty());
}
#[test]
fn test_word_pairs_single() {
let word = Word::new(vec![42]);
let pairs: Vec<Pair> = word.pairs().collect();
assert!(pairs.is_empty());
}
#[test]
fn test_word_merge_pair() {
// [1, 2, 3, 1, 2] with merge (1,2) -> 99 should become [99, 3, 99]
let mut word = Word::new(vec![1, 2, 3, 1, 2]);
let _deltas = word.merge_pair((1, 2), 99);
assert_eq!(word.ids, vec![99, 3, 99]);
}
#[test]
fn test_word_merge_pair_adjacent() {
// [1, 2, 1, 2, 1, 2] -> [99, 99, 99] (non-overlapping)
let mut word = Word::new(vec![1, 2, 1, 2, 1, 2]);
let _deltas = word.merge_pair((1, 2), 99);
assert_eq!(word.ids, vec![99, 99, 99]);
}
#[test]
fn test_word_merge_no_match() {
let mut word = Word::new(vec![1, 2, 3]);
let deltas = word.merge_pair((4, 5), 99);
assert_eq!(word.ids, vec![1, 2, 3]); // unchanged
// Only delta should be for pairs that don't exist, so effectively empty useful deltas
assert!(deltas.is_empty() || deltas.iter().all(|(_, d)| *d == 0));
}
#[test]
fn test_tokenizer_new() {
let tok = Tokenizer::new();
assert!(tok.merges.is_empty());
assert!(tok.pattern.is_empty());
}
#[test]
fn test_encode_untrained_simple() {
// With no merges and empty pattern, encode returns nothing (no regex matches)
let tok = Tokenizer::new();
let ids = tok.encode("hello");
assert!(ids.is_empty()); // empty pattern matches nothing
}
#[test]
fn test_encode_with_pattern_no_merges() {
// With a simple pattern but no merges, should return raw byte values
let tok = Tokenizer {
merges: StdHashMap::new(),
pattern: r"\w+".to_string(),
compiled_pattern: Regex::new(r"\w+").unwrap(),
};
let ids = tok.encode("hi");
// 'h' = 104, 'i' = 105
assert_eq!(ids, vec![104, 105]);
}
#[test]
fn test_encode_with_merges() {
// Set up a tokenizer with one merge: (104, 105) -> 256 ('h','i' -> 256)
let mut merges = StdHashMap::new();
merges.insert((104, 105), 256); // 'hi' -> 256
let tok = Tokenizer {
merges,
pattern: r"\w+".to_string(),
compiled_pattern: Regex::new(r"\w+").unwrap(),
};
let ids = tok.encode("hi");
assert_eq!(ids, vec![256]); // merged into single token
let ids2 = tok.encode("hip");
// 'hi' merges to 256, 'p' stays as 112
assert_eq!(ids2, vec![256, 112]);
}
#[test]
fn test_get_mergeable_ranks_empty() {
let tok = Tokenizer::new();
let ranks = tok.get_mergeable_ranks();
// Should have 256 byte-level tokens
assert_eq!(ranks.len(), 256);
// First should be [0] -> 0
assert_eq!(ranks[0], (vec![0u8], 0));
// Last should be [255] -> 255
assert_eq!(ranks[255], (vec![255u8], 255));
}
#[test]
fn test_get_mergeable_ranks_with_merge() {
let mut merges = StdHashMap::new();
// Merge bytes 65 ('A') and 66 ('B') into token 256
merges.insert((65, 66), 256);
let tok = Tokenizer {
merges,
pattern: String::new(),
compiled_pattern: Regex::new("").unwrap(),
};
let ranks = tok.get_mergeable_ranks();
assert_eq!(ranks.len(), 257); // 256 bytes + 1 merge
// The merge should produce bytes [65, 66] -> 256
let last = &ranks[256];
assert_eq!(last.0, vec![65u8, 66u8]);
assert_eq!(last.1, 256);
}
#[test]
fn test_count_pairs_parallel() {
let words = vec![Word::new(vec![1, 2, 3]), Word::new(vec![1, 2, 4])];
let counts = vec![1, 2]; // first word appears 1x, second 2x
let (pair_counts, positions) = count_pairs_parallel(&words, &counts);
// (1,2) appears in both: 1*1 + 1*2 = 3
assert_eq!(pair_counts.get(&(1, 2)), Some(&3));
// (2,3) appears only in first: 1*1 = 1
assert_eq!(pair_counts.get(&(2, 3)), Some(&1));
// (2,4) appears only in second: 1*2 = 2
assert_eq!(pair_counts.get(&(2, 4)), Some(&2));
// Check positions
assert!(positions.get(&(1, 2)).unwrap().contains(&0));
assert!(positions.get(&(1, 2)).unwrap().contains(&1));
}
#[test]
fn test_train_core_incremental() {
// Simple training test with repeated patterns
let mut tok = Tokenizer {
merges: StdHashMap::new(),
pattern: String::new(),
compiled_pattern: Regex::new("").unwrap(),
};
// "ab" repeated 10 times, "cd" repeated 5 times
let words = vec![
Word::new(vec![97, 98]), // "ab"
Word::new(vec![99, 100]), // "cd"
];
let counts = vec![10, 5];
// Train with vocab_size = 257 (one merge)
tok.train_core_incremental(words, counts, 257);
// Should have merged (97, 98) since it has higher count
assert_eq!(tok.merges.len(), 1);
assert!(tok.merges.contains_key(&(97, 98)));
assert_eq!(tok.merges.get(&(97, 98)), Some(&256));
}
// ==================== Additional comprehensive tests ====================
#[test]
fn test_default_trait() {
let tok = Tokenizer::default();
assert!(tok.merges.is_empty());
assert!(tok.pattern.is_empty());
}
#[test]
fn test_vocab_size() {
let mut tok = Tokenizer::new();
assert_eq!(tok.vocab_size(), 256);
// Add some merges manually
tok.merges.insert((97, 98), 256);
assert_eq!(tok.vocab_size(), 257);
tok.merges.insert((256, 99), 257);
assert_eq!(tok.vocab_size(), 258);
}
#[test]
fn test_word_merge_overlapping_pairs() {
// "aaa" = [97, 97, 97] with merge (97, 97) -> 256
// Should become [256, 97] (non-overlapping, left-to-right)
let mut word = Word::new(vec![97, 97, 97]);
let _deltas = word.merge_pair((97, 97), 256);
assert_eq!(word.ids, vec![256, 97]);
}
#[test]
fn test_word_merge_overlapping_pairs_even() {
// "aaaa" = [97, 97, 97, 97] with merge (97, 97) -> 256
// Should become [256, 256]
let mut word = Word::new(vec![97, 97, 97, 97]);
let _deltas = word.merge_pair((97, 97), 256);
assert_eq!(word.ids, vec![256, 256]);
}
#[test]
fn test_word_merge_multiple_occurrences() {
// "abXab" where X doesn't match
let mut word = Word::new(vec![1, 2, 99, 1, 2]);
let deltas = word.merge_pair((1, 2), 256);
assert_eq!(word.ids, vec![256, 99, 256]);
// Count (1, 2) removals in deltas
let ab_removals: i32 = deltas
.iter()
.filter(|(p, _)| *p == (1, 2))
.map(|(_, d)| d)
.sum();
assert_eq!(ab_removals, -2); // two occurrences removed
}
#[test]
fn test_encode_chained_merges() {
// Set up a tokenizer with chained merges:
// (97, 97) -> 256 ('aa' -> 256)
// (256, 97) -> 257 ('aaa' effectively -> 257)
let mut merges = StdHashMap::new();
merges.insert((97, 97), 256); // 'aa' -> 256 (learned first)
merges.insert((256, 97), 257); // 'aa' + 'a' -> 257 (learned second)
let tok = Tokenizer {
merges,
pattern: r"\w+".to_string(),
compiled_pattern: Regex::new(r"\w+").unwrap(),
};
// "aaa" should encode as [257]
// Step 1: [97, 97, 97]
// Step 2: merge (97, 97) at pos 0 -> [256, 97]
// Step 3: merge (256, 97) -> [257]
let ids = tok.encode("aaa");
assert_eq!(ids, vec![257]);
// "aaaa" should encode as [256, 256]
// Because (97, 97) has lower id than (256, 97), so we merge all 'aa' pairs first
let ids = tok.encode("aaaa");
assert_eq!(ids, vec![256, 256]);
// "aaaaa" should be [257, 256]
// [97, 97, 97, 97, 97]
// -> [256, 97, 97, 97] (merge first aa)
// -> [256, 256, 97] (merge second aa)
// -> [257, 256] (merge (256, 97))
// Wait, let me recalculate...
// Actually the algorithm picks the pair with LOWEST new_id.
// (97, 97) -> 256, (256, 97) -> 257
// So 256 < 257, meaning (97, 97) is always preferred.
// [97, 97, 97, 97, 97]
// Pairs: (97,97) at 0,1,2,3. All map to 256.
// Pick leftmost (position 0): [256, 97, 97, 97]
// Pairs: (256,97)->257, (97,97)->256 at pos 1,2
// 256 < 257, pick (97,97) at pos 1: [256, 256, 97]
// Pairs: (256,256) not in merges, (256,97)->257
// Only option is 257: [256, 257]
let ids = tok.encode("aaaaa");
assert_eq!(ids, vec![256, 257]);
}
#[test]
fn test_encode_decode_roundtrip_simple() {
// Set up tokenizer with some merges
let mut merges = StdHashMap::new();
merges.insert((104, 105), 256); // 'hi' -> 256
let tok = Tokenizer {
merges,
pattern: r"\w+|\s+".to_string(),
compiled_pattern: Regex::new(r"\w+|\s+").unwrap(),
};
let text = "hi";
let ids = tok.encode(text);
let decoded = tok.decode(ids).unwrap();
assert_eq!(decoded, text);
}
#[test]
fn test_encode_decode_roundtrip_with_spaces() {
let mut merges = StdHashMap::new();
merges.insert((104, 101), 256); // 'he' -> 256
merges.insert((108, 108), 257); // 'll' -> 257
merges.insert((256, 257), 258); // 'hell' -> 258
let tok = Tokenizer {
merges,
pattern: r"\w+|\s+".to_string(),
compiled_pattern: Regex::new(r"\w+|\s+").unwrap(),
};
let text = "hello world";
let ids = tok.encode(text);
let decoded = tok.decode(ids).unwrap();
assert_eq!(decoded, text);
}
#[test]
fn test_decode_byte_level() {
// Decode raw byte tokens (no merges)
let tok = Tokenizer {
merges: StdHashMap::new(),
pattern: String::new(),
compiled_pattern: Regex::new("").unwrap(),
};
// [104, 105] = "hi"
let decoded = tok.decode(vec![104, 105]).unwrap();
assert_eq!(decoded, "hi");
}
#[test]
fn test_decode_invalid_token() {
let tok = Tokenizer::new();
// Token 300 doesn't exist (only 0-255 in base vocab)
let result = tok.decode(vec![300]);
assert!(result.is_err());
}
#[test]
fn test_train_multiple_merges() {
let mut tok = Tokenizer {
merges: StdHashMap::new(),
pattern: String::new(),
compiled_pattern: Regex::new("").unwrap(),
};
// "ab" appears 100 times, "bc" appears 50 times
// After merging "ab", the corpus becomes "X c" where X=256
// Then "Xc" (256, 99) should be merged next? No wait...
// Let's use a simpler example:
// "ab" appears 10 times
let words = vec![
Word::new(vec![97, 98]), // "ab"
];
let counts = vec![10];
// Train with vocab_size = 258 (2 merges)
// But we only have one unique pair, so only one merge will happen
tok.train_core_incremental(words, counts, 258);
assert_eq!(tok.merges.len(), 1);
}
#[test]
fn test_train_creates_chained_merges() {
let mut tok = Tokenizer {
merges: StdHashMap::new(),
pattern: String::new(),
compiled_pattern: Regex::new("").unwrap(),
};
// "aaa" = [97, 97, 97]
// First merge: (97, 97) -> 256, word becomes [256, 97]
// Second merge: (256, 97) -> 257, word becomes [257]
let words = vec![Word::new(vec![97, 97, 97])];
let counts = vec![10];
tok.train_core_incremental(words, counts, 258);
assert_eq!(tok.merges.len(), 2);
| rust | MIT | ddf848f6961a0655dc8693742fc338e5682c0d3b | 2026-01-04T20:24:34.962425Z | true |
sjb3d/caldera | https://github.com/sjb3d/caldera/blob/42fc4b496cce21e907eca7112e6d6334d35fc41a/caldera-macro/src/lib.rs | caldera-macro/src/lib.rs | extern crate proc_macro;
use proc_macro::TokenStream;
use proc_macro2::TokenStream as TokenStream2;
use quote::{format_ident, quote};
use syn::{
braced, bracketed,
parse::{Parse, ParseStream},
parse_macro_input,
punctuated::Punctuated,
token, Error, Ident, LitInt, Result, Token,
};
/// Custom keywords accepted as binding types in the `descriptor_set!` DSL.
mod kw {
    syn::custom_keyword!(Sampler);
    syn::custom_keyword!(SampledImage);
    syn::custom_keyword!(CombinedImageSampler);
    syn::custom_keyword!(StorageImage);
    syn::custom_keyword!(UniformData);
    syn::custom_keyword!(StorageBuffer);
    syn::custom_keyword!(AccelerationStructure);
}
/// The descriptor type of a single binding, as written in the macro input.
enum BindingType {
    Sampler,
    SampledImage,
    CombinedImageSampler,
    StorageImage,
    // `ty` is the Rust type whose bytes are uploaded as uniform data.
    UniformData { ty: Ident },
    StorageBuffer,
    AccelerationStructure,
}
/// One `name: Type` entry of the layout; `array` is `Some(len)` when the
/// binding was declared as `[Type; len]`.
struct Binding {
    name: Ident,
    ty: BindingType,
    array: Option<usize>,
}
/// The whole macro input: `[pub] Name { binding, ... }`.
struct Layout {
    visibility: Option<Token![pub]>,
    name: Ident,
    bindings: Punctuated<Binding, token::Comma>,
}
impl Parse for BindingType {
fn parse(input: ParseStream) -> Result<Self> {
let lookahead = input.lookahead1();
if lookahead.peek(kw::Sampler) {
input.parse::<kw::Sampler>()?;
Ok(BindingType::Sampler)
} else if lookahead.peek(kw::SampledImage) {
input.parse::<kw::SampledImage>()?;
Ok(BindingType::SampledImage)
} else if lookahead.peek(kw::CombinedImageSampler) {
input.parse::<kw::CombinedImageSampler>()?;
Ok(BindingType::CombinedImageSampler)
} else if lookahead.peek(kw::StorageImage) {
input.parse::<kw::StorageImage>()?;
Ok(BindingType::StorageImage)
} else if lookahead.peek(kw::UniformData) {
input.parse::<kw::UniformData>()?;
input.parse::<token::Lt>()?;
let ty = input.parse()?;
input.parse::<token::Gt>()?;
Ok(BindingType::UniformData { ty })
} else if lookahead.peek(kw::StorageBuffer) {
input.parse::<kw::StorageBuffer>()?;
Ok(BindingType::StorageBuffer)
} else if lookahead.peek(kw::AccelerationStructure) {
input.parse::<kw::AccelerationStructure>()?;
Ok(BindingType::AccelerationStructure)
} else {
Err(lookahead.error())
}
}
}
impl Parse for Binding {
    /// Parse `name: Type` or the array form `name: [StorageImage; N]`.
    fn parse(input: ParseStream) -> Result<Self> {
        let name = input.parse()?;
        input.parse::<token::Colon>()?;
        let lookahead = input.lookahead1();
        let (ty, array) = if lookahead.peek(token::Bracket) {
            // Array form `[Type; N]`: only StorageImage supports descriptor
            // arrays, and the declared length must be non-zero.
            let content;
            let _bracket_token: token::Bracket = bracketed!(content in input);
            let ty = content.parse()?;
            content.parse::<token::Semi>()?;
            let array_lit = content.parse::<LitInt>()?;
            let array = array_lit.base10_parse()?;
            if !matches!(ty, BindingType::StorageImage) {
                return Err(Error::new(
                    content.span(),
                    "expected `StorageImage` for descriptor array",
                ));
            }
            if array == 0 {
                return Err(Error::new(array_lit.span(), "array length must be non-zero"));
            }
            (ty, Some(array))
        } else {
            // Scalar form: a single binding of the given type.
            let ty = input.parse()?;
            (ty, None)
        };
        Ok(Self { name, ty, array })
    }
}
impl Parse for Layout {
fn parse(input: ParseStream) -> Result<Self> {
let visibility = input.parse()?;
let name = input.parse()?;
let content;
let _brace_token: token::Brace = braced!(content in input);
let bindings = content.parse_terminated(Binding::parse)?;
Ok(Self {
visibility,
name,
bindings,
})
}
}
impl Binding {
    /// Build the `DescriptorSetLayoutBinding` expression describing this
    /// binding in the generated `BINDINGS` table.
    fn get_binding(&self) -> TokenStream2 {
        match self.ty {
            BindingType::Sampler => {
                quote!(DescriptorSetLayoutBinding::Sampler)
            }
            BindingType::SampledImage => {
                quote!(DescriptorSetLayoutBinding::SampledImage)
            }
            BindingType::CombinedImageSampler => {
                quote!(DescriptorSetLayoutBinding::CombinedImageSampler)
            }
            BindingType::StorageImage => {
                // A scalar storage image is an array of length 1.
                let count = self.array.unwrap_or(1) as u32;
                quote!(DescriptorSetLayoutBinding::StorageImage { count: #count })
            }
            BindingType::UniformData { ref ty } => quote!(DescriptorSetLayoutBinding::UniformData {
                size: ::std::mem::size_of::<#ty>() as u32,
            }),
            BindingType::StorageBuffer => quote!(DescriptorSetLayoutBinding::StorageBuffer),
            BindingType::AccelerationStructure => quote!(DescriptorSetLayoutBinding::AccelerationStructure),
        }
    }
    /// Build the argument(s) that the generated `create()` method takes for
    /// this binding, plus the matching `DescriptorSetBindingData` expression.
    fn get_data(&self) -> (TokenStream2, TokenStream2) {
        match self.ty {
            BindingType::Sampler => {
                let sampler = format_ident!("{}_sampler", self.name);
                (
                    quote!(#sampler: vk::Sampler),
                    quote!(DescriptorSetBindingData::Sampler { sampler: #sampler }),
                )
            }
            BindingType::SampledImage => {
                let image_view = format_ident!("{}_image_view", self.name);
                (
                    quote!(#image_view: vk::ImageView),
                    quote!(DescriptorSetBindingData::SampledImage { image_view: #image_view }),
                )
            }
            BindingType::CombinedImageSampler => {
                // Two arguments: the image view and the sampler.
                let image_view = format_ident!("{}_image_view", self.name);
                let sampler = format_ident!("{}_sampler", self.name);
                (
                    quote!(#image_view: vk::ImageView, #sampler: vk::Sampler),
                    quote!(DescriptorSetBindingData::CombinedImageSampler { image_view: #image_view, sampler: #sampler }),
                )
            }
            BindingType::StorageImage => {
                if let Some(count) = self.array {
                    // Array binding: the generated code asserts at runtime that
                    // the passed slice matches the declared array length.
                    let image_views = format_ident!("{}_image_views", self.name);
                    (
                        quote!(#image_views: &[vk::ImageView]),
                        quote!({
                            assert_eq!(#image_views.len(), #count);
                            DescriptorSetBindingData::StorageImage { image_views: #image_views }
                        }),
                    )
                } else {
                    let image_view = format_ident!("{}_image_view", self.name);
                    (
                        quote!(#image_view: vk::ImageView),
                        quote!(DescriptorSetBindingData::StorageImage { image_views: ::std::slice::from_ref(&#image_view) }),
                    )
                }
            }
            BindingType::UniformData { ref ty } => {
                // The caller passes a closure that fills in the typed uniform
                // struct; the generated code adapts it to write raw bytes.
                let writer = format_ident!("{}_writer", self.name);
                (
                    quote!(#writer: impl Fn(&mut #ty)),
                    quote!(DescriptorSetBindingData::UniformData {
                        size: ::std::mem::size_of::<#ty>() as u32,
                        align: ::std::mem::align_of::<#ty>() as u32,
                        writer: &move |s: &mut [u8]| {
                            #writer(bytemuck::from_bytes_mut(s));
                        },
                    }),
                )
            }
            BindingType::StorageBuffer => {
                let buffer = format_ident!("{}_buffer", self.name);
                (
                    quote!(#buffer: vk::Buffer),
                    quote!(DescriptorSetBindingData::StorageBuffer { buffer: #buffer }),
                )
            }
            BindingType::AccelerationStructure => {
                let accel = format_ident!("{}_accel", self.name);
                (
                    quote!(#accel: vk::AccelerationStructureKHR),
                    quote!(DescriptorSetBindingData::AccelerationStructure { accel: #accel }),
                )
            }
        }
    }
}
/// Generate a descriptor-set helper type from a layout description.
///
/// For `descriptor_set!(Name { binding: Type, ... })` this emits a struct
/// `Name` with:
/// * `BINDINGS` — the static layout description,
/// * `layout(pool)` — the cached `vk::DescriptorSetLayout` (keyed by the
///   generated type's `TypeId`),
/// * `create(pool, ...)` — allocates and writes a `DescriptorSet`, taking one
///   argument (or argument pair) per binding in declaration order.
#[proc_macro]
pub fn descriptor_set(input: TokenStream) -> TokenStream {
    let Layout {
        visibility,
        name,
        bindings,
    } = parse_macro_input!(input as Layout);
    let binding_entries: Vec<_> = bindings.iter().map(Binding::get_binding).collect();
    let (data_args, data_entries): (Vec<_>, Vec<_>) = bindings.iter().map(Binding::get_data).unzip();
    quote!(
        #visibility struct #name {}
        impl #name {
            const BINDINGS: &'static [DescriptorSetLayoutBinding] = &[#(#binding_entries),*];
            pub fn layout(descriptor_pool: &DescriptorPool) -> vk::DescriptorSetLayout {
                descriptor_pool.get_descriptor_set_layout(
                    ::std::any::TypeId::of::<Self>(),
                    Self::BINDINGS)
            }
            pub fn create(descriptor_pool: &DescriptorPool, #(#data_args),*) -> DescriptorSet {
                let layout = Self::layout(descriptor_pool);
                let data = &[#(#data_entries),*];
                let set = descriptor_pool.create_descriptor_set(layout, data);
                DescriptorSet {
                    layout, set,
                }
            }
        }
    )
    .into()
}
| rust | MIT | 42fc4b496cce21e907eca7112e6d6334d35fc41a | 2026-01-04T20:23:56.526296Z | false |
sjb3d/caldera | https://github.com/sjb3d/caldera/blob/42fc4b496cce21e907eca7112e6d6334d35fc41a/caldera/src/descriptor.rs | caldera/src/descriptor.rs | use crate::{command_buffer::CommandBufferPool, context::*};
use arrayvec::ArrayVec;
use spark::{vk, Builder};
use std::{
any::TypeId,
cell::{Cell, RefCell},
collections::HashMap,
slice,
};
/// Round `x` up to the next multiple of `alignment`.
///
/// `alignment` must be a non-zero power of two — the bit trick below silently
/// returns wrong results otherwise, so this is checked in debug builds.
fn align_up(x: u32, alignment: u32) -> u32 {
    debug_assert!(alignment.is_power_of_two());
    (x + alignment - 1) & !(alignment - 1)
}
/// Ring of `COUNT` host-visible buffers used to stream per-frame data to the
/// GPU; one buffer per in-flight frame, all bound to a single persistently
/// mapped memory allocation.
pub(crate) struct StagingBuffer {
    context: SharedContext,
    // Minimum alignment applied to every sub-allocation.
    min_alignment: u32,
    // Device `non_coherent_atom_size` limit, used to round flush ranges.
    atom_size: u32,
    size_per_frame: u32,
    // Single memory block backing all `COUNT` buffers.
    mem: vk::DeviceMemory,
    // Persistent mapping of the whole of `mem`.
    mapping: *mut u8,
    buffers: [vk::Buffer; Self::COUNT],
    // Index of the buffer used by the current frame.
    buffer_index: usize,
    // Bump-allocator cursor within the current frame's buffer.
    next_offset: Cell<u32>,
    // Bytes used last frame (drives the flush range and UI stats).
    last_usage: u32,
}
impl StagingBuffer {
    const COUNT: usize = CommandBufferPool::COUNT;

    /// Create `COUNT` host-visible buffers (one per in-flight frame) bound to
    /// a single memory allocation that stays persistently mapped.
    pub fn new(
        context: &SharedContext,
        size_per_frame: u32,
        min_alignment: u32,
        usage_flags: vk::BufferUsageFlags,
    ) -> Self {
        let atom_size = context.physical_device_properties.limits.non_coherent_atom_size as u32;
        let mut memory_type_filter = 0xffff_ffff;
        let buffers: [vk::Buffer; Self::COUNT] = {
            let buffer_create_info = vk::BufferCreateInfo {
                size: vk::DeviceSize::from(size_per_frame),
                usage: usage_flags,
                ..Default::default()
            };
            let mut buffers = ArrayVec::new();
            for _i in 0..Self::COUNT {
                let buffer = unsafe { context.device.create_buffer(&buffer_create_info, None) }.unwrap();
                let mem_req = unsafe { context.device.get_buffer_memory_requirements(buffer) };
                assert_eq!(mem_req.size, buffer_create_info.size);
                buffers.push(buffer);
                // All buffers must be satisfiable by the single memory type
                // chosen below, so intersect their requirement bits.
                memory_type_filter &= mem_req.memory_type_bits;
            }
            buffers.into_inner().unwrap()
        };
        let mem = {
            let memory_type_index = context
                .get_memory_type_index(memory_type_filter, vk::MemoryPropertyFlags::HOST_VISIBLE)
                .unwrap();
            let memory_allocate_info = vk::MemoryAllocateInfo {
                allocation_size: (Self::COUNT * (size_per_frame as usize)) as vk::DeviceSize,
                memory_type_index,
                ..Default::default()
            };
            unsafe { context.device.allocate_memory(&memory_allocate_info, None) }.unwrap()
        };
        // Bind buffer i at offset i * size_per_frame within the shared block.
        for (i, buffer) in buffers.iter().enumerate() {
            unsafe {
                context
                    .device
                    .bind_buffer_memory(*buffer, mem, (i * (size_per_frame as usize)) as vk::DeviceSize)
            }
            .unwrap();
        }
        let mapping = unsafe { context.device.map_memory(mem, 0, vk::WHOLE_SIZE, Default::default()) }.unwrap();
        Self {
            context: SharedContext::clone(context),
            min_alignment,
            atom_size,
            size_per_frame,
            mem,
            mapping: mapping as *mut _,
            buffers,
            buffer_index: 0,
            next_offset: Cell::new(0),
            last_usage: 0,
        }
    }

    /// Rotate to the next frame's buffer and reset the allocation cursor.
    pub fn begin_frame(&mut self) {
        self.buffer_index = (self.buffer_index + 1) % Self::COUNT;
        self.next_offset = Cell::new(0);
    }

    /// Record this frame's usage and flush the written range. The memory is
    /// only guaranteed HOST_VISIBLE (not HOST_COHERENT), so an explicit flush
    /// is required before the GPU reads it.
    pub fn end_frame(&mut self) {
        self.last_usage = self.next_offset.get();
        if self.last_usage == 0 {
            return;
        }
        let mapped_ranges = [vk::MappedMemoryRange {
            memory: self.mem,
            offset: (self.buffer_index * (self.size_per_frame as usize)) as vk::DeviceSize,
            // Flush sizes must be rounded up to nonCoherentAtomSize.
            size: vk::DeviceSize::from(align_up(self.last_usage, self.atom_size)),
            ..Default::default()
        }];
        unsafe { self.context.device.flush_mapped_memory_ranges(&mapped_ranges) }.unwrap();
    }

    /// The buffer backing the current frame's allocations.
    pub fn get_buffer(&self) -> vk::Buffer {
        self.buffers[self.buffer_index]
    }

    /// Sub-allocate `size` bytes, aligned to `align` and at least
    /// `min_alignment`, from the current frame's buffer.
    ///
    /// Returns the mapped slice to write into and its byte offset within the
    /// buffer, or `None` when the per-frame budget is exhausted.
    pub fn alloc(&self, size: u32, align: u32) -> Option<(&mut [u8], u32)> {
        let base = self.next_offset.get();
        let aligned_base = align_up(base, self.min_alignment.max(align));
        let end = aligned_base + size;
        if end <= self.size_per_frame {
            // Advance the cursor only on success, so a failed oversized
            // request cannot push `next_offset` (and hence the range flushed
            // by `end_frame`) past the end of the buffer.
            self.next_offset.set(end);
            Some((
                // SAFETY: `aligned_base + size <= size_per_frame`, so this
                // range lies inside the persistently mapped region of buffer
                // `buffer_index`, and the bump cursor guarantees ranges handed
                // out within a frame do not overlap.
                unsafe {
                    slice::from_raw_parts_mut(
                        self.mapping
                            .add(self.buffer_index * (self.size_per_frame as usize))
                            .add(aligned_base as usize),
                        size as usize,
                    )
                },
                // BUGFIX: report the aligned offset the returned slice
                // actually starts at. Previously the unaligned `base` was
                // returned, which pointed before the written data whenever
                // alignment padding was inserted.
                aligned_base,
            ))
        } else {
            None
        }
    }

    /// Emit one usage row (label + progress bar) into an egui stats table.
    pub fn ui_stats_table_rows(&self, ui: &mut egui::Ui, title: &str) {
        ui.label(title);
        ui.add(egui::ProgressBar::new(
            (self.last_usage as f32) / (self.size_per_frame as f32),
        ));
        ui.end_row();
    }
}
impl Drop for StagingBuffer {
    fn drop(&mut self) {
        // Destroy the buffers first, then unmap and free the memory that
        // backed them; the device handle is kept alive by `context`.
        unsafe {
            for &buffer in self.buffers.iter() {
                self.context.device.destroy_buffer(buffer, None);
            }
            self.context.device.unmap_memory(self.mem);
            self.context.device.free_memory(self.mem, None);
        }
    }
}
/// A descriptor set together with the layout it was allocated with.
#[derive(Clone, Copy)]
pub struct DescriptorSet {
    pub layout: vk::DescriptorSetLayout,
    pub set: vk::DescriptorSet,
}
/// Static description of one binding slot in a descriptor set layout.
#[derive(Debug, Clone, Copy)]
pub enum DescriptorSetLayoutBinding {
    Sampler,
    SampledImage,
    CombinedImageSampler,
    // An array of `count` storage images.
    StorageImage { count: u32 },
    // `size` bytes of uniform data (inline uniform block or uniform buffer).
    UniformData { size: u32 },
    StorageBuffer,
    AccelerationStructure,
}
/// Per-binding data used to write a descriptor set; variants mirror
/// `DescriptorSetLayoutBinding`.
pub enum DescriptorSetBindingData<'a> {
    Sampler {
        sampler: vk::Sampler,
    },
    SampledImage {
        image_view: vk::ImageView,
    },
    CombinedImageSampler {
        image_view: vk::ImageView,
        sampler: vk::Sampler,
    },
    StorageImage {
        image_views: &'a [vk::ImageView],
    },
    UniformData {
        size: u32,
        align: u32,
        // Callback that fills the destination slice with `size` bytes.
        writer: &'a dyn Fn(&mut [u8]),
    },
    StorageBuffer {
        buffer: vk::Buffer,
    },
    AccelerationStructure {
        accel: vk::AccelerationStructureKHR,
    },
}
/// Cache of descriptor set layouts keyed by `TypeId` (one entry per macro
/// generated descriptor set type).
struct DescriptorSetLayoutCache {
    context: SharedContext,
    // Whether uniform data uses VK_EXT_inline_uniform_block instead of a buffer.
    use_inline_uniform_block: bool,
    layouts: HashMap<TypeId, vk::DescriptorSetLayout>,
}
impl DescriptorSetLayoutCache {
    fn new(context: &SharedContext, use_inline_uniform_block: bool) -> Self {
        Self {
            context: SharedContext::clone(context),
            use_inline_uniform_block,
            layouts: HashMap::new(),
        }
    }

    /// Return the cached layout for `key`, creating it from `bindings` on
    /// first use.
    pub fn get_layout(&mut self, key: TypeId, bindings: &[DescriptorSetLayoutBinding]) -> vk::DescriptorSetLayout {
        let device = &self.context.device;
        let use_inline_uniform_block = self.use_inline_uniform_block;
        *self.layouts.entry(key).or_insert_with(|| {
            // Every binding uses ALL stage flags; only the descriptor type and
            // count differ per variant, so compute those and share the rest.
            let bindings_vk: Vec<_> = bindings
                .iter()
                .enumerate()
                .map(|(slot, binding)| {
                    let (descriptor_type, descriptor_count) = match *binding {
                        DescriptorSetLayoutBinding::Sampler => (vk::DescriptorType::SAMPLER, 1),
                        DescriptorSetLayoutBinding::SampledImage => (vk::DescriptorType::SAMPLED_IMAGE, 1),
                        DescriptorSetLayoutBinding::CombinedImageSampler => {
                            (vk::DescriptorType::COMBINED_IMAGE_SAMPLER, 1)
                        }
                        DescriptorSetLayoutBinding::StorageImage { count } => {
                            (vk::DescriptorType::STORAGE_IMAGE, count)
                        }
                        DescriptorSetLayoutBinding::UniformData { size } => {
                            if use_inline_uniform_block {
                                // For inline uniform blocks the descriptor
                                // count is the block's byte size.
                                (vk::DescriptorType::INLINE_UNIFORM_BLOCK_EXT, size)
                            } else {
                                (vk::DescriptorType::UNIFORM_BUFFER, 1)
                            }
                        }
                        DescriptorSetLayoutBinding::StorageBuffer => (vk::DescriptorType::STORAGE_BUFFER, 1),
                        DescriptorSetLayoutBinding::AccelerationStructure => {
                            (vk::DescriptorType::ACCELERATION_STRUCTURE_KHR, 1)
                        }
                    };
                    vk::DescriptorSetLayoutBinding {
                        binding: slot as u32,
                        descriptor_type,
                        descriptor_count,
                        stage_flags: vk::ShaderStageFlags::ALL,
                        ..Default::default()
                    }
                })
                .collect();
            let create_info = vk::DescriptorSetLayoutCreateInfo::builder().p_bindings(&bindings_vk);
            unsafe { device.create_descriptor_set_layout(&create_info, None) }.unwrap()
        })
    }
}
impl Drop for DescriptorSetLayoutCache {
    fn drop(&mut self) {
        // Destroy every cached layout, leaving the map empty.
        let device = &self.context.device;
        self.layouts
            .drain()
            .for_each(|(_key, layout)| unsafe { device.destroy_descriptor_set_layout(layout, None) });
    }
}
/// Ring of per-frame descriptor pools plus the shared layout cache and an
/// optional uniform-data staging buffer (used when inline uniform blocks are
/// not available).
pub struct DescriptorPool {
    context: SharedContext,
    layout_cache: RefCell<DescriptorSetLayoutCache>,
    pools: [vk::DescriptorPool; Self::COUNT],
    // Index of the pool used by the current frame.
    pool_index: usize,
    // `None` when inline uniform blocks are used instead of a buffer.
    uniform_data_pool: Option<StagingBuffer>,
}
impl DescriptorPool {
const COUNT: usize = CommandBufferPool::COUNT;
// per frame maximums
const MAX_DESCRIPTORS_PER_FRAME: u32 = 64 * 1024;
const MAX_SETS_PER_FRAME: u32 = 64 * 1024;
const MAX_UNIFORM_DATA_PER_FRAME: u32 = 1 * 1024 * 1024;
const MAX_UNIFORM_DATA_PER_SET: usize = 2 * 1024;
const MAX_DESCRIPTORS_PER_SET: usize = 16;
    /// Create the per-frame descriptor pools sized to the `MAX_*` limits
    /// above. Uniform data is backed either by inline uniform blocks (when
    /// the device feature is present) or by a host-visible staging buffer.
    pub fn new(context: &SharedContext) -> Self {
        let use_inline_uniform_block = context
            .physical_device_features
            .inline_uniform_block
            .inline_uniform_block
            .as_bool();
        if use_inline_uniform_block {
            println!("using inline uniform block for uniform data");
        }
        let layout_cache = DescriptorSetLayoutCache::new(context, use_inline_uniform_block);
        let pools = {
            let mut descriptor_pool_sizes = Vec::new();
            descriptor_pool_sizes.push(vk::DescriptorPoolSize {
                ty: vk::DescriptorType::SAMPLER,
                descriptor_count: Self::MAX_DESCRIPTORS_PER_FRAME,
            });
            descriptor_pool_sizes.push(vk::DescriptorPoolSize {
                ty: vk::DescriptorType::SAMPLED_IMAGE,
                descriptor_count: Self::MAX_DESCRIPTORS_PER_FRAME,
            });
            descriptor_pool_sizes.push(vk::DescriptorPoolSize {
                ty: vk::DescriptorType::COMBINED_IMAGE_SAMPLER,
                descriptor_count: Self::MAX_DESCRIPTORS_PER_FRAME,
            });
            descriptor_pool_sizes.push(vk::DescriptorPoolSize {
                ty: vk::DescriptorType::STORAGE_IMAGE,
                descriptor_count: Self::MAX_DESCRIPTORS_PER_FRAME,
            });
            // Inline uniform blocks are counted in bytes; uniform buffers in
            // descriptors.
            descriptor_pool_sizes.push(if use_inline_uniform_block {
                vk::DescriptorPoolSize {
                    ty: vk::DescriptorType::INLINE_UNIFORM_BLOCK_EXT,
                    descriptor_count: Self::MAX_UNIFORM_DATA_PER_FRAME,
                }
            } else {
                vk::DescriptorPoolSize {
                    ty: vk::DescriptorType::UNIFORM_BUFFER,
                    descriptor_count: Self::MAX_DESCRIPTORS_PER_FRAME,
                }
            });
            descriptor_pool_sizes.push(vk::DescriptorPoolSize {
                ty: vk::DescriptorType::STORAGE_BUFFER,
                descriptor_count: Self::MAX_DESCRIPTORS_PER_FRAME,
            });
            // Only request acceleration-structure descriptors when the device
            // feature is enabled.
            if context
                .physical_device_features
                .acceleration_structure
                .acceleration_structure
                .as_bool()
            {
                descriptor_pool_sizes.push(vk::DescriptorPoolSize {
                    ty: vk::DescriptorType::ACCELERATION_STRUCTURE_KHR,
                    descriptor_count: Self::MAX_DESCRIPTORS_PER_FRAME,
                });
            }
            let mut inline_uniform_block_create_info = vk::DescriptorPoolInlineUniformBlockCreateInfoEXT::builder()
                .max_inline_uniform_block_bindings(if use_inline_uniform_block {
                    Self::MAX_DESCRIPTORS_PER_FRAME
                } else {
                    0
                });
            let descriptor_pool_create_info = vk::DescriptorPoolCreateInfo::builder()
                .max_sets(Self::MAX_SETS_PER_FRAME)
                .p_pool_sizes(&descriptor_pool_sizes)
                .insert_next(&mut inline_uniform_block_create_info);
            let mut pools = ArrayVec::new();
            for _i in 0..Self::COUNT {
                pools.push(
                    unsafe {
                        context
                            .device
                            .create_descriptor_pool(&descriptor_pool_create_info, None)
                    }
                    .unwrap(),
                );
            }
            pools.into_inner().unwrap()
        };
        Self {
            context: SharedContext::clone(context),
            layout_cache: RefCell::new(layout_cache),
            pools,
            pool_index: 0,
            uniform_data_pool: if use_inline_uniform_block {
                None
            } else {
                // Uniform-buffer fallback: clamp the per-frame size to the
                // device's maximum uniform buffer range and honour its offset
                // alignment.
                let size_per_frame = Self::MAX_UNIFORM_DATA_PER_FRAME
                    .min(context.physical_device_properties.limits.max_uniform_buffer_range);
                let min_alignment = context
                    .physical_device_properties
                    .limits
                    .min_uniform_buffer_offset_alignment as u32;
                Some(StagingBuffer::new(
                    context,
                    size_per_frame,
                    min_alignment,
                    vk::BufferUsageFlags::UNIFORM_BUFFER,
                ))
            },
        }
    }
    /// Reset the current frame's descriptor pool (freeing all sets allocated
    /// the last time this pool index was in use) and rotate the uniform
    /// staging buffer, if present.
    // NOTE(review): `pool_index` itself is not advanced here — presumably it
    // is rotated elsewhere (e.g. an end-of-frame method outside this chunk);
    // confirm before relying on it.
    pub fn begin_frame(&mut self) {
        unsafe {
            self.context
                .device
                .reset_descriptor_pool(self.pools[self.pool_index], vk::DescriptorPoolResetFlags::empty())
        }
        .unwrap();
        if let Some(uniform_data_pool) = self.uniform_data_pool.as_mut() {
            uniform_data_pool.begin_frame();
        }
    }
    /// Look up (or lazily create and cache) the descriptor set layout
    /// identified by `key`; callers must pass the same `bindings` for every
    /// call with the same key.
    pub fn get_descriptor_set_layout(
        &self,
        key: TypeId,
        bindings: &[DescriptorSetLayoutBinding],
    ) -> vk::DescriptorSetLayout {
        self.layout_cache.borrow_mut().get_layout(key, bindings)
    }
pub fn create_descriptor_set(
&self,
layout: vk::DescriptorSetLayout,
data: &[DescriptorSetBindingData],
) -> vk::DescriptorSet {
let descriptor_set_allocate_info = vk::DescriptorSetAllocateInfo::builder()
.descriptor_pool(self.pools[self.pool_index])
.p_set_layouts(slice::from_ref(&layout));
let descriptor_set = unsafe {
self.context
.device
.allocate_descriptor_sets_single(&descriptor_set_allocate_info)
}
.unwrap();
let mut buffer_info = ArrayVec::<_, { Self::MAX_DESCRIPTORS_PER_SET }>::new();
let mut image_info = ArrayVec::<_, { Self::MAX_DESCRIPTORS_PER_SET }>::new();
let mut writes = ArrayVec::<_, { Self::MAX_DESCRIPTORS_PER_SET }>::new();
let mut inline_writes = ArrayVec::<_, { Self::MAX_DESCRIPTORS_PER_SET }>::new();
let mut inline_uniform_data = ArrayVec::<u8, { Self::MAX_UNIFORM_DATA_PER_SET }>::new();
let mut acceleration_structure_writes = ArrayVec::<_, { Self::MAX_DESCRIPTORS_PER_SET }>::new();
for (i, data) in data.iter().enumerate() {
match data {
DescriptorSetBindingData::Sampler { sampler } => {
image_info.push(vk::DescriptorImageInfo {
sampler: *sampler,
image_view: vk::ImageView::null(),
image_layout: vk::ImageLayout::UNDEFINED,
});
writes.push(vk::WriteDescriptorSet {
dst_set: descriptor_set,
dst_binding: i as u32,
descriptor_count: 1,
descriptor_type: vk::DescriptorType::SAMPLER,
p_image_info: image_info.last().unwrap(),
..Default::default()
});
}
DescriptorSetBindingData::SampledImage { image_view } => {
image_info.push(vk::DescriptorImageInfo {
sampler: vk::Sampler::null(),
image_view: *image_view,
image_layout: vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
});
writes.push(vk::WriteDescriptorSet {
dst_set: descriptor_set,
dst_binding: i as u32,
descriptor_count: 1,
descriptor_type: vk::DescriptorType::SAMPLED_IMAGE,
p_image_info: image_info.last().unwrap(),
..Default::default()
});
}
DescriptorSetBindingData::CombinedImageSampler { image_view, sampler } => {
image_info.push(vk::DescriptorImageInfo {
sampler: *sampler,
image_view: *image_view,
image_layout: vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
});
writes.push(vk::WriteDescriptorSet {
dst_set: descriptor_set,
dst_binding: i as u32,
descriptor_count: 1,
descriptor_type: vk::DescriptorType::COMBINED_IMAGE_SAMPLER,
p_image_info: image_info.last().unwrap(),
..Default::default()
});
}
DescriptorSetBindingData::StorageImage { image_views } => {
let offset = image_info.len();
for image_view in image_views.iter() {
image_info.push(vk::DescriptorImageInfo {
sampler: vk::Sampler::null(),
image_view: *image_view,
image_layout: vk::ImageLayout::GENERAL,
});
}
writes.push(vk::WriteDescriptorSet {
dst_set: descriptor_set,
dst_binding: i as u32,
descriptor_count: image_views.len() as u32,
descriptor_type: vk::DescriptorType::STORAGE_IMAGE,
p_image_info: image_info.get(offset).unwrap(),
..Default::default()
});
}
DescriptorSetBindingData::UniformData { size, align, writer } => {
let (align, size) = (*align, *size);
if let Some(uniform_data_pool) = self.uniform_data_pool.as_ref() {
// write uniform data into buffer
let (addr, offset) = uniform_data_pool.alloc(size, align).unwrap();
writer(addr);
buffer_info.push(vk::DescriptorBufferInfo {
buffer: uniform_data_pool.get_buffer(),
offset: vk::DeviceSize::from(offset),
range: vk::DeviceSize::from(size),
});
writes.push(vk::WriteDescriptorSet {
dst_set: descriptor_set,
dst_binding: i as u32,
descriptor_count: 1,
descriptor_type: vk::DescriptorType::UNIFORM_BUFFER,
p_buffer_info: buffer_info.last().unwrap(),
..Default::default()
});
} else {
// write uniform data to the stack
let start_offset = inline_uniform_data.len();
let end_offset = start_offset + ((align + size) as usize);
if end_offset > inline_uniform_data.capacity() {
panic!("not enough space to write inline uniform data");
}
let block = unsafe {
inline_uniform_data.set_len(end_offset);
let start_ptr = inline_uniform_data.as_mut_ptr().add(start_offset);
let align_offset = start_ptr.align_offset(align as usize);
slice::from_raw_parts_mut(start_ptr.add(align_offset), size as usize)
};
writer(block);
inline_writes.push(vk::WriteDescriptorSetInlineUniformBlockEXT {
data_size: size,
p_data: block.as_ptr() as *const _,
..Default::default()
});
writes.push(vk::WriteDescriptorSet {
dst_set: descriptor_set,
dst_binding: i as u32,
descriptor_count: size,
descriptor_type: vk::DescriptorType::INLINE_UNIFORM_BLOCK_EXT,
p_next: inline_writes.last().unwrap() as *const _ as *const _,
..Default::default()
});
}
}
DescriptorSetBindingData::StorageBuffer { buffer } => {
buffer_info.push(vk::DescriptorBufferInfo {
buffer: *buffer,
offset: 0,
range: vk::WHOLE_SIZE,
});
writes.push(vk::WriteDescriptorSet {
dst_set: descriptor_set,
dst_binding: i as u32,
descriptor_count: 1,
descriptor_type: vk::DescriptorType::STORAGE_BUFFER,
p_buffer_info: buffer_info.last().unwrap(),
..Default::default()
});
}
DescriptorSetBindingData::AccelerationStructure { accel } => {
acceleration_structure_writes.push(vk::WriteDescriptorSetAccelerationStructureKHR {
acceleration_structure_count: 1,
p_acceleration_structures: accel,
..Default::default()
});
writes.push(vk::WriteDescriptorSet {
dst_set: descriptor_set,
dst_binding: i as u32,
descriptor_count: 1,
descriptor_type: vk::DescriptorType::ACCELERATION_STRUCTURE_KHR,
p_next: acceleration_structure_writes.last().unwrap() as *const _ as *const _,
..Default::default()
});
}
}
}
unsafe { self.context.device.update_descriptor_sets(&writes, &[]) };
descriptor_set
}
/// Advances to the next descriptor pool in the ring for the coming frame and
/// lets the optional uniform data pool retire its per-frame state.
pub fn end_frame(&mut self) {
    // Cycle through the ring of pools so descriptor sets of in-flight frames
    // are not reused too early.
    let next_index = (self.pool_index + 1) % Self::COUNT;
    self.pool_index = next_index;
    if let Some(pool) = self.uniform_data_pool.as_mut() {
        pool.end_frame();
    }
}
/// Appends this pool's statistics rows to an egui table (no-op when there is
/// no uniform data pool).
pub fn ui_stats_table_rows(&self, ui: &mut egui::Ui) {
    match self.uniform_data_pool.as_ref() {
        Some(pool) => pool.ui_stats_table_rows(ui, "uniform data"),
        None => {}
    }
}
}
impl Drop for DescriptorPool {
    /// Destroys every Vulkan descriptor pool owned by this object.
    fn drop(&mut self) {
        for &pool in self.pools.iter() {
            // SAFETY: the pools were created from this context's device;
            // dropping implies no further use of their descriptor sets.
            unsafe { self.context.device.destroy_descriptor_pool(pool, None) };
        }
    }
}
| rust | MIT | 42fc4b496cce21e907eca7112e6d6334d35fc41a | 2026-01-04T20:23:56.526296Z | false |
sjb3d/caldera | https://github.com/sjb3d/caldera/blob/42fc4b496cce21e907eca7112e6d6334d35fc41a/caldera/src/swapchain.rs | caldera/src/swapchain.rs | use crate::{context::*, maths::*};
use spark::{vk, Builder};
use std::{cmp, slice};
/// Owns a Vulkan swapchain plus everything needed to acquire and present its images.
pub struct Swapchain {
    context: SharedContext,
    // Current swapchain handle; replaced wholesale by `recreate`.
    swapchain: vk::SwapchainKHR,
    // Format/colour space chosen in `create` (8-bit sRGB, see the `find` there).
    surface_format: vk::SurfaceFormatKHR,
    // Swapchain extent in pixels.
    size: UVec2,
    // Images owned by the swapchain, tagged with a uid fresh per (re)creation.
    images: Vec<UniqueImage>,
}
/// Outcome of `Swapchain::acquire`.
pub enum SwapchainAcquireResult {
    /// The swapchain is out of date; no image was acquired, recreate before rendering.
    RecreateNow,
    /// An image was acquired but the swapchain is suboptimal; recreate when convenient.
    RecreateSoon(UniqueImage),
    /// An image was acquired and the swapchain matches the surface.
    Ok(UniqueImage),
}
impl Swapchain {
    /// Lower bound on the number of swapchain images requested (clamped up by
    /// the surface's own minimum in `create`).
    const MIN_IMAGE_COUNT: u32 = 2;

    /// Creates a swapchain for the context's surface, returning the handle,
    /// the chosen surface format and the extent in pixels.
    ///
    /// Panics if the queue family cannot present to the surface or if no
    /// 8-bit sRGB surface format is available.
    fn create(
        context: &Context,
        window_extent: vk::Extent2D,
        usage: vk::ImageUsageFlags,
        old_swapchain: vk::SwapchainKHR,
    ) -> (vk::SwapchainKHR, vk::SurfaceFormatKHR, UVec2) {
        let surface = context.surface;
        let surface_capabilities = unsafe {
            context
                .instance
                .get_physical_device_surface_capabilities_khr(context.physical_device, surface)
        }
        .unwrap();
        // A current extent of (u32::MAX, u32::MAX) means the surface takes its
        // size from the swapchain, so fall back to the window size.
        let mut extent = surface_capabilities.current_extent;
        if extent.width == u32::MAX && extent.height == u32::MAX {
            extent = window_extent;
        }
        let surface_supported = unsafe {
            context.instance.get_physical_device_surface_support_khr(
                context.physical_device,
                context.queue_family_index,
                surface,
            )
        }
        .unwrap();
        if !surface_supported {
            panic!("swapchain surface not supported");
        }
        let surface_formats = unsafe {
            context
                .instance
                .get_physical_device_surface_formats_khr_to_vec(context.physical_device, surface)
        }
        .unwrap();
        // Accept an 8-bit sRGB format in either channel order.
        let surface_format = surface_formats
            .iter()
            .find(|sf| match (sf.format, sf.color_space) {
                (vk::Format::R8G8B8A8_SRGB, vk::ColorSpaceKHR::SRGB_NONLINEAR) => true,
                (vk::Format::B8G8R8A8_SRGB, vk::ColorSpaceKHR::SRGB_NONLINEAR) => true,
                _ => false,
            })
            .copied()
            .expect("no supported swapchain format found");
        // Never request fewer images than the surface requires.
        let min_image_count = cmp::max(Self::MIN_IMAGE_COUNT, surface_capabilities.min_image_count);
        let swapchain_create_info = vk::SwapchainCreateInfoKHR::builder()
            .surface(surface)
            .min_image_count(min_image_count)
            .image_format(surface_format.format)
            .image_color_space(surface_format.color_space)
            .image_extent(extent)
            .image_array_layers(1)
            .image_usage(usage)
            .p_queue_family_indices(slice::from_ref(&context.queue_family_index))
            .pre_transform(vk::SurfaceTransformFlagsKHR::IDENTITY)
            .composite_alpha(vk::CompositeAlphaFlagsKHR::OPAQUE)
            .present_mode(vk::PresentModeKHR::FIFO)
            .clipped(true)
            .old_swapchain(old_swapchain);
        let swapchain = unsafe { context.device.create_swapchain_khr(&swapchain_create_info, None) }.unwrap();
        (swapchain, surface_format, UVec2::new(extent.width, extent.height))
    }

    /// Creates the initial swapchain and fetches its images.
    pub fn new(context: &SharedContext, window_extent: vk::Extent2D, usage: vk::ImageUsageFlags) -> Self {
        let (swapchain, surface_format, size) =
            Swapchain::create(context, window_extent, usage, vk::SwapchainKHR::null());
        let images = unsafe { context.device.get_swapchain_images_khr_to_vec(swapchain) }.unwrap();
        // A fresh uid is allocated per swapchain and tagged onto every image.
        let uid = context.allocate_handle_uid();
        Swapchain {
            context: SharedContext::clone(context),
            swapchain,
            surface_format,
            size,
            images: images.iter().map(|&im| Unique::new(im, uid)).collect(),
        }
    }

    /// Rebuilds the swapchain (e.g. after a resize), destroying the old handle
    /// and replacing the image list under a new uid.
    pub fn recreate(&mut self, window_extent: vk::Extent2D, usage: vk::ImageUsageFlags) {
        let (swapchain, surface_format, size) = Swapchain::create(&self.context, window_extent, usage, self.swapchain);
        unsafe { self.context.device.destroy_swapchain_khr(self.swapchain, None) };
        let images = unsafe { self.context.device.get_swapchain_images_khr_to_vec(swapchain) }.unwrap();
        let uid = self.context.allocate_handle_uid();
        self.swapchain = swapchain;
        self.surface_format = surface_format;
        self.size = size;
        self.images = images.iter().map(|&im| Unique::new(im, uid)).collect();
    }

    /// Acquires the next swapchain image (blocking without timeout), signalling
    /// the given semaphore when it is ready.  Maps Vulkan results onto
    /// `SwapchainAcquireResult`; any unexpected result panics.
    pub fn acquire(&self, image_available_semaphore: vk::Semaphore) -> SwapchainAcquireResult {
        let mut image_index = 0;
        let res = unsafe {
            self.context.device.acquire_next_image_khr(
                self.swapchain,
                u64::MAX,
                image_available_semaphore,
                vk::Fence::null(),
                &mut image_index,
            )
        };
        match res {
            Ok(vk::Result::SUCCESS) => SwapchainAcquireResult::Ok(self.images[image_index as usize]),
            Ok(vk::Result::SUBOPTIMAL_KHR) => SwapchainAcquireResult::RecreateSoon(self.images[image_index as usize]),
            Ok(err) => panic!("failed to acquire next image {}", err),
            Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => SwapchainAcquireResult::RecreateNow,
            Err(err) => panic!("failed to acquire next image {}", err),
        }
    }

    /// Returns the format of the swapchain images.
    pub fn get_format(&self) -> vk::Format {
        self.surface_format.format
    }

    /// Returns the swapchain extent in pixels.
    pub fn get_size(&self) -> UVec2 {
        self.size
    }

    /// Queues `image` for presentation once the semaphore is signalled.
    /// Suboptimal/out-of-date results are tolerated here; they surface again
    /// on the next `acquire`.
    pub fn present(&self, image: UniqueImage, rendering_finished_semaphore: vk::Semaphore) {
        // Recover the swapchain index of this image by identity search.
        let image_index = self.images.iter().position(|&x| x == image).unwrap() as u32;
        let present_info = vk::PresentInfoKHR::builder()
            .p_wait_semaphores(slice::from_ref(&rendering_finished_semaphore))
            .p_swapchains(slice::from_ref(&self.swapchain), slice::from_ref(&image_index), None);
        match unsafe { self.context.device.queue_present_khr(self.context.queue, &present_info) } {
            Ok(vk::Result::SUCCESS) | Ok(vk::Result::SUBOPTIMAL_KHR) | Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => {}
            Ok(err) | Err(err) => panic!("failed to present {}", err),
        }
    }
}
impl Drop for Swapchain {
    /// Destroys the underlying Vulkan swapchain handle.
    fn drop(&mut self) {
        let device = &self.context.device;
        // SAFETY: the swapchain was created from this device.
        unsafe { device.destroy_swapchain_khr(self.swapchain, None) };
    }
}
| rust | MIT | 42fc4b496cce21e907eca7112e6d6334d35fc41a | 2026-01-04T20:23:56.526296Z | false |
sjb3d/caldera | https://github.com/sjb3d/caldera/blob/42fc4b496cce21e907eca7112e6d6334d35fc41a/caldera/src/resource.rs | caldera/src/resource.rs | use crate::prelude::*;
use bytemuck::Contiguous;
use slotmap::{new_key_type, SlotMap};
use spark::{vk, Builder, Device};
use std::{
slice,
sync::{Arc, Mutex},
};
new_key_type! {
    // Slotmap key identifying a buffer tracked by `Resources`.
    pub struct BufferId;
    // Slotmap key identifying an image tracked by `Resources`.
    pub struct ImageId;
    // Slotmap key identifying a sampler tracked by `Resources`.
    pub struct SamplerId;
}
/// Lifecycle of a tracked buffer: first merely described (accumulating usage
/// declarations), then realized as an allocated or imported Vulkan buffer.
pub(crate) enum BufferResource {
    Described {
        desc: BufferDesc,
        // Union of all usages declared so far; fixed once the buffer is allocated.
        all_usage: BufferUsage,
    },
    Active {
        desc: BufferDesc,
        // Backing allocation; `None` for imported buffers.
        alloc: Option<Alloc>,
        buffer: UniqueBuffer,
        // Optional acceleration structure handle for this buffer
        // (populated from `get_buffer_accel` at creation — see `Resources::create_buffer`).
        accel: Option<vk::AccelerationStructureKHR>,
        // Slot in the bindless descriptor set, when bindless is enabled.
        bindless_id: Option<BindlessId>,
        // Usage the buffer was most recently transitioned to.
        current_usage: BufferUsage,
        // All usages declared up front; later usage must be a subset of this.
        all_usage_check: BufferUsage,
    },
}
impl BufferResource {
    /// Returns the creation descriptor; valid in both states.
    pub fn desc(&self) -> &BufferDesc {
        match self {
            BufferResource::Described { desc, .. } => desc,
            BufferResource::Active { desc, .. } => desc,
        }
    }

    /// Returns the backing allocation (`None` for imported buffers).
    ///
    /// # Panics
    /// Panics if the buffer has only been described, not yet allocated.
    pub fn alloc(&self) -> Option<Alloc> {
        match self {
            BufferResource::Described { .. } => panic!("buffer is only described"),
            BufferResource::Active { alloc, .. } => *alloc,
        }
    }

    /// Returns the Vulkan buffer handle.
    ///
    /// # Panics
    /// Panics if the buffer has only been described, not yet allocated.
    pub fn buffer(&self) -> UniqueBuffer {
        match self {
            BufferResource::Described { .. } => panic!("buffer is only described"),
            BufferResource::Active { buffer, .. } => *buffer,
        }
    }

    /// Returns the acceleration structure built over this buffer, if any.
    ///
    /// # Panics
    /// Panics if the buffer has only been described, not yet allocated.
    pub fn accel(&self) -> Option<vk::AccelerationStructureKHR> {
        match self {
            BufferResource::Described { .. } => panic!("buffer is only described"),
            BufferResource::Active { accel, .. } => *accel,
        }
    }

    /// Returns the buffer's slot in the bindless descriptor set, if any.
    ///
    /// # Panics
    /// Panics if the buffer has only been described, not yet allocated.
    pub fn bindless_id(&self) -> Option<BindlessId> {
        match self {
            BufferResource::Described { .. } => panic!("buffer is only described"),
            BufferResource::Active { bindless_id, .. } => *bindless_id,
        }
    }

    /// Declares that the buffer will be used as `usage`.  While only described
    /// this accumulates into `all_usage`; once active it merely checks that
    /// `usage` was declared before allocation.
    pub fn declare_usage(&mut self, usage: BufferUsage) {
        match self {
            BufferResource::Described { ref mut all_usage, .. } => {
                *all_usage |= usage;
            }
            BufferResource::Active { all_usage_check, .. } => {
                if !all_usage_check.contains(usage) {
                    panic!("buffer usage {:?} was not declared in {:?}", usage, all_usage_check);
                }
            }
        }
    }

    /// Transitions the buffer to `new_usage`, emitting a pipeline barrier when
    /// the usage changes (or when the usage category does not allow overlap).
    ///
    /// # Panics
    /// Panics if the buffer is not active, or if `new_usage` was not declared
    /// at allocation time.
    pub fn transition_usage(&mut self, new_usage: BufferUsage, device: &Device, cmd: vk::CommandBuffer) {
        match self {
            BufferResource::Active {
                buffer,
                ref mut current_usage,
                all_usage_check,
                ..
            } => {
                if !all_usage_check.contains(new_usage) {
                    panic!("cannot set usage that buffer was not allocated with");
                }
                if *current_usage != new_usage || !new_usage.as_access_category().supports_overlap() {
                    emit_buffer_barrier(*current_usage, new_usage, buffer.0, device, cmd);
                    *current_usage = new_usage;
                }
            }
            // Fixed: this previously panicked with "image not ready", a
            // copy/paste of the ImageResource message; also made the variant
            // explicit so new variants are a compile error here.
            BufferResource::Described { .. } => panic!("buffer not ready"),
        }
    }
}
/// Lifecycle of a tracked image: first merely described (accumulating usage
/// declarations), then realized as an allocated or imported Vulkan image.
pub(crate) enum ImageResource {
    Described {
        desc: ImageDesc,
        // Union of all usages declared so far; fixed once the image is allocated.
        all_usage: ImageUsage,
    },
    Active {
        desc: ImageDesc,
        // Backing allocation, kept alive but never read; `None` for imported images.
        _alloc: Option<Alloc>,
        image: UniqueImage,
        // Slot in the bindless descriptor set, when bindless is enabled.
        bindless_id: Option<BindlessId>,
        // Usage the image was most recently transitioned (or forced) to.
        current_usage: ImageUsage,
        // All usages declared up front; later usage must be a subset of this.
        all_usage_check: ImageUsage,
    },
}
impl ImageResource {
    /// Returns the creation descriptor; valid in both states.
    pub fn desc(&self) -> &ImageDesc {
        match self {
            ImageResource::Described { desc, .. } | ImageResource::Active { desc, .. } => desc,
        }
    }

    /// Returns the Vulkan image handle; panics while only described.
    pub fn image(&self) -> UniqueImage {
        if let ImageResource::Active { image, .. } = self {
            *image
        } else {
            panic!("image is only described")
        }
    }

    /// Returns the image's slot in the bindless set; panics while only described.
    pub fn bindless_id(&self) -> Option<BindlessId> {
        if let ImageResource::Active { bindless_id, .. } = self {
            *bindless_id
        } else {
            panic!("image is only described")
        }
    }

    /// Declares that the image will be used as `usage`: accumulated while
    /// described, checked against the declared set once active.
    pub fn declare_usage(&mut self, usage: ImageUsage) {
        match self {
            ImageResource::Described { all_usage, .. } => {
                *all_usage |= usage;
            }
            ImageResource::Active { all_usage_check, .. } => {
                if !all_usage_check.contains(usage) {
                    panic!("image usage {:?} was not declared in {:?}", usage, all_usage_check);
                }
            }
        }
    }

    /// Transitions the image to `new_usage`, emitting a barrier when the usage
    /// changes or the new usage's access category does not allow overlap.
    pub fn transition_usage(&mut self, new_usage: ImageUsage, device: &Device, cmd: vk::CommandBuffer) {
        match self {
            ImageResource::Active {
                desc,
                image,
                current_usage,
                all_usage_check,
                ..
            } => {
                if !all_usage_check.contains(new_usage) {
                    panic!("cannot set usage that image was not allocated with");
                }
                let needs_barrier =
                    *current_usage != new_usage || !new_usage.as_access_category().supports_overlap();
                if needs_barrier {
                    emit_image_barrier(*current_usage, new_usage, image.0, desc.aspect_mask, device, cmd);
                    *current_usage = new_usage;
                }
            }
            _ => panic!("image not ready"),
        }
    }

    /// Overwrites the tracked usage without emitting any barrier.
    pub fn force_usage(&mut self, new_usage: ImageUsage) {
        match self {
            ImageResource::Active {
                all_usage_check,
                current_usage,
                ..
            } => {
                if !all_usage_check.contains(new_usage) {
                    panic!("cannot set usage that image was not allocated with");
                }
                *current_usage = new_usage;
            }
            _ => panic!("image not ready"),
        }
    }
}
/// A created sampler plus its slot in the bindless set (when bindless is enabled).
pub(crate) struct SamplerResource {
    pub(crate) sampler: vk::Sampler,
    pub(crate) bindless_id: Option<BindlessId>,
}
/// Category of resource in the bindless descriptor set; each class maps to one
/// descriptor binding (the binding index is the discriminant, see `Bindless::new`).
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Contiguous)]
pub enum BindlessClass {
    StorageBuffer,
    SampledImage2D,
    Sampler,
}
impl BindlessClass {
    /// Classifies a buffer for bindless access: only storage buffers qualify.
    fn new_buffer(all_usage: BufferUsage) -> Option<Self> {
        let flags = all_usage.as_flags();
        if flags.contains(vk::BufferUsageFlags::STORAGE_BUFFER) {
            Some(Self::StorageBuffer)
        } else {
            None
        }
    }

    /// Classifies an image for bindless access: only sampled 2D images qualify.
    fn new_image(desc: &ImageDesc, all_usage: ImageUsage) -> Option<Self> {
        let is_2d = desc.image_view_type() == vk::ImageViewType::N2D;
        let is_sampled = all_usage.as_flags().contains(vk::ImageUsageFlags::SAMPLED);
        if is_2d && is_sampled {
            Some(Self::SampledImage2D)
        } else {
            None
        }
    }
}
/// Location of a resource within the bindless descriptor set.
#[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq)]
pub struct BindlessId {
    /// Which binding (descriptor array) the resource lives in.
    pub class: BindlessClass,
    /// Element index within that binding's descriptor array.
    pub index: u16,
}
#[derive(Clone)]
struct BindlessIndexSet {
    // Next index to hand out.
    next: u32,
    // One past the largest index that may ever be handed out.
    limit: u32,
}
impl BindlessIndexSet {
    /// Creates an allocator that hands out indices `0..limit` in order.
    fn new(limit: u32) -> Self {
        BindlessIndexSet { next: 0, limit }
    }
    /// Returns the next unused index, or `None` once `limit` indices have
    /// been handed out (indices are never recycled).
    fn allocate(&mut self) -> Option<u32> {
        if self.next >= self.limit {
            return None;
        }
        let index = self.next;
        self.next += 1;
        Some(index)
    }
}
/// Global bindless descriptor set: one large, partially bound descriptor array
/// per `BindlessClass`, written incrementally as resources are created.
pub(crate) struct Bindless {
    context: SharedContext,
    descriptor_set_layout: vk::DescriptorSetLayout,
    descriptor_pool: vk::DescriptorPool,
    // Single set, allocated once in `new` and updated for the whole lifetime.
    descriptor_set: vk::DescriptorSet,
    // One monotonically increasing index allocator per class.
    indices: [BindlessIndexSet; Self::CLASS_COUNT],
}
impl Bindless {
    /// Number of bindless classes; one descriptor binding per class.
    const CLASS_COUNT: usize = (1 + BindlessClass::MAX_VALUE) as usize;
    /// Capacity of the storage buffer binding.
    const MAX_STORAGE_BUFFER: u32 = 16 * 1024;
    /// Capacity of the sampled 2D image binding.
    const MAX_SAMPLED_IMAGE_2D: u32 = 1024;
    /// Capacity of the sampler binding.
    const MAX_SAMPLERS: u32 = 32;

    /// Creates the bindless set layout, pool and the single descriptor set.
    ///
    /// Every binding is update-after-bind, partially bound and updatable while
    /// pending, so individual elements can be written lazily as resources are
    /// created without synchronising against in-flight work.
    pub fn new(context: &SharedContext) -> Self {
        let descriptor_set_layout = {
            // Binding index == BindlessClass discriminant.
            let bindings = [
                vk::DescriptorSetLayoutBinding {
                    binding: BindlessClass::StorageBuffer.into_integer() as u32,
                    descriptor_type: vk::DescriptorType::STORAGE_BUFFER,
                    descriptor_count: Self::MAX_STORAGE_BUFFER,
                    stage_flags: vk::ShaderStageFlags::ALL,
                    ..Default::default()
                },
                vk::DescriptorSetLayoutBinding {
                    binding: BindlessClass::SampledImage2D.into_integer() as u32,
                    descriptor_type: vk::DescriptorType::SAMPLED_IMAGE,
                    descriptor_count: Self::MAX_SAMPLED_IMAGE_2D,
                    stage_flags: vk::ShaderStageFlags::ALL,
                    ..Default::default()
                },
                vk::DescriptorSetLayoutBinding {
                    binding: BindlessClass::Sampler.into_integer() as u32,
                    descriptor_type: vk::DescriptorType::SAMPLER,
                    descriptor_count: Self::MAX_SAMPLERS,
                    stage_flags: vk::ShaderStageFlags::ALL,
                    ..Default::default()
                },
            ];
            let binding_flags = [vk::DescriptorBindingFlags::UPDATE_AFTER_BIND
                | vk::DescriptorBindingFlags::PARTIALLY_BOUND
                | vk::DescriptorBindingFlags::UPDATE_UNUSED_WHILE_PENDING;
                Self::CLASS_COUNT];
            let mut binding_flags_create_info =
                vk::DescriptorSetLayoutBindingFlagsCreateInfo::builder().p_binding_flags(&binding_flags);
            let create_info = vk::DescriptorSetLayoutCreateInfo::builder()
                .flags(vk::DescriptorSetLayoutCreateFlags::UPDATE_AFTER_BIND_POOL)
                .p_bindings(&bindings)
                .insert_next(&mut binding_flags_create_info);
            unsafe { context.device.create_descriptor_set_layout(&create_info, None) }.unwrap()
        };
        let descriptor_pool = {
            // The pool must have capacity for every descriptor declared in the
            // set layout above, per descriptor type.
            let pool_sizes = [
                vk::DescriptorPoolSize {
                    ty: vk::DescriptorType::STORAGE_BUFFER,
                    descriptor_count: Self::MAX_STORAGE_BUFFER,
                },
                vk::DescriptorPoolSize {
                    ty: vk::DescriptorType::SAMPLED_IMAGE,
                    descriptor_count: Self::MAX_SAMPLED_IMAGE_2D,
                },
                // Bug fix: the SAMPLER pool size was missing even though the
                // layout declares MAX_SAMPLERS sampler descriptors, so the
                // pool could not legally back the allocated set (Vulkan
                // requires the pool to have capacity for every descriptor in
                // the sets allocated from it).
                vk::DescriptorPoolSize {
                    ty: vk::DescriptorType::SAMPLER,
                    descriptor_count: Self::MAX_SAMPLERS,
                },
            ];
            let create_info = vk::DescriptorPoolCreateInfo::builder()
                .flags(vk::DescriptorPoolCreateFlags::UPDATE_AFTER_BIND)
                .max_sets(1)
                .p_pool_sizes(&pool_sizes);
            unsafe { context.device.create_descriptor_pool(&create_info, None) }.unwrap()
        };
        let descriptor_set = {
            let allocate_info = vk::DescriptorSetAllocateInfo::builder()
                .descriptor_pool(descriptor_pool)
                .p_set_layouts(slice::from_ref(&descriptor_set_layout));
            unsafe { context.device.allocate_descriptor_sets_single(&allocate_info) }.unwrap()
        };
        Self {
            context: SharedContext::clone(context),
            descriptor_set_layout,
            descriptor_pool,
            descriptor_set,
            indices: [
                BindlessIndexSet::new(Self::MAX_STORAGE_BUFFER),
                BindlessIndexSet::new(Self::MAX_SAMPLED_IMAGE_2D),
                BindlessIndexSet::new(Self::MAX_SAMPLERS),
            ],
        }
    }

    /// Writes `buffer` into the bindless set, returning its slot.  Returns
    /// `None` when the usage does not qualify (not a storage buffer) or the
    /// class's index space is exhausted.
    pub fn add_buffer(&mut self, buffer: vk::Buffer, all_usage: BufferUsage) -> Option<BindlessId> {
        let class = BindlessClass::new_buffer(all_usage)?;
        let index = self.indices[class.into_integer() as usize].allocate()?;
        let buffer_info = vk::DescriptorBufferInfo {
            buffer,
            offset: 0,
            range: vk::WHOLE_SIZE,
        };
        let write = vk::WriteDescriptorSet::builder()
            .dst_set(self.descriptor_set)
            .dst_binding(class.into_integer() as u32)
            .dst_array_element(index)
            .p_buffer_info(slice::from_ref(&buffer_info))
            .descriptor_type(vk::DescriptorType::STORAGE_BUFFER);
        unsafe { self.context.device.update_descriptor_sets(slice::from_ref(&write), &[]) };
        Some(BindlessId {
            class,
            index: index as u16,
        })
    }

    /// Writes an image view into the bindless set, returning its slot.
    /// Returns `None` when the image does not qualify (not sampled 2D) or the
    /// class's index space is exhausted.
    pub fn add_image(
        &mut self,
        desc: &ImageDesc,
        image_view: vk::ImageView,
        all_usage: ImageUsage,
    ) -> Option<BindlessId> {
        let class = BindlessClass::new_image(desc, all_usage)?;
        let index = self.indices[class.into_integer() as usize].allocate()?;
        let image_info = vk::DescriptorImageInfo {
            sampler: vk::Sampler::null(),
            image_view,
            image_layout: vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
        };
        let write = vk::WriteDescriptorSet::builder()
            .dst_set(self.descriptor_set)
            .dst_binding(class.into_integer() as u32)
            .dst_array_element(index)
            .p_image_info(slice::from_ref(&image_info))
            .descriptor_type(vk::DescriptorType::SAMPLED_IMAGE);
        unsafe { self.context.device.update_descriptor_sets(slice::from_ref(&write), &[]) };
        Some(BindlessId {
            class,
            index: index as u16,
        })
    }

    /// Writes a sampler into the bindless set, returning its slot, or `None`
    /// when the sampler index space is exhausted.
    pub fn add_sampler(&mut self, sampler: vk::Sampler) -> Option<BindlessId> {
        let class = BindlessClass::Sampler;
        let index = self.indices[class.into_integer() as usize].allocate()?;
        let image_info = vk::DescriptorImageInfo {
            sampler,
            image_view: vk::ImageView::null(),
            image_layout: vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
        };
        let write = vk::WriteDescriptorSet::builder()
            .dst_set(self.descriptor_set)
            .dst_binding(class.into_integer() as u32)
            .dst_array_element(index)
            .p_image_info(slice::from_ref(&image_info))
            .descriptor_type(vk::DescriptorType::SAMPLER);
        unsafe { self.context.device.update_descriptor_sets(slice::from_ref(&write), &[]) };
        Some(BindlessId {
            class,
            index: index as u16,
        })
    }

    /// Appends one "used count" statistics row per bindless class to an egui table.
    pub fn ui_stats_table_rows(&self, ui: &mut egui::Ui) {
        for class_index in 0..Self::CLASS_COUNT {
            let class = BindlessClass::from_integer(class_index as u8).unwrap();
            ui.label(match class {
                BindlessClass::StorageBuffer => "bindless buffers",
                BindlessClass::SampledImage2D => "bindless sampled2d",
                BindlessClass::Sampler => "bindless sampler",
            });
            ui.label(format!("{}", self.indices[class_index].next));
            ui.end_row();
        }
    }
}
impl Drop for Bindless {
    /// Destroys the descriptor pool (freeing the set with it) and the layout.
    fn drop(&mut self) {
        // SAFETY: both handles were created from this context's device.
        unsafe {
            self.context.device.destroy_descriptor_pool(self.descriptor_pool, None);
            self.context
                .device
                .destroy_descriptor_set_layout(self.descriptor_set_layout, None);
        }
    }
}
/// Thread-safe shared handle to the resource registry.
pub(crate) type SharedResources = Arc<Mutex<Resources>>;
/// Registry of all tracked buffers, images and samplers, plus the allocator
/// and caches used to realize them.
pub(crate) struct Resources {
    context: SharedContext,
    // Allocator used for resources not tied to a frame-local allocator.
    global_allocator: Allocator,
    resource_cache: ResourceCache,
    buffers: SlotMap<BufferId, BufferResource>,
    images: SlotMap<ImageId, ImageResource>,
    samplers: SlotMap<SamplerId, SamplerResource>,
    // Present only when the device supports partially bound descriptors.
    bindless: Option<Bindless>,
}
impl Resources {
    /// Builds the shared registry; bindless support is enabled only when the
    /// device reports `descriptor_binding_partially_bound`.
    pub fn new(context: &SharedContext, global_chunk_size: u32) -> SharedResources {
        Arc::new(Mutex::new(Self {
            context: SharedContext::clone(context),
            global_allocator: Allocator::new(context, global_chunk_size),
            resource_cache: ResourceCache::new(context),
            buffers: SlotMap::with_key(),
            images: SlotMap::with_key(),
            samplers: SlotMap::with_key(),
            bindless: if context
                .physical_device_features
                .descriptor_indexing
                .descriptor_binding_partially_bound
                .as_bool()
            {
                Some(Bindless::new(context))
            } else {
                None
            },
        }))
    }
    /// Returns the bindless descriptor set; panics if bindless is unsupported.
    pub fn bindless_descriptor_set(&self) -> vk::DescriptorSet {
        self.bindless.as_ref().unwrap().descriptor_set
    }
    /// Returns the bindless set layout; panics if bindless is unsupported.
    pub fn bindless_descriptor_set_layout(&self) -> vk::DescriptorSetLayout {
        self.bindless.as_ref().unwrap().descriptor_set_layout
    }
    /// Registers a buffer by description only; usage accumulates via
    /// `declare_usage` until `allocate_temporary_buffer` realizes it.
    pub fn describe_buffer(&mut self, desc: &BufferDesc) -> BufferId {
        self.buffers.insert(BufferResource::Described {
            desc: *desc,
            all_usage: BufferUsage::empty(),
        })
    }
    /// Registers an externally owned buffer (no allocation, accel or bindless
    /// slot is tracked for it).
    pub fn import_buffer(
        &mut self,
        desc: &BufferDesc,
        all_usage: BufferUsage,
        buffer: UniqueBuffer,
        current_usage: BufferUsage,
    ) -> BufferId {
        self.buffers.insert(BufferResource::Active {
            desc: *desc,
            alloc: None,
            buffer,
            accel: None,
            bindless_id: None,
            current_usage,
            all_usage_check: all_usage,
        })
    }
    /// Creates and immediately allocates a buffer from the global allocator,
    /// registering it in the bindless set when it qualifies.
    pub fn create_buffer(
        &mut self,
        desc: &BufferDesc,
        all_usage: BufferUsage,
        memory_property_flags: vk::MemoryPropertyFlags,
    ) -> BufferId {
        let all_usage_flags = all_usage.as_flags();
        let info = self.resource_cache.get_buffer_info(desc, all_usage_flags);
        let alloc = self.global_allocator.allocate(&info.mem_req, memory_property_flags);
        let buffer = self.resource_cache.get_buffer(desc, &info, &alloc, all_usage_flags);
        let accel = self.resource_cache.get_buffer_accel(desc, buffer, all_usage);
        let bindless_id = self
            .bindless
            .as_mut()
            .and_then(|bindless| bindless.add_buffer(buffer.0, all_usage));
        self.buffers.insert(BufferResource::Active {
            desc: *desc,
            alloc: Some(alloc),
            buffer,
            accel,
            bindless_id,
            current_usage: BufferUsage::empty(),
            all_usage_check: all_usage,
        })
    }
    /// Looks up a buffer; panics if the id is stale.
    pub fn buffer_resource(&self, id: BufferId) -> &BufferResource {
        self.buffers.get(id).unwrap()
    }
    /// Mutable buffer lookup; panics if the id is stale.
    pub fn buffer_resource_mut(&mut self, id: BufferId) -> &mut BufferResource {
        self.buffers.get_mut(id).unwrap()
    }
    /// Returns the descriptor a buffer was registered with.
    pub fn buffer_desc(&self, id: BufferId) -> &BufferDesc {
        self.buffers.get(id).unwrap().desc()
    }
    /// Realizes a described buffer from `allocator` using the accumulated
    /// usage (no-op if nothing was declared); panics if already active.
    pub fn allocate_temporary_buffer(&mut self, id: BufferId, allocator: &mut Allocator) {
        let buffer_resource = self.buffers.get_mut(id).unwrap();
        match buffer_resource {
            BufferResource::Described { desc, all_usage } => {
                if !all_usage.is_empty() {
                    let all_usage_flags = all_usage.as_flags();
                    let memory_property_flags = vk::MemoryPropertyFlags::DEVICE_LOCAL;
                    let info = self.resource_cache.get_buffer_info(desc, all_usage_flags);
                    let alloc = allocator.allocate(&info.mem_req, memory_property_flags);
                    let buffer = self.resource_cache.get_buffer(desc, &info, &alloc, all_usage_flags);
                    let accel = self.resource_cache.get_buffer_accel(desc, buffer, *all_usage);
                    *buffer_resource = BufferResource::Active {
                        desc: *desc,
                        alloc: Some(alloc),
                        buffer,
                        accel,
                        bindless_id: None,
                        current_usage: BufferUsage::empty(),
                        all_usage_check: *all_usage,
                    }
                }
            }
            _ => panic!("buffer is not temporary"),
        }
    }
    /// Forgets a buffer; panics if the id is stale.  NOTE(review): the Vulkan
    /// buffer itself is not destroyed here — presumably owned by the cache or
    /// allocator; confirm.
    pub fn remove_buffer(&mut self, id: BufferId) {
        self.buffers.remove(id).unwrap();
    }
    /// Registers an image by description only; see `describe_buffer`.
    pub fn describe_image(&mut self, desc: &ImageDesc) -> ImageId {
        self.images.insert(ImageResource::Described {
            desc: *desc,
            all_usage: ImageUsage::empty(),
        })
    }
    /// Registers an externally owned image (e.g. no allocation is tracked).
    pub fn import_image(
        &mut self,
        desc: &ImageDesc,
        all_usage: ImageUsage,
        image: UniqueImage,
        current_usage: ImageUsage,
    ) -> ImageId {
        self.images.insert(ImageResource::Active {
            desc: *desc,
            _alloc: None,
            image,
            bindless_id: None,
            current_usage,
            all_usage_check: all_usage,
        })
    }
    /// Creates and allocates an image, from `allocator` when given, otherwise
    /// from the global allocator; registers it in the bindless set when it
    /// qualifies.
    pub fn create_image(
        &mut self,
        desc: &ImageDesc,
        all_usage: ImageUsage,
        allocator: Option<&mut Allocator>,
    ) -> ImageId {
        let all_usage_flags = all_usage.as_flags();
        let memory_property_flags = vk::MemoryPropertyFlags::DEVICE_LOCAL;
        let info = self.resource_cache.get_image_info(desc, all_usage_flags);
        let alloc = allocator
            .unwrap_or(&mut self.global_allocator)
            .allocate(&info.mem_req, memory_property_flags);
        let image = self.resource_cache.get_image(desc, &info, &alloc, all_usage_flags);
        let image_view = self
            .resource_cache
            .get_image_view(desc, image, ImageViewDesc::default());
        let bindless_id = self
            .bindless
            .as_mut()
            .and_then(|bindless| bindless.add_image(desc, image_view.0, all_usage));
        self.images.insert(ImageResource::Active {
            desc: *desc,
            _alloc: Some(alloc),
            image,
            bindless_id,
            current_usage: ImageUsage::empty(),
            all_usage_check: all_usage,
        })
    }
    /// Looks up an image; panics if the id is stale.
    pub fn image_resource(&self, id: ImageId) -> &ImageResource {
        self.images.get(id).unwrap()
    }
    /// Mutable image lookup; panics if the id is stale.
    pub fn image_resource_mut(&mut self, id: ImageId) -> &mut ImageResource {
        self.images.get_mut(id).unwrap()
    }
    /// Returns the descriptor an image was registered with.
    pub fn image_desc(&self, id: ImageId) -> &ImageDesc {
        self.images.get(id).unwrap().desc()
    }
    /// Returns a (cached) view of the image; panics while only described.
    pub fn image_view(&mut self, id: ImageId, view_desc: ImageViewDesc) -> UniqueImageView {
        let resource = self.images.get(id).unwrap();
        self.resource_cache
            .get_image_view(resource.desc(), resource.image(), view_desc)
    }
    /// Realizes a described image from `allocator` using the accumulated
    /// usage (no-op if nothing was declared); panics if already active.
    pub fn allocate_temporary_image(&mut self, id: ImageId, allocator: &mut Allocator) {
        let image_resource = self.images.get_mut(id).unwrap();
        match image_resource {
            ImageResource::Described { desc, all_usage } => {
                if !all_usage.is_empty() {
                    let all_usage_flags = all_usage.as_flags();
                    let memory_property_flags = vk::MemoryPropertyFlags::DEVICE_LOCAL;
                    let info = self.resource_cache.get_image_info(desc, all_usage_flags);
                    let alloc = allocator.allocate(&info.mem_req, memory_property_flags);
                    let image = self.resource_cache.get_image(desc, &info, &alloc, all_usage_flags);
                    *image_resource = ImageResource::Active {
                        desc: *desc,
                        _alloc: Some(alloc),
                        bindless_id: None,
                        image,
                        current_usage: ImageUsage::empty(),
                        all_usage_check: *all_usage,
                    };
                }
            }
            _ => panic!("image is not temporary"),
        }
    }
    /// Forgets an image; panics if the id is stale.
    pub fn remove_image(&mut self, id: ImageId) {
        self.images.remove(id).unwrap();
    }
    /// Transitions a buffer to `new_usage`, recording any barrier into `cmd`.
    pub fn transition_buffer_usage(
        &mut self,
        id: BufferId,
        new_usage: BufferUsage,
        context: &Context,
        cmd: vk::CommandBuffer,
    ) {
        self.buffers
            .get_mut(id)
            .unwrap()
            .transition_usage(new_usage, &context.device, cmd)
    }
    /// Transitions an image to `new_usage`, recording any barrier into `cmd`.
    pub fn transition_image_usage(
        &mut self,
        id: ImageId,
        new_usage: ImageUsage,
        context: &Context,
        cmd: vk::CommandBuffer,
    ) {
        self.images
            .get_mut(id)
            .unwrap()
            .transition_usage(new_usage, &context.device, cmd)
    }
    /// Creates a sampler and registers it in the bindless set when enabled.
    pub fn create_sampler(&mut self, create_info: &vk::SamplerCreateInfo) -> SamplerId {
        let sampler = unsafe { self.context.device.create_sampler(create_info, None) }.unwrap();
        let bindless_id = self
            .bindless
            .as_mut()
            .and_then(|bindless| bindless.add_sampler(sampler));
        self.samplers.insert(SamplerResource { sampler, bindless_id })
    }
    /// Looks up a sampler; panics if the id is stale.
    pub fn sampler_resource(&self, id: SamplerId) -> &SamplerResource {
        self.samplers.get(id).unwrap()
    }
    /// Appends registry statistics rows to an egui table.
    pub fn ui_stats_table_rows(&self, ui: &mut egui::Ui) {
        self.global_allocator.ui_stats_table_rows(ui, "global memory");
        ui.label("buffers");
        ui.label(format!("{}", self.buffers.len()));
        ui.end_row();
        ui.label("images");
        ui.label(format!("{}", self.images.len()));
        ui.end_row();
        self.resource_cache.ui_stats_table_rows(ui, "graph");
        if let Some(bindless) = self.bindless.as_ref() {
            bindless.ui_stats_table_rows(ui);
        }
    }
}
impl Drop for Resources {
    /// Destroys every sampler still registered (buffers/images are handled by
    /// the owning allocator/cache, not here).
    fn drop(&mut self) {
        for (_id, sampler_resource) in self.samplers.drain() {
            // SAFETY: the samplers were created from this context's device.
            unsafe { self.context.device.destroy_sampler(sampler_resource.sampler, None) };
        }
    }
}
| rust | MIT | 42fc4b496cce21e907eca7112e6d6334d35fc41a | 2026-01-04T20:23:56.526296Z | false |
sjb3d/caldera | https://github.com/sjb3d/caldera/blob/42fc4b496cce21e907eca7112e6d6334d35fc41a/caldera/src/render_graph.rs | caldera/src/render_graph.rs | use crate::prelude::*;
use arrayvec::ArrayVec;
use spark::{vk, Builder};
use std::{ffi::CStr, mem};
/*
The goal is to manage:
* Temporary resources (buffers and images, and their views)
* Render passes and framebuffers
* Barriers and layout transitions
* Synchronisation between queues for async compute
The API should be along the lines of:
* Register externally provided resources (e.g. swapchain image for this frame)
* Describe temporary resources
* Describe commands
* All commands specify a list of resource views
* Render commands additionally specify render target views
Each command is expected to be a set of draw calls or dispatches that do not
require synchronisation between them.
Within each command, the caller must manage:
* Command buffers
* Pipelines
* Descriptor sets
* Vulkan draw and dispatch commands
*/
/// A group of resources sharing one allocator and one lifetime; all of them
/// are released together in `begin_frame`.
struct ResourceSet {
    allocator: Allocator,
    buffer_ids: Vec<BufferId>,
    image_ids: Vec<ImageId>,
}
impl ResourceSet {
fn new(context: &SharedContext, chunk_size: u32) -> Self {
Self {
allocator: Allocator::new(context, chunk_size),
buffer_ids: Vec::new(),
image_ids: Vec::new(),
}
}
fn begin_frame(&mut self, resources: &mut Resources) {
self.allocator.reset();
for id in self.buffer_ids.drain(..) {
resources.remove_buffer(id);
}
for id in self.image_ids.drain(..) {
resources.remove_image(id);
}
}
}
/// Owner of per-frame render-graph state: shared resources, the render cache,
/// frame-local allocators and the transfer staging buffer.
pub struct RenderGraph {
    resources: SharedResources,
    render_cache: RenderCache,
    // Allocator for resources that live only within one frame's graph.
    temp_allocator: Allocator,
    // Double-buffered sets for "bounce" images, which survive exactly one
    // extra frame (see the swap in `begin_frame`).
    bounce_current_set: ResourceSet,
    bounce_prev_set: ResourceSet,
    // Imported resources, released at the start of the next frame.
    import_set: ResourceSet,
    transfer_staging: StagingBuffer,
}
impl RenderGraph {
    /// Builds a graph over the shared resource registry with the given
    /// allocator chunk sizes and per-frame staging capacity.
    pub(crate) fn new(
        context: &SharedContext,
        resources: &SharedResources,
        temp_chunk_size: u32,
        bounce_chunk_size: u32,
        staging_size_per_frame: u32,
    ) -> Self {
        Self {
            resources: SharedResources::clone(resources),
            render_cache: RenderCache::new(context),
            temp_allocator: Allocator::new(context, temp_chunk_size),
            bounce_current_set: ResourceSet::new(context, bounce_chunk_size),
            bounce_prev_set: ResourceSet::new(context, bounce_chunk_size),
            // Imported resources carry no allocations, so no chunk space is needed.
            import_set: ResourceSet::new(context, 0),
            transfer_staging: StagingBuffer::new(
                context,
                staging_size_per_frame,
                4,
                vk::BufferUsageFlags::TRANSFER_SRC,
            ),
        }
    }
    /// Creates a device-local buffer that lives until explicitly removed.
    pub fn create_buffer(&mut self, desc: &BufferDesc, all_usage: BufferUsage) -> BufferId {
        self.resources
            .lock()
            .unwrap()
            .create_buffer(desc, all_usage, vk::MemoryPropertyFlags::DEVICE_LOCAL)
    }
    /// Returns the descriptor a buffer was created with.
    pub fn get_buffer_desc(&self, id: BufferId) -> BufferDesc {
        *self.resources.lock().unwrap().buffer_desc(id)
    }
    /// Creates an image from the global allocator.
    pub fn create_image(&mut self, desc: &ImageDesc, all_usage: ImageUsage) -> ImageId {
        self.resources.lock().unwrap().create_image(desc, all_usage, None)
    }
    /// Creates an image in the current bounce set; it is freed automatically
    /// after surviving one extra frame (see `begin_frame`).
    pub fn create_bounce_image(&mut self, desc: &ImageDesc, all_usage: ImageUsage) -> ImageId {
        let image_id =
            self.resources
                .lock()
                .unwrap()
                .create_image(desc, all_usage, Some(&mut self.bounce_current_set.allocator));
        self.bounce_current_set.image_ids.push(image_id);
        image_id
    }
    /// Returns the descriptor an image was created with.
    pub fn get_image_desc(&self, id: ImageId) -> ImageDesc {
        *self.resources.lock().unwrap().image_desc(id)
    }
    /// Creates a sampler (registered bindless when supported).
    pub fn create_sampler(&mut self, create_info: &vk::SamplerCreateInfo) -> SamplerId {
        self.resources.lock().unwrap().create_sampler(create_info)
    }
    /// Returns a sampler's bindless slot; panics when bindless is unavailable
    /// or the sampler was not given a slot.
    pub fn get_sampler_bindless_id(&self, id: SamplerId) -> BindlessId {
        self.resources.lock().unwrap().sampler_resource(id).bindless_id.unwrap()
    }
    /// Starts a new frame: after swapping the bounce sets, the set filled two
    /// frames ago becomes current and is emptied, so bounce resources live for
    /// exactly one extra frame; imports are released every frame.
    pub(crate) fn begin_frame(&mut self) {
        mem::swap(&mut self.bounce_current_set, &mut self.bounce_prev_set);
        let mut resources = self.resources.lock().unwrap();
        self.bounce_current_set.begin_frame(&mut resources);
        self.import_set.begin_frame(&mut resources);
        self.transfer_staging.begin_frame();
    }
    /// Finishes the frame's staging uploads.
    pub(crate) fn end_frame(&mut self) {
        self.transfer_staging.end_frame();
    }
    /// Appends graph statistics rows to an egui table.
    pub fn ui_stats_table_rows(&self, ui: &mut egui::Ui) {
        self.resources.lock().unwrap().ui_stats_table_rows(ui);
        self.render_cache.ui_stats_table_rows(ui);
        self.temp_allocator.ui_stats_table_rows(ui, "temp memory");
        self.bounce_prev_set.allocator.ui_stats_table_rows(ui, "bounce memory");
        ui.label("bounce buffer");
        ui.label(format!("{}", self.bounce_prev_set.buffer_ids.len()));
        ui.end_row();
        ui.label("bounce image");
        ui.label(format!("{}", self.bounce_prev_set.image_ids.len()));
        ui.end_row();
    }
}
/// How an attachment's previous contents are handled when a render pass begins.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum AttachmentLoadOp {
    /// Preserve the existing contents of the attachment.
    Load,
    /// Clear the attachment at the start of the pass.
    Clear,
}
/// Whether an attachment's contents are kept after the render pass ends.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum AttachmentStoreOp {
    /// Keep the rendered results for later use.
    Store,
    /// Discard the results after the pass (e.g. transient depth).
    None,
}
/// An attachment binding (currently used for depth) with its load/store behaviour.
#[derive(Clone, Copy)]
pub struct Attachment {
    /// The image to attach.
    pub image_id: ImageId,
    /// How existing contents are handled at the start of the pass.
    pub load_op: AttachmentLoadOp,
    /// Whether results are kept after the pass.
    pub store_op: AttachmentStoreOp,
}
/// State for a batch of draw calls, becomes a Vulkan sub-pass
#[derive(Clone, Copy)]
pub struct RenderState {
    /// Color image written by the pass, if any.
    pub color_output_id: Option<ImageId>,
    /// Value the color attachment is cleared to at the start of the pass.
    pub color_clear_value: [f32; 4],
    /// Optional transient color target; when set, the pass sample count is taken from this image.
    pub color_temp_id: Option<ImageId>,
    /// Optional depth attachment with its load/store behaviour.
    pub depth: Option<Attachment>,
}
impl Default for RenderState {
    // Addresses clippy::new_without_default: `new` takes no arguments, so the type
    // should also be constructible via `Default`.
    fn default() -> Self {
        Self::new()
    }
}

impl RenderState {
    /// Creates an empty state: no color output, no transient color, no depth,
    /// and a clear value of transparent black.
    pub fn new() -> Self {
        Self {
            color_output_id: None,
            color_clear_value: [0.0; 4],
            color_temp_id: None,
            depth: None,
        }
    }

    /// Sets the color output image and the value it is cleared to.
    pub fn with_color(mut self, id: ImageId, clear_value: &[f32; 4]) -> Self {
        self.color_output_id = Some(id);
        self.color_clear_value = *clear_value;
        self
    }

    /// Sets a transient color target (its sample count drives multisampling for the pass).
    pub fn with_color_temp(mut self, id: ImageId) -> Self {
        self.color_temp_id = Some(id);
        self
    }

    /// Sets the depth attachment with the given load and store behaviour.
    pub fn with_depth(mut self, image_id: ImageId, load_op: AttachmentLoadOp, store_op: AttachmentStoreOp) -> Self {
        self.depth = Some(Attachment {
            image_id,
            load_op,
            store_op,
        });
        self
    }
}
/// A declared resource parameter of a command, with the usage it needs transitioned to.
#[derive(Clone, Copy)]
enum ParameterDesc {
    Buffer { id: BufferId, usage: BufferUsage },
    Image { id: ImageId, usage: ImageUsage },
}
/// State shared by compute and graphics commands: a debug/timestamp label plus
/// the declared resource parameters.
struct CommandCommon {
    name: &'static CStr,
    params: RenderParameterDeclaration,
}
/// A recorded command: the user callback is deferred until `RenderSchedule::run`,
/// after barriers (and for graphics, the render pass) have been set up.
enum Command<'graph> {
    Compute {
        common: CommandCommon,
        callback: Box<dyn FnOnce(RenderParameterAccess, vk::CommandBuffer) + 'graph>,
    },
    Graphics {
        common: CommandCommon,
        state: RenderState,
        callback: Box<dyn FnOnce(RenderParameterAccess, vk::CommandBuffer, vk::RenderPass) + 'graph>,
    },
}
/// A pair of command buffers that switches from the first to the second the moment
/// the swapchain image is first used (so pre-swapchain work is submitted separately).
struct SplitCommandBuffer {
    current: vk::CommandBuffer,
    next: Option<vk::CommandBuffer>,
    // Cleared once the switch has happened.
    swap_image_id: Option<ImageId>,
}
impl SplitCommandBuffer {
    /// Starts on `first`; switches to `second` once `swap_image_id` is used.
    fn new(first: vk::CommandBuffer, second: vk::CommandBuffer, swap_image_id: Option<ImageId>) -> Self {
        Self {
            current: first,
            next: Some(second),
            swap_image_id,
        }
    }

    /// Called for every image an upcoming command touches; on the first use of the
    /// swapchain image this ends the query frame and moves to the second buffer.
    fn notify_image_use(&mut self, image_id: ImageId, query_pool: &mut QueryPool) {
        if self.swap_image_id != Some(image_id) {
            return;
        }
        query_pool.end_frame(self.current);
        self.current = self.next.take().unwrap();
        self.swap_image_id = None;
    }
}
/// Collects the resources a command will access, so `run` can emit barriers for them.
pub struct RenderParameterDeclaration(Vec<ParameterDesc>);

impl RenderParameterDeclaration {
    fn new() -> Self {
        RenderParameterDeclaration(Vec::new())
    }

    /// Declares that the command accesses `id` with the given buffer usage.
    pub fn add_buffer(&mut self, id: BufferId, usage: BufferUsage) {
        let desc = ParameterDesc::Buffer { id, usage };
        self.0.push(desc);
    }

    /// Declares that the command accesses `id` with the given image usage.
    pub fn add_image(&mut self, id: ImageId, usage: ImageUsage) {
        let desc = ParameterDesc::Image { id, usage };
        self.0.push(desc);
    }
}
/// Read-only view of the render graph handed to command callbacks for looking up
/// raw Vulkan handles of declared parameters.
#[derive(Clone, Copy)]
pub struct RenderParameterAccess<'graph>(&'graph RenderGraph);

impl<'graph> RenderParameterAccess<'graph> {
    fn new(render_graph: &'graph RenderGraph) -> Self {
        RenderParameterAccess(render_graph)
    }

    /// Descriptor set layout of the global bindless table.
    pub fn get_bindless_descriptor_set_layout(&self) -> vk::DescriptorSetLayout {
        let resources = self.0.resources.lock().unwrap();
        resources.bindless_descriptor_set_layout()
    }

    /// Descriptor set of the global bindless table.
    pub fn get_bindless_descriptor_set(&self) -> vk::DescriptorSet {
        let resources = self.0.resources.lock().unwrap();
        resources.bindless_descriptor_set()
    }

    /// Raw buffer handle for `id`.
    // TODO: cache these, avoid lock per parameter
    pub fn get_buffer(&self, id: BufferId) -> vk::Buffer {
        let resources = self.0.resources.lock().unwrap();
        resources.buffer_resource(id).buffer().0
    }

    /// Acceleration structure handle for `id`; panics if the buffer has none.
    // TODO: cache these, avoid lock per parameter
    pub fn get_buffer_accel(&self, id: BufferId) -> vk::AccelerationStructureKHR {
        let resources = self.0.resources.lock().unwrap();
        resources.buffer_resource(id).accel().unwrap()
    }

    /// Image view handle for `id` matching `view_desc`.
    // TODO: cache these, avoid lock per parameter
    pub fn get_image_view(&self, id: ImageId, view_desc: ImageViewDesc) -> vk::ImageView {
        let resources = self.0.resources.lock().unwrap();
        resources.image_view(id, view_desc).0
    }
}
/// Id of a resource that only lives for the duration of one `RenderSchedule::run`.
enum TemporaryId {
    Buffer(BufferId),
    Image(ImageId),
}
/// Usage an imported resource must be transitioned back to at the end of the schedule
/// (e.g. the swapchain image back to present).
enum FinalUsage {
    Buffer { id: BufferId, usage: BufferUsage },
    Image { id: ImageId, usage: ImageUsage },
}
/// Per-frame builder: records commands and resource declarations, then replays them
/// with barriers and render passes in [`RenderSchedule::run`].
pub struct RenderSchedule<'graph> {
    render_graph: &'graph mut RenderGraph,
    // Resources to free again at the end of `run`.
    temporaries: Vec<TemporaryId>,
    commands: Vec<Command<'graph>>,
    // Transitions to emit after all commands have executed.
    final_usage: Vec<FinalUsage>,
}
impl<'graph> RenderSchedule<'graph> {
    /// Starts a new schedule for this frame; resets the temporary allocator so
    /// transient resources from the previous schedule can be reused.
    pub(crate) fn new(render_graph: &'graph mut RenderGraph) -> Self {
        render_graph.temp_allocator.reset();
        RenderSchedule {
            render_graph,
            temporaries: Vec::new(),
            commands: Vec::new(),
            final_usage: Vec::new(),
        }
    }
    /// Descriptor set layout of the global bindless table.
    pub fn get_bindless_descriptor_set_layout(&self) -> vk::DescriptorSetLayout {
        self.render_graph
            .resources
            .lock()
            .unwrap()
            .bindless_descriptor_set_layout()
    }
    /// Allocates `size` bytes (with `align`) in the transfer staging buffer, lets
    /// `writer` fill them, and returns the staging buffer handle and offset.
    /// Returns `None` if the staging buffer is out of space this frame.
    pub fn write_transfer(
        &self,
        size: usize,
        align: usize,
        writer: impl FnOnce(&mut [u8]),
    ) -> Option<(vk::Buffer, u32)> {
        let (buf, offset) = self.render_graph.transfer_staging.alloc(size as u32, align as u32)?;
        writer(buf);
        Some((self.render_graph.transfer_staging.get_buffer(), offset))
    }
    /// Raw buffer handle for `id`.
    pub fn get_buffer(&self, id: BufferId) -> vk::Buffer {
        self.render_graph
            .resources
            .lock()
            .unwrap()
            .buffer_resource(id)
            .buffer()
            .0
    }
    /// Acceleration structure handle for `id`; panics if the buffer has none.
    pub fn get_buffer_accel(&self, id: BufferId) -> vk::AccelerationStructureKHR {
        self.render_graph
            .resources
            .lock()
            .unwrap()
            .buffer_resource(id)
            .accel()
            .unwrap()
    }
    /// Returns a copy of the descriptor for the given image.
    pub fn get_image_desc(&self, id: ImageId) -> ImageDesc {
        *self.render_graph.resources.lock().unwrap().image_desc(id)
    }
    /// Image view handle for `id` matching `view_desc`.
    pub fn get_image_view(&self, id: ImageId, view_desc: ImageViewDesc) -> vk::ImageView {
        self.render_graph.resources.lock().unwrap().image_view(id, view_desc).0
    }
    /// Creates a persistent device-local buffer (outlives this schedule).
    pub fn create_buffer(&mut self, desc: &BufferDesc, all_usage: BufferUsage) -> BufferId {
        self.render_graph.create_buffer(desc, all_usage)
    }
    /// Creates a persistent image (outlives this schedule).
    pub fn create_image(&mut self, desc: &ImageDesc, all_usage: ImageUsage) -> ImageId {
        self.render_graph.create_image(desc, all_usage)
    }
    /// Creates an image in the current bounce set.
    pub fn create_bounce_image(&mut self, desc: &ImageDesc, all_usage: ImageUsage) -> ImageId {
        self.render_graph.create_bounce_image(desc, all_usage)
    }
    /// Imports an externally owned buffer for this frame. `current_usage` describes
    /// its state on entry; if `final_usage` is non-empty, a transition back to it is
    /// emitted at the end of `run`.
    pub fn import_buffer(
        &mut self,
        desc: &BufferDesc,
        all_usage: BufferUsage,
        buffer: UniqueBuffer,
        current_usage: BufferUsage,
        final_usage: BufferUsage,
    ) -> BufferId {
        let buffer_id =
            self.render_graph
                .resources
                .lock()
                .unwrap()
                .import_buffer(desc, all_usage, buffer, current_usage);
        self.render_graph.import_set.buffer_ids.push(buffer_id);
        if !final_usage.is_empty() {
            self.final_usage.push(FinalUsage::Buffer {
                id: buffer_id,
                usage: final_usage,
            });
        }
        buffer_id
    }
    /// Declares a temporary buffer; it is allocated at the start of `run` (once all
    /// usage is known) and freed at the end.
    pub fn describe_buffer(&mut self, desc: &BufferDesc) -> BufferId {
        let buffer_id = self.render_graph.resources.lock().unwrap().describe_buffer(desc);
        self.temporaries.push(TemporaryId::Buffer(buffer_id));
        buffer_id
    }
    /// Imports an externally owned image for this frame (e.g. the swapchain image).
    /// See [`Self::import_buffer`] for the usage semantics.
    pub fn import_image(
        &mut self,
        desc: &ImageDesc,
        all_usage: ImageUsage,
        image: UniqueImage,
        current_usage: ImageUsage,
        final_usage: ImageUsage,
    ) -> ImageId {
        let image_id = self
            .render_graph
            .resources
            .lock()
            .unwrap()
            .import_image(desc, all_usage, image, current_usage);
        self.render_graph.import_set.image_ids.push(image_id);
        if !final_usage.is_empty() {
            self.final_usage.push(FinalUsage::Image {
                id: image_id,
                usage: final_usage,
            });
        }
        image_id
    }
    /// Declares a temporary image; allocated at the start of `run`, freed at the end.
    pub fn describe_image(&mut self, desc: &ImageDesc) -> ImageId {
        let image_id = self.render_graph.resources.lock().unwrap().describe_image(desc);
        self.temporaries.push(TemporaryId::Image(image_id));
        image_id
    }
    /// Records a compute command. `decl` lists the resources the callback will touch;
    /// the callback itself runs later inside `run`, after barriers are emitted.
    pub fn add_compute(
        &mut self,
        name: &'static CStr,
        decl: impl FnOnce(&mut RenderParameterDeclaration),
        callback: impl FnOnce(RenderParameterAccess, vk::CommandBuffer) + 'graph,
    ) {
        let mut params = RenderParameterDeclaration::new();
        decl(&mut params);
        self.commands.push(Command::Compute {
            common: CommandCommon { name, params },
            callback: Box::new(callback),
        });
    }
    /// Records a graphics command; `state` describes its attachments, from which the
    /// render pass and framebuffer are derived inside `run`.
    pub fn add_graphics(
        &mut self,
        name: &'static CStr,
        state: RenderState,
        decl: impl FnOnce(&mut RenderParameterDeclaration),
        callback: impl FnOnce(RenderParameterAccess, vk::CommandBuffer, vk::RenderPass) + 'graph,
    ) {
        let mut params = RenderParameterDeclaration::new();
        decl(&mut params);
        self.commands.push(Command::Graphics {
            common: CommandCommon { name, params },
            state,
            callback: Box::new(callback),
        });
    }
    /// Replays all recorded commands: declares usage, allocates temporaries, emits
    /// barriers just in time, begins/ends render passes for graphics commands, and
    /// finally frees temporaries and emits the declared final transitions.
    ///
    /// Recording switches from `pre_swapchain_cmd` to `post_swapchain_cmd` the first
    /// time `swap_image_id` is used, so acquire-dependent work is kept separate.
    pub fn run(
        mut self,
        context: &Context,
        pre_swapchain_cmd: vk::CommandBuffer,
        post_swapchain_cmd: vk::CommandBuffer,
        swap_image_id: Option<ImageId>,
        query_pool: &mut QueryPool,
    ) {
        // loop over commands to set usage for all resources
        {
            let mut resources = self.render_graph.resources.lock().unwrap();
            for command in &self.commands {
                let common = match command {
                    Command::Compute { common, .. } => common,
                    Command::Graphics { common, state, .. } => {
                        // attachments are implicit parameters of graphics commands
                        if let Some(id) = state.color_output_id {
                            resources
                                .image_resource_mut(id)
                                .declare_usage(ImageUsage::COLOR_ATTACHMENT_WRITE);
                        }
                        if let Some(id) = state.color_temp_id {
                            resources
                                .image_resource_mut(id)
                                .declare_usage(ImageUsage::TRANSIENT_COLOR_ATTACHMENT);
                        }
                        if let Some(depth) = state.depth {
                            // clear-and-discard depth never needs backing memory reads
                            let usage = match (depth.load_op, depth.store_op) {
                                (AttachmentLoadOp::Clear, AttachmentStoreOp::None) => {
                                    ImageUsage::TRANSIENT_DEPTH_ATTACHMENT
                                }
                                _ => ImageUsage::DEPTH_ATTACHMENT,
                            };
                            resources.image_resource_mut(depth.image_id).declare_usage(usage)
                        }
                        common
                    }
                };
                for parameter_desc in &common.params.0 {
                    match parameter_desc {
                        ParameterDesc::Buffer { id, usage } => {
                            resources.buffer_resource_mut(*id).declare_usage(*usage);
                        }
                        ParameterDesc::Image { id, usage } => {
                            resources.image_resource_mut(*id).declare_usage(*usage);
                        }
                    }
                }
            }
            // allocate temporaries
            let temp_allocator = &mut self.render_graph.temp_allocator;
            for id in &self.temporaries {
                match id {
                    TemporaryId::Buffer(id) => resources.allocate_temporary_buffer(*id, temp_allocator),
                    TemporaryId::Image(id) => resources.allocate_temporary_image(*id, temp_allocator),
                }
            }
        }
        // for now we just emit single barriers just in time
        /*
            TODO: build graph of commands, barriers, and layout changes
            Goals would be to:
            * Do barriers and layout changes early
            * Combine usage where possible (e.g. read by different stages)
            * Combine with render pass for attachments where possible
            TODO: think about how to do sub-passes... probably explicit is better?
        */
        let mut cmd = SplitCommandBuffer::new(pre_swapchain_cmd, post_swapchain_cmd, swap_image_id);
        for command in self.commands.drain(..) {
            let common = match &command {
                Command::Compute { common, .. } => common,
                Command::Graphics { common, .. } => common,
            };
            // wrap each command in a debug label when the extension is available
            let is_debug = context.instance.extensions.ext_debug_utils;
            if is_debug {
                let label = vk::DebugUtilsLabelEXT {
                    p_label_name: common.name.as_ptr(),
                    ..Default::default()
                };
                unsafe { context.instance.cmd_begin_debug_utils_label_ext(cmd.current, &label) };
            }
            // transition all declared parameters to their required usage
            {
                let mut resources = self.render_graph.resources.lock().unwrap();
                for parameter_desc in &common.params.0 {
                    match parameter_desc {
                        ParameterDesc::Buffer { id, usage } => {
                            resources.transition_buffer_usage(*id, *usage, context, cmd.current);
                        }
                        ParameterDesc::Image { id, usage } => {
                            cmd.notify_image_use(*id, query_pool);
                            resources.transition_image_usage(*id, *usage, context, cmd.current);
                        }
                    }
                }
            }
            let timestamp_name = common.name;
            match command {
                Command::Compute { callback, .. } => {
                    query_pool.emit_timestamp(cmd.current, timestamp_name);
                    (callback)(RenderParameterAccess::new(self.render_graph), cmd.current);
                }
                Command::Graphics { state, callback, .. } => {
                    // derive a render pass + framebuffer from the attachment state,
                    // begin the pass, invoke the callback inside it, then end it
                    let render_pass = {
                        let mut resources = self.render_graph.resources.lock().unwrap();
                        // TODO: initial layout as part of render pass
                        if let Some(id) = state.color_output_id {
                            cmd.notify_image_use(id, query_pool);
                            // TODO: resource transition when UNDEFINED is no longer ok for color
                        }
                        if let Some(depth) = state.depth {
                            if depth.load_op == AttachmentLoadOp::Load {
                                resources.transition_image_usage(
                                    depth.image_id,
                                    ImageUsage::DEPTH_ATTACHMENT,
                                    context,
                                    cmd.current,
                                );
                            }
                        }
                        let mut clear_values = ArrayVec::<_, { RenderCache::MAX_ATTACHMENTS }>::new();
                        let color_clear_value = vk::ClearValue {
                            color: vk::ClearColorValue {
                                float32: state.color_clear_value,
                            },
                        };
                        let depth_clear_value = vk::ClearValue {
                            depth_stencil: vk::ClearDepthStencilValue { depth: 0.0, stencil: 0 },
                        };
                        // get dimensions and sample count
                        let size = state
                            .color_output_id
                            .or(state.depth.map(|depth| depth.image_id))
                            .map(|id| resources.image_desc(id).size())
                            .unwrap();
                        let samples = state
                            .color_temp_id
                            .map(|id| resources.image_desc(id).samples)
                            .unwrap_or(vk::SampleCountFlags::N1);
                        // color output
                        let (color_format, color_output_image_view, color_temp_image_view) =
                            if let Some(color_output_id) = state.color_output_id {
                                let color_output_desc = *resources.image_desc(color_output_id);
                                let color_output_image_view =
                                    resources.image_view(color_output_id, ImageViewDesc::default());
                                let format = color_output_desc.first_format();
                                assert_eq!(size, color_output_desc.size());
                                // color temp (if present)
                                let color_temp_image_view = if let Some(color_temp_id) = state.color_temp_id {
                                    let color_temp_desc = *resources.image_desc(color_temp_id);
                                    let color_temp_image_view =
                                        resources.image_view(color_temp_id, ImageViewDesc::default());
                                    assert_eq!(size, color_temp_desc.size());
                                    assert_eq!(format, color_temp_desc.first_format());
                                    clear_values.push(color_clear_value);
                                    Some(color_temp_image_view)
                                } else {
                                    None
                                };
                                clear_values.push(color_clear_value);
                                (Some(format), Some(color_output_image_view), color_temp_image_view)
                            } else {
                                (None, None, None)
                            };
                        // depth temp (if present)
                        let (render_pass_depth, depth_image_view) = if let Some(depth) = state.depth {
                            let depth_desc = *resources.image_desc(depth.image_id);
                            let depth_image_view = resources.image_view(depth.image_id, ImageViewDesc::default());
                            let format = depth_desc.first_format();
                            assert_eq!(size, depth_desc.size());
                            assert_eq!(samples, depth_desc.samples);
                            clear_values.push(depth_clear_value);
                            (
                                Some(RenderPassDepth {
                                    format,
                                    load_op: depth.load_op,
                                    store_op: depth.store_op,
                                }),
                                Some(depth_image_view),
                            )
                        } else {
                            (None, None)
                        };
                        let render_pass =
                            self.render_graph
                                .render_cache
                                .get_render_pass(color_format, render_pass_depth, samples);
                        let framebuffer = self.render_graph.render_cache.get_framebuffer(
                            render_pass,
                            size,
                            color_output_image_view,
                            color_temp_image_view,
                            depth_image_view,
                        );
                        let render_pass_begin_info = vk::RenderPassBeginInfo::builder()
                            .render_pass(render_pass.0)
                            .framebuffer(framebuffer.0)
                            .render_area(vk::Rect2D {
                                offset: Default::default(),
                                extent: vk::Extent2D {
                                    width: size.x,
                                    height: size.y,
                                },
                            })
                            .p_clear_values(&clear_values);
                        unsafe {
                            context.device.cmd_begin_render_pass(
                                cmd.current,
                                &render_pass_begin_info,
                                vk::SubpassContents::INLINE,
                            )
                        };
                        render_pass
                    };
                    query_pool.emit_timestamp(cmd.current, timestamp_name);
                    (callback)(
                        RenderParameterAccess::new(self.render_graph),
                        cmd.current,
                        render_pass.0,
                    );
                    unsafe { context.device.cmd_end_render_pass(cmd.current) };
                    // TODO: final layout as part of render pass
                    // record the layouts the render pass left the attachments in
                    if let Some(id) = state.color_output_id {
                        self.render_graph
                            .resources
                            .lock()
                            .unwrap()
                            .image_resource_mut(id)
                            .force_usage(ImageUsage::COLOR_ATTACHMENT_WRITE);
                    }
                    if let Some(depth) = state.depth {
                        if depth.store_op == AttachmentStoreOp::Store {
                            self.render_graph
                                .resources
                                .lock()
                                .unwrap()
                                .image_resource_mut(depth.image_id)
                                .force_usage(ImageUsage::DEPTH_ATTACHMENT);
                        }
                    }
                }
            }
            if is_debug {
                unsafe { context.instance.cmd_end_debug_utils_label_ext(cmd.current) };
            }
        }
        {
            let mut resources = self.render_graph.resources.lock().unwrap();
            // free temporaries
            for id in self.temporaries.drain(..) {
                match id {
                    TemporaryId::Buffer(id) => resources.remove_buffer(id),
                    TemporaryId::Image(id) => resources.remove_image(id),
                }
            }
            // emit any last barriers (usually at least the swap chain image)
            for final_usage in self.final_usage.drain(..) {
                match final_usage {
                    FinalUsage::Buffer { id, usage } => {
                        resources.transition_buffer_usage(id, usage, context, cmd.current);
                    }
                    FinalUsage::Image { id, usage } => {
                        cmd.notify_image_use(id, query_pool);
                        resources.transition_image_usage(id, usage, context, cmd.current);
                    }
                }
            }
        }
    }
}
| rust | MIT | 42fc4b496cce21e907eca7112e6d6334d35fc41a | 2026-01-04T20:23:56.526296Z | false |
sjb3d/caldera | https://github.com/sjb3d/caldera/blob/42fc4b496cce21e907eca7112e6d6334d35fc41a/caldera/src/pipeline_cache.rs | caldera/src/pipeline_cache.rs | use crate::context::*;
use arrayvec::ArrayVec;
use notify::{DebouncedEvent, RecommendedWatcher, RecursiveMode, Watcher};
use spark::{vk, Builder, Device};
use std::{
cell::RefCell,
collections::HashMap,
convert::TryInto,
ffi::CStr,
fs::File,
io::{self, prelude::*},
mem,
path::{Path, PathBuf},
ptr, slice,
sync::{mpsc, Arc, Mutex},
thread::{self, JoinHandle},
time::Duration,
};
/// Reads a binary file and interprets it as a stream of little-endian 32-bit words
/// (the on-disk format of SPIR-V shader binaries).
///
/// # Errors
///
/// Returns an error if the file cannot be opened or read, or if its length is not a
/// multiple of 4 bytes. (The previous implementation indexed `c[3]` on a short final
/// chunk from `chunks(4)`, panicking on truncated files instead of reporting an error.)
fn read_file_words(path: &Path) -> io::Result<Vec<u32>> {
    let mut file = File::open(path)?;
    let mut bytes = Vec::new();
    file.read_to_end(&mut bytes)?;
    if bytes.len() % 4 != 0 {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            "file length is not a multiple of 4 bytes",
        ));
    }
    Ok(bytes
        .chunks_exact(4)
        .map(|c| u32::from_le_bytes(c.try_into().unwrap()))
        .collect())
}
/// Loads a SPIR-V file and creates a Vulkan shader module from it.
/// Returns `None` if the file cannot be read or module creation fails.
fn load_shader_module(device: &Device, path: &Path) -> Option<vk::ShaderModule> {
    let words = read_file_words(path).ok()?;
    let create_info = vk::ShaderModuleCreateInfo {
        code_size: mem::size_of::<u32>() * words.len(),
        p_code: words.as_ptr(),
        ..Default::default()
    };
    let module = unsafe { device.create_shader_module(&create_info, None) };
    module.ok()
}
/// Background machinery for hot-reloading shaders: the filesystem watcher and the
/// thread that services its events. Dropped together to shut the thread down.
struct ShaderReloader {
    watcher: RecommendedWatcher,
    join_handle: JoinHandle<()>,
}
/// Loads shader modules from disk (relative to `base_path`) and hot-reloads them
/// when the files change on disk.
struct ShaderLoader {
    context: SharedContext,
    base_path: PathBuf,
    // `Some` while the watcher thread is running; taken on drop to join it.
    reloader: Option<ShaderReloader>,
    // Modules promoted for use this frame, keyed by path relative to `base_path`.
    current_shaders: HashMap<PathBuf, vk::ShaderModule>,
    // Modules produced by the reloader thread, promoted in `transfer_new_shaders`.
    new_shaders: Arc<Mutex<HashMap<PathBuf, vk::ShaderModule>>>,
}
impl ShaderLoader {
    /// Creates a loader rooted at `base_path` and spawns a watcher thread that
    /// recompiles shader modules whenever files under that path change.
    pub fn new<P: AsRef<Path>>(context: &SharedContext, base_path: P) -> Self {
        let base_path = base_path.as_ref().to_owned()
        let (tx, rx) = mpsc::channel();
        // debounce bursts of filesystem events (editors often write several times)
        let mut watcher = notify::watcher(tx, Duration::from_millis(500)).unwrap();
        watcher.watch(&base_path, RecursiveMode::Recursive).unwrap();
        let current_shaders = HashMap::new();
        let new_shaders = Arc::new(Mutex::new(HashMap::new()));
        let join_handle = thread::spawn({
            let context = SharedContext::clone(context);
            let new_shaders = Arc::clone(&new_shaders);
            // keep both forms: canonical for prefix-stripping, short for re-joining
            let short_base_path = base_path.clone();
            let full_base_path = base_path.canonicalize().unwrap();
            move || {
                // loop ends when the watcher (sender) is dropped in `Drop`
                while let Ok(event) = rx.recv() {
                    if let DebouncedEvent::Create(path_buf)
                    | DebouncedEvent::Write(path_buf)
                    | DebouncedEvent::Rename(_, path_buf) = event
                    {
                        if let Ok(relative_path) = path_buf.canonicalize().unwrap().strip_prefix(&full_base_path) {
                            if let Some(shader) =
                                load_shader_module(&context.device, &short_base_path.join(relative_path))
                            {
                                let mut new_shaders = new_shaders.lock().unwrap();
                                println!("reloaded shader: {:?}", relative_path);
                                new_shaders.insert(relative_path.to_owned(), shader);
                            }
                        }
                    }
                }
                println!("shader reload stopping!");
            }
        });
        Self {
            context: SharedContext::clone(context),
            base_path,
            reloader: Some(ShaderReloader { watcher, join_handle }),
            current_shaders,
            new_shaders,
        }
    }
    /// Returns the module for `relative_path`, checking current shaders, then newly
    /// reloaded ones, then loading from disk on a miss (caching the result).
    pub fn get_shader<P: AsRef<Path>>(&self, relative_path: P) -> Option<vk::ShaderModule> {
        let relative_path = relative_path.as_ref();
        self.current_shaders.get(relative_path).copied().or_else(|| {
            let mut new_shaders = self.new_shaders.lock().unwrap();
            new_shaders.get(relative_path).copied().or_else(|| {
                load_shader_module(&self.context.device, &self.base_path.join(relative_path)).map(|shader| {
                    new_shaders.insert(relative_path.to_owned(), shader);
                    shader
                })
            })
        })
    }
    /// Promotes shaders produced by the reloader thread into the current set.
    /// NOTE(review): replaced modules are not destroyed here — presumably they may
    /// still be referenced by cached pipelines; confirm against the pipeline cache.
    pub fn transfer_new_shaders(&mut self) {
        let mut new_shaders = self.new_shaders.lock().unwrap();
        for (k, v) in new_shaders.drain() {
            self.current_shaders.insert(k, v);
        }
    }
    /// Emits statistics rows for the debug UI.
    pub fn ui_stats_table_rows(&self, ui: &mut egui::Ui) {
        ui.label("shader");
        ui.label(format!("{}", self.current_shaders.len()));
        ui.end_row();
    }
}
impl Drop for ShaderLoader {
    fn drop(&mut self) {
        // drop the watcher first so its channel sender closes, which ends the
        // reloader thread's recv loop, then join that thread
        if let Some(ShaderReloader { watcher, join_handle }) = self.reloader.take() {
            drop(watcher);
            join_handle.join().unwrap();
        }
        // destroy every shader module we still own (both pending and current)
        for (_, shader) in self
            .new_shaders
            .lock()
            .unwrap()
            .drain()
            .chain(self.current_shaders.drain())
        {
            unsafe {
                self.context.device.destroy_shader_module(shader, None);
            }
        }
    }
}
/// Maximum number of descriptor sets a single pipeline layout can reference here.
const MAX_DESCRIPTOR_SETS_PER_PIPELINE: usize = 4;
/// Cache key for a pipeline layout: the ordered descriptor set layouts it binds.
#[derive(Clone, PartialEq, Eq, Hash)]
struct PipelineLayoutKey(ArrayVec<vk::DescriptorSetLayout, MAX_DESCRIPTOR_SETS_PER_PIPELINE>);
/// Caches pipeline layouts by their descriptor set layouts; owns and destroys them.
struct PipelineLayoutCache {
    context: SharedContext,
    layouts: HashMap<PipelineLayoutKey, vk::PipelineLayout>,
}
impl PipelineLayoutCache {
fn new(context: &SharedContext) -> Self {
Self {
context: SharedContext::clone(context),
layouts: HashMap::new(),
}
}
fn get_layout(&mut self, descriptor_set_layouts: &[vk::DescriptorSetLayout]) -> vk::PipelineLayout {
let device = &self.context.device;
let key = PipelineLayoutKey(descriptor_set_layouts.try_into().unwrap());
*self.layouts.entry(key).or_insert_with(|| {
let create_info = vk::PipelineLayoutCreateInfo::builder().p_set_layouts(descriptor_set_layouts);
unsafe { device.create_pipeline_layout(&create_info, None) }.unwrap()
})
}
}
impl Drop for PipelineLayoutCache {
    /// Destroys every cached pipeline layout.
    fn drop(&mut self) {
        let device = &self.context.device;
        self.layouts
            .drain()
            .for_each(|(_, layout)| unsafe { device.destroy_pipeline_layout(layout, None) });
    }
}
/// The non-shader state of a graphics pipeline; used as part of the cache key.
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct GraphicsPipelineState {
    vertex_input_bindings: ArrayVec<vk::VertexInputBindingDescription, 4>,
    vertex_input_attributes: ArrayVec<vk::VertexInputAttributeDescription, 8>,
    topology: vk::PrimitiveTopology,
    render_pass: vk::RenderPass, // TODO: replace with state for *compatible* pass
    samples: vk::SampleCountFlags,
    depth_compare_op: vk::CompareOp,
}
impl GraphicsPipelineState {
pub fn new(render_pass: vk::RenderPass, samples: vk::SampleCountFlags) -> Self {
Self {
vertex_input_bindings: ArrayVec::new(),
vertex_input_attributes: ArrayVec::new(),
topology: vk::PrimitiveTopology::TRIANGLE_LIST,
render_pass,
samples,
depth_compare_op: vk::CompareOp::GREATER_OR_EQUAL,
}
}
pub fn with_vertex_inputs(
mut self,
bindings: &[vk::VertexInputBindingDescription],
attributes: &[vk::VertexInputAttributeDescription],
) -> Self {
self.vertex_input_bindings.clear();
self.vertex_input_bindings.try_extend_from_slice(bindings).unwrap();
self.vertex_input_attributes.clear();
self.vertex_input_attributes.try_extend_from_slice(attributes).unwrap();
self
}
pub fn with_topology(mut self, topology: vk::PrimitiveTopology) -> Self {
self.topology = topology;
self
}
pub fn with_depth_compare_op(mut self, compare_op: vk::CompareOp) -> Self {
self.depth_compare_op = compare_op;
self
}
}
/// A ray tracing shader group described by shader file names (resolved to modules
/// via the shader loader).
pub enum RayTracingShaderGroupDesc<'a> {
    Raygen(&'a str),
    Miss(&'a str),
    Hit {
        closest_hit: &'a str,
        any_hit: Option<&'a str>,
        intersection: Option<&'a str>,
    },
    Callable(&'a str),
}
/// A ray tracing shader group with names resolved to shader modules; hashable so it
/// can serve as part of the pipeline cache key.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
enum RayTracingShaderGroup {
    Raygen(vk::ShaderModule),
    Miss(vk::ShaderModule),
    Hit {
        closest_hit: vk::ShaderModule,
        any_hit: Option<vk::ShaderModule>,
        intersection: Option<vk::ShaderModule>,
    },
    Callable(vk::ShaderModule),
}
/// The typed value of a single specialization constant.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum SpecializationConstantData {
    Bool32(vk::Bool32),
    U32(u32),
}
impl SpecializationConstantData {
    /// Raw little-endian bytes of the value, as written into the specialization
    /// data blob passed to Vulkan.
    fn as_bytes(&self) -> &[u8] {
        match self {
            SpecializationConstantData::Bool32(value) => bytemuck::bytes_of(value),
            SpecializationConstantData::U32(value) => bytemuck::bytes_of(value),
        }
    }
}
// Convenience conversions so call sites can pass plain `bool`/`u32` values.
impl From<bool> for SpecializationConstantData {
    fn from(value: bool) -> Self {
        SpecializationConstantData::Bool32(if value { vk::TRUE } else { vk::FALSE })
    }
}
impl From<u32> for SpecializationConstantData {
    fn from(value: u32) -> Self {
        SpecializationConstantData::U32(value)
    }
}
/// A specialization constant: the SPIR-V `constant_id` plus its value.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct SpecializationConstant {
    pub id: u32,
    pub data: SpecializationConstantData,
}
impl SpecializationConstant {
    /// Creates a constant from anything convertible to [`SpecializationConstantData`].
    pub fn new(id: u32, data: impl Into<SpecializationConstantData>) -> Self {
        Self { id, data: data.into() }
    }
}
/// Owned backing storage for a `vk::SpecializationInfo`: the map entries plus the
/// packed constant bytes they point into. Must outlive the info built by [`Self::info`].
struct SpecializationData {
    map_entries: Vec<vk::SpecializationMapEntry>,
    store: Vec<u8>,
}
impl SpecializationData {
    /// Packs the constants into a contiguous byte store with one map entry each.
    fn new(constants: &[SpecializationConstant]) -> Self {
        // exactly one entry per constant, so reserve up front
        let mut map_entries = Vec::with_capacity(constants.len());
        let mut store = Vec::new();
        for constant in constants {
            let bytes = constant.data.as_bytes();
            map_entries.push(vk::SpecializationMapEntry {
                constant_id: constant.id,
                offset: store.len() as u32,
                size: bytes.len(),
            });
            store.extend_from_slice(bytes);
        }
        Self { map_entries, store }
    }
    /// Builds a `vk::SpecializationInfo` borrowing this data.
    fn info(&self) -> <vk::SpecializationInfo as Builder>::Type {
        vk::SpecializationInfo::builder()
            .p_map_entries(&self.map_entries)
            .p_data(&self.store)
    }
}
/// Describes the vertex-processing stages of a graphics pipeline: either a classic
/// vertex shader or an NV task + mesh shader pair.
pub enum VertexShaderDesc<'a> {
    Standard {
        vertex: &'a str,
        // TODO: tesellation/geometry shader names
    },
    Mesh {
        task: &'a str,
        task_constants: &'a [SpecializationConstant],
        // optional required subgroup size for the task shader
        task_subgroup_size: Option<u32>,
        mesh: &'a str,
        mesh_constants: &'a [SpecializationConstant],
    },
}
impl<'a> VertexShaderDesc<'a> {
    /// Classic vertex shader by file name.
    pub fn standard(vertex: &'a str) -> Self {
        Self::Standard { vertex }
    }
    /// Task + mesh shader pair by file names, with per-stage specialization constants.
    pub fn mesh(
        task: &'a str,
        task_constants: &'a [SpecializationConstant],
        task_subgroup_size: Option<u32>,
        mesh: &'a str,
        mesh_constants: &'a [SpecializationConstant],
    ) -> Self {
        Self::Mesh {
            task,
            task_constants,
            task_subgroup_size,
            mesh,
            mesh_constants,
        }
    }
}
/// [`VertexShaderDesc`] with shader names resolved to modules and constants owned,
/// so it can be hashed as part of the pipeline cache key.
#[derive(Clone, PartialEq, Eq, Hash)]
enum VertexShaderKey {
    Standard {
        vertex: vk::ShaderModule,
    },
    Mesh {
        task: vk::ShaderModule,
        task_constants: Vec<SpecializationConstant>,
        task_subgroup_size: Option<u32>,
        mesh: vk::ShaderModule,
        mesh_constants: Vec<SpecializationConstant>,
    },
}
/// Key identifying a cached pipeline of any kind (compute, graphics, UI, ray tracing).
#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Hash)]
enum PipelineCacheKey {
    Compute {
        pipeline_layout: vk::PipelineLayout,
        shader: vk::ShaderModule,
        constants: Vec<SpecializationConstant>,
    },
    Graphics {
        pipeline_layout: vk::PipelineLayout,
        vertex_shader: VertexShaderKey,
        fragment_shader: vk::ShaderModule,
        state: GraphicsPipelineState,
    },
    Ui {
        render_pass: vk::RenderPass,
        samples: vk::SampleCountFlags,
    },
    RayTracing {
        pipeline_layout: vk::PipelineLayout,
        shader_groups: Vec<RayTracingShaderGroup>,
    },
}
/// Caches Vulkan pipelines (and their layouts), loading shader modules from disk
/// with hot reload support.
pub struct PipelineCache {
    context: SharedContext,
    shader_loader: ShaderLoader,
    // RefCell: looked up through `&self` during command recording.
    layout_cache: RefCell<PipelineLayoutCache>,
    // The driver-level vk::PipelineCache used for all pipeline creation.
    pipeline_cache: vk::PipelineCache,
    pipelines: RefCell<HashMap<PipelineCacheKey, vk::Pipeline>>,
}
impl PipelineCache {
    /// Creates the cache, loading shaders from `path` (watched for hot reload).
    pub fn new<P: AsRef<Path>>(context: &SharedContext, path: P) -> Self {
        let layout_cache = PipelineLayoutCache::new(context);
        let pipeline_cache = {
            // TODO: load from file
            let create_info = vk::PipelineCacheCreateInfo {
                // skip internal locking when the device supports external synchronization
                flags: if context
                    .physical_device_features
                    .pipeline_creation_cache_control
                    .pipeline_creation_cache_control
                    .as_bool()
                {
                    vk::PipelineCacheCreateFlags::EXTERNALLY_SYNCHRONIZED_EXT
                } else {
                    vk::PipelineCacheCreateFlags::empty()
                },
                ..Default::default()
            };
            unsafe { context.device.create_pipeline_cache(&create_info, None) }.unwrap()
        };
        Self {
            context: SharedContext::clone(context),
            shader_loader: ShaderLoader::new(context, path),
            layout_cache: RefCell::new(layout_cache),
            pipeline_cache,
            pipelines: RefCell::new(HashMap::new()),
        }
    }
    /// Promotes any shaders reloaded since last frame into the active set.
    pub fn begin_frame(&mut self) {
        self.shader_loader.transfer_new_shaders();
    }
    /// Returns a cached pipeline layout for the given descriptor set layouts.
    pub fn get_pipeline_layout(&self, descriptor_set_layouts: &[vk::DescriptorSetLayout]) -> vk::PipelineLayout {
        self.layout_cache.borrow_mut().get_layout(descriptor_set_layouts)
    }
    /// Returns a cached compute pipeline for the named shader and specialization
    /// constants, creating it on first use. Panics if the shader cannot be loaded
    /// or pipeline creation fails.
    pub fn get_compute(
        &self,
        shader_name: &str,
        constants: &[SpecializationConstant],
        pipeline_layout: vk::PipelineLayout,
    ) -> vk::Pipeline {
        let shader = self.shader_loader.get_shader(shader_name).unwrap();
        let key = PipelineCacheKey::Compute {
            pipeline_layout,
            shader,
            constants: constants.to_vec(),
        };
        *self.pipelines.borrow_mut().entry(key).or_insert_with(|| {
            let shader_entry_name = CStr::from_bytes_with_nul(b"main\0").unwrap();
            // specialization_data must stay alive until pipeline creation below,
            // since the create info holds raw pointers into it
            let specialization_data = SpecializationData::new(constants);
            let specialization_info = vk::SpecializationInfo::builder()
                .p_map_entries(&specialization_data.map_entries)
                .p_data(&specialization_data.store);
            let pipeline_create_info = vk::ComputePipelineCreateInfo {
                stage: vk::PipelineShaderStageCreateInfo {
                    stage: vk::ShaderStageFlags::COMPUTE,
                    module: shader,
                    p_name: shader_entry_name.as_ptr(),
                    p_specialization_info: &*specialization_info,
                    ..Default::default()
                },
                layout: pipeline_layout,
                ..Default::default()
            };
            unsafe {
                self.context
                    .device
                    .create_compute_pipelines_single(self.pipeline_cache, &pipeline_create_info, None)
            }
            // treat any non-SUCCESS result (e.g. PIPELINE_COMPILE_REQUIRED) as an error
            .and_then(|(res, pipeline)| match res {
                vk::Result::SUCCESS => Ok(pipeline),
                _ => Err(res),
            })
            .unwrap()
        })
    }
pub fn get_graphics(
&self,
vertex_shader_desc: VertexShaderDesc,
fragment_shader_name: &str,
pipeline_layout: vk::PipelineLayout,
state: &GraphicsPipelineState,
) -> vk::Pipeline {
let vertex_shader = match vertex_shader_desc {
VertexShaderDesc::Standard { vertex } => VertexShaderKey::Standard {
vertex: self.shader_loader.get_shader(vertex).unwrap(),
},
VertexShaderDesc::Mesh {
task,
task_constants,
task_subgroup_size,
mesh,
mesh_constants,
} => VertexShaderKey::Mesh {
task: self.shader_loader.get_shader(task).unwrap(),
task_constants: task_constants.to_vec(),
task_subgroup_size,
mesh: self.shader_loader.get_shader(mesh).unwrap(),
mesh_constants: mesh_constants.to_vec(),
},
};
let fragment_shader = self.shader_loader.get_shader(fragment_shader_name).unwrap();
let key = PipelineCacheKey::Graphics {
pipeline_layout,
vertex_shader: vertex_shader.clone(),
fragment_shader,
state: state.clone(),
};
*self.pipelines.borrow_mut().entry(key).or_insert_with(|| {
let shader_entry_name = CStr::from_bytes_with_nul(b"main\0").unwrap();
let mut specialization_data = ArrayVec::<SpecializationData, 2>::new();
let mut specialization_info = ArrayVec::<vk::SpecializationInfo, 2>::new();
let mut required_subgroup_size_create_info =
ArrayVec::<vk::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT, 1>::new();
let mut shader_stage_create_info = ArrayVec::<vk::PipelineShaderStageCreateInfo, 3>::new();
match vertex_shader {
VertexShaderKey::Standard { vertex } => {
shader_stage_create_info.push(vk::PipelineShaderStageCreateInfo {
stage: vk::ShaderStageFlags::VERTEX,
module: vertex,
p_name: shader_entry_name.as_ptr(),
..Default::default()
});
}
VertexShaderKey::Mesh {
task,
task_constants,
task_subgroup_size,
mesh,
mesh_constants,
} => {
shader_stage_create_info.push({
specialization_data.push(SpecializationData::new(&task_constants));
specialization_info.push(*specialization_data.last().unwrap().info());
let mut p_next = ptr::null();
let mut flags = vk::PipelineShaderStageCreateFlags::empty();
if let Some(task_subgroup_size) = task_subgroup_size {
required_subgroup_size_create_info.push(
vk::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT {
required_subgroup_size: task_subgroup_size,
..Default::default()
},
);
p_next = required_subgroup_size_create_info.last().unwrap() as *const _ as *const _;
flags |= vk::PipelineShaderStageCreateFlags::REQUIRE_FULL_SUBGROUPS_EXT;
};
vk::PipelineShaderStageCreateInfo {
p_next,
flags,
stage: vk::ShaderStageFlags::TASK_NV,
module: task,
p_name: shader_entry_name.as_ptr(),
p_specialization_info: specialization_info.last().unwrap(),
..Default::default()
}
});
shader_stage_create_info.push({
specialization_data.push(SpecializationData::new(&mesh_constants));
specialization_info.push(*specialization_data.last().unwrap().info());
vk::PipelineShaderStageCreateInfo {
stage: vk::ShaderStageFlags::MESH_NV,
module: mesh,
p_name: shader_entry_name.as_ptr(),
p_specialization_info: specialization_info.last().unwrap(),
..Default::default()
}
});
}
}
shader_stage_create_info.push(vk::PipelineShaderStageCreateInfo {
stage: vk::ShaderStageFlags::FRAGMENT,
module: fragment_shader,
p_name: shader_entry_name.as_ptr(),
..Default::default()
});
let vertex_input_state_create_info = vk::PipelineVertexInputStateCreateInfo::builder()
.p_vertex_attribute_descriptions(&state.vertex_input_attributes)
.p_vertex_binding_descriptions(&state.vertex_input_bindings);
let input_assembly_state_create_info = vk::PipelineInputAssemblyStateCreateInfo {
topology: state.topology,
..Default::default()
};
let viewport_state_create_info = vk::PipelineViewportStateCreateInfo {
viewport_count: 1,
scissor_count: 1,
..Default::default()
};
let rasterization_state_create_info = vk::PipelineRasterizationStateCreateInfo {
polygon_mode: vk::PolygonMode::FILL,
cull_mode: vk::CullModeFlags::BACK,
line_width: 1.0,
..Default::default()
};
let multisample_state_create_info = vk::PipelineMultisampleStateCreateInfo {
rasterization_samples: state.samples,
..Default::default()
};
let depth_stencil_state = vk::PipelineDepthStencilStateCreateInfo::builder()
.depth_test_enable(true)
.depth_write_enable(true)
.depth_compare_op(state.depth_compare_op);
let color_blend_attachment_state = vk::PipelineColorBlendAttachmentState {
color_write_mask: vk::ColorComponentFlags::R
| vk::ColorComponentFlags::G
| vk::ColorComponentFlags::B
| vk::ColorComponentFlags::A,
..Default::default()
};
let color_blend_state_create_info = vk::PipelineColorBlendStateCreateInfo::builder()
.p_attachments(slice::from_ref(&color_blend_attachment_state));
let dynamic_states = [vk::DynamicState::VIEWPORT, vk::DynamicState::SCISSOR];
let pipeline_dynamic_state_create_info =
vk::PipelineDynamicStateCreateInfo::builder().p_dynamic_states(&dynamic_states);
let pipeline_create_info = vk::GraphicsPipelineCreateInfo::builder()
.p_stages(&shader_stage_create_info)
.p_vertex_input_state(Some(&vertex_input_state_create_info))
.p_input_assembly_state(Some(&input_assembly_state_create_info))
.p_viewport_state(Some(&viewport_state_create_info))
.p_rasterization_state(Some(&rasterization_state_create_info))
.p_multisample_state(Some(&multisample_state_create_info))
.p_depth_stencil_state(Some(&depth_stencil_state))
.p_color_blend_state(Some(&color_blend_state_create_info))
.p_dynamic_state(Some(&pipeline_dynamic_state_create_info))
.layout(pipeline_layout)
.render_pass(state.render_pass);
unsafe {
self.context
.device
.create_graphics_pipelines_single(self.pipeline_cache, &pipeline_create_info, None)
}
.and_then(|(res, pipeline)| match res {
vk::Result::SUCCESS => Ok(pipeline),
_ => Err(res),
})
.unwrap()
})
}
/// Returns the pipeline used to render the egui UI, creating and caching it
/// on first use.
///
/// The cache key is the (render_pass, samples) pair, so a separate pipeline
/// is built for each distinct combination.
pub fn get_ui(
    &self,
    egui_renderer: &spark_egui::Renderer,
    render_pass: vk::RenderPass,
    samples: vk::SampleCountFlags,
) -> vk::Pipeline {
    let key = PipelineCacheKey::Ui { render_pass, samples };
    let mut pipelines = self.pipelines.borrow_mut();
    if let Some(pipeline) = pipelines.get(&key) {
        return *pipeline;
    }
    // Cache miss: delegate pipeline creation to the egui renderer, then
    // remember the handle for subsequent frames.
    let pipeline = egui_renderer.create_pipeline(&self.context.device, render_pass, samples);
    pipelines.insert(key, pipeline);
    pipeline
}
/// Returns a cached ray tracing pipeline for the given shader group
/// descriptions and pipeline layout, creating the Vulkan pipeline on first
/// use.
pub fn get_ray_tracing(
    &self,
    group_desc: &[RayTracingShaderGroupDesc],
    pipeline_layout: vk::PipelineLayout,
) -> vk::Pipeline {
    // Resolve every shader name in the descriptions to a loaded shader
    // module handle. A shader that fails to load is treated as a bug
    // (hence the unwraps).
    let shader_groups: Vec<_> = group_desc
        .iter()
        .map(|desc| match desc {
            RayTracingShaderGroupDesc::Raygen(raygen) => {
                RayTracingShaderGroup::Raygen(self.shader_loader.get_shader(raygen).unwrap())
            }
            RayTracingShaderGroupDesc::Miss(miss) => {
                RayTracingShaderGroup::Miss(self.shader_loader.get_shader(miss).unwrap())
            }
            RayTracingShaderGroupDesc::Hit {
                closest_hit,
                any_hit,
                intersection,
            } => RayTracingShaderGroup::Hit {
                closest_hit: self.shader_loader.get_shader(closest_hit).unwrap(),
                // Any-hit and intersection stages are optional per hit group.
                any_hit: any_hit.map(|name| self.shader_loader.get_shader(name).unwrap()),
                intersection: intersection.map(|name| self.shader_loader.get_shader(name).unwrap()),
            },
            RayTracingShaderGroupDesc::Callable(callable) => {
                RayTracingShaderGroup::Callable(self.shader_loader.get_shader(callable).unwrap())
            }
        })
        .collect();
    // The resolved module handles (not the source names) form the cache key,
    // so a reloaded/recompiled shader yields a new pipeline.
    let key = PipelineCacheKey::RayTracing {
        pipeline_layout,
        shader_groups: shader_groups.clone(),
    };
    *self.pipelines.borrow_mut().entry(key).or_insert_with(|| {
        let shader_entry_name = CStr::from_bytes_with_nul(b"main\0").unwrap();
        let mut shader_stage_create_info = Vec::new();
        // Returns the stage-array index for a (stage, module) pair, appending
        // a new PipelineShaderStageCreateInfo only if an identical pair has
        // not been added yet — the same module can be shared by many groups.
        let mut get_stage_index = |stage, module| {
            if let Some(i) = shader_stage_create_info.iter().enumerate().find_map(
                |(i, info): (usize, &vk::PipelineShaderStageCreateInfo)| {
                    if stage == info.stage && module == info.module {
                        Some(i as u32)
                    } else {
                        None
                    }
                },
            ) {
                i
            } else {
                shader_stage_create_info.push(vk::PipelineShaderStageCreateInfo {
                    stage,
                    module,
                    p_name: shader_entry_name.as_ptr(),
                    ..Default::default()
                });
                (shader_stage_create_info.len() - 1) as u32
            }
        };
        // Build one group create-info per resolved group, wiring in the stage
        // indices produced above. Slots a group does not use are marked
        // SHADER_UNUSED_KHR as the spec requires.
        let shader_group_create_info: Vec<_> = shader_groups
            .iter()
            .map(|group| match group {
                RayTracingShaderGroup::Raygen(raygen) => vk::RayTracingShaderGroupCreateInfoKHR {
                    ty: vk::RayTracingShaderGroupTypeKHR::GENERAL,
                    general_shader: get_stage_index(vk::ShaderStageFlags::RAYGEN_KHR, *raygen),
                    closest_hit_shader: vk::SHADER_UNUSED_KHR,
                    any_hit_shader: vk::SHADER_UNUSED_KHR,
                    intersection_shader: vk::SHADER_UNUSED_KHR,
                    ..Default::default()
                },
                RayTracingShaderGroup::Miss(miss) => vk::RayTracingShaderGroupCreateInfoKHR {
                    ty: vk::RayTracingShaderGroupTypeKHR::GENERAL,
                    general_shader: get_stage_index(vk::ShaderStageFlags::MISS_KHR, *miss),
                    closest_hit_shader: vk::SHADER_UNUSED_KHR,
                    any_hit_shader: vk::SHADER_UNUSED_KHR,
                    intersection_shader: vk::SHADER_UNUSED_KHR,
                    ..Default::default()
                },
                RayTracingShaderGroup::Hit {
                    closest_hit,
                    any_hit,
                    intersection,
                } => vk::RayTracingShaderGroupCreateInfoKHR {
                    // A custom intersection shader implies a procedural hit
                    // group; otherwise built-in triangle intersection is used.
                    ty: if intersection.is_some() {
                        vk::RayTracingShaderGroupTypeKHR::PROCEDURAL_HIT_GROUP
                    } else {
                        vk::RayTracingShaderGroupTypeKHR::TRIANGLES_HIT_GROUP
                    },
                    general_shader: vk::SHADER_UNUSED_KHR,
                    closest_hit_shader: get_stage_index(vk::ShaderStageFlags::CLOSEST_HIT_KHR, *closest_hit),
                    any_hit_shader: any_hit.map_or(vk::SHADER_UNUSED_KHR, |module| {
                        get_stage_index(vk::ShaderStageFlags::ANY_HIT_KHR, module)
                    }),
                    intersection_shader: intersection.map_or(vk::SHADER_UNUSED_KHR, |module| {
                        get_stage_index(vk::ShaderStageFlags::INTERSECTION_KHR, module)
                    }),
                    ..Default::default()
                },
                RayTracingShaderGroup::Callable(callable) => vk::RayTracingShaderGroupCreateInfoKHR {
                    ty: vk::RayTracingShaderGroupTypeKHR::GENERAL,
                    general_shader: get_stage_index(vk::ShaderStageFlags::CALLABLE_KHR, *callable),
                    closest_hit_shader: vk::SHADER_UNUSED_KHR,
                    any_hit_shader: vk::SHADER_UNUSED_KHR,
                    intersection_shader: vk::SHADER_UNUSED_KHR,
                    ..Default::default()
                },
            })
            .collect();
        // NOTE(review): recursion depth 0 — presumably rays are only traced
        // non-recursively (or via ray queries); confirm against the shaders.
        let pipeline_create_info = vk::RayTracingPipelineCreateInfoKHR::builder()
            .p_stages(&shader_stage_create_info)
            .p_groups(&shader_group_create_info)
            .layout(pipeline_layout)
            .max_pipeline_ray_recursion_depth(0);
        unsafe {
            self.context.device.create_ray_tracing_pipelines_khr_single(
                vk::DeferredOperationKHR::null(),
                self.pipeline_cache,
                &pipeline_create_info,
                None,
            )
        }
        // Any non-SUCCESS result (including deferred-operation codes) is
        // treated as a hard failure and panics below.
        .and_then(|(res, pipeline)| match res {
            vk::Result::SUCCESS => Ok(pipeline),
            _ => Err(res),
        })
        .unwrap()
    })
}
/// Appends this cache's statistics rows (the shader loader's rows followed by
/// the number of cached pipelines) to a two-column egui grid.
pub fn ui_stats_table_rows(&self, ui: &mut egui::Ui) {
    self.shader_loader.ui_stats_table_rows(ui);
    ui.label("pipeline");
    // Read-only access: a shared borrow suffices (the original took an
    // unnecessary exclusive `borrow_mut`, which would panic if any shared
    // borrow of the RefCell were live).
    ui.label(self.pipelines.borrow().len().to_string());
    ui.end_row();
}
}
impl Drop for PipelineCache {
    /// Destroys every cached pipeline and then the Vulkan pipeline cache
    /// object itself.
    fn drop(&mut self) {
        let device = &self.context.device;
        // `drop` has `&mut self`, so use `RefCell::get_mut` for direct access
        // instead of `borrow_mut` — no runtime borrow-flag check needed.
        for (_, pipeline) in self.pipelines.get_mut().drain() {
            // SAFETY: each handle was created with `device` by this cache and
            // the map entry is drained here, so it is never used again.
            unsafe {
                device.destroy_pipeline(pipeline, None);
            }
        }
        // SAFETY: the pipeline cache handle was created with `device`; all
        // pipelines derived from it were destroyed above.
        unsafe {
            // TODO: save to file
            device.destroy_pipeline_cache(self.pipeline_cache, None)
        }
    }
}
| rust | MIT | 42fc4b496cce21e907eca7112e6d6334d35fc41a | 2026-01-04T20:23:56.526296Z | false |
sjb3d/caldera | https://github.com/sjb3d/caldera/blob/42fc4b496cce21e907eca7112e6d6334d35fc41a/caldera/src/lib.rs | caldera/src/lib.rs | mod allocator;
mod app_base;
mod barrier;
mod color_space;
mod command_buffer;
mod context;
mod descriptor;
mod heap;
mod loader;
mod maths;
mod pipeline_cache;
mod query;
mod render_cache;
mod render_graph;
mod resource;
mod swapchain;
pub mod window_surface;
pub mod prelude {
pub use caldera_macro::*;
pub use crate::allocator::*;
pub use crate::app_base::*;
pub use crate::barrier::*;
pub use crate::color_space::*;
pub use crate::command_buffer::*;
pub use crate::context::*;
pub use crate::descriptor::*;
pub use crate::loader::*;
pub use crate::maths::*;
pub use crate::pipeline_cache::*;
pub use crate::query::*;
pub use crate::render_cache::*;
pub use crate::render_graph::*;
pub use crate::resource::*;
pub use crate::swapchain::*;
pub use crate::command_name;
}
| rust | MIT | 42fc4b496cce21e907eca7112e6d6334d35fc41a | 2026-01-04T20:23:56.526296Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.