repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/graph/graph_builder.rs | libakaza/src/graph/graph_builder.rs | use std::collections::btree_map::BTreeMap;
use std::collections::HashSet;
use std::rc::Rc;
use std::sync::{Arc, Mutex};
use kelp::{hira2kata, ConvOption};
use log::trace;
use regex::Regex;
use crate::graph::lattice_graph::LatticeGraph;
use crate::graph::segmenter::SegmentationResult;
use crate::graph::word_node::WordNode;
use crate::kana_kanji::base::KanaKanjiDict;
use crate::lm::base::{SystemBigramLM, SystemUnigramLM};
use crate::user_side_data::user_data::UserData;
/// Builds a lattice graph for a reading, combining the system kana-kanji
/// dictionary, the single-term dictionary, the user's learned data and the
/// system unigram/bigram language models.
pub struct GraphBuilder<U: SystemUnigramLM, B: SystemBigramLM, KD: KanaKanjiDict> {
    system_kana_kanji_dict: KD,
    system_single_term_dict: KD,
    user_data: Arc<Mutex<UserData>>,
    system_unigram_lm: Rc<U>,
    system_bigram_lm: Rc<B>,
    // Matches a leading run of ASCII digits; used to decide whether a reading
    // should also get a dynamic number-conversion candidate.
    number_pattern: Regex,
}
impl<U: SystemUnigramLM, B: SystemBigramLM, KD: KanaKanjiDict> GraphBuilder<U, B, KD> {
    /// Create a builder from the dictionaries, shared user data and language models.
    pub fn new(
        system_kana_kanji_dict: KD,
        system_single_term_dict: KD,
        user_data: Arc<Mutex<UserData>>,
        system_unigram_lm: Rc<U>,
        system_bigram_lm: Rc<B>,
    ) -> GraphBuilder<U, B, KD> {
        // Compiled once here so `construct` never recompiles it per call.
        let number_pattern = Regex::new(r#"^[0-9]+"#).unwrap();
        GraphBuilder {
            system_kana_kanji_dict,
            system_single_term_dict,
            user_data,
            system_unigram_lm,
            system_bigram_lm,
            number_pattern,
        }
    }

    /// Build the lattice for `yomi` from the segmentation result.
    ///
    /// The lattice is keyed by each word's end position in bytes. For every
    /// segmented reading, candidates are collected in order from: the system
    /// kana-kanji dictionary, the user dictionary, the raw hiragana/katakana
    /// forms, a dynamic number-conversion entry, and — when the reading spans
    /// the whole input — the single-term dictionary.
    pub fn construct(&self, yomi: &str, words_ends_at: &SegmentationResult) -> LatticeGraph<U, B> {
        // The graph's index is the end position (in bytes) of each word.
        let mut graph: BTreeMap<i32, Vec<WordNode>> = BTreeMap::new();
        graph.insert(0, vec![WordNode::create_bos()]);
        graph.insert(
            (yomi.len() + 1) as i32,
            vec![WordNode::create_eos(yomi.len() as i32)],
        );
        for (end_pos, segmented_yomis) in words_ends_at.iter() {
            for segmented_yomi in segmented_yomis {
                let vec = graph.entry(*end_pos as i32).or_default();
                // Surfaces already registered for this reading, to avoid duplicates.
                let mut seen: HashSet<String> = HashSet::new();

                // TODO: this control flow is awkward and needs a cleanup.

                // List up the candidates found in the system kana-kanji dictionary.
                if let Some(kanjis) = self.system_kana_kanji_dict.get(segmented_yomi) {
                    for kanji in kanjis {
                        let node = WordNode::new(
                            (end_pos - segmented_yomi.len()) as i32,
                            &kanji,
                            segmented_yomi,
                            self.system_unigram_lm
                                .find((kanji.to_string() + "/" + segmented_yomi).as_str()),
                            false,
                        );
                        trace!("WordIDScore: {:?}", node.word_id_and_score);
                        vec.push(node);
                        seen.insert(kanji.to_string());
                    }
                }

                // Candidates from the user dictionary, skipping duplicates.
                if let Some(surfaces) = self.user_data.lock().unwrap().dict.get(segmented_yomi) {
                    for surface in surfaces {
                        if seen.contains(surface) {
                            continue;
                        }
                        let node = WordNode::new(
                            (end_pos - segmented_yomi.len()) as i32,
                            surface,
                            segmented_yomi,
                            self.system_unigram_lm
                                .find((surface.to_string() + "/" + segmented_yomi).as_str()),
                            false,
                        );
                        trace!("WordIDScore: {:?}", node.word_id_and_score);
                        vec.push(node);
                        seen.insert(surface.to_string());
                    }
                }

                // Register the hiragana itself and its katakana form as
                // auto-generated candidates.
                for surface in [
                    segmented_yomi,
                    hira2kata(segmented_yomi, ConvOption::default()).as_str(),
                ] {
                    if seen.contains(surface) {
                        continue;
                    }
                    // Record the surface so the two entries cannot collide:
                    // for non-hiragana input (e.g. ASCII) hira2kata returns
                    // the input unchanged, which previously pushed the same
                    // node twice because `seen` was never updated here.
                    seen.insert(surface.to_string());
                    let node = WordNode::new(
                        (end_pos - segmented_yomi.len()) as i32,
                        surface,
                        segmented_yomi,
                        None,
                        true,
                    );
                    vec.push(node);
                }

                // For numbers, add the dynamic number-conversion candidate.
                if self.number_pattern.is_match(segmented_yomi) {
                    let node = WordNode::new(
                        (end_pos - segmented_yomi.len()) as i32,
                        "(*(*(NUMBER-KANSUJI",
                        segmented_yomi,
                        None,
                        true,
                    );
                    vec.push(node);
                }

                // If the segment covers the whole input, also consult the
                // single-term dictionary.
                if segmented_yomi == yomi {
                    if let Some(surfaces) = self.system_single_term_dict.get(yomi) {
                        for surface in surfaces {
                            let node = WordNode::new(
                                (end_pos - segmented_yomi.len()) as i32,
                                &surface,
                                segmented_yomi,
                                self.system_unigram_lm
                                    .find((surface.to_string() + "/" + segmented_yomi).as_str()),
                                false,
                            );
                            vec.push(node);
                        }
                    }
                }
            }
        }
        LatticeGraph {
            graph,
            yomi: yomi.to_string(),
            user_data: self.user_data.clone(),
            system_unigram_lm: self.system_unigram_lm.clone(),
            system_bigram_lm: self.system_bigram_lm.clone(),
        }
    }
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use crate::kana_kanji::hashmap_vec::HashmapVecKanaKanjiDict;
use crate::lm::system_bigram::MarisaSystemBigramLMBuilder;
use crate::lm::system_unigram_lm::MarisaSystemUnigramLMBuilder;
use super::*;
#[test]
fn test_single_term() -> anyhow::Result<()> {
let graph_builder = GraphBuilder::new(
HashmapVecKanaKanjiDict::new(HashMap::new()),
HashmapVecKanaKanjiDict::new(HashMap::from([(
"ใใ".to_string(),
vec!["๐ฃ".to_string()],
)])),
Arc::new(Mutex::new(UserData::default())),
Rc::new(
MarisaSystemUnigramLMBuilder::default()
.set_unique_words(20)
.set_total_words(19)
.build(),
),
Rc::new(
MarisaSystemBigramLMBuilder::default()
.set_default_edge_cost(20_f32)
.build()?,
),
);
let yomi = "ใใ";
let got = graph_builder.construct(
yomi,
&SegmentationResult::new(BTreeMap::from([(6, vec!["ใใ".to_string()])])),
);
let nodes = got.node_list(6).unwrap();
let got_surfaces: Vec<String> = nodes.iter().map(|f| f.surface.to_string()).collect();
assert_eq!(
got_surfaces,
vec!["ใใ".to_string(), "ในใท".to_string(), "๐ฃ".to_string()]
);
Ok(())
}
// ใฒใใใชใใซใฟใซใใฎใจใณใใชใผใ่ชๅ็ใซๅ
ฅใใใใซใใใ
#[test]
fn test_default_terms() -> anyhow::Result<()> {
let graph_builder = GraphBuilder::new(
HashmapVecKanaKanjiDict::new(HashMap::new()),
HashmapVecKanaKanjiDict::new(HashMap::new()),
Arc::new(Mutex::new(UserData::default())),
Rc::new(
MarisaSystemUnigramLMBuilder::default()
.set_unique_words(20)
.set_total_words(19)
.build(),
),
Rc::new(
MarisaSystemBigramLMBuilder::default()
.set_default_edge_cost(20_f32)
.build()?,
),
);
let yomi = "ใ";
let got = graph_builder.construct(
yomi,
&SegmentationResult::new(BTreeMap::from([(3, vec!["ใ".to_string()])])),
);
let nodes = got.node_list(3).unwrap();
let got_surfaces: Vec<String> = nodes.iter().map(|f| f.surface.to_string()).collect();
assert_eq!(got_surfaces, vec!["ใ".to_string(), "ใน".to_string()]);
Ok(())
}
// ใฒใใใชใใซใฟใซใใใใงใซใใชๆผขๅญ่พๆธใใๆไพใใใฆใใๅ ดๅใงใใ้่คใใใชใใ
#[test]
fn test_default_terms_duplicated() -> anyhow::Result<()> {
let graph_builder = GraphBuilder::new(
HashmapVecKanaKanjiDict::new(HashMap::from([(
"ใ".to_string(),
vec!["ใ".to_string(), "ใน".to_string()],
)])),
HashmapVecKanaKanjiDict::new(HashMap::new()),
Arc::new(Mutex::new(UserData::default())),
Rc::new(
MarisaSystemUnigramLMBuilder::default()
.set_unique_words(20)
.set_total_words(19)
.build(),
),
Rc::new(
MarisaSystemBigramLMBuilder::default()
.set_default_edge_cost(20_f32)
.build()?,
),
);
let yomi = "ใ";
let got = graph_builder.construct(
yomi,
&SegmentationResult::new(BTreeMap::from([(3, vec!["ใ".to_string()])])),
);
let nodes = got.node_list(3).unwrap();
let got_surfaces: Vec<String> = nodes.iter().map(|f| f.surface.to_string()).collect();
assert_eq!(got_surfaces, vec!["ใ".to_string(), "ใน".to_string()]);
Ok(())
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/graph/candidate.rs | libakaza/src/graph/candidate.rs | use std::cmp::Ordering;
use crate::kansuji::int2kanji;
#[allow(unused_imports)]
use chrono::{DateTime, Local, TimeZone};
/// A conversion candidate: a surface form for a reading, plus its path cost.
#[derive(Debug, Clone, PartialEq)]
pub struct Candidate {
    pub surface: String,
    pub yomi: String,
    pub cost: f32,
    /// True when this candidate is a compound word. Compound words must be
    /// registered into the user dictionary at learning time.
    pub compound_word: bool,
}

impl Eq for Candidate {}

impl PartialOrd<Self> for Candidate {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Canonical form: delegate to `Ord` so the two impls can never
        // disagree (clippy::non_canonical_partial_ord_impl).
        Some(self.cmp(other))
    }
}

impl Ord for Candidate {
    fn cmp(&self, other: &Self) -> Ordering {
        // `f32::total_cmp` is a total order over f32, so sorting cannot panic
        // on NaN costs the way `partial_cmp().unwrap()` could.
        self.cost.total_cmp(&other.cost)
    }
}

impl Candidate {
    /// Lookup key in "surface/yomi" form.
    pub(crate) fn key(&self) -> String {
        self.surface.to_string() + "/" + self.yomi.as_str()
    }
}
impl Candidate {
    /// Create a plain (non-compound) candidate.
    pub fn new(yomi: &str, surface: &str, cost: f32) -> Candidate {
        Candidate {
            yomi: yomi.to_string(),
            surface: surface.to_string(),
            cost,
            compound_word: false,
        }
    }

    /// Resolve the surface, expanding dynamic entries.
    ///
    /// Surfaces starting with the magic prefix "(*(*(" are placeholders that
    /// are expanded at display time: today's date in several formats, the
    /// current time, or the reading's digits rendered as kanji numerals.
    /// Anything else is returned verbatim.
    pub fn surface_with_dynamic(&self) -> String {
        if self.surface.starts_with("(*(*(") {
            match self.surface.as_str() {
                "(*(*(TODAY-HYPHEN" => now().format("%Y-%m-%d").to_string(),
                "(*(*(TODAY-SLASH" => now().format("%Y/%m/%d").to_string(),
                "(*(*(TODAY-KANJI" => now().format("%Y年%m月%d日").to_string(),
                "(*(*(NOW-KANJI" => now().format("%H時%M分").to_string(),
                // The reading is expected to be the digit string itself; a
                // parse failure surfaces the error text as the candidate.
                "(*(*(NUMBER-KANSUJI" => match self.yomi.parse::<i64>() {
                    Ok(n) => int2kanji(n),
                    Err(e) => e.to_string(),
                },
                _ => "不明な動的変換: ".to_string() + self.surface.as_str(),
            }
        } else {
            self.surface.to_string()
        }
    }
}
/// Current local time (production build).
#[cfg(not(test))]
fn now() -> DateTime<Local> {
    Local::now()
}

/// Fixed timestamp (2023-01-16 15:14:16) so tests are deterministic.
#[cfg(test)]
fn now() -> DateTime<Local> {
    Local.with_ymd_and_hms(2023, 1, 16, 15, 14, 16).unwrap()
}
#[cfg(test)]
mod tests {
use crate::graph::candidate::Candidate;
#[test]
fn dynamic() {
fn test(surface: &str) -> String {
Candidate::new("ใใใ", surface, 0.0_f32).surface_with_dynamic()
}
assert_eq!(test("(*(*(TODAY-HYPHEN"), "2023-01-16");
assert_eq!(test("(*(*(TODAY-SLASH"), "2023/01/16");
assert_eq!(test("(*(*(TODAY-KANJI"), "2023ๅนด01ๆ16ๆฅ");
assert_eq!(test("(*(*(NOW-KANJI"), "15ๆ14ๅ");
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/graph/mod.rs | libakaza/src/graph/mod.rs | pub mod candidate;
pub mod graph_builder;
pub mod graph_resolver;
pub mod lattice_graph;
pub mod segmenter;
pub mod word_node;
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/graph/lattice_graph.rs | libakaza/src/graph/lattice_graph.rs | use std::collections::btree_map::BTreeMap;
use std::fmt::{Debug, Formatter};
use std::rc::Rc;
use std::sync::{Arc, Mutex};
use log::{error, info, trace};
use crate::graph::word_node::WordNode;
use crate::lm::base::{SystemBigramLM, SystemUnigramLM};
use crate::user_side_data::user_data::UserData;
// ่ใใใใๅ่ชใฎๅๅ
จใฆใๅซใใใใชใฐใฉใๆง้
pub struct LatticeGraph<U: SystemUnigramLM, B: SystemBigramLM> {
pub(crate) yomi: String,
pub(crate) graph: BTreeMap<i32, Vec<WordNode>>,
pub(crate) user_data: Arc<Mutex<UserData>>,
pub(crate) system_unigram_lm: Rc<U>,
pub(crate) system_bigram_lm: Rc<B>,
}
impl<U: SystemUnigramLM, B: SystemBigramLM> Debug for LatticeGraph<U, B> {
    // Manual Debug: only the reading and the graph are printed; the LM and
    // user-data handles are omitted.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "LatticeGraph(yomi={}, graph={:?})",
            self.yomi, self.graph
        )
    }
}
impl<U: SystemUnigramLM, B: SystemBigramLM> LatticeGraph<U, B> {
    /// Find the nodes whose reading ends at byte position `end_pos`.
    pub fn node_list(&self, end_pos: i32) -> Option<&Vec<WordNode>> {
        self.graph.get(&end_pos)
    }

    // -1 0 1 2
    // BOS す し
    //     [ ][ ]
    //     [    ]
    pub(crate) fn get_prev_nodes(&self, node: &WordNode) -> Option<&Vec<WordNode>> {
        // BOS is inserted into the graph so that this lookup stays simple.
        // NOTE(review): the trace prints `start_pos - 1` while the lookup uses
        // `start_pos` — looks like a stale log message; confirm before relying
        // on the trace output.
        trace!("get_prev_nodes: {}", node.start_pos - 1);
        self.graph.get(&(node.start_pos))
    }

    pub(crate) fn get(&self, n: i32) -> Option<&Vec<WordNode>> {
        // Tail expression instead of a needless explicit `return`.
        self.graph.get(&n)
    }

    // for debugging purpose
    /// Dump node positions in graphviz dot format.
    #[allow(unused)]
    pub fn dump_position_dot(&self) -> String {
        let mut buf = String::new();
        buf += "digraph Lattice {\n";
        // start and end are measured in bytes.
        for (end_pos, nodes) in self.graph.iter() {
            for node in nodes {
                buf += &format!(
                    r#"  {} -> "{}/{}"{}"#,
                    node.start_pos, node.surface, node.yomi, "\n"
                );
                buf += &format!(
                    r#"  "{}/{}" -> {}{}"#,
                    node.surface, node.yomi, end_pos, "\n"
                );
            }
        }
        buf += "}\n";
        buf
    }

    /// True when `expected` contains `s` as a substring.
    fn is_match(s: &str, expected: &str) -> bool {
        // Direct boolean expression instead of `if … return true; false`.
        expected.contains(s)
    }

    // for debugging purpose
    /// Dump node and edge costs (restricted to surfaces appearing in
    /// `expected`) in graphviz dot format.
    #[allow(unused)]
    pub fn dump_cost_dot(&self, expected: &str) -> String {
        let mut buf = String::new();
        buf += "digraph Lattice {\n";
        // start and end are measured in bytes.
        for (end_pos, nodes) in self.graph.iter() {
            for node in nodes {
                if Self::is_match(node.surface.as_str(), expected) {
                    buf += &format!(
                        r#"  "{}/{}" [xlabel="{}"]{}"#,
                        node.surface,
                        node.yomi,
                        self.get_node_cost(node),
                        "\n"
                    );
                    if let Some(prev_nodes) = self.get_prev_nodes(node) {
                        for prev_node in prev_nodes {
                            if Self::is_match(prev_node.surface.as_str(), expected) {
                                buf += &format!(
                                    r#"  "{}/{}" -> "{}/{}" [label="{}"]{}"#,
                                    prev_node.surface,
                                    prev_node.yomi,
                                    node.surface,
                                    node.yomi,
                                    self.get_edge_cost(prev_node, node),
                                    "\n"
                                );
                            }
                        }
                    } else {
                        error!("Missing previous nodes for {}", node);
                    }
                }
            }
        }
        buf += "}\n";
        buf
    }

    /// Unigram cost of a node: the user's learned cost wins, then the system
    /// unigram LM, then a default cost.
    pub(crate) fn get_node_cost(&self, node: &WordNode) -> f32 {
        if let Some(user_cost) = self.user_data.lock().unwrap().get_unigram_cost(node) {
            info!("Use user's node score: {:?}", node);
            // use user's score, if it exists.
            return user_cost;
        }
        if let Some((_, system_unigram_cost)) = node.word_id_and_score {
            trace!("HIT!: {}, {}", node.key(), system_unigram_cost);
            system_unigram_cost
        } else if node.surface.len() < node.yomi.len() {
            // Words such as 労働者災害補償保険法 may be in the system
            // dictionary (harvested from Wikipedia) without being in the
            // language model. Prefer kanji candidates: entries whose surface
            // is shorter than the reading get the cheaper default cost.
            self.system_unigram_lm.get_cost(1)
        } else {
            self.system_unigram_lm.get_cost(0)
        }
    }

    /// Bigram cost of the prev→node edge: user-learned cost first, then the
    /// system bigram LM, falling back to the default edge cost.
    pub(crate) fn get_edge_cost(&self, prev: &WordNode, node: &WordNode) -> f32 {
        if let Some(cost) = self.user_data.lock().unwrap().get_bigram_cost(prev, node) {
            return cost;
        }
        let Some((prev_id, _)) = prev.word_id_and_score else {
            return self.system_bigram_lm.get_default_edge_cost();
        };
        let Some((node_id, _)) = node.word_id_and_score else {
            return self.system_bigram_lm.get_default_edge_cost();
        };
        if let Some(cost) = self.system_bigram_lm.get_edge_cost(prev_id, node_id) {
            cost
        } else {
            self.system_bigram_lm.get_default_edge_cost()
        }
    }

    /// The bigram LM's fallback edge cost.
    pub fn get_default_edge_cost(&self) -> f32 {
        self.system_bigram_lm.get_default_edge_cost()
    }
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/graph/word_node.rs | libakaza/src/graph/word_node.rs | use std::fmt::{Display, Formatter};
use std::hash::{Hash, Hasher};
/// One node in the lattice: a surface/reading pair anchored at a byte offset.
#[derive(Debug, Clone)]
pub struct WordNode {
    /// Byte offset at which this word starts.
    pub start_pos: i32,
    /// Surface form (e.g. the kanji rendering).
    pub surface: String,
    /// Reading in kana.
    pub yomi: String,
    pub cost: f32,
    pub word_id_and_score: Option<(i32, f32)>,
    pub auto_generated: bool,
}

impl Hash for WordNode {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.start_pos.hash(state);
        self.surface.hash(state);
        self.yomi.hash(state);
        // f32 is not Hash; hash its raw bit pattern instead.
        self.cost.to_bits().hash(state);
    }
}

impl PartialEq<Self> for WordNode {
    fn eq(&self, other: &Self) -> bool {
        // Equality deliberately ignores word_id_and_score and auto_generated.
        (self.start_pos, &self.surface, &self.yomi, self.cost)
            == (other.start_pos, &other.surface, &other.yomi, other.cost)
    }
}

impl Eq for WordNode {}

impl WordNode {
    /// LM lookup key in "surface/yomi" form.
    pub fn key(&self) -> String {
        format!("{}/{}", self.surface, self.yomi)
    }

    /// Sentinel node marking the beginning of the sentence.
    pub(crate) fn create_bos() -> WordNode {
        Self::sentinel(0, "__BOS__")
    }

    /// Sentinel node marking the end of the sentence.
    pub(crate) fn create_eos(start_pos: i32) -> WordNode {
        Self::sentinel(start_pos, "__EOS__")
    }

    // Shared constructor for the BOS/EOS marker nodes.
    fn sentinel(start_pos: i32, marker: &str) -> WordNode {
        WordNode {
            start_pos,
            surface: marker.to_string(),
            yomi: marker.to_string(),
            cost: 0_f32,
            word_id_and_score: None,
            auto_generated: true,
        }
    }

    /// Create a node; the path cost always starts at 0.
    pub fn new(
        start_pos: i32,
        surface: &str,
        yomi: &str,
        word_id_and_score: Option<(i32, f32)>,
        auto_generated: bool,
    ) -> WordNode {
        assert!(
            !surface.is_empty(),
            "Kanji shouldn't be empty: {surface}/{yomi}"
        );
        WordNode {
            start_pos,
            surface: surface.to_string(),
            yomi: yomi.to_string(),
            cost: 0_f32,
            word_id_and_score,
            auto_generated,
        }
    }
}

impl Display for WordNode {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}/{}", self.surface, self.yomi)
    }
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/graph/graph_resolver.rs | libakaza/src/graph/graph_resolver.rs | use std::cmp::Ordering;
use std::collections::{BinaryHeap, HashMap};
use anyhow::Context;
use log::{info, trace};
use crate::graph::candidate::Candidate;
use crate::graph::lattice_graph::LatticeGraph;
use crate::graph::word_node::WordNode;
use crate::lm::base::{SystemBigramLM, SystemUnigramLM};
/**
 * Walks the lattice built from the Segmenter's segmentation and picks the
 * best conversion path. Stateless; all inputs arrive via `resolve`.
 */
#[derive(Default)]
pub struct GraphResolver {}
impl GraphResolver {
    /**
     * Find the optimal path through the lattice with the Viterbi algorithm.
     * Returns, for each segment on the best path, a list of candidates
     * sorted by cost.
     */
    pub fn resolve<U: SystemUnigramLM, B: SystemBigramLM>(
        &self,
        lattice: &LatticeGraph<U, B>,
    ) -> anyhow::Result<Vec<Vec<Candidate>>> {
        let yomi = &lattice.yomi;
        // Best predecessor of each node on the cheapest path found so far.
        let mut prevmap: HashMap<&WordNode, &WordNode> = HashMap::new();
        // Cheapest accumulated cost to reach each node.
        let mut costmap: HashMap<&WordNode, f32> = HashMap::new();

        // Forward pass: dynamic programming over byte end positions.
        for i in 1..yomi.len() + 2 {
            let Some(nodes) = &lattice.node_list(i as i32) else {
                continue;
            };
            for node in *nodes {
                let node_cost = lattice.get_node_cost(node);
                trace!("kanji={}, Cost={}", node, node_cost);
                let mut cost = f32::MAX;
                let mut shortest_prev = None;
                let prev_nodes = lattice.get_prev_nodes(node).with_context(|| {
                    format!(
                        "Cannot get prev nodes for '{}' start={} lattice={:?}",
                        node.surface, node.start_pos, lattice
                    )
                })?;
                for prev in prev_nodes {
                    let edge_cost = lattice.get_edge_cost(prev, node);
                    // unwrap_or is needed for __BOS__, which has no costmap entry.
                    let prev_cost = costmap.get(prev).unwrap_or(&0_f32);
                    let tmp_cost = prev_cost + edge_cost + node_cost;
                    trace!(
                        "Replace??? prev_cost={} tmp_cost={} < cost={}: {}",
                        prev_cost,
                        tmp_cost,
                        cost,
                        prev
                    );
                    // Pick the minimum-cost path (costs are negated log
                    // probabilities, so smaller is better).
                    if cost > tmp_cost {
                        if shortest_prev.is_none() {
                            trace!("Replace None by {}", prev);
                        } else {
                            trace!("Replace {} by {}", shortest_prev.unwrap(), prev);
                        }
                        cost = tmp_cost;
                        shortest_prev = Some(prev);
                    }
                }
                prevmap.insert(node, shortest_prev.unwrap());
                costmap.insert(node, cost);
            }
        }

        // Backward pass: walk the best-predecessor chain from EOS to BOS,
        // collecting candidate lists per segment.
        let eos = lattice
            .get((yomi.len() + 1) as i32)
            .unwrap()
            .get(0)
            .unwrap();
        let bos = lattice.get(0).unwrap().get(0).unwrap();
        let mut node = eos;
        let mut result: Vec<Vec<Candidate>> = Vec::new();
        while node != bos {
            if node.surface != "__EOS__" {
                // Gather the alternatives sharing this start/end position.
                let end_pos = node.start_pos + (node.yomi.len() as i32);
                let candidates: Vec<Candidate> =
                    self.get_candidates(node, lattice, &costmap, end_pos);
                result.push(candidates);
            }
            node = prevmap
                .get(node)
                .unwrap_or_else(|| panic!("Cannot get previous node: {}", node.surface));
        }
        result.reverse();
        Ok(result)
    }

    /// Build the sorted candidate list for the segment ending at `end_pos`.
    fn get_candidates<U: SystemUnigramLM, B: SystemBigramLM>(
        &self,
        node: &WordNode,
        lattice: &LatticeGraph<U, B>,
        costmap: &HashMap<&WordNode, f32>,
        end_pos: i32,
    ) -> Vec<Candidate> {
        // Words that end exactly at end_pos.
        let mut strict_results: Vec<Candidate> = lattice
            .node_list(end_pos)
            .unwrap()
            .iter()
            .filter(|alt_node| {
                alt_node.start_pos == node.start_pos // starts at the same position
                    && alt_node.yomi.len() == node.yomi.len() // same reading length
            })
            .map(|f| Candidate {
                surface: f.surface.clone(),
                yomi: f.yomi.clone(),
                cost: *costmap.get(f).unwrap(),
                compound_word: false,
            })
            .collect();
        strict_results.sort();

        // When strikingly few candidates remain, break the segment down into
        // shorter words; the split results are appended to strict_results.
        // The "< 5" threshold is heuristic (cases like 首相/しゅしょう/… yield
        // about 3 entries), as is the breadth pruning done deeper in the
        // breakdown recursion.
        if strict_results.len() < 5 {
            let mut candidates: Vec<Candidate> = Vec::new();
            Self::collect_breakdown_results(
                &node.yomi,
                node.yomi.len(),
                node.start_pos,
                &mut candidates,
                String::new(),
                String::new(),
                lattice,
                end_pos,
                0,
                &costmap,
                0_f32,
                None,
            );
            candidates.sort();
            for x in candidates {
                strict_results.push(x)
            }
        }
        strict_results
    }

    /// Recursively split a segment into shorter dictionary words, pushing
    /// completed compound candidates into `strict_results`.
    ///
    /// - `tail_cost`: cost accumulated while traversing from the tail.
    #[allow(clippy::too_many_arguments)]
    fn collect_breakdown_results<U: SystemUnigramLM, B: SystemBigramLM>(
        node_yomi: &str,
        required_len: usize,
        min_start_pos: i32,
        strict_results: &mut Vec<Candidate>,
        cur_surface: String,
        cur_yomi: String,
        lattice: &LatticeGraph<U, B>,
        end_pos: i32,
        depth: i32,
        cost_map: &&HashMap<&WordNode, f32>,
        tail_cost: f32,
        next_node: Option<&WordNode>,
    ) {
        if depth > 4 {
            // Give up when the recursion gets too deep.
            info!(
                "collect_splited_results: too deep: node_yomi={:?}, cur_surface={:?}",
                node_yomi, cur_surface
            );
            return;
        }
        if cur_yomi.len() == node_yomi.len() {
            // The accumulated split covers the whole reading: emit it as a
            // compound-word candidate.
            trace!("Insert strict_results: {}/{}", cur_surface, cur_yomi);
            strict_results.push(Candidate {
                surface: cur_surface,
                yomi: cur_yomi,
                cost: tail_cost,
                compound_word: true,
            });
            return;
        }
        let Some(targets) = lattice
            .node_list(end_pos) else {
            // There may be no node ending just before this position.
            return;
        };
        trace!("Targets: {:?}", targets);
        let mut targets = targets
            .iter()
            .filter(|cur| {
                // Keep only words whose start stays inside the node's display
                // range, and drop words identical to the original candidate.
                min_start_pos <= cur.start_pos
                    && cur.yomi != node_yomi
            })
            .map(|f| BreakDown {
                node: f.clone(),
                // Cost accumulated from the head.
                head_cost: (*cost_map.get(f).unwrap()),
                // Cost accumulated from the tail.
                tail_cost: tail_cost
                    + lattice.get_node_cost(f)
                    + next_node
                        .map(|nn| lattice.get_edge_cost(f, nn))
                        .unwrap_or_else(|| lattice.get_default_edge_cost()),
            })
            .collect::<Vec<_>>();
        targets.sort();
        // "take(3)" is heuristic: breaking down across 3 words means up to
        // 3**3 paths, which keeps the search bounded.
        let targets = targets.iter().take(3).collect::<BinaryHeap<_>>();
        trace!("Targets: {:?}, min_start_pos={}", targets, min_start_pos);
        for target in targets {
            if target.node.yomi == "__BOS__" || target.node.yomi == "__EOS__" {
                continue;
            }
            trace!(
                "Recursive tracking : {}/{}",
                target.node.surface,
                target.node.yomi
            );
            if required_len < target.node.yomi.len() {
                panic!("??? underflow: {:?}, {:?}", required_len, target.node.yomi);
            }
            Self::collect_breakdown_results(
                node_yomi,
                required_len - target.node.yomi.len(),
                min_start_pos,
                strict_results,
                target.node.surface.clone() + cur_surface.as_str(),
                target.node.yomi.clone() + cur_yomi.as_str(),
                lattice,
                end_pos - (target.node.yomi.len() as i32),
                depth + 1,
                cost_map,
                tail_cost + target.tail_cost,
                Some(&target.node),
            )
        }
    }
}
/// A shorter word considered while breaking a segment down, with the costs
/// reached from both directions.
#[derive(PartialEq, Debug)]
struct BreakDown {
    node: WordNode,
    /// Cost accumulated when traversing from the head.
    pub head_cost: f32,
    /// Cost accumulated when traversing from the tail.
    pub tail_cost: f32,
}
impl Eq for BreakDown {}

impl PartialOrd<Self> for BreakDown {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Canonical form: delegate to `Ord` so the two impls can never
        // disagree (clippy::non_canonical_partial_ord_impl).
        Some(self.cmp(other))
    }
}

impl Ord for BreakDown {
    fn cmp(&self, other: &Self) -> Ordering {
        // Order by total cost. `f32::total_cmp` is a total order, so sorting
        // cannot panic on NaN the way `partial_cmp().unwrap()` could.
        (self.head_cost + self.tail_cost).total_cmp(&(other.head_cost + other.tail_cost))
    }
}
#[cfg(test)]
mod tests {
use std::collections::btree_map::BTreeMap;
use std::fs::File;
use std::io::Write;
use std::rc::Rc;
use std::sync::{Arc, Mutex};
use anyhow::Result;
use log::LevelFilter;
use crate::graph::graph_builder::GraphBuilder;
use crate::graph::segmenter::{SegmentationResult, Segmenter};
use crate::kana_kanji::hashmap_vec::HashmapVecKanaKanjiDict;
use crate::kana_trie::cedarwood_kana_trie::CedarwoodKanaTrie;
use crate::lm::system_bigram::MarisaSystemBigramLMBuilder;
use crate::lm::system_unigram_lm::MarisaSystemUnigramLMBuilder;
use crate::user_side_data::user_data::UserData;
use super::*;
#[test]
fn test_resolver() -> Result<()> {
let _ = env_logger::builder().is_test(true).try_init();
let kana_trie = CedarwoodKanaTrie::build(Vec::from([
"abc".to_string(),
"ab".to_string(),
"c".to_string(),
]));
let graph_builder = Segmenter::new(vec![Arc::new(Mutex::new(kana_trie))]);
let graph = graph_builder.build("abc", None);
assert_eq!(
graph,
SegmentationResult::new(BTreeMap::from([
(2, vec!["ab".to_string()]),
(3, vec!["abc".to_string(), "c".to_string()]),
]))
);
// -1 0 1 2
// BOS a b c
let system_unigram_lm = MarisaSystemUnigramLMBuilder::default()
.set_unique_words(20)
.set_total_words(19)
.build();
let system_bigram_lm = MarisaSystemBigramLMBuilder::default()
.set_default_edge_cost(20_f32)
.build()?;
let user_data = UserData::default();
let graph_builder = GraphBuilder::new(
HashmapVecKanaKanjiDict::new(HashMap::new()),
HashmapVecKanaKanjiDict::new(Default::default()),
Arc::new(Mutex::new(user_data)),
Rc::new(system_unigram_lm),
Rc::new(system_bigram_lm),
);
let lattice = graph_builder.construct("abc", &graph);
let resolver = GraphResolver::default();
let got = resolver.resolve(&lattice)?;
let terms: Vec<String> = got.iter().map(|f| f[0].surface.clone()).collect();
let result = terms.join("");
assert_eq!(result, "abc");
Ok(())
}
#[test]
fn test_kana_kanji() -> Result<()> {
let _ = env_logger::builder().is_test(true).try_init();
let kana_trie = CedarwoodKanaTrie::build(Vec::from([
"ใใใ".to_string(),
"ใใ".to_string(),
"ใ".to_string(),
]));
let graph_builder = Segmenter::new(vec![Arc::new(Mutex::new(kana_trie))]);
let graph = graph_builder.build("ใใใ", None);
assert_eq!(
graph,
SegmentationResult::new(BTreeMap::from([
(6, vec!["ใใ".to_string()]),
(9, vec!["ใใใ".to_string(), "ใ".to_string()]),
]))
);
let dict = HashMap::from([(
"ใใใ".to_string(),
vec!["็ง".to_string(), "ๆธกใ".to_string()],
)]);
let yomi = "ใใใ".to_string();
let mut system_unigram_lm_builder = MarisaSystemUnigramLMBuilder::default();
let system_unigram_lm = system_unigram_lm_builder
.set_unique_words(19)
.set_total_words(20)
.build();
let system_bigram_lm = MarisaSystemBigramLMBuilder::default()
.set_default_edge_cost(20_f32)
.build()?;
let mut user_data = UserData::default();
// ็ง/ใใใ ใฎในใณใขใใฌใใจไธใใใ
user_data.record_entries(&[Candidate::new("ใใใ", "็ง", 0_f32)]);
let graph_builder = GraphBuilder::new(
HashmapVecKanaKanjiDict::new(dict),
HashmapVecKanaKanjiDict::new(HashMap::new()),
Arc::new(Mutex::new(user_data)),
Rc::new(system_unigram_lm),
Rc::new(system_bigram_lm),
);
let lattice = graph_builder.construct(&yomi, &graph);
// dot -Tpng -o /tmp/lattice.png /tmp/lattice.dot && open /tmp/lattice.png
// File::create("/tmp/lattice.dot")
// .unwrap()
// .write_all(lattice.dump_cost_dot().as_bytes())
// .unwrap();
let resolver = GraphResolver::default();
let got = resolver.resolve(&lattice)?;
let terms: Vec<String> = got.iter().map(|f| f[0].surface.clone()).collect();
let result = terms.join("");
assert_eq!(result, "็ง");
Ok(())
}
#[test]
fn test_kitakana() -> Result<()> {
// ใใใใใชใใๅคๆใใใจใใซใๅ้ฆ้ฃใ ใใงใฏใชใใๆฅใ/ใใ ใใช/ใใชใใฎใใใช
// ๆ็ฏใๅบๅใฃใๅ่ฃใๅบใฆๆฅใใใจใ
let _ = env_logger::builder()
.is_test(true)
.filter_level(LevelFilter::Trace)
.try_init();
let kana_trie = CedarwoodKanaTrie::build(Vec::from([
"ใใใใช".to_string(),
"ใใ".to_string(),
"ใ".to_string(),
"ใใใช".to_string(),
"ใใช".to_string(),
]));
let graph_builder = Segmenter::new(vec![Arc::new(Mutex::new(kana_trie))]);
let graph = graph_builder.build("ใใใใช", None);
// assert_eq!(
// graph,
// SegmentationResult::new(BTreeMap::from([
// (3, vec!["ใ".to_string()]),
// (6, vec!["ใใ".to_string()]),
// (
// 12,
// vec![
// "ใใใใช".to_string(),
// "ใใช".to_string(),
// "ใใใช".to_string(),
// ]
// ),
// ]))
// );
let dict = HashMap::from([
("ใใใใช".to_string(), vec!["ๅ้ฆ้ฃ".to_string()]),
("ใ".to_string(), vec!["ๆฐ".to_string()]),
("ใใใช".to_string(), vec!["้ซ่".to_string()]),
("ใใช".to_string(), vec!["ใใช".to_string()]),
(
"ใใ".to_string(),
vec!["ๆฅใ".to_string(), "ๅ".to_string()],
),
]);
let yomi = "ใใใใช".to_string();
let mut system_unigram_lm_builder = MarisaSystemUnigramLMBuilder::default();
let system_unigram_lm = system_unigram_lm_builder
.set_unique_words(19)
.set_total_words(20)
.build();
let system_bigram_lm = MarisaSystemBigramLMBuilder::default()
.set_default_edge_cost(20_f32)
.build()?;
let mut user_data = UserData::default();
// ๆฅใ/ใใ ใใช/ใใช ใฎใณในใใไธใใฆใใใ
user_data.record_entries(&[
Candidate::new("ใใ", "ๆฅใ", 0_f32),
// Candidate::new("ใใช", "ใใช", 0_f32),
]);
let graph_builder = GraphBuilder::new(
HashmapVecKanaKanjiDict::new(dict),
HashmapVecKanaKanjiDict::new(HashMap::new()),
Arc::new(Mutex::new(user_data)),
Rc::new(system_unigram_lm),
Rc::new(system_bigram_lm),
);
let lattice = graph_builder.construct(&yomi, &graph);
// dot -Tpng -o /tmp/lattice.png /tmp/lattice.dot && open /tmp/lattice.png
File::create("/tmp/dump.dot")
.unwrap()
.write_all(lattice.dump_cost_dot("ๆฅใใใช").as_bytes())
.unwrap();
let resolver = GraphResolver::default();
let got = resolver.resolve(&lattice)?;
// ๆฅใใใช ใๅ่ฃใซๅบใฆใใใ
let got = got[0]
.iter()
.collect::<Vec<_>>()
.iter()
.map(|it| it.surface.to_string())
.collect::<Vec<_>>()
.join(",");
info!("Got: {}", got);
assert!(got.contains("ๆฅใใใช"), "{}", got);
// assert_eq!(result, "ๆฅใใใช");
Ok(())
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/kana_trie/base.rs | libakaza/src/kana_trie/base.rs | pub trait KanaTrie {
fn common_prefix_search(&self, query: &str) -> Vec<String>;
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/kana_trie/cedarwood_kana_trie.rs | libakaza/src/kana_trie/cedarwood_kana_trie.rs | use cedarwood::Cedar;
use log::debug;
use crate::kana_trie::base::KanaTrie;
/// Kana trie backed by a cedarwood double-array trie.
pub struct CedarwoodKanaTrie {
    cedar: Cedar,
    // words[i] is the key stored with value i in `cedar`.
    words: Vec<String>,
}
impl Default for CedarwoodKanaTrie {
    /// An empty trie with an empty key list.
    fn default() -> Self {
        CedarwoodKanaTrie {
            cedar: Cedar::new(),
            words: Vec::new(),
        }
    }
}
impl CedarwoodKanaTrie {
    /// Build a trie from `keys`; each key's stored value is its index in `words`.
    pub fn build(keys: Vec<String>) -> CedarwoodKanaTrie {
        let mut cedar = Cedar::new();
        let mut words: Vec<String> = Vec::new();
        for key in keys {
            cedar.update(key.as_str(), words.len() as i32);
            words.push(key);
        }
        CedarwoodKanaTrie { cedar, words }
    }

    /// True when `key` is stored in the trie as an exact entry.
    pub fn contains(&self, key: &str) -> bool {
        self.cedar.exact_match_search(key).is_some()
    }

    /// Insert `key` into the trie.
    // NOTE(review): inserting a key that already exists re-points the trie
    // value at the new index while the old `words` entry stays behind —
    // presumably callers guard with `contains()` first; verify.
    pub fn update(&mut self, key: &str) {
        assert!(!key.is_empty());
        self.cedar.update(key, self.words.len() as i32);
        self.words.push(key.to_string());
    }
}
impl KanaTrie for CedarwoodKanaTrie {
    /// Return every stored key that is a prefix of `query`, by mapping the
    /// trie values back through `words`.
    fn common_prefix_search(&self, query: &str) -> Vec<String> {
        debug!("Search with CedarwoodKanaTrie: {}", query);
        self.cedar
            .common_prefix_iter(query)
            .map(|(n, _)| self.words[n as usize].clone())
            .collect::<Vec<String>>()
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn hello() -> anyhow::Result<()> {
let trie = CedarwoodKanaTrie::build(vec![
"ใใใ".to_string(),
"ใใ".to_string(),
"ใใ".to_string(),
"ใปใใปใ".to_string(),
]);
assert_eq!(
trie.common_prefix_search("ใใใใฎใใใก"),
vec!("ใใ", "ใใใ")
);
Ok(())
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/kana_trie/mod.rs | libakaza/src/kana_trie/mod.rs | /*
* ๅ
ฑ้ๆฅ้ ญ่พๆค็ดขใซๅฉ็จใใใใฉใคๆง้ ใ
* ไฝใๅฉ็จใใใใๅ
ฅใๆฟใใใใใใใซใใใใใชใใจใ
*/
pub mod base;
pub mod cedarwood_kana_trie;
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/kana_kanji/base.rs | libakaza/src/kana_kanji/base.rs | pub trait KanaKanjiDict {
fn get(&self, kana: &str) -> Option<Vec<String>>;
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/kana_kanji/marisa_kana_kanji_dict.rs | libakaza/src/kana_kanji/marisa_kana_kanji_dict.rs | use std::collections::HashMap;
use log::trace;
use marisa_sys::{Keyset, Marisa};
use crate::kana_kanji::base::KanaKanjiDict;
#[derive(Default)]
pub struct MarisaKanaKanjiDict {
marisa: Marisa,
}
impl MarisaKanaKanjiDict {
pub(crate) fn build_with_cache(
dict: HashMap<String, Vec<String>>,
cache_path: &str,
cache_serialized_key: &str,
) -> anyhow::Result<MarisaKanaKanjiDict> {
let mut keyset = Self::build_keyset(dict);
keyset.push_back(
[
"__CACHE_SERIALIZED__\t".as_bytes(),
cache_serialized_key.as_bytes(),
]
.concat()
.as_slice(),
);
let mut marisa = Marisa::default();
marisa.build(&keyset);
marisa.save(cache_path)?;
Ok(MarisaKanaKanjiDict { marisa })
}
pub(crate) fn build(dict: HashMap<String, Vec<String>>) -> anyhow::Result<MarisaKanaKanjiDict> {
let keyset = Self::build_keyset(dict);
let mut marisa = Marisa::default();
marisa.build(&keyset);
Ok(MarisaKanaKanjiDict { marisa })
}
pub fn build_keyset(dict: HashMap<String, Vec<String>>) -> Keyset {
let mut keyset = Keyset::default();
for (kana, surfaces) in dict {
keyset.push_back(
[
kana.as_bytes(),
b"\t", // seperator
surfaces.join("/").as_bytes(),
]
.concat()
.as_slice(),
);
}
keyset
}
pub fn load(file_name: &str) -> anyhow::Result<MarisaKanaKanjiDict> {
let mut marisa = Marisa::default();
marisa.load(file_name)?;
Ok(MarisaKanaKanjiDict { marisa })
}
pub fn cache_serialized(&self) -> String {
let mut p = String::new();
self.marisa
.predictive_search("__CACHE_SERIALIZED__\t".as_bytes(), |word, _| {
let idx = word.iter().position(|f| *f == b'\t').unwrap();
p = String::from_utf8_lossy(&word[idx + 1..word.len()]).to_string();
false
});
p
}
pub fn yomis(&self) -> Vec<String> {
let mut yomis: Vec<String> = Vec::new();
self.marisa.predictive_search("".as_bytes(), |word, _| {
if !word.starts_with("__CACHE_SERIALIZED__\t".as_bytes()) {
let idx = word.iter().position(|f| *f == b'\t').unwrap();
yomis.push(String::from_utf8_lossy(&word[0..idx]).to_string());
}
true
});
yomis
}
}
impl KanaKanjiDict for MarisaKanaKanjiDict {
fn get(&self, kana: &str) -> Option<Vec<String>> {
let mut surfaces: Vec<String> = Vec::new();
let query = [kana.as_bytes(), b"\t".as_slice()].concat();
self.marisa.predictive_search(query.as_slice(), |word, _| {
let idx = word.iter().position(|f| *f == b'\t').unwrap();
let s = String::from_utf8_lossy(&word[idx + 1..word.len()]).to_string();
for s in s.split('/').collect::<Vec<_>>() {
surfaces.push(s.to_string());
}
false
});
trace!("Got result: {:?}, {:?}", kana, surfaces);
Some(surfaces)
}
}
#[cfg(test)]
mod tests {
use tempfile::NamedTempFile;
use super::*;
#[test]
fn write_read() -> anyhow::Result<()> {
let tmpfile = NamedTempFile::new().unwrap();
let path = tmpfile.path().to_str().unwrap().to_string();
let dict = MarisaKanaKanjiDict::build_with_cache(
HashMap::from([("ใใชใ".to_string(), vec!["็ฐไธญ".to_string()])]),
path.as_str(),
"",
)?;
assert_eq!(dict.get("ใใชใ"), Some(vec!["็ฐไธญ".to_string()]));
Ok(())
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/kana_kanji/hashmap_vec.rs | libakaza/src/kana_kanji/hashmap_vec.rs | use std::collections::HashMap;
use crate::kana_kanji::base::KanaKanjiDict;
#[derive(Default)]
pub struct HashmapVecKanaKanjiDict {
map: HashMap<String, Vec<String>>,
}
impl HashmapVecKanaKanjiDict {
pub fn new(map: HashMap<String, Vec<String>>) -> Self {
HashmapVecKanaKanjiDict { map }
}
}
impl KanaKanjiDict for HashmapVecKanaKanjiDict {
fn get(&self, kana: &str) -> Option<Vec<String>> {
self.map.get(kana).cloned()
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/kana_kanji/mod.rs | libakaza/src/kana_kanji/mod.rs | pub mod base;
pub mod hashmap_vec;
pub mod marisa_kana_kanji_dict;
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/user_side_data/bigram_user_stats.rs | libakaza/src/user_side_data/bigram_user_stats.rs | use std::collections::HashMap;
use crate::cost::calc_cost;
use crate::graph::candidate::Candidate;
#[derive(Default)]
pub(crate) struct BiGramUserStats {
/// ใฆใใผใฏๅ่ชๆฐ
unique_words: u32,
// C
/// ็ทๅ่ชๅบ็พๆฐ
total_words: u32,
// V
/// ใใฎๅ่ชใฎๅบ็พ้ ปๅบฆใใๆผขๅญ/ๆผขๅญใใใญใผใ
pub(crate) word_count: HashMap<String, u32>,
}
impl BiGramUserStats {
pub(crate) fn new(
unique_words: u32,
total_words: u32,
word_count: HashMap<String, u32>,
) -> BiGramUserStats {
BiGramUserStats {
unique_words,
total_words,
word_count,
}
}
/**
* ใจใใธใณในใใ่จ็ฎใใใ
* ใทในใใ ่จ่ชใขใใซใฎใณในใใใใๅฎใใชใใใใซ่ชฟๆดใใฆใใใ
*/
pub(crate) fn get_cost(&self, key1: &str, key2: &str) -> Option<f32> {
let key = key1.to_owned() + "\t" + key2;
let Some(count) = self.word_count.get(key.as_str()) else {
return None;
};
Some(calc_cost(*count, self.unique_words, self.total_words))
}
pub(crate) fn record_entries(&mut self, candidates: &[Candidate]) {
if candidates.len() < 2 {
return;
}
// bigram
for i in 1..candidates.len() {
let Some(candidate1) = candidates.get(i - 1) else {
continue;
};
let Some(candidate2) = candidates.get(i) else {
continue;
};
let key = candidate1.key() + "\t" + candidate2.key().as_str();
if let Some(cnt) = self.word_count.get(&key) {
self.word_count.insert(key, cnt + 1);
} else {
self.word_count.insert(key, 1);
self.unique_words += 1;
}
self.total_words += 1;
}
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/user_side_data/user_data.rs | libakaza/src/user_side_data/user_data.rs | use std::collections::HashMap;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::time::SystemTime;
use anyhow::Result;
use encoding_rs::UTF_8;
use log::{info, warn};
use crate::dict::skk::read::read_skkdict;
use crate::dict::skk::write::write_skk_dict;
use crate::graph::candidate::Candidate;
use crate::graph::word_node::WordNode;
use crate::kana_trie::cedarwood_kana_trie::CedarwoodKanaTrie;
use crate::user_side_data::bigram_user_stats::BiGramUserStats;
use crate::user_side_data::unigram_user_stats::UniGramUserStats;
use crate::user_side_data::user_stats_utils::{read_user_stats_file, write_user_stats_file};
/**
* ใฆใผใถใผๅบๆใใผใฟ
*/
#[derive(Default)]
pub struct UserData {
/// ่ชญใฟไปฎๅใฎใใฉใคใๅ
ฅๅๅคๆๆใซๅ
ฑ้ๆฅ้ ญ่พๆค็ดขใใใใใซไฝฟ็จใ
// ใใใง MARISA ใงใฏใชใ Cedarwood ใๆก็จใใฆใใใฎใฏ
// - FFI ใใฆใใใจ std::marker::Send ใๅฎ่ฃ
ใงใใชใใฆในใฌใใใใพใใใ ๅฆ็ใๅฐ้ฃใซใชใใใ
// - ๆดๆฐๅฏ่ฝใชใใฉใคๆง้ ใ ใใ
pub(crate) kana_trie: Arc<Mutex<CedarwoodKanaTrie>>,
unigram_user_stats: UniGramUserStats,
bigram_user_stats: BiGramUserStats,
unigram_path: Option<String>,
bigram_path: Option<String>,
dict_path: Option<String>,
pub dict: HashMap<String, Vec<String>>,
pub(crate) need_save: bool,
}
impl UserData {
pub fn load_from_default_path() -> Result<Self> {
let basedir = xdg::BaseDirectories::with_prefix("akaza")?;
let unigram_path = basedir
.place_data_file(Path::new("unigram.v1.txt"))?
.to_str()
.unwrap()
.to_string();
let bigram_path = basedir
.place_data_file(Path::new("bigram.v1.txt"))?
.to_str()
.unwrap()
.to_string();
let dict_path = basedir
.place_data_file(Path::new("SKK-JISYO.user"))?
.to_str()
.unwrap()
.to_string();
info!(
"Load user data from default path: unigram={}, bigram={}",
unigram_path, bigram_path
);
Ok(UserData::load(&unigram_path, &bigram_path, &dict_path))
}
pub fn load(unigram_path: &String, bigram_path: &String, dict_path: &String) -> Self {
// ใฆใผใถใผใใผใฟใ่ชญใฟ่พผใใชใใใจใฏ fatal ใจใฉใผใงใฏใชใใ
// ๅๅ่ตทๅๆใซใฏใใผใฟใใชใใฎใงใ
// ใใผใฟใใชใใใฐๅๆๆ็ถๆ
ใใๅงใใ
let unigram_user_stats = match read_user_stats_file(unigram_path) {
Ok(dat) => {
let unique_count = dat.len() as u32;
let total_count: u32 = dat.iter().map(|f| f.1).sum();
let mut word_count: HashMap<String, u32> = HashMap::new();
for (word, count) in dat {
word_count.insert(word, count);
}
UniGramUserStats::new(unique_count, total_count, word_count)
}
Err(err) => {
warn!(
"Cannot load user unigram data from {}: {}",
unigram_path, err
);
UniGramUserStats::new(0, 0, HashMap::new())
}
};
// build bigram
let bigram_user_stats = match read_user_stats_file(bigram_path) {
Ok(dat) => {
let unique_count = dat.len() as u32;
let total_count: u32 = dat.iter().map(|f| f.1).sum();
let mut words_count: HashMap<String, u32> = HashMap::new();
for (words, count) in dat {
words_count.insert(words, count);
}
BiGramUserStats::new(unique_count, total_count, words_count)
}
Err(err) => {
warn!("Cannot load user bigram data from {}: {}", bigram_path, err);
// ใฆใผใถใผใใผใฟใฏๅๅ่ตทๅๆใชใฉใซใฏใชใใฎใงใใใผใฟใใชใใใฎใจใใฆๅฆ็ใ็ถ่กใใ
BiGramUserStats::new(0, 0, HashMap::new())
}
};
let dict = match read_skkdict(Path::new(dict_path), UTF_8) {
Ok(d) => d,
Err(err) => {
warn!("Cannot load user dict: {:?} {:?}", dict_path, err);
Default::default()
}
};
// let kana_trie = match CedarwoodKanaTrie::load(kana_trie_path) {
// Ok(trie) => trie,
// Err(err) => {
// warn!("Cannot load kana trie: {} {}", kana_trie_path, err);
// CedarwoodKanaTrie::default()
// }
// };
// cedarwood ใใฉใคใๆง็ฏใใใ
// ใญใฃใใทใฅใใใซๅ็ใซๆง็ฏใใๆนๅๆงใ
let t1 = SystemTime::now();
let mut yomis = unigram_user_stats
.word_count
.keys()
.filter_map(|it| it.split_once('/'))
.map(|(_, yomi)| yomi.to_string())
.collect::<Vec<_>>();
// ใฆใผใถใผ่พๆธใฎๅ
ๅฎนใ่ฟฝๅ
dict.keys().for_each(|yomi| yomis.push(yomi.to_string()));
let yomi_len = yomis.len();
let kana_trie = CedarwoodKanaTrie::build(yomis);
let t2 = SystemTime::now();
info!(
"Built kana trie in {}msec({} entries)",
t2.duration_since(t1).unwrap().as_millis(),
yomi_len
);
UserData {
unigram_user_stats,
bigram_user_stats,
dict,
kana_trie: Arc::new(Mutex::new(kana_trie)),
unigram_path: Some(unigram_path.clone()),
bigram_path: Some(bigram_path.clone()),
dict_path: Some(dict_path.clone()),
need_save: false,
}
}
/// ๅ
ฅๅ็ขบๅฎใใๆผขๅญใฎใชในใใใฆใผใถใผ็ตฑ่จใใผใฟใจใใฆ่จ้ฒใใใ
/// "Surface/Kana" ใฎใใฉใผใใใใงๆธกใใใจใ
pub fn record_entries(&mut self, candidates: &[Candidate]) {
self.unigram_user_stats.record_entries(candidates);
self.bigram_user_stats.record_entries(candidates);
// ่คๅ่ชใจใใฆ่ฆใใฆใใในใใใฎใใใใฐใๅญฆ็ฟใใใ
candidates
.iter()
.filter(|candidate| candidate.compound_word)
.for_each(|candidate| {
self.dict
.entry(candidate.yomi.to_string())
.or_default()
.push(candidate.surface.to_string())
});
// ใใชใใฉใคใๆดๆฐใใ
let mut kana_trie = self.kana_trie.lock().unwrap();
candidates
.iter()
.map(|it| it.yomi.to_string())
.for_each(|it| {
if !kana_trie.contains(it.as_str()) {
kana_trie.update(it.as_str())
}
});
self.need_save = true;
}
pub fn write_user_files(&mut self) -> Result<()> {
if self.need_save {
info!(
"Saving user stats file: unigram={:?},{}, bigram={:?},{}",
self.unigram_path,
self.unigram_user_stats.word_count.len(),
self.bigram_path,
self.bigram_user_stats.word_count.len(),
);
if let Some(unigram_path) = &self.unigram_path {
write_user_stats_file(unigram_path, &self.unigram_user_stats.word_count)?;
}
if let Some(bigram_path) = &self.bigram_path {
write_user_stats_file(bigram_path, &self.bigram_user_stats.word_count)?;
}
if let Some(dict_path) = &self.dict_path {
write_skk_dict(dict_path, vec![self.dict.clone()])?;
}
self.need_save = false;
}
Ok(())
}
pub fn get_unigram_cost(&self, node: &WordNode) -> Option<f32> {
self.unigram_user_stats.get_cost(node.key())
}
pub fn get_bigram_cost(&self, node1: &WordNode, node2: &WordNode) -> Option<f32> {
self.bigram_user_stats
.get_cost(node1.key().as_str(), node2.key().as_str())
}
}
#[cfg(test)]
mod tests {
use log::LevelFilter;
use super::*;
#[test]
fn test_record_entries() {
let _ = env_logger::builder()
.filter_level(LevelFilter::Trace)
.is_test(true)
.try_init();
let mut user_data = UserData::default();
let cost1 = user_data.get_unigram_cost(&WordNode::new(
0,
"ใขใฐใชใฒใผใทใงใณ",
"ใใใใใผใใใ",
None,
false,
));
assert_eq!(cost1, None);
user_data.record_entries(&[Candidate::new(
"ใใใใใผใใใ",
"ใขใฐใชใฒใผใทใงใณ",
0_f32,
)]);
let cost2 = user_data
.get_unigram_cost(&WordNode::new(
0,
"ใขใฐใชใฒใผใทใงใณ",
"ใใใใใผใใใ",
None,
false,
))
.unwrap();
user_data.record_entries(&[Candidate::new(
"ใใใใใผใใใ",
"ใขใฐใชใฒใผใทใงใณ",
0_f32,
)]);
let cost3 = user_data
.get_unigram_cost(&WordNode::new(
0,
"ใขใฐใชใฒใผใทใงใณ",
"ใใใใใผใใใ",
None,
false,
))
.unwrap();
info!("{}, {}", cost2, cost3);
assert!(cost2 > cost3);
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/user_side_data/user_stats_utils.rs | libakaza/src/user_side_data/user_stats_utils.rs | use anyhow::{Context, Result};
use std::collections::HashMap;
use std::fs;
use std::fs::{File, OpenOptions};
use std::io::{BufRead, BufReader, Write};
use std::os::unix::fs::OpenOptionsExt;
pub(crate) fn read_user_stats_file(path: &String) -> Result<Vec<(String, u32)>> {
let file = File::open(path)?;
let mut result: Vec<(String, u32)> = Vec::new();
for line in BufReader::new(file).lines() {
let line = line.context("Cannot read user language model file")?;
let Some((key, count)) = line.trim().split_once(' ') else {
continue;
};
let count = count
.to_string()
.parse::<u32>()
.with_context(|| format!("Invalid line in user language model: {count}"))?;
result.push((key.to_string(), count));
}
Ok(result)
}
pub(crate) fn write_user_stats_file(path: &str, word_count: &HashMap<String, u32>) -> Result<()> {
let mut tmpfile = OpenOptions::new()
.write(true)
.create(true)
.truncate(true)
.mode(0o600)
.open(path.to_string() + ".tmp")?;
for (key, cnt) in word_count {
tmpfile.write_all(key.as_bytes())?;
tmpfile.write_all(" ".as_bytes())?;
tmpfile.write_all(cnt.to_string().as_bytes())?;
tmpfile.write_all("\n".as_bytes())?;
}
fs::rename(path.to_owned() + ".tmp", path)?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Read;
use tempfile::NamedTempFile;
#[test]
fn test_write() {
let tmpfile = NamedTempFile::new().unwrap();
let path = tmpfile.path().to_str().unwrap().to_string();
write_user_stats_file(&path, &HashMap::from([("ๆธกใ".to_string(), 3_u32)])).unwrap();
let mut buf = String::new();
File::open(path).unwrap().read_to_string(&mut buf).unwrap();
assert_eq!(buf, "ๆธกใ 3\n");
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/user_side_data/mod.rs | libakaza/src/user_side_data/mod.rs | mod bigram_user_stats;
mod unigram_user_stats;
// ่ชฟๆดใใใฉใใใใฎใงใใฃใใใชใใ
pub mod user_data;
mod user_stats_utils;
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/user_side_data/unigram_user_stats.rs | libakaza/src/user_side_data/unigram_user_stats.rs | use std::collections::HashMap;
use crate::cost::calc_cost;
use crate::graph::candidate::Candidate;
#[derive(Default)]
pub(crate) struct UniGramUserStats {
/// ใฆใใผใฏๅ่ชๆฐ
unique_words: u32,
// C
/// ็ทๅ่ชๅบ็พๆฐ
total_words: u32,
// V
/// ใใฎๅ่ชใฎๅบ็พ้ ปๅบฆใใๆผขๅญ/ใใชใใใญใผใ
pub(crate) word_count: HashMap<String, u32>,
}
impl UniGramUserStats {
pub(crate) fn new(
unique_words: u32,
total_words: u32,
word_count: HashMap<String, u32>,
) -> UniGramUserStats {
UniGramUserStats {
unique_words,
total_words,
word_count,
}
}
/**
* ใใผใใณในใใ่จ็ฎใใใ
*/
pub(crate) fn get_cost(&self, key: String) -> Option<f32> {
let Some(count) = self.word_count.get(key.as_str()) else {
return None;
};
Some(calc_cost(*count, self.unique_words, self.total_words))
}
pub(crate) fn record_entries(&mut self, candidates: &[Candidate]) {
for candidate in candidates {
let key = candidate.key();
if let Some(i) = self.word_count.get(&key) {
self.word_count.insert(key, i + 1);
} else {
self.word_count.insert(key, 1);
self.unique_words += 1;
}
self.total_words += 1;
}
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/lm/system_bigram.rs | libakaza/src/lm/system_bigram.rs | use std::collections::HashMap;
use anyhow::{bail, Result};
use half::f16;
use log::info;
use marisa_sys::{Keyset, Marisa};
use crate::lm::base::SystemBigramLM;
use crate::search_result::SearchResult;
/*
{word1 ID} # 3 bytes
{word2 ID} # 3 bytes
packed float # score: 4 bytes
*/
const DEFAULT_COST_KEY: &str = "__DEFAULT_EDGE_COST__";
/**
* bigram ่จ่ชใขใใซใ
* unigram ใฎ็ๆใฎใจใใซๅพใใใๅ่ชIDใๅฉ็จใใใใจใงใๅง็ธฎใใฆใใใ
*/
#[derive(Default)]
pub struct MarisaSystemBigramLMBuilder {
keyset: Keyset,
}
impl MarisaSystemBigramLMBuilder {
pub fn add(&mut self, word_id1: i32, word_id2: i32, score: f32) {
// edge cost ่จ่ชใขใใซใใกใคใซใฎๅฎน้ใๅฐใใไฟใคใใใซ
// 3 byte ใซ ID ใๅใใใใใซใใใ
// ๆๅคงใงใ 8,388,608 ๅ่ชใพใงใซใชใใใใซ vocab ใๅถ้ใใใใจใ
// ็พๅฎ็ใช็ทใงๅใฃใฆใใ500ไธๅ่ชใใใใงๅๅใ ใจๆใใใใ
// -rw-r--r-- 1 tokuhirom tokuhirom 28M Dec 31 23:56 bigram.model
// โ 1MB ็ฏ็ดใงใใใ
// -rw-r--r-- 1 tokuhirom tokuhirom 27M Jan 1 02:05 bigram.model
// 4+4+4=12ใใคใๅฟ
่ฆใ ใฃใใจใใใใ3+3+4=10ใใคใใซใชใฃใฆใ10/12=5/6 ใชใฎใงใ
// ๆฌๆฅใชใ 23.3 MB ใใใใพใงๆธใฃใฆใปใใใจใใใ ใใฉใใใใพใงใฏใใใชใใ
// TRIE ๆง้ ใ ใใใใใใๆใใซใฏๆธใใชใใ
// ใใใซใในใณใขใ f16 ใซใใฆใฟใใใใใพใใใใใชใใ
// -rw-r--r-- 1 tokuhirom tokuhirom 27M Jan 1 02:14 bigram.model
let id1_bytes = word_id1.to_le_bytes();
let id2_bytes = word_id2.to_le_bytes();
assert_eq!(id1_bytes[3], 0);
assert_eq!(id2_bytes[3], 0);
let mut key: Vec<u8> = Vec::new();
key.extend(id1_bytes[0..3].iter());
key.extend(id2_bytes[0..3].iter());
key.extend(f16::from_f32(score).to_le_bytes());
self.keyset.push_back(key.as_slice());
}
pub fn set_default_edge_cost(&mut self, score: f32) -> &mut Self {
let key = format!("{DEFAULT_COST_KEY}\t{score}");
let key1 = key.as_bytes().to_vec();
self.keyset.push_back(key1.as_slice());
self
}
pub fn build(&self) -> Result<MarisaSystemBigramLM> {
let mut marisa = Marisa::default();
marisa.build(&self.keyset);
let default_edge_cost = MarisaSystemBigramLM::read_default_edge_cost(&marisa)?;
Ok(MarisaSystemBigramLM {
marisa,
default_edge_cost,
})
}
pub fn save(&self, ofname: &str) -> Result<()> {
let mut marisa = Marisa::default();
marisa.build(&self.keyset);
marisa.save(ofname)?;
Ok(())
}
}
pub struct MarisaSystemBigramLM {
marisa: Marisa,
default_edge_cost: f32,
}
impl MarisaSystemBigramLM {
pub fn load(filename: &str) -> Result<MarisaSystemBigramLM> {
info!("Loading system-bigram: {}", filename);
let mut marisa = Marisa::default();
marisa.load(filename)?;
let default_edge_cost = Self::read_default_edge_cost(&marisa);
Ok(MarisaSystemBigramLM {
marisa,
default_edge_cost: default_edge_cost?,
})
}
pub fn num_keys(&self) -> usize {
self.marisa.num_keys()
}
fn read_default_edge_cost(marisa: &Marisa) -> Result<f32> {
let mut keys: Vec<Vec<u8>> = Vec::new();
marisa.predictive_search(DEFAULT_COST_KEY.as_bytes(), |key, _| {
keys.push(key.to_vec());
false
});
let Some(key) = keys.get(0) else {
bail!("Cannot read default cost from bigram-trie");
};
let key = String::from_utf8_lossy(key);
if let Some((_, score)) = key.split_once('\t') {
Ok(score.parse::<f32>()?)
} else {
bail!("Cannot parse default edge cost from trie");
}
}
}
impl SystemBigramLM for MarisaSystemBigramLM {
fn get_default_edge_cost(&self) -> f32 {
self.default_edge_cost
}
/**
* edge cost ใๅพใใ
* ใใฎ ID ใฏใunigram ใฎ trie ใงใตใใใใใฎใ
*/
fn get_edge_cost(&self, word_id1: i32, word_id2: i32) -> Option<f32> {
let mut key: Vec<u8> = Vec::new();
key.extend(word_id1.to_le_bytes()[0..3].iter());
key.extend(word_id2.to_le_bytes()[0..3].iter());
let mut got: Vec<SearchResult> = Vec::new();
self.marisa.predictive_search(key.as_slice(), |key, id| {
got.push(SearchResult {
keyword: key.to_vec(),
id,
});
true
});
let Some(result) = got.first() else {
return None;
};
let last2: [u8; 2] = result.keyword[result.keyword.len() - 2..result.keyword.len()]
.try_into()
.unwrap();
let score: f16 = f16::from_le_bytes(last2);
Some(score.to_f32())
}
fn as_hash_map(&self) -> HashMap<(i32, i32), f32> {
let mut map: HashMap<(i32, i32), f32> = HashMap::new();
self.marisa.predictive_search("".as_bytes(), |word, _id| {
if word.len() == 8 {
let word_id1 = i32::from_le_bytes([word[0], word[1], word[2], 0]);
let word_id2 = i32::from_le_bytes([word[3], word[4], word[5], 0]);
let cost = f16::from_le_bytes([word[6], word[7]]).to_f32();
map.insert((word_id1, word_id2), cost);
}
true
});
map
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn build_and_load() -> anyhow::Result<()> {
let mut builder = MarisaSystemBigramLMBuilder::default();
builder.set_default_edge_cost(20_f32);
builder.add(4649, 5963, 5.11_f32);
let system_bigram_lm = builder.build()?;
let got_score = system_bigram_lm.get_edge_cost(4649, 5963).unwrap();
assert!(5.0 < got_score && got_score < 5.12);
let map = system_bigram_lm.as_hash_map();
assert!(map.contains_key(&(4649, 5963)));
let g = *map.get(&(4649, 5963)).unwrap();
assert!(5.10_f32 < g && g < 5.12_f32);
Ok(())
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/lm/base.rs | libakaza/src/lm/base.rs | use std::collections::HashMap;
pub trait SystemBigramLM {
fn get_default_edge_cost(&self) -> f32;
fn get_edge_cost(&self, word_id1: i32, word_id2: i32) -> Option<f32>;
fn as_hash_map(&self) -> HashMap<(i32, i32), f32>;
}
pub trait SystemUnigramLM {
fn get_cost(&self, wordcnt: u32) -> f32;
fn find(&self, word: &str) -> Option<(i32, f32)>;
fn as_hash_map(&self) -> HashMap<String, (i32, f32)>;
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/lm/system_unigram_lm.rs | libakaza/src/lm/system_unigram_lm.rs | use std::collections::HashMap;
use anyhow::{bail, Result};
use log::info;
use marisa_sys::{Keyset, Marisa};
use crate::cost::calc_cost;
use crate::lm::base::SystemUnigramLM;
/*
{word} # in utf-8
0xff # marker
packed ID # 3 bytes(24bit). ๆๅคง่ชๅฝ: 8,388,608(2**24/2)
packed float # score: 4 bytes
*/
const UNIQUE_WORDS_KEY: &str = "__UNIQUE_WORDS__";
const TOTAL_WORDS_KEY: &str = "__TOTAL_WORDS__";
/**
* unigram ่จ่ชใขใใซใ
* ใๆผขๅญ/ใใชใใซๅฏพใใฆใ็บ็็ขบ็ในใณใขใไฟๆใใฆใใใ
*/
#[derive(Default)]
pub struct MarisaSystemUnigramLMBuilder {
data: Vec<(String, f32)>,
}
impl MarisaSystemUnigramLMBuilder {
pub fn add(&mut self, word: &str, score: f32) {
self.data.push((word.to_string(), score));
}
pub fn keyset(&self) -> Keyset {
let mut keyset = Keyset::default();
for (kanji, score) in &self.data {
// ๅบๅใๆๅญใใใใชใใฆใใๆซๅฐพใฎ4ใใคใใๅใๅบใใฐๅๅใชๆฐใใใชใใงใใชใใใ
// ๅ
้ ญไธ่ดใซใใฆใ+4ใใคใใซใชใใใฎใๆขใใฐใใใฏใใ
// ๆ้ฉๅใฎไฝๅฐใ ใใฉใ็พๅฎ็ใซใฏ็ฉบ้ๅน็ใใใ้ๅบฆใฎใปใใ้่ฆใใใใใชใใ
let key = [
kanji.as_bytes(),
b"\xff",
score.to_le_bytes().as_slice(), // ใใคใใชใซใใฆใใผใฟๅฎน้ใ็ฏ็ดใใ
]
.concat();
keyset.push_back(key.as_slice());
}
keyset
}
pub fn set_total_words(&mut self, total_words: u32) -> &mut Self {
self.add(TOTAL_WORDS_KEY, total_words as f32);
self
}
pub fn set_unique_words(&mut self, unique_words: u32) -> &mut Self {
self.add(UNIQUE_WORDS_KEY, unique_words as f32);
self
}
pub fn save(&self, fname: &str) -> Result<()> {
let mut marisa = Marisa::default();
marisa.build(&self.keyset());
marisa.save(fname)?;
Ok(())
}
pub fn build(&self) -> MarisaSystemUnigramLM {
let mut marisa = Marisa::default();
marisa.build(&self.keyset());
let (_, total_words) =
MarisaSystemUnigramLM::find_from_trie(&marisa, TOTAL_WORDS_KEY).unwrap();
let (_, unique_words) =
MarisaSystemUnigramLM::find_from_trie(&marisa, UNIQUE_WORDS_KEY).unwrap();
MarisaSystemUnigramLM {
marisa,
total_words: total_words as u32,
unique_words: unique_words as u32,
}
}
}
pub struct MarisaSystemUnigramLM {
marisa: Marisa,
total_words: u32,
unique_words: u32,
}
impl MarisaSystemUnigramLM {
pub fn num_keys(&self) -> usize {
self.marisa.num_keys()
}
pub fn load(fname: &str) -> Result<MarisaSystemUnigramLM> {
info!("Reading {}", fname);
let mut marisa = Marisa::default();
marisa.load(fname)?;
let Some((_, total_words)) = Self::find_from_trie(&marisa, TOTAL_WORDS_KEY) else {
bail!("Missing key for {}", TOTAL_WORDS_KEY);
};
let Some((_, unique_words)) = Self::find_from_trie(&marisa, UNIQUE_WORDS_KEY) else {
bail!("Missing key for {}", UNIQUE_WORDS_KEY);
};
Ok(MarisaSystemUnigramLM {
marisa,
total_words: total_words as u32,
unique_words: unique_words as u32,
})
}
fn find_from_trie(marisa: &Marisa, word: &str) -> Option<(i32, f32)> {
assert_ne!(word.len(), 0);
let key = [word.as_bytes(), b"\xff"].concat();
let mut kanji_id: usize = usize::MAX;
let mut score = f32::MAX;
marisa.predictive_search(key.as_slice(), |word, id| {
kanji_id = id;
let idx = word.iter().position(|f| *f == b'\xff').unwrap();
let bytes: [u8; 4] = word[idx + 1..idx + 1 + 4].try_into().unwrap();
score = f32::from_le_bytes(bytes);
false
});
if kanji_id != usize::MAX {
Some((kanji_id as i32, score))
} else {
None
}
}
}
impl SystemUnigramLM for MarisaSystemUnigramLM {
fn get_cost(&self, wordcnt: u32) -> f32 {
calc_cost(wordcnt, self.total_words, self.unique_words)
}
/// @return (word_id, score)ใ
fn find(&self, word: &str) -> Option<(i32, f32)> {
Self::find_from_trie(&self.marisa, word)
}
fn as_hash_map(&self) -> HashMap<String, (i32, f32)> {
let mut map = HashMap::new();
self.marisa.predictive_search("".as_bytes(), |word, id| {
let idx = word.iter().position(|f| *f == b'\xff').unwrap();
let bytes: [u8; 4] = word[idx + 1..idx + 1 + 4].try_into().unwrap();
let word = String::from_utf8_lossy(&word[0..idx]);
let cost = f32::from_le_bytes(bytes);
map.insert(word.to_string(), (id as i32, cost));
true
});
map
}
}
#[cfg(test)]
mod tests {
use tempfile::NamedTempFile;
use super::*;
#[test]
fn test() {
let named_tmpfile = NamedTempFile::new().unwrap();
let tmpfile = named_tmpfile.path().to_str().unwrap().to_string();
let mut builder = MarisaSystemUnigramLMBuilder::default();
builder.add("hello", 0.4);
builder.add("world", 0.2);
builder
.set_total_words(2)
.set_unique_words(2)
.save(&tmpfile)
.unwrap();
let lm = MarisaSystemUnigramLM::load(&tmpfile).unwrap();
{
let (word_id, score) = lm.find("hello").unwrap();
assert_eq!(word_id, 0);
assert_eq!(score, 0.4_f32);
}
{
let p = lm.find("unknown");
assert_eq!(p, None);
}
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/lm/mod.rs | libakaza/src/lm/mod.rs | pub mod base;
pub mod on_memory;
pub mod system_bigram;
pub mod system_unigram_lm;
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/lm/on_memory/mod.rs | libakaza/src/lm/on_memory/mod.rs | pub mod on_memory_system_bigram_lm;
pub mod on_memory_system_unigram_lm;
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/lm/on_memory/on_memory_system_bigram_lm.rs | libakaza/src/lm/on_memory/on_memory_system_bigram_lm.rs | use std::cell::RefCell;
use std::collections::HashMap;
use std::rc::Rc;
use crate::cost::calc_cost;
use crate::lm::base::SystemBigramLM;
pub struct OnMemorySystemBigramLM {
// (word_id, word_id) -> cost
map: Rc<RefCell<HashMap<(i32, i32), u32>>>,
default_edge_cost: f32,
pub total_words: u32,
pub unique_words: u32,
}
impl OnMemorySystemBigramLM {
pub fn new(
map: Rc<RefCell<HashMap<(i32, i32), u32>>>,
default_edge_cost: f32,
c: u32,
v: u32,
) -> Self {
OnMemorySystemBigramLM {
map,
default_edge_cost,
total_words: c,
unique_words: v,
}
}
pub fn update(&self, word_id1: i32, word_id2: i32, cnt: u32) {
self.map.borrow_mut().insert((word_id1, word_id2), cnt);
}
pub fn get_edge_cnt(&self, word_id1: i32, word_id2: i32) -> Option<u32> {
self.map.borrow().get(&(word_id1, word_id2)).copied()
}
}
impl SystemBigramLM for OnMemorySystemBigramLM {
#[inline]
fn get_default_edge_cost(&self) -> f32 {
self.default_edge_cost
}
fn get_edge_cost(&self, word_id1: i32, word_id2: i32) -> Option<f32> {
self.map
.borrow()
.get(&(word_id1, word_id2))
.map(|f| calc_cost(*f, self.total_words, self.unique_words))
}
fn as_hash_map(&self) -> HashMap<(i32, i32), f32> {
self.map
.borrow()
.iter()
.map(|((id1, id2), cnt)| {
(
(*id1, *id2),
calc_cost(*cnt, self.total_words, self.unique_words),
)
})
.collect()
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/lm/on_memory/on_memory_system_unigram_lm.rs | libakaza/src/lm/on_memory/on_memory_system_unigram_lm.rs | use crate::cost::calc_cost;
use std::cell::RefCell;
use std::collections::HashMap;
use std::rc::Rc;
use crate::lm::base::SystemUnigramLM;
pub struct OnMemorySystemUnigramLM {
// word -> (word_id, cost)
map: Rc<RefCell<HashMap<String, (i32, u32)>>>,
pub total_words: u32,
pub unique_words: u32,
}
impl OnMemorySystemUnigramLM {
pub fn new(
map: Rc<RefCell<HashMap<String, (i32, u32)>>>,
total_words: u32,
unique_words: u32,
) -> Self {
OnMemorySystemUnigramLM {
map,
total_words,
unique_words,
}
}
pub fn update(&self, word: &str, cnt: u32) {
let Some((word_id, _)) = self.find(word) else {
// ็ป้ฒใใใฆใชใๅ่ชใฏ็ก่ฆใ
return;
};
self.map
.borrow_mut()
.insert(word.to_string(), (word_id, cnt));
}
pub fn reverse_lookup(&self, word_id: i32) -> Option<String> {
self.map
.borrow()
.iter()
.filter(|(_, (id, _))| *id == word_id)
.map(|(key, (_, _))| key.clone())
.next()
}
pub fn find_cnt(&self, word: &str) -> Option<(i32, u32)> {
self.map.borrow().get(word).copied()
}
}
impl SystemUnigramLM for OnMemorySystemUnigramLM {
fn get_cost(&self, wordcnt: u32) -> f32 {
calc_cost(wordcnt, self.total_words, self.unique_words)
}
fn find(&self, word: &str) -> Option<(i32, f32)> {
self.map
.borrow()
.get(word)
.map(|(id, cnt)| (*id, calc_cost(*cnt, self.total_words, self.unique_words)))
}
fn as_hash_map(&self) -> HashMap<String, (i32, f32)> {
self.map
.borrow()
.iter()
.map(|(key, (id, cnt))| {
(
key.to_string(),
(*id, calc_cost(*cnt, self.total_words, self.unique_words)),
)
})
.collect()
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/engine/base.rs | libakaza/src/engine/base.rs | use std::ops::Range;
use crate::graph::candidate::Candidate;
pub trait HenkanEngine {
fn learn(&mut self, candidates: &[Candidate]);
fn convert(
&self,
yomi: &str,
force_ranges: Option<&[Range<usize>]>,
) -> anyhow::Result<Vec<Vec<Candidate>>>;
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/engine/mod.rs | libakaza/src/engine/mod.rs | pub mod base;
pub mod bigram_word_viterbi_engine;
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/libakaza/src/engine/bigram_word_viterbi_engine.rs | libakaza/src/engine/bigram_word_viterbi_engine.rs | use std::fmt::{Debug, Formatter};
use std::ops::Range;
use std::rc::Rc;
use std::sync::{Arc, Mutex};
use anyhow::Result;
use crate::config::{DictConfig, DictEncoding, DictType, DictUsage, EngineConfig};
use crate::dict::loader::{load_dicts, load_dicts_with_cache};
use crate::engine::base::HenkanEngine;
use crate::graph::candidate::Candidate;
use crate::graph::graph_builder::GraphBuilder;
use crate::graph::graph_resolver::GraphResolver;
use crate::graph::lattice_graph::LatticeGraph;
use crate::graph::segmenter::Segmenter;
use crate::kana_kanji::base::KanaKanjiDict;
use crate::kana_kanji::marisa_kana_kanji_dict::MarisaKanaKanjiDict;
use crate::kana_trie::cedarwood_kana_trie::CedarwoodKanaTrie;
use crate::lm::base::{SystemBigramLM, SystemUnigramLM};
use crate::lm::system_bigram::MarisaSystemBigramLM;
use crate::lm::system_unigram_lm::MarisaSystemUnigramLM;
use crate::user_side_data::user_data::UserData;
/// ใใคใฐใฉใ ใฎใใฟใใใผในใใชๆผขๅญๅคๆใจใณใธใณใงใใ
/// ๅ่ชใใคใฐใฉใ ใๆก็จใใฆใใพใใ
pub struct BigramWordViterbiEngine<U: SystemUnigramLM, B: SystemBigramLM, KD: KanaKanjiDict> {
graph_builder: GraphBuilder<U, B, KD>,
pub segmenter: Segmenter,
pub graph_resolver: GraphResolver,
pub user_data: Arc<Mutex<UserData>>,
}
impl<U: SystemUnigramLM, B: SystemBigramLM, KD: KanaKanjiDict> Debug
for BigramWordViterbiEngine<U, B, KD>
{
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("BigramWordViterbiEngine"))
}
}
impl<U: SystemUnigramLM, B: SystemBigramLM, KD: KanaKanjiDict> HenkanEngine
for BigramWordViterbiEngine<U, B, KD>
{
fn learn(&mut self, candidates: &[Candidate]) {
self.user_data.lock().unwrap().record_entries(candidates);
}
fn convert(
&self,
yomi: &str,
force_ranges: Option<&[Range<usize>]>,
) -> Result<Vec<Vec<Candidate>>> {
let lattice = self.to_lattice(yomi, force_ranges)?;
self.resolve(&lattice)
}
}
impl<U: SystemUnigramLM, B: SystemBigramLM, KD: KanaKanjiDict> BigramWordViterbiEngine<U, B, KD> {
pub fn resolve(&self, lattice: &LatticeGraph<U, B>) -> Result<Vec<Vec<Candidate>>> {
self.graph_resolver.resolve(lattice)
}
pub fn to_lattice(
&self,
yomi: &str,
force_ranges: Option<&[Range<usize>]>,
) -> Result<LatticeGraph<U, B>> {
let segmentation_result = &self.segmenter.build(yomi, force_ranges);
let lattice = self.graph_builder.construct(yomi, segmentation_result);
Ok(lattice)
}
}
pub struct BigramWordViterbiEngineBuilder {
user_data: Option<Arc<Mutex<UserData>>>,
config: EngineConfig,
}
impl BigramWordViterbiEngineBuilder {
pub fn new(config: EngineConfig) -> BigramWordViterbiEngineBuilder {
BigramWordViterbiEngineBuilder {
user_data: None,
config,
}
}
pub fn user_data(&mut self, user_data: Arc<Mutex<UserData>>) -> &mut Self {
self.user_data = Some(user_data);
self
}
pub fn build(
&self,
) -> Result<
BigramWordViterbiEngine<MarisaSystemUnigramLM, MarisaSystemBigramLM, MarisaKanaKanjiDict>,
> {
let model_name = self.config.model.clone();
let system_unigram_lm =
MarisaSystemUnigramLM::load(Self::try_load(&model_name, "unigram.model")?.as_str())?;
let system_bigram_lm =
MarisaSystemBigramLM::load(Self::try_load(&model_name, "bigram.model")?.as_str())?;
let system_dict = Self::try_load(&model_name, "SKK-JISYO.akaza")?;
let user_data = if let Some(d) = &self.user_data {
d.clone()
} else {
Arc::new(Mutex::new(UserData::default()))
};
let dict = {
let mut dicts = self
.config
.dicts
.iter()
.filter(|it| it.usage == DictUsage::Normal)
.cloned()
.collect::<Vec<_>>();
dicts.push(DictConfig {
path: system_dict,
dict_type: DictType::SKK,
encoding: DictEncoding::Utf8,
usage: DictUsage::Normal,
});
if self.config.dict_cache {
load_dicts_with_cache(&dicts, "kana_kanji_cache.marisa")?
} else {
let dict = load_dicts(&dicts)?;
MarisaKanaKanjiDict::build(dict)?
}
};
let single_term = {
let dicts = self
.config
.dicts
.iter()
.filter(|it| it.usage == DictUsage::SingleTerm)
.cloned()
.collect::<Vec<_>>();
if self.config.dict_cache {
load_dicts_with_cache(&dicts, "single_term_cache.marisa")?
} else {
let dict = load_dicts(&dicts)?;
MarisaKanaKanjiDict::build(dict)?
}
};
// ่พๆธใๅ
ใซใใใฉใคใไฝๆใใฆใใใ
let mut kana_trie = CedarwoodKanaTrie::default();
for yomi in dict.yomis() {
assert!(!yomi.is_empty());
kana_trie.update(yomi.as_str());
}
for yomi in single_term.yomis() {
assert!(!yomi.is_empty());
kana_trie.update(yomi.as_str());
}
let segmenter = Segmenter::new(vec![
Arc::new(Mutex::new(kana_trie)),
user_data.lock().unwrap().kana_trie.clone(),
]);
let graph_builder: GraphBuilder<
MarisaSystemUnigramLM,
MarisaSystemBigramLM,
MarisaKanaKanjiDict,
> = GraphBuilder::new(
dict,
single_term,
user_data.clone(),
Rc::new(system_unigram_lm),
Rc::new(system_bigram_lm),
);
let graph_resolver = GraphResolver::default();
Ok(BigramWordViterbiEngine {
graph_builder,
segmenter,
graph_resolver,
user_data,
})
}
fn try_load(model_dir: &str, name: &str) -> Result<String> {
Ok(model_dir.to_string() + "/" + name)
}
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/akaza-dict/src/lib.rs | akaza-dict/src/lib.rs | pub mod conf;
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/akaza-dict/src/conf.rs | akaza-dict/src/conf.rs | use std::collections::HashMap;
use std::fs;
use std::path::Path;
use std::sync::{Arc, Mutex};
use anyhow::Result;
use encoding_rs::UTF_8;
use gtk::glib::signal::Inhibit;
use gtk::prelude::*;
use gtk::{Application, ApplicationWindow, Button, ListStore};
use gtk4 as gtk;
use gtk4::builders::MessageDialogBuilder;
use gtk4::gio::ApplicationFlags;
use gtk4::glib::Type;
use gtk4::{CellRendererText, Grid, MessageType, TreeView, TreeViewColumn};
use log::{info, trace};
use libakaza::config::Config;
use libakaza::dict::skk::read::read_skkdict;
use libakaza::dict::skk::write::write_skk_dict;
pub fn open_userdict_window(user_dict_path: &str) -> Result<()> {
let config = Arc::new(Mutex::new(Config::load()?));
let app = Application::new(Some("com.github.akaza.config"), ApplicationFlags::empty());
let user_dict_path = user_dict_path.to_string();
app.connect_activate(move |app| {
connect_activate(app, config.clone(), &user_dict_path).unwrap();
});
let v: Vec<String> = Vec::new();
app.run_with_args(v.as_slice());
Ok(())
}
fn connect_activate(
app: &Application,
_config: Arc<Mutex<Config>>,
user_dict_path: &str,
) -> Result<()> {
let window = ApplicationWindow::builder()
.application(app)
.default_width(520)
.default_height(500)
.title("Akaza ใฎ่จญๅฎ")
.build();
let grid = Grid::builder().build();
info!("Loading skk dict from {user_dict_path}");
let dict = read_skkdict(Path::new(user_dict_path), UTF_8)?;
let dict = dict
.iter()
.flat_map(|(yomi, surfaces)| {
surfaces
.iter()
.map(|surface| (yomi.to_string(), surface.to_string()))
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
let list_store = ListStore::new(&[Type::STRING, Type::STRING]);
for (yomi, surface) in dict {
list_store.set(&list_store.append(), &[(0, &yomi), (1, &surface)]);
}
// list_store.set(&list_store.append(), &[(0, &"world".to_string())]);
let tree_view = TreeView::builder().model(&list_store).build();
{
let tree_view_column = build_tree_view_column("่ชญใฟ", 0, list_store.clone());
tree_view.append_column(&tree_view_column);
}
{
let tree_view_column = build_tree_view_column("่กจ่จ", 1, list_store.clone());
tree_view.append_column(&tree_view_column);
}
// https://gitlab.gnome.org/GNOME/gtk/-/issues/3561
grid.attach(&tree_view, 0, 0, 6, 1);
// TODO ใใฎใธใใฏ Menu ใซใใใใgtk4-rs ใง menu ใไฝฟใๆนๆณใๅใใใใ
let add_button = Button::with_label("่ฟฝๅ ");
{
let list_store = list_store.clone();
add_button.connect_clicked(move |_| {
info!("Add new row...");
list_store.set(&list_store.append(), &[(0, &""), (1, &"")]);
});
}
grid.attach(&add_button, 4, 1, 1, 1);
{
let delete_btn = Button::with_label("ๅ้ค");
let list_store = list_store.clone();
let tree_view = tree_view;
delete_btn.connect_clicked(move |_| {
let selection = tree_view.selection();
let Some((_, tree_iter)) = selection.selected() else {
return;
};
list_store.remove(&tree_iter);
});
grid.attach(&delete_btn, 5, 1, 1, 1);
}
{
let save_btn = Button::with_label("ไฟๅญ");
let user_dict_path = user_dict_path.to_string();
save_btn.connect_clicked(move |_| {
let Some(iter) = list_store.iter_first() else {
return;
};
let mut dict: HashMap<String, Vec<String>> = HashMap::new();
loop {
let yomi: String = list_store.get(&iter, 0);
let surface: String = list_store.get(&iter, 1);
info!("Got: {}, {}", yomi, surface);
dict.entry(yomi).or_insert_with(Vec::new).push(surface);
if !list_store.iter_next(&iter) {
break;
}
}
if let Err(err) = write_skk_dict(&(user_dict_path.to_string() + ".tmp"), vec![dict]) {
let dialog = MessageDialogBuilder::new()
.message_type(MessageType::Error)
.text(&format!("Error: {err}"))
.build();
dialog.show();
}
info!("Renaming file");
if let Err(err) = fs::rename(user_dict_path.to_string() + ".tmp", &user_dict_path) {
let dialog = MessageDialogBuilder::new()
.message_type(MessageType::Error)
.text(&format!("Error: {err}"))
.build();
dialog.show();
}
});
grid.attach(&save_btn, 6, 1, 1, 1);
}
window.set_child(Some(&grid));
window.connect_close_request(move |window| {
if let Some(application) = window.application() {
application.remove_window(window);
}
Inhibit(false)
});
window.show();
Ok(())
}
fn build_tree_view_column(title: &str, column: u32, list_store: ListStore) -> TreeViewColumn {
let cell_renderer = CellRendererText::builder()
.editable(true)
.xpad(20)
.ypad(20)
.build();
cell_renderer.connect_edited(move |_cell_renderer, _treepath, _str| {
trace!("{:?}, {:?}", _treepath, _str);
if _str.is_empty() {
return;
}
let Some(iter) = list_store.iter(&_treepath) else {
return;
};
list_store.set_value(&iter, column, &_str.to_value());
});
TreeViewColumn::with_attributes(title, &cell_renderer, &[("text", column as i32)])
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
akaza-im/akaza | https://github.com/akaza-im/akaza/blob/b7dace72e7ce2054a9daf6ba0e6aca008bb8926f/akaza-dict/src/bin/akaza-dict.rs | akaza-dict/src/bin/akaza-dict.rs | use anyhow::Result;
use log::LevelFilter;
use akaza_dict::conf::open_userdict_window;
use std::env;
/// ใใใใฐ็จ
fn main() -> Result<()> {
let _ = env_logger::builder()
.filter_level(LevelFilter::Info)
.try_init();
let args: Vec<String> = env::args().collect();
open_userdict_window(&args[1])?;
Ok(())
}
| rust | MIT | b7dace72e7ce2054a9daf6ba0e6aca008bb8926f | 2026-01-04T19:35:02.740723Z | false |
Stavrospanakakis/is_ready | https://github.com/Stavrospanakakis/is_ready/blob/4ff5d7def790bef08ba48dc946b3790af1433ac8/src/lib.rs | src/lib.rs | use clap::Parser;
use std::net::TcpStream;
use std::process::Command;
use std::thread;
use std::time::Duration;
use tokio::time::timeout;
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
#[arg(short, long, default_value_t = 30)]
timeout: u64,
#[arg(short, long = "addr", required = true, value_name = "ADDRESS")]
addresses: Vec<String>,
#[arg(
required = true,
last = true,
allow_hyphen_values = true,
value_name = "COMMAND"
)]
cmd: Vec<String>,
#[arg(short = 'q', long)]
quiet: bool,
}
async fn wait_for(addresses: Vec<String>, quiet: bool) {
let mut threads = Vec::new();
for address in addresses {
let thread = tokio::spawn(async move {
loop {
match TcpStream::connect(&address) {
Ok(_) => {
if !quiet {
println!("Connected to {} successfully", address);
}
break;
}
Err(_) => {
if !quiet {
println!("Waiting for {}", address)
}
}
}
tokio::time::sleep(Duration::from_millis(500)).await;
}
});
threads.push(thread);
}
for thread in threads {
thread.await.unwrap_or_default();
}
}
fn exec_command(command: &str, args: &[String]) -> Result<(), String> {
if Command::new(command).args(args).spawn().is_err() {
let err = format!("Command not found: {}", command);
return Err(err);
}
Ok(())
}
pub async fn run() -> Result<(), String> {
let args = Args::parse();
let thread = thread::spawn(move || async move {
let my_duration = tokio::time::Duration::from_secs(args.timeout);
timeout(my_duration, wait_for(args.addresses, args.quiet)).await
});
if thread.join().unwrap().await.is_err() {
return Err(String::from(
"Connection timeout, could not connect to the addresses.",
));
}
exec_command(&args.cmd[0], &args.cmd[1..])?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn command_not_found() {
let args = Args {
timeout: 30,
addresses: vec![String::from("google.com:80")],
cmd: vec![String::from("not_a_command")],
quiet: false,
};
assert_eq!(
exec_command(&args.cmd[0], &args.cmd[1..]),
Err(String::from("Command not found: not_a_command"))
);
}
}
| rust | MIT | 4ff5d7def790bef08ba48dc946b3790af1433ac8 | 2026-01-04T20:17:06.789960Z | false |
Stavrospanakakis/is_ready | https://github.com/Stavrospanakakis/is_ready/blob/4ff5d7def790bef08ba48dc946b3790af1433ac8/src/main.rs | src/main.rs | use std::process;
#[tokio::main]
async fn main() {
if let Err(e) = is_ready::run().await {
println!("{}", e);
process::exit(1);
}
}
| rust | MIT | 4ff5d7def790bef08ba48dc946b3790af1433ac8 | 2026-01-04T20:17:06.789960Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/build.rs | build.rs | fn main() {
built::write_built_file()
.expect("Failed to acquire build-time information");
println!("cargo:rerun-if-changed=memory.x");
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/stream/src/stream.rs | stream/src/stream.rs | #![allow(non_camel_case_types)] // https://github.com/rust-embedded/heapless/issues/411
use super::{Format, Target};
use core::mem::MaybeUninit;
use heapless::{
box_pool,
pool::boxed::{Box, BoxBlock},
spsc::{Consumer, Producer, Queue},
};
use smoltcp_nal::embedded_nal::{UdpClientStack, nb};
// Magic first bytes indicating a UDP frame of straming data
const MAGIC: u16 = 0x057B;
// The size of the header, calculated in words.
// The header has a 16-bit magic word, an 8-bit format, 8-bit batch-size, and 32-bit sequence
// number, which corresponds to 8 bytes.
const HEADER_SIZE: usize = 8;
// The number of frames that can be buffered.
const FRAME_COUNT: usize = 4;
// The size of each frame in bytes.
// Ensure the resulting ethernet frame is within the MTU:
// 1500 MTU - 40 IP6 header - 8 UDP header - 32 VPN - 20 IP4
const FRAME_SIZE: usize = 1500 - 40 - 8 - 32 - 20;
// The size of the frame queue must be at least as large as the number of frame buffers. Every
// allocated frame buffer should fit in the queue.
const FRAME_QUEUE_SIZE: usize = FRAME_COUNT * 2;
type Frame = [MaybeUninit<u8>; FRAME_SIZE];
box_pool!(FRAME_POOL: Frame);
/// Configure streaming on a device.
///
/// # Args
/// * `stack` - A reference to the shared network stack.
///
/// # Returns
/// (generator, stream) where `generator` can be used to enqueue "batches" for transmission. The
/// `stream` is the logically consumer (UDP transmitter) of the enqueued data.
#[cfg(target_arch = "arm")]
pub fn setup<N: UdpClientStack<Error = smoltcp_nal::NetworkError>>(
stack: N,
) -> (FrameGenerator, DataStream<N>) {
// The queue needs to be at least as large as the frame count to ensure that every allocated
// frame can potentially be enqueued for transmission.
let queue =
cortex_m::singleton!(: Queue<StreamFrame, FRAME_QUEUE_SIZE> = Queue::new())
.unwrap();
let (producer, consumer) = queue.split();
#[allow(clippy::declare_interior_mutable_const)]
const FRAME: BoxBlock<Frame> = BoxBlock::new();
let memory =
cortex_m::singleton!(FRAME_DATA: [BoxBlock<Frame>; FRAME_COUNT] =
[FRAME; FRAME_COUNT])
.unwrap();
for block in memory.iter_mut() {
FRAME_POOL.manage(block);
}
let generator = FrameGenerator::new(producer);
let stream = DataStream::new(stack, consumer);
(generator, stream)
}
#[derive(Debug)]
struct StreamFrame {
buffer: Box<FRAME_POOL>,
offset: usize,
batches: u8,
}
impl StreamFrame {
pub fn new(
mut buffer: Box<FRAME_POOL>,
format_id: u8,
sequence_number: u32,
) -> Self {
for (byte, buf) in MAGIC
.to_le_bytes()
.iter()
.chain(&[format_id, 0])
.chain(sequence_number.to_le_bytes().iter())
.zip(buffer.iter_mut())
{
buf.write(*byte);
}
Self {
buffer,
offset: HEADER_SIZE,
batches: 0,
}
}
pub fn add_batch<F>(&mut self, mut f: F) -> usize
where
F: FnMut(&mut [MaybeUninit<u8>]) -> usize,
{
let len = f(&mut self.buffer[self.offset..]);
self.offset += len;
self.batches += 1;
len
}
pub fn is_full(&self, len: usize) -> bool {
self.offset + len > self.buffer.len()
}
pub fn finish(&mut self) -> &[MaybeUninit<u8>] {
self.buffer[3].write(self.batches);
&self.buffer[..self.offset]
}
}
/// The data generator for a stream.
pub struct FrameGenerator {
queue: Producer<'static, StreamFrame, FRAME_QUEUE_SIZE>,
current_frame: Option<StreamFrame>,
sequence_number: u32,
format: u8,
}
impl FrameGenerator {
fn new(queue: Producer<'static, StreamFrame, FRAME_QUEUE_SIZE>) -> Self {
Self {
queue,
format: Format::Unknown.into(),
current_frame: None,
sequence_number: 0,
}
}
/// Configure the format of the stream.
///
/// # Note:
/// This function shall only be called once upon initializing streaming
///
/// # Args
/// * `format` - The desired format of the stream.
pub fn configure(&mut self, format: impl Into<u8>) {
self.format = format.into();
}
/// Add a batch to the current stream frame.
///
/// # Args
/// * `f` - A closure that will be provided the buffer to write batch data into.
///
/// Returns the number of bytes written.
pub fn add<F>(&mut self, func: F)
where
F: FnMut(&mut [MaybeUninit<u8>]) -> usize,
{
let sequence_number = self.sequence_number;
self.sequence_number = self.sequence_number.wrapping_add(1);
let current_frame = match self.current_frame.as_mut() {
None => {
if let Ok(buffer) =
FRAME_POOL.alloc([MaybeUninit::uninit(); FRAME_SIZE])
{
self.current_frame.insert(StreamFrame::new(
buffer,
self.format,
sequence_number,
))
} else {
return;
}
}
Some(frame) => frame,
};
let len = current_frame.add_batch(func);
if current_frame.is_full(len) {
// Note(unwrap): The queue is designed to be at least as large as the frame buffer
// count, so this enqueue should always succeed.
if let Some(frame) = self.current_frame.take() {
self.queue.enqueue(frame).unwrap();
}
}
}
}
/// The "consumer" portion of the data stream.
///
/// # Note
/// This is responsible for consuming data and sending it over UDP.
pub struct DataStream<N: UdpClientStack> {
stack: N,
socket: Option<<N as UdpClientStack>::UdpSocket>,
queue: Consumer<'static, StreamFrame, FRAME_QUEUE_SIZE>,
remote: Target,
}
impl<N: UdpClientStack<Error = smoltcp_nal::NetworkError>> DataStream<N> {
/// Construct a new data streamer.
///
/// # Args
/// * `stack` - A reference to the shared network stack.
/// * `consumer` - The read side of the queue containing data to transmit.
/// * `frame_pool` - The Pool to return stream frame objects into.
fn new(
stack: N,
consumer: Consumer<'static, StreamFrame, FRAME_QUEUE_SIZE>,
) -> Self {
Self {
stack,
socket: None,
remote: Target::default(),
queue: consumer,
}
}
fn close(&mut self) {
if let Some(socket) = self.socket.take() {
log::info!("Closing stream");
// Note(unwrap): We guarantee that the socket is available above.
self.stack.close(socket).unwrap();
}
}
// Open new socket.
fn open(&mut self) -> Result<(), ()> {
// If there is already a socket of if remote address is unspecified,
// do not open a new socket.
if self.socket.is_some() || self.remote.0.ip().is_unspecified() {
return Err(());
}
let mut socket = self.stack.socket().or(Err(()))?;
// We may fail to connect if we don't have an IP address yet.
if self.stack.connect(&mut socket, self.remote.0).is_err() {
self.stack.close(socket).unwrap();
return Err(());
}
self.socket.replace(socket);
log::info!("Opening stream");
Ok(())
}
/// Configure the remote endpoint of the stream.
///
/// # Args
/// * `remote` - The destination to send stream data to.
pub fn set_remote(&mut self, remote: Target) {
// Close socket to be reopened if the remote has changed.
if remote != self.remote {
self.close();
}
self.remote = remote;
}
/// Process any data for transmission.
pub fn process(&mut self) {
match self.socket.as_mut() {
None => {
// If there's no socket available, try to connect to our remote.
if self.open().is_ok() {
// If we just successfully opened the socket, flush old data from queue.
while let Some(frame) = self.queue.dequeue() {
drop(frame.buffer);
}
}
}
Some(handle) => {
if let Some(mut frame) = self.queue.dequeue() {
// Transmit the frame and return it to the pool.
let buf = frame.finish();
let data = unsafe {
core::slice::from_raw_parts(
buf.as_ptr() as *const u8,
size_of_val(buf),
)
};
// If we fail to send, it can only be because the socket got closed on us (i.e.
// address update due to DHCP). If this happens, reopen the socket.
match self.stack.send(handle, data) {
Ok(_) => {},
// Our IP address may have changedm so handle reopening the UDP stream.
Err(nb::Error::Other(smoltcp_nal::NetworkError::UdpWriteFailure(smoltcp_nal::smoltcp::socket::udp::SendError::Unaddressable))) => {
log::warn!( "IP address updated during stream. Reopening socket");
let socket = self.socket.take().unwrap();
self.stack.close(socket).unwrap();
}
// The buffer should clear up once ICMP resolves the IP address, so ignore
// this error.
Err(nb::Error::Other(smoltcp_nal::NetworkError::UdpWriteFailure(smoltcp_nal::smoltcp::socket::udp::SendError::BufferFull))) => {}
Err(other) => {
log::warn!("Unexpected UDP error during data stream: {other:?}");
}
}
drop(frame.buffer)
}
}
}
}
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/stream/src/lib.rs | stream/src/lib.rs | //! Stabilizer data stream capabilities
//!
//! # Design
//! Data streamining utilizes UDP packets to send data streams at high throughput.
//! Packets are always sent in a best-effort fashion, and data may be dropped.
//!
//! Stabilizer organizes streamed data into batches within a "Frame" that will be sent as a UDP
//! packet. Each frame consits of a header followed by sequential batch serializations. The packet
//! header is constant for all streaming capabilities, but the serialization format after the header
//! is application-defined.
//!
//! ## Frame Header
//! The header consists of the following, all in little-endian.
//!
//! * **Magic word 0x057B** (u16): a constant to identify Stabilizer streaming data.
//! * **Format Code** (u8): a unique ID that indicates the serialization format of each batch of data
//! in the frame. Refer to [Format] for further information.
//! * **Batch Count** (u8): the number of batches of data.
//! * **Sequence Number** (u32): an the sequence number of the first batch in the frame.
//! This can be used to determine if and how many stream batches are lost.
//!
//! # Example
//! A sample Python script is available in `scripts/stream_throughput.py` to demonstrate reception
//! of streamed data.
#![no_std]
use core::{fmt::Write, net::SocketAddr};
use heapless::String;
use num_enum::IntoPrimitive;
use serde::Serialize;
use serde_with::DeserializeFromStr;
/// Represents the destination for the UDP stream to send data to.
///
/// # Miniconf
/// `<addr>:<port>`
///
/// * `<addr>` is an IPv4 address. E.g. `192.168.0.1`
/// * `<port>` is any unsigned 16-bit value.
///
/// ## Example
/// `192.168.0.1:1234`
#[derive(Copy, Clone, Debug, DeserializeFromStr, PartialEq, Eq)]
pub struct Target(pub SocketAddr);
impl Default for Target {
fn default() -> Self {
Self("0.0.0.0:0".parse().unwrap())
}
}
impl Serialize for Target {
fn serialize<S: serde::Serializer>(
&self,
serializer: S,
) -> Result<S::Ok, S::Error> {
let mut display: String<30> = String::new();
write!(&mut display, "{}", self.0).unwrap();
serializer.serialize_str(&display)
}
}
impl core::str::FromStr for Target {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let addr = SocketAddr::from_str(s)
.map_err(|_| "Invalid socket address format")?;
Ok(Self(addr))
}
}
/// Specifies the format of streamed data
#[repr(u8)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, IntoPrimitive)]
pub enum Format {
/// Reserved, unused format specifier.
Unknown = 0,
/// ADC0, ADC1, DAC0, and DAC1 sequentially in little-endian format.
///
/// # Example
/// With a batch size of 2, the serialization would take the following form:
/// ```
/// <ADC0[0]> <ADC0[1]> <ADC1[0]> <ADC1[1]> <DAC0[0]> <DAC0[1]> <DAC1[0]> <DAC1[1]>
/// ```
AdcDacData = 1,
/// FLS (fiber length stabilization) format. See the FLS application.
Fls = 2,
/// Thermostat-EEM data. See `thermostat-eem` repo and application.
ThermostatEem = 3,
}
#[cfg(target_arch = "arm")]
mod stream;
#[cfg(target_arch = "arm")]
pub use stream::*;
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/signal_generator/src/lib.rs | signal_generator/src/lib.rs | #![no_std]
use core::iter::Take;
use idsp::{AccuOsc, Sweep};
use miniconf::Tree;
use rand_core::{RngCore, SeedableRng};
use rand_xorshift::XorShiftRng;
use serde::{Deserialize, Serialize};
/// Types of signals that can be generated.
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
pub enum Signal {
Cosine,
Square,
Triangle,
WhiteNoise,
SweptSine,
}
impl Signal {
#[inline]
fn map(&self, x: i32) -> i32 {
match self {
Self::Cosine => idsp::cossin(x).0,
Self::Square => {
if x.is_negative() {
-i32::MAX
} else {
i32::MAX
}
}
Self::Triangle => i32::MIN + (x.saturating_abs() << 1),
_ => unimplemented!(),
}
}
}
/// Basic configuration for a generated signal.
#[derive(Clone, Debug, Tree, Serialize, Deserialize)]
#[tree(meta(doc, typename))]
pub struct Config {
/// The signal type that should be generated. See [Signal] variants.
#[tree(with=miniconf::leaf)]
signal: Signal,
/// The frequency of the generated signal in Hertz.
frequency: f32,
/// The normalized symmetry of the signal. At 0% symmetry, the duration of the first half oscillation is minimal.
/// At 25% symmetry, the first half oscillation lasts for 25% of the signal period. For square wave output this
/// symmetry is the duty cycle.
symmetry: f32,
/// The amplitude of the output signal
amplitude: f32,
/// Output offset
offset: f32,
/// The initial phase of the period output signal in turns
phase: f32,
/// Number of half periods (periodic) or samples (sweep and noise), 0 for infinte
length: u32,
/// Sweep: initial state
state: i64,
/// Sweep: Sweep rate
rate: i32,
}
impl Default for Config {
fn default() -> Self {
Self {
frequency: 1.0e3,
symmetry: 0.5,
signal: Signal::Cosine,
amplitude: 0.0,
phase: 0.0,
offset: 0.0,
state: 0,
rate: 0,
length: 0,
}
}
}
#[derive(Clone, Debug)]
pub struct AsymmetricAccu {
ftw: [i32; 2],
pow: i32,
accu: i32,
count: u32,
}
impl Iterator for AsymmetricAccu {
type Item = i32;
fn next(&mut self) -> Option<Self::Item> {
let sign = self.accu.is_negative();
self.accu = self.accu.wrapping_add(self.ftw[sign as usize]);
self.count
.checked_sub(sign as u32 ^ self.accu.is_negative() as u32)
.map(|c| {
self.count = c;
self.accu.wrapping_add(self.pow)
})
}
}
#[derive(Clone, Debug)]
pub struct Scaler {
amp: i32,
offset: i32,
}
impl Scaler {
fn map(&self, x: i32) -> i32 {
(((x as i64 * self.amp as i64) >> 31) as i32)
.saturating_add(self.offset)
}
}
/// Represents the errors that can occur when attempting to configure the signal generator.
#[derive(Copy, Clone, Debug, thiserror::Error)]
pub enum Error {
/// The provided amplitude is out-of-range.
#[error("Invalid amplitude")]
Amplitude,
/// The provided symmetry is out of range.
#[error("Invalid symmetry")]
Symmetry,
/// The provided frequency is out of range.
#[error("Invalid frequency")]
Frequency,
/// Sweep would wrap/invalid
#[error("Sweep would wrap")]
Wrap,
}
#[derive(Clone, Debug)]
pub enum Source {
SweptSine {
sweep: Take<AccuOsc<Sweep>>,
amp: Scaler,
},
Periodic {
accu: AsymmetricAccu,
signal: Signal,
amp: Scaler,
},
WhiteNoise {
rng: XorShiftRng,
count: u32,
amp: Scaler,
},
}
impl Iterator for Source {
type Item = i32;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
let (s, a) = match self {
Self::SweptSine { sweep, amp } => (sweep.next().map(|c| c.im), amp),
Self::Periodic { accu, signal, amp } => {
(accu.next().map(|p| signal.map(p)), amp)
}
Self::WhiteNoise { rng, count, amp } => (
count.checked_sub(1).map(|m| {
*count = m;
rng.next_u32() as i32
}),
amp,
),
};
Some(a.map(s.unwrap_or_default()))
}
}
impl Config {
/// Convert from SI config
pub fn build(&self, period: f32, scale: f32) -> Result<Source, Error> {
if !(0.0..1.0).contains(&self.symmetry) {
return Err(Error::Symmetry);
}
const NYQUIST: f32 = (1u32 << 31) as _;
let ftw0 = self.frequency * period * NYQUIST;
if !(0.0..2.0 * NYQUIST).contains(&ftw0) {
return Err(Error::Frequency);
}
// Clip both frequency tuning words to within Nyquist before rounding.
let ftw = [
if self.symmetry * NYQUIST > ftw0 {
ftw0 / self.symmetry
} else {
NYQUIST
} as i32,
if (1.0 - self.symmetry) * NYQUIST > ftw0 {
ftw0 / (1.0 - self.symmetry)
} else {
NYQUIST
} as i32,
];
let offset = self.offset * scale;
let amplitude = self.amplitude * scale;
fn abs(x: f32) -> f32 {
if x.is_sign_negative() { -x } else { x }
}
if abs(offset) + abs(amplitude) >= 1.0 {
return Err(Error::Amplitude);
}
let amp = Scaler {
amp: (amplitude * NYQUIST) as _,
offset: (offset * NYQUIST) as _,
};
Ok(match self.signal {
signal @ (Signal::Cosine | Signal::Square | Signal::Triangle) => {
Source::Periodic {
accu: AsymmetricAccu {
ftw,
pow: (self.phase * NYQUIST) as i32,
accu: 0,
count: self.length,
},
signal,
amp,
}
}
Signal::SweptSine => Source::SweptSine {
sweep: AccuOsc::new(Sweep::new(self.rate, self.state))
.take(self.length as _),
amp,
},
Signal::WhiteNoise => Source::WhiteNoise {
rng: XorShiftRng::from_seed(Default::default()),
count: self.length,
amp,
},
})
}
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/design_parameters.rs | src/design_parameters.rs | use arbitrary_int::u5;
use fugit::MegahertzU32 as MegaHertz;
/// The system clock, used in various timer calculations
pub const SYSCLK: MegaHertz = MegaHertz::MHz(400);
/// The ADC setup time is the number of seconds after the CSn line goes low before the serial clock
/// may begin. This is used for performing the internal ADC conversion.
pub const ADC_SETUP_TIME: f32 = 220e-9;
/// The maximum DAC/ADC serial clock line frequency. This is a hardware limit.
pub const ADC_DAC_SCK_MAX: MegaHertz = MegaHertz::MHz(50);
/// The optimal counting frequency of the hardware timers used for timestamping and sampling.
pub const TIMER_FREQUENCY: MegaHertz = MegaHertz::MHz(100);
pub const TIMER_PERIOD: f32 = 1. / (TIMER_FREQUENCY.to_Hz() as f32);
/// The QSPI frequency for communicating with the pounder DDS.
pub const POUNDER_QSPI_FREQUENCY: MegaHertz = MegaHertz::MHz(50);
/// The delay after initiating a QSPI transfer before asserting the IO_Update for the pounder DDS.
// Pending Pounder Profile writes are up to 32 bytes (QSPI FIFO depth),
// with 2 cycles required per byte, coming out to a total of 64 QSPI clock cycles.
// The QSPI is configured for 50MHz, so this comes out to an offset
// of 1280 ns. We use 1300 ns to be safe.
pub const POUNDER_IO_UPDATE_DELAY: f32 = 1_300e-9;
/// The duration to assert IO_Update for the pounder DDS.
// IO_Update should be latched for 4 SYNC_CLK cycles after the QSPI profile write. With pounder
// SYNC_CLK running at 100MHz (1/4 of the pounder reference clock of 500MHz), this corresponds to
// 32ns. To accomodate rounding errors, we use 50ns instead.
pub const POUNDER_IO_UPDATE_DURATION: f32 = 50e-9;
/// The DDS reference clock frequency in MHz.
pub const DDS_REF_CLK: MegaHertz = MegaHertz::MHz(100);
/// The multiplier used for the DDS reference clock PLL.
pub const DDS_MULTIPLIER: u5 = u5::new(5);
/// The DDS system clock frequency after the internal PLL multiplication.
#[allow(dead_code)]
pub const DDS_SYSTEM_CLK: MegaHertz =
MegaHertz::MHz(DDS_REF_CLK.to_MHz() * DDS_MULTIPLIER.value() as u32);
/// The divider from the DDS system clock to the SYNC_CLK output (sync-clk is always 1/4 of sysclk).
#[allow(dead_code)]
pub const DDS_SYNC_CLK_DIV: u8 = 4;
/// The maximum ADC/DAC sample processing buffer size.
pub const MAX_SAMPLE_BUFFER_SIZE: usize = 32;
pub type SampleBuffer = [u16; MAX_SAMPLE_BUFFER_SIZE];
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/lib.rs | src/lib.rs | #![no_std]
#![cfg_attr(feature = "nightly", feature(core_intrinsics))]
pub mod design_parameters;
#[cfg(target_os = "none")]
pub mod hardware;
pub mod telemetry;
pub mod convert;
pub mod statistics;
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/convert.rs | src/convert.rs | use bitbybit::bitenum;
use core::convert::TryFrom;
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize, Default)]
#[bitenum(u2, exhaustive = true)]
pub enum Gain {
#[default]
G1 = 0b00,
G2 = 0b01,
G5 = 0b10,
G10 = 0b11,
}
impl Gain {
/// Get the AFE gain as a numerical value.
pub const fn gain(self) -> f32 {
match self {
Gain::G1 => 1.0,
Gain::G2 => 2.0,
Gain::G5 => 5.0,
Gain::G10 => 10.0,
}
}
}
/// A type representing an ADC sample.
#[derive(Copy, Clone, Default)]
pub struct AdcCode(pub u16);
impl AdcCode {
// The ADC has a differential input with a range of +/- 4.096 V and 16-bit resolution.
// The gain into the two inputs is 1/5.
const FULL_SCALE: f32 = 5.0 / 2.0 * 4.096;
const VOLT_PER_LSB: f32 = -Self::FULL_SCALE / i16::MIN as f32;
const LSB_PER_VOLT: f32 = 1. / Self::VOLT_PER_LSB;
}
impl From<u16> for AdcCode {
/// Construct an ADC code from a provided binary (ADC-formatted) code.
fn from(value: u16) -> Self {
Self(value)
}
}
impl From<i16> for AdcCode {
/// Construct an ADC code from the stabilizer-defined code (i16 full range).
fn from(value: i16) -> Self {
Self(value as u16)
}
}
impl From<AdcCode> for i16 {
/// Get a stabilizer-defined code from the ADC code.
fn from(code: AdcCode) -> i16 {
code.0 as i16
}
}
impl From<AdcCode> for u16 {
/// Get an ADC-frmatted binary value from the code.
fn from(code: AdcCode) -> u16 {
code.0
}
}
impl From<AdcCode> for f32 {
/// Convert raw ADC codes to/from voltage levels.
///
/// # Note
/// This does not account for the programmable gain amplifier at the signal input.
fn from(code: AdcCode) -> f32 {
i16::from(code) as f32 * AdcCode::VOLT_PER_LSB
}
}
impl TryFrom<f32> for AdcCode {
type Error = ();
fn try_from(voltage: f32) -> Result<AdcCode, ()> {
let code = voltage * Self::LSB_PER_VOLT;
if !(i16::MIN as f32..=i16::MAX as f32).contains(&code) {
Err(())
} else {
Ok(AdcCode::from(code as i16))
}
}
}
/// Custom type for referencing DAC output codes.
/// The internal integer is the raw code written to the DAC output register.
#[derive(Copy, Clone, Default)]
pub struct DacCode(pub u16);
impl DacCode {
// The DAC output range in bipolar mode (including the external output op-amp) is +/- 4.096
// V with 16-bit resolution. The anti-aliasing filter has an additional gain of 2.5.
pub const FULL_SCALE: f32 = 4.096 * 2.5;
pub const VOLT_PER_LSB: f32 = -Self::FULL_SCALE / i16::MIN as f32;
pub const LSB_PER_VOLT: f32 = 1. / Self::VOLT_PER_LSB;
}
impl TryFrom<f32> for DacCode {
type Error = ();
fn try_from(voltage: f32) -> Result<DacCode, ()> {
let code = voltage * Self::LSB_PER_VOLT;
if !(i16::MIN as f32..=i16::MAX as f32).contains(&code) {
Err(())
} else {
Ok(DacCode::from(code as i16))
}
}
}
impl From<DacCode> for f32 {
fn from(code: DacCode) -> f32 {
i16::from(code) as f32 * DacCode::VOLT_PER_LSB
}
}
impl From<DacCode> for i16 {
fn from(code: DacCode) -> i16 {
(code.0 as i16).wrapping_sub(i16::MIN)
}
}
impl From<i16> for DacCode {
/// Encode signed 16-bit values into DAC offset binary for a bipolar output configuration.
fn from(value: i16) -> Self {
Self(value.wrapping_add(i16::MIN) as u16)
}
}
impl From<u16> for DacCode {
/// Create a dac code from the provided DAC output code.
fn from(value: u16) -> Self {
Self(value)
}
}
pub fn att_is_valid(attenuation: f32) -> bool {
(0.0..=31.5).contains(&attenuation)
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/telemetry.rs | src/telemetry.rs | use crate::convert::{AdcCode, DacCode, Gain};
use serde::Serialize;
/// The telemetry buffer is used for storing sample values during execution.
///
/// # Note
/// These values can be converted to SI units immediately before reporting to save processing time.
/// This allows for the DSP process to continually update the values without incurring significant
/// run-time overhead during conversion to SI units.
#[derive(Clone, Default)]
pub struct TelemetryBuffer {
/// The latest input sample on ADC0/ADC1.
pub adcs: [AdcCode; 2],
/// The latest output code on DAC0/DAC1.
pub dacs: [DacCode; 2],
/// The latest digital input states during processing.
pub digital_inputs: [bool; 2],
}
/// The telemetry structure is data that is ultimately reported as telemetry over MQTT.
///
/// # Note
/// This structure should be generated on-demand by the buffer when required to minimize conversion
/// overhead.
#[derive(Serialize)]
pub struct Telemetry {
/// Most recent input voltage measurement.
pub adcs: [f32; 2],
/// Most recent output voltage.
pub dacs: [f32; 2],
/// Most recent digital input assertion state.
pub digital_inputs: [bool; 2],
/// The CPU temperature in degrees Celsius.
pub cpu_temp: f32,
}
impl TelemetryBuffer {
/// Convert the telemetry buffer to finalized, SI-unit telemetry for reporting.
///
/// # Args
/// * `afe0` - The current AFE configuration for channel 0.
/// * `afe1` - The current AFE configuration for channel 1.
/// * `cpu_temp` - The current CPU temperature.
///
/// # Returns
/// The finalized telemetry structure that can be serialized and reported.
pub fn finalize(self, afe0: Gain, afe1: Gain, cpu_temp: f32) -> Telemetry {
let in0_volts = f32::from(self.adcs[0]) / afe0.gain();
let in1_volts = f32::from(self.adcs[1]) / afe1.gain();
Telemetry {
cpu_temp,
adcs: [in0_volts, in1_volts],
dacs: [self.dacs[0].into(), self.dacs[1].into()],
digital_inputs: self.digital_inputs,
}
}
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/statistics.rs | src/statistics.rs | use serde::Serialize;
#[derive(Copy, Clone, PartialEq, Eq, Debug, Serialize)]
pub struct State {
x0: i32,
count: u32,
min: i32,
max: i32,
m1: i64,
// Note: The variance computation is (almost) the most naive one.
// Alternative algorithms (e.g. Welford) are less limited in dynamic
// range but need a floating point division for each `update()`.
//
// Here we increase dynamic range by taking data relative to the first sample.
// That works well in many cases. Still, `m2` overflows if `sum((x-x0)**2) > u64::MAX`.
// There are further (unmentioned) constraints in `get()` and `get_scaled()`.
m2: u64,
}
impl Default for State {
fn default() -> Self {
Self {
x0: 0,
count: 0,
max: i32::MIN,
min: i32::MAX,
m1: 0,
m2: 0,
}
}
}
impl State {
pub fn update(&mut self, x: i32) {
if self.count == 0 {
self.x0 = x;
}
let x64 = (x - self.x0) as i64;
self.count += 1;
self.m1 += x64;
self.m2 += (x64 * x64) as u64;
self.max = self.max.max(x);
self.min = self.min.min(x);
}
pub fn get(&self) -> Statistics {
let mut stat = Statistics {
mean: 0,
var: 0,
max: self.max,
min: self.min,
};
if self.count != 0 {
let mean = self.m1 / self.count as i64;
stat.mean = mean as i32 + self.x0;
stat.var = (self.m2 / self.count as u64) - (mean * mean) as u64;
}
stat
}
pub fn get_scaled(&self, scale: f32) -> ScaledStatistics {
let mut stat = ScaledStatistics {
mean: 0.,
std: 0.,
max: self.max as f32 * scale,
min: self.min as f32 * scale,
};
if self.count != 0 {
let c = 1. / self.count as f64;
let mean = self.m1 as f64 * c;
stat.mean = (mean as f32 + self.x0 as f32) * scale;
let var = self.m2 as f64 * c - mean * mean;
stat.std = libm::sqrtf(var as _) * scale;
}
stat
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug, Default, Serialize)]
pub struct Statistics {
pub min: i32,
pub max: i32,
pub mean: i32,
pub var: u64,
}
#[derive(Copy, Clone, PartialEq, Debug, Default, Serialize)]
pub struct ScaledStatistics {
pub min: f32,
pub max: f32,
pub mean: f32,
pub std: f32,
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/setup.rs | src/hardware/setup.rs | //! Stabilizer hardware configuration
//!
//! This file contains all of the hardware-specific configuration of Stabilizer.
use super::hal::{
self,
ethernet::{self, PHY},
gpio::{self, Speed},
prelude::*,
};
use core::cell::RefCell;
use core::sync::atomic::{self, AtomicBool, Ordering};
use core::{fmt::Write, ptr};
use embedded_hal_compat::{Forward, ForwardCompat, markers::ForwardOutputPin};
use grounded::uninit::GroundedCell;
use heapless::String;
use smoltcp_nal::smoltcp;
use platform::{AppSettings, ApplicationMetadata, NetSettings};
use crate::design_parameters;
use super::{
DigitalInput0, DigitalInput1, Eem, Gpio, HardwareVersion, Pgia,
SerialTerminal, SystemTimer, Systick, UsbDevice, adc, afe,
cpu_temp_sensor::CpuTempSensor,
dac, eeprom,
input_stamper::InputStamper,
net::{EthernetPhy, NetworkStack},
pounder,
pounder::dds_output::DdsOutput,
shared_adc::SharedAdc,
timers,
};
const NUM_TCP_SOCKETS: usize = 4;
const NUM_UDP_SOCKETS: usize = 1;
const NUM_SOCKETS: usize = NUM_UDP_SOCKETS + NUM_TCP_SOCKETS;
pub struct NetStorage {
pub ip_addrs: [smoltcp::wire::IpCidr; 1],
// Note: There is an additional socket set item required for the DHCP and DNS sockets
// respectively.
pub sockets: [smoltcp::iface::SocketStorage<'static>; NUM_SOCKETS + 2],
pub tcp_socket_storage: [TcpSocketStorage; NUM_TCP_SOCKETS],
pub udp_socket_storage: [UdpSocketStorage; NUM_UDP_SOCKETS],
pub dns_storage: [Option<smoltcp::socket::dns::DnsQuery>; 1],
}
#[derive(Clone)]
pub struct UdpSocketStorage {
rx_storage: [u8; 1024],
tx_storage: [u8; 2048],
tx_metadata: [smoltcp::storage::PacketMetadata<
smoltcp::socket::udp::UdpMetadata,
>; 10],
rx_metadata: [smoltcp::storage::PacketMetadata<
smoltcp::socket::udp::UdpMetadata,
>; 10],
}
impl Default for UdpSocketStorage {
fn default() -> Self {
Self {
rx_storage: [0; 1024],
tx_storage: [0; 2048],
tx_metadata: [smoltcp::storage::PacketMetadata::EMPTY; 10],
rx_metadata: [smoltcp::storage::PacketMetadata::EMPTY; 10],
}
}
}
#[derive(Clone)]
pub struct TcpSocketStorage {
rx_storage: [u8; 1024],
tx_storage: [u8; 1024],
}
impl Default for TcpSocketStorage {
fn default() -> Self {
Self {
rx_storage: [0; 1024],
tx_storage: [0; 1024],
}
}
}
impl Default for NetStorage {
fn default() -> Self {
NetStorage {
// Placeholder for the real IP address, which is initialized at runtime.
ip_addrs: [smoltcp::wire::IpCidr::Ipv6(
smoltcp::wire::Ipv6Cidr::SOLICITED_NODE_PREFIX,
)],
sockets: [smoltcp::iface::SocketStorage::EMPTY; NUM_SOCKETS + 2],
tcp_socket_storage: Default::default(),
udp_socket_storage: Default::default(),
dns_storage: [None; 1],
}
}
}
/// The available networking devices on Stabilizer.
pub struct NetworkDevices {
pub stack: NetworkStack,
pub phy: EthernetPhy,
pub mac_address: smoltcp::wire::EthernetAddress,
}
/// The available hardware interfaces on Stabilizer.
pub struct Stabilizer<C: serial_settings::Settings + 'static> {
pub temperature_sensor: CpuTempSensor,
pub afes: [Pgia; 2],
pub adcs: (adc::Adc0Input, adc::Adc1Input),
pub dacs: (dac::Dac0Output, dac::Dac1Output),
pub input_stamper: InputStamper,
pub sampling_timer: timers::SamplingTimer,
pub timestamp_timer: timers::TimestampTimer,
pub network_devices: NetworkDevices,
pub digital_inputs: (DigitalInput0, DigitalInput1),
pub usb_serial: SerialTerminal<C>,
pub usb: UsbDevice,
pub fp_led: [gpio::ErasedPin<gpio::Output>; 4],
pub metadata: &'static ApplicationMetadata,
pub settings: C,
}
pub enum Mezzanine {
None,
Pounder(Pounder),
}
/// The available Pounder-specific hardware interfaces.
pub struct Pounder {
pub pounder: pounder::PounderDevices,
pub dds_output: DdsOutput,
#[cfg(not(feature = "pounder_v1_0"))]
pub timestamper: pounder::timestamp::Timestamper,
}
#[unsafe(link_section = ".sram3.eth")]
/// Static storage for the ethernet DMA descriptor ring.
static DES_RING: GroundedCell<
ethernet::DesRing<
{ super::net::TX_DESRING_CNT },
{ super::net::RX_DESRING_CNT },
>,
> = GroundedCell::uninit();
/// Setup ITCM and load its code from flash.
///
/// For portability and maintainability this is implemented in Rust.
/// Since this is implemented in Rust the compiler may assume that bss and data are set
/// up already. There is no easy way to ensure this implementation will never need bss
/// or data. Hence we can't safely run this as the cortex-m-rt `pre_init` hook before
/// bss/data is setup.
///
/// Calling (through IRQ or directly) any code in ITCM before having called
/// this method is undefined.
fn load_itcm() {
unsafe extern "C" {
// ZST (`()`: not layout-stable. empty/zst struct in `repr(C)``: not "proper" C)
unsafe static mut __sitcm: [u32; 0];
unsafe static mut __eitcm: [u32; 0];
unsafe static mut __siitcm: [u32; 0];
}
// NOTE(unsafe): Assuming the address symbols from the linker as well as
// the source instruction data are all valid, this is safe as it only
// copies linker-prepared data to where the code expects it to be.
// Calling it multiple times is safe as well.
// ITCM is enabled on reset on our CPU but might not be on others.
// Keep for completeness.
const ITCMCR: *mut u32 = 0xE000_EF90usize as _;
unsafe {
ptr::write_volatile(ITCMCR, ptr::read_volatile(ITCMCR) | 1);
}
// Ensure ITCM is enabled before loading.
atomic::fence(Ordering::SeqCst);
let sitcm = ptr::addr_of_mut!(__sitcm) as *mut u32;
let eitcm = ptr::addr_of!(__eitcm) as *const u32;
let siitcm = ptr::addr_of!(__siitcm) as *const u32;
unsafe {
let len = eitcm.offset_from(sitcm) as usize;
// Load code into ITCM.
ptr::copy(siitcm, sitcm, len);
}
// Ensure ITCM is loaded before potentially executing any instructions from it.
atomic::fence(Ordering::SeqCst);
cortex_m::asm::dsb();
cortex_m::asm::isb();
}
/// Configure the stabilizer hardware for operation.
///
/// # Note
/// Refer to [design_parameters::TIMER_FREQUENCY] to determine the frequency of the sampling timer.
///
/// # Args
/// * `core` - The cortex-m peripherals.
/// * `device` - The microcontroller peripherals to be configured.
/// * `clock` - A `SystemTimer` implementing `Clock`.
/// * `batch_size` - The size of each ADC/DAC batch.
/// * `sample_ticks` - The number of timer ticks between each sample.
///
/// # Returns
/// (stabilizer, pounder) where `stabilizer` is a `StabilizerDevices` structure containing all
/// stabilizer hardware interfaces in a disabled state. `pounder` is an `Option` containing
/// `Some(devices)` if pounder is detected, where `devices` is a `PounderDevices` structure
/// containing all of the pounder hardware interfaces in a disabled state.
pub fn setup<C>(
mut core: hal::stm32::CorePeripherals,
device: hal::stm32::Peripherals,
clock: SystemTimer,
batch_size: usize,
sample_ticks: u32,
) -> (Stabilizer<C>, Mezzanine, Eem)
where
C: serial_settings::Settings + AppSettings,
{
// Set up RTT logging
{
// Enable debug during WFE/WFI-induced sleep
device.DBGMCU.cr.modify(|_, w| w.dbgsleep_d1().set_bit());
// Set up RTT channel to use for `rprintln!()` as "best effort".
// This removes a critical section around the logging and thus allows
// high-prio tasks to always interrupt at low latency.
// It comes at a cost:
// If a high-priority tasks preempts while we are logging something,
// and if we then also want to log from within that high-preiority task,
// the high-prio log message will be lost.
let channels = rtt_target::rtt_init_default!();
// Note(unsafe): The closure we pass does not establish a critical section
// as demanded but it does ensure synchronization and implements a lock.
unsafe {
rtt_target::set_print_channel_cs(
channels.up.0,
&((|arg, f| {
static LOCKED: AtomicBool = AtomicBool::new(false);
if LOCKED.compare_exchange_weak(
false,
true,
Ordering::Acquire,
Ordering::Relaxed,
) == Ok(false)
{
f(arg);
LOCKED.store(false, Ordering::Release);
}
}) as rtt_target::CriticalSectionFunc),
);
}
static LOGGER: rtt_logger::RTTLogger =
rtt_logger::RTTLogger::new(log::LevelFilter::Info);
log::set_logger(&LOGGER)
.map(|()| log::set_max_level(log::LevelFilter::Trace))
.unwrap();
log::info!("Starting");
}
// Check for a reboot to DFU before doing any system configuration.
if platform::dfu_flag_is_set() {
platform::bootload_dfu();
}
let pwr = device.PWR.constrain();
let vos = pwr.freeze();
// Enable SRAM3 for the ethernet descriptor ring.
device.RCC.ahb2enr.modify(|_, w| w.sram3en().set_bit());
// Clear reset flags.
device.RCC.rsr.write(|w| w.rmvf().set_bit());
// Select the PLLs for SPI.
device
.RCC
.d2ccip1r
.modify(|_, w| w.spi123sel().pll2_p().spi45sel().pll2_q());
device.RCC.d1ccipr.modify(|_, w| w.qspisel().rcc_hclk3());
device
.RCC
.d3ccipr
.modify(|_, w| w.adcsel().per().spi6sel().pll2_q());
let rcc = device.RCC.constrain();
let mut ccdr = rcc
.use_hse(8.MHz())
.sysclk(design_parameters::SYSCLK.convert())
.hclk(200.MHz())
.per_ck(64.MHz()) // fixed frequency HSI, only used for internal ADC. This is not the "peripheral" clock for timers and others.
.pll2_p_ck(100.MHz())
.pll2_q_ck(100.MHz())
.freeze(vos, &device.SYSCFG);
// Set up USB clocks.
ccdr.clocks.hsi48_ck().unwrap();
ccdr.peripheral
.kernel_usb_clk_mux(hal::rcc::rec::UsbClkSel::Hsi48);
// Before being able to call any code in ITCM, load that code from flash.
load_itcm();
Systick::start(core.SYST, ccdr.clocks.sysclk().to_Hz());
// After ITCM loading.
core.SCB.enable_icache();
// Note: Frequencies are scaled by 2 to account for the M7 dual instruction pipeline.
let mut delay = platform::AsmDelay::new(ccdr.clocks.c_ck().to_Hz() * 2);
let gpioa = device.GPIOA.split(ccdr.peripheral.GPIOA);
let gpiob = device.GPIOB.split(ccdr.peripheral.GPIOB);
let gpioc = device.GPIOC.split(ccdr.peripheral.GPIOC);
let gpiod = device.GPIOD.split(ccdr.peripheral.GPIOD);
let gpioe = device.GPIOE.split(ccdr.peripheral.GPIOE);
let gpiof = device.GPIOF.split(ccdr.peripheral.GPIOF);
let mut gpiog = device.GPIOG.split(ccdr.peripheral.GPIOG);
let dma_streams =
hal::dma::dma::StreamsTuple::new(device.DMA1, ccdr.peripheral.DMA1);
// Verify that batch period does not exceed RTIC Monotonic timer period.
assert!(
(batch_size as u32 * sample_ticks) as f32
* design_parameters::TIMER_PERIOD
* (super::MONOTONIC_FREQUENCY as f32)
< 1.
);
// Configure timer 2 to trigger conversions for the ADC
let mut sampling_timer = {
// The timer frequency is manually adjusted below, so the 1KHz setting here is a
// dont-care.
let mut timer2 =
device
.TIM2
.timer(1.kHz(), ccdr.peripheral.TIM2, &ccdr.clocks);
// Configure the timer to count at the designed tick rate. We will manually set the
// period below.
timer2.pause();
timer2.set_tick_freq(design_parameters::TIMER_FREQUENCY.convert());
let mut sampling_timer = timers::SamplingTimer::new(timer2);
sampling_timer.set_period_ticks(sample_ticks - 1);
// The sampling timer is used as the master timer for the shadow-sampling timer. Thus,
// it generates a trigger whenever it is enabled.
sampling_timer
};
let mut shadow_sampling_timer = {
// The timer frequency is manually adjusted below, so the 1KHz setting here is a
// dont-care.
let mut timer3 =
device
.TIM3
.timer(1.kHz(), ccdr.peripheral.TIM3, &ccdr.clocks);
// Configure the timer to count at the designed tick rate. We will manually set the
// period below.
timer3.pause();
timer3.reset_counter();
timer3.set_tick_freq(design_parameters::TIMER_FREQUENCY.convert());
let mut shadow_sampling_timer =
timers::ShadowSamplingTimer::new(timer3);
shadow_sampling_timer.set_period_ticks(sample_ticks as u16 - 1);
// The shadow sampling timer is a slave-mode timer to the sampling timer. It should
// always be in-sync - thus, we configure it to operate in slave mode using "Trigger
// mode".
// For TIM3, TIM2 can be made the internal trigger connection using ITR1. Thus, the
// SamplingTimer start now gates the start of the ShadowSamplingTimer.
shadow_sampling_timer.set_slave_mode(
timers::TriggerSource::Trigger1,
timers::SlaveMode::Trigger,
);
shadow_sampling_timer
};
let sampling_timer_channels = sampling_timer.channels();
let shadow_sampling_timer_channels = shadow_sampling_timer.channels();
let mut timestamp_timer = {
// The timer frequency is manually adjusted below, so the 1KHz setting here is a
// dont-care.
let mut timer5 =
device
.TIM5
.timer(1.kHz(), ccdr.peripheral.TIM5, &ccdr.clocks);
// Configure the timer to count at the designed tick rate. We will manually set the
// period below.
timer5.pause();
timer5.set_tick_freq(design_parameters::TIMER_FREQUENCY.convert());
// The timestamp timer runs at the counter cycle period as the sampling timers.
// To accomodate this, we manually set the prescaler identical to the sample
// timer, but use maximum overflow period.
let mut timer = timers::TimestampTimer::new(timer5);
// TODO: Check hardware synchronization of timestamping and the sampling timers
// for phase shift determinism.
timer.set_period_ticks(u32::MAX);
timer
};
let timestamp_timer_channels = timestamp_timer.channels();
// Configure the SPI interfaces to the ADCs and DACs.
let adcs = {
let adc0 = {
let miso = gpiob.pb14.into_alternate().speed(Speed::VeryHigh);
let sck = gpiob.pb10.into_alternate().speed(Speed::VeryHigh);
let nss = gpiob.pb9.into_alternate().speed(Speed::VeryHigh);
let config = hal::spi::Config::new(hal::spi::Mode {
polarity: hal::spi::Polarity::IdleHigh,
phase: hal::spi::Phase::CaptureOnSecondTransition,
})
.hardware_cs(hal::spi::HardwareCS {
mode: hal::spi::HardwareCSMode::WordTransaction,
assertion_delay: design_parameters::ADC_SETUP_TIME,
polarity: hal::spi::Polarity::IdleHigh,
})
.communication_mode(hal::spi::CommunicationMode::Receiver);
let spi: hal::spi::Spi<_, _, u16> = device.SPI2.spi(
(sck, miso, hal::spi::NoMosi, nss),
config,
design_parameters::ADC_DAC_SCK_MAX.convert(),
ccdr.peripheral.SPI2,
&ccdr.clocks,
);
adc::Adc0Input::new(
spi,
dma_streams.0,
dma_streams.1,
dma_streams.2,
sampling_timer_channels.ch1,
shadow_sampling_timer_channels.ch1,
batch_size,
)
};
let adc1 = {
let miso = gpiob.pb4.into_alternate().speed(Speed::VeryHigh);
let sck = gpioc.pc10.into_alternate().speed(Speed::VeryHigh);
let nss = gpioa.pa15.into_alternate().speed(Speed::VeryHigh);
let config = hal::spi::Config::new(hal::spi::Mode {
polarity: hal::spi::Polarity::IdleHigh,
phase: hal::spi::Phase::CaptureOnSecondTransition,
})
.hardware_cs(hal::spi::HardwareCS {
mode: hal::spi::HardwareCSMode::WordTransaction,
assertion_delay: design_parameters::ADC_SETUP_TIME,
polarity: hal::spi::Polarity::IdleHigh,
})
.communication_mode(hal::spi::CommunicationMode::Receiver);
let spi: hal::spi::Spi<_, _, u16> = device.SPI3.spi(
(sck, miso, hal::spi::NoMosi, nss),
config,
design_parameters::ADC_DAC_SCK_MAX.convert(),
ccdr.peripheral.SPI3,
&ccdr.clocks,
);
adc::Adc1Input::new(
spi,
dma_streams.3,
dma_streams.4,
dma_streams.5,
sampling_timer_channels.ch2,
shadow_sampling_timer_channels.ch2,
batch_size,
)
};
(adc0, adc1)
};
let dacs = {
let mut dac_clr_n = gpioe.pe12.into_push_pull_output();
dac_clr_n.set_high();
let dac0_spi = {
let miso = gpioe.pe5.into_alternate().speed(Speed::VeryHigh);
let sck = gpioe.pe2.into_alternate().speed(Speed::VeryHigh);
let nss = gpioe.pe4.into_alternate().speed(Speed::VeryHigh);
let config = hal::spi::Config::new(hal::spi::Mode {
polarity: hal::spi::Polarity::IdleHigh,
phase: hal::spi::Phase::CaptureOnSecondTransition,
})
.hardware_cs(hal::spi::HardwareCS {
mode: hal::spi::HardwareCSMode::WordTransaction,
assertion_delay: 0.0,
polarity: hal::spi::Polarity::IdleHigh,
})
.communication_mode(hal::spi::CommunicationMode::Transmitter)
.swap_mosi_miso();
device.SPI4.spi(
(sck, miso, hal::spi::NoMosi, nss),
config,
design_parameters::ADC_DAC_SCK_MAX.convert(),
ccdr.peripheral.SPI4,
&ccdr.clocks,
)
};
let dac1_spi = {
let miso = gpiof.pf8.into_alternate().speed(Speed::VeryHigh);
let sck = gpiof.pf7.into_alternate().speed(Speed::VeryHigh);
let nss = gpiof.pf6.into_alternate().speed(Speed::VeryHigh);
let config = hal::spi::Config::new(hal::spi::Mode {
polarity: hal::spi::Polarity::IdleHigh,
phase: hal::spi::Phase::CaptureOnSecondTransition,
})
.hardware_cs(hal::spi::HardwareCS {
mode: hal::spi::HardwareCSMode::WordTransaction,
assertion_delay: 0.0,
polarity: hal::spi::Polarity::IdleHigh,
})
.communication_mode(hal::spi::CommunicationMode::Transmitter)
.swap_mosi_miso();
device.SPI5.spi(
(sck, miso, hal::spi::NoMosi, nss),
config,
design_parameters::ADC_DAC_SCK_MAX.convert(),
ccdr.peripheral.SPI5,
&ccdr.clocks,
)
};
let dac0 = dac::Dac0Output::new(
dac0_spi,
dma_streams.6,
sampling_timer_channels.ch3,
batch_size,
);
let dac1 = dac::Dac1Output::new(
dac1_spi,
dma_streams.7,
sampling_timer_channels.ch4,
batch_size,
);
dac_clr_n.set_low();
// dac0_ldac_n
gpioe.pe11.into_push_pull_output().set_low();
// dac1_ldac_n
gpioe.pe15.into_push_pull_output().set_low();
dac_clr_n.set_high();
(dac0, dac1)
};
let afes = {
// AFE_PWR_ON on hardware revision v1.3.2
gpioe.pe1.into_push_pull_output().set_high();
let afe0 = afe::ProgrammableGainAmplifier::new([
gpiof.pf2.into_push_pull_output().erase().forward(),
gpiof.pf5.into_push_pull_output().erase().forward(),
]);
let afe1 = afe::ProgrammableGainAmplifier::new([
gpiod.pd14.into_push_pull_output().erase().forward(),
gpiod.pd15.into_push_pull_output().erase().forward(),
]);
[afe0, afe1]
};
let input_stamper = {
let trigger = gpioa.pa3.into_alternate();
InputStamper::new(trigger, timestamp_timer_channels.ch4)
};
let digital_inputs = {
let di0 = gpiog.pg9.into_floating_input();
let di1 = gpioc.pc15.into_floating_input();
(di0, di1)
};
let mut eeprom_i2c = {
let sda = gpiof.pf0.into_alternate().set_open_drain();
let scl = gpiof.pf1.into_alternate().set_open_drain();
device.I2C2.i2c(
(scl, sda),
100.kHz(),
ccdr.peripheral.I2C2,
&ccdr.clocks,
)
};
let metadata = {
// Read the hardware version pins.
let hardware_version = HardwareVersion::from(
&[
gpiog.pg0.into_pull_down_input().is_high(),
gpiog.pg1.into_pull_down_input().is_high(),
gpiog.pg2.into_pull_down_input().is_high(),
gpiog.pg3.into_pull_down_input().is_high(),
][..],
);
crate::hardware::metadata(hardware_version.into())
};
let mac_addr = smoltcp::wire::EthernetAddress(eeprom::read_eui48(
&mut eeprom_i2c,
&mut delay,
));
log::info!("EUI48: {}", mac_addr);
let mut flash = {
let (_, flash_bank2) = device.FLASH.split();
platform::AsyncFlash(crate::hardware::Flash(flash_bank2.unwrap()))
};
let mut settings = C::new(NetSettings::new(mac_addr));
platform::SerialSettingsPlatform::<_, _, ()>::load(
&mut settings,
&mut flash,
);
let network_devices = {
let ethernet_pins = {
// Reset the PHY before configuring pins.
let mut eth_phy_nrst = gpioe.pe3.into_push_pull_output();
eth_phy_nrst.set_low();
delay.delay_us(200u8);
eth_phy_nrst.set_high();
let ref_clk = gpioa.pa1.into_alternate().speed(Speed::VeryHigh);
let mdio = gpioa.pa2.into_alternate().speed(Speed::VeryHigh);
let mdc = gpioc.pc1.into_alternate().speed(Speed::VeryHigh);
let crs_dv = gpioa.pa7.into_alternate().speed(Speed::VeryHigh);
let rxd0 = gpioc.pc4.into_alternate().speed(Speed::VeryHigh);
let rxd1 = gpioc.pc5.into_alternate().speed(Speed::VeryHigh);
let tx_en = gpiob.pb11.into_alternate().speed(Speed::VeryHigh);
let txd0 = gpiob.pb12.into_alternate().speed(Speed::VeryHigh);
let txd1 = gpiog.pg14.into_alternate().speed(Speed::VeryHigh);
(ref_clk, mdio, mdc, crs_dv, rxd0, rxd1, tx_en, txd0, txd1)
};
let ring = unsafe {
let ring = DES_RING.get();
ring.write(ethernet::DesRing::new());
&mut *ring
};
// Configure the ethernet controller
let (mut eth_dma, eth_mac) = ethernet::new(
device.ETHERNET_MAC,
device.ETHERNET_MTL,
device.ETHERNET_DMA,
ethernet_pins,
// Note(unsafe): We only call this function once to take ownership of the
// descriptor ring.
ring,
mac_addr,
ccdr.peripheral.ETH1MAC,
&ccdr.clocks,
);
// Reset and initialize the ethernet phy.
let mut lan8742a =
ethernet::phy::LAN8742A::new(eth_mac.set_phy_addr(0));
lan8742a.phy_reset();
lan8742a.phy_init();
unsafe { ethernet::enable_interrupt() };
// Configure IP address according to DHCP socket availability
let ip_addrs: smoltcp::wire::IpAddress = match settings.net().ip.parse()
{
Ok(addr) => addr,
Err(e) => {
log::warn!(
"Invalid IP address in settings: {e:?}. Defaulting to 0.0.0.0 (DHCP)"
);
"0.0.0.0".parse().unwrap()
}
};
let random_seed = {
let mut rng =
device.RNG.constrain(ccdr.peripheral.RNG, &ccdr.clocks);
let mut data = [0u8; 8];
rng.fill(&mut data).unwrap();
data
};
// Note(unwrap): The hardware configuration function is only allowed to be called once.
// Unwrapping is intended to panic if called again to prevent re-use of global memory.
let store =
cortex_m::singleton!(: NetStorage = NetStorage::default()).unwrap();
store.ip_addrs[0] = smoltcp::wire::IpCidr::new(ip_addrs, 24);
let mut ethernet_config = smoltcp::iface::Config::new(
smoltcp::wire::HardwareAddress::Ethernet(mac_addr),
);
ethernet_config.random_seed = u64::from_be_bytes(random_seed);
let mut interface = smoltcp::iface::Interface::new(
ethernet_config,
&mut eth_dma,
smoltcp::time::Instant::ZERO,
);
interface
.routes_mut()
.add_default_ipv4_route(smoltcp::wire::Ipv4Address::UNSPECIFIED)
.unwrap();
interface.update_ip_addrs(|ref mut addrs| {
if !ip_addrs.is_unspecified() {
addrs
.push(smoltcp::wire::IpCidr::new(ip_addrs, 24))
.unwrap();
}
});
let mut sockets =
smoltcp::iface::SocketSet::new(&mut store.sockets[..]);
for storage in store.tcp_socket_storage[..].iter_mut() {
let tcp_socket = {
let rx_buffer = smoltcp::socket::tcp::SocketBuffer::new(
&mut storage.rx_storage[..],
);
let tx_buffer = smoltcp::socket::tcp::SocketBuffer::new(
&mut storage.tx_storage[..],
);
smoltcp::socket::tcp::Socket::new(rx_buffer, tx_buffer)
};
sockets.add(tcp_socket);
}
if ip_addrs.is_unspecified() {
sockets.add(smoltcp::socket::dhcpv4::Socket::new());
}
sockets.add(smoltcp::socket::dns::Socket::new(
&[],
&mut store.dns_storage[..],
));
for storage in store.udp_socket_storage[..].iter_mut() {
let udp_socket = {
let rx_buffer = smoltcp::socket::udp::PacketBuffer::new(
&mut storage.rx_metadata[..],
&mut storage.rx_storage[..],
);
let tx_buffer = smoltcp::socket::udp::PacketBuffer::new(
&mut storage.tx_metadata[..],
&mut storage.tx_storage[..],
);
smoltcp::socket::udp::Socket::new(rx_buffer, tx_buffer)
};
sockets.add(udp_socket);
}
let mut stack =
smoltcp_nal::NetworkStack::new(interface, eth_dma, sockets, clock);
stack.seed_random_port(&random_seed);
NetworkDevices {
stack,
phy: lan8742a,
mac_address: mac_addr,
}
};
let mut fp_led = [
gpiod.pd5.into_push_pull_output().erase(),
gpiod.pd6.into_push_pull_output().erase(),
gpiog.pg4.into_push_pull_output().erase(),
gpiod.pd12.into_push_pull_output().erase(),
];
for fp_led in fp_led.iter_mut() {
fp_led.set_low();
}
let (adc1, adc2, adc3) = {
let (mut adc1, mut adc2) = hal::adc::adc12(
device.ADC1,
device.ADC2,
hal::time::Hertz::MHz(25),
&mut delay,
ccdr.peripheral.ADC12,
&ccdr.clocks,
);
let mut adc3 = hal::adc::Adc::adc3(
device.ADC3,
hal::time::Hertz::MHz(25),
&mut delay,
ccdr.peripheral.ADC3,
&ccdr.clocks,
);
adc1.set_sample_time(hal::adc::AdcSampleTime::T_810);
adc1.set_resolution(hal::adc::Resolution::SixteenBit);
adc1.calibrate();
adc2.set_sample_time(hal::adc::AdcSampleTime::T_810);
adc2.set_resolution(hal::adc::Resolution::SixteenBit);
adc2.calibrate();
adc3.set_sample_time(hal::adc::AdcSampleTime::T_810);
adc3.set_resolution(hal::adc::Resolution::SixteenBit);
adc3.calibrate();
hal::adc::Temperature::new().enable(&adc3);
let adc1 = adc1.enable();
let adc2 = adc2.enable();
let adc3 = adc3.enable();
(
// The ADCs must live as global, mutable singletons so that we can hand out references
// to the internal ADC. If they were instead to live within e.g. StabilizerDevices,
// they would not yet live in 'static memory, which means that we could not hand out
// references during initialization, since those references would be invalidated when
// we move StabilizerDevices into the late RTIC resources.
cortex_m::singleton!(: SharedAdc<hal::stm32::ADC1> = SharedAdc::new(adc1.slope() as f32, adc1)).unwrap(),
cortex_m::singleton!(: SharedAdc<hal::stm32::ADC2> = SharedAdc::new(adc2.slope() as f32, adc2)).unwrap(),
cortex_m::singleton!(: SharedAdc<hal::stm32::ADC3> = SharedAdc::new(adc3.slope() as f32, adc3)).unwrap(),
)
};
let temperature_sensor =
CpuTempSensor::new(adc3.create_channel(hal::adc::Temperature::new()));
// Measure the Pounder PGOOD output to detect if pounder is present on Stabilizer.
let pounder_pgood = gpiob.pb13.into_pull_down_input();
delay.delay_us(2000u32);
let pounder = if pounder_pgood.is_high() {
log::info!("Found Pounder");
let i2c1 = {
let sda = gpiob.pb7.into_alternate().set_open_drain();
let scl = gpiob.pb8.into_alternate().set_open_drain();
let i2c1 = device.I2C1.i2c(
(scl, sda),
400.kHz(),
ccdr.peripheral.I2C1,
&ccdr.clocks,
);
shared_bus::new_atomic_check!(hal::i2c::I2c<hal::stm32::I2C1> = i2c1).unwrap()
};
let spi = {
let mosi = gpiod.pd7.into_alternate();
let miso = gpioa.pa6.into_alternate();
let sck = gpiog.pg11.into_alternate();
let config = hal::spi::Config::new(hal::spi::Mode {
polarity: hal::spi::Polarity::IdleHigh,
phase: hal::spi::Phase::CaptureOnSecondTransition,
});
// The maximum frequency of this SPI must be limited due to capacitance on the MISO
// line causing a long RC decay.
device.SPI1.spi(
(sck, miso, mosi),
config,
5.MHz(),
ccdr.peripheral.SPI1,
&ccdr.clocks,
)
};
let pwr0 = adc1.create_channel(gpiof.pf11.into_analog());
let pwr1 = adc2.create_channel(gpiof.pf14.into_analog());
let aux_adc0 = adc3.create_channel(gpiof.pf3.into_analog());
let aux_adc1 = adc3.create_channel(gpiof.pf4.into_analog());
let pounder_devices = pounder::PounderDevices::new(
i2c1.acquire_i2c(),
spi,
(pwr0, pwr1),
(aux_adc0, aux_adc1),
)
.unwrap();
let ad9959 = {
let qspi = {
// Instantiate the QUADSPI pins and peripheral interface.
let pins = {
let _ncs =
gpioc.pc11.into_alternate::<9>().speed(Speed::VeryHigh);
let clk = gpiob.pb2.into_alternate().speed(Speed::VeryHigh);
let io0 = gpioe.pe7.into_alternate().speed(Speed::VeryHigh);
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | true |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/eeprom.rs | src/hardware/eeprom.rs | use embedded_hal_02::blocking::{delay::DelayUs, i2c::WriteRead};
// The EEPROM is a variant without address bits, so the 3 LSB of this word are "dont-cares".
const I2C_ADDR: u8 = 0x50;
// The MAC address is stored in the last 6 bytes of the 256 byte address space.
const MAC_POINTER: u8 = 0xFA;
pub fn read_eui48<T>(i2c: &mut T, delay: &mut impl DelayUs<u32>) -> [u8; 6]
where
T: WriteRead,
{
let mut previous_read: Option<[u8; 6]> = None;
// On Stabilizer v1.1 and earlier hardware, there is a fault where the I2C bus is not connected
// to the CPU until the P12V0A rail enables, which can take many seconds, or may never come up
// at all. During these transient turn-on conditions, we may fail the I2C read operation. To
// accomodate this, we repeat the I2C read for a set number of attempts with a fixed delay
// between them. Then, we wait for the bus to stabilize by waiting until the MAC address
// read-out is identical for two consecutive reads.
for _ in 0..40 {
let mut buffer = [0u8; 6];
if i2c
.write_read(I2C_ADDR, &[MAC_POINTER], &mut buffer)
.is_ok()
{
if let Some(old_read) = previous_read
&& old_read == buffer
{
return buffer;
}
previous_read.replace(buffer);
} else {
// Remove any pending previous read if we failed the last attempt.
previous_read.take();
}
delay.delay_us(100_000);
}
panic!("Failed to read MAC address");
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/dac.rs | src/hardware/dac.rs | //! Stabilizer DAC management interface
//!
//! # Design
//!
//! Stabilizer DACs are connected to the MCU via a simplex, SPI-compatible interface. Each DAC
//! accepts a 16-bit output code.
//!
//! In order to maximize CPU processing time, the DAC code updates are offloaded to hardware using
//! a timer compare channel, DMA stream, and the DAC SPI interface.
//!
//! The timer comparison channel is configured to generate a DMA request whenever the comparison
//! occurs. Thus, whenever a comparison happens, a single DAC code can be written to the output. By
//! configuring a DMA stream for a number of successive DAC codes, hardware can regularly update
//! the DAC without requiring the CPU.
//!
//! In order to ensure alignment between the ADC sample batches and DAC output code batches, a DAC
//! output batch is always exactly 3 batches after the ADC batch that generated it.
//!
//! The DMA transfer for the DAC output codes utilizes a double-buffer mode to avoid losing any
//! transfer events generated by the timer (for example, when 2 update cycles occur before the DMA
//! transfer completion is handled). In this mode, by the time DMA swaps buffers, there is always a valid buffer in the
//! "next-transfer" double-buffer location for the DMA transfer. Once a transfer completes,
//! software then has exactly one batch duration to fill the next buffer before its
//! transfer begins. If software does not meet this deadline, old data will be repeatedly generated
//! on the output and output will be shifted by one batch.
//!
//! ## Multiple Samples to Single DAC Codes
//!
//! For some applications, it may be desirable to generate a single DAC code from multiple ADC
//! samples. In order to maintain timing characteristics between ADC samples and DAC code outputs,
//! applications are required to generate one DAC code for each ADC sample. To accommodate mapping
//! multiple inputs to a single output, the output code can be repeated a number of times in the
//! output buffer corresponding with the number of input samples that were used to generate it.
//!
//!
//! # Note
//!
//! There is a very small amount of latency between updating the two DACs due to bus matrix
//! priority. As such, one of the DACs will be updated marginally earlier before the other because
//! the DMA requests are generated simultaneously. This can be avoided by providing a known offset
//! to other DMA requests, which can be completed by setting e.g. DAC0's comparison to a
//! counter value of 2 and DAC1's comparison to a counter value of 3. This will have the effect of
//! generating the DAC updates with a known latency of 1 timer tick to each other and prevent the
//! DMAs from racing for the bus. As implemented, the DMA channels utilize natural priority of the
//! DMA channels to arbitrate which transfer occurs first.
//!
//!
//! # Limitations
//!
//! While double-buffered mode is used for DMA to avoid lost DAC-update events, there is no check
//! for re-use of a previously provided DAC output buffer. It is assumed that the DMA request is
//! served promptly after the transfer completes.
use rtic::Mutex;
use super::timers;
use crate::{
convert::DacCode,
design_parameters::{MAX_SAMPLE_BUFFER_SIZE, SampleBuffer},
};
use super::hal::{
self,
dma::{
DMAError, MemoryToPeripheral, Transfer,
dma::{DMAReq, DmaConfig},
traits::TargetAddress,
},
spi::{HalDisabledSpi, HalEnabledSpi, HalSpi},
};
// The following global buffers are used for the DAC code DMA transfers. Two buffers are used for
// each transfer in a ping-pong buffer configuration (one is being prepared while the other is being
// processed). Note that the contents of AXI SRAM is uninitialized, so the buffer contents on
// startup are undefined. The dimensions are `DAC_BUF[dac_index][ping_pong_index][sample_index]`.
#[unsafe(link_section = ".axisram.buffers")]
static mut DAC_BUF: [[SampleBuffer; 2]; 2] =
    [[[0; MAX_SAMPLE_BUFFER_SIZE]; 2]; 2];
// Generates a complete DAC output driver (SPI wrapper type, DMA target impl, output struct and
// its Mutex adapter) for one DAC channel. Parameters select the SPI peripheral, DMA stream, and
// the TIM2 compare channel / DMA request that paces the updates.
macro_rules! dac_output {
    ($name:ident, $index:literal, $data_stream:ident,
     $spi:ident, $trigger_channel:ident, $dma_req:ident) => {
        /// $spi is used as a type for indicating a DMA transfer into the SPI TX FIFO
        struct $spi {
            spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
            _channel: timers::tim2::$trigger_channel,
        }

        impl $spi {
            pub fn new(
                _channel: timers::tim2::$trigger_channel,
                spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
            ) -> Self {
                Self { spi, _channel }
            }

            /// Start the SPI and begin operating in a DMA-driven transfer mode.
            pub fn start_dma(&mut self) {
                // Allow the SPI FIFOs to operate using only DMA data channels.
                self.spi.enable_dma_tx();

                // Enable SPI and start it in infinite transaction mode.
                self.spi.inner().cr1.modify(|_, w| w.spe().set_bit());
                self.spi.inner().cr1.modify(|_, w| w.cstart().started());
            }
        }

        // Note(unsafe): This is safe because the DMA request line is logically owned by this
        // module. Additionally, the SPI is owned by this structure and is known to be configured
        // for u16 word sizes.
        unsafe impl TargetAddress<MemoryToPeripheral> for $spi {
            /// SPI is configured to operate using 16-bit transfer words.
            type MemSize = u16;

            /// SPI DMA requests are generated whenever TIM2 CHx ($dma_req) comparison occurs.
            const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_req as u8);

            /// Whenever the DMA request occurs, it should write into SPI's TX FIFO.
            fn address(&self) -> usize {
                &self.spi.inner().txdr as *const _ as usize
            }
        }

        /// Represents data associated with DAC.
        pub struct $name {
            // Note: SPI TX functionality may not be used from this structure to ensure safety
            // with DMA.
            transfer: Transfer<
                hal::dma::dma::$data_stream<hal::stm32::DMA1>,
                $spi,
                MemoryToPeripheral,
                &'static mut [u16],
                hal::dma::DBTransfer,
            >,
        }

        impl $name {
            /// Construct the DAC output channel.
            ///
            /// # Args
            /// * `spi` - The SPI interface used to communicate with the DAC.
            /// * `stream` - The DMA stream used to write DAC codes over SPI.
            /// * `trigger_channel` - The sampling timer output compare channel for update
            ///   triggers.
            /// * `batch_size` - The number of DAC codes in one output batch.
            pub fn new(
                spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Enabled, u16>,
                stream: hal::dma::dma::$data_stream<hal::stm32::DMA1>,
                trigger_channel: timers::tim2::$trigger_channel,
                batch_size: usize,
            ) -> Self {
                // Generate DMA events when an output compare of the timer hitting zero (timer
                // roll over) occurs.
                trigger_channel.listen_dma();
                trigger_channel.to_output_compare(4 + $index);

                // The stream constantly writes to the TX FIFO to write new update codes.
                let trigger_config = DmaConfig::default()
                    .memory_increment(true)
                    .double_buffer(true)
                    .peripheral_increment(false);

                // Listen for any potential SPI error signals, which may indicate that we are
                // not generating update codes.
                let mut spi = spi.disable();
                spi.listen(hal::spi::Event::Error);

                // AXISRAM is uninitialized. As such, we manually initialize it for a 0V DAC
                // output here before starting the transfer.
                // Note(unsafe): We currently own all DAC_BUF[index] buffers and are not using
                // them elsewhere, so it is safe to access them here.
                for buf in unsafe { DAC_BUF[$index].iter_mut() } {
                    for byte in buf.iter_mut() {
                        *byte = DacCode::try_from(0.0f32).unwrap().0;
                    }
                }

                // Construct the trigger stream to write from memory to the peripheral.
                let transfer: Transfer<_, _, MemoryToPeripheral, _, _> =
                    Transfer::init(
                        stream,
                        $spi::new(trigger_channel, spi),
                        // Note(unsafe): This buffer is only used once and provided for the DMA transfer.
                        unsafe { &mut DAC_BUF[$index][0][..batch_size] },
                        // Note(unsafe): This buffer is only used once and provided for the DMA transfer.
                        unsafe { Some(&mut DAC_BUF[$index][1][..batch_size]) },
                        trigger_config,
                    );

                Self { transfer }
            }

            /// Enable the DMA transfer and place the SPI into DMA-driven operation.
            pub fn start(&mut self) {
                self.transfer.start(|spi| spi.start_dma());
            }

            /// Wait for the transfer of the currently active buffer to complete,
            /// then call a function on the now inactive buffer and acknowledge the
            /// transfer complete flag.
            ///
            /// NOTE(unsafe): Memory safety and access ordering is not guaranteed
            /// (see the HAL DMA docs).
            pub fn with_buffer<F, R>(&mut self, f: F) -> Result<R, DMAError>
            where
                F: FnOnce(&mut &'static mut [u16]) -> R,
            {
                unsafe {
                    self.transfer.next_dbm_transfer_with(|buf, _current| f(buf))
                }
            }
        }

        // This is not actually a Mutex. It only re-uses the semantics and macros of mutex-trait
        // to reduce rightward drift when jointly calling `with_buffer(f)` on multiple DAC/ADCs.
        impl Mutex for $name {
            type T = &'static mut [u16];

            fn lock<R>(&mut self, f: impl FnOnce(&mut Self::T) -> R) -> R {
                self.with_buffer(f).unwrap()
            }
        }
    };
}

// Instantiate the two DAC output channels: DAC0 on SPI4 (DMA1 Stream6, TIM2 CH3 requests) and
// DAC1 on SPI5 (DMA1 Stream7, TIM2 CH4 requests).
dac_output!(Dac0Output, 0, Stream6, SPI4, Channel3, Tim2Ch3);
dac_output!(Dac1Output, 1, Stream7, SPI5, Channel4, Tim2Ch4);
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/input_stamper.rs | src/hardware/input_stamper.rs | //! Digital Input 0 (DI0) reference clock timestamper
//!
//! This module provides a means of timestamping the rising edges of an external reference clock on
//! the DI0 with a timer value from TIM5.
//!
//! # Design
//! An input capture channel is configured on DI0 and fed into TIM5's capture channel 4. TIM5 is
//! then run in a free-running mode with a configured tick rate (PSC) and maximum count value
//! (ARR). Whenever an edge on DI0 triggers, the current TIM5 counter value is captured and
//! recorded as a timestamp. This timestamp can be either directly read from the timer channel or
//! can be collected asynchronously via DMA collection.
//!
//! To prevent silently discarding timestamps, the TIM5 input capture over-capture flag is
//! continually checked. Any over-capture event (which indicates an overwritten timestamp) then
//! triggers a panic to indicate the dropped timestamp so that design parameters can be adjusted.
//!
//! # Tradeoffs
//! It appears that DMA transfers can take a significant amount of time to disable (400ns) if they
//! are being prematurely stopped (such is the case here). As such, for a sample batch size of 1,
//! this can take up a significant amount of the total available processing time for the samples.
//! This module checks for any captured timestamps from the timer capture channel manually. In
//! this mode, the maximum input clock frequency supported is dependent on the sampling rate and
//! batch size.
//!
//! This module only supports DI0 for timestamping due to trigger constraints on the DIx pins. If
//! timestamping is desired in DI1, a separate timer + capture channel will be necessary.
use super::{
hal::gpio::{Alternate, gpioa::PA3},
timers,
};
/// The timestamper for DI0 reference clock inputs.
pub struct InputStamper {
    // Held only to keep DI0 configured in its alternate (trigger) function.
    _di0_trigger: PA3<Alternate<2>>,
    // TIM5 CH4 input-capture channel that latches the timer count on DI0 edges.
    capture_channel: timers::tim5::Channel4InputCapture,
}
impl InputStamper {
    /// Construct the DI0 input timestamper.
    ///
    /// # Args
    /// * `trigger` - The capture trigger input pin.
    /// * `timer_channel` - The timer channel used for capturing timestamps.
    pub fn new(
        trigger: PA3<Alternate<2>>,
        timer_channel: timers::tim5::Channel4,
    ) -> Self {
        // Use TIM5 CH4 as an input capture channel, with TI4 (the DI0 input trigger) as the
        // capture source.
        let mut capture = timer_channel
            .into_input_capture(timers::tim5::CaptureSource4::Ti4);

        // Do not prescale the input capture signal, but require 8 consecutive samples to record
        // an incoming event - this prevents spurious glitches from triggering captures.
        capture.configure_filter(timers::InputFilter::Div1N8);

        Self {
            _di0_trigger: trigger,
            capture_channel: capture,
        }
    }

    /// Begin capturing timestamps on DI0.
    #[allow(dead_code)]
    pub fn start(&mut self) {
        self.capture_channel.enable();
    }

    /// Get the latest timestamp that has occurred.
    ///
    /// # Note
    /// This function must be called at least as often as timestamps arrive.
    /// If an over-capture event occurs, this function will clear the overflow
    /// and return a new timestamp of unknown recency in an `Err()`.
    /// Note that this indicates at least one timestamp was inadvertently dropped.
    #[allow(dead_code)]
    pub fn latest_timestamp(&mut self) -> Result<Option<u32>, Option<u32>> {
        self.capture_channel.latest_capture()
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/shared_adc.rs | src/hardware/shared_adc.rs | use super::hal;
/// Shared Internal ADC Support
///
/// # Description
/// This module provides an abstraction to share ownership of a single ADC peripheral with multiple
/// ADC channels attached to it.
///
/// The design of this module mimics that of [`shared-bus`].
///
/// First, the shared ADC is created with the use of a macro, which places the ADC peripheral into
/// a mutable, static (singleton) location. Then, individual channels are created by passing in the
/// associated ADC input pin to the [SharedAdc::create_channel()] function to generate an
/// [AdcChannel]. The [AdcChannel]'s ownership can then be moved to any required drivers.
///
/// ## Synchronization
/// If the multiple priorities utilize the ADC that results in resource pre-emption, pre-emption is
/// protected against through the use of an atomic bool. Attempting to utilize the ADC from a
/// higher priority level while it is in use at a lower level will result in a [AdcError::InUse].
use embedded_hal_02::adc::{Channel, OneShot};
/// Errors that may occur when accessing a shared ADC channel.
#[derive(Debug, Copy, Clone)]
pub enum AdcError {
    /// Indicates that the ADC is already in use
    InUse,
}
/// A single channel on an ADC peripheral.
pub struct AdcChannel<'a, Adc, PIN> {
    // The ADC input associated with this channel (often a GPIO pin).
    pin: PIN,
    // Conversion slope used to normalize raw codes into a full-scale ratio.
    slope: f32,
    // Shared, contention-checked access to the underlying ADC peripheral.
    mutex: &'a spin::Mutex<hal::adc::Adc<Adc, hal::adc::Enabled>>,
}
impl<Adc, PIN> AdcChannel<'_, Adc, PIN>
where
    PIN: Channel<Adc, ID = u8>,
    hal::adc::Adc<Adc, hal::adc::Enabled>: OneShot<Adc, u32, PIN>,
    <hal::adc::Adc<Adc, hal::adc::Enabled> as OneShot<Adc, u32, PIN>>::Error:
        core::fmt::Debug,
{
    /// Read the ADC channel and normalize the result.
    ///
    /// # Returns
    /// The normalized ADC measurement as a ratio of full-scale.
    pub fn read_normalized(&mut self) -> Result<f32, AdcError> {
        let code = self.read_raw()?;
        Ok(code as f32 / self.slope)
    }

    /// Read the raw ADC sample for the channel.
    ///
    /// # Returns
    /// The raw ADC code measured on the channel.
    pub fn read_raw(&mut self) -> Result<u32, AdcError> {
        // Fail fast instead of blocking if the ADC is busy at another priority level.
        match self.mutex.try_lock() {
            Some(mut adc) => Ok(adc.read(&mut self.pin).unwrap()),
            None => Err(AdcError::InUse),
        }
    }
}
/// An ADC peripheral that can provide ownership of individual channels for sharing between
/// drivers.
pub struct SharedAdc<Adc> {
    // The ADC peripheral, guarded against concurrent use from multiple priorities.
    mutex: spin::Mutex<hal::adc::Adc<Adc, hal::adc::Enabled>>,
    // Conversion slope handed to each created channel for normalization.
    slope: f32,
}
impl<Adc> SharedAdc<Adc> {
    /// Construct a new shared ADC driver.
    ///
    /// # Args
    /// * `slope` - The slope of the ADC conversion transfer function.
    /// * `adc` - The ADC peripheral to share.
    pub fn new(slope: f32, adc: hal::adc::Adc<Adc, hal::adc::Enabled>) -> Self {
        let mutex = spin::Mutex::new(adc);
        Self { mutex, slope }
    }

    /// Allocate an ADC channel for usage.
    ///
    /// # Args
    /// * `pin` - The ADC input associated with the desired ADC channel. Often, this is a GPIO
    ///   pin.
    ///
    /// # Returns
    /// An instantiated [AdcChannel] whose ownership can be transferred to other drivers.
    pub fn create_channel<PIN: Channel<Adc, ID = u8>>(
        &self,
        pin: PIN,
    ) -> AdcChannel<'_, Adc, PIN> {
        AdcChannel {
            mutex: &self.mutex,
            slope: self.slope,
            pin,
        }
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/mod.rs | src/hardware/mod.rs | //! Module for all hardware-specific setup of Stabilizer
pub use embedded_hal_02;
use embedded_hal_compat::{Forward, markers::ForwardOutputPin};
use hal::{
flash::{LockedFlashBank, UnlockedFlashBank},
gpio::{self, ErasedPin, Input, Output},
};
pub use stm32h7xx_hal as hal;
use platform::{ApplicationMetadata, AsyncFlash, UnlockFlash};
pub mod adc;
pub mod afe;
pub mod cpu_temp_sensor;
pub mod dac;
mod eeprom;
pub mod input_stamper;
pub mod net;
pub mod pounder;
pub mod setup;
pub mod shared_adc;
pub mod timers;
// Type alias for the analog front-end
pub type Pgia = afe::ProgrammableGainAmplifier<
Forward<ErasedPin<Output>, ForwardOutputPin>,
>;
pub type UsbBus = hal::usb_hs::UsbBus<hal::usb_hs::USB2>;
// Type alias for the USB device.
pub type UsbDevice = usb_device::device::UsbDevice<'static, UsbBus>;
/// Raw access to the EEM LVDS lines: 4/5 are inputs, 6/7 are outputs.
pub struct Gpio {
    pub lvds4: gpio::gpiod::PD1<Input>,
    pub lvds5: gpio::gpiod::PD2<Input>,
    pub lvds6: gpio::gpiod::PD3<Output>,
    pub lvds7: gpio::gpiod::PD4<Output>,
}

// Type alias for an Urukul EEM driver on SPI6 (see the `urukul` crate).
pub type Urukul = urukul::Urukul<
    'static,
    Forward<hal::spi::Spi<hal::stm32::SPI6, hal::spi::Enabled>>,
    Forward<ErasedPin<Output>, ForwardOutputPin>,
>;

/// Hardware configured on the EEM connector.
pub enum Eem {
    /// The LVDS lines are used as plain digital I/O.
    Gpio(Gpio),
    /// An Urukul EEM is attached.
    Urukul(Urukul),
    /// No EEM function configured.
    None,
}

// Type alias for digital input 0 (DI0).
pub type DigitalInput0 = hal::gpio::gpiog::PG9<hal::gpio::Input>;

// Type alias for digital input 1 (DI1).
pub type DigitalInput1 = hal::gpio::gpioc::PC15<hal::gpio::Input>;
/// System timer (RTIC Monotonic) tick frequency
pub const MONOTONIC_FREQUENCY: u32 = 1_000;
rtic_monotonics::systick_monotonic!(Systick, MONOTONIC_FREQUENCY);
pub type SystemTimer = mono_clock::MonoClock<u32, MONOTONIC_FREQUENCY>;
pub type I2c1Proxy = shared_bus::I2cProxy<
'static,
shared_bus::AtomicCheckMutex<hal::i2c::I2c<hal::stm32::I2C1>>,
>;
pub type SerialPort = usbd_serial::SerialPort<
'static,
UsbBus,
&'static mut [u8],
&'static mut [u8],
>;
pub type SerialTerminal<C> = serial_settings::Runner<
'static,
platform::SerialSettingsPlatform<C, AsyncFlash<Flash>, SerialPort>,
>;
/// Newtype over the locked flash bank used as the settings storage backend.
pub struct Flash(LockedFlashBank);

impl embedded_storage::nor_flash::ErrorType for Flash {
    type Error =
        <LockedFlashBank as embedded_storage::nor_flash::ErrorType>::Error;
}

impl embedded_storage::nor_flash::ReadNorFlash for Flash {
    const READ_SIZE: usize = LockedFlashBank::READ_SIZE;

    fn capacity(&self) -> usize {
        self.0.capacity()
    }

    fn read(
        &mut self,
        offset: u32,
        bytes: &mut [u8],
    ) -> Result<(), Self::Error> {
        self.0.read(offset, bytes)
    }
}

impl UnlockFlash for Flash {
    type Unlocked<'a> = UnlockedFlashBank<'a>;

    // Unlock the bank for write/erase access.
    fn unlock(&mut self) -> Self::Unlocked<'_> {
        self.0.unlocked()
    }
}
// Build-time metadata (git version, rustc version, features, …) generated into OUT_DIR by the
// build script.
mod build_info {
    include!(concat!(env!("OUT_DIR"), "/built.rs"));
}
/// Construct the global metadata.
///
/// # Note
/// This may only be called once.
///
/// # Args
/// * `version` - The hardware version detected.
///
/// # Returns
/// A reference to the global metadata.
///
/// # Panics
/// Panics if called more than once, since the backing singleton can only be taken once.
pub fn metadata(version: &'static str) -> &'static ApplicationMetadata {
    cortex_m::singleton!(: ApplicationMetadata = ApplicationMetadata {
        firmware_version: build_info::GIT_VERSION.unwrap_or("Unspecified"),
        rust_version: build_info::RUSTC_VERSION,
        profile: build_info::PROFILE,
        git_dirty: build_info::GIT_DIRTY.unwrap_or(false),
        features: build_info::FEATURES_STR,
        hardware_version: version,
        panic_info: panic_persist::get_panic_message_utf8().unwrap_or("None"),
    })
    .unwrap()
}
/// Stabilizer hardware revision, decoded from a slice of version bits.
#[derive(strum::IntoStaticStr)]
pub enum HardwareVersion {
    Rev1_0,
    Rev1_1,
    Rev1_2,
    Rev1_3,
    /// An unrecognized version code.
    Unknown(u8),
}
impl From<&[bool]> for HardwareVersion {
    /// Decode the hardware revision from version bits, provided LSB-first.
    fn from(bits: &[bool]) -> Self {
        // Pack the bits (LSB-first slice order) into an integer code.
        let mut code: u8 = 0;
        for &bit in bits.iter().rev() {
            code = (code << 1) | bit as u8;
        }

        match code {
            0b000 => HardwareVersion::Rev1_0,
            0b001 => HardwareVersion::Rev1_1,
            0b010 => HardwareVersion::Rev1_2,
            0b011 => HardwareVersion::Rev1_3,
            other => HardwareVersion::Unknown(other),
        }
    }
}
impl core::fmt::Display for HardwareVersion {
    /// Render the revision as a human-readable version string.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            HardwareVersion::Rev1_0 => f.write_str("v1.0"),
            HardwareVersion::Rev1_1 => f.write_str("v1.1"),
            HardwareVersion::Rev1_2 => f.write_str("v1.2"),
            HardwareVersion::Rev1_3 => f.write_str("v1.3"),
            HardwareVersion::Unknown(other) => {
                write!(f, "Unknown ({:#b})", other)
            }
        }
    }
}
/// Firmware panic handler.
///
/// Disables interrupts, guards against recursive panics, turns on both red front-panel LEDs,
/// emits the panic message over RTT, persists it for post-mortem retrieval, and aborts.
#[inline(never)]
#[panic_handler]
fn panic(info: &core::panic::PanicInfo) -> ! {
    use core::{
        fmt::Write,
        sync::atomic::{AtomicBool, Ordering},
    };
    use cortex_m::asm;
    use rtt_target::{ChannelMode, UpChannel};

    cortex_m::interrupt::disable();

    // Recursion protection: if a panic occurs while already panicking, spin on breakpoints.
    static PANICKED: AtomicBool = AtomicBool::new(false);
    while PANICKED.load(Ordering::Relaxed) {
        asm::bkpt();
    }
    PANICKED.store(true, Ordering::Relaxed);

    // Turn on both red LEDs, FP_LED_1, FP_LED_3
    let gpiod = unsafe { &*hal::stm32::GPIOD::ptr() };
    gpiod.odr.modify(|_, w| w.odr6().high().odr12().high());

    // Analogous to panic-rtt-target
    if let Some(mut channel) = unsafe { UpChannel::conjure(0) } {
        channel.set_mode(ChannelMode::BlockIfFull);
        writeln!(channel, "{info}").ok();
    }

    panic_persist::report_panic_info(info);

    // Abort
    asm::udf();

    // Halt
    // loop { core::sync::atomic::compiler_fence(Ordering::SeqCst); }
}
/// HardFault handler: escalate to the panic handler with the exception frame for diagnosis.
#[cortex_m_rt::exception]
unsafe fn HardFault(ef: &cortex_m_rt::ExceptionFrame) -> ! {
    panic!("HardFault at {:#?}", ef);
}

/// Catch-all for otherwise unhandled interrupts/exceptions: treat as a fatal firmware bug.
#[cortex_m_rt::exception]
unsafe fn DefaultHandler(irqn: i16) {
    panic!("Unhandled exception (IRQn = {})", irqn);
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/afe.rs | src/hardware/afe.rs | use crate::convert::Gain;
/// A programmable gain amplifier that allows for setting the gain via GPIO.
pub struct ProgrammableGainAmplifier<P> {
    // Gain-select outputs, ordered [A0, A1] (least-significant gain bit first).
    a: [P; 2],
}
impl<P> ProgrammableGainAmplifier<P>
where
    P: embedded_hal_1::digital::OutputPin,
{
    /// Construct a new programmable gain driver.
    ///
    /// Args:
    /// * `a` - The outputs connected to the A0 and A1 inputs of the amplifier, in that order.
    ///
    /// The amplifier is initialized to the default gain.
    pub fn new(a: [P; 2]) -> Self {
        let mut amplifier = Self { a };
        amplifier.set_gain(Gain::default());
        amplifier
    }

    /// Set the gain of the front-end.
    pub fn set_gain(&mut self, gain: Gain) {
        // Drive each select pin from the corresponding bit of the gain code (A0 = bit 0).
        let code = gain as u8;
        for (bit, pin) in self.a.iter_mut().enumerate() {
            let asserted = (code >> bit) & 1 != 0;
            pin.set_state(asserted.into()).unwrap();
        }
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/cpu_temp_sensor.rs | src/hardware/cpu_temp_sensor.rs | //! STM32 Temperature Sensor Driver
//!
//! # Description
//! This file provides an API for measuring the internal STM32 temperature sensor. This temperature
//! sensor measures the silicon junction temperature (Tj) and is connected via an internal ADC.
use super::hal::{
self,
signature::{TS_CAL_30, TS_CAL_110},
};
use super::shared_adc::{AdcChannel, AdcError};
/// Helper utility to convert raw codes into temperature measurements.
struct Calibration {
    // Degrees Celsius per (3.3V-referenced) ADC code.
    slope: f32,
    // Temperature intercept in degrees Celsius.
    offset: f32,
}
impl Calibration {
    /// Construct the calibration utility from the factory calibration constants.
    pub fn new() -> Self {
        // Factory calibration codes, measured at 110 and 30 degrees Celsius respectively.
        let cal_110 = TS_CAL_110::read();
        let cal_30 = TS_CAL_30::read();

        let slope = (110. - 30.) / (cal_110 as f32 - cal_30 as f32);
        let offset = 30. - slope * cal_30 as f32;

        Self { slope, offset }
    }

    /// Convert a raw ADC sample to a temperature in degrees Celsius.
    pub fn sample_to_temperature(&self, sample: u32) -> f32 {
        // We use a 2.048V reference, but calibration data was taken at 3.3V.
        let sample_3v3 = sample as f32 * 2.048 / 3.3;

        self.slope * sample_3v3 + self.offset
    }
}
/// A driver to access the CPU temperature sensor.
pub struct CpuTempSensor {
    // ADC channel wired to the internal temperature sensor.
    sensor: AdcChannel<'static, hal::stm32::ADC3, hal::adc::Temperature>,
    // Factory calibration used to convert raw codes into degrees Celsius.
    calibration: Calibration,
}
impl CpuTempSensor {
    /// Construct the temperature sensor.
    ///
    /// # Args
    /// * `sensor` - The ADC channel of the integrated temperature sensor.
    pub fn new(
        sensor: AdcChannel<'static, hal::stm32::ADC3, hal::adc::Temperature>,
    ) -> Self {
        Self {
            calibration: Calibration::new(),
            sensor,
        }
    }

    /// Get the temperature of the CPU in degrees Celsius.
    pub fn get_temperature(&mut self) -> Result<f32, AdcError> {
        self.sensor
            .read_raw()
            .map(|raw| self.calibration.sample_to_temperature(raw))
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/timers.rs | src/hardware/timers.rs | //! The sampling timer is used for managing ADC sampling and external reference timestamping.
use super::hal;
use num_enum::TryFromPrimitive;
use hal::stm32::{
// TIM1 and TIM8 have identical registers.
tim1 as __tim8,
tim2 as __tim2,
// TIM2 and TIM5 have identical registers.
tim2 as __tim5,
tim3 as __tim3,
};
/// The event that should generate an external trigger from the peripheral.
#[allow(dead_code)]
pub enum TriggerGenerator {
    Reset = 0b000,
    Enable = 0b001,
    Update = 0b010,
    ComparePulse = 0b011,
    Ch1Compare = 0b100,
    Ch2Compare = 0b101,
    Ch3Compare = 0b110,
    Ch4Compare = 0b111,
}

/// Selects the trigger source for the timer peripheral.
#[allow(dead_code)]
pub enum TriggerSource {
    Trigger0 = 0,
    Trigger1 = 0b01,
    Trigger2 = 0b10,
    Trigger3 = 0b11,
}

/// Prescalers for externally-supplied reference clocks.
#[allow(dead_code)]
#[derive(TryFromPrimitive)]
#[repr(u8)]
pub enum Prescaler {
    Div1 = 0b00,
    Div2 = 0b01,
    Div4 = 0b10,
    Div8 = 0b11,
}

/// Optional slave operation modes of a timer.
#[allow(dead_code)]
pub enum SlaveMode {
    Disabled = 0,
    Trigger = 0b0110,
}

/// Optional input capture preconditioning filter configurations.
#[allow(dead_code)]
pub enum InputFilter {
    // No prescaling, single-sample capture.
    Div1N1 = 0b0000,
    // No prescaling, 8 consecutive samples required to register an event (glitch filtering).
    Div1N8 = 0b0011,
}
/// Generate a sampling-timer wrapper type along with a companion module
/// containing the timer's update-event and capture/compare channel singletons.
///
/// # Args (first form)
/// * `$name` - Name of the generated timer wrapper struct.
/// * `$TY` - The PAC timer peripheral type (e.g. `TIM2`).
/// * `$size` - Integer type matching the timer counter width (`u16`/`u32`).
///
/// The second (internal) form generates one capture/compare channel:
/// * `$index` - The 1-based channel index.
/// * `$ccmrx` - The capture/compare mode register (`ccmr1`/`ccmr2`) hosting
///   this channel's configuration bits.
macro_rules! timer_channels {
    ($name:ident, $TY:ident, $size:ty) => {
        paste::paste! {
            /// The timer used for managing ADC sampling.
            pub struct $name {
                timer: hal::timer::Timer<hal::stm32::[< $TY >]>,
                // Singletons handed out exactly once via `channels()` /
                // `update_event()`.
                channels: Option<[< $TY:lower >]::Channels>,
                update_event: Option<[< $TY:lower >]::UpdateEvent>,
            }

            impl $name {
                /// Construct the sampling timer.
                #[allow(dead_code)]
                pub fn new(mut timer: hal::timer::Timer<hal::stm32::[< $TY>]>) -> Self {
                    // The timer starts paused; call `start()` to begin counting.
                    timer.pause();

                    Self {
                        timer,
                        // Note(unsafe): Once these channels are taken, we guarantee that we do not
                        // modify any of the underlying timer channel registers, as ownership of the
                        // channels is now provided through the associated channel structures. We
                        // additionally guarantee this can only be called once because there is only
                        // one Timer2 and this resource takes ownership of it once instantiated.
                        channels: unsafe { Some([< $TY:lower >]::Channels::new()) },
                        update_event: unsafe { Some([< $TY:lower >]::UpdateEvent::new()) },
                    }
                }

                /// Get the timer capture/compare channels.
                ///
                /// # Panics
                /// Panics if called more than once.
                #[allow(dead_code)]
                pub fn channels(&mut self) -> [< $TY:lower >]::Channels {
                    self.channels.take().unwrap()
                }

                /// Get the timer update event.
                ///
                /// # Panics
                /// Panics if called more than once.
                #[allow(dead_code)]
                pub fn update_event(&mut self) -> [< $TY:lower >]::UpdateEvent {
                    self.update_event.take().unwrap()
                }

                /// Get the period of the timer.
                #[allow(dead_code)]
                pub fn get_period(&self) -> $size {
                    let regs = unsafe { &*hal::stm32::$TY::ptr() };
                    regs.arr.read().arr().bits()
                }

                /// Manually set the period of the timer.
                #[allow(dead_code)]
                pub fn set_period_ticks(&mut self, period: $size) {
                    let regs = unsafe { &*hal::stm32::$TY::ptr() };
                    regs.arr.write(|w| w.arr().bits(period));

                    // Force the new period to take effect immediately.
                    self.timer.apply_freq();
                }

                /// Clock the timer from an external source.
                ///
                /// # Note:
                /// * Currently, only an external source applied to ETR is supported.
                ///
                /// # Args
                /// * `prescaler` - The prescaler to use for the external source.
                #[allow(dead_code)]
                pub fn set_external_clock(&mut self, prescaler: Prescaler) {
                    let regs = unsafe { &*hal::stm32::$TY::ptr() };
                    regs.smcr.modify(|_, w| w.etps().bits(prescaler as u8).ece().set_bit());

                    // Clear any other prescaler configuration.
                    regs.psc.write(|w| w.psc().bits(0));
                }

                /// Start the timer.
                #[allow(dead_code)]
                pub fn start(&mut self) {
                    // Force a refresh of the frequency settings.
                    self.timer.apply_freq();

                    self.timer.reset_counter();
                    self.timer.resume();
                }

                /// Configure the timer peripheral to generate a trigger based on the provided
                /// source.
                #[allow(dead_code)]
                pub fn generate_trigger(&mut self, source: TriggerGenerator) {
                    let regs = unsafe { &*hal::stm32::$TY::ptr() };
                    // Note(unsafe) The TriggerGenerator enumeration is specified such that this is
                    // always in range.
                    regs.cr2.modify(|_, w| w.mms().bits(source as u8));
                }

                /// Select a trigger source for the timer peripheral.
                #[allow(dead_code)]
                pub fn set_trigger_source(&mut self, source: TriggerSource) {
                    let regs = unsafe { &*hal::stm32::$TY::ptr() };
                    // Note(unsafe) The TriggerSource enumeration is specified such that this is
                    // always in range.
                    regs.smcr.modify(|_, w| unsafe { w.ts().bits(source as u8) } );
                }

                /// Operate the timer as a slave of the provided trigger source
                /// with the given slave mode.
                #[allow(dead_code)]
                pub fn set_slave_mode(&mut self, source: TriggerSource, mode: SlaveMode) {
                    let regs = unsafe { &*hal::stm32::$TY::ptr() };
                    // Note(unsafe) The TriggerSource and SlaveMode enumerations are specified such
                    // that they are always in range.
                    regs.smcr.modify(|_, w| unsafe { w.sms().bits(mode as u8).ts().bits(source as u8) } );
                }
            }

            /// Companion module holding the update-event and channel types for
            /// this timer.
            pub mod [< $TY:lower >] {
                use super::hal::{stm32::$TY, dma::{traits::TargetAddress, PeripheralToMemory, dma::DMAReq}};

                /// Handle to the timer's update (counter roll-over) event.
                pub struct UpdateEvent {}

                impl UpdateEvent {
                    /// Create a new update event
                    ///
                    /// # Safety
                    /// This is only safe to call once.
                    #[allow(dead_code)]
                    pub unsafe fn new() -> Self {
                        Self {}
                    }

                    /// Enable DMA requests upon timer updates.
                    #[allow(dead_code)]
                    pub fn listen_dma(&self) {
                        // Note(unsafe): We perform only atomic operations on the timer registers.
                        let regs = unsafe { &*<$TY>::ptr() };
                        regs.dier.modify(|_, w| w.ude().set_bit());
                    }

                    /// Trigger a DMA request manually
                    #[allow(dead_code)]
                    pub fn trigger(&self) {
                        let regs = unsafe { &*<$TY>::ptr() };
                        regs.egr.write(|w| w.ug().set_bit());
                    }
                }

                /// The channels representing the timer.
                pub struct Channels {
                    pub ch1: Channel1,
                    pub ch2: Channel2,
                    pub ch3: Channel3,
                    pub ch4: Channel4,
                }

                impl Channels {
                    /// Construct a new set of channels.
                    ///
                    /// # Safety
                    /// This is only safe to call once.
                    #[allow(dead_code)]
                    pub unsafe fn new() -> Self {
                        unsafe { Self {
                            ch1: Channel1::new(),
                            ch2: Channel2::new(),
                            ch3: Channel3::new(),
                            ch4: Channel4::new(),
                        } }
                    }
                }

                // Generate the four capture/compare channels. Channels 1/2 are
                // configured via CCMR1 and channels 3/4 via CCMR2.
                timer_channels!(1, $TY, ccmr1, $size);
                timer_channels!(2, $TY, ccmr1, $size);
                timer_channels!(3, $TY, ccmr2, $size);
                timer_channels!(4, $TY, ccmr2, $size);
            }
        }
    };

    ($index:expr, $TY:ty, $ccmrx:expr, $size:ty) => {
        paste::paste! {
            pub use super::[< __ $TY:lower >]::[< $ccmrx _input >]::[< CC $index S_A>] as [< CaptureSource $index >];

            /// A capture/compare channel of the timer.
            pub struct [< Channel $index >] {}

            /// A capture channel of the timer.
            pub struct [< Channel $index InputCapture>] {}

            impl [< Channel $index >] {
                /// Construct a new timer channel.
                ///
                /// Note(unsafe): This function must only be called once. Once constructed, the
                /// constructee guarantees to never modify the timer channel.
                #[allow(dead_code)]
                unsafe fn new() -> Self {
                    Self {}
                }

                /// Allow the channel to generate DMA requests.
                #[allow(dead_code)]
                pub fn listen_dma(&self) {
                    let regs = unsafe { &*<$TY>::ptr() };
                    regs.dier.modify(|_, w| w.[< cc $index de >]().set_bit());
                }

                /// Operate the channel as an output-compare.
                ///
                /// # Args
                /// * `value` - The value to compare the sampling timer's counter against.
                ///
                /// # Panics
                /// Panics if `value` exceeds the current timer period (ARR).
                #[allow(dead_code)]
                pub fn to_output_compare(&self, value: $size) {
                    let regs = unsafe { &*<$TY>::ptr() };
                    let arr = regs.arr.read().bits() as $size;
                    assert!(value <= arr);
                    regs.ccr[$index - 1].write(|w| w.ccr().bits(value));
                    regs.[< $ccmrx _output >]()
                        .modify(|_, w| unsafe { w.[< cc $index s >]().bits(0) });
                }

                /// Operate the channel in input-capture mode.
                ///
                /// # Args
                /// * `input` - The input source for the input capture event.
                #[allow(dead_code)]
                pub fn into_input_capture(self, input: [< CaptureSource $index >]) -> [< Channel $index InputCapture >]{
                    let regs = unsafe { &*<$TY>::ptr() };
                    regs.[< $ccmrx _input >]().modify(|_, w| w.[< cc $index s>]().variant(input));

                    [< Channel $index InputCapture >] {}
                }
            }

            impl [< Channel $index InputCapture >] {
                /// Get the latest capture from the channel.
                ///
                /// # Returns
                /// `Ok(Some(value))` for a fresh capture, `Ok(None)` when no
                /// capture is pending, and `Err(Some(value))` when an
                /// over-capture was detected (a capture was lost before being
                /// read).
                #[allow(dead_code)]
                pub fn latest_capture(&mut self) -> Result<Option<$size>, Option<$size>> {
                    // Note(unsafe): This channel owns all access to the specific timer channel.
                    // Only atomic operations are performed on the timer registers.
                    let regs = unsafe { &*<$TY>::ptr() };
                    if regs.sr.read().[< cc $index if >]().bit_is_set() {
                        // Read the capture value. Reading the captured value clears the flag in the
                        // status register automatically.
                        let result = regs.ccr[$index - 1].read().ccr().bits();

                        // Read SR again to check for a potential over-capture. Return an error in
                        // that case.
                        let sr = regs.sr.read();
                        if sr.[< cc $index of >]().bit_is_set() {
                            // NOTE(unsafe) write-back is safe
                            regs.sr.write(|w| unsafe { w.bits(sr.bits()) }.[< cc $index of >]().clear_bit());
                            Err(Some(result))
                        } else {
                            Ok(Some(result))
                        }
                    } else {
                        Ok(None)
                    }
                }

                /// Allow the channel to generate DMA requests.
                #[allow(dead_code)]
                pub fn listen_dma(&self) {
                    // Note(unsafe): This channel owns all access to the specific timer channel.
                    // Only atomic operations are performed on the timer registers.
                    let regs = unsafe { &*<$TY>::ptr() };
                    regs.dier.modify(|_, w| w.[< cc $index de >]().set_bit());
                }

                /// Enable the input capture to begin capturing timer values.
                #[allow(dead_code)]
                pub fn enable(&mut self) {
                    // Read the latest input capture to clear any pending data in the register.
                    let _ = self.latest_capture();

                    // Note(unsafe): This channel owns all access to the specific timer channel.
                    // Only atomic operations are performed on the timer registers.
                    let regs = unsafe { &*<$TY>::ptr() };
                    regs.ccer.modify(|_, w| w.[< cc $index e >]().set_bit());
                }

                /// Check if an over-capture event has occurred.
                #[allow(dead_code)]
                pub fn check_overcapture(&self) -> bool {
                    // Note(unsafe): This channel owns all access to the specific timer channel.
                    // Only atomic operations are performed on the timer registers.
                    let regs = unsafe { &*<$TY>::ptr() };
                    regs.sr.read().[< cc $index of >]().bit_is_set()
                }

                /// Configure the input capture input pre-filter.
                ///
                /// # Args
                /// * `filter` - The desired input filter stage configuration. Defaults to disabled.
                #[allow(dead_code)]
                pub fn configure_filter(&mut self, filter: super::InputFilter) {
                    // Note(unsafe): This channel owns all access to the specific timer channel.
                    // Only atomic operations are performed on the timer registers.
                    let regs = unsafe { &*<$TY>::ptr() };
                    regs.[< $ccmrx _input >]().modify(|_, w| w.[< ic $index f >]().bits(filter as u8));
                }

                /// Configure the input capture prescaler.
                ///
                /// # Args
                /// * `psc` - Prescaler exponent.
                #[allow(dead_code)]
                pub fn configure_prescaler(&mut self, prescaler: super::Prescaler) {
                    // Note(unsafe): This channel owns all access to the specific timer channel.
                    // Only atomic operations are performed on the timer registers.
                    let regs = unsafe { &*<$TY>::ptr() };
                    // Note(unsafe): Enum values are all valid.
                    #[allow(unused_unsafe)]
                    regs.[< $ccmrx _input >]().modify(|_, w| unsafe {
                        w.[< ic $index psc >]().bits(prescaler as u8)});
                }
            }

            // Note(unsafe): This manually implements DMA support for input-capture channels. This
            // is safe as it is only completed once per channel and each DMA request is allocated to
            // each channel as the owner.
            unsafe impl TargetAddress<PeripheralToMemory> for [< Channel $index InputCapture >] {
                type MemSize = $size;

                const REQUEST_LINE: Option<u8> = Some(DMAReq::[< $TY:camel Ch $index >]as u8);

                fn address(&self) -> usize {
                    let regs = unsafe { &*<$TY>::ptr() };
                    &regs.ccr[$index - 1] as *const _ as usize
                }
            }
        }
    };
}
// The `$size` argument reflects the counter width of each timer (TIM2/TIM5
// take u32, TIM3/TIM8 take u16).
timer_channels!(SamplingTimer, TIM2, u32);
timer_channels!(ShadowSamplingTimer, TIM3, u16);
timer_channels!(TimestampTimer, TIM5, u32);
timer_channels!(PounderTimestampTimer, TIM8, u16);
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/net.rs | src/hardware/net.rs | //! Stabilizer network management module
//!
//! # Design
//! The stabilizer network architecture supports numerous layers to permit transmission of
//! telemetry (via MQTT), configuration of run-time settings (via MQTT + Miniconf), and data
//! streaming over raw UDP/TCP sockets. This module encompasses the main processing routines
//! related to Stabilizer networking operations.
use heapless;
use miniconf;
use crate::hardware::{SystemTimer, hal::ethernet};
use platform::{ApplicationMetadata, NetSettings, TelemetryClient};
use stream::{DataStream, FrameGenerator, Target};
use core::fmt::Write;
use heapless::String;
use miniconf::{TreeDeserializeOwned, TreeSerialize};
use miniconf_mqtt::minimq;
/// The ethernet PHY device (LAN8742A) attached to the MAC.
pub type EthernetPhy = ethernet::phy::LAN8742A<ethernet::EthernetMAC>;

/// A shared proxy to the global network stack, handed out by `NetworkManager`.
pub type NetworkReference =
    smoltcp_nal::shared::NetworkStackProxy<'static, NetworkStack>;

// Number of TX descriptors in the ethernet descriptor ring.
pub const TX_DESRING_CNT: usize = 4;

// Number of RX descriptors in the ethernet descriptor ring.
pub const RX_DESRING_CNT: usize = 4;

/// The network stack, backed by the ethernet DMA and timed by `SystemTimer`.
pub type NetworkStack = smoltcp_nal::NetworkStack<
    'static,
    ethernet::EthernetDMA<TX_DESRING_CNT, RX_DESRING_CNT>,
    SystemTimer,
>;

/// Owner of the shared network stack; provides `NetworkReference` proxies via
/// `acquire_stack()`.
pub type NetworkManager = smoltcp_nal::shared::NetworkManager<
    'static,
    ethernet::EthernetDMA<TX_DESRING_CNT, RX_DESRING_CNT>,
    SystemTimer,
>;
/// Statically-allocated backing buffers for the two MQTT clients.
struct MqttStorage {
    // Buffer for the telemetry client.
    telemetry: [u8; 2048],
    // Buffer for the Miniconf settings client.
    settings: [u8; 1024],
}
impl Default for MqttStorage {
fn default() -> Self {
Self {
telemetry: [0u8; 2048],
settings: [0u8; 1024],
}
}
}
/// Indicates whether a network component produced new activity during an
/// update pass.
pub enum UpdateState {
    /// Nothing changed.
    NoChange,
    /// New data or state changes were processed.
    Updated,
}
/// Aggregated result of updating all network users.
pub enum NetworkState {
    /// A settings update was applied via the Miniconf client.
    SettingsChanged,
    /// The network stack processed new data.
    Updated,
    /// Nothing changed.
    NoChange,
}
/// Maximum settings-tree depth supported by the Miniconf MQTT client.
const MAX_DEPTH: usize = 16;
/// A structure of Stabilizer's default network users.
pub struct NetworkUsers<S> {
    // MQTT client serving run-time settings via Miniconf.
    miniconf: miniconf_mqtt::MqttClient<
        'static,
        S,
        NetworkReference,
        SystemTimer,
        minimq::broker::NamedBroker<NetworkReference>,
        MAX_DEPTH,
    >,
    // Drives the network stack and PHY link handling.
    pub processor: NetworkProcessor,
    // Raw-socket data stream and its frame generator; the generator is handed
    // out once via `configure_streaming()`.
    stream: DataStream<NetworkReference>,
    generator: Option<FrameGenerator>,
    // MQTT telemetry publisher.
    pub telemetry: TelemetryClient<SystemTimer, NetworkReference>,
}
impl<S> NetworkUsers<S>
where
    S: TreeDeserializeOwned + TreeSerialize,
{
    /// Construct Stabilizer's default network users.
    ///
    /// # Args
    /// * `stack` - The network stack that will be used to share with all network users.
    /// * `phy` - The ethernet PHY connecting the network.
    /// * `clock` - A `SystemTimer` implementing `Clock`.
    /// * `app` - The name of the application.
    /// * `net_settings` - The network-specific settings to use for the application.
    /// * `metadata` - The application metadata
    ///
    /// # Returns
    /// A new struct of network users.
    ///
    /// # Panics
    /// Panics if called more than once (the singletons below may only be
    /// taken once) or if a broker/client configuration is invalid.
    pub fn new(
        stack: NetworkStack,
        phy: EthernetPhy,
        clock: SystemTimer,
        app: &str,
        net_settings: &NetSettings,
        metadata: &'static ApplicationMetadata,
    ) -> Self {
        let stack_manager =
            cortex_m::singleton!(: NetworkManager = NetworkManager::new(stack))
                .unwrap();

        let processor =
            NetworkProcessor::new(stack_manager.acquire_stack(), phy);

        let prefix = cortex_m::singleton!(: String<128> = get_device_prefix(app, &net_settings.id)).unwrap();

        let store =
            cortex_m::singleton!(: MqttStorage = MqttStorage::default())
                .unwrap();

        // The settings (Miniconf) client uses its own broker connection.
        let named_broker = minimq::broker::NamedBroker::new(
            &net_settings.broker,
            stack_manager.acquire_stack(),
        )
        .unwrap();
        let miniconf = miniconf_mqtt::MqttClient::<_, _, _, _, MAX_DEPTH>::new(
            stack_manager.acquire_stack(),
            prefix.as_str(),
            clock,
            minimq::ConfigBuilder::new(named_broker, &mut store.settings)
                .client_id(&get_client_id(&net_settings.id, "settings"))
                .unwrap(),
        )
        .unwrap();

        // The telemetry client uses a second, independent broker connection.
        let named_broker = minimq::broker::NamedBroker::new(
            &net_settings.broker,
            stack_manager.acquire_stack(),
        )
        .unwrap();
        let mqtt = minimq::Minimq::new(
            stack_manager.acquire_stack(),
            clock,
            minimq::ConfigBuilder::new(named_broker, &mut store.telemetry)
                // The telemetry client doesn't receive any messages except MQTT control packets.
                // As such, we don't need much of the buffer for RX.
                .rx_buffer(minimq::config::BufferConfig::Maximum(100))
                .client_id(&get_client_id(&net_settings.id, "tlm"))
                .unwrap(),
        );

        let telemetry = TelemetryClient::new(mqtt, prefix, metadata);

        let (generator, stream) = stream::setup(stack_manager.acquire_stack());

        NetworkUsers {
            miniconf,
            processor,
            telemetry,
            stream,
            generator: Some(generator),
        }
    }

    /// Enable data streaming.
    ///
    /// # Args
    /// * `format` - A unique u8 code indicating the format of the data.
    ///
    /// # Panics
    /// Panics if called more than once: the frame generator is taken here.
    pub fn configure_streaming(
        &mut self,
        format: impl Into<u8>,
    ) -> FrameGenerator {
        let mut generator = self.generator.take().unwrap();
        generator.configure(format);
        generator
    }

    /// Direct the stream to the provided remote target.
    ///
    /// # Args
    /// * `remote` - The destination for the streamed data.
    pub fn direct_stream(&mut self, remote: Target) {
        // Only redirect once the generator was taken via
        // `configure_streaming()`.
        if self.generator.is_none() {
            self.stream.set_remote(remote);
        }
    }

    /// Update and process all of the network users state.
    ///
    /// # Returns
    /// An indication if any of the network users indicated a state change.
    /// `NetworkState::SettingsChanged` indicates that a settings update was
    /// applied to `settings`.
    pub fn update(&mut self, settings: &mut S) -> NetworkState {
        // Update the MQTT clients.
        self.telemetry.update();

        // Update the data stream.
        if self.generator.is_none() {
            self.stream.process();
        }

        // Poll for incoming data.
        let poll_result = match self.processor.update() {
            UpdateState::NoChange => NetworkState::NoChange,
            UpdateState::Updated => NetworkState::Updated,
        };

        // A settings change takes precedence over a plain network update.
        let res = self.miniconf.update(settings);
        match res {
            Ok(true) => NetworkState::SettingsChanged,
            _ => poll_result,
        }
    }
}
/// Get an MQTT client ID for a client.
///
/// # Args
/// * `id` - The base client ID
/// * `mode` - The operating mode of this client. (i.e. tlm, settings)
///
/// # Returns
/// A client ID that may be used for MQTT client identification.
fn get_client_id(id: &str, mode: &str) -> String<64> {
let mut identifier = String::new();
write!(&mut identifier, "{id}-{mode}").unwrap();
identifier
}
/// Get the MQTT prefix of a device.
///
/// # Args
/// * `app` - The name of the application that is executing.
/// * `id` - The MQTT ID of the device.
///
/// # Returns
/// The MQTT prefix used for this device.
fn get_device_prefix(app: &str, id: &str) -> String<128> {
// Note(unwrap): The mac address + binary name must be short enough to fit into this string. If
// they are defined too long, this will panic and the device will fail to boot.
let mut prefix: String<128> = String::new();
write!(&mut prefix, "dt/sinara/{app}/{id}").unwrap();
prefix
}
// Task to process network hardware.
//
// # Design
// The network processor is a small task to regularly process incoming data over ethernet, handle
// the ethernet PHY state, and reset the network as appropriate.

/// Processor for managing network hardware.
pub struct NetworkProcessor {
    stack: NetworkReference,
    phy: EthernetPhy,
    // Set once the stack has been reset after a link loss, so the reset is
    // performed only once per disconnection.
    network_was_reset: bool,
}
impl NetworkProcessor {
    /// Construct a new network processor.
    ///
    /// # Args
    /// * `stack` - A reference to the shared network stack.
    /// * `phy` - The ethernet PHY used for the network.
    ///
    /// # Returns
    /// The newly constructed processor.
    pub fn new(stack: NetworkReference, phy: EthernetPhy) -> Self {
        Self {
            phy,
            stack,
            network_was_reset: false,
        }
    }

    /// Handle ethernet link connection status.
    ///
    /// # Note
    /// This may take non-trivial amounts of time to communicate with the PHY. As such, this should
    /// only be called as often as necessary (e.g. once per second or so).
    pub fn handle_link(&mut self) {
        let link_up = self.phy.poll_link();

        if link_up {
            // The link recovered after a reset.
            if self.network_was_reset {
                log::warn!("Network link UP");
                self.network_was_reset = false;
            }
        } else if !self.network_was_reset {
            // Reset the network stack once per link loss. This prevents us
            // from sending an excessive number of DHCP requests.
            log::warn!("Network link DOWN");
            self.network_was_reset = true;
            self.stack.lock(|stack| stack.handle_link_reset());
        }
    }

    /// Process and update the state of the network.
    ///
    /// # Note
    /// This function should be called regularly before other network tasks to update the state of
    /// all relevant network sockets.
    ///
    /// # Returns
    /// An update state corresponding with any changes in the underlying network.
    pub fn update(&mut self) -> UpdateState {
        // A polling error is also reported as an update, since socket state
        // may have changed.
        match self.stack.lock(|stack| stack.poll()) {
            Ok(false) => UpdateState::NoChange,
            _ => UpdateState::Updated,
        }
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/adc.rs | src/hardware/adc.rs | //! Stabilizer ADC management interface
//!
//! # Design
//!
//! Stabilizer ADCs are connected to the MCU via a simplex, SPI-compatible interface. The ADCs
//! require a setup conversion time after asserting the CSn (convert) signal to generate the ADC
//! code from the sampled level. Once the setup time has elapsed, the ADC data is clocked out of
//! MISO. The internal setup time is managed by the SPI peripheral via a CSn setup time parameter
//! during SPI configuration, which allows offloading the management of the setup time to hardware.
//!
//! Because of the SPI-compatibility of the ADCs, a single SPI peripheral + DMA is used to automate
//! the collection of multiple ADC samples without requiring processing by the CPU, which reduces
//! overhead and provides the CPU with more time for processing-intensive tasks, like DSP.
//!
//! The automation of sample collection utilizes three DMA streams, the SPI peripheral, and two
//! timer compare channel for each ADC. One timer comparison channel is configured to generate a
//! comparison event every time the timer is equal to a specific value. Each comparison then
//! generates a DMA transfer event to write into the SPI CR1 register to initiate the transfer.
//! This allows the SPI interface to periodically read a single sample. The other timer comparison
//! channel is configured to generate a comparison event slightly before the first (~10 timer
//! cycles). This channel triggers a separate DMA stream to clear the EOT flag within the SPI
//! peripheral. The EOT flag must be cleared after each transfer or the SPI peripheral will not
//! properly complete the single conversion. Thus, by using two DMA streams and timer comparison
//! channels, the SPI can regularly acquire ADC samples.
//!
//! In order to collect the acquired ADC samples into a RAM buffer, a final DMA transfer is
//! configured to read from the SPI RX FIFO into RAM. The request for this transfer is connected to
//! the SPI RX data signal, so the SPI peripheral will request to move data into RAM whenever it is
//! available. When enough samples have been collected, a transfer-complete interrupt is generated
//! and the ADC samples are available for processing.
//!
//! After a complete transfer of a batch of samples, the inactive buffer is available to the
//! user for processing. The processing must complete before the DMA transfer of the next batch
//! completes.
//!
//! ## Starting Data Collection
//!
//! Because the DMA data collection is automated via timer count comparisons and DMA transfers, the
//! ADCs can be initialized and configured, but will not begin sampling the external ADCs until the
//! sampling timer is enabled. As such, the sampling timer should be enabled after all
//! initialization has completed and immediately before the embedded processing loop begins.
//!
//!
//! ## Batch Sizing
//!
//! The ADCs collect a group of N samples, which is referred to as a batch. The size of the batch
//! is configured by the user at compile-time to allow for a custom-tailored implementation. Larger
//! batch sizes generally provide for lower overhead and more processing time per sample, but come
//! at the expense of increased input -> output latency.
//!
//!
//! # Note
//!
//! While there are two ADCs, only a single ADC is configured to generate transfer-complete
//! interrupts. This is done because it is assumed that the ADCs will always be sampled
//! simultaneously. If only a single ADC is used, it must always be ADC0, as ADC1 will not generate
//! transfer-complete interrupts.
//!
//! There is a very small amount of latency between sampling of ADCs due to bus matrix priority. As
//! such, one of the ADCs will be sampled marginally earlier before the other because the DMA
//! requests are generated simultaneously. This can be avoided by providing a known offset to the
//! sample DMA requests, which can be completed by setting e.g. ADC0's comparison to a counter
//! value of 0 and ADC1's comparison to a counter value of 1.
//!
//! In this implementation, double buffer mode DMA transfers are used because the SPI RX FIFOs
//! have finite depth, FIFO access is slower than AXISRAM access, and because the single
//! buffer mode DMA disable/enable and buffer update sequence is slow.
use rtic::Mutex;
use grounded::uninit::{GroundedArrayCell, GroundedCell};
use super::timers;
use crate::design_parameters::SampleBuffer;
use super::hal::{
self,
dma::{
DMAError, MemoryToPeripheral, PeripheralToMemory, Transfer,
config::Priority,
dma::{DMAReq, DmaConfig},
traits::TargetAddress,
},
spi::{HalDisabledSpi, HalEnabledSpi, HalSpi},
};
// The following data is written by the timer ADC sample trigger into the SPI CR1 to start the
// transfer. Data in AXI SRAM is not initialized on boot, so the contents are random. This value is
// initialized during setup (in the `adc_input!`-generated `new()`).
#[unsafe(link_section = ".axisram.buffers")]
static SPI_START: GroundedCell<[u32; 1]> = GroundedCell::uninit();

// The following data is written by the timer flag clear trigger into the SPI IFCR register to clear
// the EOT flag. Data in AXI SRAM is not initialized on boot, so the contents are random. This
// value is initialized during setup (in the `adc_input!`-generated `new()`).
#[unsafe(link_section = ".axisram.buffers")]
static SPI_EOT_CLEAR: GroundedCell<[u32; 1]> = GroundedCell::uninit();

// The following global buffers are used for the ADC sample DMA transfers. Two buffers are used for
// each transfer in a ping-pong buffer configuration (one is being acquired while the other is being
// processed). Note that the contents of AXI SRAM is uninitialized, so the buffer contents on
// startup are undefined. The dimensions are `ADC_BUF[adc_index][ping_pong_index][sample_index]`.
#[unsafe(link_section = ".axisram.buffers")]
static ADC_BUF: GroundedArrayCell<[SampleBuffer; 2], 2> =
    GroundedArrayCell::uninit();
/// Generate an ADC input channel type that automates sample acquisition over
/// SPI using three DMA streams and two timer compare channels.
///
/// # Args
/// * `$name` - Name of the generated ADC input struct.
/// * `$index` - The ADC index (0 or 1); selects the ping-pong buffer set and
///   the trigger output-compare offset.
/// * `$trigger_stream` - DMA stream writing SPI CR1 to start each transfer.
/// * `$data_stream` - DMA stream moving received samples into RAM.
/// * `$clear_stream` - DMA stream clearing the SPI EOT flag before each transfer.
/// * `$spi` - The SPI peripheral connected to the ADC.
/// * `$trigger_channel` / `$dma_req` - TIM2 compare channel and associated DMA
///   request line used to trigger transfers.
/// * `$clear_channel` / `$dma_clear_req` - TIM3 compare channel and associated
///   DMA request line used to clear the EOT flag.
macro_rules! adc_input {
    ($name:ident, $index:literal, $trigger_stream:ident, $data_stream:ident, $clear_stream:ident,
     $spi:ident, $trigger_channel:ident, $dma_req:ident, $clear_channel:ident, $dma_clear_req:ident) => {
        paste::paste! {
            /// $spi-CR is used as a type for indicating a DMA transfer into the SPI control
            /// register whenever the tim2 update dma request occurs.
            struct [< $spi CR >] {
                _channel: timers::tim2::$trigger_channel,
            }
            impl [< $spi CR >] {
                /// Bind the TIM2 compare channel; holding it guarantees
                /// exclusive use of the associated DMA request line.
                pub fn new(_channel: timers::tim2::$trigger_channel) -> Self {
                    Self { _channel }
                }
            }

            // Note(unsafe): This structure is only safe to instantiate once. The DMA request is
            // hard-coded and may only be used if ownership of the timer2 $trigger_channel compare
            // channel is assured, which is ensured by maintaining ownership of the channel.
            unsafe impl TargetAddress<MemoryToPeripheral> for [< $spi CR >] {
                type MemSize = u32;

                /// SPI DMA requests are generated whenever TIM2 CHx ($dma_req) comparison occurs.
                const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_req as u8);

                /// Whenever the DMA request occurs, it should write into SPI's CR1 to start the
                /// transfer.
                fn address(&self) -> usize {
                    // Note(unsafe): It is assumed that SPI is owned by another DMA transfer. This
                    // is only safe because we are writing to a configuration register.
                    let regs = unsafe { &*hal::stm32::$spi::ptr() };
                    &regs.cr1 as *const _ as usize
                }
            }

            /// $spi-IFCR is used as a type for indicating a DMA transfer into the SPI flag clear
            /// register whenever the tim3 compare dma request occurs. The flag must be cleared
            /// before the transfer starts.
            struct [< $spi IFCR >] {
                _channel: timers::tim3::$clear_channel,
            }

            impl [< $spi IFCR >] {
                /// Bind the TIM3 compare channel; holding it guarantees
                /// exclusive use of the associated DMA request line.
                pub fn new(_channel: timers::tim3::$clear_channel) -> Self {
                    Self { _channel }
                }
            }

            // Note(unsafe): This structure is only safe to instantiate once. The DMA request is
            // hard-coded and may only be used if ownership of the timer3 $clear_channel compare
            // channel is assured, which is ensured by maintaining ownership of the channel.
            unsafe impl TargetAddress<MemoryToPeripheral> for [< $spi IFCR >] {
                type MemSize = u32;

                /// SPI DMA requests are generated whenever TIM3 CHx ($dma_clear_req) comparison
                /// occurs.
                const REQUEST_LINE: Option<u8> = Some(DMAReq::$dma_clear_req as u8);

                /// Whenever the DMA request occurs, it should write into SPI's IFCR to clear the
                /// EOT flag to allow the next transmission.
                fn address(&self) -> usize {
                    // Note(unsafe): It is assumed that SPI is owned by another DMA transfer and
                    // this DMA is only used for writing to the configuration registers.
                    let regs = unsafe { &*hal::stm32::$spi::ptr() };
                    &regs.ifcr as *const _ as usize
                }
            }

            /// Represents data associated with ADC.
            pub struct $name {
                // Transfer moving received samples from the SPI RX FIFO into
                // the ping-pong RAM buffers.
                transfer: Transfer<
                    hal::dma::dma::$data_stream<hal::stm32::DMA1>,
                    hal::spi::Spi<hal::stm32::$spi, hal::spi::Disabled, u16>,
                    PeripheralToMemory,
                    &'static mut [u16],
                    hal::dma::DBTransfer,
                >,
                // Circular transfer writing SPI CR1 to start each conversion.
                trigger_transfer: Transfer<
                    hal::dma::dma::$trigger_stream<hal::stm32::DMA1>,
                    [< $spi CR >],
                    MemoryToPeripheral,
                    &'static mut [u32; 1],
                    hal::dma::DBTransfer,
                >,
                // Circular transfer clearing the SPI EOT flag before each
                // conversion.
                clear_transfer: Transfer<
                    hal::dma::dma::$clear_stream<hal::stm32::DMA1>,
                    [< $spi IFCR >],
                    MemoryToPeripheral,
                    &'static mut [u32; 1],
                    hal::dma::DBTransfer,
                >,
            }

            impl $name {
                /// Construct the ADC input channel.
                ///
                /// # Args
                /// * `spi` - The SPI interface used to communicate with the ADC.
                /// * `trigger_stream` - The DMA stream used to trigger each ADC transfer by
                ///   writing a word into the SPI TX FIFO.
                /// * `data_stream` - The DMA stream used to read samples received over SPI into a data buffer.
                /// * `clear_stream` - The DMA stream used to clear the EOT flag in the SPI peripheral.
                /// * `trigger_channel` - The ADC sampling timer output compare channel for read triggers.
                /// * `clear_channel` - The shadow sampling timer output compare channel used for
                ///   clearing the SPI EOT flag.
                /// * `batch_size` - The number of samples acquired per buffer transfer.
                pub fn new(
                    spi: hal::spi::Spi<hal::stm32::$spi, hal::spi::Enabled, u16>,
                    trigger_stream: hal::dma::dma::$trigger_stream<
                        hal::stm32::DMA1,
                    >,
                    data_stream: hal::dma::dma::$data_stream<hal::stm32::DMA1>,
                    clear_stream: hal::dma::dma::$clear_stream<hal::stm32::DMA1>,
                    trigger_channel: timers::tim2::$trigger_channel,
                    clear_channel: timers::tim3::$clear_channel,
                    batch_size: usize,
                ) -> Self {
                    // The flag clear DMA transfer always clears the EOT flag in the SPI
                    // peripheral. It has the highest priority to ensure it is completed before the
                    // transfer trigger.
                    let clear_config = DmaConfig::default()
                        .priority(Priority::VeryHigh)
                        .circular_buffer(true);

                    // Note(unsafe): Because this is a Memory->Peripheral transfer, this data is
                    // never actually modified. It technically only needs to be immutably
                    // borrowed, but the current HAL API only supports mutable borrows.
                    let spi_eot_clear = unsafe {
                        let ptr = SPI_EOT_CLEAR.get();
                        // Bit 3 of the SPI IFCR clears the EOT flag (per the
                        // purpose of this transfer; see SPI_EOT_CLEAR docs).
                        ptr.write([1 << 3]);
                        &mut *ptr
                    };

                    // Generate DMA events when the timer hits zero (roll-over). This must be before
                    // the trigger channel DMA occurs, as if the trigger occurs first, the
                    // transmission will not occur.
                    clear_channel.listen_dma();
                    clear_channel.to_output_compare(0);

                    let clear_transfer: Transfer<
                        _,
                        _,
                        MemoryToPeripheral,
                        _,
                        _,
                    > = Transfer::init(
                        clear_stream,
                        [< $spi IFCR >]::new(clear_channel),
                        spi_eot_clear,
                        None,
                        clear_config,
                    );

                    // Generate DMA events when an output compare of the timer hits the specified
                    // value. The per-ADC offset staggers the two ADC DMA requests.
                    trigger_channel.listen_dma();
                    trigger_channel.to_output_compare(2 + $index);

                    // The trigger stream constantly writes to the SPI CR1 using a static word
                    // (which is a static value to enable the SPI transfer). Thus, neither the
                    // memory or peripheral address ever change. This is run in circular mode to be
                    // completed at every DMA request.
                    let trigger_config = DmaConfig::default()
                        .priority(Priority::High)
                        .circular_buffer(true);

                    // Note(unsafe): This word is initialized once per ADC initialization to verify
                    // it is initialized properly.
                    // Note(unsafe): Because this is a Memory->Peripheral transfer, this data is never
                    // actually modified. It technically only needs to be immutably borrowed, but the
                    // current HAL API only supports mutable borrows.
                    // Write a binary code into the SPI control register to initiate a transfer
                    // (0x201: presumably SPE (bit 0) + CSTART (bit 9) — confirm against the
                    // reference manual).
                    let spi_start = unsafe {
                        let ptr = SPI_START.get();
                        ptr.write([0x201]);
                        &mut *ptr
                    };

                    // Construct the trigger stream to write from memory to the peripheral.
                    let trigger_transfer: Transfer<
                        _,
                        _,
                        MemoryToPeripheral,
                        _,
                        _,
                    > = Transfer::init(
                        trigger_stream,
                        [< $spi CR >]::new(trigger_channel),
                        spi_start,
                        None,
                        trigger_config,
                    );

                    // The data stream constantly reads from the SPI RX FIFO into a RAM buffer. The peripheral
                    // stalls reads of the SPI RX FIFO until data is available, so the DMA transfer completes
                    // after the requested number of samples have been collected. Note that only ADC1's (sic!)
                    // data stream is used to trigger a transfer completion interrupt.
                    let data_config = DmaConfig::default()
                        .memory_increment(true)
                        .double_buffer(true)
                        .transfer_complete_interrupt($index == 1)
                        .priority(Priority::VeryHigh);

                    // A SPI peripheral error interrupt is used to determine if the RX FIFO
                    // overflows. This indicates that samples were dropped due to excessive
                    // processing time in the main application (e.g. a second DMA transfer completes
                    // before the first was done with processing). This is used as a flow control
                    // indicator to guarantee that no ADC samples are lost.
                    let mut spi = spi.disable();
                    spi.listen(hal::spi::Event::Error);

                    // Split this ADC's ping-pong pair into the two halves used
                    // for double-buffer-mode DMA.
                    let adc_bufs = unsafe {
                        ADC_BUF.initialize_all_with(|| Default::default());
                        ADC_BUF.get_element_mut_unchecked($index).split_at_mut(1)
                    };

                    // The data transfer is always a transfer of data from the peripheral to a RAM
                    // buffer.
                    let data_transfer: Transfer<_, _, PeripheralToMemory, _, _> =
                        Transfer::init(
                            data_stream,
                            spi,
                            // Note(unsafe): The ADC_BUF[$index] is "owned" by this peripheral.
                            // It shall not be used anywhere else in the module.
                            &mut adc_bufs.0[0][..batch_size],
                            Some(&mut adc_bufs.1[0][..batch_size]),
                            data_config,
                        );

                    Self {
                        transfer: data_transfer,
                        trigger_transfer,
                        clear_transfer,
                    }
                }

                /// Enable the ADC DMA transfer sequence.
                pub fn start(&mut self) {
                    self.transfer.start(|spi| {
                        spi.enable_dma_rx();

                        // Each SPI transaction is a single word (TSIZE = 1);
                        // the trigger DMA restarts the peripheral per sample.
                        spi.inner().cr2.modify(|_, w| w.tsize().bits(1));
                        spi.inner().cr1.modify(|_, w| w.spe().set_bit());
                    });

                    self.clear_transfer.start(|_| {});
                    self.trigger_transfer.start(|_| {});
                }

                /// Wait for the transfer of the currently active buffer to complete,
                /// then call a function on the now inactive buffer and acknowledge the
                /// transfer complete flag.
                ///
                /// NOTE(unsafe): Memory safety and access ordering is not guaranteed
                /// (see the HAL DMA docs).
                pub fn with_buffer<F, R>(&mut self, f: F) -> Result<R, DMAError>
                where
                    F: FnOnce(&mut &'static mut [u16]) -> R,
                {
                    unsafe { self.transfer.next_dbm_transfer_with(|buf, _current| {
                        f(buf)
                    })}
                }
            }

            // This is not actually a Mutex. It only re-uses the semantics and macros of mutex-trait
            // to reduce rightward drift when jointly calling `with_buffer(f)` on multiple DAC/ADCs.
            impl Mutex for $name {
                type T = &'static mut [u16];
                fn lock<R>(&mut self, f: impl FnOnce(&mut Self::T) -> R) -> R {
                    self.with_buffer(f).unwrap()
                }
            }
        }
    };
}
adc_input!(
Adc0Input, 0, Stream0, Stream1, Stream2, SPI2, Channel1, Tim2Ch1, Channel1,
Tim3Ch1
);
adc_input!(
Adc1Input, 1, Stream3, Stream4, Stream5, SPI3, Channel2, Tim2Ch2, Channel2,
Tim3Ch2
);
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/pounder/dds_output.rs | src/hardware/pounder/dds_output.rs | //! The DdsOutput is used as an output stream to the pounder DDS.
//!
//! # Design
//!
//! The DDS stream interface is a means of quickly updating pounder DDS (direct digital synthesis)
//! outputs of the AD9959 DDS chip. The DDS communicates via a quad-SPI interface and a single
//! IO-update output pin.
//!
//! In order to update the DDS interface, the frequency tuning word, amplitude control word, and
//! the phase offset word for a channel can be modified to change the frequency, amplitude, or
//! phase on any of the 4 available output channels. Changes do not propagate to DDS outputs until
//! the IO-update pin is toggled high to activate the new configurations. This allows multiple
//! channels or parameters to be updated and then effects can take place simultaneously.
//!
//! In this implementation, the phase, frequency, or amplitude can be updated for any single
//! collection of outputs simultaneously. This is done by serializing the register writes to the
//! DDS into a single buffer of data and then writing the data over QSPI to the DDS.
//!
//! In order to minimize software overhead, data is written directly into the QSPI output FIFO. In
//! order to accomplish this most efficiently, serialized data is written as 32-bit words to
//! minimize the number of bus cycles necessary to write to the peripheral FIFO. A consequence of
//! this is that additional unneeded register writes may be appended to align a transfer to 32-bit
//! word sizes.
//!
//! In order to pulse the IO-update signal, the high-resolution timer output is used. The timer is
//! configured to assert the IO-update signal after a predefined delay and then de-assert the
//! signal after a predefined assertion duration. This allows for the actual QSPI transfer and
//! IO-update toggle to be completed asynchronously to the rest of software processing - that is,
//! software can schedule the DDS updates and then continue data processing. DDS updates then take
//! place in the future when the IO-update is toggled by hardware.
//!
//!
//! # Limitations
//!
//! The QSPI output FIFO is used as an intermediate buffer for holding pending QSPI writes. Because
//! of this, the implementation only supports up to 16 serialized bytes (the QSPI FIFO is 8 32-bit
//! words, or 32 bytes, wide) in a single update.
//!
//! There is currently no synchronization between completion of the QSPI data write and the
//! IO-update signal. It is currently assumed that the QSPI transfer will always complete within a
//! predefined delay (the pre-programmed IO-update timer delay).
//!
//!
//! # Future Improvement
//!
//! In the future, it would be possible to utilize a DMA transfer to complete the QSPI transfer.
//! Once the QSPI transfer completed, this could trigger the IO-update timer to start to
//! asynchronously complete IO-update automatically. This would allow for arbitrary profile sizes
//! and ensure that IO-update was in-sync with the QSPI transfer.
//!
//! Currently, serialization is performed on each processing cycle. If there is a
//! compile-time-known register update sequence needed for the application, the serialization
//! process can be done once and then register values can be written into a pre-computed serialized
//! buffer to avoid the software overhead of much of the serialization process.
use log::warn;
use super::{QspiInterface, hal, hrtimer::HighResTimerE};
use ad9959::{Mode, ProfileSerializer};
/// The DDS profile update stream.
pub struct DdsOutput {
_qspi: QspiInterface,
io_update_trigger: HighResTimerE,
mode: Mode,
}
impl DdsOutput {
/// Construct a new DDS output stream.
///
/// # Note
/// It is assumed that the QSPI stream and the IO_Update trigger timer have been configured in a
/// way such that the profile has sufficient time to be written before the IO_Update signal is
/// generated.
///
/// # Args
/// * `qspi` - The QSPI interface to the run the stream on.
/// * `io_update_trigger` - The HighResTimerE used to generate IO_Update pulses.
/// * `config` - The frozen DDS configuration.
pub fn new(
mut qspi: QspiInterface,
io_update_trigger: HighResTimerE,
mode: Mode,
) -> Self {
qspi.start_stream().unwrap();
Self {
mode,
_qspi: qspi,
io_update_trigger,
}
}
/// Get a builder for serializing a Pounder DDS profile.
pub fn builder(&mut self) -> ProfileSerializer {
ProfileSerializer::new(self.mode)
}
/// Write a profile to the stream.
///
/// # Note:
/// If a profile of more than 8 words is provided, the QSPI interface will likely
/// stall execution. If there are still bytes pending in the FIFO, the write will certainly
/// stall.
///
/// # Args
/// * `profile` - The serialized DDS profile to write.
#[inline]
pub fn write(&mut self, mut profile: ProfileSerializer) {
// Note(unsafe): We own the QSPI interface, so it is safe to access the registers in a raw
// fashion.
let regs = unsafe { &*hal::stm32::QUADSPI::ptr() };
// Warn if the fifo is still at least half full.
if regs.sr.read().flevel().bits() >= 16 {
warn!("QSPI stalling")
}
for word in profile.finalize().iter() {
// Note(unsafe): any bit pattern is valid for a TX FIFO write.
regs.dr.write(|w| unsafe { w.bits(*word) });
}
// Trigger the IO_update signal generating timer to asynchronous create the IO_Update pulse.
self.io_update_trigger.trigger();
}
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/pounder/timestamp.rs | src/hardware/pounder/timestamp.rs | //! ADC sample timestamper using external Pounder reference clock.
//!
//! # Design
//!
//! The pounder timestamper utilizes the pounder SYNC_CLK output as a fast external reference clock
//! for recording a timestamp for each of the ADC samples.
//!
//! To accomplish this, a timer peripheral is configured to be driven by an external clock input.
//! Due to the limitations of clock frequencies allowed by the timer peripheral, the SYNC_CLK input
//! is divided by 4. This clock then clocks the timer peripheral in a free-running mode with an ARR
//! (max count register value) configured to overflow once per ADC sample batch.
//!
//! Once the timer is configured, an input capture is configured to record the timer count
//! register. The input capture is configured to utilize an internal trigger for the input capture.
//! The internal trigger is selected such that when a sample is generated on ADC0, the input
//! capture is simultaneously triggered. That trigger is prescaled (its rate is divided) by the
//! batch size. This results in the input capture triggering identically to when the ADC samples
//! the last sample of the batch. That sample is then available for processing by the user.
use super::hal;
use crate::hardware::timers;
/// Software unit to timestamp stabilizer ADC samples using an external pounder reference clock.
pub struct Timestamper {
timer: timers::PounderTimestampTimer,
capture_channel: timers::tim8::Channel1InputCapture,
}
impl Timestamper {
/// Construct the pounder sample timestamper.
///
/// # Args
/// * `timestamp_timer` - The timer peripheral used for capturing timestamps from.
/// * `capture_channel` - The input capture channel for collecting timestamps.
/// * `sampling_timer` - The stabilizer ADC sampling timer.
/// * `_clock_input` - The input pin for the external clock from Pounder.
/// * `batch_size` - The number of samples in each batch.
///
/// # Returns
/// The new pounder timestamper in an operational state.
pub fn new(
mut timestamp_timer: timers::PounderTimestampTimer,
capture_channel: timers::tim8::Channel1,
sampling_timer: &mut timers::SamplingTimer,
_clock_input: hal::gpio::gpioa::PA0<hal::gpio::Alternate<3>>,
batch_size: usize,
) -> Self {
// The sampling timer should generate a trigger output when CH1 comparison occurs.
sampling_timer.generate_trigger(timers::TriggerGenerator::ComparePulse);
// The timestamp timer trigger input should use TIM2 (SamplingTimer)'s trigger, which is
// mapped to ITR1.
timestamp_timer.set_trigger_source(timers::TriggerSource::Trigger1);
// The capture channel should capture whenever the trigger input occurs.
let mut input_capture = capture_channel
.into_input_capture(timers::tim8::CaptureSource1::Trc);
let prescaler = match batch_size {
1 => timers::Prescaler::Div1,
2 => timers::Prescaler::Div2,
4 => timers::Prescaler::Div4,
8 => timers::Prescaler::Div8,
_ => panic!("Batch size does not support DDS timestamping"),
};
// Capture at the batch period.
input_capture.configure_prescaler(prescaler);
Self {
timer: timestamp_timer,
capture_channel: input_capture,
}
}
/// Start collecting timestamps.
pub fn start(&mut self) {
self.capture_channel.enable();
}
/// Update the period of the underlying timestamp timer.
pub fn update_period(&mut self, period: u16) {
self.timer.set_period_ticks(period);
}
/// Obtain a timestamp.
///
/// # Returns
/// A `Result` potentially indicating capture overflow and containing a `Option` of a captured
/// timestamp.
pub fn latest_timestamp(&mut self) -> Result<Option<u16>, Option<u16>> {
self.capture_channel.latest_capture()
}
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/pounder/hrtimer.rs | src/hardware/pounder/hrtimer.rs | //! The HRTimer (High Resolution Timer) is used to generate IO_Update pulses to the Pounder DDS.
use super::hal::{
self,
rcc::{ResetEnable, rec},
};
/// A HRTimer output channel.
#[allow(dead_code)]
pub enum Channel {
One,
Two,
}
/// The high resolution timer. Currently, only Timer E is supported.
pub struct HighResTimerE {
master: hal::stm32::HRTIM_MASTER,
timer: hal::stm32::HRTIM_TIME,
common: hal::stm32::HRTIM_COMMON,
}
impl HighResTimerE {
/// Construct a new high resolution timer for generating IO_update signals.
pub fn new(
timer_regs: hal::stm32::HRTIM_TIME,
master_regs: hal::stm32::HRTIM_MASTER,
common_regs: hal::stm32::HRTIM_COMMON,
prec: rec::Hrtim,
) -> Self {
prec.reset().enable();
Self {
master: master_regs,
timer: timer_regs,
common: common_regs,
}
}
/// Configure the timer to operate in single-shot mode.
///
/// # Note
/// This will configure the timer to generate a single pulse on an output channel. The timer
/// will only count up once and must be `trigger()`'d after / configured.
///
/// The output will be asserted from `set_offset` to `set_offset` + `set_duration` in the count.
///
/// # Args
/// * `channel` - The timer output channel to configure.
/// * `set_duration` - The duration that the output should be asserted for.
/// * `set_offset` - The first time at which the output should be asserted.
pub fn configure_single_shot(
&mut self,
channel: Channel,
delay: f32,
duration: f32,
clk: f32,
) {
// Disable the timer before configuration.
self.master.mcr.modify(|_, w| w.tecen().clear_bit());
// Configure the desired timer for single shot mode with set and reset of the specified
// channel at the desired durations. The HRTIM is on APB2 (D2 domain), and the kernel clock
// is the APB bus clock.
let end = ((delay + duration) * clk) as u32 + 1;
// Determine the clock divider, which may be 1, 2, or 4. We will choose a clock divider that
// allows us the highest resolution per tick, so lower dividers are favored.
let div: u8 = if end < 0xFFDF {
1
} else if (end / 2) < 0xFFDF {
2
} else if (end / 4) < 0xFFDF {
3
} else {
panic!("Unattainable timing parameters!");
};
// The period register must be greater than or equal to 3 cycles.
let period = (end / (1 << (div - 1)) as u32) as u16;
assert!(period > 2);
// We now have the prescaler and the period registers. Configure the timer.
// Note(unsafe): The prescaler is guaranteed to be greater than or equal to 4 (minimum
// allowed value) due to the addition. The setting is always 1, 2, or 3, which represents
// all valid values.
self.timer
.timecr
.modify(|_, w| unsafe { w.ck_pscx().bits(div + 4) });
// Note(unsafe): The period register is guaranteed to be a 16-bit value, which will fit in
// this register.
self.timer.perer.write(|w| unsafe { w.perx().bits(period) });
// Configure the comparator 1 level.
let delay = (delay * clk) as u16;
// Note(unsafe): The offset is always a 16-bit value, so is always valid for values >= 3, as
// specified by the datasheet.
assert!(delay >= 3);
self.timer
.cmp1er
.write(|w| unsafe { w.cmp1x().bits(delay) });
// Configure the set/reset signals.
// Set on compare with CMP1, reset upon reaching PER
match channel {
Channel::One => {
self.timer.sete1r.write(|w| w.cmp1().set_bit());
self.timer.rste1r.write(|w| w.per().set_bit());
self.common.oenr.write(|w| w.te1oen().set_bit());
}
Channel::Two => {
self.timer.sete2r.write(|w| w.cmp1().set_bit());
self.timer.rste2r.write(|w| w.per().set_bit());
self.common.oenr.write(|w| w.te2oen().set_bit());
}
}
// Enable the timer now that it is configured.
self.master.mcr.modify(|_, w| w.tecen().set_bit());
}
/// Generate a single trigger of the timer to start the output pulse generation.
pub fn trigger(&mut self) {
// Generate a reset event to force the timer to start counting.
self.common.cr2.write(|w| w.terst().set_bit());
}
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/hardware/pounder/mod.rs | src/hardware/pounder/mod.rs | use super::hal;
use crate::hardware::{I2c1Proxy, shared_adc::AdcChannel};
use ad9959::Address;
use embedded_hal_02::blocking::spi::Transfer;
use serde::{Deserialize, Serialize};
use strum::IntoEnumIterator;
pub mod dds_output;
pub mod hrtimer;
#[cfg(not(feature = "pounder_v1_0"))]
pub mod timestamp;
#[derive(Debug, Copy, Clone, strum::EnumIter)]
pub enum GpioPin {
Led4Green,
Led5Red,
Led6Green,
Led7Red,
Led8Green,
Led9Red,
DetPwrdown0,
DetPwrdown1,
AttLe0,
AttLe1,
AttLe2,
AttLe3,
DdsReset,
AttRstN,
OscEnN,
ExtClkSel,
}
impl From<GpioPin> for mcp230xx::Mcp23017 {
fn from(x: GpioPin) -> Self {
match x {
GpioPin::Led4Green => Self::A0,
GpioPin::Led5Red => Self::A1,
GpioPin::Led6Green => Self::A2,
GpioPin::Led7Red => Self::A3,
GpioPin::Led8Green => Self::A4,
GpioPin::Led9Red => Self::A5,
GpioPin::DetPwrdown0 => Self::A6,
GpioPin::DetPwrdown1 => Self::A7,
GpioPin::AttLe0 => Self::B0,
GpioPin::AttLe1 => Self::B1,
GpioPin::AttLe2 => Self::B2,
GpioPin::AttLe3 => Self::B3,
GpioPin::DdsReset => Self::B4,
GpioPin::AttRstN => Self::B5,
GpioPin::OscEnN => Self::B6,
GpioPin::ExtClkSel => Self::B7,
}
}
}
impl From<GpioPin> for tca9539::Pin {
fn from(x: GpioPin) -> Self {
match x {
GpioPin::Led4Green => Self::P00,
GpioPin::Led5Red => Self::P01,
GpioPin::Led6Green => Self::P02,
GpioPin::Led7Red => Self::P03,
GpioPin::Led8Green => Self::P04,
GpioPin::Led9Red => Self::P05,
GpioPin::DetPwrdown0 => Self::P06,
GpioPin::DetPwrdown1 => Self::P07,
GpioPin::AttLe0 => Self::P10,
GpioPin::AttLe1 => Self::P11,
GpioPin::AttLe2 => Self::P12,
GpioPin::AttLe3 => Self::P13,
GpioPin::DdsReset => Self::P14,
GpioPin::AttRstN => Self::P15,
GpioPin::OscEnN => Self::P16,
GpioPin::ExtClkSel => Self::P17,
}
}
}
#[derive(Debug, Copy, Clone)]
pub enum Error {
Spi,
I2c,
Qspi(hal::xspi::QspiError),
Bounds,
InvalidAddress,
InvalidChannel,
Adc,
InvalidState,
}
impl From<hal::xspi::QspiError> for Error {
fn from(e: hal::xspi::QspiError) -> Error {
Error::Qspi(e)
}
}
/// The numerical value (discriminant) of the Channel enum is the index in the attenuator shift
/// register as well as the attenuator latch enable signal index on the GPIO extender.
#[derive(Debug, Copy, Clone, PartialEq, PartialOrd)]
#[allow(dead_code)]
pub enum Channel {
In0 = 0,
Out0 = 1,
In1 = 2,
Out1 = 3,
}
impl From<Channel> for GpioPin {
fn from(x: Channel) -> Self {
match x {
Channel::In0 => GpioPin::AttLe0,
Channel::Out0 => GpioPin::AttLe1,
Channel::In1 => GpioPin::AttLe2,
Channel::Out1 => GpioPin::AttLe3,
}
}
}
#[derive(Serialize, Deserialize, Copy, Clone, Debug)]
pub struct DdsChannelState {
pub phase_offset: f32,
pub frequency: f32,
pub amplitude: f32,
pub enabled: bool,
}
#[derive(Serialize, Deserialize, Copy, Clone, Debug)]
pub struct ChannelState {
pub parameters: DdsChannelState,
pub attenuation: f32,
}
#[derive(Serialize, Deserialize, Copy, Clone, Debug)]
pub struct InputChannelState {
pub attenuation: f32,
pub power: f32,
pub mixer: DdsChannelState,
}
#[derive(Serialize, Deserialize, Copy, Clone, Debug)]
pub struct OutputChannelState {
pub attenuation: f32,
pub channel: DdsChannelState,
}
#[derive(Serialize, Deserialize, Copy, Clone, Debug)]
pub struct DdsClockConfig {
pub multiplier: u8,
pub reference_clock: f32,
pub external_clock: bool,
}
impl From<Channel> for ad9959::Channel {
/// Translate pounder channels to DDS output channels.
fn from(other: Channel) -> Self {
Self::new(
1 << match other {
Channel::In0 => 1,
Channel::In1 => 3,
Channel::Out0 => 0,
Channel::Out1 => 2,
},
)
}
}
/// A structure for the QSPI interface for the DDS.
pub struct QspiInterface {
pub qspi: hal::xspi::Qspi<hal::stm32::QUADSPI>,
mode: ad9959::Mode,
streaming: bool,
}
impl QspiInterface {
/// Initialize the QSPI interface.
///
/// Args:
/// * `qspi` - The QSPI peripheral driver.
pub fn new(
mut qspi: hal::xspi::Qspi<hal::stm32::QUADSPI>,
) -> Result<Self, Error> {
// This driver only supports operation in 4-bit mode due to bus inconsistencies between the
// QSPI peripheral and the DDS. Instead, we will bit-bang communications in
// single-bit-two-wire to the DDS to configure it to 4-bit operation.
qspi.configure_mode(hal::xspi::QspiMode::FourBit)?;
Ok(Self {
qspi,
mode: ad9959::Mode::SingleBitTwoWire,
streaming: false,
})
}
pub fn start_stream(&mut self) -> Result<(), Error> {
self.qspi.is_busy()?;
// Configure QSPI for infinite transaction mode using only a data phase (no instruction or
// address).
let qspi_regs = unsafe { &*hal::stm32::QUADSPI::ptr() };
qspi_regs.fcr.modify(|_, w| w.ctcf().set_bit());
unsafe {
qspi_regs.dlr.write(|w| w.dl().bits(0xFFFF_FFFF));
qspi_regs.ccr.modify(|_, w| {
w.imode().bits(0).fmode().bits(0).admode().bits(0)
});
}
self.streaming = true;
Ok(())
}
}
impl ad9959::Interface for QspiInterface {
type Error = Error;
/// Configure the operations mode of the interface.
///
/// Args:
/// * `mode` - The newly desired operational mode.
fn configure_mode(&mut self, mode: ad9959::Mode) -> Result<(), Error> {
self.mode = mode;
Ok(())
}
/// Write data over QSPI to the DDS.
///
/// Args:
/// * `addr` - The address to write over QSPI to the DDS.
/// * `data` - The data to write.
fn write(&mut self, addr: Address, data: &[u8]) -> Result<(), Error> {
let addr = addr.raw_value().value();
// The QSPI interface implementation always operates in 4-bit mode because the AD9959 uses
// IO3 as SYNC_IO in some output modes. In order for writes to be successful, SYNC_IO must
// be driven low. However, the QSPI peripheral forces IO3 high when operating in 1 or 2 bit
// modes. As a result, any writes while in single- or dual-bit modes has to instead write
// the data encoded into 4-bit QSPI data so that IO3 can be driven low.
match self.mode {
ad9959::Mode::SingleBitTwoWire => {
// Encode the data into a 4-bit QSPI pattern.
// In 4-bit mode, we can send 2 bits of address and data per byte transfer. As
// such, we need at least 4x more bytes than the length of data. To avoid dynamic
// allocation, we assume the maximum transaction length for single-bit-two-wire is
// 2 bytes.
let mut encoded_data: [u8; 12] = [0; 12];
if (data.len() * 4) > (encoded_data.len() - 4) {
return Err(Error::Bounds);
}
// Encode the address into the first 4 bytes.
for address_bit in 0..8 {
let offset: u8 =
{ if address_bit % 2 != 0 { 4 } else { 0 } };
// Encode MSB first. Least significant bits are placed at the most significant
// byte.
let byte_position = 3 - (address_bit >> 1) as usize;
if addr & (1 << address_bit) != 0 {
encoded_data[byte_position] |= 1 << offset;
}
}
// Encode the data into the remaining bytes.
for byte_index in 0..data.len() {
let byte = data[byte_index];
for bit in 0..8 {
let offset: u8 = { if bit % 2 != 0 { 4 } else { 0 } };
// Encode MSB first. Least significant bits are placed at the most
// significant byte.
let byte_position = 3 - (bit >> 1) as usize;
if byte & (1 << bit) != 0 {
encoded_data
[(byte_index + 1) * 4 + byte_position] |=
1 << offset;
}
}
}
let (encoded_address, encoded_payload) = {
let end_index = (1 + data.len()) * 4;
(encoded_data[0], &encoded_data[1..end_index])
};
self.qspi.write(encoded_address, encoded_payload)?;
Ok(())
}
ad9959::Mode::FourBitSerial => {
if self.streaming {
Err(Error::InvalidState)
} else {
self.qspi.write(addr, data)?;
Ok(())
}
}
_ => Err(Error::InvalidState),
}
}
fn read(&mut self, addr: Address, dest: &mut [u8]) -> Result<(), Error> {
let addr = addr.raw_value().value();
// This implementation only supports operation (read) in four-bit-serial mode.
if self.mode != ad9959::Mode::FourBitSerial {
return Err(Error::InvalidState);
}
self.qspi.read(0x80 | addr, dest)?;
Ok(())
}
}
enum IoExpander {
Mcp(mcp230xx::Mcp230xx<I2c1Proxy, mcp230xx::Mcp23017>),
Pca(tca9539::Pca9539<I2c1Proxy>),
}
impl IoExpander {
fn new(i2c: I2c1Proxy) -> Self {
// Population option on Pounder v1.2 and later.
let mut mcp23017 =
mcp230xx::Mcp230xx::new_default(i2c.clone()).unwrap();
if mcp23017.read(0).is_ok() {
Self::Mcp(mcp23017)
} else {
let pca9359 = tca9539::Pca9539::new_default(i2c).unwrap();
Self::Pca(pca9359)
}
}
/// Set the state (its electrical level) of the given GPIO pin on Pounder.
fn set_gpio_dir(
&mut self,
pin: GpioPin,
dir: mcp230xx::Direction,
) -> Result<(), Error> {
match self {
Self::Mcp(dev) => {
dev.set_direction(pin.into(), dir).map_err(|_| Error::I2c)
}
Self::Pca(dev) => {
let dir = match dir {
mcp230xx::Direction::Output => tca9539::Direction::Output,
_ => tca9539::Direction::Input,
};
dev.set_direction(pin.into(), dir).map_err(|_| Error::I2c)
}
}
}
/// Set the state (its electrical level) of the given GPIO pin on Pounder.
fn set_gpio_level(
&mut self,
pin: GpioPin,
level: mcp230xx::Level,
) -> Result<(), Error> {
match self {
Self::Mcp(dev) => {
dev.set_gpio(pin.into(), level).map_err(|_| Error::I2c)
}
Self::Pca(dev) => {
let level = match level {
mcp230xx::Level::Low => tca9539::Level::Low,
_ => tca9539::Level::High,
};
dev.set_level(pin.into(), level).map_err(|_| Error::I2c)
}
}
}
}
/// A structure containing implementation for Pounder hardware.
pub struct PounderDevices {
io: IoExpander,
lm75: lm75::Lm75<I2c1Proxy, lm75::ic::Lm75>,
attenuator_spi: hal::spi::Spi<hal::stm32::SPI1, hal::spi::Enabled, u8>,
pwr: (
AdcChannel<
'static,
hal::stm32::ADC1,
hal::gpio::gpiof::PF11<hal::gpio::Analog>,
>,
AdcChannel<
'static,
hal::stm32::ADC2,
hal::gpio::gpiof::PF14<hal::gpio::Analog>,
>,
),
aux_adc: (
AdcChannel<
'static,
hal::stm32::ADC3,
hal::gpio::gpiof::PF3<hal::gpio::Analog>,
>,
AdcChannel<
'static,
hal::stm32::ADC3,
hal::gpio::gpiof::PF4<hal::gpio::Analog>,
>,
),
}
impl PounderDevices {
/// Construct and initialize pounder-specific hardware.
///
/// Args:
/// * `i2c` - A Proxy to I2C1.
/// * `attenuator_spi` - A SPI interface to control digital attenuators.
/// * `pwr` - The ADC channels to measure the IN0/1 input power.
/// * `aux_adc` - The ADC channels to measure the ADC0/1 auxiliary input.
pub fn new(
i2c: I2c1Proxy,
attenuator_spi: hal::spi::Spi<hal::stm32::SPI1, hal::spi::Enabled, u8>,
pwr: (
AdcChannel<
'static,
hal::stm32::ADC1,
hal::gpio::gpiof::PF11<hal::gpio::Analog>,
>,
AdcChannel<
'static,
hal::stm32::ADC2,
hal::gpio::gpiof::PF14<hal::gpio::Analog>,
>,
),
aux_adc: (
AdcChannel<
'static,
hal::stm32::ADC3,
hal::gpio::gpiof::PF3<hal::gpio::Analog>,
>,
AdcChannel<
'static,
hal::stm32::ADC3,
hal::gpio::gpiof::PF4<hal::gpio::Analog>,
>,
),
) -> Result<Self, Error> {
let mut devices = Self {
lm75: lm75::Lm75::new(i2c.clone(), lm75::Address::default()),
io: IoExpander::new(i2c.clone()),
attenuator_spi,
pwr,
aux_adc,
};
// Configure power-on-default state for pounder. All LEDs are off, on-board oscillator
// selected and enabled, attenuators out of reset. Note that testing indicates the
// output state needs to be set first to properly update the output registers.
for pin in GpioPin::iter() {
devices.io.set_gpio_level(pin, mcp230xx::Level::Low)?;
devices.io.set_gpio_dir(pin, mcp230xx::Direction::Output)?;
}
devices.reset_attenuators().unwrap();
devices.reset_dds().unwrap();
Ok(devices)
}
/// Sample one of the two auxiliary ADC channels associated with the respective RF input channel.
pub fn sample_aux_adc(&mut self, channel: Channel) -> Result<f32, Error> {
let adc_scale = match channel {
Channel::In0 => self.aux_adc.0.read_normalized().unwrap(),
Channel::In1 => self.aux_adc.1.read_normalized().unwrap(),
_ => return Err(Error::InvalidChannel),
};
// Convert analog percentage to voltage. Note that the ADC uses an external 2.048V analog
// reference.
Ok(adc_scale * 2.048)
}
/// Select external reference clock input.
pub fn set_ext_clk(&mut self, enabled: bool) -> Result<(), Error> {
let level = if enabled {
mcp230xx::Level::High
} else {
mcp230xx::Level::Low
};
// Active low
self.io.set_gpio_level(GpioPin::OscEnN, level)?;
self.io.set_gpio_level(GpioPin::ExtClkSel, level)
}
/// Reset the DDS via the GPIO extender (Pounder v1.2 and later)
pub fn reset_dds(&mut self) -> Result<(), Error> {
// DDS reset (Pounder v1.2 or later)
self.io
.set_gpio_level(GpioPin::DdsReset, mcp230xx::Level::High)?;
// I2C duration of this transaction is long enough (> 5 ยตs) to ensure valid reset.
self.io
.set_gpio_level(GpioPin::DdsReset, mcp230xx::Level::Low)
}
/// Read the temperature reported by the LM75 temperature sensor on Pounder in deg C.
pub fn temperature(&mut self) -> Result<f32, Error> {
self.lm75.read_temperature().map_err(|_| Error::I2c)
}
}
impl PounderDevices {
/// Reset all of the attenuators to a power-on default state.
fn reset_attenuators(&mut self) -> Result<(), Error> {
// Active low
self.io
.set_gpio_level(GpioPin::AttRstN, mcp230xx::Level::Low)?;
self.io
.set_gpio_level(GpioPin::AttRstN, mcp230xx::Level::High)
}
/// Latch a configuration into a digital attenuator.
///
/// Args:
/// * `channel` - The attenuator channel to latch.
fn latch_attenuator(&mut self, channel: Channel) -> Result<(), Error> {
// Rising edge sensitive
// Be robust against initial state: drive low, then high (contrary to the datasheet figure).
self.io
.set_gpio_level(channel.into(), mcp230xx::Level::Low)?;
self.io
.set_gpio_level(channel.into(), mcp230xx::Level::High)
}
/// Read the raw attenuation codes stored in the attenuator shift registers.
///
/// Args:
/// * `channels` - A 4 byte slice to be shifted into the
/// attenuators and to contain the data shifted out.
fn transfer_attenuators(
&mut self,
channels: &mut [u8; 4],
) -> Result<(), Error> {
self.attenuator_spi
.transfer(channels)
.map_err(|_| Error::Spi)?;
Ok(())
}
/// Set the attenuation of a single channel.
///
/// Args:
/// * `channel` - The pounder channel to configure the attenuation of.
/// * `attenuation` - The desired attenuation of the channel in dB. This has a resolution of
/// 0.5dB.
pub fn set_attenuation(
&mut self,
channel: Channel,
attenuation: f32,
) -> Result<f32, Error> {
if !crate::convert::att_is_valid(attenuation) {
return Err(Error::Bounds);
}
// Calculate the attenuation code to program into the attenuator. The attenuator uses a
// code where the LSB is 0.5 dB.
let attenuation_code = (attenuation * 2.0) as u8;
// Read all the channels, modify the channel of interest, and write all the channels back.
// This ensures the staging register and the output register are always in sync.
let mut channels = [0_u8; 4];
self.transfer_attenuators(&mut channels)?;
// The lowest 2 bits of the 8-bit shift register on the attenuator are ignored. Shift the
// attenuator code into the upper 6 bits of the register value. Note that the attenuator
// treats inputs as active-low, so the code is inverted before writing.
channels[channel as usize] = !(attenuation_code << 2);
self.transfer_attenuators(&mut channels)?;
// Finally, latch the output of the updated channel to force it into an active state.
self.latch_attenuator(channel)?;
Ok(attenuation_code as f32 / 2.0)
}
/// Get the attenuation of a channel.
///
/// Args:
/// * `channel` - The channel to get the attenuation of.
///
/// Returns:
/// The programmed attenuation of the channel in dB.
pub fn get_attenuation(&mut self, channel: Channel) -> Result<f32, Error> {
let mut channels = [0_u8; 4];
// Reading the data always shifts data out of the staging registers, so we perform a
// duplicate write-back to ensure the staging register is always equal to the output
// register.
self.transfer_attenuators(&mut channels)?;
self.transfer_attenuators(&mut channels)?;
// The attenuation code is stored in the upper 6 bits of the register, where each LSB
// represents 0.5 dB. The attenuator stores the code as active-low, so inverting the result
// (before the shift) has the affect of transforming the bits of interest (and the
// dont-care bits) into an active-high state and then masking off the don't care bits. If
// the shift occurs before the inversion, the upper 2 bits (which would then be don't
// care) would contain erroneous data.
let attenuation_code = (!channels[channel as usize]) >> 2;
// Convert the desired channel code into dB of attenuation.
Ok(attenuation_code as f32 / 2.0)
}
}
impl PounderDevices {
/// Sample an ADC channel.
///
/// Args:
/// * `channel` - The channel to sample.
///
/// Returns:
/// The sampled voltage of the specified channel.
fn sample_converter(&mut self, channel: Channel) -> Result<f32, Error> {
let adc_scale = match channel {
Channel::In0 => self.pwr.0.read_normalized().unwrap(),
Channel::In1 => self.pwr.1.read_normalized().unwrap(),
_ => return Err(Error::InvalidChannel),
};
// Convert analog percentage to voltage. Note that the ADC uses an external 2.048V analog
// reference.
Ok(adc_scale * 2.048)
}
/// Measure the power of an input channel in dBm.
///
/// Args:
/// * `channel` - The pounder input channel to measure the power of.
///
/// Returns:
/// Power in dBm after the digitally controlled attenuator before the amplifier.
pub fn measure_power(&mut self, channel: Channel) -> Result<f32, Error> {
let analog_measurement = self.sample_converter(channel)?;
// The AD8363 with VSET connected to VOUT provides an output voltage of 51.7 mV/dB at
// 100MHz with an intercept of -58 dBm.
// It is placed behind a 20 dB tap.
Ok(analog_measurement * (1. / 0.0517) + (-58. + 20.))
}
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/bin/dds.rs | src/bin/dds.rs | //! Urukul as a downstream EEM on stabilizer.
//!
//! This requires the alternate direction EEM transceiver configuration.
//! It exposes the Urukul CPLD and DDS settings via miniconf (MQTT and USB).
//!
//! Note that several values are not range checked and out-of-range values
//! will lead to panics.
#![cfg_attr(target_os = "none", no_std)]
#![cfg_attr(target_os = "none", no_main)]
use arbitrary_int::{u2, u5};
use fugit::ExtU32;
use miniconf::Tree;
use rtic_monotonics::Monotonic;
use platform::{AppSettings, NetSettings};
// Root run-time settings tree for the `dds` application.
// NOTE: documentation is added as plain `//` comments (not `///`) because
// `#[tree(meta(doc, typename))]` exports doc comments as run-time metadata;
// `//` keeps the exported metadata unchanged.
#[derive(Clone, Debug, Tree, Default)]
#[tree(meta(doc, typename))]
pub struct Settings {
    // Urukul CPLD and per-channel DDS settings (see `App`).
    urukul: App,
    // Network settings (MAC-derived identity etc., see `NetSettings`).
    net: NetSettings,
}
impl AppSettings for Settings {
fn new(net: NetSettings) -> Self {
Self {
net,
urukul: App::default(),
}
}
fn net(&self) -> &NetSettings {
&self.net
}
}
impl serial_settings::Settings for Settings {
    /// Factory reset: restore defaults for everything except the hardware
    /// MAC address, which seeds the fresh network settings.
    fn reset(&mut self) {
        let mac = self.net.mac;
        self.urukul = App::default();
        self.net = NetSettings::new(mac);
    }
}
// Per-channel settings for one Urukul DDS (AD9912) output.
// NOTE: `//` comments are used instead of `///` since this struct derives
// `Tree` with `meta(doc)`; doc comments would change the exported metadata.
#[derive(Clone, Debug, Tree)]
#[tree(meta(doc, typename))]
pub struct Channel {
    // DDS PLL feedback divider N; `None` bypasses the PLL and clocks the
    // DDS core directly from the (divided) reference clock.
    #[tree(with=miniconf::leaf)]
    pll_n: Option<u5>,
    // Enable the PLL reference input doubler.
    pll_doubler: bool,
    // Output frequency, passed to `ad9912::set_frequency` together with the
    // computed sysclk -- presumably Hz; confirm against the ad9912 driver.
    frequency: f64,
    // Phase offset passed to `ad9912::set_phase` -- units (turns?) TODO
    // confirm against the driver.
    phase: f32,
    // DAC full-scale current, passed to `set_full_scale_current` (default
    // 20e-3 -- presumably amperes).
    full_scale_current: f32,
    // Digital step attenuator setting in dB (converted via
    // `urukul::att_to_mu`).
    attenuation: f32,
    // RF switch state for this channel.
    enable: bool,
    // One-shot flag: when set, the settings task re-programs this channel
    // and clears the flag.
    update: bool,
}
impl Default for Channel {
fn default() -> Self {
Self {
frequency: 0.0,
phase: 0.0,
full_scale_current: 20e-3,
attenuation: 31.5,
enable: false,
pll_n: Some(u5::new(3)),
pll_doubler: false,
update: true,
}
}
}
// Top-level Urukul settings: clock tree configuration plus the four output
// channels.
// NOTE: `//` comments are used instead of `///` since this struct derives
// `Tree` with `meta(doc)`; doc comments would change the exported metadata.
#[derive(Clone, Debug, Tree)]
#[tree(meta(doc, typename))]
pub struct App {
    // Reference clock frequency fed to the selected clock input -- default
    // 100.0e6, presumably Hz.
    refclk: f64,
    // Reference clock source selection (e.g. `Osc` = on-board oscillator);
    // see `urukul::ClkSel`.
    #[tree(with=miniconf::leaf)]
    clk_sel: urukul::ClkSel,
    // Reference clock pre-divider (its `divider()` divides `refclk` before
    // it reaches the DDS).
    #[tree(with=miniconf::leaf)]
    div_sel: urukul::DivSel,
    // One-shot flag: re-program the shared CPLD clock configuration.
    update: bool,
    // Per-channel DDS settings.
    ch: [Channel; 4],
}
impl Default for App {
fn default() -> Self {
Self {
clk_sel: urukul::ClkSel::Osc,
div_sel: urukul::DivSel::One,
update: true,
refclk: 100.0e6,
ch: Default::default(),
}
}
}
/// Host-build entry point (non-embedded targets only): print the default
/// settings as pretty JSON, followed by the JSON schema of the settings
/// tree titled "Stabilizer dds". Used for documentation and tooling, not
/// device operation.
#[cfg(not(target_os = "none"))]
fn main() {
    use miniconf::{json::to_json_value, json_schema::TreeJsonSchema};
    let s = Settings::default();
    // Default settings serialized as JSON.
    println!(
        "{}",
        serde_json::to_string_pretty(&to_json_value(&s).unwrap()).unwrap()
    );
    // JSON schema of the settings tree with a human-readable title.
    let mut schema = TreeJsonSchema::new(Some(&s)).unwrap();
    schema
        .root
        .insert("title".to_string(), "Stabilizer dds".into());
    println!("{}", serde_json::to_string_pretty(&schema.root).unwrap());
}
// RTIC application: owns the hardware and runs the network, settings-apply,
// USB-serial and Ethernet-link service tasks for the Urukul EEM.
#[cfg(target_os = "none")]
#[rtic::app(device = stabilizer::hardware::hal::stm32, peripherals = true, dispatchers=[DCMI, JPEG, LTDC, SDMMC])]
mod app {
    use super::*;
    use stabilizer::hardware::{
        self, SerialTerminal, SystemTimer, Systick, Urukul, UsbDevice, hal,
        net::{NetworkState, NetworkUsers},
    };
    // Resources shared between tasks; every access goes through a lock.
    #[shared]
    struct Shared {
        usb: UsbDevice,
        network: NetworkUsers<App>,
        settings: Settings,
    }
    // Resources owned by exactly one task each.
    #[local]
    struct Local {
        urukul: Urukul,
        usb_terminal: SerialTerminal<Settings>,
    }
    // Bring up clocks/peripherals, verify the Urukul EEM is present, start
    // networking, and spawn the service tasks.
    #[init]
    fn init(c: init::Context) -> (Shared, Local) {
        let clock = SystemTimer::new(|| Systick::now().ticks());
        let (stabilizer, _mezzanine, eem) = hardware::setup::setup::<Settings>(
            c.core,
            c.device,
            clock,
            8,
            1 << 7,
        );
        // This application requires the Urukul EEM; refuse to run without it.
        let stabilizer::hardware::Eem::Urukul(urukul) = eem else {
            panic!("No Urukul detected.")
        };
        let network = NetworkUsers::new(
            stabilizer.network_devices.stack,
            stabilizer.network_devices.phy,
            clock,
            env!("CARGO_BIN_NAME"),
            &stabilizer.settings.net,
            stabilizer.metadata,
        );
        let shared = Shared {
            usb: stabilizer.usb,
            network,
            settings: stabilizer.settings,
        };
        let local = Local {
            urukul,
            usb_terminal: stabilizer.usb_serial,
        };
        // Spawn a settings update for default settings.
        settings_update::spawn().unwrap();
        ethernet_link::spawn().unwrap();
        usb::spawn().unwrap();
        (shared, local)
    }
    // Background loop: poll the network stack and apply settings changes;
    // sleep (WFI) when idle and USB is suspended.
    #[idle(shared=[network, settings, usb])]
    fn idle(mut c: idle::Context) -> ! {
        loop {
            match (&mut c.shared.network, &mut c.shared.settings)
                .lock(|net, settings| net.update(&mut settings.urukul))
            {
                NetworkState::SettingsChanged => {
                    settings_update::spawn().unwrap()
                }
                NetworkState::Updated => {}
                NetworkState::NoChange => {
                    // We can't sleep if USB is not in suspend.
                    if c.shared.usb.lock(|usb| {
                        usb.state()
                            == usb_device::device::UsbDeviceState::Suspend
                    }) {
                        cortex_m::asm::wfi();
                    }
                }
            }
        }
    }
    // Push pending settings to the hardware: CPLD clock config first, then
    // per-channel DDS programming for any channel with `update` set.
    #[task(priority = 1, shared=[settings], local=[urukul])]
    async fn settings_update(mut c: settings_update::Context) {
        let u = c.local.urukul;
        c.shared.settings.lock(|s| {
            let s = &mut s.urukul;
            if s.update {
                s.update = false;
                u.set_cfg(
                    u.cfg().with_clk_sel(s.clk_sel).with_div_sel(s.div_sel),
                )
                .unwrap();
            }
            // Baseline power-down word; the PLL power bit is toggled per
            // channel below depending on `pll_n`.
            let power = ad9912::Power::builder()
                .with_digital_pd(false)
                .with_full_pd(false)
                .with_pll_pd(true)
                .with_output_doubler_en(false)
                .with_cmos_en(false)
                .with_hstl_pd(true)
                .build();
            for (i, ch) in s.ch.iter_mut().enumerate() {
                if ch.update {
                    ch.update = false;
                    let refclk = s.refclk / s.div_sel.divider() as f64;
                    let i = u2::new(i as _);
                    // With a PLL divider configured, enable and program the
                    // PLL and derive sysclk from it; otherwise run the DDS
                    // core directly from the divided reference.
                    let sysclk = if let Some(pll_n) = ch.pll_n {
                        u.dds(i).set_power(power.with_pll_pd(false)).unwrap();
                        u.dds(i).set_ndiv(pll_n).unwrap();
                        let mut pll = ad9912::Pll::default()
                            .with_charge_pump(ad9912::ChargePump::Ua375)
                            .with_ref_doubler(ch.pll_doubler);
                        let sysclk = pll.set_refclk(pll_n, refclk);
                        u.dds(i).set_pll(pll).unwrap();
                        sysclk
                    } else {
                        u.dds(i).set_power(power.with_pll_pd(true)).unwrap();
                        refclk
                    };
                    u.dds(i).set_frequency(ch.frequency, sysclk).unwrap();
                    u.dds(i).set_phase(ch.phase).unwrap();
                    // Latch frequency/phase into the DDS core.
                    u.io_update().unwrap();
                    u.dds(i)
                        .set_full_scale_current(ch.full_scale_current, 10e3)
                        .unwrap();
                    u.set_att(i, urukul::att_to_mu(ch.attenuation)).unwrap();
                    u.set_rf_sw(i, ch.enable).unwrap();
                }
            }
        });
    }
    // Service the USB device and the serial settings terminal every 10 ms;
    // re-apply settings when the terminal changed them.
    #[task(priority = 1, shared=[usb, settings], local=[usb_terminal])]
    async fn usb(mut c: usb::Context) -> ! {
        loop {
            c.shared.usb.lock(|usb| {
                usb.poll(&mut [c
                    .local
                    .usb_terminal
                    .interface_mut()
                    .inner_mut()]);
            });
            c.shared.settings.lock(|settings| {
                if c.local.usb_terminal.poll(settings).unwrap() {
                    settings_update::spawn().unwrap()
                }
            });
            Systick::delay(10.millis()).await;
        }
    }
    // Poll the Ethernet PHY link state once per second.
    #[task(priority = 1, shared=[network])]
    async fn ethernet_link(mut c: ethernet_link::Context) -> ! {
        loop {
            c.shared.network.lock(|net| net.processor.handle_link());
            Systick::delay(1.secs()).await;
        }
    }
    // Ethernet interrupt: forward to the HAL handler.
    #[task(binds = ETH, priority = 1)]
    fn eth(_: eth::Context) {
        unsafe { hal::ethernet::interrupt_handler() }
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/bin/fls.rs | src/bin/fls.rs | #![cfg_attr(target_os = "none", no_std)]
#![cfg_attr(target_os = "none", no_main)]
//! Patent pending: DE102021112017A1
//!
//! # Algorithm description
//!
//! This application can be understood as a universal phase (frequency)
//! signal processor. It determines the phase (we will drop frequency
//! from now on as in a phase-aware system frequency is merely the
//! difference between successive phases) of an RF input signal and
//! emits an RF output signal with a phase that depends on the input
//! phase. The transfer function between input and output phase is a
//! sequence of various types of filters (analog RC, digital FIR, IIR,
//! unwrapping, scaling, clipping) designed to implement either
//! high-quality phase measurements or a certain constrained and
//! somewhat exotic phase locked loop that is highly applicable
//! to the task of stabilizing the arm length of an optical Michelson
//! interferometer which in turn occurs when stabilizing the effective
//! path length of an optical frequency transmission system.
//!
//! The sequence of processing steps is as follows. Analyzing its
//! application in the context of optical path length stabilization,
//! including laser sources, optical modulators, and photodetectors,
//! is left as an exercise for the user.
//!
//! ## PLL path
//!
//! * DDS locks its sysclk (500 MHz) to XO or external ref
//! * DDS emits SYNC signal at sysclk/4
//! * Prescaler 1/4 (in CPU)
//! * Drives CPU timer counter
//! * Counter is captured once per batch (based on CPU clock).
//! See [stabilizer::hardware::pounder::timestamp].
//! * Digital PLL reconstructs SYNC frequency and phase (thus sysclk)
//! w.r.t. batch and sample frequency and phase.
//! This determines the relation of the CPU 8 MHz crystal (thus CPU
//! clock and timers) to the DDS clock (derived from an external reference
//! frequency or internal XCO). See [idsp::PLL].
//!
//! ## Signal path
//!
//! * RF signal enters Pounder at Pounder IN0
//! * Adjustable attenuation `demod_att`.
//! * 30 dB gain block
//! * Mixing with DDS at `demod_freq`
//! * RC lowpass and amplification to reject unwanted demodulation products and
//! harmonics
//! * IF signal enters Stabilizer and is available at ADC0 for analog monitoring
//! * 2x PGIA and AA filter on Stabilizer
//! * ADC digitization at 1/1.28 µs interval
//! * Data processing in batches of 8 samples
//! * Digital mixing with the reconstructed sample phase (PLL path). See [idsp::Lockin].
//! * Lowpass filtering with a second order (12 dB/octave)
//! IIR lowpass with an additional double zero at Nyquist. Adjustable corner frequency.
//! See [idsp::Lowpass]
//! * Full rate baseband demodulated data (quadrature only) on DAC0
//! * Lowpass filtering with a batch-size boxcar FIR filter (zeros at n/4 Nyquist)
//! * Computation of signal power and phase. See [idsp::ComplexExt].
//! * Fractional rescaling (`phase_scale`) and unwrapping of the phase with 32 bit turn range.
//! * Scaling and clamping.
//! * Filtering by a second order (biquad) IIR filter (supporting e.g. II, I, P
//! action). See [idsp::iir].
//! * Clamping, output offset, and anti-windup. See [idsp::iir].
//! * Feedback onto a frequency offset of the modulation DDS at `mod_freq`
//! * Additional feedback path from the phase before unwrapping onto the
//! modulation DDS phase offset with an adjustable gain `pow_gain`
//! * Adjustable DDS output amplitude and blanking on digital input
//! * Adjustable modulation attenuation `mod_att`
//! * Modulation output at Pounder OUT0
//!
//! # Telemetry
//! Data is regularly published via MQTT. See [Telemetry].
//!
//! # Streaming
//! Full-rate ADC and DAC data is available via configurable UDP data streaming.
//! See [stream]. To view and analyze noise spectra the graphical application
//! [`stabilizer-stream`](https://github.com/quartiq/stabilizer-stream) can be used.
use ad9959::Acr;
use arbitrary_int::{u14, u24};
use idsp::{
Accu, Complex, ComplexExt, Filter, Lockin, Lowpass, PLL, Unwrapper, iir,
};
use miniconf::Tree;
use platform::NetSettings;
use serde::{Deserialize, Serialize};
use stabilizer::{
convert::{DacCode, Gain},
statistics,
};
/// Sample and batch period configuration.
/// Note that both `SAMPLE_TICKS_LOG2` and `BATCH_SIZE_LOG2` are implicitly used in the
/// lockin harmonic computation below. Do not change them without accounting for that.
const SAMPLE_TICKS_LOG2: u32 = 7;
const BATCH_SIZE_LOG2: usize = 3;
/// ADC and DAC sample rate in timer cycles. One timer cycle at 100 MHz is 10 ns.
const SAMPLE_TICKS: u32 = 1 << SAMPLE_TICKS_LOG2; // 1.28 µs
/// ADC/DAC Samples per batch. The [app::process] routine is invoked once per batch period
/// and has access to the two (both channels) filled buffers of ADC samples from the
/// previous batch period and to the two to-be-filled buffers of DAC samples that will
/// be emitted in the next batch period.
const BATCH_SIZE: usize = 1 << BATCH_SIZE_LOG2;
// Delta FTW between the two DDS: DF
// Timestamp counter wrap period in DDS clock cycles:
// 1 << (2 (dds SYNC prescaler) + 2 (timer prescaler) + 16 (timer counter width))
// Lockin demodulation period in DDS clock cycles: (1 << 32) / DF
// Counter capture period in samples (also batch size): 1 << 3
//
// DDS clock interval t_dds = 2 ns
// Lockin period
// t_lo = 1/(f_b - f_a) = (1 << 32)*t_dds/DF
// SYNC interval
// t_sync = t_dds*psc_dds*psc_tim
// CPU timer clock interval:
// t_cpu = 10 ns
// Batch interval:
// t_batch = t_cpu*128*8
// Timestamper increment:
// dt_sync = t_batch/t_sync = t_cpu*128*8/(t_dds*4*4) = t_cpu/t_dds*64
// Sample interval
// t_sample = t_batch/n_batch = dt_sync*t_sync/n_batch = dt_sync*t_dds*2
// Sample phase increment
// dp_sample = t_sample/t_lo*(1 << 32) = dt_sync*2*DF
// Ratio between sample phase increment and timestamper increment
// harmonic_sample = dp_sample/dt_sync = DF << 1
// Scaling factor (harmonic) to convert PLL frequency to lockin LO frequency.
// NOTE(review): the 2 + 2 terms match the SYNC and timer prescalers in the
// derivation above; confirm the 14 against that derivation before changing.
const MULT_SHIFT: u32 = 2 + 2 + 14 - BATCH_SIZE_LOG2 as u32;
// Phase scale for fine phase offset, such that 1 is one DDS LSB.
const PHASE_SCALE_SHIFT: u32 = 12;
// Default modulation/demodulation frequency for characterization.
// High CTZ has fewest DDS phase truncation spurs. Near 160 MHz.
const F_DEMOD: u32 = 0x5200_0000;
/// Run-time configurable biquad: a serializable representation (`repr`),
/// the built fixed-point/float filter (`iir`), and the sample period and
/// scaling used when building it.
#[derive(Clone, Debug, Tree)]
pub struct BiquadRepr<T>
where
    T: idsp::Coefficient
        + num_traits::AsPrimitive<f32>
        + num_traits::AsPrimitive<T>,
    f32: num_traits::AsPrimitive<T>,
{
    // Order matters
    /// Biquad representation type
    #[tree(rename="typ", typ="&str", with=miniconf::str_leaf, defer=self.repr)]
    _typ: (),
    /// Biquad parameters
    /// Biquad representation subtree access
    repr: iir::BiquadRepr<f32, T>,
    /// Update trigger. TODO: Needs explicit trigger for serial-settings
    #[tree(rename="update", with=biquad_update, defer=*self)]
    _update: (),
    /// Built raw IIR
    #[tree(skip)]
    iir: iir::Biquad<T>,
    // Sample period passed to `BiquadRepr::build` (seconds).
    #[tree(skip)]
    period: f32,
    // Input (b-coefficient) scale passed to `build`.
    #[tree(skip)]
    b_scale: f32,
    // Output scale passed to `build`.
    #[tree(skip)]
    y_scale: f32,
}
/// Custom miniconf leaf accessor for the `update` node of [BiquadRepr]:
/// the leaf itself carries no data (unit), but any successful write
/// rebuilds the raw biquad from `repr` with the stored period and scales.
mod biquad_update {
    use super::BiquadRepr;
    use miniconf::{Keys, SerdeError, leaf};
    pub use miniconf::{
        deny::{mut_any_by_key, ref_any_by_key},
        leaf::SCHEMA,
    };
    use serde::{Deserialize, Deserializer, Serializer};
    /// Read back as a unit value; there is no state to report.
    pub fn serialize_by_key<S, T>(
        _value: &BiquadRepr<T>,
        keys: impl Keys,
        ser: S,
    ) -> Result<S::Ok, SerdeError<S::Error>>
    where
        S: Serializer,
        T: idsp::Coefficient
            + num_traits::AsPrimitive<f32>
            + num_traits::AsPrimitive<T>,
        f32: num_traits::AsPrimitive<T>,
    {
        leaf::serialize_by_key(&(), keys, ser)
    }
    /// Consume the written unit value, then rebuild `value.iir` from the
    /// current representation and scaling.
    pub fn deserialize_by_key<'de, D, T>(
        value: &mut BiquadRepr<T>,
        keys: impl Keys,
        de: D,
    ) -> Result<(), SerdeError<D::Error>>
    where
        D: Deserializer<'de>,
        T: idsp::Coefficient
            + num_traits::AsPrimitive<f32>
            + num_traits::AsPrimitive<T>,
        f32: num_traits::AsPrimitive<T>,
    {
        leaf::deserialize_by_key(&mut (), keys, de)?;
        value.iir =
            value
                .repr
                .build::<f32>(value.period, value.b_scale, value.y_scale);
        Ok(())
    }
    /// Schema probing: forward to the unit leaf.
    #[allow(clippy::extra_unused_type_parameters)]
    pub fn probe_by_key<'de, T, D>(
        keys: impl Keys,
        de: D,
    ) -> Result<(), SerdeError<D::Error>>
    where
        T: Deserialize<'de>,
        D: Deserializer<'de>,
    {
        leaf::probe_by_key::<'_, T, _>(keys, de)
    }
}
impl<T> Default for BiquadRepr<T>
where
    T: idsp::Coefficient
        + num_traits::AsPrimitive<f32>
        + num_traits::AsPrimitive<T>,
    f32: num_traits::AsPrimitive<T>,
{
    /// Identity (pass-through) filter with unit period and unit scales.
    fn default() -> Self {
        Self {
            _typ: (),
            repr: iir::BiquadRepr::Raw(iir::Biquad::IDENTITY),
            _update: (),
            iir: iir::Biquad::IDENTITY,
            period: 1.0,
            b_scale: 1.0,
            y_scale: 1.0,
        }
    }
}
/// Settings for one DDS tone (frequency, attenuation, phase). Used both for
/// a channel's modulation output and its demodulation input.
#[derive(Clone, Debug, Deserialize, Serialize, Tree)]
struct DdsSettings {
    /// RF output (modulation) or input (demodulation) offset frequency tuning word.
    /// The DDS sample clock is nominally 500 MHz.
    ///
    /// # Value
    /// Modulation/demodulation frequency tuning word (32 bit).
    /// Range [0, 0xffff_ffff]
    ///
    /// # Default
    /// A `0x5200_0000` tuning word corresponds to close to 160 MHz.
    freq: u32,
    /// Modulation/demodulation RF attenuation.
    ///
    /// # Value
    /// Attenuation in dB, Range [0, 31.5]
    ///
    /// # Default
    /// 6 dB output attenuation, 31.5 dB input attenuation
    #[tree(with=validate_att)]
    att: f32,
    /// Modulation/demodulation phase offset.
    ///
    /// # Value
    /// Phase offset in machine units. NOTE(review): the field is 14 bit
    /// (`u14`) although this doc previously said 16 bit -- confirm which
    /// is intended.
    ///
    /// # Default
    /// 0
    #[tree(with=miniconf::leaf)]
    phase: u14,
}
/// Custom miniconf leaf accessor that range-checks attenuation writes,
/// rejecting values outside the 0..=31.5 dB hardware range before they
/// reach the stored setting.
mod validate_att {
    use miniconf::ValueError;
    pub use miniconf::{
        Keys, SerdeError,
        deny::mut_any_by_key,
        leaf::{self, SCHEMA, probe_by_key, ref_any_by_key, serialize_by_key},
    };
    use serde::Deserializer;
    /// Deserialize into a temporary, validate, and only then commit to
    /// `value` -- the stored setting is untouched on error.
    pub fn deserialize_by_key<'de, D: Deserializer<'de>>(
        value: &mut f32,
        keys: impl Keys,
        de: D,
    ) -> Result<(), SerdeError<D::Error>> {
        let mut att = *value;
        leaf::deserialize_by_key(&mut att, keys, de)?;
        if !stabilizer::convert::att_is_valid(att) {
            Err(ValueError::Access("Attenuation out of range (0..=31.5 dB)")
                .into())
        } else {
            *value = att;
            Ok(())
        }
    }
}
/// Lockin lowpass: second order IIR cascade, 12 dB/octave (see `lockin_k`).
type LockinLowpass = Lowpass<2>;
/// Per-RF-channel settings: modulation/demodulation DDS configuration,
/// lockin filtering, and the phase/amplitude feedback chains.
#[derive(Clone, Debug, Tree)]
struct ChannelSettings {
    /// Input (demodulation) DDS settings
    /// Feedback to stabilize the RF input phase is applied to the RF output
    /// on top of the output frequency and phase.
    ///
    /// For the demodulation this is the total (DDS **minus** Lockin, i.e. lower sideband)
    /// demodulation frequency. If the modulation AOM is passed twice at +1 order,
    /// `input/freq` should be twice `output/freq`.
    input: DdsSettings,
    /// Output (modulation) DDS settings
    output: DdsSettings,
    /// Demodulation amplitude control register.
    ///
    /// # Value
    /// AD9959 amplitude control register (24 bits, see datasheet)
    ///
    /// # Default
    /// 0 for full scale amplitude and multiplier disable
    #[tree(with=miniconf::leaf)]
    amp: u24,
    /// Lockin lowpass time constant. The lowpass is a cascade of one second order IIR
    /// filters, 12 dB/octave.
    /// This needs to be high enough to suppress the unwanted demodulation components
    /// and harmonics but as low as possible to maximize bandwidth. Many demodulation
    /// components and harmonics are also suppressed by the zeros of the batch size
    /// moving average FIR filter and judicious choice of `lockin_freq`.
    ///
    /// TODO: settle pll and lockin settings into design after confirming optimal choice
    ///
    /// # Default
    /// `lockin_k = [0x200_0000, -0x2000_0000]`
    #[tree(with=miniconf::leaf)]
    lockin_k: <LockinLowpass as Filter>::Config,
    /// Minimum demodulated signal power to enable feedback.
    /// Note that this is RMS and that the signal peak must not clip.
    ///
    /// # Value
    /// `log2` of the signal power relative to full scale. Range: `[-63..0]`
    ///
    /// # Default
    /// `min_power = -24` corresponding to about -69 dBFS.
    min_power: i32,
    /// Clear the phase unwrap tracking counters once.
    /// To make this setting edge-sensitive, after setting it to `true`,
    /// it must be reset to `false` by the user before setting any other settings.
    clear: bool,
    /// Scaling factor of the unwrapped phase and fine rational offset.
    /// The phase scaling is located after the phase unwrapping before the feedback
    /// IIR filter.
    ///
    /// FIXME: doc rational offset
    ///
    /// # Value
    /// `[[phase_factor, phase_shr], [time_factor, time_shr]]`
    ///
    /// # Default
    /// `phase_scale = [[1, 16], [0, 0]]`:
    /// clamped range: ±33 k turn (tracked range is ±2 G turn)
    /// quantization: 1.5 µ turn, 0.1 Hz
    #[tree(with=phase_scale, defer=*self)]
    phase_scale: [[i32; 2]; 2],
    /// Feedback IIR filter settings. The filter input is phase, the output is frequency.
    ///
    /// # Default
    /// A proportional gain=-1 filter.
    iir: BiquadRepr<i32>,
    /// Phase offset feedback gain.
    /// Phase feedback is a proportional bypass of the unwrapper, the IIR
    /// (including its input and output scaling) and the frequency feedback path.
    /// The phase offset gain is `pow_gain/(1 << 13) rad/rad`.
    ///
    /// # Value
    /// Integer scaled phase feedback gain. Range: `[-0x2000, 0x2000]`
    ///
    /// # Default
    /// 0 for no phase feedback
    pow_gain: i16,
    /// Allow digital input to hold
    hold_en: bool,
    /// Amplitude IIR filter. The filter input is squared magnitude, the output is DDS amplitude.
    ///
    /// # Default
    /// No feedback
    iir_amp: BiquadRepr<f32>,
}
/// Frequency tuning word LSBs per Hz: 2^32 / f_sysclk. Used as the output
/// scale of the frequency-feedback IIR and for the phase scaling.
const DDS_LSB_PER_HZ: f32 = (1i64 << 32) as f32
    / stabilizer::design_parameters::DDS_SYSTEM_CLK.to_Hz() as f32;
impl Default for ChannelSettings {
    /// Defaults: demodulation/modulation at `F_DEMOD`, feedback IIR as a
    /// proportional gain -1 filter, amplitude feedback disabled.
    fn default() -> Self {
        // Proportional gain -1: identity biquad with negated b0, clamped to
        // ±0x4_0000 (output authority limit, in DDS frequency LSBs).
        let mut iir_prop = iir::Biquad::IDENTITY;
        iir_prop.ba_mut()[0] *= -1;
        iir_prop.set_min(-0x4_0000);
        iir_prop.set_max(0x4_0000);
        // Amplitude feedback off: static output offset 0x3ff (presumably the
        // 10 bit full-scale amplitude word -- confirm), clamped to [0, 0x3ff].
        let mut iir_amp = iir::Biquad::default();
        iir_amp.set_u(0x3ff as _);
        iir_amp.set_min(0.0);
        iir_amp.set_max(0x3ff as _);
        let mut s = Self {
            input: DdsSettings {
                freq: F_DEMOD,
                att: 31.5,
                phase: u14::new(0),
            },
            output: DdsSettings {
                freq: F_DEMOD,
                att: 6.0,
                phase: u14::new(0),
            },
            lockin_k: [-(i32::MIN >> 6), i32::MIN >> 2],
            amp: u24::new(0),
            min_power: -24,
            clear: true,
            phase_scale: [[1, 16], [0, 0]],
            iir: BiquadRepr {
                repr: iir::BiquadRepr::Raw(iir_prop.clone()),
                iir: iir_prop,
                // The feedback IIR runs once per batch.
                period: stabilizer::design_parameters::TIMER_PERIOD
                    * (SAMPLE_TICKS * BATCH_SIZE as u32) as f32,
                y_scale: DDS_LSB_PER_HZ,
                ..Default::default()
            },
            pow_gain: 0,
            hold_en: false,
            iir_amp: BiquadRepr {
                repr: iir::BiquadRepr::Raw(iir_amp.clone()),
                iir: iir_amp.clone(),
                // 10 ms period -- presumably the amplitude loop update rate;
                // confirm against the task that runs it.
                period: 10e-3,
                b_scale: iir_amp.max(),
                y_scale: iir_amp.max(),
                ..Default::default()
            },
        };
        // Derive `iir.b_scale` from the default `phase_scale`.
        s.update_phase_scale();
        s
    }
}
/// Custom miniconf accessor for `ChannelSettings::phase_scale`: forwards
/// (de)serialization to the raw array leaf and recomputes the derived IIR
/// input scale after every successful write.
mod phase_scale {
    use super::ChannelSettings;
    pub use miniconf::{
        Keys, SerdeError,
        deny::{mut_any_by_key, ref_any_by_key},
        leaf::{self, SCHEMA},
    };
    use serde::{Deserialize, Deserializer, Serializer};
    /// Serialize the raw `[[factor, shr]; 2]` array.
    pub fn serialize_by_key<S: Serializer>(
        value: &ChannelSettings,
        keys: impl Keys,
        ser: S,
    ) -> Result<S::Ok, SerdeError<S::Error>> {
        leaf::serialize_by_key(&value.phase_scale, keys, ser)
    }
    /// Deserialize the array and keep the derived scaling in sync.
    pub fn deserialize_by_key<'de, D: Deserializer<'de>>(
        value: &mut ChannelSettings,
        keys: impl Keys,
        de: D,
    ) -> Result<(), SerdeError<D::Error>> {
        leaf::deserialize_by_key(&mut value.phase_scale, keys, de)?;
        value.update_phase_scale();
        Ok(())
    }
    /// Schema probing: forward to the array leaf.
    pub fn probe_by_key<'de, T: Deserialize<'de>, D: Deserializer<'de>>(
        keys: impl Keys,
        de: D,
    ) -> Result<(), SerdeError<D::Error>> {
        leaf::probe_by_key::<'de, T, _>(keys, de)
    }
}
impl ChannelSettings {
    /// Recompute the feedback IIR input scale from `phase_scale` so the
    /// filter maps scaled phase (turns) to DDS frequency LSBs.
    fn update_phase_scale(&mut self) {
        // Units: [x] = turns, [y] = Hz
        // TODO: verify
        // NOTE(review): `factor << (32 - shr)` panics (debug builds) for
        // shr == 0 or shr > 32 -- confirm `phase_scale[0][1]` is
        // range-limited by its writers.
        let phase_lsb_per_turn =
            (self.phase_scale[0][0] << (32 - self.phase_scale[0][1])) as f32;
        self.iir.b_scale = DDS_LSB_PER_HZ / phase_lsb_per_turn;
    }
}
/// Settings structure for the application.
/// All fields in this structure are available through MQTT and can be configured at runtime.
#[derive(Clone, Debug, Tree)]
pub struct Fls {
    /// Channel-specific settings.
    ch: [ChannelSettings; 2],
    /// External reference
    ///
    /// # Value
    /// `true` for external 100 MHz reference input selected,
    /// `false` for internal 100 MHz XO enabled and selected
    ///
    /// # Default
    /// `false`
    ext_clk: bool,
    /// Lockin local oscillator frequency tuning word. Common to both demodulation/input
    /// channels.
    ///
    /// The demodulation DDS frequency tuning word
    /// is `/ch/+/input/freq + lockin_freq*0x8000` (lower sideband).
    ///
    /// TODO: settle pll and lockin settings into design after confirming optimal choice
    ///
    /// # Default
    /// `0x40` corresponding to 244 kHz. 5/8 Nyquist.
    lockin_freq: u32,
    /// Lockin demodulation oscillator PLL bandwidth.
    /// This PLL reconstructs the DDS SYNC clock output on the CPU clock timescale.
    ///
    /// TODO: settle pll and lockin settings into design after confirming optimal choice
    ///
    /// # Default
    /// `/pll_k = 0x4_0000` corresponds to a time constant of about 0.4 s.
    pll_k: i32,
    /// Telemetry output period in seconds
    ///
    /// # Default
    /// 10 second interval (NOTE(review): this doc previously said 2 s; the
    /// `Default` impl sets 10)
    telemetry_period: u16,
    /// Target for data streaming
    ///
    /// # Default
    /// Streaming disabled
    #[tree(with=miniconf::leaf)]
    stream: stream::Target,
}
impl Default for Fls {
fn default() -> Self {
Self {
ch: Default::default(),
ext_clk: false,
lockin_freq: 0x40,
pll_k: 0x4_0000,
telemetry_period: 10,
stream: Default::default(),
}
}
}
/// Combined settings root for the `fls` application.
#[derive(Clone, Debug, Tree, Default)]
pub struct Settings {
    /// Application settings subtree.
    pub fls: Fls,
    /// Network settings (see `NetSettings`).
    pub net: NetSettings,
}
impl platform::AppSettings for Settings {
fn new(net: NetSettings) -> Self {
Self {
net,
fls: Default::default(),
}
}
fn net(&self) -> &NetSettings {
&self.net
}
}
impl serial_settings::Settings for Settings {
fn reset(&mut self) {
*self = Self {
fls: Default::default(),
net: NetSettings::new(self.net.mac),
}
}
}
/// Stream data format.
/// One record per channel per batch; `repr(C)` + `Pod` so records can be
/// copied byte-wise into stream frames.
#[derive(
    Clone, Copy, Debug, Default, Serialize, bytemuck::Zeroable, bytemuck::Pod,
)]
#[repr(C)]
struct Stream {
    /// Demodulated signal. `-1 << 31` corresponds to negative full scale.
    demod: Complex<i32>,
    /// Current number of phase wraps. In units of turns.
    phase: [i32; 2],
    /// Current frequency tuning word added to the configured modulation
    /// offset `mod_freq`.
    delta_ftw: i32,
    /// Current phase offset word applied to the modulation DDS.
    delta_pow: i16,
    /// Modulation DDS amplitude word
    mod_amp: u16,
    /// PLL tracking word. Note: the `process` task stores `pll.frequency()`
    /// in the channel-0 record and `pll.phase()` in the channel-1 record.
    pll: u32,
}
/// Channel Telemetry
#[derive(Default, Clone, Serialize)]
struct ChannelTelemetry {
    /// Current phase. Offset and scaled.
    phase: i64,
    /// Power estimate, `|demod|²` re full scale.
    power_log: i32,
    ///
    // power: i32,
    /// Auxiliary front panel ADC input values, undersampled
    aux_adc: f32,
    /// Current modulation DDS amplitude word.
    mod_amp: u16,
    /// Number of samples where the digital input signal was high.
    holds: u32,
    /// Number of potential phase slips where the absolute
    /// phase difference between successive samples is larger than π/2.
    slips: u32,
    /// Counter for the number of samples with low power.
    blanks: u32,
}
/// Raw telemetry state accumulated by the processing task and converted to
/// [CookedTelemetry] for publication.
#[derive(Default, Clone)]
pub struct Telemetry {
    // Accumulated PLL time (per-batch `pll.frequency()` increments).
    pll_time: i64,
    // Per-channel counters and latest values.
    ch: [ChannelTelemetry; 2],
    // Phase statistics accumulators.
    stats: [statistics::State; 2],
}
/// Telemetry structure.
/// This structure is published via MQTT at the `telemetry_period` configured in
/// [Settings].
/// There is no dedicated AA filtering for telemetry data (except for `stats`),
/// it is just decimated by the telemetry interval. Use streaming for full
/// bandwidth data.
#[derive(Default, Clone, Serialize)]
pub struct CookedTelemetry {
    /// PLL time
    /// DDS PLL time as seen by CPU (sample) clock.
    /// Settles increments of approximately `0x140_0000`.
    pll_time: i64,
    /// Statistics of scaled (settings.phase_scale) phase including wraps.
    /// Phase statistics state. Each message corresponds to the statistics of the
    /// phase data since the last message.
    phase: [statistics::ScaledStatistics; 2],
    /// RF power in dBm as reported by the RF detector and ADC. Functionality
    /// limited. <https://github.com/sinara-hw/Pounder/issues/95>
    rf_power: [f32; 2],
    /// Raw (binary) channel telemetry, mostly "stateful"
    raw: [ChannelTelemetry; 2],
    /// Channel frequency estimate (PI counter between telemetry messages)
    /// TODO: deprecate
    ch_freq: [f64; 2],
    /// Pounder board temperature
    temp: f32,
}
/// Per-channel DSP state retained across batches.
#[derive(Clone, Default)]
pub struct ChannelState {
    // Lockin demodulator (mixing + IIR lowpass) state.
    lockin: Lockin<LockinLowpass>,
    // Previous phase sample -- NOTE(review): semantics not visible in this
    // chunk (used further down in `process`); confirm.
    x0: i32,
    // Previous coarse PLL phase snapshot (quantized by PHASE_SCALE_SHIFT).
    t0: i32,
    // Accumulated time -- NOTE(review): confirm semantics against `process`.
    t: i64,
    // Accumulated output -- NOTE(review): confirm semantics against `process`.
    y: i64,
    // Phase unwrapping state (tracks wraps beyond one turn).
    unwrapper: Unwrapper<i64>,
    // Frequency-feedback biquad delay-line state.
    iir: [i32; 5],
    // Amplitude-feedback biquad delay-line state.
    iir_amp: [f32; 4],
    // Digital-input hold latch.
    hold: bool,
}
/// Host-build entry point (non-embedded targets only): print the default
/// settings as pretty JSON, followed by the JSON schema of the settings
/// tree titled "Stabilizer fls". Used for documentation and tooling.
#[cfg(not(target_os = "none"))]
fn main() {
    use miniconf::{json::to_json_value, json_schema::TreeJsonSchema};
    let s = Settings::default();
    // Default settings serialized as JSON.
    println!(
        "{}",
        serde_json::to_string_pretty(&to_json_value(&s).unwrap()).unwrap()
    );
    // JSON schema of the settings tree with a human-readable title.
    let mut schema = TreeJsonSchema::new(Some(&s)).unwrap();
    schema
        .root
        .insert("title".to_string(), "Stabilizer fls".into());
    println!("{}", serde_json::to_string_pretty(&schema.root).unwrap());
}
#[cfg(target_os = "none")]
#[cfg_attr(target_os = "none", rtic::app(device = stabilizer::hardware::hal::stm32, peripherals = true, dispatchers=[DCMI, JPEG, LTDC, SDMMC]))]
mod app {
use arbitrary_int::u10;
use core::sync::atomic::{Ordering, fence};
use fugit::ExtU32 as _;
use rtic_monotonics::Monotonic;
use stabilizer::hardware::{
self,
DigitalInput0,
DigitalInput1,
SerialTerminal,
SystemTimer,
Systick,
UsbDevice,
adc::{Adc0Input, Adc1Input},
dac::{Dac0Output, Dac1Output},
hal,
net::{NetworkState, NetworkUsers},
// afe::Gain,
pounder::{
Channel, PounderDevices, dds_output::DdsOutput,
timestamp::Timestamper,
},
timers::SamplingTimer,
};
use stream::FrameGenerator;
use super::*;
#[shared]
struct Shared {
usb: UsbDevice,
network: NetworkUsers<Fls>,
active_settings: Fls,
settings: Settings,
telemetry: Telemetry,
dds_output: DdsOutput,
pounder: PounderDevices,
state: [ChannelState; 2],
}
#[local]
struct Local {
usb_terminal: SerialTerminal<Settings>,
sampling_timer: SamplingTimer,
digital_inputs: (DigitalInput0, DigitalInput1),
adcs: (Adc0Input, Adc1Input),
dacs: (Dac0Output, Dac1Output),
generator: FrameGenerator,
timestamper: Timestamper,
stream: [Stream; 2],
tele_state: [i64; 3],
pll: PLL,
}
#[init]
fn init(c: init::Context) -> (Shared, Local) {
let clock = SystemTimer::new(|| Systick::now().ticks());
// Configure the microcontroller
let (mut carrier, mezzanine, _eem) = hardware::setup::setup::<Settings>(
c.core,
c.device,
clock,
BATCH_SIZE,
SAMPLE_TICKS,
);
let mut network = NetworkUsers::new(
carrier.network_devices.stack,
carrier.network_devices.phy,
clock,
env!("CARGO_BIN_NAME"),
&carrier.settings.net,
carrier.metadata,
);
let generator = network.configure_streaming(stream::Format::Fls);
// ADC0 full scale 5V
carrier.afes[0].set_gain(Gain::G2);
carrier.afes[1].set_gain(Gain::G2);
let hardware::setup::Mezzanine::Pounder(mut pounder) = mezzanine else {
panic!("Missing Pounder Mezzanine");
};
pounder.timestamper.start();
// Enable ADC/DAC events
carrier.adcs.0.start();
carrier.adcs.1.start();
carrier.dacs.0.start();
carrier.dacs.1.start();
let shared = Shared {
usb: carrier.usb,
network,
telemetry: Telemetry::default(),
active_settings: carrier.settings.fls.clone(),
settings: carrier.settings,
dds_output: pounder.dds_output,
pounder: pounder.pounder,
state: Default::default(),
};
let local = Local {
usb_terminal: carrier.usb_serial,
sampling_timer: carrier.sampling_timer,
digital_inputs: carrier.digital_inputs,
adcs: carrier.adcs,
dacs: carrier.dacs,
generator,
timestamper: pounder.timestamper,
stream: Default::default(),
tele_state: [0; 3],
pll: PLL::default(),
};
settings_update::spawn().unwrap();
telemetry::spawn().unwrap();
aux_adc::spawn().unwrap();
usb::spawn().unwrap();
ethernet_link::spawn().unwrap();
start::spawn().unwrap();
(shared, local)
}
#[task(priority = 1, local = [sampling_timer])]
async fn start(c: start::Context) {
Systick::delay(200.millis()).await;
c.local.sampling_timer.start();
}
/// Main DSP processing routine.
///
/// See `dual-iir` for general notes on processing time and timing.
///
/// This is an implementation of fiber length stabilization using super-heterodyne
/// (pounder + lockin) and digital feedback to a DDS.
#[task(binds = DMA1_STR4, local=[timestamper, adcs, dacs, generator, digital_inputs, stream, pll], shared = [active_settings, state, telemetry, dds_output], priority = 3)]
#[unsafe(link_section = ".itcm.process")]
fn process(c: process::Context) {
let process::LocalResources {
adcs: (adc0, adc1),
dacs: (dac0, dac1),
digital_inputs,
timestamper,
generator,
stream,
pll,
..
} = c.local;
// A counter running at a fourth of the DDS SYNC interval is captured by
// the overflow of a timer synchronized to the sampling timer (locked to the
// CPU clock and the other CPU timer clocks).
// Captured timestamps are about 0x140 counts apart between batches.
// They determine the phase and period of the DDS clock (driving the counter)
// in terms of the CPU clock (driving the capture).
// Discard double captures (overcaptures) and extrapolate.
// Extrapolate on no capture (undercapture).
let timestamp = timestamper
.latest_timestamp()
.unwrap_or(None)
.map(|t| ((t as u32) << 16) as i32);
(
c.shared.state,
c.shared.active_settings,
c.shared.dds_output,
c.shared.telemetry,
)
.lock(|state, settings, dds_output, telemetry| {
// Reconstruct frequency and phase using a lowpass that is aware of phase and frequency
// wraps.
pll.update(timestamp, settings.pll_k);
// TODO: implement clear
stream[0].pll = pll.frequency() as _;
stream[1].pll = pll.phase() as _;
telemetry.pll_time =
telemetry.pll_time.wrapping_add(pll.frequency() as _);
let mut demod = [Complex::<i32>::default(); BATCH_SIZE];
// TODO: fixed lockin_freq, const 5/16 frequency table (80 entries), then rotate each by pll phase
for (d, p) in demod.iter_mut().zip(Accu::new(
(pll.phase() << BATCH_SIZE_LOG2)
.wrapping_mul(settings.lockin_freq as _),
pll.frequency().wrapping_mul(settings.lockin_freq as _),
)) {
*d = Complex::from_angle(p);
}
(adc0, adc1, dac0, dac1).lock(|adc0, adc1, dac0, dac1| {
fence(Ordering::SeqCst);
let adc: [&[u16; BATCH_SIZE]; 2] = [
(**adc0).try_into().unwrap(),
(**adc1).try_into().unwrap(),
];
let dac: [&mut [u16; BATCH_SIZE]; 2] = [
(*dac0).try_into().unwrap(),
(*dac1).try_into().unwrap(),
];
// Perform lockin demodulation of the ADC samples in the batch.
for ((((adc, dac), state), settings), stream) in adc
.into_iter()
.zip(dac.into_iter())
.zip(state.iter_mut())
.zip(settings.ch.iter())
.zip(stream.iter_mut())
{
stream.demod = adc
.iter()
.zip(dac.iter_mut())
.zip(demod.iter())
.map(|((a, d), p)| {
// Demodulate the ADC sample `a0` with the sample's phase `p` and
// filter it with the lowpass.
// zero(s) at fs/2 (Nyquist) by lowpass
let y = state.lockin.update_iq(
// 3 bit headroom for coeff sum minus one bit gain for filter
(*a as i16 as i32) << 14,
*p,
&settings.lockin_k,
);
// Convert quadrature demodulated output to DAC data for monitoring
*d = DacCode::from((y.im >> 13) as i16).0;
y
})
// Add more zeros at fs/2, fs/4, and fs/8 by rectangular window.
// Sum up all demodulated samples in the batch. Corresponds to a boxcar
// averager with sinc frequency response. The first 15 lockin harmonics end up
// in zeros of the filter.
.sum();
}
fence(Ordering::SeqCst);
});
let di =
[digital_inputs.0.is_high(), digital_inputs.1.is_high()];
// TODO: pll.frequency()?
let time = pll.phase() & (-1 << PHASE_SCALE_SHIFT);
let dtime = time.wrapping_sub(state[0].t0) >> PHASE_SCALE_SHIFT;
state[0].t0 = time;
state[1].t0 = time;
let mut builder = dds_output.builder();
for (
(((((idx, di), settings), state), telemetry), stream),
stats,
) in [Channel::Out0, Channel::Out1]
.into_iter()
.zip(di)
.zip(settings.ch.iter_mut())
.zip(state.iter_mut())
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | true |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/bin/dual-iir.rs | src/bin/dual-iir.rs | //! # Dual IIR
//!
//! The Dual IIR application exposes two configurable channels. Stabilizer samples input at a fixed
//! rate, digitally filters the data, and then generates filtered output signals on the respective
//! channel outputs.
//!
//! ## Features
//! * Two indpenendent channels
//! * up to 800 kHz rate, timed sampling
//! * Run-time filter configuration
//! * Input/Output data streaming
//! * Down to 2 ยตs latency
//! * f32 IIR math
//! * Generic biquad (second order) IIR filter
//! * Anti-windup
//! * Derivative kick avoidance
//!
//! ## Settings
//! Refer to the [DualIir] structure for documentation of run-time configurable settings for this
//! application.
//!
//! ## Telemetry
//! Refer to [stabilizer::telemetry::Telemetry] for information about telemetry reported by this application.
//!
//! ## Stream
//! This application streams raw ADC and DAC data over UDP. Refer to
//! [stream] for more information.
#![cfg_attr(target_os = "none", no_std)]
#![cfg_attr(target_os = "none", no_main)]
use miniconf::Tree;
use idsp::iir;
use platform::{AppSettings, NetSettings};
use serde::{Deserialize, Serialize};
use signal_generator::{self, Source};
use stabilizer::convert::{AdcCode, DacCode, Gain};
// The number of cascaded IIR biquads per channel. Select 1 or 2!
const IIR_CASCADE_LENGTH: usize = 1;
// The number of samples in each batch process
const BATCH_SIZE: usize = 8;
// The logarithm of the number of 100MHz timer ticks between each sample. With a value of 2^7 =
// 128, there is 1.28uS per sample, corresponding to a sampling frequency of 781.25 KHz.
const SAMPLE_TICKS_LOG2: u8 = 7;
const SAMPLE_TICKS: u32 = 1 << SAMPLE_TICKS_LOG2;
const SAMPLE_PERIOD: f32 =
SAMPLE_TICKS as f32 * stabilizer::design_parameters::TIMER_PERIOD;
#[derive(Clone, Debug, Tree, Default)]
#[tree(meta(doc, typename))]
pub struct Settings {
dual_iir: DualIir,
net: NetSettings,
}
impl AppSettings for Settings {
fn new(net: NetSettings) -> Self {
Self {
net,
dual_iir: DualIir::default(),
}
}
fn net(&self) -> &NetSettings {
&self.net
}
}
impl serial_settings::Settings for Settings {
fn reset(&mut self) {
*self = Self {
dual_iir: DualIir::default(),
net: NetSettings::new(self.net.mac),
}
}
}
#[derive(Clone, Debug, Tree)]
#[tree(meta(doc, typename = "BiquadReprTree"))]
pub struct BiquadRepr {
/// Biquad parameters
#[tree(rename="typ", typ="&str", with=miniconf::str_leaf, defer=self.repr)]
_typ: (),
repr: iir::BiquadRepr<f32, f32>,
}
impl Default for BiquadRepr {
fn default() -> Self {
let mut i = iir::Biquad::IDENTITY;
i.set_min(-i16::MAX as _);
i.set_max(i16::MAX as _);
Self {
_typ: (),
repr: iir::BiquadRepr::Raw(i),
}
}
}
#[derive(Copy, Clone, Debug, Serialize, Deserialize, Default)]
pub enum Run {
#[default]
/// Run
Run,
/// Hold
Hold,
/// Hold controlled by corresponding digital input
External,
}
impl Run {
fn run(&self, di: bool) -> bool {
match self {
Self::Run => true,
Self::Hold => false,
Self::External => di,
}
}
}
/// A ADC-DAC channel
#[derive(Clone, Debug, Tree, Default)]
#[tree(meta(doc, typename))]
pub struct Channel {
/// Analog Front End (AFE) gain.
#[tree(with=miniconf::leaf)]
gain: Gain,
/// Biquad
biquad: [BiquadRepr; IIR_CASCADE_LENGTH],
/// Run/Hold behavior
#[tree(with=miniconf::leaf)]
run: Run,
/// Signal generator configuration to add to the DAC0/DAC1 outputs
source: signal_generator::Config,
}
impl Channel {
fn build(&self) -> Result<Active, signal_generator::Error> {
Ok(Active {
source: self
.source
.build(SAMPLE_PERIOD, DacCode::FULL_SCALE.recip())
.unwrap(),
state: Default::default(),
run: self.run,
biquad: self.biquad.each_ref().map(|biquad| {
biquad.repr.build::<f32>(
SAMPLE_PERIOD,
1.0,
DacCode::LSB_PER_VOLT,
)
}),
})
}
}
#[derive(Clone, Debug, Tree)]
#[tree(meta(doc, typename))]
pub struct DualIir {
/// Channel configuration
ch: [Channel; 2],
/// Trigger both signal sources
#[tree(with=miniconf::leaf)]
trigger: bool,
/// Telemetry output period in seconds.
#[tree(with=miniconf::leaf)]
telemetry_period: f32,
/// Target IP and port for UDP streaming.
///
/// Can be multicast.
#[tree(with=miniconf::leaf)]
stream: stream::Target,
}
impl Default for DualIir {
fn default() -> Self {
Self {
telemetry_period: 10.0,
trigger: false,
stream: Default::default(),
ch: Default::default(),
}
}
}
#[derive(Clone, Debug)]
pub struct Active {
run: Run,
biquad: [iir::Biquad<f32>; IIR_CASCADE_LENGTH],
state: [[f32; 4]; IIR_CASCADE_LENGTH],
source: Source,
}
#[cfg(not(target_os = "none"))]
fn main() {
use miniconf::{json::to_json_value, json_schema::TreeJsonSchema};
let s = Settings::default();
println!(
"{}",
serde_json::to_string_pretty(&to_json_value(&s).unwrap()).unwrap()
);
let mut schema = TreeJsonSchema::new(Some(&s)).unwrap();
schema
.root
.insert("title".to_string(), "Stabilizer dual-iir".into());
println!("{}", serde_json::to_string_pretty(&schema.root).unwrap());
}
#[cfg(target_os = "none")]
#[cfg_attr(target_os = "none", rtic::app(device = stabilizer::hardware::hal::stm32, peripherals = true, dispatchers=[DCMI, JPEG, LTDC, SDMMC]))]
mod app {
use super::*;
use core::sync::atomic::{Ordering, fence};
use fugit::ExtU32 as _;
use rtic_monotonics::Monotonic;
use stabilizer::{
hardware::{
self, DigitalInput0, DigitalInput1, Pgia, SerialTerminal,
SystemTimer, Systick, UsbDevice,
adc::{Adc0Input, Adc1Input},
dac::{Dac0Output, Dac1Output},
hal,
net::{NetworkState, NetworkUsers},
timers::SamplingTimer,
},
telemetry::TelemetryBuffer,
};
use stream::FrameGenerator;
#[shared]
struct Shared {
usb: UsbDevice,
network: NetworkUsers<DualIir>,
settings: Settings,
active: [Active; 2],
telemetry: TelemetryBuffer,
}
#[local]
struct Local {
usb_terminal: SerialTerminal<Settings>,
sampling_timer: SamplingTimer,
digital_inputs: (DigitalInput0, DigitalInput1),
afes: [Pgia; 2],
adcs: (Adc0Input, Adc1Input),
dacs: (Dac0Output, Dac1Output),
generator: FrameGenerator,
cpu_temp_sensor: stabilizer::hardware::cpu_temp_sensor::CpuTempSensor,
}
#[init]
fn init(c: init::Context) -> (Shared, Local) {
let clock = SystemTimer::new(|| Systick::now().ticks());
// Configure the microcontroller
let (stabilizer, _mezzanine, _eem) = hardware::setup::setup::<Settings>(
c.core,
c.device,
clock,
BATCH_SIZE,
SAMPLE_TICKS,
);
let mut network = NetworkUsers::new(
stabilizer.network_devices.stack,
stabilizer.network_devices.phy,
clock,
env!("CARGO_BIN_NAME"),
&stabilizer.settings.net,
stabilizer.metadata,
);
let generator = network.configure_streaming(stream::Format::AdcDacData);
let shared = Shared {
usb: stabilizer.usb,
network,
active: stabilizer
.settings
.dual_iir
.ch
.each_ref()
.map(|a| a.build().unwrap()),
telemetry: TelemetryBuffer::default(),
settings: stabilizer.settings,
};
let mut local = Local {
usb_terminal: stabilizer.usb_serial,
sampling_timer: stabilizer.sampling_timer,
digital_inputs: stabilizer.digital_inputs,
afes: stabilizer.afes,
adcs: stabilizer.adcs,
dacs: stabilizer.dacs,
generator,
cpu_temp_sensor: stabilizer.temperature_sensor,
};
// Enable ADC/DAC events
local.adcs.0.start();
local.adcs.1.start();
local.dacs.0.start();
local.dacs.1.start();
// Spawn a settings update for default settings.
settings_update::spawn().unwrap();
telemetry::spawn().unwrap();
ethernet_link::spawn().unwrap();
usb::spawn().unwrap();
start::spawn().unwrap();
(shared, local)
}
#[task(priority = 1, local=[sampling_timer])]
async fn start(c: start::Context) {
Systick::delay(100.millis()).await;
// Start sampling ADCs and DACs.
c.local.sampling_timer.start();
}
/// Main DSP processing routine.
///
/// # Note
/// Processing time for the DSP application code is bounded by the following constraints:
///
/// DSP application code starts after the ADC has generated a batch of samples and must be
/// completed by the time the next batch of ADC samples has been acquired (plus the FIFO buffer
/// time). If this constraint is not met, firmware will panic due to an ADC input overrun.
///
/// The DSP application code must also fill out the next DAC output buffer in time such that the
/// DAC can switch to it when it has completed the current buffer. If this constraint is not met
/// it's possible that old DAC codes will be generated on the output and the output samples will
/// be delayed by 1 batch.
///
/// Because the ADC and DAC operate at the same rate, these two constraints actually implement
/// the same time bounds, meeting one also means the other is also met.
#[task(
binds=DMA1_STR4,
local=[digital_inputs, adcs, dacs, generator, source: [[i16; BATCH_SIZE]; 2] = [[0; BATCH_SIZE]; 2]],
shared=[active, telemetry],
priority=3)]
#[unsafe(link_section = ".itcm.process")]
fn process(c: process::Context) {
let process::SharedResources {
active, telemetry, ..
} = c.shared;
let process::LocalResources {
digital_inputs,
adcs: (adc0, adc1),
dacs: (dac0, dac1),
generator,
source,
..
} = c.local;
(active, telemetry).lock(|active, telemetry| {
(adc0, adc1, dac0, dac1).lock(|adc0, adc1, dac0, dac1| {
// Preserve instruction and data ordering w.r.t. DMA flag access before and after.
fence(Ordering::SeqCst);
let adc: [&[u16; BATCH_SIZE]; 2] = [
(**adc0).try_into().unwrap(),
(**adc1).try_into().unwrap(),
];
let mut dac: [&mut [u16; BATCH_SIZE]; 2] =
[(*dac0).try_into().unwrap(), (*dac1).try_into().unwrap()];
for ((((adc, dac), active), di), source) in adc
.into_iter()
.zip(dac.iter_mut())
.zip(active.iter_mut())
.zip(telemetry.digital_inputs)
.zip(source.iter())
{
for ((adc, dac), source) in
adc.iter().zip(dac.iter_mut()).zip(source)
{
let x = f32::from(*adc as i16);
let y = active
.biquad
.iter()
.zip(active.state.iter_mut())
.fold(x, |y, (ch, state)| {
let filter = if active.run.run(di) {
ch
} else {
&iir::Biquad::HOLD
};
filter.update(state, y)
});
// Note(unsafe): The filter limits must ensure that the value is in range.
// The truncation introduces 1/2 LSB distortion.
let y: i16 = unsafe { y.to_int_unchecked() };
*dac = DacCode::from(y.saturating_add(*source)).0;
}
}
telemetry.adcs = [AdcCode(adc[0][0]), AdcCode(adc[1][0])];
telemetry.dacs = [DacCode(dac[0][0]), DacCode(dac[1][0])];
const N: usize = BATCH_SIZE * size_of::<i16>();
generator.add(|buf| {
[adc[0], adc[1], dac[0], dac[1]]
.into_iter()
.zip(buf.chunks_exact_mut(N))
.map(|(data, buf)| {
buf.copy_from_slice(bytemuck::cast_slice(data))
})
.count()
* N
});
fence(Ordering::SeqCst);
});
*source = active.each_mut().map(|ch| {
core::array::from_fn(|_| (ch.source.next().unwrap() >> 16) as _)
});
telemetry.digital_inputs =
[digital_inputs.0.is_high(), digital_inputs.1.is_high()];
});
}
#[idle(shared=[network, settings, usb])]
fn idle(mut c: idle::Context) -> ! {
loop {
match (&mut c.shared.network, &mut c.shared.settings)
.lock(|net, settings| net.update(&mut settings.dual_iir))
{
NetworkState::SettingsChanged => {
settings_update::spawn().unwrap();
}
NetworkState::Updated => {}
NetworkState::NoChange => {
// We can't sleep if USB is not in suspend.
if c.shared.usb.lock(|usb| {
usb.state()
== usb_device::device::UsbDeviceState::Suspend
}) {
cortex_m::asm::wfi();
}
}
}
}
}
#[task(priority = 1, local=[afes], shared=[network, settings, active])]
async fn settings_update(mut c: settings_update::Context) {
c.shared.settings.lock(|settings| {
c.local.afes[0].set_gain(settings.dual_iir.ch[0].gain);
c.local.afes[1].set_gain(settings.dual_iir.ch[1].gain);
if settings.dual_iir.trigger {
settings.dual_iir.trigger = false;
let s = settings.dual_iir.ch.each_ref().map(|ch| {
let s = ch
.source
.build(SAMPLE_PERIOD, DacCode::FULL_SCALE.recip());
if let Err(err) = &s {
log::error!("Failed to update source: {:?}", err);
}
s
});
c.shared.active.lock(|ch| {
for (ch, s) in ch.iter_mut().zip(s) {
if let Ok(s) = s {
ch.source = s;
}
}
});
}
let b = settings.dual_iir.ch.each_ref().map(|ch| {
(
ch.run,
ch.biquad.each_ref().map(|b| {
b.repr.build::<f32>(
SAMPLE_PERIOD,
1.0,
DacCode::LSB_PER_VOLT,
)
}),
)
});
c.shared.active.lock(|active| {
for (a, b) in active.iter_mut().zip(b) {
(a.run, a.biquad) = b;
}
});
c.shared
.network
.lock(|net| net.direct_stream(settings.dual_iir.stream));
});
}
#[task(priority = 1, shared=[network, settings, telemetry], local=[cpu_temp_sensor])]
async fn telemetry(mut c: telemetry::Context) -> ! {
loop {
let telemetry =
c.shared.telemetry.lock(|telemetry| telemetry.clone());
let (gains, telemetry_period) =
c.shared.settings.lock(|settings| {
(
settings.dual_iir.ch.each_ref().map(|ch| ch.gain),
settings.dual_iir.telemetry_period,
)
});
c.shared.network.lock(|net| {
net.telemetry.publish_telemetry(
"/telemetry",
&telemetry.finalize(
gains[0],
gains[1],
c.local.cpu_temp_sensor.get_temperature().unwrap(),
),
)
});
Systick::delay(((telemetry_period * 1000.0) as u32).millis()).await;
}
}
#[task(priority = 1, shared=[usb, settings], local=[usb_terminal])]
async fn usb(mut c: usb::Context) -> ! {
loop {
// Handle the USB serial terminal.
c.shared.usb.lock(|usb| {
usb.poll(&mut [c
.local
.usb_terminal
.interface_mut()
.inner_mut()]);
});
c.shared.settings.lock(|settings| {
if c.local.usb_terminal.poll(settings).unwrap() {
settings_update::spawn().unwrap()
}
});
Systick::delay(10.millis()).await;
}
}
#[task(priority = 1, shared=[network])]
async fn ethernet_link(mut c: ethernet_link::Context) -> ! {
loop {
c.shared.network.lock(|net| net.processor.handle_link());
Systick::delay(1.secs()).await;
}
}
#[task(binds = ETH, priority = 1)]
fn eth(_: eth::Context) {
unsafe { hal::ethernet::interrupt_handler() }
}
#[task(binds = SPI2, priority = 4)]
fn spi2(_: spi2::Context) {
panic!("ADC0 SPI error");
}
#[task(binds = SPI3, priority = 4)]
fn spi3(_: spi3::Context) {
panic!("ADC1 SPI error");
}
#[task(binds = SPI4, priority = 4)]
fn spi4(_: spi4::Context) {
panic!("DAC0 SPI error");
}
#[task(binds = SPI5, priority = 4)]
fn spi5(_: spi5::Context) {
panic!("DAC1 SPI error");
}
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/src/bin/lockin.rs | src/bin/lockin.rs | //! # Lockin
//!
//! The `lockin` application implements a lock-in amplifier using either an external or internally
//! generated reference.
//!
//! ## Features
//! * Up to 800 kHz sampling
//! * Up to 400 kHz modulation frequency
//! * Supports internal and external reference sources:
//! 1. Internal: Generate reference internally and output on one of the channel outputs
//! 2. External: Reciprocal PLL, reference input applied to DI0.
//! * Adjustable PLL and locking time constants
//! * Adjustable phase offset and harmonic index
//! * Run-time configurable output modes (in-phase, quadrature, magnitude, log2 power, phase, frequency)
//! * Input/output data streamng via UDP
//!
//! ## Settings
//! Refer to the [Lockin] structure for documentation of run-time configurable settings
//! for this application.
//!
//! ## Telemetry
//! Refer to [stabilizer::telemetry::Telemetry] for information about telemetry reported by this application.
//!
//! ## Stream
//! This application streams raw ADC and DAC data over UDP. Refer to
//! [stream] for more information.
#![cfg_attr(target_os = "none", no_std)]
#![cfg_attr(target_os = "none", no_main)]
use core::{
iter,
mem::MaybeUninit,
sync::atomic::{Ordering, fence},
};
use fugit::ExtU32;
use idsp::{Accu, Complex, ComplexExt, Filter, Lowpass, RPLL, Repeat};
use miniconf::{Leaf, Tree};
use rtic_monotonics::Monotonic;
use serde::{Deserialize, Serialize};
use stabilizer::convert::{AdcCode, DacCode, Gain};
use platform::{AppSettings, NetSettings};
// The logarithm of the number of samples in each batch process. This corresponds with 2^3 samples
// per batch = 8 samples
const BATCH_SIZE_LOG2: u32 = 3;
const BATCH_SIZE: usize = 1 << BATCH_SIZE_LOG2;
// The logarithm of the number of 100MHz timer ticks between each sample. This corresponds with a
// sampling period of 2^7 = 128 ticks. At 100MHz, 10ns per tick, this corresponds to a sampling
// period of 1.28 uS or 781.25 KHz.
const SAMPLE_TICKS_LOG2: u32 = 7;
const SAMPLE_TICKS: u32 = 1 << SAMPLE_TICKS_LOG2;
#[derive(Clone, Debug, Tree, Default)]
#[tree(meta(doc, typename))]
pub struct Settings {
lockin: Lockin,
net: NetSettings,
}
impl AppSettings for Settings {
fn new(net: NetSettings) -> Self {
Self {
net,
lockin: Lockin::default(),
}
}
fn net(&self) -> &NetSettings {
&self.net
}
}
impl serial_settings::Settings for Settings {
fn reset(&mut self) {
*self = Self {
lockin: Lockin::default(),
net: NetSettings::new(self.net.mac),
}
}
}
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
enum Conf {
/// Output the lockin magnitude.
Magnitude,
/// Output the phase of the lockin
Phase,
/// Output the lockin reference frequency as a sinusoid
ReferenceFrequency,
/// Output the logarithmic power of the lockin
LogPower,
/// Output the in-phase component of the lockin signal.
InPhase,
/// Output the quadrature component of the lockin signal.
Quadrature,
/// Output the lockin internal modulation frequency as a sinusoid
Modulation,
}
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
enum LockinMode {
/// Utilize an internally generated reference for demodulation
Internal,
/// Utilize an external modulation signal supplied to DI0
External,
}
#[derive(Clone, Debug, Tree)]
#[tree(meta(doc, typename))]
pub struct Lockin {
/// Configure the Analog Front End (AFE) gain.
afe: [Leaf<Gain>; 2],
/// Specifies the operational mode of the lockin.
#[tree(with=miniconf::leaf)]
lockin_mode: LockinMode,
/// Specifis the PLL time constant.
///
/// The PLL time constant exponent (1-31).
pll_tc: [u32; 2],
/// Specifies the lockin lowpass gains.
#[tree(with=miniconf::leaf)]
lockin_k: <Lowpass<2> as Filter>::Config,
/// Specifies which harmonic to use for the lockin.
///
/// Harmonic index of the LO. -1 to _de_modulate the fundamental (complex conjugate)
lockin_harmonic: i32,
/// Specifies the LO phase offset.
///
/// Demodulation LO phase offset. Units are in terms of i32, where [i32::MIN] is equivalent to
/// -pi and [i32::MAX] is equivalent to +pi.
lockin_phase: i32,
/// Specifies DAC output mode.
output_conf: [Leaf<Conf>; 2],
/// Specifies the telemetry output period in seconds.
telemetry_period: u16,
/// Specifies the target for data streaming.
#[tree(with=miniconf::leaf)]
stream: stream::Target,
}
impl Default for Lockin {
fn default() -> Self {
Self {
afe: [Leaf(Gain::G1); 2],
lockin_mode: LockinMode::External,
pll_tc: [21, 21], // frequency and phase settling time (log2 counter cycles)
lockin_k: [0x8_0000, -0x400_0000], // lockin lowpass gains
lockin_harmonic: -1, // Harmonic index of the LO: -1 to _de_modulate the fundamental (complex conjugate)
lockin_phase: 0, // Demodulation LO phase offset
output_conf: [Leaf(Conf::InPhase), Leaf(Conf::Quadrature)],
// The default telemetry period in seconds.
telemetry_period: 10,
stream: Default::default(),
}
}
}
#[cfg(not(target_os = "none"))]
fn main() {
use miniconf::{json::to_json_value, json_schema::TreeJsonSchema};
let s = Settings::default();
println!(
"{}",
serde_json::to_string_pretty(&to_json_value(&s).unwrap()).unwrap()
);
let mut schema = TreeJsonSchema::new(Some(&s)).unwrap();
schema
.root
.insert("title".to_string(), "Stabilizer lockin".into());
println!("{}", serde_json::to_string_pretty(&schema.root).unwrap());
}
#[cfg(target_os = "none")]
#[rtic::app(device = stabilizer::hardware::hal::stm32, peripherals = true, dispatchers=[DCMI, JPEG, SDMMC])]
mod app {
use super::*;
use stabilizer::{
hardware::{
self, DigitalInput0, DigitalInput1, Pgia, SerialTerminal,
SystemTimer, Systick, UsbDevice,
adc::{Adc0Input, Adc1Input},
dac::{Dac0Output, Dac1Output},
hal,
input_stamper::InputStamper,
net::{NetworkState, NetworkUsers},
timers::SamplingTimer,
},
telemetry::TelemetryBuffer,
};
use stream::FrameGenerator;
#[shared]
struct Shared {
usb: UsbDevice,
network: NetworkUsers<Lockin>,
settings: Settings,
active_settings: Lockin,
telemetry: TelemetryBuffer,
}
#[local]
struct Local {
usb_terminal: SerialTerminal<Settings>,
sampling_timer: SamplingTimer,
digital_inputs: (DigitalInput0, DigitalInput1),
timestamper: InputStamper,
afes: [Pgia; 2],
adcs: (Adc0Input, Adc1Input),
dacs: (Dac0Output, Dac1Output),
pll: RPLL,
lockin: idsp::Lockin<Repeat<2, Lowpass<2>>>,
source: idsp::AccuOsc<iter::Repeat<i64>>,
generator: FrameGenerator,
cpu_temp_sensor: stabilizer::hardware::cpu_temp_sensor::CpuTempSensor,
}
#[init]
fn init(c: init::Context) -> (Shared, Local) {
let clock = SystemTimer::new(|| Systick::now().ticks());
// Configure the microcontroller
let (mut stabilizer, _mezzanine, _eem) =
hardware::setup::setup::<Settings>(
c.core,
c.device,
clock,
BATCH_SIZE,
SAMPLE_TICKS,
);
let mut network = NetworkUsers::new(
stabilizer.network_devices.stack,
stabilizer.network_devices.phy,
clock,
env!("CARGO_BIN_NAME"),
&stabilizer.settings.net,
stabilizer.metadata,
);
let generator = network.configure_streaming(stream::Format::AdcDacData);
let shared = Shared {
network,
usb: stabilizer.usb,
telemetry: TelemetryBuffer::default(),
active_settings: stabilizer.settings.lockin.clone(),
settings: stabilizer.settings,
};
let mut local = Local {
usb_terminal: stabilizer.usb_serial,
sampling_timer: stabilizer.sampling_timer,
digital_inputs: stabilizer.digital_inputs,
afes: stabilizer.afes,
adcs: stabilizer.adcs,
dacs: stabilizer.dacs,
timestamper: stabilizer.input_stamper,
cpu_temp_sensor: stabilizer.temperature_sensor,
pll: RPLL::new(SAMPLE_TICKS_LOG2 + BATCH_SIZE_LOG2),
lockin: idsp::Lockin::default(),
source: idsp::AccuOsc::new(iter::repeat(
1i64 << (64 - BATCH_SIZE_LOG2),
)),
generator,
};
// Enable ADC/DAC events
local.adcs.0.start();
local.adcs.1.start();
local.dacs.0.start();
local.dacs.1.start();
// Spawn a settings and telemetry update for default settings.
settings_update::spawn().unwrap();
telemetry::spawn().unwrap();
ethernet_link::spawn().unwrap();
start::spawn().unwrap();
usb::spawn().unwrap();
// Start recording digital input timestamps.
stabilizer.timestamp_timer.start();
// Enable the timestamper.
local.timestamper.start();
(shared, local)
}
#[task(priority = 1, local=[sampling_timer])]
async fn start(c: start::Context) {
Systick::delay(100.millis()).await;
// Start sampling ADCs and DACs.
c.local.sampling_timer.start();
}
/// Main DSP processing routine.
///
/// See `dual-iir` for general notes on processing time and timing.
///
/// This is an implementation of a externally (DI0) referenced PLL lockin on the ADC0 signal.
/// It outputs either I/Q or power/phase on DAC0/DAC1. Data is normalized to full scale.
/// PLL bandwidth, filter bandwidth, slope, and x/y or power/phase post-filters are available.
#[task(binds=DMA1_STR4, shared=[active_settings, telemetry], local=[adcs, dacs, lockin, timestamper, pll, generator, source], priority=3)]
#[unsafe(link_section = ".itcm.process")]
fn process(c: process::Context) {
let process::SharedResources {
active_settings,
telemetry,
..
} = c.shared;
let process::LocalResources {
timestamper,
adcs: (adc0, adc1),
dacs: (dac0, dac1),
pll,
lockin,
source,
generator,
..
} = c.local;
(active_settings, telemetry).lock(|settings, telemetry| {
let (reference_phase, reference_frequency) =
match settings.lockin_mode {
LockinMode::External => {
let timestamp =
timestamper.latest_timestamp().unwrap_or(None); // Ignore data from timer capture overflows.
let (pll_phase, pll_frequency) = pll.update(
timestamp.map(|t| t as i32),
settings.pll_tc[0],
settings.pll_tc[1],
);
(pll_phase, (pll_frequency >> BATCH_SIZE_LOG2) as i32)
}
LockinMode::Internal => {
// Reference phase and frequency are known.
(1i32 << 30, 1i32 << (32 - BATCH_SIZE_LOG2))
}
};
let sample_frequency =
reference_frequency.wrapping_mul(settings.lockin_harmonic);
let sample_phase = settings.lockin_phase.wrapping_add(
reference_phase.wrapping_mul(settings.lockin_harmonic),
);
(adc0, adc1, dac0, dac1).lock(|adc0, adc1, dac0, dac1| {
let adc_samples = [adc0, adc1];
let mut dac_samples = [dac0, dac1];
// Preserve instruction and data ordering w.r.t. DMA flag access.
fence(Ordering::SeqCst);
let output: Complex<i32> = adc_samples[0]
.iter()
// Zip in the LO phase.
.zip(Accu::new(sample_phase, sample_frequency))
// Convert to signed, MSB align the ADC sample, update the Lockin (demodulate, filter)
.map(|(&sample, phase)| {
let s = (sample as i16 as i32) << 16;
lockin.update(s, phase, &settings.lockin_k)
})
// Decimate
.last()
.unwrap()
* 2; // Full scale assuming the 2f component is gone.
// Convert to DAC data.
for (channel, samples) in dac_samples.iter_mut().enumerate() {
for sample in samples.iter_mut() {
let value = match *settings.output_conf[channel] {
Conf::Magnitude => output.abs_sqr() as i32 >> 16,
Conf::Phase => output.arg() >> 16,
Conf::LogPower => output.log2() << 8,
Conf::ReferenceFrequency => {
reference_frequency >> 16
}
Conf::InPhase => output.re >> 16,
Conf::Quadrature => output.im >> 16,
Conf::Modulation => source.next().unwrap().re,
};
*sample = DacCode::from(value as i16).0;
}
}
// Stream the data.
const N: usize = BATCH_SIZE * size_of::<i16>()
/ size_of::<MaybeUninit<u8>>();
generator.add(|buf| {
for (data, buf) in adc_samples
.iter()
.chain(dac_samples.iter())
.zip(buf.chunks_exact_mut(N))
{
let data = unsafe {
core::slice::from_raw_parts(
data.as_ptr() as *const MaybeUninit<u8>,
N,
)
};
buf.copy_from_slice(data)
}
N * 4
});
// Update telemetry measurements.
telemetry.adcs =
[AdcCode(adc_samples[0][0]), AdcCode(adc_samples[1][0])];
telemetry.dacs =
[DacCode(dac_samples[0][0]), DacCode(dac_samples[1][0])];
// Preserve instruction and data ordering w.r.t. DMA flag access.
fence(Ordering::SeqCst);
});
});
}
#[idle(shared=[settings, network, usb])]
fn idle(mut c: idle::Context) -> ! {
loop {
match (&mut c.shared.network, &mut c.shared.settings)
.lock(|net, settings| net.update(&mut settings.lockin))
{
NetworkState::SettingsChanged => {
settings_update::spawn().unwrap()
}
NetworkState::Updated => {}
NetworkState::NoChange => {
// We can't sleep if USB is not in suspend.
if c.shared.usb.lock(|usb| {
usb.state()
== usb_device::device::UsbDeviceState::Suspend
}) {
cortex_m::asm::wfi();
}
}
}
}
}
#[task(priority = 1, local=[afes], shared=[network, settings, active_settings])]
async fn settings_update(mut c: settings_update::Context) {
c.shared.settings.lock(|settings| {
c.local.afes[0].set_gain(*settings.lockin.afe[0]);
c.local.afes[1].set_gain(*settings.lockin.afe[1]);
c.shared
.network
.lock(|net| net.direct_stream(settings.lockin.stream));
c.shared
.active_settings
.lock(|current| *current = settings.lockin.clone());
});
}
#[task(priority = 1, local=[digital_inputs, cpu_temp_sensor], shared=[network, settings, telemetry])]
async fn telemetry(mut c: telemetry::Context) -> ! {
loop {
let mut telemetry =
c.shared.telemetry.lock(|telemetry| telemetry.clone());
telemetry.digital_inputs = [
c.local.digital_inputs.0.is_high(),
c.local.digital_inputs.1.is_high(),
];
let (gains, telemetry_period) =
c.shared.settings.lock(|settings| {
(settings.lockin.afe, settings.lockin.telemetry_period)
});
c.shared.network.lock(|net| {
net.telemetry.publish_telemetry(
"/telemetry",
&telemetry.finalize(
*gains[0],
*gains[1],
c.local.cpu_temp_sensor.get_temperature().unwrap(),
),
)
});
// Schedule the telemetry task in the future.
Systick::delay((telemetry_period as u32).secs()).await;
}
}
#[task(priority = 1, shared=[usb, settings], local=[usb_terminal])]
async fn usb(mut c: usb::Context) -> ! {
loop {
// Handle the USB serial terminal.
c.shared.usb.lock(|usb| {
usb.poll(&mut [c
.local
.usb_terminal
.interface_mut()
.inner_mut()]);
});
c.shared.settings.lock(|settings| {
if c.local.usb_terminal.poll(settings).unwrap() {
settings_update::spawn().unwrap()
}
});
Systick::delay(10.millis()).await;
}
}
#[task(priority = 1, shared=[network])]
async fn ethernet_link(mut c: ethernet_link::Context) -> ! {
loop {
c.shared.network.lock(|net| net.processor.handle_link());
Systick::delay(1.secs()).await;
}
}
#[task(binds = ETH, priority = 1)]
fn eth(_: eth::Context) {
unsafe { hal::ethernet::interrupt_handler() }
}
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/urukul/src/lib.rs | urukul/src/lib.rs | #![no_std]
use arbitrary_int::{u2, u3, u4, u7, u24};
use bitbybit::{bitenum, bitfield};
use embedded_hal::digital::OutputPin;
use embedded_hal::spi::{self, SpiBus, SpiDevice};
use embedded_hal_bus::spi::{DeviceError, NoDelay, RefCellDevice};
use num_traits::float::FloatCore;
use serde::{Deserialize, Serialize};
use ad9912::Ad9912;
use encoded_pin::EncodedPin;
#[derive(Debug, Clone, PartialEq, thiserror::Error)]
pub enum Error {
#[error("Initialization: {0}: {1}")]
Initialization(&'static str, u32),
#[error("SPI Error {0}")]
Spi(spi::ErrorKind),
#[error("DDS")]
Dds(#[source] ad9912::Error),
}
impl<E: spi::Error> From<E> for Error {
fn from(value: E) -> Self {
Self::Spi(value.kind())
}
}
#[bitfield(u24, default = 0x000700)]
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Cfg {
#[bits(0..=3, rw)]
rf_sw: u4,
#[bits(4..=7, rw)]
led: u4,
#[bits(8..=10, rw)]
profile: u3,
#[bit(12, rw)]
io_update: bool,
#[bits(13..=16, rw)]
mask_nu: u4,
#[bits([17, 21], rw)]
clk_sel: ClkSel,
#[bit(18, rw)]
sync_sel: bool,
#[bit(19, rw)]
rst: bool,
#[bit(20, rw)]
io_rst: bool,
#[bits(22..=23, rw)]
div_sel: DivSel,
}
#[bitfield(u24)]
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Status {
#[bits(0..=3, r)]
pub rf_sw: u4,
#[bits(4..=7, r)]
pub smp_err: u4,
#[bits(8..=11, r)]
pub pll_lock: u4,
#[bits(12..=15, r)]
pub ifc_mode: u4,
#[bits(16..=22, r)]
pub proto_rev: u7,
}
#[bitenum(u2, exhaustive = true)]
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub enum ClkSel {
Osc = 0,
Sma = 1,
Mmcx = 2,
_Sma = 3,
}
#[bitenum(u2, exhaustive = true)]
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub enum DivSel {
One = 0,
_One = 1,
Two = 2,
Four = 3,
}
impl DivSel {
pub fn divider(&self) -> u32 {
match self {
Self::One => 1,
Self::Two => 2,
Self::Four => 4,
Self::_One => 1,
}
}
}
pub fn att_to_mu(att: f32) -> u8 {
255 - (att * 8.0).round() as u8
}
pub struct Urukul<'a, B, P> {
att_spi: RefCellDevice<'a, B, EncodedPin<'a, P, 3>, NoDelay>,
cfg_spi: RefCellDevice<'a, B, EncodedPin<'a, P, 3>, NoDelay>,
io_update: P,
_sync: P,
cfg: Cfg,
att: [u8; 4],
dds: [Ad9912<RefCellDevice<'a, B, EncodedPin<'a, P, 3>, NoDelay>>; 4],
}
impl<'a, B: SpiBus<u8>, P: OutputPin> Urukul<'a, B, P> {
    /// Build and initialize a driver from the shared bus, the three
    /// encoded chip-select lines, and the IO_UPDATE/SYNC pins.
    pub fn new(
        spi: &'a core::cell::RefCell<B>,
        cs: &'a core::cell::RefCell<[P; 3]>,
        io_update: P,
        sync: P,
    ) -> Result<Self, Error> {
        // Each peripheral is addressed by a 3-bit code on the shared
        // chip-select lines.
        let sel = |sel| {
            RefCellDevice::new(spi, EncodedPin::new(cs, u3::new(sel)), NoDelay)
                .unwrap()
        };
        let cfg_spi = sel(1);
        let att_spi = sel(2);
        let mut dev = Self {
            cfg_spi,
            att_spi,
            io_update,
            _sync: sync,
            cfg: Cfg::default(),
            att: [0; 4],
            dds: [
                Ad9912::new(sel(4)),
                Ad9912::new(sel(5)),
                Ad9912::new(sel(6)),
                Ad9912::new(sel(7)),
            ],
        };
        dev.init()?;
        Ok(dev)
    }

    /// The shadowed CPLD configuration.
    pub fn cfg(&self) -> Cfg {
        self.cfg
    }

    /// Write a new CPLD configuration and return the [`Status`] word
    /// clocked back during the transfer.
    pub fn set_cfg(
        &mut self,
        cfg: Cfg,
    ) -> Result<Status, DeviceError<B::Error, P::Error>> {
        let mut bits = [0; 3];
        let w = cfg.raw_value().to_be_bytes();
        self.cfg_spi.transfer(&mut bits, &w)?;
        self.cfg = cfg;
        Ok(Status::new_with_raw_value(u24::from_be_bytes(bits)))
    }

    /// The shadowed attenuator code for channel `ch`.
    pub fn att(&self, ch: u2) -> u8 {
        // The attenuator shift register is chained in reverse channel order.
        self.att[(u2::new(3) - ch).value() as usize]
    }

    /// Set the attenuator code for channel `ch` and shift out all four.
    pub fn set_att(
        &mut self,
        ch: u2,
        att: u8,
    ) -> Result<(), DeviceError<B::Error, P::Error>> {
        self.att[(u2::new(3) - ch).value() as usize] = att;
        self.att_spi.write(&self.att)
    }

    /// Probe and initialize the board: check PROTO_REV/IFC_MODE, pulse
    /// the resets, round-trip test the attenuator chain, and initialize
    /// all four DDS chips.
    pub fn init(&mut self) -> Result<(), Error> {
        // Write the current (default) Cfg once to obtain a Status word.
        let sta = self.set_cfg(self.cfg())?;
        if sta.proto_rev().value() != 0x8 {
            return Err(Error::Initialization(
                "Invalid PROTO_REV",
                sta.proto_rev().value() as _,
            ));
        }
        if sta.rf_sw().value() != 0 {
            return Err(Error::Initialization(
                "RF_SW driven",
                sta.rf_sw().value() as _,
            ));
        }
        if sta.ifc_mode().value() != 0 {
            return Err(Error::Initialization(
                "Invalid IFC_MODE",
                sta.ifc_mode().value() as _,
            ));
        }
        // Pulse both reset bits, then restore the previous Cfg.
        let cfg = self.cfg();
        self.set_cfg(cfg.with_io_rst(true).with_rst(true))?;
        self.set_cfg(cfg)?;
        // Round-trip test patterns through the attenuator shift register.
        for want in [[0; 4], [0xff; 4], [0x5a; 4], [0xa5; 4]].iter() {
            self.att_spi.write(want)?;
            let mut have = [0; 4];
            self.att_spi.read(&mut have)?;
            if want != &have {
                return Err(Error::Initialization(
                    "Attenuator mismatch",
                    u32::from_be_bytes(have),
                ));
            }
        }
        // This is destructive and clears attenuation
        // https://github.com/rust-embedded/embedded-hal/issues/642
        self.att_spi.write(&self.att)?;
        for dds in self.dds.iter_mut() {
            dds.init().map_err(Error::Dds)?;
        }
        log::info!("Urukul initialized");
        Ok(())
    }

    /// Pulse IO_UPDATE to latch previously written DDS registers.
    pub fn io_update(&mut self) -> Result<(), P::Error> {
        self.io_update.set_high()?;
        self.io_update.set_low()
    }

    /// Open or close the RF switch of channel `ch`.
    pub fn set_rf_sw(
        &mut self,
        ch: u2,
        state: bool,
    ) -> Result<(), DeviceError<B::Error, P::Error>> {
        // Read-modify-write the per-channel bit in the shadowed Cfg.
        let mut v = self.cfg.rf_sw().value();
        v &= !(1 << ch.value());
        v |= (state as u8) << ch.value();
        self.set_cfg(self.cfg.with_rf_sw(u4::new(v)))?;
        Ok(())
    }

    /// Switch the LED of channel `ch` on or off.
    pub fn set_led(
        &mut self,
        ch: u2,
        state: bool,
    ) -> Result<(), DeviceError<B::Error, P::Error>> {
        let mut v = self.cfg.led().value();
        v &= !(1 << ch.value());
        v |= (state as u8) << ch.value();
        self.set_cfg(self.cfg.with_led(u4::new(v)))?;
        Ok(())
    }

    /// Mutable access to the DDS driver of channel `ch`.
    pub fn dds(
        &mut self,
        ch: u2,
    ) -> &mut Ad9912<RefCellDevice<'a, B, EncodedPin<'a, P, 3>, NoDelay>> {
        &mut self.dds[ch.value() as usize]
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/ad9959/src/lib.rs | ad9959/src/lib.rs | #![no_std]
use arbitrary_int::{Number, u2, u3, u4, u5, u10, u14, u24};
use bitbybit::{bitenum, bitfield};
use embedded_hal::{blocking::delay::DelayUs, digital::v2::OutputPin};
/// A trait that allows a HAL to provide a means of communicating with the AD9959.
pub trait Interface {
    /// Error type surfaced by the underlying transport.
    type Error;
    /// Switch the transport to the given serial [`Mode`].
    fn configure_mode(&mut self, mode: Mode) -> Result<(), Self::Error>;
    /// Write `data` to the register at `addr`.
    fn write(&mut self, addr: Address, data: &[u8]) -> Result<(), Self::Error>;
    /// Read `data.len()` bytes from the register at `addr`.
    fn read(
        &mut self,
        addr: Address,
        data: &mut [u8],
    ) -> Result<(), Self::Error>;
}
/// Indicates various communication modes of the DDS. The value of this enumeration is equivalent to
/// the configuration bits of the DDS CSR register.
#[derive(PartialEq)]
#[bitenum(u2, exhaustive = true)]
pub enum Mode {
    SingleBitTwoWire = 0b00,
    SingleBitThreeWire = 0b01,
    TwoBitSerial = 0b10,
    FourBitSerial = 0b11,
}

/// Channel-enable mask, one bit per DDS channel.
pub type Channel = u4;

/// Channel select register (CSR).
#[bitfield(u8, default = 0xf0)]
#[derive(Debug, PartialEq)]
pub struct Csr {
    /// Serial bit order (true: LSB first)
    #[bit(0, rw)]
    lsb_first: bool,
    /// Serial I/O mode
    #[bits(1..=2, rw)]
    mode: Mode,
    /// Channel enables, one bit per channel
    #[bits(4..=7, rw)]
    channel: u4,
}
/// Function register 1 (FR1): PLL, power-down, and modulation setup.
#[bitfield(u24, default = 0)]
#[derive(Debug, PartialEq)]
pub struct Fr1 {
    #[bit(0, rw)]
    sw_sync: bool,
    #[bit(1, rw)]
    hw_sync: bool,
    #[bit(4, rw)]
    dac_ref_pd: bool,
    #[bit(5, rw)]
    sync_clk_pd: bool,
    #[bit(6, rw)]
    ext_pd: bool,
    #[bit(7, rw)]
    ext_clk_pd: bool,
    #[bits(8..=9, rw)]
    modulation: u2,
    #[bits(10..=11, rw)]
    ramp_up_down: u2,
    #[bits(12..=14, rw)]
    profile_pin: u3,
    #[bits(16..=17, rw)]
    charge_pump: u2,
    // NOTE(review): field name is a typo for "pll_divider"; kept as-is
    // since the generated accessors (e.g. `with_pll_divier`) are used
    // by callers.
    #[bits(18..=22, rw)]
    pll_divier: u5,
    #[bit(23, rw)]
    vco_high: bool,
}

/// Amplitude control register (ACR).
#[bitfield(u24, default = 0)]
#[derive(Debug, PartialEq)]
pub struct Acr {
    /// Amplitude scale factor
    #[bits(0..=9, rw)]
    asf: u10,
    #[bit(10, rw)]
    load_arr: bool,
    #[bit(11, rw)]
    ramp: bool,
    /// Amplitude multiplier enable; when clear the output is full scale
    #[bit(12, rw)]
    multiplier: bool,
    #[bits(14..=15, rw)]
    step: u2,
    #[bits(16..=23, rw)]
    arr: u8,
}

/// AD9959 register addresses.
#[allow(clippy::upper_case_acronyms)]
#[bitenum(u7)]
pub enum Address {
    CSR = 0x00,
    FR1 = 0x01,
    FR2 = 0x02,
    CFR = 0x03,
    CFTW0 = 0x04,
    CPOW0 = 0x05,
    ACR = 0x06,
    LSRR = 0x07,
    RDW = 0x08,
    FDW = 0x09,
    CW1 = 0x0a,
    CW2 = 0x0b,
    CW3 = 0x0c,
    CW4 = 0x0d,
    CW5 = 0x0e,
    CW6 = 0x0f,
    CW7 = 0x10,
    CW8 = 0x11,
    CW9 = 0x12,
    CW10 = 0x13,
    CW11 = 0x14,
    CW12 = 0x15,
    CW13 = 0x16,
    CW14 = 0x17,
    CW15 = 0x18,
}

/// Possible errors generated by the AD9959 driver.
#[derive(Debug)]
pub enum Error {
    /// The transport reported a failure
    Interface,
    /// A read-back verification failed
    Check,
    /// A parameter was out of its valid range
    Bounds,
    /// A GPIO operation failed
    Pin,
    /// An invalid frequency was requested
    Frequency,
}
/// A device driver for the AD9959 direct digital synthesis (DDS) chip.
///
/// This chip provides four independently controllable digital-to-analog output sinusoids with
/// configurable phase, amplitude, and frequency. All channels are inherently synchronized as they
/// are derived off a common system clock.
///
/// The chip contains a configurable PLL and supports system clock frequencies up to 500 MHz.
///
/// The chip supports a number of serial interfaces to improve data throughput, including normal,
/// dual, and quad SPI configurations.
pub struct Ad9959<I> {
    /// Transport used to reach the chip
    interface: I,
    /// Frequency tuning word LSBs per Hz of output frequency
    ftw_per_hz: f32,
    /// Active serial communication mode
    mode: Mode,
}
impl<I: Interface> Ad9959<I> {
    /// Construct and initialize the DDS.
    ///
    /// Args:
    /// * `interface` - An interface to the DDS.
    /// * `reset_pin` - A pin connected to the DDS reset input.
    /// * `io_update` - A pin connected to the DDS io_update input.
    /// * `delay` - A delay implementation for blocking operation for specific amounts of time.
    /// * `desired_mode` - The desired communication mode of the interface to the DDS.
    /// * `clock_frequency` - The clock frequency of the reference clock input.
    /// * `multiplier` - The desired clock multiplier for the system clock. This multiplies
    ///   `clock_frequency` to generate the system clock.
    pub fn new(
        interface: I,
        reset: &mut impl OutputPin,
        io_update: &mut impl OutputPin,
        delay: &mut impl DelayUs<u8>,
        mode: Mode,
        reference_clock_frequency: f32,
        multiplier: u5,
    ) -> Result<Self, Error> {
        let mut ad9959 = Ad9959 {
            interface,
            ftw_per_hz: 0.0,
            mode,
        };
        io_update.set_low().or(Err(Error::Pin))?;
        // Reset the AD9959 (Pounder v1.1 and earlier)
        // On Pounder v1.2 and later the reset has been done through the GPIO extender in
        // PounderDevices before.
        reset.set_high().or(Err(Error::Pin))?;
        // Delays here are at least 1 SYNC_CLK period. The SYNC_CLK is guaranteed
        // to be at least 250KHz (1/4 of 1MHz minimum REF_CLK). We use 5uS instead of 4uS to
        // guarantee conformance with datasheet requirements.
        delay.delay_us(5);
        reset.set_low().or(Err(Error::Pin))?;
        // Program the CSR in the lowest-common-denominator two-wire mode,
        // then switch both the host and the chip to the requested mode.
        ad9959
            .interface
            .configure_mode(Mode::SingleBitTwoWire)
            .or(Err(Error::Interface))?;
        let csr = Csr::default().with_channel(u4::new(0b1111)).with_mode(mode);
        ad9959.write(Address::CSR, &csr.raw_value().to_be_bytes())?;
        io_update.set_high().or(Err(Error::Pin))?;
        delay.delay_us(5);
        io_update.set_low().or(Err(Error::Pin))?;
        ad9959
            .interface
            .configure_mode(mode)
            .or(Err(Error::Interface))?;
        // Empirical evidence indicates a delay is necessary here for the IO update to become
        // active. This is likely due to needing to wait at least 1 clock cycle of the DDS for the
        // interface update to occur.
        delay.delay_us(5);
        // Read back the CSR to ensure it specifies the mode correctly.
        let mut updated_csr = 0u8.to_be_bytes();
        ad9959.read(Address::CSR, &mut updated_csr)?;
        if updated_csr != csr.raw_value().to_be_bytes() {
            return Err(Error::Check);
        }
        // Set the clock frequency to configure the device as necessary.
        ad9959.set_system_clock(reference_clock_frequency, multiplier)?;
        io_update.set_high().or(Err(Error::Pin))?;
        delay.delay_us(5);
        io_update.set_low().or(Err(Error::Pin))?;
        Ok(ad9959)
    }
fn read(&mut self, reg: Address, data: &mut [u8]) -> Result<(), Error> {
self.interface.read(reg, data).or(Err(Error::Interface))
}
fn write(&mut self, reg: Address, data: &[u8]) -> Result<(), Error> {
self.interface.write(reg, data).or(Err(Error::Interface))
}
    /// Configure the internal system clock of the chip.
    ///
    /// Arguments:
    /// * `reference_clock_frequency` - The reference clock frequency provided to the AD9959 core.
    /// * `multiplier` - The frequency multiplier of the system clock. Must be 1 or 4-20.
    ///
    /// Returns:
    /// The actual frequency configured for the internal system clock.
    fn set_system_clock(
        &mut self,
        reference_clock_frequency: f32,
        multiplier: u5,
    ) -> Result<f32, Error> {
        let sysclk = multiplier.value() as f32 * reference_clock_frequency;
        // Bounds: PLL bypass (x1) allows a 1-500 MHz input; the PLL
        // (x4-x20) requires a 10-125 MHz input and 100-500 MHz output.
        if match multiplier.value() {
            1 => !(1e6..=500e6).contains(&reference_clock_frequency),
            4..=20 => {
                !(10e6..=125e6).contains(&reference_clock_frequency)
                    || !(100e6..=500e6).contains(&sysclk)
            }
            _ => true,
        } {
            return Err(Error::Bounds);
        }
        // Read-modify-write FR1 to set the PLL divider and VCO gain bit.
        let mut fr1 = u24::new(0).to_be_bytes();
        self.read(Address::FR1, &mut fr1)?;
        let fr1 = Fr1::new_with_raw_value(u24::from_be_bytes(fr1))
            .with_pll_divier(multiplier)
            .with_vco_high(sysclk >= 200e6);
        self.write(Address::FR1, &fr1.raw_value().to_be_bytes())?;
        self.ftw_per_hz = (1u64 << 32) as f32 / sysclk;
        Ok(sysclk)
    }
/// Get the current CSR register.
pub fn csr(&mut self) -> Result<Csr, Error> {
let mut data = u8::new(0).to_be_bytes();
self.read(Address::CSR, &mut data)?;
Ok(Csr::new_with_raw_value(u8::from_be_bytes(data)))
}
/// Get the current FR1 register.
pub fn fr1(&mut self) -> Result<Fr1, Error> {
let mut data = u24::new(0).to_be_bytes();
self.read(Address::FR1, &mut data)?;
Ok(Fr1::new_with_raw_value(u24::from_be_bytes(data)))
}
    /// Perform a self-test of the communication interface.
    ///
    /// Note:
    /// This modifies the existing channel enables. They are restored upon exit.
    ///
    /// Returns:
    /// True if the self test succeeded. False otherwise.
    pub fn self_test(&mut self) -> Result<bool, Error> {
        let mut data = [0];
        // Get current CSR.
        self.read(Address::CSR, &mut data)?;
        let old_csr = data;
        let mut csr = Csr::new_with_raw_value(data[0]);
        // Enable all channels and verify the write sticks.
        csr.set_channel(u4::new(0b1111));
        self.write(Address::CSR, &[csr.raw_value()])?;
        self.read(Address::CSR, &mut data)?;
        if Csr::new_with_raw_value(data[0]).channel() != csr.channel() {
            return Ok(false);
        }
        // Clear all channel enables and verify again.
        csr.set_channel(u4::new(0b0000));
        self.write(Address::CSR, &[csr.raw_value()])?;
        self.read(Address::CSR, &mut data)?;
        if Csr::new_with_raw_value(data[0]).channel() != csr.channel() {
            return Ok(false);
        }
        // Restore the CSR.
        self.write(Address::CSR, &old_csr)?;
        Ok(true)
    }
    /// Get the current system clock frequency in Hz.
    ///
    /// Inverse of the `ftw_per_hz` factor computed in `set_system_clock`.
    fn system_clock_frequency(&self) -> f32 {
        (1u64 << 32) as f32 / self.ftw_per_hz
    }
    /// Update an output channel configuration register.
    ///
    /// Args:
    /// * `channel` - The channel to configure.
    /// * `register` - The register to update.
    /// * `data` - The contents to write to the provided register.
    ///
    /// Note: the channel enables in the CSR are left pointing at
    /// `channel` on return.
    fn write_channel(
        &mut self,
        channel: Channel,
        register: Address,
        data: &[u8],
    ) -> Result<(), Error> {
        // Disable all other outputs so that we can update the configuration register of only the
        // specified channel.
        let csr = Csr::default().with_channel(channel).with_mode(self.mode);
        self.write(Address::CSR, &csr.raw_value().to_be_bytes())?;
        self.write(register, data)?;
        Ok(())
    }

    /// Read a configuration register of a specific channel.
    ///
    /// Args:
    /// * `channel` - The channel to read.
    /// * `register` - The register to read.
    /// * `data` - A location to store the read register contents.
    fn read_channel(
        &mut self,
        channel: Channel,
        register: Address,
        data: &mut [u8],
    ) -> Result<(), Error> {
        // Select only the requested channel before issuing the read.
        let csr = Csr::default().with_channel(channel).with_mode(self.mode);
        self.write(Address::CSR, &csr.raw_value().to_be_bytes())?;
        self.read(register, data)?;
        Ok(())
    }
    /// Configure the phase of a specified channel.
    ///
    /// Arguments:
    /// * `channel` - The channel to configure the phase of.
    /// * `phase_turns` - The desired phase offset in turns.
    ///
    /// Returns:
    /// The actual programmed phase offset of the channel in turns.
    pub fn set_phase(
        &mut self,
        channel: Channel,
        phase: f32,
    ) -> Result<f32, Error> {
        // Quantize to the 14-bit phase offset word; the mask keeps only
        // the low 14 bits (negative inputs saturate to 0 via the cast).
        let pow = u14::new((phase * (1 << 14) as f32) as u16 & 0x3FFF);
        self.write_channel(
            channel,
            Address::CPOW0,
            &pow.value().to_be_bytes(),
        )?;
        Ok(pow.value() as f32 / (1 << 14) as f32)
    }
/// Get the current phase of a specified channel.
///
/// Args:
/// * `channel` - The channel to get the phase of.
///
/// Returns:
/// The phase of the channel in turns.
pub fn get_phase(&mut self, channel: Channel) -> Result<f32, Error> {
let mut pow = 0u16.to_be_bytes();
self.read_channel(channel, Address::CPOW0, &mut pow)?;
let pow = u16::from_be_bytes(pow) & 0x3FFF;
Ok(pow as f32 / (1 << 14) as f32)
}
/// Configure the amplitude of a specified channel.
///
/// Arguments:
/// * `channel` - The channel to configure the frequency of.
/// * `amplitude` - A normalized amplitude setting [0, 1].
///
/// Returns:
/// The actual normalized amplitude of the channel relative to full-scale range.
pub fn set_amplitude(
&mut self,
channel: Channel,
amplitude: f32,
) -> Result<f32, Error> {
if !(0.0..=1.0).contains(&litude) {
return Err(Error::Bounds);
}
let asf = (amplitude * (1 << 10) as f32) as u16;
let acr = match u10::try_new(asf) {
Ok(asf) => Acr::default().with_multiplier(true).with_asf(asf),
Err(_) => Acr::default().with_multiplier(false),
};
self.write_channel(
channel,
Address::ACR,
&acr.raw_value().to_be_bytes(),
)?;
Ok(asf as f32 / (1 << 10) as f32)
}
/// Get the configured amplitude of a channel.
///
/// Args:
/// * `channel` - The channel to get the amplitude of.
///
/// Returns:
/// The normalized amplitude of the channel.
pub fn get_amplitude(&mut self, channel: Channel) -> Result<f32, Error> {
let mut acr = u24::new(0).to_be_bytes();
self.read_channel(channel, Address::ACR, &mut acr)?;
let acr = Acr::new_with_raw_value(u24::from_be_bytes(acr));
Ok(if acr.multiplier() {
1.0
} else {
acr.asf().value() as f32 / (1 << 10) as f32
})
}
    /// Configure the frequency of a specified channel.
    ///
    /// Arguments:
    /// * `channel` - The channel to configure the frequency of.
    /// * `frequency` - The desired output frequency in Hz.
    ///
    /// Returns:
    /// The actual programmed frequency of the channel.
    pub fn set_frequency(
        &mut self,
        channel: Channel,
        frequency: f32,
    ) -> Result<f32, Error> {
        // Deliberately written as two comparisons (not a range
        // `contains`) so that NaN slips through unchanged, matching the
        // original behavior.
        if frequency < 0.0 || frequency > self.system_clock_frequency() {
            return Err(Error::Bounds);
        }
        // Quantize to the 32-bit frequency tuning word.
        let ftw = (frequency * self.ftw_per_hz) as u32;
        self.write_channel(channel, Address::CFTW0, &ftw.to_be_bytes())?;
        Ok(ftw as f32 / self.ftw_per_hz)
    }
/// Get the frequency of a channel.
///
/// Arguments:
/// * `channel` - The channel to get the frequency of.
///
/// Returns:
/// The frequency of the channel in Hz.
pub fn get_frequency(&mut self, channel: Channel) -> Result<f32, Error> {
let mut ftw = 0u32.to_be_bytes();
self.read_channel(channel, Address::CFTW0, &mut ftw)?;
let ftw = u32::from_be_bytes(ftw);
Ok(ftw as f32 / self.ftw_per_hz)
}
    /// Finalize DDS configuration
    ///
    /// # Note
    /// This is intended for when the DDS profiles will be written as a stream of data to the DDS.
    ///
    /// # Returns
    /// (i, mode) where `i` is the interface to the DDS and `mode` is the frozen `Mode`.
    pub fn freeze(self) -> (I, Mode) {
        (self.interface, self.mode)
    }
}
/// Represents a means of serializing a DDS profile for writing to a stream.
pub struct ProfileSerializer {
    /// Serial mode encoded into every CSR byte emitted
    mode: Mode,
    // reorder or pad to work around https://github.com/japaric/heapless/issues/305
    // TODO: check
    // heapless::Vec<u8, 32>, especially its extend_from_slice() is slow
    /// Number of bytes of `data` filled so far
    index: usize,
    /// Serialized register writes
    data: [u8; 32],
}
impl ProfileSerializer {
    /// Construct a new serializer.
    ///
    /// # Args
    /// * `mode` - The communication mode of the DDS.
    pub fn new(mode: Mode) -> Self {
        Self {
            mode,
            index: 0,
            data: [0; 32],
        }
    }

    /// Update a number of channels with the requested profile.
    ///
    /// # Args
    /// * `channels` - A set of channels to apply the configuration to.
    /// * `ftw` - If provided, indicates a frequency tuning word for the channels.
    /// * `pow` - If provided, indicates a phase offset word for the channels.
    /// * `acr` - If provided, indicates the amplitude control register for the channels. The ACR
    ///   should be stored in the 3 LSB of the word. Note that if amplitude scaling is to be used,
    ///   the "Amplitude multiplier enable" bit must be set.
    #[inline]
    pub fn push(
        &mut self,
        channels: Channel,
        ftw: Option<u32>,
        pow: Option<u14>,
        acr: Option<Acr>,
    ) {
        // Select the target channels first; the register writes that
        // follow apply to every enabled channel.
        self.push_write(
            Address::CSR,
            &Csr::default()
                .with_mode(self.mode)
                .with_channel(channels)
                .raw_value()
                .to_be_bytes(),
        );
        if let Some(ftw) = ftw {
            self.push_write(Address::CFTW0, &ftw.to_be_bytes());
        }
        if let Some(pow) = pow {
            self.push_write(Address::CPOW0, &pow.value().to_be_bytes());
        }
        if let Some(acr) = acr {
            self.push_write(Address::ACR, &acr.raw_value().to_be_bytes());
        }
    }

    /// Add a register write to the serialization data.
    ///
    /// NOTE(review): there is no capacity check — pushing more than the
    /// 32-byte buffer holds panics on the slice accesses below.
    #[inline]
    fn push_write(&mut self, register: Address, value: &[u8]) {
        let data = &mut self.data[self.index..];
        data[0] = register as u8;
        data[1..1 + value.len()].copy_from_slice(value);
        self.index += 1 + value.len();
    }

    /// Get the serialized profile as a slice of 32-bit words.
    ///
    /// # Note
    /// The serialized profile will be padded to the next 32-bit word boundary by adding dummy
    /// writes to the CSR or LSRR registers.
    ///
    /// # Returns
    /// A slice of `u32` words representing the serialized profile.
    #[inline]
    pub fn finalize(&mut self) -> &[u32] {
        // Pad the buffer to 32-bit (4 byte) alignment by adding dummy writes to CSR and LSRR.
        // In the case of 1 byte padding, this instead pads with 5 bytes as there is no
        // valid single-byte write that could be used.
        if self.index & 1 != 0 {
            // Pad with 3 bytes
            self.push_write(Address::LSRR, &0u16.to_be_bytes());
        }
        if self.index & 2 != 0 {
            // Pad with 2 bytes
            self.push_write(
                Address::CSR,
                &Csr::default()
                    .with_mode(self.mode)
                    .raw_value()
                    .to_be_bytes(),
            );
        }
        bytemuck::cast_slice(&self.data[..self.index])
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/ad9912/src/lib.rs | ad9912/src/lib.rs | #![no_std]
use arbitrary_int::{Number, u5, u10, u14, u48};
use bitbybit::{bitenum, bitfield};
use embedded_hal::spi::{self, Operation, SpiDevice};
use num_traits::float::FloatCore;
/// AD9912 register addresses.
#[bitenum(u13)]
#[derive(PartialEq, Debug)]
pub enum Addr {
    Serial = 0x0000,
    PartId = 0x0003,
    Buffer = 0x0004,
    Update = 0x0005,
    Power = 0x0010,
    DdsReset = 0x0012,
    Reset = 0x0013,
    NDiv = 0x0020,
    Pll = 0x0022,
    SDiv = 0x0106,
    Ftw0 = 0x01ab,
    Phase = 0x01ad,
    Hstl = 0x0200,
    Cmos = 0x0201,
    Fsc = 0x040c,
    SpurA = 0x0504,
    SpurB = 0x0509,
}

/// Transfer-length field of a serial [`Instruction`].
#[bitenum(u2, exhaustive = true)]
#[derive(PartialEq, Debug, Default)]
pub enum Size {
    #[default]
    One = 0,
    Two = 1,
    Three = 2,
    /// Four or more bytes (streaming)
    Stream = 3,
}
impl From<usize> for Size {
    /// Map a transfer length in bytes onto the instruction `Size` field.
    /// Zero-length transfers are unrepresentable and panic.
    fn from(value: usize) -> Self {
        if value == 0 {
            unimplemented!()
        } else if value == 1 {
            Self::One
        } else if value == 2 {
            Self::Two
        } else if value == 3 {
            Self::Three
        } else {
            Self::Stream
        }
    }
}
/// 16-bit serial instruction header: address, transfer size, direction.
#[bitfield(u16)]
#[derive(Debug, PartialEq)]
pub struct Instruction {
    #[bits(0..=12, rw)]
    addr: Option<Addr>,
    #[bits(13..=14, rw)]
    size: Size,
    /// true for a register read, false for a write
    #[bit(15, rw)]
    read: bool,
}

/// Serial configuration register (low nibble; see [`Serial::mirror`]).
#[bitfield(u8, default = 0x18)]
#[derive(Debug, PartialEq)]
pub struct Serial {
    #[bit(0, rw)]
    sdo_active: bool,
    #[bit(1, rw)]
    lsb_first: bool,
    #[bit(2, rw)]
    soft_reset: bool,
    #[bit(3, rw)]
    long_insn: bool,
}
impl Serial {
    /// Duplicate the low nibble, bit-reversed, into the high nibble
    /// (bit 0 -> 7, 1 -> 6, 2 -> 5, 3 -> 4).
    ///
    /// The resulting byte reads the same in either serial bit order, so
    /// the write takes effect regardless of the current LSB/MSB setting.
    pub fn mirror(self) -> Self {
        let v = self.raw_value();
        let mut m = v & 0x0f;
        for bit in 0..4 {
            m |= ((v >> bit) & 1) << (7 - bit);
        }
        Self::new_with_raw_value(m)
    }
}
/// Power-down and enable register.
#[bitfield(u8, default = 0xc0)]
#[derive(Debug, PartialEq)]
pub struct Power {
    #[bit(0, rw)]
    digital_pd: bool,
    #[bit(1, rw)]
    full_pd: bool,
    #[bit(4, rw)]
    pll_pd: bool,
    #[bit(5, rw)]
    output_doubler_en: bool,
    #[bit(6, rw)]
    cmos_en: bool,
    #[bit(7, rw)]
    hstl_pd: bool,
}

/// S-divider and DDS reset register.
#[bitfield(u8, default = 0x00)]
#[derive(Debug, PartialEq)]
pub struct Reset {
    #[bit(1, rw)]
    sdiv: bool,
    #[bit(3, rw)]
    sdiv2: bool,
    #[bit(7, rw)]
    fund_dds_pd: bool,
}

/// PLL charge pump current setting.
#[bitenum(u2, exhaustive = true)]
#[derive(Debug, PartialEq)]
pub enum ChargePump {
    Ua250 = 0,
    Ua375 = 1,
    Off = 2,
    Ua125 = 3,
}

/// SYSCLK PLL configuration register.
#[bitfield(u8, default = 0x04)]
#[derive(Debug, PartialEq)]
pub struct Pll {
    #[bits(0..=1, rw)]
    charge_pump: ChargePump,
    #[bit(2, rw)]
    vco_range_high: bool,
    #[bit(3, rw)]
    ref_doubler: bool,
    #[bit(7, rw)]
    vco_auto_range: bool,
}
impl Pll {
    /// Select the VCO range for the SYSCLK that results from `refclk`
    /// and the feedback divider `ndiv`, and return that SYSCLK in Hz.
    ///
    /// SYSCLK = refclk * (2 if ref_doubler else 1) * 2 * (ndiv + 2).
    /// The multiplier fits `u8` arithmetic: at most 4 * 33 = 132.
    pub fn set_refclk(&mut self, ndiv: u5, refclk: f64) -> f64 {
        let sysclk = refclk
            * ((self.ref_doubler() as u8 + 1) * 2 * (ndiv.value() + 2)) as f64;
        // Above 900 MHz force the high VCO range; in the 810-900 MHz
        // band let the chip auto-range; otherwise force the low range.
        *self = if sysclk > 900e6 {
            self.with_vco_auto_range(false).with_vco_range_high(true)
        } else if sysclk > 810e6 {
            self.with_vco_auto_range(true)
        } else {
            self.with_vco_auto_range(false).with_vco_range_high(false)
        };
        sysclk
    }
}
/// AD9912 driver errors.
#[derive(Debug, Clone, Copy, PartialEq, thiserror::Error)]
pub enum Error {
    /// Part ID read-back did not match the AD9912
    #[error("Invalid Part ID {0}")]
    Id(u16),
    /// Underlying SPI transaction failed
    #[error("SPI")]
    Bus(spi::ErrorKind),
}
impl<E: spi::Error> From<E> for Error {
    /// Capture the portable [`spi::ErrorKind`] of any SPI bus error.
    fn from(value: E) -> Self {
        // Fix: construct the variant directly. The previous
        // `value.kind().into()` resolved back into this same blanket
        // impl — `spi::ErrorKind` itself implements `spi::Error` with
        // `kind()` returning `self` — so converting any bus error
        // recursed infinitely (stack overflow). This now mirrors the
        // sibling urukul crate's `Self::Spi(value.kind())`.
        Self::Bus(value.kind())
    }
}
/// Driver for the AD9912 DDS chip, generic over its SPI device `B`.
#[derive(Clone, Debug)]
pub struct Ad9912<B> {
    /// SPI device carrying this chip's select line
    bus: B,
}
/// Convert an output frequency in Hz into a 48-bit frequency tuning word.
///
/// Frequencies outside the first Nyquist zone wrap (alias) modulo 2^48.
pub fn frequency_to_ftw(frequency: f64, sysclk: f64) -> u48 {
    // FTW LSBs per Hz of output frequency.
    let ftw_per_hz = sysclk.recip() * (1u64 << 48) as f64;
    let ftw = (frequency * ftw_per_hz).round() as i64 as u64;
    u48::new(ftw & u48::MASK)
}
/// Convert a phase in turns into a 14-bit phase offset word,
/// wrapping (aliasing) modulo one turn.
pub fn phase_to_pow(phase: f32) -> u14 {
    let pow = (phase * (1u32 << 14) as f32).round() as i32 as u16;
    u14::new(pow & u14::MASK)
}
/// Convert a desired DAC full-scale current and the DAC reference
/// resistor value into the 10-bit FSC register code, clamped to range.
///
/// NOTE(review): units are assumed to match the AD9912 FSC formula
/// (consistent current/resistance units) — confirm against the datasheet.
pub fn dac_fs_to_fsc(dac_fs: f32, r_dac_ref: f32) -> u10 {
    const SCALE: f32 = 1024.0 / 192.0;
    let code = dac_fs * (r_dac_ref * (SCALE / 1.2)) - SCALE * 72.0;
    let lo = u10::MIN.value() as f32;
    let hi = u10::MAX.value() as f32;
    u10::new(code.round().clamp(lo, hi) as u16)
}
impl<B: SpiDevice<u8>> Ad9912<B> {
    /// Wrap an SPI device addressing an AD9912.
    pub fn new(bus: B) -> Self {
        Self { bus }
    }

    /// Write `data` to the register window starting at `addr`.
    fn write(&mut self, addr: Addr, data: &[u8]) -> Result<(), Error> {
        Ok(self.bus.transaction(&mut [
            Operation::Write(
                &Instruction::builder()
                    .with_addr(addr)
                    .with_size(data.len().into())
                    .with_read(false)
                    .build()
                    .raw_value()
                    .to_be_bytes(),
            ),
            Operation::Write(data),
        ])?)
    }

    /// Read `data.len()` bytes from the register window at `addr`.
    fn read(&mut self, addr: Addr, data: &mut [u8]) -> Result<(), Error> {
        Ok(self.bus.transaction(&mut [
            Operation::Write(
                &Instruction::builder()
                    .with_addr(addr)
                    .with_size(data.len().into())
                    .with_read(true)
                    .build()
                    .raw_value()
                    .to_be_bytes(),
            ),
            Operation::Read(data),
        ])?)
    }

    /// Configure the serial interface and verify the part ID.
    pub fn init(&mut self) -> Result<(), Error> {
        // The serial register is written bit-order symmetric (mirror())
        // so it takes effect regardless of the current LSB/MSB setting.
        self.write(
            Addr::Serial,
            &Serial::builder()
                .with_sdo_active(true)
                .with_lsb_first(false)
                .with_soft_reset(false)
                .with_long_insn(true)
                .build()
                .mirror()
                .raw_value()
                .to_be_bytes(),
        )?;
        let mut id = [0; 2];
        self.read(Addr::PartId, &mut id)?;
        let id = u16::from_be_bytes(id);
        if id != 0x1982 {
            return Err(Error::Id(id));
        }
        Ok(())
    }

    /// Write the power-down/enable register.
    pub fn set_power(&mut self, power: Power) -> Result<(), Error> {
        self.write(Addr::Power, &power.raw_value().to_be_bytes())
    }

    /// Non-clearing, needs init()
    pub fn soft_reset(&mut self) -> Result<(), Error> {
        self.write(
            Addr::Serial,
            &Serial::builder()
                .with_sdo_active(true)
                .with_lsb_first(false)
                .with_soft_reset(true)
                .with_long_insn(true)
                .build()
                .mirror()
                .raw_value()
                .to_be_bytes(),
        )
    }

    /// Needs io-update
    pub fn dds_reset(&mut self) -> Result<(), Error> {
        self.write(Addr::DdsReset, &0x01u8.to_be_bytes())
    }

    /// Set the SYSCLK PLL feedback (N) divider.
    pub fn set_ndiv(&mut self, ndiv: u5) -> Result<(), Error> {
        self.write(Addr::NDiv, &ndiv.value().to_be_bytes())
    }

    /// Write the SYSCLK PLL configuration register.
    pub fn set_pll(&mut self, pll: Pll) -> Result<(), Error> {
        self.write(Addr::Pll, &pll.raw_value().to_be_bytes())
    }

    /// Write the 48-bit frequency tuning word.
    pub fn set_ftw(&mut self, ftw: u48) -> Result<(), Error> {
        self.write(Addr::Ftw0, &ftw.to_be_bytes())
    }

    /// Read back the 48-bit frequency tuning word.
    pub fn ftw(&mut self) -> Result<u48, Error> {
        let mut r = u48::default().to_be_bytes();
        self.read(Addr::Ftw0, &mut r)?;
        Ok(u48::from_be_bytes(r))
    }

    /// Program an output frequency in Hz given the SYSCLK in Hz and
    /// return the tuning word that was written.
    pub fn set_frequency(
        &mut self,
        frequency: f64,
        sysclk: f64,
    ) -> Result<u48, Error> {
        let ftw = frequency_to_ftw(frequency, sysclk);
        self.set_ftw(ftw)?;
        Ok(ftw)
    }

    /// Write the 14-bit phase offset word.
    pub fn set_pow(&mut self, pow: u14) -> Result<(), Error> {
        self.write(Addr::Phase, &pow.value().to_be_bytes())
    }

    /// Program a phase offset in turns and return the word written.
    pub fn set_phase(&mut self, phase: f32) -> Result<u14, Error> {
        let pow = phase_to_pow(phase);
        self.set_pow(pow)?;
        Ok(pow)
    }

    /// Write the 10-bit full-scale current code.
    pub fn set_fsc(&mut self, fsc: u10) -> Result<(), Error> {
        self.write(Addr::Fsc, &fsc.value().to_be_bytes())
    }

    /// Program the DAC full-scale current and return the code written.
    pub fn set_full_scale_current(
        &mut self,
        dac_fs: f32,
        r_dac_ref: f32,
    ) -> Result<u10, Error> {
        let fsc = dac_fs_to_fsc(dac_fs, r_dac_ref);
        self.set_fsc(fsc)?;
        Ok(fsc)
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/encoded_pin/src/lib.rs | encoded_pin/src/lib.rs | #![no_std]
use arbitrary_int::UInt;
use core::cell::RefCell;
use embedded_hal::digital::{ErrorType, OutputPin};
/// An `OutputPin` that drives a binary select code onto `N` shared
/// lines, for chip selects behind a decoder.
pub struct EncodedPin<'a, P, const N: usize> {
    /// The shared select lines; array index i drives code bit i
    cs: &'a RefCell<[P; N]>,
    /// Nonzero code identifying this device
    sel: UInt<u8, N>,
}

impl<'a, P, const N: usize> EncodedPin<'a, P, N> {
    /// Create an encoded pin for select code `sel`.
    ///
    /// Panics if `sel` is zero (the all-lines-low idle pattern).
    pub fn new(cs: &'a RefCell<[P; N]>, sel: UInt<u8, N>) -> Self {
        assert!(sel.value() != 0);
        Self { cs, sel }
    }
}

impl<P: ErrorType, const N: usize> ErrorType for EncodedPin<'_, P, N> {
    type Error = P::Error;
}
impl<P: OutputPin, const N: usize> OutputPin for EncodedPin<'_, P, N> {
    /// Assert this (active-low) chip select: raise every shared line
    /// whose bit is set in `sel`, forming the device's select code.
    fn set_low(&mut self) -> Result<(), Self::Error> {
        let mask = self.sel.value();
        for (i, line) in self.cs.borrow_mut().iter_mut().enumerate() {
            if mask & (1 << i) != 0 {
                line.set_high()?;
            }
        }
        Ok(())
    }

    /// Deassert: drop the same lines back low, returning the shared
    /// lines toward the idle (no device selected) pattern.
    fn set_high(&mut self) -> Result<(), Self::Error> {
        let mask = self.sel.value();
        for (i, line) in self.cs.borrow_mut().iter_mut().enumerate() {
            if mask & (1 << i) != 0 {
                line.set_low()?;
            }
        }
        Ok(())
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/serial_settings/src/lib.rs | serial_settings/src/lib.rs | #![doc = include_str!("../README.md")]
#![no_std]
use embedded_io::{ErrorType, Read, ReadReady, Write};
use heapless::String;
use miniconf::{
NodeIter, Path, SerdeError, TreeDeserializeOwned, TreeSchema,
TreeSerialize, ValueError, json_core, postcard,
};
mod interface;
pub use interface::BestEffortInterface;
/// Specifies the API required for objects that are used as settings with the serial terminal
/// interface.
pub trait Settings:
    TreeSchema + TreeSerialize + TreeDeserializeOwned + Clone
{
    /// Reset the settings to their default values.
    ///
    /// The default implementation is a no-op.
    fn reset(&mut self) {}
}
/// Platform support for serial settings.
///
/// Covers platform-specific commands, persistent key-value storage and the
/// Read/Write interface for interaction.
///
/// Assuming there are no unit fields in the `Settings`, the empty value can be
/// used to mark the "cleared" state.
pub trait Platform {
    /// This type specifies the interface to the user, for example, a USB CDC-ACM serial port.
    type Interface: embedded_io::Read
        + embedded_io::ReadReady
        + embedded_io::Write;
    /// Error type for storage operations.
    type Error: core::fmt::Debug;
    /// The settings tree served over the terminal.
    type Settings: Settings;
    /// Fetch a value from persistent storage
    fn fetch<'a>(
        &mut self,
        buf: &'a mut [u8],
        key: &[u8],
    ) -> Result<Option<&'a [u8]>, Self::Error>;
    /// Store a value to persistent storage
    fn store(
        &mut self,
        buf: &mut [u8],
        key: &[u8],
        value: &[u8],
    ) -> Result<(), Self::Error>;
    /// Remove a key from storage.
    fn clear(&mut self, buf: &mut [u8], key: &[u8]) -> Result<(), Self::Error>;
    /// Execute a platform specific command.
    fn cmd(&mut self, cmd: &str);
    /// Return a mutable reference to the `Interface`.
    fn interface_mut(&mut self) -> &mut Self::Interface;
}
/// Menu handler context: the platform, a scratch buffer, and a flag
/// recording whether any setting was modified.
struct Interface<'a, P> {
    platform: P,
    /// Scratch space for serialization and storage round-trips
    buffer: &'a mut [u8],
    /// Set after a setting is changed or cleared
    updated: bool,
}
impl<'a, P: Platform> Interface<'a, P> {
fn handle_platform(
_menu: &menu::Menu<Self, P::Settings>,
item: &menu::Item<Self, P::Settings>,
args: &[&str],
interface: &mut Self,
_settings: &mut P::Settings,
) {
let key = menu::argument_finder(item, args, "cmd").unwrap().unwrap();
interface.platform.cmd(key)
}
fn iter_root<F>(
key: Option<&str>,
interface: &mut Self,
settings: &mut P::Settings,
mut func: F,
) where
F: FnMut(
Path<&str, '/'>,
&mut Self,
&mut P::Settings,
&mut P::Settings,
),
{
let iter = if let Some(key) = key {
match NodeIter::with_root(P::Settings::SCHEMA, Path::<_, '/'>(key))
{
Ok(it) => it,
Err(e) => {
writeln!(interface, "Failed to locate `{key}`: {e}")
.unwrap();
return;
}
}
} else {
NodeIter::<Path<String<128>, '/'>, MAX_DEPTH>::new(
P::Settings::SCHEMA,
)
};
let mut defaults = settings.clone();
defaults.reset();
for key in iter {
match key {
Ok(key) => func(
Path(key.0.as_str()),
interface,
settings,
&mut defaults,
),
Err(depth) => {
writeln!(
interface,
"Failed to build path: no space at depth {depth}"
)
.unwrap();
}
}
}
}
fn handle_get(
_menu: &menu::Menu<Self, P::Settings>,
item: &menu::Item<Self, P::Settings>,
args: &[&str],
interface: &mut Self,
settings: &mut P::Settings,
) {
let key = menu::argument_finder(item, args, "path").unwrap();
Self::iter_root(
key,
interface,
settings,
|key, interface, settings, defaults| {
// Get current
let check = match json_core::get_by_key(
settings,
key,
interface.buffer,
) {
Err(SerdeError::Value(ValueError::Absent)) => {
return;
}
Err(e) => {
writeln!(interface, "Failed to get `{}`: {e}", key.0)
.unwrap();
return;
}
Ok(len) => {
write!(
interface.platform.interface_mut(),
"{}: {}",
key.0,
core::str::from_utf8(&interface.buffer[..len])
.unwrap()
)
.unwrap();
yafnv::fnv1a::<u32>(&interface.buffer[..len])
}
};
// Get default and compare
match json_core::get_by_key(defaults, key, interface.buffer) {
Err(SerdeError::Value(ValueError::Absent)) => {
write!(interface, " [default: absent]")
}
Err(e) => {
write!(interface, " [default serialization error: {e}]")
}
Ok(len) => {
if yafnv::fnv1a::<u32>(&interface.buffer[..len])
!= check
{
write!(
interface.platform.interface_mut(),
" [default: {}]",
core::str::from_utf8(&interface.buffer[..len])
.unwrap()
)
} else {
write!(interface, " [default]")
}
}
}
.unwrap();
// Get stored and compare
match interface
.platform
.fetch(interface.buffer, key.0.as_bytes())
{
Err(e) => write!(interface, " [fetch error: {e:?}]"),
Ok(None) => write!(interface, " [not stored]"),
Ok(Some(stored)) => {
let slic = ::postcard::de_flavors::Slice::new(stored);
// Use defaults as scratch space for postcard->json conversion
match postcard::set_by_key(defaults, key, slic) {
Err(e) => write!(
interface,
" [stored deserialize error: {e}]"
),
Ok(_rest) => match json_core::get_by_key(
defaults,
key,
interface.buffer,
) {
Err(e) => write!(
interface,
" [stored serialization error: {e}]"
),
Ok(len) => {
if yafnv::fnv1a::<u32>(
&interface.buffer[..len],
) != check
{
write!(
interface.platform.interface_mut(),
" [stored: {}]",
core::str::from_utf8(
&interface.buffer[..len]
)
.unwrap()
)
} else {
write!(interface, " [stored]")
}
}
},
}
}
}
.unwrap();
writeln!(interface).unwrap();
},
);
}
/// `clear` menu command: reset the selected settings subtree to defaults.
///
/// For every leaf under the optional `path` argument this (1) restores the
/// in-memory value to its default when it differs, and (2) removes any
/// persisted copy from platform storage.
fn handle_clear(
    _menu: &menu::Menu<Self, P::Settings>,
    item: &menu::Item<Self, P::Settings>,
    args: &[&str],
    interface: &mut Self,
    settings: &mut P::Settings,
) {
    let key = menu::argument_finder(item, args, "path").unwrap();
    Self::iter_root(
        key,
        interface,
        settings,
        |key, interface, settings, defaults| {
            // Get current value checksum (absent values need no clearing).
            let slic =
                ::postcard::ser_flavors::Slice::new(interface.buffer);
            let check = match postcard::get_by_key(settings, key, slic) {
                Err(SerdeError::Value(ValueError::Absent)) => {
                    return;
                }
                Err(e) => {
                    writeln!(interface, "Failed to get {}: {e:?}", key.0)
                        .unwrap();
                    return;
                }
                Ok(slic) => yafnv::fnv1a::<u32>(slic),
            };
            // Get the default, but only keep it if it differs from the
            // current value (Some(default) => needs restoring).
            let slic =
                ::postcard::ser_flavors::Slice::new(interface.buffer);
            let slic = match postcard::get_by_key(defaults, key, slic) {
                Err(SerdeError::Value(ValueError::Absent)) => {
                    log::warn!(
                        "Can't clear. Default is absent: `{}`",
                        key.0
                    );
                    None
                }
                Err(e) => {
                    writeln!(
                        interface,
                        "Failed to get default `{}`: {e}",
                        key.0
                    )
                    .unwrap();
                    return;
                }
                Ok(slic) => {
                    if yafnv::fnv1a::<u32>(slic) != check {
                        Some(slic)
                    } else {
                        None
                    }
                }
            };
            // Set default back into the active settings when it differed.
            if let Some(slic) = slic {
                let slic = ::postcard::de_flavors::Slice::new(slic);
                match postcard::set_by_key(settings, key, slic) {
                    Err(SerdeError::Value(ValueError::Absent)) => {
                        return;
                    }
                    Err(e) => {
                        writeln!(
                            interface,
                            "Failed to set {}: {e:?}",
                            key.0
                        )
                        .unwrap();
                        return;
                    }
                    Ok(_rest) => {
                        interface.updated = true;
                        writeln!(interface, "Cleared current `{}`", key.0)
                            .unwrap()
                    }
                }
            }
            // Check for a stored copy and remove it if present.
            match interface
                .platform
                .fetch(interface.buffer, key.0.as_bytes())
            {
                Err(e) => {
                    writeln!(
                        interface,
                        "Failed to fetch `{}`: {e:?}",
                        key.0
                    )
                    .unwrap();
                }
                Ok(None) => {}
                // Clear stored
                Ok(Some(_stored)) => match interface
                    .platform
                    .clear(interface.buffer, key.0.as_bytes())
                {
                    Ok(()) => {
                        writeln!(interface, "Clear stored `{}`", key.0)
                    }
                    Err(e) => {
                        writeln!(
                            interface,
                            "Failed to clear `{}` from storage: {e:?}",
                            key.0
                        )
                    }
                }
                .unwrap(),
            }
        },
    );
    interface.updated = true;
    writeln!(interface, "Some values may require reboot to become active")
        .unwrap();
}
/// `store` menu command: persist settings that differ from their defaults
/// (or from the already-stored copy). With `force`, matching values are
/// stored as well.
fn handle_store(
    _menu: &menu::Menu<Self, P::Settings>,
    item: &menu::Item<Self, P::Settings>,
    args: &[&str],
    interface: &mut Self,
    settings: &mut P::Settings,
) {
    let key = menu::argument_finder(item, args, "path").unwrap();
    let force = menu::argument_finder(item, args, "force")
        .unwrap()
        .is_some();
    Self::iter_root(
        key,
        interface,
        settings,
        |key, interface, settings, defaults| {
            // Get default value checksum; values without a default are
            // skipped entirely.
            let slic =
                ::postcard::ser_flavors::Slice::new(interface.buffer);
            let mut check = match postcard::get_by_key(defaults, key, slic)
            {
                // Could also serialize directly into the hasher for all these checksum calcs
                Ok(slic) => yafnv::fnv1a::<u32>(slic),
                Err(SerdeError::Value(ValueError::Absent)) => {
                    log::warn!("Default absent: `{}`", key.0);
                    return;
                }
                Err(e) => {
                    writeln!(
                        interface,
                        "Failed to get `{}` default: {e:?}",
                        key.0
                    )
                    .unwrap();
                    return;
                }
            };
            // Get stored value checksum; an existing stored value replaces
            // the default as the comparison baseline.
            match interface
                .platform
                .fetch(interface.buffer, key.0.as_bytes())
            {
                Ok(None) => {}
                Ok(Some(stored)) => {
                    let stored = yafnv::fnv1a::<u32>(stored);
                    if stored != check {
                        log::debug!(
                            "Stored differs from default: `{}`",
                            key.0
                        );
                    } else {
                        log::debug!("Stored matches default: `{}`", key.0);
                    }
                    check = stored;
                }
                Err(e) => {
                    writeln!(
                        interface,
                        "Failed to fetch `{}`: {e:?}",
                        key.0
                    )
                    .unwrap();
                }
            }
            // Get the current (active) value, serialized into the buffer.
            let slic =
                ::postcard::ser_flavors::Slice::new(interface.buffer);
            let value = match postcard::get_by_key(settings, key, slic) {
                Ok(value) => value,
                Err(SerdeError::Value(ValueError::Absent)) => {
                    return;
                }
                Err(e) => {
                    writeln!(interface, "Could not get `{}`: {e}", key.0)
                        .unwrap();
                    return;
                }
            };
            // Check for mismatch; identical values are skipped unless forced.
            if yafnv::fnv1a::<u32>(value) == check && !force {
                log::debug!(
                    "Not saving matching default/stored `{}`",
                    key.0
                );
                return;
            }
            // Split the buffer: the first `len` bytes hold the serialized
            // value, the remainder is scratch space for the storage driver.
            let len = value.len();
            let (value, rest) = interface.buffer.split_at_mut(len);
            // Store
            match interface.platform.store(rest, key.0.as_bytes(), value) {
                Ok(_) => writeln!(interface, "`{}` stored", key.0),
                Err(e) => {
                    writeln!(
                        interface,
                        "Failed to store `{}`: {e:?}",
                        key.0
                    )
                }
            }
            .unwrap();
        },
    );
    writeln!(interface, "Some values may require reboot to become active")
        .unwrap();
}
/// `set` menu command: parse the JSON-encoded `value` argument and write it
/// into the active settings at `path`. The value is not persisted; `store`
/// must be used for that.
fn handle_set(
    _menu: &menu::Menu<Self, P::Settings>,
    item: &menu::Item<Self, P::Settings>,
    args: &[&str],
    interface: &mut Self,
    settings: &mut P::Settings,
) {
    // Both parameters are mandatory, so the argument finder always yields them.
    let key = menu::argument_finder(item, args, "path").unwrap().unwrap();
    let value =
        menu::argument_finder(item, args, "value").unwrap().unwrap();
    // Deserialize the JSON payload directly into the in-memory settings and
    // report the outcome to the user.
    let report = match json_core::set(settings, key, value.as_bytes()) {
        Err(e) => {
            writeln!(interface, "Failed to set `{key}`: {e:?}")
        }
        Ok(_) => {
            interface.updated = true;
            writeln!(
                interface,
                "Set but not stored. May require store and reboot to activate."
            )
        }
    };
    report.unwrap();
}
/// Build the static serial-settings menu definition.
///
/// Each item binds a command name and its parameters to one of the
/// `handle_*` callbacks above. The item order here is the order shown in
/// the menu's help output.
fn menu() -> menu::Menu<'a, Self, P::Settings> {
    menu::Menu {
        label: "settings",
        items: &[
            // `get` — read/list values (optionally restricted to a subtree).
            &menu::Item {
                command: "get",
                help: Some(
                    "List paths and read current, default, and stored values",
                ),
                item_type: menu::ItemType::Callback {
                    function: Self::handle_get,
                    parameters: &[menu::Parameter::Optional {
                        parameter_name: "path",
                        help: Some(
                            "The path of the value or subtree to list/read.",
                        ),
                    }],
                },
            },
            // `set` — update a single value from a JSON literal.
            &menu::Item {
                command: "set",
                help: Some("Update a value"),
                item_type: menu::ItemType::Callback {
                    function: Self::handle_set,
                    parameters: &[
                        menu::Parameter::Mandatory {
                            parameter_name: "path",
                            help: Some("The path to set"),
                        },
                        menu::Parameter::Mandatory {
                            parameter_name: "value",
                            help: Some(
                                "The value to be written, JSON-encoded",
                            ),
                        },
                    ],
                },
            },
            // `store` — persist non-default values (optionally forced).
            &menu::Item {
                command: "store",
                help: Some("Store values that differ from defaults"),
                item_type: menu::ItemType::Callback {
                    function: Self::handle_store,
                    parameters: &[
                        menu::Parameter::Named {
                            parameter_name: "force",
                            help: Some(
                                "Also store values that match defaults",
                            ),
                        },
                        menu::Parameter::Optional {
                            parameter_name: "path",
                            help: Some(
                                "The path of the value or subtree to store.",
                            ),
                        },
                    ],
                },
            },
            // `clear` — reset to defaults and drop stored copies.
            &menu::Item {
                command: "clear",
                help: Some(
                    "Clear active to defaults and remove all stored values",
                ),
                item_type: menu::ItemType::Callback {
                    function: Self::handle_clear,
                    parameters: &[menu::Parameter::Optional {
                        parameter_name: "path",
                        help: Some(
                            "The path of the value or subtree to clear",
                        ),
                    }],
                },
            },
            // `platform` — forwarded to `Platform::cmd` (reboot/dfu/service).
            &menu::Item {
                command: "platform",
                help: Some("Platform specific commands"),
                item_type: menu::ItemType::Callback {
                    function: Self::handle_platform,
                    parameters: &[menu::Parameter::Mandatory {
                        parameter_name: "cmd",
                        help: Some(
                            "The name of the command (e.g. `reboot`, `service`, `dfu`).",
                        ),
                    }],
                },
            },
        ],
        entry: None,
        exit: None,
    }
}
}
/// Route `core::fmt` formatted output to the platform's serial interface,
/// collapsing any transport error into `core::fmt::Error` (the only error
/// type `fmt` permits).
impl<P: Platform> core::fmt::Write for Interface<'_, P> {
    fn write_str(&mut self, s: &str) -> core::fmt::Result {
        match self.platform.interface_mut().write_all(s.as_bytes()) {
            Ok(()) => Ok(()),
            Err(_) => Err(core::fmt::Error),
        }
    }
}
/// The interface surfaces the same embedded-io error type as the
/// underlying platform transport.
impl<P: Platform> ErrorType for Interface<'_, P> {
    type Error = <P::Interface as ErrorType>::Error;
}
/// Byte-level writes delegate directly to the platform's serial interface.
impl<P: Platform> Write for Interface<'_, P> {
    fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
        self.platform.interface_mut().write(buf)
    }
    fn flush(&mut self) -> Result<(), Self::Error> {
        self.platform.interface_mut().flush()
    }
}
/// Max settings depth
///
/// Bound used when iterating settings-tree paths; `Runner::new` asserts
/// that the schema is no deeper than this.
pub const MAX_DEPTH: usize = 16;
// The Menu runner: a newtype over `menu::Runner` driving the settings
// `Interface` defined above.
pub struct Runner<'a, P: Platform>(
    menu::Runner<'a, Interface<'a, P>, P::Settings, [u8]>,
);
impl<'a, P: Platform> Runner<'a, P> {
    /// Constructor
    ///
    /// # Args
    /// * `platform` - The platform associated with the serial settings, providing the necessary
    /// context and API to manage device settings.
    ///
    /// * `line_buf` - A buffer used for maintaining the serial menu input line. It should be at
    /// least as long as the longest user input.
    ///
    /// * `serialize_buf` - A buffer used for serializing and deserializing settings. This buffer
    /// needs to be at least as big as twice the biggest serialized setting plus its path.
    ///
    /// # Panics
    /// Panics if the settings schema is deeper than [`MAX_DEPTH`].
    pub fn new(
        platform: P,
        line_buf: &'a mut [u8],
        serialize_buf: &'a mut [u8],
        settings: &mut P::Settings,
    ) -> Result<Self, P::Error> {
        // Path-iteration buffers elsewhere are statically sized to MAX_DEPTH.
        assert!(P::Settings::SCHEMA.shape().max_depth <= MAX_DEPTH);
        Ok(Self(menu::Runner::new(
            Interface::menu(),
            line_buf,
            Interface {
                platform,
                buffer: serialize_buf,
                updated: false,
            },
            settings,
        )))
    }
    /// Get the device communication interface
    pub fn interface_mut(&mut self) -> &mut P::Interface {
        self.0.interface.platform.interface_mut()
    }
    // Mutable access to the wrapped platform.
    pub fn platform_mut(&mut self) -> &mut P {
        &mut self.0.interface.platform
    }
    // Shared access to the wrapped platform.
    // NOTE(review): takes `&mut self` although it only returns `&P` —
    // presumably to keep borrow symmetry with `interface_mut`; confirm.
    pub fn platform(&mut self) -> &P {
        &self.0.interface.platform
    }
    /// Must be called periodically to process user input.
    ///
    /// # Returns
    /// A boolean indicating true if the settings were modified.
    pub fn poll(
        &mut self,
        settings: &mut P::Settings,
    ) -> Result<bool, <P::Interface as embedded_io::ErrorType>::Error> {
        // Reset the "updated" flag; menu callbacks set it on modification.
        self.0.interface.updated = false;
        // Drain all pending input in 64-byte chunks and feed it to the menu.
        while self.interface_mut().read_ready()? {
            let mut buffer = [0u8; 64];
            let count = self.interface_mut().read(&mut buffer)?;
            for &value in &buffer[..count] {
                self.0.input_byte(value, settings);
            }
        }
        Ok(self.0.interface.updated)
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/serial_settings/src/interface.rs | serial_settings/src/interface.rs | /// Wrapper type for a "best effort" serial interface.
///
/// # Note
/// Overflows of the output are silently ignored.
pub struct BestEffortInterface<T>(T);
impl<T> BestEffortInterface<T>
where
    T: embedded_io::Write
        + embedded_io::WriteReady
        + embedded_io::Read
        + embedded_io::ReadReady,
{
    /// Construct an interface where overflows and errors when writing on the output are silently
    /// ignored.
    pub fn new(interface: T) -> Self {
        Self(interface)
    }
    /// Get access to the inner (wrapped) interface
    pub fn inner(&self) -> &T {
        &self.0
    }
    /// Get mutable access to the inner (wrapped) interface
    pub fn inner_mut(&mut self) -> &mut T {
        &mut self.0
    }
}
/// Best-effort write path: data is forwarded only when the transport can
/// accept it right now; otherwise the bytes are silently discarded while
/// still reporting full success so callers never block or error.
impl<T> embedded_io::Write for BestEffortInterface<T>
where
    T: embedded_io::Write + embedded_io::WriteReady,
{
    fn write(&mut self, buf: &[u8]) -> Result<usize, T::Error> {
        match self.0.write_ready() {
            // Transport has room — forward the write.
            Ok(true) => self.0.write(buf),
            // Not ready (or readiness check failed) — drop the data but
            // claim the whole buffer was consumed.
            _ => Ok(buf.len()),
        }
    }
    fn flush(&mut self) -> Result<(), Self::Error> {
        // Nothing is buffered locally; dropped data is by design.
        Ok(())
    }
}
// Error type is inherited from the wrapped transport.
impl<T> embedded_io::ErrorType for BestEffortInterface<T>
where
    T: embedded_io::ErrorType,
{
    type Error = <T as embedded_io::ErrorType>::Error;
}
// Reads are forwarded unmodified — only the write path is best-effort.
impl<T> embedded_io::Read for BestEffortInterface<T>
where
    T: embedded_io::Read,
{
    fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
        self.0.read(buf)
    }
}
// Read-readiness is forwarded unmodified.
impl<T> embedded_io::ReadReady for BestEffortInterface<T>
where
    T: embedded_io::ReadReady,
{
    fn read_ready(&mut self) -> Result<bool, Self::Error> {
        self.0.read_ready()
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/platform/src/mqtt_app.rs | platform/src/mqtt_app.rs | use core::fmt::Write;
use heapless::String;
use miniconf::Tree;
use smoltcp_nal::smoltcp::wire::EthernetAddress;
/// Settings that are used for configuring the network interface to Stabilizer.
// NOTE: the `///` doc comments on the fields below are exported as runtime
// schema metadata via `#[tree(meta(doc, typename))]` — edit them with care.
#[derive(Clone, Debug, Tree)]
#[tree(meta(doc, typename))]
pub struct NetSettings {
    /// The broker domain name (or IP address) to use for MQTT connections.
    pub broker: String<255>,
    /// The MQTT ID to use upon connection with a broker.
    pub id: String<23>,
    /// An optional static IP address to use. An unspecified IP address (or malformed address) will
    /// use DHCP.
    pub ip: String<15>,
    // Excluded from the settings tree; populated from hardware at startup.
    #[tree(skip)]
    /// The MAC address of Stabilizer, which is used to reinitialize the ID to default settings.
    pub mac: EthernetAddress,
}
/// Placeholder defaults: broker name `mqtt`, an unspecified IP (DHCP), and
/// an ID of `<mac>` that `NetSettings::new` replaces with the real MAC.
impl Default for NetSettings {
    fn default() -> Self {
        // All literals fit their heapless capacities, so `try_from` cannot fail.
        let broker = String::try_from("mqtt").unwrap();
        let id = String::try_from("<mac>").unwrap();
        let ip = String::try_from("0.0.0.0").unwrap();
        Self {
            broker,
            id,
            ip,
            mac: EthernetAddress::default(),
        }
    }
}
impl NetSettings {
    /// Build settings for a device with the given MAC address; the MQTT ID
    /// defaults to the MAC's textual form, everything else to `Default`.
    pub fn new(mac: EthernetAddress) -> Self {
        let mut id = String::new();
        // A formatted MAC is 17 characters, which fits the 23-byte `id`.
        write!(&mut id, "{mac}").unwrap();
        Self {
            id,
            mac,
            ..Default::default()
        }
    }
}
/// Implemented by application settings structs that embed [`NetSettings`].
pub trait AppSettings {
    /// Construct the settings given known network settings.
    fn new(net: NetSettings) -> Self;
    /// Get the network settings from the application settings.
    fn net(&self) -> &NetSettings;
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/platform/src/settings.rs | platform/src/settings.rs | //! Stabilizer Settings Management
//!
//! # Design
//! Stabilizer supports two types of settings:
//! 1. Static Device Configuration
//! 2. Dynamic Run-time Settings
//!
//! Static device configuration settings are loaded and used only at device power-up. These include
//! things like the MQTT broker address and the MQTT identifier. Conversely, the dynamic run-time
//! settings can be changed and take effect immediately during device operation.
//!
//! This settings management interface is currently targeted at the static device configuration
//! settings. Settings are persisted into the unused 1MB flash bank of Stabilizer for future
//! recall. They can be modified via the USB interface to facilitate device configuration.
//!
//! Settings are stored in flash using a key-value pair mapping, where the `key` is the name of the
//! entry in the settings structure. This has a number of benefits:
//! 1. The `Settings` structure can have new entries added to it in the future without losing old
//! settings values, as each entry of the `Settings` struct is stored separately as its own
//! key-value pair.
//! 2. The `Settings` can be used among multiple Stabilizer firmware versions that need the same
//! settings values
//! 3. Unknown/unneeded settings values in flash can be actively ignored, facilitating simple flash
//! storage sharing.
use crate::{dfu, metadata::ApplicationMetadata};
use embassy_futures::block_on;
use embedded_io::{Read as EioRead, ReadReady, Write as EioWrite, WriteReady};
use embedded_storage_async::nor_flash::NorFlash;
use heapless::{String, Vec};
use miniconf::{
Path, TreeDeserializeOwned, TreeSchema, TreeSerialize, postcard,
};
use sequential_storage::{
cache::NoCache,
map::{SerializationError, fetch_item, store_item},
};
use serial_settings::{BestEffortInterface, Platform, Settings};
// Flash key for one setting: the byte form of its miniconf path.
#[derive(
    Default, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq,
)]
pub struct SettingsKey(Vec<u8, 128>);
// Key codec for `sequential_storage`'s key-value map: keys are
// postcard-encoded path bytes.
impl sequential_storage::map::Key for SettingsKey {
    /// Postcard-encode the key into `buffer`, returning the encoded length.
    fn serialize_into(
        &self,
        buffer: &mut [u8],
    ) -> Result<usize, SerializationError> {
        Ok(::postcard::to_slice(self, buffer)
            .map_err(|_| SerializationError::BufferTooSmall)?
            .len())
    }
    /// Decode a key from the front of `buffer`, returning the key and how
    /// many bytes it consumed.
    fn deserialize_from(
        buffer: &[u8],
    ) -> Result<(Self, usize), SerializationError> {
        let original_length = buffer.len();
        // NOTE(review): every postcard decode failure is reported as
        // `BufferTooSmall`, including malformed data — confirm whether a
        // format-error variant of `SerializationError` would be more accurate.
        let (result, remainder) = ::postcard::take_from_bytes(buffer)
            .map_err(|_| SerializationError::BufferTooSmall)?;
        Ok((result, original_length - remainder.len()))
    }
}
// Glue between the `serial_settings` menu and this board: serial transport,
// flash-backed persistence, and platform commands.
pub struct SerialSettingsPlatform<C, F, S> {
    /// The interface to read/write data to/from serially (via text) to the user.
    pub interface: BestEffortInterface<S>,
    // `C` (the settings type) appears only in trait bounds, not as data.
    pub _settings_marker: core::marker::PhantomData<C>,
    /// The storage mechanism used to persist settings to between boots.
    pub storage: F,
    /// Metadata associated with the application
    pub metadata: &'static ApplicationMetadata,
}
impl<C, F, S> SerialSettingsPlatform<C, F, S>
where
    C: TreeDeserializeOwned + TreeSerialize + TreeSchema,
    F: NorFlash,
{
    /// Populate `structure` at boot from any values persisted in `storage`.
    ///
    /// Unknown, missing, or undecodable entries are skipped with a warning,
    /// so the remainder of the settings still load.
    pub fn load(structure: &mut C, storage: &mut F) {
        // Loop over flash and read settings
        let mut buffer = [0u8; 512];
        for path in C::SCHEMA
            .nodes::<Path<String<128>, '/'>, { serial_settings::MAX_DEPTH }>()
        {
            let path = path.unwrap();
            // Try to fetch the setting from flash.
            let value: &[u8] = match block_on(fetch_item(
                storage,
                0..storage.capacity() as _,
                &mut NoCache::new(),
                &mut buffer,
                &SettingsKey(path.clone().into_inner().into_bytes()),
            )) {
                Err(e) => {
                    log::warn!(
                        "Failed to fetch `{}` from flash: {e:?}",
                        path.0.as_str()
                    );
                    continue;
                }
                Ok(Some(value)) => value,
                Ok(None) => continue,
            };
            // An empty vector may be saved to flash to "erase" a setting, since the H7 doesn't support
            // multi-write NOR flash. If we see an empty vector, ignore this entry.
            if value.is_empty() {
                continue;
            }
            log::info!("Loading initial `{}` from flash", path.0.as_str());
            let flavor = ::postcard::de_flavors::Slice::new(value);
            if let Err(e) = postcard::set_by_key(structure, &path, flavor) {
                log::warn!(
                    "Failed to deserialize `{}` from flash: {e:?}",
                    path.0.as_str()
                );
            }
        }
    }
}
impl<C, F, S> Platform for SerialSettingsPlatform<C, F, S>
where
    C: Settings,
    F: NorFlash,
    S: EioWrite + WriteReady + ReadReady + EioRead,
{
    type Interface = BestEffortInterface<S>;
    type Settings = C;
    type Error = sequential_storage::Error<F::Error>;
    /// Fetch the persisted value for `key` from flash into `buf`.
    /// Empty entries act as tombstones (see `clear`) and read as `None`.
    fn fetch<'a>(
        &mut self,
        buf: &'a mut [u8],
        key: &[u8],
    ) -> Result<Option<&'a [u8]>, Self::Error> {
        let range = 0..self.storage.capacity() as _;
        block_on(fetch_item(
            &mut self.storage,
            range,
            &mut NoCache::new(),
            buf,
            &SettingsKey(Vec::try_from(key).unwrap()),
        ))
        .map(|v| v.filter(|v: &&[u8]| !v.is_empty()))
    }
    /// Persist `value` under `key`; `buf` is scratch space for the driver.
    fn store(
        &mut self,
        buf: &mut [u8],
        key: &[u8],
        value: &[u8],
    ) -> Result<(), Self::Error> {
        let range = 0..self.storage.capacity() as _;
        block_on(store_item(
            &mut self.storage,
            range,
            &mut NoCache::new(),
            buf,
            &SettingsKey(Vec::try_from(key).unwrap()),
            &value,
        ))
    }
    /// "Remove" a key by storing an empty value as a tombstone, since the
    /// flash map does not support deletion directly here.
    fn clear(&mut self, buf: &mut [u8], key: &[u8]) -> Result<(), Self::Error> {
        self.store(buf, key, b"")
    }
    /// Dispatch the `platform` menu command: reboot, enter DFU, or print
    /// the service (metadata) report.
    fn cmd(&mut self, cmd: &str) {
        match cmd {
            "reboot" => cortex_m::peripheral::SCB::sys_reset(),
            "dfu" => dfu::dfu_reboot(),
            "service" => {
                write!(&mut self.interface, "{}", &self.metadata).unwrap();
            }
            _ => {
                writeln!(
                    self.interface_mut(),
                    "Invalid platform command: `{cmd}` not in [`dfu`, `reboot`, `service`]"
                )
                .ok();
            }
        }
    }
    fn interface_mut(&mut self) -> &mut Self::Interface {
        &mut self.interface
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/platform/src/flash.rs | platform/src/flash.rs | use embedded_storage::nor_flash::{ErrorType, NorFlash, ReadNorFlash};
// Adapter exposing a blocking flash driver through the async
// `embedded_storage_async` traits (operations complete synchronously).
pub struct AsyncFlash<T>(pub T);
// A read-only flash that can be temporarily unlocked for write/erase,
// yielding a short-lived writable handle.
pub trait UnlockFlash: ReadNorFlash {
    type Unlocked<'a>: NorFlash<Error = Self::Error>
    where
        Self: 'a;
    fn unlock(&mut self) -> Self::Unlocked<'_>;
}
// Error type is inherited from the wrapped blocking driver.
impl<T: ReadNorFlash> ErrorType for AsyncFlash<T> {
    type Error = T::Error;
}
// Async reads simply delegate to the blocking implementation and complete
// immediately.
impl<T: ReadNorFlash> embedded_storage_async::nor_flash::ReadNorFlash
    for AsyncFlash<T>
{
    const READ_SIZE: usize = T::READ_SIZE;
    async fn read(
        &mut self,
        offset: u32,
        bytes: &mut [u8],
    ) -> Result<(), Self::Error> {
        self.0.read(offset, bytes)
    }
    fn capacity(&self) -> usize {
        self.0.capacity()
    }
}
// Write/erase unlock the flash for the duration of each operation and then
// delegate to the blocking implementation.
impl<T: UnlockFlash> embedded_storage_async::nor_flash::NorFlash
    for AsyncFlash<T>
{
    const WRITE_SIZE: usize = T::Unlocked::WRITE_SIZE;
    const ERASE_SIZE: usize = T::Unlocked::ERASE_SIZE;
    async fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> {
        self.0.unlock().erase(from, to)
    }
    async fn write(
        &mut self,
        offset: u32,
        bytes: &[u8],
    ) -> Result<(), Self::Error> {
        self.0.unlock().write(offset, bytes)
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/platform/src/lib.rs | platform/src/lib.rs | #![no_std]
#[cfg(target_arch = "arm")]
mod dfu;
#[cfg(target_arch = "arm")]
pub use dfu::*;
#[cfg(target_arch = "arm")]
mod flash;
#[cfg(target_arch = "arm")]
pub use flash::*;
#[cfg(target_arch = "arm")]
mod settings;
#[cfg(target_arch = "arm")]
pub use settings::*;
mod metadata;
pub use metadata::*;
mod mqtt_app;
pub use mqtt_app::*;
mod telemetry;
pub use telemetry::*;
mod delay;
pub use delay::*;
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/platform/src/telemetry.rs | platform/src/telemetry.rs | //! Stabilizer Telemetry Capabilities
//!
//! # Design
//! Telemetry is reported regularly using an MQTT client. All telemetry is reported in SI units
//! using standard JSON format.
//!
//! In order to report ADC/DAC codes generated during the DSP routines, a telemetry buffer is
//! employed to track the latest codes. Converting these codes to SI units would result in
//! repetitive and unnecessary calculations within the DSP routine, slowing it down and limiting
//! sampling frequency. Instead, the raw codes are stored and the telemetry is generated as
//! required immediately before transmission. This ensures that any slower computation required
//! for unit conversion can be off-loaded to lower priority tasks.
use crate::ApplicationMetadata;
use heapless::String;
use minimq::{
PubError, Publication,
embedded_nal::{Dns, TcpClientStack},
embedded_time::Clock,
};
use serde::Serialize;
use smoltcp_nal::NetworkError;
/// Default metadata message if formatting errors occur.
const DEFAULT_METADATA: &str = "{\"message\":\"Truncated: See USB terminal\"}";
/// The telemetry client for reporting telemetry data over MQTT.
pub struct TelemetryClient<C: Clock, S: TcpClientStack> {
    // Underlying MQTT connection (broker resolved by name).
    mqtt: minimq::Minimq<'static, S, C, minimq::broker::NamedBroker<S>>,
    // Topic prefix that all telemetry is published under.
    prefix: &'static str,
    // Whether the device metadata has been published on this connection.
    meta_published: bool,
    // Static application/build metadata, published once per connection.
    metadata: &'static ApplicationMetadata,
}
impl<C: Clock, S: TcpClientStack<Error = smoltcp_nal::NetworkError> + Dns>
    TelemetryClient<C, S>
{
    /// Construct a new telemetry client.
    ///
    /// # Args
    /// * `mqtt` - The MQTT client
    /// * `prefix` - The device prefix to use for MQTT telemetry reporting.
    /// * `metadata` - Static application metadata, published on (re)connect.
    ///
    /// # Returns
    /// A new telemetry client.
    pub fn new(
        mqtt: minimq::Minimq<'static, S, C, minimq::broker::NamedBroker<S>>,
        prefix: &'static str,
        metadata: &'static ApplicationMetadata,
    ) -> Self {
        Self {
            mqtt,
            meta_published: false,
            prefix,
            metadata,
        }
    }
    /// Publish telemetry over MQTT
    ///
    /// # Note
    /// Telemetry is reported in a "best-effort" fashion. Failure to transmit telemetry will cause
    /// it to be silently dropped.
    ///
    /// # Args
    /// * `telemetry` - The telemetry to report
    pub fn publish_telemetry<T: Serialize>(
        &mut self,
        suffix: &str,
        telemetry: &T,
    ) {
        // NOTE(review): both `try_into` and `push_str` panic if
        // prefix + suffix exceed the 128-byte topic capacity — confirm the
        // configured prefixes keep topics under this bound.
        let mut topic: String<128> = self.prefix.try_into().unwrap();
        topic.push_str(suffix).unwrap();
        // Publish failures are logged and dropped (best-effort).
        self.publish(&topic, telemetry)
            .map_err(|e| log::error!("Telemetry publishing error: {:?}", e))
            .ok();
    }
    /// JSON-serialize `payload` and publish it on `topic`.
    pub fn publish<T: Serialize>(
        &mut self,
        topic: &str,
        payload: &T,
    ) -> Result<(), PubError<NetworkError, serde_json_core::ser::Error>> {
        self.mqtt
            .client()
            .publish(minimq::Publication::new(topic, |buf: &mut [u8]| {
                serde_json_core::to_slice(payload, buf)
            }))
    }
    /// Update the telemetry client
    ///
    /// # Note
    /// This function is provided to force the underlying MQTT state machine to process incoming
    /// and outgoing messages. Without this, the client will never connect to the broker. This
    /// should be called regularly.
    pub fn update(&mut self) {
        match self.mqtt.poll(|_client, _topic, _message, _properties| {}) {
            // An unaddressable broker is expected while the network is down;
            // suppress it to avoid log spam.
            Err(minimq::Error::Network(
                smoltcp_nal::NetworkError::TcpConnectionFailure(
                    smoltcp_nal::smoltcp::socket::tcp::ConnectError::Unaddressable
                ),
            )) => {}
            Err(error) => log::info!("Unexpected error: {:?}", error),
            _ => {}
        }
        // On disconnect, re-arm metadata publication for the next session.
        if !self.mqtt.client().is_connected() {
            self.meta_published = false;
            return;
        }
        // Publish application metadata once per connection, when possible.
        if !self.meta_published
            && self.mqtt.client().can_publish(minimq::QoS::AtMostOnce)
        {
            let Self { mqtt, metadata, .. } = self;
            let mut topic: String<128> = self.prefix.try_into().unwrap();
            topic.push_str("/meta").unwrap();
            if mqtt
                .client()
                .publish(Publication::new(&topic, |buf: &mut [u8]| {
                    serde_json_core::to_slice(&metadata, buf)
                }))
                .is_err()
            {
                // Note(unwrap): We can guarantee that this message will be sent because we checked
                // for ability to publish above.
                mqtt.client()
                    .publish(Publication::new(
                        &topic,
                        DEFAULT_METADATA.as_bytes(),
                    ))
                    .unwrap();
            }
            self.meta_published = true;
        }
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/platform/src/delay.rs | platform/src/delay.rs | //! Basic blocking delay
//!
//! This module provides a basic asm-based blocking delay.
use embedded_hal_02::blocking::delay::DelayUs;
/// A basic delay implementation.
/// A basic delay implementation.
pub struct AsmDelay {
    // Pre-computed CPU cycles per microsecond.
    frequency_us: u32,
}
impl AsmDelay {
    /// Create a new delay.
    ///
    /// # Args
    /// * `freq` - The CPU core frequency.
    ///
    /// NOTE(review): integer division means a `freq` below 1 MHz yields a
    /// zero cycles-per-microsecond factor (delays become no-ops) — confirm
    /// target core clocks are always >= 1 MHz.
    pub fn new(freq: u32) -> AsmDelay {
        AsmDelay {
            frequency_us: freq / 1_000_000,
        }
    }
}
impl<U> DelayUs<U> for AsmDelay
where
    U: Into<u32>,
{
    /// Busy-wait for approximately `us` microseconds via `cortex_m::asm::delay`.
    ///
    /// NOTE(review): `frequency_us * us` can overflow `u32` for very long
    /// delays on fast cores — confirm callers keep `us` small.
    fn delay_us(&mut self, us: U) {
        cortex_m::asm::delay(self.frequency_us * us.into())
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/platform/src/dfu.rs | platform/src/dfu.rs | use core::{
ptr,
sync::atomic::{self, Ordering},
};
/// Flag used to indicate that a reboot to DFU is requested.
const DFU_FLAG: u32 = 0xDEAD_BEEF;
// `_dfu_flag` is a linker-provided symbol; only its address matters — all
// accesses go through unaligned `u32` pointer casts below.
unsafe extern "C" {
    unsafe static mut _dfu_flag: u8;
}
/// Indicate a reboot to DFU is requested.
///
/// Writes the magic flag to the linker-reserved `_dfu_flag` location
/// (presumably placed in memory that survives a core reset — see the linker
/// script) and resets; startup code then checks it via `dfu_flag_is_set`.
pub fn dfu_reboot() {
    unsafe {
        // SAFETY: `_dfu_flag` is a dedicated linker-reserved location, and
        // `write_unaligned` tolerates its arbitrary alignment.
        ptr::write_unaligned(ptr::addr_of_mut!(_dfu_flag).cast(), DFU_FLAG);
    }
    cortex_m::peripheral::SCB::sys_reset();
}
/// Check if the DFU reboot flag is set, indicating a reboot to DFU is requested.
///
/// Reading the flag also clears it, so the request cannot persist across
/// subsequent reboots.
pub fn dfu_flag_is_set() -> bool {
    unsafe {
        let start_ptr = ptr::addr_of_mut!(_dfu_flag).cast();
        let set = DFU_FLAG == ptr::read_unaligned(start_ptr);
        // Clear the boot flag after checking it to ensure it doesn't stick between reboots.
        core::ptr::write_unaligned(start_ptr, 0);
        // Ensure the clearing write completes before boot continues.
        atomic::fence(Ordering::SeqCst);
        cortex_m::asm::dsb();
        set
    }
}
/// Execute the DFU bootloader stored in system memory.
///
/// # Note
/// This function must be called before any system configuration is performed, as the DFU
/// bootloader expects the system in an uninitialized state.
pub fn bootload_dfu() {
    // This process is largely adapted from
    // https://community.st.com/t5/stm32-mcus/jump-to-bootloader-from-application-on-stm32h7-devices/ta-p/49510
    cortex_m::interrupt::disable();
    // Disable the SysTick peripheral.
    let systick = unsafe { &*cortex_m::peripheral::SYST::PTR };
    unsafe {
        // SAFETY: raw register writes to SysTick control/reload/current —
        // zeroing them stops the tick before handing off to the bootloader.
        systick.csr.write(0);
        systick.rvr.write(0);
        systick.cvr.write(0);
    }
    // Clear NVIC interrupt flags and enables.
    let nvic = unsafe { &*cortex_m::peripheral::NVIC::PTR };
    for reg in nvic.icer.iter() {
        unsafe {
            reg.write(u32::MAX);
        }
    }
    for reg in nvic.icpr.iter() {
        unsafe {
            reg.write(u32::MAX);
        }
    }
    // SAFETY: interrupts were disabled above; re-enable so the bootloader
    // starts with the expected PRIMASK state.
    unsafe { cortex_m::interrupt::enable() };
    log::info!("Jumping to DFU");
    // The chip does not provide a means to modify the BOOT pins during
    // run-time. Jump to the bootloader in system memory instead.
    unsafe {
        cortex_m::asm::bootload(0x1FF0_9800 as _);
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
quartiq/stabilizer | https://github.com/quartiq/stabilizer/blob/cb1f14508dffa91c636cb3ed3701e24b04e469d2/platform/src/metadata.rs | platform/src/metadata.rs | use core::fmt;
use serde::Serialize;
/// Build- and run-time information reported by the serial `service` command
/// and published as JSON device metadata.
#[derive(Serialize)]
pub struct ApplicationMetadata {
    pub firmware_version: &'static str,
    pub rust_version: &'static str,
    pub profile: &'static str,
    pub git_dirty: bool,
    pub features: &'static str,
    pub panic_info: &'static str,
    pub hardware_version: &'static str,
}
impl fmt::Display for ApplicationMetadata {
    /// Render the metadata as a human-readable, one-line-per-field report
    /// (consumed by the serial `service` command).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Use `writeln!` so each field ends with a newline; the previous
        // implementation used bare `write_fmt` calls with no separator,
        // which mashed every field onto a single line.
        writeln!(
            f,
            "{:<20}: {} [{}]",
            "Version", self.firmware_version, self.profile,
        )?;
        writeln!(
            f,
            "{:<20}: {}",
            "Hardware Revision", self.hardware_version
        )?;
        writeln!(f, "{:<20}: {}", "Rustc Version", self.rust_version)?;
        writeln!(f, "{:<20}: {}", "Features", self.features)?;
        writeln!(f, "{:<20}: {}", "Panic Info", self.panic_info)?;
        Ok(())
    }
}
| rust | Apache-2.0 | cb1f14508dffa91c636cb3ed3701e24b04e469d2 | 2026-01-04T20:16:49.858302Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/lib.rs | src/lib.rs | pub mod cli;
pub mod client;
pub mod error;
pub mod global_conn;
pub(crate) mod networking;
pub mod server;
pub mod shutdown;
pub mod telemetry;
// Box a value and leak it, yielding a `'static` reference for the process
// lifetime. `Box::from` is used deliberately (not `Box::new`): it also
// permits conversions such as `String`/`&str` -> `Box<str>` at call sites.
#[macro_export]
macro_rules! leak {
    // Leak `$val` and coerce the result to a shared `&'static $ty`.
    ($val:expr, $ty:ty) => {
        std::boxed::Box::leak(std::boxed::Box::from($val)) as &'static $ty
    };
    // Leak `$val`, yielding `&'static mut` to the boxed value.
    ($val:expr) => {
        std::boxed::Box::leak(std::boxed::Box::from($val))
    };
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/shutdown.rs | src/shutdown.rs | use std::sync::atomic::{AtomicBool, Ordering};
use tokio::sync::Notify;
use crate::leak;
// How often to log progress while waiting for connections to drain.
const WAIT_STOP_PRINT: std::time::Duration = std::time::Duration::from_secs(5);
// One-shot shutdown latch: a flag for late arrivals plus a notifier that
// wakes tasks already blocked in `wait()`.
pub struct ShutdownSignal {
    // Set (never reset) once `shutdown()` has been called.
    is_shutdown: AtomicBool,
    // Wakes tasks parked in `wait()`.
    notify: Notify,
}
impl Default for ShutdownSignal {
    // Equivalent to `ShutdownSignal::new()`: not yet shut down.
    fn default() -> Self {
        Self::new()
    }
}
impl ShutdownSignal {
    /// Create a signal in the "not shut down" state.
    pub fn new() -> Self {
        ShutdownSignal {
            is_shutdown: AtomicBool::new(false),
            notify: Notify::new(),
        }
    }
    /// Latch the shutdown flag and wake every task blocked in [`Self::wait`].
    pub fn shutdown(&self) {
        self.is_shutdown.store(true, Ordering::Relaxed);
        self.notify.notify_waiters();
    }
    /// Wait until [`Self::shutdown`] has been called. Returns immediately if
    /// it already was.
    pub async fn wait(&self) {
        // Create the `Notified` future *before* reading the flag. Tokio
        // guarantees that a `Notified` future receives wakeups from
        // `notify_waiters()` from the moment it is created, even if it has
        // not been polled yet. This closes the race where `shutdown()` runs
        // between the flag check and registration: the previous code checked
        // the flag first and only then called `notified()`, so a shutdown in
        // that window would never wake the waiter.
        let notified = self.notify.notified();
        if self.is_shutdown.load(Ordering::Relaxed) {
            return;
        }
        notified.await;
    }
}
/// Install a Ctrl-C handler that waits for every active connection to
/// disconnect before triggering shutdown. Returns a leaked, process-lifetime
/// signal that tasks can `wait()` on.
pub fn graceful() -> &'static ShutdownSignal {
    use crate::global_conn::current_connections;
    let signal = leak!(ShutdownSignal::new()) as &'static ShutdownSignal;
    {
        ctrlc::set_handler(move || {
            log::info!(target: "faucet", "Received stop signal, waiting for all users to disconnect");
            let mut last_5_sec = std::time::Instant::now();
            // NOTE(review): this is a busy-wait (yield_now in a loop) on the
            // signal-handler thread — presumably acceptable since shutdown is
            // rare and short-lived; confirm a blocking wait isn't preferable.
            while current_connections() > 0 {
                std::thread::yield_now();
                // Emit a progress line at most every WAIT_STOP_PRINT.
                if last_5_sec.elapsed() > WAIT_STOP_PRINT {
                    log::info!(
                        target: "faucet",
                        "Active connections = {}, waiting for all connections to stop.",
                        current_connections()
                    );
                    last_5_sec = std::time::Instant::now();
                }
            }
            signal.shutdown();
        })
        .expect("Unable to set term handler. This is a bug");
    }
    signal
}
/// Install a Ctrl-C handler that triggers shutdown immediately, without
/// waiting for active connections to drain. Returns a leaked,
/// process-lifetime signal.
pub fn immediate() -> &'static ShutdownSignal {
    let signal = leak!(ShutdownSignal::new()) as &'static ShutdownSignal;
    {
        ctrlc::set_handler(move || {
            log::info!(target: "faucet", "Starting immediate shutdown handle");
            signal.shutdown()
        })
        .expect("Unable to set term handler. This is a bug");
    }
    signal
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/cli.rs | src/cli.rs | use std::{
ffi::OsString,
path::{Path, PathBuf},
};
use clap::{Parser, Subcommand};
use crate::client::{load_balancing, worker::WorkerType};
/// A directory hosts a Plumber API if either of its conventional entry
/// files (`plumber.R` or `entrypoint.R`) exists.
fn is_plumber(dir: &Path) -> bool {
    ["plumber.R", "entrypoint.R"]
        .iter()
        .any(|name| dir.join(name).exists())
}
/// A directory hosts a Shiny app if it uses the single-file layout
/// (`app.R`) or the split layout (`ui.R` together with `server.R`).
fn is_shiny(dir: &Path) -> bool {
    if dir.join("app.R").exists() {
        return true;
    }
    dir.join("ui.R").exists() && dir.join("server.R").exists()
}
// Kind of application faucet should serve. `Auto` detects the framework
// from the files present (see `is_plumber`/`is_shiny`). Using `//` comments
// here on purpose: `///` would become clap help text.
#[derive(clap::ValueEnum, Debug, Clone, Copy)]
enum ServerType {
    FastAPI,
    Plumber,
    Shiny,
    QuartoShiny,
    Auto,
}
// CLI-facing load-balancing strategies; mapped onto the internal
// `load_balancing::Strategy` by the `From` impl below. The `///` variant
// docs double as clap help text — leave them as-is.
#[derive(clap::ValueEnum, Debug, Clone, Copy)]
pub enum Strategy {
    /// Sends requests to workers in a round-robin fashion.
    RoundRobin,
    /// Hashes the IP address of the client to determine which worker to send the request to.
    IpHash,
    /// Adds a cookie to the requests to identify the worker to send the
    /// request to. This is useful for sticky sessions from within the same
    /// network.
    CookieHash,
    /// Round-robin with RPS (Requests Per Second) scaling.
    Rps,
}
// One-to-one mapping from the CLI enum to the internal strategy type.
impl From<Strategy> for load_balancing::Strategy {
    fn from(value: Strategy) -> Self {
        match value {
            Strategy::RoundRobin => load_balancing::Strategy::RoundRobin,
            Strategy::IpHash => load_balancing::Strategy::IpHash,
            Strategy::CookieHash => load_balancing::Strategy::CookieHash,
            Strategy::Rps => load_balancing::Strategy::Rps,
        }
    }
}
// Source of the client IP for IP-dependent balancing strategies.
// `Client` uses the TCP peer address; the header variants presumably trust
// a fronting proxy's `X-Forwarded-For` / `X-Real-IP` headers.
#[derive(clap::ValueEnum, Debug, Clone, Copy)]
pub enum IpFrom {
    Client,
    XForwardedFor,
    XRealIp,
}
// Map the CLI enum onto the internal extractor type.
impl From<IpFrom> for load_balancing::IpExtractor {
    fn from(value: IpFrom) -> Self {
        match value {
            IpFrom::Client => load_balancing::IpExtractor::ClientAddr,
            IpFrom::XForwardedFor => load_balancing::IpExtractor::XForwardedFor,
            IpFrom::XRealIp => load_balancing::IpExtractor::XRealIp,
        }
    }
}
// CLI-selectable shutdown behavior; see `shutdown::graceful` and
// `shutdown::immediate` for the corresponding handlers.
#[derive(clap::ValueEnum, Debug, Clone, Copy, Default)]
pub enum Shutdown {
    Graceful,
    #[default]
    Immediate,
}
// Arguments for `faucet start`. Every option can also be supplied through
// the corresponding FAUCET_* environment variable; the `///` field docs are
// the clap help text and must stay as written.
#[derive(Parser, Debug)]
pub struct StartArgs {
    /// The number of threads to use to handle requests.
    #[arg(short, long, env = "FAUCET_WORKERS", default_value_t = num_cpus::get())]
    pub workers: usize,
    /// The load balancing strategy to use.
    #[arg(short, long, env = "FAUCET_STRATEGY", default_value = "round-robin")]
    pub strategy: Strategy,
    /// The type of workers to spawn.
    #[arg(short, long, env = "FAUCET_TYPE", default_value = "auto")]
    type_: ServerType,
    /// The directory to spawn workers in.
    /// Defaults to the current directory.
    #[arg(short, long, env = "FAUCET_DIR", default_value = ".")]
    pub dir: PathBuf,
    /// Argument passed on to `appDir` when running Shiny.
    #[arg(long, short, env = "FAUCET_APP_DIR", default_value = None)]
    pub app_dir: Option<String>,
    /// Quarto Shiny file path.
    #[arg(long, short, env = "FAUCET_QMD", default_value = None)]
    pub qmd: Option<PathBuf>,
    /// The maximum requests per second for the RPS autoscaler strategy.
    #[arg(long, env = "FAUCET_MAX_RPS", default_value = None)]
    pub max_rps: Option<f64>,
}
// Arguments for the `router` subcommand: just the path to the TOML router
// configuration file.
#[derive(Parser, Debug)]
pub struct RouterArgs {
    /// Router config file.
    #[arg(
        long,
        short,
        env = "FAUCET_ROUTER_CONF",
        default_value = "./frouter.toml"
    )]
    pub conf: PathBuf,
}
// Top-level subcommands. Variant `///` comments are clap help text and are
// left untouched.
#[derive(Subcommand, Debug)]
pub enum Commands {
    /// Start a simple faucet server.
    #[command(name = "start")]
    Start(StartArgs),
    /// Runs faucet in "router" mode.
    #[command(name = "router")]
    Router(RouterArgs),
    /// Run an Rscript through faucet.
    Rscript {
        // Everything after the subcommand is forwarded verbatim to Rscript,
        // hyphenated flags included.
        #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
        args: Vec<OsString>,
    },
    /// Run an uv through faucet.
    Uv {
        // Forwarded verbatim to uv, as above.
        #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
        args: Vec<OsString>,
    },
}
// SSL mode for the telemetry PostgreSQL connection; variants mirror the
// libpq `sslmode` values (see `PgSslMode::as_str`).
#[derive(Debug, Clone, Copy, clap::ValueEnum)]
pub enum PgSslMode {
    Disable,
    Prefer,
    Require,
    VerifyCa,
    VerifyFull,
}
impl PgSslMode {
pub fn as_str(self) -> &'static str {
match self {
Self::Disable => "disable",
Self::Prefer => "prefer",
Self::Require => "require",
Self::VerifyCa => "verify-ca",
Self::VerifyFull => "verify-full",
}
}
}
///
/// ███████╗ █████╗ ██╗   ██╗ ██████╗███████╗████████╗
/// ██╔════╝██╔══██╗██║   ██║██╔════╝██╔════╝╚══██╔══╝
/// █████╗  ███████║██║   ██║██║     █████╗     ██║
/// ██╔══╝  ██╔══██║██║   ██║██║     ██╔══╝     ██║
/// ██║     ██║  ██║╚██████╔╝╚██████╗███████╗   ██║
/// ██║     ██║  ██║ ╚═════╝  ╚═════╝╚══════╝   ██║
/// Fast, async, and concurrent data applications.
///
// Global CLI arguments shared by every subcommand. The banner above is
// emitted verbatim in `--help` (verbatim_doc_comment), so it must not be
// reformatted. Field `///` comments are clap help text and stay as-is.
#[derive(Parser)]
#[command(author, version, verbatim_doc_comment)]
pub struct Args {
    #[command(subcommand)]
    pub command: Commands,
    /// The host to bind to.
    #[arg(long, env = "FAUCET_HOST", default_value = "127.0.0.1:3838")]
    pub host: String,
    /// The IP address to extract from.
    /// Defaults to client address.
    #[arg(short, long, env = "FAUCET_IP_FROM", default_value = "client")]
    pub ip_from: IpFrom,
    /// Command, path, or executable to run Rscript.
    #[arg(long, short, env = "FAUCET_RSCRIPT", default_value = "Rscript")]
    pub rscript: OsString,
    /// Command, path, or executable to run quarto.
    #[arg(long, short, env = "FAUCET_QUARTO", default_value = "quarto")]
    pub quarto: OsString,
    /// Command, path, or executable to run uv.
    #[arg(long, short, env = "FAUCET_UV", default_value = "uv")]
    pub uv: OsString,
    /// Save logs to a file. Will disable colors!
    #[arg(long, short, env = "FAUCET_LOG_FILE", default_value = None)]
    pub log_file: Option<PathBuf>,
    #[arg(long, short, env = "FAUCET_MAX_LOG_FILE_SIZE", default_value = None, value_parser = |s: &str| parse_size::parse_size(s))]
    /// The maximum size of the log file. (Ex. 10M, 1GB)
    pub max_log_file_size: Option<u64>,
    /// The strategy for shutting down faucet
    #[arg(long, env = "FAUCET_SHUTDOWN", default_value = "immediate")]
    pub shutdown: Shutdown,
    /// Maximum size of a WebSocket message. This is useful for DDOS prevention. Not set means no size limit.
    #[arg(long, env = "FAUCET_MAX_MESSAGE_SIZE", default_value = None, value_parser = |s: &str| parse_size::parse_size(s))]
    pub max_message_size: Option<u64>,
    /// Connection string to a PostgreSQL database for saving HTTP events.
    #[arg(long, env = "FAUCET_TELEMETRY_POSTGRES_STRING", default_value = None)]
    pub pg_con_string: Option<String>,
    /// Path to CA certificate for PostgreSQL SSL/TLS.
    #[arg(long, env = "FAUCET_TELEMETRY_POSTGRES_SSLCERT", default_value = None)]
    pub pg_sslcert: Option<PathBuf>,
    /// SSL mode for PostgreSQL connection (disable, prefer, require, verify-ca, verify-full).
    #[arg(
        long,
        env = "FAUCET_TELEMETRY_POSTGRES_SSLMODE",
        default_value = "prefer"
    )]
    pub pg_sslmode: PgSslMode,
    /// Save HTTP events on PostgreSQL under a specific namespace.
    #[arg(long, env = "FAUCET_TELEMETRY_NAMESPACE", default_value = "faucet")]
    pub telemetry_namespace: String,
    /// Represents the source code version of the service to run. This is useful for telemetry.
    #[arg(long, env = "FAUCET_TELEMETRY_VERSION", default_value = None)]
    pub telemetry_version: Option<String>,
}
impl StartArgs {
    /// Resolve the `--type` flag into a concrete [`WorkerType`].
    ///
    /// With `--type auto` the working directory is probed for Plumber and
    /// Shiny entry files; if neither is found the process exits with an
    /// error, since faucet cannot guess how to spawn workers.
    pub fn server_type(&self) -> WorkerType {
        // Auto-detection, factored out so the match below stays a flat map.
        let auto_detect = || {
            if is_plumber(&self.dir) {
                return WorkerType::Plumber;
            }
            if is_shiny(&self.dir) {
                return WorkerType::Shiny;
            }
            log::error!(target: "faucet", "Could not determine worker type. Please specify with --type.");
            std::process::exit(1);
        };
        match self.type_ {
            ServerType::FastAPI => WorkerType::FastAPI,
            ServerType::Plumber => WorkerType::Plumber,
            ServerType::Shiny => WorkerType::Shiny,
            ServerType::QuartoShiny => WorkerType::QuartoShiny,
            ServerType::Auto => auto_detect(),
        }
    }
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/error.rs | src/error.rs | use std::convert::Infallible;
use crate::client::ExclusiveBody;
// The specific reason a request was rejected as malformed; rendered for
// the client via the `Display` impl below in this file.
pub enum BadRequestReason {
    MissingHeader(&'static str),
    InvalidHeader(&'static str),
    MissingQueryParam(&'static str),
    InvalidQueryParam(&'static str),
    NoPathOrQuery,
    NoHostName,
    UnsupportedUrlScheme,
}
/// Convenience alias used throughout the crate for fallible operations.
pub type FaucetResult<T> = std::result::Result<T, FaucetError>;
use thiserror::Error;
impl std::fmt::Display for BadRequestReason {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
BadRequestReason::MissingQueryParam(param) => {
write!(f, "Missing query parameter: {param}")
}
BadRequestReason::InvalidQueryParam(param) => {
write!(f, "Invalid query parameter: {param}")
}
BadRequestReason::UnsupportedUrlScheme => {
write!(f, "UnsupportedUrlScheme use ws:// or wss://")
}
BadRequestReason::NoHostName => write!(f, "No Host Name"),
BadRequestReason::MissingHeader(header) => write!(f, "Missing header: {header}"),
BadRequestReason::InvalidHeader(header) => write!(f, "Invalid header: {header}"),
BadRequestReason::NoPathOrQuery => write!(f, "No path and/or query"),
}
}
}
// Crate-wide error type. `#[error(...)]` strings are the Display output
// (thiserror) and are therefore runtime-visible; they are left untouched.
// Debug is hand-implemented below to mirror Display.
#[derive(Error)]
pub enum FaucetError {
    #[error("Pool build error: {0}")]
    PoolBuild(#[from] deadpool::managed::BuildError),
    #[error("Pool timeout error: {0:?}")]
    PoolTimeout(deadpool::managed::TimeoutType),
    #[error("Pool post create hook error")]
    PoolPostCreateHook,
    #[error("Pool closed error")]
    PoolClosed,
    #[error("Pool no runtime specified error")]
    PoolNoRuntimeSpecified,
    #[error("No sockets available")]
    NoSocketsAvailable,
    #[error("Connection closed")]
    ConnectionClosed,
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),
    #[error("Unknown error: {0}")]
    Unknown(String),
    #[error("Error parsing host address: {0}")]
    HostParseError(#[from] std::net::AddrParseError),
    #[error("Hyper error: {0}")]
    Hyper(#[from] hyper::Error),
    #[error("{0}")]
    BadRequest(BadRequestReason),
    #[error("Invalid header values: {0}")]
    InvalidHeaderValues(#[from] hyper::header::InvalidHeaderValue),
    #[error("Http error: {0}")]
    Http(#[from] hyper::http::Error),
    #[error("Missing argument: {0}")]
    MissingArgument(&'static str),
    #[error("Route '{0}' is duplicated")]
    DuplicateRoute(String),
    #[error("Utf8 Coding error: {0}")]
    Utf8Coding(String),
    #[error("Buffer Capacity: {0}")]
    BufferCapacity(tokio_tungstenite::tungstenite::error::CapacityError),
    #[error("Protocol violation: {0}")]
    ProtocolViolation(tokio_tungstenite::tungstenite::error::ProtocolError),
    #[error("Web Socket Write buffer full, {0}")]
    WSWriteBufferFull(Box<tokio_tungstenite::tungstenite::Message>),
    #[error("PostgreSQL error: {0}")]
    PostgreSQL(#[from] tokio_postgres::Error),
    #[error("WebSocket Connection in use")]
    WebSocketConnectionInUse,
    #[error(
        "WebSocket Connection purged. The client is trying to access a Shiny connection that does not exist."
    )]
    WebSocketConnectionPurged,
    #[error("Attack attempt detected")]
    AttackAttempt,
}
// Map every tungstenite error variant onto a FaucetError so WebSocket
// failures flow through the same error pipeline as HTTP failures.
impl From<tokio_tungstenite::tungstenite::Error> for FaucetError {
    fn from(value: tokio_tungstenite::tungstenite::Error) -> Self {
        use tokio_tungstenite::tungstenite::error::UrlError;
        use tokio_tungstenite::tungstenite::Error;
        match value {
            Error::Io(err) => FaucetError::Io(err),
            Error::Url(err) => match err {
                UrlError::NoPathOrQuery => FaucetError::BadRequest(BadRequestReason::NoPathOrQuery),
                UrlError::NoHostName | UrlError::EmptyHostName => {
                    FaucetError::BadRequest(BadRequestReason::NoHostName)
                }
                // NOTE(review): a deliberate hard panic — hitting this means
                // the binary was built without TLS support, which is a build
                // configuration bug rather than a runtime condition.
                UrlError::TlsFeatureNotEnabled => panic!("TLS Not enabled"),
                UrlError::UnableToConnect(err) => FaucetError::Unknown(err),
                UrlError::UnsupportedUrlScheme => {
                    FaucetError::BadRequest(BadRequestReason::UnsupportedUrlScheme)
                }
            },
            Error::Tls(err) => FaucetError::Unknown(err.to_string()),
            Error::Utf8(err) => FaucetError::Utf8Coding(err),
            Error::Http(_) => FaucetError::Unknown("Unknown HTTP error".to_string()),
            Error::Capacity(err) => FaucetError::BufferCapacity(err),
            Error::HttpFormat(err) => FaucetError::Http(err),
            Error::Protocol(err) => FaucetError::ProtocolViolation(err),
            // Both "already closed" and "closed" collapse into one variant.
            Error::AlreadyClosed | Error::ConnectionClosed => FaucetError::ConnectionClosed,
            Error::AttackAttempt => FaucetError::AttackAttempt,
            Error::WriteBufferFull(msg) => FaucetError::WSWriteBufferFull(msg),
        }
    }
}
// Flatten deadpool's wrapper error into FaucetError; a Backend error is
// already a FaucetError and passes through unchanged.
impl From<deadpool::managed::PoolError<FaucetError>> for FaucetError {
    fn from(value: deadpool::managed::PoolError<FaucetError>) -> Self {
        match value {
            deadpool::managed::PoolError::Backend(e) => e,
            deadpool::managed::PoolError::Timeout(e) => Self::PoolTimeout(e),
            deadpool::managed::PoolError::Closed => Self::PoolClosed,
            deadpool::managed::PoolError::PostCreateHook(_) => Self::PoolPostCreateHook,
            deadpool::managed::PoolError::NoRuntimeSpecified => Self::PoolNoRuntimeSpecified,
        }
    }
}
impl From<Infallible> for FaucetError {
    /// `Infallible` has no values, so this conversion can never run; the
    /// empty match proves that exhaustively to the compiler.
    fn from(value: Infallible) -> Self {
        match value {}
    }
}
impl std::fmt::Debug for FaucetError {
    /// Debug output deliberately mirrors `Display` so `{:?}` in logs stays
    /// human readable (relied on by the unit tests below).
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        std::fmt::Display::fmt(self, f)
    }
}
impl FaucetError {
pub fn no_sec_web_socket_key() -> Self {
Self::BadRequest(BadRequestReason::MissingHeader("Sec-WebSocket-Key"))
}
pub fn unknown(s: impl ToString) -> Self {
Self::Unknown(s.to_string())
}
}
// Render any faucet error as a plain-text 500 response for the client.
impl From<FaucetError> for hyper::Response<ExclusiveBody> {
    fn from(val: FaucetError) -> Self {
        // The error's Display text becomes the response body.
        let mut resp = hyper::Response::new(ExclusiveBody::plain_text(val.to_string()));
        *resp.status_mut() = hyper::StatusCode::INTERNAL_SERVER_ERROR;
        resp
    }
}
// Unit tests: mostly smoke tests that each `From` conversion compiles and
// runs, plus exact Display/Debug formatting checks.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_faucet_error() {
        let err = FaucetError::unknown("test");
        assert_eq!(err.to_string(), "Unknown error: test");
    }
    #[test]
    fn test_faucet_error_debug() {
        // Debug is defined to match Display exactly.
        let err = FaucetError::unknown("test");
        assert_eq!(format!("{err:?}"), r#"Unknown error: test"#);
    }
    #[test]
    fn test_faucet_error_from_hyper_error() {
        let err = hyper::Request::builder()
            .uri("INVALID URI")
            .body(())
            .unwrap_err();
        let _err: FaucetError = From::from(err);
    }
    #[test]
    fn test_faucet_error_from_io_error() {
        let err = std::io::Error::other("test");
        let _err: FaucetError = From::from(err);
    }
    #[test]
    fn test_faucet_error_from_pool_error() {
        let err = deadpool::managed::PoolError::Backend(FaucetError::unknown("test"));
        let _err: FaucetError = From::from(err);
    }
    #[test]
    fn test_faucet_error_from_pool_build_error() {
        let err = deadpool::managed::BuildError::NoRuntimeSpecified;
        let _err: FaucetError = From::from(err);
    }
    #[test]
    fn test_faucet_error_from_pool_timeout_error() {
        let err = deadpool::managed::PoolError::<FaucetError>::Timeout(
            deadpool::managed::TimeoutType::Create,
        );
        let _err: FaucetError = From::from(err);
    }
    #[test]
    fn test_faucet_error_from_pool_closed_error() {
        let err = deadpool::managed::PoolError::<FaucetError>::Closed;
        let _err: FaucetError = From::from(err);
    }
    #[test]
    fn test_faucet_error_from_pool_post_create_hook_error() {
        let err = deadpool::managed::PoolError::<FaucetError>::PostCreateHook(
            deadpool::managed::HookError::message("test"),
        );
        let _err: FaucetError = From::from(err);
    }
    #[test]
    fn test_faucet_error_from_pool_no_runtime_specified_error() {
        let err = deadpool::managed::PoolError::<FaucetError>::NoRuntimeSpecified;
        let _err: FaucetError = From::from(err);
    }
    #[test]
    fn test_faucet_error_from_hyper_invalid_header_value_error() {
        // 0x00 is an invalid header byte, guaranteeing an error value.
        let err = hyper::header::HeaderValue::from_bytes([0x00].as_ref()).unwrap_err();
        let _err: FaucetError = From::from(err);
    }
    #[test]
    fn test_faucet_error_from_addr_parse_error() {
        let err = "INVALID".parse::<std::net::SocketAddr>().unwrap_err();
        let _err: FaucetError = From::from(err);
    }
    #[test]
    fn test_faucet_error_displat_missing_header() {
        let _err = FaucetError::BadRequest(BadRequestReason::MissingHeader("test"));
    }
    #[test]
    fn test_faucet_error_displat_invalid_header() {
        let _err = FaucetError::BadRequest(BadRequestReason::InvalidHeader("test"));
    }
    #[test]
    fn test_from_fauct_error_to_hyper_response() {
        let err = FaucetError::unknown("test");
        let resp: hyper::Response<ExclusiveBody> = err.into();
        assert_eq!(resp.status(), hyper::StatusCode::INTERNAL_SERVER_ERROR);
    }
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/networking.rs | src/networking.rs | use std::{net::SocketAddr, ops::RangeInclusive};
use rand::Rng;
use tokio::{io, net::TcpListener};
use crate::error::{FaucetError, FaucetResult};
// Ports that must never be handed to a worker — this looks like the
// browser "bad ports" list (ports browsers refuse to fetch from);
// NOTE(review): verify against the WHATWG fetch bad-port list when updating.
const UNSAFE_PORTS: &[u16] = &[
    1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37, 42, 43, 53, 77, 79, 87, 95, 101, 102, 103,
    104, 109, 110, 111, 113, 115, 117, 119, 123, 135, 139, 143, 179, 389, 427, 465, 512, 513, 514,
    515, 526, 530, 531, 532, 540, 548, 556, 563, 587, 601, 636, 993, 995, 2049, 3659, 4045, 6000,
    6665, 6666, 6667, 6668, 6669, 6697,
];
/// Probe whether `socket_addr` can currently be bound.
///
/// Returns `Ok(false)` when the address is already in use, `Ok(true)` when
/// a bind succeeds (the probe listener is dropped immediately), and an
/// error for any other I/O failure.
pub async fn socket_is_available(socket_addr: SocketAddr) -> FaucetResult<bool> {
    match TcpListener::bind(socket_addr).await {
        Ok(_listener) => Ok(true),
        Err(e) if e.kind() == io::ErrorKind::AddrInUse => Ok(false),
        Err(e) => Err(FaucetError::Io(e)),
    }
}
// Registered/user port range (IANA 1024..=49151) from which worker ports
// are drawn at random.
const PORT_RANGE: RangeInclusive<u16> = 1024..=49151;
/// Pick a random loopback socket a worker can bind to.
///
/// Draws up to `tries` random ports from `PORT_RANGE`, skipping the
/// browser-unsafe ports, and returns the first one that binds successfully.
/// Fails with `NoSocketsAvailable` when every attempt is exhausted.
pub async fn get_available_socket(tries: usize) -> Result<SocketAddr, FaucetError> {
    let mut rng = rand::rng();
    for _attempt in 0..tries {
        let port: u16 = rng.random_range(PORT_RANGE);
        // A draw landing on an unsafe port still consumes one try.
        if UNSAFE_PORTS.contains(&port) {
            continue;
        }
        let candidate = SocketAddr::from(([127, 0, 0, 1], port));
        if socket_is_available(candidate).await? {
            return Ok(candidate);
        }
    }
    Err(FaucetError::NoSocketsAvailable)
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/main.rs | src/main.rs | use clap::Parser;
use faucet_server::cli::{Args, Commands};
use faucet_server::client::worker::log_stdio;
use faucet_server::error::FaucetResult;
use faucet_server::leak;
use faucet_server::server::logger::build_logger;
use faucet_server::server::{FaucetServerBuilder, RouterConfig};
use faucet_server::telemetry::TelemetryManager;
use faucet_server::{cli::Shutdown, shutdown};
use tokio_tungstenite::tungstenite::protocol::WebSocketConfig;
#[tokio::main]
pub async fn main() -> FaucetResult<()> {
    // NOTE(review): this fires before build_logger installs the logger, so
    // it is likely a no-op leftover from debugging — confirm it is intended.
    log::info!("Logger test: faucet starting up");
    // Load env vars from .Renviron first, then .env; missing files are fine.
    dotenv::from_filename(".Renviron").ok();
    dotenv::from_filename(".env").ok();
    let cli_args = Args::parse();
    let shutdown_signal = match cli_args.shutdown {
        Shutdown::Immediate => shutdown::immediate(),
        Shutdown::Graceful => shutdown::graceful(),
    };
    // Telemetry is optional: only started when a PostgreSQL connection
    // string was provided. A failure here aborts the process.
    let telemetry = cli_args.pg_con_string.map(|pg_con| {
        match TelemetryManager::start_postgres(
            &cli_args.telemetry_namespace,
            cli_args.telemetry_version.as_deref(),
            &pg_con,
            cli_args.pg_sslmode,
            cli_args.pg_sslcert.as_deref(),
            shutdown_signal,
        ) {
            Ok(telemetry) => telemetry,
            Err(e) => {
                eprintln!("Unable to start telemetry manager: {e}");
                std::process::exit(1);
            }
        }
    });
    // Logs go to stderr unless --log-file was given.
    let log_thread_handle = build_logger(
        cli_args
            .log_file
            .as_ref()
            .map_or(faucet_server::server::logger::Target::Stderr, |file| {
                faucet_server::server::logger::Target::File(file.to_path_buf())
            }),
        cli_args.max_log_file_size,
        shutdown_signal,
    );
    let max_message_size = cli_args.max_message_size.map(|v| v as usize);
    // Leaked so the config can be shared as &'static across connections.
    let websocket_config: &'static WebSocketConfig = leak!(WebSocketConfig::default()
        .max_message_size(max_message_size)
        .max_frame_size(max_message_size));
    match cli_args.command {
        Commands::Start(start_args) => {
            log::info!(target: "faucet", "Building the faucet server...");
            FaucetServerBuilder::new()
                .strategy(Some(start_args.strategy.into()))
                .workers(start_args.workers)
                .server_type(start_args.server_type())
                .extractor(cli_args.ip_from.into())
                .bind(cli_args.host.parse()?)
                .workdir(start_args.dir)
                .rscript(cli_args.rscript)
                .uv(cli_args.uv)
                .app_dir(start_args.app_dir)
                .quarto(cli_args.quarto)
                .qmd(start_args.qmd)
                .max_rps(start_args.max_rps)
                .build()?
                .run(shutdown_signal, websocket_config)
                .await?;
        }
        Commands::Router(router_args) => {
            // NOTE(review): both unwraps panic on a missing/invalid config
            // file rather than printing a friendly error — confirm intended.
            let config: RouterConfig =
                toml::from_str(&std::fs::read_to_string(router_args.conf).unwrap()).unwrap();
            config
                .run(
                    cli_args.rscript,
                    cli_args.quarto,
                    cli_args.uv,
                    cli_args.ip_from.into(),
                    cli_args.host.parse()?,
                    shutdown_signal,
                    websocket_config,
                )
                .await?;
        }
        Commands::Rscript { args } => {
            // Run a one-off Rscript with stdout/stderr piped through the
            // faucet logger, then wait for it to exit.
            let child = tokio::process::Command::new(cli_args.rscript)
                .args(args)
                .stdin(std::process::Stdio::null())
                .stdout(std::process::Stdio::piped())
                .stderr(std::process::Stdio::piped())
                .spawn()
                .unwrap();
            let mut child = log_stdio(child, "faucet").unwrap();
            child.wait().await?;
            shutdown_signal.shutdown();
        }
        Commands::Uv { args } => {
            // Same as Rscript above, but for uv.
            let child = tokio::process::Command::new(cli_args.uv)
                .args(args)
                .stdin(std::process::Stdio::null())
                .stdout(std::process::Stdio::piped())
                .stderr(std::process::Stdio::piped())
                .spawn()
                .unwrap();
            let mut child = log_stdio(child, "faucet").unwrap();
            child.wait().await?;
            shutdown_signal.shutdown();
        }
    }
    // Drain background tasks (telemetry writer, log thread) before exiting.
    log::debug!("Main server shutdown. Waiting to shutdown co-routines.");
    if let Some(telemetry) = telemetry {
        log::debug!("Waiting to stop DB writes");
        let _ = telemetry.http_events_join_handle.await;
    }
    if let Some(handle) = log_thread_handle {
        log::debug!("Waiting for log thread to be finished!");
        let _ = handle.await;
    }
    log::debug!("All co-routines shutdown.");
    std::process::exit(0);
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/global_conn.rs | src/global_conn.rs | use std::sync::{atomic::AtomicI64, OnceLock};
/// Process-wide gauge of currently open connections.
///
/// NOTE(review): `CORRENT_CONNECTIONS` looks like a typo for
/// `CURRENT_CONNECTIONS`, but the static is `pub`, so renaming it would
/// break external callers.
pub static CORRENT_CONNECTIONS: OnceLock<AtomicI64> = OnceLock::new();

/// Increment the gauge, lazily initializing it to zero on first use.
pub fn add_connection() {
    let counter = CORRENT_CONNECTIONS.get_or_init(|| AtomicI64::new(0));
    counter.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
}

/// Decrement the gauge. Deliberately panics (`unreachable!`) if called
/// before any `add_connection`: a remove without a prior add means the
/// gauge would go negative, which is a bug.
pub fn remove_connection() {
    let counter = CORRENT_CONNECTIONS.get_or_init(|| unreachable!());
    counter.fetch_sub(1, std::sync::atomic::Ordering::SeqCst);
}

/// Read the current gauge value (zero if never touched).
pub fn current_connections() -> i64 {
    let counter = CORRENT_CONNECTIONS.get_or_init(|| AtomicI64::new(0));
    counter.load(std::sync::atomic::Ordering::SeqCst)
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/server/onion.rs | src/server/onion.rs | use std::net::IpAddr;
// Minimal async middleware abstraction ("onion" pattern): each service
// handles a request, optionally tagged with the client IP, and middleware
// wraps an inner service. Uses return-position `impl Future` in the trait.
pub trait Service<Request>: Send + Sync {
    type Response;
    type Error;
    fn call(
        &self,
        req: Request,
        ip_addr: Option<IpAddr>,
    ) -> impl std::future::Future<Output = Result<Self::Response, Self::Error>>;
}
// A middleware factory: wraps an inner service `S` and produces the
// wrapped service type.
pub trait Layer<S> {
    type Service;
    fn layer(&self, inner: S) -> Self::Service;
}
/// Builds an "onion" of middleware layers around an innermost service.
/// Each `layer` call wraps the current service in one more middleware.
pub struct ServiceBuilder<S> {
    service: S,
}
impl<S> ServiceBuilder<S> {
    /// Start the builder from the innermost service.
    pub fn new(service: S) -> Self {
        Self { service }
    }
    /// Wrap the current service in one more middleware layer.
    pub fn layer<L>(self, layer: L) -> ServiceBuilder<L::Service>
    where
        L: Layer<S>,
    {
        let wrapped = layer.layer(self.service);
        ServiceBuilder::new(wrapped)
    }
    /// Finish and return the fully wrapped service.
    pub fn build(self) -> S {
        self.service
    }
}
// Unit tests: a bare service, a single-layer onion, and a two-layer onion,
// each checking that requests fall through to the innermost service unless
// a middleware intercepts them.
#[cfg(test)]
mod tests {
    use super::*;
    #[tokio::test]
    async fn basic_service_response() {
        struct Svc;
        impl Service<()> for Svc {
            type Response = String;
            type Error = ();
            async fn call(&self, _: (), _: Option<IpAddr>) -> Result<Self::Response, Self::Error> {
                Ok("Hello, world!".to_string())
            }
        }
        let svc = ServiceBuilder::new(Svc).build();
        assert_eq!(svc.call((), None).await.unwrap(), "Hello, world!");
    }
    #[tokio::test]
    async fn basic_service_middleware() {
        struct Svc;
        impl Service<&'static str> for Svc {
            type Response = String;
            type Error = ();
            async fn call(
                &self,
                _: &'static str,
                _: Option<IpAddr>,
            ) -> Result<Self::Response, Self::Error> {
                Ok("Hello, world!".to_string())
            }
        }
        struct GoodByeService<S> {
            inner: S,
        }
        impl<S> Service<&'static str> for GoodByeService<S>
        where
            S: Service<&'static str, Response = String, Error = ()>,
        {
            type Response = String;
            type Error = ();
            async fn call(
                &self,
                req: &'static str,
                _: Option<IpAddr>,
            ) -> Result<Self::Response, Self::Error> {
                if req == "Goodbye" {
                    Ok("Goodbye, world!".to_string())
                } else {
                    self.inner.call(req, None).await
                }
            }
        }
        struct GoodByeLayer;
        impl<S> Layer<S> for GoodByeLayer {
            type Service = GoodByeService<S>;
            fn layer(&self, inner: S) -> Self::Service {
                GoodByeService { inner }
            }
        }
        let svc = ServiceBuilder::new(Svc).layer(GoodByeLayer).build();
        assert_eq!(svc.call("Goodbye", None).await.unwrap(), "Goodbye, world!");
        assert_eq!(svc.call("Hello", None).await.unwrap(), "Hello, world!");
    }
    #[tokio::test]
    async fn multiple_layer_middleware() {
        struct Svc;
        impl Service<&'static str> for Svc {
            type Response = String;
            type Error = ();
            async fn call(
                &self,
                _: &'static str,
                _: Option<IpAddr>,
            ) -> Result<Self::Response, Self::Error> {
                Ok("Hello, world!".to_string())
            }
        }
        struct GoodByeService<S> {
            inner: S,
        }
        impl<S> Service<&'static str> for GoodByeService<S>
        where
            S: Service<&'static str, Response = String, Error = ()>,
        {
            type Response = String;
            type Error = ();
            async fn call(
                &self,
                req: &'static str,
                _: Option<IpAddr>,
            ) -> Result<Self::Response, Self::Error> {
                if req == "Goodbye" {
                    Ok("Goodbye, world!".to_string())
                } else {
                    self.inner.call(req, None).await
                }
            }
        }
        struct GoodByeLayer;
        impl<S> Layer<S> for GoodByeLayer {
            type Service = GoodByeService<S>;
            fn layer(&self, inner: S) -> Self::Service {
                GoodByeService { inner }
            }
        }
        struct HowAreYouService<S> {
            inner: S,
        }
        impl<S> Service<&'static str> for HowAreYouService<S>
        where
            S: Service<&'static str, Response = String, Error = ()>,
        {
            type Response = String;
            type Error = ();
            async fn call(
                &self,
                req: &'static str,
                _: Option<IpAddr>,
            ) -> Result<Self::Response, Self::Error> {
                if req == "How are you?" {
                    Ok("I'm fine, thank you!".to_string())
                } else {
                    self.inner.call(req, None).await
                }
            }
        }
        struct HowAreYouLayer;
        impl<S> Layer<S> for HowAreYouLayer {
            type Service = HowAreYouService<S>;
            fn layer(&self, inner: S) -> Self::Service {
                HowAreYouService { inner }
            }
        }
        let svc = ServiceBuilder::new(Svc)
            .layer(GoodByeLayer)
            .layer(HowAreYouLayer)
            .build();
        assert_eq!(svc.call("Goodbye", None).await.unwrap(), "Goodbye, world!");
        assert_eq!(svc.call("Hello", None).await.unwrap(), "Hello, world!");
        assert_eq!(
            svc.call("How are you?", None).await.unwrap(),
            "I'm fine, thank you!"
        );
    }
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/server/service.rs | src/server/service.rs | use std::net::IpAddr;
use crate::{
client::{load_balancing::Strategy, Client, ExclusiveBody, UpgradeStatus},
error::FaucetError,
server::load_balancing::LoadBalancer,
shutdown::ShutdownSignal,
};
use hyper::{body::Incoming, header::HeaderValue};
use tokio_tungstenite::tungstenite::protocol::WebSocketConfig;
use super::onion::{Layer, Service};
// Per-request state threaded through the middleware chain via request
// extensions.
#[derive(Clone)]
pub(crate) struct State {
    // Per-request id; UUID v7 is time-ordered, useful for log correlation.
    pub uuid: uuid::Uuid,
    // Client IP as resolved by the configured IP extractor.
    pub remote_addr: IpAddr,
    // Worker client chosen by the load balancer for this request.
    pub client: Client,
}
impl State {
    #[inline(always)]
    fn new(remote_addr: IpAddr, client: Client) -> State {
        let uuid = uuid::Uuid::now_v7();
        State {
            remote_addr,
            client,
            uuid,
        }
    }
}
// Middleware service that resolves the client IP, picks a worker via the
// load balancer, and attaches a `State` to each request (see the Service
// impl below).
#[derive(Clone)]
pub struct AddStateService<S> {
    inner: S,
    load_balancer: LoadBalancer,
}
/// Encode a UUID as a lowercase hyphenated HTTP header value without a
/// heap allocation (the stack buffer is sized for the hyphenated form).
fn uuid_to_header_value(uuid: uuid::Uuid) -> HeaderValue {
    let mut buffer = [0u8; uuid::fmt::Hyphenated::LENGTH];
    let encoded = uuid.hyphenated().encode_lower(&mut buffer);
    HeaderValue::from_str(encoded)
        .expect("Unable to convert from uuid to header value, this is a bug")
}
/// Pull the sticky-session UUID out of the request's `FAUCET_LB_COOKIE`
/// cookie, if present and parseable.
///
/// Unparseable cookies are logged and skipped; an unparseable UUID in a
/// matching cookie is logged and yields `None`.
fn extract_lb_uuid_from_req_cookies<B>(req: &hyper::Request<B>) -> Option<uuid::Uuid> {
    let header = req.headers().get("Cookie")?;
    let cookie_str = header.to_str().ok()?;
    for parsed in cookie::Cookie::split_parse(cookie_str) {
        let cookie = match parsed {
            Ok(cookie) => cookie,
            Err(e) => {
                log::error!(target: "faucet", "Error parsing cookie: {e}");
                continue;
            }
        };
        if cookie.name() != "FAUCET_LB_COOKIE" {
            continue;
        }
        // First matching cookie wins, whether or not its value parses.
        return match cookie.value().parse::<uuid::Uuid>() {
            Ok(uuid) => Some(uuid),
            Err(e) => {
                log::error!(target: "faucet", "Error parsing UUID from cookie: {e}");
                None
            }
        };
    }
    None
}
/// Append the sticky-session `Set-Cookie` header to the response when a
/// load-balancer cookie UUID was assigned; a `None` is a no-op.
fn add_lb_cookie_to_resp(resp: &mut hyper::Response<ExclusiveBody>, lb_cookie: Option<uuid::Uuid>) {
    let Some(lb_cookie) = lb_cookie else { return };
    let value = format!("FAUCET_LB_COOKIE={lb_cookie}; Path=/; HttpOnly; SameSite=Lax");
    let header = HeaderValue::from_str(&value)
        .expect("UUID is invalid, this is a bug! Report it please!");
    resp.headers_mut().append("Set-Cookie", header);
}
// Interesting behavior:
//
// If using a cookie hash strategy and the browser starts by sending N simultaneous requests
// to the server, there will be a period on time where the server will send the
// request to random workers. It will eventually settle down to the
// Last-Used worker for the given cookie hash.
//
// Does this have any impact? I don't believe so, but it is worth taking into account.
//
// Andrรฉs
// Reserved path (matched by suffix in the Service impl below) that serves
// the bundled reconnect script instead of hitting a worker.
const RESERVED_RECONNECT_PATH: &str = "__faucet__/reconnect.js";
// The reconnect script is embedded into the binary at compile time.
const RECONNECT_JS: &str = include_str!("reconnect.js");
// The state-attaching middleware: resolves the client IP, serves the
// reserved reconnect script, picks a worker, tags the request with a
// per-request UUID + State, and (for cookie-hash balancing) sets the
// sticky-session cookie on the response.
impl<S, ReqBody> Service<hyper::Request<ReqBody>> for AddStateService<S>
where
    ReqBody: hyper::body::Body + Send + Sync + 'static,
    S: Service<
        hyper::Request<ReqBody>,
        Response = hyper::Response<ExclusiveBody>,
        Error = FaucetError,
    > + Send
        + Sync,
{
    type Error = FaucetError;
    type Response = hyper::Response<ExclusiveBody>;
    async fn call(
        &self,
        mut req: hyper::Request<ReqBody>,
        socket_addr: Option<IpAddr>,
    ) -> Result<Self::Response, Self::Error> {
        let remote_addr = match self.load_balancer.extract_ip(&req, socket_addr) {
            Ok(ip) => ip,
            Err(e) => {
                log::error!(target: "faucet", "Error extracting IP, verify that proxy headers are set correctly: {e}");
                return Err(e);
            }
        };
        // Check if the user is asking for "/__faucet__/reconnect.js"
        if req.uri().path().ends_with(RESERVED_RECONNECT_PATH) {
            return Ok(hyper::Response::builder()
                .status(200)
                .body(ExclusiveBody::plain_text(RECONNECT_JS))
                .expect("Response should build"));
        }
        let is_cookie_hash = self.load_balancer.get_strategy() == Strategy::CookieHash;
        // Reuse the client's sticky cookie when present; otherwise mint a
        // fresh UUID so the session sticks from this response onward.
        let lb_cookie = (is_cookie_hash)
            .then_some(extract_lb_uuid_from_req_cookies(&req).unwrap_or(uuid::Uuid::now_v7()));
        let client = self
            .load_balancer
            .get_client(remote_addr, lb_cookie)
            .await?;
        let state = State::new(remote_addr, client);
        // Add the state's UUID to the request. `X-` headers are deprecated
        // https://www.rfc-editor.org/rfc/rfc6648
        req.headers_mut()
            .insert("Faucet-Request-Uuid", uuid_to_header_value(state.uuid));
        req.extensions_mut().insert(state);
        let mut resp = self.inner.call(req, Some(remote_addr)).await;
        // Even error responses carry the sticky cookie so retries hit the
        // same worker.
        if let Ok(resp) = &mut resp {
            if is_cookie_hash {
                add_lb_cookie_to_resp(resp, lb_cookie);
            }
        }
        resp
    }
}
// Layer that produces `AddStateService`, injecting the shared load
// balancer into each wrapped service.
pub struct AddStateLayer {
    load_balancer: LoadBalancer,
}
impl AddStateLayer {
    #[inline]
    pub fn new(load_balancer: LoadBalancer) -> Self {
        Self { load_balancer }
    }
}
impl<S> Layer<S> for AddStateLayer {
    type Service = AddStateService<S>;
    fn layer(&self, inner: S) -> Self::Service {
        // LoadBalancer is cloned per layered service; the clone is how the
        // balancer is shared across the middleware chain.
        AddStateService {
            inner,
            load_balancer: self.load_balancer.clone(),
        }
    }
}
// Innermost service: forwards the request to the worker chosen earlier
// (read from the request's `State` extension), upgrading to a WebSocket
// bridge when the client asks for it.
pub(crate) struct ProxyService {
    pub shutdown: &'static ShutdownSignal,
    pub websocket_config: &'static WebSocketConfig,
}
impl Service<hyper::Request<Incoming>> for ProxyService {
    type Error = FaucetError;
    type Response = hyper::Response<ExclusiveBody>;
    async fn call(
        &self,
        req: hyper::Request<Incoming>,
        _: Option<IpAddr>,
    ) -> Result<Self::Response, Self::Error> {
        // AddStateService always inserts State before this runs; its
        // absence would be a middleware-ordering bug, hence the expect.
        let state = req
            .extensions()
            .get::<State>()
            .expect("State not found")
            .clone();
        match state
            .client
            .attempt_upgrade(req, self.shutdown, self.websocket_config)
            .await?
        {
            // WebSocket upgrade succeeded: the response hands the
            // connection over to the bridge.
            UpgradeStatus::Upgraded(res) => {
                log::debug!(
                    target: "faucet",
                    "Initializing WebSocket bridge from {} to {}",
                    state.remote_addr,
                    state.client.config.target
                );
                Ok(res)
            }
            // Plain HTTP: forward the request over a pooled connection.
            UpgradeStatus::NotUpgraded(req) => {
                let connection = state.client.get().await?;
                connection.send_request(req).await
            }
        }
    }
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/server/mod.rs | src/server/mod.rs | pub mod logging;
pub use logging::{logger, HttpLogData, LogOption};
use tokio_tungstenite::tungstenite::protocol::WebSocketConfig;
pub mod onion;
mod router;
mod service;
use crate::{
client::{
load_balancing::{self, LoadBalancer, Strategy},
worker::{WorkerConfigs, WorkerType},
ExclusiveBody,
},
error::{FaucetError, FaucetResult},
leak,
shutdown::ShutdownSignal,
};
use hyper::{body::Incoming, server::conn::http1, service::service_fn, Request};
use hyper_util::rt::TokioIo;
use onion::{Service, ServiceBuilder};
use service::{AddStateLayer, ProxyService};
use std::{
ffi::{OsStr, OsString},
net::SocketAddr,
num::NonZeroUsize,
path::{Path, PathBuf},
pin::pin,
sync::Arc,
};
use tokio::net::TcpListener;
pub use router::RouterConfig;
use self::{logging::LogService, service::AddStateService};
// Reconcile the user-requested strategy with what the worker type can
// support: FastAPI/Plumber default to round robin, while Shiny needs
// session affinity, so round-robin/RPS requests are downgraded to IP hash
// and the default is cookie hash.
fn determine_strategy(server_type: WorkerType, strategy: Option<Strategy>) -> Strategy {
    match server_type {
        WorkerType::FastAPI => strategy.unwrap_or_else(|| {
            log::debug!(target: "faucet", "No load balancing strategy specified. Defaulting to round robin for FastAPI.");
            Strategy::RoundRobin
        }),
        WorkerType::Plumber =>
            strategy.unwrap_or_else(|| {
                log::debug!(target: "faucet", "No load balancing strategy specified. Defaulting to round robin for plumber.");
                Strategy::RoundRobin
            }),
        WorkerType::Shiny | WorkerType::QuartoShiny => match strategy {
            None => {
                log::debug!(target: "faucet", "No load balancing strategy specified. Defaulting to Cookie Hash for shiny.");
                Strategy::CookieHash
            },
            Some(Strategy::Rps) => {
                log::debug!(target: "faucet", "RPS load balancing strategy specified for shiny, switching to IP hash.");
                Strategy::IpHash
            },
            Some(Strategy::CookieHash) => Strategy::CookieHash,
            Some(Strategy::RoundRobin) => {
                log::debug!(target: "faucet", "Round robin load balancing strategy specified for shiny, switching to IP hash.");
                Strategy::IpHash
            },
            Some(Strategy::IpHash) => Strategy::IpHash,
        },
        // Test-only worker type; always round robin.
        #[cfg(test)]
        WorkerType::Dummy => {
            log::debug!(target: "faucet", "WorkerType is Dummy, defaulting strategy to RoundRobin for tests.");
            Strategy::RoundRobin
        }
    }
}
// Builder for the faucet server. All fields are optional at collection
// time; the (out-of-view) `build()` decides which are required.
pub struct FaucetServerBuilder {
    strategy: Option<Strategy>,
    bind: Option<SocketAddr>,
    // NonZeroUsize: zero workers is rejected in `workers()`.
    n_workers: Option<NonZeroUsize>,
    server_type: Option<WorkerType>,
    workdir: Option<PathBuf>,
    extractor: Option<load_balancing::IpExtractor>,
    rscript: Option<OsString>,
    uv: Option<OsString>,
    app_dir: Option<String>,
    quarto: Option<OsString>,
    qmd: Option<PathBuf>,
    route: Option<String>,
    max_rps: Option<f64>,
}
impl FaucetServerBuilder {
    /// Create a builder with every option unset.
    pub fn new() -> Self {
        FaucetServerBuilder {
            strategy: None,
            bind: None,
            n_workers: None,
            server_type: None,
            workdir: None,
            extractor: None,
            rscript: None,
            uv: None,
            app_dir: None,
            route: None,
            quarto: None,
            qmd: None,
            max_rps: None,
        }
    }
    /// Set the application directory forwarded to the worker command.
    pub fn app_dir(mut self, app_dir: Option<impl AsRef<str>>) -> Self {
        self.app_dir = app_dir.map(|s| s.as_ref().into());
        self
    }
    /// Set (or clear) the requested load-balancing strategy.
    pub fn strategy(mut self, strategy: Option<Strategy>) -> Self {
        log::debug!(target: "faucet", "Using load balancing strategy: {strategy:?}");
        self.strategy = strategy;
        self
    }
    /// Set the socket address the server will listen on.
    pub fn bind(mut self, bind: SocketAddr) -> Self {
        log::debug!(target: "faucet", "Will bind to: {bind}");
        self.bind = Some(bind);
        self
    }
    /// Set how the client IP is extracted from incoming requests.
    pub fn extractor(mut self, extractor: load_balancing::IpExtractor) -> Self {
        log::debug!(target: "faucet", "Using IP extractor: {extractor:?}");
        self.extractor = Some(extractor);
        self
    }
    /// Set the number of workers to spawn.
    ///
    /// NOTE: `n == 0` is treated as a fatal CLI error and terminates the
    /// whole process with exit code 1.
    pub fn workers(mut self, n: usize) -> Self {
        log::debug!(target: "faucet", "Will spawn {n} workers");
        self.n_workers = match n.try_into() {
            Ok(n) => Some(n),
            Err(_) => {
                log::error!(target: "faucet", "Number of workers must be greater than 0");
                std::process::exit(1);
            }
        };
        self
    }
    /// Set the worker type (Shiny, Plumber, FastAPI, Quarto Shiny, ...).
    pub fn server_type(mut self, server_type: WorkerType) -> Self {
        log::debug!(target: "faucet", "Using worker type: {server_type:?}");
        self.server_type = Some(server_type);
        self
    }
    /// Set the working directory the workers run in.
    pub fn workdir(mut self, workdir: impl AsRef<Path>) -> Self {
        log::debug!(target: "faucet", "Using workdir: {:?}", workdir.as_ref());
        self.workdir = Some(workdir.as_ref().into());
        self
    }
    /// Set the command used to launch R workers.
    pub fn rscript(mut self, rscript: impl AsRef<OsStr>) -> Self {
        log::debug!(target: "faucet", "Using Rscript command: {:?}", rscript.as_ref());
        self.rscript = Some(rscript.as_ref().into());
        self
    }
    /// Set the command used to launch Python workers.
    pub fn uv(mut self, uv: impl AsRef<OsStr>) -> Self {
        log::debug!(target: "faucet", "Using uv command: {:?}", uv.as_ref());
        self.uv = Some(uv.as_ref().into());
        self
    }
    /// Set the command used to launch Quarto.
    pub fn quarto(mut self, quarto: impl AsRef<OsStr>) -> Self {
        log::debug!(target: "faucet", "Using quarto command: {:?}", quarto.as_ref());
        self.quarto = Some(quarto.as_ref().into());
        self
    }
    /// Set the Quarto document to serve, if any.
    pub fn qmd(mut self, qmd: Option<impl AsRef<Path>>) -> Self {
        self.qmd = qmd.map(|s| s.as_ref().into());
        self
    }
    /// Set the route prefix this server is mounted under (router mode).
    pub fn route(mut self, route: String) -> Self {
        self.route = Some(route);
        self
    }
    /// Set (or clear) the requests-per-second limit.
    pub fn max_rps(mut self, max_rps: Option<f64>) -> Self {
        self.max_rps = max_rps;
        self
    }
    /// Validate the builder and produce an immutable config.
    ///
    /// `server_type` is required; most other fields get logged defaults.
    /// Long-lived string/path values are leaked (via `leak!`) into
    /// `'static` references, since the config lives for the process lifetime.
    ///
    /// # Errors
    /// Returns `FaucetError::MissingArgument` when `server_type` is unset.
    /// Note that `bind` is intentionally NOT validated here; it is checked
    /// in `FaucetServerConfig::run`, because router-mounted servers never
    /// bind their own socket.
    pub fn build(self) -> FaucetResult<FaucetServerConfig> {
        let server_type = self
            .server_type
            .ok_or(FaucetError::MissingArgument("server_type"))?;
        let strategy = determine_strategy(server_type, self.strategy);
        let bind = self.bind;
        let n_workers = self.n_workers.unwrap_or_else(|| {
            log::debug!(target: "faucet", "No number of workers specified. Defaulting to the number of logical cores.");
            num_cpus::get().try_into().expect("num_cpus::get() returned 0")
        });
        let workdir = self.workdir
            .map(|wd| leak!(wd, Path))
            .unwrap_or_else(|| {
                log::debug!(target: "faucet", "No workdir specified. Defaulting to the current directory.");
                Path::new(".")
            });
        let rscript = self.rscript.map(|wd| leak!(wd, OsStr)).unwrap_or_else(|| {
            log::debug!(target: "faucet", "No Rscript command specified. Defaulting to `Rscript`.");
            OsStr::new("Rscript")
        });
        let uv = self.uv.map(|wd| leak!(wd, OsStr)).unwrap_or_else(|| {
            log::debug!(target: "faucet", "No uv command specified. Defaulting to `uv`.");
            OsStr::new("uv")
        });
        let extractor = self.extractor.unwrap_or_else(|| {
            log::debug!(target: "faucet", "No IP extractor specified. Defaulting to client address.");
            load_balancing::IpExtractor::ClientAddr
        });
        let app_dir = self.app_dir.map(|app_dir| leak!(app_dir, str));
        let qmd = self.qmd.map(|qmd| leak!(qmd, Path));
        let quarto = self.quarto.map(|qmd| leak!(qmd, OsStr)).unwrap_or_else(|| {
            log::debug!(target: "faucet", "No quarto command specified. Defaulting to `quarto`.");
            OsStr::new("quarto")
        });
        let route = self.route.map(|r| -> &'static _ { leak!(r) });
        let max_rps = self.max_rps;
        Ok(FaucetServerConfig {
            strategy,
            bind,
            n_workers,
            server_type,
            workdir,
            extractor,
            rscript,
            app_dir,
            uv,
            route,
            quarto,
            qmd,
            max_rps,
        })
    }
}
impl Default for FaucetServerBuilder {
fn default() -> Self {
Self::new()
}
}
/// Fully-resolved server configuration produced by [`FaucetServerBuilder::build`].
///
/// String/path fields are `'static` because the builder leaks them; the
/// config is expected to live for the whole process.
#[derive(Clone)]
pub struct FaucetServerConfig {
    pub strategy: Strategy,
    // Still optional: router-mounted servers never bind their own socket.
    pub bind: Option<SocketAddr>,
    pub n_workers: NonZeroUsize,
    pub server_type: WorkerType,
    pub workdir: &'static Path,
    pub extractor: load_balancing::IpExtractor,
    pub rscript: &'static OsStr,
    pub uv: &'static OsStr,
    pub quarto: &'static OsStr,
    pub app_dir: Option<&'static str>,
    pub route: Option<&'static str>,
    pub qmd: Option<&'static Path>,
    pub max_rps: Option<f64>,
}
impl FaucetServerConfig {
    /// Spawn the workers, bind the listener, and serve until `shutdown`
    /// fires, then wait for every worker to exit.
    ///
    /// # Errors
    /// Fails if `bind` was never set, if the workers or load balancer cannot
    /// be created, or if the TCP listener cannot bind.
    pub async fn run(
        self,
        shutdown: &'static ShutdownSignal,
        websocket_config: &'static WebSocketConfig,
    ) -> FaucetResult<()> {
        let mut workers = WorkerConfigs::new(self.clone(), shutdown).await?;
        let load_balancer = LoadBalancer::new(
            self.strategy,
            self.extractor,
            &workers.workers,
            self.max_rps,
        )
        .await?;
        let bind = self.bind.ok_or(FaucetError::MissingArgument("bind"))?;
        let load_balancer = load_balancer.clone();
        // Middleware stack: proxy -> access logging -> per-request state.
        let service = Arc::new(
            ServiceBuilder::new(ProxyService {
                shutdown,
                websocket_config,
            })
            .layer(logging::LogLayer {})
            .layer(AddStateLayer::new(load_balancer))
            .build(),
        );
        // Bind to the port and listen for incoming TCP connections
        let listener = TcpListener::bind(bind).await?;
        log::info!(target: "faucet", "Listening on http://{bind}");
        let main_loop = || async {
            loop {
                match listener.accept().await {
                    Err(e) => {
                        // An accept error ends the loop (and thus the server).
                        log::error!(target: "faucet", "Unable to accept TCP connection: {e}");
                        return;
                    }
                    Ok((tcp, client_addr)) => {
                        let tcp = TokioIo::new(tcp);
                        log::debug!(target: "faucet", "Accepted TCP connection from {client_addr}");
                        let service = service.clone();
                        // Each connection is served on its own task; the
                        // connection is raced against the shutdown signal.
                        tokio::task::spawn(async move {
                            let mut conn = http1::Builder::new()
                                .half_close(true)
                                .serve_connection(
                                    tcp,
                                    service_fn(|req: Request<Incoming>| {
                                        service.call(req, Some(client_addr.ip()))
                                    }),
                                )
                                .with_upgrades();
                            let conn = pin!(&mut conn);
                            tokio::select! {
                                result = conn => {
                                    if let Err(e) = result {
                                        log::error!(target: "faucet", "Connection error: {e:?}");
                                    }
                                }
                                _ = shutdown.wait() => ()
                            }
                        });
                    }
                };
            }
        };
        // Race the shutdown vs the main loop
        tokio::select! {
            _ = shutdown.wait() => (),
            _ = main_loop() => (),
        }
        log::debug!("Main loop ended!");
        // Graceful shutdown: wait for every worker process to finish.
        for worker in &mut workers.workers {
            log::debug!("Waiting for {} to finish", worker.target);
            worker.wait_until_done().await;
        }
        log::debug!("All workers are finished!");
        FaucetResult::Ok(())
    }
    /// Spawn the workers and build the service stack WITHOUT binding a
    /// socket; used by the router, which multiplexes many such services
    /// behind a single listener.
    ///
    /// Returns the callable service plus the worker handles so the caller
    /// can await their shutdown.
    pub async fn extract_service(
        self,
        shutdown: &'static ShutdownSignal,
        websocket_config: &'static WebSocketConfig,
    ) -> FaucetResult<(FaucetServerService, WorkerConfigs)> {
        let workers = WorkerConfigs::new(self.clone(), shutdown).await?;
        let load_balancer = LoadBalancer::new(
            self.strategy,
            self.extractor,
            &workers.workers,
            self.max_rps,
        )
        .await?;
        let service = Arc::new(
            ServiceBuilder::new(ProxyService {
                shutdown,
                websocket_config,
            })
            .layer(logging::LogLayer {})
            .layer(AddStateLayer::new(load_balancer))
            .build(),
        );
        Ok((FaucetServerService { inner: service }, workers))
    }
}
/// Cheaply-cloneable handle to a fully-built faucet middleware stack
/// (state injection -> logging -> proxy).
pub struct FaucetServerService {
    inner: Arc<AddStateService<LogService<ProxyService>>>,
}
impl Clone for FaucetServerService {
fn clone(&self) -> Self {
FaucetServerService {
inner: Arc::clone(&self.inner),
}
}
}
impl Service<hyper::Request<Incoming>> for FaucetServerService {
    type Error = FaucetError;
    type Response = hyper::Response<ExclusiveBody>;
    /// Forward the request (and the extracted client IP) to the wrapped
    /// middleware stack.
    async fn call(
        &self,
        request: hyper::Request<Incoming>,
        client_ip: Option<std::net::IpAddr>,
    ) -> Result<Self::Response, Self::Error> {
        self.inner.call(request, client_ip).await
    }
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/server/logging.rs | src/server/logging.rs | use hyper::{http::HeaderValue, Method, Request, Response, Uri, Version};
use serde::de::DeserializeOwned;
use uuid::Uuid;
use super::onion::{Layer, Service};
use crate::{server::service::State, telemetry::send_http_event};
use std::{net::IpAddr, time};
/// Logger construction: env_logger targeting stderr, optionally teeing into
/// a size-rotated log file via a background tokio task.
pub mod logger {
    use std::{io::BufWriter, io::Write, path::PathBuf};
    use hyper::body::Bytes;
    use tokio::task::JoinHandle;
    use crate::shutdown::ShutdownSignal;
    /// Where log output goes.
    pub enum Target {
        Stderr,
        File(PathBuf),
    }
    // `Write` adapter handed to env_logger; it forwards bytes to the
    // background writer task over a channel.
    struct LogFileWriter {
        sender: tokio::sync::mpsc::Sender<Bytes>,
    }
    impl std::io::Write for LogFileWriter {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            // Best-effort: if the channel is full or closed the log bytes
            // are silently dropped rather than blocking the logging caller.
            let _ = self.sender.try_send(Bytes::copy_from_slice(buf));
            Ok(buf.len())
        }
        fn flush(&mut self) -> std::io::Result<()> {
            Ok(())
        }
    }
    /// Open `path` for appending and spawn the task that writes incoming
    /// log bytes to both stderr and the file, rotating to `<path>.bak`
    /// whenever the file exceeds `max_file_size` bytes.
    ///
    /// Panics if the log file cannot be opened or created.
    fn start_log_writer_thread(
        path: PathBuf,
        max_file_size: Option<u64>,
        shutdown: &'static ShutdownSignal,
    ) -> (LogFileWriter, JoinHandle<()>) {
        let max_file_size = max_file_size.unwrap_or(u64::MAX);
        // Seed the running size from the existing file, if any.
        let mut current_file_size = match std::fs::metadata(&path) {
            Ok(md) => md.len(),
            Err(_) => 0,
        };
        let file = std::fs::File::options()
            .create(true)
            .append(true)
            .truncate(false)
            .open(&path)
            .expect("Unable to open or create log file");
        // Create a file path to a backup of the previous logs with MAX file size
        let mut copy_path = path.clone();
        copy_path.as_mut_os_string().push(".bak");
        let mut writer = BufWriter::new(file);
        let mut stderr = BufWriter::new(std::io::stderr());
        let (sender, mut receiver) = tokio::sync::mpsc::channel::<Bytes>(1000);
        let writer_thread = tokio::task::spawn(async move {
            loop {
                tokio::select! {
                    bytes = receiver.recv() => {
                        match bytes {
                            Some(bytes) => {
                                // Tee every chunk to stderr and the file.
                                if let Err(e) = stderr.write_all(bytes.as_ref()) {
                                    eprintln!("Unable to write to stderr: {e}");
                                };
                                if let Err(e) = writer.write_all(bytes.as_ref()) {
                                    eprintln!("Unable to write to {path:?}: {e}");
                                };
                                current_file_size += bytes.len() as u64;
                                if current_file_size > max_file_size {
                                    // Flush the writer
                                    let _ = writer.flush();
                                    let file = writer.get_mut();
                                    // Copy the current file to the backup
                                    if let Err(e) = std::fs::copy(&path, &copy_path) {
                                        log::error!("Unable to copy logs to backup file: {e}");
                                    }
                                    // Truncate the logs file
                                    if let Err(e) = file.set_len(0) {
                                        log::error!("Unable to truncate logs file: {e}");
                                    }
                                    current_file_size = 0;
                                }
                            },
                            // Channel closed: all senders dropped.
                            None => break
                        }
                    },
                    _ = shutdown.wait() => break
                }
            }
            // Final flush on shutdown; errors here are ignored.
            let _ = writer.flush();
            let _ = stderr.flush();
        });
        (LogFileWriter { sender }, writer_thread)
    }
    /// Initialize the global env_logger (`FAUCET_LOG`, default "info").
    ///
    /// Returns the file-writer task handle when logging to a file, so the
    /// caller can await it during shutdown; `None` for plain stderr.
    pub fn build_logger(
        target: Target,
        max_file_size: Option<u64>,
        shutdown: &'static ShutdownSignal,
    ) -> Option<JoinHandle<()>> {
        let (target, handle) = match target {
            Target::File(path) => {
                let (writer, handle) = start_log_writer_thread(path, max_file_size, shutdown);
                (env_logger::Target::Pipe(Box::new(writer)), Some(handle))
            }
            Target::Stderr => (env_logger::Target::Stderr, None),
        };
        let mut env_builder = env_logger::Builder::new();
        env_builder
            .parse_env(env_logger::Env::new().filter_or("FAUCET_LOG", "info"))
            .target(target)
            .init();
        handle
    }
}
/// Snapshot of the per-request routing state used when emitting log lines.
#[derive(Clone, Copy)]
pub struct StateData {
    // Unique id assigned to this request.
    pub uuid: uuid::Uuid,
    // Client IP as resolved by the configured extractor.
    pub ip: IpAddr,
    // Route prefix when running behind the router, if any.
    pub worker_route: Option<&'static str>,
    pub worker_id: usize,
    // Log target of the worker that handled the request.
    pub target: &'static str,
}
/// Abstraction over request-state types that can produce [`StateData`];
/// lets tests substitute a mock state (see `mod tests`).
trait StateLogData: Send + Sync + 'static {
    fn get_state_data(&self) -> StateData;
}
impl StateLogData for State {
    /// Copy the loggable fields out of the live request state.
    #[inline(always)]
    fn get_state_data(&self) -> StateData {
        StateData {
            uuid: self.uuid,
            ip: self.remote_addr,
            worker_id: self.client.config.worker_id,
            worker_route: self.client.config.worker_route,
            target: self.client.config.target,
        }
    }
}
/// `Option`-like wrapper whose `Display`/`Debug` render the empty case as
/// `-` (the conventional placeholder in access-log lines).
#[derive(PartialEq, Eq)]
pub enum LogOption<T> {
    None,
    Some(T),
}
impl<T> From<Option<T>> for LogOption<T> {
    /// Straight variant-for-variant conversion from `std::option::Option`.
    fn from(opt: Option<T>) -> Self {
        opt.map_or(LogOption::None, LogOption::Some)
    }
}
impl<T> std::fmt::Display for LogOption<T>
where
    T: std::fmt::Display,
{
    /// Renders the inner value, or `-` when absent.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        if let LogOption::Some(value) = self {
            write!(f, "{value}")
        } else {
            write!(f, "-")
        }
    }
}
impl<T> std::fmt::Debug for LogOption<T>
where
    T: std::fmt::Debug,
{
    /// Debug-renders the inner value, or a quoted `"-"` when absent.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        if let LogOption::Some(value) = self {
            write!(f, "{value:?}")
        } else {
            write!(f, r#""-""#)
        }
    }
}
/// One complete access-log record: request and response metadata plus the
/// routing state captured while the request was in flight.
pub struct HttpLogData {
    pub state_data: StateData,
    pub method: Method,
    pub path: Uri,
    pub version: Version,
    // HTTP status code, narrowed to i16.
    pub status: i16,
    pub user_agent: LogOption<HeaderValue>,
    // Request duration in whole milliseconds.
    pub elapsed: i64,
}
impl HttpLogData {
    /// Emit one access-log line at info level, using the worker's log
    /// target so per-worker filtering works.
    fn log(&self) {
        log::info!(
            target: self.state_data.target,
            r#"{ip} "{method} {route}{path} {version:?}" {status} {user_agent:?} {elapsed}"#,
            // Trailing '/' is trimmed from the route so "route + path"
            // never renders a double slash.
            route = self.state_data.worker_route.map(|r| r.trim_end_matches('/')).unwrap_or_default(),
            ip = self.state_data.ip,
            method = self.method,
            path = self.path,
            version = self.version,
            status = self.status,
            user_agent = self.user_agent,
            elapsed = self.elapsed,
        );
    }
}
/// Run `req` through `inner` while capturing everything needed for an
/// access-log record (method, path, version, user agent, status, elapsed ms).
///
/// Request metadata must be cloned BEFORE the call because `req` is moved
/// into the inner service.
///
/// # Panics
/// Panics if the request carries no `State` extension; the upstream state
/// layer is expected to have inserted it.
#[inline(always)]
async fn capture_log_data<Body, ResBody, Error, State: StateLogData>(
    inner: &impl Service<Request<Body>, Response = Response<ResBody>, Error = Error>,
    req: Request<Body>,
) -> Result<(Response<ResBody>, HttpLogData), Error> {
    let start = time::Instant::now();
    // Extract request info for logging
    let state = req.extensions().get::<State>().expect("State not found");
    let state_data = state.get_state_data();
    let method = req.method().clone();
    let path = req.uri().clone();
    let version = req.version();
    let headers = req.headers();
    let user_agent: LogOption<_> = headers.get(hyper::header::USER_AGENT).cloned().into();
    // Make the request
    let res = inner.call(req, None).await?;
    // Extract response info for logging
    let status = res.status().as_u16() as i16;
    let elapsed = start.elapsed().as_millis() as i64;
    let log_data = HttpLogData {
        state_data,
        method,
        path,
        version,
        status,
        user_agent,
        elapsed,
    };
    Ok((res, log_data))
}
/// Middleware service that wraps `S` with access logging and telemetry.
pub(super) struct LogService<S> {
    inner: S,
}
impl<S, Body, ResBody> Service<Request<Body>> for LogService<S>
where
    S: Service<Request<Body>, Response = Response<ResBody>> + Send + Sync,
{
    type Error = S::Error;
    type Response = Response<ResBody>;
    /// Delegate to the inner service, then emit the access-log line and
    /// forward the record to the telemetry sink.
    async fn call(
        &self,
        req: Request<Body>,
        _: Option<IpAddr>,
    ) -> Result<Self::Response, Self::Error> {
        let (res, log_data) = capture_log_data::<_, _, _, State>(&self.inner, req).await?;
        log_data.log();
        send_http_event(log_data);
        Ok(res)
    }
}
/// Layer that wraps a service in [`LogService`].
pub(super) struct LogLayer {}
impl<S> Layer<S> for LogLayer {
    type Service = LogService<S>;
    fn layer(&self, inner: S) -> Self::Service {
        LogService { inner }
    }
}
/// Severity of a structured event emitted by a worker; deserialized from
/// the capitalized variant names ("Error", "Warn", ...).
#[derive(serde::Deserialize, Clone, Copy)]
pub enum FaucetTracingLevel {
    Error,
    Warn,
    Info,
    Debug,
    Trace,
}
impl FaucetTracingLevel {
    /// Lowercase name of the level ("error", "warn", ...).
    pub fn as_str(self) -> &'static str {
        match self {
            FaucetTracingLevel::Error => "error",
            FaucetTracingLevel::Warn => "warn",
            FaucetTracingLevel::Info => "info",
            FaucetTracingLevel::Debug => "debug",
            FaucetTracingLevel::Trace => "trace",
        }
    }
}
/// Helper for fields that workers may emit either as a scalar or as a
/// length-1 array (common in R JSON output). With `untagged`, serde tries
/// variants in declaration order, so the scalar form is attempted first.
#[derive(Debug, serde::Deserialize)]
#[serde(untagged)]
enum L1OrScalar<T> {
    Scalar(T),
    L1([T; 1]),
}
/// Deserialize a value that may arrive either bare or wrapped in a
/// single-element array, unwrapping to the inner value in both cases.
fn deserialize_l1_or_scalar<'de, T, D>(data: D) -> Result<T, D::Error>
where
    D: serde::Deserializer<'de>,
    T: DeserializeOwned,
{
    let parsed: L1OrScalar<T> = serde::Deserialize::deserialize(data)?;
    // Both shapes carry exactly one value; bind it with an or-pattern.
    match parsed {
        L1OrScalar::Scalar(value) | L1OrScalar::L1([value]) => Ok(value),
    }
}
/// Structured event emitted by a worker over stdout/stderr.
///
/// Every scalar field also accepts a length-1 array, because some worker
/// runtimes (e.g. R's JSON serializers) wrap scalars in vectors.
#[derive(serde::Deserialize)]
pub struct EventLogData {
    #[serde(deserialize_with = "deserialize_l1_or_scalar")]
    pub target: String,
    #[serde(deserialize_with = "deserialize_l1_or_scalar")]
    pub event_id: Uuid,
    #[serde(deserialize_with = "deserialize_l1_or_scalar")]
    pub level: FaucetTracingLevel,
    #[serde(deserialize_with = "deserialize_l1_or_scalar")]
    pub parent_event_id: Option<Uuid>,
    #[serde(deserialize_with = "deserialize_l1_or_scalar")]
    pub event_type: String,
    #[serde(deserialize_with = "deserialize_l1_or_scalar")]
    pub message: String,
    // Free-form payload; passed through as raw JSON.
    pub body: Option<serde_json::Value>,
}
/// Reasons a tagged worker line failed to parse as a structured event.
/// Borrows from the input line, hence the lifetime.
#[derive(Debug)]
pub enum FaucetEventParseError<'a> {
    UnableToSplit,
    InvalidString(&'a str),
    // JSON payload present but malformed; `str` is the offending payload.
    SerdeError {
        err: serde_json::Error,
        str: &'a str,
    },
}
/// Outcome of classifying one line of worker output: a parsed structured
/// event, plain output to pass through, or a parse failure.
pub enum FaucetEventResult<'a> {
    Event(EventLogData),
    Output(&'a str),
    EventError(FaucetEventParseError<'a>),
}
/// Classify one line of worker output.
///
/// Lines tagged with the `{{ faucet_event }}:` prefix carry a JSON-encoded
/// [`EventLogData`] payload; anything else is plain output. Trailing
/// newlines are stripped before classification.
///
/// Implementation note: the original `starts_with` + `split_once(':')`
/// pair scanned the line twice and left an unreachable `UnableToSplit`
/// branch (the prefix itself guarantees a ':'); a single `strip_prefix`
/// does the same job in one pass.
pub fn parse_faucet_event(content: &str) -> FaucetEventResult<'_> {
    use FaucetEventResult::*;
    let content = content.trim_end_matches('\n');
    // Untagged lines are ordinary worker output.
    let Some(payload) = content.strip_prefix("{{ faucet_event }}:") else {
        return Output(content);
    };
    match serde_json::from_str::<EventLogData>(payload.trim()) {
        Ok(event) => Event(event),
        Err(err) => EventError(FaucetEventParseError::SerdeError { err, str: payload }),
    }
}
#[cfg(test)]
mod tests {
    use hyper::StatusCode;
    use super::*;
    // End-to-end check of capture_log_data using a mock state and a service
    // that sleeps briefly so `elapsed` is measurably > 0.
    #[tokio::test]
    async fn log_capture() {
        #[derive(Clone)]
        struct MockState;
        impl StateLogData for MockState {
            fn get_state_data(&self) -> StateData {
                StateData {
                    uuid: uuid::Uuid::now_v7(),
                    ip: IpAddr::V4([127, 0, 0, 1].into()),
                    target: "test",
                    worker_id: 1,
                    worker_route: None,
                }
            }
        }
        struct Svc;
        impl Service<Request<()>> for Svc {
            type Response = Response<()>;
            type Error = ();
            async fn call(
                &self,
                _: Request<()>,
                _: Option<IpAddr>,
            ) -> Result<Self::Response, Self::Error> {
                tokio::time::sleep(std::time::Duration::from_millis(5)).await;
                Ok(Response::builder().status(StatusCode::OK).body(()).unwrap())
            }
        }
        let req = Request::builder()
            .method(Method::GET)
            .uri("https://example.com/")
            .extension(MockState)
            .version(Version::HTTP_11)
            .header(hyper::header::USER_AGENT, "test")
            .body(())
            .unwrap();
        let (_, log_data) = capture_log_data::<_, _, _, MockState>(&Svc, req)
            .await
            .unwrap();
        assert_eq!(log_data.state_data.ip, IpAddr::V4([127, 0, 0, 1].into()));
        assert_eq!(log_data.method, Method::GET);
        assert_eq!(log_data.path, "https://example.com/");
        assert_eq!(log_data.version, Version::HTTP_11);
        assert_eq!(log_data.status, 200);
        assert_eq!(
            log_data.user_agent,
            LogOption::Some(HeaderValue::from_static("test"))
        );
        assert!(log_data.elapsed > 0);
        assert_eq!(log_data.state_data.target, "test");
    }
    // Display renders None as "-" and Some transparently.
    #[test]
    fn log_option_display() {
        assert_eq!(LogOption::<u8>::None.to_string(), "-");
        assert_eq!(LogOption::Some(1).to_string(), "1");
    }
    // Debug renders None as a quoted "-".
    #[test]
    fn log_option_debug() {
        assert_eq!(format!("{:?}", LogOption::<u8>::None), r#""-""#);
        assert_eq!(format!("{:?}", LogOption::Some(1)), "1");
    }
    #[test]
    fn log_option_from_option() {
        assert_eq!(LogOption::<u8>::from(None), LogOption::None);
        assert_eq!(LogOption::from(Some(1)), LogOption::Some(1));
    }
    // Pins the exact access-log line format by piping env_logger into an
    // in-memory buffer.
    #[test]
    fn log_data_log() {
        use std::io::Write;
        use std::sync::{Arc, Mutex};
        // Shared in-memory Write target for env_logger.
        struct Buffer(Arc<Mutex<Vec<u8>>>);
        impl Write for Buffer {
            fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
                self.0.lock().unwrap().write(buf)
            }
            fn flush(&mut self) -> std::io::Result<()> {
                self.0.lock().unwrap().flush()
            }
        }
        impl Buffer {
            fn clone_buf(&self) -> Vec<u8> {
                self.0.lock().unwrap().clone()
            }
        }
        impl Clone for Buffer {
            fn clone(&self) -> Self {
                Buffer(Arc::clone(&self.0))
            }
        }
        let log_data = HttpLogData {
            state_data: StateData {
                uuid: uuid::Uuid::now_v7(),
                target: "test",
                ip: IpAddr::V4([127, 0, 0, 1].into()),
                worker_route: None,
                worker_id: 1,
            },
            method: Method::GET,
            path: "https://example.com/".parse().unwrap(),
            version: Version::HTTP_11,
            status: 200,
            user_agent: LogOption::Some(HeaderValue::from_static("test")),
            elapsed: 5,
        };
        let buf = Buffer(Arc::new(Mutex::new(Vec::new())));
        let mut logger = env_logger::Builder::new();
        // ALWAYS USE INFO LEVEL FOR LOGGING
        logger.filter_level(log::LevelFilter::Info);
        logger.format(|f, record| writeln!(f, "{}", record.args()));
        logger.target(env_logger::Target::Pipe(Box::new(buf.clone())));
        logger.init();
        log_data.log();
        let log = String::from_utf8(buf.clone_buf()).unwrap();
        assert_eq!(
            log.trim(),
            r#"127.0.0.1 "GET https://example.com/ HTTP/1.1" 200 "test" 5"#
        )
    }
    // Scalar-shaped fields deserialize directly.
    #[test]
    fn event_log_data_deserializes_from_scalars() {
        let event_id = Uuid::now_v7();
        let parent_event_id = Uuid::now_v7();
        let json_str = format!(
            r#"{{
            "target": "my_target",
            "event_id": "{}",
            "level": "Info",
            "parent_event_id": "{}",
            "event_type": "request",
            "message": "hello world",
            "body": {{ "key": "value" }}
        }}"#,
            event_id, parent_event_id
        );
        let data: EventLogData = serde_json::from_str(&json_str).unwrap();
        assert_eq!(data.target, "my_target");
        assert_eq!(data.event_id, event_id);
        assert!(matches!(data.level, FaucetTracingLevel::Info));
        assert_eq!(data.parent_event_id, Some(parent_event_id));
        assert_eq!(data.event_type, "request");
        assert_eq!(data.message, "hello world");
        assert!(data.body.is_some());
    }
    // `parent_event_id: null` maps to None.
    #[test]
    fn event_log_data_deserializes_from_scalars_with_null_parent() {
        let event_id = Uuid::now_v7();
        let json_str = format!(
            r#"{{
            "target": "my_target",
            "event_id": "{}",
            "level": "Info",
            "parent_event_id": null,
            "event_type": "request",
            "message": "hello world",
            "body": null
        }}"#,
            event_id
        );
        let data: EventLogData = serde_json::from_str(&json_str).unwrap();
        assert_eq!(data.target, "my_target");
        assert_eq!(data.event_id, event_id);
        assert!(matches!(data.level, FaucetTracingLevel::Info));
        assert_eq!(data.parent_event_id, None);
        assert_eq!(data.event_type, "request");
        assert_eq!(data.message, "hello world");
        assert!(data.body.is_none());
    }
    // Length-1 array wrappers (as produced by R serializers) also work.
    #[test]
    fn event_log_data_deserializes_from_l1_vectors() {
        let event_id = Uuid::now_v7();
        let parent_event_id = Uuid::now_v7();
        let json_str = format!(
            r#"{{
            "target": ["my_target"],
            "event_id": ["{}"],
            "level": ["Info"],
            "parent_event_id": ["{}"],
            "event_type": ["request"],
            "message": ["hello world"],
            "body": {{ "key": "value" }}
        }}"#,
            event_id, parent_event_id
        );
        let data: EventLogData = serde_json::from_str(&json_str).unwrap();
        assert_eq!(data.target, "my_target");
        assert_eq!(data.event_id, event_id);
        assert!(matches!(data.level, FaucetTracingLevel::Info));
        assert_eq!(data.parent_event_id, Some(parent_event_id));
        assert_eq!(data.event_type, "request");
        assert_eq!(data.message, "hello world");
        assert!(data.body.is_some());
    }
    // [null] inside an L1 vector also maps to None.
    #[test]
    fn event_log_data_deserializes_from_l1_vectors_with_null_parent() {
        let event_id = Uuid::now_v7();
        let json_str = format!(
            r#"{{
            "target": ["my_target"],
            "event_id": ["{}"],
            "level": ["Info"],
            "parent_event_id": [null],
            "event_type": ["request"],
            "message": ["hello world"],
            "body": null
        }}"#,
            event_id
        );
        let data: EventLogData = serde_json::from_str(&json_str).unwrap();
        assert_eq!(data.target, "my_target");
        assert_eq!(data.event_id, event_id);
        assert!(matches!(data.level, FaucetTracingLevel::Info));
        assert_eq!(data.parent_event_id, None);
        assert_eq!(data.event_type, "request");
        assert_eq!(data.message, "hello world");
        assert!(data.body.is_none());
    }
    // Scalar and L1 shapes can be mixed within one payload.
    #[test]
    fn event_log_data_deserializes_from_mixed_scalars_and_l1_vectors() {
        let event_id = Uuid::now_v7();
        let parent_event_id = Uuid::now_v7();
        let json_str = format!(
            r#"{{
            "target": "my_target",
            "event_id": ["{}"],
            "level": "Info",
            "parent_event_id": ["{}"],
            "event_type": "request",
            "message": ["hello world"],
            "body": {{ "key": "value" }}
        }}"#,
            event_id, parent_event_id
        );
        let data: EventLogData = serde_json::from_str(&json_str).unwrap();
        assert_eq!(data.target, "my_target");
        assert_eq!(data.event_id, event_id);
        assert!(matches!(data.level, FaucetTracingLevel::Info));
        assert_eq!(data.parent_event_id, Some(parent_event_id));
        assert_eq!(data.event_type, "request");
        assert_eq!(data.message, "hello world");
        assert!(data.body.is_some());
    }
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/server/router/mod.rs | src/server/router/mod.rs | use std::{
collections::HashSet, ffi::OsStr, net::SocketAddr, num::NonZeroUsize, path::PathBuf, pin::pin,
sync::Arc,
};
use hyper::{body::Incoming, server::conn::http1, service::service_fn, Request, Uri};
use hyper_util::rt::TokioIo;
use tokio::net::TcpListener;
use tokio_tungstenite::tungstenite::{http::uri::PathAndQuery, protocol::WebSocketConfig};
use super::{onion::Service, FaucetServerBuilder, FaucetServerService};
use crate::{
client::{
load_balancing::{IpExtractor, Strategy},
worker::{WorkerConfigs, WorkerType},
ExclusiveBody,
},
error::{FaucetError, FaucetResult},
shutdown::ShutdownSignal,
};
/// Serde default for a route's working directory: the current directory.
fn default_workdir() -> PathBuf {
    ".".into()
}
/// Per-route subset of the server options, deserialized from the router
/// config file.
#[derive(serde::Deserialize)]
struct ReducedServerConfig {
    // Optional; derived from `server_type` when absent.
    pub strategy: Option<Strategy>,
    // Defaults to "." when not specified.
    #[serde(default = "default_workdir")]
    pub workdir: PathBuf,
    pub app_dir: Option<String>,
    pub workers: NonZeroUsize,
    pub server_type: WorkerType,
    pub qmd: Option<PathBuf>,
    pub max_rps: Option<f64>,
}
/// One router entry: a URI prefix plus the server options for it. The
/// server options are flattened, so they appear inline next to `route`
/// in the config file.
#[derive(serde::Deserialize)]
struct RouteConfig {
    route: String,
    #[serde(flatten)]
    config: ReducedServerConfig,
}
/// Top-level router configuration: the ordered list of route entries.
#[derive(serde::Deserialize)]
pub struct RouterConfig {
    route: Vec<RouteConfig>,
}
/// Prefix-dispatching service built from a [`RouterConfig`].
/// `routes[i]` corresponds to `clients[i]`; both are populated together
/// in `into_service`.
#[derive(Clone)]
struct RouterService {
    routes: &'static [String],
    clients: Arc<[FaucetServerService]>,
}
/// Match a non-slash-terminated route prefix: the request path must equal
/// `prefix` exactly, and the rewritten path becomes "/" (keeping any query).
fn strip_prefix_exact(path_and_query: &PathAndQuery, prefix: &str) -> Option<PathAndQuery> {
    if path_and_query.path() != prefix {
        return None;
    }
    let rewritten = match path_and_query.query() {
        Some(query) => format!("/?{query}").parse().unwrap(),
        None => "/".parse().unwrap(),
    };
    Some(rewritten)
}
/// Match a slash-terminated route prefix: strip it from the request path,
/// re-rooting the remainder at "/" and re-attaching any query string.
fn strip_prefix_relative(path_and_query: &PathAndQuery, prefix: &str) -> Option<PathAndQuery> {
    // Short-circuit when the path does not start with the prefix.
    let remainder = path_and_query.path().strip_prefix(prefix)?;
    let mut rewritten = String::new();
    // Ensure the forwarded path is absolute.
    if !remainder.starts_with('/') {
        rewritten.push('/');
    }
    rewritten.push_str(remainder);
    if let Some(query) = path_and_query.query() {
        rewritten.push('?');
        rewritten.push_str(query);
    }
    Some(rewritten.parse().unwrap())
}
/// Rewrite `uri` with the route `prefix` removed, or `None` when it does
/// not match. Slash-terminated prefixes match any sub-path; others must
/// match the path exactly.
fn strip_prefix(uri: &Uri, prefix: &str) -> Option<Uri> {
    let path_and_query = uri.path_and_query()?;
    let stripped = if prefix.ends_with('/') {
        strip_prefix_relative(path_and_query, prefix)?
    } else {
        strip_prefix_exact(path_and_query, prefix)?
    };
    let mut parts = uri.clone().into_parts();
    parts.path_and_query = Some(stripped);
    Some(Uri::from_parts(parts).unwrap())
}
impl Service<hyper::Request<Incoming>> for RouterService {
    type Error = FaucetError;
    type Response = hyper::Response<ExclusiveBody>;
    /// Dispatch to the first route whose prefix matches the request URI,
    /// forwarding with the prefix stripped; respond 404 when nothing matches.
    async fn call(
        &self,
        mut req: hyper::Request<Incoming>,
        ip_addr: Option<std::net::IpAddr>,
    ) -> Result<Self::Response, Self::Error> {
        let mut matched = None;
        // routes[i] and clients[i] are parallel arrays (see into_service).
        for (route, candidate) in self.routes.iter().zip(self.clients.iter()) {
            if let Some(new_uri) = strip_prefix(req.uri(), route) {
                *req.uri_mut() = new_uri;
                matched = Some(candidate);
                break;
            }
        }
        match matched {
            Some(client) => client.call(req, ip_addr).await,
            None => Ok(hyper::Response::builder()
                .status(404)
                .body(ExclusiveBody::plain_text("404 not found"))
                .expect("Response should build")),
        }
    }
}
impl RouterConfig {
    /// Spin up one faucet server (workers + middleware) per configured
    /// route and bundle them behind a prefix-dispatching [`RouterService`].
    ///
    /// # Errors
    /// Fails on a duplicate route prefix, or if any per-route server fails
    /// to build or start its workers.
    async fn into_service(
        self,
        rscript: impl AsRef<OsStr>,
        quarto: impl AsRef<OsStr>,
        uv: impl AsRef<OsStr>,
        ip_from: IpExtractor,
        shutdown: &'static ShutdownSignal,
        websocket_config: &'static WebSocketConfig,
    ) -> FaucetResult<(RouterService, Vec<WorkerConfigs>)> {
        let mut all_workers = Vec::with_capacity(self.route.len());
        let mut routes = Vec::with_capacity(self.route.len());
        let mut clients = Vec::with_capacity(self.route.len());
        // Used only to reject duplicate prefixes.
        let mut routes_set = HashSet::with_capacity(self.route.len());
        for route_conf in self.route.into_iter() {
            let route = route_conf.route;
            if !routes_set.insert(route.clone()) {
                return Err(FaucetError::DuplicateRoute(route));
            }
            // Build a full per-route server but extract its service instead
            // of binding a socket; the router owns the single listener.
            let (client, workers) = FaucetServerBuilder::new()
                .workdir(route_conf.config.workdir)
                .server_type(route_conf.config.server_type)
                .strategy(route_conf.config.strategy)
                .rscript(&rscript)
                .uv(&uv)
                .quarto(&quarto)
                .qmd(route_conf.config.qmd)
                .workers(route_conf.config.workers.get())
                .extractor(ip_from)
                .app_dir(route_conf.config.app_dir)
                .route(route.clone())
                .max_rps(route_conf.config.max_rps)
                .build()?
                .extract_service(shutdown, websocket_config)
                .await?;
            // Keep routes[i] and clients[i] aligned; dispatch relies on it.
            routes.push(route);
            all_workers.push(workers);
            clients.push(client);
        }
        // Leaked: the route table lives for the process lifetime.
        let routes = routes.leak();
        let clients = clients.into();
        let service = RouterService { clients, routes };
        Ok((service, all_workers))
    }
}
impl RouterConfig {
    /// Build the router service, bind `addr`, and serve until `shutdown`
    /// fires, then wait for every route's workers to exit.
    ///
    /// # Errors
    /// Propagates failures from `into_service` and from binding the listener.
    pub async fn run(
        self,
        rscript: impl AsRef<OsStr>,
        quarto: impl AsRef<OsStr>,
        uv: impl AsRef<OsStr>,
        ip_from: IpExtractor,
        addr: SocketAddr,
        shutdown: &'static ShutdownSignal,
        websocket_config: &'static WebSocketConfig,
    ) -> FaucetResult<()> {
        let (service, all_workers) = self
            .into_service(rscript, quarto, uv, ip_from, shutdown, websocket_config)
            .await?;
        // Bind to the port and listen for incoming TCP connections
        let listener = TcpListener::bind(addr).await?;
        log::info!(target: "faucet", "Listening on http://{addr}");
        let main_loop = || async {
            loop {
                match listener.accept().await {
                    Err(e) => {
                        // An accept error ends the loop (and thus the server).
                        log::error!(target: "faucet", "Unable to accept TCP connection: {e}");
                        return;
                    }
                    Ok((tcp, client_addr)) => {
                        let tcp = TokioIo::new(tcp);
                        log::debug!(target: "faucet", "Accepted TCP connection from {client_addr}");
                        let service = service.clone();
                        tokio::task::spawn(async move {
                            // NOTE(review): unlike the single-server loop in
                            // server/mod.rs, this builder does not set
                            // .half_close(true) — confirm whether that
                            // difference is intentional.
                            let mut conn = http1::Builder::new()
                                .serve_connection(
                                    tcp,
                                    service_fn(|req: Request<Incoming>| {
                                        service.call(req, Some(client_addr.ip()))
                                    }),
                                )
                                .with_upgrades();
                            let conn = pin!(&mut conn);
                            tokio::select! {
                                result = conn => {
                                    if let Err(e) = result {
                                        log::error!(target: "faucet", "Connection error: {e:?}");
                                    }
                                }
                                _ = shutdown.wait() => ()
                            }
                        });
                    }
                }
            }
        };
        // Race the shutdown vs the main loop
        tokio::select! {
            _ = shutdown.wait() => (),
            _ = main_loop() => (),
        }
        // Kill child process
        for w in all_workers.iter().flat_map(|ws| &ws.workers) {
            w.wait_until_done().await;
        }
        FaucetResult::Ok(())
    }
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/client/websockets.rs | src/client/websockets.rs | use super::{pool::ExtractSocketAddr, Client, ExclusiveBody};
use crate::{
error::{BadRequestReason, FaucetError, FaucetResult},
global_conn::{add_connection, remove_connection},
server::logging::{EventLogData, FaucetTracingLevel},
shutdown::ShutdownSignal,
telemetry::send_log_event,
};
use base64::Engine;
use bytes::Bytes;
use futures_util::StreamExt;
use hyper::{
header::UPGRADE,
http::{uri::PathAndQuery, HeaderValue},
upgrade::Upgraded,
HeaderMap, Request, Response, StatusCode, Uri,
};
use hyper_util::rt::TokioIo;
use serde_json::json;
use sha1::{Digest, Sha1};
use std::{
collections::HashMap, future::Future, net::SocketAddr, str::FromStr, sync::LazyLock,
time::Duration,
};
use tokio::sync::Mutex;
use tokio_tungstenite::tungstenite::{
protocol::{frame::coding::CloseCode, CloseFrame, WebSocketConfig},
Message, Utf8Bytes,
};
use uuid::Uuid;
/// Pieces of the client's upgrade request (headers and rewritten target
/// URI) captured so the request can be replayed against a worker.
struct UpgradeInfo {
    headers: HeaderMap,
    uri: Uri,
}
impl UpgradeInfo {
    /// Capture the request headers and build the `ws://` URI pointing at
    /// the worker listening on `socket_addr`.
    fn new<ReqBody>(req: &Request<ReqBody>, socket_addr: SocketAddr) -> FaucetResult<Self> {
        Ok(Self {
            headers: req.headers().clone(),
            uri: build_uri(socket_addr, req.uri().path_and_query())?,
        })
    }
}
// Magic GUID appended to the client's key when computing
// Sec-WebSocket-Accept (RFC 6455, section 4.2.2).
const SEC_WEBSOCKET_APPEND: &[u8] = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
const SEC_WEBSOCKET_KEY: &str = "Sec-WebSocket-Key";
const SEC_WEBSOCKET_ACCEPT: &str = "Sec-WebSocket-Accept";
/// Compute the `Sec-WebSocket-Accept` value for a client `key`:
/// base64(SHA-1(key + RFC 6455 GUID)), written into `buffer` to avoid an
/// allocation. Returns the written slice.
fn calculate_sec_websocket_accept<'buffer>(key: &[u8], buffer: &'buffer mut [u8]) -> &'buffer [u8] {
    let mut sha = Sha1::new();
    sha.update(key);
    sha.update(SEC_WEBSOCKET_APPEND);
    let digest = sha.finalize();
    let written = base64::engine::general_purpose::STANDARD
        .encode_slice(digest, buffer)
        .expect("Should always write the internal buffer");
    &buffer[..written]
}
/// Build the worker-side `ws://` URI for `socket_addr`, reusing the
/// client's path and query (or "/" when absent).
fn build_uri(socket_addr: SocketAddr, path: Option<&PathAndQuery>) -> FaucetResult<Uri> {
    let builder = Uri::builder()
        .scheme("ws")
        .authority(socket_addr.to_string());
    let builder = match path {
        Some(path) => builder.path_and_query(path.clone()),
        None => builder.path_and_query("/"),
    };
    Ok(builder.build()?)
}
// We want to keep the shiny tx and rx in memory in case the upgraded connection is dropped. If the user reconnects we want to immediately
// re-establish the connection back to shiny
use futures_util::SinkExt;
/// The two halves of a WebSocket connection to a shiny worker: the sink for
/// sending messages and the stream for receiving them, split so each side
/// can be driven (and parked) independently.
type ConnectionPair = (
    futures_util::stream::SplitSink<
        tokio_tungstenite::WebSocketStream<
            tokio_tungstenite::MaybeTlsStream<tokio::net::TcpStream>,
        >,
        tokio_tungstenite::tungstenite::Message,
    >,
    futures_util::stream::SplitStream<
        tokio_tungstenite::WebSocketStream<
            tokio_tungstenite::MaybeTlsStream<tokio::net::TcpStream>,
        >,
    >,
);
/// Cache entry tracking one session's worker connection.
#[derive(Default)]
struct ConnectionInstance {
    // Marks a purged session so re-initialization is rejected.
    purged: bool,
    // Bumped on every take/put_back: odd = checked out, even = parked.
    access_count: usize,
    // The parked connection halves; `None` while checked out.
    pair: Option<ConnectionPair>,
}
impl ConnectionInstance {
    /// Moves the cached pair out for exclusive use.
    ///
    /// Every `take`/`put_back` bumps `access_count`, so an odd count means
    /// the pair is checked out and an even count means it is parked.
    /// NOTE(review): this unwraps `pair` — callers must verify availability
    /// first or this panics.
    fn take(&mut self) -> ConnectionPair {
        self.access_count += 1;
        self.pair.take().unwrap()
    }
    /// Parks the pair back in the cache for a later reconnection.
    fn put_back(&mut self, pair: ConnectionPair) {
        self.access_count += 1;
        self.pair = Some(pair);
    }
}
/// State behind the manager's lock: parked connections per session plus a
/// running count of purged sessions.
struct ConnectionManagerInner {
    map: HashMap<Uuid, ConnectionInstance>,
    purge_count: usize,
}
/// Async-mutex-guarded registry of cached shiny connections, keyed by the
/// client's session id.
struct ConnectionManager {
    inner: Mutex<ConnectionManagerInner>,
}
impl ConnectionManager {
    /// Creates an empty manager with no tracked sessions.
    fn new() -> Self {
        ConnectionManager {
            inner: Mutex::new(ConnectionManagerInner {
                map: HashMap::new(),
                purge_count: 0,
            }),
        }
    }
    /// Runs `init` to establish a brand-new connection pair for `session_id`,
    /// but only if this session has never been initialized.
    ///
    /// Returns:
    /// * `None` — the session already exists (`access_count != 0`); the caller
    ///   should try `attempt_take` instead.
    /// * `Some(Err(WebSocketConnectionPurged))` — the session was purged, or
    ///   this is a reconnection attempt (`attempt > 0`) for a session we no
    ///   longer track.
    /// * `Some(Ok(pair))` — a fresh connection was established.
    ///
    /// NOTE(review): if `init` fails, `access_count` was already bumped to 1
    /// (odd == "in use"), leaving the session unusable until it is removed —
    /// confirm this is the intended failure semantics.
    async fn initialize_if_not(
        &self,
        session_id: Uuid,
        attempt: usize,
        init: impl Future<Output = FaucetResult<ConnectionPair>>,
    ) -> Option<FaucetResult<ConnectionPair>> {
        {
            // Hold the lock only for bookkeeping; the actual connection
            // setup below runs without it.
            let mut inner = self.inner.lock().await;
            let entry = inner.map.entry(session_id).or_default();
            if entry.access_count != 0 {
                return None;
            }
            if entry.purged {
                return Some(Err(FaucetError::WebSocketConnectionPurged));
            }
            // access_count is necessarily 0 here, so a reconnection attempt
            // (attempt > 0) against an unknown session means its cached
            // connection was purged in the meantime.
            if attempt > 0 {
                return Some(Err(FaucetError::WebSocketConnectionPurged));
            }
            entry.access_count += 1;
        }
        let connection_pair = match init.await {
            Ok(connection_pair) => connection_pair,
            Err(e) => return Some(Err(e)),
        };
        Some(Ok(connection_pair))
    }
    /// Tries to check out the parked pair for `session_id` without waiting on
    /// the lock. Fails with `WebSocketConnectionInUse` when the lock is
    /// contended, the session is unknown, or the pair is currently in use.
    async fn attempt_take(&self, session_id: Uuid) -> FaucetResult<ConnectionPair> {
        match self.inner.try_lock() {
            Ok(mut inner) => match inner.map.get_mut(&session_id) {
                // Even access_count means the pair was last `put_back`; also
                // require `pair` to actually be present so `take()` cannot
                // panic (e.g. when the cleanup timer races a removal and the
                // session entry no longer holds a connection).
                Some(instance) if instance.access_count % 2 == 0 && instance.pair.is_some() => {
                    Ok(instance.take())
                }
                _ => Err(FaucetError::WebSocketConnectionInUse),
            },
            _ => Err(FaucetError::WebSocketConnectionInUse),
        }
    }
    /// Parks a connection pair so a future reconnection for `session_id` can
    /// resume it. (Name is a long-standing typo for "put_back", kept for the
    /// existing call sites.)
    async fn put_pack(&self, session_id: Uuid, pair: ConnectionPair) {
        let mut inner = self.inner.lock().await;
        if let Some(instance) = inner.map.get_mut(&session_id) {
            instance.put_back(pair);
        }
    }
    /// Forgets `session_id` entirely and counts the purge.
    ///
    /// The previous version also tried to set `purged = true` on the entry
    /// *after* removing it, which could never match; reconnection attempts
    /// for removed sessions are instead rejected by the `attempt > 0` check
    /// in `initialize_if_not`.
    async fn remove_session(&self, session_id: Uuid) {
        let mut inner = self.inner.lock().await;
        inner.map.remove(&session_id);
        inner.purge_count += 1;
    }
}
// Process-wide cache of parked shiny WebSocket connections, keyed by session
// id, so a client that drops and reconnects can resume its existing shiny
// session. Guarded by a tokio::sync::Mutex because it is locked with `.await`
// inside async functions.
static SHINY_CONNECTION_CACHE: LazyLock<ConnectionManager> = LazyLock::new(ConnectionManager::new);
/// Opens a brand-new WebSocket connection to the shiny worker described by
/// `upgrade_info`, tagging the handshake with the faucet session id so the
/// worker side can correlate it. Returns the split sink/stream pair.
async fn connect_to_worker(
    mut upgrade_info: UpgradeInfo,
    session_id: Uuid,
    config: &'static WebSocketConfig,
) -> FaucetResult<ConnectionPair> {
    let mut request = Request::builder().uri(upgrade_info.uri).body(())?;
    // Forward the session id to the worker as a custom header.
    upgrade_info.headers.append(
        "FAUCET_SESSION_ID",
        HeaderValue::from_str(&session_id.to_string())
            .expect("Unable to set Session ID as header. This is a bug. please report it!"),
    );
    *request.headers_mut() = upgrade_info.headers;
    let (shiny_ws, _) =
        tokio_tungstenite::connect_async_with_config(request, Some(*config), false).await?;
    send_log_event(EventLogData {
        target: "faucet".into(),
        event_id: session_id,
        parent_event_id: None,
        level: FaucetTracingLevel::Info,
        event_type: "websocket_connection".into(),
        message: "Established new WebSocket connection to shiny".to_string(),
        body: None,
    });
    Ok(shiny_ws.split())
}
/// Returns a connection pair for `session_id`: either a freshly established
/// one (first attempt) or the cached pair parked by a previous client
/// connection (reconnects). Fails if the session was purged or the cached
/// pair is currently in use.
async fn connect_or_retrieve(
    upgrade_info: UpgradeInfo,
    session_id: Uuid,
    attempt: usize,
    config: &'static WebSocketConfig,
) -> FaucetResult<ConnectionPair> {
    let init_pair = SHINY_CONNECTION_CACHE
        .initialize_if_not(
            session_id,
            attempt,
            connect_to_worker(upgrade_info, session_id, config),
        )
        .await;
    match init_pair {
        None => {
            // This means that the connection has already been initialized
            // in the past
            match SHINY_CONNECTION_CACHE.attempt_take(session_id).await {
                Ok(con) => {
                    send_log_event(EventLogData {
                        target: "faucet".into(),
                        event_id: Uuid::new_v4(),
                        parent_event_id: Some(session_id),
                        event_type: "websocket_connection".into(),
                        level: FaucetTracingLevel::Info,
                        message: "Client successfully reconnected".to_string(),
                        body: Some(json!({"attempts": attempt})),
                    });
                    Ok(con)
                }
                Err(e) => FaucetResult::Err(e),
            }
        }
        Some(init_pair_res) => init_pair_res,
    }
}
// How long a parked worker connection may sit unused before it is closed
// and its session purged.
const RECHECK_TIME: Duration = Duration::from_secs(60);
// Cadence of server->client pings, and how long a single select iteration
// may stall before the client is considered gone.
const PING_INTERVAL: Duration = Duration::from_secs(1);
const PING_INTERVAL_TIMEOUT: Duration = Duration::from_secs(30);
const PING_BYTES: Bytes = Bytes::from_static(b"Ping");
/// Bridges a newly upgraded client WebSocket to the worker (shiny) WebSocket
/// for `session_id`, pumping messages in both directions until one side
/// disconnects, the ping path times out, or shutdown is signaled.
///
/// On an unexpected client disconnect the worker connection is parked in
/// `SHINY_CONNECTION_CACHE` for `RECHECK_TIME` so the client can reconnect;
/// on a graceful client close or a worker-side close the session is removed.
async fn server_upgraded_io(
    upgraded: Upgraded,
    upgrade_info: UpgradeInfo,
    session_id: Uuid,
    attempt: usize,
    shutdown: &'static ShutdownSignal,
    websocket_config: &'static WebSocketConfig,
) -> FaucetResult<()> {
    // Set up the WebSocket connection with the client.
    let upgraded = TokioIo::new(upgraded);
    let upgraded_ws = tokio_tungstenite::WebSocketStream::from_raw_socket(
        upgraded,
        tokio_tungstenite::tungstenite::protocol::Role::Server,
        Some(*websocket_config),
    )
    .await;
    let (mut upgraded_tx, mut upgraded_rx) = upgraded_ws.split();
    // Attempt to retrieve a cached connection to Shiny.
    let (mut shiny_tx, mut shiny_rx) =
        match connect_or_retrieve(upgrade_info, session_id, attempt, websocket_config).await {
            Ok(pair) => pair,
            Err(e) => match e {
                FaucetError::WebSocketConnectionPurged => {
                    // Tell the client explicitly why its session cannot resume.
                    upgraded_tx
                        .send(Message::Close(Some(CloseFrame {
                            code: CloseCode::Normal,
                            reason: Utf8Bytes::from_static(
                                "Connection purged due to inactivity, update or error.",
                            ),
                        })))
                        .await?;
                    return Err(FaucetError::WebSocketConnectionPurged);
                }
                e => return Err(e),
            },
        };
    // Which side ended the bridge, and whether the client did so gracefully.
    enum DisconnectionSource {
        Shiny,
        ClientUnexpected,
        ClientExpected,
    }
    let connection = async {
        loop {
            let ping_future = async {
                tokio::time::sleep(PING_INTERVAL).await;
                upgraded_tx.send(Message::Ping(PING_BYTES)).await
            };
            log::debug!("Waiting for message or ping timeout");
            tokio::select! {
                msg = shiny_rx.next() => {
                    match msg {
                        Some(Ok(Message::Ping(bytes))) => {
                            if shiny_tx.send(Message::Pong(bytes)).await.is_err() {
                                break DisconnectionSource::Shiny; // Shiny connection closed
                            }
                        }
                        Some(Ok(msg)) => {
                            if upgraded_tx.send(msg).await.is_err() {
                                break DisconnectionSource::ClientUnexpected; // Client connection closed
                            }
                        },
                        Some(Err(e)) => {
                            log::error!("Error sending websocket message to client: {e}");
                            break DisconnectionSource::Shiny; // Error receiving message from shiny
                        }
                        _ => break DisconnectionSource::Shiny // Received no data from shiny
                    }
                },
                msg = upgraded_rx.next() => {
                    log::debug!("Received msg: {msg:?}");
                    match msg {
                        // Browsers don't natively implement ping / pong from the client
                        Some(Ok(Message::Text(bytes))) if bytes.as_str() == "ping" => {
                            if upgraded_tx.send(Message::Text(Utf8Bytes::from_static("pong"))).await.is_err() {
                                break DisconnectionSource::ClientUnexpected; // Client connection closed
                            }
                        }
                        Some(Ok(Message::Close(Some(CloseFrame { code, reason })))) => {
                            match code {
                                CloseCode::Away | CloseCode::Normal => {
                                    // If the client closes the session normally
                                    // pass the message onto shiny and break
                                    if shiny_tx.send(Message::Close(Some(CloseFrame { code, reason }))).await.is_err() {
                                        break DisconnectionSource::Shiny;
                                    }
                                    break DisconnectionSource::ClientExpected // This is a graceful session end
                                }
                                _ => break DisconnectionSource::ClientUnexpected
                            }
                        }
                        Some(Ok(msg)) => {
                            if shiny_tx.send(msg).await.is_err() {
                                break DisconnectionSource::Shiny; // Shiny connection closed
                            }
                        },
                        Some(Err(e)) => {
                            log::error!("Error sending websocket message to shiny: {e}");
                            break DisconnectionSource::ClientUnexpected; // Error receiving message from client
                        }
                        _ => break DisconnectionSource::ClientUnexpected // Received no data from client
                    }
                },
                _ = ping_future => continue,
                _ = tokio::time::sleep(PING_INTERVAL_TIMEOUT) => {
                    log::debug!("Ping timeout reached for session {session_id}");
                    break DisconnectionSource::ClientUnexpected; // Did not receive ping from client
                }
            }
        }
    };
    // Wait for either the client or Shiny to disconnect.
    tokio::select! {
        disconnect_source = connection => {
            match disconnect_source {
                DisconnectionSource::ClientUnexpected => {
                    send_log_event(EventLogData {
                        target: "faucet".into(),
                        event_id: Uuid::new_v4(),
                        parent_event_id: Some(session_id),
                        event_type: "websocket_connection".into(),
                        level: FaucetTracingLevel::Info,
                        message: "Session ended by client.".to_string(),
                        body: None,
                    });
                    log::debug!("Client connection closed for session {session_id}.")
                },
                DisconnectionSource::ClientExpected => {
                    // If this happens that means that the client ended the session, gracefully.
                    // We should not save for reconnection
                    SHINY_CONNECTION_CACHE.remove_session(session_id).await;
                    send_log_event(EventLogData {
                        target: "faucet".into(),
                        event_id: Uuid::new_v4(),
                        parent_event_id: Some(session_id),
                        event_type: "websocket_connection".into(),
                        level: FaucetTracingLevel::Info,
                        message: "Shiny session ended by Client, gracefully.".to_string(),
                        body: None,
                    });
                    log::debug!("Shiny connection closed for session {session_id}.");
                    return Ok(());
                }
                DisconnectionSource::Shiny => {
                    // If this happens that means shiny ended the session, immediately
                    // remove the session from the cache
                    SHINY_CONNECTION_CACHE.remove_session(session_id).await;
                    send_log_event(EventLogData {
                        target: "faucet".into(),
                        event_id: Uuid::new_v4(),
                        parent_event_id: Some(session_id),
                        event_type: "websocket_connection".into(),
                        level: FaucetTracingLevel::Info,
                        message: "Shiny session ended by Shiny.".to_string(),
                        body: None,
                    });
                    log::debug!("Shiny connection closed for session {session_id}.");
                    return Ok(());
                }
            }
        },
        _ = shutdown.wait() => {
            log::debug!("Received shutdown signal. Exiting websocket bridge.");
            return Ok(());
        }
    };
    // Getting here meant that the only possible way the session ended is if
    // the client ended the connection
    log::debug!("Client websocket connection to session {session_id} ended but the Shiny connection may still be alive. Saving for reconnection.");
    SHINY_CONNECTION_CACHE
        .put_pack(session_id, (shiny_tx, shiny_rx))
        .await;
    // Schedule a cleanup check after RECHECK_TIME: if the parked connection
    // was not re-taken by a reconnect, close it and purge the session.
    tokio::select! {
        _ = tokio::time::sleep(RECHECK_TIME) => {
            let entry = SHINY_CONNECTION_CACHE.attempt_take(session_id).await;
            match entry {
                // Err means the pair is unavailable (e.g. a client reconnected
                // and took it) — nothing to clean up.
                Err(_) => (),
                Ok((shiny_tx, shiny_rx)) => {
                    let mut ws = shiny_tx
                        .reunite(shiny_rx)
                        .expect("shiny_rx and tx always have the same origin.");
                    if ws
                        .close(Some(CloseFrame {
                            code: CloseCode::Abnormal,
                            reason: Utf8Bytes::default(),
                        }))
                        .await
                        .is_ok()
                    {
                        log::debug!("Closed reserved connection for session {session_id}");
                    }
                    SHINY_CONNECTION_CACHE.remove_session(session_id).await;
                }
            }
        },
        _ = shutdown.wait() => {
            log::debug!("Shutdown signaled, not running websocket cleanup for session {session_id}");
        }
    }
    Ok(())
}
/// Outcome of a WebSocket upgrade attempt: either the `101` response for an
/// upgraded connection, or the untouched request handed back for regular
/// HTTP handling.
pub enum UpgradeStatus<ReqBody> {
    Upgraded(Response<ExclusiveBody>),
    NotUpgraded(Request<ReqBody>),
}
// Query parameter that carries the client's session id on upgrade requests.
const SESSION_ID_QUERY: &str = "sessionId";
/// Zero-allocation ASCII-case-insensitive string comparison.
///
/// Lengths must match and non-ASCII bytes must be identical.
fn case_insensitive_eq(this: &str, that: &str) -> bool {
    // `str::eq_ignore_ascii_case` performs the same length check and
    // byte-wise ASCII case folding as the previous hand-rolled zip loop,
    // without allocating.
    this.eq_ignore_ascii_case(that)
}
/// Completes a WebSocket upgrade for `req`: parses the required `sessionId`
/// and `attempt` query parameters, waits for hyper's upgrade handshake, then
/// runs the client<->worker bridge until the session ends.
async fn upgrade_connection_from_request<ReqBody>(
    mut req: Request<ReqBody>,
    client: impl ExtractSocketAddr,
    shutdown: &'static ShutdownSignal,
    websocket_config: &'static WebSocketConfig,
) -> FaucetResult<()> {
    // Extract sessionId query parameter
    let query = req.uri().query().ok_or(FaucetError::BadRequest(
        BadRequestReason::MissingQueryParam("Unable to parse query params"),
    ))?;
    let mut session_id: Option<uuid::Uuid> = None;
    let mut attempt: Option<usize> = None;
    // Parameter names are matched case-insensitively; unparsable values are
    // treated the same as missing ones.
    url::form_urlencoded::parse(query.as_bytes()).for_each(|(key, value)| {
        if case_insensitive_eq(&key, SESSION_ID_QUERY) {
            session_id = uuid::Uuid::from_str(&value).ok();
        } else if case_insensitive_eq(&key, "attempt") {
            attempt = value.parse::<usize>().ok();
        }
    });
    let session_id = session_id.ok_or(FaucetError::BadRequest(
        BadRequestReason::MissingQueryParam("sessionId"),
    ))?;
    let attempt = attempt.ok_or(FaucetError::BadRequest(
        BadRequestReason::MissingQueryParam("attempt"),
    ))?;
    let upgrade_info = UpgradeInfo::new(&req, client.socket_addr())?;
    let upgraded = hyper::upgrade::on(&mut req).await?;
    server_upgraded_io(
        upgraded,
        upgrade_info,
        session_id,
        attempt,
        shutdown,
        websocket_config,
    )
    .await?;
    Ok(())
}
/// Accepts a WebSocket upgrade request: spawns a background task that waits
/// for the handshake to complete and runs the bridge, and immediately
/// returns the `101 Switching Protocols` response (with the computed
/// `Sec-WebSocket-Accept` header) to send back to the client.
///
/// Fails if the request lacks a `Sec-WebSocket-Key` header.
async fn init_upgrade<ReqBody: Send + Sync + 'static>(
    req: Request<ReqBody>,
    client: impl ExtractSocketAddr + Send + Sync + 'static,
    shutdown: &'static ShutdownSignal,
    websocket_config: &'static WebSocketConfig,
) -> FaucetResult<Response<ExclusiveBody>> {
    let mut res = Response::new(ExclusiveBody::empty());
    let sec_websocket_key = req
        .headers()
        .get(SEC_WEBSOCKET_KEY)
        .cloned()
        .ok_or(FaucetError::no_sec_web_socket_key())?;
    // The bridge runs in its own task, bracketed by add/remove_connection
    // (presumably a live-connection counter — confirm in their definition).
    tokio::task::spawn(async move {
        add_connection();
        if let Err(e) =
            upgrade_connection_from_request(req, client, shutdown, websocket_config).await
        {
            log::error!(target: "faucet", "upgrade error: {e:?}");
        }
        remove_connection();
    });
    *res.status_mut() = StatusCode::SWITCHING_PROTOCOLS;
    res.headers_mut()
        .insert(UPGRADE, HeaderValue::from_static("websocket"));
    res.headers_mut().insert(
        hyper::header::CONNECTION,
        HeaderValue::from_static("Upgrade"),
    );
    // 32 bytes comfortably fits the base64 of a 20-byte SHA-1 digest (28).
    let mut buffer = [0u8; 32];
    res.headers_mut().insert(
        SEC_WEBSOCKET_ACCEPT,
        HeaderValue::from_bytes(calculate_sec_websocket_accept(
            sec_websocket_key.as_bytes(),
            &mut buffer,
        ))?,
    );
    Ok(res)
}
/// Upgrades `req` to a WebSocket bridge when it carries an `Upgrade` header;
/// otherwise hands the request back untouched for normal HTTP handling.
#[inline(always)]
async fn attempt_upgrade<ReqBody: Send + Sync + 'static>(
    req: Request<ReqBody>,
    client: impl ExtractSocketAddr + Send + Sync + 'static,
    shutdown: &'static ShutdownSignal,
    websocket_config: &'static WebSocketConfig,
) -> FaucetResult<UpgradeStatus<ReqBody>> {
    // Guard clause: anything without an Upgrade header is passed through.
    if !req.headers().contains_key(UPGRADE) {
        return Ok(UpgradeStatus::NotUpgraded(req));
    }
    let response = init_upgrade(req, client, shutdown, websocket_config).await?;
    Ok(UpgradeStatus::Upgraded(response))
}
impl Client {
    /// Attempts to upgrade `req` to a WebSocket bridged through this client's
    /// worker; non-upgrade requests are returned untouched. Thin wrapper over
    /// the free function `attempt_upgrade`.
    pub async fn attempt_upgrade<ReqBody>(
        &self,
        req: Request<ReqBody>,
        shutdown: &'static ShutdownSignal,
        websocket_config: &'static WebSocketConfig,
    ) -> FaucetResult<UpgradeStatus<ReqBody>>
    where
        ReqBody: Send + Sync + 'static,
    {
        attempt_upgrade(req, self.clone(), shutdown, websocket_config).await
    }
}
// Unit tests for the upgrade-handshake helpers, backed by a small echo
// WebSocket server that stands in for a shiny worker.
#[cfg(test)]
mod tests {
    use crate::{leak, networking::get_available_socket, shutdown::ShutdownSignal};
    use super::*;
    use uuid::Uuid;
    #[test]
    fn test_insensitive_compare() {
        let session_id = "sessionid";
        assert!(case_insensitive_eq(session_id, SESSION_ID_QUERY));
    }
    #[test]
    fn test_calculate_sec_websocket_accept() {
        // Sample key/accept pair from RFC 6455, section 1.3.
        let key = "dGhlIHNhbXBsZSBub25jZQ==";
        let mut buffer = [0u8; 32];
        let accept = calculate_sec_websocket_accept(key.as_bytes(), &mut buffer);
        assert_eq!(accept, b"s3pPLMBiTxaQ9kYGzzhZRbK+xOo=");
    }
    #[test]
    fn test_build_uri() {
        let socket_addr = "127.0.0.1:8000".parse().unwrap();
        let path_and_query = "/websocket".parse().unwrap();
        let path = Some(&path_and_query);
        let result = build_uri(socket_addr, path).unwrap();
        assert_eq!(result, "ws://127.0.0.1:8000/websocket");
    }
    #[test]
    fn build_uri_no_path() {
        // A missing path must default to "/" (hyper renders it without one).
        let socket_addr = "127.0.0.1:8000".parse().unwrap();
        let path = None;
        let result = build_uri(socket_addr, path).unwrap();
        assert_eq!(result, "ws://127.0.0.1:8000");
    }
    #[tokio::test]
    async fn test_init_upgrade_from_request() {
        struct MockClient {
            socket_addr: SocketAddr,
        }
        impl ExtractSocketAddr for MockClient {
            fn socket_addr(&self) -> SocketAddr {
                self.socket_addr
            }
        }
        let websocket_config = leak!(WebSocketConfig::default());
        let socket_addr = get_available_socket(20).await.unwrap();
        let client = MockClient { socket_addr };
        let server = tokio::spawn(async move {
            dummy_websocket_server::run(socket_addr).await.unwrap();
        });
        let uri = Uri::builder()
            .scheme("http")
            .authority(socket_addr.to_string().as_str())
            .path_and_query(format!("/?{}={}", SESSION_ID_QUERY, Uuid::now_v7()))
            .build()
            .unwrap();
        let req = Request::builder()
            .uri(uri.clone())
            .header(UPGRADE, "websocket")
            .header("Sec-WebSocket-Key", "dGhlIHNhbXBsZSBub25jZQ==")
            .body(())
            .unwrap();
        let shutdown = leak!(ShutdownSignal::new());
        let result = init_upgrade(req, client, shutdown, websocket_config)
            .await
            .unwrap();
        server.abort();
        assert_eq!(result.status(), StatusCode::SWITCHING_PROTOCOLS);
        assert_eq!(
            result.headers().get(UPGRADE).unwrap(),
            HeaderValue::from_static("websocket")
        );
        assert_eq!(
            result.headers().get(SEC_WEBSOCKET_ACCEPT).unwrap(),
            HeaderValue::from_static("s3pPLMBiTxaQ9kYGzzhZRbK+xOo=")
        );
        assert_eq!(
            result.headers().get(hyper::header::CONNECTION).unwrap(),
            HeaderValue::from_static("Upgrade")
        );
    }
    #[tokio::test]
    async fn test_init_upgrade_from_request_no_sec_key() {
        struct MockClient {
            socket_addr: SocketAddr,
        }
        impl ExtractSocketAddr for MockClient {
            fn socket_addr(&self) -> SocketAddr {
                self.socket_addr
            }
        }
        let websocket_config = leak!(WebSocketConfig::default());
        let socket_addr = get_available_socket(20).await.unwrap();
        let client = MockClient { socket_addr };
        let server = tokio::spawn(async move {
            dummy_websocket_server::run(socket_addr).await.unwrap();
        });
        let uri = Uri::builder()
            .scheme("http")
            .authority(socket_addr.to_string().as_str())
            .path_and_query(format!("/?{}={}", SESSION_ID_QUERY, Uuid::now_v7()))
            .build()
            .unwrap();
        // No Sec-WebSocket-Key header: the upgrade must be rejected.
        let req = Request::builder()
            .uri(uri.clone())
            .header(UPGRADE, "websocket")
            .body(())
            .unwrap();
        let shutdown = leak!(ShutdownSignal::new());
        let result = init_upgrade(req, client, shutdown, websocket_config).await;
        server.abort();
        assert!(result.is_err());
    }
    #[tokio::test]
    async fn test_attempt_upgrade_no_upgrade_header() {
        struct MockClient {
            socket_addr: SocketAddr,
        }
        impl ExtractSocketAddr for MockClient {
            fn socket_addr(&self) -> SocketAddr {
                self.socket_addr
            }
        }
        let socket_addr = get_available_socket(20).await.unwrap();
        let websocket_config = leak!(WebSocketConfig::default());
        let client = MockClient { socket_addr };
        let server = tokio::spawn(async move {
            dummy_websocket_server::run(socket_addr).await.unwrap();
        });
        let uri = Uri::builder()
            .scheme("http")
            .authority(socket_addr.to_string().as_str())
            .path_and_query("/")
            .build()
            .unwrap();
        let req = Request::builder()
            .uri(uri)
            .header("Sec-WebSocket-Key", "dGhlIHNhbXBsZSBub25jZQ==")
            .body(())
            .unwrap();
        let shutdown = leak!(ShutdownSignal::new());
        let result = attempt_upgrade(req, client, shutdown, websocket_config)
            .await
            .unwrap();
        server.abort();
        match result {
            UpgradeStatus::NotUpgraded(_) => {}
            _ => panic!("Expected NotUpgraded"),
        }
    }
    #[tokio::test]
    async fn test_attempt_upgrade_with_upgrade_header() {
        struct MockClient {
            socket_addr: SocketAddr,
        }
        impl ExtractSocketAddr for MockClient {
            fn socket_addr(&self) -> SocketAddr {
                self.socket_addr
            }
        }
        let websocket_config = leak!(WebSocketConfig::default());
        let socket_addr = get_available_socket(20).await.unwrap();
        let client = MockClient { socket_addr };
        let server = tokio::spawn(async move {
            dummy_websocket_server::run(socket_addr).await.unwrap();
        });
        let uri = Uri::builder()
            .scheme("http")
            .authority(socket_addr.to_string().as_str())
            .path_and_query(format!("/?{}={}", SESSION_ID_QUERY, Uuid::now_v7()))
            .build()
            .unwrap();
        let req = Request::builder()
            .uri(uri)
            .header("Sec-WebSocket-Key", "dGhlIHNhbXBsZSBub25jZQ==")
            .header(UPGRADE, "websocket")
            .body(())
            .unwrap();
        let shutdown = leak!(ShutdownSignal::new());
        let result = attempt_upgrade(req, client, shutdown, websocket_config)
            .await
            .unwrap();
        server.abort();
        match result {
            UpgradeStatus::Upgraded(res) => {
                assert_eq!(res.status(), StatusCode::SWITCHING_PROTOCOLS);
                assert_eq!(
                    res.headers().get(UPGRADE).unwrap(),
                    HeaderValue::from_static("websocket")
                );
                assert_eq!(
                    res.headers().get(SEC_WEBSOCKET_ACCEPT).unwrap(),
                    HeaderValue::from_static("s3pPLMBiTxaQ9kYGzzhZRbK+xOo=")
                );
                assert_eq!(
                    res.headers().get(hyper::header::CONNECTION).unwrap(),
                    HeaderValue::from_static("Upgrade")
                );
            }
            _ => panic!("Expected Upgraded"),
        }
    }
    // Minimal echo WebSocket server used as a stand-in worker: accepts every
    // connection and echoes text/binary frames back to the sender.
    mod dummy_websocket_server {
        use std::{io::Error, net::SocketAddr};
        use futures_util::{future, StreamExt, TryStreamExt};
        use log::info;
        use tokio::net::{TcpListener, TcpStream};
        pub async fn run(addr: SocketAddr) -> Result<(), Error> {
            // Create the event loop and TCP listener we'll accept connections on.
            let try_socket = TcpListener::bind(&addr).await;
            let listener = try_socket.expect("Failed to bind");
            info!("Listening on: {addr}");
            while let Ok((stream, _)) = listener.accept().await {
                tokio::spawn(accept_connection(stream));
            }
            Ok(())
        }
        async fn accept_connection(stream: TcpStream) {
            let addr = stream
                .peer_addr()
                .expect("connected streams should have a peer address");
            info!("Peer address: {addr}");
            let ws_stream = tokio_tungstenite::accept_async(stream)
                .await
                .expect("Error during the websocket handshake occurred");
            info!("New WebSocket connection: {addr}");
            let (write, read) = ws_stream.split();
            // We should not forward messages other than text or binary.
            read.try_filter(|msg| future::ready(msg.is_text() || msg.is_binary()))
                .forward(write)
                .await
                .expect("Failed to forward messages")
        }
    }
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/client/worker.rs | src/client/worker.rs | use crate::{
error::{FaucetError, FaucetResult},
leak,
networking::get_available_socket,
server::{
logging::{parse_faucet_event, FaucetEventResult},
FaucetServerConfig,
},
shutdown::ShutdownSignal,
telemetry::send_log_event,
};
use std::{
ffi::OsStr,
net::SocketAddr,
path::Path,
sync::atomic::{AtomicBool, Ordering},
time::Duration,
};
use tokio::{
process::Child,
sync::{Mutex, Notify},
task::JoinHandle,
};
use tokio_stream::StreamExt;
use tokio_util::codec::FramedRead;
/// The kind of application a worker runs; determines how the child process
/// is spawned. Deserialized from configuration, with kebab/camel/snake-case
/// aliases accepted.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, serde::Deserialize)]
pub enum WorkerType {
    #[serde(alias = "plumber", alias = "Plumber")]
    Plumber,
    #[serde(alias = "shiny", alias = "Shiny")]
    Shiny,
    #[serde(alias = "quarto-shiny", alias = "QuartoShiny", alias = "quarto_shiny")]
    QuartoShiny,
    #[serde(alias = "fast-api", alias = "FastAPI")]
    FastAPI,
    // Test-only variant: the supervising task skips spawning a real process.
    #[cfg(test)]
    Dummy,
}
pub fn log_stdio(mut child: Child, target: &'static str) -> FaucetResult<Child> {
let pid = child.id().expect("Failed to get plumber worker PID");
let mut stdout = FramedRead::new(
child.stdout.take().ok_or(FaucetError::Unknown(format!(
"Unable to take stdout from PID {pid}"
)))?,
tokio_util::codec::AnyDelimiterCodec::new(vec![b'\n'], vec![]),
);
let mut stderr = FramedRead::new(
child.stderr.take().ok_or(FaucetError::Unknown(format!(
"Unable to take stderr from PID {pid}"
)))?,
tokio_util::codec::AnyDelimiterCodec::new(vec![b'\n'], vec![]),
);
tokio::spawn(async move {
while let Some(line) = stderr.next().await {
match line {
Ok(line) => match std::str::from_utf8(&line) {
Ok(line) => match parse_faucet_event(&line) {
FaucetEventResult::Output(line) => log::warn!(target: target, "{line}"),
FaucetEventResult::Event(e) => {
send_log_event(e);
}
FaucetEventResult::EventError(e) => {
log::error!(target: target, "{e:?}")
}
},
Err(e) => {
log::warn!(target: target, "Unable to parse non-utf8 stderr output: {e}")
}
},
Err(e) => log::error!(target: target, "{e}"),
}
}
});
tokio::spawn(async move {
while let Some(line) = stdout.next().await {
if let Ok(line) = line {
match std::str::from_utf8(&line) {
Ok(line) => {
log::info!(target: target, "{line}");
}
Err(e) => {
log::warn!(target: target, "Unable to parse non-utf8 stdout output: {e}")
}
}
}
}
});
Ok(child)
}
/// Static configuration and runtime handles for a single worker process.
/// Instances are leaked (`&'static`) and shared between the spawning,
/// routing and shutdown code.
#[derive(Copy, Clone)]
pub struct WorkerConfig {
    /// What kind of application this worker runs.
    pub wtype: WorkerType,
    /// App directory passed to `shiny::runApp` (defaults to "." when None).
    pub app_dir: Option<&'static str>,
    /// Path to the `Rscript` executable.
    pub rscript: &'static OsStr,
    /// Path to the `uv` executable (FastAPI workers).
    pub uv: &'static OsStr,
    /// Path to the `quarto` executable (Quarto Shiny workers).
    pub quarto: &'static OsStr,
    /// Working directory the child process is started in.
    pub workdir: &'static Path,
    /// Local address the child process is expected to listen on.
    pub addr: SocketAddr,
    /// Log target for this worker, e.g. "Worker::1".
    pub target: &'static str,
    /// 1-based worker index, exported to the child as FAUCET_WORKER_ID.
    pub worker_id: usize,
    /// Route this worker serves, if any (used in readiness logging).
    pub worker_route: Option<&'static str>,
    /// Flipped to true once a TCP connection to `addr` succeeds.
    pub is_online: &'static AtomicBool,
    /// Quarto document to serve (QuartoShiny only).
    pub qmd: Option<&'static Path>,
    /// Join handle of the supervising task, if one has been spawned.
    pub handle: &'static Mutex<Option<JoinHandle<FaucetResult<()>>>>,
    /// Global shutdown signal observed by the supervising task.
    pub shutdown: &'static ShutdownSignal,
    /// Notified to kill this worker's process when it goes idle.
    pub idle_stop: &'static Notify,
}
impl WorkerConfig {
    /// Builds the config for worker `worker_id` (1-based) listening on
    /// `addr`, copying shared settings from `server_config` and leaking the
    /// per-worker runtime handles.
    fn new(
        worker_id: usize,
        addr: SocketAddr,
        server_config: &FaucetServerConfig,
        shutdown: &'static ShutdownSignal,
    ) -> Self {
        Self {
            addr,
            worker_id,
            is_online: leak!(AtomicBool::new(false)),
            workdir: server_config.workdir,
            worker_route: server_config.route,
            target: leak!(format!("Worker::{}", worker_id)),
            app_dir: server_config.app_dir,
            wtype: server_config.server_type,
            rscript: server_config.rscript,
            quarto: server_config.quarto,
            qmd: server_config.qmd,
            uv: server_config.uv,
            handle: leak!(Mutex::new(None)),
            shutdown,
            idle_stop: leak!(Notify::new()),
        }
    }
    /// Test helper: a `Dummy`-type config whose supervising task never
    /// spawns a real process.
    #[allow(dead_code)]
    #[cfg(test)]
    pub fn dummy(target: &'static str, addr: &str, online: bool) -> WorkerConfig {
        WorkerConfig {
            target,
            is_online: leak!(AtomicBool::new(online)),
            addr: addr.parse().unwrap(),
            app_dir: None,
            worker_route: None,
            rscript: OsStr::new(""),
            wtype: WorkerType::Dummy,
            worker_id: 1,
            quarto: OsStr::new(""),
            uv: OsStr::new(""),
            workdir: Path::new("."),
            qmd: None,
            handle: leak!(Mutex::new(None)),
            shutdown: leak!(ShutdownSignal::new()),
            idle_stop: leak!(Notify::new()),
        }
    }
}
/// Spawns a FastAPI worker via `uv run fastapi run` on the configured port,
/// with stdio piped into faucet's logging.
fn spawn_child_fastapi_server(config: &WorkerConfig) -> FaucetResult<Child> {
    let mut cmd = tokio::process::Command::new(config.uv);
    // Set the current directory to the directory containing the entrypoint
    cmd.current_dir(config.workdir)
        .args(["run", "fastapi", "run", "--workers", "1"])
        .arg("--port")
        .arg(config.addr.port().to_string())
        .stdin(std::process::Stdio::null())
        .stdout(std::process::Stdio::piped())
        .stderr(std::process::Stdio::piped())
        .env("FAUCET_WORKER_ID", config.worker_id.to_string())
        // This is needed to make sure the child process is killed when the parent is dropped
        .kill_on_drop(true);
    #[cfg(unix)]
    unsafe {
        // SAFETY: pre_exec runs in the forked child before exec and only
        // calls setpgid, which is async-signal-safe.
        cmd.pre_exec(|| {
            // Create a new process group for the child process
            nix::libc::setpgid(0, 0);
            Ok(())
        });
    }
    let child = cmd.spawn()?;
    log_stdio(child, config.target)
}
/// Spawns `Rscript -e <command>` in the configured workdir with piped stdio;
/// shared by the plumber and shiny spawners.
fn spawn_child_rscript_process(
    config: &WorkerConfig,
    command: impl AsRef<str>,
) -> FaucetResult<Child> {
    let mut cmd = tokio::process::Command::new(config.rscript);
    // Set the current directory to the directory containing the entrypoint
    cmd.current_dir(config.workdir)
        .arg("-e")
        .arg(command.as_ref())
        .stdin(std::process::Stdio::null())
        .stdout(std::process::Stdio::piped())
        .stderr(std::process::Stdio::piped())
        .env("FAUCET_WORKER_ID", config.worker_id.to_string())
        // This is needed to make sure the child process is killed when the parent is dropped
        .kill_on_drop(true);
    #[cfg(unix)]
    unsafe {
        // SAFETY: pre_exec runs in the forked child before exec and only
        // calls setpgid, which is async-signal-safe.
        cmd.pre_exec(|| {
            // Create a new process group for the child process
            nix::libc::setpgid(0, 0);
            Ok(())
        });
    }
    cmd.spawn().map_err(Into::into)
}
/// Spawns a plumber API worker: runs an inline R script that serves the
/// project's plumber API on the configured port.
fn spawn_plumber_worker(config: &WorkerConfig) -> FaucetResult<Child> {
    let command = format!(
        r#"
        options("plumber.port" = {port})
        plumber::pr_run(plumber::plumb())
        "#,
        port = config.addr.port()
    );
    let child = spawn_child_rscript_process(config, command)?;
    log_stdio(child, config.target)
}
/// Spawns a shiny worker. The inline R script also installs an HTTP response
/// filter that injects faucet's `reconnect.js` into successful `text/html`
/// responses (just before `</head>`) so clients can auto-reconnect.
fn spawn_shiny_worker(config: &WorkerConfig) -> FaucetResult<Child> {
    let command = format!(
        r###"
        options("shiny.port" = {port})
        options(shiny.http.response.filter = function(...) {{
            response <- list(...)[[length(list(...))]]
            if (response$status < 200 || response$status > 300) return(response)
            if ('file' %in% names(response$content)) return(response)
            if (!grepl("^text/html", response$content_type, perl = T)) return(response)
            if (is.raw(response$content)) response$content <- rawToChar(response$content)
            response$content <- sub("</head>", '<script src="__faucet__/reconnect.js"></script></head>', response$content, ignore.case = T)
            return(response)
        }})
        shiny::runApp("{app_dir}")
        "###,
        port = config.addr.port(),
        app_dir = config.app_dir.unwrap_or(".")
    );
    let child = spawn_child_rscript_process(config, command)?;
    log_stdio(child, config.target)
}
/// Spawns a Quarto Shiny worker via `quarto serve` on the configured port.
/// Fails if no `qmd` document was configured.
fn spawn_quarto_shiny_worker(config: &WorkerConfig) -> FaucetResult<Child> {
    let mut cmd = tokio::process::Command::new(config.quarto);
    // Set the current directory to the directory containing the entrypoint
    cmd.current_dir(config.workdir)
        .arg("serve")
        .args(["--port", config.addr.port().to_string().as_str()])
        .arg(config.qmd.ok_or(FaucetError::MissingArgument("qmd"))?)
        .stdin(std::process::Stdio::null())
        .stdout(std::process::Stdio::piped())
        .stderr(std::process::Stdio::piped())
        .env("FAUCET_WORKER_ID", config.worker_id.to_string())
        // This is needed to make sure the child process is killed when the parent is dropped
        .kill_on_drop(true);
    #[cfg(unix)]
    unsafe {
        // SAFETY: pre_exec runs in the forked child before exec and only
        // calls setpgid, which is async-signal-safe.
        cmd.pre_exec(|| {
            // Create a new process group for the child process
            nix::libc::setpgid(0, 0);
            Ok(())
        });
    }
    let child = cmd.spawn()?;
    log_stdio(child, config.target)
}
impl WorkerConfig {
    /// Spawns the child process appropriate for this worker's type, logging
    /// spawn failures under the "faucet" target before propagating them.
    fn spawn_process(&self) -> FaucetResult<Child> {
        let child_result = match self.wtype {
            WorkerType::Plumber => spawn_plumber_worker(self),
            WorkerType::Shiny => spawn_shiny_worker(self),
            WorkerType::QuartoShiny => spawn_quarto_shiny_worker(self),
            WorkerType::FastAPI => spawn_child_fastapi_server(self),
            #[cfg(test)]
            WorkerType::Dummy => unreachable!(
                "WorkerType::Dummy should be handled in spawn_worker_task and not reach spawn_process"
            ),
        };
        match child_result {
            Ok(child) => Ok(child),
            Err(e) => {
                log::error!(target: "faucet", "Failed to invoke R for {target}: {e}", target = self.target);
                Err(e)
            }
        }
    }
    /// Waits for this worker's supervising task (if any) to finish.
    ///
    /// Panics if the task returned an error or was cancelled/panicked — a
    /// worker task failure is treated as fatal by the caller.
    pub async fn wait_until_done(&self) {
        if let Some(handle) = self.handle.lock().await.take() {
            log::debug!("Waiting for process to be finished");
            match handle.await {
                Ok(Ok(_)) => {
                    log::debug!("Task ended successfully!")
                }
                Ok(Err(e)) => {
                    panic!("Worker task for target '{}' failed: {:?}", self.target, e);
                }
                Err(e) => {
                    panic!(
                        "Worker task for target '{}' panicked or was cancelled: {:?}",
                        self.target, e
                    );
                }
            }
        }
    }
    /// Starts (or restarts) the supervising task for this worker: spawns the
    /// child process, polls its port until reachable, marks the worker
    /// online, and restarts the process if it exits. The task ends on
    /// shutdown or an idle-stop notification; it is a no-op when a previous
    /// task is still running.
    pub async fn spawn_worker_task(&'static self) {
        let mut handle = self.handle.lock().await;
        if let Some(handle) = handle.as_ref() {
            if !handle.is_finished() {
                log::warn!(target: "faucet", "Worker task for {target} is already running, skipping spawn", target = self.target);
                return;
            }
        }
        *handle = Some(tokio::spawn(async move {
            #[cfg(test)]
            if self.wtype == WorkerType::Dummy {
                log::debug!(
                    target: "faucet",
                    "Worker {target} is type Dummy, skipping real process spawn.",
                    target = self.target
                );
                return FaucetResult::Ok(());
            }
            'outer: loop {
                let mut child = match self.spawn_process() {
                    Ok(c) => c,
                    Err(e) => {
                        log::error!(
                            target: "faucet",
                            "Worker task for {target} failed to spawn initial process: {e}",
                            target = self.target
                        );
                        return Err(e);
                    }
                };
                let pid = match child.id() {
                    Some(id) => id,
                    None => {
                        let err_msg = format!(
                            "Spawned process for {target} has no PID",
                            target = self.target
                        );
                        log::error!(target: "faucet", "{err_msg}");
                        return Err(FaucetError::Unknown(err_msg));
                    }
                };
                // We will run this loop asynchronously on this same thread.
                // We will use this to wait for either the stop signal
                // or the child exiting
                let child_loop = async {
                    log::info!(target: "faucet", "Starting process {pid} for {target} on port {port}", port = self.addr.port(), target = self.target);
                    loop {
                        // Try to connect to the socket
                        let check_status = check_if_online(self.addr).await;
                        // If it's online, we can break out of the loop and start serving connections
                        if check_status {
                            log::info!(target: "faucet", "{target} is online and ready to serve connections at {route}", target = self.target, route = self.worker_route.unwrap_or("/"));
                            self.is_online.store(check_status, Ordering::SeqCst);
                            break;
                        }
                        // If it's not online but the child process has exited, we should break out of the loop
                        // and restart the process
                        if child.try_wait()?.is_some() {
                            break;
                        }
                        tokio::time::sleep(RECHECK_INTERVAL).await;
                    }
                    FaucetResult::Ok(child.wait().await?)
                };
                tokio::select! {
                    // If we receive a stop signal that means we will stop the outer loop
                    // and kill the process
                    _ = self.shutdown.wait() => {
                        let _ = child.kill().await;
                        log::info!(target: "faucet", "{target}'s process ({pid}) killed for shutdown", target = self.target);
                        break 'outer;
                    },
                    _ = self.idle_stop.notified() => {
                        self.is_online.store(false, std::sync::atomic::Ordering::SeqCst);
                        let _ = child.kill().await;
                        log::info!(target: "faucet", "{target}'s process ({pid}) killed for idle stop", target = self.target);
                        break 'outer;
                    },
                    // If our child loop stops that means the process crashed. We will restart it
                    status = child_loop => {
                        self
                            .is_online
                            .store(false, std::sync::atomic::Ordering::SeqCst);
                        log::error!(target: "faucet", "{target}'s process ({}) exited with status {}", pid, status?, target = self.target);
                        continue 'outer;
                    }
                }
            }
            log::debug!("{target}'s process has ended.", target = self.target);
            FaucetResult::Ok(())
        }));
    }
}
async fn check_if_online(addr: SocketAddr) -> bool {
let stream = tokio::net::TcpStream::connect(addr).await;
stream.is_ok()
}
const RECHECK_INTERVAL: Duration = Duration::from_millis(250);
pub struct WorkerConfigs {
pub workers: Box<[&'static WorkerConfig]>,
}
const TRIES: usize = 20;
impl WorkerConfigs {
pub(crate) async fn new(
server_config: FaucetServerConfig,
shutdown: &'static ShutdownSignal,
) -> FaucetResult<Self> {
let mut workers =
Vec::<&'static WorkerConfig>::with_capacity(server_config.n_workers.get());
for id in 0..server_config.n_workers.get() {
// Probably hacky but it works. I need to guarantee that ports are never
// reused
let socket_addr = 'find_socket: loop {
let addr_candidate = get_available_socket(TRIES).await?;
// Check if another worker has already reserved this port
if workers.iter().any(|w| w.addr == addr_candidate) {
continue 'find_socket;
}
break 'find_socket addr_candidate;
};
let config = leak!(WorkerConfig::new(
id + 1,
socket_addr,
&server_config,
shutdown
)) as &'static WorkerConfig;
workers.push(config);
}
let workers = workers.into_boxed_slice();
Ok(Self { workers })
}
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/client/mod.rs | src/client/mod.rs | mod body;
mod pool;
mod websockets;
pub mod load_balancing;
pub mod worker;
pub use body::ExclusiveBody;
pub(crate) use pool::Client;
pub use pool::ExtractSocketAddr;
pub use websockets::UpgradeStatus;
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/client/body.rs | src/client/body.rs | use std::pin::Pin;
use super::pool::HttpConnection;
use crate::error::FaucetError;
use http_body_util::{BodyExt, Empty, Full};
use hyper::body::{Body, Bytes, SizeHint};
pub struct ExclusiveBody {
inner: Pin<Box<dyn Body<Data = Bytes, Error = FaucetError> + Send + 'static>>,
_connection: Option<HttpConnection>,
}
impl core::fmt::Debug for ExclusiveBody {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("ExclusiveBody").finish()
}
}
impl ExclusiveBody {
pub fn new(
body: impl Body<Data = Bytes, Error = FaucetError> + Send + Sync + 'static,
connection: Option<HttpConnection>,
) -> Self {
Self {
inner: Box::pin(body),
_connection: connection,
}
}
pub fn empty() -> Self {
Self::new(Empty::new().map_err(Into::into), None)
}
pub fn plain_text(text: impl Into<String>) -> Self {
Self::new(Full::from(text.into()).map_err(Into::into), None)
}
}
impl Body for ExclusiveBody {
type Data = Bytes;
type Error = FaucetError;
fn poll_frame(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Result<hyper::body::Frame<Self::Data>, Self::Error>>> {
self.inner.as_mut().poll_frame(cx)
}
fn is_end_stream(&self) -> bool {
self.inner.is_end_stream()
}
fn size_hint(&self) -> SizeHint {
self.inner.size_hint()
}
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/client/pool.rs | src/client/pool.rs | use super::body::ExclusiveBody;
use super::worker::WorkerConfig;
use crate::error::{FaucetError, FaucetResult};
use crate::global_conn::{add_connection, remove_connection};
use deadpool::managed::{self, Object, Pool, RecycleError};
use http_body_util::BodyExt;
use hyper::body::Incoming;
use hyper::client::conn::http1::SendRequest;
use hyper::{Request, Response};
use hyper_util::rt::TokioIo;
use std::net::SocketAddr;
use tokio::net::TcpStream;
struct ConnectionHandle {
sender: SendRequest<Incoming>,
}
struct ConnectionManager {
config: &'static WorkerConfig,
}
impl ConnectionManager {
fn new(config: &'static WorkerConfig) -> Self {
Self { config }
}
}
const RETRY_DELAY: std::time::Duration = std::time::Duration::from_millis(20);
impl managed::Manager for ConnectionManager {
type Type = ConnectionHandle;
type Error = FaucetError;
async fn create(&self) -> FaucetResult<Self::Type> {
log::debug!(target: "faucet", "Establishing TCP connection to {}", self.config.target);
let connection_res = loop {
match TcpStream::connect(self.config.addr).await {
Ok(stream) => break stream,
Err(_) => tokio::time::sleep(RETRY_DELAY).await,
}
};
let stream = TokioIo::new(connection_res);
let (sender, conn) = hyper::client::conn::http1::handshake(stream).await?;
tokio::spawn(async move {
match conn.await {
Ok(_) => (),
Err(err) => {
log::debug!(target: "faucet", "{err}");
}
}
});
log::debug!(target: "faucet", "Established TCP connection to {}", self.config.target);
Ok(ConnectionHandle { sender })
}
async fn recycle(
&self,
conn: &mut ConnectionHandle,
_: &managed::Metrics,
) -> managed::RecycleResult<FaucetError> {
if !self
.config
.is_online
.load(std::sync::atomic::Ordering::SeqCst)
{
return Err(RecycleError::message("Worker is offline"));
}
if conn.sender.is_closed() {
Err(RecycleError::message("Connection closed"))
} else {
log::debug!(target: "faucet", "Recycling TCP connection to {}", self.config.target);
Ok(())
}
}
}
pub struct HttpConnection {
inner: Object<ConnectionManager>,
}
impl HttpConnection {
pub async fn send_request(
mut self,
request: Request<Incoming>,
) -> FaucetResult<Response<ExclusiveBody>> {
add_connection();
let (parts, body) = self.inner.sender.send_request(request).await?.into_parts();
let body = ExclusiveBody::new(body.map_err(Into::into), Some(self));
Ok(Response::from_parts(parts, body))
}
}
impl Drop for HttpConnection {
fn drop(&mut self) {
remove_connection();
}
}
const DEFAULT_MAX_SIZE: usize = 1024;
#[derive(Clone)]
pub(crate) struct Client {
pool: Pool<ConnectionManager>,
pub(crate) config: &'static WorkerConfig,
}
impl Client {
pub fn new(config: &'static WorkerConfig) -> Self {
let builder = Pool::builder(ConnectionManager::new(config)).max_size(DEFAULT_MAX_SIZE);
let pool = builder
.build()
.expect("Failed to create connection pool. This is a bug");
Self { pool, config }
}
pub async fn get(&self) -> FaucetResult<HttpConnection> {
Ok(HttpConnection {
inner: self.pool.get().await?,
})
}
pub fn is_online(&self) -> bool {
self.config
.is_online
.load(std::sync::atomic::Ordering::SeqCst)
}
}
pub trait ExtractSocketAddr {
fn socket_addr(&self) -> SocketAddr;
}
impl ExtractSocketAddr for Client {
#[inline(always)]
fn socket_addr(&self) -> SocketAddr {
self.config.addr
}
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/client/load_balancing/rps_autoscale.rs | src/client/load_balancing/rps_autoscale.rs | use rand::Rng;
use tokio::sync::Mutex;
use super::LoadBalancingStrategy;
use crate::client::{worker::WorkerConfig, Client};
use std::net::IpAddr;
struct RequestCounter {
last_reset: std::time::Instant,
current_window: f64,
previous_window_rps: f64,
big_reset_counter: f64,
pub max_rps: f64,
}
const WINDOW_SIZE: f64 = 10.0; // seconds
const BIG_RESET_WINDOW_SIZE: f64 = 30.0; // seconds
impl RequestCounter {
fn new(max_rps: f64) -> Self {
RequestCounter {
last_reset: std::time::Instant::now(),
current_window: 0.0,
previous_window_rps: 0.0,
big_reset_counter: 0.0,
max_rps,
}
}
fn add(&mut self, count: f64) {
self.current_window += count;
self.big_reset_counter += count;
}
fn set_new_window(&mut self) -> f64 {
let elapsed = self.last_reset.elapsed();
let elapsed_secs = elapsed.as_secs_f64();
let previous_window_rps = if elapsed_secs > 0.0 {
self.current_window / elapsed_secs
} else {
// Avoid division by zero if elapsed time is extremely small
// Treat as very high RPS if there were any requests
if self.current_window > 0.0 {
f64::MAX
} else {
0.0
}
};
self.previous_window_rps = previous_window_rps;
self.last_reset = std::time::Instant::now();
self.current_window = 0.0;
previous_window_rps
}
fn rps(&mut self) -> f64 {
let elapsed_secs = self.last_reset.elapsed().as_secs_f64();
if elapsed_secs > 0.0 {
self.current_window / elapsed_secs
} else {
match self.current_window > 0.0 {
true => f64::MAX,
false => 0.0,
}
}
}
fn total_requests_since_big_reset(&mut self) -> f64 {
self.big_reset_counter
+ self.previous_window_rps * self.last_reset.elapsed().as_secs_f64()
+ self.current_window
}
fn reset_big(&mut self) {
self.big_reset_counter = 0.0;
}
}
struct Targets {
targets: &'static [Client],
request_counter: &'static [Mutex<RequestCounter>],
_request_counter_calculator_handle: tokio::task::JoinHandle<()>,
}
const WAIT_TIME_UNTIL_RETRY: std::time::Duration = std::time::Duration::from_millis(500);
impl Targets {
fn new(configs: &[&'static WorkerConfig], max_rps: f64) -> Self {
let mut targets_vec = Vec::new();
let mut request_counters_vec = Vec::new();
for config in configs {
let client = Client::new(config);
targets_vec.push(client);
request_counters_vec.push(Mutex::new(RequestCounter::new(max_rps)));
}
let targets = Box::leak(targets_vec.into_boxed_slice()) as &'static [Client];
let request_counter_static_slice = Box::leak(request_counters_vec.into_boxed_slice())
as &'static [Mutex<RequestCounter>];
let request_per_second_calculator_handle = tokio::spawn(async move {
let mut last_big_reset_time = std::time::Instant::now();
loop {
tokio::time::sleep(std::time::Duration::from_secs_f64(WINDOW_SIZE)).await;
let is_big_reset_due =
last_big_reset_time.elapsed().as_secs_f64() >= BIG_RESET_WINDOW_SIZE;
if is_big_reset_due {
last_big_reset_time = std::time::Instant::now();
}
for i in 0..targets.len() {
let mut rc_guard = request_counter_static_slice[i].lock().await;
let calculated_rps = rc_guard.set_new_window();
if calculated_rps > rc_guard.max_rps {
log::debug!(
target: "faucet",
"Target {} ({}) is overloaded ({} RPS), attempting to spawn worker for next target",
i, targets[i].config.target, calculated_rps
);
match targets.get(i + 1) {
Some(next_target_client) => {
log::info!(
target: "faucet",
"Spawning worker task for adjacent target {} due to overload on target {}",
next_target_client.config.target, targets[i].config.target
);
next_target_client.config.spawn_worker_task().await;
}
_ if targets.len() == 1 => {
log::warn!(
target: "faucet",
"Target {} is overloaded but it's the only target. No autoscaling action possible for spawning.",
targets[i].config.target
);
}
_ => (),
}
}
if is_big_reset_due {
let total_requests = rc_guard.total_requests_since_big_reset();
if total_requests == 0.0 {
// Check if the worker is actually running before trying to stop it.
// For dummy workers, handle might be None if never "spawned".
// If handle is Some, and not finished, then it's "running".
let is_running = targets[i]
.config
.handle
.lock()
.await
.as_ref()
.map_or_else(|| false, |h| !h.is_finished());
if is_running || targets[i].is_online() {
// is_online for initial state before handle is set
log::info!(
target: "faucet",
"Target {} ({}) has no requests in the last ~{} seconds, notifying idle stop.",
i, targets[i].config.target, BIG_RESET_WINDOW_SIZE
);
targets[i].config.idle_stop.notify_waiters();
}
}
rc_guard.reset_big();
}
}
}
});
Targets {
targets,
request_counter: request_counter_static_slice,
_request_counter_calculator_handle: request_per_second_calculator_handle,
}
}
fn get(&self, index: usize) -> (Client, &'static Mutex<RequestCounter>) {
(
self.targets[index % self.targets.len()].clone(),
&self.request_counter[index % self.targets.len()],
)
}
}
pub struct RpsAutoscale {
targets: Targets,
}
impl RpsAutoscale {
pub(crate) async fn new(configs: &[&'static WorkerConfig], max_rps: f64) -> Self {
// Spawn initial worker tasks as per configs
for config in configs {
if config.is_online.load(std::sync::atomic::Ordering::SeqCst) {
// If configured to be initially online
config.spawn_worker_task().await;
}
}
Self {
targets: Targets::new(configs, max_rps),
}
}
}
impl LoadBalancingStrategy for RpsAutoscale {
type Input = IpAddr;
async fn entry(&self, _ip: IpAddr) -> Client {
let len = self.targets.targets.len();
if len == 0 {
panic!("RpsAutoscale called with no targets!");
}
let mut passes = 0;
let mut current_index; // Start at a random target
loop {
current_index = rand::rng().random_range(0..len);
passes += 1;
let (client, request_counter_mutex) = self.targets.get(current_index);
let is_online = client.is_online();
let mut rc_guard = match request_counter_mutex.try_lock() {
Ok(rc) => rc,
Err(_) => {
continue;
}
};
if is_online && (rc_guard.rps() <= rc_guard.max_rps || passes > len) {
rc_guard.add(1.0);
return client;
}
if (passes > len * 2) && is_online {
return client; // If we tried all once and this one is online, return it
}
if (passes > len * 5) && !is_online {
log::warn!(target: "faucet", "Looped {} times, still no suitable target. Trying to spawn for target 0 if offline.", 5);
client.config.spawn_worker_task().await;
// Wait a bit for it to potentially come online
for _ in 0..1000 {
// Try for up to 10 * WAIT_TIME_UNTIL_RETRY
tokio::time::sleep(WAIT_TIME_UNTIL_RETRY).await;
if client.is_online() {
rc_guard.add(1.0);
return client;
}
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::client::worker::WorkerConfig; // WorkerType needed for dummy
use std::net::{IpAddr, Ipv4Addr};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Notify; // Notify used in WorkerConfig::dummy
// Helper to create &'static WorkerConfig using WorkerConfig::dummy
fn create_leaked_dummy_config(
id_prefix: &str,
index: usize,
initial_online: bool,
) -> &'static WorkerConfig {
let target_name =
Box::leak(format!("{id_prefix}-{index}").into_boxed_str()) as &'static str;
let addr_str = format!("127.0.0.1:{}", 9500 + index); // Ensure unique ports for tests
&*Box::leak(Box::new(WorkerConfig::dummy(
target_name,
&addr_str,
initial_online,
)))
}
fn dummy_ip() -> IpAddr {
IpAddr::V4(Ipv4Addr::new(192, 168, 0, 1)) // A typical private IP
}
#[tokio::test]
async fn test_new_rps_autoscale() {
let config1 = create_leaked_dummy_config("new", 0, true);
let config2 = create_leaked_dummy_config("new", 1, true);
let autoscale = RpsAutoscale::new(&[config1, config2], 10.0).await;
assert_eq!(autoscale.targets.targets.len(), 2);
// Drop the autoscale to allow its background task to be cleaned up if possible
drop(autoscale);
}
#[tokio::test]
async fn test_load_balancing_strategy_basic_entry() {
let config1 = create_leaked_dummy_config("basic", 0, true);
let autoscale = RpsAutoscale::new(&[config1], 10.0).await;
let client = autoscale.entry(dummy_ip()).await;
assert_eq!(client.config.target, config1.target);
assert!(client.is_online());
drop(autoscale);
}
#[tokio::test]
async fn test_load_balancing_strategy_offline_target() {
let config_offline = create_leaked_dummy_config("offline", 0, false);
let config_online = create_leaked_dummy_config("offline", 1, true);
let autoscale = RpsAutoscale::new(&[config_offline, config_online], 10.0).await;
for _ in 0..5 {
let client = autoscale.entry(dummy_ip()).await;
assert_eq!(
client.config.target, config_online.target,
"Should pick the online target"
);
assert!(client.is_online());
}
drop(autoscale);
}
#[tokio::test]
async fn test_load_balancing_overloaded_target_skipped_by_entry() {
let config1 = create_leaked_dummy_config("overload", 0, true);
let config2 = create_leaked_dummy_config("overload", 1, true);
let autoscale = RpsAutoscale::new(&[config1, config2], 10.0).await;
{
let (_client1, rc1_mutex) = autoscale.targets.get(0);
let mut rc1_guard = rc1_mutex.lock().await;
rc1_guard.current_window = rc1_guard.max_rps * 5.0;
}
tokio::time::sleep(Duration::from_millis(10)).await; // Ensure a tiny bit of time has passed for rc1.last_reset
let mut picked_config2 = false;
for _ in 0..5 {
let client = autoscale.entry(dummy_ip()).await;
if client.config.target == config2.target {
picked_config2 = true;
break;
}
tokio::time::sleep(Duration::from_millis(5)).await;
}
assert!(
picked_config2,
"Load balancer should skip overloaded target config1 and pick config2"
);
drop(autoscale);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_autoscale_spawn_worker_on_overload_background_task() {
let config0 = create_leaked_dummy_config("autospawn", 0, true); // Target to be overloaded
let config1 = create_leaked_dummy_config("autospawn", 1, true); // Target whose worker should be "spawned"
assert!(
config1.handle.lock().await.is_none(),
"Config1 handle should be None initially"
);
let autoscale = RpsAutoscale::new(&[config0, config1], 10.0).await;
{
let rc0_mutex = &autoscale.targets.request_counter[0];
let mut rc0_guard = rc0_mutex.lock().await;
rc0_guard.current_window = (rc0_guard.max_rps + 1.0) * WINDOW_SIZE;
}
let wait_duration = Duration::from_secs_f64(WINDOW_SIZE + 2.0);
tokio::time::sleep(wait_duration).await;
let config1_handle_lock = config1.handle.lock().await;
assert!(config1_handle_lock.is_some(), "Worker handle for config1 should be set after simulated overload of config0 and background task execution.");
drop(autoscale);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_autoscale_shutdown_idle_worker_background_task() {
let config0 = create_leaked_dummy_config("autoshutdown", 0, true);
// We need to ensure spawn_worker_task was called for config0 so it's considered "running"
// RpsAutoscale::new calls spawn_worker_task for initially online workers.
let autoscale = RpsAutoscale::new(&[config0], 10.0).await;
// Wait for config0's handle to be set by RpsAutoscale::new
tokio::time::sleep(Duration::from_millis(100)).await;
assert!(
config0.handle.lock().await.is_some(),
"Config0 handle should be set after RpsAutoscale::new"
);
let idle_stop_notification = Arc::new(Notify::new());
let notification_clone = idle_stop_notification.clone();
// Spawn a task to listen for the idle_stop notification from the config
tokio::spawn(async move {
config0.idle_stop.notified().await;
notification_clone.notify_one();
});
let wait_duration = Duration::from_secs_f64(BIG_RESET_WINDOW_SIZE + WINDOW_SIZE + 5.0); // e.g., 30s + 10s + 5s = 45s
log::debug!(target: "faucet_test", "Waiting for {:?} for idle shutdown test on target {}", wait_duration, config0.target);
match tokio::time::timeout(wait_duration, idle_stop_notification.notified()).await {
Ok(_) => {
log::info!(target: "faucet_test", "Idle stop notification received for target {}", config0.target);
}
Err(_) => {
panic!("Idle stop notification timed out for target {}. Worker was not shut down as expected.", config0.target);
}
}
drop(autoscale);
}
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/client/load_balancing/cookie_hash.rs | src/client/load_balancing/cookie_hash.rs | use uuid::Uuid;
use super::LoadBalancingStrategy;
use super::WorkerConfig;
use crate::client::Client;
use crate::leak;
use std::time::Duration;
struct Targets {
targets: &'static [Client],
}
impl Targets {
fn new(configs: &[&'static WorkerConfig]) -> Self {
let mut targets = Vec::new();
for state in configs {
let client = Client::new(state);
targets.push(client);
}
let targets = leak!(targets);
Targets { targets }
}
}
pub struct CookieHash {
targets: Targets,
targets_len: usize,
}
impl CookieHash {
pub(crate) async fn new(configs: &[&'static WorkerConfig]) -> Self {
// Start the process of each config
for config in configs {
config.spawn_worker_task().await;
}
Self {
targets_len: configs.as_ref().len(),
targets: Targets::new(configs),
}
}
}
fn calculate_hash(cookie_uuid: Uuid) -> u64 {
let mut hash_value = cookie_uuid.as_u128() as u64;
hash_value ^= hash_value >> 33;
hash_value = hash_value.wrapping_mul(0xff51afd7ed558ccd);
hash_value ^= hash_value >> 33;
hash_value = hash_value.wrapping_mul(0xc4ceb9fe1a85ec53);
hash_value ^= hash_value >> 33;
hash_value
}
fn hash_to_index(value: Uuid, length: usize) -> usize {
let hash = calculate_hash(value);
(hash % length as u64) as usize
}
// 50ms is the minimum backoff time for exponential backoff
const BASE_BACKOFF: Duration = Duration::from_millis(1);
const MAX_BACKOFF: Duration = Duration::from_millis(500);
fn calculate_exponential_backoff(retries: u32) -> Duration {
(BASE_BACKOFF * 2u32.pow(retries)).min(MAX_BACKOFF)
}
impl LoadBalancingStrategy for CookieHash {
type Input = Uuid;
async fn entry(&self, id: Uuid) -> Client {
let mut retries = 0;
let index = hash_to_index(id, self.targets_len);
let client = self.targets.targets[index].clone();
loop {
if client.is_online() {
break client;
}
let backoff = calculate_exponential_backoff(retries);
log::debug!(
target: "faucet",
"LB Session {} tried to connect to offline {}, retrying in {:?}",
id,
client.config.target,
backoff
);
tokio::time::sleep(backoff).await;
retries += 1;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::client::ExtractSocketAddr;
use uuid::Uuid;
#[test]
fn uuid_test_distribution_of_hash_function_len_4() {
const N_UUIDS: usize = 100_000;
let uuids: Vec<Uuid> = (0..N_UUIDS).map(|_| Uuid::now_v7()).collect();
let mut counts = [0; 4];
uuids.iter().for_each(|uuid| {
let index = hash_to_index(*uuid, 4);
counts[index] += 1;
});
let percent_0 = counts[0] as f64 / N_UUIDS as f64;
let percent_1 = counts[1] as f64 / N_UUIDS as f64;
let percent_2 = counts[2] as f64 / N_UUIDS as f64;
let percent_3 = counts[3] as f64 / N_UUIDS as f64;
assert!((0.24..=0.26).contains(&percent_0));
assert!((0.24..=0.26).contains(&percent_1));
assert!((0.24..=0.26).contains(&percent_2));
assert!((0.24..=0.26).contains(&percent_3));
}
#[test]
fn uuid_test_distribution_of_hash_function_len_3() {
const N_UUIDS: usize = 100_000;
let uuids: Vec<Uuid> = (0..N_UUIDS).map(|_| Uuid::now_v7()).collect();
let mut counts = [0; 3];
uuids.iter().for_each(|uuid| {
let index = hash_to_index(*uuid, 3);
counts[index] += 1;
});
let percent_0 = counts[0] as f64 / N_UUIDS as f64;
let percent_1 = counts[1] as f64 / N_UUIDS as f64;
let percent_2 = counts[2] as f64 / N_UUIDS as f64;
assert!((0.32..=0.34).contains(&percent_0));
assert!((0.32..=0.34).contains(&percent_1));
assert!((0.32..=0.34).contains(&percent_2));
}
#[test]
fn uuid_test_distribution_of_hash_function_len_2() {
const N_UUIDS: usize = 100_000;
let uuids: Vec<Uuid> = (0..N_UUIDS).map(|_| Uuid::now_v7()).collect();
let mut counts = [0; 2];
uuids.iter().for_each(|uuid| {
let index = hash_to_index(*uuid, 2);
counts[index] += 1;
});
let percent_0 = counts[0] as f64 / N_UUIDS as f64;
let percent_1 = counts[1] as f64 / N_UUIDS as f64;
assert!((0.49..=0.51).contains(&percent_0));
assert!((0.49..=0.51).contains(&percent_1));
}
#[test]
fn test_new_targets() {
let worker_state: &'static WorkerConfig = Box::leak(Box::new(WorkerConfig::dummy(
"test",
"127.0.0.1:9999",
true,
)));
let Targets { targets } = Targets::new(&[worker_state]);
assert_eq!(targets.len(), 1);
}
#[tokio::test]
async fn test_new_cookie_hash() {
let worker_state: &'static WorkerConfig = Box::leak(Box::new(WorkerConfig::dummy(
"test",
"127.0.0.1:9999",
true,
)));
let CookieHash {
targets,
targets_len,
} = CookieHash::new(&[worker_state]).await;
assert_eq!(targets.targets.len(), 1);
assert_eq!(targets_len, 1);
worker_state.wait_until_done().await;
}
#[test]
fn test_calculate_exponential_backoff() {
assert_eq!(calculate_exponential_backoff(0), BASE_BACKOFF);
assert_eq!(calculate_exponential_backoff(1), BASE_BACKOFF * 2);
assert_eq!(calculate_exponential_backoff(2), BASE_BACKOFF * 4);
assert_eq!(calculate_exponential_backoff(3), BASE_BACKOFF * 8);
}
#[tokio::test]
async fn test_load_balancing_strategy() {
let worker1: &'static WorkerConfig = Box::leak(Box::new(WorkerConfig::dummy(
"test1",
"127.0.0.1:9999",
true,
)));
let worker2: &'static WorkerConfig = Box::leak(Box::new(WorkerConfig::dummy(
"test2",
"127.0.0.1:8888",
true,
)));
let workers_static_refs = [worker1, worker2];
let cookie_hash = CookieHash::new(&workers_static_refs).await;
let uuid1 = Uuid::now_v7();
let client1_a = cookie_hash.entry(uuid1).await;
let client1_b = cookie_hash.entry(uuid1).await;
assert_eq!(client1_a.socket_addr(), client1_b.socket_addr());
// Generate many UUIDs to increase chance of hitting the other target
// This doesn't guarantee hitting the other target if hash distribution is not perfect
// or if N_TARGETS is small, but it's a practical test.
let mut client2_addr = client1_a.socket_addr();
let mut uuid2 = Uuid::now_v7();
for _ in 0..100 {
// Try a few times to get a different client
uuid2 = Uuid::now_v7();
let client_temp = cookie_hash.entry(uuid2).await;
if client_temp.socket_addr() != client1_a.socket_addr() {
client2_addr = client_temp.socket_addr();
break;
}
}
// It's possible (though unlikely for 2 targets and good hash) that we always hit the same target.
// A more robust test would mock specific hash results or use more targets.
// For now, we assert that two different UUIDs *can* map to different clients.
// And the same UUID (uuid2) consistently maps.
let client2_a = cookie_hash.entry(uuid2).await;
let client2_b = cookie_hash.entry(uuid2).await;
assert_eq!(client2_a.socket_addr(), client2_b.socket_addr());
assert_eq!(client2_a.socket_addr(), client2_addr);
if workers_static_refs.len() > 1 {
// Only assert inequality if we expect different clients to be possible and were found
if client1_a.socket_addr() != client2_a.socket_addr() {
assert_ne!(client1_a.socket_addr(), client2_a.socket_addr());
} else {
// This might happen if all UUIDs hashed to the same target, or only 1 worker.
// Consider logging a warning if this happens frequently with >1 workers.
println!("Warning: test_load_balancing_strategy did not find two different UUIDs mapping to different targets.");
}
} else {
assert_eq!(client1_a.socket_addr(), client2_a.socket_addr());
}
for worker_config in workers_static_refs.iter() {
worker_config.wait_until_done().await;
}
}
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/client/load_balancing/ip_hash.rs | src/client/load_balancing/ip_hash.rs | use super::LoadBalancingStrategy;
use super::WorkerConfig;
use crate::client::Client;
use crate::leak;
use std::net::IpAddr;
use std::time::Duration;
struct Targets {
targets: &'static [Client],
}
impl Targets {
fn new(configs: &[&'static WorkerConfig]) -> Self {
let mut targets = Vec::new();
for state in configs {
let client = Client::new(state);
targets.push(client);
}
let targets = leak!(targets);
Targets { targets }
}
}
pub struct IpHash {
targets: Targets,
targets_len: usize,
}
impl IpHash {
pub(crate) async fn new(configs: &[&'static WorkerConfig]) -> Self {
// Start the process of each config
for config in configs {
config.spawn_worker_task().await;
}
Self {
targets_len: configs.as_ref().len(),
targets: Targets::new(configs),
}
}
}
fn calculate_hash(ip: IpAddr) -> u64 {
let mut hash_value = match ip {
IpAddr::V4(ip) => ip.to_bits() as u64,
IpAddr::V6(ip) => ip.to_bits() as u64,
};
hash_value ^= hash_value >> 33;
hash_value = hash_value.wrapping_mul(0xff51afd7ed558ccd);
hash_value ^= hash_value >> 33;
hash_value = hash_value.wrapping_mul(0xc4ceb9fe1a85ec53);
hash_value ^= hash_value >> 33;
hash_value
}
fn hash_to_index(value: IpAddr, length: usize) -> usize {
let hash = calculate_hash(value);
(hash % length as u64) as usize
}
// 50ms is the minimum backoff time for exponential backoff
const BASE_BACKOFF: Duration = Duration::from_millis(50);
fn calculate_exponential_backoff(retries: u32) -> Duration {
BASE_BACKOFF * 2u32.pow(retries)
}
impl LoadBalancingStrategy for IpHash {
type Input = IpAddr;
async fn entry(&self, ip: IpAddr) -> Client {
let mut retries = 0;
let index = hash_to_index(ip, self.targets_len);
let client = self.targets.targets[index].clone();
loop {
if client.is_online() {
break client;
}
let backoff = calculate_exponential_backoff(retries);
log::debug!(
target: "faucet",
"IP {} tried to connect to offline {}, retrying in {:?}",
ip,
client.config.target,
backoff
);
tokio::time::sleep(backoff).await;
retries += 1;
}
}
}
#[cfg(test)]
mod tests {
use std::sync::{atomic::AtomicBool, Arc};
use super::*;
#[test]
fn ip_v4_test_distribution_of_hash_function_len_4() {
const N_IP: usize = 100_000;
// Generate 10_000 ip address and see the
// distribution over diferent lengths
let ips: Vec<IpAddr> = (0..N_IP)
.map(|_| IpAddr::V4(std::net::Ipv4Addr::from_bits(rand::random::<u32>())))
.collect();
// Counts when length == 4
let mut counts = [0; 4];
ips.iter().for_each(|ip| {
let index = hash_to_index(*ip, 4);
counts[index] += 1;
});
let percent_0 = counts[0] as f64 / N_IP as f64;
let percent_1 = counts[1] as f64 / N_IP as f64;
let percent_2 = counts[2] as f64 / N_IP as f64;
let percent_3 = counts[3] as f64 / N_IP as f64;
assert!((0.24..=0.26).contains(&percent_0));
assert!((0.24..=0.26).contains(&percent_1));
assert!((0.24..=0.26).contains(&percent_2));
assert!((0.24..=0.26).contains(&percent_3));
}
#[test]
fn ip_v4_test_distribution_of_hash_function_len_3() {
const N_IP: usize = 100_000;
// Generate 10_000 ip address and see the
// distribution over diferent lengths
let ips: Vec<IpAddr> = (0..N_IP)
.map(|_| IpAddr::V4(std::net::Ipv4Addr::from_bits(rand::random::<u32>())))
.collect();
// Counts when length == 4
let mut counts = [0; 3];
ips.iter().for_each(|ip| {
let index = hash_to_index(*ip, 3);
counts[index] += 1;
});
let percent_0 = counts[0] as f64 / N_IP as f64;
let percent_1 = counts[1] as f64 / N_IP as f64;
let percent_2 = counts[2] as f64 / N_IP as f64;
assert!((0.32..=0.34).contains(&percent_0));
assert!((0.32..=0.34).contains(&percent_1));
assert!((0.32..=0.34).contains(&percent_2));
}
#[test]
fn ip_v4_test_distribution_of_hash_function_len_2() {
    const N_IP: usize = 100_000;
    // Generate N_IP random IPv4 addresses and check that the hash
    // spreads them roughly evenly across the buckets.
    let ips: Vec<IpAddr> = (0..N_IP)
        .map(|_| IpAddr::V4(std::net::Ipv4Addr::from_bits(rand::random::<u32>())))
        .collect();
    // Bucket counts for length == 2.
    let mut counts = [0; 2];
    ips.iter().for_each(|ip| {
        let index = hash_to_index(*ip, 2);
        counts[index] += 1;
    });
    let percent_0 = counts[0] as f64 / N_IP as f64;
    let percent_1 = counts[1] as f64 / N_IP as f64;
    // Each bucket should receive ~50% (within 1%) of the addresses.
    assert!((0.49..=0.51).contains(&percent_0));
    assert!((0.49..=0.51).contains(&percent_1));
}
#[test]
fn ip_v6_test_distribution_of_hash_function_len_4() {
    const N_IP: usize = 100_000;
    // Generate N_IP random IPv6 addresses and check the distribution
    // over the buckets.
    let ips: Vec<IpAddr> = (0..N_IP)
        .map(|_| IpAddr::V6(std::net::Ipv6Addr::from_bits(rand::random::<u128>())))
        .collect();
    // Bucket counts for length == 4.
    let mut counts = [0; 4];
    ips.iter().for_each(|ip| {
        let index = hash_to_index(*ip, 4);
        counts[index] += 1;
    });
    let percent_0 = counts[0] as f64 / N_IP as f64;
    let percent_1 = counts[1] as f64 / N_IP as f64;
    let percent_2 = counts[2] as f64 / N_IP as f64;
    let percent_3 = counts[3] as f64 / N_IP as f64;
    // Each bucket should receive ~25% (within 1%).
    assert!((0.24..=0.26).contains(&percent_0));
    assert!((0.24..=0.26).contains(&percent_1));
    assert!((0.24..=0.26).contains(&percent_2));
    assert!((0.24..=0.26).contains(&percent_3));
}
#[test]
fn ip_v6_test_distribution_of_hash_function_len_3() {
    const N_IP: usize = 100_000;
    // Generate N_IP random IPv6 addresses and check the distribution
    // over the buckets.
    let ips: Vec<IpAddr> = (0..N_IP)
        .map(|_| IpAddr::V6(std::net::Ipv6Addr::from_bits(rand::random::<u128>())))
        .collect();
    // Bucket counts for length == 3.
    let mut counts = [0; 3];
    ips.iter().for_each(|ip| {
        let index = hash_to_index(*ip, 3);
        counts[index] += 1;
    });
    let percent_0 = counts[0] as f64 / N_IP as f64;
    let percent_1 = counts[1] as f64 / N_IP as f64;
    let percent_2 = counts[2] as f64 / N_IP as f64;
    // Each bucket should receive ~33% (within 1%).
    assert!((0.32..=0.34).contains(&percent_0));
    assert!((0.32..=0.34).contains(&percent_1));
    assert!((0.32..=0.34).contains(&percent_2));
}
#[test]
fn ip_v6_test_distribution_of_hash_function_len_2() {
    const N_IP: usize = 100_000;
    // Generate N_IP random IPv6 addresses and check the distribution
    // over the buckets.
    let ips: Vec<IpAddr> = (0..N_IP)
        .map(|_| IpAddr::V6(std::net::Ipv6Addr::from_bits(rand::random::<u128>())))
        .collect();
    // Bucket counts for length == 2.
    let mut counts = [0; 2];
    ips.iter().for_each(|ip| {
        let index = hash_to_index(*ip, 2);
        counts[index] += 1;
    });
    let percent_0 = counts[0] as f64 / N_IP as f64;
    let percent_1 = counts[1] as f64 / N_IP as f64;
    // Each bucket should receive ~50% (within 1%).
    assert!((0.49..=0.51).contains(&percent_0));
    assert!((0.49..=0.51).contains(&percent_1));
}
#[test]
fn test_new_targets() {
    // A single dummy worker must yield exactly one target client.
    let config: &'static WorkerConfig = Box::leak(Box::new(WorkerConfig::dummy(
        "test",
        "127.0.0.1:9999",
        true,
    )));
    let Targets { targets } = Targets::new(&[config]);
    assert_eq!(targets.len(), 1);
}
#[tokio::test]
async fn test_new_ip_hash() {
    // IpHash construction records both the targets and their count.
    let config: &'static WorkerConfig = Box::leak(Box::new(WorkerConfig::dummy(
        "test",
        "127.0.0.1:9999",
        true,
    )));
    let ip_hash = IpHash::new(&[config]).await;
    assert_eq!(ip_hash.targets.targets.len(), 1);
    assert_eq!(ip_hash.targets_len, 1);
    config.wait_until_done().await;
}
#[test]
fn test_calculate_exponential_backoff() {
    // Backoff doubles with each attempt: BASE_BACKOFF * 2^attempt.
    for (attempt, factor) in [(0, 1), (1, 2), (2, 4), (3, 8)] {
        assert_eq!(calculate_exponential_backoff(attempt), BASE_BACKOFF * factor);
    }
}
#[tokio::test]
async fn test_load_balancing_strategy() {
    use crate::client::ExtractSocketAddr;
    // Leak a dummy worker so it satisfies the 'static bound.
    fn leak_worker(addr: &str) -> &'static WorkerConfig {
        Box::leak(Box::new(WorkerConfig::dummy("test", addr, true)))
    }
    let workers = [leak_worker("127.0.0.1:9999"), leak_worker("127.0.0.1:8888")];
    let ip_hash = IpHash::new(&workers).await;
    // The same source IP must always resolve to the same backend.
    let first = ip_hash.entry("192.168.0.1".parse().unwrap()).await;
    let second = ip_hash.entry("192.168.0.1".parse().unwrap()).await;
    assert_eq!(first.socket_addr(), second.socket_addr());
    // This IP address should hash to a different index.
    let third = ip_hash.entry("192.168.0.10".parse().unwrap()).await;
    let fourth = ip_hash.entry("192.168.0.10".parse().unwrap()).await;
    assert_eq!(third.socket_addr(), fourth.socket_addr());
    assert_eq!(first.socket_addr(), second.socket_addr());
    assert_ne!(first.socket_addr(), third.socket_addr());
    for worker_config in workers.iter() {
        worker_config.wait_until_done().await;
    }
}
#[tokio::test]
async fn test_load_balancing_strategy_offline() {
    use crate::client::ExtractSocketAddr;
    // NOTE(review): the previous version created an `online` AtomicBool and
    // a task that flipped it after 100ms, but nothing ever read that flag
    // (the worker is constructed online), so both were dead code and have
    // been removed. This test verifies that `entry` resolves against the
    // single online worker.
    let worker: &'static WorkerConfig = Box::leak(Box::new(WorkerConfig::dummy(
        "test",
        "127.0.0.1:9999",
        true,
    )));
    let ip_hash = IpHash::new(&[worker]).await;
    let entry = ip_hash.entry("192.168.0.1".parse().unwrap()).await;
    assert_eq!(entry.socket_addr(), "127.0.0.1:9999".parse().unwrap());
    worker.wait_until_done().await;
}
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/client/load_balancing/ip_extractor.rs | src/client/load_balancing/ip_extractor.rs | use crate::error::{BadRequestReason, FaucetError, FaucetResult};
use hyper::{http::HeaderValue, Request};
use std::net::IpAddr;
/// Strategy for determining the client IP address of a request.
///
/// NOTE: the previous attribute was `#[serde(rename = "snake_case")]`, which
/// renames the *container* to the literal string "snake_case" and leaves the
/// variants CamelCase; `rename_all` was clearly intended. The CamelCase
/// aliases keep configurations written against the old behavior working.
#[derive(Clone, Copy, Debug, serde::Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum IpExtractor {
    /// Use the peer address of the TCP connection.
    #[serde(alias = "ClientAddr")]
    ClientAddr,
    /// Use the first entry of the `X-Forwarded-For` header.
    #[serde(alias = "XForwardedFor")]
    XForwardedFor,
    /// Use the `X-Real-IP` header.
    #[serde(alias = "XRealIp")]
    XRealIp,
}
/// Error: the `X-Forwarded-For` header was absent from the request.
const MISSING_X_FORWARDED_FOR: FaucetError =
    FaucetError::BadRequest(BadRequestReason::MissingHeader("X-Forwarded-For"));
/// Error: the `X-Forwarded-For` header was present but malformed.
const INVALID_X_FORWARDED_FOR: FaucetError =
    FaucetError::BadRequest(BadRequestReason::InvalidHeader("X-Forwarded-For"));
/// Extracts the originating client IP from an `X-Forwarded-For` header
/// value, using the first (left-most) entry.
///
/// # Errors
/// Returns `INVALID_X_FORWARDED_FOR` when the header is not valid UTF-8 or
/// its first entry does not parse as an IP address. (The previous version
/// incorrectly reported a *missing* header when the bytes were not UTF-8.)
fn extract_ip_from_x_forwarded_for(x_forwarded_for: &HeaderValue) -> FaucetResult<IpAddr> {
    let x_forwarded_for = x_forwarded_for
        .to_str()
        .map_err(|_| INVALID_X_FORWARDED_FOR)?;
    let ip_str = x_forwarded_for
        .split(',')
        .next()
        .map(|ip| ip.trim())
        .ok_or(INVALID_X_FORWARDED_FOR)?;
    ip_str.parse().map_err(|_| INVALID_X_FORWARDED_FOR)
}
/// Error: the `X-Real-IP` header was absent from the request.
const MISSING_X_REAL_IP: FaucetError =
    FaucetError::BadRequest(BadRequestReason::MissingHeader("X-Real-IP"));
/// Error: the `X-Real-IP` header was present but malformed.
const INVALID_X_REAL_IP: FaucetError =
    FaucetError::BadRequest(BadRequestReason::InvalidHeader("X-Real-IP"));
/// Parses the client IP out of an `X-Real-IP` header value.
///
/// # Errors
/// Returns `INVALID_X_REAL_IP` when the header is not valid UTF-8 or does
/// not parse as an IP address. (The previous version incorrectly reported a
/// *missing* header when the bytes were not UTF-8.)
fn extract_ip_from_x_real_ip(x_real_ip: &HeaderValue) -> FaucetResult<IpAddr> {
    let x_real_ip = x_real_ip.to_str().map_err(|_| INVALID_X_REAL_IP)?;
    x_real_ip.parse().map_err(|_| INVALID_X_REAL_IP)
}
impl IpExtractor {
pub fn extract<B>(self, req: &Request<B>, client_addr: Option<IpAddr>) -> FaucetResult<IpAddr> {
use IpExtractor::*;
let ip = match self {
ClientAddr => client_addr.expect("Unable to get client address"),
XForwardedFor => match req.headers().get("X-Forwarded-For") {
Some(header) => extract_ip_from_x_forwarded_for(header)?,
None => return Err(MISSING_X_FORWARDED_FOR),
},
XRealIp => match req.headers().get("X-Real-IP") {
Some(header) => extract_ip_from_x_real_ip(header)?,
None => return Err(MISSING_X_REAL_IP),
},
};
Ok(ip)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Builds a unit-body request carrying a single static header.
    fn request_with(name: &'static str, value: &'static str) -> Request<()> {
        Request::builder()
            .header(name, HeaderValue::from_static(value))
            .body(())
            .unwrap()
    }
    #[test]
    fn extract_ip_from_x_forwarded_for_ipv4() {
        let header = HeaderValue::from_static("127.0.0.1");
        let ip = extract_ip_from_x_forwarded_for(&header).unwrap();
        assert_eq!(ip, IpAddr::from([127, 0, 0, 1]));
    }
    #[test]
    fn extract_ip_from_x_forwarded_for_ipv6() {
        let header = HeaderValue::from_static("::1");
        let ip = extract_ip_from_x_forwarded_for(&header).unwrap();
        assert_eq!(ip, IpAddr::from([0, 0, 0, 0, 0, 0, 0, 1]));
    }
    #[test]
    fn extract_ip_from_x_forwarded_for_multiple() {
        // Only the first (left-most) entry identifies the client.
        let header = HeaderValue::from_static("192.168.0.1, 127.0.0.1");
        let ip = extract_ip_from_x_forwarded_for(&header).unwrap();
        assert_eq!(ip, IpAddr::from([192, 168, 0, 1]));
    }
    #[test]
    fn extract_x_real_ip_ipv4_from_request() {
        let request = request_with("X-Real-IP", "127.0.0.1");
        let ip = IpExtractor::XRealIp
            .extract(&request, Some(IpAddr::from([0, 0, 0, 0])))
            .unwrap();
        assert_eq!(ip, IpAddr::from([127, 0, 0, 1]));
    }
    #[test]
    fn extract_x_real_ip_ipv6_from_request() {
        let request = request_with("X-Real-IP", "::1");
        let ip = IpExtractor::XRealIp
            .extract(&request, Some(IpAddr::from([0, 0, 0, 0])))
            .unwrap();
        assert_eq!(ip, IpAddr::from([0, 0, 0, 0, 0, 0, 0, 1]));
    }
    #[test]
    fn extract_x_forwarded_for_ipv4_from_request() {
        let request = request_with("X-Forwarded-For", "127.0.0.1");
        let ip = IpExtractor::XForwardedFor
            .extract(&request, Some(IpAddr::from([0, 0, 0, 0])))
            .unwrap();
        assert_eq!(ip, IpAddr::from([127, 0, 0, 1]));
    }
    #[test]
    fn extract_x_forwarded_for_ipv6_from_request() {
        let request = request_with("X-Forwarded-For", "::1");
        let ip = IpExtractor::XForwardedFor
            .extract(&request, Some(IpAddr::from([0, 0, 0, 0])))
            .unwrap();
        assert_eq!(ip, IpAddr::from([0, 0, 0, 0, 0, 0, 0, 1]));
    }
    #[test]
    fn extract_x_forwarded_for_ipv4_from_request_multiple() {
        let request = request_with("X-Forwarded-For", "192.168.0.1, 127.0.0.1");
        let ip = IpExtractor::XForwardedFor
            .extract(&request, Some(IpAddr::from([0, 0, 0, 0])))
            .unwrap();
        assert_eq!(ip, IpAddr::from([192, 168, 0, 1]));
    }
    #[test]
    fn extract_client_addr_ipv4_from_request() {
        let request = Request::builder().body(()).unwrap();
        let ip = IpExtractor::ClientAddr
            .extract(&request, Some(IpAddr::from([127, 0, 0, 1])))
            .unwrap();
        assert_eq!(ip, IpAddr::from([127, 0, 0, 1]));
    }
    #[test]
    fn extract_client_addr_ipv6_from_request() {
        let request = Request::builder().body(()).unwrap();
        let ip = IpExtractor::ClientAddr
            .extract(&request, Some(IpAddr::from([0, 0, 0, 0, 0, 0, 0, 1])))
            .unwrap();
        assert_eq!(ip, IpAddr::from([0, 0, 0, 0, 0, 0, 0, 1]));
    }
    #[test]
    fn extract_client_addr_ipv4_with_x_forwarded_for_from_request() {
        // ClientAddr must ignore any proxy headers on the request.
        let request = request_with("X-Forwarded-For", "192.168.0.1");
        let ip = IpExtractor::ClientAddr
            .extract(&request, Some(IpAddr::from([127, 0, 0, 1])))
            .unwrap();
        assert_eq!(ip, IpAddr::from([127, 0, 0, 1]));
    }
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/client/load_balancing/mod.rs | src/client/load_balancing/mod.rs | pub mod cookie_hash;
mod ip_extractor;
pub mod ip_hash;
pub mod round_robin;
pub mod rps_autoscale;
use super::worker::WorkerConfig;
use crate::client::Client;
use crate::error::FaucetResult;
use crate::leak;
use cookie_hash::CookieHash;
use hyper::Request;
pub use ip_extractor::IpExtractor;
use std::net::IpAddr;
use std::str::FromStr;
use uuid::Uuid;
use self::ip_hash::IpHash;
use self::round_robin::RoundRobin;
use self::rps_autoscale::RpsAutoscale;
/// Default requests-per-second ceiling used by the RPS autoscaling strategy
/// when no explicit value is configured.
const DEFAULT_MAX_RPS: f64 = 10.0;
/// Common interface implemented by every load-balancing strategy.
trait LoadBalancingStrategy {
    /// Identifier used to select a backend (e.g. an IP address or a UUID).
    type Input;
    /// Resolves an identifier to a concrete backend client.
    async fn entry(&self, ip: Self::Input) -> Client;
}
/// Load-balancing strategy selected via CLI flag or configuration file.
///
/// NOTE: the previous attribute was `#[serde(rename = "snake_case")]`, which
/// renames the *container* to the literal string "snake_case" and does not
/// touch the variants; `rename_all` was intended. With `rename_all`, the
/// snake_case spellings become the primary names, and the aliases preserve
/// every spelling the old code accepted (CamelCase primary + kebab alias).
#[derive(Debug, Clone, Copy, clap::ValueEnum, Eq, PartialEq, serde::Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum Strategy {
    /// Cycle through the workers in a fixed order.
    #[serde(alias = "RoundRobin", alias = "round-robin")]
    RoundRobin,
    /// Pin each client IP to a worker via hashing.
    #[serde(alias = "IpHash", alias = "ip-hash")]
    IpHash,
    /// Pin each session cookie (UUID) to a worker via hashing.
    #[serde(alias = "CookieHash", alias = "cookie-hash")]
    CookieHash,
    /// Autoscale based on requests per second.
    #[serde(alias = "Rps")]
    Rps,
}
impl FromStr for Strategy {
    type Err = &'static str;
    /// Parses a strategy name.
    ///
    /// Accepts the same spellings as the serde configuration layer
    /// (snake_case, kebab-case and CamelCase) for consistency; previously
    /// only the snake_case forms were accepted here. This is a
    /// backward-compatible widening: every previously valid input still
    /// parses to the same variant.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "round_robin" | "round-robin" | "RoundRobin" => Ok(Self::RoundRobin),
            "ip_hash" | "ip-hash" | "IpHash" => Ok(Self::IpHash),
            "cookie_hash" | "cookie-hash" | "CookieHash" => Ok(Self::CookieHash),
            "rps" | "Rps" => Ok(Self::Rps),
            _ => Err("invalid strategy"),
        }
    }
}
/// Identifier handed to the strategy layer: an IP address for the IP-keyed
/// strategies, or a session UUID for cookie hashing.
#[derive(Debug, Clone, Copy)]
enum LBIdent {
    Ip(IpAddr),
    Uuid(Uuid),
}
/// Type-erased, copyable handle to one of the leaked `'static` strategy
/// instances created in `LoadBalancer::new`.
#[derive(Copy, Clone)]
enum DynLoadBalancer {
    IpHash(&'static ip_hash::IpHash),
    RoundRobin(&'static round_robin::RoundRobin),
    CookieHash(&'static cookie_hash::CookieHash),
    Rps(&'static rps_autoscale::RpsAutoscale),
}
impl LoadBalancingStrategy for DynLoadBalancer {
    type Input = LBIdent;
    /// Dispatches to the concrete strategy.
    ///
    /// IP identifiers are only valid for the IP-keyed strategies and UUIDs
    /// only for cookie hashing; any other pairing is a programming error
    /// and panics via `unreachable!`.
    async fn entry(&self, ident: LBIdent) -> Client {
        match (self, ident) {
            (DynLoadBalancer::RoundRobin(rr), LBIdent::Ip(ip)) => rr.entry(ip).await,
            (DynLoadBalancer::IpHash(ih), LBIdent::Ip(ip)) => ih.entry(ip).await,
            (DynLoadBalancer::Rps(rps), LBIdent::Ip(ip)) => rps.entry(ip).await,
            (DynLoadBalancer::CookieHash(ch), LBIdent::Uuid(uuid)) => ch.entry(uuid).await,
            (_, LBIdent::Ip(_)) => unreachable!(
                "This should never happen, ip should never be passed to cookie hash"
            ),
            (_, LBIdent::Uuid(_)) => unreachable!(
                "This should never happen, uuid should never be passed to round robin or ip hash"
            ),
        }
    }
}
/// Pairs a load-balancing strategy with the policy for extracting the
/// client IP from incoming requests.
pub(crate) struct LoadBalancer {
    strategy: DynLoadBalancer,
    extractor: IpExtractor,
}
impl LoadBalancer {
    /// Builds the balancer, leaking the chosen strategy instance so it can
    /// be shared as a `'static` reference.
    ///
    /// `max_rps_config` is only consulted by `Strategy::Rps`; `None` falls
    /// back to `DEFAULT_MAX_RPS`.
    ///
    /// # Errors
    /// Propagates any error from constructing the underlying strategy.
    pub async fn new(
        strategy: Strategy,
        extractor: IpExtractor,
        workers: &[&'static WorkerConfig],
        max_rps_config: Option<f64>, // Only meaningful for Strategy::Rps
    ) -> FaucetResult<Self> {
        let strategy: DynLoadBalancer = match strategy {
            Strategy::RoundRobin => {
                DynLoadBalancer::RoundRobin(leak!(RoundRobin::new(workers).await))
            }
            Strategy::IpHash => DynLoadBalancer::IpHash(leak!(IpHash::new(workers).await)),
            Strategy::CookieHash => {
                DynLoadBalancer::CookieHash(leak!(CookieHash::new(workers).await))
            }
            Strategy::Rps => {
                let rps_value = max_rps_config.unwrap_or(DEFAULT_MAX_RPS);
                DynLoadBalancer::Rps(leak!(RpsAutoscale::new(workers, rps_value).await))
            }
        };
        Ok(Self {
            strategy,
            extractor,
        })
    }
    /// Reports which strategy this balancer was built with.
    pub fn get_strategy(&self) -> Strategy {
        match self.strategy {
            DynLoadBalancer::RoundRobin(_) => Strategy::RoundRobin,
            DynLoadBalancer::IpHash(_) => Strategy::IpHash,
            DynLoadBalancer::CookieHash(_) => Strategy::CookieHash,
            DynLoadBalancer::Rps(_) => Strategy::Rps,
        }
    }
    /// Resolves a backend for an IP-keyed strategy.
    async fn get_client_ip(&self, ip: IpAddr) -> FaucetResult<Client> {
        Ok(self.strategy.entry(LBIdent::Ip(ip)).await)
    }
    /// Resolves a backend for the cookie-hash (UUID-keyed) strategy.
    async fn get_client_uuid(&self, uuid: Uuid) -> FaucetResult<Client> {
        Ok(self.strategy.entry(LBIdent::Uuid(uuid)).await)
    }
    /// Resolves a backend, preferring the session UUID when one is present.
    pub async fn get_client(&self, ip: IpAddr, uuid: Option<Uuid>) -> FaucetResult<Client> {
        if let Some(uuid) = uuid {
            self.get_client_uuid(uuid).await
        } else {
            self.get_client_ip(ip).await
        }
    }
    /// Extracts the client IP from `request` using the configured policy.
    /// `socket` is the TCP peer address, used by the `ClientAddr` extractor.
    pub fn extract_ip<B>(
        &self,
        request: &Request<B>,
        socket: Option<IpAddr>,
    ) -> FaucetResult<IpAddr> {
        self.extractor.extract(request, socket)
    }
}
impl Clone for LoadBalancer {
fn clone(&self) -> Self {
Self {
strategy: self.strategy,
extractor: self.extractor,
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // `FromStr` accepts the snake_case names and rejects unknown strings.
    #[test]
    fn test_strategy_from_str() {
        assert_eq!(
            Strategy::from_str("round_robin").unwrap(),
            Strategy::RoundRobin
        );
        assert_eq!(Strategy::from_str("ip_hash").unwrap(), Strategy::IpHash);
        assert!(Strategy::from_str("invalid").is_err());
    }
    // Constructing a round-robin balancer with zero workers must not fail.
    #[tokio::test]
    async fn test_load_balancer_new_round_robin() {
        let configs = Vec::new();
        let _ = LoadBalancer::new(
            Strategy::RoundRobin,
            IpExtractor::XForwardedFor,
            &configs,
            None,
        )
        .await
        .expect("failed to create load balancer");
    }
    // Constructing an ip-hash balancer with zero workers must not fail.
    #[tokio::test]
    async fn test_load_balancer_new_ip_hash() {
        let configs = Vec::new();
        let _ = LoadBalancer::new(Strategy::IpHash, IpExtractor::XForwardedFor, &configs, None)
            .await
            .expect("failed to create load balancer");
    }
    // The configured extractor (X-Forwarded-For) wins over the socket addr.
    #[tokio::test]
    async fn test_load_balancer_extract_ip() {
        let configs = Vec::new();
        let load_balancer = LoadBalancer::new(
            Strategy::RoundRobin,
            IpExtractor::XForwardedFor,
            &configs,
            None,
        )
        .await
        .expect("failed to create load balancer");
        let request = Request::builder()
            .header("x-forwarded-for", "192.168.0.1")
            .body(())
            .unwrap();
        let ip = load_balancer
            .extract_ip(&request, Some("127.0.0.1".parse().unwrap()))
            .expect("failed to extract ip");
        assert_eq!(ip, "192.168.0.1".parse::<IpAddr>().unwrap());
    }
    // Round robin alternates between the two workers on successive calls.
    // NOTE: the assertions depend on the exact call order, so do not reorder.
    #[tokio::test]
    async fn test_load_balancer_get_client() {
        use crate::client::ExtractSocketAddr;
        let configs: [&'static WorkerConfig; 2] = [
            &*Box::leak(Box::new(WorkerConfig::dummy(
                "test",
                "127.0.0.1:9999",
                true,
            ))),
            &*Box::leak(Box::new(WorkerConfig::dummy(
                "test",
                "127.0.0.1:9998",
                true,
            ))),
        ];
        let load_balancer = LoadBalancer::new(
            Strategy::RoundRobin,
            IpExtractor::XForwardedFor,
            &configs,
            None,
        )
        .await
        .expect("failed to create load balancer");
        let ip = "192.168.0.1".parse().unwrap();
        let client = load_balancer
            .get_client_ip(ip)
            .await
            .expect("failed to get client");
        assert_eq!(client.socket_addr(), "127.0.0.1:9999".parse().unwrap());
        let client = load_balancer
            .get_client_ip(ip)
            .await
            .expect("failed to get client");
        assert_eq!(client.socket_addr(), "127.0.0.1:9998".parse().unwrap());
        for config in configs.iter() {
            config.wait_until_done().await;
        }
    }
    // `Clone` must work even on an empty balancer.
    #[tokio::test]
    async fn test_clone_load_balancer() {
        let configs = Vec::new();
        let load_balancer = LoadBalancer::new(
            Strategy::RoundRobin,
            IpExtractor::XForwardedFor,
            &configs,
            None,
        )
        .await
        .expect("failed to create load balancer");
        let _ = load_balancer.clone();
    }
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/client/load_balancing/round_robin.rs | src/client/load_balancing/round_robin.rs | use super::LoadBalancingStrategy;
use crate::client::{worker::WorkerConfig, Client};
use std::{net::IpAddr, sync::atomic::AtomicUsize};
/// Immutable set of backend clients plus a cursor for round-robin rotation.
struct Targets {
    targets: &'static [Client],
    index: AtomicUsize,
}
// 500us is the time it takes for the round robin to move to the next target
// in the unlikely event that the target is offline
const WAIT_TIME_UNTIL_RETRY: std::time::Duration = std::time::Duration::from_micros(500);
impl Targets {
    /// Builds one `Client` per worker config and leaks the slice so the
    /// targets live for the life of the process.
    fn new(configs: &[&'static WorkerConfig]) -> Self {
        // Iterator form of the previous manual push loop.
        let targets: Vec<Client> = configs.iter().map(|state| Client::new(state)).collect();
        Targets {
            targets: Box::leak(targets.into_boxed_slice()),
            index: AtomicUsize::new(0),
        }
    }
    /// Returns the next client in rotation.
    ///
    /// # Panics
    /// Panics (remainder with a divisor of zero) when constructed with an
    /// empty worker list.
    fn next(&self) -> Client {
        let index = self.index.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
        self.targets[index % self.targets.len()].clone()
    }
}
/// Round-robin load balancer: hands out workers in a fixed rotation.
pub struct RoundRobin {
    targets: Targets,
}
impl RoundRobin {
    /// Spawns every worker's task, then records the workers as targets.
    pub(crate) async fn new(configs: &[&'static WorkerConfig]) -> Self {
        for config in configs.iter() {
            config.spawn_worker_task().await;
        }
        let targets = Targets::new(configs);
        RoundRobin { targets }
    }
}
impl LoadBalancingStrategy for RoundRobin {
    type Input = IpAddr;
    /// Returns the next online client, skipping offline targets.
    ///
    /// The IP is ignored — rotation is purely positional. Loops forever,
    /// sleeping `WAIT_TIME_UNTIL_RETRY` between attempts, until some target
    /// reports online.
    async fn entry(&self, _ip: IpAddr) -> Client {
        loop {
            let candidate = self.targets.next();
            if candidate.is_online() {
                return candidate;
            }
            tokio::time::sleep(WAIT_TIME_UNTIL_RETRY).await;
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Targets::new builds one client per worker config without panicking.
    #[test]
    fn test_new_targets() {
        let configs_static_refs: Vec<&'static WorkerConfig> = (0..3)
            .map(|i| {
                &*Box::leak(Box::new(WorkerConfig::dummy(
                    "test",
                    &format!("127.0.0.1:900{i}"),
                    true,
                )))
            })
            .collect();
        let _ = Targets::new(&configs_static_refs);
    }
    // Construction spawns every worker task; wait for each to finish.
    #[tokio::test]
    async fn test_new_round_robin() {
        let configs_static_refs: Vec<&'static WorkerConfig> = (0..3)
            .map(|i| {
                &*Box::leak(Box::new(WorkerConfig::dummy(
                    "test",
                    &format!("127.0.0.1:900{i}"),
                    true,
                )))
            })
            .collect();
        let _ = RoundRobin::new(&configs_static_refs).await;
        for config in configs_static_refs.iter() {
            config.wait_until_done().await;
        }
    }
    // Entries must cycle through the targets in creation order and wrap
    // around. NOTE: assertions depend on the exact call order.
    #[tokio::test]
    async fn test_round_robin_entry() {
        use crate::client::ExtractSocketAddr;
        let original_addrs: Vec<std::net::SocketAddr> = (0..3)
            .map(|i| {
                format!("127.0.0.1:900{i}")
                    .parse()
                    .expect("Failed to parse addr")
            })
            .collect();
        let configs_static_refs: Vec<&'static WorkerConfig> = (0..3)
            .map(|i| {
                &*Box::leak(Box::new(WorkerConfig::dummy(
                    "test",
                    &format!("127.0.0.1:900{i}"),
                    true,
                )))
            })
            .collect();
        let rr = RoundRobin::new(&configs_static_refs).await;
        let ip = "0.0.0.0".parse().expect("failed to parse ip");
        assert_eq!(rr.entry(ip).await.socket_addr(), original_addrs[0]);
        assert_eq!(rr.entry(ip).await.socket_addr(), original_addrs[1]);
        assert_eq!(rr.entry(ip).await.socket_addr(), original_addrs[2]);
        assert_eq!(rr.entry(ip).await.socket_addr(), original_addrs[0]);
        assert_eq!(rr.entry(ip).await.socket_addr(), original_addrs[1]);
        assert_eq!(rr.entry(ip).await.socket_addr(), original_addrs[2]);
        for config in configs_static_refs.iter() {
            config.wait_until_done().await;
        }
    }
    // With two offline workers, entry must skip to the single online one.
    #[tokio::test]
    async fn test_round_robin_entry_with_offline_target() {
        use crate::client::ExtractSocketAddr;
        // Expected address of the only online worker (port 9002).
        let target_online_addr: std::net::SocketAddr = "127.0.0.1:9002".parse().unwrap();
        let configs_static_refs: [&'static WorkerConfig; 3] = [
            &*Box::leak(Box::new(WorkerConfig::dummy(
                "test",
                "127.0.0.1:9000",
                false,
            ))),
            &*Box::leak(Box::new(WorkerConfig::dummy(
                "test",
                "127.0.0.1:9001",
                false,
            ))),
            &*Box::leak(Box::new(WorkerConfig::dummy(
                "test",
                "127.0.0.1:9002",
                true,
            ))),
        ];
        let rr = RoundRobin::new(&configs_static_refs).await;
        let ip = "0.0.0.0".parse().expect("failed to parse ip");
        assert_eq!(rr.entry(ip).await.socket_addr(), target_online_addr);
        for config in configs_static_refs.iter() {
            config.wait_until_done().await;
        }
    }
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/telemetry/mod.rs | src/telemetry/mod.rs | use std::{path::Path, str::FromStr, sync::OnceLock};
mod pg;
use chrono::Local;
use deadpool_postgres::{Manager, ManagerConfig, Pool, RecyclingMethod};
use tokio::{sync::mpsc::UnboundedSender, task::JoinHandle};
use crate::{
cli::PgSslMode,
error::FaucetResult,
leak,
server::{logging::EventLogData, HttpLogData},
shutdown::ShutdownSignal,
};
/// Cloneable handle for submitting telemetry events to the background
/// writer tasks.
#[derive(Clone, Debug)]
pub struct TelemetrySender {
    // Channel feeding the HTTP-event writer task.
    pub sender_http_events: UnboundedSender<(chrono::DateTime<Local>, HttpLogData)>,
    // Channel feeding the log-event writer task.
    pub sender_log_events: UnboundedSender<(chrono::DateTime<Local>, EventLogData)>,
}
impl TelemetrySender {
    /// Timestamps and enqueues an HTTP event. Send errors (receiver gone)
    /// are deliberately ignored — telemetry is best-effort.
    pub fn send_http_event(&self, data: HttpLogData) {
        let timestamp = chrono::Local::now();
        let _ = self.sender_http_events.send((timestamp, data));
    }
    /// Timestamps and enqueues a log event; best-effort, errors ignored.
    pub fn send_log_event(&self, data: EventLogData) {
        let timestamp = chrono::Local::now();
        let _ = self.sender_log_events.send((timestamp, data));
    }
}
/// Join handles for the two background telemetry writer tasks.
pub struct TelemetryManager {
    pub http_events_join_handle: JoinHandle<()>,
    pub log_events_join_handle: JoinHandle<()>,
}
/// Process-wide sender, installed once by `TelemetryManager::start_postgres`.
static TELEMETRY_SENDER: OnceLock<TelemetrySender> = OnceLock::new();
/// Forwards an HTTP event to telemetry if configured; no-op otherwise.
pub fn send_http_event(http_event: HttpLogData) {
    if let Some(sender) = TELEMETRY_SENDER.get() {
        sender.send_http_event(http_event);
    }
}
/// Forwards a log event to telemetry if configured; no-op otherwise.
///
/// The parameter was previously named `http_event`, which was misleading —
/// this function carries `EventLogData`, not HTTP events.
pub fn send_log_event(log_event: EventLogData) {
    if let Some(sender) = TELEMETRY_SENDER.get() {
        sender.send_log_event(log_event);
    }
}
impl TelemetryManager {
    /// Connects to PostgreSQL and spawns the telemetry writer tasks.
    ///
    /// Leaks `namespace` and `version` to obtain `'static` strings for the
    /// background tasks, builds a connection pool (max 10 connections), and
    /// installs the process-wide `TELEMETRY_SENDER`.
    ///
    /// # Errors
    /// Fails when `database_url` cannot be parsed or the pool cannot be
    /// built.
    ///
    /// # Panics
    /// Panics if called more than once — the `OnceLock::set` fails and the
    /// `expect` fires.
    pub fn start_postgres(
        namespace: &str,
        version: Option<&str>,
        database_url: &str,
        sslmode: PgSslMode,
        sslcert: Option<&Path>,
        shutdown_signal: &'static ShutdownSignal,
    ) -> FaucetResult<TelemetryManager> {
        // The URL is redacted from logs since it may contain credentials.
        log::debug!("Connecting to PostgreSQL with params: namespace='{}', version='{:?}', database_url='[REDACTED]'", namespace, version);
        let namespace = leak!(namespace) as &'static str;
        let version = version.map(|v| leak!(v) as &'static str);
        let config = tokio_postgres::Config::from_str(database_url)?;
        let mgr_config = ManagerConfig {
            recycling_method: RecyclingMethod::Fast,
        };
        let mgr = Manager::from_config(config, pg::make_tls(sslmode, sslcert), mgr_config);
        let pool = Pool::builder(mgr).max_size(10).build()?;
        let (
            sender_http_events,
            sender_log_events,
            http_events_join_handle,
            log_events_join_handle,
        ) = handle_http_events(pool.clone(), namespace, version, shutdown_signal);
        let sender = TelemetrySender {
            sender_http_events,
            sender_log_events,
        };
        TELEMETRY_SENDER
            .set(sender)
            .expect("Unable to set telemetry sender. This is a bug! Report it!");
        Ok(TelemetryManager {
            http_events_join_handle,
            log_events_join_handle,
        })
    }
}
/// Creates the telemetry channels and spawns both writer tasks.
///
/// Despite the name, this wires up *both* event kinds: HTTP events and log
/// events. Returns `(http_sender, log_sender, http_task, log_task)`.
fn handle_http_events(
    pool: Pool,
    namespace: &'static str,
    version: Option<&'static str>,
    shutdown_signal: &'static ShutdownSignal,
) -> (
    UnboundedSender<(chrono::DateTime<Local>, HttpLogData)>,
    UnboundedSender<(chrono::DateTime<Local>, EventLogData)>,
    JoinHandle<()>,
    JoinHandle<()>,
) {
    let (http_sender, http_receiver) = tokio::sync::mpsc::unbounded_channel::<(_, HttpLogData)>();
    let (log_sender, log_receiver) = tokio::sync::mpsc::unbounded_channel::<(_, EventLogData)>();
    let log_task =
        pg::spawn_events_task(log_receiver, pool.clone(), namespace, version, shutdown_signal);
    let http_task =
        pg::spawn_http_events_task(http_receiver, pool, namespace, version, shutdown_signal);
    (http_sender, log_sender, http_task, log_task)
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
ixpantia/faucet | https://github.com/ixpantia/faucet/blob/e11015f009992d1f6399cd5a18bddeaab2b443ec/src/telemetry/pg.rs | src/telemetry/pg.rs | use chrono::{DateTime, Local};
use deadpool_postgres::Pool;
use std::path::Path;
use std::{io::Write, pin::pin};
use tokio::{sync::mpsc::UnboundedReceiver, task::JoinHandle};
use crate::server::logging::EventLogData;
use crate::{
cli::PgSslMode,
server::{HttpLogData, LogOption},
shutdown::ShutdownSignal,
};
pub fn make_tls(
sslmode: PgSslMode,
sslcert: Option<&Path>,
) -> tokio_postgres_rustls::MakeRustlsConnect {
let mut root_store = rustls::RootCertStore::empty();
if matches!(sslmode, PgSslMode::VerifyCa | PgSslMode::VerifyFull) {
match sslcert {
Some(cert_path) => {
let mut reader =
std::io::BufReader::new(std::fs::File::open(cert_path).unwrap_or_else(|e| {
panic!("Failed to open certificate file '{:?}': {}", cert_path, e)
}));
for cert in rustls_pemfile::certs(&mut reader).flatten() {
if let Err(e) = root_store.add(cert.clone()) {
log::error!("Failed to add PEM certificate: {}", e);
}
}
}
None => panic!(
"Specified {} but did not provide a certificate path.",
sslmode.as_str()
),
}
}
let config = rustls::ClientConfig::builder()
.with_root_certificates(root_store)
.with_no_client_auth();
tokio_postgres_rustls::MakeRustlsConnect::new(config)
}
type PgType = tokio_postgres::types::Type;
/// Spawns the background task that batches HTTP telemetry events and bulk
/// inserts them into `faucet_http_events` via PostgreSQL binary COPY.
///
/// The task drains the channel in batches of up to 100 and exits when the
/// shutdown signal fires or the channel closes (`recv_many` returns 0).
/// Database errors are logged and the batch is dropped — telemetry is
/// best-effort.
pub fn spawn_http_events_task(
    mut http_rx: UnboundedReceiver<(DateTime<Local>, HttpLogData)>,
    pool: Pool,
    namespace: &'static str,
    version: Option<&'static str>,
    shutdown_signal: &'static ShutdownSignal,
) -> JoinHandle<()> {
    tokio::task::spawn(async move {
        // Wire types for the binary COPY; order must match the column order
        // written below.
        let types = &[
            PgType::UUID, // UUID
            PgType::TEXT, // Namespace
            PgType::TEXT, // Version
            PgType::TEXT, // Target
            PgType::TEXT, // Worker Route
            PgType::INT4, // Worker ID
            PgType::INET, // IpAddr
            PgType::TEXT, // Method
            PgType::TEXT, // Path
            PgType::TEXT, // Query Params
            PgType::TEXT, // HTTP Version
            PgType::INT2, // Status
            PgType::TEXT, // User Agent
            PgType::INT8, // Elapsed
            PgType::TIMESTAMPTZ, // TIMESTAMP
        ];
        // Batch buffer plus scratch byte buffers reused across iterations
        // to avoid a fresh allocation per event.
        let mut logs_buffer = Vec::with_capacity(100);
        let mut path_buffer = Vec::<u8>::new();
        let mut query_buffer = Vec::<u8>::new();
        let mut version_buffer = Vec::<u8>::new();
        // NOTE(review): user_agent_buffer is cleared each iteration but
        // never written to — it appears to be dead code; confirm and remove.
        let mut user_agent_buffer = Vec::<u8>::new();
        'recv: loop {
            tokio::select! {
                _ = shutdown_signal.wait() => break 'recv,
                received = http_rx.recv_many(&mut logs_buffer, 100) => {
                    // recv_many returns 0 only when the channel is closed.
                    if received == 0 {
                        break 'recv;
                    }
                    let connection = match pool.get().await {
                        Ok(conn) => conn,
                        Err(e) => {
                            log::error!("Unable to acquire postgresql connection: {e}");
                            continue 'recv;
                        }
                    };
                    let copy_sink_res = connection
                        .copy_in::<_, bytes::Bytes>(
                            "COPY faucet_http_events FROM STDIN WITH (FORMAT binary)",
                        )
                        .await;
                    match copy_sink_res {
                        Ok(copy_sink) => {
                            let copy_in_writer =
                                tokio_postgres::binary_copy::BinaryCopyInWriter::new(copy_sink, types);
                            let mut copy_in_writer = pin!(copy_in_writer);
                            log::debug!("Writing {} http events to the database", logs_buffer.len());
                            'write: for (timestamp, log_data) in logs_buffer.drain(..) {
                                let uuid = &log_data.state_data.uuid;
                                let target = &log_data.state_data.target;
                                let worker_id = log_data.state_data.worker_id as i32;
                                let worker_route = log_data.state_data.worker_route;
                                let ip = &log_data.state_data.ip;
                                let method = &log_data.method.as_str();
                                // Path and query are formatted into reusable
                                // byte buffers, then borrowed as &str.
                                let _ = write!(path_buffer, "{}", log_data.path.path());
                                let path = &std::str::from_utf8(&path_buffer).unwrap_or_default();
                                let _ = write!(
                                    query_buffer,
                                    "{}",
                                    log_data.path.query().unwrap_or_default()
                                );
                                let query = &std::str::from_utf8(&query_buffer).unwrap_or_default();
                                // Empty query string is stored as NULL.
                                let query = if query.is_empty() { None } else { Some(query) };
                                let _ = write!(version_buffer, "{:?}", log_data.version);
                                let http_version =
                                    &std::str::from_utf8(&version_buffer).unwrap_or_default();
                                let status = &log_data.status;
                                // Non-UTF-8 user agents are stored as NULL.
                                let user_agent = match &log_data.user_agent {
                                    LogOption::Some(v) => v.to_str().ok(),
                                    LogOption::None => None,
                                };
                                let elapsed = &log_data.elapsed;
                                let copy_result = copy_in_writer
                                    .as_mut()
                                    .write(&[
                                        uuid,
                                        &namespace,
                                        &version,
                                        target,
                                        &worker_route,
                                        &worker_id,
                                        ip,
                                        method,
                                        path,
                                        &query,
                                        http_version,
                                        status,
                                        &user_agent,
                                        elapsed,
                                        &timestamp,
                                    ])
                                    .await;
                                // Reset scratch buffers for the next event.
                                path_buffer.clear();
                                version_buffer.clear();
                                user_agent_buffer.clear();
                                query_buffer.clear();
                                if let Err(e) = copy_result {
                                    log::error!("Error writing to PostgreSQL: {e}");
                                    break 'write;
                                }
                            }
                            // finish() flushes the COPY; without it nothing
                            // is committed.
                            let copy_in_finish_res = copy_in_writer.finish().await;
                            if let Err(e) = copy_in_finish_res {
                                log::error!("Error writing to PostgreSQL: {e}");
                                continue 'recv;
                            }
                        }
                        Err(e) => {
                            log::error!(target: "telemetry", "Error writing to the database: {e}")
                        }
                    }
                }
            }
        }
    })
}
/// Spawns the background task that batches log events and bulk inserts them
/// into `faucet_log_events` via PostgreSQL binary COPY.
///
/// Mirrors `spawn_http_events_task`: drains the channel in batches of up to
/// 100, exits on shutdown or channel close, and logs (rather than
/// propagates) database errors.
pub fn spawn_events_task(
    mut event_rx: UnboundedReceiver<(chrono::DateTime<Local>, EventLogData)>,
    pool: Pool,
    namespace: &'static str,
    version: Option<&'static str>,
    shutdown_signal: &'static ShutdownSignal,
) -> JoinHandle<()> {
    tokio::task::spawn(async move {
        // Wire types for the binary COPY; order must match the column order
        // written below.
        let types = &[
            PgType::TEXT, // Namespace
            PgType::TEXT, // Version
            PgType::TEXT, // Target
            PgType::TIMESTAMPTZ, // Timestamp
            PgType::UUID, // Event_Id
            PgType::UUID, // Parent_Event_Id
            PgType::TEXT, // Level
            PgType::TEXT, // Event Type
            PgType::TEXT, // Message
            PgType::JSONB, // Body
        ];
        let mut logs_buffer = Vec::with_capacity(100);
        'recv: loop {
            tokio::select! {
                _ = shutdown_signal.wait() => break 'recv,
                received = event_rx.recv_many(&mut logs_buffer, 100) => {
                    // recv_many returns 0 only when the channel is closed.
                    if received == 0 {
                        break 'recv;
                    }
                    let connection = match pool.get().await {
                        Ok(conn) => conn,
                        Err(e) => {
                            log::error!("Unable to acquire postgresql connection: {e}");
                            continue 'recv;
                        }
                    };
                    let copy_sink_res = connection
                        .copy_in::<_, bytes::Bytes>(
                            "COPY faucet_log_events FROM STDIN WITH (FORMAT binary)",
                        )
                        .await;
                    match copy_sink_res {
                        Ok(copy_sink) => {
                            let copy_in_writer =
                                tokio_postgres::binary_copy::BinaryCopyInWriter::new(copy_sink, types);
                            let mut copy_in_writer = pin!(copy_in_writer);
                            log::debug!("Writing {} log events to the database", logs_buffer.len());
                            'write: for (timestamp, event) in logs_buffer.drain(..) {
                                let target = &event.target;
                                let event_id = &event.event_id;
                                let parent_event_id = &event.parent_event_id;
                                let event_type = &event.event_type;
                                let message = &event.message;
                                let body = &event.body;
                                let level = &event.level.as_str();
                                let copy_result = copy_in_writer
                                    .as_mut()
                                    .write(&[
                                        &namespace,
                                        &version,
                                        target,
                                        &timestamp,
                                        event_id,
                                        parent_event_id,
                                        level,
                                        event_type,
                                        message,
                                        body,
                                    ])
                                    .await;
                                if let Err(e) = copy_result {
                                    log::error!("Error writing to PostgreSQL: {e}");
                                    break 'write;
                                }
                            }
                            // finish() flushes the COPY; without it nothing
                            // is committed.
                            let copy_in_finish_res = copy_in_writer.finish().await;
                            if let Err(e) = copy_in_finish_res {
                                log::error!("Error writing to PostgreSQL: {e}");
                                continue 'recv;
                            }
                        }
                        Err(e) => {
                            log::error!(target: "telemetry", "Error writing to the database: {e}")
                        }
                    }
                }
            }
        }
    })
}
| rust | MIT | e11015f009992d1f6399cd5a18bddeaab2b443ec | 2026-01-04T20:17:10.449299Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.