repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/visualization/data_segmenter.rs | src/visualization/data_segmenter.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
/// The [DataSegmenter] struct implements an iterator that yields exactly `divisor`
/// values whose sum is exactly `dividend`. The values are as evenly distributed as
/// possible between `base` and `base + 1`.
pub struct DataSegmenter {
divisor: usize,
base: usize,
remainder_step: usize,
remainder_accum: usize,
count: usize,
}
impl DataSegmenter {
pub fn new(dividend: usize, divisor: usize) -> DataSegmenter {
// Avoid dividing by zero
assert!(divisor > 0, "divisor must be > 0");
let base = dividend / divisor;
let remainder = dividend % divisor;
DataSegmenter {
divisor,
base,
remainder_step: remainder,
remainder_accum: 0,
count: 0,
}
}
}
impl Iterator for DataSegmenter {
type Item = usize;
/// Return the next item in the sequence, or None if we've yielded `divisor` items.
fn next(&mut self) -> Option<Self::Item> {
// If we've already yielded everything, return None
if self.count >= self.divisor {
return None;
}
self.count += 1;
// Accumulate remainder
self.remainder_accum += self.remainder_step;
// If remainder_accum >= divisor, we've crossed a boundary
// that means we add an extra +1 for this element.
if self.remainder_accum >= self.divisor {
self.remainder_accum -= self.divisor;
Some(self.base + 1)
}
else {
Some(self.base)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn data_segmenter_even_distribution() {
let segmenter = DataSegmenter::new(10, 5);
let result: Vec<usize> = segmenter.collect();
assert_eq!(result, vec![2, 2, 2, 2, 2]);
}
#[test]
fn data_segmenter_uneven_distribution() {
let segmenter = DataSegmenter::new(20, 6);
let result: Vec<usize> = segmenter.collect();
assert_eq!(result, vec![3, 3, 4, 3, 3, 4]);
}
#[test]
fn data_segmenter_single_divisor() {
let segmenter = DataSegmenter::new(10, 1);
let result: Vec<usize> = segmenter.collect();
assert_eq!(result, vec![10]);
}
#[test]
fn data_segmenter_dividend_zero() {
let segmenter = DataSegmenter::new(0, 5);
let result: Vec<usize> = segmenter.collect();
assert_eq!(result, vec![0, 0, 0, 0, 0]);
}
#[test]
fn data_segmenter_divisor_greater_than_dividend() {
let segmenter = DataSegmenter::new(3, 5);
let result: Vec<usize> = segmenter.collect();
assert_eq!(result, vec![0, 1, 0, 1, 1]);
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/visualization/vectorize_disk.rs | src/visualization/vectorize_disk.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! Methods for emitting lists of VizSector objects from the elements of a
//! track.
use crate::{
track_schema::GenericTrackElement,
types::DiskCh,
visualization::{
collect_metadata,
collect_streams,
data_segmenter::DataSegmenter,
metadata,
stream,
types::{
display_list::{VizDataSliceDisplayList, *},
shapes::{
VizArc,
VizCircle,
VizDataSlice,
VizElement,
VizElementFlags,
VizElementInfo,
VizPoint2d,
VizQuadraticArc,
VizSector,
VizShape,
},
},
CommonVizParams,
DiskHitTestResult,
RenderDiskHitTestParams,
RenderDiskSelectionParams,
RenderGeometry,
RenderTrackDataParams,
RenderTrackMetadataParams,
RenderVectorizationParams,
RenderWinding,
TurningDirection,
},
DiskImage,
DiskVisualizationError,
MAX_CYLINDER,
};
use std::{
cmp::min,
f32::consts::{PI, TAU},
ops::Range,
};
pub struct CalcElementParams {
pub center: VizPoint2d<f32>,
pub start_angle: f32,
pub end_angle: f32,
pub inner_radius: f32,
pub outer_radius: f32,
pub ch: DiskCh,
pub color: u32,
pub flags: VizElementFlags,
pub element: Option<VizElementInfo>,
}
/// Create a [VizElementDisplayList] collection for a single side of a [DiskImage].
/// # Arguments:
/// - `disk_image`: The [DiskImage] to render.
/// - `p`: A reference to a [CommonVizParams] object containing the parameters common to all
/// visualization functions.
/// - `r`: A reference to a [RenderTrackMetadataParams] object containing the parameters for
/// rendering the disk.
#[deprecated(
since = "0.1.0",
note = "Can generate un-renderable geometry. Please use `vectorize_disk_elements_by_quadrants` instead"
)]
pub fn vectorize_disk_elements(
disk_image: &DiskImage,
p: &CommonVizParams,
r: &RenderTrackMetadataParams,
) -> Result<VizElementDisplayList, DiskVisualizationError> {
let r_tracks = collect_streams(r.side, disk_image);
let r_metadata = collect_metadata(r.side, disk_image);
let num_tracks = min(r_tracks.len(), p.track_limit.unwrap_or(MAX_CYLINDER));
if num_tracks == 0 {
return Err(DiskVisualizationError::NoTracks);
}
log::debug!("visualize_disk_elements(): Rendering {} tracks", num_tracks);
let mut display_list = VizElementDisplayList::new(p.direction, r.side, num_tracks as u16);
// Maximum size of a metadata item that can overlap the index without being excluded
// from rendering. Large sectors (8192 bytes) will fill the entire disk surface, so are not
// particularly useful to render.
let overlap_max = (1024 + 6) * 16;
let outer_radius = p.radius.unwrap_or(0.5);
let mut min_radius = p.min_radius_ratio * outer_radius; // Scale min_radius to pixel value
// If pinning has been specified, adjust the minimum radius.
// We subtract any over-dumped tracks from the radius, so that the minimum radius fraction
// is consistent with the last standard track.
min_radius = if p.pin_last_standard_track {
let normalized_track_ct = match num_tracks {
0..50 => 40,
50..90 => 80,
90.. => 160,
};
let track_width = (outer_radius - min_radius) / normalized_track_ct as f32;
let overdump = num_tracks.saturating_sub(normalized_track_ct);
p.min_radius_ratio * outer_radius - (overdump as f32 * track_width)
}
else {
min_radius
};
// Calculate the rendered width of each track, excluding the track gap.
let track_width = (outer_radius - min_radius) / num_tracks as f32;
let center = VizPoint2d::from((outer_radius, outer_radius));
let (clip_start, clip_end) = match p.direction {
TurningDirection::Clockwise => (0.0, TAU),
TurningDirection::CounterClockwise => (0.0, TAU),
};
// We loop twice, once drawing all non-element markers, then drawing marker elements.
// The reason for this is that markers are small and may be overwritten by overlapping sector
// data elements. This guarantees that markers are emitted last, and thus rendered on top of
// all other elements.
for draw_markers in [false, true].iter() {
for (ti, track_meta) in r_metadata.iter().enumerate() {
let mut has_elements = false;
let outer_radius = outer_radius - (ti as f32 * track_width);
let inner_radius = outer_radius - (track_width * (1.0 - p.track_gap));
// Look for non-marker elements crossing the index, and emit them first.
// These elements will be clipped at the index boundary, so we will have at least two
// display list entries for each element that crosses the index.
if !r.draw_sector_lookup && !*draw_markers {
for meta_item in track_meta.items.iter() {
if meta_item.end >= r_tracks[ti].len() {
let meta_length = meta_item.end - meta_item.start;
let overlap_long = meta_length > overlap_max;
log::trace!(
"vectorize_disk_elements(): Overlapping metadata item at {}-{} len: {} max: {} long: {}",
meta_item.start,
meta_item.end,
meta_length,
overlap_max,
overlap_long,
);
has_elements = true;
let mut start_angle;
let mut end_angle;
if overlap_long {
start_angle = p.index_angle;
end_angle = p.index_angle
+ ((((meta_item.start + overlap_max) % r_tracks[ti].len()) as f32
/ r_tracks[ti].len() as f32)
* TAU);
}
else {
start_angle = p.index_angle;
end_angle = p.index_angle + ((meta_item.end as f32 / r_tracks[ti].len() as f32) * TAU);
}
if start_angle > end_angle {
std::mem::swap(&mut start_angle, &mut end_angle);
}
(start_angle, end_angle) = match p.direction {
TurningDirection::Clockwise => (start_angle, end_angle),
TurningDirection::CounterClockwise => (TAU - start_angle, TAU - end_angle),
};
if start_angle > end_angle {
std::mem::swap(&mut start_angle, &mut end_angle);
}
// Skip sectors that are outside the current quadrant
if end_angle <= clip_start || start_angle >= clip_end {
continue;
}
// Clamp start and end angle to quadrant boundaries
if start_angle < clip_start {
start_angle = clip_start;
}
if end_angle > clip_end {
end_angle = clip_end;
}
let overlap_sector = VizSector::from_angles(
&VizPoint2d::new(center.x, center.y),
r.winding,
start_angle,
end_angle,
inner_radius,
outer_radius,
);
let mut flags = VizElementFlags::default();
flags.set(VizElementFlags::OVERLAP_LONG, overlap_long);
let generic_element = GenericTrackElement::from(meta_item.element);
let element_info = VizElementInfo::new(
generic_element,
DiskCh::new(ti as u16, r.side),
meta_item.chsn,
None,
None,
None,
);
let overlap_metadata = VizElement::new(overlap_sector, flags, element_info);
display_list.push(ti, overlap_metadata);
}
}
}
let mut phys_s: u8 = 0; // Physical sector index, 0-indexed from first sector on track
log::debug!("vectorize_disk_elements(): Rendering elements on track {}", ti);
// Draw non-overlapping metadata.
for (_mi, meta_item) in track_meta.items.iter().enumerate() {
log::debug!("vectorize_disk_elements(): Rendering element at {}", _mi);
let generic_element = GenericTrackElement::from(meta_item.element);
match generic_element {
GenericTrackElement::Marker { .. } if !*draw_markers => {
continue;
}
GenericTrackElement::Marker { .. } => {}
_ if *draw_markers => {
continue;
}
_ => {}
}
// Advance physical sector number for each sector header encountered.
if meta_item.element.is_sector_header() {
phys_s = phys_s.wrapping_add(1);
}
has_elements = true;
let mut start_angle = ((meta_item.start as f32 / r_tracks[ti].len() as f32) * TAU) + p.index_angle;
let mut end_angle = ((meta_item.end as f32 / r_tracks[ti].len() as f32) * TAU) + p.index_angle;
if start_angle > end_angle {
std::mem::swap(&mut start_angle, &mut end_angle);
}
// Invert the angles for clockwise rotation
(start_angle, end_angle) = match p.direction {
TurningDirection::Clockwise => (start_angle, end_angle),
TurningDirection::CounterClockwise => (TAU - start_angle, TAU - end_angle),
};
// Normalize the angle to the range 0..2π
// start_angle = (start_angle % TAU).abs();
// end_angle = (end_angle % TAU).abs();
// Exchange start and end if reversed
if start_angle > end_angle {
std::mem::swap(&mut start_angle, &mut end_angle);
}
// // Skip sectors that are outside the current quadrant
// if end_angle <= clip_start || start_angle >= clip_end {
// continue;
// }
// Clip the elements to one revolution
if start_angle < clip_start {
start_angle = clip_start;
}
if end_angle > clip_end {
end_angle = clip_end;
}
let element_sector = VizSector::from_angles(
&VizPoint2d::new(center.x, center.y),
r.winding,
start_angle,
end_angle,
inner_radius,
outer_radius,
);
let element_flags = VizElementFlags::default();
let element_info =
VizElementInfo::new(generic_element, DiskCh::new(ti as u16, r.side), None, None, None, None);
let element_metadata = VizElement::new(element_sector, element_flags, element_info);
log::debug!("vectorize_disk_elements(): Pushing element to display list");
display_list.push(ti, element_metadata);
}
// If a track contained no elements and 'draw_empty_tracks' is set, emit a `NullElement`
// that fills the entire track.
if !has_elements && r.draw_empty_tracks {
let element_sector = VizSector::from_angles(
&VizPoint2d::new(center.x, center.y),
r.winding,
clip_start,
clip_end,
inner_radius,
outer_radius,
);
let mut element_flags = VizElementFlags::default();
element_flags.set(VizElementFlags::TRACK, true);
let element_info = VizElementInfo::new(
GenericTrackElement::NullElement,
DiskCh::new(ti as u16, r.side),
None,
None,
None,
None,
);
let element_metadata = VizElement::new(element_sector, element_flags, element_info);
display_list.push(ti, element_metadata);
}
}
}
Ok(display_list)
}
/// Create a [VizElementDisplayList] collection for a single side of a [DiskImage], splitting visual
/// elements into quadrants. This is useful when rendering a display list with a graphics library
/// that does not handle rendering major arcs gracefully. This function will render the elements
/// for the specified quadrant, or all quadrants if none are specified.
///
/// Quadrants of the circle are defined by the unit circle as:
/// `/1 0\`
/// `\2 3/`
/// Note that the order is reversed in Clockwise turning direction.
///
/// Quadrants are rendered counter-clockwise, starting from the top right quadrant (0). The order
/// of quadrant rendering is independent of the data turning direction.
///
/// # Arguments:
/// - `disk_image`: The [DiskImage] to render.
/// - `p`: A reference to a [CommonVizParams] object containing the parameters common to all
/// visualization functions.
/// - `r`: A reference to a [RenderTrackMetadataParams] object containing the parameters specific
/// to rendering metadata elements. The `quadrant` parameter specifies which quadrant to
/// render, or all quadrants if `None`.
///
/// # Returns:
/// A [VizElementDisplayList] containing the elements to render, or a [DiskVisualizationError] if
/// an error occurred, such as no tracks being found.
pub fn vectorize_disk_elements_by_quadrants(
disk: &DiskImage,
p: &CommonVizParams,
r: &RenderTrackMetadataParams,
) -> Result<VizElementDisplayList, DiskVisualizationError> {
// Render the specified quadrant if provided, otherwise render all quadrants.
let quadrant_list = r.quadrant.map(|q| vec![q]).unwrap_or(vec![0, 1, 2, 3]);
// Maximum size of a metadata item that can overlap the index without being excluded
// from rendering. Large sectors (8192 bytes) will fill the entire disk surface, so are not
// particularly useful to render.
let overlap_max = (1024 + 6) * 16;
// Collect streams.
let r_tracks = collect_streams(r.side, disk);
let r_metadata = collect_metadata(r.side, disk);
if r_tracks.len() != r_metadata.len() {
return Err(DiskVisualizationError::InvalidParameter(
"Mismatched track and metadata lengths".to_string(),
));
}
let tp = p.track_params(disk.track_ct(r.side as usize))?;
let num_tracks = min(r_tracks.len(), p.track_limit.unwrap_or(MAX_CYLINDER));
if num_tracks == 0 {
return Err(DiskVisualizationError::NoTracks);
}
let mut display_list = VizElementDisplayList::new(p.direction, r.side, num_tracks as u16);
log::debug!(
"vectorize_disk_elements_by_quadrants(): Rendering {} tracks over quadrants: {:?}",
num_tracks,
quadrant_list
);
// Loop through each track and the track element metadata for each track.
for (ti, track_meta) in r_metadata.iter().enumerate() {
let (outer, middle, inner) = tp.radii(ti, true);
// Loop through each quadrant and render the elements for that quadrant.
for quadrant in &quadrant_list {
// Set the appropriate clipping angles for the current quadrant.
let quadrant_angles = match quadrant & 0x03 {
0 => (0.0, PI / 2.0),
1 => (PI / 2.0, PI),
2 => (PI, 3.0 * PI / 2.0),
3 => (3.0 * PI / 2.0, TAU),
_ => unreachable!(),
};
let (clip_start, clip_end) = (quadrant_angles.0, quadrant_angles.1);
// Emit a NullElement arc for this quadrant to represent the track background.
// Note: must be a cubic arc due to 90-degree angle.
let track_quadrant_arc = VizArc::from_angles(&tp.center, middle, clip_start, clip_end);
let mut track_quadrant_flags = VizElementFlags::default();
track_quadrant_flags.set(VizElementFlags::TRACK, true);
// If no elements on this track, also set the empty track flag
if track_meta.items.is_empty() {
track_quadrant_flags.set(VizElementFlags::EMPTY_TRACK, true);
}
let element_info = VizElementInfo::new(
GenericTrackElement::NullElement,
DiskCh::new(ti as u16, r.side),
None,
None,
None,
None,
);
let element_metadata = VizElement::new(
(track_quadrant_arc, tp.render_track_width),
track_quadrant_flags,
element_info,
);
display_list.push(ti, element_metadata);
// Look for non-marker elements crossing the index, and emit them first.
// These elements will always be drawn in quadrant 0, clipped at the index
// boundary, so we will have at least two display list entries for each element
// that crosses the index.
if *quadrant == 0 && !r.draw_sector_lookup {
for meta_item in track_meta.items.iter() {
let generic_element = GenericTrackElement::from(meta_item.element);
if matches!(generic_element, GenericTrackElement::Marker) {
// Skip markers.
continue;
}
if meta_item.end >= r_tracks[ti].len() {
let meta_length = meta_item.end - meta_item.start;
let meta_overlap = meta_item.end % r_tracks[ti].len();
let overlap_long = meta_length > overlap_max;
log::trace!(
"vectorize_disk_elements_by_quadrants(): Overlapping metadata item at {}-{} len: {} max: {} long: {}",
meta_item.start,
meta_item.end,
meta_length,
overlap_max,
overlap_long,
);
let mut start_angle;
let mut end_angle;
if overlap_long {
start_angle = p.index_angle;
end_angle = p.index_angle
+ ((((meta_item.start + overlap_max) % r_tracks[ti].len()) as f32
/ r_tracks[ti].len() as f32)
* TAU);
}
else {
// The start angle is the index angle.
start_angle = p.index_angle;
end_angle = p.index_angle + ((meta_overlap as f32 / r_tracks[ti].len() as f32) * TAU);
}
if start_angle > end_angle {
std::mem::swap(&mut start_angle, &mut end_angle);
}
(start_angle, end_angle) = match p.direction {
TurningDirection::Clockwise => (start_angle, end_angle),
TurningDirection::CounterClockwise => (TAU - start_angle, TAU - end_angle),
};
if start_angle > end_angle {
std::mem::swap(&mut start_angle, &mut end_angle);
}
// Skip sectors that are outside the current quadrant
if end_angle <= clip_start || start_angle >= clip_end {
continue;
}
// Clamp start and end angle to quadrant boundaries
if start_angle < clip_start {
start_angle = clip_start;
}
if end_angle > clip_end {
end_angle = clip_end;
}
match r.geometry {
RenderGeometry::Sector => {
let overlap_sector =
VizSector::from_angles(&tp.center, r.winding, start_angle, end_angle, inner, outer);
let mut flags = VizElementFlags::default();
flags.set(VizElementFlags::OVERLAP_LONG, overlap_long);
let generic_element = GenericTrackElement::from(meta_item.element);
let element_info = VizElementInfo::new(
generic_element,
DiskCh::new(ti as u16, r.side),
meta_item.chsn,
None,
None,
None,
);
let overlap_metadata = VizElement::new(overlap_sector, flags, element_info);
display_list.push(ti, overlap_metadata);
}
RenderGeometry::Arc => {
let overlap_arc = VizArc::from_angles(&tp.center, middle, start_angle, end_angle);
let mut flags = VizElementFlags::default();
flags.set(VizElementFlags::OVERLAP_LONG, overlap_long);
let generic_element = GenericTrackElement::from(meta_item.element);
let element_info = VizElementInfo::new(
generic_element,
DiskCh::new(ti as u16, r.side),
meta_item.chsn,
None,
None,
None,
);
let overlap_metadata =
VizElement::new((overlap_arc, tp.render_track_width), flags, element_info);
display_list.push(ti, overlap_metadata);
}
}
}
}
}
// We loop through each track quadrant twice, once drawing all non-element markers, then
// drawing marker elements.
// The reason for this is that markers are small and may be overwritten by overlapping
// sector data elements. This guarantees that markers are emitted last, and thus
// rendered on top of all other elements.
for draw_markers in [false, true].iter() {
let mut phys_s: u8 = 0; // Physical sector index, 0-indexed from first sector on track
// Draw non-overlapping metadata.
for (_mi, meta_item) in track_meta.items.iter().enumerate() {
let generic_element = GenericTrackElement::from(meta_item.element);
match generic_element {
GenericTrackElement::Marker if !*draw_markers => {
continue;
}
GenericTrackElement::Marker => {}
_ if *draw_markers => {
continue;
}
_ => {}
}
// Advance physical sector number for each sector header encountered.
if meta_item.element.is_sector_header() {
phys_s = phys_s.wrapping_add(1);
}
let mut start_angle = ((meta_item.start as f32 / r_tracks[ti].len() as f32) * TAU) + p.index_angle;
let mut end_angle = ((meta_item.end as f32 / r_tracks[ti].len() as f32) * TAU) + p.index_angle;
if start_angle > end_angle {
std::mem::swap(&mut start_angle, &mut end_angle);
}
// Invert the angles for clockwise rotation
(start_angle, end_angle) = match p.direction {
TurningDirection::Clockwise => (start_angle, end_angle),
TurningDirection::CounterClockwise => (TAU - start_angle, TAU - end_angle),
};
// Normalize the angle to the range 0..2π
// start_angle = (start_angle % TAU).abs();
// end_angle = (end_angle % TAU).abs();
// Exchange start and end if reversed
if start_angle > end_angle {
std::mem::swap(&mut start_angle, &mut end_angle);
}
// Skip sectors that are outside the current quadrant
if end_angle <= clip_start || start_angle >= clip_end {
continue;
}
// Clamp start and end angle to quadrant boundaries
start_angle = start_angle.max(clip_start);
end_angle = end_angle.min(clip_end);
match r.geometry {
RenderGeometry::Sector => {
let element_sector =
VizSector::from_angles(&tp.center, r.winding, start_angle, end_angle, inner, outer);
let flags = VizElementFlags::default();
let generic_element = GenericTrackElement::from(meta_item.element);
let element_info = VizElementInfo::new(
generic_element,
DiskCh::new(ti as u16, r.side),
meta_item.chsn,
None,
None,
None,
);
let element_metadata = VizElement::new(element_sector, flags, element_info);
display_list.push(ti, element_metadata);
}
RenderGeometry::Arc => {
let overlap_arc = VizArc::from_angles(&tp.center, middle, start_angle, end_angle);
let flags = VizElementFlags::default();
let generic_element = GenericTrackElement::from(meta_item.element);
let element_info = VizElementInfo::new(
generic_element,
DiskCh::new(ti as u16, r.side),
meta_item.chsn,
None,
None,
None,
);
let overlap_metadata =
VizElement::new((overlap_arc, tp.render_track_width), flags, element_info);
display_list.push(ti, overlap_metadata);
}
}
}
}
}
}
Ok(display_list)
}
/// Return a [VizElementDisplayList] representing a selection on a disk image.
/// The selection will be divided into multiple display elements if it exceeds 90 degrees.
/// # Arguments:
/// - `disk_image`: The [DiskImage] to render.
/// - `p`: A reference to a [CommonVizParams] object containing the parameters common to all
/// visualization functions.
/// - `r`: A reference to a [RenderDiskSelectionParams] object containing the parameters for
/// rendering the disk selection.
pub fn vectorize_disk_selection(
disk_image: &DiskImage,
p: &CommonVizParams,
r: &RenderDiskSelectionParams,
) -> Result<VizElementDisplayList, DiskVisualizationError> {
let track = stream(r.ch, disk_image);
let track_len = track.len();
let r_metadata = metadata(r.ch, disk_image);
let track_limit = p.track_limit.unwrap_or(MAX_CYLINDER);
let num_tracks = min(disk_image.tracks(r.ch.h()) as usize, track_limit);
if num_tracks == 0 {
return Err(DiskVisualizationError::NoTracks);
}
if r.ch.c() >= num_tracks as u16 {
return Err(DiskVisualizationError::InvalidParameter(
"Invalid track number".to_string(),
));
}
let mut display_list = VizElementDisplayList::new(p.direction, r.ch.h(), num_tracks as u16);
// If no radius was specified, default to 0.5 - this creates a display list that is in the
// range [(0,0)-(1,1)], suitable for transformations as desired by the output rasterizer.
let total_radius = p.radius.unwrap_or(0.5);
let mut min_radius = p.min_radius_ratio * total_radius; // Scale min_radius to pixel value
// If pinning has been specified, adjust the minimum radius.
// We subtract any over-dumped tracks from the radius, so that the minimum radius fraction
// is consistent with the last standard track.
min_radius = if p.pin_last_standard_track {
let normalized_track_ct = match num_tracks {
0..50 => 40,
50..90 => 80,
90.. => 160,
};
let track_width = (total_radius - min_radius) / normalized_track_ct as f32;
let overdump = num_tracks.saturating_sub(normalized_track_ct);
p.min_radius_ratio * total_radius - (overdump as f32 * track_width)
}
else {
min_radius
};
let track_width = (total_radius - min_radius) / num_tracks as f32;
let center = VizPoint2d::from((total_radius, total_radius));
let (clip_start, clip_end) = match p.direction {
TurningDirection::Clockwise => (0.0, TAU),
TurningDirection::CounterClockwise => (0.0, TAU),
};
for draw_markers in [false, true].iter() {
let ti = r.ch.c() as usize;
let track_meta = r_metadata;
let outer_radius = total_radius - (ti as f32 * track_width);
let inner_radius = outer_radius - (track_width * (1.0 - p.track_gap));
let mut phys_s: u8 = 0; // Physical sector index, 0-indexed from first sector on track
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | true |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/visualization/types/display_list.rs | src/visualization/types/display_list.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! A [VizElementDisplayList] is a list of [VizElement] objects to be rendered.
//! Operations can be implemented on this list, such as scaling and rotation.
use crate::visualization::{
types::shapes::{VizDataSlice, VizElement},
TurningDirection,
};
/// A [VizElementDisplayList] is a list of [VizElement] objects to be rendered.
/// Operations can be implemented on this list, such as scaling and rotation.
#[derive(Clone)]
pub struct VizElementDisplayList {
pub turning: TurningDirection,
pub side: u8,
pub tracks: Vec<Vec<VizElement>>,
}
impl VizElementDisplayList {
pub fn new(turning: TurningDirection, side: u8, cylinders: u16) -> VizElementDisplayList {
VizElementDisplayList {
turning,
side,
tracks: vec![Vec::new(); cylinders as usize],
}
}
/// Push a [VizElement] onto the display list at the specified track.
/// If the track does not exist, nothing will happen.
pub fn push(&mut self, c: usize, element: VizElement) {
if c < self.tracks.len() {
self.tracks[c].push(element);
}
}
/// Return the total number of [VizElement]s in the display list.
pub fn len(&self) -> usize {
let mut total = 0;
for track in &self.tracks {
log::debug!("track.len() = {}", track.len());
total += track.len()
}
total
}
/// Rotate all items in the display list by the specified `angle` in radians.
/// ## Warning: This is a lossy operation. Multiple rotations will accumulate errors.
/// This feature is mostly designed for debugging and testing.
/// To properly rotate a visualization you should use a transformation matrix in your rendering
/// engine. See the `imgviz` example crate for an example of how to do this with `svg` and
/// `tiny_skia`, or the `ff_egui_lib` crate for an example of how to do this with `egui`.
// pub fn rotate(&mut self, angle: f32) {
// for track in &mut self.tracks {
// for element in track {
// element.rotate(angle);
// }
// }
// }
/// Return an Iterator that yields all the [VizElement]s in the display list,
/// in order, by track.
pub fn iter(&self) -> VizDisplayListIter {
let mut outer = self.tracks.iter();
// Initialize inner iterator with the first track
let inner = outer.next().map(|v| v.iter());
VizDisplayListIter { outer, inner }
}
/// Return a slice of the items in the display list at the specified track.
pub fn items(&self, c: usize) -> Option<&[VizElement]> {
self.tracks.get(c).map(|v| v.as_slice())
}
}
// Iterator struct
pub struct VizDisplayListIter<'a> {
outer: std::slice::Iter<'a, Vec<VizElement>>,
inner: Option<std::slice::Iter<'a, VizElement>>,
}
impl<'a> Iterator for VizDisplayListIter<'a> {
type Item = &'a VizElement;
fn next(&mut self) -> Option<Self::Item> {
loop {
if let Some(inner) = &mut self.inner {
if let Some(next_item) = inner.next() {
return Some(next_item);
}
}
// Move to the next outer track if the current inner is exhausted
self.inner = self.outer.next().map(|v| v.iter());
// If there are no more tracks, break out
if self.inner.is_none() {
return None;
}
}
}
}
/// A [VizDataSliceDisplayList] is a list of [VizDataSlice] objects to be rendered.
/// Operations can be implemented on this list, such as scaling and rotation.
pub struct VizDataSliceDisplayList {
pub min_density: f32,
pub max_density: f32,
pub track_width: f32,
pub turning: TurningDirection,
pub tracks: Vec<Vec<VizDataSlice>>,
}
// Iterator struct
pub struct VizDataDisplayListIter<'a> {
outer: std::slice::Iter<'a, Vec<VizDataSlice>>,
inner: Option<std::slice::Iter<'a, VizDataSlice>>,
}
impl<'a> Iterator for VizDataDisplayListIter<'a> {
    type Item = &'a VizDataSlice;
    /// Yield the next slice, advancing to the next track when the current
    /// one is exhausted. Returns `None` once every track has been drained.
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            // Drain the current track first, if one is active
            if let Some(item) = self.inner.as_mut().and_then(|it| it.next()) {
                return Some(item);
            }
            // Advance to the next track, or stop when none remain
            match self.outer.next() {
                Some(track) => self.inner = Some(track.iter()),
                None => return None,
            }
        }
    }
}
impl VizDataSliceDisplayList {
    /// Create a new display list with `cylinders` empty tracks and a default
    /// density range of [0.0, 1.0].
    pub fn new(turning: TurningDirection, cylinders: usize, track_width: f32) -> VizDataSliceDisplayList {
        VizDataSliceDisplayList {
            min_density: 0.0,
            max_density: 1.0,
            track_width,
            turning,
            tracks: vec![Vec::new(); cylinders],
        }
    }
    /// Set the rendered track width.
    pub fn set_track_width(&mut self, track_width: f32) {
        self.track_width = track_width;
    }
    /// Append `element` to track `c`. An out-of-range track index is silently
    /// ignored (deliberate best-effort behavior, preserved from the original).
    pub fn push(&mut self, c: usize, element: VizDataSlice) {
        if let Some(track) = self.tracks.get_mut(c) {
            track.push(element);
        }
    }
    /// Return the total number of slices across all tracks.
    pub fn len(&self) -> usize {
        self.tracks.iter().map(|track| track.len()).sum()
    }
    /// Return true if the display list contains no slices.
    pub fn is_empty(&self) -> bool {
        self.tracks.iter().all(|track| track.is_empty())
    }
    // Rotate all items in the display list by the specified `angle` in radians.
    // ## Warning: This is a lossy operation. Multiple rotations will accumulate errors.
    // This feature is mostly designed for debugging and testing.
    // To properly rotate a visualization you should use a transformation matrix in your rendering engine.
    // (Kept as plain `//` comments so they do not attach to `iter()`'s rustdoc.)
    // pub fn rotate(&mut self, angle: f32) {
    //     for track in &mut self.tracks {
    //         for element in track {
    //             element.rotate(angle);
    //         }
    //     }
    // }
    /// Produce an iterator that yields all the [VizDataSlice]s in the display
    /// list, in order, by track.
    pub fn iter(&self) -> VizDataDisplayListIter {
        let mut outer = self.tracks.iter();
        // Initialize inner iterator with the first track
        let inner = outer.next().map(|v| v.iter());
        VizDataDisplayListIter { outer, inner }
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/visualization/types/mod.rs | src/visualization/types/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
pub mod blend;
pub mod color;
pub mod display_list;
pub mod pixmap;
pub mod shapes;
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/visualization/types/pixmap.rs | src/visualization/types/pixmap.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! Defines a generic VizPixmap that is intended to be generally compatible
//! with tiny_skia's Pixmap without requiring a dependency on tiny_skia.
//! A [VizPixmap] represents a 32-bit RGBA image buffer with a width, height,
//! and u8 pixel buffer.
use crate::visualization::types::color::VizColor;
use bytemuck::cast_slice;
pub struct VizPixmap {
    /// Width of the image in pixels
    pub width: u32,
    /// Height of the image in pixels
    pub height: u32,
    /// Raw pixel buffer: 4 bytes per pixel (RGBA); length is width * height * 4
    pub pixel_data: Vec<u8>,
}
impl VizPixmap {
    /// Create a new pixmap of `width` x `height` pixels with every byte
    /// zeroed (transparent black in premultiplied RGBA).
    pub fn new(width: u32, height: u32) -> Self {
        // Promote to usize before multiplying: `width * height` in u32 can
        // overflow for large dimensions before the cast widens the result.
        let pixel_data = vec![0; width as usize * height as usize * 4];
        Self {
            width,
            height,
            pixel_data,
        }
    }
    /// Return the raw RGBA pixel buffer as a byte slice.
    pub fn pixel_data(&self) -> &[u8] {
        &self.pixel_data
    }
    /// Return the raw RGBA pixel buffer as a mutable byte slice.
    pub fn pixel_data_mut(&mut self) -> &mut [u8] {
        &mut self.pixel_data
    }
    /// Reinterpret the pixel buffer as a slice of [VizColor].
    /// Sound because the buffer length is a multiple of 4 by construction and
    /// `VizColor` is a 4-byte `#[repr(C)]` Pod type.
    pub fn as_vizcolor(&self) -> &[VizColor] {
        cast_slice(self.pixel_data())
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/visualization/types/shapes.rs | src/visualization/types/shapes.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! Methods to construct cubic and quadratic Bezier approximations of circular arcs.
//! These methods are used to represent track elements in the visualization layer.
//! The cubic approximation is used for longer arcs, while the quadratic approximation is used for
//! shorter arcs.
//!
//! Derived constants are used to generate quadrant (90 degree) arcs, taken from:
//! https://spencermortensen.com/articles/bezier-circle/
//!
//! I'm not the greatest at math so if you spot any optimizations here, please let me know!
use std::{
fmt::{Display, Formatter},
ops::{Add, Div, Range},
};
use crate::{
track_schema::GenericTrackElement,
types::DiskCh,
visualization::{RenderWinding, VizRotate},
};
use crate::types::DiskChsn;
#[cfg(feature = "tiny_skia")]
use crate::visualization::prelude::VizColor;
use bitflags::bitflags;
use core::fmt;
use num_traits::Num;
use std::ops::Mul;
#[cfg(feature = "tiny_skia")]
impl From<VizColor> for tiny_skia::Color {
    /// Convert a [VizColor] to a `tiny_skia::Color`, passing the 8-bit RGBA
    /// components through unchanged.
    #[inline]
    fn from(color: VizColor) -> tiny_skia::Color {
        tiny_skia::Color::from_rgba8(color.r, color.g, color.b, color.a)
    }
}
bitflags! {
    /// Per-element rendering flags carried by a [VizElement].
    #[derive (Clone, Debug, Default)]
    pub struct VizElementFlags: u32 {
        // No flags set
        const NONE = 0b0000_0000;
        // This element represents a section of an entire track and can be used to draw the track background
        const TRACK = 0b0000_0001;
        /// This element represents a section of an empty track
        const EMPTY_TRACK = 0b0000_0010;
        // This element crosses the index
        const OVERLAP = 0b0000_0100;
        // This element crosses the index, and is sufficiently long that it should be faded out
        const OVERLAP_LONG = 0b0000_1000;
        // This element represents a highlighted element
        const HIGHLIGHT = 0b0001_0000;
        // This element represents a selected element
        const SELECTED = 0b0010_0000;
    }
}
/// A [VizDimensions] represents the width and height of a rectangular region, such as a pixmap.
/// `x` holds the width and `y` the height, in integer units (e.g. pixels).
pub type VizDimensions = VizPoint2d<u32>;
/// A VizShape represents a shape that can be rendered in a visualization. This is a simple enum
/// that can represent a cubic Bezier arc, a quadratic Bezier arc, a sector, a circle, or a line.
/// The second parameter, if present, is the thickness of the shape. This should be used for the
/// stroke parameter during rendering.
#[derive(Copy, Clone, Debug)]
pub enum VizShape {
    /// A cubic Bézier arc and its stroke thickness
    CubicArc(VizArc, f32),
    /// A quadratic Bézier arc and its stroke thickness
    QuadraticArc(VizQuadraticArc, f32),
    /// An annular sector (an arc with radial thickness)
    Sector(VizSector),
    /// A circle and its stroke thickness
    Circle(VizCircle, f32),
    /// A line segment and its stroke thickness
    Line(VizLine<f32>, f32),
}
impl VizRotate for VizShape {
    /// Rotate the contained shape, preserving its stroke thickness.
    /// Circles and lines are returned unchanged — NOTE(review): a circle or
    /// line not centered on the rotation origin would move under a true
    /// rotation; confirm this passthrough is intentional.
    #[inline]
    fn rotate(self, rot: &VizRotation) -> VizShape {
        match self {
            VizShape::CubicArc(arc, thickness) => VizShape::CubicArc(arc.rotate(rot), thickness),
            VizShape::QuadraticArc(arc, thickness) => VizShape::QuadraticArc(arc.rotate(rot), thickness),
            VizShape::Sector(sector) => VizShape::Sector(sector.rotate(rot)),
            other => other,
        }
    }
}
/// A [VizLine] represents a line segment in 2D space.
#[derive(Copy, Clone, Debug)]
pub struct VizLine<T: Num + Copy + Default + Into<f64>> {
    /// First endpoint of the segment
    pub start: VizPoint2d<T>,
    /// Second endpoint of the segment
    pub end: VizPoint2d<T>,
}
impl<T: Num + Copy + Default> VizLine<T>
where
    f64: From<T>,
{
    /// Construct a line segment from two endpoints.
    pub fn new(start: VizPoint2d<T>, end: VizPoint2d<T>) -> VizLine<T> {
        VizLine { start, end }
    }
    /// Return the Euclidean length of the segment, computed in f64.
    pub fn length(&self) -> f64 {
        let run = f64::from(self.end.x - self.start.x);
        let rise = f64::from(self.end.y - self.start.y);
        (run * run + rise * rise).sqrt()
    }
}
impl<T: Num + Copy + Default> From<(T, T, T, T)> for VizLine<T>
where
    f64: From<T>,
{
    /// Build a line from an `(x1, y1, x2, y2)` tuple.
    fn from((x1, y1, x2, y2): (T, T, T, T)) -> Self {
        VizLine {
            start: VizPoint2d::from((x1, y1)),
            end: VizPoint2d::from((x2, y2)),
        }
    }
}
/// A [VizRect] represents a rectangle in 2D space. It is generic across numeric types, using
/// `num_traits`.
///
/// The rectangle is defined by two points, the top-left and bottom-right corners.
/// Methods are provided for calculating the width and height of the rectangle.
#[derive(Clone, Default, Debug)]
pub struct VizRect<T: Num + Copy + PartialOrd + Default> {
    /// Top-left corner (minimum x/y once normalized)
    pub top_left: VizPoint2d<T>,
    /// Bottom-right corner (maximum x/y once normalized)
    pub bottom_right: VizPoint2d<T>,
}
impl<T: Num + Copy + PartialOrd + Default> VizRect<T> {
    // Local min/max helpers: T is only PartialOrd (to admit floats), so
    // Ord::min/max are unavailable. With a NaN operand these return `b`.
    #[inline]
    fn min(a: T, b: T) -> T {
        if a < b {
            a
        }
        else {
            b
        }
    }
    #[inline]
    fn max(a: T, b: T) -> T {
        if a > b {
            a
        }
        else {
            b
        }
    }
    /// Construct a rectangle from its top-left and bottom-right corners.
    pub fn new(top_left: VizPoint2d<T>, bottom_right: VizPoint2d<T>) -> VizRect<T> {
        VizRect { top_left, bottom_right }
    }
    /// Construct a rectangle from `(x, y)` tuples for the two corners.
    pub fn from_tuple(top_left: (T, T), bottom_right: (T, T)) -> VizRect<T> {
        VizRect {
            top_left: VizPoint2d::from(top_left),
            bottom_right: VizPoint2d::from(bottom_right),
        }
    }
    /// Width of the rectangle (negative if the rectangle is not normalized).
    pub fn width(&self) -> T {
        self.bottom_right.x - self.top_left.x
    }
    /// Height of the rectangle (negative if the rectangle is not normalized).
    pub fn height(&self) -> T {
        self.bottom_right.y - self.top_left.y
    }
    /// Returns the intersection of two [VizRect] as a [VizRect], or returns `None` if they do not
    /// intersect. Both rectangles are assumed to be normalized.
    pub fn intersection(&self, other: &VizRect<T>) -> Option<VizRect<T>> {
        let top_left = VizPoint2d::new(
            Self::max(self.top_left.x, other.top_left.x),
            Self::max(self.top_left.y, other.top_left.y),
        );
        let bottom_right = VizPoint2d::new(
            Self::min(self.bottom_right.x, other.bottom_right.x),
            Self::min(self.bottom_right.y, other.bottom_right.y),
        );
        // A degenerate (zero-area) overlap still counts as an intersection
        if top_left.x <= bottom_right.x && top_left.y <= bottom_right.y {
            Some(VizRect::new(top_left, bottom_right))
        }
        else {
            None
        }
    }
    /// Returns the smallest bounding box that includes both [VizRect]s.
    pub fn bounding_box(&self, other: &VizRect<T>) -> VizRect<T> {
        let top_left = VizPoint2d::new(
            Self::min(self.top_left.x, other.top_left.x),
            Self::min(self.top_left.y, other.top_left.y),
        );
        let bottom_right = VizPoint2d::new(
            Self::max(self.bottom_right.x, other.bottom_right.x),
            Self::max(self.bottom_right.y, other.bottom_right.y),
        );
        VizRect::new(top_left, bottom_right)
    }
    /// Return whether the specified point is within Self (boundaries inclusive).
    pub fn contains_point(&self, point: &VizPoint2d<T>) -> bool {
        point.x >= self.top_left.x
            && point.x <= self.bottom_right.x
            && point.y >= self.top_left.y
            && point.y <= self.bottom_right.y
    }
    /// Return whether the specified rectangle lies entirely within Self.
    pub fn contains_rect(&self, other: &VizRect<T>) -> bool {
        self.contains_point(&other.top_left) && self.contains_point(&other.bottom_right)
    }
    /// Grow the rectangle by a factor, preserving the top-left corner position.
    /// If the factor is negative, the rectangle will flip across the top-left corner.
    pub fn grow_pinned(&self, factor: T) -> VizRect<T> {
        let new_width = self.width() * factor;
        let new_height = self.height() * factor;
        // Both new extents are measured from the pinned top-left corner.
        // (Bug fix: the y coordinate was previously computed from
        // bottom_right.y, which grew the rectangle by an extra full height
        // on the y axis and contradicted the documented pinning behavior.)
        let new_rect = VizRect {
            top_left: VizPoint2d::new(self.top_left.x, self.top_left.y),
            bottom_right: VizPoint2d::new(self.top_left.x + new_width, self.top_left.y + new_height),
        };
        new_rect.normalize()
    }
    /// Ensure that the top left coordinate is less than the bottom right coordinate.
    pub fn normalize(&self) -> VizRect<T> {
        let top_left = VizPoint2d::new(
            Self::min(self.top_left.x, self.bottom_right.x),
            Self::min(self.top_left.y, self.bottom_right.y),
        );
        let bottom_right = VizPoint2d::new(
            Self::max(self.top_left.x, self.bottom_right.x),
            Self::max(self.top_left.y, self.bottom_right.y),
        );
        VizRect::new(top_left, bottom_right)
    }
    /// Return the rectangle as a `(left, top, right, bottom)` tuple.
    pub fn to_tuple(&self) -> (T, T, T, T) {
        (
            self.top_left.x,
            self.top_left.y,
            self.bottom_right.x,
            self.bottom_right.y,
        )
    }
}
impl<T> VizRect<T>
where
    T: Num + Copy + Add<T, Output = T> + Div<T, Output = T> + From<f32> + PartialOrd + Default,
{
    /// Return the center point of the rectangle.
    pub fn center(&self) -> VizPoint2d<T> {
        let two = T::from(2.0);
        VizPoint2d::new(
            self.top_left.x + self.width() / two,
            self.top_left.y + self.height() / two,
        )
    }
}
impl From<(f32, f32, f32, f32)> for VizRect<f32> {
fn from(tuple: (f32, f32, f32, f32)) -> Self {
VizRect {
top_left: VizPoint2d::new(tuple.0, tuple.1),
bottom_right: VizPoint2d::new(tuple.2, tuple.3),
}
}
}
/// A rotation by `angle` radians about `center`, with the sine and cosine of
/// the angle precomputed so many points can be rotated cheaply.
#[derive(Copy, Clone, Debug)]
pub struct VizRotation {
    /// Rotation angle in radians
    pub angle: f32,
    /// Precomputed sin(angle)
    pub sin: f32,
    /// Precomputed cos(angle)
    pub cos: f32,
    /// Center of rotation
    pub center: VizPoint2d<f32>,
}
impl VizRotation {
    /// Create a rotation of `angle` radians about `center`.
    /// The sine and cosine are evaluated once here so that rotating many
    /// points does not re-evaluate them.
    pub fn new(angle: f32, center: VizPoint2d<f32>) -> VizRotation {
        let (sin, cos) = angle.sin_cos();
        VizRotation { angle, sin, cos, center }
    }
}
/// A [VizPoint2d] represents a point in 2D space in the range `[(0,0), (1,1)]`.
/// It is generic across numeric types, using `num_traits`.
/// NOTE(review): the type itself enforces no range, and integer instantiations
/// (e.g. [VizDimensions]) clearly exceed it — confirm the documented range
/// applies only to normalized f32 usage.
#[derive(Copy, Clone, Debug)]
pub struct VizPoint2d<T> {
    /// Horizontal component
    pub x: T,
    /// Vertical component
    pub y: T,
}
impl<T: Num + Copy + Default + Display> Display for VizPoint2d<T> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
impl<T: Num + Copy + Default> Default for VizPoint2d<T> {
fn default() -> Self {
VizPoint2d {
x: T::default(),
y: T::default(),
}
}
}
impl<T: Num + Copy + Default> From<(T, T)> for VizPoint2d<T> {
    /// Convert an `(x, y)` tuple into a point.
    fn from((x, y): (T, T)) -> Self {
        VizPoint2d { x, y }
    }
}
impl<T: Num + Copy + Default> VizPoint2d<T> {
pub fn new(x: T, y: T) -> Self {
VizPoint2d { x, y }
}
pub fn to_tuple(&self) -> (T, T) {
(self.x, self.y)
}
pub fn scale(&self, factor: T) -> VizPoint2d<T> {
VizPoint2d {
x: self.x * factor,
y: self.y * factor,
}
}
}
impl<T, Rhs> Mul<Rhs> for VizPoint2d<T>
where
    T: Num + Copy + Default + Mul<Rhs, Output = T>,
    // NOTE(review): the `Rhs: Mul<T>` bound does not appear to be used by the
    // body — confirm whether it can be dropped without breaking callers.
    Rhs: Num + Copy + Default + Mul<T>,
{
    // Because of the `T: Mul<Rhs, Output = T>` bound, this Output resolves to
    // VizPoint2d<T>, i.e. Self — which is why the body can build `Self`.
    type Output = VizPoint2d<<T as Mul<Rhs>>::Output>;
    /// Scale both components by `rhs`.
    fn mul(self, rhs: Rhs) -> Self::Output {
        Self {
            x: self.x * rhs,
            y: self.y * rhs,
        }
    }
}
impl VizRotate for VizPoint2d<f32> {
    /// Rotate the point about `rot.center` using the rotation's precomputed
    /// sine and cosine (standard 2D rotation matrix applied to the offset).
    #[inline]
    fn rotate(self, rot: &VizRotation) -> VizPoint2d<f32> {
        // Offset from the rotation center
        let (dx, dy) = (self.x - rot.center.x, self.y - rot.center.y);
        VizPoint2d {
            x: rot.center.x + dx * rot.cos - dy * rot.sin,
            y: rot.center.y + dx * rot.sin + dy * rot.cos,
        }
    }
}
/// A [VizArc] represents a cubic Bezier curve in 2D space.
#[derive(Copy, Clone, Debug)]
pub struct VizArc {
    pub start: VizPoint2d<f32>, // Start point of arc
    pub end:   VizPoint2d<f32>, // End point of arc
    pub cp1:   VizPoint2d<f32>, // 1st control point (nearest `start`)
    pub cp2:   VizPoint2d<f32>, // 2nd control point (nearest `end`)
}
impl VizArc {
    /// Calculate cubic Bézier parameters from a center point, radius, and start and end angles.
    /// This assumes the curve represents a segment of a circle.
    ///
    /// Angles are in radians; the approximation is intended for sub-180-degree
    /// sweeps (the module docs derive constants for quadrant arcs).
    /// NOTE(review): when the sweep is 0 or 180 degrees the cross product
    /// `ax*by - ay*bx` is zero and `k2` divides by zero — confirm callers
    /// never request such sweeps.
    pub fn from_angles(center: &VizPoint2d<f32>, radius: f32, start_angle: f32, end_angle: f32) -> VizArc {
        // Calculate start and end points with simple trigonometry
        let x1 = center.x + radius * start_angle.cos();
        let y1 = center.y + radius * start_angle.sin();
        let x4 = center.x + radius * end_angle.cos();
        let y4 = center.y + radius * end_angle.sin();
        // Compute relative vectors (A = start - center, B = end - center)
        let ax = x1 - center.x;
        let ay = y1 - center.y;
        let bx = x4 - center.x;
        let by = y4 - center.y;
        // Circular cubic approximation using (4/3).
        // q1 = |A|^2 = ax² + ay²
        // q2 = q1 + (A · B) = q1 + ax*bx + ay*by
        let q1 = ax * ax + ay * ay;
        let q2 = q1 + ax * bx + ay * by;
        // k2 scales the tangent offsets so the curve hugs the circle
        let k2 = (4.0 / 3.0) * ((2.0 * q1 * q2).sqrt() - q2) / (ax * by - ay * bx);
        // Reapply center offset; control points are the endpoints displaced
        // along the perpendicular (tangent) directions
        let (x2, y2) = (center.x + ax - k2 * ay, center.y + ay + k2 * ax);
        let (x3, y3) = (center.x + bx + k2 * by, center.y + by - k2 * bx);
        VizArc {
            start: VizPoint2d { x: x1, y: y1 },
            end: VizPoint2d { x: x4, y: y4 },
            cp1: VizPoint2d { x: x2, y: y2 },
            cp2: VizPoint2d { x: x3, y: y3 },
        }
    }
}
impl From<(VizArc, f32)> for VizShape {
    /// Wrap an `(arc, stroke_thickness)` pair as a [VizShape::CubicArc].
    #[inline]
    fn from((arc, thickness): (VizArc, f32)) -> VizShape {
        VizShape::CubicArc(arc, thickness)
    }
}
impl VizRotate for VizArc {
    /// Rotate the arc by rotating all four defining points.
    #[inline]
    fn rotate(self, rot: &VizRotation) -> VizArc {
        VizArc {
            start: self.start.rotate(rot),
            cp1: self.cp1.rotate(rot),
            cp2: self.cp2.rotate(rot),
            end: self.end.rotate(rot),
        }
    }
}
/// A [VizQuadraticArc] represents a quadratic Bézier curve in 2D space.
/// A lower order curve than a cubic Bézier, a Quadratic Bézier requires more curves to represent
/// the same shapes as a cubic Bezier, but is computationally simpler and requires one fewer control
/// point.
/// We can optimize the representation of short arcs with quadratic Bézier curves and use cubic
/// Bézier curves for longer arcs.
#[derive(Copy, Clone, Debug)]
pub struct VizQuadraticArc {
    pub start: VizPoint2d<f32>, // Start point of arc
    pub end:   VizPoint2d<f32>, // End point of arc
    pub cp:    VizPoint2d<f32>, // Control point (single, shared by both ends)
}
impl VizQuadraticArc {
    /// Calculate quadratic Bézier parameters from a center point, radius, and start and end angles.
    /// This assumes the curve represents a segment of a circle.
    ///
    /// The control point C is chosen so the curve passes through the arc's
    /// midpoint M at t = 0.5: since B(0.5) = (P0 + 2C + P2)/4, it follows that
    /// C = 2M - (P0 + P2)/2. Best suited to short arcs (see the type docs).
    pub fn from_angles(center: &VizPoint2d<f32>, radius: f32, start_angle: f32, end_angle: f32) -> VizQuadraticArc {
        // Calculate start and end points with simple trigonometry
        let x1 = center.x + radius * start_angle.cos();
        let y1 = center.y + radius * start_angle.sin();
        let x2 = center.x + radius * end_angle.cos();
        let y2 = center.y + radius * end_angle.sin();
        // Calculate the midpoint of the arc (on the circle, at the bisecting angle)
        let mid_angle = (start_angle + end_angle) * 0.5;
        let mx = center.x + radius * mid_angle.cos();
        let my = center.y + radius * mid_angle.sin();
        // Calculate the control point to represent a circular arc: C = 2M - (P0 + P2)/2
        let cx = 2.0 * mx - 0.5 * (x1 + x2);
        let cy = 2.0 * my - 0.5 * (y1 + y2);
        VizQuadraticArc {
            start: VizPoint2d { x: x1, y: y1 },
            end: VizPoint2d { x: x2, y: y2 },
            cp: VizPoint2d { x: cx, y: cy },
        }
    }
}
impl From<(VizQuadraticArc, f32)> for VizShape {
    /// Wrap an `(arc, stroke_thickness)` pair as a [VizShape::QuadraticArc].
    #[inline]
    fn from((arc, thickness): (VizQuadraticArc, f32)) -> VizShape {
        VizShape::QuadraticArc(arc, thickness)
    }
}
impl VizRotate for VizQuadraticArc {
    /// Rotate the arc by rotating all three defining points.
    #[inline]
    fn rotate(self, rot: &VizRotation) -> VizQuadraticArc {
        VizQuadraticArc {
            start: self.start.rotate(rot),
            cp: self.cp.rotate(rot),
            end: self.end.rotate(rot),
        }
    }
}
/// A [VizCircle] represents a simple circle with center point and radius.
#[derive(Copy, Clone, Debug)]
pub struct VizCircle {
    /// Center of the circle
    pub center: VizPoint2d<f32>,
    /// Radius of the circle
    pub radius: f32,
}
impl VizCircle {
    /// Construct a circle from a center point and radius.
    pub fn new(center: &VizPoint2d<f32>, radius: f32) -> VizCircle {
        VizCircle { center: *center, radius }
    }
}
impl From<(VizCircle, f32)> for VizShape {
    /// Wrap a `(circle, stroke_thickness)` pair as a [VizShape::Circle].
    #[inline]
    fn from((circle, thickness): (VizCircle, f32)) -> VizShape {
        VizShape::Circle(circle, thickness)
    }
}
/// A [VizSector] represents an arc with thickness, or an 'annular sector'. This may be literally
/// be a sector on a disk, but may represent other track elements or regions as well.
#[derive(Copy, Clone, Debug)]
pub struct VizSector {
    pub start: f32, // The angle at which the sector starts
    pub end: f32, // The angle at which the sector ends
    pub outer: VizArc, // Arc along the outer radius
    pub inner: VizArc, // Arc along the inner radius, swept opposite to `outer`
}
impl VizSector {
    /// Calculate a [VizSector] from a center point, start and end angles in radians, and an inner and
    /// outer radius. The winding direction controls which way the outer and
    /// inner arcs are swept so the outline forms a closed path.
    #[inline]
    pub fn from_angles(
        center: &VizPoint2d<f32>,
        render_winding: RenderWinding,
        start_angle: f32,
        end_angle: f32,
        inner_radius: f32,
        outer_radius: f32,
    ) -> VizSector {
        // The inner arc always runs opposite to the outer arc.
        let ((outer_from, outer_to), (inner_from, inner_to)) = match render_winding {
            RenderWinding::Clockwise => ((start_angle, end_angle), (end_angle, start_angle)),
            RenderWinding::CounterClockwise => ((end_angle, start_angle), (start_angle, end_angle)),
        };
        let outer = VizArc::from_angles(center, outer_radius, outer_from, outer_to);
        let inner = VizArc::from_angles(center, inner_radius, inner_from, inner_to);
        VizSector::from((start_angle, end_angle, outer, inner))
    }
}
impl From<VizSector> for VizShape {
#[inline]
fn from(sector: VizSector) -> VizShape {
VizShape::Sector(sector)
}
}
impl VizRotate for VizSector {
    /// Rotate the sector: both bounding arcs are rotated geometrically, and
    /// the stored start/end angles are advanced by the rotation angle.
    #[inline]
    fn rotate(self, rot: &VizRotation) -> VizSector {
        let VizSector { start, end, outer, inner } = self;
        VizSector {
            start: start + rot.angle,
            end: end + rot.angle,
            outer: outer.rotate(rot),
            inner: inner.rotate(rot),
        }
    }
}
/// A [VizElementInfo] represents all the information needed to render a track element in a visualization
/// as well as resolve the element back to the track, useful for interactive visualizations (e.g.,
/// selecting sectors with the mouse).
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct VizElementInfo {
    /// The type of element as [GenericTrackElement]
    pub element_type: GenericTrackElement,
    /// The physical track containing the element
    pub ch: DiskCh,
    /// The optional DiskChsn of the element, if it corresponds to a sector
    pub chsn: Option<DiskChsn>,
    /// The bit index of the element within the track.
    pub bit_range: Option<Range<usize>>,
    /// The index of the element within the track's element list.
    pub element_idx: Option<usize>,
    /// The physical index of the sector on the track, starting at 0 at the index.
    /// `None` for elements that are not sectors.
    pub sector_idx: Option<usize>,
}
impl VizElementInfo {
pub fn new(
element_type: GenericTrackElement,
ch: DiskCh,
chsn: Option<DiskChsn>,
bit_range: Option<Range<usize>>,
element_idx: Option<usize>,
sector_idx: Option<usize>,
) -> VizElementInfo {
VizElementInfo {
element_type,
ch,
chsn,
bit_range,
element_idx,
sector_idx,
}
}
}
impl Default for VizElementInfo {
    /// Default to a null element with default track address and no metadata.
    /// (Cannot be derived: `GenericTrackElement::NullElement` must be chosen
    /// explicitly.)
    fn default() -> VizElementInfo {
        Self {
            element_type: GenericTrackElement::NullElement,
            ch: DiskCh::default(),
            chsn: None,
            bit_range: None,
            element_idx: None,
            sector_idx: None,
        }
    }
}
/// A [VizElement] represents a [VizSector] with additional metadata, such as color and
/// track location.
#[derive(Clone, Debug)]
pub struct VizElement {
    pub shape: VizShape, // The shape of the element
    pub flags: VizElementFlags, // Flags to control rendering of the element
    pub info: VizElementInfo, // Metadata fields for the element (track address, sector id, etc.)
}
impl VizElement {
pub fn new(shape: impl Into<VizShape>, flags: VizElementFlags, info: VizElementInfo) -> VizElement {
VizElement {
shape: shape.into(),
flags,
info,
}
}
}
impl VizRotate for VizElement {
    /// Rotate the element's shape; flags and metadata are unaffected.
    #[inline]
    fn rotate(self, rot: &VizRotation) -> VizElement {
        let VizElement { shape, flags, info } = self;
        VizElement {
            shape: shape.rotate(rot),
            flags,
            info,
        }
    }
}
/// Convert a `(start_angle, end_angle, outer_arc, inner_arc)` tuple into a [VizSector].
impl From<(f32, f32, VizArc, VizArc)> for VizSector {
    #[inline]
    fn from((start, end, outer, inner): (f32, f32, VizArc, VizArc)) -> VizSector {
        VizSector { start, end, outer, inner }
    }
}
#[cfg(feature = "tiny_skia")]
impl From<VizPoint2d<f32>> for tiny_skia::Point {
    /// Convert a [VizPoint2d] to a `tiny_skia::Point`, component-wise.
    #[inline]
    fn from(p: VizPoint2d<f32>) -> tiny_skia::Point {
        tiny_skia::Point { x: p.x, y: p.y }
    }
}
#[cfg(feature = "tiny_skia")]
impl From<tiny_skia::Point> for VizPoint2d<f32> {
    /// Convert a `tiny_skia::Point` back into a [VizPoint2d], component-wise.
    #[inline]
    fn from(p: tiny_skia::Point) -> VizPoint2d<f32> {
        VizPoint2d { x: p.x, y: p.y }
    }
}
/// A slice of a track used in vector based data layer visualization.
#[derive(Clone, Debug)]
pub struct VizDataSlice {
    pub density: f32, // The ratio of 1 bits set to the total number of bits in the slice
    pub mapped_density: u8, // The density mapped to a u8 display value by the codec
    pub arc: VizQuadraticArc, // The slice arc (quadratic, since slices are short)
}
impl VizRotate for VizDataSlice {
    /// Rotate the slice's arc; the density values are orientation-independent
    /// and carried over unchanged.
    #[inline]
    fn rotate(self, rot: &VizRotation) -> VizDataSlice {
        VizDataSlice {
            arc: self.arc.rotate(rot),
            ..self
        }
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/visualization/types/color.rs | src/visualization/types/color.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use bytemuck::{Pod, Zeroable};
/// A [VizColor] represents a color in 32-bit premultiplied RGBA format.
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[repr(C)]
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
pub struct VizColor {
    /// Red channel (premultiplied by alpha)
    pub r: u8,
    /// Green channel (premultiplied by alpha)
    pub g: u8,
    /// Blue channel (premultiplied by alpha)
    pub b: u8,
    /// Alpha channel
    pub a: u8,
}
impl Default for VizColor {
fn default() -> VizColor {
VizColor::TRANSPARENT
}
}
#[rustfmt::skip]
impl VizColor {
    // Common colors, in (premultiplied) RGBA order.
    pub const TRANSPARENT: VizColor = VizColor { r: 0, g: 0, b: 0, a: 0 };
    pub const WHITE: VizColor = VizColor { r: 255, g: 255, b: 255, a: 255 };
    pub const BLACK: VizColor = VizColor { r: 0, g: 0, b: 0, a: 255 };
    pub const RED: VizColor = VizColor { r: 255, g: 0, b: 0, a: 255 };
    pub const GREEN: VizColor = VizColor { r: 0, g: 255, b: 0, a: 255 };
    pub const BLUE: VizColor = VizColor { r: 0, g: 0, b: 255, a: 255 };
    /// Construct a color from individual 8-bit RGBA components.
    /// `const` so colors can be defined at compile time, consistent with the
    /// associated color constants above.
    pub const fn from_rgba8(r: u8, g: u8, b: u8, a: u8) -> VizColor {
        VizColor { r, g, b, a }
    }
    /// Construct a grayscale color with `value` for all three color channels
    /// and the specified `alpha`.
    pub const fn from_value(value: u8, alpha: u8) -> VizColor {
        VizColor {
            r: value,
            g: value,
            b: value,
            a: alpha,
        }
    }
    /// Red component.
    #[inline]
    pub fn r(&self) -> u8 {
        self.r
    }
    /// Set the red component.
    #[inline]
    pub fn set_r(&mut self, r: u8) {
        self.r = r;
    }
    /// Green component.
    #[inline]
    pub fn g(&self) -> u8 {
        self.g
    }
    /// Set the green component.
    #[inline]
    pub fn set_g(&mut self, g: u8) {
        self.g = g;
    }
    /// Blue component.
    #[inline]
    pub fn b(&self) -> u8 {
        self.b
    }
    /// Set the blue component.
    #[inline]
    pub fn set_b(&mut self, b: u8) {
        self.b = b;
    }
    /// Alpha component.
    #[inline]
    pub fn a(&self) -> u8 {
        self.a
    }
    /// Set the alpha component.
    #[inline]
    pub fn set_a(&mut self, a: u8) {
        self.a = a;
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/visualization/types/blend.rs | src/visualization/types/blend.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! Generic blend mode definitions. These are a subset supported by both SVG (really CSS)
//! and the `tiny_skia` library.
use std::{
fmt,
fmt::{Display, Formatter},
};
/// All supported SVG `style` tag blending modes.
/// https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode
///
/// If the `serde` feature is enabled these will be available for deserialization,
/// such as from a config file (see the `imgviz` example for an example of this).
///
/// The blending mode is applied to the metadata layer when metadata visualization
/// is enabled in addition to data visualization.
///
/// The `Display` impl renders each variant as its CSS `mix-blend-mode` keyword.
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "lowercase"))]
#[derive(Copy, Clone, Debug, Default)]
pub enum VizBlendMode {
    #[default]
    Normal,
    Multiply,
    Screen,
    Overlay,
    Darken,
    Lighten,
    ColorDodge,
    ColorBurn,
    HardLight,
    SoftLight,
    Difference,
    Exclusion,
    Hue,
    Saturation,
    Color,
    Luminosity,
}
/// Implement the `Display` trait for `VizBlendMode`, in a fashion compatible with CSS names for
/// standard blend modes. This allows direct use of blend modes as CSS strings.
impl Display for VizBlendMode {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        use VizBlendMode::*;
        // Map each variant to its CSS keyword, then emit it in one write.
        let css = match self {
            Normal => "normal",
            Multiply => "multiply",
            Screen => "screen",
            Overlay => "overlay",
            Darken => "darken",
            Lighten => "lighten",
            ColorDodge => "color-dodge",
            ColorBurn => "color-burn",
            HardLight => "hard-light",
            SoftLight => "soft-light",
            Difference => "difference",
            Exclusion => "exclusion",
            Hue => "hue",
            Saturation => "saturation",
            Color => "color",
            Luminosity => "luminosity",
        };
        f.write_str(css)
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/bit_ring/mod.rs | src/bit_ring/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! A [BitRing] is a binary ring buffer, which can be indexed and iterated over
//! infinitely. It is intended to represent the bitstream of a disk track, which
//! is a continuous topological ring. Disk read operations can often wrap around
//! from the end of the track to the beginning, so [BitRing] is designed to
//! emulate this behavior.
use crate::io::{self, Read};
use bit_vec::BitVec;
use std::ops::Index;
/// A [BitRingIter] may be used to iterate over the bits of a [BitRing], producing
/// a sequence of `bool` values.
pub struct BitRingIter<'a> {
ring: &'a BitRing,
cursor: usize,
limit: Option<usize>, // Optional limit for one revolution
}
impl Iterator for BitRingIter<'_> {
type Item = bool;
fn next(&mut self) -> Option<Self::Item> {
if let Some(limit) = self.limit {
if self.cursor >= limit {
return None;
}
}
let bit = self.ring.bits[self.cursor];
self.cursor += 1;
Some(bit)
}
}
/// A [BitRing] is a binary ring buffer, which can be indexed and iterated over
/// infinitely. It is intended to represent the bitstream of a disk track, which
/// is a continuous topological ring. Disk read operations can often wrap around
/// from the end of the track to the beginning, and a [BitRing] can represent this
/// behavior.
///
/// [BitRing] is implemented as a wrapper around a [BitVec] from the bit_vec crate
/// (not to be confused with the bitvec crate).
///
/// A [BitRing] allows an overrideable length at which to wrap around, to support
/// adjustable track wrapping strategies. It may also be configured to return a
/// specific value when indexed beyond the wrap point - this is useful to ignore
/// any clock bits that may only be valid within the first revolution.
#[derive(Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct BitRing {
bits: BitVec,
wrap: usize,
cursor: usize,
wrap_value: Option<bool>,
}
/// Produce a [BitRing] from a [BitVec].
impl From<BitVec> for BitRing {
fn from(bits: BitVec) -> BitRing {
let wrap = bits.len();
BitRing {
bits,
wrap,
cursor: 0,
wrap_value: None,
}
}
}
/// Produce a [BitRing] from a byte slice.
impl From<&[u8]> for BitRing {
fn from(bytes: &[u8]) -> BitRing {
let bits = BitVec::from_bytes(bytes);
let wrap = bits.len();
BitRing {
bits,
wrap,
cursor: 0,
wrap_value: None,
}
}
}
#[allow(dead_code)]
impl BitRing {
/// Return an infinite iterator ([BitRingIter]) over the bits of the [BitRing], starting at
/// the beginning of the track.
pub fn iter(&self) -> BitRingIter {
BitRingIter {
ring: self,
cursor: 0,
limit: None,
}
}
/// Return a single-revolution iterator ([BitRingIter]) over the bits of the [BitRing], starting
/// at the beginning of the track and ending at the wrap point.
pub fn iter_revolution(&self) -> BitRingIter {
BitRingIter {
ring: self,
cursor: 0,
limit: Some(self.wrap),
}
}
/// Create a new [BitRing] from a byte slice.
pub fn from_bytes(bytes: &[u8]) -> BitRing {
BitRing::from(bytes)
}
/// Create a new [BitRing] with the specified length, containing the specified element.
pub fn from_elem(len: usize, elem: bool) -> BitRing {
BitRing {
bits: BitVec::from_elem(len, elem),
wrap: len,
cursor: 0,
wrap_value: None,
}
}
/// Return the length of the [BitRing] in bits.
#[inline]
pub fn len(&self) -> usize {
self.bits.len()
}
/// Return a bool indicating if the [BitRing] is empty.
#[inline]
pub fn is_empty(&self) -> bool {
self.bits.is_empty()
}
/// Return the wrapping point of the [BitRing].
#[inline]
pub fn wrap_len(&self) -> usize {
self.wrap
}
/// Return a reference to the underlying [BitVec] representation.
#[inline]
pub fn bits(&self) -> &BitVec {
&self.bits
}
#[inline]
pub fn bits_mut(&mut self) -> &mut BitVec {
&mut self.bits
}
/// Return a copy of the [BitRing] data as a byte vector.
/// Data beyond the bit length of the [BitRing] is undefined.
#[inline]
pub fn to_bytes(&self) -> Vec<u8> {
self.bits.to_bytes()
}
/// Set the bit at `index` to the value of `bit`
#[inline]
pub fn set(&mut self, index: usize, bit: bool) {
if index < self.wrap {
self.bits.set(index, bit);
}
else {
self.bits.set(index % self.wrap, bit);
}
}
/// Set the wrapping point of the BitRing.
/// `set_wrap` will not allow the length to be set longer than the underlying [BitVec].
pub fn set_wrap(&mut self, wrap_len: usize) {
self.wrap = std::cmp::min(wrap_len, self.bits.len());
}
/// Set an override value to return when the index wraps around. `None` will return the actual
/// value, while `Some(bool)` will return the specified override value.
pub fn set_wrap_value(&mut self, wrap_value: impl Into<Option<bool>>) {
self.wrap_value = wrap_value.into();
}
#[inline]
fn incr_cursor(&mut self) {
self.cursor = self.wrap_cursor(self.cursor + 1);
}
#[inline]
fn wrap_cursor(&self, cursor: usize) -> usize {
if cursor != cursor % self.wrap {
log::warn!("Cursor wrapped around at {}", cursor);
}
cursor % self.wrap
}
}
impl Iterator for BitRing {
type Item = bool;
fn next(&mut self) -> Option<Self::Item> {
let bit = self.bits[self.cursor];
self.incr_cursor();
Some(bit)
}
}
impl Read for BitRing {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut read = 0;
for buf_byte in buf.iter_mut() {
let mut byte = 0;
for _ in 0..8 {
byte = (byte << 1) | (self.bits[self.cursor] as u8);
self.incr_cursor();
}
*buf_byte = byte;
read += 1;
}
Ok(read)
}
}
impl Index<usize> for BitRing {
type Output = bool;
fn index(&self, index: usize) -> &Self::Output {
if index < self.wrap {
&self.bits[index]
}
else {
if index == self.wrap {
//log::debug!("Index wrapped around at {}", index);
}
self.wrap_value
.as_ref()
.unwrap_or_else(|| &self.bits[index % self.wrap])
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use bit_vec::BitVec;
#[test]
fn test_from_bitvec() {
// Initialize BitRing from a BitVec
let bits = BitVec::from_bytes(&[0b1010_1010]);
let ring = BitRing::from(bits.clone());
assert_eq!(ring.len(), bits.len());
for i in 0..bits.len() {
assert_eq!(ring[i], bits[i]);
}
}
#[test]
fn test_from_bytes() {
// Initialize BitRing from a byte slice
let bytes = &[0b1010_1010, 0b1100_1100];
let ring = BitRing::from_bytes(bytes);
let expected_bits = BitVec::from_bytes(bytes);
for i in 0..expected_bits.len() {
assert_eq!(ring[i], expected_bits[i]);
}
}
#[test]
fn test_wrap_behavior_no_wrap_value() {
// Test wrapping behavior with no wrap_value set
let bits = BitVec::from_bytes(&[0b1010_1010]); // 8 bits
let mut ring = BitRing::from(bits.clone());
// Set wrap point at 8 (length of BitVec)
ring.set_wrap(8);
// Access beyond wrap point should wrap around to the beginning
for i in 0..16 {
if i == 8 {
assert_eq![ring[i], true]
}
assert_eq!(ring[i], bits[i % 8]);
}
}
#[test]
fn test_wrap_behavior2() {
// Test wrapping behavior with no wrap_value set
let bits = BitVec::from_bytes(&[0b1111_1111, 0b0000_0000]); // 16 bits
let mut ring = BitRing::from(bits.clone());
// Set wrap point at 16 (length of BitVec)
ring.set_wrap(16);
// Access beyond wrap point should wrap around to the beginning
for i in 0..32 {
if i == 15 {
assert_eq![ring[i], false]
}
if i == 16 {
assert_eq![ring[i], true]
}
assert_eq!(ring[i], bits[i % 16]);
}
}
#[test]
fn test_wrap_behavior_with_wrap_value() {
// Test wrapping behavior with a wrap_value set
let bits = BitVec::from_bytes(&[0b1010_1010]); // 8 bits
let mut ring = BitRing::from(bits);
// Set wrap point at 8 (length of BitVec) and wrap_value to Some(false)
ring.set_wrap(8);
ring.set_wrap_value(Some(false));
// Access within wrap point should return actual bits
for i in 0..8 {
assert_eq!(ring[i], ring.bits[i]);
}
// Access beyond wrap point should return wrap_value (false)
for i in 8..16 {
assert!(!ring[i]);
}
}
#[test]
fn test_iterate_over_bits() {
// Test iterator behavior over BitRing
let bytes = &[0b1010_1010];
let ring = BitRing::from_bytes(bytes);
// Collect the iterator output into a vector
let collected: Vec<bool> = Iterator::take(ring, 9).collect();
// Verify it matches the expected pattern
let expected = vec![true, false, true, false, true, false, true, false, true];
assert_eq!(collected, expected);
}
#[test]
fn test_read_to_buffer() {
// Test `Read` implementation
let bytes = &[0b1010_1010];
let mut ring = BitRing::from_bytes(bytes);
let mut buf = [0; 1];
let read_bytes = ring.read(&mut buf).expect("Failed to read from BitRing");
// Ensure 1 byte is read and matches the input pattern
assert_eq!(read_bytes, 1);
assert_eq!(buf[0], 0b1010_1010);
}
#[test]
fn test_custom_wrap_len() {
// Test custom wrap length shorter than the full length
let bits = BitVec::from_bytes(&[0b1010_1010, 0b1100_1100]); // 16 bits
let mut ring = BitRing::from(bits);
// Set a custom wrap length at 8 (half the total length)
ring.set_wrap(8);
// Accessing beyond 8 should wrap around to the beginning
for i in 0..16 {
assert_eq!(ring[i], ring.bits[i % 8]);
}
}
#[test]
fn test_iter_revolution_length() {
// Initialize BitRing with a known bit pattern
let bits = BitVec::from_bytes(&[0b1010_1010, 0b1100_1100]); // 16 bits
let ring = BitRing::from(bits.clone());
// Check that iter_revolution returns exactly `len()` elements
let revolution: Vec<bool> = ring.iter_revolution().collect();
assert_eq!(
revolution.len(),
ring.len(),
"iter_revolution should return exactly len() elements"
);
// Optional: Verify the content of the revolution matches the original bits
for i in 0..ring.len() {
assert_eq!(
revolution[i], ring.bits[i],
"Mismatch at index {} in iter_revolution",
i
);
}
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/bitstream_codec/gcr.rs | src/bitstream_codec/gcr.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
src/mfm.rs
Implements a wrapper around a BitVec to provide MFM encoding and decoding.
*/
#![allow(dead_code)]
#![allow(unused_variables)]
//! A stub implementation of a GCR codec. This is a placeholder for a proper
//! implementation in the future. The Snow mac emulator doesn't need decoding
//! support, just raw access to GCR tracks.
use std::ops::{Index, Range};
use crate::{
bit_ring::BitRing,
bitstream_codec::{EncodingVariant, MarkerEncoding, TrackCodec},
io::{Error, ErrorKind, Read, Result, Seek, SeekFrom},
range_check::RangeChecker,
types::{TrackDataEncoding, TrackRegion},
};
use bit_vec::BitVec;
#[derive(Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct GcrCodec {
bits: BitRing,
clock_map: BitRing,
error_map: BitRing,
weak_enabled: bool,
weak_mask: BitRing,
initial_phase: usize,
bit_cursor: usize,
track_padding: usize,
data_ranges: RangeChecker,
data_ranges_filtered: RangeChecker,
}
#[cfg_attr(feature = "serde", typetag::serde)]
impl TrackCodec for GcrCodec {
fn encoding(&self) -> TrackDataEncoding {
TrackDataEncoding::Gcr
}
fn len(&self) -> usize {
self.bits.len()
}
fn is_empty(&self) -> bool {
self.bits.is_empty()
}
fn replace(&mut self, new_bits: BitVec) {
self.bits = BitRing::from(new_bits);
}
fn data(&self) -> &BitVec {
self.bits.bits()
}
fn data_mut(&mut self) -> &mut BitVec {
self.bits.bits_mut()
}
fn data_copied(&self) -> Vec<u8> {
self.bits.to_bytes()
}
fn set_clock_map(&mut self, clock_map: BitVec) {
assert_eq!(clock_map.len(), self.bits.len());
self.clock_map = BitRing::from(clock_map);
// Set the wrap value for the clock map to false. This disables index adjustment when
// we read across the track index.
}
fn clock_map(&self) -> &BitVec {
self.clock_map.bits()
}
fn clock_map_mut(&mut self) -> &mut BitVec {
self.clock_map.bits_mut()
}
fn enable_weak(&mut self, enable: bool) {
self.weak_enabled = enable;
}
fn weak_mask(&self) -> &BitVec {
self.weak_mask.bits()
}
fn weak_mask_mut(&mut self) -> &mut BitVec {
self.weak_mask.bits_mut()
}
fn weak_data(&self) -> Vec<u8> {
self.weak_mask.to_bytes()
}
fn set_weak_mask(&mut self, new: BitVec) {
self.weak_mask = new.into();
}
fn has_weak_bits(&self) -> bool {
!self.detect_weak_bits(6).0 > 0
}
fn error_map(&self) -> &BitVec {
self.error_map.bits()
}
fn set_track_padding(&mut self) {}
fn read_raw_u8(&self, index: usize) -> Option<u8> {
let mut byte = 0;
for bi in index..index + 8 {
byte = (byte << 1) | self.bits[bi] as u8;
}
Some(byte)
}
fn read_raw_buf(&self, buf: &mut [u8], offset: usize) -> usize {
let mut bytes_read = 0;
for byte in buf.iter_mut() {
*byte = self.read_raw_u8(offset + (bytes_read * 8)).unwrap();
bytes_read += 1;
}
bytes_read
}
fn write_raw_u8(&mut self, index: usize, byte: u8) {
for (i, bi) in (index..index + 8).enumerate() {
self.bits.set(bi, (byte & 0x80 >> i) != 0);
}
}
/// This is essentially a reimplementation of Read + Iterator that avoids mutation.
/// This allows us to read track data through an immutable reference.
fn read_decoded_u8(&self, index: usize) -> Option<u8> {
None
}
fn read_decoded_u32_le(&self, index: usize) -> u32 {
0
}
fn read_decoded_u32_be(&self, index: usize) -> u32 {
0
}
fn read_decoded_buf(&self, buf: &mut [u8], offset: usize) -> usize {
0
}
fn write_encoded_buf(&mut self, buf: &[u8], offset: usize) -> usize {
0
}
fn write_raw_buf(&mut self, buf: &[u8], offset: usize) -> usize {
0
}
fn encode(&self, data: &[u8], prev_bit: bool, encoding_type: EncodingVariant) -> BitVec {
BitVec::new()
}
fn find_marker(&self, marker: &MarkerEncoding, start: usize, limit: Option<usize>) -> Option<(usize, u16)> {
log::trace!("find_marker(): Not implemented for GCR!");
None
}
fn set_data_ranges(&mut self, ranges: Vec<Range<usize>>) {
// Don't set ranges for overlapping sectors. This avoids visual discontinuities during
// visualization.
let filtered_ranges = ranges
.clone()
.into_iter()
.filter(|range| !(range.start >= self.bits.len() || range.end >= self.bits.len()))
.collect::<Vec<Range<usize>>>();
self.data_ranges_filtered = RangeChecker::new(&filtered_ranges);
self.data_ranges = RangeChecker::new(&ranges);
}
fn is_data(&self, index: usize, wrapping: bool) -> bool {
if wrapping {
self.data_ranges.contains(index)
}
else {
self.data_ranges_filtered.contains(index)
}
}
fn debug_marker(&self, index: usize) -> String {
let mut shift_reg: u64 = 0;
for bi in index..std::cmp::min(index + 64, self.bits.len()) {
shift_reg = (shift_reg << 1) | self.bits[bi] as u64;
}
format!("{:16X}/{:064b}", shift_reg, shift_reg)
}
fn debug_decode(&self, index: usize) -> String {
let mut shift_reg: u32 = 0;
let start = index << 1;
for bi in (start..std::cmp::min(start + 64, self.bits.len())).step_by(2) {
shift_reg = (shift_reg << 1) | self.bits[self.initial_phase + bi] as u32;
}
format!("{:08X}/{:032b}", shift_reg, shift_reg)
}
}
impl GcrCodec {
pub const WEAK_BIT_RUN: usize = 6;
pub fn new(mut bits: BitVec, bit_ct: Option<usize>, weak_mask: Option<BitVec>) -> Self {
// If a bit count was provided, we can trim the bit vector to that length.
if let Some(bit_ct) = bit_ct {
bits.truncate(bit_ct);
}
let clock_map = BitVec::from_elem(bits.len(), false);
let weak_mask = match weak_mask {
Some(mask) => mask,
None => BitVec::from_elem(bits.len(), false),
};
if weak_mask.len() < bits.len() {
panic!("GcrCodec::new(): Weak mask must be the same length as the bit vector");
}
let error_bits = GcrCodec::create_error_map(&bits);
let error_bit_ct = error_bits.count_ones();
if error_bit_ct > 16 {
log::warn!("GcrCodec::new(): created error map with {} error bits", error_bit_ct);
}
let error_map = BitRing::from(error_bits);
GcrCodec {
bits: BitRing::from(bits),
clock_map: BitRing::from(clock_map),
error_map,
weak_enabled: true,
weak_mask: BitRing::from(weak_mask),
initial_phase: 0,
bit_cursor: 0,
track_padding: 0,
data_ranges: Default::default(),
data_ranges_filtered: Default::default(),
}
}
pub fn set_weak_mask(&mut self, weak_mask: BitVec) -> Result<()> {
if weak_mask.len() != self.bits.len() {
return Err(Error::new(
ErrorKind::InvalidInput,
"Weak mask must be the same length as the bit vector",
));
}
self.weak_mask = BitRing::from(weak_mask);
Ok(())
}
/// Encode an GCR address mark.
/// `data` must be a 4-byte slice.
/// Returns the encoded value in a u64 suitable for comparison to a shift register used to search
/// a BitVec.
pub fn encode_marker(data: &[u8]) -> u64 {
0
}
#[allow(dead_code)]
fn read_bit(self) -> Option<bool> {
if self.weak_enabled && self.weak_mask[self.bit_cursor] {
// Weak bits return random data
Some(rand::random())
}
else {
Some(self.bits[self.bit_cursor])
}
}
fn ref_bit_at(&self, index: usize) -> &bool {
let p_off: usize = self.clock_map[index] as usize;
if self.weak_enabled && self.weak_mask[p_off + (index << 1)] {
// Weak bits return random data
// TODO: precalculate random table and return reference to it.
&self.bits[p_off + (index << 1)]
}
else {
&self.bits[p_off + (index << 1)]
}
}
pub(crate) fn detect_weak_bits(&self, run: usize) -> (usize, usize) {
let mut region_ct = 0;
let mut weak_bit_ct = 0;
let mut zero_ct = 0;
for bit in self.bits.iter_revolution() {
if !bit {
zero_ct += 1;
}
else {
if zero_ct >= run {
region_ct += 1;
}
zero_ct = 0;
}
if zero_ct > 3 {
weak_bit_ct += 1;
}
}
(region_ct, weak_bit_ct)
}
#[allow(dead_code)]
pub(crate) fn detect_weak_regions(&self, run: usize) -> Vec<TrackRegion> {
let mut regions = Vec::new();
let mut zero_ct = 0;
let mut region_start = 0;
for (i, bit) in self.bits.iter().enumerate() {
if !bit {
zero_ct += 1;
}
else {
if zero_ct >= run {
regions.push(TrackRegion {
start: region_start,
end: i - 1,
});
}
zero_ct = 0;
}
if zero_ct == run {
region_start = i;
}
}
regions
}
/// Not every format will have a separate weak bit mask, but that doesn't mean weak bits cannot
/// be encoded. Formats can encode weak bits as a run of 4 or more zero bits. Here we detect
/// such runs and extract them into a weak bit mask as a BitVec.
pub(crate) fn create_weak_bit_mask(&self, run: usize) -> BitVec {
let mut weak_bitvec = BitVec::with_capacity(self.bits.len());
let mut zero_ct = 0;
log::debug!("create_weak_bit_mask(): bits: {}", self.bits.len());
for bit in self.bits.iter_revolution() {
if !bit {
zero_ct += 1;
}
else {
zero_ct = 0;
}
if zero_ct > run {
weak_bitvec.push(true);
}
else {
weak_bitvec.push(false);
}
}
log::warn!(
"create_weak_bit_mask(): bits: {} weak: {}",
self.bits.len(),
weak_bitvec.len(),
);
assert_eq!(weak_bitvec.len(), self.bits.len());
weak_bitvec
}
/// Create an error map that marks where MFM clock violations occur
fn create_error_map(bits: &BitVec) -> BitVec {
let mut error_bitvec = BitVec::with_capacity(bits.len());
let mut zero_ct = 0;
let mut in_bad_region = false;
for bit in bits.iter() {
if !bit {
zero_ct += 1;
if zero_ct > 3 {
in_bad_region = true;
}
}
else {
if zero_ct < 4 {
in_bad_region = false;
}
zero_ct = 0;
}
error_bitvec.push(in_bad_region);
}
error_bitvec
}
}
impl Iterator for GcrCodec {
type Item = bool;
fn next(&mut self) -> Option<Self::Item> {
// The bit cursor should always be aligned to a clock bit. If it is not, we can try to nudge
// it to the next clock bit. If the next bit is also not a clock bit, we are in an
// unsynchronized region and can't really do anything about it.
if !self.clock_map[self.bit_cursor] && self.clock_map[self.bit_cursor + 1] {
self.bit_cursor += 1;
log::debug!("next(): nudging to next clock bit @ {:05X}", self.bit_cursor);
}
// Now that we are (hopefully) aligned to a clock bit, retrieve the next bit which should
// be a data bit, or return a random bit if weak bits are enabled and the current bit is weak.
let decoded_bit = if self.weak_enabled && self.weak_mask[self.bit_cursor + 1] {
rand::random()
}
else {
self.bits[self.bit_cursor + 1]
};
// Advance to the next clock bit.
self.bit_cursor += 2;
Some(decoded_bit)
}
}
impl Seek for GcrCodec {
fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
if self.bits.is_empty() {
return Err(Error::new(ErrorKind::InvalidInput, "Cannot seek on an empty bitstream"));
}
let mut new_cursor = match pos {
SeekFrom::Start(offset) => offset as usize,
SeekFrom::End(offset) => self.bits.len().saturating_add_signed(offset as isize),
SeekFrom::Current(offset) => self.bit_cursor.saturating_add_signed(offset as isize),
};
// If we have seeked to a data bit, nudge the bit cursor to the next clock bit.
// Don't bother if the next bit isn't a clock bit either, as we're in some unsynchronized
// track region.
if !self.clock_map[new_cursor] && self.clock_map[new_cursor + 1] {
new_cursor += 1;
log::debug!("seek(): nudging to next clock bit @ {:05X}", new_cursor);
}
self.bit_cursor = new_cursor;
Ok(self.bit_cursor as u64)
}
}
impl Read for GcrCodec {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
if self.bits.is_empty() {
return Err(Error::new(ErrorKind::InvalidInput, "Cannot read an empty bitstream"));
}
let mut bytes_read = 0;
for byte in buf.iter_mut() {
let mut byte_val = 0;
for _ in 0..8 {
if let Some(bit) = self.next() {
byte_val = (byte_val << 1) | bit as u8;
}
else {
break;
}
}
*byte = byte_val;
bytes_read += 1;
}
Ok(bytes_read)
}
}
impl Index<usize> for GcrCodec {
type Output = bool;
fn index(&self, index: usize) -> &Self::Output {
if index >= self.bits.len() {
panic!("index out of bounds");
}
// Decode the bit here (implement the MFM decoding logic)
self.ref_bit_at(index)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/bitstream_codec/fm.rs | src/bitstream_codec/fm.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
src/fm.rs
Implements a wrapper around a BitVec to provide FM encoding and decoding.
*/
use crate::{
bitstream_codec::{EncodingVariant, MarkerEncoding, TrackCodec},
io::{Error, ErrorKind, Read, Result, Seek, SeekFrom},
range_check::RangeChecker,
types::{TrackDataEncoding, TrackRegion},
};
use bit_vec::BitVec;
use std::ops::{Index, Range};
pub const FM_BYTE_LEN: usize = 16;
pub const FM_MARKER_LEN: usize = 64;
//pub const FM_MARKER_CLOCK_MASK: u64 = 0xAAAA_AAAA_AAAA_0000;
pub const FM_MARKER_DATA_MASK: u64 = 0x0000_0000_0000_5555;
pub const FM_MARKER_CLOCK_MASK: u64 = 0xAAAA_AAAA_AAAA_AAAA;
pub const FM_MARKER_CLOCK_PATTERN: u64 = 0xAAAA_AAAA_AAAA_A02A;
#[doc(hidden)]
#[macro_export]
macro_rules! fm_offset {
($x:expr) => {
$x * 16
};
}
// FmCodec is very similar to MfmCodec, and subsequently contains a lot of repeated code.
// It would be nice to refactor this to reduce the amount of duplicated code - perhaps we could
// implement Mfm as a Fm variant.
/// Implements a bitstream codec for FM encoded data.
///
/// FM encoding is a simple encoding scheme where each data bit is encoded as two bits, with a
/// clock bit between each data bit. FM encoding is used on 'standard density' diskettes, typically
/// seen in 8" formats, and occasionally on certain 5.25" diskettes.
///
/// FM encoded tracks are often present at the end of the standard track count on a diskette,
/// used by commercial disk duplicators to store additional data about the disk being mastered.
#[derive(Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct FmCodec {
bit_vec: BitVec,
clock_map: BitVec,
weak_enabled: bool,
weak_mask: BitVec,
error_map: BitVec,
initial_phase: usize,
bit_cursor: usize,
track_padding: usize,
data_ranges: RangeChecker,
data_ranges_filtered: RangeChecker,
}
pub enum FmEncodingType {
Data,
AddressMark,
}
pub fn get_fm_sync_offset(track: &BitVec) -> Option<bool> {
match find_sync(track, 0) {
Some(offset) => {
if offset % 2 == 0 {
Some(false)
}
else {
Some(true)
}
}
None => None,
}
}
pub fn find_sync(track: &BitVec, start_idx: usize) -> Option<usize> {
let mut shift_reg: u32 = 0;
for (i, bit) in track.into_iter().skip(start_idx).enumerate() {
shift_reg = shift_reg << 1 | (bit as u32);
if i >= 32 && shift_reg == 0xAA_AA_AA_AA {
return Some(i - 32);
}
}
None
}
#[cfg_attr(feature = "serde", typetag::serde)]
impl TrackCodec for FmCodec {
fn encoding(&self) -> TrackDataEncoding {
TrackDataEncoding::Fm
}
fn len(&self) -> usize {
self.bit_vec.len()
}
fn is_empty(&self) -> bool {
self.bit_vec.is_empty()
}
fn replace(&mut self, new_bits: BitVec) {
self.bit_vec = new_bits;
}
fn data(&self) -> &BitVec {
&self.bit_vec
}
fn data_mut(&mut self) -> &mut BitVec {
&mut self.bit_vec
}
fn data_copied(&self) -> Vec<u8> {
self.bit_vec.to_bytes()
}
fn set_clock_map(&mut self, clock_map: BitVec) {
self.clock_map = clock_map;
}
fn clock_map(&self) -> &BitVec {
&self.clock_map
}
fn clock_map_mut(&mut self) -> &mut BitVec {
&mut self.clock_map
}
fn enable_weak(&mut self, enable: bool) {
self.weak_enabled = enable;
}
fn weak_mask(&self) -> &BitVec {
&self.weak_mask
}
fn weak_mask_mut(&mut self) -> &mut BitVec {
&mut self.weak_mask
}
fn weak_data(&self) -> Vec<u8> {
self.weak_mask.to_bytes()
}
fn set_weak_mask(&mut self, new: BitVec) {
self.weak_mask = new;
}
fn has_weak_bits(&self) -> bool {
!self.detect_weak_bits(6).0 > 0
}
fn error_map(&self) -> &BitVec {
&self.error_map
}
fn set_track_padding(&mut self) {
let mut wrap_buffer: [u8; 4] = [0; 4];
if self.bit_vec.len() % 8 == 0 {
// Track length was an even multiple of 8, so it is possible track data was padded to
// byte margins.
let found_pad = false;
// Read buffer across the end of the track and see if all bytes are the same.
for pad in 1..16 {
log::trace!(
"bitcells: {} data bits: {} window_start: {}",
self.bit_vec.len(),
self.bit_vec.len() / 2,
self.bit_vec.len() - (8 * 2)
);
let wrap_addr = (self.bit_vec.len() / 2) - (8 * 2);
self.track_padding = pad;
self.seek(SeekFrom::Start(wrap_addr as u64)).unwrap();
self.read_exact(&mut wrap_buffer).unwrap();
log::trace!(
"set_track_padding(): wrap_buffer at {}, pad {}: {:02X?}",
wrap_addr,
pad,
wrap_buffer
);
}
if !found_pad {
// No padding found
log::warn!("set_track_padding(): Unable to determine track padding.");
self.track_padding = 0;
}
}
else {
// Track length is not an even multiple of 8 - the only explanation is that there is no
// track padding.
self.track_padding = 0;
}
}
fn read_raw_u8(&self, index: usize) -> Option<u8> {
if index >= self.len() {
return None;
}
let mut byte = 0;
for bi in index..std::cmp::min(index + 8, self.bit_vec.len()) {
byte = (byte << 1) | self.bit_vec[bi] as u8;
}
Some(byte)
}
fn read_raw_buf(&self, buf: &mut [u8], offset: usize) -> usize {
let mut bytes_read = 0;
for byte in buf.iter_mut() {
*byte = self.read_raw_u8(offset + (bytes_read * 8)).unwrap();
bytes_read += 1;
}
bytes_read
}
fn write_raw_u8(&mut self, index: usize, byte: u8) {
if index >= self.len() {
return;
}
for bi in index..std::cmp::min(index + 8, self.bit_vec.len()) {
self.bit_vec.set(bi, byte & (0x80 >> bi) != 0);
}
}
fn write_raw_buf(&mut self, buf: &[u8], offset: usize) -> usize {
let mut bytes_written = 0;
let mut offset = offset;
for byte in buf {
for bit_pos in (0..8).rev() {
let bit = byte & (0x01 << bit_pos) != 0;
self.bit_vec.set(offset, bit);
offset += 1;
}
bytes_written += 1;
}
bytes_written
}
fn read_decoded_u8(&self, index: usize) -> Option<u8> {
if index >= self.bit_vec.len() || index >= self.clock_map.len() {
log::error!(
"read_decoded_byte(): index out of bounds: {} vec: {} clock_map:{}",
index,
self.bit_vec.len(),
self.clock_map.len()
);
return None;
}
let p_off: usize = self.clock_map[index] as usize;
let mut byte = 0;
for bi in (index..std::cmp::min(index + FM_BYTE_LEN, self.bit_vec.len()))
.skip(p_off)
.step_by(2)
{
byte = (byte << 1) | self.bit_vec[bi] as u8;
}
Some(byte)
}
fn read_decoded_u32_le(&self, _index: usize) -> u32 {
todo!()
}
fn read_decoded_u32_be(&self, _index: usize) -> u32 {
todo!()
}
fn read_decoded_buf(&self, buf: &mut [u8], offset: usize) -> usize {
let mut bytes_read = 0;
for byte in buf.iter_mut() {
*byte = self.read_decoded_u8(offset + bytes_read).unwrap();
bytes_read += 1;
}
bytes_read
}
fn write_encoded_buf(&mut self, buf: &[u8], offset: usize) -> usize {
let encoded_buf = Self::encode(buf, false, EncodingVariant::Data);
let mut copy_len = encoded_buf.len();
if self.bit_vec.len() < offset + encoded_buf.len() {
copy_len = self.bit_vec.len() - offset;
}
let mut bits_written = 0;
let phase = !self.clock_map[offset] as usize;
log::trace!("write_buf(): offset: {} phase: {}", offset, phase);
for (i, bit) in encoded_buf.into_iter().enumerate().take(copy_len) {
self.bit_vec.set(offset + phase + i, bit);
bits_written += 1;
}
(bits_written + 7) / 8
}
fn encode(&self, data: &[u8], prev_bit: bool, encoding_type: EncodingVariant) -> BitVec {
let mut bitvec = BitVec::new();
let mut bit_count = 0;
for &byte in data {
for i in (0..8).rev() {
let bit = (byte & (1 << i)) != 0;
if bit {
// 1 is encoded as 01
bitvec.push(false);
bitvec.push(true);
}
else {
// 0 is encoded as 10 if previous bit was 0, otherwise 00
let previous_bit = if bitvec.is_empty() {
prev_bit
}
else {
bitvec[bitvec.len() - 1]
};
if previous_bit {
bitvec.push(false);
}
else {
bitvec.push(true);
}
bitvec.push(false);
}
bit_count += 1;
// Omit clock bit between source bits 3 and 4 for address marks
if let EncodingVariant::AddressMark = encoding_type {
if bit_count == 4 {
// Clear the previous clock bit (which is between bit 3 and 4)
bitvec.set(bitvec.len() - 2, false);
}
}
}
// Reset bit_count for the next byte
bit_count = 0;
}
bitvec
}
fn find_marker(&self, marker: &MarkerEncoding, start: usize, limit: Option<usize>) -> Option<(usize, u16)> {
//log::debug!("Fm::find_marker(): Searching for marker {:016X} at {}", marker, start);
if self.bit_vec.is_empty() {
return None;
}
let mut shift_reg: u64 = 0;
let mut shift_ct: u32 = 0;
let search_limit = if let Some(provided_limit) = limit {
std::cmp::min(provided_limit, self.bit_vec.len())
}
else {
self.bit_vec.len()
};
for bi in start..search_limit {
shift_reg = (shift_reg << 1) | self.bit_vec[bi] as u64;
shift_ct += 1;
let have_marker = (shift_reg & FM_MARKER_CLOCK_MASK) == FM_MARKER_CLOCK_PATTERN;
let have_data =
(shift_reg & FM_MARKER_DATA_MASK & marker.mask) == marker.bits & FM_MARKER_DATA_MASK & marker.mask;
if shift_ct >= 64 && have_marker {
log::debug!(
"found marker clock at {}: Shift reg {:16X}: data: {:16X} mask: {:16X}, marker data: {:16X}",
bi - 64,
shift_reg & FM_MARKER_CLOCK_MASK,
shift_reg & FM_MARKER_DATA_MASK,
marker.mask,
marker.bits & FM_MARKER_DATA_MASK
);
}
if shift_ct >= 64 && have_marker && have_data {
log::debug!(
"Fm::find_marker(): Found marker at {} data match: {}",
bi - 64,
have_data
);
return Some(((bi - 64) + 1, (shift_reg & 0xFFFF) as u16));
}
}
log::debug!("Fm::find_marker(): Failed to find marker!");
None
}
fn set_data_ranges(&mut self, ranges: Vec<Range<usize>>) {
// Don't set ranges for overlapping sectors. This avoids visual discontinuities during
// visualization.
let filtered_ranges = ranges
.clone()
.into_iter()
.filter(|range| !(range.start >= self.bit_vec.len() || range.end >= self.bit_vec.len()))
.collect::<Vec<Range<usize>>>();
self.data_ranges_filtered = RangeChecker::new(&filtered_ranges);
self.data_ranges = RangeChecker::new(&ranges);
}
fn is_data(&self, index: usize, wrapping: bool) -> bool {
if wrapping {
self.data_ranges.contains(index)
}
else {
self.data_ranges_filtered.contains(index)
}
}
fn debug_marker(&self, index: usize) -> String {
let mut shift_reg: u64 = 0;
for bi in index..std::cmp::min(index + 64, self.bit_vec.len()) {
shift_reg = (shift_reg << 1) | self.bit_vec[bi] as u64;
}
format!("{:16X}/{:064b}", shift_reg, shift_reg)
}
fn debug_decode(&self, index: usize) -> String {
let mut shift_reg: u32 = 0;
let start = index << 1;
for bi in (start..std::cmp::min(start + 64, self.bit_vec.len())).step_by(2) {
shift_reg = (shift_reg << 1) | self.bit_vec[self.initial_phase + bi] as u32;
}
format!("{:08X}/{:032b}", shift_reg, shift_reg)
}
}
impl FmCodec {
pub const WEAK_BIT_RUN: usize = 6;
pub fn new(mut bit_vec: BitVec, bit_ct: Option<usize>, weak_mask: Option<BitVec>) -> Self {
// If a bit count was provided, we can trim the bit vector to that length.
if let Some(bit_ct) = bit_ct {
bit_vec.truncate(bit_ct);
}
let encoding_sync = get_fm_sync_offset(&bit_vec).unwrap_or(false);
let sync = encoding_sync.into();
let clock_map = BitVec::from_elem(bit_vec.len(), encoding_sync);
let weak_mask = match weak_mask {
Some(mask) => mask,
None => BitVec::from_elem(bit_vec.len(), false),
};
if weak_mask.len() < bit_vec.len() {
panic!("Weak mask must be the same length as the bit vector");
}
// Create an empty error map until we can determine how to set errors for FM tracks.
let error_map = BitVec::from_elem(bit_vec.len(), false);
FmCodec {
bit_vec,
clock_map,
weak_enabled: true,
weak_mask,
error_map,
initial_phase: sync,
bit_cursor: sync,
track_padding: 0,
data_ranges: Default::default(),
data_ranges_filtered: Default::default(),
}
}
pub fn weak_data(&self) -> Vec<u8> {
self.weak_mask.to_bytes()
}
pub fn set_weak_mask(&mut self, weak_mask: BitVec) -> Result<()> {
if weak_mask.len() != self.bit_vec.len() {
return Err(Error::new(
ErrorKind::InvalidInput,
"Weak mask must be the same length as the bit vector",
));
}
self.weak_mask = weak_mask;
Ok(())
}
pub fn encode(data: &[u8], prev_bit: bool, encoding_type: EncodingVariant) -> BitVec {
let mut bitvec = BitVec::new();
let mut bit_count = 0;
for &byte in data {
for i in (0..8).rev() {
let bit = (byte & (1 << i)) != 0;
if bit {
// 1 is encoded as 01
bitvec.push(false);
bitvec.push(true);
}
else {
// 0 is encoded as 10 if previous bit was 0, otherwise 00
let previous_bit = if bitvec.is_empty() {
prev_bit
}
else {
bitvec[bitvec.len() - 1]
};
if previous_bit {
bitvec.push(false);
}
else {
bitvec.push(true);
}
bitvec.push(false);
}
bit_count += 1;
// Omit clock bit between source bits 3 and 4 for address marks
if let EncodingVariant::AddressMark = encoding_type {
if bit_count == 4 {
// Clear the previous clock bit (which is between bit 3 and 4)
bitvec.set(bitvec.len() - 2, false);
}
}
}
// Reset bit_count for the next byte
bit_count = 0;
}
bitvec
}
/// Encode an MFM address mark.
/// `data` must be a 4-byte slice.
/// Returns the encoded value in a u64 suitable for comparison to a shift register used to search
/// a BitVec.
pub fn encode_marker(data: &[u8]) -> u64 {
assert_eq!(data.len(), 4);
let mut accum: u64 = 0;
// A mark is always preceded by a SYNC block of 0's, so we know the previous bit will always
// be 0.
let mut previous_bit = false;
for &byte in data {
for i in (0..8).rev() {
let bit = (byte & (1 << i)) != 0;
if bit {
// 1 is encoded as 01
accum = (accum << 2) | 0b01;
}
else {
// 0 is encoded as 10 if previous bit was 0, otherwise 00
if !previous_bit {
accum = (accum << 2) | 0b10;
}
else {
accum <<= 2;
}
}
previous_bit = bit;
}
}
accum
}
#[allow(dead_code)]
fn read_bit(self) -> Option<bool> {
if self.weak_enabled && self.weak_mask[self.bit_cursor] {
// Weak bits return random data
Some(rand::random())
}
else {
Some(self.bit_vec[self.bit_cursor])
}
}
#[allow(dead_code)]
fn read_bit_at(&self, index: usize) -> Option<bool> {
if self.weak_enabled && self.weak_mask[self.initial_phase + (index << 1)] {
// Weak bits return random data
Some(rand::random())
}
else {
Some(self.bit_vec[self.initial_phase + (index << 1)])
}
}
fn ref_bit_at(&self, index: usize) -> &bool {
let p_off: usize = self.clock_map[index] as usize;
if self.weak_enabled && self.weak_mask[p_off + (index << 1)] {
// Weak bits return random data
// TODO: precalculate random table and return reference to it.
&self.bit_vec[p_off + (index << 1)]
}
else {
&self.bit_vec[p_off + (index << 1)]
}
}
pub(crate) fn detect_weak_bits(&self, run: usize) -> (usize, usize) {
let mut region_ct = 0;
let mut weak_bit_ct = 0;
let mut zero_ct = 0;
for bit in self.bit_vec.iter() {
if !bit {
zero_ct += 1;
}
else {
if zero_ct >= run {
region_ct += 1;
}
zero_ct = 0;
}
if zero_ct > 3 {
weak_bit_ct += 1;
}
}
(region_ct, weak_bit_ct)
}
#[allow(dead_code)]
pub(crate) fn detect_weak_regions(&self, run: usize) -> Vec<TrackRegion> {
let mut regions = Vec::new();
let mut zero_ct = 0;
let mut region_start = 0;
for (i, bit) in self.bit_vec.iter().enumerate() {
if !bit {
zero_ct += 1;
}
else {
if zero_ct >= run {
regions.push(TrackRegion {
start: region_start,
end: i - 1,
});
}
zero_ct = 0;
}
if zero_ct == run {
region_start = i;
}
}
regions
}
/// Not every format will have a separate weak bit mask, but that doesn't mean weak bits cannot
/// be encoded. Formats can encode weak bits as a run of 4 or more zero bits. Here we detect
/// such runs and extract them into a weak bit mask as a BitVec.
#[allow(dead_code)]
pub(crate) fn create_weak_bit_mask(&self, run: usize) -> BitVec {
let mut weak_bitvec = BitVec::new();
let mut zero_ct = 0;
for bit in self.bit_vec.iter() {
if !bit {
zero_ct += 1;
}
else {
zero_ct = 0;
}
if zero_ct > run {
weak_bitvec.push(true);
}
else {
weak_bitvec.push(false);
}
}
assert_eq!(weak_bitvec.len(), self.bit_vec.len());
weak_bitvec
}
}
impl Iterator for FmCodec {
type Item = bool;
fn next(&mut self) -> Option<Self::Item> {
if self.bit_cursor >= (self.bit_vec.len() - 1) {
return None;
}
// The bit cursor should always be aligned to a clock bit.
// So retrieve the next bit which is the data bit, then point to the next clock.
let mut data_idx = self.bit_cursor + 1;
if data_idx > (self.bit_vec.len() - self.track_padding) {
// Wrap around to the beginning of the track
data_idx = 0;
}
let decoded_bit = if self.weak_enabled && self.weak_mask[data_idx] {
// Weak bits return random data
rand::random()
}
else {
self.bit_vec[data_idx]
};
let new_cursor = data_idx + 1;
if new_cursor >= (self.bit_vec.len() - self.track_padding) {
// Wrap around to the beginning of the track
self.bit_cursor = 0;
}
else {
self.bit_cursor = new_cursor;
}
Some(decoded_bit)
}
}
impl Seek for FmCodec {
fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
let (base, offset) = match pos {
// TODO: avoid casting to isize
SeekFrom::Start(offset) => (0, offset as isize),
SeekFrom::End(offset) => (self.bit_vec.len() as isize, offset as isize),
SeekFrom::Current(offset) => (self.bit_cursor as isize, offset as isize),
};
let new_pos = base.checked_add(offset).ok_or(Error::new(
ErrorKind::InvalidInput,
"invalid seek to a negative or overflowed position",
))?;
let mut new_cursor = (new_pos as usize) << 1;
/*
let mut debug_vec = Vec::new();
for i in 0..5 {
debug_vec.push(self.clock_map[new_cursor - 2 + i]);
}
log::debug!(
"seek() clock_map[{}]: {} {:?}",
new_cursor,
self.clock_map[new_cursor],
debug_vec
);
*/
// If we have seeked to a data bit, nudge the bit cursor to the next clock bit.
if !self.clock_map[new_cursor] {
//log::trace!("seek(): nudging to next clock bit");
new_cursor += 1;
}
if new_cursor > self.bit_vec.len() {
return Err(Error::new(
ErrorKind::InvalidInput,
"invalid seek to a negative or overflowed position",
));
}
self.bit_cursor = new_cursor;
//log::trace!("seek(): new_pos: {}", self.bit_cursor);
Ok(self.bit_cursor as u64)
}
}
impl Read for FmCodec {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
let mut bytes_read = 0;
for byte in buf.iter_mut() {
let mut byte_val = 0;
for _ in 0..8 {
if let Some(bit) = self.next() {
byte_val = (byte_val << 1) | bit as u8;
}
else {
break;
}
}
*byte = byte_val;
bytes_read += 1;
}
Ok(bytes_read)
}
}
impl Index<usize> for FmCodec {
type Output = bool;
fn index(&self, index: usize) -> &Self::Output {
if index >= self.bit_vec.len() {
panic!("index out of bounds");
}
// Decode the bit here (implement the MFM decoding logic)
self.ref_bit_at(index)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/bitstream_codec/mfm.rs | src/bitstream_codec/mfm.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
src/mfm.rs
Implements a wrapper around a BitVec to provide MFM encoding and decoding.
*/
use std::ops::{Index, Range};
use crate::{
bit_ring::BitRing,
bitstream_codec::{EncodingVariant, MarkerEncoding, TrackCodec},
io::{Error, ErrorKind, Read, Result, Seek, SeekFrom},
range_check::RangeChecker,
types::{TrackDataEncoding, TrackRegion},
};
use bit_vec::BitVec;
pub const MFM_BYTE_LEN: usize = 16;
pub const MFM_MARKER_LEN: usize = 64;
pub const MFM_MARKER_CLOCK: u64 = 0x0220_0220_0220_0000;
#[doc(hidden)]
#[macro_export]
macro_rules! mfm_offset {
($x:expr) => {
$x * 16
};
}
#[derive(Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct MfmCodec {
bits: BitRing,
clock_map: BitRing,
error_map: BitRing,
weak_enabled: bool,
weak_mask: BitRing,
initial_phase: usize,
bit_cursor: usize,
track_padding: usize,
data_ranges: RangeChecker,
data_ranges_filtered: RangeChecker,
}
pub fn get_mfm_sync_offset(track: &BitVec) -> Option<bool> {
match find_sync(track, 0) {
Some(offset) => {
if offset % 2 == 0 {
Some(false)
}
else {
Some(true)
}
}
None => None,
}
}
pub fn find_sync(track: &BitVec, start_idx: usize) -> Option<usize> {
let mut shift_reg: u32 = 0;
for (i, bit) in track.into_iter().skip(start_idx).enumerate() {
shift_reg = shift_reg << 1 | (bit as u32);
if i >= 32 && shift_reg == 0xAA_AA_AA_AA {
return Some(i - 32);
}
}
None
}
#[cfg_attr(feature = "serde", typetag::serde)]
impl TrackCodec for MfmCodec {
fn encoding(&self) -> TrackDataEncoding {
TrackDataEncoding::Mfm
}
fn len(&self) -> usize {
self.bits.len()
}
fn is_empty(&self) -> bool {
self.bits.is_empty()
}
fn replace(&mut self, new_bits: BitVec) {
self.bits = BitRing::from(new_bits);
}
fn data(&self) -> &BitVec {
self.bits.bits()
}
fn data_mut(&mut self) -> &mut BitVec {
self.bits.bits_mut()
}
fn data_copied(&self) -> Vec<u8> {
self.bits.to_bytes()
}
fn set_clock_map(&mut self, clock_map: BitVec) {
assert_eq!(clock_map.len(), self.bits.len());
self.clock_map = BitRing::from(clock_map);
// Set the wrap value for the clock map to false. This disables index adjustment when
// we read across the track index.
}
fn clock_map(&self) -> &BitVec {
self.clock_map.bits()
}
fn clock_map_mut(&mut self) -> &mut BitVec {
self.clock_map.bits_mut()
}
fn enable_weak(&mut self, enable: bool) {
self.weak_enabled = enable;
}
fn weak_mask(&self) -> &BitVec {
self.weak_mask.bits()
}
fn weak_mask_mut(&mut self) -> &mut BitVec {
self.weak_mask.bits_mut()
}
fn weak_data(&self) -> Vec<u8> {
self.weak_mask.to_bytes()
}
fn set_weak_mask(&mut self, new: BitVec) {
self.weak_mask = new.into();
}
fn has_weak_bits(&self) -> bool {
!self.detect_weak_bits(6).0 > 0
}
fn error_map(&self) -> &BitVec {
self.error_map.bits()
}
fn set_track_padding(&mut self) {
let mut wrap_buffer: [u8; 4] = [0; 4];
if self.bits.len() % 8 == 0 {
// Track length was an even multiple of 8, so it is possible track data was padded to
// byte margins.
let found_pad = false;
// Read buffer across the end of the track and see if all bytes are the same.
for pad in 1..16 {
log::trace!(
"bitcells: {} data bits: {} window_start: {}",
self.bits.len(),
self.bits.len() / 2,
self.bits.len() - (8 * 2)
);
let wrap_addr = (self.bits.len() / 2) - (8 * 2);
self.track_padding = pad;
self.seek(SeekFrom::Start(wrap_addr as u64)).unwrap();
self.read_exact(&mut wrap_buffer).unwrap();
log::trace!(
"set_track_padding(): wrap_buffer at {}, pad {}: {:02X?}",
wrap_addr,
pad,
wrap_buffer
);
}
if !found_pad {
// No padding found
log::debug!("set_track_padding(): Unable to determine track padding",);
self.track_padding = 0;
}
}
else {
// Track length is not an even multiple of 8 - the only explanation is that there is no
// track padding.
self.track_padding = 0;
}
}
fn read_raw_u8(&self, index: usize) -> Option<u8> {
let mut byte = 0;
for bi in index..index + 8 {
byte = (byte << 1) | self.bits[bi] as u8;
}
Some(byte)
}
fn read_raw_buf(&self, buf: &mut [u8], offset: usize) -> usize {
let mut bytes_read = 0;
for byte in buf.iter_mut() {
*byte = self.read_raw_u8(offset + (bytes_read * 8)).unwrap();
bytes_read += 1;
}
bytes_read
}
fn write_raw_u8(&mut self, index: usize, byte: u8) {
for (i, bi) in (index..index + 8).enumerate() {
self.bits.set(bi, (byte & 0x80 >> i) != 0);
}
}
/// This is essentially a reimplementation of Read + Iterator that avoids mutation.
/// This allows us to read track data through an immutable reference.
fn read_decoded_u8(&self, index: usize) -> Option<u8> {
let mut byte = 0;
let mut cursor = index;
// If we are not pointing to a clock bit, advance to the next data bit.
// If the next bit is not a clock bit either, we are in an unsynchronized region, so don't
// bother adjusting the index
if !self.clock_map[cursor] && self.clock_map[cursor + 1] {
cursor += 1;
}
// Advance to the data bit.
cursor += 1;
for _ in 0..8 {
let decoded_bit = if self.weak_enabled && !self.weak_mask.is_empty() && self.weak_mask[cursor] {
// Weak bits return random data
rand::random()
}
else {
self.bits[cursor]
};
byte = (byte << 1) | decoded_bit as u8;
// Advance to next data bit.
cursor += 2;
}
Some(byte)
}
fn read_decoded_u32_le(&self, index: usize) -> u32 {
let mut dword = 0;
let mut cursor = index;
// If we are not pointing to a clock bit, advance to the next clock bit.
cursor += !self.clock_map[cursor] as usize;
// Now that we are aligned to a clock bit, point to the next data bit
cursor += 1;
for b in 0..4 {
let mut byte = 0;
for _ in 0..8 {
let decoded_bit = if self.weak_enabled && !self.weak_mask.is_empty() && self.weak_mask[cursor] {
// Weak bits return random data
rand::random()
}
else {
self.bits[cursor]
};
byte = (byte << 1) | decoded_bit as u32;
// Advance to next data bit.
cursor += 2;
}
dword |= byte << (b * 8);
}
dword
}
fn read_decoded_u32_be(&self, index: usize) -> u32 {
let mut dword = 0;
let mut cursor = index;
// If we are not pointing to a clock bit, advance to the next clock bit.
cursor += !self.clock_map[cursor] as usize;
// Now that we are aligned to a clock bit, point to the next data bit
cursor += 1;
for _ in 0..32 {
let decoded_bit = if self.weak_enabled && !self.weak_mask.is_empty() && self.weak_mask[cursor] {
// Weak bits return random data
rand::random()
}
else {
self.bits[cursor]
};
dword = (dword << 1) | decoded_bit as u32;
// Advance to next data bit.
cursor += 2;
}
dword
}
fn read_decoded_buf(&self, buf: &mut [u8], offset: usize) -> usize {
let mut bytes_read = 0;
for byte in buf.iter_mut() {
*byte = self.read_decoded_u8(offset + (bytes_read * MFM_BYTE_LEN)).unwrap();
bytes_read += 1;
}
bytes_read
}
fn write_encoded_buf(&mut self, buf: &[u8], offset: usize) -> usize {
let mut offset = offset;
let encoded_buf = self.encode(buf, false, EncodingVariant::Data);
// let mut copy_len = encoded_buf.len();
// if self.bits.len() < offset + encoded_buf.len() {
// copy_len = self.bits.len() - offset;
// }
let mut bits_written = 0;
// If we landed on a data bit, advance to the next clock bit.
// If the next bit is not a clock bit either, we are in an unsynchronized region, so don't
// bother.
if !self.clock_map[offset] && self.clock_map[offset + 1] {
offset += 1;
}
for (i, bit) in encoded_buf.into_iter().enumerate() {
self.bits.set(offset + i, bit);
bits_written += 1;
}
(bits_written + 7) / 8
}
fn write_raw_buf(&mut self, buf: &[u8], offset: usize) -> usize {
let mut bytes_written = 0;
let mut offset = offset;
for byte in buf {
for bit_pos in 0..8 {
self.bits.set(offset, byte & (0x80 >> bit_pos) != 0);
offset += 1;
}
bytes_written += 1;
}
bytes_written
}
fn encode(&self, data: &[u8], prev_bit: bool, encoding_type: EncodingVariant) -> BitVec {
let mut bitvec = BitVec::new();
let mut bit_count = 0;
for &byte in data {
for i in 0..8 {
//let bit = ;
if (byte & (0x80 >> i)) != 0 {
// 1 is encoded as 01
bitvec.push(false);
bitvec.push(true);
}
else {
// 0 is encoded as 10 if previous bit was 0, otherwise 00
let previous_bit = if bitvec.is_empty() {
prev_bit
}
else {
bitvec[bitvec.len() - 1]
};
if previous_bit {
bitvec.push(false);
}
else {
bitvec.push(true);
}
bitvec.push(false);
}
bit_count += 1;
// Omit clock bit between source bits 3 and 4 for address marks
if let EncodingVariant::AddressMark = encoding_type {
if bit_count == 4 {
// Clear the previous clock bit (which is between bit 3 and 4)
bitvec.set(bitvec.len() - 2, false);
}
}
}
// Reset bit_count for the next byte
bit_count = 0;
}
bitvec
}
fn find_marker(&self, marker: &MarkerEncoding, start: usize, limit: Option<usize>) -> Option<(usize, u16)> {
//log::debug!("Mfm::find_marker(): Searching for marker: {:016X}", marker);
if self.bits.is_empty() {
return None;
}
let mut shift_reg: u64 = 0;
let mut shift_ct: u32 = 0;
let search_limit = if let Some(provided_limit) = limit {
std::cmp::min(provided_limit, self.bits.len())
}
else {
self.bits.len()
};
for bi in start..search_limit {
shift_reg = (shift_reg << 1) | self.bits[bi] as u64;
shift_ct += 1;
if shift_ct >= 64 && ((shift_reg & marker.mask) == marker.bits) {
return Some(((bi - marker.len) + 1, (shift_reg & 0xFFFF) as u16));
}
}
log::trace!("find_marker(): Failed to find marker!");
None
}
fn set_data_ranges(&mut self, ranges: Vec<Range<usize>>) {
// Don't set ranges for overlapping sectors. This avoids visual discontinuities during
// visualization.
let filtered_ranges = ranges
.clone()
.into_iter()
.filter(|range| !(range.start >= self.bits.len() || range.end >= self.bits.len()))
.collect::<Vec<Range<usize>>>();
self.data_ranges_filtered = RangeChecker::new(&filtered_ranges);
self.data_ranges = RangeChecker::new(&ranges);
}
fn is_data(&self, index: usize, wrapping: bool) -> bool {
if wrapping {
self.data_ranges.contains(index)
}
else {
self.data_ranges_filtered.contains(index)
}
}
fn debug_marker(&self, index: usize) -> String {
let mut shift_reg: u64 = 0;
for bi in index..std::cmp::min(index + 64, self.bits.len()) {
shift_reg = (shift_reg << 1) | self.bits[bi] as u64;
}
format!("{:16X}/{:064b}", shift_reg, shift_reg)
}
fn debug_decode(&self, index: usize) -> String {
let mut shift_reg: u32 = 0;
let start = index << 1;
for bi in (start..std::cmp::min(start + 64, self.bits.len())).step_by(2) {
shift_reg = (shift_reg << 1) | self.bits[self.initial_phase + bi] as u32;
}
format!("{:08X}/{:032b}", shift_reg, shift_reg)
}
fn map_density(&self, density: f32) -> u8 {
((density * 1.5).clamp(0.0, 1.0) * 255.0) as u8
}
}
impl MfmCodec {
pub const WEAK_BIT_RUN: usize = 6;
pub fn new(mut bits: BitVec, bit_ct: Option<usize>, weak_mask: Option<BitVec>) -> Self {
// If a bit count was provided, we can trim the bit vector to that length.
if let Some(bit_ct) = bit_ct {
bits.truncate(bit_ct);
}
let encoding_sync = get_mfm_sync_offset(&bits).unwrap_or(false);
let sync = encoding_sync.into();
let clock_map = BitVec::from_elem(bits.len(), encoding_sync);
let weak_mask = match weak_mask {
Some(mask) => mask,
None => BitVec::from_elem(bits.len(), false),
};
if weak_mask.len() < bits.len() {
panic!("MfmCodec::new(): Weak mask must be the same length as the bit vector");
}
let error_bits = MfmCodec::create_error_map(&bits);
let error_bit_ct = error_bits.count_ones();
if error_bit_ct > 16 {
log::warn!("MfmCodec::new(): created error map with {} error bits", error_bit_ct);
}
let error_map = BitRing::from(error_bits);
let mut clock_map = BitRing::from(clock_map);
// Set the wrap value for the clock map to false, this disables the clock map when reading
// across the track index, as we should follow the clock phase from the last marker.
clock_map.set_wrap_value(false);
MfmCodec {
bits: BitRing::from(bits),
clock_map,
error_map,
weak_enabled: true,
weak_mask: BitRing::from(weak_mask),
initial_phase: sync,
bit_cursor: sync,
track_padding: 0,
data_ranges: Default::default(),
data_ranges_filtered: Default::default(),
}
}
pub fn set_weak_mask(&mut self, weak_mask: BitVec) -> Result<()> {
if weak_mask.len() != self.bits.len() {
return Err(Error::new(
ErrorKind::InvalidInput,
"Weak mask must be the same length as the bit vector",
));
}
self.weak_mask = BitRing::from(weak_mask);
Ok(())
}
/// Encode an MFM address mark.
/// `data` must be a 4-byte slice.
/// Returns the encoded value in a u64 suitable for comparison to a shift register used to search
/// a BitVec.
pub fn encode_marker(data: &[u8]) -> u64 {
assert_eq!(data.len(), 4);
let mut accum: u64 = 0;
// A mark is always preceded by a SYNC block of 0's, so we know the previous bit will always
// be 0.
let mut previous_bit = false;
for &byte in data {
for i in (0..8).rev() {
let bit = (byte & (1 << i)) != 0;
if bit {
// 1 is encoded as 01
accum = (accum << 2) | 0b01;
}
else {
// 0 is encoded as 10 if previous bit was 0, otherwise 00
if !previous_bit {
accum = (accum << 2) | 0b10;
}
else {
accum <<= 2;
}
}
previous_bit = bit;
}
}
accum
}
#[allow(dead_code)]
fn read_bit(self) -> Option<bool> {
if self.weak_enabled && self.weak_mask[self.bit_cursor] {
// Weak bits return random data
Some(rand::random())
}
else {
Some(self.bits[self.bit_cursor])
}
}
fn ref_bit_at(&self, index: usize) -> &bool {
let p_off: usize = self.clock_map[index] as usize;
if self.weak_enabled && self.weak_mask[p_off + (index << 1)] {
// Weak bits return random data
// TODO: precalculate random table and return reference to it.
&self.bits[p_off + (index << 1)]
}
else {
&self.bits[p_off + (index << 1)]
}
}
pub(crate) fn detect_weak_bits(&self, run: usize) -> (usize, usize) {
let mut region_ct = 0;
let mut weak_bit_ct = 0;
let mut zero_ct = 0;
for bit in self.bits.iter_revolution() {
if !bit {
zero_ct += 1;
}
else {
if zero_ct >= run {
region_ct += 1;
}
zero_ct = 0;
}
if zero_ct > 3 {
weak_bit_ct += 1;
}
}
(region_ct, weak_bit_ct)
}
#[allow(dead_code)]
pub(crate) fn detect_weak_regions(&self, run: usize) -> Vec<TrackRegion> {
let mut regions = Vec::new();
let mut zero_ct = 0;
let mut region_start = 0;
for (i, bit) in self.bits.iter().enumerate() {
if !bit {
zero_ct += 1;
}
else {
if zero_ct >= run {
regions.push(TrackRegion {
start: region_start,
end: i - 1,
});
}
zero_ct = 0;
}
if zero_ct == run {
region_start = i;
}
}
regions
}
/// Not every format will have a separate weak bit mask, but that doesn't mean weak bits cannot
/// be encoded. Formats can encode weak bits as a run of 4 or more zero bits. Here we detect
/// such runs and extract them into a weak bit mask as a BitVec.
pub(crate) fn create_weak_bit_mask(&self, run: usize) -> BitVec {
let mut weak_bitvec = BitVec::with_capacity(self.bits.len());
let mut zero_ct = 0;
log::debug!("create_weak_bit_mask(): bits: {}", self.bits.len());
for bit in self.bits.iter_revolution() {
if !bit {
zero_ct += 1;
}
else {
zero_ct = 0;
}
if zero_ct > run {
weak_bitvec.push(true);
}
else {
weak_bitvec.push(false);
}
}
log::warn!(
"create_weak_bit_mask(): bits: {} weak: {}",
self.bits.len(),
weak_bitvec.len(),
);
assert_eq!(weak_bitvec.len(), self.bits.len());
weak_bitvec
}
/// Create an error map that marks where MFM clock violations occur
fn create_error_map(bits: &BitVec) -> BitVec {
let mut error_bitvec = BitVec::with_capacity(bits.len());
let mut zero_ct = 0;
let mut in_bad_region = false;
for bit in bits.iter() {
if !bit {
zero_ct += 1;
if zero_ct > 3 {
in_bad_region = true;
}
}
else {
if zero_ct < 4 {
in_bad_region = false;
}
zero_ct = 0;
}
error_bitvec.push(in_bad_region);
}
error_bitvec
}
}
impl Iterator for MfmCodec {
type Item = bool;
fn next(&mut self) -> Option<Self::Item> {
// The bit cursor should always be aligned to a clock bit. If it is not, we can try to nudge
// it to the next clock bit. If the next bit is also not a clock bit, we are in an
// unsynchronized region and can't really do anything about it.
if !self.clock_map[self.bit_cursor] && self.clock_map[self.bit_cursor + 1] {
self.bit_cursor += 1;
log::debug!("next(): nudging to next clock bit @ {:05X}", self.bit_cursor);
}
// Now that we are (hopefully) aligned to a clock bit, retrieve the next bit which should
// be a data bit, or return a random bit if weak bits are enabled and the current bit is weak.
let decoded_bit = if self.weak_enabled && self.weak_mask[self.bit_cursor + 1] {
rand::random()
}
else {
self.bits[self.bit_cursor + 1]
};
// Advance to the next clock bit.
self.bit_cursor += 2;
Some(decoded_bit)
}
}
impl Seek for MfmCodec {
fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
if self.bits.is_empty() {
return Err(Error::new(ErrorKind::InvalidInput, "Cannot seek on an empty bitstream"));
}
let mut new_cursor = match pos {
SeekFrom::Start(offset) => offset as usize,
SeekFrom::End(offset) => self.bits.len().saturating_add_signed(offset as isize),
SeekFrom::Current(offset) => self.bit_cursor.saturating_add_signed(offset as isize),
};
// If we have seeked to a data bit, nudge the bit cursor to the next clock bit.
// Don't bother if the next bit isn't a clock bit either, as we're in some unsynchronized
// track region.
if !self.clock_map[new_cursor] && self.clock_map[new_cursor + 1] {
new_cursor += 1;
log::debug!("seek(): nudging to next clock bit @ {:05X}", new_cursor);
}
self.bit_cursor = new_cursor;
Ok(self.bit_cursor as u64)
}
}
impl Read for MfmCodec {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
if self.bits.is_empty() {
return Err(Error::new(ErrorKind::InvalidInput, "Cannot read an empty bitstream"));
}
let mut bytes_read = 0;
for byte in buf.iter_mut() {
let mut byte_val = 0;
for _ in 0..8 {
if let Some(bit) = self.next() {
byte_val = (byte_val << 1) | bit as u8;
}
else {
break;
}
}
*byte = byte_val;
bytes_read += 1;
}
Ok(bytes_read)
}
}
impl Index<usize> for MfmCodec {
type Output = bool;
fn index(&self, index: usize) -> &Self::Output {
if index >= self.bits.len() {
panic!("index out of bounds");
}
// Decode the bit here (implement the MFM decoding logic)
self.ref_bit_at(index)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/bitstream_codec/mod.rs | src/bitstream_codec/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
pub mod fm;
pub mod gcr;
pub mod mfm;
use crate::{
io::{Read, Seek},
types::TrackDataEncoding,
};
use bit_vec::BitVec;
use dyn_clone::{clone_trait_object, DynClone};
use std::ops::{Index, Range};
// fn find_marker(&self, marker: u64, mask: Option<u64>, start: usize, limit: Option<usize>) -> Option<(usize, u16)>;
/// Defines the bit pattern and mask for an FM or MFM track marker.
pub struct MarkerEncoding {
pub bits: u64,
pub mask: u64,
pub len: usize,
}
impl Default for MarkerEncoding {
fn default() -> Self {
MarkerEncoding {
bits: 0,
mask: !0,
len: 64,
}
}
}
/// When encoding data with a `TrackCodex`, an `EncodingVariant` specifies if the encoding should
/// use the standard `Data` encoding, or encode the data using the special clock pattern to make
/// it an `AddressMark`.
#[derive(Copy, Clone, Debug)]
pub enum EncodingVariant {
Data,
AddressMark,
}
/// A `TrackCodec` is a trait that represents the data encoding of a disk track.
/// It provides both raw (encoded) and decoded access to the track bit stream,
/// plus auxiliary bit maps (clock map, weak bit mask, error map) that enable
/// random access and imperfect-media behavior.
#[cfg_attr(feature = "serde", typetag::serde(tag = "type"))]
pub trait TrackCodec: DynClone + Read + Seek + Index<usize, Output = bool> + Send + Sync {
    /// Return the [TrackDataEncoding] of the data on this track.
    /// A single track may only have one encoding.
    fn encoding(&self) -> TrackDataEncoding;
    /// Return the length of the track in bits.
    fn len(&self) -> usize;
    /// Return a bool indicating if the track is empty.
    fn is_empty(&self) -> bool;
    /// Replace the data bits of the track with the provided bits.
    fn replace(&mut self, new_bits: BitVec);
    /// Return a reference to the data bits of the track as a `BitVec`.
    fn data(&self) -> &BitVec;
    /// Return a mutable reference to the data bits of the track as a `BitVec`.
    fn data_mut(&mut self) -> &mut BitVec;
    /// Return a copy of the track data as a `Vec<u8>`.
    fn data_copied(&self) -> Vec<u8>;
    /// Set the clock map for the track.
    /// A clock map is a `BitVec` where each 1 bit set corresponds to a clock bit.
    /// Maintaining a clock map enables random access to a track.
    fn set_clock_map(&mut self, clock_map: BitVec);
    /// Return a reference to the clock map of the track as a &[BitVec].
    fn clock_map(&self) -> &BitVec;
    /// Return a mutable reference to the clock map of the track as a &mut [BitVec].
    fn clock_map_mut(&mut self) -> &mut BitVec;
    /// Control whether weak bits should be calculated when reading the track.
    fn enable_weak(&mut self, enable: bool);
    /// Return a reference to the weak bit mask as a &[BitVec].
    fn weak_mask(&self) -> &BitVec;
    /// Return a mutable reference to the weak bit mask as a &mut [BitVec].
    fn weak_mask_mut(&mut self) -> &mut BitVec;
    /// Return a copy of the weak bits of the track as a `Vec<u8>`.
    fn weak_data(&self) -> Vec<u8>;
    /// Replace the weak bits of the track with the provided [BitVec].
    fn set_weak_mask(&mut self, mask: BitVec);
    /// Return a bool indicating if the track has bits set in the weak bit mask.
    fn has_weak_bits(&self) -> bool;
    /// Return a reference to the error map of the track as a &[BitVec].
    fn error_map(&self) -> &BitVec;
    /// Apply implementation-defined padding to the end of the track.
    fn set_track_padding(&mut self);
    /// Read a raw (encoded) byte from the track at the specified bit index.
    fn read_raw_u8(&self, index: usize) -> Option<u8>;
    /// Fill a buffer with raw (encoded) bytes from the track starting at the specified bit index.
    fn read_raw_buf(&self, buf: &mut [u8], offset: usize) -> usize;
    /// Write a raw (encoded) byte to the track at the specified bit index.
    fn write_raw_u8(&mut self, index: usize, byte: u8);
    /// Write a buffer of raw (encoded) bytes to the track starting at the specified bit index.
    fn write_raw_buf(&mut self, buf: &[u8], offset: usize) -> usize;
    /// Read a decoded byte from the track at the specified bit index.
    fn read_decoded_u8(&self, index: usize) -> Option<u8>;
    /// Read a decoded little-endian u32 from the track at the specified bit index.
    fn read_decoded_u32_le(&self, index: usize) -> u32;
    /// Read a decoded big-endian u32 from the track at the specified bit index.
    fn read_decoded_u32_be(&self, index: usize) -> u32;
    /// Fill a buffer with decoded bytes from the track starting at the specified bit index.
    fn read_decoded_buf(&self, buf: &mut [u8], offset: usize) -> usize;
    /// Encode a buffer of data and write it to the track starting at the specified bit index.
    fn write_encoded_buf(&mut self, buf: &[u8], offset: usize) -> usize;
    /// Encode a buffer of data and return it as a `BitVec`.
    fn encode(&self, data: &[u8], prev_bit: bool, encoding_type: EncodingVariant) -> BitVec;
    /// Find the next marker in the track starting at the specified bit index, searching up to
    /// `limit` bit index if provided.
    fn find_marker(&self, marker: &MarkerEncoding, start: usize, limit: Option<usize>) -> Option<(usize, u16)>;
    /// Record the bit ranges that contain sector data; used by `is_data()`.
    fn set_data_ranges(&mut self, ranges: Vec<Range<usize>>);
    /// Return a bool indicating if the bit at the specified index is inside sector data.
    /// Requires the track to have data ranges set with set_data_ranges().
    fn is_data(&self, index: usize, wrapping: bool) -> bool;
    /// Return a debug string describing the marker at the specified bit index.
    fn debug_marker(&self, index: usize) -> String;
    /// Return a debug string describing decoded data at the specified bit index.
    fn debug_decode(&self, index: usize) -> String;
    /// Map a density ratio (0-1.0) to a visual density value in u8 (0-255).
    /// Used for visualization functions. Input is clamped to [0.0, 1.0].
    fn map_density(&self, density: f32) -> u8 {
        (density.clamp(0.0, 1.0) * 255.0) as u8
    }
}
// Generate a Clone impl for Box<dyn TrackCodec> via dyn-clone.
clone_trait_object!(TrackCodec);
/// Boxed, dynamically dispatched [TrackCodec] used as the common track stream type.
pub type TrackDataStream = Box<dyn TrackCodec<Output = bool>>;
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/types/standard_format.rs | src/types/standard_format.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
types/standard_format.rs
Represents information about standard (non-copy-protected) disk formats,
such as those that can be represented with a raw sector image (IMG).
Since the formats are well known, we can provide many default parameters
for them.
fluxfox currently supports (or aims to support) the following formats:
PC 160K DD Single-Sided 5.25"
PC 180K DD Single-Sided 5.25"
PC 320K DD Double-Sided 5.25"
PC 360K DD Double-Sided 5.25"
PC 720K DD Double-Sided 3.5"
PC 1.2M HD Double-Sided 5.25"
PC 1.44M HD Double-Sided 3.5"
PC 2.88M ED Double-Sided 3.5"
*/
//! The `standard_format` module defines the [StandardFormat] enum that defines parameters for
//! several standard PC disk formats.
use std::{
fmt::{Display, Formatter},
str::FromStr,
};
use crate::{
types::{
sector_layout::SectorLayout,
DiskDescriptor,
DiskRpm,
Platform,
TrackDataEncoding,
TrackDataRate,
TrackDensity,
},
DiskCh,
DiskChs,
DiskChsn,
};
/// A newtype for [StandardFormat] for use in parsing [StandardFormat] from user-provided strings,
/// such as command-line arguments. Parsing is provided via `FromStr`; the canonical,
/// re-parseable string form is produced by its `Display` impl.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct StandardFormatParam(pub StandardFormat);
impl FromStr for StandardFormatParam {
    type Err = String;
    /// Parse a [StandardFormat] from a user-provided string (case-insensitive).
    /// This can be used by utilities that wish to take a StandardFormat as a command-line argument.
    /// For backwards compatibility, format strings can specify a pc_ prefix to refer to PC disk
    /// formats, but it is not required: both `"pc_360k"` and `"360k"` resolve to the same format.
    /// Non-pc formats will require the appropriate prefix (e.g. `"amiga_880k"`).
    ///
    /// # Errors
    /// Returns an error `String` if the input does not name a known format.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let lower = s.to_lowercase();
        // Strip the optional "pc_" prefix and match against the *unprefixed* names.
        // (The previous implementation stripped the prefix but then matched against the
        // prefixed names, so no PC format string could ever parse successfully.)
        let name = lower.strip_prefix("pc_").unwrap_or(&lower);
        match name {
            "160k" => Ok(StandardFormatParam(StandardFormat::PcFloppy160)),
            "180k" => Ok(StandardFormatParam(StandardFormat::PcFloppy180)),
            "320k" => Ok(StandardFormatParam(StandardFormat::PcFloppy320)),
            "360k" => Ok(StandardFormatParam(StandardFormat::PcFloppy360)),
            "720k" => Ok(StandardFormatParam(StandardFormat::PcFloppy720)),
            "1200k" => Ok(StandardFormatParam(StandardFormat::PcFloppy1200)),
            "1440k" => Ok(StandardFormatParam(StandardFormat::PcFloppy1440)),
            "2880k" => Ok(StandardFormatParam(StandardFormat::PcFloppy2880)),
            #[cfg(feature = "amiga")]
            "amiga_880k" => Ok(StandardFormatParam(StandardFormat::AmigaFloppy880)),
            #[cfg(feature = "amiga")]
            "amiga_1760k" => Ok(StandardFormatParam(StandardFormat::AmigaFloppy1760)),
            _ => Err(format!("Invalid format: {}", s)),
        }
    }
}
impl Display for StandardFormatParam {
    /// Render the canonical (re-parseable) format-name string, e.g. `pc_360k`.
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        let name = match self.0 {
            StandardFormat::PcFloppy160 => "pc_160k",
            StandardFormat::PcFloppy180 => "pc_180k",
            StandardFormat::PcFloppy320 => "pc_320k",
            StandardFormat::PcFloppy360 => "pc_360k",
            StandardFormat::PcFloppy720 => "pc_720k",
            StandardFormat::PcFloppy1200 => "pc_1200k",
            StandardFormat::PcFloppy1440 => "pc_1440k",
            StandardFormat::PcFloppy2880 => "pc_2880k",
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy880 => "amiga_880k",
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy1760 => "amiga_1760k",
        };
        write!(f, "{}", name)
    }
}
impl From<StandardFormat> for StandardFormatParam {
fn from(format: StandardFormat) -> Self {
StandardFormatParam(format)
}
}
impl StandardFormatParam {
    /// Return a list of all supported StandardFormats and their string representations
    /// as StandardFormatParam's. This method can be used to generate help text for utilities
    /// that accept StandardFormat as a command-line argument.
    ///
    /// NOTE: keep this list in sync with the `FromStr` and `Display` impls above.
    pub fn list() -> Vec<(String, StandardFormat)> {
        vec![
            ("pc_160k".to_string(), StandardFormat::PcFloppy160),
            ("pc_180k".to_string(), StandardFormat::PcFloppy180),
            ("pc_320k".to_string(), StandardFormat::PcFloppy320),
            ("pc_360k".to_string(), StandardFormat::PcFloppy360),
            ("pc_720k".to_string(), StandardFormat::PcFloppy720),
            ("pc_1200k".to_string(), StandardFormat::PcFloppy1200),
            ("pc_1440k".to_string(), StandardFormat::PcFloppy1440),
            ("pc_2880k".to_string(), StandardFormat::PcFloppy2880),
            #[cfg(feature = "amiga")]
            ("amiga_880k".to_string(), StandardFormat::AmigaFloppy880),
            // Previously missing; FromStr and Display both support this format.
            #[cfg(feature = "amiga")]
            ("amiga_1760k".to_string(), StandardFormat::AmigaFloppy1760),
        ]
    }
}
/// An enumeration describing one of several standard PC disk formats.
#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq, strum::EnumIter)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum StandardFormat {
    /// A single-sided, 8-sectored, 48tpi, double-density disk
    PcFloppy160,
    /// A single-sided, 9-sectored, 48tpi, double-density disk
    PcFloppy180,
    /// A double-sided, 8-sectored, 48tpi, double-density disk
    PcFloppy320,
    /// A double-sided, 9-sectored, 48tpi, double-density disk
    PcFloppy360,
    /// A double-sided, 9-sectored, 96tpi, double-density disk
    PcFloppy720,
    /// A double-sided, 15-sectored, 96tpi, high-density disk
    PcFloppy1200,
    /// A double-sided, 18-sectored, 96tpi, high-density disk
    PcFloppy1440,
    /// A double-sided, 36-sectored, 96tpi, extra-density (ED) disk
    PcFloppy2880,
    #[cfg(feature = "amiga")]
    /// A double-sided, 11-sectored, 96tpi, double-density disk
    AmigaFloppy880,
    #[cfg(feature = "amiga")]
    /// A double-sided, 22-sectored, 96tpi, high-density disk
    AmigaFloppy1760,
}
impl Display for StandardFormat {
    /// Render a human-readable description of the format (capacity, form factor, density).
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        match self {
            StandardFormat::PcFloppy160 => write!(f, "160KB 5.25\" DD"),
            StandardFormat::PcFloppy180 => write!(f, "180KB 5.25\" DD"),
            StandardFormat::PcFloppy320 => write!(f, "320KB 5.25\" DD"),
            StandardFormat::PcFloppy360 => write!(f, "360KB 5.25\" DD"),
            StandardFormat::PcFloppy720 => write!(f, "720KB 3.5\" DD"),
            StandardFormat::PcFloppy1200 => write!(f, "1.2MB 5.25\" HD"),
            StandardFormat::PcFloppy1440 => write!(f, "1.44MB 3.5\" HD"),
            StandardFormat::PcFloppy2880 => write!(f, "2.88MB 3.5\" ED"),
            // Fixed: "3,5\"" -> "3.5\"" for consistency with the PC format strings.
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy880 => write!(f, "880KB 3.5\" DD"),
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy1760 => write!(f, "1.76MB 3.5\" HD"),
        }
    }
}
impl From<StandardFormatParam> for StandardFormat {
fn from(param: StandardFormatParam) -> Self {
param.0
}
}
impl StandardFormat {
    /// Returns the geometry corresponding to the `StandardFormat` as a [SectorLayout] struct
    /// (cylinders, heads, sectors per track, sector ID offset, sector size).
    pub fn layout(&self) -> SectorLayout {
        match self {
            StandardFormat::PcFloppy160 => SectorLayout::new(40, 1, 8, 1, 512),
            StandardFormat::PcFloppy180 => SectorLayout::new(40, 1, 9, 1, 512),
            StandardFormat::PcFloppy320 => SectorLayout::new(40, 2, 8, 1, 512),
            StandardFormat::PcFloppy360 => SectorLayout::new(40, 2, 9, 1, 512),
            StandardFormat::PcFloppy720 => SectorLayout::new(80, 2, 9, 1, 512),
            StandardFormat::PcFloppy1200 => SectorLayout::new(80, 2, 15, 1, 512),
            StandardFormat::PcFloppy1440 => SectorLayout::new(80, 2, 18, 1, 512),
            StandardFormat::PcFloppy2880 => SectorLayout::new(80, 2, 36, 1, 512),
            // Amiga sector IDs are 0-based (s_off == 0), unlike PC formats.
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy880 => SectorLayout::new(80, 2, 11, 0, 512),
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy1760 => SectorLayout::new(80, 2, 22, 0, 512),
        }
    }
    /// Normalize a raw track count to a standard track count (40 or 80).
    /// Counts of 35-49 map to 40 and 75-99 map to 80; anything else returns `None`.
    pub fn normalized_track_ct(track_ct: usize) -> Option<usize> {
        match track_ct {
            35..50 => Some(40),
            75..100 => Some(80),
            _ => None,
        }
    }
    /// Return the sector size in bytes corresponding to the `StandardFormat`.
    /// Note: This is always 512 for standard PC disk formats.
    pub fn sector_size(&self) -> usize {
        self.layout().size()
    }
    /// Returns the geometry corresponding to the `StandardFormat` as a `DiskCh` struct.
    pub fn ch(&self) -> DiskCh {
        self.layout().ch()
    }
    /// Returns the geometry corresponding to the `StandardFormat` as a `DiskChs` struct.
    pub fn chs(&self) -> DiskChs {
        self.layout().chs()
    }
    /// Returns the geometry corresponding to the `StandardFormat` as a `DiskChsn` struct.
    pub fn chsn(&self) -> DiskChsn {
        self.layout().chsn()
    }
    /// Returns the `DiskDataEncoding` corresponding to the `StandardFormat`.
    /// All standard formats here are MFM-encoded.
    pub fn encoding(&self) -> TrackDataEncoding {
        TrackDataEncoding::Mfm
    }
    /// Returns the `DiskDataRate` corresponding to the `StandardFormat`.
    pub fn data_rate(&self) -> TrackDataRate {
        match self {
            StandardFormat::PcFloppy160 => TrackDataRate::Rate250Kbps(1.0),
            StandardFormat::PcFloppy180 => TrackDataRate::Rate250Kbps(1.0),
            StandardFormat::PcFloppy320 => TrackDataRate::Rate250Kbps(1.0),
            StandardFormat::PcFloppy360 => TrackDataRate::Rate250Kbps(1.0),
            StandardFormat::PcFloppy720 => TrackDataRate::Rate250Kbps(1.0),
            StandardFormat::PcFloppy1200 => TrackDataRate::Rate500Kbps(1.0),
            StandardFormat::PcFloppy1440 => TrackDataRate::Rate500Kbps(1.0),
            StandardFormat::PcFloppy2880 => TrackDataRate::Rate1000Kbps(1.0),
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy880 => TrackDataRate::Rate250Kbps(1.0),
            // We are going to ignore the fact that Amiga HD drives spun at 150RPM for half the data rate.
            // From a normalized perspective, we consider them 300RPM and 500Kbps.
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy1760 => TrackDataRate::Rate500Kbps(1.0),
        }
    }
    /// Returns the `DiskDensity` corresponding to the `StandardFormat`.
    /// Derived from the data rate.
    pub fn density(&self) -> TrackDensity {
        TrackDensity::from(self.data_rate())
    }
    /// Returns the default `DiskRpm` corresponding to the `StandardFormat`.
    /// Note: The actual RPM of an image may vary depending on the drive used to create the disk image.
    pub fn rpm(&self) -> DiskRpm {
        match self {
            StandardFormat::PcFloppy160 => DiskRpm::Rpm300(1.0),
            StandardFormat::PcFloppy180 => DiskRpm::Rpm300(1.0),
            StandardFormat::PcFloppy320 => DiskRpm::Rpm300(1.0),
            StandardFormat::PcFloppy360 => DiskRpm::Rpm300(1.0),
            StandardFormat::PcFloppy720 => DiskRpm::Rpm300(1.0),
            StandardFormat::PcFloppy1200 => DiskRpm::Rpm360(1.0),
            StandardFormat::PcFloppy1440 => DiskRpm::Rpm300(1.0),
            StandardFormat::PcFloppy2880 => DiskRpm::Rpm300(1.0),
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy880 => DiskRpm::Rpm300(1.0),
            // See note above in data_rate() for Amiga HD drives.
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy1760 => DiskRpm::Rpm300(1.0),
        }
    }
    /// Return the number of bitcells per track corresponding to the `StandardFormat`.
    /// These are nominal values; actual track lengths in flux images may differ slightly.
    pub fn bitcell_ct(&self) -> usize {
        match self {
            StandardFormat::PcFloppy160 => 100_000,
            StandardFormat::PcFloppy180 => 100_000,
            StandardFormat::PcFloppy320 => 100_000,
            StandardFormat::PcFloppy360 => 100_000,
            StandardFormat::PcFloppy720 => 100_000,
            StandardFormat::PcFloppy1200 => 166_666,
            StandardFormat::PcFloppy1440 => 200_000,
            StandardFormat::PcFloppy2880 => 400_000,
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy880 => 100_000,
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy1760 => 200_000,
        }
    }
    /// Return a standard default GAP3 value corresponding to the `StandardFormat`.
    /// GAP3 is the inter-sector gap byte count used when formatting.
    pub fn gap3(&self) -> usize {
        match self {
            StandardFormat::PcFloppy160 => 0x50,
            StandardFormat::PcFloppy180 => 0x50,
            StandardFormat::PcFloppy320 => 0x50,
            StandardFormat::PcFloppy360 => 0x50,
            StandardFormat::PcFloppy720 => 0x50,
            StandardFormat::PcFloppy1200 => 0x54,
            StandardFormat::PcFloppy1440 => 0x6C,
            StandardFormat::PcFloppy2880 => 0x53,
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy880 => 0x50, // TODO: Replace placeholder value
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy1760 => 0x50, // TODO: Replace placeholder value
        }
    }
    /// Return a standard `DiskDescriptor` struct corresponding to the `StandardFormat`.
    pub fn descriptor(&self) -> DiskDescriptor {
        DiskDescriptor {
            platforms: Some(vec![Platform::from(*self)]),
            geometry: self.ch(),
            data_encoding: TrackDataEncoding::Mfm,
            density: self.density(),
            data_rate: self.data_rate(),
            rpm: Some(self.rpm()),
            write_protect: None,
        }
    }
    /// Return the size in bytes of a raw sector image corresponding to the `StandardFormat`.
    pub fn disk_size(&self) -> usize {
        match self {
            StandardFormat::PcFloppy160 => 163_840,
            StandardFormat::PcFloppy180 => 184_320,
            StandardFormat::PcFloppy320 => 327_680,
            StandardFormat::PcFloppy360 => 368_640,
            StandardFormat::PcFloppy720 => 737_280,
            StandardFormat::PcFloppy1200 => 1_228_800,
            StandardFormat::PcFloppy1440 => 1_474_560,
            StandardFormat::PcFloppy2880 => 2_949_120,
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy880 => 901_120,
            #[cfg(feature = "amiga")]
            StandardFormat::AmigaFloppy1760 => 1_802_240,
        }
    }
}
impl From<StandardFormat> for DiskCh {
/// Convert a `StandardFormat` variant into a `DiskCh` struct.
fn from(format: StandardFormat) -> Self {
format.ch()
}
}
impl From<StandardFormat> for DiskChs {
/// Convert a `StandardFormat` variant into a `DiskChs` struct.
fn from(format: StandardFormat) -> Self {
format.chs()
}
}
impl From<StandardFormat> for DiskChsn {
/// Convert a `StandardFormat` variant into a `DiskChsn` struct.
fn from(format: StandardFormat) -> Self {
format.chsn()
}
}
impl TryFrom<usize> for StandardFormat {
    type Error = String;
    /// Convert a size in bytes (the length of a raw sector image) to a `StandardFormat` variant.
    ///
    /// # Errors
    /// Returns an error `String` if the size does not correspond to a known format.
    fn try_from(size: usize) -> Result<Self, Self::Error> {
        let size = match size {
            163_840 => StandardFormat::PcFloppy160,
            184_320 => StandardFormat::PcFloppy180,
            327_680 => StandardFormat::PcFloppy320,
            368_640 => StandardFormat::PcFloppy360,
            737_280 => StandardFormat::PcFloppy720,
            1_228_800 => StandardFormat::PcFloppy1200,
            1_474_560 => StandardFormat::PcFloppy1440,
            2_949_120 => StandardFormat::PcFloppy2880,
            #[cfg(feature = "amiga")]
            901_120 => StandardFormat::AmigaFloppy880,
            // Previously missing; matches disk_size() for AmigaFloppy1760.
            #[cfg(feature = "amiga")]
            1_802_240 => StandardFormat::AmigaFloppy1760,
            _ => return Err("Invalid size".to_string()),
        };
        Ok(size)
    }
}
impl TryFrom<DiskChs> for StandardFormat {
type Error = String;
/// Convert a `DiskChs` struct into a `StandardFormat` variant.
fn try_from(chs: DiskChs) -> Result<Self, Self::Error> {
StandardFormat::try_from(&chs)
}
}
impl TryFrom<&DiskChs> for StandardFormat {
    type Error = String;
    /// Convert a `DiskChs` struct (interpreted as drive geometry) into a `StandardFormat` variant.
    ///
    /// # Errors
    /// Returns an error `String` if the geometry does not match a known format.
    fn try_from(chs: &DiskChs) -> Result<Self, Self::Error> {
        let chs = match chs.get() {
            (40, 1, 8) => StandardFormat::PcFloppy160,
            (40, 1, 9) => StandardFormat::PcFloppy180,
            (40, 2, 8) => StandardFormat::PcFloppy320,
            (40, 2, 9) => StandardFormat::PcFloppy360,
            (80, 2, 9) => StandardFormat::PcFloppy720,
            (80, 2, 15) => StandardFormat::PcFloppy1200,
            (80, 2, 18) => StandardFormat::PcFloppy1440,
            (80, 2, 36) => StandardFormat::PcFloppy2880,
            #[cfg(feature = "amiga")]
            (80, 2, 11) => StandardFormat::AmigaFloppy880,
            // Previously missing; matches layout() for AmigaFloppy1760.
            #[cfg(feature = "amiga")]
            (80, 2, 22) => StandardFormat::AmigaFloppy1760,
            _ => return Err("Invalid geometry".to_string()),
        };
        Ok(chs)
    }
}
impl From<StandardFormat> for usize {
/// Convert a `StandardFormat` variant into a size in bytes.
fn from(format: StandardFormat) -> Self {
format.disk_size()
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/types/chs.rs | src/types/chs.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! The `chs` module defines several structures for working with Cylinder-Head-Sector (CHS)
//! addressing and sector IDs.
use crate::{types::sector_layout::SectorLayout, MAXIMUM_SECTOR_SIZE};
use std::{cmp::Ordering, fmt::Display};
/// A structure representing a query against the four components of sector header:
/// - Cylinder ID (c)
/// - Head ID (h)
/// - Sector ID (s)
/// - Sector Size (n)
///
/// The only required field in a `DiskChsnQuery` is the Sector ID field.
/// Any other field may be set to None to indicate that it should be ignored when matching.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Default)]
pub struct DiskChsnQuery {
    // Optional cylinder ID; `None` acts as a wildcard.
    c: Option<u16>,
    // Optional head ID; `None` acts as a wildcard.
    h: Option<u8>,
    // Required sector ID; always compared exactly.
    s: u8,
    // Optional sector size field; `None` acts as a wildcard.
    n: Option<u8>,
}
impl Display for DiskChsnQuery {
    /// Render as `[c:.. h:. s:... n:.]`, printing `*` for wildcard (unset) fields.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        fn field<T: Display>(v: &Option<T>) -> String {
            v.as_ref().map_or_else(|| "*".to_string(), |x| x.to_string())
        }
        write!(
            f,
            "[c:{:2} h:{} s:{:3} n:{}]",
            field(&self.c),
            field(&self.h),
            self.s,
            field(&self.n)
        )
    }
}
#[allow(dead_code)]
impl DiskChsnQuery {
    /// Create a new DiskChsnQuery structure from the four sector ID components.
    /// `c`, `h` and `n` may be `None` (wildcard); the sector ID `s` is always required.
    pub fn new(c: impl Into<Option<u16>>, h: impl Into<Option<u8>>, s: u8, n: impl Into<Option<u8>>) -> Self {
        Self {
            c: c.into(),
            h: h.into(),
            s,
            n: n.into(),
        }
    }
    /// Return the cylinder (c) field.
    pub fn c(&self) -> Option<u16> {
        self.c
    }
    /// Return the head (h) field.
    pub fn h(&self) -> Option<u8> {
        self.h
    }
    /// Return the sector id (s) field.
    pub fn s(&self) -> u8 {
        self.s
    }
    /// Return the size (n) field.
    pub fn n(&self) -> Option<u8> {
        self.n
    }
    /// Return the size of the 'n' parameter in bytes, or None if n is not set.
    /// The formula for calculating size from n is (128 * 2^n), capped at the
    /// maximum single-sector size of 8192 bytes.
    pub fn n_size(&self) -> Option<usize> {
        self.n
            .map(|n| MAXIMUM_SECTOR_SIZE.min(128usize.overflowing_shl(u32::from(n)).0))
    }
    /// Return a boolean indicating whether the specified `DiskChsn` matches the query.
    /// The sector ID must match exactly; `c`, `h` and `n` are only compared when set.
    pub fn matches(&self, id_chsn: &DiskChsn) -> bool {
        self.s == id_chsn.s()
            && self.c.map_or(true, |c| c == id_chsn.c())
            && self.h.map_or(true, |h| h == id_chsn.h())
            && self.n.map_or(true, |n| n == id_chsn.n())
    }
}
impl From<DiskChsn> for DiskChsnQuery {
    /// Build an exact-match query from a fully specified sector ID.
    fn from(chsn: DiskChsn) -> Self {
        let (c, h, s, n) = chsn.get();
        Self {
            c: Some(c),
            h: Some(h),
            s,
            n: Some(n),
        }
    }
}
impl From<DiskChs> for DiskChsnQuery {
    /// Build a query from a CHS address: `c`, `h` and `s` match exactly,
    /// while the size field `n` is left as a wildcard.
    fn from(chs: DiskChs) -> Self {
        Self {
            n: None,
            c: Some(chs.c()),
            h: Some(chs.h()),
            s: chs.s(),
        }
    }
}
/// A structure representing the four components of Sector ID:
/// - Cylinder (c)
/// - Head (h)
/// - Sector ID (s)
/// - Sector Size (n)
///
/// A DiskChsn may represent a Sector ID or an overall disk geometry.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct DiskChsn {
    // The cylinder/head/sector-ID triple.
    chs: DiskChs,
    // The sector size field; size in bytes is 128 * 2^n (see n_size()).
    n: u8,
}
impl Default for DiskChsn {
fn default() -> Self {
Self {
chs: DiskChs::default(),
n: 2,
}
}
}
impl From<(u16, u8, u8, u8)> for DiskChsn {
fn from((c, h, s, n): (u16, u8, u8, u8)) -> Self {
Self {
chs: DiskChs::from((c, h, s)),
n,
}
}
}
impl From<(DiskChs, u8)> for DiskChsn {
fn from((chs, n): (DiskChs, u8)) -> Self {
Self { chs, n }
}
}
impl Display for DiskChsn {
    /// Render as `[c:.. h:. s:... n:.]`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let (c, h, s, n) = self.get();
        write!(f, "[c:{:2} h:{} s:{:3} n:{}]", c, h, s, n)
    }
}
#[allow(dead_code)]
impl DiskChsn {
    /// Create a new DiskChsn structure from the four sector ID components.
    pub fn new(c: u16, h: u8, s: u8, n: u8) -> Self {
        Self {
            chs: DiskChs::from((c, h, s)),
            n,
        }
    }
    /// Return all four sector ID components.
    /// # Returns:
    /// A tuple containing the cylinder, head, sector ID, and sector size.
    #[inline]
    pub fn get(&self) -> (u16, u8, u8, u8) {
        (self.c(), self.h(), self.s(), self.n())
    }
    /// Return the cylinder (c) field.
    #[inline]
    pub fn c(&self) -> u16 {
        self.chs.c()
    }
    /// Return the head (h) field.
    #[inline]
    pub fn h(&self) -> u8 {
        self.chs.h()
    }
    /// Return the sector id (s) field.
    #[inline]
    pub fn s(&self) -> u8 {
        self.chs.s()
    }
    /// Return a `DiskCh` structure representing the cylinder and head components of a DiskChsn.
    #[inline]
    pub fn ch(&self) -> DiskCh {
        self.chs.ch()
    }
    /// Return the size (n) field.
    #[inline]
    pub fn n(&self) -> u8 {
        self.n
    }
    /// Return the size of the 'n' parameter in bytes.
    /// The formula for calculating size from n is (128 * 2^n)
    /// We enforce a maximum size of 8192 bytes for a single sector.
    /// (`overflowing_shl` keeps n >= 64 from panicking; the cap applies regardless.)
    #[inline]
    pub fn n_size(&self) -> usize {
        std::cmp::min(MAXIMUM_SECTOR_SIZE, 128usize.overflowing_shl(self.n as u32).0)
    }
    /// Convert the value of the sector size field (n) into bytes.
    #[inline]
    pub fn n_to_bytes(n: u8) -> usize {
        std::cmp::min(MAXIMUM_SECTOR_SIZE, 128usize.overflowing_shl(n as u32).0)
    }
    /// Convert a size in bytes into a valid sector size field value (n)
    /// NOTE(review): for sizes that are not 128 * a power of two, the result
    /// effectively rounds down (e.g. 257 -> n=1 / 256 bytes) — assumed callers
    /// only pass standard sector sizes; TODO confirm.
    #[inline]
    pub fn bytes_to_n(size: usize) -> u8 {
        let mut n = 0;
        let mut size = size;
        while size > 128 {
            size >>= 1;
            n += 1;
        }
        n
    }
    /// Set the four components of a sector ID.
    pub fn set(&mut self, c: u16, h: u8, s: u8, n: u8) {
        self.set_c(c);
        self.set_h(h);
        self.set_s(s);
        self.n = n;
    }
    /// Set the cylinder component of a sector ID.
    #[inline]
    pub fn set_c(&mut self, c: u16) {
        self.chs.set_c(c)
    }
    /// Set the head component of a sector ID.
    #[inline]
    pub fn set_h(&mut self, h: u8) {
        self.chs.set_h(h)
    }
    /// Set the sector ID component of a sector ID.
    #[inline]
    pub fn set_s(&mut self, s: u8) {
        self.chs.set_s(s)
    }
    /// Set the CHS components of a sector ID.
    pub fn set_chs(&mut self, chs: DiskChs) {
        self.chs = chs;
    }
    /// Return the number of sectors represented by a `DiskChsn` structure, interpreted as drive geometry.
    pub fn sector_count(&self) -> u32 {
        self.chs.sector_count()
    }
    /// Return a boolean indicating whether this `DiskChsn`, interpreted as drive geometry, contains
    /// the specified `DiskChs` representing a sector.
    #[inline]
    pub fn contains(&self, other: impl Into<DiskChs>) -> bool {
        let other = other.into();
        self.chs.contains(other)
    }
    /// Convert a `DiskChsn` struct to an LBA sector address. A reference drive geometry is required to
    /// calculate the address.
    #[inline]
    pub fn to_lba(&self, geom: &SectorLayout) -> usize {
        self.chs.to_lba(geom)
    }
    /// Return a new `DiskChsn` that is the next sector on the disk, according to the specified
    /// geometry. The size field (n) is carried over unchanged.
    /// Returns None if the current `DiskChsn` represents the last sector of the specified geometry.
    /// This function should only be used for iterating through sectors in a standard disk format.
    /// It will not work correctly for non-standard disk formats.
    pub fn next_sector(&self, geom: &SectorLayout) -> Option<DiskChsn> {
        self.chs.next_sector(geom).map(|chs| DiskChsn::from((chs, self.n)))
    }
    /// Return a new `Option<DiskChsn>` that is `sectors` number of sectors advanced from the current
    /// `DiskChsn`, according to a provided geometry. The size field (n) is carried over unchanged.
    /// Returns None if advanced past the end of the disk.
    /// # Arguments:
    /// * `geom` - Any type implementing `Into<DiskChs>`, representing the number of heads,
    /// cylinders, and sectors per track on the disk.
    pub(crate) fn offset_sectors(&mut self, sectors: u32, geom: &SectorLayout) -> Option<DiskChsn> {
        self.chs
            .offset_sectors(sectors, geom)
            .map(|chs| DiskChsn::from((chs, self.n)))
    }
    /// Return an iterator over the sector IDs of the specified geometry.
    pub fn iter(&self, geom: SectorLayout) -> DiskChsnIterator {
        DiskChsnIterator { geom, chs: None }
    }
}
/// A structure representing three of the four components of Sector ID:
/// - Cylinder (c)
/// - Head (h)
/// - Sector ID (s)
///
/// A DiskChs may represent a Sector ID, where size is ignored, or an overall disk geometry.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct DiskChs {
    // Cylinder number (0-based).
    pub(crate) c: u16,
    // Head number (0-based).
    pub(crate) h: u8,
    // Sector ID (1-based on standard PC formats; see `Default`).
    pub(crate) s: u8,
}
impl Default for DiskChs {
fn default() -> Self {
Self { c: 0, h: 0, s: 1 }
}
}
impl From<DiskChsn> for DiskChs {
fn from(chsn: DiskChsn) -> Self {
chsn.chs
}
}
impl From<(u16, u8, u8)> for DiskChs {
fn from((c, h, s): (u16, u8, u8)) -> Self {
Self { c, h, s }
}
}
impl From<DiskChs> for (u16, u8, u8) {
    /// Unpack a `DiskChs` into a `(c, h, s)` tuple.
    fn from(chs: DiskChs) -> Self {
        chs.get()
    }
}
impl From<(DiskCh, u8)> for DiskChs {
fn from((ch, s): (DiskCh, u8)) -> Self {
Self {
c: ch.c(),
h: ch.h(),
s,
}
}
}
impl Display for DiskChs {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "[c:{:2} h:{} s:{:3}]", self.c, self.h, self.s)
}
}
impl DiskChs {
/// Create a new `DiskChs` structure from cylinder, head and sector id components.
pub fn new(c: u16, h: u8, s: u8) -> Self {
Self { c, h, s }
}
/// Return the cylinder, head and sector id components in a tuple.
#[inline]
pub fn get(&self) -> (u16, u8, u8) {
    (self.c, self.h, self.s)
}
/// Return the cylinder (c) field.
#[inline]
pub fn c(&self) -> u16 {
    self.c
}
/// Return the head (h) field.
#[inline]
pub fn h(&self) -> u8 {
    self.h
}
/// Return the sector id (s) field.
#[inline]
pub fn s(&self) -> u8 {
    self.s
}
/// Return a `DiskCh` structure representing the cylinder and head components of a DiskChs.
#[inline]
pub fn ch(&self) -> DiskCh {
    DiskCh::new(self.c, self.h)
}
/// Set the three components of a `DiskChs`
pub fn set(&mut self, c: u16, h: u8, s: u8) {
    self.c = c;
    self.h = h;
    self.s = s;
}
/// Set the cylinder (c) component of a `DiskChs`
#[inline]
pub fn set_c(&mut self, c: u16) {
    self.c = c;
}
/// Set the head (h) component of a `DiskChs`
#[inline]
pub fn set_h(&mut self, h: u8) {
    self.h = h;
}
/// Set the sector ID (s) component of a `DiskChs`
#[inline]
pub fn set_s(&mut self, s: u8) {
    self.s = s;
}
/// Seek to the specified CHS.
/// This function is deprecated. Seeking cannot be performed directly on a `DiskChs` structure,
/// as sector IDs are not always sequential.
#[deprecated]
#[allow(deprecated)]
pub fn seek(&mut self, c: u16, h: u8, s: u8) {
    self.seek_to(&DiskChs::from((c, h, s)));
}
/// Seek to the specified CHS.
/// This function is deprecated. Seeking cannot be performed directly on a `DiskChs` structure,
/// as sector IDs are not always sequential. This simply overwrites all three fields.
#[deprecated]
pub fn seek_to(&mut self, dst_chs: &DiskChs) {
    self.c = dst_chs.c;
    self.h = dst_chs.h;
    self.s = dst_chs.s;
}
/// Return the number of sectors represented by a DiskChs structure, interpreted as drive geometry.
pub fn sector_count(&self) -> u32 {
    u32::from(self.c) * u32::from(self.h) * u32::from(self.s)
}
/// Return the number of sectors represented by a DiskChs structure, interpreted as drive geometry.
/// Identical to `sector_count()` but returns a `usize`.
pub fn total_sectors(&self) -> usize {
    usize::from(self.c) * usize::from(self.h) * usize::from(self.s)
}
/// Return a boolean indicating whether this `DiskChs`, interpreted as drive geometry, contains
/// the specified `DiskChs` representing a sector.
/// Cylinders and heads are 0-based (strict `>`), while sector IDs use `>=`
/// (sector IDs start at 1 on standard PC formats; see `Default`).
pub fn contains(&self, other: impl Into<DiskChs>) -> bool {
    let other = other.into();
    self.c > other.c && self.h > other.h && self.s >= other.s
}
/// Convert a [DiskChs] struct to an LBA sector address.
/// A reference [SectorLayout] is required to calculate the address.
/// Only valid for standard disk formats.
pub fn to_lba(&self, geom: &SectorLayout) -> usize {
    let heads_per_cyl = geom.h() as usize;
    let sectors_per_track = geom.s() as usize;
    // Linearize cylinder/head into a track number, then add the 0-based sector
    // index (the geometry's sector ID offset is subtracted, saturating at 0).
    let track = self.c as usize * heads_per_cyl + self.h as usize;
    let sector = self.s.saturating_sub(geom.s_off) as usize;
    track * sectors_per_track + sector
}
/// Convert an LBA sector address into a [DiskChs] struct and byte offset into the resulting sector.
/// A reference drive geometry is required to calculate the address.
/// Only valid for standard disk formats.
/// # Arguments:
/// * `lba` - The LBA sector address to convert.
/// * `geom` - A [SectorLayout], representing the number of heads and cylinders on the disk.
/// # Returns:
/// * `Some(DiskChs)` representing the resulting CHS address.
/// * `None` if the LBA address is invalid for the specified geometry.
pub fn from_lba(lba: usize, geom: &SectorLayout) -> Option<DiskChs> {
    let hpc = geom.h() as usize;
    let spt = geom.s() as usize;
    let c = lba / (hpc * spt);
    let h = (lba / spt) % hpc;
    // Sector IDs carry the geometry's offset (1-based on PC formats, 0-based on Amiga).
    let s = (lba % spt) + geom.s_off as usize;
    if c >= geom.c() as usize || h >= hpc || s > spt {
        return None;
    }
    Some(DiskChs::from((c as u16, h as u8, s as u8)))
}
    /// Convert a raw byte offset into a `DiskChs` struct and byte offset into the resulting sector.
    /// A reference standard disk geometry is required to calculate the address.
    /// Only valid for standard disk formats. This function is intended to assist seeking within a raw sector view.
    /// # Arguments:
    /// * `offset` - The raw byte offset into the sector view to convert.
    /// * `geom` - A [SectorLayout], representing the number of heads and cylinders on the disk.
    /// # Returns:
    /// * `Some((DiskChs, usize))` containing the resulting `DiskChs` and the byte offset into that sector.
    /// * `None` if the offset lies beyond the specified geometry.
    pub fn from_raw_offset(offset: usize, geom: &SectorLayout) -> Option<(DiskChs, usize)> {
        let lba = offset / geom.size();
        DiskChs::from_lba(lba, geom).map(|chs| (chs, offset % geom.size()))
    }
    /// Convert a `DiskChs` into a raw byte offset.
    /// A reference drive geometry is required to calculate the address.
    /// Only valid for standard disk formats. This function is intended to assist seeking within a raw sector view.
    /// # Arguments:
    /// * `geom` - A [SectorLayout], representing the number of heads and cylinders on the disk.
    /// # Returns:
    /// * `Some(usize)` containing the byte offset of the start of this sector.
    /// * `None` if this `DiskChs` lies outside the specified geometry.
    pub fn to_raw_offset(&self, geom: &SectorLayout) -> Option<usize> {
        geom.contains(*self).then_some(self.to_lba(geom) * geom.size())
    }
/// Return a new `DiskChs` that is the next sector on the disk, according to the specified
/// geometry.
/// Returns None if the current `DiskChs` represents the last sector of the specified geometry.
/// This function should only be used for iterating through sectors in a standard disk format.
/// It will not work correctly for non-standard disk formats.
/// # Arguments:
/// * `geom` - A [SectorLayout], representing the number of heads and cylinders on the disk.
pub fn next_sector(&self, geom: &SectorLayout) -> Option<DiskChs> {
if self.s < (geom.s() - 1 + geom.s_off) {
// println!(
// "Geometry: {} current sector: {}, spt: {}, last valid sector:{} Next sector: {}",
// geom,
// self.s,
// geom.s(),
// geom.s() - 1 + geom.s_off,
// self.s + 1
// );
// Not at last sector, just return next sector
Some(DiskChs::from((self.c, self.h, self.s + 1)))
}
else if self.h < geom.h().saturating_sub(1) {
// At last sector, but not at last head, go to next head, same cylinder, sector 1
Some(DiskChs::from((self.c, self.h + 1, geom.s_off)))
}
else if self.c < geom.c().saturating_sub(1) {
// At last sector and last head, go to next cylinder, head 0, sector (s_off)
Some(DiskChs::from((self.c + 1, 0, geom.s_off)))
}
else {
// At end of disk.
None
}
}
/// Return a new `Option<DiskChs>` that is `sectors` number of sectors advanced from the current
/// `DiskChs`, according to a provided geometry.
/// Returns None if advanced past the end of the disk.
/// # Arguments:
/// * `geom` - A [SectorLayout], representing the number of heads and cylinders on the disk.
pub fn offset_sectors(&mut self, sectors: u32, geom: &SectorLayout) -> Option<DiskChs> {
let mut start_chs = *self;
for _ in 0..sectors {
start_chs = start_chs.next_sector(geom)?;
}
Some(start_chs)
}
    /// Return a `DiskChsIterator` that will iterate through all sectors in order, interpreting the `DiskChs` as a standard disk geometry.
    /// This should only be used for standard disk formats. It will skip non-standard sectors, and may access sectors out of physical order.
    /// Note: iteration starts at cylinder 0, head 0, and the layout's first sector id; the
    /// receiver's own c/h/s values are not used as a starting point.
    pub fn iter(&self, geom: SectorLayout) -> DiskChsIterator {
        DiskChsIterator { geom, chs: None }
    }
}
/// A structure representing two of the four components of Sector ID:
/// - Cylinder (c)
/// - Head (h)
///
/// A `DiskCh` is usually used as a physical track specifier. It can hold the geometry of a disk,
/// or act as a cursor specifying a specific track on a disk.
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct DiskCh {
    // 0-based cylinder (track) number.
    pub(crate) c: u16,
    // 0-based head (side) number.
    pub(crate) h: u8,
}
impl PartialOrd for DiskCh {
    // Canonical delegation to `Ord`, keeping the two orderings consistent.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for DiskCh {
    /// Order tracks by cylinder first, then by head within a cylinder.
    fn cmp(&self, other: &Self) -> Ordering {
        (self.c, self.h).cmp(&(other.c, other.h))
    }
}
impl From<(u16, u8)> for DiskCh {
fn from((c, h): (u16, u8)) -> Self {
Self { c, h }
}
}
impl From<DiskChs> for DiskCh {
fn from(chs: DiskChs) -> Self {
Self { c: chs.c, h: chs.h }
}
}
impl From<DiskChsn> for DiskCh {
fn from(chsn: DiskChsn) -> Self {
Self {
c: chsn.c(),
h: chsn.h(),
}
}
}
impl Display for DiskCh {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "[c:{} h:{}]", self.c, self.h)
}
}
impl DiskCh {
    /// Create a new DiskCh structure from a Cylinder (c) and Head (h) specifier.
    pub fn new(c: u16, h: u8) -> Self {
        Self { c, h }
    }
    /// Return the cylinder (c) field.
    pub fn c(&self) -> u16 {
        self.c
    }
    /// Return the head (h) field.
    pub fn h(&self) -> u8 {
        self.h
    }
    /// Set the cylinder (c) field.
    pub fn set_c(&mut self, c: u16) {
        self.c = c
    }
    /// Set the head (h) field.
    pub fn set_h(&mut self, h: u8) {
        self.h = h
    }
    /// Return a new `DiskCh` that represents the next track on disk, visiting every
    /// head of a cylinder before advancing to the next cylinder.
    /// # Arguments:
    /// * `geom` - Any type implementing `Into<DiskCh>`, representing the number of heads and cylinders on the disk.
    /// # Returns:
    /// `Some(DiskCh)` representing the next track on disk.
    /// `None` if the current `DiskCh` is the last track on the disk.
    pub fn next_track(&self, geom: impl Into<DiskCh>) -> Option<DiskCh> {
        let geom = geom.into();
        if self.h < geom.h().saturating_sub(1) {
            // Not at last head, just return next head
            Some(DiskCh::from((self.c, self.h + 1)))
        }
        else if self.c < geom.c().saturating_sub(1) {
            // At last head, but not at last cylinder. Return next cylinder, head 0
            Some(DiskCh::from((self.c + 1, 0)))
        }
        else {
            // At last head and track, return None.
            None
        }
    }
    /// Return a new `DiskCh` that represents the next track on disk, without bounding
    /// the cylinder number by any geometry.
    /// # Arguments:
    /// * `heads` - A u8 value representing the number of heads on the disk.
    /// # Returns:
    /// A new `DiskCh` representing the next logical track.
    pub fn next_track_unchecked(&self, heads: u8) -> DiskCh {
        if self.h < heads.saturating_sub(1) {
            // Not at last head, just return next head
            DiskCh::from((self.c, self.h + 1))
        }
        else {
            // Advance to the next cylinder, head 0
            DiskCh::from((self.c + 1, 0))
        }
    }
    /// Treating the `DiskCh` as a track cursor, set it to reference the next logical track on the disk.
    /// If the cursor cannot be advanced within the given geometry, it remains unchanged.
    /// # Arguments:
    /// * `geom` - Any type implementing `Into<DiskCh>`, representing the number of heads and cylinders on the disk.
    ///
    /// # Returns:
    /// A boolean indicating whether the track was successfully advanced. false indicates that the
    /// cursor could not be advanced any further within the given geometry.
    pub fn seek_next_track(&mut self, geom: impl Into<DiskCh>) -> bool {
        // Fix: the previous implementation could report `true` without moving the cursor
        // when it already lay outside the geometry (the early-out only matched the exact
        // last track, while `next_track()` returned `None` and `unwrap_or` kept `self`
        // unchanged). Deriving the result directly from `next_track()` keeps "advanced"
        // and `true` in lockstep, removing the potential infinite-loop hazard for callers
        // that loop while this returns true.
        match self.next_track(geom) {
            Some(next) => {
                *self = next;
                true
            }
            None => false,
        }
    }
    /// Treating the `DiskCh` as a track cursor, set it to reference the next logical track on the disk.
    /// The cylinder number will be allowed to advance unbounded. It may no longer represent a valid track.
    /// This routine is intended for building disk images, where the track number may grow as tracks
    /// are added.
    /// # Arguments:
    /// * `heads` - The number of heads on the disk.
    pub fn seek_next_track_unchecked(&mut self, heads: u8) {
        *self = self.next_track_unchecked(heads);
    }
    /// Return a `DiskChIterator` that visits every track of this geometry in order,
    /// starting at cylinder 0, head 0.
    pub fn iter(&self) -> DiskChIterator {
        DiskChIterator {
            geom: *self,
            ch: None,
        }
    }
}
/// Iterator over every track address of a disk geometry, in cylinder-major,
/// head-minor order, starting at (0, 0).
pub struct DiskChIterator {
    // Geometry (cylinder/head counts) bounding the iteration.
    geom: DiskCh,
    // Last track yielded; `None` until the first call to `next()`.
    ch: Option<DiskCh>,
}
impl Iterator for DiskChIterator {
    type Item = DiskCh;
    fn next(&mut self) -> Option<Self::Item> {
        // First call yields track (0, 0); afterwards step through next_track()
        // until it runs off the end of the geometry.
        self.ch = match self.ch {
            None => Some(DiskCh::new(0, 0)),
            Some(cur) => Some(cur.next_track(self.geom)?),
        };
        self.ch
    }
}
/// Iterator over every sector address of a standard disk layout, in cylinder,
/// head, sector order, starting at (0, 0, first sector id).
pub struct DiskChsIterator {
    // Layout (cylinder/head/sector counts and first sector id) bounding the iteration.
    geom: SectorLayout,
    // Last sector yielded; `None` until the first call to `next()`.
    chs: Option<DiskChs>,
}
impl Iterator for DiskChsIterator {
    type Item = DiskChs;
    fn next(&mut self) -> Option<Self::Item> {
        // First call yields (0, 0, first sector id); afterwards step through
        // next_sector() until the geometry is exhausted.
        self.chs = match self.chs {
            None => Some(DiskChs::new(0, 0, self.geom.s_off)),
            Some(cur) => Some(cur.next_sector(&self.geom)?),
        };
        self.chs
    }
}
/// Iterator over every sector address of a standard disk layout, yielding
/// `DiskChsn` items that carry the layout's uniform sector-size exponent (n).
pub struct DiskChsnIterator {
    // Layout bounding the iteration and supplying the size exponent.
    geom: SectorLayout,
    // Position cursor; `None` until the first call to `next()`.
    chs: Option<DiskChs>,
}
impl Iterator for DiskChsnIterator {
    type Item = DiskChsn;
    fn next(&mut self) -> Option<Self::Item> {
        // Track position with a plain DiskChs cursor and attach the layout's
        // sector-size exponent (n) to each yielded item.
        let next_chs = match self.chs {
            None => DiskChs::new(0, 0, self.geom.s_off),
            Some(cur) => cur.next_sector(&self.geom)?,
        };
        self.chs = Some(next_chs);
        Some(DiskChsn::from((next_chs, self.geom.n())))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::StandardFormat;

    #[test]
    fn diskchsn_new_creates_correct_instance() {
        let chsn = DiskChsn::new(1, 2, 3, 4);
        assert_eq!(chsn.c(), 1);
        assert_eq!(chsn.h(), 2);
        assert_eq!(chsn.s(), 3);
        assert_eq!(chsn.n(), 4);
    }

    #[test]
    fn diskchsn_n_size_calculates_correct_size() {
        // n = 3 corresponds to 1024-byte sectors.
        let chsn = DiskChsn::new(0, 0, 0, 3);
        assert_eq!(chsn.n_size(), 1024);
    }

    #[test]
    fn diskchsn_n_size_enforces_maximum_size() {
        // Sizes derived from n are capped at 8192 bytes.
        let chsn = DiskChsn::new(0, 0, 0, 7);
        assert_eq!(chsn.n_size(), 8192);
    }

    #[test]
    fn diskchsn_size_to_n_calculates_correct_n() {
        assert_eq!(DiskChsn::bytes_to_n(1024), 3);
    }

    #[test]
    fn diskchs_to_lba_calculates_correct_lba() {
        let geom = SectorLayout::new(40, 2, 9, 1, 512);
        let chs = DiskChs::new(2, 1, 5);
        // (2(cyl) * 2(heads) + 1(head)) * 9(spt) + (5 - 1)(sector id minus s_off of 1) = 49
        assert_eq!(chs.to_lba(&geom), 49);
    }

    #[test]
    fn diskchs_from_lba_calculates_correct_chs() {
        let geom = SectorLayout::new(40, 2, 9, 1, 512);
        let lba = 49;
        let chs = DiskChs::from_lba(lba, &geom).unwrap();
        assert_eq!(chs, DiskChs::new(2, 1, 5));
    }

    #[test]
    fn diskchs_from_raw_offset_calculates_correct_chs() {
        let geom = StandardFormat::PcFloppy360.layout();
        let offset = 2560; // 5 sectors offset (512-byte sectors)
        let (chs, byte_offset) = DiskChs::from_raw_offset(offset, &geom).unwrap();
        assert_eq!(byte_offset, 0);
        // 5 whole sectors in, with s_off = 1, lands at sector id 6.
        assert_eq!(DiskChs::new(0, 0, 6), chs);
    }

    #[test]
    fn diskchs_from_lba_returns_none_for_out_of_range() {
        let geom = SectorLayout::new(40, 2, 9, 1, 512);
        let lba = 720; // Out of range LBA for the given geometry
        let chs = DiskChs::from_lba(lba, &geom);
        assert!(chs.is_none());
    }

    #[test]
    fn diskchs_from_raw_offset_calculates_correct_chs_and_offset() {
        let geom = SectorLayout::new(40, 2, 9, 1, 1024);
        let offset = 5120; // 5 sectors offset (1024-byte sectors)
        let (chs, byte_offset) = DiskChs::from_raw_offset(offset, &geom).unwrap();
        assert_eq!(chs, DiskChs::new(0, 0, 6));
        assert_eq!(byte_offset, 0);
        let offset = 5123; // 5 sectors and 3 bytes offset
        let (chs, byte_offset) = DiskChs::from_raw_offset(offset, &geom).unwrap();
        assert_eq!(chs, DiskChs::new(0, 0, 6));
        assert_eq!(byte_offset, 3);
    }

    #[test]
    fn diskch_get_next_track_wraps_correctly() {
        // 360K geometry has 2 heads: from (1, 1) the next track is (2, 0).
        let ch = DiskCh::new(1, 1);
        let next_ch = ch.next_track(StandardFormat::PcFloppy360);
        assert_eq!(next_ch, Some(DiskCh::new(2, 0)));
    }

    #[test]
    fn diskch_iter_works() {
        let geom = StandardFormat::PcFloppy360.ch();
        let ch = geom.iter().next().unwrap();
        assert_eq!(ch, DiskCh::new(0, 0));
        let last_chs = geom.iter().last().unwrap();
        assert_eq!(last_chs, DiskCh::new(geom.c() - 1, geom.h() - 1));
        let iter_ct = geom.iter().count();
        assert_eq!(iter_ct, geom.c() as usize * geom.h() as usize);
    }

    #[test]
    fn diskchs_iter_works() {
        let geom = StandardFormat::PcFloppy360.layout();
        let total_sectors = geom.total_sectors();
        let first_chs = geom.chs_iter().next().unwrap();
        // PC formats use 1-based sector ids (s_off = 1).
        assert_eq!(first_chs, DiskChs::new(0, 0, 1));
        let last_chs = geom.chs_iter().last().unwrap();
        assert_eq!(last_chs, DiskChs::new(geom.c() - 1, geom.h() - 1, geom.s()));
        let iter_ct = geom.chs_iter().count();
        assert_eq!(iter_ct, total_sectors);
    }

    #[test]
    #[cfg(feature = "amiga")]
    fn diskchs_iter_works_with_0_offset() {
        let geom = StandardFormat::AmigaFloppy880.layout();
        let total_sectors = geom.total_sectors();
        let first_chs = geom.chs_iter().next().unwrap();
        // Amiga formats use 0-based sector ids (s_off = 0).
        assert_eq!(first_chs, DiskChs::new(0, 0, 0));
        let last_chs = geom.chs_iter().last().unwrap();
        assert_eq!(last_chs, DiskChs::new(geom.c() - 1, geom.h() - 1, geom.s() - 1));
        let iter_ct = geom.chs_iter().count();
        assert_eq!(iter_ct, total_sectors);
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/types/structs.rs | src/types/structs.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
structs.rs
Defines common structs
*/
use crate::{
file_parsers::FormatCaps,
platform::Platform,
prelude::{DiskCh, DiskChsn},
track::TrackAnalysis,
track_schema::TrackSchema,
types::{DiskRpm, IntegrityCheck, TrackDataEncoding, TrackDataRate, TrackDensity},
};
use std::{
fmt,
fmt::{Display, Formatter},
ops::Range,
};
/// A structure that defines several flags that can apply to a sector.
#[derive(Copy, Clone, Debug, Default)]
pub struct SectorAttributes {
    /// The sector header failed its integrity check.
    pub address_error: bool,
    /// The sector data failed its integrity check.
    pub data_error: bool,
    /// The sector carries a "deleted data" address mark.
    pub deleted_mark: bool,
    /// A sector ID exists with no corresponding data mark (DAM).
    pub no_dam: bool,
}
/// A structure used to describe the parameters of a sector to be created on a `MetaSector`
/// resolution track.
#[derive(Default)]
pub struct AddSectorParams<'a> {
    /// The full sector ID (cylinder, head, sector, size) to record.
    pub id_chsn: DiskChsn,
    /// The sector's data payload.
    pub data: &'a [u8],
    /// Optional mask selecting weak (fuzzy) bits within `data`.
    pub weak_mask: Option<&'a [u8]>,
    /// Optional mask selecting positions that fall within physical holes in the media
    /// — TODO confirm mask granularity (bit vs byte).
    pub hole_mask: Option<&'a [u8]>,
    /// Error/deleted/no-DAM flags to attach to the sector.
    pub attributes: SectorAttributes,
    /// NOTE(review): presumably marks this sector as an alternate copy of an existing
    /// sector ID — confirm semantics with the MetaSector implementation.
    pub alternate: bool,
    /// Optional bit index locating the sector within the track.
    pub bit_index: Option<usize>,
}
/// A structure to uniquely identify a specific sector on a track.
/// The offsets/index allow disambiguating duplicate sector IDs on the same track.
#[derive(Copy, Clone, Debug, Default)]
pub struct SectorCursor {
    /// The sector id. Either a `sector_idx` or `bit_offset` is required to discriminate between
    /// sectors with the same ID.
    pub id_chsn: DiskChsn,
    /// The physical sector index within the track, starting at 0.
    pub sector_idx: Option<usize>,
    /// The bit offset of the start of the sector header element.
    pub header_offset: Option<usize>,
    /// The bit offset of the start of the sector data element.
    pub data_offset: Option<usize>,
}
/// A sector ID paired with its observed attribute flags.
#[derive(Copy, Clone, Debug, Default)]
pub struct SectorMapEntry {
    /// The full sector ID (cylinder, head, sector, size).
    pub chsn: DiskChsn,
    /// Error/deleted/no-DAM flags observed for this sector.
    pub attributes: SectorAttributes,
}
/// A `DiskAnalysis` structure maintains information about the consistency of a disk image.
#[derive(Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct DiskAnalysis {
    /// Image format capability flags that this image requires in order to be represented.
    pub image_caps: FormatCaps,
    /// Whether the disk image contains weak bits.
    pub weak: bool,
    /// Whether the disk image contains deleted sectors.
    pub deleted_data: bool,
    /// Whether the disk image contains sector IDAMs with no corresponding DAMs.
    pub no_dam: bool,
    /// Whether the disk image contains sectors with bad address mark CRCs.
    pub address_error: bool,
    /// Whether the disk image contains sectors with bad data CRCs.
    pub data_error: bool,
    /// Whether the disk image contains overlapped sectors.
    pub overlapped: bool,
    /// The sector size if the disk image has consistent sector sizes, otherwise None.
    pub consistent_sector_size: Option<u8>,
    /// The track length in sectors if the disk image has consistent track lengths, otherwise None.
    pub consistent_track_length: Option<u32>,
}
impl DiskAnalysis {
    /// Fold the analysis of a single track into this disk-wide analysis.
    /// NOTE(review): the boolean flags are overwritten rather than OR-ed, so a later
    /// track without errors clears flags set by an earlier one — confirm whether this
    /// is called once per disk or per track before relying on the accumulated flags.
    pub fn set_track_analysis(&mut self, ta: &TrackAnalysis) {
        self.deleted_data = ta.deleted_data;
        self.address_error = ta.address_error;
        self.data_error = ta.data_error;
        self.no_dam = ta.no_dam;
        // An inconsistent per-track sector size invalidates the disk-wide value;
        // a consistent one does not update it here.
        if ta.consistent_sector_size.is_none() {
            self.consistent_sector_size = None;
        }
    }
}
/// A `DiskDescriptor` structure describes the basic geometry and parameters of a disk image.
#[derive(Clone, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct DiskDescriptor {
    /// The platform(s) that the disk image is intended for, if determined.
    /// Multiple platforms can be specified for dual and triple-format disks.
    pub platforms: Option<Vec<Platform>>,
    /// The basic geometry of the disk. Not all tracks present need to conform to the specified sector count (s).
    pub geometry: DiskCh,
    /// The overall data encoding of the disk (one or more tracks may have different encodings).
    pub data_encoding: TrackDataEncoding,
    /// The overall density of the disk (one or more tracks may have different densities).
    pub density: TrackDensity,
    /// The overall data rate of the disk (one or more tracks may have different data rates).
    pub data_rate: TrackDataRate,
    /// The rotation rate of the disk. If not provided, this can be determined from other parameters.
    pub rpm: Option<DiskRpm>,
    /// Whether the disk image should be considered read-only (`None` if the source image
    /// format did not define this flag).
    pub write_protect: Option<bool>,
}
/// A `ScanSectorResult` structure contains the results of a scan sector operation.
/// The `Default` value marks the sector as not found, with all other flags clear.
#[derive(Debug, Clone)]
pub struct ScanSectorResult {
    /// Whether the specified Sector ID was found.
    pub not_found: bool,
    /// Whether the specified Sector ID was found, but no corresponding sector data was found.
    pub no_dam: bool,
    /// Whether the specific sector has a "deleted data" address mark.
    pub deleted_mark: bool,
    /// Whether the specified sector failed a header data integrity check.
    pub address_error: bool,
    /// Whether the specified sector failed a data integrity check.
    pub data_error: bool,
    /// Whether the specified sector ID was not matched, but a sector ID with a different cylinder
    /// specifier was found.
    pub wrong_cylinder: bool,
    /// Whether the specified sector ID was not matched, but a sector ID with a bad cylinder
    /// specifier was found.
    pub bad_cylinder: bool,
    /// Whether the specified sector ID was not matched, but a sector ID with a different head
    /// specifier was found.
    pub wrong_head: bool,
}
impl Default for ScanSectorResult {
    /// Implemented by hand (rather than derived) so that `not_found` defaults to
    /// `true`: an empty result means "sector not found", with every other flag clear.
    fn default() -> Self {
        Self {
            not_found: true,
            no_dam: false,
            deleted_mark: false,
            address_error: false,
            data_error: false,
            wrong_cylinder: false,
            bad_cylinder: false,
            wrong_head: false,
        }
    }
}
/// A structure containing the (optional) recorded and calculated CRC values for a region of data.
/// This can represent the result of a CRC or checksum calculation resulting in the specified type,
/// but does not specify the exact algorithm used.
///
/// An [IntegrityField] is usually stored within a [DataIntegrity] enum that specifies the type of
/// check performed.
#[derive(Copy, Clone, Debug)]
pub struct IntegrityField<T> {
    /// Cached validity flag; authoritative only when `recorded` is `None` (see `is_valid`).
    pub valid: bool,
    /// The value recorded in the source image, if one was stored.
    pub recorded: Option<T>,
    /// The value freshly calculated from the data.
    pub calculated: T,
}
impl<T: PartialEq> From<(T, T)> for IntegrityField<T> {
fn from((recorded, calculated): (T, T)) -> Self {
IntegrityField::new(recorded, calculated)
}
}
impl<T> Display for IntegrityField<T>
where
    T: Display + PartialEq + fmt::UpperHex,
{
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        // Width for `{:#0w$X}` formatting: two hex digits per byte, plus two for the
        // "0x" prefix, which the `#` flag counts against the field width.
        // Fix: the previous width omitted the prefix, leaving values under-zero-padded
        // (e.g. a u16 value 0xAB printed as "0xAB" instead of "0x00AB").
        let hex_width = size_of::<T>() * 2 + 2;
        match &self.recorded {
            Some(recorded) => write!(
                f,
                "[Recorded: {:#0hex_width$X}, Calculated: {:#0hex_width$X} {}]",
                recorded,
                self.calculated,
                if self.is_valid() { "Valid" } else { "*Invalid*" },
                hex_width = hex_width
            ),
            None => write!(
                f,
                "[No CRC recorded, Calculated: {:#0hex_width$X}]",
                self.calculated,
                hex_width = hex_width
            ),
        }
    }
}
impl<T> IntegrityField<T>
where
    T: PartialEq,
{
    /// Build a field from a recorded value and a freshly calculated one; `valid`
    /// caches their equality.
    pub fn new(recorded: T, calculated: T) -> Self {
        let valid = recorded == calculated;
        Self {
            valid,
            recorded: Some(recorded),
            calculated,
        }
    }
    /// Create a new DataCheckResult without a recorded value (ie, from a MetaSector resolution
    /// image file that only stores boolean flags for CRC validity).
    pub fn unrecorded(valid: bool, calculated: T) -> Self {
        Self {
            valid,
            recorded: None,
            calculated,
        }
    }
    /// Check whether the recorded value matches the calculated value, falling back to
    /// the stored `valid` flag when no value was recorded.
    pub fn is_valid(&self) -> bool {
        match &self.recorded {
            Some(recorded) => *recorded == self.calculated,
            None => self.valid,
        }
    }
    /// Inverse of [`Self::is_valid`].
    pub fn is_error(&self) -> bool {
        !self.is_valid()
    }
}
/// A `ReadSectorResult` structure contains the results of a read sector operation.
#[derive(Clone)]
pub struct ReadSectorResult {
    /// The matching Sector ID as `DiskChsn`, or `None`.
    pub id_chsn: Option<DiskChsn>,
    /// Whether the specified Sector ID was found.
    pub not_found: bool,
    /// Whether the specified Sector ID was found, but no corresponding sector data was found.
    pub no_dam: bool,
    /// Whether the specific sector was marked deleted.
    pub deleted_mark: bool,
    /// Whether the specified sector had a CRC error with the sector header.
    pub address_crc_error: bool,
    /// The CRC values for the sector header, if available.
    pub address_crc: Option<IntegrityCheck>,
    /// Whether the specified sector had a CRC error with the sector data.
    pub data_crc_error: bool,
    /// The CRC values for the sector data, if available.
    pub data_crc: Option<IntegrityCheck>,
    /// Whether the specified sector ID was not matched, but a sector ID with a different cylinder
    /// specifier was found.
    pub wrong_cylinder: bool,
    /// Whether the specified sector ID was not matched, but a sector ID with a bad cylinder
    /// specifier was found.
    pub bad_cylinder: bool,
    /// Whether the specified sector ID was not matched, but a sector ID with a different head
    /// specifier was found.
    pub wrong_head: bool,
    /// The byte range of the sector data within `read_buf`.
    pub data_range: Range<usize>,
    /// The data read for the sector, potentially including address mark and CRC bytes.
    /// Use `data_range` (or the `data()` accessor) to isolate the sector data within this vector.
    pub read_buf: Vec<u8>,
}
impl Default for ReadSectorResult {
    /// Implemented by hand (rather than derived) so that `not_found` defaults to
    /// `true`: an empty result means "sector not found".
    fn default() -> Self {
        Self {
            id_chsn: None,
            not_found: true,
            no_dam: false,
            deleted_mark: false,
            address_crc_error: false,
            address_crc: None,
            data_crc_error: false,
            data_crc: None,
            wrong_cylinder: false,
            bad_cylinder: false,
            wrong_head: false,
            data_range: 0..0,
            read_buf: Vec::new(),
        }
    }
}
impl ReadSectorResult {
    /// Borrow just the sector data payload from the raw read buffer.
    /// Panics if `data_range` lies outside `read_buf`; the two are expected to be
    /// constructed together by the read operation.
    pub fn data(&self) -> &[u8] {
        &self.read_buf[self.data_range.clone()]
    }
}
/// A `ReadTrackResult` structure contains the results of a read track operation.
#[derive(Clone)]
pub struct ReadTrackResult {
    /// Whether no sectors were found reading the track.
    pub not_found: bool,
    /// Whether the track contained at least one sector with a deleted data mark.
    pub deleted_mark: bool,
    /// Whether the track contained at least one sector with a CRC error in the address mark.
    pub address_crc_error: bool,
    /// Whether the track contained at least one sector with a CRC error in the data.
    pub data_crc_error: bool,
    /// The total number of sectors read from the track.
    pub sectors_read: u16,
    /// The data read for the track.
    pub read_buf: Vec<u8>,
    /// The total number of bits read.
    pub read_len_bits: usize,
    /// The total number of bytes read.
    pub read_len_bytes: usize,
}
/// A `WriteSectorResult` structure contains the results of a write sector operation.
#[derive(Clone)]
pub struct WriteSectorResult {
    /// Whether a matching Sector ID was found.
    pub not_found: bool,
    /// Whether the specified Sector ID was found, but no corresponding sector data was found.
    pub no_dam: bool,
    /// Whether the specific sector header matching the Sector ID had a bad CRC.
    /// In this case, the write operation will have failed.
    pub address_crc_error: bool,
    /// Whether the specified sector ID was not matched, but a sector ID with a different cylinder
    /// specifier was found.
    pub wrong_cylinder: bool,
    /// Whether the specified sector ID was not matched, but a sector ID with a bad cylinder
    /// specifier was found.
    pub bad_cylinder: bool,
    /// Whether the specified sector ID was not matched, but a sector ID with a different head
    /// specifier was found.
    pub wrong_head: bool,
}
/// A contiguous region of a track.
pub struct TrackRegion {
    /// Start offset of the region. Units (bit vs byte index) are not established
    /// here — confirm at call sites.
    pub start: usize,
    /// End offset of the region, in the same units as `start`.
    pub end: usize,
}
/// `BitStreamTrackParams` structure contains parameters required to create a `BitStream`
/// resolution track.
///
/// `add_track_bitstream()` takes a `BitStreamTrackParams` structure as an argument.
pub struct BitStreamTrackParams<'a> {
    /// The track schema to use, if known. If not known, use `None` then the schema will be
    /// detected - however supplying the schema can improve performance by avoiding wasted decoding
    /// attempts.
    pub schema: Option<TrackSchema>,
    /// The physical cylinder and head of the track to add.
    pub ch: DiskCh,
    /// The data encoding of the supplied bitstream.
    pub encoding: TrackDataEncoding,
    /// The data rate of the supplied bitstream.
    pub data_rate: TrackDataRate,
    /// The disk rotation rate, if known.
    pub rpm: Option<DiskRpm>,
    /// The number of bitcells in the track, if known.
    pub bitcell_ct: Option<usize>,
    /// The raw track bitstream data.
    pub data: &'a [u8],
    /// Optional mask of weak (fuzzy) bits.
    pub weak: Option<&'a [u8]>,
    /// Optional mask of physical holes in the media.
    pub hole: Option<&'a [u8]>,
    /// Whether to attempt automatic detection of weak bits.
    pub detect_weak: bool,
}
/// `FluxStreamTrackParams` contains parameters required to create a `FluxStream` resolution track.
///
/// `add_track_fluxstream()` takes a `FluxStreamTrackParams` structure as an argument.
pub struct FluxStreamTrackParams {
    /// The physical cylinder and head of the track to add.
    pub ch: DiskCh,
    /// The track schema to use for the track, if known. If not known, use `None` and the schema
    /// will be inferred from the track data.
    pub schema: Option<TrackSchema>,
    /// The data encoding used in the track. If not known, use `None` and the encoding will be
    /// inferred from the track data.
    pub encoding: Option<TrackDataEncoding>,
    /// A hint for the base PLL clock frequency to use decoding the track. If not known, use `None`
    /// and the clock frequency will be inferred from the track data.
    /// NOTE(review): units not established here — presumably seconds per bitcell, cf. the
    /// 1.5e-6 threshold in `DiskRpm::adjust_clock` — confirm.
    pub clock: Option<f64>,
    /// A hint for the disk rotation rate to use decoding the track. If not known, use `None`
    /// and the rotation rate will be inferred from the track data.
    pub rpm: Option<DiskRpm>,
}
/// `MetaSectorTrackParams` contains parameters required to create a `MetaSector` resolution track.
///
/// `add_track_metasector()` takes a `MetaSectorTrackParams` structure as an argument.
pub struct MetaSectorTrackParams {
    /// The physical cylinder and head of the track to add.
    /// This should be the next available track in the disk image.
    pub ch: DiskCh,
    /// The track data encoding used in the track. This may not be specified by a `MetaSector`
    /// disk image, but can be inferred from the `Platform` or `StandardFormat`.
    /// It does not really affect the operation of a `MetaSector` track, but incorrect values may
    /// persist in exported disk images.
    pub encoding: TrackDataEncoding,
    /// The track data rate. Similar caveats to the ones discussed for `encoding` apply.
    pub data_rate: TrackDataRate,
}
/// Disk-wide bookkeeping state shared within the crate.
#[derive(Default)]
pub(crate) struct SharedDiskContext {
    /// The number of write operations (WriteData or FormatTrack) operations performed on the disk image.
    /// This can be used to determine if the disk image has been modified since the last save.
    pub(crate) writes: u64,
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/types/rpm.rs | src/types/rpm.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! RPM (Revolutions Per Minute) related types and functions.
use crate::types::DiskCh;
use std::{
fmt,
fmt::{Display, Formatter},
};
/// A [DiskRpm] represents the physical rotation rate of a disk within a drive
/// context.
/// The most common rotation rate used for floppy disks is 300RPM, but this was
/// not universal.
///
/// The most common variant is the 360RPM used by the IBM PC's 5.25" high
/// density floppy drives, even when reading DD disks. Some of these drives
/// could also operate at 300RPM.
///
/// Some platforms would either halve or double the normal RPM rate as a
/// technical shortcut.
/// The Amiga's Paula chip couldn't handle high density data rates, so Amiga
/// high density disk drives would spin at 150RPM to halve the effective
/// data rate.
///
/// The Macintosh's SWIM controller could only handle fixed bitcell sizes,
/// so the Mac SuperDrive spun DD disks at 600RPM.
///
/// A drive's RPM was not always constant over the surface of the disk.
/// Zoned recording was a common technique used to increase the data density
/// to take advantage of the outer tracks' greater circumference. This was
/// used on the Apple II and inherited by the Macintosh in GCR mode.
///
#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum DiskRpm {
    // Each variant stores a deviation factor: measured rate / nominal rate
    // (1.0 == exactly nominal). See `factor()` and `From<DiskRpm> for f64`.
    /// A 150 RPM base rotation rate (Amiga high density).
    Rpm150(f64),
    /// A 300 RPM base rotation rate.
    Rpm300(f64),
    /// A 360 RPM base rotation rate.
    Rpm360(f64),
    /// A 600 RPM base rotation rate (Macintosh SuperDrive reading DD).
    Rpm600(f64),
    /// A Zoned rotation rate, specifying an RPM mapping for each track.
    Zoned(RpmZoneMap, f64),
}
impl From<DiskRpm> for f64 {
    /// Convert a DiskRpm to a floating-point RPM value: the nominal rate scaled by
    /// the stored deviation factor.
    fn from(rpm: DiskRpm) -> Self {
        use DiskRpm::*;
        match rpm {
            Rpm150(f) => 150.0 * f,
            Rpm300(f) => 300.0 * f,
            Rpm360(f) => 360.0 * f,
            Rpm600(f) => 600.0 * f,
            // NOTE(review): zoned maps are collapsed to the rate at track (0, 0),
            // i.e. the first zone — confirm this is the intended behavior.
            Zoned(map, f) => map.calculate(DiskCh::default()) as f64 * f,
        }
    }
}
impl Default for DiskRpm {
    /// 300 RPM with no deviation — the most common floppy rotation rate.
    fn default() -> Self {
        DiskRpm::Rpm300(1.0)
    }
}
impl Display for DiskRpm {
    /// Render the nominal rate plus the deviation from it as a signed percentage,
    /// e.g. "300RPM +1.500%". An exact factor of 1.0 prints no deviation suffix.
    fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
        // Fix: the original printed the deviation with the sign flipped (a factor
        // > 1.0 produced " +<negative number>" and vice versa) and never scaled the
        // fractional factor to percent despite the "%" suffix.
        let f = self.factor();
        let f_str = if f == 1.0 {
            "".to_string()
        }
        else if f > 1.0 {
            format!(" +{:.3}%", (f - 1.0) * 100.0)
        }
        else {
            format!(" -{:.3}%", (1.0 - f) * 100.0)
        };
        match self {
            DiskRpm::Rpm150(_) => write!(fmt, "150RPM{}", f_str),
            DiskRpm::Rpm300(_) => write!(fmt, "300RPM{}", f_str),
            DiskRpm::Rpm360(_) => write!(fmt, "360RPM{}", f_str),
            DiskRpm::Rpm600(_) => write!(fmt, "600RPM{}", f_str),
            DiskRpm::Zoned(_map, _) => write!(fmt, "Zoned RPM"),
        }
    }
}
impl DiskRpm {
    /// Retrieve the adjustment factor for this [DiskRpm].
    /// A factor of 1.0 means the drive spins at exactly the nominal rate.
    pub fn factor(&self) -> f64 {
        match *self {
            DiskRpm::Rpm150(f) => f,
            DiskRpm::Rpm300(f) => f,
            DiskRpm::Rpm360(f) => f,
            DiskRpm::Rpm600(f) => f,
            DiskRpm::Zoned(_map, f) => f,
        }
    }
    /// Try to calculate a [DiskRpm] from the time between index pulses in milliseconds.
    /// Sometimes flux streams report bizarre RPMs, so you will need fallback logic if this
    /// conversion fails.
    ///
    /// This function should not be used on platforms with Zoned RPMs.
    // NOTE(review): `60.0 / time` only yields revolutions-per-minute if `time`
    // is in *seconds* (e.g. 0.2 s -> 300 RPM); the doc above says milliseconds.
    // Confirm the expected unit against callers.
    pub fn try_from_index_time(time: f64) -> Option<DiskRpm> {
        let rpm = 60.0 / time;
        // We'd like to support a 15% deviation, but there is a small overlap between 300 +15%
        // and 360 -15%, so we split the difference at 327 RPM.
        match rpm {
            270.0..327.00 => Some(DiskRpm::Rpm300(rpm / 300.0)),
            327.0..414.00 => Some(DiskRpm::Rpm360(rpm / 360.0)),
            _ => None,
        }
    }
    /// Convert a [DiskRpm] to an index time in milliseconds.
    // NOTE(review): `60.0 / rpm` produces *seconds* per revolution
    // (0.2 at 300 RPM), not milliseconds — the name/doc appear off by 1000x.
    // Confirm against callers before renaming.
    pub fn index_time_ms(&self) -> f64 {
        60.0 / f64::from(*self)
    }
    #[inline]
    /// Adjust a PLL base clock for this rotation rate: a double-density clock
    /// (>= 1.5us) read in a 360 RPM drive is scaled down by 300/360.
    pub fn adjust_clock(&self, base_clock: f64) -> f64 {
        // Assume a base clock of 1.5us or greater is a double density disk.
        if matches!(self, DiskRpm::Rpm360(_)) && base_clock >= 1.5e-6 {
            base_clock * (300.0 / 360.0)
        }
        else {
            base_clock
        }
    }
}
/// Identifies a per-track RPM mapping for platforms that used zoned recording.
/// The actual per-zone speeds live in [RpmZoneMap::calculate].
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
pub enum RpmZoneMap {
    /// First Apple/Macintosh zone speed table.
    #[default]
    AppleSpeed1,
    /// Second Apple/Macintosh zone speed table.
    AppleSpeed2,
}
impl RpmZoneMap {
    /// Calculate the RPM for a given track with this zone map.
    /// The head number is also required to support any potential weird platforms that might
    /// have different RPMs per side.
    pub fn calculate(&self, ch: DiskCh) -> u32 {
        // Zones span 16 cylinders each; cylinders >= 64 all fall in the last
        // zone. Values taken from the Mac 400K drive datasheet, confirmed by
        // Applesauce.
        let zone_speeds: [u32; 5] = match self {
            RpmZoneMap::AppleSpeed1 => [394, 429, 472, 525, 590],
            RpmZoneMap::AppleSpeed2 => [402, 438, 482, 536, 603],
        };
        let zone = usize::min(ch.c as usize / 16, zone_speeds.len() - 1);
        zone_speeds[zone]
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/types/sector_layout.rs | src/types/sector_layout.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
enums.rs
Defines common enum types
*/
use crate::{
types::{DiskCh, DiskChIterator, DiskChs, DiskChsIterator, DiskChsn, DiskChsnIterator},
DEFAULT_SECTOR_SIZE,
};
use std::{fmt::Display, ops::Range};
/// A [SectorLayoutRange] can be used to generalize sector layouts for platforms that had varying
/// track counts and sector counts per track. For example, it was common on the Atari ST to have 8-11
/// sectors per track, unlike the Ibm PC where more than 9 sectors per track was rare.
pub struct SectorLayoutRange {
    /// Range of plausible cylinder counts.
    pub c: Range<u16>,
    /// Head (side) count.
    pub h: u8,
    /// Range of plausible sectors per track.
    pub s: Range<u8>,
    /// Sector id offset (whether standard sector ids start at 0 or 1).
    pub s_off: u8,
    /// Size of one sector in bytes.
    pub size: usize,
}
impl SectorLayoutRange {
    /// Construct a new [SectorLayoutRange] from its raw components.
    pub fn new(c: Range<u16>, h: u8, s: Range<u8>, s_off: u8, size: usize) -> Self {
        Self { c, h, s, s_off, size }
    }
    /// Return a Range<usize> representing the byte range of the sector layout range.
    /// Note it may be possible for ranges to overlap.
    // The range runs from the smallest total capacity (fewest cylinders x
    // fewest sectors per track) to the largest; head count and sector size
    // are fixed at both ends.
    pub fn byte_range(&self) -> Range<usize> {
        (self.c.start as usize * self.h as usize * self.s.start as usize * self.size)
            ..(self.c.end as usize * self.h as usize * self.s.end as usize * self.size)
    }
}
/// A structure representing how sectors are laid out on a disk (assuming standard format)
/// - Cylinder (c)
/// - Head (h)
/// - Sector count (s)
///
/// Plus a sector ID offset (s_off) to represent whether a standard sector id starts at 0 or 1.
///
/// Unlike a sector id, the `s` field here is a per-track sector *count*, and
/// `size` is the size of one sector in bytes (see [SectorLayout::size]).
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct SectorLayout {
    pub(crate) c: u16,
    pub(crate) h: u8,
    pub(crate) s: u8,
    pub(crate) s_off: u8,
    pub(crate) size: usize,
}
impl Display for SectorLayout {
    /// Render the layout as a compact bracketed summary, e.g. `[c:80 h:2 s: 9 s_off:1]`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let Self { c, h, s, s_off, .. } = self;
        write!(f, "[c:{:2} h:{} s:{:2} s_off:{}]", c, h, s, s_off)
    }
}
impl TryFrom<usize> for SectorLayout {
    type Error = &'static str;

    /// Build a default 80-cylinder, double-sided, 10-sectors-per-track layout
    /// from a raw image size in bytes.
    ///
    /// # Errors
    /// Returns an error if `size` is not a multiple of the default sector size.
    // NOTE(review): the geometry is hard-coded and does not verify that
    // `size` actually equals 80 * 2 * 10 * DEFAULT_SECTOR_SIZE; prefer
    // `derive_matches` for size-driven detection.
    fn try_from(size: usize) -> Result<Self, Self::Error> {
        let sector_size = DEFAULT_SECTOR_SIZE;
        // BUG FIX: the raw byte size must be a multiple of the sector size.
        // The previous check tested `total_sectors % sector_size`, which
        // rejected valid images (e.g. 819200 / 512 = 1600, 1600 % 512 != 0).
        if size % sector_size != 0 {
            return Err("Invalid sector size");
        }
        let c = 80;
        let h = 2;
        let s = 10;
        let s_off = 1;
        // BUG FIX: the `size` field of SectorLayout holds the per-sector byte
        // size (see `SectorLayout::size()` and `derive_matches`), not the raw
        // image size.
        Ok(Self {
            c,
            h,
            s,
            s_off,
            size: sector_size,
        })
    }
}
/// Conversion from a raw image size (in bytes) to one or more candidate layouts.
trait TryFromRawSize<T> {
    /// Error type returned when no layout can be derived.
    type Error;
    /// Attempt to derive all plausible layouts of type `T` from a raw image of
    /// `size` bytes, using `sector_size` (or an implementation default) as the
    /// per-sector byte size.
    fn try_from_raw_size(size: usize, sector_size: Option<usize>) -> Result<Vec<T>, Self::Error>;
}
impl TryFromRawSize<SectorLayout> for SectorLayout {
    type Error = &'static str;

    /// Derive all plausible sector layouts from a raw image size in bytes.
    ///
    /// # Errors
    /// Returns an error if `size` is not a multiple of `sector_size`, or if no
    /// known layout matches the size.
    fn try_from_raw_size(size: usize, sector_size: Option<usize>) -> Result<Vec<Self>, Self::Error> {
        let sector_size = sector_size.unwrap_or(DEFAULT_SECTOR_SIZE);
        // BUG FIX: validate the raw byte size against the sector size, as
        // `derive_matches` does. The previous check tested
        // `total_sectors % sector_size` (e.g. 737280 / 512 = 1440, and
        // 1440 % 512 != 0), which rejected perfectly valid images.
        if size % sector_size != 0 {
            return Err("Raw size must be multiple of sector size");
        }
        Self::derive_matches(size, Some(sector_size))
    }
}
impl SectorLayout {
    /// Create a new `SectorLayout` structure from cylinder, head and sector id components.
    pub fn new(c: u16, h: u8, s: u8, s_off: u8, size: usize) -> Self {
        Self { c, h, s, s_off, size }
    }
    /// Return all components as a `(c, h, s, s_off, size)` tuple.
    pub fn get(&self) -> (u16, u8, u8, u8, usize) {
        (self.c, self.h, self.s, self.s_off, self.size)
    }
    /// Return the cylinder (c) field.
    #[inline]
    pub fn c(&self) -> u16 {
        self.c
    }
    /// Return the head (h) field.
    #[inline]
    pub fn h(&self) -> u8 {
        self.h
    }
    /// Return the sector count (s) field.
    #[inline]
    pub fn s(&self) -> u8 {
        self.s
    }
    /// Return the sector id offset (s_off) field.
    #[inline]
    pub fn s_off(&self) -> u8 {
        self.s_off
    }
    #[inline]
    /// Return the size of a sector in bytes.
    pub fn size(&self) -> usize {
        self.size
    }
    /// Return the equivalent 'n' size parameter for the specified byte size.
    pub fn n(&self) -> u8 {
        DiskChsn::bytes_to_n(self.size)
    }
    /// Return a [DiskCh] structure representing the cylinder and head count components of a [SectorLayout].
    #[inline]
    pub fn ch(&self) -> DiskCh {
        DiskCh::new(self.c, self.h)
    }
    /// Return a [DiskChs] structure representing the cylinder, head and sector count components of a [SectorLayout].
    #[inline]
    pub fn chs(&self) -> DiskChs {
        DiskChs::new(self.c, self.h, self.s)
    }
    /// Return a [DiskChsn] structure representing the cylinder, head and sector counts of a [SectorLayout].
    #[inline]
    pub fn chsn(&self) -> DiskChsn {
        DiskChsn::new(self.c, self.h, self.s, DiskChsn::bytes_to_n(self.size))
    }
    /// Set the cylinder count (c) component of a [SectorLayout].
    #[inline]
    pub fn set_c(&mut self, c: u16) {
        self.c = c;
    }
    /// Set the head count (h) component of a [SectorLayout].
    #[inline]
    pub fn set_h(&mut self, h: u8) {
        self.h = h;
    }
    /// Set the sector count (s) component of a [SectorLayout].
    #[inline]
    pub fn set_s(&mut self, s: u8) {
        self.s = s;
    }
    /// Set the sector id offset (s_off) component of a [SectorLayout].
    #[inline]
    pub fn set_s_off(&mut self, s_off: u8) {
        self.s_off = s_off;
    }
    /// Return the number of sectors represented by a [SectorLayout].
    pub fn total_sectors(&self) -> usize {
        (self.c as usize) * (self.h as usize) * (self.s as usize)
    }
    /// Return a boolean indicating whether this [SectorLayout] contains the specified [DiskChs]
    /// representing a sector id.
    // The sector id is normalized by s_off before comparison, so ids starting
    // at 1 still compare against a 0-based count.
    pub fn contains(&self, chs: impl Into<DiskChs>) -> bool {
        let chs = chs.into();
        self.c > chs.c && self.h > chs.h && self.s > (chs.s.saturating_sub(self.s_off))
    }
    /// Iterate over every (cylinder, head) pair in the layout.
    pub fn ch_iter(&self) -> DiskChIterator {
        DiskCh::new(self.c, self.h).iter()
    }
    /// Iterate over every (cylinder, head, sector) id in the layout.
    pub fn chs_iter(&self) -> DiskChsIterator {
        DiskChs::new(self.c, self.h, self.s).iter(*self)
    }
    /// Iterate over every (cylinder, head, sector, n) id in the layout.
    pub fn chsn_iter(&self) -> DiskChsnIterator {
        DiskChsn::new(self.c, self.h, self.s, self.n()).iter(*self)
    }
    // Heuristically derive all plausible layouts for a raw image of `size`
    // bytes, sorted so that layouts closest to a "normal" cylinder count
    // (40 or 80) come first, then expanded with geometric equivalents.
    fn derive_matches(size: usize, sector_size: Option<usize>) -> Result<Vec<Self>, &'static str> {
        // Overall cylinder range is 39-85
        // We allow one less cylinder than normal, this is sometimes seen in ST files
        let cylinder_range = 39usize..=85;
        // Consider anything from 45-79 as an invalid cylinder range. Would indicate under-dumped image.
        let invalid_cylinders = 45usize..79;
        let sector_size = sector_size.unwrap_or(DEFAULT_SECTOR_SIZE);
        let total_sectors = size / sector_size;
        if size % sector_size != 0 {
            return Err("Raw size must be multiple of sector size");
        }
        //let mut layout_match = None;
        let mut layout_matches = Vec::with_capacity(2);
        for spt in 8..=18 {
            // Iterate over possible sectors per track
            if total_sectors % spt != 0 {
                continue; // Skip if total_sectors is not divisible by spt
            }
            let total_tracks = total_sectors / spt; // Calculate total tracks
            // Determine the number of heads (1 or 2) and corresponding track count
            let heads = if total_tracks % 2 == 0 { 2 } else { 1 };
            let tracks = total_tracks / heads;
            if cylinder_range.contains(&tracks) && !invalid_cylinders.contains(&tracks) {
                layout_matches.push(SectorLayout {
                    c: tracks as u16,
                    h: heads as u8,
                    s: spt as u8,
                    s_off: 0,
                    size: sector_size,
                });
            }
        }
        if !layout_matches.is_empty() {
            layout_matches
                .sort_by(|a, b| Self::normal_cylinder_distance(a.c).cmp(&Self::normal_cylinder_distance(b.c)));
            let vec = layout_matches.iter().flat_map(|layout| layout.equivalents()).collect();
            Ok(vec)
        }
        else {
            Err("No match for raw image size")
        }
    }
    // Distance of a cylinder count from the nearest "normal" count:
    // 40 (48tpi 5.25") for counts below 60, 80 (96tpi / 3.5") otherwise.
    fn normal_cylinder_distance(c: u16) -> u16 {
        if c < 60 {
            40i16.abs_diff(c as i16)
        }
        else {
            80i16.abs_diff(c as i16)
        }
    }
    // Expand a layout into geometrically ambiguous alternatives that occupy
    // the same number of bytes (e.g. 80c/1h vs 40c/2h).
    fn equivalents(&self) -> Vec<Self> {
        let mut equivalents = Vec::with_capacity(2);
        let mut layout = *self;
        // Add the original layout
        equivalents.push(layout);
        // If the track count is >= 79, we could have either a double-sided 5.25" disk or a
        // single sided 3.5" disk. We can't determine which from the raw size alone.
        if layout.c >= 79 && layout.c % 2 == 0 && layout.h == 1 {
            layout.c /= 2;
            layout.h = 2;
            equivalents.push(layout);
        }
        else if layout.c <= 45 && layout.h == 2 {
            // Otherwise, if the track count is small enough to be a 48TPI 5.25" disk with two
            // sides, it might also be a 96tpi 3.5" disk with one side.
            layout.c *= 2;
            layout.h = 1;
            equivalents.push(layout);
        }
        equivalents
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_derive() {
        // Test that we can determine sector layout from common raw image sizes.
        // The following sizes were discovered from a collection of ST image files.
        // Test cases: (raw_size, expected_sector_size, expected_spt, expected_heads, s_off, expected_tracks)
        // Each expected tuple is (c, h, s, s_off, size); the derived candidate
        // list must contain it (other equivalents may also be returned).
        let test_cases = [
            (1427456, (82, 2, 17, 0, 512)),
            (1064960, (80, 2, 13, 0, 512)),
            (1032192, (84, 2, 12, 0, 512)),
            (995328, (81, 2, 12, 0, 512)),
            (983040, (80, 2, 12, 0, 512)),
            (946176, (84, 2, 11, 0, 512)),
            (934912, (83, 2, 11, 0, 512)),
            (923648, (82, 2, 11, 0, 512)),
            (912384, (81, 2, 11, 0, 512)),
            (901120, (80, 2, 11, 0, 512)),
            (860160, (84, 2, 10, 0, 512)),
            (849920, (83, 2, 10, 0, 512)),
            (839680, (82, 2, 10, 0, 512)),
            (829440, (81, 2, 10, 0, 512)),
            (819200, (80, 2, 10, 0, 512)),
            (808960, (79, 2, 10, 0, 512)),
            (764928, (83, 2, 9, 0, 512)),
            (755712, (82, 2, 9, 0, 512)),
            (746496, (81, 2, 9, 0, 512)),
            (737280, (80, 2, 9, 0, 512)),
            (728064, (79, 2, 9, 0, 512)),
            (461824, (82, 1, 11, 0, 512)),
            (456192, (81, 1, 11, 0, 512)),
            (450560, (80, 1, 11, 0, 512)),
            (424960, (83, 1, 10, 0, 512)),
            (419840, (82, 1, 10, 0, 512)),
            (414720, (81, 1, 10, 0, 512)),
            (409600, (80, 1, 10, 0, 512)),
            (404480, (79, 1, 10, 0, 512)),
            (377856, (82, 1, 9, 0, 512)),
            (373248, (81, 1, 9, 0, 512)),
            (368640, (80, 1, 9, 0, 512)),
            (364032, (79, 1, 9, 0, 512)),
        ];
        for (i, (raw_size, expected)) in test_cases.iter().enumerate() {
            println!("Test case {}: {:?}", i, test_cases[i]);
            match SectorLayout::derive_matches(*raw_size, Some(512)) {
                Ok(layouts) => {
                    println!("Layouts: {:?}", layouts);
                    let test_layout = SectorLayout::new(expected.0, expected.1, expected.2, expected.3, expected.4);
                    assert!(layouts.contains(&test_layout));
                }
                Err(e) => {
                    panic!("Failed for raw_size: {} with error: {}", raw_size, e);
                }
            }
        }
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/types/enums.rs | src/types/enums.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
enums.rs
Defines common enum types
*/
use crate::{types::IntegrityField, StandardFormat};
use std::{
fmt,
fmt::{Display, Formatter},
path::PathBuf,
};
pub use crate::platform::Platform;
use crate::types::DiskRpm;
/// The level of data resolution for a given track.
/// fluxfox supports three types of data resolutions:
/// * MetaSector tracks hold only sector data along with optional metadata per sector.
/// * BitStream tracks hold a bitwise representation of each track on a disk.
/// * FluxStream tracks hold one or more `revolutions` of flux transition delta times per track,
///   which are resolved to a single bitstream.
///
/// It is possible for some image formats to contain a combination of BitStream and FluxStream
/// tracks.
// Explicit discriminants with repr(usize) keep the ordering stable:
// MetaSector < BitStream < FluxStream (lowest to highest resolution).
#[repr(usize)]
#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum TrackDataResolution {
    #[default]
    #[doc = "MetaSector images hold only sector data along with optional metadata per sector."]
    MetaSector = 0,
    #[doc = "BitStream images hold a bitwise representation of each track on a disk."]
    BitStream = 1,
    #[doc = "FluxStream images hold one or more `revolutions` of flux transition delta times per track, which are resolved to a single bitstream."]
    FluxStream = 2,
}
/// The type of data encoding used by a track in a disk image.
/// Note that some disk images may contain tracks with different encodings.
/// fluxfox supports two types of data encodings:
/// * Fm: Frequency Modulation encoding. Used by older 8" diskettes, and 'duplication mark' tracks
///   on some 3.5" and 5.25" diskettes.
/// * Mfm: Modified Frequency Modulation encoding. Used by almost all PC 5.25" and 3.5" diskettes,
///   Amiga 3.5" diskettes, and Macintosh 1.44MB 3.5" diskettes.
///
/// Not implemented are:
/// * Gcr: Group Code Recording encoding. Used by Apple and Macintosh diskettes.
#[derive(Default, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum TrackDataEncoding {
    // BUG FIX: the inch marks inside the `#[doc = "..."]` strings below were
    // unescaped, terminating the string literals early and breaking the build.
    // They are now escaped as `\"` (cf. the DiskPhysicalDimensions docs).
    #[default]
    #[doc = "Frequency Modulation encoding. Used by older 8\" diskettes, and duplication tracks on some 5.25\" diskettes."]
    Fm,
    #[doc = "Modified Frequency Modulation encoding. Used by almost all 5.25\" and 3.5\" diskettes."]
    Mfm,
    #[doc = "Group Code Recording encoding. Used by Apple and Macintosh diskettes."]
    Gcr,
}
impl TrackDataEncoding {
pub fn byte_size(&self) -> usize {
match self {
TrackDataEncoding::Fm => 16,
TrackDataEncoding::Mfm => 16,
TrackDataEncoding::Gcr => 0,
}
}
pub fn marker_size(&self) -> usize {
match self {
TrackDataEncoding::Fm => 64,
TrackDataEncoding::Mfm => 64,
TrackDataEncoding::Gcr => 0,
}
}
}
impl Display for TrackDataEncoding {
    /// Render the conventional short name of the encoding.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        let name = match self {
            TrackDataEncoding::Fm => "FM",
            TrackDataEncoding::Mfm => "MFM",
            TrackDataEncoding::Gcr => "GCR",
        };
        f.write_str(name)
    }
}
/// The physical dimensions of a disk.
/// A few disk image formats such as MFI have a metadata field to specify a disk's dimensions.
/// There is not a perfect way to determine this heuristically, but one can take a pretty good
/// guess based on the cylinder count, density, data rate, RPM, and other parameters.
// The default of 5.25" reflects the most common legacy PC media.
#[derive(Default, Copy, Clone, Debug)]
pub enum DiskPhysicalDimensions {
    #[doc = "An 8\" Diskette"]
    Dimension8,
    #[default]
    #[doc = "A 5.25\" Diskette"]
    Dimension5_25,
    #[doc = "A 3.5\" Diskette"]
    Dimension3_5,
}
/// The density of data recording on a disk track.
/// A disk image may contain tracks with different densities.
///
/// * `Standard` density: typically referring to FM encoding, typically used by 8" diskettes.
/// * `Double` density: typically referring to MFM encoding at 250/300Kbps. Appeared on 5.25" and 3.5" diskettes.
/// * `High` density: typically referring to MFM encoding at 500Kbps. Appeared on 5.25" and 3.5" diskettes.
/// * `Extended` density: typically referring to MFM encoding at 1Mbps. Appeared on 3.5" diskettes.
#[derive(Default, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum TrackDensity {
    /// FM encoding; typical of 8" diskettes.
    Standard,
    /// MFM at 250/300Kbps; the most common density, hence the default.
    #[default]
    Double,
    /// MFM at 500Kbps.
    High,
    /// MFM at 1Mbps.
    Extended,
}
impl From<TrackDataRate> for TrackDensity {
    /// Map a standard data rate onto its usual density class. Anything not
    /// explicitly matched (including nonstandard rates) is treated as Double.
    fn from(rate: TrackDataRate) -> Self {
        use TrackDataRate::*;
        match rate {
            Rate125Kbps(_) => TrackDensity::Standard,
            Rate250Kbps(_) | Rate300Kbps(_) => TrackDensity::Double,
            Rate500Kbps(_) => TrackDensity::High,
            Rate1000Kbps(_) => TrackDensity::Extended,
            _ => TrackDensity::Double,
        }
    }
}
impl Display for TrackDensity {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
use TrackDensity::*;
match self {
Standard => write!(f, "Standard"),
Double => write!(f, "Double"),
High => write!(f, "High"),
Extended => write!(f, "Extended"),
}
}
}
impl TrackDensity {
    /// Return the base number of bitcells for a given disk density.
    /// It is ideal to provide the disk RPM to get the most accurate bitcell count as high
    /// density 5.25 disks have different bitcell counts than high density 3.5 disks.
    ///
    /// The value provided is only an estimate for the ideal bitcell count. The actual bitcell
    /// may vary depending on variances in the disk drive used to write the diskette.
    pub fn bitcells(&self, rpm: Option<DiskRpm>) -> Option<usize> {
        use TrackDensity::*;
        // High density at 360 RPM (5.25" HD) packs fewer bitcells per
        // revolution than at 300 RPM (3.5" HD); all other densities are
        // RPM-independent here.
        match (self, rpm) {
            (Standard, _) => Some(50_000),
            (Double, _) => Some(100_000),
            (High, Some(DiskRpm::Rpm360(_))) => Some(166_666),
            (High, Some(DiskRpm::Rpm300(_)) | None) => Some(200_000),
            (High, Some(_)) => Some(200_000),
            (Extended, _) => Some(400_000),
        }
    }
    /// Attempt to classify a density from a measured per-track bitcell count.
    /// Returns None when the count falls outside all recognized bands.
    pub fn from_bitcells(bitcells: u32) -> Option<TrackDensity> {
        match bitcells {
            40_000..60_000 => Some(TrackDensity::Standard),
            80_000..120_000 => Some(TrackDensity::Double),
            150_000..250_000 => Some(TrackDensity::High),
            350_000..450_000 => Some(TrackDensity::Extended),
            _ => None,
        }
    }
    /// Return a value in seconds representing the base clock of a PLL for a given disk density.
    /// A `DiskRpm` must be provided for double density disks, as the clock is adjusted for
    /// double-density disks read in high-density 360RPM drives.
    /// TODO: Add Option<DiskCh> to calculate clocks for Zoned RPM disks.
    pub fn base_clock(&self, rpm: Option<DiskRpm>) -> f64 {
        match (self, rpm) {
            (TrackDensity::Standard, _) => 4e-6,
            (TrackDensity::Double, None | Some(DiskRpm::Rpm300(_))) => 2e-6,
            // DD media spinning at 360 RPM: clock scaled by 300/360.
            (TrackDensity::Double, Some(DiskRpm::Rpm360(_))) => 1.666e-6,
            (TrackDensity::Double, Some(_)) => 2e-6,
            (TrackDensity::High, _) => 1e-6,
            (TrackDensity::Extended, _) => 5e-7,
        }
    }
    /// Attempt to determine the disk density from the base clock of a PLL.
    // Bands are ±25% around each nominal clock; Standard (4e-6) is not
    // detected here and falls through to None.
    pub fn from_base_clock(clock: f64) -> Option<TrackDensity> {
        match clock {
            0.375e-6..0.625e-6 => Some(TrackDensity::Extended),
            0.75e-6..1.25e-6 => Some(TrackDensity::High),
            1.5e-6..2.5e-6 => Some(TrackDensity::Double),
            _ => None,
        }
    }
}
/// DiskDataRate defines the data rate of the disk image - for MFM and FM encoding, this is the
/// bit rate / 2.
/// DiskDataRate defines standard data rate categories, while storing a clock adjustment factor to
/// make possible calculation of the exact data rate if required.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum TrackDataRate {
    /// A rate fitting no standard category, stored literally (see `From<u32>`).
    RateNonstandard(u32),
    /// Nominal 125Kbps rate with adjustment factor (1.0 == exactly nominal).
    Rate125Kbps(f64),
    /// Nominal 250Kbps rate with adjustment factor.
    Rate250Kbps(f64),
    /// Nominal 300Kbps rate with adjustment factor.
    Rate300Kbps(f64),
    /// Nominal 500Kbps rate with adjustment factor.
    Rate500Kbps(f64),
    /// Nominal 1000Kbps rate with adjustment factor.
    Rate1000Kbps(f64),
}
impl Default for TrackDataRate {
fn default() -> Self {
TrackDataRate::Rate250Kbps(1.0)
}
}
impl From<TrackDataRate> for u32 {
    /// Compute the effective rate: the nominal rate scaled by the stored
    /// adjustment factor. Nonstandard rates are stored literally.
    fn from(rate: TrackDataRate) -> Self {
        use TrackDataRate::*;
        let (base, factor) = match rate {
            RateNonstandard(raw) => return raw,
            Rate125Kbps(f) => (125_000.0, f),
            Rate250Kbps(f) => (250_000.0, f),
            Rate300Kbps(f) => (300_000.0, f),
            Rate500Kbps(f) => (500_000.0, f),
            Rate1000Kbps(f) => (1_000_000.0, f),
        };
        (base * factor) as u32
    }
}
/// Implement a conversion from a u32 to a DiskDataRate.
/// An 8-15% rate deviance is allowed for standard rates, otherwise a RateNonstandard is returned.
impl From<u32> for TrackDataRate {
    fn from(rate: u32) -> Self {
        use TrackDataRate::*;
        // Each band is half-open [lo, hi); the stored factor records how far
        // the measured rate deviates from the nominal one.
        let measured = rate as f64;
        if (93_750..143_750).contains(&rate) {
            Rate125Kbps(measured / 125_000.0)
        }
        else if (212_000..271_000).contains(&rate) {
            Rate250Kbps(measured / 250_000.0)
        }
        else if (271_000..345_000).contains(&rate) {
            Rate300Kbps(measured / 300_000.0)
        }
        else if (425_000..575_000).contains(&rate) {
            Rate500Kbps(measured / 500_000.0)
        }
        else if (850_000..1_150_000).contains(&rate) {
            Rate1000Kbps(measured / 1_000_000.0)
        }
        else {
            RateNonstandard(rate)
        }
    }
}
impl From<TrackDensity> for TrackDataRate {
fn from(density: TrackDensity) -> Self {
use TrackDensity::*;
match density {
Standard => TrackDataRate::Rate125Kbps(1.0),
Double => TrackDataRate::Rate250Kbps(1.0),
High => TrackDataRate::Rate500Kbps(1.0),
Extended => TrackDataRate::Rate1000Kbps(1.0),
}
}
}
impl Display for TrackDataRate {
    /// Standard rates render as the nominal rate plus the adjustment factor,
    /// e.g. `250Kbps (x1.00)`; nonstandard rates are prefixed with `*` and
    /// shown in (integer-truncated) Kbps.
    fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
        use TrackDataRate::*;
        match self {
            RateNonstandard(rate) => write!(fmt, "*{}Kbps", rate / 1000),
            Rate125Kbps(f) => write!(fmt, "125Kbps (x{:.2})", f),
            Rate250Kbps(f) => write!(fmt, "250Kbps (x{:.2})", f),
            Rate300Kbps(f) => write!(fmt, "300Kbps (x{:.2})", f),
            Rate500Kbps(f) => write!(fmt, "500Kbps (x{:.2})", f),
            Rate1000Kbps(f) => write!(fmt, "1000Kbps (x{:.2})", f),
        }
    }
}
/// A DiskSelection enumeration is used to select a disk image by either index or path when dealing
/// with containers that contain multiple disk images.
#[derive(Clone, Debug)]
pub enum DiskSelection {
    /// Specify a disk image by index into a list of normally sorted path names within the container.
    Index(usize),
    /// Specify a disk image by path within the container.
    Path(PathBuf),
}
impl Display for DiskSelection {
    /// Render the selection as a parenthesized `(Index: n)` or `(Path: p)` tag.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            DiskSelection::Index(index) => write!(f, "(Index: {})", index),
            DiskSelection::Path(selected) => write!(f, "(Path: {})", selected.display()),
        }
    }
}
/// `DiskImageFileFormat` is an enumeration listing the various disk image file formats that can be
/// read or written by FluxFox.
// Several variants are gated behind cargo features; code matching on this
// enum must carry the corresponding #[cfg] attributes on those arms.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, strum::EnumIter)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum DiskImageFileFormat {
    /// A raw sector image. Typically, has extensions IMG, IMA, DSK.
    RawSectorImage,
    /// An ImageDisk sector image. Typically has extension IMD.
    ImageDisk,
    /// A PCE sector image. Typically, has extension PSI.
    PceSectorImage,
    /// A PCE bitstream image. Typically, has extension PRI,
    PceBitstreamImage,
    /// A PCE flux stream image. Typically, has extension PFI.
    PceFluxImage,
    /// An MFM bitstream image. Typically, has extension MFM.
    MfmBitstreamImage,
    /// A TeleDisk sector image. Typically, has extension TD0.
    #[cfg(feature = "td0")]
    TeleDisk,
    /// A Kryoflux flux stream image. Typically, has extension RAW.
    KryofluxStream,
    /// An HFEv1 bitstream image. Typically, has extension HFE.
    HfeImage,
    /// An 86F bitstream image. Typically, has extension 86F.
    F86Image,
    /// A TransCopy bitstream image. Typically, has extension TC.
    TransCopyImage,
    /// A SuperCard Pro flux stream image. Typically, has extension SCP.
    SuperCardPro,
    /// A MAME floppy image. Typically, has extension MFI.
    #[cfg(feature = "mfi")]
    MameFloppyImage,
    /// Interchangeable Preservation Format image. Typically, has extension IPF.
    #[cfg(feature = "ipf")]
    IpfImage,
    /// MOOF - Applesauce Macintosh Disk Image
    #[cfg(feature = "moof")]
    MoofImage,
    /// WOZ - Applesauce Macintosh Disk Image
    #[cfg(feature = "woz")]
    WozImage,
}
impl DiskImageFileFormat {
    /// Return the priority of the disk image format. Higher values are higher priority.
    /// Used to sort returned lists of disk image formats, hopefully returning the most desirable
    /// format first.
    // NOTE(review): several values contradict the section comments below —
    // e.g. the "high priority" bitstream group mostly returns 0, and
    // RawSectorImage (labeled low priority) returns 1. Confirm whether these
    // are deliberate downgrades or stale values.
    pub fn priority(self) -> usize {
        use DiskImageFileFormat::*;
        match self {
            KryofluxStream => 0,
            // Supported bytestream formats (low priority)
            RawSectorImage => 1,
            #[cfg(feature = "td0")]
            TeleDisk => 0,
            ImageDisk => 0,
            PceSectorImage => 1,
            // Supported bitstream formats (high priority)
            TransCopyImage => 0,
            MfmBitstreamImage => 0,
            HfeImage => 0,
            PceBitstreamImage => 7,
            F86Image => 8,
            // Flux images (not supported for writes)
            SuperCardPro => 0,
            PceFluxImage => 0,
            #[cfg(feature = "mfi")]
            MameFloppyImage => 0,
            #[cfg(feature = "ipf")]
            IpfImage => 0,
            #[cfg(feature = "moof")]
            MoofImage => 0,
            #[cfg(feature = "woz")]
            WozImage => 0,
        }
    }
    /// Return the [TrackDataResolution] that tracks read from this file format
    /// natively carry (metasector, bitstream, or flux stream).
    pub fn resolution(self) -> TrackDataResolution {
        use DiskImageFileFormat::*;
        match self {
            RawSectorImage => TrackDataResolution::MetaSector,
            ImageDisk => TrackDataResolution::MetaSector,
            PceSectorImage => TrackDataResolution::MetaSector,
            PceBitstreamImage => TrackDataResolution::BitStream,
            MfmBitstreamImage => TrackDataResolution::BitStream,
            #[cfg(feature = "td0")]
            TeleDisk => TrackDataResolution::MetaSector,
            KryofluxStream => TrackDataResolution::FluxStream,
            HfeImage => TrackDataResolution::BitStream,
            F86Image => TrackDataResolution::BitStream,
            TransCopyImage => TrackDataResolution::BitStream,
            SuperCardPro => TrackDataResolution::FluxStream,
            PceFluxImage => TrackDataResolution::FluxStream,
            #[cfg(feature = "mfi")]
            MameFloppyImage => TrackDataResolution::FluxStream,
            #[cfg(feature = "ipf")]
            IpfImage => TrackDataResolution::BitStream,
            #[cfg(feature = "moof")]
            MoofImage => TrackDataResolution::BitStream,
            #[cfg(feature = "woz")]
            WozImage => TrackDataResolution::BitStream,
        }
    }
}
impl Display for DiskImageFileFormat {
    /// Render a human-readable name for the file format.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        use DiskImageFileFormat::*;
        // Every name is a static string; write it directly.
        let name: &str = match self {
            RawSectorImage => "Raw Sector",
            PceSectorImage => "PCE Sector",
            PceBitstreamImage => "PCE Bitstream",
            ImageDisk => "ImageDisk Sector",
            #[cfg(feature = "td0")]
            TeleDisk => "TeleDisk Sector",
            KryofluxStream => "Kryoflux Flux Stream",
            MfmBitstreamImage => "HxC MFM Bitstream",
            HfeImage => "HFEv1 Bitstream",
            F86Image => "86F Bitstream",
            TransCopyImage => "TransCopy Bitstream",
            SuperCardPro => "SuperCard Pro Flux",
            PceFluxImage => "PCE Flux Stream",
            #[cfg(feature = "mfi")]
            MameFloppyImage => "MAME Flux Stream",
            #[cfg(feature = "ipf")]
            IpfImage => "IPF Disk",
            #[cfg(feature = "moof")]
            MoofImage => "MOOF Disk",
            #[cfg(feature = "woz")]
            WozImage => "WOZ Disk",
        };
        f.write_str(name)
    }
}
/// A [DiskFormat] enumeration describes the format of a disk image.
#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)]
pub enum DiskFormat {
    /// An unknown format. This is the default format for a disk image before a disk's format can
    /// be determined.
    Unknown,
    /// A non-standard disk format. This format is used for disk images that do not conform to a
    /// standard format, such a copy-protected titles that may have varying track lengths,
    /// non-consecutive sectors, or other non-standard features.
    Nonstandard,
    /// A standard disk format. This format is used for disk images that conform to a standard
    /// format type, determined by a `StandardFormat` enum.
    Standard(StandardFormat),
}
/// An enum that defines the scope of a track element read/write operation.
/// Not all operations for any given [TrackSchema] may support all scopes.
#[derive(Copy, Clone, Debug)]
pub enum RwScope {
    /// The operation will include the entire track data element, including address marker,
    /// CRC/checksum, or other track schema metadata.
    EntireElement,
    /// The operation will include only the element data. For sector data elements, this would
    /// return just the sector data, excluding address marker and CRC bytes.
    DataOnly,
    /// The operation will only affect the element CRC or Checksum.
    CrcOnly,
}
/// An enum that encompasses data integrity verification strategies.
/// Some track schemas may use a CRC to verify the integrity of the data on a track, others may
/// use a checksum. Other types can be added here as needed as support for new track schemas is
/// added.
#[derive(Copy, Clone, Debug)]
pub enum IntegrityCheck {
    /// Represents the result of a 16-bit CRC (Cyclic Redundancy Check)
    Crc16(IntegrityField<u16>),
    /// Represents the result of a 16-bit checksum
    Checksum16(IntegrityField<u16>),
}
impl Display for IntegrityCheck {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
use IntegrityCheck::*;
match self {
Crc16(result) if result.is_valid() => write!(f, "Valid"),
Crc16(_) => write!(f, "Invalid"),
Checksum16(result) if result.is_valid() => write!(f, "Valid"),
Checksum16(_) => write!(f, "Invalid"),
}
}
}
impl IntegrityCheck {
pub fn is_valid(&self) -> bool {
use IntegrityCheck::*;
match self {
Crc16(result) => result.is_valid(),
Checksum16(result) => result.is_valid(),
}
}
pub fn is_error(&self) -> bool {
!self.is_valid()
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/types/flags.rs | src/types/flags.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
flags.rs
Defines common bitflags
*/
use bitflags::bitflags;
bitflags! {
/// Bit flags that can be applied to a disk image.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[rustfmt::skip]
pub struct DiskImageFlags: u32 {
#[doc = "Disk Image source specified image is read-only"]
const READONLY = 0b0000_0000_0000_0001;
#[doc = "Disk Image has been written to since last save"]
const DIRTY = 0b0000_0000_0000_0010;
#[doc = "Disk Image represents a PROLOK protected disk"]
const PROLOK = 0b0000_0000_0000_0100;
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/types/mod.rs | src/types/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
pub mod chs;
pub mod enums;
pub mod flags;
pub mod rpm;
pub mod sector_layout;
pub mod standard_format;
pub mod structs;
// Expose all types under types module namespace
pub use chs::*;
pub use enums::*;
pub use flags::*;
pub use rpm::*;
pub use standard_format::*;
pub use structs::*;
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_system/mod.rs | src/file_system/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
#[cfg(feature = "serde")]
use serde;
use std::fmt::{self, Display, Formatter};
use thiserror::Error;
pub mod date_time;
#[cfg(feature = "fat")]
pub mod fat;
pub mod file_tree;
pub use date_time::FsDateTime;
pub use file_tree::{FileEntry, FileNameType, FileTreeNode};
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum FileSystemArchive {
Zip,
Tar,
}
impl Display for FileSystemArchive {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
FileSystemArchive::Zip => write!(f, "Zip"),
FileSystemArchive::Tar => write!(f, "Tar"),
}
}
}
impl FileSystemArchive {
pub fn ext(&self) -> &str {
match self {
FileSystemArchive::Zip => ".zip",
FileSystemArchive::Tar => ".tar",
}
}
}
#[derive(Clone, Debug, Error)]
pub enum FileSystemError {
#[error("An IO error occurred reading or writing the disk image: {0}")]
IoError(String),
#[error("The filesystem is not mounted")]
NotMountedError,
#[error("The filesystem is empty")]
EmptyFileSystem,
#[error("An error occurred mounting the file system: {0}")]
MountError(String),
#[error("An error occurred reading a file: {0}")]
ReadError(String),
#[error("An archive error occurred: {0}")]
ArchiveError(String),
#[error("The requested path was not found: {0}")]
PathNotFound(String),
#[error("Feature {0} option required but not compiled.")]
FeatureError(String),
}
impl From<crate::io::Error> for FileSystemError {
fn from(e: crate::io::Error) -> Self {
FileSystemError::IoError(e.to_string())
}
}
#[cfg(feature = "zip")]
impl From<zip::result::ZipError> for FileSystemError {
fn from(e: zip::result::ZipError) -> Self {
FileSystemError::ArchiveError(e.to_string())
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_system/file_tree.rs | src/file_system/file_tree.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use crate::file_system::date_time::FsDateTime;
use std::fmt::{Display, Formatter, Result};
#[derive(Copy, Clone, Debug)]
pub enum FileEntryType {
File,
Directory,
}
#[derive(Copy, Clone, Default, Debug)]
pub enum FileNameType {
#[default]
Short,
Long,
}
#[derive(Clone)]
pub struct FileEntry {
pub(crate) e_type: FileEntryType,
pub(crate) short_name: String,
pub(crate) long_name: Option<String>,
pub(crate) path: String,
pub(crate) size: u64,
pub(crate) created: Option<FsDateTime>,
pub(crate) modified: Option<FsDateTime>,
}
impl Display for FileEntry {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
write!(
f,
"{} {:>10} {}",
self.modified.as_ref().unwrap_or(&FsDateTime::default()),
self.size,
self.short_name
)
}
}
impl FileEntry {
/// Returns the name of the file, of the requested `FileNameType`.
/// # Arguments
/// * `name_type` - The type of name to return.
/// # Returns
/// * `Some(String)` - The name of the file, if the specified filename type exists.
/// * `None` - If the specified filename type does not exist.
pub fn name(&self, name_type: FileNameType) -> Option<&str> {
match name_type {
FileNameType::Short => Some(&self.short_name),
FileNameType::Long => self.long_name.as_deref(),
}
}
/// Returns the short name of the file.
pub fn short_name(&self) -> &str {
&self.short_name
}
/// Returns the `FileEntryType` of the file entry.
pub fn entry_type(&self) -> FileEntryType {
self.e_type
}
/// Returns the full short path of the file.
pub fn path(&self) -> &str {
&self.path
}
/// Returns the size of the file as u64 in bytes, or 0 if the entry is a directory.
pub fn size(&self) -> u64 {
self.size
}
/// Returns `true` if the entry is a file.
/// # Returns
/// * `true` - If the entry is a file.
/// * `false` - If the entry is a directory.
pub fn is_file(&self) -> bool {
matches!(self.e_type, FileEntryType::File)
}
/// Returns `true` if the entry is a directory.
/// # Returns
/// * `true` - If the entry is a directory.
/// * `false` - If the entry is a file.
pub fn is_dir(&self) -> bool {
matches!(self.e_type, FileEntryType::Directory)
}
/// Return the extension of the file, if it exists.
/// # Returns
/// * `Some(&str)` - The extension of the file.
/// * `None` - If the file does not have an extension.
pub fn ext(&self) -> Option<&str> {
let parts: Vec<&str> = self.short_name.split('.').collect();
if parts.len() > 1 {
let ext = parts[parts.len() - 1];
//log::debug!("ext: {}", ext);
Some(ext)
}
else {
None
}
}
pub fn modified(&self) -> Option<&FsDateTime> {
self.modified.as_ref()
}
pub fn created(&self) -> Option<&FsDateTime> {
self.created.as_ref()
}
}
#[derive(Clone)]
pub enum FileTreeNode {
File(FileEntry),
Directory { dfe: FileEntry, children: Vec<FileTreeNode> },
}
impl Default for FileTreeNode {
fn default() -> Self {
FileTreeNode::Directory {
dfe: FileEntry {
e_type: FileEntryType::Directory,
short_name: "/".to_string(),
long_name: Some("/".to_string()),
path: "/".to_string(),
size: 0,
created: None,
modified: None,
},
children: Vec::new(),
}
}
}
impl Display for FileTreeNode {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
match self {
FileTreeNode::File(entry) => write!(f, "{}", entry),
FileTreeNode::Directory { dfe, .. } => {
write!(f, "{}", dfe)
}
}
}
}
impl FileTreeNode {
pub fn sub_dir_ct(&self) -> usize {
if let FileTreeNode::Directory { children, .. } = self {
children
.iter()
.filter(|c| matches!(c, FileTreeNode::Directory { dfe: _, children: _ }))
.count()
}
else {
0
}
}
/// Returns `true` if the current node represents a file.
pub fn is_file(&self) -> bool {
matches!(self, FileTreeNode::File(_))
}
/// Returns `true` if the current node represents a directory.
pub fn is_dir(&self) -> bool {
matches!(self, FileTreeNode::Directory { dfe: _, children: _ })
}
/// Returns a vector of [FileEntry] for the given path, if the path exists, or else `None`.
pub fn dir(&self, path: &str) -> Option<Vec<FileEntry>> {
// Split the path into components for navigation
let components: Vec<&str> = path.split('/').filter(|c| !c.is_empty()).collect();
self.resolve_dir(false, &components)
}
/// Walk the file tree from this node and call the provided function for each [FileEntry] found.
pub fn for_each_file(&self, recursive: bool, f: &mut impl FnMut(&FileEntry)) {
match self {
FileTreeNode::File(file) => f(file),
FileTreeNode::Directory { children, .. } if recursive => {
for child in children {
child.for_each_file(true, f);
}
}
_ => {}
}
}
/// Returns a vector of file names recursively from the current node, of the specified [FileNameType].
/// If [FileNameType::Long] is requested, but a long filename does not exist, the short filename
/// will be returned instead.
pub fn file_names(&self, recursive: bool, name_type: FileNameType) -> Vec<String> {
let mut names = Vec::new();
match name_type {
FileNameType::Short => {
self.for_each_file(recursive, &mut |file_entry| {
names.push(file_entry.short_name.clone());
});
}
FileNameType::Long => {
self.for_each_file(recursive, &mut |file_entry| {
if file_entry.long_name.is_none() {
names.push(file_entry.short_name.clone());
}
else if let Some(name) = file_entry.long_name.as_ref() {
names.push(name.clone());
}
});
}
}
names
}
/// Returns a vector of file paths for the given path, of the specified [FileNameType].
/// If [FileNameType::Long] is requested, but a long filename does not exist, the short path
/// will be returned instead.
pub fn file_paths(&self, recursive: bool, name_type: FileNameType) -> Vec<String> {
let mut names = Vec::new();
match name_type {
FileNameType::Short | FileNameType::Long => {
self.for_each_file(recursive, &mut |file_entry| {
names.push(file_entry.path.clone());
});
}
}
names
}
pub fn node(&self, path: &str) -> Option<&FileTreeNode> {
// Split the path into components for navigation
let components: Vec<&str> = path.split('/').filter(|c| !c.is_empty()).collect();
self.resolve_node(&components)
}
/// Helper function to resolve a FileTreeNode from a list of components
fn resolve_node(&self, components: &[&str]) -> Option<&FileTreeNode> {
match self {
FileTreeNode::File(_) => None, // A file cannot have children
FileTreeNode::Directory { dfe: _, children } => {
if components.is_empty() {
// If no more components, we have resolved the path
Some(self)
}
else {
// Otherwise, find the next component in the children and continue resolving
let next_component = components[0];
let remaining_components = &components[1..];
for child in children {
if let FileTreeNode::Directory { dfe, .. } = child {
if dfe.short_name == next_component {
return child.resolve_node(remaining_components);
}
}
}
None // No matching directory found
}
}
}
}
/// Helper function to resolve a directory from a list of components
fn resolve_dir(&self, recursive: bool, components: &[&str]) -> Option<Vec<FileEntry>> {
match self {
FileTreeNode::File(_) => None, // A file cannot have children
FileTreeNode::Directory { dfe: _, children } => {
if components.is_empty() {
// If no more components, return the current directory's children as FileEntry
Some(
children
.iter()
.map(|child| match child {
FileTreeNode::File(file) => file.clone(),
FileTreeNode::Directory { dfe, .. } => dfe.clone(),
})
.collect(),
)
}
else if recursive {
// Otherwise, find the next component in the children and continue resolving
let next_component = components[0];
let remaining_components = &components[1..];
for child in children {
if let FileTreeNode::Directory { dfe, .. } = child {
if dfe.short_name == next_component {
return child.resolve_dir(true, remaining_components);
}
}
}
None // No matching directory found
}
else {
None
}
}
}
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_system/date_time.rs | src/file_system/date_time.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use std::fmt::{Display, Formatter, Result};
#[cfg(feature = "fat")]
use fluxfox_fat;
#[derive(Clone, Debug)]
pub struct FsDateTime {
pub year: u16,
pub month: u8,
pub day: u8,
pub hour: u8,
pub minute: u8,
pub second: u8,
pub millisecond: u16,
}
impl Default for FsDateTime {
fn default() -> Self {
Self {
year: 1980,
month: 1,
day: 1,
hour: 0,
minute: 0,
second: 0,
millisecond: 0,
}
}
}
impl Display for FsDateTime {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
write!(
f,
"{:04}/{:02}/{:02} {:02}:{:02}:{:02}",
self.year, self.month, self.day, self.hour, self.minute, self.second
)
}
}
#[cfg(feature = "fat")]
impl From<fluxfox_fat::DateTime> for FsDateTime {
fn from(dt: fluxfox_fat::DateTime) -> Self {
Self {
year: dt.date.year,
month: dt.date.month as u8,
day: dt.date.day as u8,
hour: dt.time.hour as u8,
minute: dt.time.min as u8,
second: dt.time.sec as u8,
millisecond: dt.time.millis,
}
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_system/fat/fat_fs.rs | src/file_system/fat/fat_fs.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
#[cfg(feature = "zip")]
use crate::io::Write;
#[cfg(feature = "tar")]
use crate::io::Cursor;
use crate::{
disk_lock::{DiskLock, LockContext, NonTrackingDiskLock},
file_system::{
file_tree::{FileEntry, FileEntryType, FileNameType, FileTreeNode},
FileSystemArchive,
FileSystemError,
},
io::{Read, Seek},
sector_view::StandardSectorView,
DiskImage,
StandardFormat,
};
use fluxfox_fat::{Dir, FileSystem, FsOptions, OemCpConverter, ReadWriteSeek, StdIoWrapper, TimeProvider};
pub struct FatFileSystem {
fat: Option<FileSystem<StdIoWrapper<StandardSectorView>>>,
}
impl FatFileSystem {
/// Mount a FAT filesystem from a disk image.
///
/// # Arguments
/// - `disk_lock`: A reference-counted `RwLock` wrapping a `DiskImage` object.
/// - `format`: An optional `StandardFormat` to use when mounting the filesystem. This can
/// be used to override auto-detection of the disk format.
///
///
pub fn mount<L, C>(disk_lock: L, lock_context: C, format: Option<StandardFormat>) -> Result<Self, FileSystemError>
where
L: DiskLock<DiskImage, C> + Into<NonTrackingDiskLock<DiskImage>>,
C: LockContext,
{
log::debug!(
"FatFileSystem::mount(): Attempting to lock disk image for writing with {} references...",
disk_lock.strong_count()
);
// If a format was not provided, attempt to auto-detect the format
let format = match format {
Some(f) => Some(f),
None => disk_lock.read(lock_context).unwrap().closest_format(true),
};
if format.is_none() {
// Auto-detection failed. We can't mount the filesystem.
return Err(FileSystemError::MountError(
"Could not auto-detect disk format".to_string(),
));
}
// Move the arc into the view without cloning.
let mut view = StandardSectorView::new(disk_lock, format.unwrap())
.map_err(|e| FileSystemError::MountError(e.to_string()))?;
// Reset the cursor to the beginning of the view or the mount will fail
view.seek(std::io::SeekFrom::Start(0))
.map_err(|e| FileSystemError::MountError(e.to_string()))?;
// Mount the filesystem
let fat = match FileSystem::new(view, FsOptions::new()) {
Ok(fs) => fs,
Err(e) => return Err(FileSystemError::MountError(e.to_string())),
};
Ok(Self { fat: Some(fat) })
}
pub fn unmount(&mut self) {
self.fat = None;
}
pub fn read_file(&self, path: &str) -> Result<Vec<u8>, FileSystemError> {
if let Some(fat) = &self.fat {
let mut file = fat
.root_dir()
.open_file(path)
.map_err(|e| FileSystemError::ReadError(e.to_string()))?;
let mut data = Vec::new();
match file.read_to_end(&mut data) {
Ok(_) => Ok(data),
Err(e) => Err(FileSystemError::ReadError(e.to_string())),
}
}
else {
Err(FileSystemError::MountError("Filesystem not mounted".to_string()))
}
}
pub fn list_all_files(&self) -> Vec<String> {
let mut files = Vec::new();
if let Some(fat) = &self.fat {
let dir = fat.root_dir();
Self::list_files_recursive(&dir, &mut files);
}
files
}
pub fn list_files_recursive<IO: ReadWriteSeek, TP: TimeProvider, OCC: OemCpConverter>(
dir: &Dir<IO, TP, OCC>,
files: &mut Vec<String>,
) {
for entry in dir.iter().flatten() {
if entry.is_dir() {
// Ignore the current and parent directory entries to avoid infinite recursion
if entry.short_file_name() == "." || entry.short_file_name() == ".." {
continue;
}
log::debug!("descending into dir: {}", entry.short_file_name());
let sub_dir = entry.to_dir();
Self::list_files_recursive(&sub_dir, files);
}
else if entry.is_file() {
files.push(entry.short_file_name());
}
}
}
pub fn build_file_tree_from_root(&self) -> Option<FileTreeNode> {
if let Some(fat) = &self.fat {
let root_dir = fat.root_dir();
let mut path_stack = Vec::new();
Some(Self::build_file_tree_recursive(None, &root_dir, &mut path_stack))
}
else {
None
}
}
pub fn build_file_tree_recursive<IO: ReadWriteSeek, TP: TimeProvider, OCC: OemCpConverter>(
dir_entry: Option<&fluxfox_fat::DirEntry<IO, TP, OCC>>,
dir: &Dir<IO, TP, OCC>,
path_stack: &mut Vec<String>,
) -> FileTreeNode {
let mut children = Vec::new();
if let Some(dir_entry) = dir_entry {
path_stack.push(dir_entry.short_file_name());
}
for entry in dir.iter().flatten() {
let entry_name = entry.short_file_name();
let entry_size = entry.len();
let full_path = format!("{}/{}", path_stack.join("/"), entry_name);
if entry.is_dir() {
// Ignore the current and parent directory entries to avoid infinite recursion
if entry_name == "." || entry_name == ".." {
continue;
}
log::debug!("descending into dir: {}", entry_name);
let sub_dir = entry.to_dir();
children.push(Self::build_file_tree_recursive(Some(&entry), &sub_dir, path_stack));
}
else if entry.is_file() {
// log::debug!(
// "adding file: {} modified date: {}",
// entry_name,
// FsDateTime::from(entry.modified())
// );
children.push(FileTreeNode::File(FileEntry {
e_type: FileEntryType::File,
short_name: entry_name,
long_name: Some(entry.file_name()),
size: entry_size,
path: full_path,
created: None, // Created date was not implemented by DOS. Added in NT + later
modified: Some(entry.modified().into()),
}));
}
}
let node = FileTreeNode::Directory {
dfe: FileEntry {
e_type: FileEntryType::Directory,
short_name: dir_entry.map(|e| e.short_file_name()).unwrap_or_default(),
long_name: dir_entry.map(|e| Some(e.file_name())).unwrap_or_else(|| None),
path: if path_stack.len() < 2 {
"/".to_string()
}
else {
path_stack.join("/")
},
size: 0, // Directory size can be calculated if needed
created: None,
modified: dir_entry.map(|e| e.modified().into()),
},
children,
};
// Pop the current directory name from the path stack
path_stack.pop();
node
}
#[cfg(any(feature = "zip", feature = "tar"))]
pub fn root_as_archive(&mut self, archive_type: FileSystemArchive) -> Result<Vec<u8>, FileSystemError> {
let root_node;
if let Some(fat) = &self.fat {
let root_dir = fat.root_dir();
root_node = Self::build_file_tree_recursive(None, &root_dir, &mut Vec::new());
}
else {
return Err(FileSystemError::MountError("Filesystem not mounted".to_string()));
}
self.node_as_archive(&root_node, true, FileNameType::Short, archive_type)
}
#[cfg(any(feature = "zip", feature = "tar"))]
pub fn path_as_archive(
&mut self,
path: &str,
recursive: bool,
archive_type: FileSystemArchive,
) -> Result<Vec<u8>, FileSystemError> {
let root_node;
if let Some(fat) = &self.fat {
let root_dir = fat.root_dir();
root_node = Self::build_file_tree_recursive(None, &root_dir, &mut Vec::new());
}
else {
return Err(FileSystemError::MountError("Filesystem not mounted".to_string()));
}
// Resolve the path to the node
if let Some(node) = root_node.node(path) {
self.node_as_archive(node, recursive, FileNameType::Short, archive_type)
}
else {
Err(FileSystemError::PathNotFound(path.to_string()))
}
}
#[cfg(any(feature = "zip", feature = "tar"))]
pub fn node_as_archive(
&mut self,
node: &FileTreeNode,
recursive: bool,
name_type: FileNameType,
archive_type: FileSystemArchive,
) -> Result<Vec<u8>, FileSystemError> {
let archive_data = match archive_type {
FileSystemArchive::Zip => {
#[cfg(feature = "zip")]
{
self.node_as_zip(node, recursive, name_type)?
}
#[cfg(not(feature = "zip"))]
{
return Err(FileSystemError::FeatureError("zip".to_string()));
}
}
FileSystemArchive::Tar => {
#[cfg(feature = "tar")]
{
self.node_as_tar(node, recursive, name_type)?
}
#[cfg(not(feature = "tar"))]
{
return Err(FileSystemError::FeatureError("tar".to_string()));
}
}
};
Ok(archive_data)
}
#[cfg(feature = "zip")]
pub fn node_as_zip(
&mut self,
node: &FileTreeNode,
recursive: bool,
name_type: FileNameType,
) -> Result<Vec<u8>, FileSystemError> {
log::debug!("node_as_zip(): Building zip archive from node: {}", node);
let mut writer = zip::ZipWriter::new(std::io::Cursor::new(Vec::new()));
let options = zip::write::SimpleFileOptions::default();
let file_list = node.file_paths(recursive, name_type);
if file_list.is_empty() {
return Err(FileSystemError::EmptyFileSystem);
}
for file_path in file_list {
log::debug!("node_as_zip(): Adding file to zip: {}", file_path);
let file_data = self.read_file(&file_path)?;
writer.start_file_from_path(file_path, options)?;
writer.write_all(&file_data)?;
}
let zip_data = writer.finish()?;
Ok(zip_data.into_inner())
}
#[cfg(feature = "tar")]
pub fn node_as_tar(
&mut self,
node: &FileTreeNode,
recursive: bool,
name_type: FileNameType,
) -> Result<Vec<u8>, FileSystemError> {
log::debug!("node_as_tar(): Building tarfile archive from node: {}", node);
let mut builder = tar::Builder::new(Vec::new());
let file_list = node.file_paths(recursive, name_type);
if file_list.is_empty() {
return Err(FileSystemError::EmptyFileSystem);
}
for file_path in file_list {
let mut header = tar::Header::new_gnu();
log::debug!("node_as_tar(): Adding file to tarfile: {}", file_path);
let file_data = self.read_file(&file_path)?;
header.set_size(file_data.len() as u64);
header.set_cksum();
// Remove the leading slash from the file path so that the tar is relative
builder.append_data(&mut header, file_path.trim_start_matches('/'), Cursor::new(file_data))?;
}
builder.finish()?;
let tar_data = builder.into_inner()?;
Ok(tar_data)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_system/fat/mod.rs | src/file_system/fat/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
pub mod fat_fs;
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/flux/flux_revolution.rs | src/flux/flux_revolution.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use crate::{
flux::{
pll::{Pll, PllDecodeFlags, PllDecodeStatEntry, PllMarkerEntry},
BasicFluxStats,
FluxTransition,
},
types::{DiskCh, TrackDataEncoding},
};
use bit_vec::BitVec;
use std::cmp::Ordering;
/// Type of revolution.
/// `Source` is a direct read from the disk image.
/// `Synthetic` is a generated revolution, usually shifting a flux from one source revolution
/// to another.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum FluxRevolutionType {
Source,
Synthetic,
}
/// A struct containing statistics about a flux revolution.
pub struct FluxRevolutionStats {
/// The type of revolution.
pub rev_type: FluxRevolutionType,
/// The data encoding detected for the revolution.
pub encoding: TrackDataEncoding,
/// The data rate of the revolution in bits per second.
pub data_rate: f64,
/// The time taken to read the revolution in seconds.
pub index_time: f64,
/// The number of flux transitions in the revolution.
pub ft_ct: usize,
/// The number of bits decoded from the revolution.
pub bitcell_ct: usize,
/// The duration of the first flux transition in the revolution.
pub first_ft: f64,
/// The duration of the last flux transition in the revolution.
pub last_ft: f64,
}
/// A struct representing one revolution of a fluxstream track.
#[derive(Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct FluxRevolution {
/// The type of revolution.
pub rev_type: FluxRevolutionType,
/// The physical cylinder and head of the revolution.
pub ch: DiskCh,
/// The data rate of the revolution in bits per second, or None if not determined.
pub data_rate: Option<f64>,
/// The time taken to read the revolution in seconds.
pub index_time: f64,
/// The list of times between flux transitions, in seconds.
pub flux_deltas: Vec<f64>,
/// The list of transitions decoded from the flux deltas as `FluxTransition` enums.
pub transitions: Vec<FluxTransition>,
/// The bitstream decoded from the flux deltas.
pub bitstream: BitVec,
/// The bit errors found in the bitstream.
pub biterrors: BitVec,
/// The data encoding detected for the revolution.
pub encoding: TrackDataEncoding,
/// Any discovered markers.
pub markers: Vec<PllMarkerEntry>,
/// Statistics from the PLL decoding process.
pub pll_stats: Vec<PllDecodeStatEntry>,
}
impl FluxRevolution {
/// Retrieve the data encoding detected for the revolution.
pub fn encoding(&self) -> TrackDataEncoding {
self.encoding
}
/// Retrieve statistics about a decoded revolution.
pub fn stats(&self) -> FluxRevolutionStats {
let computed_data_rate = self.bitstream.len() as f64 * (1.0 / self.index_time);
FluxRevolutionStats {
rev_type: self.rev_type,
encoding: self.encoding,
data_rate: self.data_rate.unwrap_or(computed_data_rate),
index_time: self.index_time,
ft_ct: self.flux_deltas.len(),
bitcell_ct: self.bitstream.len(),
first_ft: *self.flux_deltas.first().unwrap_or(&0.0),
last_ft: *self.flux_deltas.last().unwrap_or(&0.0),
}
}
/// Create a new `FluxRevolution` from a list of durations between flux transitions in seconds.
pub fn from_f64(ch: DiskCh, deltas: &[f64], index_time: f64) -> Self {
FluxRevolution {
rev_type: FluxRevolutionType::Source,
ch,
data_rate: None,
index_time,
flux_deltas: deltas.to_vec(),
transitions: Vec::with_capacity(deltas.len()),
bitstream: BitVec::with_capacity(deltas.len() * 3),
biterrors: BitVec::with_capacity(deltas.len() * 3),
encoding: TrackDataEncoding::Mfm,
markers: Vec::new(),
pll_stats: Vec::new(),
}
}
/// Create new synthetic `FluxRevolution`s from a pair of adjacent revolutions.
/// Fluxes are shifted from one revolution to another to correct for index jitter.
pub(crate) fn from_adjacent_pair(first: &FluxRevolution, second: &FluxRevolution) -> Vec<FluxRevolution> {
let mut new_revolutions = Vec::new();
let flux_ct_diff = (first.flux_deltas.len() as i64 - second.flux_deltas.len() as i64).abs();
match first.flux_deltas.len().cmp(&second.flux_deltas.len()) {
Ordering::Greater if flux_ct_diff == 2 => {
log::debug!(
"FluxRevolution::from_adjacent_pair(): First revolution is candidate for flux shift to second."
);
let mut first_deltas = first.flux_deltas.clone();
let shift_delta = first_deltas.pop();
let mut second_deltas = second.flux_deltas.clone();
second_deltas.insert(0, shift_delta.unwrap());
let new_first = FluxRevolution {
rev_type: FluxRevolutionType::Synthetic,
ch: first.ch,
data_rate: first.data_rate,
index_time: first.index_time,
transitions: Vec::with_capacity(first_deltas.len()),
flux_deltas: first_deltas,
bitstream: BitVec::with_capacity(first.bitstream.capacity()),
biterrors: BitVec::with_capacity(first.bitstream.capacity()),
encoding: TrackDataEncoding::Mfm,
markers: Vec::new(),
pll_stats: Vec::new(),
};
let new_second = FluxRevolution {
rev_type: FluxRevolutionType::Synthetic,
ch: second.ch,
data_rate: second.data_rate,
index_time: second.index_time,
transitions: Vec::with_capacity(second_deltas.len()),
flux_deltas: second_deltas,
bitstream: BitVec::with_capacity(second.bitstream.capacity()),
biterrors: BitVec::with_capacity(second.bitstream.capacity()),
encoding: TrackDataEncoding::Mfm,
markers: Vec::new(),
pll_stats: Vec::new(),
};
new_revolutions.push(new_first);
new_revolutions.push(new_second);
}
Ordering::Less if flux_ct_diff == 2 => {
log::debug!(
"FluxRevolution::from_adjacent_pair(): Second revolution is candidate for flux shift to first."
);
let mut first_deltas = first.flux_deltas.clone();
let mut second_deltas = second.flux_deltas.clone();
let shift_delta = second_deltas.remove(0);
first_deltas.push(shift_delta);
let new_first = FluxRevolution {
rev_type: FluxRevolutionType::Synthetic,
ch: first.ch,
data_rate: first.data_rate,
index_time: first.index_time,
transitions: Vec::with_capacity(first_deltas.len()),
flux_deltas: first_deltas,
bitstream: BitVec::with_capacity(first.bitstream.capacity()),
biterrors: BitVec::with_capacity(first.bitstream.capacity()),
encoding: TrackDataEncoding::Mfm,
markers: Vec::new(),
pll_stats: Vec::new(),
};
let new_second = FluxRevolution {
rev_type: FluxRevolutionType::Synthetic,
ch: second.ch,
data_rate: second.data_rate,
index_time: second.index_time,
transitions: Vec::with_capacity(second_deltas.len()),
flux_deltas: second_deltas,
bitstream: BitVec::with_capacity(second.bitstream.capacity()),
biterrors: BitVec::with_capacity(second.bitstream.capacity()),
encoding: TrackDataEncoding::Mfm,
markers: Vec::new(),
pll_stats: Vec::new(),
};
new_revolutions.push(new_first);
new_revolutions.push(new_second);
}
_ => {}
}
new_revolutions
}
/// Retrieve the number of flux transitions in this revolution.
pub(crate) fn ft_ct(&self) -> usize {
self.flux_deltas.len()
}
/// Retrieve the vector of `PllDecodeStatEntry` structs from the PLL decoding process.
#[allow(dead_code)]
pub(crate) fn pll_stats(&self) -> &Vec<PllDecodeStatEntry> {
&self.pll_stats
}
pub fn transition_ct(&self) -> usize {
self.transitions.len()
}
/// Retrieve the average time between flux transitions in seconds for the entire revolution.
/// Note: this value is probably not reliable for determining any specific heuristics.
pub fn transition_avg(&self) -> f64 {
let mut t_sum = 0.0;
let mut t_ct = 0;
for t in self.flux_deltas.iter() {
if *t > 0.0 {
t_ct += 1;
t_sum += *t;
}
}
t_sum / t_ct as f64
}
pub fn bitstream_data(&self) -> (Vec<u8>, usize) {
(self.bitstream.to_bytes(), self.bitstream.len())
}
pub fn decode(&mut self, pll: &mut Pll) {
self.transitions = pll.decode_transitions(self);
//self.decode_bitstream();
log::trace!(
"FluxRevolution::decode(): Decoded {} transitions into {} bits, ratio: {}",
self.transitions.len(),
self.bitstream.len(),
self.bitstream.len() as f64 / self.transitions.len() as f64
);
}
pub fn decode_direct(&mut self, pll: &mut Pll) -> BasicFluxStats {
let pll_flags = PllDecodeFlags::empty();
let mut decode_result = pll.decode(self, TrackDataEncoding::Mfm, pll_flags);
let encoding = decode_result
.flux_stats
.detect_encoding()
.unwrap_or(TrackDataEncoding::Mfm);
if decode_result.markers.is_empty() && matches!(encoding, TrackDataEncoding::Fm) {
// If we detected FM encoding, decode again as FM
log::warn!("FluxRevolution::decode(): No markers found. Track might be FM encoded? Re-decoding...");
let fm_result = pll.decode(self, TrackDataEncoding::Fm, pll_flags);
if fm_result.markers.is_empty() {
log::warn!("FluxRevolution::decode(): No markers found in FM decode. Keeping MFM.");
self.encoding = TrackDataEncoding::Mfm;
}
else {
log::debug!("FluxRevolution::decode(): Found FM marker! Setting track to FM encoding.");
self.encoding = TrackDataEncoding::Fm;
decode_result = fm_result;
}
}
self.bitstream = decode_result.bits;
log::trace!(
"FluxRevolution::decode(): Decoded {} transitions into {} bits with {} encoding, ratio: {}",
self.flux_deltas.len(),
self.bitstream.len(),
self.encoding,
self.bitstream.len() as f64 / self.flux_deltas.len() as f64
);
self.data_rate = Some(self.bitstream.len() as f64 * (1.0 / self.index_time) / 2.0);
self.pll_stats = decode_result.pll_stats;
self.markers = decode_result.markers;
decode_result.flux_stats
}
/// Create an iterator over the flux delta times in a revolution.
pub fn delta_iter(&self) -> std::slice::Iter<f64> {
self.flux_deltas.iter()
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/flux/mod.rs | src/flux/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use crate::types::{TrackDataEncoding, TrackDensity};
use std::{
fmt,
fmt::{Display, Formatter},
};
pub mod flux_revolution;
#[macro_use]
pub mod pll;
pub mod histogram;
pub use flux_revolution::FluxRevolutionType;
//pub const AVERAGE_FLUX_DENSITY: f64 = 2.636; // Average number of bits encoded per flux transition
#[doc(hidden)]
#[macro_export]
macro_rules! format_us {
($value:expr) => {
format!("{:.4}μs", $value * 1_000_000.0)
};
}
#[doc(hidden)]
#[macro_export]
macro_rules! format_ms {
($value:expr) => {
format!("{:.4}ms", $value * 1_000.0)
};
}
#[derive(Copy, Clone, PartialEq, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum FluxTransition {
TooShort,
Short,
Medium,
Long,
TooLong,
}
impl FluxTransition {
pub fn abnormal(&self) -> bool {
match self {
FluxTransition::TooShort | FluxTransition::TooLong => true,
_ => false,
}
}
}
impl Display for FluxTransition {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
FluxTransition::TooShort => write!(f, "s"),
FluxTransition::Short => write!(f, "S"),
FluxTransition::Medium => write!(f, "M"),
FluxTransition::Long => write!(f, "L"),
FluxTransition::TooLong => write!(f, "X"),
}
}
}
impl FluxTransition {
#[allow(dead_code)]
pub fn to_bits(&self) -> &[bool] {
match self {
FluxTransition::Short => &[true, false],
FluxTransition::Medium => &[true, false, false],
FluxTransition::Long => &[true, false, false, false],
_ => &[],
}
}
}
#[derive(Default)]
pub struct BasicFluxStats {
pub total: u32,
pub short: u32,
pub short_time: f64,
pub medium: u32,
pub long: u32,
pub too_short: u32,
pub too_long: u32,
pub too_slow_bits: u32,
pub shortest_flux: f64,
pub longest_flux: f64,
}
impl Display for BasicFluxStats {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(
f,
"Total: {} S: {} M: {} L: {} Shortest: {} Longest: {} Too Short: {} Too Long: {}",
self.total,
self.short,
self.medium,
self.long,
format_us!(self.shortest_flux),
format_us!(self.longest_flux),
self.too_short,
self.too_long
)
}
}
impl BasicFluxStats {
pub fn detect_density(&self, mfi: bool) -> Option<TrackDensity> {
let mut avg = self.short_avg();
log::debug!(
"FluxStats::detect_density(): Transition average: {:.4}",
format_us!(avg)
);
if mfi {
avg *= 2.0;
}
match avg {
1.0e-6..=3e-6 => Some(TrackDensity::High),
3e-6..=5e-6 => Some(TrackDensity::Double),
_ => None,
}
}
fn short_avg(&self) -> f64 {
if self.short == 0 {
0.0
}
else {
self.short_time / self.short as f64
}
}
}
impl BasicFluxStats {
pub fn detect_encoding(&self) -> Option<TrackDataEncoding> {
let medium_freq = self.medium as f64 / self.total as f64;
// If we have fewer than 5% medium transitions, it is likely an FM track
if medium_freq > 0.05 {
Some(TrackDataEncoding::Mfm)
}
else {
Some(TrackDataEncoding::Fm)
}
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/flux/histogram.rs | src/flux/histogram.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! This module defines a [FluxHistogram] structure which is used to determine
//! the density, data rate and encoding of a flux track so that the PLL may
//! be properly initialized for decoding.
//!
//! Normally this is used internally by [FluxRevolution], but can be used
//! independently by format parsers. For example the MFI format parser uses
//! a [FluxHistogram] to normalize track timings when 360RPM images are
//! detected, as to not clutter normal flux processing logic with special
//! cases.
use histogram::{Bucket, Histogram};
pub struct FluxHistogram {
histogram: Histogram,
maxima: Vec<(u64, std::ops::RangeInclusive<u64>)>,
total_time: f64,
}
impl FluxHistogram {
/// Produce a [FluxHistogram] over a fraction of the flux deltas in the revolution.
/// # Arguments
/// * `deltas` - A slice of F values representing flux deltas times
/// * `fraction` - The fraction of the deltas to use in the histogram
pub fn new(deltas: &[f64], fraction: f64) -> Self {
// from docs:
// grouping_power should be set such that 2^(-1 * grouping_power) is an acceptable relative error.
// Rephrased, we can plug in the acceptable relative error into grouping_power = ceil(log2(1/e)).
// For example, if we want to limit the error to 0.1% (0.001) we should set grouping_power = 7.
// Max value power of 2^14 = 16384 (16us)
// Grouping power of 3 produces sharp spikes without false maxima
let mut histogram = Histogram::new(3, 14).unwrap();
let take_count = (deltas.len() as f64 * fraction).round() as usize;
log::debug!("FluxRevolution::histogram(): Taking {} flux deltas", take_count);
let mut total_time = 0.0;
for delta_ns in deltas.iter().take(take_count).map(|d| {
total_time += d;
Self::delta_to_u64(*d)
}) {
_ = histogram.increment(delta_ns);
}
FluxHistogram {
histogram,
maxima: Vec::new(),
total_time,
}
}
pub fn total_time(&self) -> f64 {
self.total_time
}
fn delta_to_u64(value: f64) -> u64 {
(value * 1_000_000_000.0) as u64
}
fn u64_to_delta(value: u64) -> f64 {
value as f64 / 1_000_000_000.0
}
/// Locate local maxima in a histogram by bucket.
fn find_local_maxima(&mut self, threshold: Option<f64>) -> &Vec<(u64, std::ops::RangeInclusive<u64>)> {
let mut peaks = vec![];
let mut previous_bucket: Option<Bucket> = None;
let mut current_bucket: Option<Bucket> = None;
// Calculate total count for threshold
let total_count: u64 = self.histogram.into_iter().map(|bucket| bucket.count()).sum();
let threshold = (total_count as f64 * threshold.unwrap_or(0.005)).round() as u64;
for bucket in self.histogram.into_iter() {
if let (Some(prev), Some(curr)) = (previous_bucket.as_ref(), current_bucket.as_ref()) {
// Identify local maximum and apply threshold check
if curr.count() >= prev.count() && curr.count() > bucket.count() && curr.count() >= threshold {
peaks.push((curr.count(), curr.start()..=curr.end()));
}
}
// Update previous and current buckets
previous_bucket = current_bucket.take();
current_bucket = Some(bucket.clone());
}
self.maxima = peaks;
&self.maxima
}
/// Attempt to calculate the base (short) transition time.
/// You must have called `find_local_maxima()` first.
pub fn base_transition_time(&mut self) -> Option<f64> {
if self.maxima.is_empty() {
self.find_local_maxima(None);
}
if self.maxima.len() < 2 {
log::warn!("FluxHistogram::base_transition_time(): Not enough peaks found");
return None;
}
let first_peak = &self.maxima[0].1;
let range_median = (first_peak.start() + first_peak.end()) / 2;
// Convert back to seconds
Some(Self::u64_to_delta(range_median))
}
#[allow(dead_code)]
pub(crate) fn print_debug(&self) {
for peak in self.maxima.iter() {
log::debug!(
"FluxRevolution::histogram(): Peak at range: {:?} ct: {}",
peak.1,
peak.0
);
}
}
/// Debugging function to print a histogram in ASCII.
#[allow(dead_code)]
pub(crate) fn print_horizontal_histogram_with_labels(&self, height: usize) {
let mut max_count = 0;
let mut buckets = vec![];
// Step 1: Collect buckets and find max count for scaling
for bucket in self.histogram.into_iter() {
max_count = max_count.max(bucket.count());
buckets.push(bucket);
}
// Step 2: Initialize 2D array for histogram, filled with spaces
let width = buckets.len();
let mut graph = vec![vec![' '; width]; height];
// Step 3: Plot each bucket count as a column of asterisks
for (i, bucket) in buckets.iter().enumerate() {
let bar_height = if max_count > 0 {
(bucket.count() as f64 / max_count as f64 * height as f64).round() as usize
}
else {
0
};
for row in (height - bar_height)..height {
graph[row][i] = '*';
}
}
// Step 4: Print the graph row by row
for row in &graph {
println!("{}", row.iter().collect::<String>());
}
// Step 5: Print bucket start values vertically
let max_label_len = buckets.iter().map(|b| b.start().to_string().len()).max().unwrap_or(0);
for i in 0..max_label_len {
let row: String = buckets
.iter()
.map(|b| {
let label = b.start().to_string();
label.chars().nth(i).unwrap_or(' ')
})
.collect();
println!("{}", row);
}
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/flux/pll.rs | src/flux/pll.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use std::io::Write;
use bit_vec::BitVec;
use bitflags::bitflags;
use crate::{
flux::{flux_revolution::FluxRevolution, BasicFluxStats, FluxTransition},
format_ms,
format_us,
types::{TrackDataEncoding, TrackDataRate},
};
const BASE_CLOCK: f64 = 2e-6; // Represents the default clock for a 300RPM, 250Kbps disk.
const MAX_CLOCK_ADJUST: f64 = 0.20; // % Maximum clock adjustment in either direction
const SHORT_TRANSITION: f64 = 4.0e-6; // 4 µs
const MEDIUM_TRANSITION: f64 = 6.0e-6; // 6 µs
const LONG_TRANSITION: f64 = 8.0e-6; // 8 µs
const TOLERANCE: f64 = 0.5e-6; // 0.5 µs Tolerance for time deviation
bitflags! {
/// Bit flags representing loading options passed to a disk image file parser.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[rustfmt::skip]
pub struct PllDecodeFlags: u32 {
const COLLECT_FLUX_STATS = 0b0000_0000_0000_0001; // Collect flux statistics. Memory intensive!
const COLLECT_ENUMS = 0b0000_0000_0000_0010; // Collect enumerated values representing each flux category
}
}
#[derive(Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct PllMarkerEntry {
pub time: f64,
pub bitcell: usize,
}
#[derive(Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct PllDecodeStatEntry {
pub time: f64,
pub len: f64,
pub predicted: f64,
pub clk: f64,
pub window_min: f64,
pub window_max: f64,
pub phase_err: f64,
pub phase_err_i: f64,
}
#[allow(dead_code)]
pub enum PllPreset {
Aggressive,
Conservative,
}
pub struct PllDecodeResult {
pub transitions: Vec<FluxTransition>,
pub bits: BitVec,
pub flux_stats: BasicFluxStats,
pub pll_stats: Vec<PllDecodeStatEntry>,
pub markers: Vec<PllMarkerEntry>,
}
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Pll {
pub pll_default_rate: f64,
pub pll_rate: f64,
pub pll_period: f64,
pub working_period: f64,
pub period_factor: f64,
pub max_adjust: f64,
pub density_factor: f64,
pub clock_gain: f64,
pub phase_gain: f64,
}
impl Pll {
pub fn new() -> Self {
Pll {
pll_default_rate: u32::from(TrackDataRate::Rate250Kbps(1.0)) as f64 * 2.0,
pll_rate: u32::from(TrackDataRate::Rate250Kbps(1.0)) as f64 * 2.0,
pll_period: BASE_CLOCK, // 2 µs
working_period: BASE_CLOCK,
period_factor: 1.0,
max_adjust: MAX_CLOCK_ADJUST,
density_factor: 2.0,
clock_gain: 0.05,
phase_gain: 0.65,
}
}
pub fn from_preset(preset: PllPreset) -> Pll {
match preset {
PllPreset::Aggressive => Pll::new(),
PllPreset::Conservative => Pll::new(),
}
}
pub fn set_clock(&mut self, rate: f64, max_adj: Option<f64>) {
self.pll_default_rate = rate;
self.pll_rate = rate;
self.pll_period = 1.0 / rate;
self.working_period = self.pll_period;
if let Some(adj) = max_adj {
self.max_adjust = adj;
}
assert!(self.pll_rate > 1.0);
log::debug!(
"Pll::set_clock(): Setting clock rate to {:.2}, max adjust: {:.2} new period: {}",
self.pll_rate,
self.max_adjust,
format_us!(self.pll_period)
);
}
pub fn get_clock(&mut self) -> f64 {
self.pll_rate
}
pub fn reset_clock(&mut self) {
self.pll_rate = self.pll_default_rate;
self.pll_period = 1.0 / self.pll_rate;
self.working_period = self.pll_period;
log::debug!(
"Pll::reset_clock(): Resetting clock to default rate: {} period: {}",
self.pll_rate,
format_us!(self.pll_period)
);
}
pub fn adjust_clock(&mut self, factor: f64) {
let old_rate = self.pll_rate;
self.pll_rate *= factor;
self.pll_period = 1.0 / self.pll_rate;
self.working_period = self.pll_period;
log::debug!(
"Pll::adjust_clock(): Adjusting clock by {:.4}% factor: {:.4} old: {:.4} new: {:.4} period: {}",
factor * 100.0,
factor,
old_rate,
self.pll_rate,
format_us!(self.pll_period)
);
}
#[allow(dead_code)]
pub fn decode_transitions(&mut self, stream: &FluxRevolution) -> Vec<FluxTransition> {
let mut transitions = Vec::new();
let mut valid_deltas = 0;
let mut delta_avg = 0.0;
for t in stream.delta_iter() {
if *t > 0.0 {
delta_avg += *t;
valid_deltas += 1;
}
let transition = self.classify_transition(*t);
transitions.push(transition);
}
let delta_avg = delta_avg / valid_deltas as f64;
let other_ct = transitions.iter().filter(|t| t.abnormal()).count();
log::warn!(
"Pll::decode_transitions(): {} avg transition time, {} unclassified transitions",
delta_avg,
other_ct
);
transitions
}
#[allow(dead_code)]
pub fn classify_transition(&self, duration: f64) -> FluxTransition {
// log::trace!(
// "Pll::classify_transition(): Duration: {} short delta: {} medium delta: {} long delta: {}",
// duration,
// (duration - SHORT_TRANSITION).abs(),
// (duration - MEDIUM_TRANSITION).abs(),
// (duration - LONG_TRANSITION).abs()
// );
if (duration - SHORT_TRANSITION).abs() <= TOLERANCE {
FluxTransition::Short
}
else if (duration - MEDIUM_TRANSITION).abs() <= TOLERANCE {
FluxTransition::Medium
}
else if (duration - LONG_TRANSITION).abs() <= TOLERANCE {
FluxTransition::Long
}
else {
//log::trace!("unclassified duration: {}", duration);
FluxTransition::TooLong
}
}
pub fn decode(
&mut self,
stream: &FluxRevolution,
encoding: TrackDataEncoding,
flags: PllDecodeFlags,
) -> PllDecodeResult {
match encoding {
TrackDataEncoding::Mfm => self.decode_mfm(stream, flags),
TrackDataEncoding::Fm => self.decode_fm(stream, flags),
_ => {
log::error!("Unsupported encoding: {:?}", encoding);
self.decode_mfm(stream, flags)
}
}
}
fn decode_mfm(&mut self, stream: &FluxRevolution, flags: PllDecodeFlags) -> PllDecodeResult {
// Average conversion factor between flux transition count and bitcell count is ~ 2.6.
// We will use x3 to set the capacity for some headroom.
let mut output_bits = BitVec::with_capacity(stream.flux_deltas.len() * 3);
let mut error_bits = BitVec::with_capacity(stream.flux_deltas.len() * 3);
// The transitions vector will hold the classification of each flux transition as a
// `FluxTransition` enum - Short, Medium, Long, or Other. This again takes more space, and
// we're not currently using it for anything, however it may be useful for debugging
let mut transitions = if flags.contains(PllDecodeFlags::COLLECT_ENUMS) {
log::debug!("decode_mfm(): Collecting FluxTransition classifications...");
Vec::with_capacity(stream.flux_deltas.len())
}
else {
Vec::new()
};
// Collecting PllDecodeStatEntry uses tons of RAM - over 1GB easily, so we should only do it
// when requested. We the stats for plotting, but perhaps a track can be re-decoded with stats
// enabled on demand to produce a plot
let mut pll_stats = if flags.contains(PllDecodeFlags::COLLECT_FLUX_STATS) {
log::debug!("decode_mfm(): Collecting PLL statistics...");
Vec::with_capacity(stream.flux_deltas.len())
}
else {
Vec::new()
};
let mut phase_error: f64 = 0.0;
let mut phase_adjust: f64 = 0.0;
let mut this_flux_time;
// The first entry of the flux stream represents a transition time, so we start off the track
// at the first actual flux transition. We will assume that this transition is perfectly
// aligned within the center of the clock period by adding half the period from the
// start time.
let mut time = self.pll_period / 2.0;
let mut last_flux_time = 0.0;
let mut clock_ticks: u64 = 0;
let mut clock_ticks_since_flux: u64 = 0;
let mut shift_reg: u64 = 0;
let mut markers = Vec::new();
let mut zero_ct = 0;
let min_clock = self.working_period - (self.working_period * self.max_adjust);
let max_clock = self.working_period + (self.working_period * self.max_adjust);
self.working_period = self.pll_period;
//let p_term = self.pll_period * self.phase_gain;
let mut flux_stats = BasicFluxStats {
total: stream.flux_deltas.len() as u32,
..BasicFluxStats::default()
};
let mut last_bit = false;
let mut adjust_gate: i32 = 0;
// Each delta time represents the time in seconds between two flux transitions.
for (flux_ct, &delta_time) in stream.delta_iter().enumerate() {
flux_stats.shortest_flux = delta_time.min(flux_stats.shortest_flux);
flux_stats.longest_flux = delta_time.max(flux_stats.longest_flux);
if flux_ct == 0 {
flux_stats.shortest_flux = delta_time;
log::debug!(
"decode_mfm(): first flux transition: {} @({})",
format_us!(delta_time),
format_ms!(time)
);
}
// Set the time of the next flux transition.
this_flux_time = last_flux_time + delta_time;
// log::warn!(
// "next flux in {} @ {} ({:.4} clocks @ {})",
// format_us!(delta_time),
// format_ms!(next_flux_time),
// delta_time / self.working_period,
// format_us!(self.working_period)
// );
// Tick the clock until we *pass* the time of the next flux transition.
time += phase_adjust;
while time < this_flux_time {
time += self.working_period;
clock_ticks_since_flux += 1;
clock_ticks += 1;
// log::debug!(
// "tick! time: {} pll_clock: {} phase_adj: {} next_d: {} next_t: {} clocks: {}",
// time,
// self.working_period,
// phase_adjust,
// format_us!(delta_time),
// next_flux_time,
// clock_ticks_since_flux
// );
}
let flux_length = clock_ticks_since_flux;
//log::trace!("decode_mfm(): flux length: {}", flux_length);
// Emit 0's and 1's based on the number of clock ticks since last flux transition.
if flux_length < 2 {
//log::warn!("too fast flux: {} @({})", clock_ticks_since_flux, time);
flux_stats.too_short += 1;
}
else if flux_length > 4 {
log::trace!(
"decode_mfm(): Too slow flux detected: #{} @({}), dt: {}, clocks: {}",
flux_ct,
format_ms!(time),
delta_time,
clock_ticks_since_flux,
);
flux_stats.too_long += 1;
flux_stats.too_slow_bits += (flux_length - 4) as u32;
}
// Categorize the flux length into short, medium, or long.
match flux_length {
0..2 => {
if flags.contains(PllDecodeFlags::COLLECT_ENUMS) {
transitions.push(FluxTransition::TooShort);
}
}
2 => {
flux_stats.short_time += delta_time;
flux_stats.short += 1;
if flags.contains(PllDecodeFlags::COLLECT_ENUMS) {
transitions.push(FluxTransition::Short);
}
}
3 => {
flux_stats.medium += 1;
if flags.contains(PllDecodeFlags::COLLECT_ENUMS) {
transitions.push(FluxTransition::Medium);
}
}
4 => {
flux_stats.long += 1;
if flags.contains(PllDecodeFlags::COLLECT_ENUMS) {
transitions.push(FluxTransition::Long);
}
}
_ => {}
}
if flux_length > 0 {
for _ in 0..flux_length - 1 {
output_bits.push(false);
zero_ct += 1;
last_bit = false;
shift_reg <<= 1;
// More than 3 0's in a row is an MFM error.
error_bits.push(zero_ct > 3);
}
// Emit a 1 since we had a transition...
zero_ct = 0;
// Two 1's in a row is an MFM error.
error_bits.push(last_bit);
output_bits.push(true);
last_bit = true;
shift_reg <<= 1;
shift_reg |= 1;
}
// Look for MFM markers.
// System34 uses 0x4489_4489_4489, Amiga uses 0x4489_4489, but they both start with
// encoded 0x00 sync bytes (0xAAAA encoded). So we look for half sync / half marker
// to match either.
if (shift_reg & !0x8000_0000_0000_0000) == 0x2AAA_AAAA_4489_4489 {
//if shift_reg & 0x0000_0000_FFFF_FFFF == 0x0000_0000_4489_4489 {
log::trace!(
"decode_mfm(): Marker detected at {:10}, value: {:08X}/{:64b} bitcell: {}",
format_ms!(time),
shift_reg,
shift_reg,
output_bits.len() - 32
);
markers.push(PllMarkerEntry {
time,
bitcell: output_bits.len() - 32,
});
}
if zero_ct > 16 {
//log::warn!("decode_mfm(): NFA zone @ {}??", format_ms!(time));
}
// Transition should be somewhere within our last clock period, ideally in the center of it.
// Let's calculate the error.
// First, we calculate the predicted flux time. This is the time the transition should
// have arrived assuming no clock deviation since last flux.
//let predicted_flux_time = last_flux_time + (clock_ticks_since_flux as f64 * self.pll_clock);
//let predicted_flux_time = last_flux_time + phase_adjust + (clock_ticks_since_flux as f64 * self.working_period);
// The error is the difference between the actual flux time and the predicted flux time.
//let phase_error = next_flux_time - predicted_flux_time;
let window_max = (time - this_flux_time) + delta_time;
let window_min = window_max - self.working_period;
let window_center = window_max - self.working_period / 2.0;
let last_phase_error = phase_error;
phase_error = delta_time - window_center;
//phase_error = this_flux_time - (time - self.working_period / 2.0);
if phase_error < 0.0 {
// If delta is negative...
if adjust_gate < 0 {
adjust_gate -= 1;
}
else {
adjust_gate = -1;
}
}
else if phase_error >= 0.0 {
// If delta is positive...
if adjust_gate > 0 {
adjust_gate += 1;
}
else {
adjust_gate = 1;
}
}
// We calculate the change in phase error between pairs of fluxes as the primary
// driver of clock adjustment. Phase error alone is a bad indicator that the clock
// is wrong vs the window needing to be shifted.
//
// Consider the simplest case where we have a single flux off-center in one window.
// Its position in the window tells us nothing about the clock rate.
// If the next flux is a perfect 2us delta, it will be off by just as much. If we use
// phase offset alone, then we'll end up adjusting the clock when it shouldn't have been
// adjusted.
//
// If the phase_error_delta remains low, the clock is accurate, and it is the phase that
// needs to be adjusted. If phase_error_delta is high, we need to adjust the clock more.
//
//let phase_delta_error = phase_error - last_phase_error;
// The idea of taking the smallest magnitude phase error from the last two fluxes is that
// if one flux is well-centered, we have more of a clock problem than a phase
// problem. So we use the minimum phase error to adjust phase instead of directly.
let min_phase_error = if phase_error.abs() < last_phase_error.abs() {
phase_error
}
else {
last_phase_error
};
if flags.contains(PllDecodeFlags::COLLECT_FLUX_STATS) {
pll_stats.push(PllDecodeStatEntry {
time,
len: delta_time,
predicted: window_min + phase_adjust,
clk: self.working_period,
window_min,
window_max,
phase_err: phase_error,
phase_err_i: phase_adjust,
});
}
// Validate that flux is within expected window. if these fail our logic is bad.
// log::warn!(
// "window start: {} flux_time: {} window end: {}",
// format_us!(window_min),
// format_us!(delta_time),
// format_us!(window_max),
// );
//assert!(delta_time <= window_max);
//assert!(delta_time >= window_min);
//phase_adjust = min_phase_error;
//phase_adjust = (phase_adjust + (0.65 * min_phase_error)) % (self.working_period / 2.0);
phase_adjust = 0.65 * min_phase_error;
if flux_ct == 0 {
log::debug!(
"decode_mfm(): first phase error: {} @({:.9})",
format_us!(phase_error),
time
);
}
// Calculate the proportional frequency adjustment.
//let clk_adjust = (p_term * phase_error) / self.working_period;
//let clk_adjust = 0.05 * phase_delta_error;
let mut clk_adjust = 0.0;
if adjust_gate.abs() > 1 {
clk_adjust = 0.05 * phase_error;
}
//let clk_adjust = 0.075 * phase_error;
// log::debug!(
// "flux time: {} window center: {} phase error: {} clk_adjust: {} phase_adjust: {}",
// next_flux_time,
// window_center,
// format_us!(phase_error),
// format_us!(clk_adjust),
// format_us!(phase_adjust),
// );
// Adjust the clock frequency, and clamp it to the min/max values.
self.working_period += clk_adjust;
self.working_period = self.working_period.clamp(min_clock, max_clock);
// Save the last flux time for the next iteration.
clock_ticks_since_flux = 0;
last_flux_time = this_flux_time;
}
_ = std::io::stdout().flush();
log::debug!(
"decode_mfm(): Completed decoding of MFM flux stream. Total clocks: {} markers: {} FT stats: {}",
clock_ticks,
markers.len(),
flux_stats
);
PllDecodeResult {
transitions,
bits: output_bits,
flux_stats,
pll_stats,
markers,
}
}
fn decode_fm(&mut self, stream: &FluxRevolution, _flags: PllDecodeFlags) -> PllDecodeResult {
let mut output_bits = BitVec::with_capacity(stream.flux_deltas.len() * 3);
let pll_stats = Vec::with_capacity(stream.flux_deltas.len());
let mut phase_accumulator: f64 = 0.0;
let mut last_flux_time = 0.0;
let mut next_flux_time;
// The first entry of the flux stream represents a transition time, so we start off the track
// at the first actual flux transition. We will assume that this transition is perfectly
// aligned within the center of the clock period by subtracting half the period from the
// start time.
self.working_period = self.pll_period * 2.0;
let min_clock = self.working_period - (self.working_period * self.max_adjust);
let max_clock = self.working_period + (self.working_period * self.max_adjust);
let mut time = -self.working_period / 2.0;
let mut clock_ticks: u64 = 0;
let mut clock_ticks_since_flux: u64 = 0;
let mut shift_reg: u64 = 0;
let mut markers = Vec::new();
log::debug!(
"decode_fm(): normal period: {} working period: {} min: {} max: {}",
format_us!(self.pll_period),
format_us!(self.working_period),
format_us!(min_clock),
format_us!(max_clock)
);
let mut flux_stats = BasicFluxStats {
total: stream.flux_deltas.len() as u32,
..BasicFluxStats::default()
};
// Each delta time represents the time in seconds between two flux transitions.
for (flux_ct, &delta_time) in stream.delta_iter().enumerate() {
flux_stats.shortest_flux = delta_time.min(flux_stats.shortest_flux);
flux_stats.longest_flux = delta_time.max(flux_stats.longest_flux);
// pll_stats.push(PllDecodeStatEntry {
// time,
// len: delta_time,
// clk: self.working_period,
// phase_err: phase_accumulator,
// });
if flux_ct == 0 {
flux_stats.shortest_flux = delta_time;
log::debug!("first flux transition: {} @({:.9})", format_us!(delta_time), time);
}
// Set the time of the next flux transition.
next_flux_time = last_flux_time + delta_time;
// log::debug!(
// "next flux in {} @ {} ({} clocks)",
// format_us!(delta_time),
// next_flux_time,
// delta_time / self.working_period
// );
// Tick the clock until we *pass* the time of the next flux transition.
while (time + phase_accumulator) < next_flux_time {
time += self.working_period;
clock_ticks_since_flux += 1;
clock_ticks += 1;
// log::debug!(
// "tick! time: {} pll_clock: {} next_d: {} next_t: {} clocks: {}",
// time,
// self.working_period,
// format_us!(delta_time),
// next_flux_time,
// clock_ticks_since_flux
// );
}
time += phase_accumulator;
phase_accumulator = 0.0;
let flux_length = clock_ticks_since_flux;
//log::trace!("decode_fm(): flux length: {}", flux_length);
match flux_length {
0 => {
flux_stats.too_short += 1;
}
1 => {
flux_stats.short_time += delta_time;
flux_stats.short += 1;
//print!("S");
}
2 => {
flux_stats.long += 1;
//print!("L");
}
_ => {
flux_stats.too_long += 1;
flux_stats.too_slow_bits += (flux_length - 3) as u32;
//print!("X");
}
}
// Emit 0's and 1's based on the number of clock ticks since last flux transition.
if flux_length == 0 {
//log::error!("zero length flux detected at time: {}", time);
}
else {
for _ in 0..flux_length.saturating_sub(1) {
output_bits.push(false);
shift_reg <<= 1;
}
// Emit a 1 since we had a transition...
output_bits.push(true);
shift_reg <<= 1;
shift_reg |= 1;
}
// Look for FM marker.
if shift_reg & 0xAAAA_AAAA_AAAA_AAAA == 0xAAAA_AAAA_AAAA_A02A {
log::trace!(
"decode_fm(): Marker detected at {}, bitcell: {}",
format_ms!(time),
flux_ct - 16
);
markers.push(PllMarkerEntry {
time,
bitcell: output_bits.len() - 16,
});
}
// Transition should be somewhere within our last clock period, ideally in the center of it.
// Let's calculate the error.
// First, we calculate the predicted flux time. This is the time the transition should
// have arrived assuming no clock deviation since last flux.
//let predicted_flux_time = last_flux_time + (clock_ticks_since_flux as f64 * self.pll_clock);
let predicted_flux_time = last_flux_time + (clock_ticks_since_flux as f64 * self.working_period);
// The error is the difference between the actual flux time and the predicted flux time.
let phase_error = next_flux_time - predicted_flux_time;
// Calculate the proportional frequency adjustment.
let p_term = (self.phase_gain * phase_error) / self.working_period;
// log::debug!(
// "predicted time: {} phase error: {} p_accum: {} kp: {} p_term: {}",
// predicted_flux_time,
// format_us!(phase_error),
// format_us!(phase_accumulator),
// self.kp,
// p_term
// );
// Adjust the clock frequency, and clamp it to the min/max values.
self.working_period += p_term;
self.working_period = self.working_period.clamp(min_clock, max_clock);
// Adjust the phase of the clock by shifting the time variable.
phase_accumulator += phase_error;
if phase_accumulator.abs() > self.working_period {
phase_accumulator %= self.working_period;
}
// Save the last flux time for the next iteration.
clock_ticks_since_flux = 0;
last_flux_time = next_flux_time;
}
log::debug!(
"Completed decoding of FM flux stream. Total clocks: {} FT stats: {}",
clock_ticks,
flux_stats
);
PllDecodeResult {
transitions: Vec::new(),
bits: output_bits,
flux_stats,
pll_stats,
markers,
}
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/disk_schema/mod.rs | src/disk_schema/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! A [DiskSchema] is a high level interpreter of a disk image's platform-specific data.
//! A [DiskSchema] is responsible for detecting platform type(s), reading information
//! such as the Bios Parameter Block (BPB).
//! A disk image may have multiple disk schemas, for example dual and triple-format
//! disk images. There should generally be one [DiskSchema] per [Platform] associated
//! with a disk image.
//! A [DiskSchema] is not strictly required (neither is a [Platform]), but operations
//! and information about the disk image will be limited.
// This module is in progress
#![allow(dead_code)]
use crate::DiskImage;
pub enum DiskSchema {
Dos,
MacintoshGcr,
MacintoshMfm,
AmigaGcr,
AmigaMfm,
AtariSt,
}
impl DiskSchema {
pub fn detect(_disk: &DiskImage) -> Option<Vec<Self>> {
None
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/ffi/mod.rs | src/ffi/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! Define the FFI interface for the FluxFox library.
//! This interface is also used by [ScriptEngine] implementations to interact
//! with the library. The FFI tries to use the subset of capabilities that
//! are available to both C and Rhai.
//! This module is a work in progress. There is no usable FFI interface yet.
use crate::DiskImage;
use std::{
ffi::{CStr, CString},
os::raw::c_char,
path::PathBuf,
sync::{Arc, RwLock},
};
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct SectorIDQueryFfi {
cylinder: u16,
has_cylinder: bool,
head: u8,
has_head: bool,
sector: u8,
has_sector: bool,
}
pub struct DiskImageFfi {
inner: Arc<RwLock<DiskImage>>, // Raw pointer to the thread-safe wrapper
}
#[no_mangle]
extern "C" fn load_image(path: *const c_char) -> *mut DiskImageFfi {
// Validate the input pointer
if path.is_null() {
return std::ptr::null_mut();
}
// Convert C string (UTF-8) to Rust string
let c_str = unsafe { CStr::from_ptr(path) };
let rust_str = match c_str.to_str() {
Ok(s) => s,
Err(_) => return std::ptr::null_mut(), // Invalid UTF-8
};
// Handle platform-specific path conversion (Windows requires UTF-16)
let path = PathBuf::from(rust_str);
// Try to load the disk image
match DiskImage::load_from_file(&path, None, None) {
Ok(image) => {
let ffi_image = DiskImageFfi {
inner: Arc::new(RwLock::new(image)),
};
Box::into_raw(Box::new(ffi_image))
}
Err(_) => std::ptr::null_mut(), // Failed to load
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/containers/archive.rs | src/containers/archive.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
#[cfg(feature = "zip")]
use super::zip;
use std::path::{Path, PathBuf};
use strum::IntoEnumIterator;
#[cfg(feature = "gzip")]
use super::gzip;
use crate::io::ReadSeek;
use thiserror::Error;
///! FluxFox, with the appropriate feature flags enabled, can open archives an
///! attempt to detect the context - if there is a single disk image within the
///! archive, it can be extracted and treated as if it were any other disk image.
///!
///! If an archive contains multiple disk images, FluxFox will attempt to identify
///! all the disk images by path, and naturally sort them by path.
///!
///! A load operation without providing a discriminating path or index will result
///! in an error, as the container is unable to determine which disk image to load.
///!
///! A UI built around fluxfox could then display the list of detected disk images
///! in the archive and allow the user to re-try the operation with a specific image
///! specified.
/// An error type for file archive operations, using thiserror for convenience.
#[derive(Clone, Debug, Error)]
pub enum FileArchiveError {
#[error("An IO error occurred reading or writing the file archive: {0}")]
IoError(String),
#[error("A filename or path was not found during an operation on the archive: {0}")]
PathError(String),
#[error("The current archive does not support the requested operation: {0}")]
UnsupportedOperation(String),
#[error("The archive backend reported an unexpected error: {0}")]
OtherError(String),
#[error("No files were found in the archive")]
EmptyArchive,
}
/// A list of supported archive types.
#[derive(Copy, Clone, Debug, strum::EnumIter)]
pub enum FileArchiveType {
Zip,
Tar,
Gzip,
}
impl FileArchiveType {
pub fn verb(&self) -> &str {
match self {
FileArchiveType::Zip => "Zipped",
FileArchiveType::Tar => "Tarred",
FileArchiveType::Gzip => "GZipped",
}
}
}
#[allow(dead_code)]
pub struct ArchiveFileEntry {
pub name: String,
pub size: u64,
}
#[allow(dead_code)]
pub struct ArchiveFileListing {
pub files: Vec<ArchiveFileEntry>,
pub total_size: u64,
}
pub struct ArchiveInfo {
pub archive_type: FileArchiveType,
pub file_count: usize,
pub total_size: u64,
}
/// Define a simple interface trait for various file archive types to implement.
pub trait StatelessFileArchive {
fn detect_archive_type<T: ReadSeek>(image_io: &mut T) -> Option<FileArchiveType> {
FileArchiveType::iter().find(|&archive_type| archive_type.detect(image_io))
}
fn detect<T: ReadSeek>(&self, image_io: &mut T) -> bool;
fn info<T: ReadSeek>(&self, image_io: &mut T) -> Result<ArchiveInfo, FileArchiveError>;
#[allow(dead_code)]
fn file_ct<T: ReadSeek>(&self, image_io: &mut T) -> Result<usize, FileArchiveError>;
fn file_listing<T: ReadSeek>(&self, image_io: &mut T) -> Result<ArchiveFileListing, FileArchiveError>;
#[allow(dead_code)]
fn extract_file<T: ReadSeek>(&self, image_io: &mut T, file_name: &Path) -> Result<Vec<u8>, FileArchiveError>;
fn extract_first_file<T: ReadSeek>(&self, image_io: &mut T) -> Result<(Vec<u8>, PathBuf), FileArchiveError>;
}
impl StatelessFileArchive for FileArchiveType {
fn detect<T: ReadSeek>(&self, image_io: &mut T) -> bool {
#[allow(unreachable_patterns)]
match self {
#[cfg(feature = "zip")]
FileArchiveType::Zip => zip::detect(image_io),
FileArchiveType::Tar => false,
#[cfg(feature = "gzip")]
FileArchiveType::Gzip => gzip::detect(image_io),
_ => false,
}
}
fn info<T: ReadSeek>(&self, image_io: &mut T) -> Result<ArchiveInfo, FileArchiveError> {
#[allow(unreachable_patterns)]
match self {
#[cfg(feature = "zip")]
FileArchiveType::Zip => zip::info(image_io),
FileArchiveType::Tar => todo!(),
#[cfg(feature = "gzip")]
FileArchiveType::Gzip => gzip::info(image_io),
_ => Err(FileArchiveError::UnsupportedOperation(
"No archive enabled!".to_string(),
)),
}
}
fn file_ct<T: ReadSeek>(&self, image_io: &mut T) -> Result<usize, FileArchiveError> {
#[allow(unreachable_patterns)]
match self {
#[cfg(feature = "zip")]
FileArchiveType::Zip => zip::file_ct(image_io),
FileArchiveType::Tar => todo!(),
#[cfg(feature = "gzip")]
FileArchiveType::Gzip => Ok(1),
_ => Err(FileArchiveError::UnsupportedOperation(
"No archive enabled!".to_string(),
)),
}
}
fn file_listing<T: ReadSeek>(&self, image_io: &mut T) -> Result<ArchiveFileListing, FileArchiveError> {
#[allow(unreachable_patterns)]
match self {
#[cfg(feature = "zip")]
FileArchiveType::Zip => zip::file_listing(image_io),
FileArchiveType::Tar => todo!(),
#[cfg(feature = "gzip")]
FileArchiveType::Gzip => Ok(ArchiveFileListing {
files: vec![ArchiveFileEntry {
name: "file".to_string(),
size: 0,
}],
total_size: 0,
}),
_ => Err(FileArchiveError::UnsupportedOperation(
"No archive enabled!".to_string(),
)),
}
}
fn extract_file<T: ReadSeek>(&self, image_io: &mut T, file_name: &Path) -> Result<Vec<u8>, FileArchiveError> {
#[allow(unreachable_patterns)]
match self {
#[cfg(feature = "zip")]
FileArchiveType::Zip => zip::extract_file(image_io, file_name),
FileArchiveType::Tar => todo!(),
#[cfg(feature = "gzip")]
FileArchiveType::Gzip => gzip::extract(image_io),
_ => Err(FileArchiveError::UnsupportedOperation(
"No archive enabled!".to_string(),
)),
}
}
fn extract_first_file<T: ReadSeek>(&self, image_io: &mut T) -> Result<(Vec<u8>, PathBuf), FileArchiveError> {
#[allow(unreachable_patterns)]
match self {
#[cfg(feature = "zip")]
FileArchiveType::Zip => zip::extract_first_file(image_io),
FileArchiveType::Tar => todo!(),
#[cfg(feature = "gzip")]
FileArchiveType::Gzip => match gzip::extract(image_io) {
Ok(data) => Ok((data, PathBuf::from("file"))),
Err(e) => Err(e),
},
_ => Err(FileArchiveError::UnsupportedOperation(
"No archive enabled!".to_string(),
)),
}
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/containers/gzip.rs | src/containers/gzip.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
src/containers/zip.rs
Code to handle a ZIP file container. Some disk images such as IMZ and
Kryoflux raw dumps are stored in ZIP files. This module provides the
utilities to handle these files.
*/
use crate::{
containers::archive::ArchiveInfo,
io::{Read, ReadSeek, SeekFrom},
};
use crate::containers::archive::{FileArchiveError, FileArchiveType};
use flate2::read::GzDecoder;
// Only support deflate-based gzips
const GZIP_SIGNATURE: &[u8; 3] = b"\x1F\x8B\x08";
const MAX_FILE_SIZE: u64 = 100_000_000;
/// Return true if the provided image is a ZIP file.
#[allow(dead_code)]
pub struct GZipFileEntry {
pub name: String,
pub size: u64,
}
#[allow(dead_code)]
pub struct GZipFileListing {
pub files: Vec<GZipFileEntry>,
pub total_size: u64,
}
pub fn detect<T: ReadSeek>(image_io: &mut T) -> bool {
let mut buf = [0u8; 3];
image_io.seek(SeekFrom::Start(0)).ok();
if image_io.read_exact(&mut buf).is_err() {
return false;
}
buf == *GZIP_SIGNATURE
}
// pub struct ArchiveInfo {
// pub archive_type: FileArchiveType,
// pub file_count: usize,
// pub total_size: u64,
// }
pub fn info<T: ReadSeek>(image_io: &mut T) -> Result<ArchiveInfo, FileArchiveError> {
if let Some(header) = GzDecoder::new(image_io).header() {
log::debug!("Gzip::info(): GZIP header: {:?}", header);
}
Ok(ArchiveInfo {
archive_type: FileArchiveType::Gzip,
file_count: 1,
total_size: 0, // Not sure if there's a way to get the total size of the archive
})
}
/// Reads and decompresses a GZIP file, returning its contents as a byte vector.
pub fn extract<T: ReadSeek>(image_io: &mut T) -> Result<Vec<u8>, FileArchiveError> {
image_io
.seek(SeekFrom::Start(0))
.map_err(|e| FileArchiveError::IoError(e.to_string()))?;
let mut decoder = GzDecoder::new(image_io);
let mut decompressed_data = Vec::new();
// Read and decompress the data
decoder
.read_to_end(&mut decompressed_data)
.map_err(|e| FileArchiveError::IoError(e.to_string()))?;
// Sanity check on the decompressed data size
if decompressed_data.len() as u64 > MAX_FILE_SIZE {
return Err(FileArchiveError::IoError("Decompressed file too large".to_string()));
}
Ok(decompressed_data)
}
/// Returns the name of the file inside the GZIP archive, if present.
#[allow(dead_code)]
pub fn filename<T: ReadSeek>(image_io: &mut T) -> Result<Option<String>, FileArchiveError> {
image_io
.seek(SeekFrom::Start(0))
.map_err(|e| FileArchiveError::IoError(e.to_string()))?;
let decoder = GzDecoder::new(image_io);
if let Some(header) = decoder.header() {
if let Some(filename_bytes) = header.filename() {
return String::from_utf8(filename_bytes.to_vec())
.map(Some)
.map_err(|_| FileArchiveError::IoError("Failed to parse filename".to_string()));
}
}
Ok(None)
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/containers/mod.rs | src/containers/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! An abstract conception of a 'container' for disk images. All disk images
//! have a container, which may be of several types of [DiskImageContainer]:
//!
//! * `ImageFile` - A transparent container abstraction over a disk image file existing as a single
//! file object on the native filesystem or within a known container.
//! All of fluxfox's disk image format parsers must eventually operate on Raw
//! `File`s.
//!
//! * `Archive ` - A container abstraction over am archive format that may contain one or more disk
//! images. This may represent any of the known formats that a user may have placed
//! in a supported archive format, but also includes image 'standards' that are
//! compressed in some specific way:
//! `IMZ` files are `zip` archives containing a single `IMA` file internally.
//! `ADZ` files are simple `ADF` files compressed with `gzip`
//! Archives may or may not be compressed - some formats like `tar` typically just
//! bundle files together.
//! `Archive`s may be nested together, such as the common `tar.gz` scheme seen in
//! Linux and Unix systems.
//!
//! * `FileSet` - A container abstraction over a grouping of related files that comprise a single
//! disk image. comprise several individual files, such as when
//! using the Kryoflux stream format, which stores an individual file for each
//! track imaged. A FileSet may also be nested within an `Archive` (which is
//! preferable for file handling)
//! Typically, if a user selects a file that is detected to belong to a `FileSet`,
//! the entire set will be resolved. This allows loading of Kryoflux stream images
//! by opening any file in the set. Obviously this is not possible with web targets,
//! drag-and-drop or other non-file-based input methods.
//!
//! FluxFox, with the appropriate feature flags enabled, can open archives an
//! attempt to detect the context - if there is a single disk image within the
//! archive, it will be extracted and treated as if it were any other disk image.
//!
//! If an archive contains multiple disk images, FluxFox will attempt to identify
//! all the disk images by path, and naturally sort them by path.
//!
//! A load operation without providing a discriminating path or index will result
//! in an error, as the container is unable to determine which disk image to load.
//!
//! A UI built around fluxfox could then display the list of detected disk images
//! in the archive and allow the user to re-try the operation with a specific image
//! specified.
//!
//! More abstractly, a container can also represent a uncompressed `glob` of disk
//! image files, such as a Kryoflux set, where the "container" is actually just
//! a vector of [Paths] to the track stream files that comprise the disk image.
//!
//! A `FileSet` container may also exist within an `FileArchive` if that format
//! supports multiple files (e.g. `zip` or `tar`).
//!
//! Containers may also be nested, as is frequently seen on linux with the
//! `tar.gz` nested container format. A `kryoflux_dump.tar.gz` would essentially
//! be three nested containers: A Kryoflux `FileSet` inside a `tar` archive inside
//! a `Gzip` archive.
pub mod archive;
#[cfg(feature = "gzip")]
pub mod gzip;
#[cfg(feature = "zip")]
pub mod zip;
use std::{
fmt::{Display, Formatter, Result},
path::PathBuf,
};
use crate::containers::archive::FileArchiveType;
use crate::{DiskCh, DiskImageFileFormat};
#[derive(Clone, Debug)]
pub struct KryoFluxSet {
pub base_path: PathBuf,
pub file_set: Vec<PathBuf>,
pub geometry: DiskCh,
}
#[derive(Clone, Debug)]
pub enum DiskImageContainer {
/// During the process of loading a file container and analyzing it, sometimes we have the
/// entire disk image file in memory as a vector. If we can determine this is the only relevant
/// file in the container (or is a root level File container), we can return it directly as a
/// `ResolvedFile` container. This is useful for handling single-file archives transparently.
/// The final two [PathBuf] parameters specify the path to the file itself, and the path to any
/// parent archive that might have contained it.
/// Note that writing disk images back to nested [Archive]s is not supported.
ResolvedFile(DiskImageFileFormat, Vec<u8>, Option<PathBuf>, Option<PathBuf>),
/// A 'File' container represents no container. It is a single file either residing on the
/// native filesystem or resolved from another container.
/// It holds a [DiskImageFileFormat] and an optional [PathBuf] to the file. The path may only
/// be present in certain contexts - receiving a file via drag and drop on the web may not have
/// an associated [Path]. The [PathBuf] may also not represent a valid path on the native
/// filesystem, such as when a file is resolved from an archive
File(DiskImageFileFormat, Option<PathBuf>),
/// An `Archive` container represents some archive type that may contain one or more files
/// within it, optionally compressed. Examples of Archives include `tar`, `zip` and `gz`.
/// An Archive stores a vector of [DiskImageContainer], allowing it to recursively hold files,
/// file sets, or even other archives.
Archive(FileArchiveType, Vec<(DiskImageContainer, PathBuf)>, Option<PathBuf>),
/// A `FileSet` container represents a set of related files that comprise a single disk image.
/// The primary example of this is the Kryoflux stream format, which stores an individual file
/// for each track imaged. All files in a FileSet should belong to a specific [DiskImageFileFormat],
/// which for all intents and purposes will be `KryofluxStream`.
/// The final, optional [PathBuf] parameter represents the initial file used to resolve the set,
/// if available.
FileSet(DiskImageFileFormat, Vec<PathBuf>, Option<PathBuf>),
/// A set of Kryoflux images zipped together.
/// The outer vector represents the number of disks in the archive (the number of unique
/// paths found to a kryoflux .*00.0.raw file). It stores a tuple, the first element of which is
/// the path to the raw files, the second is a Vec containing the full path of all raws in that
/// set, and the third is the geometry of that set as DiskCh.
ZippedKryofluxSet(Vec<KryoFluxSet>),
KryofluxSet,
}
impl Display for DiskImageContainer {
fn fmt(&self, f: &mut Formatter) -> Result {
match self {
DiskImageContainer::ResolvedFile(fmt, buf, _, _) => write!(f, "{:?} of {} bytes", fmt, buf.len()),
DiskImageContainer::File(fmt, _) => write!(f, "{:?}", fmt),
DiskImageContainer::Archive(archive_type, items, _) => {
write!(f, "{:?} archive of {} items", archive_type, items.len())
}
DiskImageContainer::FileSet(fmt, items, _) => write!(f, "File set of {} {:?} images", items.len(), fmt),
DiskImageContainer::ZippedKryofluxSet(_) => write!(f, "Zipped Kryoflux Image Set"),
DiskImageContainer::KryofluxSet => write!(f, "Kryoflux Image Set"),
}
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/containers/zip.rs | src/containers/zip.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
src/containers/zip.rs
Code to handle a ZIP file container. Some disk images such as IMZ and
Kryoflux raw dumps are stored in ZIP files. This module provides the
utilities to handle these files.
*/
use crate::{
containers::archive::ArchiveInfo,
io::{Read, ReadSeek, SeekFrom},
};
use crate::containers::archive::{ArchiveFileEntry, ArchiveFileListing, FileArchiveError, FileArchiveType};
use std::path::{Path, PathBuf};
const ZIP_SIGNATURE: &[u8; 4] = b"PK\x03\x04";
const MAX_FILE_SIZE: u64 = 100_000_000;
/// A single file entry within a ZIP archive: its name and (uncompressed) size.
/// NOTE(review): appears unused — `file_listing()` below returns the generic
/// `ArchiveFileEntry` instead; the misplaced doc comment previously here
/// ("Return true if the provided image is a ZIP file.") described `detect()`.
#[allow(dead_code)]
pub struct ZipFileEntry {
    pub name: String,
    pub size: u64,
}
/// A listing of all files in a ZIP archive plus the sum of their sizes.
/// NOTE(review): appears unused — `file_listing()` below returns the generic
/// `ArchiveFileListing` instead.
#[allow(dead_code)]
pub struct ZipFileListing {
    pub files: Vec<ZipFileEntry>,
    pub total_size: u64,
}
/// Return true if the provided image is a ZIP file (starts with the "PK\x03\x04" magic).
pub fn detect<T: ReadSeek>(image_io: &mut T) -> bool {
    let mut signature = [0u8; 4];
    // Best-effort rewind to the start of the stream; a seek failure is ignored.
    let _ = image_io.seek(SeekFrom::Start(0));
    match image_io.read_exact(&mut signature) {
        Ok(()) => signature == *ZIP_SIGNATURE,
        Err(_) => false,
    }
}
// pub struct ArchiveInfo {
// pub archive_type: FileArchiveType,
// pub file_count: usize,
// pub total_size: u64,
// }
/// Produce an [ArchiveInfo] summary (type, file count, total decompressed size)
/// for a ZIP archive read from `image_io`.
pub fn info<T: ReadSeek>(image_io: &mut T) -> Result<ArchiveInfo, FileArchiveError> {
    let archive = zip::ZipArchive::new(image_io).map_err(|e| FileArchiveError::IoError(e.to_string()))?;
    // A missing decompressed size is reported as 0 rather than an error.
    let total_size = archive.decompressed_size().unwrap_or(0) as u64;
    Ok(ArchiveInfo {
        archive_type: FileArchiveType::Zip,
        file_count: archive.len(),
        total_size,
    })
}
/// Return the number of entries in the ZIP archive read from `image_io`.
#[allow(dead_code)]
pub fn file_ct<T: ReadSeek>(image_io: &mut T) -> Result<usize, FileArchiveError> {
    zip::ZipArchive::new(image_io)
        .map(|archive| archive.len())
        .map_err(|e| FileArchiveError::IoError(e.to_string()))
}
/// Enumerate every entry in the ZIP archive, returning an [ArchiveFileListing]
/// of names and sizes plus the total size. Entries without a safe (enclosed)
/// name — e.g. paths escaping the archive root — are silently skipped.
pub fn file_listing<T: ReadSeek>(image_io: &mut T) -> Result<ArchiveFileListing, FileArchiveError> {
    let mut archive = zip::ZipArchive::new(image_io).map_err(|e| FileArchiveError::IoError(e.to_string()))?;
    let mut listing = ArchiveFileListing {
        files: Vec::new(),
        total_size: 0,
    };
    for index in 0..archive.len() {
        let entry = archive.by_index(index).map_err(|e| FileArchiveError::IoError(e.to_string()))?;
        if let Some(safe_name) = entry.enclosed_name() {
            listing.total_size += entry.size();
            listing.files.push(ArchiveFileEntry {
                name: safe_name.to_string_lossy().to_string(),
                size: entry.size(),
            });
        }
    }
    Ok(listing)
}
/// Extract a single named file from the ZIP archive and return its contents.
///
/// # Errors
/// Returns [FileArchiveError::IoError] if the archive cannot be opened, the
/// entry is missing, the entry exceeds [MAX_FILE_SIZE], or reading fails.
pub fn extract_file<T: ReadSeek>(image_io: &mut T, file_name: &Path) -> Result<Vec<u8>, FileArchiveError> {
    let mut archive = zip::ZipArchive::new(image_io).map_err(|e| FileArchiveError::IoError(e.to_string()))?;
    let entry_name = file_name.to_string_lossy();
    let mut entry = archive
        .by_name(&entry_name)
        .map_err(|e| FileArchiveError::IoError(e.to_string()))?;
    // Sanity check, is file < 100MB? Let's not zip-bomb ourselves.
    if entry.size() > MAX_FILE_SIZE {
        return Err(FileArchiveError::IoError("File too large".to_string()));
    }
    let mut contents = Vec::new();
    entry
        .read_to_end(&mut contents)
        .map_err(|e| FileArchiveError::IoError(e.to_string()))?;
    Ok(contents)
}
/// Extract the first entry of the ZIP archive, returning its contents and its
/// path within the archive.
///
/// # Errors
/// Returns [FileArchiveError::EmptyArchive] for an empty ZIP, or
/// [FileArchiveError::IoError] on open/read failure or when the entry exceeds
/// [MAX_FILE_SIZE].
pub fn extract_first_file<T: ReadSeek>(image_io: &mut T) -> Result<(Vec<u8>, PathBuf), FileArchiveError> {
    let mut archive = zip::ZipArchive::new(image_io).map_err(|e| FileArchiveError::IoError(e.to_string()))?;
    // No files in zip? Nothing we can do with that.
    if archive.is_empty() {
        return Err(FileArchiveError::EmptyArchive);
    }
    let mut entry = archive.by_index(0).map_err(|e| FileArchiveError::IoError(e.to_string()))?;
    // Sanity check, is file < 100MB? Let's not zip-bomb ourselves.
    if entry.size() > MAX_FILE_SIZE {
        return Err(FileArchiveError::IoError("File too large".to_string()));
    }
    let mut contents = Vec::new();
    entry
        .read_to_end(&mut contents)
        .map_err(|e| FileArchiveError::IoError(e.to_string()))?;
    let entry_path = PathBuf::from(entry.name().to_string());
    Ok((contents, entry_path))
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/boot_sector/bootsector.rs | src/boot_sector/bootsector.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use crate::{
boot_sector::bpb::{BiosParameterBlock2, BiosParameterBlock3, BPB_OFFSET},
io::{Cursor, ReadSeek, ReadWriteSeek, Seek, SeekFrom, Write},
DiskImageError,
StandardFormat,
};
use binrw::{binrw, BinRead, BinWrite};
/// A simple wrapper around the last two bytes in a boot sector that comprise the boot signature.
/// Typically, these bytes should read 0x55, 0xAA, but this isn't guaranteed, especially on older
/// diskettes. Early PCs did not validate that these bytes were set, and DOS 1.0 didn't set them.
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub struct BootSignature {
    // Raw signature bytes; [0x55, 0xAA] for a conventional boot sector.
    pub(crate) marker: [u8; 2],
}
impl Default for BootSignature {
fn default() -> Self {
BootSignature { marker: [0x55, 0xAA] }
}
}
#[allow(dead_code)]
impl BootSignature {
/// Create a new BootMarker from the specified bytes.
/// To create a valid BootMarker without specifying the byte values, simple use BootMarker::default().
pub fn new(marker: [u8; 2]) -> Self {
BootSignature { marker }
}
/// Set the BootMarker to the specified bytes.
pub fn set(&mut self, marker: [u8; 2]) {
self.marker = marker;
}
/// Return true if the marker is 0x55, 0xAA.
pub fn is_valid(&self) -> bool {
self.marker == BootSignature::default().marker
}
/// Return a reference to the marker bytes.
pub fn bytes(&self) -> &[u8; 2] {
&self.marker
}
}
/// A struct representing a DOS boot sector.
/// [BootSector] is designed to be created from byte data instead of directly from a [DiskImage].
/// This allows flexibility in creating and interpreting a boot sector from other sources, such
/// as in external .bin file.
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct BootSector {
    // BPB v2.0 fields, parsed from BPB_OFFSET within the sector.
    pub(crate) bpb2: BiosParameterBlock2,
    // BPB v3.0 extension fields, read immediately after the v2.0 block.
    pub(crate) bpb3: BiosParameterBlock3,
    // The trailing two-byte boot signature (normally 0x55, 0xAA).
    pub(crate) marker: BootSignature,
    // Verbatim copy of the full 512-byte sector as originally read.
    pub(crate) sector_buf: Vec<u8>,
}
// Fixed 8-byte creator tag embedded in the bundled boot sector (read and
// written at offset 0x147 by `set_creator`). The endianness attribute has no
// effect on a raw byte array.
#[binrw]
#[brw(big)]
pub struct CreatorString {
    bytes: [u8; 8],
}
impl BootSector {
    /// Read a [BootSector] from the provided buffer.
    ///
    /// The full 512-byte sector is retained internally so it can be written
    /// back unmodified; the v2.0 and v3.0 BPB structures and the trailing boot
    /// signature are then parsed from their fixed offsets.
    ///
    /// # Errors
    /// Returns a [DiskImageError] if fewer than 512 bytes are available or the
    /// BPB structures cannot be read.
    pub fn new<T: ReadSeek>(buffer: &mut T) -> Result<Self, DiskImageError> {
        let mut sector_buf = [0; 512];
        buffer.seek(SeekFrom::Start(0))?;
        // Save a copy of the boot sector internally.
        buffer.read_exact(&mut sector_buf)?;
        // Seek to and read the BPB. Currently, we only support versions 2 and 3.
        buffer.seek(SeekFrom::Start(BPB_OFFSET))?;
        let bpb2 = BiosParameterBlock2::read(buffer)?;
        let bpb3 = BiosParameterBlock3::read(buffer)?;
        // Seek to the end and check the marker.
        buffer.seek(SeekFrom::End(-2))?;
        let mut marker = [0; 2];
        buffer.read_exact(&mut marker)?;
        Ok(BootSector {
            bpb2,
            bpb3,
            marker: BootSignature::new(marker),
            sector_buf: sector_buf.to_vec(),
        })
    }

    /// The default bootsector includes a creator string of 8 characters. This is by default the
    /// string "fluxfox ". This can be overridden to identify the application using fluxfox to
    /// create disk images. If your string is shorter than 8 characters, pad with spaces.
    ///
    /// # Errors
    /// Returns [DiskImageError::IncompatibleImage] if the sector does not
    /// contain the default "fluxfox " creator string: only the bundled boot
    /// sector can be patched safely, otherwise we would overwrite random data.
    pub(crate) fn set_creator(&mut self, creator: &[u8; 8]) -> Result<(), DiskImageError> {
        // Fixed offset of the creator string within the bundled boot sector.
        let creator_offset = 0x147;
        let mut cursor = Cursor::new(&mut self.sector_buf);
        cursor.seek(SeekFrom::Start(creator_offset))?;
        // Verify the existing creator string before patching.
        let creator_string = CreatorString::read(&mut cursor)?;
        if creator_string.bytes != "fluxfox ".as_bytes() {
            // We can only set the creator if we're using the included boot
            // sector, otherwise we'd overwrite some random data.
            return Err(DiskImageError::IncompatibleImage(
                "Creator string requires using default bootsector".to_string(),
            ));
        }
        cursor.seek(SeekFrom::Start(creator_offset))?;
        let new_creator_string = CreatorString { bytes: *creator };
        new_creator_string.write(&mut cursor)?;
        Ok(())
    }

    /// Return true if the v2.0 BPB passes basic sanity checks.
    pub fn has_valid_bpb(&self) -> bool {
        self.bpb2.is_valid()
    }

    /// Return a copy of the parsed v2.0 BIOS Parameter Block.
    pub fn bpb2(&self) -> BiosParameterBlock2 {
        self.bpb2
    }

    /// Return a copy of the parsed v3.0 BIOS Parameter Block extension.
    pub fn bpb3(&self) -> BiosParameterBlock3 {
        self.bpb3
    }

    /// Replace both BPB structures with values appropriate for `format`, and
    /// patch the internal sector buffer to match.
    pub(crate) fn update_bpb_from_format(&mut self, format: StandardFormat) -> Result<(), DiskImageError> {
        self.bpb2 = BiosParameterBlock2::try_from(format)?;
        self.bpb3 = BiosParameterBlock3::try_from(format)?;
        // Update the internal buffer.
        let mut cursor = Cursor::new(&mut self.sector_buf);
        cursor.seek(SeekFrom::Start(BPB_OFFSET))?;
        self.bpb2.write(&mut cursor)?;
        self.bpb3.write(&mut cursor)?;
        Ok(())
    }

    /// Return the raw 512-byte boot sector as a slice.
    pub fn as_bytes(&self) -> &[u8] {
        &self.sector_buf
    }

    /// Return the BootSignature.
    /// This is a simple wrapper around a two byte array, but provides an is_valid() method.
    pub fn boot_signature(&self) -> BootSignature {
        self.marker
    }

    /// Write a new BPB to the provided sector buffer based on the specified StandardFormat.
    /// StandardFormat must not be Invalid!
    pub(crate) fn write_bpb_to_buffer<T: ReadWriteSeek>(&mut self, buffer: &mut T) -> Result<(), DiskImageError> {
        buffer.seek(SeekFrom::Start(BPB_OFFSET))?;
        self.bpb2.write(buffer)?;
        self.bpb3.write(buffer)?;
        Ok(())
    }

    /// Attempt to correlate the current Bios Parameter Block with a StandardFormat.
    /// If the BPB is invalid, or no match is found, return None.
    pub fn standard_format(&self) -> Option<StandardFormat> {
        StandardFormat::try_from(&self.bpb2).ok()
    }

    /// Dump the BPB values to a Write implementor for debugging purposes.
    pub fn dump_bpb<T: Write>(&self, buffer: &mut T) -> Result<(), crate::io::Error> {
        writeln!(buffer, "BIOS Parameter Block v2.0:")?;
        writeln!(buffer, "\tBytes per sector: {}", self.bpb2.bytes_per_sector)?;
        writeln!(buffer, "\tSectors per cluster: {}", self.bpb2.sectors_per_cluster)?;
        writeln!(buffer, "\tReserved sectors: {}", self.bpb2.reserved_sectors)?;
        writeln!(buffer, "\tNumber of FATs: {}", self.bpb2.number_of_fats)?;
        writeln!(buffer, "\tRoot entries: {}", self.bpb2.root_entries)?;
        writeln!(buffer, "\tTotal sectors: {}", self.bpb2.total_sectors)?;
        writeln!(buffer, "\tMedia descriptor: 0x{:02X}", self.bpb2.media_descriptor)?;
        writeln!(buffer, "\tSectors per FAT: {}", self.bpb2.sectors_per_fat)?;
        writeln!(buffer)?;
        writeln!(buffer, "BIOS Parameter Block v3.0:")?;
        writeln!(buffer, "\tSectors per track: {}", self.bpb3.sectors_per_track)?;
        writeln!(buffer, "\tNumber of heads: {}", self.bpb3.number_of_heads)?;
        writeln!(buffer, "\tHidden sectors: {}", self.bpb3.hidden_sectors)?;
        writeln!(buffer)?;
        writeln!(buffer, "Boot sector signature: {:02X?}", self.marker.bytes())?;
        if let Some(fmt) = self.standard_format() {
            writeln!(buffer, "Best standard disk format guess: {}", fmt)?;
        }
        else {
            writeln!(buffer, "Standard disk format not detected.")?;
        }
        buffer.flush()?;
        Ok(())
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/boot_sector/bpb.rs | src/boot_sector/bpb.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
src/boot_sector/bpb.rs
Routines for reading and modifying the BIOS Parameter block.
This structure was present from DOS 2.0 onwards, although it was expanded
with almost every DOS release. The BPB is used to encode metadata about the
diskette media type and filesystem.
When creating disk images with a supplied boot sector template, we must
be able to patch the BPB values as appropriate for the specified floppy
image format, or the disk will not be bootable.
*/
use crate::{DiskImageError, StandardFormat};
use binrw::binrw;
// Offset of the bios parameter block in the boot sector.
pub const BPB_OFFSET: u64 = 0x0B;
#[derive(Debug, Copy, Clone, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[binrw]
#[brw(little)]
pub struct BiosParameterBlock2 {
    /// Bytes per logical sector (512 for all standard PC formats defined below).
    pub bytes_per_sector: u16,
    /// Sectors per allocation cluster.
    pub sectors_per_cluster: u8,
    /// Reserved sectors at the start of the volume (1 = just the boot sector).
    pub reserved_sectors: u16,
    /// Number of FAT copies (normally 2).
    pub number_of_fats: u8,
    /// Maximum number of root directory entries.
    pub root_entries: u16,
    /// Total logical sectors on the volume.
    pub total_sectors: u16,
    /// Media descriptor byte (e.g. 0xFE, 0xFD, 0xF9, 0xF0).
    pub media_descriptor: u8,
    /// Sectors occupied by a single FAT copy.
    pub sectors_per_fat: u16,
}
impl BiosParameterBlock2 {
    /// Perform a sanity check on the BPB parameters. This function should return true if a valid
    /// BPB is present for any standard floppy disk format from 160K to 2.88MB.
    pub fn is_valid(&self) -> bool {
        // TODO: Make more robust by validating against the media descriptor for specific values
        // instead of ranges.
        (128..=4096).contains(&self.bytes_per_sector)
            && self.sectors_per_cluster <= 2
            && (1..=2).contains(&self.number_of_fats)
            && (0x70..=0xF0).contains(&self.root_entries)
            && (320..=5760).contains(&self.total_sectors)
            && (1..=9).contains(&self.sectors_per_fat)
    }
}
impl TryFrom<&BiosParameterBlock2> for StandardFormat {
    type Error = &'static str;

    /// Guess a [StandardFormat] from a BPB: first by total sector count (the
    /// most specific field), then falling back to the media descriptor byte.
    fn try_from(bpb: &BiosParameterBlock2) -> Result<Self, Self::Error> {
        let by_total_sectors = match bpb.total_sectors {
            320 => Some(StandardFormat::PcFloppy160),
            360 => Some(StandardFormat::PcFloppy180),
            640 => Some(StandardFormat::PcFloppy320),
            720 => Some(StandardFormat::PcFloppy360),
            1440 => Some(StandardFormat::PcFloppy720),
            1200 => Some(StandardFormat::PcFloppy1200),
            2880 => Some(StandardFormat::PcFloppy1440),
            5760 => Some(StandardFormat::PcFloppy2880),
            _ => None,
        };
        if let Some(fmt) = by_total_sectors {
            return Ok(fmt);
        }
        // The media descriptor alone is ambiguous for some formats; these are
        // best-effort mappings.
        match bpb.media_descriptor {
            0xFE => Ok(StandardFormat::PcFloppy160),
            0xFC => Ok(StandardFormat::PcFloppy180),
            0xFD => Ok(StandardFormat::PcFloppy360),
            0xFF => Ok(StandardFormat::PcFloppy320),
            0xF9 => Ok(StandardFormat::PcFloppy1200),
            0xF0 => Ok(StandardFormat::PcFloppy1440),
            _ => Err("Invalid BPB"),
        }
    }
}
impl TryFrom<StandardFormat> for BiosParameterBlock2 {
    type Error = DiskImageError;

    /// Build a v2.0 BPB appropriate for the given [StandardFormat].
    ///
    /// # Errors
    /// Returns [DiskImageError::UnsupportedFormat] for non-PC formats.
    fn try_from(format: StandardFormat) -> Result<BiosParameterBlock2, DiskImageError> {
        #[allow(unreachable_patterns)]
        let pc_fmt = match format {
            StandardFormat::PcFloppy160 => BiosParameterBlock2 {
                bytes_per_sector: 512,
                sectors_per_cluster: 2,
                reserved_sectors: 1,
                number_of_fats: 2,
                root_entries: 0x70,
                total_sectors: 320,
                media_descriptor: 0xFE,
                sectors_per_fat: 1,
            },
            StandardFormat::PcFloppy180 => BiosParameterBlock2 {
                bytes_per_sector: 512,
                sectors_per_cluster: 2,
                reserved_sectors: 1,
                number_of_fats: 2,
                root_entries: 0x70,
                total_sectors: 360,
                // 0xFC is the standard 180K media descriptor (0xFE is 160K);
                // the reverse mapping in TryFrom<&BiosParameterBlock2> also
                // expects 0xFC for this format.
                media_descriptor: 0xFC,
                sectors_per_fat: 1,
            },
            StandardFormat::PcFloppy320 => BiosParameterBlock2 {
                bytes_per_sector: 512,
                sectors_per_cluster: 2,
                reserved_sectors: 1,
                number_of_fats: 2,
                root_entries: 0x70,
                total_sectors: 640,
                media_descriptor: 0xFF,
                sectors_per_fat: 1,
            },
            StandardFormat::PcFloppy360 => BiosParameterBlock2 {
                bytes_per_sector: 512,
                sectors_per_cluster: 2,
                reserved_sectors: 1,
                number_of_fats: 2,
                root_entries: 0x70,
                total_sectors: 720,
                media_descriptor: 0xFD,
                sectors_per_fat: 2,
            },
            StandardFormat::PcFloppy720 => BiosParameterBlock2 {
                bytes_per_sector: 512,
                sectors_per_cluster: 2,
                reserved_sectors: 1,
                number_of_fats: 2,
                root_entries: 0x70,
                total_sectors: 1440,
                // 0xF9 is the standard 720K media descriptor; 0xFD would
                // collide with 360K.
                media_descriptor: 0xF9,
                sectors_per_fat: 3,
            },
            StandardFormat::PcFloppy1200 => BiosParameterBlock2 {
                bytes_per_sector: 512,
                sectors_per_cluster: 1,
                reserved_sectors: 1,
                number_of_fats: 2,
                root_entries: 0xE0, // 224
                total_sectors: 2400,
                media_descriptor: 0xF9,
                sectors_per_fat: 7,
            },
            StandardFormat::PcFloppy1440 => BiosParameterBlock2 {
                bytes_per_sector: 512,
                sectors_per_cluster: 1,
                reserved_sectors: 1,
                number_of_fats: 2,
                root_entries: 0xE0,
                total_sectors: 2880,
                media_descriptor: 0xF0,
                sectors_per_fat: 9,
            },
            StandardFormat::PcFloppy2880 => BiosParameterBlock2 {
                bytes_per_sector: 512,
                sectors_per_cluster: 1,
                reserved_sectors: 1,
                number_of_fats: 2,
                root_entries: 0xF0,
                total_sectors: 5760,
                media_descriptor: 0xF0,
                sectors_per_fat: 9,
            },
            _ => {
                return Err(DiskImageError::UnsupportedFormat);
            }
        };
        Ok(pc_fmt)
    }
}
/// BIOS Parameter Block extensions introduced in MS-DOS 3.0
#[derive(Debug, Copy, Clone, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[binrw]
#[brw(little)]
pub struct BiosParameterBlock3 {
    /// Physical sectors per track.
    pub sectors_per_track: u16,
    /// Number of drive heads (sides).
    pub number_of_heads: u16,
    /// Sectors preceding this volume; 0 for floppy images.
    pub hidden_sectors: u32,
}
impl TryFrom<StandardFormat> for BiosParameterBlock3 {
    type Error = DiskImageError;

    /// Build a v3.0 BPB extension (geometry) for the given [StandardFormat].
    ///
    /// # Errors
    /// Returns [DiskImageError::UnsupportedFormat] for non-PC formats.
    fn try_from(format: StandardFormat) -> Result<BiosParameterBlock3, DiskImageError> {
        // (sectors per track, head count) for each supported PC format.
        #[allow(unreachable_patterns)]
        let (sectors_per_track, number_of_heads) = match format {
            StandardFormat::PcFloppy160 => (8, 1),
            StandardFormat::PcFloppy180 => (9, 1),
            StandardFormat::PcFloppy320 => (8, 2),
            StandardFormat::PcFloppy360 => (9, 2),
            StandardFormat::PcFloppy720 => (9, 2),
            StandardFormat::PcFloppy1200 => (15, 2),
            StandardFormat::PcFloppy1440 => (18, 2),
            StandardFormat::PcFloppy2880 => (36, 2),
            _ => {
                return Err(DiskImageError::UnsupportedFormat);
            }
        };
        Ok(BiosParameterBlock3 {
            sectors_per_track,
            number_of_heads,
            // Floppy volumes start at the beginning of the medium.
            hidden_sectors: 0,
        })
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/boot_sector/mod.rs | src/boot_sector/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
src/boot_sector/mod.rs
Routines for reading and modifying boot sector data - specifically the
BIOS Parameter block.
When creating disk images with a supplied boot sector template, we must
be able to patch the BPB values as appropriate for the specified floppy
image format, or the disk will not be bootable.
*/
pub mod bootsector;
mod bpb;
pub use bootsector::{BootSector, BootSignature};
pub use bpb::{BiosParameterBlock2, BiosParameterBlock3};
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/scp.rs | src/file_parsers/scp.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! A parser for the SuperCardPro format (SCP)
//!
//! SCP is a flux stream format originally invented for use by the SuperCardPro hardware.
//!
//! SCP images can be produced by a variety of different tools, and usually contain bad metadata
//! fields because these tools do not require the user to specify them before exporting the image.
//!
//! Fields like disk type and RPM are almost universally unreliable. We attempt to calculate the
//! disk parameters ourselves as a result.
//!
//! In contrast to Kryoflux streams, SCP images store only complete revolutions, normalized to start
//! at the track index.
use crate::{
file_parsers::{bitstream_flags, FormatCaps, ParserReadOptions, ParserWriteOptions},
io::{ReadSeek, ReadWriteSeek},
track::fluxstream::FluxStreamTrack,
types::{DiskCh, DiskDescriptor, DiskRpm, Platform, TrackDataEncoding, TrackDensity},
DiskImage,
DiskImageError,
DiskImageFileFormat,
LoadingCallback,
ParserWriteCompatibility,
StandardFormat,
};
use crate::types::FluxStreamTrackParams;
use binrw::{binrw, BinRead, BinReaderExt};
use strum::IntoEnumIterator;
pub const BASE_CAPTURE_RES: u32 = 25;
pub const SCP_FLUX_TIME_BASE: u32 = 25;
pub const SCP_TRACK_COUNT: usize = 168;
//pub const MAX_TRACK_NUMBER: usize = SCP_TRACK_COUNT - 1;
pub const SCP_FB_INDEX: u8 = 0b0000_0001;
//pub const SCP_FB_TPI: u8 = 0b0000_0010;
pub const SCP_FB_RPM: u8 = 0b0000_0100;
pub const SCP_FB_TYPE: u8 = 0b0000_1000;
pub const SCP_FB_READONLY: u8 = 0b0001_0000;
pub const SCP_FB_FOOTER: u8 = 0b0010_0000;
pub const SCP_FB_EXTENDED_MODE: u8 = 0b0100_0000;
pub const SCP_NON_SCP_CAPTURE: u8 = 0b1000_0000;
/// Manufacturer codes taken from the high nibble of the SCP header's
/// `disk_type` byte (see `scp_disk_type`).
#[derive(Debug)]
pub enum ScpDiskManufacturer {
    Cbm = 0x00,
    Atari = 0x10,
    Apple = 0x20,
    Pc = 0x30,
    Tandy = 0x40,
    TI = 0x50,
    Roland = 0x60,
    Amstrad = 0x70,
    Other = 0x80,
    TapeDrive = 0xE0,
    HardDrive = 0xF0,
}
/// The fixed header at the start of every SCP image.
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct ScpFileHeader {
    /// Magic bytes; must be "SCP".
    pub id: [u8; 3],
    /// Packed version: major in the high nibble, minor in the low nibble.
    pub version: u8,
    /// Manufacturer (high nibble) and subtype (low nibble); unreliable in practice.
    pub disk_type: u8,
    /// Number of revolutions captured per track.
    pub revolutions: u8,
    pub start_track: u8,
    pub end_track: u8,
    /// SCP_FB_* bit flags.
    pub flags: u8,
    /// Bit cell width; 0 means the default of 16 bits (other widths unsupported here).
    pub bit_cell_width: u8,
    /// Head selection: 0 = both sides, 1 = side 0 only, 2 = side 1 only.
    pub heads: u8,
    /// Capture resolution multiplier: (resolution + 1) * 25ns per tick.
    pub resolution: u8,
    /// Image checksum; 0 means no checksum (verification is skipped).
    pub checksum: u32,
}
/// Table of absolute file offsets to each track's data header, one entry per
/// possible track (SCP_TRACK_COUNT = 168). An offset of 0 presumably marks an
/// absent track — confirm against the SCP specification.
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct ScpTrackOffsetTable {
    pub track_offsets: [u32; SCP_TRACK_COUNT],
}
/// Per-track data header. `id` is presumably the "TRK" magic per the SCP
/// specification — not validated in the visible code.
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct ScpTrackHeader {
    pub id: [u8; 3],
    pub track_number: u8,
}
/// Per-revolution record within a track entry. Units are presumably
/// capture-resolution ticks (index_time) and flux transition count (length)
/// per the SCP specification — TODO confirm; usage is not visible here.
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct ScpTrackRevolution {
    pub index_time: u32,
    pub length: u32,
    pub data_offset: u32,
}
/// Split a packed SCP version byte into its (major, minor) nibbles.
fn scp_parse_version(version_byte: u8) -> (u8, u8) {
    (version_byte >> 4, version_byte & 0x0F)
}
/// Decode the SCP `disk_type` byte: the high nibble selects the manufacturer
/// and the low nibble a manufacturer-specific subtype. Returns `None` for an
/// unknown manufacturer nibble; the inner `Option` is `None` when the subtype
/// does not map onto a [StandardFormat] fluxfox understands.
fn scp_disk_type(type_byte: u8) -> Option<(ScpDiskManufacturer, Option<StandardFormat>)> {
    let manufacturer = match type_byte & 0xF0 {
        0x00 => ScpDiskManufacturer::Cbm,
        0x10 => ScpDiskManufacturer::Atari,
        0x20 => ScpDiskManufacturer::Apple,
        0x30 => ScpDiskManufacturer::Pc,
        0x40 => ScpDiskManufacturer::Tandy,
        0x50 => ScpDiskManufacturer::TI,
        0x60 => ScpDiskManufacturer::Roland,
        0x70 => ScpDiskManufacturer::Amstrad,
        0x80 => ScpDiskManufacturer::Other,
        0xE0 => ScpDiskManufacturer::TapeDrive,
        0xF0 => ScpDiskManufacturer::HardDrive,
        _ => return None,
    };
    // Only a handful of PC and Tandy subtypes correspond to supported formats.
    let disk_format = match (&manufacturer, type_byte & 0x0F) {
        (ScpDiskManufacturer::Pc, 0x00) => Some(StandardFormat::PcFloppy360),
        (ScpDiskManufacturer::Pc, 0x01) => Some(StandardFormat::PcFloppy720),
        (ScpDiskManufacturer::Pc, 0x02) => Some(StandardFormat::PcFloppy1200),
        (ScpDiskManufacturer::Pc, 0x03) => Some(StandardFormat::PcFloppy1440),
        (ScpDiskManufacturer::Tandy, 0x01) => Some(StandardFormat::PcFloppy180),
        (ScpDiskManufacturer::Tandy, 0x03) => Some(StandardFormat::PcFloppy360),
        _ => None,
    };
    Some((manufacturer, disk_format))
}
/// Parser entry point for the SuperCardPro (SCP) flux image format.
pub struct ScpFormat {}
impl ScpFormat {
    /// File extensions associated with the SCP format.
    pub fn extensions() -> Vec<&'static str> {
        vec!["scp"]
    }

    /// Parser capability flags for this format (bitstream-level support).
    pub fn capabilities() -> FormatCaps {
        bitstream_flags()
    }

    /// Platforms this format can represent.
    pub fn platforms() -> Vec<Platform> {
        // SCP supports just about everything
        Platform::iter().collect()
    }
pub fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
if image.seek(std::io::SeekFrom::Start(0)).is_err() {
return false;
}
let header = if let Ok(header) = ScpFileHeader::read(&mut image) {
header
}
else {
return false;
};
header.id == "SCP".as_bytes()
}
    /// Writing SCP images is not implemented; always reports the format as
    /// unsupported for output.
    pub fn can_write(_image: Option<&DiskImage>) -> ParserWriteCompatibility {
        ParserWriteCompatibility::UnsupportedFormat
    }
pub(crate) fn load_image<RWS: ReadSeek>(
mut read_buf: RWS,
disk_image: &mut DiskImage,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_source_format(DiskImageFileFormat::SuperCardPro);
let disk_image_size = read_buf.seek(std::io::SeekFrom::End(0))?;
read_buf.seek(std::io::SeekFrom::Start(0))?;
let header = ScpFileHeader::read(&mut read_buf)?;
if header.id != "SCP".as_bytes() {
return Err(DiskImageError::UnsupportedFormat);
}
log::trace!("Detected SCP file.");
let (disk_manufacturer, disk_type) = match scp_disk_type(header.disk_type) {
Some(dt) => {
log::debug!("Disk type: Manufacturer {:?} Type: {:?} (*unreliable)", dt.0, dt.1);
dt
}
None => {
log::error!("Unknown SCP disk type: {:02X} (*unreliable)", header.disk_type);
return Err(DiskImageError::IncompatibleImage(format!(
"Unknown SCP disk type: {:02X} (*unreliable)",
header.disk_type
)));
}
};
if let Some(disk_type) = disk_type {
log::debug!(
"Have supported disk type. Manufacturer: {:?} Type: {:?}",
disk_manufacturer,
disk_type
);
}
else {
log::warn!(
"Unsupported SCP disk type. Manufacturer: {:?} Type: {:1X}",
disk_manufacturer,
header.disk_type & 0x0F
);
//return Err(DiskImageError::UnsupportedFormat);
}
let disk_major_ver;
let disk_minor_ver;
// Handle various flags now.
if header.flags & SCP_FB_FOOTER != 0 {
log::trace!("Extension footer is present.");
}
else {
log::trace!("Extension footer is NOT present.");
(disk_major_ver, disk_minor_ver) = scp_parse_version(header.version);
log::debug!(
"SCP version {}.{} ({:02X})",
disk_major_ver,
disk_minor_ver,
header.version
);
}
let disk_rpm = if header.flags & SCP_FB_RPM != 0 {
DiskRpm::Rpm300
}
else {
DiskRpm::Rpm360
};
log::debug!("Reported Disk RPM: {:?} (*unreliable)", disk_rpm);
let disk_readonly = header.flags & SCP_FB_READONLY == 0;
log::debug!("Disk read-only flag: {}", disk_readonly);
if header.flags & SCP_FB_INDEX != 0 {
log::trace!("Tracks aligned at index mark.");
}
else {
log::trace!("Tracks not aligned at index mark.");
}
if header.flags & SCP_FB_EXTENDED_MODE != 0 {
log::error!("Extended mode SCP images not supported.");
return Err(DiskImageError::IncompatibleImage(
"Extended mode SCP images not supported.".to_string(),
));
}
let flux_normalized = header.flags & SCP_FB_TYPE != 0;
log::trace!("Flux data normalization flag: {}", flux_normalized);
if header.flags & SCP_NON_SCP_CAPTURE == 0 {
log::trace!("SCP image was created by SuperCardPro device.");
}
else {
log::trace!("SCP image was not created by SuperCardPro device.");
}
log::trace!("Disk contains {} revolutions per track.", header.revolutions);
log::trace!(
"Starting track: {} Ending track: {}",
header.start_track,
header.end_track
);
log::trace!(
"Bit cell width: {}",
if header.bit_cell_width == 0 {
16
}
else {
header.bit_cell_width
}
);
if header.bit_cell_width != 0 {
log::error!("Non-standard bit cell width ({}) not supported.", header.bit_cell_width);
return Err(DiskImageError::IncompatibleImage(format!(
"Non-standard bit cell width ({}) not supported.",
header.bit_cell_width
)));
}
let disk_heads = match header.heads {
0 => 2,
1 => 1,
2 => {
log::error!("SCP images with just side 1 are not supported.");
return Err(DiskImageError::IncompatibleImage(
"SCP images with just side 1 are not supported.".to_string(),
));
}
_ => {
log::error!("Unsupported number of disk heads: {}", header.heads);
return Err(DiskImageError::IncompatibleImage(format!(
"Unsupported number of disk heads: {}",
header.heads
)));
}
};
log::debug!("Image has {} heads.", disk_heads);
let capture_resolution = BASE_CAPTURE_RES + (header.resolution as u32 * BASE_CAPTURE_RES);
let capture_resolution_seconds = capture_resolution as f64 * 1e-9;
log::debug!(
"Capture resolution: {}ns ({:.9} seconds)",
capture_resolution,
capture_resolution_seconds
);
if header.checksum == 0 {
log::debug!("Image has CRC==0. Skipping CRC verification.");
}
else {
log::debug!("Image CRC: {:08X}", header.checksum);
log::debug!("Image CRC not verified.");
}
let mut track_table_len = SCP_TRACK_COUNT;
let mut track_offsets: Vec<u32> = Vec::new();
// Read in the first track offset. Its value establishes a lower bound for the size of the
// track offset table. SCP files SHOULD contain 'SCP_TRACK_COUNT' track offsets, but some
// are observed to contain fewer.
let track_offset: u32 = read_buf.read_le()?;
log::trace!("Track offset table entry {} : {:08X}", 0, track_offset);
if track_offset < 0x10 {
log::error!("Invalid track offset table.");
return Err(DiskImageError::ImageCorruptError(
"Invalid track offset table entry".to_string(),
));
}
let max_table_size = (track_offset as usize - 0x10) / 4;
if max_table_size < track_table_len {
track_table_len = max_table_size;
log::warn!(
"Track offset table is too short. Truncating to {} entries.",
track_table_len
);
}
track_offsets.push(track_offset);
let mut last_offset = track_offset;
// Loop through the rest of the offset table entries.
for to in 0..max_table_size - 1 {
let track_offset: u32 = read_buf.read_le()?;
if track_offset > 0 {
if (track_offset <= last_offset) || (track_offset as u64 >= disk_image_size) {
log::error!("Bad track offset: {:08X} at entry {}", track_offset, to);
return Err(DiskImageError::FormatParseError);
}
else if track_offset > 0 {
log::trace!("Track offset table entry {} : {:08X}", to, track_offset);
track_offsets.push(track_offset);
}
}
else {
break;
}
last_offset = track_offset;
}
log::trace!("Got {} track offsets.", track_offsets.len());
//let mut c = 0;
//let mut h = 0;
let mut ch = DiskCh::default();
let mut disk_data_rate = None;
let mut ch_iter = DiskCh::new((SCP_TRACK_COUNT / 2) as u16, disk_heads).iter();
for (ti, offset) in track_offsets.iter().enumerate() {
ch = ch_iter.next().unwrap();
// Seek to the track header.
read_buf.seek(std::io::SeekFrom::Start(*offset as u64))?;
// Read the track header.
let track_header = ScpTrackHeader::read(&mut read_buf)?;
log::trace!(
"Track index: {} number: {} ch: {} offset: {:08X}",
ti,
track_header.track_number,
ch,
offset,
);
// Verify header.
if track_header.id != "TRK".as_bytes() {
log::error!("Expected track header signature, got: {:?}", track_header.id);
return Err(DiskImageError::ImageCorruptError(
"Invalid track header signature".to_string(),
));
}
// Read in revolutions.
let mut revolutions = Vec::new();
for _ in 0..header.revolutions {
let revolution = ScpTrackRevolution::read(&mut read_buf)?;
revolutions.push(revolution);
}
let mut flux_track = FluxStreamTrack::new();
#[allow(clippy::never_loop)]
for (ri, rev) in revolutions.iter().enumerate() {
// Calculate RPM of revolution.
let rev_nanos = (rev.index_time * SCP_FLUX_TIME_BASE) as f64;
let rev_seconds = rev_nanos * 1e-9;
let rev_millis = rev_nanos * 1e-6;
let rev_rpm = 60.0 / rev_seconds;
log::debug!(
"Revolution {}: rpm: {} index time: {:08} length: {:08} flux offset: {:08}",
ri,
rev_rpm,
rev.index_time,
rev.length,
rev.data_offset
);
// Read the raw flux data for this revolution [big-endian u16].
let mut data = vec![0u16; rev.length as usize];
read_buf.seek(std::io::SeekFrom::Start(*offset as u64 + rev.data_offset as u64))?;
for d in &mut data {
*d = read_buf.read_be()?;
}
// Convert the flux data to f64 seconds.
let fluxes = Self::convert_flux_data(&data, capture_resolution);
log::trace!(
"Adding revolution {} with index time: {}ms, rpm: {:.2} fts: {}",
ri,
rev_millis,
rev_rpm,
data.len()
);
flux_track.add_revolution(ch, &fluxes, rev_seconds);
}
let params = FluxStreamTrackParams {
ch,
schema: None,
encoding: None,
clock: None,
rpm: None,
};
let new_track = disk_image.add_track_fluxstream(flux_track, ¶ms)?;
let ti = new_track.info();
if disk_data_rate.is_none() {
log::trace!("Setting disk data rate to {}", ti.data_rate);
disk_data_rate = Some(ti.data_rate);
}
}
log::trace!("Read {} valid track offsets. Final track {}", track_offsets.len(), ch);
if disk_data_rate.is_none() {
log::error!("Unable to determine data rate from any track.");
return Err(DiskImageError::IncompatibleImage(
"Unable to determine data rate from any track.".to_string(),
));
}
disk_image.descriptor = DiskDescriptor {
// SCP does have a platform field, but unfortunately we can't trust it.
platforms: None,
geometry: DiskCh::from((ch.c() + 1, disk_heads)),
data_rate: disk_data_rate.unwrap(),
density: TrackDensity::from(disk_data_rate.unwrap()),
data_encoding: TrackDataEncoding::Mfm,
rpm: None,
write_protect: Some(disk_readonly),
};
Ok(())
}
fn convert_flux_data(data: &[u16], capture_resolution: u32) -> Vec<f64> {
let mut flux_f64 = Vec::with_capacity(data.len());
let resolution_secs = capture_resolution as f64 * 1e-9;
let mut accumulator: u64 = 0;
for d in data {
if *d == 0 {
// A flux time of 0 indicates rollover. Add U16::MAX to the accumulator,
// and continue to the next value.
accumulator += u64::from(u16::MAX);
}
else {
// Add the accumulator to the flux value, and convert to f64 seconds.
flux_f64.push((((*d as u64) + accumulator) as f64) * resolution_secs);
// Reset the accumulator
accumulator = 0;
}
}
flux_f64
}
pub fn save_image<RWS: ReadWriteSeek>(
_image: &DiskImage,
_opts: &ParserWriteOptions,
_output: &mut RWS,
) -> Result<(), DiskImageError> {
Err(DiskImageError::UnsupportedFormat)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/td0.rs | src/file_parsers/td0.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
src/parsers/td0.rs
A parser for the Teledisk (TD0) disk image format.
The proprietary format used by the Teledisk disk copying software, published by Sydex in the
1980s. This utility was quite popular for early disk archival efforts, and many Teledisk images
exist in the wild.
Teledisk disk images can be optionally encoded with 'advanced compression' which is a form of
LZHUF compression.
*/
use crate::{
file_parsers::{
compression::{
lzhuf,
lzhuf::TD0_READ_OPTIONS,
lzw,
lzw::{Options, OptionsPreset},
},
FormatCaps,
ParserReadOptions,
ParserWriteCompatibility,
ParserWriteOptions,
},
io::{Cursor, Read, ReadBytesExt, ReadSeek, ReadWriteSeek, Seek},
types::{
AddSectorParams,
DiskCh,
DiskChsn,
DiskDescriptor,
MetaSectorTrackParams,
Platform,
SectorAttributes,
TrackDataEncoding,
TrackDataRate,
TrackDensity,
},
DiskImage,
DiskImageError,
DiskImageFileFormat,
FoxHashSet,
LoadingCallback,
};
use binrw::{binrw, BinRead};
//pub const SECTOR_DUPLICATED: u8 = 0b0000_0001;
pub const SECTOR_CRC_ERROR: u8 = 0b0000_0010;
pub const SECTOR_DELETED: u8 = 0b0000_0100;
pub const SECTOR_SKIPPED: u8 = 0b0001_0000;
pub const SECTOR_NO_DAM: u8 = 0b0010_0000;
// When would we see this set? How would a sector with no IDAM even be seen?
//pub const SECTOR_NO_IDAM: u8 = 0b0100_0000;
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct TelediskHeader {
pub id: [u8; 2],
pub sequence: u8,
pub check_sequence: u8,
pub version: u8,
pub data_rate: u8,
pub drive_type: u8,
pub stepping: u8,
pub allocation_flag: u8,
pub heads: u8,
pub crc: u16,
}
pub const COMMENT_HEADER_SIZE: usize = 10;
/// Teledisk comment block header
/// 'length' bytes of comment data line records follow the header, as nul-terminated strings.
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct CommentHeader {
pub crc: u16,
pub length: u16,
pub year: u8,
pub month: u8,
pub day: u8,
pub hour: u8,
pub minute: u8,
pub second: u8,
}
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct TrackHeader {
pub sectors: u8,
pub cylinder: u8,
pub head: u8,
pub crc: u8,
}
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct SectorHeader {
pub cylinder: u8,
pub head: u8,
pub sector_id: u8,
pub sector_size: u8,
pub flags: u8,
pub crc: u8,
}
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct SectorDataHeader {
pub len: u16,
pub encoding: u8,
}
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct RepeatedDataEntry {
pub count: u16,
pub data: [u8; 2],
}
pub struct Td0Format {}
fn td0_data_rate(rate: u8) -> TrackDataRate {
match rate & 0x03 {
0 => TrackDataRate::Rate250Kbps(1.0),
1 => TrackDataRate::Rate300Kbps(1.0),
2 => TrackDataRate::Rate500Kbps(1.0),
_ => {
log::warn!("TD0 Data Rate out of range: {} Assuming 300Kbps", rate);
TrackDataRate::Rate300Kbps(1.0)
}
}
}
/// Implement a 16-bit CRC for the TD0 format. The TD0 CRC is a simple polynomial CRC with a
/// polynomial of 0xA097.
fn td0_crc(data: &[u8], input_crc: u16) -> u16 {
let mut crc = input_crc;
for byte in data.iter() {
crc ^= (*byte as u16) << 8;
for _j in 0..8 {
crc = (crc << 1) ^ if crc & 0x8000 != 0 { 0xA097 } else { 0 };
}
}
crc
}
/// Calculate the CRC of a block of data in a disk image, starting at the specified offset and
/// extending for the specified length. The stream position is restored to its original position
/// after CRC calculation.
fn calc_crc<RWS: ReadSeek>(image: &mut RWS, offset: u64, len: usize, input_crc: u16) -> Result<u16, DiskImageError> {
let mut crc_data = vec![0u8; len];
let saved_pos = image.stream_position()?;
image.seek(std::io::SeekFrom::Start(offset))?;
image.read_exact(&mut crc_data)?;
image.seek(std::io::SeekFrom::Start(saved_pos))?;
Ok(td0_crc(&crc_data, input_crc))
}
impl Td0Format {
#[allow(dead_code)]
fn format() -> DiskImageFileFormat {
DiskImageFileFormat::TeleDisk
}
pub(crate) fn capabilities() -> FormatCaps {
FormatCaps::empty()
}
pub fn platforms() -> Vec<Platform> {
// The Teledisk utility was a DOS-based program, so we'll assume that TD0 images are
// intended only for the PC platform.
vec![Platform::IbmPc]
}
pub(crate) fn extensions() -> Vec<&'static str> {
vec!["td0"]
}
pub(crate) fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
let mut detected = false;
_ = image.seek(std::io::SeekFrom::Start(0));
if let Ok(file_header) = TelediskHeader::read(&mut image) {
if file_header.id == "TD".as_bytes() || file_header.id == "td".as_bytes() {
detected = true;
}
}
detected
}
pub(crate) fn can_write(_image: Option<&DiskImage>) -> ParserWriteCompatibility {
ParserWriteCompatibility::UnsupportedFormat
}
pub(crate) fn load_image<RWS: ReadSeek>(
mut read_buf: RWS,
disk_image: &mut DiskImage,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_source_format(DiskImageFileFormat::TeleDisk);
let mut image_data = Vec::new();
read_buf.seek(std::io::SeekFrom::Start(0))?;
read_buf.read_to_end(&mut image_data)?;
if image_data.len() < 12 {
log::trace!("Image is too small to be a Teledisk read_buf.");
return Err(DiskImageError::UnknownFormat);
}
// Read first 10 bytes to calculate header CRC.
let header_crc = td0_crc(&image_data[0..10], 0);
read_buf.seek(std::io::SeekFrom::Start(0))?;
let file_header = TelediskHeader::read(&mut read_buf)?;
let detected = file_header.id == "TD".as_bytes() || file_header.id == "td".as_bytes();
if !detected {
return Err(DiskImageError::UnknownFormat);
}
let compressed = file_header.id == "td".as_bytes();
let major_version = file_header.version / 10;
let minor_version = file_header.version % 10;
let has_comment_block = file_header.stepping & 0x80 != 0;
let disk_data_rate = td0_data_rate(file_header.data_rate);
log::trace!(
"Detected Teledisk Image, version {}.{}, compressed: {} has_comment_block: {}",
major_version,
minor_version,
compressed,
has_comment_block
);
log::trace!("Header CRC: {:04X} Calculated CRC: {:04X}", file_header.crc, header_crc,);
if file_header.crc != header_crc {
return Err(DiskImageError::ImageCorruptError("Bad Header CRC".to_string()));
}
let current_pos = read_buf.stream_position()?;
// Decompress the read_buf data if necessary.
let data_len = image_data.len();
let mut compressed_data = Cursor::new(image_data);
let mut decompression_buffer = Cursor::new(Vec::with_capacity(data_len * 2));
let (mut image_data_ref, decompressed_length) = if compressed {
let (_, decompressed_length) = if major_version < 2 {
log::debug!("Using V1 LZW decompression.");
// Use older LZW decompression.
lzw::expand(
&mut compressed_data,
&mut decompression_buffer,
&Options::from(OptionsPreset::Teledisk),
)
.map_err(|_| DiskImageError::ImageCorruptError("LZW decompression failure".to_string()))?
}
else {
// Use LZHUF decompression.
log::debug!("Using V2 LZHUF decompression.");
lzhuf::expand(&mut compressed_data, &mut decompression_buffer, &TD0_READ_OPTIONS)
.map_err(|_| DiskImageError::ImageCorruptError("LZHUF decompression failure".to_string()))?
};
log::trace!("Decompressed {} bytes to {} bytes", data_len, decompressed_length);
(&mut decompression_buffer, decompressed_length)
}
else {
(&mut compressed_data, data_len as u64)
};
// From this point forward, we are working with decompressed data.
image_data_ref.seek(std::io::SeekFrom::Start(current_pos))?;
// Parse comment block if indicated.
if has_comment_block {
let comment_header = CommentHeader::read(&mut image_data_ref)?;
let calculated_crc = calc_crc(
&mut image_data_ref,
14,
COMMENT_HEADER_SIZE - 2 + comment_header.length as usize,
0,
)?;
log::trace!(
"Comment block header crc: {:04X} calculated_crc: {:04X}",
comment_header.crc,
calculated_crc
);
if comment_header.crc != calculated_crc {
log::warn!("Bad Comment block header CRC");
//return Err(DiskImageError::ImageCorruptError("Bad Comment CRC".to_string()));
}
if comment_header.length as u64 > decompressed_length.saturating_sub(COMMENT_HEADER_SIZE as u64) {
return Err(DiskImageError::ImageCorruptError(format!(
"Comment block length ({}) exceeds image size ({})",
comment_header.length, decompressed_length
)));
}
let mut comment_data_block = vec![0; comment_header.length as usize];
image_data_ref.read_exact(&mut comment_data_block)?;
// Comment black consists of nul-terminated strings. Convert nul terminators to newlines.
for char in comment_data_block.iter_mut() {
if *char == 0 {
*char = b'\n';
}
}
let comment = String::from_utf8(comment_data_block).map_err(|_| DiskImageError::FormatParseError)?;
log::trace!("Comment block data: {}", comment);
}
// Read tracks in
let mut cylinder_set: FoxHashSet<u16> = FoxHashSet::new();
let mut track_header_offset = image_data_ref.stream_position()?;
while let Ok(track_header) = TrackHeader::read(&mut image_data_ref) {
let calculated_track_header_crc = calc_crc(&mut image_data_ref, track_header_offset, 3, 0)?;
log::trace!(
"Read track header. c:{} h:{} Sectors: {} crc: {:02X} calculated: {:02X}",
track_header.cylinder,
track_header.head,
track_header.sectors,
track_header.crc,
calculated_track_header_crc as u8
);
if track_header.sectors == 0xFF {
// End of track headers.
log::trace!("End of TeleDisk track headers.");
break;
}
if track_header.crc != calculated_track_header_crc as u8 {
return Err(DiskImageError::ImageCorruptError("Bad Track Header CRC".to_string()));
}
log::trace!("Adding track: c:{} h:{}...", track_header.cylinder, track_header.head);
let params = MetaSectorTrackParams {
ch: DiskCh::from((track_header.cylinder as u16, track_header.head)),
data_rate: disk_data_rate,
encoding: TrackDataEncoding::Mfm,
};
let new_track = disk_image.add_track_metasector(¶ms)?;
cylinder_set.insert(track_header.cylinder as u16);
for _s in 0..track_header.sectors {
//let sector_header_offset = image_data_ref.stream_position()?;
let sector_header = SectorHeader::read(&mut image_data_ref)?;
log::trace!(
"Read sector header: c:{} h:{} sid:{} size:{} flags:{:02X} crc:{:02X}",
sector_header.cylinder,
sector_header.head,
sector_header.sector_id,
sector_header.sector_size,
sector_header.flags,
sector_header.crc
);
// The description of the sector header CRC in Dave Dunfield's TD0 notes is incorrect.
// The CRC is calculated for the expanded data block, and does not include the
// sector header or sector data header.
// A Sector Data Header follows as long as neither of these two flags are not set.
let have_sector_data = sector_header.flags & (SECTOR_NO_DAM | SECTOR_SKIPPED) == 0;
let sector_size_bytes = DiskChsn::n_to_bytes(sector_header.sector_size);
if have_sector_data {
// let sector_data_header_offset =
// image_data_ref.stream_position()?;
let sector_data_header = SectorDataHeader::read(&mut image_data_ref)?;
log::trace!(
"Read sector data header. len:{} encoding:{}",
sector_data_header.len - 1,
sector_data_header.encoding
);
// 'len' field of sector data header includes the encoding byte.
let mut sector_data_vec = vec![0; sector_size_bytes];
match sector_data_header.encoding {
0 => {
// Raw data. 'len' bytes follow.
image_data_ref.read_exact(&mut sector_data_vec)?;
}
1 => {
// Repeated two-byte pattern.
Td0Format::td0_decompress_repeated_data(&mut image_data_ref, &mut sector_data_vec)?;
}
2 => {
// Run-length encoded data.
Td0Format::td0_decompress_rle_data(&mut image_data_ref, &mut sector_data_vec)?;
}
_ => {
log::error!("Unknown sector data encoding: {}", sector_data_header.encoding);
return Err(DiskImageError::ImageCorruptError(
"Unknown sector data encoding".to_string(),
));
}
}
// Calculate sector CRC from expanded data.
let data_crc = td0_crc(§or_data_vec, 0);
log::trace!(
"Sector header crc: {:02X} Calculated data block crc: {:02X}",
sector_header.crc,
data_crc as u8
);
if sector_header.crc != data_crc as u8 {
return Err(DiskImageError::ImageCorruptError("Bad Sector Header CRC".to_string()));
}
// Add this sector to track.
let params = AddSectorParams {
id_chsn: DiskChsn::new(
sector_header.cylinder as u16,
sector_header.head,
sector_header.sector_id,
DiskChsn::bytes_to_n(sector_data_vec.len()),
),
data: §or_data_vec,
weak_mask: None,
hole_mask: None,
attributes: SectorAttributes {
address_error: false,
data_error: sector_header.flags & SECTOR_CRC_ERROR != 0,
deleted_mark: sector_header.flags & SECTOR_DELETED != 0,
no_dam: false,
},
alternate: false,
bit_index: None,
};
new_track.add_sector(¶ms)?;
}
}
// Update the track header offset for next track header crc calculation.
track_header_offset = image_data_ref.stream_position()?;
}
disk_image.descriptor = DiskDescriptor {
// Assume TeleDisk images are for IBM PC.
platforms: Some(vec![Platform::IbmPc]),
geometry: DiskCh::from((cylinder_set.len() as u16, file_header.heads)),
data_rate: disk_data_rate,
data_encoding: TrackDataEncoding::Mfm,
density: TrackDensity::from(disk_data_rate),
rpm: None,
write_protect: None,
};
Ok(())
}
pub fn td0_decompress_repeated_data<RWS: ReadSeek>(
read_buf: &mut RWS,
output: &mut [u8],
) -> Result<(), DiskImageError> {
let data_len = output.len();
let mut decoded_len = 0;
while decoded_len < data_len {
let entry = RepeatedDataEntry::read(read_buf)?;
let count = entry.count as usize;
for _ in 0..count {
if decoded_len < (data_len - 1) {
output[decoded_len + 1] = entry.data[0];
output[decoded_len] = entry.data[1];
decoded_len += 2;
}
else {
return Err(DiskImageError::FormatParseError);
}
}
}
log::trace!("td0_decompress_repeated_data(): Decoded {} bytes", decoded_len);
Ok(())
}
pub fn td0_decompress_rle_data<RWS: ReadSeek>(read_buf: &mut RWS, output: &mut [u8]) -> Result<(), DiskImageError> {
let start_pos = read_buf.stream_position()?;
//log::trace!("RLE data start pos: {:X}", start_pos);
let data_len = output.len();
let mut decoded_len = 0;
let mut encoded_len = 0;
while decoded_len < data_len {
let entry_code = read_buf.read_u8()?;
encoded_len += 1;
if entry_code == 0 {
// Literal data block. The next byte encodes a length, and `length` bytes are copied
// to the output slice.
let block_len = read_buf.read_u8()? as usize;
read_buf.read_exact(&mut output[decoded_len..decoded_len + block_len])?;
decoded_len += block_len;
encoded_len += block_len;
}
else {
// Run-length encoded block. The entry code byte encodes the length of the data pattern,
let pattern_length = entry_code as usize * 2;
let repeat_ct = read_buf.read_u8()?;
let mut pattern_block = vec![0; pattern_length];
read_buf.read_exact(&mut pattern_block)?;
encoded_len += pattern_length + 1;
for _ in 0..repeat_ct {
if decoded_len < data_len {
output[decoded_len..decoded_len + pattern_length].copy_from_slice(&pattern_block);
decoded_len += pattern_length;
}
else {
let data_pos = read_buf.stream_position()?;
log::trace!(
"td0_decompress_rle_data(): Output buffer overrun; input_offset: {} decoded_len: {}",
data_pos - start_pos,
decoded_len
);
return Err(DiskImageError::FormatParseError);
}
}
}
}
log::trace!(
"td0_decompress_rle_data(): Decoded {}->{} bytes",
encoded_len,
decoded_len
);
//util::dump_slice(output, 16, 0, std::io::stdout())?;
Ok(())
}
pub fn save_image<RWS: ReadWriteSeek>(
_image: &DiskImage,
_opts: &ParserWriteOptions,
_output: &mut RWS,
) -> Result<(), DiskImageError> {
Err(DiskImageError::UnsupportedFormat)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/tc.rs | src/file_parsers/tc.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! A parser for the TransCopy (.TC) disk image format.
//!
//! TransCopy images are bitstream-level images produced by the TransCopy
//! utility bundled with Central Point Software's Copy II PC Option Board.
//!
//! Documentation of this format helpfully provided by NewRisingSun.
//! https://www.robcraig.com/wiki/transcopy-version-5-x-format/
//!
//! TransCopy images do not have a separate weak bit mask. Instead, weak bits
//! can be detected by an invalid sequence of 0's in the MFM bitstream.
//!
//! fluxfox will attempt to detect weak bits when adding tracks to the image,
//! if a weak bit mask is not provided.
//!
//! The padding between tracks is not just on 256 byte boundaries. It is a bit
//! unusual, but we don't write to TC yet so don't have to handle whatever
//! scheme it is using. Track data is padded so that it does not pass a 64k
//! boundary. This was apparently done so to make it easier for TransCopy to
//! handle DMA transfer of track data.
use crate::{
file_parsers::{bitstream_flags, FormatCaps, ParserWriteCompatibility},
io::{ReadSeek, ReadWriteSeek},
};
use crate::{
file_parsers::{ParserReadOptions, ParserWriteOptions},
types::{BitStreamTrackParams, DiskDescriptor, DiskRpm, Platform, TrackDataEncoding, TrackDataRate, TrackDensity},
DiskCh,
DiskImage,
DiskImageError,
DiskImageFileFormat,
LoadingCallback,
};
use binrw::{binrw, BinRead};
// Disk Type Constants
// All types are listed here, but fluxfox will initially only support PC-specific formats
// PCE tools generate TC's with the 'UNKNOWN' disk type, which is unfortunate.
// We normally use this disk type to set the data rate and RPM. So we'll have to come up with an
// alternate method for determining these values.
pub const TC_DISK_TYPE_UNKNOWN: u8 = 0xFF;
pub const TC_DISK_TYPE_MFM_HD: u8 = 0x02;
pub const TC_DISK_TYPE_MFM_DD_360: u8 = 0x03;
//pub const TC_DISK_TYPE_GCR_APPLEII: u8 = 0x04;
//pub const TC_DISK_TYPE_FM_SD: u8 = 0x05;
//pub const TC_DISK_TYPE_GCR_COMMODORE: u8 = 0x06;
pub const TC_DISK_TYPE_MFM_DD: u8 = 0x07;
//pub const TC_DISK_TYPE_AMIGA: u8 = 0x08;
//pub const TC_DISK_TYPE_FM_ATARI: u8 = 0x0C;
// Track flags. We don't use these yet, but they're here for reference.
//pub const TC_FLAG_KEEP_TRACK_LENGTH: u16 = 0b0000_0000_0000_0001;
//pub const TC_FLAG_COPY_ACROSS_INDEX: u16 = 0b0000_0000_0000_0010;
// I suppose this flag was some hint to TransCopy when writing a track. We will always create a
// weak bit mask when detecting weak bits.
//pub const TC_FLAG_COPY_WEAK_BITS: u16 = 0b0000_0000_0000_0100;
//pub const TC_FLAG_VERIFY_WRITE: u16 = 0b0000_0000_0000_1000;
//pub const TC_FLAG_TOLERANCE_ADJUST: u16 = 0b0000_0000_0100_0000;
// This flag indicates no address marks on a track. We'll find that out for ourselves when we add
// the track, so it's not really that important.
//pub const TC_FLAG_NO_ADDRESS_MARKS: u16 = 0b0000_0000_1000_0000;
//pub const TC_FLAG_UNKNOWN: u16 = 0b1000_0000_0000_0000;
// These values are used to represent empty entries in corresponding tables.
pub const TC_EMPTY_TRACK_SKEW: u16 = 0x1111;
pub const TC_EMPTY_TRACK_DATA: u16 = 0x3333;
pub const TC_EMPTY_TRACK_FLAGS: u16 = 0x4444;
#[derive(Debug)]
#[binrw]
#[brw(big)]
struct TCFileHeader {
id: [u8; 2], // Magic number: 0x5A 0xA5
comment0: [u8; 32], // First comment line, zero-terminated
comment1: [u8; 32], // Second comment line, zero-terminated
padding: [u8; 190], // Unused, filled with random memory data
}
#[derive(Debug)]
#[binrw]
#[brw(big)]
struct TCDiskInfo {
disk_type: u8,
starting_c: u8,
ending_c: u8,
num_sides: u8,
cylinder_increment: u8,
#[br(little)]
track_skews: [u16; 256],
#[br(big)]
track_offsets: [u16; 256],
#[br(little)]
track_sizes: [u16; 256],
#[br(little)]
track_flags: [u16; 256],
}
/// Convert one of the TC header comment fields into a String.
fn tc_read_comment(raw_comment: &[u8]) -> String {
let comment_end_pos = raw_comment.iter().position(|&c| c == 0).unwrap_or(raw_comment.len());
String::from(std::str::from_utf8(&raw_comment[..comment_end_pos]).unwrap_or_default())
}
fn tc_parse_disk_type(disk_type: u8) -> Result<(TrackDataEncoding, TrackDataRate, DiskRpm), DiskImageError> {
let (encoding, data_rate, disk_rpm) = match disk_type {
// Return a default for UNKNOWN, as PCE tools generate TC's with this disk type.
TC_DISK_TYPE_UNKNOWN => (
TrackDataEncoding::Mfm,
TrackDataRate::Rate250Kbps(1.0),
DiskRpm::Rpm300(1.0),
),
TC_DISK_TYPE_MFM_HD => (
TrackDataEncoding::Mfm,
TrackDataRate::Rate500Kbps(1.0),
DiskRpm::Rpm300(1.0),
),
TC_DISK_TYPE_MFM_DD_360 => (
TrackDataEncoding::Mfm,
TrackDataRate::Rate500Kbps(1.0),
DiskRpm::Rpm360(1.0),
),
TC_DISK_TYPE_MFM_DD => (
TrackDataEncoding::Mfm,
TrackDataRate::Rate250Kbps(1.0),
DiskRpm::Rpm300(1.0),
),
_ => return Err(DiskImageError::UnsupportedFormat),
};
Ok((encoding, data_rate, disk_rpm))
}
pub struct TCFormat {}
impl TCFormat {
pub fn extensions() -> Vec<&'static str> {
vec!["tc"]
}
pub fn capabilities() -> FormatCaps {
bitstream_flags() | FormatCaps::CAP_TRACK_ENCODING | FormatCaps::CAP_ENCODING_FM | FormatCaps::CAP_ENCODING_MFM
}
pub fn platforms() -> Vec<Platform> {
// Although the Copy II PC Option Board was a PC-specific device, it was capable of imaging
// disks for multiple platforms. For the moment, we'll only support PC until we have some
// cross-platform TC images to test with.
vec![Platform::IbmPc]
}
pub fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
if image.seek(std::io::SeekFrom::Start(0)).is_err() {
return false;
}
let header = if let Ok(header) = TCFileHeader::read(&mut image) {
header
}
else {
return false;
};
header.id[0] == 0x5A && header.id[1] == 0xA5
}
pub fn can_write(_image: Option<&DiskImage>) -> ParserWriteCompatibility {
ParserWriteCompatibility::UnsupportedFormat
}
pub(crate) fn load_image<RWS: ReadSeek>(
mut read_buf: RWS,
disk_image: &mut DiskImage,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_source_format(DiskImageFileFormat::TransCopyImage);
let disk_image_size = read_buf.seek(std::io::SeekFrom::End(0))?;
read_buf.seek(std::io::SeekFrom::Start(0))?;
let header = if let Ok(header) = TCFileHeader::read(&mut read_buf) {
header
}
else {
return Err(DiskImageError::UnsupportedFormat);
};
if header.id[0] != 0x5A || header.id[1] != 0xA5 {
log::error!("Invalid TransCopy header id: {:?}", header.id);
return Err(DiskImageError::UnsupportedFormat);
}
log::trace!("load_image(): Got TransCopy read_buf.");
// Read comment arrays, and turn into a string with newlines.
let comment0_string = tc_read_comment(&header.comment0);
let comment1_string = tc_read_comment(&header.comment1);
let comment_string = format!("{}\n{}", comment0_string, comment1_string);
log::trace!("Read comment: {}", comment_string);
let disk_info = if let Ok(di) = TCDiskInfo::read(&mut read_buf) {
di
}
else {
return Err(DiskImageError::FormatParseError);
};
// Only support PC disk types for now
if ![
TC_DISK_TYPE_UNKNOWN,
TC_DISK_TYPE_MFM_HD,
TC_DISK_TYPE_MFM_DD_360,
TC_DISK_TYPE_MFM_DD,
]
.contains(&disk_info.disk_type)
{
log::error!("Unsupported disk type: {:02X}", disk_info.disk_type);
return Err(DiskImageError::IncompatibleImage(format!(
"Unsupported disk type: {:02X}",
disk_info.disk_type
)));
}
let (disk_encoding, disk_data_rate, disk_rpm) = tc_parse_disk_type(disk_info.disk_type)?;
log::trace!("Disk encoding: {:?}", disk_encoding);
log::trace!("Starting cylinder: {}", disk_info.starting_c);
log::trace!("Ending cylinder: {}", disk_info.ending_c);
log::trace!("Number of sides: {}", disk_info.num_sides);
log::trace!("Cylinder increment: {}", disk_info.cylinder_increment);
if disk_info.starting_c != 0 {
// I don't know if we'll ever encounter images like this, but for now let's require starting at 0.
log::error!("Unsupported starting cylinder: {}", disk_info.starting_c);
return Err(DiskImageError::IncompatibleImage(format!(
"Unsupported starting cylinder: {}",
disk_info.starting_c
)));
}
if disk_info.cylinder_increment != 1 {
// Similarly, I am not sure why the track increment would ever be anything other than 1.
log::error!("Unsupported cylinder increment: {}", disk_info.cylinder_increment);
return Err(DiskImageError::IncompatibleImage(format!(
"Unsupported cylinder increment: {}",
disk_info.cylinder_increment
)));
}
let raw_track_skew_ct = disk_info
.track_skews
.iter()
.take_while(|&v| *v != TC_EMPTY_TRACK_SKEW)
.count();
let raw_track_start_ct = disk_info.track_offsets.iter().take_while(|&v| *v != 0).count();
let raw_track_data_ct = disk_info
.track_sizes
.iter()
.take_while(|&v| *v != TC_EMPTY_TRACK_DATA)
.count();
let raw_track_flag_ct = disk_info
.track_flags
.iter()
.take_while(|&v| *v != TC_EMPTY_TRACK_FLAGS)
.count();
log::trace!(
"Raw track data counts: Skews {} Starts: {} Sizes: {} Flags: {}",
raw_track_skew_ct,
raw_track_start_ct,
raw_track_data_ct,
raw_track_flag_ct,
);
if raw_track_skew_ct != raw_track_data_ct
|| raw_track_start_ct != raw_track_data_ct
|| raw_track_flag_ct != raw_track_data_ct
{
log::error!("Mismatched track data counts");
return Err(DiskImageError::IncompatibleImage("Mismatched track data counts".into()));
}
// Limit tracks to pairs of sides
let raw_track_data_ct = raw_track_data_ct & !0x01;
let mut last_track_data_offset = 0;
for i in 0..raw_track_data_ct {
let track_offset = (disk_info.track_offsets[i] as u64) << 8;
let track_size = disk_info.track_sizes[i] as u64;
let adj_track_size = if track_size % 256 == 0 {
track_size
}
else {
((track_size >> 8) + 1) << 8
};
if track_offset == 0 || track_size == 0 {
log::error!("Invalid track offset or size: {} {}", track_offset, track_size);
return Err(DiskImageError::IncompatibleImage(format!(
"Invalid track offset or size: {} {}",
track_offset, track_size
)));
}
if track_offset + track_size > disk_image_size {
log::error!("Track data extends beyond end of read_buf");
return Err(DiskImageError::IncompatibleImage(
"Track data extends beyond end of read_buf".into(),
));
}
last_track_data_offset = track_offset + adj_track_size;
log::trace!(
"Track {}: RawOffset:{} Byte Offset: {} Size: {} File size: {} Calculated next offset: {}",
i,
disk_info.track_offsets[i],
track_offset,
track_size,
adj_track_size,
last_track_data_offset
);
}
let remaining_image = disk_image_size.saturating_sub(last_track_data_offset);
log::trace!("Remaining data in image: {}", remaining_image);
log::trace!(
"Remaining data per track: {}",
remaining_image / raw_track_data_ct as u64
);
log::trace!(
"Last track offset: {}",
disk_info.track_offsets[raw_track_data_ct - 1] << 8
);
log::trace!("Lack track size: {}", disk_info.track_sizes[raw_track_data_ct - 1]);
log::trace!(
"End of track data: {}",
((disk_info.track_offsets[raw_track_data_ct - 1] as u64) << 8)
+ disk_info.track_sizes[raw_track_data_ct - 1] as u64
);
// Read the tracks
let mut head_n = 0;
let track_shift = match disk_info.num_sides {
1 => 0,
2 => 1,
_ => {
log::error!("Unsupported number of sides: {}", disk_info.num_sides);
return Err(DiskImageError::IncompatibleImage(format!(
"Unsupported number of sides: {}",
disk_info.num_sides
)));
}
};
for i in 0..raw_track_data_ct {
let cylinder_n = (i >> track_shift) as u16;
let track_offset = (disk_info.track_offsets[i] as u64) << 8;
let track_size = disk_info.track_sizes[i] as u64;
let mut track_data_vec = vec![0; track_size as usize];
read_buf.seek(std::io::SeekFrom::Start(track_offset))?;
read_buf.read_exact(&mut track_data_vec)?;
log::trace!(
"Adding {:?} encoded track: {}",
disk_encoding,
DiskCh::from((cylinder_n, head_n))
);
let params = BitStreamTrackParams {
schema: None,
encoding: disk_encoding,
data_rate: disk_data_rate,
rpm: Some(disk_rpm),
ch: DiskCh::new(cylinder_n, head_n),
bitcell_ct: None,
data: &track_data_vec,
weak: None,
hole: None,
detect_weak: true, // flux2tc encodes weak bits as runs of MFM 0 bits
};
disk_image.add_track_bitstream(¶ms)?;
head_n += 1;
if head_n == disk_info.num_sides {
head_n = 0;
}
}
disk_image.descriptor = DiskDescriptor {
// You could use TransCopy with non-PC floppies, so I suppose we can't assume PC here.
platforms: None,
geometry: DiskCh::from((
(raw_track_data_ct / disk_info.num_sides as usize) as u16,
disk_info.num_sides,
)),
data_rate: disk_data_rate,
data_encoding: disk_encoding,
density: TrackDensity::from(disk_data_rate),
rpm: Some(disk_rpm),
write_protect: None,
};
Ok(())
}
/// Saving to the TransCopy format is not implemented; always returns
/// `DiskImageError::UnsupportedFormat` without touching `_output`.
pub fn save_image<RWS: ReadWriteSeek>(
    _image: &DiskImage,
    _opts: &ParserWriteOptions,
    _output: &mut RWS,
) -> Result<(), DiskImageError> {
    Err(DiskImageError::UnsupportedFormat)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/mfm.rs | src/file_parsers/mfm.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
src/parsers/mfm.rs
A parser for the MFM disk image format.
MFM format images are bitstream images produced by the HxC disk emulator software.
*/
use crate::{
file_parsers::{FormatCaps, ParserReadOptions, ParserWriteCompatibility, ParserWriteOptions},
io::{ReadSeek, ReadWriteSeek},
types::{BitStreamTrackParams, DiskCh, DiskDescriptor, Platform, TrackDataEncoding, TrackDataRate, TrackDensity},
DiskImage,
DiskImageError,
DiskImageFileFormat,
LoadingCallback,
};
use binrw::{binrw, BinRead};
use strum::IntoEnumIterator;
pub struct MfmFormat;
/// On-disk file header of an HxC MFM bitstream image; read little-endian
/// via `read_le` in `detect`/`load_image`.
#[derive(Debug)]
#[binrw]
#[derive(Default)]
struct MfmFileHeader {
    /// Signature; must equal `"HXCMFM"`.
    id: [u8; 6],
    unused: u8,
    /// Track (cylinder) count per side.
    track_ct: u16,
    /// Head (side) count.
    head_ct: u8,
    /// Nominal rotation rate.
    rpm: u16,
    /// Nominal bit rate. NOTE(review): presumably kbit/s — `load_image`
    /// scales by 1000 for the disk-level rate; confirm against the HxC spec.
    bit_rate: u16,
    /// Interface type; bit 7 set selects the "advanced" track header layout.
    if_type: u8,
    /// Absolute file offset of the track header table.
    track_list_offset: u32,
}
/// Standard per-track header: location and byte length of one track's
/// MFM bitstream data.
#[derive(Debug)]
#[binrw]
struct MfmTrackHeader {
    track_no: u16,
    side_no: u8,
    /// Track data length in bytes.
    track_size: u32,
    /// Absolute file offset of the track data.
    track_offset: u32,
}
/// Advanced per-track header: adds per-track RPM and bit rate, and
/// `track_size` is a bitcell count (bytes = (size + 7) / 8) rather than a
/// byte length.
#[derive(Debug)]
#[binrw]
struct MfmAdvancedTrackHeader {
    track_no: u16,
    side_no: u8,
    /// Per-track rotation rate.
    rpm: u16,
    /// Per-track bit rate.
    bit_rate: u16,
    /// Track length in bitcells.
    track_size: u32,
    /// Absolute file offset of the track data.
    track_offset: u32,
}
/// Either flavor of per-track header, selected by bit 7 of
/// `MfmFileHeader::if_type`.
enum TrackHeader {
    Standard(MfmTrackHeader),
    Advanced(MfmAdvancedTrackHeader),
}
impl MfmFormat {
#[allow(dead_code)]
fn format() -> DiskImageFileFormat {
DiskImageFileFormat::PceBitstreamImage
}
pub(crate) fn capabilities() -> FormatCaps {
FormatCaps::empty()
}
pub(crate) fn platforms() -> Vec<Platform> {
Platform::iter().collect()
}
pub(crate) fn extensions() -> Vec<&'static str> {
vec!["mfm"]
}
pub(crate) fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
let mut detected = false;
_ = image.seek(std::io::SeekFrom::Start(0));
if let Ok(file_header) = MfmFileHeader::read_le(&mut image) {
if file_header.id == "HXCMFM".as_bytes() {
detected = true;
}
}
detected
}
pub(crate) fn can_write(_image: Option<&DiskImage>) -> ParserWriteCompatibility {
ParserWriteCompatibility::UnsupportedFormat
}
pub(crate) fn load_image<RWS: ReadSeek>(
mut read_buf: RWS,
disk_image: &mut DiskImage,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_source_format(DiskImageFileFormat::MfmBitstreamImage);
read_buf.seek(std::io::SeekFrom::Start(0))?;
let mut file_header = Default::default();
if let Ok(file_header_inner) = MfmFileHeader::read_le(&mut read_buf) {
file_header = file_header_inner;
if file_header.id != "HXCMFM".as_bytes() {
log::trace!("load_image(): File header ID not detected.");
return Err(DiskImageError::UnsupportedFormat);
}
}
let advanced_tracks = file_header.if_type & 0x80 != 0;
log::trace!(
"load_image(): TracksPerSide: {} Heads: {} RPM: {} BitRate: {} IfType: {:02X} Advanced tracks: {}",
file_header.track_ct,
file_header.head_ct,
file_header.rpm,
file_header.bit_rate,
file_header.if_type,
advanced_tracks
);
let disk_data_rate = TrackDataRate::from(file_header.bit_rate as u32 * 1000);
let total_tracks = file_header.track_ct as usize * file_header.head_ct as usize;
let mut track_headers = Vec::new();
// If the advanced header flag is set, a table of 'total_tracks' advanced track headers follows the file header.
// Otherwise, a table of 'total_tracks' standard track headers follows the file header.
read_buf.seek(std::io::SeekFrom::Start(file_header.track_list_offset as u64))?;
for _t in 0..total_tracks {
match advanced_tracks {
true => {
let track_header = MfmAdvancedTrackHeader::read_le(&mut read_buf);
match track_header {
Ok(track_header) => {
let track_no = track_header.track_no as usize;
let side_no = track_header.side_no as usize;
let track_size = track_header.track_size as usize;
let track_offset = track_header.track_offset as usize;
log::trace!(
"load_image(): Advanced Track: {} Side: {} Rpm: {} Bit rate: {} Size: {} Offset: {}",
track_no,
side_no,
track_header.rpm,
track_header.bit_rate,
track_size,
track_offset
);
track_headers.push(TrackHeader::Advanced(track_header));
}
Err(e) => {
log::error!("load_image(): Error reading track header: {:?}", e);
return Err(DiskImageError::FormatParseError);
}
}
}
false => {
let track_header = MfmTrackHeader::read_le(&mut read_buf);
match track_header {
Ok(track_header) => {
let track_no = track_header.track_no as usize;
let side_no = track_header.side_no as usize;
let track_size = track_header.track_size as usize;
let track_offset = track_header.track_offset as usize;
log::trace!(
"load_image(): Track: {} Side: {} Size: {} Offset: {}",
track_no,
side_no,
track_size,
track_offset
);
track_headers.push(TrackHeader::Standard(track_header));
}
Err(e) => {
log::error!("load_image(): Error reading track header: {:?}", e);
return Err(DiskImageError::FormatParseError);
}
}
}
}
}
// We now have a table of tracks. Read the data for each track and add it to the DiskImage.
for header in &track_headers {
let cylinder;
let head;
let track_data;
let data_rate;
let mut bitcell_ct = None;
match header {
TrackHeader::Standard(s_header) => {
let track_data_size = s_header.track_size;
log::debug!("Reading {} bytes of track data", track_data_size);
track_data = MfmFormat::read_track_data(
&mut read_buf,
s_header.track_offset as u64,
s_header.track_size as usize,
)?;
head = s_header.side_no;
cylinder = s_header.track_no as u8;
data_rate = file_header.bit_rate as u32 * 100;
}
TrackHeader::Advanced(a_header) => {
// Advanced header specifies actual bitcell count.
// Size in bytes is / 8, rounded up.
bitcell_ct = Some(a_header.track_size as usize);
let track_data_size = (a_header.track_size as usize + 7) / 8;
log::debug!("Reading {} bytes of advanced track data", track_data_size);
track_data =
MfmFormat::read_track_data(&mut read_buf, a_header.track_offset as u64, track_data_size)?;
head = a_header.side_no;
cylinder = a_header.track_no as u8;
data_rate = a_header.bit_rate as u32 * 100;
}
}
let params = BitStreamTrackParams {
schema: None,
encoding: TrackDataEncoding::Mfm,
data_rate: TrackDataRate::from(data_rate),
rpm: None,
ch: DiskCh::from((cylinder as u16, head)),
bitcell_ct,
data: &track_data,
weak: None,
hole: None,
detect_weak: false,
};
disk_image.add_track_bitstream(¶ms)?;
}
disk_image.descriptor = DiskDescriptor {
// Mfm doesn't specify platform info.
platforms: None,
geometry: DiskCh::from((file_header.track_ct, file_header.head_ct)),
data_rate: disk_data_rate,
data_encoding: TrackDataEncoding::Mfm,
density: TrackDensity::from(disk_data_rate),
rpm: None,
write_protect: None,
};
Ok(())
}
fn read_track_data<RWS: ReadSeek>(read_buf: &mut RWS, offset: u64, size: usize) -> Result<Vec<u8>, DiskImageError> {
let mut track_data = vec![0u8; size];
read_buf.seek(std::io::SeekFrom::Start(offset))?;
read_buf.read_exact(&mut track_data)?;
Ok(track_data)
}
pub fn save_image<RWS: ReadWriteSeek>(
_image: &DiskImage,
_opts: &ParserWriteOptions,
_output: &mut RWS,
) -> Result<(), DiskImageError> {
Err(DiskImageError::UnsupportedFormat)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/mfi.rs | src/file_parsers/mfi.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! A parser for the `MFI` disk image format.
//!
//! MFI format images are MAME flux images, an internal format used by the MAME emulator and
//! designed to store normalized (resolved) flux transitions from disk images.
//!
//! Flux timings are encoded in 32-bit words, 28-bits thereof used to store the flux time in
//! increments of 1/200_000_000th of a track, or 1 nanosecond increments at 300RPM.
//! This requires conversion of flux times for 360RPM images.
//!
//! Some older MFI images may use a slightly different format, indicated by a `"MESSFLOPPYIMAGE"`
//! signature in the header. These images are not currently supported.
//!
//! MFI track data is compressed with the `deflate` algorithm, thus MFI support requires the `mfi`
//! feature which enables the `flate2` dependency.
//!
//! Unformatted tracks are indicated by a track header with a zero offset and zero compressed size.
//!
//! MFI supports specification of the physical disk form factor, which appears to be set somewhat
//! reliably in the header.
//! A format variant field can specify the density of the disk, but the most prolific writer of MFI
//! files (Applesauce) did not populate this field until v2.0, so we cannot rely on it to be present.
use crate::{
file_parsers::{bitstream_flags, FormatCaps, ParserWriteCompatibility},
format_ms,
format_us,
io::{ReadSeek, ReadWriteSeek},
track::fluxstream::FluxStreamTrack,
types::DiskDescriptor,
LoadingStatus,
StandardFormat,
};
use crate::{
file_parsers::{ParserReadOptions, ParserWriteOptions},
flux::histogram::FluxHistogram,
types::{
chs::DiskCh,
DiskPhysicalDimensions,
DiskRpm,
FluxStreamTrackParams,
Platform,
TrackDataEncoding,
TrackDensity,
},
DiskImage,
DiskImageError,
DiskImageFileFormat,
LoadingCallback,
};
use crate::{
source_map::{MapDump, OptionalSourceMap, SourceValue},
types::TrackDataResolution,
};
use binrw::{binrw, BinRead};
use strum::IntoEnumIterator;
/// Signature of the older "MESS" variant of the format (not supported).
pub const OLD_SIGNATURE: &[u8; 15] = b"MESSFLOPPYIMAGE";
/// Signature of the current MAME floppy image format.
pub const NEW_SIGNATURE: &[u8; 15] = b"MAMEFLOPPYIMAGE";
// Form-factor tags matched against the 4-byte `form_factor` header field.
// NOTE(review): these must be space-padded to exactly 4 bytes to compare
// equal against the header slice — confirm padding is intact.
pub const THREE_POINT_FIVE_INCH: &[u8] = b"35 ";
pub const FIVE_POINT_TWO_FIVE_INCH: &[u8] = b"525 ";
pub const EIGHT_INCH: &[u8] = b"8 ";
/// The low 30 bits of the header `cylinders` field hold the cylinder count;
/// the top 2 bits encode the image resolution.
pub const CYLINDER_MASK: u32 = 0x3FFFFFFF;
//pub const MFI_TIME_UNIT: f64 = 1.0 / 200_000_000.0;
// Disk form factors - defined in MAME src/lib/formats/flopimg.h
impl TryFrom<&[u8]> for DiskPhysicalDimensions {
    type Error = DiskImageError;
    /// Map an MFI form-factor tag to a physical disk dimension.
    /// Unknown tags yield `UnsupportedFormat`.
    fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
        match value {
            THREE_POINT_FIVE_INCH => Ok(DiskPhysicalDimensions::Dimension3_5),
            FIVE_POINT_TWO_FIVE_INCH => Ok(DiskPhysicalDimensions::Dimension5_25),
            EIGHT_INCH => Ok(DiskPhysicalDimensions::Dimension8),
            _ => Err(DiskImageError::UnsupportedFormat),
        }
    }
}
impl TryFrom<(&[u8], DiskPhysicalDimensions)> for StandardFormat {
    type Error = DiskImageError;
    /// Guess a standard disk format from the MFI `variant` tag combined with
    /// the physical form factor. Single-density and otherwise unmappable
    /// combinations are rejected with `UnsupportedFormat`.
    fn try_from(value: (&[u8], DiskPhysicalDimensions)) -> Result<Self, Self::Error> {
        match value {
            (b"SSSD", _) => {
                // Single sided single density (8" format)
                Err(DiskImageError::UnsupportedFormat)
            }
            (b"SSDD", DiskPhysicalDimensions::Dimension5_25) => {
                // This could be 160K or 180K, we don't really know
                Ok(StandardFormat::PcFloppy180)
            }
            (b"DSSD", _) => {
                // Double sided single density (8" format)
                Err(DiskImageError::UnsupportedFormat)
            }
            (b"DSDD", DiskPhysicalDimensions::Dimension5_25) => {
                // This could be 320K or 360K, we don't really know
                Ok(StandardFormat::PcFloppy360)
            }
            (b"DSDD", DiskPhysicalDimensions::Dimension3_5) => {
                // 720K 3.5" disk
                Ok(StandardFormat::PcFloppy720)
            }
            (b"DSHD", DiskPhysicalDimensions::Dimension5_25) => {
                // MAME src doesn't seem to mention this one...
                Ok(StandardFormat::PcFloppy1200)
            }
            (b"DSHD", DiskPhysicalDimensions::Dimension3_5) => {
                // 1.44M 3.5" disk
                Ok(StandardFormat::PcFloppy1440)
            }
            (b"DSED", DiskPhysicalDimensions::Dimension3_5) => {
                // 2.88M 3.5" disk
                Ok(StandardFormat::PcFloppy2880)
            }
            _ => Err(DiskImageError::UnsupportedFormat),
        }
    }
}
pub struct MfiFormat;
/// On-disk MFI file header (little-endian).
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct MfiFileHeader {
    /// 16-byte signature; the first 15 bytes are compared against
    /// `NEW_SIGNATURE` / `OLD_SIGNATURE`.
    pub id: [u8; 16],
    /// Cylinder count in the low 30 bits; resolution in the top 2 bits
    /// (see `CYLINDER_MASK`).
    pub cylinders: u32,
    pub heads: u32,
    /// Space-padded form-factor tag (e.g. 3.5", 5.25", 8").
    pub form_factor: [u8; 4],
    /// Space-padded density/variant tag (e.g. "DSDD"); unreliable in images
    /// written by Applesauce < 2.0.
    pub variant: [u8; 4],
}
impl MapDump for MfiFileHeader {
    /// Record this file header's fields as a node tree in the image source
    /// map. Returns the `parent` node id unchanged.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        // Render the raw byte fields as text, substituting a placeholder for
        // any field that is not valid UTF-8.
        let id_str = std::str::from_utf8(self.id.as_slice()).unwrap_or("Invalid UTF-8");
        let form_factor_str = std::str::from_utf8(self.form_factor.as_slice()).unwrap_or("Invalid UTF-8");
        let variant_str = std::str::from_utf8(self.variant.as_slice()).unwrap_or("Invalid UTF-8");
        #[rustfmt::skip]
        let _info_node = map
            .add_child(parent, "MFI File Header", SourceValue::default())
            .add_child("ID", SourceValue::string(id_str))
            .add_sibling("Cylinders", SourceValue::u32(self.cylinders))
            .add_sibling("Heads", SourceValue::u32(self.heads))
            .add_sibling("Form Factor", SourceValue::string(form_factor_str))
            .add_sibling("Variant", SourceValue::string(variant_str));
        parent
    }
}
/// A [MfiTrackHeader] defines a track entry in an MFI image.
/// We pass `binrw` an index parameter so that an index can be prepended to the source map.
#[derive(Debug)]
#[binrw]
#[br(import(index: usize))]
#[brw(little)]
pub struct MfiTrackHeader {
    /// Position in the track table; not stored in the file (supplied at
    /// read time, ignored on write).
    #[bw(ignore)]
    #[br(calc = index)]
    pub index: usize,
    /// Absolute file offset of the compressed track data; 0 = empty track.
    pub offset: u32,
    /// Deflate-compressed payload size in bytes; 0 = empty track.
    pub compressed_size: u32,
    /// Size of the track data after decompression, in bytes.
    pub uncompressed_size: u32,
    pub write_splice: u32,
}
impl MapDump for MfiTrackHeader {
    /// Record this track header as a child node of `parent` in the image
    /// source map. Returns `parent` unchanged.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        #[rustfmt::skip]
        let _info_node = map
            .add_child(parent, &format!("[{}] MFI Track Header", self.index), SourceValue::default())
            .add_child("Offset", SourceValue::u32(self.offset))
            .add_sibling("Compressed Size", SourceValue::u32(self.compressed_size))
            .add_sibling("Uncompressed Size", SourceValue::u32(self.uncompressed_size))
            .add_sibling("Write Splice", SourceValue::u32(self.write_splice));
        parent
    }
}
/// One track's decompressed MFI flux payload; an empty `data` vector
/// represents an unformatted track.
pub struct MfiTrackData {
    pub ch: DiskCh,
    pub data: Vec<u8>,
}
/// Entry type decoded from a 32-bit MFI flux word (see `mfi_read_flux`).
#[derive(Debug)]
pub enum FluxEntryType {
    /// Ordinary flux transition delta.
    Flux = 0,
    /// Start of a no-flux-area zone.
    Nfa = 1,
    /// Start of an index-hole zone.
    Hole = 2,
    /// End of the currently open NFA/HOLE zone.
    End = 3,
}
/// A start/end span within a track, used to record NFA and HOLE zones.
#[allow(dead_code)]
pub struct MfiTrackZone {
    start: u32,
    end: u32,
}
impl MfiFormat {
/// The file format this parser handles.
#[allow(dead_code)]
fn format() -> DiskImageFileFormat {
    // Fixed: previously returned `PceBitstreamImage` (copy/paste from the
    // PCE parser); this parser handles MAME MFI images, matching the source
    // format set in `load_image`.
    DiskImageFileFormat::MameFloppyImage
}
/// MFI supports bitstream-level capabilities plus comments and weak bits.
pub(crate) fn capabilities() -> FormatCaps {
    bitstream_flags() | FormatCaps::CAP_COMMENT | FormatCaps::CAP_WEAK_BITS
}
/// MFI images carry no platform info; report all known platforms.
pub(crate) fn platforms() -> Vec<Platform> {
    Platform::iter().collect()
}
/// File extensions associated with this format.
pub(crate) fn extensions() -> Vec<&'static str> {
    vec!["mfi"]
}
/// Cheap signature probe: returns `true` when the stream begins with either
/// the MESS or MAME floppy image signature.
pub(crate) fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
    // A failed rewind simply makes the header read below fail.
    _ = image.seek(std::io::SeekFrom::Start(0));
    // NOTE(review): `read_be` here vs plain `read` in `load_image`. The
    // struct is declared `#[brw(little)]` (struct-level endianness takes
    // precedence) and the signature bytes are endian-agnostic anyway, but
    // confirm the intent.
    match MfiFileHeader::read_be(&mut image) {
        Ok(file_header) => {
            let sig = &file_header.id[0..15];
            sig == OLD_SIGNATURE.as_slice() || sig == NEW_SIGNATURE.as_slice()
        }
        Err(_) => false,
    }
}
/// Writing MFI images is not currently supported.
pub(crate) fn can_write(_image: Option<&DiskImage>) -> ParserWriteCompatibility {
    ParserWriteCompatibility::UnsupportedFormat
}
/// Load a MAME MFI flux image from `read_buf` into `disk`.
///
/// Validates the header, reads and sanity-checks the track header table,
/// inflates each track's deflate-compressed flux data, converts each to a
/// flux-stream track, and finally fills in the disk descriptor from the
/// first formatted track's detected RPM/density.
///
/// # Errors
/// `UnsupportedFormat` for old/invalid signatures or unknown form factors;
/// `ImageCorruptError` for inconsistent track tables, decompression
/// failures, or an unformatted track 0; plus any I/O errors.
pub(crate) fn load_image<RWS: ReadSeek>(
    mut read_buf: RWS,
    disk: &mut DiskImage,
    _opts: &ParserReadOptions,
    callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
    if let Some(ref callback_fn) = callback {
        // Let caller know to show a progress bar
        callback_fn(LoadingStatus::ProgressSupport);
    }
    disk.set_source_format(DiskImageFileFormat::MameFloppyImage);
    disk.assign_source_map(true);
    // Total file length, used to bounds-check track offsets below.
    let disk_len = read_buf.seek(std::io::SeekFrom::End(0))?;
    // Seek to start of image.
    read_buf.seek(std::io::SeekFrom::Start(0))?;
    // Read in the file header, and check its signature.
    let file_header = MfiFileHeader::read(&mut read_buf)?;
    if file_header.id[0..15] != *NEW_SIGNATURE {
        return if file_header.id[0..15] == *OLD_SIGNATURE {
            log::error!(
                "Old MFI format {:?} not implemented.",
                std::str::from_utf8(&file_header.id[0..15]).unwrap()
            );
            Err(DiskImageError::UnsupportedFormat)
        }
        else {
            log::error!("Invalid MFI file signature.");
            Err(DiskImageError::UnsupportedFormat)
        };
    }
    // Write header to source map
    file_header.write_to_map(disk.source_map_mut(), 0);
    let file_form_factor = DiskPhysicalDimensions::try_from(file_header.form_factor.as_slice()).ok();
    if let Some(form_factor) = file_form_factor {
        log::debug!("Got MFI file form factor: {:?}", form_factor);
    }
    else {
        log::error!(
            "Unknown or unsupported disk form factor: {:08X?}",
            file_header.form_factor.as_slice()
        );
        return Err(DiskImageError::UnsupportedFormat);
    }
    // The variant tag is advisory only; see comment below.
    if let Ok(standard_format) =
        StandardFormat::try_from((file_header.variant.as_slice(), file_form_factor.unwrap()))
    {
        log::debug!("Got MFI file standard format: {:?}", standard_format);
    }
    else {
        log::warn!(
            "Unknown or unsupported disk variant: {:08X?}",
            file_header.variant.as_slice()
        );
        // Unfortunately some versions of Applesauce < 2.0 failed to set this properly, so we
        // have to deal with it and can't bail.
        //return Err(DiskImageError::UnsupportedFormat);
    }
    // Low 30 bits = cylinder count; top 2 bits = resolution.
    let file_ch = DiskCh::from(((file_header.cylinders & CYLINDER_MASK) as u16, file_header.heads as u8));
    let file_resolution = file_header.cylinders >> 30;
    log::trace!("Got MFI file: ch: {} resolution: {}", file_ch, file_resolution);
    // Create a vector to hold track headers. 84 * 2 represents the maximum track and head count.
    let mut track_list: Vec<MfiTrackHeader> = Vec::with_capacity(84 * 2);
    // Sanity check - we can't have 0 cylinders or heads.
    if file_ch.c() == 0 || file_ch.h() == 0 {
        log::error!("Invalid MFI file: cylinders or heads was 0");
        return Err(DiskImageError::ImageCorruptError(
            "Cylinders or heads was 0".to_string(),
        ));
    }
    // Read track header table into our track_list vector.
    let mut last_offset: u32 = 0;
    for (ti, ch) in file_ch.iter().enumerate() {
        let track_header = MfiTrackHeader::read_args(&mut read_buf, (ti,))?;
        log::trace!(
            "Track {} at offset: {} compressed: {} uncompressed: {}",
            ch,
            track_header.offset,
            track_header.compressed_size,
            track_header.uncompressed_size
        );
        // Sanity check - we assume that tracks will be stored sequentially
        if (track_header.compressed_size > 0) && (track_header.offset < last_offset) {
            log::error!(
                "Invalid MFI file: non-zero length track {} offset {} is less than last offset ({}).",
                ch,
                track_header.offset,
                last_offset
            );
            return Err(DiskImageError::ImageCorruptError(
                "Non-empty track offset less than last offset".to_string(),
            ));
        }
        // Sanity check - track offset must be less than file length.
        if track_header.offset as u64 > disk_len {
            log::error!(
                "Invalid MFI file: track {} offset {} is greater than file length.",
                ch,
                track_header.offset
            );
            return Err(DiskImageError::ImageCorruptError(
                "Track offset greater than file length".to_string(),
            ));
        }
        // Ignore offsets of 0 - they indicate empty tracks.
        if track_header.offset != 0 {
            last_offset = track_header.offset;
        }
        // Write track header info to source map, then add to the track list.
        track_header.write_to_map(disk.source_map_mut(), 0);
        track_list.push(track_header);
    }
    log::debug!("Got {} track entries.", track_list.len());
    // Second pass: read and inflate each track's compressed flux data.
    let mut tracks: Vec<MfiTrackData> = Vec::with_capacity(track_list.len());
    let mut ch_cursor = DiskCh::new(0, 0);
    for (ti, entry) in track_list.iter().enumerate() {
        log::debug!(
            "Track {} at offset: {} compressed: {} uncompressed: {}",
            ti,
            entry.offset,
            entry.compressed_size,
            entry.uncompressed_size
        );
        if entry.offset == 0 || entry.compressed_size == 0 || entry.uncompressed_size == 0 {
            // All of the above are indicative of an empty/unformatted track.
            // Push empty trackdata
            tracks.push(MfiTrackData {
                ch: ch_cursor,
                data: Vec::new(),
            });
        }
        else {
            // Read in compressed track data.
            let mut track_data = vec![0u8; entry.compressed_size as usize];
            read_buf.seek(std::io::SeekFrom::Start(entry.offset as u64))?;
            read_buf.read_exact(&mut track_data)?;
            // Decompress track data.
            let mut decompressed_data = vec![0u8; entry.uncompressed_size as usize];
            let mut decompress = flate2::Decompress::new(true);
            match decompress.decompress(&track_data, &mut decompressed_data, flate2::FlushDecompress::Finish) {
                Ok(flate2::Status::Ok) | Ok(flate2::Status::StreamEnd) => {
                    log::debug!("Successfully decompressed track data for track {}", ch_cursor);
                }
                Ok(flate2::Status::BufError) => {
                    log::error!("Decompression buffer error reading track {} data.", ch_cursor);
                    return Err(DiskImageError::ImageCorruptError(format!(
                        "Decompression buffer error reading track {} data",
                        ch_cursor
                    )));
                }
                Err(e) => {
                    log::error!("Decompression error reading track data: {:?}", e);
                    return Err(DiskImageError::ImageCorruptError(format!(
                        "Decompression error reading track {} data: {:?}",
                        ch_cursor, e
                    )));
                }
            }
            // Push uncompressed trackdata
            // NOTE(review): `to_vec()` copies the buffer; `decompressed_data`
            // could be moved directly here.
            tracks.push(MfiTrackData {
                ch: ch_cursor,
                data: decompressed_data.to_vec(),
            });
        }
        // Advance ch
        ch_cursor.seek_next_track(file_ch);
    }
    // Third pass: convert each track's flux data into a flux-stream track.
    // RPM/density detected on the first formatted track seed the rest.
    let mut disk_density = None;
    let mut disk_rpm = None;
    let total_tracks = tracks.len();
    let mut last_data_rate = None;
    let mut last_bitcell_ct = None;
    for (ti, track) in tracks.iter().enumerate() {
        let flux_track = Self::process_track_data_new(track, disk_rpm)?;
        if flux_track.is_empty() {
            // We need a prior formatted track to size an empty placeholder.
            if last_data_rate.is_none() || last_bitcell_ct.is_none() {
                log::error!("Track 0 cannot be unformatted.");
                return Err(DiskImageError::ImageCorruptError(
                    "Track 0 cannot be unformatted.".to_string(),
                ));
            }
            log::warn!(
                "Flux track appears unformatted. Adding empty track of {:?} density",
                disk_density
            );
            // TODO: Change this to add a FluxStream resolution track when we support adding
            // unformatted fluxstream tracks.
            disk.add_empty_track(
                track.ch,
                TrackDataEncoding::Mfm,
                Some(TrackDataResolution::BitStream),
                last_data_rate.unwrap(),
                last_bitcell_ct.unwrap(),
                Some(false),
            )?;
        }
        else {
            let params = FluxStreamTrackParams {
                ch: track.ch,
                schema: None,
                encoding: None,
                clock: None,
                rpm: None,
            };
            let new_track = disk.add_track_fluxstream(flux_track, &params)?;
            let info = new_track.info();
            // NOTE(review): the first two arguments appear swapped relative
            // to the format string ("Added <ch> track <encoding>") — confirm
            // intended message order.
            log::debug!(
                "Added {} track {} containing {} bits to image...",
                track.ch,
                info.encoding,
                info.bit_length,
            );
            last_data_rate = Some(info.data_rate);
            last_bitcell_ct = Some(info.bit_length);
            if disk_rpm.is_none() {
                // Set disk RPM to the first track's RPM.
                log::debug!("Setting disk RPM to {:?}", info.rpm);
                disk_rpm = info.rpm;
            }
            if disk_density.is_none() {
                // Set disk density to the first track's density.
                log::debug!("Setting disk density to {:?}", info.density);
                disk_density = info.density;
            }
            // Progress is only reported for formatted tracks.
            if let Some(ref callback_fn) = callback {
                let progress = ti as f64 / total_tracks as f64;
                callback_fn(LoadingStatus::Progress(progress));
            }
        }
    }
    disk.descriptor = DiskDescriptor {
        // MFI specifies disk geometry, but not platform.
        platforms: None,
        geometry: file_ch,
        data_rate: disk_density.unwrap_or(TrackDensity::Double).into(),
        density: disk_density.unwrap_or(TrackDensity::Double),
        data_encoding: TrackDataEncoding::Mfm,
        rpm: disk_rpm,
        write_protect: Some(true),
    };
    Ok(())
}
/// Decode one track's raw MFI flux words into a [FluxStreamTrack].
///
/// Each little-endian 32-bit word holds an entry type and a delta (split by
/// `mfi_read_flux`). Flux deltas are accumulated in seconds (the stored unit
/// is 1ns at 300RPM); NFA and HOLE entries open zones that are closed by an
/// END entry. Flux times are then normalized (360RPM streams are rescaled)
/// and loaded into a single-revolution flux track.
///
/// `rpm_hint` carries the RPM detected on an earlier track, if any; when
/// absent the RPM is inferred from this track's flux histogram.
pub fn process_track_data_new(
    track: &MfiTrackData,
    rpm_hint: Option<DiskRpm>,
) -> Result<FluxStreamTrack, DiskImageError> {
    // One flux entry per 32-bit word.
    let mut fluxes = Vec::with_capacity(track.data.len() / 4);
    let mut total_flux_time = 0.0;
    let mut nfa_zones = Vec::new();
    let mut hole_zones = Vec::new();
    let mut flux_ct = 0;
    // At most one zone of each kind may be open at a time.
    let mut current_nfa_zone = None;
    let mut current_hole_zone = None;
    let mut track_rpm = rpm_hint;
    for u32_bytes in track.data.chunks_exact(4) {
        let flux_entry = u32::from_le_bytes(u32_bytes.try_into().unwrap());
        let (flux_type, flux_delta) = MfiFormat::mfi_read_flux(flux_entry);
        match flux_type {
            FluxEntryType::Flux => {
                // Process flux entry: stored delta is in nanoseconds.
                let flux_delta_f64 = flux_delta as f64 * 1e-9;
                //log::trace!("Flux entry: {} {}", flux_ct, format_us!(flux_delta_f64));
                total_flux_time += flux_delta_f64;
                fluxes.push(flux_delta_f64);
                flux_ct += 1;
            }
            FluxEntryType::Nfa => {
                // Process NFA entry
                if current_nfa_zone.is_some() {
                    log::warn!("NFA entry found while already in NFA zone.");
                    if current_hole_zone.is_some() {
                        log::error!("HOLE entry found while already in NFA zone.");
                    }
                }
                else {
                    // Start NFA zone
                    current_nfa_zone = Some(MfiTrackZone {
                        start: flux_delta,
                        end: 0,
                    });
                }
            }
            FluxEntryType::Hole => {
                // Process hole entry
                if current_hole_zone.is_some() {
                    log::warn!("HOLE entry found while already in HOLE zone.");
                    if current_nfa_zone.is_some() {
                        log::error!("NFA entry found while already in HOLE zone.");
                    }
                }
                else {
                    // Start HOLE zone
                    current_hole_zone = Some(MfiTrackZone {
                        start: flux_delta,
                        end: 0,
                    });
                }
            }
            FluxEntryType::End => {
                // End of zone. NFA takes precedence if (erroneously) both
                // zone kinds are open.
                if current_nfa_zone.is_some() {
                    // End NFA zone
                    current_nfa_zone.as_mut().unwrap().end = flux_delta;
                    nfa_zones.push(current_nfa_zone.take().unwrap());
                }
                else if current_hole_zone.is_some() {
                    // End HOLE zone
                    current_hole_zone.as_mut().unwrap().end = flux_delta;
                    hole_zones.push(current_hole_zone.take().unwrap());
                }
                else {
                    log::warn!("END ZONE entry found without an active zone.");
                }
            }
        }
    }
    // Normalize flux times. MFI technically stores flux times in angles, which is directly
    // convertable to times at 300RPM, but will skew times at 360RPM.
    if let Some((index_time, rpm)) = MfiFormat::normalize_flux_times(&mut fluxes, None) {
        log::trace!("Normalized index time: {} and rpm: {}", format_ms!(index_time), rpm);
        total_flux_time = index_time;
        if track_rpm.is_none() {
            track_rpm = Some(rpm);
        }
    }
    log::trace!(
        "Track {} has {} flux entries over {}, {} NFA zones, and {} HOLE zones.",
        track.ch,
        flux_ct,
        format_ms!(total_flux_time),
        nfa_zones.len(),
        hole_zones.len()
    );
    // NOTE(review): nfa_zones/hole_zones are collected and logged but not yet
    // applied to the resulting track.
    // let mut pll = Pll::from_preset(PllPreset::Aggressive);
    // pll.set_clock(1_000_000.0, None);
    //let mut flux_track = FluxStreamTrack::new(1.0 / 2e-6);
    let mut flux_track = FluxStreamTrack::new();
    flux_track.add_revolution(track.ch, &fluxes, track_rpm.unwrap_or_default().index_time_ms());
    //let flux_stream = flux_track.revolution_mut(0).unwrap();
    //let rev_stats = flux_stream.decode_direct(&mut pll);
    //let rev_encoding = flux_stream.encoding();
    // let new_track = disk_image.add_track_fluxstream(track.ch, flux_track, None, None);
    //
    // let rev_density = match rev_stats.detect_density(true) {
    //     Some(d) => {
    //         log::debug!("Revolution {} density: {:?}", 0, d);
    //         d
    //     }
    //     None => {
    //         log::error!("Unable to detect track density!");
    //         //return Err(DiskImageError::IncompatibleImage);
    //         DiskDensity::Double
    //     }
    // };
    //
    // flux_track.set_density(rev_density);
    // flux_track.normalize();
    Ok(flux_track)
}
/// Detect 360RPM flux streams and normalize the times, returning the normalized index time.
pub fn normalize_flux_times(fts: &mut [f64], known_rpm: Option<DiskRpm>) -> Option<(f64, DiskRpm)> {
if let Some(rpm) = known_rpm {
// If we're explicitly given an RPM value (detected from a previous track)
// we can skip the detection process.
return match rpm {
DiskRpm::Rpm300(_) => None,
DiskRpm::Rpm360(_) => Some((Self::adjust_flux_times(fts, 300.0 / 360.0), rpm)),
_ => {
log::warn!("MfiFormat::normalize_flux_times(): Unsupported RPM value: {:?}", rpm);
None
}
};
}
// Create a histogram of the flux times over the entire track.
let mut hist = FluxHistogram::new(fts, 0.02);
//hist.print_horizontal_histogram_with_labels(16);
// Retrieve the base transition time.
let base_opt = hist.base_transition_time();
if let Some(base) = base_opt {
let clock = base / 2.0;
// Try to detect the RPM from the clock skew.
let detected_rpm = (clock * 1e6) * 300.0;
log::debug!(
"MfiFormat::normalize_flux_times(): Detected clock: {} rpm: {:.2}",
format_us!(clock),
detected_rpm
);
if (340.0..380.00).contains(&detected_rpm) {
// Detected 360RPM
let normal_index_time = Self::adjust_flux_times(fts, 300.0 / 360.0);
Some((normal_index_time, DiskRpm::Rpm360(detected_rpm / 360.0)))
}
else if (280.0..320.00).contains(&detected_rpm) {
// Detected 300RPM
return None;
}
else {
log::warn!(
"MfiFormat::normalize_flux_times(): Detected RPM {} is out of range.",
detected_rpm
);
None
}
}
else {
None
}
}
pub fn adjust_flux_times(fts: &mut [f64], factor: f64) -> f64 {
let mut total_time = 0.0;
for ft in fts.iter_mut() {
*ft *= factor;
total_time += *ft;
}
total_time
}
pub fn mfi_read_flux(flux_entry: u32) -> (FluxEntryType, u32) {
let flux_type = match flux_entry >> 28 {
0 => FluxEntryType::Flux,
1 => FluxEntryType::Nfa,
2 => FluxEntryType::Hole,
3 => FluxEntryType::End,
_ => unreachable!(),
};
(flux_type, flux_entry & 0x3FFFFFFF)
}
pub fn save_image<RWS: ReadWriteSeek>(
_image: &DiskImage,
_opts: &ParserWriteOptions,
_output: &mut RWS,
) -> Result<(), DiskImageError> {
Err(DiskImageError::UnsupportedFormat)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/raw.rs | src/file_parsers/raw.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use std::cmp::Ordering;
use crate::{
detect::chs_from_raw_size,
diskimage::DiskImage,
file_parsers::{FormatCaps, ParserReadOptions, ParserWriteCompatibility, ParserWriteOptions},
io::{ReadSeek, ReadWriteSeek},
prelude::DiskChs,
track_schema::system34::System34Standard,
types::{
chs::{DiskChsn, DiskChsnQuery},
AddSectorParams,
DiskCh,
DiskDescriptor,
MetaSectorTrackParams,
Platform,
TrackDataResolution,
TrackDensity,
},
util::get_length,
DiskImageError,
DiskImageFileFormat,
LoadingCallback,
StandardFormat,
};
pub struct RawFormat;
impl RawFormat {
#[allow(dead_code)]
pub(crate) fn format() -> DiskImageFileFormat {
DiskImageFileFormat::RawSectorImage
}
pub(crate) fn extensions() -> Vec<&'static str> {
const BASE_EXTENSIONS: &[&str] = &["img", "ima", "dsk", "bin"];
#[allow(unused_mut)]
let mut extra_extensions = Vec::new();
#[cfg(feature = "adf")]
extra_extensions.push("adf");
#[cfg(feature = "st")]
extra_extensions.push("st");
[BASE_EXTENSIONS, &extra_extensions].concat()
}
pub(crate) fn platforms() -> Vec<Platform> {
vec![Platform::IbmPc, Platform::Amiga]
}
pub(crate) fn capabilities() -> FormatCaps {
FormatCaps::empty()
}
pub(crate) fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
let raw_len = get_length(&mut image).map_or(0, |l| l as usize);
chs_from_raw_size(raw_len).is_some()
}
pub(crate) fn can_write(image: Option<&DiskImage>) -> ParserWriteCompatibility {
image
.map(|image| {
if !image.analysis.image_caps.is_empty() {
// RAW sector images support no capability flags.
log::warn!("RAW sector images do not support capability flags.");
ParserWriteCompatibility::DataLoss
}
else {
ParserWriteCompatibility::Ok
}
})
.unwrap_or(ParserWriteCompatibility::Ok)
}
pub(crate) fn load_image<RWS: ReadSeek>(
mut raw: RWS,
disk_image: &mut DiskImage,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_source_format(DiskImageFileFormat::RawSectorImage);
// Assign the disk geometry or return error.
let raw_len = get_length(&mut raw).map_err(|_e| DiskImageError::UnknownFormat)? as usize;
let floppy_format = match StandardFormat::try_from(raw_len) {
Ok(floppy_format) => {
log::trace!("Raw::load_image(): Detected format {}", floppy_format);
floppy_format
}
Err(e) => {
log::error!("Raw::load_image(): Error detecting format: {}", e);
return Err(DiskImageError::UnknownFormat);
}
};
let disk_chs = floppy_format.chs();
let track_size = disk_chs.s() as usize * floppy_format.sector_size();
let track_ct = raw_len / track_size;
if disk_chs.c() as usize * disk_chs.h() as usize != track_ct {
log::error!("Raw::load_image(): Calculated track count does not match standard image.");
return Err(DiskImageError::UnknownFormat);
}
let track_ct_overflow = raw_len % track_size;
if track_ct_overflow != 0 {
return Err(DiskImageError::UnknownFormat);
}
match Platform::from(floppy_format) {
Platform::Amiga => {
#[cfg(feature = "adf")]
{
log::warn!(
"Raw::load_image(): ADF will be loaded as MetaSector until Amiga formatting is implemented."
);
RawFormat::load_as_metasector(raw, disk_image, floppy_format, _opts, _callback)
}
#[cfg(not(feature = "adf"))]
{
log::error!("Raw::load_image(): Detected ADF raw image but `adf` feature not enabled.");
Err(DiskImageError::UnsupportedFormat)
}
}
Platform::IbmPc => RawFormat::load_as_bitstream(raw, disk_image, floppy_format, _opts, _callback),
_ => {
log::error!(
"Raw::load_image(): Unsupported format/platform: {}/{}",
floppy_format,
Platform::from(floppy_format)
);
Err(DiskImageError::UnsupportedFormat)
}
}
}
fn load_as_bitstream<RWS: ReadSeek>(
mut raw: RWS,
disk_image: &mut DiskImage,
floppy_format: StandardFormat,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_resolution(TrackDataResolution::BitStream);
let layout = floppy_format.layout();
log::debug!("Raw::load_as_bitstream(): Disk geometry: {}", layout);
let data_rate = floppy_format.data_rate();
let data_encoding = floppy_format.encoding();
let bitcell_ct = floppy_format.bitcell_ct();
let rpm = floppy_format.rpm();
let gap3 = floppy_format.gap3();
raw.seek(std::io::SeekFrom::Start(0))?;
// Despite being a sector-based format, we convert to a bitstream based image by providing
// the raw sector data to each track's format function.
let mut sector_buffer = vec![0u8; floppy_format.sector_size()];
// Iterate through all standard tracks
for DiskCh { c, h } in layout.ch().iter() {
log::trace!("Raw::load_as_bitstream(): Adding new track: c:{} h:{}", c, h);
let new_track_idx = disk_image.add_empty_track(
DiskCh::new(c, h),
data_encoding,
Some(TrackDataResolution::BitStream),
data_rate,
bitcell_ct,
Some(false),
)?;
let mut format_buffer = Vec::with_capacity(layout.s() as usize);
let mut track_pattern = Vec::with_capacity(layout.size() * layout.s() as usize);
log::trace!("Raw::load_as_bitstream(): Formatting track with {} sectors", layout.s());
for s in 0..layout.s() {
let s_adj = s + layout.s_off();
let sector_chsn = DiskChsn::new(c, h, s_adj, layout.n());
raw.read_exact(&mut sector_buffer)?;
//log::warn!("Raw::load_image(): Sector data: {:X?}", sector_buffer);
track_pattern.extend(sector_buffer.clone());
format_buffer.push(sector_chsn);
}
let td = disk_image
.track_by_idx_mut(new_track_idx)
.ok_or(DiskImageError::FormatParseError)?;
//log::warn!("Raw::load_image(): Track pattern: {:X?}", track_pattern);
td.format(System34Standard::Ibm, format_buffer, &track_pattern, gap3)?;
}
disk_image.descriptor = DiskDescriptor {
// RAW sector images have no platform information and can be ambiguous.
// Let platform be auto-detected.
platforms: None,
geometry: layout.ch(),
data_rate,
data_encoding,
density: TrackDensity::from(data_rate),
rpm: Some(rpm),
write_protect: None,
};
Ok(())
}
fn load_as_metasector<RWS: ReadSeek>(
mut raw: RWS,
disk_image: &mut DiskImage,
floppy_format: StandardFormat,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_resolution(TrackDataResolution::MetaSector);
let layout = floppy_format.layout();
log::trace!("Raw::load_as_metasector(): Disk Geometry: {}", layout);
let data_rate = floppy_format.data_rate();
let data_encoding = floppy_format.encoding();
let rpm = floppy_format.rpm();
let mut sector_buffer = vec![0u8; floppy_format.sector_size()];
// Seek to the beginning of image reader
raw.seek(std::io::SeekFrom::Start(0))?;
// Iterate through all sectors in the standard format
for ch in layout.ch_iter() {
log::trace!("Raw::load_as_metasector(): Adding new track: {}", ch);
let params = MetaSectorTrackParams {
ch,
encoding: data_encoding,
data_rate,
};
let new_track = disk_image.add_track_metasector(¶ms)?;
for s in 0..layout.s() {
let adj_s = s + layout.s_off();
log::trace!("Raw::load_as_metasector(): Adding sector {} to track", adj_s);
raw.read_exact(&mut sector_buffer)?;
let chs = DiskChs::from((ch, adj_s));
let sector_params = AddSectorParams {
id_chsn: DiskChsn::from((chs, floppy_format.layout().n())),
data: §or_buffer,
weak_mask: None,
hole_mask: None,
attributes: Default::default(),
alternate: false,
bit_index: None,
};
new_track.add_sector(§or_params)?;
}
}
disk_image.descriptor = DiskDescriptor {
platforms: None,
geometry: layout.ch(),
data_rate,
data_encoding,
density: TrackDensity::from(data_rate),
rpm: Some(rpm),
write_protect: None,
};
Ok(())
}
pub fn save_image<RWS: ReadWriteSeek>(
disk: &mut DiskImage,
_opts: &ParserWriteOptions,
output: &mut RWS,
) -> Result<(), DiskImageError> {
let format = disk.closest_format(true).ok_or(DiskImageError::UnsupportedFormat)?;
log::debug!("Raw::save_image(): Using format: {}", format);
// An IMG file basically represents DOS's view of a disk. Non-standard sectors may as well not
// exist. The same basically applies for ADF files as well.
// Write out the sectors in the standard order using DiskChsn::iter().
for chsn in format.layout().chsn_iter() {
match disk.read_sector_basic(chsn.ch(), DiskChsnQuery::from(chsn), None) {
Ok(read_buf) => {
log::trace!("Raw::save_image(): Read {} bytes from sector: {}", read_buf.len(), chsn);
let mut new_buf = read_buf.to_vec();
match new_buf.len().cmp(&chsn.n_size()) {
Ordering::Greater => {
log::warn!(
"Raw::save_image(): Sector {} is too large ({}). Truncating to {} bytes",
chsn,
new_buf.len(),
chsn.n_size()
);
new_buf.truncate(chsn.n_size());
}
Ordering::Less => {
log::warn!(
"Raw::save_image(): Sector {} is too small ({}). Padding with to {} bytes",
chsn,
new_buf.len(),
chsn.n_size()
);
new_buf.extend(vec![0u8; chsn.n_size() - new_buf.len()]);
}
Ordering::Equal => {}
}
log::trace!("Raw::save_image(): Writing sector to output: {}...", chsn);
//println!("Raw::save_image(): Writing chs: {}...", chs);
output.write_all(new_buf.as_ref())?;
}
Err(e) => {
log::error!("Raw::save_image(): Error reading sector {}: {}", chsn, e);
return Err(DiskImageError::DataError);
}
}
}
output.flush()?;
Ok(())
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/mod.rs | src/file_parsers/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
pub mod r#as;
pub mod compression;
pub mod f86;
pub mod hfe;
pub mod imd;
#[cfg(feature = "ipf")]
pub mod ipf;
pub mod kryoflux;
#[cfg(feature = "mfi")]
pub mod mfi;
pub mod mfm;
pub mod pce;
pub mod raw;
pub mod scp;
pub mod tc;
#[cfg(feature = "td0")]
pub mod td0;
#[cfg(feature = "async")]
use std::sync::{Arc, Mutex};
#[cfg(feature = "moof")]
use r#as::moof;
#[cfg(feature = "woz")]
use r#as::woz;
use pce::{pfi, pri, psi};
use crate::{
io::{ReadSeek, ReadWriteSeek, SeekFrom},
types::Platform,
DiskImage,
DiskImageError,
DiskImageFileFormat,
LoadingCallback,
};
use bitflags::bitflags;
use strum::IntoEnumIterator;
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
pub struct ParserReadOptions {
platform: Option<Platform>, // If we know the platform, we can give it to the parser as a hint if the platform is otherwise ambiguous.
flags: ReadFlags,
}
#[allow(dead_code)]
#[derive(Clone, Debug, Default)]
pub struct ParserWriteOptions {
platform: Option<Platform>, // If we know the platform, we can give it to the parser as a hint if the platform is otherwise ambiguous.
}
bitflags! {
/// Bit flags representing reading options passed to a disk image file parser.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[rustfmt::skip]
pub struct ReadFlags: u32 {
const ERRORS_TO_WEAK_BITS = 0b0000_0000_0000_0001; // Convert MFM errors to weak bits
const NFA_TO_WEAK_BITS = 0b0000_0000_0000_0010; // Convert NFA zones to weak bits
const DETECT_WEAK_BITS = 0b0000_0000_0000_0100; // Analyze multiple revolutions for weak bits (requires flux image)
const WEAK_BITS_TO_HOLES = 0b0000_0000_0000_1000; // Convert weak bits to holes
const CREATE_SOURCE_MAP = 0b0000_0000_0001_0000; // Generate a SourceMap for the image (not all parsers support)
}
}
bitflags! {
/// Bit flags representing writing options passed to a disk image file parser.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[rustfmt::skip]
pub struct WriteFlags: u32 {
const REUSE_SOURCE_FLUX = 0b0000_0000_0000_0001; // Reuse existing flux data if track is unmodified
const RESOLVE_FLUX = 0b0000_0000_0000_0010; // Write a single revolution to a flux image
}
}
bitflags! {
/// Bit flags representing the capabilities of a specific image format. Used to determine if a
/// specific image format can represent a particular [DiskImage].
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[rustfmt::skip]
pub struct FormatCaps: u32 {
const CAP_VARIABLE_SPT = 0b0000_0000_0000_0001; // Can support variable sector counts per track
const CAP_VARIABLE_SSPT = 0b0000_0000_0000_0010; // Can support variable sector sizes
const CAP_ADDRESS_CRC = 0b0000_0000_0000_0100; // Encodes sector address mark CRC status
const CAP_DATA_CRC = 0b0000_0000_0000_1000; // Encodes sector data CRC status
const CAP_DATA_DELETED = 0b0000_0000_0001_0000; // Encodes 'Deleted address' marks
const CAP_SID_OVERRIDE = 0b0000_0000_0010_0000; // Can specify the sector ID parameters (chs, size) independent of sector order
const CAP_COMMENT = 0b0000_0000_0100_0000; // Can store a text comment field
const CAP_TRACK_ENCODING = 0b0000_0000_1000_0000; // Can store per-track encoding type
const CAP_TRACK_DATA_RATE = 0b0000_0001_0000_0000; // Can store per-track data rate
const CAP_WEAK_BITS = 0b0000_0010_0000_0000; // Can store weak bit information
const CAP_HOLES = 0b0000_0100_0000_0000; // Can store hole information
const CAP_ENCODING_FM = 0b0000_1000_0000_0000; // Can store FM encoding
const CAP_ENCODING_MFM = 0b0001_0000_0000_0000; // Can store MFM encoding
const CAP_ENCODING_GCR = 0b0010_0000_0000_0000; // Can store GCR encoding
const CAP_NO_DAM = 0b0100_0000_0000_0000; // Can store IDAM with no DAM
}
}
/// Return a set of FormatCaps flags implicitly supported by the nature of any bitstream format.
pub fn bitstream_flags() -> FormatCaps {
FormatCaps::CAP_VARIABLE_SPT
| FormatCaps::CAP_VARIABLE_SSPT
| FormatCaps::CAP_ADDRESS_CRC
| FormatCaps::CAP_DATA_CRC
| FormatCaps::CAP_DATA_DELETED
| FormatCaps::CAP_SID_OVERRIDE
| FormatCaps::CAP_NO_DAM
}
/// Describes the basic write compatibility of a [DiskImage] disk image as determined by a specific
/// file format parser.
/// - `Ok`: The image is compatible with the parser and can be read or written without data loss.
/// - `DataLoss`: The image is compatible with the parser, but some data may be lost when reading or
/// writing.
/// - `Incompatible`: The image is not compatible with the parser and cannot be written.
/// - `UnsupportedFormat`: The parser does not support writing.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum ParserWriteCompatibility {
Ok,
DataLoss,
Incompatible,
UnsupportedFormat,
}
/// Returns a list of advertised file extensions supported by available image format parsers.
/// This is a convenience function for use in file dialogs - internal image detection is not based
/// on file extension, but by image file content (and occasionally size, in the case of raw sector
/// images)
pub fn supported_extensions() -> Vec<&'static str> {
let mut ext_vec: Vec<&str> = DiskImageFileFormat::iter().flat_map(|f| f.extensions()).collect();
ext_vec.sort();
ext_vec.dedup();
ext_vec
}
/// Returns a DiskImageFormat enum variant based on the file extension provided. If the extension
/// is not recognized, None is returned.
pub fn format_from_ext(ext: &str) -> Option<DiskImageFileFormat> {
for format in DiskImageFileFormat::iter() {
if format.extensions().contains(&ext.to_lowercase().as_str()) {
return Some(format);
}
}
None
}
/// Returns a list of image formats and their associated file extensions that support the specified
/// capabilities.
pub fn formats_from_caps(caps: FormatCaps) -> Vec<(DiskImageFileFormat, Vec<String>)> {
// if caps.is_empty() {
// log::warn!("formats_from_caps(): called with empty capabilities");
// }
let format_vec = DiskImageFileFormat::iter()
.filter(|f| caps.is_empty() || f.capabilities().contains(caps))
.map(|f| (f, f.extensions().iter().map(|s| s.to_string()).collect()))
.collect();
format_vec
}
pub fn filter_writable(image: &DiskImage, formats: Vec<DiskImageFileFormat>) -> Vec<DiskImageFileFormat> {
formats
.into_iter()
.filter(|f| matches!(f.can_write(Some(image)), ParserWriteCompatibility::Ok))
.collect()
}
/// A trait interface for defining a disk image file format parser.
/// An [ImageFormatParser] should not be used directly - a disk image should be loaded using an [ImageLoader] struct.
pub trait ImageFormatParser {
/// Return the [DiskImageFileFormat] enum variant associated with the parser.
fn format(&self) -> DiskImageFileFormat;
/// Return the capability flags for this format.
fn capabilities(&self) -> FormatCaps;
/// Return a list of [Platform]s that are supported by the image format.
fn platforms(&self) -> Vec<Platform>;
/// Detect and return true if the image is of a format that the parser can read.
fn detect<RWS: ReadSeek>(&self, image_buf: RWS) -> bool;
/// Return a list of file extensions associated with the parser.
fn extensions(&self) -> Vec<&'static str>;
/// Load a disk image file into an empty [DiskImage], or append a disk image file to an
/// existing [DiskImage].
fn load_image<RWS: ReadSeek>(
&self,
read_buf: RWS,
image: &mut DiskImage,
opts: &ParserReadOptions,
callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError>;
/// Load a disk image file into an empty [DiskImage], or append a disk image file to an
/// existing [DiskImage]. This function is async and should be used in async contexts.
#[cfg(feature = "async")]
#[allow(async_fn_in_trait)]
async fn load_image_async<RWS: ReadSeek + Send + 'static>(
&self,
read_buf: RWS,
image: Arc<Mutex<DiskImage>>,
opts: &ParserReadOptions,
callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError>;
/// Determine if the parser can write an image back to its own format.
/// # Arguments
/// * `image` - An `Option<DiskImage>` either specifying the [DiskImage] to check for write
/// compatibility, or `None` if the parser should check for general write support.
fn can_write(&self, image: Option<&DiskImage>) -> ParserWriteCompatibility;
fn save_image<RWS: ReadWriteSeek>(
self,
image: &mut DiskImage,
opts: &ParserWriteOptions,
image_buf: &mut RWS,
) -> Result<(), DiskImageError>;
}
impl ImageFormatParser for DiskImageFileFormat {
fn format(&self) -> DiskImageFileFormat {
*self
}
fn capabilities(&self) -> FormatCaps {
match self {
DiskImageFileFormat::RawSectorImage => raw::RawFormat::capabilities(),
DiskImageFileFormat::ImageDisk => imd::ImdFormat::capabilities(),
#[cfg(feature = "td0")]
DiskImageFileFormat::TeleDisk => td0::Td0Format::capabilities(),
DiskImageFileFormat::PceSectorImage => psi::PsiFormat::capabilities(),
DiskImageFileFormat::PceBitstreamImage => pri::PriFormat::capabilities(),
DiskImageFileFormat::MfmBitstreamImage => mfm::MfmFormat::capabilities(),
DiskImageFileFormat::HfeImage => hfe::HfeFormat::capabilities(),
DiskImageFileFormat::F86Image => f86::F86Format::capabilities(),
DiskImageFileFormat::TransCopyImage => tc::TCFormat::capabilities(),
DiskImageFileFormat::SuperCardPro => scp::ScpFormat::capabilities(),
DiskImageFileFormat::PceFluxImage => pfi::PfiFormat::capabilities(),
DiskImageFileFormat::KryofluxStream => kryoflux::KfxFormat::capabilities(),
#[cfg(feature = "mfi")]
DiskImageFileFormat::MameFloppyImage => mfi::MfiFormat::capabilities(),
#[cfg(feature = "ipf")]
DiskImageFileFormat::IpfImage => ipf::IpFormat::capabilities(),
#[cfg(feature = "moof")]
DiskImageFileFormat::MoofImage => moof::MoofFormat::capabilities(),
#[cfg(feature = "woz")]
DiskImageFileFormat::WozImage => woz::WozFormat::capabilities(),
}
}
fn platforms(&self) -> Vec<Platform> {
match self {
DiskImageFileFormat::RawSectorImage => raw::RawFormat::platforms(),
DiskImageFileFormat::ImageDisk => imd::ImdFormat::platforms(),
#[cfg(feature = "td0")]
DiskImageFileFormat::TeleDisk => td0::Td0Format::platforms(),
DiskImageFileFormat::PceSectorImage => psi::PsiFormat::platforms(),
DiskImageFileFormat::PceBitstreamImage => pri::PriFormat::platforms(),
DiskImageFileFormat::MfmBitstreamImage => mfm::MfmFormat::platforms(),
DiskImageFileFormat::HfeImage => hfe::HfeFormat::platforms(),
DiskImageFileFormat::F86Image => f86::F86Format::platforms(),
DiskImageFileFormat::TransCopyImage => tc::TCFormat::platforms(),
DiskImageFileFormat::SuperCardPro => scp::ScpFormat::platforms(),
DiskImageFileFormat::PceFluxImage => pfi::PfiFormat::platforms(),
DiskImageFileFormat::KryofluxStream => kryoflux::KfxFormat::platforms(),
#[cfg(feature = "mfi")]
DiskImageFileFormat::MameFloppyImage => mfi::MfiFormat::platforms(),
#[cfg(feature = "ipf")]
DiskImageFileFormat::IpfImage => ipf::IpFormat::platforms(),
#[cfg(feature = "moof")]
DiskImageFileFormat::MoofImage => moof::MoofFormat::platforms(),
#[cfg(feature = "woz")]
DiskImageFileFormat::WozImage => woz::WozFormat::platforms(),
}
}
fn detect<RWS: ReadSeek>(&self, image_buf: RWS) -> bool {
match self {
DiskImageFileFormat::RawSectorImage => raw::RawFormat::detect(image_buf),
DiskImageFileFormat::ImageDisk => imd::ImdFormat::detect(image_buf),
#[cfg(feature = "td0")]
DiskImageFileFormat::TeleDisk => td0::Td0Format::detect(image_buf),
DiskImageFileFormat::PceSectorImage => psi::PsiFormat::detect(image_buf),
DiskImageFileFormat::PceBitstreamImage => pri::PriFormat::detect(image_buf),
DiskImageFileFormat::MfmBitstreamImage => mfm::MfmFormat::detect(image_buf),
DiskImageFileFormat::HfeImage => hfe::HfeFormat::detect(image_buf),
DiskImageFileFormat::F86Image => f86::F86Format::detect(image_buf),
DiskImageFileFormat::TransCopyImage => tc::TCFormat::detect(image_buf),
DiskImageFileFormat::SuperCardPro => scp::ScpFormat::detect(image_buf),
DiskImageFileFormat::PceFluxImage => pfi::PfiFormat::detect(image_buf),
DiskImageFileFormat::KryofluxStream => kryoflux::KfxFormat::detect(image_buf),
#[cfg(feature = "mfi")]
DiskImageFileFormat::MameFloppyImage => mfi::MfiFormat::detect(image_buf),
#[cfg(feature = "ipf")]
DiskImageFileFormat::IpfImage => ipf::IpFormat::detect(image_buf),
#[cfg(feature = "moof")]
DiskImageFileFormat::MoofImage => moof::MoofFormat::detect(image_buf),
#[cfg(feature = "woz")]
DiskImageFileFormat::WozImage => woz::WozFormat::detect(image_buf),
}
}
fn extensions(&self) -> Vec<&'static str> {
match self {
DiskImageFileFormat::RawSectorImage => raw::RawFormat::extensions(),
DiskImageFileFormat::ImageDisk => imd::ImdFormat::extensions(),
#[cfg(feature = "td0")]
DiskImageFileFormat::TeleDisk => td0::Td0Format::extensions(),
DiskImageFileFormat::PceSectorImage => psi::PsiFormat::extensions(),
DiskImageFileFormat::PceBitstreamImage => pri::PriFormat::extensions(),
DiskImageFileFormat::MfmBitstreamImage => mfm::MfmFormat::extensions(),
DiskImageFileFormat::HfeImage => hfe::HfeFormat::extensions(),
DiskImageFileFormat::F86Image => f86::F86Format::extensions(),
DiskImageFileFormat::TransCopyImage => tc::TCFormat::extensions(),
DiskImageFileFormat::SuperCardPro => scp::ScpFormat::extensions(),
DiskImageFileFormat::PceFluxImage => pfi::PfiFormat::extensions(),
DiskImageFileFormat::KryofluxStream => kryoflux::KfxFormat::extensions(),
#[cfg(feature = "mfi")]
DiskImageFileFormat::MameFloppyImage => mfi::MfiFormat::extensions(),
#[cfg(feature = "ipf")]
DiskImageFileFormat::IpfImage => ipf::IpFormat::extensions(),
#[cfg(feature = "moof")]
DiskImageFileFormat::MoofImage => moof::MoofFormat::extensions(),
#[cfg(feature = "woz")]
DiskImageFileFormat::WozImage => woz::WozFormat::extensions(),
}
}
fn load_image<RWS: ReadSeek>(
&self,
read_buf: RWS,
image: &mut DiskImage,
opts: &ParserReadOptions,
callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
match self {
DiskImageFileFormat::RawSectorImage => raw::RawFormat::load_image(read_buf, image, opts, callback),
DiskImageFileFormat::ImageDisk => imd::ImdFormat::load_image(read_buf, image, opts, callback),
#[cfg(feature = "td0")]
DiskImageFileFormat::TeleDisk => td0::Td0Format::load_image(read_buf, image, opts, callback),
DiskImageFileFormat::PceSectorImage => psi::PsiFormat::load_image(read_buf, image, opts, callback),
DiskImageFileFormat::PceBitstreamImage => pri::PriFormat::load_image(read_buf, image, opts, callback),
DiskImageFileFormat::MfmBitstreamImage => mfm::MfmFormat::load_image(read_buf, image, opts, callback),
DiskImageFileFormat::HfeImage => hfe::HfeFormat::load_image(read_buf, image, opts, callback),
DiskImageFileFormat::F86Image => f86::F86Format::load_image(read_buf, image, opts, callback),
DiskImageFileFormat::TransCopyImage => tc::TCFormat::load_image(read_buf, image, opts, callback),
DiskImageFileFormat::SuperCardPro => scp::ScpFormat::load_image(read_buf, image, opts, callback),
DiskImageFileFormat::PceFluxImage => pfi::PfiFormat::load_image(read_buf, image, opts, callback),
DiskImageFileFormat::KryofluxStream => kryoflux::KfxFormat::load_image(read_buf, image, opts, callback),
#[cfg(feature = "mfi")]
DiskImageFileFormat::MameFloppyImage => mfi::MfiFormat::load_image(read_buf, image, opts, callback),
#[cfg(feature = "ipf")]
DiskImageFileFormat::IpfImage => ipf::IpFormat::load_image(read_buf, image, opts, callback),
#[cfg(feature = "moof")]
DiskImageFileFormat::MoofImage => moof::MoofFormat::load_image(read_buf, image, opts, callback),
#[cfg(feature = "woz")]
DiskImageFileFormat::WozImage => woz::WozFormat::load_image(read_buf, image, opts, callback),
}
}
#[cfg(feature = "async")]
async fn load_image_async<RWS: ReadSeek + Send + 'static>(
&self,
read_buf: RWS,
image: Arc<Mutex<DiskImage>>,
opts: &ParserReadOptions,
callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
// For WASM, use `spawn_local` to run synchronously on the main thread
#[cfg(target_arch = "wasm32")]
{
let self_clone = self.clone();
let opts_clone = opts.clone();
let task = async move {
let mut img = image.lock().unwrap();
match self_clone.load_image(read_buf, &mut img, &opts_clone, callback) {
Ok(_) => (),
Err(e) => log::error!("Error loading image: {:?}", e),
}
};
wasm_bindgen_futures::spawn_local(task);
// RustRover gets confused about the conditional compilation here
#[allow(clippy::needless_return)]
return Ok(());
}
// For non-WASM, use `tokio::task::spawn_blocking` to avoid blocking the async runtime
#[cfg(feature = "tokio-async")]
{
let self_clone = self.clone();
let opts_clone = opts.clone();
tokio::task::spawn_blocking(move || {
let mut img = image.lock().unwrap();
self_clone.load_image(read_buf, &mut img, &opts_clone, callback)
})
.await
.map_err(|e| DiskImageError::IoError(e.to_string()))?
}
}
fn can_write(&self, image: Option<&DiskImage>) -> ParserWriteCompatibility {
match self {
DiskImageFileFormat::RawSectorImage => raw::RawFormat::can_write(image),
DiskImageFileFormat::ImageDisk => imd::ImdFormat::can_write(image),
#[cfg(feature = "td0")]
DiskImageFileFormat::TeleDisk => td0::Td0Format::can_write(image),
DiskImageFileFormat::PceSectorImage => psi::PsiFormat::can_write(image),
DiskImageFileFormat::PceBitstreamImage => pri::PriFormat::can_write(image),
DiskImageFileFormat::MfmBitstreamImage => mfm::MfmFormat::can_write(image),
DiskImageFileFormat::HfeImage => hfe::HfeFormat::can_write(image),
DiskImageFileFormat::F86Image => f86::F86Format::can_write(image),
DiskImageFileFormat::TransCopyImage => tc::TCFormat::can_write(image),
DiskImageFileFormat::SuperCardPro => scp::ScpFormat::can_write(image),
DiskImageFileFormat::PceFluxImage => pfi::PfiFormat::can_write(image),
DiskImageFileFormat::KryofluxStream => kryoflux::KfxFormat::can_write(image),
#[cfg(feature = "mfi")]
DiskImageFileFormat::MameFloppyImage => mfi::MfiFormat::can_write(image),
#[cfg(feature = "ipf")]
DiskImageFileFormat::IpfImage => ipf::IpFormat::can_write(image),
#[cfg(feature = "moof")]
DiskImageFileFormat::MoofImage => moof::MoofFormat::can_write(image),
#[cfg(feature = "woz")]
DiskImageFileFormat::WozImage => woz::WozFormat::can_write(image),
}
}
/// Serialize `image` to `write_buf` in this file format, delegating to the
/// corresponding format parser's `save_image`.
///
/// Callers should check `can_write()` first; parsers that do not support
/// writing return an error (e.g. `DiskImageError::UnsupportedFormat`).
fn save_image<RWS: ReadWriteSeek>(
    self,
    image: &mut DiskImage,
    opts: &ParserWriteOptions,
    write_buf: &mut RWS,
) -> Result<(), DiskImageError> {
    match self {
        DiskImageFileFormat::RawSectorImage => raw::RawFormat::save_image(image, opts, write_buf),
        DiskImageFileFormat::ImageDisk => imd::ImdFormat::save_image(image, opts, write_buf),
        #[cfg(feature = "td0")]
        DiskImageFileFormat::TeleDisk => td0::Td0Format::save_image(image, opts, write_buf),
        DiskImageFileFormat::PceSectorImage => psi::PsiFormat::save_image(image, opts, write_buf),
        DiskImageFileFormat::PceBitstreamImage => pri::PriFormat::save_image(image, opts, write_buf),
        DiskImageFileFormat::MfmBitstreamImage => mfm::MfmFormat::save_image(image, opts, write_buf),
        DiskImageFileFormat::HfeImage => hfe::HfeFormat::save_image(image, opts, write_buf),
        DiskImageFileFormat::F86Image => f86::F86Format::save_image(image, opts, write_buf),
        DiskImageFileFormat::TransCopyImage => tc::TCFormat::save_image(image, opts, write_buf),
        DiskImageFileFormat::SuperCardPro => scp::ScpFormat::save_image(image, opts, write_buf),
        DiskImageFileFormat::PceFluxImage => pfi::PfiFormat::save_image(image, opts, write_buf),
        DiskImageFileFormat::KryofluxStream => kryoflux::KfxFormat::save_image(image, opts, write_buf),
        #[cfg(feature = "mfi")]
        DiskImageFileFormat::MameFloppyImage => mfi::MfiFormat::save_image(image, opts, write_buf),
        #[cfg(feature = "ipf")]
        DiskImageFileFormat::IpfImage => ipf::IpFormat::save_image(image, opts, write_buf),
        #[cfg(feature = "moof")]
        DiskImageFileFormat::MoofImage => moof::MoofFormat::save_image(image, opts, write_buf),
        #[cfg(feature = "woz")]
        DiskImageFileFormat::WozImage => woz::WozFormat::save_image(image, opts, write_buf),
    }
}
}
/// Return the total length of `reader` in bytes without disturbing its
/// current position (position is saved, the end is measured, then restored).
fn reader_len<R: ReadSeek>(reader: &mut R) -> Result<u64, DiskImageError> {
    // `stream_position()` is the idiomatic form of `seek(SeekFrom::Current(0))`
    // (clippy::seek_from_current), and matches usage elsewhere in this module.
    let pos = reader.stream_position()?;
    let len = reader.seek(SeekFrom::End(0))?;
    reader.seek(SeekFrom::Start(pos))?;
    Ok(len)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// The "tc" extension must resolve to the TransCopy image format.
    #[test]
    fn test_format_from_ext_tc() {
        assert_eq!(format_from_ext("tc"), Some(DiskImageFileFormat::TransCopyImage));
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/imd.rs | src/file_parsers/imd.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use crate::{
file_parsers::{FormatCaps, ParserReadOptions, ParserWriteCompatibility, ParserWriteOptions},
io::{ReadSeek, ReadWriteSeek},
types::{
chs::{DiskCh, DiskChsn},
AddSectorParams,
DiskDescriptor,
MetaSectorTrackParams,
Platform,
SectorAttributes,
TrackDataEncoding,
TrackDataRate,
TrackDensity,
},
util::{get_length, read_ascii},
DiskImage,
DiskImageError,
DiskImageFileFormat,
FoxHashSet,
LoadingCallback,
ASCII_EOF,
};
use binrw::{binrw, BinRead, BinReaderExt};
use regex::Regex;
/// Regex matching the ASCII signature line of an IMD image:
/// `IMD v.vv: dd/mm/yyyy hh:mm:ss` followed by an optional free-form comment
/// (captured with `(?s)` so it may span multiple lines).
pub const IMD_HEADER_REX: &str = r"(?s)IMD (?<v_major>\d)\.(?<v_minor>\d{2}):\s+(?<day>\d{1,2})/(?<month>\d{2})/(?<year>\d{4})\s+(?<hh>\d{1,2}):(?<mm>\d{2}):(?<ss>\d{2})(?<comment>.*)?";
pub struct ImdFormat;
/// Raw IMD track header, read directly from the image stream via binrw.
#[derive(Debug)]
#[binrw]
pub struct ImdTrack {
    pub mode: u8,    // Data rate/encoding mode byte (0-5); see `imd_mode_to_rate`
    c: u8,           // Cylinder number
    h: u8,           // Head number plus map-presence flag bits (0x80 cyl map, 0x40 head map)
    sector_ct: u8,   // Number of sectors on this track
    sector_size: u8, // Sector size code; see `sector_size()`
}
impl ImdTrack {
    /// Cylinder number for this track.
    pub fn c(&self) -> u8 {
        self.c
    }

    /// Head number with the map-presence flag bits masked off.
    pub fn h(&self) -> u8 {
        self.h & 0x0F
    }

    /// Basic sanity check on a track header.
    ///
    /// A sector size code of 0xFF is accepted: per the IMD 'proposed extension'
    /// it indicates that a per-sector size map follows the sector number map.
    pub fn is_valid(&self) -> bool {
        self.mode < 6 && (self.h & !0xC0) < 2 && (self.sector_size < 7 || self.sector_size == 0xFF)
    }

    /// True if a head map follows the sector number map.
    pub fn has_head_map(&self) -> bool {
        self.h & 0x40 != 0
    }

    /// True if a cylinder map follows the sector number map.
    pub fn has_cylinder_map(&self) -> bool {
        self.h & 0x80 != 0
    }

    /// True if a per-sector size map is present (sector size code 0xFF).
    ///
    /// BUGFIX: this previously tested `self.h == 0xFF`, which can never hold
    /// for a header accepted by `is_valid()` (head byte is limited to two flag
    /// bits plus a head number < 2); the IMD extension keys the size map off
    /// the sector size byte, not the head byte.
    pub fn has_sector_size_map(&self) -> bool {
        self.sector_size == 0xFF
    }

    /// Uniform sector size in bytes, or `None` when the size code is out of
    /// range (including the 0xFF size-map marker).
    pub fn sector_size(&self) -> Option<usize> {
        imd_sector_size_to_usize(self.sector_size)
    }
}
/// Map an IMD track mode byte to its (data rate, encoding) pair.
/// Modes 0-2 are FM at 500/300/250 kbps; modes 3-5 are MFM at the same rates.
/// Returns `None` for any other mode value.
fn imd_mode_to_rate(data_rate: u8) -> Option<(TrackDataRate, TrackDataEncoding)> {
    if data_rate > 5 {
        return None;
    }
    let encoding = if data_rate < 3 {
        TrackDataEncoding::Fm
    }
    else {
        TrackDataEncoding::Mfm
    };
    let rate = match data_rate % 3 {
        0 => TrackDataRate::Rate500Kbps(1.0),
        1 => TrackDataRate::Rate300Kbps(1.0),
        _ => TrackDataRate::Rate250Kbps(1.0),
    };
    Some((rate, encoding))
}
/// Convert an IMD sector size code (0-6) to a byte length: `128 << code`
/// (128, 256, 512, 1024, 2048, 4096, 8192). Returns `None` for out-of-range
/// codes, including the 0xFF size-map marker.
fn imd_sector_size_to_usize(sector_size: u8) -> Option<usize> {
    // Closure form keeps the shift from being evaluated for invalid codes.
    (sector_size <= 6).then(|| 128usize << sector_size)
}
/// Decoded payload of a single IMD sector data record.
pub struct ImdSectorData {
    data: Vec<u8>, // Expanded sector data (empty when the record is 'unavailable')
    deleted: bool, // Sector carried a deleted address mark
    error: bool,   // Sector was recorded with a read error
}
impl ImdFormat {
    /// File format identifier for this parser.
    #[allow(dead_code)]
    fn format() -> DiskImageFileFormat {
        DiskImageFileFormat::ImageDisk
    }

    /// IMD is a sector-level format; it advertises no bitstream capabilities.
    pub(crate) fn capabilities() -> FormatCaps {
        FormatCaps::empty()
    }

    /// Platforms this format is associated with.
    pub fn platforms() -> Vec<Platform> {
        // As far as I know, IMD files were only intended for the PC.
        vec![Platform::IbmPc]
    }

    /// File extensions recognized as IMD images.
    pub(crate) fn extensions() -> Vec<&'static str> {
        vec!["imd"]
    }
/// Sniff the stream for an IMD image: rewind and test whether the leading
/// ASCII header matches the IMD signature regex.
pub(crate) fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
    let _raw_len = get_length(&mut image).map_or(0, |l| l as usize);
    _ = image.seek(std::io::SeekFrom::Start(0));
    match read_ascii(&mut image, Some(ASCII_EOF), None) {
        (Some(header_str), _) => Regex::new(IMD_HEADER_REX).unwrap().captures(&header_str).is_some(),
        _ => false,
    }
}
/// Writing IMD images is not implemented; always reports unsupported.
pub(crate) fn can_write(_image: Option<&DiskImage>) -> ParserWriteCompatibility {
    ParserWriteCompatibility::UnsupportedFormat
}
pub(crate) fn load_image<RWS: ReadSeek>(
mut read_buf: RWS,
disk_image: &mut DiskImage,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_source_format(DiskImageFileFormat::ImageDisk);
// Assign the disk geometry or return error.
let _raw_len = get_length(&mut read_buf).map_err(|_e| DiskImageError::UnknownFormat)? as usize;
_ = read_buf.seek(std::io::SeekFrom::Start(0));
if let (Some(header_str), terminator) = read_ascii(&mut read_buf, Some(ASCII_EOF), None) {
if let Some(caps) = Regex::new(IMD_HEADER_REX).unwrap().captures(&header_str) {
let v_major = &caps["v_major"];
let v_minor = &caps["v_minor"];
let comment_match = caps.name("comment");
let comment = comment_match.map(|c| c.as_str().to_string());
log::trace!(
"load_image(): Detected IMD header version: {}.{} terminator: {:02X}, comment: {}",
v_major,
v_minor,
terminator,
&comment.clone().unwrap_or("None".to_string())
);
if let Some(comment) = comment {
if !comment.is_empty() {
log::trace!("load_image(): Setting comment metadata: {}", &comment);
disk_image.set_metadata_key("comment", &comment);
}
}
}
}
let mut header_offset = read_buf.stream_position()?;
let mut heads_seen: FoxHashSet<u8> = FoxHashSet::new();
let mut rate_opt = None;
let mut encoding_opt = None;
let mut track_ct = 0;
while let Ok(track_header) = ImdTrack::read_le(&mut read_buf) {
log::trace!("from_image: Track header: {:?} @ {:X}", &track_header, header_offset);
log::trace!("from_image: Track header valid: {}", &track_header.is_valid());
if !track_header.is_valid() {
log::error!("from_image: Invalid track header at offset {:X}", header_offset);
return Err(DiskImageError::FormatParseError);
}
log::trace!(
"from_image: Track has cylinder map: {} head map: {}",
&track_header.has_cylinder_map(),
&track_header.has_head_map()
);
//let sector_size = imd_sector_size_to_usize(track_header.sector_size).unwrap();
let mut sector_numbers = vec![0; track_header.sector_ct as usize];
let mut cylinder_map = vec![track_header.c(); track_header.sector_ct as usize];
let mut head_map = vec![track_header.h(); track_header.sector_ct as usize];
//let default_n = track_header.sector_size;
let default_sector_size = track_header.sector_size();
if default_sector_size.is_none() {
return Err(DiskImageError::FormatParseError);
}
// Sector size map is in words; so double the bytes.
let mut sector_size_map_u8: Vec<u8> = vec![0, track_header.sector_ct * 2];
let mut sector_size_map: Vec<u16> =
vec![default_sector_size.unwrap() as u16; track_header.sector_ct as usize];
// Keep a set of heads seen.
heads_seen.insert(track_header.h());
read_buf.read_exact(&mut sector_numbers)?;
if track_header.has_cylinder_map() {
read_buf.read_exact(&mut cylinder_map)?;
}
if track_header.has_head_map() {
read_buf.read_exact(&mut head_map)?;
}
// Note: This is listed as a 'proposed extension' in the IMD docs but apparently there
// are images like this in the wild. 86box supports this extension.
if track_header.has_sector_size_map() {
read_buf.read_exact(&mut sector_size_map_u8)?;
// Convert raw u8 to u16 values, little-endian.
for (i, s) in sector_size_map_u8.chunks_exact(2).enumerate() {
sector_size_map[i] = u16::from_le_bytes([s[0], s[1]]);
}
}
log::trace!(
"from_image: Track sector numbers: {:?} Cyl map: {:?} Head map: {:?}",
§or_numbers,
&cylinder_map,
&head_map
);
// Add track to read_buf.
let (data_rate, data_encoding) = match imd_mode_to_rate(track_header.mode) {
Some((rate, encoding)) => (rate, encoding),
None => return Err(DiskImageError::FormatParseError),
};
if rate_opt.is_none() {
rate_opt = Some(data_rate);
}
if encoding_opt.is_none() {
encoding_opt = Some(data_encoding);
}
log::trace!("Adding track: C: {} H: {}", track_header.c, track_header.h);
let params = MetaSectorTrackParams {
ch: DiskCh::from((track_header.c() as u16, track_header.h())),
encoding: data_encoding,
data_rate,
};
let new_track = disk_image.add_track_metasector(¶ms)?;
// Read all sectors for this track.
for s in 0..sector_numbers.len() {
// Read data byte marker.
let data_marker: u8 = read_buf.read_le()?;
let sector_size = sector_size_map[s] as usize;
let sector_n = DiskChsn::bytes_to_n(sector_size);
match data_marker {
0x00..=0x08 => {
let data = ImdFormat::read_data(data_marker, sector_size, &mut read_buf)?;
log::trace!(
"from_image: Sector {}: Data Marker: {:02X} Data ({}): {:02X?} Deleted: {} Error: {}",
s + 1,
data_marker,
&data.data.len(),
&data.data[0..16],
&data.deleted,
&data.error
);
// Add this sector to track.
let params = AddSectorParams {
id_chsn: DiskChsn::new(cylinder_map[s] as u16, head_map[s], sector_numbers[s], sector_n),
data: &data.data,
weak_mask: None,
hole_mask: None,
attributes: SectorAttributes {
address_error: false,
data_error: data.error,
deleted_mark: data.deleted,
no_dam: false,
},
alternate: false,
bit_index: None,
};
new_track.add_sector(¶ms)?;
}
_ => {
return Err(DiskImageError::FormatParseError);
}
}
}
header_offset = read_buf.stream_position()?;
if track_header.sector_ct == 0 {
continue;
}
track_ct += 1;
}
let head_ct = heads_seen.len() as u8;
disk_image.descriptor = DiskDescriptor {
// IMD was PC-specific.
platforms: Some(vec![Platform::IbmPc]),
geometry: DiskCh::from((track_ct as u16 / head_ct as u16, head_ct)),
data_rate: rate_opt.unwrap(),
data_encoding: encoding_opt.unwrap(),
density: TrackDensity::from(rate_opt.unwrap()),
rpm: None,
write_protect: None,
};
Ok(())
}
fn read_data<RWS: ReadSeek>(
data_marker: u8,
sector_size: usize,
read_buf: &mut RWS,
) -> Result<ImdSectorData, DiskImageError> {
match data_marker {
0x00 => {
// Sector data unavailable.
Ok(ImdSectorData {
data: Vec::new(),
deleted: false,
error: false,
})
}
0x01 => {
// Normal data - sector_size bytes follow.
let mut data = vec![0; sector_size];
read_buf.read_exact(&mut data)?;
Ok(ImdSectorData {
data,
deleted: false,
error: false,
})
}
0x02 => {
// Compressed data: A single byte follows, repeated sector_size times.
let data_byte = read_buf.read_le()?;
let data = vec![data_byte; sector_size];
Ok(ImdSectorData {
data,
deleted: false,
error: false,
})
}
0x03 => {
// Normal data with 'deleted' address-mark.
let mut data = vec![0; sector_size];
read_buf.read_exact(&mut data)?;
Ok(ImdSectorData {
data,
deleted: true,
error: false,
})
}
0x04 => {
// Compressed data with 'deleted' address-mark.
// A single byte follows, repeated sector_size times.
let data_byte = read_buf.read_le()?;
let data = vec![data_byte; sector_size];
Ok(ImdSectorData {
data,
deleted: true,
error: false,
})
}
0x05 => {
// Normal data with 'error' indicator.
let mut data = vec![0; sector_size];
read_buf.read_exact(&mut data)?;
Ok(ImdSectorData {
data,
deleted: false,
error: true,
})
}
0x06 => {
// Compressed data with 'error' indicator.
let data_byte = read_buf.read_le()?;
let data = vec![data_byte; sector_size];
Ok(ImdSectorData {
data,
deleted: false,
error: true,
})
}
_ => Err(DiskImageError::FormatParseError),
}
}
/// Serialization to IMD is not implemented; always returns `UnsupportedFormat`.
pub fn save_image<RWS: ReadWriteSeek>(
    _image: &DiskImage,
    _opts: &ParserWriteOptions,
    _output: &mut RWS,
) -> Result<(), DiskImageError> {
    Err(DiskImageError::UnsupportedFormat)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/f86.rs | src/file_parsers/f86.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
src/parsers/f86.rs
A parser for the 86f disk image format. (F is prepended due to inability to
start identifiers with numbers in Rust.)
86f format images are an internal bitstream-level format used by the 86Box emulator.
*/
use std::mem::size_of;
use crate::{
file_parsers::{bitstream_flags, FormatCaps, ParserReadOptions, ParserWriteCompatibility, ParserWriteOptions},
io::{ReadSeek, ReadWriteSeek},
source_map::{MapDump, OptionalSourceMap, SourceValue},
track::bitstream::BitStreamTrack,
track_schema::TrackSchema,
types::{
BitStreamTrackParams,
DiskCh,
DiskDescriptor,
DiskImageFlags,
DiskRpm,
Platform,
TrackDataEncoding,
TrackDataRate,
TrackDataResolution,
TrackDensity,
},
DiskImage,
DiskImageError,
DiskImageFileFormat,
LoadingCallback,
};
use binrw::{binrw, BinRead, BinWrite};
use bitflags::bitflags;
pub const F86_TRACK_TABLE_LEN_PER_HEAD: usize = 256;
pub const F86_TRACK_SIZE_BYTES: usize = 25000;
bitflags! {
    /// Flags word from the 86F file header.
    #[derive (Default, Debug)]
    pub struct F86DiskFlags: u16 {
        const HAS_SURFACE_DESC = 0b0000_0001; // Surface description data present
        const HOLE_MASK = 0b0000_0110; // Media hole (density) bits
        const TWO_SIDES = 0b0000_1000; // Double-sided image
        const WRITE_PROTECT = 0b0001_0000; // Image is write protected
        const RPM_SLOWDOWN = 0b0110_0000; // RPM slowdown percentage bits
        const BITCELL_MODE = 0b1000_0000; // Extra bitcell count field present in track headers
        const TYPE = 0b0000_0001_0000_0000; // Zoned-RPM image type (unsupported here)
        const REVERSE_ENDIAN = 0b0000_1000_0000_0000; // Track data stored big-endian (unsupported here)
        const SPEEDUP_FLAG = 0b0001_0000_0000_0000; // RPM bits indicate speedup instead of slowdown
    }
}
impl F86DiskFlags {
    /// Returns an iterator over the names of the set flags.
    ///
    /// NOTE(review): this parses the `Debug` output of the bitflags type
    /// (expected shape `F86DiskFlags(A | B)`), so it is coupled to the
    /// bitflags crate's Debug formatting — verify if that dependency is upgraded.
    fn iter_set_flags(&self) -> Vec<String> {
        format!("{:?}", self)
            .split_once('(') // Split at the first '('
            .and_then(|(_, rest)| rest.split_once(')')) // Split at the first ')'
            .map(|(inside, _)| inside) // Take the part inside the parentheses
            .unwrap_or("") // Fallback to an empty string
            .split('|') // Split by '|'
            .map(str::trim) // Trim each flag name
            .map(String::from) // Convert to String
            .collect()
    }
}
// Raw bit masks for the 16-bit flags word of the 86F file header
// (same layout as the F86DiskFlags bitflags above).
pub const F86_DISK_HAS_SURFACE_DESC: u16 = 0b0000_0001;
pub const F86_DISK_HOLE_MASK: u16 = 0b0000_0110;
pub const F86_DISK_SIDES: u16 = 0b0000_1000;
pub const F86_DISK_WRITE_PROTECT: u16 = 0b0001_0000;
pub const F86_DISK_RPM_SLOWDOWN: u16 = 0b0110_0000;
pub const F86_DISK_BITCELL_MODE: u16 = 0b1000_0000;
//pub const F86_DISK_TYPE: u16 = 0b0000_0001_0000_0000;
//pub const F86_DISK_REVERSE_ENDIAN: u16 = 0b0000_1000_0000_0000;
pub const F86_DISK_SPEEDUP_FLAG: u16 = 0b0001_0000_0000_0000;
/// Fixed 8-byte 86F file header ("86BF" magic, version, flags word).
#[derive(Debug)]
#[binrw]
#[brw(little)]
struct FileHeader {
    id: [u8; 4], // “86BF”
    minor_version: u8, // 0C (12)
    major_version: u8, // 02 (2) -> 2.12
    // The raw u16 is converted to/from the typed bitflags representation.
    #[br(map = |flags: u16| F86DiskFlags::from_bits_truncate(flags))]
    #[bw(map = |flags: &F86DiskFlags| flags.bits())]
    flags: F86DiskFlags,
}
impl MapDump for FileHeader {
    /// Record the parsed header fields — and the names of any set flags — in
    /// the image's source map for inspection/debugging.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        let header_str = String::from_utf8_lossy(&self.id).to_string();
        #[rustfmt::skip]
        let mut flags = map
            .add_child(parent, "File Header", SourceValue::default())
            .add_child("id", SourceValue::string(&header_str))
            .add_sibling("minor_version", SourceValue::u8(self.minor_version))
            .add_sibling("major_version", SourceValue::u8(self.major_version))
            .add_sibling("flags", SourceValue::hex_u16(self.flags.bits()))
            .add_sibling("flags set", SourceValue::default());
        // The first flag becomes a child of the "flags set" node; the rest are siblings of it.
        for (i, flag) in self.flags.iter_set_flags().iter().enumerate() {
            if i == 0 {
                flags = flags.add_child(flag, SourceValue::default());
            }
            else {
                flags = flags.add_sibling(flag, SourceValue::default());
            }
        }
        parent
    }
}
impl Default for FileHeader {
    /// Header for a new 86F image: "86BF" magic, format version 2.12, no flags set.
    fn default() -> Self {
        Self {
            id: *b"86BF",
            minor_version: 0x0C,
            major_version: 0x02,
            flags: F86DiskFlags::default(),
        }
    }
}
/// Per-track header (variant without the extra-bitcell field).
#[derive(Debug)]
#[binrw]
#[br(import(index: usize))]
#[brw(little)]
struct TrackHeader {
    // Track index, injected by the reader for source-map labeling only;
    // not part of the on-disk data.
    #[bw(ignore)]
    #[br(calc = index)]
    index: usize,
    flags: u16, // Per-track flags (data rate, encoding, RPM bits)
    index_hole: u32, // Index hole position within the track data
}
impl MapDump for TrackHeader {
    /// Record this track header's fields in the image source map.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        map.add_child(
            parent,
            &format!("[{}] Track Header", self.index),
            SourceValue::default(),
        )
        .add_child("flags", SourceValue::hex_u16(self.flags))
        .add_sibling("index_hole", SourceValue::u32(self.index_hole));
        parent
    }
}
/// Per-track header variant used when the BITCELL_MODE flag is set:
/// carries a signed extra-bitcell adjustment between flags and index hole.
#[derive(Debug)]
#[binrw]
#[br(import(index: usize))]
#[brw(little)]
struct TrackHeaderBitCells {
    // Track index, injected by the reader for source-map labeling only.
    #[bw(ignore)]
    #[br(calc = index)]
    index: usize,
    flags: u16, // Per-track flags (data rate, encoding, RPM bits)
    bit_cells: i32, // Signed extra bitcell count (or absolute count; see loader)
    index_hole: u32, // Index hole position within the track data
}
impl MapDump for TrackHeaderBitCells {
    /// Record this track header's fields (including the bitcell count) in the
    /// image source map. The signed count is shown reinterpreted as u32.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        map.add_child(
            parent,
            &format!("[{}] Track Header (Bitcells)", self.index),
            SourceValue::default(),
        )
        .add_child("flags", SourceValue::hex_u16(self.flags))
        .add_sibling("bit_cells", SourceValue::u32(self.bit_cells as u32))
        .add_sibling("index_hole", SourceValue::u32(self.index_hole));
        parent
    }
}
/// RPM slowdown/speedup factor encoded in the 86F disk flags.
#[allow(clippy::enum_variant_names)]
#[derive(Debug, Clone, Copy)]
enum F86TimeShift {
    ZeroPercent,
    SlowOnePercent,
    SlowOneAndAHalfPercent,
    SlowTwoPercent,
    FastOnePercent,
    FastOneAndAHalfPercent,
    FastTwoPercent,
}

impl F86TimeShift {
    /// Scale `value` by this time-shift factor (slow = multiply, fast = divide).
    pub(crate) fn adjust(&self, value: f64) -> f64 {
        value
            * match self {
                F86TimeShift::ZeroPercent => 1.0,
                F86TimeShift::SlowOnePercent => 1.01,
                // BUGFIX: was 1.01 (copy-paste); 1.5% slow must scale by 1.015,
                // matching the 12687 (= 12500 * 1.015) entry in
                // F86Density::track_length_words and the fast-side 1.0 / 1.015.
                F86TimeShift::SlowOneAndAHalfPercent => 1.015,
                F86TimeShift::SlowTwoPercent => 1.02,
                F86TimeShift::FastOnePercent => 1.0 / 1.01,
                F86TimeShift::FastOneAndAHalfPercent => 1.0 / 1.015,
                F86TimeShift::FastTwoPercent => 1.0 / 1.02,
            }
    }
}
/// Byte order of the stored track data words.
#[derive(Debug)]
enum F86Endian {
    Little,
    Big,
}
/// Media density class decoded from the 86F 'hole' bits.
#[derive(Debug)]
enum F86Density {
    Double,
    High,
    Extended,
    ExtendedPlus,
}

impl F86Density {
    /// Expected track length in 16-bit words for this density at the given
    /// time shift. Base lengths (12500 / 25000 / 50000 words) are scaled by the
    /// shift factor and truncated toward zero.
    fn track_length_words(&self, time_shift: F86TimeShift) -> usize {
        match self {
            // DD and HD share a 12500-word base track.
            F86Density::Double | F86Density::High => match time_shift {
                F86TimeShift::SlowTwoPercent => 12750,
                F86TimeShift::SlowOneAndAHalfPercent => 12687,
                F86TimeShift::SlowOnePercent => 12625,
                F86TimeShift::ZeroPercent => 12500,
                F86TimeShift::FastOnePercent => 12376,
                F86TimeShift::FastOneAndAHalfPercent => 12315,
                F86TimeShift::FastTwoPercent => 12254,
            },
            // ED: 25000-word base.
            // BUGFIX: the previous row was internally inconsistent (non-monotonic:
            // 1.5% slow > 2% slow, zero-shift = 24752, and the fast 1.5%/2% entries
            // were the DD/HD values). These entries are 25000 scaled and truncated
            // exactly like the other two rows.
            F86Density::Extended => match time_shift {
                F86TimeShift::SlowTwoPercent => 25500,
                F86TimeShift::SlowOneAndAHalfPercent => 25375,
                F86TimeShift::SlowOnePercent => 25250,
                F86TimeShift::ZeroPercent => 25000,
                F86TimeShift::FastOnePercent => 24752,
                F86TimeShift::FastOneAndAHalfPercent => 24630,
                F86TimeShift::FastTwoPercent => 24509,
            },
            // ED+: 50000-word base.
            F86Density::ExtendedPlus => match time_shift {
                F86TimeShift::SlowTwoPercent => 51000,
                F86TimeShift::SlowOneAndAHalfPercent => 50750,
                F86TimeShift::SlowOnePercent => 50500,
                F86TimeShift::ZeroPercent => 50000,
                F86TimeShift::FastOnePercent => 49504,
                F86TimeShift::FastOneAndAHalfPercent => 49261,
                F86TimeShift::FastTwoPercent => 49019,
            },
        }
    }
}
/// Decode the disk-level time-shift from the header flags: bits 5-6 select
/// 0/1/1.5/2 percent and the SPEEDUP flag chooses fast (speedup) vs. slow.
/// With a zero percentage the speedup flag is irrelevant.
fn f86_disk_time_shift(flags: u16) -> F86TimeShift {
    match ((flags & F86_DISK_RPM_SLOWDOWN) >> 5, flags & F86_DISK_SPEEDUP_FLAG != 0) {
        (0b00, _) => F86TimeShift::ZeroPercent,
        (0b01, false) => F86TimeShift::SlowOnePercent,
        (0b10, false) => F86TimeShift::SlowOneAndAHalfPercent,
        (0b11, false) => F86TimeShift::SlowTwoPercent,
        (0b01, true) => F86TimeShift::FastOnePercent,
        (0b10, true) => F86TimeShift::FastOneAndAHalfPercent,
        (0b11, true) => F86TimeShift::FastTwoPercent,
        _ => unreachable!(),
    }
}
/// Decode the disk hole/density bits (header flags bits 1-2) into a density class.
fn f86_disk_density(flags: u16) -> F86Density {
    match (flags & F86_DISK_HOLE_MASK) >> 1 {
        0b00 => F86Density::Double,
        0b01 => F86Density::High,
        0b10 => F86Density::Extended,
        0b11 => F86Density::ExtendedPlus,
        _ => unreachable!(),
    }
}

/// Decode a track's data-rate bits (track flags bits 0-2); `None` for reserved values.
fn f86_track_data_rate(flags: u16) -> Option<TrackDataRate> {
    match flags & 0x07 {
        0b000 => Some(TrackDataRate::Rate500Kbps(1.0)),
        0b001 => Some(TrackDataRate::Rate300Kbps(1.0)),
        0b010 => Some(TrackDataRate::Rate250Kbps(1.0)),
        0b011 => Some(TrackDataRate::Rate1000Kbps(1.0)),
        _ => None,
    }
}

/// Decode a track's encoding bits (track flags bits 3-4); `None` for reserved values.
fn f86_track_encoding(flags: u16) -> Option<TrackDataEncoding> {
    match (flags >> 3) & 0x03 {
        0b00 => Some(TrackDataEncoding::Fm),
        0b01 => Some(TrackDataEncoding::Mfm),
        0b11 => Some(TrackDataEncoding::Gcr),
        _ => None,
    }
}

/// Decode a track's RPM bits (track flags bits 5-7); `None` for unsupported values.
fn f86_track_rpm(flags: u16) -> Option<DiskRpm> {
    match (flags >> 5) & 0x07 {
        0b000 => Some(DiskRpm::Rpm300(1.0)),
        0b001 => Some(DiskRpm::Rpm360(1.0)),
        _ => None,
    }
}
/// OR each weak-bit mask byte into the corresponding track data byte.
/// Bytes beyond the shorter of the two slices are left untouched.
fn f86_weak_to_weak(bit_data: &mut [u8], weak_data: &[u8]) {
    let common = bit_data.len().min(weak_data.len());
    for i in 0..common {
        bit_data[i] |= weak_data[i];
    }
}
/// Clear every track data bit that is set in the weak-bit mask (AND-NOT).
/// Bytes beyond the shorter of the two slices are left untouched.
fn f86_weak_to_holes(bit_data: &mut [u8], weak_data: &[u8]) {
    let common = bit_data.len().min(weak_data.len());
    for i in 0..common {
        bit_data[i] &= !weak_data[i];
    }
}
/// Equivalent of 86Box's `common_get_raw_size()` function.
///
/// Computes the track length in bitcells from the data rate, RPM and time
/// shift, then applies the signed extra-bitcell adjustment (saturating at 0).
fn f86_track_bit_length(
    encoding: TrackDataEncoding,
    data_rate: TrackDataRate,
    rpm: DiskRpm,
    time_shift: F86TimeShift,
    extra_bitcells: i32,
) -> usize {
    // Reference: 100,000 bitcells at 250 kbps / 300 RPM, scaled proportionally.
    let mut size = 100000.0;
    let mut rate = u32::from(data_rate) as f64 / 1000.0;
    // FM tracks are computed at half the nominal rate.
    if matches!(encoding, TrackDataEncoding::Fm) {
        rate /= 2.0;
    }
    size = (size / 250.0) * rate;
    size = (size * 300.0) / f64::from(rpm);
    size = time_shift.adjust(size);
    //log::debug!("f86_track_bit_length: rate: {}, rpm: {} size: {}", rate, rpm, size);
    (size as usize).saturating_add_signed(extra_bitcells as isize)
}
pub struct F86Format {}
impl F86Format {
/// File extensions recognized as 86F images.
pub fn extensions() -> Vec<&'static str> {
    vec!["86f"]
}

/// 86F supports the library's bitstream-level capability set.
pub fn capabilities() -> FormatCaps {
    bitstream_flags()
}

/// Platforms this format is associated with.
pub fn platforms() -> Vec<Platform> {
    // 86f can in theory support other platforms, but since 86Box is a PC-only emulator, we'll
    // stick with that.
    vec![Platform::IbmPc]
}
/// Sniff an 86F image: the stream must start with the "86BF" magic and
/// declare format version 2.12.
pub fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
    if image.seek(std::io::SeekFrom::Start(0)).is_err() {
        return false;
    }
    match FileHeader::read(&mut image) {
        Ok(header) => {
            header.id == *b"86BF" && header.minor_version == 0x0C && header.major_version == 0x02
        }
        Err(_) => false,
    }
}
/// 86F can be written when the image stores exactly one resolution and that
/// resolution is bitstream data; with no image to inspect, assume writable.
pub fn can_write(image: Option<&DiskImage>) -> ParserWriteCompatibility {
    match image {
        None => ParserWriteCompatibility::Ok,
        Some(image) => {
            let bitstream_only = image.resolution.contains(&TrackDataResolution::BitStream)
                && image.resolution.len() <= 1;
            if bitstream_only {
                // 86f images can encode about everything we can store for a bitstream format
                ParserWriteCompatibility::Ok
            }
            else {
                // 86f images can't store multiple resolutions, and must store bitstream data
                ParserWriteCompatibility::Incompatible
            }
        }
    }
}
pub(crate) fn load_image<RWS: ReadSeek>(
mut read_buf: RWS,
disk_image: &mut DiskImage,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_source_format(DiskImageFileFormat::F86Image);
disk_image.assign_source_map(true);
// Seek to the start of the image and read the 86F header.
read_buf.seek(std::io::SeekFrom::Start(0))?;
let header = FileHeader::read(&mut read_buf)?;
let has_surface_desc = header.flags.contains(F86DiskFlags::HAS_SURFACE_DESC);
if has_surface_desc {
log::trace!("Image has surface description.");
}
// Write the header to the source map.
header.write_to_map(disk_image.source_map_mut(), 0);
log::debug!(
"bitcell flags: {},{},{},{}",
header.flags.bits() >> 12 & 0x01,
header.flags.bits() >> 7 & 0x01,
header.flags.bits() >> 6 & 0x01,
header.flags.bits() >> 5 & 0x01
);
let hole = f86_disk_density(header.flags.bits());
let disk_sides = if header.flags.contains(F86DiskFlags::TWO_SIDES) {
2
}
else {
1
};
let (image_data_rate, image_density) = match hole {
F86Density::Double => (TrackDataRate::Rate250Kbps(1.0), TrackDensity::Double),
F86Density::High => (TrackDataRate::Rate500Kbps(1.0), TrackDensity::High),
F86Density::Extended | F86Density::ExtendedPlus => {
log::error!("Extended density images not supported.");
return Err(DiskImageError::UnsupportedFormat);
}
};
log::trace!("Image data rate: {:?} density: {:?}", image_data_rate, image_density);
if header.flags.contains(F86DiskFlags::TYPE) {
log::error!("Images with Zoned RPM unsupported.");
return Err(DiskImageError::UnsupportedFormat);
}
let extra_bitcell_mode = header.flags.contains(F86DiskFlags::BITCELL_MODE);
let disk_data_endian = if header.flags.contains(F86DiskFlags::REVERSE_ENDIAN) {
F86Endian::Big
}
else {
F86Endian::Little
};
if matches!(disk_data_endian, F86Endian::Big) {
log::warn!("Big-endian 86f images are not supported.");
return Err(DiskImageError::UnsupportedFormat);
}
/* if extra_bitcell_mode {
log::warn!("Extra bitcell mode not implemented.");
return Err(DiskImageError::UnsupportedFormat);
}*/
let time_shift = f86_disk_time_shift(header.flags.bits());
log::debug!("Time shift: {:?}", time_shift);
let absolute_bitcell_count = if matches!(time_shift, F86TimeShift::ZeroPercent)
&& (header.flags.contains(F86DiskFlags::SPEEDUP_FLAG))
&& extra_bitcell_mode
{
log::trace!("Extra bitcell count is an absolute count.");
true
}
else {
false
};
// A table of track offsets immediately follows the header. We can calculate the number of
// tracks from the offset of the first track - the header size, giving us the number of
// offsets in the table.
let mut track_offsets: Vec<(u32, usize)> = Vec::new();
let mut first_offset_buf = [0u8; 4];
read_buf.read_exact(&mut first_offset_buf)?;
let first_offset = u32::from_le_bytes(first_offset_buf);
let num_tracks = (first_offset as usize - size_of::<FileHeader>()) / 4;
log::trace!("Track offset table has {} entries", num_tracks);
let mut cursor = disk_image
.source_map_mut()
.add_child(0, "Track Offsets", SourceValue::default())
.add_child("[0] Track Offset", SourceValue::u32(first_offset));
track_offsets.push((first_offset, 0));
// Read the rest of the track offsets now that we know how many there are
for i in 1..num_tracks {
let mut offset_buf = [0u8; 4];
read_buf.read_exact(&mut offset_buf)?;
let offset = u32::from_le_bytes(offset_buf);
cursor = cursor.add_sibling(&format!("[{}] Track Offset", i), SourceValue::u32(offset));
if offset == 0 {
break;
}
// Adjust size of previous track offset
if let Some((prev_offset, prev_size)) = track_offsets.last_mut() {
log::trace!("Track offset: {} - {}", *prev_offset, offset);
*prev_size = (offset - *prev_offset) as usize;
}
track_offsets.push((offset, 0));
}
// Patch up the size of the last track
if let Some((prev_offset, prev_size)) = track_offsets.last_mut() {
let stream_len = read_buf.seek(std::io::SeekFrom::End(0))?;
*prev_size = (stream_len - *prev_offset as u64) as usize;
}
log::trace!("Read {} track offsets from table.", track_offsets.len());
let mut head_n = 0;
let mut cylinder_n = 0;
let mut disk_rpm: Option<DiskRpm> = None;
for (ti, (track_offset, track_entry_len)) in track_offsets.into_iter().enumerate() {
read_buf.seek(std::io::SeekFrom::Start(track_offset as u64))?;
let (track_flags, extra_bitcells, index_pos) = match extra_bitcell_mode {
true => {
let track_header = TrackHeaderBitCells::read_args(&mut read_buf, (ti,))?;
track_header.write_to_map(disk_image.source_map_mut(), 0);
log::trace!("Read track header with extra bitcells: {:?}", track_header);
(
track_header.flags,
Some(track_header.bit_cells),
track_header.index_hole,
)
}
false => {
let track_header = TrackHeader::read_args(&mut read_buf, (ti,))?;
track_header.write_to_map(disk_image.source_map_mut(), 0);
log::trace!("Read track header: {:?}", track_header);
(track_header.flags, None, track_header.index_hole)
}
};
log::debug!("Index position: {}", index_pos);
let track_rpm = match f86_track_rpm(track_flags) {
Some(rpm) => rpm,
None => {
log::error!("Unsupported RPM: {:04X}", track_flags);
return Err(DiskImageError::UnsupportedFormat);
}
};
if disk_rpm.is_none() {
disk_rpm = Some(track_rpm);
}
else if disk_rpm != Some(track_rpm) {
log::error!("Inconsistent RPMs in disk read_buf.");
return Err(DiskImageError::UnsupportedFormat);
}
let track_encoding = match f86_track_encoding(track_flags) {
Some(enc) => enc,
None => {
log::error!("Unsupported data encoding: {:04X}", track_flags);
return Err(DiskImageError::UnsupportedFormat);
}
};
let track_data_rate = match f86_track_data_rate(track_flags) {
Some(rate) => rate,
None => {
log::error!("Unsupported data rate: {:04X}", track_flags);
return Err(DiskImageError::UnsupportedFormat);
}
};
// Read the track data
let raw_track_size = track_entry_len
- match extra_bitcell_mode {
true => 10, //size_of::<TrackHeaderBitCells>(),
false => 6, //size_of::<TrackHeader>(),
};
if raw_track_size & 0x01 != 0 {
log::error!("Invalid 86f: Track data size is not word-aligned.");
return Err(DiskImageError::ImageCorruptError(
"Track data size is not word-aligned".to_string(),
));
}
let raw_track_data_size = if has_surface_desc {
log::debug!("Track has surface description, halving data size.");
raw_track_size / 2
}
else {
raw_track_size
};
log::debug!(
"Track raw data size: {} ({} words) Extra bitcells: {}",
raw_track_data_size,
raw_track_data_size / 2,
extra_bitcells.unwrap_or(0)
);
let mut read_length_bytes = raw_track_data_size;
// Calculate the expected track length in words from the density and time shift.
let mut read_length_expected_words = hole.track_length_words(time_shift);
// Adjust track length in words for extra bitcells.
let adjusted_read_length_words = if let Some(bitcells) = extra_bitcells {
let track_bitcells = (read_length_expected_words * 16).saturating_add_signed(bitcells as isize);
if track_bitcells % 16 != 0 {
(track_bitcells / 16) + 1
}
else {
track_bitcells / 16
}
}
else {
read_length_expected_words
};
log::debug!(
"Base track word length: {} Adjusted track word length: {}",
read_length_expected_words,
adjusted_read_length_words,
);
read_length_expected_words = adjusted_read_length_words;
let bitcell_ct = if absolute_bitcell_count {
// An absolute bitcell count overrides the calculated track length.
if let Some(absolute_count) = extra_bitcells {
let absolute_data_len =
((absolute_count / 8) + if (absolute_count % 8) != 0 { 1 } else { 0 }) as usize;
log::trace!(
"Absolute bitcell count ({}) specifies: {} bytes. Raw data length is: {}",
absolute_count,
absolute_data_len,
raw_track_data_size
);
if absolute_data_len > raw_track_data_size {
log::error!(
"Data length calculated from absolute bitcell count is greater than track data length: {} > {}",
absolute_data_len,
raw_track_data_size
);
return Err(DiskImageError::ImageCorruptError(
"Absolute bitcell count exceeds track data length.".to_string(),
));
}
read_length_bytes = absolute_data_len;
absolute_count as usize
}
else {
log::error!("Absolute bitcell count flag set, but no count provided.");
return Err(DiskImageError::ImageCorruptError(
"Absolute bitcell count flag set, but no count provided.".to_string(),
));
}
}
else {
#[allow(clippy::comparison_chain)]
if raw_track_data_size < read_length_expected_words * 2 {
log::error!(
"Track data length is less than expected: {} < {}",
read_length_bytes,
read_length_expected_words * 2
);
return Err(DiskImageError::ImageCorruptError(
"Track data length is less than expected.".to_string(),
));
}
else if raw_track_data_size > read_length_expected_words * 2 {
log::warn!(
"Track data length is greater than expected: {} > {}",
read_length_bytes,
read_length_expected_words * 2
);
// We'll truncate the data to the expected length.
read_length_bytes = read_length_expected_words * 2;
}
// Calculate the bitcell count from track parameters. It may be less than the
// track data length, especially for DD images.
let calculated_bitcell_ct = f86_track_bit_length(
track_encoding,
track_data_rate,
track_rpm,
time_shift,
extra_bitcells.unwrap_or(0),
);
log::debug!(
"Calculated bitcell count: {} Track data length: {} bits",
calculated_bitcell_ct,
read_length_bytes * 16
);
calculated_bitcell_ct
};
log::debug!(
"Data read length: {} ({} words)",
read_length_bytes,
read_length_bytes / 2
);
let track_data_vec = {
let mut track_data = vec![0u8; read_length_bytes];
read_buf.read_exact(&mut track_data)?;
track_data
};
let surface_data_vec = if has_surface_desc {
let mut surface_data = vec![0u8; read_length_bytes];
read_buf.read_exact(&mut surface_data)?;
Some(surface_data)
}
else {
None
};
let mut track_weak_vec = vec![0u8; read_length_bytes];
let mut track_hole_vec = vec![0u8; read_length_bytes];
let (track_weak_opt, track_hole_opt) = if let Some(surface_data) = surface_data_vec {
let mut have_weak = false;
let mut have_hole = false;
for (((weak, hole), surface), data) in track_weak_vec
.iter_mut()
.zip(track_hole_vec.iter_mut())
.zip(surface_data.iter())
.zip(track_data_vec.iter())
{
// Weak bits are set when surface bit is 1 and data bit is 1.
*weak = *surface & *data;
// Hole bits are set when surface bit is 1 and data bit is 0.
*hole = *surface & !*data;
have_weak |= *weak != 0;
have_hole |= *hole != 0;
}
let weak_opt = have_weak.then_some(track_weak_vec.as_slice());
let hole_opt = have_hole.then_some(track_hole_vec.as_slice());
(weak_opt, hole_opt)
}
else {
(None, None)
};
log::debug!(
"Adding {:?} encoded track: {}",
track_encoding,
DiskCh::from((cylinder_n, head_n))
);
let params = BitStreamTrackParams {
schema: Some(TrackSchema::System34),
encoding: track_encoding,
data_rate: track_data_rate,
rpm: disk_rpm,
ch: DiskCh::from((cylinder_n, head_n)),
bitcell_ct: Some(bitcell_ct),
data: &track_data_vec,
weak: track_weak_opt,
hole: track_hole_opt,
detect_weak: false,
};
disk_image.add_track_bitstream(¶ms)?;
head_n += 1;
if head_n == disk_sides {
cylinder_n += 1;
head_n = 0;
}
}
disk_image.descriptor = DiskDescriptor {
// 86box is mostly a PC emulator, although it does support other formats...
// We'll stick with PC for now.
platforms: Some(vec![Platform::IbmPc]),
geometry: DiskCh::from((cylinder_n, disk_sides)),
data_rate: Default::default(),
data_encoding: TrackDataEncoding::Mfm,
density: image_density,
rpm: disk_rpm,
write_protect: Some(header.flags.contains(F86DiskFlags::WRITE_PROTECT)),
};
Ok(())
}
/// Write a disk read_buf in 86F format.
/// We always emit 86f images with absolute bitcell counts - this is easier to handle.
/// Without specifying an absolute bitcell count, there is a formula to use to calculate the
/// number of words to write per track. Due to the variety of formats we import, we cannot
/// guarantee a specific bitcell length.
///
/// When writing track data, the size must be rounded to the nearest word (2 bytes).
pub fn save_image<RWS: ReadWriteSeek>(
image: &DiskImage,
_opts: &ParserWriteOptions,
output: &mut RWS,
) -> Result<(), DiskImageError> {
if Self::can_write(Some(&image)) == ParserWriteCompatibility::Incompatible {
log::error!("Incompatible image format.");
return Err(DiskImageError::UnsupportedFormat);
}
log::trace!("Saving 86f image...");
let mut disk_flags = 0;
let mut has_surface_description = false;
let has_weak_bits = image.has_weak_bits();
if has_weak_bits {
// We'll need to include a surface descriptor.
log::trace!("Image has weak/hole bits.");
has_surface_description = true;
disk_flags |= F86_DISK_HAS_SURFACE_DESC;
}
else {
log::trace!("Image has no weak/hole bits.");
}
disk_flags |= match image.descriptor.density {
TrackDensity::Double => 0,
TrackDensity::High => 0b01 << 1,
TrackDensity::Extended => 0b10 << 1,
_ => {
log::error!("Unsupported disk density: {:?}", image.descriptor.density);
return Err(DiskImageError::UnsupportedFormat);
}
};
disk_flags |= match image.descriptor.geometry.h() {
1 => 0,
2 => F86_DISK_SIDES,
_ => {
log::error!("Unsupported number of heads: {}", image.descriptor.geometry.h());
return Err(DiskImageError::UnsupportedFormat);
}
};
// We don't support the RPM slowdown feature.
// We always want to specify an absolute bitcell count, so set bits 7 and 12.
let use_absolute_bit_count = true;
disk_flags |= F86_DISK_BITCELL_MODE;
disk_flags |= F86_DISK_SPEEDUP_FLAG;
if image.descriptor.write_protect.unwrap_or(false) {
disk_flags |= F86_DISK_WRITE_PROTECT;
}
let f86_header = FileHeader {
flags: F86DiskFlags::from_bits(disk_flags).unwrap(),
..Default::default()
};
// Write header to output.
output.seek(std::io::SeekFrom::Start(0))?;
f86_header.write(output)?;
log::trace!("Image geometry: {}", image.descriptor.geometry);
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | true |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/hfe.rs | src/file_parsers/hfe.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
src/parsers/hfe.rs
A parser for the HFEv1 disk image format.
HFE format images are an internal bitstream-level format used by the HxC disk emulator.
*/
use crate::{
file_parsers::{FormatCaps, ParserReadOptions, ParserWriteCompatibility, ParserWriteOptions},
io::{ReadSeek, ReadWriteSeek},
source_map::{MapDump, OptionalSourceMap, SourceValue},
types::{BitStreamTrackParams, DiskCh, DiskDescriptor, Platform, TrackDataEncoding, TrackDataRate, TrackDensity},
DiskImage,
DiskImageError,
DiskImageFileFormat,
LoadingCallback,
};
use binrw::{binrw, BinRead};
use strum::IntoEnumIterator;
/// Reverse the bit order of a byte (MSB becomes LSB and vice versa).
///
/// Uses the classic divide-and-conquer swap: nibbles, then bit pairs, then
/// adjacent bits. `const` so it can seed a compile-time lookup table.
const fn reverse_bits(byte: u8) -> u8 {
    let nibbles_swapped = (byte >> 4) | (byte << 4);
    let pairs_swapped = ((nibbles_swapped & 0x33) << 2) | ((nibbles_swapped & 0xCC) >> 2);
    ((pairs_swapped & 0x55) << 1) | ((pairs_swapped & 0xAA) >> 1)
}
/// Build a 256-entry lookup table mapping every byte value to its
/// bit-reversed counterpart, evaluated entirely at compile time.
const fn generate_reverse_table() -> [u8; 256] {
    let mut table = [0u8; 256];
    let mut value: usize = 0;
    // `for` is not available in const fn; use loop/break instead.
    loop {
        table[value] = reverse_bits(value as u8);
        value += 1;
        if value == 256 {
            break;
        }
    }
    table
}
/// Precomputed bit-reversal table; HFE stores bits LSB-first, so every data
/// byte is reversed through this table on read.
const REVERSE_TABLE: [u8; 256] = generate_reverse_table();
/// Offsets in the HFE header (track LUT, track data) are expressed in units
/// of 512 (0x200) byte blocks.
pub const HFE_TRACK_OFFSET_BLOCK: u64 = 0x200;
/// Floppy interface modes stored in the HFE header's `interface_mode` byte.
/// Discriminants are the raw on-disk values. Values not listed decode to
/// [HfeFloppyInterface::Unknown].
#[repr(u8)]
#[derive(Copy, Clone, Debug)]
pub enum HfeFloppyInterface {
    IbmPcDd = 0x00,
    IbmPcHd = 0x01,
    AtariStDd = 0x02,
    AtariStHd = 0x03,
    AmigaDd = 0x04,
    AmigaHd = 0x05,
    CpcDd = 0x06,
    GenericShugartDd = 0x07,
    IbmPcEd = 0x08,
    Msx2Dd = 0x09,
    C64Dd = 0x0A,
    EmuShugart = 0x0B,
    S950Dd = 0x0C,
    S950Hd = 0x0D,
    Disable = 0xFE,
    // Catch-all for unrecognized interface bytes; not part of the HFE spec.
    Unknown = 0xFF,
}
impl From<u8> for HfeFloppyInterface {
fn from(value: u8) -> Self {
match value {
0x00 => HfeFloppyInterface::IbmPcDd,
0x01 => HfeFloppyInterface::IbmPcHd,
0x02 => HfeFloppyInterface::AtariStDd,
0x03 => HfeFloppyInterface::AtariStHd,
0x04 => HfeFloppyInterface::AmigaDd,
0x05 => HfeFloppyInterface::AmigaHd,
0x06 => HfeFloppyInterface::CpcDd,
0x07 => HfeFloppyInterface::GenericShugartDd,
0x08 => HfeFloppyInterface::IbmPcEd,
0x09 => HfeFloppyInterface::Msx2Dd,
0x0A => HfeFloppyInterface::C64Dd,
0x0B => HfeFloppyInterface::EmuShugart,
0x0C => HfeFloppyInterface::S950Dd,
0x0D => HfeFloppyInterface::S950Hd,
0xFE => HfeFloppyInterface::Disable,
_ => HfeFloppyInterface::Unknown,
}
}
}
impl From<(Platform, TrackDensity)> for HfeFloppyInterface {
    /// Pick the HFE interface mode that corresponds to a platform/density
    /// pair. Unmapped combinations yield [HfeFloppyInterface::Unknown].
    fn from((platform, density): (Platform, TrackDensity)) -> Self {
        match (platform, density) {
            (Platform::IbmPc, TrackDensity::Double) => HfeFloppyInterface::IbmPcDd,
            (Platform::IbmPc, TrackDensity::High) => HfeFloppyInterface::IbmPcHd,
            (Platform::IbmPc, TrackDensity::Extended) => HfeFloppyInterface::IbmPcEd,
            (Platform::Amiga, TrackDensity::Double) => HfeFloppyInterface::AmigaDd,
            (Platform::Amiga, TrackDensity::High) => HfeFloppyInterface::AmigaHd,
            _ => HfeFloppyInterface::Unknown,
        }
    }
}
impl TryFrom<HfeFloppyInterface> for Platform {
    type Error = ();

    /// Map an HFE interface mode to a fluxfox [Platform]. Only IBM PC and
    /// Amiga modes have a corresponding platform; everything else fails.
    fn try_from(value: HfeFloppyInterface) -> Result<Self, Self::Error> {
        match value {
            HfeFloppyInterface::IbmPcDd | HfeFloppyInterface::IbmPcHd | HfeFloppyInterface::IbmPcEd => {
                Ok(Platform::IbmPc)
            }
            HfeFloppyInterface::AmigaDd | HfeFloppyInterface::AmigaHd => Ok(Platform::Amiga),
            // Atari ST, CPC, MSX2, C64, Shugart, S950, Disable, Unknown:
            // no platform mapping exists.
            _ => Err(()),
        }
    }
}
impl From<HfeFloppyInterface> for TrackDensity {
fn from(value: HfeFloppyInterface) -> Self {
match value {
HfeFloppyInterface::IbmPcDd => TrackDensity::Double,
HfeFloppyInterface::IbmPcHd => TrackDensity::High,
HfeFloppyInterface::AtariStDd => TrackDensity::Double,
HfeFloppyInterface::AtariStHd => TrackDensity::High,
HfeFloppyInterface::AmigaDd => TrackDensity::Double,
HfeFloppyInterface::AmigaHd => TrackDensity::High,
HfeFloppyInterface::CpcDd => TrackDensity::Double,
HfeFloppyInterface::GenericShugartDd => TrackDensity::Double,
HfeFloppyInterface::IbmPcEd => TrackDensity::Extended,
HfeFloppyInterface::Msx2Dd => TrackDensity::Double,
HfeFloppyInterface::C64Dd => TrackDensity::Double,
HfeFloppyInterface::EmuShugart => TrackDensity::Double,
HfeFloppyInterface::S950Dd => TrackDensity::Double,
HfeFloppyInterface::S950Hd => TrackDensity::High,
HfeFloppyInterface::Disable => TrackDensity::Double,
HfeFloppyInterface::Unknown => TrackDensity::Double,
}
}
}
/// Track encodings stored in the HFE header's `track_encoding` byte.
/// Discriminants are the raw on-disk values.
#[repr(u8)]
#[derive(Copy, Clone, Debug)]
pub enum HfeFloppyEncoding {
    IsoIbmMfm = 0x00,
    AmigaMfm = 0x01,
    IsoIbmFm = 0x02,
    EmuFm = 0x03,
    // Catch-all for unrecognized encoding bytes; not part of the HFE spec.
    Unknown = 0xFF,
}
impl From<u8> for HfeFloppyEncoding {
fn from(value: u8) -> Self {
match value {
0x00 => HfeFloppyEncoding::IsoIbmMfm,
0x01 => HfeFloppyEncoding::AmigaMfm,
0x02 => HfeFloppyEncoding::IsoIbmFm,
0x03 => HfeFloppyEncoding::EmuFm,
_ => HfeFloppyEncoding::Unknown,
}
}
}
pub struct HfeFormat {}
/// On-disk HFEv1 file header, read little-endian from offset 0.
/// Field comments follow the HFE format documentation.
#[derive(Debug)]
#[binrw]
#[brw(little)]
struct HfeFileHeader {
    signature: [u8; 8], // “HXCPICFE”
    format_revision: u8, // Revision 0
    number_of_tracks: u8, // Number of track in the file
    number_of_sides: u8, // Number of valid side (Not used by the emulator)
    track_encoding: u8, // Track Encoding mode
    // (Used for the write support - Please see the list above)
    bit_rate: u16, // Bitrate in Kbit/s. Ex : 250=250000bits/s
    // Max value : 500
    rpm: u16, // Rotation per minute (Not used by the emulator)
    interface_mode: u8, // Floppy interface mode. (Please see the list above.)
    unused: u8, // Reserved
    track_list_offset: u16, // Offset of the track list LUT in block of 512 bytes
    // (Ex: 1=0x200)
    write_allowed: u8, // The Floppy image is write protected ?
    // v1.1 addition – Set them to 0xFF if unused.
    single_step: u8, // 0xFF : Single Step – 0x00 Double Step mode
    track0s0_alt_encoding: u8, // 0x00 : Use an alternate track_encoding for track 0 Side 0
    track0s0_encoding: u8, // alternate track_encoding for track 0 Side 0
    track0s1_alt_encoding: u8, // 0x00 : Use an alternate track_encoding for track 0 Side 1
    track0s1_encoding: u8, // alternate track_encoding for track 0 Side 1
}
impl MapDump for HfeFileHeader {
    /// Record every header field in the image's source map for inspection.
    /// Returns the `parent` node index unchanged.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        // Signature bytes may not be valid UTF-8 on a corrupt image; render lossily.
        let signature_str = String::from_utf8_lossy(&self.signature).to_string();
        #[rustfmt::skip]
        map.add_child(parent, "HFE File Header", SourceValue::default())
            .add_child("signature", SourceValue::string(&signature_str))
            .add_sibling("format_revision", SourceValue::u8(self.format_revision))
            .add_sibling("number_of_tracks", SourceValue::u8(self.number_of_tracks))
            .add_sibling("number_of_sides", SourceValue::u8(self.number_of_sides))
            .add_sibling("track_encoding", SourceValue::u8(self.track_encoding))
            .add_sibling("bit_rate", SourceValue::u16(self.bit_rate))
            .add_sibling("rpm", SourceValue::u16(self.rpm))
            .add_sibling("interface_mode", SourceValue::u8(self.interface_mode))
            .add_sibling("unused", SourceValue::u8(self.unused))
            .add_sibling("track_list_offset", SourceValue::u16(self.track_list_offset))
            .add_sibling("write_allowed", SourceValue::u8(self.write_allowed))
            .add_sibling("single_step", SourceValue::u8(self.single_step))
            .add_sibling("track0s0_alt_encoding", SourceValue::u8(self.track0s0_alt_encoding))
            .add_sibling("track0s0_encoding", SourceValue::u8(self.track0s0_encoding))
            .add_sibling("track0s1_alt_encoding", SourceValue::u8(self.track0s1_alt_encoding))
            .add_sibling("track0s1_encoding", SourceValue::u8(self.track0s1_encoding));
        parent
    }
}
/// One entry of the HFE track-offset lookup table: where a track's data
/// starts (in 512-byte blocks) and its length in bytes.
#[derive(Debug)]
#[binrw]
#[br(import(index: usize))]
#[brw(little)]
struct HfeTrackIndexEntry {
    // Table position of this entry; computed on read, not stored in the file.
    #[bw(ignore)]
    #[br(calc = index)]
    index: usize,
    offset: u16, // Track data offset, in units of 512-byte blocks
    len: u16,    // Track data length in bytes
}
impl MapDump for HfeTrackIndexEntry {
    /// Record this index entry in the image's source map for inspection.
    /// Returns the `parent` node index unchanged.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        #[rustfmt::skip]
        map.add_child(parent,&format!("[{}] Track Index Entry", self.index), SourceValue::default())
            .add_child("offset", SourceValue::u16(self.offset))
            .add_sibling("len", SourceValue::u16(self.len));
        parent
    }
}
impl HfeFormat {
#[allow(dead_code)]
fn format() -> DiskImageFileFormat {
DiskImageFileFormat::PceBitstreamImage
}
pub(crate) fn capabilities() -> FormatCaps {
FormatCaps::empty()
}
pub(crate) fn extensions() -> Vec<&'static str> {
vec!["hfe"]
}
pub(crate) fn platforms() -> Vec<Platform> {
// HFE images support a wide variety of platforms
Platform::iter().collect()
}
pub(crate) fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
let mut detected = false;
_ = image.seek(std::io::SeekFrom::Start(0));
if let Ok(file_header) = HfeFileHeader::read(&mut image) {
if file_header.signature == "HXCPICFE".as_bytes() {
detected = true;
}
}
detected
}
pub(crate) fn can_write(_image: Option<&DiskImage>) -> ParserWriteCompatibility {
ParserWriteCompatibility::UnsupportedFormat
}
pub(crate) fn load_image<RWS: ReadSeek>(
mut read_buf: RWS,
disk_image: &mut DiskImage,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_source_format(DiskImageFileFormat::HfeImage);
disk_image.assign_source_map(true);
let image_len = read_buf.seek(std::io::SeekFrom::End(0))?;
read_buf.seek(std::io::SeekFrom::Start(0))?;
let file_header = HfeFileHeader::read(&mut read_buf)?;
if file_header.signature != "HXCPICFE".as_bytes() {
log::error!("Invalid HFE signature");
return Err(DiskImageError::UnknownFormat);
}
file_header.write_to_map(disk_image.source_map_mut(), 0);
let hfe_floppy_interface = HfeFloppyInterface::from(file_header.interface_mode);
let hfe_track_encoding = HfeFloppyEncoding::from(file_header.track_encoding);
log::trace!(
"Got HXE header. Cylinders: {} Heads: {} Encoding: {:?}",
file_header.number_of_tracks,
file_header.number_of_sides,
hfe_track_encoding
);
let track_list_offset = file_header.track_list_offset as u64 * HFE_TRACK_OFFSET_BLOCK;
read_buf.seek(std::io::SeekFrom::Start(track_list_offset))?;
let mut track_index_vec = Vec::new();
for ti in 0..file_header.number_of_tracks {
let track_index_entry = HfeTrackIndexEntry::read_args(&mut read_buf, (ti as usize,))?;
track_index_entry.write_to_map(disk_image.source_map_mut(), 0);
if track_index_entry.len & 1 != 0 {
log::error!("Track {} length cannot be odd, due to head interleave.", ti);
return Err(DiskImageError::FormatParseError);
}
track_index_vec.push(track_index_entry);
}
for (ti, track) in track_index_vec.iter().enumerate() {
let mut track_data: [Vec<u8>; 2] = [Vec::with_capacity(50 * 512), Vec::with_capacity(50 * 512)];
let track_data_offset = track.offset as u64 * HFE_TRACK_OFFSET_BLOCK;
read_buf.seek(std::io::SeekFrom::Start(track_data_offset))?;
// Use either the offset of the next data block od the end of the file to determine the
// length of the current data block.
let next_data = track_index_vec
.get(ti + 1)
.map(|ti| ti.offset as u64 * HFE_TRACK_OFFSET_BLOCK)
.unwrap_or(image_len);
let data_block_len = next_data - track_data_offset;
let data_block_ct = data_block_len / 512;
if data_block_len % 512 != 0 {
log::warn!(
"Cylinder {} data length {} is not a multiple of 512 bytes",
ti,
track.len
);
}
else {
log::trace!(
"Cylinder {} data length {} contains {} 512 byte blocks.",
ti,
track.len,
data_block_ct
);
}
let mut bytes_remaining = track.len as usize;
let mut block_ct = 0;
let mut last_block = false;
while !last_block && bytes_remaining > 0 {
// HFE always seems to store two heads?
let block_data_size: usize = if bytes_remaining >= 512 {
256
}
else {
last_block = true;
bytes_remaining / 2
};
for head in 0..2 {
log::trace!(
"Reading track {} head {} block {} bytes_remaining: {}",
ti,
head,
block_ct,
bytes_remaining
);
// Read 256 bytes for the current head...
let mut track_block_data = vec![0; block_data_size];
read_buf.read_exact(&mut track_block_data)?;
// Reverse all the bits in each byte read.
for byte in track_block_data.iter_mut() {
*byte = REVERSE_TABLE[*byte as usize];
}
// Add to track data under the appropriate head no
track_data[head].extend_from_slice(&track_block_data);
bytes_remaining = match bytes_remaining.checked_sub(block_data_size) {
Some(bytes) => bytes,
None => {
log::error!(
"Track {}: Block: {} Head: {} Data underflow reading track data",
ti,
block_ct,
head
);
return Err(DiskImageError::FormatParseError);
}
}
}
block_ct += 1;
}
// We should have two full vectors of track data now.
// Add the track data for head 0...
log::trace!(
"Adding bitstream track: C:{} H:{} Bitcells: {}",
ti,
0,
track_data[0].len() * 8
);
let params = BitStreamTrackParams {
schema: None,
encoding: TrackDataEncoding::Mfm,
data_rate: TrackDataRate::from(file_header.bit_rate as u32 * 100),
rpm: None,
ch: DiskCh::from((ti as u16, 0)),
bitcell_ct: None,
data: &track_data[0],
weak: None,
hole: None,
detect_weak: false,
};
disk_image.add_track_bitstream(¶ms)?;
// And the track data for head 1, if sides > 1
if file_header.number_of_sides > 1 {
log::trace!(
"Adding bitstream track: C:{} H:{} Bitcells: {}",
ti,
1,
track_data[1].len() * 8
);
let params = BitStreamTrackParams {
schema: None,
encoding: TrackDataEncoding::Mfm,
data_rate: TrackDataRate::from(file_header.bit_rate as u32 * 100),
rpm: None,
ch: DiskCh::from((ti as u16, 1)),
bitcell_ct: None,
data: &track_data[1],
weak: None,
hole: None,
detect_weak: false,
};
disk_image.add_track_bitstream(¶ms)?;
}
}
disk_image.descriptor = DiskDescriptor {
// Can't trust HFE platform, so return empty list.
platforms: None,
geometry: DiskCh::from((file_header.number_of_tracks as u16, file_header.number_of_sides)),
data_rate: TrackDataRate::from(file_header.bit_rate as u32 * 1000),
density: TrackDensity::from(hfe_floppy_interface),
data_encoding: TrackDataEncoding::Mfm,
rpm: None,
write_protect: Some(file_header.write_allowed == 0),
};
Ok(())
}
pub fn save_image<RWS: ReadWriteSeek>(
_image: &DiskImage,
_opts: &ParserWriteOptions,
_output: &mut RWS,
) -> Result<(), DiskImageError> {
Err(DiskImageError::UnsupportedFormat)
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Naive reference implementation: move each bit to its mirrored position.
    fn simple_reverse_bits(byte: u8) -> u8 {
        (0..8).fold(0u8, |acc, i| acc | (((byte >> i) & 1) << (7 - i)))
    }

    /// Verify every entry of the compile-time table against the reference.
    #[test]
    fn test_generate_reverse_table() {
        let table = generate_reverse_table();
        for (ti, table_item) in table.iter().enumerate() {
            assert_eq!(*table_item, simple_reverse_bits(ti as u8), "Failed at index {}", ti);
        }
        println!("test_generate_reverse_table(): passed");
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/kryoflux.rs | src/file_parsers/kryoflux.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
src/parsers/kryoflux.rs
A parser for the KryoFlux stream image format.
Kryoflux files (.raw) represent the raw stream of flux transitions for
a single track on a disk. A set of files is used to represent a complete
disk image.
*/
use crate::{
file_parsers::{bitstream_flags, FormatCaps, ParserReadOptions, ParserWriteOptions},
format_us,
io,
io::{ReadBytesExt, ReadSeek, ReadWriteSeek},
source_map::{OptionalSourceMap, SourceValue},
track::fluxstream::FluxStreamTrack,
types::{DiskCh, DiskDescriptor, FluxStreamTrackParams, Platform, TrackDataEncoding, TrackDataResolution},
util::read_ascii,
DiskImage,
DiskImageError,
DiskImageFileFormat,
FoxHashMap,
FoxHashSet,
LoadingCallback,
ParserWriteCompatibility,
};
use binrw::{binrw, BinRead};
use std::path::{Path, PathBuf};
use strum::IntoEnumIterator;
// KryoFlux default master clock (MCK) in Hz: ((18.432 MHz * 73) / 14) / 2.
pub const KFX_DEFAULT_MCK: f64 = ((18432000.0 * 73.0) / 14.0) / 2.0;
// Default sample clock (SCK): half the master clock.
pub const KFX_DEFAULT_SCK: f64 = KFX_DEFAULT_MCK / 2.0;
// Default index clock (ICK): master clock divided by 16.
pub const KFX_DEFAULT_ICK: f64 = KFX_DEFAULT_MCK / 16.0;
/// Out-of-band (OOB) block types found in a KryoFlux raw stream.
/// Raw type bytes are decoded by [read_osb_block].
pub enum OsbBlock {
    /// Unrecognized OOB type byte, carried for diagnostics.
    Invalid(u8),
    /// 0x01: stream position / transfer time information.
    StreamInfo,
    /// 0x02: index pulse block.
    Index,
    /// 0x03: end of stream, with hardware status code.
    StreamEnd,
    /// 0x04: KryoFlux info block (a null-terminated ASCII string follows).
    KfInfo,
    /// 0x0D: end of file marker.
    Eof,
}
fn read_osb_block<R: ReadBytesExt>(reader: &mut R) -> OsbBlock {
let byte = reader.read_u8().unwrap_or(0);
//log::trace!("Read OOB block type: {:02X}", byte);
match byte {
0x01 => OsbBlock::StreamInfo,
0x02 => OsbBlock::Index,
0x03 => OsbBlock::StreamEnd,
0x04 => OsbBlock::KfInfo,
0x0D => OsbBlock::Eof,
_ => OsbBlock::Invalid(byte),
}
}
/// Payload of an OOB StreamInfo (0x01) block: the stream position it refers
/// to and the transfer time in milliseconds.
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct StreamInfoBlock {
    pub size: u16,
    pub stream_pos: u32,
    pub transfer_time_ms: u32,
}
/// Payload of an OOB Index (0x02) block: the stream position of an index
/// pulse plus the sample and index clock counter values at that moment.
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct IndexBlock {
    pub size: u16,
    pub stream_pos: u32,
    pub sample_counter: u32,
    pub index_counter: u32,
}
/// Payload of an OOB StreamEnd (0x03) block: final stream position and the
/// hardware status code reported by the device.
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct StreamEndBlock {
    pub size: u16,
    pub stream_pos: u32,
    pub hw_status_code: u32,
}
/// Payload of an OOB KfInfo (0x04) block. Only the size is structured;
/// the info text itself follows as a null-terminated ASCII string.
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct KfInfoBlock {
    pub size: u16,
    // null terminated ascii string follows
}
/// Payload of an OOB Eof (0x0D) block; marks the end of the stream file.
#[derive(Debug)]
#[binrw]
#[brw(little)]
pub struct EofBlock {
    pub size: u16,
}
/// Stateful helper used while parsing a single KryoFlux stream file.
pub struct KfxFormat<'a> {
    // Source map for debug/inspection output, borrowed from the disk image.
    source_map: &'a mut Box<(dyn OptionalSourceMap)>,
    // Index of the map's last node at construction time (see `new`).
    map_root: usize,
    // Sample clock in Hz; defaults to KFX_DEFAULT_SCK.
    sck: f64,
    // Index clock in Hz; defaults to KFX_DEFAULT_ICK.
    ick: f64,
    // Index counter value from the most recent index block, if any seen.
    last_index_counter: Option<u32>,
    // Current position within the list of discovered index offsets.
    current_offset_idx: usize,
    // Number of index marks encountered while reading the stream.
    idx_ct: u32,
    // Flux overflow accumulator — presumably counts 16-bit cell overflows;
    // TODO confirm against the KryoFlux stream spec.
    flux_ovl: u32,
}
impl<'a> KfxFormat<'a> {
/// Create a new stream parser context over the supplied source map, with
/// default KryoFlux clock values and all counters cleared.
pub fn new(map: &'a mut Box<(dyn OptionalSourceMap)>) -> Self {
    // Capture the map's current tail node before the borrow is stored.
    let map_root = map.last_node().index();
    Self {
        map_root,
        source_map: map,
        sck: KFX_DEFAULT_SCK,
        ick: KFX_DEFAULT_ICK,
        last_index_counter: None,
        current_offset_idx: 0,
        idx_ct: 0,
        flux_ovl: 0,
    }
}
/// File extensions associated with KryoFlux stream files.
pub fn extensions() -> Vec<&'static str> {
    ["raw"].to_vec()
}
/// Format capabilities: KryoFlux streams expose the standard bitstream flags.
pub fn capabilities() -> FormatCaps {
    bitstream_flags()
}
/// Platforms a KryoFlux stream set may represent.
pub fn platforms() -> Vec<Platform> {
    // Kryoflux images support just about every platform.
    Platform::iter().collect()
}
/// Sniff a KryoFlux raw stream by inspecting its first byte.
pub fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
    match image.seek(io::SeekFrom::Start(0)) {
        // Assume stream starts with an OOB header byte(?)
        Ok(_) => image.read_u8().unwrap_or(0) == 0x0D,
        Err(_) => false,
    }
}
/// Writing KryoFlux streams is not supported.
pub fn can_write(_image: Option<&DiskImage>) -> ParserWriteCompatibility {
    ParserWriteCompatibility::UnsupportedFormat
}
pub(crate) fn load_image<RWS: ReadSeek>(
mut image: RWS,
disk_image: &mut DiskImage,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_resolution(TrackDataResolution::FluxStream);
disk_image.set_source_format(DiskImageFileFormat::KryofluxStream);
let binding = disk_image.source_map_mut();
let mut kfx_context = KfxFormat::new(binding);
image.seek(io::SeekFrom::Start(0))?;
// Create vector of streams.
let mut streams: Vec<Vec<f64>> = Vec::with_capacity(5);
// Create first stream.
streams.push(Vec::with_capacity(100_000));
// Create vector of index times.
let mut index_times: Vec<f64> = Vec::with_capacity(5);
// Create vector if index offsets
let mut index_offsets: Vec<u64> = Vec::with_capacity(5);
// Read the steam once to gather the index offsets.
log::debug!("Scanning stream for index blocks...");
let mut stream_position = 0;
let mut eof = false;
while !eof {
eof =
kfx_context.read_index_block(&mut image, &mut index_offsets, &mut stream_position, &mut index_times)?;
}
kfx_context.current_offset_idx = 0;
// Read the stream again now that we know where the indexes are
log::debug!("Reading stream... [Found {} index offsets]", index_offsets.len());
image.seek(io::SeekFrom::Start(0))?;
stream_position = 0;
eof = false;
while !eof {
eof = kfx_context.read_block(&mut image, &index_offsets, &mut stream_position, &mut streams)?;
}
let mut flux_track = FluxStreamTrack::new();
let complete_revs = (kfx_context.idx_ct - 1) as usize;
// We need to have at least two index markers to have a complete revolution.
if complete_revs < 1 || index_offsets.len() < 2 {
log::error!("Stream did not contain a complete revolution.");
return Err(DiskImageError::IncompatibleImage(
"Stream did not contain a complete revolution".to_string(),
));
}
log::debug!(
"Found {} complete revolutions in stream, with {} index times",
complete_revs,
index_times.len()
);
// Get last ch in image.
let next_ch = if disk_image.track_ch_iter().count() == 0 {
log::debug!("No tracks in image, starting at c:0 h:0");
DiskCh::new(0, 0)
}
else {
let mut last_ch = disk_image.track_ch_iter().last().unwrap_or(DiskCh::new(0, 0));
log::debug!("Previous track in image: {} heads: {}", last_ch, disk_image.heads());
last_ch.seek_next_track(disk_image.geometry());
last_ch
};
for ((_ri, rev), index_time) in streams
.iter()
.enumerate()
.skip(1)
.take(complete_revs)
.zip(index_times.iter())
{
flux_track.add_revolution(next_ch, rev, *index_time);
}
#[cfg(feature = "plot")]
{
let plot_rev: usize = std::cmp::min(0, complete_revs - 1);
let flux_rev = flux_track.revolution_mut(plot_rev).unwrap();
let plot_stats = flux_rev.pll_stats();
let x: Vec<f64> = plot_stats.iter().map(|point| point.time).collect();
let len: Vec<f64> = plot_stats.iter().map(|point| point.len).collect();
let predicted: Vec<f64> = plot_stats.iter().map(|point| point.predicted).collect();
let clk_samples: Vec<f64> = plot_stats.iter().map(|point| point.clk).collect();
let win_min: Vec<f64> = plot_stats.iter().map(|point| point.window_min).collect();
let win_max: Vec<f64> = plot_stats.iter().map(|point| point.window_max).collect();
let phase_err: Vec<f64> = plot_stats.iter().map(|point| point.phase_err).collect();
let phase_err_i: Vec<f64> = plot_stats.iter().map(|point| point.phase_err_i).collect();
use plotly::{
common::{Line, Marker, Mode},
layout::Axis,
*,
};
let mut plot = Plot::new();
let flux_times = Scatter::new(x.clone(), len.clone())
.mode(Mode::Markers)
.name("FT length")
.marker(Marker::new().size(2).color(Rgba::new(0, 128, 0, 1.0)));
let predicted_times = Scatter::new(x.clone(), predicted)
.mode(Mode::Markers)
.name("FT length")
.marker(Marker::new().size(2).color(Rgba::new(0, 255, 0, 0.5)));
let clock_trace = Scatter::new(x.clone(), clk_samples)
.mode(Mode::Lines)
.name("PLL Clock")
.line(Line::new().color(Rgba::new(128, 0, 0, 1.0)));
let window_trace = Scatter::new(
x.iter().flat_map(|&x| vec![x, x]).collect::<Vec<_>>(), // Duplicate each x for the start and end points
win_min
.iter()
.zip(&win_max)
.flat_map(|(&start, &end)| vec![start, end])
.collect::<Vec<_>>(), // Flatten each pair of y1, y2
)
.mode(Mode::Lines) // Use lines to draw each segment
.name("PLL Window");
let win_min_trace = Scatter::new(x.clone(), win_min.clone())
.mode(Mode::Markers)
.name("Window min")
.marker(Marker::new().size(3).color(Rgba::new(128, 128, 0, 0.6)));
let win_max_trace = Scatter::new(x.clone(), win_max.clone())
.mode(Mode::Markers)
.name("Window max")
.marker(Marker::new().size(3).color(Rgba::new(0, 128, 128, 0.6)));
let error_trace = Scatter::new(x.clone(), phase_err.clone())
.mode(Mode::Lines)
.name("Phase Error")
.line(Line::new().color(Rgba::new(0, 0, 128, 1.0)));
let error_i_trace = Scatter::new(x.clone(), phase_err_i.clone())
.mode(Mode::Lines)
.name("Integrated error")
.line(Line::new().color(Rgba::new(255, 255, 0, 1.0)));
//let candle_trace = Candlestick::new(x.clone(), win_min.clone(), win_max.clone(), win_min.clone(), win_max.clone());
let mut path = PathBuf::from("plots");
if !path.exists() {
std::fs::create_dir(path.clone())?;
}
let filename = format!("pll_{}_{}.html", next_ch.c(), next_ch.h());
// let flux_filename = format!("flux_{}_{}.csv", next_ch.c(), next_ch.h());
// let flux_path = path.join(flux_filename);
//
// use std::io::Write;
// let mut flux_file = std::fs::File::create(flux_path)?;
// for (x, y) in x.iter().zip(len.clone().iter()) {
// writeln!(flux_file, "{},{}", x, y)?;
// }
path.push(filename);
//plot.add_trace(candle_trace);
plot.add_trace(error_trace);
plot.add_trace(error_i_trace);
plot.add_trace(win_min_trace);
plot.add_trace(win_max_trace);
//plot.add_trace(window_trace);
plot.add_trace(predicted_times);
plot.add_trace(flux_times);
use plotly::color::Rgba;
// // Create a list of shapes representing each PLL window
// let shapes: Vec<Shape> = x
// .iter()
// .enumerate()
// .map(|(i, &start)| {
// let x0 = start;
// let x1 = if i + 1 < x.len() { x[i + 1] } else { x0 };
// let min = win_min[i];
// let max = win_max[i];
//
// Shape::new()
// .shape_type(ShapeType::Rect)
// .x0(x0)
// .x1(x1)
// .y0(min)
// .y1(max)
// .layer(ShapeLayer::Below)
// .line(ShapeLine::new().width(0.0))
// .fill_color(Rgba::new(128, 128, 128, 0.3))
// })
// .collect();
//
// log::warn!("Plotting {} shapes", shapes.len());
let layout = Layout::new().y_axis(Axis::new().range(vec![-1.0e-6, 10.0e-6]));
// layout = layout.shapes(shapes);
plot.add_trace(clock_trace);
plot.set_layout(layout);
plot.write_html(path);
}
// let rev_encoding = flux_rev.encoding();
// let rev_density = match rev_stats.detect_density(false) {
// Some(d) => {
// log::debug!("Revolution {} density: {:?}", rev, d);
// d
// }
// None => {
// log::error!(
// "Unable to detect rev {} track {} density: {}",
// rev,
// next_ch,
// flux_rev.transition_avg()
// );
// //return Err(DiskImageError::IncompatibleImage);
// DiskDensity::Double
// }
// };
// let (track_data, track_bits) = flux_track.revolution_mut(rev).unwrap().bitstream_data();
//
// let data_rate = DiskDataRate::from(rev_density);
//
// if track_bits < 1000 {
// log::warn!("Track contains less than 1000 bits. Adding empty track.");
// disk_image.add_empty_track(next_ch, DiskDataEncoding::Mfm, data_rate, 100_000)?;
// }
// else {
// log::debug!(
// "Adding {:?} track {} containing {} bits to image...",
// rev_encoding,
// next_ch,
// track_bits
// );
//
// let params = BitStreamTrackParams {
// encoding: rev_encoding,
// data_rate,
// ch: next_ch,
// bitcell_ct: Some(track_bits),
// data: &track_data,
// weak: None,
// hole: None,
// detect_weak: false,
// };
// disk_image.add_track_bitstream(params)?;
// }
let data_rate = disk_image.data_rate();
// Get hints from disk image if we aren't the first track.
let (clock_hint, rpm_hint) = if !disk_image.track_pool.is_empty() {
(
Some(disk_image.descriptor.density.base_clock(disk_image.descriptor.rpm)),
disk_image.descriptor.rpm,
)
}
else {
(None, None)
};
let params = FluxStreamTrackParams {
ch: next_ch,
schema: None,
encoding: None,
clock: clock_hint,
rpm: rpm_hint,
};
let new_track = disk_image.add_track_fluxstream(flux_track, ¶ms)?;
let (new_density, new_rpm) = if new_track.sector_ct() == 0 {
log::warn!("Track did not decode any sectors. Not updating disk image descriptor.");
(disk_image.descriptor.density, disk_image.descriptor.rpm)
}
else {
let info = new_track.info();
log::debug!(
"Updating disk descriptor with density: {:?} and RPM: {:?}",
info.density,
info.rpm
);
(info.density.unwrap_or(disk_image.descriptor.density), info.rpm)
};
log::debug!("Track added.");
disk_image.descriptor = DiskDescriptor {
// Kryoflux doesn't specify platform at all. Figure it out after import.
platforms: None,
geometry: disk_image.geometry(),
data_rate,
density: new_density,
data_encoding: TrackDataEncoding::Mfm,
rpm: new_rpm,
write_protect: Some(true),
};
Ok(())
}
/// Writing KryoFlux stream sets is not implemented; this parser is read-only.
///
/// # Errors
/// Always returns [`DiskImageError::UnsupportedFormat`].
pub fn save_image<RWS: ReadWriteSeek>(
    _image: &DiskImage,
    _opts: &ParserWriteOptions,
    _output: &mut RWS,
) -> Result<(), DiskImageError> {
    Err(DiskImageError::UnsupportedFormat)
}
/// First pass over a KryoFlux raw stream: examine the single block at the
/// reader's current position, advancing `stream_position` by the block's
/// in-stream size and harvesting index (revolution) data from OOB blocks.
///
/// Flux values are NOT decoded on this pass; only block sizes matter, so that
/// the stream positions recorded in OOB index blocks can later be matched
/// against the identically-computed positions in `read_block`.
///
/// - `index_offsets` receives the stream position of each index pulse.
/// - `index_times` receives the time in seconds between successive index
///   pulses, derived from the index clock (`self.ick`).
///
/// Returns `Ok(true)` once the OOB EOF block is reached, `Ok(false)` otherwise.
fn read_index_block<RWS: ReadSeek>(
    &mut self,
    image: &mut RWS,
    index_offsets: &mut Vec<u64>,
    stream_position: &mut u64,
    index_times: &mut Vec<f64>,
) -> Result<bool, DiskImageError> {
    let file_offset = image.stream_position()?;
    let byte = image.read_u8()?;
    match byte {
        0x00..=0x07 => {
            // Flux2 block
            // Two-byte flux value; skip the payload byte without decoding.
            image.seek(io::SeekFrom::Current(1))?;
            *stream_position += 2;
        }
        0x09 => {
            // Nop2 block
            // Skip one byte
            image.seek(io::SeekFrom::Current(1))?;
            *stream_position += 2;
        }
        0x0A => {
            // Nop3 block
            // Skip two bytes
            image.seek(io::SeekFrom::Current(2))?;
            *stream_position += 3;
        }
        0x0B => {
            // Ovl16 block
            // Single-byte overflow marker; no payload bytes to skip.
            *stream_position += 1;
        }
        0x0C => {
            // Flux3 block
            // Three-byte flux value; skip the two payload bytes.
            image.seek(io::SeekFrom::Current(2))?;
            *stream_position += 3;
        }
        0x0D => {
            // OOB block
            // Out-of-band blocks do not advance the stream position.
            let oob_block = read_osb_block(image);
            match oob_block {
                OsbBlock::Invalid(oob_byte) => {
                    log::error!("Invalid OOB block type: {:02X}", oob_byte);
                }
                OsbBlock::StreamInfo => {
                    // Consumed but unused on the index pass.
                    let _sib = StreamInfoBlock::read(image)?;
                }
                OsbBlock::Index => {
                    let ib = IndexBlock::read(image)?;
                    //let index_time = ib.index_counter as f64 / self.ick;
                    // Convert index-counter deltas to seconds via the index
                    // clock; wrapping_sub tolerates counter rollover.
                    if let Some(last_index_counter) = self.last_index_counter {
                        let index_delta = ib.index_counter.wrapping_sub(last_index_counter);
                        let index_time_delta = index_delta as f64 / self.ick;
                        index_times.push(index_time_delta);
                        let sample_time = ib.sample_counter as f64 / self.sck;
                        log::debug!(
                            "Index block: file_offset: {} next_pos: {} sample_ct: {} ({}) index_ct: {} delta: {:.6} rpm: {:.3}",
                            file_offset,
                            ib.stream_pos,
                            ib.sample_counter,
                            format_us!(sample_time),
                            ib.index_counter,
                            index_time_delta,
                            60.0 / index_time_delta
                        );
                    }
                    else {
                        // First index pulse: nothing to delta against yet.
                        let sample_time = ib.sample_counter as f64 / self.sck;
                        log::debug!(
                            "Index block: file_offset: {} next_pos: {} sample_ct: {} ({}) index_ct: {}",
                            file_offset,
                            ib.stream_pos,
                            ib.sample_counter,
                            format_us!(sample_time),
                            ib.index_counter
                        );
                    }
                    index_offsets.push(ib.stream_pos as u64);
                    // If stream_pos is behind us, we need to go back and create a revolution
                    // at stream_pos
                    if (ib.stream_pos as u64) < *stream_position {
                        log::warn!(
                            "Stream pos is behind current stream position: {} < {}",
                            ib.stream_pos,
                            stream_position
                        );
                    }
                    self.last_index_counter = Some(ib.index_counter);
                }
                OsbBlock::StreamEnd => {
                    // Consumed but unused on the index pass.
                    let _seb = StreamEndBlock::read(image)?;
                }
                OsbBlock::KfInfo => {
                    log::debug!("KfInfo block");
                    let _kib = KfInfoBlock::read(image)?;
                    // Ascii string follows
                    // Drain the trailing ASCII string; content is parsed on
                    // the second pass (read_block).
                    let mut string_end = false;
                    while !string_end {
                        let (str_opt, terminator) = read_ascii(image, None, None);
                        string_end = str_opt.is_none() || terminator == 0;
                    }
                }
                OsbBlock::Eof => {
                    log::debug!("EOF block");
                    return Ok(true);
                }
            }
        }
        _ => {
            // Flux1 block
            // Single-byte flux value. Note this arm also catches 0x08 (Nop1),
            // which is the same in-stream size, so position tracking stays
            // correct either way.
            *stream_position += 1;
        }
    }
    // Return whether we reached end of file
    Ok(false)
}
fn read_block<RWS: ReadSeek>(
&mut self,
image: &mut RWS,
index_offsets: &[u64],
stream_position: &mut u64,
streams: &mut Vec<Vec<f64>>,
) -> Result<bool, DiskImageError> {
let file_offset = image.stream_position()?;
let byte = image.read_u8()?;
// If we've reached the stream position indicated by the last index block,
// we're starting a new revolution.
if (self.current_offset_idx < index_offsets.len())
&& (*stream_position >= index_offsets[self.current_offset_idx])
{
log::debug!(
"Starting new revolution at stream_pos: {}, file_offset: {}",
*stream_position,
file_offset
);
streams.push(Vec::new());
self.current_offset_idx += 1;
self.idx_ct += 1;
}
//log::trace!("Read block type: {:02X}", byte);
match byte {
0x00..=0x07 => {
// Flux2 block
let byte2 = image.read_u8()?;
let flux_u32 = u16::from_be_bytes([byte, byte2]) as u32;
let flux = (self.flux_ovl + flux_u32) as f64 / self.sck;
*stream_position += 2;
streams.last_mut().unwrap().push(flux);
self.flux_ovl = 0;
}
0x08 => {
// Nop1 block
// Do nothing
*stream_position += 1;
}
0x09 => {
// Nop2 block
// Skip one byte
image.seek(io::SeekFrom::Current(1))?;
*stream_position += 2;
}
0x0A => {
// Nop3 block
// Skip two bytes
image.seek(io::SeekFrom::Current(2))?;
*stream_position += 3;
}
0x0B => {
// Ovl16 block
self.flux_ovl = self.flux_ovl.saturating_add(0x10000);
}
0x0C => {
// Flux3 block
let byte2 = image.read_u8()?;
let byte3 = image.read_u8()?;
let flux_u32 = u16::from_be_bytes([byte2, byte3]) as u32;
let flux = (self.flux_ovl + flux_u32) as f64 / self.sck;
streams.last_mut().unwrap().push(flux);
self.flux_ovl = 0;
}
0x0D => {
// OSB block. OSB blocks do not advance the stream position.
let osb_block = read_osb_block(image);
match osb_block {
OsbBlock::Invalid(oob_byte) => {
log::error!("Invalid OOB block type: {:02X}", oob_byte);
}
OsbBlock::StreamInfo => {
let sib = StreamInfoBlock::read(image)?;
log::trace!(
"StreamInfo block: pos: {} time: {}",
sib.stream_pos,
sib.transfer_time_ms
);
self.source_map
.add_child(self.map_root, "OSB: StreamInfo", SourceValue::default())
.add_child("Stream Position", SourceValue::u32(sib.stream_pos))
.add_sibling(
"Transfer Time",
SourceValue::string(&format!("{} ms", sib.transfer_time_ms)),
);
}
OsbBlock::Index => {
let ib = IndexBlock::read(image)?;
self.source_map
.add_child(self.map_root, "OSB: Index", SourceValue::default())
.add_child("Stream Position", SourceValue::u32(ib.stream_pos))
.add_sibling("Sample Counter", SourceValue::u32(ib.sample_counter))
.add_sibling("Index Counter", SourceValue::u32(ib.index_counter))
.add_sibling(
"Index Time",
SourceValue::string(&format!("{:.3} s", ib.index_counter as f64 / self.ick)),
);
}
OsbBlock::StreamEnd => {
let seb = StreamEndBlock::read(image)?;
log::debug!(
"StreamEnd block: end_pos: {} stream_pos: {} offset: {} hw_status: {:02X}",
seb.stream_pos,
*stream_position,
file_offset,
seb.hw_status_code
);
self.source_map
.add_child(self.map_root, "OSB: StreamEnd", SourceValue::default())
.add_child("Stream Position", SourceValue::u32(seb.stream_pos))
.add_sibling("Hardware Status", SourceValue::u32(seb.hw_status_code));
if seb.stream_pos as u64 != *stream_position {
log::warn!(
"StreamEnd position does not match stream position: {} != {}",
seb.stream_pos,
*stream_position
);
}
match seb.hw_status_code {
0 => {
log::debug!("Hardware status reported OK");
}
1 => {
log::error!("A buffering issue was recorded in the stream. Stream may be corrupt");
return Err(DiskImageError::ImageCorruptError(
"Buffering issue detected".to_string(),
));
}
2 => {
log::error!("No index signal was detected.");
return Err(DiskImageError::ImageCorruptError(
"No index signal detected".to_string(),
));
}
_ => {
log::error!("Unknown hardware status. Hope it wasn't important!");
}
}
}
OsbBlock::KfInfo => {
log::debug!("KfInfo block");
let _kib = KfInfoBlock::read(image)?;
// Ascii string follows
let mut string_end = false;
let mut string = String::new();
let mut first = true;
let mut cursor =
self.source_map
.add_child(self.map_root, "OSB: KfInfo", SourceValue::default());
while !string_end {
let (str_opt, terminator) = read_ascii(image, None, None);
if let Some(s) = &str_opt {
log::debug!("KfInfo str: {}", s);
let (sck_opt, ick_opt) = kfx_parse_clk_str(s);
if let Some(sck) = sck_opt {
log::debug!("Set SCK to {}", sck);
self.sck = sck;
}
if let Some(ick) = ick_opt {
log::debug!("Set ICK to {}", ick);
self.ick = ick;
}
let kv = kfx_parse_str(&s);
let mut pairs: Vec<(String, String)> = kv.into_iter().collect();
pairs.sort();
for (k, v) in pairs {
match first {
true => {
let cur_index = cursor.index();
cursor = self.source_map.add_child(cur_index, &k, SourceValue::string(&v));
first = false;
}
false => {
cursor = cursor.add_sibling(&k, SourceValue::string(&v));
}
}
}
string.push_str(s);
}
//log::warn!("terminator: {:02X}", terminator);
string_end = str_opt.is_none() || terminator == 0;
}
}
OsbBlock::Eof => {
log::debug!("EOF block");
return Ok(true);
}
}
}
_ => {
// Flux1 block
let flux = (self.flux_ovl + byte as u32) as f64 / self.sck;
streams.last_mut().unwrap().push(flux);
*stream_position += 1;
self.flux_ovl = 0;
}
}
// Return whether we reached end of file
Ok(false)
}
/// Resolves a supplied PathBuf into a vector of PathBufs representing a KryoFlux set.
/// The set can be resolved from a provided list of PathBufs passed via 'directory', or from the
/// base directory of the 'filepath' argument, if 'directory' is None.
/// This allows building a set from either a directory listing or a list of files from a ZIP
/// archive.
pub fn expand_kryoflux_set(
filepath: &Path,
directory: Option<Vec<PathBuf>>,
) -> Result<(Vec<PathBuf>, DiskCh), DiskImageError> {
let mut set_vec = Vec::new();
// Isolate the base path and filename
let base_path = filepath.parent().unwrap_or(Path::new(""));
let base_name = filepath.file_name().ok_or(DiskImageError::FsError)?;
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | true |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/pce/psi.rs | src/file_parsers/pce/psi.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
src/parsers/psi.rs
A parser for the PSI disk image format.
PSI format images are PCE Sector Images, an internal format used by the PCE emulator and
devised by Hampa Hug.
It is a chunk-based format similar to RIFF.
*/
use crate::{
file_parsers::{FormatCaps, ParserWriteCompatibility},
io::{Cursor, ReadSeek, ReadWriteSeek},
types::{AddSectorParams, DiskDescriptor},
};
use crate::{
file_parsers::{ParserReadOptions, ParserWriteOptions},
types::{
chs::{DiskCh, DiskChs, DiskChsn},
MetaSectorTrackParams,
Platform,
SectorAttributes,
TrackDataEncoding,
TrackDataRate,
TrackDensity,
},
DiskImage,
DiskImageError,
DiskImageFileFormat,
FoxHashMap,
FoxHashSet,
LoadingCallback,
};
use crate::file_parsers::pce::crc::pce_crc;
use binrw::{binrw, BinRead};
/// Unit struct carrying the PSI parser's associated functions.
pub struct PsiFormat;
// Sanity limit to avoid huge allocations from corrupt chunk sizes.
pub const MAXIMUM_CHUNK_SIZE: usize = 0x100000; // Reasonable 1MB limit for chunk sizes.
// SECT chunk flag bits:
pub const SH_FLAG_COMPRESSED: u8 = 0b0001; // sector data is a single repeated byte; no DATA chunk follows
pub const SH_FLAG_ALTERNATE: u8 = 0b0010; // alternate (duplicate) copy of a sector
pub const SH_FLAG_CRC_ERROR: u8 = 0b0100; // data CRC error
// IBM FM/MFM sector header (IBMF/IBMM chunk) flag bits:
pub const SH_IBM_FLAG_CRC_ERROR_ID: u8 = 0b0001; // ID (address) field CRC error
pub const SH_IBM_FLAG_CRC_ERROR_DATA: u8 = 0b0010; // data field CRC error
pub const SH_IBM_DELETED_DATA: u8 = 0b0100; // deleted data address mark
pub const SH_IBM_MISSING_DATA: u8 = 0b1000; // no data address mark present
/// Parser state accumulated for the sector currently being decoded.
///
/// PSI describes one sector across several consecutive chunks (SECT, optional
/// IBMM header, OFFS, DATA), so fields are collected here and flushed when the
/// sector is finally added to a track; `reset()` clears the context between
/// sectors.
#[derive(Default)]
pub struct SectorContext {
    phys_chs: Option<DiskChs>, // physical cylinder/head/sector from the SECT chunk
    phys_size: usize,          // sector size in bytes from the SECT chunk
    ibm_chsn: Option<DiskChsn>, // sector ID from an IBM sector header chunk
    data_crc_error: bool,      // data field CRC error flag
    address_crc_error: bool,   // ID/address field CRC error flag
    deleted: bool,             // deleted data address mark present
    no_dam: bool,              // data address mark missing
    alternate: bool,           // this is alternate (duplicate) sector data
    bit_offset: Option<u32>,   // bit position of the sector within the track (OFFS chunk)
}
impl SectorContext {
    /// True once a physical sector header (SECT chunk) has been seen.
    fn have_context(&self) -> bool {
        self.phys_chs.is_some()
    }

    /// Clear all per-sector state in preparation for the next sector.
    fn reset(&mut self) {
        *self = SectorContext::default();
    }

    #[allow(dead_code)]
    fn phys_ch(&self) -> DiskCh {
        DiskCh::from(self.phys_chs.unwrap())
    }

    /// Sector ID for the current sector: the IBM address-mark CHSN if one was
    /// provided, otherwise one synthesized from the physical CHS and size.
    ///
    /// # Panics
    /// Panics if neither an IBM header nor a physical sector header has been
    /// seen.
    fn sid(&self) -> DiskChsn {
        // BUGFIX: use the lazy `unwrap_or_else` form. The previous eager
        // `unwrap_or(DiskChsn::new(self.phys_chs.unwrap()...))` evaluated the
        // fallback — including its `unwrap()`s — even when `ibm_chsn` was
        // `Some`, panicking on an IBM header without a preceding SECT chunk
        // and constructing a throwaway value otherwise.
        self.ibm_chsn.unwrap_or_else(|| {
            let chs = self.phys_chs.unwrap();
            DiskChsn::new(chs.c(), chs.h(), chs.s(), DiskChsn::bytes_to_n(self.phys_size))
        })
    }
}
#[derive(Debug)]
#[binrw]
#[brw(big)]
pub struct PsiChunkHeader {
pub id: [u8; 4],
pub size: u32,
}
#[derive(Debug)]
#[binrw]
#[brw(big)]
pub struct PsiHeader {
pub version: u16,
pub sector_format: [u8; 2],
}
#[derive(Debug)]
#[binrw]
#[brw(big)]
pub struct PsiChunkCrc {
pub crc: u32,
}
#[binrw]
#[brw(big)]
pub struct PsiSectorHeader {
pub cylinder: u16,
pub head: u8,
pub sector: u8,
pub size: u16,
pub flags: u8,
pub compressed_data: u8,
}
#[binrw]
#[brw(big)]
pub struct PsiIbmSectorHeader {
pub cylinder: u8,
pub head: u8,
pub sector: u8,
pub n: u8,
pub flags: u8,
pub encoding: u8,
}
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum PsiChunkType {
FileHeader,
Text,
SectorHeader,
SectorData,
WeakMask,
IbmFmSectorHeader,
IbmMfmSectorHeader,
MacintoshSectorHeader,
SectorPositionOffset,
ClockRateAdjustment,
End,
Unknown,
}
pub struct PsiChunk {
pub chunk_type: PsiChunkType,
pub data: Vec<u8>,
}
/// Decode the PSI file header's two-byte `sector_format` field into the
/// image's default (encoding, density) pair.
///
/// Per the PSI specification, the first byte selects the encoding family
/// (0x00 = unknown, 0x01 = FM, 0x02 = MFM, 0x03 = GCR) and the second byte
/// the rate variant. Returns `None` for unrecognized values.
pub(crate) fn decode_psi_sector_format(sector_format: [u8; 2]) -> Option<(TrackDataEncoding, TrackDensity)> {
    match sector_format {
        // 0x0000 is "unknown/mixed"; fall back to FM / standard density.
        [0x00, 0x00] => Some((TrackDataEncoding::Fm, TrackDensity::Standard)),
        [0x01, 0x00] => Some((TrackDataEncoding::Fm, TrackDensity::Double)),
        // BUGFIX: 0x02xx codes are MFM per the PSI spec (0x0200 = MFM/DD,
        // 0x0201 = MFM/HD). The previous mapping of these two arms to FM/High
        // was inconsistent with the 0x0202 => MFM arm below.
        [0x02, 0x00] => Some((TrackDataEncoding::Mfm, TrackDensity::Double)),
        [0x02, 0x01] => Some((TrackDataEncoding::Mfm, TrackDensity::High)),
        [0x02, 0x02] => Some((TrackDataEncoding::Mfm, TrackDensity::Extended)),
        // TODO: What density are GCR disks? Are they all the same? PSI doesn't specify any variants.
        [0x03, 0x00] => Some((TrackDataEncoding::Gcr, TrackDensity::Double)),
        _ => None,
    }
}
impl PsiFormat {
#[allow(dead_code)]
fn format() -> DiskImageFileFormat {
DiskImageFileFormat::PceSectorImage
}
pub(crate) fn capabilities() -> FormatCaps {
FormatCaps::empty()
}
pub fn platforms() -> Vec<Platform> {
// PSI images support both PC and Macintosh platforms.
vec![Platform::IbmPc, Platform::Macintosh]
}
pub(crate) fn extensions() -> Vec<&'static str> {
vec!["psi"]
}
/// Sniff the stream for a PSI signature: a valid image begins with a chunk
/// whose ID is "PSI ".
pub(crate) fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
    // Probe from the start of the stream; a failed seek simply makes the
    // subsequent header read fail, which reads as "not detected".
    _ = image.seek(std::io::SeekFrom::Start(0));
    PsiChunkHeader::read_be(&mut image)
        .map(|header| &header.id == b"PSI ")
        .unwrap_or(false)
}
pub(crate) fn can_write(_image: Option<&DiskImage>) -> ParserWriteCompatibility {
ParserWriteCompatibility::UnsupportedFormat
}
pub(crate) fn read_chunk<RWS: ReadSeek>(mut image: RWS) -> Result<PsiChunk, DiskImageError> {
let chunk_pos = image.stream_position()?;
//log::trace!("Reading chunk header...");
let chunk_header = PsiChunkHeader::read(&mut image)?;
if let Ok(id) = std::str::from_utf8(&chunk_header.id) {
log::trace!("Chunk ID: {} Size: {}", id, chunk_header.size);
}
else {
log::trace!("Chunk ID: {:?} Size: {}", chunk_header.id, chunk_header.size);
}
let chunk_type = match &chunk_header.id {
b"PSI " => PsiChunkType::FileHeader,
b"TEXT" => PsiChunkType::Text,
b"END " => PsiChunkType::End,
b"SECT" => PsiChunkType::SectorHeader,
b"DATA" => PsiChunkType::SectorData,
b"WEAK" => PsiChunkType::WeakMask,
b"IBMF" => PsiChunkType::IbmFmSectorHeader,
b"IBMM" => PsiChunkType::IbmMfmSectorHeader,
b"MACG" => PsiChunkType::MacintoshSectorHeader,
b"OFFS" => PsiChunkType::SectorPositionOffset,
b"TIME" => PsiChunkType::ClockRateAdjustment,
_ => {
log::trace!("Unknown chunk type.");
PsiChunkType::Unknown
}
};
if chunk_header.size > MAXIMUM_CHUNK_SIZE as u32 {
return Err(DiskImageError::FormatParseError);
}
let mut buffer = vec![0u8; chunk_header.size as usize + 8];
//log::trace!("Seeking to chunk start...");
image.seek(std::io::SeekFrom::Start(chunk_pos))?;
image.read_exact(&mut buffer)?;
let crc_calc = pce_crc(&buffer);
let chunk_crc = PsiChunkCrc::read(&mut image)?;
if chunk_crc.crc != crc_calc {
return Err(DiskImageError::CrcError);
}
//log::trace!("CRC matched: {:04X} {:04X}", chunk_crc.crc, crc_calc);
let chunk = PsiChunk {
chunk_type,
data: buffer[8..].to_vec(),
};
Ok(chunk)
}
pub(crate) fn load_image<RWS: ReadSeek>(
mut read_buf: RWS,
disk_image: &mut DiskImage,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_source_format(DiskImageFileFormat::PceSectorImage);
// Seek to start of read_buf.
read_buf.seek(std::io::SeekFrom::Start(0))?;
let mut chunk = PsiFormat::read_chunk(&mut read_buf)?;
// File header must be first chunk.
if chunk.chunk_type != PsiChunkType::FileHeader {
return Err(DiskImageError::UnknownFormat);
}
let file_header =
PsiHeader::read(&mut Cursor::new(&chunk.data)).map_err(|_| DiskImageError::FormatParseError)?;
log::trace!("Read PSI file header. Format version: {}", file_header.version);
let (default_encoding, disk_density) =
decode_psi_sector_format(file_header.sector_format).ok_or(DiskImageError::FormatParseError)?;
let mut comment_string = String::new();
let mut ctx = SectorContext::default();
let mut track_set: FoxHashSet<DiskCh> = FoxHashSet::new();
let mut sector_counts: FoxHashMap<u8, u32> = FoxHashMap::new();
let mut heads_seen: FoxHashSet<u8> = FoxHashSet::new();
let mut sectors_per_track = 0;
let mut current_track = None;
while chunk.chunk_type != PsiChunkType::End {
match chunk.chunk_type {
PsiChunkType::FileHeader => {}
PsiChunkType::SectorHeader => {
//log::trace!("Sector header chunk.");
let sector_header = PsiSectorHeader::read(&mut Cursor::new(&chunk.data))?;
let chs = DiskChs::from((sector_header.cylinder, sector_header.head, sector_header.sector));
let ch = DiskCh::from((sector_header.cylinder, sector_header.head));
heads_seen.insert(sector_header.head);
if !track_set.contains(&ch) {
log::trace!("Adding track...");
let params = MetaSectorTrackParams {
ch,
data_rate: TrackDataRate::from(disk_density),
encoding: default_encoding,
};
let new_track = disk_image.add_track_metasector(¶ms)?;
current_track = Some(new_track);
track_set.insert(ch);
log::trace!("Observing sector count: {}", sectors_per_track);
sector_counts
.entry(sectors_per_track)
.and_modify(|e| *e += 1)
.or_insert(1);
sectors_per_track = 0;
}
if sector_header.flags & SH_FLAG_ALTERNATE != 0 {
log::trace!("Alternate sector data.");
ctx.alternate = true;
}
else {
ctx.alternate = false;
}
ctx.phys_chs = Some(chs);
ctx.phys_size = sector_header.size as usize;
ctx.data_crc_error = sector_header.flags & SH_FLAG_CRC_ERROR != 0;
// Write sector data immediately if compressed data is indicated (no sector data chunk follows)
if sector_header.flags & SH_FLAG_COMPRESSED != 0 {
log::trace!("Compressed sector data: {:02X}", sector_header.compressed_data);
let chunk_expand = vec![sector_header.compressed_data; sector_header.size as usize];
if let Some(ref mut track) = current_track {
// Add this sector to track.
let params = AddSectorParams {
id_chsn: DiskChsn::from((chs, DiskChsn::bytes_to_n(sector_header.size as usize))),
data: &chunk_expand,
weak_mask: None,
hole_mask: None,
attributes: SectorAttributes {
address_error: false, // Compressed data cannot encode address CRC state.
data_error: ctx.data_crc_error,
deleted_mark: false,
no_dam: false,
},
alternate: ctx.alternate,
bit_index: ctx.bit_offset.map(|x| x as usize),
};
track.add_sector(¶ms)?;
ctx.reset();
}
else {
log::error!("Tried to add sector without a current track.");
return Err(DiskImageError::FormatParseError);
}
}
log::trace!(
"SECT chunk: Sector ID: {} size: {} data_crc_error: {}",
chs,
sector_header.size,
ctx.data_crc_error
);
}
PsiChunkType::SectorData => {
if !ctx.have_context() {
log::error!("Sector data chunk without a preceding sector header.");
return Err(DiskImageError::FormatParseError);
}
log::trace!(
"DATA chunk: {} crc_error: {}",
ctx.phys_chs.unwrap(),
ctx.data_crc_error
);
if ctx.phys_size != chunk.data.len() {
log::warn!(
"Sector data size mismatch. Header specified: {} SectorData specified: {}",
ctx.phys_size,
chunk.data.len()
);
}
if let Some(ref mut track) = current_track {
// Add this sector to track.
let params = AddSectorParams {
id_chsn: ctx.sid(),
data: &chunk.data,
weak_mask: None,
hole_mask: None,
attributes: SectorAttributes {
address_error: ctx.address_crc_error,
data_error: ctx.data_crc_error,
deleted_mark: ctx.deleted,
no_dam: ctx.no_dam,
},
alternate: ctx.alternate,
bit_index: ctx.bit_offset.map(|x| x as usize),
};
track.add_sector(¶ms)?;
}
else {
log::error!("Tried to add sector without a current track.");
return Err(DiskImageError::FormatParseError);
}
sectors_per_track += 1;
ctx.reset();
}
PsiChunkType::Text => {
// PSI docs:
// `If there are multiple TEXT chunks, their contents should be concatenated`
if let Ok(text) = std::str::from_utf8(&chunk.data) {
comment_string.push_str(text);
}
}
PsiChunkType::SectorPositionOffset => {
let offset = u32::from_be_bytes([chunk.data[0], chunk.data[1], chunk.data[2], chunk.data[3]]);
ctx.bit_offset = Some(offset);
log::trace!("Sector position offset: {}", offset);
}
PsiChunkType::IbmMfmSectorHeader => {
let ibm_header = PsiIbmSectorHeader::read(&mut Cursor::new(&chunk.data))?;
if ctx.ibm_chsn.is_some() {
log::warn!("Duplicate IBM sector header or context not reset");
}
ctx.ibm_chsn = Some(DiskChsn::from((
ibm_header.cylinder as u16,
ibm_header.head,
ibm_header.sector,
ibm_header.n,
)));
ctx.data_crc_error = ibm_header.flags & SH_IBM_FLAG_CRC_ERROR_DATA != 0;
ctx.address_crc_error = ibm_header.flags & SH_IBM_FLAG_CRC_ERROR_ID != 0;
ctx.deleted = ibm_header.flags & SH_IBM_DELETED_DATA != 0;
ctx.no_dam = ibm_header.flags & SH_IBM_MISSING_DATA != 0;
}
PsiChunkType::End => {
log::trace!("End chunk.");
break;
}
_ => {
log::warn!("Unhandled chunk type: {:?}", chunk.chunk_type);
}
}
chunk = PsiFormat::read_chunk(&mut read_buf)?;
}
let head_ct = heads_seen.len() as u8;
let track_ct = track_set.len() as u16;
disk_image.descriptor = DiskDescriptor {
// PSI images are going to be either PC or Mac. Since we aren't handling Macintosh-specific
// chunks, we'll just assume it's a PC disk.
platforms: Some(vec![Platform::IbmPc]),
geometry: DiskCh::from((track_ct / head_ct as u16, head_ct)),
data_rate: Default::default(),
data_encoding: TrackDataEncoding::Mfm,
density: disk_density,
rpm: None,
write_protect: None,
};
Ok(())
}
/// Writing PSI images is not implemented yet.
///
/// # Errors
/// Always returns [`DiskImageError::UnsupportedFormat`].
pub fn save_image<RWS: ReadWriteSeek>(
    _image: &DiskImage,
    _opts: &ParserWriteOptions,
    _output: &mut RWS,
) -> Result<(), DiskImageError> {
    Err(DiskImageError::UnsupportedFormat)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/pce/pri.rs | src/file_parsers/pce/pri.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
src/parsers/pri.rs
A parser for the PRI disk image format.
PRI format images are PCE bitstream images, an internal format used by the PCE emulator and
devised by Hampa Hug.
It is a chunk-based format similar to RIFF.
*/
use crate::{
file_parsers::{bitstream_flags, FormatCaps, ParserWriteCompatibility},
io::{Cursor, ReadSeek, ReadWriteSeek, Write},
types::{BitStreamTrackParams, DiskDescriptor},
};
use crate::{
file_parsers::{pce::crc::pce_crc, ParserReadOptions, ParserWriteOptions},
track::bitstream::BitStreamTrack,
types::{chs::DiskCh, Platform, TrackDataEncoding, TrackDataRate, TrackDataResolution, TrackDensity},
DiskImage,
DiskImageError,
DiskImageFileFormat,
FoxHashSet,
LoadingCallback,
};
use binrw::{binrw, meta::WriteEndian, BinRead, BinWrite};
pub struct PriFormat;
pub const MAXIMUM_CHUNK_SIZE: usize = 0x100000; // Reasonable 1MB limit for chunk sizes.
#[derive(Debug)]
#[binrw]
#[brw(big)]
pub struct PriChunkHeader {
pub id: [u8; 4],
pub size: u32,
}
#[derive(Debug)]
#[binrw]
#[brw(big)]
pub struct PriChunkFooter {
pub id: [u8; 4],
pub size: u32,
pub footer: u32,
}
/// We use the Default implementation to set the special CRC value for the footer.
impl Default for PriChunkFooter {
    fn default() -> Self {
        PriChunkFooter {
            id: *b"END ",
            size: 0,
            // Fixed value written in the CRC slot of the terminating "END "
            // chunk. NOTE(review): presumably the PCE-defined constant for an
            // empty END chunk — confirm against the PRI specification before
            // changing.
            footer: 0x3d64af78,
        }
    }
}
#[derive(Debug)]
#[binrw]
#[brw(big)]
pub struct PriHeader {
pub version: u16,
pub reserved: u16,
}
#[derive(Debug)]
#[binrw]
#[brw(big)]
pub struct PriChunkCrc {
pub crc: u32,
}
#[derive(Default, Debug)]
#[binrw]
#[brw(big)]
pub struct PriTrackHeader {
pub cylinder: u32,
pub head: u32,
pub bit_length: u32,
pub clock_rate: u32,
}
#[binrw]
#[brw(big)]
pub struct PriWeakMaskEntry {
pub bit_offset: u32,
pub bit_mask: u32,
}
#[binrw]
#[brw(big)]
pub struct PriAlternateClock {
pub bit_offset: u32,
pub new_clock: u32,
}
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum PriChunkType {
FileHeader,
Text,
TrackHeader,
TrackData,
WeakMask,
AlternateBitClock,
End,
Unknown,
}
pub struct PriChunk {
pub chunk_type: PriChunkType,
pub size: u32,
pub data: Vec<u8>,
}
#[derive(Default)]
pub struct TrackContext {
phys_ch: DiskCh,
bit_clock: u32,
}
impl PriFormat {
#[allow(dead_code)]
fn format() -> DiskImageFileFormat {
DiskImageFileFormat::PceBitstreamImage
}
pub(crate) fn capabilities() -> FormatCaps {
bitstream_flags() | FormatCaps::CAP_COMMENT | FormatCaps::CAP_WEAK_BITS
}
pub fn platforms() -> Vec<Platform> {
// PRI images should in theory support any platform that can be represented as bitstream
// tracks. PCE itself only supports PC and Macintosh platforms, however, so for now we'll
// limit it to those.
vec![Platform::IbmPc, Platform::Macintosh]
}
pub(crate) fn extensions() -> Vec<&'static str> {
vec!["pri"]
}
/// Sniff the stream for a PRI signature: a valid image begins with a chunk
/// whose ID is "PRI ".
pub(crate) fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
    // Probe from the start of the stream; a failed seek simply makes the
    // subsequent header read fail, which reads as "not detected".
    _ = image.seek(std::io::SeekFrom::Start(0));
    PriChunkHeader::read_be(&mut image)
        .map(|header| &header.id == b"PRI ")
        .unwrap_or(false)
}
/// Return the compatibility of the image with the parser.
///
/// With no image supplied, writing is assumed possible. Otherwise the image
/// must hold bitstream data at a single resolution, and `DataLoss` is
/// reported when it requires capabilities PRI cannot represent.
pub(crate) fn can_write(image: Option<&DiskImage>) -> ParserWriteCompatibility {
    match image {
        None => ParserWriteCompatibility::Ok,
        Some(image) => {
            // PRI images can't store multiple resolutions, and must store bitstream data
            if (image.resolution.len() > 1) || !image.resolution.contains(&TrackDataResolution::BitStream) {
                ParserWriteCompatibility::Incompatible
            }
            else if PriFormat::capabilities().contains(image.required_caps()) {
                ParserWriteCompatibility::Ok
            }
            else {
                ParserWriteCompatibility::DataLoss
            }
        }
    }
}
/// Read and validate a single PRI chunk at the reader's current position.
///
/// The chunk's CRC (stored immediately after the payload) is verified over
/// the 8-byte header plus payload; a mismatch yields
/// [`DiskImageError::CrcError`], and chunks larger than
/// [`MAXIMUM_CHUNK_SIZE`] are rejected as a parse error. On success the
/// reader is left positioned just past the chunk's CRC.
pub(crate) fn read_chunk<RWS: ReadSeek>(mut image: RWS) -> Result<PriChunk, DiskImageError> {
    let chunk_pos = image.stream_position()?;
    //log::trace!("Reading chunk header...");
    let chunk_header = PriChunkHeader::read(&mut image)?;
    if let Ok(id) = std::str::from_utf8(&chunk_header.id) {
        log::trace!("Chunk ID: {} Size: {}", id, chunk_header.size);
    }
    else {
        log::trace!("Chunk ID: {:?} Size: {}", chunk_header.id, chunk_header.size);
    }
    let chunk_type = match &chunk_header.id {
        b"PRI " => PriChunkType::FileHeader,
        b"TEXT" => PriChunkType::Text,
        b"END " => PriChunkType::End,
        b"TRAK" => PriChunkType::TrackHeader,
        b"DATA" => PriChunkType::TrackData,
        b"WEAK" => PriChunkType::WeakMask,
        b"BCLK" => PriChunkType::AlternateBitClock,
        _ => {
            log::trace!("Unknown chunk type.");
            PriChunkType::Unknown
        }
    };
    if chunk_header.size > MAXIMUM_CHUNK_SIZE as u32 {
        return Err(DiskImageError::FormatParseError);
    }
    // Re-read header + payload in one buffer: the CRC covers both.
    let mut buffer = vec![0u8; chunk_header.size as usize + 8];
    //log::trace!("Seeking to chunk start...");
    image.seek(std::io::SeekFrom::Start(chunk_pos))?;
    image.read_exact(&mut buffer)?;
    let crc_calc = pce_crc(&buffer);
    let chunk_crc = PriChunkCrc::read(&mut image)?;
    if chunk_crc.crc != crc_calc {
        return Err(DiskImageError::CrcError);
    }
    //log::trace!("CRC matched: {:04X} {:04X}", chunk_crc.crc, crc_calc);
    let chunk = PriChunk {
        chunk_type,
        size: chunk_header.size,
        // Strip the 8-byte chunk header; `data` holds only the payload.
        data: buffer[8..].to_vec(),
    };
    Ok(chunk)
}
pub(crate) fn write_chunk<RWS: ReadWriteSeek, T: BinWrite + WriteEndian>(
image: &mut RWS,
chunk_type: PriChunkType,
data: &T,
) -> Result<(), DiskImageError>
where
for<'a> <T as BinWrite>::Args<'a>: Default,
{
// Create a chunk buffer Cursor to write our chunk data into.
let mut chunk_buf = Cursor::new(Vec::new());
let chunk_str = match chunk_type {
PriChunkType::FileHeader => b"PRI ",
PriChunkType::Text => b"TEXT",
PriChunkType::End => b"END ",
PriChunkType::TrackHeader => b"TRAK",
PriChunkType::TrackData => b"DATA",
PriChunkType::WeakMask => b"WEAK",
PriChunkType::AlternateBitClock => b"BCLK",
PriChunkType::Unknown => b"UNKN",
};
// Serialize the data to a buffer, so we can set the length in the chunk header.
let mut data_buf = Cursor::new(Vec::new());
data.write(&mut data_buf)?;
let chunk_header = PriChunkHeader {
id: *chunk_str,
size: data_buf.get_ref().len() as u32,
};
log::trace!("Writing chunk: {:?} size: {}", chunk_type, data_buf.get_ref().len());
chunk_header.write(&mut chunk_buf)?;
chunk_buf.write_all(data_buf.get_ref())?;
// Calculate CRC for chunk, over header and data bytes.
let crc_calc = pce_crc(chunk_buf.get_ref());
// Write the CRC to the chunk.
let chunk_crc = PriChunkCrc { crc: crc_calc };
chunk_crc.write(&mut chunk_buf)?;
// Write the chunk buffer to the image.
image.write_all(chunk_buf.get_ref())?;
Ok(())
}
/// We use a separate function to write text chunks, as str does not implement BinWrite.
pub(crate) fn write_text<RWS: ReadWriteSeek>(image: &mut RWS, text: &str) -> Result<(), DiskImageError> {
// Create a chunk buffer Cursor to write our chunk data into.
let mut chunk_buf = Cursor::new(Vec::new());
if text.len() > 1000 {
panic!("Text chunk too large.");
}
let chunk_str = b"TEXT";
let chunk_header = PriChunkHeader {
id: *chunk_str,
size: text.len() as u32,
};
chunk_header.write(&mut chunk_buf)?;
chunk_buf.write_all(text.as_bytes())?;
// Calculate CRC for chunk, over header and data bytes.
let crc_calc = pce_crc(chunk_buf.get_ref());
// Write the CRC to the chunk.
let chunk_crc = PriChunkCrc { crc: crc_calc };
chunk_crc.write(&mut chunk_buf)?;
// Write the chunk buffer to the image.
image.write_all(chunk_buf.get_ref())?;
Ok(())
}
/// We use a separate function to write raw data chunks, as Vec or &[u8] does not implement BinWrite.
pub(crate) fn write_chunk_raw<RWS: ReadWriteSeek>(
image: &mut RWS,
chunk_type: PriChunkType,
data: &[u8],
) -> Result<(), DiskImageError> {
// Create a chunk buffer Cursor to write our chunk data into.
let mut chunk_buf = Cursor::new(Vec::new());
let chunk_str = match chunk_type {
PriChunkType::FileHeader => b"PRI ",
PriChunkType::Text => b"TEXT",
PriChunkType::End => b"END ",
PriChunkType::TrackHeader => b"TRAK",
PriChunkType::TrackData => b"DATA",
PriChunkType::WeakMask => b"WEAK",
PriChunkType::AlternateBitClock => b"BCLK",
PriChunkType::Unknown => b"UNKN",
};
let chunk_header = PriChunkHeader {
id: *chunk_str,
size: data.len() as u32,
};
chunk_header.write(&mut chunk_buf)?;
chunk_buf.write_all(data)?;
// Calculate CRC for chunk, over header and data bytes.
let crc_calc = pce_crc(chunk_buf.get_ref());
// Write the CRC to the chunk.
let chunk_crc = PriChunkCrc { crc: crc_calc };
chunk_crc.write(&mut chunk_buf)?;
// Write the chunk buffer to the image.
image.write_all(chunk_buf.get_ref())?;
Ok(())
}
pub(crate) fn load_image<RWS: ReadSeek>(
mut read_buf: RWS,
disk_image: &mut DiskImage,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_source_format(DiskImageFileFormat::PceBitstreamImage);
// Seek to start of read_buf.
read_buf.seek(std::io::SeekFrom::Start(0))?;
let mut chunk = PriFormat::read_chunk(&mut read_buf)?;
// File header must be first chunk.
if chunk.chunk_type != PriChunkType::FileHeader {
return Err(DiskImageError::UnknownFormat);
}
let file_header =
PriHeader::read(&mut Cursor::new(&chunk.data)).map_err(|_| DiskImageError::FormatParseError)?;
log::trace!("Read PRI file header. Format version: {}", file_header.version);
let mut comment_string = String::new();
let mut heads_seen: FoxHashSet<u8> = FoxHashSet::new();
let mut cylinders_seen: FoxHashSet<u16> = FoxHashSet::new();
let mut default_bit_clock = 0;
let mut expected_data_size = 0;
let mut track_header = PriTrackHeader::default();
let mut ctx = TrackContext::default();
let mut disk_data_rate = None;
while chunk.chunk_type != PriChunkType::End {
match chunk.chunk_type {
PriChunkType::TrackHeader => {
track_header = PriTrackHeader::read(&mut Cursor::new(&chunk.data))
.map_err(|_| DiskImageError::FormatParseError)?;
let ch = DiskCh::from((track_header.cylinder as u16, track_header.head as u8));
log::trace!(
"Track header: {:?} Bitcells: {} Clock Rate: {}",
ch,
track_header.bit_length,
track_header.clock_rate
);
expected_data_size =
track_header.bit_length as usize / 8 + if track_header.bit_length % 8 != 0 { 1 } else { 0 };
default_bit_clock = track_header.clock_rate;
cylinders_seen.insert(track_header.cylinder as u16);
heads_seen.insert(track_header.head as u8);
ctx.phys_ch = ch;
}
PriChunkType::AlternateBitClock => {
let alt_clock = PriAlternateClock::read(&mut Cursor::new(&chunk.data))
.map_err(|_| DiskImageError::FormatParseError)?;
if alt_clock.new_clock == 0 {
ctx.bit_clock = default_bit_clock;
}
else {
let new_bit_clock =
((alt_clock.new_clock as f64 / u16::MAX as f64) * default_bit_clock as f64) as u32;
ctx.bit_clock = new_bit_clock;
}
log::trace!(
"Alternate bit clock. Bit offset: {} New clock: {}",
alt_clock.bit_offset,
ctx.bit_clock
);
}
PriChunkType::TrackData => {
log::trace!(
"Track data chunk: {} size: {} expected size: {}",
ctx.phys_ch,
chunk.size,
expected_data_size
);
// Set the global disk data rate once.
if disk_data_rate.is_none() {
disk_data_rate = Some(TrackDataRate::from(ctx.bit_clock));
}
let params = BitStreamTrackParams {
schema: None,
encoding: TrackDataEncoding::Mfm,
data_rate: TrackDataRate::from(ctx.bit_clock),
rpm: None,
ch: ctx.phys_ch,
bitcell_ct: Some(track_header.bit_length as usize),
data: &chunk.data,
weak: None,
hole: None,
detect_weak: false,
};
disk_image.add_track_bitstream(¶ms)?;
}
PriChunkType::WeakMask => {
let weak_table_len = chunk.size / 8;
if chunk.size % 8 != 0 {
log::error!("Weak mask chunk size is not a multiple of 8.");
return Err(DiskImageError::FormatParseError);
}
let mut cursor = Cursor::new(&chunk.data);
let track = disk_image
.track_mut(ctx.phys_ch)
.ok_or(DiskImageError::FormatParseError)?;
let bit_track = track
.as_any_mut()
.downcast_mut::<BitStreamTrack>()
.ok_or(DiskImageError::FormatParseError)?;
for _i in 0..weak_table_len {
let weak_mask =
PriWeakMaskEntry::read(&mut cursor).map_err(|_| DiskImageError::FormatParseError)?;
log::trace!(
"Weak mask entry. Bit offset: {} Mask: {:08X}",
weak_mask.bit_offset,
weak_mask.bit_mask
);
bit_track.write_weak_mask_u32(weak_mask.bit_mask, weak_mask.bit_offset as usize);
}
}
PriChunkType::Text => {
// PSI docs:
// `If there are multiple TEXT chunks, their contents should be concatenated`
if let Ok(text) = std::str::from_utf8(&chunk.data) {
comment_string.push_str(text);
}
}
PriChunkType::End => {
log::trace!("End chunk.");
break;
}
_ => {
log::trace!("Chunk type: {:?}", chunk.chunk_type);
}
}
chunk = PriFormat::read_chunk(&mut read_buf)?;
}
log::trace!("Comment: {}", comment_string);
let head_ct = heads_seen.len() as u8;
let cylinder_ct = cylinders_seen.len() as u16;
disk_image.descriptor = DiskDescriptor {
platforms: None,
geometry: DiskCh::from((cylinder_ct, head_ct)),
data_rate: disk_data_rate.unwrap(),
data_encoding: TrackDataEncoding::Mfm,
density: TrackDensity::from(disk_data_rate.unwrap()),
rpm: None,
write_protect: None,
};
Ok(())
}
pub fn save_image<RWS: ReadWriteSeek>(
image: &DiskImage,
_opts: &ParserWriteOptions,
output: &mut RWS,
) -> Result<(), DiskImageError> {
if (image.resolution.len() > 1) || !image.resolution.contains(&TrackDataResolution::BitStream) {
log::error!("Unsupported image resolution.");
return Err(DiskImageError::UnsupportedFormat);
}
log::trace!("Saving PRI image...");
// Write the file header chunk. Version remains at 0 for now.
let file_header = PriHeader {
version: 0,
reserved: 0,
};
PriFormat::write_chunk(output, PriChunkType::FileHeader, &file_header)?;
// Write any comments present in the image to a TEXT chunk.
image
.metadata_key("comment")
.map(|comment| PriFormat::write_text(output, &comment));
// Iterate through tracks and write track headers and data.
for track in image.track_iter() {
if let Some(track) = track.as_any().downcast_ref::<BitStreamTrack>() {
log::trace!(
"Track {}: encoding: {:?} data_rate: {:?} bit length: {}",
track.ch,
track.encoding,
track.data_rate,
track.data.len(),
);
// Write the track header.
let track_header = PriTrackHeader {
cylinder: track.ch.c() as u32,
head: track.ch.h() as u32,
bit_length: track.data.len() as u32,
clock_rate: track.data_rate.into(),
};
PriFormat::write_chunk(output, PriChunkType::TrackHeader, &track_header)?;
// Write the track data.
let track_data = track.data.data_copied();
PriFormat::write_chunk(output, PriChunkType::TrackData, &track_data)?;
if track.data.weak_mask().any() {
// At least one bit is set in the weak bit mask, so let's export it.
let weak_mask = track.data.weak_mask();
// Create a buffer for our weak mask table.
let mut weak_buffer = Cursor::new(Vec::new());
let mut mask_offset;
let mut bit_offset = 0;
let mut iter = weak_mask.iter();
while let Some(bit) = iter.next() {
bit_offset += 1;
if bit {
mask_offset = bit_offset;
// Start with a 1 in the MSB position of the shift register
let mut mask_u32: u32 = 1 << 31;
// Shift in the next 31 bits, if available
for pos in 1..32 {
if let Some(next_bit) = iter.next() {
bit_offset += 1;
mask_u32 |= (next_bit as u32) << (31 - pos);
}
else {
break;
}
}
// Add an entry to the table.
PriWeakMaskEntry {
bit_offset: mask_offset,
bit_mask: mask_u32,
}
.write_be(&mut weak_buffer)?;
}
}
PriFormat::write_chunk_raw(output, PriChunkType::WeakMask, weak_buffer.get_ref())?;
}
}
else {
unreachable!("Expected only BitStream variants");
}
}
// Write the file-end chunk.
log::trace!("Writing END chunk...");
let end_chunk = PriChunkFooter::default();
end_chunk.write(output)?;
Ok(())
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/pce/mod.rs | src/file_parsers/pce/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! Parsers for the various formats associated with the PCE emulator and disk
//! tool suite, invented by Hampa Hug.
//!
//! These include:
//! * PFI - PCE Flux Image - A single-file flux container format.
//! * PRI - PCE Raw Image - A track-based, bitstream container format.
//! * PSI - PCE Sector Image - A sector-based container format containing lots
//! of metadata that can support a surprising number of copy-protected
//! titles.
pub(crate) mod crc;
pub(crate) mod pfi;
pub(crate) mod pri;
pub(crate) mod psi;
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/pce/pfi.rs | src/file_parsers/pce/pfi.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! A parser for the PFI disk image format.
//!
//! PFI format images are PCE flux stream images, a format associated with the PCE emulator and disk
//! tool suite, invented by Hampa Hug.
//!
//! It is a chunk-based format similar to RIFF.
//!
//! Flux transition times are stored in a variable-length encoding similar to Kryoflux.
use crate::{
file_parsers::{
bitstream_flags,
pce::crc::pce_crc,
FormatCaps,
ParserReadOptions,
ParserWriteCompatibility,
ParserWriteOptions,
},
io::{Cursor, ReadBytesExt, ReadSeek, ReadWriteSeek},
track::fluxstream::FluxStreamTrack,
types::{chs::DiskCh, DiskDescriptor, FluxStreamTrackParams, Platform, TrackDataEncoding, TrackDensity},
DiskImage,
DiskImageError,
DiskImageFileFormat,
FoxHashSet,
LoadingCallback,
};
use binrw::{binrw, BinRead};
use strum::IntoEnumIterator;
pub struct PfiFormat;
pub const MAXIMUM_CHUNK_SIZE: usize = 0x1000000; // Reasonable 10MB limit for chunk sizes.
#[derive(Debug)]
#[binrw]
#[brw(big)]
pub struct PfiChunkHeader {
pub id: [u8; 4],
pub size: u32,
}
#[derive(Debug)]
#[binrw]
#[brw(big)]
pub struct PfiHeader {
pub version: u16,
pub reserved: u16,
}
#[derive(Debug)]
#[binrw]
#[brw(big)]
pub struct PfiChunkCrc {
pub crc: u32,
}
#[derive(Default, Debug)]
#[binrw]
#[brw(big)]
pub struct PfiTrackHeader {
pub cylinder: u32,
pub head: u32,
pub clock_rate: u32,
}
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum PfiChunkType {
FileHeader,
Text,
TrackHeader,
Index,
TrackData,
End,
Unknown,
}
pub struct PfiChunk {
pub chunk_type: PfiChunkType,
pub size: u32,
pub data: Vec<u8>,
}
#[derive(Default)]
pub struct TrackContext {
phys_ch: Option<DiskCh>,
clock_rate: Option<u32>,
clock_period: f64,
index_clocks: Vec<u32>,
}
#[derive(Default)]
pub struct PfiRevolution {
transitions: Vec<f64>,
index_time: f64,
}
impl PfiFormat {
#[allow(dead_code)]
fn format() -> DiskImageFileFormat {
DiskImageFileFormat::PceFluxImage
}
pub(crate) fn capabilities() -> FormatCaps {
bitstream_flags() | FormatCaps::CAP_COMMENT | FormatCaps::CAP_WEAK_BITS
}
pub fn platforms() -> Vec<Platform> {
// PFI images should basically support every platform that Kryoflux does
Platform::iter().collect()
}
pub(crate) fn extensions() -> Vec<&'static str> {
vec!["pfi"]
}
pub(crate) fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
let mut detected = false;
_ = image.seek(std::io::SeekFrom::Start(0));
if let Ok(file_header) = PfiChunkHeader::read_be(&mut image) {
if file_header.id == "PFI ".as_bytes() {
detected = true;
}
}
detected
}
/// Return the compatibility of the image with the parser.
pub(crate) fn can_write(_image: Option<&DiskImage>) -> ParserWriteCompatibility {
ParserWriteCompatibility::UnsupportedFormat
}
pub(crate) fn read_chunk<RWS: ReadSeek>(mut image: RWS) -> Result<PfiChunk, DiskImageError> {
let chunk_pos = image.stream_position()?;
//log::trace!("Reading chunk header...");
let chunk_header = PfiChunkHeader::read(&mut image)?;
if let Ok(id) = std::str::from_utf8(&chunk_header.id) {
log::trace!("Chunk ID: {} Size: {}", id, chunk_header.size);
}
else {
log::trace!("Chunk ID: {:?} Size: {}", chunk_header.id, chunk_header.size);
}
let chunk_type = match &chunk_header.id {
b"PFI " => PfiChunkType::FileHeader,
b"TEXT" => PfiChunkType::Text,
b"END " => PfiChunkType::End,
b"TRAK" => PfiChunkType::TrackHeader,
b"INDX" => PfiChunkType::Index,
b"DATA" => PfiChunkType::TrackData,
_ => {
log::trace!("Unknown chunk type.");
PfiChunkType::Unknown
}
};
if chunk_header.size > MAXIMUM_CHUNK_SIZE as u32 {
return Err(DiskImageError::FormatParseError);
}
let mut buffer = vec![0u8; chunk_header.size as usize + 8];
//log::trace!("Seeking to chunk start...");
image.seek(std::io::SeekFrom::Start(chunk_pos))?;
image.read_exact(&mut buffer)?;
let crc_calc = pce_crc(&buffer);
let chunk_crc = PfiChunkCrc::read(&mut image)?;
if chunk_crc.crc != crc_calc {
return Err(DiskImageError::CrcError);
}
//log::trace!("CRC matched: {:04X} {:04X}", chunk_crc.crc, crc_calc);
let chunk = PfiChunk {
chunk_type,
size: chunk_header.size,
data: buffer[8..].to_vec(),
};
Ok(chunk)
}
pub(crate) fn load_image<RWS: ReadSeek>(
mut read_buf: RWS,
disk_image: &mut DiskImage,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_source_format(DiskImageFileFormat::PceFluxImage);
// Seek to start of read_buf.
read_buf.seek(std::io::SeekFrom::Start(0))?;
let mut chunk = PfiFormat::read_chunk(&mut read_buf)?;
// File header must be first chunk.
if chunk.chunk_type != PfiChunkType::FileHeader {
return Err(DiskImageError::UnknownFormat);
}
let file_header =
PfiHeader::read(&mut Cursor::new(&chunk.data)).map_err(|_| DiskImageError::FormatParseError)?;
log::trace!("Read PFI file header. Format version: {}", file_header.version);
let mut comment_string = String::new();
let mut heads_seen: FoxHashSet<u8> = FoxHashSet::new();
let mut cylinders_seen: FoxHashSet<u16> = FoxHashSet::new();
let disk_clock_rate = None;
let mut track_header;
let mut ctx = TrackContext::default();
while chunk.chunk_type != PfiChunkType::End {
match chunk.chunk_type {
PfiChunkType::TrackHeader => {
track_header = PfiTrackHeader::read(&mut Cursor::new(&chunk.data))
.map_err(|_| DiskImageError::FormatParseError)?;
let ch = DiskCh::from((track_header.cylinder as u16, track_header.head as u8));
ctx.phys_ch = Some(ch);
ctx.clock_rate = Some(track_header.clock_rate);
ctx.clock_period = 1.0 / (track_header.clock_rate as f64);
log::trace!(
"Track header: {:?} Clock Rate: {:.04}Mhz Period: {:.04}us",
ch,
track_header.clock_rate as f64 / 1_000_000.0,
ctx.clock_period * 1_000_000.0
);
cylinders_seen.insert(track_header.cylinder as u16);
heads_seen.insert(track_header.head as u8);
}
PfiChunkType::Index => {
let index_entries = chunk.size / 4;
let mut index_list: Vec<u32> = Vec::with_capacity(index_entries as usize);
for i in 0..index_entries {
let index = u32::from_be_bytes([
chunk.data[i as usize * 4],
chunk.data[i as usize * 4 + 1],
chunk.data[i as usize * 4 + 2],
chunk.data[i as usize * 4 + 3],
]);
index_list.push(index);
}
log::trace!("Index chunk with {} entries:", index_entries);
for idx in &index_list {
log::trace!("Index clock: {}", idx);
}
ctx.index_clocks = index_list;
}
PfiChunkType::TrackData => {
log::trace!(
"Track data chunk: {} size: {}",
ctx.phys_ch.unwrap_or_default(),
chunk.size,
);
let revolutions = PfiFormat::read_track_data(&chunk.data, &ctx.index_clocks, ctx.clock_period)?;
log::trace!("Read {} revolutions from track data.", revolutions.len());
let mut flux_track = FluxStreamTrack::new();
// Get last ch in image.
let next_ch = if disk_image.track_ch_iter().count() == 0 {
log::debug!("No tracks in image, starting at c:0 h:0");
DiskCh::new(0, 0)
}
else {
let mut last_ch = disk_image.track_ch_iter().last().unwrap_or(DiskCh::new(0, 0));
log::debug!("Previous track in image: {} heads: {}", last_ch, heads_seen.len());
last_ch.seek_next_track_unchecked(heads_seen.len() as u8);
log::debug!("Setting next track ch: {}", last_ch);
last_ch
};
for (ri, rev) in revolutions.iter().enumerate() {
log::trace!(
"Adding revolution {} with {} transitions and index time of {:.04}ms.",
ri,
rev.transitions.len(),
rev.index_time * 1_000.0
);
flux_track.add_revolution(next_ch, &rev.transitions, rev.index_time);
}
// Get hints from disk image if we aren't the first track.
let (clock_hint, rpm_hint) = if !disk_image.track_pool.is_empty() {
(
Some(disk_image.descriptor.density.base_clock(disk_image.descriptor.rpm)),
disk_image.descriptor.rpm,
)
}
else {
(None, None)
};
let data_rate = disk_image.data_rate();
let params = FluxStreamTrackParams {
ch: next_ch,
schema: None,
encoding: None,
clock: clock_hint,
rpm: rpm_hint,
};
let new_track = disk_image.add_track_fluxstream(flux_track, ¶ms)?;
let (new_density, new_rpm) = if new_track.sector_ct() == 0 {
log::warn!("Track did not decode any sectors. Not updating disk image descriptor.");
(disk_image.descriptor.density, disk_image.descriptor.rpm)
}
else {
let info = new_track.info();
log::debug!(
"Updating disk descriptor with density: {:?} and RPM: {:?}",
info.density,
info.rpm
);
(info.density.unwrap_or(disk_image.descriptor.density), info.rpm)
};
log::debug!("Track added.");
disk_image.descriptor = DiskDescriptor {
// PFI doesn't specify platform.
platforms: None,
geometry: DiskCh::from((cylinders_seen.len() as u16, heads_seen.len() as u8)),
data_rate,
density: new_density,
data_encoding: TrackDataEncoding::Mfm,
rpm: new_rpm,
write_protect: Some(true),
};
}
PfiChunkType::Text => {
// PFI docs:
// `If there are multiple TEXT chunks, their contents should be concatenated`
if let Ok(text) = std::str::from_utf8(&chunk.data) {
comment_string.push_str(text);
}
}
PfiChunkType::End => {
log::trace!("End chunk.");
break;
}
_ => {
log::trace!("Chunk type: {:?}", chunk.chunk_type);
}
}
chunk = PfiFormat::read_chunk(&mut read_buf)?;
}
log::trace!("Comment: {}", comment_string);
let head_ct = heads_seen.len() as u8;
let cylinder_ct = cylinders_seen.len() as u16;
let clock_rate = disk_clock_rate.unwrap_or_default();
disk_image.descriptor = DiskDescriptor {
platforms: None,
geometry: DiskCh::from((cylinder_ct, head_ct)),
data_rate: clock_rate,
data_encoding: TrackDataEncoding::Mfm,
density: TrackDensity::from(clock_rate),
rpm: None,
write_protect: None,
};
Ok(())
}
/// Read PFI variable-length flux transitions and return a list of flux transition times
/// in f64 seconds
fn read_track_data(
data: &[u8],
index_times: &[u32],
clock_period: f64,
) -> Result<Vec<PfiRevolution>, DiskImageError> {
if index_times.is_empty() {
log::error!("No index times found in track data.");
return Err(DiskImageError::FormatParseError);
}
let mut revs: Vec<PfiRevolution> = Vec::with_capacity(5);
let mut current_rev_idx = 0;
let mut next_index = index_times[0];
let mut current_rev = &mut PfiRevolution::default();
let mut clocks = 0;
let mut data_cursor = Cursor::new(data);
let mut last_index_clock = 0;
while let Ok(byte) = data_cursor.read_u8() {
if clocks >= next_index {
log::trace!("Reached next index position at clock: {}", clocks);
current_rev_idx += 1;
if current_rev_idx >= index_times.len() {
break;
}
current_rev.index_time = (clocks - last_index_clock) as f64 * clock_period;
next_index = index_times[current_rev_idx];
revs.push(PfiRevolution {
transitions: Vec::with_capacity(225_000),
index_time: 0.0,
});
current_rev = revs.last_mut().unwrap();
last_index_clock = clocks;
}
match byte {
0x00 => {
// Invalid
log::error!("Invalid 0x00 byte in flux stream.");
return Err(DiskImageError::FormatParseError);
}
0x01 => {
// XX YY
let xx = data_cursor.read_u8()?;
let yy = data_cursor.read_u8()?;
let time = (xx as u16) << 8 | yy as u16;
clocks += time as u32;
current_rev.transitions.push(time as f64 * clock_period);
}
0x02 => {
// XX YY ZZ
let xx = data_cursor.read_u8()?;
let yy = data_cursor.read_u8()?;
let zz = data_cursor.read_u8()?;
let time = (xx as u32) << 16 | (yy as u32) << 8 | zz as u32;
clocks += time;
current_rev.transitions.push(time as f64 * clock_period);
}
0x03 => {
// XX YY ZZ WW
let xx = data_cursor.read_u8()?;
let yy = data_cursor.read_u8()?;
let zz = data_cursor.read_u8()?;
let ww = data_cursor.read_u8()?;
let time = (xx as u32) << 24 | (yy as u32) << 16 | (zz as u32) << 8 | ww as u32;
clocks += time;
current_rev.transitions.push(time as f64 * clock_period);
}
0x04..0x08 => {
// 0(N-4) XX
let base = byte - 0x04;
let xx = data_cursor.read_u8()?;
let time = (base as u16) << 8 | xx as u16;
clocks += time as u32;
current_rev.transitions.push(time as f64 * clock_period);
}
_ => {
// Byte as literal clock count.
clocks += byte as u32;
current_rev.transitions.push(byte as f64 * clock_period);
}
}
}
current_rev.index_time = (clocks - last_index_clock) as f64 * clock_period;
Ok(revs)
}
pub fn save_image<RWS: ReadWriteSeek>(
_image: &DiskImage,
_opts: &ParserWriteOptions,
_output: &mut RWS,
) -> Result<(), DiskImageError> {
Err(DiskImageError::UnsupportedFormat)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/pce/crc.rs | src/file_parsers/pce/crc.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! The shared CRC algorith mused by all PCE disk image formats.
pub(crate) fn pce_crc(buf: &[u8]) -> u32 {
let mut crc = 0;
for byte in buf {
crc ^= ((*byte & 0xFF) as u32) << 24;
for _j in 0..8 {
if crc & 0x8000_0000 != 0 {
crc = (crc << 1) ^ 0x1EDC_6F41;
}
else {
crc <<= 1;
}
}
}
crc & 0xFFFF_FFFF
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/compression/mod.rs | src/file_parsers/compression/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
Custom decompression algorithms for FluxFox image formats parsers.
Most of this code is based on retrocompressor, (c) 2023 Daniel Gordon
and is designed to accommodate Teledisk (TD0) images with 'advanced
compression'.
Version 1.0 Teledisk files use an LZW variant, whereas v2.0 images use
LZHUF, likely due to patent issues at the time.
*/
#[cfg(feature = "td0")]
pub mod lzhuf;
#[cfg(feature = "td0")]
pub mod lzw;
#[cfg(feature = "td0")]
type DYNERR = Box<dyn std::error::Error>;
/// Errors produced during compression or decompression
#[derive(thiserror::Error, Debug)]
#[allow(unused)]
pub enum CompressionError {
#[error("File format mismatch")]
FileFormatMismatch,
#[error("File too large")]
FileTooLarge,
#[error("Checksum failed")]
BadChecksum,
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/compression/lzw/mod.rs | src/file_parsers/compression/lzw/mod.rs | /*
Original code from retrocompressor by Daniel Gordon
https://github.com/dfgordon/retrocompressor/
Copyright (c) 2023 Daniel Gordon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
//! LZW Compression
//!
//! This currently supports fixed code widths only, other parameters are flexible.
//! Efficiency is probably not optimal, we rely on `std::collections::HashMap` to perform
//! fast lookups on keys of the type `(usize,usize)`.
#![allow(dead_code)]
use crate::FoxHashMap;
use bit_vec::BitVec;
use super::{CompressionError, DYNERR};
use crate::io::{Cursor, ErrorKind, Read, Seek, SeekFrom, Write};
#[derive(Copy, Clone, Debug)]
pub enum Ord {
Msb0,
Lsb0,
}
/// Options controlling compression
#[derive(Clone)]
pub struct Options {
/// Length in bits of the header preceding each chunk, can be 0.
/// Can be used with fixed code width in lieu of clear code.
pub header_bits: usize,
/// header contains bit count divided by this number
pub header_divisor: usize,
/// starting position in the input file
pub in_offset: u64,
/// starting position in the output file
pub out_offset: u64,
/// number of codes to write before a reset
pub chunk_size: usize,
/// minimum value of a symbol, currently must be 0
pub min_symbol: usize,
/// maximum value of a symbol, usually 255, currently cannot exceed 255 or there will be a panic
pub max_symbol: usize,
/// clear code, usually max_symbol+1 or max_symbol+2, match codes will skip over
pub clear_code: Option<usize>,
/// stop code, usually max_symbol+1 or max_symbol+2, match codes will skip over
pub stop_code: Option<usize>,
/// min code width in bits, currently must be same as max_code_width
pub min_code_width: usize,
/// max code with in bits
pub max_code_width: usize,
/// bit packing strategy,
pub ord: Ord,
/// return error if file is larger
pub max_file_size: u64,
}
impl Default for Options {
// Default options for Teledisk v1 compression
fn default() -> Self {
Options::from(OptionsPreset::Teledisk)
}
}
pub enum OptionsPreset {
Standard,
Teledisk,
}
impl From<OptionsPreset> for Options {
fn from(preset: OptionsPreset) -> Options {
match preset {
OptionsPreset::Standard => Options {
header_bits: 0,
header_divisor: 1,
in_offset: 0,
out_offset: 0,
chunk_size: 4096,
min_symbol: 0,
max_symbol: 255,
clear_code: Some(256),
stop_code: Some(257),
min_code_width: 12,
max_code_width: 12,
ord: Ord::Lsb0,
max_file_size: u32::MAX as u64 / 4,
},
OptionsPreset::Teledisk => Options {
header_bits: 16,
header_divisor: 4,
in_offset: 12,
out_offset: 12,
chunk_size: 4096,
min_symbol: 0,
max_symbol: 255,
clear_code: None,
stop_code: None,
min_code_width: 12,
max_code_width: 12,
ord: Ord::Lsb0,
max_file_size: 3_000_000,
},
}
}
}
/// bit_vec crate only handles MSB, this assumes starting alignment
fn bits_to_bytes_lsb0(bits: &BitVec) -> Vec<u8> {
let mut ans = Vec::new();
let byte_count = bits.len() / 8;
let rem = bits.len() % 8;
for i in 0..byte_count {
let mut val = 0;
for b in 0..8 {
val |= (bits.get(i * 8 + b).unwrap() as u8) << b;
}
ans.push(val);
}
if rem > 0 {
let mut val = 0;
for b in 0..rem {
val |= (bits.get(byte_count * 8 + b).unwrap() as u8) << b;
}
ans.push(val);
}
ans
}
/// bit_vec crate only handles MSB, this assumes starting alignment
fn bytes_to_bits_lsb0(bytes: &[u8]) -> BitVec {
let mut ans = BitVec::new();
for i in 0..bytes.len() {
let val = bytes[i];
for b in 0..8 {
ans.push((val & (1 << b)) != 0);
}
}
ans
}
#[derive(Clone)]
struct LZWCoder {
bits: BitVec,
ptr: usize,
ord: Ord,
count: usize,
}
struct LZWDecoder {
bits: BitVec,
ptr: usize,
ord: Ord,
count: usize,
}
impl LZWCoder {
pub fn new(ord: Ord) -> Self {
Self {
bits: BitVec::new(),
ptr: 0,
ord,
count: 0,
}
}
/// keep the bit vector small, we don't need the bits behind us
fn drop_leading_bits(&mut self) {
let cpy = self.bits.clone();
self.bits = BitVec::new();
for i in self.ptr..cpy.len() {
self.bits.push(cpy.get(i).unwrap());
}
self.ptr = 0;
}
/// output `num_bits` of `code` in given bit-order, the bits are always
/// written to the output stream (sometimes backing up and rewriting) such that
/// the start of the bit vector stays aligned.
pub fn put_code<W: Write + Seek>(&mut self, num_bits: usize, mut code: usize, writer: &mut W) {
let bytes = match self.ord {
Ord::Msb0 => {
code <<= usize::BITS as usize - num_bits;
let msk = 1 << (usize::BITS - 1);
for _i in 0..num_bits {
self.bits.push(code & msk > 0);
code <<= 1;
self.ptr += 1;
}
self.bits.to_bytes()
}
Ord::Lsb0 => {
for _i in 0..num_bits {
self.bits.push(code & 1 > 0);
code >>= 1;
self.ptr += 1;
}
bits_to_bytes_lsb0(&self.bits)
}
};
_ = writer.write(&bytes.as_slice()).expect("write err");
if self.bits.len() % 8 > 0 {
writer.seek(SeekFrom::Current(-1)).expect("seek err");
self.ptr = 8 * (self.bits.len() / 8);
self.drop_leading_bits();
}
else {
self.bits = BitVec::new();
self.ptr = 0;
}
self.count += 1;
}
}
impl LZWDecoder {
pub fn new(ord: Ord) -> Self {
Self {
bits: BitVec::new(),
ptr: 0,
ord,
count: 0,
}
}
/// Keep the bit vector small, we don't need the bits behind us
fn drop_leading_bits(&mut self) {
let cpy = self.bits.clone();
self.bits = BitVec::new();
for i in self.ptr..cpy.len() {
self.bits.push(cpy.get(i).unwrap());
}
self.ptr = 0;
}
/// Get the next bit reading from the stream as needed.
/// When EOF is reached 0 is returned (behavior comes from LZHUF.C).
/// `reader` should not be advanced outside this function until decoding is done.
fn get_bit<R: Read>(&mut self, reader: &mut R) -> Result<u8, std::io::Error> {
match self.bits.get(self.ptr) {
Some(bit) => {
self.ptr += 1;
Ok(bit as u8)
}
None => {
let mut by: [u8; 1] = [0];
match reader.read_exact(&mut by) {
Ok(()) => {
if self.bits.len() > 512 {
self.drop_leading_bits();
}
match self.ord {
Ord::Msb0 => self.bits.append(&mut BitVec::from_bytes(&by)),
Ord::Lsb0 => self.bits.append(&mut bytes_to_bits_lsb0(&by)),
}
self.get_bit(reader)
}
Err(e) => Err(e),
}
}
}
}
pub fn get_code<R: Read>(&mut self, num_bits: usize, reader: &mut R) -> Result<usize, std::io::Error> {
let mut ans: usize = 0;
match self.ord {
Ord::Msb0 => {
for _i in 0..num_bits {
ans <<= 1;
ans |= self.get_bit(reader)? as usize;
}
}
Ord::Lsb0 => {
for i in 0..num_bits {
ans |= (self.get_bit(reader)? as usize) << i;
}
}
}
self.count += 1;
Ok(ans)
}
}
/// Dictionary element, can be a key or value.
/// This stores an LZW code and a symbol, which typically is what we need to do a lookup during
/// encoding, or reconstruct a string during decoding.
#[derive(Clone)]
struct Link {
code: usize,
sym: usize,
}
impl Link {
fn root(code: usize) -> Self {
// root can be identified by setting sym to any consistent
// value that is out of range of valid codes
Self { code, sym: usize::MAX }
}
fn create(code: usize, sym: usize) -> Self {
Self { code, sym }
}
fn hash(&self) -> (usize, usize) {
(self.code, self.sym)
}
}
/// Structure to perform LZW compression.
struct LZW {
opt: Options,
/// when used in compression, (base_code,sym) maps to {code,*}.
/// when used in expansion, (code,*) maps to {base_code,sym}
dictionary: FoxHashMap<(usize, usize), Link>,
/// the code most recently added to the dictionary
curr_code: Option<usize>,
/// the key that has just been matched
curr_match: Option<Link>,
}
impl LZW {
/// Create LZW structures, including initial dictionary, can
/// also be used to reset LZW for a new block.
/// Allowed to panic if options cannot be satisfied.
fn create(opt: Options) -> Self {
if opt.min_code_width != opt.max_code_width {
panic!("variable code width not supported");
}
if opt.min_symbol != 0 {
panic!("minimum symbol value must be 0");
}
let mut lzw = Self {
opt: opt.clone(),
dictionary: FoxHashMap::new(),
curr_code: None,
curr_match: None,
};
for i in opt.min_symbol..=opt.max_symbol {
lzw.dictionary.insert(Link::root(i).hash(), Link::create(i, i));
}
lzw
}
/// Walk back through the concatenation sequence to form the string, this does a lookup
/// for every symbol, so this may be where we pay the biggest price for suboptimal hashing.
fn get_string(&self, mut code: usize) -> Vec<u8> {
let mut rev = Vec::new();
loop {
let val = self.dictionary.get(&Link::root(code).hash()).unwrap();
rev.push(val.sym as u8);
if val.sym == val.code && code >= self.opt.min_symbol && code <= self.opt.max_symbol {
break;
}
code = val.code
}
rev.iter().rev().map(|x| *x).collect()
}
/// Return the next available code, or None if bit width would be exceeded,
/// Also updates `self.curr_code`, unless None is returned, in which case
/// it retains the maximum value.
fn advance_code(&mut self) -> Option<usize> {
let max_code = (1usize << self.opt.max_code_width) - 1;
let mut new_code = match self.curr_code {
None => 0,
Some(c) => c + 1,
};
loop {
let test = new_code;
if let Some(clear) = self.opt.clear_code {
if new_code == clear {
new_code += 1;
}
}
if let Some(stop) = self.opt.stop_code {
if new_code == stop {
new_code += 1;
}
}
if new_code >= self.opt.min_symbol && new_code <= self.opt.max_symbol {
new_code = self.opt.max_symbol + 1;
}
if new_code == test {
break;
}
}
if new_code > max_code {
self.curr_code = Some(max_code);
return None;
}
self.curr_code = Some(new_code);
Some(new_code)
}
/// Try to match concatenation of `self.curr_match` with `next_sym`.
/// If matching, update `self.curr_match` and return `true`, caller should call again with the next symbol.
/// If not matching, create a new dictionary entry and return `false`, caller should write the code for `self.curr_match`,
/// then set `self.curr_match` to `None` and call again with the next symbol.
/// If not matching and no more symbols available return `None`, caller can proceed as if `false` was returned,
/// or choose to reset the dictionary.
/// After calling this, `self.curr_match` should always be `Some`, assuming a valid dictionary.
fn check_match(&mut self, next_sym: usize) -> Option<bool> {
let search_key = match &self.curr_match {
Some(curr_match) => {
let base = self.dictionary.get(&curr_match.hash()).unwrap();
Link::create(base.code, next_sym)
}
None => Link::root(next_sym),
};
match self.dictionary.contains_key(&search_key.hash()) {
true => {
self.curr_match = Some(search_key.clone());
Some(true)
}
false => match self.advance_code() {
Some(code) => {
self.dictionary.insert(search_key.hash(), Link::create(code, 0));
Some(false)
}
None => None,
},
}
}
}
/// Main compression function.
/// `expanded_in` is an object with `Read` and `Seek` traits, usually `std::fs::File`, or `std::io::Cursor<&[u8]>`.
/// `compressed_out` is an object with `Write` and `Seek` traits, usually `std::fs::File`, or `std::io::Cursor<Vec<u8>>`.
/// Returns (in_size,out_size) or error. Can panic if options are inconsistent.
pub fn compress<R, W>(expanded_in: &mut R, compressed_out: &mut W, opt: &Options) -> Result<(u64, u64), DYNERR>
where
R: Read + Seek,
W: Write + Seek,
{
let reader = expanded_in;
let mut writer = compressed_out;
let mut coder = LZWCoder::new(opt.ord);
let mut expanded_length = reader.seek(SeekFrom::End(0))?;
if opt.in_offset > expanded_length {
return Err(Box::new(CompressionError::FileFormatMismatch));
}
expanded_length -= opt.in_offset;
if expanded_length > opt.max_file_size {
return Err(Box::new(CompressionError::FileTooLarge));
}
let mut write_offset_header = opt.out_offset;
let mut read_chunk_offset = opt.in_offset;
let mut old_coder_state = LZWCoder::new(opt.ord);
let mut sym_in: [u8; 1] = [0];
log::debug!("entering loop over chunks");
loop {
log::debug!("create LZW dictionary");
let mut lzw = LZW::create(opt.clone());
reader.seek(SeekFrom::Start(read_chunk_offset))?;
writer.seek(SeekFrom::Start(write_offset_header))?;
//placeholder for header
if opt.header_bits > 0 {
coder.put_code(opt.header_bits, 0, &mut writer);
}
coder.count = 0;
//let mut lookahead = 0;
log::debug!("entering loop over matches");
loop {
lzw.curr_match = None;
// loop to build the longest possible match
loop {
match reader.read_exact(&mut sym_in) {
Ok(()) => {
match lzw.check_match(sym_in[0] as usize) {
Some(true) => {
// keep matching
}
Some(false) => {
// didn't match
break;
}
None => {
// didn't match and no more codes,
// choose to keep going with stale dictionary
break;
}
}
}
Err(e) if e.kind() == ErrorKind::UnexpectedEof => {
if let Some(curr) = &lzw.curr_match {
let val = lzw.dictionary.get(&curr.hash()).unwrap(); // should never panic
coder.put_code(opt.max_code_width, val.code, &mut writer);
}
if let Some(code) = opt.stop_code {
coder.put_code(opt.max_code_width, code, &mut writer);
}
if opt.header_bits > 0 {
writer.seek(SeekFrom::Start(write_offset_header))?;
old_coder_state.put_code(
opt.header_bits,
coder.count * opt.max_code_width / opt.header_divisor,
&mut writer,
);
}
log::debug!("last chunk has {} codes", coder.count);
writer.seek(SeekFrom::End(0))?; // coder could be rewound
writer.flush()?;
return Ok((expanded_length, writer.stream_position()? - opt.out_offset));
}
Err(e) => return Err(Box::new(e)),
}
}
// should never panic
let curr = lzw.dictionary.get(&lzw.curr_match.as_ref().unwrap().hash()).unwrap();
log::trace!("code: {}", curr.code);
coder.put_code(opt.max_code_width, curr.code, &mut writer);
// backup to try the character that didn't match again
reader.seek_relative(-1)?;
if coder.count >= opt.chunk_size {
log::debug!("close chunk with {} codes", coder.count);
if let Some(code) = opt.clear_code {
coder.put_code(opt.max_code_width, code, &mut writer);
}
let save_offset = writer.stream_position()?;
if opt.header_bits > 0 {
writer.seek(SeekFrom::Start(write_offset_header))?;
old_coder_state.put_code(
opt.header_bits,
coder.count * opt.max_code_width / opt.header_divisor,
&mut writer,
);
}
old_coder_state = coder.clone();
write_offset_header = save_offset;
// back up to catch the character left in the dictionary that will be cleared
read_chunk_offset = reader.stream_position()?; // - 1;
break;
}
}
}
}
/// Main decompression function.
/// `compressed_in` is an object with `Read` and `Seek` traits, usually `std::fs::File`, or `std::io::Cursor<&[u8]>`.
/// `expanded_out` is an object with `Write` and `Seek` traits, usually `std::fs::File`, or `std::io::Cursor<Vec<u8>>`.
/// Returns (in_size,out_size) or error. Can panic if options are inconsistent.
pub fn expand<R, W>(compressed_in: &mut R, expanded_out: &mut W, opt: &Options) -> Result<(u64, u64), DYNERR>
where
R: Read + Seek,
W: Write + Seek,
{
let mut reader = compressed_in;
let writer = expanded_out;
let mut decoder = LZWDecoder::new(opt.ord);
let mut compressed_size = reader.seek(SeekFrom::End(0))?;
if opt.in_offset > compressed_size {
return Err(Box::new(CompressionError::FileFormatMismatch));
}
compressed_size -= opt.in_offset;
if compressed_size > opt.max_file_size {
return Err(Box::new(CompressionError::FileTooLarge));
}
reader.seek(SeekFrom::Start(opt.in_offset))?;
writer.seek(SeekFrom::Start(opt.out_offset))?;
let mut end_of_data = false;
log::trace!("expand(): entering loop over chunks");
loop {
log::trace!("expand(): creating LZW dictionary");
let mut lzw = LZW::create(opt.clone());
let chunk_bits = match opt.header_bits {
0 => usize::MAX,
num_bits => {
log::trace!("expand(): read length of chunk");
match decoder.get_code(num_bits, &mut reader) {
Ok(code) => opt.header_divisor * code,
Err(e) if e.kind() == ErrorKind::UnexpectedEof => {
break;
}
Err(e) => return Err(Box::new(e)),
}
}
};
lzw.curr_code = None;
let mut prev_code = None;
let mut prev_str = Vec::new();
let mut bit_count = 0;
log::trace!("expand(): enter main LZW loop");
while bit_count < chunk_bits {
let code = match decoder.get_code(opt.max_code_width, &mut reader) {
Ok(c) => c,
Err(e) if e.kind() == ErrorKind::UnexpectedEof => {
end_of_data = true;
break;
}
Err(e) => return Err(Box::new(e)),
};
if let Some(stop) = opt.stop_code {
if code == stop {
end_of_data = true;
break;
}
}
if let Some(clear) = opt.clear_code {
if code == clear {
break;
}
}
bit_count += opt.max_code_width;
let next_code = match prev_code {
None => None,
Some(_) => lzw.advance_code(),
};
match lzw.dictionary.contains_key(&Link::root(code).hash()) {
false => {
prev_str.push(prev_str[0]);
if next_code.is_none() {
log::error!("expand(): new code was needed but none were available");
return Err(Box::new(CompressionError::FileFormatMismatch));
}
if code != next_code.unwrap() {
log::error!("expand(): Bad LZW code, expected {}, got {}", next_code.unwrap(), code);
return Err(Box::new(CompressionError::FileFormatMismatch));
}
}
true => {
prev_str = lzw.get_string(code);
}
};
if let (Some(next_code), Some(prev_code)) = (next_code, prev_code) {
lzw.dictionary.insert(
Link::root(next_code).hash(),
Link::create(prev_code, prev_str[0] as usize),
);
log::trace!("expand(): add {} linking to {}.{}", next_code, prev_code, prev_str[0]);
}
_ = writer.write(&prev_str)?;
log::trace!("expand(): write {} as {:?}", code, prev_str);
prev_code = Some(code);
}
log::debug!("expand(): chunk completed with {} bits", bit_count);
if end_of_data {
break;
}
}
log::debug!("expand(): end of data, closing stream");
writer.flush()?;
Ok((compressed_size, writer.stream_position()? - opt.out_offset))
}
/// Convenience function, calls `compress` with a slice returning a Vec
pub fn compress_slice(slice: &[u8], opt: &Options) -> Result<Vec<u8>, DYNERR> {
let mut src = Cursor::new(slice);
let mut ans: Cursor<Vec<u8>> = Cursor::new(Vec::new());
compress(&mut src, &mut ans, opt)?;
Ok(ans.into_inner())
}
/// Convenience function, calls `expand` with a slice returning a Vec
pub fn expand_slice(slice: &[u8], opt: &Options) -> Result<Vec<u8>, DYNERR> {
let mut src = Cursor::new(slice);
let mut ans: Cursor<Vec<u8>> = Cursor::new(Vec::new());
expand(&mut src, &mut ans, opt)?;
Ok(ans.into_inner())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn compression_works() {
// Example adapted from wikipedia; in their example there are 26 symbols and # is a stop code.
// Here # and newline are symbols, and the stop code is 0x101.
let mut opt = Options::from(OptionsPreset::Standard);
opt.ord = Ord::Msb0;
let test_data = "TOBEORNOTTOBEORTOBEORNOT#\n".as_bytes();
let lzw_str = "054 04F 042 045 04F 052 04E 04F 054 102 104 106 10B 105 107 109 023 00A 101 0";
let compressed = compress_slice(test_data, &opt).expect("compression failed");
assert_eq!(compressed, hex::decode(lzw_str.replace(" ", "")).unwrap());
}
#[test]
fn compression_works_16() {
// Example adapted from wikipedia as above but with 16 bit codes
let mut opt = Options::from(OptionsPreset::Standard);
opt.ord = Ord::Msb0;
opt.min_code_width = 16;
opt.max_code_width = 16;
let test_data = "TOBEORNOTTOBEORTOBEORNOT#\n".as_bytes();
let lzw_str = "0054 004F 0042 0045 004F 0052 004E 004F 0054 0102 0104 0106 010B 0105 0107 0109 0023 000A 0101";
let compressed = compress_slice(test_data, &opt).expect("compression failed");
assert_eq!(compressed, hex::decode(lzw_str.replace(" ", "")).unwrap());
}
#[test]
fn compression_works_with_clear() {
// Example adapted from wikipedia; in their example there are 26 symbols and # is a stop code.
// Here # and newline are symbols, the stop code is 0x101, and we clear with 0x100 after 14 codes.
let mut opt = Options::from(OptionsPreset::Standard);
opt.ord = Ord::Msb0;
opt.chunk_size = 14;
let test_data = "TOBEORNOTTOBEORTOBEORNOT#\n".as_bytes();
let lzw_str = "054 04F 042 045 04F 052 04E 04F 054 102 104 106 10B 105 100 052 04E 04F 054 023 00A 101";
let compressed = compress_slice(test_data, &opt).expect("compression failed");
assert_eq!(compressed, hex::decode(lzw_str.replace(" ", "")).unwrap());
}
/* #[test]
fn compression_works_td_mode() {
// Example adapted from wikipedia; in their example there are 26 symbols and # is a stop code.
// Here # and newline are symbols, there is a header, and no stop code.
let mut opt = Options::from(OptionsPreset::Teledisk);
opt.in_offset = 0;
opt.out_offset = 0;
let test_data = "TOBEORNOTTOBEORTOBEORNOT#\n".as_bytes();
let lzw_str = "36 00 54 F0 04 42 50 04 4F 20 05 4E F0 04 54 00 10 02 41 10 09 31 10 05 71 10 23 A0 00";
let compressed = compress_slice(test_data, &opt).expect("compression failed");
assert_eq!(compressed, hex::decode(lzw_str.replace(" ", "")).unwrap());
}*/
#[test]
fn invertibility() {
let mut opt = Options::from(OptionsPreset::Standard);
opt.ord = Ord::Msb0;
let test_data = "I am Sam. Sam I am. I do not like this Sam I am.\n".as_bytes();
let compressed = compress_slice(test_data, &opt).expect("compression failed");
let expanded = expand_slice(&compressed, &opt).expect("expansion failed");
assert_eq!(test_data.to_vec(), expanded);
}
#[test]
fn invertibility_16() {
let mut opt = Options::from(OptionsPreset::Standard);
opt.ord = Ord::Msb0;
opt.min_code_width = 16;
opt.max_code_width = 16;
let test_data = "I am Sam. Sam I am. I do not like this Sam I am.\n".as_bytes();
let compressed = compress_slice(test_data, &opt).expect("compression failed");
let expanded = expand_slice(&compressed, &opt).expect("expansion failed");
assert_eq!(test_data.to_vec(), expanded);
}
#[test]
fn invertibility_td_mode() {
let mut opt = Options::from(OptionsPreset::Teledisk);
opt.in_offset = 0;
opt.out_offset = 0;
let test_data = "I am Sam. Sam I am. I do not like this Sam I am.\n".as_bytes();
let compressed = compress_slice(test_data, &opt).expect("compression failed");
let expanded = expand_slice(&compressed, &opt).expect("expansion failed");
assert_eq!(test_data.to_vec(), expanded);
}
#[test]
fn invertibility_with_clear() {
let mut opt = Options::from(OptionsPreset::Standard);
opt.ord = Ord::Msb0;
opt.chunk_size = 14;
let test_data = "I am Sam. Sam I am. I do not like this Sam I am.\n".as_bytes();
let compressed = compress_slice(test_data, &opt).expect("compression failed");
let expanded = expand_slice(&compressed, &opt).expect("expansion failed");
assert_eq!(test_data.to_vec(), expanded);
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/compression/lzhuf/ring_buffer.rs | src/file_parsers/compression/lzhuf/ring_buffer.rs | //! Ring buffer for LZ type compression windows
pub struct RingBuffer {
buf: Vec<u8>,
pos: usize,
n: usize,
}
impl RingBuffer {
pub fn create(n: usize) -> Self {
Self {
buf: vec![0; n],
pos: 0,
n,
}
}
pub fn get_pos(&self, offset: i64) -> usize {
(self.pos as i64 + offset).rem_euclid(self.n as i64) as usize
}
pub fn set_pos(&mut self, pos: usize) {
self.pos = pos % self.n;
}
/// use absolute index
pub fn get_abs(&self, abs: usize) -> u8 {
self.buf[abs % self.n]
}
/// use absolute index
#[allow(dead_code)]
pub fn set_abs(&mut self, abs: usize, val: u8) {
self.buf[abs % self.n] = val;
}
pub fn get(&self, offset: i64) -> u8 {
self.buf[(self.pos as i64 + offset).rem_euclid(self.n as i64) as usize]
}
pub fn set(&mut self, offset: i64, val: u8) {
self.buf[(self.pos as i64 + offset).rem_euclid(self.n as i64) as usize] = val;
}
pub fn advance(&mut self) {
self.pos = (self.pos + 1) % self.n;
}
pub fn retreat(&mut self) {
self.pos = (self.pos - 1) % self.n;
}
/// Distance to another position, assuming it is behind us.
/// Correctly handles positions that are "ahead" in memory order.
pub fn distance_behind(&self, other: usize) -> usize {
(self.pos as i64 - other as i64).rem_euclid(self.n as i64) as usize
}
}
#[test]
fn offset() {
let mut ring = RingBuffer::create(4);
ring.set_pos(5);
assert_eq!(ring.get_pos(0), 1);
assert_eq!(ring.get_pos(4), 1);
assert_eq!(ring.get_pos(3), 0);
assert_eq!(ring.get_pos(-4), 1);
}
#[test]
fn distance() {
// four positions 0 1 2 3
// set position ^ (wraps once)
let mut ring = RingBuffer::create(4);
ring.set_pos(5);
assert_eq!(ring.get_pos(0), 1);
assert_eq!(ring.distance_behind(0), 1);
assert_eq!(ring.distance_behind(1), 0);
assert_eq!(ring.distance_behind(3), 2);
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/compression/lzhuf/mod.rs | src/file_parsers/compression/lzhuf/mod.rs | /*
Original code from retrocompressor by Daniel Gordon
https://github.com/dfgordon/retrocompressor/
Copyright (c) 2023 Daniel Gordon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
pub mod adaptive_huff;
pub mod lzhuf;
pub mod node_pool;
pub mod ring_buffer;
/// Options controlling compression
#[derive(Clone)]
pub struct Options {
/// whether to include an optional header
header: bool,
/// starting position in the input file
in_offset: u64,
/// starting position in the output file
out_offset: u64,
/// size of window, e.g., for LZSS dictionary
window_size: usize,
/// threshold, e.g. minimum length of match to encode
threshold: usize,
/// lookahead, e.g. for LZSS matches
lookahead: usize,
/// precursor symbol, e.g. backfill symbol for LZSS dictionary
precursor: u8,
}
#[allow(unused)]
pub const STD_OPTIONS: Options = Options {
header: true,
in_offset: 0,
out_offset: 0,
window_size: 4096,
threshold: 2,
lookahead: 60,
precursor: b' ',
};
pub const TD0_READ_OPTIONS: Options = Options {
header: false,
in_offset: 12,
out_offset: 12,
window_size: 4096,
threshold: 2,
lookahead: 60,
precursor: b' ',
};
pub use lzhuf::expand;
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/compression/lzhuf/lzhuf.rs | src/file_parsers/compression/lzhuf/lzhuf.rs | /*
lzhuf.rs
Original code by Daniel Gordon, retrieved from the 'retrocompressor' repository.
Minor modifications were made for fluxfox.
It is assumed that if a BufReader or BufWriter is required, the caller will provide such.
https://github.com/dfgordon/retrocompressor
Original comments:
LZSS Compression with Adaptive Huffman Encoding
This can perform compression equivalent to the C program `LZHUF.C` by
Haruyasu Yoshizaki, Haruhiko Okumura, and Kenji Rikitake. This is not a direct
port, but it will produce the same bit-for-bit output as `LZHUF.C`, assuming the
standard options are chosen. The header is always treated as little endian.
This program appears to work more reliably than `LZHUF.C`.
I found that `LZHUF.C` will hang on large files when compiled with `clang 16`,
among other problems. One theory is this happens when it gets to the stage
where the Huffman tree has to be rebuilt, and something goes amiss with the
C integer types as interpreted by clang (compared to whatever old compiler).
Neither this module nor the direct port exhibit such problems.
*/
use super::{
super::{CompressionError, DYNERR},
adaptive_huff::*,
node_pool::*,
ring_buffer::*,
Options,
};
use crate::io::{Cursor, ErrorKind, Read, Seek, SeekFrom, Write};
/// Structure to perform the LZSS stage of compression.
/// This maintains two components. First a sliding window containing
/// the symbols in the order encountered ("dictionary"), and second a
/// tree structure whose nodes point at dictionary locations where matches
/// have been previously found ("index")
#[allow(unused)]
struct LZSS {
opt: Options,
dictionary: RingBuffer,
index: Tree,
match_offset: i32,
match_length: usize,
}
impl LZSS {
fn create(opt: Options) -> Self {
let dictionary = RingBuffer::create(opt.window_size);
let index = Tree::create(opt.window_size, 256);
Self {
opt,
dictionary,
index,
match_offset: 0,
match_length: 0,
}
}
/// This finds a match to the symbol run starting at position `pos`.
/// It always exits by inserting a node: either for a match that was found,
/// or for a prospective match to come.
#[allow(dead_code)]
fn insert_node(&mut self) -> Result<(), Error> {
let pos = self.dictionary.get_pos(0);
self.match_length = 0;
// Whatever is attached at this position can only index things that are ahead of us.
// Therefore, throw it all away. (but see note below)
self.index.set_cursor(pos)?;
self.index.drop_branch(Side::Left)?;
self.index.drop_branch(Side::Right)?;
// find or create root for this symbol
let symbol = self.dictionary.get(0);
let mut curs = match self.index.set_cursor_to_root(symbol as usize) {
Ok(()) => self.index.get_cursor().unwrap(),
Err(_) => {
// Symbol has not been indexed yet, save position and go out.
self.index.spawn_root(symbol as usize, pos)?;
return Ok(());
}
};
self.index.set_cursor(curs)?;
loop {
let mut cmp = 0;
let mut i: usize = 1;
// upon exiting this loop, `i` will have the number of matched symbols,
// and `cmp` will have the difference in first mismatched symbol values.
while i < self.opt.lookahead {
cmp = self.dictionary.get(i as i64) as i16 - self.dictionary.get_abs(curs + i) as i16;
if cmp != 0 {
break;
}
i += 1;
}
if i > self.opt.threshold {
if i > self.match_length {
// we found a better match, take it
self.match_offset = self.dictionary.distance_behind(curs) as i32 - 1;
self.match_length = i;
if self.match_length >= self.opt.lookahead {
// cannot get a better match than this, so remove the prior position from the index,
// and index this position in its place. TODO: this seems to break the assumption
// that farther from root means later in buffer.
self.index.change_value(pos, true)?;
return Ok(());
}
}
if i == self.match_length {
// if a match has the same length, but occurs with smaller offset, take it
let c = self.dictionary.distance_behind(curs) as i32 - 1;
if c < self.match_offset {
self.match_offset = c;
}
}
}
// try next match on one of two branches, determined by the symbol ordering associated
// with the last mismatch.
let side = match cmp >= 0 {
true => Side::Right,
false => Side::Left,
};
curs = match self.index.down(side) {
Ok(c) => c,
Err(Error::NodeMissing) => {
// no match, make this position a new node, go out
self.index.spawn(pos, side)?;
return Ok(());
}
Err(e) => {
return Err(e);
}
};
}
}
    /// Remove the dictionary position `offset` from the search index without
    /// detaching any other node in the tree.
    ///
    /// `offset` is relative to the dictionary cursor; it is mapped to a node-pool
    /// slot via `get_pos`. If that slot is already free this is a no-op.
    fn delete_node(&mut self, offset: i64) -> Result<(), Error> {
        // The big idea here is to delete the node without having to cut a whole branch.
        // If p has only one branch, this is easy, the next node down replaces p.
        // If p has two branches, and the left branch has no right branch, then p's right branch
        // moves down to become the left branch's right branch. The left branch moves up to replace p.
        // If p has two branches, and the left branch branches right, we go down on the right as deep
        // as possible. The deepest node is brought up to replace p, see below.
        let p = self.dictionary.get_pos(offset);
        if self.index.is_free(p)? {
            return Ok(());
        }
        self.index.set_cursor(p)?;
        // first assemble the branch that will replace p
        let replacement = match self.index.get_down()? {
            [None, None] => {
                // p is a leaf: nothing to reattach, just drop it
                return self.index.drop();
            }
            [Some(repl), None] => repl, // only 1 branch, it moves up to replace p
            [None, Some(repl)] => repl, // only 1 branch, it moves up to replace p
            [Some(left), Some(right)] => {
                // There are 2 branches, we have to rearrange things to avoid losing data.
                self.index.set_cursor(left)?;
                match self.index.get_down()? {
                    [_, None] => {
                        // Left branch does not branch right.
                        // Therefore, we can simply attach the right branch to left branch's right branch.
                        // The updated left branch will be the replacement.
                        self.index.set_cursor(right)?;
                        self.index.move_node(left, Side::Right, false)?;
                        left
                    }
                    [_, Some(_)] => {
                        // The left branch branches right, find the terminus on the right.
                        // A right-terminus is not necessarily a leaf, i.e., it can have a left branch.
                        let terminus: usize = self.index.terminus(Side::Right)?;
                        let (terminus_dad, _) = self.index.get_parent_and_side()?;
                        self.index.cut_upward()?;
                        // possible left branch of the terminus takes the former spot of the terminus
                        match self.index.get_down()? {
                            [Some(_), None] => {
                                self.index.down(Side::Left)?;
                                self.index.move_node(terminus_dad, Side::Right, false)?;
                            }
                            [None, None] => {}
                            // a right-terminus can never have a right child by construction
                            _ => panic!("unexpected children"),
                        }
                        // The 2 branches of p can now be attached to what was the terminus,
                        // whereas the terminus will be the replacement.
                        self.index.set_cursor(left)?;
                        self.index.move_node(terminus, Side::Left, false)?;
                        self.index.set_cursor(right)?;
                        self.index.move_node(terminus, Side::Right, false)?;
                        terminus
                    }
                }
            }
        };
        // Replace `p` with `replacement`
        self.index.set_cursor(p)?;
        if self.index.is_root()? {
            // p was a root: the replacement inherits p's symbol slot
            let symbol = self.index.get_symbol()?;
            self.index.set_cursor(replacement)?;
            self.index.move_node_to_root(symbol, true)
        }
        else {
            let (parent, side) = self.index.get_parent_and_side()?;
            self.index.set_cursor(replacement)?;
            self.index.move_node(parent, side, true)
        }
    }
}
/// Main compression function.
/// `expanded_in` is an object with `Read` and `Seek` traits, usually `std::fs::File`, or `std::io::Cursor<&[u8]>`.
/// `compressed_out` is an object with `Write` and `Seek` traits, usually `std::fs::File`, or `std::io::Cursor<Vec<u8>>`.
/// Compression starts at `opt.in_offset` and output is written starting at `opt.out_offset`.
/// Returns (in_size,out_size) or error, can panic if offsets are out of range.
#[allow(dead_code)]
pub fn compress<R, W>(expanded_in: &mut R, compressed_out: &mut W, opt: &Options) -> Result<(u64, u64), DYNERR>
where
    R: Read + Seek,
    W: Write + Seek,
{
    let reader = expanded_in;
    let mut writer = compressed_out;
    let expanded_length = reader.seek(SeekFrom::End(0))? - opt.in_offset;
    if expanded_length >= u32::MAX as u64 {
        return Err(Box::new(CompressionError::FileTooLarge));
    }
    reader.seek(SeekFrom::Start(opt.in_offset))?;
    writer.seek(SeekFrom::Start(opt.out_offset))?;
    // write the 32-bit header with length of expanded data
    if opt.header {
        let header = u32::to_le_bytes(expanded_length as u32);
        // write_all, not write: a short write here would silently corrupt the stream
        writer.write_all(&header)?;
    }
    // init
    let mut bytes = reader.bytes();
    let mut lzss = LZSS::create(opt.clone());
    // symbol space: 256 literals plus one symbol per possible match length
    let mut huff = AdaptiveHuffmanCoder::create(256 + opt.lookahead - opt.threshold);
    // setup dictionary: fill the window (minus the lookahead) with the precursor byte
    let start_pos = opt.window_size - opt.lookahead;
    for i in 0..start_pos {
        lzss.dictionary.set(i as i64, opt.precursor);
    }
    // `len` counts how many real input bytes currently sit in the lookahead
    let mut len = 0;
    lzss.dictionary.set_pos(start_pos);
    while len < opt.lookahead {
        match bytes.next() {
            Some(Ok(c)) => {
                lzss.dictionary.set(len as i64, c);
                len += 1;
            }
            None => {
                break;
            }
            Some(Err(e)) => {
                return Err(Box::new(e));
            }
        }
    }
    // index the precursor run just behind the starting position
    for _i in 1..=opt.lookahead {
        lzss.dictionary.retreat();
        lzss.insert_node()?;
    }
    lzss.dictionary.set_pos(start_pos);
    lzss.insert_node()?;
    // main compression loop
    loop {
        // a match can never be longer than the bytes remaining in the lookahead
        if lzss.match_length > len {
            lzss.match_length = len;
        }
        if lzss.match_length <= opt.threshold {
            // match too short to pay for itself: emit a literal byte
            lzss.match_length = 1;
            huff.encode_char(lzss.dictionary.get(0) as u16, &mut writer);
        }
        else {
            // emit (length,offset) pair; lengths are mapped above the literal range
            huff.encode_char((255 - opt.threshold + lzss.match_length) as u16, &mut writer);
            huff.encode_position(lzss.match_offset as u16, &mut writer);
        }
        let last_match_length = lzss.match_length;
        // slide the window over the matched span, refilling from the input
        let mut i = 0;
        while i < last_match_length {
            let c = match bytes.next() {
                Some(Ok(c)) => c,
                None => break,
                Some(Err(e)) => return Err(Box::new(e)),
            };
            lzss.delete_node(opt.lookahead as i64)?;
            lzss.dictionary.set(opt.lookahead as i64, c);
            lzss.dictionary.advance();
            lzss.insert_node()?;
            i += 1;
        }
        // input exhausted mid-match: keep sliding, draining the lookahead
        while i < last_match_length {
            lzss.delete_node(opt.lookahead as i64)?;
            lzss.dictionary.advance();
            len -= 1;
            if len > 0 {
                lzss.insert_node()?;
            }
            i += 1;
        }
        if len == 0 {
            break;
        }
    }
    writer.seek(SeekFrom::End(0))?; // coder could be rewound
    writer.flush()?;
    Ok((expanded_length, writer.stream_position()? - opt.out_offset))
}
/// Main decompression function.
/// `compressed_in` is an object with `Read` and `Seek` traits, usually `std::fs::File`, or `std::io::Cursor<&[u8]>`.
/// `expanded_out` is an object with `Write` and `Seek` traits, usually `std::fs::File`, or `std::io::Cursor<Vec<u8>>`.
/// Returns (in_size,out_size) or error, can panic if offsets are out of range.
pub fn expand<R, W>(compressed_in: &mut R, expanded_out: &mut W, opt: &Options) -> Result<(u64, u64), DYNERR>
where
    R: Read + Seek,
    W: Write + Seek,
{
    let mut reader = compressed_in;
    let writer = expanded_out;
    let compressed_size = reader.seek(SeekFrom::End(0))? - opt.in_offset;
    reader.seek(SeekFrom::Start(opt.in_offset))?;
    writer.seek(SeekFrom::Start(opt.out_offset))?;
    // get size of expanded data from 32 bit header or set to max
    let max_expanded_size = match opt.header {
        true => {
            let mut header: [u8; 4] = [0; 4];
            reader.read_exact(&mut header)?;
            u32::from_le_bytes(header)
        }
        // no header: rely on EOF of the compressed stream to stop the loop
        false => u32::MAX,
    };
    // init
    let mut huff = AdaptiveHuffmanDecoder::create(256 + opt.lookahead - opt.threshold);
    let mut lzss = LZSS::create(opt.clone());
    let start_pos = opt.window_size - opt.lookahead;
    for i in 0..start_pos {
        lzss.dictionary.set(i as i64, opt.precursor);
    }
    lzss.dictionary.set_pos(start_pos);
    // start expanding; compare the position *relative to out_offset* against the
    // expanded length, otherwise a non-zero out_offset truncates the output
    while writer.stream_position()? - opt.out_offset < max_expanded_size as u64 {
        let c = match huff.decode_char(&mut reader) {
            Ok(c) => c,
            Err(e) if e.kind() == ErrorKind::UnexpectedEof => break,
            Err(e) => return Err(Box::new(e)),
        };
        if c < 256 {
            // literal byte: emit and feed back into the sliding window
            writer.write_all(&[c as u8])?;
            lzss.dictionary.set(0, c as u8);
            lzss.dictionary.advance();
        }
        else {
            // (length,offset) pair: copy `strlen` bytes from earlier in the window
            let offset = match huff.decode_position(&mut reader) {
                Ok(pos) => -(pos as i64 + 1),
                Err(e) if e.kind() == ErrorKind::UnexpectedEof => break,
                Err(e) => return Err(Box::new(e)),
            };
            let strlen = c as i64 + opt.threshold as i64 - 255;
            for _k in 0..strlen {
                let c8 = lzss.dictionary.get(offset);
                writer.write_all(&[c8])?;
                lzss.dictionary.set(0, c8);
                lzss.dictionary.advance();
            }
        }
    }
    writer.flush()?;
    Ok((compressed_size, writer.stream_position()? - opt.out_offset))
}
/// Convenience wrapper around `compress`: compresses an in-memory slice and
/// returns the compressed bytes as a `Vec`.
#[allow(dead_code)]
pub fn compress_slice(slice: &[u8], opt: &Options) -> Result<Vec<u8>, DYNERR> {
    let mut input = Cursor::new(slice);
    let mut output: Cursor<Vec<u8>> = Cursor::new(Vec::new());
    compress(&mut input, &mut output, opt)?;
    Ok(output.into_inner())
}
/// Convenience wrapper around `expand`: expands an in-memory slice and
/// returns the expanded bytes as a `Vec`.
#[allow(dead_code)]
pub fn expand_slice(slice: &[u8], opt: &Options) -> Result<Vec<u8>, DYNERR> {
    let mut input = Cursor::new(slice);
    let mut output: Cursor<Vec<u8>> = Cursor::new(Vec::new());
    expand(&mut input, &mut output, opt)?;
    Ok(output.into_inner())
}
#[test]
fn compression_works() {
    use super::STD_OPTIONS;
    // Each case pairs raw input with the expected LZHUF byte stream,
    // spelled as whitespace-separated hex for readability.
    let cases = [
        (
            "12345123456789123456789\n",
            "18 00 00 00 DE EF B7 FC 0E 0C 70 13 85 C3 E2 71 64 81 19 60",
        ),
        (
            "I am Sam. Sam I am. I do not like this Sam I am.\n",
            "31 00 00 00 EA EB 3D BF 9C 4E FE 1E 16 EA 34 09 1C 0D C0 8C 02 FC 3F 77 3F 57 20 17 7F 1F 5F BF C6 AB 7F A5 AF FE 4C 39 96",
        ),
    ];
    for (text, lzhuf_str) in cases {
        let compressed = compress_slice(text.as_bytes(), &STD_OPTIONS).expect("compression failed");
        assert_eq!(compressed, hex::decode(lzhuf_str.replace(' ', "")).unwrap());
    }
}
#[test]
fn invertibility() {
    use super::STD_OPTIONS;
    // A compress/expand round trip must reproduce the original bytes.
    let test_data = "I am Sam. Sam I am. I do not like this Sam I am.\n".as_bytes();
    let round_trip = expand_slice(
        &compress_slice(test_data, &STD_OPTIONS).expect("compression failed"),
        &STD_OPTIONS,
    )
    .expect("expansion failed");
    assert_eq!(test_data.to_vec(), round_trip);
    // Short input: only the leading 7 bytes of the expansion are significant.
    let test_data = "1234567".as_bytes();
    let round_trip = expand_slice(
        &compress_slice(test_data, &STD_OPTIONS).expect("compression failed"),
        &STD_OPTIONS,
    )
    .expect("expansion failed");
    assert_eq!(test_data.to_vec(), round_trip[0..7]);
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/compression/lzhuf/node_pool.rs | src/file_parsers/compression/lzhuf/node_pool.rs | //! Node Pool backed Binary Tree
//!
//! This is a binary tree, tuned for the task of indexing an LZSS dictionary.
//! The nodes must take unique values from 0..LEN-1, where LEN is the size of the node pool.
//! The value of the node is the index of its slot in the node pool.
//! Hence we can define a tree cursor simply by the index into the node pool, i.e., the
//! node value, index into the node pool, and cursor, are all one and the same.
//!
//! For application to LZSS we have the following.
//! * There can be a root node for each possible symbol, each one occupies a slot in the node pool.
//! * The size of the node pool corresponds to the size of the sliding window.
//! * The node value points to a slot in the sliding window.
//!
//! The implementation does a lot of error checking that optimized code might omit.
//! This is a deliberate choice, reflecting the expectation that data sets are small retro-files.
use num_derive::FromPrimitive;
/// Tree Errors
///
/// Errors reported by [`Tree`] operations; the `#[error]` strings give the
/// user-facing description of each condition.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    #[error("node is missing")]
    NodeMissing,
    #[error("node already exists")]
    NodeExists,
    #[error("out of range")]
    OutOfRange,
    #[error("there is no cursor")]
    NoCursor,
    #[error("tree connection is broken")]
    BrokenConnection,
    #[error("root node or broken connection")]
    BrokenConnectionOrRoot,
    #[error("non-root node or broken connection")]
    BrokenConnectionOrNotRoot,
}
/// Identifies a child branch of a tree node.
/// The discriminant values are used directly to index `Node::down`.
#[derive(FromPrimitive, Clone, Copy)]
pub enum Side {
    Left = 0,
    Right = 1,
}
/// Node used to build a tree.
/// With this type of tree the values may also serve as node pointers.
#[derive(Clone)]
struct Node {
    /// value is also index into node pool
    val: usize,
    /// if this is a root, gives the value of the symbol; `None` for non-roots
    symbol: Option<usize>,
    /// index to the parent; `None` for roots and free slots
    up: Option<usize>,
    /// index to the children [left,right]
    down: [Option<usize>; 2],
}
/// A multi-rooted binary tree backed by a fixed pool of nodes.
/// Node value, pool index, and cursor are all the same number.
pub struct Tree {
    /// one element for each symbol, value is index into the node pool
    roots: Vec<Option<usize>>,
    /// cursor as index into the node pool, can be None
    curs: Option<usize>,
    /// the node pool, one node for each unique value that is allowed
    pool: Vec<Node>,
}
impl Tree {
    /// The node values are from 0..len-1, the symbol values from 0..symbols-1.
    /// Each symbol may create its own root and tree within the node buffer.
    /// All slots start out free (no symbol, no parent, no children).
    pub fn create(len: usize, symbols: usize) -> Self {
        let roots: Vec<Option<usize>> = vec![None; symbols];
        let mut pool: Vec<Node> = Vec::new();
        for i in 0..len {
            pool.push(Node {
                val: i,
                symbol: None,
                up: None,
                down: [None, None],
            });
        }
        Self {
            roots,
            curs: None,
            pool,
        }
    }
    /// Validate the cursor: error if unset or out of range, otherwise return it.
    fn chk_cursor(&self) -> Result<usize, Error> {
        match self.curs {
            None => Err(Error::NoCursor),
            Some(curs) => match curs < self.pool.len() {
                true => Ok(curs),
                false => Err(Error::OutOfRange),
            },
        }
    }
    /// Current cursor position, if any. The cursor is also the node value.
    pub fn get_cursor(&self) -> Option<usize> {
        self.curs
    }
    /// Place the cursor on an arbitrary pool slot (range-checked).
    pub fn set_cursor(&mut self, curs: usize) -> Result<(), Error> {
        match curs < self.pool.len() {
            true => {
                self.curs = Some(curs);
                Ok(())
            }
            false => Err(Error::OutOfRange),
        }
    }
    /// Place the cursor on the root node of `symbol`, erroring if that
    /// symbol has no root.
    pub fn set_cursor_to_root(&mut self, symbol: usize) -> Result<(), Error> {
        match self.roots[symbol] {
            None => Err(Error::NodeMissing),
            Some(v) => {
                self.curs = Some(v);
                Ok(())
            }
        }
    }
    /// Move the cursor to the parent node, returning its value.
    #[allow(dead_code)]
    pub fn up(&mut self) -> Result<usize, Error> {
        match self.pool[self.chk_cursor()?].up {
            None => Err(Error::NodeMissing),
            Some(v) => {
                self.curs = Some(v);
                Ok(v)
            }
        }
    }
    /// Move the cursor to the child on `side`, returning its value.
    pub fn down(&mut self, side: Side) -> Result<usize, Error> {
        match self.pool[self.chk_cursor()?].down[side as usize] {
            None => Err(Error::NodeMissing),
            Some(v) => {
                self.curs = Some(v);
                Ok(v)
            }
        }
    }
    /// Go down to the end on one side. Cursor follows.
    pub fn terminus(&mut self, side: Side) -> Result<usize, Error> {
        let mut term = self.chk_cursor()?;
        while let Ok(curs) = self.down(side) {
            term = curs;
        }
        Ok(term)
    }
    /// Get the array of children, cursor does not move.
    pub fn get_down(&self) -> Result<[Option<usize>; 2], Error> {
        let curs = self.chk_cursor()?;
        Ok(self.pool[curs].down)
    }
    /// Get the parent and side (left or right) of the current cursor location.
    /// Cursor does not move.
    /// NOTE(review): takes `&mut self` although nothing is mutated — presumably
    /// historical; confirm before relaxing to `&self`.
    pub fn get_parent_and_side(&mut self) -> Result<(usize, Side), Error> {
        let curs = self.chk_cursor()?;
        if let Some(parent) = self.pool[curs].up {
            return match self.pool[parent].down {
                [Some(v), _] if v == curs => Ok((parent, Side::Left)),
                [_, Some(v)] if v == curs => Ok((parent, Side::Right)),
                _ => Err(Error::BrokenConnection),
            };
        }
        Err(Error::BrokenConnectionOrRoot)
    }
    /// Get the symbol and side (left or right) of the current cursor location.
    /// This is only used if we are on one of the roots of a multi-root tree.
    /// Cursor does not move.
    pub fn get_symbol(&mut self) -> Result<usize, Error> {
        let curs = self.chk_cursor()?;
        if let Some(symbol) = self.pool[curs].symbol {
            return match self.roots[symbol] {
                // cross-check that the symbol's root link points back at us
                Some(v) if v == curs => Ok(symbol),
                _ => Err(Error::BrokenConnection),
            };
        }
        Err(Error::BrokenConnectionOrNotRoot)
    }
    /// True if the cursor node has no parent (roots and free slots qualify).
    pub fn is_root(&self) -> Result<bool, Error> {
        let curs = self.chk_cursor()?;
        Ok(self.pool[curs].up.is_none())
    }
    /// True if the cursor node has no children.
    #[allow(dead_code)]
    pub fn is_leaf(&self) -> Result<bool, Error> {
        let curs = self.chk_cursor()?;
        Ok(self.pool[curs].down == [None, None])
    }
    /// True if pool slot `curs` is unused (no symbol and no parent).
    /// Panics if `curs` is out of range.
    pub fn is_free(&self, curs: usize) -> Result<bool, Error> {
        match (self.pool[curs].symbol, self.pool[curs].up, self.pool[curs].down) {
            // TODO: how should we define a free slot
            (None, None, _) => Ok(true),
            _ => Ok(false),
        }
    }
    /// Spawn a new node attaching to the cursor, cursor does not move.
    /// If the cursor is already linked downward an error is returned.
    /// If the target slot is already linked, the old links are overwritten.
    pub fn spawn(&mut self, val: usize, side: Side) -> Result<(), Error> {
        if val >= self.pool.len() {
            return Err(Error::OutOfRange);
        }
        let curs = self.chk_cursor()?;
        if self.pool[curs].down[side as usize].is_some() {
            eprintln!(
                "spawn: cannot overwrite {}",
                self.pool[curs].down[side as usize].unwrap()
            );
            return Err(Error::NodeExists);
        }
        self.pool[curs].down[side as usize] = Some(val);
        self.pool[val].up = Some(curs);
        self.pool[val].down = [None, None];
        Ok(())
    }
    /// This type of tree can have multiple roots or no roots.
    /// The root occupies a slot in the node pool, the slot must be free.
    pub fn spawn_root(&mut self, symbol: usize, curs: usize) -> Result<(), Error> {
        if symbol >= self.roots.len() || curs >= self.pool.len() {
            return Err(Error::OutOfRange);
        }
        if self.is_free(curs)? {
            self.roots[symbol] = Some(curs);
            self.pool[curs].symbol = Some(symbol);
            self.pool[curs].up = None;
            self.pool[curs].down = [None, None];
            return Ok(());
        }
        eprintln!("spawn_root: cannot overwrite {}", curs);
        Err(Error::NodeExists)
    }
    /// Drop nodes at and below the cursor. On exit cursor moves up.
    /// If node is root, cursor becomes None. This may be called recursively.
    pub fn drop(&mut self) -> Result<(), Error> {
        let curs = self.chk_cursor()?;
        let maybe_parent = self.pool[curs].up;
        let maybe_symbol = self.pool[curs].symbol;
        // recursively delete everything below
        if self.down(Side::Left).is_ok() {
            self.drop()?;
            self.set_cursor(curs)?;
        }
        if self.down(Side::Right).is_ok() {
            self.drop()?;
            self.set_cursor(curs)?;
        }
        // cut all links
        if let Some(parent) = maybe_parent {
            let (_, side) = self.get_parent_and_side()?;
            self.pool[parent].down[side as usize] = None;
            self.curs = Some(parent);
        }
        if let Some(symbol) = maybe_symbol {
            self.roots[symbol] = None;
            self.curs = None;
        }
        // reset this slot to the free state
        self.pool[curs].symbol = None;
        self.pool[curs].up = None;
        self.pool[curs].down = [None, None];
        Ok(())
    }
    /// Drop everything below the cursor on one side, OK if no branch to drop.
    pub fn drop_branch(&mut self, side: Side) -> Result<(), Error> {
        if self.down(side).is_ok() {
            self.drop()?;
        }
        Ok(())
    }
    /// Cut the links between this node and the one above. Normally part of
    /// another operation (tree is left broken).
    pub fn cut_upward(&mut self) -> Result<(), Error> {
        let (parent, side) = self.get_parent_and_side()?;
        self.pool[parent].down[side as usize] = None;
        self.pool[self.curs.unwrap()].up = None;
        Ok(())
    }
    /// Cut the links between this node and one below. Normally part of
    /// another operation (tree could be left broken).
    #[allow(dead_code)]
    pub fn cut_downward(&mut self, side: Side) -> Result<(), Error> {
        let curs: usize = self.chk_cursor()?;
        if let Some(son) = self.pool[curs].down[side as usize] {
            self.pool[curs].down[side as usize] = None;
            self.pool[son].up = None;
        }
        Ok(())
    }
    /// Move node and everything below to a new parent.
    /// If node has an existing parent the link is cut.
    /// If there is an existing branch and `force==true` it is dropped and replaced.
    /// If there is an existing branch and `force==false` an error is returned.
    /// Upon exit cursor points to the same node, only its parent has changed.
    /// This may free up slots in the node pool if `force==true`.
    pub fn move_node(&mut self, new_parent: usize, side: Side, force: bool) -> Result<(), Error> {
        let curs: usize = self.chk_cursor()?;
        match (self.pool[new_parent].down[side as usize], force) {
            (None, _) => {
                if self.pool[curs].up.is_some() {
                    self.cut_upward()?; // do first
                }
            }
            (Some(_), true) => {
                if self.pool[curs].up.is_some() {
                    self.cut_upward()?; // do first
                }
                self.set_cursor(new_parent)?;
                self.drop_branch(side)?;
                self.set_cursor(curs)?;
            }
            _ => {
                eprintln!(
                    "move: cannot overwrite {}",
                    self.pool[new_parent].down[side as usize].unwrap()
                );
                return Err(Error::NodeExists);
            }
        }
        self.pool[new_parent].down[side as usize] = Some(curs);
        self.pool[curs].up = Some(new_parent);
        Ok(())
    }
    /// Same as `move_node` except target node is a root
    pub fn move_node_to_root(&mut self, symbol: usize, force: bool) -> Result<(), Error> {
        let curs: usize = self.chk_cursor()?;
        match (self.roots[symbol], force) {
            (None, _) => {
                if self.pool[curs].up.is_some() {
                    self.cut_upward()?; // do first
                }
            }
            (Some(old_root), true) => {
                if self.pool[curs].up.is_some() {
                    self.cut_upward()?; // do first
                }
                self.set_cursor(old_root)?;
                self.drop()?;
                self.set_cursor(curs)?;
            }
            (Some(old_root), false) => {
                eprintln!("move: cannot overwrite root {}", old_root);
                return Err(Error::NodeExists);
            }
        }
        self.roots[symbol] = Some(curs);
        self.pool[curs].up = None;
        self.pool[curs].symbol = Some(symbol);
        Ok(())
    }
    /// Change the value of a node. This frees one slot in the node pool and uses another.
    /// The cursor stays on the node, but its value has changed.
    /// If the new value is already used and `force==false` an error is returned,
    /// if `force==true` the former occupant of the slot is deleted.
    pub fn change_value(&mut self, new_val: usize, force: bool) -> Result<(), Error> {
        let old_val = self.chk_cursor()?;
        if new_val == old_val {
            return Ok(());
        }
        match (self.is_free(new_val)?, force) {
            (true, _) => {}
            (false, false) => {
                eprintln!("cannot change node value to {}", new_val);
                return Err(Error::NodeExists);
            }
            (false, true) => {
                // evict the current occupant (and its subtree) of the target slot
                self.set_cursor(new_val)?;
                self.drop()?;
                self.set_cursor(old_val)?;
            }
        }
        // update links pointing into old_val
        if let Some(symbol) = self.pool[old_val].symbol {
            self.roots[symbol] = Some(new_val);
        }
        if let Some(parent) = self.pool[old_val].up {
            let (_, side) = self.get_parent_and_side()?;
            self.pool[parent].down[side as usize] = Some(new_val);
        }
        if let Some(child) = self.pool[old_val].down[0] {
            self.pool[child].up = Some(new_val);
        }
        if let Some(child) = self.pool[old_val].down[1] {
            self.pool[child].up = Some(new_val);
        }
        // update links pointing out of old_val and new_val
        self.pool[new_val] = self.pool[old_val].clone();
        self.pool[new_val].val = new_val;
        self.pool[old_val] = Node {
            val: old_val,
            symbol: None,
            up: None,
            down: [None, None],
        };
        self.curs = Some(new_val);
        Ok(())
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/compression/lzhuf/adaptive_huff.rs | src/file_parsers/compression/lzhuf/adaptive_huff.rs | //! Module to perform the adaptive Huffman coding.
//! This is used by the `lzss_huff` module.
//! This is supposed to perform the coding the same way as `LZHUF.C`,
//! see the `direct_ports` module for more on the legacy.
use bit_vec::BitVec;
use std::io::{Read, Seek, SeekFrom, Write};
/// Tree used for both encoding and decoding.
/// The tree is constantly updated during either operation.
/// Layout follows LZHUF.C: leaves occupy the low indices, the root is last.
pub struct AdaptiveHuffmanTree {
    /// when the root frequency reaches this, the tree is rebuilt and halved
    max_freq: usize,
    /// number of distinct symbols (leaves)
    num_symb: usize,
    /// total nodes = 2*num_symb - 1
    node_count: usize,
    /// index of the root node
    root: usize,
    /// node frequency and sorting key, extra is the frequency backstop
    freq: Vec<usize>,
    /// index of parent node of the node in this slot
    parent: Vec<usize>,
    /// index of the left son of the node in this slot, right son is found by incrementing by 1
    son: Vec<usize>,
    /// map from symbols (index) to leaves (value)
    symb_map: Vec<usize>,
}
/// Encoder: adaptive Huffman tree plus a pending-bit buffer.
pub struct AdaptiveHuffmanCoder {
    tree: AdaptiveHuffmanTree,
    /// bits not yet flushed to the output stream
    bits: BitVec,
    /// index of the next unwritten bit within `bits`
    ptr: usize,
}
/// Decoder: adaptive Huffman tree plus a read-ahead bit buffer.
pub struct AdaptiveHuffmanDecoder {
    tree: AdaptiveHuffmanTree,
    /// bits read from the stream but not yet consumed
    bits: BitVec,
    /// index of the next unread bit within `bits`
    ptr: usize,
}
/// encoding table giving number of bits used to encode the
/// upper 6 bits of the position; indexed by those 6 bits
const P_LEN: [u8; 64] = [
    0x03, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
    0x06, 0x06, 0x06, 0x06, 0x06, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
    0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
    0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
];
/// codes for the upper 6 bits of position, the P_LEN
/// most significant bits are the code, remaining bits should
/// not be written. Indexed the same way as `P_LEN`.
const P_CODE: [u8; 64] = [
    0x00, 0x20, 0x30, 0x40, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78, 0x80, 0x88, 0x90, 0x94, 0x98, 0x9C, 0xA0, 0xA4, 0xA8,
    0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC2, 0xC4, 0xC6, 0xC8, 0xCA, 0xCC, 0xCE, 0xD0, 0xD2, 0xD4, 0xD6, 0xD8, 0xDA,
    0xDC, 0xDE, 0xE0, 0xE2, 0xE4, 0xE6, 0xE8, 0xEA, 0xEC, 0xEE, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8,
    0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF,
];
/// decoding table for number of bits used to encode the
/// upper 6 bits of the position, the index is the code
/// plus some few bits on the right that don't matter
/// (extra bits are the MSB's of the lower 6 bits)
const D_LEN: [u8; 256] = [
    0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
    0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
    0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
    0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
    0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
    0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
    0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
    0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
    0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
    0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
    0x06, 0x06, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
    0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
    0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
    0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
];
/// values for the upper 6 bits of position, indexing is
/// the same as for D_LEN
const D_CODE: [u8; 256] = [
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
    0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
    0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
    0x05, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x08, 0x08,
    0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A,
    0x0A, 0x0A, 0x0A, 0x0B, 0x0B, 0x0B, 0x0B, 0x0B, 0x0B, 0x0B, 0x0B, 0x0C, 0x0C, 0x0C, 0x0C, 0x0D, 0x0D, 0x0D, 0x0D,
    0x0E, 0x0E, 0x0E, 0x0E, 0x0F, 0x0F, 0x0F, 0x0F, 0x10, 0x10, 0x10, 0x10, 0x11, 0x11, 0x11, 0x11, 0x12, 0x12, 0x12,
    0x12, 0x13, 0x13, 0x13, 0x13, 0x14, 0x14, 0x14, 0x14, 0x15, 0x15, 0x15, 0x15, 0x16, 0x16, 0x16, 0x16, 0x17, 0x17,
    0x17, 0x17, 0x18, 0x18, 0x19, 0x19, 0x1A, 0x1A, 0x1B, 0x1B, 0x1C, 0x1C, 0x1D, 0x1D, 0x1E, 0x1E, 0x1F, 0x1F, 0x20,
    0x20, 0x21, 0x21, 0x22, 0x22, 0x23, 0x23, 0x24, 0x24, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x28, 0x29, 0x29,
    0x2A, 0x2A, 0x2B, 0x2B, 0x2C, 0x2C, 0x2D, 0x2D, 0x2E, 0x2E, 0x2F, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36,
    0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
];
impl AdaptiveHuffmanTree {
    /// Build the initial balanced tree: every symbol starts with frequency 1.
    /// `num_symbols` is the count of distinct codes (literals plus match lengths).
    pub fn create(num_symbols: usize) -> Self {
        let mut ans = Self {
            max_freq: 0x8000,
            num_symb: num_symbols,
            node_count: 2 * num_symbols - 1,
            root: 2 * num_symbols - 2,
            freq: vec![0; 2 * num_symbols],
            parent: vec![0; 2 * num_symbols - 1],
            son: vec![0; 2 * num_symbols - 1],
            symb_map: vec![0; num_symbols],
        };
        // Leaves are stored first, one for each symbol (character)
        // leaves are signaled by son[i] >= node_count
        for i in 0..ans.num_symb {
            ans.freq[i] = 1;
            ans.son[i] = i + ans.node_count;
            ans.symb_map[i] = i;
        }
        // Next construct the branches and root, there are num_symb-1 non-leaf nodes.
        // The sons will be 0,2,4,...,node_count-3, these are left sons, the right sons
        // are not explicitly stored, because we always have rson[i] = lson[i] + 1
        // parent will be n,n,n+1,n+1,n+2,n+2,...,n+node_count-1,n+node_count-1
        // Frequency (freq) of a parent node is the sum of the frequencies attached to it.
        // Note the frequencies will be in ascending order.
        let mut i = 0;
        let mut j = ans.num_symb;
        while j <= ans.root {
            ans.freq[j] = ans.freq[i] + ans.freq[i + 1];
            ans.son[j] = i;
            ans.parent[i] = j;
            ans.parent[i + 1] = j;
            i += 2;
            j += 1;
        }
        // last frequency entry is a backstop that prevents any frequency from moving
        // beyond the end of the array (must be larger than any possible frequency)
        ans.freq[ans.node_count] = 0xffff;
        // root's parent of 0 doubles as the loop terminator in `update`
        ans.parent[ans.root] = 0;
        ans
    }
    /// Rebuild the adaptive Huffman tree, triggered by frequency hitting the maximum.
    /// Halves all leaf frequencies, then reconstructs the tree bottom-up so the
    /// ascending-frequency invariant of the node array is restored.
    fn rebuild_huff(&mut self) {
        // Collect leaf nodes from anywhere and pack them on the left.
        // Replace the freq of every leaf by (freq+1)/2.
        let mut j = 0;
        for i in 0..self.node_count {
            if self.son[i] >= self.node_count {
                self.freq[j] = (self.freq[i] + 1) / 2;
                self.son[j] = self.son[i];
                j += 1;
            }
        }
        // Connect sons, old connections are not used in any way.
        // LZHUF has i,j,k as signed, seems to be no reason.
        let mut i: usize = 0; // left son
        j = self.num_symb; // parent node - should already be num_symb
        let mut k: usize; // right son or sorting reference
        let mut f: usize; // sum of lson and rson frequencies
        let mut l: usize; // offset from sorting reference to parent node
        while j < self.node_count {
            // first set parent frequency, supposing i,k are sons
            k = i + 1;
            f = self.freq[i] + self.freq[k];
            self.freq[j] = f;
            // make k the farthest node with frequency > this frequency
            k = j - 1;
            while f < self.freq[k] {
                k -= 1;
            }
            k += 1;
            // insert parent of i at position k
            l = (j - k) * 2;
            // shift freq/son right by one to open slot k (memmove in LZHUF.C)
            for kp in (k..k + l).rev() {
                self.freq[kp + 1] = self.freq[kp]
            }
            self.freq[k] = f;
            for kp in (k..k + l).rev() {
                self.son[kp + 1] = self.son[kp]
            }
            self.son[k] = i;
            i += 2; // next left son
            j += 1; // next parent
        }
        // Connect parents.
        // In this loop i is the parent, k is the son
        for i in 0..self.node_count {
            k = self.son[i];
            if k >= self.node_count {
                // k is a leaf, connect to symbol table
                self.symb_map[k - self.node_count] = i;
            }
            else {
                // k=left son, k+1=right son
                self.parent[k] = i;
                self.parent[k + 1] = i;
            }
        }
    }
    /// increment frequency of given code by one, and update tree
    fn update(&mut self, c0: i16) {
        let mut i: usize;
        let mut j: usize;
        let mut k: usize;
        let mut l: usize;
        if self.freq[self.root] == self.max_freq {
            self.rebuild_huff()
        }
        // the leaf node corresponding to this character
        let mut c = self.symb_map[c0 as usize];
        // sorting loop, node pool is arranged in ascending frequency order
        loop {
            self.freq[c] += 1;
            k = self.freq[c];
            // if order is disturbed, exchange nodes
            l = c + 1;
            if k > self.freq[l] {
                while k > self.freq[l] {
                    l += 1;
                }
                l -= 1;
                // swap the node being checked with the farthest one that is smaller than it
                self.freq[c] = self.freq[l];
                self.freq[l] = k;
                i = self.son[c];
                if i < self.node_count {
                    self.parent[i] = l;
                    self.parent[i + 1] = l;
                }
                else {
                    self.symb_map[i - self.node_count] = l;
                }
                j = self.son[l];
                self.son[l] = i;
                if j < self.node_count {
                    self.parent[j] = c;
                    self.parent[j + 1] = c;
                }
                else {
                    self.symb_map[j - self.node_count] = c;
                }
                self.son[c] = j;
                c = l;
            }
            // climb one level and repeat until the root (parent == 0) is reached
            c = self.parent[c];
            if c == 0 {
                break; // root was reached
            }
        }
    }
}
#[allow(dead_code)]
impl AdaptiveHuffmanCoder {
    /// Create a coder with a fresh tree over `num_symbols` codes.
    pub fn create(num_symbols: usize) -> Self {
        Self {
            tree: AdaptiveHuffmanTree::create(num_symbols),
            bits: BitVec::new(),
            ptr: 0,
        }
    }
    /// keep the bit vector small, we don't need the bits behind us
    fn drop_leading_bits(&mut self) {
        let cpy = self.bits.clone();
        self.bits = BitVec::new();
        for i in self.ptr..cpy.len() {
            self.bits.push(cpy.get(i).unwrap());
        }
        self.ptr = 0;
    }
    /// output `num_bits` of `code` starting from the MSB, unlike LZHUF.C the bits are always
    /// written to the output stream (sometimes backing up and rewriting)
    ///
    /// NOTE(review): uses `write` + `expect` — a short write or I/O error would
    /// panic or lose bits; consider `write_all` and propagating the error.
    fn put_code<W: Write + Seek>(&mut self, num_bits: u16, mut code: u16, writer: &mut W) {
        for _i in 0..num_bits {
            self.bits.push(code & 0x8000 > 0);
            code <<= 1;
            self.ptr += 1;
        }
        let bytes = self.bits.to_bytes();
        _ = writer.write(bytes.as_slice()).expect("write err");
        if self.bits.len() % 8 > 0 {
            // partial final byte: rewind one byte so it gets rewritten next call
            writer.seek(SeekFrom::Current(-1)).expect("seek err");
            self.ptr = 8 * (self.bits.len() / 8);
            self.drop_leading_bits();
        }
        else {
            self.bits = BitVec::new();
            self.ptr = 0;
        }
    }
    /// Huffman-encode symbol `c` and update the tree's frequency counts.
    pub fn encode_char<W: Write + Seek>(&mut self, c: u16, writer: &mut W) {
        let mut code: u16 = 0;
        let mut num_bits: u16 = 0;
        let mut curr_node: usize = self.tree.symb_map[c as usize];
        // This is the Huffman scheme: going from leaf to root, add a 0 bit if we
        // are coming from the left, or a 1 bit if we are coming from the right.
        loop {
            code >>= 1;
            // if node's address is odd-numbered, we are coming from the right
            code += (curr_node as u16 & 1) << 15;
            num_bits += 1;
            curr_node = self.tree.parent[curr_node];
            if curr_node == self.tree.root {
                break;
            }
        }
        self.put_code(num_bits, code, writer);
        self.tree.update(c as i16); // TODO: why is input to update signed
    }
    /// Encode a match position: table-coded upper 6 bits, verbatim lower 6 bits.
    pub fn encode_position<W: Write + Seek>(&mut self, c: u16, writer: &mut W) {
        // upper 6 bits come from table
        let i = (c >> 6) as usize;
        self.put_code(P_LEN[i] as u16, (P_CODE[i] as u16) << 8, writer);
        // lower 6 bits verbatim
        self.put_code(6, (c & 0x3f) << 10, writer);
    }
}
impl AdaptiveHuffmanDecoder {
    /// Create a decoder with a fresh tree over `num_symbols` codes.
    pub fn create(num_symbols: usize) -> Self {
        Self {
            tree: AdaptiveHuffmanTree::create(num_symbols),
            bits: BitVec::new(),
            ptr: 0,
        }
    }
    /// keep the bit vector small, we don't need the bits behind us
    fn drop_leading_bits(&mut self) {
        let cpy = self.bits.clone();
        self.bits = BitVec::new();
        for i in self.ptr..cpy.len() {
            self.bits.push(cpy.get(i).unwrap());
        }
        self.ptr = 0;
    }
    /// Get the next bit reading from the stream as needed.
    /// When EOF is reached 0 is returned, consistent with original C code.
    /// `reader` should not be advanced outside this function until decoding is done.
    fn get_bit<R: Read>(&mut self, reader: &mut R) -> Result<u8, std::io::Error> {
        match self.bits.get(self.ptr) {
            Some(bit) => {
                self.ptr += 1;
                Ok(bit as u8)
            }
            None => {
                // buffer exhausted: pull one more byte from the stream
                let mut by: [u8; 1] = [0];
                match reader.read_exact(&mut by) {
                    Ok(()) => {
                        if self.bits.len() > 512 {
                            self.drop_leading_bits();
                        }
                        self.bits.append(&mut BitVec::from_bytes(&by));
                        // recurse: the buffer now has at least one unread bit
                        self.get_bit(reader)
                    }
                    Err(e) => Err(e),
                }
            }
        }
    }
    /// get the next 8 bits into a u8, used exclusively to decode the position
    fn get_byte<R: Read>(&mut self, bytes: &mut R) -> Result<u8, std::io::Error> {
        let mut ans: u8 = 0;
        for _i in 0..8 {
            ans <<= 1;
            ans |= self.get_bit(bytes)?;
        }
        Ok(ans)
    }
    /// Decode one symbol (literal 0..=255 or length code >=256) and update the tree.
    pub fn decode_char<R: Read>(&mut self, reader: &mut R) -> Result<i16, std::io::Error> {
        let mut c: usize = self.tree.son[self.tree.root];
        // This is the Huffman scheme: go from root to leaf, branching left or right depending on the
        // successive bits. The nodes are arranged so that branching left or right means adding 0 or
        // 1 to the index. Remember leaves are signaled by son >= node_count.
        while c < self.tree.node_count {
            c += self.get_bit(reader)? as usize;
            c = self.tree.son[c];
        }
        c -= self.tree.node_count;
        self.tree.update(c as i16); // TODO: why is input to update signed
        Ok(c as i16)
    }
    /// Decode a match position: inverse of `encode_position`.
    pub fn decode_position<R: Read>(&mut self, reader: &mut R) -> Result<u16, std::io::Error> {
        // get upper 6 bits from table
        let mut first8 = self.get_byte(reader)? as u16;
        let upper6 = (D_CODE[first8 as usize] as u16) << 6;
        let coded_bits = D_LEN[first8 as usize] as u16;
        // read lower 6 bits verbatim
        // we already got 8 bits, we need another 6 - (8-coded_bits) = coded_bits - 2
        for _i in 0..coded_bits - 2 {
            first8 <<= 1;
            first8 += self.get_bit(reader)? as u16;
        }
        Ok(upper6 | (first8 & 0x3f))
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/ipf/track_common.rs | src/file_parsers/ipf/track_common.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/ipf/data_record.rs | src/file_parsers/ipf/data_record.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use crate::source_map::{MapDump, OptionalSourceMap, SourceValue};
use binrw::binrw;
/// An IPF DATA record header, parsed big-endian via `binrw`.
/// The Extra Data Block (EDB) this header describes follows it in the file.
#[binrw]
#[brw(big)]
#[derive(Debug)]
pub(crate) struct DataRecord {
    pub(crate) length: u32,   // Length of the Extra Data Block (or 0)
    pub(crate) bit_size: u32, // Data area size in bits (length * 8)
    pub(crate) crc: u32,      // CRC32 of the Extra Data Block
    pub(crate) data_key: u32, // Unique key used to match the same key in an Image record.
}
impl DataRecord {
    /// The record's key, used to match this `DataRecord` to the `ImageRecord`
    /// carrying the same key.
    pub(crate) fn key(&self) -> u32 {
        self.data_key
    }
}
impl MapDump for DataRecord {
    /// Emit this record into the source map as a "Data Record" node with one
    /// child entry per field, returning the index of the new node.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        let record = map.add_child(parent, "Data Record", SourceValue::default());
        let record_idx = record.index();
        // Children are added in field order: length, bit_size, crc, data_key.
        let length_node = record.add_child("length", SourceValue::u32(self.length));
        let bit_size_node = length_node.add_sibling("bit_size", SourceValue::u32(self.bit_size));
        let crc_node = bit_size_node.add_sibling("crc", SourceValue::hex_u32(self.crc));
        crc_node.add_sibling("data_key", SourceValue::u32(self.data_key));
        record_idx
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/ipf/v2_track.rs | src/file_parsers/ipf/v2_track.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! Decoding functions for IPF encoder v2 tracks (SXX).
use crate::{
bitstream_codec::{mfm::MFM_BYTE_LEN, TrackDataStream},
file_parsers::ipf::{
data_block::{BlockDescriptor, BlockFlags},
image_record::ImageRecord,
info_record::InfoRecord,
ipf::IpfParser,
stream_element::{DataSample, DataStreamElement, DataType, GapSample, GapStreamElement},
},
io::ReadSeek,
prelude::{TrackDataEncoding, TrackDataRate, TrackDataResolution, TrackDensity},
source_map::{MapDump, OptionalSourceMap},
track_schema::TrackSchema,
DiskImage,
DiskImageError,
};
use binrw::BinRead;
use bit_vec::BitVec;
/// Direction in which a block's gap area should be filled with the decoded
/// gap pattern.
/// NOTE(review): currently accepted by `read_v2_gap_elements` but not yet
/// acted upon — gap rendering is still a placeholder; confirm semantics when
/// it is implemented.
pub enum GapFillDirection {
    Forwards,
    Backwards,
}
impl IpfParser {
/// Decode a single IPF v2 (SXX) encoded track from `reader` into `image`.
///
/// Adds an empty MFM bitstream track for `image_record`'s cylinder/head,
/// decodes each block descriptor's stream elements into it, then rescans the
/// track with the Amiga schema.
pub(crate) fn decode_v2_track<RWS>(
    reader: &mut RWS,
    image: &mut DiskImage,
    info_record: &InfoRecord,
    image_record: &ImageRecord,
    record_node: usize,
    data: &crate::file_parsers::ipf::ipf::DataRecordInfo,
) -> Result<(), DiskImageError>
where
    RWS: ReadSeek,
{
    image.set_resolution(TrackDataResolution::BitStream);
    log::debug!("-------------------------- Decoding V2 (SXX) Track ----------------------------------");
    log::debug!(
        "Track {} bitct: {:6} block_ct: {:02} data_bits: {}",
        image_record.ch(),
        image_record.track_bits,
        image_record.block_count,
        image_record.data_bits,
    );
    // Density is *probably* double. Guess from bitcell count or assume double.
    let data_rate =
        TrackDataRate::from(TrackDensity::from_bitcells(image_record.track_bits).unwrap_or(TrackDensity::Double));
    // There's a variety of approaches here - we could craft a BitStreamTrack in isolation
    // and then attach it to the Disk, or we can add an empty track and then write to it.
    // I'm going to try the latter approach first.
    let new_track_idx = image.add_empty_track(
        image_record.ch(),
        TrackDataEncoding::Mfm,
        Some(TrackDataResolution::BitStream),
        data_rate,
        image_record.track_bits as usize,
        Some(true),
    )?;
    // After we get a reference to the track, the disk image will be mutably borrowed until
    // the end of track processing, and we won't be able to get a mutable reference to the
    // source map.
    //
    // We fall back to the trusty ol take hack to get around this. But now we have to put it
    // back on error if we want to preserve it.
    //
    // A better design would probably be to construct a detached track object and then attach
    // it to the image after it is built. Or, if we store tracks as options, I'd rather take
    // the track than the source map as it would simplify error handling.
    //
    // TODO: Revisit this design
    let mut source_map = image.take_source_map().unwrap();
    let track = match image.track_by_idx_mut(new_track_idx) {
        Some(track) => track,
        None => {
            image.put_source_map(source_map);
            log::error!("Failed to get mutable track for image.");
            return Err(DiskImageError::FormatParseError);
        }
    };
    {
        // Seek to the start position for the first block.
        let bitstream = match track.stream_mut() {
            Some(stream) => stream,
            None => {
                image.put_source_map(source_map);
                log::error!("Failed to get mutable stream for track.");
                return Err(DiskImageError::FormatParseError);
            }
        };
        // Block data begins on a 16-bit boundary (start_bit_pos masked to a
        // multiple of 16).
        log::trace!("Seeking to {} for first block.", image_record.start_bit_pos & !0xF);
        let mut cursor = image_record.start_bit_pos as usize & !0xF;
        for (bi, block) in data.blocks.iter().enumerate() {
            log::debug!(
                "Block {}: data offset: {} data: [bytes: {:?} bits: {}], gap: [bytes: {:?} bits: {}]",
                bi,
                data.edb_offset + block.data_offset as u64,
                block.data_bytes,
                block.data_bits,
                block.gap_bytes,
                block.gap_bits
            );
            // On error, restore the source map to the image before bailing.
            let encoded_bytes = match Self::decode_v2_data_block(
                reader,
                &mut source_map,
                data.edb_offset,
                block,
                record_node,
                bitstream,
                &mut cursor,
            ) {
                Ok(bytes) => bytes,
                Err(e) => {
                    image.put_source_map(source_map);
                    log::error!("Failed to decode V2 block: {}", e);
                    return Err(e);
                }
            };
            // As far as I can tell there's no field that gives the un-decoded length of the data elements,
            // so `encoded_bytes` is currently not validated against anything.
        }
    }
    let track = match image.track_by_idx_mut(new_track_idx) {
        Some(track) => track,
        None => {
            image.put_source_map(source_map);
            log::error!("Failed to get mutable track for image.");
            return Err(DiskImageError::FormatParseError);
        }
    };
    let bitstream_track = match track.as_bitstream_track_mut() {
        Some(track) => track,
        None => {
            image.put_source_map(source_map);
            log::error!("Failed to get mutable bitstream track for image.");
            return Err(DiskImageError::FormatParseError);
        }
    };
    bitstream_track.rescan(Some(TrackSchema::Amiga))?;
    // Finally, put the source map back on the image.
    image.put_source_map(source_map);
    Ok(())
}
/// Decode one v2 (SXX) block: validate the descriptor, read its gap stream
/// element lists (if any), decode its data stream elements into the track at
/// `*cursor`, and advance the cursor past the gap.
///
/// Returns the value from `decode_v2_data_elements`, doubled again here.
/// NOTE(review): the callee already doubles its byte count — confirm the
/// intended units; callers currently discard this value.
pub fn decode_v2_data_block<RWS>(
    reader: &mut RWS,
    source_map: &mut Box<dyn OptionalSourceMap>,
    edb_offset: u64,
    block: &BlockDescriptor,
    record_node: usize,
    bitstream: &mut TrackDataStream,
    cursor: &mut usize,
) -> Result<usize, DiskImageError>
where
    RWS: ReadSeek,
{
    log::debug!("-------------------------- Decoding V2 Data Block --------------------------------");
    // Write BlockDescriptor to source map
    let block_node = block.write_to_map(source_map, record_node);
    if block_node == 0 {
        // Fixed: this error previously reused the "missing gap_offset" text.
        log::error!("Invalid block descriptor!");
        return Err(DiskImageError::ImageCorruptError(
            "Invalid V2 block descriptor.".to_string(),
        ));
    }
    // A V2 block descriptor must carry gap_offset, cell_type and block flags.
    // (The previous bindings were unused; we only need the presence checks —
    // later code unwraps the Options after these validations.)
    if block.gap_offset.is_none() {
        log::error!("V2 block descriptor missing gap_offset.");
        return Err(DiskImageError::ImageCorruptError(
            "V2 block descriptor missing gap_offset.".to_string(),
        ));
    }
    if block.cell_type.is_none() {
        // Fixed: error message previously said "missing gap_offset" here.
        log::error!("V2 block descriptor missing cell_type.");
        return Err(DiskImageError::ImageCorruptError(
            "V2 block descriptor missing cell_type.".to_string(),
        ));
    }
    match &block.block_flags {
        Some(flags) => log::debug!("Block flags: {:?}", flags),
        None => {
            log::error!("V2 block descriptor missing block flags.");
            return Err(DiskImageError::ImageCorruptError(
                "V2 block descriptor missing block flags.".to_string(),
            ));
        }
    }
    // Read GapStreamElements
    // -----------------------------------------------------------------------------------------
    // These only exist on v2 tracks. They seem to come before the data
    // elements, but the order doesn't really matter because the offsets
    // determine where they are found.
    let mut decoded_bytes = 0;
    // Gap elements are only present if gap_bits > 0
    if block.gap_bits > 0 {
        // Safe to unwrap: validated above.
        let gap_offset = edb_offset + block.gap_offset.unwrap() as u64;
        log::trace!("Seeking to gap offset: {}", gap_offset);
        reader.seek(std::io::SeekFrom::Start(gap_offset))?;
        // Read forward gap list, if present.
        // NOTE(review): the FORWARD_GAP list is read with fill direction
        // Backwards (and vice versa below) — confirm this inversion is
        // intentional before gap rendering is implemented.
        if block.block_flags.as_ref().unwrap().contains(BlockFlags::FORWARD_GAP) {
            let gap_node = source_map
                .add_child(block_node, "Forward Gap List", Default::default())
                .index();
            Self::read_v2_gap_elements(
                reader,
                source_map,
                block,
                gap_node,
                bitstream,
                GapFillDirection::Backwards,
            )?;
        }
        // Read reverse gap list, if present
        if block.block_flags.as_ref().unwrap().contains(BlockFlags::BACKWARD_GAP) {
            let gap_node = source_map
                .add_child(block_node, "Backward Gap List", Default::default())
                .index();
            Self::read_v2_gap_elements(
                reader,
                source_map,
                block,
                gap_node,
                bitstream,
                GapFillDirection::Forwards,
            )?;
        }
    }
    // Seek to the first data element
    let data_offset = edb_offset + block.data_offset as u64;
    log::trace!("Seeking to data offset: {}", data_offset);
    if let Err(e) = reader.seek(std::io::SeekFrom::Start(data_offset)) {
        log::error!("Failed to seek to data element: {}", e);
        return Err(DiskImageError::from(e));
    }
    decoded_bytes += Self::decode_v2_data_elements(reader, source_map, block, block_node, bitstream, cursor)?;
    // Render gap (currently just advances the cursor past the gap area).
    Self::write_gap_elements(block, bitstream, cursor, None, None)?;
    Ok(decoded_bytes * 2)
}
/// Read a chain of v2 gap stream elements from `reader`, logging each to the
/// source map and assembling the (repeated) sample pattern into a `BitVec`.
///
/// NOTE(review): the assembled pattern is returned but callers currently
/// discard it, and `direction` is not yet used — gap rendering is a TODO.
pub fn read_v2_gap_elements<RWS>(
    reader: &mut RWS,
    source_map: &mut Box<dyn OptionalSourceMap>,
    block: &BlockDescriptor,
    block_node: usize,
    bitstream: &mut TrackDataStream,
    direction: GapFillDirection,
) -> Result<BitVec, DiskImageError>
where
    RWS: ReadSeek,
{
    log::debug!("------------------------ Decoding V2 GapStreamElements ---------------------------");
    let mut gap_element = GapStreamElement::read(reader)?;
    // Write gap element to source map
    let _gap_node = gap_element.write_to_map(source_map, block_node);
    let mut element_ct = 0;
    log::debug!("Total gap bits: {}", block.gap_bits);
    // A repeat-count element applies to the sample element that follows it.
    let mut repeat_ct = None;
    let mut bit_vec = BitVec::new();
    while !gap_element.gap_head.is_null() {
        match &gap_element.gap_sample {
            Some(GapSample::RepeatCt(ct)) => {
                repeat_ct = Some(*ct);
            }
            Some(GapSample::Sample(bits)) => {
                // Tile the sample pattern `repeat` times; a missing repeat
                // count falls back to a single repetition.
                let repeat = repeat_ct.take().unwrap_or_else(|| {
                    log::warn!("Gap element has no repeat count!");
                    1
                });
                bit_vec = BitVec::from_fn(bits.len() * repeat, |i| bits[i % bits.len()]);
            }
            None => break,
        }
        // Read the next gap element
        element_ct += 1;
        gap_element = GapStreamElement::read(reader)?;
        // Write gap element to source map
        let _gap_node = gap_element.write_to_map(source_map, block_node);
    }
    // Fixed: log message previously said "V12 block".
    log::debug!("Read {} gap elements from V2 block", element_ct);
    Ok(bit_vec)
}
/// Read the chain of data stream elements for one v2 block and write them to
/// the track at `*cursor`: Sync bytes are written raw (they are already
/// MFM-encoded), Data and Gap bytes are MFM-encoded first.
///
/// Returns the number of MFM bytes written (decoded byte count doubled).
pub fn decode_v2_data_elements<RWS>(
    reader: &mut RWS,
    source_map: &mut Box<dyn OptionalSourceMap>,
    block: &BlockDescriptor,
    block_node: usize,
    bitstream: &mut TrackDataStream,
    cursor: &mut usize,
) -> Result<usize, DiskImageError>
where
    RWS: ReadSeek,
{
    log::debug!("------------------------ Decoding V2 DataStreamElements ---------------------------");
    // Read DataStreamElements
    // -----------------------------------------------------------------------------------------
    // NOTE(review): the first element is read without the DATA_IN_BITS flag,
    // while subsequent elements are read via read_args with it (see loop
    // bottom) — confirm this asymmetry is intentional.
    let mut data_element = DataStreamElement::read(reader)?;
    // Write data element to source map
    let _data_node = data_element.write_to_map(source_map, block_node);
    let mut element_ct = 0;
    let mut decoded_bytes = 0;
    while !data_element.data_head.is_null() {
        let data_type = data_element.data_head.data_type();
        let data = if let Some(samples) = &data_element.data_sample {
            match samples {
                DataSample::Bytes(data) => {
                    log::debug!(
                        "Data element contains: {} bytes: {:02X?}",
                        data.len(),
                        &data[0..std::cmp::min(16, data.len())]
                    );
                    data
                }
                DataSample::Bits(bits) => {
                    // Bit-granular samples are not handled yet.
                    // Fixed: log text previously said "V1 block" in this V2 decoder.
                    log::warn!("Unhandled: Bit samples in V2 block!");
                    log::debug!("Data element contains: {} bits", bits.len());
                    &bits.to_bytes()
                }
            }
        }
        else {
            log::error!("Data element has no samples!");
            return Err(DiskImageError::ImageCorruptError(
                "Data element has no samples.".to_string(),
            ));
        };
        let wrote = match data_type {
            DataType::Sync => {
                // Write SYNC bytes RAW (they are already MFM-encoded!)
                log::trace!(
                    "Writing raw Sync bytes: {:02X?}",
                    &data[0..std::cmp::min(16, data.len())]
                );
                // Write the raw bytes
                bitstream.write_raw_buf(data, *cursor);
                data.len() / 2
            }
            DataType::Data => {
                // Encode data bytes as MFM
                log::trace!(
                    "Encoding data element: {:02X?}",
                    &data[0..std::cmp::min(16, data.len())]
                );
                bitstream.write_encoded_buf(data, *cursor);
                data.len()
            }
            DataType::Gap => {
                // Encode gap bytes as MFM
                log::trace!("Encoding GAP element: {:02X?}", &data[0..std::cmp::min(16, data.len())]);
                bitstream.write_encoded_buf(data, *cursor);
                data.len()
            }
            DataType::End => {
                // End of data block
                log::debug!("End of data block.");
                break;
            }
            _ => {
                log::warn!("Unknown data element type: {:?}", data_type);
                data.len()
            }
        };
        decoded_bytes += wrote;
        *cursor += wrote * MFM_BYTE_LEN;
        // Read the next data element
        element_ct += 1;
        data_element = DataStreamElement::read_args(
            reader,
            (
                block.block_flags.as_ref().unwrap().contains(BlockFlags::DATA_IN_BITS),
                0,
            ),
        )?;
        // Write data element to source map
        let _data_node = data_element.write_to_map(source_map, block_node);
    }
    // Fixed: log message previously said "V1 block".
    log::debug!(
        "Read {} data elements from V2 block, wrote {} MFM bytes to track",
        element_ct,
        decoded_bytes * 2
    );
    Ok(decoded_bytes * 2)
}
/// Placeholder for rendering decoded gap patterns into the track.
///
/// NOTE(review): the decoded gap bit patterns (`forwards_bits` /
/// `backwards_bits`) and `bitstream` are currently ignored; the gap region is
/// skipped by advancing the cursor, leaving whatever the empty track was
/// initialized with in the gap area.
pub fn write_gap_elements(
    block: &BlockDescriptor,
    bitstream: &mut TrackDataStream,
    cursor: &mut usize,
    forwards_bits: Option<BitVec>,
    backwards_bits: Option<BitVec>,
) -> Result<(), DiskImageError> {
    // Advance track cursor to the end of the gap to write the next set of data elements.
    *cursor += block.gap_bits as usize;
    Ok(())
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/ipf/ipf.rs | src/file_parsers/ipf/ipf.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! A format parser for the Interchangeable Preservation Format (IPF).
//! IPF is a format devised by the Software Preservation Society for the
//! preservation of magnetic media. It is a complex format that includes
//! a variety of metadata and data structures to support the preservation
//! of a wide variety of disk formats.
//!
//! This parser is a work in progress and is not yet complete.
//!
//! References used, MAME's IPF parser (BSD licensed)
//! https://github.com/mamedev/mame/blob/master/src/lib/formats/ipf_dsk.cpp
//!
//! IPF documentation by Jean Louis-Guerin
//! https://www.kryoflux.com/download/ipf_documentation_v1.6.pdf
use crate::{
file_parsers::{
bitstream_flags,
ipf::{
chunk::{IpfChunk, IpfChunkType},
data_block::BlockDescriptor,
data_record::DataRecord,
image_record::ImageRecord,
info_record::{EncoderType, InfoRecord},
},
reader_len,
FormatCaps,
ParserReadOptions,
ParserWriteOptions,
},
io::{ReadSeek, ReadWriteSeek},
source_map::MapDump,
types::{DiskCh, DiskDescriptor, Platform, TrackDataEncoding, TrackDataRate, TrackDensity},
DiskImage,
DiskImageError,
DiskImageFileFormat,
FoxHashMap,
LoadingCallback,
ParserWriteCompatibility,
};
use binrw::BinRead;
/// A parsed DATA record together with the stream offset of its Extra Data
/// Block (EDB) and the block descriptors read from that EDB.
pub(crate) struct DataRecordInfo {
    pub(crate) data_record: DataRecord,
    // Absolute stream offset where the EDB begins (descriptor offsets are
    // relative to this).
    pub(crate) edb_offset: u64,
    pub(crate) blocks: Vec<BlockDescriptor>,
}
/// Parser for the Interchangeable Preservation Format (IPF). Stateless: all
/// functionality is exposed through associated functions.
pub struct IpfParser {}
impl IpfParser {
/// The file format this parser handles.
#[allow(dead_code)]
fn format() -> DiskImageFileFormat {
    // Fixed: previously returned PceBitstreamImage (copy-paste from another
    // parser). This is the IPF parser — load_image sets IpfImage.
    DiskImageFileFormat::IpfImage
}
/// Capability flags this parser supports when reading IPF images.
pub(crate) fn capabilities() -> FormatCaps {
    bitstream_flags() | FormatCaps::CAP_COMMENT | FormatCaps::CAP_WEAK_BITS
}
/// Platforms reported for IPF images.
pub fn platforms() -> Vec<Platform> {
    // IPF images should really support anything (i think), but I'm only aware of
    // IPF collections for Amiga and AtariST, only one of which we support.
    vec![Platform::Amiga]
}
/// File extensions (lowercase, without the dot) recognized as IPF images.
pub(crate) fn extensions() -> Vec<&'static str> {
    ["ipf"].to_vec()
}
/// Sniff the stream for the IPF signature: the first chunk must be CAPS.
pub(crate) fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
    _ = image.seek(std::io::SeekFrom::Start(0));
    // Pass a data limit of 0 so we don't end up reading a huge chunk from an
    // invalid file.
    matches!(
        IpfChunk::read_args(&mut image, (0,)),
        Ok(file_header) if file_header.chunk_type == Some(IpfChunkType::Caps)
    )
}
/// Return the compatibility of the image with the parser.
/// Currently, writing to IPF is not supported (always `Incompatible`).
/// It is unlikely it ever will be implemented, to avoid controversy.
pub(crate) fn can_write(_image: Option<&DiskImage>) -> ParserWriteCompatibility {
    ParserWriteCompatibility::Incompatible
}
pub(crate) fn load_image<RWS: ReadSeek>(
mut reader: RWS,
disk_image: &mut DiskImage,
_opts: &ParserReadOptions,
_callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
disk_image.set_source_format(DiskImageFileFormat::IpfImage);
// Request a source map, if options specified.
//let null = !opts.flags.contains(ReadFlags::CREATE_SOURCE_MAP);
disk_image.assign_source_map(true);
// Create a new parser instance with the source map.
// let mut parser = IpfParser {
// platforms: Self::platforms(),
// };
// Get length of reader
let image_len = reader_len(&mut reader)?;
// Seek to start of read_buf.
reader.seek(std::io::SeekFrom::Start(0))?;
// Read the first chunk (CAPS chunk)
let header = Self::read_chunk(&mut reader)?;
// First chunk must be CAPS header.
if header.chunk_type != Some(IpfChunkType::Caps) {
return Err(DiskImageError::UnknownFormat);
}
log::debug!("Parsed CAPS chunk: {:#?}", header);
let mut encoder_type = 0u32;
// IPF ImageRecords all define a key that is referenced by DataRecords -
// when we encounter a DataRecord, we must resolve the corresponding ImageRecord
// to know how many BlockDescriptors to expect.
// It appears that the ImageRecord key is just the index, but the scheme allows for it
// not to be, so we'll take the cautious approach and store a hash map of indexes into
// the pool of collected ImageRecords.
let mut image_pool: Vec<ImageRecord> = Vec::with_capacity(200);
let mut image_map: FoxHashMap<u32, usize> = FoxHashMap::with_capacity(200);
let mut data_pool: Vec<DataRecordInfo> = Vec::new();
let mut info_record_opt: Option<InfoRecord> = None;
while let Ok(chunk) = Self::read_chunk(&mut reader) {
match chunk.chunk_type {
Some(IpfChunkType::Info) => {
let info_record: InfoRecord = chunk.into_inner::<InfoRecord>()?;
info_record.write_to_map(disk_image.source_map_mut(), 0);
log::debug!("InfoRecord: {:#?}", info_record);
log::debug!(
"Setting encoder_type to {} ({:?})",
info_record.encoder_type,
info_record.encoder_type_enum
);
encoder_type = info_record.encoder_type;
info_record_opt = Some(info_record);
}
Some(IpfChunkType::Image) => {
let image_record: ImageRecord = chunk.into_inner()?;
//log::debug!("ImageRecord: {:?}", image_record);
log::debug!("Hashing ImageRecord with key {}", image_record.key());
image_map.insert(image_record.key(), image_pool.len());
image_pool.push(image_record);
}
Some(IpfChunkType::Data) => {
let data_record: DataRecord = chunk.into_inner()?;
log::trace!("Parsed DataRecord: {:#?}", data_record);
log::debug!("DataRecord has ImageRecord key of {}", data_record.key());
// Resolve the ImageRecord via map -> pool index -> image_pool chain
let image_record = image_map
.get(&data_record.key())
.and_then(|&index| image_pool.get(index))
.ok_or_else(|| {
log::error!("No ImageRecord found for DataRecord with key {}.", data_record.key());
DiskImageError::ImageCorruptError(format!(
"No ImageRecord found for DataRecord with key {}.",
data_record.key()
))
})?;
// Extra Data Block begins here, with an array of BlockDescriptors.
// Save the stream position at the start of the EDB so we can calculate where
// the next Data Record begins.
let edb_offset = reader.stream_position()?;
let mut blocks = Vec::with_capacity(20);
for _ in 0..image_record.block_count {
let block_descriptor = BlockDescriptor::read_args(&mut reader, (encoder_type,))?;
log::trace!("Parsed BlockDescriptor: {:#?}", block_descriptor);
blocks.push(block_descriptor);
}
let bytes_left = image_len - reader.stream_position()?;
let edb_len = data_record.length;
log::debug!(
"DataRecord reports EDB length of {} bytes and a CRC of {:08X}, {} bytes left in stream.",
edb_len,
data_record.crc,
bytes_left
);
data_pool.push(DataRecordInfo {
data_record,
edb_offset,
blocks,
});
// Calculate address of next DataRecord
let next_data_record = edb_offset + edb_len as u64;
// Address cannot be greater than the length of the image.
if next_data_record > image_len {
log::error!("Next DataRecord address exceeds image length.");
return Err(DiskImageError::ImageCorruptError(
"A DataRecord offset exceeded image length.".to_string(),
));
}
// Seek to the next DataRecord. Hope we find one!
reader.seek(std::io::SeekFrom::Start(next_data_record))?;
}
_ => {
println!("Unknown chunk type: {:?}", chunk.chunk_type);
}
}
}
let bytes_left = image_len - reader.stream_position()?;
println!("Last chunk read with {} bytes left in stream.", bytes_left);
let mut sorted_pool: Vec<usize> = (0..image_pool.len()).collect();
// Sort ImageRecord indices by physical track.
sorted_pool.sort_by(|&a, &b| image_pool[a].cmp(&image_pool[b]));
let info_record = info_record_opt.ok_or_else(|| {
log::error!("No InfoRecord found in IPF image.");
DiskImageError::ImageCorruptError("No InfoRecord found in IPF image.".to_string())
})?;
let platforms = info_record.platforms();
if platforms.is_empty() {
log::warn!("IPF image is not for any compatible platform.");
//return Err(DiskImageError::IncompatibleImage("IPF image is not for any compatible platform.".to_string()));
}
for pi in sorted_pool.iter() {
Self::process_track(&mut reader, disk_image, &info_record, &image_pool[*pi], &data_pool[*pi])?;
}
let desc = DiskDescriptor {
platforms: (!platforms.is_empty()).then_some(platforms),
geometry: DiskCh::new((info_record.max_track + 1) as u16, (info_record.max_side + 1) as u8),
data_encoding: TrackDataEncoding::Mfm,
density: TrackDensity::Double,
data_rate: TrackDataRate::from(TrackDensity::Double),
rpm: None,
write_protect: None,
};
log::debug!("Source Map:");
log::debug!("\n{:?}", disk_image.source_map());
disk_image.descriptor = desc;
Ok(())
}
/// Decode one track: write its IMAGE and DATA records to the source map, then
/// dispatch to the v1 or v2 track decoder based on the INFO record's encoder
/// type.
///
/// Returns `ImageCorruptError` when the encoder type is unknown or missing.
fn process_track<RWS>(
    reader: &mut RWS,
    image: &mut DiskImage,
    info_record: &InfoRecord,
    image_record: &ImageRecord,
    data: &DataRecordInfo,
) -> Result<(), DiskImageError>
where
    RWS: ReadSeek,
{
    let image_node = image_record.write_to_map(image.source_map_mut(), 0);
    let data_node = data.data_record.write_to_map(image.source_map_mut(), image_node);
    // Fixed: the previous if-let/match left a dead trailing `encoder`
    // expression and duplicated the invalid-encoder error arm; a single
    // exhaustive match covers both the Unknown and None cases.
    match info_record.encoder_type_enum {
        Some(EncoderType::V1) => {
            Self::decode_v1_track(reader, image, info_record, image_record, data_node, data)
        }
        Some(EncoderType::V2) => {
            Self::decode_v2_track(reader, image, info_record, image_record, data_node, data)
        }
        Some(EncoderType::Unknown) | None => {
            log::error!("Invalid encoder type: {:02X}", info_record.encoder_type);
            Err(DiskImageError::ImageCorruptError(format!(
                "Invalid encoder type: {:02X}",
                info_record.encoder_type
            )))
        }
    }
}
/// Writing IPF images is not supported; always returns `UnsupportedFormat`.
pub fn save_image<RWS: ReadWriteSeek>(
    _image: &DiskImage,
    _opts: &ParserWriteOptions,
    _output: &mut RWS,
) -> Result<(), DiskImageError> {
    Err(DiskImageError::UnsupportedFormat)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/ipf/data_block.rs | src/file_parsers/ipf/data_block.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use crate::source_map::{MapDump, OptionalSourceMap, SourceValue};
use binrw::binrw;
// Flags for block_flags field
bitflags::bitflags! {
/// Per-block flags from a V2 (encoder type 2) block descriptor's
/// `block_flags` field. Only these three bits are defined.
#[derive(Debug)]
pub(crate) struct BlockFlags: u32 {
const FORWARD_GAP = 0b0001; // Bit 0: Indicates associated forward gap stream elements
const BACKWARD_GAP = 0b0010; // Bit 1: Indicates associated backward gap stream elements
const DATA_IN_BITS = 0b0100; // Bit 2: Data stream sample length: 0 = bytes, 1 = bits
}
}
/// A block descriptor from an IPF DATA record. The field layout depends on the
/// INFO record's encoder type: type 1 adds byte counts, type 2 adds a gap
/// offset, cell type, and flags. The encoder type in effect at parse time is
/// captured in `saved_encoder_type` so later code can tell which variant this is.
#[binrw]
#[brw(big)]
#[br(import(encoder_type: u32))]
#[bw(import(encoder_type: u32))]
#[derive(Debug)]
pub(crate) struct BlockDescriptor {
pub(crate) data_bits: u32, // Size in bits of the decoded block data
pub(crate) gap_bits: u32, // Size in bits of the decoded gap
#[br(if(encoder_type == 1))]
#[bw(if(encoder_type == 1))]
pub(crate) data_bytes: Option<u32>, // Parsed only if encoder_type == 1
#[br(if(encoder_type == 1))]
#[bw(if(encoder_type == 1))]
pub(crate) gap_bytes: Option<u32>, // Parsed only if encoder_type == 1
#[br(if(encoder_type == 2))]
#[bw(if(encoder_type == 2))]
pub(crate) gap_offset: Option<u32>, // Parsed only if encoder_type == 2
#[br(if(encoder_type == 2))]
#[bw(if(encoder_type == 2))]
pub(crate) cell_type: Option<u32>, // Parsed only if encoder_type == 2
pub(crate) blk_encoder_type: u32, // Block encoder type (not to be confused with INFO record encoder type)
// On write, the raw flags word is emitted only for encoder type 2; otherwise 0.
#[bw(calc = if encoder_type == 2 { block_flags.as_ref().map_or(0, |flags| flags.bits()) } else { 0 })]
pub(crate) block_flags_raw: u32,
// Decoded view of block_flags_raw; Some only for encoder type 2.
#[br(calc = if encoder_type == 2 { Some(BlockFlags::from_bits_truncate(block_flags_raw)) } else { None })]
#[bw(ignore)]
pub(crate) block_flags: Option<BlockFlags>,
pub(crate) gap_default: u32, // Default gap value
pub(crate) data_offset: u32, // Offset to the data stream in the extra data area
/// Save the input encoder type.
#[br(calc = encoder_type)]
#[bw(ignore)]
saved_encoder_type: u32,
}
impl MapDump for BlockDescriptor {
    /// Write this block descriptor to the source map under `parent`, using the
    /// layout for the encoder version captured at parse time.
    ///
    /// Returns the index of the node created for this descriptor.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        match self.saved_encoder_type {
            1 => {
                let record = map.add_child(parent, "V1 Block Descriptor", SourceValue::default());
                let node_index = record.index();
                // The byte-count fields are Some whenever saved_encoder_type == 1
                // (see the #[br(if(...))] guards on the struct). unwrap_or(0) instead
                // of unwrap() so a hand-constructed descriptor cannot panic here,
                // matching the map_or(0, ...) convention used by the write-side calc.
                #[rustfmt::skip]
                record
                    .add_child("dataBits", SourceValue::u32(self.data_bits))
                    .add_sibling("gapBits", SourceValue::u32(self.gap_bits))
                    .add_sibling("dataBytes", SourceValue::u32(self.data_bytes.unwrap_or(0)))
                    .add_sibling("gapBytes", SourceValue::u32(self.gap_bytes.unwrap_or(0)))
                    .add_sibling("blockEncoderType", SourceValue::u32(self.blk_encoder_type))
                    .add_sibling("gapDefault", SourceValue::hex_u32(self.gap_default))
                    .add_sibling("dataOffset", SourceValue::u32(self.data_offset));
                node_index
            }
            2 => {
                let record = map.add_child(parent, "V2 Block Descriptor", SourceValue::default());
                let node_index = record.index();
                // BUGFIX: the original unwrapped block_flags for the hex value but fell
                // back to BlockFlags::empty() for the comment in the same expression,
                // so the fallback could never apply (the unwrap would panic first).
                // Use the Option consistently: empty flags render as 0 bits.
                let flags = self.block_flags.as_ref();
                #[rustfmt::skip]
                record
                    .add_child("dataBits", SourceValue::u32(self.data_bits))
                    .add_sibling("gapBits", SourceValue::u32(self.gap_bits))
                    .add_sibling("gapOffset", SourceValue::u32(self.gap_offset.unwrap_or(0)))
                    .add_sibling("cellType", SourceValue::u32(self.cell_type.unwrap_or(0)))
                    .add_sibling("blockEncoderType", SourceValue::u32(self.blk_encoder_type))
                    .add_sibling("blockFlags", SourceValue::hex_u32(flags.map_or(0, |f| f.bits())).comment(&format!("{:?}", flags.unwrap_or(&BlockFlags::empty()))))
                    .add_sibling("gapDefault", SourceValue::hex_u32(self.gap_default).comment("Default gap value if no gap stream"))
                    .add_sibling("dataOffset", SourceValue::u32(self.data_offset));
                node_index
            }
            _ => {
                // Unknown encoder type: emit a placeholder node with no fields.
                let record = map.add_child(parent, "Unknown Block Descriptor", SourceValue::default());
                record.index()
            }
        }
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/ipf/mod.rs | src/file_parsers/ipf/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! An image format parser for the Interchangeable Preservation Format (IPF).
//!
//! No code in this module was taken or adapted from proprietary or license-encumbered
//! sources and is the original work of the author.
// Chunk framing and CRC validation for IPF container chunks.
pub mod chunk;
// The bit-reversed CRC-32 implementation used by IPF.
pub mod crc;
// DATA-record block descriptors.
mod data_block;
mod data_record;
// IMGE (per-track) records.
pub mod image_record;
// INFO (image-wide metadata) records.
pub mod info_record;
pub mod ipf;
mod platforms;
// Data stream element decoding.
mod stream_element;
mod track_common;
mod v1_track;
mod v2_track;
// Re-export the parser type under a format alias.
pub use ipf::IpfParser as IpFormat;
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/ipf/crc.rs | src/file_parsers/ipf/crc.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
/// Incremental hasher for the bit-reversed ("CRC32 Reverse") checksum used by
/// IPF files: polynomial 0xEDB8_8320, initial state 0xFFFF_FFFF, complemented
/// on finalization.
pub(crate) struct IpfCrcHasher {
    crc: u32,
}

impl IpfCrcHasher {
    /// Create a hasher in its initial state.
    pub(crate) fn new() -> Self {
        Self { crc: 0xFFFF_FFFF }
    }

    /// Fold `data` into the running CRC, processing each byte one bit at a
    /// time, least-significant bit first.
    pub(crate) fn update(&mut self, data: &[u8]) {
        for &byte in data {
            self.crc ^= u32::from(byte);
            for _ in 0..8 {
                // Shift first, then apply the polynomial if the shifted-out
                // bit was set — equivalent to the classic branch-per-bit form.
                let feedback = self.crc & 1 != 0;
                self.crc >>= 1;
                if feedback {
                    self.crc ^= 0xEDB8_8320;
                }
            }
        }
    }

    /// Finish the computation, returning the complemented CRC value.
    pub(crate) fn finalize(&self) -> u32 {
        !self.crc
    }
}
impl Default for IpfCrcHasher {
/// Equivalent to [`IpfCrcHasher::new`]: a hasher in its initial state.
fn default() -> Self {
Self::new()
}
}
/// Reference implementation of the CRC algorithm used by IPF files.
///
/// `start` overrides the initial CRC state (default 0xFFFF_FFFF); the returned
/// value is the complemented final state. With `start == None` this produces
/// the same result as [`IpfCrcHasher`] over the same bytes.
#[allow(dead_code)]
pub(crate) fn ipf_crc_u32r(data: &[u8], start: Option<u32>) -> u32 {
    let mut crc = start.unwrap_or(0xFFFF_FFFF);
    for &byte in data {
        crc ^= u32::from(byte);
        for _ in 0..8 {
            // Shift, then conditionally fold in the reversed polynomial.
            let feedback = crc & 1 != 0;
            crc >>= 1;
            if feedback {
                crc ^= 0xEDB8_8320;
            }
        }
    }
    !crc
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/ipf/image_record.rs | src/file_parsers/ipf/image_record.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use crate::{
source_map::{MapDump, OptionalSourceMap, SourceValue},
types::DiskCh,
};
use binrw::binrw;
use std::cmp::Ordering;
use strum::IntoEnumIterator;
/// Track density / protection-scheme identifiers from an IPF IMGE record's
/// `density` field. Unrecognized raw values map to `Unknown` via `From<u32>`.
#[repr(u32)]
#[derive(Copy, Clone, Debug, strum::EnumIter)]
pub enum IpfTrackDensity {
Unknown,
Noise = 1,
Auto = 2,
CopylockAmiga = 3,
CopylockAmigaNew = 4,
CopylockSt = 5,
SpeedlockAmiga = 6,
OldSpeedlockAmiga = 7,
AdamBrierleyAmiga = 8,
AdamBrierleyDensityKeyAmiga = 9,
}
impl From<u32> for IpfTrackDensity {
    /// Since IpfTrackDensity has an Unknown variant, we can implement From<u32>
    /// directly for it. All undefined values will be mapped to Unknown.
    fn from(value: u32) -> IpfTrackDensity {
        // Scan the variants for one whose discriminant matches the raw value.
        for density in IpfTrackDensity::iter() {
            if density as u32 == value {
                return density;
            }
        }
        IpfTrackDensity::Unknown
    }
}
/// An IPF IMGE record: per-track metadata describing one track and carrying
/// the `data_key` that links it to the matching DATA record.
#[derive(Debug)]
#[binrw]
#[brw(big)]
pub(crate) struct ImageRecord {
pub(crate) track: u32, // Track (cylinder) number
pub(crate) side: u32, // Side (head) number
pub(crate) density: u32, // Density of the track
#[bw(ignore)]
#[br(calc = <IpfTrackDensity>::from(density))]
pub(crate) density_enum: IpfTrackDensity,
pub(crate) signal_type: u32, // Signal processing type
pub(crate) track_bytes: u32, // Rounded number of decoded bytes on track
pub(crate) start_byte_pos: u32, // Rounded start byte position (useless)
pub(crate) start_bit_pos: u32, // Start position in bits of the first sync bit
pub(crate) data_bits: u32, // Number of decoded data bits (clock + data)
pub(crate) gap_bits: u32, // Number of decoded gap bits (clock + data)
pub(crate) track_bits: u32, // Total number of bits on the track (useless)
pub(crate) block_count: u32, // Number of blocks describing one track
pub(crate) encoder_process: u32, // Encoder process
pub(crate) track_flags: u32, // Track flags
pub(crate) data_key: u32, // Unique key matching the DATA record
pub(crate) reserved: [u32; 3], // Reserved for future use
}
impl ImageRecord {
    /// The record's unique key, matching the corresponding DATA record.
    #[inline]
    pub fn key(&self) -> u32 {
        self.data_key
    }

    /// The cylinder/head coordinate this record describes.
    pub fn ch(&self) -> DiskCh {
        let (c, h) = (self.track as u16, self.side as u8);
        DiskCh { c, h }
    }
}
impl MapDump for ImageRecord {
    /// Write this IMGE record to the source map as a child of `parent`,
    /// marking values that fall outside their expected ranges as questionable.
    /// Returns the index of the node created for this record.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        let record = map.add_child(
            parent,
            &format!("Image Record {}", DiskCh::new(self.track as u16, self.side as u8)),
            SourceValue::default(),
        );
        let record_idx = record.index();
        #[rustfmt::skip]
        record
            .add_child("track", SourceValue::u32(self.track))
            .add_sibling("side", SourceValue::u32(self.side))
            .add_sibling("density", SourceValue::u32(self.density).comment(&format!("{:?}", self.density_enum)))
            // Signal type should be '1' - mark it questionable if it's not
            .add_sibling("signalType", SourceValue::u32(self.signal_type).quest_if(self.signal_type != 1))
            .add_sibling("trackBytes", SourceValue::u32(self.track_bytes))
            .add_sibling("startBytePos", SourceValue::u32(self.start_byte_pos))
            .add_sibling("startBitPos", SourceValue::u32(self.start_bit_pos))
            .add_sibling("dataBits", SourceValue::u32(self.data_bits))
            .add_sibling("gapBits", SourceValue::u32(self.gap_bits))
            .add_sibling("trackBits", SourceValue::u32(self.track_bits))
            .add_sibling("blockCount", SourceValue::u32(self.block_count))
            // Encoder process should be 0. Mark it questionable if it's not.
            .add_sibling("encoderProcess", SourceValue::u32(self.encoder_process).quest_if(self.encoder_process != 0))
            // Only bit 1 of flags is defined. Mark questionable if more bits are set.
            .add_sibling("trackFlags", SourceValue::hex_u32(self.track_flags).quest_if(self.track_flags & !1 != 0))
            .add_sibling("dataKey", SourceValue::u32(self.data_key))
            .add_sibling("reserved", SourceValue::default())
            // Any of the reserved fields can be marked questionable if they are not 0 - they might represent future use we're not handling.
            .add_child("[0]", SourceValue::u32(self.reserved[0]).quest_if(self.reserved[0] != 0))
            .add_sibling("[1]", SourceValue::u32(self.reserved[1]).quest_if(self.reserved[1] != 0))
            .add_sibling("[2]", SourceValue::u32(self.reserved[2]).quest_if(self.reserved[2] != 0));
        // record_idx is already a usize; the original's `.into()` was an
        // identity conversion and has been dropped.
        record_idx
    }
}
impl Eq for ImageRecord {}
impl PartialEq<Self> for ImageRecord {
fn eq(&self, other: &Self) -> bool {
self.track == other.track && self.side == other.side
}
}
impl PartialOrd for ImageRecord {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for ImageRecord {
fn cmp(&self, other: &Self) -> Ordering {
// Construct DiskCh for comparison
let self_disk_ch = DiskCh {
c: self.track as u16,
h: self.side as u8,
};
let other_disk_ch = DiskCh {
c: other.track as u16,
h: other.side as u8,
};
self_disk_ch.cmp(&other_disk_ch)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/ipf/info_record.rs | src/file_parsers/ipf/info_record.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use crate::{
source_map::{MapDump, OptionalSourceMap, SourceValue},
types::Platform,
};
use binrw::binrw;
use core::fmt::{self, Debug, Formatter};
/// An IPF Media Type. Currently only floppy disks are defined (?) at least
/// as of the time of the writing of Jean Louis-Guerin's IPF documentation.
#[binrw]
#[brw(repr = u32)]
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum MediaType {
Unknown = 0,
FloppyDisk = 1,
}
impl TryFrom<u32> for MediaType {
    type Error = ();

    /// Accept only the defined raw values: 0 = Unknown, 1 = FloppyDisk.
    fn try_from(value: u32) -> Result<Self, Self::Error> {
        Ok(match value {
            0 => MediaType::Unknown,
            1 => MediaType::FloppyDisk,
            _ => return Err(()),
        })
    }
}
#[binrw]
#[brw(repr = u32)]
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum EncoderType {
Unknown = 0,
V1 = 1, // IPF encoder version 1. Sometimes referred to with an acronym starting with 'C'.
V2 = 2, // IPF encoder version 2. Sometimes referred to with an acronym starting with 'S'.
}
impl TryFrom<u32> for EncoderType {
    type Error = ();

    /// Accept only the defined raw values: 0 = Unknown, 1 = V1, 2 = V2.
    fn try_from(value: u32) -> Result<Self, Self::Error> {
        Ok(match value {
            0 => EncoderType::Unknown,
            1 => EncoderType::V1,
            2 => EncoderType::V2,
            _ => return Err(()),
        })
    }
}
/// Supported IPF platforms. Is this list complete as of 2025? Who knows!
/// Raw values 0..=9 map to these variants in declaration order (see the
/// `TryFrom<u32>` impl below).
#[derive(Copy, Clone, PartialEq)]
pub enum IpfPlatform {
None,
Amiga,
AtariSt,
Pc,
AmstradCpc,
Spectrum,
SamCoupe,
Archimedes,
C64,
Atari8Bit,
}
/// Convert an [IpfPlatform] to a fluxfox [Platform].
/// Due to a lack of a `Platform::None` variant, this conversion yields
/// `Ok(None)` for `IpfPlatform::None` (IPF platform tables are typically
/// padded to 4 entries with it), `Ok(Some(..))` for platforms with a fluxfox
/// equivalent, and `Err(())` otherwise.
impl TryFrom<IpfPlatform> for Option<Platform> {
    type Error = ();

    fn try_from(value: IpfPlatform) -> Result<Option<Platform>, Self::Error> {
        match value {
            // The padding value is valid but carries no platform.
            IpfPlatform::None => Ok(None),
            // Everything else defers to the strict `TryFrom<IpfPlatform> for
            // Platform` conversion defined in this module.
            other => Platform::try_from(other).map(Some),
        }
    }
}
impl Debug for IpfPlatform {
    /// Render only the bare variant name, matching the identifier used in the
    /// enum declaration.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            IpfPlatform::None => "None",
            IpfPlatform::Amiga => "Amiga",
            IpfPlatform::AtariSt => "AtariSt",
            IpfPlatform::Pc => "Pc",
            IpfPlatform::AmstradCpc => "AmstradCpc",
            IpfPlatform::Spectrum => "Spectrum",
            IpfPlatform::SamCoupe => "SamCoupe",
            IpfPlatform::Archimedes => "Archimedes",
            IpfPlatform::C64 => "C64",
            IpfPlatform::Atari8Bit => "Atari8Bit",
        })
    }
}
impl TryFrom<IpfPlatform> for Platform {
    type Error = ();

    /// Strict conversion: only platforms with a fluxfox equivalent succeed;
    /// `IpfPlatform::None` and unsupported platforms yield `Err(())`.
    fn try_from(value: IpfPlatform) -> Result<Self, Self::Error> {
        match value {
            IpfPlatform::Amiga => Ok(Platform::Amiga),
            IpfPlatform::AtariSt => Ok(Platform::AtariSt),
            IpfPlatform::Pc => Ok(Platform::IbmPc),
            // Listed explicitly (no catch-all) so adding a new variant forces
            // a decision here at compile time.
            IpfPlatform::None
            | IpfPlatform::AmstradCpc
            | IpfPlatform::Spectrum
            | IpfPlatform::SamCoupe
            | IpfPlatform::Archimedes
            | IpfPlatform::C64
            | IpfPlatform::Atari8Bit => Err(()),
        }
    }
}
impl TryFrom<u32> for IpfPlatform {
    type Error = ();

    /// Map a raw platform value (0..=9, declaration order) to its variant;
    /// out-of-range values are rejected.
    fn try_from(value: u32) -> Result<Self, Self::Error> {
        let platform = match value {
            0 => IpfPlatform::None,
            1 => IpfPlatform::Amiga,
            2 => IpfPlatform::AtariSt,
            3 => IpfPlatform::Pc,
            4 => IpfPlatform::AmstradCpc,
            5 => IpfPlatform::Spectrum,
            6 => IpfPlatform::SamCoupe,
            7 => IpfPlatform::Archimedes,
            8 => IpfPlatform::C64,
            9 => IpfPlatform::Atari8Bit,
            _ => return Err(()),
        };
        Ok(platform)
    }
}
/// The IPF INFO record: image-wide metadata (encoder type/revision,
/// track/side extents, creation timestamp, intended platforms, and
/// database keys).
#[binrw]
#[brw(big)]
pub struct InfoRecord {
pub(crate) media_type: u32, // Media type of the imaged media
#[bw(ignore)]
#[br(calc = MediaType::try_from(media_type).ok())]
pub(crate) media_type_enum: Option<MediaType>, // Media type of the imaged media, parsed to MediaType
pub(crate) encoder_type: u32, // Image encoder type (raw)
#[bw(ignore)]
#[br(calc = EncoderType::try_from(encoder_type).ok())]
pub(crate) encoder_type_enum: Option<EncoderType>, // Image encoder type, parsed to EncoderType
pub(crate) encoder_rev: u32, // Image encoder revision
pub(crate) file_key: u32, // Unique file key ID (for database purposes)
pub(crate) file_rev: u32, // Revision of the file. If there are no known revisions, revision should be 1.
pub(crate) origin: u32, // CRC32 value of the original .ctr file (no idea what that is)
pub(crate) min_track: u32, // Lowest track number (usually 0)
pub(crate) max_track: u32, // Highest track number (usually 83)
pub(crate) min_side: u32, // Lowest side (head) number - should be 0
pub(crate) max_side: u32, // Highest side (head) number - should be 1
pub(crate) creation_date: u32, // Creation date (year, month, day) encoded
pub(crate) creation_time: u32, // Creation time (hour, minute, second, tick) encoded
pub(crate) platforms: [u32; 4], // Intended platforms. Up to four platforms per disk (to support multi-format disks)
#[bw(ignore)]
#[br(calc = platforms.iter().filter_map(|p| IpfPlatform::try_from(*p).ok()).collect())]
pub(crate) platform_enums: Vec<IpfPlatform>, // Intended platforms. May contain fewer than 4 Platforms if conversion fails.
pub(crate) disk_number: u32, // Disk number in a multi-disc release
pub(crate) creator_id: u32, // Unique ID of the disk image creator
pub(crate) reserved: [u8; 12], // Reserve for future use
}
impl MapDump for InfoRecord {
/// Dump every INFO record field to the source map under `parent`.
/// NOTE(review): unlike the other MapDump impls in this module, this returns
/// `parent` rather than the index of the newly created "Info Record" node —
/// confirm callers do not need the new node's index.
fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
// NOTE(review): `.bad()` flags fileKey unconditionally — presumably a
// leftover highlight/debug aid; verify it is intentional.
#[rustfmt::skip]
let _info_node = map
.add_child(parent, "Info Record", SourceValue::default())
.add_child("mediaType", SourceValue::u32(self.media_type).comment(&format!("{:?}", self.media_type_enum)))
.add_sibling("encoderType", SourceValue::u32(self.encoder_type).comment(&format!("{:?}", self.encoder_type_enum)))
.add_sibling("encoderRev", SourceValue::u32(self.encoder_rev))
.add_sibling("fileKey", SourceValue::u32(self.file_key).bad())
.add_sibling("fileRev", SourceValue::u32(self.file_rev))
.add_sibling("origin",SourceValue::hex_u32(self.origin).comment("CRC32 of the original .ctr file"))
.add_sibling("minTrack", SourceValue::u32(self.min_track))
.add_sibling("maxTrack", SourceValue::u32(self.max_track))
.add_sibling("minSide", SourceValue::u32(self.min_side))
.add_sibling("maxSide", SourceValue::u32(self.max_side))
.add_sibling("creationDate", SourceValue::hex_u32(self.creation_date))
.add_sibling("creationTime", SourceValue::hex_u32(self.creation_time))
.add_sibling("platforms", SourceValue::default())
.add_child("[0]", SourceValue::u32(self.platforms[0]).comment(&format!("{:?}", self.platform_enums.get(0).unwrap_or(&IpfPlatform::None))))
.add_sibling("[1]", SourceValue::u32(self.platforms[1]).comment(&format!("{:?}", self.platform_enums.get(1).unwrap_or(&IpfPlatform::None))))
.add_sibling("[2]", SourceValue::u32(self.platforms[2]).comment(&format!("{:?}", self.platform_enums.get(2).unwrap_or(&IpfPlatform::None))))
.add_sibling("[3]", SourceValue::u32(self.platforms[3]).comment(&format!("{:?}", self.platform_enums.get(3).unwrap_or(&IpfPlatform::None))))
.up()
.add_sibling("diskNumber", SourceValue::u32(self.disk_number))
.add_sibling("creatorId", SourceValue::u32(self.creator_id));
parent
}
}
impl Debug for InfoRecord {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Render an optional enum by its Debug form, or "Unknown" when absent.
        fn opt_dbg<T: Debug>(value: &Option<T>) -> String {
            match value {
                Some(inner) => format!("{:?}", inner),
                None => "Unknown".to_string(),
            }
        }
        // Pre-render the platform list as strings, as the original did inline.
        let platform_names: Vec<String> = self.platform_enums.iter().map(|p| format!("{:?}", p)).collect();
        f.debug_struct("InfoRecord")
            .field("media_type_enum", &opt_dbg(&self.media_type_enum))
            .field("encoder_type_enum", &opt_dbg(&self.encoder_type_enum))
            .field("encoder_rev", &self.encoder_rev)
            .field("file_key", &format!("{:08X}", self.file_key))
            .field("file_rev", &self.file_rev)
            .field("origin", &format!("{:08X}", self.origin))
            .field("min_track", &self.min_track)
            .field("max_track", &self.max_track)
            .field("min_side", &self.min_side)
            .field("max_side", &self.max_side)
            .field("creation_date", &format!("{:08X}", self.creation_date))
            .field("creation_time", &format!("{:08X}", self.creation_time))
            .field("platform_enums", &platform_names)
            .field("disk_number", &self.disk_number)
            .field("creator_id", &format!("{:08X}", self.creator_id))
            .finish()
    }
}
impl InfoRecord {
    /// Get the list of fluxfox [Platform]s specified by the IPF file.
    /// Entries with no fluxfox equivalent (including `None` padding) are
    /// skipped, exactly as the filter_map in the original did.
    pub fn platforms(&self) -> Vec<Platform> {
        let mut platforms = Vec::with_capacity(self.platform_enums.len());
        for ipf_platform in &self.platform_enums {
            if let Ok(platform) = Platform::try_from(*ipf_platform) {
                platforms.push(platform);
            }
        }
        platforms
    }
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/ipf/chunk.rs | src/file_parsers/ipf/chunk.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use crate::{
file_parsers::ipf::{crc::IpfCrcHasher, ipf::IpfParser},
io::ReadSeek,
DiskImageError,
};
use binrw::{binrw, BinRead};
use core::fmt;
use std::io::Cursor;
// Chunk ID strings, ordered by `IpfChunkType` discriminant (see the
// `From<IpfChunkType>` impl below, which indexes this table).
const CHUNK_DEFS: [&[u8; 4]; 8] = [b"CAPS", b"DUMP", b"DATA", b"TRCK", b"INFO", b"IMGE", b"CTEI", b"CTEX"];
pub const MAXIMUM_CHUNK_SIZE: usize = 0x100000; // Set some reasonable limit for chunk sizes. Here 1MB.
/// A single IPF chunk: a 4-byte ASCII ID, big-endian size and CRC fields, then
/// up to `data_size_limit` payload bytes. `calculated_crc` is computed at
/// parse time over the header (with a zeroed CRC field) plus whatever payload
/// was actually read.
#[binrw]
#[brw(big)]
#[br(import(data_size_limit: u32))]
pub(crate) struct IpfChunk {
pub id: [u8; 4],
#[bw(ignore)]
#[br(calc = <IpfChunkType>::try_from(&id).ok())]
pub chunk_type: Option<IpfChunkType>,
pub size: u32, // Total chunk size; the payload length is size minus the 12-byte header (see `data`)
pub crc: u32,
#[br(if(data_size_limit > 0), count = size.saturating_sub(12).min(data_size_limit))]
pub data: Vec<u8>,
#[bw(ignore)]
#[br(calc = IpfChunk::calculate_crc(&id, size, &data))] // Calculate the CRC based on fields
pub calculated_crc: u32, // Computed CRC value
}
impl fmt::Debug for IpfChunk {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Resolve the chunk type to its display string up front.
        let type_str = match &self.chunk_type {
            Some(t) => format!("{:?}", t),
            None => "Unknown".to_string(),
        };
        f.debug_struct("IpfChunk")
            .field("id", &format!("{:02X?}", &self.id)) // Raw chunk ID bytes
            .field("chunk_type", &type_str)
            .field("size", &self.size)
            .field("crc", &format!("{:08X}", self.crc)) // CRC from the file, hex
            .field("data_length", &self.data.len())
            .field("calculated_crc", &format!("{:08X}", self.calculated_crc)) // Computed CRC, hex
            .finish()
    }
}
impl IpfChunk {
/// Compute the chunk CRC over the id, the big-endian size, a zeroed CRC
/// field, and the payload — the same byte layout the file stores.
fn calculate_crc(id: &[u8; 4], size: u32, data: &[u8]) -> u32 {
let mut hasher = IpfCrcHasher::new();
hasher.update(id);
hasher.update(&size.to_be_bytes());
// When calculating chunk CRC, we treat the CRC field as if zeroed
hasher.update(&[0; 4]);
hasher.update(data);
hasher.finalize()
}
/// True when the CRC read from the file matches the one computed at parse
/// time (which covers only the payload bytes actually read).
fn is_crc_valid(&self) -> bool {
self.crc == self.calculated_crc
}
/// Deserialize this chunk's payload as `T`, a binrw record type that takes
/// no read arguments. Consumes the chunk.
pub(crate) fn into_inner<T>(self) -> Result<T, DiskImageError>
where
for<'a> T: binrw::BinRead<Args<'a> = ()> + binrw::meta::ReadEndian,
{
let mut cursor = Cursor::new(self.data);
let inner = T::read(&mut cursor).map_err(|e| DiskImageError::ImageCorruptError(e.to_string()))?;
Ok(inner)
}
/// Deserialize this chunk's payload as `T`, forwarding `args` to its
/// binrw reader. Consumes the chunk.
#[allow(dead_code)]
pub(crate) fn into_inner_args<T, Args>(self, args: Args) -> Result<T, DiskImageError>
where
T: for<'a> BinRead<Args<'a> = Args> + binrw::meta::ReadEndian,
{
let mut cursor = Cursor::new(self.data);
let inner = T::read_args(&mut cursor, args).map_err(|e| DiskImageError::ImageCorruptError(e.to_string()))?;
Ok(inner)
}
}
/// Known IPF chunk types; discriminants index into `CHUNK_DEFS`.
#[derive(Copy, Clone, Debug, PartialEq)]
pub(crate) enum IpfChunkType {
Caps = 0,
Dump = 1,
Data = 2,
Track = 3,
Info = 4,
Image = 5,
Ctei = 6,
Ctex = 7,
}
// impl Debug for IpfChunkType {
// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// let chunk_bytes = match self {
// IpfChunkType::Caps => CHUNK_DEFS[0],
// IpfChunkType::Dump => CHUNK_DEFS[1],
// IpfChunkType::Data => CHUNK_DEFS[2],
// IpfChunkType::Track => CHUNK_DEFS[3],
// IpfChunkType::Info => CHUNK_DEFS[4],
// IpfChunkType::Image => CHUNK_DEFS[5],
// IpfChunkType::Ctei => CHUNK_DEFS[6],
// IpfChunkType::Ctex => CHUNK_DEFS[7],
// };
// write!(f, "{:04X?}", chunk_bytes)
// }
// }
impl TryFrom<&[u8; 4]> for IpfChunkType {
    type Error = ();

    /// Recognize a 4-byte ASCII chunk ID; unknown IDs are rejected.
    fn try_from(value: &[u8; 4]) -> Result<Self, Self::Error> {
        let chunk_type = match value {
            b"CAPS" => IpfChunkType::Caps,
            b"DUMP" => IpfChunkType::Dump,
            b"DATA" => IpfChunkType::Data,
            b"TRCK" => IpfChunkType::Track,
            b"INFO" => IpfChunkType::Info,
            b"IMGE" => IpfChunkType::Image,
            b"CTEI" => IpfChunkType::Ctei,
            b"CTEX" => IpfChunkType::Ctex,
            _ => return Err(()),
        };
        Ok(chunk_type)
    }
}
impl From<IpfChunkType> for &[u8; 4] {
    /// Look up the chunk's 4-byte ASCII ID; `CHUNK_DEFS` is ordered by
    /// discriminant, so the cast indexes directly.
    fn from(val: IpfChunkType) -> Self {
        // CHUNK_DEFS elements are already references, so no extra borrow is
        // needed (the original's `&` relied on auto-deref to undo it).
        CHUNK_DEFS[val as usize]
    }
}
impl IpfParser {
/// Read one chunk (id, size, CRC) from the stream and validate it.
/// NOTE(review): `IpfChunk::read` is called without explicit import args, so
/// the `data_size_limit` the payload read depends on is not supplied here —
/// confirm how binrw resolves it in this call and whether the payload is
/// expected to be read/validated separately by the caller.
pub(crate) fn read_chunk<RWS: ReadSeek>(image: &mut RWS) -> Result<IpfChunk, DiskImageError> {
//let chunk_pos = image.stream_position()?;
//log::trace!("Reading chunk header...");
let chunk = IpfChunk::read(image)?;
//log::debug!("Read chunk: {:?}", chunk);
// An unknown chunk id is only warned about; the caller decides what to do.
if chunk.chunk_type.is_none() {
log::warn!("Unknown chunk type: {:0X?}", chunk.id);
}
// Reject absurd sizes before any caller trusts `size` for payload reads.
if chunk.size > MAXIMUM_CHUNK_SIZE as u32 {
return Err(DiskImageError::IncompatibleImage(format!(
"Chunk length exceeds limit: {} > {}",
chunk.size, MAXIMUM_CHUNK_SIZE,
)));
}
// A CRC mismatch is treated as image corruption.
if !chunk.is_crc_valid() {
return Err(DiskImageError::ImageCorruptError(format!(
"CRC mismatch in {:?} chunk",
chunk
.chunk_type
.map(|t| format!("{:?}", t))
.unwrap_or_else(|| "Unknown".to_string()),
)));
}
Ok(chunk)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/ipf/stream_element.rs | src/file_parsers/ipf/stream_element.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
use crate::source_map::{MapDump, OptionalSourceMap, SourceValue};
use binrw::{binrw, BinResult};
use bit_vec::BitVec;
use modular_bitfield::prelude::*;
// Set a maximum sample size as sanity check. An extended density track could be 400,000+ bitcells
// long, or around 50K. So 100KiB feels like a reasonable limit.
const MAX_SAMPLE_SIZE: usize = 100_000;
/// The 3-bit element type carried in a data stream element's head byte.
/// Eight variants fill the 3-bit space; the last two are unused placeholders.
#[derive(BitfieldSpecifier, Eq, PartialEq, Debug)]
pub enum DataType {
End,
Sync,
Data,
Gap,
Raw,
Fuzzy,
Invalid0, // placeholder filling the 3-bit space
Invalid1, // placeholder filling the 3-bit space
}
#[allow(dead_code)]
#[bitfield]
#[derive(Copy, Clone, Debug)]
/// The one-byte head of a data stream element: the element type plus the
/// width, in bytes, of the size field that follows the head.
pub(crate) struct DataHead {
#[bits = 3]
pub(crate) data_type: DataType,
#[skip]
pub(crate) unused: B2,
pub(crate) data_size_width: B3,
}
impl DataHead {
pub(crate) fn is_null(&self) -> bool {
self.data_type() == DataType::End && self.data_size_width() == 0
}
}
#[binrw::parser(reader)]
fn parse_data_head() -> BinResult<DataHead> {
let mut buf = [0u8; 1];
reader.read_exact(&mut buf)?;
let dh = DataHead::from_bytes(buf);
log::debug!("Parsed data head: {:?}", dh);
Ok(dh)
}
#[binrw::writer(writer: w)]
fn write_data_head(value: &DataHead) -> BinResult<()> {
//let byte = value.into_bytes();
_ = w.write(&value.into_bytes())?;
Ok(())
}
// impl Endian for DataHead {
// const ENDIANNESS: Endian = Endian::Big;
// }
pub(crate) enum DataSample {
Bytes(Vec<u8>),
Bits(BitVec),
}
//#[br(if(data_head.data_type() != DataType::Fuzzy))]
#[binrw]
#[brw(big)]
#[br(import(data_is_bits: bool, data_bytes: usize))]
#[bw(import(data_is_bits: bool, data_bytes: usize))]
pub struct DataStreamElement {
#[br(parse_with = parse_data_head)]
#[bw(write_with = write_data_head)]
pub(crate) data_head: DataHead,
#[br(count = data_head.data_size_width() as usize)]
pub(crate) encoded_data_size: Vec<u8>,
#[br(calc = calculate_sample_size(&encoded_data_size))]
#[bw(ignore)]
pub(crate) sample_size_decoded: usize,
#[br(parse_with = read_data_samples, args(sample_size_decoded, data_bytes, data_is_bits, data_head.data_type() == DataType::Fuzzy))]
#[bw(write_with = write_data_samples, args(sample_size_decoded, data_bytes, data_is_bits, data_head.data_type() == DataType::Fuzzy))]
pub(crate) data_sample: Option<DataSample>,
}
impl MapDump for DataStreamElement {
fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
let record = map.add_child(parent, "Data Stream Element", SourceValue::default());
let element_idx = record.index();
#[rustfmt::skip]
let sample_record = record
.add_child("dataSizeWidth", SourceValue::u32(self.data_head.data_size_width() as u32))
.add_sibling("dataType", SourceValue::string(&format!("{:?}" , self.data_head.data_type())))
.add_sibling("dataSize", SourceValue::u32(self.sample_size_decoded as u32));
match &self.data_sample {
Some(DataSample::Bytes(bytes)) => {
// Add a maximum of 8 bytes of data as a comment
sample_record.add_sibling(
"dataSample(Bytes)",
SourceValue::string(&format!("{}", bytes.len()))
.comment(&format!("{:02X?}", &bytes[0..std::cmp::min(8, bytes.len())])),
);
}
Some(DataSample::Bits(bits)) => {
sample_record.add_sibling("dataSample(Bits)", SourceValue::string(&format!("{}", bits.len())));
}
None => {
sample_record.add_child("Unknown sample type!", SourceValue::default().bad());
}
};
element_idx
}
}
pub(crate) enum GapSample {
RepeatCt(usize),
Sample(BitVec),
}
#[derive(BitfieldSpecifier, Eq, PartialEq, Debug)]
pub enum GapType {
End,
GapLength,
SampleLength,
Invalid0,
}
#[allow(dead_code)]
#[bitfield]
#[derive(Copy, Clone, Debug)]
pub(crate) struct GapHead {
#[bits = 2]
pub(crate) gap_type: GapType,
#[skip]
pub(crate) unused: B3,
pub(crate) data_size_width: B3,
}
impl GapHead {
pub(crate) fn is_null(&self) -> bool {
self.gap_type() == GapType::End && self.data_size_width() == 0
}
}
#[binrw]
#[brw(big)]
pub struct GapStreamElement {
#[br(parse_with = parse_gap_head)]
#[bw(write_with = write_gap_head)]
pub(crate) gap_head: GapHead,
#[br(count = gap_head.data_size_width() as usize)]
pub(crate) encoded_data_size: Vec<u8>,
#[br(calc = calculate_sample_size(&encoded_data_size))]
#[bw(ignore)]
pub(crate) sample_size_decoded: usize,
#[br(parse_with = read_gap_samples, args(sample_size_decoded, gap_head.gap_type()))]
#[bw(write_with = write_gap_samples, args(sample_size_decoded, gap_head.gap_type()))]
pub(crate) gap_sample: Option<GapSample>,
}
impl MapDump for GapStreamElement {
fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
let record = map.add_child(parent, "Gap Stream Element", SourceValue::default());
let element_idx = record.index();
#[rustfmt::skip]
let sample_record = record
.add_child("gapSizeWidth", SourceValue::u32(self.gap_head.data_size_width() as u32))
.add_sibling("gapType", SourceValue::string(&format!("{:?}" , self.gap_head.gap_type())))
.add_sibling("gapSize", SourceValue::u32(self.sample_size_decoded as u32));
if let Some(sample) = &self.gap_sample {
match sample {
GapSample::RepeatCt(ct) => {
sample_record.add_sibling("repeatCt", SourceValue::string(&format!("{}", ct)));
}
GapSample::Sample(bits) => {
sample_record.add_sibling("sample", SourceValue::string(&format!("{}", bits)));
}
}
}
element_idx
}
}
#[binrw::parser(reader)]
fn parse_gap_head() -> BinResult<GapHead> {
let mut buf = [0u8; 1];
reader.read_exact(&mut buf)?;
let gh = GapHead::from_bytes(buf);
log::debug!("Parsed gap head: {:?}", gh);
Ok(gh)
}
#[binrw::writer(writer: w)]
fn write_gap_head(value: &GapHead) -> BinResult<()> {
//let byte = value.into_bytes();
_ = w.write(&value.into_bytes())?;
Ok(())
}
#[binrw::parser(reader: r)]
fn read_data_samples(
sample_size: usize,
_data_bytes: usize,
data_is_bits: bool,
data_is_fuzzy: bool,
) -> BinResult<Option<DataSample>> {
if data_is_fuzzy {
return Ok(None);
}
// if sample_size != 1 {
// log::warn!("Sample size is not 1: {}", sample_size);
// }
let sample_bytes = match data_is_bits {
true => (sample_size + 7) / 8, // Round to the next byte
false => sample_size,
};
let mut sample_buf = vec![0u8; sample_bytes];
r.read_exact(&mut sample_buf)?;
let samples = match data_is_bits {
true => DataSample::Bits(BitVec::from_bytes(&sample_buf)),
false => DataSample::Bytes(sample_buf),
};
Ok(Some(samples))
}
#[allow(dead_code)]
#[allow(unused_variables)]
#[binrw::writer(writer: r)]
fn write_data_samples(
value: &Option<DataSample>,
sample_size: &usize,
data_bytes: usize,
data_is_bits: bool,
data_is_fuzzy: bool,
) -> BinResult<()> {
Ok(())
}
fn calculate_sample_size(data_size_encoded: &[u8]) -> usize {
let mut final_size = 0usize;
for byte in data_size_encoded {
final_size = (final_size << 8) | *byte as usize;
}
log::debug!(
"Decoded sample size of {} using {} bytes",
final_size,
data_size_encoded.len()
);
final_size
}
#[binrw::parser(reader: r)]
fn read_gap_samples(sample_size: usize, sample_type: GapType) -> BinResult<Option<GapSample>> {
match sample_type {
GapType::GapLength => {
log::debug!("read_gap_samples(): Read repeat length of {}", sample_size);
// Nothing to actually read - repeat count is sample_size
Ok(Some(GapSample::RepeatCt(sample_size)))
}
GapType::SampleLength => {
log::debug!("read_gap_samples(): Read sample length of {}", sample_size);
// Read sample_size bits
let sample_bytes = (sample_size + 7) / 8;
let mut sample_buf = vec![0u8; sample_bytes];
r.read_exact(&mut sample_buf)?;
// Convert to BitVec
let mut bits = BitVec::from_bytes(&sample_buf);
// Trim bits to actual size
bits.truncate(sample_size);
Ok(Some(GapSample::Sample(bits)))
}
_ => {
log::warn!("read_gap_samples(): Unhandled gap type: {:?}", sample_type);
Ok(None)
}
}
}
#[allow(dead_code)]
#[allow(unused_variables)]
#[binrw::writer(writer: r)]
fn write_gap_samples(value: &Option<GapSample>, sample_size: &usize, sample_type: GapType) -> BinResult<()> {
Ok(())
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/ipf/v1_track.rs | src/file_parsers/ipf/v1_track.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! Decoding functions for IPF encoder v1 tracks (CXXX).
use crate::{
bitstream_codec::{mfm::MFM_BYTE_LEN, TrackDataStream},
file_parsers::ipf::{
data_block::BlockDescriptor,
image_record::ImageRecord,
info_record::InfoRecord,
ipf::IpfParser,
stream_element::{DataSample, DataStreamElement, DataType},
},
io::ReadSeek,
prelude::{TrackDataEncoding, TrackDataRate, TrackDataResolution, TrackDensity},
source_map::{MapDump, OptionalSourceMap},
track_schema::TrackSchema,
DiskImage,
DiskImageError,
};
use binrw::BinRead;
impl IpfParser {
pub(crate) fn decode_v1_track<RWS>(
reader: &mut RWS,
image: &mut DiskImage,
_info_record: &InfoRecord,
image_record: &ImageRecord,
record_node: usize,
data: &crate::file_parsers::ipf::ipf::DataRecordInfo,
) -> Result<(), DiskImageError>
where
RWS: ReadSeek,
{
image.set_resolution(TrackDataResolution::BitStream);
log::debug!("-------------------------- Decoding V1 (CXXX) Track ----------------------------------");
log::debug!(
"Track {} bitct: {:6} block_ct: {:02} data_bits: {}",
image_record.ch(),
image_record.track_bits,
image_record.block_count,
image_record.data_bits,
);
//log::trace!("Image Record: {:#?}", image_record);
// Density is *probably* double. Guess from bitcell count or assume double.
let data_rate =
TrackDataRate::from(TrackDensity::from_bitcells(image_record.track_bits).unwrap_or(TrackDensity::Double));
// // Create empty BitVec for track data.
// let track_bits = BitVec::from_elem(image_record.track_bits as usize, false);
// // Amiga is *probably* MFM encoded.
// let codec = Box::new(MfmCodec::new(track_bits, Some(image_record.track_bits as usize), None));
//let start_clock = image_record.start_bit_pos % 2 != 0;
// There's a variety of approaches here - we could craft a BitStreamTrack in isolation
// and then attach it to the Disk, or we can add an empty track and then write to it.
// I'm going to try the latter approach first.
let new_track_idx = image.add_empty_track(
image_record.ch(),
TrackDataEncoding::Mfm,
Some(TrackDataResolution::BitStream),
data_rate,
image_record.track_bits as usize,
Some(true),
)?;
// After we get a reference to the track, the disk image will be mutably borrowed until
// the end of track processing, and we won't be able to get a mutable reference to the
// source map.
//
// We fall back to the trusty ol take hack to get around this. But now we have to put it
// back on error if we want to preserve it.
//
// A better design would probably be to construct a detached track object and then attach
// it to the image after it is built. Or, if we store tracks as options, I'd rather take
// the track than the source map as it would simplify error handling.
//
// TODO: Revisit this design
let mut source_map = image.take_source_map().unwrap();
let track = match image.track_by_idx_mut(new_track_idx) {
Some(track) => track,
None => {
image.put_source_map(source_map);
log::error!("Failed to get mutable track for image.");
return Err(DiskImageError::FormatParseError);
}
};
// let mut bitstream_track = track.as_bitstream_track_mut().ok_or_else(|| {
// log::error!("Failed to get mutable bitstream track for image.");
// DiskImageError::FormatParseError
// })?;
// let params = BitStreamTrackParams {
// schema: Some(TrackSchema::Amiga),
// ch: image_record.ch(),
// encoding: TrackDataEncoding::Mfm,
// data_rate,
// rpm: None,
// bitcell_ct: Some(image_record.track_bits as usize),
// data: &[],
// weak: None,
// hole: None,
// detect_weak: false,
// };
//
// let mut track = BitStreamTrack::new_optional_ctx(¶ms, None)?;
{
// Seek to the start position for the first block.
let bitstream = match track.stream_mut() {
Some(stream) => stream,
None => {
image.put_source_map(source_map);
log::error!("Failed to get mutable stream for track.");
return Err(DiskImageError::FormatParseError);
}
};
log::trace!("Seeking to {} for first block.", image_record.start_bit_pos & !0xF);
let mut cursor = image_record.start_bit_pos as usize & !0xF;
//bitstream.seek(std::io::SeekFrom::Start(image_record.start_bit_pos as u64))?;
for (bi, block) in data.blocks.iter().enumerate() {
log::debug!(
"Block {}: data offset: {} data: [bytes: {:?} bits: {}], gap: [bytes: {:?} bits: {}]",
bi,
data.edb_offset + block.data_offset as u64,
block.data_bytes,
block.data_bits,
block.gap_bytes,
block.gap_bits
);
// reader.seek(std::io::SeekFrom::Start(data.edb_offset + block.data_offset as u64))?;
//
// let mut debug_buf = [0; 16];
// reader.read_exact(&mut debug_buf)?;
//log::warn!("Data element: {:02X?}", debug_buf);
let data_bytes = if let Some(bytes) = block.data_bytes {
bytes as u64
}
else {
log::error!("V1 block descriptor missing data_bytes.");
return Err(DiskImageError::ImageCorruptError(
"V1 block descriptor missing data_bytes.".to_string(),
));
};
// Seek to the first data element
let data_offset = data.edb_offset + block.data_offset as u64;
match reader.seek(std::io::SeekFrom::Start(data_offset)) {
Ok(_) => {}
Err(e) => {
image.put_source_map(source_map);
log::error!("Failed to seek to data element: {}", e);
return Err(DiskImageError::from(e));
}
}
// V1 descriptor should have valid `data_bytes`. Ignore block flags!
let encoded_bytes =
match Self::decode_v1_block(reader, &mut source_map, block, record_node, bitstream, &mut cursor) {
Ok(bytes) => bytes,
Err(e) => {
image.put_source_map(source_map);
log::error!("Failed to decode V1 block: {}", e);
return Err(e);
}
};
if encoded_bytes != data_bytes as usize {
log::warn!(
"Block {} decoded {} bytes, but expected {} bytes.",
bi,
encoded_bytes,
data_bytes
);
}
// As far as I can tell there's no field that gives the un-decoded length of the data elements.
// let pos = reader.stream_position()?;
// if pos - data_offset != block.data_bytes.unwrap() as u64 {
// log::error!(
// "Reached End element with {} bytes remaining in data block.",
// data_bytes - (pos - data_offset)
// );
// return Err(DiskImageError::ImageCorruptError(
// "Data element length mismatch.".to_string(),
// ));
// }
}
}
let track = match image.track_by_idx_mut(new_track_idx) {
Some(track) => track,
None => {
image.put_source_map(source_map);
log::error!("Failed to get mutable track for image.");
return Err(DiskImageError::FormatParseError);
}
};
let bitstream_track = match track.as_bitstream_track_mut() {
Some(track) => track,
None => {
image.put_source_map(source_map);
log::error!("Failed to get mutable bitstream track for image.");
return Err(DiskImageError::FormatParseError);
}
};
bitstream_track.rescan(Some(TrackSchema::Amiga))?;
// Finally, put the source map back on the image.
image.put_source_map(source_map);
Ok(())
}
pub fn decode_v1_block<RWS>(
reader: &mut RWS,
source_map: &mut Box<dyn OptionalSourceMap>,
block: &BlockDescriptor,
record_node: usize,
bitstream: &mut TrackDataStream,
cursor: &mut usize,
) -> Result<usize, DiskImageError>
where
RWS: ReadSeek,
{
log::debug!("-------------------------- Decoding V1 Block ----------------------------------");
// Write BlockDescriptor to source map
let block_node = block.write_to_map(source_map, record_node);
//log::trace!("Block: {:#?}", block);
let data_bytes = if let Some(bytes) = &block.data_bytes {
*bytes as usize
}
else {
log::error!("V1 block descriptor missing data_bytes.");
return Err(DiskImageError::ImageCorruptError(
"V1 block descriptor missing data_bytes.".to_string(),
));
};
let mut data_element = DataStreamElement::read_args(reader, (false, data_bytes))?;
// Write data element to source map
let _data_node = data_element.write_to_map(source_map, block_node);
let mut element_ct = 0;
let mut decoded_bytes = 0;
while !data_element.data_head.is_null() {
let data_type = data_element.data_head.data_type();
let data = if let Some(samples) = &data_element.data_sample {
match samples {
DataSample::Bytes(data) => {
log::debug!(
"Data element contains: {} bytes: {:02X?}",
data.len(),
&data[0..std::cmp::min(16, data.len())]
);
data
}
DataSample::Bits(bits) => {
// This shouldn't really happen in a V1 block...
log::warn!("Unhandled: Bit samples in V1 block!");
log::debug!("Data element contains: {} bits", bits.len());
&bits.to_bytes()
}
}
}
else {
log::error!("Data element has no samples!");
return Err(DiskImageError::ImageCorruptError(
"Data element has no samples.".to_string(),
));
};
let wrote = match data_type {
DataType::Sync => {
// Write SYNC bytes RAW (they are already MFM-encoded!)
log::trace!(
"Writing raw Sync bytes: {:02X?}",
&data[0..std::cmp::min(16, data.len())]
);
// Write the raw bytes
bitstream.write_raw_buf(data, *cursor);
data.len() / 2
}
DataType::Data => {
// Encode data bytes as MFM
log::trace!(
"Encoding data element: {:02X?}",
&data[0..std::cmp::min(16, data.len())]
);
bitstream.write_encoded_buf(data, *cursor);
data.len()
}
DataType::Gap => {
// Encode gap bytes as MFM
log::trace!("Encoding GAP element: {:02X?}", &data[0..std::cmp::min(16, data.len())]);
bitstream.write_encoded_buf(data, *cursor);
data.len()
}
DataType::End => {
// End of data block
log::debug!("End of data block.");
break;
}
_ => {
log::warn!("Unknown data element type: {:?}", data_type);
data.len()
}
};
decoded_bytes += wrote;
*cursor += wrote * MFM_BYTE_LEN;
// Read the next data element
element_ct += 1;
data_element = DataStreamElement::read_args(reader, (false, data_bytes))?;
// Write data element to source map
let _data_node = data_element.write_to_map(source_map, block_node);
}
log::debug!(
"Read {} data elements from V1 block, wrote {} MFM bytes to track",
element_ct,
decoded_bytes * 2
);
Ok(decoded_bytes * 2)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/ipf/platforms/mod.rs | src/file_parsers/ipf/platforms/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! A module for handling IPF platforms.
//!
//! Parsing IPF files requires certain domain knowledge about the format being
//! parsed. For example, Amiga disks encoded as CAPS IPF tracks use certain
//! conventions that may not be obvious.
pub mod amiga;
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/ipf/platforms/amiga.rs | src/file_parsers/ipf/platforms/amiga.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
#![allow(dead_code)]
//! Parsing IPF files requires certain domain knowledge about the format being
//! parsed. For example, Amiga disks encoded as CAPS IPF tracks use certain
//! conventions that may not be obvious.
//! The 'Sync' data stream element, for example, is in encoded MFM format,
//! likely to capture the clock sync pattern of the marker.
//! The `Data` stream element includes the sector header and checksum fields
//! in Amiga odd/even LONG format, sector labels in odd/even 4xLONG format,
//! and the sector data in odd/even 512xBYTE format, and there's no indication
//! of the actual format of the sector or the differences in encoding.
//!
//! This module provides the necessary structures and tools to parse Amiga
//! track data from IPF files.
/// An Amiga sector as represented by an IPF CAPS Data Stream Element.
pub(crate) struct IpfAmigaSector {
/// MFM decoded sync bytes, usually `[0x00, 0x00]`
pub(crate) sync: [u8; 2],
pub(crate) header: IpfAmigaSectorHeader,
pub(crate) data: IpfAmigaSectorData,
}
pub(crate) struct IpfAmigaSectorHeader {
/// MFM `encoded` address marker. It is encoded to preserve the MFM clock sync pattern.
pub(crate) marker: [u8; 4],
/// MFM decoded sector ID.
pub(crate) id: u32,
/// Decoded 4 LONGs representing the sector label.
pub(crate) label: [u32; 4],
/// The header checksum.
pub(crate) header_checksum: u32,
}
pub(crate) struct IpfAmigaSectorData {
/// The data checksum
pub(crate) checksum: u32,
/// MFM decoded sector data.
pub(crate) data: [u8; 512],
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/as/flux.rs | src/file_parsers/as/flux.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! Module for flux conversion for MOOF and WOZ formats
//! Explanation of the flux format from AS documentation:
//!
//! > A quick explanation of the flux encoding: Each byte represents a single flux transition and
//! > its value is the number of ticks since the previous flux transition. A single tick is 125
//! > nanoseconds. Therefore the normal 2 microsecond spacing between sequential GCR 1 bits is
//! > represented by approximately 16 ticks. This also puts 101 and 1001 bit sequences at
//! > approximately 32 and 48 ticks. You are probably thinking to yourself that when it comes to
//! > longer runs of no transitions, how is this unsigned byte going to handle representing the
//! > time? That is taken care of via the special value of 255. When you encounter a 255, you need
//! > to keep adding the values up until you reach a byte that has a non-255 value. You then add
//! > this value to all of your accumulated 255s to give you the tick count. For example 255, 255,
//! > 10 should be treated as 255 + 255 + 10 = 520 ticks.
pub const AS_TICK_RES: f64 = 125e-9;
/// Decode AS-encoded flux data into a vector of flux times in seconds, and the total decoded time
/// in seconds.
pub fn decode_as_flux(buf: &[u8]) -> (Vec<f64>, f64) {
let mut fts = Vec::new();
let mut time = 0.0;
let mut ticks = 0;
for &byte in buf {
if byte == 255 {
//log::warn!("rollover!");
ticks += 255;
}
else if byte > 0 {
ticks += byte as u64;
let ft_time = (ticks as f64) * AS_TICK_RES;
time += ft_time;
fts.push(ft_time);
ticks = 0;
}
}
if buf[buf.len() - 1] == 255 {
log::warn!("decode_as_flux(): illegal last tick count (255)");
}
(fts, time)
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/as/woz.rs | src/file_parsers/as/woz.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! File format parser for the WOZ disk image format.
//! WOZ images are intended to store Apple II disk images.
//! The format was developed by the author of the Applesauce project.
//! https://applesaucefdc.com/woz/reference2/
use crate::{
file_parsers::{bitstream_flags, FormatCaps},
format_ms,
io::ReadSeek,
platform::Platform,
DiskImage,
DiskImageError,
DiskImageFileFormat,
FoxHashMap,
LoadingCallback,
LoadingStatus,
ParserWriteCompatibility,
};
use crate::{
file_parsers::{
r#as::{crc::applesauce_crc32, flux::decode_as_flux},
ParserReadOptions,
ParserWriteOptions,
},
io::ReadWriteSeek,
prelude::{DiskCh, TrackDataEncoding, TrackDataRate, TrackDataResolution, TrackDensity},
source_map::{MapDump, OptionalSourceMap, SourceValue},
track::fluxstream::FluxStreamTrack,
types::{BitStreamTrackParams, DiskDescriptor, DiskRpm, FluxStreamTrackParams},
};
use binrw::{binrw, BinRead};
pub const WOZ_MAGIC: &str = "WOZ2";
pub const MAX_TRACKS: u8 = 160;
#[derive(Copy, Clone, Debug)]
pub enum WozDiskType {
FiveInch,
ThreeInch,
Unknown,
}
#[binrw]
#[brw(little)]
pub struct WozHeader {
magic: [u8; 4],
data_check: u32,
crc: u32,
}
#[binrw]
#[br(little)]
pub struct WozChunkHeader {
id: [u8; 4],
size: u32,
}
pub enum WozChunk {
Info(InfoChunk),
TMap(TMapChunk),
Trks(TrksChunk),
Flux(FluxChunk),
Meta(String),
Unknown,
}
#[derive(BinRead)]
#[br(little)]
pub struct InfoChunk {
info_version: u8,
#[br(map = |x: u8| match x {
1 => WozDiskType::FiveInch,
2 => WozDiskType::ThreeInch,
_ => WozDiskType::Unknown,
})]
disk_type: WozDiskType,
write_protected: u8,
synchronized: u8,
cleaned: u8,
#[br(map = |x: [u8; 32]| String::from_utf8_lossy(&x).trim_end().to_string())]
creator: String, // 32-byte UTF-8 string padded with spaces
#[br(if(info_version >= 2))]
sides: u8,
#[br(if(info_version >= 2))]
boot_sector_format: u8,
#[br(if(info_version >= 2))]
optimal_bit_timing: u8,
#[br(if(info_version >= 2))]
compatible_hardware: u16,
#[br(if(info_version >= 2))]
required_ram: u16,
#[br(if(info_version >= 2))]
largest_track: u16,
#[br(if(info_version >= 3))]
flux_block: u16,
#[br(if(info_version >= 3))]
largest_flux_track: u16,
}
impl MapDump for InfoChunk {
fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
let mut record = map.add_child(parent, "[INFO] Chunk", SourceValue::default());
let record_idx = record.index();
record = record
.add_child("info_version", SourceValue::u8(self.info_version))
.add_sibling("disk_type", SourceValue::string(&format!("{:?}", self.disk_type)))
.add_sibling("write_protected", SourceValue::u8(self.write_protected))
.add_sibling("synchronized", SourceValue::u8(self.synchronized))
.add_sibling("cleaned", SourceValue::u8(self.cleaned))
.add_sibling("creator", SourceValue::string(&self.creator.clone()));
if self.info_version >= 2 {
record = record
.add_sibling("sides", SourceValue::u8(self.sides))
.add_sibling("boot_sector_format", SourceValue::u8(self.boot_sector_format))
.add_sibling("optimal_bit_timing", SourceValue::u8(self.optimal_bit_timing))
.add_sibling("compatible_hardware", SourceValue::u32(self.compatible_hardware.into()))
.add_sibling("required_ram", SourceValue::u32(self.required_ram.into()))
.add_sibling("largest_track", SourceValue::u32(self.largest_track as u32));
}
if self.info_version >= 3 {
record
.add_sibling(
"flux_block",
SourceValue::u32(self.flux_block as u32).comment(if self.has_flux_block() {
"Flux block present"
}
else {
"No flux block"
}),
)
.add_sibling("largest_flux_track", SourceValue::u32(self.largest_flux_track as u32));
}
record_idx
}
}
impl InfoChunk {
pub fn has_flux_block(&self) -> bool {
self.flux_block > 0 && self.largest_flux_track > 0
}
}
#[derive(BinRead)]
#[br(import(disk_type: WozDiskType))]
#[br(little)]
pub struct TMapChunk {
pub(crate) track_map: [u8; 160],
#[br(calc = disk_type)]
pub(crate) disk_type: WozDiskType,
}
impl MapDump for TMapChunk {
fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
let entry = map.add_child(parent, "[TMAP] Chunk", SourceValue::default());
let entry_idx = entry.index();
match self.disk_type {
WozDiskType::FiveInch => {
for (i, track_quad) in self.track_map.chunks_exact(4).enumerate() {
map.add_child(entry_idx, &format!("[{}] TMap Entry", i), SourceValue::default())
.add_child(
&format!("{}.0", i),
SourceValue::u8(track_quad[0]).bad_if(track_quad[0] == 0xFF),
)
.add_sibling(
&format!("{}.25", i),
SourceValue::u8(track_quad[1]).bad_if(track_quad[1] == 0xFF),
)
.add_sibling(
&format!("{}.50", i),
SourceValue::u8(track_quad[2]).bad_if(track_quad[2] == 0xFF),
)
.add_sibling(
&format!("{}.75", i),
SourceValue::u8(track_quad[3]).bad_if(track_quad[3] == 0xFF),
);
}
}
WozDiskType::ThreeInch => {
for (i, track_pair) in self.track_map.chunks_exact(2).enumerate() {
map.add_child(entry_idx, &format!("[{}] TMap Entry", i), SourceValue::default())
.add_child("head0", SourceValue::u8(track_pair[0]).bad_if(track_pair[0] == 0xFF))
.add_sibling("head1", SourceValue::u8(track_pair[1]).bad_if(track_pair[1] == 0xFF));
}
}
_ => {}
}
entry_idx
}
}
#[derive(BinRead)]
#[br(import(disk_type: WozDiskType))]
#[br(little)]
pub struct FluxChunk {
pub(crate) track_map: [u8; 160],
#[br(calc = disk_type)]
pub(crate) disk_type: WozDiskType,
}
impl MapDump for FluxChunk {
    /// Dump the flux track map to the source map, mirroring the layout and
    /// labeling used by the TMAP chunk dump: quarter-step labels for 5.25"
    /// disks and per-head labels for 3.5" disks. Entries of 0xFF are flagged
    /// bad.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        let entry = map.add_child(parent, "[FLUX] Chunk", SourceValue::default());
        let entry_idx = entry.index();
        match self.disk_type {
            WozDiskType::FiveInch => {
                for (i, track_quad) in self.track_map.chunks_exact(4).enumerate() {
                    // Label each quarter-step with its own track number.
                    // (Previously the labels were hard-coded "0.0".."0.75" for
                    // every entry, unlike the TMAP dump which uses the index.)
                    map.add_child(entry_idx, &format!("[{}] Flux Map Entry", i), SourceValue::default())
                        .add_child(
                            &format!("{}.0", i),
                            SourceValue::u8(track_quad[0]).bad_if(track_quad[0] == 0xFF),
                        )
                        .add_sibling(
                            &format!("{}.25", i),
                            SourceValue::u8(track_quad[1]).bad_if(track_quad[1] == 0xFF),
                        )
                        .add_sibling(
                            &format!("{}.50", i),
                            SourceValue::u8(track_quad[2]).bad_if(track_quad[2] == 0xFF),
                        )
                        .add_sibling(
                            &format!("{}.75", i),
                            SourceValue::u8(track_quad[3]).bad_if(track_quad[3] == 0xFF),
                        );
                }
            }
            WozDiskType::ThreeInch => {
                for (i, track_pair) in self.track_map.chunks_exact(2).enumerate() {
                    map.add_child(entry_idx, &format!("[{}] Flux Map Entry", i), SourceValue::default())
                        .add_child("head0", SourceValue::u8(track_pair[0]).bad_if(track_pair[0] == 0xFF))
                        .add_sibling("head1", SourceValue::u8(track_pair[1]).bad_if(track_pair[1] == 0xFF));
                }
            }
            _ => {}
        }
        entry_idx
    }
}
// One TRKS-chunk track descriptor.
#[derive(BinRead)]
#[br(little)]
pub struct Trk {
    // Stored on disk as a 512-byte block number; converted to an absolute
    // byte offset at parse time by the `map` below.
    #[br(map = |x: u16| {x as u64 * 512})]
    starting_block: u64,
    // Number of 512-byte blocks occupied by the track data.
    block_ct: u16,
    // Bitcell count for bitstream tracks; for flux tracks this field holds the
    // flux transition count (logged as "Fts" in add_fluxstream_track).
    bit_ct: u32,
}
impl MapDump for Trk {
    // Dump this track descriptor's fields under `parent`.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        map.add_child(
            parent,
            "starting_block",
            // Convert back to the on-disk block number for display, since
            // `starting_block` was scaled to a byte offset on read.
            SourceValue::u32((self.starting_block / 512) as u32),
        )
        .add_sibling("block_ct", SourceValue::u32(self.block_ct as u32))
        .add_sibling("bit_ct", SourceValue::u32(self.bit_ct));
        // No meaningful node index to return; entries hang off `parent`.
        0
    }
}
// The WOZ TRKS chunk: a fixed table of 160 track descriptors, indexed by the
// values stored in the TMAP and FLUX chunks.
#[derive(BinRead)]
#[br(little)]
pub struct TrksChunk {
    trks: [Trk; 160],
}
impl MapDump for TrksChunk {
    /// Dump the TRKS table: one child node per descriptor slot, each filled in
    /// by the slot's own `Trk::write_to_map`.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        let chunk_idx = map.add_child(parent, "[TRKS] Chunk", SourceValue::default()).index();
        for (slot, trk) in self.trks.iter().enumerate() {
            let slot_idx = map
                .add_child(chunk_idx, &format!("[{}] Track Entry", slot), SourceValue::default())
                .index();
            trk.write_to_map(map, slot_idx);
        }
        chunk_idx
    }
}
/// Unit type carrying the WOZ image parser entry points.
pub struct WozFormat;
impl WozFormat {
/// The `DiskImageFileFormat` variant this parser implements.
#[allow(dead_code)]
fn format() -> DiskImageFileFormat {
    DiskImageFileFormat::WozImage
}
/// Capability flags: the common bitstream features plus comment and
/// weak-bit support.
pub(crate) fn capabilities() -> FormatCaps {
    bitstream_flags() | FormatCaps::CAP_COMMENT | FormatCaps::CAP_WEAK_BITS
}
/// Platforms this file format applies to.
pub fn platforms() -> Vec<Platform> {
    // WOZ is exclusively intended for Apple II disk images
    vec![Platform::AppleII]
}
/// File extensions recognized for this format.
pub(crate) fn extensions() -> Vec<&'static str> {
    vec!["woz"]
}
/// Returns `true` if `image` starts with a WOZ file header (magic match).
/// A header parse failure simply means "not a WOZ image".
pub(crate) fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
    _ = image.seek(std::io::SeekFrom::Start(0));
    WozHeader::read(&mut image)
        .map(|header| header.magic == WOZ_MAGIC.as_bytes())
        .unwrap_or(false)
}
/// Writing WOZ images is not implemented; always reports `Incompatible`.
pub(crate) fn can_write(_image: Option<&DiskImage>) -> ParserWriteCompatibility {
    ParserWriteCompatibility::Incompatible
}
/// Load a WOZ disk image from `reader` into `disk_image`.
///
/// Verifies the header magic and whole-file CRC, collects the INFO, TMAP,
/// TRKS, FLUX and META chunks, then materializes bitstream tracks (and flux
/// tracks when a FLUX chunk is present) from the track maps. Progress is
/// reported through `callback` when provided.
///
/// # Errors
/// * `DiskImageError::ImageCorruptError` — bad magic, CRC mismatch, missing
///   mandatory chunks, or an out-of-range track index.
/// * `DiskImageError::IncompatibleImage` — unsupported INFO chunk version.
pub(crate) fn load_image<RWS: ReadSeek>(
    mut reader: RWS,
    disk_image: &mut DiskImage,
    _opts: &ParserReadOptions,
    callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
    disk_image.set_source_format(DiskImageFileFormat::WozImage);
    disk_image.assign_source_map(true);
    // Advertise progress support
    if let Some(ref callback_fn) = callback {
        callback_fn(LoadingStatus::ProgressSupport);
    }
    // Get image size
    let image_size = reader.seek(std::io::SeekFrom::End(0))?;
    log::debug!("Image size: {} bytes", image_size);
    _ = reader.seek(std::io::SeekFrom::Start(0));
    if let Ok(file_header) = WozHeader::read(&mut reader) {
        if file_header.magic != WOZ_MAGIC.as_bytes() {
            return Err(DiskImageError::ImageCorruptError(
                "WOZ2 magic bytes not found".to_string(),
            ));
        }
        // The stored CRC covers everything after the file header: checksum the
        // remainder of the stream, then rewind to the first chunk.
        let rewind_pos = reader.seek(std::io::SeekFrom::Current(0))?;
        let mut crc_buf = Vec::with_capacity(image_size as usize);
        reader.read_to_end(&mut crc_buf)?;
        let crc = applesauce_crc32(&crc_buf, 0);
        log::debug!("Header CRC: {:0X?} Calculated CRC: {:0X?}", file_header.crc, crc);
        reader.seek(std::io::SeekFrom::Start(rewind_pos))?;
        if file_header.crc != crc {
            return Err(DiskImageError::ImageCorruptError("Header CRC mismatch".to_string()));
        }
    }
    let mut info_chunk_opt = None;
    let mut tmap_chunk_opt = None;
    let mut trks_chunk_opt = None;
    let mut flux_chunk_opt = None;
    // TMAP/FLUX parsing depends on the disk type, which is only known once the
    // INFO chunk has been seen; INFO is expected to precede them.
    let mut disk_type = WozDiskType::Unknown;
    log::debug!("Reading chunks...");
    let mut more_chunks = true;
    while more_chunks {
        let chunk_opt = match Self::read_chunk(&mut reader, image_size, disk_type) {
            Ok(chunk_opt) => chunk_opt,
            Err(e) => {
                // A chunk-level error ends chunk parsing but is not fatal to
                // the chunks already collected.
                log::error!("Error reading WOZ chunk: {}", e);
                break;
            }
        };
        if let Some(chunk) = chunk_opt {
            match chunk {
                WozChunk::Info(info_chunk) => {
                    log::debug!(
                        "Got Info Chunk: version: {} Disk Type: {:?} Creator: {}",
                        info_chunk.info_version,
                        info_chunk.disk_type,
                        info_chunk.creator
                    );
                    disk_type = info_chunk.disk_type;
                    if info_chunk.flux_block != 0 {
                        log::debug!("Flux block is present: {}", info_chunk.flux_block);
                    }
                    if info_chunk.info_version > 3 {
                        let err_str = format!("Unsupported WOZ Info Chunk version ({})", info_chunk.info_version);
                        log::error!("{}", err_str);
                        return Err(DiskImageError::IncompatibleImage(err_str));
                    }
                    info_chunk.write_to_map(disk_image.source_map_mut(), 0);
                    info_chunk_opt = Some(info_chunk);
                }
                WozChunk::TMap(tmap_chunk) => {
                    log::debug!("Got Track Map Chunk");
                    tmap_chunk.write_to_map(disk_image.source_map_mut(), 0);
                    tmap_chunk_opt = Some(tmap_chunk);
                }
                WozChunk::Trks(trks_chunk) => {
                    log::debug!("Got Tracks Chunk");
                    trks_chunk.write_to_map(disk_image.source_map_mut(), 0);
                    trks_chunk_opt = Some(trks_chunk);
                }
                WozChunk::Flux(flux_chunk) => {
                    log::debug!("Got Flux Chunk");
                    flux_chunk.write_to_map(disk_image.source_map_mut(), 0);
                    flux_chunk_opt = Some(flux_chunk);
                }
                WozChunk::Meta(meta_str) => {
                    let meta_map = Self::parse_meta(&meta_str);
                    log::debug!("Metadata KV pairs:");
                    let mut cursor =
                        disk_image
                            .source_map_mut()
                            .add_child(0, "[META] Chunk", SourceValue::default());
                    // First pair descends into the META node; subsequent pairs
                    // attach as siblings of the first.
                    for (i, (key, value)) in meta_map.iter().enumerate() {
                        if i == 0 {
                            cursor = cursor.add_child(key, SourceValue::string(value));
                        }
                        else {
                            cursor = cursor.add_sibling(key, SourceValue::string(value));
                        }
                        log::debug!("{}: {}", key, value);
                    }
                }
                WozChunk::Unknown => {
                    log::debug!("Got Unknown Chunk");
                }
            }
        }
        else {
            log::debug!("No more chunks found in WOZ image");
            more_chunks = false;
        }
    }
    if info_chunk_opt.is_none() {
        log::error!("Missing Info chunk");
        return Err(DiskImageError::ImageCorruptError("Missing Info chunk".to_string()));
    }
    let info_chunk = info_chunk_opt.unwrap();
    // Enable multi-resolution support if necessary
    if info_chunk.has_flux_block() {
        disk_image.set_multires(true);
    }
    let disk_heads = info_chunk.sides;
    let disk_encoding = TrackDataEncoding::Gcr;
    // let disk_density = match TrackDensity::try_from(&info_chunk.disk_type) {
    //     Ok(disk_density) => disk_density,
    //     Err(e) => {
    //         log::error!("Error converting WOZ disk type to TrackDensity: {}", e);
    //         return Err(DiskImageError::IncompatibleImage(
    //             "Error converting WOZ disk type to TrackDensity".to_string(),
    //         ));
    //     }
    // };
    let disk_density = TrackDensity::Standard;
    // 160 "tracks" for quarter-stepping 40 tracks
    // NOTE(review): 161 cylinders are requested even though only 160 map
    // entries exist — presumably so the geometry probe at the end
    // (`ch_iter.next().unwrap()`) cannot exhaust the iterator after all 160
    // tracks are consumed. Confirm this is intentional.
    let mut ch_iter = DiskCh::new(161, disk_heads).iter();
    if let (Some(tmap), Some(trks)) = (tmap_chunk_opt, trks_chunk_opt) {
        log::debug!("Track Map:");
        // Fluxfox should be able to deduplicate empty tracks, but we can save effort by skipping
        // empty tracks here.
        match disk_type {
            WozDiskType::ThreeInch => {
                // 3.5" images: two TMAP entries per cylinder, one per head.
                for (i, track_pair) in tmap.track_map.chunks_exact(2).enumerate() {
                    log::debug!("\tMap Entry {}: h0: Trk {} h1: Trk {}", i, track_pair[0], track_pair[1]);
                    for (head, trk_idx) in track_pair.iter().take(disk_heads as usize).enumerate() {
                        if let Some(ref callback_fn) = callback {
                            let progress = ((i * 2) + head) as f64 / MAX_TRACKS as f64;
                            callback_fn(LoadingStatus::Progress(progress));
                        }
                        if *trk_idx != 0xFF {
                            if trk_idx >= &MAX_TRACKS {
                                log::error!("Invalid track index: {}", trk_idx);
                                return Err(DiskImageError::ImageCorruptError(
                                    "Invalid track index in TMAP chunk".to_string(),
                                ));
                            }
                            let ch = ch_iter.next().unwrap();
                            let trk_entry = &trks.trks[*trk_idx as usize];
                            Self::add_bitstream_track(
                                &mut reader,
                                disk_image,
                                image_size,
                                ch,
                                disk_encoding,
                                trk_entry,
                            )?;
                        }
                        else {
                            let ch = ch_iter.next().unwrap();
                            let mut add_empty_track = false;
                            // If we have a flux chunk we can check if this track has flux data
                            if let Some(flux_chunk) = &flux_chunk_opt {
                                let flux_idx = flux_chunk.track_map[(i * 2) + head];
                                if flux_idx < MAX_TRACKS {
                                    let flux_entry = &trks.trks[flux_idx as usize];
                                    log::debug!("\t\tFlux Track Index: {}", flux_idx);
                                    Self::add_fluxstream_track(
                                        &mut reader,
                                        disk_image,
                                        image_size,
                                        ch,
                                        disk_encoding,
                                        flux_entry,
                                    )?;
                                }
                                else {
                                    log::debug!("\t\t(no flux)");
                                    add_empty_track = true;
                                }
                            }
                            // NOTE(review): unlike the 5.25" branch below, there
                            // is no `else { add_empty_track = true; }` when no
                            // FLUX chunk exists, so a 3.5" image without a FLUX
                            // chunk never adds an empty track here even though
                            // `ch_iter` was advanced. Also note the 0 bitcell
                            // count vs. 51_000 below. Confirm both are
                            // intentional.
                            if add_empty_track {
                                disk_image.add_empty_track(
                                    ch,
                                    disk_encoding,
                                    Some(TrackDataResolution::BitStream),
                                    TrackDataRate::from(disk_density),
                                    0,
                                    None,
                                )?;
                            }
                        }
                    }
                }
            }
            WozDiskType::FiveInch => {
                // 5.25" images: four TMAP entries per track (quarter-stepping).
                for (i, track_quad) in tmap.track_map.chunks_exact(4).enumerate() {
                    log::debug!(
                        "\tMap Entry {}: 0.0: {} 0.25: {}. 0.5: {} 0.75: {}",
                        i,
                        track_quad[0],
                        track_quad[1],
                        track_quad[2],
                        track_quad[3]
                    );
                    for (step, trk_idx) in track_quad.iter().enumerate() {
                        if let Some(ref callback_fn) = callback {
                            let progress = ((i * 4) + step) as f64 / MAX_TRACKS as f64;
                            callback_fn(LoadingStatus::Progress(progress));
                        }
                        if *trk_idx != 0xFF {
                            if trk_idx >= &MAX_TRACKS {
                                log::error!("Invalid track index: {}", trk_idx);
                                return Err(DiskImageError::ImageCorruptError(
                                    "Invalid track index in TMAP chunk".to_string(),
                                ));
                            }
                            let ch = ch_iter.next().unwrap();
                            let trk_entry = &trks.trks[*trk_idx as usize];
                            Self::add_bitstream_track(
                                &mut reader,
                                disk_image,
                                image_size,
                                ch,
                                disk_encoding,
                                trk_entry,
                            )?;
                        }
                        else {
                            let ch = ch_iter.next().unwrap();
                            let mut add_empty_track = false;
                            // If we have a flux chunk we can check if this track has flux data
                            if let Some(flux_chunk) = &flux_chunk_opt {
                                let flux_idx = flux_chunk.track_map[(i * 4) + step];
                                if flux_idx < MAX_TRACKS {
                                    let flux_entry = &trks.trks[flux_idx as usize];
                                    log::debug!("\t\tFlux Track Index: {}", flux_idx);
                                    Self::add_fluxstream_track(
                                        &mut reader,
                                        disk_image,
                                        image_size,
                                        ch,
                                        disk_encoding,
                                        flux_entry,
                                    )?;
                                }
                                else {
                                    log::debug!("\t\t(no flux)");
                                    add_empty_track = true;
                                }
                            }
                            else {
                                add_empty_track = true;
                            }
                            if add_empty_track {
                                log::warn!("Adding empty track: {:?}", ch);
                                disk_image.add_empty_track(
                                    ch,
                                    disk_encoding,
                                    Some(TrackDataResolution::BitStream),
                                    TrackDataRate::from(disk_density),
                                    // Some average bitcell count for a 5.25" disk
                                    51_000,
                                    None,
                                )?;
                            }
                        }
                    }
                }
            }
            _ => {}
        }
    }
    else {
        log::error!("Missing Track Map or Tracks chunk");
        return Err(DiskImageError::ImageCorruptError(
            "Missing Track Map or Tracks chunk".to_string(),
        ));
    }
    // The next unconsumed cylinder number doubles as the track count for the
    // reported geometry.
    let geometry = DiskCh::new(ch_iter.next().unwrap().c(), disk_heads);
    let desc = DiskDescriptor {
        platforms: Some(vec![Platform::AppleII]),
        geometry,
        data_encoding: disk_encoding,
        density: disk_density,
        data_rate: TrackDataRate::from(disk_density),
        rpm: None,
        write_protect: Some(info_chunk.write_protected != 0),
    };
    disk_image.descriptor = desc;
    Ok(())
}
/// Read one track's raw bitstream data out of the image and add it to `disk`
/// at physical location `ch`.
fn add_bitstream_track<RWS: ReadSeek>(
    mut reader: RWS,
    disk: &mut DiskImage,
    _image_size: u64,
    ch: DiskCh,
    encoding: TrackDataEncoding,
    track: &Trk,
) -> Result<(), DiskImageError> {
    log::debug!(
        "add_bitstream_track(): Track: {} Starting block: {} Blocks: {} ({} bytes) Bitcells: {}",
        ch,
        track.starting_block,
        track.block_ct,
        track.block_ct as usize * 512,
        track.bit_ct
    );
    // `starting_block` was already scaled to a byte offset when the TRKS
    // chunk was read, so we can seek to it directly.
    reader.seek(std::io::SeekFrom::Start(track.starting_block))?;
    // Pull the track's block-aligned data into memory.
    let byte_len = track.block_ct as usize * 512;
    let mut track_data = vec![0u8; byte_len];
    reader.read_exact(&mut track_data)?;
    // Hand the bitstream to the disk image; the bitcell count trims the
    // block-padded buffer down to the actual track length.
    let params = BitStreamTrackParams {
        ch,
        encoding,
        schema: None,
        data_rate: Default::default(),
        rpm: None,
        bitcell_ct: Some(track.bit_ct as usize),
        data: &track_data,
        weak: None,
        hole: None,
        detect_weak: false,
    };
    disk.add_track_bitstream(&params)?;
    Ok(())
}
/// Read one track's flux data out of the image, decode it, and add it to
/// `disk` as a flux stream track at physical location `ch`.
fn add_fluxstream_track<RWS: ReadSeek>(
    mut reader: RWS,
    disk: &mut DiskImage,
    _image_size: u64,
    ch: DiskCh,
    _encoding: TrackDataEncoding,
    track: &Trk,
) -> Result<(), DiskImageError> {
    log::debug!(
        "add_fluxstream_track(): Track: {} Starting block: {} Blocks: {} ({} bytes) Fts: {}",
        ch,
        track.starting_block,
        track.block_ct,
        track.block_ct as usize * 512,
        track.bit_ct
    );
    // Seek to the start of the track data block (This was converted to byte offset on read)
    reader.seek(std::io::SeekFrom::Start(track.starting_block))?;
    // Read in the track data.
    let mut read_vec = vec![0u8; track.block_ct as usize * 512];
    reader.read_exact(&mut read_vec)?;
    // Decode the flux data
    let (fluxes, rev_time) = decode_as_flux(&read_vec);
    // NOTE(review): warn level for a routine decode summary looks like
    // leftover debugging; consider demoting to debug.
    log::warn!(
        "Decoded {} flux transitions, index time: {}",
        fluxes.len(),
        format_ms!(rev_time)
    );
    // Create a fluxstream track
    let mut flux_track = FluxStreamTrack::new();
    // TODO: calculate the Zoned RPM here for Gcr disks
    // NOTE(review): a fixed 300 RPM index time is assumed until the TODO above
    // is addressed — confirm this is acceptable for zoned GCR disks.
    flux_track.add_revolution(ch, &fluxes, DiskRpm::Rpm300(1.0).index_time_ms());
    let params = FluxStreamTrackParams {
        ch,
        schema: None,
        encoding: None,
        clock: None,
        rpm: None,
    };
    let new_track = disk.add_track_fluxstream(flux_track, &params)?;
    let info = new_track.info();
    log::debug!(
        "Added {} track {} containing {} bits to image...",
        ch,
        info.encoding,
        info.bit_length,
    );
    Ok(())
}
/// Read the next chunk from `reader`, returning `Ok(None)` at end of file.
///
/// Typed chunks (INFO/TMAP/TRKS/FLUX) are parsed with fixed-size binrw reads;
/// the trailing seek advances by the header's declared size, which both skips
/// unknown chunks and re-synchronizes the stream after any typed read that did
/// not consume exactly `size` bytes. `disk_type` (established by the INFO
/// chunk) tells the TMAP/FLUX parsers how to group their entries.
fn read_chunk<RWS: ReadSeek>(
    mut reader: RWS,
    image_size: u64,
    disk_type: WozDiskType,
) -> Result<Option<WozChunk>, DiskImageError> {
    // Any bytes left in the stream?
    let offset = reader.seek(std::io::SeekFrom::Current(0))?;
    log::debug!("At file offset: {}", offset);
    if image_size == offset {
        log::debug!("No bytes left in reader!");
        return Ok(None);
    }
    // Read in the chunk header
    let chunk_header = WozChunkHeader::read(&mut reader)?;
    log::debug!("Read chunk header: {:0X?}", chunk_header.id);
    // Save chunk data offset to advance unknown chunks
    let chunk_offset = reader.seek(std::io::SeekFrom::Current(0))?;
    let chunk = match &chunk_header.id {
        b"INFO" => {
            let info_chunk = InfoChunk::read(&mut reader)?;
            WozChunk::Info(info_chunk)
        }
        b"TMAP" => {
            // TMAP layout depends on the disk type, passed as a binrw import.
            let tmap_chunk = TMapChunk::read_args(&mut reader, (disk_type,))?;
            WozChunk::TMap(tmap_chunk)
        }
        b"TRKS" => {
            let trks_chunk = TrksChunk::read(&mut reader)?;
            WozChunk::Trks(trks_chunk)
        }
        b"FLUX" => {
            let flux_chunk = FluxChunk::read_args(&mut reader, (disk_type,))?;
            WozChunk::Flux(flux_chunk)
        }
        b"META" => {
            // Metadata chunk is just a string
            let mut meta_data = vec![0u8; chunk_header.size as usize];
            reader.read_exact(&mut meta_data)?;
            let meta_str = String::from_utf8_lossy(&meta_data).trim_end().to_string();
            WozChunk::Meta(meta_str)
        }
        _ => {
            log::warn!("Unknown WOZ chunk: {:0X?}", chunk_header.id);
            WozChunk::Unknown
        }
    };
    // Seek to next chunk
    reader.seek(std::io::SeekFrom::Start(chunk_offset + chunk_header.size as u64))?;
    Ok(Some(chunk))
}
/// Parse META chunk text into a key/value map. Each line holds one pair,
/// key and value separated by a tab; a line with no tab yields an empty
/// value. Keys and values are trimmed.
fn parse_meta(meta_str: &str) -> FoxHashMap<String, String> {
    let mut meta_map = FoxHashMap::new();
    for line in meta_str.lines() {
        let (key, value) = line.split_once('\t').unwrap_or((line, ""));
        meta_map.insert(key.trim().to_string(), value.trim().to_string());
    }
    meta_map
}
pub fn save_image<RWS: ReadWriteSeek>(
_image: &DiskImage,
_opts: &ParserWriteOptions,
_output: &mut RWS,
) -> Result<(), DiskImageError> {
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | true |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/as/mod.rs | src/file_parsers/as/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! Module for Applesauce disk image formats developed by John K. Morris.
//! These include A2R, WOZ and MOOF.
//! A2R is a low level flux image format for Apple II disks, designed for use
//! with the Applesauce FDC hardware.
//!
//! WOZ is a disk image format intended for Apple II software preservation.
//! MOOF is similar to WOZ, but designed to contain Macintosh disk images.
//! These formats share a similar chunk structure and CRC algorithm, quite
//! similar to Hampa Hug's various PCE disk formats.
pub(crate) mod crc;
pub(crate) mod flux;
#[cfg(feature = "moof")]
pub(crate) mod moof;
#[cfg(feature = "woz")]
pub(crate) mod woz;
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/as/crc.rs | src/file_parsers/as/crc.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! Common CRC algorithm for Applesauce disk image formats.
/// From AS docs:
/// The routine that has been chosen for use originated with Gary S. Brown in 1986
const AS_CRC32_TABLE: [u32; 256] = [
    0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832,
    0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
    0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, 0x646ba8c0, 0xfd62f97a,
    0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
    0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
    0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
    0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab,
    0xb6662d3d, 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
    0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, 0x6b6b51f4,
    0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
    0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074,
    0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
    0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525,
    0x206f85b3, 0xb966d409, 0xce61e49f, 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
    0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
    0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
    0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, 0xfed41b76,
    0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
    0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c, 0x36034af6,
    0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
    0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7,
    0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
    0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7,
    0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
    0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
    0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
    0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, 0xbdbdf21c, 0xcabac28a, 0x53b39330,
    0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
    0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
];
/// Compute the Applesauce CRC-32 of `buf`, continuing from a previous `crc`
/// value (pass 0 to start a fresh checksum).
///
/// Classic table-driven CRC-32 (Gary S. Brown, 1986): the running value is
/// inverted on entry and again on exit, which allows checksums to be chained
/// across multiple buffers.
pub fn applesauce_crc32(buf: &[u8], crc: u32) -> u32 {
    let folded = buf.iter().fold(!crc, |acc, &byte| {
        AS_CRC32_TABLE[((acc ^ u32::from(byte)) & 0xFF) as usize] ^ (acc >> 8)
    });
    !folded
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/file_parsers/as/moof.rs | src/file_parsers/as/moof.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! File format parser for the MOOF disk image format.
//! MOOF images are intended to store Macintosh disk images.
//! The format was developed by the author of the Applesauce project.
//! https://applesaucefdc.com/moof-reference/
use crate::{
file_parsers::{bitstream_flags, FormatCaps},
format_ms,
io::ReadSeek,
platform::Platform,
DiskImage,
DiskImageError,
DiskImageFileFormat,
FoxHashMap,
LoadingCallback,
LoadingStatus,
ParserWriteCompatibility,
};
use crate::{
file_parsers::{
r#as::{crc::applesauce_crc32, flux::decode_as_flux},
ParserReadOptions,
ParserWriteOptions,
},
io::ReadWriteSeek,
prelude::{DiskCh, TrackDataEncoding, TrackDataRate, TrackDataResolution, TrackDensity},
source_map::{MapDump, OptionalSourceMap, SourceValue},
track::fluxstream::FluxStreamTrack,
types::{BitStreamTrackParams, DiskDescriptor, DiskRpm, FluxStreamTrackParams},
};
use binrw::{binrw, BinRead};
pub const MOOF_MAGIC: &str = "MOOF";
pub const MAX_TRACKS: u8 = 160;
/// Disk classes a MOOF image can describe, decoded from the INFO chunk's
/// disk-type byte.
#[derive(Debug)]
pub enum MoofDiskType {
    /// 400K single-sided, double-density GCR.
    SsDdGcr400K,
    /// 800K double-sided, double-density GCR.
    DsDsGcr800K,
    /// 1.44M double-sided, high-density MFM.
    DsHdMfm144M,
    /// Apple Lisa "Twiggy" drive.
    Twiggy,
    /// Unrecognized disk-type byte.
    Unknown,
}
impl MoofDiskType {
    /// Number of drive heads implied by the disk type.
    pub fn heads(&self) -> u8 {
        match self {
            // The only single-sided format.
            MoofDiskType::SsDdGcr400K => 1,
            // The Twiggy drive was a bizarre thing with two heads, but not
            // directly opposing each other. See
            // http://www.brouhaha.com/~eric/retrocomputing/lisa/twiggy.html
            // For Unknown we just guess double-sided.
            MoofDiskType::DsDsGcr800K
            | MoofDiskType::DsHdMfm144M
            | MoofDiskType::Twiggy
            | MoofDiskType::Unknown => 2,
        }
    }
}
// Map a MOOF disk type to its track data encoding; fails for Unknown.
impl TryFrom<&MoofDiskType> for TrackDataEncoding {
    type Error = String;
    fn try_from(value: &MoofDiskType) -> Result<Self, Self::Error> {
        match value {
            // The high-density format is the only MFM one.
            MoofDiskType::DsHdMfm144M => Ok(TrackDataEncoding::Mfm),
            MoofDiskType::Unknown => Err("Unknown MOOF disk type".to_string()),
            // All remaining formats are GCR.
            MoofDiskType::SsDdGcr400K | MoofDiskType::DsDsGcr800K | MoofDiskType::Twiggy => {
                Ok(TrackDataEncoding::Gcr)
            }
        }
    }
}
// Map a MOOF disk type to its track density; fails for Unknown.
impl TryFrom<&MoofDiskType> for TrackDensity {
    type Error = String;
    fn try_from(value: &MoofDiskType) -> Result<Self, Self::Error> {
        match value {
            MoofDiskType::SsDdGcr400K => Ok(TrackDensity::Standard),
            MoofDiskType::DsDsGcr800K => Ok(TrackDensity::Double),
            MoofDiskType::DsHdMfm144M => Ok(TrackDensity::High),
            MoofDiskType::Twiggy => Ok(TrackDensity::Double),
            MoofDiskType::Unknown => Err("Unknown MOOF disk type".to_string()),
        }
    }
}
// The fixed MOOF file header preceding the chunk stream.
#[binrw]
#[brw(little)]
pub struct MoofHeader {
    // "MOOF" signature bytes.
    magic: [u8; 4],
    // Byte-order/integrity check field; not validated by this parser —
    // presumably a fixed sentinel per the MOOF spec (TODO confirm).
    data_check: u32,
    // Applesauce CRC-32 of everything following this header (verified in
    // load_image).
    crc: u32,
}
// Header preceding each chunk in the file.
#[binrw]
#[br(little)]
pub struct MoofChunkHeader {
    // Four-byte ASCII chunk identifier (e.g. b"INFO").
    id: [u8; 4],
    // Declared size of the chunk payload in bytes.
    size: u32,
}
/// A parsed MOOF chunk, tagged by its chunk ID.
pub enum MoofChunk {
    Info(InfoChunk),
    TMap(TMapChunk),
    Trks(TrksChunk),
    Flux(FluxChunk),
    // META chunk payload, kept as raw text.
    Meta(String),
    // Unrecognized chunk ID; payload is skipped.
    Unknown,
}
// The MOOF INFO chunk: global image metadata.
#[derive(BinRead)]
#[br(little)]
pub struct InfoChunk {
    // INFO chunk format revision; load_image only accepts version 1.
    info_version: u8,
    // Raw disk-type byte decoded into a MoofDiskType at parse time.
    #[br(map = |x: u8| match x {
        1 => MoofDiskType::SsDdGcr400K,
        2 => MoofDiskType::DsDsGcr800K,
        3 => MoofDiskType::DsHdMfm144M,
        4 => MoofDiskType::Twiggy,
        _ => MoofDiskType::Unknown,
    })]
    disk_type: MoofDiskType,
    write_protected: u8,
    synchronized: u8,
    optimal_bit_timing: u8,
    #[br(map = |x: [u8; 32]| String::from_utf8_lossy(&x).trim_end().to_string())]
    creator: String, // 32-byte UTF-8 string padded with spaces
    _padding: u8, // Always 0
    largest_track: u16,
    // Non-zero when a FLUX chunk is present (see has_flux_block()).
    flux_block: u16,
    largest_flux_track: u16,
}
impl MapDump for InfoChunk {
    /// Dump all INFO chunk fields as children of a "[INFO] Chunk" node and
    /// return that node's index.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        let record = map.add_child(parent, "[INFO] Chunk", SourceValue::default());
        let record_idx = record.index();
        record
            .add_child("info_version", SourceValue::u8(self.info_version))
            .add_sibling("disk_type", SourceValue::string(&format!("{:?}", self.disk_type)))
            .add_sibling("write_protected", SourceValue::u8(self.write_protected))
            .add_sibling("synchronized", SourceValue::u8(self.synchronized))
            .add_sibling("optimal_bit_timing", SourceValue::u8(self.optimal_bit_timing))
            // Borrow the field directly; the previous `&self.creator.clone()`
            // allocated a String only to immediately borrow and drop it.
            .add_sibling("creator", SourceValue::string(&self.creator))
            .add_sibling("largest_track", SourceValue::u32(self.largest_track as u32))
            .add_sibling(
                "flux_block",
                SourceValue::u32(self.flux_block as u32).comment(if self.has_flux_block() {
                    "Flux block present"
                }
                else {
                    "No flux block"
                }),
            )
            .add_sibling("largest_flux_track", SourceValue::u32(self.largest_flux_track as u32));
        record_idx
    }
}
impl InfoChunk {
    /// Returns `true` when the INFO chunk advertises flux data: both the flux
    /// block pointer and the largest-flux-track size must be non-zero.
    pub fn has_flux_block(&self) -> bool {
        let has_pointer = self.flux_block > 0;
        let has_data = self.largest_flux_track > 0;
        has_pointer && has_data
    }
}
// The MOOF TMAP chunk: 160 slots mapping physical positions to TRKS entries;
// 0xFF marks a slot with no track stored.
#[binrw]
#[br(little)]
pub struct TMapChunk {
    pub(crate) track_map: [u8; 160],
}
impl MapDump for TMapChunk {
    /// Dump the track map: one node per map entry, one value per head.
    /// Entries of 0xFF are flagged bad.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        let chunk_idx = map.add_child(parent, "[TMAP] Chunk", SourceValue::default()).index();
        // Two map slots per entry: one for each head.
        for (entry_no, heads) in self.track_map.chunks_exact(2).enumerate() {
            map.add_child(chunk_idx, &format!("[{}] TMap Entry", entry_no), SourceValue::default())
                .add_child("head0", SourceValue::u8(heads[0]).bad_if(heads[0] == 0xFF))
                .add_sibling("head1", SourceValue::u8(heads[1]).bad_if(heads[1] == 0xFF));
        }
        chunk_idx
    }
}
// The MOOF FLUX chunk: same 160-slot layout as TMAP, but mapping positions to
// flux-resolution track entries; 0xFF marks an unused slot.
#[binrw]
#[br(little)]
pub struct FluxChunk {
    pub(crate) track_map: [u8; 160],
}
impl MapDump for FluxChunk {
    /// Dump the flux track map: one node per map entry, one value per head.
    /// Entries of 0xFF are flagged bad.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        let chunk_idx = map.add_child(parent, "[FLUX] Chunk", SourceValue::default()).index();
        for (entry_no, heads) in self.track_map.chunks_exact(2).enumerate() {
            map.add_child(chunk_idx, &format!("[{}] Flux Map Entry", entry_no), SourceValue::default())
                .add_child("head0", SourceValue::u8(heads[0]).bad_if(heads[0] == 0xFF))
                .add_sibling("head1", SourceValue::u8(heads[1]).bad_if(heads[1] == 0xFF));
        }
        chunk_idx
    }
}
// One TRKS-chunk track descriptor.
#[derive(BinRead)]
#[br(little)]
pub struct Trk {
    // Stored on disk as a 512-byte block number; converted to an absolute
    // byte offset at parse time by the `map` below.
    #[br(map = |x: u16| {x as u64 * 512})]
    starting_block: u64,
    // Number of 512-byte blocks occupied by the track data.
    block_ct: u16,
    // Bit (or flux transition) count for the track data.
    bit_ct: u32,
}
impl MapDump for Trk {
    // Dump this track descriptor's fields under `parent`.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        map.add_child(
            parent,
            "starting_block",
            // Convert back to the on-disk block number for display, since
            // `starting_block` was scaled to a byte offset on read.
            SourceValue::u32((self.starting_block / 512) as u32),
        )
        .add_sibling("block_ct", SourceValue::u32(self.block_ct as u32))
        .add_sibling("bit_ct", SourceValue::u32(self.bit_ct));
        // No meaningful node index to return; entries hang off `parent`.
        0
    }
}
// The MOOF TRKS chunk: a fixed table of 160 track descriptors, indexed by the
// values in the TMAP and FLUX chunks.
#[derive(BinRead)]
#[br(little)]
pub struct TrksChunk {
    trks: [Trk; 160],
}
impl MapDump for TrksChunk {
    /// Dump the TRKS table: one child node per descriptor slot, each filled in
    /// by the slot's own `Trk::write_to_map`.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize {
        let chunk_idx = map.add_child(parent, "[TRKS] Chunk", SourceValue::default()).index();
        for (slot, trk) in self.trks.iter().enumerate() {
            let slot_idx = map
                .add_child(chunk_idx, &format!("[{}] Track Entry", slot), SourceValue::default())
                .index();
            trk.write_to_map(map, slot_idx);
        }
        chunk_idx
    }
}
/// Unit type carrying the MOOF image parser entry points.
pub struct MoofFormat;
impl MoofFormat {
/// The `DiskImageFileFormat` variant this parser implements.
#[allow(dead_code)]
fn format() -> DiskImageFileFormat {
    DiskImageFileFormat::MoofImage
}
/// Capability flags: the common bitstream features plus comment and
/// weak-bit support.
pub(crate) fn capabilities() -> FormatCaps {
    bitstream_flags() | FormatCaps::CAP_COMMENT | FormatCaps::CAP_WEAK_BITS
}
/// Platforms this file format applies to.
pub fn platforms() -> Vec<Platform> {
    // MOOF in theory could support other formats, but is primarily used for Macintosh disk
    // images.
    vec![Platform::Macintosh]
}
/// File extensions recognized for this format.
pub(crate) fn extensions() -> Vec<&'static str> {
    vec!["moof"]
}
/// Returns `true` if `image` starts with a MOOF file header (magic match).
/// A header parse failure simply means "not a MOOF image".
pub(crate) fn detect<RWS: ReadSeek>(mut image: RWS) -> bool {
    _ = image.seek(std::io::SeekFrom::Start(0));
    MoofHeader::read(&mut image)
        .map(|header| header.magic == MOOF_MAGIC.as_bytes())
        .unwrap_or(false)
}
/// Writing MOOF images is not implemented; always reports `Incompatible`.
pub(crate) fn can_write(_image: Option<&DiskImage>) -> ParserWriteCompatibility {
    ParserWriteCompatibility::Incompatible
}
/// Load a MOOF disk image from `reader` into `disk_image`.
///
/// Verifies the file magic and whole-file CRC32, reads all chunks
/// (INFO, TMAP, TRKS, FLUX, META), then builds one track per TMAP entry,
/// falling back to FLUX data or an empty track for 0xFF map entries, and
/// finally fills in the image descriptor.
///
/// # Errors
/// `ImageCorruptError` on bad magic, CRC mismatch, missing chunks or
/// invalid track indices; `IncompatibleImage` for unsupported INFO
/// versions or disk types.
pub(crate) fn load_image<RWS: ReadSeek>(
    mut reader: RWS,
    disk_image: &mut DiskImage,
    _opts: &ParserReadOptions,
    callback: Option<LoadingCallback>,
) -> Result<(), DiskImageError> {
    disk_image.set_source_format(DiskImageFileFormat::MoofImage);
    disk_image.assign_source_map(true);
    // Advertise progress support
    if let Some(ref callback_fn) = callback {
        callback_fn(LoadingStatus::ProgressSupport);
    }
    // Get image size
    let image_size = reader.seek(std::io::SeekFrom::End(0))?;
    log::debug!("Image size: {} bytes", image_size);
    _ = reader.seek(std::io::SeekFrom::Start(0));
    if let Ok(file_header) = MoofHeader::read(&mut reader) {
        if file_header.magic != MOOF_MAGIC.as_bytes() {
            return Err(DiskImageError::ImageCorruptError(
                "MOOF magic bytes not found".to_string(),
            ));
        }
        // The CRC covers everything after the header: checksum the rest of
        // the file, then rewind to the first chunk.
        let rewind_pos = reader.seek(std::io::SeekFrom::Current(0))?;
        let mut crc_buf = Vec::with_capacity(image_size as usize);
        reader.read_to_end(&mut crc_buf)?;
        let crc = applesauce_crc32(&crc_buf, 0);
        log::debug!("Header CRC: {:0X?} Calculated CRC: {:0X?}", file_header.crc, crc);
        reader.seek(std::io::SeekFrom::Start(rewind_pos))?;
        if file_header.crc != crc {
            return Err(DiskImageError::ImageCorruptError("Header CRC mismatch".to_string()));
        }
    }
    // Collect each chunk type as it is encountered; chunk order is not
    // assumed beyond INFO being required.
    let mut info_chunk_opt = None;
    let mut tmap_chunk_opt = None;
    let mut trks_chunk_opt = None;
    let mut flux_chunk_opt = None;
    log::debug!("Reading chunks...");
    let mut more_chunks = true;
    while more_chunks {
        let chunk_opt = match Self::read_chunk(&mut reader, image_size) {
            Ok(chunk_opt) => chunk_opt,
            Err(e) => {
                // A malformed chunk terminates the scan; we proceed with
                // whatever chunks were read successfully.
                log::error!("Error reading MOOF chunk: {}", e);
                break;
            }
        };
        if let Some(chunk) = chunk_opt {
            match chunk {
                MoofChunk::Info(info_chunk) => {
                    log::debug!(
                        "Got Info Chunk: version: {} Disk Type: {:?} Creator: {}",
                        info_chunk.info_version,
                        info_chunk.disk_type,
                        info_chunk.creator
                    );
                    if info_chunk.flux_block != 0 {
                        log::debug!("Flux block is present: {}", info_chunk.flux_block);
                    }
                    // Only INFO version 1 is understood.
                    if info_chunk.info_version != 1 {
                        log::error!("Unsupported MOOF Info Chunk version: {}", info_chunk.info_version);
                        return Err(DiskImageError::IncompatibleImage(
                            "Unsupported MOOF Info Chunk version".to_string(),
                        ));
                    }
                    info_chunk.write_to_map(disk_image.source_map_mut(), 0);
                    info_chunk_opt = Some(info_chunk);
                }
                MoofChunk::TMap(tmap_chunk) => {
                    log::debug!("Got Track Map Chunk");
                    tmap_chunk.write_to_map(disk_image.source_map_mut(), 0);
                    tmap_chunk_opt = Some(tmap_chunk);
                }
                MoofChunk::Trks(trks_chunk) => {
                    log::debug!("Got Tracks Chunk");
                    trks_chunk.write_to_map(disk_image.source_map_mut(), 0);
                    trks_chunk_opt = Some(trks_chunk);
                }
                MoofChunk::Flux(flux_chunk) => {
                    log::debug!("Got Flux Chunk");
                    flux_chunk.write_to_map(disk_image.source_map_mut(), 0);
                    flux_chunk_opt = Some(flux_chunk);
                }
                MoofChunk::Meta(meta_str) => {
                    // Metadata is a tab-separated key/value list; record each
                    // pair in the source map.
                    let meta_map = Self::parse_meta(&meta_str);
                    log::debug!("Metadata KV pairs:");
                    let mut cursor =
                        disk_image
                            .source_map_mut()
                            .add_child(0, "[META] Chunk", SourceValue::default());
                    for (i, (key, value)) in meta_map.iter().enumerate() {
                        if i == 0 {
                            cursor = cursor.add_child(key, SourceValue::string(value));
                        }
                        else {
                            cursor = cursor.add_sibling(key, SourceValue::string(value));
                        }
                        log::debug!("{}: {}", key, value);
                    }
                }
                MoofChunk::Unknown => {
                    log::debug!("Got Unknown Chunk");
                }
            }
        }
        else {
            log::debug!("No more chunks found in MOOF image");
            more_chunks = false;
        }
    }
    if info_chunk_opt.is_none() {
        log::error!("Missing Info chunk");
        return Err(DiskImageError::ImageCorruptError("Missing Info chunk".to_string()));
    }
    let info_chunk = info_chunk_opt.unwrap();
    // Enable multi-resolution support if necessary
    if info_chunk.has_flux_block() {
        disk_image.set_multires(true);
    }
    let disk_heads = info_chunk.disk_type.heads();
    let disk_encoding = match TrackDataEncoding::try_from(&info_chunk.disk_type) {
        Ok(disk_encoding) => disk_encoding,
        Err(e) => {
            log::error!("Error converting MOOF disk type to TrackDataEncoding: {}", e);
            return Err(DiskImageError::IncompatibleImage(
                "Error converting MOOF disk type to TrackDataEncoding".to_string(),
            ));
        }
    };
    let disk_density = match TrackDensity::try_from(&info_chunk.disk_type) {
        Ok(disk_density) => disk_density,
        Err(e) => {
            log::error!("Error converting MOOF disk type to TrackDensity: {}", e);
            return Err(DiskImageError::IncompatibleImage(
                "Error converting MOOF disk type to TrackDensity".to_string(),
            ));
        }
    };
    // Iterator over physical cylinder/head addresses, advanced once per
    // track-map entry actually consumed.
    let mut ch_iter = DiskCh::new(160, disk_heads).iter();
    if let (Some(tmap), Some(trks)) = (tmap_chunk_opt, trks_chunk_opt) {
        log::debug!("Track Map:");
        // Fluxfox should be able to deduplicate empty tracks, but we can save effort by skipping
        // empty tracks here.
        for (i, track_pair) in tmap.track_map.chunks_exact(2).enumerate() {
            log::debug!("\tMap Entry {}: h0: Trk {} h1: Trk {}", i, track_pair[0], track_pair[1]);
            for (head, trk_idx) in track_pair.iter().take(disk_heads as usize).enumerate() {
                if let Some(ref callback_fn) = callback {
                    let progress = ((i * 2) + head) as f64 / MAX_TRACKS as f64;
                    callback_fn(LoadingStatus::Progress(progress));
                }
                // 0xFF marks a slot with no bitstream track.
                if *trk_idx != 0xFF {
                    if trk_idx >= &MAX_TRACKS {
                        log::error!("Invalid track index: {}", trk_idx);
                        return Err(DiskImageError::ImageCorruptError(
                            "Invalid track index in TMAP chunk".to_string(),
                        ));
                    }
                    let ch = ch_iter.next().unwrap();
                    let trk_entry = &trks.trks[*trk_idx as usize];
                    Self::add_bitstream_track(&mut reader, disk_image, image_size, ch, disk_encoding, trk_entry)?;
                }
                else {
                    let ch = ch_iter.next().unwrap();
                    let mut add_empty_track = false;
                    // If we have a flux chunk we can check if this track has flux data
                    if let Some(flux_chunk) = &flux_chunk_opt {
                        let flux_idx = flux_chunk.track_map[(i * 2) + head];
                        if flux_idx < MAX_TRACKS {
                            let flux_entry = &trks.trks[flux_idx as usize];
                            log::debug!("\t\tFlux Track Index: {}", flux_idx);
                            Self::add_fluxstream_track(
                                &mut reader,
                                disk_image,
                                image_size,
                                ch,
                                disk_encoding,
                                flux_entry,
                            )?;
                        }
                        else {
                            log::debug!("\t\t(no flux)");
                            add_empty_track = true;
                        }
                    }
                    if add_empty_track {
                        disk_image.add_empty_track(
                            ch,
                            disk_encoding,
                            Some(TrackDataResolution::BitStream),
                            TrackDataRate::from(disk_density),
                            0,
                            None,
                        )?;
                    }
                }
            }
        }
    }
    else {
        log::error!("Missing Track Map or Tracks chunk");
        return Err(DiskImageError::ImageCorruptError(
            "Missing Track Map or Tracks chunk".to_string(),
        ));
    }
    // The next unconsumed CH gives the final cylinder count.
    let geometry = DiskCh::new(ch_iter.next().unwrap().c(), disk_heads);
    let desc = DiskDescriptor {
        platforms: Some(vec![Platform::Macintosh]),
        geometry,
        data_encoding: disk_encoding,
        density: disk_density,
        data_rate: TrackDataRate::from(disk_density),
        rpm: None,
        write_protect: Some(info_chunk.write_protected != 0),
    };
    disk_image.descriptor = desc;
    Ok(())
}
/// Read one bitstream track's data blocks from `reader` and add the track
/// to `disk`.
///
/// `track.starting_block` is assumed to already be a byte offset (converted
/// on read) and `track.block_ct` counts 512-byte blocks; `track.bit_ct`
/// gives the exact bitcell count for the track.
fn add_bitstream_track<RWS: ReadSeek>(
    mut reader: RWS,
    disk: &mut DiskImage,
    _image_size: u64,
    ch: DiskCh,
    encoding: TrackDataEncoding,
    track: &Trk,
) -> Result<(), DiskImageError> {
    log::debug!(
        "add_bitstream_track(): Track: {} Starting block: {} Blocks: {} ({} bytes) Bitcells: {}",
        ch,
        track.starting_block,
        track.block_ct,
        track.block_ct as usize * 512,
        track.bit_ct
    );
    // Seek to the start of the track data block (This was converted to byte offset on read)
    reader.seek(std::io::SeekFrom::Start(track.starting_block))?;
    // Read in the track data.
    let mut read_vec = vec![0u8; track.block_ct as usize * 512];
    reader.read_exact(&mut read_vec)?;
    // Create the bitstream track parameters
    let params = BitStreamTrackParams {
        schema: None,
        ch,
        encoding,
        data_rate: Default::default(),
        rpm: None,
        bitcell_ct: Some(track.bit_ct as usize),
        data: &read_vec,
        weak: None,
        hole: None,
        detect_weak: false,
    };
    disk.add_track_bitstream(&params)?;
    Ok(())
}
/// Read one flux track's data blocks from `reader`, decode the flux
/// transitions, and add the resulting fluxstream track to `disk`.
///
/// As with bitstream tracks, `track.starting_block` is assumed to already be
/// a byte offset and `track.block_ct` counts 512-byte blocks; for flux tracks
/// `track.bit_ct` holds the flux transition count.
fn add_fluxstream_track<RWS: ReadSeek>(
    mut reader: RWS,
    disk: &mut DiskImage,
    _image_size: u64,
    ch: DiskCh,
    _encoding: TrackDataEncoding,
    track: &Trk,
) -> Result<(), DiskImageError> {
    log::debug!(
        "add_fluxstream_track(): Track: {} Starting block: {} Blocks: {} ({} bytes) Fts: {}",
        ch,
        track.starting_block,
        track.block_ct,
        track.block_ct as usize * 512,
        track.bit_ct
    );
    // Seek to the start of the track data block (This was converted to byte offset on read)
    reader.seek(std::io::SeekFrom::Start(track.starting_block))?;
    // Read in the track data.
    let mut read_vec = vec![0u8; track.block_ct as usize * 512];
    reader.read_exact(&mut read_vec)?;
    // Decode the flux data
    let (fluxes, rev_time) = decode_as_flux(&read_vec);
    // Informational message: demoted from `warn` to `debug` for consistency
    // with add_bitstream_track() and the rest of the loader.
    log::debug!(
        "Decoded {} flux transitions, index time: {}",
        fluxes.len(),
        format_ms!(rev_time)
    );
    // Create a fluxstream track
    let mut flux_track = FluxStreamTrack::new();
    // TODO: calculate the Zoned RPM here for Gcr disks
    flux_track.add_revolution(ch, &fluxes, DiskRpm::Rpm300(1.0).index_time_ms());
    let params = FluxStreamTrackParams {
        ch,
        schema: None,
        encoding: None,
        clock: None,
        rpm: None,
    };
    let new_track = disk.add_track_fluxstream(flux_track, &params)?;
    let info = new_track.info();
    log::debug!(
        "Added {} track {} containing {} bits to image...",
        ch,
        info.encoding,
        info.bit_length,
    );
    Ok(())
}
/// Read the next chunk from `reader`.
///
/// Returns `Ok(None)` when the reader is exactly at end of file, otherwise
/// parses the chunk header and dispatches on its 4-byte ID. Unrecognized
/// chunks are returned as `MoofChunk::Unknown`. The reader is always left
/// positioned at the start of the following chunk (header size field).
fn read_chunk<RWS: ReadSeek>(mut reader: RWS, image_size: u64) -> Result<Option<MoofChunk>, DiskImageError> {
    // Any bytes left in the stream?
    let offset = reader.seek(std::io::SeekFrom::Current(0))?;
    log::debug!("At file offset: {}", offset);
    if image_size == offset {
        log::debug!("No bytes left in reader!");
        return Ok(None);
    }
    // Read in the chunk header
    let chunk_header = MoofChunkHeader::read(&mut reader)?;
    log::debug!("Read chunk header: {:0X?}", chunk_header.id);
    // Save chunk data offset to advance unknown chunks
    let chunk_offset = reader.seek(std::io::SeekFrom::Current(0))?;
    let chunk = match &chunk_header.id {
        b"INFO" => {
            let info_chunk = InfoChunk::read(&mut reader)?;
            MoofChunk::Info(info_chunk)
        }
        b"TMAP" => {
            let tmap_chunk = TMapChunk::read(&mut reader)?;
            MoofChunk::TMap(tmap_chunk)
        }
        b"TRKS" => {
            let trks_chunk = TrksChunk::read(&mut reader)?;
            MoofChunk::Trks(trks_chunk)
        }
        b"FLUX" => {
            let flux_chunk = FluxChunk::read(&mut reader)?;
            MoofChunk::Flux(flux_chunk)
        }
        b"META" => {
            // Metadata chunk is just a string
            let mut meta_data = vec![0u8; chunk_header.size as usize];
            reader.read_exact(&mut meta_data)?;
            let meta_str = String::from_utf8_lossy(&meta_data).trim_end().to_string();
            MoofChunk::Meta(meta_str)
        }
        _ => {
            log::warn!("Unknown MOOF chunk: {:0X?}", chunk_header.id);
            MoofChunk::Unknown
        }
    };
    // Seek to next chunk
    reader.seek(std::io::SeekFrom::Start(chunk_offset + chunk_header.size as u64))?;
    Ok(Some(chunk))
}
/// Parse a META chunk body: one key/value pair per line, separated by a tab.
/// Lines without a tab map the whole (trimmed) line to an empty value.
fn parse_meta(meta_str: &str) -> FoxHashMap<String, String> {
    let mut meta_map = FoxHashMap::new();
    for line in meta_str.lines() {
        let (key, value) = match line.split_once('\t') {
            Some((k, v)) => (k, v),
            None => (line, ""),
        };
        meta_map.insert(key.trim().to_string(), value.trim().to_string());
    }
    meta_map
}
/// Saving MOOF images is not implemented; always returns `UnsupportedFormat`.
pub fn save_image<RWS: ReadWriteSeek>(
    _image: &DiskImage,
    _opts: &ParserWriteOptions,
    _output: &mut RWS,
) -> Result<(), DiskImageError> {
    Err(DiskImageError::UnsupportedFormat)
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/track_schema/system34.rs | src/track_schema/system34.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! An indirect implementation of the `TrackSchemaParser` trait for the IBM System 34
//! track schema, used by IBM PCs and compatibles and Macintosh 1.44MB HD disks.
//!
//! The System34 track schema supports both MFM and FM track encodings.
use core::ops::Range;
use std::fmt::{Display, Formatter};
use crate::{
bitstream_codec::{
mfm::{MfmCodec, MFM_BYTE_LEN, MFM_MARKER_LEN},
MarkerEncoding,
TrackDataStream,
},
io::{Read, Seek, SeekFrom},
mfm_offset,
prelude::{RwScope, TrackDataEncoding},
source_map::{OptionalSourceMap, SourceMap, SourceValue},
track::{TrackAnalysis, TrackSectorScanResult},
track_schema::{
GenericTrackElement,
TrackElement,
TrackElementInstance,
TrackMarker,
TrackMarkerItem,
TrackMetadata,
},
types::{chs::DiskChsn, IntegrityCheck, IntegrityField},
util::crc_ibm_3740,
DiskImageError,
FoxHashSet,
SectorIdQuery,
};
use bit_vec::BitVec;
// Nominal MFM track length for a 300 RPM / 250 kbit double-density track.
pub const DEFAULT_TRACK_SIZE_BYTES: usize = 6250;
// Fill byte used for all gap regions, and the sync (zero) byte.
pub const GAP_BYTE: u8 = 0x4E;
pub const SYNC_BYTE: u8 = 0;
// Gap lengths, in bytes, for the IBM, ISO and perpendicular layouts.
pub const IBM_GAP3_DEFAULT: usize = 22;
pub const IBM_GAP4A: usize = 80;
pub const IBM_GAP1: usize = 50;
pub const IBM_GAP2: usize = 22;
pub const ISO_GAP1: usize = 32;
pub const ISO_GAP2: usize = 22;
pub const SYNC_LEN: usize = 12;
pub const PERPENDICULAR_GAP1: usize = 50;
pub const PERPENDICULAR_GAP2: usize = 41;
// Pre-encoded markers for IAM, IDAM, DAM and DDAM.
// Each is the full 64-bit MFM bit pattern (three sync bytes + mark byte).
pub const IAM_MARKER: u64 = 0x5224_5224_5224_5552;
pub const IDAM_MARKER: u64 = 0x4489_4489_4489_5554;
pub const DAM_MARKER: u64 = 0x4489_4489_4489_5545;
pub const DDAM_MARKER: u64 = 0x4489_4489_4489_5548;
// Wildcard marker: matches any of the A1A1A1 markers when combined with
// MARKER_MASK (the low 16 bits are ignored).
pub const ANY_MARKER: u64 = 0x4489_4489_4489_0000;
pub const CLOCK_MASK: u64 = 0xAAAA_AAAA_AAAA_0000;
pub const DATA_MARK: u64 = 0x5555_5555_5555_5555;
pub const MARKER_MASK: u64 = 0xFFFF_FFFF_FFFF_0000;
// FM/MFM clock-bit patterns and the FM-encoded IAM marker.
pub const FM_MARKER_CLOCK: u64 = 0xAAAA_AAAA_AAAA_0000;
pub const MFM_MARKER_CLOCK: u64 = 0x0220_0220_0220_0000;
pub const IAM_MARKER_FM: u64 = 0xFAAE_FAAE_FAAE_FFFA;
// Unencoded (data-byte) forms of the markers.
pub const IAM_MARKER_BYTES: [u8; 4] = [0xC2, 0xC2, 0xC2, 0xFC];
pub const IDAM_MARKER_BYTES: [u8; 4] = [0xA1, 0xA1, 0xA1, 0xFE];
pub const DAM_MARKER_BYTES: [u8; 4] = [0xA1, 0xA1, 0xA1, 0xFB];
pub const DDAM_MARKER_BYTES: [u8; 4] = [0xA1, 0xA1, 0xA1, 0xF8];
/// Variants of the System 34 track layout supported by this schema.
pub enum System34Variant {
    // Original IBM 3740 (FM) layout.
    Ibm3740,
    // Standard IBM (MFM) layout.
    Ibm,
    // ISO layout: no IAM marker, shorter GAP1.
    Iso,
    // Perpendicular recording layout with a longer GAP2.
    Perpendicular,
}
/// The list of sector IDs (C/H/S/N) to lay down during a format operation.
#[derive(Debug)]
pub struct System34FormatBuffer {
    pub chs_vec: Vec<DiskChsn>,
}
impl From<&[u8]> for System34FormatBuffer {
    /// Build a format buffer from raw C/H/S/N quads.
    ///
    /// Each 4-byte group becomes one `DiskChsn`. A trailing partial group
    /// (buffer length not a multiple of 4) is ignored, where the previous
    /// `step_by(4)` indexing loop would have panicked out of bounds.
    fn from(buffer: &[u8]) -> Self {
        let chs_vec = buffer
            .chunks_exact(4)
            .map(|quad| DiskChsn::new(quad[0] as u16, quad[1], quad[2], quad[3]))
            .collect();
        System34FormatBuffer { chs_vec }
    }
}
/// Formatting standard selecting gap lengths and IAM presence.
#[derive(Copy, Clone, Debug)]
pub enum System34Standard {
    Ibm,
    Perpendicular,
    Iso,
}
impl System34Standard {
pub fn gap2(&self) -> usize {
match self {
System34Standard::Ibm => IBM_GAP2,
System34Standard::Perpendicular => PERPENDICULAR_GAP2,
System34Standard::Iso => ISO_GAP2,
}
}
}
/// The four System 34 address marks.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum System34Marker {
    // Index Address Mark.
    Iam,
    // ID (sector header) Address Mark.
    Idam,
    // Data Address Mark.
    Dam,
    // Deleted Data Address Mark.
    Ddam,
}
impl From<System34Marker> for u64 {
fn from(marker: System34Marker) -> u64 {
match marker {
System34Marker::Iam => IAM_MARKER,
System34Marker::Idam => IDAM_MARKER,
System34Marker::Dam => DAM_MARKER,
System34Marker::Ddam => DDAM_MARKER,
}
}
}
impl TryInto<System34Marker> for u16 {
type Error = ();
fn try_into(self) -> Result<System34Marker, Self::Error> {
match self {
0x5554 | 0xF57E => Ok(System34Marker::Idam),
0x5545 | 0xF56F => Ok(System34Marker::Dam),
0x554A => Ok(System34Marker::Ddam),
_ => {
log::error!("Invalid System34 marker: {:04X}", self);
Err(())
}
}
}
}
/// A decoded structural element of a System 34 track.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum System34Element {
    Gap1,
    Gap2,
    Gap3,
    Gap4a,
    Gap4b,
    Sync,
    // An address mark; the Option flags additional marker state
    // (semantics determined by the producer of the element).
    Marker(System34Marker, Option<bool>),
    // A sector ID field: the recorded CHSN plus CRC validity and whether a
    // matching DAM was found.
    SectorHeader {
        chsn: DiskChsn,
        address_error: bool,
        data_missing: bool,
    },
    // A sector data field with its CRC / deleted-mark state.
    SectorData {
        chsn: DiskChsn,
        address_error: bool,
        data_error: bool,
        deleted: bool,
    },
}
impl From<System34Element> for GenericTrackElement {
fn from(elem: System34Element) -> Self {
match elem {
System34Element::Gap1 => GenericTrackElement::NullElement,
System34Element::Gap2 => GenericTrackElement::NullElement,
System34Element::Gap3 => GenericTrackElement::NullElement,
System34Element::Gap4a => GenericTrackElement::NullElement,
System34Element::Gap4b => GenericTrackElement::NullElement,
System34Element::Sync => GenericTrackElement::NullElement,
System34Element::Marker(_, _) => GenericTrackElement::Marker,
System34Element::SectorHeader { address_error, .. } => match address_error {
true => GenericTrackElement::SectorBadHeader,
false => GenericTrackElement::SectorHeader,
},
System34Element::SectorData {
address_error,
data_error,
deleted,
..
} => match (address_error || data_error, deleted) {
(false, false) => GenericTrackElement::SectorData,
(true, false) => GenericTrackElement::SectorBadData,
(false, true) => GenericTrackElement::SectorDeletedData,
(true, true) => GenericTrackElement::SectorBadDeletedData,
},
}
}
}
impl System34Element {
    /// Size of the element in (decoded) bytes.
    pub fn size(&self) -> usize {
        match self {
            // NOTE(review): gaps and sync runs report a fixed nominal size of
            // 8 bytes here regardless of their actual on-track length —
            // confirm callers expect this.
            System34Element::Gap1 => 8,
            System34Element::Gap2 => 8,
            System34Element::Gap3 => 8,
            System34Element::Gap4a => 8,
            System34Element::Gap4b => 8,
            System34Element::Sync => 8,
            System34Element::Marker(_, _) => 4,
            System34Element::SectorData { chsn, .. } => {
                // Data size is determined by the sector size in bytes, plus DAM and CRC.
                4 + 2 + chsn.n_size()
            }
            System34Element::SectorHeader { .. } => {
                // IDAM + Sector ID (4 bytes) + CRC (2 bytes)
                4 + 4 + 2
            }
        }
    }
    /// Provide a subset data range corresponding scope requested for the current element.
    pub fn range(&self, scope: RwScope) -> Range<usize> {
        // Most elements don't support a scope.
        match (self, scope) {
            (System34Element::SectorData { .. }, RwScope::DataOnly) => {
                // Data scope is the data portion of the sector only.
                // Skip the IDAM (4 bytes) and omit CRC field from end (2 bytes)
                4..(self.size() - 2)
            }
            (System34Element::SectorData { .. } | System34Element::SectorHeader { .. }, RwScope::CrcOnly) => {
                // CRC scope is the CRC field only (last two bytes).
                self.size() - 2..self.size()
            }
            // Everything else: the whole element.
            (_, _) => 0..self.size(),
        }
    }
    /// True if this element is a Data Address Mark marker.
    pub fn is_sector_data_marker(&self) -> bool {
        matches!(self, System34Element::Marker(System34Marker::Dam, _))
    }
    /// True if this element is a sector data field.
    pub fn is_sector_data(&self) -> bool {
        matches!(self, System34Element::SectorData { .. })
    }
    /// Return `(sector_id, true)` for a sector header with a valid address
    /// CRC, or `(0, false)` otherwise.
    pub fn is_sector_id(&self) -> (u8, bool) {
        match self {
            System34Element::SectorHeader {
                chsn, address_error, ..
            } => match address_error {
                true => (0, false),
                false => (chsn.s(), true),
            },
            _ => (0, false),
        }
    }
}
// TODO: get rid of this duplicate sector id type. Use DiskChsn?
/// A raw sector ID field as read from the track: C/H/S, the size code `b`
/// (sector size = 128 << b), and the recorded CRC with its validity flag.
#[derive(Default)]
pub struct SectorId {
    pub c: u8,
    pub h: u8,
    pub s: u8,
    // Sector size code ("N" in CHSN terms).
    pub b: u8,
    pub crc: u16,
    pub crc_valid: bool,
}
impl SectorId {
    /// Sector payload size implied by the size code `b` (128 << b),
    /// clamped to a maximum of 8192 bytes.
    pub fn sector_size_in_bytes(&self) -> usize {
        let (shifted, _) = 128usize.overflowing_shl(self.b as u32);
        shifted.min(8192)
    }
}
impl Display for SectorId {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
write!(
f,
"[C: {} H: {} S: {} B: {} CRC: {:04X}]",
self.c, self.h, self.s, self.b, self.crc
)
}
}
/// Output of a track format operation: the laid-out track bytes plus the
/// byte offsets at which markers must be stamped into the raw bitstream.
pub struct System34FormatResult {
    pub track_bytes: Vec<u8>,
    pub markers: Vec<(System34Marker, usize)>,
}
/// Unit type implementing the IBM System 34 track schema.
pub struct System34Schema;
impl System34Schema {
    // System34 masks clock bits in the MFM encoding of address marks.
    // This is to help differentiate markers from data.
    const MFM_MARKER_CLOCK_MASK: u64 = 0x5555_5555_5555_FFFF;
    const MFM_MARKER_CLOCK: u64 = 0x0088_0088_0088_0000;
    /// MFM-encode a 4-byte pattern and overwrite its clock bits with the
    /// marker clock so it can be distinguished from ordinary data.
    #[inline]
    pub fn encode_marker(pattern: &[u8]) -> u64 {
        let marker = MfmCodec::encode_marker(pattern);
        marker & Self::MFM_MARKER_CLOCK_MASK | Self::MFM_MARKER_CLOCK
    }
/// Lay out a complete formatted track as decoded bytes.
///
/// Writes GAP0/sync/IAM (IBM/perpendicular) or GAP1 (ISO), then for each
/// entry in `format_buffer`: sync, IDAM + CHSN + CRC, GAP2, sync, DAM,
/// sector data filled from `fill_pattern`, data CRC, and GAP3. The track is
/// padded (or truncated, with a warning) to the byte length implied by
/// `bitcell_ct`. Marker byte offsets are returned so the caller can stamp
/// the raw marker patterns afterwards.
///
/// # Errors
/// Returns `DiskImageError::ParameterError` when `fill_pattern` is empty.
pub fn format_track_as_bytes(
    standard: System34Standard,
    bitcell_ct: usize,
    format_buffer: Vec<DiskChsn>,
    fill_pattern: &[u8],
    gap3: usize,
) -> Result<System34FormatResult, DiskImageError> {
    if fill_pattern.is_empty() {
        log::error!("Fill pattern cannot be empty.");
        return Err(DiskImageError::ParameterError);
    }
    // Ceiling division: bitcells to decoded bytes.
    let track_byte_ct = (bitcell_ct + MFM_BYTE_LEN - 1) / MFM_BYTE_LEN;
    log::trace!(
        "format_track_as_bytes(): Formatting track with {} bitcells, {} bytes",
        bitcell_ct,
        track_byte_ct
    );
    let mut track_bytes: Vec<u8> = Vec::with_capacity(track_byte_ct);
    let mut markers = Vec::new();
    if matches!(standard, System34Standard::Ibm | System34Standard::Perpendicular) {
        // Write out GAP0, sync, IAM marker, and GAP1.
        track_bytes.extend_from_slice(&[GAP_BYTE; IBM_GAP4A]); // GAP0
        track_bytes.extend_from_slice(&[SYNC_BYTE; SYNC_LEN]); // Sync
        markers.push((System34Marker::Iam, track_bytes.len()));
    }
    else {
        // Just write Gap1 for ISO standard, there is no IAM marker.
        track_bytes.extend_from_slice(&[GAP_BYTE; ISO_GAP1]);
    }
    // Cursor into fill_pattern so a multi-byte pattern continues seamlessly
    // across sector boundaries.
    let mut pat_cursor = 0;
    for sector in format_buffer {
        track_bytes.extend_from_slice(&[SYNC_BYTE; SYNC_LEN]); // Write initial sync.
        markers.push((System34Marker::Idam, track_bytes.len()));
        let idam_crc_offset = track_bytes.len();
        track_bytes.extend_from_slice(IDAM_MARKER_BYTES.as_ref()); // Write IDAM marker.
        // Write CHSN bytes.
        track_bytes.push(sector.c() as u8);
        track_bytes.push(sector.h());
        track_bytes.push(sector.s());
        track_bytes.push(sector.n());
        // Write CRC word over IDAM + CHSN.
        let crc16 = crc_ibm_3740(&track_bytes[idam_crc_offset..], None);
        track_bytes.extend_from_slice(&crc16.to_be_bytes());
        // Write GAP2. (extend with a repeat iterator: no temporary Vec.)
        track_bytes.extend(std::iter::repeat(GAP_BYTE).take(standard.gap2()));
        // Write SYNC.
        track_bytes.extend_from_slice(&[SYNC_BYTE; SYNC_LEN]);
        // Write DAM marker.
        markers.push((System34Marker::Dam, track_bytes.len()));
        let dam_crc_offset = track_bytes.len();
        track_bytes.extend_from_slice(DAM_MARKER_BYTES.as_ref());
        // Write sector data using provided pattern buffer.
        if fill_pattern.len() == 1 {
            track_bytes.extend(std::iter::repeat(fill_pattern[0]).take(sector.n_size()));
        }
        else {
            let mut sector_buffer = Vec::with_capacity(sector.n_size());
            while sector_buffer.len() < sector.n_size() {
                let remain = sector.n_size() - sector_buffer.len();
                let copy_pat = if pat_cursor + remain <= fill_pattern.len() {
                    &fill_pattern[pat_cursor..pat_cursor + remain]
                }
                else {
                    &fill_pattern[pat_cursor..]
                };
                sector_buffer.extend_from_slice(copy_pat);
                pat_cursor = (pat_cursor + copy_pat.len()) % fill_pattern.len();
            }
            track_bytes.extend_from_slice(&sector_buffer);
        }
        // Write CRC word over DAM + data.
        let crc16 = crc_ibm_3740(&track_bytes[dam_crc_offset..], None);
        track_bytes.extend_from_slice(&crc16.to_be_bytes());
        // Write GAP3.
        track_bytes.extend(std::iter::repeat(GAP_BYTE).take(gap3));
    }
    // Fill rest of track with GAP4B.
    if track_bytes.len() < track_byte_ct {
        track_bytes.resize(track_byte_ct, GAP_BYTE);
    }
    if track_bytes.len() > track_byte_ct {
        log::warn!(
            "format_track_as_bytes(): Format operation passed index. Truncating track to {} bytes",
            track_byte_ct
        );
        track_bytes.truncate(track_byte_ct);
    }
    log::trace!(
        "format_track_as_bytes(): Wrote {} markers to track of size {} bitcells: {}",
        markers.len(),
        track_bytes.len(),
        track_bytes.len() * 8
    );
    Ok(System34FormatResult { track_bytes, markers })
}
/// Stamp each pre-encoded marker pattern directly into the raw bitstream
/// at the byte offsets produced by `format_track_as_bytes()`.
pub(crate) fn set_track_markers(
    codec: &mut TrackDataStream,
    markers: Vec<(System34Marker, usize)>,
) -> Result<(), DiskImageError> {
    for (marker, byte_offset) in markers {
        // Convert the byte offset to a bit index and write the raw 64-bit
        // MFM marker pattern over whatever was encoded there.
        let encoded = u64::from(marker).to_be_bytes();
        codec.write_raw_buf(&encoded, byte_offset * MFM_BYTE_LEN);
    }
    Ok(())
}
}
// Quasi-trait impl of TrackSchema - called by enum dispatch
impl System34Schema {
/// Find the provided pattern of bytes within the specified bitstream, starting at `offset` bits
/// into the track.
/// The bit offset of the pattern is returned if found, otherwise None.
/// The pattern length is limited to 8 characters.
#[allow(dead_code)]
pub(crate) fn find_data_pattern(stream: &TrackDataStream, pattern: &[u8], index: usize) -> Option<usize> {
    // An empty pattern would shift the mask by 64 bits (overflow) below.
    if pattern.is_empty() {
        return None;
    }
    let mut buffer = [0u8; 8];
    let len = pattern.len().min(8);
    buffer[(8 - len)..8].copy_from_slice(&pattern[..len]);
    let pat = u64::from_be_bytes(buffer);
    let pat_mask = u64::MAX >> (8 * (8 - len));
    let mut shift_reg = 0u64;
    for (bit_ct, bi) in (index..stream.len()).enumerate() {
        shift_reg = shift_reg << 1 | stream[bi] as u64;
        // A full window of `len * 8` bits is first available at
        // bit_ct == len * 8 - 1. The previous guard of `bit_ct >= len * 8`
        // required one extra bit, so a pattern starting exactly at `index`
        // could never be matched.
        if (bit_ct >= (len * 8 - 1)) && (shift_reg & pat_mask) == pat {
            return Some(bi - len * 8 + 1);
        }
    }
    None
}
/// Summarize track metadata into a [`TrackAnalysis`]: sector count, sector
/// numbering/size consistency, and whether any address/data/deleted/no-DAM
/// conditions were observed.
pub(crate) fn analyze_elements(metadata: &TrackMetadata) -> TrackAnalysis {
    let mut analysis = TrackAnalysis::default();
    let mut n_set: FoxHashSet<u8> = FoxHashSet::new();
    let mut last_n = 0;
    let sector_ids = metadata.sector_ids();
    let sector_ct = sector_ids.len();
    for (si, sector_id) in sector_ids.iter().enumerate() {
        // Consecutive numbering means 1-based: sector i must have id i+1.
        if sector_id.s() != si as u8 + 1 {
            analysis.nonconsecutive_sectors = true;
        }
        last_n = sector_id.n();
        n_set.insert(sector_id.n());
    }
    if n_set.len() > 1 {
        // Variable sector sizes detected.
        analysis.consistent_sector_size = None;
    }
    else {
        // All sectors share one size code (last_n).
        analysis.consistent_sector_size = Some(last_n);
    }
    // Fold per-element error flags into the track-level analysis.
    for ei in metadata.elements() {
        match ei.element {
            TrackElement::System34(System34Element::SectorData {
                address_error,
                data_error,
                deleted,
                ..
            }) => {
                if address_error {
                    analysis.address_error = true;
                }
                if data_error {
                    analysis.data_error = true
                }
                if deleted {
                    analysis.deleted_data = true;
                }
            }
            TrackElement::System34(System34Element::SectorHeader {
                address_error,
                data_missing,
                ..
            }) => {
                if address_error {
                    analysis.address_error = true;
                }
                if data_missing {
                    analysis.no_dam = true;
                }
            }
            _ => {}
        }
    }
    analysis.sector_ct = sector_ct;
    analysis
}
/// Find the next address marker in the track bitstream. The type of marker and its position in
/// the bitstream is returned, or None.
pub(crate) fn find_next_marker(stream: &TrackDataStream, offset: usize) -> Option<(TrackMarker, usize)> {
    match stream.encoding() {
        // The search is identical for both supported encodings; the previous
        // per-encoding match arms were exact duplicates and have been merged.
        TrackDataEncoding::Mfm | TrackDataEncoding::Fm => {
            let marker = MarkerEncoding {
                bits: ANY_MARKER,
                mask: MARKER_MASK,
                ..MarkerEncoding::default()
            };
            if let Some((index, marker_u16)) = stream.find_marker(&marker, offset, None) {
                // The low 16 bits identify which marker was found.
                if let Ok(marker) = marker_u16.try_into() {
                    return Some((TrackMarker::System34(marker), index));
                }
            }
            None
        }
        _ => {
            // System34 only supports MFM and FM encodings.
            log::warn!(
                "find_next_marker(): Unsupported stream encoding: {:?}",
                stream.encoding()
            );
            None
        }
    }
}
/// Search the stream for one specific System 34 marker, starting at bit
/// `index` and optionally bounded by `limit`. Returns the bit position and
/// the marker's low 16 bits, or None for non-System34 markers / no match.
pub(crate) fn find_marker(
    stream: &TrackDataStream,
    marker: TrackMarker,
    index: usize,
    limit: Option<usize>,
) -> Option<(usize, u16)> {
    match marker {
        TrackMarker::System34(sys34) => {
            let encoding = MarkerEncoding {
                bits: u64::from(sys34),
                ..MarkerEncoding::default()
            };
            stream.find_marker(&encoding, index, limit)
        }
        #[allow(unreachable_patterns)]
        _ => None,
    }
}
/// Scan pre-decoded track elements for the sector matching `id`, starting at
/// bit position `index`.
///
/// Matching follows FDC semantics: the first SectorHeader whose sector
/// number matches sets `last_idam_matched`; the next SectorData element is
/// then returned as the hit. A header with a bad address CRC short-circuits
/// the search immediately. On failure, the returned `NotFound` carries the
/// wrong-cylinder / bad-cylinder (0xFF) / wrong-head flags accumulated
/// during the scan.
pub(crate) fn find_sector_element(
    id: impl Into<SectorIdQuery>,
    elements: &[TrackElementInstance],
    index: usize,
    _limit: Option<usize>,
) -> TrackSectorScanResult {
    let id = id.into();
    let mut wrong_cylinder = false;
    let mut bad_cylinder = false;
    let mut wrong_head = false;
    let mut last_idam_matched = false;
    for (ei, instance) in elements.iter().enumerate() {
        // Skip elements before the starting position.
        if instance.start < index {
            continue;
        }
        let TrackElementInstance { element, .. } = instance;
        match element {
            TrackElement::System34(System34Element::SectorHeader {
                chsn,
                address_error,
                data_missing,
            }) => {
                if chsn.s() == id.s() {
                    // if c is 0xFF, we set the flag for bad cylinder.
                    if chsn.c() == 0xFF {
                        bad_cylinder = true;
                    }
                    // If c differs, we set the flag for wrong cylinder.
                    if id.c().is_some() && chsn.c() != id.c().unwrap() {
                        wrong_cylinder = true;
                    }
                    // If h differs, we set the flag for wrong head.
                    if id.h().is_some() && chsn.h() != id.h().unwrap() {
                        wrong_head = true;
                    }
                    last_idam_matched = id.matches(chsn);
                    // A bad header CRC will short-circuit the search.
                    if *address_error {
                        return TrackSectorScanResult::Found {
                            ei,
                            sector_chsn: *chsn,
                            address_error: *address_error,
                            data_error: false,
                            deleted_mark: false,
                            no_dam: *data_missing,
                        };
                    }
                }
            }
            TrackElement::System34(System34Element::SectorData {
                chsn,
                address_error,
                data_error,
                deleted,
            }) => {
                // If we matched the last sector header, then this is the sector data
                // we are looking for. Return the info.
                if last_idam_matched {
                    return TrackSectorScanResult::Found {
                        ei,
                        sector_chsn: *chsn,
                        address_error: *address_error,
                        data_error: *data_error,
                        deleted_mark: *deleted,
                        no_dam: false,
                    };
                }
            }
            _ => {}
        }
    }
    TrackSectorScanResult::NotFound {
        wrong_cylinder,
        bad_cylinder,
        wrong_head,
    }
}
/// Decode one track element into `buf` and, for sector headers/data,
/// verify its CRC16.
///
/// Returns the sub-range of `buf` selected by `scope` together with an
/// optional integrity check (recorded vs calculated CRC).
pub(crate) fn decode_element(
    stream: &TrackDataStream,
    element: &TrackElementInstance,
    scope: RwScope,
    buf: &mut [u8],
) -> (Range<usize>, Option<IntegrityCheck>) {
    // Read the element into the buffer
    stream.read_decoded_buf(buf, element.start);
    match element.element {
        TrackElement::System34(System34Element::SectorHeader { .. }) => {
            // Calculate the CRC16 of the sector header
            let (recorded_crc, calculated_crc) = Self::crc16_bytes(buf);
            let check = IntegrityCheck::Crc16(IntegrityField::new(recorded_crc, calculated_crc));
            (element.element.range(scope).unwrap_or_default(), Some(check))
        }
        TrackElement::System34(System34Element::SectorData { data_error, .. }) => {
            // Calculate the CRC16 of the data.
            let (recorded_crc, calculated_crc) = Self::crc16_bytes(buf);
            let check = IntegrityCheck::Crc16(IntegrityField::new(recorded_crc, calculated_crc));
            // The cached metadata flag should agree with the freshly
            // computed CRC; warn if they have diverged.
            if data_error != check.is_error() {
                log::warn!("Data CRC state out of sync with metadata!");
            }
            (element.element.range(scope).unwrap_or_default(), Some(check))
        }
        // Other elements carry no integrity information.
        _ => (element.element.range(scope).unwrap_or_default(), None),
    }
}
/// Encode `buf` into the stream at the element's start position.
/// Returns the number of bits written (per `write_encoded_buf`).
#[allow(dead_code)]
#[inline]
pub(crate) fn encode_element(
    stream: &mut TrackDataStream,
    element: &TrackElementInstance,
    _scope: RwScope,
    buf: &[u8],
) -> usize {
    // TODO: Detect and properly encode markers
    stream.write_encoded_buf(buf, element.start)
}
/// Search the raw stream for a gap or sync element starting at bit `index`.
/// Returns the bit position of the element if found, otherwise None.
/// Only gap and sync elements are searchable; anything else returns None.
#[allow(dead_code)]
pub(crate) fn find_element(stream: &TrackDataStream, element: TrackElement, index: usize) -> Option<usize> {
    if let TrackElement::System34(element) = element {
        use System34Element::*;
        let (marker_u64, _pattern) = match element {
            Gap1 | Gap2 | Gap3 | Gap4a | Gap4b => (System34Schema::encode_marker(&[0x4E; 4]), &[0x4E; 4]),
            Sync => (MfmCodec::encode_marker(&[0x00; 4]), &[0x00; 4]),
            _ => return None,
        };
        log::trace!(
            "find_element(): Encoded element: {:?} as {:016X}/{:064b}",
            element,
            marker_u64,
            marker_u64
        );
        log::trace!("find_element(): Searching for element at offset: {}", index);
        let marker = MarkerEncoding {
            bits: marker_u64,
            ..MarkerEncoding::default()
        };
        let found_marker = stream.find_marker(&marker, index, None);
        if let Some(marker_pos) = found_marker {
            log::trace!(
                "find_element(): Found element in raw stream: {:?} at index: {}, sync: {} debug: {}",
                element,
                marker_pos.0,
                marker_pos.0 & 1,
                stream.debug_marker(marker_pos.0)
            );
            // Return the position at which the element was found. The
            // previous code returned the starting `index`, contradicting
            // the log message above.
            return Some(marker_pos.0);
        }
    };
    None
}
/// Scan a track bitstream for address markers, including the IAM, IDAM and DAM markers. Return
/// their positions. The marker positions will be used to create the clock phase map for the
/// track, which must be performed before we can read the data off the disk which is done in
/// a second pass.
pub(crate) fn scan_markers(stream: &TrackDataStream) -> Vec<TrackMarkerItem> {
    let mut bit_cursor: usize = 0;
    let mut markers = Vec::new();
    // Look for the IAM marker first - but it may not be present (ISO standard encoding does
    // not require it). The search is bounded to the start of the track.
    if let Some(marker) = System34Schema::find_marker(
        stream,
        TrackMarker::System34(System34Marker::Iam),
        bit_cursor,
        Some(5_000),
    ) {
        log::trace!("scan_track_markers(): Found IAM marker at bit index: {}", marker.0);
        markers.push(TrackMarkerItem {
            elem_type: TrackMarker::System34(System34Marker::Iam),
            start: marker.0,
        });
        // Advance past the 4-byte marker before continuing.
        bit_cursor = marker.0 + 4 * MFM_BYTE_LEN;
    }
    // Collect every remaining marker, advancing past each one found.
    while let Some((marker, marker_offset)) = System34Schema::find_next_marker(stream, bit_cursor) {
        markers.push(TrackMarkerItem {
            elem_type: marker,
            start: marker_offset,
        });
        bit_cursor = marker_offset + 4 * MFM_BYTE_LEN;
    }
    markers
}
/// Scan a track bitstream using the pre-scanned marker positions to extract marker data such
/// as Sector ID values and CRCs. This is done in a second pass after the markers have been
/// found by scan_track_markers() and a clock phase map created for the track - required for the
/// proper functioning of the Read and Seek traits on MfmCodec.
pub(crate) fn scan_metadata(
stream: &mut TrackDataStream,
markers: Vec<TrackMarkerItem>,
) -> Vec<TrackElementInstance> {
let mut elements = Vec::new();
let mut last_marker_opt: Option<System34Marker> = None;
let mut last_sector_id = SectorId::default();
let mut last_element_offset = 0;
for marker in &markers {
let element_offset = marker.start;
if let TrackMarker::System34(sys34_marker) = marker.elem_type {
match (last_marker_opt, sys34_marker) {
(Some(System34Marker::Idam), System34Marker::Idam) => {
// Encountered IDAMs back to back. This is sometimes seen in copy-protection methods
// such as XELOK v1.
// Push a Sector Header metadata item spanning from last IDAM to this IDAM.
let metadata = TrackElementInstance {
element: TrackElement::System34(System34Element::SectorHeader {
chsn: DiskChsn::from((
last_sector_id.c as u16,
last_sector_id.h,
last_sector_id.s,
last_sector_id.b,
)),
address_error: !last_sector_id.crc_valid,
data_missing: true, // Flag data as missing.
}),
start: last_element_offset,
end: element_offset,
chsn: None,
};
elements.push(metadata)
}
(_, System34Marker::Idam) => {
// Encountered a sector ID address mark (sector header), after any element.
let mut sector_header = [0; 8];
// TODO: Don't unwrap in a library unless provably safe.
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | true |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/track_schema/dispatch.rs | src/track_schema/dispatch.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
#[cfg(feature = "amiga")]
use crate::track_schema::amiga::AmigaSchema;
use crate::{
bitstream_codec::TrackDataStream,
prelude::RwScope,
source_map::SourceMap,
track::{TrackAnalysis, TrackSectorScanResult},
track_schema::{
system34::System34Schema,
TrackElementInstance,
TrackMarker,
TrackMarkerItem,
TrackMetadata,
TrackSchema,
TrackSchemaParser,
},
types::IntegrityCheck,
SectorIdQuery,
};
use bit_vec::BitVec;
use std::ops::Range;
const SCHEMA_ERR: &str = "You must enable at least one platform feature!";
impl TrackSchemaParser for TrackSchema {
/*
fn find_data_pattern(&self, track: &TrackDataStream, pattern: &[u8], offset: usize) -> Option<usize> {
#[allow(clippy::match_single_binding)]
match self {
TrackSchema::System34 => System34Schema::find_data_pattern(track, pattern, offset),
TrackSchema::Amiga => todo!(),
}
}
*/
fn analyze_elements(&self, metadata: &TrackMetadata) -> TrackAnalysis {
#[allow(clippy::match_single_binding)]
#[allow(unreachable_patterns)]
match self {
TrackSchema::System34 => System34Schema::analyze_elements(metadata),
#[cfg(feature = "amiga")]
TrackSchema::Amiga => AmigaSchema::analyze_elements(metadata),
_ => {
panic!("{}", SCHEMA_ERR)
}
}
}
fn find_next_marker(&self, track: &TrackDataStream, offset: usize) -> Option<(TrackMarker, usize)> {
#[allow(clippy::match_single_binding)]
#[allow(unreachable_patterns)]
match self {
TrackSchema::System34 => System34Schema::find_next_marker(track, offset),
#[cfg(feature = "amiga")]
TrackSchema::Amiga => AmigaSchema::find_next_marker(track, offset),
_ => {
panic!("{}", SCHEMA_ERR)
}
}
}
fn find_marker(
&self,
track: &TrackDataStream,
marker: TrackMarker,
offset: usize,
limit: Option<usize>,
) -> Option<(usize, u16)> {
#[allow(clippy::match_single_binding)]
#[allow(unreachable_patterns)]
match self {
TrackSchema::System34 => System34Schema::find_marker(track, marker, offset, limit),
#[cfg(feature = "amiga")]
TrackSchema::Amiga => AmigaSchema::find_marker(track, marker, offset, limit),
_ => {
panic!("{}", SCHEMA_ERR)
}
}
}
fn match_sector_element<'a>(
&self,
id: impl Into<SectorIdQuery>,
elements: &[TrackElementInstance],
index: usize,
limit: Option<usize>,
) -> TrackSectorScanResult {
#[allow(clippy::match_single_binding)]
#[allow(unreachable_patterns)]
match self {
TrackSchema::System34 => System34Schema::find_sector_element(id, elements, index, limit),
#[cfg(feature = "amiga")]
TrackSchema::Amiga => AmigaSchema::find_sector_element(id, elements, index, limit),
_ => {
panic!("{}", SCHEMA_ERR)
}
}
}
fn decode_element(
&self,
track: &TrackDataStream,
element: &TrackElementInstance,
scope: RwScope,
buf: &mut [u8],
) -> (Range<usize>, Option<IntegrityCheck>) {
#[allow(clippy::match_single_binding)]
#[allow(unreachable_patterns)]
match self {
TrackSchema::System34 => System34Schema::decode_element(track, element, scope, buf),
#[cfg(feature = "amiga")]
TrackSchema::Amiga => AmigaSchema::decode_element(track, element, scope, buf),
_ => {
panic!("{}", SCHEMA_ERR)
}
}
}
fn encode_element(
&self,
track: &mut TrackDataStream,
element: &TrackElementInstance,
scope: RwScope,
buf: &[u8],
) -> usize {
#[allow(clippy::match_single_binding)]
#[allow(unreachable_patterns)]
match self {
TrackSchema::System34 => System34Schema::encode_element(track, element, scope, buf),
#[cfg(feature = "amiga")]
TrackSchema::Amiga => AmigaSchema::encode_element(track, element, scope, buf),
_ => {
panic!("{}", SCHEMA_ERR)
}
}
}
/*
fn find_element(&self, track: &TrackDataStream, element: TrackElement, offset: usize) -> Option<usize> {
#[allow(clippy::match_single_binding)]
match self {
TrackSchema::System34 => System34Schema::find_element(track, element, offset),
#[cfg(feature = "amiga")]
TrackSchema::Amiga => todo!(),
}
}
*/
fn scan_for_markers(&self, track: &TrackDataStream) -> Vec<TrackMarkerItem> {
#[allow(clippy::match_single_binding)]
#[allow(unreachable_patterns)]
match self {
TrackSchema::System34 => System34Schema::scan_markers(track),
#[cfg(feature = "amiga")]
TrackSchema::Amiga => AmigaSchema::scan_markers(track),
_ => {
panic!("{}", SCHEMA_ERR)
}
}
}
fn scan_for_elements(
&self,
track: &mut TrackDataStream,
markers: Vec<TrackMarkerItem>,
) -> Vec<TrackElementInstance> {
#[allow(clippy::match_single_binding)]
#[allow(unreachable_patterns)]
match self {
TrackSchema::System34 => System34Schema::scan_metadata(track, markers),
#[cfg(feature = "amiga")]
TrackSchema::Amiga => AmigaSchema::scan_for_elements(track, markers),
_ => {
panic!("{}", SCHEMA_ERR)
}
}
}
fn create_clock_map(&self, markers: &[TrackMarkerItem], clock_map: &mut BitVec) {
#[allow(clippy::match_single_binding)]
#[allow(unreachable_patterns)]
match self {
TrackSchema::System34 => System34Schema::create_clock_map(markers, clock_map),
#[cfg(feature = "amiga")]
TrackSchema::Amiga => AmigaSchema::create_clock_map(markers, clock_map),
_ => {
panic!("{}", SCHEMA_ERR)
}
}
}
fn crc_u16(&self, track: &mut TrackDataStream, bit_index: usize, end: usize) -> (u16, u16) {
#[allow(clippy::match_single_binding)]
#[allow(unreachable_patterns)]
match self {
TrackSchema::System34 => System34Schema::crc16(track, bit_index, end),
#[cfg(feature = "amiga")]
TrackSchema::Amiga => todo!(),
_ => {
panic!("{}", SCHEMA_ERR)
}
}
}
fn crc_u16_buf(&self, data: &[u8]) -> (u16, u16) {
#[allow(clippy::match_single_binding)]
#[allow(unreachable_patterns)]
match self {
TrackSchema::System34 => System34Schema::crc16_bytes(data),
#[cfg(feature = "amiga")]
TrackSchema::Amiga => todo!(),
_ => {
panic!("{}", SCHEMA_ERR)
}
}
}
fn build_element_map(&self, elements: &[TrackElementInstance]) -> SourceMap {
#[allow(clippy::match_single_binding)]
#[allow(unreachable_patterns)]
match self {
TrackSchema::System34 => System34Schema::build_element_map(elements),
#[cfg(feature = "amiga")]
TrackSchema::Amiga => AmigaSchema::build_element_map(elements),
_ => {
panic!("{}", SCHEMA_ERR)
}
}
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/track_schema/mod.rs | src/track_schema/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! The `track_schema` module defines a [TrackSchema] enum that represents a track schema used to
//! interpret the layout of a track, and a [TrackSchemaParser] trait that defines the interface for
//! track schema parsers.
//!
//! A track schema parser is responsible for interpreting the layout of syncs, gaps, and address
//! markers on a track, relying on a track's [TrackCodec] to decode the actual underlying data
//! representation.
//! However, a [TrackSchemaParser] implementation need not be fully encoding agnostic - a certain
//! schema may only ever have been paired with specific encoding types.
//!
//! A [TrackSchema] also defines the layout of a track for formatting operations, and defines any
//! applicable CRC algorithm.
//!
//! A track schema parser typically maintains no state. Since this is not object-compatible, the
//! [TrackSchemaParser] trait is implemented on the [TrackSchema] enum directly.
//!
//! A disk image may contain tracks with varying [TrackSchema] values, such as dual-format disks
//! (Amiga/PC), (Atari ST/Amiga).
use std::{
fmt::{self, Display, Formatter},
ops::Range,
};
#[cfg(feature = "amiga")]
pub mod amiga;
mod dispatch;
mod meta_encoding;
pub mod system34;
use crate::{
bitstream_codec::{mfm::MFM_BYTE_LEN, TrackDataStream},
track::{TrackAnalysis, TrackSectorScanResult},
track_schema::system34::{System34Element, System34Marker, System34Variant},
types::{chs::DiskChsn, IntegrityCheck, Platform, RwScope, SectorAttributes},
SectorId,
SectorIdQuery,
SectorMapEntry,
};
#[cfg(feature = "amiga")]
use crate::track_schema::amiga::{AmigaElement, AmigaMarker, AmigaVariant};
use crate::source_map::SourceMap;
use bit_vec::BitVec;
pub enum TrackSchemaVariant {
System34(System34Variant),
#[cfg(feature = "amiga")]
Amiga(AmigaVariant),
}
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, strum::EnumIter)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum TrackSchema {
#[default]
System34,
#[cfg(feature = "amiga")]
Amiga,
}
impl Display for TrackSchema {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
TrackSchema::System34 => write!(f, "IBM System34"),
#[cfg(feature = "amiga")]
TrackSchema::Amiga => write!(f, "Amiga"),
}
}
}
impl TryFrom<Platform> for TrackSchema {
type Error = ();
/// Convert a `Platform` to a `TrackSchema`. This provides a sensible default, but is not
/// exhaustive as a platform may use multiple track schemas.
fn try_from(platform: Platform) -> Result<TrackSchema, Self::Error> {
match platform {
Platform::IbmPc => Ok(TrackSchema::System34),
#[cfg(feature = "amiga")]
Platform::Amiga => Ok(TrackSchema::Amiga),
#[cfg(not(feature = "amiga"))]
Platform::Amiga => Err(()),
#[cfg(feature = "macintosh")]
Platform::Macintosh => Err(()),
#[cfg(not(feature = "macintosh"))]
Platform::Macintosh => Err(()),
#[cfg(feature = "atari_st")]
Platform::AtariSt => Ok(TrackSchema::System34),
#[cfg(not(feature = "atari_st"))]
Platform::AtariSt => Err(()),
#[cfg(feature = "apple_ii")]
Platform::AppleII => Err(()),
#[cfg(not(feature = "apple_ii"))]
Platform::AppleII => Err(()),
}
}
}
/// A `TrackMetadata` structure represents a collection of metadata items found in a track,
/// represented as `TrackElementInstance`s.
#[derive(Clone, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct TrackMetadata {
pub(crate) items: Vec<TrackElementInstance>,
pub(crate) sector_ids: Vec<SectorId>,
pub(crate) valid_sector_ids: Vec<SectorId>,
pub(crate) element_map: SourceMap,
}
impl TrackMetadata {
/// Create a new `DiskStructureMetadata` instance from the specified items.
pub(crate) fn new(items: Vec<TrackElementInstance>, schema: TrackSchema) -> Self {
TrackMetadata {
sector_ids: Self::find_sector_ids(&items),
valid_sector_ids: Self::find_valid_sector_ids(&items),
element_map: schema.build_element_map(&items),
items,
}
}
/// Clear all metadata items from the collection.
pub(crate) fn clear(&mut self) {
self.items.clear();
self.sector_ids.clear();
self.valid_sector_ids.clear();
}
/// Return a vector of metadata items contained in the collection as `TrackElementInstance`s.
pub fn elements(&self) -> &[TrackElementInstance] {
&self.items
}
/// Add a new `TrackElementInstance` to the collection.
/// This method is not currently public as it does not make sense for the user to add to
/// the metadata collection directly.
#[allow(dead_code)]
pub(crate) fn add_element(&mut self, item: TrackElementInstance) {
self.items.push(item);
}
/// Get the `TrackElementInstance` at the specified element index, or `None` if the index is
/// out of bounds.
pub fn item(&self, index: usize) -> Option<&TrackElementInstance> {
self.items.get(index)
}
/// Return a reference to the innermost metadata item that contains the specified index,
/// along with a count of the total number of matching items (to handle overlapping items).
/// # Arguments
/// * `index` - The bit index to match.
/// # Returns
/// A tuple containing a reference to the metadata item and the count of matching items, or
/// `None` if no match was found.
pub fn item_at(&self, index: usize) -> Option<(&TrackElementInstance, u32)> {
let mut ref_stack = Vec::new();
let mut match_ct = 0;
for item in &self.items {
if item.start <= index && item.end >= index {
ref_stack.push(item);
match_ct += 1;
}
}
if ref_stack.is_empty() {
None
}
else {
// Sort by smallest element to allow address markers to have highest
// priority.
ref_stack.sort_by(|a, b| a.start.cmp(&b.start));
Some((ref_stack.pop()?, match_ct))
}
}
/// Attempt a fast hit test via binary search that returns the smallest element containing
/// the specified bit index.
pub(crate) fn hit_test(&self, bit_index: usize) -> Option<(&TrackElementInstance, usize)> {
if self.items.is_empty() {
//og::warn!("hit_test() called on empty metadata collection");
return None;
}
// Find the first element where `start` is greater than `bit_index` using binary search
let pos = self
.items
.binary_search_by_key(&bit_index, |e| e.start)
.unwrap_or_else(|x| x);
//log::debug!("pos: {}", pos);
// Search backward and forward from `pos` for candidates containing `bit_index`
let mut result: Option<(&TrackElementInstance, usize)> = None;
let mut smallest_length = usize::MAX;
// Check elements before and including `pos`
for i in (0..=pos.min(self.items.len() - 1)).rev() {
let elem = &self.items[i];
if elem.contains(bit_index) && elem.len() < smallest_length {
smallest_length = elem.len();
result = Some((elem, i));
}
}
// Check elements after `pos`
for i in pos..self.items.len() {
let elem = &self.items[i];
if elem.start > bit_index {
break; // Later elements can't contain `bit_index`
}
if elem.contains(bit_index) && elem.len() < smallest_length {
smallest_length = elem.len();
result = Some((elem, i));
}
}
result
}
/// Return the number of sectors represented in the metadata collection.
/// To be counted, a sector must have a corresponding, valid sector header.
pub fn sector_ct(&self) -> u8 {
let mut sector_ct = 0;
for item in &self.items {
if item.element.is_sector_data_marker() {
sector_ct += 1;
}
}
sector_ct
}
pub fn markers(&self) -> Vec<TrackElementInstance> {
let mut markers = Vec::new();
for item in &self.items {
if item.element.is_sector_data_marker() {
markers.push(*item);
}
}
markers
}
/// Return a vector of [SectorMapEntry]s representing the sectors contained in the metadata
pub fn sector_list(&self) -> Vec<SectorMapEntry> {
let mut sector_list = Vec::new();
for item in &self.items {
#[allow(clippy::unreachable)]
match item.element {
TrackElement::System34(System34Element::SectorData {
chsn,
address_error,
data_error,
deleted,
}) => {
sector_list.push(SectorMapEntry {
chsn,
attributes: SectorAttributes {
address_error,
data_error,
deleted_mark: deleted,
no_dam: false,
},
});
}
#[cfg(feature = "amiga")]
TrackElement::Amiga(AmigaElement::SectorData {
chsn,
address_error,
data_error,
}) => {
sector_list.push(SectorMapEntry {
chsn,
attributes: SectorAttributes {
address_error,
data_error,
deleted_mark: false, // Amiga sectors can't be deleted
no_dam: false, // Can Amiga sectors be missing data? There is no DAM marker to check for.
},
});
}
_ => {}
}
}
sector_list
}
/// Return a reference to a slice of the [SectorId]s represented in the metadata collection.
/// Note that the number of Sector IDs may not match the number of sectors returned by
/// sector_list(), as not all sector headers may correspond to valid sector data, especially
/// on copy-protected disks.
pub fn sector_ids(&self) -> &[SectorId] {
&self.sector_ids
}
/// Return a reference to a slice of the [SectorId]s represented in the metadata collection
/// that have valid sector headers (i.e. no address errors).
/// Note that the number of Sector IDs may not match the number of sectors returned by
/// sector_list(), as not all sector headers may correspond to valid sector data, especially
/// on copy-protected disks.
pub fn valid_sector_ids(&self) -> &[SectorId] {
&self.valid_sector_ids
}
/// Return a vector of Sector IDs as [SectorId] represented in the metadata collection.
/// Note that the number of Sector IDs may not match the number of sectors returned by
/// sector_list(), as not all sector headers may correspond to valid sector data, especially
/// on copy-protected disks.
fn find_valid_sector_ids(items: &[TrackElementInstance]) -> Vec<SectorId> {
let mut sector_ids: Vec<SectorId> = Vec::new();
for item in items {
#[allow(clippy::unreachable)]
match item.element {
TrackElement::System34(System34Element::SectorHeader {
chsn, address_error, ..
}) if address_error == false => {
sector_ids.push(chsn);
}
#[cfg(feature = "amiga")]
TrackElement::Amiga(AmigaElement::SectorHeader {
chsn, address_error, ..
}) if address_error == false => {
sector_ids.push(chsn);
}
_ => {}
}
}
sector_ids
}
/// Return a vector of Sector IDs as [SectorId] represented in the metadata collection.
/// Note that the number of Sector IDs may not match the number of sectors returned by
/// sector_list(), as not all sector headers may correspond to valid sector data, especially
/// on copy-protected disks.
fn find_sector_ids(items: &[TrackElementInstance]) -> Vec<SectorId> {
let mut sector_ids: Vec<SectorId> = Vec::new();
for item in items {
#[allow(clippy::unreachable)]
match item.element {
TrackElement::System34(System34Element::SectorHeader { chsn, .. }) => {
sector_ids.push(chsn);
}
#[cfg(feature = "amiga")]
TrackElement::Amiga(AmigaElement::SectorHeader { chsn, .. }) => {
sector_ids.push(chsn);
}
_ => {}
}
}
sector_ids
}
/// Return a vector of data ranges representing the start and end bit indices of sector data.
/// Primarily used as helper for disk visualization.
/// # Returns
/// A vector of tuples containing the start and end bit indices of sector data.
pub fn data_ranges(&self) -> Vec<Range<usize>> {
let mut data_ranges = Vec::new();
for instance in &self.items {
match instance.element {
TrackElement::System34(System34Element::SectorData { .. }) => {
// Should the data range for a sector include the address mark?
// For now we will exclude it.
data_ranges.push(Range::from(instance.start + (4 * MFM_BYTE_LEN)..instance.end));
}
#[cfg(feature = "amiga")]
TrackElement::Amiga(AmigaElement::SectorData { .. }) => {
data_ranges.push(Range::from(instance.start..instance.end));
}
_ => {}
}
}
data_ranges
}
pub fn header_ranges(&self) -> Vec<Range<usize>> {
let mut header_ranges: Vec<Range<usize>> = Vec::new();
for item in &self.items {
if item.element.is_sector_header() {
header_ranges.push(Range::from(item.start..item.end));
}
}
header_ranges
}
pub fn marker_ranges(&self) -> Vec<Range<usize>> {
let mut marker_ranges: Vec<Range<usize>> = Vec::new();
for item in &self.items {
if let TrackElement::System34(System34Element::Marker { .. }) = item.element {
marker_ranges.push(Range::from(item.start..item.end));
}
}
marker_ranges
}
}
#[derive(Copy, Clone, Debug)]
pub struct TrackMarkerItem {
pub(crate) elem_type: TrackMarker,
pub(crate) start: usize,
}
/// A `TrackElementInstance` represents a single element of a track schema, such as an address marker
/// or data marker. It encodes the start and end of the element (as raw bitstream offsets),
/// and optionally the status of any CRC field (valid for IDAM and DAM marks)
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct TrackElementInstance {
pub(crate) element: TrackElement,
pub(crate) start: usize,
pub(crate) end: usize,
pub(crate) chsn: Option<DiskChsn>,
}
impl TrackElementInstance {
pub fn contains(&self, bit_index: usize) -> bool {
self.start <= bit_index && self.end >= bit_index
}
pub fn range(&self) -> Range<usize> {
self.start..self.end
}
pub fn len(&self) -> usize {
self.end - self.start
}
}
/// A [TrackMarker] represents an encoding marker found in a track, such as an address marker or
/// data marker. Markers are used by FM and MFM encodings, utilizing unique clock bit patterns to
/// create an out-of-band signal for synchronization.
///
/// When parsing a track, [TrackMarker]s are discovered first, effectively dividing a track into
/// regions, which are then used to discover [TrackElement]s to populate a [TrackMetadata]
/// collection.
///
/// In the event that FM/MFM markers are not applicable to a track schema, synthetic markers can
/// be created to divide tracks into regions for parsing metadata.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum TrackMarker {
System34(System34Marker),
#[cfg(feature = "amiga")]
Amiga(AmigaMarker),
Placeholder,
}
/// A [GenericTrackElement] represents track elements in a generic fashion, not specific to a
/// particular track schema. This is useful for operations that do not require schema-specific
/// knowledge, such as disk visualization, which maps [GenericTrackElement]s to colors.
///
/// Elements defined by [TrackSchemaParser] implementations should implement `From<T>` to provide
/// a conversion to [GenericTrackElement]. Not all track schemas may use all generic elements -
/// this is fine!
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum GenericTrackElement {
NullElement,
Marker,
SectorHeader,
SectorBadHeader,
SectorData,
SectorDeletedData,
SectorBadData,
SectorBadDeletedData,
}
impl Display for GenericTrackElement {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
use GenericTrackElement::*;
match self {
NullElement => write!(f, "Null"),
Marker => write!(f, "Marker"),
SectorHeader => write!(f, "Sector Header"),
SectorBadHeader => write!(f, "Sector Header (Bad)"),
SectorData => write!(f, "Sector Data"),
SectorDeletedData => write!(f, "Deleted Sector Data"),
SectorBadData => write!(f, "Sector Data (Bad)"),
SectorBadDeletedData => write!(f, "Deleted Sector Data (Bad)"),
}
}
}
/// A [TrackElement] encompasses the concept of a track 'element', representing any notable region
/// of the track such as markers, headers, sector data, syncs and gaps. [TrackElement]s may overlap
/// and be nested within each other.
/// [TrackMarker]s are used to discover and classify [TrackElement]s, and some [TrackElements]
/// represent markers.
/// A [TrackElement] contains only metadata. Its position and size are represented in by a
/// [TrackElementInstance].
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum TrackElement {
System34(System34Element),
#[cfg(feature = "amiga")]
Amiga(AmigaElement),
Placeholder,
}
/// Convert a `TrackElement` to a `TrackGenericElement`.
impl From<TrackElement> for GenericTrackElement {
fn from(elem: TrackElement) -> Self {
match elem {
TrackElement::System34(sys34elem) => sys34elem.into(),
#[cfg(feature = "amiga")]
TrackElement::Amiga(ami_elem) => ami_elem.into(),
_ => GenericTrackElement::NullElement,
}
}
}
impl TrackElement {
pub fn is_marker(&self) -> bool {
match self {
TrackElement::System34(System34Element::Marker { .. }) => true,
#[cfg(feature = "amiga")]
TrackElement::Amiga(AmigaElement::Marker { .. }) => true,
_ => false,
}
}
pub fn is_sector_header(&self) -> bool {
matches!(self, TrackElement::System34(System34Element::SectorHeader { .. }))
}
pub fn is_sector_data_marker(&self) -> bool {
match self {
TrackElement::System34(elem) => elem.is_sector_data_marker(),
_ => false,
}
}
pub fn is_sector_data(&self) -> bool {
match self {
TrackElement::System34(elem) => elem.is_sector_data(),
_ => false,
}
}
pub fn chsn(&self) -> Option<DiskChsn> {
match self {
TrackElement::System34(System34Element::SectorHeader { chsn, .. }) => Some(*chsn),
TrackElement::System34(System34Element::SectorData { chsn, .. }) => Some(*chsn),
#[cfg(feature = "amiga")]
TrackElement::Amiga(AmigaElement::SectorHeader { chsn, .. }) => Some(*chsn),
#[cfg(feature = "amiga")]
TrackElement::Amiga(AmigaElement::SectorData { chsn, .. }) => Some(*chsn),
_ => None,
}
}
pub fn size(&self) -> usize {
match self {
TrackElement::System34(elem) => elem.size(),
#[cfg(feature = "amiga")]
TrackElement::Amiga(elem) => elem.size(),
_ => 0,
}
}
pub fn range(&self, scope: RwScope) -> Option<Range<usize>> {
match self {
TrackElement::System34(element) => Some(element.range(scope)),
#[cfg(feature = "amiga")]
TrackElement::Amiga(element) => Some(element.range(scope)),
_ => None,
}
}
}
/// The `TrackSchemaParser` trait defines the interface that must be implemented by any track schema
/// parser.
/// These methods are responsible for finding patterns of bytes within a bitstream, locating
/// markers and elements, and scanning a track for metadata.
pub(crate) trait TrackSchemaParser: Send + Sync {
/*
/// Find the provided pattern of decoded data bytes within the specified bitstream, starting at
/// `offset` bits into the track.
/// The pattern length is limited to 8 characters.
/// # Arguments
/// * `track` - The bitstream to search for the pattern.
/// * `pattern` - The pattern to search for as a slice of bytes.
/// * `index` - The bit index to start searching at.
/// # Returns
/// The bit offset of the pattern if found, otherwise `None`.
fn find_data_pattern(&self, track: &TrackDataStream, pattern: &[u8], index: usize) -> Option<usize>;
*/
/// Analyze the elements in the specified track and return a [TrackAnalysis] structure containing
/// the results of the analysis. This method is responsible for identifying any 'nonstandard'
/// conditions on the track that may affect the ability to represent the track in any given
/// disk image format.
fn analyze_elements(&self, elements: &TrackMetadata) -> TrackAnalysis;
/// TODO: we could combine find_next_marker and find_marker if the latter took an Option<TrackMarker>
/// Find the next marker (of any kind) within the specified bitstream, starting at `index` bits
/// into the track.
/// # Arguments
/// * `track` - The [TrackDataStream] to search for the marker.
/// * `index` - The bit index to start searching at.
/// # Returns
/// A tuple containing the marker type and the bit offset of the marker if found, otherwise `None`.
fn find_next_marker(&self, track: &TrackDataStream, index: usize) -> Option<(TrackMarker, usize)>;
/// Find a specific marker within the specified [TrackDataStream], starting at `index` bits
/// into the track.
/// # Arguments
/// * `stream` - The [TrackDataStream] to search for the marker.
/// * `marker` - The [TrackMarker] to search for.
/// * `index` - The bit index to start searching at.
/// * `limit` - An optional bit index to terminate the search at.
/// # Returns
/// A tuple containing the bit offset of the marker and the marker value if found, otherwise `None`.
fn find_marker(
&self,
stream: &TrackDataStream,
marker: TrackMarker,
index: usize,
limit: Option<usize>,
) -> Option<(usize, u16)>;
/// Match the element in `elements` that corresponds to sector data specified by `id` within the
/// list of track element instances. This function does not directly read the stream - so
/// valid metadata must have been previously scanned before it can be used.
///
/// A track schema may not have a concept of sectors, in which case this method should simply
/// return `None`.
///
/// # Arguments
/// * `stream` - The [TrackDataStream] to search for the marker.
/// * `id` - The [SectorIdQuery] to use as matching criteria.
/// * `index` - The bit index within the track to start searching at.
/// * `limit` - An optional bit index to terminate the search at.
/// # Returns
/// A [TrackSectorScanResult] containing the result of the sector search.
fn match_sector_element(
&self,
id: impl Into<SectorIdQuery>,
elements: &[TrackElementInstance],
index: usize,
limit: Option<usize>,
) -> TrackSectorScanResult;
/// Decode the element specified by `TrackElementInstance` from the track data stream into the
/// provided buffer. The data may be transformed or decoded as necessary depending on the
/// schema implementation - for example, Amiga sector data elements will be reconstructed from
/// odd/even bit pairs.
///
/// Not all schemas will support decoding all elements. In this case, the method should return
/// 0.
/// # Arguments
/// * `stream` - The [TrackDataStream] to read the element from.
/// * `item` - The [TrackElementInstance] specifying the element to read.
/// * `buf` - A mutable reference to a byte slice to store the element data.
/// This buffer should be at least `TrackElement::size()` bytes long.
/// * `scope` - The read/write scope of the operation. An element may be partially decoded
/// by limiting the scope. This is useful, for example, when reading only the
/// sector data of a sector data element.
/// # Returns
/// * A [Range] representing the start and end byte indices into the buffer corresponding to
/// the requested `scope`.
/// * An optional [IntegrityCheck] value representing the integrity of the data read.
/// Different track schemas may have different ways of verifying data integrity.
fn decode_element(
&self,
stream: &TrackDataStream,
element: &TrackElementInstance,
scope: RwScope,
buf: &mut [u8],
) -> (Range<usize>, Option<IntegrityCheck>);
/// Encode the element specified by `TrackElementInstance` from the track data stream from the
/// provided buffer. The data may be transformed or encoded as necessary depending on the
/// schema implementation - for example, marker elements will receive appropriate clock patterns
/// and Amiga sector data elements will be separated into odd/even bit pairs.
///
/// Not all schemas will support encoding all elements. In this case, the method should return
/// 0.
/// # Arguments
/// * `stream` - The [TrackDataStream] to write the element to.
/// * `item` - The [TrackElementInstance] specifying the element to write.
/// * `buf` - A reference to a byte slice that represents the element data.
/// * `scope` - The read/write scope of the operation. An element may be partially updated
/// by limiting the scope.
/// # Returns
/// The number of bytes written to the track.
fn encode_element(
&self,
stream: &mut TrackDataStream,
item: &TrackElementInstance,
scope: RwScope,
buf: &[u8],
) -> usize;
/*
/// Find the specified `TrackElement` within the specified bitstream, starting at `offset` bits
/// into the track.
/// # Arguments
/// * `stream` - The [TrackDataStream] to search for the element.
/// * `element` - The element to search for as a `TrackElement` enum.
/// * `index` - The bit index to start searching at.
/// # Returns
/// The bit offset of the element if found, otherwise `None`.
fn find_element(&self, track: &TrackDataStream, element: TrackElement, index: usize) -> Option<usize>;
*/
/// Scan the specified track for markers.
/// # Arguments
/// * `stream` - The [TrackDataStream] to scan for markers
/// # Returns
/// A vector of [TrackMarkerItem]s representing the markers found in the track. If no markers
/// are found, an empty vector is returned.
fn scan_for_markers(&self, track: &TrackDataStream) -> Vec<TrackMarkerItem>;
/// Scan the specified track for [TrackElements].
/// # Arguments
/// * `track` - The [TrackDataStream] to scan for metadata.
/// * `markers` - A vector of [TrackMarkerItem]s representing the markers found in the track.
/// # Returns
/// A vector of [TrackElementInstance] instances representing the metadata found in the track.
/// If no metadata is found, an empty vector is returned.
fn scan_for_elements(
&self,
track: &mut TrackDataStream,
markers: Vec<TrackMarkerItem>,
) -> Vec<TrackElementInstance>;
/// Create a clock map from the specified markers. A clock map enables random access into an encoded
/// bitstream containing both clock and data bits.
/// # Arguments
/// * `markers` - A vector of [TrackMarkerItem]s representing the markers found in the track.
/// * `clock_map` - A mutable reference to a [BitVec] to store the clock map.
fn create_clock_map(&self, markers: &[TrackMarkerItem], clock_map: &mut BitVec);
/// Calculate a 16-bit CRC for a region of the specified track. The region is assumed to end with
/// a CRC value.
/// # Arguments
/// * `track` - The [TrackDataStream] to calculate the CRC for.
/// * `index` - The bit index to start calculating the CRC from.
/// * `index_end` - The bit index to stop calculating the CRC at.
/// # Returns
/// A tuple containing the CRC value as specified by the track data and the calculated CRC
/// value.
fn crc_u16(&self, track: &mut TrackDataStream, index: usize, index_end: usize) -> (u16, u16);
/// Calculate a 16-bit CRC for the specified byte slice. The end of the slice should contain the
/// encoded CRC.
/// # Arguments
/// * `buf` - A byte slice over which to calculate the CRC.
/// # Returns
/// A tuple containing the CRC value contained in the byte slice, and the calculated CRC value.
fn crc_u16_buf(&self, buf: &[u8]) -> (u16, u16);
fn build_element_map(&self, elements: &[TrackElementInstance]) -> SourceMap;
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/track_schema/amiga.rs | src/track_schema/amiga.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! An indirect implementation of the [TrackSchemaParser] trait for the Amiga
//! trackdisk track schema.
//!
//! Amiga trackdisk images are MFM encoded and typically contain sequential
//! sectors from 0-10 without the inter-sector gaps seen on IBM PC diskettes.
//!
//! The MFM encoding strategy is also different from the IBM PC in that the
//! Amiga stores odd and even bits of data in separate data blocks which
//! must be reconstructed. Due to this requirement, track data must be read
//! and written via the [TrackDataStream] trait so that the interleaved data
//! can be properly encoded/decoded for schemas such as this that require
//! it.
//!
//! Good documentation on the Amiga trackdisk format can be found at:
//! http://lclevy.free.fr/adflib/adf_info.html
//!
use crate::{
bitstream_codec::{
mfm::{MfmCodec, MFM_BYTE_LEN},
MarkerEncoding,
TrackDataStream,
},
io::{Read, Seek, SeekFrom},
mfm_offset,
source_map::{OptionalSourceMap, SourceMap, SourceValue},
track::{TrackAnalysis, TrackSectorScanResult},
track_schema::{
system34::System34Element,
GenericTrackElement,
TrackElement,
TrackElementInstance,
TrackMarker,
TrackMarkerItem,
TrackMetadata,
},
types::{chs::DiskChsn, IntegrityCheck, IntegrityField, RwScope},
util::crc_ibm_3740,
DiskImageError,
FoxHashSet,
SectorIdQuery,
};
use bit_vec::BitVec;
use std::ops::Range;
pub const DEFAULT_TRACK_SIZE_BYTES: usize = 6250;
pub const GAP_BYTE: u8 = 0x4E;
pub const SYNC_BYTE: u8 = 0;
pub const IBM_GAP3_DEFAULT: usize = 22;
pub const IBM_GAP4A: usize = 80;
pub const IBM_GAP1: usize = 50;
pub const IBM_GAP2: usize = 22;
pub const ISO_GAP1: usize = 32;
pub const ISO_GAP2: usize = 22;
pub const SYNC_LEN: usize = 12;
pub const PERPENDICULAR_GAP1: usize = 50;
pub const PERPENDICULAR_GAP2: usize = 41;
// Pre-encoded markers for IAM, IDAM, DAM and DDAM.
//pub const IAM_MARKER: u64 = 0x5224_5224_5224_5552;
//pub const IDAM_MARKER: u64 = 0x4489_4489_4489_5554;
pub const AMIGA_DAM_MARKER: u64 = 0x2AAA_AAAA_4489_4489;
pub const AMIGA_DAM_MASK: u64 = 0x7FFF_FFFF_FFFF_FFFF;
pub const DDAM_MARKER: u64 = 0x4489_4489_4489_5548;
pub const ANY_MARKER: u64 = 0x4489_4489_4489_0000;
pub const CLOCK_MASK: u64 = 0xAAAA_AAAA_AAAA_0000;
pub const DATA_MARK: u64 = 0x5555_5555_5555_5555;
pub const MARKER_MASK: u64 = 0xFFFF_FFFF_FFFF_0000;
pub const MFM_MARKER_CLOCK: u64 = 0x0220_0220_0220_0000;
pub const IAM_MARKER_BYTES: [u8; 4] = [0xC2, 0xC2, 0xC2, 0xFC];
pub const IDAM_MARKER_BYTES: [u8; 4] = [0xA1, 0xA1, 0xA1, 0xFE];
pub const DAM_MARKER_BYTES: [u8; 4] = [0xA1, 0xA1, 0xA1, 0xFB];
pub const DDAM_MARKER_BYTES: [u8; 4] = [0xA1, 0xA1, 0xA1, 0xF8];
/*#[derive(Debug)]
pub struct System34FormatBuffer {
pub chs_vec: Vec<DiskChsn>,
}
impl From<&[u8]> for System34FormatBuffer {
fn from(buffer: &[u8]) -> Self {
let mut chs_vec = Vec::new();
for i in (0..buffer.len()).step_by(4) {
let c = buffer[i];
let h = buffer[i + 1];
let s = buffer[i + 2];
let n = buffer[i + 3];
chs_vec.push(DiskChsn::new(c as u16, h, s, n));
}
System34FormatBuffer { chs_vec }
}
}*/
/// Not sure if there are any others to define, but if GCR has a different format, we can add it here.
pub enum AmigaVariant {
MfmTrackDisk,
}
#[derive(Default, Debug)]
pub struct AmigaSectorQuery {
pub t: Option<u8>, // Track number
pub s: u8, // Sector number/id
pub s_to_end: Option<u8>, // Sectors until end of track
}
/// The minimal specifier for an Amiga sector is a sector number, so we can convert from a u8.
impl From<u8> for AmigaSectorQuery {
fn from(s: u8) -> Self {
AmigaSectorQuery {
t: None,
s,
s_to_end: None,
}
}
}
impl From<AmigaSectorQuery> for SectorIdQuery {
fn from(asq: AmigaSectorQuery) -> Self {
// Assume Amiga disks are double-sided
let c = asq.t.map(|t| (t / 2) as u16);
let h = asq.t.map(|t| t % 2);
SectorIdQuery::new(c, h, asq.s, Some(2))
}
}
#[allow(dead_code)]
#[derive(Default, Debug)]
struct AmigaSectorId {
fmt: u8, // Usually 0xFF (Amiga v1.0 format)
tt: u8, // Track number (lba-type address)
ss: u8, // Sector number (not necessarily consecutive)
sg: u8, // Sectors until end (including this one)
}
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum AmigaMarker {
Sector,
}
impl From<AmigaMarker> for u64 {
fn from(marker: AmigaMarker) -> u64 {
match marker {
AmigaMarker::Sector => AMIGA_DAM_MARKER,
}
}
}
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum AmigaElement {
Marker(AmigaMarker, Option<bool>),
SectorHeader { chsn: DiskChsn, address_error: bool, data_missing: bool },
SectorData { chsn: DiskChsn, address_error: bool, data_error: bool },
}
impl From<AmigaElement> for GenericTrackElement {
fn from(elem: AmigaElement) -> Self {
use AmigaElement::*;
match elem {
Marker(_, _) => GenericTrackElement::Marker,
SectorHeader { address_error, .. } => match address_error {
true => GenericTrackElement::SectorBadHeader,
false => GenericTrackElement::SectorHeader,
},
SectorData {
address_error,
data_error,
..
} => match address_error || data_error {
true => GenericTrackElement::SectorBadData,
false => GenericTrackElement::SectorData,
},
}
}
}
impl AmigaElement {
pub fn size(&self) -> usize {
use AmigaElement::*;
match self {
Marker(_, _) => {
// Amiga marker is 2 bytes (0xA1, 0xA1)
2
}
SectorData { .. } => {
// Sector data comprises:
// - data checksum (4 bytes)
// - data (512 bytes)
4 + 512
}
SectorHeader { .. } => {
// Sector header comprises:
// - marker (2 bytes)
// - info (4 bytes)
// - label (16 bytes)
// - header checksum (4 bytes)
2 + 4 + 16 + 4
}
}
}
/// Provide a subset data range corresponding to the scope requested for the current element.
pub fn range(&self, scope: RwScope) -> Range<usize> {
// Most elements don't support a scope.
use AmigaElement::*;
match (self, scope) {
(SectorData { .. }, RwScope::DataOnly) => {
// Data scope is the data portion of the sector only.
// Skip the data checksum field
4..self.size()
}
(SectorData { .. }, RwScope::CrcOnly) => {
// CRC scope is the data checksum field only (first 4 bytes of sector data).
0..4
}
(SectorHeader { .. }, RwScope::CrcOnly) => {
// CRC scope is the header checksum field only (last 4 bytes of header)
self.size() - 4..self.size()
}
(_, _) => 0..self.size(),
}
}
pub fn is_sector_data_marker(&self) -> bool {
matches!(self, AmigaElement::Marker(AmigaMarker::Sector, _))
}
pub fn is_sector_data(&self) -> bool {
matches!(self, AmigaElement::SectorData { .. })
}
pub fn is_sector_id(&self) -> (u8, bool) {
match self {
AmigaElement::SectorHeader {
chsn, address_error, ..
} => match address_error {
true => (0, false),
false => (chsn.s(), true),
},
_ => (0, false),
}
}
}
pub struct AmigaSchema;
impl AmigaSchema {
// System34 masks clock bits in the MFM encoding of address marks.
// This is to help differentiate markers from data.
const MFM_MARKER_CLOCK_MASK: u64 = 0x5555_5555_5555_FFFF;
const MFM_MARKER_CLOCK: u64 = 0x0088_0088_0088_0000;
#[inline]
pub fn encode_marker(pattern: &[u8]) -> u64 {
let marker = MfmCodec::encode_marker(pattern);
marker & Self::MFM_MARKER_CLOCK_MASK | Self::MFM_MARKER_CLOCK
}
/* pub fn format_track_as_bytes(
standard: System34Standard,
bitcell_ct: usize,
format_buffer: Vec<DiskChsn>,
fill_pattern: &[u8],
gap3: usize,
) -> Result<System34FormatResult, DiskImageError> {
if fill_pattern.is_empty() {
log::error!("Fill pattern cannot be empty.");
return Err(DiskImageError::ParameterError);
}
let track_byte_ct = (bitcell_ct + MFM_BYTE_LEN - 1) / MFM_BYTE_LEN;
log::trace!(
"format_track_as_bytes(): Formatting track with {} bitcells, {} bytes",
bitcell_ct,
track_byte_ct
);
let mut track_bytes: Vec<u8> = Vec::with_capacity(track_byte_ct);
let mut markers = Vec::new();
if matches!(standard, System34Standard::Ibm | System34Standard::Perpendicular) {
// Write out GAP0, sync,IAM marker, and GAP1.
track_bytes.extend_from_slice(&[GAP_BYTE; IBM_GAP4A]); // GAP0
track_bytes.extend_from_slice(&[SYNC_BYTE; SYNC_LEN]); // Sync
markers.push((System34Marker::Iam, track_bytes.len()));
}
else {
// Just write Gap1 for ISO standard, there is no IAM marker.
track_bytes.extend_from_slice(&[GAP_BYTE; ISO_GAP1]);
}
let mut pat_cursor = 0;
for sector in format_buffer {
track_bytes.extend_from_slice(&[SYNC_BYTE; SYNC_LEN]); // Write initial sync.
markers.push((System34Marker::Idam, track_bytes.len()));
let idam_crc_offset = track_bytes.len();
track_bytes.extend_from_slice(IDAM_MARKER_BYTES.as_ref()); // Write IDAM marker.
// Write CHSN bytes.
track_bytes.push(sector.c() as u8);
track_bytes.push(sector.h());
track_bytes.push(sector.s());
track_bytes.push(sector.n());
// Write CRC word.
//log::error!("Calculating crc over : {:X?}", &track_bytes[idam_crc_offset..]);
let crc16 = crc_ibm_3740(&track_bytes[idam_crc_offset..], None);
track_bytes.extend_from_slice(&crc16.to_be_bytes());
// Write GAP2.
track_bytes.extend_from_slice(&vec![GAP_BYTE; standard.gap2()]);
// Write SYNC.
track_bytes.extend_from_slice(&[SYNC_BYTE; SYNC_LEN]);
// Write DAM marker.
markers.push((System34Marker::Dam, track_bytes.len()));
let dam_crc_offset = track_bytes.len();
track_bytes.extend_from_slice(DAM_MARKER_BYTES.as_ref());
// Write sector data using provided pattern buffer.
if fill_pattern.len() == 1 {
track_bytes.extend_from_slice(&vec![fill_pattern[0]; sector.n_size()]);
}
else {
let mut sector_buffer = Vec::with_capacity(sector.n_size());
while sector_buffer.len() < sector.n_size() {
let remain = sector.n_size() - sector_buffer.len();
let copy_pat = if pat_cursor + remain <= fill_pattern.len() {
&fill_pattern[pat_cursor..pat_cursor + remain]
}
else {
&fill_pattern[pat_cursor..]
};
sector_buffer.extend_from_slice(copy_pat);
//log::warn!("format: sector_buffer: {:X?}", sector_buffer);
pat_cursor = (pat_cursor + copy_pat.len()) % fill_pattern.len();
}
//log::warn!("sector buffer is now {} bytes", sector_buffer.len());
track_bytes.extend_from_slice(§or_buffer);
}
//log::warn!("format: track_bytes: {:X?}", track_bytes);
//log::warn!("track_bytes is now {} bytes", track_bytes.len());
// Write CRC word.
let crc16 = crc_ibm_3740(&track_bytes[dam_crc_offset..], None);
track_bytes.extend_from_slice(&crc16.to_be_bytes());
// Write GAP3.
track_bytes.extend_from_slice(&vec![GAP_BYTE; gap3]);
}
// Fill rest of track with GAP4B.
if track_bytes.len() < track_byte_ct {
track_bytes.extend_from_slice(&vec![GAP_BYTE; track_byte_ct - track_bytes.len()]);
}
if track_bytes.len() > track_byte_ct {
log::warn!(
"format_track_as_bytes(): Format operation passed index. Truncating track to {} bytes",
track_byte_ct
);
track_bytes.truncate(track_byte_ct);
}
log::trace!(
"format_track_as_bytes(): Wrote {} markers to track of size {} bitcells: {}",
markers.len(),
track_bytes.len(),
track_bytes.len() * 8
);
Ok(System34FormatResult { track_bytes, markers })
}*/
pub(crate) fn set_track_markers(
stream: &mut TrackDataStream,
markers: Vec<(AmigaMarker, usize)>,
) -> Result<(), DiskImageError> {
for (marker, offset) in markers {
let marker_u64 = u64::from(marker);
let marker_bit_index = offset * MFM_BYTE_LEN;
let marker_bytes = marker_u64.to_be_bytes();
//log::trace!("Setting marker {:X?} at bit index: {}", marker_bytes, marker_bit_index);
stream.write_raw_buf(&marker_bytes, marker_bit_index);
}
Ok(())
}
}
// Quasi-trait impl of TrackSchemaParser - called by enum dispatch
impl AmigaSchema {
/// Find the next address marker in the track bitstream. The type of marker and its position in
/// the bitstream is returned, or None.
pub(crate) fn find_next_marker(stream: &TrackDataStream, offset: usize) -> Option<(TrackMarker, usize)> {
// Amiga only has one marker type
let marker = MarkerEncoding {
bits: AMIGA_DAM_MARKER,
mask: AMIGA_DAM_MASK,
len: 32,
};
if let Some((index, _marker_u16)) = stream.find_marker(&marker, offset, None) {
// if let Ok(marker) = marker_u16.try_into() {
// return Some((TrackMarker::Amiga(AmigaMarker::Sector), index));
// }
// Amiga only has one marker type
return Some((TrackMarker::Amiga(AmigaMarker::Sector), index));
}
None
}
pub(crate) fn analyze_elements(metadata: &TrackMetadata) -> TrackAnalysis {
let mut analysis = TrackAnalysis::default();
let mut n_set: FoxHashSet<u8> = FoxHashSet::new();
let mut last_n = 0;
let sector_ids = metadata.sector_ids();
let sector_ct = sector_ids.len();
for (si, sector_id) in sector_ids.iter().enumerate() {
if sector_id.s() != si as u8 + 1 {
analysis.nonconsecutive_sectors = true;
}
last_n = sector_id.n();
n_set.insert(sector_id.n());
}
if n_set.len() > 1 {
//log::warn!("get_track_consistency(): Variable sector sizes detected: {:?}", n_set);
analysis.consistent_sector_size = None;
}
else {
//log::warn!("get_track_consistency(): Consistent sector size: {}", last_n);
analysis.consistent_sector_size = Some(last_n);
}
for ei in metadata.elements() {
match ei.element {
TrackElement::Amiga(AmigaElement::SectorHeader {
address_error,
data_missing,
..
}) => {
if address_error {
analysis.address_error = true;
}
if data_missing {
analysis.no_dam = true;
}
}
TrackElement::Amiga(AmigaElement::SectorData {
address_error,
data_error,
..
}) => {
if address_error {
analysis.address_error = true;
}
if data_error {
analysis.data_error = true
}
}
_ => {}
}
}
analysis.sector_ct = sector_ct;
analysis
}
pub(crate) fn find_marker(
stream: &TrackDataStream,
marker: TrackMarker,
index: usize,
limit: Option<usize>,
) -> Option<(usize, u16)> {
if let TrackMarker::System34(marker) = marker {
let marker = MarkerEncoding {
bits: u64::from(marker),
..MarkerEncoding::default()
};
return stream.find_marker(&marker, index, limit);
}
None
}
pub(crate) fn find_sector_element(
id: impl Into<SectorIdQuery>,
elements: &[TrackElementInstance],
index: usize,
_limit: Option<usize>,
) -> TrackSectorScanResult {
let id = id.into();
let mut wrong_cylinder = false;
let mut bad_cylinder = false;
let mut wrong_head = false;
let mut last_idam_matched = false;
//let mut idam_chsn: Option<DiskChsn> = None;
for (ei, instance) in elements.iter().enumerate() {
if instance.start < index {
continue;
}
let TrackElementInstance { element, .. } = instance;
match element {
TrackElement::Amiga(AmigaElement::SectorHeader {
chsn,
address_error,
data_missing,
}) => {
if *data_missing {
// If this sector header has no DAM, we will return right away
// and set no_dam to true.
return TrackSectorScanResult::Found {
ei,
no_dam: true,
sector_chsn: *chsn,
address_error: *address_error,
data_error: false,
deleted_mark: false,
};
}
// Sector header should have a corresponding DAM marker which we will
// match in the next iteration, if this sector header matches.
// We match in two stages - first we match sector id if provided.
if chsn.s() == id.s() {
// if c is 0xFF, we set the flag for bad cylinder.
if chsn.c() == 0xFF {
bad_cylinder = true;
}
// If c differs, we set the flag for wrong cylinder.
if id.c().is_some() && chsn.c() != id.c().unwrap() {
wrong_cylinder = true;
}
// If h differs, we set the flag for wrong head.
if id.h().is_some() && chsn.h() != id.h().unwrap() {
wrong_head = true;
}
if id.matches(chsn) {
last_idam_matched = true;
}
log::debug!(
"find_sector_element(): Found sector header with id {}, matching against sector query: {}",
chsn,
id
);
}
//idam_chsn = Some(*chsn);
}
TrackElement::Amiga(AmigaElement::SectorData {
chsn,
address_error,
data_error,
}) => {
// log::trace!(
// "get_sector_bit_index(): Found DAM at CHS: {:?}, index: {} last idam matched? {}",
// idam_chsn,
// mdi.start,
// last_idam_matched
// );
// If we matched the last sector header, then this is the sector data
// we are looking for. Return the info.
if last_idam_matched {
return TrackSectorScanResult::Found {
ei,
sector_chsn: *chsn,
address_error: *address_error,
data_error: *data_error,
deleted_mark: false,
no_dam: false,
};
}
}
_ => {}
}
}
TrackSectorScanResult::NotFound {
wrong_cylinder,
bad_cylinder,
wrong_head,
}
}
/// Amiga trackdisk images encode odd and even bits of data in separate blocks. This function
/// decodes the interleaved data into the provided buffer.
pub(crate) fn decode_element(
stream: &TrackDataStream,
element: &TrackElementInstance,
scope: RwScope,
buf: &mut [u8],
) -> (Range<usize>, Option<IntegrityCheck>) {
log::debug!("AmigaSchema::decode_element(): Decoding {:?}", element);
match element.element {
TrackElement::Amiga(AmigaElement::SectorData { .. }) => {
// Read the data checksum.
let recorded_checksum = Self::decode_checksum(stream, element.start);
log::warn!("decode_element(): got buf size of {}", buf.len());
// It's easier to interleave the data via raw MFM than try to spread decoded bits
// back out. We need a buffer that is twice the size of the provided buffer to hold the
// raw MFM data (8 data bits = 16 MFM bits)
let mut raw_buf = vec![0u8; buf.len() * 2];
let raw_bytes_read = stream.read_raw_buf(&mut raw_buf, element.start + mfm_offset!(4));
if raw_bytes_read < 1024 {
log::warn!(
"AmigaSchema::decode_element(): Sector data read less than 1024 raw bytes: {}",
raw_bytes_read
);
}
// The checksum is calculated over the data before it is interleaved, so do that now.
let calculated_checksum = Self::checksum_u32(
stream,
element.start + mfm_offset!(4),
element.start + mfm_offset!(4 + 512),
);
// I'm not sure if there's any point in trying to decode the data if bytes_read < 512,
// but this would basically try anyway. The result can be discarded by the caller if
// this is meaningless.
// for (i, out_byte) in buf.iter_mut().take(raw_bytes_read / 2).enumerate() {
// let odd_byte = raw_buf[i];
// let even_byte = raw_buf[i + raw_bytes_read / 2];
// // Bits are numbered from 0-7 with LSB being 0.
// // Therefore, the first even bit is 0 - so we need to shift even bits right by 1.
// let combined = ((even_byte & 0x55) >> 1) | (odd_byte & 0x55); // Interleave even and odd bits
// *out_byte = combined;
// }
// Write the decoded CRC to the buffer.
for (b, cb) in buf.iter_mut().zip(recorded_checksum.to_be_bytes().iter()) {
*b = *cb;
}
// Decode the sector data to the buffer.
for (i, out_byte) in buf.iter_mut().skip(4).take((raw_bytes_read / 2) - 4).enumerate() {
let odd_byte = raw_buf[i] & 0x55;
let even_byte = raw_buf[i + 512] & 0x55;
*out_byte = (odd_byte << 1) | even_byte;
}
let check = IntegrityCheck::Checksum16(IntegrityField::new(
recorded_checksum as u16,
calculated_checksum as u16,
));
log::debug!(
"AmigaSchema::decode_element(): Decoded sector data with {} checksum",
check
);
(element.element.range(scope).unwrap_or_default(), Some(check))
}
_ => (Range::default(), None),
}
}
pub(crate) fn encode_element(
_stream: &mut TrackDataStream,
_element: &TrackElementInstance,
_scope: RwScope,
_buf: &[u8],
) -> usize {
0
}
pub(crate) fn scan_markers(stream: &TrackDataStream) -> Vec<TrackMarkerItem> {
let mut bit_cursor: usize = 0;
let mut markers = Vec::new();
// Amiga has no IAM marker. Just look for sector markers
while let Some((marker, marker_offset)) = Self::find_next_marker(stream, bit_cursor) {
log::trace!(
"AmigaSchema::scan_track_markers(): Found marker of type {:?} at bit offset: {}",
marker,
marker_offset
);
markers.push(TrackMarkerItem {
elem_type: marker,
start: marker_offset,
});
bit_cursor = marker_offset + 4 * MFM_BYTE_LEN;
}
markers
}
/// Decode interleaved Amiga data. The Amiga uses a 64-bit interleaved format for sector
/// header info block and checksum values. It is easier to do this via raw MFM than try to
/// spread the decoded bits back out.
fn decode_interleaved_u32(stream: &TrackDataStream, index: usize) -> u32 {
let mut info_block_buf = vec![0; 8];
let mut debug_buf = vec![0; 4];
stream.read_raw_buf(&mut info_block_buf, index);
stream.read_decoded_buf(&mut debug_buf, index);
let decoded_long = u32::from_be_bytes([debug_buf[0], debug_buf[1], debug_buf[2], debug_buf[3]]);
log::trace!(
"interleaved block: {:02X?} decoded: {:02X?} decoded_u32: {:08X}",
info_block_buf,
debug_buf,
decoded_long
);
let odd_long = u32::from_be_bytes([
info_block_buf[0],
info_block_buf[1],
info_block_buf[2],
info_block_buf[3],
]);
let even_long = u32::from_be_bytes([
info_block_buf[4],
info_block_buf[5],
info_block_buf[6],
info_block_buf[7],
]);
log::debug!("odd_long: {:08X} even_long: {:08X}", odd_long, even_long);
//let dword = odd_long & 0xAAAA_AAAA | ((even_long & 0xAAAA_AAAA) << 1);
let dword = ((odd_long & 0x5555_5555) << 1) | (even_long & 0x5555_5555);
log::trace!("Decoded interleaved DWORD: {:08X}", dword);
dword
}
fn decode_sector_header(stream: &TrackDataStream, index: usize) -> AmigaSectorId {
let dword = Self::decode_interleaved_u32(stream, index + mfm_offset!(2));
let info_block: [u8; 4] = dword.to_be_bytes();
let sector_header = AmigaSectorId {
fmt: info_block[0],
tt: info_block[1],
ss: info_block[2],
sg: info_block[3],
};
log::trace!("Read {:X?}", sector_header);
sector_header
}
fn decode_checksum(stream: &TrackDataStream, index: usize) -> u32 {
let mut buf = vec![0; 4];
stream.read_decoded_buf(&mut buf, index);
u32::from_be_bytes([buf[0], buf[1], buf[2], buf[3]])
}
pub(crate) fn scan_for_elements(
stream: &mut TrackDataStream,
markers: Vec<TrackMarkerItem>,
) -> Vec<TrackElementInstance> {
if markers.is_empty() {
log::error!("scan_metadata(): No markers provided!");
return Vec::new();
}
let mut elements = Vec::new();
//let mut last_marker_opt: Option<AmigaMarker> = None;
//let mut last_element_offset = 0;
log::trace!("scan_metadata(): Scanning {} markers...", markers.len());
for marker in &markers {
let index = marker.start;
if let TrackMarker::Amiga(_) = marker.elem_type {
let sector_header = Self::decode_sector_header(stream, index);
let c = sector_header.tt / 2;
let h = sector_header.tt % 2;
let chsn = DiskChsn::new(c as u16, h, sector_header.ss, 2);
let mut byte_index = 2;
let header_sum_calculated = Self::checksum_u32(
stream,
index + mfm_offset!(byte_index),
index + mfm_offset!(byte_index + 10),
);
log::debug!("Calculated header checksum: {:08X}", header_sum_calculated);
// Advance past header checksum
byte_index += 4;
// Advance past sector label block
for _ in 0..4 {
//let data = Self::decode_interleaved(stream, index + mfm_offset!(byte_index));
//log::trace!("Sector label: {:08X}", data);
byte_index += 4;
}
let header_sum = Self::decode_checksum(stream, index + mfm_offset!(byte_index));
log::debug!(
"Recorded Header checksum: {:08X} Valid: {}",
header_sum,
header_sum == header_sum_calculated
);
// Advance past sector header checksum
byte_index += 4;
let data_sum = Self::decode_checksum(stream, index + mfm_offset!(byte_index));
// Calculate the crc for remaining sector data
let data_sum_calculated = Self::checksum_u32(
stream,
index + mfm_offset!(byte_index + 4),
index + mfm_offset!(byte_index + 4 + 512),
);
log::debug!("Calculated Data checksum: {:08X}", data_sum_calculated);
log::debug!(
"Recorded Data checksum: {:08X} Valid: {}",
data_sum,
data_sum == data_sum_calculated
);
// Byte index currently points at sector data checksum field.
elements.push(TrackElementInstance {
element: TrackElement::Amiga(AmigaElement::SectorHeader {
chsn,
address_error: header_sum != header_sum_calculated,
data_missing: false,
}),
start: marker.start,
end: marker.start + mfm_offset!(10),
chsn: Some(chsn),
});
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | true |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/track_schema/meta_encoding/mod.rs | src/track_schema/meta_encoding/mod.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
//! A module for 'meta-encodings', basically any track data encoding that sits
//! on top of the base encoding (FM,MFM or GCR).
//!
//! The best example of this is the odd-even encoding of the Amiga.
pub mod odd_even;
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/track_schema/meta_encoding/odd_even.rs | src/track_schema/meta_encoding/odd_even.rs | /*
FluxFox
https://github.com/dbalsom/fluxfox
Copyright 2024-2025 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
*/
#![allow(dead_code)]
//! A module for handling odd/even meta-encoding, such as that used by the
//! Commodore Amiga trackdisk system.
//!
//! Odd-even encoding operates on the principle of `block sizes` that control
//! the interleaving of data bits. The block size is the number of bits that
//! are interleaved in a single unit.
//!
//! For example, the Amiga defines three block sizes:
//! * `LONG` - a 32-bit value, encoded as 16 odd 16 even bits.
//! * `LONGx4` - 4x 32-bit values, encoded as 4x16 odd and 4x16 even bits.
//! This block size is only used for a sectors `Sector Label` area.
//! * `BYTEx512` - 512x 8-bit values, encoded as 256 odd and 256 even bits.
const EVN_BITS_U64: u64 = 0x5555_5555_5555_5555;
const ODD_BITS_U64: u64 = 0xAAAA_AAAA_AAAA_AAAA;
const DATA_BITS_U64: u64 = EVN_BITS_U64;
const EVN_BITS_U32: u32 = 0x5555_5555;
const ODD_BITS_U32: u32 = 0xAAAA_AAAA;
const DATA_BITS_U32: u32 = EVN_BITS_U32;
const EVN_BITS_U16: u16 = 0x5555;
const ODD_BITS_U16: u16 = 0xAAAA;
const DATA_BITS_U16: u16 = EVN_BITS_U16;
const EVN_BITS_U8: u8 = 0x55;
const ODD_BITS_U8: u8 = 0xAA;
/// Decode interleaved Amiga data from raw, clock-aligned FM/MFM bytes.
/// This function will expect 8 raw FM/MFM bytes which will yield a single decoded u32 value.
/// Insufficient bytes will be return 0.
pub(crate) fn odd_even_decode_raw_u32_from_slice(bytes: &[u8]) -> u32 {
if bytes.len() < 8 {
return 0;
}
let odd_long = u32::from_be_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]);
let even_long = u32::from_be_bytes([bytes[4], bytes[5], bytes[6], bytes[7]]);
// Clock aligned FM/MFM data bits are always "even" as the MSB should always be a clock bit.
// making the LSB a data bit.
// Thus the 'odd bits', encoded first, need to be shifted left by 1 bit to become the odd
// bits of the decoded value.
// 0x55 => 0b0101_0101 (even bits (data))
let dword = ((odd_long & 0x5555_5555) << 1) | (even_long & 0x5555_5555);
dword
}
pub(crate) fn odd_even_decode_u8_pair(o: u8, e: u8) -> (u8, u8) {
let mut o = (o as u16) << 8;
let mut e = e as u16;
// Distribute even bits to even positions
e = (e | (e << 4)) & 0x0F0F;
e = (e | (e << 2)) & 0x3333;
e = (e | (e << 1)) & 0x5555;
// Distribute odd bits to odd positions
o = (o | (o >> 4)) & 0xF0F0;
o = (o | (o >> 2)) & 0xCCCC;
o = (o | (o >> 1)) & 0xAAAA;
// Combine even and odd bits
o |= e;
((o >> 8) as u8, o as u8)
}
pub(crate) fn odd_even_encode_u8_pair(x: u8, y: u8) -> (u8, u8) {
let w = (x as u16) << 8 | y as u16;
// Extract even bits into e, odd bits into o (adjusted)
let mut e = w & 0x5555;
let mut o = (w & 0xAAAA) >> 1;
// Compress even bits downward into lower 8 bits
e = (e | (e >> 1)) & 0x3333;
e = (e | (e >> 2)) & 0x0F0F;
e = (e | (e >> 4)) & 0x00FF;
// Compress odd bits upward into upper 8 bits
o = (o | (o << 1)) & 0xCCCC;
o = (o | (o << 2)) & 0xF0F0;
o = (o | (o << 4)) & 0xFF00;
// Split out word
((o >> 8) as u8, e as u8)
}
/// Decode a pair of u32 values, representing odd and even bits, into two u32 values representing
/// the upper dword and lower dword of a 64 bit value (big endian)
pub(crate) fn odd_even_decode_u32_pair(o: u32, e: u32) -> (u32, u32) {
let mut o = (o as u64) << 32;
let mut e = e as u64;
// Distribute even bits to even positions
e = (e | (e << 16)) & 0x0000_FFFF_0000_FFFF;
e = (e | (e << 8)) & 0x00FF_00FF_00FF_00FF;
e = (e | (e << 4)) & 0x0F0F_0F0F_0F0F_0F0F;
e = (e | (e << 2)) & 0x3333_3333_3333_3333;
e = (e | (e << 1)) & 0x5555_5555_5555_5555;
// Distribute odd bits to odd positions
o = (o | (o >> 16)) & 0xFFFF_0000_FFFF_0000;
o = (o | (o >> 8)) & 0xFF00_FF00_FF00_FF00;
o = (o | (o >> 4)) & 0xF0F0_F0F0_F0F0_F0F0;
o = (o | (o >> 2)) & 0xCCCC_CCCC_CCCC_CCCC;
o = (o | (o >> 1)) & 0xAAAA_AAAA_AAAA_AAAA;
// Combine even and odd bits
o |= e;
((o >> 32) as u32, o as u32)
}
pub(crate) fn odd_even_encode_u32_pair(x: u32, y: u32) -> (u32, u32) {
let w = (x as u64) << 32 | y as u64;
// Extract even bits into e, odd bits into o (adjusted)
let mut e = w & 0x5555_5555_5555_5555;
let mut o = (w & 0xAAAA_AAAA_AAAA_AAAA) >> 1;
// Compress even bits downward into lower 32 bits
e = (e | (e >> 1)) & 0x3333_3333_3333_3333;
e = (e | (e >> 2)) & 0x0F0F_0F0F_0F0F_0F0F;
e = (e | (e >> 4)) & 0x00FF_00FF_00FF_00FF;
e = (e | (e >> 8)) & 0x0000_FFFF_0000_FFFF;
e = (e | (e >> 16)) & 0x0000_0000_FFFF_FFFF;
// Compress odd bits upward into upper 8 bits
o = (o | (o << 1)) & 0xCCCC_CCCC_CCCC_CCCC;
o = (o | (o << 2)) & 0xF0F0_F0F0_F0F0_F0F0;
o = (o | (o << 4)) & 0xFF00_FF00_FF00_FF00;
o = (o | (o << 8)) & 0xFFFF_0000_FFFF_0000;
o = (o | (o << 16)) & 0xFFFF_FFFF_0000_0000;
// Split out dwords
((o >> 32) as u32, e as u32)
}
pub(crate) fn odd_even_decode_u32(x: u32) -> u32 {
let mut o = x & 0xFFFF0000; // Extract odd bits compressed in upper 16 bits
let mut e = x & 0x0000FFFF; // Extract even bits compressed in lower 16 bits
// Distribute even bits to even positions
e = (e | (e << 8)) & 0x00FF00FF;
e = (e | (e << 4)) & 0x0F0F0F0F;
e = (e | (e << 2)) & 0x33333333;
e = (e | (e << 1)) & 0x55555555;
// Distribute odd bits to odd positions
o = (o | (o >> 8)) & 0xFF00FF00;
o = (o | (o >> 4)) & 0xF0F0F0F0;
o = (o | (o >> 2)) & 0xCCCCCCCC;
o = (o | (o >> 1)) & 0xAAAAAAAA;
// Combine even and odd bits
o | e
}
/// Encode a normal u32 value into odd/even bit words
pub(crate) fn odd_even_encode_u32(x: u32) -> u32 {
// Extract even bits into e, odd bits into o (adjusted)
let mut e = x & 0x55555555;
let mut o = (x & 0xAAAAAAAA) >> 1;
// Compress even bits downward into lower 16 bits
e = (e | (e >> 1)) & 0x33333333;
e = (e | (e >> 2)) & 0x0F0F0F0F;
e = (e | (e >> 4)) & 0x00FF00FF;
e = (e | (e >> 8)) & 0x0000FFFF;
// Compress odd bits upward into upper 16 bits
o = (o | (o << 1)) & 0xCCCCCCCC;
o = (o | (o << 2)) & 0xF0F0F0F0;
o = (o | (o << 4)) & 0xFF00FF00;
o = (o | (o << 8)) & 0xFFFF0000;
// Combine both words
o | e
}
/// Decode the odd/even-encoded u8 data in the `src` buffer into the `dst` buffer.
/// Odd/even split is assumed to be half source slice length. Both slices should be equal length.
pub(crate) fn odd_even_decode_u8_buf(src: &[u8], dst: &mut [u8]) {
let (odds, evens) = src.split_at(src.len() / 2);
for ((&odd_byte, &even_byte), dst_pair) in odds.iter().zip(evens).zip(dst.chunks_exact_mut(2)) {
(dst_pair[0], dst_pair[1]) = odd_even_decode_u8_pair(odd_byte, even_byte);
}
}
/// Encode the u8 data from `src` with odd/even encoding, saving to `dst` u8 buffer
/// Odd/even split is assumed to be half source slice length. Both slices should be equal length.
pub(crate) fn odd_even_encode_u8_buf(src: &[u8], dst: &mut [u8]) {
let (odds, evens) = dst.split_at_mut(src.len() / 2);
for ((dst_byte0, dst_byte1), src_pair) in odds.iter_mut().zip(evens).zip(src.chunks_exact(2)) {
let (odd, even) = odd_even_encode_u8_pair(src_pair[0], src_pair[1]);
*dst_byte0 = odd;
*dst_byte1 = even;
}
}
/// Decode the odd/even-encoded u32 data in the `src` buffer into the `dst` buffer.
/// Odd/even split is assumed to be half source slice length. Both slices should be equal length.
pub(crate) fn odd_even_decode_u32_buf(src: &[u32], dst: &mut [u32]) {
let (odds, evens) = src.split_at(src.len() / 2);
for ((&odd_byte, &even_byte), dst_pair) in odds.iter().zip(evens).zip(dst.chunks_exact_mut(2)) {
(dst_pair[0], dst_pair[1]) = odd_even_decode_u32_pair(odd_byte, even_byte);
}
}
/// Encode the sector data from `src` into odd/even encoding, saving to `dst` buffer
/// Odd/even split is assumed to be half source slice length. Both slices should be equal length.
pub(crate) fn odd_even_encode_u32_buf(src: &[u32], dst: &mut [u32]) {
let (odds, evens) = dst.split_at_mut(src.len() / 2);
for ((dst_byte0, dst_byte1), src_pair) in odds.iter_mut().zip(evens).zip(src.chunks_exact(2)) {
let (odd, even) = odd_even_encode_u32_pair(src_pair[0], src_pair[1]);
*dst_byte0 = odd;
*dst_byte1 = even;
}
}
/// Encode interleaved Amiga data from the u32 `value` into a buffer of 8, clock-aligned MFM bytes.
/// This function expects a mutable reference to a slice of at least 8 bytes, or else it does
/// nothing.
fn odd_even_encode_mfm_u32(value: u32, bytes: &mut [u8]) {
if bytes.len() < 8 {
return;
}
let odd_long = (value >> 1) & 0x5555_5555;
let even_long = value & 0x5555_5555;
odd_long
.to_be_bytes()
.iter()
.enumerate()
.for_each(|(i, &b)| bytes[i] = b);
even_long
.to_be_bytes()
.iter()
.enumerate()
.for_each(|(i, &b)| bytes[i + 4] = b);
}
/// Encode interleaved Amiga data from the u32 `value` into a buffer of 8, clock-aligned FM bytes.
/// This function expects a mutable reference to a slice of at least 8 bytes, or else it does
/// nothing.
fn odd_even_encode_fm_u32(value: u32, bytes: &mut [u8]) {
if bytes.len() < 8 {
return;
}
let odd_long = ((value >> 1) & 0x5555_5555) | 0xAAAA_AAAA;
let even_long = (value & 0x5555_5555) | 0xAAAA_AAAA;
odd_long
.to_be_bytes()
.iter()
.enumerate()
.for_each(|(i, &b)| bytes[i] = b);
even_long
.to_be_bytes()
.iter()
.enumerate()
.for_each(|(i, &b)| bytes[i + 4] = b);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_odd_even_decode_u8_pair() {
let (b0, b1) = odd_even_decode_u8_pair(0x00, 0xFF);
println!("b0: {:02x}, b1: {:02x}", b0, b1);
assert_eq!(b0, 0x55);
assert_eq!(b1, 0x55);
let (b0, b1) = odd_even_decode_u8_pair(0xFF, 0x00);
println!("b0: {:02x}, b1: {:02x}", b0, b1);
assert_eq!(b0, 0xAA);
assert_eq!(b1, 0xAA);
}
#[test]
fn test_odd_even_encode_u8_pair() {
let (b0, b1) = odd_even_encode_u8_pair(0x55, 0x55);
assert_eq!(b0, 0x00);
assert_eq!(b1, 0xFF);
let (b0, b1) = odd_even_encode_u8_pair(0xAA, 0xAA);
assert_eq!(b0, 0xFF);
assert_eq!(b1, 0x00);
}
#[test]
fn test_odd_even_decode_u32_pair() {
let (b0, b1) = odd_even_decode_u32_pair(0u32, !0u32);
println!("b0: {:02x}, b1: {:02x}", b0, b1);
assert_eq!(b0, EVN_BITS_U32);
assert_eq!(b1, EVN_BITS_U32);
let (b0, b1) = odd_even_decode_u32_pair(!0u32, 0x00);
println!("b0: {:02x}, b1: {:02x}", b0, b1);
assert_eq!(b0, ODD_BITS_U32);
assert_eq!(b1, ODD_BITS_U32);
}
#[test]
fn test_odd_even_encode_u32_pair() {
let (dw0, dw1) = odd_even_encode_u32_pair(EVN_BITS_U32, EVN_BITS_U32);
assert_eq!(dw0, 0u32);
assert_eq!(dw1, !0u32);
let (b0, b1) = odd_even_encode_u32_pair(ODD_BITS_U32, ODD_BITS_U32);
assert_eq!(b0, !0u32);
assert_eq!(b1, 0);
}
#[test]
fn test_odd_even_decode_u8_buf() {
// Create a test sector, with all the even bits set.
let mut v = vec![0u8; 256];
v.extend(vec![0xFFu8; 256]);
// Create a buffer to decode into.
let mut decoded = vec![0u8; 512];
odd_even_decode_u8_buf(&v, &mut decoded);
assert_eq!(decoded, vec![0x55u8; 512]);
// Create a test sector, with all the odd bits set.
let mut v = vec![0xFFu8; 256];
v.extend(vec![0x00u8; 256]);
odd_even_decode_u8_buf(&v, &mut decoded);
assert_eq!(decoded, vec![0xAAu8; 512]);
}
#[test]
fn test_odd_even_encode_u8_buf() {
let v = vec![0x55u8; 512];
let mut encoded = vec![0u8; 512];
odd_even_encode_u8_buf(&v, &mut encoded);
assert_eq!(encoded[..256], vec![0u8; 256]);
assert_eq!(encoded[256..], vec![0xFFu8; 256]);
}
#[test]
fn test_odd_even_decode_u32_buf() {
// Create a test sector label, 4 entries of 0 and 4 entries all bits set.
let mut v = vec![0u32; 4];
v.extend(vec![!0u32; 4]);
// Create a buffer to decode into.
let mut decoded = vec![0u32; 4];
odd_even_decode_u32_buf(&v, &mut decoded);
assert_eq!(decoded, vec![EVN_BITS_U32; 4]);
// Create a test sector label, with all the odd bits set, even bits 0.
let mut v = vec![!0u32; 4];
v.extend(vec![0u32; 4]);
odd_even_decode_u32_buf(&v, &mut decoded);
// Decoded buffer should have only odd bits set.
assert_eq!(decoded, vec![ODD_BITS_U32; 4]);
}
#[test]
fn test_odd_even_encode_u32_buf() {
let v = vec![0x55u8; 512];
let mut encoded = vec![0u8; 512];
odd_even_encode_u8_buf(&v, &mut encoded);
assert_eq!(encoded[..256], vec![0u8; 256]);
assert_eq!(encoded[256..], vec![0xFFu8; 256]);
}
#[test]
fn test_odd_even_encode_u32() {
let original: u32 = 0xFF00_000B;
let encoded = odd_even_encode_u32(original);
assert_eq!(encoded, 0xF003_F001);
}
#[test]
fn test_odd_even_encode_decode_u32() {
let original: u32 = 0xFF00_000B;
let encoded = odd_even_encode_u32(original);
let decoded = odd_even_decode_u32(encoded);
assert_eq!(original, decoded);
}
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/tests/psi.rs | tests/psi.rs | mod common;
use common::*;
use fluxfox::prelude::*;
use std::path::PathBuf;
fn init() {
let _ = env_logger::builder().is_test(true).try_init();
}
#[test]
fn test_psi() {
init();
use std::io::Cursor;
let disk_image_buf = std::fs::read(".\\tests\\images\\transylvania\\Transylvania.psi").unwrap();
let mut in_buffer = Cursor::new(disk_image_buf);
let mut img_image = DiskImage::load(&mut in_buffer, None, None, None).unwrap();
println!("Loaded PSI image of geometry {}...", img_image.image_format().geometry);
let mut out_buffer = Cursor::new(Vec::new());
let fmt = DiskImageFileFormat::RawSectorImage;
fmt.save_image(&mut img_image, &ParserWriteOptions::default(), &mut out_buffer)
.unwrap();
//let in_inner: Vec<u8> = in_buffer.into_inner();
let out_inner: Vec<u8> = out_buffer.into_inner();
let in_hash = compute_file_hash(".\\tests\\images\\transylvania\\Transylvania.img");
//println!("Input file is {} bytes.", in_inner.len());
//println!("First bytes of input file: {:02X?}", &in_inner[0..16]);
println!("Input file SHA1: {}", in_hash);
//println!("Output file is {} bytes.", out_inner.len());
//println!("First bytes of output file: {:02X?}", &out_inner[0..16]);
//std::fs::write("test_out.img", out_inner.clone()).unwrap();
let out_hash = compute_slice_hash(&out_inner);
println!("Output file SHA1: {:}", out_hash);
if in_hash != out_hash {
println!("Hashes do not match!");
//std::fs::write("test_out.img", out_inner.clone()).unwrap();
}
assert_eq!(in_hash, out_hash);
println!("Hashes match!");
}
#[test]
fn test_psi_sector_tests() {
init();
run_sector_test(
PathBuf::from(".\\tests\\images\\sector_test\\sector_test_360k.psi"),
DiskImageFileFormat::F86Image,
);
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/tests/scp.rs | tests/scp.rs | mod common;
use crate::common::{run_sector_test, test_convert_exact};
use fluxfox::DiskImageFileFormat;
fn init() {
let _ = env_logger::builder().is_test(true).try_init();
}
// #[test]
// fn test_scp_sector_test_360k() {
// init();
// run_sector_test(
// "tests/images/sector_test/sector_test_360k.scp",
// DiskImageFileFormat::SuperCardPro,
// );
// }
//
// #[test]
// fn test_scp_trans() {
// init();
// test_convert_exact(
// ".\\tests\\images\\transylvania\\Transylvania.scp",
// ".\\tests\\images\\transylvania\\Transylvania.img",
// DiskImageFileFormat::RawSectorImage,
// );
// }
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/tests/pri.rs | tests/pri.rs | mod common;
use crate::common::run_sector_test;
use fluxfox::prelude::*;
use std::path::PathBuf;
fn init() {
let _ = env_logger::builder().is_test(true).try_init();
}
#[test]
fn test_pri_write() {
init();
use std::io::Cursor;
let disk_image_buf = std::fs::read(".\\tests\\images\\transylvania\\Transylvania.86f").unwrap();
let mut in_buffer = Cursor::new(disk_image_buf);
let mut f86_image = DiskImage::load(&mut in_buffer, None, None, None).unwrap();
println!("Loaded 86F image of geometry {}...", f86_image.image_format().geometry);
let mut out_buffer = Cursor::new(Vec::new());
let fmt = DiskImageFileFormat::PceBitstreamImage;
match fmt.save_image(&mut f86_image, &ParserWriteOptions::default(), &mut out_buffer) {
Ok(_) => println!("Saved PRI image."),
Err(e) => panic!("Failed to save PRI image: {}", e),
}
let out_inner: Vec<u8> = out_buffer.into_inner();
std::fs::write(".\\tests\\images\\temp\\temp_out.pri", out_inner).unwrap();
// let readback_disk_image_buf = std::fs::read(".\\tests\\images\\temp\\temp_out.pri").unwrap();
// let mut readback_in_buffer = Cursor::new(readback_disk_image_buf);
//
// let mut f86_image = match DiskImage::load(&mut readback_in_buffer) {
// Ok(image) => image,
// Err(e) => panic!("Failed to re-load new PRI image: {}", e),
// };
}
#[test]
fn test_pri_sector_test() {
run_sector_test(
PathBuf::from(".\\tests\\images\\sector_test\\sector_test_360k.pri"),
DiskImageFileFormat::PceBitstreamImage,
);
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/tests/img.rs | tests/img.rs | mod common;
use common::*;
use fluxfox::prelude::*;
use std::path::PathBuf;
fn init() {
let _ = env_logger::builder().is_test(true).try_init();
}
#[test]
fn test_img() {
init();
test_invertibility(
".\\tests\\images\\transylvania\\Transylvania.img",
DiskImageFileFormat::RawSectorImage,
);
}
#[test]
fn test_img_sector_test() {
init();
run_sector_test(
PathBuf::from(".\\tests\\images\\sector_test\\sector_test_360k.img"),
DiskImageFileFormat::RawSectorImage,
);
#[cfg(feature = "zip")]
run_sector_test(
PathBuf::from(".\\tests\\images\\sector_test\\sector_test_360k.imz"),
DiskImageFileFormat::RawSectorImage,
);
run_sector_test(
PathBuf::from(".\\tests\\images\\sector_test\\sector_test_1200k.img"),
DiskImageFileFormat::RawSectorImage,
);
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/tests/prolok.rs | tests/prolok.rs | mod common;
use fluxfox::prelude::*;
fn init() {
let _ = env_logger::builder().is_test(true).try_init();
}
#[test]
fn test_prolok() {
use std::io::Cursor;
init();
let disk_image_buf = std::fs::read(".\\tests\\images\\monster_disk\\monster_disk_360k.pri").unwrap();
let mut in_buffer = Cursor::new(disk_image_buf);
let mut disk = DiskImage::load(&mut in_buffer, None, None, None).unwrap();
println!("Loaded PRI image of geometry {}...", disk.image_format().geometry);
let rsr = match disk.read_sector(
DiskCh::new(39, 0),
DiskChsnQuery::new(39, 0, 5, None),
None,
None,
RwScope::DataOnly,
false,
) {
Ok(result) => result,
Err(DiskImageError::DataError) => {
panic!("Data error reading sector.");
}
Err(e) => panic!("Error reading sector: {:?}", e),
};
let sector_data = rsr.data();
let original_data = sector_data.to_vec();
println!(
"Read sector data: {:02X?} of length {}",
§or_data[0..8],
sector_data.len()
);
assert_eq!(sector_data.len(), 512);
match disk.write_sector(
DiskCh::new(39, 0),
DiskChsnQuery::new(39, 0, 5, 2),
None,
sector_data,
RwScope::DataOnly,
false,
false,
) {
Ok(result) => result,
Err(DiskImageError::DataError) => {
panic!("Data error writing sector.");
}
Err(e) => panic!("Error writing sector: {:?}", e),
};
// Read the sector back. It should have different data.
let rsr = match disk.read_sector(
DiskCh::new(39, 0),
DiskChsnQuery::new(39, 0, 5, 2),
None,
None,
RwScope::DataOnly,
false,
) {
Ok(result) => result,
Err(DiskImageError::DataError) => {
panic!("Data error reading sector.");
}
Err(e) => panic!("Error reading sector: {:?}", e),
};
let sector_data = rsr.data();
if sector_data.len() == 512 {
println!("Original data: {:02X?}", &original_data[0..8]);
println!("Post-write data: {:02X?}", §or_data[0..8]);
}
if sector_data == original_data {
panic!("Data read back from written sector did not change - no hole detected!");
}
println!("Data read back from written sector changed - hole detected!");
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/tests/adf.rs | tests/adf.rs | #![cfg(all(feature = "adf", feature = "amiga"))]
mod common;
use common::*;
use fluxfox::prelude::*;
fn init() {
let _ = env_logger::builder().is_test(true).try_init();
}
#[test]
fn test_adf() {
init();
test_invertibility(
".\\tests\\images\\adf\\flightyfox.adf",
DiskImageFileFormat::RawSectorImage,
);
}
#[test]
#[cfg(feature = "gzip")]
fn test_adz() {
init();
test_convert_exact(
".\\tests\\images\\adf\\flightyfox.adz",
".\\tests\\images\\adf\\flightyfox.adf",
DiskImageFileFormat::RawSectorImage,
);
}
// #[test]
// fn test_img_sector_test() {
// init();
// run_sector_test(
// PathBuf::from(".\\tests\\images\\sector_test\\sector_test_360k.img"),
// DiskImageFileFormat::RawSectorImage,
// );
// #[cfg(feature = "zip")]
// run_sector_test(
// PathBuf::from(".\\tests\\images\\sector_test\\sector_test_360k.imz"),
// DiskImageFileFormat::RawSectorImage,
// );
// run_sector_test(
// PathBuf::from(".\\tests\\images\\sector_test\\sector_test_1200k.img"),
// DiskImageFileFormat::RawSectorImage,
// );
// }
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/tests/td0.rs | tests/td0.rs | #![cfg(feature = "td0")]
mod common;
use std::path::PathBuf;
use common::*;
use fluxfox::prelude::*;
fn init() {
let _ = env_logger::builder().is_test(true).try_init();
}
#[test]
fn test_td0() {
init();
use std::io::Cursor;
let disk_image_buf = std::fs::read(".\\tests\\images\\transylvania\\Transylvania.td0").unwrap();
let mut in_buffer = Cursor::new(disk_image_buf);
let mut img_image = DiskImage::load(&mut in_buffer, None, None, None).unwrap();
println!("Loaded TD0 image of geometry {}...", img_image.image_format().geometry);
let mut out_buffer = Cursor::new(Vec::new());
let fmt = DiskImageFileFormat::RawSectorImage;
fmt.save_image(&mut img_image, &ParserWriteOptions::default(), &mut out_buffer)
.unwrap();
//let in_inner: Vec<u8> = in_buffer.into_inner();
let out_inner: Vec<u8> = out_buffer.into_inner();
let in_hash = compute_file_hash(".\\tests\\images\\transylvania\\Transylvania.img");
//println!("Input file is {} bytes.", in_inner.len());
//println!("First bytes of input file: {:02X?}", &in_inner[0..16]);
println!("Input file SHA1: {}", in_hash);
//println!("Output file is {} bytes.", out_inner.len());
//println!("First bytes of output file: {:02X?}", &out_inner[0..16]);
//std::fs::write("test_out.img", out_inner.clone()).unwrap();
let out_hash = compute_slice_hash(&out_inner);
println!("Output file SHA1: {:}", out_hash);
if in_hash != out_hash {
println!("Hashes do not match!");
//std::fs::write("test_out.img", out_inner.clone()).unwrap();
}
assert_eq!(in_hash, out_hash);
println!("Hashes match!");
}
#[test]
fn test_td0_sector_tests() {
init();
run_sector_test(
PathBuf::from(".\\tests\\images\\sector_test\\sector_test_360k.td0"),
DiskImageFileFormat::F86Image,
);
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/tests/tc.rs | tests/tc.rs | mod common;
use crate::common::run_sector_test;
use fluxfox::DiskImageFileFormat;
fn init() {
let _ = env_logger::builder().is_test(true).try_init();
}
#[test]
fn test_scp_sector_test() {
init();
run_sector_test(
"tests/images/sector_test/sector_test_360k.tc",
DiskImageFileFormat::TransCopyImage,
);
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/tests/image_builder.rs | tests/image_builder.rs | use fluxfox::{image_builder::ImageBuilder, prelude::*, DiskImageFileFormat, ImageFormatParser, StandardFormat};
use std::io::Cursor;
mod common;
fn init() {
let _ = env_logger::builder().is_test(true).try_init();
}
#[test]
fn test_image_builder() {
init();
let mut image = match ImageBuilder::new()
.with_resolution(TrackDataResolution::BitStream)
.with_standard_format(StandardFormat::PcFloppy360)
.with_creator_tag("MartyPC ".as_bytes())
.with_formatted(true)
.build()
{
Ok(image) => image,
Err(e) => panic!("Failed to create image: {}", e),
};
let mut out_buffer = Cursor::new(Vec::new());
let output_fmt = DiskImageFileFormat::F86Image;
match output_fmt.save_image(&mut image, &ParserWriteOptions::default(), &mut out_buffer) {
Ok(_) => println!("Wrote 86F image."),
Err(e) => panic!("Failed to write 86F image: {}", e),
};
std::fs::write(".\\tests\\images\\test_formatted.86f", out_buffer.get_ref()).unwrap();
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/tests/mfm.rs | tests/mfm.rs | mod common;
use crate::common::run_sector_test;
use fluxfox::{DiskImageFileFormat};
use std::path::PathBuf;
fn init() {
let _ = env_logger::builder().is_test(true).try_init();
}
#[test]
fn test_mfm_sector_tests() {
init();
run_sector_test(
PathBuf::from(".\\tests\\images\\sector_test\\sector_test_360k.mfm"),
DiskImageFileFormat::MfmBitstreamImage,
);
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/tests/mfi.rs | tests/mfi.rs | #![cfg(feature = "mfi")]
mod common;
use crate::common::run_sector_test;
use fluxfox::DiskImageFileFormat;
use std::path::PathBuf;
fn init() {
let _ = env_logger::builder().is_test(true).try_init();
}
#[test]
fn test_mfi_sector_test_360k() {
init();
run_sector_test(
PathBuf::from(".\\tests\\images\\sector_test\\sector_test_360k.mfi"),
DiskImageFileFormat::MameFloppyImage,
);
}
#[test]
fn test_mfi_sector_test_1200k() {
init();
run_sector_test(
PathBuf::from(".\\tests\\images\\sector_test\\sector_test_1200k.mfi"),
DiskImageFileFormat::MameFloppyImage,
);
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/tests/pfi.rs | tests/pfi.rs | mod common;
use crate::common::run_sector_test;
use fluxfox::DiskImageFileFormat;
use std::path::PathBuf;
fn init() {
let _ = env_logger::builder().is_test(true).try_init();
}
#[test]
fn test_pfi_sector_test() {
init();
run_sector_test(
PathBuf::from(".\\tests\\images\\sector_test\\sector_test_360k.pfi"),
DiskImageFileFormat::PceFluxImage,
);
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
dbalsom/fluxfox | https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/tests/imd.rs | tests/imd.rs | mod common;
use common::*;
use fluxfox::prelude::*;
use std::path::PathBuf;
fn init() {
match env_logger::builder().is_test(true).try_init() {
Ok(_) => {
println!("Logger initialized. A debug log should follow:");
log::debug!("Logger initialized.");
}
Err(e) => eprintln!("Failed to initialize logger: {}", e),
}
}
#[test]
fn test_imd() {
init();
test_convert_exact(
".\\tests\\images\\transylvania\\Transylvania.imd",
".\\tests\\images\\transylvania\\Transylvania.img",
DiskImageFileFormat::RawSectorImage,
);
}
#[test]
fn test_imd_sector_test_360k() {
init();
run_sector_test(
PathBuf::from(".\\tests\\images\\sector_test\\sector_test_360k.imd"),
DiskImageFileFormat::ImageDisk,
);
}
| rust | MIT | b4c04b51746e5fe7769f49a1b32b8caad426fc81 | 2026-01-04T20:24:04.021295Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.