|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
use crate::cd::{ |
|
|
coherence_ratio, encoding_languages, mb_encoding_languages, merge_coherence_ratios, |
|
|
}; |
|
|
use crate::consts::{IANA_SUPPORTED, MAX_PROCESSED_BYTES, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE}; |
|
|
use crate::entity::{CharsetMatch, CharsetMatches, CoherenceMatches, NormalizerSettings}; |
|
|
use crate::md::mess_ratio; |
|
|
use crate::utils::{ |
|
|
any_specified_encoding, decode, iana_name, identify_sig_or_bom, is_cp_similar, |
|
|
is_invalid_chunk, is_multi_byte_encoding, |
|
|
}; |
|
|
use encoding::DecoderTrap; |
|
|
use log::{debug, trace}; |
|
|
use std::collections::VecDeque; |
|
|
use std::fs::File; |
|
|
use std::io::Read; |
|
|
use std::path::Path; |
|
|
|
|
|
pub mod assets; |
|
|
|
|
|
#[allow(clippy::cast_lossless, clippy::cast_precision_loss)] |
|
|
mod cd; |
|
|
pub mod consts; |
|
|
pub mod entity; |
|
|
mod md; |
|
|
mod tests; |
|
|
pub mod utils; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/// Detect the character encoding of a raw byte sequence.
///
/// Tries every IANA-supported encoding (prioritizing any declared encoding,
/// BOM/SIG-indicated encoding, ASCII and UTF-8), scores each candidate by a
/// "mess ratio" (chaos probing) and, for non-ASCII candidates, by language
/// coherence. Returns all plausible matches; returns early with a single
/// match when a candidate is clearly the right one.
///
/// `settings` of `None` uses `NormalizerSettings::default()`.
pub fn from_bytes(bytes: &[u8], settings: Option<NormalizerSettings>) -> CharsetMatches {
    let mut settings = settings.unwrap_or_default();

    // Normalize user-supplied include/exclude lists to canonical IANA names.
    // NOTE(review): `iana_name(e).unwrap()` panics if the caller passes an
    // unknown encoding name — confirm this hard failure is intended.
    if !settings.include_encodings.is_empty() {
        settings.include_encodings = settings
            .include_encodings
            .iter()
            .map(|e| iana_name(e).unwrap().to_string())
            .collect();
        trace!(
            "include_encodings is set. Use this flag for debugging purpose. \
            Limited list of encoding allowed : {}.",
            settings.include_encodings.join(", ")
        );
    }
    if !settings.exclude_encodings.is_empty() {
        settings.exclude_encodings = settings
            .exclude_encodings
            .iter()
            .map(|e| iana_name(e).unwrap().to_string())
            .collect();
        trace!(
            "exclude_encodings is set. Use this flag for debugging purpose. \
            Limited list of encoding allowed : {}.",
            settings.exclude_encodings.join(", ")
        );
    }

    // Empty input: nothing to analyze — answer with the default match
    // (assumed UTF-8 intention).
    let bytes_length = bytes.len();
    if bytes_length == 0 {
        debug!("Encoding detection on empty bytes, assuming utf_8 intention.");
        return CharsetMatches::from_single(CharsetMatch::default());
    }

    // If the payload is smaller than steps * chunk_size, sampling in chunks is
    // pointless — analyze the whole payload in a single step.
    if bytes_length <= (settings.chunk_size * settings.steps) {
        trace!(
            "override steps ({}) and chunk_size ({}) as content does not \
            fit ({} byte(s) given) parameters.",
            settings.steps,
            settings.chunk_size,
            bytes_length,
        );
        settings.steps = 1;
        settings.chunk_size = bytes_length;
    }

    // Shrink chunk_size so the requested number of steps fits in the payload.
    if settings.steps > 1 && bytes_length / settings.steps < settings.chunk_size {
        settings.chunk_size = bytes_length / settings.steps;
    }

    if bytes_length < TOO_SMALL_SEQUENCE {
        trace!(
            "Trying to detect encoding from a tiny portion of ({}) byte(s).",
            bytes_length
        );
    }

    // For very large payloads we avoid keeping a fully decoded String per
    // candidate ("lazy" mode) — only MAX_PROCESSED_BYTES are decoded up front
    // for single-byte encodings, with a final-tail check later.
    let is_too_large_sequence = bytes_length > TOO_BIG_SEQUENCE;
    if is_too_large_sequence {
        trace!(
            "Using lazy str decoding because the payload is quite large, ({}) byte(s).",
            bytes_length
        );
    }

    // Encodings that deserve to be tried first.
    let mut prioritized_encodings: Vec<&str> = vec![];

    // 1) An encoding declared inside the content itself (e.g. an XML/HTML
    //    declaration within the first 4096 bytes), when preemptive behaviour
    //    is enabled.
    let mut specified_encoding: String = String::new();
    if settings.preemptive_behaviour {
        if let Some(enc) = any_specified_encoding(bytes, 4096) {
            trace!(
                "Detected declarative mark in sequence. Priority +1 given for {}.",
                &enc
            );
            specified_encoding = enc.to_string();
            prioritized_encodings.push(&specified_encoding);
        }
    }

    // 2) An encoding indicated by a leading BOM / signature.
    let (sig_encoding, sig_payload) = identify_sig_or_bom(bytes);
    if let (Some(sig_enc), Some(sig_pay)) = (&sig_encoding, sig_payload) {
        trace!(
            "Detected a SIG or BOM mark on first {} byte(s). Priority +1 given for {}.",
            sig_pay.len(),
            sig_enc,
        );
        prioritized_encodings.push(sig_enc);
    }

    // 3) The universal cheap candidates.
    prioritized_encodings.extend(&["ascii", "utf-8"]);

    // Move prioritized encodings to the front of the full candidate list,
    // preserving their relative priority (hence the reverse iteration).
    let mut iana_encodings: VecDeque<&str> = VecDeque::from(IANA_SUPPORTED.clone());
    for pe in prioritized_encodings.iter().rev() {
        if let Some(index) = iana_encodings.iter().position(|x| x == pe) {
            let value = iana_encodings.remove(index).unwrap();
            iana_encodings.push_front(value);
        }
    }

    // Bookkeeping across candidates: hard failures (could not decode at all),
    // soft failures (decoded but too chaotic), and last-resort fallbacks.
    let mut tested_but_hard_failure: Vec<&str> = vec![];
    let mut tested_but_soft_failure: Vec<&str> = vec![];
    let mut fallback_ascii: Option<CharsetMatch> = None;
    let mut fallback_u8: Option<CharsetMatch> = None;
    let mut fallback_specified: Option<CharsetMatch> = None;
    let mut results: CharsetMatches = CharsetMatches::default();

    // Main candidate loop — one iteration per IANA encoding.
    'iana_encodings_loop: for encoding_iana in iana_encodings {
        // Honor the include/exclude debug filters.
        if (!settings.include_encodings.is_empty()
            && !settings
                .include_encodings
                .contains(&encoding_iana.to_string()))
            || settings
                .exclude_encodings
                .contains(&encoding_iana.to_string())
        {
            continue;
        }
        let bom_or_sig_available: bool = sig_encoding.as_deref() == Some(encoding_iana);

        let is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana);

        // Endianness-specific UTF-16 variants are only meaningful with a BOM;
        // without one they are covered by other candidates.
        if !bom_or_sig_available && ["utf-16le", "utf-16be"].contains(&encoding_iana) {
            trace!(
                "Encoding {} won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE",
                encoding_iana,
            );
            continue;
        }

        // Skip the BOM bytes when present; cap the decoded span in lazy mode
        // for single-byte decoders.
        let start_idx = match bom_or_sig_available {
            true => sig_payload.unwrap().len(),
            false => 0,
        };
        let end_idx = match is_too_large_sequence && !is_multi_byte_decoder {
            true => MAX_PROCESSED_BYTES,
            false => bytes_length,
        };
        // Trial-decode the (possibly truncated) payload. In lazy single-byte
        // mode the decoded String is discarded (`None`) — chunks will be
        // re-decoded on demand below; a decode error is a hard failure.
        let decoded_payload: Option<String> = if let Ok(payload) = decode(
            &bytes[start_idx..end_idx],
            encoding_iana,
            DecoderTrap::Strict,
            is_too_large_sequence && !is_multi_byte_decoder,
            false,
        ) {
            (!is_too_large_sequence || is_multi_byte_decoder).then_some(payload)
        } else {
            trace!(
                "Code page {} does not fit given bytes sequence at ALL.",
                encoding_iana,
            );
            tested_but_hard_failure.push(encoding_iana);
            continue 'iana_encodings_loop;
        };

        // If a near-identical code page already soft-failed, don't waste time
        // probing this one.
        for encoding_soft_failed in &tested_but_soft_failure {
            if is_cp_similar(encoding_iana, encoding_soft_failed) {
                trace!("{} is deemed too similar to code page {} and was consider unsuited already. Continuing!",
                    encoding_iana,
                    encoding_soft_failed,
                );
                continue 'iana_encodings_loop;
            }
        }

        // Chaos probing state: give up after max_chunk_gave_up over-threshold
        // chunks (at least 2, or a quarter of the planned steps).
        let max_chunk_gave_up = 2.max(settings.steps / 4);
        let mut early_stop_count: usize = 0;
        let mut lazy_str_hard_failure = false;
        let mut md_ratios: Vec<f32> = vec![];

        // Languages plausibly written in this encoding, used later for
        // coherence scoring.
        let target_languages = if is_multi_byte_decoder {
            mb_encoding_languages(encoding_iana)
        } else {
            encoding_languages(encoding_iana.to_string())
        };
        trace!(
            "{} should target any language(s) of {:?}",
            encoding_iana,
            target_languages,
        );

        // Sequence length is in chars when we hold a decoded String, in bytes
        // when operating lazily on the raw buffer.
        let seq_len = match &decoded_payload {
            Some(payload) => payload.chars().count(),
            None => bytes_length,
        };
        // In lazy mode (offsets are byte indices) skip past the BOM.
        let starting_offset = match (bom_or_sig_available, &decoded_payload) {
            (true, None) => start_idx,
            _ => 0,
        };
        // Evenly spread chunk start offsets across the sequence.
        let offsets = (starting_offset..seq_len).step_by((seq_len / settings.steps).max(1));

        // Probe each chunk and accumulate its mess ratio.
        let mut md_chunks: Vec<String> = vec![];
        'chunks_loop: for offset in offsets {
            // Either slice chars from the pre-decoded payload, or decode this
            // chunk's bytes on the fly (lazy mode).
            let decoded_chunk_result = match &decoded_payload {
                Some(payload) => Ok(payload
                    .chars()
                    .skip(offset)
                    .take(settings.chunk_size)
                    .collect()),
                None => decode(
                    &bytes[offset..(offset + settings.chunk_size).min(seq_len)],
                    encoding_iana,
                    DecoderTrap::Strict,
                    false,
                    false,
                ),
            };

            // An invalid chunk means this code page cannot represent the
            // content — abort chunk probing and count it as fully given up.
            if is_invalid_chunk(&decoded_chunk_result, encoding_iana) {
                trace!(
                    "LazyStr Loading: After MD chunk decode, code page {} \
                    does not fit given bytes sequence at ALL. {}",
                    encoding_iana,
                    match decoded_chunk_result {
                        Ok(_) => String::from("non-ascii"),
                        Err(message) => message.to_string(),
                    },
                );
                early_stop_count = max_chunk_gave_up;
                lazy_str_hard_failure = true;
                break 'chunks_loop;
            }
            // Safe: is_invalid_chunk returned false just above.
            let decoded_chunk = decoded_chunk_result.unwrap();

            // Keep the chunk for later coherence scoring and record its chaos.
            md_chunks.push(decoded_chunk.clone());
            md_ratios.push(mess_ratio(decoded_chunk, Some(settings.threshold)));
            if md_ratios.last().unwrap() >= &settings.threshold {
                early_stop_count += 1;
            }
            if early_stop_count >= max_chunk_gave_up {
                break 'chunks_loop;
            }
        }

        // Lazy single-byte mode only decoded the head; verify the remaining
        // tail also decodes, otherwise this is a hard failure after all.
        if !lazy_str_hard_failure && is_too_large_sequence && !is_multi_byte_decoder {
            let decoded_chunk_result = decode(
                &bytes[MAX_PROCESSED_BYTES..],
                encoding_iana,
                DecoderTrap::Strict,
                false,
                false,
            );
            if is_invalid_chunk(&decoded_chunk_result, encoding_iana) {
                trace!(
                    "LazyStr Loading: After final lookup, code page {} does not fit \
                    given bytes sequence at ALL. {}",
                    encoding_iana,
                    decoded_chunk_result.unwrap_err().to_string(),
                );
                tested_but_hard_failure.push(encoding_iana);
                continue 'iana_encodings_loop;
            }
        }

        // Average chaos over all probed chunks (0.0 when nothing was probed).
        let mean_mess_ratio = match md_ratios.is_empty() {
            true => 0.0,
            false => md_ratios.iter().sum::<f32>() / (md_ratios.len() as f32),
        };

        // Soft failure: too chaotic on average, or gave up too many times.
        if mean_mess_ratio >= *settings.threshold || early_stop_count >= max_chunk_gave_up {
            tested_but_soft_failure.push(encoding_iana);
            trace!(
                "{} was excluded because of initial chaos probing. \
                Gave up {} time(s). Computed mean chaos is {} %.",
                encoding_iana,
                early_stop_count,
                mean_mess_ratio * 100.0,
            );

            // A prioritized encoding that merely soft-failed is still kept as
            // a last-resort fallback in case nothing else succeeds.
            if settings.enable_fallback
                && !lazy_str_hard_failure
                && prioritized_encodings.contains(&encoding_iana)
            {
                let fallback_entry = Some(CharsetMatch::new(
                    bytes,
                    encoding_iana,
                    f32::from(settings.threshold),
                    false,
                    &vec![],
                    decoded_payload.as_deref(),
                ));

                // Route to the right fallback slot: content-declared encoding
                // first, then ascii, everything else treated as the utf-8 slot.
                match encoding_iana {
                    e if e == specified_encoding => fallback_specified = fallback_entry,
                    "ascii" => fallback_ascii = fallback_entry,
                    _ => fallback_u8 = fallback_entry,
                }
            }
            continue 'iana_encodings_loop;
        }
        trace!(
            "{} passed initial chaos probing. Mean measured chaos is {} %",
            encoding_iana,
            mean_mess_ratio * 100.0,
        );

        // Language coherence: score each retained chunk against the target
        // languages (skipped for ascii — no language signal there).
        let mut cd_ratios: Vec<CoherenceMatches> = vec![];
        if encoding_iana != "ascii" {
            cd_ratios.extend(md_chunks.iter().filter_map(|chunk| {
                coherence_ratio(
                    chunk.clone(),
                    Some(settings.language_threshold),
                    Some(target_languages.clone()),
                )
                .ok()
            }));
        }

        // Merge the per-chunk coherence results into one ranking.
        let cd_ratios_merged = merge_coherence_ratios(&cd_ratios);
        if !cd_ratios_merged.is_empty() {
            trace!(
                "We detected language {:?} using {}",
                cd_ratios_merged,
                encoding_iana
            );
        }

        // Record this candidate as a plausible match.
        results.append(CharsetMatch::new(
            bytes,
            encoding_iana,
            mean_mess_ratio,
            bom_or_sig_available,
            &cd_ratios_merged,
            decoded_payload.as_deref(),
        ));

        // Confident early exit: a very clean prioritized encoding, or the one
        // announced by the BOM/SIG, wins outright.
        if (mean_mess_ratio < 0.1 && prioritized_encodings.contains(&encoding_iana))
            || encoding_iana == sig_encoding.clone().unwrap_or_default()
        {
            debug!(
                "Encoding detection: {} is most likely the one.",
                encoding_iana
            );
            return CharsetMatches::from_single(
                results.get_by_encoding(encoding_iana).unwrap().clone(),
            );
        }
    }

    // No candidate survived: fall back, preferring the content-declared
    // encoding, then utf-8 (unless its decoding is identical to ascii's, in
    // which case ascii is the simpler answer), then ascii.
    if results.is_empty() {
        let fb = match (&fallback_specified, &fallback_u8, &fallback_ascii) {
            (Some(specified), _, _) => Some(specified),
            (None, Some(u8_fallback), None) => Some(u8_fallback),
            (None, Some(u8_fallback), Some(ascii))
                if u8_fallback.decoded_payload() != ascii.decoded_payload() =>
            {
                Some(u8_fallback)
            }
            (None, _, Some(ascii)) => Some(ascii),
            _ => None,
        };
        if let Some(fb_to_pass) = fb {
            debug!(
                "Encoding detection: will be used as a fallback match {}",
                fb_to_pass.encoding()
            );
            results.append(fb_to_pass.clone());
        };
    }

    // Final report.
    if results.is_empty() {
        debug!("Encoding detection: Unable to determine any suitable charset.");
    } else {
        debug!(
            "Encoding detection: Found {} as plausible (best-candidate) for content. \
            With {} alternatives.",
            results.get_best().unwrap().encoding(),
            results.len() - 1,
        );
    }
    results
}
|
|
|
|
|
|
let mut specified_encoding: String = String::new(); |
|
|
if settings.preemptive_behaviour { |
|
|
if let Some(enc) = any_specified_encoding(bytes, 4096) { |
|
|
trace!( |
|
|
"Detected declarative mark in sequence. Priority +1 given for {}.", |
|
|
&enc |
|
|
); |
|
|
specified_encoding = enc.to_string(); |
|
|
prioritized_encodings.push(&specified_encoding); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
let (sig_encoding, sig_payload) = identify_sig_or_bom(bytes); |
|
|
if let (Some(sig_enc), Some(sig_pay)) = (&sig_encoding, sig_payload) { |
|
|
trace!( |
|
|
"Detected a SIG or BOM mark on first {} byte(s). Priority +1 given for {}.", |
|
|
sig_pay.len(), |
|
|
sig_enc, |
|
|
); |
|
|
prioritized_encodings.push(sig_enc); |
|
|
} |
|
|
|
|
|
|
|
|
prioritized_encodings.extend(&["ascii", "utf-8"]); |
|
|
|
|
|
|
|
|
let mut iana_encodings: VecDeque<&str> = VecDeque::from(IANA_SUPPORTED.clone()); |
|
|
for pe in prioritized_encodings.iter().rev() { |
|
|
if let Some(index) = iana_encodings.iter().position(|x| x == pe) { |
|
|
let value = iana_encodings.remove(index).unwrap(); |
|
|
iana_encodings.push_front(value); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
let mut tested_but_hard_failure: Vec<&str> = vec![]; |
|
|
let mut tested_but_soft_failure: Vec<&str> = vec![]; |
|
|
let mut fallback_ascii: Option<CharsetMatch> = None; |
|
|
let mut fallback_u8: Option<CharsetMatch> = None; |
|
|
let mut fallback_specified: Option<CharsetMatch> = None; |
|
|
let mut results: CharsetMatches = CharsetMatches::default(); |
|
|
|
|
|
|
|
|
'iana_encodings_loop: for encoding_iana in iana_encodings { |
|
|
if (!settings.include_encodings.is_empty() |
|
|
&& !settings |
|
|
.include_encodings |
|
|
.contains(&encoding_iana.to_string())) |
|
|
|| settings |
|
|
.exclude_encodings |
|
|
.contains(&encoding_iana.to_string()) |
|
|
{ |
|
|
continue; |
|
|
} |
|
|
let bom_or_sig_available: bool = sig_encoding.as_deref() == Some(encoding_iana); |
|
|
|
|
|
let is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana); |
|
|
|
|
|
|
|
|
if !bom_or_sig_available && ["utf-16le", "utf-16be"].contains(&encoding_iana) { |
|
|
trace!( |
|
|
"Encoding {} won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE", |
|
|
encoding_iana, |
|
|
); |
|
|
continue; |
|
|
} |
|
|
|
|
|
|
|
|
let start_idx = match bom_or_sig_available { |
|
|
true => sig_payload.unwrap().len(), |
|
|
false => 0, |
|
|
}; |
|
|
let end_idx = match is_too_large_sequence && !is_multi_byte_decoder { |
|
|
true => MAX_PROCESSED_BYTES, |
|
|
false => bytes_length, |
|
|
}; |
|
|
let decoded_payload: Option<String> = if let Ok(payload) = decode( |
|
|
&bytes[start_idx..end_idx], |
|
|
encoding_iana, |
|
|
DecoderTrap::Strict, |
|
|
is_too_large_sequence && !is_multi_byte_decoder, |
|
|
false, |
|
|
) { |
|
|
(!is_too_large_sequence || is_multi_byte_decoder).then_some(payload) |
|
|
} else { |
|
|
trace!( |
|
|
"Code page {} does not fit given bytes sequence at ALL.", |
|
|
encoding_iana, |
|
|
); |
|
|
tested_but_hard_failure.push(encoding_iana); |
|
|
continue 'iana_encodings_loop; |
|
|
}; |
|
|
|
|
|
|
|
|
|
|
|
for encoding_soft_failed in &tested_but_soft_failure { |
|
|
if is_cp_similar(encoding_iana, encoding_soft_failed) { |
|
|
trace!("{} is deemed too similar to code page {} and was consider unsuited already. Continuing!", |
|
|
encoding_iana, |
|
|
encoding_soft_failed, |
|
|
); |
|
|
continue 'iana_encodings_loop; |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
let max_chunk_gave_up = 2.max(settings.steps / 4); |
|
|
let mut early_stop_count: usize = 0; |
|
|
let mut lazy_str_hard_failure = false; |
|
|
let mut md_ratios: Vec<f32> = vec![]; |
|
|
|
|
|
|
|
|
let target_languages = if is_multi_byte_decoder { |
|
|
mb_encoding_languages(encoding_iana) |
|
|
} else { |
|
|
encoding_languages(encoding_iana.to_string()) |
|
|
}; |
|
|
trace!( |
|
|
"{} should target any language(s) of {:?}", |
|
|
encoding_iana, |
|
|
target_languages, |
|
|
); |
|
|
|
|
|
|
|
|
|
|
|
let seq_len = match &decoded_payload { |
|
|
Some(payload) => payload.chars().count(), |
|
|
None => bytes_length, |
|
|
}; |
|
|
let starting_offset = match (bom_or_sig_available, &decoded_payload) { |
|
|
(true, None) => start_idx, |
|
|
_ => 0, |
|
|
}; |
|
|
let offsets = (starting_offset..seq_len).step_by((seq_len / settings.steps).max(1)); |
|
|
|
|
|
|
|
|
|
|
|
let mut md_chunks: Vec<String> = vec![]; |
|
|
'chunks_loop: for offset in offsets { |
|
|
let decoded_chunk_result = match &decoded_payload { |
|
|
|
|
|
Some(payload) => Ok(payload |
|
|
.chars() |
|
|
.skip(offset) |
|
|
.take(settings.chunk_size) |
|
|
.collect()), |
|
|
|
|
|
None => decode( |
|
|
&bytes[offset..(offset + settings.chunk_size).min(seq_len)], |
|
|
encoding_iana, |
|
|
DecoderTrap::Strict, |
|
|
false, |
|
|
false, |
|
|
), |
|
|
}; |
|
|
|
|
|
if is_invalid_chunk(&decoded_chunk_result, encoding_iana) { |
|
|
trace!( |
|
|
"LazyStr Loading: After MD chunk decode, code page {} \ |
|
|
does not fit given bytes sequence at ALL. {}", |
|
|
encoding_iana, |
|
|
match decoded_chunk_result { |
|
|
Ok(_) => String::from("non-ascii"), |
|
|
Err(message) => message.to_string(), |
|
|
}, |
|
|
); |
|
|
early_stop_count = max_chunk_gave_up; |
|
|
lazy_str_hard_failure = true; |
|
|
break 'chunks_loop; |
|
|
} |
|
|
let decoded_chunk = decoded_chunk_result.unwrap(); |
|
|
|
|
|
|
|
|
md_chunks.push(decoded_chunk.clone()); |
|
|
md_ratios.push(mess_ratio(decoded_chunk, Some(settings.threshold))); |
|
|
if md_ratios.last().unwrap() >= &settings.threshold { |
|
|
early_stop_count += 1; |
|
|
} |
|
|
if early_stop_count >= max_chunk_gave_up { |
|
|
break 'chunks_loop; |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
if !lazy_str_hard_failure && is_too_large_sequence && !is_multi_byte_decoder { |
|
|
let decoded_chunk_result = decode( |
|
|
&bytes[MAX_PROCESSED_BYTES..], |
|
|
encoding_iana, |
|
|
DecoderTrap::Strict, |
|
|
false, |
|
|
false, |
|
|
); |
|
|
if is_invalid_chunk(&decoded_chunk_result, encoding_iana) { |
|
|
trace!( |
|
|
"LazyStr Loading: After final lookup, code page {} does not fit \ |
|
|
given bytes sequence at ALL. {}", |
|
|
encoding_iana, |
|
|
decoded_chunk_result.unwrap_err().to_string(), |
|
|
); |
|
|
tested_but_hard_failure.push(encoding_iana); |
|
|
continue 'iana_encodings_loop; |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
let mean_mess_ratio = match md_ratios.is_empty() { |
|
|
true => 0.0, |
|
|
false => md_ratios.iter().sum::<f32>() / (md_ratios.len() as f32), |
|
|
}; |
|
|
|
|
|
if mean_mess_ratio >= *settings.threshold || early_stop_count >= max_chunk_gave_up { |
|
|
tested_but_soft_failure.push(encoding_iana); |
|
|
trace!( |
|
|
"{} was excluded because of initial chaos probing. \ |
|
|
Gave up {} time(s). Computed mean chaos is {} %.", |
|
|
encoding_iana, |
|
|
early_stop_count, |
|
|
mean_mess_ratio * 100.0, |
|
|
); |
|
|
|
|
|
if settings.enable_fallback |
|
|
&& !lazy_str_hard_failure |
|
|
&& prioritized_encodings.contains(&encoding_iana) |
|
|
{ |
|
|
let fallback_entry = Some(CharsetMatch::new( |
|
|
bytes, |
|
|
encoding_iana, |
|
|
f32::from(settings.threshold), |
|
|
false, |
|
|
&vec![], |
|
|
decoded_payload.as_deref(), |
|
|
)); |
|
|
|
|
|
match encoding_iana { |
|
|
e if e == specified_encoding => fallback_specified = fallback_entry, |
|
|
"ascii" => fallback_ascii = fallback_entry, |
|
|
_ => fallback_u8 = fallback_entry, |
|
|
} |
|
|
} |
|
|
continue 'iana_encodings_loop; |
|
|
} |
|
|
trace!( |
|
|
"{} passed initial chaos probing. Mean measured chaos is {} %", |
|
|
encoding_iana, |
|
|
mean_mess_ratio * 100.0, |
|
|
); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
let mut cd_ratios: Vec<CoherenceMatches> = vec![]; |
|
|
if encoding_iana != "ascii" { |
|
|
cd_ratios.extend(md_chunks.iter().filter_map(|chunk| { |
|
|
coherence_ratio( |
|
|
chunk.clone(), |
|
|
Some(settings.language_threshold), |
|
|
Some(target_languages.clone()), |
|
|
) |
|
|
.ok() |
|
|
})); |
|
|
} |
|
|
|
|
|
|
|
|
let cd_ratios_merged = merge_coherence_ratios(&cd_ratios); |
|
|
if !cd_ratios_merged.is_empty() { |
|
|
trace!( |
|
|
"We detected language {:?} using {}", |
|
|
cd_ratios_merged, |
|
|
encoding_iana |
|
|
); |
|
|
} |
|
|
|
|
|
|
|
|
results.append(CharsetMatch::new( |
|
|
bytes, |
|
|
encoding_iana, |
|
|
mean_mess_ratio, |
|
|
bom_or_sig_available, |
|
|
&cd_ratios_merged, |
|
|
decoded_payload.as_deref(), |
|
|
)); |
|
|
|
|
|
if (mean_mess_ratio < 0.1 && prioritized_encodings.contains(&encoding_iana)) |
|
|
|| encoding_iana == sig_encoding.clone().unwrap_or_default() |
|
|
{ |
|
|
debug!( |
|
|
"Encoding detection: {} is most likely the one.", |
|
|
encoding_iana |
|
|
); |
|
|
return CharsetMatches::from_single( |
|
|
results.get_by_encoding(encoding_iana).unwrap().clone(), |
|
|
); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
if results.is_empty() { |
|
|
let fb = match (&fallback_specified, &fallback_u8, &fallback_ascii) { |
|
|
(Some(specified), _, _) => Some(specified), |
|
|
(None, Some(u8_fallback), None) => Some(u8_fallback), |
|
|
(None, Some(u8_fallback), Some(ascii)) |
|
|
if u8_fallback.decoded_payload() != ascii.decoded_payload() => |
|
|
{ |
|
|
Some(u8_fallback) |
|
|
} |
|
|
(None, _, Some(ascii)) => Some(ascii), |
|
|
_ => None, |
|
|
}; |
|
|
if let Some(fb_to_pass) = fb { |
|
|
debug!( |
|
|
"Encoding detection: will be used as a fallback match {}", |
|
|
fb_to_pass.encoding() |
|
|
); |
|
|
results.append(fb_to_pass.clone()); |
|
|
}; |
|
|
} |
|
|
|
|
|
|
|
|
if results.is_empty() { |
|
|
debug!("Encoding detection: Unable to determine any suitable charset."); |
|
|
} else { |
|
|
debug!( |
|
|
"Encoding detection: Found {} as plausible (best-candidate) for content. \ |
|
|
With {} alternatives.", |
|
|
results.get_best().unwrap().encoding(), |
|
|
results.len() - 1, |
|
|
); |
|
|
} |
|
|
results |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
pub fn from_path( |
|
|
path: &Path, |
|
|
settings: Option<NormalizerSettings>, |
|
|
) -> Result<CharsetMatches, String> { |
|
|
|
|
|
let mut file = File::open(path).map_err(|e| format!("Error opening file: {e}"))?; |
|
|
let file_size = file.metadata().map(|m| m.len()).unwrap_or_default(); |
|
|
|
|
|
let mut buffer = Vec::with_capacity(file_size as usize); |
|
|
file.read_to_end(&mut buffer) |
|
|
.map_err(|e| format!("Error reading from file: {e}"))?; |
|
|
|
|
|
|
|
|
Ok(from_bytes(&buffer, settings)) |
|
|
} |
|
|
|