symphonia_core/io/
bit.rs

1// Symphonia
2// Copyright (c) 2019-2022 The Project Symphonia Developers.
3//
4// This Source Code Form is subject to the terms of the Mozilla Public
5// License, v. 2.0. If a copy of the MPL was not distributed with this
6// file, You can obtain one at https://mozilla.org/MPL/2.0/.
7
8use std::cmp::min;
9use std::io;
10
11use crate::io::ReadBytes;
12use crate::util::bits::*;
13
/// Builds the error returned when a read runs past the end of the bitstream.
fn end_of_bitstream_error<T>() -> io::Result<T> {
    let err = io::Error::new(io::ErrorKind::Other, "unexpected end of bitstream");
    Err(err)
}
17
18pub mod vlc {
19    //! The `vlc` module provides support for decoding variable-length codes (VLC).
20
21    use std::cmp::max;
22    use std::collections::{BTreeMap, VecDeque};
23    use std::io;
24
25    fn codebook_error<T>(desc: &'static str) -> io::Result<T> {
26        Err(io::Error::new(io::ErrorKind::Other, desc))
27    }
28
    /// `BitOrder` describes the relationship between the order of bits in the provided codewords
    /// and the order in which bits are read.
    #[derive(Copy, Clone)]
    pub enum BitOrder {
        /// The provided codewords have bits in the same order as the order in which they're being
        /// read.
        Verbatim,
        /// The provided codewords have bits in the reverse order as the order in which they're
        /// being read.
        Reverse,
    }
40
    /// `CodebookEntry` provides the functions required for an entry in the `Codebook`.
    ///
    /// An entry is either a *value* entry (a decoded symbol plus its codeword length) or a *jump*
    /// entry (an offset into the table plus the number of bits to read for the next lookup).
    pub trait CodebookEntry: Copy + Clone + Default {
        /// The type of a value in this entry.
        type ValueType: Copy + From<u8>;
        /// The type of a jump offset in this entry.
        type OffsetType: Copy;

        /// The maximum jump offset that can be stored by this entry type.
        const JUMP_OFFSET_MAX: u32;

        /// Creates a new value entry.
        fn new_value(value: Self::ValueType, len: u8) -> Self;

        /// Creates a new jump entry.
        fn new_jump(offset: u32, len: u8) -> Self;

        /// Returns `true` if this entry is a value entry.
        fn is_value(&self) -> bool;

        /// Returns `true` if this entry is a jump entry.
        fn is_jump(&self) -> bool;

        /// Gets the value. Only valid for a value entry.
        fn value(&self) -> Self::ValueType;

        /// Gets the length of the value's codeword in bits. Only valid for a value entry.
        fn value_len(&self) -> u32;

        /// Gets the position in the table to jump to. Only valid for a jump entry.
        fn jump_offset(&self) -> usize;

        /// Gets the number of bits to read after jumping in the table. Only valid for a jump
        /// entry.
        fn jump_len(&self) -> u32;
    }
75
    // Declares a `CodebookEntry` implementation as a two-field tuple struct.
    //
    // Packed representation:
    //   - Value entry: field 0 = the decoded value, field 1 = codeword length (flag bit clear).
    //   - Jump entry:  field 0 = the next lookup length, field 1 = `$jump_flag | offset`.
    //
    // `$jump_flag` is the most-significant bit of `$offset_type`, so `$offset_max` must be
    // `$jump_flag - 1` (all offset bits with the flag clear).
    macro_rules! decl_entry {
        (
            #[doc = $expr:expr]
            $name:ident, $value_type:ty, $offset_type:ty, $offset_max:expr, $jump_flag:expr
        ) => {
            #[doc = $expr]
            #[derive(Copy, Clone, Default)]
            pub struct $name($value_type, $offset_type);

            impl CodebookEntry for $name {
                type ValueType = $value_type;
                type OffsetType = $offset_type;

                const JUMP_OFFSET_MAX: u32 = $offset_max;

                #[inline(always)]
                fn new_value(value: Self::ValueType, len: u8) -> Self {
                    $name(value, len.into())
                }

                #[inline(always)]
                fn new_jump(offset: u32, len: u8) -> Self {
                    // The offset is stored with the flag bit set; `len` moves into field 0.
                    $name(len.into(), $jump_flag | offset as Self::OffsetType)
                }

                #[inline(always)]
                fn is_jump(&self) -> bool {
                    self.1 & $jump_flag != 0
                }

                #[inline(always)]
                fn is_value(&self) -> bool {
                    self.1 & $jump_flag == 0
                }

                #[inline(always)]
                fn value(&self) -> Self::ValueType {
                    debug_assert!(self.is_value());
                    self.0
                }

                #[inline(always)]
                fn value_len(&self) -> u32 {
                    debug_assert!(self.is_value());
                    // Mask off the (clear) flag bit to recover the codeword length.
                    (self.1 & (!$jump_flag)).into()
                }

                #[inline(always)]
                fn jump_offset(&self) -> usize {
                    debug_assert!(self.is_jump());
                    // Mask off the flag bit to recover the table offset.
                    (self.1 & (!$jump_flag)) as usize
                }

                #[inline(always)]
                fn jump_len(&self) -> u32 {
                    debug_assert!(self.is_jump());
                    self.0.into()
                }
            }
        };
    }
137
138    decl_entry!(
139        /// `Entry8x8` is a codebook entry for 8-bit values with codes up-to 8-bits.
140        Entry8x8,
141        u8,
142        u8,
143        0x7f,
144        0x80
145    );
146
147    decl_entry!(
148        /// `Entry8x16` is a codebook entry for 8-bit values with codes up-to 16-bits.
149        Entry8x16,
150        u8,
151        u16,
152        0x7fff,
153        0x8000
154    );
155
156    decl_entry!(
157        /// `Entry8x32` is a codebook entry for 8-bit values with codes up-to 32-bits.
158        Entry8x32,
159        u8,
160        u32,
161        0x7fff_ffff,
162        0x8000_0000
163    );
164
165    decl_entry!(
166        /// `Entry16x8` is a codebook entry for 16-bit values with codes up-to 8-bits.
167        Entry16x8,
168        u16,
169        u8,
170        0x7f,
171        0x80
172    );
173
174    decl_entry!(
175        /// `Entry16x16` is a codebook entry for 16-bit values with codes up-to 16-bits.
176        Entry16x16,
177        u16,
178        u16,
179        0x7fff,
180        0x8000
181    );
182
183    decl_entry!(
184        /// `Entry16x32` is a codebook entry for 16-bit values with codes up-to 32-bits.
185        Entry16x32,
186        u16,
187        u32,
188        0x7fff_ffff,
189        0x8000_0000
190    );
191
192    decl_entry!(
193        /// `Entry32x8` is a codebook entry for 32-bit values with codes up-to 8-bits.
194        Entry32x8,
195        u32,
196        u8,
197        0x7fff,
198        0x80
199    );
200
201    decl_entry!(
202        /// `Entry32x16` is a codebook entry for 32-bit values with codes up-to 16-bits.
203        Entry32x16,
204        u32,
205        u16,
206        0x7fff,
207        0x8000
208    );
209
210    decl_entry!(
211        /// `Entry32x32` is a codebook entry for 32-bit values with codes up-to 32-bits.
212        Entry32x32,
213        u32,
214        u32,
215        0x7fff_ffff,
216        0x8000_0000
217    );
218
    /// `Codebook` is a variable-length code decoding table that may be used to efficiently read
    /// symbols from a source of bits.
    #[derive(Default)]
    pub struct Codebook<E: CodebookEntry> {
        /// The lookup table of value and jump entries. When non-empty, entry 0 is always a jump
        /// to the root decoding block.
        pub table: Vec<E>,
        /// The length in bits of the longest codeword in the codebook.
        pub max_code_len: u32,
        /// The number of bits to use for the initial (root block) table lookup.
        pub init_block_len: u32,
    }

    impl<E: CodebookEntry> Codebook<E> {
        /// Returns `true` if the `Codebook` is empty.
        pub fn is_empty(&self) -> bool {
            self.table.is_empty()
        }
    }
234
    /// A codeword prefix, its width in bits, and the associated decoded value, staged in a block
    /// while the decoding tree is being built.
    #[derive(Default)]
    struct CodebookValue<E: CodebookEntry> {
        // The final (intra-block) portion of the codeword.
        prefix: u16,
        // The number of significant bits in `prefix`.
        width: u8,
        // The value the codeword decodes to.
        value: E::ValueType,
    }

    impl<E: CodebookEntry> CodebookValue<E> {
        /// Creates a new staged value from a prefix, its width, and the decoded value.
        fn new(prefix: u16, width: u8, value: E::ValueType) -> Self {
            CodebookValue { prefix, width, value }
        }
    }
247
    /// One node of the intermediate decoding tree: child blocks keyed by prefix, plus the values
    /// that terminate within this block.
    #[derive(Default)]
    struct CodebookBlock<E: CodebookEntry> {
        // The number of bits consumed by a lookup in this block.
        width: u8,
        // Child blocks, keyed by their prefix. BTreeMap keeps prefixes sorted ascending.
        nodes: BTreeMap<u16, usize>,
        // Values whose codewords terminate in this block.
        values: Vec<CodebookValue<E>>,
    }

    /// `CodebookBuilder` generates a `Codebook` using a provided codebook specification and
    /// description.
    pub struct CodebookBuilder {
        // Maximum number of bits consumed from the source per table lookup (1..=16).
        max_bits_per_block: u8,
        // Bit order of the provided codewords relative to the read order.
        bit_order: BitOrder,
        // Whether unused (zero-length) codewords are permitted.
        is_sparse: bool,
    }
262
    impl CodebookBuilder {
        /// Instantiates a new `CodebookBuilder`.
        ///
        /// The `bit_order` parameter specifies if the codeword bits should be reversed when
        /// constructing the codebook. If the `BitReader` or `BitStream` reading the constructed
        /// codebook reads bits in an order different from the order of the provided codewords,
        /// then this option can be used to make them compatible.
        pub fn new(bit_order: BitOrder) -> Self {
            CodebookBuilder { max_bits_per_block: 4, bit_order, is_sparse: false }
        }

        /// Instantiates a new `CodebookBuilder` for sparse codebooks.
        ///
        /// A sparse codebook is one in which not all codewords are valid. These invalid codewords
        /// are effectively "unused" and have no value. Therefore, it is illegal for a bitstream to
        /// contain the codeword bit pattern.
        ///
        /// Unused codewords are marked by having a length of 0.
        pub fn new_sparse(bit_order: BitOrder) -> Self {
            CodebookBuilder { max_bits_per_block: 4, bit_order, is_sparse: true }
        }

        /// Specify the maximum number of bits that should be consumed from the source at a time.
        /// This value must be within the range 1 <= `max_bits_per_read` <= 16. Values outside of
        /// this range will cause this function to panic. If not provided, a value will be
        /// automatically chosen.
        pub fn bits_per_read(&mut self, max_bits_per_read: u8) -> &mut Self {
            assert!(max_bits_per_read <= 16);
            assert!(max_bits_per_read > 0);
            self.max_bits_per_block = max_bits_per_read;
            self
        }

        /// Flattens the decoding tree (`blocks`) into a single lookup table of `CodebookEntry`s.
        ///
        /// Returns an error if a jump offset would exceed `E::JUMP_OFFSET_MAX`, or if the tree is
        /// incomplete while `is_sparse` is false.
        fn generate_lut<E: CodebookEntry>(
            bit_order: BitOrder,
            is_sparse: bool,
            blocks: &[CodebookBlock<E>],
        ) -> io::Result<Vec<E>> {
            // The codebook table.
            let mut table = Vec::new();

            // Queue of block ids pending flattening, in breadth-first order.
            let mut queue = VecDeque::new();

            // The computed end of the table given the blocks in the queue.
            let mut table_end = 0u32;

            if !blocks.is_empty() {
                // Start traversal at the first block.
                queue.push_front(0);

                // The first entry in the table is always a jump to the first block.
                let block = &blocks[0];
                table.push(CodebookEntry::new_jump(1, block.width));
                table_end += 1 + (1 << block.width);
            }

            // Traverse the tree in breadth-first order.
            while !queue.is_empty() {
                // Count of the total number of entries added to the table by this block.
                let mut entry_count = 0;

                // Get the block id at the front of the queue.
                let block_id = queue.pop_front().unwrap();

                // Get the block at the front of the queue.
                let block = &blocks[block_id];
                let block_len = 1 << block.width;

                // The starting index of the current block.
                let table_base = table.len();

                // Resize the table to accommodate all entries within the block.
                table.resize(table_base + block_len, Default::default());

                // Push child blocks onto the queue and record the jump entries in the table. Jumps
                // will be in order of increasing prefix because of the implicit sorting provided
                // by BTreeMap, thus traversing a level of the tree left-to-right.
                for (&child_block_prefix, &child_block_id) in block.nodes.iter() {
                    queue.push_back(child_block_id);

                    // The width of the child block in bits.
                    let child_block_width = blocks[child_block_id].width;

                    // Verify the jump offset does not exceed the entry's jump maximum.
                    if table_end > E::JUMP_OFFSET_MAX {
                        return codebook_error("core (io): codebook overflow");
                    }

                    // Determine the offset into the table depending on the bit-order.
                    let offset = match bit_order {
                        BitOrder::Verbatim => child_block_prefix,
                        BitOrder::Reverse => {
                            child_block_prefix.reverse_bits().rotate_left(u32::from(block.width))
                        }
                    } as usize;

                    // Add a jump entry to table.
                    let jump_entry = CodebookEntry::new_jump(table_end, child_block_width);

                    table[table_base + offset] = jump_entry;

                    // Add the length of the child block to the end of the table.
                    table_end += 1 << child_block_width;

                    // Update the entry count.
                    entry_count += 1;
                }

                // Add value entries into the table. If a value has a prefix width less than the
                // block width, then do-not-care bits must be added to the end of the prefix to pad
                // it to the block width.
                for value in block.values.iter() {
                    // The number of do-not-care bits to add to the value's prefix.
                    let num_dnc_bits = block.width - value.width;

                    // Extend the value's prefix to the block's width.
                    let base_prefix = (value.prefix << num_dnc_bits) as usize;

                    // Using the base prefix, synthesize all prefixes for this value.
                    let count = 1 << num_dnc_bits;

                    // The value entry that will be duplicated.
                    let value_entry = CodebookEntry::new_value(value.value, value.width);

                    match bit_order {
                        BitOrder::Verbatim => {
                            // For verbatim bit order, the do-not-care bits are in the LSb
                            // position, so the duplicates occupy a contiguous range.
                            let start = table_base + base_prefix;
                            let end = start + count;

                            for entry in table[start..end].iter_mut() {
                                *entry = value_entry;
                            }
                        }
                        BitOrder::Reverse => {
                            // For reverse bit order, the do-not-care bits are in the MSb position,
                            // so each duplicate's offset must be computed individually.
                            let start = base_prefix;
                            let end = start + count;

                            for prefix in start..end {
                                let offset =
                                    prefix.reverse_bits().rotate_left(u32::from(block.width));

                                table[table_base + offset] = value_entry;
                            }
                        }
                    }

                    // Update the entry count.
                    entry_count += count;
                }

                // If the decoding tree is not sparse, the number of entries added to the table
                // should equal the block length. It is a fatal error if this is not true.
                if !is_sparse && entry_count != block_len {
                    return codebook_error("core (io): codebook is incomplete");
                }
            }

            Ok(table)
        }

        /// Construct a `Codebook` using the given codewords, their respective lengths, and values.
        ///
        /// This function may fail if the provided codewords do not form a complete VLC tree, or if
        /// the `CodebookEntry` is undersized.
        ///
        /// This function will panic if the number of code words, code lengths, and values differ.
        pub fn make<E: CodebookEntry>(
            &mut self,
            code_words: &[u32],
            code_lens: &[u8],
            values: &[E::ValueType],
        ) -> io::Result<Codebook<E>> {
            assert!(code_words.len() == code_lens.len());
            assert!(code_words.len() == values.len());

            let mut blocks = Vec::<CodebookBlock<E>>::new();

            let mut max_code_len = 0;

            // Only attempt to generate something if there are code words.
            if !code_words.is_empty() {
                // Mask selecting the low max_bits_per_block bits of a codeword chunk.
                let prefix_mask = !(!0 << self.max_bits_per_block);

                // Push a root block.
                blocks.push(Default::default());

                // Populate the tree.
                for ((&code, &code_len), &value) in code_words.iter().zip(code_lens).zip(values) {
                    let mut parent_block_id = 0;
                    let mut len = code_len;

                    // A zero length codeword in a sparse codebook is allowed, but not in a regular
                    // codebook.
                    if code_len == 0 {
                        if self.is_sparse {
                            continue;
                        }
                        else {
                            return codebook_error("core (io): zero length codeword");
                        }
                    }

                    // Consume the codeword max_bits_per_block bits at a time, descending (and
                    // creating, as needed) one tree level per chunk.
                    while len > self.max_bits_per_block {
                        len -= self.max_bits_per_block;

                        let prefix = ((code >> len) & prefix_mask) as u16;

                        // Recurse down the tree.
                        if let Some(&block_id) = blocks[parent_block_id].nodes.get(&prefix) {
                            parent_block_id = block_id;
                        }
                        else {
                            // Add a child block to the parent block.
                            let block_id = blocks.len();

                            let block = &mut blocks[parent_block_id];

                            block.nodes.insert(prefix, block_id);

                            // The parent's block width must accommodate the prefix of the child.
                            // This is always max_bits_per_block bits.
                            block.width = self.max_bits_per_block;

                            // Append the new block.
                            blocks.push(Default::default());

                            parent_block_id = block_id;
                        }
                    }

                    // The final chunk of code bits always has <= max_bits_per_block bits. Obtain
                    // the final prefix.
                    let prefix = code & (prefix_mask >> (self.max_bits_per_block - len));

                    let block = &mut blocks[parent_block_id];

                    // Push the value.
                    block.values.push(CodebookValue::new(prefix as u16, len, value));

                    // Update the block's width.
                    block.width = max(block.width, len);

                    // Update maximum observed codeword.
                    max_code_len = max(max_code_len, code_len);
                }
            }

            // Generate the codebook lookup table.
            let table = CodebookBuilder::generate_lut(self.bit_order, self.is_sparse, &blocks)?;

            // Determine the first block length if skipping the initial jump entry.
            let init_block_len = table.first().map(|block| block.jump_len()).unwrap_or(0);

            Ok(Codebook { table, max_code_len: u32::from(max_code_len), init_block_len })
        }
    }
522}
523
/// Sealed fetch traits implemented by concrete bit readers; not part of the public API surface.
mod private {
    use std::io;

    /// Bit-cache fetching operations for a left-to-right (MSb-first) bit source.
    pub trait FetchBitsLtr {
        /// Discard any remaining bits in the source and fetch new bits.
        fn fetch_bits(&mut self) -> io::Result<()>;

        /// Fetch new bits, and append them after the remaining bits.
        fn fetch_bits_partial(&mut self) -> io::Result<()>;

        /// Get all the bits in the source.
        fn get_bits(&self) -> u64;

        /// Get the number of bits left in the source.
        fn num_bits_left(&self) -> u32;

        /// Consume `num` bits from the source.
        fn consume_bits(&mut self, num: u32);
    }

    /// Bit-cache fetching operations for a right-to-left (LSb-first) bit source.
    pub trait FetchBitsRtl {
        /// Discard any remaining bits in the source and fetch new bits.
        fn fetch_bits(&mut self) -> io::Result<()>;

        /// Fetch new bits, and append them after the remaining bits.
        fn fetch_bits_partial(&mut self) -> io::Result<()>;

        /// Get all the bits in the source.
        fn get_bits(&self) -> u64;

        /// Get the number of bits left in the source.
        fn num_bits_left(&self) -> u32;

        /// Consume `num` bits from the source.
        fn consume_bits(&mut self, num: u32);
    }
}
561
/// A `FiniteBitStream` is a bit stream that has a known length in bits.
pub trait FiniteBitStream {
    /// Gets the number of bits left unread.
    fn bits_left(&self) -> u64;
}
567
/// `ReadBitsLtr` reads bits from most-significant to least-significant.
pub trait ReadBitsLtr: private::FetchBitsLtr {
    /// Discards any saved bits and resets the `BitStream` to prepare it for a byte-aligned read.
    #[inline(always)]
    fn realign(&mut self) {
        // Drop the partial-byte remainder (0-7 bits) so the next read starts on a byte boundary.
        let skip = self.num_bits_left() & 0x7;
        self.consume_bits(skip);
    }

    /// Ignores the specified number of bits from the stream or returns an error.
    #[inline(always)]
    fn ignore_bits(&mut self, mut num_bits: u32) -> io::Result<()> {
        if num_bits <= self.num_bits_left() {
            self.consume_bits(num_bits);
        }
        else {
            // Consume whole bit caches directly.
            while num_bits > self.num_bits_left() {
                num_bits -= self.num_bits_left();
                self.fetch_bits()?;
            }

            if num_bits > 0 {
                // Shift out in two parts to prevent panicking when num_bits == 64.
                self.consume_bits(num_bits - 1);
                self.consume_bits(1);
            }
        }

        Ok(())
    }

    /// Ignores one bit from the stream or returns an error.
    #[inline(always)]
    fn ignore_bit(&mut self) -> io::Result<()> {
        self.ignore_bits(1)
    }

    /// Read a single bit as a boolean value or returns an error.
    #[inline(always)]
    fn read_bool(&mut self) -> io::Result<bool> {
        if self.num_bits_left() < 1 {
            self.fetch_bits()?;
        }

        // The next unread bit is the MSb of the 64-bit cache.
        let bit = self.get_bits() & (1 << 63) != 0;

        self.consume_bits(1);
        Ok(bit)
    }

    /// Reads and returns a single bit or returns an error.
    #[inline(always)]
    fn read_bit(&mut self) -> io::Result<u32> {
        if self.num_bits_left() < 1 {
            self.fetch_bits()?;
        }

        // The next unread bit is the MSb of the 64-bit cache.
        let bit = self.get_bits() >> 63;

        self.consume_bits(1);

        Ok(bit as u32)
    }

    /// Reads and returns up to 32-bits or returns an error.
    #[inline(always)]
    fn read_bits_leq32(&mut self, mut bit_width: u32) -> io::Result<u32> {
        debug_assert!(bit_width <= u32::BITS);

        // Shift in two 32-bit operations instead of a single 64-bit operation to avoid panicking
        // when bit_width == 0 (and thus shifting right 64-bits). This is preferred to branching
        // the bit_width == 0 case, since reading up-to 32-bits at a time is a hot code-path.
        let mut bits = (self.get_bits() >> u32::BITS) >> (u32::BITS - bit_width);

        while bit_width > self.num_bits_left() {
            bit_width -= self.num_bits_left();

            self.fetch_bits()?;

            // Unlike the first shift, bit_width is always > 0 here so this operation will never
            // shift by > 63 bits.
            bits |= self.get_bits() >> (u64::BITS - bit_width);
        }

        self.consume_bits(bit_width);

        Ok(bits as u32)
    }

    /// Reads up to 32-bits and interprets them as a signed two's complement integer or returns an
    /// error.
    #[inline(always)]
    fn read_bits_leq32_signed(&mut self, bit_width: u32) -> io::Result<i32> {
        let value = self.read_bits_leq32(bit_width)?;
        Ok(sign_extend_leq32_to_i32(value, bit_width))
    }

    /// Reads and returns up to 64-bits or returns an error.
    #[inline(always)]
    fn read_bits_leq64(&mut self, mut bit_width: u32) -> io::Result<u64> {
        debug_assert!(bit_width <= u64::BITS);

        // Hard-code the bit_width == 0 case as it's not possible to handle both the bit_width == 0
        // and bit_width == 64 cases branchlessly. This should be optimized out when bit_width is
        // known at compile time. Since it's generally rare to need to read up-to 64-bits at a time
        // (as opposed to 32-bits), this is an acceptable solution.
        if bit_width == 0 {
            Ok(0)
        }
        else {
            // Since bit_width is always > 0, this shift operation is always < 64, and will
            // therefore never panic.
            let mut bits = self.get_bits() >> (u64::BITS - bit_width);

            while bit_width > self.num_bits_left() {
                bit_width -= self.num_bits_left();

                self.fetch_bits()?;

                bits |= self.get_bits() >> (u64::BITS - bit_width);
            }

            // Shift in two parts to prevent panicking when bit_width == 64.
            self.consume_bits(bit_width - 1);
            self.consume_bits(1);

            Ok(bits)
        }
    }

    /// Reads up to 64-bits and interprets them as a signed two's complement integer or returns an
    /// error.
    #[inline(always)]
    fn read_bits_leq64_signed(&mut self, bit_width: u32) -> io::Result<i64> {
        let value = self.read_bits_leq64(bit_width)?;
        Ok(sign_extend_leq64_to_i64(value, bit_width))
    }

    /// Reads and returns a unary zeros encoded integer or an error.
    #[inline(always)]
    fn read_unary_zeros(&mut self) -> io::Result<u32> {
        let mut num = 0;

        loop {
            // Get the number of leading zeros.
            let num_zeros = self.get_bits().leading_zeros();

            if num_zeros >= self.num_bits_left() {
                // If the number of zeros exceeds the number of bits left then all the remaining
                // bits were 0.
                num += self.num_bits_left();
                self.fetch_bits()?;
            }
            else {
                // Otherwise, a 1 bit was encountered after `num_zeros` 0 bits.
                num += num_zeros;

                // Since bits are shifted off the cache after they're consumed, for there to be a
                // 1 bit there must be at least one extra available bit in the cache that can be
                // consumed after the 0 bits.
                self.consume_bits(num_zeros);
                self.consume_bits(1);

                // Done decoding.
                break;
            }
        }

        Ok(num)
    }

    /// Reads and returns a unary zeros encoded integer that is capped to a maximum value.
    #[inline(always)]
    fn read_unary_zeros_capped(&mut self, mut limit: u32) -> io::Result<u32> {
        let mut num = 0;

        loop {
            // Get the number of leading zeros, capped to the limit.
            let num_bits_left = self.num_bits_left();
            let num_zeros = min(self.get_bits().leading_zeros(), num_bits_left);

            if num_zeros >= limit {
                // There are at least as many zeros as the limit. Consume exactly `limit` zeros;
                // no terminator bit is consumed.
                num += limit;
                self.consume_bits(limit);
                break;
            }
            else {
                // There are fewer zeros than the limit. Either a terminator was encountered OR
                // more bits are needed.
                limit -= num_zeros;
                num += num_zeros;

                if num_zeros < num_bits_left {
                    // There are fewer zeros than the number of bits left in the reader. Thus, a
                    // 1 terminator bit follows the zeros and both can be consumed now.
                    self.consume_bits(num_zeros);
                    self.consume_bits(1);
                    break;
                }
            }

            // The cache was exhausted by zeros; refill and keep counting.
            self.fetch_bits()?;
        }

        Ok(num)
    }

    /// Reads and returns a unary ones encoded integer or an error.
    #[inline(always)]
    fn read_unary_ones(&mut self) -> io::Result<u32> {
        // Note: This algorithm is identical to read_unary_zeros except flipped for 1s.
        let mut num = 0;

        loop {
            let num_ones = self.get_bits().leading_ones();

            if num_ones >= self.num_bits_left() {
                num += self.num_bits_left();
                self.fetch_bits()?;
            }
            else {
                num += num_ones;

                self.consume_bits(num_ones);
                self.consume_bits(1);

                break;
            }
        }

        Ok(num)
    }

    /// Reads and returns a unary ones encoded integer that is capped to a maximum value.
    #[inline(always)]
    fn read_unary_ones_capped(&mut self, mut limit: u32) -> io::Result<u32> {
        // Note: This algorithm is identical to read_unary_zeros_capped except flipped for 1s.
        let mut num = 0;

        loop {
            let num_bits_left = self.num_bits_left();
            let num_ones = min(self.get_bits().leading_ones(), num_bits_left);

            if num_ones >= limit {
                num += limit;
                self.consume_bits(limit);
                break;
            }
            else {
                limit -= num_ones;
                num += num_ones;

                if num_ones < num_bits_left {
                    self.consume_bits(num_ones);
                    self.consume_bits(1);
                    break;
                }
            }

            self.fetch_bits()?;
        }

        Ok(num)
    }

    /// Reads a codebook value from the `BitStream` using the provided `Codebook` and returns the
    /// decoded value or an error.
    #[inline(always)]
    fn read_codebook<E: vlc::CodebookEntry>(
        &mut self,
        codebook: &vlc::Codebook<E>,
    ) -> io::Result<(E::ValueType, u32)> {
        // Attempt to refill the bit buffer with enough bits for the longest codeword in the
        // codebook. However, this does not mean the bit buffer will have enough bits to decode a
        // codeword.
        if self.num_bits_left() < codebook.max_code_len {
            self.fetch_bits_partial()?;
        }

        // The number of bits actually buffered in the bit buffer.
        let num_bits_left = self.num_bits_left();

        let mut bits = self.get_bits();

        // The initial lookup reads `init_block_len` bits; the `+ 1` skips the jump entry that is
        // always stored at table index 0.
        let mut block_len = codebook.init_block_len;
        let mut entry = codebook.table[(bits >> (u64::BITS - block_len)) as usize + 1];

        let mut consumed = 0;

        while entry.is_jump() {
            // Consume the bits used for the initial or previous jump iteration.
            consumed += block_len;
            bits <<= block_len;

            // Since this is a jump entry, if there are no bits left then the bitstream ended early.
            if consumed > num_bits_left {
                return end_of_bitstream_error();
            }

            // Prepare for the next jump.
            block_len = entry.jump_len();

            let index = bits >> (u64::BITS - block_len);

            // Jump to the next entry.
            entry = codebook.table[entry.jump_offset() + index as usize];
        }

        // The entry is always a value entry at this point. Consume the bits containing the value.
        consumed += entry.value_len();

        if consumed > num_bits_left {
            return end_of_bitstream_error();
        }

        self.consume_bits(consumed);

        Ok((entry.value(), consumed))
    }
}
889
/// `BitStreamLtr` reads bits from most-significant to least-significant from any source
/// that implements [`ReadBytes`].
///
/// Stated another way, if N-bits are read from a `BitStreamLtr` then bit 0, the first bit read,
/// is the most-significant bit, and bit N-1, the last bit read, is the least-significant.
pub struct BitStreamLtr<'a, B: ReadBytes> {
    // The underlying byte source.
    reader: &'a mut B,
    // The bit buffer. The next bit to be read is the most-significant bit.
    bits: u64,
    // The number of unread bits remaining in `bits`.
    n_bits_left: u32,
}
900
901impl<'a, B: ReadBytes> BitStreamLtr<'a, B> {
902    /// Instantiate a new `BitStreamLtr` with the given source.
903    pub fn new(reader: &'a mut B) -> Self {
904        BitStreamLtr { reader, bits: 0, n_bits_left: 0 }
905    }
906}
907
impl<'a, B: ReadBytes> private::FetchBitsLtr for BitStreamLtr<'a, B> {
    // Refill the bit buffer with exactly one byte from the underlying reader, placed in the
    // most-significant byte of the buffer. Any previously buffered bits are discarded.
    #[inline(always)]
    fn fetch_bits(&mut self) -> io::Result<()> {
        self.bits = u64::from(self.reader.read_u8()?) << 56;
        self.n_bits_left = u8::BITS;
        Ok(())
    }

    // Not yet implemented (`todo!`). Operations that require a partial refill, such as
    // `read_codebook`, will panic if this is invoked on a `BitStreamLtr`.
    #[inline(always)]
    fn fetch_bits_partial(&mut self) -> io::Result<()> {
        todo!()
    }

    #[inline(always)]
    fn get_bits(&self) -> u64 {
        self.bits
    }

    #[inline(always)]
    fn num_bits_left(&self) -> u32 {
        self.n_bits_left
    }

    // Discard `num` bits by shifting them out the most-significant end of the buffer.
    #[inline(always)]
    fn consume_bits(&mut self, num: u32) {
        self.n_bits_left -= num;
        self.bits <<= num;
    }
}

// `BitStreamLtr` uses all the default implementations of `ReadBitsLtr`.
impl<'a, B: ReadBytes> ReadBitsLtr for BitStreamLtr<'a, B> {}
939
/// `BitReaderLtr` reads bits from most-significant to least-significant from any `&[u8]`.
///
/// Stated another way, if N-bits are read from a `BitReaderLtr` then bit 0, the first bit read,
/// is the most-significant bit, and bit N-1, the last bit read, is the least-significant.
pub struct BitReaderLtr<'a> {
    // The unread remainder of the byte slice.
    buf: &'a [u8],
    // The bit buffer. The next bit to be read is the most-significant bit.
    bits: u64,
    // The number of unread bits remaining in `bits`.
    n_bits_left: u32,
}
949
950impl<'a> BitReaderLtr<'a> {
951    /// Instantiate a new `BitReaderLtr` with the given buffer.
952    pub fn new(buf: &'a [u8]) -> Self {
953        BitReaderLtr { buf, bits: 0, n_bits_left: 0 }
954    }
955}
956
impl<'a> private::FetchBitsLtr for BitReaderLtr<'a> {
    // Top up the bit buffer without discarding the bits it already holds. Only whole bytes are
    // appended, capped by both the free space in the buffer and the bytes remaining in the slice.
    // Never fails: at the end of the slice it simply appends nothing and returns Ok.
    //
    // NOTE(review): the shift by `n_bits_left` assumes n_bits_left < 64 when called — guaranteed
    // by the `read_codebook` guard, but worth confirming for any new callers.
    #[inline]
    fn fetch_bits_partial(&mut self) -> io::Result<()> {
        let mut buf = [0u8; std::mem::size_of::<u64>()];

        // Number of whole bytes that fit in the free (low) portion of the bit buffer.
        let read_len = min(self.buf.len(), (u64::BITS - self.n_bits_left) as usize >> 3);

        buf[..read_len].copy_from_slice(&self.buf[..read_len]);

        self.buf = &self.buf[read_len..];

        // Append the new bits directly below the existing (most-significant) bits.
        self.bits |= u64::from_be_bytes(buf) >> self.n_bits_left;
        self.n_bits_left += (read_len as u32) << 3;

        Ok(())
    }

    // Replace the buffer contents with up to 8 bytes read big-endian from the slice. Errors only
    // if the slice is fully exhausted.
    fn fetch_bits(&mut self) -> io::Result<()> {
        let mut buf = [0u8; std::mem::size_of::<u64>()];

        let read_len = min(self.buf.len(), std::mem::size_of::<u64>());

        if read_len == 0 {
            return end_of_bitstream_error();
        }

        buf[..read_len].copy_from_slice(&self.buf[..read_len]);

        self.buf = &self.buf[read_len..];

        self.bits = u64::from_be_bytes(buf);
        self.n_bits_left = (read_len as u32) << 3;

        Ok(())
    }

    #[inline(always)]
    fn get_bits(&self) -> u64 {
        self.bits
    }

    #[inline(always)]
    fn num_bits_left(&self) -> u32 {
        self.n_bits_left
    }

    // Discard `num` bits by shifting them out the most-significant end of the buffer.
    #[inline(always)]
    fn consume_bits(&mut self, num: u32) {
        self.n_bits_left -= num;
        self.bits <<= num;
    }
}
1009
// `BitReaderLtr` uses all the default implementations of `ReadBitsLtr`.
impl<'a> ReadBitsLtr for BitReaderLtr<'a> {}

impl<'a> FiniteBitStream for BitReaderLtr<'a> {
    // Total readable bits: 8 bits per unread byte in the slice plus the bits still buffered.
    fn bits_left(&self) -> u64 {
        (8 * self.buf.len() as u64) + u64::from(self.n_bits_left)
    }
}
1017
/// `ReadBitsRtl` reads bits from least-significant to most-significant.
pub trait ReadBitsRtl: private::FetchBitsRtl {
    /// Discards any saved bits and resets the `BitStream` to prepare it for a byte-aligned read.
    #[inline(always)]
    fn realign(&mut self) {
        // Dropping `num_bits_left % 8` bits leaves the reader at the next byte boundary.
        let skip = self.num_bits_left() & 0x7;
        self.consume_bits(skip);
    }

    /// Ignores the specified number of bits from the stream or returns an error.
    #[inline(always)]
    fn ignore_bits(&mut self, mut num_bits: u32) -> io::Result<()> {
        if num_bits <= self.num_bits_left() {
            self.consume_bits(num_bits);
        }
        else {
            // Consume whole bit caches directly.
            while num_bits > self.num_bits_left() {
                num_bits -= self.num_bits_left();
                self.fetch_bits()?;
            }

            if num_bits > 0 {
                // Shift out in two parts to prevent panicking when num_bits == 64.
                self.consume_bits(num_bits - 1);
                self.consume_bits(1);
            }
        }

        Ok(())
    }

    /// Ignores one bit from the stream or returns an error.
    #[inline(always)]
    fn ignore_bit(&mut self) -> io::Result<()> {
        self.ignore_bits(1)
    }

    /// Read a single bit as a boolean value or returns an error.
    #[inline(always)]
    fn read_bool(&mut self) -> io::Result<bool> {
        if self.num_bits_left() < 1 {
            self.fetch_bits()?;
        }

        // The next bit to be read is the least-significant bit of the cache.
        let bit = (self.get_bits() & 1) == 1;

        self.consume_bits(1);
        Ok(bit)
    }

    /// Reads and returns a single bit or returns an error.
    #[inline(always)]
    fn read_bit(&mut self) -> io::Result<u32> {
        if self.num_bits_left() < 1 {
            self.fetch_bits()?;
        }

        let bit = self.get_bits() & 1;

        self.consume_bits(1);

        Ok(bit as u32)
    }

    /// Reads and returns up to 32-bits or returns an error.
    #[inline(always)]
    fn read_bits_leq32(&mut self, bit_width: u32) -> io::Result<u32> {
        debug_assert!(bit_width <= u32::BITS);

        let mut bits = self.get_bits();
        let mut bits_needed = bit_width;

        // Refill the cache as many times as needed, splicing each new batch of bits in above the
        // bits already gathered.
        while bits_needed > self.num_bits_left() {
            bits_needed -= self.num_bits_left();

            self.fetch_bits()?;

            bits |= self.get_bits() << (bit_width - bits_needed);
        }

        self.consume_bits(bits_needed);

        // Since bit_width is <= 32, this shift will never panic.
        let mask = !(!0 << bit_width);

        Ok((bits & mask) as u32)
    }

    /// Reads up to 32-bits and interprets them as a signed two's complement integer or returns an
    /// error.
    #[inline(always)]
    fn read_bits_leq32_signed(&mut self, bit_width: u32) -> io::Result<i32> {
        let value = self.read_bits_leq32(bit_width)?;
        Ok(sign_extend_leq32_to_i32(value, bit_width))
    }

    /// Reads and returns up to 64-bits or returns an error.
    #[inline(always)]
    fn read_bits_leq64(&mut self, bit_width: u32) -> io::Result<u64> {
        debug_assert!(bit_width <= u64::BITS);

        // Hard-code the bit_width == 0 case as it's not possible to handle both the bit_width == 0
        // and bit_width == 64 cases branchlessly. This should be optimized out when bit_width is
        // known at compile time. Since it's generally rare to need to read up-to 64-bits at a time
        // (as opposed to 32-bits), this is an acceptable solution.
        if bit_width == 0 {
            Ok(0)
        }
        else {
            let mut bits = self.get_bits();
            let mut bits_needed = bit_width;

            while bits_needed > self.num_bits_left() {
                bits_needed -= self.num_bits_left();

                self.fetch_bits()?;

                // Since bits_needed will always be > 0, this will never shift by > 63 bits if
                // bit_width == 64 and therefore will never panic.
                bits |= self.get_bits() << (bit_width - bits_needed);
            }

            // Shift in two parts to prevent panicking when bit_width == 64.
            self.consume_bits(bits_needed - 1);
            self.consume_bits(1);

            // Generate the mask in two parts to prevent panicking when bit_width == 64.
            let mask = !((!0 << (bit_width - 1)) << 1);

            Ok(bits & mask)
        }
    }

    /// Reads up to 64-bits and interprets them as a signed two's complement integer or returns an
    /// error.
    #[inline(always)]
    fn read_bits_leq64_signed(&mut self, bit_width: u32) -> io::Result<i64> {
        let value = self.read_bits_leq64(bit_width)?;
        Ok(sign_extend_leq64_to_i64(value, bit_width))
    }

    /// Reads and returns a unary zeros encoded integer or an error.
    #[inline(always)]
    fn read_unary_zeros(&mut self) -> io::Result<u32> {
        let mut num = 0;

        loop {
            // Get the number of trailing zeros.
            let num_zeros = self.get_bits().trailing_zeros();

            if num_zeros >= self.num_bits_left() {
                // If the number of zeros exceeds the number of bits left then all the remaining
                // bits were 0.
                num += self.num_bits_left();
                self.fetch_bits()?;
            }
            else {
                // Otherwise, a 1 bit was encountered after `n_zeros` 0 bits.
                num += num_zeros;

                // Since bits are shifted off the cache after they're consumed, for there to be a
                // 1 bit there must be atleast one extra available bit in the cache that can be
                // consumed after the 0 bits.
                self.consume_bits(num_zeros);
                self.consume_bits(1);

                // Done decoding.
                break;
            }
        }

        Ok(num)
    }

    /// Reads and returns a unary zeros encoded integer that is capped to a maximum value.
    #[inline(always)]
    fn read_unary_zeros_capped(&mut self, mut limit: u32) -> io::Result<u32> {
        let mut num = 0;

        loop {
            // Get the number of trailing zeros, capped to the limit.
            let num_bits_left = self.num_bits_left();
            let num_zeros = min(self.get_bits().trailing_zeros(), num_bits_left);

            if num_zeros >= limit {
                // There are more zeros than the limit. A terminator cannot be encountered.
                num += limit;
                self.consume_bits(limit);
                break;
            }
            else {
                // There are less zeros than the limit. A terminator was encountered OR more bits
                // are needed.
                limit -= num_zeros;
                num += num_zeros;

                if num_zeros < num_bits_left {
                    // There are less zeros than the number of bits left in the reader. Thus, a
                    // terminator was encountered and not all buffered bits have been consumed.
                    self.consume_bits(num_zeros);
                    self.consume_bits(1);
                    break;
                }
            }

            self.fetch_bits()?;
        }

        Ok(num)
    }

    /// Reads and returns a unary ones encoded integer or an error.
    #[inline(always)]
    fn read_unary_ones(&mut self) -> io::Result<u32> {
        // Note: This algorithm is identical to read_unary_zeros except flipped for 1s.
        let mut num = 0;

        loop {
            let num_ones = self.get_bits().trailing_ones();

            if num_ones >= self.num_bits_left() {
                num += self.num_bits_left();
                self.fetch_bits()?;
            }
            else {
                num += num_ones;

                self.consume_bits(num_ones);
                self.consume_bits(1);

                break;
            }
        }

        Ok(num)
    }

    /// Reads and returns a unary ones encoded integer that is capped to a maximum value.
    #[inline(always)]
    fn read_unary_ones_capped(&mut self, mut limit: u32) -> io::Result<u32> {
        // Note: This algorithm is identical to read_unary_zeros_capped except flipped for 1s.
        let mut num = 0;

        loop {
            let num_bits_left = self.num_bits_left();
            let num_ones = min(self.get_bits().trailing_ones(), num_bits_left);

            if num_ones >= limit {
                num += limit;
                self.consume_bits(limit);
                break;
            }
            else {
                limit -= num_ones;
                num += num_ones;

                if num_ones < num_bits_left {
                    self.consume_bits(num_ones);
                    self.consume_bits(1);
                    break;
                }
            }

            self.fetch_bits()?;
        }

        Ok(num)
    }

    /// Reads a codebook value from the `BitStream` using the provided `Codebook` and returns the
    /// decoded value and the number of bits consumed, or an error.
    #[inline(always)]
    fn read_codebook<E: vlc::CodebookEntry>(
        &mut self,
        codebook: &vlc::Codebook<E>,
    ) -> io::Result<(E::ValueType, u32)> {
        // Attempt to refill the bit buffer with enough bits for the longest codeword. This does
        // not guarantee the buffer will have enough bits to decode a codeword.
        if self.num_bits_left() < codebook.max_code_len {
            self.fetch_bits_partial()?;
        }

        // The number of bits actually buffered in the bit buffer.
        let num_bits_left = self.num_bits_left();

        let mut bits = self.get_bits();

        // Index the initial decoding table using the bottom `init_block_len` bits (LSb first).
        // NOTE(review): the initial lookup is offset by +1; presumably table[0] is a reserved
        // entry — confirm against the Codebook construction code.
        let mut block_len = codebook.init_block_len;
        let mut entry = codebook.table[(bits & ((1 << block_len) - 1)) as usize + 1];

        let mut consumed = 0;

        while entry.is_jump() {
            // Consume the bits used for the initial or previous jump iteration.
            consumed += block_len;
            bits >>= block_len;

            // Since this is a jump entry, if there are no bits left then the bitstream ended early.
            if consumed > num_bits_left {
                return end_of_bitstream_error();
            }

            // Prepare for the next jump.
            block_len = entry.jump_len();

            let index = bits & ((1 << block_len) - 1);

            // Jump to the next entry.
            entry = codebook.table[entry.jump_offset() + index as usize];
        }

        // The entry is always a value entry at this point. Consume the bits containing the value.
        consumed += entry.value_len();

        if consumed > num_bits_left {
            return end_of_bitstream_error();
        }

        self.consume_bits(consumed);

        Ok((entry.value(), consumed))
    }
}
1338
/// `BitStreamRtl` reads bits from least-significant to most-significant from any source
/// that implements [`ReadBytes`].
///
/// Stated another way, if N-bits are read from a `BitStreamRtl` then bit 0, the first bit read,
/// is the least-significant bit, and bit N-1, the last bit read, is the most-significant.
pub struct BitStreamRtl<'a, B: ReadBytes> {
    // The underlying byte source.
    reader: &'a mut B,
    // The bit buffer. The next bit to be read is the least-significant bit.
    bits: u64,
    // The number of unread bits remaining in `bits`.
    n_bits_left: u32,
}
1349
1350impl<'a, B: ReadBytes> BitStreamRtl<'a, B> {
1351    /// Instantiate a new `BitStreamRtl` with the given buffer.
1352    pub fn new(reader: &'a mut B) -> Self {
1353        BitStreamRtl { reader, bits: 0, n_bits_left: 0 }
1354    }
1355}
1356
impl<'a, B: ReadBytes> private::FetchBitsRtl for BitStreamRtl<'a, B> {
    // Refill the bit buffer with exactly one byte from the underlying reader, placed in the
    // least-significant byte of the buffer. Any previously buffered bits are discarded.
    #[inline(always)]
    fn fetch_bits(&mut self) -> io::Result<()> {
        self.bits = u64::from(self.reader.read_u8()?);
        self.n_bits_left = u8::BITS;
        Ok(())
    }

    // Not yet implemented (`todo!`). Operations that require a partial refill, such as
    // `read_codebook`, will panic if this is invoked on a `BitStreamRtl`.
    #[inline(always)]
    fn fetch_bits_partial(&mut self) -> io::Result<()> {
        todo!()
    }

    #[inline(always)]
    fn get_bits(&self) -> u64 {
        self.bits
    }

    #[inline(always)]
    fn num_bits_left(&self) -> u32 {
        self.n_bits_left
    }

    // Discard `num` bits by shifting them out the least-significant end of the buffer.
    #[inline(always)]
    fn consume_bits(&mut self, num: u32) {
        self.n_bits_left -= num;
        self.bits >>= num;
    }
}

// `BitStreamRtl` uses all the default implementations of `ReadBitsRtl`.
impl<'a, B: ReadBytes> ReadBitsRtl for BitStreamRtl<'a, B> {}
1388
/// `BitReaderRtl` reads bits from least-significant to most-significant from any `&[u8]`.
///
/// Stated another way, if N-bits are read from a `BitReaderRtl` then bit 0, the first bit read,
/// is the least-significant bit, and bit N-1, the last bit read, is the most-significant.
pub struct BitReaderRtl<'a> {
    // The unread remainder of the byte slice.
    buf: &'a [u8],
    // The bit buffer. The next bit to be read is the least-significant bit.
    bits: u64,
    // The number of unread bits remaining in `bits`.
    n_bits_left: u32,
}
1398
1399impl<'a> BitReaderRtl<'a> {
1400    /// Instantiate a new `BitReaderRtl` with the given buffer.
1401    pub fn new(buf: &'a [u8]) -> Self {
1402        BitReaderRtl { buf, bits: 0, n_bits_left: 0 }
1403    }
1404}
1405
impl<'a> private::FetchBitsRtl for BitReaderRtl<'a> {
    // Top up the bit buffer without discarding the bits it already holds. Only whole bytes are
    // appended, capped by both the free space in the buffer and the bytes remaining in the slice.
    // Never fails: at the end of the slice it simply appends nothing and returns Ok.
    //
    // NOTE(review): the shift by `n_bits_left` assumes n_bits_left < 64 when called — guaranteed
    // by the `read_codebook` guard, but worth confirming for any new callers.
    #[inline]
    fn fetch_bits_partial(&mut self) -> io::Result<()> {
        let mut buf = [0u8; std::mem::size_of::<u64>()];

        // Number of whole bytes that fit in the free (high) portion of the bit buffer.
        let read_len = min(self.buf.len(), (u64::BITS - self.n_bits_left) as usize >> 3);

        buf[..read_len].copy_from_slice(&self.buf[..read_len]);

        self.buf = &self.buf[read_len..];

        // Append the new bits directly above the existing (least-significant) bits.
        self.bits |= u64::from_le_bytes(buf) << self.n_bits_left;
        self.n_bits_left += (read_len as u32) << 3;

        Ok(())
    }

    // Replace the buffer contents with up to 8 bytes read little-endian from the slice. Errors
    // only if the slice is fully exhausted.
    fn fetch_bits(&mut self) -> io::Result<()> {
        let mut buf = [0u8; std::mem::size_of::<u64>()];

        let read_len = min(self.buf.len(), std::mem::size_of::<u64>());

        if read_len == 0 {
            return end_of_bitstream_error();
        }

        buf[..read_len].copy_from_slice(&self.buf[..read_len]);

        self.buf = &self.buf[read_len..];

        self.bits = u64::from_le_bytes(buf);
        self.n_bits_left = (read_len as u32) << 3;

        Ok(())
    }

    #[inline(always)]
    fn get_bits(&self) -> u64 {
        self.bits
    }

    #[inline(always)]
    fn num_bits_left(&self) -> u32 {
        self.n_bits_left
    }

    // Discard `num` bits by shifting them out the least-significant end of the buffer.
    #[inline(always)]
    fn consume_bits(&mut self, num: u32) {
        self.n_bits_left -= num;
        self.bits >>= num;
    }
}

// `BitReaderRtl` uses all the default implementations of `ReadBitsRtl`.
impl<'a> ReadBitsRtl for BitReaderRtl<'a> {}

impl<'a> FiniteBitStream for BitReaderRtl<'a> {
    // Total readable bits: 8 bits per unread byte in the slice plus the bits still buffered.
    fn bits_left(&self) -> u64 {
        (8 * self.buf.len() as u64) + u64::from(self.n_bits_left)
    }
}
1466
1467#[cfg(test)]
1468mod tests {
1469    use super::vlc::{BitOrder, Codebook, CodebookBuilder, Entry8x8};
1470    use super::{BitReaderLtr, ReadBitsLtr};
1471    use super::{BitReaderRtl, ReadBitsRtl};
1472
    // Verifies ignore_bits: in-cache skips, multi-cache skips, and the 0- and 64-bit edge cases.
    #[test]
    #[allow(clippy::bool_assert_comparison)]
    fn verify_bitstreamltr_ignore_bits() {
        let mut bs = BitReaderLtr::new(&[
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
            0xc0, 0x10, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0a, //
        ]);

        assert_eq!(bs.read_bool().unwrap(), true);

        bs.ignore_bits(128).unwrap();

        assert_eq!(bs.read_bool().unwrap(), true);
        assert_eq!(bs.read_bool().unwrap(), false);
        assert_eq!(bs.read_bool().unwrap(), false);

        bs.ignore_bits(7).unwrap();

        assert_eq!(bs.read_bool().unwrap(), true);

        bs.ignore_bits(19).unwrap();

        assert_eq!(bs.read_bool().unwrap(), true);

        assert_eq!(bs.read_bool().unwrap(), false);
        assert_eq!(bs.read_bool().unwrap(), false);
        assert_eq!(bs.read_bool().unwrap(), false);
        assert_eq!(bs.read_bool().unwrap(), false);

        bs.ignore_bits(24).unwrap();

        assert_eq!(bs.read_bool().unwrap(), true);
        assert_eq!(bs.read_bool().unwrap(), false);
        assert_eq!(bs.read_bool().unwrap(), true);
        assert_eq!(bs.read_bool().unwrap(), false);

        // Lower limit test.
        let mut bs = BitReaderLtr::new(&[0x00]);

        assert!(bs.ignore_bits(0).is_ok());

        let mut bs = BitReaderLtr::new(&[]);

        assert!(bs.ignore_bits(0).is_ok());
        assert!(bs.ignore_bits(1).is_err());

        // Upper limit test.
        let mut bs = BitReaderLtr::new(&[
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
        ]);

        assert!(bs.ignore_bits(64).is_ok());
        assert!(bs.ignore_bits(64).is_ok());
        assert!(bs.ignore_bits(32).is_ok());
        assert!(bs.ignore_bits(32).is_ok());
        assert!(bs.ignore_bits(64).is_ok());
    }
1534
1535    #[test]
1536    #[allow(clippy::bool_assert_comparison)]
1537    fn verify_bitstreamltr_read_bool() {
1538        // General tests.
1539        let mut bs = BitReaderLtr::new(&[0b1010_1010]);
1540
1541        assert_eq!(bs.read_bool().unwrap(), true);
1542        assert_eq!(bs.read_bool().unwrap(), false);
1543        assert_eq!(bs.read_bool().unwrap(), true);
1544        assert_eq!(bs.read_bool().unwrap(), false);
1545        assert_eq!(bs.read_bool().unwrap(), true);
1546        assert_eq!(bs.read_bool().unwrap(), false);
1547        assert_eq!(bs.read_bool().unwrap(), true);
1548        assert_eq!(bs.read_bool().unwrap(), false);
1549
1550        // Error test.
1551        let mut bs = BitReaderLtr::new(&[]);
1552
1553        assert!(bs.read_bool().is_err());
1554    }
1555
1556    #[test]
1557    fn verify_bitstreamltr_read_bit() {
1558        // General tests.
1559        let mut bs = BitReaderLtr::new(&[0b1010_1010]);
1560
1561        assert_eq!(bs.read_bit().unwrap(), 1);
1562        assert_eq!(bs.read_bit().unwrap(), 0);
1563        assert_eq!(bs.read_bit().unwrap(), 1);
1564        assert_eq!(bs.read_bit().unwrap(), 0);
1565        assert_eq!(bs.read_bit().unwrap(), 1);
1566        assert_eq!(bs.read_bit().unwrap(), 0);
1567        assert_eq!(bs.read_bit().unwrap(), 1);
1568        assert_eq!(bs.read_bit().unwrap(), 0);
1569
1570        // Error test.
1571        let mut bs = BitReaderLtr::new(&[]);
1572
1573        assert!(bs.read_bool().is_err());
1574    }
1575
    // Verifies read_bits_leq32: arbitrary widths, the 0- and 32-bit limits, reads that span a
    // cache refill, and the too-few-bits error.
    #[test]
    fn verify_bitstreamltr_read_bits_leq32() {
        // General tests.
        let mut bs = BitReaderLtr::new(&[0b1010_0101, 0b0111_1110, 0b1101_0011]);

        assert_eq!(bs.read_bits_leq32(4).unwrap(), 0b0000_0000_0000_1010);
        assert_eq!(bs.read_bits_leq32(4).unwrap(), 0b0000_0000_0000_0101);
        assert_eq!(bs.read_bits_leq32(13).unwrap(), 0b0000_1111_1101_1010);
        assert_eq!(bs.read_bits_leq32(3).unwrap(), 0b0000_0000_0000_0011);

        // Lower limit test.
        let mut bs = BitReaderLtr::new(&[0xff, 0xff, 0xff, 0xff]);

        assert_eq!(bs.read_bits_leq32(0).unwrap(), 0);

        // Upper limit test.
        let mut bs = BitReaderLtr::new(&[0xff, 0xff, 0xff, 0xff, 0x01]);

        assert_eq!(bs.read_bits_leq32(32).unwrap(), u32::MAX);
        assert_eq!(bs.read_bits_leq32(8).unwrap(), 0x01);

        // Cache fetch test.
        let mut bs = BitReaderLtr::new(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01]);

        assert_eq!(bs.read_bits_leq32(32).unwrap(), u32::MAX);
        assert_eq!(bs.read_bits_leq32(32).unwrap(), u32::MAX);
        assert_eq!(bs.read_bits_leq32(8).unwrap(), 0x01);

        // Test error cases.
        let mut bs = BitReaderLtr::new(&[0xff]);

        assert!(bs.read_bits_leq32(9).is_err());
    }
1609
    // Verifies read_bits_leq64: arbitrary widths, the 0- and 64-bit limits, and the
    // too-few-bits error.
    #[test]
    fn verify_bitstreamltr_read_bits_leq64() {
        // General tests.
        let mut bs = BitReaderLtr::new(&[
            0x99, 0xaa, 0x55, 0xff, 0xff, 0x55, 0xaa, 0x99, //
            0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, //
        ]);

        assert_eq!(bs.read_bits_leq64(40).unwrap(), 0x99aa55ffff);
        assert_eq!(bs.read_bits_leq64(4).unwrap(), 0x05);
        assert_eq!(bs.read_bits_leq64(4).unwrap(), 0x05);
        assert_eq!(bs.read_bits_leq64(16).unwrap(), 0xaa99);
        assert_eq!(bs.read_bits_leq64(64).unwrap(), 0x1122334455667788);

        // Lower limit test.
        let mut bs = BitReaderLtr::new(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]);

        assert_eq!(bs.read_bits_leq64(0).unwrap(), 0);

        // Upper limit test.
        let mut bs = BitReaderLtr::new(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01]);

        assert_eq!(bs.read_bits_leq64(64).unwrap(), u64::MAX);
        assert_eq!(bs.read_bits_leq64(8).unwrap(), 0x01);

        // Test error cases.
        let mut bs = BitReaderLtr::new(&[0xff]);

        assert!(bs.read_bits_leq64(9).is_err());
    }
1640
    // Verifies read_unary_zeros: values spanning byte boundaries, the 0 and 63 extremes, and the
    // all-zeros (no terminator) error.
    #[test]
    fn verify_bitstreamltr_read_unary_zeros() {
        // General tests
        let mut bs =
            BitReaderLtr::new(&[0b0000_0001, 0b0001_0000, 0b0000_0000, 0b1000_0000, 0b1111_1011]);

        assert_eq!(bs.read_unary_zeros().unwrap(), 7);
        assert_eq!(bs.read_unary_zeros().unwrap(), 3);
        assert_eq!(bs.read_unary_zeros().unwrap(), 12);
        assert_eq!(bs.read_unary_zeros().unwrap(), 7);
        assert_eq!(bs.read_unary_zeros().unwrap(), 0);
        assert_eq!(bs.read_unary_zeros().unwrap(), 0);
        assert_eq!(bs.read_unary_zeros().unwrap(), 0);
        assert_eq!(bs.read_unary_zeros().unwrap(), 0);
        assert_eq!(bs.read_unary_zeros().unwrap(), 1);
        assert_eq!(bs.read_unary_zeros().unwrap(), 0);

        // Upper limit test
        let mut bs = BitReaderLtr::new(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01]);

        assert_eq!(bs.read_unary_zeros().unwrap(), 63);

        // Lower limit test
        let mut bs = BitReaderLtr::new(&[0x80]);

        assert_eq!(bs.read_unary_zeros().unwrap(), 0);

        // Error test.
        let mut bs = BitReaderLtr::new(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]);

        assert!(bs.read_unary_zeros().is_err());
    }
1673
    // Verifies read_unary_zeros_capped: terminator before the limit, limit reached before a
    // terminator, and limits spanning multiple cache refills.
    #[test]
    fn verify_bitstreamltr_read_unary_zeros_capped() {
        // Basic test
        let mut bs = BitReaderLtr::new(&[0b0000_0001, 0b0000_0001]);

        assert_eq!(bs.read_unary_zeros_capped(8).unwrap(), 7);
        assert_eq!(bs.read_unary_zeros_capped(4).unwrap(), 4);

        // Long limit test
        let mut bs = BitReaderLtr::new(&[
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
            0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
        ]);

        assert_eq!(bs.read_unary_zeros_capped(96).unwrap(), 79);
        assert_eq!(bs.read_unary_zeros_capped(104).unwrap(), 104);
    }
1692
    // Verifies read_unary_ones: values spanning byte boundaries, the 0 and 63 extremes, and the
    // all-ones (no terminator) error.
    #[test]
    fn verify_bitstreamltr_read_unary_ones() {
        // General tests
        let mut bs =
            BitReaderLtr::new(&[0b1111_1110, 0b1110_1111, 0b1111_1111, 0b0111_1111, 0b0000_0100]);

        assert_eq!(bs.read_unary_ones().unwrap(), 7);
        assert_eq!(bs.read_unary_ones().unwrap(), 3);
        assert_eq!(bs.read_unary_ones().unwrap(), 12);
        assert_eq!(bs.read_unary_ones().unwrap(), 7);
        assert_eq!(bs.read_unary_ones().unwrap(), 0);
        assert_eq!(bs.read_unary_ones().unwrap(), 0);
        assert_eq!(bs.read_unary_ones().unwrap(), 0);
        assert_eq!(bs.read_unary_ones().unwrap(), 0);
        assert_eq!(bs.read_unary_ones().unwrap(), 1);
        assert_eq!(bs.read_unary_ones().unwrap(), 0);

        // Upper limit test
        let mut bs = BitReaderLtr::new(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe]);

        assert_eq!(bs.read_unary_ones().unwrap(), 63);

        // Lower limit test
        let mut bs = BitReaderLtr::new(&[0x7f]);

        assert_eq!(bs.read_unary_ones().unwrap(), 0);

        // Error test.
        let mut bs = BitReaderLtr::new(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]);

        assert!(bs.read_unary_ones().is_err());
    }
1725
    // Verifies read_unary_ones_capped: terminator before the limit, limit reached before a
    // terminator (next read continues from the following bit), and multi-refill limits.
    #[test]
    fn verify_bitstreamltr_read_unary_ones_capped() {
        // Basic test
        let mut bs = BitReaderLtr::new(&[0b1111_1110, 0b1111_1110]);

        assert_eq!(bs.read_unary_ones_capped(8).unwrap(), 7);
        assert_eq!(bs.read_unary_ones_capped(4).unwrap(), 4);

        let mut bs =
            BitReaderLtr::new(&[0b1111_1110, 0b1110_1111, 0b1111_1111, 0b0111_1111, 0b0000_0100]);

        assert_eq!(bs.read_unary_ones_capped(9).unwrap(), 7);
        assert_eq!(bs.read_unary_ones_capped(9).unwrap(), 3);
        assert_eq!(bs.read_unary_ones_capped(9).unwrap(), 9); // Limit
        assert_eq!(bs.read_unary_ones_capped(9).unwrap(), 3);
        assert_eq!(bs.read_unary_ones_capped(9).unwrap(), 7);
        assert_eq!(bs.read_unary_ones_capped(9).unwrap(), 0);
        assert_eq!(bs.read_unary_ones_capped(9).unwrap(), 0);
        assert_eq!(bs.read_unary_ones_capped(9).unwrap(), 0);
        assert_eq!(bs.read_unary_ones_capped(9).unwrap(), 0);
        assert_eq!(bs.read_unary_ones_capped(9).unwrap(), 1);

        // Long limit test
        let mut bs = BitReaderLtr::new(&[
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        ]);

        assert_eq!(bs.read_unary_ones_capped(144).unwrap(), 143);
        assert_eq!(bs.read_unary_ones_capped(256).unwrap(), 256);
    }
1759
    /// Builds a 25-symbol prefix-code `Codebook` test fixture, along with an encoded
    /// bitstream and the text it should decode to.
    ///
    /// When `bit_order` is `BitOrder::Reverse`, each byte of the encoded data is
    /// bit-reversed so that an LSb-first reader (`BitReaderRtl`) observes the same
    /// codeword bit sequence as an MSb-first reader would over the original data.
    fn generate_codebook(bit_order: BitOrder) -> (Codebook<Entry8x8>, Vec<u8>, &'static str) {
        // Codewords in MSb bit-order.
        #[rustfmt::skip]
        const CODE_WORDS: [u32; 25] = [
            0b001,
            0b111,
            0b010,
            0b1001,
            0b1101,
            0b0001,
            0b0111,
            0b1000,
            0b10110,
            0b10111,
            0b10101,
            0b10100,
            0b01100,
            0b000010,
            0b110000,
            0b000011,
            0b110001,
            0b000001,
            0b011010,
            0b000000,
            0b011011,
            0b1100111,
            0b1100110,
            0b1100101,
            0b1100100,
        ];

        // Bit-length of each codeword; parallel to CODE_WORDS.
        const CODE_LENS: [u8; 25] =
            [3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7];

        // Decoded symbol for each codeword; parallel to CODE_WORDS.
        const VALUES: [u8; 25] = [
            b'i', b' ', b'e', b't', b's', b'l', b'n', b'o', b'.', b'r', b'g', b'h', b'u', b'p',
            b'w', b',', b'f', b'y', b'm', b'v', b'a', b'd', b'b', b'c', b'T',
        ];

        // Encoded data in MSb bit-order.
        const DATA: [u8; 57] = [
            0xc9, 0x43, 0xbf, 0x48, 0xa7, 0xca, 0xbe, 0x64, 0x30, 0xf5, 0xdf, 0x31, 0xd9, 0xb6,
            0xb5, 0xbb, 0x6f, 0x9f, 0xa0, 0x15, 0xc1, 0xfa, 0x5e, 0xa2, 0xb8, 0x4a, 0xfb, 0x0f,
            0xe1, 0x93, 0xe6, 0x8a, 0xe8, 0x3e, 0x77, 0xe0, 0xd9, 0x92, 0xf5, 0xf8, 0xc5, 0xfb,
            0x37, 0xcc, 0x7c, 0x48, 0x8f, 0x33, 0xf0, 0x33, 0x4f, 0xb0, 0xd2, 0x9a, 0x17, 0xad,
            0x80,
        ];

        // The expected plaintext produced by decoding DATA with the codebook above.
        const TEXT: &str = "This silence belongs to us... and every single person out \
                            there, is waiting for us to fill it with something.";

        // Reverse the bits in the data vector if testing a reverse bit-order.
        let data = match bit_order {
            BitOrder::Verbatim => DATA.to_vec(),
            BitOrder::Reverse => DATA.iter().map(|&b| b.reverse_bits()).collect(),
        };

        // Construct a codebook using the tables above.
        let mut builder = CodebookBuilder::new(bit_order);
        let codebook = builder.make::<Entry8x8>(&CODE_WORDS, &CODE_LENS, &VALUES).unwrap();

        (codebook, data, TEXT)
    }
1823
1824    #[test]
1825    fn verify_bitstreamltr_read_codebook() {
1826        let (codebook, buf, text) = generate_codebook(BitOrder::Verbatim);
1827
1828        let mut bs = BitReaderLtr::new(&buf);
1829
1830        let decoded: Vec<u8> =
1831            (0..text.len()).into_iter().map(|_| bs.read_codebook(&codebook).unwrap().0).collect();
1832
1833        assert_eq!(text, std::str::from_utf8(&decoded).unwrap());
1834    }
1835
1836    // BitStreamRtl
1837
1838    #[test]
1839    #[allow(clippy::bool_assert_comparison)]
1840    fn verify_bitstreamrtl_ignore_bits() {
1841        let mut bs = BitReaderRtl::new(&[
1842            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
1843            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
1844            0x02, 0x08, 0x00, 0x80, 0x00, 0x00, 0x00, 0x50, //
1845        ]);
1846
1847        assert_eq!(bs.read_bool().unwrap(), true);
1848
1849        bs.ignore_bits(128).unwrap();
1850
1851        assert_eq!(bs.read_bool().unwrap(), true);
1852        assert_eq!(bs.read_bool().unwrap(), false);
1853        assert_eq!(bs.read_bool().unwrap(), false);
1854
1855        bs.ignore_bits(7).unwrap();
1856
1857        assert_eq!(bs.read_bool().unwrap(), true);
1858
1859        bs.ignore_bits(19).unwrap();
1860
1861        assert_eq!(bs.read_bool().unwrap(), true);
1862
1863        assert_eq!(bs.read_bool().unwrap(), false);
1864        assert_eq!(bs.read_bool().unwrap(), false);
1865        assert_eq!(bs.read_bool().unwrap(), false);
1866        assert_eq!(bs.read_bool().unwrap(), false);
1867
1868        bs.ignore_bits(24).unwrap();
1869
1870        assert_eq!(bs.read_bool().unwrap(), true);
1871        assert_eq!(bs.read_bool().unwrap(), false);
1872        assert_eq!(bs.read_bool().unwrap(), true);
1873        assert_eq!(bs.read_bool().unwrap(), false);
1874
1875        // Lower limit test.
1876        let mut bs = BitReaderRtl::new(&[0x00]);
1877
1878        assert!(bs.ignore_bits(0).is_ok());
1879
1880        let mut bs = BitReaderRtl::new(&[]);
1881
1882        assert!(bs.ignore_bits(0).is_ok());
1883        assert!(bs.ignore_bits(1).is_err());
1884
1885        // Upper limit test.
1886        let mut bs = BitReaderRtl::new(&[
1887            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
1888            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
1889            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
1890            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
1891        ]);
1892
1893        assert!(bs.ignore_bits(64).is_ok());
1894        assert!(bs.ignore_bits(64).is_ok());
1895        assert!(bs.ignore_bits(32).is_ok());
1896        assert!(bs.ignore_bits(32).is_ok());
1897        assert!(bs.ignore_bits(64).is_ok());
1898    }
1899
1900    #[test]
1901    #[allow(clippy::bool_assert_comparison)]
1902    fn verify_bitstreamrtl_read_bool() {
1903        // General tests.
1904        let mut bs = BitReaderRtl::new(&[0b1010_1010]);
1905
1906        assert_eq!(bs.read_bool().unwrap(), false);
1907        assert_eq!(bs.read_bool().unwrap(), true);
1908        assert_eq!(bs.read_bool().unwrap(), false);
1909        assert_eq!(bs.read_bool().unwrap(), true);
1910        assert_eq!(bs.read_bool().unwrap(), false);
1911        assert_eq!(bs.read_bool().unwrap(), true);
1912        assert_eq!(bs.read_bool().unwrap(), false);
1913        assert_eq!(bs.read_bool().unwrap(), true);
1914
1915        // Error test.
1916        let mut bs = BitReaderRtl::new(&[]);
1917
1918        assert!(bs.read_bool().is_err());
1919    }
1920
1921    #[test]
1922    fn verify_bitstreamrtl_read_bit() {
1923        // General tests.
1924        let mut bs = BitReaderRtl::new(&[0b1010_1010]);
1925
1926        assert_eq!(bs.read_bit().unwrap(), 0);
1927        assert_eq!(bs.read_bit().unwrap(), 1);
1928        assert_eq!(bs.read_bit().unwrap(), 0);
1929        assert_eq!(bs.read_bit().unwrap(), 1);
1930        assert_eq!(bs.read_bit().unwrap(), 0);
1931        assert_eq!(bs.read_bit().unwrap(), 1);
1932        assert_eq!(bs.read_bit().unwrap(), 0);
1933        assert_eq!(bs.read_bit().unwrap(), 1);
1934
1935        // Error test.
1936        let mut bs = BitReaderRtl::new(&[]);
1937
1938        assert!(bs.read_bit().is_err());
1939    }
1940
1941    #[test]
1942    fn verify_bitstreamrtl_read_bits_leq32() {
1943        // General tests.
1944        let mut bs = BitReaderRtl::new(&[0b1010_0101, 0b0111_1110, 0b1101_0011]);
1945
1946        assert_eq!(bs.read_bits_leq32(4).unwrap(), 0b0000_0000_0000_0101);
1947        assert_eq!(bs.read_bits_leq32(4).unwrap(), 0b0000_0000_0000_1010);
1948        assert_eq!(bs.read_bits_leq32(13).unwrap(), 0b0001_0011_0111_1110);
1949        assert_eq!(bs.read_bits_leq32(3).unwrap(), 0b0000_0000_0000_0110);
1950
1951        // Lower limit test.
1952        let mut bs = BitReaderRtl::new(&[0xff, 0xff, 0xff, 0xff]);
1953
1954        assert_eq!(bs.read_bits_leq32(0).unwrap(), 0);
1955
1956        // Upper limit test.
1957        let mut bs = BitReaderRtl::new(&[0xff, 0xff, 0xff, 0xff, 0x01]);
1958
1959        assert_eq!(bs.read_bits_leq32(32).unwrap(), u32::MAX);
1960        assert_eq!(bs.read_bits_leq32(8).unwrap(), 0x01);
1961
1962        // Cache fetch test.
1963        let mut bs = BitReaderRtl::new(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01]);
1964
1965        assert_eq!(bs.read_bits_leq32(32).unwrap(), u32::MAX);
1966        assert_eq!(bs.read_bits_leq32(32).unwrap(), u32::MAX);
1967        assert_eq!(bs.read_bits_leq32(8).unwrap(), 0x01);
1968
1969        // Test error cases.
1970        let mut bs = BitReaderRtl::new(&[0xff]);
1971
1972        assert!(bs.read_bits_leq32(9).is_err());
1973    }
1974
1975    #[test]
1976    fn verify_bitstreamrtl_read_bits_leq64() {
1977        // General tests.
1978        let mut bs = BitReaderRtl::new(&[
1979            0x99, 0xaa, 0x55, 0xff, 0xff, 0x55, 0xaa, 0x99, //
1980            0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, //
1981            0x00, 0x11, 0x22, 0x33, 0x00, 0x11, 0x22, 0x33, //
1982            0x44, 0x55, 0x66, 0x77,
1983        ]);
1984
1985        assert_eq!(bs.read_bits_leq64(40).unwrap(), 0xffff55aa99);
1986        assert_eq!(bs.read_bits_leq64(4).unwrap(), 0x05);
1987        assert_eq!(bs.read_bits_leq64(4).unwrap(), 0x05);
1988        assert_eq!(bs.read_bits_leq64(16).unwrap(), 0x99aa);
1989        assert_eq!(bs.read_bits_leq64(64).unwrap(), 0x8877665544332211);
1990        assert_eq!(bs.read_bits_leq64(32).unwrap(), 0x33221100);
1991        assert_eq!(bs.read_bits_leq64(64).unwrap(), 0x7766554433221100);
1992
1993        // Lower limit test.
1994        let mut bs = BitReaderRtl::new(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]);
1995
1996        assert_eq!(bs.read_bits_leq64(0).unwrap(), 0);
1997
1998        // Upper limit test.
1999        let mut bs = BitReaderRtl::new(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01]);
2000
2001        assert_eq!(bs.read_bits_leq64(64).unwrap(), u64::MAX);
2002        assert_eq!(bs.read_bits_leq64(8).unwrap(), 0x01);
2003
2004        // Test error cases.
2005        let mut bs = BitReaderRtl::new(&[0xff]);
2006
2007        assert!(bs.read_bits_leq64(9).is_err());
2008    }
2009
2010    #[test]
2011    fn verify_bitstreamrtl_read_unary_zeros() {
2012        // General tests
2013        let mut bs =
2014            BitReaderRtl::new(&[0b1000_0000, 0b0000_1000, 0b0000_0000, 0b0000_0001, 0b1101_1111]);
2015
2016        assert_eq!(bs.read_unary_zeros().unwrap(), 7);
2017        assert_eq!(bs.read_unary_zeros().unwrap(), 3);
2018        assert_eq!(bs.read_unary_zeros().unwrap(), 12);
2019        assert_eq!(bs.read_unary_zeros().unwrap(), 7);
2020        assert_eq!(bs.read_unary_zeros().unwrap(), 0);
2021        assert_eq!(bs.read_unary_zeros().unwrap(), 0);
2022        assert_eq!(bs.read_unary_zeros().unwrap(), 0);
2023        assert_eq!(bs.read_unary_zeros().unwrap(), 0);
2024        assert_eq!(bs.read_unary_zeros().unwrap(), 1);
2025        assert_eq!(bs.read_unary_zeros().unwrap(), 0);
2026
2027        // Upper limit test
2028        let mut bs = BitReaderRtl::new(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80]);
2029
2030        assert_eq!(bs.read_unary_zeros().unwrap(), 63);
2031
2032        // Lower limit test
2033        let mut bs = BitReaderRtl::new(&[0x01]);
2034
2035        assert_eq!(bs.read_unary_zeros().unwrap(), 0);
2036
2037        // Error test.
2038        let mut bs = BitReaderRtl::new(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]);
2039
2040        assert!(bs.read_unary_zeros().is_err());
2041    }
2042
2043    #[test]
2044    fn verify_bitstreamrtl_read_unary_zeros_capped() {
2045        // General tests
2046        let mut bs = BitReaderRtl::new(&[0b1000_0000, 0b1000_0000]);
2047
2048        assert_eq!(bs.read_unary_zeros_capped(8).unwrap(), 7);
2049        assert_eq!(bs.read_unary_zeros_capped(4).unwrap(), 4);
2050
2051        // Long limit tests
2052        let mut bs = BitReaderRtl::new(&[
2053            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
2054            0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
2055            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
2056            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
2057            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
2058        ]);
2059
2060        assert_eq!(bs.read_unary_zeros_capped(96).unwrap(), 79);
2061        assert_eq!(bs.read_unary_zeros_capped(163).unwrap(), 163);
2062    }
2063
2064    #[test]
2065    fn verify_bitstreamrtl_read_unary_ones() {
2066        // General tests
2067        let mut bs =
2068            BitReaderRtl::new(&[0b0111_1111, 0b1111_0111, 0b1111_1111, 0b1111_1110, 0b0010_0000]);
2069
2070        assert_eq!(bs.read_unary_ones().unwrap(), 7);
2071        assert_eq!(bs.read_unary_ones().unwrap(), 3);
2072        assert_eq!(bs.read_unary_ones().unwrap(), 12);
2073        assert_eq!(bs.read_unary_ones().unwrap(), 7);
2074        assert_eq!(bs.read_unary_ones().unwrap(), 0);
2075        assert_eq!(bs.read_unary_ones().unwrap(), 0);
2076        assert_eq!(bs.read_unary_ones().unwrap(), 0);
2077        assert_eq!(bs.read_unary_ones().unwrap(), 0);
2078        assert_eq!(bs.read_unary_ones().unwrap(), 1);
2079        assert_eq!(bs.read_unary_ones().unwrap(), 0);
2080
2081        // Upper limit test
2082        let mut bs = BitReaderRtl::new(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f]);
2083
2084        assert_eq!(bs.read_unary_ones().unwrap(), 63);
2085
2086        // Lower limit test
2087        let mut bs = BitReaderRtl::new(&[0xfe]);
2088
2089        assert_eq!(bs.read_unary_ones().unwrap(), 0);
2090
2091        // Error test.
2092        let mut bs = BitReaderRtl::new(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]);
2093
2094        assert!(bs.read_unary_ones().is_err());
2095    }
2096
2097    #[test]
2098    fn verify_bitstreamrtl_read_unary_ones_capped() {
2099        // General tests
2100        let mut bs = BitReaderRtl::new(&[0b0111_1111, 0b0111_1111]);
2101
2102        assert_eq!(bs.read_unary_ones_capped(8).unwrap(), 7);
2103        assert_eq!(bs.read_unary_ones_capped(4).unwrap(), 4);
2104
2105        // Long limit tests
2106        let mut bs = BitReaderRtl::new(&[
2107            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
2108            0xff, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
2109            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
2110            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
2111            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
2112        ]);
2113
2114        assert_eq!(bs.read_unary_ones_capped(96).unwrap(), 79);
2115        assert_eq!(bs.read_unary_ones_capped(163).unwrap(), 163);
2116    }
2117
2118    #[test]
2119    fn verify_bitstreamrtl_read_codebook() {
2120        // The codewords are in MSb bit-order, but reading the bitstream in LSb order. Therefore,
2121        // use the reverse bit order.
2122        let (codebook, buf, text) = generate_codebook(BitOrder::Reverse);
2123
2124        let mut bs = BitReaderRtl::new(&buf);
2125
2126        let decoded: Vec<u8> =
2127            (0..text.len()).into_iter().map(|_| bs.read_codebook(&codebook).unwrap().0).collect();
2128
2129        assert_eq!(text, std::str::from_utf8(&decoded).unwrap());
2130    }
2131}