brotli/enc/prior_eval.rs
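//! Prior evaluation for literal cost modeling: feeds the metablock's intermediate
//! representation through several candidate context models ("priors") -- context-map
//! based (CM, slow CM, fast CM), stride-based (strides 1-4), and a combined "adv"
//! prior -- scores how cheaply each would have coded the literals, and records the
//! per-slot winners as mixing values in the prediction-mode context map.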

use super::super::alloc;
use super::super::alloc::{Allocator, SliceWrapper, SliceWrapperMut};
use super::backward_references::BrotliEncoderParams;
use super::find_stride;
use super::input_pair::{InputPair, InputReference, InputReferenceMut};
use super::interface;
use super::ir_interpret::{push_base, IRInterpreter};
use super::util::{floatX, FastLog2u16};
use super::{s16, v8};
use core;
#[cfg(feature = "simd")]
use core::simd::prelude::SimdPartialOrd;

// per context: one CDF for the high nibble, followed by 16 CDFs for the low nibble
// (one for each possible high nibble), i.e. 17 CDFs per context byte
pub const CONTEXT_MAP_PRIOR_SIZE: usize = 256 * 17;
pub const STRIDE_PRIOR_SIZE: usize = 256 * 256 * 2;
pub const ADV_PRIOR_SIZE: usize = 65536 + (20 << 16);
pub const DEFAULT_SPEED: (u16, u16) = (8, 8192);

pub enum WhichPrior {
    CM = 0,
    ADV = 1,
    SLOW_CM = 2,
    FAST_CM = 3,
    STRIDE1 = 4,
    STRIDE2 = 5,
    STRIDE3 = 6,
    STRIDE4 = 7,
    //    STRIDE8 = 8,
    NUM_PRIORS = 8,
    // future ideas
}

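/// A candidate literal model: maps (stride byte, selected context bits, context-map context,
/// optional high nibble) to an index into that prior's CDF table. `lookup_lin` defines the
/// table layout; `which()` identifies this prior's lane in the per-slot score vectors.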
pub trait Prior {
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize;
    #[inline(always)]
    fn lookup_mut(
        data: &mut [s16],
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> CDF {
        let index = Self::lookup_lin(stride_byte, selected_context, actual_context, high_nibble);
        CDF::from(&mut data[index])
    }
    #[inline(always)]
    fn lookup(
        data: &[s16],
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> &s16 {
        let index = Self::lookup_lin(stride_byte, selected_context, actual_context, high_nibble);
        &data[index]
    }
    #[allow(unused_variables)]
    #[inline(always)]
    fn score_index(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        let which = Self::which();
        assert!(which < WhichPrior::NUM_PRIORS as usize);
        assert!(actual_context < 256);
        if let Some(nibble) = high_nibble {
            WhichPrior::NUM_PRIORS as usize * (actual_context + 4096 + 256 * nibble as usize)
                + which
        } else {
            WhichPrior::NUM_PRIORS as usize * (actual_context + 256 * (stride_byte >> 4) as usize)
                + which
        }
    }
    fn which() -> usize;
}

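// Indices into the score accumulator: high-nibble costs are grouped by the stride byte's
// high nibble; low-nibble costs live 4096 slots later, grouped by the literal's high nibble.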
#[inline(always)]
fn upper_score_index(stride_byte: u8, _selected_context: u8, actual_context: usize) -> usize {
    actual_context + 256 * (stride_byte >> 4) as usize
}
#[inline(always)]
fn lower_score_index(
    _stride_byte: u8,
    _selected_context: u8,
    actual_context: usize,
    high_nibble: u8,
) -> usize {
    debug_assert!(actual_context < 256);
    debug_assert!(high_nibble < 16);
    actual_context + 4096 + 256 * high_nibble as usize
}

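// Stride-prior table layout: even indices hold the high-nibble CDF keyed by
// (context, stride byte); odd indices hold the low-nibble CDF keyed by
// (context, low nibble of the stride byte, high nibble of the literal).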
#[allow(unused_variables)]
#[inline(always)]
fn stride_lookup_lin(
    stride_byte: u8,
    selected_context: u8,
    actual_context: usize,
    high_nibble: Option<u8>,
) -> usize {
    if let Some(nibble) = high_nibble {
        1 + 2 * (actual_context | ((stride_byte as usize & 0xf) << 8) | ((nibble as usize) << 12))
    } else {
        2 * (actual_context | ((stride_byte as usize) << 8))
    }
}
pub struct Stride1Prior {}
impl Stride1Prior {
    #[inline(always)]
    pub fn offset() -> usize {
        0
    }
}

impl Prior for Stride1Prior {
    #[inline(always)]
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        stride_lookup_lin(stride_byte, selected_context, actual_context, high_nibble)
    }
    #[inline(always)]
    fn which() -> usize {
        WhichPrior::STRIDE1 as usize
    }
}
/*impl StridePrior for Stride1Prior {
    const STRIDE_OFFSET:usize = 0;
}*/
pub struct Stride2Prior {}
impl Stride2Prior {
    #[inline(always)]
    pub fn offset() -> usize {
        1
    }
}

impl Prior for Stride2Prior {
    #[inline(always)]
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        stride_lookup_lin(stride_byte, selected_context, actual_context, high_nibble)
    }
    #[inline]
    fn which() -> usize {
        WhichPrior::STRIDE2 as usize
    }
}
/*impl StridePrior for Stride2Prior {
    const STRIDE_OFFSET:usize = 1;
}*/
pub struct Stride3Prior {}
impl Stride3Prior {
    #[inline(always)]
    pub fn offset() -> usize {
        2
    }
}

impl Prior for Stride3Prior {
    #[inline(always)]
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        stride_lookup_lin(stride_byte, selected_context, actual_context, high_nibble)
    }
    #[inline(always)]
    fn which() -> usize {
        WhichPrior::STRIDE3 as usize
    }
}

/*impl StridePrior for Stride3Prior {
    const STRIDE_OFFSET:usize = 2;
}*/
pub struct Stride4Prior {}
impl Stride4Prior {
    #[inline(always)]
    pub fn offset() -> usize {
        3
    }
}
impl Prior for Stride4Prior {
    #[inline(always)]
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        stride_lookup_lin(stride_byte, selected_context, actual_context, high_nibble)
    }
    #[inline]
    fn which() -> usize {
        WhichPrior::STRIDE4 as usize
    }
}

/*impl StridePrior for Stride4Prior {
    const STRIDE_OFFSET:usize = 3;
}*/
/*pub struct Stride8Prior{
}
impl StridePrior for Stride8Prior {
    const STRIDE_OFFSET:usize = 7;
}
impl Stride8Prior {
    #[inline(always)]
    pub fn offset() -> usize{
        7
    }
}
impl Prior for Stride8Prior {
    fn lookup_lin(stride_byte:u8, selected_context:u8, actual_context:usize, high_nibble: Option<u8>) -> usize {
        stride_lookup_lin(stride_byte, selected_context, actual_context, high_nibble)
    }
    #[inline]
    fn which() -> usize {
      WhichPrior::STRIDE8 as usize
    }
}
*/
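// Context-map priors: CM and slow CM use 17 CDFs per context byte -- slot 0 for the high
// nibble, slots 1..=16 for the low nibble conditioned on each possible high nibble.
// The fast CM prior keeps only 2 CDFs per context (its low-nibble CDF ignores the high nibble).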
pub struct CMPrior {}
impl Prior for CMPrior {
    #[allow(unused_variables)]
    #[inline(always)]
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        if let Some(nibble) = high_nibble {
            (nibble as usize + 1) + 17 * actual_context
        } else {
            17 * actual_context
        }
    }
    #[inline(always)]
    fn which() -> usize {
        WhichPrior::CM as usize
    }
}
pub struct FastCMPrior {}
impl Prior for FastCMPrior {
    #[allow(unused_variables)]
    #[inline(always)]
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        if let Some(nibble) = high_nibble {
            2 * actual_context
        } else {
            2 * actual_context + 1
        }
    }
    #[inline(always)]
    fn which() -> usize {
        WhichPrior::FAST_CM as usize
    }
}

pub struct SlowCMPrior {}
impl Prior for SlowCMPrior {
    #[allow(unused_variables)]
    #[inline(always)]
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        if let Some(nibble) = high_nibble {
            (nibble as usize + 1) + 17 * actual_context
        } else {
            17 * actual_context
        }
    }
    #[inline]
    fn which() -> usize {
        WhichPrior::SLOW_CM as usize
    }
}

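// The "adv" prior keys on both the context-map context and the previous (stride) byte:
// high-nibble CDFs sit in the first 64K entries keyed by (context, stride byte high nibble);
// low-nibble CDFs follow, keyed by (context, stride byte, high nibble of the literal).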
pub struct AdvPrior {}
impl Prior for AdvPrior {
    #[allow(unused_variables)]
    #[inline(always)]
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        if let Some(nibble) = high_nibble {
            65536
                + (actual_context | ((stride_byte as usize) << 8) | ((nibble as usize & 0xf) << 16))
        } else {
            actual_context | ((stride_byte as usize & 0xf0) << 8)
        }
    }
    #[inline(always)]
    fn which() -> usize {
        WhichPrior::ADV as usize
    }
}

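/// Mutable view of one 16-bucket cumulative distribution (one bucket per nibble value),
/// used both to estimate the cost of a nibble and to adapt the counts afterwards.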
pub struct CDF<'a> {
    cdf: &'a mut s16,
}

impl<'a> CDF<'a> {
    #[inline(always)]
    pub fn cost(&self, nibble_u8: u8) -> floatX {
        let nibble = nibble_u8 as usize & 0xf;
        let mut pdf = self.cdf[nibble];
        if nibble_u8 != 0 {
            pdf -= self.cdf[nibble - 1];
        }
        FastLog2u16(self.cdf[15] as u16) - FastLog2u16(pdf as u16)
    }
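    /// Adds `speed.0` to every cumulative bucket at or above the coded nibble; once the
    /// total count (`cdf[15]`) reaches `speed.1`, all buckets are scaled down to roughly
    /// 3/4 (with a +1..=+16 bias to keep the CDF strictly increasing).
    /// E.g. with the default speed (8, 8192), coding nibble 3 adds 8 to buckets 3..=15.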
    #[inline(always)]
    pub fn update(&mut self, nibble_u8: u8, speed: (u16, u16)) {
        let mut cdf = *self.cdf;
        let increment_v = s16::splat(speed.0 as i16);
        let one_to_16 = s16::from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
        let mask_v: s16 = one_to_16.simd_gt(s16::splat(i16::from(nibble_u8))).to_int();
        cdf = cdf + (increment_v & mask_v);
        if cdf[15] >= speed.1 as i16 {
            let cdf_bias = one_to_16;
            cdf = cdf + cdf_bias - ((cdf + cdf_bias) >> 2);
        }
        *self.cdf = cdf;
    }
}

impl<'a> From<&'a mut s16> for CDF<'a> {
    #[inline(always)]
    fn from(cdf: &'a mut s16) -> CDF<'a> {
        CDF { cdf }
    }
}

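/// Resets every CDF to a uniform distribution: each of the 16 nibble values starts
/// with weight 4, so the cumulative counts run 4, 8, ..., 64.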
pub fn init_cdfs(cdfs: &mut [s16]) {
    for item in cdfs.iter_mut() {
        *item = s16::from([4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64]);
    }
}

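/// Walks the intermediate representation of a metablock, costs every literal nibble under
/// each candidate prior, and accumulates those costs per mixing slot (one `v8` of scores,
/// one lane per `WhichPrior`) so that `choose_bitmask` can pick the cheapest model per slot.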
pub struct PriorEval<
    'a,
    Alloc: alloc::Allocator<s16> + alloc::Allocator<u32> + alloc::Allocator<v8>,
> {
    input: InputPair<'a>,
    context_map: interface::PredictionModeContextMap<InputReferenceMut<'a>>,
    block_type: u8,
    local_byte_offset: usize,
    _nop: <Alloc as Allocator<u32>>::AllocatedMemory,
    cm_priors: <Alloc as Allocator<s16>>::AllocatedMemory,
    slow_cm_priors: <Alloc as Allocator<s16>>::AllocatedMemory,
    fast_cm_priors: <Alloc as Allocator<s16>>::AllocatedMemory,
    stride_priors: [<Alloc as Allocator<s16>>::AllocatedMemory; 4],
    adv_priors: <Alloc as Allocator<s16>>::AllocatedMemory,
    _stride_pyramid_leaves: [u8; find_stride::NUM_LEAF_NODES],
    score: <Alloc as Allocator<v8>>::AllocatedMemory,
    cm_speed: [(u16, u16); 2],
    stride_speed: [(u16, u16); 2],
    cur_stride: u8,
}

impl<'a, Alloc: alloc::Allocator<s16> + alloc::Allocator<u32> + alloc::Allocator<v8>>
    PriorEval<'a, Alloc>
{
    pub fn new(
        alloc: &mut Alloc,
        input: InputPair<'a>,
        stride: [u8; find_stride::NUM_LEAF_NODES],
        prediction_mode: interface::PredictionModeContextMap<InputReferenceMut<'a>>,
        params: &BrotliEncoderParams,
    ) -> Self {
        let do_alloc = params.prior_bitmask_detection != 0;
        let mut cm_speed = prediction_mode.context_map_speed();
        let mut stride_speed = prediction_mode.stride_context_speed();
        if cm_speed[0] == (0, 0) {
            cm_speed[0] = params.literal_adaptation[2]
        }
        if cm_speed[0] == (0, 0) {
            cm_speed[0] = DEFAULT_SPEED;
        }
        if cm_speed[1] == (0, 0) {
            cm_speed[1] = params.literal_adaptation[3]
        }
        if cm_speed[1] == (0, 0) {
            cm_speed[1] = cm_speed[0];
        }
        if stride_speed[0] == (0, 0) {
            stride_speed[0] = params.literal_adaptation[0]
        }
        if stride_speed[0] == (0, 0) {
            stride_speed[0] = DEFAULT_SPEED;
        }
        if stride_speed[1] == (0, 0) {
            stride_speed[1] = params.literal_adaptation[1]
        }
        if stride_speed[1] == (0, 0) {
            stride_speed[1] = stride_speed[0];
        }
        let mut ret = PriorEval::<Alloc> {
            input,
            context_map: prediction_mode,
            block_type: 0,
            cur_stride: 1,
            local_byte_offset: 0,
            _nop: <Alloc as Allocator<u32>>::AllocatedMemory::default(),
            cm_priors: if do_alloc {
                <Alloc as Allocator<s16>>::alloc_cell(alloc, CONTEXT_MAP_PRIOR_SIZE)
            } else {
                <Alloc as Allocator<s16>>::AllocatedMemory::default()
            },
            slow_cm_priors: if do_alloc {
                <Alloc as Allocator<s16>>::alloc_cell(alloc, CONTEXT_MAP_PRIOR_SIZE)
            } else {
                <Alloc as Allocator<s16>>::AllocatedMemory::default()
            },
            fast_cm_priors: if do_alloc {
                <Alloc as Allocator<s16>>::alloc_cell(alloc, CONTEXT_MAP_PRIOR_SIZE)
            } else {
                <Alloc as Allocator<s16>>::AllocatedMemory::default()
            },
            stride_priors: [
                if do_alloc {
                    <Alloc as Allocator<s16>>::alloc_cell(alloc, STRIDE_PRIOR_SIZE)
                } else {
                    <Alloc as Allocator<s16>>::AllocatedMemory::default()
                },
                if do_alloc {
                    <Alloc as Allocator<s16>>::alloc_cell(alloc, STRIDE_PRIOR_SIZE)
                } else {
                    <Alloc as Allocator<s16>>::AllocatedMemory::default()
                },
                if do_alloc {
                    <Alloc as Allocator<s16>>::alloc_cell(alloc, STRIDE_PRIOR_SIZE)
                } else {
                    <Alloc as Allocator<s16>>::AllocatedMemory::default()
                },
                if do_alloc {
                    <Alloc as Allocator<s16>>::alloc_cell(alloc, STRIDE_PRIOR_SIZE)
                } else {
                    <Alloc as Allocator<s16>>::AllocatedMemory::default()
                },
                /*if do_alloc {m16x16.alloc_cell(STRIDE_PRIOR_SIZE)} else {
                Alloc16x16::AllocatedMemory::default()},*/
            ],
            adv_priors: if do_alloc {
                <Alloc as Allocator<s16>>::alloc_cell(alloc, ADV_PRIOR_SIZE)
            } else {
                <Alloc as Allocator<s16>>::AllocatedMemory::default()
            },
            _stride_pyramid_leaves: stride,
            score: if do_alloc {
                <Alloc as Allocator<v8>>::alloc_cell(alloc, 8192)
            } else {
                <Alloc as Allocator<v8>>::AllocatedMemory::default()
            },
            cm_speed,
            stride_speed,
        };
        init_cdfs(ret.cm_priors.slice_mut());
        init_cdfs(ret.slow_cm_priors.slice_mut());
        init_cdfs(ret.fast_cm_priors.slice_mut());
        init_cdfs(ret.stride_priors[0].slice_mut());
        init_cdfs(ret.stride_priors[1].slice_mut());
        init_cdfs(ret.stride_priors[2].slice_mut());
        init_cdfs(ret.stride_priors[3].slice_mut());
        //init_cdfs(ret.stride_priors[4].slice_mut());
        init_cdfs(ret.adv_priors.slice_mut());
        ret
    }
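    /// For every mixing slot, picks the prior whose accumulated cost is lowest, requiring a
    /// margin of `epsilon` bits before preferring the more expensive models (adv, then slow CM,
    /// then fast CM) over the cheaper ones; slots with no recorded data inherit the most
    /// popular choice seen so far. The result is written into the context map's mixing values.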
    pub fn choose_bitmask(&mut self) {
        let epsilon = 6.0;
        let mut max_popularity = 0u32;
        let mut max_popularity_index = 0u8;
        assert_eq!(WhichPrior::NUM_PRIORS as usize, 8);
        let mut popularity = [0u32; 8];
        let mut bitmask = [0u8; super::interface::NUM_MIXING_VALUES];
        for (i, score) in self.score.slice().iter().enumerate() {
            let cm_score = score[WhichPrior::CM as usize];
            let slow_cm_score = score[WhichPrior::SLOW_CM as usize];
            let fast_cm_score = score[WhichPrior::FAST_CM as usize] + 16.0;
            let stride1_score = score[WhichPrior::STRIDE1 as usize];
            let stride2_score = score[WhichPrior::STRIDE2 as usize];
            let stride3_score = score[WhichPrior::STRIDE3 as usize] + 16.0;
            let stride4_score = score[WhichPrior::STRIDE4 as usize];
            //let stride8_score = score[WhichPrior::STRIDE8] * 1.125 + 16.0;
            let stride8_score = stride4_score + 1.0; // FIXME: never lowest -- ignore stride 8
            let stride_score = core::cmp::min(
                stride1_score as u64,
                core::cmp::min(
                    stride2_score as u64,
                    core::cmp::min(
                        stride3_score as u64,
                        core::cmp::min(stride4_score as u64, stride8_score as u64),
                    ),
                ),
            );

            let adv_score = score[WhichPrior::ADV as usize];
            if adv_score + epsilon < stride_score as floatX
                && adv_score + epsilon < cm_score
                && adv_score + epsilon < slow_cm_score
                && adv_score + epsilon < fast_cm_score
            {
                bitmask[i] = 1;
            } else if slow_cm_score + epsilon < stride_score as floatX
                && slow_cm_score + epsilon < cm_score
                && slow_cm_score + epsilon < fast_cm_score
            {
                bitmask[i] = 2;
            } else if fast_cm_score + epsilon < stride_score as floatX
                && fast_cm_score + epsilon < cm_score
            {
                bitmask[i] = 3;
            } else if epsilon + (stride_score as floatX) < cm_score {
                bitmask[i] = WhichPrior::STRIDE1 as u8;
                if stride_score == stride8_score as u64 {
                    //bitmask[i] = WhichPrior::STRIDE8 as u8;
                }
                if stride_score == stride4_score as u64 {
                    bitmask[i] = WhichPrior::STRIDE4 as u8;
                }
                if stride_score == stride3_score as u64 {
                    bitmask[i] = WhichPrior::STRIDE3 as u8;
                }
                if stride_score == stride2_score as u64 {
                    bitmask[i] = WhichPrior::STRIDE2 as u8;
                }
                if stride_score == stride1_score as u64 {
                    bitmask[i] = WhichPrior::STRIDE1 as u8;
                }
            } else {
                bitmask[i] = 0;
            }
            if stride_score == 0 {
                bitmask[i] = max_popularity_index;
                //eprintln!("Miss {}[{}] ~ {}", bitmask[i], i, max_popularity_index);
            } else {
                popularity[bitmask[i] as usize] += 1;
                if popularity[bitmask[i] as usize] > max_popularity {
                    max_popularity = popularity[bitmask[i] as usize];
                    max_popularity_index = bitmask[i];
                }
                //eprintln!("Score {} {} {} {} {}: {}[{}] max={},{}", cm_score, adv_score, slow_cm_score, fast_cm_score, stride_score, bitmask[i], i, max_popularity, max_popularity_index);
            }
        }
        self.context_map.set_mixing_values(&bitmask);
    }
    pub fn free(&mut self, alloc: &mut Alloc) {
        <Alloc as Allocator<v8>>::free_cell(alloc, core::mem::take(&mut self.score));
        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.cm_priors));
        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.slow_cm_priors));
        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.fast_cm_priors));
        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.stride_priors[0]));
        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.stride_priors[1]));
        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.stride_priors[2]));
        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.stride_priors[3]));
        //<Alloc as Allocator<s16>>::free_cell(alloc, core::mem::replace(&mut self.stride_priors[4], <Alloc as Allocator<s16>>::AllocatedMemory::default()));
        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.adv_priors));
    }

    pub fn take_prediction_mode(
        &mut self,
    ) -> interface::PredictionModeContextMap<InputReferenceMut<'a>> {
        core::mem::replace(
            &mut self.context_map,
            interface::PredictionModeContextMap::<InputReferenceMut<'a>> {
                literal_context_map: InputReferenceMut::default(),
                predmode_speed_and_distance_context_map: InputReferenceMut::default(),
            },
        )
    }
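    /// Costs the high and low nibble of `literal` under every prior, accumulates those costs
    /// into the per-slot score vectors (high-nibble and low-nibble scores are kept in separate
    /// slots), and adapts each prior's CDFs with its configured speed.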
    fn update_cost_base(
        &mut self,
        stride_prior: [u8; 8],
        stride_prior_offset: usize,
        selected_bits: u8,
        cm_prior: usize,
        literal: u8,
    ) {
        let mut l_score = v8::splat(0.0);
        let mut h_score = v8::splat(0.0);
        let base_stride_prior =
            stride_prior[stride_prior_offset.wrapping_sub(self.cur_stride as usize) & 7];
        let hscore_index = upper_score_index(base_stride_prior, selected_bits, cm_prior);
        let lscore_index =
            lower_score_index(base_stride_prior, selected_bits, cm_prior, literal >> 4);
        {
            type CurPrior = CMPrior;
            let mut cdf = CurPrior::lookup_mut(
                self.cm_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, self.cm_speed[1]);
        }
        {
            type CurPrior = CMPrior;
            let mut cdf = CurPrior::lookup_mut(
                self.cm_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, self.cm_speed[0]);
        }
        {
            type CurPrior = SlowCMPrior;
            let mut cdf = CurPrior::lookup_mut(
                self.slow_cm_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, (0, 1024));
        }
        {
            type CurPrior = SlowCMPrior;
            let mut cdf = CurPrior::lookup_mut(
                self.slow_cm_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, (0, 1024));
        }
        {
            type CurPrior = FastCMPrior;
            let mut cdf = CurPrior::lookup_mut(
                self.fast_cm_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, self.cm_speed[0]);
        }
        {
            type CurPrior = FastCMPrior;
            let mut cdf = CurPrior::lookup_mut(
                self.fast_cm_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, self.cm_speed[0]);
        }
        {
            type CurPrior = Stride1Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[0].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, self.stride_speed[1]);
        }
        {
            type CurPrior = Stride1Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[0].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, self.stride_speed[0]);
        }
        {
            type CurPrior = Stride2Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[1].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, self.stride_speed[1]);
        }
        {
            type CurPrior = Stride2Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[1].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, self.stride_speed[0]);
        }
        {
            type CurPrior = Stride3Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[2].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, self.stride_speed[1]);
        }
        {
            type CurPrior = Stride3Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[2].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, self.stride_speed[0]);
        }
        {
            type CurPrior = Stride4Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[3].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, self.stride_speed[1]);
        }
        {
            type CurPrior = Stride4Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[3].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, self.stride_speed[0]);
        }
        /*       {
                   type CurPrior = Stride8Prior;
                   let mut cdf = CurPrior::lookup_mut(self.stride_priors[4].slice_mut(),
                                                      stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset())&7], selected_bits, cm_prior, None);
                   h_score[CurPrior::which()] = cdf.cost(literal>>4);
                   cdf.update(literal >> 4, self.stride_speed[1]);
               }
               {
                   type CurPrior = Stride8Prior;
                   let mut cdf = CurPrior::lookup_mut(self.stride_priors[4].slice_mut(),
                                                      stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                                                      selected_bits,
                                                      cm_prior,
                                                      Some(literal >> 4));
                   l_score[CurPrior::which()] = cdf.cost(literal&0xf);
                   cdf.update(literal&0xf, self.stride_speed[0]);
               }
        */
        type CurPrior = AdvPrior;
        {
            let mut cdf = CurPrior::lookup_mut(
                self.adv_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, self.stride_speed[1]);
        }
        {
            let mut cdf = CurPrior::lookup_mut(
                self.adv_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, self.stride_speed[0]);
        }
        self.score.slice_mut()[lscore_index] += l_score;
        self.score.slice_mut()[hscore_index] += h_score;
    }
}
impl<'a, Alloc: alloc::Allocator<s16> + alloc::Allocator<u32> + alloc::Allocator<v8>> IRInterpreter
    for PriorEval<'a, Alloc>
{
    #[inline]
    fn inc_local_byte_offset(&mut self, inc: usize) {
        self.local_byte_offset += inc;
    }
    #[inline]
    fn local_byte_offset(&self) -> usize {
        self.local_byte_offset
    }
    #[inline]
    fn update_block_type(&mut self, new_type: u8, stride: u8) {
        self.block_type = new_type;
        self.cur_stride = stride;
    }
    #[inline]
    fn block_type(&self) -> u8 {
        self.block_type
    }
    #[inline]
    fn literal_data_at_offset(&self, index: usize) -> u8 {
        self.input[index]
    }
    #[inline]
    fn literal_context_map(&self) -> &[u8] {
        self.context_map.literal_context_map.slice()
    }
    #[inline]
    fn prediction_mode(&self) -> ::interface::LiteralPredictionModeNibble {
        self.context_map.literal_prediction_mode()
    }
    #[inline]
    fn update_cost(
        &mut self,
        stride_prior: [u8; 8],
        stride_prior_offset: usize,
        selected_bits: u8,
        cm_prior: usize,
        literal: u8,
    ) {
        //let stride = self.cur_stride as usize;
        self.update_cost_base(
            stride_prior,
            stride_prior_offset,
            selected_bits,
            cm_prior,
            literal,
        )
    }
}

impl<'a, 'b, Alloc: alloc::Allocator<s16> + alloc::Allocator<u32> + alloc::Allocator<v8>>
    interface::CommandProcessor<'b> for PriorEval<'a, Alloc>
{
    #[inline]
    fn push(&mut self, val: interface::Command<InputReference<'b>>) {
        push_base(self, val)
    }
}