1use crate::common::f_fmla;
30use crate::logs::{LOG_R_DD, LOG_RANGE_REDUCTION};
31use crate::polyeval::{f_estrin_polyeval8, f_polyeval6};
32
#[inline]
/// Core natural-log kernel: computes ln(x) for a finite, positive, normal
/// `f64` using a 128-entry table-based range reduction.
///
/// Decomposition: x = 2^e * m with m in [1, 2); then
///   ln(x) = e*ln(2) + ln(1/r) + ln(r*m),
/// where `r ~= 1/m` comes from `LOG_RANGE_REDUCTION` so that u = r*m - 1 is
/// tiny and ln(1+u) is evaluated by a degree-6 polynomial.
///
/// NOTE(review): callers are expected to have screened out zero, negative,
/// inf/NaN and subnormal inputs — this kernel does no special-case handling.
pub(crate) fn core_logf(x: f64) -> f64 {
    let x_u = x.to_bits();

    // IEEE-754 double exponent bias (1023).
    const E_BIAS: u64 = (1u64 << (11 - 1u64)) - 1u64;

    // Start the exponent accumulator at -bias; the biased exponent is added below.
    let mut x_e: i32 = -(E_BIAS as i32);

    // Top 7 mantissa bits (after the 45-bit shift) select the table entry.
    let shifted = (x_u >> 45) as i32;
    let index = shifted & 0x7F;
    // r ~= 1/m for the mantissa bucket `index`.
    let r = f64::from_bits(LOG_RANGE_REDUCTION[index as usize]);

    // Round the mantissa's top bits into the exponent: adding 1<<45 before
    // extracting bits 52.. means buckets near m = 2 fold into e+1 instead,
    // keeping u = r*m - 1 centered around 0.
    x_e = x_e.wrapping_add(x_u.wrapping_add(1u64 << 45).wrapping_shr(52) as i32);
    let e_x = x_e as f64;

    // ln(2) split into a high part (trailing bits zeroed) and a low
    // correction, so e*ln(2) can be accumulated in double-double style.
    const LOG_2_HI: f64 = f64::from_bits(0x3fe62e42fefa3800);
    const LOG_2_LO: f64 = f64::from_bits(0x3d2ef35793c76730);

    // (lo, hi) double-double value of -ln(r) for this bucket.
    // NOTE(review): field order inferred from use below (.1 pairs with the
    // high ln(2) part, .0 with the low) — confirm against the table's docs.
    let log_r_dd = LOG_R_DD[index as usize];

    // hi + lo ~= e*ln(2) - ln(r), each half fused with its table component.
    let hi = f_fmla(e_x, LOG_2_HI, f64::from_bits(log_r_dd.1));
    let lo = f_fmla(e_x, LOG_2_LO, f64::from_bits(log_r_dd.0));

    // Rebuild the mantissa m in [1, 2): keep the 52 fraction bits, force the
    // exponent field to 0x3FF (i.e. 2^0).
    let x_m = (x_u & 0x000F_FFFF_FFFF_FFFFu64) | 0x3FF0_0000_0000_0000u64;
    let m = f64::from_bits(x_m);

    let u;
    #[cfg(any(
        all(
            any(target_arch = "x86", target_arch = "x86_64"),
            target_feature = "fma"
        ),
        target_arch = "aarch64"
    ))]
    {
        // Hardware FMA: u = r*m - 1 computed exactly enough in one fused op.
        u = f_fmla(r, m, -1.0);
    }
    #[cfg(not(any(
        all(
            any(target_arch = "x86", target_arch = "x86_64"),
            target_feature = "fma"
        ),
        target_arch = "aarch64"
    )))]
    {
        use crate::logs::LOG_CD;
        // No FMA: split m about a coarse anchor c (top mantissa bits only) so
        // r*(m - c) is exact, then add the precomputed r*c - 1 from LOG_CD.
        let c_m = x_m & 0x3FFF_E000_0000_0000u64;
        let c = f64::from_bits(c_m);
        u = f_fmla(r, m - c, f64::from_bits(LOG_CD[index as usize]));
    }

    let r1 = hi;
    // Degree-6 minimax polynomial for ln(1+u)/u on the reduced interval;
    // leading coefficient is ~1 (bits of 0x3fefffffffffffff).
    let p = f_polyeval6(
        u,
        f64::from_bits(0x3fefffffffffffff),
        f64::from_bits(0xbfdffffffffff3e6),
        f64::from_bits(0x3fd5555555626b74),
        f64::from_bits(0xbfd0000026aeecc8),
        f64::from_bits(0x3fc9999114d16c06),
        f64::from_bits(0xbfc51e433a85278a),
    );
    // ln(x) ~= (p*u + hi) + lo.
    f_fmla(p, u, r1) + lo
}
110
111#[inline]
115pub fn f_log1pf(x: f32) -> f32 {
116 let ux = x.to_bits().wrapping_shl(1);
117 if ux >= 0xffu32 << 24 || ux == 0 {
118 if ux == 0 {
120 return x;
121 }
122 if x.is_infinite() {
123 return if x.is_sign_positive() {
124 f32::INFINITY
125 } else {
126 f32::NAN
127 };
128 }
129 return x + f32::NAN;
130 }
131
132 let xd = x as f64;
133 let ax = x.to_bits() & 0x7fff_ffffu32;
134
135 if ax > 0x3c80_0000u32 {
137 if x == -1. {
138 return f32::NEG_INFINITY;
139 }
140 let x1p = xd + 1.;
141 if x1p <= 0. {
142 if x1p == 0. {
143 return f32::NEG_INFINITY;
144 }
145 return f32::NAN;
146 }
147 return core_logf(x1p) as f32;
148 }
149
150 let p = f_estrin_polyeval8(
158 xd,
159 f64::from_bits(0x3ff0000000000000),
160 f64::from_bits(0xbfe0000000000000),
161 f64::from_bits(0x3fd5555555556aad),
162 f64::from_bits(0xbfd000000000181a),
163 f64::from_bits(0x3fc999998998124e),
164 f64::from_bits(0xbfc55555452e2a2b),
165 f64::from_bits(0x3fc24adb8cde4aa7),
166 f64::from_bits(0xbfc0019db915ef6f),
167 ) * xd;
168 p as f32
169}
170
171#[inline]
172pub(crate) fn core_log1pf(x: f32) -> f64 {
173 let xd = x as f64;
174 let ax = x.to_bits() & 0x7fff_ffffu32;
175
176 if ax > 0x3c80_0000u32 {
178 let x1p = xd + 1.;
179 return core_logf(x1p);
180 }
181
182 f_estrin_polyeval8(
190 xd,
191 f64::from_bits(0x3ff0000000000000),
192 f64::from_bits(0xbfe0000000000000),
193 f64::from_bits(0x3fd5555555556aad),
194 f64::from_bits(0xbfd000000000181a),
195 f64::from_bits(0x3fc999998998124e),
196 f64::from_bits(0xbfc55555452e2a2b),
197 f64::from_bits(0x3fc24adb8cde4aa7),
198 f64::from_bits(0xbfc0019db915ef6f),
199 ) * xd
200}
201
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn log1pf_works() {
        // Special operands: zero, infinities, NaN propagation.
        assert_eq!(f_log1pf(0.0), 0.0);
        assert_eq!(f_log1pf(f32::INFINITY), f32::INFINITY);
        assert!(f_log1pf(f32::NAN).is_nan());
        assert!(f_log1pf(f32::NAN).is_nan());

        // Out-of-domain inputs (x < -1) must yield NaN.
        assert!(f_log1pf(-2.0).is_nan());
        assert!(f_log1pf(f32::from_bits(0xffefb9a7)).is_nan());

        // Tiny arguments: log1p(x) ~= x.
        assert_eq!(f_log1pf(-0.0000014305108), -0.0000014305118);
        assert_eq!(f_log1pf(-0.0000000000043243), -4.3243e-12);

        // Regular values through the core-log path.
        assert_eq!(f_log1pf(f32::from_bits(0x41078feb)), 2.2484074);
        assert_eq!(f_log1pf(2.0), 1.0986123);
        assert_eq!(f_log1pf(-0.7), -1.2039728);
    }
}