deskew_neon.h

// Copyright (C) 2004-2024 Artifex Software, Inc.
//
// This file is part of MuPDF.
//
// MuPDF is free software: you can redistribute it and/or modify it under the
// terms of the GNU Affero General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// MuPDF is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
// details.
//
// You should have received a copy of the GNU Affero General Public License
// along with MuPDF. If not, see <https://www.gnu.org/licenses/agpl-3.0.en.html>
//
// Alternative licensing terms are available from the licensor.
// For commercial licensing, see <https://www.artifex.com/> or contact
// Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco,
// CA 94129, USA, for further information.

/* This file is included from deskew.c if NEON cores are allowed. */
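/* It relies on the including file having already defined the index_t
 * and weight_t types, the WEIGHT_SHIFT and WEIGHT_ROUND fixed-point
 * constants, and the CLAMP and FZ_RESTRICT macros. (Inferred from
 * usage below: weight_t must be a signed 16 bit type for the
 * vld1_s16() loads to be correct, and WEIGHT_SHIFT must be at least 9
 * for the vqshrun_n_s32() shifts by WEIGHT_SHIFT-8 to be in range.) */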

#include "arm_neon.h"
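
/* Horizontal scale of a 1 channel (greyscale) row. Each output pixel
 * is a fixed point weighted sum of index->n source pixels starting at
 * index->first_pixel. Output pixels whose filter window may overhang
 * the ends of the source row are flagged index->slow and are handled
 * out of band, substituting the background colour bg for the missing
 * samples. */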
static void
zoom_x1_neon(uint8_t * FZ_RESTRICT tmp,
    const uint8_t * FZ_RESTRICT src,
    const index_t * FZ_RESTRICT index,
    const weight_t * FZ_RESTRICT weights,
    uint32_t dst_w,
    uint32_t src_w,
    uint32_t channels,
    const uint8_t * FZ_RESTRICT bg)
{
    int32x4_t round = vdupq_n_s32(WEIGHT_ROUND);

    /* This block is only ever entered via the goto below; the if (0)
     * stops us falling into it from above. */
    if (0)
slow:
    {
        /* Do any where we might index off the edge of the source */
        int pix_num = index->first_pixel;
        const uint8_t *s = &src[pix_num];
        const weight_t *w = &weights[index->index];
        uint32_t j = index->n;
        int32_t pixel0 = WEIGHT_ROUND;

        if (pix_num < 0)
        {
            int32_t wt = *w++;
            assert(pix_num == -1);
            pixel0 += bg[0] * wt;
            s++;
            j--;
            pix_num = 0;
        }
        pix_num = (int)src_w - pix_num;
        if (pix_num > (int)j)
            pix_num = j;
        j -= pix_num;
        while (pix_num > 0)
        {
            pixel0 += *s++ * *w++;
            pix_num--;
        }
        if (j > 0)
        {
            assert(j == 1);
            pixel0 += bg[0] * *w;
        }
        pixel0 >>= WEIGHT_SHIFT;
        *tmp++ = CLAMP(pixel0, 0, 255);
        index++;
        dst_w--;
    }
    while (dst_w > 0)
    {
        const uint8_t *s;
        uint32_t j;
        const weight_t *w;

        /* Jump out of band to do the (rare) slow (edge) pixels */
        if (index->slow)
            goto slow;
        s = &src[index->first_pixel];
        j = index->n;
        w = &weights[index->index];
        if (j <= 4)
        {
            int32x4_t q_pair_sum;
            int16x4_t wts = vld1_s16(w);
            uint8x8_t pix_bytes = vld1_u8(s);
            int16x4_t pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix_bytes)));
            int32x4_t sum = vmlal_s16(round, pix16, wts);
            int32x2_t pair_sum = vpadd_s32(vget_high_s32(sum), vget_low_s32(sum));
            pair_sum = vpadd_s32(pair_sum, pair_sum);
            /* Only lane 0 of q_pair_sum is used below, so duplicate
             * pair_sum into both halves rather than combining it with
             * the contents of an uninitialised vector. */
            q_pair_sum = vcombine_s32(pair_sum, pair_sum);
            /* vqshrun_n_s32(x, WEIGHT_SHIFT-8) leaves the saturated
             * 8 bit result in bits 8..15 of each 16 bit lane, so byte
             * lane 1 is the clamped pixel. */
            *tmp++ = vget_lane_u8(vreinterpret_u8_u16(vqshrun_n_s32(q_pair_sum, WEIGHT_SHIFT-8)), 1);
        }
        else if (j <= 8)
        {
            int32x4_t q_pair_sum;
            int16x8_t wts = vld1q_s16(w);
            uint8x8_t pix_bytes = vld1_u8(s);
            int16x8_t pix16 = vreinterpretq_s16_u16(vmovl_u8(pix_bytes));
            int32x4_t sum = vmlal_s16(vmlal_s16(round, vget_low_s16(pix16), vget_low_s16(wts)),
                    vget_high_s16(pix16), vget_high_s16(wts));
            int32x2_t pair_sum = vpadd_s32(vget_high_s32(sum), vget_low_s32(sum));
            pair_sum = vpadd_s32(pair_sum, pair_sum);
            /* As above: only lane 0 matters, so avoid reading an
             * uninitialised vector. */
            q_pair_sum = vcombine_s32(pair_sum, pair_sum);
            *tmp++ = vget_lane_u8(vreinterpret_u8_u16(vqshrun_n_s32(q_pair_sum, WEIGHT_SHIFT-8)), 1);
        }
        else
        {
            int32_t pixel0 = WEIGHT_ROUND;

            for (j = index->n; j > 0; j--)
            {
                pixel0 += *s++ * *w++;
            }
            pixel0 >>= WEIGHT_SHIFT;
            *tmp++ = CLAMP(pixel0, 0, 255);
        }
        index++;
        dst_w--;
    }
}
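
/* As zoom_x1_neon, but for 3 channel (e.g. RGB) pixels. */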
static void
zoom_x3_neon(uint8_t * FZ_RESTRICT tmp,
    const uint8_t * FZ_RESTRICT src,
    const index_t * FZ_RESTRICT index,
    const weight_t * FZ_RESTRICT weights,
    uint32_t dst_w,
    uint32_t src_w,
    uint32_t channels,
    const uint8_t * FZ_RESTRICT bg)
{
    int32x4_t round = vdupq_n_s32(WEIGHT_ROUND);

    if (0)
slow:
    {
        /* Do any where we might index off the edge of the source */
        int pix_num = index->first_pixel;
        const uint8_t *s = &src[pix_num * 3];
        const weight_t *w = &weights[index->index];
        uint32_t j = index->n;
        int32_t pixel0 = WEIGHT_ROUND;
        int32_t pixel1 = WEIGHT_ROUND;
        int32_t pixel2 = WEIGHT_ROUND;

        if (pix_num < 0)
        {
            int32_t wt = *w++;
            assert(pix_num == -1);
            pixel0 += bg[0] * wt;
            pixel1 += bg[1] * wt;
            pixel2 += bg[2] * wt;
            s += 3;
            j--;
            pix_num = 0;
        }
        pix_num = (int)src_w - pix_num;
        if (pix_num > (int)j)
            pix_num = j;
        j -= pix_num;
        while (pix_num > 0)
        {
            int32_t wt = *w++;
            pixel0 += *s++ * wt;
            pixel1 += *s++ * wt;
            pixel2 += *s++ * wt;
            pix_num--;
        }
        if (j > 0)
        {
            int32_t wt = *w++;
            assert(j == 1);
            pixel0 += bg[0] * wt;
            pixel1 += bg[1] * wt;
            pixel2 += bg[2] * wt;
        }
        pixel0 >>= WEIGHT_SHIFT;
        pixel1 >>= WEIGHT_SHIFT;
        pixel2 >>= WEIGHT_SHIFT;
        *tmp++ = CLAMP(pixel0, 0, 255);
        *tmp++ = CLAMP(pixel1, 0, 255);
        *tmp++ = CLAMP(pixel2, 0, 255);
        index++;
        dst_w--;
    }
    while (dst_w > 0)
    {
        const uint8_t *s;
        int j;
        const weight_t *w;
        uint8x16_t pix_bytes;
        int32x4_t sum;
        uint8x8_t out_pix;

        /* Jump out of band to do the (rare) slow (edge) pixels */
        if (index->slow)
            goto slow;
        s = &src[index->first_pixel * 3];
        j = (int)index->n;
        w = &weights[index->index];
        pix_bytes = vld1q_u8(s); // pix_bytes = ppoonnmmllkkjjiihhggffeeddccbbaa
        if (j == 4)
        {
            int16x4_t pix16;
            int16x4_t vw;

            vw = vdup_n_s16(w[0]);
            pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
            pix_bytes = vextq_u8(pix_bytes, pix_bytes, 3);
            sum = vmlal_s16(round, pix16, vw);
            vw = vdup_n_s16(w[1]);
            pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
            pix_bytes = vextq_u8(pix_bytes, pix_bytes, 3);
            sum = vmlal_s16(sum, pix16, vw);
            vw = vdup_n_s16(w[2]);
            pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
            pix_bytes = vextq_u8(pix_bytes, pix_bytes, 3);
            sum = vmlal_s16(sum, pix16, vw);
            vw = vdup_n_s16(w[3]);
            pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
            sum = vmlal_s16(sum, pix16, vw);
        }
        else
        {
            int off = j & 3;
            int16x4_t vw;

            s += (off ? off : 4) * 3;
            sum = round;
            /* This is a use of Duff's Device. I'm very sorry, but on the
             * other hand, Yay! The switch jumps into the middle of the
             * loop so that the first pass consumes the j%4 odd weights
             * (or 4 if j is a multiple of 4) from the 16 bytes loaded
             * above; each later pass reloads and does 4 weights. */
            switch (off)
            {
                do
                {
                    int16x4_t pix16;
                    pix_bytes = vld1q_u8(s); // pix_bytes = ppoonnmmllkkjjiihhggffeeddccbbaa
                    s += 4 * 3;
            case 0:
                    vw = vdup_n_s16(*w++);
                    pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
                    pix_bytes = vextq_u8(pix_bytes, pix_bytes, 3);
                    sum = vmlal_s16(sum, pix16, vw);
            case 3:
                    vw = vdup_n_s16(*w++);
                    pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
                    pix_bytes = vextq_u8(pix_bytes, pix_bytes, 3);
                    sum = vmlal_s16(sum, pix16, vw);
            case 2:
                    vw = vdup_n_s16(*w++);
                    pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
                    pix_bytes = vextq_u8(pix_bytes, pix_bytes, 3);
                    sum = vmlal_s16(sum, pix16, vw);
            case 1:
                    vw = vdup_n_s16(*w++);
                    pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
                    sum = vmlal_s16(sum, pix16, vw);
                    j -= 4;
                } while (j > 0);
            }
        }
        out_pix = vreinterpret_u8_u16(vqshrun_n_s32(sum, WEIGHT_SHIFT-8));
        *tmp++ = vget_lane_u8(out_pix, 1);
        *tmp++ = vget_lane_u8(out_pix, 3);
        *tmp++ = vget_lane_u8(out_pix, 5);
        index++;
        dst_w--;
    }
}
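
/* As zoom_x1_neon, but for 4 channel (e.g. RGBA or CMYK) pixels. */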
static void
zoom_x4_neon(uint8_t * FZ_RESTRICT tmp,
    const uint8_t * FZ_RESTRICT src,
    const index_t * FZ_RESTRICT index,
    const weight_t * FZ_RESTRICT weights,
    uint32_t dst_w,
    uint32_t src_w,
    uint32_t channels,
    const uint8_t * FZ_RESTRICT bg)
{
    int32x4_t round = vdupq_n_s32(WEIGHT_ROUND);

    if (0)
slow:
    {
        /* Do any where we might index off the edge of the source */
        int pn = index->first_pixel;
        const uint8_t *s = &src[pn * 4];
        const weight_t *w = &weights[index->index];
        uint32_t j = index->n;
        int32_t pixel0 = WEIGHT_ROUND;
        int32_t pixel1 = WEIGHT_ROUND;
        int32_t pixel2 = WEIGHT_ROUND;
        int32_t pixel3 = WEIGHT_ROUND;
        int pix_num = pn;

        if (pix_num < 0)
        {
            int32_t wt = *w++;
            assert(pix_num == -1);
            pixel0 += bg[0] * wt;
            pixel1 += bg[1] * wt;
            pixel2 += bg[2] * wt;
            pixel3 += bg[3] * wt;
            s += 4;
            j--;
            pix_num = 0;
        }
        pix_num = (int)src_w - pix_num;
        if (pix_num > (int)j)
            pix_num = j;
        j -= pix_num;
        while (pix_num > 0)
        {
            int32_t wt = *w++;
            pixel0 += *s++ * wt;
            pixel1 += *s++ * wt;
            pixel2 += *s++ * wt;
            pixel3 += *s++ * wt;
            pix_num--;
        }
        if (j > 0)
        {
            int32_t wt = *w;
            assert(j == 1);
            pixel0 += bg[0] * wt;
            pixel1 += bg[1] * wt;
            pixel2 += bg[2] * wt;
            pixel3 += bg[3] * wt;
        }
        pixel0 >>= WEIGHT_SHIFT;
        pixel1 >>= WEIGHT_SHIFT;
        pixel2 >>= WEIGHT_SHIFT;
        pixel3 >>= WEIGHT_SHIFT;
        *tmp++ = CLAMP(pixel0, 0, 255);
        *tmp++ = CLAMP(pixel1, 0, 255);
        *tmp++ = CLAMP(pixel2, 0, 255);
        *tmp++ = CLAMP(pixel3, 0, 255);
        index++;
        dst_w--;
    }
    while (dst_w > 0)
    {
        const uint8_t *s;
        int j;
        const weight_t *w;
        int32x4_t sum;
        uint8x16_t pix_bytes;
        uint8x8_t out_pix;
        /* Jump out of band to do the (rare) slow (edge) pixels */
        if (index->slow)
            goto slow;
        s = &src[index->first_pixel * 4];
        j = (int)index->n;
        w = &weights[index->index];
        pix_bytes = vld1q_u8(s); // pix_bytes = ppoonnmmllkkjjiihhggffeeddccbbaa
        if (j == 4)
        {
            int16x4_t pix16;
            int16x4_t vw;

            vw = vdup_n_s16(w[0]);
            pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
            pix_bytes = vextq_u8(pix_bytes, pix_bytes, 4);
            sum = vmlal_s16(round, pix16, vw);
            vw = vdup_n_s16(w[1]);
            pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
            pix_bytes = vextq_u8(pix_bytes, pix_bytes, 4);
            sum = vmlal_s16(sum, pix16, vw);
            vw = vdup_n_s16(w[2]);
            pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
            pix_bytes = vextq_u8(pix_bytes, pix_bytes, 4);
            sum = vmlal_s16(sum, pix16, vw);
            vw = vdup_n_s16(w[3]);
            pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
            sum = vmlal_s16(sum, pix16, vw);
        }
        else
        {
            int off = j & 3;
            int16x4_t vw;

            s += (off ? off : 4) * 4;
            /* This is a use of Duff's Device. I'm very sorry, but on the other hand, Yay! */
            sum = round;
            switch (off)
            {
                do
                {
                    int16x4_t pixels;
                    pix_bytes = vld1q_u8(s); // pix_bytes = ppoonnmmllkkjjiihhggffeeddccbbaa
                    s += 4 * 4;
            case 0:
                    vw = vdup_n_s16(*w++);
                    pixels = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
                    pix_bytes = vextq_u8(pix_bytes, pix_bytes, 4);
                    sum = vmlal_s16(sum, pixels, vw);
            case 3:
                    vw = vdup_n_s16(*w++);
                    pixels = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
                    pix_bytes = vextq_u8(pix_bytes, pix_bytes, 4);
                    sum = vmlal_s16(sum, pixels, vw);
            case 2:
                    vw = vdup_n_s16(*w++);
                    pixels = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
                    pix_bytes = vextq_u8(pix_bytes, pix_bytes, 4);
                    sum = vmlal_s16(sum, pixels, vw);
            case 1:
                    vw = vdup_n_s16(*w++);
                    pixels = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
                    sum = vmlal_s16(sum, pixels, vw);
                    j -= 4;
                } while (j > 0);
            }
        }
        out_pix = vreinterpret_u8_u16(vqshrun_n_s32(sum, WEIGHT_SHIFT-8));
        *tmp++ = vget_lane_u8(out_pix, 1);
        *tmp++ = vget_lane_u8(out_pix, 3);
        *tmp++ = vget_lane_u8(out_pix, 5);
        *tmp++ = vget_lane_u8(out_pix, 7);
        index++;
        dst_w--;
    }
}
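
/* Vertical scale of 1 channel data. tmp is a circular buffer of mod
 * bytes holding the horizontally scaled rows; offsets are reduced
 * modulo mod as we step down a column. Each output pixel has its own
 * index entry, since the vertical window varies across the row when
 * deskewing. For this core the slow field also counts how many
 * adjacent columns can be done in one vector pass. */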
static void
zoom_y1_neon(uint8_t * dst,
    const uint8_t * FZ_RESTRICT tmp,
    const index_t * FZ_RESTRICT index,
    const weight_t * FZ_RESTRICT weights,
    uint32_t width,
    uint32_t channels,
    uint32_t mod,
    int32_t y)
{
    uint32_t stride = width;
    uint32_t offset = 0;
    int32x4_t round = vdupq_n_s32(WEIGHT_ROUND);

    if (0)
slow:
    {
        uint32_t off = (index->first_pixel + y) * stride + offset;
        offset++;
        if (off >= mod)
            off -= mod;
        {
            const weight_t *w = &weights[index->index * 4];
            uint32_t j;
            int32_t pixel0 = WEIGHT_ROUND;

            for (j = index->n; j > 0; j--)
            {
                pixel0 += tmp[off] * *w;
                w += 4;
                off += stride;
                if (off >= mod)
                    off -= mod;
            }
            pixel0 >>= WEIGHT_SHIFT;
            *dst++ = CLAMP(pixel0, 0, 255);
        }
        index++;
        width--;
    }
    while (width > 0)
    {
        uint32_t off;
        /* The slow flag stops us accessing off the end of the source row.
         * It also tells us how many pixels we can do at once. This usage
         * is different for zoom_y1 than for all other cores. */
        int n = index->slow;

        if (n <= 1)
            goto slow;
        off = (index->first_pixel + y) * stride + offset;
        offset += n;
        if (off >= mod)
            off -= mod;
        {
            const weight_t *w = &weights[index->index * 4];
            uint32_t j = index->n;
            int32x4_t sum;
            uint16x4_t out16;

            if (j == 4)
            {
                uint8x8_t pix0, pix1, pix2, pix3;
                int16x4_t vw0, vw1, vw2, vw3;

                pix0 = vld1_u8(&tmp[off]);
                off += stride;
                if (off >= mod)
                    off -= mod;
                vw0 = vld1_s16(w);
                w += 4;
                sum = vmlal_s16(round, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix0))), vw0);
                pix1 = vld1_u8(&tmp[off]);
                off += stride;
                if (off >= mod)
                    off -= mod;
                vw1 = vld1_s16(w);
                w += 4;
                sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix1))), vw1);
                pix2 = vld1_u8(&tmp[off]);
                off += stride;
                if (off >= mod)
                    off -= mod;
                vw2 = vld1_s16(w);
                w += 4;
                sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix2))), vw2);
                pix3 = vld1_u8(&tmp[off]);
                off += stride;
                if (off >= mod)
                    off -= mod;
                vw3 = vld1_s16(w);
                sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix3))), vw3);
            }
            else
            {
                sum = round;
                for ( ; j > 0; j--)
                {
                    uint8x8_t pix0;
                    int16x4_t vw0;

                    pix0 = vld1_u8(&tmp[off]);
                    off += stride;
                    if (off >= mod)
                        off -= mod;
                    vw0 = vld1_s16(w);
                    w += 4;
                    sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix0))), vw0);
                }
            }
            out16 = vqshrun_n_s32(sum, WEIGHT_SHIFT-8);
            *dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 1);
            if (n > 1)
            {
                *dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 3);
                if (n > 2)
                {
                    *dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 5);
                    if (n > 3)
                    {
                        *dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 7);
                    }
                }
            }
        }
        index += n;
        width -= n;
    }
}
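
/* As zoom_y1_neon, but for 3 channel pixels. Each output pixel is
 * computed individually; the slow field is not used by this core. */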
static void
zoom_y3_neon(uint8_t * dst,
    const uint8_t * FZ_RESTRICT tmp,
    const index_t * FZ_RESTRICT index,
    const weight_t * FZ_RESTRICT weights,
    uint32_t width,
    uint32_t channels,
    uint32_t mod,
    int32_t y)
{
    uint32_t stride = width * 3;
    uint32_t offset = 0;

    while (width--)
    {
        const weight_t *w = &weights[index->index];
        uint32_t j = index->n;
        int32x4_t sum;
        uint16x4_t out16;
        uint32_t off = (index->first_pixel + y) * stride + offset;

        offset += 3;
        if (off >= mod)
            off -= mod;
        if (j == 4)
        {
            uint8x8_t pix0, pix1, pix2, pix3;
            int16x4_t vw0, vw1, vw2, vw3;

            pix0 = vld1_u8(&tmp[off]);
            off += stride;
            if (off >= mod)
                off -= mod;
            vw0 = vdup_n_s16(*w++);
            sum = vmlal_s16(vdupq_n_s32(WEIGHT_ROUND), vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix0))), vw0);
            pix1 = vld1_u8(&tmp[off]);
            off += stride;
            if (off >= mod)
                off -= mod;
            vw1 = vdup_n_s16(*w++);
            sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix1))), vw1);
            pix2 = vld1_u8(&tmp[off]);
            off += stride;
            if (off >= mod)
                off -= mod;
            vw2 = vdup_n_s16(*w++);
            sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix2))), vw2);
            pix3 = vld1_u8(&tmp[off]);
            off += stride;
            if (off >= mod)
                off -= mod;
            vw3 = vdup_n_s16(*w++);
            sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix3))), vw3);
        }
        else
        {
            sum = vdupq_n_s32(WEIGHT_ROUND);
            do
            {
                uint8x8_t pix0 = vld1_u8(&tmp[off]);
                int16x4_t vw0;

                off += stride;
                if (off >= mod)
                    off -= mod;
                vw0 = vdup_n_s16(*w++);
                sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix0))), vw0);
            }
            while (--j);
        }
        out16 = vqshrun_n_s32(sum, WEIGHT_SHIFT-8);
        *dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 1);
        *dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 3);
        *dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 5);
        index++;
    }
}
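
/* As zoom_y1_neon, but for 4 channel pixels. */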
static void
zoom_y4_neon(uint8_t * dst,
    const uint8_t * FZ_RESTRICT tmp,
    const index_t * FZ_RESTRICT index,
    const weight_t * FZ_RESTRICT weights,
    uint32_t width,
    uint32_t channels,
    uint32_t mod,
    int32_t y)
{
    uint32_t stride = width * 4;
    uint32_t offset = 0;
    int32x4_t round = vdupq_n_s32(WEIGHT_ROUND);

    while (width--)
    {
        uint32_t off = (index->first_pixel + y) * stride + offset;

        offset += 4;
        if (off >= mod)
            off -= mod;
        {
            const weight_t *w = &weights[index->index];
            uint32_t j = index->n;
            int32x4_t sum;
            uint16x4_t out16;

            if (j == 4)
            {
                uint8x8_t pix0, pix1, pix2, pix3;
                int16x4_t vw0, vw1, vw2, vw3;

                pix0 = vld1_u8(&tmp[off]);
                off += stride;
                if (off >= mod)
                    off -= mod;
                vw0 = vdup_n_s16(*w++);
                sum = vmlal_s16(round, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix0))), vw0);
                pix1 = vld1_u8(&tmp[off]);
                off += stride;
                if (off >= mod)
                    off -= mod;
                vw1 = vdup_n_s16(*w++);
                sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix1))), vw1);
                pix2 = vld1_u8(&tmp[off]);
                off += stride;
                if (off >= mod)
                    off -= mod;
                vw2 = vdup_n_s16(*w++);
                sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix2))), vw2);
                pix3 = vld1_u8(&tmp[off]);
                off += stride;
                if (off >= mod)
                    off -= mod;
                vw3 = vdup_n_s16(*w++);
                sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix3))), vw3);
            }
            else
            {
                sum = round;
                for ( ; j > 0; j--)
                {
                    uint8x8_t pix0;
                    int16x4_t vw0;

                    pix0 = vld1_u8(&tmp[off]);
                    off += stride;
                    if (off >= mod)
                        off -= mod;
                    vw0 = vdup_n_s16(*w++);
                    sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix0))), vw0);
                }
            }
            out16 = vqshrun_n_s32(sum, WEIGHT_SHIFT-8);
            *dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 1);
            *dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 3);
            *dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 5);
            *dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 7);
        }
        index++;
    }
}