- // Copyright (C) 2004-2024 Artifex Software, Inc.
- //
- // This file is part of MuPDF.
- //
- // MuPDF is free software: you can redistribute it and/or modify it under the
- // terms of the GNU Affero General Public License as published by the Free
- // Software Foundation, either version 3 of the License, or (at your option)
- // any later version.
- //
- // MuPDF is distributed in the hope that it will be useful, but WITHOUT ANY
- // WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- // FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
- // details.
- //
- // You should have received a copy of the GNU Affero General Public License
- // along with MuPDF. If not, see <https://www.gnu.org/licenses/agpl-3.0.en.html>
- //
- // Alternative licensing terms are available from the licensor.
- // For commercial licensing, see <https://www.artifex.com/> or contact
- // Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco,
- // CA 94129, USA, for further information.
- /* This file is included from deskew.c if NEON cores are allowed. */
- #include "arm_neon.h"
/* Horizontal (x) scaling core for 1 channel (greyscale) data using NEON.
 *
 * tmp:     destination row, dst_w output pixels.
 * src:     source row, src_w pixels.
 * index:   per-output-pixel filter info: first source pixel, tap count
 *          (n), offset into weights, and a 'slow' flag for pixels whose
 *          taps would read outside the source row.
 * weights: filter taps in WEIGHT_SHIFT fixed point.
 * bg:      background colour used for taps that fall off the row.
 * channels is unused here (always 1); kept for a uniform core signature.
 */
static void
zoom_x1_neon(uint8_t * FZ_RESTRICT tmp,
	const uint8_t * FZ_RESTRICT src,
	const index_t * FZ_RESTRICT index,
	const weight_t * FZ_RESTRICT weights,
	uint32_t dst_w,
	uint32_t src_w,
	uint32_t channels,
	const uint8_t * FZ_RESTRICT bg)
{
	int32x4_t round = vdupq_n_s32(WEIGHT_ROUND);

	/* 'if (0) slow: {...}' keeps the rare edge-pixel code out of the hot
	 * loop; it is only entered by the goto below, then falls through back
	 * into the main loop. */
	if (0)
slow:
	{
		/* Do any where we might index off the edge of the source */
		int pix_num = index->first_pixel;
		const uint8_t *s = &src[pix_num];
		const weight_t *w = &weights[index->index];
		uint32_t j = index->n;
		int32_t pixel0 = WEIGHT_ROUND;
		if (pix_num < 0)
		{
			/* First tap falls off the left edge; take it from bg. */
			int32_t wt = *w++;
			assert(pix_num == -1);
			pixel0 += bg[0] * wt;
			s++;
			j--;
			pix_num = 0;
		}
		/* pix_num becomes the number of taps that stay inside the row. */
		pix_num = (int)src_w - pix_num;
		if (pix_num > (int)j)
			pix_num = j;
		j -= pix_num;
		while (pix_num > 0)
		{
			pixel0 += *s++ * *w++;
			pix_num--;
		}
		if (j > 0)
		{
			/* Last tap falls off the right edge; take it from bg. */
			assert(j == 1);
			pixel0 += bg[0] * *w;
		}
		pixel0 >>= WEIGHT_SHIFT;
		*tmp++ = CLAMP(pixel0, 0, 255);
		index++;
		dst_w--;
	}
	while (dst_w > 0)
	{
		const uint8_t *s;
		uint32_t j;
		const weight_t *w;
		/* Jump out of band to do the (rare) slow (edge) pixels */
		if (index->slow)
			goto slow;
		s = &src[index->first_pixel];
		j = index->n;
		w = &weights[index->index];
		if (j <= 4)
		{
			/* NOTE(review): loads 4 weights/8 pixels even when j < 4;
			 * presumably the weights table is padded and the slow flag
			 * keeps us away from the end of the row — confirm. */
			int16x4_t wts = vld1_s16(w);
			uint8x8_t pix_bytes = vld1_u8(s);
			int16x4_t pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix_bytes)));
			int32x4_t sum = vmlal_s16(round, pix16, wts);
			int32x2_t pair_sum = vpadd_s32(vget_high_s32(sum), vget_low_s32(sum));
			int32x4_t q_pair_sum;
			pair_sum = vpadd_s32(pair_sum, pair_sum);
			/* BUGFIX: this used vget_high_s32(q_pair_sum) for the high
			 * half, reading q_pair_sum before it was ever written
			 * (undefined behavior). The high half is never used, so
			 * duplicating the low half is equivalent and well-defined. */
			q_pair_sum = vcombine_s32(pair_sum, pair_sum);
			/* After vqshrun by WEIGHT_SHIFT-8 the pixel sits in the high
			 * byte of the u16 lane, hence byte lane 1. */
			*tmp++ = vget_lane_u8(vreinterpret_u8_u16(vqshrun_n_s32(q_pair_sum, WEIGHT_SHIFT-8)), 1);
		}
		else if (j <= 8)
		{
			int16x8_t wts = vld1q_s16(w);
			uint8x8_t pix_bytes = vld1_u8(s);
			int16x8_t pix16 = vreinterpretq_s16_u16(vmovl_u8(pix_bytes));
			int32x4_t sum = vmlal_s16(vmlal_s16(round, vget_low_s16(pix16), vget_low_s16(wts)),
						vget_high_s16(pix16), vget_high_s16(wts));
			int32x2_t pair_sum = vpadd_s32(vget_high_s32(sum), vget_low_s32(sum));
			int32x4_t q_pair_sum;
			pair_sum = vpadd_s32(pair_sum, pair_sum);
			/* BUGFIX: as above, avoid reading q_pair_sum uninitialized. */
			q_pair_sum = vcombine_s32(pair_sum, pair_sum);
			*tmp++ = vget_lane_u8(vreinterpret_u8_u16(vqshrun_n_s32(q_pair_sum, WEIGHT_SHIFT-8)), 1);
		}
		else
		{
			/* Scalar fallback for wide filters (more than 8 taps). */
			int32_t pixel0 = WEIGHT_ROUND;
			for (j = index->n; j > 0; j--)
			{
				pixel0 += *s++ * *w++;
			}
			pixel0 >>= WEIGHT_SHIFT;
			*tmp++ = CLAMP(pixel0, 0, 255);
		}
		index++;
		dst_w--;
	}
}
/* Horizontal (x) scaling core for 3 channel (RGB) data using NEON.
 * Same contract as zoom_x1_neon, but pixels are 3 bytes wide and the
 * three accumulated channels are produced per output pixel. */
static void
zoom_x3_neon(uint8_t * FZ_RESTRICT tmp,
	const uint8_t * FZ_RESTRICT src,
	const index_t * FZ_RESTRICT index,
	const weight_t * FZ_RESTRICT weights,
	uint32_t dst_w,
	uint32_t src_w,
	uint32_t channels,
	const uint8_t * FZ_RESTRICT bg)
{
	int32x4_t round = vdupq_n_s32(WEIGHT_ROUND);

	/* Edge pixels, entered only via goto; falls through into the main
	 * loop when done. */
	if (0)
slow:
	{
		/* Do any where we might index off the edge of the source */
		int pix_num = index->first_pixel;
		const uint8_t *s = &src[pix_num * 3];
		const weight_t *w = &weights[index->index];
		uint32_t j = index->n;
		int32_t pixel0 = WEIGHT_ROUND;
		int32_t pixel1 = WEIGHT_ROUND;
		int32_t pixel2 = WEIGHT_ROUND;
		if (pix_num < 0)
		{
			/* First tap falls off the left edge; take it from bg. */
			int32_t wt = *w++;
			assert(pix_num == -1);
			pixel0 += bg[0] * wt;
			pixel1 += bg[1] * wt;
			pixel2 += bg[2] * wt;
			s += 3;
			j--;
			pix_num = 0;
		}
		/* pix_num becomes the number of taps that stay inside the row. */
		pix_num = (int)src_w - pix_num;
		if (pix_num > (int)j)
			pix_num = j;
		j -= pix_num;
		while (pix_num > 0)
		{
			int32_t wt = *w++;
			pixel0 += *s++ * wt;
			pixel1 += *s++ * wt;
			pixel2 += *s++ * wt;
			pix_num--;
		}
		if (j > 0)
		{
			/* Last tap falls off the right edge; take it from bg. */
			int32_t wt = *w++;
			assert(j == 1);
			pixel0 += bg[0] * wt;
			pixel1 += bg[1] * wt;
			pixel2 += bg[2] * wt;
		}
		pixel0 >>= WEIGHT_SHIFT;
		pixel1 >>= WEIGHT_SHIFT;
		pixel2 >>= WEIGHT_SHIFT;
		*tmp++ = CLAMP(pixel0, 0, 255);
		*tmp++ = CLAMP(pixel1, 0, 255);
		*tmp++ = CLAMP(pixel2, 0, 255);
		index++;
		dst_w--;
	}
	while (dst_w > 0)
	{
		const uint8_t *s;
		int j;
		const weight_t *w;
		uint8x16_t pix_bytes;
		int32x4_t sum;
		uint8x8_t out_pix;
		/* Jump out of band to do the (rare) slow (edge) pixels */
		if (index->slow)
			goto slow;
		s = &src[index->first_pixel * 3];
		j = (int)index->n;
		w = &weights[index->index];
		pix_bytes = vld1q_u8(s); // pix_bytes = ppoonnmmllkkjjiihhggffeeddccbbaa
		if (j == 4)
		{
			/* Common case, fully unrolled: 4 taps. Each step widens the
			 * low 3 bytes (one pixel), accumulates, then rotates the
			 * vector 3 bytes to expose the next pixel. */
			int16x4_t pix16;
			int16x4_t vw;
			vw = vdup_n_s16(w[0]);
			pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
			pix_bytes = vextq_u8(pix_bytes, pix_bytes, 3);
			sum = vmlal_s16(round, pix16, vw);
			vw = vdup_n_s16(w[1]);
			pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
			pix_bytes = vextq_u8(pix_bytes, pix_bytes, 3);
			sum = vmlal_s16(sum, pix16, vw);
			vw = vdup_n_s16(w[2]);
			pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
			pix_bytes = vextq_u8(pix_bytes, pix_bytes, 3);
			sum = vmlal_s16(sum, pix16, vw);
			vw = vdup_n_s16(w[3]);
			pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
			sum = vmlal_s16(sum, pix16, vw);
		}
		else
		{
			int off = j & 3;
			int16x4_t vw;
			s += (off ? off : 4) * 3;
			sum = round;
			/* This is a use of Duff's Device. I'm very sorry, but on the other hand, Yay! */
			switch (off)
			{
				do
				{
					int16x4_t pix16;
					pix_bytes = vld1q_u8(s); // pix_bytes = ppoonnmmllkkjjiihhggffeeddccbbaa
					s += 4 * 3;
			case 0:
					vw = vdup_n_s16(*w++);
					pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
					pix_bytes = vextq_u8(pix_bytes, pix_bytes, 3);
					sum = vmlal_s16(sum, pix16, vw);
					/* fallthrough */
			case 3:
					vw = vdup_n_s16(*w++);
					pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
					pix_bytes = vextq_u8(pix_bytes, pix_bytes, 3);
					sum = vmlal_s16(sum, pix16, vw);
					/* fallthrough */
			case 2:
					vw = vdup_n_s16(*w++);
					pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
					pix_bytes = vextq_u8(pix_bytes, pix_bytes, 3);
					sum = vmlal_s16(sum, pix16, vw);
					/* fallthrough */
			case 1:
					vw = vdup_n_s16(*w++);
					pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
					sum = vmlal_s16(sum, pix16, vw);
					j -= 4;
				} while (j > 0);
			}
		}
		/* After vqshrun by WEIGHT_SHIFT-8 each pixel sits in the high
		 * byte of its u16 lane, hence the odd byte lane numbers. */
		out_pix = vreinterpret_u8_u16(vqshrun_n_s32(sum, WEIGHT_SHIFT-8));
		*tmp++ = vget_lane_u8(out_pix, 1);
		*tmp++ = vget_lane_u8(out_pix, 3);
		*tmp++ = vget_lane_u8(out_pix, 5);
		index++;
		dst_w--;
	}
	/* A second, scalar copy of the main loop previously followed here.
	 * It was unreachable (dst_w is always 0 once the loop above exits,
	 * and the slow block falls through into the loop above, never into
	 * it), so it has been removed. */
}
/* Horizontal (x) scaling core for 4 channel (e.g. RGBx/CMYK) data using
 * NEON. Same contract as zoom_x1_neon, but pixels are 4 bytes wide and
 * four accumulated channels are produced per output pixel. */
static void
zoom_x4_neon(uint8_t * FZ_RESTRICT tmp,
	const uint8_t * FZ_RESTRICT src,
	const index_t * FZ_RESTRICT index,
	const weight_t * FZ_RESTRICT weights,
	uint32_t dst_w,
	uint32_t src_w,
	uint32_t channels,
	const uint8_t * FZ_RESTRICT bg)
{
	int32x4_t round = vdupq_n_s32(WEIGHT_ROUND);
	/* 'if (0) slow: {...}' keeps the rare edge-pixel code out of the hot
	 * loop; it is only entered via goto, then falls through back into the
	 * main loop. */
	if (0)
slow:
	{
		/* Do any where we might index off the edge of the source */
		int pn = index->first_pixel;
		const uint8_t *s = &src[pn * 4];
		const weight_t *w = &weights[index->index];
		uint32_t j = index->n;
		int32_t pixel0 = WEIGHT_ROUND;
		int32_t pixel1 = WEIGHT_ROUND;
		int32_t pixel2 = WEIGHT_ROUND;
		int32_t pixel3 = WEIGHT_ROUND;
		int pix_num = pn;
		if (pix_num < 0)
		{
			/* First tap falls off the left edge; take it from bg. */
			int32_t wt = *w++;
			assert(pix_num == -1);
			pixel0 += bg[0] * wt;
			pixel1 += bg[1] * wt;
			pixel2 += bg[2] * wt;
			pixel3 += bg[3] * wt;
			s += 4;
			j--;
			pix_num = 0;
		}
		/* pix_num becomes the number of taps that stay inside the row. */
		pix_num = (int)src_w - pix_num;
		if (pix_num > (int)j)
			pix_num = j;
		j -= pix_num;
		while (pix_num > 0)
		{
			int32_t wt = *w++;
			pixel0 += *s++ * wt;
			pixel1 += *s++ * wt;
			pixel2 += *s++ * wt;
			pixel3 += *s++ * wt;
			pix_num--;
		}
		if (j > 0)
		{
			/* Last tap falls off the right edge; take it from bg. */
			int32_t wt = *w;
			assert(j == 1);
			pixel0 += bg[0] * wt;
			pixel1 += bg[1] * wt;
			pixel2 += bg[2] * wt;
			pixel3 += bg[3] * wt;
		}
		pixel0 >>= WEIGHT_SHIFT;
		pixel1 >>= WEIGHT_SHIFT;
		pixel2 >>= WEIGHT_SHIFT;
		pixel3 >>= WEIGHT_SHIFT;
		*tmp++ = CLAMP(pixel0, 0, 255);
		*tmp++ = CLAMP(pixel1, 0, 255);
		*tmp++ = CLAMP(pixel2, 0, 255);
		*tmp++ = CLAMP(pixel3, 0, 255);
		index++;
		dst_w--;
	}
	while (dst_w > 0)
	{
		const uint8_t *s;
		int j;
		const weight_t *w;
		int32x4_t sum;
		uint8x16_t pix_bytes;
		uint8x8_t out_pix;
		/* Jump out of band to do the (rare) slow (edge) pixels */
		if (index->slow)
			goto slow;
		s = &src[index->first_pixel * 4];
		j = (int)index->n;
		w = &weights[index->index];
		pix_bytes = vld1q_u8(s); // pix_bytes = ppoonnmmllkkjjiihhggffeeddccbbaa
		if (j == 4)
		{
			/* Common case, fully unrolled: 4 taps. Each step widens the
			 * low 4 bytes (one pixel), accumulates, then rotates the
			 * vector 4 bytes to expose the next pixel. */
			int16x4_t pix16;
			int16x4_t vw;
			vw = vdup_n_s16(w[0]);
			pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
			pix_bytes = vextq_u8(pix_bytes, pix_bytes, 4);
			sum = vmlal_s16(round, pix16, vw);
			vw = vdup_n_s16(w[1]);
			pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
			pix_bytes = vextq_u8(pix_bytes, pix_bytes, 4);
			sum = vmlal_s16(sum, pix16, vw);
			vw = vdup_n_s16(w[2]);
			pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
			pix_bytes = vextq_u8(pix_bytes, pix_bytes, 4);
			sum = vmlal_s16(sum, pix16, vw);
			vw = vdup_n_s16(w[3]);
			pix16 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
			sum = vmlal_s16(sum, pix16, vw);
		}
		else
		{
			int off = j & 3;
			int16x4_t vw;
			s += (off ? off : 4) * 4;
			/* This is a use of Duff's Device. I'm very sorry, but on the other hand, Yay! */
			/* Entry point depends on j mod 4; the cases deliberately fall
			 * through, consuming up to 4 taps per trip round the do-while. */
			sum = round;
			switch (off)
			{
				do
				{
					int16x4_t pixels;
					pix_bytes = vld1q_u8(s); // pix_bytes = ppoonnmmllkkjjiihhggffeeddccbbaa
					s += 4 * 4;
			case 0:
					vw = vdup_n_s16(*w++);
					pixels = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
					pix_bytes = vextq_u8(pix_bytes, pix_bytes, 4);
					sum = vmlal_s16(sum, pixels, vw);
			case 3:
					vw = vdup_n_s16(*w++);
					pixels = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
					pix_bytes = vextq_u8(pix_bytes, pix_bytes, 4);
					sum = vmlal_s16(sum, pixels, vw);
			case 2:
					vw = vdup_n_s16(*w++);
					pixels = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
					pix_bytes = vextq_u8(pix_bytes, pix_bytes, 4);
					sum = vmlal_s16(sum, pixels, vw);
			case 1:
					vw = vdup_n_s16(*w++);
					pixels = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(pix_bytes))));
					sum = vmlal_s16(sum, pixels, vw);
					j -= 4;
				} while (j > 0);
			}
		}
		/* After vqshrun by WEIGHT_SHIFT-8 each channel sits in the high
		 * byte of its u16 lane, hence the odd byte lane numbers. */
		out_pix = vreinterpret_u8_u16(vqshrun_n_s32(sum, WEIGHT_SHIFT-8));
		*tmp++ = vget_lane_u8(out_pix, 1);
		*tmp++ = vget_lane_u8(out_pix, 3);
		*tmp++ = vget_lane_u8(out_pix, 5);
		*tmp++ = vget_lane_u8(out_pix, 7);
		index++;
		dst_w--;
	}
}
/* Vertical (y) scaling core for 1 channel (greyscale) data using NEON.
 *
 * dst:     destination row, width pixels.
 * tmp:     intermediate buffer of x-scaled rows; row offsets wrap at mod,
 *          so it behaves as a circular buffer of rows.
 * index:   per-output-pixel filter info. NOTE: for this core only,
 *          index->slow holds the number of adjacent output pixels that
 *          can be produced together (<= 1 means take the slow path).
 * weights: filter taps; here they appear laid out with a stride of 4
 *          (index->index * 4, w += 4), presumably interleaved so four
 *          adjacent output pixels share one vector load — TODO confirm
 *          against the weight-building code.
 * y:       current output scanline, used to locate rows in tmp.
 */
static void
zoom_y1_neon(uint8_t * dst,
	const uint8_t * FZ_RESTRICT tmp,
	const index_t * FZ_RESTRICT index,
	const weight_t * FZ_RESTRICT weights,
	uint32_t width,
	uint32_t channels,
	uint32_t mod,
	int32_t y)
{
	uint32_t stride = width;
	uint32_t offset = 0;
	int32x4_t round = vdupq_n_s32(WEIGHT_ROUND);
	/* 'if (0) slow: {...}' is entered only via goto, then falls through
	 * back into the main loop. Handles one pixel, scalar. */
	if (0)
slow:
	{
		uint32_t off = (index->first_pixel + y) * stride + offset;
		offset++;
		if (off >= mod)
			off -= mod;
		{
			const weight_t *w = (const weight_t *)&weights[index->index * 4];
			uint32_t j;
			int32_t pixel0 = WEIGHT_ROUND;
			for (j = index->n; j > 0; j--)
			{
				/* Stride 4 through the interleaved weight layout. */
				pixel0 += tmp[off] * *w;
				w += 4;
				off += stride;
				if (off >= mod)
					off -= mod;
			}
			pixel0 >>= WEIGHT_SHIFT;
			*dst++ = CLAMP(pixel0, 0, 255);
		}
		index++;
		width--;
	}
	while (width > 0)
	{
		uint32_t off;
		/* The slow flag stops us accessing off the end of the source row.
		 * It also tells us how many pixels we can do at once. This usage
		 * is different for zoom_y1 than for all other cores. */
		int n = index->slow;
		if (n <= 1)
			goto slow;
		off = (index->first_pixel + y) * stride + offset;
		offset += n;
		if (off >= mod)
			off -= mod;
		{
			const weight_t *w = &weights[index->index * 4];
			uint32_t j = index->n;
			int32x4_t sum;
			uint16x4_t out16;
			if (j == 4)
			{
				/* Common case, fully unrolled: 4 taps, computing up to 4
				 * adjacent output pixels at once (one per vector lane). */
				uint8x8_t pix0, pix1, pix2, pix3;
				int16x4_t vw0, vw1, vw2, vw3;
				pix0 = vld1_u8(&tmp[off]);
				off += stride;
				if (off >= mod)
					off -= mod;
				vw0 = vld1_s16(w);
				w += 4;
				sum = vmlal_s16(round, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix0))), vw0);
				pix1 = vld1_u8(&tmp[off]);
				off += stride;
				if (off >= mod)
					off -= mod;
				vw1 = vld1_s16(w);
				w += 4;
				sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix1))), vw1);
				pix2 = vld1_u8(&tmp[off]);
				off += stride;
				if (off >= mod)
					off -= mod;
				vw2 = vld1_s16(w);
				w += 4;
				sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix2))), vw2);
				pix3 = vld1_u8(&tmp[off]);
				off += stride;
				if (off >= mod)
					off -= mod;
				vw3 = vld1_s16(w);
				sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix3))), vw3);
			}
			else
			{
				/* General case: any tap count. */
				sum = round;
				for ( ; j > 0; j--)
				{
					uint8x8_t pix0;
					int16x4_t vw0;
					pix0 = vld1_u8(&tmp[off]);
					off += stride;
					if (off >= mod)
						off -= mod;
					vw0 = vld1_s16(w);
					w += 4;
					sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix0))), vw0);
				}
			}
			/* After vqshrun by WEIGHT_SHIFT-8 each pixel sits in the high
			 * byte of its u16 lane; emit only the n valid pixels. */
			out16 = vqshrun_n_s32(sum, WEIGHT_SHIFT-8);
			*dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 1);
			if (n > 1)
			{
				*dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 3);
				if (n > 2)
				{
					*dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 5);
					if (n > 3)
					{
						*dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 7);
					}
				}
			}
		}
		/* n output pixels were produced in this iteration. */
		index += n;
		width -= n;
	}
}
/* Vertical (y) scaling core for 3 channel (RGB) data using NEON.
 *
 * dst:     destination row, width pixels of 3 bytes each.
 * tmp:     intermediate buffer of x-scaled rows; row offsets wrap at mod
 *          (circular buffer of rows).
 * index:   per-output-pixel filter info (first row, tap count n, weight
 *          offset).
 * weights: filter taps in WEIGHT_SHIFT fixed point.
 * y:       current output scanline, used to locate rows in tmp.
 */
static void
zoom_y3_neon(uint8_t * dst,
	const uint8_t * FZ_RESTRICT tmp,
	const index_t * FZ_RESTRICT index,
	const weight_t * FZ_RESTRICT weights,
	uint32_t width,
	uint32_t channels,
	uint32_t mod,
	int32_t y)
{
	uint32_t stride = width * 3;
	uint32_t offset = 0;
	/* Hoisted out of the loop, matching the other zoom_y cores. */
	int32x4_t round = vdupq_n_s32(WEIGHT_ROUND);
	while (width--)
	{
		const weight_t *w = &weights[index->index];
		uint32_t j = index->n;
		int32x4_t sum;
		uint16x4_t out16;
		uint32_t off = (index->first_pixel + y) * stride + offset;
		offset += 3;
		if (off >= mod)
			off -= mod;
		if (j == 4)
		{
			/* Common case, fully unrolled: 4 taps. (A redundant inner
			 * redeclaration of 'w', shadowing the identical outer one,
			 * has been removed.) */
			uint8x8_t pix0, pix1, pix2, pix3;
			int16x4_t vw0, vw1, vw2, vw3;
			pix0 = vld1_u8(&tmp[off]);
			off += stride;
			if (off >= mod)
				off -= mod;
			vw0 = vdup_n_s16(*w++);
			sum = vmlal_s16(round, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix0))), vw0);
			pix1 = vld1_u8(&tmp[off]);
			off += stride;
			if (off >= mod)
				off -= mod;
			vw1 = vdup_n_s16(*w++);
			sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix1))), vw1);
			pix2 = vld1_u8(&tmp[off]);
			off += stride;
			if (off >= mod)
				off -= mod;
			vw2 = vdup_n_s16(*w++);
			sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix2))), vw2);
			pix3 = vld1_u8(&tmp[off]);
			off += stride;
			if (off >= mod)
				off -= mod;
			vw3 = vdup_n_s16(*w++);
			sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix3))), vw3);
		}
		else
		{
			/* General case: any tap count (n >= 1). */
			sum = round;
			do
			{
				uint8x8_t pix0 = vld1_u8(&tmp[off]);
				int16x4_t vw0;
				off += stride;
				if (off >= mod)
					off -= mod;
				vw0 = vdup_n_s16(*w++);
				sum = vmlal_s16(sum, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(pix0))), vw0);
			}
			while (--j);
		}
		/* After vqshrun by WEIGHT_SHIFT-8 each channel sits in the high
		 * byte of its u16 lane, hence the odd byte lane numbers. */
		out16 = vqshrun_n_s32(sum, WEIGHT_SHIFT-8);
		*dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 1);
		*dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 3);
		*dst++ = vget_lane_u8(vreinterpret_u8_u16(out16), 5);
		index++;
	}
}
/* Vertical (y) scaling core for 4 channel data using NEON.
 *
 * dst:     destination row, width pixels of 4 bytes each.
 * tmp:     intermediate buffer of x-scaled rows; row offsets wrap at mod
 *          (circular buffer of rows).
 * index:   per-output-pixel filter info (first row, tap count n, weight
 *          offset).
 * weights: filter taps in WEIGHT_SHIFT fixed point.
 * y:       current output scanline, used to locate rows in tmp.
 */
static void
zoom_y4_neon(uint8_t * dst,
	const uint8_t * FZ_RESTRICT tmp,
	const index_t * FZ_RESTRICT index,
	const weight_t * FZ_RESTRICT weights,
	uint32_t width,
	uint32_t channels,
	uint32_t mod,
	int32_t y)
{
	uint32_t row_stride = width * 4;
	uint32_t x_off = 0;
	int32x4_t rnd = vdupq_n_s32(WEIGHT_ROUND);
	for (; width != 0; width--)
	{
		uint32_t pos = (index->first_pixel + y) * row_stride + x_off;
		x_off += 4;
		if (pos >= mod)
			pos -= mod;
		{
			const weight_t *wp = &weights[index->index];
			uint32_t taps = index->n;
			int32x4_t acc;
			uint16x4_t packed;
			if (taps == 4)
			{
				/* Common case, fully unrolled: 4 taps, one pixel (4
				 * channels, one per vector lane) per step. */
				uint8x8_t p0, p1, p2, p3;
				int16x4_t wv0, wv1, wv2, wv3;
				p0 = vld1_u8(&tmp[pos]);
				pos += row_stride;
				if (pos >= mod)
					pos -= mod;
				wv0 = vdup_n_s16(*wp++);
				acc = vmlal_s16(rnd, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(p0))), wv0);
				p1 = vld1_u8(&tmp[pos]);
				pos += row_stride;
				if (pos >= mod)
					pos -= mod;
				wv1 = vdup_n_s16(*wp++);
				acc = vmlal_s16(acc, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(p1))), wv1);
				p2 = vld1_u8(&tmp[pos]);
				pos += row_stride;
				if (pos >= mod)
					pos -= mod;
				wv2 = vdup_n_s16(*wp++);
				acc = vmlal_s16(acc, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(p2))), wv2);
				p3 = vld1_u8(&tmp[pos]);
				pos += row_stride;
				if (pos >= mod)
					pos -= mod;
				wv3 = vdup_n_s16(*wp++);
				acc = vmlal_s16(acc, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(p3))), wv3);
			}
			else
			{
				/* General case: any tap count. */
				acc = rnd;
				while (taps > 0)
				{
					uint8x8_t p = vld1_u8(&tmp[pos]);
					int16x4_t wv;
					pos += row_stride;
					if (pos >= mod)
						pos -= mod;
					wv = vdup_n_s16(*wp++);
					acc = vmlal_s16(acc, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(p))), wv);
					taps--;
				}
			}
			/* After vqshrun by WEIGHT_SHIFT-8 each channel sits in the
			 * high byte of its u16 lane, hence the odd byte lanes. */
			packed = vqshrun_n_s32(acc, WEIGHT_SHIFT-8);
			*dst++ = vget_lane_u8(vreinterpret_u8_u16(packed), 1);
			*dst++ = vget_lane_u8(vreinterpret_u8_u16(packed), 3);
			*dst++ = vget_lane_u8(vreinterpret_u8_u16(packed), 5);
			*dst++ = vget_lane_u8(vreinterpret_u8_u16(packed), 7);
		}
		index++;
	}
}
|