Milestone 5: deliver embedded RDP sessions and lifecycle hardening

This commit is contained in:
Keith Smith
2026-03-03 18:59:26 -07:00
parent 230a401386
commit 36006bd4aa
2941 changed files with 724359 additions and 77 deletions

View File

@@ -0,0 +1,168 @@
/* FreeRDP: A Remote Desktop Protocol Client
* Optimized YCoCg<->RGB conversion operations.
* vi:ts=4 sw=4:
*
* (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <freerdp/config.h>
#include <freerdp/types.h>
#include <freerdp/primitives.h>
#include <winpr/sysinfo.h>
#include "prim_internal.h"
#include "prim_YCoCg.h"
#if defined(NEON_INTRINSICS_ENABLED)
#include <arm_neon.h>
static primitives_t* generic = nullptr;
/* Convert a YCoCg image to an RGB format with configurable channel order.
 *
 * pSrc      source buffer, 4 bytes per pixel in (Cg, Co, Y, A) memory order
 * srcStep   source line stride in bytes
 * pDst      destination buffer, 4 bytes per pixel
 * DstFormat destination format (used only to query bytes per pixel)
 * dstStep   destination line stride in bytes
 * width     width in pixels
 * height    height in pixels
 * shift     chroma scale shift; shift - 1 is applied because the /2 of the
 *           YCoCg inverse transform is folded into the shift.
 *           NOTE(review): assumes shift >= 1 — a shift of 0 would make the
 *           scalar tail shift by a negative amount (UB); confirm callers.
 * bPos/gPos/rPos/aPos  byte position of each channel in an output pixel
 * alpha     TRUE to copy the source alpha channel, FALSE to write 0xFF
 *
 * Processes 8 pixels at a time with NEON, then a scalar tail of width % 8
 * pixels per row. Always returns PRIMITIVES_SUCCESS.
 */
static pstatus_t neon_YCoCgToRGB_8u_X(const BYTE* WINPR_RESTRICT pSrc, INT32 srcStep,
                                      BYTE* WINPR_RESTRICT pDst, UINT32 DstFormat, INT32 dstStep,
                                      UINT32 width, UINT32 height, UINT8 shift, BYTE bPos,
                                      BYTE gPos, BYTE rPos, BYTE aPos, BOOL alpha)
{
    BYTE* dptr = pDst;
    const BYTE* sptr = pSrc;
    const DWORD formatSize = FreeRDPGetBytesPerPixel(DstFormat);
    const int8_t cll = shift - 1; /* -1 builds in the /2's */
    /* Per-row padding beyond the packed pixel data. */
    const UINT32 srcPad = srcStep - (width * 4);
    const UINT32 dstPad = dstStep - (width * formatSize);
    const UINT32 pad = width % 8; /* pixels handled by the scalar tail */
    const uint8x8_t aVal = vdup_n_u8(0xFF);
    const int8x8_t cllv = vdup_n_s8(cll);
    for (UINT32 y = 0; y < height; y++)
    {
        /* Vector part: 8 pixels per iteration. */
        for (UINT32 x = 0; x < width - pad; x += 8)
        {
            /* Note: shifts must be done before sign-conversion. */
            const uint8x8x4_t raw = vld4_u8(sptr);
            const int8x8_t CgRaw = vreinterpret_s8_u8(vshl_u8(raw.val[0], cllv));
            const int8x8_t CoRaw = vreinterpret_s8_u8(vshl_u8(raw.val[1], cllv));
            const int16x8_t Cg = vmovl_s8(CgRaw);
            const int16x8_t Co = vmovl_s8(CoRaw);
            const int16x8_t Y = vreinterpretq_s16_u16(vmovl_u8(raw.val[2])); /* UINT8 -> INT16 */
            /* YCoCg inverse transform:
             * T = Y - Cg; R = T + Co; G = Y + Cg; B = T - Co */
            const int16x8_t T = vsubq_s16(Y, Cg);
            const int16x8_t R = vaddq_s16(T, Co);
            const int16x8_t G = vaddq_s16(Y, Cg);
            const int16x8_t B = vsubq_s16(T, Co);
            uint8x8x4_t bgrx;
            /* vqmovun_s16 saturates to the [0, 255] byte range. */
            bgrx.val[bPos] = vqmovun_s16(B);
            bgrx.val[gPos] = vqmovun_s16(G);
            bgrx.val[rPos] = vqmovun_s16(R);
            if (alpha)
                bgrx.val[aPos] = raw.val[3];
            else
                bgrx.val[aPos] = aVal;
            vst4_u8(dptr, bgrx);
            sptr += sizeof(raw);
            dptr += sizeof(bgrx);
        }
        /* Scalar tail for the remaining width % 8 pixels. */
        for (UINT32 x = 0; x < pad; x++)
        {
            /* Note: shifts must be done before sign-conversion. */
            const INT16 Cg = (INT16)((INT8)((*sptr++) << cll));
            const INT16 Co = (INT16)((INT8)((*sptr++) << cll));
            const INT16 Y = (INT16)(*sptr++); /* UINT8->INT16 */
            const INT16 T = Y - Cg;
            const INT16 R = T + Co;
            const INT16 G = Y + Cg;
            const INT16 B = T - Co;
            BYTE bgra[4];
            bgra[bPos] = CLIP(B);
            bgra[gPos] = CLIP(G);
            bgra[rPos] = CLIP(R);
            /* Source alpha byte is consumed unconditionally to keep sptr in
             * sync, then overwritten when alpha output is disabled. */
            bgra[aPos] = *sptr++;
            if (!alpha)
                bgra[aPos] = 0xFF;
            *dptr++ = bgra[0];
            *dptr++ = bgra[1];
            *dptr++ = bgra[2];
            *dptr++ = bgra[3];
        }
        sptr += srcPad;
        dptr += dstPad;
    }
    return PRIMITIVES_SUCCESS;
}
/* Dispatch a YCoCg -> RGB conversion by destination pixel format.
 *
 * Maps the format to the byte position of each output channel and forwards
 * to the NEON worker; formats without a NEON path fall back to the generic
 * implementation.
 */
static pstatus_t neon_YCoCgToRGB_8u_AC4R(const BYTE* WINPR_RESTRICT pSrc, INT32 srcStep,
                                         BYTE* WINPR_RESTRICT pDst, UINT32 DstFormat, INT32 dstStep,
                                         UINT32 width, UINT32 height, UINT8 shift, BOOL withAlpha)
{
    BYTE bPos;
    BYTE gPos;
    BYTE rPos;
    BYTE aPos;

    switch (DstFormat)
    {
        case PIXEL_FORMAT_BGRA32:
        case PIXEL_FORMAT_BGRX32:
            bPos = 2;
            gPos = 1;
            rPos = 0;
            aPos = 3;
            break;
        case PIXEL_FORMAT_RGBA32:
        case PIXEL_FORMAT_RGBX32:
            bPos = 0;
            gPos = 1;
            rPos = 2;
            aPos = 3;
            break;
        case PIXEL_FORMAT_ARGB32:
        case PIXEL_FORMAT_XRGB32:
            bPos = 1;
            gPos = 2;
            rPos = 3;
            aPos = 0;
            break;
        case PIXEL_FORMAT_ABGR32:
        case PIXEL_FORMAT_XBGR32:
            bPos = 3;
            gPos = 2;
            rPos = 1;
            aPos = 0;
            break;
        default:
            /* No NEON specialization for this format. */
            return generic->YCoCgToRGB_8u_AC4R(pSrc, srcStep, pDst, DstFormat, dstStep, width,
                                               height, shift, withAlpha);
    }

    return neon_YCoCgToRGB_8u_X(pSrc, srcStep, pDst, DstFormat, dstStep, width, height, shift,
                                bPos, gPos, rPos, aPos, withAlpha);
}
#endif
/* ------------------------------------------------------------------------- */
/* Install the NEON YCoCg conversion entry point into the primitives table.
 *
 * When NEON intrinsics are unavailable at build time this is a no-op apart
 * from a verbose log message. Also caches the generic primitives table used
 * as the fallback for unsupported pixel formats.
 */
void primitives_init_YCoCg_neon_int(primitives_t* WINPR_RESTRICT prims)
{
#if defined(NEON_INTRINSICS_ENABLED)
    generic = primitives_get_generic();
    WLog_VRB(PRIM_TAG, "NEON optimizations");
    prims->YCoCgToRGB_8u_AC4R = neon_YCoCgToRGB_8u_AC4R;
#else
    WLog_VRB(PRIM_TAG, "undefined WITH_SIMD or neon intrinsics not available");
    WINPR_UNUSED(prims);
#endif
}

View File

@@ -0,0 +1,837 @@
/**
* FreeRDP: A Remote Desktop Protocol Implementation
* Optimized YUV/RGB conversion operations
*
* Copyright 2014 Thomas Erbesdobler
* Copyright 2016-2017 Armin Novak <armin.novak@thincast.com>
* Copyright 2016-2017 Norbert Federa <norbert.federa@thincast.com>
* Copyright 2016-2017 Thincast Technologies GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <freerdp/config.h>
#include <winpr/sysinfo.h>
#include <winpr/crt.h>
#include <freerdp/types.h>
#include <freerdp/primitives.h>
#include "prim_internal.h"
#include "prim_YUV.h"
#if defined(NEON_INTRINSICS_ENABLED)
#include <arm_neon.h>
static primitives_t* generic = nullptr;
/* Compute the R channel for 8 pixels from C = Y * 256 and E = V - 128.
 *
 * NOTE(review): the D (U - 128) argument is unused here; the parameter
 * exists so all three channel helpers share one signature.
 * Result is saturated to [0, 255].
 */
static inline uint8x8_t neon_YUV2R_single(uint16x8_t C, int16x8_t D, int16x8_t E)
{
    /* R = (256 * Y + 403 * (V - 128)) >> 8 */
    /* High half: widen to 32 bit, multiply-accumulate, shift back. */
    const int32x4_t Ch = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(C)));
    const int32x4_t e403h = vmull_n_s16(vget_high_s16(E), 403);
    const int32x4_t cehm = vaddq_s32(Ch, e403h);
    const int32x4_t ceh = vshrq_n_s32(cehm, 8);
    /* Low half, same computation. */
    const int32x4_t Cl = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(C)));
    const int32x4_t e403l = vmull_n_s16(vget_low_s16(E), 403);
    const int32x4_t celm = vaddq_s32(Cl, e403l);
    const int32x4_t cel = vshrq_n_s32(celm, 8);
    /* Narrow with saturation to 16 bit, then to unsigned 8 bit. */
    const int16x8_t ce = vcombine_s16(vqmovn_s32(cel), vqmovn_s32(ceh));
    return vqmovun_s16(ce);
}
/* R channel for 16 pixels: apply the 8-pixel helper to both register halves. */
static inline uint8x8x2_t neon_YUV2R(uint16x8x2_t C, int16x8x2_t D, int16x8x2_t E)
{
    uint8x8x2_t out;
    out.val[0] = neon_YUV2R_single(C.val[0], D.val[0], E.val[0]);
    out.val[1] = neon_YUV2R_single(C.val[1], D.val[1], E.val[1]);
    return out;
}
/* Compute the G channel for 8 pixels from C = Y * 256, D = U - 128 and
 * E = V - 128. Result is saturated to [0, 255].
 */
static inline uint8x8_t neon_YUV2G_single(uint16x8_t C, int16x8_t D, int16x8_t E)
{
    /* G = (256L * Y - 48 * (U - 128) - 120 * (V - 128)) >> 8 */
    /* Chroma contribution, shared by both halves. */
    const int16x8_t d48 = vmulq_n_s16(D, 48);
    const int16x8_t e120 = vmulq_n_s16(E, 120);
    /* High half: widen, subtract chroma from luma, shift back. */
    const int32x4_t deh = vaddl_s16(vget_high_s16(d48), vget_high_s16(e120));
    const int32x4_t Ch = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(C)));
    const int32x4_t cdeh32m = vsubq_s32(Ch, deh);
    const int32x4_t cdeh32 = vshrq_n_s32(cdeh32m, 8);
    const int16x4_t cdeh = vqmovn_s32(cdeh32);
    /* Low half, same computation. */
    const int32x4_t del = vaddl_s16(vget_low_s16(d48), vget_low_s16(e120));
    const int32x4_t Cl = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(C)));
    const int32x4_t cdel32m = vsubq_s32(Cl, del);
    const int32x4_t cdel32 = vshrq_n_s32(cdel32m, 8);
    const int16x4_t cdel = vqmovn_s32(cdel32);
    /* Narrow with saturation to unsigned 8 bit. */
    const int16x8_t cde = vcombine_s16(cdel, cdeh);
    return vqmovun_s16(cde);
}
/* G channel for 16 pixels: apply the 8-pixel helper to both register halves. */
static inline uint8x8x2_t neon_YUV2G(uint16x8x2_t C, int16x8x2_t D, int16x8x2_t E)
{
    uint8x8x2_t out;
    out.val[0] = neon_YUV2G_single(C.val[0], D.val[0], E.val[0]);
    out.val[1] = neon_YUV2G_single(C.val[1], D.val[1], E.val[1]);
    return out;
}
/* Compute the B channel for 8 pixels from C = Y * 256 and D = U - 128.
 *
 * NOTE(review): the E (V - 128) argument is unused here; the parameter
 * exists so all three channel helpers share one signature.
 * Result is saturated to [0, 255].
 */
static inline uint8x8_t neon_YUV2B_single(uint16x8_t C, int16x8_t D, int16x8_t E)
{
    /* B = (256L * Y + 475 * (U - 128)) >> 8*/
    /* High half: widen to 32 bit, multiply-accumulate, shift back. */
    const int32x4_t Ch = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(C)));
    const int32x4_t d475h = vmull_n_s16(vget_high_s16(D), 475);
    const int32x4_t cdhm = vaddq_s32(Ch, d475h);
    const int32x4_t cdh = vshrq_n_s32(cdhm, 8);
    /* Low half, same computation. */
    const int32x4_t Cl = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(C)));
    const int32x4_t d475l = vmull_n_s16(vget_low_s16(D), 475);
    const int32x4_t cdlm = vaddq_s32(Cl, d475l);
    const int32x4_t cdl = vshrq_n_s32(cdlm, 8);
    /* Narrow with saturation to unsigned 8 bit. */
    const int16x8_t cd = vcombine_s16(vqmovn_s32(cdl), vqmovn_s32(cdh));
    return vqmovun_s16(cd);
}
/* B channel for 16 pixels: apply the 8-pixel helper to both register halves. */
static inline uint8x8x2_t neon_YUV2B(uint16x8x2_t C, int16x8x2_t D, int16x8x2_t E)
{
    uint8x8x2_t out;
    out.val[0] = neon_YUV2B_single(C.val[0], D.val[0], E.val[0]);
    out.val[1] = neon_YUV2B_single(C.val[1], D.val[1], E.val[1]);
    return out;
}
/* Store 8 RGB pixels into an interleaved 4-byte-per-pixel buffer.
 *
 * Uses a load-modify-store so the byte at aPos keeps its existing value in
 * the destination. NOTE(review): aPos itself is never written here — the
 * parameter only documents which lane is left untouched.
 */
static inline void neon_store_bgrx(BYTE* WINPR_RESTRICT pRGB, uint8x8_t r, uint8x8_t g, uint8x8_t b,
                                   uint8_t rPos, uint8_t gPos, uint8_t bPos, uint8_t aPos)
{
    uint8x8x4_t bgrx = vld4_u8(pRGB);
    bgrx.val[rPos] = r;
    bgrx.val[gPos] = g;
    bgrx.val[bPos] = b;
    vst4_u8(pRGB, bgrx);
}
/* Convert 16 pixels: Y holds two 8-pixel luma registers, D/E the matching
 * biased chroma (U - 128, V - 128). Writes 64 bytes at pRGB, preserving the
 * alpha lane already present in the destination.
 */
static inline void neon_YuvToRgbPixel(BYTE* pRGB, uint8x8x2_t Y, int16x8x2_t D, int16x8x2_t E,
                                      const uint8_t rPos, const uint8_t gPos, const uint8_t bPos,
                                      const uint8_t aPos)
{
    /* Y * 256 == Y << 8 */
    const uint16x8x2_t C = { { vshlq_n_u16(vmovl_u8(Y.val[0]), 8),
                               vshlq_n_u16(vmovl_u8(Y.val[1]), 8) } };
    const uint8x8x2_t r = neon_YUV2R(C, D, E);
    const uint8x8x2_t g = neon_YUV2G(C, D, E);
    const uint8x8x2_t b = neon_YUV2B(C, D, E);
    /* First 8 pixels, then the next 8 (32 bytes apart). */
    neon_store_bgrx(pRGB, r.val[0], g.val[0], b.val[0], rPos, gPos, bPos, aPos);
    neon_store_bgrx(pRGB + sizeof(uint8x8x4_t), r.val[1], g.val[1], b.val[1], rPos, gPos, bPos,
                    aPos);
}
/* Load 8 subsampled chroma samples covering 16 output pixels at column x.
 *
 * Widens to 16 bit, removes the bias of 128 and duplicates every sample
 * (vzipq of the register with itself) so each chroma value is paired with
 * two adjacent luma samples.
 */
static inline int16x8x2_t loadUV(const BYTE* WINPR_RESTRICT pV, size_t x)
{
    const uint8x8_t Vraw = vld1_u8(&pV[x / 2]);
    const int16x8_t V = vreinterpretq_s16_u16(vmovl_u8(Vraw));
    const int16x8_t c128 = vdupq_n_s16(128);
    const int16x8_t E = vsubq_s16(V, c128);
    return vzipq_s16(E, E);
}
/* Scalar fallback: convert one YUV pixel and write its R, G, B bytes.
 *
 * NOTE(review): aPos is unused — the alpha byte of the destination pixel is
 * deliberately left untouched, matching the vector path.
 */
static inline void neon_write_pixel(BYTE* pRGB, BYTE Y, BYTE U, BYTE V, const uint8_t rPos,
                                    const uint8_t gPos, const uint8_t bPos, const uint8_t aPos)
{
    const BYTE r = YUV2R(Y, U, V);
    const BYTE g = YUV2G(Y, U, V);
    const BYTE b = YUV2B(Y, U, V);
    pRGB[rPos] = r;
    pRGB[gPos] = g;
    pRGB[bPos] = b;
}
/* Convert two adjacent luma rows that share one U and one V row (4:2:0).
 *
 * Vector loop handles 16 pixels per row per iteration; a pairwise scalar
 * loop covers remaining even pixels, and a final loop any odd trailing
 * pixel.
 */
static inline void neon_YUV420ToX_DOUBLE_ROW(const BYTE* WINPR_RESTRICT pY[2],
                                             const BYTE* WINPR_RESTRICT pU,
                                             const BYTE* WINPR_RESTRICT pV,
                                             BYTE* WINPR_RESTRICT pRGB[2], size_t width,
                                             const uint8_t rPos, const uint8_t gPos,
                                             const uint8_t bPos, const uint8_t aPos)
{
    UINT32 x = 0;
    /* 16 pixels per iteration; chroma registers are shared by both rows. */
    for (; x < width - width % 16; x += 16)
    {
        const uint8x16_t Y0raw = vld1q_u8(&pY[0][x]);
        const uint8x8x2_t Y0 = { { vget_low_u8(Y0raw), vget_high_u8(Y0raw) } };
        const int16x8x2_t D = loadUV(pU, x);
        const int16x8x2_t E = loadUV(pV, x);
        neon_YuvToRgbPixel(&pRGB[0][4ULL * x], Y0, D, E, rPos, gPos, bPos, aPos);
        const uint8x16_t Y1raw = vld1q_u8(&pY[1][x]);
        const uint8x8x2_t Y1 = { { vget_low_u8(Y1raw), vget_high_u8(Y1raw) } };
        neon_YuvToRgbPixel(&pRGB[1][4ULL * x], Y1, D, E, rPos, gPos, bPos, aPos);
    }
    /* Scalar tail: 2x2 pixel blocks sharing one chroma sample. */
    for (; x < width - width % 2; x += 2)
    {
        const BYTE U = pU[x / 2];
        const BYTE V = pV[x / 2];
        neon_write_pixel(&pRGB[0][4 * x], pY[0][x], U, V, rPos, gPos, bPos, aPos);
        neon_write_pixel(&pRGB[0][4 * (1ULL + x)], pY[0][1ULL + x], U, V, rPos, gPos, bPos, aPos);
        neon_write_pixel(&pRGB[1][4 * x], pY[1][x], U, V, rPos, gPos, bPos, aPos);
        neon_write_pixel(&pRGB[1][4 * (1ULL + x)], pY[1][1ULL + x], U, V, rPos, gPos, bPos, aPos);
    }
    /* Odd final column, if any. */
    for (; x < width; x++)
    {
        const BYTE U = pU[x / 2];
        const BYTE V = pV[x / 2];
        neon_write_pixel(&pRGB[0][4 * x], pY[0][x], U, V, rPos, gPos, bPos, aPos);
        neon_write_pixel(&pRGB[1][4 * x], pY[1][x], U, V, rPos, gPos, bPos, aPos);
    }
}
/* Convert one luma row against one U and one V row (4:2:0, odd-height
 * remainder). Same structure as the double-row variant but for a single
 * output row.
 */
static inline void neon_YUV420ToX_SINGLE_ROW(const BYTE* WINPR_RESTRICT pY,
                                             const BYTE* WINPR_RESTRICT pU,
                                             const BYTE* WINPR_RESTRICT pV,
                                             BYTE* WINPR_RESTRICT pRGB, size_t width,
                                             const uint8_t rPos, const uint8_t gPos,
                                             const uint8_t bPos, const uint8_t aPos)
{
    UINT32 x = 0;
    /* 16 pixels per iteration. */
    for (; x < width - width % 16; x += 16)
    {
        const uint8x16_t Y0raw = vld1q_u8(&pY[x]);
        const uint8x8x2_t Y0 = { { vget_low_u8(Y0raw), vget_high_u8(Y0raw) } };
        const int16x8x2_t D = loadUV(pU, x);
        const int16x8x2_t E = loadUV(pV, x);
        neon_YuvToRgbPixel(&pRGB[4ULL * x], Y0, D, E, rPos, gPos, bPos, aPos);
    }
    /* Scalar tail: pixel pairs sharing one chroma sample. */
    for (; x < width - width % 2; x += 2)
    {
        const BYTE U = pU[x / 2];
        const BYTE V = pV[x / 2];
        neon_write_pixel(&pRGB[4 * x], pY[x], U, V, rPos, gPos, bPos, aPos);
        neon_write_pixel(&pRGB[4 * (1ULL + x)], pY[1ULL + x], U, V, rPos, gPos, bPos, aPos);
    }
    /* Odd final column, if any. */
    for (; x < width; x++)
    {
        const BYTE U = pU[x / 2];
        const BYTE V = pV[x / 2];
        neon_write_pixel(&pRGB[4 * x], pY[x], U, V, rPos, gPos, bPos, aPos);
    }
}
/* Convert a full YUV 4:2:0 planar frame (pSrc = Y/U/V planes) to a 4-byte
 * RGB format with the given channel byte positions. Rows are processed in
 * pairs (sharing one chroma row); an odd last row is handled separately.
 * Always returns PRIMITIVES_SUCCESS.
 */
static inline pstatus_t neon_YUV420ToX(const BYTE* WINPR_RESTRICT pSrc[3], const UINT32 srcStep[3],
                                       BYTE* WINPR_RESTRICT pDst, UINT32 dstStep,
                                       const prim_size_t* WINPR_RESTRICT roi, const uint8_t rPos,
                                       const uint8_t gPos, const uint8_t bPos, const uint8_t aPos)
{
    const UINT32 nWidth = roi->width;
    const UINT32 nHeight = roi->height;
    /* nHeight is unsigned: the y < nHeight - 1 loop below needs it > 0. */
    WINPR_ASSERT(nHeight > 0);
    UINT32 y = 0;
    for (; y < (nHeight - 1); y += 2)
    {
        const uint8_t* pY[2] = { pSrc[0] + y * srcStep[0], pSrc[0] + (1ULL + y) * srcStep[0] };
        /* One chroma row serves two luma rows. */
        const uint8_t* pU = pSrc[1] + (y / 2) * srcStep[1];
        const uint8_t* pV = pSrc[2] + (y / 2) * srcStep[2];
        uint8_t* pRGB[2] = { pDst + y * dstStep, pDst + (1ULL + y) * dstStep };
        neon_YUV420ToX_DOUBLE_ROW(pY, pU, pV, pRGB, nWidth, rPos, gPos, bPos, aPos);
    }
    /* Remaining row when the height is odd. */
    for (; y < nHeight; y++)
    {
        const uint8_t* pY = pSrc[0] + y * srcStep[0];
        const uint8_t* pU = pSrc[1] + (y / 2) * srcStep[1];
        const uint8_t* pV = pSrc[2] + (y / 2) * srcStep[2];
        uint8_t* pRGB = pDst + y * dstStep;
        neon_YUV420ToX_SINGLE_ROW(pY, pU, pV, pRGB, nWidth, rPos, gPos, bPos, aPos);
    }
    return PRIMITIVES_SUCCESS;
}
/* Dispatch a YUV 4:2:0 -> RGB conversion by destination pixel format.
 *
 * Maps the format to the byte position of each output channel; formats
 * without a NEON path fall back to the generic implementation.
 */
static pstatus_t neon_YUV420ToRGB_8u_P3AC4R(const BYTE* WINPR_RESTRICT pSrc[3],
                                            const UINT32 srcStep[3], BYTE* WINPR_RESTRICT pDst,
                                            UINT32 dstStep, UINT32 DstFormat,
                                            const prim_size_t* WINPR_RESTRICT roi)
{
    uint8_t rPos;
    uint8_t gPos;
    uint8_t bPos;
    uint8_t aPos;

    switch (DstFormat)
    {
        case PIXEL_FORMAT_BGRA32:
        case PIXEL_FORMAT_BGRX32:
            rPos = 2;
            gPos = 1;
            bPos = 0;
            aPos = 3;
            break;
        case PIXEL_FORMAT_RGBA32:
        case PIXEL_FORMAT_RGBX32:
            rPos = 0;
            gPos = 1;
            bPos = 2;
            aPos = 3;
            break;
        case PIXEL_FORMAT_ARGB32:
        case PIXEL_FORMAT_XRGB32:
            rPos = 1;
            gPos = 2;
            bPos = 3;
            aPos = 0;
            break;
        case PIXEL_FORMAT_ABGR32:
        case PIXEL_FORMAT_XBGR32:
            rPos = 3;
            gPos = 2;
            bPos = 1;
            aPos = 0;
            break;
        default:
            /* No NEON specialization for this format. */
            return generic->YUV420ToRGB_8u_P3AC4R(pSrc, srcStep, pDst, dstStep, DstFormat, roi);
    }

    return neon_YUV420ToX(pSrc, srcStep, pDst, dstStep, roi, rPos, gPos, bPos, aPos);
}
/* Widen 8 chroma bytes to signed 16 bit and remove the bias of 128. */
static inline int16x8_t loadUVreg(uint8x8_t Vraw)
{
    const int16x8_t widened = vreinterpretq_s16_u16(vmovl_u8(Vraw));
    return vsubq_s16(widened, vdupq_n_s16(128));
}
/* Load 16 full-resolution (4:4:4) chroma samples as two bias-removed
 * 16-bit registers. */
static inline int16x8x2_t loadUV444(uint8x16_t Vld)
{
    int16x8x2_t res;
    res.val[0] = loadUVreg(vget_low_u8(Vld));
    res.val[1] = loadUVreg(vget_high_u8(Vld));
    return res;
}
/* Reconstruct the top-left chroma sample of a 2x2 block.
 *
 * candidate = 4 * U[0][0] - (U[0][1] + U[1][0] + U[1][1]); CONDITIONAL_CLIP
 * keeps the original sample when the candidate deviates too far from it.
 * Only U[0][0] is modified.
 */
static inline void avgUV(BYTE U[2][2])
{
    const BYTE orig = U[0][0];
    const INT16 scaled = (INT16)(orig << 2);
    const INT16 neighbours = (INT16)(U[0][1] + U[1][0] + U[1][1]);
    const INT16 candidate = (INT16)(scaled - neighbours);
    U[0][0] = CONDITIONAL_CLIP(candidate, orig);
}
/* Vector version of avgUV: reconstruct the even-column chroma samples of
 * two interleaved rows (16 samples each) in place.
 *
 * For each 2x2 block the candidate is 4 * U00 - (U01 + U10 + U11); the
 * original U00 is kept when |U00 - candidate| >= 30 (blend via bitmask).
 * Only pU[0] is rewritten; pU[1] is left unchanged.
 */
static inline void neon_avgUV(uint8x16_t pU[2])
{
    /* put even and odd values into different registers.
     * U 0/0 is in lower half */
    const uint8x16x2_t usplit = vuzpq_u8(pU[0], pU[1]);
    const uint8x16_t ueven = usplit.val[0];
    const uint8x16_t uodd = usplit.val[1];
    const uint8x8_t u00 = vget_low_u8(ueven);
    const uint8x8_t u01 = vget_low_u8(uodd);
    const uint8x8_t u10 = vget_high_u8(ueven);
    const uint8x8_t u11 = vget_high_u8(uodd);
    /* Create sum of U01 + U10 + U11 */
    const uint16x8_t uoddsum = vaddl_u8(u01, u10);
    const uint16x8_t usum = vaddq_u16(uoddsum, vmovl_u8(u11));
    /* U00 * 4 */
    const uint16x8_t umul = vshll_n_u8(u00, 2);
    /* 4 * U00 - (U01 + U10 + U11) */
    const int16x8_t wavg = vsubq_s16(vreinterpretq_s16_u16(umul), vreinterpretq_s16_u16(usum));
    const uint8x8_t avg = vqmovun_s16(wavg);
    /* abs(u00 - avg) */
    const uint8x8_t absdiff = vabd_u8(avg, u00);
    /* (diff < 30) ? u00 : avg */
    const uint8x8_t mask = vclt_u8(absdiff, vdup_n_u8(30));
    /* out1 = u00 & mask */
    const uint8x8_t out1 = vand_u8(u00, mask);
    /* invmask = ~mask */
    const uint8x8_t notmask = vmvn_u8(mask);
    /* out2 = avg & invmask */
    const uint8x8_t out2 = vand_u8(avg, notmask);
    /* out = out1 | out2 */
    const uint8x8_t out = vorr_u8(out1, out2);
    /* Re-interleave the reconstructed even samples with the odd ones. */
    const uint8x8x2_t ua = vzip_u8(out, u01);
    const uint8x16_t u = vcombine_u8(ua.val[0], ua.val[1]);
    pU[0] = u;
}
/* Convert one row of full-resolution (4:4:4) YUV to RGB.
 *
 * Requires an even width (asserted). 16 pixels per vector iteration with a
 * pairwise scalar tail. Always returns PRIMITIVES_SUCCESS.
 */
static inline pstatus_t neon_YUV444ToX_SINGLE_ROW(const BYTE* WINPR_RESTRICT pY,
                                                  const BYTE* WINPR_RESTRICT pU,
                                                  const BYTE* WINPR_RESTRICT pV,
                                                  BYTE* WINPR_RESTRICT pRGB, size_t width,
                                                  const uint8_t rPos, const uint8_t gPos,
                                                  const uint8_t bPos, const uint8_t aPos)
{
    WINPR_ASSERT(width % 2 == 0);
    size_t x = 0;
    for (; x < width - width % 16; x += 16)
    {
        uint8x16_t U = vld1q_u8(&pU[x]);
        uint8x16_t V = vld1q_u8(&pV[x]);
        const uint8x16_t Y0raw = vld1q_u8(&pY[x]);
        const uint8x8x2_t Y0 = { { vget_low_u8(Y0raw), vget_high_u8(Y0raw) } };
        const int16x8x2_t D0 = loadUV444(U);
        const int16x8x2_t E0 = loadUV444(V);
        neon_YuvToRgbPixel(&pRGB[4ULL * x], Y0, D0, E0, rPos, gPos, bPos, aPos);
    }
    /* Scalar tail, two pixels at a time (width is even). */
    for (; x < width; x += 2)
    {
        BYTE* rgb = &pRGB[x * 4];
        for (size_t j = 0; j < 2; j++)
        {
            const BYTE y = pY[x + j];
            const BYTE u = pU[x + j];
            const BYTE v = pV[x + j];
            neon_write_pixel(&rgb[4 * (j)], y, u, v, rPos, gPos, bPos, aPos);
        }
    }
    return PRIMITIVES_SUCCESS;
}
/* Convert two adjacent 4:4:4 YUV rows to RGB, first smoothing the chroma of
 * the top row with the avgUV / neon_avgUV 2x2 reconstruction filter.
 *
 * Requires an even width (asserted). Always returns PRIMITIVES_SUCCESS.
 */
static inline pstatus_t neon_YUV444ToX_DOUBLE_ROW(const BYTE* WINPR_RESTRICT pY[2],
                                                  const BYTE* WINPR_RESTRICT pU[2],
                                                  const BYTE* WINPR_RESTRICT pV[2],
                                                  BYTE* WINPR_RESTRICT pRGB[2], size_t width,
                                                  const uint8_t rPos, const uint8_t gPos,
                                                  const uint8_t bPos, const uint8_t aPos)
{
    WINPR_ASSERT(width % 2 == 0);
    size_t x = 0;
    for (; x < width - width % 16; x += 16)
    {
        /* Smooth the top-row chroma in place, then convert both rows. */
        uint8x16_t U[2] = { vld1q_u8(&pU[0][x]), vld1q_u8(&pU[1][x]) };
        neon_avgUV(U);
        uint8x16_t V[2] = { vld1q_u8(&pV[0][x]), vld1q_u8(&pV[1][x]) };
        neon_avgUV(V);
        const uint8x16_t Y0raw = vld1q_u8(&pY[0][x]);
        const uint8x8x2_t Y0 = { { vget_low_u8(Y0raw), vget_high_u8(Y0raw) } };
        const int16x8x2_t D0 = loadUV444(U[0]);
        const int16x8x2_t E0 = loadUV444(V[0]);
        neon_YuvToRgbPixel(&pRGB[0][4ULL * x], Y0, D0, E0, rPos, gPos, bPos, aPos);
        const uint8x16_t Y1raw = vld1q_u8(&pY[1][x]);
        const uint8x8x2_t Y1 = { { vget_low_u8(Y1raw), vget_high_u8(Y1raw) } };
        const int16x8x2_t D1 = loadUV444(U[1]);
        const int16x8x2_t E1 = loadUV444(V[1]);
        neon_YuvToRgbPixel(&pRGB[1][4ULL * x], Y1, D1, E1, rPos, gPos, bPos, aPos);
    }
    /* Scalar tail: 2x2 blocks with the same chroma reconstruction. */
    for (; x < width; x += 2)
    {
        BYTE* rgb[2] = { &pRGB[0][x * 4], &pRGB[1][x * 4] };
        BYTE U[2][2] = { { pU[0][x], pU[0][x + 1] }, { pU[1][x], pU[1][x + 1] } };
        avgUV(U);
        BYTE V[2][2] = { { pV[0][x], pV[0][x + 1] }, { pV[1][x], pV[1][x + 1] } };
        avgUV(V);
        for (size_t i = 0; i < 2; i++)
        {
            for (size_t j = 0; j < 2; j++)
            {
                const BYTE y = pY[i][x + j];
                const BYTE u = U[i][j];
                const BYTE v = V[i][j];
                neon_write_pixel(&rgb[i][4 * (j)], y, u, v, rPos, gPos, bPos, aPos);
            }
        }
    }
    return PRIMITIVES_SUCCESS;
}
/* Convert a full 4:4:4 planar YUV frame to a 4-byte RGB format with the
 * given channel byte positions. Rows are processed in pairs (enabling the
 * 2x2 chroma reconstruction); an odd last row is converted directly.
 */
static inline pstatus_t neon_YUV444ToX(const BYTE* WINPR_RESTRICT pSrc[3], const UINT32 srcStep[3],
                                       BYTE* WINPR_RESTRICT pDst, UINT32 dstStep,
                                       const prim_size_t* WINPR_RESTRICT roi, const uint8_t rPos,
                                       const uint8_t gPos, const uint8_t bPos, const uint8_t aPos)
{
    WINPR_ASSERT(roi);
    const UINT32 nWidth = roi->width;
    const UINT32 nHeight = roi->height;
    size_t y = 0;
    for (; y < nHeight - nHeight % 2; y += 2)
    {
        const uint8_t* WINPR_RESTRICT pY[2] = { pSrc[0] + y * srcStep[0],
                                                pSrc[0] + (y + 1) * srcStep[0] };
        const uint8_t* WINPR_RESTRICT pU[2] = { pSrc[1] + y * srcStep[1],
                                                pSrc[1] + (y + 1) * srcStep[1] };
        const uint8_t* WINPR_RESTRICT pV[2] = { pSrc[2] + y * srcStep[2],
                                                pSrc[2] + (y + 1) * srcStep[2] };
        uint8_t* WINPR_RESTRICT pRGB[2] = { &pDst[y * dstStep], &pDst[(y + 1) * dstStep] };
        const pstatus_t rc =
            neon_YUV444ToX_DOUBLE_ROW(pY, pU, pV, pRGB, nWidth, rPos, gPos, bPos, aPos);
        if (rc != PRIMITIVES_SUCCESS)
            return rc;
    }
    /* Remaining row when the height is odd. */
    for (; y < nHeight; y++)
    {
        const uint8_t* WINPR_RESTRICT pY = pSrc[0] + y * srcStep[0];
        const uint8_t* WINPR_RESTRICT pU = pSrc[1] + y * srcStep[1];
        const uint8_t* WINPR_RESTRICT pV = pSrc[2] + y * srcStep[2];
        uint8_t* WINPR_RESTRICT pRGB = &pDst[y * dstStep];
        const pstatus_t rc =
            neon_YUV444ToX_SINGLE_ROW(pY, pU, pV, pRGB, nWidth, rPos, gPos, bPos, aPos);
        if (rc != PRIMITIVES_SUCCESS)
            return rc;
    }
    return PRIMITIVES_SUCCESS;
}
/* Dispatch a YUV 4:4:4 -> RGB conversion by destination pixel format.
 *
 * Maps the format to the byte position of each output channel; formats
 * without a NEON path fall back to the generic implementation.
 */
static pstatus_t neon_YUV444ToRGB_8u_P3AC4R(const BYTE* WINPR_RESTRICT pSrc[3],
                                            const UINT32 srcStep[3], BYTE* WINPR_RESTRICT pDst,
                                            UINT32 dstStep, UINT32 DstFormat,
                                            const prim_size_t* WINPR_RESTRICT roi)
{
    uint8_t rPos;
    uint8_t gPos;
    uint8_t bPos;
    uint8_t aPos;

    switch (DstFormat)
    {
        case PIXEL_FORMAT_BGRA32:
        case PIXEL_FORMAT_BGRX32:
            rPos = 2;
            gPos = 1;
            bPos = 0;
            aPos = 3;
            break;
        case PIXEL_FORMAT_RGBA32:
        case PIXEL_FORMAT_RGBX32:
            rPos = 0;
            gPos = 1;
            bPos = 2;
            aPos = 3;
            break;
        case PIXEL_FORMAT_ARGB32:
        case PIXEL_FORMAT_XRGB32:
            rPos = 1;
            gPos = 2;
            bPos = 3;
            aPos = 0;
            break;
        case PIXEL_FORMAT_ABGR32:
        case PIXEL_FORMAT_XBGR32:
            rPos = 3;
            gPos = 2;
            bPos = 1;
            aPos = 0;
            break;
        default:
            /* No NEON specialization for this format. */
            return generic->YUV444ToRGB_8u_P3AC4R(pSrc, srcStep, pDst, dstStep, DstFormat, roi);
    }

    return neon_YUV444ToX(pSrc, srcStep, pDst, dstStep, roi, rPos, gPos, bPos, aPos);
}
/* AVC444 luma frame stage (blocks B1-B3 of the MS-RDPEGFX combination):
 * copy the Y plane 1:1 and up-sample the half-resolution U and V planes to
 * full 4:4:4 resolution by duplicating each sample horizontally and
 * vertically within the ROI.
 */
static pstatus_t neon_LumaToYUV444(const BYTE* WINPR_RESTRICT pSrcRaw[3], const UINT32 srcStep[3],
                                   BYTE* WINPR_RESTRICT pDstRaw[3], const UINT32 dstStep[3],
                                   const RECTANGLE_16* WINPR_RESTRICT roi)
{
    const UINT32 nWidth = roi->right - roi->left;
    const UINT32 nHeight = roi->bottom - roi->top;
    const UINT32 halfWidth = (nWidth + 1) / 2;
    const UINT32 halfHeight = (nHeight + 1) / 2;
    const UINT32 evenY = 0;
    /* Source chroma planes are subsampled, hence the /2 offsets. */
    const BYTE* pSrc[3] = { pSrcRaw[0] + roi->top * srcStep[0] + roi->left,
                            pSrcRaw[1] + roi->top / 2 * srcStep[1] + roi->left / 2,
                            pSrcRaw[2] + roi->top / 2 * srcStep[2] + roi->left / 2 };
    BYTE* pDst[3] = { pDstRaw[0] + roi->top * dstStep[0] + roi->left,
                      pDstRaw[1] + roi->top * dstStep[1] + roi->left,
                      pDstRaw[2] + roi->top * dstStep[2] + roi->left };
    /* Y data is already here... */
    /* B1 */
    for (UINT32 y = 0; y < nHeight; y++)
    {
        const BYTE* Ym = pSrc[0] + srcStep[0] * y;
        BYTE* pY = pDst[0] + dstStep[0] * y;
        memcpy(pY, Ym, nWidth);
    }
    /* The first half of U, V are already here part of this frame. */
    /* B2 and B3 */
    for (UINT32 y = 0; y < halfHeight; y++)
    {
        const UINT32 val2y = (2 * y + evenY);
        const BYTE* Um = pSrc[1] + srcStep[1] * y;
        const BYTE* Vm = pSrc[2] + srcStep[2] * y;
        BYTE* pU = pDst[1] + dstStep[1] * val2y;
        BYTE* pV = pDst[2] + dstStep[2] * val2y;
        BYTE* pU1 = pU + dstStep[1]; /* the duplicated row below */
        BYTE* pV1 = pV + dstStep[2];
        UINT32 x = 0;
        /* vst2q_u8 with both lanes equal duplicates each sample
         * horizontally; the same data is stored to both output rows. */
        for (; x + 16 < halfWidth; x += 16)
        {
            {
                const uint8x16_t u = vld1q_u8(Um);
                uint8x16x2_t u2x;
                u2x.val[0] = u;
                u2x.val[1] = u;
                vst2q_u8(pU, u2x);
                vst2q_u8(pU1, u2x);
                Um += 16;
                pU += 32;
                pU1 += 32;
            }
            {
                const uint8x16_t v = vld1q_u8(Vm);
                uint8x16x2_t v2x;
                v2x.val[0] = v;
                v2x.val[1] = v;
                vst2q_u8(pV, v2x);
                vst2q_u8(pV1, v2x);
                Vm += 16;
                pV += 32;
                pV1 += 32;
            }
        }
        /* Scalar tail: write each chroma sample to a 2x2 block. */
        for (; x < halfWidth; x++)
        {
            const BYTE u = *Um++;
            const BYTE v = *Vm++;
            *pU++ = u;
            *pU++ = u;
            *pU1++ = u;
            *pU1++ = u;
            *pV++ = v;
            *pV++ = v;
            *pV1++ = v;
            *pV1++ = v;
        }
    }
    return PRIMITIVES_SUCCESS;
}
/* AVC444 chroma v1 stage (blocks B4-B7): merge the auxiliary frame's data
 * into the 4:4:4 chroma planes produced by the luma stage.
 *
 * B4/B5: rows of the auxiliary Y plane carry the odd output rows of U and V
 * (alternating in blocks of mod/2 = 8 rows). B6/B7: the auxiliary U and V
 * planes fill the odd output columns of the even rows.
 */
static pstatus_t neon_ChromaV1ToYUV444(const BYTE* WINPR_RESTRICT pSrcRaw[3],
                                       const UINT32 srcStep[3], BYTE* WINPR_RESTRICT pDstRaw[3],
                                       const UINT32 dstStep[3],
                                       const RECTANGLE_16* WINPR_RESTRICT roi)
{
    const UINT32 mod = 16;
    UINT32 uY = 0; /* count of U rows consumed so far */
    UINT32 vY = 0; /* count of V rows consumed so far */
    const UINT32 nWidth = roi->right - roi->left;
    const UINT32 nHeight = roi->bottom - roi->top;
    const UINT32 halfWidth = (nWidth) / 2;
    const UINT32 halfHeight = (nHeight) / 2;
    const UINT32 oddY = 1;
    const UINT32 evenY = 0;
    const UINT32 oddX = 1;
    /* The auxiliary frame is aligned to multiples of 16x16.
     * We need the padded height for B4 and B5 conversion. */
    const UINT32 padHeigth = nHeight + 16 - nHeight % 16;
    const UINT32 halfPad = halfWidth % 16; /* scalar tail length for B6/B7 */
    const BYTE* pSrc[3] = { pSrcRaw[0] + roi->top * srcStep[0] + roi->left,
                            pSrcRaw[1] + roi->top / 2 * srcStep[1] + roi->left / 2,
                            pSrcRaw[2] + roi->top / 2 * srcStep[2] + roi->left / 2 };
    BYTE* pDst[3] = { pDstRaw[0] + roi->top * dstStep[0] + roi->left,
                      pDstRaw[1] + roi->top * dstStep[1] + roi->left,
                      pDstRaw[2] + roi->top * dstStep[2] + roi->left };
    /* The second half of U and V is a bit more tricky... */
    /* B4 and B5 */
    for (UINT32 y = 0; y < padHeigth; y++)
    {
        const BYTE* Ya = pSrc[0] + srcStep[0] * y;
        BYTE* pX;
        /* First half of each 16-row block feeds U, second half feeds V. */
        if ((y) % mod < (mod + 1) / 2)
        {
            const UINT32 pos = (2 * uY++ + oddY);
            if (pos >= nHeight)
                continue;
            pX = pDst[1] + dstStep[1] * pos;
        }
        else
        {
            const UINT32 pos = (2 * vY++ + oddY);
            if (pos >= nHeight)
                continue;
            pX = pDst[2] + dstStep[2] * pos;
        }
        memcpy(pX, Ya, nWidth);
    }
    /* B6 and B7 */
    for (UINT32 y = 0; y < halfHeight; y++)
    {
        const UINT32 val2y = (y * 2 + evenY);
        const BYTE* Ua = pSrc[1] + srcStep[1] * y;
        const BYTE* Va = pSrc[2] + srcStep[2] * y;
        BYTE* pU = pDst[1] + dstStep[1] * val2y;
        BYTE* pV = pDst[2] + dstStep[2] * val2y;
        UINT32 x = 0;
        /* Load-modify-store keeps the even (lane 0) columns intact while
         * replacing the odd (lane 1) columns with auxiliary data. */
        for (; x < halfWidth - halfPad; x += 16)
        {
            {
                uint8x16x2_t u = vld2q_u8(&pU[2 * x]);
                u.val[1] = vld1q_u8(&Ua[x]);
                vst2q_u8(&pU[2 * x], u);
            }
            {
                uint8x16x2_t v = vld2q_u8(&pV[2 * x]);
                v.val[1] = vld1q_u8(&Va[x]);
                vst2q_u8(&pV[2 * x], v);
            }
        }
        for (; x < halfWidth; x++)
        {
            const UINT32 val2x1 = (x * 2 + oddX);
            pU[val2x1] = Ua[x];
            pV[val2x1] = Va[x];
        }
    }
    return PRIMITIVES_SUCCESS;
}
/* AVC444v2 chroma stage: merge auxiliary data into the 4:4:4 chroma planes.
 *
 * B4/B5: the auxiliary Y plane packs U in its left half and V in its right
 * half; these fill the odd output columns of every row. B6-B9: the
 * auxiliary U/V planes (again split left/right) fill columns 0 and 2 of
 * every 4-column group on the odd output rows.
 */
static pstatus_t neon_ChromaV2ToYUV444(const BYTE* WINPR_RESTRICT pSrc[3], const UINT32 srcStep[3],
                                       UINT32 nTotalWidth, UINT32 nTotalHeight,
                                       BYTE* WINPR_RESTRICT pDst[3], const UINT32 dstStep[3],
                                       const RECTANGLE_16* WINPR_RESTRICT roi)
{
    const UINT32 nWidth = roi->right - roi->left;
    const UINT32 nHeight = roi->bottom - roi->top;
    const UINT32 halfWidth = (nWidth + 1) / 2;
    const UINT32 halfPad = halfWidth % 16; /* scalar tail for B4/B5 */
    const UINT32 halfHeight = (nHeight + 1) / 2;
    const UINT32 quaterWidth = (nWidth + 3) / 4;
    const UINT32 quaterPad = quaterWidth % 16; /* scalar tail for B6-B9 */
    /* B4 and B5: odd UV values for width/2, height */
    for (UINT32 y = 0; y < nHeight; y++)
    {
        const UINT32 yTop = y + roi->top;
        /* U in the left half of the auxiliary Y plane, V in the right. */
        const BYTE* pYaU = pSrc[0] + srcStep[0] * yTop + roi->left / 2;
        const BYTE* pYaV = pYaU + nTotalWidth / 2;
        BYTE* pU = pDst[1] + dstStep[1] * yTop + roi->left;
        BYTE* pV = pDst[2] + dstStep[2] * yTop + roi->left;
        UINT32 x = 0;
        /* Load-modify-store: keep even columns, replace odd (lane 1). */
        for (; x < halfWidth - halfPad; x += 16)
        {
            {
                uint8x16x2_t u = vld2q_u8(&pU[2 * x]);
                u.val[1] = vld1q_u8(&pYaU[x]);
                vst2q_u8(&pU[2 * x], u);
            }
            {
                uint8x16x2_t v = vld2q_u8(&pV[2 * x]);
                v.val[1] = vld1q_u8(&pYaV[x]);
                vst2q_u8(&pV[2 * x], v);
            }
        }
        for (; x < halfWidth; x++)
        {
            const UINT32 odd = 2 * x + 1;
            pU[odd] = pYaU[x];
            pV[odd] = pYaV[x];
        }
    }
    /* B6 - B9 */
    for (UINT32 y = 0; y < halfHeight; y++)
    {
        /* Each auxiliary chroma plane is split left/right into the data for
         * the destination U and V planes respectively. */
        const BYTE* pUaU = pSrc[1] + srcStep[1] * (y + roi->top / 2) + roi->left / 4;
        const BYTE* pUaV = pUaU + nTotalWidth / 4;
        const BYTE* pVaU = pSrc[2] + srcStep[2] * (y + roi->top / 2) + roi->left / 4;
        const BYTE* pVaV = pVaU + nTotalWidth / 4;
        BYTE* pU = pDst[1] + dstStep[1] * (2 * y + 1 + roi->top) + roi->left;
        BYTE* pV = pDst[2] + dstStep[2] * (2 * y + 1 + roi->top) + roi->left;
        UINT32 x = 0;
        /* Load-modify-store on 4-column groups: lanes 0 and 2 replaced,
         * lanes 1 and 3 (filled by B4/B5) preserved. */
        for (; x < quaterWidth - quaterPad; x += 16)
        {
            {
                uint8x16x4_t u = vld4q_u8(&pU[4 * x]);
                u.val[0] = vld1q_u8(&pUaU[x]);
                u.val[2] = vld1q_u8(&pVaU[x]);
                vst4q_u8(&pU[4 * x], u);
            }
            {
                uint8x16x4_t v = vld4q_u8(&pV[4 * x]);
                v.val[0] = vld1q_u8(&pUaV[x]);
                v.val[2] = vld1q_u8(&pVaV[x]);
                vst4q_u8(&pV[4 * x], v);
            }
        }
        for (; x < quaterWidth; x++)
        {
            pU[4 * x + 0] = pUaU[x];
            pV[4 * x + 0] = pUaV[x];
            pU[4 * x + 2] = pVaU[x];
            pV[4 * x + 2] = pVaV[x];
        }
    }
    return PRIMITIVES_SUCCESS;
}
/* Combine an AVC444 sub-frame into the target 4:4:4 YUV buffers.
 *
 * Validates all plane pointers, then dispatches on the sub-frame type
 * (luma, chroma v1 or chroma v2). Returns -1 on invalid arguments or an
 * unknown frame type.
 */
static pstatus_t neon_YUV420CombineToYUV444(avc444_frame_type type,
                                            const BYTE* WINPR_RESTRICT pSrc[3],
                                            const UINT32 srcStep[3], UINT32 nWidth, UINT32 nHeight,
                                            BYTE* WINPR_RESTRICT pDst[3], const UINT32 dstStep[3],
                                            const RECTANGLE_16* WINPR_RESTRICT roi)
{
    if (!pSrc || !pDst || !roi)
        return -1;

    for (size_t i = 0; i < 3; i++)
    {
        if (!pSrc[i] || !pDst[i])
            return -1;
    }

    switch (type)
    {
        case AVC444_LUMA:
            return neon_LumaToYUV444(pSrc, srcStep, pDst, dstStep, roi);
        case AVC444_CHROMAv1:
            return neon_ChromaV1ToYUV444(pSrc, srcStep, pDst, dstStep, roi);
        case AVC444_CHROMAv2:
            return neon_ChromaV2ToYUV444(pSrc, srcStep, nWidth, nHeight, pDst, dstStep, roi);
        default:
            return -1;
    }
}
#endif
/* Install the NEON YUV conversion entry points into the primitives table.
 *
 * When NEON intrinsics are unavailable at build time this is a no-op apart
 * from a verbose log message. Also caches the generic primitives table used
 * as the fallback for unsupported pixel formats.
 */
void primitives_init_YUV_neon_int(primitives_t* WINPR_RESTRICT prims)
{
#if defined(NEON_INTRINSICS_ENABLED)
    generic = primitives_get_generic();
    WLog_VRB(PRIM_TAG, "NEON optimizations");
    prims->YUV420ToRGB_8u_P3AC4R = neon_YUV420ToRGB_8u_P3AC4R;
    prims->YUV444ToRGB_8u_P3AC4R = neon_YUV444ToRGB_8u_P3AC4R;
    prims->YUV420CombineToYUV444 = neon_YUV420CombineToYUV444;
#else
    WLog_VRB(PRIM_TAG, "undefined WITH_SIMD or neon intrinsics not available");
    WINPR_UNUSED(prims);
#endif
}

View File

@@ -0,0 +1,274 @@
/* FreeRDP: A Remote Desktop Protocol Client
* Optimized Color conversion operations.
* vi:ts=4 sw=4:
*
* Copyright 2011 Stephen Erisman
* Copyright 2011 Norbert Federa <norbert.federa@thincast.com>
* Copyright 2011 Martin Fleisz <martin.fleisz@thincast.com>
* (c) Copyright 2012 Hewlett-Packard Development Company, L.P.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <freerdp/config.h>
#include <freerdp/types.h>
#include <freerdp/primitives.h>
#include <winpr/sysinfo.h>
#include "prim_internal.h"
#include "prim_colors.h"
/*---------------------------------------------------------------------------*/
#if defined(NEON_INTRINSICS_ENABLED)
#include <arm_neon.h>
static primitives_t* generic = nullptr;
/* NEON kernel: convert three planar INT16 Y/Cb/Cr buffers into interleaved
 * 32-bit pixels, 8 pixels per vector iteration plus a scalar tail.
 *
 * pSrc      - three planes: pSrc[0]=Y, pSrc[1]=Cb, pSrc[2]=Cr
 * srcStep   - bytes between source rows (may include padding)
 * pDst      - interleaved 4-bytes-per-pixel output
 * dstStep   - bytes between destination rows
 * roi       - width/height of the region to convert
 * rPos..aPos- byte index of each channel inside one output pixel;
 *             alpha is always written as 0xFF.
 *
 * Coefficients are 16.16 fixed point (e.g. 91916 == 1.402525 * 2^16); the
 * final >>21 combines the >>16 fixed-point correction with the >>5 value
 * scale, exactly as the scalar tail does with divisor=16 followed by >>5.
 */
static pstatus_t neon_yCbCrToRGB_16s8u_P3AC4R_X(const INT16* WINPR_RESTRICT pSrc[3], UINT32 srcStep,
                                                BYTE* WINPR_RESTRICT pDst, UINT32 dstStep,
                                                const prim_size_t* WINPR_RESTRICT roi, uint8_t rPos,
                                                uint8_t gPos, uint8_t bPos, uint8_t aPos)
{
	BYTE* pRGB = pDst;
	const INT16* pY = pSrc[0];
	const INT16* pCb = pSrc[1];
	const INT16* pCr = pSrc[2];
	/* steps are in bytes; convert the per-row padding into element counts */
	const size_t srcPad = (srcStep - (roi->width * sizeof(INT16))) / sizeof(INT16);
	const size_t dstPad = (dstStep - (roi->width * 4)) / 4;
	/* pixels left over for the scalar tail loop */
	const size_t pad = roi->width % 8;
	const int16x4_t c4096 = vdup_n_s16(4096);

	for (UINT32 y = 0; y < roi->height; y++)
	{
		/* vector part: 8 pixels per iteration */
		for (UINT32 x = 0; x < roi->width - pad; x += 8)
		{
			const int16x8_t Y = vld1q_s16(pY);
			const int16x4_t Yh = vget_high_s16(Y);
			const int16x4_t Yl = vget_low_s16(Y);
			const int32x4_t YhAdd = vaddl_s16(Yh, c4096); /* Y + 4096 */
			const int32x4_t YlAdd = vaddl_s16(Yl, c4096); /* Y + 4096 */
			/* promote Y to 16.16 fixed point so the coefficient products add in directly */
			const int32x4_t YhW = vshlq_n_s32(YhAdd, 16);
			const int32x4_t YlW = vshlq_n_s32(YlAdd, 16);
			const int16x8_t Cr = vld1q_s16(pCr);
			const int16x4_t Crh = vget_high_s16(Cr);
			const int16x4_t Crl = vget_low_s16(Cr);
			const int16x8_t Cb = vld1q_s16(pCb);
			const int16x4_t Cbh = vget_high_s16(Cb);
			const int16x4_t Cbl = vget_low_s16(Cb);
			uint8x8x4_t bgrx;
			{
				/* R = (Y + 4096 + 1.402525 * Cr) >> 5, saturated to [0,255] */
				const int32x4_t CrhR = vmulq_n_s32(vmovl_s16(Crh), 91916); /* 1.402525 * 2^16 */
				const int32x4_t CrlR = vmulq_n_s32(vmovl_s16(Crl), 91916); /* 1.402525 * 2^16 */
				const int32x4_t CrhRa = vaddq_s32(CrhR, YhW);
				const int32x4_t CrlRa = vaddq_s32(CrlR, YlW);
				const int16x4_t Rsh = vmovn_s32(vshrq_n_s32(CrhRa, 21));
				const int16x4_t Rsl = vmovn_s32(vshrq_n_s32(CrlRa, 21));
				const int16x8_t Rs = vcombine_s16(Rsl, Rsh);
				bgrx.val[rPos] = vqmovun_s16(Rs); /* saturating narrow clamps to [0,255] */
			}
			{
				/* G = (Y + 4096 - 0.343730 * Cb - 0.714401 * Cr) >> 5, saturated */
				const int32x4_t CbGh = vmull_n_s16(Cbh, 22527); /* 0.343730 * 2^16 */
				const int32x4_t CbGl = vmull_n_s16(Cbl, 22527); /* 0.343730 * 2^16 */
				const int32x4_t CrGh = vmulq_n_s32(vmovl_s16(Crh), 46819); /* 0.714401 * 2^16 */
				const int32x4_t CrGl = vmulq_n_s32(vmovl_s16(Crl), 46819); /* 0.714401 * 2^16 */
				const int32x4_t CbCrGh = vaddq_s32(CbGh, CrGh);
				const int32x4_t CbCrGl = vaddq_s32(CbGl, CrGl);
				const int32x4_t YCbCrGh = vsubq_s32(YhW, CbCrGh);
				const int32x4_t YCbCrGl = vsubq_s32(YlW, CbCrGl);
				const int16x4_t Gsh = vmovn_s32(vshrq_n_s32(YCbCrGh, 21));
				const int16x4_t Gsl = vmovn_s32(vshrq_n_s32(YCbCrGl, 21));
				const int16x8_t Gs = vcombine_s16(Gsl, Gsh);
				const uint8x8_t G = vqmovun_s16(Gs);
				bgrx.val[gPos] = G;
			}
			{
				/* B = (Y + 4096 + 1.769905 * Cb) >> 5, saturated */
				const int32x4_t CbBh = vmulq_n_s32(vmovl_s16(Cbh), 115992); /* 1.769905 * 2^16 */
				const int32x4_t CbBl = vmulq_n_s32(vmovl_s16(Cbl), 115992); /* 1.769905 * 2^16 */
				const int32x4_t YCbBh = vaddq_s32(CbBh, YhW);
				const int32x4_t YCbBl = vaddq_s32(CbBl, YlW);
				const int16x4_t Bsh = vmovn_s32(vshrq_n_s32(YCbBh, 21));
				const int16x4_t Bsl = vmovn_s32(vshrq_n_s32(YCbBl, 21));
				const int16x8_t Bs = vcombine_s16(Bsl, Bsh);
				const uint8x8_t B = vqmovun_s16(Bs);
				bgrx.val[bPos] = B;
			}
			/* A */
			{
				bgrx.val[aPos] = vdup_n_u8(0xFF); /* alpha is always opaque */
			}
			/* interleaved store of 8 complete 4-byte pixels */
			vst4_u8(pRGB, bgrx);
			pY += 8;
			pCb += 8;
			pCr += 8;
			pRGB += 32;
		}
		/* scalar tail for the remaining (width % 8) pixels; arithmetic must
		 * mirror the vector path above */
		for (UINT32 x = 0; x < pad; x++)
		{
			const INT32 divisor = 16;
			const INT32 Y = ((*pY++) + 4096) << divisor;
			const INT32 Cb = (*pCb++);
			const INT32 Cr = (*pCr++);
			const INT32 CrR = Cr * (INT32)(1.402525f * (1 << divisor));
			const INT32 CrG = Cr * (INT32)(0.714401f * (1 << divisor));
			const INT32 CbG = Cb * (INT32)(0.343730f * (1 << divisor));
			const INT32 CbB = Cb * (INT32)(1.769905f * (1 << divisor));
			/* >>divisor removes the fixed-point scale, >>5 the value scale */
			INT16 R = ((INT16)((CrR + Y) >> divisor) >> 5);
			INT16 G = ((INT16)((Y - CbG - CrG) >> divisor) >> 5);
			INT16 B = ((INT16)((CbB + Y) >> divisor) >> 5);
			BYTE bgrx[4];
			bgrx[bPos] = CLIP(B);
			bgrx[gPos] = CLIP(G);
			bgrx[rPos] = CLIP(R);
			bgrx[aPos] = 0xFF;
			*pRGB++ = bgrx[0];
			*pRGB++ = bgrx[1];
			*pRGB++ = bgrx[2];
			*pRGB++ = bgrx[3];
		}
		/* skip the row padding of all three planes and of the destination */
		pY += srcPad;
		pCb += srcPad;
		pCr += srcPad;
		pRGB += dstPad;
	}

	return PRIMITIVES_SUCCESS;
}
/* Map the destination pixel format onto channel byte positions and run the
 * NEON YCbCr kernel; formats without a mapping fall back to the generic
 * implementation. */
static pstatus_t neon_yCbCrToRGB_16s8u_P3AC4R(const INT16* WINPR_RESTRICT pSrc[3], UINT32 srcStep,
                                              BYTE* WINPR_RESTRICT pDst, UINT32 dstStep,
                                              UINT32 DstFormat,
                                              const prim_size_t* WINPR_RESTRICT roi)
{
	uint8_t rPos = 0;
	uint8_t gPos = 0;
	uint8_t bPos = 0;
	uint8_t aPos = 0;

	switch (DstFormat)
	{
		case PIXEL_FORMAT_BGRA32:
		case PIXEL_FORMAT_BGRX32:
			rPos = 2;
			gPos = 1;
			bPos = 0;
			aPos = 3;
			break;
		case PIXEL_FORMAT_RGBA32:
		case PIXEL_FORMAT_RGBX32:
			rPos = 0;
			gPos = 1;
			bPos = 2;
			aPos = 3;
			break;
		case PIXEL_FORMAT_ARGB32:
		case PIXEL_FORMAT_XRGB32:
			rPos = 1;
			gPos = 2;
			bPos = 3;
			aPos = 0;
			break;
		case PIXEL_FORMAT_ABGR32:
		case PIXEL_FORMAT_XBGR32:
			rPos = 3;
			gPos = 2;
			bPos = 1;
			aPos = 0;
			break;
		default:
			/* no NEON path for this layout */
			return generic->yCbCrToRGB_16s8u_P3AC4R(pSrc, srcStep, pDst, dstStep, DstFormat, roi);
	}

	return neon_yCbCrToRGB_16s8u_P3AC4R_X(pSrc, srcStep, pDst, dstStep, roi, rPos, gPos, bPos,
	                                      aPos);
}
/* NEON kernel: interleave three planar INT16 R/G/B buffers into 32-bit
 * pixels, 8 pixels per vector iteration plus a scalar tail.
 *
 * pSrc      - three planes: pSrc[0]=R, pSrc[1]=G, pSrc[2]=B
 * srcStep   - bytes between source rows
 * pDst      - interleaved 4-bytes-per-pixel output
 * dstStep   - bytes between destination rows
 * roi       - width/height of the region to convert
 * rPos..aPos- byte index of each channel inside one output pixel;
 *             alpha is always written as 0xFF.
 *
 * Fix: the scalar tail previously assigned raw INT16 samples to BYTE,
 * truncating modulo 256, while the vector path saturates via vqmovun_s16.
 * The tail now clamps with CLIP so all columns produce identical pixels. */
static pstatus_t
neon_RGBToRGB_16s8u_P3AC4R_X(const INT16* WINPR_RESTRICT pSrc[3], /* 16-bit R,G, and B arrays */
                             UINT32 srcStep,            /* bytes between rows in source data */
                             BYTE* WINPR_RESTRICT pDst, /* 32-bit interleaved ARGB (ABGR?) data */
                             UINT32 dstStep,            /* bytes between rows in dest data */
                             const prim_size_t* WINPR_RESTRICT roi, /* region of interest */
                             uint8_t rPos, uint8_t gPos, uint8_t bPos, uint8_t aPos)
{
	const UINT32 pad = roi->width % 8; /* pixels handled by the scalar tail */

	for (UINT32 y = 0; y < roi->height; y++)
	{
		/* steps are in bytes, so address rows through (const) BYTE pointers */
		const INT16* pr = (const INT16*)(((const BYTE*)pSrc[0]) + y * srcStep);
		const INT16* pg = (const INT16*)(((const BYTE*)pSrc[1]) + y * srcStep);
		const INT16* pb = (const INT16*)(((const BYTE*)pSrc[2]) + y * srcStep);
		BYTE* dst = pDst + y * dstStep;

		/* vector part: 8 pixels per iteration */
		for (UINT32 x = 0; x < roi->width - pad; x += 8)
		{
			const int16x8_t r = vld1q_s16(pr);
			const int16x8_t g = vld1q_s16(pg);
			const int16x8_t b = vld1q_s16(pb);
			uint8x8x4_t bgrx;
			bgrx.val[aPos] = vdup_n_u8(0xFF); /* alpha is always opaque */
			bgrx.val[rPos] = vqmovun_s16(r);  /* saturating narrow to [0,255] */
			bgrx.val[gPos] = vqmovun_s16(g);
			bgrx.val[bPos] = vqmovun_s16(b);
			vst4_u8(dst, bgrx);
			pr += 8;
			pg += 8;
			pb += 8;
			dst += 32;
		}

		/* scalar tail: CLIP matches the saturating behaviour of vqmovun_s16;
		 * plain BYTE assignment would truncate out-of-range samples mod 256 */
		for (UINT32 x = 0; x < pad; x++)
		{
			BYTE bgrx[4];
			bgrx[bPos] = CLIP(*pb++);
			bgrx[gPos] = CLIP(*pg++);
			bgrx[rPos] = CLIP(*pr++);
			bgrx[aPos] = 0xFF;
			*dst++ = bgrx[0];
			*dst++ = bgrx[1];
			*dst++ = bgrx[2];
			*dst++ = bgrx[3];
		}
	}

	return PRIMITIVES_SUCCESS;
}
/* Map the destination pixel format onto channel byte positions and run the
 * NEON RGB interleave kernel; formats without a mapping fall back to the
 * generic implementation. */
static pstatus_t
neon_RGBToRGB_16s8u_P3AC4R(const INT16* WINPR_RESTRICT pSrc[3], /* 16-bit R,G, and B arrays */
                           UINT32 srcStep,            /* bytes between rows in source data */
                           BYTE* WINPR_RESTRICT pDst, /* 32-bit interleaved ARGB (ABGR?) data */
                           UINT32 dstStep,            /* bytes between rows in dest data */
                           UINT32 DstFormat,
                           const prim_size_t* WINPR_RESTRICT roi) /* region of interest */
{
	uint8_t rPos = 0;
	uint8_t gPos = 0;
	uint8_t bPos = 0;
	uint8_t aPos = 0;

	switch (DstFormat)
	{
		case PIXEL_FORMAT_BGRA32:
		case PIXEL_FORMAT_BGRX32:
			rPos = 2;
			gPos = 1;
			bPos = 0;
			aPos = 3;
			break;
		case PIXEL_FORMAT_RGBA32:
		case PIXEL_FORMAT_RGBX32:
			rPos = 0;
			gPos = 1;
			bPos = 2;
			aPos = 3;
			break;
		case PIXEL_FORMAT_ARGB32:
		case PIXEL_FORMAT_XRGB32:
			rPos = 1;
			gPos = 2;
			bPos = 3;
			aPos = 0;
			break;
		case PIXEL_FORMAT_ABGR32:
		case PIXEL_FORMAT_XBGR32:
			rPos = 3;
			gPos = 2;
			bPos = 1;
			aPos = 0;
			break;
		default:
			/* no NEON path for this layout */
			return generic->RGBToRGB_16s8u_P3AC4R(pSrc, srcStep, pDst, dstStep, DstFormat, roi);
	}

	return neon_RGBToRGB_16s8u_P3AC4R_X(pSrc, srcStep, pDst, dstStep, roi, rPos, gPos, bPos, aPos);
}
#endif /* NEON_INTRINSICS_ENABLED */
/* ------------------------------------------------------------------------- */
/* Install the NEON colour-conversion entry points into the primitives table.
 * When NEON intrinsics are not compiled in, only a verbose log line is
 * emitted and the table is left untouched. */
void primitives_init_colors_neon_int(primitives_t* WINPR_RESTRICT prims)
{
#if defined(NEON_INTRINSICS_ENABLED)
	/* the dispatchers fall back to the generic table for unsupported formats */
	generic = primitives_get_generic();
	WLog_VRB(PRIM_TAG, "NEON optimizations");
	prims->yCbCrToRGB_16s8u_P3AC4R = neon_yCbCrToRGB_16s8u_P3AC4R;
	prims->RGBToRGB_16s8u_P3AC4R = neon_RGBToRGB_16s8u_P3AC4R;
#else
	WLog_VRB(PRIM_TAG, "undefined WITH_SIMD or neon intrinsics not available");
	WINPR_UNUSED(prims);
#endif
}