Skip to content

Commit

Permalink
Fix overflows in two functions in src/reformat.c
Browse files Browse the repository at this point in the history
Fix overflows when multiplying by rowBytes in avifImageRGBToYUV() and
avifImageYUVAnyToRGBAnySlow(), by copying the various uint32_t rowBytes
fields into local variables of type size_t, so that multiplications
involving rowBytes are performed in size_t arithmetic.

Part of the fix to AOMediaCodec#2271.
  • Loading branch information
wantehchang committed Aug 1, 2024
1 parent 1f86eb6 commit 1ff7bae
Showing 1 changed file with 37 additions and 38 deletions.
75 changes: 37 additions & 38 deletions src/reformat.c
Original file line number Diff line number Diff line change
Expand Up @@ -271,8 +271,13 @@ avifResult avifImageRGBToYUV(avifImage * image, const avifRGBImage * rgb)
struct YUVBlock yuvBlock[2][2];
float rgbPixel[3];
const float rgbMaxChannelF = state.rgb.maxChannelF;
uint8_t ** yuvPlanes = image->yuvPlanes;
uint32_t * yuvRowBytes = image->yuvRowBytes;
const size_t rgbRowBytes = rgb->rowBytes;
uint8_t * yPlane = image->yuvPlanes[AVIF_CHAN_Y];
uint8_t * uPlane = image->yuvPlanes[AVIF_CHAN_U];
uint8_t * vPlane = image->yuvPlanes[AVIF_CHAN_V];
const size_t yRowBytes = image->yuvRowBytes[AVIF_CHAN_Y];
const size_t uRowBytes = image->yuvRowBytes[AVIF_CHAN_U];
const size_t vRowBytes = image->yuvRowBytes[AVIF_CHAN_V];
for (uint32_t outerJ = 0; outerJ < image->height; outerJ += 2) {
for (uint32_t outerI = 0; outerI < image->width; outerI += 2) {
int blockW = 2, blockH = 2;
Expand All @@ -292,30 +297,30 @@ avifResult avifImageRGBToYUV(avifImage * image, const avifRGBImage * rgb)
// Unpack RGB into normalized float
if (state.rgb.channelBytes > 1) {
rgbPixel[0] =
*((uint16_t *)(&rgb->pixels[state.rgb.offsetBytesR + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)])) /
*((uint16_t *)(&rgb->pixels[state.rgb.offsetBytesR + (i * state.rgb.pixelBytes) + (j * rgbRowBytes)])) /
rgbMaxChannelF;
rgbPixel[1] =
*((uint16_t *)(&rgb->pixels[state.rgb.offsetBytesG + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)])) /
*((uint16_t *)(&rgb->pixels[state.rgb.offsetBytesG + (i * state.rgb.pixelBytes) + (j * rgbRowBytes)])) /
rgbMaxChannelF;
rgbPixel[2] =
*((uint16_t *)(&rgb->pixels[state.rgb.offsetBytesB + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)])) /
*((uint16_t *)(&rgb->pixels[state.rgb.offsetBytesB + (i * state.rgb.pixelBytes) + (j * rgbRowBytes)])) /
rgbMaxChannelF;
} else {
rgbPixel[0] = rgb->pixels[state.rgb.offsetBytesR + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)] /
rgbPixel[0] = rgb->pixels[state.rgb.offsetBytesR + (i * state.rgb.pixelBytes) + (j * rgbRowBytes)] /
rgbMaxChannelF;
rgbPixel[1] = rgb->pixels[state.rgb.offsetBytesG + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)] /
rgbPixel[1] = rgb->pixels[state.rgb.offsetBytesG + (i * state.rgb.pixelBytes) + (j * rgbRowBytes)] /
rgbMaxChannelF;
rgbPixel[2] = rgb->pixels[state.rgb.offsetBytesB + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)] /
rgbPixel[2] = rgb->pixels[state.rgb.offsetBytesB + (i * state.rgb.pixelBytes) + (j * rgbRowBytes)] /
rgbMaxChannelF;
}

if (alphaMode != AVIF_ALPHA_MULTIPLY_MODE_NO_OP) {
float a;
if (state.rgb.channelBytes > 1) {
a = *((uint16_t *)(&rgb->pixels[state.rgb.offsetBytesA + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)])) /
a = *((uint16_t *)(&rgb->pixels[state.rgb.offsetBytesA + (i * state.rgb.pixelBytes) + (j * rgbRowBytes)])) /
rgbMaxChannelF;
} else {
a = rgb->pixels[state.rgb.offsetBytesA + (i * state.rgb.pixelBytes) + (j * rgb->rowBytes)] / rgbMaxChannelF;
a = rgb->pixels[state.rgb.offsetBytesA + (i * state.rgb.pixelBytes) + (j * rgbRowBytes)] / rgbMaxChannelF;
}

if (alphaMode == AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY) {
Expand Down Expand Up @@ -377,24 +382,21 @@ avifResult avifImageRGBToYUV(avifImage * image, const avifRGBImage * rgb)
}

if (state.yuv.channelBytes > 1) {
uint16_t * pY = (uint16_t *)&yuvPlanes[AVIF_CHAN_Y][(i * 2) + (j * yuvRowBytes[AVIF_CHAN_Y])];
uint16_t * pY = (uint16_t *)&yPlane[(i * 2) + (j * yRowBytes)];
*pY = (uint16_t)avifYUVColorSpaceInfoYToUNorm(&state.yuv, yuvBlock[bI][bJ].y);
if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) {
// YUV444, full chroma
uint16_t * pU = (uint16_t *)&yuvPlanes[AVIF_CHAN_U][(i * 2) + (j * yuvRowBytes[AVIF_CHAN_U])];
uint16_t * pU = (uint16_t *)&uPlane[(i * 2) + (j * uRowBytes)];
*pU = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].u);
uint16_t * pV = (uint16_t *)&yuvPlanes[AVIF_CHAN_V][(i * 2) + (j * yuvRowBytes[AVIF_CHAN_V])];
uint16_t * pV = (uint16_t *)&vPlane[(i * 2) + (j * vRowBytes)];
*pV = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].v);
}
} else {
yuvPlanes[AVIF_CHAN_Y][i + (j * yuvRowBytes[AVIF_CHAN_Y])] =
(uint8_t)avifYUVColorSpaceInfoYToUNorm(&state.yuv, yuvBlock[bI][bJ].y);
yPlane[i + (j * yRowBytes)] = (uint8_t)avifYUVColorSpaceInfoYToUNorm(&state.yuv, yuvBlock[bI][bJ].y);
if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) {
// YUV444, full chroma
yuvPlanes[AVIF_CHAN_U][i + (j * yuvRowBytes[AVIF_CHAN_U])] =
(uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].u);
yuvPlanes[AVIF_CHAN_V][i + (j * yuvRowBytes[AVIF_CHAN_V])] =
(uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].v);
uPlane[i + (j * uRowBytes)] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].u);
vPlane[i + (j * vRowBytes)] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].v);
}
}
}
Expand Down Expand Up @@ -423,15 +425,13 @@ avifResult avifImageRGBToYUV(avifImage * image, const avifRGBImage * rgb)
int uvI = outerI >> chromaShiftX;
int uvJ = outerJ >> chromaShiftY;
if (state.yuv.channelBytes > 1) {
uint16_t * pU = (uint16_t *)&yuvPlanes[AVIF_CHAN_U][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_U])];
uint16_t * pU = (uint16_t *)&uPlane[(uvI * 2) + (uvJ * uRowBytes)];
*pU = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
uint16_t * pV = (uint16_t *)&yuvPlanes[AVIF_CHAN_V][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_V])];
uint16_t * pV = (uint16_t *)&vPlane[(uvI * 2) + (uvJ * vRowBytes)];
*pV = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
} else {
yuvPlanes[AVIF_CHAN_U][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_U])] =
(uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
yuvPlanes[AVIF_CHAN_V][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_V])] =
(uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
uPlane[uvI + (uvJ * uRowBytes)] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
vPlane[uvI + (uvJ * vRowBytes)] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
}
} else if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV422) {
// YUV422, average 2 samples (1x2), twice
Expand All @@ -451,15 +451,13 @@ avifResult avifImageRGBToYUV(avifImage * image, const avifRGBImage * rgb)
int uvI = outerI >> chromaShiftX;
int uvJ = outerJ + bJ;
if (state.yuv.channelBytes > 1) {
uint16_t * pU = (uint16_t *)&yuvPlanes[AVIF_CHAN_U][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_U])];
uint16_t * pU = (uint16_t *)&uPlane[(uvI * 2) + (uvJ * uRowBytes)];
*pU = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
uint16_t * pV = (uint16_t *)&yuvPlanes[AVIF_CHAN_V][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_V])];
uint16_t * pV = (uint16_t *)&vPlane[(uvI * 2) + (uvJ * vRowBytes)];
*pV = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
} else {
yuvPlanes[AVIF_CHAN_U][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_U])] =
(uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
yuvPlanes[AVIF_CHAN_V][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_V])] =
(uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
uPlane[uvI + (uvJ * uRowBytes)] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
vPlane[uvI + (uvJ * vRowBytes)] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
}
}
}
Expand Down Expand Up @@ -592,10 +590,11 @@ static avifResult avifImageYUVAnyToRGBAnySlow(const avifImage * image,
const uint8_t * uPlane = image->yuvPlanes[AVIF_CHAN_U];
const uint8_t * vPlane = image->yuvPlanes[AVIF_CHAN_V];
const uint8_t * aPlane = image->alphaPlane;
const uint32_t yRowBytes = image->yuvRowBytes[AVIF_CHAN_Y];
const uint32_t uRowBytes = image->yuvRowBytes[AVIF_CHAN_U];
const uint32_t vRowBytes = image->yuvRowBytes[AVIF_CHAN_V];
const uint32_t aRowBytes = image->alphaRowBytes;
const size_t yRowBytes = image->yuvRowBytes[AVIF_CHAN_Y];
const size_t uRowBytes = image->yuvRowBytes[AVIF_CHAN_U];
const size_t vRowBytes = image->yuvRowBytes[AVIF_CHAN_V];
const size_t aRowBytes = image->alphaRowBytes;
const size_t rgbRowBytes = rgb->rowBytes;

// Various observations and limits
const avifBool hasColor = (uPlane && vPlane && (image->yuvFormat != AVIF_PIXEL_FORMAT_YUV400));
Expand All @@ -619,9 +618,9 @@ static avifResult avifImageYUVAnyToRGBAnySlow(const avifImage * image,
const uint16_t * ptrV16 = (const uint16_t *)ptrV8;
const uint16_t * ptrA16 = (const uint16_t *)ptrA8;

uint8_t * ptrR = &rgb->pixels[state->rgb.offsetBytesR + (j * rgb->rowBytes)];
uint8_t * ptrG = &rgb->pixels[state->rgb.offsetBytesG + (j * rgb->rowBytes)];
uint8_t * ptrB = &rgb->pixels[state->rgb.offsetBytesB + (j * rgb->rowBytes)];
uint8_t * ptrR = &rgb->pixels[state->rgb.offsetBytesR + (j * rgbRowBytes)];
uint8_t * ptrG = &rgb->pixels[state->rgb.offsetBytesG + (j * rgbRowBytes)];
uint8_t * ptrB = &rgb->pixels[state->rgb.offsetBytesB + (j * rgbRowBytes)];

for (uint32_t i = 0; i < image->width; ++i) {
float Y, Cb = 0.5f, Cr = 0.5f;
Expand Down

0 comments on commit 1ff7bae

Please sign in to comment.