From 66956d195169596472e956e3aebf2df8e3bd960d Mon Sep 17 00:00:00 2001
From: Graeme Leese
Date: Mon, 22 Jul 2024 13:16:38 +0100
Subject: [PATCH] Fix conversions in 3fa2b7a060 changes for 10-bit RTs

The new code was reading everything that is not 10-bit back as
UNSIGNED_BYTE but then applying the actual RT's bit depth to compute the
maximum value, leading to double scaling for RTs with fewer than 8 bits.

Likewise, all RTs were read as RGBA, but formats without alpha were
being scaled by 0.0f, giving incorrect results.

Components: OpenGL
VK-GL-CTS issue: 5186

Affects:
KHR-GLES3.texture_lod_basic.lod_selection

Change-Id: I7a2356f4cb10a74431a1a8ffcd7a6184014740e7
---
 .../common/glcTextureLodBasicTests.cpp        | 23 +++++++++--------------
 1 file changed, 9 insertions(+), 14 deletions(-)

diff --git a/external/openglcts/modules/common/glcTextureLodBasicTests.cpp b/external/openglcts/modules/common/glcTextureLodBasicTests.cpp
index ec9590135e..fe6ba18a90 100644
--- a/external/openglcts/modules/common/glcTextureLodBasicTests.cpp
+++ b/external/openglcts/modules/common/glcTextureLodBasicTests.cpp
@@ -650,12 +650,6 @@ bool TextureLodSelectionTestCase::doComparison(const int size, const float *cons
         return e;
     };
 
-    const auto calcMaxValue = [](int bits) { return (ldexp(1.0f, bits) - 1.0f); };
-
-    float maxValues[4] = {0.0f, 0.0f, 0.0f, 0.0f};
-    for (size_t i = 0; i < de::arrayLength(maxValues); ++i)
-        maxValues[i] = calcMaxValue(col_bits[i]);
-
     float epsilon[4] = {0.0f, 0.0f, 0.0f, 0.0f};
     for (int i = 0; i < numChannels; ++i)
         epsilon[i] = calcEpsilon(col_bits[i]);
@@ -669,17 +663,18 @@ bool TextureLodSelectionTestCase::doComparison(const int size, const float *cons
             if (use10Bits)
             {
                 // Note this is a strange way to store RGB10A2 but it matches what implementations do.
-                resultColor[0] = static_cast<float>(pixel & 0x3FF) / maxValues[0];
-                resultColor[1] = static_cast<float>((pixel >> 10) & 0x3FF) / maxValues[1];
-                resultColor[2] = static_cast<float>((pixel >> 20) & 0x3FF) / maxValues[2];
-                resultColor[3] = static_cast<float>((pixel >> 30) & 0x3) / maxValues[3];
+                resultColor[0] = static_cast<float>(pixel & 0x3FF) / 1023.0f;
+                resultColor[1] = static_cast<float>((pixel >> 10) & 0x3FF) / 1023.0f;
+                resultColor[2] = static_cast<float>((pixel >> 20) & 0x3FF) / 1023.0f;
+                resultColor[3] = static_cast<float>((pixel >> 30) & 0x3) / 3.0f;
             }
             else
             {
-                resultColor[0] = static_cast<float>(pxBytes[0]) / maxValues[0];
-                resultColor[1] = static_cast<float>(pxBytes[1]) / maxValues[1];
-                resultColor[2] = static_cast<float>(pxBytes[2]) / maxValues[2];
-                resultColor[3] = static_cast<float>(pxBytes[3]) / maxValues[3];
+                // If not 10-bit then we already converted to 8-bit (UNSIGNED_BYTE) in the ReadPixels call, above.
+                resultColor[0] = static_cast<float>(pxBytes[0]) / 255.0f;
+                resultColor[1] = static_cast<float>(pxBytes[1]) / 255.0f;
+                resultColor[2] = static_cast<float>(pxBytes[2]) / 255.0f;
+                resultColor[3] = static_cast<float>(pxBytes[3]) / 255.0f;
             }
 
             for (int j = 0; j < numChannels; ++j)
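
Note (not part of the patch): a minimal standalone sketch of the double scaling the commit message describes. glReadPixels with GL_UNSIGNED_BYTE returns channels already normalized to 8 bits, so dividing by a maximum derived from the render target's native bit depth, as the deleted maxValues code did, inflates the result for RTs with fewer than 8 bits per channel; the fix divides by 255.0f instead. The program, variable names, and the RGB565 example below are illustrative assumptions, not taken from the test; for RGB formats the reported alpha bit count is 0, so the old code additionally divided the alpha channel by 0.0f.

// Hypothetical, self-contained illustration of the conversion bug fixed above.
// It is not CTS code; it only mirrors the arithmetic in doComparison().
#include <cstdio>

int main()
{
    // A fully saturated red channel read back with GL_UNSIGNED_BYTE.
    const unsigned char readbackRed = 255;
    // Native bit depth of the render target's red channel, e.g. 5 for RGB565.
    const int rtRedBits = 5;

    // Old behaviour: divide by (2^bits - 1) of the RT format -> 255 / 31 ~= 8.2,
    // even though the readback value was already scaled to 8 bits.
    const float maxFromRtBits = static_cast<float>((1 << rtRedBits) - 1);
    const float doubleScaled  = static_cast<float>(readbackRed) / maxFromRtBits;

    // Fixed behaviour: divide by 255.0f, matching the UNSIGNED_BYTE readback.
    const float corrected = static_cast<float>(readbackRed) / 255.0f;

    std::printf("double-scaled = %.3f, corrected = %.3f\n", doubleScaled, corrected);
    return 0;
}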