Commit 51f21bcb authored by Khaled Mammou, committed by David Flynn

attr: use single quant_step_size_* for all LoDs in lift/pred

This commit removes the ability to vary quant_step_size on a per-LoD
basis.  This greatly simplifies the codec configuration, especially
when used in conjunction with dist2 derivation.

cfg update:
 - rename quantizationSteps* -> quantizationStep*
 - update quantizationStep* to single values:
   - lossless-geom lossy-attrs:        8, 16, 32, 64, 128, 256 [8bit]
   - lossless-geom nearlossless-attrs: no change
   - lossless-geom lossless-attrs:     no change
   - lossy-geom    lossy-attrs:        8                       [8bit]

 => 16-bit cat3 sequences have 8-bit qs values multiplied by 255
parent 983f6388
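
For illustration, a minimal standalone sketch of the behaviour after this change: one quantization step size per attribute is applied at every LoD, and the 16-bit cat3 reflectance configs simply reuse the 8-bit steps scaled by 255 (e.g. $eval{ 255 * 256 } = 65280). The Aps struct and quantize() helper below are hypothetical stand-ins for illustration only, not the codec's actual API.

#include <cmath>
#include <cstdint>
#include <iostream>

// Hypothetical stand-in for the two APS fields touched by this commit:
// a single per-attribute step size replaces the old per-LoD vectors.
struct Aps {
  int64_t quant_step_size_luma;
  int64_t quant_step_size_chroma;
};

// Simple scalar quantizer, similar in spirit to the codec's PCCQuantization.
int64_t quantize(double value, int64_t qs)
{
  return qs > 0 ? int64_t(std::round(value / double(qs)))
                : int64_t(std::round(value));
}

int main()
{
  // 8-bit colour at rate point r01 (lossless-geom / lossy-attrs): qs = 256.
  Aps colour = {256, 256};

  // 16-bit cat3 reflectance: the same 8-bit step scaled by 255,
  // matching the cfg entries "$eval{ 255 * 256 }" etc.
  Aps refl16 = {255 * 256, 0};

  std::cout << quantize(1000.0, colour.quant_step_size_luma) << "\n";    // 4
  std::cout << quantize(100000.0, refl16.quant_step_size_luma) << "\n";  // 2
  return 0;
}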
@@ -30,8 +30,8 @@ categories:
   - numberOfNearestNeighborsInPrediction: 3
   - levelOfDetailCount: 9
   - dist2: 134217728 33554432 8388608 2097152 524288 131072 32768 8192 0
-  - quantizationStepsLuma: 0 0 0 0 0 0 0 0 0
-  - quantizationStepsChroma: 0 0 0 0 0 0 0 0 0
+  - quantizationStepLuma: 0
+  - quantizationStepChroma: 0
   - bitdepth: 8
   - attribute: color
@@ -89,7 +89,7 @@ categories:
   - numberOfNearestNeighborsInPrediction: 3
   - levelOfDetailCount: 6
   - dist2: 4194301 1048582 262149 65534 16383 0
-  - quantizationStepsLuma: 0 0 0 0 0 0
+  - quantizationStepLuma: 0
   - bitdepth: 8
   - attribute: reflectance
...
@@ -31,40 +31,40 @@ categories:
   - numberOfNearestNeighborsInPrediction: 3
   - levelOfDetailCount: 9
   - dist2: 134217728 33554432 8388608 2097152 524288 131072 32768 8192 0
-  - quantizationStepsLuma:
-     r01: 1 2 4 8 16 32 64 128 128
-     r02: 1 2 4 8 16 32 64 64 66
-     r03: 1 2 4 8 16 32 32 32 34
-     r04: 1 2 4 8 8 8 16 16 16
-     r05: 1 2 4 4 4 4 4 8 8
-     r06: 0 1 1 1 2 2 2 2 2
-  - quantizationStepsChroma:
-     r01: 1 2 4 8 16 32 64 128 128
-     r02: 1 2 4 8 16 32 64 64 66
-     r03: 1 2 4 8 16 32 32 32 34
-     r04: 1 2 4 8 8 8 16 16 16
-     r05: 1 2 4 4 4 4 4 8 8
-     r06: 0 1 1 1 2 2 2 2 2
+  - quantizationStepLuma:
+     r01: 256
+     r02: 128
+     r03: 64
+     r04: 32
+     r05: 16
+     r06: 8
+  - quantizationStepChroma:
+     r01: 256
+     r02: 128
+     r03: 64
+     r04: 32
+     r05: 16
+     r06: 8
   - bitdepth: 8
   - attribute: color
  tollbooth_q1mm:
    encflags:
    - *commonAttr
-  - quantizationStepsLuma:
-     r01: 1 2 4 8 16 32 64 128 128
-     r02: 1 2 4 8 16 32 32 64 64
-     r03: 1 2 4 8 16 16 16 32 32
-     r04: 1 2 4 8 8 8 8 10 16
-     r05: 1 2 4 4 4 4 4 4 6
-     r06: 0 0 0 0 0 0 1 1 2
-  - quantizationStepsChroma:
-     r01: 1 2 4 8 16 32 64 128 128
-     r02: 1 2 4 8 16 32 32 64 64
-     r03: 1 2 4 8 16 16 16 32 32
-     r04: 1 2 4 8 8 8 8 10 16
-     r05: 1 2 4 4 4 4 4 4 6
-     r06: 0 0 0 0 0 0 1 1 2
+  - quantizationStepLuma:
+     r01: 256
+     r02: 128
+     r03: 64
+     r04: 32
+     r05: 16
+     r06: 8
+  - quantizationStepChroma:
+     r01: 256
+     r02: 128
+     r03: 64
+     r04: 32
+     r05: 16
+     r06: 8
   - bitdepth: 8
   - attribute: color
@@ -90,25 +90,25 @@ categories:
  citytunnel_q1mm:
    encflags:
    - *commonAttr
-  - quantizationStepsLuma:
-     r01: 286 571 1143 2286 4571 9143 18286 36571 65536
-     r02: 77 154 308 615 1231 2462 4923 9846 19692
-     r03: 29 57 114 229 457 914 1829 3657 7314
-     r04: 10 20 40 80 160 320 640 1280 2560
-     r05: 2 5 10 20 40 80 160 320 640
-     r06: 0 1 2 4 8 16 16 32 32
+  - quantizationStepLuma:
+     r01: $eval{ 255 * 256 }
+     r02: $eval{ 255 * 128 }
+     r03: $eval{ 255 * 64 }
+     r04: $eval{ 255 * 32 }
+     r05: $eval{ 255 * 16 }
+     r06: $eval{ 255 * 8 }
   - bitdepth: 16
   - attribute: reflectance
  tollbooth_q1mm:
    encflags:
    - *commonAttr
-  - quantizationStepsLuma:
-     r01: 286 571 1143 2286 4571 9143 18286 36571 65536
-     r02: 100 200 400 800 1600 3200 6400 12800 25600
-     r03: 50 100 200 400 800 1600 3200 6400 12800
-     r04: 17 33 67 133 267 533 1067 2133 4267
-     r05: 4 8 16 32 64 128 256 512 1024
-     r06: 1 2 4 8 16 32 32 32 32
+  - quantizationStepLuma:
+     r01: $eval{ 255 * 256 }
+     r02: $eval{ 255 * 128 }
+     r03: $eval{ 255 * 64 }
+     r04: $eval{ 255 * 32 }
+     r05: $eval{ 255 * 16 }
+     r06: $eval{ 255 * 8 }
   - bitdepth: 16
   - attribute: reflectance
@@ -31,32 +31,30 @@ categories:
   - numberOfNearestNeighborsInPrediction: 3
   - levelOfDetailCount: 9
   - dist2: 134217728 33554432 8388608 2097152 524288 131072 32768 8192 0
-  - quantizationStepsLuma:
-     r01: 2 2 2 2 2 2 2 2 2
-     r02: 4 4 4 4 4 4 4 4 4
-     r03: 8 8 8 8 8 8 8 8 8
-     r04: 16 16 16 16 16 16 16 16 16
-     r05: 32 32 32 32 32 32 32 32 32
-  - quantizationStepsChroma:
-     r01: 2 2 2 2 2 2 2 2 2
-     r02: 4 4 4 4 4 4 4 4 4
-     r03: 8 8 8 8 8 8 8 8 8
-     r04: 16 16 16 16 16 16 16 16 16
-     r05: 32 32 32 32 32 32 32 32 32
-  - bitdepth: 8
-  - attribute: color
+  - &mitsubishiNearlosslessAttrsColour8
+    - *mitsubishiNearlosslessAttrs
+    - quantizationStepLuma:
+       r01: 2
+       r02: 4
+       r03: 8
+       r04: 16
+       r05: 32
+    - quantizationStepChroma:
+       r01: 2
+       r02: 4
+       r03: 8
+       r04: 16
+       r05: 32
+    - bitdepth: 8
+    - attribute: color
  overpass_q1mm:
    encflags:
-   - *mitsubishiNearlosslessAttrs
-   - bitdepth: 8
-   - attribute: color
+   - *mitsubishiNearlosslessAttrsColour8
  tollbooth_q1mm:
    encflags:
-   - *mitsubishiNearlosslessAttrs
-   - bitdepth: 8
-   - attribute: color
+   - *mitsubishiNearlosslessAttrsColour8
 ##
 # Condition 3.Z
@@ -79,45 +77,44 @@ categories:
  sequences:
   citytunnel_q1mm:
     encflags:
-    - *mitsubishiNearlosslessAttrs
-    - bitdepth: 16
-    - attribute: reflectance
+    - &mitsubishiNearlosslessAttrsRefl16
+     - *mitsubishiNearlosslessAttrs
+     - quantizationStepLuma:
+        r01: $eval{ 255 * 2 }
+        r02: $eval{ 255 * 4 }
+        r03: $eval{ 255 * 8 }
+        r04: $eval{ 255 * 16 }
+        r05: $eval{ 255 * 32 }
+     - bitdepth: 16
+     - attribute: reflectance
   overpass_q1mm:
     encflags:
-    - *mitsubishiNearlosslessAttrs
-    - bitdepth: 16
-    - attribute: reflectance
+    - *mitsubishiNearlosslessAttrsRefl16
   tollbooth_q1mm:
     encflags:
-    - *mitsubishiNearlosslessAttrs
-    - bitdepth: 16
-    - attribute: reflectance
+    - *mitsubishiNearlosslessAttrsRefl16
   ford_03_q1mm:
     encflags:
-    - &fordNearlosslessAttrs
+    - &fordNearlosslessAttrsRefl8
      - numberOfNearestNeighborsInPrediction: 3
      - levelOfDetailCount: 6
      - dist2: 4194301 1048582 262149 65534 16383 0
-     - quantizationStepsLuma:
-        r01: 2 2 2 2 2 2
-        r02: 4 4 4 4 4 4
-        r03: 8 8 8 8 8 8
-        r04: 16 16 16 16 16 16
-        r05: 32 32 32 32 32 32
+     - quantizationStepLuma:
+        r01: 2
+        r02: 4
+        r03: 8
+        r04: 16
+        r05: 32
      - bitdepth: 8
      - attribute: reflectance
   ford_02_q1mm:
     encflags:
-    - *fordNearlosslessAttrs
-    - bitdepth: 8
-    - attribute: reflectance
+    - *fordNearlosslessAttrsRefl8
   ford_01_q1mm:
     encflags:
-    - *fordNearlosslessAttrs
-    - bitdepth: 8
-    - attribute: reflectance
+    - *fordNearlosslessAttrsRefl8
@@ -40,13 +40,7 @@ categories:
      r04: 508 127 32 8 2 0
      r05: 10486 2621 655 164 41 0
      r06: 317194 79299 19825 4956 1239 0
-  - quantizationStepsLuma:
-     r01: 2 4
-     r02: 1 2 4 4
-     r03: 0 1 2 4 4
-     r04: 0 1 2 4 4 4
-     r05: 0 1 2 4 4 4
-     r06: 0 1 2 4 4 4
+  - quantizationStepLuma: 8
   - bitdepth: 8
   - attribute: reflectance
...
@@ -235,8 +235,7 @@ AttributeDecoder::decodeReflectancesPred(
   for (size_t predictorIndex = 0; predictorIndex < pointCount;
        ++predictorIndex) {
     auto& predictor = predictors[predictorIndex];
-    const size_t lodIndex = predictor.levelOfDetailIndex;
-    const int64_t qs = aps.quant_step_size_luma[lodIndex];
+    const int64_t qs = aps.quant_step_size_luma;
     computeReflectancePredictionWeights(
       pointCloud, aps.num_pred_nearest_neighbours, threshold, qs, predictor,
       decoder);
@@ -315,9 +314,8 @@ AttributeDecoder::decodeColorsPred(
   for (size_t predictorIndex = 0; predictorIndex < pointCount;
        ++predictorIndex) {
     auto& predictor = predictors[predictorIndex];
-    const size_t lodIndex = predictor.levelOfDetailIndex;
-    const int64_t qs = aps.quant_step_size_luma[lodIndex];
-    const int64_t qs2 = aps.quant_step_size_chroma[lodIndex];
+    const int64_t qs = aps.quant_step_size_luma;
+    const int64_t qs2 = aps.quant_step_size_chroma;
     computeColorPredictionWeights(
       pointCloud, aps.num_pred_nearest_neighbours, threshold, predictor,
       decoder);
@@ -408,9 +406,8 @@ AttributeDecoder::decodeReflectancesRaht(
   // Inverse Quantize.
   float* attributes = new float[voxelCount];
-  const int qstep = int(aps.quant_step_size_luma[0]);
   for (int n = 0; n < voxelCount; n++) {
-    attributes[n] = integerizedAttributes[n] * qstep;
+    attributes[n] = integerizedAttributes[n] * aps.quant_step_size_luma;
   }
   regionAdaptiveHierarchicalInverseTransform(
@@ -512,11 +509,10 @@ AttributeDecoder::decodeColorsRaht(
   // Inverse Quantize.
   float* attributes = new float[attribCount * voxelCount];
-  const int qstep = int(aps.quant_step_size_luma[0]);
   for (int n = 0; n < voxelCount; n++) {
     for (int k = 0; k < attribCount; k++) {
       attributes[attribCount * n + k] =
-        integerizedAttributes[attribCount * n + k] * qstep;
+        integerizedAttributes[attribCount * n + k] * aps.quant_step_size_luma;
     }
   }
@@ -579,9 +575,8 @@ AttributeDecoder::decodeColorsLift(
     values[0] = decoder.decode0();
     values[1] = decoder.decode1();
     values[2] = decoder.decode1();
-    const size_t lodIndex = predictors[predictorIndex].levelOfDetailIndex;
-    const int64_t qs = aps.quant_step_size_luma[lodIndex];
-    const int64_t qs2 = aps.quant_step_size_chroma[lodIndex];
+    const int64_t qs = aps.quant_step_size_luma;
+    const int64_t qs2 = aps.quant_step_size_chroma;
     const double quantWeight = sqrt(weights[predictorIndex]);
     auto& color = colors[predictorIndex];
     const int64_t delta = o3dgc::UIntToInt(values[0]);
@@ -646,8 +641,7 @@ AttributeDecoder::decodeReflectancesLift(
   for (size_t predictorIndex = 0; predictorIndex < pointCount;
        ++predictorIndex) {
     const int64_t detail = decoder.decode0();
-    const size_t lodIndex = predictors[predictorIndex].levelOfDetailIndex;
-    const int64_t qs = aps.quant_step_size_luma[lodIndex];
+    const int64_t qs = aps.quant_step_size_luma;
     const double quantWeight = sqrt(weights[predictorIndex]);
     auto& reflectance = reflectances[predictorIndex];
     const int64_t delta = o3dgc::UIntToInt(detail);
...
@@ -401,8 +401,7 @@ AttributeEncoder::encodeReflectancesPred(
   for (size_t predictorIndex = 0; predictorIndex < pointCount;
        ++predictorIndex) {
     auto& predictor = predictors[predictorIndex];
-    const size_t lodIndex = predictor.levelOfDetailIndex;
-    const int64_t qs = aps.quant_step_size_luma[lodIndex];
+    const int64_t qs = aps.quant_step_size_luma;
     computeReflectancePredictionWeights(
       pointCloud, aps.num_pred_nearest_neighbours, threshold, qs, predictor,
       encoder, context);
@@ -534,9 +533,8 @@ AttributeEncoder::encodeColorsPred(
   for (size_t predictorIndex = 0; predictorIndex < pointCount;
        ++predictorIndex) {
     auto& predictor = predictors[predictorIndex];
-    const size_t lodIndex = predictor.levelOfDetailIndex;
-    const int64_t qs = aps.quant_step_size_luma[lodIndex];
-    const int64_t qs2 = aps.quant_step_size_chroma[lodIndex];
+    const int64_t qs = aps.quant_step_size_luma;
+    const int64_t qs2 = aps.quant_step_size_chroma;
     computeColorPredictionWeights(
       pointCloud, aps.num_pred_nearest_neighbours, threshold, qs, qs2,
       predictor, encoder, context);
@@ -622,7 +620,7 @@ AttributeEncoder::encodeReflectancesTransformRaht(
   // Quantize.
   for (int n = 0; n < voxelCount; n++) {
     integerizedAttributes[n] =
-      int(round(attributes[n] / aps.quant_step_size_luma[0]));
+      int(round(attributes[n] / aps.quant_step_size_luma));
   }
   // Sort integerized attributes by weight.
@@ -662,7 +660,7 @@ AttributeEncoder::encodeReflectancesTransformRaht(
   }
   // Inverse Quantize.
   for (int n = 0; n < voxelCount; n++) {
-    attributes[n] = integerizedAttributes[n] * aps.quant_step_size_luma[0];
+    attributes[n] = integerizedAttributes[n] * aps.quant_step_size_luma;
   }
   regionAdaptiveHierarchicalInverseTransform(
     mortonCode, attributes, 1, voxelCount, aps.raht_depth);
@@ -737,8 +735,8 @@ AttributeEncoder::encodeColorsTransformRaht(
   // Quantize.
   for (int n = 0; n < voxelCount; n++) {
     for (int k = 0; k < attribCount; k++) {
-      integerizedAttributes[attribCount * n + k] = int(
-        round(attributes[attribCount * n + k] / aps.quant_step_size_luma[0]));
+      integerizedAttributes[attribCount * n + k] =
+        int(round(attributes[attribCount * n + k] / aps.quant_step_size_luma));
     }
   }
@@ -801,8 +799,7 @@ AttributeEncoder::encodeColorsTransformRaht(
   for (int n = 0; n < voxelCount; n++) {
     for (int k = 0; k < attribCount; k++) {
       attributes[attribCount * n + k] =
-        integerizedAttributes[attribCount * n + k]
-        * aps.quant_step_size_luma[0];
+        integerizedAttributes[attribCount * n + k] * aps.quant_step_size_luma;
     }
   }
@@ -878,8 +875,7 @@ AttributeEncoder::encodeColorsLift(
   // compress
   for (size_t predictorIndex = 0; predictorIndex < pointCount;
        ++predictorIndex) {
-    const size_t lodIndex = predictors[predictorIndex].levelOfDetailIndex;
-    const int64_t qs = aps.quant_step_size_luma[lodIndex];
+    const int64_t qs = aps.quant_step_size_luma;
     const double quantWeight = sqrt(weights[predictorIndex]);
     auto& color = colors[predictorIndex];
     const int64_t delta = PCCQuantization(color[0] * quantWeight, qs);
@@ -889,7 +885,7 @@ AttributeEncoder::encodeColorsLift(
     color[0] = reconstructedDelta / quantWeight;
     uint32_t values[3];
     values[0] = uint32_t(detail);
-    const size_t qs2 = aps.quant_step_size_chroma[lodIndex];
+    const size_t qs2 = aps.quant_step_size_chroma;
     for (size_t d = 1; d < 3; ++d) {
       const int64_t delta = PCCQuantization(color[d] * quantWeight, qs2);
       const int64_t detail = o3dgc::IntToUInt(delta);
@@ -967,8 +963,7 @@ AttributeEncoder::encodeReflectancesLift(
   // compress
   for (size_t predictorIndex = 0; predictorIndex < pointCount;
        ++predictorIndex) {
-    const size_t lodIndex = predictors[predictorIndex].levelOfDetailIndex;
-    const int64_t qs = aps.quant_step_size_luma[lodIndex];
+    const int64_t qs = aps.quant_step_size_luma;
     const double quantWeight = sqrt(weights[predictorIndex]);
     auto& reflectance = reflectances[predictorIndex];
     const int64_t delta = PCCQuantization(reflectance * quantWeight, qs);
...
@@ -320,7 +320,7 @@ ParseParameters(int argc, char* argv[], Parameters& params)
     "Used for chroma-subsampling in attribute=color only.")
   ("rahtQuantizationStep",
-    params_attr.aps.quant_step_size_luma, {},
+    params_attr.aps.quant_step_size_luma, 0,
     "deprecated -- use quantizationStepsLuma")
   ("rahtDepth",
@@ -342,17 +342,13 @@ ParseParameters(int argc, char* argv[], Parameters& params)
     params_attr.aps.numDetailLevels, 1,
     "Attribute's number of levels of detail")
-  ("quantizationSteps",
-    params_attr.aps.quant_step_size_luma, {},
-    "deprecated -- use quantizationStepsLuma")
-  ("quantizationStepsLuma",
-    params_attr.aps.quant_step_size_luma, {},
-    "Attribute's luma quantization step sizes (one for each LoD)")
+  ("quantizationStepLuma",
+    params_attr.aps.quant_step_size_luma, 0,
+    "Attribute's luma quantization step size")
-  ("quantizationStepsChroma",
-    params_attr.aps.quant_step_size_chroma, {},
-    "Attribute's chroma quantization step sizes (one for each LoD)")
+  ("quantizationStepChroma",
+    params_attr.aps.quant_step_size_chroma, 0,
+    "Attribute's chroma quantization step size")
   ("dist2",
     params_attr.aps.dist2, {},
@@ -389,6 +385,11 @@ ParseParameters(int argc, char* argv[], Parameters& params)
     auto& attr_sps = params.encoder.sps.attributeSets[it.second];
     auto& attr_aps = params.encoder.aps[it.second];
+    // Avoid wasting bits signalling chroma quant step size for reflectance
+    if (it.first == "reflectance") {
+      attr_aps.quant_step_size_chroma = 0;
+    }
     // Set default threshold based on bitdepth