Commit aba3e735 authored by Noritaka Iguchi's avatar Noritaka Iguchi Committed by David Flynn
Browse files

attr/m47401: derive quant stepsize from quantisation parameter

This commit introduces control of the quantisation step size using
the familiar HEVC | AVC quantisation parameter.
parent d85098de
...@@ -42,14 +42,13 @@ categories: ...@@ -42,14 +42,13 @@ categories:
- -
- !conditional '${has_refl}' - !conditional '${has_refl}'
- adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }' - adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }'
- quantizationStepLuma: - qp:
r01: 256 r01: 52
r02: 128 r02: 46
r03: 64 r03: 40
r04: 32 r04: 34
r05: 16 r05: 28
r06: 8 r06: 22
- quantizationStepChroma: 0
- bitdepth: 8 - bitdepth: 8
- attribute: reflectance - attribute: reflectance
...@@ -58,20 +57,14 @@ categories: ...@@ -58,20 +57,14 @@ categories:
- -
- !conditional '${has_colour}' - !conditional '${has_colour}'
- adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }' - adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }'
- quantizationStepLuma: - qp:
r01: 256 r01: 52
r02: 128 r02: 46
r03: 64 r03: 40
r04: 32 r04: 34
r05: 16 r05: 28
r06: 8 r06: 22
- quantizationStepChroma: - qpChromaOffset: 0
r01: 256
r02: 128
r03: 64
r04: 32
r05: 16
r06: 8
- bitdepth: 8 - bitdepth: 8
- attribute: color - attribute: color
......
...@@ -51,14 +51,13 @@ categories: ...@@ -51,14 +51,13 @@ categories:
- -
- !conditional '${has_refl}' - !conditional '${has_refl}'
- adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }' - adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }'
- quantizationStepLuma: - qp:
r01: 256 r01: 52
r02: 128 r02: 46
r03: 64 r03: 40
r04: 32 r04: 34
r05: 16 r05: 28
r06: 8 r06: 22
- quantizationStepChroma: 0
- bitdepth: 8 - bitdepth: 8
- attribute: reflectance - attribute: reflectance
...@@ -67,20 +66,14 @@ categories: ...@@ -67,20 +66,14 @@ categories:
- -
- !conditional '${has_colour}' - !conditional '${has_colour}'
- adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }' - adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }'
- quantizationStepLuma: - qp:
r01: 256 r01: 52
r02: 128 r02: 46
r03: 64 r03: 40
r04: 32 r04: 34
r05: 16 r05: 28
r06: 8 r06: 22
- quantizationStepChroma: - qpChromaOffset: 0
r01: 256
r02: 128
r03: 64
r04: 32
r05: 16
r06: 8
- bitdepth: 8 - bitdepth: 8
- attribute: color - attribute: color
......
...@@ -47,8 +47,7 @@ categories: ...@@ -47,8 +47,7 @@ categories:
- -
- !conditional '${has_refl}' - !conditional '${has_refl}'
- adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }' - adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }'
- quantizationStepLuma: 0 - qp: 4
- quantizationStepChroma: 0
- bitdepth: 8 - bitdepth: 8
- attribute: reflectance - attribute: reflectance
...@@ -57,8 +56,8 @@ categories: ...@@ -57,8 +56,8 @@ categories:
- -
- !conditional '${has_colour}' - !conditional '${has_colour}'
- adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }' - adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }'
- quantizationStepLuma: 0 - qp: 4
- quantizationStepChroma: 0 - qpChromaOffset: 0
- bitdepth: 8 - bitdepth: 8
- attribute: color - attribute: color
......
...@@ -43,13 +43,12 @@ categories: ...@@ -43,13 +43,12 @@ categories:
- -
- !conditional '${has_refl}' - !conditional '${has_refl}'
- adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }' - adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }'
- quantizationStepLuma: - qp:
r01: 2 r01: 10
r02: 4 r02: 16
r03: 8 r03: 22
r04: 16 r04: 28
r05: 32 r05: 34
- quantizationStepChroma: 0
- bitdepth: 8 - bitdepth: 8
- attribute: reflectance - attribute: reflectance
...@@ -58,18 +57,13 @@ categories: ...@@ -58,18 +57,13 @@ categories:
- -
- !conditional '${has_colour}' - !conditional '${has_colour}'
- adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }' - adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }'
- quantizationStepLuma: - qp:
r01: 2 r01: 10
r02: 4 r02: 16
r03: 8 r03: 22
r04: 16 r04: 28
r05: 32 r05: 34
- quantizationStepChroma: - qpChromaOffset: 0
r01: 2
r02: 4
r03: 8
r04: 16
r05: 32
- bitdepth: 8 - bitdepth: 8
- attribute: color - attribute: color
......
...@@ -31,14 +31,13 @@ categories: ...@@ -31,14 +31,13 @@ categories:
- -
- !conditional '${has_refl}' - !conditional '${has_refl}'
- rahtLeafDecimationDepth: 0 - rahtLeafDecimationDepth: 0
- quantizationStepLuma: - qp:
r01: 256 r01: 52
r02: 128 r02: 46
r03: 64 r03: 40
r04: 32 r04: 34
r05: 16 r05: 28
r06: 8 r06: 22
- quantizationStepChroma: 0
- bitdepth: 8 - bitdepth: 8
- attribute: reflectance - attribute: reflectance
...@@ -47,21 +46,15 @@ categories: ...@@ -47,21 +46,15 @@ categories:
- -
- !conditional '${has_colour}' - !conditional '${has_colour}'
- rahtLeafDecimationDepth: 0 - rahtLeafDecimationDepth: 0
- quantizationStepLuma: - qp:
r01: 256 r01: 52
r02: 128 r02: 46
r03: 64 r03: 40
r04: 32 r04: 34
r05: 16 r05: 28
r06: 8 r06: 22
- quantizationStepChroma: # NB: raht doesn't yet support qpChromaOffset
# NB: raht doesn't yet support quantizationStepChroma - qpChromaOffset: 0
r01: 256
r02: 128
r03: 64
r04: 32
r05: 16
r06: 8
- bitdepth: 8 - bitdepth: 8
- attribute: color - attribute: color
......
...@@ -36,14 +36,13 @@ categories: ...@@ -36,14 +36,13 @@ categories:
- -
- !conditional '${has_refl}' - !conditional '${has_refl}'
- rahtLeafDecimationDepth: 0 - rahtLeafDecimationDepth: 0
- quantizationStepLuma: - qp:
r01: 256 r01: 52
r02: 128 r02: 46
r03: 64 r03: 40
r04: 32 r04: 34
r05: 16 r05: 28
r06: 8 r06: 22
- quantizationStepChroma: 0
- bitdepth: 8 - bitdepth: 8
- attribute: reflectance - attribute: reflectance
...@@ -52,21 +51,15 @@ categories: ...@@ -52,21 +51,15 @@ categories:
- -
- !conditional '${has_colour}' - !conditional '${has_colour}'
- rahtLeafDecimationDepth: 0 - rahtLeafDecimationDepth: 0
- quantizationStepLuma: - qp:
r01: 256 r01: 52
r02: 128 r02: 46
r03: 64 r03: 40
r04: 32 r04: 34
r05: 16 r05: 28
r06: 8 r06: 22
- quantizationStepChroma: # NB: raht doesn't yet support qpChromaOffset
# NB: raht doesn't yet support quantizationStepChroma - qpChromaOffset: 0
r01: 256
r02: 128
r03: 64
r04: 32
r05: 16
r06: 8
- bitdepth: 8 - bitdepth: 8
- attribute: color - attribute: color
......
...@@ -44,20 +44,14 @@ categories: ...@@ -44,20 +44,14 @@ categories:
- -
- !conditional '${has_colour}' - !conditional '${has_colour}'
- adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }' - adaptivePredictionThreshold: '$eval{ 1 << (8 - 2) }'
- quantizationStepLuma: - qp:
r01: 256 r01: 52
r02: 128 r02: 46
r03: 64 r03: 40
r04: 32 r04: 34
r05: 16 r05: 28
r06: 8 r06: 22
- quantizationStepChroma: - qpChromaOffset: 0
r01: 256
r02: 128
r03: 64
r04: 32
r05: 16
r06: 8
- bitdepth: 8 - bitdepth: 8
- attribute: color - attribute: color
......
...@@ -33,21 +33,15 @@ categories: ...@@ -33,21 +33,15 @@ categories:
- -
- !conditional '${has_colour}' - !conditional '${has_colour}'
- rahtLeafDecimationDepth: 0 - rahtLeafDecimationDepth: 0
- quantizationStepLuma: - qp:
r01: 256 r01: 52
r02: 128 r02: 46
r03: 64 r03: 40
r04: 32 r04: 34
r05: 16 r05: 28
r06: 8 r06: 22
- quantizationStepChroma: # NB: raht doesn't yet support qpChromaOffset
# NB: raht doesn't yet support quantizationStepChroma - qpChromaOffset: 0
r01: 256
r02: 128
r03: 64
r04: 32
r05: 16
r06: 8
- bitdepth: 8 - bitdepth: 8
- attribute: color - attribute: color
......
...@@ -234,9 +234,6 @@ Sets coefficients to zero in the bottom n levels of the RAHT tree. ...@@ -234,9 +234,6 @@ Sets coefficients to zero in the bottom n levels of the RAHT tree.
This option provides a means to perform chroma-subsampling. Applies This option provides a means to perform chroma-subsampling. Applies
when `attribute=colour` only. when `attribute=colour` only.
### `--rahtQuantizationStep=INT-VALUE`
Deprecated -- use `quantizationStepsLuma`.
### `--rahtDepth=INT-VALUE` ### `--rahtDepth=INT-VALUE`
Number of bits for Morton representation of RAHT co-ordinate Number of bits for Morton representation of RAHT co-ordinate
components. components.
...@@ -268,13 +265,13 @@ Controls the level-of-detail generation method: ...@@ -268,13 +265,13 @@ Controls the level-of-detail generation method:
| 0 | Binary-tree based | | 0 | Binary-tree based |
| 1 | Euclidean distance thresholding | | 1 | Euclidean distance thresholding |
### `--quantizationStepLuma=INT-VALUE`
Attribute's luma quantization step size.
### `--quantizationStepChroma=INT-VALUE`
Attribute's chroma quantization step size. Only applies when
`attribute=colour`.
### `--dist2=INT-VALUE|INT-VALUE-LIST` ### `--dist2=INT-VALUE|INT-VALUE-LIST`
Attribute's list of squared distances, or initial value for automatic Attribute's list of squared distances, or initial value for automatic
derivation. derivation.
### `--qp=INT-VALUE`
Attribute's luma quantization parameter.
### `--qpChromaOffset=INT-VALUE`
Attribute's chroma quantization parameter relative to luma.
Only applies when `attribute=colour`.
...@@ -151,7 +151,8 @@ AttributeDecoder::decode( ...@@ -151,7 +151,8 @@ AttributeDecoder::decode(
PCCPointSet3& pointCloud) PCCPointSet3& pointCloud)
{ {
int abhSize; int abhSize;
/* AttributeBrickHeader abh = */ parseAbh(payload, &abhSize); AttributeBrickHeader abh = parseAbh(attr_aps, payload, &abhSize);
Quantizers qstep = deriveQuantSteps(attr_aps);
PCCResidualsDecoder decoder; PCCResidualsDecoder decoder;
decoder.start(payload.data() + abhSize, payload.size() - abhSize); decoder.start(payload.data() + abhSize, payload.size() - abhSize);
...@@ -159,29 +160,29 @@ AttributeDecoder::decode( ...@@ -159,29 +160,29 @@ AttributeDecoder::decode(
if (attr_desc.attr_num_dimensions == 1) { if (attr_desc.attr_num_dimensions == 1) {
switch (attr_aps.attr_encoding) { switch (attr_aps.attr_encoding) {
case AttributeEncoding::kRAHTransform: case AttributeEncoding::kRAHTransform:
decodeReflectancesRaht(attr_desc, attr_aps, decoder, pointCloud); decodeReflectancesRaht(attr_desc, attr_aps, qstep, decoder, pointCloud);
break; break;
case AttributeEncoding::kPredictingTransform: case AttributeEncoding::kPredictingTransform:
decodeReflectancesPred(attr_desc, attr_aps, decoder, pointCloud); decodeReflectancesPred(attr_desc, attr_aps, qstep, decoder, pointCloud);
break; break;
case AttributeEncoding::kLiftingTransform: case AttributeEncoding::kLiftingTransform:
decodeReflectancesLift(attr_desc, attr_aps, decoder, pointCloud); decodeReflectancesLift(attr_desc, attr_aps, qstep, decoder, pointCloud);
break; break;
} }
} else if (attr_desc.attr_num_dimensions == 3) { } else if (attr_desc.attr_num_dimensions == 3) {
switch (attr_aps.attr_encoding) { switch (attr_aps.attr_encoding) {
case AttributeEncoding::kRAHTransform: case AttributeEncoding::kRAHTransform:
decodeColorsRaht(attr_desc, attr_aps, decoder, pointCloud); decodeColorsRaht(attr_desc, attr_aps, qstep, decoder, pointCloud);
break; break;
case AttributeEncoding::kPredictingTransform: case AttributeEncoding::kPredictingTransform:
decodeColorsPred(attr_desc, attr_aps, decoder, pointCloud); decodeColorsPred(attr_desc, attr_aps, qstep, decoder, pointCloud);
break; break;
case AttributeEncoding::kLiftingTransform: case AttributeEncoding::kLiftingTransform:
decodeColorsLift(attr_desc, attr_aps, decoder, pointCloud); decodeColorsLift(attr_desc, attr_aps, qstep, decoder, pointCloud);
break; break;
} }
} else { } else {
...@@ -231,6 +232,7 @@ void ...@@ -231,6 +232,7 @@ void
AttributeDecoder::decodeReflectancesPred( AttributeDecoder::decodeReflectancesPred(
const AttributeDescription& desc, const AttributeDescription& desc,
const AttributeParameterSet& aps, const AttributeParameterSet& aps,
const Quantizers& qstep,
PCCResidualsDecoder& decoder, PCCResidualsDecoder& decoder,
PCCPointSet3& pointCloud) PCCPointSet3& pointCloud)
{ {
...@@ -261,7 +263,7 @@ AttributeDecoder::decodeReflectancesPred( ...@@ -261,7 +263,7 @@ AttributeDecoder::decodeReflectancesPred(
for (size_t predictorIndex = 0; predictorIndex < pointCount; for (size_t predictorIndex = 0; predictorIndex < pointCount;
++predictorIndex) { ++predictorIndex) {
auto& predictor = predictors[predictorIndex]; auto& predictor = predictors[predictorIndex];
const int64_t qs = aps.quant_step_size_luma; const int64_t qs = qstep[0];
computeReflectancePredictionWeights( computeReflectancePredictionWeights(
aps, pointCloud, indexesLOD, predictor, decoder); aps, pointCloud, indexesLOD, predictor, decoder);
const uint32_t pointIndex = indexesLOD[predictorIndex]; const uint32_t pointIndex = indexesLOD[predictorIndex];
...@@ -269,7 +271,8 @@ AttributeDecoder::decodeReflectancesPred( ...@@ -269,7 +271,8 @@ AttributeDecoder::decodeReflectancesPred(
const uint32_t attValue0 = decoder.decode(); const uint32_t attValue0 = decoder.decode();
const int64_t quantPredAttValue = const int64_t quantPredAttValue =
predictor.predictReflectance(pointCloud, indexesLOD); predictor.predictReflectance(pointCloud, indexesLOD);
const int64_t delta = PCCInverseQuantization(UIntToInt(attValue0), qs); const int64_t delta =
PCCInverseQuantization(UIntToInt(attValue0), qs, true);
const int64_t reconstructedQuantAttValue = quantPredAttValue + delta; const int64_t reconstructedQuantAttValue = quantPredAttValue + delta;
reflectance = uint16_t( reflectance = uint16_t(
PCCClip(reconstructedQuantAttValue, int64_t(0), maxReflectance)); PCCClip(reconstructedQuantAttValue, int64_t(0), maxReflectance));
...@@ -318,6 +321,7 @@ void ...@@ -318,6 +321,7 @@ void
AttributeDecoder::decodeColorsPred( AttributeDecoder::decodeColorsPred(
const AttributeDescription& desc, const AttributeDescription& desc,
const AttributeParameterSet& aps, const AttributeParameterSet& aps,
const Quantizers& qstep,
PCCResidualsDecoder& decoder, PCCResidualsDecoder& decoder,
PCCPointSet3& pointCloud) PCCPointSet3& pointCloud)
{ {
...@@ -348,8 +352,8 @@ AttributeDecoder::decodeColorsPred( ...@@ -348,8 +352,8 @@ AttributeDecoder::decodeColorsPred(
for (size_t predictorIndex = 0; predictorIndex < pointCount; for (size_t predictorIndex = 0; predictorIndex < pointCount;
++predictorIndex) { ++predictorIndex) {
auto& predictor = predictors[predictorIndex]; auto& predictor = predictors[predictorIndex];
const int64_t qs = aps.quant_step_size_luma; const int64_t qs = qstep[0];
const int64_t qs2 = aps.quant_step_size_chroma; const int64_t qs2 = qstep[1];
computeColorPredictionWeights( computeColorPredictionWeights(
aps, pointCloud, indexesLOD, predictor, decoder); aps, pointCloud, indexesLOD, predictor, decoder);
decoder.decode(values); decoder.decode(values);
...@@ -358,14 +362,16 @@ AttributeDecoder::decodeColorsPred( ...@@ -358,14 +362,16 @@ AttributeDecoder::decodeColorsPred(
const Vec3<uint8_t> predictedColor = const Vec3<uint8_t> predictedColor =
predictor.predictColor(pointCloud, indexesLOD); predictor.predictColor(pointCloud, indexesLOD);
const int64_t quantPredAttValue = predictedColor[0]; const int64_t quantPredAttValue = predictedColor[0];
const int64_t delta = PCCInverseQuantization(UIntToInt(values[0]), qs); const int64_t delta =
PCCInverseQuantization(UIntToInt(values[0]), qs, true);
const int64_t reconstructedQuantAttValue = quantPredAttValue + delta; const int64_t reconstructedQuantAttValue = quantPredAttValue + delta;
int64_t clipMax = (1 << desc.attr_bitdepth) - 1; int64_t clipMax = (1 << desc.attr_bitdepth) - 1;
color[0] = color[0] =
uint8_t(PCCClip(reconstructedQuantAttValue, int64_t(0), clipMax)); uint8_t(PCCClip(reconstructedQuantAttValue, int64_t(0), clipMax));
for (size_t k = 1; k < 3; ++k) { for (size_t k = 1; k < 3; ++k) {
const int64_t quantPredAttValue = predictedColor[k]; const int64_t quantPredAttValue = predictedColor[k];
const int64_t delta = PCCInverseQuantization(UIntToInt(values[k]), qs2); const int64_t delta =
PCCInverseQuantization(UIntToInt(values[k]), qs2, true);
const int64_t reconstructedQuantAttValue = quantPredAttValue + delta; const int64_t reconstructedQuantAttValue = quantPredAttValue + delta;
color[k] = color[k] =
uint8_t(PCCClip(reconstructedQuantAttValue, int64_t(0), clipMax)); uint8_t(PCCClip(reconstructedQuantAttValue, int64_t(0), clipMax));
...@@ -379,6 +385,7 @@ void ...@@ -379,6 +385,7 @@ void
AttributeDecoder::decodeReflectancesRaht( AttributeDecoder::decodeReflectancesRaht(
const AttributeDescription& desc, const AttributeDescription& desc,