Commit 51f21bcb authored by Khaled Mammou, committed by David Flynn

attr: use single quant_step_size_* for all LoDs in lift/pred

This commit removes the ability to vary quant_step_size on a per-LoD
basis.  This greatly simplifies the codec configuration, especially
when used in conjunction with dist2 derivation.

cfg update:
 - rename quantizationSteps* -> quantizationStep*
 - update quantizationStep* to single values:
   - lossless-geom lossy-attrs:        8, 16, 32, 64, 128, 256 [8bit]
   - lossless-geom nearlossless-attrs: no change
   - lossless-geom lossless-attrs:     no change
   - lossy-geom    lossy-attrs:        8                       [8bit]

 => 16-bit cat3 sequences have 8-bit qs values multiplied by 255
parent 983f6388
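The change itself is mechanical and repeats at every quantisation site in the diffs below: each per-LoD table lookup collapses into a read of a single scalar. In essence (names exactly as in the diff):

    // before: one step size per level of detail
    const size_t lodIndex = predictor.levelOfDetailIndex;
    const int64_t qs = aps.quant_step_size_luma[lodIndex];

    // after: one step size for the whole attribute
    const int64_t qs = aps.quant_step_size_luma;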
......@@ -30,8 +30,8 @@ categories:
- numberOfNearestNeighborsInPrediction: 3
- levelOfDetailCount: 9
- dist2: 134217728 33554432 8388608 2097152 524288 131072 32768 8192 0
- quantizationStepsLuma: 0 0 0 0 0 0 0 0 0
- quantizationStepsChroma: 0 0 0 0 0 0 0 0 0
- quantizationStepLuma: 0
- quantizationStepChroma: 0
- bitdepth: 8
- attribute: color
......@@ -89,7 +89,7 @@ categories:
- numberOfNearestNeighborsInPrediction: 3
- levelOfDetailCount: 6
- dist2: 4194301 1048582 262149 65534 16383 0
- quantizationStepsLuma: 0 0 0 0 0 0
- quantizationStepLuma: 0
- bitdepth: 8
- attribute: reflectance
......
......@@ -31,40 +31,40 @@ categories:
- numberOfNearestNeighborsInPrediction: 3
- levelOfDetailCount: 9
- dist2: 134217728 33554432 8388608 2097152 524288 131072 32768 8192 0
- quantizationStepsLuma:
r01: 1 2 4 8 16 32 64 128 128
r02: 1 2 4 8 16 32 64 64 66
r03: 1 2 4 8 16 32 32 32 34
r04: 1 2 4 8 8 8 16 16 16
r05: 1 2 4 4 4 4 4 8 8
r06: 0 1 1 1 2 2 2 2 2
- quantizationStepsChroma:
r01: 1 2 4 8 16 32 64 128 128
r02: 1 2 4 8 16 32 64 64 66
r03: 1 2 4 8 16 32 32 32 34
r04: 1 2 4 8 8 8 16 16 16
r05: 1 2 4 4 4 4 4 8 8
r06: 0 1 1 1 2 2 2 2 2
- quantizationStepLuma:
r01: 256
r02: 128
r03: 64
r04: 32
r05: 16
r06: 8
- quantizationStepChroma:
r01: 256
r02: 128
r03: 64
r04: 32
r05: 16
r06: 8
- bitdepth: 8
- attribute: color
tollbooth_q1mm:
encflags:
- *commonAttr
- quantizationStepsLuma:
r01: 1 2 4 8 16 32 64 128 128
r02: 1 2 4 8 16 32 32 64 64
r03: 1 2 4 8 16 16 16 32 32
r04: 1 2 4 8 8 8 8 10 16
r05: 1 2 4 4 4 4 4 4 6
r06: 0 0 0 0 0 0 1 1 2
- quantizationStepsChroma:
r01: 1 2 4 8 16 32 64 128 128
r02: 1 2 4 8 16 32 32 64 64
r03: 1 2 4 8 16 16 16 32 32
r04: 1 2 4 8 8 8 8 10 16
r05: 1 2 4 4 4 4 4 4 6
r06: 0 0 0 0 0 0 1 1 2
- quantizationStepLuma:
r01: 256
r02: 128
r03: 64
r04: 32
r05: 16
r06: 8
- quantizationStepChroma:
r01: 256
r02: 128
r03: 64
r04: 32
r05: 16
r06: 8
- bitdepth: 8
- attribute: color
......@@ -90,25 +90,25 @@ categories:
citytunnel_q1mm:
encflags:
- *commonAttr
- quantizationStepsLuma:
r01: 286 571 1143 2286 4571 9143 18286 36571 65536
r02: 77 154 308 615 1231 2462 4923 9846 19692
r03: 29 57 114 229 457 914 1829 3657 7314
r04: 10 20 40 80 160 320 640 1280 2560
r05: 2 5 10 20 40 80 160 320 640
r06: 0 1 2 4 8 16 16 32 32
- quantizationStepLuma:
r01: $eval{ 255 * 256 }
r02: $eval{ 255 * 128 }
r03: $eval{ 255 * 64 }
r04: $eval{ 255 * 32 }
r05: $eval{ 255 * 16 }
r06: $eval{ 255 * 8 }
- bitdepth: 16
- attribute: reflectance
tollbooth_q1mm:
encflags:
- *commonAttr
- quantizationStepsLuma:
r01: 286 571 1143 2286 4571 9143 18286 36571 65536
r02: 100 200 400 800 1600 3200 6400 12800 25600
r03: 50 100 200 400 800 1600 3200 6400 12800
r04: 17 33 67 133 267 533 1067 2133 4267
r05: 4 8 16 32 64 128 256 512 1024
r06: 1 2 4 8 16 32 32 32 32
- quantizationStepLuma:
r01: $eval{ 255 * 256 }
r02: $eval{ 255 * 128 }
r03: $eval{ 255 * 64 }
r04: $eval{ 255 * 32 }
r05: $eval{ 255 * 16 }
r06: $eval{ 255 * 8 }
- bitdepth: 16
- attribute: reflectance
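For the 16-bit reflectance sequences the cfg keeps the 8-bit ladder and scales it by 255, as the commit message notes; the $eval{ 255 * N } expressions are evaluated by the cfg generator. A compile-time check of the resulting values (my own helper, purely illustrative):

    // Hypothetical helper mirroring the cfg's $eval{ 255 * N } entries:
    // the 16-bit sequences reuse the 8-bit steps scaled by 255, giving
    // 65280, 32640, 16320, 8160, 4080, 2040 for r01..r06.
    constexpr int scaleStepTo16bit(int qs8bit) { return 255 * qs8bit; }
    static_assert(scaleStepTo16bit(256) == 65280, "r01");
    static_assert(scaleStepTo16bit(8) == 2040, "r06");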
......@@ -31,32 +31,30 @@ categories:
- numberOfNearestNeighborsInPrediction: 3
- levelOfDetailCount: 9
- dist2: 134217728 33554432 8388608 2097152 524288 131072 32768 8192 0
- quantizationStepsLuma:
r01: 2 2 2 2 2 2 2 2 2
r02: 4 4 4 4 4 4 4 4 4
r03: 8 8 8 8 8 8 8 8 8
r04: 16 16 16 16 16 16 16 16 16
r05: 32 32 32 32 32 32 32 32 32
- quantizationStepsChroma:
r01: 2 2 2 2 2 2 2 2 2
r02: 4 4 4 4 4 4 4 4 4
r03: 8 8 8 8 8 8 8 8 8
r04: 16 16 16 16 16 16 16 16 16
r05: 32 32 32 32 32 32 32 32 32
- &mitsubishiNearlosslessAttrsColour8
- *mitsubishiNearlosslessAttrs
- quantizationStepLuma:
r01: 2
r02: 4
r03: 8
r04: 16
r05: 32
- quantizationStepChroma:
r01: 2
r02: 4
r03: 8
r04: 16
r05: 32
- bitdepth: 8
- attribute: color
overpass_q1mm:
encflags:
- *mitsubishiNearlosslessAttrs
- bitdepth: 8
- attribute: color
- *mitsubishiNearlosslessAttrsColour8
tollbooth_q1mm:
encflags:
- *mitsubishiNearlosslessAttrs
- bitdepth: 8
- attribute: color
- *mitsubishiNearlosslessAttrsColour8
##
# Condition 3.Z
......@@ -79,45 +77,44 @@ categories:
sequences:
citytunnel_q1mm:
encflags:
- &mitsubishiNearlosslessAttrsRefl16
- *mitsubishiNearlosslessAttrs
- quantizationStepLuma:
r01: $eval{ 255 * 2 }
r02: $eval{ 255 * 4 }
r03: $eval{ 255 * 8 }
r04: $eval{ 255 * 16 }
r05: $eval{ 255 * 32 }
- bitdepth: 16
- attribute: reflectance
overpass_q1mm:
encflags:
- *mitsubishiNearlosslessAttrs
- bitdepth: 16
- attribute: reflectance
- *mitsubishiNearlosslessAttrsRefl16
tollbooth_q1mm:
encflags:
- *mitsubishiNearlosslessAttrs
- bitdepth: 16
- attribute: reflectance
- *mitsubishiNearlosslessAttrsRefl16
ford_03_q1mm:
encflags:
- &fordNearlosslessAttrs
- &fordNearlosslessAttrsRefl8
- numberOfNearestNeighborsInPrediction: 3
- levelOfDetailCount: 6
- dist2: 4194301 1048582 262149 65534 16383 0
- quantizationStepsLuma:
r01: 2 2 2 2 2 2
r02: 4 4 4 4 4 4
r03: 8 8 8 8 8 8
r04: 16 16 16 16 16 16
r05: 32 32 32 32 32 32
- quantizationStepLuma:
r01: 2
r02: 4
r03: 8
r04: 16
r05: 32
- bitdepth: 8
- attribute: reflectance
ford_02_q1mm:
encflags:
- *fordNearlosslessAttrs
- bitdepth: 8
- attribute: reflectance
- *fordNearlosslessAttrsRefl8
ford_01_q1mm:
encflags:
- *fordNearlosslessAttrs
- bitdepth: 8
- attribute: reflectance
- *fordNearlosslessAttrsRefl8
......@@ -40,13 +40,7 @@ categories:
r04: 508 127 32 8 2 0
r05: 10486 2621 655 164 41 0
r06: 317194 79299 19825 4956 1239 0
- quantizationStepsLuma:
r01: 2 4
r02: 1 2 4 4
r03: 0 1 2 4 4
r04: 0 1 2 4 4 4
r05: 0 1 2 4 4 4
r06: 0 1 2 4 4 4
- quantizationStepLuma: 8
- bitdepth: 8
- attribute: reflectance
......
......@@ -235,8 +235,7 @@ AttributeDecoder::decodeReflectancesPred(
for (size_t predictorIndex = 0; predictorIndex < pointCount;
++predictorIndex) {
auto& predictor = predictors[predictorIndex];
const size_t lodIndex = predictor.levelOfDetailIndex;
const int64_t qs = aps.quant_step_size_luma[lodIndex];
const int64_t qs = aps.quant_step_size_luma;
computeReflectancePredictionWeights(
pointCloud, aps.num_pred_nearest_neighbours, threshold, qs, predictor,
decoder);
......@@ -315,9 +314,8 @@ AttributeDecoder::decodeColorsPred(
for (size_t predictorIndex = 0; predictorIndex < pointCount;
++predictorIndex) {
auto& predictor = predictors[predictorIndex];
const size_t lodIndex = predictor.levelOfDetailIndex;
const int64_t qs = aps.quant_step_size_luma[lodIndex];
const int64_t qs2 = aps.quant_step_size_chroma[lodIndex];
const int64_t qs = aps.quant_step_size_luma;
const int64_t qs2 = aps.quant_step_size_chroma;
computeColorPredictionWeights(
pointCloud, aps.num_pred_nearest_neighbours, threshold, predictor,
decoder);
......@@ -408,9 +406,8 @@ AttributeDecoder::decodeReflectancesRaht(
// Inverse Quantize.
float* attributes = new float[voxelCount];
const int qstep = int(aps.quant_step_size_luma[0]);
for (int n = 0; n < voxelCount; n++) {
attributes[n] = integerizedAttributes[n] * qstep;
attributes[n] = integerizedAttributes[n] * aps.quant_step_size_luma;
}
regionAdaptiveHierarchicalInverseTransform(
......@@ -512,11 +509,10 @@ AttributeDecoder::decodeColorsRaht(
// Inverse Quantize.
float* attributes = new float[attribCount * voxelCount];
const int qstep = int(aps.quant_step_size_luma[0]);
for (int n = 0; n < voxelCount; n++) {
for (int k = 0; k < attribCount; k++) {
attributes[attribCount * n + k] =
integerizedAttributes[attribCount * n + k] * qstep;
integerizedAttributes[attribCount * n + k] * aps.quant_step_size_luma;
}
}
......@@ -579,9 +575,8 @@ AttributeDecoder::decodeColorsLift(
values[0] = decoder.decode0();
values[1] = decoder.decode1();
values[2] = decoder.decode1();
const size_t lodIndex = predictors[predictorIndex].levelOfDetailIndex;
const int64_t qs = aps.quant_step_size_luma[lodIndex];
const int64_t qs2 = aps.quant_step_size_chroma[lodIndex];
const int64_t qs = aps.quant_step_size_luma;
const int64_t qs2 = aps.quant_step_size_chroma;
const double quantWeight = sqrt(weights[predictorIndex]);
auto& color = colors[predictorIndex];
const int64_t delta = o3dgc::UIntToInt(values[0]);
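The decoded values are unsigned residuals; o3dgc::UIntToInt maps them back to signed deltas before inverse quantisation. A sketch of that mapping pair, assuming the conventional zigzag definition (the library's own edge-case handling may differ):

    #include <cstdint>

    // Conventional zigzag code: 0,-1,1,-2,2,... <-> 0,1,2,3,4,...
    uint64_t intToUInt(int64_t v) { return v < 0 ? uint64_t(-2 * v - 1) : uint64_t(2 * v); }
    int64_t uintToInt(uint64_t u) { return (u & 1) ? -int64_t((u + 1) >> 1) : int64_t(u >> 1); }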
......@@ -646,8 +641,7 @@ AttributeDecoder::decodeReflectancesLift(
for (size_t predictorIndex = 0; predictorIndex < pointCount;
++predictorIndex) {
const int64_t detail = decoder.decode0();
const size_t lodIndex = predictors[predictorIndex].levelOfDetailIndex;
const int64_t qs = aps.quant_step_size_luma[lodIndex];
const int64_t qs = aps.quant_step_size_luma;
const double quantWeight = sqrt(weights[predictorIndex]);
auto& reflectance = reflectances[predictorIndex];
const int64_t delta = o3dgc::UIntToInt(detail);
......
......@@ -401,8 +401,7 @@ AttributeEncoder::encodeReflectancesPred(
for (size_t predictorIndex = 0; predictorIndex < pointCount;
++predictorIndex) {
auto& predictor = predictors[predictorIndex];
const size_t lodIndex = predictor.levelOfDetailIndex;
const int64_t qs = aps.quant_step_size_luma[lodIndex];
const int64_t qs = aps.quant_step_size_luma;
computeReflectancePredictionWeights(
pointCloud, aps.num_pred_nearest_neighbours, threshold, qs, predictor,
encoder, context);
......@@ -534,9 +533,8 @@ AttributeEncoder::encodeColorsPred(
for (size_t predictorIndex = 0; predictorIndex < pointCount;
++predictorIndex) {
auto& predictor = predictors[predictorIndex];
const size_t lodIndex = predictor.levelOfDetailIndex;
const int64_t qs = aps.quant_step_size_luma[lodIndex];
const int64_t qs2 = aps.quant_step_size_chroma[lodIndex];
const int64_t qs = aps.quant_step_size_luma;
const int64_t qs2 = aps.quant_step_size_chroma;
computeColorPredictionWeights(
pointCloud, aps.num_pred_nearest_neighbours, threshold, qs, qs2,
predictor, encoder, context);
......@@ -622,7 +620,7 @@ AttributeEncoder::encodeReflectancesTransformRaht(
// Quantize.
for (int n = 0; n < voxelCount; n++) {
integerizedAttributes[n] =
int(round(attributes[n] / aps.quant_step_size_luma[0]));
int(round(attributes[n] / aps.quant_step_size_luma));
}
// Sort integerized attributes by weight.
......@@ -662,7 +660,7 @@ AttributeEncoder::encodeReflectancesTransformRaht(
}
// Inverse Quantize.
for (int n = 0; n < voxelCount; n++) {
attributes[n] = integerizedAttributes[n] * aps.quant_step_size_luma[0];
attributes[n] = integerizedAttributes[n] * aps.quant_step_size_luma;
}
regionAdaptiveHierarchicalInverseTransform(
mortonCode, attributes, 1, voxelCount, aps.raht_depth);
......@@ -737,8 +735,8 @@ AttributeEncoder::encodeColorsTransformRaht(
// Quantize.
for (int n = 0; n < voxelCount; n++) {
for (int k = 0; k < attribCount; k++) {
integerizedAttributes[attribCount * n + k] = int(
round(attributes[attribCount * n + k] / aps.quant_step_size_luma[0]));
integerizedAttributes[attribCount * n + k] =
int(round(attributes[attribCount * n + k] / aps.quant_step_size_luma));
}
}
......@@ -801,8 +799,7 @@ AttributeEncoder::encodeColorsTransformRaht(
for (int n = 0; n < voxelCount; n++) {
for (int k = 0; k < attribCount; k++) {
attributes[attribCount * n + k] =
integerizedAttributes[attribCount * n + k]
* aps.quant_step_size_luma[0];
integerizedAttributes[attribCount * n + k] * aps.quant_step_size_luma;
}
}
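Both RAHT loops above now form a plain uniform quantise/dequantise pair around the scalar step. A self-contained restatement (my own function names; assumes qs > 0, as the lossy RAHT configurations use):

    #include <cmath>

    // q = round(a / qs) on the encoder, a' = q * qs on both sides; the
    // reconstruction error per coefficient is bounded by qs / 2.
    int quantizeRahtCoeff(float a, int qs) { return int(std::lround(a / qs)); }
    float dequantizeRahtCoeff(int q, int qs) { return float(q) * float(qs); }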
......@@ -878,8 +875,7 @@ AttributeEncoder::encodeColorsLift(
// compress
for (size_t predictorIndex = 0; predictorIndex < pointCount;
++predictorIndex) {
const size_t lodIndex = predictors[predictorIndex].levelOfDetailIndex;
const int64_t qs = aps.quant_step_size_luma[lodIndex];
const int64_t qs = aps.quant_step_size_luma;
const double quantWeight = sqrt(weights[predictorIndex]);
auto& color = colors[predictorIndex];
const int64_t delta = PCCQuantization(color[0] * quantWeight, qs);
......@@ -889,7 +885,7 @@ AttributeEncoder::encodeColorsLift(
color[0] = reconstructedDelta / quantWeight;
uint32_t values[3];
values[0] = uint32_t(detail);
const size_t qs2 = aps.quant_step_size_chroma[lodIndex];
const size_t qs2 = aps.quant_step_size_chroma;
for (size_t d = 1; d < 3; ++d) {
const int64_t delta = PCCQuantization(color[d] * quantWeight, qs2);
const int64_t detail = o3dgc::IntToUInt(delta);
......@@ -967,8 +963,7 @@ AttributeEncoder::encodeReflectancesLift(
// compress
for (size_t predictorIndex = 0; predictorIndex < pointCount;
++predictorIndex) {
const size_t lodIndex = predictors[predictorIndex].levelOfDetailIndex;
const int64_t qs = aps.quant_step_size_luma[lodIndex];
const int64_t qs = aps.quant_step_size_luma;
const double quantWeight = sqrt(weights[predictorIndex]);
auto& reflectance = reflectances[predictorIndex];
const int64_t delta = PCCQuantization(reflectance * quantWeight, qs);
......
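In the lifting loops above the single qs is still modulated per point: each coefficient is scaled by sqrt(weights[predictorIndex]) before quantisation and the reconstruction is divided by the same factor. A hedged sketch of that round trip (a plain uniform quantiser stands in for PCCQuantization/PCCInverseQuantization; the codec's exact rounding may differ):

    #include <cmath>
    #include <cstdint>

    int64_t quantizeLifted(double coeff, double weight, int64_t qs) {
      const double quantWeight = std::sqrt(weight);          // as in the loops above
      return std::llround(coeff * quantWeight / double(qs)); // stand-in for PCCQuantization
    }

    double dequantizeLifted(int64_t delta, double weight, int64_t qs) {
      const double quantWeight = std::sqrt(weight);
      return double(delta * qs) / quantWeight;               // reconstructedDelta / quantWeight
    }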
......@@ -320,7 +320,7 @@ ParseParameters(int argc, char* argv[], Parameters& params)
"Used for chroma-subsampling in attribute=color only.")
("rahtQuantizationStep",
params_attr.aps.quant_step_size_luma, {},
params_attr.aps.quant_step_size_luma, 0,
"deprecated -- use quantizationStepsLuma")
("rahtDepth",
......@@ -342,17 +342,13 @@ ParseParameters(int argc, char* argv[], Parameters& params)
params_attr.aps.numDetailLevels, 1,
"Attribute's number of levels of detail")
("quantizationSteps",
params_attr.aps.quant_step_size_luma, {},
"deprecated -- use quantizationStepsLuma")
("quantizationStepsLuma",
params_attr.aps.quant_step_size_luma, {},
"Attribute's luma quantization step sizes (one for each LoD)")
("quantizationStepLuma",
params_attr.aps.quant_step_size_luma, 0,
"Attribute's luma quantization step size")
("quantizationStepsChroma",
params_attr.aps.quant_step_size_chroma, {},
"Attribute's chroma quantization step sizes (one for each LoD)")
("quantizationStepChroma",
params_attr.aps.quant_step_size_chroma, 0,
"Attribute's chroma quantization step size")
("dist2",
params_attr.aps.dist2, {},
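Each renamed option now takes one value instead of a per-LoD list, so a typical encoder invocation shrinks to something like the following (option names from the registrations above; the 8/8 values are the lossy-attrs settings from the cfg update, and the --name=value flag syntax is assumed to match this option parser):

    --quantizationStepLuma=8 --quantizationStepChroma=8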
......@@ -389,6 +385,11 @@ ParseParameters(int argc, char* argv[], Parameters& params)
auto& attr_sps = params.encoder.sps.attributeSets[it.second];
auto& attr_aps = params.encoder.aps[it.second];
// Avoid wasting bits signalling chroma quant step size for reflectance
if (it.first == "reflectance") {
attr_aps.quant_step_size_chroma = 0;
}
// Set default threshold based on bitdepth
if (attr_aps.adaptive_prediction_threshold == -1) {
attr_aps.adaptive_prediction_threshold = 1
......@@ -403,12 +404,13 @@ ParseParameters(int argc, char* argv[], Parameters& params)
if (attr_aps.attr_encoding == AttributeEncoding::kRAHTransform) {
attr_aps.numDetailLevels = 0;
attr_aps.adaptive_prediction_threshold = 0;
// todo(df): suggest chroma quant_step_size for raht
attr_aps.quant_step_size_chroma = 0;
}
}
// sanity checks
// - validate that dist2 of each attribute contains
//   levelOfDetailCount elements.
for (const auto& it : params.encoder.attributeIdxMap) {
const auto& attr_sps = params.encoder.sps.attributeSets[it.second];
const auto& attr_aps = params.encoder.aps[it.second];
......@@ -435,21 +437,10 @@ ParseParameters(int argc, char* argv[], Parameters& params)
err.error() << it.first
<< ".levelOfDetailCount must be less than 256\n";
}
// todo(df): the following check is removed in m42640/2
if (attr_aps.dist2.size() != lod) {
err.error() << it.first << ".dist2 does not have " << lod
<< " entries\n";
}
if (attr_aps.quant_step_size_luma.size() != lod) {
err.error() << it.first << ".quantizationStepsLuma does not have "
<< lod << " entries\n";
}
if (it.first == "color") {
if (attr_aps.quant_step_size_chroma.size() != lod) {
err.error() << it.first << ".quantizationStepsChroma does not have "
<< lod << " entries\n";
}
}
if (attr_aps.adaptive_prediction_threshold < 0) {
err.error() << it.first
......
......@@ -243,8 +243,8 @@ struct AttributeParameterSet {
std::vector<int64_t> dist2;
// NB: these parameters are shared by raht and lift
std::vector<int> quant_step_size_luma;
std::vector<int> quant_step_size_chroma;
int quant_step_size_luma;
int quant_step_size_chroma;
//--- raht parameters
......
......@@ -251,24 +251,18 @@ write(const AttributeParameterSet& aps)
bs.writeUe(aps.aps_seq_parameter_set_id);
bs.writeUe(aps.attr_encoding);
// todo(df): reconsider the derivation of the following
bool chroma_quant_steps_present_flag =
aps.quant_step_size_luma.size() == aps.quant_step_size_chroma.size();
bs.write(chroma_quant_steps_present_flag);
bool isLifting = aps.attr_encoding == AttributeEncoding::kLiftingTransform
|| aps.attr_encoding == AttributeEncoding::kPredictingTransform;
if (isLifting) {
bs.writeUe(aps.num_pred_nearest_neighbours);
bs.writeUe(aps.quant_step_size_luma);
bs.writeUe(aps.quant_step_size_chroma);
int num_detail_levels_minus1 = aps.numDetailLevels - 1;
bs.writeUe(num_detail_levels_minus1);
for (int idx = 0; idx <= num_detail_levels_minus1; idx++) {
// todo(??): is this an appropriate encoding?
bs.writeUe64(aps.dist2[idx]);
bs.writeUe(aps.quant_step_size_luma[idx]);
if (chroma_quant_steps_present_flag)
bs.writeUe(aps.quant_step_size_chroma[idx]);
}
}
......@@ -279,7 +273,7 @@ write(const AttributeParameterSet& aps)
if (aps.attr_encoding == AttributeEncoding::kRAHTransform) {
bs.writeUe(aps.raht_depth);
bs.writeUe(aps.raht_binary_level_threshold);
bs.write(aps.quant_step_size_luma[0]);
bs.writeUe(aps.quant_step_size_luma);
// todo(?): raht chroma quant_step_size?
}
......@@ -303,29 +297,19 @@ parseAps(const PayloadBuffer& buf)
bs.readUe(&aps.aps_seq_parameter_set_id);
bs.readUe(&aps.attr_encoding);
bool chroma_quant_steps_present_flag;
bs.read(&chroma_quant_steps_present_flag);
bool isLifting = aps.attr_encoding == AttributeEncoding::kLiftingTransform
|| aps.attr_encoding == AttributeEncoding::kPredictingTransform;
if (isLifting) {
bs.readUe(&aps.num_pred_nearest_neighbours);
bs.readUe(&aps.quant_step_size_luma);
bs.readUe(&aps.quant_step_size_chroma);
int num_detail_levels_minus1 = int(bs.readUe());
aps.numDetailLevels = num_detail_levels_minus1 + 1;
aps.dist2.resize(aps.numDetailLevels);
aps.quant_step_size_luma.resize(aps.numDetailLevels);
if (chroma_quant_steps_present_flag) {
aps.quant_step_size_chroma.resize(aps.numDetailLevels);
}
for (int idx = 0; idx <= num_detail_levels_minus1; idx++) {
bs.readUe(&aps.dist2[idx]);
bs.readUe(&aps.quant_step_size_luma[idx]);
if (chroma_quant_steps_present_flag) {
bs.readUe(&aps.quant_step_size_chroma[idx]);
}
}
}
......@@ -336,14 +320,8 @@ parseAps(const PayloadBuffer& buf)
if (aps.attr_encoding == AttributeEncoding::kRAHTransform) {
bs.readUe(&aps.raht_depth);
bs.readUe(&aps.raht_binary_level_threshold);
aps.quant_step_size_luma.resize(1);
bs.read(&aps.quant_step_size_luma[0]);
}
if (!chroma_quant_steps_present_flag) {
// infer when not present
aps.quant_step_size_chroma = aps.quant_step_size_luma;
bs.readUe(&aps.quant_step_size_luma);
// todo(?): raht chroma quant_step_size
}
bool aps_extension_flag = bs.read();
......
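Read together, the write/parse changes imply this simplified step-size syntax in the APS (a summary of the code above, not quoted from any spec text; ue(v) denotes the Exp-Golomb coding behind writeUe/readUe):

    // if (isLifting) {
    //   num_pred_nearest_neighbours            ue(v)
    //   quant_step_size_luma                   ue(v)
    //   quant_step_size_chroma                 ue(v)
    //   num_detail_levels_minus1               ue(v)
    //   for (idx = 0; idx <= num_detail_levels_minus1; idx++)
    //     dist2[idx]                           ue64(v)
    // }
    // if (attr_encoding == kRAHTransform) {
    //   raht_depth                             ue(v)
    //   raht_binary_level_threshold            ue(v)
    //   quant_step_size_luma                   ue(v)
    // }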