/* The copyright in this software is being made available under the BSD
 * Licence, included below.  This software may be subject to other third
 * party and contributor rights, including patent rights, and no such
 * rights are granted under this licence.
 *
 * Copyright (c) 2017-2018, ISO/IEC
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 *
 * * Neither the name of the ISO/IEC nor the names of its contributors
 *   may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "PCCTMC3Encoder.h"

#include <cassert>
#include <set>

#include "Attribute.h"
#include "pointset_processing.h"
#include "geometry.h"
#include "io_hls.h"
#include "osspecific.h"
#include "partitioning.h"
#include "pcc_chrono.h"
#include "ply.h"

namespace pcc {

//============================================================================

PCCTMC3Encoder3::PCCTMC3Encoder3() : _frameCounter(-1)
{}

//============================================================================

int
PCCTMC3Encoder3::compress(
  const PCCPointSet3& inputPointCloud,
  EncoderParams* params,
  PCCTMC3Encoder3::Callbacks* callback,
  PCCPointSet3* reconstructedCloud)
{
  // start of frame
  _frameCounter++;

  fixupParameterSets(params);

  // Determine input bounding box (for SPS metadata) if not manually set
  if (params->sps.seq_bounding_box_whd == Vec3<int>{0}) {
    const auto& bbox = inputPointCloud.computeBoundingBox();
    for (int k = 0; k < 3; k++) {
      params->sps.seq_bounding_box_xyz0[k] = int(bbox.min[k]);

      // somehow determine the decoder's reconstructed points bounding box
      // and update sps accordingly.
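      // (scaling the extent to the coded domain and back snaps it to the
      // quantisation grid, which approximates the reconstructed extent)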
      auto max_k = bbox.max[k] - bbox.min[k];
      max_k = std::round(max_k * params->sps.seq_source_geom_scale_factor);
      max_k = std::round(max_k / params->sps.seq_source_geom_scale_factor);

      // NB: plus one to convert to range
      params->sps.seq_bounding_box_whd[k] = int(max_k) + 1;
    }
  }

  // placeholder to "activate" the parameter sets
  _sps = &params->sps;
  _gps = &params->gps;
  _aps.clear();
  for (const auto& aps : params->aps) {
    _aps.push_back(&aps);
  }

  // write out all parameter sets prior to encoding
  callback->onOutputBuffer(write(*_sps));
  callback->onOutputBuffer(write(*_gps));
  for (const auto aps : _aps) {
    callback->onOutputBuffer(write(*aps));
  }

  // initial geometry IDs
  _tileId = 0;
  _sliceId = 0;
  _sliceOrigin = Vec3<int>{0};

  // Partition the input point cloud into tiles
  //  - quantize the input point cloud (without duplicate point removal)
  //  - inverse quantize the cloud above to get the initial-sized cloud
  //  - if tile partitioning is enabled, the partitioning function produces
  //    the tileMaps vectors, which map tile IDs to point indexes.
  //    Compute the tile metadata for each partition.
  //  - if not, regard the whole input cloud as a single tile to facilitate
  //    the subsequent slice partitioning
  //  todo(df):
  PartitionSet partitions;
  PCCPointSet3 quantizedInputCloud;
  quantizedInputCloud = quantization(inputPointCloud);
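  // NB: quantization() also refreshes quantizedToOrigin (populated when
  // duplicate points are merged), mapping each quantized position back to
  // the indexes of the original points it represents; it is used below to
  // gather each slice's original points for recolouring.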

  std::vector<std::vector<int32_t>> tileMaps;
  if (params->partition.tileSize) {
    PCCPointSet3 inverseQuantizedCloud;
    Box3<int32_t> clampBox{{0, 0, 0}, {INT32_MAX, INT32_MAX, INT32_MAX}};
    quantizePositions(
      1.0 / _sps->seq_source_geom_scale_factor, 0, clampBox,
      quantizedInputCloud, &inverseQuantizedCloud);
    tileMaps = tilePartition(params->partition, inverseQuantizedCloud);

    // Get the bounding box of the current tile and write it into the tileInventory
    partitions.tileInventory.tiles.resize(tileMaps.size());
    for (int t = 0; t < tileMaps.size(); t++) {
      Box3<int32_t> bbox = inverseQuantizedCloud.computeBoundingBox(
        tileMaps[t].begin(), tileMaps[t].end());

      auto& tileIvt = partitions.tileInventory.tiles[t];
      for (int k = 0; k < 3; k++) {
        tileIvt.tile_bounding_box_whd[k] = bbox.max[k] - bbox.min[k];
        tileIvt.tile_bounding_box_xyz0[k] =
          bbox.min[k] - _sps->seq_bounding_box_xyz0[k];
      }
    }
  } else {
    tileMaps.emplace_back();
    auto& tile = tileMaps.back();
    for (int i = 0; i < quantizedInputCloud.getPointCount(); i++)
      tile.push_back(i);
  }

  // If partitioning is not enabled, encode input as a single "partition"
  if (params->partition.method == PartitionMethod::kNone) {
    // todo(df): params->gps.geom_box_present_flag = false;
    _sliceOrigin = Vec3<int>{0};
    compressPartition(
      quantizedInputCloud, inputPointCloud, params, callback,
      reconstructedCloud);
    return 0;
  }

  // Partition the input point cloud
  //  - get the partial cloud of each tile
  //  - partitioning function produces a list of point indexes, origin and
  //    optional tile metadata for each partition.
  //  - encode any tile metadata
  //  todo(df): consider requiring partitioning function to sort the input
  //            points and provide ranges rather than a set of indices.
  do {
    Box3<int32_t> clampBox{{0, 0, 0}, {INT32_MAX, INT32_MAX, INT32_MAX}};

    for (int t = 0; t < tileMaps.size(); t++) {
      const auto& tile = tileMaps[t];

      // Get the point cloud of the current tile and compute its bounding box
      PCCPointSet3 tileCloud;
      getSrcPartition(quantizedInputCloud, tileCloud, tile);
      Box3<int32_t> bbox = tileCloud.computeBoundingBox();
      Vec3<int> tile_quantized_box_xyz0;
      for (int k = 0; k < 3; k++) {
        tile_quantized_box_xyz0[k] = int(bbox.min[k]);
      }

      // Move the tile cloud to the coordinate origin
      // for the convenience of slice partitioning
      quantizePositions(
        1, tile_quantized_box_xyz0, clampBox, tileCloud, &tileCloud);

      // Slice partition of the current tile
      std::vector<Partition> curSlices;
      switch (params->partition.method) {
      // NB: this method is handled earlier
      case PartitionMethod::kNone: return 1;

      case PartitionMethod::kUniformGeom:
        curSlices = partitionByUniformGeom(
          params->partition, tileCloud, t, _gps->trisoup_node_size_log2);
        break;

      case PartitionMethod::kOctreeUniform:
        curSlices = partitionByOctreeDepth(params->partition, tileCloud, t);
        break;
      }
      // Map slice indexes to tile indexes (the original indexes)
      for (int i = 0; i < curSlices.size(); i++) {
        for (int p = 0; p < curSlices[i].pointIndexes.size(); p++) {
          curSlices[i].pointIndexes[p] = tile[curSlices[i].pointIndexes[p]];
        }
      }
      // Adjust the number of points in each slice to lie between
      // sliceMinPoints and sliceMaxPoints by merging small slices and
      // splitting large ones.
      refineSlices(params->partition, inputPointCloud, curSlices);

      partitions.slices.insert(
        partitions.slices.end(), curSlices.begin(), curSlices.end());
    }
    std::cout << "Slice number: " << partitions.slices.size() << std::endl;
  } while (0);

  if (partitions.tileInventory.tiles.size() > 1) {
    assert(partitions.tileInventory.tiles.size() == tileMaps.size());
    std::cout << "Tile number: " << tileMaps.size() << std::endl;
    callback->onOutputBuffer(write(partitions.tileInventory));
  }

  // Encode each partition:
  //  - create a pointset comprising just the partitioned points
  //  - compress
  for (const auto& partition : partitions.slices) {
    // create partitioned point set
    PCCPointSet3 srcPartition;
    getSrcPartition(quantizedInputCloud, srcPartition, partition.pointIndexes);

    // Get the original partial cloud corresponding to each slice for recolor
    std::vector<int32_t> partitionOriginIdxes;
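    // a single quantized position may correspond to several input points;
    // collect the original index of every point that maps to this position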
    for (int i = 0; i < partition.pointIndexes.size(); i++) {
      const auto& point = srcPartition[i];
      std::multimap<point_t, int32_t>::iterator pos;
      for (pos = quantizedToOrigin.lower_bound(point);
           pos != quantizedToOrigin.upper_bound(point); ++pos) {
        partitionOriginIdxes.push_back(pos->second);
      }
    }
    PCCPointSet3 partitionInOriginCloud;
    getSrcPartition(
      inputPointCloud, partitionInOriginCloud, partitionOriginIdxes);

    _sliceId = partition.sliceId;
    _tileId = partition.tileId;
    _sliceOrigin = partition.origin;
    compressPartition(
      srcPartition, partitionInOriginCloud, params, callback,
      reconstructedCloud);
  }

  return 0;
}

//----------------------------------------------------------------------------

void
PCCTMC3Encoder3::fixupParameterSets(EncoderParams* params)
{
  // fixup parameter set IDs
  params->sps.sps_seq_parameter_set_id = 0;
  params->gps.gps_seq_parameter_set_id = 0;
  params->gps.gps_geom_parameter_set_id = 0;
  for (int i = 0; i < params->aps.size(); i++) {
    params->aps[i].aps_seq_parameter_set_id = 0;
    params->aps[i].aps_attr_parameter_set_id = i;
  }

  // development level / header
  params->sps.profileCompatibility.profile_compatibility_flags = 0;
  params->sps.level = 0;

  // use one bit to indicate frame boundaries
  params->sps.log2_max_frame_idx = 1;

  // the encoder writes out the slice origin in the GBH
  // NB: this may be disabled during encoding
  params->gps.geom_box_present_flag = true;
  params->gps.geom_box_log2_scale_present_flag = true;
  params->gps.gps_geom_box_log2_scale = 0;

  // only enable octree qp offset signalling if a non-negative offset depth
  // has been configured
  params->gbh.geom_octree_qp_offset_enabled_flag =
    params->gbh.geom_octree_qp_offset_depth >= 0;

  // fixup attribute parameters
  for (auto it : params->attributeIdxMap) {
    auto& attr_sps = params->sps.attributeSets[it.second];
    auto& attr_aps = params->aps[it.second];
    auto& attr_enc = params->attr[it.second];
    attr_sps.attr_instance_id = it.second;

    // the encoder options may not specify sufficient offsets for the number
    // of layers used by the syntax: extend with the last value as appropriate
    if (!attr_enc.abh.attr_layer_qp_delta_luma.empty()) {
      int numLayers =
        attr_aps.attr_encoding == AttributeEncoding::kRAHTransform
        ? attr_aps.raht_depth + 1
        : attr_aps.num_detail_levels + 1;

      attr_enc.abh.attr_layer_qp_delta_luma.resize(
        numLayers, attr_enc.abh.attr_layer_qp_delta_luma.back());

      attr_enc.abh.attr_layer_qp_delta_chroma.resize(
        numLayers, attr_enc.abh.attr_layer_qp_delta_chroma.back());
    }
  }
}

//----------------------------------------------------------------------------

void
PCCTMC3Encoder3::compressPartition(
  const PCCPointSet3& inputPointCloud,
  const PCCPointSet3& originPartCloud,
  EncoderParams* params,
  PCCTMC3Encoder3::Callbacks* callback,
  PCCPointSet3* reconstructedCloud)
{
  // geometry compression consists of the following stages:
  //  - prefilter/quantize geometry (non-normative)
  //  - encode geometry (single slice, id = 0)
  //  - recolour

  pointCloud.clear();
  pointCloud = inputPointCloud;

  // Offset the point cloud to account for (preset) _sliceOrigin.
  // The new maximum bounds of the offset cloud
  Vec3<int> maxBound{0};

  const size_t pointCount = pointCloud.getPointCount();
  for (size_t i = 0; i < pointCount; ++i) {
    const point_t point = (pointCloud[i] -= _sliceOrigin);
    for (int k = 0; k < 3; ++k) {
      const int k_coord = int(point[k]);
      assert(k_coord >= 0);
      if (maxBound[k] < k_coord)
        maxBound[k] = k_coord;
    }
  }

  // todo(df): don't update maxBound if something is forcing the value?
  _sliceBoxWhd = maxBound;
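  // the slice extent derived here sets the octree root node size in
  // encodeGeometryBrick()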

  // geometry encoding
  if (1) {
    PayloadBuffer payload(PayloadType::kGeometryBrick);

    pcc::chrono::Stopwatch<pcc::chrono::utime_inc_children_clock> clock_user;
    clock_user.start();

    encodeGeometryBrick(params, &payload);

    clock_user.stop();

    double bpp = double(8 * payload.size()) / inputPointCloud.getPointCount();
    std::cout << "positions bitstream size " << payload.size() << " B (" << bpp
              << " bpp)\n";

    auto total_user = std::chrono::duration_cast<std::chrono::milliseconds>(
      clock_user.count());
    std::cout << "positions processing time (user): "
              << total_user.count() / 1000.0 << " s" << std::endl;

    callback->onOutputBuffer(payload);
  }

  // recolouring

  // NB: recolouring is required if points are added / removed
  if (_gps->geom_unique_points_flag || _gps->trisoup_node_size_log2 > 0) {
    for (const auto& attr_sps : _sps->attributeSets) {
      recolour(
        attr_sps, params->recolour, originPartCloud,
        _sps->seq_source_geom_scale_factor, _sps->seq_bounding_box_xyz0,
        _sliceOrigin, &pointCloud);
    }
  }

  // dump recoloured point cloud
  // todo(df): this needs to work with partitioned clouds
  callback->onPostRecolour(pointCloud);

  // attributeCoding
  auto attrEncoder = makeAttributeEncoder();

  // for each attribute
  for (const auto& it : params->attributeIdxMap) {
    int attrIdx = it.second;
    const auto& attr_sps = _sps->attributeSets[attrIdx];
    const auto& attr_aps = *_aps[attrIdx];
    const auto& attr_enc = params->attr[attrIdx];
    const auto& label = attr_sps.attributeLabel;

    PayloadBuffer payload(PayloadType::kAttributeBrick);

    pcc::chrono::Stopwatch<pcc::chrono::utime_inc_children_clock> clock_user;
    clock_user.start();

    // todo(df): move elsewhere?
    AttributeBrickHeader abh;
    abh.attr_attr_parameter_set_id = attr_aps.aps_attr_parameter_set_id;
    abh.attr_sps_attr_idx = attrIdx;
    abh.attr_geom_slice_id = _sliceId;
    abh.attr_qp_delta_luma = 0;
    abh.attr_qp_delta_chroma = 0;
    abh.attr_layer_qp_delta_luma = attr_enc.abh.attr_layer_qp_delta_luma;
    abh.attr_layer_qp_delta_chroma = attr_enc.abh.attr_layer_qp_delta_chroma;

    abh.attr_region_qp_present_flag = false;
    abh.attr_region_qp_origin = Vec3<int>{0};
    abh.attr_region_qp_whd = Vec3<int>{0};
    abh.attr_region_qp_delta = 0;

    write(attr_aps, abh, &payload);

    // replace the attribute encoder if not compatible
    if (!attrEncoder->isReusable(attr_aps))
      attrEncoder = makeAttributeEncoder();

    attrEncoder->encode(*_sps, attr_sps, attr_aps, abh, pointCloud, &payload);
    clock_user.stop();

    int coded_size = int(payload.size());
    double bpp = double(8 * coded_size) / inputPointCloud.getPointCount();
    std::cout << label << "s bitstream size " << coded_size << " B (" << bpp
              << " bpp)\n";

    auto time_user = std::chrono::duration_cast<std::chrono::milliseconds>(
      clock_user.count());
    std::cout << label
              << "s processing time (user): " << time_user.count() / 1000.0
              << " s" << std::endl;

    callback->onOutputBuffer(payload);
  }

  // prevent re-use of this sliceId:  the next slice (geometry + attributes)
  // should be distinguishable from the current slice.
  _sliceId++;

  appendReconstructedPoints(reconstructedCloud);
}

//----------------------------------------------------------------------------

void
PCCTMC3Encoder3::encodeGeometryBrick(
  const EncoderParams* params, PayloadBuffer* buf)
{
  GeometryBrickHeader gbh;
  gbh.geom_geom_parameter_set_id = _gps->gps_geom_parameter_set_id;
  gbh.geom_slice_id = _sliceId;
  gbh.geom_tile_id = std::max(0, _tileId);
  gbh.frame_idx = _frameCounter & ((1 << _sps->log2_max_frame_idx) - 1);
  gbh.geomBoxOrigin = _sliceOrigin;
  gbh.geom_box_log2_scale = 0;

  if (!_gps->implicit_qtbt_enabled_flag) {
    // todo(df): confirm minimum of 1 isn't needed
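    // ceillog2(x + 1) gives the number of bits needed to address coordinate
    // values 0..x, i.e. a root node large enough to span the slice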
    int32_t maxBB =
      std::max({1, _sliceBoxWhd[0], _sliceBoxWhd[1], _sliceBoxWhd[2]});
    gbh.geom_max_node_size_log2 = ceillog2(maxBB + 1);
  } else {
    // different node dimension for xyz, for the purpose of implicit qtbt
    gbh.geom_max_node_size_log2_xyz[0] = ceillog2(_sliceBoxWhd[0] + 1);
    gbh.geom_max_node_size_log2_xyz[1] = ceillog2(_sliceBoxWhd[1] + 1);
    gbh.geom_max_node_size_log2_xyz[2] = ceillog2(_sliceBoxWhd[2] + 1);
  }

  gbh.geom_num_points = int(pointCloud.getPointCount());
  gbh.geom_slice_qp_offset = params->gbh.geom_slice_qp_offset;
  gbh.geom_octree_qp_offset_enabled_flag =
    params->gbh.geom_octree_qp_offset_enabled_flag;
  gbh.geom_octree_qp_offset_depth = params->gbh.geom_octree_qp_offset_depth;
  write(*_sps, *_gps, gbh, buf);

  // todo(df): remove estimate when arithmetic codec is replaced
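  // (roughly 3 components x 4 bytes per point, plus fixed headroom)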
  int maxAcBufLen = int(pointCloud.getPointCount()) * 3 * 4 + 1024;
  EntropyEncoder arithmeticEncoder(maxAcBufLen, nullptr);
  arithmeticEncoder.enableBypassStream(_sps->cabac_bypass_stream_enabled_flag);
  arithmeticEncoder.start();

  if (_gps->trisoup_node_size_log2 == 0) {
    encodeGeometryOctree(*_gps, gbh, pointCloud, &arithmeticEncoder);
  } else {
    encodeGeometryTrisoup(*_gps, gbh, pointCloud, &arithmeticEncoder);
  }

  uint32_t dataLen = arithmeticEncoder.stop();
  std::copy_n(arithmeticEncoder.buffer(), dataLen, std::back_inserter(*buf));
}

//----------------------------------------------------------------------------

void
PCCTMC3Encoder3::appendReconstructedPoints(PCCPointSet3* reconstructedCloud)
{
  if (reconstructedCloud == nullptr) {
    return;
  }
  const size_t pointCount = pointCloud.getPointCount();
  size_t outIdx = reconstructedCloud->getPointCount();

  reconstructedCloud->addRemoveAttributes(
    pointCloud.hasColors(), pointCloud.hasReflectances());
  reconstructedCloud->resize(outIdx + pointCount);

  for (size_t i = 0; i < pointCount; ++i, ++outIdx) {
    (*reconstructedCloud)[outIdx] = pointCloud[i] + _sliceOrigin;

    if (pointCloud.hasColors()) {
      reconstructedCloud->setColor(outIdx, pointCloud.getColor(i));
    }
    if (pointCloud.hasReflectances()) {
      reconstructedCloud->setReflectance(outIdx, pointCloud.getReflectance(i));
    }
  }
}

//----------------------------------------------------------------------------
// translates and scales inputPointCloud, returning the quantized cloud for
// use by the encoding process.

PCCPointSet3
PCCTMC3Encoder3::quantization(const PCCPointSet3& inputPointCloud)
{
  PCCPointSet3 pointCloud0;
  pointCloud0.clear();
  quantizedToOrigin.clear();

  // Currently the sequence width/height/depth must be set
  assert(_sps->seq_bounding_box_whd != Vec3<int>{0});

  // Clamp all points to [clampBox.min, clampBox.max] after translation
  // and quantisation.
  Box3<int32_t> clampBox{{0, 0, 0}, {INT32_MAX, INT32_MAX, INT32_MAX}};
  if (_sps->seq_bounding_box_whd != Vec3<int>{0}) {
    // todo(df): this is icky (not to mention rounding issues)
    // NB: the sps seq_bounding_box_* uses unscaled co-ordinates => convert
    // NB: minus 1 to convert to max x/y/z position
    clampBox = Box3<int32_t>{{0, 0, 0}, _sps->seq_bounding_box_whd};
    for (int k = 0; k < 3; k++)
      clampBox.max[k] =
        int(ceil(clampBox.max[k] * _sps->seq_source_geom_scale_factor)) - 1;
  }

  if (_gps->geom_unique_points_flag) {
    quantizePositionsUniq(
      _sps->seq_source_geom_scale_factor, _sps->seq_bounding_box_xyz0,
      clampBox, inputPointCloud, &pointCloud0, quantizedToOrigin);
  } else {
    quantizePositions(
      _sps->seq_source_geom_scale_factor, _sps->seq_bounding_box_xyz0,
      clampBox, inputPointCloud, &pointCloud0);
  }

  // Offset the point cloud to account for (preset) _sliceOrigin.
  // The new maximum bounds of the offset cloud
  Vec3<int> maxBound{0};

  const size_t pointCount = pointCloud0.getPointCount();
  for (size_t i = 0; i < pointCount; ++i) {
    const point_t point = (pointCloud0[i] -= _sliceOrigin);
    for (int k = 0; k < 3; ++k) {
      const int k_coord = int(point[k]);
      assert(k_coord >= 0);
      if (maxBound[k] < k_coord)
        maxBound[k] = k_coord;
    }
  }

  // todo(df): don't update maxBound if something is forcing the value?
  _sliceBoxWhd = maxBound;
  return pointCloud0;
}

//----------------------------------------------------------------------------
// get the partial point cloud according to required point indexes
void
PCCTMC3Encoder3::getSrcPartition(
  const PCCPointSet3& inputPointCloud,
  PCCPointSet3& srcPartition,
  std::vector<int32_t> Indexes)
{
  //PCCPointSet3 srcPartition;
  srcPartition.addRemoveAttributes(
    inputPointCloud.hasColors(), inputPointCloud.hasReflectances());

  int partitionSize = Indexes.size();
  srcPartition.resize(partitionSize);

  for (int i = 0; i < partitionSize; i++) {
    int inputIdx = Indexes[i];
    srcPartition[i] = inputPointCloud[inputIdx];

    if (inputPointCloud.hasColors())
      srcPartition.setColor(i, inputPointCloud.getColor(inputIdx));

    if (inputPointCloud.hasReflectances())
      srcPartition.setReflectance(i, inputPointCloud.getReflectance(inputIdx));
  }

  //return srcPartition;
}

//============================================================================

}  // namespace pcc