Commit ae0b2a4c authored by Paal Kvamme's avatar Paal Kvamme
Browse files

Refactoring: Flatten one loop.

parent d50b5edb
......@@ -1543,8 +1543,18 @@ ZgyInternalBulk::_writeAlignedRegion(
const index3_t survey_beg {0,0,0};
const index3_t survey_size = (ih.size() + (lodfactor - 1)) / lodfactor;
const index3_t count = data->size3d();
const index3_t beg_brick = (start / bs) * bs;
const index3_t end_brick = ((start + count + bs - (std::int64_t)1) / bs) * bs;
// Build a flat worklist of brick-aligned corner positions covering the
// requested write region, so the per-brick processing further down can be
// a single loop instead of three nested ones.
std::vector<index3_t> work;
{
// Round the region start down, and the region end up, to brick boundaries.
const index3_t beg_brick = (start / bs) * bs;
const index3_t end_brick = ((start + count + bs - (std::int64_t)1) / bs) * bs;
index3_t it;
// Enumerate every brick corner in the half-open box
// [beg_brick, end_brick), stepping one brick size per dimension.
for (it[0] = beg_brick[0]; it[0] < end_brick[0]; it[0] += bs[0])
for (it[1] = beg_brick[1]; it[1] < end_brick[1]; it[1] += bs[1])
for (it[2] = beg_brick[2]; it[2] < end_brick[2]; it[2] += bs[2])
work.push_back(it);
}
// Cache the count; the loop below indexes work[] by position.
const std::size_t worksize = work.size();
if (_logger(1))
_logger(1, std::stringstream()
<< "_writeAlignedRegion("
......@@ -1575,68 +1585,41 @@ ZgyInternalBulk::_writeAlignedRegion(
brick = DataBuffer::makeDataBuffer3d(nullptr, 0, bs, data->datatype());
do_copy = true;
}
index3_t it;
for (it[0] = beg_brick[0]; it[0] < end_brick[0]; it[0] += bs[0]) {
for (it[1] = beg_brick[1]; it[1] < end_brick[1]; it[1] += bs[1]) {
for (it[2] = beg_brick[2]; it[2] < end_brick[2]; it[2] += bs[2]) {
const index3_t surveypos = it; // user's start i0,j0,k0 rounded down
const index3_t brickpos = it / bs; // as above, but in brick coords
if (do_copy) {
brick->fill(defaultstorage);
brick->copyFrom(data.get(), // source
start.data(), surveypos.data(), // in survey coords
survey_beg.data(), survey_size.data()); // clip to srv
// The Python version:
//self._partialCopy(data, start, data.shape,
// brick, surveypos, bs,
// survey_beg, survey_size)
}
// TODO-Medium: Note Compression:
// This might be a good extension point, as this is the last place
// where we still have a list of bricks to be written. So it would
// be easier to parallelize here. But we don't yet know which
// bricks will be written as all-constant so there would need to be
// some refactoring. _writeOneNormalBrick() probably needs to be called
// from here. See comments in that function; it will need some
// changes. And _writeOneBrick() needs to be told whether the data
// has been compressed or not. Instead of being told which
// compressor (if any) to use. Another point: It would be nice
// to be able to run also the copyFrom async. So I might need a
// "fetch-data" functor.
// Note errorhandling:
// If there are any errors during _writeOneBrick() this probably
// means the entire file is a lost cause. This is true also for
// ZgyUserError raised in the file layer, because at that layer
// the "user" is really OpenZGY and not some client code. The only
// acceptable error is ZgySegmentIsClosed, and that will be caught
// and handled at lower levels.
// TODO-Low the logic here can probably be refined. If there is no
// buffering (on-prem files) then a write can probably just be
// re-tried if we make sure the write is done before updating
// metadata. But do we really want that complexity? The risk of
// transient errors is way higher with cloud access. And due to
// buffering in the file layer those would also be a lot harder to
// recover from.
ErrorsWillCorruptFile watchdog(this);
std::shared_ptr<const WriteBrickArgPack> args =
std::make_shared<const WriteBrickArgPack>
(brickpos, lod, brick, compressor, 0);
args = _writeOneBrick(*args);
if (args->data->isScalar()) {
_writeOneConstantBrick(*args);
}
else {
std::shared_ptr<const WriteNowArgPack> now =
_writeOneNormalBrick(*args);
_writeWithRetry(*now);
}
watchdog.disarm();
}
// Process every brick in the flattened worklist built above.
for (std::size_t ix = 0; ix < worksize; ++ix) {
const index3_t surveypos = work[ix]; // user's start i0,j0,k0 rounded down
const index3_t brickpos = work[ix] / bs; // as above, but in brick coords
if (do_copy) {
// Start from the default value, then overlay the caller's data,
// clipped to the survey extent.
brick->fill(defaultstorage);
brick->copyFrom(data.get(), // source
start.data(), surveypos.data(), // in survey coords
survey_beg.data(), survey_size.data()); // clip to srv
}
// Note errorhandling:
// If there are any errors during _writeOneBrick() this probably
// means the entire file is a lost cause. This is true also for
// ZgyUserError raised in the file layer, because at that layer
// the "user" is really OpenZGY and not some client code. The only
// acceptable error is ZgySegmentIsClosed, and that will be caught
// and handled at lower levels.
// TODO-Low: Might implement retrying of writes at a lower level.
// In that case we still shouldn't see those errors here.
// Any exception escaping before disarm() marks the file as corrupt.
ErrorsWillCorruptFile watchdog(this);
std::shared_ptr<const WriteBrickArgPack> args =
std::make_shared<const WriteBrickArgPack>
(brickpos, lod, brick, compressor, 0);
// _writeOneBrick() may replace the payload, e.g. collapse it to a
// scalar; dispatch on the result below.
args = _writeOneBrick(*args);
if (args->data->isScalar()) {
_writeOneConstantBrick(*args);
}
else {
std::shared_ptr<const WriteNowArgPack> now =
_writeOneNormalBrick(*args);
_writeWithRetry(*now);
}
// Reached only on success; the brick was written without error.
watchdog.disarm();
}
}
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment