Commit 89d96b9e authored by Paal Kvamme

New FileStatistics methods, to be used in upcoming changes.

parent 06dd5e92
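
The diff adds two pieces of layout bookkeeping to FileStatistics: the lowest file offset of any stored brick or tile (exposed as dataStart()) and the per-segment sizes reported by the backend for cloud storage (exposed as segmentSizes()). As a rough sketch of how the new accessors might be consumed, assuming the surrounding public API provides OpenZGY::IZgyReader::open() and close() and an "openzgy/api.h" include path (those names are assumptions, not part of this commit):

// Hypothetical usage sketch, not part of the commit.
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include "openzgy/api.h" // assumed include path for the public OpenZGY API

static void show_layout(const std::string& filename)
{
  auto reader = OpenZGY::IZgyReader::open(filename); // assumed open() entry point
  auto stats  = reader->filestats();
  std::cout << "File size:      " << stats->fileSize()   << " bytes\n"
            << "Header size:    " << stats->headerSize() << " bytes\n"
            << "Data starts at: " << stats->dataStart()  << "\n";
  // segmentSizes() is only populated for cloud storage; empty for local files.
  for (std::int64_t segsize : stats->segmentSizes())
    std::cout << "Segment of      " << segsize << " bytes\n";
  stats->dump(std::cout, "filestats: "); // same call the updated test below uses
  reader->close();
}

The filestats() and dump() calls are the ones exercised by the test change at the bottom of this diff; everything else in the sketch is assumed context.
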
@@ -362,10 +362,12 @@ public:
case BrickStatus::Constant: result._alpha_constant_count += 1; break;
case BrickStatus::Normal:
result._alpha_normal_count += 1;
result._data_start = (result._data_start < 0) ? info.offset_in_file : std::min(result._data_start, info.offset_in_file);
break;
case BrickStatus::Compressed:
result._alpha_compressed_count += 1;
result._alpha_compressed_size += info.size_in_file;
result._data_start = (result._data_start < 0) ? info.offset_in_file : std::min(result._data_start, info.offset_in_file);
break;
}
}
@@ -376,10 +378,13 @@ public:
case BrickStatus::Missing: result._brick_missing_count += 1; break;
case BrickStatus::Constant: result._brick_constant_count += 1; break;
case BrickStatus::Normal:
result._brick_normal_count += 1; break;
result._brick_normal_count += 1;
result._data_start = (result._data_start < 0) ? info.offset_in_file : std::min(result._data_start, info.offset_in_file);
break;
case BrickStatus::Compressed:
result._brick_compressed_count += 1;
result._brick_compressed_size += info.size_in_file;
result._data_start = (result._data_start < 0) ? info.offset_in_file : std::min(result._data_start, info.offset_in_file);
break;
}
}
@@ -712,6 +717,7 @@ public:
(new FileStatistics(*filestats_nocache()));
// The base class has no _fd member so I need to set the size here.
result->_file_size = _fd->xx_eof();
result->_segment_sizes = _fd->xx_segments(false);
{
// Too bad there is no proper atomic_shared_ptr yet.
std::lock_guard<std::mutex> lk(_filestats_mutex);
@@ -1580,6 +1586,7 @@ public:
(new FileStatistics(*filestats_nocache()));
// The base class has no _fd member so I need to set the size here.
result->_file_size = _fd->xx_eof();
result->_segment_sizes = _fd->xx_segments(false);
return result;
}
......
@@ -23,6 +23,7 @@
#include <memory>
#include <ostream>
#include <iostream>
#include <sstream>
#include <string>
#include <mutex>
@@ -474,6 +475,8 @@ private:
std::int64_t _file_version;
std::int64_t _file_size;
std::int64_t _header_size;
std::int64_t _data_start;
std::vector<std::int64_t> _segment_sizes;
//std::int64_t _padding_size;
//std::int64_t _wasted_size;
std::int64_t _alpha_normal_count;
@@ -499,6 +502,8 @@ FileStatistics()
: _file_version(0)
, _file_size(0)
, _header_size(0)
, _data_start(-1)
, _segment_sizes()
//, _padding_size(0)
//, _wasted_size(0)
, _alpha_normal_count(0)
@@ -526,6 +531,10 @@ FileStatistics()
std::int64_t fileSize() const { return _file_size; }
/// Size of all headers.
std::int64_t headerSize() const { return _header_size; }
/// Lowest address of any brick or tile, or -1 if there are none.
std::int64_t dataStart() const { return _data_start; }
/// Used for cloud storage only.
const std::vector<std::int64_t>& segmentSizes() const {return _segment_sizes;}
// Wasted due to first brick alignment.
//std::int64_t paddingSize() const { return _padding_size; }
// Wasted due to other reasons.
@@ -596,6 +605,9 @@ FileStatistics()
* For debugging. Output most of the information to the supplied ostream.
*/
void dump(std::ostream& out, const std::string& prefix = "") const {
std::stringstream segs;
for (std::int64_t it : _segment_sizes)
segs << " " << it;
out << prefix << "ZGY version " << _file_version
<< " file compressed to "
<< int(100.0 * _compression_factor) << "% of original\n"
@@ -603,6 +615,8 @@ FileStatistics()
<< _file_size << " bytes of which "
<< _header_size << " are in headers and "
<< _file_size - _used_size << " wasted\n"
<< prefix << "Segments:" << segs.str() << ", "
<< "Data area starts at: " << _data_start << "\n"
<< prefix << "Alpha: "
<< _alpha_missing_count << " missing, "
<< _alpha_constant_count << " constant, "
......
@@ -1058,6 +1058,7 @@ do_check_written(const std::string& filename, const IOContext* context = nullptr
std::cout << "\n";
dump_api(reader, std::cout);
//reader->dump(std::cout);
reader->filestats()->dump(std::cout, "filestats: ");
}
std::unique_ptr<float[]> checkdata(new float[64*64*64]);
const OpenZGY::IZgyWriter::size3i_t origin{0,0,0};
......
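
For readers who want the new bookkeeping in isolation, the following self-contained sketch restates the per-brick scan that the first two hunks extend. The BrickInfo and Stats types and the scan() helper are invented names for illustration; only the counting rules and the "lowest offset, or -1 if no bricks are stored" convention follow the diff.

// Condensed, hypothetical restatement of the per-brick scan; names invented.
#include <algorithm>
#include <cstdint>
#include <vector>

enum class BrickStatus { Missing, Constant, Normal, Compressed };

struct BrickInfo                    // stand-in for the real lookup-table entry
{
  BrickStatus  status;
  std::int64_t offset_in_file;
  std::int64_t size_in_file;
};

struct Stats
{
  std::int64_t missing_count{0};
  std::int64_t constant_count{0};
  std::int64_t normal_count{0};
  std::int64_t compressed_count{0};
  std::int64_t compressed_size{0};
  std::int64_t data_start{-1};      // lowest address of any stored brick, or -1 if none
};

static Stats scan(const std::vector<BrickInfo>& bricks)
{
  Stats result;
  for (const BrickInfo& info : bricks) {
    switch (info.status) {
    case BrickStatus::Missing:  result.missing_count  += 1; break;
    case BrickStatus::Constant: result.constant_count += 1; break;
    case BrickStatus::Normal:
      // Only Normal and Compressed bricks occupy space in the file,
      // so only those can move the start of the data area.
      result.normal_count += 1;
      result.data_start = result.data_start < 0 ?
        info.offset_in_file : std::min(result.data_start, info.offset_in_file);
      break;
    case BrickStatus::Compressed:
      result.compressed_count += 1;
      result.compressed_size += info.size_in_file;
      result.data_start = result.data_start < 0 ?
        info.offset_in_file : std::min(result.data_start, info.offset_in_file);
      break;
    }
  }
  return result;
}
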