Commit bdcedb07 authored by Paal Kvamme

Merge branch 'kvamme62/coverage' into 'master'

Test coverage

See merge request !61
parents e0fdf338 c503ca0f
Pipeline #34748 passed with stages in 9 minutes and 4 seconds
......@@ -42,8 +42,10 @@ steps:
docker build --pull -t ${TAG} -f ${DOCKERFILE} .
docker rmi -f ${TAG}:old ${TAG}:test || true
/bin/rm -f cid.txt
docker run --cidfile cid.txt -e LINUXDISTRO -e AZURE_BUILDID=$(Build.BuildId) ${TAG} /bin/bash scripts/build-coverage.sh
docker run --cidfile cid.txt -e OPENZGY_TOKEN -e OPENZGY_SDURL -e OPENZGY_SDAPIKEY -e LINUXDISTRO -e AZURE_BUILDID=$(Build.BuildId) ${TAG} /bin/bash scripts/build-coverage.sh
docker cp $(cat cid.txt):/home/build/oz/build/deploy/native/coverage.tgz coverage.tgz
docker cp $(cat cid.txt):/home/build/oz/native/src/summary.txt summary.txt
docker cp $(cat cid.txt):/home/build/oz/native/src/final.info final.info
docker rm $(cat cid.txt)
/bin/rm -f cid.txt
env:
......@@ -56,6 +58,8 @@ steps:
inputs:
contents: |
coverage.tgz
summary.txt
final.info
targetFolder: $(Build.ArtifactStagingDirectory)
flattenFolders: false
......
......@@ -781,7 +781,7 @@ public:
, _accessor_rw()
, _dirty(false)
, _compressor(args._compressor)
, _lodcompressor(args._lodcompressor)
, _lodcompressor(args._lodcompressor ? args._lodcompressor : args._compressor)
{
const bool compress = args._compressor || args._lodcompressor;
// This is both pointless and expensive to support.
......@@ -848,6 +848,9 @@ public:
*/
void write(const size3i_t& start, const size3i_t& size, const float* data) override
{
if (errorflag())
throw Errors::ZgyCorruptedFile("Cannot continue due to previous errors.");
// TODO-Worry: The buffer is supposed to be copied at least once
// before being sent down to the lowest levels. The longer I wait
// before I do that, the higher the risk is that it isn't done.
......
// Copyright 2017-2020, Schlumberger
// Copyright 2017-2021, Schlumberger
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
......@@ -39,38 +39,6 @@ OrderedCornerPoints::Element::Element(index_type _i, index_type _j, annot_type _
{
}
OrderedCornerPoints::OrderedCornerPoints()
: m_element()
{
}
OrderedCornerPoints::OrderedCornerPoints
(
annot_type il0, annot_type ilinc, size_t ilcnt,
annot_type xl0, annot_type xlinc, size_t xlcnt,
const std::array<std::array<coord_type,2>,4>& ocp
)
{
// fill in (i, j) indices for each corner
m_element[Min0Min1] = Element(0, 0);
m_element[Max0Min1] = Element(ilcnt - 1, 0);
m_element[Min0Max1] = Element(0, xlcnt - 1);
m_element[Max0Max1] = Element(ilcnt - 1, xlcnt - 1);
// calculate in (inline, crossline) and fill in (x, y) for each corner
for (size_t i = 0; i < 4; ++i) {
// find (inline, crossline) annotation from (i, j) indices
m_element[i].il = il0 + ilinc*m_element[i].i;
m_element[i].xl = xl0 + xlinc*m_element[i].j;
// copy (x, y) from provided OCPs
m_element[i].x = ocp[i][0];
m_element[i].y = ocp[i][1];
}
}
OrderedCornerPoints::OrderedCornerPoints
(
annot_type il0, annot_type ilinc, size_t ilcnt,
......
// Copyright 2017-2020, Schlumberger
// Copyright 2017-2021, Schlumberger
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
......@@ -86,26 +86,6 @@ public:
Element(index_type _i, index_type _j, annot_type _il, annot_type _xl, coord_type _x, coord_type _y);
};
/** Default constructor. Initializes OCPs to zero. */
OrderedCornerPoints();
/**
* Construct OCPs from cube extent in annotation space and four OCP coordinates.
* @param il0 Inline annotation corresponding to bulk data index 0 along dimension 0 (I).
* @param ilinc Inline annotation increment corresponding to bulk data index increment of 1 along dimension 0 (I).
* @param ilcnt Number of samples along dimension 0 (I).
* @param xl0 Crossline annotation corresponding to bulk data index 0 along dimension 1 (J).
* @param xlinc Crossline annotation increment corresponding to bulk data index increment of 1 along dimension 1 (J).
* @param xlcnt Number of samples along dimension 1 (J).
* @param ocp Four sets of (x, y) map projection coordinates expected to be in correct OCP ordering.
*/
OrderedCornerPoints
(
annot_type il0, annot_type ilinc, size_t ilcnt,
annot_type xl0, annot_type xlinc, size_t xlcnt,
const std::array<std::array<coord_type,2>,4>& ocp
);
/**
* Construct OCPs from cube extent in annotation space and three ACPs.
* @param il0 Inline annotation corresponding to bulk data index 0 along dimension 0 (I).
......
......@@ -698,30 +698,6 @@ DataBufferNd<T,NDim>::itemsize() const
return sizeof(T);
}
/**
* Return the size of the buffer, as a pointer to the first dimension.
* This is unsafe since the compiler cannot check the length. This
* function is meant for internal use in this file. Used by CopySubset
* and CopySize. TODO-Low consider making those friends so that this method
* can be declared private or inlined.
*/
template <typename T, int NDim>
const std::int64_t*
DataBufferNd<T,NDim>::sizeptr() const
{
return _size.data();
}
/**
* Return the layout of the buffer, assuming it is 3d. See sizeptr().
*/
template <typename T, int NDim>
const std::int64_t*
DataBufferNd<T,NDim>::strideptr() const
{
return _stride.data();
}
/**
* Return the size of the buffer, assuming it is 3d. Which it almost always is.
* As a code smell this suggests that DataBuffer maybe shouldn't have been
......
......@@ -145,7 +145,7 @@ namespace InternalZGY {
* fix only helps for asynchronous writes, not reads.
*
* * TODO-Low: Remove the template on number of dimensions. This would
* make the API simpler. See e.g. sizeptr() vs. size3d(). Current usage
* make the API simpler. Current usage
* in OpenZGY is always NDim=3. If the Alpha tile feature is resurrected
* then there will be a need for 2d arrays, but can probably be handled
* as 2d arrays with nk=1.
......@@ -209,10 +209,6 @@ public:
virtual std::int64_t totalsize() const = 0;
//** Size in bytes of one element.
virtual std::int64_t itemsize() const = 0;
//** Number of elements as an unsafe (unchecked length) pointer.
virtual const std::int64_t* sizeptr() const = 0;
//** Buffer layout as an unsafe (unchecked length) pointer.
virtual const std::int64_t* strideptr() const = 0;
//** Number of elements in each of the (assumed) 3 dimensions
virtual std::array<std::int64_t,3> size3d() const = 0;
//** Buffer layout in each of the (assumed) 3 dimensions
......@@ -334,8 +330,6 @@ public:
std::int64_t allocsize() const override;
std::int64_t totalsize() const override;
std::int64_t itemsize() const override;
const std::int64_t* sizeptr() const override;
const std::int64_t* strideptr() const override;
std::array<std::int64_t,3> size3d() const override;
std::array<std::int64_t,3> stride3d() const override;
const ndsize_t& safesize() const {return _size;}
......
......@@ -14,6 +14,7 @@
#include "fancy_timers.h"
#include "environment.h"
#include <iostream>
namespace InternalZGY {
#if 0
......@@ -41,38 +42,58 @@ SummaryPrintingTimerEx::~SummaryPrintingTimerEx()
print();
}
/**
* \brief Convert a long integer to a human readable string.
* \details
* Use the largest suffix (TB, MB, etc.) that still allows the number
* to be displayed without any decimal point. So, 2*(1024^3) will be
* displayed as "2 GB", while that number plus 1024 will be displayed
* in kilobytes, i.e. "2097153 KB".
*/
std::string
SummaryPrintingTimerEx::niceSize(const std::string& label, std::int64_t n)
SummaryPrintingTimerEx::niceSize(std::int64_t n)
{
if (n > 10*1024*1024)
return label + std::to_string(n/(1024*1024)) + " MB";
else if (n > 10*1024)
return label + std::to_string(n/1024) + " kB";
else if (n != 0)
return label + std::to_string(n) + " bytes";
else
return std::string();
static struct {std::int64_t factor; const char *unit;} lookup[]{
{1024LL*1024LL*1024LL*1024LL, " TB"},
{1024*1024*1024, " GB"},
{1024*1024, " MB"},
{1024, " KB"},
{1, " bytes"},
};
std::string neg(n<0?"-":"");
n = std::abs(n);
for (const auto& it : lookup)
if (n >= it.factor && (n % it.factor) == 0)
return neg + std::to_string(n / it.factor) + std::string(it.unit);
return neg + std::to_string(n) + " bytes";
}
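A few results that follow directly from the lookup table above, written as a sketch (it assumes the class header and <cassert> are included; the calls themselves are only illustrative):

assert(SummaryPrintingTimerEx::niceSize(2LL*1024*1024*1024) == "2 GB");
assert(SummaryPrintingTimerEx::niceSize(2LL*1024*1024*1024 + 1024) == "2097153 KB");
assert(SummaryPrintingTimerEx::niceSize(-3*1024) == "-3 KB");
assert(SummaryPrintingTimerEx::niceSize(1000) == "1000 bytes");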
void
SummaryPrintingTimerEx::print()
SummaryPrintingTimerEx::printToFile(std::ostream& outstream, bool csv, bool clear)
{
if (getCount() != 0) {
std::string msg(isCSVEnabled() ? getCSV() : getValue(true, true));
std::string msg(csv ? getCSV() : getValue(true, true));
if (!msg.empty() && msg.back() == '\n')
msg = msg.substr(0, msg.size()-1);
if (isCSVEnabled())
std::cerr << msg
if (csv)
outstream << msg
<< "," << bytes_read_.load()
<< "," << bytes_written_.load()
<< std::endl;
else
std::cerr << msg
<< niceSize(", R: ", bytes_read_.load())
<< niceSize(", W: ", bytes_written_.load())
outstream << msg
<< ", R: " << niceSize(bytes_read_.load())
<< ", W: " << niceSize(bytes_written_.load())
<< std::endl;
}
reset(); // Prevent the base class from printing as well.
if (clear)
reset();
}
void
SummaryPrintingTimerEx::print()
{
printToFile(std::cerr, isCSVEnabled(), true);
}
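Illustrative use of the new printToFile() overload (the stats instance and the file name are assumptions, and <fstream> must be included): write the accumulated counters as CSV to a log file instead of std::cerr, without clearing them.

std::ofstream log("timers.csv", std::ofstream::app);
stats.printToFile(log, /*csv=*/true, /*clear=*/false);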
void
......
......@@ -18,7 +18,7 @@
#include <atomic>
#include <cstdint>
#include <string>
#include <iostream>
#include <ostream>
namespace InternalZGY {
#if 0
......@@ -46,8 +46,9 @@ class OPENZGY_TEST_API SummaryPrintingTimerEx : public SummaryPrintingTimer
public:
explicit SummaryPrintingTimerEx(const char *name);
virtual ~SummaryPrintingTimerEx();
static std::string niceSize(const std::string& label, std::int64_t n);
static std::string niceSize(std::int64_t n);
virtual void print();
void printToFile(std::ostream& os, bool csv, bool clear);
void addBytesRead(std::int64_t nbytes);
void addBytesWritten(std::int64_t nbytes);
static bool isCSVEnabled();
......
......@@ -49,14 +49,7 @@ FileADT::xx_close()
std::string
FileADT::_nice(std::int64_t n)
{
if (n >= 1024*1024 && (n % (1024*1024)) == 0)
return std::to_string(n/(1024*1024)) + " MB"; // whole number of MB
else if (n >= 256*1024 && (n % (256*1024)) == 0)
return std::to_string(n/(1024*1024.0)) + " MB"; // e.g. 42.75 MB
else if (n >= 1024 && (n % 1024) == 0)
return std::to_string(n/1024) + " kB";
else
return std::to_string(n) + " bytes";
return SummaryPrintingTimerEx::niceSize(n);
}
void
......
......@@ -13,6 +13,7 @@
// limitations under the License.
#include "file_consolidate.h"
#include "fancy_timers.h"
#include <algorithm>
#include <iostream>
......@@ -124,8 +125,8 @@ ConsolidateRequests::consolidate(
if (false && requests.size() != new_requests.size()) {
std::cout << "Consolidated " << requests.size()
<< " into " << new_requests.size() << "\n";
_print_requests(ReadDoubleList{requests}, "Requests:");
_print_requests(ReadDoubleList{new_requests}, "Consolidated:");
_print_requests(ReadDoubleList{requests}, "Requests:", std::cout);
_print_requests(ReadDoubleList{new_requests}, "Consolidated:", std::cout);
}
const std::int64_t old_size =
std::accumulate(requests.begin(), requests.end(), std::int64_t(0),
......@@ -279,44 +280,32 @@ ConsolidateRequests::_join_requests(
return new_requests;
}
static std::string nicenumber(std::int64_t n)
{
std::string neg(n<0?"-":"");
n = std::abs(n);
if (n >= (1024*1024*1024) && (n % (1024*1024*1024)) == 0)
return neg + std::to_string(n / (1024*1024*1024)) + " GB";
if (n >= (1024*1024) && (n % (1024*1024)) == 0)
return neg + std::to_string(n / (1024*1024)) + " MB";
if (n >= (1024) && (n % (1024)) == 0)
return neg + std::to_string(n / (1024)) + " KB";
return neg + std::to_string(n) + " bytes";
}
/**
* For debugging only, print a list of lists of requests.
*/
void
ConsolidateRequests::_print_requests(
const ReadDoubleList& all_requests,
const std::string& name)
const std::string& name,
std::ostream& outstream)
{
static auto printit = []
static auto printit = [&outstream]
(const char *name, std::int64_t offset, std::int64_t size) {
std::cout << std::dec
outstream << std::dec
<< " " << name
<< " offset " << std::setw(8) << nicenumber(offset)
<< " end " << std::setw(8) << nicenumber(offset+size)
<< " size " << std::setw(6) << nicenumber(size) << std::dec << "\n";
<< " offset " << std::setw(8) << SummaryPrintingTimerEx::niceSize(offset)
<< " end " << std::setw(8) << SummaryPrintingTimerEx::niceSize(offset+size)
<< " size " << std::setw(6) << SummaryPrintingTimerEx::niceSize(size) << std::dec << "\n";
};
if (all_requests.empty() ||
(all_requests.size() == 1 && all_requests.front().empty())) {
std::cout << " (empty)" << std::endl;
outstream << " (empty)" << std::endl;
return;
}
std::cout << name << "\n";
outstream << name << "\n";
for (const ReadList& group : all_requests) {
if (all_requests.size() > 1)
std::cout << " Group:\n";
outstream << " Group:\n";
std::int64_t prev_offset = -1, prev_size = 0;
for (const ReadRequest& rr : group) {
if (prev_offset != -1) {
......
......@@ -15,6 +15,7 @@
#pragma once
#include "file.h"
#include <ostream>
namespace InternalZGY {
#if 0
......@@ -45,7 +46,8 @@ public:
static void _print_requests(
const ReadDoubleList& all_requests,
const std::string& name);
const std::string& name,
std::ostream& os);
private:
......
......@@ -263,7 +263,7 @@ LocalFileLinux::xx_readv(const ReadList& requests, bool parallel_ok, bool immuta
// terms of the other is an implementation detail.
if (!parallel_ok || requests.size() < 2) {
for (const ReadRequest& r : requests) {
std::shared_ptr<char> data(new char[r.size]);
std::shared_ptr<char> data(new char[r.size], std::default_delete<char[]>());
this->LocalFileLinux::xx_read(data.get(), r.offset, r.size, usagehint);
_deliver(r.delivery, data, 0, r.size, transient_ok);
}
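The added std::default_delete<char[]> matters because a std::shared_ptr<char> owning the result of new char[] would otherwise release it with delete instead of delete[], which is undefined behavior. Two equivalent safe spellings, sketched with an arbitrary buffer size:

std::shared_ptr<char> a(new char[4096], std::default_delete<char[]>());
std::shared_ptr<char> b(new char[4096], [](char* p){ delete[] p; });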
......@@ -286,7 +286,7 @@ LocalFileLinux::xx_readv(const ReadList& requests, bool parallel_ok, bool immuta
const ReadRequest& r = requests[ii];
if (datasize < r.size || !data || !data.unique()) {
datasize = 0;
data.reset(new char[r.size]);
data.reset(new char[r.size], std::default_delete<char[]>());
datasize = r.size;
}
guard.run([&](){
......
......@@ -18,6 +18,7 @@
#include "../exception.h"
#include <string.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <cmath>
......@@ -26,8 +27,9 @@ namespace InternalZGY {
}
#endif
FileWithPerformanceLogger::FileWithPerformanceLogger(std::shared_ptr<FileADT> relay, std::int64_t chunksize, int hist_bincount, double hist_min, double hist_max, int interval)
FileWithPerformanceLogger::FileWithPerformanceLogger(std::shared_ptr<FileADT> relay, std::shared_ptr<std::ostream> outfile, std::int64_t chunksize, int hist_bincount, double hist_min, double hist_max, int interval)
: _relay(relay)
, _outfile(outfile)
, _chunksize(chunksize)
, _mutex()
, _nsamples(0)
......@@ -50,8 +52,8 @@ FileWithPerformanceLogger::~FileWithPerformanceLogger()
{
std::string str1 = dumpThroughput(true);
std::string str2 = dumpLatency(true);
if (!str1.empty() || !str2.empty())
std::cout << str1 + str2 << std::flush;
if (_outfile && (!str1.empty() || !str2.empty()))
*_outfile << str1 + str2 << std::flush;
}
void
......@@ -102,8 +104,20 @@ FileWithPerformanceLogger::xx_close()
_relay->xx_close();
std::string str1 = dumpThroughput(true);
std::string str2 = dumpLatency(true);
if (!str1.empty() || !str2.empty())
std::cout << str1 + str2 << std::flush;
if (_outfile && (!str1.empty() || !str2.empty()))
*_outfile << str1 + str2 << std::flush;
}
std::int64_t
FileWithPerformanceLogger::xx_eof() const
{
return _relay->xx_eof();
}
bool
FileWithPerformanceLogger::xx_iscloud() const
{
return _relay->xx_iscloud();
}
void
......@@ -120,8 +134,8 @@ FileWithPerformanceLogger::add(const Timer& timer, std::int64_t blocksize)
lk.unlock();
std::string msg = dumpThroughput(true);
lk.lock();
if (!msg.empty())
std::cout << msg << std::flush;
if (_outfile && !msg.empty())
*_outfile << msg << std::flush;
// Might also have reported and cleared the latency log.
}
......@@ -269,7 +283,15 @@ FileWithPerformanceLogger::inject(std::shared_ptr<FileADT> file)
int bincount = Environment::getNumericEnv("OPENZGY_MEASURE_BINS", 251);
int maxtime = Environment::getNumericEnv("OPENZGY_MEASURE_TIME", 500);
int interval = Environment::getNumericEnv("OPENZGY_MEASURE_INTERVAL", 0);
file = std::shared_ptr<FileADT>(new FileWithPerformanceLogger(file, target*1024, bincount, 0.0, maxtime, interval));
std::string filename = Environment::getStringEnv("OPENZGY_MEASURE_LOGFILE");
std::shared_ptr<std::ostream> out;
if (!filename.empty()) {
out = std::make_shared<std::ofstream>(filename, std::ofstream::app);
}
else {
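// std::cout is not owned here; the no-op deleter keeps the shared_ptr
// from ever trying to delete the global stream.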
out = std::shared_ptr<std::ostream>(&std::cout, [](std::ostream*){});
}
file = std::shared_ptr<FileADT>(new FileWithPerformanceLogger(file, out, target*1024, bincount, 0.0, maxtime, interval));
}
return file;
}
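Illustrative way to use the new OPENZGY_MEASURE_LOGFILE variable (the path is an assumption, and on POSIX this needs <cstdlib>); it must be set before the file is opened so that inject() sees it, and the report is then appended to the named file instead of going to std::cout:

::setenv("OPENZGY_MEASURE_LOGFILE", "/tmp/openzgy-perf.log", /*overwrite=*/1);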
......
......@@ -24,6 +24,7 @@
#include <string>
#include <memory>
#include <mutex>
#include <ostream>
#include "file.h"
......@@ -42,6 +43,7 @@ class FileWithPerformanceLogger : public FileADT
{
private:
const std::shared_ptr<FileADT> _relay;
const std::shared_ptr<std::ostream> _outfile;
const std::int64_t _chunksize;
mutable std::mutex _mutex;
// Latency a.k.a. round trip time: reported one for each thread.
......@@ -60,14 +62,14 @@ private:
FileWithPerformanceLogger& operator=(const FileWithPerformanceLogger&) = delete;
public:
explicit FileWithPerformanceLogger(std::shared_ptr<FileADT> relay, std::int64_t chunksize, int hist_bincount, double hist_min, double hist_max, int interval);
explicit FileWithPerformanceLogger(std::shared_ptr<FileADT> relay, std::shared_ptr<std::ostream> outfile, std::int64_t chunksize, int hist_bincount, double hist_min, double hist_max, int interval);
virtual ~FileWithPerformanceLogger();
virtual void xx_read(void *data, std::int64_t offset, std::int64_t size, UsageHint usagehint) override;
virtual void xx_readv(const ReadList& requests, bool parallel_ok, bool immutable_ok, bool transient_ok, UsageHint usagehint) override;
virtual void xx_write(const void* data, std::int64_t offset, std::int64_t size, UsageHint usagehint) override;
virtual void xx_close() override;
virtual std::int64_t xx_eof() const override { return _relay->xx_eof(); }
virtual bool xx_iscloud() const override { return _relay->xx_iscloud(); }
virtual std::int64_t xx_eof() const override;
virtual bool xx_iscloud() const override;
public:
void add(const Timer& timer, std::int64_t blocksize);
std::string dumpLatency(bool clear);
......
......@@ -1156,7 +1156,7 @@ SeismicStoreFile::xx_readv(const ReadList& requests, bool parallel_ok, bool immu
#if 0
// For now just implement xx_readv in terms of xx_read.
for (const ReadRequest& r : requests) {
std::shared_ptr<char> data(new char[r.size]);
std::shared_ptr<char> data(new char[r.size], std::default_delete<char[]>());
this->SeismicStoreFile::xx_read(data.get(), r.offset, r.size, usagehint);
_deliver(r.delivery, data, 0, r.size, transient_ok);
}
......@@ -1223,7 +1223,7 @@ SeismicStoreFile::xx_readv(const ReadList& requests, bool parallel_ok, bool immu
return std::max(a, b.local_size + b.outpos);
});
std::shared_ptr<char> data(new char[realsize]);
std::shared_ptr<char> data(new char[realsize], std::default_delete<char[]>());
if (this->_config->_debug_trace)
this->_config->_debug_trace("readv", /*need=*/asked, /*want=*/realsize,/*parts*/ work.size(), this->_dataset->info()->allSizes(-1));
......@@ -1810,7 +1810,7 @@ SeismicStoreFileDelayedWrite::xx_readv(const ReadList& requests, bool parallel_o
// of xx_read() or vice versa. Because this is an implementation
// detail and overriding one of them should not affect the other.
for (const ReadRequest& r : requests) {
std::shared_ptr<char> data(new char[r.size]);
std::shared_ptr<char> data(new char[r.size], std::default_delete<char[]>());
this->SeismicStoreFileDelayedWrite::xx_read(data.get(), r.offset, r.size, usagehint);
_deliver(r.delivery, data, 0, r.size, transient_ok);
}
......
......@@ -52,7 +52,7 @@ FileWithSmallCache::xx_read(void *data, std::int64_t offset, std::int64_t size,
}
if (_cachesize > 0) {
//printf("Cache load offset %llx size %llx\n", 0, _cachesize);
std::unique_ptr<char> newcache(new char[_cachesize]);
std::unique_ptr<char[]> newcache(new char[_cachesize]);
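// unique_ptr<char[]> (note the []) releases the buffer with delete[];
// the plain unique_ptr<char> used before would have called delete on an array.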
_relay->xx_read(newcache.get(), 0, _cachesize, UsageHint::Unknown);
_cache.swap(newcache);
}
......@@ -81,4 +81,22 @@ FileWithSmallCache::xx_write(const void* data, std::int64_t offset, std::int64_t
throw OpenZGY::Errors::ZgyInternalError("The small cache doesn't allow writes");
}
void
FileWithSmallCache::xx_close()
{
_relay->xx_close();
}
std::int64_t
FileWithSmallCache::xx_eof() const
{
return _relay->xx_eof();
}
bool
FileWithSmallCache::xx_iscloud() const
{
return _relay->xx_iscloud();
}
} // namespace