Commit 1ad1c9a5 authored by Paal Kvamme's avatar Paal Kvamme
Browse files

Reduce compiler warnings.

parent a57c7d3a
......@@ -842,7 +842,7 @@ ZgyInternalBulk::trackedBricksTryEnable(bool on)
(ih.scnt(), /*inf=*/0, ih.ssum(), ih.sssq(),
ih.smin(), ih.smax()));
_modified_histo.reset(new HistogramData
(hh.bins(), hh.bincount(),
(hh.bins(), (int)hh.bincount(),
hh.minvalue(), hh.maxvalue()));
if (_logger(2))
_logger(2, std::stringstream()
......@@ -904,7 +904,7 @@ ZgyInternalBulk::_trackedBricksSetDirty(
int new_dirty{0}, old_dirty{0};
// number of lods we will build, not the current count.
for (const index3_t& pos : work) {
int ii = pos[0]/bs[0], jj = pos[1]/bs[1], kk = pos[2]/bs[2];
std::int64_t ii = pos[0]/bs[0], jj = pos[1]/bs[1], kk = pos[2]/bs[2];
for (std::int32_t mylod = lod; mylod < nlods; ++mylod) {
const std::int64_t bix = LookupTable::getBrickLookupIndex
(ii, jj, kk, mylod, ih.lodsizes(), ih.brickoffsets());
......@@ -966,7 +966,7 @@ ZgyInternalBulk::trackedBricksShowDirty(int loglevel) const
++num_total;
std::int64_t index = InternalZGY::LookupTable::getBrickLookupIndex
(ii, jj, kk, lod, ih.lodsizes(), ih.brickoffsets());
if (index >= _modified_bricks.size()) {
if (index >= (std::int64_t)_modified_bricks.size()) {
flag = '?'; // Actually a fatal error
break;
}
......@@ -1107,7 +1107,7 @@ ZgyInternalBulk::_validateUserPosition(
const std::array<std::int64_t,3> one{1,1,1} ;
const std::array<std::int64_t,3> bs = this->_metadata->ih().bricksize();
const std::array<std::int64_t,3> end = ((start + size + bs - one) / bs) * bs;
const std::int64_t nlods = static_cast<std::int64_t>(ih.lodsizes().size());
const std::int32_t nlods = static_cast<std::int32_t>(ih.lodsizes().size());
const bool open_for_write = this->_metadata_rw != nullptr;
// TODO-Low: Performance: Cache usable_nlods.
const std::int32_t usable_nlods = open_for_write ? nlods :
......
......@@ -268,9 +268,6 @@ public:
std::shared_ptr<DataBuffer> scaleToStorage(const std::array<double,2>&, RawDataType) override;
std::shared_ptr<DataBuffer> slice1(int dim, std::int64_t start, std::int64_t size) const override;
std::shared_ptr<self_type> slice(const ndsize_t& neworig,
const ndsize_t& newsize) const;
void copyFrom(const DataBuffer* src,
const std::int64_t *srcorig,const std::int64_t *dstorig,
const std::int64_t *cpyorig,const std::int64_t *cpysize);
......
......@@ -131,7 +131,7 @@ typedef std::vector<ReadList> ReadDoubleList;
* Thread safety: Interfaces and classes that only contain static
* methods do not have race conditions.
*/
class FileADT
class OPENZGY_TEST_API FileADT
{
public:
virtual ~FileADT();
......
......@@ -117,8 +117,8 @@ FileParallelizer::xx_readv(
// Deliver the buffers that we cached to the caller.
const std::int64_t threadcount = std::min(std::min(requestcount, (std::int64_t)omp_get_max_threads()), _cputhreads);
MTGuard guard("paralellizer", threadcount);
#pragma omp parallel for num_threads(threadcount)
MTGuard guard("paralellizer", (int)threadcount);
#pragma omp parallel for num_threads((int)threadcount)
for (std::int64_t ii = 0; ii < requestcount; ++ii) {
guard.run([&](){
//std::cerr << "0123456789"[omp_get_thread_num() % 10];
......
......@@ -1247,9 +1247,9 @@ SeismicStoreFile::xx_readv(const ReadList& requests, bool parallel_ok, bool immu
static_cast<std::int64_t>(omp_get_max_threads())),
_config->_iothreads),
static_cast<std::int64_t>(1));
MTGuard guard("cloud-read", threadcount);
MTGuard guard("cloud-read", (int)threadcount);
//std::cerr << "Access seismic store (" << worksize << "): ";
#pragma omp parallel for num_threads(threadcount) schedule(dynamic,1)
#pragma omp parallel for num_threads((int)threadcount) schedule(dynamic,1)
for (std::int64_t ii=0; ii<worksize; ++ii) {
//if (!ii) std::cerr << ("[" + std::to_string(omp_get_num_threads()) + "]");
const auto& it = work[ii];
......@@ -1442,7 +1442,7 @@ SeismicStoreFile::do_write_many(
// an int. To make the cast safe.
this->_dataset->info()->checkOnWrite(blocknum, std::min(size, blobsize));
const int blobcount = (size + blobsize - 1) / blobsize;
const int blobcount = static_cast<int>((size + blobsize - 1) / blobsize);
MTGuard guard("cloud-write", blobcount);
#pragma omp parallel for num_threads(blobcount)
for (int ii = 0; ii < blobcount; ++ii) {
......
......@@ -126,7 +126,7 @@ LookupTable::calcLookupSize(
// Technically I could also check the end of the last block, but
// that only works for uncompressed and only if maxsize is known.
//minimum_file_eof = entries.back().offset;
if (eof != 0 && !entries.empty() && entries.back().offset >= eof)
if (eof != 0 && !entries.empty() && entries.back().offset >= (std::uint64_t)eof)
if (return_file_truncated != nullptr)
*return_file_truncated = true;
......@@ -481,7 +481,7 @@ LookupTable::usableBrickLOD(
(0, 0, 0, lodsizes.size()-1,
lodsizes, brickoffsets, blup, bend,
0).status == BrickStatus::Missing ? 1 :
lodsizes.size();
(std::int32_t)lodsizes.size();
}
/**
......
......@@ -2250,8 +2250,11 @@ ZgyInternalMeta::initFromReopen(const ZgyInternalWriterArgs& args_in, bool compr
std::shared_ptr<HistHeaderV2Access> hh(new HistHeaderV2Access());
memset(&hh->_pod, 0, sizeof(hh->_pod));
if (this->_hh->minvalue() <= this->_hh->maxvalue()) {
hh->_pod._min = this->_hh->minvalue();
hh->_pod._max = this->_hh->maxvalue();
// TODO-Worry: These casts to hide compiler warnings MUST be
// removed once the file format is changed to use wider (double)
// types. Otherwise the added accuracy won't help.
hh->_pod._min = static_cast<float>(this->_hh->minvalue());
hh->_pod._max = static_cast<float>(this->_hh->maxvalue());
this->_hh = hh;
}
pod._scnt = 0;
......
......@@ -298,6 +298,7 @@ test_readmeta_v1_r()
reader->close();
}
#if 0 // The required data file is not checked in yet.
static void
test_readcmeta()
{
......@@ -308,6 +309,7 @@ test_readcmeta()
reader->filestats()->dump(std::cout, "filestats: ");
}
}
#endif
static void
test_readconst()
......@@ -1616,7 +1618,7 @@ test_histo_onevalue(SampleDataType dtype, float value, bool fill, const std::arr
static onevalue_t
test_histo_onevalue(SampleDataType dtype, float value, bool fill)
{
float center = std::isfinite(value) ? value : -0.25;
float center = std::isfinite(value) ? value : -0.25f;
return test_histo_onevalue(dtype, value, fill,
std::array<float,2>{center-1, center+1});
}
......@@ -2256,7 +2258,7 @@ test_decimate_edge()
std::vector<std::int8_t> check(lod1size[0]*lod1size[1]*lod1size[2], -1);
std::shared_ptr<OpenZGY::IZgyReader> reader = IZgyReader::open(lad.name());
reader->read(size3i_t{0,0,0}, lod1size, check.data(), 1);
const auto offset = [](const size3i_t& size, int ii, int jj, int kk) {
const auto offset = [](const size3i_t& size, std::int64_t ii, std::int64_t jj, std::int64_t kk) {
return ii*size[1]*size[2]+jj*size[2]+kk;
};
// Each lod1 sample was computed as the simple average of one "90" sample
......@@ -2281,23 +2283,6 @@ do_test_readwrite(const std::string& filename, const IOContext *context = nullpt
{
typedef OpenZGY::IZgyWriter::size3i_t size3i_t;
// Local functions
static const auto inside = [](int ii, int jj, int kk,
const size3i_t& start, const size3i_t& size) {
return (ii >= start[0] && ii < start[0] + size[0] &&
jj >= start[1] && jj < start[1] + size[1] &&
kk >= start[2] && kk < start[2] + size[2]);
};
static const auto expect = [](int ii, int jj, int kk) {
return (inside(ii, jj, kk, size3i_t{96,96,96}, size3i_t{64,64,64}) ?
3000 :
inside(ii, jj, kk, size3i_t{64,64,64}, size3i_t{64,64,64}) ?
2000 :
inside(ii, jj, kk, size3i_t{0,0,0}, size3i_t{64,64,64}) ?
1000 :
0);
};
std::shared_ptr<OpenZGY::IZgyWriter> writer =
OpenZGY::IZgyWriter::open
(ZgyWriterArgs()
......
......@@ -97,7 +97,7 @@ test_databuffer_construct()
const size3i_t size{3, 5, 7};
const std::int64_t count = size[0]*size[1]*size[2];
std::shared_ptr<T> data(new T[count], std::default_delete<T[]>());
std::fill(data.get(), data.get() + count, 1);
std::fill(data.get(), data.get() + count, (T)1);
data.get()[42] = 42;
std::shared_ptr<DataBuffer> buffer =
DataBuffer::makeDataBuffer3d(data, count*sizeof(T), size, dtype);
......@@ -129,7 +129,7 @@ test_databuffer_range()
for (int ii=0; ii<usedsize[0]; ++ii)
for (int jj=0; jj<usedsize[1]; ++jj)
for (int kk=0; kk<usedsize[2]; ++kk)
buffer.data()[ii*size[1]*size[2] + jj*size[2] + kk] = ++value;
buffer.data()[ii*size[1]*size[2] + jj*size[2] + kk] = (float)++value;
auto r1 = buffer.range(nullptr); // entire buffer
auto r2 = buffer.range(usedsize.data()); // valid part
TEST_EQUAL(r1.first, -999.25);
......@@ -150,7 +150,7 @@ test_databuffer_scale()
RawDataType datatype = RawDataTypeTraits<T>::datatype;
{
// Constant value
auto buffer = std::make_shared<intbuffer_t>(7, size);
auto buffer = std::make_shared<intbuffer_t>((T)7, size);
auto scaled1 = buffer ? buffer->scaleToFloat(factors) : nullptr;
auto scaled2 = scaled1 ? scaled1->scaleToFloat(factors) : nullptr;
auto scaled = std::dynamic_pointer_cast<floatbuffer_t>(scaled1);
......@@ -253,7 +253,7 @@ test_databuffer_scale()
{
// Negative test: Convert an integral buffer to storage should throw.
// Convert float buffer to float is a no-op and should return nullptr.
auto buffer = std::make_shared<intbuffer_t>(42, size);
auto buffer = std::make_shared<intbuffer_t>((T)42, size);
TEST_CHECK(buffer != nullptr);
if (buffer != nullptr) {
if (limits::is_integer) {
......@@ -305,7 +305,7 @@ test_databuffer_copyfrom()
// which means it doesn't go to the start of the user's buffer and
// the user isn't going to make use of more than 46 samples vertically.
DataBufferNd<T,3> brick(std::array<std::int64_t,3>{64,64,64});
std::fill(brick.data(), brick.data()+brick.allocsize(), 99);
std::fill(brick.data(), brick.data()+brick.allocsize(), (T)99);
std::int64_t orig1[3]{0,0,64};
target.copyFrom(&brick, orig1, targetorigin.data(), nullptr, nullptr);
......
......@@ -315,13 +315,13 @@ do_test_reopen(const std::string& filename, TestTwiceFlags flags)
false);
const float expect_lod0first =
(flagset(TestTwiceFlags::step2_replace) ? -10 :
flagset(TestTwiceFlags::step2_rmw) ? -10 :
flagset(TestTwiceFlags::step1_write) ? 42 :
(flagset(TestTwiceFlags::step2_replace) ? -10.0f :
flagset(TestTwiceFlags::step2_rmw) ? -10.0f :
flagset(TestTwiceFlags::step1_write) ? 42.0f :
0);
const float expect_lod0second =
(flagset(TestTwiceFlags::step2_write) ? -10 :
(flagset(TestTwiceFlags::step2_write) ? -10.0f :
0);
// first lod1 sample is normally identical to lod0 due to using simple decimation
......@@ -392,19 +392,19 @@ do_test_reopen(const std::string& filename, TestTwiceFlags flags)
const std::int64_t expect_stat_cnt = expect_finalized ? 66*100*100 : 0;
const double expect_stat_sum = !expect_finalized ? 0 :
(expect_41 * 41) +
(expect_42 * 42) +
(expect_15 * -15) +
(expect_10 * -10);
(expect_41 * 41.0) +
(expect_42 * 42.0) +
(expect_15 * -15.0) +
(expect_10 * -10.0);
const double expect_stat_ssq = !expect_finalized ? 0 :
(expect_41 * 41 * 41) +
(expect_42 * 42 * 42) +
(expect_15 * -15 * -15) +
(expect_10 * -10 * -10);
(expect_41 * 41 * 41.0) +
(expect_42 * 42 * 42.0) +
(expect_15 * -15 * -15.0) +
(expect_10 * -10 * -10.0);
const double expect_stat_min = !expect_finalized ? 0 :
(expect_15 + expect_10 == 0 ? 0 : -15);
const double expect_stat_min = !expect_finalized ? 0.0 :
(expect_15 + expect_10 == 0 ? 0.0 : -15.0);
// Note the quirk with incremental finalize. If there were ever
// some 42's written that number remains in the statistical
......@@ -761,7 +761,7 @@ test_reopen_not_if_final()
.filename(lad.name())
.size(10, 100, 300)
.zfp_compressor(99));
const float fortytwo{42}, fifteen{15};
const float fortytwo{42};
writer->write(size3i_t{0,0,0}, size3i_t{1,1,1}, &fortytwo);
writer->close();
writer.reset();
......@@ -1751,8 +1751,8 @@ test_reopen_zgypublic()
std::cout << "Append, first brick all 1000\n";
std::shared_ptr<OpenZGY::IZgyWriter> writer =
OpenZGY::IZgyWriter::reopen(secondargs);
std::vector<float> data(64*64*64, 1000);
std::fill(data.data() + 64*64*64 - 64, data.data() + 64*64*64, 0);
std::vector<float> data(64*64*64, 1000.0f);
std::fill(data.data() + 64*64*64 - 64, data.data() + 64*64*64, 0.0f);
writer->write(size3i_t{0,0,0}, size3i_t{64,64,64}, data.data());
writer->close();
}
......@@ -1784,16 +1784,16 @@ test_reopen_zgypublic()
jj >= start[1] && jj < start[1] + size[1] &&
kk >= start[2] && kk < start[2] + size[2]);
};
static const auto expect = [](int ii, int jj, int kk) {
static const auto expect = [](int ii, int jj, int kk) -> float {
return (inside(ii, jj, kk, size3i_t{96,96,96}, size3i_t{65,65,65}) ?
3000 :
3000.0f :
inside(ii, jj, kk, size3i_t{64,64,64}, size3i_t{64,64,64}) ?
2000 :
2000.0f :
inside(ii, jj, kk, size3i_t{63,63,0}, size3i_t{1,1,64}) ?
0 :
0.0f :
inside(ii, jj, kk, size3i_t{0,0,0}, size3i_t{64,64,64}) ?
1000 :
0);
1000.0f :
0.0f);
};
{
......@@ -1808,7 +1808,7 @@ test_reopen_zgypublic()
for (int jj=0; jj<192; ++jj) {
for (int kk=0; kk<192; ++kk) {
float value_expect = expect(ii, jj, kk);
float value_actual = check[ii*192*192 + jj*192 + kk];
float value_actual = check[(std::int64_t)ii*192*192 + (std::int64_t)jj*192 + kk];
if (std::abs(value_actual - value_expect) > 0.001) {
if (!TEST_EQUAL_FLOAT(value_actual, value_expect, 0.001)) {
std::cout << "FAIL at (" << ii << "," << jj << "," << kk << ")\n";
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment