Warning removal. #92

Open
wants to merge 1 commit into master
24 changes: 15 additions & 9 deletions src/CachedReader.cpp
@@ -22,14 +22,18 @@ int32_t CachedReader::read(void* buf, int32_t count, uint64_t offset)
std::cout << "CachedReader::read(): offset=" << offset << ", count=" << count << std::endl;
#endif

if (count+offset > length())
count = length() - offset;
if ( count < 0 )
return -1;
if (offset > length())
return 0;
if (count > length() - offset) // here, offset is <= length() => length()-offset >= 0. (Do not test like ' if (count+offset > length()) ', risk of overflow)
count = int32_t(length() - offset); // here, length()-offset >= 0 AND < count => length()-offset < INT32_MAX, cast is safe

while (done < count)
{
int32_t thistime = std::min<int32_t>(count - done, CacheZone::BLOCK_SIZE);
uint64_t blockNumber = (offset+done) / CacheZone::BLOCK_SIZE;
uint64_t blockOffset = 0;
int32_t blockOffset = 0;
size_t fromCache;

if (done == 0) // this may also happen when cache doesn't contain a full block, but not on a R/O filesystem
@@ -84,6 +88,8 @@ int32_t CachedReader::read(void* buf, int32_t count, uint64_t offset)

void CachedReader::nonCachedRead(void* buf, int32_t count, uint64_t offset)
{
// nonCachedRead is private. Checks on count, offset and length() have already been made.

uint64_t blockStart, blockEnd;
std::unique_ptr<uint8_t[]> optimalBlockBuffer;
uint32_t optimalBlockBufferSize = 0;
@@ -105,15 +111,15 @@ void CachedReader::nonCachedRead(void* buf, int32_t count, uint64_t offset)
if (blockEnd - blockStart > std::numeric_limits<int32_t>::max())
throw std::logic_error("Range returned by adviseOptimalBlock() is too large");

thistime = blockEnd-blockStart;
thistime = int32_t(blockEnd-blockStart); // safe cast, because of the 2 checks above.
if (thistime > optimalBlockBufferSize)
{
optimalBlockBufferSize = thistime;
optimalBlockBuffer.reset(new uint8_t[optimalBlockBufferSize]);
}

#ifdef DEBUG
std::cout << "Reading from backing reader: offset=" << blockStart << ", count=" << thistime << std::endl;
// std::cout << "Reading from backing reader: offset=" << blockStart << ", count=" << thistime << std::endl;
#endif
rd = m_reader->read(optimalBlockBuffer.get(), thistime, blockStart);

@@ -137,12 +143,12 @@ void CachedReader::nonCachedRead(void* buf, int32_t count, uint64_t offset)
uint32_t toCopy;

if (readPos > blockStart)
optimalOffset = readPos - blockStart;
outputOffset = readPos - offset;
toCopy = std::min<uint32_t>(offset+count - readPos, thistime - optimalOffset);
optimalOffset = uint32_t(readPos - blockStart); // safe cast, readPos is > blockStart
outputOffset = uint32_t(readPos - offset); // safe cast, readPos is > offset
toCopy = std::min<uint32_t>(uint32_t(offset+count - readPos), thistime - optimalOffset);

#ifdef DEBUG
std::cout << "Copying " << toCopy << " bytes into output buffer at offset " << outputOffset << " from internal offset " << optimalOffset << std::endl;
// std::cout << "Copying " << toCopy << " bytes into output buffer at offset " << outputOffset << " from internal offset " << optimalOffset << std::endl;
#endif
// if (toCopy+optimalOffset > thistime)
// throw std::logic_error("Internal error");
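The bounds check added to CachedReader::read() above (and repeated in DMGPartition::read() and HFSFork::read() below) follows a single pattern: reject a negative count, return 0 when the offset is already past the end, and only then clamp count, so that no addition can overflow. A minimal standalone sketch of that pattern; clampReadCount() is an illustrative helper name, not part of this patch:

#include <cstdint>

// Overflow-safe clamp in the spirit of the read() methods touched by this patch.
static int32_t clampReadCount(int32_t count, uint64_t offset, uint64_t length)
{
    if (count < 0)
        return -1;                             // invalid request
    if (offset > length)
        return 0;                              // nothing left to read
    if (uint64_t(count) > length - offset)     // length - offset cannot underflow here
        return int32_t(length - offset);       // fits in int32_t: it is smaller than count
    return count;
}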
23 changes: 9 additions & 14 deletions src/DMGDecompressor.cpp
@@ -21,8 +21,8 @@ class DMGDecompressor_Zlib : public DMGDecompressor
~DMGDecompressor_Zlib();
virtual int32_t decompress(void* output, int32_t count, int64_t offset) override;
private:
virtual int32_t decompress(void* output, int32_t count);
z_stream m_strm;
virtual int32_t decompress(void* output, typeof(m_strm.avail_out) count);
};

class DMGDecompressor_Bzip2 : public DMGDecompressor
@@ -32,8 +32,8 @@ class DMGDecompressor_Bzip2 : public DMGDecompressor
~DMGDecompressor_Bzip2();
virtual int32_t decompress(void* output, int32_t count, int64_t offset) override;
private:
virtual int32_t decompress(void* output, int32_t count);
bz_stream m_strm;
virtual int32_t decompress(void* output, typeof(m_strm.avail_out) count);
};

class DMGDecompressor_ADC : public DMGDecompressor
@@ -109,7 +109,7 @@ DMGDecompressor_Zlib::~DMGDecompressor_Zlib()
inflateEnd(&m_strm);
}

int32_t DMGDecompressor_Zlib::decompress(void* output, int32_t count)
int32_t DMGDecompressor_Zlib::decompress(void* output, typeof(m_strm.avail_out) count)
{
int status;
char* input;
@@ -147,22 +147,20 @@ int32_t DMGDecompressor_Zlib::decompress(void* output, int32_t count)

int32_t DMGDecompressor_Zlib::decompress(void* output, int32_t count, int64_t offset)
{
int32_t done = 0;

#ifdef DEBUG
std::cout << "zlib: Asked to provide " << count << " bytes\n";
#endif

while (offset > 0)
{
char waste[4096];
int32_t to_read = std::min(int64_t(sizeof(waste)), offset);
char waste[4096]; // sizeof(waste) has to be < typeof(m_strm.avail_out) MAX
int32_t to_read = (typeof(m_strm.avail_out))std::min<uint64_t>(sizeof(waste), offset); // safe cast if sizeof(waste) < typeof(m_strm.avail_out) MAX
int32_t bytesDecompressed = decompress(waste, to_read);
if (bytesDecompressed <= 0)
return bytesDecompressed;
offset -= bytesDecompressed;
}
int32_t bytesDecompressed = decompress((uint8_t*)output+done, count);
int32_t bytesDecompressed = decompress((uint8_t*)output, count);
return bytesDecompressed;
}

@@ -179,7 +177,7 @@ DMGDecompressor_Bzip2::~DMGDecompressor_Bzip2()
BZ2_bzDecompressEnd(&m_strm);
}

int32_t DMGDecompressor_Bzip2::decompress(void* output, int32_t count)
int32_t DMGDecompressor_Bzip2::decompress(void* output, typeof(m_strm.avail_out) count)
{
int status;
char* input;
@@ -217,16 +215,14 @@ int32_t DMGDecompressor_Bzip2::decompress(void* output, int32_t count)

int32_t DMGDecompressor_Bzip2::decompress(void* output, int32_t count, int64_t offset)
{
int32_t done = 0;

#ifdef DEBUG
//std::cout << "bz2: Asked to provide " << outputBytes << " bytes\n";
#endif

while (offset > 0)
{
char waste[4096];
int32_t to_read = std::min(int64_t(sizeof(waste)), offset);
char waste[4096]; // sizeof(waste) has to be < typeof(m_strm.avail_out) MAX
int32_t to_read = (typeof(m_strm.avail_out))std::min<uint64_t>(sizeof(waste), offset); // safe cast if sizeof(waste) < typeof(m_strm.avail_out) MAX
int32_t bytesDecompressed = decompress(waste, to_read);
if (bytesDecompressed <= 0)
return bytesDecompressed;
@@ -290,7 +286,6 @@ int32_t DMGDecompressor_LZFSE::decompress(void* output, int32_t outputBytes)
{
// DMGDecompressor can only read by 8k while compressed length of a LZFSE block can be much bigger

int32_t done = 0;
char* input = nullptr;
char *inputBig = nullptr;

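Typing the private decompress() overload's count parameter as typeof(m_strm.avail_out) means the later assignment to avail_out involves no narrowing or signedness change, which is why that overload is now declared after the m_strm member. typeof is a GCC/Clang extension; a rough standard-C++ equivalent using decltype (zlib case only, class and body here are only a sketch) could look like:

#include <zlib.h>
#include <cstdint>

class ZlibBlockSketch
{
    z_stream m_strm{};   // declared first so decltype below can name it
public:
    // count has exactly the type of m_strm.avail_out (uInt in zlib),
    // so the assignment below cannot warn about a conversion.
    int32_t decompress(void* output, decltype(m_strm.avail_out) count)
    {
        m_strm.next_out  = static_cast<Bytef*>(output);
        m_strm.avail_out = count;
        // ... feed compressed input and call inflate() here ...
        return int32_t(count - m_strm.avail_out);   // bytes produced so far
    }
};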
15 changes: 9 additions & 6 deletions src/DMGDisk.cpp
@@ -7,6 +7,7 @@
#include <openssl/evp.h>
#include <memory>
#include <sstream>
#include <limits>
#include "DMGPartition.h"
#include "AppleDisk.h"
#include "GPTDisk.h"
@@ -57,11 +58,12 @@ void DMGDisk::loadKoly(const UDIFResourceFile& koly)

offset = be(koly.fUDIFXMLOffset);
length = be(koly.fUDIFXMLLength);
assert(length <= std::numeric_limits<int32_t>::max());

xmlData.reset(new char[length]);
m_reader->read(xmlData.get(), length, offset);
m_reader->read(xmlData.get(), (int32_t)length, offset); // safe cast because of assert.

m_kolyXML = xmlParseMemory(xmlData.get(), length);
m_kolyXML = xmlParseMemory(xmlData.get(), (int32_t)length); // safe cast because of assert.

//#if 0 // Asian copies of OS X put crap UTF characters into XML data making type/name parsing unreliable
xpathContext = xmlXPathNewContext(m_kolyXML);
@@ -203,16 +205,18 @@ BLKXTable* DMGDisk::loadBLKXTableForPartition(int index)

bool DMGDisk::base64Decode(const std::string& input, std::vector<uint8_t>& output)
{
assert(input.length() <= std::numeric_limits<int>::max());

BIO *b64, *bmem;
std::unique_ptr<char[]> buffer(new char[input.length()]);
int rd;

b64 = BIO_new(BIO_f_base64());
bmem = BIO_new_mem_buf((void*) input.c_str(), input.length());
bmem = BIO_new_mem_buf((void*) input.c_str(), (int)input.length()); // safe cast because of assert.
bmem = BIO_push(b64, bmem);
//BIO_set_flags(bmem, BIO_FLAGS_BASE64_NO_NL);

rd = BIO_read(bmem, buffer.get(), input.length());
rd = BIO_read(bmem, buffer.get(), (int)input.length()); // safe cast because of assert.

if (rd > 0)
output.assign(buffer.get(), buffer.get()+rd);
@@ -233,8 +237,7 @@ std::shared_ptr<Reader> DMGDisk::readerForPartition(int index)
if (be(table->firstSectorNumber)*512 == m_partitions[index].offset)
{
std::stringstream partName;
uint64_t l = m_reader->length();
uint32_t data_offset = be(m_udif.fUDIFDataForkOffset);
uint64_t data_offset = be(m_udif.fUDIFDataForkOffset);

partName << "part-" << index;

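xmlParseMemory() and the OpenSSL BIO calls take plain int sizes, so the new asserts pin the 64-bit lengths before the explicit narrowing casts. The same guard could be factored into a tiny helper; a sketch under that assumption (checkedInt() is hypothetical, not in the patch):

#include <cassert>
#include <cstdint>
#include <limits>

// Hypothetical helper capturing the assert-then-cast pattern used above.
static int checkedInt(uint64_t value)
{
    assert(value <= uint64_t(std::numeric_limits<int>::max()));
    return int(value);   // safe: asserted to fit in int
}

// In the spirit of loadKoly() and base64Decode():
//   m_reader->read(xmlData.get(), checkedInt(length), offset);
//   m_kolyXML = xmlParseMemory(xmlData.get(), checkedInt(length));
//   rd = BIO_read(bmem, buffer.get(), checkedInt(input.length()));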
28 changes: 19 additions & 9 deletions src/DMGPartition.cpp
@@ -7,6 +7,7 @@
#include <algorithm>
//#include <cstdio>
#include <iostream>
#include <limits>
#include "SubReader.h"
#include "exceptions.h"

@@ -20,12 +21,14 @@ DMGPartition::DMGPartition(std::shared_ptr<Reader> disk, BLKXTable* table)
RunType type = RunType(be(m_table->runs[i].type));
if (type == RunType::Comment || type == RunType::Terminator)
continue;

#ifdef DEBUG
assert(be(m_table->runs[i].sectorCount) <= std::numeric_limits<uint64_t>::max() / SECTOR_SIZE);
#endif
m_sectors[be(m_table->runs[i].sectorStart)] = i;

#ifdef DEBUG
std::cout << "Sector " << i << " has type 0x" << std::hex << uint32_t(type) << std::dec << ", starts at byte "
<< be(m_table->runs[i].sectorStart)*512l << ", compressed length: "
<< be(m_table->runs[i].sectorStart)*SECTOR_SIZE << ", compressed length: "
<< be(m_table->runs[i].compLength) << ", compressed offset: " << be(m_table->runs[i].compOffset) + be(m_table->dataStart) << std::endl;
#endif
}
@@ -61,6 +64,14 @@ void DMGPartition::adviseOptimalBlock(uint64_t offset, uint64_t& blockStart, uin

int32_t DMGPartition::read(void* buf, int32_t count, uint64_t offset)
{

if ( count < 0 )
return -1;
if (offset > length())
return 0;
if (count > length() - offset) // here, offset is <= length() => length()-offset >= 0. (Do not test like ' if (count+offset > length()) ', risk of overflow)
count = int32_t(length() - offset); // here, length()-offset >= 0 AND < count => length()-offset < INT32_MAX, cast is safe

int32_t done = 0;

while (done < count)
@@ -95,10 +106,15 @@ int32_t DMGPartition::read(void* buf, int32_t count, uint64_t offset)

int32_t DMGPartition::readRun(void* buf, int32_t runIndex, uint64_t offsetInSector, int32_t count)
{
// readRun is private. Assuming count is > 0
BLKXRun* run = &m_table->runs[runIndex];
RunType runType = RunType(be(run->type));

count = std::min<uint64_t>(count, uint64_t(be(run->sectorCount))*512 - offsetInSector);
uint64_t compLength = be(run->sectorCount)*SECTOR_SIZE; // no overflow because assert in ctor
if ( offsetInSector > compLength )
return 0;

count = (int32_t)std::min<uint64_t>(count, compLength - offsetInSector); // safe cast, result of min is <= count.

#ifdef DEBUG
std::cout << "readRun(): runIndex = " << runIndex << ", offsetInSector = " << offsetInSector << ", count = " << count << std::endl;
@@ -131,12 +147,6 @@ int32_t DMGPartition::readRun(void* buf, int32_t runIndex, uint64_t offsetInSect
if (!decompressor)
throw std::logic_error("DMGDecompressor::create() returned nullptr!");

unsigned long long int compLength = be(run->sectorCount)*512;
if ( offsetInSector > compLength )
return 0;
if ( offsetInSector + count > compLength )
count = compLength - offsetInSector;

int32_t dec = decompressor->decompress((uint8_t*)buf, count, offsetInSector);
if (dec < count)
throw io_error("Error decompressing stream");
2 changes: 1 addition & 1 deletion src/FileReader.cpp
@@ -34,7 +34,7 @@ int32_t FileReader::read(void* buf, int32_t count, uint64_t offset)

static_assert(sizeof(off_t) == 8, "off_t is too small");

return ::pread(m_fd, buf, count, offset);
return (int32_t)::pread(m_fd, buf, count, offset); // safe cast, pread's return value is <= count (or -1 on error)
}

uint64_t FileReader::length()
1 change: 0 additions & 1 deletion src/GPTDisk.cpp
@@ -38,7 +38,6 @@ bool GPTDisk::isGPTDisk(std::shared_ptr<Reader> reader)
std::string GPTDisk::makeGUID(const GPT_GUID& guid)
{
std::stringstream ss;
int pos = 0;

ss << std::hex << std::uppercase;
ss << std::setw(8) << std::setfill('0') << guid.data1;
2 changes: 1 addition & 1 deletion src/HFSCatalogBTree.cpp
@@ -6,7 +6,7 @@
#include <sstream>
#include <cstring>
using icu::UnicodeString;
static const int MAX_SYMLINKS = 50;
//static const int MAX_SYMLINKS = 50;

extern UConverter *g_utf16be;

10 changes: 6 additions & 4 deletions src/HFSFork.cpp
@@ -43,16 +43,18 @@ void HFSFork::loadFromOverflowsFile(uint32_t blocksSoFar)
int32_t HFSFork::read(void* buf, int32_t count, uint64_t offset)
{
const auto blockSize = be(m_volume->m_header.blockSize);
const uint32_t firstBlock = offset / blockSize;
const uint64_t firstBlock = offset / blockSize;
uint32_t blocksSoFar;
int firstExtent, extent;
uint32_t read = 0;
uint64_t offsetInExtent;

if ( count < 0 )
return -1;
if (offset > be(m_fork.logicalSize))
count = 0;
else if (offset+count > be(m_fork.logicalSize))
count = be(m_fork.logicalSize) - offset;
else if (count > be(m_fork.logicalSize) - offset) // here, offset is <= be(m_fork.logicalSize) => be(m_fork.logicalSize)-offset >= 0. (Do not test like ' if (offset+count > be(m_fork.logicalSize)) ', offset+count could overflow)
count = int32_t(be(m_fork.logicalSize) - offset);// here, be(m_fork.logicalSize)-offset >= 0 AND < count => be(m_fork.logicalSize)-offset < INT32_MAX, cast is safe

if (!count)
return 0;
@@ -95,7 +97,7 @@ int32_t HFSFork::read(void* buf, int32_t count, uint64_t offset)
if (extent >= m_extents.size())
loadFromOverflowsFile(blocksSoFar);

thistime = std::min<int64_t>(m_extents[extent].blockCount * uint64_t(blockSize) - offsetInExtent, count-read);
thistime = (int32_t)std::min<int64_t>(m_extents[extent].blockCount * uint64_t(blockSize) - offsetInExtent, count-read); // safe cast, result of min is <= count - read, which is <= count.

if (thistime == 0)
throw std::logic_error("Internal error: thistime == 0");
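Widening firstBlock from uint32_t to uint64_t matters because the quotient offset / blockSize can itself exceed 32 bits for very large forks; a uint32_t would silently keep only the low bits. A tiny illustration with arbitrary values:

#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t blockSize = 4096;
    const uint64_t offset    = uint64_t(5) << 44;            // 80 TiB into a fork
    const uint64_t block64   = offset / blockSize;            // correct index: 5 * 2^32
    const uint32_t block32   = uint32_t(offset / blockSize);  // truncated to 0
    assert(block64 != block32);
    return 0;
}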
8 changes: 5 additions & 3 deletions src/HFSHighLevelVolume.cpp
@@ -8,6 +8,7 @@
#include "exceptions.h"
#include "decmpfs.h"
#include <assert.h>
#include <limits>

static const char* RESOURCE_FORK_SUFFIX = "#..namedfork#rsrc";
static const char* XATTR_RESOURCE_FORK = "com.apple.ResourceFork";
@@ -267,11 +268,12 @@ std::vector<uint8_t> HFSHighLevelVolume::getXattr(const std::string& path, const

if (file->length() == 0)
throw attribute_not_found_error();
if (file->length() > std::numeric_limits<int>::max())
throw io_error("Attribute too big");

rv = std::min<int>(std::numeric_limits<int>::max(), file->length());
output.resize(rv);
output.resize(file->length());

file->read(&output[0], rv, 0);
file->read(&output[0], int32_t(file->length()), 0);
}
else if (name == XATTR_FINDER_INFO)
{