squid/SOURCES/0007-Adapt-to-older-gcc-cleanup.patch


From 37de4ce82f7f8906606d0625774d856ffd3a9453 Mon Sep 17 00:00:00 2001
From: Alex Burmashev <alexander.burmashev@oracle.com>
Date: Thu, 7 Dec 2023 20:51:39 +0000
Subject: [PATCH] Adapt to older gcc, cleanup
Fix code that is not applicable to the older Squid v4 codebase.
On top of that, adapt the code to older gcc; most of that work
is converting std::optional to std::pair.
Signed-off-by: Alex Burmashev <alexander.burmashev@oracle.com>
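
As a quick illustration (not taken from this patch; the names below are
made up), the std::optional-to-std::pair conversion follows this pattern:
a function returning std::optional<T> is rewritten to return
std::pair<T, bool>, with .second standing in for has_value() and .first
holding the value (or a default-constructed T when .second is false):

    #include <limits>
    #include <utility>

    // hypothetical helper showing the pattern; not a Squid function
    static std::pair<unsigned int, bool>
    checkedAdd(const unsigned int a, const unsigned int b)
    {
        if (a > std::numeric_limits<unsigned int>::max() - b)
            return std::make_pair(0u, false); // was: return std::nullopt
        return std::make_pair(a + b, true);   // was: return std::optional<unsigned int>(a + b)
    }

    // caller side: "if (const auto sum = checkedAdd(a, b)) use(*sum);" becomes:
    //     const auto sum = checkedAdd(a, b);
    //     if (sum.second)
    //         use(sum.first);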
---
src/HttpReply.cc | 4 +-
src/MemObject.h | 3 ++
src/MemStore.cc | 6 +--
src/SquidMath.h | 27 ++++++------
src/Store.h | 3 ++
src/StoreClient.h | 2 +-
src/acl/Asn.cc | 14 +------
src/base/Assure.cc | 8 ++++
src/client_side_reply.cc | 64 ++++++++++++-----------------
src/peer_digest.cc | 1 +
src/store/ParsingBuffer.cc | 47 ++++++++++-----------
src/store/ParsingBuffer.h | 2 +-
src/store_client.cc | 84 +++++++++++++++++---------------------
src/urn.cc | 2 +-
14 files changed, 123 insertions(+), 144 deletions(-)
diff --git a/src/HttpReply.cc b/src/HttpReply.cc
index df5bcef..21c62c2 100644
--- a/src/HttpReply.cc
+++ b/src/HttpReply.cc
@@ -534,13 +534,13 @@ HttpReply::parseTerminatedPrefix(const char * const terminatedBuf, const size_t
const bool eof = false; // TODO: Remove after removing atEnd from HttpHeader::parse()
if (parse(terminatedBuf, bufSize, eof, &error)) {
debugs(58, 7, "success after accumulating " << bufSize << " bytes and parsing " << hdr_sz);
- Assure(pstate == Http::Message::psParsed);
+ Assure(pstate == psParsed);
Assure(hdr_sz > 0);
Assure(!Less(bufSize, hdr_sz)); // cannot parse more bytes than we have
return hdr_sz; // success
}
- Assure(pstate != Http::Message::psParsed);
+ Assure(pstate != psParsed);
hdr_sz = 0;
if (error) {
diff --git a/src/MemObject.h b/src/MemObject.h
index ba6646f..5a7590a 100644
--- a/src/MemObject.h
+++ b/src/MemObject.h
@@ -56,6 +56,9 @@ public:
void write(const StoreIOBuffer &buf);
void unlinkRequest();
+
+ HttpReply &baseReply() const { return *_reply; }
+
HttpReply const *getReply() const;
void replaceHttpReply(HttpReply *newrep);
void stat (MemBuf * mb) const;
diff --git a/src/MemStore.cc b/src/MemStore.cc
index fe7af2f..6762c4f 100644
--- a/src/MemStore.cc
+++ b/src/MemStore.cc
@@ -511,8 +511,8 @@ MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnc
" from " << extra.page << '+' << prefixSize);
// parse headers if needed; they might span multiple slices!
- auto &reply = e.mem().adjustableBaseReply();
- if (reply.pstate != Http::Message::psParsed) {
+ auto &reply = e.mem().baseReply();
+ if (reply.pstate != psParsed) {
httpHeaderParsingBuffer.append(sliceBuf.data, sliceBuf.length);
if (reply.parseTerminatedPrefix(httpHeaderParsingBuffer.c_str(), httpHeaderParsingBuffer.length()))
httpHeaderParsingBuffer = SBuf(); // we do not need these bytes anymore
@@ -542,7 +542,7 @@ MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnc
debugs(20, 5, "mem-loaded all " << e.mem_obj->endOffset() << '/' <<
anchor.basics.swap_file_sz << " bytes of " << e);
- if (e.mem().adjustableBaseReply().pstate != Http::Message::psParsed)
+ if (e.mem().baseReply().pstate != psParsed)
throw TextException(ToSBuf("truncated mem-cached headers; accumulated: ", httpHeaderParsingBuffer.length()), Here());
// from StoreEntry::complete()
diff --git a/src/SquidMath.h b/src/SquidMath.h
index e5b6e58..538833b 100644
--- a/src/SquidMath.h
+++ b/src/SquidMath.h
@@ -8,8 +8,6 @@
#ifndef _SQUID_SRC_SQUIDMATH_H
#define _SQUID_SRC_SQUIDMATH_H
-#include "base/forward.h"
-#include "base/TypeTraits.h"
#include <limits>
#include <optional>
@@ -68,7 +66,7 @@ AssertNaturalType()
/// \returns a non-overflowing sum of the two unsigned arguments (or nothing)
/// \prec both argument types are unsigned
template <typename S, typename A, typename B, std::enable_if_t<AllUnsigned<A,B>::value, int> = 0>
-std::optional<S>
+std::pair<S, bool>
IncreaseSumInternal(const A a, const B b) {
// paranoid: AllUnsigned<A,B> precondition established that already
static_assert(std::is_unsigned<A>::value, "AllUnsigned dispatch worked for A");
@@ -96,7 +94,7 @@ IncreaseSumInternal(const A a, const B b) {
// 2. the sum may overflow S (i.e. the return base type)
// We do not need Less() here because we compare promoted unsigned types.
return (sum >= a && sum <= std::numeric_limits<S>::max()) ?
- std::optional<S>(sum) : std::optional<S>();
+ std::make_pair(sum, true) : std::make_pair(S(), false);
}
/// This IncreaseSumInternal() overload supports a larger variety of types.
@@ -104,7 +102,7 @@ IncreaseSumInternal(const A a, const B b) {
/// \returns nothing if at least one of the arguments is negative
/// \prec at least one of the argument types is signed
template <typename S, typename A, typename B, std::enable_if_t<!AllUnsigned<A,B>::value, int> = 0>
-std::optional<S> constexpr
+std::pair<S, bool>
IncreaseSumInternal(const A a, const B b) {
AssertNaturalType<S>();
AssertNaturalType<A>();
@@ -118,7 +116,7 @@ IncreaseSumInternal(const A a, const B b) {
// We could support a non-under/overflowing sum of negative numbers, but
// our callers use negative values specially (e.g., for do-not-use or
// do-not-limit settings) and are not supposed to do math with them.
- (a < 0 || b < 0) ? std::optional<S>() :
+ (a < 0 || b < 0) ? std::make_pair(S(), false) :
// To avoid undefined behavior of signed overflow, we must not compute
// the raw a+b sum if it may overflow. When A is not B, a or b undergoes
// (safe for non-negatives) integer conversion in these expressions, so
@@ -130,13 +128,13 @@ IncreaseSumInternal(const A a, const B b) {
// which is the same as the overflow-safe condition here: maxS - a < b.
// Finally, (maxS - a) cannot overflow because a is not negative and
// cannot underflow because a is a promotion of s: 0 <= a <= maxS.
- Less(std::numeric_limits<S>::max() - a, b) ? std::optional<S>() :
- std::optional<S>(a + b);
+ Less(std::numeric_limits<S>::max() - a, b) ? std::make_pair(S(), false) :
+ std::make_pair(S(a + b), true);
}
/// argument pack expansion termination for IncreaseSum<S, T, Args...>()
template <typename S, typename T>
-std::optional<S>
+std::pair<S, bool>
IncreaseSum(const S s, const T t)
{
// Force (always safe) integer promotions now, to give std::enable_if_t<>
@@ -147,19 +145,20 @@ IncreaseSum(const S s, const T t)
/// \returns a non-overflowing sum of the arguments (or nothing)
template <typename S, typename T, typename... Args>
-std::optional<S>
+std::pair<S, bool>
IncreaseSum(const S sum, const T t, const Args... args) {
- if (const auto head = IncreaseSum(sum, t)) {
- return IncreaseSum(head.value(), args...);
+ const auto head = IncreaseSum(sum, t);
+ if (head.second) {
+ return IncreaseSum(head.first, args...);
} else {
// std::optional<S>() triggers bogus -Wmaybe-uninitialized warnings in GCC v10.3
- return std::nullopt;
+ return std::make_pair(S(), false);
}
}
/// \returns an exact, non-overflowing sum of the arguments (or nothing)
template <typename SummationType, typename... Args>
-std::optional<SummationType>
+std::pair<SummationType, bool>
NaturalSum(const Args... args) {
return IncreaseSum<SummationType>(0, args...);
}
diff --git a/src/Store.h b/src/Store.h
index 3eb6b84..2475fe0 100644
--- a/src/Store.h
+++ b/src/Store.h
@@ -49,6 +49,9 @@ public:
StoreEntry();
virtual ~StoreEntry();
+ MemObject &mem() { assert(mem_obj); return *mem_obj; }
+ const MemObject &mem() const { assert(mem_obj); return *mem_obj; }
+
virtual HttpReply const *getReply() const;
virtual void write (StoreIOBuffer);
diff --git a/src/StoreClient.h b/src/StoreClient.h
index 0524776..ba5e669 100644
--- a/src/StoreClient.h
+++ b/src/StoreClient.h
@@ -166,7 +166,7 @@ private:
/// request. Buffer contents depends on the source and parsing stage; it may
/// hold (parts of) swap metadata, HTTP response headers, and/or HTTP
/// response body bytes.
- std::optional<Store::ParsingBuffer> parsingBuffer;
+ std::pair<Store::ParsingBuffer, bool> parsingBuffer = std::make_pair(Store::ParsingBuffer(), false);
StoreIOBuffer lastDiskRead; ///< buffer used for the last storeRead() call
diff --git a/src/acl/Asn.cc b/src/acl/Asn.cc
index bcedc82..67e453f 100644
--- a/src/acl/Asn.cc
+++ b/src/acl/Asn.cc
@@ -73,7 +73,7 @@ class ASState
CBDATA_CLASS(ASState);
public:
- ASState();
+ ASState() = default;
~ASState();
StoreEntry *entry;
@@ -87,18 +87,6 @@ public:
CBDATA_CLASS_INIT(ASState);
-ASState::ASState() :
- entry(NULL),
- sc(NULL),
- request(NULL),
- as_number(0),
- offset(0),
- reqofs(0),
- dataRead(false)
-{
- memset(reqbuf, 0, AS_REQBUF_SZ);
-}
-
ASState::~ASState()
{
debugs(53, 3, entry->url());
diff --git a/src/base/Assure.cc b/src/base/Assure.cc
index b09b848..b4cf3e5 100644
--- a/src/base/Assure.cc
+++ b/src/base/Assure.cc
@@ -11,6 +11,14 @@
#include "base/TextException.h"
#include "sbuf/Stream.h"
+std::ostream &
+operator <<(std::ostream &os, const TextException &ex)
+{
+ ex.print(os);
+ return os;
+}
+
+
[[ noreturn ]] void
ReportAndThrow_(const int debugLevel, const char *description, const SourceLocation &location)
{
diff --git a/src/client_side_reply.cc b/src/client_side_reply.cc
index 470f4bc..64fd489 100644
--- a/src/client_side_reply.cc
+++ b/src/client_side_reply.cc
@@ -1142,8 +1142,8 @@ clientReplyContext::storeNotOKTransferDone() const
MemObject *mem = http->storeEntry()->mem_obj;
assert(mem != NULL);
assert(http->request != NULL);
-
- if (mem->baseReply().pstate != Http::Message::psParsed)
+ const auto expectedBodySize = mem->baseReply().content_length;
+ if (mem->baseReply().pstate != psParsed)
return 0;
/*
@@ -1808,32 +1808,6 @@ clientReplyContext::SendMoreData(void *data, StoreIOBuffer result)
context->sendMoreData (result);
}
-/// Whether the given body area describes the start of our Client Stream buffer.
-/// An empty area does.
-bool
-clientReplyContext::matchesStreamBodyBuffer(const StoreIOBuffer &their) const
-{
- // the answer is undefined for errors; they are not really "body buffers"
- Assure(!their.flags.error);
-
- if (!their.length)
- return true; // an empty body area always matches our body area
-
- if (their.data != next()->readBuffer.data) {
- debugs(88, 7, "no: " << their << " vs. " << next()->readBuffer);
- return false;
- }
-
- return true;
-}
-
-void
-clientReplyContext::noteStreamBufferredBytes(const StoreIOBuffer &result)
-{
- Assure(matchesStreamBodyBuffer(result));
- lastStreamBufferedBytes = result; // may be unchanged and/or zero-length
-}
-
void
clientReplyContext::makeThisHead()
{
@@ -2180,21 +2154,33 @@ clientReplyContext::sendMoreData (StoreIOBuffer result)
sc->setDelayId(DelayId::DelayClient(http,reply));
#endif
- /* handle headers */
+ holdingBuffer = result;
+ processReplyAccess();
+ return;
+}
- if (Config.onoff.log_mime_hdrs) {
- size_t k;
+/// Whether the given body area describes the start of our Client Stream buffer.
+/// An empty area does.
+bool
+clientReplyContext::matchesStreamBodyBuffer(const StoreIOBuffer &their) const
+{
+ // the answer is undefined for errors; they are not really "body buffers"
+ Assure(!their.flags.error);
+ if (!their.length)
+ return true; // an empty body area always matches our body area
+ if (their.data != next()->readBuffer.data) {
+ debugs(88, 7, "no: " << their << " vs. " << next()->readBuffer);
+ return false;
- if ((k = headersEnd(buf, reqofs))) {
- safe_free(http->al->headers.reply);
- http->al->headers.reply = (char *)xcalloc(k + 1, 1);
- xstrncpy(http->al->headers.reply, buf, k);
- }
}
+ return true;
+}
- holdingBuffer = result;
- processReplyAccess();
- return;
+void
+clientReplyContext::noteStreamBufferredBytes(const StoreIOBuffer &result)
+{
+ Assure(matchesStreamBodyBuffer(result));
+ lastStreamBufferedBytes = result; // may be unchanged and/or zero-length
}
/* Using this breaks the client layering just a little!
diff --git a/src/peer_digest.cc b/src/peer_digest.cc
index abfea4a..89ea73e 100644
--- a/src/peer_digest.cc
+++ b/src/peer_digest.cc
@@ -588,6 +588,7 @@ peerDigestFetchReply(void *data, char *buf, ssize_t size)
return 0; // we consumed/used no buffered bytes
}
+}
int
peerDigestSwapInCBlock(void *data, char *buf, ssize_t size)
diff --git a/src/store/ParsingBuffer.cc b/src/store/ParsingBuffer.cc
index e948fe2..affbe9e 100644
--- a/src/store/ParsingBuffer.cc
+++ b/src/store/ParsingBuffer.cc
@@ -28,19 +28,19 @@ Store::ParsingBuffer::ParsingBuffer(StoreIOBuffer &initialSpace):
const char *
Store::ParsingBuffer::memory() const
{
- return extraMemory_ ? extraMemory_->rawContent() : readerSuppliedMemory_.data;
+ return extraMemory_.second ? extraMemory_.first.rawContent() : readerSuppliedMemory_.data;
}
size_t
Store::ParsingBuffer::capacity() const
{
- return extraMemory_ ? (extraMemory_->length() + extraMemory_->spaceSize()) : readerSuppliedMemory_.length;
+ return extraMemory_.second ? (extraMemory_.first.length() + extraMemory_.first.spaceSize()) : readerSuppliedMemory_.length;
}
size_t
Store::ParsingBuffer::contentSize() const
{
- return extraMemory_ ? extraMemory_->length() : readerSuppliedMemoryContentSize_;
+ return extraMemory_.second ? extraMemory_.first.length() : readerSuppliedMemoryContentSize_;
}
void
@@ -56,10 +56,10 @@ Store::ParsingBuffer::appended(const char * const newBytes, const size_t newByte
assert(memory() + contentSize() == newBytes); // the new bytes start in our space
// and now we know that newBytes is not nil either
- if (extraMemory_)
- extraMemory_->rawAppendFinish(newBytes, newByteCount);
+ if (extraMemory_.second)
+ extraMemory_.first.rawAppendFinish(newBytes, newByteCount);
else
- readerSuppliedMemoryContentSize_ = *IncreaseSum(readerSuppliedMemoryContentSize_, newByteCount);
+ readerSuppliedMemoryContentSize_ = IncreaseSum(readerSuppliedMemoryContentSize_, newByteCount).first;
assert(contentSize() <= capacity()); // paranoid
}
@@ -68,8 +68,8 @@ void
Store::ParsingBuffer::consume(const size_t parsedBytes)
{
Assure(contentSize() >= parsedBytes); // more conservative than extraMemory_->consume()
- if (extraMemory_) {
- extraMemory_->consume(parsedBytes);
+ if (extraMemory_.second) {
+ extraMemory_.first.consume(parsedBytes);
} else {
readerSuppliedMemoryContentSize_ -= parsedBytes;
if (parsedBytes && readerSuppliedMemoryContentSize_)
@@ -81,8 +81,8 @@ StoreIOBuffer
Store::ParsingBuffer::space()
{
const auto size = spaceSize();
- const auto start = extraMemory_ ?
- extraMemory_->rawAppendStart(size) :
+ const auto start = extraMemory_.second ?
+ extraMemory_.first.rawAppendStart(size) :
(readerSuppliedMemory_.data + readerSuppliedMemoryContentSize_);
return StoreIOBuffer(spaceSize(), 0, start);
}
@@ -110,22 +110,23 @@ void
Store::ParsingBuffer::growSpace(const size_t minimumSpaceSize)
{
const auto capacityIncreaseAttempt = IncreaseSum(contentSize(), minimumSpaceSize);
- if (!capacityIncreaseAttempt)
+ if (!capacityIncreaseAttempt.second)
throw TextException(ToSBuf("no support for a single memory block of ", contentSize(), '+', minimumSpaceSize, " bytes"), Here());
- const auto newCapacity = *capacityIncreaseAttempt;
+ const auto newCapacity = capacityIncreaseAttempt.first;
if (newCapacity <= capacity())
return; // already have enough space; no reallocation is needed
debugs(90, 7, "growing to provide " << minimumSpaceSize << " in " << *this);
- if (extraMemory_) {
- extraMemory_->reserveCapacity(newCapacity);
+ if (extraMemory_.second) {
+ extraMemory_.first.reserveCapacity(newCapacity);
} else {
SBuf newStorage;
newStorage.reserveCapacity(newCapacity);
newStorage.append(readerSuppliedMemory_.data, readerSuppliedMemoryContentSize_);
- extraMemory_ = std::move(newStorage);
+ extraMemory_.first = std::move(newStorage);
+ extraMemory_.second = true;
}
Assure(spaceSize() >= minimumSpaceSize);
}
@@ -133,14 +134,14 @@ Store::ParsingBuffer::growSpace(const size_t minimumSpaceSize)
SBuf
Store::ParsingBuffer::toSBuf() const
{
- return extraMemory_ ? *extraMemory_ : SBuf(content().data, content().length);
+ return extraMemory_.second ? extraMemory_.first : SBuf(content().data, content().length);
}
size_t
Store::ParsingBuffer::spaceSize() const
{
- if (extraMemory_)
- return extraMemory_->spaceSize();
+ if (extraMemory_.second)
+ return extraMemory_.first.spaceSize();
assert(readerSuppliedMemoryContentSize_ <= readerSuppliedMemory_.length);
return readerSuppliedMemory_.length - readerSuppliedMemoryContentSize_;
@@ -169,12 +170,12 @@ Store::ParsingBuffer::packBack()
result.length = bytesToPack;
Assure(result.data);
- if (!extraMemory_) {
+ if (!extraMemory_.second) {
// no accumulated bytes copying because they are in readerSuppliedMemory_
debugs(90, 7, "quickly exporting " << result.length << " bytes via " << readerSuppliedMemory_);
} else {
- debugs(90, 7, "slowly exporting " << result.length << " bytes from " << extraMemory_->id << " back into " << readerSuppliedMemory_);
- memmove(result.data, extraMemory_->rawContent(), result.length);
+ debugs(90, 7, "slowly exporting " << result.length << " bytes from " << extraMemory_.first.id << " back into " << readerSuppliedMemory_);
+ memmove(result.data, extraMemory_.first.rawContent(), result.length);
}
return result;
@@ -185,9 +186,9 @@ Store::ParsingBuffer::print(std::ostream &os) const
{
os << "size=" << contentSize();
- if (extraMemory_) {
+ if (extraMemory_.second) {
os << " capacity=" << capacity();
- os << " extra=" << extraMemory_->id;
+ os << " extra=" << extraMemory_.first.id;
}
// report readerSuppliedMemory_ (if any) even if we are no longer using it
diff --git a/src/store/ParsingBuffer.h b/src/store/ParsingBuffer.h
index b8aa957..b473ac6 100644
--- a/src/store/ParsingBuffer.h
+++ b/src/store/ParsingBuffer.h
@@ -112,7 +112,7 @@ private:
/// our internal buffer that takes over readerSuppliedMemory_ when the
/// latter becomes full and more memory is needed
- std::optional<SBuf> extraMemory_;
+ std::pair<SBuf, bool> extraMemory_ = std::make_pair(SBuf(), false);
};
inline std::ostream &
diff --git a/src/store_client.cc b/src/store_client.cc
index 383aac8..0236274 100644
--- a/src/store_client.cc
+++ b/src/store_client.cc
@@ -10,6 +10,7 @@
#include "squid.h"
#include "base/AsyncCbdataCalls.h"
+#include "base/Assure.h"
#include "event.h"
#include "globals.h"
#include "HttpReply.h"
@@ -118,24 +119,14 @@ store_client::finishCallback()
// pointers. Some other legacy code expects "correct" result.offset even
// when there is no body to return. Accommodate all those expectations.
auto result = StoreIOBuffer(0, copyInto.offset, nullptr);
- if (object_ok && parsingBuffer && parsingBuffer->contentSize())
- result = parsingBuffer->packBack();
+ if (object_ok && parsingBuffer.second && parsingBuffer.first.contentSize())
+ result = parsingBuffer.first.packBack();
result.flags.error = object_ok ? 0 : 1;
- // TODO: Move object_ok handling above into this `if` statement.
- if (object_ok) {
- // works for zero hdr_sz cases as well; see also: nextHttpReadOffset()
- discardableHttpEnd_ = NaturalSum<int64_t>(entry->mem().baseReply().hdr_sz, result.offset, result.length).value();
- } else {
- // object_ok is sticky, so we will not be able to use any response bytes
- discardableHttpEnd_ = entry->mem().endOffset();
- }
- debugs(90, 7, "with " << result << "; discardableHttpEnd_=" << discardableHttpEnd_);
-
// no HTTP headers and no body bytes (but not because there was no space)
atEof_ = !sendingHttpHeaders() && !result.length && copyInto.length;
- parsingBuffer.reset();
+ parsingBuffer.second = false;
++answers;
STCB *temphandler = _callback.callback_handler;
@@ -228,7 +219,9 @@ store_client::copy(StoreEntry * anEntry,
// when we already can respond with HTTP headers.
Assure(!copyInto.offset || answeredOnce());
- parsingBuffer.emplace(copyInto);
+ parsingBuffer.first = Store::ParsingBuffer(copyInto);
+ parsingBuffer.second = true;
+
discardableHttpEnd_ = nextHttpReadOffset();
debugs(90, 7, "discardableHttpEnd_=" << discardableHttpEnd_);
@@ -454,14 +447,14 @@ store_client::canReadFromMemory() const
const auto &mem = entry->mem();
const auto memReadOffset = nextHttpReadOffset();
return mem.inmem_lo <= memReadOffset && memReadOffset < mem.endOffset() &&
- parsingBuffer->spaceSize();
+ parsingBuffer.first.spaceSize();
}
/// The offset of the next stored HTTP response byte wanted by the client.
int64_t
store_client::nextHttpReadOffset() const
{
- Assure(parsingBuffer);
+ Assure(parsingBuffer.second);
const auto &mem = entry->mem();
const auto hdr_sz = mem.baseReply().hdr_sz;
// Certain SMP cache manager transactions do not store HTTP headers in
@@ -469,7 +462,7 @@ store_client::nextHttpReadOffset() const
// In such cases, hdr_sz ought to be zero. In all other (known) cases,
// mem_hdr contains HTTP response headers (positive hdr_sz if parsed)
// followed by HTTP response body. This code math accommodates all cases.
- return NaturalSum<int64_t>(hdr_sz, copyInto.offset, parsingBuffer->contentSize()).value();
+ return NaturalSum<int64_t>(hdr_sz, copyInto.offset, parsingBuffer.first.contentSize()).first;
}
/// Copies at least some of the requested body bytes from MemObject memory,
@@ -478,13 +471,13 @@ store_client::nextHttpReadOffset() const
void
store_client::readFromMemory()
{
- Assure(parsingBuffer);
- const auto readInto = parsingBuffer->space().positionAt(nextHttpReadOffset());
+ Assure(parsingBuffer.second);
+ const auto readInto = parsingBuffer.first.space().positionAt(nextHttpReadOffset());
debugs(90, 3, "copying HTTP body bytes from memory into " << readInto);
const auto sz = entry->mem_obj->data_hdr.copy(readInto);
Assure(sz > 0); // our canReadFromMemory() precondition guarantees that
- parsingBuffer->appended(readInto.data, sz);
+ parsingBuffer.first.appended(readInto.data, sz);
}
void
@@ -497,7 +490,7 @@ store_client::fileRead()
flags.disk_io_pending = true;
// mem->swap_hdr_sz is zero here during initial read(s)
- const auto nextStoreReadOffset = NaturalSum<int64_t>(mem->swap_hdr_sz, nextHttpReadOffset()).value();
+ const auto nextStoreReadOffset = NaturalSum<int64_t>(mem->swap_hdr_sz, nextHttpReadOffset()).first;
// XXX: If fileRead() is called when we do not yet know mem->swap_hdr_sz,
// then we must start reading from disk offset zero to learn it: we cannot
@@ -522,10 +515,10 @@ store_client::fileRead()
// * performance effects of larger disk reads may be negative somewhere.
const decltype(StoreIOBuffer::length) maxReadSize = SM_PAGE_SIZE;
- Assure(parsingBuffer);
+ Assure(parsingBuffer.second);
// also, do not read more than we can return (via a copyInto.length buffer)
const auto readSize = std::min(copyInto.length, maxReadSize);
- lastDiskRead = parsingBuffer->makeSpace(readSize).positionAt(nextStoreReadOffset);
+ lastDiskRead = parsingBuffer.first.makeSpace(readSize).positionAt(nextStoreReadOffset);
debugs(90, 5, "into " << lastDiskRead);
storeRead(swapin_sio,
@@ -540,13 +533,12 @@ store_client::fileRead()
void
store_client::readBody(const char * const buf, const ssize_t lastIoResult)
{
- int parsed_header = 0;
Assure(flags.disk_io_pending);
flags.disk_io_pending = false;
assert(_callback.pending());
- Assure(parsingBuffer);
- debugs(90, 3, "got " << lastIoResult << " using " << *parsingBuffer);
+ Assure(parsingBuffer.second);
+ debugs(90, 3, "got " << lastIoResult << " using " << parsingBuffer.first);
if (lastIoResult < 0)
return fail();
@@ -560,7 +552,7 @@ store_client::readBody(const char * const buf, const ssize_t lastIoResult)
assert(lastDiskRead.data == buf);
lastDiskRead.length = lastIoResult;
- parsingBuffer->appended(buf, lastIoResult);
+ parsingBuffer.first.appended(buf, lastIoResult);
// we know swap_hdr_sz by now and were reading beyond swap metadata because
// readHead() would have been called otherwise (to read swap metadata)
@@ -589,13 +581,12 @@ store_client::handleBodyFromDisk()
if (!answeredOnce()) {
// All on-disk responses have HTTP headers. First disk body read(s)
// include HTTP headers that we must parse (if needed) and skip.
- const auto haveHttpHeaders = entry->mem_obj->baseReply().pstate == Http::Message::psParsed;
+ const auto haveHttpHeaders = entry->mem_obj->baseReply().pstate == psParsed;
if (!haveHttpHeaders && !parseHttpHeadersFromDisk())
return;
skipHttpHeadersFromDisk();
}
- const HttpReply *rep = entry->getReply();
noteNews();
}
@@ -626,8 +617,6 @@ store_client::maybeWriteFromDiskToMemory(const StoreIOBuffer &httpResponsePart)
}
}
-}
-
void
store_client::fail()
{
@@ -735,20 +724,20 @@ store_client::readHeader(char const *buf, ssize_t len)
if (!object_ok)
return;
- Assure(parsingBuffer);
- debugs(90, 3, "got " << len << " using " << *parsingBuffer);
+ Assure(parsingBuffer.second);
+ debugs(90, 3, "got " << len << " using " << parsingBuffer.first);
if (len < 0)
return fail();
- Assure(!parsingBuffer->contentSize());
- parsingBuffer->appended(buf, len);
+ Assure(!parsingBuffer.first.contentSize());
+ parsingBuffer.first.appended(buf, len);
if (!unpackHeader(buf, len)) {
fail();
return;
}
- parsingBuffer->consume(mem->swap_hdr_sz);
- maybeWriteFromDiskToMemory(parsingBuffer->content());
+ parsingBuffer.first.consume(mem->swap_hdr_sz);
+ maybeWriteFromDiskToMemory(parsingBuffer.first.content());
handleBodyFromDisk();
}
@@ -1020,8 +1009,9 @@ store_client::parseHttpHeadersFromDisk()
// cache a header that we cannot parse and get here. Same for MemStore.
debugs(90, DBG_CRITICAL, "ERROR: Cannot parse on-disk HTTP headers" <<
Debug::Extra << "exception: " << CurrentException <<
- Debug::Extra << "raw input size: " << parsingBuffer->contentSize() << " bytes" <<
- Debug::Extra << "current buffer capacity: " << parsingBuffer->capacity() << " bytes");
+ Debug::Extra << "raw input size: " << parsingBuffer.first.contentSize() << " bytes" <<
+ Debug::Extra << "current buffer capacity: " << parsingBuffer.first.capacity() << " bytes");
+
fail();
return false;
}
@@ -1032,10 +1022,10 @@ store_client::parseHttpHeadersFromDisk()
bool
store_client::tryParsingHttpHeaders()
{
- Assure(parsingBuffer);
+ Assure(parsingBuffer.second);
Assure(!copyInto.offset); // otherwise, parsingBuffer cannot have HTTP response headers
- auto &adjustableReply = entry->mem().adjustableBaseReply();
- if (adjustableReply.parseTerminatedPrefix(parsingBuffer->c_str(), parsingBuffer->contentSize()))
+ auto &adjustableReply = entry->mem().baseReply();
+ if (adjustableReply.parseTerminatedPrefix(parsingBuffer.first.c_str(), parsingBuffer.first.contentSize()))
return true;
// TODO: Optimize by checking memory as well. For simplicity sake, we
@@ -1052,12 +1042,12 @@ store_client::skipHttpHeadersFromDisk()
{
const auto hdr_sz = entry->mem_obj->baseReply().hdr_sz;
Assure(hdr_sz > 0); // all on-disk responses have HTTP headers
- if (Less(parsingBuffer->contentSize(), hdr_sz)) {
- debugs(90, 5, "discovered " << hdr_sz << "-byte HTTP headers in memory after reading some of them from disk: " << *parsingBuffer);
- parsingBuffer->consume(parsingBuffer->contentSize()); // skip loaded HTTP header prefix
+ if (Less(parsingBuffer.first.contentSize(), hdr_sz)) {
+ debugs(90, 5, "discovered " << hdr_sz << "-byte HTTP headers in memory after reading some of them from disk: " << parsingBuffer.first);
+ parsingBuffer.first.consume(parsingBuffer.first.contentSize()); // skip loaded HTTP header prefix
} else {
- parsingBuffer->consume(hdr_sz); // skip loaded HTTP headers
- const auto httpBodyBytesAfterHeader = parsingBuffer->contentSize(); // may be zero
+ parsingBuffer.first.consume(hdr_sz); // skip loaded HTTP headers
+ const auto httpBodyBytesAfterHeader = parsingBuffer.first.contentSize(); // may be zero
Assure(httpBodyBytesAfterHeader <= copyInto.length);
debugs(90, 5, "read HTTP body prefix: " << httpBodyBytesAfterHeader);
}
diff --git a/src/urn.cc b/src/urn.cc
index 9f5e89d..ad42b74 100644
--- a/src/urn.cc
+++ b/src/urn.cc
@@ -238,7 +238,7 @@ urnHandleReply(void *data, StoreIOBuffer result)
return;
}
-+ urnState->parsingBuffer.appended(result.data, result.length);
+ urnState->parsingBuffer.appended(result.data, result.length);
/* If we haven't received the entire object (urn), copy more */
if (!urnState->sc->atEof()) {
--
2.39.3