Skip to content

Commit

Permalink
[#25439] DocDB: Fix potential segv issue while fetching txn status of…
Browse files Browse the repository at this point in the history
… sealed transaction

Summary:
While fetching the transaction status at the coordinator, if the transaction exists in the list of managed transactions, we access the iterator at a couple of places assuming we still hold the mutex. However, the lock can be released and re-acquired when dealing with sealed transactions, so accessing the earlier iterator after this release and re-acquisition of the lock isn't safe: the managed-transactions container may have changed in the interim, invalidating the iterator.

This diff fixes the issue by accessing the iterator only before any potential release and re-acquisition of the lock.
Jira: DB-14676

Test Plan: Jenkins

Reviewers: sergei

Reviewed By: sergei

Subscribers: ybase

Differential Revision: https://phorge.dev.yugabyte.com/D40889
  • Loading branch information
basavaraj29 committed Dec 28, 2024
1 parent 1b2ef0f commit 9b72ebf
Showing 1 changed file with 24 additions and 21 deletions.
45 changes: 24 additions & 21 deletions src/yb/tablet/transaction_coordinator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1261,19 +1261,30 @@ class TransactionCoordinator::Impl : public TransactionStateContext,
postponed_leader_actions_.leader_term = leader_term;
for (const auto& transaction_id : transaction_ids) {
auto id = VERIFY_RESULT(FullyDecodeTransactionId(transaction_id));
auto it = managed_transactions_.find(id);
std::vector<ExpectedTabletBatches> expected_tablet_batches;
bool known_txn = it != managed_transactions_.end();
auto txn_status_with_ht = known_txn
? VERIFY_RESULT(it->GetStatus(&expected_tablet_batches))
: TransactionStatusResult(TransactionStatus::ABORTED, HybridTime::kMax);
VLOG_WITH_PREFIX(4) << __func__ << ": " << id << " => " << txn_status_with_ht
<< ", last touch: " << it->last_touch();
if (txn_status_with_ht.status == TransactionStatus::SEALED) {
// TODO(dtxn) Avoid concurrent resolve
txn_status_with_ht = VERIFY_RESULT(ResolveSealedStatus(
id, txn_status_with_ht.status_time, expected_tablet_batches,
/* abort_if_not_replicated = */ false, &lock));
bool known_txn = false;
TransactionStatusResult txn_status_with_ht;
{
auto it = managed_transactions_.find(id);
std::vector<ExpectedTabletBatches> expected_tablet_batches;
known_txn = it != managed_transactions_.end();
txn_status_with_ht = known_txn
? VERIFY_RESULT(it->GetStatus(&expected_tablet_batches))
: TransactionStatusResult(TransactionStatus::ABORTED, HybridTime::kMax);
VLOG_WITH_PREFIX_AND_FUNC(4)
<< id << " => " << txn_status_with_ht
<< ", last touch: " << (known_txn ? it->last_touch() : HybridTime::kInvalid);
auto mutable_aborted_set_pb = response->add_aborted_subtxn_set();
if (txn_status_with_ht.status == TransactionStatus::COMMITTED ||
txn_status_with_ht.status == TransactionStatus::PENDING) {
*mutable_aborted_set_pb = it->GetAbortedSubtxnInfo()->pb();
response->add_min_active_txn_reuse_version(it->min_active_txn_reuse_version());
}
if (txn_status_with_ht.status == TransactionStatus::SEALED) {
// TODO(dtxn) Avoid concurrent resolve
txn_status_with_ht = VERIFY_RESULT(ResolveSealedStatus(
id, txn_status_with_ht.status_time, expected_tablet_batches,
/* abort_if_not_replicated = */ false, &lock));
}
}
if (!known_txn) {
if (!leader_safe_time) {
Expand All @@ -1293,14 +1304,6 @@ class TransactionCoordinator::Impl : public TransactionStateContext,
response->add_status(txn_status_with_ht.status);
response->add_status_hybrid_time(txn_status_with_ht.status_time.ToUint64());
StatusToPB(txn_status_with_ht.expected_deadlock_status, response->add_deadlock_reason());

auto mutable_aborted_set_pb = response->add_aborted_subtxn_set();
if (it != managed_transactions_.end() &&
(txn_status_with_ht.status == TransactionStatus::COMMITTED ||
txn_status_with_ht.status == TransactionStatus::PENDING)) {
*mutable_aborted_set_pb = it->GetAbortedSubtxnInfo()->pb();
response->add_min_active_txn_reuse_version(it->min_active_txn_reuse_version());
}
}
postponed_leader_actions.Swap(&postponed_leader_actions_);
}
Expand Down

0 comments on commit 9b72ebf

Please sign in to comment.