Merge pull request #3111 from 0dvictor/cas
Read Barrier Support for Atomic CAS on X86
andrewcraik authored Oct 4, 2018
2 parents e436fbd + b23b6ee commit f7c6132
Showing 3 changed files with 128 additions and 12 deletions.
5 changes: 4 additions & 1 deletion runtime/compiler/codegen/J9CodeGenerator.cpp
@@ -667,7 +667,10 @@ J9::CodeGenerator::lowerTreesPreChildrenVisit(TR::Node *parent, TR::TreeTop *tre
{
// J9
//
if (self()->comp()->useCompressedPointers())
// Hiding compressedrefs logic from CodeGen doesn't seem to be good practice; the evaluator always needs the uncompressedref node for the
// write barrier, so this part is deprecated. It'll be removed once P and Z update their corresponding evaluators.
static bool UseOldCompareAndSwapObject = (bool)feGetEnv("TR_UseOldCompareAndSwapObject");
if (self()->comp()->useCompressedPointers() && (UseOldCompareAndSwapObject || !TR::Compiler->target.cpu.isX86()))
{
TR::MethodSymbol *methodSymbol = parent->getSymbol()->castToMethodSymbol();
// In Java9 Unsafe could be the jdk.internal JNI method or the sun.misc ordinary method wrapper,
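
For context on why the new X86 path can skip this lowering: under compressed references a 64-bit object pointer is stored in a 4-byte heap slot after a right shift, and the new evaluator (inlineCompareAndSwapObjectNative, below) applies that shift itself before the 4-byte compare-and-exchange. A minimal sketch of the mapping, assuming pure shift-based compression with no heap-base offset; the shift parameter stands in for TR::Compiler->om.compressedReferenceShiftOffset():

#include <cstdint>

// Compress a full pointer into the 4-byte form kept in the heap slot.
inline uint32_t compressRef(uintptr_t ref, unsigned shift)
   {
   return static_cast<uint32_t>(ref >> shift);
   }

// Recover the full pointer from the compressed slot value.
inline uintptr_t decompressRef(uint32_t compressed, unsigned shift)
   {
   return static_cast<uintptr_t>(compressed) << shift;
   }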
15 changes: 5 additions & 10 deletions runtime/compiler/optimizer/InlinerTempForJ9.cpp
@@ -2076,10 +2076,13 @@ TR_J9InlinerPolicy::isInlineableJNI(TR_ResolvedMethod *method,TR::Node *callNode
!comp->fej9()->traceableMethodsCanBeInlined()))
return false;

if (method->convertToMethod()->isUnsafeWithObjectArg(comp))
if (method->convertToMethod()->isUnsafeWithObjectArg(comp) || method->convertToMethod()->isUnsafeCAS(comp))
{
// In Java 9 sun/misc/Unsafe methods are simple Java wrappers to JNI
// methods in jdk.internal, and the enum values above match both. Only
// return true for the methods that are native.
if (!TR::Compiler->om.canGenerateArraylets() || (callNode && callNode->isUnsafeGetPutCASCallOnNonArray()))
return true;
return method->isNative();
else
return false;
}
@@ -2127,14 +2130,6 @@ TR_J9InlinerPolicy::isInlineableJNI(TR_ResolvedMethod *method,TR::Node *callNode
case TR::sun_misc_Unsafe_fullFence:
return true;

case TR::sun_misc_Unsafe_compareAndSwapInt_jlObjectJII_Z:
case TR::sun_misc_Unsafe_compareAndSwapLong_jlObjectJJJ_Z:
case TR::sun_misc_Unsafe_compareAndSwapObject_jlObjectJjlObjectjlObject_Z:
// In Java9 sun/misc/Unsafe methods are simple Java wrappers to JNI
// methods in jdk.internal, and the enum values above match both. Only
// return true for the methods that are native.
return method->isNative();

case TR::sun_misc_Unsafe_staticFieldBase:
return false; // todo
case TR::sun_misc_Unsafe_staticFieldOffset:
120 changes: 119 additions & 1 deletion runtime/compiler/x/codegen/J9TreeEvaluator.cpp
@@ -9352,6 +9352,115 @@ static TR::Register* inlineIntrinsicIndexOf(TR::Node* node, bool isLatin1, TR::C
return result;
}

/**
* \brief
* Generate inlined instructions equivalent to sun/misc/Unsafe.compareAndSwapObject or jdk/internal/misc/Unsafe.compareAndSwapObject
*
* \param node
* The tree node
*
* \param cg
* The Code Generator
*
*/
static TR::Register* inlineCompareAndSwapObjectNative(TR::Node* node, TR::CodeGenerator* cg)
{
TR_ASSERT(!TR::Compiler->om.canGenerateArraylets(), "This evaluator does not support arraylets.");
cg->recursivelyDecReferenceCount(node->getChild(0)); // The Unsafe
TR::Node* objectNode = node->getChild(1);
TR::Node* offsetNode = node->getChild(2);
TR::Node* oldValueNode = node->getChild(3);
TR::Node* newValueNode = node->getChild(4);

TR::Register* object = cg->evaluate(objectNode);
TR::Register* offset = cg->evaluate(offsetNode);
TR::Register* oldValue = cg->evaluate(oldValueNode);
TR::Register* newValue = cg->evaluate(newValueNode);
TR::Register* result = cg->allocateRegister();
TR::Register* EAX = cg->allocateRegister();
TR::Register* tmp = cg->allocateRegister();

bool use64BitClasses = TR::Compiler->target.is64Bit() && !cg->comp()->useCompressedPointers();

if (TR::Compiler->target.is32Bit())
{
// Assume that the offset is positive and not pathologically large (i.e., it fits in 31 bits).
offset = offset->getLowOrder();
}

#ifdef OMR_GC_CONCURRENT_SCAVENGER
if (TR::Compiler->om.shouldGenerateReadBarriersForFieldLoads())
{
generateRegMemInstruction(LRegMem(use64BitClasses), node, tmp, generateX86MemoryReference(object, offset, 0, cg), cg);

auto begLabel = generateLabelSymbol(cg);
auto endLabel = generateLabelSymbol(cg);
auto rdbarLabel = generateLabelSymbol(cg);
begLabel->setStartInternalControlFlow();
endLabel->setEndInternalControlFlow();

auto deps = generateRegisterDependencyConditions((uint8_t)1, 1, cg);
deps->addPreCondition(tmp, TR::RealRegister::NoReg, cg);
deps->addPostCondition(tmp, TR::RealRegister::NoReg, cg);

generateLabelInstruction(LABEL, node, begLabel, cg);

generateRegMemInstruction(CMPRegMem(use64BitClasses), node, tmp, generateX86MemoryReference(cg->getVMThreadRegister(), offsetof(J9VMThread, evacuateBase), cg), cg);
generateLabelInstruction(JAE4, node, rdbarLabel, cg);

{
TR_OutlinedInstructionsGenerator og(rdbarLabel, node, cg);

generateRegMemInstruction(CMPRegMem(use64BitClasses), node, tmp, generateX86MemoryReference(cg->getVMThreadRegister(), offsetof(J9VMThread, evacuateTop), cg), cg);
generateLabelInstruction(JAE4, node, endLabel, cg);

generateRegMemInstruction(LEARegMem(), node, tmp, generateX86MemoryReference(object, offset, 0, cg), cg);
generateMemRegInstruction(SMemReg(), node, generateX86MemoryReference(cg->getVMThreadRegister(), offsetof(J9VMThread, floatTemp1), cg), tmp, cg);
generateHelperCallInstruction(node, TR_readBarrier, NULL, cg)->setNeedsGCMap(0xFF00FFFF);
generateLabelInstruction(JMP4, node, endLabel, cg);
}

generateLabelInstruction(LABEL, node, endLabel, deps, cg);
}
#endif

generateRegRegInstruction(MOVRegReg(), node, EAX, oldValue, cg);
generateRegRegInstruction(MOVRegReg(), node, tmp, newValue, cg);
if (TR::Compiler->om.compressedReferenceShiftOffset() != 0)
{
if (!oldValueNode->isNull())
{
generateRegImmInstruction(SHRRegImm1(), node, EAX, TR::Compiler->om.compressedReferenceShiftOffset(), cg);
}
if (!newValueNode->isNull())
{
generateRegImmInstruction(SHRRegImm1(), node, tmp, TR::Compiler->om.compressedReferenceShiftOffset(), cg);
}
}

auto deps = generateRegisterDependencyConditions((uint8_t)1, 1, cg);
deps->addPreCondition(EAX, TR::RealRegister::eax, cg);
deps->addPostCondition(EAX, TR::RealRegister::eax, cg);
generateMemRegInstruction(use64BitClasses ? LCMPXCHG8MemReg : LCMPXCHG4MemReg, node, generateX86MemoryReference(object, offset, 0, cg), tmp, deps, cg);
generateRegInstruction(SETE1Reg, node, result, cg);
generateRegRegInstruction(MOVZXReg4Reg1, node, result, result, cg);

// We could insert a runtime test for whether the write actually succeeded or not.
// However, since in practice it will almost always succeed, we do not want to
// penalize general runtime performance, especially as it is still correct to do
// a write barrier even if the store never actually happened.
TR::TreeEvaluator::VMwrtbarWithoutStoreEvaluator(node, objectNode, newValueNode, NULL, cg->generateScratchRegisterManager(), cg);

cg->stopUsingRegister(tmp);
cg->stopUsingRegister(EAX);
node->setRegister(result);
for (int32_t i = 1; i < node->getNumChildren(); i++)
{
cg->decReferenceCount(node->getChild(i));
}
return result;
}
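
To make the read-barrier sequence above easier to follow, here is a C-level sketch of what the emitted instructions do. The struct and helper below are stand-ins for the J9VMThread fields and the TR_readBarrier runtime helper; full-width loads are shown for simplicity, whereas the real sequence uses 4-byte operations under compressed references, and the boundary behaviour mirrors the two CMP/JAE pairs:

#include <cstdint>

// Stand-in for the J9VMThread fields referenced above; layout illustrative only.
struct VMThreadSketch
   {
   uintptr_t evacuateBase;
   uintptr_t evacuateTop;
   uintptr_t floatTemp1;
   };

// Placeholder for the TR_readBarrier helper: given the slot address in
// floatTemp1, it is assumed to forward the object out of the evacuate
// region and update the slot before the CAS re-reads it.
void readBarrierHelper(VMThreadSketch *vmThread) { /* runtime helper */ }

// C-level equivalent of the code emitted under OMR_GC_CONCURRENT_SCAVENGER.
void readBarrierCheck(VMThreadSketch *vmThread, uintptr_t object, uintptr_t offset)
   {
   uintptr_t value = *reinterpret_cast<uintptr_t *>(object + offset);    // LRegMem: load the slot
   if (value >= vmThread->evacuateBase && value < vmThread->evacuateTop) // CMP + JAE, twice
      {
      vmThread->floatTemp1 = object + offset; // LEA + SMemReg: pass the slot address
      readBarrierHelper(vmThread);            // outlined helper call
      }
   }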

/** Replaces a call to an Unsafe CAS method with inline instructions.
@return true if the call was replaced, false if it was not.

@@ -9789,8 +9898,17 @@ bool J9::X86::TreeEvaluator::VMinlineCallEvaluator(
break;
case TR::sun_misc_Unsafe_compareAndSwapObject_jlObjectJjlObjectjlObject_Z:
{
static bool UseOldCompareAndSwapObject = (bool)feGetEnv("TR_UseOldCompareAndSwapObject");
if(node->isSafeForCGToFastPathUnsafeCall())
return inlineCompareAndSwapNative(node, (TR::Compiler->target.is64Bit() && !comp->useCompressedPointers()) ? 8 : 4, true, cg);
{
if (UseOldCompareAndSwapObject)
return inlineCompareAndSwapNative(node, (TR::Compiler->target.is64Bit() && !comp->useCompressedPointers()) ? 8 : 4, true, cg);
else
{
inlineCompareAndSwapObjectNative(node, cg);
return true;
}
}
}
break;
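
Both hunks gate the change behind the same switch, using the common TR_ environment-variable idiom: the function-local static means feGetEnv is consulted only once, so setting TR_UseOldCompareAndSwapObject in the environment restores the previous inlineCompareAndSwapNative sequence for the life of the process. A minimal sketch of the idiom, with std::getenv standing in for feGetEnv:

#include <cstdlib>

// Read the switch once; subsequent calls reuse the cached answer.
static bool useOldCompareAndSwapObject()
   {
   static bool useOld = std::getenv("TR_UseOldCompareAndSwapObject") != nullptr;
   return useOld;
   }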
