diff --git a/src/coreclr/jit/fgwasm.cpp b/src/coreclr/jit/fgwasm.cpp
index d55db54b0065d4..0268069f06e746 100644
--- a/src/coreclr/jit/fgwasm.cpp
+++ b/src/coreclr/jit/fgwasm.cpp
@@ -632,12 +632,28 @@ class Scc
             transferBlock = m_comp->fgSplitEdge(pred, header);
         }
 
-        GenTree* const   targetIndex     = m_comp->gtNewIconNode(headerNumber);
-        GenTree* const   storeControlVar = m_comp->gtNewStoreLclVarNode(controlVarNum, targetIndex);
-        Statement* const assignStmt      = m_comp->fgNewStmtNearEnd(transferBlock, storeControlVar);
+        GenTree* const targetIndex     = m_comp->gtNewIconNode(headerNumber);
+        GenTree* const storeControlVar = m_comp->gtNewStoreLclVarNode(controlVarNum, targetIndex);
 
-        m_comp->gtSetStmtInfo(assignStmt);
-        m_comp->fgSetStmtSeq(assignStmt);
+        if (transferBlock->IsLIR())
+        {
+            LIR::Range range = LIR::SeqTree(m_comp, storeControlVar);
+
+            if (transferBlock->isEmpty())
+            {
+                LIR::AsRange(transferBlock).InsertAtEnd(std::move(range));
+            }
+            else
+            {
+                LIR::InsertBeforeTerminator(transferBlock, std::move(range));
+            }
+        }
+        else
+        {
+            Statement* const assignStmt = m_comp->fgNewStmtNearEnd(transferBlock, storeControlVar);
+
+            m_comp->gtSetStmtInfo(assignStmt);
+            m_comp->fgSetStmtSeq(assignStmt);
+        }
 
         m_comp->fgReplaceJumpTarget(transferBlock, header, dispatcher);
     }
@@ -675,12 +691,23 @@ class Scc
             new (m_comp, CMK_BasicBlock) BBswtDesc(succs, numHeaders, cases, numHeaders, true);
         dispatcher->SetSwitch(swtDesc);
 
-        GenTree* const   controlVar = m_comp->gtNewLclvNode(controlVarNum, TYP_INT);
-        GenTree* const   switchNode = m_comp->gtNewOperNode(GT_SWITCH, TYP_VOID, controlVar);
-        Statement* const switchStmt = m_comp->fgNewStmtAtEnd(dispatcher, switchNode);
+        GenTree* const controlVar = m_comp->gtNewLclvNode(controlVarNum, TYP_INT);
+        GenTree* const switchNode = m_comp->gtNewOperNode(GT_SWITCH, TYP_VOID, controlVar);
+
+        assert(dispatcher->isEmpty());
 
-        m_comp->gtSetStmtInfo(switchStmt);
-        m_comp->fgSetStmtSeq(switchStmt);
+        if (dispatcher->IsLIR())
+        {
+            LIR::Range range = LIR::SeqTree(m_comp, switchNode);
+            LIR::AsRange(dispatcher).InsertAtEnd(std::move(range));
+        }
+        else
+        {
+            Statement* const switchStmt = m_comp->fgNewStmtAtEnd(dispatcher, switchNode);
+
+            m_comp->gtSetStmtInfo(switchStmt);
+            m_comp->fgSetStmtSeq(switchStmt);
+        }
     }
 
     // Handle nested Sccs
diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h
index 0726b9b9f8846f..8839b713a9bea4 100644
--- a/src/coreclr/jit/gentree.h
+++ b/src/coreclr/jit/gentree.h
@@ -5611,7 +5611,7 @@ struct GenTreeCall final : public GenTree
             return WellKnownArg::VirtualStubCell;
         }
 
-#if defined(TARGET_ARMARCH) || defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
+#if defined(TARGET_ARMARCH) || defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64) || defined(TARGET_WASM)
         // For ARM architectures, we always use an indirection cell for R2R calls.
         if (IsR2RRelativeIndir() && !IsDelegateInvoke())
         {
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 118d7b75b320e1..d792c56c20f3d7 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -1548,7 +1548,7 @@ void Lowering::LowerArg(GenTreeCall* call, CallArg* callArg)
     JITDUMP("Passed in ");
     DBEXEC(comp->verbose, abiInfo.Dump());
 
-#if !defined(TARGET_64BIT)
+#if !defined(TARGET_64BIT) && !defined(TARGET_WASM)
     if (comp->opts.compUseSoftFP && arg->TypeIs(TYP_DOUBLE))
     {
         // Unlike TYP_LONG we do no decomposition for doubles, yet we maintain
@@ -1582,7 +1582,7 @@ void Lowering::LowerArg(GenTreeCall* call, CallArg* callArg)
         JITDUMP("Transformed long arg on 32-bit to FIELD_LIST node\n");
     }
 
-#endif
+#endif // !defined(TARGET_64BIT) && !defined(TARGET_WASM)
 
 #if FEATURE_ARG_SPLIT
     // Structs can be split into register(s) and stack on some targets
@@ -4172,8 +4172,8 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
 {
     assert(cmp->gtGetOp2()->IsIntegralConst());
 
-    GenTree*       op1 = cmp->gtGetOp1();
-    GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon();
+    GenTree*             op1 = cmp->gtGetOp1();
+    GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon();
 
 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
@@ -4206,7 +4206,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
         return false;
     };
 
-    ssize_t op2Value = op2->IconValue();
+    INT64 op2Value = op2->IntegralValue();
 
 #ifdef TARGET_XARCH
     var_types op1Type = op1->TypeGet();
@@ -4260,7 +4260,7 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
                 bool cmpEq = cmp->OperIs(GT_EQ);
 
                 cmp->SetOperRaw(cmpEq ? GT_TEST_EQ : GT_TEST_NE);
-                op2->SetIconValue(0xff);
+                op2->SetIntegralValue(0xff);
                 op2->gtType = castOp->gtType;
 #else
                 castOp->gtType = castToType;
@@ -4295,10 +4295,14 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
         // If we don't have a 0 compare we can get one by transforming ((x AND mask) EQ|NE mask)
         // into ((x AND mask) NE|EQ 0) when mask is a single bit.
         //
-        if ((op2Value != 0) && isPow2(static_cast<target_size_t>(op2Value)) && andOp2->IsIntegralConst(op2Value))
+        // TODO-Wasm: would like to use
+        //    andOp2->IsIntegralValue(op2Value);
+        //
+        if ((op2Value != 0) && genExactlyOneBit(op2Value) && andOp2->IsIntegralConst() &&
+            (andOp2->AsIntConCommon()->IntegralValue() == op2Value))
         {
             op2Value = 0;
-            op2->SetIconValue(0);
+            op2->SetIntegralValue(0);
             cmp->SetOperRaw(GenTree::ReverseRelop(cmp->OperGet()));
         }
 
@@ -8176,6 +8180,7 @@ bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod)
     return false;
 }
 
+//------------------------------------------------------------------------
 // LowerConstIntDivOrMod: Transform integer GT_DIV/GT_MOD nodes with a power of 2
 // const divisor into equivalent but faster sequences.
 //
@@ -8211,6 +8216,14 @@ bool Lowering::TryLowerConstIntDivOrMod(GenTree* node, GenTree** nextNode)
     assert(!node->OperIs(GT_MOD));
 #endif // TARGET_ARM64
 
+#if defined(TARGET_WASM)
+    // TODO-Wasm: evaluate if this is worth doing for Wasm, since some cases will increase
+    // code size and the underlying engine may do something similar. If it is worth doing,
+    // fix the code below to work properly for a 32 bit target that supports 64 bit math.
+    //
+    return false;
+#endif // TARGET_WASM
+
     if (!divisor->IsCnsIntOrI())
     {
         return false; // no transformations to make
@@ -8450,6 +8463,7 @@ bool Lowering::TryLowerConstIntDivOrMod(GenTree* node, GenTree** nextNode)
     *nextNode = newDivMod->gtNext;
     return true;
 }
+
 //------------------------------------------------------------------------
 // LowerSignedDivOrMod: transform integer GT_DIV/GT_MOD nodes with a power of 2
 // const divisor into equivalent but faster sequences.
@@ -8975,8 +8989,14 @@ void Lowering::FindInducedParameterRegisterLocals()
                 // We always use the full width for integer registers even if the
                 // width is shorter, because various places in the JIT will type
                 // accesses larger to generate smaller code.
+
+#ifdef TARGET_WASM
+                var_types fullWidthType = TYP_LONG;
+#else
+                var_types fullWidthType = TYP_I_IMPL;
+#endif
                 var_types registerType =
-                    genIsValidIntReg(regSegment->GetRegister()) ? TYP_I_IMPL : regSegment->GetRegisterType();
+                    genIsValidIntReg(regSegment->GetRegister()) ? fullWidthType : regSegment->GetRegisterType();
                 if ((registerType == TYP_I_IMPL) && varTypeIsGC(fld))
                 {
                     registerType = fld->TypeGet();
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index fd688eea61214b..c4b67079d6d621 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -7080,7 +7080,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
         case GT_MUL:
             noway_assert(op2 != nullptr);
 
-#ifndef TARGET_64BIT
+#if !defined(TARGET_64BIT) && !defined(TARGET_WASM)
             if (typ == TYP_LONG)
             {
                 // For (long)int1 * (long)int2, we dont actually do the
@@ -7115,7 +7115,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
                     goto USE_HELPER_FOR_ARITH;
                 }
             }
-#endif // !TARGET_64BIT
+#endif // !defined(TARGET_64BIT) && !defined(TARGET_WASM)
             break;
 
         case GT_ARR_LENGTH:
@@ -7191,7 +7191,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
                 return fgMorphSmpOp(tree, mac);
             }
 
-#ifndef TARGET_64BIT
+#if !defined(TARGET_64BIT) && !defined(TARGET_WASM)
             if (typ == TYP_LONG)
             {
                 helper = CORINFO_HELP_LDIV;
@@ -7205,12 +7205,12 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
                 goto USE_HELPER_FOR_ARITH;
             }
 #endif
-#endif // !TARGET_64BIT
+#endif // !defined(TARGET_64BIT) && !defined(TARGET_WASM)
 
             break;
 
         case GT_UDIV:
 
-#ifndef TARGET_64BIT
+#if !defined(TARGET_64BIT) && !defined(TARGET_WASM)
             if (typ == TYP_LONG)
             {
                 helper = CORINFO_HELP_ULDIV;
@@ -7223,7 +7223,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
                 goto USE_HELPER_FOR_ARITH;
             }
 #endif
-#endif // TARGET_64BIT
+#endif // !defined(TARGET_64BIT) && !defined(TARGET_WASM)
 
             break;
 
         case GT_MOD:
@@ -7331,7 +7331,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
                 }
             }
 
-#ifndef TARGET_64BIT
+#if !defined(TARGET_64BIT) && !defined(TARGET_WASM)
             if (typ == TYP_LONG)
             {
                 helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD;
@@ -7353,7 +7353,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
                 }
             }
 #endif
-#endif // !TARGET_64BIT
+#endif // !defined(TARGET_64BIT) && !defined(TARGET_WASM)
 
             if (tree->OperIs(GT_UMOD) && op2->IsIntegralConstUnsignedPow2())
             {
@@ -7650,9 +7650,9 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
         }
     }
 
-#ifndef TARGET_64BIT
+#if !defined(TARGET_64BIT) && !defined(TARGET_WASM)
 DONE_MORPHING_CHILDREN:
-#endif // !TARGET_64BIT
+#endif // !defined(TARGET_64BIT) && !defined(TARGET_WASM)
 
     gtUpdateNodeOperSideEffects(tree);
 
@@ -7888,14 +7888,14 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
 
         case GT_MUL:
 
-#ifndef TARGET_64BIT
+#if !defined(TARGET_64BIT) && !defined(TARGET_WASM)
             if (typ == TYP_LONG)
             {
                 // This must be GTF_MUL_64RSLT
                 INDEBUG(tree->AsOp()->DebugCheckLongMul());
                 return tree;
             }
-#endif // TARGET_64BIT
+#endif // !defined(TARGET_64BIT) && !defined(TARGET_WASM)
             goto CM_OVF_OP;
 
         case GT_SUB:
@@ -10379,6 +10379,18 @@ GenTree* Compiler::fgOptimizeAddition(GenTreeOp* add)
 GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul)
 {
     assert(mul->OperIs(GT_MUL));
+
+#ifdef TARGET_WASM
+    // TODO-WASM: code below doesn't support a 32 bit target with 64 bit integer ops.
+    // For example, for wasm32 op2->IsCnsIntOrI() will return false for long constants,
+    // since both "int" and "I" are int32.
+    //
+    if (mul->TypeGet() == TYP_LONG)
+    {
+        return nullptr;
+    }
+#endif
+
     assert(varTypeIsIntOrI(mul) || varTypeIsFloating(mul));
     assert(!mul->gtOverflow());
 
@@ -15287,14 +15299,14 @@ GenTree* Compiler::fgMorphReduceAddOps(GenTree* tree)
         return tree;
     }
 
-#ifndef TARGET_64BIT
+#if !defined(TARGET_64BIT) && !defined(TARGET_WASM)
     // Transforming 64-bit ADD to 64-bit MUL on 32-bit system results in replacing
     // ADD ops with a helper function call. Don't apply optimization in that case.
    if (tree->TypeIs(TYP_LONG))
     {
         return tree;
     }
-#endif
+#endif // !defined(TARGET_64BIT) && !defined(TARGET_WASM)
 
     GenTree* lclVarTree = tree->AsOp()->gtOp2;
     GenTree* consTree   = tree->AsOp()->gtOp1;
diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp
index da7f28a59a8a67..7e1034277a7f0c 100644
--- a/src/coreclr/jit/valuenum.cpp
+++ b/src/coreclr/jit/valuenum.cpp
@@ -11904,7 +11904,7 @@ void Compiler::fgValueNumberTreeConst(GenTree* tree)
     }
     else if ((typ == TYP_LONG) || (typ == TYP_ULONG))
     {
-        tree->gtVNPair.SetBoth(vnStore->VNForLongCon(INT64(tree->AsIntConCommon()->LngValue())));
+        tree->gtVNPair.SetBoth(vnStore->VNForLongCon(tree->AsIntConCommon()->IntegralValue()));
    }
     else
     {
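
Note on the GenTreeIntCon -> GenTreeIntConCommon and IconValue -> IntegralValue changes above: on a 32-bit pointer target like wasm32, the ssize_t behind IconValue is only 32 bits wide, so a TYP_LONG constant cannot round-trip through it. A minimal standalone sketch of the truncation hazard this avoids (plain C++, not JIT code; the variable names are illustrative only):

#include <cstdint>
#include <cstdio>

int main()
{
    // A TYP_LONG constant that needs all 64 bits.
    const int64_t longConst = INT64_C(0x100000001);

    // What reading it through a 32-bit "icon" value would do: truncate.
    const int32_t asIcon = static_cast<int32_t>(longConst);

    printf("full 64-bit value: %lld\n", (long long)longConst); // 4294967297
    printf("truncated to 32:   %d\n", asIcon);                 // 1 -- high bits lost
    return 0;
}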
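The rewritten condition in OptimizeConstCompare relies on this identity: when mask has exactly one bit set, (x AND mask) can only evaluate to 0 or mask, so ((x AND mask) EQ mask) is equivalent to ((x AND mask) NE 0) with the relop reversed, which is why op2 can be reset to 0. A quick standalone check of the identity, with ExactlyOneBit standing in for the JIT's genExactlyOneBit:

#include <cassert>
#include <cstdint>

// Stand-in for the JIT's genExactlyOneBit: true iff exactly one bit is set.
static bool ExactlyOneBit(uint64_t value)
{
    return (value != 0) && ((value & (value - 1)) == 0);
}

// When 'mask' has a single bit set, (x & mask) is either 0 or mask, so the
// compare against 'mask' becomes a compare against 0 with the relop reversed.
static bool MaskEqAsZeroTest(uint64_t x, uint64_t mask)
{
    assert(ExactlyOneBit(mask));
    return (x & mask) != 0; // reversed relop, zero on the right-hand side
}

int main()
{
    for (uint64_t x = 0; x < 256; x++)
    {
        for (int bit = 0; bit < 64; bit++)
        {
            const uint64_t mask = uint64_t(1) << bit;
            assert(((x & mask) == mask) == MaskEqAsZeroTest(x, mask));
        }
    }
    return 0;
}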
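For reference on the new TARGET_WASM early-out in TryLowerConstIntDivOrMod (skipped on the theory that the underlying Wasm engine may perform the same rewrites): in the unsigned case the power-of-2 transformations the pass would otherwise emit reduce to shift-and-mask identities. A sketch outside any JIT IR; the signed and "magic number" cases the real pass handles are deliberately omitted:

#include <cassert>
#include <cstdint>

// For unsigned x and divisor d == 2^k:
//   x / d  ==  x >> k
//   x % d  ==  x & (d - 1)
static uint32_t DivPow2(uint32_t x, unsigned k)
{
    return x >> k;
}

static uint32_t ModPow2(uint32_t x, unsigned k)
{
    return x & ((uint32_t(1) << k) - 1);
}

int main()
{
    for (uint32_t x = 0; x < 1000; x++)
    {
        for (unsigned k = 0; k < 31; k++)
        {
            const uint32_t d = uint32_t(1) << k;
            assert(x / d == DivPow2(x, k));
            assert(x % d == ModPow2(x, k));
        }
    }
    return 0;
}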