A RetroSearch Logo

Home - News ( United States | United Kingdom | Italy | Germany ) - Football scores

Search Query:

Showing content from https://llvm.org/doxygen/IRTranslator_8cpp_source.html below:

LLVM: lib/CodeGen/GlobalISel/IRTranslator.cpp Source File

65#include "llvm/IR/IntrinsicsAMDGPU.h" 95#define DEBUG_TYPE "irtranslator" 101 cl::desc

(

"Should enable CSE in irtranslator"

),

119

MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

123 if

(!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())

124

R << (

" (in function: "

+ MF.getName() +

")"

).str();

126 if

(TPC.isGlobalISelAbortEnabled())

143

DILocationVerifier() =

default

;

144

~DILocationVerifier() =

default

;

146 const Instruction

*getCurrentInst()

const

{

return

CurrInst; }

147 void

setCurrentInst(

const Instruction

*Inst) { CurrInst = Inst; }

154 assert

(getCurrentInst() &&

"Inserted instruction without a current MI"

);

159

<<

" was copied to "

<<

MI

);

165

(

MI

.getParent()->isEntryBlock() && !

MI

.getDebugLoc()) ||

166

(

MI

.isDebugInstr())) &&

167 "Line info was not transferred to all instructions"

);

190

IRTranslator::allocateVRegs(

const Value

&Val) {

191 auto

VRegsIt = VMap.findVRegs(Val);

192 if

(VRegsIt != VMap.vregs_end())

193 return

*VRegsIt->second;

194 auto

*Regs = VMap.getVRegs(Val);

195 auto

*Offsets = VMap.getOffsets(Val);

198

Offsets->empty() ? Offsets :

nullptr

);

199 for

(

unsigned

i = 0; i < SplitTys.

size

(); ++i)

205 auto

VRegsIt = VMap.findVRegs(Val);

206 if

(VRegsIt != VMap.vregs_end())

207 return

*VRegsIt->second;

210 return

*VMap.getVRegs(Val);

213 auto

*VRegs = VMap.getVRegs(Val);

214 auto

*Offsets = VMap.getOffsets(Val);

218 "Don't know how to create an empty vreg"

);

222

Offsets->empty() ? Offsets :

nullptr

);

224 if

(!isa<Constant>(Val)) {

225 for

(

auto

Ty : SplitTys)

232 auto

&

C

= cast<Constant>(Val);

234 while

(

auto

Elt =

C

.getAggregateElement(

Idx

++)) {

235 auto

EltRegs = getOrCreateVRegs(*Elt);

236 llvm::copy

(EltRegs, std::back_inserter(*VRegs));

239 assert

(SplitTys.size() == 1 &&

"unexpectedly split LLT"

);

241 bool Success

= translate(cast<Constant>(Val), VRegs->front());

246 R

<<

"unable to translate constant: "

<<

ore::NV

(

"Type"

, Val.

getType

());

255int

IRTranslator::getOrCreateFrameIndex(

const AllocaInst

&AI) {

256 auto

MapEntry = FrameIndices.find(&AI);

257 if

(MapEntry != FrameIndices.end())

258 return

MapEntry->second;

262

ElementSize * cast<ConstantInt>(AI.

getArraySize

())->getZExtValue();

265 Size

= std::max<uint64_t>(

Size

, 1u);

267 int

&FI = FrameIndices[&AI];

273 if

(

const StoreInst

*SI = dyn_cast<StoreInst>(&

I

))

274 return SI

->getAlign();

275 if

(

const LoadInst

*LI = dyn_cast<LoadInst>(&

I

))

276 return

LI->getAlign();

283 R

<<

"unable to translate memop: "

<<

ore::NV

(

"Opcode"

, &

I

);

290 assert

(

MBB

&&

"BasicBlock was not encountered before"

);

295 assert

(NewPred &&

"new predecessor must be a real MachineBasicBlock"

);

296

MachinePreds[Edge].push_back(NewPred);

303 return

U.getType()->getScalarType()->isBFloatTy() ||

305

return V->getType()->getScalarType()->isBFloatTy();

309bool

IRTranslator::translateBinaryOp(

unsigned

Opcode,

const User

&U,

318 Register

Op0 = getOrCreateVReg(*

U

.getOperand(0));

319 Register

Op1 = getOrCreateVReg(*

U

.getOperand(1));

322 if

(isa<Instruction>(U)) {

331bool

IRTranslator::translateUnaryOp(

unsigned

Opcode,

const User

&U,

336 Register

Op0 = getOrCreateVReg(*

U

.getOperand(0));

339 if

(isa<Instruction>(U)) {

348 return

translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);

351bool

IRTranslator::translateCompare(

const User

&U,

356 auto

*CI = cast<CmpInst>(&U);

357 Register

Op0 = getOrCreateVReg(*

U

.getOperand(0));

358 Register

Op1 = getOrCreateVReg(*

U

.getOperand(1));

363

MIRBuilder.

buildICmp

(Pred, Res, Op0, Op1, Flags);

371

MIRBuilder.

buildFCmp

(Pred, Res, Op0, Op1, Flags);

384

VRegs = getOrCreateVRegs(*Ret);

395 return

CLI->

lowerReturn

(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);

398void

IRTranslator::emitBranchForMergedCondition(

404 if

(

const CmpInst

*BOp = dyn_cast<CmpInst>(

Cond

)) {

407

Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();

410

Condition = InvertCond ?

FC

->getInversePredicate() :

FC

->getPredicate();

414

BOp->getOperand(1),

nullptr

,

TBB

, FBB, CurBB,

415

CurBuilder->getDebugLoc(), TProb, FProb);

416

SL->SwitchCases.push_back(CB);

424 nullptr

,

TBB

, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);

425

SL->SwitchCases.push_back(CB);

430 return I

->getParent() == BB;

434void

IRTranslator::findMergedConditions(

439 using namespace

PatternMatch;

440 assert

((Opc == Instruction::And || Opc == Instruction::Or) &&

441 "Expected Opc to be AND/OR"

);

447

findMergedConditions(NotCond,

TBB

, FBB, CurBB, SwitchBB, Opc, TProb, FProb,

453 const Value

*BOpOp0, *BOpOp1;

467 if

(BOpc == Instruction::And)

468

BOpc = Instruction::Or;

469 else if

(BOpc == Instruction::Or)

470

BOpc = Instruction::And;

476 bool

BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->

hasOneUse

();

480

emitBranchForMergedCondition(

Cond

,

TBB

, FBB, CurBB, SwitchBB, TProb, FProb,

491 if

(Opc == Instruction::Or) {

512 auto

NewTrueProb = TProb / 2;

513 auto

NewFalseProb = TProb / 2 + FProb;

515

findMergedConditions(BOpOp0,

TBB

, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,

516

NewFalseProb, InvertCond);

522

findMergedConditions(BOpOp1,

TBB

, FBB, TmpBB, SwitchBB, Opc, Probs[0],

523

Probs[1], InvertCond);

525 assert

(Opc == Instruction::And &&

"Unknown merge op!"

);

545 auto

NewTrueProb = TProb + FProb / 2;

546 auto

NewFalseProb = FProb / 2;

548

findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,

549

NewFalseProb, InvertCond);

555

findMergedConditions(BOpOp1,

TBB

, FBB, TmpBB, SwitchBB, Opc, Probs[0],

556

Probs[1], InvertCond);

560bool

IRTranslator::shouldEmitAsBranches(

561 const

std::vector<SwitchCG::CaseBlock> &Cases) {

563 if

(Cases.size() != 2)

568 if

((Cases[0].CmpLHS == Cases[1].CmpLHS &&

569

Cases[0].CmpRHS == Cases[1].CmpRHS) ||

570

(Cases[0].CmpRHS == Cases[1].CmpLHS &&

571

Cases[0].CmpLHS == Cases[1].CmpRHS)) {

577 if

(Cases[0].CmpRHS == Cases[1].CmpRHS &&

578

Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&

579

isa<Constant>(Cases[0].CmpRHS) &&

580

cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {

582

Cases[0].TrueBB == Cases[1].ThisBB)

585

Cases[0].FalseBB == Cases[1].ThisBB)

593 const BranchInst

&BrInst = cast<BranchInst>(U);

594 auto

&CurMBB = MIRBuilder.

getMBB

();

600

!CurMBB.isLayoutSuccessor(Succ0MBB))

601

MIRBuilder.

buildBr

(*Succ0MBB);

605

CurMBB.addSuccessor(&getMBB(*Succ));

631 using namespace

PatternMatch;

632 const Instruction

*CondI = dyn_cast<Instruction>(CondVal);

634

!BrInst.

hasMetadata

(LLVMContext::MD_unpredictable)) {

637 const Value

*BOp0, *BOp1;

639

Opcode = Instruction::And;

641

Opcode = Instruction::Or;

645

findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,

646

getEdgeProbability(&CurMBB, Succ0MBB),

647

getEdgeProbability(&CurMBB, Succ1MBB),

649 assert

(SL->SwitchCases[0].ThisBB == &CurMBB &&

"Unexpected lowering!"

);

652 if

(shouldEmitAsBranches(SL->SwitchCases)) {

654

emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);

655

SL->SwitchCases.erase(SL->SwitchCases.begin());

661 for

(

unsigned I

= 1, E = SL->SwitchCases.size();

I

!= E; ++

I

)

662

MF->

erase

(SL->SwitchCases[

I

].ThisBB);

664

SL->SwitchCases.clear();

671 nullptr

, Succ0MBB, Succ1MBB, &CurMBB,

672

CurBuilder->getDebugLoc());

676

emitSwitchCase(CB, &CurMBB, *CurBuilder);

683 if

(!FuncInfo.

BPI

) {

684

Src->addSuccessorWithoutProb(Dst);

688

Prob = getEdgeProbability(Src, Dst);

689

Src->addSuccessor(Dst, Prob);

695 const BasicBlock

*SrcBB = Src->getBasicBlock();

696 const BasicBlock

*DstBB = Dst->getBasicBlock();

697 if

(!FuncInfo.

BPI

) {

700 auto

SuccSize = std::max<uint32_t>(

succ_size

(SrcBB), 1);

707 using namespace

SwitchCG;

712

Clusters.reserve(

SI

.getNumCases());

713 for

(

const auto

&

I

:

SI

.cases()) {

715 assert

(Succ &&

"Could not find successor mbb in mapping"

);

720

Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));

733 if

(Clusters.empty()) {

740

SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB,

nullptr

,

nullptr

);

741

SL->findBitTestClusters(Clusters, &SI);

744 dbgs

() <<

"Case clusters: "

;

745 for

(

const

CaseCluster &

C

: Clusters) {

746 if

(

C

.Kind == CC_JumpTable)

748 if

(

C

.Kind == CC_BitTests)

751 C

.Low->getValue().print(

dbgs

(),

true

);

752 if

(

C

.Low !=

C

.High) {

754 C

.High->getValue().print(

dbgs

(),

true

);

761 assert

(!Clusters.empty());

765 auto

DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);

766

WorkList.push_back({SwitchMBB,

First

,

Last

,

nullptr

,

nullptr

, DefaultProb});

768 while

(!WorkList.empty()) {

769

SwitchWorkListItem

W

= WorkList.pop_back_val();

771 unsigned

NumClusters =

W

.LastCluster -

W

.FirstCluster + 1;

773 if

(NumClusters > 3 &&

776

splitWorkItem(WorkList, W,

SI

.getCondition(), SwitchMBB, MIB);

780 if

(!lowerSwitchWorkItem(W,

SI

.getCondition(), SwitchMBB, DefaultMBB, MIB))

790 using namespace

SwitchCG;

791 assert

(

W

.FirstCluster->Low->getValue().slt(

W

.LastCluster->Low->getValue()) &&

792 "Clusters not sorted?"

);

793 assert

(

W

.LastCluster -

W

.FirstCluster + 1 >= 2 &&

"Too small to split!"

);

795 auto

[LastLeft, FirstRight, LeftProb, RightProb] =

796

SL->computeSplitWorkItemInfo(W);

801 assert

(PivotCluster >

W

.FirstCluster);

802 assert

(PivotCluster <=

W

.LastCluster);

817 if

(FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&

818

FirstLeft->Low ==

W

.GE &&

819

(FirstLeft->High->getValue() + 1LL) == Pivot->

getValue

()) {

820

LeftMBB = FirstLeft->MBB;

823

FuncInfo.

MF

->

insert

(BBI, LeftMBB);

825

{LeftMBB, FirstLeft, LastLeft,

W

.GE, Pivot,

W

.DefaultProb / 2});

832 if

(FirstRight == LastRight && FirstRight->Kind == CC_Range &&

W

.LT &&

833

(FirstRight->High->getValue() + 1ULL) ==

W

.LT->getValue()) {

834

RightMBB = FirstRight->MBB;

837

FuncInfo.

MF

->

insert

(BBI, RightMBB);

839

{RightMBB, FirstRight, LastRight, Pivot,

W

.LT,

W

.DefaultProb / 2});

844

LeftMBB, RightMBB,

W

.MBB, MIB.

getDebugLoc

(), LeftProb,

847 if

(

W

.MBB == SwitchMBB)

848

emitSwitchCase(CB, SwitchMBB, MIB);

850

SL->SwitchCases.push_back(CB);

856 assert

(

JT

.Reg &&

"Should lower JT Header first!"

);

878 Register

SwitchOpReg = getOrCreateVReg(SValue);

880 auto

Sub = MIB.

buildSub

({SwitchTy}, SwitchOpReg, FirstCst);

888 JT

.Reg = Sub.getReg(0);

899 auto

Cst = getOrCreateVReg(

936 const auto

*CI = dyn_cast<ConstantInt>(CB.

CmpRHS

);

954 "Can only handle SLE ranges"

);

960 if

(cast<ConstantInt>(CB.

CmpLHS

)->isMinValue(

true

)) {

966 auto

Sub = MIB.

buildSub

({CmpTy}, CmpOpReg, CondLHS);

1001 bool

FallthroughUnreachable) {

1002 using namespace

SwitchCG;

1005

JumpTableHeader *JTH = &SL->JTCases[

I

->JTCasesIndex].first;

1011

CurMF->

insert

(BBI, JumpMBB);

1021 auto

JumpProb =

I

->Prob;

1022 auto

FallthroughProb = UnhandledProbs;

1030 if

(*SI == DefaultMBB) {

1031

JumpProb += DefaultProb / 2;

1032

FallthroughProb -= DefaultProb / 2;

1037

addMachineCFGPred({SwitchMBB->

getBasicBlock

(), (*SI)->getBasicBlock()},

1042 if

(FallthroughUnreachable)

1043

JTH->FallthroughUnreachable =

true

;

1045 if

(!JTH->FallthroughUnreachable)

1046

addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);

1047

addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);

1052

JTH->HeaderBB = CurMBB;

1053 JT

->Default = Fallthrough;

1056 if

(CurMBB == SwitchMBB) {

1057 if

(!emitJumpTableHeader(*JT, *JTH, CurMBB))

1059

JTH->Emitted =

true

;

1066 bool

FallthroughUnreachable,

1071 using namespace

SwitchCG;

1074 if

(

I

->Low ==

I

->High) {

1090

CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS,

I

->MBB, Fallthrough,

1091

CurMBB, MIB.

getDebugLoc

(),

I

->Prob, UnhandledProbs);

1093

emitSwitchCase(CB, SwitchMBB, MIB);

1103 Register

SwitchOpReg = getOrCreateVReg(*

B

.SValue);

1105 LLT

SwitchOpTy = MRI->

getType

(SwitchOpReg);

1107 auto

RangeSub = MIB.

buildSub

(SwitchOpTy, SwitchOpReg, MinValReg);

1112 LLT

MaskTy = SwitchOpTy;

1118 for

(

unsigned I

= 0, E =

B

.Cases.size();

I

!= E; ++

I

) {

1128 if

(SwitchOpTy != MaskTy)

1136 if

(!

B

.FallthroughUnreachable)

1137

addSuccessorWithProb(SwitchBB,

B

.Default,

B

.DefaultProb);

1138

addSuccessorWithProb(SwitchBB,

MBB

,

B

.Prob);

1142 if

(!

B

.FallthroughUnreachable) {

1146

RangeSub, RangeCst);

1166 if

(PopCount == 1) {

1169 auto

MaskTrailingZeros =

1174

}

else if

(PopCount == BB.

Range

) {

1176 auto

MaskTrailingOnes =

1183 auto

SwitchVal = MIB.

buildShl

(SwitchTy, CstOne, Reg);

1187 auto

AndOp = MIB.

buildAnd

(SwitchTy, SwitchVal, CstMask);

1194

addSuccessorWithProb(SwitchBB,

B

.TargetBB,

B

.ExtraProb);

1196

addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);

1214bool

IRTranslator::lowerBitTestWorkItem(

1220 bool

FallthroughUnreachable) {

1221 using namespace

SwitchCG;

1224

BitTestBlock *BTB = &SL->BitTestCases[

I

->BTCasesIndex];

1226 for

(BitTestCase &BTC : BTB->Cases)

1227

CurMF->

insert

(BBI, BTC.ThisBB);

1230

BTB->Parent = CurMBB;

1231

BTB->Default = Fallthrough;

1233

BTB->DefaultProb = UnhandledProbs;

1237 if

(!BTB->ContiguousRange) {

1238

BTB->Prob += DefaultProb / 2;

1239

BTB->DefaultProb -= DefaultProb / 2;

1242 if

(FallthroughUnreachable)

1243

BTB->FallthroughUnreachable =

true

;

1246 if

(CurMBB == SwitchMBB) {

1247

emitBitTestHeader(*BTB, SwitchMBB);

1248

BTB->Emitted =

true

;

1258 using namespace

SwitchCG;

1262 if

(++BBI != FuncInfo.

MF

->

end

())

1271

[](

const

CaseCluster &a,

const

CaseCluster &b) {

1272

return a.Prob != b.Prob

1274

: a.Low->getValue().slt(b.Low->getValue());

1279 for

(CaseClusterIt

I

=

W

.LastCluster;

I

>

W

.FirstCluster;) {

1281 if

(

I

->Prob >

W

.LastCluster->Prob)

1283 if

(

I

->Kind == CC_Range &&

I

->MBB == NextMBB) {

1293 for

(CaseClusterIt

I

=

W

.FirstCluster;

I

<=

W

.LastCluster; ++

I

)

1294

UnhandledProbs +=

I

->Prob;

1297 for

(CaseClusterIt

I

=

W

.FirstCluster, E =

W

.LastCluster;

I

<= E; ++

I

) {

1298 bool

FallthroughUnreachable =

false

;

1300 if

(

I

==

W

.LastCluster) {

1302

Fallthrough = DefaultMBB;

1303

FallthroughUnreachable = isa<UnreachableInst>(

1307

CurMF->

insert

(BBI, Fallthrough);

1309

UnhandledProbs -=

I

->Prob;

1311 switch

(

I

->Kind) {

1313 if

(!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,

1314

DefaultProb, UnhandledProbs,

I

, Fallthrough,

1315

FallthroughUnreachable)) {

1323 if

(!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,

1324

UnhandledProbs,

I

, Fallthrough,

1325

FallthroughUnreachable)) {

1332 if

(!lowerSwitchRangeWorkItem(

I

,

Cond

, Fallthrough,

1333

FallthroughUnreachable, UnhandledProbs,

1334

CurMBB, MIB, SwitchMBB)) {

1341

CurMBB = Fallthrough;

1347bool

IRTranslator::translateIndirectBr(

const User

&U,

1361 if

(!AddedSuccessors.

insert

(Succ).second)

1370 if

(

auto

Arg = dyn_cast<Argument>(V))

1371 return

Arg->hasSwiftErrorAttr();

1372 if

(

auto

AI = dyn_cast<AllocaInst>(V))

1378 const LoadInst

&LI = cast<LoadInst>(U);

1380 if

(StoreSize.

isZero

())

1393 assert

(Regs.

size

() == 1 &&

"swifterror should be single pointer"

);

1410

Regs.

size

() == 1 ? LI.

getMetadata

(LLVMContext::MD_range) :

nullptr

;

1411 for

(

unsigned

i = 0; i < Regs.

size

(); ++i) {

1416 Align

BaseAlign = getMemOpAlign(LI);

1440 assert

(Vals.

size

() == 1 &&

"swifterror should be single pointer"

);

1443 SI

.getPointerOperand());

1450 for

(

unsigned

i = 0; i < Vals.

size

(); ++i) {

1455 Align

BaseAlign = getMemOpAlign(SI);

1459 SI

.getSyncScopeID(),

SI

.getOrdering());

1466 const Value

*Src = U.getOperand(0);

1472

Indices.

push_back

(ConstantInt::get(Int32Ty, 0));

1475 for

(

auto Idx

: EVI->indices())

1477

}

else if

(

const InsertValueInst

*IVI = dyn_cast<InsertValueInst>(&U)) {

1478 for

(

auto Idx

: IVI->indices())

1486 DL

.getIndexedOffsetInType(Src->getType(), Indices));

1489bool

IRTranslator::translateExtractValue(

const User

&U,

1491 const Value

*Src =

U

.getOperand(0);

1496 auto

&DstRegs = allocateVRegs(U);

1498 for

(

unsigned

i = 0; i < DstRegs.size(); ++i)

1499

DstRegs[i] = SrcRegs[

Idx

++];

1504bool

IRTranslator::translateInsertValue(

const User

&U,

1506 const Value

*Src =

U

.getOperand(0);

1508 auto

&DstRegs = allocateVRegs(U);

1512 auto

*InsertedIt = InsertedRegs.

begin

();

1514 for

(

unsigned

i = 0; i < DstRegs.size(); ++i) {

1515 if

(DstOffsets[i] >=

Offset

&& InsertedIt != InsertedRegs.

end

())

1516

DstRegs[i] = *InsertedIt++;

1518

DstRegs[i] = SrcRegs[i];

1524bool

IRTranslator::translateSelect(

const User

&U,

1526 Register

Tst = getOrCreateVReg(*

U

.getOperand(0));

1532 if

(

const SelectInst

*SI = dyn_cast<SelectInst>(&U))

1535 for

(

unsigned

i = 0; i < ResRegs.

size

(); ++i) {

1536

MIRBuilder.

buildSelect

(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);

1542bool

IRTranslator::translateCopy(

const User

&U,

const Value

&V,

1545 auto

&Regs = *VMap.getVRegs(U);

1546 if

(Regs.

empty

()) {

1547

Regs.push_back(Src);

1548

VMap.getOffsets(U)->push_back(0);

1557bool

IRTranslator::translateBitCast(

const User

&U,

1564 if

(isa<ConstantInt>(

U

.getOperand(0)))

1565 return

translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,

1567 return

translateCopy(U, *

U

.getOperand(0), MIRBuilder);

1570 return

translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);

1573bool

IRTranslator::translateCast(

unsigned

Opcode,

const User

&U,

1579 if

(

const Instruction

*

I

= dyn_cast<Instruction>(&U))

1582 Register Op

= getOrCreateVReg(*

U

.getOperand(0));

1588bool

IRTranslator::translateGetElementPtr(

const User

&U,

1590 Value

&Op0 = *

U

.getOperand(0);

1591 Register

BaseReg = getOrCreateVReg(Op0);

1598 if

(

const Instruction

*

I

= dyn_cast<Instruction>(&U))

1603 unsigned

VectorWidth = 0;

1607 bool

WantSplatVector =

false

;

1608 if

(

auto

*VT = dyn_cast<VectorType>(

U

.getType())) {

1609

VectorWidth = cast<FixedVectorType>(VT)->getNumElements();

1611

WantSplatVector = VectorWidth > 1;

1616 if

(WantSplatVector && !PtrTy.

isVector

()) {

1617

BaseReg = MIRBuilder

1630 const Value

*

Idx

= GTI.getOperand();

1631 if

(

StructType

*StTy = GTI.getStructTypeOrNull()) {

1632 unsigned Field

= cast<Constant>(

Idx

)->getUniqueInteger().getZExtValue();

1636 uint64_t

ElementSize = GTI.getSequentialElementStride(*DL);

1640 if

(

const auto

*CI = dyn_cast<ConstantInt>(

Idx

)) {

1641 if

(std::optional<int64_t> Val = CI->getValue().trySExtValue()) {

1642 Offset

+= ElementSize * *Val;

1649

BaseReg = MIRBuilder.

buildPtrAdd

(PtrTy, BaseReg, OffsetMIB.getReg(0))

1656 if

(IdxTy != OffsetTy) {

1657 if

(!IdxTy.

isVector

() && WantSplatVector) {

1670 if

(ElementSize != 1) {

1674

MIRBuilder.

buildMul

(OffsetTy, IdxReg, ElementSizeMIB).

getReg

(0);

1676

GepOffsetReg = IdxReg;

1686 if

(int64_t(

Offset

) >= 0 && cast<GEPOperator>(U).isInBounds())

1689

MIRBuilder.

buildPtrAdd

(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),

1694

MIRBuilder.

buildCopy

(getOrCreateVReg(U), BaseReg);

1698bool

IRTranslator::translateMemFunc(

const CallInst

&CI,

1703 if

(isa<UndefValue>(SrcPtr))

1708 unsigned

MinPtrSize = UINT_MAX;

1709 for

(

auto

AI = CI.

arg_begin

(), AE = CI.

arg_end

(); std::next(AI) != AE; ++AI) {

1710 Register

SrcReg = getOrCreateVReg(**AI);

1713

MinPtrSize = std::min<unsigned>(SrcTy.

getSizeInBits

(), MinPtrSize);

1721 if

(MRI->

getType

(SizeOpReg) != SizeTy)

1724 auto

ICall = MIRBuilder.

buildInstr

(Opcode);

1735 if

(

auto

*MCI = dyn_cast<MemCpyInst>(&CI)) {

1736

DstAlign = MCI->getDestAlign().valueOrOne();

1737

SrcAlign = MCI->getSourceAlign().valueOrOne();

1738

CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));

1739

}

else if

(

auto

*MCI = dyn_cast<MemCpyInlineInst>(&CI)) {

1740

DstAlign = MCI->getDestAlign().valueOrOne();

1741

SrcAlign = MCI->getSourceAlign().valueOrOne();

1742

CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));

1743

}

else if

(

auto

*MMI = dyn_cast<MemMoveInst>(&CI)) {

1744

DstAlign = MMI->getDestAlign().valueOrOne();

1745

SrcAlign = MMI->getSourceAlign().valueOrOne();

1746

CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));

1748 auto

*MSI = cast<MemSetInst>(&CI);

1749

DstAlign = MSI->getDestAlign().valueOrOne();

1752 if

(Opcode != TargetOpcode::G_MEMCPY_INLINE) {

1768 if

(AA && CopySize &&

1779

ICall.addMemOperand(

1781

StoreFlags, 1, DstAlign, AAInfo));

1782 if

(Opcode != TargetOpcode::G_MEMSET)

1789bool

IRTranslator::translateTrap(

const CallInst

&CI,

1794 if

(TrapFuncName.

empty

()) {

1795 if

(Opcode == TargetOpcode::G_UBSANTRAP) {

1805 if

(Opcode == TargetOpcode::G_UBSANTRAP)

1812 return

CLI->

lowerCall

(MIRBuilder, Info);

1815bool

IRTranslator::translateVectorInterleave2Intrinsic(

1818 "This function can only be called on the interleave2 intrinsic!"

);

1822 Register

Res = getOrCreateVReg(CI);

1831bool

IRTranslator::translateVectorDeinterleave2Intrinsic(

1834 "This function can only be called on the deinterleave2 intrinsic!"

);

1850void

IRTranslator::getStackGuard(

Register

DstReg,

1855

MIRBuilder.

buildInstr

(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});

1861 unsigned

AddrSpace =

Global

->getType()->getPointerAddressSpace();

1869

MIB.setMemRefs({

MemRef

});

1872bool

IRTranslator::translateOverflowIntrinsic(

const CallInst

&CI,

unsigned Op

,

1876 Op

, {ResRegs[0], ResRegs[1]},

1882bool

IRTranslator::translateFixedPointIntrinsic(

unsigned Op

,

const CallInst

&CI,

1884 Register

Dst = getOrCreateVReg(CI);

1888

MIRBuilder.

buildInstr

(

Op

, {Dst}, { Src0, Src1, Scale });

1896 case

Intrinsic::acos:

1897 return

TargetOpcode::G_FACOS;

1898 case

Intrinsic::asin:

1899 return

TargetOpcode::G_FASIN;

1900 case

Intrinsic::atan:

1901 return

TargetOpcode::G_FATAN;

1902 case

Intrinsic::atan2:

1903 return

TargetOpcode::G_FATAN2;

1904 case

Intrinsic::bswap:

1905 return

TargetOpcode::G_BSWAP;

1906 case

Intrinsic::bitreverse:

1907 return

TargetOpcode::G_BITREVERSE;

1908 case

Intrinsic::fshl:

1909 return

TargetOpcode::G_FSHL;

1910 case

Intrinsic::fshr:

1911 return

TargetOpcode::G_FSHR;

1912 case

Intrinsic::ceil:

1913 return

TargetOpcode::G_FCEIL;

1914 case

Intrinsic::cos:

1915 return

TargetOpcode::G_FCOS;

1916 case

Intrinsic::cosh:

1917 return

TargetOpcode::G_FCOSH;

1918 case

Intrinsic::ctpop:

1919 return

TargetOpcode::G_CTPOP;

1920 case

Intrinsic::exp:

1921 return

TargetOpcode::G_FEXP;

1922 case

Intrinsic::exp2:

1923 return

TargetOpcode::G_FEXP2;

1924 case

Intrinsic::exp10:

1925 return

TargetOpcode::G_FEXP10;

1926 case

Intrinsic::fabs:

1927 return

TargetOpcode::G_FABS;

1928 case

Intrinsic::copysign:

1929 return

TargetOpcode::G_FCOPYSIGN;

1930 case

Intrinsic::minnum:

1931 return

TargetOpcode::G_FMINNUM;

1932 case

Intrinsic::maxnum:

1933 return

TargetOpcode::G_FMAXNUM;

1934 case

Intrinsic::minimum:

1935 return

TargetOpcode::G_FMINIMUM;

1936 case

Intrinsic::maximum:

1937 return

TargetOpcode::G_FMAXIMUM;

1938 case

Intrinsic::canonicalize:

1939 return

TargetOpcode::G_FCANONICALIZE;

1940 case

Intrinsic::floor:

1941 return

TargetOpcode::G_FFLOOR;

1942 case

Intrinsic::fma:

1943 return

TargetOpcode::G_FMA;

1944 case

Intrinsic::log:

1945 return

TargetOpcode::G_FLOG;

1946 case

Intrinsic::log2:

1947 return

TargetOpcode::G_FLOG2;

1948 case

Intrinsic::log10:

1949 return

TargetOpcode::G_FLOG10;

1950 case

Intrinsic::ldexp:

1951 return

TargetOpcode::G_FLDEXP;

1952 case

Intrinsic::nearbyint:

1953 return

TargetOpcode::G_FNEARBYINT;

1954 case

Intrinsic::pow:

1955 return

TargetOpcode::G_FPOW;

1956 case

Intrinsic::powi:

1957 return

TargetOpcode::G_FPOWI;

1958 case

Intrinsic::rint:

1959 return

TargetOpcode::G_FRINT;

1960 case

Intrinsic::round:

1961 return

TargetOpcode::G_INTRINSIC_ROUND;

1962 case

Intrinsic::roundeven:

1963 return

TargetOpcode::G_INTRINSIC_ROUNDEVEN;

1964 case

Intrinsic::sin:

1965 return

TargetOpcode::G_FSIN;

1966 case

Intrinsic::sinh:

1967 return

TargetOpcode::G_FSINH;

1968 case

Intrinsic::sqrt:

1969 return

TargetOpcode::G_FSQRT;

1970 case

Intrinsic::tan:

1971 return

TargetOpcode::G_FTAN;

1972 case

Intrinsic::tanh:

1973 return

TargetOpcode::G_FTANH;

1974 case

Intrinsic::trunc:

1975 return

TargetOpcode::G_INTRINSIC_TRUNC;

1976 case

Intrinsic::readcyclecounter:

1977 return

TargetOpcode::G_READCYCLECOUNTER;

1978 case

Intrinsic::readsteadycounter:

1979 return

TargetOpcode::G_READSTEADYCOUNTER;

1980 case

Intrinsic::ptrmask:

1981 return

TargetOpcode::G_PTRMASK;

1982 case

Intrinsic::lrint:

1983 return

TargetOpcode::G_INTRINSIC_LRINT;

1984 case

Intrinsic::llrint:

1985 return

TargetOpcode::G_INTRINSIC_LLRINT;

1987 case

Intrinsic::vector_reduce_fmin:

1988 return

TargetOpcode::G_VECREDUCE_FMIN;

1989 case

Intrinsic::vector_reduce_fmax:

1990 return

TargetOpcode::G_VECREDUCE_FMAX;

1991 case

Intrinsic::vector_reduce_fminimum:

1992 return

TargetOpcode::G_VECREDUCE_FMINIMUM;

1993 case

Intrinsic::vector_reduce_fmaximum:

1994 return

TargetOpcode::G_VECREDUCE_FMAXIMUM;

1995 case

Intrinsic::vector_reduce_add:

1996 return

TargetOpcode::G_VECREDUCE_ADD;

1997 case

Intrinsic::vector_reduce_mul:

1998 return

TargetOpcode::G_VECREDUCE_MUL;

1999 case

Intrinsic::vector_reduce_and:

2000 return

TargetOpcode::G_VECREDUCE_AND;

2001 case

Intrinsic::vector_reduce_or:

2002 return

TargetOpcode::G_VECREDUCE_OR;

2003 case

Intrinsic::vector_reduce_xor:

2004 return

TargetOpcode::G_VECREDUCE_XOR;

2005 case

Intrinsic::vector_reduce_smax:

2006 return

TargetOpcode::G_VECREDUCE_SMAX;

2007 case

Intrinsic::vector_reduce_smin:

2008 return

TargetOpcode::G_VECREDUCE_SMIN;

2009 case

Intrinsic::vector_reduce_umax:

2010 return

TargetOpcode::G_VECREDUCE_UMAX;

2011 case

Intrinsic::vector_reduce_umin:

2012 return

TargetOpcode::G_VECREDUCE_UMIN;

2013 case

Intrinsic::experimental_vector_compress:

2014 return

TargetOpcode::G_VECTOR_COMPRESS;

2015 case

Intrinsic::lround:

2016 return

TargetOpcode::G_LROUND;

2017 case

Intrinsic::llround:

2018 return

TargetOpcode::G_LLROUND;

2019 case

Intrinsic::get_fpenv:

2020 return

TargetOpcode::G_GET_FPENV;

2021 case

Intrinsic::get_fpmode:

2022 return

TargetOpcode::G_GET_FPMODE;

2027bool

IRTranslator::translateSimpleIntrinsic(

const CallInst

&CI,

2031 unsigned Op

= getSimpleIntrinsicOpcode(

ID

);

2039 for

(

const auto

&Arg : CI.

args

())

2040

VRegs.

push_back

(getOrCreateVReg(*Arg));

2042

MIRBuilder.

buildInstr

(

Op

, {getOrCreateVReg(CI)}, VRegs,

2050 case

Intrinsic::experimental_constrained_fadd:

2051 return

TargetOpcode::G_STRICT_FADD;

2052 case

Intrinsic::experimental_constrained_fsub:

2053 return

TargetOpcode::G_STRICT_FSUB;

2054 case

Intrinsic::experimental_constrained_fmul:

2055 return

TargetOpcode::G_STRICT_FMUL;

2056 case

Intrinsic::experimental_constrained_fdiv:

2057 return

TargetOpcode::G_STRICT_FDIV;

2058 case

Intrinsic::experimental_constrained_frem:

2059 return

TargetOpcode::G_STRICT_FREM;

2060 case

Intrinsic::experimental_constrained_fma:

2061 return

TargetOpcode::G_STRICT_FMA;

2062 case

Intrinsic::experimental_constrained_sqrt:

2063 return

TargetOpcode::G_STRICT_FSQRT;

2064 case

Intrinsic::experimental_constrained_ldexp:

2065 return

TargetOpcode::G_STRICT_FLDEXP;

2071bool

IRTranslator::translateConstrainedFPIntrinsic(

2087

MIRBuilder.

buildInstr

(Opcode, {getOrCreateVReg(FPI)}, VRegs,

Flags

);

2091

std::optional<MCRegister> IRTranslator::getArgPhysReg(

Argument

&Arg) {

2092 auto

VRegs = getOrCreateVRegs(Arg);

2093 if

(VRegs.

size

() != 1)

2094 return

std::nullopt;

2098 if

(!VRegDef || !VRegDef->isCopy())

2099 return

std::nullopt;

2103bool

IRTranslator::translateIfEntryValueArgument(

bool

isDeclare,

Value

*Val,

2108 auto

*Arg = dyn_cast<Argument>(Val);

2115

std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);

2117 LLVM_DEBUG

(

dbgs

() <<

"Dropping dbg."

<< (isDeclare ?

"declare"

:

"value"

)

2118

<<

": expression is entry_value but " 2119

<<

"couldn't find a physical register\n"

);

2139 case

Intrinsic::experimental_convergence_anchor:

2140 return

TargetOpcode::CONVERGENCECTRL_ANCHOR;

2141 case

Intrinsic::experimental_convergence_entry:

2142 return

TargetOpcode::CONVERGENCECTRL_ENTRY;

2143 case

Intrinsic::experimental_convergence_loop:

2144 return

TargetOpcode::CONVERGENCECTRL_LOOP;

2148bool

IRTranslator::translateConvergenceControlIntrinsic(

2151 Register

OutputReg = getOrCreateConvergenceTokenVReg(CI);

2154 if

(

ID

== Intrinsic::experimental_convergence_loop) {

2156 assert

(Bundle &&

"Expected a convergence control token."

);

2158

getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());

2167 if

(

auto

*

MI

= dyn_cast<AnyMemIntrinsic>(&CI)) {

2168 if

(ORE->enabled()) {

2170 MemoryOpRemark R

(*ORE,

"gisel-irtranslator-memsize"

, *DL, *LibInfo);

2178 if

(translateSimpleIntrinsic(CI,

ID

, MIRBuilder))

2184 case

Intrinsic::lifetime_start:

2185 case

Intrinsic::lifetime_end: {

2191 unsigned Op

=

ID

== Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START

2192

: TargetOpcode::LIFETIME_END;

2201 for

(

const Value

*V : Allocas) {

2202 const AllocaInst

*AI = dyn_cast<AllocaInst>(V);

2213 case

Intrinsic::fake_use: {

2215 for

(

const auto

&Arg : CI.

args

())

2216 for

(

auto

VReg : getOrCreateVRegs(*Arg))

2218

MIRBuilder.

buildInstr

(TargetOpcode::FAKE_USE, {}, VRegs);

2222 case

Intrinsic::dbg_declare: {

2229 case

Intrinsic::dbg_label: {

2235 "Expected inlined-at fields to agree"

);

2240 case

Intrinsic::vaend:

2244 case

Intrinsic::vastart: {

2249

MIRBuilder.

buildInstr

(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*

Ptr

)})

2252

ListSize, Alignment));

2255 case

Intrinsic::dbg_assign:

2262 case

Intrinsic::dbg_value: {

2269 case

Intrinsic::uadd_with_overflow:

2270 return

translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);

2271 case

Intrinsic::sadd_with_overflow:

2272 return

translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);

2273 case

Intrinsic::usub_with_overflow:

2274 return

translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);

2275 case

Intrinsic::ssub_with_overflow:

2276 return

translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);

2277 case

Intrinsic::umul_with_overflow:

2278 return

translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);

2279 case

Intrinsic::smul_with_overflow:

2280 return

translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);

2281 case

Intrinsic::uadd_sat:

2282 return

translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);

2283 case

Intrinsic::sadd_sat:

2284 return

translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);

2285 case

Intrinsic::usub_sat:

2286 return

translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);

2287 case

Intrinsic::ssub_sat:

2288 return

translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);

2289 case

Intrinsic::ushl_sat:

2290 return

translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);

2291 case

Intrinsic::sshl_sat:

2292 return

translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);

2293 case

Intrinsic::umin:

2294 return

translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);

2295 case

Intrinsic::umax:

2296 return

translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);

2297 case

Intrinsic::smin:

2298 return

translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);

2299 case

Intrinsic::smax:

2300 return

translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);

2301 case

Intrinsic::abs:

2303 return

translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);

2304 case

Intrinsic::smul_fix:

2305 return

translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);

2306 case

Intrinsic::umul_fix:

2307 return

translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);

2308 case

Intrinsic::smul_fix_sat:

2309 return

translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);

2310 case

Intrinsic::umul_fix_sat:

2311 return

translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);

2312 case

Intrinsic::sdiv_fix:

2313 return

translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);

2314 case

Intrinsic::udiv_fix:

2315 return

translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);

2316 case

Intrinsic::sdiv_fix_sat:

2317 return

translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);

2318 case

Intrinsic::udiv_fix_sat:

2319 return

translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);

2320 case

Intrinsic::fmuladd: {

2322 Register

Dst = getOrCreateVReg(CI);

2331

MIRBuilder.

buildFMA

(Dst, Op0, Op1, Op2,

2342 case

Intrinsic::convert_from_fp16:

2348 case

Intrinsic::convert_to_fp16:

2354 case

Intrinsic::frexp: {

2361 case

Intrinsic::sincos: {

2368 case

Intrinsic::fptosi_sat:

2372 case

Intrinsic::fptoui_sat:

2376 case

Intrinsic::memcpy_inline:

2377 return

translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);

2378 case

Intrinsic::memcpy:

2379 return

translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);

2380 case

Intrinsic::memmove:

2381 return

translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);

2382 case

Intrinsic::memset:

2383 return

translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);

2384 case

Intrinsic::eh_typeid_for: {

2391 case

Intrinsic::objectsize:

2394 case

Intrinsic::is_constant:

2395 llvm_unreachable

(

"llvm.is.constant.* should have been lowered already"

);

2397 case

Intrinsic::stackguard:

2398

getStackGuard(getOrCreateVReg(CI), MIRBuilder);

2400 case

Intrinsic::stackprotector: {

2405

getStackGuard(GuardVal, MIRBuilder);

2410 int

FI = getOrCreateFrameIndex(*Slot);

2414

GuardVal, getOrCreateVReg(*Slot),

2421 case

Intrinsic::stacksave: {

2422

MIRBuilder.

buildInstr

(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});

2425 case

Intrinsic::stackrestore: {

2426

MIRBuilder.

buildInstr

(TargetOpcode::G_STACKRESTORE, {},

2430 case

Intrinsic::cttz:

2431 case

Intrinsic::ctlz: {

2433 bool

isTrailing =

ID

== Intrinsic::cttz;

2434 unsigned

Opcode = isTrailing

2435

? Cst->

isZero

() ? TargetOpcode::G_CTTZ

2436

: TargetOpcode::G_CTTZ_ZERO_UNDEF

2437

: Cst->

isZero

() ? TargetOpcode::G_CTLZ

2438

: TargetOpcode::G_CTLZ_ZERO_UNDEF;

2439

MIRBuilder.

buildInstr

(Opcode, {getOrCreateVReg(CI)},

2443 case

Intrinsic::invariant_start: {

2449 case

Intrinsic::invariant_end:

2451 case

Intrinsic::expect:

2452 case

Intrinsic::expect_with_probability:

2453 case

Intrinsic::annotation:

2454 case

Intrinsic::ptr_annotation:

2455 case

Intrinsic::launder_invariant_group:

2456 case

Intrinsic::strip_invariant_group: {

2458

MIRBuilder.

buildCopy

(getOrCreateVReg(CI),

2462 case

Intrinsic::assume:

2463 case

Intrinsic::experimental_noalias_scope_decl:

2464 case

Intrinsic::var_annotation:

2465 case

Intrinsic::sideeffect:

2468 case

Intrinsic::read_volatile_register:

2469 case

Intrinsic::read_register: {

2472

.

buildInstr

(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})

2473

.addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));

2476 case

Intrinsic::write_register: {

2478

MIRBuilder.

buildInstr

(TargetOpcode::G_WRITE_REGISTER)

2479

.

addMetadata

(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))

2483 case

Intrinsic::localescape: {

2491 if

(isa<ConstantPointerNull>(Arg))

2494 int

FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));

2504

EntryMBB.

insert

(EntryMBB.

begin

(), LocalEscape);

2509 case

Intrinsic::vector_reduce_fadd:

2510 case

Intrinsic::vector_reduce_fmul: {

2513 Register

Dst = getOrCreateVReg(CI);

2519

Opc =

ID

== Intrinsic::vector_reduce_fadd

2520

? TargetOpcode::G_VECREDUCE_SEQ_FADD

2521

: TargetOpcode::G_VECREDUCE_SEQ_FMUL;

2522

MIRBuilder.

buildInstr

(Opc, {Dst}, {ScalarSrc, VecSrc},

2529 if

(

ID

== Intrinsic::vector_reduce_fadd) {

2530

Opc = TargetOpcode::G_VECREDUCE_FADD;

2531

ScalarOpc = TargetOpcode::G_FADD;

2533

Opc = TargetOpcode::G_VECREDUCE_FMUL;

2534

ScalarOpc = TargetOpcode::G_FMUL;

2539

MIRBuilder.

buildInstr

(ScalarOpc, {Dst}, {ScalarSrc, Rdx},

2544 case

Intrinsic::trap:

2545 return

translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);

2546 case

Intrinsic::debugtrap:

2547 return

translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);

2548 case

Intrinsic::ubsantrap:

2549 return

translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);

2550 case

Intrinsic::allow_runtime_check:

2551 case

Intrinsic::allow_ubsan_check:

2552

MIRBuilder.

buildCopy

(getOrCreateVReg(CI),

2555 case

Intrinsic::amdgcn_cs_chain:

2556 return

translateCallBase(CI, MIRBuilder);

2557 case

Intrinsic::fptrunc_round: {

2562

std::optional<RoundingMode> RoundMode =

2567

.

buildInstr

(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,

2568

{getOrCreateVReg(CI)},

2570

.addImm((

int

)*RoundMode);

2574 case

Intrinsic::is_fpclass: {

2579

.

buildInstr

(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},

2580

{getOrCreateVReg(*FpValue)})

2585 case

Intrinsic::set_fpenv: {

2590 case

Intrinsic::reset_fpenv:

2593 case

Intrinsic::set_fpmode: {

2598 case

Intrinsic::reset_fpmode:

2601 case

Intrinsic::vscale: {

2605 case

Intrinsic::scmp:

2606

MIRBuilder.

buildSCmp

(getOrCreateVReg(CI),

2610 case

Intrinsic::ucmp:

2611

MIRBuilder.

buildUCmp

(getOrCreateVReg(CI),

2615 case

Intrinsic::vector_extract:

2616 return

translateExtractVector(CI, MIRBuilder);

2617 case

Intrinsic::vector_insert:

2618 return

translateInsertVector(CI, MIRBuilder);

2619 case

Intrinsic::stepvector: {

2623 case

Intrinsic::prefetch: {

2625 unsigned

RW = cast<ConstantInt>(CI.

getOperand

(1))->getZExtValue();

2626 unsigned

Locality = cast<ConstantInt>(CI.

getOperand

(2))->getZExtValue();

2627 unsigned

CacheType = cast<ConstantInt>(CI.

getOperand

(3))->getZExtValue();

2639 case

Intrinsic::vector_interleave2:

2640 case

Intrinsic::vector_deinterleave2: {

2648 return

translateVectorInterleave2Intrinsic(CI, MIRBuilder);

2650 return

translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);

2653#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \ 2654 case Intrinsic::INTRINSIC: 2655#include "llvm/IR/ConstrainedOps.def" 2656 return

translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),

2658 case

Intrinsic::experimental_convergence_anchor:

2659 case

Intrinsic::experimental_convergence_entry:

2660 case

Intrinsic::experimental_convergence_loop:

2661 return

translateConvergenceControlIntrinsic(CI,

ID

, MIRBuilder);

2666bool

IRTranslator::translateInlineAsm(

const CallBase

&CB,

2675 dbgs

() <<

"Inline asm lowering is not supported for this target yet\n"

);

2680

MIRBuilder, CB, [&](

const Value

&Val) {

return

getOrCreateVRegs(Val); });

2683bool

IRTranslator::translateCallBase(

const CallBase

&CB,

2690 for

(

const auto

&Arg : CB.

args

()) {

2692 assert

(SwiftInVReg == 0 &&

"Expected only one swift error argument"

);

2696

&CB, &MIRBuilder.

getMBB

(), Arg));

2702 Args

.push_back(getOrCreateVRegs(*Arg));

2705 if

(

auto

*CI = dyn_cast<CallInst>(&CB)) {

2706 if

(ORE->enabled()) {

2708 MemoryOpRemark R

(*ORE,

"gisel-irtranslator-memsize"

, *DL, *LibInfo);

2714

std::optional<CallLowering::PtrAuthInfo> PAI;

2719 const Value

*

Key

= Bundle->Inputs[0];

2725 const auto

*CalleeCPA = dyn_cast<ConstantPtrAuth>(CB.

getCalledOperand

());

2726 if

(!CalleeCPA || !isa<Function>(CalleeCPA->getPointer()) ||

2727

!CalleeCPA->isKnownCompatibleWith(Key, Discriminator, *DL)) {

2729 Register

DiscReg = getOrCreateVReg(*Discriminator);

2737 const auto

&Token = *Bundle->Inputs[0].get();

2738

ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);

2745

MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,

2750 assert

(!HasTailCall &&

"Can't tail call return twice from block?"

);

2762 const CallInst

&CI = cast<CallInst>(U);

2768 if

(

F

&& (

F

->hasDLLImportStorageClass() ||

2770 F

->hasExternalWeakLinkage())))

2778 if

(isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))

2782 return

translateInlineAsm(CI, MIRBuilder);

2787 if

(

F

&&

F

->isIntrinsic()) {

2788 ID

=

F

->getIntrinsicID();

2794 return

translateCallBase(CI, MIRBuilder);

2798 if

(translateKnownIntrinsic(CI,

ID

, MIRBuilder))

2803

ResultRegs = getOrCreateVRegs(CI);

2808 if

(isa<FPMathOperator>(CI))

2814 if

(CI.

paramHasAttr

(Arg.index(), Attribute::ImmArg)) {

2815 if

(

ConstantInt

*CI = dyn_cast<ConstantInt>(Arg.value())) {

2818 assert

(CI->getBitWidth() <= 64 &&

2819 "large intrinsic immediates not handled"

);

2820

MIB.

addImm

(CI->getSExtValue());

2822

MIB.

addFPImm

(cast<ConstantFP>(Arg.value()));

2824

}

else if

(

auto

*MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {

2825 auto

*MD = MDVal->getMetadata();

2826 auto

*MDN = dyn_cast<MDNode>(MD);

2828 if

(

auto

*ConstMD = dyn_cast<ConstantAsMetadata>(MD))

2836 if

(VRegs.

size

() > 1)

2847 DL

->getABITypeAlign(

Info

.memVT.getTypeForEVT(

F

->getContext())));

2848 LLT

MemTy =

Info

.memVT.isSimple()

2850

:

LLT

::scalar(

Info

.memVT.getStoreSizeInBits());

2857 else if

(

Info

.fallbackAddressSpace)

2865 auto

*Token = Bundle->Inputs[0].get();

2866 Register

TokenReg = getOrCreateVReg(*Token);

2874bool

IRTranslator::findUnwindDestinations(

2894 if

(isa<LandingPadInst>(Pad)) {

2896

UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);

2899 if

(isa<CleanupPadInst>(Pad)) {

2902

UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);

2903

UnwindDests.

back

().first->setIsEHScopeEntry();

2904

UnwindDests.back().first->setIsEHFuncletEntry();

2907 if

(

auto

*CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {

2909 for

(

const BasicBlock

*CatchPadBB : CatchSwitch->handlers()) {

2910

UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);

2912 if

(IsMSVCCXX || IsCoreCLR)

2913

UnwindDests.back().first->setIsEHFuncletEntry();

2915

UnwindDests.back().first->setIsEHScopeEntry();

2917

NewEHPadBB = CatchSwitch->getUnwindDest();

2923 if

(BPI && NewEHPadBB)

2925

EHPadBB = NewEHPadBB;

2930bool

IRTranslator::translateInvoke(

const User

&U,

2938 const Function

*Fn =

I

.getCalledFunction();

2945 if

(

I

.hasDeoptState())

2963 bool

LowerInlineAsm =

I

.isInlineAsm();

2964 bool

NeedEHLabel =

true

;

2970

MIRBuilder.

buildInstr

(TargetOpcode::G_INVOKE_REGION_START);

2975 if

(LowerInlineAsm) {

2976 if

(!translateInlineAsm(

I

, MIRBuilder))

2978

}

else if

(!translateCallBase(

I

, MIRBuilder))

2994 if

(!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))

2998

&ReturnMBB = getMBB(*ReturnBB);

3000

addSuccessorWithProb(InvokeMBB, &ReturnMBB);

3001 for

(

auto

&UnwindDest : UnwindDests) {

3002

UnwindDest.first->setIsEHPad();

3003

addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);

3008 assert

(BeginSymbol &&

"Expected a begin symbol!"

);

3009 assert

(EndSymbol &&

"Expected an end symbol!"

);

3010

MF->

addInvoke

(&EHPadMBB, BeginSymbol, EndSymbol);

3013

MIRBuilder.

buildBr

(ReturnMBB);

3017bool

IRTranslator::translateCallBr(

const User

&U,

3023bool

IRTranslator::translateLandingPad(

const User

&U,

3047

MIRBuilder.

buildInstr

(TargetOpcode::EH_LABEL)

3053 if

(

auto

*RegMask =

TRI

.getCustomEHPadPreservedMask(*MF))

3061 for

(

Type

*Ty : cast<StructType>(LP.

getType

())->elements())

3063 assert

(

Tys

.size() == 2 &&

"Only two-valued landingpads are supported"

);

3072

MIRBuilder.

buildCopy

(ResRegs[0], ExceptionReg);

3080

MIRBuilder.

buildCopy

(PtrVReg, SelectorReg);

3081

MIRBuilder.

buildCast

(ResRegs[1], PtrVReg);

3086bool

IRTranslator::translateAlloca(

const User

&U,

3088 auto

&AI = cast<AllocaInst>(U);

3094 Register

Res = getOrCreateVReg(AI);

3095 int

FI = getOrCreateFrameIndex(AI);

3108 if

(MRI->

getType

(NumElts) != IntPtrTy) {

3118

getOrCreateVReg(*ConstantInt::get(IntPtrIRTy,

DL

->getTypeAllocSize(Ty)));

3119

MIRBuilder.

buildMul

(AllocSize, NumElts, TySize);

3126 auto

AllocAdd = MIRBuilder.

buildAdd

(IntPtrTy, AllocSize, SAMinusOne,

3130 auto

AlignedAlloc = MIRBuilder.

buildAnd

(IntPtrTy, AllocAdd, AlignCst);

3132 Align

Alignment = std::max(AI.

getAlign

(),

DL

->getPrefTypeAlign(Ty));

3133 if

(Alignment <= StackAlign)

3134

Alignment =

Align

(1);

3147

MIRBuilder.

buildInstr

(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},

3148

{getOrCreateVReg(*

U

.getOperand(0)),

3149 DL

->getABITypeAlign(

U

.getType()).value()});

3157 auto

&UI = cast<UnreachableInst>(U);

3160 if

(

const CallInst

*Call = dyn_cast_or_null<CallInst>(UI.getPrevNode());

3161

Call &&

Call

->doesNotReturn()) {

3165 if

(

Call

->isNonContinuableTrap())

3173bool

IRTranslator::translateInsertElement(

const User

&U,

3177 if

(

auto

*FVT = dyn_cast<FixedVectorType>(

U

.getType());

3178

FVT && FVT->getNumElements() == 1)

3179 return

translateCopy(U, *

U

.getOperand(1), MIRBuilder);

3182 Register

Val = getOrCreateVReg(*

U

.getOperand(0));

3183 Register

Elt = getOrCreateVReg(*

U

.getOperand(1));

3186 if

(

auto

*CI = dyn_cast<ConstantInt>(

U

.getOperand(2))) {

3187 if

(CI->getBitWidth() != PreferredVecIdxWidth) {

3188 APInt

NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);

3189 auto

*NewIdxCI = ConstantInt::get(CI->

getContext

(), NewIdx);

3190 Idx

= getOrCreateVReg(*NewIdxCI);

3194 Idx

= getOrCreateVReg(*

U

.getOperand(2));

3203bool

IRTranslator::translateInsertVector(

const User

&U,

3206 Register

Vec = getOrCreateVReg(*

U

.getOperand(0));

3207 Register

Elt = getOrCreateVReg(*

U

.getOperand(1));

3209 ConstantInt

*CI = cast<ConstantInt>(

U

.getOperand(2));

3213 if

(CI->

getBitWidth

() != PreferredVecIdxWidth) {

3215

CI = ConstantInt::get(CI->

getContext

(), NewIdx);

3219 if

(

auto

*ResultType = dyn_cast<FixedVectorType>(

U

.getOperand(1)->getType());

3220

ResultType && ResultType->getNumElements() == 1) {

3221 if

(

auto

*InputType = dyn_cast<FixedVectorType>(

U

.getOperand(0)->getType());

3222

InputType && InputType->getNumElements() == 1) {

3226 return

translateCopy(U, *

U

.getOperand(0), MIRBuilder);

3228 if

(isa<FixedVectorType>(

U

.getOperand(0)->getType())) {

3236 if

(isa<ScalableVectorType>(

U

.getOperand(0)->getType())) {

3241 auto

ScaledIndex = MIRBuilder.

buildMul

(

3249

getOrCreateVReg(U), getOrCreateVReg(*

U

.getOperand(0)),

3254bool

IRTranslator::translateExtractElement(

const User

&U,

3259

dyn_cast<FixedVectorType>(

U

.getOperand(0)->getType()))

3260 if

(FVT->getNumElements() == 1)

3261 return

translateCopy(U, *

U

.getOperand(0), MIRBuilder);

3264 Register

Val = getOrCreateVReg(*

U

.getOperand(0));

3267 if

(

auto

*CI = dyn_cast<ConstantInt>(

U

.getOperand(1))) {

3268 if

(CI->

getBitWidth

() != PreferredVecIdxWidth) {

3270 auto

*NewIdxCI = ConstantInt::get(CI->

getContext

(), NewIdx);

3271 Idx

= getOrCreateVReg(*NewIdxCI);

3275 Idx

= getOrCreateVReg(*

U

.getOperand(1));

3284bool

IRTranslator::translateExtractVector(

const User

&U,

3287 Register

Vec = getOrCreateVReg(*

U

.getOperand(0));

3288 ConstantInt

*CI = cast<ConstantInt>(

U

.getOperand(1));

3292 if

(CI->

getBitWidth

() != PreferredVecIdxWidth) {

3294

CI = ConstantInt::get(CI->

getContext

(), NewIdx);

3298 if

(

auto

*ResultType = dyn_cast<FixedVectorType>(

U

.getType());

3299

ResultType && ResultType->getNumElements() == 1) {

3300 if

(

auto

*InputType = dyn_cast<FixedVectorType>(

U

.getOperand(0)->getType());

3301

InputType && InputType->getNumElements() == 1) {

3304 return

translateCopy(U, *

U

.getOperand(0), MIRBuilder);

3306 if

(isa<FixedVectorType>(

U

.getOperand(0)->getType())) {

3314 if

(isa<ScalableVectorType>(

U

.getOperand(0)->getType())) {

3319 auto

ScaledIndex = MIRBuilder.

buildMul

(

3327

getOrCreateVReg(*

U

.getOperand(0)),

3332bool

IRTranslator::translateShuffleVector(

const User

&U,

3338 if

(

U

.getOperand(0)->getType()->isScalableTy()) {

3339 Register

Val = getOrCreateVReg(*

U

.getOperand(0));

3347 if

(

auto

*SVI = dyn_cast<ShuffleVectorInst>(&U))

3348 Mask

= SVI->getShuffleMask();

3350 Mask

= cast<ConstantExpr>(U).getShuffleMask();

3353

.

buildInstr

(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},

3354

{getOrCreateVReg(*

U

.getOperand(0)),

3355

getOrCreateVReg(*

U

.getOperand(1))})

3356

.addShuffleMask(MaskAlloc);

3361 const PHINode

&PI = cast<PHINode>(U);

3364 for

(

auto

Reg : getOrCreateVRegs(PI)) {

3365 auto

MIB = MIRBuilder.

buildInstr

(TargetOpcode::G_PHI, {

Reg

}, {});

3369

PendingPHIs.emplace_back(&PI, std::move(Insts));

3373bool

IRTranslator::translateAtomicCmpXchg(

const User

&U,

3379 auto

Res = getOrCreateVRegs(

I

);

3383 Register Cmp

= getOrCreateVReg(*

I

.getCompareOperand());

3384 Register

NewVal = getOrCreateVReg(*

I

.getNewValOperand());

3387

OldValRes, SuccessRes,

Addr

, Cmp, NewVal,

3390

getMemOpAlign(

I

),

I

.getAAMetadata(),

nullptr

,

I

.getSyncScopeID(),

3391 I

.getSuccessOrdering(),

I

.getFailureOrdering()));

3395bool

IRTranslator::translateAtomicRMW(

const User

&U,

3405 Register

Val = getOrCreateVReg(*

I

.getValOperand());

3407 unsigned

Opcode = 0;

3408 switch

(

I

.getOperation()) {

3412

Opcode = TargetOpcode::G_ATOMICRMW_XCHG;

3415

Opcode = TargetOpcode::G_ATOMICRMW_ADD;

3418

Opcode = TargetOpcode::G_ATOMICRMW_SUB;

3421

Opcode = TargetOpcode::G_ATOMICRMW_AND;

3424

Opcode = TargetOpcode::G_ATOMICRMW_NAND;

3427

Opcode = TargetOpcode::G_ATOMICRMW_OR;

3430

Opcode = TargetOpcode::G_ATOMICRMW_XOR;

3433

Opcode = TargetOpcode::G_ATOMICRMW_MAX;

3436

Opcode = TargetOpcode::G_ATOMICRMW_MIN;

3439

Opcode = TargetOpcode::G_ATOMICRMW_UMAX;

3442

Opcode = TargetOpcode::G_ATOMICRMW_UMIN;

3445

Opcode = TargetOpcode::G_ATOMICRMW_FADD;

3448

Opcode = TargetOpcode::G_ATOMICRMW_FSUB;

3451

Opcode = TargetOpcode::G_ATOMICRMW_FMAX;

3454

Opcode = TargetOpcode::G_ATOMICRMW_FMIN;

3457

Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;

3460

Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;

3463

Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;

3466

Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;

3471

Opcode, Res,

Addr

, Val,

3473

Flags, MRI->

getType

(Val), getMemOpAlign(

I

),

3474 I

.getAAMetadata(),

nullptr

,

I

.getSyncScopeID(),

3479bool

IRTranslator::translateFence(

const User

&U,

3481 const FenceInst

&Fence = cast<FenceInst>(U);

3487bool

IRTranslator::translateFreeze(

const User

&U,

3493 "Freeze with different source and destination type?"

);

3495 for

(

unsigned I

= 0;

I

< DstRegs.

size

(); ++

I

) {

3502void

IRTranslator::finishPendingPhis() {

3508 for

(

auto

&Phi : PendingPHIs) {

3523 for

(

auto

*Pred : getMachinePredBBs({IRPred, PI->

getParent

()})) {

3526

SeenPreds.

insert

(Pred);

3527 for

(

unsigned

j = 0;

j

< ValRegs.

size

(); ++

j

) {

3529

MIB.

addUse

(ValRegs[j]);

3537void

IRTranslator::translateDbgValueRecord(

Value

*V,

bool

HasArgList,

3543 "Expected inlined-at fields to agree"

);

3547 if

(!V || HasArgList) {

3554 if

(

const auto

*CI = dyn_cast<Constant>(V)) {

3559 if

(

auto

*AI = dyn_cast<AllocaInst>(V);

3564 auto

ExprOperands =

Expression

->getElements();

3565 auto

*ExprDerefRemoved =

3571 if

(translateIfEntryValueArgument(

false

, V, Variable,

Expression

, DL,

3574 for

(

Register

Reg : getOrCreateVRegs(*V)) {

3583void

IRTranslator::translateDbgDeclareRecord(

Value

*

Address

,

bool

HasArgList,

3589 LLVM_DEBUG

(

dbgs

() <<

"Dropping debug info for "

<< *Variable <<

"\n"

);

3594 "Expected inlined-at fields to agree"

);

3595 auto

AI = dyn_cast<AllocaInst>(

Address

);

3600

getOrCreateFrameIndex(*AI), DL);

3604 if

(translateIfEntryValueArgument(

true

,

Address

, Variable,

3617void

IRTranslator::translateDbgInfo(

const Instruction

&Inst,

3622 assert

(DLR->getLabel() &&

"Missing label"

);

3623 assert

(DLR->getLabel()->isValidLocationForIntrinsic(

3625 "Expected inlined-at fields to agree"

);

3642bool

IRTranslator::translate(

const Instruction

&Inst) {

3644

CurBuilder->setPCSections(Inst.

getMetadata

(LLVMContext::MD_pcsections));

3645

CurBuilder->setMMRAMetadata(Inst.

getMetadata

(LLVMContext::MD_mmra));

3651#define HANDLE_INST(NUM, OPCODE, CLASS) \ 3652 case Instruction::OPCODE: \ 3653 return translate##OPCODE(Inst, *CurBuilder.get()); 3654#include "llvm/IR/Instruction.def" 3663 if

(

auto

CurrInstDL = CurBuilder->getDL())

3664

EntryBuilder->setDebugLoc(

DebugLoc

());

3666 if

(

auto

CI = dyn_cast<ConstantInt>(&

C

))

3667

EntryBuilder->buildConstant(Reg, *CI);

3668 else if

(

auto

CF = dyn_cast<ConstantFP>(&

C

))

3669

EntryBuilder->buildFConstant(Reg, *CF);

3670 else if

(isa<UndefValue>(

C

))

3671

EntryBuilder->buildUndef(Reg);

3672 else if

(isa<ConstantPointerNull>(

C

))

3673

EntryBuilder->buildConstant(Reg, 0);

3674 else if

(

auto

GV = dyn_cast<GlobalValue>(&

C

))

3675

EntryBuilder->buildGlobalValue(Reg, GV);

3676 else if

(

auto

CPA = dyn_cast<ConstantPtrAuth>(&

C

)) {

3678 Register

AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());

3679

EntryBuilder->buildConstantPtrAuth(Reg, CPA,

Addr

, AddrDisc);

3680

}

else if

(

auto

CAZ = dyn_cast<ConstantAggregateZero>(&

C

)) {

3681 Constant

&Elt = *CAZ->getElementValue(0u);

3682 if

(isa<ScalableVectorType>(CAZ->getType())) {

3683

EntryBuilder->buildSplatVector(Reg, getOrCreateVReg(Elt));

3687 unsigned

NumElts = CAZ->getElementCount().getFixedValue();

3689 return

translateCopy(

C

, Elt, *EntryBuilder);

3691

EntryBuilder->buildSplatBuildVector(Reg, getOrCreateVReg(Elt));

3692

}

else if

(

auto

CV = dyn_cast<ConstantDataVector>(&

C

)) {

3694 if

(CV->getNumElements() == 1)

3695 return

translateCopy(

C

, *CV->getElementAsConstant(0), *EntryBuilder);

3697 for

(

unsigned

i = 0; i < CV->getNumElements(); ++i) {

3698 Constant

&Elt = *CV->getElementAsConstant(i);

3701

EntryBuilder->buildBuildVector(Reg, Ops);

3702

}

else if

(

auto

CE = dyn_cast<ConstantExpr>(&

C

)) {

3703 switch

(

CE

->getOpcode()) {

3704#define HANDLE_INST(NUM, OPCODE, CLASS) \ 3705 case Instruction::OPCODE: \ 3706 return translate##OPCODE(*CE, *EntryBuilder.get()); 3707#include "llvm/IR/Instruction.def" 3711

}

else if

(

auto

CV = dyn_cast<ConstantVector>(&

C

)) {

3712 if

(CV->getNumOperands() == 1)

3713 return

translateCopy(

C

, *CV->getOperand(0), *EntryBuilder);

3715 for

(

unsigned

i = 0; i < CV->getNumOperands(); ++i) {

3716

Ops.

push_back

(getOrCreateVReg(*CV->getOperand(i)));

3718

EntryBuilder->buildBuildVector(Reg, Ops);

3719

}

else if

(

auto

*BA = dyn_cast<BlockAddress>(&

C

)) {

3720

EntryBuilder->buildBlockAddress(Reg, BA);

3727bool

IRTranslator::finalizeBasicBlock(

const BasicBlock

&BB,

3729 for

(

auto

&BTB : SL->BitTestCases) {

3732

emitBitTestHeader(BTB, BTB.Parent);

3735 for

(

unsigned

j = 0, ej = BTB.Cases.size(); j != ej; ++j) {

3736

UnhandledProb -= BTB.Cases[

j

].ExtraProb;

3748 if

((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {

3751

NextMBB = BTB.Cases[

j

+ 1].TargetBB;

3752

}

else if

(j + 1 == ej) {

3754

NextMBB = BTB.Default;

3757

NextMBB = BTB.Cases[

j

+ 1].ThisBB;

3760

emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j],

MBB

);

3762 if

((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {

3766

addMachineCFGPred({BTB.Parent->getBasicBlock(),

3767

BTB.Cases[ej - 1].TargetBB->getBasicBlock()},

3770

BTB.Cases.pop_back();

3776

CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),

3777

BTB.Default->getBasicBlock()};

3778

addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);

3779 if

(!BTB.ContiguousRange) {

3780

addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);

3783

SL->BitTestCases.clear();

3785 for

(

auto

&JTCase : SL->JTCases) {

3787 if

(!JTCase.first.Emitted)

3788

emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

3790

emitJumpTable(JTCase.second, JTCase.second.MBB);

3792

SL->JTCases.clear();

3794 for

(

auto

&SwCase : SL->SwitchCases)

3795

emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);

3796

SL->SwitchCases.clear();

3801 bool

FunctionBasedInstrumentation =

3803

SPDescriptor.

initialize

(&BB, &

MBB

, FunctionBasedInstrumentation);

3807 LLVM_DEBUG

(

dbgs

() <<

"Unimplemented stack protector case\n"

);

3823

SuccessMBB->

splice

(SuccessMBB->

end

(), ParentMBB, SplitPoint,

3824

ParentMBB->

end

());

3827 if

(!emitSPDescriptorParent(SPDescriptor, ParentMBB))

3832 if

(FailureMBB->

empty

()) {

3833 if

(!emitSPDescriptorFailure(SPDescriptor, FailureMBB))

3845

CurBuilder->setInsertPt(*ParentBB, ParentBB->

end

());

3855 Register

StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);

3862

->buildLoad(PtrMemTy, StackSlotPtr,

3868 LLVM_DEBUG

(

dbgs

() <<

"Stack protector xor'ing with FP not yet implemented"

);

3886 assert

(FnTy->getNumParams() == 1 &&

"Invalid function signature"

);

3888 if

(GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))

3891

{GuardVal, FnTy->getParamType(0), {

Flags

}});

3894 Info

.OrigArgs.push_back(GuardArgInfo);

3895 Info

.CallConv = GuardCheckFn->getCallingConv();

3898 if

(!CLI->

lowerCall

(MIRBuilder, Info)) {

3899 LLVM_DEBUG

(

dbgs

() <<

"Failed to lower call to stack protector check\n"

);

3911

getStackGuard(Guard, *CurBuilder);

3915 Register

GuardPtr = getOrCreateVReg(*IRGuard);

3918

->buildLoad(PtrMemTy, GuardPtr,

3937

CurBuilder->setInsertPt(*FailureBB, FailureBB->

end

());

3947 if

(!CLI->

lowerCall

(*CurBuilder, Info)) {

3948 LLVM_DEBUG

(

dbgs

() <<

"Failed to lower call to stack protector fail\n"

);

3955

CurBuilder->buildInstr(TargetOpcode::G_TRAP);

3960void

IRTranslator::finalizeFunction() {

3963

PendingPHIs.clear();

3965

FrameIndices.clear();

3966

MachinePreds.clear();

3970

EntryBuilder.reset();

3985 const auto

*CI = dyn_cast<CallInst>(&

I

);

3986 return

CI && CI->isMustTailCall();

3994

getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();

3997

TPC = &getAnalysis<TargetPassConfig>();

4004

EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);

4006

EntryBuilder->setCSEInfo(CSEInfo);

4007

CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);

4008

CurBuilder->setCSEInfo(CSEInfo);

4010

EntryBuilder = std::make_unique<MachineIRBuilder>();

4011

CurBuilder = std::make_unique<MachineIRBuilder>();

4014

CurBuilder->setMF(*MF);

4015

EntryBuilder->setMF(*MF);

4017 DL

= &

F

.getDataLayout();

4018

ORE = std::make_unique<OptimizationRemarkEmitter>(&

F

);

4020

TM.resetTargetOptions(

F

);

4024

AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

4025

FuncInfo.

BPI

= &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();

4028

FuncInfo.

BPI

=

nullptr

;

4031

AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(

4033

LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(

F

);

4036

SL = std::make_unique<GISelSwitchLowering>(

this

, FuncInfo);

4037

SL->init(*TLI, TM, *

DL

);

4039 assert

(PendingPHIs.empty() &&

"stale PHIs"

);

4046 F

.getSubprogram(), &

F

.getEntryBlock());

4047

R <<

"unable to translate in big endian mode"

;

4053 auto

FinalizeOnReturn =

make_scope_exit

([

this

]() { finalizeFunction(); });

4058

EntryBuilder->setMBB(*EntryBB);

4060 DebugLoc

DbgLoc =

F

.getEntryBlock().getFirstNonPHIIt()->getDebugLoc();

4064 bool

IsVarArg =

F

.isVarArg();

4065 bool

HasMustTailInVarArgFn =

false

;

4068

FuncInfo.

MBBMap

.resize(

F

.getMaxBlockNumber());

4078 if

(!HasMustTailInVarArgFn)

4085

EntryBB->addSuccessor(&getMBB(

F

.front()));

4089 F

.getSubprogram(), &

F

.getEntryBlock());

4090

R <<

"unable to lower function: " 4091

<<

ore::NV

(

"Prototype"

,

F

.getFunctionType());

4099 if

(

DL

->getTypeStoreSize(Arg.

getType

()).isZero())

4104 if

(Arg.hasSwiftErrorAttr()) {

4105 assert

(VRegs.

size

() == 1 &&

"Too many vregs for Swift error"

);

4112 F

.getSubprogram(), &

F

.getEntryBlock());

4113

R <<

"unable to lower arguments: " 4114

<<

ore::NV

(

"Prototype"

,

F

.getFunctionType());

4121 if

(EnableCSE && CSEInfo)

4134

CurBuilder->setMBB(

MBB

);

4135

HasTailCall =

false

;

4149

translateDbgInfo(Inst, *CurBuilder);

4151 if

(translate(Inst))

4156

R <<

"unable to translate instruction: "

<<

ore::NV

(

"Opcode"

, &Inst);

4158 if

(ORE->allowExtraAnalysis(

"gisel-irtranslator"

)) {

4159

std::string InstStrStorage;

4163

R <<

": '"

<< InstStrStorage <<

"'"

;

4170 if

(!finalizeBasicBlock(*BB,

MBB

)) {

4172

BB->getTerminator()->getDebugLoc(), BB);

4173

R <<

"unable to translate basic block"

;

4183

finishPendingPhis();

4190 assert

(EntryBB->succ_size() == 1 &&

4191 "Custom BB used for lowering should have only one successor"

);

4195 "LLVM-IR entry block has a predecessor!?"

);

4198

NewEntryBB.

splice

(NewEntryBB.

begin

(), EntryBB, EntryBB->begin(),

4207

EntryBB->removeSuccessor(&NewEntryBB);

4212 "New entry wasn't next in the list of basic block!"

);

amdgpu aa AMDGPU Address space based Alias Analysis Wrapper

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

Analysis containing CSE Info

Provides analysis for continuously CSEing during GISel passes.

This file implements a version of MachineIRBuilder which CSEs insts within a MachineBasicBlock.

This file describes how to lower LLVM calls to machine code calls.

This file contains the declarations for the subclasses of Constant, which represent the different fla...

Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx

This contains common code to allow clients to notify changes to machine instr.

const HexagonInstrInfo * TII

IRTranslator LLVM IR static false void reportTranslationError(MachineFunction &MF, const TargetPassConfig &TPC, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R)

static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB)

Returns true if a BasicBlock BB within a variadic function contains a variadic musttail call.

static bool containsBF16Type(const User &U)

static unsigned getConvOpcode(Intrinsic::ID ID)

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL)

static unsigned getConstrainedOpcode(Intrinsic::ID ID)

static cl::opt< bool > EnableCSEInIRTranslator("enable-cse-in-irtranslator", cl::desc("Should enable CSE in irtranslator"), cl::Optional, cl::init(false))

static bool isValInBlock(const Value *V, const BasicBlock *BB)

static bool isSwiftError(const Value *V)

This file declares the IRTranslator pass.

This file provides various utilities for inspecting and working with the control flow graph in LLVM I...

This file describes how to lower LLVM inline asm to machine code INLINEASM.

Legalize the Machine IR a function s Machine IR

Implement a low-level type suitable for MachineInstr level instruction selection.

Implement a low-level type suitable for MachineInstr level instruction selection.

This file declares the MachineIRBuilder class.

unsigned const TargetRegisterInfo * TRI

#define INITIALIZE_PASS_DEPENDENCY(depName)

#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)

#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)

This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.

const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB

const SmallVectorImpl< MachineOperand > & Cond

assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())

verify safepoint Safepoint IR Verifier

This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...

This file defines the SmallSet class.

This file defines the SmallVector class.

This file describes how to lower LLVM code to machine code.

Target-Independent Code Generator Pass Configuration Options pass.

A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.

bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)

Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...

Class for arbitrary precision integers.

APInt zextOrTrunc(unsigned width) const

Zero extend or truncate to width.

an instruction to allocate memory on the stack

bool isSwiftError() const

Return true if this alloca is used as a swifterror argument to a call.

bool isStaticAlloca() const

Return true if this alloca is in the entry block of the function and is a constant size.

Align getAlign() const

Return the alignment of the memory that is being allocated by the instruction.

PointerType * getType() const

Overload to return most specific pointer type.

Type * getAllocatedType() const

Return the type that is being allocated by the instruction.

const Value * getArraySize() const

Get the number of elements allocated.

Represent the analysis usage information of a pass.

AnalysisUsage & addRequired()

AnalysisUsage & addPreserved()

Add the specified Pass class to the set of analyses preserved by this pass.

This class represents an incoming formal argument to a Function.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

size_t size() const

size - Get the array size.

bool empty() const

empty - Check if the array is empty.

An immutable pass that tracks lazily created AssumptionCache objects.

An instruction that atomically checks whether a specified value is in a memory location,...

an instruction that atomically reads a memory location, combines it with another value,...

@ USubCond

Subtract only if no unsigned overflow.

@ Min

*p = old <signed v ? old : v

@ USubSat

*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.

@ UIncWrap

Increment one up to a maximum value.

@ Max

*p = old >signed v ? old : v

@ UMin

*p = old <unsigned v ? old : v

@ FMin

*p = minnum(old, v) minnum matches the behavior of llvm.minnum.

@ UMax

*p = old >unsigned v ? old : v

@ FMax

*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.

@ UDecWrap

Decrement one until a minimum value or zero.

Attribute getFnAttr(Attribute::AttrKind Kind) const

Return the attribute object that exists for the function.

StringRef getValueAsString() const

Return the attribute's value as a string.

LLVM Basic Block Representation.

unsigned getNumber() const

bool hasAddressTaken() const

Returns true if there are any uses of this basic block other than direct branches,...

InstListType::const_iterator getFirstNonPHIIt() const

Returns an iterator to the first instruction in this block that is not a PHINode instruction.

InstListType::const_iterator const_iterator

InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const

Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...

const Function * getParent() const

Return the enclosing method, or null if none.

const Instruction & back() const

const Module * getModule() const

Return the module owning the function this basic block belongs to, or nullptr if the function does no...

Legacy analysis pass which computes BlockFrequencyInfo.

Conditional or Unconditional Branch instruction.

BasicBlock * getSuccessor(unsigned i) const

bool isUnconditional() const

Value * getCondition() const

Legacy analysis pass which computes BranchProbabilityInfo.

Analysis providing branch probability information.

BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const

Get an edge's probability, relative to other out-edges of the Src.

static BranchProbability getZero()

static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)

Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...

bool isInlineAsm() const

Check if this call is an inline asm statement.

std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const

Return an operand bundle by name, if present.

Function * getCalledFunction() const

Returns the function called, or null if this is an indirect function invocation or the function signa...

bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Determine whether the argument or parameter has the given attribute.

User::op_iterator arg_begin()

Return the iterator pointing to the beginning of the argument list.

unsigned countOperandBundlesOfType(StringRef Name) const

Return the number of operand bundles with the tag Name attached to this instruction.

Value * getCalledOperand() const

Value * getArgOperand(unsigned i) const

User::op_iterator arg_end()

Return the iterator pointing to the end of the argument list.

bool isConvergent() const

Determine if the invoke is convergent.

Intrinsic::ID getIntrinsicID() const

Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...

iterator_range< User::op_iterator > args()

Iteration adapter for range-for loops.

unsigned arg_size() const

AttributeList getAttributes() const

Return the attributes for this call.

This class represents a function call, abstracting a target machine's calling convention.

bool checkReturnTypeForCallConv(MachineFunction &MF) const

Toplevel function to check the return type based on the target calling convention.

virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< ArrayRef< Register > > VRegs, FunctionLoweringInfo &FLI) const

This hook must be implemented to lower the incoming (formal) arguments, described by VRegs,...

virtual bool enableBigEndian() const

For targets which want to use big-endian can enable it with enableBigEndian() hook.

virtual bool supportSwiftError() const

virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef< Register > VRegs, FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const

This hook must be implemented to lower outgoing return values, described by Val, into the specified v...

virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const

This hook must be implemented to lower the given call instruction, including argument and return valu...

virtual bool fallBackToDAGISel(const MachineFunction &MF) const

This class is the base class for the comparison instructions.

Predicate

This enumeration lists the possible predicates for CmpInst subclasses.

@ FCMP_TRUE

1 1 1 1 Always true (always folded)

@ ICMP_SLT

signed less than

@ ICMP_SLE

signed less or equal

@ ICMP_UGT

unsigned greater than

@ ICMP_ULE

unsigned less or equal

@ FCMP_FALSE

0 0 0 0 Always false (always folded)

bool isFPPredicate() const

bool isIntPredicate() const

This is the shared class of boolean and integer constants.

static ConstantInt * getTrue(LLVMContext &Context)

bool isZero() const

This is just a convenience method to make client code smaller for a common code.

unsigned getBitWidth() const

getBitWidth - Return the scalar bitwidth of this constant.

uint64_t getZExtValue() const

Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...

const APInt & getValue() const

Return the constant as an APInt value reference.

This is an important base class in LLVM.

static Constant * getAllOnesValue(Type *Ty)

static Constant * getNullValue(Type *Ty)

Constructor to create a '0' constant of arbitrary type.

This is the common base class for constrained floating point intrinsics.

std::optional< fp::ExceptionBehavior > getExceptionBehavior() const

unsigned getNonMetadataArgCount() const

bool isEntryValue() const

Check if the expression consists of exactly one entry value operand.

static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)

Append the opcodes Ops to DIExpr.

bool isValidLocationForIntrinsic(const DILocation *DL) const

Check that a location is valid for this label.

bool isValidLocationForIntrinsic(const DILocation *DL) const

Check that a location is valid for this variable.

This class represents an Operation in the Expression.

A parsed version of the target data layout string in and methods for querying it.

unsigned getPointerSizeInBits(unsigned AS=0) const

Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...

const StructLayout * getStructLayout(StructType *Ty) const

Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...

IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const

Returns the type of a GEP index in AddressSpace.

TypeSize getTypeAllocSize(Type *Ty) const

Returns the offset in bytes between successive objects of the specified type, including alignment pad...

TypeSize getTypeSizeInBits(Type *Ty) const

Size examples:

TypeSize getTypeStoreSize(Type *Ty) const

Returns the maximum number of bytes that may be overwritten by storing the specified type.

Align getPointerABIAlignment(unsigned AS) const

Layout pointer alignment.

This represents the llvm.dbg.declare instruction.

Value * getAddress() const

This represents the llvm.dbg.label instruction.

DILabel * getLabel() const

Records a position in IR for a source label (DILabel).

Base class for non-instruction debug metadata records that have positions within IR.

DebugLoc getDebugLoc() const

This represents the llvm.dbg.value instruction.

Value * getValue(unsigned OpIdx=0) const

DILocalVariable * getVariable() const

DIExpression * getExpression() const

Record of a variable value-assignment, aka a non instruction representation of the dbg....

DIExpression * getExpression() const

Value * getVariableLocationOp(unsigned OpIdx) const

DILocalVariable * getVariable() const

bool isDbgDeclare() const

Class representing an expression and its matching format.

This instruction compares its operands according to the predicate given to the constructor.

An instruction for ordering other memory operations.

SyncScope::ID getSyncScopeID() const

Returns the synchronization scope ID of this fence instruction.

AtomicOrdering getOrdering() const

Returns the ordering constraint of this fence instruction.

Class to represent fixed width SIMD vectors.

static FixedVectorType * get(Type *ElementType, unsigned NumElts)

BranchProbabilityInfo * BPI

void clear()

clear - Clear out all the function-specific state.

MachineBasicBlock * getMBB(const BasicBlock *BB) const

SmallVector< MachineBasicBlock * > MBBMap

A mapping from LLVM basic block number to their machine block.

bool CanLowerReturn

CanLowerReturn - true iff the function's return value can be lowered to registers.

bool skipFunction(const Function &F) const

Optional passes call this function to check whether the pass should be skipped.

const BasicBlock & getEntryBlock() const

DISubprogram * getSubprogram() const

Get the attached subprogram.

bool hasMinSize() const

Optimize this function for minimum size (-Oz).

Constant * getPersonalityFn() const

Get the personality function associated with this function.

const Function & getFunction() const

bool isIntrinsic() const

isIntrinsic - Returns true if the function's name starts with "llvm.".

bool hasOptNone() const

Do not optimize this function (-O0).

LLVMContext & getContext() const

getContext - Return a reference to the LLVMContext associated with this function.

The actual analysis pass wrapper.

Simple wrapper that does the following.

Abstract class that contains various methods for clients to notify about changes.

Simple wrapper observer that takes several observers, and calls each one for each event.

void removeObserver(GISelChangeObserver *O)

void addObserver(GISelChangeObserver *O)

static StringRef dropLLVMManglingEscape(StringRef Name)

If the given string begins with the GlobalValue name mangling escape character '\1',...

bool hasExternalWeakLinkage() const

bool hasDLLImportStorageClass() const

Module * getParent()

Get the module that this global value is contained inside of...

bool isTailCall(const MachineInstr &MI) const override

This instruction compares its operands according to the predicate given to the constructor.

bool runOnMachineFunction(MachineFunction &MF) override

runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...

IRTranslator(CodeGenOptLevel OptLevel=CodeGenOptLevel::None)

void getAnalysisUsage(AnalysisUsage &AU) const override

getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...

Indirect Branch Instruction.

bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB, std::function< ArrayRef< Register >(const Value &Val)> GetOrCreateVRegs) const

Lower the given inline asm call instruction GetOrCreateVRegs is a callback to materialize a register ...

This instruction inserts a struct field of array element value into an aggregate value.

iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const

Return a range over the DbgRecords attached to this instruction.

const DebugLoc & getDebugLoc() const

Return the debug location for this node as a DebugLoc.

const Module * getModule() const

Return the module owning the function this instruction belongs to or nullptr it the function does not...

bool hasMetadata() const

Return true if this instruction has any metadata attached to it.

MDNode * getMetadata(unsigned KindID) const

Get the metadata of given kind attached to this Instruction.

AAMDNodes getAAMetadata() const

Returns the AA metadata for this instruction.

unsigned getOpcode() const

Returns a member of one of the enums like Instruction::Add.

bool hasAllowReassoc() const LLVM_READONLY

Determine whether the allow-reassociation flag is set.

Intrinsic::ID getIntrinsicID() const

Return the intrinsic ID of this intrinsic.

constexpr LLT changeElementType(LLT NewEltTy) const

If this type is a vector, return a vector with the same number of elements but the new element type.

static constexpr LLT scalar(unsigned SizeInBits)

Get a low-level scalar or aggregate "bag of bits".

constexpr uint16_t getNumElements() const

Returns the number of elements in a vector LLT.

constexpr bool isVector() const

static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)

Get a low-level pointer in the given address space.

constexpr TypeSize getSizeInBits() const

Returns the total size of the type. Must only be called on sized types.

constexpr bool isPointer() const

constexpr LLT getElementType() const

Returns the vector's element type. Only valid for vector types.

static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)

Get a low-level fixed-width vector of some number of elements and element width.

constexpr bool isFixedVector() const

Returns true if the LLT is a fixed vector.

The landingpad instruction holds all of the information necessary to generate correct exception handl...

An instruction for reading from memory.

Value * getPointerOperand()

AtomicOrdering getOrdering() const

Returns the ordering constraint of this load instruction.

SyncScope::ID getSyncScopeID() const

Returns the synchronization scope ID of this load instruction.

static LocationSize precise(uint64_t Value)

Context object for machine code objects.

MCSymbol * createTempSymbol()

Create a temporary symbol with a unique name.

MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)

Gets a symbol that will be defined to the final stack offset of a local variable after codegen.

MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...

static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)

TypeSize getSizeInBits() const

Returns the size of the specified MVT in bits.

unsigned pred_size() const

void normalizeSuccProbs()

Normalize probabilities of all successors so that the sum of them becomes one.

void setAddressTakenIRBlock(BasicBlock *BB)

Set this block to reflect that it corresponds to an IR-level basic block with a BlockAddress.

instr_iterator insert(instr_iterator I, MachineInstr *M)

Insert MI into the instruction list before I, possibly inside a bundle.

const BasicBlock * getBasicBlock() const

Return the LLVM basic block that this instance corresponded to originally.

void setSuccProbability(succ_iterator I, BranchProbability Prob)

Set successor probability of a given iterator.

succ_iterator succ_begin()

void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())

Add Succ as a successor of this MachineBasicBlock.

SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator

void sortUniqueLiveIns()

Sorts and uniques the LiveIns vector.

bool isPredecessor(const MachineBasicBlock *MBB) const

Return true if the specified MBB is a predecessor of this block.

void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())

Adds the specified register as a live in.

const MachineFunction * getParent() const

Return the MachineFunction containing this basic block.

void splice(iterator Where, MachineBasicBlock *Other, iterator From)

Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...

void setIsEHPad(bool V=true)

Indicates the block is a landing pad.

The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.

bool hasVarSizedObjects() const

This method may be called any time after instruction selection is complete to determine if the stack ...

int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)

Create a new statically sized stack object, returning a nonnegative identifier to represent it.

int getStackProtectorIndex() const

Return the index for the stack protector object.

void setStackProtectorIndex(int I)

int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca)

Notify the MachineFrameInfo object that a variable sized object has been created.

void setHasMustTailInVarArgFunc(bool B)

MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...

void getAnalysisUsage(AnalysisUsage &AU) const override

getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.

ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)

const TargetSubtargetInfo & getSubtarget() const

getSubtarget - Return the subtarget for which this machine code is being compiled.

StringRef getName() const

getName - Return the name of the corresponding LLVM function.

MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)

getMachineMemOperand - Allocate a new MachineMemOperand.

MachineFrameInfo & getFrameInfo()

getFrameInfo - Return the frame info object for the current function.

unsigned getTypeIDFor(const GlobalValue *TI)

Return the type id for the specified typeinfo. This is function wide.

void push_back(MachineBasicBlock *MBB)

void setHasFakeUses(bool V)

MCContext & getContext() const

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)

Add a new panding pad, and extract the exception handling information from the landingpad instruction...

void deleteMachineBasicBlock(MachineBasicBlock *MBB)

DeleteMachineBasicBlock - Delete the given MachineBasicBlock.

Function & getFunction()

Return the LLVM function that this machine code represents.

void remove(iterator MBBI)

void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr, int Slot, const DILocation *Loc)

Collect information used to emit debugging information of a variable in a stack slot.

const MachineBasicBlock & front() const

void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)

Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...

MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)

CreateMachineBasicBlock - Allocate a new MachineBasicBlock.

void erase(iterator MBBI)

void insert(iterator MBBI, MachineBasicBlock *MBB)

const TargetMachine & getTarget() const

getTarget - Return the target machine this machine code is compiled with

Helper class to build MachineInstr.

MachineInstrBuilder buildFPTOUI_SAT(const DstOp &Dst, const SrcOp &Src0)

Build and insert Res = G_FPTOUI_SAT Src0.

MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src)

Build and insert Dst = G_FREEZE Src.

MachineInstrBuilder buildBr(MachineBasicBlock &Dest)

Build and insert G_BR Dest.

std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)

Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)

MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_ADD Op0, Op1.

MachineInstrBuilder buildUndef(const DstOp &Res)

Build and insert Res = IMPLICIT_DEF.

MachineInstrBuilder buildResetFPMode()

Build and insert G_RESET_FPMODE.

MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_FPEXT Op.

MachineInstrBuilder buildFPTOSI_SAT(const DstOp &Dst, const SrcOp &Src0)

Build and insert Res = G_FPTOSI_SAT Src0.

MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)

Build and insert a Res = G_UCMP Op0, Op1.

MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)

Build and insert Res = G_JUMP_TABLE JTI.

MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)

Build and insert a Res = G_SCMP Op0, Op1.

MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)

Build and insert G_FENCE Ordering, Scope.

MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)

Build and insert a Res = G_SELECT Tst, Op0, Op1.

MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, const SrcOp &Src2, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_FMA Op0, Op1, Op2.

MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_MUL Op0, Op1.

MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)

Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.

MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)

Build and insert Res = G_AND Op0, Op1.

MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)

Build and insert an appropriate cast between two registers of equal size.

MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)

Build and insert a Res = G_ICMP Pred, Op0, Op1.

MachineBasicBlock::iterator getInsertPt()

Current insertion point for new instructions.

MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)

Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...

MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)

Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.

MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_SUB Op0, Op1.

MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)

Build and insert a G_INTRINSIC instruction.

MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)

Build and insert Res = G_VSCALE MinElts.

MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)

Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.

MachineInstrBuilder buildSetFPMode(const SrcOp &Src)

Build and insert G_SET_FPMODE Src.

MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)

Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...

MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)

Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...

MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)

Build and insert G_BRCOND Tst, Dest.

MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)

Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.

MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)

Build and insert Res = G_LOAD Addr, MMO.

MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_PTR_ADD Op0, Op1.

MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)

Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...

MachineInstrBuilder buildExtractVectorElementConstant(const DstOp &Res, const SrcOp &Val, const int Idx)

Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.

MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)

Build and insert G_STORE Val, Addr, MMO.

MachineInstrBuilder buildInstr(unsigned Opcode)

Build and insert <empty> = Opcode <empty>.

MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)

Build and insert Res = G_FRAME_INDEX Idx.

MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)

Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...

MachineInstrBuilder buildDbgLabel(const MDNode *Label)

Build and insert a DBG_LABEL instructions specifying that Label is given.

MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)

Build and insert G_BRJT TablePtr, JTI, IndexReg.

MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)

Build and insert Res = G_DYN_STACKALLOC Size, Align.

MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)

Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...

MachineInstrBuilder buildResetFPEnv()

Build and insert G_RESET_FPENV.

void setDebugLoc(const DebugLoc &DL)

Set the debug location to DL for all the next build instructions.

const MachineBasicBlock & getMBB() const

Getter for the basic block we currently build.

MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)

Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.

MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)

Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr,...

void setMBB(MachineBasicBlock &MBB)

Set the insertion point to the end of MBB.

const DebugLoc & getDebugLoc()

Get the current instruction's debug location.

MachineInstrBuilder buildTrap(bool Debug=false)

Build and insert G_TRAP or G_DEBUGTRAP.

MachineInstrBuilder buildFFrexp(const DstOp &Fract, const DstOp &Exp, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)

Build and insert Fract, Exp = G_FFREXP Src.

MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_FPTRUNC Op.

MachineInstrBuilder buildFSincos(const DstOp &Sin, const DstOp &Cos, const SrcOp &Src, std::optional< unsigned > Flags=std::nullopt)

Build and insert Sin, Cos = G_FSINCOS Src.

MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)

Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.

MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)

Build but don't insert <empty> = Opcode <empty>.

MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)

Build and insert Res = COPY Op.

MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)

Build and insert G_PREFETCH Addr, RW, Locality, CacheType.

MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)

Build and insert Res = G_EXTRACT_SUBVECTOR Src, Index.

const DataLayout & getDataLayout() const

MachineInstrBuilder buildBrIndirect(Register Tgt)

Build and insert G_BRINDIRECT Tgt.

MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)

Build and insert Res = G_SPLAT_VECTOR Val.

MachineInstrBuilder buildStepVector(const DstOp &Res, unsigned Step)

Build and insert Res = G_STEP_VECTOR Step.

virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)

Build and insert Res = G_CONSTANT Val.

MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)

Build and insert a Res = G_FCMP Pred, Op0, Op1.

MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_FADD Op0, Op1.

MachineInstrBuilder buildSetFPEnv(const SrcOp &Src)

Build and insert G_SET_FPENV Src.

Register getReg(unsigned Idx) const

Get the register for the operand index.

const MachineInstrBuilder & addImm(int64_t Val) const

Add a new immediate operand.

const MachineInstrBuilder & addMetadata(const MDNode *MD) const

const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const

const MachineInstrBuilder & addFrameIndex(int Idx) const

const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const

const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const

const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const

Add a virtual register use operand.

const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const

MachineInstr * getInstr() const

If conversion operators fail, use this method to get the MachineInstr explicitly.

const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const

Add a virtual register definition operand.

Representation of each machine instruction.

void copyIRFlags(const Instruction &I)

Copy all flags to MachineInst MIFlags.

static uint32_t copyFlagsFromInstruction(const Instruction &I)

const MachineOperand & getOperand(unsigned i) const

A description of a memory reference used in the backend.

Flags

Flags values. These may be or'd together.

@ MOVolatile

The memory access is volatile.

@ MODereferenceable

The memory access is dereferenceable (i.e., doesn't trap).

@ MOLoad

The memory access reads data.

@ MOInvariant

The memory access always returns the same value (or traps).

@ MOStore

The memory access writes data.

static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)

Register getReg() const

getReg - Returns the register number.

static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)

MachineInstr * getVRegDef(Register Reg) const

getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...

LLT getType(Register Reg) const

Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.

void setRegClass(Register Reg, const TargetRegisterClass *RC)

setRegClass - Set the register class of the specified virtual register.

Register createGenericVirtualRegister(LLT Ty, StringRef Name="")

Create and return a new generic virtual register with low-level type Ty.

void addPhysRegsUsedFromRegMask(const uint32_t *RegMask)

addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used.

Representation for a specific memory location.

A Module instance is used to store all the information related to an LLVM module.

BasicBlock * getIncomingBlock(unsigned i) const

Return incoming basic block number i.

Value * getIncomingValue(unsigned i) const

Return incoming value number i.

unsigned getNumIncomingValues() const

Return the number of incoming edges.

static PointerType * getUnqual(Type *ElementType)

This constructs a pointer to an object of the specified type in the default address space (address sp...

Class to install both of the above.

Wrapper class representing virtual and physical registers.

MCRegister asMCReg() const

Utility to check-convert this value to a MCRegister.

Return a value (possibly void), from a function.

Value * getReturnValue() const

Convenience accessor. Returns null if there is no return value.

This class represents the LLVM 'select' instruction.

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...

size_type count(const T &V) const

count - Return 1 if the element is in the set, 0 otherwise.

std::pair< const_iterator, bool > insert(const T &V)

insert - Insert an element into the set if it isn't already there.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...

void initialize(const BasicBlock *BB, MachineBasicBlock *MBB, bool FunctionBasedInstrumentation)

Initialize the stack protector descriptor structure for a new basic block.

MachineBasicBlock * getSuccessMBB()

void resetPerBBState()

Reset state that changes when we handle different basic blocks.

void resetPerFunctionState()

Reset state that only changes when we switch functions.

MachineBasicBlock * getFailureMBB()

MachineBasicBlock * getParentMBB()

bool shouldEmitStackProtector() const

Returns true if all fields of the stack protector descriptor are initialized implying that we should/...

bool shouldEmitFunctionBasedCheckStackProtector() const

bool shouldEmitSDCheck(const BasicBlock &BB) const

void copyToMachineFrameInfo(MachineFrameInfo &MFI) const

An instruction for storing to memory.

StringRef - Represent a constant reference to a string, i.e.

constexpr bool empty() const

empty - Check if the string is empty.

constexpr const char * data() const

data - Get a pointer to the start of the string (which may not be null terminated).

TypeSize getElementOffset(unsigned Idx) const

Class to represent struct types.

bool createEntriesInEntryBlock(DebugLoc DbgLoc)

Create initial definitions of swifterror values in the entry block of the current function.

void setFunction(MachineFunction &MF)

Initialize data structures for specified new function.

void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register)

Set the swifterror virtual register in the VRegDefMap for this basic block.

Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)

Get or create the swifterror value virtual register for a use of a swifterror by an instruction.

Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *, const Value *)

Get or create the swifterror value virtual register for a def of a swifterror by an instruction.

const Value * getFunctionArg() const

Get the (unique) function argument that was marked swifterror, or nullptr if this function has no swi...

void propagateVRegs()

Propagate assigned swifterror vregs through a function, synthesizing PHI nodes when needed to maintai...

Align getStackAlign() const

getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...

TargetInstrInfo - Interface to description of machine instruction set.

virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const

Return true if an FMA operation is faster than a pair of fmul and fadd instructions.

virtual unsigned getVaListSizeInBits(const DataLayout &DL) const

Returns the size of the platform's va_list object.

EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const

Return the EVT corresponding to this LLVM type.

CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const

Get the CallingConv that should be used for the specified libcall.

virtual bool useStackGuardXorFP() const

If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...

virtual MVT getVectorIdxTy(const DataLayout &DL) const

Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...

const TargetMachine & getTargetMachine() const

virtual Value * getSDagStackGuard(const Module &M) const

Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...

bool isJumpExpensive() const

Return true if Flow Control is an expensive operation that should be avoided.

virtual Function * getSSPStackGuardCheck(const Module &M) const

If the target has a standard stack protection check function that performs validation and error handl...

MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const

virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const

Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...

MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const

MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const

virtual bool fallBackToDAGISel(const Instruction &Inst) const

virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const

If a physical register, this returns the register that receives the exception address on entry to an ...

const char * getLibcallName(RTLIB::Libcall Call) const

Get the libcall routine name for the specified libcall.

virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const

If a physical register, this returns the register that receives the exception typeid on entry to a la...

virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const

Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...

virtual bool useLoadStackGuardNode(const Module &M) const

If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...

Primary interface to the complete machine description for the target machine.

CodeGenOptLevel getOptLevel() const

Returns the optimization level: None, Less, Default, or Aggressive.

virtual const TargetIntrinsicInfo * getIntrinsicInfo() const

If intrinsic information is available, return it. If not, return null.

const Triple & getTargetTriple() const

unsigned NoTrapAfterNoreturn

Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...

unsigned TrapUnreachable

Emit target-specific trap instruction for 'unreachable' IR instructions.

Target-Independent Code Generator Pass Configuration Options.

virtual std::unique_ptr< CSEConfigBase > getCSEConfig() const

Returns the CSEConfig object to use for the current optimization level.

virtual bool isGISelCSEEnabled() const

Check whether continuous CSE should be enabled in GISel passes.

TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...

virtual const InlineAsmLowering * getInlineAsmLowering() const

virtual const TargetRegisterInfo * getRegisterInfo() const

getRegisterInfo - If register information is available, return it.

virtual const CallLowering * getCallLowering() const

virtual const TargetFrameLowering * getFrameLowering() const

virtual const TargetInstrInfo * getInstrInfo() const

virtual const TargetLowering * getTargetLowering() const

bool isOSWindows() const

Tests whether the OS is Windows.

Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...

The instances of the Type class are immutable: once they are created, they are never changed.

bool isEmptyTy() const

Return true if this type is empty, that is, it has no elements or all of its elements are empty.

TypeID

Definitions of all of the base types for the Type system.

static Type * getVoidTy(LLVMContext &C)

bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const

Return true if it makes sense to take the size of this type.

bool isAggregateType() const

Return true if the type is an aggregate type.

static IntegerType * getInt32Ty(LLVMContext &C)

bool isTokenTy() const

Return true if this is 'token'.

bool isVoidTy() const

Return true if this is 'void'.

Value * getOperand(unsigned i) const

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

bool hasOneUse() const

Return true if there is exactly one use of this value.

const Value * stripPointerCasts() const

Strip off pointer casts, all-zero GEPs and address space casts.

LLVMContext & getContext() const

All values hold a context through their type.

int getNumOccurrences() const

constexpr bool isZero() const

const ParentTy * getParent() const

NodeTy * getNextNode()

Get the next node, or nullptr for the list tail.

A raw_ostream that writes to an std::string.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

constexpr char Args[]

Key for Kernel::Metadata::mArgs.

constexpr std::underlying_type_t< E > Mask()

Get a bitmask with 1s in all places up to the high-order bit of E's largest value.

@ C

The default llvm calling convention, compatible with C.

ID ArrayRef< Type * > Tys

bool match(Val *V, const Pattern &P)

specificval_ty m_Specific(const Value *V)

Match if we have a specific specified value.

TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)

Matches ExtractElementInst.

OneUse_match< T > m_OneUse(const T &SubPattern)

auto m_LogicalOr()

Matches L || R where L and R are arbitrary values.

class_match< Value > m_Value()

Match an arbitrary value and ignore it.

auto m_LogicalAnd()

Matches L && R where L and R are arbitrary values.

BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)

Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.

Libcall

RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.

@ Implicit

Not emitted register (e.g. carry, or temporary result).

@ Undef

Value of the register doesn't matter.

Offsets

Offsets in bytes from the start of the input buffer.

SmallVector< SwitchWorkListItem, 4 > SwitchWorkList

std::vector< CaseCluster > CaseClusterVector

void sortAndRangeify(CaseClusterVector &Clusters)

Sort Clusters and merge adjacent cases.

CaseClusterVector::iterator CaseClusterIt

@ CC_Range

A cluster of adjacent case labels with the same destination, or just one case.

@ CC_JumpTable

A cluster of cases suitable for jump table lowering.

@ CC_BitTests

A cluster of cases suitable for bit test lowering.

@ CE

Windows NT (Windows on ARM)

Reg

All possible values of the reg field in the ModR/M byte.

initializer< Ty > init(const Ty &Val)

ExceptionBehavior

Exception behavior used for floating point operations.

@ ebIgnore

This corresponds to "fpexcept.ignore".

DiagnosticInfoOptimizationBase::Argument NV

NodeAddr< PhiNode * > Phi

NodeAddr< CodeNode * > Code

This is an optimization pass for GlobalISel generic memory operations.

auto drop_begin(T &&RangeOrContainer, size_t N=1)

Return a range covering RangeOrContainer with the first N elements excluded.

@ Low

Lower the current thread's priority such that it does not affect foreground tasks significantly.

int popcount(T Value) noexcept

Count the number of set bits in a value.

bool isUIntN(unsigned N, uint64_t x)

Checks if an unsigned integer fits into the given (dynamic) bit width.

detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)

auto enumerate(FirstRange &&First, RestRanges &&...Rest)

Given two or more input ranges, returns a new range whose values are tuples (A, B,...

int countr_one(T Value)

Count the number of ones from the least significant bit to the first zero bit.

void diagnoseDontCall(const CallInst &CI)

auto successors(const MachineBasicBlock *BB)

MVT getMVTForLLT(LLT Ty)

Get a rough equivalent of an MVT for a given LLT.

gep_type_iterator gep_type_end(const User *GEP)

MachineBasicBlock::iterator findSplitPointForStackProtector(MachineBasicBlock *BB, const TargetInstrInfo &TII)

Find the split point at which to splice the end of BB into its success stack protector check machine ...

LLT getLLTForMVT(MVT Ty)

Get a rough equivalent of an LLT for a given MVT.

int countr_zero(T Val)

Count number of 0's from the least significant bit to the most stopping at the first 1.

Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)

Try to infer an alignment for the specified pointer.

bool any_of(R &&range, UnaryPredicate P)

Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.

llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)

Create a stride shuffle mask.

auto reverse(ContainerTy &&C)

void computeValueLLTs(const DataLayout &DL, Type &Ty, SmallVectorImpl< LLT > &ValueTys, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)

computeValueLLTs - Given an LLVM IR type, compute a sequence of LLTs that represent all the individua...

void sort(IteratorTy Start, IteratorTy End)

raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

void report_fatal_error(Error Err, bool gen_crash_diag=true)

Report a serious error, calling any installed error handler.

auto succ_size(const MachineBasicBlock *BB)

EHPersonality classifyEHPersonality(const Value *Pers)

See if the given exception handling personality function is one that we understand.

CodeGenOptLevel

Code generation optimization level.

@ Global

Append to llvm.global_dtors.

@ First

Helpers to iterate all locations in the MemoryEffectsBase class.

void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)

Modify analysis usage so it preserves passes required for the SelectionDAG fallback.

auto lower_bound(R &&Range, T &&Value)

Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...

llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)

Create an interleave shuffle mask.

@ FMul

Product of floats.

bool isAsynchronousEHPersonality(EHPersonality Pers)

Returns true if this personality function catches asynchronous exceptions.

OutputIt copy(R &&Range, OutputIt Out)

std::optional< RoundingMode > convertStrToRoundingMode(StringRef)

Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...

gep_type_iterator gep_type_begin(const User *GEP)

GlobalValue * ExtractTypeInfo(Value *V)

ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.

void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)

This method is similar to getUnderlyingObject except that it can look through phi and select instruct...

Align commonAlignment(Align A, uint64_t Offset)

Returns the alignment that satisfies both alignments.

LLT getLLTForType(Type &Ty, const DataLayout &DL)

Construct a low-level type based on an LLVM type.

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

A collection of metadata nodes that might be associated with a memory access used by the alias-analys...

This struct is a compact representation of a valid (non-zero power of two) alignment.

uint64_t value() const

This is a hole in the type system and should not be abused.

Pair of physical register and lane mask.

This class contains a discriminated union of information about pointers in memory operands,...

static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)

Return a MachinePointerInfo record that refers to the specified FrameIndex.

MachineBasicBlock * Parent

This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...

BranchProbability TrueProb

MachineBasicBlock * ThisBB

struct PredInfoPair PredInfo

BranchProbability FalseProb

MachineBasicBlock * TrueBB

MachineBasicBlock * FalseBB


RetroSearch is an open source project built by @garambo | Open a GitHub Issue

Search and Browse the WWW like it's 1997 | Search results from DuckDuckGo

HTML: 3.2 | Encoding: UTF-8 | Version: 0.7.4