clang: lib/CodeGen/Targets/AArch64.cpp Source File

#include "TargetInfo.h"
// ...
#include "llvm/TargetParser/AArch64TargetParser.h"

using namespace clang;
using namespace clang::CodeGen;

namespace {

class AArch64ABIInfo : public ABIInfo {
  AArch64ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : ABIInfo(CGT), Kind(Kind) {}
  // ...

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadicFn) const;
  ABIArgInfo classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                  bool IsNamedArg, unsigned CallingConvention,
                                  unsigned &NSRN, unsigned &NPRN) const;
  llvm::Type *convertFixedToScalableVectorType(const VectorType *VT) const;
  ABIArgInfo coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                 unsigned &NPRN) const;
  ABIArgInfo coerceAndExpandPureScalableAggregate(
      QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
      const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
      unsigned &NPRN) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isIllegalVectorType(QualType Ty) const;

  bool passAsAggregateType(QualType Ty) const;
  bool passAsPureScalableType(QualType Ty, unsigned &NV, unsigned &NP,
                              SmallVectorImpl<llvm::Type *> &CoerceToSeq) const;

  void flattenType(llvm::Type *Ty,
                   SmallVectorImpl<llvm::Type *> &Flattened) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() =
          classifyReturnType(FI.getReturnType(), FI.isVariadic());

    unsigned ArgNo = 0;
    unsigned NSRN = 0, NPRN = 0;
    for (auto &it : FI.arguments()) {
      const bool IsNamedArg =
          ArgNo < FI.getRequiredArgs().getNumRequiredArgs();
      ++ArgNo;
      it.info = classifyArgumentType(it.type, FI.isVariadic(), IsNamedArg,
                                     FI.getCallingConvention(), NSRN, NPRN);
    }
  }

  RValue EmitDarwinVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
                         AggValueSlot Slot) const;

  RValue EmitAAPCSVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
                        AArch64ABIKind Kind, AggValueSlot Slot) const;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

    return Kind == AArch64ABIKind::Win64
               ? EmitMSVAArg(CGF, VAListAddr, Ty, Slot)
           : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF, Slot)
                           : EmitAAPCSVAArg(VAListAddr, Ty, CGF, Kind, Slot);
  }

  void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                               raw_ostream &Out) const override;
  void appendAttributeMangling(StringRef AttrStr,
                               raw_ostream &Out) const override;
  // ...
};

class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT) : SwiftABIInfo(CGT) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};

class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
    SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    TargetInfo::BranchProtectionInfo BPI(CGM.getLangOpts());

    if (const auto *TA = FD->getAttr<TargetAttr>()) {
      ParsedTargetAttr Attr =
          CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
      if (!Attr.BranchProtection.empty()) {
        StringRef Error;
        (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                       Attr.CPU, BPI, Error);
        assert(Error.empty());
      }
    }
    auto *Fn = cast<llvm::Function>(GV);
    setBranchProtectionFnAttributes(BPI, *Fn);
  }

  bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
                                llvm::Type *Ty) const override {
    if (CGF.getTarget().hasFeature("ls64")) {
      auto *ST = dyn_cast<llvm::StructType>(Ty);
      if (ST && ST->getNumElements() == 1) {
        auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
        if (AT && AT->getNumElements() == 8 &&
            AT->getElementType()->isIntegerTy(64))
          return true;
      }
    }
    return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
  }

  void checkFunctionABI(CodeGenModule &CGM,
                        const FunctionDecl *Decl) const override;
  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee, const CallArgList &Args,
                            QualType ReturnType) const override;
  // ...
};

class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

} // namespace

void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}

llvm::Type *
AArch64ABIInfo::convertFixedToScalableVectorType(const VectorType *VT) const {
  assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");

  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    return llvm::ScalableVectorType::get(llvm::Type::getInt1Ty(getVMContext()),
                                         16);
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    const auto *BT = VT->getElementType()->castAs<BuiltinType>();
    switch (BT->getKind()) {
    default:
      llvm_unreachable("unexpected builtin type for SVE vector!");

    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::MFloat8:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt8Ty(getVMContext()), 16);

    case BuiltinType::Short:
    case BuiltinType::UShort:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt16Ty(getVMContext()), 8);

    case BuiltinType::Int:
    case BuiltinType::UInt:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);

    case BuiltinType::Long:
    case BuiltinType::ULong:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2);

    case BuiltinType::Half:
      return llvm::ScalableVectorType::get(
          llvm::Type::getHalfTy(getVMContext()), 8);

    case BuiltinType::Float:
      return llvm::ScalableVectorType::get(
          llvm::Type::getFloatTy(getVMContext()), 4);

    case BuiltinType::Double:
      return llvm::ScalableVectorType::get(
          llvm::Type::getDoubleTy(getVMContext()), 2);

    case BuiltinType::BFloat16:
      return llvm::ScalableVectorType::get(
          llvm::Type::getBFloatTy(getVMContext()), 8);
    }
  }

  llvm_unreachable("expected fixed-length SVE vector");
}
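Every fixed-length SVE data type above canonicalizes to the scalable vector whose 128-bit granule holds its element type, so the minimum element count is always 128 divided by the element width in bits. A standalone sketch of that arithmetic (the helper name minEltCount and the table are illustrative, not part of clang):

#include <cstdio>

// Minimum element count of a scalable vector whose granule is 128 bits:
// <vscale x (128 / EltBits) x EltTy>, mirroring the switch above.
static unsigned minEltCount(unsigned EltBits) { return 128 / EltBits; }

int main() {
  struct { const char *Name; unsigned Bits; } Elts[] = {
      {"i8 (SChar/UChar/MFloat8)", 8}, {"i16 (Short/UShort)", 16},
      {"i32 (Int/UInt)", 32},          {"i64 (Long/ULong)", 64},
      {"half", 16},                    {"float", 32},
      {"double", 64},                  {"bfloat", 16}};
  for (auto &E : Elts)
    std::printf("<vscale x %u x %s>\n", minEltCount(E.Bits), E.Name);
}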

ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                               unsigned &NPRN) const {
  assert(Ty->isVectorType() && "expected vector type!");

  const auto *VT = Ty->castAs<VectorType>();
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    NPRN = std::min(NPRN + 1, 4u);
    return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
        llvm::Type::getInt1Ty(getVMContext()), 16));
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    NSRN = std::min(NSRN + 1, 8u);
    return ABIArgInfo::getDirect(convertFixedToScalableVectorType(VT));
  }

  uint64_t Size = getContext().getTypeSize(Ty);
  // Android promotes <2 x i8> to i16, not i32.
  if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
    llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64) {
    NSRN = std::min(NSRN + 1, 8u);
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 128) {
    NSRN = std::min(NSRN + 1, 8u);
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
    return ABIArgInfo::getDirect(ResType);
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
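For the non-SVE cases, the coercion picks a container purely by bit size. A minimal sketch of that ladder, assuming a non-Android/OHOS target (the helper name is hypothetical):

#include <cstdint>
#include <cstdio>

// Sketch of the size ladder above: small vectors become i32, 64- and
// 128-bit vectors become <2 x i32> / <4 x i32> (each consuming one
// SIMD/FP register), and everything else goes indirect.
static const char *coerceIllegalVector(uint64_t SizeBits) {
  if (SizeBits <= 32)
    return "i32";
  if (SizeBits == 64)
    return "<2 x i32>";
  if (SizeBits == 128)
    return "<4 x i32>";
  return "indirect (natural alignment)";
}

int main() {
  for (uint64_t Bits : {24u, 48u, 64u, 96u, 128u, 256u})
    std::printf("%3llu bits -> %s\n", (unsigned long long)Bits,
                coerceIllegalVector(Bits));
}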

ABIArgInfo AArch64ABIInfo::coerceAndExpandPureScalableAggregate(
    QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
    const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
    unsigned &NPRN) const {
  if (!IsNamedArg || NSRN + NVec > 8 || NPRN + NPred > 4)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  NSRN += NVec;
  NPRN += NPred;

  llvm::Type *UnpaddedCoerceToType =
      UnpaddedCoerceToSeq.size() == 1
          ? UnpaddedCoerceToSeq[0]
          : llvm::StructType::get(CGT.getLLVMContext(), UnpaddedCoerceToSeq,
                                  true);

  SmallVector<llvm::Type *> CoerceToSeq;
  flattenType(CGT.ConvertType(Ty), CoerceToSeq);
  auto *CoerceToType =
      llvm::StructType::get(CGT.getLLVMContext(), CoerceToSeq, false);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}
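The guard at the top mirrors AAPCS64's register budget: at most eight scalable data vectors (z0-z7, tracked by NSRN) and four predicates (p0-p3, tracked by NPRN) per call, and only named arguments qualify. A tiny sketch of just that bookkeeping (the function name is hypothetical):

#include <cstdio>

// An aggregate is passed expanded in registers only if all of its vectors
// and predicates fit in what remains of the Z and P register files.
static bool fitsInRegisters(unsigned NSRN, unsigned NVec, unsigned NPRN,
                            unsigned NPred, bool IsNamedArg) {
  return IsNamedArg && NSRN + NVec <= 8 && NPRN + NPred <= 4;
}

int main() {
  // A struct of 4 data vectors after 6 Z registers are in use: indirect.
  std::printf("%d\n", fitsInRegisters(/*NSRN=*/6, /*NVec=*/4,
                                      /*NPRN=*/0, /*NPred=*/0,
                                      /*IsNamedArg=*/true)); // 0
  std::printf("%d\n", fitsInRegisters(0, 4, 0, 1, true));    // 1
}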

ABIArgInfo
AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                     bool IsNamedArg,
                                     unsigned CallingConvention,
                                     unsigned &NSRN, unsigned &NPRN) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty, NSRN, NPRN);

  if (!passAsAggregateType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

    if (Ty->isVectorType())
      NSRN = std::min(NSRN + 1, 8u);
    else if (const auto *BT = Ty->getAs<BuiltinType>()) {
      if (BT->isFloatingPoint())
        NSRN = std::min(NSRN + 1, 8u);
      else {
        switch (BT->getKind()) {
        case BuiltinType::SveBool:
        case BuiltinType::SveCount:
          NPRN = std::min(NPRN + 1, 4u);
          break;
        case BuiltinType::SveBoolx2:
          NPRN = std::min(NPRN + 2, 4u);
          break;
        case BuiltinType::SveBoolx4:
          NPRN = std::min(NPRN + 4, 4u);
          break;
        default:
          if (BT->isSVESizelessBuiltinType())
            NSRN = std::min(
                NSRN + getContext().getBuiltinVectorTypeInfo(BT).NumVectors,
                8u);
        }
      }
    }

    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty, CGT.ConvertType(Ty))
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);

  // Empty records are ignored on non-Darwin C calling conventions.
  uint64_t Size = getContext().getTypeSize(Ty);
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (!Ty->isSVESizelessBuiltinType() && (IsEmpty || Size == 0)) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();
    // ...
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
                 CallingConvention == llvm::CallingConv::Win64;
  bool IsWinVariadic = IsWin64 && IsVariadicFn;
  // In variadic functions on Windows, all composite types are treated alike;
  // there is no special handling of HFAs/HVAs.
  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
    NSRN = std::min(NSRN + Members, uint64_t(8));
    if (Kind != AArch64ABIKind::AAPCS)
      return ABIArgInfo::getDirect(
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

    // For HFAs/HVAs, cap the argument alignment to 16, otherwise use 8.
    unsigned Align =
        getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    Align = (Align >= 16) ? 16 : 8;
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
        nullptr, true, Align);
  }

  // In AAPCS, named arguments of a Pure Scalable Type are passed expanded in
  // registers, or indirectly if there are not enough registers.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(Ty, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          Ty, IsNamedArg, NVec, NPred, UnpaddedCoerceToSeq, NSRN, NPRN);
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    unsigned Alignment;
    if (Kind == AArch64ABIKind::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment =
          std::max(getContext().getTypeAlign(Ty),
                   (unsigned)getTarget().getPointerWidth(LangAS::Default));
    }
    Size = llvm::alignTo(Size, Alignment);

    // We use a pair of i64 for a 16-byte aggregate with 8-byte alignment,
    // and i128 for aggregates with 16-byte alignment.
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
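As a reference point for the HFA branch above, a struct of four floats is the canonical homogeneous floating-point aggregate; under AAPCS64 it travels in four consecutive SIMD/FP registers, which the classifier models as [4 x float] plus an NSRN bump of Members. A minimal compilable illustration (names are illustrative):

#include <cstdio>

// A classic HFA: up to four members, all of one floating-point type.
struct Quad {
  float x, y, z, w;
};

static float sum(Quad Q) { return Q.x + Q.y + Q.z + Q.w; }

int main() {
  static_assert(sizeof(Quad) == 16, "four floats, no padding");
  std::printf("%f\n", sum({1.f, 2.f, 3.f, 4.f})); // passed in s0-s3
}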

ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadicFn) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      unsigned NSRN = 0, NPRN = 0;
      return coerceIllegalVector(RetTy, NSRN, NPRN);
    }
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!passAsAggregateType(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy);

    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  if (!RetTy->isSVESizelessBuiltinType() &&
      (isEmptyRecord(getContext(), RetTy, true) || Size == 0))
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadicFn))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // In AAPCS, return values of a Pure Scalable Type are treated as a single
  // named argument and passed expanded in registers, or indirectly if there
  // are not enough registers.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NSRN = 0, NPRN = 0;
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(RetTy, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          RetTy, /*IsNamedArg=*/true, NVec, NPred, UnpaddedCoerceToSeq, NSRN,
          NPRN);
  }

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    if (Size <= 64 && getDataLayout().isLittleEndian()) {
      // Composite types are returned in the lower bits of a 64-bit register
      // for LE, and in higher bits for BE; coerce to the exact bit width to
      // avoid shifting on LE.
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
    }

    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to a multiple of 8 bytes

    // We use a pair of i64 for a 16-byte aggregate with 8-byte alignment,
    // and i128 for aggregates with 16-byte alignment.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy);
}
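The small-aggregate return path can be restated outside clang: on little-endian targets anything up to 64 bits becomes a single iN, larger values are rounded up to 64-bit units, and a 16-byte value with less than 16-byte alignment becomes [2 x i64] rather than i128. A hedged sketch of that arithmetic (helper name hypothetical):

#include <cstdint>
#include <cstdio>
#include <string>

// Little-endian small-aggregate return coercion, per the branch above.
static std::string coerceReturn(uint64_t SizeBits, unsigned AlignBits) {
  if (SizeBits <= 64)
    return "i" + std::to_string(SizeBits);
  uint64_t Rounded = (SizeBits + 63) / 64 * 64; // llvm::alignTo(Size, 64)
  if (AlignBits < 128 && Rounded == 128)
    return "[2 x i64]";
  return "i" + std::to_string(Rounded);
}

int main() {
  std::printf("%s\n", coerceReturn(24, 8).c_str());    // i24
  std::printf("%s\n", coerceReturn(96, 64).c_str());   // [2 x i64]
  std::printf("%s\n", coerceReturn(128, 128).c_str()); // i128
}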

/// isIllegalVectorType - check whether the vector type is legal for AArch64.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Fixed-length SVE vectors are represented as scalable vectors in
    // function args/return and must be coerced from fixed vectors.
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      return true;

    // NumElements should be a power of 2.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 has to be compatible with the ARM logic here, which allows
    // huge vectors.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}
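Ignoring the SVE and arm64_32 special cases, the predicate reduces to two checks; a standalone restatement (names hypothetical):

#include <cstdint>
#include <cstdio>

// A vector is legal only if its element count is a power of two and its
// total size is exactly 64 or 128 bits (with more than one element).
static bool isIllegalVector(unsigned NumElements, uint64_t SizeBits) {
  if ((NumElements & (NumElements - 1)) != 0) // !isPowerOf2_32
    return true;
  return SizeBits != 64 && (SizeBits != 128 || NumElements == 1);
}

int main() {
  std::printf("%d\n", isIllegalVector(2, 64));  // 0: <2 x i32> is legal
  std::printf("%d\n", isIllegalVector(3, 96));  // 1: non-power-of-2 count
  std::printf("%d\n", isIllegalVector(1, 128)); // 1: single-element
}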

bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
                                            llvm::Type *EltTy,
                                            unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}

bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // For the soft-float ABI variant, no types are considered to be homogeneous
  // aggregates.
  if (isSoftFloat())
    return false;

  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type, with the difference from the 32-bit
  // ABI that any floating-point type is allowed, including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (auto Kind = VT->getVectorKind();
        Kind == VectorKind::SveFixedLengthData ||
        Kind == VectorKind::SveFixedLengthPredicate)
      return false;

    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  return Members <= 4;
}

bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
    const {
  // AAPCS64 says that the rule for whether something is a homogeneous
  // aggregate is applied to the output of the data layout algorithm, so
  // zero-length bitfields do not prevent it.
  return true;
}

bool AArch64ABIInfo::passAsAggregateType(QualType Ty) const {
  if (Kind == AArch64ABIKind::AAPCS && Ty->isSVESizelessBuiltinType()) {
    const auto *BT = Ty->castAs<BuiltinType>();
    return !BT->isSVECount() &&
           getContext().getBuiltinVectorTypeInfo(BT).NumVectors > 1;
  }
  return isAggregateTypeForABI(Ty);
}

// Check if a type needs to be passed in registers as a Pure Scalable Type (as
// defined by AAPCS64). Returns the number of data vectors and predicate
// vectors in `NVec` and `NPred`; `CoerceToSeq` receives an expanded sequence
// of LLVM IR types, one element per non-composite member.
bool AArch64ABIInfo::passAsPureScalableType(
    QualType Ty, unsigned &NVec, unsigned &NPred,
    SmallVectorImpl<llvm::Type *> &CoerceToSeq) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElt = AT->getZExtSize();
    if (NElt == 0)
      return false;

    unsigned NV = 0, NP = 0;
    SmallVector<llvm::Type *> EltCoerceToSeq;
    if (!passAsPureScalableType(AT->getElementType(), NV, NP, EltCoerceToSeq))
      return false;

    if (CoerceToSeq.size() + NElt * EltCoerceToSeq.size() > 12)
      return false;

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::copy(EltCoerceToSeq, std::back_inserter(CoerceToSeq));

    NVec += NElt * NV;
    NPred += NElt * NP;
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // If the record cannot be passed in registers, then it's not a PST.
    // Pure scalable types are never unions and never contain unions.
    const RecordDecl *RD = RT->getDecl();
    // ...

    // If this is a C++ record, check the bases.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;
        if (!passAsPureScalableType(I.getType(), NVec, NPred, CoerceToSeq))
          return false;
      }
    }

    // Check members.
    for (const auto *FD : RD->fields()) {
      QualType FT = FD->getType();
      if (isEmptyField(getContext(), FD, /*AllowArrays=*/true))
        continue;
      if (!passAsPureScalableType(FT, NVec, NPred, CoerceToSeq))
        return false;
    }

    return true;
  }

  if (const auto *VT = Ty->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      ++NPred;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
      ++NVec;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    return false;
  }

  if (!Ty->isBuiltinType())
    return false;

  bool isPredicate;
  switch (Ty->castAs<BuiltinType>()->getKind()) {
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId)                   \
  case BuiltinType::Id:                                                       \
    isPredicate = false;                                                      \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId)                \
  case BuiltinType::Id:                                                       \
    isPredicate = true;                                                       \
    break;
#define SVE_TYPE(Name, Id, SingletonId)
#include "clang/Basic/AArch64SVEACLETypes.def"
  default:
    return false;
  }

  ASTContext::BuiltinVectorTypeInfo Info =
      getContext().getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
  assert(Info.NumVectors > 0 && Info.NumVectors <= 4 &&
         "Expected 1, 2, 3 or 4 vectors!");
  if (isPredicate)
    NPred += Info.NumVectors;
  else
    NVec += Info.NumVectors;
  llvm::Type *EltTy = Info.ElementType->isMFloat8Type()
                          ? llvm::Type::getInt8Ty(getVMContext())
                          : CGT.ConvertType(Info.ElementType);
  auto *VTy = llvm::ScalableVectorType::get(EltTy, Info.EC.getKnownMinValue());

  if (CoerceToSeq.size() + Info.NumVectors > 12)
    return false;
  std::fill_n(std::back_inserter(CoerceToSeq), Info.NumVectors, VTy);

  return true;
}

// Expand an LLVM IR type into a sequence with an element for each non-struct,
// non-array member of the type, with the exception of the padding types,
// which are retained.
void AArch64ABIInfo::flattenType(
    llvm::Type *Ty, SmallVectorImpl<llvm::Type *> &Flattened) const {
  if (ABIArgInfo::isPaddingForCoerceAndExpand(Ty)) {
    Flattened.push_back(Ty);
    return;
  }

  if (const auto *AT = dyn_cast<llvm::ArrayType>(Ty)) {
    uint64_t NElt = AT->getNumElements();
    if (NElt == 0)
      return;

    SmallVector<llvm::Type *> EltFlattened;
    flattenType(AT->getElementType(), EltFlattened);

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::copy(EltFlattened, std::back_inserter(Flattened));
    return;
  }

  if (const auto *ST = dyn_cast<llvm::StructType>(Ty)) {
    for (auto *ET : ST->elements())
      flattenType(ET, Flattened);
    return;
  }

  Flattened.push_back(Ty);
}
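The same recursion is easy to exercise on a toy type model: arrays repeat their flattened element NElt times, structs concatenate their flattened members, and anything else is a leaf. A self-contained sketch (the Ty record below is illustrative, not clang's or LLVM's type system):

#include <cstdio>
#include <memory>
#include <string>
#include <vector>

// Toy model: non-empty Scalar => leaf; NElt > 0 => array of Elts[0];
// otherwise a struct whose members are Elts.
struct Ty {
  std::string Scalar;
  size_t NElt = 0;
  std::vector<std::shared_ptr<Ty>> Elts;
};

static void flatten(const Ty &T, std::vector<std::string> &Out) {
  if (!T.Scalar.empty()) {
    Out.push_back(T.Scalar);
  } else if (T.NElt > 0) {
    std::vector<std::string> Elt;
    flatten(*T.Elts[0], Elt);
    for (size_t I = 0; I < T.NElt; ++I)
      Out.insert(Out.end(), Elt.begin(), Elt.end());
  } else {
    for (auto &E : T.Elts)
      flatten(*E, Out);
  }
}

int main() {
  auto F = std::make_shared<Ty>(Ty{"float"});
  Ty Arr{"", 2, {F}};                          // [2 x float]
  Ty S{"", 0, {std::make_shared<Ty>(Arr), F}}; // { [2 x float], float }
  std::vector<std::string> Out;
  flatten(S, Out);
  for (auto &N : Out)
    std::printf("%s ", N.c_str()); // float float float
  std::printf("\n");
}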

RValue AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF,
                                      AArch64ABIKind Kind,
                                      AggValueSlot Slot) const {
  // These numbers are not used for variadic arguments, hence it doesn't
  // matter that they don't retain their values across multiple calls.
  unsigned NSRN = 0, NPRN = 0;
  ABIArgInfo AI =
      classifyArgumentType(Ty, /*IsVariadicFn=*/true, /*IsNamedArg=*/false,
                           CGF.CurFnInfo->getCallingConvention(), NSRN, NPRN);
  // ...
  bool IsIndirect = AI.isIndirect();
  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR =
      !isSoftFloat() && (BaseTy->isFloatingPointTy() || BaseTy->isVectorTy());

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4.
  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs, 1 of __gr_top.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1;
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs, 2 of __vr_top.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2;
    RegSize = 16 * NumRegs;
  }

  // If reg_offs >= 0 we're already using the stack for this type of argument.
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}), so align
  // __gr_offs before calculating the potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the offset for the next va_arg on this list. Doing this
  // unconditionally reflects the fact that allocating an argument to the
  // stack also uses up all the remaining registers of that kind.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================
  CGF.EmitBlock(InRegBlock);
  llvm::Value *reg_top = nullptr;
  // ... load reg_top via reg_top_index and form BaseAddr = reg_top + reg_offs ...

  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find
    // from stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers have their elements stored
    // 16 bytes apart (notionally in qN, qN+1, ...), so copy them into a
    // contiguous temporary.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp =
        CGF.CreateTempAlloca(HFATy, std::max(TyAlign, BaseTyInfo.Align));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.Width.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.Width.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      // ... load each member from BaseAddr + 16*i + Offset and store it to
      // Tmp[i] via CreateConstInBoundsByteGEP/CreateConstArrayGEP ...
    }
    // ...
  } else {
    // Otherwise the object is contiguous in memory. It might be
    // right-aligned in its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) && TySize < SlotSize) {
      // ... right-adjust BaseAddr by SlotSize - TySize ...
    }
    // ...
  }

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);
  // ... load __stack into OnStackPtr and realign it if TyAlign > 8 ...

  // All stack slots are multiples of 8 bytes.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
      CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");
  // ... store NewStack back into __stack for the next va_arg ...

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    // ... right-adjust OnStackAddr by Offset ...
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
                                 OnStackBlock, "vaargs.addr");
  // ...
}
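The __gr_offs realignment in the middle of this function is the classic two-step round-up, add Align - 1 then mask with -Align, applied to a negative offset measured back from __gr_top. A small demonstration of that arithmetic (function name hypothetical):

#include <cstdint>
#include <cstdio>

// Round a (possibly negative) register offset up to a power-of-two
// alignment, exactly as the CreateAdd/CreateAnd pair above does.
static int32_t alignOffset(int32_t RegOffs, int32_t Align) {
  return (RegOffs + Align - 1) & -Align;
}

int main() {
  // A 16-byte-aligned type whose next GP slot is at -56 rounds to -48.
  std::printf("%d\n", alignOffset(-56, 16)); // -48
  std::printf("%d\n", alignOffset(-48, 16)); // -48 (already aligned)
}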

RValue AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF,
                                       AggValueSlot Slot) const {
  // The backend's lowering doesn't support va_arg on aggregates or illegal
  // vector types, so we use the common void* representation.
  uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);

  // ...

  // The size of the actual thing passed, which might end up just being a
  // pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous aggregates are
  // passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.Width.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
                          /*AllowHigherAlign=*/true, Slot);
}

RValue AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty, AggValueSlot Slot) const {
  bool IsIndirect = false;
  // ...
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*AllowHigherAlign=*/false, Slot);
}

static void diagnoseIfNeedsFPReg(DiagnosticsEngine &Diags,
                                 const StringRef ABIName,
                                 const AArch64ABIInfo &ABIInfo,
                                 const QualType &Ty, const NamedDecl *D,
                                 SourceLocation loc) {
  const Type *HABase = nullptr;
  uint64_t HAMembers = 0;
  if (Ty->isFloatingType() || Ty->isVectorType() ||
      ABIInfo.isHomogeneousAggregate(Ty, HABase, HAMembers)) {
    Diags.Report(loc, diag::err_target_unsupported_type_for_abi)
        << D->getDeclName() << Ty << ABIName;
  }
}

// If the hard-float ABI is in use but the target lacks FP registers,
// diagnose any function argument or return value that would need one.
void AArch64TargetCodeGenInfo::checkFunctionABI(
    CodeGenModule &CGM, const FunctionDecl *FuncDecl) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  // ... call diagnoseIfNeedsFPReg on the return type and each parameter ...
}

static bool isStreamingCompatible(const FunctionDecl *F) {
  if (const auto *T = F->getType()->getAs<FunctionProtoType>())
    return T->getAArch64SMEAttributes() &
           FunctionType::SME_PStateSMCompatibleMask;
  return false;
}

// Determines if there are any Arm SME ABI issues with inlining Callee into
// Caller. ArmSMEInlinability is a bitmask enum with Ok,
// ErrorCalleeRequiresNewZA, ErrorCalleeRequiresNewZT0,
// WarnIncompatibleStreamingModes and ErrorIncompatibleStreamingModes
// (the latter two combined as IncompatibleStreamingModes).
static ArmSMEInlinability GetArmSMEInlinability(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee) {
  bool CallerIsStreaming =
      IsArmStreamingFunction(Caller, /*IncludeLocallyStreaming=*/true);
  bool CalleeIsStreaming =
      IsArmStreamingFunction(Callee, /*IncludeLocallyStreaming=*/true);
  bool CallerIsStreamingCompatible = isStreamingCompatible(Caller);
  bool CalleeIsStreamingCompatible = isStreamingCompatible(Callee);

  ArmSMEInlinability Inlinability = ArmSMEInlinability::Ok;

  if (!CalleeIsStreamingCompatible &&
      (CallerIsStreaming != CalleeIsStreaming || CallerIsStreamingCompatible)) {
    if (CalleeIsStreaming)
      Inlinability |= ArmSMEInlinability::ErrorIncompatibleStreamingModes;
    else
      Inlinability |= ArmSMEInlinability::WarnIncompatibleStreamingModes;
  }
  if (auto *NewAttr = Callee->getAttr<ArmNewAttr>()) {
    if (NewAttr->isNewZA())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZA;
    if (NewAttr->isNewZT0())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZT0;
  }

  return Inlinability;
}

void AArch64TargetCodeGenInfo::checkFunctionCallABIStreaming(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee) const {
  if (!Caller || !Callee || !Callee->hasAttr<AlwaysInlineAttr>())
    return;

  ArmSMEInlinability Inlinability = GetArmSMEInlinability(Caller, Callee);

  if ((Inlinability & ArmSMEInlinability::IncompatibleStreamingModes) !=
      ArmSMEInlinability::Ok)
    CGM.getDiags().Report(
        CallLoc,
        (Inlinability & ArmSMEInlinability::ErrorIncompatibleStreamingModes) ==
                ArmSMEInlinability::ErrorIncompatibleStreamingModes
            ? diag::err_function_always_inline_attribute_mismatch
            : diag::warn_function_always_inline_attribute_mismatch)
        << Caller->getDeclName() << Callee->getDeclName() << "streaming";

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZA) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZA)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_za)
        << Callee->getDeclName();

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZT0) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZT0)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_zt0)
        << Callee->getDeclName();
}

// If the target does not have floating-point registers but a hard-float ABI
// is in use, there is no way to pass floating-point, vector or HFA values,
// so report an error.
void AArch64TargetCodeGenInfo::checkFunctionCallABISoftFloat(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee, const CallArgList &Args,
    QualType ReturnType) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();

  if (!Caller || TI.hasFeature("fp") || ABIInfo.isSoftFloat())
    return;

  diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, ReturnType,
                       Callee ? Callee : Caller, CallLoc);

  for (const CallArg &Arg : Args)
    diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, Arg.getType(),
                         Callee ? Callee : Caller, CallLoc);
}

void AArch64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
                                                    SourceLocation CallLoc,
                                                    const FunctionDecl *Caller,
                                                    const FunctionDecl *Callee,
                                                    const CallArgList &Args,
                                                    QualType ReturnType) const {
  checkFunctionCallABIStreaming(CGM, CallLoc, Caller, Callee);
  checkFunctionCallABISoftFloat(CGM, CallLoc, Caller, Callee, Args, ReturnType);
}

bool AArch64TargetCodeGenInfo::wouldInliningViolateFunctionCallABI(
    const FunctionDecl *Caller, const FunctionDecl *Callee) const {
  return Caller && Callee &&
         GetArmSMEInlinability(Caller, Callee) != ArmSMEInlinability::Ok;
}

void AArch64ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
                                             unsigned Index,
                                             raw_ostream &Out) const {
  appendAttributeMangling(Attr->getFeatureStr(Index), Out);
}

void AArch64ABIInfo::appendAttributeMangling(StringRef AttrStr,
                                             raw_ostream &Out) const {
  if (AttrStr == "default") {
    Out << ".default";
    return;
  }

  Out << "._";
  SmallVector<StringRef, 8> Features;
  AttrStr.split(Features, "+");
  for (auto &Feat : Features)
    Feat = Feat.trim();

  llvm::sort(Features, [](const StringRef LHS, const StringRef RHS) {
    return LHS.compare(RHS) < 0;
  });

  llvm::SmallDenseSet<StringRef, 8> UniqueFeats;
  for (auto &Feat : Features)
    if (auto Ext = llvm::AArch64::parseFMVExtension(Feat))
      if (UniqueFeats.insert(Ext->Name).second)
        Out << 'M' << Ext->Name;
}
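Without LLVM's helpers, the mangling scheme reduces to: split the attribute string on '+', sort, de-duplicate, and append 'M' plus each feature to "._", with "default" mapping to ".default". A hedged reimplementation that skips parseFMVExtension's canonicalization (features are taken verbatim):

#include <algorithm>
#include <cstdio>
#include <set>
#include <sstream>
#include <string>
#include <vector>

static std::string mangleClone(const std::string &AttrStr) {
  if (AttrStr == "default")
    return ".default";
  std::vector<std::string> Features;
  std::stringstream SS(AttrStr);
  for (std::string Feat; std::getline(SS, Feat, '+');)
    Features.push_back(Feat);
  std::sort(Features.begin(), Features.end()); // lexicographic, as above
  std::set<std::string> Unique;
  std::string Out = "._";
  for (auto &Feat : Features)
    if (Unique.insert(Feat).second) // skip duplicates
      Out += "M" + Feat;
  return Out;
}

int main() {
  std::printf("%s\n", mangleClone("sve2+dotprod").c_str()); // ._MdotprodMsve2
  std::printf("%s\n", mangleClone("default").c_str());      // .default
}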

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                        AArch64ABIKind Kind) {
  return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                               AArch64ABIKind K) {
  return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
}


