class AArch64ABIInfo : public ABIInfo {
  // ...
  ABIArgInfo classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                  bool IsNamedArg, unsigned CallingConvention,
                                  unsigned &NSRN, unsigned &NPRN) const;
  llvm::Type *convertFixedToScalableVectorType(const VectorType *VT) const;
  ABIArgInfo coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                 unsigned &NPRN) const;
  ABIArgInfo coerceAndExpandPureScalableAggregate(
      QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
      const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
      unsigned &NPRN) const;
  // ...
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;
  // ...
  bool isIllegalVectorType(QualType Ty) const;
  bool passAsAggregateType(QualType Ty) const;
  bool passAsPureScalableType(QualType Ty, unsigned &NV, unsigned &NP,
                              SmallVectorImpl<llvm::Type *> &CoerceToSeq) const;
  void flattenType(llvm::Type *Ty,
                   SmallVectorImpl<llvm::Type *> &Flattened) const;
  void computeInfo(CGFunctionInfo &FI) const override {
    // ...
    unsigned ArgNo = 0;
    unsigned NSRN = 0, NPRN = 0;
    for (auto &it : FI.arguments()) {
      const bool IsNamedArg =
          !FI.isVariadic() || ArgNo < FI.getRequiredArgs().getNumRequiredArgs();
      ++ArgNo;
      it.info = classifyArgumentType(it.type, FI.isVariadic(), IsNamedArg,
                                     FI.getCallingConvention(), NSRN, NPRN);
    }
  }

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

    return Kind == AArch64ABIKind::Win64
               ? EmitMSVAArg(CGF, VAListAddr, Ty, Slot)
           : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF, Slot)
                           : EmitAAPCSVAArg(VAListAddr, Ty, CGF, Kind, Slot);
  }
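  // Editorial note: NSRN and NPRN mirror the AAPCS64 allocation counters --
  // the Next SIMD and Floating-point Register Number (v0-v7, hence the cap
  // of 8) and the next SVE predicate register number (p0-p3, cap of 4).
  // classifyArgumentType advances them as each argument is assigned.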
  void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                               raw_ostream &Out) const override;
  void appendAttributeMangling(StringRef AttrStr,
                               raw_ostream &Out) const override;
  // ...
};

class AArch64SwiftABIInfo : public SwiftABIInfo {
public:
  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};

class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
    SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    TargetInfo::BranchProtectionInfo BPI(CGM.getLangOpts());

    if (const auto *TA = FD->getAttr<TargetAttr>()) {
      ParsedTargetAttr Attr =
          CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
      if (!Attr.BranchProtection.empty()) {
        StringRef Error;
        (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                       Attr.CPU, BPI, Error);
        assert(Error.empty());
      }
    }
    // ...
    auto *Fn = cast<llvm::Function>(GV);
    setBranchProtectionFnAttributes(BPI, *Fn);
  }
  bool isScalarizableAsmOperand(CodeGenFunction &CGF,
                                llvm::Type *Ty) const override {
    // ...
    auto *ST = dyn_cast<llvm::StructType>(Ty);
    if (ST && ST->getNumElements() == 1) {
      auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
      if (AT && AT->getNumElements() == 8 &&
          AT->getElementType()->isIntegerTy(64))
        return true;
    }
    // ...
  }
  // ...
  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
                            const FunctionDecl *Caller,
                            const FunctionDecl *Callee, const CallArgList &Args,
                            QualType ReturnType) const override;
  // ...
};
class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
llvm::Type *
AArch64ABIInfo::convertFixedToScalableVectorType(const VectorType *VT) const {
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    return llvm::ScalableVectorType::get(llvm::Type::getInt1Ty(getVMContext()),
                                         16);
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    const auto *BT = VT->getElementType()->castAs<BuiltinType>();
    switch (BT->getKind()) {
    default:
      llvm_unreachable("unexpected builtin type for SVE vector!");

    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::MFloat8:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt8Ty(getVMContext()), 16);

    case BuiltinType::Short:
    case BuiltinType::UShort:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt16Ty(getVMContext()), 8);

    case BuiltinType::Int:
    case BuiltinType::UInt:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);

    case BuiltinType::Long:
    case BuiltinType::ULong:
      return llvm::ScalableVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2);

    case BuiltinType::Half:
      return llvm::ScalableVectorType::get(
          llvm::Type::getHalfTy(getVMContext()), 8);

    case BuiltinType::Float:
      return llvm::ScalableVectorType::get(
          llvm::Type::getFloatTy(getVMContext()), 4);

    case BuiltinType::Double:
      return llvm::ScalableVectorType::get(
          llvm::Type::getDoubleTy(getVMContext()), 2);

    case BuiltinType::BFloat16:
      return llvm::ScalableVectorType::get(
          llvm::Type::getBFloatTy(getVMContext()), 8);
    }
  }

  llvm_unreachable("expected fixed-length SVE vector");
}
ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty, unsigned &NSRN,
                                               unsigned &NPRN) const {
  assert(Ty->isVectorType() && "expected vector type!");

  const auto *VT = Ty->castAs<VectorType>();
  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
    assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
               BuiltinType::UChar &&
           "unexpected builtin type for SVE predicate!");
    NPRN = std::min(NPRN + 1, 4u);
    return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
        llvm::Type::getInt1Ty(getVMContext()), 16));
  }

  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
    NSRN = std::min(NSRN + 1, 8u);
    return ABIArgInfo::getDirect(convertFixedToScalableVectorType(VT));
  }

  uint64_t Size = getContext().getTypeSize(Ty);
  // Android promotes <2 x i8> to i16, not i32.
  if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
    llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64) {
    NSRN = std::min(NSRN + 1, 8u);
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 128) {
    NSRN = std::min(NSRN + 1, 8u);
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
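// Illustrative outcomes (derived from the checks above, not in the source):
//   typedef char c2 __attribute__((vector_size(2)));      // 16 bits
//     -> coerced to i16 on Android/OHOS, i32 elsewhere
//   typedef float f3 __attribute__((ext_vector_type(3))); // rounds to 128 bits
//     -> coerced to <4 x i32>, consuming one SIMD/FP register
// Vectors wider than 128 bits go indirect at natural alignment.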
ABIArgInfo AArch64ABIInfo::coerceAndExpandPureScalableAggregate(
    QualType Ty, bool IsNamedArg, unsigned NVec, unsigned NPred,
    const SmallVectorImpl<llvm::Type *> &UnpaddedCoerceToSeq, unsigned &NSRN,
    unsigned &NPRN) const {
  if (!IsNamedArg || NSRN + NVec > 8 || NPRN + NPred > 4)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  NSRN += NVec;
  NPRN += NPred;

  llvm::Type *UnpaddedCoerceToType =
      UnpaddedCoerceToSeq.size() == 1
          ? UnpaddedCoerceToSeq[0]
          : llvm::StructType::get(CGT.getLLVMContext(), UnpaddedCoerceToSeq,
                                  true);

  SmallVector<llvm::Type *> CoerceToSeq;
  flattenType(CGT.ConvertType(Ty), CoerceToSeq);
  auto *CoerceToType =
      llvm::StructType::get(CGT.getLLVMContext(), CoerceToSeq, false);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}
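// Editorial note: a Pure Scalable Type travels in registers only as a *named*
// argument and only while enough of the eight SVE vector registers (z0-z7)
// and four predicate registers (p0-p3) remain; otherwise the guard above
// falls back to an indirect pass, as AAPCS64 requires.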
ABIArgInfo
AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn,
                                     bool IsNamedArg,
                                     unsigned CallingConvention,
                                     unsigned &NSRN, unsigned &NPRN) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty, NSRN, NPRN);

  if (!passAsAggregateType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

    if (Ty->isVectorType())
      NSRN = std::min(NSRN + 1, 8u);
    else if (const auto *BT = Ty->getAs<BuiltinType>()) {
      if (BT->isFloatingPoint())
        NSRN = std::min(NSRN + 1, 8u);
      else {
        switch (BT->getKind()) {
        case BuiltinType::SveBool:
        case BuiltinType::SveCount:
          NPRN = std::min(NPRN + 1, 4u);
          break;
        case BuiltinType::SveBoolx2:
          NPRN = std::min(NPRN + 2, 4u);
          break;
        case BuiltinType::SveBoolx4:
          NPRN = std::min(NPRN + 4, 4u);
          break;
        default:
          if (BT->isSVESizelessBuiltinType())
            NSRN = std::min(
                NSRN + getContext().getBuiltinVectorTypeInfo(BT).NumVectors,
                8u);
        }
      }
    }

    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty, CGT.ConvertType(Ty))
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial copy
  // constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Empty records are ignored in C mode, and in C++ on Darwin.
  uint64_t Size = getContext().getTypeSize(Ty);
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (!Ty->isSVESizelessBuiltinType() && (IsEmpty || Size == 0)) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();
    // ...
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
                 CallingConvention == llvm::CallingConv::Win64;
  bool IsWinVariadic = IsWin64 && IsVariadicFn;
  // In variadic functions on Windows, all composite types are treated alike,
  // with no special handling of HFAs/HVAs.
  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
    NSRN = std::min(NSRN + Members, uint64_t(8));
    if (Kind != AArch64ABIKind::AAPCS)
      return ABIArgInfo::getDirect(
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

    // For HFAs/HVAs, cap the alignment at 16, otherwise the copy in the
    // argument area could be misaligned.
    unsigned Align =
        getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    Align = (Align >= 16) ? 16 : 8;
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
        nullptr, true, Align);
  }

  // In AAPCS, named arguments of a Pure Scalable Type are passed expanded in
  // registers, or indirectly if there are not enough registers.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(Ty, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          Ty, IsNamedArg, NVec, NPred, UnpaddedCoerceToSeq, NSRN, NPRN);
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    unsigned Alignment;
    if (Kind == AArch64ABIKind::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment =
          std::max(getContext().getTypeAlign(Ty),
                   (unsigned)getTarget().getPointerWidth(LangAS::Default));
    }
    Size = llvm::alignTo(Size, Alignment);

    // We use a pair of i64 for 16-byte aggregates with 8-byte alignment;
    // for 16-byte alignment, we use i128.
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
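// Illustrative classifications (derived from the logic above):
//   struct HFA { float a, b, c, d; };  // homogeneous aggregate of 4 floats
//     -> getDirect([4 x float]), NSRN advances by 4
//   struct P { int x, y; };            // 8 bytes, 4-byte aligned
//     -> getDirect(i64)
//   struct Big { char c[24]; };        // > 16 bytes, not an HFA
//     -> getNaturalAlignIndirect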
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadicFn) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      unsigned NSRN = 0, NPRN = 0;
      return coerceIllegalVector(RetTy, NSRN, NPRN);
    }
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!passAsAggregateType(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy);

    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  // ...

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadicFn))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // In AAPCS, return values of a Pure Scalable Type are treated as a single
  // named argument and passed expanded in registers.
  if (Kind == AArch64ABIKind::AAPCS) {
    unsigned NSRN = 0, NPRN = 0;
    unsigned NVec = 0, NPred = 0;
    SmallVector<llvm::Type *> UnpaddedCoerceToSeq;
    if (passAsPureScalableType(RetTy, NVec, NPred, UnpaddedCoerceToSeq) &&
        (NVec + NPred) > 0)
      return coerceAndExpandPureScalableAggregate(
          RetTy, /* IsNamedArg */ true, NVec, NPred, UnpaddedCoerceToSeq, NSRN,
          NPRN);
  }

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 128) {
    if (Size <= 64 && getDataLayout().isLittleEndian()) {
      // Composite types are returned in the lower bits of a 64-bit register
      // for LE, and in higher bits for BE.
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), Size));
    }

    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to a multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregates with 8-byte alignment;
    // for 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy);
}
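// Illustrative returns (derived from the logic above): on little-endian
// targets struct { int x, y; } comes back as i64 (the Size <= 64 path), a
// 16-byte struct with 8-byte alignment as [2 x i64], and anything larger
// than 16 bytes indirectly, with the result pointer in x8.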
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Fixed-length SVE vectors are represented as scalable vectors in
    // function args/return and must be coerced from fixed vectors.
    if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
        VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
      return true;

    // NumElements should be a power of 2.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 has to be compatible with the ARM logic here, which allows
    // huge vectors for some reason.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}
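// Examples (editorial): <8 x i8> (64 bits) and <4 x i32> (128 bits) are
// legal; a 3-element float vector is illegal because its element count is
// not a power of two; <1 x double> (64 bits) is legal, but a single-element
// 128-bit vector is not, per the final condition above.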
bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
                                            llvm::Type *EltTy,
                                            unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}
bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type.
  // ...
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (auto Kind = VT->getVectorKind();
        Kind == VectorKind::SveFixedLengthData ||
        Kind == VectorKind::SveFixedLengthPredicate)
      return false;

    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}
bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}

bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
    const {
  // AAPCS64 applies the homogeneous-aggregate rule to the output of the data
  // layout decision, so zero-length bitfields do not stop a struct from being
  // homogeneous.
  return true;
}

bool AArch64ABIInfo::passAsAggregateType(QualType Ty) const {
  if (Kind == AArch64ABIKind::AAPCS && Ty->isSVESizelessBuiltinType()) {
    const auto *BT = Ty->castAs<BuiltinType>();
    return !BT->isSVECount() &&
           getContext().getBuiltinVectorTypeInfo(BT).NumVectors > 1;
  }
  return isAggregateTypeForABI(Ty);
}
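// Editorial note: multi-vector tuples such as svint32x2_t therefore take the
// aggregate path (so they can qualify as Pure Scalable Types below), while
// single-vector types like svint32_t, and svcount_t, remain scalars.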
bool AArch64ABIInfo::passAsPureScalableType(
    QualType Ty, unsigned &NVec, unsigned &NPred,
    SmallVectorImpl<llvm::Type *> &CoerceToSeq) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElt = AT->getZExtSize();
    // ...
    unsigned NV = 0, NP = 0;
    SmallVector<llvm::Type *> EltCoerceToSeq;
    if (!passAsPureScalableType(AT->getElementType(), NV, NP, EltCoerceToSeq))
      return false;

    if (CoerceToSeq.size() + NElt * EltCoerceToSeq.size() > 12)
      return false;

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::copy(EltCoerceToSeq, std::back_inserter(CoerceToSeq));

    NVec += NElt * NV;
    NPred += NElt * NP;
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    // ...
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        // ...
        if (!passAsPureScalableType(I.getType(), NVec, NPred, CoerceToSeq))
          return false;
      }
    }

    for (const auto *FD : RD->fields()) {
      QualType FT = FD->getType();
      // ...
      if (!passAsPureScalableType(FT, NVec, NPred, CoerceToSeq))
        return false;
    }
    return true;
  }

  if (const auto *VT = Ty->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
      ++NPred;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }

    if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
      ++NVec;
      if (CoerceToSeq.size() + 1 > 12)
        return false;
      CoerceToSeq.push_back(convertFixedToScalableVectorType(VT));
      return true;
    }
    return false;
  }

  // ...
  bool isPredicate;
  switch (Ty->getAs<BuiltinType>()->getKind()) {
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId)                    \
  case BuiltinType::Id:                                                        \
    isPredicate = false;                                                       \
    break;
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId)                 \
  case BuiltinType::Id:                                                        \
    isPredicate = true;                                                        \
    break;
#define SVE_TYPE(Name, Id, SingletonId)
#include "clang/Basic/AArch64SVEACLETypes.def"
  default:
    return false;
  }

  ASTContext::BuiltinVectorTypeInfo Info =
      getContext().getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
  assert(Info.NumVectors > 0 && Info.NumVectors <= 4 &&
         "Expected 1, 2, 3 or 4 vectors!");
  if (isPredicate)
    ++NPred;
  else
    NVec += Info.NumVectors;
  llvm::Type *EltTy = Info.ElementType->isMFloat8Type()
                          ? llvm::Type::getInt8Ty(getVMContext())
                          : CGT.ConvertType(Info.ElementType);
  auto *VTy = llvm::ScalableVectorType::get(EltTy, Info.EC.getKnownMinValue());

  if (CoerceToSeq.size() + Info.NumVectors > 12)
    return false;
  std::fill_n(std::back_inserter(CoerceToSeq), Info.NumVectors, VTy);

  return true;
}
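// Illustrative Pure Scalable Type (assumption, applying the rules above):
//   struct PST { svbool_t p; svfloat32_t v[2]; };
// is accepted with NPred = 1 and NVec = 2, producing the coercion sequence
//   { <vscale x 16 x i1>, <vscale x 4 x float>, <vscale x 4 x float> }.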
void AArch64ABIInfo::flattenType(
    llvm::Type *Ty, SmallVectorImpl<llvm::Type *> &Flattened) const {
  if (ABIArgInfo::isPaddingForCoerceAndExpand(Ty)) {
    Flattened.push_back(Ty);
    return;
  }

  if (const auto *AT = dyn_cast<llvm::ArrayType>(Ty)) {
    uint64_t NElt = AT->getNumElements();
    if (NElt == 0)
      return;

    SmallVector<llvm::Type *> EltFlattened;
    flattenType(AT->getElementType(), EltFlattened);

    for (uint64_t I = 0; I < NElt; ++I)
      llvm::copy(EltFlattened, std::back_inserter(Flattened));
    return;
  }

  if (const auto *ST = dyn_cast<llvm::StructType>(Ty)) {
    for (auto *ET : ST->elements())
      flattenType(ET, Flattened);
    return;
  }

  Flattened.push_back(Ty);
}
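// Example (editorial): flattenType([2 x <vscale x 4 x float>]) appends two
// consecutive <vscale x 4 x float> entries and recurses through struct
// elements, so the coerce-and-expand struct built earlier contains no
// nested arrays.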
RValue AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF,
                                      AArch64ABIKind Kind,
                                      AggValueSlot Slot) const {
  // These numbers are not used for variadic arguments, hence it doesn't
  // matter that they don't retain their values across multiple calls.
  unsigned NSRN = 0, NPRN = 0;
  ABIArgInfo AI =
      classifyArgumentType(Ty, /*IsVariadicFn=*/true, /*IsNamedArg=*/false,
                           CGF.CurFnInfo->getCallingConvention(), NSRN, NPRN);
  // ...
  bool IsIndirect = AI.isIndirect();
  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  // ...
  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR =
      !isSoftFloat() && (BaseTy->isFloatingPointTy() || BaseTy->isVectorTy());

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs.
    // ...
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    // ...
    RegSize = 16 * NumRegs;
  }

  //=======================================
  // Find out where the argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers,
  // the question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case
  // we align __gr_offs to calculate the potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for the next call to va_arg on this
  // list.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================
  CGF.EmitBlock(InRegBlock);
  llvm::Value *reg_top = nullptr;
  // ...
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
  if (IsIndirect)
    MemTy = llvm::PointerType::getUnqual(MemTy);

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements
    // split and stored 16-bytes apart regardless of size (they're notionally
    // in qN, qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.Align));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.Width.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.Width.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      // ...
    }
    // ...
  }
  // ...
  CharUnits SlotSize = BaseAddr.getAlignment();
  if (CGF.CGM.getDataLayout().isBigEndian() && /* ... */
      TySize < SlotSize) {
    // ...
  }
  // ...

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);
  // ...
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
      CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");

  // ...
  if (getDataLayout().isBigEndian() && !Ty->isAggregateType() &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }
  // ...

  //=======================================
  // Tidy up
  //=======================================
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
                                 OnStackBlock, "vaargs.addr");
  // ...
}
RValue AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF,
                                       AggValueSlot Slot) const {
  // ...
  uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
  // ...
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous aggregates
  // should be passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.Width.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
                          /*AllowHigherAlign*/ true, Slot);
}

RValue AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty, AggValueSlot Slot) const {
  bool IsIndirect = false;
  // ...
}
static void diagnoseIfNeedsFPReg(DiagnosticsEngine &Diags,
                                 const StringRef ABIName,
                                 const AArch64ABIInfo &ABIInfo,
                                 const QualType &Ty, const NamedDecl *D,
                                 SourceLocation loc) {
  const Type *HABase = nullptr;
  uint64_t HAMembers = 0;
  if (Ty->isFloatingType() || Ty->isVectorType() ||
      ABIInfo.isHomogeneousAggregate(Ty, HABase, HAMembers)) {
    Diags.Report(loc, diag::err_target_unsupported_type_for_abi)
        << D->getDeclName() << Ty << ABIName;
  }
}
void AArch64TargetCodeGenInfo::checkFunctionABI(
    CodeGenModule &CGM, const FunctionDecl *FuncDecl) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  // ...
}
static ArmSMEInlinability GetArmSMEInlinability(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee) {
  bool CallerIsStreaming =
      IsArmStreamingFunction(Caller, /*IncludeLocallyStreaming=*/true);
  bool CalleeIsStreaming =
      IsArmStreamingFunction(Callee, /*IncludeLocallyStreaming=*/true);
  bool CallerIsStreamingCompatible = isStreamingCompatible(Caller);
  bool CalleeIsStreamingCompatible = isStreamingCompatible(Callee);

  ArmSMEInlinability Inlinability = ArmSMEInlinability::Ok;

  if (!CalleeIsStreamingCompatible &&
      (CallerIsStreaming != CalleeIsStreaming || CallerIsStreamingCompatible)) {
    if (CalleeIsStreaming)
      Inlinability |= ArmSMEInlinability::ErrorIncompatibleStreamingModes;
    else
      Inlinability |= ArmSMEInlinability::WarnIncompatibleStreamingModes;
  }
  if (auto *NewAttr = Callee->getAttr<ArmNewAttr>()) {
    if (NewAttr->isNewZA())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZA;
    if (NewAttr->isNewZT0())
      Inlinability |= ArmSMEInlinability::ErrorCalleeRequiresNewZT0;
  }

  return Inlinability;
}
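// Illustrative trigger (assumption):
//   __attribute__((always_inline)) void fn(void) __arm_streaming;
//   void caller(void) { fn(); }   // caller is not streaming
// yields ErrorIncompatibleStreamingModes, which the check below reports as
// err_function_always_inline_attribute_mismatch.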
void AArch64TargetCodeGenInfo::checkFunctionCallABIStreaming(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee) const {
  if (!Caller || !Callee || !Callee->hasAttr<AlwaysInlineAttr>())
    return;

  ArmSMEInlinability Inlinability = GetArmSMEInlinability(Caller, Callee);

  if ((Inlinability & ArmSMEInlinability::IncompatibleStreamingModes) !=
      ArmSMEInlinability::Ok)
    CGM.getDiags().Report(
        CallLoc,
        (Inlinability & ArmSMEInlinability::ErrorIncompatibleStreamingModes) ==
                ArmSMEInlinability::ErrorIncompatibleStreamingModes
            ? diag::err_function_always_inline_attribute_mismatch
            : diag::warn_function_always_inline_attribute_mismatch)
        << Caller->getDeclName() << Callee->getDeclName() << "streaming";

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZA) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZA)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_za)
        << Callee->getDeclName();

  if ((Inlinability & ArmSMEInlinability::ErrorCalleeRequiresNewZT0) ==
      ArmSMEInlinability::ErrorCalleeRequiresNewZT0)
    CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_zt0)
        << Callee->getDeclName();
}
void AArch64TargetCodeGenInfo::checkFunctionCallABISoftFloat(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee, const CallArgList &Args,
    QualType ReturnType) const {
  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();
  // ...
  diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, ReturnType,
                       Callee ? Callee : Caller, CallLoc);

  for (const CallArg &Arg : Args)
    diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, Arg.getType(),
                         Callee ? Callee : Caller, CallLoc);
}
void AArch64TargetCodeGenInfo::checkFunctionCallABI(
    CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
    const FunctionDecl *Callee, const CallArgList &Args,
    QualType ReturnType) const {
  checkFunctionCallABIStreaming(CGM, CallLoc, Caller, Callee);
  checkFunctionCallABISoftFloat(CGM, CallLoc, Caller, Callee, Args,
                                ReturnType);
}
bool AArch64TargetCodeGenInfo::wouldInliningViolateFunctionCallABI(
    const FunctionDecl *Caller, const FunctionDecl *Callee) const {
  return Caller && Callee &&
         GetArmSMEInlinability(Caller, Callee) != ArmSMEInlinability::Ok;
}
void AArch64ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
                                             unsigned Index,
                                             raw_ostream &Out) const {
  appendAttributeMangling(Attr->getFeatureStr(Index), Out);
}

void AArch64ABIInfo::appendAttributeMangling(StringRef AttrStr,
                                             raw_ostream &Out) const {
  if (AttrStr == "default") {
    Out << ".default";
    return;
  }

  Out << "._";
  SmallVector<StringRef, 8> Features;
  AttrStr.split(Features, "+");
  for (auto &Feat : Features)
    Feat = Feat.trim();

  llvm::sort(Features, [](const StringRef LHS, const StringRef RHS) {
    return LHS.compare(RHS) < 0;
  });

  llvm::SmallDenseSet<StringRef, 8> UniqueFeats;
  for (auto &Feat : Features)
    if (auto Ext = llvm::AArch64::parseFMVExtension(Feat))
      if (UniqueFeats.insert(Ext->Name).second)
        Out << 'M' << Ext->Name;
}
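// Example mangling (editorial, per the rules above):
//   __attribute__((target_clones("sve2", "default"))) int f(void);
// emits the versions f._Msve2 and f.default; feature strings are trimmed,
// sorted, and de-duplicated before each is appended as 'M' + extension name.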
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                        AArch64ABIKind Kind) {
  return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM,
                                               AArch64ABIKind K) {
  return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
}