;
20boolIsX86_MMXType(llvm::Type *IRType) {
22 returnIRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
23cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
24IRType->getScalarSizeInBits() != 64;
30 if(Constraint ==
"k") {
31llvm::Type *Int1Ty = llvm::Type::getInt1Ty(CGF.
getLLVMContext());
32 returnllvm::FixedVectorType::get(Int1Ty, Ty->getScalarSizeInBits());
43 if(BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
44 if(BT->getKind() == BuiltinType::LongDouble) {
46&llvm::APFloat::x87DoubleExtended())
55 if(VecSize == 128 || VecSize == 256 || VecSize == 512)
/// For the x86 vectorcall convention, a homogeneous aggregate is only
/// eligible for register passing when it has at most four members.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  constexpr uint64_t MaxVectorCallHvaMembers = 4;
  return NumMembers <= MaxVectorCallHvaMembers;
}
68static ABIArgInfogetDirectX86Hva(llvm::Type*
T=
nullptr) {
71AI.setCanBeFlattened(
false);
82: IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()),
85llvm::SmallBitVector IsPreassigned;
86 unsignedCC = CallingConv::CC_C;
87 unsignedFreeRegs = 0;
88 unsignedFreeSSERegs = 0;
94classX86_32ABIInfo :
public ABIInfo{
100 static const unsignedMinABIStackAlignInBytes = 4;
102 boolIsDarwinVectorABI;
103 boolIsRetSmallStructInRegABI;
104 boolIsWin32StructABI;
108 unsignedDefaultNumRegisterParameters;
/// True if \p Size (in bits) is exactly one of the widths that fits the
/// x86-32 register-return machinery: 8, 16, 32, or 64 bits.
static bool isRegisterSize(unsigned Size) {
  switch (Size) {
  case 8:
  case 16:
  case 32:
  case 64:
    return true;
  default:
    return false;
  }
}
116 returnisX86VectorTypeForVectorCall(
getContext(), Ty);
120uint64_t NumMembers)
const override{
122 returnisX86VectorCallAggregateSmallEnough(NumMembers);
134 unsignedgetTypeStackAlignInBytes(
QualTypeTy,
unsignedAlign)
const;
139 unsignedArgIndex)
const;
143 boolupdateFreeRegs(
QualTypeTy, CCState &State)
const;
145 boolshouldAggregateUseDirect(
QualTypeTy, CCState &State,
bool&InReg,
146 bool&NeedsPadding)
const;
147 boolshouldPrimitiveUseInReg(
QualTypeTy, CCState &State)
const;
149 boolcanExpandIndirectArgument(
QualTypeTy)
const;
158 voidrunVectorCallFirstPass(
CGFunctionInfo&FI, CCState &State)
const;
167 boolRetSmallStructInRegABI,
boolWin32StructABI,
168 unsignedNumRegisterParameters,
boolSoftFloatABI)
169:
ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
170IsRetSmallStructInRegABI(RetSmallStructInRegABI),
171IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
172IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
173IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
174CGT.getTarget().getTriple().isOSCygMing()),
175DefaultNumRegisterParameters(NumRegisterParameters) {}
184 boolAsReturnValue)
const override{
196 boolRetSmallStructInRegABI,
boolWin32StructABI,
197 unsignedNumRegisterParameters,
boolSoftFloatABI)
199CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
200NumRegisterParameters, SoftFloatABI)) {
201SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
204 static boolisStructReturnInRegABI(
217llvm::Value *
Address)
const override;
220StringRef Constraint,
221llvm::Type* Ty)
const override{
222 returnX86AdjustInlineAsmType(CGF, Constraint, Ty);
226std::string &Constraints,
227std::vector<llvm::Type *> &ResultRegTypes,
228std::vector<llvm::Type *> &ResultTruncRegTypes,
229std::vector<LValue> &ResultRegDests,
230std::string &AsmString,
231 unsignedNumOutputs)
const override;
234 return "movl\t%ebp, %ebp" 235 "\t\t// marker for objc_retainAutoreleaseReturnValue";
251std::string &AsmString) {
253llvm::raw_string_ostream OS(Buf);
255 while(Pos < AsmString.size()) {
256 size_tDollarStart = AsmString.find(
'$', Pos);
257 if(DollarStart == std::string::npos)
258DollarStart = AsmString.size();
259 size_tDollarEnd = AsmString.find_first_not_of(
'$', DollarStart);
260 if(DollarEnd == std::string::npos)
261DollarEnd = AsmString.size();
262OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
264 size_tNumDollars = DollarEnd - DollarStart;
265 if(NumDollars % 2 != 0 && Pos < AsmString.size()) {
267 size_tDigitStart = Pos;
268 if(AsmString[DigitStart] ==
'{') {
272 size_tDigitEnd = AsmString.find_first_not_of(
"0123456789", DigitStart);
273 if(DigitEnd == std::string::npos)
274DigitEnd = AsmString.size();
275StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
276 unsignedOperandIndex;
277 if(!OperandStr.getAsInteger(10, OperandIndex)) {
278 if(OperandIndex >= FirstIn)
279OperandIndex += NumNewOuts;
287AsmString = std::move(Buf);
291voidX86_32TargetCodeGenInfo::addReturnRegisterOutputs(
293std::vector<llvm::Type *> &ResultRegTypes,
294std::vector<llvm::Type *> &ResultTruncRegTypes,
295std::vector<LValue> &ResultRegDests, std::string &AsmString,
296 unsignedNumOutputs)
const{
301 if(!Constraints.empty())
303 if(RetWidth <= 32) {
304Constraints +=
"={eax}";
305ResultRegTypes.push_back(CGF.
Int32Ty);
308Constraints +=
"=A";
309ResultRegTypes.push_back(CGF.
Int64Ty);
313llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.
getLLVMContext(), RetWidth);
314ResultTruncRegTypes.push_back(CoerceTy);
318ResultRegDests.push_back(ReturnSlot);
325boolX86_32ABIInfo::shouldReturnTypeInRegister(
QualTypeTy,
331 if((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
337 if(Size == 64 || Size == 128)
352 returnshouldReturnTypeInRegister(AT->getElementType(), Context);
356 if(!RT)
return false;
368 if(!shouldReturnTypeInRegister(FD->getType(), Context))
377Ty = CTy->getElementType();
387 returnSize == 32 || Size == 64;
392 for(
const auto*FD : RD->
fields()) {
402 if(FD->isBitField())
427boolX86_32ABIInfo::canExpandIndirectArgument(
QualTypeTy)
const{
434 if(
const CXXRecordDecl*CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
435 if(!IsWin32StructABI) {
438 if(!CXXRD->isCLike())
442 if(CXXRD->isDynamicClass())
453 return Size== getContext().getTypeSize(Ty);
456ABIArgInfoX86_32ABIInfo::getIndirectReturnResult(
QualTypeRetTy, CCState &State)
const{
459 if(State.CC != llvm::CallingConv::X86_FastCall &&
460State.CC != llvm::CallingConv::X86_VectorCall && State.FreeRegs) {
463 returngetNaturalAlignIndirectInReg(RetTy);
465 returngetNaturalAlignIndirect(RetTy,
false);
469CCState &State)
const{
475 if((State.CC == llvm::CallingConv::X86_VectorCall ||
476State.CC == llvm::CallingConv::X86_RegCall) &&
477isHomogeneousAggregate(RetTy,
Base, NumElts)) {
484 if(IsDarwinVectorABI) {
492llvm::Type::getInt64Ty(getVMContext()), 2));
496 if((Size == 8 || Size == 16 || Size == 32) ||
497(Size == 64 && VT->getNumElements() == 1))
501 returngetIndirectReturnResult(RetTy, State);
511 returngetIndirectReturnResult(RetTy, State);
516 returngetIndirectReturnResult(RetTy, State);
527llvm::Type::getHalfTy(getVMContext()), 2));
532 if(shouldReturnTypeInRegister(RetTy, getContext())) {
541 if((!IsWin32StructABI && SeltTy->isRealFloatingType())
542|| SeltTy->hasPointerRepresentation())
550 returngetIndirectReturnResult(RetTy, State);
555RetTy = EnumTy->getDecl()->getIntegerType();
558 if(EIT->getNumBits() > 64)
559 returngetIndirectReturnResult(RetTy, State);
565unsignedX86_32ABIInfo::getTypeStackAlignInBytes(
QualTypeTy,
566 unsignedAlign)
const{
569 if(Align <= MinABIStackAlignInBytes)
577 if(Ty->
isVectorType() && (Align == 16 || Align == 32 || Align == 64))
581 if(!IsDarwinVectorABI) {
583 returnMinABIStackAlignInBytes;
591 returnMinABIStackAlignInBytes;
595CCState &State)
const{
597 if(State.FreeRegs) {
600 returngetNaturalAlignIndirectInReg(Ty);
602 returngetNaturalAlignIndirect(Ty,
false);
606 unsignedTypeAlign = getContext().getTypeAlign(Ty) / 8;
607 unsignedStackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
613 boolRealign = TypeAlign > StackAlign;
618X86_32ABIInfo::Class X86_32ABIInfo::classify(
QualTypeTy)
const{
625 if(K == BuiltinType::Float || K == BuiltinType::Double)
631boolX86_32ABIInfo::updateFreeRegs(
QualTypeTy, CCState &State)
const{
632 if(!IsSoftFloatABI) {
638 unsigned Size= getContext().getTypeSize(Ty);
639 unsignedSizeInRegs = (
Size+ 31) / 32;
645 if(SizeInRegs > State.FreeRegs) {
654 if(SizeInRegs > State.FreeRegs || SizeInRegs > 2)
658State.FreeRegs -= SizeInRegs;
662boolX86_32ABIInfo::shouldAggregateUseDirect(
QualTypeTy, CCState &State,
664 bool&NeedsPadding)
const{
671NeedsPadding =
false;
674 if(!updateFreeRegs(Ty, State))
680 if(State.CC == llvm::CallingConv::X86_FastCall ||
681State.CC == llvm::CallingConv::X86_VectorCall ||
682State.CC == llvm::CallingConv::X86_RegCall) {
683 if(getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
684NeedsPadding =
true;
692boolX86_32ABIInfo::shouldPrimitiveUseInReg(
QualTypeTy, CCState &State)
const{
693 boolIsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&
697 if(!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
698State.CC == llvm::CallingConv::X86_VectorCall))
701 if(!updateFreeRegs(Ty, State))
704 if(!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)
711voidX86_32ABIInfo::runVectorCallFirstPass(
CGFunctionInfo&FI, CCState &State)
const{
722 for(
intI = 0,
E= Args.size(); I <
E; ++I) {
727isHomogeneousAggregate(Ty,
Base, NumElts)) {
728 if(State.FreeSSERegs >= NumElts) {
729State.FreeSSERegs -= NumElts;
731State.IsPreassigned.set(I);
738 unsignedArgIndex)
const{
740 boolIsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
741 boolIsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
742 boolIsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
745 TypeInfoTI = getContext().getTypeInfo(Ty);
752 returngetIndirectResult(Ty,
false, State);
753}
else if(State.IsDelegateCall) {
756 ABIArgInfoRes = getIndirectResult(Ty,
false, State);
769 if((IsRegCall || IsVectorCall) &&
770isHomogeneousAggregate(Ty,
Base, NumElts)) {
771 if(State.FreeSSERegs >= NumElts) {
772State.FreeSSERegs -= NumElts;
777 returngetDirectX86Hva();
785 returngetIndirectResult(Ty,
false, State);
792 returngetIndirectResult(Ty,
true, State);
795 if(!IsWin32StructABI &&
isEmptyRecord(getContext(), Ty,
true))
802llvm::LLVMContext &LLVMContext = getVMContext();
803llvm::IntegerType *
Int32= llvm::Type::getInt32Ty(LLVMContext);
804 boolNeedsPadding =
false;
806 if(shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
807 unsignedSizeInRegs = (TI.
Width+ 31) / 32;
809llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
815llvm::IntegerType *PaddingType = NeedsPadding ?
Int32:
nullptr;
822 if(IsWin32StructABI && State.Required.isRequiredArg(ArgIndex)) {
823 unsignedAlignInBits = 0;
826getContext().getASTRecordLayout(RT->
getDecl());
829AlignInBits = TI.
Align;
831 if(AlignInBits > 32)
832 returngetIndirectResult(Ty,
false, State);
841 if(TI.
Width<= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
842canExpandIndirectArgument(Ty))
844IsFastCall || IsVectorCall || IsRegCall, PaddingType);
846 returngetIndirectResult(Ty,
true, State);
853 if(IsWin32StructABI) {
854 if(TI.
Width<= 512 && State.FreeSSERegs > 0) {
858 returngetIndirectResult(Ty,
false, State);
863 if(IsDarwinVectorABI) {
865(TI.
Width== 64 && VT->getNumElements() == 1))
867llvm::IntegerType::get(getVMContext(), TI.
Width));
870 if(IsX86_MMXType(CGT.ConvertType(Ty)))
878Ty = EnumTy->getDecl()->getIntegerType();
880 boolInReg = shouldPrimitiveUseInReg(Ty, State);
882 if(isPromotableIntegerTypeForABI(Ty)) {
889 if(EIT->getNumBits() <= 64) {
894 returngetIndirectResult(Ty,
false, State);
906 else if(State.CC == llvm::CallingConv::X86_FastCall) {
908State.FreeSSERegs = 3;
909}
else if(State.CC == llvm::CallingConv::X86_VectorCall) {
911State.FreeSSERegs = 6;
914 else if(State.CC == llvm::CallingConv::X86_RegCall) {
916State.FreeSSERegs = 8;
917}
else if(IsWin32StructABI) {
920State.FreeRegs = DefaultNumRegisterParameters;
921State.FreeSSERegs = 3;
923State.FreeRegs = DefaultNumRegisterParameters;
930 if(State.FreeRegs) {
943 if(State.CC == llvm::CallingConv::X86_VectorCall)
944runVectorCallFirstPass(FI, State);
946 boolUsedInAlloca =
false;
948 for(
unsignedI = 0,
E= Args.size(); I <
E; ++I) {
950 if(State.IsPreassigned.test(I))
961rewriteWithInAlloca(FI);
970assert(StackOffset.
isMultipleOf(WordSize) &&
"unaligned inalloca struct");
975 boolIsIndirect =
false;
979llvm::Type *LLTy = CGT.ConvertTypeForMem(
Type);
981LLTy = llvm::PointerType::getUnqual(getVMContext());
982FrameFields.push_back(LLTy);
983StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(
Type);
987StackOffset = FieldEnd.
alignTo(WordSize);
988 if(StackOffset != FieldEnd) {
989 CharUnitsNumBytes = StackOffset - FieldEnd;
990llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
991Ty = llvm::ArrayType::get(Ty, NumBytes.
getQuantity());
992FrameFields.push_back(Ty);
1014llvm_unreachable(
"invalid enum");
1017voidX86_32ABIInfo::rewriteWithInAlloca(
CGFunctionInfo&FI)
const{
1018assert(IsWin32StructABI &&
"inalloca only supported on win32");
1033 if(
Ret.isIndirect() &&
Ret.isSRetAfterThis() && !IsThisCall &&
1035addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1040 if(
Ret.isIndirect() && !
Ret.getInReg()) {
1041addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.
getReturnType());
1043 Ret.setInAllocaSRet(IsWin32StructABI);
1051 for(; I !=
E; ++I) {
1053addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1056FI.
setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
1064 auto TypeInfo= getContext().getTypeInfoInChars(Ty);
1077getTypeStackAlignInBytes(Ty,
TypeInfo.
Align.getQuantity()));
1084boolX86_32TargetCodeGenInfo::isStructReturnInRegABI(
1086assert(Triple.getArch() == llvm::Triple::x86);
1088 switch(Opts.getStructReturnConvention()) {
1097 if(Triple.isOSDarwin() || Triple.isOSIAMCU())
1100 switch(Triple.getOS()) {
1101 casellvm::Triple::DragonFly:
1102 casellvm::Triple::FreeBSD:
1103 casellvm::Triple::OpenBSD:
1104 casellvm::Triple::Win32:
1113 if(!FD->
hasAttr<AnyX86InterruptAttr>())
1116llvm::Function *Fn = cast<llvm::Function>(GV);
1117Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1123llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
1124Fn->getContext(), ByValTy);
1125Fn->addParamAttr(0, NewAttr);
1128voidX86_32TargetCodeGenInfo::setTargetAttributes(
1130 if(GV->isDeclaration())
1132 if(
const FunctionDecl*FD = dyn_cast_or_null<FunctionDecl>(
D)) {
1133 if(FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1134llvm::Function *
Fn= cast<llvm::Function>(GV);
1135 Fn->addFnAttr(
"stackrealign");
1142boolX86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1144llvm::Value *
Address)
const{
1147llvm::Value *Four8 = llvm::ConstantInt::get(CGF.
Int8Ty, 4);
1158llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.
Int8Ty, 16);
1164Builder.CreateAlignedStore(
1165Four8, Builder.CreateConstInBoundsGEP1_32(CGF.
Int8Ty,
Address, 9),
1171llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.
Int8Ty, 12);
1186static unsignedgetNativeVectorSizeForAVXABI(
X86AVXABILevelAVXLevel) {
1188 caseX86AVXABILevel::AVX512:
1190 caseX86AVXABILevel::AVX:
1192 caseX86AVXABILevel::None:
1195llvm_unreachable(
"Unknown AVXLevel");
1199classX86_64ABIInfo :
public ABIInfo{
1220 static Classmerge(Class Accum, Class Field);
1236 voidpostMerge(
unsignedAggregateSize, Class &Lo, Class &Hi)
const;
1264 voidclassify(
QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
1265 boolisNamedArg,
boolIsRegCall =
false)
const;
1267llvm::Type *GetByteVectorType(
QualTypeTy)
const;
1268llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
1269 unsignedIROffset,
QualTypeSourceTy,
1270 unsignedSourceOffset)
const;
1271llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
1272 unsignedIROffset,
QualTypeSourceTy,
1273 unsignedSourceOffset)
const;
1289 unsigned&neededInt,
unsigned&neededSSE,
1291 boolIsRegCall =
false)
const;
1294 unsigned&NeededSSE,
1295 unsigned&MaxVectorWidth)
const;
1298 unsigned&NeededSSE,
1299 unsigned&MaxVectorWidth)
const;
1301 boolIsIllegalVectorType(
QualTypeTy)
const;
1308 boolhonorsRevision0_98()
const{
1314 boolclassifyIntegerMMXAsSSE()
const{
1316 if(
getContext().getLangOpts().getClangABICompat() <=
1317LangOptions::ClangABI::Ver3_8)
1321 if(Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())
1327 boolpassInt128VectorsInMem()
const{
1329 if(
getContext().getLangOpts().getClangABICompat() <=
1330LangOptions::ClangABI::Ver9)
1334 return T.isOSLinux() ||
T.isOSNetBSD();
1340 boolHas64BitPointers;
1344:
ABIInfo(CGT), AVXLevel(AVXLevel),
1345Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}
1348 unsignedneededInt, neededSSE;
1354 if(llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1355 returnvectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;
1367 boolhas64BitPointers()
const{
1368 returnHas64BitPointers;
1373classWinX86_64ABIInfo :
public ABIInfo{
1376:
ABIInfo(CGT), AVXLevel(AVXLevel),
1377IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
1386 returnisX86VectorTypeForVectorCall(
getContext(), Ty);
1390uint64_t NumMembers)
const override{
1392 returnisX86VectorCallAggregateSmallEnough(NumMembers);
1397 boolIsVectorCall,
boolIsRegCall)
const;
1411std::make_unique<SwiftABIInfo>(CGT,
true);
1423llvm::Value *
Address)
const override{
1424llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.
Int8Ty, 8);
1433StringRef Constraint,
1434llvm::Type* Ty)
const override{
1435 returnX86AdjustInlineAsmType(CGF, Constraint, Ty);
1447 boolHasAVXType =
false;
1448 for(CallArgList::const_iterator
1449it = args.begin(), ie = args.end(); it != ie; ++it) {
1450 if(getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(it->Ty)) {
1465 if(GV->isDeclaration())
1467 if(
const FunctionDecl*FD = dyn_cast_or_null<FunctionDecl>(
D)) {
1468 if(FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1469llvm::Function *
Fn= cast<llvm::Function>(GV);
1470 Fn->addFnAttr(
"stackrealign");
1480 QualTypeReturnType)
const override;
1485llvm::StringMap<bool> &CallerMap,
1487llvm::StringMap<bool> &CalleeMap,
1489 if(CalleeMap.empty() && CallerMap.empty()) {
1500 constllvm::StringMap<bool> &CallerMap,
1501 constllvm::StringMap<bool> &CalleeMap,
1504 boolCallerHasFeat = CallerMap.lookup(Feature);
1505 boolCalleeHasFeat = CalleeMap.lookup(Feature);
1506 if(!CallerHasFeat && !CalleeHasFeat)
1507 return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
1508<< IsArgument << Ty << Feature;
1511 if(!CallerHasFeat || !CalleeHasFeat)
1512 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
1513<< IsArgument << Ty << Feature;
1522 constllvm::StringMap<bool> &CallerMap,
1523 constllvm::StringMap<bool> &CalleeMap,
1525 boolCaller256 = CallerMap.lookup(
"avx512f") && !CallerMap.lookup(
"evex512");
1526 boolCallee256 = CalleeMap.lookup(
"avx512f") && !CalleeMap.lookup(
"evex512");
1530 if(Caller256 || Callee256)
1531 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
1532<< IsArgument << Ty <<
"evex512";
1535 "avx512f", IsArgument);
1540 constllvm::StringMap<bool> &CallerMap,
1541 constllvm::StringMap<bool> &CalleeMap,
QualTypeTy,
1555voidX86_64TargetCodeGenInfo::checkFunctionCallABI(
CodeGenModule&CGM,
1564llvm::StringMap<bool> CallerMap;
1565llvm::StringMap<bool> CalleeMap;
1566 unsignedArgIndex = 0;
1570 for(
const CallArg&Arg : Args) {
1578 if(Arg.getType()->isVectorType() &&
1584 if(ArgIndex < Callee->getNumParams())
1585Ty =
Callee->getParamDecl(ArgIndex)->getType();
1588CalleeMap, Ty,
true))
1596 if(
Callee->getReturnType()->isVectorType() &&
1600CalleeMap,
Callee->getReturnType(),
1609 boolQuote = Lib.contains(
' ');
1610std::string ArgStr = Quote ?
"\"":
"";
1612 if(!Lib.ends_with_insensitive(
".lib") && !Lib.ends_with_insensitive(
".a"))
1614ArgStr += Quote ?
"\"":
"";
1619classWinX86_32TargetCodeGenInfo :
publicX86_32TargetCodeGenInfo {
1622 boolDarwinVectorABI,
boolRetSmallStructInRegABI,
boolWin32StructABI,
1623 unsignedNumRegisterParameters)
1624: X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
1625Win32StructABI, NumRegisterParameters,
false) {}
1627 voidsetTargetAttributes(
const Decl*
D, llvm::GlobalValue *GV,
1630 voidgetDependentLibraryOption(llvm::StringRef Lib,
1632Opt =
"/DEFAULTLIB:";
1633Opt += qualifyWindowsLibrary(Lib);
1636 voidgetDetectMismatchOption(llvm::StringRef Name,
1637llvm::StringRef
Value,
1639Opt =
"/FAILIFMISMATCH:\""+ Name.str() +
"="+
Value.str() +
"\"";
1644voidWinX86_32TargetCodeGenInfo::setTargetAttributes(
1646X86_32TargetCodeGenInfo::setTargetAttributes(
D, GV, CGM);
1647 if(GV->isDeclaration())
1649addStackProbeTargetAttributes(
D, GV, CGM);
1659std::make_unique<SwiftABIInfo>(CGT,
true);
1662 voidsetTargetAttributes(
const Decl*
D, llvm::GlobalValue *GV,
1670llvm::Value *
Address)
const override{
1671llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.
Int8Ty, 8);
1679 voidgetDependentLibraryOption(llvm::StringRef Lib,
1681Opt =
"/DEFAULTLIB:";
1682Opt += qualifyWindowsLibrary(Lib);
1685 voidgetDetectMismatchOption(llvm::StringRef Name,
1686llvm::StringRef
Value,
1688Opt =
"/FAILIFMISMATCH:\""+ Name.str() +
"="+
Value.str() +
"\"";
1693voidWinX86_64TargetCodeGenInfo::setTargetAttributes(
1696 if(GV->isDeclaration())
1698 if(
const FunctionDecl*FD = dyn_cast_or_null<FunctionDecl>(
D)) {
1699 if(FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1700llvm::Function *
Fn= cast<llvm::Function>(GV);
1701 Fn->addFnAttr(
"stackrealign");
1707addStackProbeTargetAttributes(
D, GV, CGM);
1710voidX86_64ABIInfo::postMerge(
unsignedAggregateSize,
Class&Lo,
1735 if(Hi == X87Up && Lo != X87 && honorsRevision0_98())
1737 if(AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
1739 if(Hi == SSEUp && Lo != SSE)
1743X86_64ABIInfo::Class X86_64ABIInfo::merge(
ClassAccum,
ClassField) {
1767assert((Accum != Memory && Accum != ComplexX87) &&
1768 "Invalid accumulated classification during merge.");
1769 if(Accum == Field || Field == NoClass)
1771 if(Field == Memory)
1773 if(Accum == NoClass)
1777 if(Field == X87 || Field == X87Up || Field == ComplexX87 ||
1778Accum == X87 || Accum == X87Up)
1783voidX86_64ABIInfo::classify(
QualTypeTy, uint64_t OffsetBase,
Class&Lo,
1784 Class&Hi,
boolisNamedArg,
boolIsRegCall)
const{
1795 Class&Current = OffsetBase < 64 ? Lo : Hi;
1801 if(k == BuiltinType::Void) {
1803}
else if(k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
1806}
else if(k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
1808}
else if(k == BuiltinType::Float || k == BuiltinType::Double ||
1809k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
1811}
else if(k == BuiltinType::Float128) {
1814}
else if(k == BuiltinType::LongDouble) {
1815 constllvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
1816 if(LDF == &llvm::APFloat::IEEEquad()) {
1819}
else if(LDF == &llvm::APFloat::x87DoubleExtended()) {
1822}
else if(LDF == &llvm::APFloat::IEEEdouble()) {
1825llvm_unreachable(
"unexpected long double representation!");
1834classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
1845 if(Has64BitPointers) {
1852 uint64_tEB_FuncPtr = (OffsetBase) / 64;
1853 uint64_tEB_ThisAdj = (OffsetBase + 64 - 1) / 64;
1854 if(EB_FuncPtr != EB_ThisAdj) {
1868 if(Size == 1 || Size == 8 || Size == 16 || Size == 32) {
1877 uint64_tEB_Lo = (OffsetBase) / 64;
1881}
else if(Size == 64) {
1882 QualTypeElementType = VT->getElementType();
1891 if(!classifyIntegerMMXAsSSE() &&
1902 if(OffsetBase && OffsetBase != 64)
1904}
else if(Size == 128 ||
1905(isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
1906 QualTypeElementType = VT->getElementType();
1909 if(passInt128VectorsInMem() &&
Size!= 128 &&
1941 else if(Size <= 128)
1943}
else if(ET->
isFloat16Type() || ET == getContext().FloatTy ||
1946}
else if(ET == getContext().DoubleTy) {
1948}
else if(ET == getContext().LongDoubleTy) {
1949 constllvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
1950 if(LDF == &llvm::APFloat::IEEEquad())
1952 else if(LDF == &llvm::APFloat::x87DoubleExtended())
1953Current = ComplexX87;
1954 else if(LDF == &llvm::APFloat::IEEEdouble())
1957llvm_unreachable(
"unexpected long double representation!");
1962 uint64_tEB_Real = (OffsetBase) / 64;
1963 uint64_tEB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1964 if(Hi == NoClass && EB_Real != EB_Imag)
1971 if(EITy->getNumBits() <= 64)
1973 else if(EITy->getNumBits() <= 128)
1988 if(!IsRegCall && Size > 512)
1995 if(OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2001 uint64_tEltSize = getContext().getTypeSize(AT->getElementType());
2002 uint64_tArraySize = AT->getZExtSize();
2009(Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2012 for(uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2013 ClassFieldLo, FieldHi;
2014classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2015Lo = merge(Lo, FieldLo);
2016Hi = merge(Hi, FieldHi);
2017 if(Lo == Memory || Hi == Memory)
2021postMerge(Size, Lo, Hi);
2022assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp array classification.");
2052 if(
const CXXRecordDecl*CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2053 for(
const auto&I : CXXRD->bases()) {
2054assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2055 "Unexpected base class!");
2064 ClassFieldLo, FieldHi;
2067classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
2068Lo = merge(Lo, FieldLo);
2069Hi = merge(Hi, FieldHi);
2070 if(Lo == Memory || Hi == Memory) {
2071postMerge(Size, Lo, Hi);
2079 boolUseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
2081getContext().getTargetInfo().getTriple().isPS();
2082 boolIsUnion = RT->
isUnionType() && !UseClang11Compat;
2085i != e; ++i, ++idx) {
2087 boolBitField = i->isBitField();
2090 if(BitField && i->isUnnamedBitField())
2103((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
2104Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2106postMerge(Size, Lo, Hi);
2111Offset % getContext().getTypeAlign(i->getType().getCanonicalType());
2113 if(!BitField && IsInMemory) {
2115postMerge(Size, Lo, Hi);
2125 ClassFieldLo, FieldHi;
2131assert(!i->isUnnamedBitField());
2139assert(EB_Hi == EB_Lo &&
"Invalid classification, type > 16 bytes.");
2144FieldHi = EB_Hi ?
Integer: NoClass;
2147classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2148Lo = merge(Lo, FieldLo);
2149Hi = merge(Hi, FieldHi);
2150 if(Lo == Memory || Hi == Memory)
2154postMerge(Size, Lo, Hi);
2164Ty = EnumTy->getDecl()->getIntegerType();
2167 returngetNaturalAlignIndirect(Ty);
2173 returngetNaturalAlignIndirect(Ty);
2176boolX86_64ABIInfo::IsIllegalVectorType(
QualTypeTy)
const{
2179 unsignedLargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2180 if(Size <= 64 || Size > LargestVector)
2182 QualTypeEltTy = VecTy->getElementType();
2183 if(passInt128VectorsInMem() &&
2193 unsignedfreeIntRegs)
const{
2206Ty = EnumTy->getDecl()->getIntegerType();
2217 unsignedAlign = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2240 if(freeIntRegs == 0) {
2245 if(Align == 8 && Size <= 64)
2255llvm::Type *X86_64ABIInfo::GetByteVectorType(
QualTypeTy)
const{
2261llvm::Type *IRType = CGT.ConvertType(Ty);
2262 if(isa<llvm::VectorType>(IRType)) {
2265 if(passInt128VectorsInMem() &&
2266cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
2269 returnllvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
2276 if(IRType->getTypeID() == llvm::Type::FP128TyID)
2281assert((Size == 128 || Size == 256 || Size == 512) &&
"Invalid type found!");
2285 returnllvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
2302 if(TySize <= StartBit)
2307 unsignedNumElts = (
unsigned)AT->getZExtSize();
2310 for(
unsignedi = 0; i != NumElts; ++i) {
2312 unsignedEltOffset = i*EltSize;
2313 if(EltOffset >= EndBit)
break;
2315 unsignedEltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2317EndBit-EltOffset, Context))
2329 if(
const CXXRecordDecl*CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2330 for(
const auto&I : CXXRD->bases()) {
2331assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2332 "Unexpected base class!");
2338 if(BaseOffset >= EndBit)
continue;
2340 unsignedBaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2342EndBit-BaseOffset, Context))
2353i != e; ++i, ++idx) {
2357 if(FieldOffset >= EndBit)
break;
2359 unsignedFieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
2375 constllvm::DataLayout &TD) {
2376 if(IROffset == 0 && IRType->isFloatingPointTy())
2380 if(llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2381 if(!STy->getNumContainedTypes())
2384 constllvm::StructLayout *SL = TD.getStructLayout(STy);
2385 unsignedElt = SL->getElementContainingOffset(IROffset);
2386IROffset -= SL->getElementOffset(Elt);
2391 if(llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2392llvm::Type *EltTy = ATy->getElementType();
2393 unsignedEltSize = TD.getTypeAllocSize(EltTy);
2394IROffset -= IROffset / EltSize * EltSize;
2403llvm::Type *X86_64ABIInfo::
2404GetSSETypeAtOffset(llvm::Type *IRType,
unsignedIROffset,
2405 QualTypeSourceTy,
unsignedSourceOffset)
const{
2406 constllvm::DataLayout &TD = getDataLayout();
2407 unsignedSourceSize =
2408(
unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
2410 if(!T0 || T0->isDoubleTy())
2411 returnllvm::Type::getDoubleTy(getVMContext());
2414llvm::Type *T1 =
nullptr;
2415 unsignedT0Size = TD.getTypeAllocSize(T0);
2416 if(SourceSize > T0Size)
2418 if(T1 ==
nullptr) {
2421 if(T0->is16bitFPTy() && SourceSize > 4)
2430 if(T0->isFloatTy() && T1->isFloatTy())
2431 returnllvm::FixedVectorType::get(T0, 2);
2433 if(T0->is16bitFPTy() && T1->is16bitFPTy()) {
2434llvm::Type *T2 =
nullptr;
2438 returnllvm::FixedVectorType::get(T0, 2);
2439 returnllvm::FixedVectorType::get(T0, 4);
2442 if(T0->is16bitFPTy() || T1->is16bitFPTy())
2443 returnllvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);
2445 returnllvm::Type::getDoubleTy(getVMContext());
2463llvm::Type *X86_64ABIInfo::
2464GetINTEGERTypeAtOffset(llvm::Type *IRType,
unsignedIROffset,
2465 QualTypeSourceTy,
unsignedSourceOffset)
const{
2468 if(IROffset == 0) {
2470 if((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
2471IRType->isIntegerTy(64))
2480 if(IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2481IRType->isIntegerTy(32) ||
2482(isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2483 unsignedBitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2484cast<llvm::IntegerType>(IRType)->getBitWidth();
2487SourceOffset*8+64, getContext()))
2492 if(llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2494 constllvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2495 if(IROffset < SL->getSizeInBytes()) {
2496 unsignedFieldIdx = SL->getElementContainingOffset(IROffset);
2497IROffset -= SL->getElementOffset(FieldIdx);
2499 returnGetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2500SourceTy, SourceOffset);
2504 if(llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2505llvm::Type *EltTy = ATy->getElementType();
2506 unsignedEltSize = getDataLayout().getTypeAllocSize(EltTy);
2507 unsignedEltOffset = IROffset/EltSize*EltSize;
2508 returnGetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2514 unsignedTySizeInBytes =
2515(
unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2517assert(TySizeInBytes != SourceOffset &&
"Empty field?");
2521 returnllvm::IntegerType::get(getVMContext(),
2522std::min(TySizeInBytes-SourceOffset, 8U)*8);
2533 constllvm::DataLayout &TD) {
2538 unsignedLoSize = (
unsigned)TD.getTypeAllocSize(Lo);
2539llvm::Align HiAlign = TD.getABITypeAlign(Hi);
2540 unsignedHiStart = llvm::alignTo(LoSize, HiAlign);
2541assert(HiStart != 0 && HiStart <= 8 &&
"Invalid x86-64 argument pair!");
2553 if(Lo->isHalfTy() || Lo->isFloatTy())
2554Lo = llvm::Type::getDoubleTy(Lo->getContext());
2556assert((Lo->isIntegerTy() || Lo->isPointerTy())
2557&&
"Invalid/unknown lo type");
2558Lo = llvm::Type::getInt64Ty(Lo->getContext());
2562llvm::StructType *
Result= llvm::StructType::get(Lo, Hi);
2565assert(TD.getStructLayout(
Result)->getElementOffset(1) == 8 &&
2566 "Invalid x86-64 argument pair!");
2571classifyReturnType(
QualTypeRetTy)
const{
2574X86_64ABIInfo::Class Lo, Hi;
2575classify(RetTy, 0, Lo, Hi,
true);
2578assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
2579assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
2581llvm::Type *ResType =
nullptr;
2588assert((Hi == SSE || Hi ==
Integer|| Hi == X87Up) &&
2589 "Unknown missing lo part");
2594llvm_unreachable(
"Invalid classification for lo word.");
2599 returngetIndirectReturnResult(RetTy);
2604ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2608 if(Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2611RetTy = EnumTy->getDecl()->getIntegerType();
2614isPromotableIntegerTypeForABI(RetTy))
2622ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2628ResType = llvm::Type::getX86_FP80Ty(getVMContext());
2635assert(Hi == ComplexX87 &&
"Unexpected ComplexX87 classification.");
2636ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
2637llvm::Type::getX86_FP80Ty(getVMContext()));
2641llvm::Type *HighPart =
nullptr;
2647llvm_unreachable(
"Invalid classification for hi word.");
2654HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2659HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2670assert(Lo == SSE &&
"Unexpected SSEUp classification.");
2671ResType = GetByteVectorType(RetTy);
2682HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2699X86_64ABIInfo::classifyArgumentType(
QualTypeTy,
unsignedfreeIntRegs,
2700 unsigned&neededInt,
unsigned&neededSSE,
2701 boolisNamedArg,
boolIsRegCall)
const{
2704X86_64ABIInfo::Class Lo, Hi;
2705classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);
2709assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
2710assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
2714llvm::Type *ResType =
nullptr;
2721assert((Hi == SSE || Hi ==
Integer|| Hi == X87Up) &&
2722 "Unknown missing lo part");
2735 returngetIndirectResult(Ty, freeIntRegs);
2739llvm_unreachable(
"Invalid classification for lo word.");
2748ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
2752 if(Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2755Ty = EnumTy->getDecl()->getIntegerType();
2758isPromotableIntegerTypeForABI(Ty))
2768llvm::Type *IRType = CGT.ConvertType(Ty);
2769ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
2775llvm::Type *HighPart =
nullptr;
2783llvm_unreachable(
"Invalid classification for hi word.");
2785 caseNoClass:
break;
2790HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2801HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2811assert(Lo == SSE &&
"Unexpected SSEUp classification");
2812ResType = GetByteVectorType(Ty);
2826X86_64ABIInfo::classifyRegCallStructTypeImpl(
QualTypeTy,
unsigned&NeededInt,
2827 unsigned&NeededSSE,
2828 unsigned&MaxVectorWidth)
const{
2830assert(RT &&
"classifyRegCallStructType only valid with struct types");
2833 returngetIndirectReturnResult(Ty);
2836 if(
autoCXXRD = dyn_cast<CXXRecordDecl>(RT->
getDecl())) {
2837 if(CXXRD->isDynamicClass()) {
2838NeededInt = NeededSSE = 0;
2839 returngetIndirectReturnResult(Ty);
2842 for(
const auto&I : CXXRD->bases())
2843 if(classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
2846NeededInt = NeededSSE = 0;
2847 returngetIndirectReturnResult(Ty);
2855 if(classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
2858NeededInt = NeededSSE = 0;
2859 returngetIndirectReturnResult(Ty);
2862 unsignedLocalNeededInt, LocalNeededSSE;
2866NeededInt = NeededSSE = 0;
2867 returngetIndirectReturnResult(Ty);
2869 if(
const auto*AT = getContext().getAsConstantArrayType(MTy))
2870MTy = AT->getElementType();
2872 if(getContext().getTypeSize(VT) > MaxVectorWidth)
2873MaxVectorWidth = getContext().getTypeSize(VT);
2874NeededInt += LocalNeededInt;
2875NeededSSE += LocalNeededSSE;
2883X86_64ABIInfo::classifyRegCallStructType(
QualTypeTy,
unsigned&NeededInt,
2884 unsigned&NeededSSE,
2885 unsigned&MaxVectorWidth)
const{
2891 returnclassifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
2902WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
2903Win64ABIInfo.computeInfo(FI);
2907 boolIsRegCall =
CallingConv== llvm::CallingConv::X86_RegCall;
2910 unsignedFreeIntRegs = IsRegCall ? 11 : 6;
2911 unsignedFreeSSERegs = IsRegCall ? 16 : 8;
2912 unsignedNeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;
2918FI.
getReturnType(), NeededInt, NeededSSE, MaxVectorWidth);
2919 if(FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
2920FreeIntRegs -= NeededInt;
2921FreeSSERegs -= NeededSSE;
2929getContext().LongDoubleTy)
2941 else if(NeededSSE && MaxVectorWidth > 0)
2953it != ie; ++it, ++ArgNo) {
2954 boolIsNamedArg = ArgNo < NumRequiredArgs;
2956 if(IsRegCall && it->type->isStructureOrClassType())
2957it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
2961NeededSSE, IsNamedArg);
2967 if(FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
2968FreeIntRegs -= NeededInt;
2969FreeSSERegs -= NeededSSE;
2973it->info = getIndirectResult(it->type, FreeIntRegs);
2982llvm::Value *overflow_arg_area =
2997llvm::Value *Res = overflow_arg_area;
3005llvm::Value *Offset =
3006llvm::ConstantInt::get(CGF.
Int32Ty, (SizeInBytes + 7) & ~7);
3008Offset,
"overflow_arg_area.next");
3012 return Address(Res, LTy, Align);
3024 unsignedneededInt, neededSSE;
3036 if(!neededInt && !neededSSE)
3052llvm::Value *InRegs =
nullptr;
3054llvm::Value *gp_offset =
nullptr, *fp_offset =
nullptr;
3058InRegs = llvm::ConstantInt::get(CGF.
Int32Ty, 48 - neededInt * 8);
3059InRegs = CGF.
Builder.CreateICmpULE(gp_offset, InRegs,
"fits_in_gp");
3065llvm::Value *FitsInFP =
3066llvm::ConstantInt::get(CGF.
Int32Ty, 176 - neededSSE * 16);
3067FitsInFP = CGF.
Builder.CreateICmpULE(fp_offset, FitsInFP,
"fits_in_fp");
3068InRegs = InRegs ? CGF.
Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3074CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3095 if(neededInt && neededSSE) {
3097assert(AI.
isDirect() &&
"Unexpected ABI info for mixed regs");
3098llvm::StructType *ST = cast<llvm::StructType>(AI.
getCoerceToType());
3101assert(ST->getNumElements() == 2 &&
"Unexpected ABI info for mixed regs");
3102llvm::Type *TyLo = ST->getElementType(0);
3103llvm::Type *TyHi = ST->getElementType(1);
3104assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3105 "Unexpected ABI info for mixed regs");
3106llvm::Value *GPAddr =
3108llvm::Value *FPAddr =
3110llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3111llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3127}
else if(neededInt || neededSSE == 1) {
3129 autoTInfo = getContext().getTypeInfoInChars(Ty);
3130 uint64_tTySize = TInfo.Width.getQuantity();
3132llvm::Type *CoTy =
nullptr;
3136llvm::Value *GpOrFpOffset = neededInt ? gp_offset : fp_offset;
3137 uint64_tAlignment = neededInt ? 8 : 16;
3138 uint64_tRegSize = neededInt ? neededInt * 8 : 16;
3163llvm::Value *PtrOffset =
3185assert(neededSSE == 2 &&
"Invalid number of needed registers!");
3218llvm::Value *Offset = llvm::ConstantInt::get(CGF.
Int32Ty, neededInt * 8);
3223llvm::Value *Offset = llvm::ConstantInt::get(CGF.
Int32Ty, neededSSE * 16);
3246 uint64_tWidth = getContext().getTypeSize(Ty);
3247 boolIsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3255ABIArgInfoWinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
3261isHomogeneousAggregate(Ty,
Base, NumElts) && FreeSSERegs >= NumElts) {
3262FreeSSERegs -= NumElts;
3263 returngetDirectX86Hva();
3269 boolIsReturnType,
boolIsVectorCall,
3270 boolIsRegCall)
const{
3276Ty = EnumTy->getDecl()->getIntegerType();
3278 TypeInfoInfo = getContext().getTypeInfo(Ty);
3280 CharUnitsAlign = getContext().toCharUnitsFromBits(Info.
Align);
3284 if(!IsReturnType) {
3290 returngetNaturalAlignIndirect(Ty,
false);
3298 if((IsVectorCall || IsRegCall) &&
3299isHomogeneousAggregate(Ty,
Base, NumElts)) {
3301 if(FreeSSERegs >= NumElts) {
3302FreeSSERegs -= NumElts;
3308}
else if(IsVectorCall) {
3309 if(FreeSSERegs >= NumElts &&
3311FreeSSERegs -= NumElts;
3313}
else if(IsReturnType) {
3325llvm::Type *LLTy = CGT.ConvertType(Ty);
3326 if(LLTy->isPointerTy() || LLTy->isIntegerTy())
3333 if(Width > 64 || !llvm::isPowerOf2_64(Width))
3334 returngetNaturalAlignIndirect(Ty,
false);
3341 switch(BT->getKind()) {
3342 caseBuiltinType::Bool:
3347 caseBuiltinType::LongDouble:
3351 constllvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3352 if(LDF == &llvm::APFloat::x87DoubleExtended())
3357 caseBuiltinType::Int128:
3358 caseBuiltinType::UInt128:
3368llvm::Type::getInt64Ty(getVMContext()), 2));
3391 boolIsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
3392 boolIsRegCall = CC == llvm::CallingConv::X86_RegCall;
3396 if(CC == llvm::CallingConv::X86_64_SysV) {
3397X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
3398SysVABIInfo.computeInfo(FI);
3402 unsignedFreeSSERegs = 0;
3406}
else if(IsRegCall) {
3413IsVectorCall, IsRegCall);
3418}
else if(IsRegCall) {
3423 unsignedArgNum = 0;
3424 unsignedZeroSSERegs = 0;
3429 unsigned*MaybeFreeSSERegs =
3430(IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
3432classify(I.
type, *MaybeFreeSSERegs,
false, IsVectorCall, IsRegCall);
3440I.
info= reclassifyHvaArgForVectorCall(I.
type, FreeSSERegs, I.
info);
3448 uint64_tWidth = getContext().getTypeSize(Ty);
3449 boolIsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3458 CodeGenModule&CGM,
boolDarwinVectorABI,
boolWin32StructABI,
3459 unsignedNumRegisterParameters,
boolSoftFloatABI) {
3460 boolRetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
3462 returnstd::make_unique<X86_32TargetCodeGenInfo>(
3463CGM.
getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
3464NumRegisterParameters, SoftFloatABI);
3468 CodeGenModule&CGM,
boolDarwinVectorABI,
boolWin32StructABI,
3469 unsignedNumRegisterParameters) {
3470 boolRetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
3472 returnstd::make_unique<WinX86_32TargetCodeGenInfo>(
3473CGM.
getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
3474NumRegisterParameters);
3477std::unique_ptr<TargetCodeGenInfo>
3480 returnstd::make_unique<X86_64TargetCodeGenInfo>(CGM.
getTypes(), AVXLevel);
3483std::unique_ptr<TargetCodeGenInfo>
3486 returnstd::make_unique<WinX86_64TargetCodeGenInfo>(CGM.
getTypes(), AVXLevel);
static bool checkAVX512ParamFeature(DiagnosticsEngine &Diag, SourceLocation CallLoc, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, bool IsArgument)
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static void initFeatureMaps(const ASTContext &Ctx, llvm::StringMap< bool > &CallerMap, const FunctionDecl *Caller, llvm::StringMap< bool > &CalleeMap, const FunctionDecl *Callee)
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx, SourceLocation CallLoc, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, bool IsArgument)
static bool checkAVXParamFeature(DiagnosticsEngine &Diag, SourceLocation CallLoc, const llvm::StringMap< bool > &CallerMap, const llvm::StringMap< bool > &CalleeMap, QualType Ty, StringRef Feature, bool IsArgument)
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, uint64_t &Size)
static llvm::Type * getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
getFPTypeAtOffset - Return a floating point type at the specified offset.
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, uint64_t &Size)
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM)
static bool isArgInAlloca(const ABIArgInfo &Info)
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
TypeInfoChars getTypeInfoInChars(const Type *T) const
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
const TargetInfo & getTargetInfo() const
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *) const
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getRequiredAlignment() const
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
A fixed int type of a specified bitwidth.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
Represents a C++ struct/union/class.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits One()
One - Construct a CharUnits quantity of one.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
bool getIndirectByVal() const
static ABIArgInfo getInAlloca(unsigned FieldIndex, bool Indirect=false)
static ABIArgInfo getIgnore()
static ABIArgInfo getExpand()
unsigned getDirectOffset() const
void setIndirectAlign(CharUnits IA)
static ABIArgInfo getExtendInReg(QualType Ty, llvm::Type *T=nullptr)
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
llvm::Type * getCoerceToType() const
bool canHaveCoerceToType() const
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions.
ASTContext & getContext() const
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
virtual RValue EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const
Emit the target dependent code to load a value of.
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
const TargetInfo & getTarget() const
virtual RValue EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const =0
EmitVAArg - Emit the target dependent code to load a value of.
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
RecordArgABI
Specify how one should pass an argument of a record type.
@ RAA_Indirect
Pass it as a pointer to temporary memory.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
CGFunctionInfo - Class to encapsulate the information about a function definition.
ABIArgInfo & getReturnInfo()
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
const_arg_iterator arg_begin() const
unsigned getRegParm() const
CanQualType getReturnType() const
bool getHasRegParm() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
unsigned getMaxVectorWidth() const
Return the maximum vector width in the arguments.
unsigned getNumRequiredArgs() const
void setMaxVectorWidth(unsigned Width)
Set the maximum vector width in the arguments.
CallArgList - Type for representing both the value and type of arguments in a call.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
llvm::Type * ConvertTypeForMem(QualType T)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
ASTContext & getContext() const
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CGFunctionInfo * CurFnInfo
llvm::LLVMContext & getLLVMContext()
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
This class organizes the cross-function state that is used while generating LLVM code.
DiagnosticsEngine & getDiags() const
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::Triple & getTriple() const
ASTContext & getContext() const
const CodeGenOptions & getCodeGenOpts() const
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
LValue - This represents an lvalue references.
Address getAddress() const
void setAddress(Address address)
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
A class for recording the number of arguments that a function signature requires.
Target specific hooks for defining how a type should be passed or returned from functions with one of...
bool occupiesMoreThan(ArrayRef< llvm::Type * > scalarTypes, unsigned maxAllRegisters) const
Does the given lowering require more than the given number of registers when expanded?
virtual bool shouldPassIndirectly(ArrayRef< llvm::Type * > ComponentTys, bool AsReturnValue) const
Returns true if an aggregate which expands to the given type sequence should be passed / returned ind...
TargetCodeGenInfo - This class organizes various target-specific code generation issues,...
virtual void addReturnRegisterOutputs(CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue, std::string &Constraints, std::vector< llvm::Type * > &ResultRegTypes, std::vector< llvm::Type * > &ResultTruncRegTypes, std::vector< CodeGen::LValue > &ResultRegDests, std::string &AsmString, unsigned NumOutputs) const
Adds constraints and types for result registers.
virtual llvm::Type * adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, StringRef Constraint, llvm::Type *Ty) const
Corrects the low-level LLVM type for a given constraint and "usual" type.
virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const
Retrieve the address of a function to call immediately before calling objc_retainAutoreleasedReturnVa...
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, const FunctionDecl *Callee, const CallArgList &Args, QualType ReturnType) const
Any further codegen related checks that need to be done on a function call in a target specific manne...
virtual bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Initializes the given DWARF EH register-size table, a char*.
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
static std::string qualifyWindowsLibrary(StringRef Lib)
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
virtual bool markARCOptimizedReturnCallsAsNoTail() const
Determine whether a call to objc_retainAutoreleasedReturnValue or objc_unsafeClaimAutoreleasedReturnV...
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
QualType getElementType() const
Represents the canonical version of C arrays with a specified constant size.
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Decl - This represents one declaration (or definition), e.g.
Concrete class used by the front-end to report problems and issues.
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
Represents a function declaration or definition.
const ParmVarDecl * getParamDecl(unsigned i) const
unsigned getNumParams() const
Return the number of parameters this function must have based on its FunctionType.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
CallingConv getCallConv() const
@ Ver11
Attempt to be ABI-compatible with code generated by Clang 11.0.x (git 2e10b7a39b93).
A (possibly-)qualified type.
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
QualType getCanonicalType() const
Represents a struct/union/class.
bool hasFlexibleArrayMember() const
field_iterator field_end() const
field_range fields() const
field_iterator field_begin() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
RecordDecl * getDecl() const
Encodes a location in the source.
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
const llvm::fltSemantics & getLongDoubleFormat() const
The base class of the type hierarchy.
bool isBlockPointerType() const
bool isFloat16Type() const
bool isPointerType() const
bool isReferenceType() const
bool isEnumeralType() const
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
bool isBitIntType() const
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
bool isBuiltinType() const
Helper methods to distinguish type categories.
bool isAnyComplexType() const
bool isMemberPointerType() const
bool isBFloat16Type() const
bool isMemberFunctionPointerType() const
bool isVectorType() const
const T * getAs() const
Member-template getAs<specific type>'.
bool isRecordType() const
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
Represents a GCC generic vector type.
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
std::unique_ptr< TargetCodeGenInfo > createX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
std::unique_ptr< TargetCodeGenInfo > createWinX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI, unsigned NumRegisterParameters)
bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty)
RValue emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign, AggValueSlot Slot, bool ForceRightAdjust=false)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
X86AVXABILevel
The AVX ABI level for X86 targets.
bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyField - Return true iff the field is "empty", that is, it is an unnamed bit-field or an (arra...
llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
bool isAggregateTypeForABI(QualType T)
const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::unique_ptr< TargetCodeGenInfo > createX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI, unsigned NumRegisterParameters, bool SoftFloatABI)
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
std::unique_ptr< TargetCodeGenInfo > createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel)
bool isSIMDVectorType(ASTContext &Context, QualType Ty)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool Ret(InterpState &S, CodePtr &PC)
The JSON file list parser is used to communicate input to InstallAPI.
@ Result
The result type of a method or function.
const FunctionProtoType * T
CallingConv
CallingConv - Specifies the calling convention that a function uses.
@ Class
The "class" keyword introduces the elaborated-type-specifier.
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * Int32Ty
RetroSearch is an open source project built by @garambo | Open a GitHub Issue
Search and Browse the WWW like it's 1997 | Search results from DuckDuckGo
HTML:
3.2
| Encoding:
UTF-8
| Version:
0.7.4