    CallingConv::ID CallConv,
    const SmallVectorImpl<ISD::InputArg> &Ins,
    SmallVectorImpl<SDValue> &InVals) const {
    MachineFunction &MF = DAG.getMachineFunction();
    auto& frameInfo = MF.getFrameInfo();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
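    // Let the TCE calling convention (CC_TCE) assign a location to each
    // incoming argument.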
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(
        CallConv, isVarArg, DAG.getMachineFunction(),
        ArgLocs, *DAG.getContext());
    CCInfo.AnalyzeFormalArguments(Ins, CC_TCE);
    const unsigned *CurArgReg = ArgRegs, *ArgRegEnd = ArgRegs + argRegCount;

    unsigned ArgOffset = 0;

    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        EVT ObjectVT = VA.getValVT();
        MVT sType = ObjectVT.getSimpleVT().SimpleTy;
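        // Dispatch on the argument's value type: each class of argument is
        // either taken from the next free argument register or loaded from
        // a fixed stack slot.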
        if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
            sType == MVT::i32 || sType == MVT::i64) {
            if (!Ins[i].Used) {
                // Dead argument: just consume a register, if one is left.
                if (CurArgReg < ArgRegEnd) {
                    ++CurArgReg;
                }
                InVals.push_back(DAG.getUNDEF(ObjectVT));
            } else if (CurArgReg < ArgRegEnd && !isVarArg) {
                unsigned VReg = RegInfo.createVirtualRegister(
                    /* integer register class for DEFAULT_TYPE */);
                MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
                SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, DEFAULT_TYPE);
                unsigned AssertOp = ISD::AssertSext;
                Arg = DAG.getNode(
                    AssertOp, dl, DEFAULT_TYPE, Arg,
                    DAG.getValueType(ObjectVT));
                Arg = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Arg);
                InVals.push_back(Arg);
            } else {
                // No register left: load the argument from a fixed stack slot.
                int FrameIdx = frameInfo.CreateFixedObject(
                    /* size, ArgOffset, isImmutable */);
                SDValue FIPtr = DAG.getFrameIndex(FrameIdx, DEFAULT_TYPE);
                SDValue Load;
                ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
#ifndef LITTLE_ENDIAN_TARGET
                // Big endian: bias the slot address towards the significant
                // bytes of the sub-word value.
    #ifdef LLVM_OLDER_THAN_16
                unsigned Offset = 4 - std::max(
                    1UL, ObjectVT.getSizeInBits().getFixedSize()/8);
    #else
                unsigned Offset = 4 - std::max(
                    1UL, ObjectVT.getSizeInBits().getFixedValue()/8);
    #endif
                FIPtr = DAG.getNode(
                    ISD::ADD, dl, DEFAULT_TYPE, FIPtr,
                    DAG.getConstant(Offset, dl, DEFAULT_TYPE));
#endif
                Load = DAG.getExtLoad(
                    LoadOp, dl, DEFAULT_TYPE, Chain, FIPtr,
                    MachinePointerInfo(), ObjectVT);
                Load = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Load);
                InVals.push_back(Load);
            }
        } else if (sType == MVT::f16) {
            if (!Ins[i].Used) {
                if (CurArgReg < ArgRegEnd) {
                    ++CurArgReg;
                }
                InVals.push_back(DAG.getUNDEF(ObjectVT));
            } else if (CurArgReg < ArgRegEnd && !isVarArg) {
                unsigned VReg = RegInfo.createVirtualRegister(
                    &TCE::HFPRegsRegClass);
                MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
                SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f16);
                InVals.push_back(Arg);
            } else {
                int FrameIdx = frameInfo.CreateFixedObject(
                    /* size, ArgOffset, isImmutable */);
                SDValue FIPtr = DAG.getFrameIndex(FrameIdx, DEFAULT_TYPE);
                SDValue Load = DAG.getLoad(
                    MVT::f16, dl, Chain, FIPtr, MachinePointerInfo());
                InVals.push_back(Load);
            }
        } else if (sType == MVT::f32 || sType == MVT::f64) {
            if (!Ins[i].Used) {
                if (CurArgReg < ArgRegEnd) {
                    ++CurArgReg;
                }
                InVals.push_back(DAG.getUNDEF(ObjectVT));
            } else if (CurArgReg < ArgRegEnd && !isVarArg) {
                auto regClass = sType == MVT::f32 ?
                    &TCE::FPRegsRegClass :
                    &TCE::R64DFPRegsRegClass;
                unsigned VReg = RegInfo.createVirtualRegister(regClass);
                MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
                SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, sType);
                InVals.push_back(Arg);
            } else {
                int FrameIdx = frameInfo.CreateFixedObject(
                    /* size, ArgOffset, isImmutable */);
                SDValue FIPtr = DAG.getFrameIndex(FrameIdx, DEFAULT_TYPE);
                SDValue Load = DAG.getLoad(
                    sType, dl, Chain, FIPtr, MachinePointerInfo());
                InVals.push_back(Load);
            }
        } else if (sType.isVector()) {
            if (!Ins[i].Used) {
                InVals.push_back(DAG.getUNDEF(ObjectVT));
            } else {
                int FrameIdx = MF.getFrameInfo().CreateFixedObject(
                    sType.getStoreSize(), ArgOffset, true);
                SDValue FIPtr = DAG.getFrameIndex(FrameIdx, DEFAULT_TYPE);
                SDValue Load = DAG.getLoad(
                    sType, dl, Chain, FIPtr, MachinePointerInfo());
                InVals.push_back(Load);
            }
        } else {
            std::cerr << "Unhandled argument type: "
                      << ObjectVT.getEVTString() << std::endl;
            std::cerr << "sType size in bits: "
                      << sType.getSizeInBits() << std::endl;
            std::cerr << "is a vector? "
                      << sType.isVector() << std::endl;
        }

        unsigned argumentByteSize = sType.getStoreSize();
        if (argumentByteSize <= maxMemAlignment) {
            ArgOffset += maxMemAlignment;
        } else {
            unsigned alignBytes = maxMemAlignment - 1;
            ArgOffset += (argumentByteSize + alignBytes) & (~alignBytes);
        }
    }
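
// LowerCall: lower an outgoing call into argument register copies, stack
// stores and the target call sequence.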
    SmallVectorImpl<SDValue> &InVals) const {

    SelectionDAG &DAG = CLI.DAG;
    SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
    SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
    SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
    SDValue Chain = CLI.Chain;
    SDValue Callee = CLI.Callee;
    bool &isTailCall = CLI.IsTailCall;
    CallingConv::ID CallConv = CLI.CallConv;
    bool isVarArg = CLI.IsVarArg;
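    // First pass over the outgoing arguments: count how many travel in
    // argument registers and accumulate the aligned stack space needed for
    // the outgoing argument area.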
    unsigned ArgsSize = 0;
    unsigned regParams = 0;

    for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
        EVT ObjectVT = Outs[i].VT;
        MVT sType = Outs[i].VT.SimpleTy;

        if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
            sType == MVT::i32 || sType == MVT::f16 || sType == MVT::f32) {
            if (regParams < argRegCount) {
                regParams++;
            }
        } else if (sType == MVT::i64 || sType == MVT::f64) {
            // ...
        } else if (sType.isVector()) {
            // ...
        } else {
            std::cerr << "Unknown argument type: "
                      << ObjectVT.getEVTString() << std::endl;
        }

        // ...

        if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
            sType == MVT::i32 || sType == MVT::i64 || sType == MVT::f16 ||
            sType == MVT::f32 || sType == MVT::f64) {
            if (regParams < argRegCount) {
                regParams++;
            }
        } else if (sType.isVector()) {
            // ...
        } else {
            std::cerr << "Unknown argument type: "
                      << ObjectVT.getEVTString() << std::endl;
        }

        // Round the reserved slot up to the maximum memory alignment.
        unsigned argumentByteSize = sType.getStoreSize();
        if (argumentByteSize <= maxMemAlignment) {
            ArgsSize += maxMemAlignment;
        } else {
            unsigned alignBytes = maxMemAlignment - 1;
            ArgsSize += (argumentByteSize + alignBytes) & (~alignBytes);
        }
    }
    Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);

    SmallVector<SDValue, 8> MemOpChains;
    SmallVector<std::pair<unsigned, SDValue>, argRegCount> RegsToPass;

    unsigned ArgOffset = 0;
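    // Second pass: bind each outgoing value either to an argument register
    // (RegsToPass) or to a stack store at its ArgOffset (MemOpChains).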
    for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
        SDValue Val = OutVals[i];
        EVT ObjectVT = Val.getValueType();
        MVT sType = ObjectVT.getSimpleVT().SimpleTy;
        SDValue ValToStore(0, 0);

        if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
            sType == MVT::i32 || sType == MVT::f32 || sType == MVT::f16) {
            if (RegsToPass.size() >= argRegCount || isVarArg) {
                ValToStore = Val;
            }
            if (RegsToPass.size() < argRegCount) {
                RegsToPass.push_back(
                    std::make_pair(ArgRegs[RegsToPass.size()], Val));
            }
        } else if (sType.isVector()) {
            ValToStore = Val;
        } else {
            std::cerr << "Unknown argument type: "
                      << ObjectVT.getEVTString() << std::endl;
        }

        // ...

        if (sType == MVT::i1 || sType == MVT::i8 || sType == MVT::i16 ||
            sType == MVT::i32 || sType == MVT::i64 || sType == MVT::f32 ||
            sType == MVT::f64 || sType == MVT::f16) {
            if (RegsToPass.size() >= argRegCount || isVarArg) {
                ValToStore = Val;
            }
            if (RegsToPass.size() < argRegCount) {
                RegsToPass.push_back(
                    std::make_pair(ArgRegs[RegsToPass.size()], Val));
            }
        } else if (sType.isVector()) {
            ValToStore = Val;
        } else {
            std::cerr << "Unknown argument type: "
                      << ObjectVT.getEVTString() << std::endl;
        }
        if (ValToStore.getNode()) {
            SDValue StackPtr = DAG.getCopyFromReg(
                Chain, dl, TCE::SP, getPointerTy(
                    getTargetMachine().createDataLayout(), 0));
            SDValue PtrOff = DAG.getConstant(ArgOffset, dl, DEFAULT_TYPE);
            PtrOff = DAG.getNode(ISD::ADD, dl, DEFAULT_TYPE, StackPtr, PtrOff);
            MemOpChains.push_back(DAG.getStore(
                Chain, dl, ValToStore, PtrOff, MachinePointerInfo()));
        }

        unsigned argumentByteSize = sType.getStoreSize();
        if (argumentByteSize <= maxMemAlignment) {
            ArgOffset += maxMemAlignment;
        } else {
            unsigned alignBytes = maxMemAlignment - 1;
            ArgOffset += (argumentByteSize + alignBytes) & (~alignBytes);
        }
    }
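    // Emit the pending stack stores, then copy the register arguments into
    // their physical registers, glued together so they stay with the call.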
    if (!MemOpChains.empty()) {
        Chain = DAG.getNode(
            ISD::TokenFactor, dl, MVT::Other, ArrayRef<SDValue>(MemOpChains));
    }

    SDValue InFlag;
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
        unsigned Reg = RegsToPass[i].first;
        Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
        InFlag = Chain.getValue(1);
    }
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
        Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, DEFAULT_TYPE);
    else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
        Callee = DAG.getTargetExternalSymbol(E->getSymbol(), DEFAULT_TYPE);

    std::vector<EVT> NodeTys;
    NodeTys.push_back(MVT::Other);
    NodeTys.push_back(MVT::Glue);
    SDValue Ops[] = { Chain, Callee, InFlag };
    Chain = DAG.getNode(
        TCEISD::CALL, dl, NodeTys,
        ArrayRef<SDValue>(Ops, InFlag.getNode() ? 3 : 2));
    InFlag = Chain.getValue(1);

    Chain = DAG.getCALLSEQ_END(
        Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
        DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
    InFlag = Chain.getValue(1);
    // Copy the return values out of their physical registers.
    SmallVector<CCValAssign, 16> RVLocs;
    CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                   RVLocs, *DAG.getContext());

    RVInfo.AnalyzeCallResult(Ins, RetCC_TCE);

    for (unsigned i = 0; i != RVLocs.size(); ++i) {
        unsigned Reg = RVLocs[i].getLocReg();
        Chain = DAG.getCopyFromReg(
            Chain, dl, Reg, RVLocs[i].getValVT(), InFlag).getValue(1);
        InFlag = Chain.getValue(2);
        InVals.push_back(Chain.getValue(0));
    }
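
// TCETargetLowering constructor: register the register classes and describe
// how each generic operation has to be legalized for this machine.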
    setSchedulingPreference(llvm::Sched::RegPressure);

    addRegisterClass(MVT::i1, &TCE::R1RegsRegClass);

    addRegisterClass(MVT::i64, &TCE::R64IRegsRegClass);
    addRegisterClass(MVT::f64, &TCE::R64DFPRegsRegClass);

    addRegisterClass(MVT::i32, &TCE::R32IRegsRegClass);

    addRegisterClass(MVT::f32, &TCE::FPRegsRegClass);
    addRegisterClass(MVT::f16, &TCE::HFPRegsRegClass);
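    // Sub-word integer operands of int<->fp conversions are promoted to a
    // wider integer type.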
    setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);

    setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

    setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FABS, MVT::f32, Custom);
    setOperationAction(ISD::FABS, MVT::f64, Custom);

    setOperationAction(ISD::GlobalAddress, DEFAULT_TYPE, Custom);
    setOperationAction(ISD::BlockAddress, DEFAULT_TYPE, Custom);
    setOperationAction(ISD::ConstantPool, DEFAULT_TYPE, Custom);

    setOperationAction(ISD::TRAP, MVT::Other, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f80, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    setOperationAction(ISD::BRIND, MVT::Other, Expand);
    setOperationAction(ISD::BR_JT, MVT::Other, Expand);
    std::cerr << "Only port guarded jumps supported, not expanding bc_cc"
              << std::endl;

    setOperationAction(ISD::BRCOND, MVT::Other, Expand);
    setOperationAction(ISD::BRCOND, MVT::i1, Expand);
    setOperationAction(ISD::BRCOND, MVT::i32, Expand);
    setOperationAction(ISD::BRCOND, MVT::f16, Expand);
    setOperationAction(ISD::BRCOND, MVT::f32, Expand);
    setOperationAction(ISD::BRCOND, MVT::i64, Expand);

    setOperationAction(ISD::BR_CC, MVT::Other, Expand);
    setOperationAction(ISD::BR_CC, MVT::i1, Expand);
    setOperationAction(ISD::BR_CC, MVT::i32, Expand);
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    setOperationAction(ISD::BR_CC, MVT::i64, Expand);
    setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

    setTargetDAGCombine(ISD::BRCOND);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);

    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    setOperationAction(ISD::MULHS, MVT::i32, Expand);
    setOperationAction(ISD::MULHU, MVT::i64, Expand);
    setOperationAction(ISD::MULHS, MVT::i64, Expand);

    setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

    setOperationAction(ISD::VASTART, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
    setOperationAction(ISD::VAEND, MVT::Other, Expand);
    setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
    setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

    setOperationAction(ISD::DYNAMIC_STACKALLOC, DEFAULT_TYPE, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);

    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
    setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
    setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i64, Expand);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);

    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::v32f32, MVT::v32f16, Expand);

    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
#if LLVM_HAS_CUSTOM_VECTOR_EXTENSION == 2
    setLoadExtAction(ISD::EXTLOAD, MVT::v64f32, MVT::v64f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::v128f32, MVT::v128f16, Expand);
#endif
    std::cout << "No 8-bit loads in the processor. "
              << "Emulating 8-bit loads with wider loads. "
              << "This may be very slow if the program performs "
              << "lots of 8-bit loads." << std::endl;

    setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i8, Custom);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i8, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i8, Custom);
    setOperationAction(ISD::LOAD, MVT::i8, Custom);
    setOperationAction(ISD::LOAD, MVT::i1, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i1, Custom);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i1, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i1, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::i32, MVT::i8, Custom);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i8, Custom);
    setOperationAction(ISD::LOAD, MVT::i8, Custom);
    setOperationAction(ISD::LOAD, MVT::i1, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::i32, MVT::i1, Custom);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i1, Custom);
    std::cout << "No 16-bit loads in the processor. "
              << "Emulating 16-bit loads with wider loads. "
              << "This may be very slow if the program performs "
              << "lots of 16-bit loads." << std::endl;

    setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i16, Custom);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i16, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i16, Custom);
    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::i32, MVT::i16, Custom);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i16, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i16, Custom);
    setOperationAction(ISD::LOAD, MVT::i16, Custom);
    setOperationAction(ISD::ADDE, MVT::i32, Expand);
    setOperationAction(ISD::ADDC, MVT::i32, Expand);
    setOperationAction(ISD::ADDE, MVT::i16, Expand);
    setOperationAction(ISD::ADDC, MVT::i16, Expand);
    setOperationAction(ISD::ADDE, MVT::i8, Expand);
    setOperationAction(ISD::ADDC, MVT::i8, Expand);

    setOperationAction(ISD::Constant, MVT::i64, Custom);
    setOperationAction(ISD::Constant, MVT::i32, Custom);

    setStackPointerRegisterToSaveRestore(TCE::SP);
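    // The operation sets below drive per-machine legalization: operations
    // the machine lacks are expanded, others are promoted or custom-lowered.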
    const std::set<std::pair<unsigned, llvm::MVT::SimpleValueType> >*
        missingOps = /* ... */;

    std::set<std::pair<unsigned, llvm::MVT::SimpleValueType> >::const_iterator
        iter = missingOps->begin();

    while (iter != missingOps->end()) {
        unsigned nodetype = (*iter).first;
        llvm::MVT::SimpleValueType valuetype = (*iter).second;
        switch (nodetype) {
        case ISD::SDIV: std::cerr << "SDIV,"; break;
        case ISD::UDIV: std::cerr << "UDIV,"; break;
        case ISD::SREM: std::cerr << "SREM,"; break;
        case ISD::UREM: std::cerr << "UREM,"; break;
        case ISD::ROTL: std::cerr << "ROTL,"; break;
        case ISD::ROTR: std::cerr << "ROTR,"; break;
        case ISD::MUL:  std::cerr << "MUL,"; break;
        case ISD::SIGN_EXTEND_INREG:
            if (valuetype == MVT::i8) std::cerr << "SXQW,";
            if (valuetype == MVT::i16) std::cerr << "SXHW,";
            break;
        default: std::cerr << nodetype << ", "; break;
        }
        setOperationAction(nodetype, valuetype, Expand);
        iter++;
    }
    const std::set<std::pair<unsigned, llvm::MVT::SimpleValueType> >*
        promotedOps = /* ... */;

    iter = promotedOps->begin();
    while (iter != promotedOps->end()) {
        unsigned nodetype = (*iter).first;
        llvm::MVT::SimpleValueType valuetype = (*iter).second;
        llvm::EVT evt(valuetype);
        setOperationAction(nodetype, valuetype, Promote);
        iter++;
    }

    std::cerr << std::endl;
    const std::set<std::pair<unsigned, llvm::MVT::SimpleValueType> >*
        customLegalizedOps = /* ... */;

    for (auto i : *customLegalizedOps) {
        unsigned nodetype = i.first;
        llvm::MVT::SimpleValueType valuetype = i.second;
        llvm::EVT evt(valuetype);
        setOperationAction(nodetype, valuetype, Custom);
    }

    setJumpIsExpensive(true);

    PredictableSelectIsExpensive = false;
    if (as->numericalIds().empty()) {
        std::cerr << "Global addresses by "
                  << "address space id of 0"
                  << " (implicitly specified by AS: " << as->name()
                  << ") will be stored in constant pool." << std::endl;
    }

    for (unsigned id : as->numericalIds()) {
        std::cerr << "Global addresses belonging to "
                  << "address space id of " << id
                  << " (specified by AS: " << as->name()
                  << ") will be stored in constant pool." << std::endl;
    }
    setBooleanContents(ZeroOrOneBooleanContent);
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
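
// ReplaceNodeResults: custom result legalization, used here to emulate
// narrow (i1/i8/i16) loads with a full-width load plus shift and mask.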
    SDNode* node, SmallVectorImpl<SDValue>& Results,
    SelectionDAG& DAG) const {

    auto fnName = DAG.getMachineFunction().getName().str();
    if (node->getOpcode() == ISD::LOAD) {
        auto lsdn = dyn_cast<LoadSDNode>(node);
        if (lsdn == nullptr) {
            std::cerr << "Error: null loadsdnde!" << std::endl;
        }
#ifdef LLVM_OLDER_THAN_16
        if (lsdn->getAlignment() < 2 &&
#else
        if (lsdn->getAlign() < 2 &&
#endif
            lsdn->getMemoryVT() != MVT::i8 && lsdn->getMemoryVT() != MVT::i1) {
            assert(0 && "Cannot lower 16-bit memory op with only one byte alignment");
        }
        auto chain = node->getOperand(0);

        SDValue load;
        SDValue lowBits;
#ifdef LLVM_OLDER_THAN_16
        if (lsdn->getAlignment() >= 4) {
#else
        if (lsdn->getAlign() >= 4) {
#endif
            load = DAG.getLoad(
                MVT::i32, node, chain, lsdn->getBasePtr(), MachinePointerInfo());
            lowBits = DAG.getConstant(0l, node, MVT::i32);
        } else {
            auto alignedAddr = DAG.getNode(
                ISD::AND, node, MVT::i32, lsdn->getBasePtr(),
                DAG.getConstant(-4l, node, MVT::i32));
            auto lowBytes = DAG.getNode(
                ISD::AND, node, MVT::i32, lsdn->getBasePtr(),
                DAG.getConstant(3l, node, MVT::i32));
            lowBits = DAG.getNode(
                ISD::SHL, node, MVT::i32, lowBytes,
                DAG.getConstant(3l, node, MVT::i32));
            load = DAG.getLoad(
                MVT::i32, node, chain, alignedAddr, MachinePointerInfo());
        }
        MVT vt = node->getSimpleValueType(0);

        if (vt == MVT::i32) {
            assert(0 && "Result i32? this should be extload?");
            Results.push_back(SDValue(load));
            Results.push_back(SDValue(load.getNode(), 1));
            return;
        }

        SDValue shiftedVal;
        SDValue finalVal;
        if (lsdn->getExtensionType() == ISD::ZEXTLOAD) {
#ifdef LLVM_OLDER_THAN_16
            shiftedVal = lsdn->getAlignment() < 4 ?
#else
            shiftedVal = lsdn->getAlign() < 4 ?
#endif
                DAG.getNode(ISD::SRA, node, MVT::i32, load, lowBits) :
                load;

            if (lsdn->getMemoryVT() == MVT::i1) {
                finalVal = DAG.getNode(
                    ISD::AND, node, MVT::i32, shiftedVal,
                    DAG.getConstant(1l, node, MVT::i32));
            } else if (lsdn->getMemoryVT() == MVT::i8) {
                finalVal = DAG.getNode(
                    ISD::AND, node, MVT::i32, shiftedVal,
                    DAG.getConstant(255l, node, MVT::i32));
            } else {
                assert(0 && "Wrong memory vt in zextload!");
            }
        } else if (lsdn->getExtensionType() == ISD::SEXTLOAD) {
            if (lsdn->getMemoryVT() == MVT::i1) {
                auto shiftsLeft =
                    DAG.getNode(ISD::SUB, node, MVT::i32,
                                DAG.getConstant(31l, node, MVT::i32), lowBits);
                auto shiftUp = DAG.getNode(
                    ISD::SHL, node, MVT::i32, load, shiftsLeft);
                finalVal = DAG.getNode(
                    ISD::SRA, node, MVT::i32, shiftUp,
                    DAG.getConstant(31l, node, MVT::i32));
            } else if (lsdn->getMemoryVT() == MVT::i8) {
                auto shiftsLeft =
                    DAG.getNode(ISD::SUB, node, MVT::i32,
                                DAG.getConstant(24l, node, MVT::i32), lowBits);
                auto shiftUp = DAG.getNode(
                    ISD::SHL, node, MVT::i32, load, shiftsLeft);
                finalVal = DAG.getNode(
                    ISD::SRA, node, MVT::i32, shiftUp,
                    DAG.getConstant(24l, node, MVT::i32));
            } else {
                assert(0 && "Wrong memory vt in sextload!");
            }
        } else {
#ifdef LLVM_OLDER_THAN_16
            finalVal = lsdn->getAlignment() < 4 ?
#else
            finalVal = lsdn->getAlign() < 4 ?
#endif
                DAG.getNode(ISD::SRA, node, MVT::i32, load, lowBits) :
                load;
        }

        SDValue rv;
        if (vt == MVT::i16) {
            rv = DAG.getAnyExtOrTrunc(finalVal, node, MVT::i16);
        } else if (vt == MVT::i8) {
            rv = DAG.getAnyExtOrTrunc(finalVal, node, MVT::i8);
        } else if (vt == MVT::i1) {
            rv = DAG.getAnyExtOrTrunc(finalVal, node, MVT::i1);
        } else {
            assert(0 && "Wrong vt in load lowering!");
        }

        Results.push_back(rv);
        Results.push_back(SDValue(load.getNode(), 1));
    } else {
        assert(false && "ReplaceNodeResults not load!");
    }
}
    SelectionDAG& DAG) const {

    auto lsdn = dyn_cast<LoadSDNode>(op.getNode());
    if (lsdn == nullptr) {
        assert(false && "Not a lodsdnode on LowerExtLoad!");
    }
    auto chain = op.getOperand(0);

    SDValue alignedAddr;
    SDValue lowBits;
#ifdef LLVM_OLDER_THAN_16
    if (lsdn->getAlignment() >= 4) {
#else
    if (lsdn->getAlign() >= 4) {
#endif
        alignedAddr = lsdn->getBasePtr();
        lowBits = DAG.getConstant(0l, op, MVT::i32);
    } else {
        alignedAddr = DAG.getNode(
            ISD::AND, op, MVT::i32, lsdn->getBasePtr(),
            DAG.getConstant(-4l, op, MVT::i32));
        auto lowBytes = DAG.getNode(
            ISD::AND, op, MVT::i32, lsdn->getBasePtr(),
            DAG.getConstant(3l, op, MVT::i32));
        lowBits = DAG.getNode(
            ISD::SHL, op, MVT::i32, lowBytes,
            DAG.getConstant(3l, op, MVT::i32));
    }

    auto load = DAG.getLoad(
        MVT::i32, op, chain, alignedAddr, MachinePointerInfo());
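    // Example: a byte offset of 2 inside the word gives lowBits = 16, so
    // the value is (word >> 16), then masked for a zero-extending load or
    // shifted up and arithmetic-shifted back down for a sign-extending one.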
    if (lsdn->getExtensionType() == ISD::ZEXTLOAD) {
#ifdef LLVM_OLDER_THAN_16
        auto shiftedValue = lsdn->getAlignment() < 4 ?
#else
        auto shiftedValue = lsdn->getAlign() < 4 ?
#endif
            DAG.getNode(ISD::SRA, op, MVT::i32, load, lowBits) :
            load;
        if (lsdn->getMemoryVT() == MVT::i16) {
#ifdef LLVM_OLDER_THAN_16
            assert(lsdn->getAlignment() >= 2 &&
#else
            assert(lsdn->getAlign() >= 2 &&
#endif
                "Cannot (yet?) emulate a 16-bit load which has 1-byte alignment. "
                " 16-bit memory operations needed to compile this code.");
            std::cerr << "\t\tSource is 16 bits." << std::endl;
            auto zext = DAG.getNode(
                ISD::AND, op, MVT::i32, shiftedValue,
                DAG.getConstant(65535l, op, MVT::i32));
            return zext;
        } else if (lsdn->getMemoryVT() == MVT::i8) {
            auto zext = DAG.getNode(
                ISD::AND, op, MVT::i32, shiftedValue,
                DAG.getConstant(255l, op, MVT::i32));
            return zext;
        } else if (lsdn->getMemoryVT() == MVT::i1) {
            auto zext = DAG.getNode(
                ISD::AND, op, MVT::i32, shiftedValue,
                DAG.getConstant(1l, op, MVT::i32));
            return zext;
        }
        assert(false && "Unknown data type on LowerSExtLoad!");
    }
    if (lsdn->getExtensionType() == ISD::SEXTLOAD) {
        if (lsdn->getMemoryVT() == MVT::i16) {
#ifdef LLVM_OLDER_THAN_16
            auto shiftsLeft = lsdn->getAlignment() < 4 ?
#else
            auto shiftsLeft = lsdn->getAlign() < 4 ?
#endif
                DAG.getNode(ISD::SUB, op, MVT::i32,
                            DAG.getConstant(16l, op, MVT::i32),
                            lowBits) :
                DAG.getConstant(16l, op, MVT::i32);
            auto shiftUp = DAG.getNode(
                ISD::SHL, op, MVT::i32, load, shiftsLeft);
            auto shiftDown = DAG.getNode(
                ISD::SRA, op, MVT::i32, shiftUp,
                DAG.getConstant(16l, op, MVT::i32));
            return shiftDown;
        } else if (lsdn->getMemoryVT() == MVT::i8) {
#ifdef LLVM_OLDER_THAN_16
            auto shiftsLeft = lsdn->getAlignment() < 4 ?
#else
            auto shiftsLeft = lsdn->getAlign() < 4 ?
#endif
                DAG.getNode(ISD::SUB, op, MVT::i32,
                            DAG.getConstant(24l, op, MVT::i32),
                            lowBits) :
                DAG.getConstant(24l, op, MVT::i32);
            auto shiftUp =
                DAG.getNode(ISD::SHL, op, MVT::i32, load, shiftsLeft);
            auto shiftDown = DAG.getNode(
                ISD::SRA, op, MVT::i32, shiftUp,
                DAG.getConstant(24l, op, MVT::i32));
            return shiftDown;
        } else if (lsdn->getMemoryVT() == MVT::i1) {
#ifdef LLVM_OLDER_THAN_16
            auto shiftsLeft = lsdn->getAlignment() < 4 ?
#else
            auto shiftsLeft = lsdn->getAlign() < 4 ?
#endif
                DAG.getNode(ISD::SUB, op, MVT::i32,
                            DAG.getConstant(31l, op, MVT::i32),
                            lowBits) :
                DAG.getConstant(31l, op, MVT::i32);
            auto shiftUp =
                DAG.getNode(ISD::SHL, op, MVT::i32, load, shiftsLeft);
            auto shiftDown = DAG.getNode(
                ISD::SRA, op, MVT::i32, shiftUp,
                DAG.getConstant(31l, op, MVT::i32));
            return shiftDown;
        }
        assert(false && "Unknown data type on Lower(Z)ExtLoad!");
    }
    if (lsdn->getExtensionType() == ISD::EXTLOAD) {
#ifdef LLVM_OLDER_THAN_16
        auto shiftedValue = lsdn->getAlignment() < 4 ?
#else
        auto shiftedValue = lsdn->getAlign() < 4 ?
#endif
            DAG.getNode(ISD::SRA, op, MVT::i32, load, lowBits) :
            load;
        auto shiftDown = DAG.getNode(ISD::SRA, op, MVT::i32, load, lowBits);
        return shiftDown;
    }

    // Not an extending load: an i1 load just truncates the loaded word.
    MVT vt = op->getSimpleValueType(0);
    if (vt == MVT::i1 && lsdn->getMemoryVT() == MVT::i1) {
        SDValue trunc = DAG.getAnyExtOrTrunc(load, op, MVT::i1);
        return trunc;
    }

    assert(false && "Should not be here, non-ext-load");